Diffstat (limited to 'sys/arm64')
-rw-r--r--  sys/arm64/acpica/OsdEnvironment.c | 77
-rw-r--r--  sys/arm64/acpica/acpi_iort.c | 589
-rw-r--r--  sys/arm64/acpica/acpi_machdep.c | 281
-rw-r--r--  sys/arm64/acpica/acpi_wakeup.c | 61
-rw-r--r--  sys/arm64/acpica/pci_cfgreg.c | 76
-rw-r--r--  sys/arm64/arm64/autoconf.c | 92
-rw-r--r--  sys/arm64/arm64/bus_machdep.c | 230
-rw-r--r--  sys/arm64/arm64/bus_space_asm.S | 399
-rw-r--r--  sys/arm64/arm64/busdma_bounce.c | 1357
-rw-r--r--  sys/arm64/arm64/busdma_machdep.c | 285
-rw-r--r--  sys/arm64/arm64/bzero.S | 206
-rw-r--r--  sys/arm64/arm64/clock.c | 39
-rw-r--r--  sys/arm64/arm64/copyinout.S | 226
-rw-r--r--  sys/arm64/arm64/cpu_errata.c | 192
-rw-r--r--  sys/arm64/arm64/cpufunc_asm.S | 182
-rw-r--r--  sys/arm64/arm64/db_disasm.c | 70
-rw-r--r--  sys/arm64/arm64/db_interface.c | 194
-rw-r--r--  sys/arm64/arm64/db_trace.c | 133
-rw-r--r--  sys/arm64/arm64/debug_monitor.c | 565
-rw-r--r--  sys/arm64/arm64/disassem.c | 545
-rw-r--r--  sys/arm64/arm64/dump_machdep.c | 73
-rw-r--r--  sys/arm64/arm64/efirt_machdep.c | 280
-rw-r--r--  sys/arm64/arm64/elf32_machdep.c | 261
-rw-r--r--  sys/arm64/arm64/elf_machdep.c | 284
-rw-r--r--  sys/arm64/arm64/exception.S | 255
-rw-r--r--  sys/arm64/arm64/freebsd32_machdep.c | 438
-rw-r--r--  sys/arm64/arm64/genassym.c | 79
-rw-r--r--  sys/arm64/arm64/gic_v3.c | 1271
-rw-r--r--  sys/arm64/arm64/gic_v3_acpi.c | 389
-rw-r--r--  sys/arm64/arm64/gic_v3_fdt.c | 331
-rw-r--r--  sys/arm64/arm64/gic_v3_reg.h | 434
-rw-r--r--  sys/arm64/arm64/gic_v3_var.h | 145
-rw-r--r--  sys/arm64/arm64/gicv3_its.c | 1960
-rw-r--r--  sys/arm64/arm64/identcpu.c | 1667
-rw-r--r--  sys/arm64/arm64/in_cksum.c | 241
-rw-r--r--  sys/arm64/arm64/locore.S | 859
-rw-r--r--  sys/arm64/arm64/machdep.c | 1375
-rw-r--r--  sys/arm64/arm64/machdep_boot.c | 232
-rw-r--r--  sys/arm64/arm64/mem.c | 138
-rw-r--r--  sys/arm64/arm64/memcpy.S | 219
-rw-r--r--  sys/arm64/arm64/memmove.S | 150
-rw-r--r--  sys/arm64/arm64/minidump_machdep.c | 448
-rw-r--r--  sys/arm64/arm64/mp_machdep.c | 896
-rw-r--r--  sys/arm64/arm64/nexus.c | 549
-rw-r--r--  sys/arm64/arm64/ofw_machdep.c | 58
-rw-r--r--  sys/arm64/arm64/pmap.c | 6710
-rw-r--r--  sys/arm64/arm64/stack_machdep.c | 93
-rw-r--r--  sys/arm64/arm64/support.S | 290
-rw-r--r--  sys/arm64/arm64/swtch.S | 292
-rw-r--r--  sys/arm64/arm64/sys_machdep.c | 45
-rw-r--r--  sys/arm64/arm64/trap.c | 567
-rw-r--r--  sys/arm64/arm64/uio_machdep.c | 134
-rw-r--r--  sys/arm64/arm64/uma_machdep.c | 77
-rw-r--r--  sys/arm64/arm64/undefined.c | 177
-rw-r--r--  sys/arm64/arm64/unwind.c | 53
-rw-r--r--  sys/arm64/arm64/vfp.c | 380
-rw-r--r--  sys/arm64/arm64/vm_machdep.c | 300
-rw-r--r--  sys/arm64/broadcom/brcmmdio/mdio_mux_iproc.c | 399
-rw-r--r--  sys/arm64/broadcom/brcmmdio/mdio_nexus_iproc.c | 234
-rw-r--r--  sys/arm64/broadcom/brcmmdio/mdio_ns2_pcie_phy.c | 162
-rw-r--r--  sys/arm64/broadcom/genet/if_genet.c | 1762
-rw-r--r--  sys/arm64/broadcom/genet/if_genetreg.h | 223
-rw-r--r--  sys/arm64/cavium/thunder_pcie_common.c | 209
-rw-r--r--  sys/arm64/cavium/thunder_pcie_common.h | 48
-rw-r--r--  sys/arm64/cavium/thunder_pcie_fdt.c | 160
-rw-r--r--  sys/arm64/cavium/thunder_pcie_pem.c | 921
-rw-r--r--  sys/arm64/cavium/thunder_pcie_pem.h | 54
-rw-r--r--  sys/arm64/cavium/thunder_pcie_pem_fdt.c | 201
-rw-r--r--  sys/arm64/cloudabi32/cloudabi32_sysvec.c | 204
-rw-r--r--  sys/arm64/cloudabi64/cloudabi64_sysvec.c | 188
-rw-r--r--  sys/arm64/conf/DEFAULTS | 16
-rw-r--r--  sys/arm64/conf/GENERIC | 364
-rw-r--r--  sys/arm64/conf/GENERIC-MMCCAM | 23
-rw-r--r--  sys/arm64/conf/GENERIC-NODEBUG | 41
-rw-r--r--  sys/arm64/conf/GENERIC-UP | 23
-rw-r--r--  sys/arm64/conf/Makefile | 5
-rw-r--r--  sys/arm64/conf/NOTES | 238
-rw-r--r--  sys/arm64/coresight/coresight.c | 126
-rw-r--r--  sys/arm64/coresight/coresight.h | 163
-rw-r--r--  sys/arm64/coresight/coresight_acpi.c | 373
-rw-r--r--  sys/arm64/coresight/coresight_cmd.c | 159
-rw-r--r--  sys/arm64/coresight/coresight_cpu_debug.c | 164
-rw-r--r--  sys/arm64/coresight/coresight_etm4x.c | 266
-rw-r--r--  sys/arm64/coresight/coresight_etm4x.h | 184
-rw-r--r--  sys/arm64/coresight/coresight_etm4x_acpi.c | 92
-rw-r--r--  sys/arm64/coresight/coresight_etm4x_fdt.c | 94
-rw-r--r--  sys/arm64/coresight/coresight_fdt.c | 153
-rw-r--r--  sys/arm64/coresight/coresight_funnel.c | 144
-rw-r--r--  sys/arm64/coresight/coresight_funnel.h | 80
-rw-r--r--  sys/arm64/coresight/coresight_funnel_acpi.c | 108
-rw-r--r--  sys/arm64/coresight/coresight_funnel_fdt.c | 106
-rw-r--r--  sys/arm64/coresight/coresight_if.m | 58
-rw-r--r--  sys/arm64/coresight/coresight_replicator.c | 128
-rw-r--r--  sys/arm64/coresight/coresight_replicator.h | 48
-rw-r--r--  sys/arm64/coresight/coresight_replicator_acpi.c | 94
-rw-r--r--  sys/arm64/coresight/coresight_replicator_fdt.c | 95
-rw-r--r--  sys/arm64/coresight/coresight_tmc.c | 349
-rw-r--r--  sys/arm64/coresight/coresight_tmc.h | 137
-rw-r--r--  sys/arm64/coresight/coresight_tmc_acpi.c | 92
-rw-r--r--  sys/arm64/coresight/coresight_tmc_fdt.c | 94
-rw-r--r--  sys/arm64/freescale/imx/clk/imx_clk_composite.c | 309
-rw-r--r--  sys/arm64/freescale/imx/clk/imx_clk_composite.h | 45
-rw-r--r--  sys/arm64/freescale/imx/clk/imx_clk_frac_pll.c | 177
-rw-r--r--  sys/arm64/freescale/imx/clk/imx_clk_frac_pll.h | 42
-rw-r--r--  sys/arm64/freescale/imx/clk/imx_clk_gate.c | 117
-rw-r--r--  sys/arm64/freescale/imx/clk/imx_clk_gate.h | 45
-rw-r--r--  sys/arm64/freescale/imx/clk/imx_clk_mux.c | 136
-rw-r--r--  sys/arm64/freescale/imx/clk/imx_clk_mux.h | 45
-rw-r--r--  sys/arm64/freescale/imx/clk/imx_clk_sscg_pll.c | 195
-rw-r--r--  sys/arm64/freescale/imx/clk/imx_clk_sscg_pll.h | 42
-rw-r--r--  sys/arm64/freescale/imx/imx7gpc.c | 261
-rw-r--r--  sys/arm64/freescale/imx/imx8mq_ccm.c | 484
-rw-r--r--  sys/arm64/freescale/imx/imx8mq_ccm.h | 173
-rw-r--r--  sys/arm64/freescale/imx/imx_ccm_clk.h | 212
-rw-r--r--  sys/arm64/include/_align.h | 44
-rw-r--r--  sys/arm64/include/_bus.h | 45
-rw-r--r--  sys/arm64/include/_inttypes.h | 213
-rw-r--r--  sys/arm64/include/_limits.h | 85
-rw-r--r--  sys/arm64/include/_stdint.h | 158
-rw-r--r--  sys/arm64/include/_types.h | 100
-rw-r--r--  sys/arm64/include/acpica_machdep.h | 63
-rw-r--r--  sys/arm64/include/armreg.h | 930
-rw-r--r--  sys/arm64/include/asm.h | 105
-rw-r--r--  sys/arm64/include/atomic.h | 609
-rw-r--r--  sys/arm64/include/bus.h | 464
-rw-r--r--  sys/arm64/include/bus_dma.h | 153
-rw-r--r--  sys/arm64/include/bus_dma_impl.h | 97
-rw-r--r--  sys/arm64/include/clock.h | 1
-rw-r--r--  sys/arm64/include/counter.h | 87
-rw-r--r--  sys/arm64/include/cpu.h | 212
-rw-r--r--  sys/arm64/include/cpufunc.h | 244
-rw-r--r--  sys/arm64/include/csan.h | 110
-rw-r--r--  sys/arm64/include/db_machdep.h | 123
-rw-r--r--  sys/arm64/include/debug_monitor.h | 70
-rw-r--r--  sys/arm64/include/disassem.h | 42
-rw-r--r--  sys/arm64/include/dump.h | 74
-rw-r--r--  sys/arm64/include/efi.h | 58
-rw-r--r--  sys/arm64/include/elf.h | 149
-rw-r--r--  sys/arm64/include/endian.h | 122
-rw-r--r--  sys/arm64/include/exec.h | 1
-rw-r--r--  sys/arm64/include/float.h | 94
-rw-r--r--  sys/arm64/include/floatingpoint.h | 3
-rw-r--r--  sys/arm64/include/fpu.h | 6
-rw-r--r--  sys/arm64/include/frame.h | 83
-rw-r--r--  sys/arm64/include/hypervisor.h | 185
-rw-r--r--  sys/arm64/include/ieeefp.h | 43
-rw-r--r--  sys/arm64/include/ifunc.h | 51
-rw-r--r--  sys/arm64/include/in_cksum.h | 52
-rw-r--r--  sys/arm64/include/intr.h | 56
-rw-r--r--  sys/arm64/include/iodev.h | 65
-rw-r--r--  sys/arm64/include/kdb.h | 55
-rw-r--r--  sys/arm64/include/machdep.h | 65
-rw-r--r--  sys/arm64/include/md_var.h | 50
-rw-r--r--  sys/arm64/include/memdev.h | 40
-rw-r--r--  sys/arm64/include/metadata.h | 54
-rw-r--r--  sys/arm64/include/minidump.h | 48
-rw-r--r--  sys/arm64/include/ofw_machdep.h | 44
-rw-r--r--  sys/arm64/include/param.h | 130
-rw-r--r--  sys/arm64/include/pcb.h | 81
-rw-r--r--  sys/arm64/include/pci_cfgreg.h | 36
-rw-r--r--  sys/arm64/include/pcpu.h | 86
-rw-r--r--  sys/arm64/include/pcpu_aux.h | 52
-rw-r--r--  sys/arm64/include/pmap.h | 206
-rw-r--r--  sys/arm64/include/pmc_mdep.h | 67
-rw-r--r--  sys/arm64/include/proc.h | 71
-rw-r--r--  sys/arm64/include/procctl.h | 4
-rw-r--r--  sys/arm64/include/profile.h | 80
-rw-r--r--  sys/arm64/include/psl.h | 1
-rw-r--r--  sys/arm64/include/pte.h | 158
-rw-r--r--  sys/arm64/include/ptrace.h | 1
-rw-r--r--  sys/arm64/include/reg.h | 99
-rw-r--r--  sys/arm64/include/reloc.h | 1
-rw-r--r--  sys/arm64/include/resource.h | 49
-rw-r--r--  sys/arm64/include/runq.h | 46
-rw-r--r--  sys/arm64/include/setjmp.h | 73
-rw-r--r--  sys/arm64/include/sf_buf.h | 51
-rw-r--r--  sys/arm64/include/sigframe.h | 2
-rw-r--r--  sys/arm64/include/signal.h | 50
-rw-r--r--  sys/arm64/include/smp.h | 55
-rw-r--r--  sys/arm64/include/stack.h | 43
-rw-r--r--  sys/arm64/include/stdarg.h | 39
-rw-r--r--  sys/arm64/include/sysarch.h | 47
-rw-r--r--  sys/arm64/include/trap.h | 1
-rw-r--r--  sys/arm64/include/ucontext.h | 89
-rw-r--r--  sys/arm64/include/undefined.h | 68
-rw-r--r--  sys/arm64/include/vdso.h | 39
-rw-r--r--  sys/arm64/include/vfp.h | 77
-rw-r--r--  sys/arm64/include/vm.h | 45
-rw-r--r--  sys/arm64/include/vmparam.h | 246
-rw-r--r--  sys/arm64/intel/firmware.c | 122
-rw-r--r--  sys/arm64/intel/intel-smc.h | 99
-rw-r--r--  sys/arm64/intel/stratix10-soc-fpga-mgr.c | 290
-rw-r--r--  sys/arm64/intel/stratix10-svc.c | 271
-rw-r--r--  sys/arm64/intel/stratix10-svc.h | 60
-rw-r--r--  sys/arm64/linux/Makefile | 7
-rw-r--r--  sys/arm64/linux/linux.h | 300
-rw-r--r--  sys/arm64/linux/linux_dummy.c | 167
-rw-r--r--  sys/arm64/linux/linux_genassym.c | 2
-rw-r--r--  sys/arm64/linux/linux_locore.asm | 63
-rw-r--r--  sys/arm64/linux/linux_machdep.c | 143
-rw-r--r--  sys/arm64/linux/linux_proto.h | 1613
-rw-r--r--  sys/arm64/linux/linux_ptrace.c | 56
-rw-r--r--  sys/arm64/linux/linux_support.s | 57
-rw-r--r--  sys/arm64/linux/linux_syscall.h | 272
-rw-r--r--  sys/arm64/linux/linux_syscalls.c | 302
-rw-r--r--  sys/arm64/linux/linux_sysent.c | 312
-rw-r--r--  sys/arm64/linux/linux_systrace_args.c | 7055
-rw-r--r--  sys/arm64/linux/linux_sysvec.c | 566
-rw-r--r--  sys/arm64/linux/linux_vdso.lds.s | 22
-rw-r--r--  sys/arm64/linux/syscalls.conf | 11
-rw-r--r--  sys/arm64/linux/syscalls.master | 1669
-rw-r--r--  sys/arm64/qoriq/clk/ls1046a_clkgen.c | 255
-rw-r--r--  sys/arm64/qoriq/clk/qoriq_clk_pll.c | 152
-rw-r--r--  sys/arm64/qoriq/clk/qoriq_clk_pll.h | 52
-rw-r--r--  sys/arm64/qoriq/clk/qoriq_clkgen.c | 319
-rw-r--r--  sys/arm64/qoriq/clk/qoriq_clkgen.h | 96
-rw-r--r--  sys/arm64/qoriq/ls1046_gpio.c | 585
-rw-r--r--  sys/arm64/qualcomm/qcom_gcc.c | 148
-rw-r--r--  sys/arm64/rockchip/clk/rk3328_cru.c | 1515
-rw-r--r--  sys/arm64/rockchip/clk/rk3399_cru.c | 1273
-rw-r--r--  sys/arm64/rockchip/clk/rk3399_cru_dt.h | 320
-rw-r--r--  sys/arm64/rockchip/clk/rk3399_pmucru.c | 869
-rw-r--r--  sys/arm64/rockchip/clk/rk_clk_armclk.c | 257
-rw-r--r--  sys/arm64/rockchip/clk/rk_clk_armclk.h | 63
-rw-r--r--  sys/arm64/rockchip/clk/rk_clk_composite.c | 370
-rw-r--r--  sys/arm64/rockchip/clk/rk_clk_composite.h | 60
-rw-r--r--  sys/arm64/rockchip/clk/rk_clk_fract.c | 246
-rw-r--r--  sys/arm64/rockchip/clk/rk_clk_fract.h | 44
-rw-r--r--  sys/arm64/rockchip/clk/rk_clk_gate.c | 135
-rw-r--r--  sys/arm64/rockchip/clk/rk_clk_gate.h | 49
-rw-r--r--  sys/arm64/rockchip/clk/rk_clk_mux.c | 137
-rw-r--r--  sys/arm64/rockchip/clk/rk_clk_mux.h | 47
-rw-r--r--  sys/arm64/rockchip/clk/rk_clk_pll.c | 543
-rw-r--r--  sys/arm64/rockchip/clk/rk_clk_pll.h | 66
-rw-r--r--  sys/arm64/rockchip/clk/rk_cru.c | 306
-rw-r--r--  sys/arm64/rockchip/clk/rk_cru.h | 252
-rw-r--r--  sys/arm64/rockchip/if_dwc_rk.c | 625
-rw-r--r--  sys/arm64/rockchip/rk3399_emmcphy.c | 341
-rw-r--r--  sys/arm64/rockchip/rk805.c | 741
-rw-r--r--  sys/arm64/rockchip/rk805reg.h | 98
-rw-r--r--  sys/arm64/rockchip/rk_dwc3.c | 209
-rw-r--r--  sys/arm64/rockchip/rk_gpio.c | 474
-rw-r--r--  sys/arm64/rockchip/rk_grf.c | 79
-rw-r--r--  sys/arm64/rockchip/rk_i2c.c | 700
-rw-r--r--  sys/arm64/rockchip/rk_iodomain.c | 222
-rw-r--r--  sys/arm64/rockchip/rk_pcie.c | 1402
-rw-r--r--  sys/arm64/rockchip/rk_pcie_phy.c | 364
-rw-r--r--  sys/arm64/rockchip/rk_pinctrl.c | 1348
-rw-r--r--  sys/arm64/rockchip/rk_pwm.c | 403
-rw-r--r--  sys/arm64/rockchip/rk_spi.c | 483
-rw-r--r--  sys/arm64/rockchip/rk_tsadc.c | 792
-rw-r--r--  sys/arm64/rockchip/rk_tsadc_if.m | 43
-rw-r--r--  sys/arm64/rockchip/rk_typec_phy.c | 474
-rw-r--r--  sys/arm64/rockchip/rk_usb2phy.c | 417
254 files changed, 77293 insertions, 0 deletions
diff --git a/sys/arm64/acpica/OsdEnvironment.c b/sys/arm64/acpica/OsdEnvironment.c
new file mode 100644
index 000000000000..6efcee9646d0
--- /dev/null
+++ b/sys/arm64/acpica/OsdEnvironment.c
@@ -0,0 +1,77 @@
+/*-
+ * Copyright (c) 2000,2001 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/bus.h>
+#include <sys/sysctl.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/aclocal.h>
+#include <contrib/dev/acpica/include/actables.h>
+
+static u_long acpi_root_phys;
+
+SYSCTL_ULONG(_machdep, OID_AUTO, acpi_root, CTLFLAG_RD, &acpi_root_phys, 0,
+ "The physical address of the RSDP");
+
+ACPI_STATUS
+AcpiOsInitialize(void)
+{
+
+ return (AE_OK);
+}
+
+ACPI_STATUS
+AcpiOsTerminate(void)
+{
+
+ return (AE_OK);
+}
+
+static u_long
+acpi_get_root_from_loader(void)
+{
+ long acpi_root;
+
+ if (resource_long_value("acpi", 0, "rsdp", &acpi_root) == 0)
+ return (acpi_root);
+
+ return (0);
+}
+
+ACPI_PHYSICAL_ADDRESS
+AcpiOsGetRootPointer(void)
+{
+
+ if (acpi_root_phys == 0)
+ acpi_root_phys = acpi_get_root_from_loader();
+
+ return (acpi_root_phys);
+}
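
For context: resource_long_value("acpi", 0, "rsdp", ...) consults the static kernel hints / loader environment, so the boot loader can hand the kernel the RSDP location with a hint of roughly the following form (the exact variable name and the value are illustrative assumptions, not taken from this change); the address saved here is then exported read-only as the machdep.acpi_root sysctl:

hint.acpi.0.rsdp="0x87e800000"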
diff --git a/sys/arm64/acpica/acpi_iort.c b/sys/arm64/acpica/acpi_iort.c
new file mode 100644
index 000000000000..ec5cf799b333
--- /dev/null
+++ b/sys/arm64/acpica/acpi_iort.c
@@ -0,0 +1,589 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * Author: Jayachandran C Nair <jchandra@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_acpi.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+
+#include <machine/intr.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+#include <contrib/dev/acpica/include/actables.h>
+
+#include <dev/acpica/acpivar.h>
+
+/*
+ * Track next XREF available for ITS groups.
+ */
+static u_int acpi_its_xref = ACPI_MSI_XREF;
+
+/*
+ * Some types of IORT nodes have a set of mappings. Each of them map
+ * a range of device IDs [base..end] from the current node to another
+ * node. The corresponding device IDs on destination node starts at
+ * outbase.
+ */
+struct iort_map_entry {
+ u_int base;
+ u_int end;
+ u_int outbase;
+ u_int flags;
+ u_int out_node_offset;
+ struct iort_node *out_node;
+};
+
+/*
+ * The ITS group node does not have any outgoing mappings. It has a
+ * list of GIC ITS blocks which can handle the device ID. We store
+ * the PIC XREF used by the block and the block's proximity data
+ * here, so that they can be retrieved together.
+ */
+struct iort_its_entry {
+ u_int its_id;
+ u_int xref;
+ int pxm;
+};
+
+/*
+ * IORT node. Each node has some device specific data depending on the
+ * type of the node. The node can also have a set of mappings, OR in
+ * case of ITS group nodes a set of ITS entries.
+ * The nodes are kept in a TAILQ by type.
+ */
+struct iort_node {
+ TAILQ_ENTRY(iort_node) next; /* next entry with same type */
+ enum AcpiIortNodeType type; /* ACPI type */
+ u_int node_offset; /* offset in IORT - node ID */
+ u_int nentries; /* items in array below */
+ u_int usecount; /* for bookkeeping */
+ u_int revision; /* node revision */
+ union {
+ ACPI_IORT_ROOT_COMPLEX pci_rc; /* PCI root complex */
+ ACPI_IORT_SMMU smmu;
+ ACPI_IORT_SMMU_V3 smmu_v3;
+ } data;
+ union {
+ struct iort_map_entry *mappings; /* node mappings */
+ struct iort_its_entry *its; /* ITS IDs array */
+ } entries;
+};
+
+/* Lists for each of the types. */
+static TAILQ_HEAD(, iort_node) pci_nodes = TAILQ_HEAD_INITIALIZER(pci_nodes);
+static TAILQ_HEAD(, iort_node) smmu_nodes = TAILQ_HEAD_INITIALIZER(smmu_nodes);
+static TAILQ_HEAD(, iort_node) its_groups = TAILQ_HEAD_INITIALIZER(its_groups);
+
+static int
+iort_entry_get_id_mapping_index(struct iort_node *node)
+{
+
+ switch(node->type) {
+ case ACPI_IORT_NODE_SMMU_V3:
+ /* The ID mapping field was added in version 1 */
+ if (node->revision < 1)
+ return (-1);
+
+ /*
+ * If all the control interrupts are GSIV based, the ID
+ * mapping field is ignored.
+ */
+ if (node->data.smmu_v3.EventGsiv != 0 &&
+ node->data.smmu_v3.PriGsiv != 0 &&
+ node->data.smmu_v3.GerrGsiv != 0 &&
+ node->data.smmu_v3.SyncGsiv != 0)
+ return (-1);
+
+ if (node->data.smmu_v3.IdMappingIndex >= node->nentries)
+ return (-1);
+
+ return (node->data.smmu_v3.IdMappingIndex);
+ case ACPI_IORT_NODE_PMCG:
+ return (0);
+ default:
+ break;
+ }
+
+ return (-1);
+}
+
+/*
+ * Lookup an ID in the mappings array. If successful, map the input ID
+ * to the output ID and return the output node found.
+ */
+static struct iort_node *
+iort_entry_lookup(struct iort_node *node, u_int id, u_int *outid)
+{
+ struct iort_map_entry *entry;
+ int i, id_map;
+
+ id_map = iort_entry_get_id_mapping_index(node);
+ entry = node->entries.mappings;
+ for (i = 0; i < node->nentries; i++, entry++) {
+ if (i == id_map)
+ continue;
+ if (entry->base <= id && id <= entry->end)
+ break;
+ }
+ if (i == node->nentries)
+ return (NULL);
+ if ((entry->flags & ACPI_IORT_ID_SINGLE_MAPPING) == 0)
+ *outid = entry->outbase + (id - entry->base);
+ else
+ *outid = entry->outbase;
+ return (entry->out_node);
+}
+
+/*
+ * Map a PCI RID to a SMMU node or an ITS node, based on outtype.
+ */
+static struct iort_node *
+iort_pci_rc_map(u_int seg, u_int rid, u_int outtype, u_int *outid)
+{
+ struct iort_node *node, *out_node;
+ u_int nxtid;
+
+ out_node = NULL;
+ TAILQ_FOREACH(node, &pci_nodes, next) {
+ if (node->data.pci_rc.PciSegmentNumber != seg)
+ continue;
+ out_node = iort_entry_lookup(node, rid, &nxtid);
+ if (out_node != NULL)
+ break;
+ }
+
+ /* Could not find a PCI RC node with segment and device ID. */
+ if (out_node == NULL)
+ return (NULL);
+
+ /* Node can be SMMU or ITS. If SMMU, we need another lookup. */
+ if (outtype == ACPI_IORT_NODE_ITS_GROUP &&
+ (out_node->type == ACPI_IORT_NODE_SMMU_V3 ||
+ out_node->type == ACPI_IORT_NODE_SMMU)) {
+ out_node = iort_entry_lookup(out_node, nxtid, &nxtid);
+ if (out_node == NULL)
+ return (NULL);
+ }
+
+ KASSERT(out_node->type == outtype, ("mapping fail"));
+ *outid = nxtid;
+ return (out_node);
+}
+
+#ifdef notyet
+/*
+ * Not implemented, map a PCIe device to the SMMU it is associated with.
+ */
+int
+acpi_iort_map_smmu(u_int seg, u_int devid, void **smmu, u_int *sid)
+{
+ /* XXX: convert oref to SMMU device */
+ return (ENXIO);
+}
+#endif
+
+/*
+ * Allocate memory for the node's ID mappings and copy them from the
+ * IORT entry; output references are resolved to node pointers later.
+ */
+static void
+iort_copy_data(struct iort_node *node, ACPI_IORT_NODE *node_entry)
+{
+ ACPI_IORT_ID_MAPPING *map_entry;
+ struct iort_map_entry *mapping;
+ int i;
+
+ map_entry = ACPI_ADD_PTR(ACPI_IORT_ID_MAPPING, node_entry,
+ node_entry->MappingOffset);
+ node->nentries = node_entry->MappingCount;
+ node->usecount = 0;
+ mapping = malloc(sizeof(*mapping) * node->nentries, M_DEVBUF,
+ M_WAITOK | M_ZERO);
+ node->entries.mappings = mapping;
+ for (i = 0; i < node->nentries; i++, mapping++, map_entry++) {
+ mapping->base = map_entry->InputBase;
+ /*
+ * IdCount means "The number of IDs in the range minus one" (ARM DEN 0049D).
+ * We use <= for comparison against this field, so don't add one here.
+ */
+ mapping->end = map_entry->InputBase + map_entry->IdCount;
+ mapping->outbase = map_entry->OutputBase;
+ mapping->out_node_offset = map_entry->OutputReference;
+ mapping->flags = map_entry->Flags;
+ mapping->out_node = NULL;
+ }
+}
+
+/*
+ * Allocate and copy an ITS group.
+ */
+static void
+iort_copy_its(struct iort_node *node, ACPI_IORT_NODE *node_entry)
+{
+ struct iort_its_entry *its;
+ ACPI_IORT_ITS_GROUP *itsg_entry;
+ UINT32 *id;
+ int i;
+
+ itsg_entry = (ACPI_IORT_ITS_GROUP *)node_entry->NodeData;
+ node->nentries = itsg_entry->ItsCount;
+ node->usecount = 0;
+ its = malloc(sizeof(*its) * node->nentries, M_DEVBUF, M_WAITOK | M_ZERO);
+ node->entries.its = its;
+ id = &itsg_entry->Identifiers[0];
+ for (i = 0; i < node->nentries; i++, its++, id++) {
+ its->its_id = *id;
+ its->pxm = -1;
+ its->xref = 0;
+ }
+}
+
+/*
+ * Walk the IORT table and add nodes to corresponding list.
+ */
+static void
+iort_add_nodes(ACPI_IORT_NODE *node_entry, u_int node_offset)
+{
+ ACPI_IORT_ROOT_COMPLEX *pci_rc;
+ ACPI_IORT_SMMU *smmu;
+ ACPI_IORT_SMMU_V3 *smmu_v3;
+ struct iort_node *node;
+
+ node = malloc(sizeof(*node), M_DEVBUF, M_WAITOK | M_ZERO);
+ node->type = node_entry->Type;
+ node->node_offset = node_offset;
+ node->revision = node_entry->Revision;
+
+ /* copy nodes depending on type */
+ switch(node_entry->Type) {
+ case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
+ pci_rc = (ACPI_IORT_ROOT_COMPLEX *)node_entry->NodeData;
+ memcpy(&node->data.pci_rc, pci_rc, sizeof(*pci_rc));
+ iort_copy_data(node, node_entry);
+ TAILQ_INSERT_TAIL(&pci_nodes, node, next);
+ break;
+ case ACPI_IORT_NODE_SMMU:
+ smmu = (ACPI_IORT_SMMU *)node_entry->NodeData;
+ memcpy(&node->data.smmu, smmu, sizeof(*smmu));
+ iort_copy_data(node, node_entry);
+ TAILQ_INSERT_TAIL(&smmu_nodes, node, next);
+ break;
+ case ACPI_IORT_NODE_SMMU_V3:
+ smmu_v3 = (ACPI_IORT_SMMU_V3 *)node_entry->NodeData;
+ memcpy(&node->data.smmu_v3, smmu_v3, sizeof(*smmu_v3));
+ iort_copy_data(node, node_entry);
+ TAILQ_INSERT_TAIL(&smmu_nodes, node, next);
+ break;
+ case ACPI_IORT_NODE_ITS_GROUP:
+ iort_copy_its(node, node_entry);
+ TAILQ_INSERT_TAIL(&its_groups, node, next);
+ break;
+ default:
+ printf("ACPI: IORT: Dropping unhandled type %u\n",
+ node_entry->Type);
+ free(node, M_DEVBUF);
+ break;
+ }
+}
+
+/*
+ * For the given mapping entry, walk through all the possible destination
+ * nodes and resolve the output reference.
+ */
+static void
+iort_resolve_node(struct iort_map_entry *entry, int check_smmu)
+{
+ struct iort_node *node, *np;
+
+ node = NULL;
+ if (check_smmu) {
+ TAILQ_FOREACH(np, &smmu_nodes, next) {
+ if (entry->out_node_offset == np->node_offset) {
+ node = np;
+ break;
+ }
+ }
+ }
+ if (node == NULL) {
+ TAILQ_FOREACH(np, &its_groups, next) {
+ if (entry->out_node_offset == np->node_offset) {
+ node = np;
+ break;
+ }
+ }
+ }
+ if (node != NULL) {
+ node->usecount++;
+ entry->out_node = node;
+ } else {
+ printf("ACPI: IORT: Firmware Bug: no mapping for node %u\n",
+ entry->out_node_offset);
+ }
+}
+
+/*
+ * Resolve all output node references to node pointers.
+ */
+static void
+iort_post_process_mappings(void)
+{
+ struct iort_node *node;
+ int i;
+
+ TAILQ_FOREACH(node, &pci_nodes, next)
+ for (i = 0; i < node->nentries; i++)
+ iort_resolve_node(&node->entries.mappings[i], TRUE);
+ TAILQ_FOREACH(node, &smmu_nodes, next)
+ for (i = 0; i < node->nentries; i++)
+ iort_resolve_node(&node->entries.mappings[i], FALSE);
+ /* TODO: named nodes */
+}
+
+/*
+ * Walk MADT table, assign PIC xrefs to all ITS entries.
+ */
+static void
+madt_resolve_its_xref(ACPI_SUBTABLE_HEADER *entry, void *arg)
+{
+ ACPI_MADT_GENERIC_TRANSLATOR *gict;
+ struct iort_node *its_node;
+ struct iort_its_entry *its_entry;
+ u_int xref;
+ int i, matches;
+
+ if (entry->Type != ACPI_MADT_TYPE_GENERIC_TRANSLATOR)
+ return;
+
+ gict = (ACPI_MADT_GENERIC_TRANSLATOR *)entry;
+ matches = 0;
+ xref = acpi_its_xref++;
+ TAILQ_FOREACH(its_node, &its_groups, next) {
+ its_entry = its_node->entries.its;
+ for (i = 0; i < its_node->nentries; i++, its_entry++) {
+ if (its_entry->its_id == gict->TranslationId) {
+ its_entry->xref = xref;
+ matches++;
+ }
+ }
+ }
+ if (matches == 0)
+ printf("ACPI: IORT: Unused ITS block, ID %u\n",
+ gict->TranslationId);
+}
+
+/*
+ * Walk SRAT, assign proximity to all ITS entries.
+ */
+static void
+srat_resolve_its_pxm(ACPI_SUBTABLE_HEADER *entry, void *arg)
+{
+ ACPI_SRAT_GIC_ITS_AFFINITY *gicits;
+ struct iort_node *its_node;
+ struct iort_its_entry *its_entry;
+ int *map_counts;
+ int i, matches, dom;
+
+ if (entry->Type != ACPI_SRAT_TYPE_GIC_ITS_AFFINITY)
+ return;
+
+ matches = 0;
+ map_counts = arg;
+ gicits = (ACPI_SRAT_GIC_ITS_AFFINITY *)entry;
+ dom = acpi_map_pxm_to_vm_domainid(gicits->ProximityDomain);
+
+ /*
+ * Catch firmware and config errors. map_counts keeps a
+ * count of ProximityDomain values mapping to a domain ID.
+ */
+#if MAXMEMDOM > 1
+ if (dom == -1)
+ printf("Firmware Error: Proximity Domain %d could not be"
+ " mapped for GIC ITS ID %d!\n",
+ gicits->ProximityDomain, gicits->ItsId);
+#endif
+ /* use dom + 1 as index to handle the case where dom == -1 */
+ i = ++map_counts[dom + 1];
+ if (i > 1) {
+#ifdef NUMA
+ if (dom != -1)
+ printf("ERROR: Multiple Proximity Domains map to the"
+ " same NUMA domain %d!\n", dom);
+#else
+ printf("WARNING: multiple Proximity Domains in SRAT but NUMA"
+ " NOT enabled!\n");
+#endif
+ }
+ TAILQ_FOREACH(its_node, &its_groups, next) {
+ its_entry = its_node->entries.its;
+ for (i = 0; i < its_node->nentries; i++, its_entry++) {
+ if (its_entry->its_id == gicits->ItsId) {
+ its_entry->pxm = dom;
+ matches++;
+ }
+ }
+ }
+ if (matches == 0)
+ printf("ACPI: IORT: ITS block %u in SRAT not found in IORT!\n",
+ gicits->ItsId);
+}
+
+/*
+ * Cross check the ITS Id with MADT and (if available) SRAT.
+ */
+static int
+iort_post_process_its(void)
+{
+ ACPI_TABLE_MADT *madt;
+ ACPI_TABLE_SRAT *srat;
+ vm_paddr_t madt_pa, srat_pa;
+ int map_counts[MAXMEMDOM + 1] = { 0 };
+
+ /* Check ITS block in MADT */
+ madt_pa = acpi_find_table(ACPI_SIG_MADT);
+ KASSERT(madt_pa != 0, ("no MADT!"));
+ madt = acpi_map_table(madt_pa, ACPI_SIG_MADT);
+ KASSERT(madt != NULL, ("can't map MADT!"));
+ acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
+ madt_resolve_its_xref, NULL);
+ acpi_unmap_table(madt);
+
+ /* Get proximity if available */
+ srat_pa = acpi_find_table(ACPI_SIG_SRAT);
+ if (srat_pa != 0) {
+ srat = acpi_map_table(srat_pa, ACPI_SIG_SRAT);
+ KASSERT(srat != NULL, ("can't map SRAT!"));
+ acpi_walk_subtables(srat + 1, (char *)srat + srat->Header.Length,
+ srat_resolve_its_pxm, map_counts);
+ acpi_unmap_table(srat);
+ }
+ return (0);
+}
+
+/*
+ * Find, parse, and save IO Remapping Table ("IORT").
+ */
+static int
+acpi_parse_iort(void *dummy __unused)
+{
+ ACPI_TABLE_IORT *iort;
+ ACPI_IORT_NODE *node_entry;
+ vm_paddr_t iort_pa;
+ u_int node_offset;
+
+ iort_pa = acpi_find_table(ACPI_SIG_IORT);
+ if (iort_pa == 0)
+ return (ENXIO);
+
+ iort = acpi_map_table(iort_pa, ACPI_SIG_IORT);
+ if (iort == NULL) {
+ printf("ACPI: Unable to map the IORT table!\n");
+ return (ENXIO);
+ }
+ for (node_offset = iort->NodeOffset;
+ node_offset < iort->Header.Length;
+ node_offset += node_entry->Length) {
+ node_entry = ACPI_ADD_PTR(ACPI_IORT_NODE, iort, node_offset);
+ iort_add_nodes(node_entry, node_offset);
+ }
+ acpi_unmap_table(iort);
+ iort_post_process_mappings();
+ iort_post_process_its();
+ return (0);
+}
+SYSINIT(acpi_parse_iort, SI_SUB_DRIVERS, SI_ORDER_FIRST, acpi_parse_iort, NULL);
+
+/*
+ * Provide ITS ID to PIC xref mapping.
+ */
+int
+acpi_iort_its_lookup(u_int its_id, u_int *xref, int *pxm)
+{
+ struct iort_node *its_node;
+ struct iort_its_entry *its_entry;
+ int i;
+
+ TAILQ_FOREACH(its_node, &its_groups, next) {
+ its_entry = its_node->entries.its;
+ for (i = 0; i < its_node->nentries; i++, its_entry++) {
+ if (its_entry->its_id == its_id) {
+ *xref = its_entry->xref;
+ *pxm = its_entry->pxm;
+ return (0);
+ }
+ }
+ }
+ return (ENOENT);
+}
+
+/*
+ * Find the mapping for a PCIe device given its segment and device ID.
+ * Returns the XREF for MSI interrupt setup and the device ID to use
+ * for the interrupt setup.
+ */
+int
+acpi_iort_map_pci_msi(u_int seg, u_int rid, u_int *xref, u_int *devid)
+{
+ struct iort_node *node;
+
+ node = iort_pci_rc_map(seg, rid, ACPI_IORT_NODE_ITS_GROUP, devid);
+ if (node == NULL)
+ return (ENOENT);
+
+ /* This should be an ITS node */
+ KASSERT(node->type == ACPI_IORT_NODE_ITS_GROUP, ("bad group"));
+
+ /* Return the first ITS entry; we don't handle more than one for now. */
+ *xref = node->entries.its[0].xref;
+ return (0);
+}
+
+int
+acpi_iort_map_pci_smmuv3(u_int seg, u_int rid, u_int *xref, u_int *sid)
+{
+ ACPI_IORT_SMMU_V3 *smmu;
+ struct iort_node *node;
+
+ node = iort_pci_rc_map(seg, rid, ACPI_IORT_NODE_SMMU_V3, sid);
+ if (node == NULL)
+ return (ENOENT);
+
+ /* This should be an SMMU node. */
+ KASSERT(node->type == ACPI_IORT_NODE_SMMU_V3, ("bad node"));
+
+ smmu = (ACPI_IORT_SMMU_V3 *)&node->data.smmu_v3;
+ *xref = smmu->BaseAddress;
+
+ return (0);
+}
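
To illustrate how the lookups above are meant to be consumed, here is a minimal, hypothetical sketch (not part of this change) of an MSI setup path translating a PCI function into an ITS xref and device ID; pci_get_domain() and pci_get_rid() are the stock FreeBSD PCI accessors assumed here:

static int
example_iort_msi_route(device_t pcidev, u_int *xref, u_int *devid)
{
	u_int seg, rid;

	seg = pci_get_domain(pcidev);	/* PCI segment group number */
	rid = pci_get_rid(pcidev);	/* requester ID (bus/slot/function) */

	/* Walk the IORT root-complex mappings built by acpi_parse_iort(). */
	return (acpi_iort_map_pci_msi(seg, rid, xref, devid));
}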
diff --git a/sys/arm64/acpica/acpi_machdep.c b/sys/arm64/acpica/acpi_machdep.c
new file mode 100644
index 000000000000..05ec00ccbe61
--- /dev/null
+++ b/sys/arm64/acpica/acpi_machdep.c
@@ -0,0 +1,281 @@
+/*-
+ * Copyright (c) 2001 Mitsuru IWASAKI
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/machdep.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+#include <contrib/dev/acpica/include/actables.h>
+
+#include <dev/acpica/acpivar.h>
+
+extern struct bus_space memmap_bus;
+
+int
+acpi_machdep_init(device_t dev)
+{
+
+ return (0);
+}
+
+int
+acpi_machdep_quirks(int *quirks)
+{
+
+ return (0);
+}
+
+static void *
+map_table(vm_paddr_t pa, const char *sig)
+{
+ ACPI_TABLE_HEADER *header;
+ vm_offset_t length;
+ void *table;
+
+ header = pmap_mapbios(pa, sizeof(ACPI_TABLE_HEADER));
+ if (strncmp(header->Signature, sig, ACPI_NAMESEG_SIZE) != 0) {
+ pmap_unmapbios((vm_offset_t)header, sizeof(ACPI_TABLE_HEADER));
+ return (NULL);
+ }
+ length = header->Length;
+ pmap_unmapbios((vm_offset_t)header, sizeof(ACPI_TABLE_HEADER));
+
+ table = pmap_mapbios(pa, length);
+ if (ACPI_FAILURE(AcpiTbChecksum(table, length))) {
+ if (bootverbose)
+ printf("ACPI: Failed checksum for table %s\n", sig);
+#if (ACPI_CHECKSUM_ABORT)
+ pmap_unmapbios((vm_offset_t)table, length);
+ return (NULL);
+#endif
+ }
+ return (table);
+}
+
+/*
+ * See if a given ACPI table is the requested table. Returns 1 if
+ * the signature matches, or 0 on failure.
+ */
+static int
+probe_table(vm_paddr_t address, const char *sig)
+{
+ ACPI_TABLE_HEADER *table;
+
+ table = pmap_mapbios(address, sizeof(ACPI_TABLE_HEADER));
+ if (table == NULL) {
+ if (bootverbose)
+ printf("ACPI: Failed to map table at 0x%jx\n",
+ (uintmax_t)address);
+ return (0);
+ }
+
+ if (strncmp(table->Signature, sig, ACPI_NAMESEG_SIZE) != 0) {
+ pmap_unmapbios((vm_offset_t)table, sizeof(ACPI_TABLE_HEADER));
+ return (0);
+ }
+ pmap_unmapbios((vm_offset_t)table, sizeof(ACPI_TABLE_HEADER));
+ return (1);
+}
+
+/* Unmap a table previously mapped via acpi_map_table(). */
+void
+acpi_unmap_table(void *table)
+{
+ ACPI_TABLE_HEADER *header;
+
+ header = (ACPI_TABLE_HEADER *)table;
+ pmap_unmapbios((vm_offset_t)table, header->Length);
+}
+
+/*
+ * Try to map a table at a given physical address previously returned
+ * by acpi_find_table().
+ */
+void *
+acpi_map_table(vm_paddr_t pa, const char *sig)
+{
+
+ return (map_table(pa, sig));
+}
+
+/*
+ * Return the physical address of the requested table or zero if one
+ * is not found.
+ */
+vm_paddr_t
+acpi_find_table(const char *sig)
+{
+ ACPI_PHYSICAL_ADDRESS rsdp_ptr;
+ ACPI_TABLE_RSDP *rsdp;
+ ACPI_TABLE_XSDT *xsdt;
+ ACPI_TABLE_HEADER *table;
+ vm_paddr_t addr;
+ int i, count;
+
+ if (resource_disabled("acpi", 0))
+ return (0);
+
+ /*
+ * Map in the RSDP. Since ACPI uses AcpiOsMapMemory() which in turn
+ * calls pmap_mapbios() to find the RSDP, we assume that we can use
+ * pmap_mapbios() to map the RSDP.
+ */
+ if ((rsdp_ptr = AcpiOsGetRootPointer()) == 0)
+ return (0);
+ rsdp = pmap_mapbios(rsdp_ptr, sizeof(ACPI_TABLE_RSDP));
+ if (rsdp == NULL) {
+ if (bootverbose)
+ printf("ACPI: Failed to map RSDP\n");
+ return (0);
+ }
+
+ addr = 0;
+ if (rsdp->Revision >= 2 && rsdp->XsdtPhysicalAddress != 0) {
+ /*
+ * AcpiOsGetRootPointer only verifies the checksum for
+ * the version 1.0 portion of the RSDP. Version 2.0 has
+ * an additional checksum that we verify first.
+ */
+ if (AcpiTbChecksum((UINT8 *)rsdp, ACPI_RSDP_XCHECKSUM_LENGTH)) {
+ if (bootverbose)
+ printf("ACPI: RSDP failed extended checksum\n");
+ return (0);
+ }
+ xsdt = map_table(rsdp->XsdtPhysicalAddress, ACPI_SIG_XSDT);
+ if (xsdt == NULL) {
+ if (bootverbose)
+ printf("ACPI: Failed to map XSDT\n");
+ pmap_unmapbios((vm_offset_t)rsdp,
+ sizeof(ACPI_TABLE_RSDP));
+ return (0);
+ }
+ count = (xsdt->Header.Length - sizeof(ACPI_TABLE_HEADER)) /
+ sizeof(UINT64);
+ for (i = 0; i < count; i++)
+ if (probe_table(xsdt->TableOffsetEntry[i], sig)) {
+ addr = xsdt->TableOffsetEntry[i];
+ break;
+ }
+ acpi_unmap_table(xsdt);
+ }
+ pmap_unmapbios((vm_offset_t)rsdp, sizeof(ACPI_TABLE_RSDP));
+
+ if (addr == 0)
+ return (0);
+
+ /*
+ * Verify that we can map the full table and that its checksum is
+ * correct, etc.
+ */
+ table = map_table(addr, sig);
+ if (table == NULL)
+ return (0);
+ acpi_unmap_table(table);
+
+ return (addr);
+}
+
+int
+acpi_map_addr(struct acpi_generic_address *addr, bus_space_tag_t *tag,
+ bus_space_handle_t *handle, bus_size_t size)
+{
+ bus_addr_t phys;
+
+ /* Check if the device is Memory mapped */
+ if (addr->SpaceId != 0)
+ return (ENXIO);
+
+ phys = addr->Address;
+ *tag = &memmap_bus;
+
+ return (bus_space_map(*tag, phys, size, 0, handle));
+}
+
+#if MAXMEMDOM > 1
+static void
+parse_pxm_tables(void *dummy)
+{
+ uint64_t mmfr0, parange;
+
+ /* Only parse ACPI tables when booting via ACPI */
+ if (arm64_bus_method != ARM64_BUS_ACPI)
+ return;
+
+ if (!get_kernel_reg(ID_AA64MMFR0_EL1, &mmfr0)) {
+ /* chosen arbitrarily */
+ mmfr0 = ID_AA64MMFR0_PARange_1T;
+ }
+
+ switch (ID_AA64MMFR0_PARange_VAL(mmfr0)) {
+ case ID_AA64MMFR0_PARange_4G:
+ parange = (vm_paddr_t)4 << 30 /* GiB */;
+ break;
+ case ID_AA64MMFR0_PARange_64G:
+ parange = (vm_paddr_t)64 << 30 /* GiB */;
+ break;
+ case ID_AA64MMFR0_PARange_1T:
+ parange = (vm_paddr_t)1 << 40 /* TiB */;
+ break;
+ case ID_AA64MMFR0_PARange_4T:
+ parange = (vm_paddr_t)4 << 40 /* TiB */;
+ break;
+ case ID_AA64MMFR0_PARange_16T:
+ parange = (vm_paddr_t)16 << 40 /* TiB */;
+ break;
+ case ID_AA64MMFR0_PARange_256T:
+ parange = (vm_paddr_t)256 << 40 /* TiB */;
+ break;
+ case ID_AA64MMFR0_PARange_4P:
+ parange = (vm_paddr_t)4 << 50 /* PiB */;
+ break;
+ default:
+ /* chosen arbitrarily */
+ parange = (vm_paddr_t)1 << 40 /* TiB */;
+ printf("Unknown value for PARange in mmfr0 (%#lx)\n", mmfr0);
+ break;
+ }
+
+ acpi_pxm_init(MAXCPU, parange);
+ acpi_pxm_parse_tables();
+ acpi_pxm_set_mem_locality();
+}
+SYSINIT(parse_pxm_tables, SI_SUB_VM - 1, SI_ORDER_FIRST, parse_pxm_tables,
+ NULL);
+#endif
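
A hedged usage sketch of acpi_map_addr() (illustration only, not part of this change): given an ACPI Generic Address Structure describing a memory-mapped register block, say from some table entry "gas", the helper selects the memmap_bus tag and maps the window so the ordinary bus_space accessors work on it:

	ACPI_GENERIC_ADDRESS gas;	/* assumed to describe a memory-mapped block */
	bus_space_tag_t tag;
	bus_space_handle_t handle;
	uint32_t val;

	if (acpi_map_addr(&gas, &tag, &handle, PAGE_SIZE) == 0)
		val = bus_space_read_4(tag, handle, 0);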
diff --git a/sys/arm64/acpica/acpi_wakeup.c b/sys/arm64/acpica/acpi_wakeup.c
new file mode 100644
index 000000000000..7724fddd0fde
--- /dev/null
+++ b/sys/arm64/acpica/acpi_wakeup.c
@@ -0,0 +1,61 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+
+#include <dev/acpica/acpivar.h>
+
+/*
+ * ARM64TODO: Implement this.
+ */
+int
+acpi_sleep_machdep(struct acpi_softc *sc, int state)
+{
+
+ return (-1);
+}
+
+int
+acpi_wakeup_machdep(struct acpi_softc *sc, int state, int sleep_result,
+ int intr_enabled)
+{
+
+ /* ARM64TODO: We will need this with acpi_sleep_machdep */
+ KASSERT(sleep_result == -1,
+ ("acpi_wakeup_machdep: Invalid sleep result"));
+
+ return (sleep_result);
+}
diff --git a/sys/arm64/acpica/pci_cfgreg.c b/sys/arm64/acpica/pci_cfgreg.c
new file mode 100644
index 000000000000..e80803fbaa4a
--- /dev/null
+++ b/sys/arm64/acpica/pci_cfgreg.c
@@ -0,0 +1,76 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under
+ * the sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+
+#include <machine/pci_cfgreg.h>
+
+/*
+ * This file contains stubs for ACPI PCI functions
+ */
+
+/*
+ * Read configuration space register
+ */
+uint32_t
+pci_cfgregread(int bus, int slot, int func, int reg, int bytes)
+{
+
+ /* ARM64TODO */
+ panic("pci_cfgregread not implemented");
+ return (0);
+}
+
+/*
+ * Write configuration space register
+ */
+void
+pci_cfgregwrite(int bus, int slot, int func, int reg, u_int32_t data, int bytes)
+{
+
+ /* ARM64TODO */
+ panic("pci_cfgregwrite not implemented");
+}
+
+/*
+ * Initialize access to configuration space
+ */
+int
+pci_cfgregopen(void)
+{
+
+ /* ARM64TODO */
+ panic("pci_cfgregopen not implemented");
+ return (0);
+}
diff --git a/sys/arm64/arm64/autoconf.c b/sys/arm64/arm64/autoconf.c
new file mode 100644
index 000000000000..9788c789cfc4
--- /dev/null
+++ b/sys/arm64/arm64/autoconf.c
@@ -0,0 +1,92 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Setup the system to run on the current machine.
+ *
+ * Configure() is called at boot time and initializes the vba
+ * device tables and the memory controller monitoring. Available
+ * devices are determined (from possibilities mentioned in ioconf.c),
+ * and the drivers are initialized.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/cons.h>
+#include <sys/kernel.h>
+
+#include <machine/intr.h>
+
+static void configure_first(void *);
+static void configure(void *);
+static void configure_final(void *);
+
+SYSINIT(configure1, SI_SUB_CONFIGURE, SI_ORDER_FIRST, configure_first, NULL);
+/* SI_ORDER_SECOND is hookable */
+SYSINIT(configure2, SI_SUB_CONFIGURE, SI_ORDER_THIRD, configure, NULL);
+/* SI_ORDER_MIDDLE is hookable */
+SYSINIT(configure3, SI_SUB_CONFIGURE, SI_ORDER_ANY, configure_final, NULL);
+
+/*
+ * Determine i/o configuration for a machine.
+ */
+static void
+configure_first(void *dummy)
+{
+
+ /* nexus0 is the top of the device tree */
+ device_add_child(root_bus, "nexus", 0);
+}
+
+static void
+configure(void *dummy)
+{
+
+ /* initialize new bus architecture */
+ root_bus_configure();
+}
+
+static void
+configure_final(void *dummy)
+{
+
+ /* Enable interrupt reception on this CPU */
+ intr_enable();
+ cninit_finish();
+
+ if (bootverbose)
+ printf("Device configuration finished.\n");
+
+ cold = 0;
+}
diff --git a/sys/arm64/arm64/bus_machdep.c b/sys/arm64/arm64/bus_machdep.c
new file mode 100644
index 000000000000..1fabb91c575f
--- /dev/null
+++ b/sys/arm64/arm64/bus_machdep.c
@@ -0,0 +1,230 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#define KCSAN_RUNTIME
+
+#include "opt_platform.h"
+
+#include <sys/param.h>
+__FBSDID("$FreeBSD$");
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+
+uint8_t generic_bs_r_1(void *, bus_space_handle_t, bus_size_t);
+uint16_t generic_bs_r_2(void *, bus_space_handle_t, bus_size_t);
+uint32_t generic_bs_r_4(void *, bus_space_handle_t, bus_size_t);
+uint64_t generic_bs_r_8(void *, bus_space_handle_t, bus_size_t);
+
+void generic_bs_rm_1(void *, bus_space_handle_t, bus_size_t, uint8_t *,
+ bus_size_t);
+void generic_bs_rm_2(void *, bus_space_handle_t, bus_size_t, uint16_t *,
+ bus_size_t);
+void generic_bs_rm_4(void *, bus_space_handle_t, bus_size_t, uint32_t *,
+ bus_size_t);
+void generic_bs_rm_8(void *, bus_space_handle_t, bus_size_t, uint64_t *,
+ bus_size_t);
+
+void generic_bs_rr_1(void *, bus_space_handle_t, bus_size_t, uint8_t *,
+ bus_size_t);
+void generic_bs_rr_2(void *, bus_space_handle_t, bus_size_t, uint16_t *,
+ bus_size_t);
+void generic_bs_rr_4(void *, bus_space_handle_t, bus_size_t, uint32_t *,
+ bus_size_t);
+void generic_bs_rr_8(void *, bus_space_handle_t, bus_size_t, uint64_t *,
+ bus_size_t);
+
+void generic_bs_w_1(void *, bus_space_handle_t, bus_size_t, uint8_t);
+void generic_bs_w_2(void *, bus_space_handle_t, bus_size_t, uint16_t);
+void generic_bs_w_4(void *, bus_space_handle_t, bus_size_t, uint32_t);
+void generic_bs_w_8(void *, bus_space_handle_t, bus_size_t, uint64_t);
+
+void generic_bs_wm_1(void *, bus_space_handle_t, bus_size_t, const uint8_t *,
+ bus_size_t);
+void generic_bs_wm_2(void *, bus_space_handle_t, bus_size_t, const uint16_t *,
+ bus_size_t);
+void generic_bs_wm_4(void *, bus_space_handle_t, bus_size_t, const uint32_t *,
+ bus_size_t);
+void generic_bs_wm_8(void *, bus_space_handle_t, bus_size_t, const uint64_t *,
+ bus_size_t);
+
+void generic_bs_wr_1(void *, bus_space_handle_t, bus_size_t, const uint8_t *,
+ bus_size_t);
+void generic_bs_wr_2(void *, bus_space_handle_t, bus_size_t, const uint16_t *,
+ bus_size_t);
+void generic_bs_wr_4(void *, bus_space_handle_t, bus_size_t, const uint32_t *,
+ bus_size_t);
+void generic_bs_wr_8(void *, bus_space_handle_t, bus_size_t, const uint64_t *,
+ bus_size_t);
+
+static int
+generic_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flags,
+ bus_space_handle_t *bshp)
+{
+ void *va;
+
+ va = pmap_mapdev(bpa, size);
+ if (va == NULL)
+ return (ENOMEM);
+ *bshp = (bus_space_handle_t)va;
+ return (0);
+}
+
+static void
+generic_bs_unmap(void *t, bus_space_handle_t bsh, bus_size_t size)
+{
+
+ pmap_unmapdev(bsh, size);
+}
+
+static void
+generic_bs_barrier(void *t, bus_space_handle_t bsh, bus_size_t offset,
+ bus_size_t size, int flags)
+{
+}
+
+static int
+generic_bs_subregion(void *t, bus_space_handle_t bsh, bus_size_t offset,
+ bus_size_t size, bus_space_handle_t *nbshp)
+{
+
+ *nbshp = bsh + offset;
+ return (0);
+}
+
+struct bus_space memmap_bus = {
+ /* cookie */
+ .bs_cookie = NULL,
+
+ /* mapping/unmapping */
+ .bs_map = generic_bs_map,
+ .bs_unmap = generic_bs_unmap,
+ .bs_subregion = generic_bs_subregion,
+
+ /* allocation/deallocation */
+ .bs_alloc = NULL,
+ .bs_free = NULL,
+
+ /* barrier */
+ .bs_barrier = generic_bs_barrier,
+
+ /* read single */
+ .bs_r_1 = generic_bs_r_1,
+ .bs_r_2 = generic_bs_r_2,
+ .bs_r_4 = generic_bs_r_4,
+ .bs_r_8 = generic_bs_r_8,
+
+ /* read multiple */
+ .bs_rm_1 = generic_bs_rm_1,
+ .bs_rm_2 = generic_bs_rm_2,
+ .bs_rm_4 = generic_bs_rm_4,
+ .bs_rm_8 = generic_bs_rm_8,
+
+ /* read region */
+ .bs_rr_1 = generic_bs_rr_1,
+ .bs_rr_2 = generic_bs_rr_2,
+ .bs_rr_4 = generic_bs_rr_4,
+ .bs_rr_8 = generic_bs_rr_8,
+
+ /* write single */
+ .bs_w_1 = generic_bs_w_1,
+ .bs_w_2 = generic_bs_w_2,
+ .bs_w_4 = generic_bs_w_4,
+ .bs_w_8 = generic_bs_w_8,
+
+ /* write multiple */
+ .bs_wm_1 = generic_bs_wm_1,
+ .bs_wm_2 = generic_bs_wm_2,
+ .bs_wm_4 = generic_bs_wm_4,
+ .bs_wm_8 = generic_bs_wm_8,
+
+ /* write region */
+ .bs_wr_1 = generic_bs_wr_1,
+ .bs_wr_2 = generic_bs_wr_2,
+ .bs_wr_4 = generic_bs_wr_4,
+ .bs_wr_8 = generic_bs_wr_8,
+
+ /* set multiple */
+ .bs_sm_1 = NULL,
+ .bs_sm_2 = NULL,
+ .bs_sm_4 = NULL,
+ .bs_sm_8 = NULL,
+
+ /* set region */
+ .bs_sr_1 = NULL,
+ .bs_sr_2 = NULL,
+ .bs_sr_4 = NULL,
+ .bs_sr_8 = NULL,
+
+ /* copy */
+ .bs_c_1 = NULL,
+ .bs_c_2 = NULL,
+ .bs_c_4 = NULL,
+ .bs_c_8 = NULL,
+
+ /* read single stream */
+ .bs_r_1_s = NULL,
+ .bs_r_2_s = NULL,
+ .bs_r_4_s = NULL,
+ .bs_r_8_s = NULL,
+
+ /* read multiple stream */
+ .bs_rm_1_s = generic_bs_rm_1,
+ .bs_rm_2_s = generic_bs_rm_2,
+ .bs_rm_4_s = generic_bs_rm_4,
+ .bs_rm_8_s = generic_bs_rm_8,
+
+ /* read region stream */
+ .bs_rr_1_s = NULL,
+ .bs_rr_2_s = NULL,
+ .bs_rr_4_s = NULL,
+ .bs_rr_8_s = NULL,
+
+ /* write single stream */
+ .bs_w_1_s = NULL,
+ .bs_w_2_s = NULL,
+ .bs_w_4_s = NULL,
+ .bs_w_8_s = NULL,
+
+ /* write multiple stream */
+ .bs_wm_1_s = generic_bs_wm_1,
+ .bs_wm_2_s = generic_bs_wm_2,
+ .bs_wm_4_s = generic_bs_wm_4,
+ .bs_wm_8_s = generic_bs_wm_8,
+
+ /* write region stream */
+ .bs_wr_1_s = NULL,
+ .bs_wr_2_s = NULL,
+ .bs_wr_4_s = NULL,
+ .bs_wr_8_s = NULL,
+};
+
+#ifdef FDT
+bus_space_tag_t fdtbus_bs_tag = &memmap_bus;
+#endif
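
As a rough illustration (the physical address and size below are examples only, not taken from this change), the memmap_bus tag defined above is what backs the generic bus_space(9) accessors on arm64, so raw device register access through it looks like:

	bus_space_handle_t bsh;
	uint32_t reg;

	if (bus_space_map(&memmap_bus, 0x09000000, 0x1000, 0, &bsh) == 0) {
		reg = bus_space_read_4(&memmap_bus, bsh, 0);
		bus_space_unmap(&memmap_bus, bsh, 0x1000);
	}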
diff --git a/sys/arm64/arm64/bus_space_asm.S b/sys/arm64/arm64/bus_space_asm.S
new file mode 100644
index 000000000000..d919bd5c61b1
--- /dev/null
+++ b/sys/arm64/arm64/bus_space_asm.S
@@ -0,0 +1,399 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+
+__FBSDID("$FreeBSD$");
+
+ENTRY(generic_bs_r_1)
+ ldrb w0, [x1, x2]
+ ret
+END(generic_bs_r_1)
+
+ENTRY(generic_bs_r_2)
+ ldrh w0, [x1, x2]
+ ret
+END(generic_bs_r_2)
+
+ENTRY(generic_bs_r_4)
+ ldr w0, [x1, x2]
+ ret
+END(generic_bs_r_4)
+
+ENTRY(generic_bs_r_8)
+ ldr x0, [x1, x2]
+ ret
+END(generic_bs_r_8)
+
+ENTRY(generic_bs_rm_1)
+ /* If there is anything to read. */
+ cbz x4, 2f
+
+ /* Calculate the device address. */
+ add x0, x1, x2
+ /*
+ * x0 = The device address.
+ * x3 = The kernel address.
+ * x4 = Count
+ */
+
+ /* Read the data. */
+1: ldrb w1, [x0]
+ strb w1, [x3], #1
+ subs x4, x4, #1
+ b.ne 1b
+
+2: ret
+END(generic_bs_rm_1)
+
+ENTRY(generic_bs_rm_2)
+ /* If there is anything to read. */
+ cbz x4, 2f
+
+ /* Calculate the device address. */
+ add x0, x1, x2
+ /*
+ * x0 = The device address.
+ * x3 = The kernel address.
+ * x4 = Count
+ */
+
+ /* Read the data. */
+1: ldrh w1, [x0]
+ strh w1, [x3], #2
+ subs x4, x4, #1
+ b.ne 1b
+
+2: ret
+END(generic_bs_rm_2)
+
+ENTRY(generic_bs_rm_4)
+ /* If there is anything to read. */
+ cbz x4, 2f
+
+ /* Calculate the device address. */
+ add x0, x1, x2
+ /*
+ * x0 = The device address.
+ * x3 = The kernel address.
+ * x4 = Count
+ */
+
+ /* Read the data. */
+1: ldr w1, [x0]
+ str w1, [x3], #4
+ subs x4, x4, #1
+ b.ne 1b
+
+2: ret
+END(generic_bs_rm_4)
+
+ENTRY(generic_bs_rm_8)
+ /* If there is anything to read. */
+ cbz x4, 2f
+
+ /* Calculate the device address. */
+ add x0, x1, x2
+ /*
+ * x0 = The device address.
+ * x3 = The kernel address.
+ * x4 = Count
+ */
+
+ /* Read the data. */
+1: ldr x1, [x0]
+ str x1, [x3], #8
+ subs x4, x4, #1
+ b.ne 1b
+
+2: ret
+END(generic_bs_rm_8)
+
+ENTRY(generic_bs_rr_1)
+ /* If there is anything to read. */
+ cbz x4, 2f
+
+ /* Calculate the device address. */
+ add x0, x1, x2
+ /*
+ * x0 = The device address.
+ * x3 = The kernel address.
+ * x4 = Count
+ */
+
+ /* Read the data. */
+1: ldrb w1, [x0], #1
+ strb w1, [x3], #1
+ subs x4, x4, #1
+ b.ne 1b
+
+2: ret
+END(generic_bs_rr_1)
+
+ENTRY(generic_bs_rr_2)
+ /* If there is anything to read. */
+ cbz x4, 2f
+
+ /* Calculate the device address. */
+ add x0, x1, x2
+ /*
+ * x0 = The device address.
+ * x3 = The kernel address.
+ * x4 = Count
+ */
+
+ /* Read the data. */
+1: ldrh w1, [x0], #2
+ strh w1, [x3], #2
+ subs x4, x4, #1
+ b.ne 1b
+
+2: ret
+END(generic_bs_rr_2)
+
+ENTRY(generic_bs_rr_4)
+ /* If there is anything to read. */
+ cbz x4, 2f
+
+ /* Calculate the device address. */
+ add x0, x1, x2
+ /*
+ * x0 = The device address.
+ * x3 = The kernel address.
+ * x4 = Count
+ */
+
+ /* Read the data. */
+1: ldr w1, [x0], #4
+ str w1, [x3], #4
+ subs x4, x4, #1
+ b.ne 1b
+
+2: ret
+END(generic_bs_rr_4)
+
+ENTRY(generic_bs_rr_8)
+	/* Is there anything to read? */
+ cbz x4, 2f
+
+ /* Calculate the device address. */
+ add x0, x1, x2
+ /*
+ * x0 = The device address.
+ * x3 = The kernel address.
+ * x4 = Count
+ */
+
+ /* Read the data. */
+1: ldr x1, [x0], #8
+ str x1, [x3], #8
+ subs x4, x4, #1
+ b.ne 1b
+
+2: ret
+END(generic_bs_rr_8)
+
+
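+/*
+ * Single-value bus space writers: store the value passed in w3/x3 at the
+ * bus space handle (x1) plus the offset (x2).
+ */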
+ENTRY(generic_bs_w_1)
+ strb w3, [x1, x2]
+ ret
+END(generic_bs_w_1)
+
+ENTRY(generic_bs_w_2)
+ strh w3, [x1, x2]
+ ret
+END(generic_bs_w_2)
+
+ENTRY(generic_bs_w_4)
+ str w3, [x1, x2]
+ ret
+END(generic_bs_w_4)
+
+ENTRY(generic_bs_w_8)
+ str x3, [x1, x2]
+ ret
+END(generic_bs_w_8)
+
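+/*
+ * Write-multiple: write x4 values from the kernel buffer at x3 to the
+ * single device register at x1 + x2.  The device address is not
+ * incremented between stores.
+ */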
+ENTRY(generic_bs_wm_1)
+ /* If there is anything to write. */
+ cbz x4, 2f
+
+ add x0, x1, x2
+ /*
+ * x0 = The device address.
+ * x3 = The kernel address.
+ * x4 = Count
+ */
+
+ /* Write the data */
+1: ldrb w1, [x3], #1
+ strb w1, [x0]
+ subs x4, x4, #1
+ b.ne 1b
+
+2: ret
+END(generic_bs_wm_1)
+
+ENTRY(generic_bs_wm_2)
+ /* If there is anything to write. */
+ cbz x4, 2f
+
+ add x0, x1, x2
+ /*
+ * x0 = The device address.
+ * x3 = The kernel address.
+ * x4 = Count
+ */
+
+ /* Write the data */
+1: ldrh w1, [x3], #2
+ strh w1, [x0]
+ subs x4, x4, #1
+ b.ne 1b
+
+2: ret
+END(generic_bs_wm_2)
+
+ENTRY(generic_bs_wm_4)
+ /* If there is anything to write. */
+ cbz x4, 2f
+
+ add x0, x1, x2
+ /*
+ * x0 = The device address.
+ * x3 = The kernel address.
+ * x4 = Count
+ */
+
+ /* Write the data */
+1: ldr w1, [x3], #4
+ str w1, [x0]
+ subs x4, x4, #1
+ b.ne 1b
+
+2: ret
+END(generic_bs_wm_4)
+
+ENTRY(generic_bs_wm_8)
+ /* If there is anything to write. */
+ cbz x4, 2f
+
+ add x0, x1, x2
+ /*
+ * x0 = The device address.
+ * x3 = The kernel address.
+ * x4 = Count
+ */
+
+ /* Write the data */
+1: ldr x1, [x3], #8
+ str x1, [x0]
+ subs x4, x4, #1
+ b.ne 1b
+
+2: ret
+END(generic_bs_wm_8)
+
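+/*
+ * Write-region: copy x4 values from the kernel buffer at x3 to consecutive
+ * device addresses starting at x1 + x2; both addresses advance after each
+ * access.
+ */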
+ENTRY(generic_bs_wr_1)
+	/* Is there anything to write? */
+ cbz x4, 2f
+
+ add x0, x1, x2
+ /*
+ * x0 = The device address.
+ * x3 = The kernel address.
+ * x4 = Count
+ */
+
+ /* Write the data */
+1: ldrb w1, [x3], #1
+ strb w1, [x0], #1
+ subs x4, x4, #1
+ b.ne 1b
+
+2: ret
+END(generic_bs_wr_1)
+
+ENTRY(generic_bs_wr_2)
+	/* Is there anything to write? */
+ cbz x4, 2f
+
+ add x0, x1, x2
+ /*
+ * x0 = The device address.
+ * x3 = The kernel address.
+ * x4 = Count
+ */
+
+ /* Write the data */
+1: ldrh w1, [x3], #2
+ strh w1, [x0], #2
+ subs x4, x4, #1
+ b.ne 1b
+
+2: ret
+END(generic_bs_wr_2)
+
+ENTRY(generic_bs_wr_4)
+	/* Is there anything to write? */
+ cbz x4, 2f
+
+ add x0, x1, x2
+ /*
+ * x0 = The device address.
+ * x3 = The kernel address.
+ * x4 = Count
+ */
+
+ /* Write the data */
+1: ldr w1, [x3], #4
+ str w1, [x0], #4
+ subs x4, x4, #1
+ b.ne 1b
+
+2: ret
+END(generic_bs_wr_4)
+
+ENTRY(generic_bs_wr_8)
+	/* Is there anything to write? */
+ cbz x4, 2f
+
+ add x0, x1, x2
+ /*
+ * x0 = The device address.
+ * x3 = The kernel address.
+ * x4 = Count
+ */
+
+ /* Write the data */
+1: ldr x1, [x3], #8
+ str x1, [x0], #8
+ subs x4, x4, #1
+ b.ne 1b
+
+2: ret
+END(generic_bs_wr_8)
diff --git a/sys/arm64/arm64/busdma_bounce.c b/sys/arm64/arm64/busdma_bounce.c
new file mode 100644
index 000000000000..9d737d5c9021
--- /dev/null
+++ b/sys/arm64/arm64/busdma_bounce.c
@@ -0,0 +1,1357 @@
+/*-
+ * Copyright (c) 1997, 1998 Justin T. Gibbs.
+ * Copyright (c) 2015-2016 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Andrew Turner
+ * under sponsorship of the FreeBSD Foundation.
+ *
+ * Portions of this software were developed by Semihalf
+ * under sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/bus.h>
+#include <sys/interrupt.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/lock.h>
+#include <sys/proc.h>
+#include <sys/memdesc.h>
+#include <sys/mutex.h>
+#include <sys/sysctl.h>
+#include <sys/uio.h>
+
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+
+#include <machine/atomic.h>
+#include <machine/bus.h>
+#include <machine/md_var.h>
+#include <arm64/include/bus_dma_impl.h>
+
+#define MAX_BPAGES 4096
+
+enum {
+ BF_COULD_BOUNCE = 0x01,
+ BF_MIN_ALLOC_COMP = 0x02,
+ BF_KMEM_ALLOC = 0x04,
+ BF_COHERENT = 0x10,
+};
+
+struct bounce_zone;
+
+struct bus_dma_tag {
+ struct bus_dma_tag_common common;
+ int map_count;
+ int bounce_flags;
+ bus_dma_segment_t *segments;
+ struct bounce_zone *bounce_zone;
+};
+
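+/*
+ * A bounce page stands in for a client page that the device cannot address
+ * (or that violates the tag's constraints); data is copied between the two
+ * in bounce_bus_dmamap_sync().
+ */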
+struct bounce_page {
+ vm_offset_t vaddr; /* kva of bounce buffer */
+ bus_addr_t busaddr; /* Physical address */
+ vm_offset_t datavaddr; /* kva of client data */
+ vm_page_t datapage; /* physical page of client data */
+ vm_offset_t dataoffs; /* page offset of client data */
+ bus_size_t datacount; /* client data count */
+ STAILQ_ENTRY(bounce_page) links;
+};
+
+int busdma_swi_pending;
+
+struct bounce_zone {
+ STAILQ_ENTRY(bounce_zone) links;
+ STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
+ int total_bpages;
+ int free_bpages;
+ int reserved_bpages;
+ int active_bpages;
+ int total_bounced;
+ int total_deferred;
+ int map_count;
+ bus_size_t alignment;
+ bus_addr_t lowaddr;
+ char zoneid[8];
+ char lowaddrid[20];
+ struct sysctl_ctx_list sysctl_tree;
+ struct sysctl_oid *sysctl_tree_top;
+};
+
+static struct mtx bounce_lock;
+static int total_bpages;
+static int busdma_zonecount;
+static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
+
+static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
+ "Busdma parameters");
+SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
+ "Total bounce pages");
+
+struct sync_list {
+ vm_offset_t vaddr; /* kva of client data */
+ bus_addr_t paddr; /* physical address */
+ vm_page_t pages; /* starting page of client data */
+ bus_size_t datacount; /* client data count */
+};
+
+struct bus_dmamap {
+ struct bp_list bpages;
+ int pagesneeded;
+ int pagesreserved;
+ bus_dma_tag_t dmat;
+ struct memdesc mem;
+ bus_dmamap_callback_t *callback;
+ void *callback_arg;
+ STAILQ_ENTRY(bus_dmamap) links;
+ u_int flags;
+#define DMAMAP_COULD_BOUNCE (1 << 0)
+#define DMAMAP_FROM_DMAMEM (1 << 1)
+ int sync_count;
+ struct sync_list slist[];
+};
+
+static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
+static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
+
+static void init_bounce_pages(void *dummy);
+static int alloc_bounce_zone(bus_dma_tag_t dmat);
+static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
+static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
+ int commit);
+static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
+ vm_offset_t vaddr, bus_addr_t addr, bus_size_t size);
+static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
+int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
+static bool _bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf,
+ bus_size_t buflen, int *pagesneeded);
+static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
+ pmap_t pmap, void *buf, bus_size_t buflen, int flags);
+static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
+ vm_paddr_t buf, bus_size_t buflen, int flags);
+static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
+ int flags);
+
+/*
+ * Allocate a device specific dma_tag.
+ */
+static int
+bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
+ bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
+ bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
+ int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+ void *lockfuncarg, bus_dma_tag_t *dmat)
+{
+ bus_dma_tag_t newtag;
+ int error;
+
+ *dmat = NULL;
+ error = common_bus_dma_tag_create(parent != NULL ? &parent->common :
+ NULL, alignment, boundary, lowaddr, highaddr, filter, filterarg,
+ maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg,
+ sizeof (struct bus_dma_tag), (void **)&newtag);
+ if (error != 0)
+ return (error);
+
+ newtag->common.impl = &bus_dma_bounce_impl;
+ newtag->map_count = 0;
+ newtag->segments = NULL;
+
+ if ((flags & BUS_DMA_COHERENT) != 0)
+ newtag->bounce_flags |= BF_COHERENT;
+
+ if (parent != NULL) {
+ if ((newtag->common.filter != NULL ||
+ (parent->bounce_flags & BF_COULD_BOUNCE) != 0))
+ newtag->bounce_flags |= BF_COULD_BOUNCE;
+
+ /* Copy some flags from the parent */
+ newtag->bounce_flags |= parent->bounce_flags & BF_COHERENT;
+ }
+
+ if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) ||
+ newtag->common.alignment > 1)
+ newtag->bounce_flags |= BF_COULD_BOUNCE;
+
+ if (((newtag->bounce_flags & BF_COULD_BOUNCE) != 0) &&
+ (flags & BUS_DMA_ALLOCNOW) != 0) {
+ struct bounce_zone *bz;
+
+ /* Must bounce */
+ if ((error = alloc_bounce_zone(newtag)) != 0) {
+ free(newtag, M_DEVBUF);
+ return (error);
+ }
+ bz = newtag->bounce_zone;
+
+ if (ptoa(bz->total_bpages) < maxsize) {
+ int pages;
+
+ pages = atop(round_page(maxsize)) - bz->total_bpages;
+
+ /* Add pages to our bounce pool */
+ if (alloc_bounce_pages(newtag, pages) < pages)
+ error = ENOMEM;
+ }
+ /* Performed initial allocation */
+ newtag->bounce_flags |= BF_MIN_ALLOC_COMP;
+ } else
+ error = 0;
+
+ if (error != 0)
+ free(newtag, M_DEVBUF);
+ else
+ *dmat = newtag;
+ CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
+ __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
+ error);
+ return (error);
+}
+
+static int
+bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat)
+{
+ bus_dma_tag_t dmat_copy, parent;
+ int error;
+
+ error = 0;
+ dmat_copy = dmat;
+
+ if (dmat != NULL) {
+ if (dmat->map_count != 0) {
+ error = EBUSY;
+ goto out;
+ }
+ while (dmat != NULL) {
+ parent = (bus_dma_tag_t)dmat->common.parent;
+ atomic_subtract_int(&dmat->common.ref_count, 1);
+ if (dmat->common.ref_count == 0) {
+ if (dmat->segments != NULL)
+ free(dmat->segments, M_DEVBUF);
+ free(dmat, M_DEVBUF);
+ /*
+ * Last reference count, so
+ * release our reference
+ * count on our parent.
+ */
+ dmat = parent;
+ } else
+ dmat = NULL;
+ }
+ }
+out:
+ CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
+ return (error);
+}
+
+static bool
+bounce_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
+{
+
+ if ((dmat->bounce_flags & BF_COULD_BOUNCE) == 0)
+ return (true);
+ return (!_bus_dmamap_pagesneeded(dmat, buf, buflen, NULL));
+}
+
+static bus_dmamap_t
+alloc_dmamap(bus_dma_tag_t dmat, int flags)
+{
+ u_long mapsize;
+ bus_dmamap_t map;
+
+ mapsize = sizeof(*map);
+ mapsize += sizeof(struct sync_list) * dmat->common.nsegments;
+ map = malloc(mapsize, M_DEVBUF, flags | M_ZERO);
+ if (map == NULL)
+ return (NULL);
+
+ /* Initialize the new map */
+ STAILQ_INIT(&map->bpages);
+
+ return (map);
+}
+
+/*
+ * Allocate a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+static int
+bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
+{
+ struct bounce_zone *bz;
+ int error, maxpages, pages;
+
+ error = 0;
+
+ if (dmat->segments == NULL) {
+ dmat->segments = (bus_dma_segment_t *)malloc(
+ sizeof(bus_dma_segment_t) * dmat->common.nsegments,
+ M_DEVBUF, M_NOWAIT);
+ if (dmat->segments == NULL) {
+ CTR3(KTR_BUSDMA, "%s: tag %p error %d",
+ __func__, dmat, ENOMEM);
+ return (ENOMEM);
+ }
+ }
+
+ *mapp = alloc_dmamap(dmat, M_NOWAIT);
+ if (*mapp == NULL) {
+ CTR3(KTR_BUSDMA, "%s: tag %p error %d",
+ __func__, dmat, ENOMEM);
+ return (ENOMEM);
+ }
+
+ /*
+ * Bouncing might be required if the driver asks for an active
+ * exclusion region, a data alignment that is stricter than 1, and/or
+ * an active address boundary.
+ */
+ if (dmat->bounce_flags & BF_COULD_BOUNCE) {
+ /* Must bounce */
+ if (dmat->bounce_zone == NULL) {
+ if ((error = alloc_bounce_zone(dmat)) != 0) {
+ free(*mapp, M_DEVBUF);
+ return (error);
+ }
+ }
+ bz = dmat->bounce_zone;
+
+ (*mapp)->flags = DMAMAP_COULD_BOUNCE;
+
+ /*
+ * Attempt to add pages to our pool on a per-instance
+ * basis up to a sane limit.
+ */
+ if (dmat->common.alignment > 1)
+ maxpages = MAX_BPAGES;
+ else
+ maxpages = MIN(MAX_BPAGES, Maxmem -
+ atop(dmat->common.lowaddr));
+ if ((dmat->bounce_flags & BF_MIN_ALLOC_COMP) == 0 ||
+ (bz->map_count > 0 && bz->total_bpages < maxpages)) {
+ pages = MAX(atop(dmat->common.maxsize), 1);
+ pages = MIN(maxpages - bz->total_bpages, pages);
+ pages = MAX(pages, 1);
+ if (alloc_bounce_pages(dmat, pages) < pages)
+ error = ENOMEM;
+ if ((dmat->bounce_flags & BF_MIN_ALLOC_COMP)
+ == 0) {
+ if (error == 0) {
+ dmat->bounce_flags |=
+ BF_MIN_ALLOC_COMP;
+ }
+ } else
+ error = 0;
+ }
+ bz->map_count++;
+ }
+ if (error == 0)
+ dmat->map_count++;
+ else
+ free(*mapp, M_DEVBUF);
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+ __func__, dmat, dmat->common.flags, error);
+ return (error);
+}
+
+/*
+ * Destroy a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+static int
+bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+
+ /* Check we are destroying the correct map type */
+ if ((map->flags & DMAMAP_FROM_DMAMEM) != 0)
+ panic("bounce_bus_dmamap_destroy: Invalid map freed\n");
+
+ if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
+ CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, EBUSY);
+ return (EBUSY);
+ }
+ if (dmat->bounce_zone) {
+ KASSERT((map->flags & DMAMAP_COULD_BOUNCE) != 0,
+ ("%s: Bounce zone when cannot bounce", __func__));
+ dmat->bounce_zone->map_count--;
+ }
+ free(map, M_DEVBUF);
+ dmat->map_count--;
+ CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
+ return (0);
+}
+
+/*
+ * Allocate a piece of memory that can be efficiently mapped into
+ * bus device space based on the constraints listed in the dma tag.
+ * A dmamap for use with dmamap_load is also allocated.
+ */
+static int
+bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
+ bus_dmamap_t *mapp)
+{
+ /*
+ * XXX ARM64TODO:
+	 * This bus_dma implementation requires an IO-coherent architecture.
+	 * If IO-coherency is not guaranteed, the BUS_DMA_COHERENT flag has
+	 * to be implemented using non-cacheable memory.
+ */
+
+ vm_memattr_t attr;
+ int mflags;
+
+ if (flags & BUS_DMA_NOWAIT)
+ mflags = M_NOWAIT;
+ else
+ mflags = M_WAITOK;
+
+ if (dmat->segments == NULL) {
+ dmat->segments = (bus_dma_segment_t *)malloc(
+ sizeof(bus_dma_segment_t) * dmat->common.nsegments,
+ M_DEVBUF, mflags);
+ if (dmat->segments == NULL) {
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+ __func__, dmat, dmat->common.flags, ENOMEM);
+ return (ENOMEM);
+ }
+ }
+ if (flags & BUS_DMA_ZERO)
+ mflags |= M_ZERO;
+ if (flags & BUS_DMA_NOCACHE)
+ attr = VM_MEMATTR_UNCACHEABLE;
+ else if ((flags & BUS_DMA_COHERENT) != 0 &&
+ (dmat->bounce_flags & BF_COHERENT) == 0)
+ /*
+		 * If we have a non-coherent tag and are trying to allocate
+		 * a coherent block of memory, it needs to be uncached.
+ */
+ attr = VM_MEMATTR_UNCACHEABLE;
+ else
+ attr = VM_MEMATTR_DEFAULT;
+
+ /*
+	 * Create the map, but don't set the could-bounce flag as
+	 * this allocation should never bounce.
+ */
+ *mapp = alloc_dmamap(dmat, mflags);
+ if (*mapp == NULL) {
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+ __func__, dmat, dmat->common.flags, ENOMEM);
+ return (ENOMEM);
+ }
+ (*mapp)->flags = DMAMAP_FROM_DMAMEM;
+
+ /*
+ * Allocate the buffer from the malloc(9) allocator if...
+ * - It's small enough to fit into a single power of two sized bucket.
+ * - The alignment is less than or equal to the maximum size
+ * - The low address requirement is fulfilled.
+ * else allocate non-contiguous pages if...
+ * - The page count that could get allocated doesn't exceed
+	 *    nsegments, even when the maximum segment size is less
+ * than PAGE_SIZE.
+ * - The alignment constraint isn't larger than a page boundary.
+ * - There are no boundary-crossing constraints.
+ * else allocate a block of contiguous pages because one or more of the
+ * constraints is something that only the contig allocator can fulfill.
+ *
+	 * NOTE: The (dmat->common.alignment <= dmat->common.maxsize) check
+ * below is just a quick hack. The exact alignment guarantees
+ * of malloc(9) need to be nailed down, and the code below
+ * should be rewritten to take that into account.
+ *
+ * In the meantime warn the user if malloc gets it wrong.
+ */
+ if ((dmat->common.maxsize <= PAGE_SIZE) &&
+ (dmat->common.alignment <= dmat->common.maxsize) &&
+ dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
+ attr == VM_MEMATTR_DEFAULT) {
+ *vaddr = malloc(dmat->common.maxsize, M_DEVBUF, mflags);
+ } else if (dmat->common.nsegments >=
+ howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz, PAGE_SIZE)) &&
+ dmat->common.alignment <= PAGE_SIZE &&
+ (dmat->common.boundary % PAGE_SIZE) == 0) {
+ /* Page-based multi-segment allocations allowed */
+ *vaddr = (void *)kmem_alloc_attr(dmat->common.maxsize, mflags,
+ 0ul, dmat->common.lowaddr, attr);
+ dmat->bounce_flags |= BF_KMEM_ALLOC;
+ } else {
+ *vaddr = (void *)kmem_alloc_contig(dmat->common.maxsize, mflags,
+ 0ul, dmat->common.lowaddr, dmat->common.alignment != 0 ?
+ dmat->common.alignment : 1ul, dmat->common.boundary, attr);
+ dmat->bounce_flags |= BF_KMEM_ALLOC;
+ }
+ if (*vaddr == NULL) {
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+ __func__, dmat, dmat->common.flags, ENOMEM);
+ free(*mapp, M_DEVBUF);
+ return (ENOMEM);
+ } else if (vtophys(*vaddr) & (dmat->common.alignment - 1)) {
+ printf("bus_dmamem_alloc failed to align memory properly.\n");
+ }
+ dmat->map_count++;
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+ __func__, dmat, dmat->common.flags, 0);
+ return (0);
+}
+
+/*
+ * Free a piece of memory and its associated dmamap that were allocated
+ * via bus_dmamem_alloc. Make the same choice for free/contigfree.
+ */
+static void
+bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
+{
+
+ /*
+	 * Check the map came from bounce_bus_dmamem_alloc.  The
+	 * BF_KMEM_ALLOC flag is clear if malloc() was used to allocate
+	 * the memory and set if kmem_alloc_attr() or kmem_alloc_contig()
+	 * was used.
+ */
+ if ((map->flags & DMAMAP_FROM_DMAMEM) == 0)
+ panic("bus_dmamem_free: Invalid map freed\n");
+ if ((dmat->bounce_flags & BF_KMEM_ALLOC) == 0)
+ free(vaddr, M_DEVBUF);
+ else
+ kmem_free((vm_offset_t)vaddr, dmat->common.maxsize);
+ free(map, M_DEVBUF);
+ dmat->map_count--;
+ CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
+ dmat->bounce_flags);
+}
+
+static bool
+_bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen,
+ int *pagesneeded)
+{
+ bus_addr_t curaddr;
+ bus_size_t sgsize;
+ int count;
+
+ /*
+ * Count the number of bounce pages needed in order to
+ * complete this transfer
+ */
+ count = 0;
+ curaddr = buf;
+ while (buflen != 0) {
+ sgsize = MIN(buflen, dmat->common.maxsegsz);
+ if (bus_dma_run_filter(&dmat->common, curaddr)) {
+ sgsize = MIN(sgsize,
+ PAGE_SIZE - (curaddr & PAGE_MASK));
+ if (pagesneeded == NULL)
+ return (true);
+ count++;
+ }
+ curaddr += sgsize;
+ buflen -= sgsize;
+ }
+
+ if (pagesneeded != NULL)
+ *pagesneeded = count;
+ return (count != 0);
+}
+
+static void
+_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
+ bus_size_t buflen, int flags)
+{
+
+ if ((map->flags & DMAMAP_COULD_BOUNCE) != 0 && map->pagesneeded == 0) {
+ _bus_dmamap_pagesneeded(dmat, buf, buflen, &map->pagesneeded);
+ CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
+ }
+}
+
+static void
+_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
+ void *buf, bus_size_t buflen, int flags)
+{
+ vm_offset_t vaddr;
+ vm_offset_t vendaddr;
+ bus_addr_t paddr;
+ bus_size_t sg_len;
+
+ if ((map->flags & DMAMAP_COULD_BOUNCE) != 0 && map->pagesneeded == 0) {
+ CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
+ "alignment= %d", dmat->common.lowaddr,
+ ptoa((vm_paddr_t)Maxmem),
+ dmat->common.boundary, dmat->common.alignment);
+ CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map,
+ map->pagesneeded);
+ /*
+ * Count the number of bounce pages
+ * needed in order to complete this transfer
+ */
+ vaddr = (vm_offset_t)buf;
+ vendaddr = (vm_offset_t)buf + buflen;
+
+ while (vaddr < vendaddr) {
+ sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
+ if (pmap == kernel_pmap)
+ paddr = pmap_kextract(vaddr);
+ else
+ paddr = pmap_extract(pmap, vaddr);
+ if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
+ sg_len = roundup2(sg_len,
+ dmat->common.alignment);
+ map->pagesneeded++;
+ }
+ vaddr += sg_len;
+ }
+ CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
+ }
+}
+
+static int
+_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
+{
+
+ /* Reserve Necessary Bounce Pages */
+ mtx_lock(&bounce_lock);
+ if (flags & BUS_DMA_NOWAIT) {
+ if (reserve_bounce_pages(dmat, map, 0) != 0) {
+ mtx_unlock(&bounce_lock);
+ return (ENOMEM);
+ }
+ } else {
+ if (reserve_bounce_pages(dmat, map, 1) != 0) {
+ /* Queue us for resources */
+ STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
+ mtx_unlock(&bounce_lock);
+ return (EINPROGRESS);
+ }
+ }
+ mtx_unlock(&bounce_lock);
+
+ return (0);
+}
+
+/*
+ * Add a single contiguous physical range to the segment list.
+ */
+static bus_size_t
+_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
+ bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
+{
+ bus_addr_t baddr, bmask;
+ int seg;
+
+ /*
+ * Make sure we don't cross any boundaries.
+ */
+ bmask = ~(dmat->common.boundary - 1);
+ if (dmat->common.boundary > 0) {
+ baddr = (curaddr + dmat->common.boundary) & bmask;
+ if (sgsize > (baddr - curaddr))
+ sgsize = (baddr - curaddr);
+ }
+
+ /*
+ * Insert chunk into a segment, coalescing with
+ * previous segment if possible.
+ */
+ seg = *segp;
+ if (seg == -1) {
+ seg = 0;
+ segs[seg].ds_addr = curaddr;
+ segs[seg].ds_len = sgsize;
+ } else {
+ if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
+ (segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
+ (dmat->common.boundary == 0 ||
+ (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
+ segs[seg].ds_len += sgsize;
+ else {
+ if (++seg >= dmat->common.nsegments)
+ return (0);
+ segs[seg].ds_addr = curaddr;
+ segs[seg].ds_len = sgsize;
+ }
+ }
+ *segp = seg;
+ return (sgsize);
+}
+
+/*
+ * Utility function to load a physical buffer. segp contains
+ * the starting segment on entry, and the ending segment on exit.
+ */
+static int
+bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
+ vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
+ int *segp)
+{
+ struct sync_list *sl;
+ bus_size_t sgsize;
+ bus_addr_t curaddr, sl_end;
+ int error;
+
+ if (segs == NULL)
+ segs = dmat->segments;
+
+ if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) {
+ _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
+ if (map->pagesneeded != 0) {
+ error = _bus_dmamap_reserve_pages(dmat, map, flags);
+ if (error)
+ return (error);
+ }
+ }
+
+ sl = map->slist + map->sync_count - 1;
+ sl_end = 0;
+
+ while (buflen > 0) {
+ curaddr = buf;
+ sgsize = MIN(buflen, dmat->common.maxsegsz);
+ if (((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) &&
+ map->pagesneeded != 0 &&
+ bus_dma_run_filter(&dmat->common, curaddr)) {
+ sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
+ curaddr = add_bounce_page(dmat, map, 0, curaddr,
+ sgsize);
+ } else if ((dmat->bounce_flags & BF_COHERENT) == 0) {
+ if (map->sync_count > 0)
+ sl_end = sl->paddr + sl->datacount;
+
+ if (map->sync_count == 0 || curaddr != sl_end) {
+ if (++map->sync_count > dmat->common.nsegments)
+ break;
+ sl++;
+ sl->vaddr = 0;
+ sl->paddr = curaddr;
+ sl->datacount = sgsize;
+ sl->pages = PHYS_TO_VM_PAGE(curaddr);
+ KASSERT(sl->pages != NULL,
+ ("%s: page at PA:0x%08lx is not in "
+ "vm_page_array", __func__, curaddr));
+ } else
+ sl->datacount += sgsize;
+ }
+ sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
+ segp);
+ if (sgsize == 0)
+ break;
+ buf += sgsize;
+ buflen -= sgsize;
+ }
+
+ /*
+ * Did we fit?
+ */
+ return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
+}
+
+/*
+ * Utility function to load a linear buffer. segp contains
+ * the starting segment on entry, and the ending segment on exit.
+ */
+static int
+bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+ bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
+ int *segp)
+{
+ struct sync_list *sl;
+ bus_size_t sgsize, max_sgsize;
+ bus_addr_t curaddr, sl_pend;
+ vm_offset_t kvaddr, vaddr, sl_vend;
+ int error;
+
+ if (segs == NULL)
+ segs = dmat->segments;
+
+ if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) {
+ _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
+ if (map->pagesneeded != 0) {
+ error = _bus_dmamap_reserve_pages(dmat, map, flags);
+ if (error)
+ return (error);
+ }
+ }
+
+ sl = map->slist + map->sync_count - 1;
+ vaddr = (vm_offset_t)buf;
+ sl_pend = 0;
+ sl_vend = 0;
+
+ while (buflen > 0) {
+ /*
+ * Get the physical address for this segment.
+ */
+ if (pmap == kernel_pmap) {
+ curaddr = pmap_kextract(vaddr);
+ kvaddr = vaddr;
+ } else {
+ curaddr = pmap_extract(pmap, vaddr);
+ kvaddr = 0;
+ }
+
+ /*
+ * Compute the segment size, and adjust counts.
+ */
+ max_sgsize = MIN(buflen, dmat->common.maxsegsz);
+ sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
+ if (((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) &&
+ map->pagesneeded != 0 &&
+ bus_dma_run_filter(&dmat->common, curaddr)) {
+ sgsize = roundup2(sgsize, dmat->common.alignment);
+ sgsize = MIN(sgsize, max_sgsize);
+ curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
+ sgsize);
+ } else if ((dmat->bounce_flags & BF_COHERENT) == 0) {
+ sgsize = MIN(sgsize, max_sgsize);
+ if (map->sync_count > 0) {
+ sl_pend = sl->paddr + sl->datacount;
+ sl_vend = sl->vaddr + sl->datacount;
+ }
+
+ if (map->sync_count == 0 ||
+ (kvaddr != 0 && kvaddr != sl_vend) ||
+ (curaddr != sl_pend)) {
+ if (++map->sync_count > dmat->common.nsegments)
+ goto cleanup;
+ sl++;
+ sl->vaddr = kvaddr;
+ sl->paddr = curaddr;
+ if (kvaddr != 0) {
+ sl->pages = NULL;
+ } else {
+ sl->pages = PHYS_TO_VM_PAGE(curaddr);
+ KASSERT(sl->pages != NULL,
+ ("%s: page at PA:0x%08lx is not "
+ "in vm_page_array", __func__,
+ curaddr));
+ }
+ sl->datacount = sgsize;
+ } else
+ sl->datacount += sgsize;
+ } else {
+ sgsize = MIN(sgsize, max_sgsize);
+ }
+ sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
+ segp);
+ if (sgsize == 0)
+ break;
+ vaddr += sgsize;
+ buflen -= sgsize;
+ }
+
+cleanup:
+ /*
+ * Did we fit?
+ */
+ return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
+}
+
+static void
+bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
+{
+
+ if ((map->flags & DMAMAP_COULD_BOUNCE) == 0)
+ return;
+ map->mem = *mem;
+ map->dmat = dmat;
+ map->callback = callback;
+ map->callback_arg = callback_arg;
+}
+
+static bus_dma_segment_t *
+bounce_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
+ bus_dma_segment_t *segs, int nsegs, int error)
+{
+
+ if (segs == NULL)
+ segs = dmat->segments;
+ return (segs);
+}
+
+/*
+ * Release the mapping held by map.
+ */
+static void
+bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+ struct bounce_page *bpage;
+
+ while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
+ STAILQ_REMOVE_HEAD(&map->bpages, links);
+ free_bounce_page(dmat, bpage);
+ }
+
+ map->sync_count = 0;
+}
+
+static void
+dma_preread_safe(vm_offset_t va, vm_size_t size)
+{
+ /*
+ * Write back any partial cachelines immediately before and
+ * after the DMA region.
+ */
+ if (va & (dcache_line_size - 1))
+ cpu_dcache_wb_range(va, 1);
+ if ((va + size) & (dcache_line_size - 1))
+ cpu_dcache_wb_range(va + size, 1);
+
+ cpu_dcache_inv_range(va, size);
+}
+
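+/*
+ * Perform the data cache maintenance required by 'op' for a single
+ * sync_list entry, temporarily mapping unmapped pages with
+ * pmap_quick_enter_page() as needed.
+ */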
+static void
+dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op)
+{
+ uint32_t len, offset;
+ vm_page_t m;
+ vm_paddr_t pa;
+ vm_offset_t va, tempva;
+ bus_size_t size;
+
+ offset = sl->paddr & PAGE_MASK;
+ m = sl->pages;
+ size = sl->datacount;
+ pa = sl->paddr;
+
+ for ( ; size != 0; size -= len, pa += len, offset = 0, ++m) {
+ tempva = 0;
+ if (sl->vaddr == 0) {
+ len = min(PAGE_SIZE - offset, size);
+ tempva = pmap_quick_enter_page(m);
+ va = tempva | offset;
+ KASSERT(pa == (VM_PAGE_TO_PHYS(m) | offset),
+ ("unexpected vm_page_t phys: 0x%16lx != 0x%16lx",
+ VM_PAGE_TO_PHYS(m) | offset, pa));
+ } else {
+ len = sl->datacount;
+ va = sl->vaddr;
+ }
+
+ switch (op) {
+ case BUS_DMASYNC_PREWRITE:
+ case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD:
+ cpu_dcache_wb_range(va, len);
+ break;
+ case BUS_DMASYNC_PREREAD:
+ /*
+ * An mbuf may start in the middle of a cacheline. There
+ * will be no cpu writes to the beginning of that line
+ * (which contains the mbuf header) while dma is in
+ * progress. Handle that case by doing a writeback of
+ * just the first cacheline before invalidating the
+ * overall buffer. Any mbuf in a chain may have this
+ * misalignment. Buffers which are not mbufs bounce if
+ * they are not aligned to a cacheline.
+ */
+ dma_preread_safe(va, len);
+ break;
+ case BUS_DMASYNC_POSTREAD:
+ case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
+ cpu_dcache_inv_range(va, len);
+ break;
+ default:
+ panic("unsupported combination of sync operations: "
+ "0x%08x\n", op);
+ }
+
+ if (tempva != 0)
+ pmap_quick_remove_page(tempva);
+ }
+}
+
+static void
+bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
+ bus_dmasync_op_t op)
+{
+ struct bounce_page *bpage;
+ struct sync_list *sl, *end;
+ vm_offset_t datavaddr, tempvaddr;
+
+ if (op == BUS_DMASYNC_POSTWRITE)
+ return;
+
+ if ((op & BUS_DMASYNC_POSTREAD) != 0) {
+ /*
+ * Wait for any DMA operations to complete before the bcopy.
+ */
+ dsb(sy);
+ }
+
+ if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
+ "performing bounce", __func__, dmat, dmat->common.flags,
+ op);
+
+ if ((op & BUS_DMASYNC_PREWRITE) != 0) {
+ while (bpage != NULL) {
+ tempvaddr = 0;
+ datavaddr = bpage->datavaddr;
+ if (datavaddr == 0) {
+ tempvaddr = pmap_quick_enter_page(
+ bpage->datapage);
+ datavaddr = tempvaddr | bpage->dataoffs;
+ }
+
+ bcopy((void *)datavaddr,
+ (void *)bpage->vaddr, bpage->datacount);
+ if (tempvaddr != 0)
+ pmap_quick_remove_page(tempvaddr);
+ if ((dmat->bounce_flags & BF_COHERENT) == 0)
+ cpu_dcache_wb_range(bpage->vaddr,
+ bpage->datacount);
+ bpage = STAILQ_NEXT(bpage, links);
+ }
+ dmat->bounce_zone->total_bounced++;
+ } else if ((op & BUS_DMASYNC_PREREAD) != 0) {
+ while (bpage != NULL) {
+ if ((dmat->bounce_flags & BF_COHERENT) == 0)
+ cpu_dcache_wbinv_range(bpage->vaddr,
+ bpage->datacount);
+ bpage = STAILQ_NEXT(bpage, links);
+ }
+ }
+
+ if ((op & BUS_DMASYNC_POSTREAD) != 0) {
+ while (bpage != NULL) {
+ if ((dmat->bounce_flags & BF_COHERENT) == 0)
+ cpu_dcache_inv_range(bpage->vaddr,
+ bpage->datacount);
+ tempvaddr = 0;
+ datavaddr = bpage->datavaddr;
+ if (datavaddr == 0) {
+ tempvaddr = pmap_quick_enter_page(
+ bpage->datapage);
+ datavaddr = tempvaddr | bpage->dataoffs;
+ }
+
+ bcopy((void *)bpage->vaddr,
+ (void *)datavaddr, bpage->datacount);
+
+ if (tempvaddr != 0)
+ pmap_quick_remove_page(tempvaddr);
+ bpage = STAILQ_NEXT(bpage, links);
+ }
+ dmat->bounce_zone->total_bounced++;
+ }
+ }
+
+ /*
+ * Cache maintenance for normal (non-COHERENT non-bounce) buffers.
+ */
+ if (map->sync_count != 0) {
+ sl = &map->slist[0];
+ end = &map->slist[map->sync_count];
+ CTR3(KTR_BUSDMA, "%s: tag %p op 0x%x "
+ "performing sync", __func__, dmat, op);
+
+ for ( ; sl != end; ++sl)
+ dma_dcache_sync(sl, op);
+ }
+
+ if ((op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) != 0) {
+ /*
+ * Wait for the bcopy to complete before any DMA operations.
+ */
+ dsb(sy);
+ }
+}
+
+static void
+init_bounce_pages(void *dummy __unused)
+{
+
+ total_bpages = 0;
+ STAILQ_INIT(&bounce_zone_list);
+ STAILQ_INIT(&bounce_map_waitinglist);
+ STAILQ_INIT(&bounce_map_callbacklist);
+ mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
+}
+SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
+
+static struct sysctl_ctx_list *
+busdma_sysctl_tree(struct bounce_zone *bz)
+{
+
+ return (&bz->sysctl_tree);
+}
+
+static struct sysctl_oid *
+busdma_sysctl_tree_top(struct bounce_zone *bz)
+{
+
+ return (bz->sysctl_tree_top);
+}
+
+static int
+alloc_bounce_zone(bus_dma_tag_t dmat)
+{
+ struct bounce_zone *bz;
+
+ /* Check to see if we already have a suitable zone */
+ STAILQ_FOREACH(bz, &bounce_zone_list, links) {
+ if ((dmat->common.alignment <= bz->alignment) &&
+ (dmat->common.lowaddr >= bz->lowaddr)) {
+ dmat->bounce_zone = bz;
+ return (0);
+ }
+ }
+
+ if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
+ M_NOWAIT | M_ZERO)) == NULL)
+ return (ENOMEM);
+
+ STAILQ_INIT(&bz->bounce_page_list);
+ bz->free_bpages = 0;
+ bz->reserved_bpages = 0;
+ bz->active_bpages = 0;
+ bz->lowaddr = dmat->common.lowaddr;
+ bz->alignment = MAX(dmat->common.alignment, PAGE_SIZE);
+ bz->map_count = 0;
+ snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
+ busdma_zonecount++;
+ snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
+ STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
+ dmat->bounce_zone = bz;
+
+ sysctl_ctx_init(&bz->sysctl_tree);
+ bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
+ SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
+ CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
+ if (bz->sysctl_tree_top == NULL) {
+ sysctl_ctx_free(&bz->sysctl_tree);
+ return (0); /* XXX error code? */
+ }
+
+ SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
+ SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
+ "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
+ "Total bounce pages");
+ SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
+ SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
+ "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
+ "Free bounce pages");
+ SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
+ SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
+ "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
+ "Reserved bounce pages");
+ SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
+ SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
+ "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
+ "Active bounce pages");
+ SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
+ SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
+ "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
+ "Total bounce requests");
+ SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
+ SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
+ "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
+ "Total bounce requests that were deferred");
+ SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
+ SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
+ "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
+ SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
+ SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
+ "alignment", CTLFLAG_RD, &bz->alignment, "");
+
+ return (0);
+}
+
+static int
+alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
+{
+ struct bounce_zone *bz;
+ int count;
+
+ bz = dmat->bounce_zone;
+ count = 0;
+ while (numpages > 0) {
+ struct bounce_page *bpage;
+
+ bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
+ M_NOWAIT | M_ZERO);
+
+ if (bpage == NULL)
+ break;
+ bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
+ M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
+ if (bpage->vaddr == 0) {
+ free(bpage, M_DEVBUF);
+ break;
+ }
+ bpage->busaddr = pmap_kextract(bpage->vaddr);
+ mtx_lock(&bounce_lock);
+ STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
+ total_bpages++;
+ bz->total_bpages++;
+ bz->free_bpages++;
+ mtx_unlock(&bounce_lock);
+ count++;
+ numpages--;
+ }
+ return (count);
+}
+
+static int
+reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
+{
+ struct bounce_zone *bz;
+ int pages;
+
+ mtx_assert(&bounce_lock, MA_OWNED);
+ bz = dmat->bounce_zone;
+ pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
+ if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
+ return (map->pagesneeded - (map->pagesreserved + pages));
+ bz->free_bpages -= pages;
+ bz->reserved_bpages += pages;
+ map->pagesreserved += pages;
+ pages = map->pagesneeded - map->pagesreserved;
+
+ return (pages);
+}
+
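+/*
+ * Take a bounce page from the zone's free list to cover 'size' bytes of
+ * client data at physical address 'addr' (and kernel address 'vaddr' when
+ * one is known) and return the bus address the device should use instead.
+ */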
+static bus_addr_t
+add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
+ bus_addr_t addr, bus_size_t size)
+{
+ struct bounce_zone *bz;
+ struct bounce_page *bpage;
+
+ KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
+ KASSERT((map->flags & DMAMAP_COULD_BOUNCE) != 0,
+ ("add_bounce_page: bad map %p", map));
+
+ bz = dmat->bounce_zone;
+ if (map->pagesneeded == 0)
+ panic("add_bounce_page: map doesn't need any pages");
+ map->pagesneeded--;
+
+ if (map->pagesreserved == 0)
+ panic("add_bounce_page: map doesn't need any pages");
+ map->pagesreserved--;
+
+ mtx_lock(&bounce_lock);
+ bpage = STAILQ_FIRST(&bz->bounce_page_list);
+ if (bpage == NULL)
+ panic("add_bounce_page: free page list is empty");
+
+ STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
+ bz->reserved_bpages--;
+ bz->active_bpages++;
+ mtx_unlock(&bounce_lock);
+
+ if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
+ /* Page offset needs to be preserved. */
+ bpage->vaddr |= addr & PAGE_MASK;
+ bpage->busaddr |= addr & PAGE_MASK;
+ }
+ bpage->datavaddr = vaddr;
+ bpage->datapage = PHYS_TO_VM_PAGE(addr);
+ bpage->dataoffs = addr & PAGE_MASK;
+ bpage->datacount = size;
+ STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
+ return (bpage->busaddr);
+}
+
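+/*
+ * Return a bounce page to its zone and, if a map is queued waiting for
+ * pages, try to satisfy its reservation and schedule the deferred load
+ * via the busdma software interrupt.
+ */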
+static void
+free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
+{
+ struct bus_dmamap *map;
+ struct bounce_zone *bz;
+
+ bz = dmat->bounce_zone;
+ bpage->datavaddr = 0;
+ bpage->datacount = 0;
+ if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
+ /*
+ * Reset the bounce page to start at offset 0. Other uses
+ * of this bounce page may need to store a full page of
+ * data and/or assume it starts on a page boundary.
+ */
+ bpage->vaddr &= ~PAGE_MASK;
+ bpage->busaddr &= ~PAGE_MASK;
+ }
+
+ mtx_lock(&bounce_lock);
+ STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
+ bz->free_bpages++;
+ bz->active_bpages--;
+ if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
+ if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
+ STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
+ STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
+ map, links);
+ busdma_swi_pending = 1;
+ bz->total_deferred++;
+ swi_sched(vm_ih, 0);
+ }
+ }
+ mtx_unlock(&bounce_lock);
+}
+
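+/*
+ * Software interrupt handler for deferred bus_dmamap_load() requests:
+ * retry each queued map load with the driver-supplied lock held.
+ */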
+void
+busdma_swi(void)
+{
+ bus_dma_tag_t dmat;
+ struct bus_dmamap *map;
+
+ mtx_lock(&bounce_lock);
+ while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
+ STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
+ mtx_unlock(&bounce_lock);
+ dmat = map->dmat;
+ (dmat->common.lockfunc)(dmat->common.lockfuncarg, BUS_DMA_LOCK);
+ bus_dmamap_load_mem(map->dmat, map, &map->mem,
+ map->callback, map->callback_arg, BUS_DMA_WAITOK);
+ (dmat->common.lockfunc)(dmat->common.lockfuncarg,
+ BUS_DMA_UNLOCK);
+ mtx_lock(&bounce_lock);
+ }
+ mtx_unlock(&bounce_lock);
+}
+
+struct bus_dma_impl bus_dma_bounce_impl = {
+ .tag_create = bounce_bus_dma_tag_create,
+ .tag_destroy = bounce_bus_dma_tag_destroy,
+ .id_mapped = bounce_bus_dma_id_mapped,
+ .map_create = bounce_bus_dmamap_create,
+ .map_destroy = bounce_bus_dmamap_destroy,
+ .mem_alloc = bounce_bus_dmamem_alloc,
+ .mem_free = bounce_bus_dmamem_free,
+ .load_phys = bounce_bus_dmamap_load_phys,
+ .load_buffer = bounce_bus_dmamap_load_buffer,
+ .load_ma = bus_dmamap_load_ma_triv,
+ .map_waitok = bounce_bus_dmamap_waitok,
+ .map_complete = bounce_bus_dmamap_complete,
+ .map_unload = bounce_bus_dmamap_unload,
+ .map_sync = bounce_bus_dmamap_sync
+};
diff --git a/sys/arm64/arm64/busdma_machdep.c b/sys/arm64/arm64/busdma_machdep.c
new file mode 100644
index 000000000000..1a5ac67a2a4f
--- /dev/null
+++ b/sys/arm64/arm64/busdma_machdep.c
@@ -0,0 +1,285 @@
+/*-
+ * Copyright (c) 1997, 1998 Justin T. Gibbs.
+ * Copyright (c) 2013, 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Portions of this software were developed by Semihalf
+ * under sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/lock.h>
+#include <sys/memdesc.h>
+#include <sys/mutex.h>
+#include <sys/uio.h>
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+#include <arm64/include/bus_dma_impl.h>
+
+/*
+ * Convenience function for manipulating driver locks from busdma (during
+ * busdma_swi, for example). Drivers that don't provide their own locks
+ * should specify &Giant to dmat->lockfuncarg. Drivers that use their own
+ * non-mutex locking scheme don't have to use this at all.
+ */
+void
+busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
+{
+ struct mtx *dmtx;
+
+ dmtx = (struct mtx *)arg;
+ switch (op) {
+ case BUS_DMA_LOCK:
+ mtx_lock(dmtx);
+ break;
+ case BUS_DMA_UNLOCK:
+ mtx_unlock(dmtx);
+ break;
+ default:
+ panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
+ }
+}
+
+/*
+ * dflt_lock should never get called. It gets put into the dma tag when
+ * lockfunc == NULL, which is only valid if the maps that are associated
+ * with the tag are never meant to be deferred.
+ * XXX Should have a way to identify which driver is responsible here.
+ */
+void
+bus_dma_dflt_lock(void *arg, bus_dma_lock_op_t op)
+{
+
+ panic("driver error: busdma dflt_lock called");
+}
+
+/*
+ * Return true if a match is made.
+ *
+ * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
+ *
+ * If paddr is within the bounds of the dma tag then call the filter callback
+ * to check for a match; if there is no filter callback then assume a match.
+ */
+int
+bus_dma_run_filter(struct bus_dma_tag_common *tc, bus_addr_t paddr)
+{
+ int retval;
+
+ retval = 0;
+ do {
+ if (((paddr > tc->lowaddr && paddr <= tc->highaddr) ||
+ ((paddr & (tc->alignment - 1)) != 0)) &&
+ (tc->filter == NULL ||
+ (*tc->filter)(tc->filterarg, paddr) != 0))
+ retval = 1;
+
+ tc = tc->parent;
+ } while (retval == 0 && tc != NULL);
+ return (retval);
+}
+
+int
+common_bus_dma_tag_create(struct bus_dma_tag_common *parent,
+ bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr,
+ bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg,
+ bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags,
+ bus_dma_lock_t *lockfunc, void *lockfuncarg, size_t sz, void **dmat)
+{
+ void *newtag;
+ struct bus_dma_tag_common *common;
+
+ KASSERT(sz >= sizeof(struct bus_dma_tag_common), ("sz"));
+ /* Return a NULL tag on failure */
+ *dmat = NULL;
+ /* Basic sanity checking */
+ if (boundary != 0 && boundary < maxsegsz)
+ maxsegsz = boundary;
+ if (maxsegsz == 0)
+ return (EINVAL);
+
+ newtag = malloc(sz, M_DEVBUF, M_ZERO | M_NOWAIT);
+ if (newtag == NULL) {
+ CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
+ __func__, newtag, 0, ENOMEM);
+ return (ENOMEM);
+ }
+
+ common = newtag;
+ common->impl = &bus_dma_bounce_impl;
+ common->parent = parent;
+ common->alignment = alignment;
+ common->boundary = boundary;
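+	/* Round the exclusion limits to the last address of their page. */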
+ common->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
+ common->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
+ common->filter = filter;
+ common->filterarg = filterarg;
+ common->maxsize = maxsize;
+ common->nsegments = nsegments;
+ common->maxsegsz = maxsegsz;
+ common->flags = flags;
+ common->ref_count = 1; /* Count ourself */
+ if (lockfunc != NULL) {
+ common->lockfunc = lockfunc;
+ common->lockfuncarg = lockfuncarg;
+ } else {
+ common->lockfunc = bus_dma_dflt_lock;
+ common->lockfuncarg = NULL;
+ }
+
+ /* Take into account any restrictions imposed by our parent tag */
+ if (parent != NULL) {
+ common->impl = parent->impl;
+ common->lowaddr = MIN(parent->lowaddr, common->lowaddr);
+ common->highaddr = MAX(parent->highaddr, common->highaddr);
+ if (common->boundary == 0)
+ common->boundary = parent->boundary;
+ else if (parent->boundary != 0) {
+ common->boundary = MIN(parent->boundary,
+ common->boundary);
+ }
+ if (common->filter == NULL) {
+ /*
+ * Short circuit looking at our parent directly
+ * since we have encapsulated all of its information
+ */
+ common->filter = parent->filter;
+ common->filterarg = parent->filterarg;
+ common->parent = parent->parent;
+ }
+ atomic_add_int(&parent->ref_count, 1);
+ }
+ *dmat = common;
+ return (0);
+}
+
+/*
+ * Allocate a device specific dma_tag.
+ */
+int
+bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
+ bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
+ bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
+ int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+ void *lockfuncarg, bus_dma_tag_t *dmat)
+{
+ struct bus_dma_tag_common *tc;
+ int error;
+
+ if (parent == NULL) {
+ error = bus_dma_bounce_impl.tag_create(parent, alignment,
+ boundary, lowaddr, highaddr, filter, filterarg, maxsize,
+ nsegments, maxsegsz, flags, lockfunc, lockfuncarg, dmat);
+ } else {
+ tc = (struct bus_dma_tag_common *)parent;
+ error = tc->impl->tag_create(parent, alignment,
+ boundary, lowaddr, highaddr, filter, filterarg, maxsize,
+ nsegments, maxsegsz, flags, lockfunc, lockfuncarg, dmat);
+ }
+ return (error);
+}
+
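+/*
+ * Initialize a tag template with the most permissive defaults so callers
+ * only need to override the constraints they care about.
+ */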
+void
+bus_dma_template_init(bus_dma_tag_template_t *t, bus_dma_tag_t parent)
+{
+
+ if (t == NULL)
+ return;
+
+ t->parent = parent;
+ t->alignment = 1;
+ t->boundary = 0;
+ t->lowaddr = t->highaddr = BUS_SPACE_MAXADDR;
+ t->maxsize = t->maxsegsize = BUS_SPACE_MAXSIZE;
+ t->nsegments = BUS_SPACE_UNRESTRICTED;
+ t->lockfunc = NULL;
+ t->lockfuncarg = NULL;
+ t->flags = 0;
+}
+
+int
+bus_dma_template_tag(bus_dma_tag_template_t *t, bus_dma_tag_t *dmat)
+{
+
+ if (t == NULL || dmat == NULL)
+ return (EINVAL);
+
+ return (bus_dma_tag_create(t->parent, t->alignment, t->boundary,
+ t->lowaddr, t->highaddr, NULL, NULL, t->maxsize,
+ t->nsegments, t->maxsegsize, t->flags, t->lockfunc, t->lockfuncarg,
+ dmat));
+}
+
+void
+bus_dma_template_clone(bus_dma_tag_template_t *t, bus_dma_tag_t dmat)
+{
+ struct bus_dma_tag_common *common;
+
+ if (t == NULL || dmat == NULL)
+ return;
+
+ common = (struct bus_dma_tag_common *)dmat;
+
+ t->parent = (bus_dma_tag_t)common->parent;
+ t->alignment = common->alignment;
+ t->boundary = common->boundary;
+ t->lowaddr = common->lowaddr;
+ t->highaddr = common->highaddr;
+ t->maxsize = common->maxsize;
+ t->nsegments = common->nsegments;
+ t->maxsegsize = common->maxsegsz;
+ t->flags = common->flags;
+ t->lockfunc = common->lockfunc;
+ t->lockfuncarg = common->lockfuncarg;
+}
+
+int
+bus_dma_tag_destroy(bus_dma_tag_t dmat)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ return (tc->impl->tag_destroy(dmat));
+}
+
+int
+bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
+{
+
+ return (0);
+}
diff --git a/sys/arm64/arm64/bzero.S b/sys/arm64/arm64/bzero.S
new file mode 100644
index 000000000000..6c7f1fef1494
--- /dev/null
+++ b/sys/arm64/arm64/bzero.S
@@ -0,0 +1,206 @@
+/*-
+ * Copyright (C) 2016 Cavium Inc.
+ * All rights reserved.
+ *
+ * Developed by Semihalf.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+
+#include "assym.inc"
+
+ /*
+ * void bzero(void *p, size_t size)
+ *
+ * x0 - p
+ * x1 - size
+ */
+ENTRY(bzero)
+ cbz x1, ending
+
+ /*
+	 * x5 is the number of cache lines to zero - it is calculated later
+	 * and becomes non-zero if the buffer is long enough to be zeroed by
+	 * cache lines (and if that is allowed).
+	 * We need to clear it before proceeding with buffers smaller than
+	 * 16 bytes - otherwise x5 would never be calculated and would
+	 * retain a random value.
+	 * "normal" is used for buffers <= 16 bytes and, for buffers bigger
+	 * than a cache line, to align the pointer to a cache line; a
+	 * non-zero x5 after "normal" completes indicates that it was used
+	 * for alignment, that zeroing by cache lines will now be performed,
+	 * and that x5 holds the number of cache lines to loop through.
+ */
+ mov x5, xzr
+
+ /* No use of cache assisted zero for buffers with size <= 16 */
+ cmp x1, #0x10
+ b.le normal
+
+ /*
+ * Load size of line that will be cleaned by dc zva call.
+ * 0 means that the instruction is not allowed
+ */
+ ldr x7, =dczva_line_size
+ ldr x7, [x7]
+ cbz x7, normal
+
+ /*
+	 * The buffer must be larger than a cache line to use cache-assisted
+	 * zeroing (and cache-line aligned, but this is checked after the jump)
+ */
+ cmp x1, x7
+ b.lt normal
+
+ /*
+	 * Calculate the number of bytes to the cache-aligned address (x4) and
+ * number of full cache lines (x5). x6 is final address to zero.
+ */
+ sub x2, x7, #0x01
+ mov x3, -1
+ eor x3, x3, x2
+ add x4, x0, x2
+ and x4, x4, x3
+ subs x4, x4, x0
+ b.eq normal
+
+ /* Calculate number of "lines" in buffer */
+ sub x5, x1, x4
+ rbit x2, x7
+ clz x2, x2
+ lsr x5, x5, x2
+
+ /*
+	 * If the number of cache lines is 0, we will not be able to zero
+	 * by cache lines, so take the normal path.
+ */
+ cbz x5, normal
+ /* x6 is final address to zero */
+ add x6, x0, x1
+
+ /*
+	 * We are here because x5 is non-zero, so normal will be used to
+	 * align the buffer before cache zeroing. x4 holds the number of
+	 * bytes needed for alignment.
+ */
+ mov x1, x4
+
+ /* When jumping here: x0 holds pointer, x1 holds size */
+normal:
+ /*
+	 * Get the buffer's offset from a 16 byte aligned address; 0 means the
+	 * pointer is aligned.
+ */
+ ands x2, x0, #0x0f
+ b.eq aligned_to_16
+	/* Calculate the number of single-byte stores to the next 8 byte boundary. */
+ ands x2, x2, #0x07
+ mov x3, #0x08
+ sub x2, x3, x2
+ /* x2 is number of bytes missing for alignment, x1 is buffer size */
+ cmp x1, x2
+ csel x2, x1, x2, le
+ sub x1, x1, x2
+
+ /*
+	 * The byte-by-byte loop stores at least enough bytes to align the
+	 * pointer and at most "size" bytes.
+ */
+align:
+ strb wzr, [x0], #0x01
+ subs x2, x2, #0x01
+ b.ne align
+
+ /* Now pointer is aligned to 8 bytes */
+ cmp x1, #0x10
+ b.lt lead_out
+ /*
+	 * Check whether another 8-byte store is needed to align to a 16 byte
+	 * address and, if so, do it.
+ */
+ tbz x0, #0x03, aligned_to_16
+ str xzr, [x0], #0x08
+ sub x1, x1, #0x08
+
+	/* When jumping here: x0 is a 16 byte aligned address, x1 is the size */
+aligned_to_16:
+	/* If size is less than 16 bytes, use lead_out to zero what remains */
+ cmp x1, #0x10
+ b.lt lead_out
+
+ lsr x2, x1, #0x04
+zero_by_16:
+ stp xzr, xzr, [x0], #0x10
+ subs x2, x2, #0x01
+ b.ne zero_by_16
+
+ /*
+	 * The lead-out requires the address to be aligned to 8 bytes. It is
+	 * used to zero buffers with sizes < 16 and whatever could not be
+	 * zeroed by the zero_by_16 loop.
+ */
+ ands x1, x1, #0x0f
+ b.eq lead_out_end
+lead_out:
+ tbz x1, #0x03, lead_out_dword
+ str xzr, [x0], #0x08
+lead_out_dword:
+ tbz x1, #0x02, lead_out_word
+ str wzr, [x0], #0x04
+lead_out_word:
+ tbz x1, #0x01, lead_out_byte
+ strh wzr, [x0], #0x02
+lead_out_byte:
+ tbz x1, #0x00, lead_out_end
+ strb wzr, [x0], #0x01
+
+lead_out_end:
+ /*
+ * If x5 is non-zero, this means that normal has been used as
+	 * a lead-in to align the buffer address to a cache line.
+ */
+ cbz x5, ending
+
+ /*
+ * Here x5 holds number of lines to zero; x6 is final address of
+ * buffer. x0 is cache line aligned pointer. x7 is cache line size
+ * in bytes
+ */
+cache_line_zero:
+ dc zva, x0
+ add x0, x0, x7
+ subs x5, x5, #0x01
+ b.ne cache_line_zero
+
+ /* Need to zero remaining bytes? */
+ subs x1, x6, x0
+ b.ne normal
+
+ending:
+ ret
+
+END(bzero)
+
diff --git a/sys/arm64/arm64/clock.c b/sys/arm64/arm64/clock.c
new file mode 100644
index 000000000000..ef68ea4d7e7b
--- /dev/null
+++ b/sys/arm64/arm64/clock.c
@@ -0,0 +1,39 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/systm.h>
+
+void
+cpu_initclocks(void)
+{
+
+ cpu_initclocks_bsp();
+}
diff --git a/sys/arm64/arm64/copyinout.S b/sys/arm64/arm64/copyinout.S
new file mode 100644
index 000000000000..5c523d11ed00
--- /dev/null
+++ b/sys/arm64/arm64/copyinout.S
@@ -0,0 +1,226 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/errno.h>
+
+#include <machine/vmparam.h>
+
+#include "assym.inc"
+
+/*
+ * Fault handler for the copy{in,out} functions below.
+ */
+ENTRY(copyio_fault)
+ SET_FAULT_HANDLER(xzr, x1) /* Clear the handler */
+ EXIT_USER_ACCESS_CHECK(w0, x1)
+copyio_fault_nopcb:
+ mov x0, #EFAULT
+ ret
+END(copyio_fault)
+
+/*
+ * Copies from a kernel to user address
+ *
+ * int copyout(const void *kaddr, void *udaddr, size_t len)
+ */
+ENTRY(copyout)
+ cbz x2, 1f
+ adds x3, x1, x2
+ b.cs copyio_fault_nopcb
+ ldr x4, =VM_MAXUSER_ADDRESS
+ cmp x3, x4
+ b.hi copyio_fault_nopcb
+
+ b copycommon
+
+1: mov x0, xzr /* return 0 */
+ ret
+
+END(copyout)
+
+/*
+ * Copies from a user to kernel address
+ *
+ * int copyin(const void *uaddr, void *kdaddr, size_t len)
+ */
+ENTRY(copyin)
+ cbz x2, 1f
+ adds x3, x0, x2
+ b.cs copyio_fault_nopcb
+ ldr x4, =VM_MAXUSER_ADDRESS
+ cmp x3, x4
+ b.hi copyio_fault_nopcb
+
+ b copycommon
+
+1: mov x0, xzr /* return 0 */
+ ret
+
+END(copyin)
+
+/*
+ * Copies a string from a user to kernel address
+ *
+ * int copyinstr(const void *udaddr, void *kaddr, size_t len, size_t *done)
+ */
+ENTRY(copyinstr)
+ mov x5, xzr /* count = 0 */
+	mov	w4, #1			/* If zero, return failure */
+ cbz x2, 3f /* If len == 0 then skip loop */
+
+ adr x6, copyio_fault /* Get the handler address */
+ SET_FAULT_HANDLER(x6, x7) /* Set the handler */
+ ENTER_USER_ACCESS(w6, x7)
+
+ ldr x7, =VM_MAXUSER_ADDRESS
+1: cmp x0, x7
+ b.cs copyio_fault
+ ldtrb w4, [x0] /* Load from uaddr */
+ add x0, x0, #1 /* Next char */
+ strb w4, [x1], #1 /* Store in kaddr */
+ add x5, x5, #1 /* count++ */
+ cbz w4, 2f /* Break when NUL-terminated */
+ sub x2, x2, #1 /* len-- */
+ cbnz x2, 1b
+
+2: EXIT_USER_ACCESS(w6)
+ SET_FAULT_HANDLER(xzr, x7) /* Clear the handler */
+
+
+3: cbz x3, 4f /* Check if done != NULL */
+ str x5, [x3] /* done = count */
+
+4: mov w1, #ENAMETOOLONG /* Load ENAMETOOLONG to return if failed */
+ cmp w4, #0 /* Check if we saved the NUL-terminator */
+ csel w0, wzr, w1, eq /* If so return success, else failure */
+ ret
+END(copyinstr)
+
+/*
+ * Local helper
+ *
+ * x0 - src pointer
+ * x1 - dst pointer
+ * x2 - size
+ * lr - the return address, so jump here instead of calling
+ *
+ * This function is optimized to minimize concurrent memory accesses. In
+ * its present form it is suited to cores with a single memory prefetching
+ * unit.
+ * ARM64TODO:
+ * Consider using separate functions for each ARM64 core. Adding memory
+ * access interleaving might increase total throughput on the A57 or A72.
+ */
+ .text
+ .align 4
+ .local copycommon
+ .type copycommon,@function
+
+copycommon:
+ adr x6, copyio_fault /* Get the handler address */
+ SET_FAULT_HANDLER(x6, x7) /* Set the handler */
+ ENTER_USER_ACCESS(w6, x7)
+
+ /* Check alignment */
+ orr x3, x0, x1
+ ands x3, x3, 0x07
+ b.eq aligned
+
+ /* Unaligned is byte by byte copy */
+byte_by_byte:
+ ldrb w3, [x0], #0x01
+ strb w3, [x1], #0x01
+ subs x2, x2, #0x01
+ b.ne byte_by_byte
+ b ending
+
+aligned:
+ cmp x2, #0x10
+ b.lt lead_out
+ cmp x2, #0x40
+ b.lt by_dwords_start
+
+ /* Block copy */
+ lsr x15, x2, #0x06
+by_blocks:
+ ldp x3, x4, [x0], #0x10
+ ldp x5, x6, [x0], #0x10
+ ldp x7, x8, [x0], #0x10
+ ldp x9, x10, [x0], #0x10
+ stp x3, x4, [x1], #0x10
+ stp x5, x6, [x1], #0x10
+ stp x7, x8, [x1], #0x10
+ stp x9, x10, [x1], #0x10
+
+ subs x15, x15, #0x01
+ b.ne by_blocks
+
+ and x2, x2, #0x3f
+
+by_dwords_start:
+ lsr x15, x2, #0x04
+ cbz x15, lead_out
+by_dwords:
+ ldp x3, x4, [x0], #0x10
+ stp x3, x4, [x1], #0x10
+ subs x15, x15, #0x01
+ b.ne by_dwords
+
+ /* Less than 16 bytes to copy */
+lead_out:
+ tbz x2, #0x03, last_word
+ ldr x3, [x0], #0x08
+ str x3, [x1], #0x08
+
+last_word:
+ tbz x2, #0x02, last_hword
+ ldr w3, [x0], #0x04
+ str w3, [x1], #0x04
+
+last_hword:
+ tbz x2, #0x01, last_byte
+ ldrh w3, [x0], #0x02
+ strh w3, [x1], #0x02
+
+last_byte:
+ tbz x2, #0x00, ending
+ ldrb w3, [x0]
+ strb w3, [x1]
+
+ending:
+ EXIT_USER_ACCESS_CHECK(w6, x7)
+ SET_FAULT_HANDLER(xzr, x7) /* Clear the handler */
+
+ mov x0, xzr /* return 0 */
+ ret
+ .size copycommon, . - copycommon
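+
+/*
+ * Illustrative sketch only (plain C, hypothetical helper, not part of this
+ * file; assumes <stddef.h> and <string.h>): the lead-out above tests the
+ * low bits of the remaining length, so at most one 8-, 4-, 2- and 1-byte
+ * copy finishes the job once fewer than 16 bytes remain. memcpy() stands in
+ * for the sized loads and stores.
+ */
+static void
+copy_tail_sketch(unsigned char *dst, const unsigned char *src, size_t rem)
+{
+
+	if (rem & 8) {				/* tbz x2, #0x03 above */
+		memcpy(dst, src, 8);
+		dst += 8; src += 8;
+	}
+	if (rem & 4) {				/* tbz x2, #0x02 */
+		memcpy(dst, src, 4);
+		dst += 4; src += 4;
+	}
+	if (rem & 2) {				/* tbz x2, #0x01 */
+		memcpy(dst, src, 2);
+		dst += 2; src += 2;
+	}
+	if (rem & 1)				/* tbz x2, #0x00 */
+		*dst = *src;
+}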
diff --git a/sys/arm64/arm64/cpu_errata.c b/sys/arm64/arm64/cpu_errata.c
new file mode 100644
index 000000000000..9879e645b827
--- /dev/null
+++ b/sys/arm64/arm64/cpu_errata.c
@@ -0,0 +1,192 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2018 Andrew Turner
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_platform.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/pcpu.h>
+#include <sys/systm.h>
+
+#include <machine/cpu.h>
+
+#include <dev/psci/smccc.h>
+
+typedef void (cpu_quirk_install)(void);
+struct cpu_quirks {
+ cpu_quirk_install *quirk_install;
+ u_int midr_mask;
+ u_int midr_value;
+};
+
+static enum {
+ SSBD_FORCE_ON,
+ SSBD_FORCE_OFF,
+ SSBD_KERNEL,
+} ssbd_method = SSBD_KERNEL;
+
+static cpu_quirk_install install_psci_bp_hardening;
+static cpu_quirk_install install_ssbd_workaround;
+static cpu_quirk_install install_thunderx_bcast_tlbi_workaround;
+
+static struct cpu_quirks cpu_quirks[] = {
+ {
+ .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
+ .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A57,0,0),
+ .quirk_install = install_psci_bp_hardening,
+ },
+ {
+ .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
+ .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A72,0,0),
+ .quirk_install = install_psci_bp_hardening,
+ },
+ {
+ .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
+ .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A73,0,0),
+ .quirk_install = install_psci_bp_hardening,
+ },
+ {
+ .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
+ .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A75,0,0),
+ .quirk_install = install_psci_bp_hardening,
+ },
+ {
+ .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
+ .midr_value =
+ CPU_ID_RAW(CPU_IMPL_CAVIUM, CPU_PART_THUNDERX2, 0,0),
+ .quirk_install = install_psci_bp_hardening,
+ },
+ {
+ .midr_mask = 0,
+ .midr_value = 0,
+ .quirk_install = install_ssbd_workaround,
+ },
+ {
+ .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
+ .midr_value =
+ CPU_ID_RAW(CPU_IMPL_CAVIUM, CPU_PART_THUNDERX, 0, 0),
+ .quirk_install = install_thunderx_bcast_tlbi_workaround,
+ },
+ {
+ .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
+ .midr_value =
+ CPU_ID_RAW(CPU_IMPL_CAVIUM, CPU_PART_THUNDERX_81XX, 0, 0),
+ .quirk_install = install_thunderx_bcast_tlbi_workaround,
+ },
+};
+
+static void
+install_psci_bp_hardening(void)
+{
+
+ if (smccc_arch_features(SMCCC_ARCH_WORKAROUND_1) != SMCCC_RET_SUCCESS)
+ return;
+
+ PCPU_SET(bp_harden, smccc_arch_workaround_1);
+}
+
+static void
+install_ssbd_workaround(void)
+{
+ char *env;
+
+ if (PCPU_GET(cpuid) == 0) {
+ env = kern_getenv("kern.cfg.ssbd");
+ if (env != NULL) {
+ if (strcmp(env, "force-on") == 0) {
+ ssbd_method = SSBD_FORCE_ON;
+ } else if (strcmp(env, "force-off") == 0) {
+ ssbd_method = SSBD_FORCE_OFF;
+ }
+ }
+ }
+
+ /* Enable the workaround on this CPU if it's enabled in the firmware */
+ if (smccc_arch_features(SMCCC_ARCH_WORKAROUND_2) != SMCCC_RET_SUCCESS)
+ return;
+
+ switch(ssbd_method) {
+ case SSBD_FORCE_ON:
+ smccc_arch_workaround_2(1);
+ break;
+ case SSBD_FORCE_OFF:
+ smccc_arch_workaround_2(0);
+ break;
+ case SSBD_KERNEL:
+ default:
+ PCPU_SET(ssbd, smccc_arch_workaround_2);
+ break;
+ }
+}
+
+/*
+ * Workaround Cavium erratum 27456.
+ *
+ * Invalidate the local icache when changing address spaces.
+ */
+static void
+install_thunderx_bcast_tlbi_workaround(void)
+{
+ u_int midr;
+
+ midr = get_midr();
+ if (CPU_PART(midr) == CPU_PART_THUNDERX_81XX)
+ PCPU_SET(bcast_tlbi_workaround, 1);
+ else if (CPU_PART(midr) == CPU_PART_THUNDERX) {
+ if (CPU_VAR(midr) == 0) {
+ /* ThunderX 1.x */
+ PCPU_SET(bcast_tlbi_workaround, 1);
+ } else if (CPU_VAR(midr) == 1 && CPU_REV(midr) <= 1) {
+ /* ThunderX 2.0 - 2.1 */
+ PCPU_SET(bcast_tlbi_workaround, 1);
+ }
+ }
+}
+
+void
+install_cpu_errata(void)
+{
+ u_int midr;
+ size_t i;
+
+ midr = get_midr();
+
+ for (i = 0; i < nitems(cpu_quirks); i++) {
+ if ((midr & cpu_quirks[i].midr_mask) ==
+ cpu_quirks[i].midr_value) {
+ cpu_quirks[i].quirk_install();
+ }
+ }
+}
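+
+/*
+ * Illustrative sketch only (hypothetical helper name, not part of this
+ * file): the matching rule the loop above applies to each table entry.
+ * Masking the MIDR first is what lets an entry ignore the variant and
+ * revision fields, and the all-zero mask/value entry
+ * (install_ssbd_workaround) matches every CPU.
+ */
+static bool
+cpu_quirk_matches_sketch(u_int midr, const struct cpu_quirks *quirk)
+{
+
+	return ((midr & quirk->midr_mask) == quirk->midr_value);
+}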
diff --git a/sys/arm64/arm64/cpufunc_asm.S b/sys/arm64/arm64/cpufunc_asm.S
new file mode 100644
index 000000000000..2f28c4f68271
--- /dev/null
+++ b/sys/arm64/arm64/cpufunc_asm.S
@@ -0,0 +1,182 @@
+/*-
+ * Copyright (c) 2014 Robin Randhawa
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Andrew Turner
+ * under sponsorship from the FreeBSD Foundation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/errno.h>
+#include <machine/asm.h>
+#include <machine/param.h>
+
+#include "assym.inc"
+
+__FBSDID("$FreeBSD$");
+
+/*
+ * FIXME:
+ * Need big.LITTLE awareness at some point.
+ * Using arm64_p[id]cache_line_size may not be the best option.
+ * Need better SMP awareness.
+ */
+ .text
+ .align 2
+
+.Lpage_mask:
+ .word PAGE_MASK
+
+/*
+ * Macro to handle the cache. This takes the start address in x0, length
+ * in x1. It will corrupt x0, x1, x2, x3, and x4.
+ */
+.macro cache_handle_range dcop = 0, ic = 0, icop = 0
+.if \ic == 0
+ ldr x3, =dcache_line_size /* Load the D cache line size */
+.else
+ ldr x3, =idcache_line_size /* Load the I & D cache line size */
+.endif
+ ldr x3, [x3]
+ sub x4, x3, #1 /* Get the address mask */
+ and x2, x0, x4 /* Get the low bits of the address */
+ add x1, x1, x2 /* Add these to the size */
+ bic x0, x0, x4 /* Clear the low bit of the address */
+.if \ic != 0
+ mov x2, x0 /* Save the address */
+ mov x4, x1 /* Save the size */
+.endif
+1:
+ dc \dcop, x0
+ add x0, x0, x3 /* Move to the next line */
+ subs x1, x1, x3 /* Reduce the size */
+ b.hi 1b /* Check if we are done */
+ dsb ish
+.if \ic != 0
+2:
+ ic \icop, x2
+ add x2, x2, x3 /* Move to the next line */
+ subs x4, x4, x3 /* Reduce the size */
+ b.hi 2b /* Check if we are done */
+ dsb ish
+ isb
+.endif
+.endm
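+
+/*
+ * Illustrative sketch only (plain C, not part of this file): the address
+ * arithmetic the macro performs before its loops -- round the start down
+ * to a line boundary and grow the length by the bytes that rounding
+ * exposed, so every line touched by the range is visited. maintain_line()
+ * is a hypothetical stand-in for the dc/ic instruction on one line.
+ */
+void maintain_line(uintptr_t addr);		/* hypothetical */
+
+static void
+cache_range_sketch(uintptr_t addr, size_t len, size_t line)
+{
+	size_t low;
+
+	low = addr & (line - 1);		/* and  x2, x0, x4 */
+	len += low;				/* add  x1, x1, x2 */
+	addr &= ~(uintptr_t)(line - 1);		/* bic  x0, x0, x4 */
+	while (len > 0) {
+		maintain_line(addr);		/* dc/ic on this line */
+		addr += line;
+		len -= (len > line) ? line : len;
+	}
+}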
+
+ENTRY(arm64_nullop)
+ ret
+END(arm64_nullop)
+
+/*
+ * Generic functions to read/modify/write the system registers
+ */
+
+ENTRY(arm64_tlb_flushID)
+ dsb ishst
+#ifdef SMP
+ tlbi vmalle1is
+#else
+ tlbi vmalle1
+#endif
+ dsb ish
+ isb
+ ret
+END(arm64_tlb_flushID)
+
+/*
+ * void arm64_dcache_wb_range(vm_offset_t, vm_size_t)
+ */
+ENTRY(arm64_dcache_wb_range)
+ cache_handle_range dcop = cvac
+ ret
+END(arm64_dcache_wb_range)
+
+/*
+ * void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t)
+ */
+ENTRY(arm64_dcache_wbinv_range)
+ cache_handle_range dcop = civac
+ ret
+END(arm64_dcache_wbinv_range)
+
+/*
+ * void arm64_dcache_inv_range(vm_offset_t, vm_size_t)
+ *
+ * Note, we must not invalidate everything. If the range is too big we
+ * must use wb-inv of the entire cache.
+ */
+ENTRY(arm64_dcache_inv_range)
+ cache_handle_range dcop = ivac
+ ret
+END(arm64_dcache_inv_range)
+
+/*
+ * void arm64_dic_idc_icache_sync_range(vm_offset_t, vm_size_t)
+ * When the CTR_EL0.IDC bit is set, cleaning to the PoU becomes a dsb.
+ * When the CTR_EL0.DIC bit is set, icache invalidation becomes an isb.
+ */
+ENTRY(arm64_dic_idc_icache_sync_range)
+ dsb ishst
+ isb
+ ret
+END(arm64_dic_idc_icache_sync_range)
+
+/*
+ * void arm64_aliasing_icache_sync_range(vm_offset_t, vm_size_t)
+ */
+ENTRY(arm64_aliasing_icache_sync_range)
+ /*
+	 * XXX Temporary solution: the I-cache flush should be range-based
+	 * for a PIPT cache, or use IALLUIS for VIVT or VIPT caches
+ */
+/* cache_handle_range dcop = cvau, ic = 1, icop = ivau */
+ cache_handle_range dcop = cvau
+ ic ialluis
+ dsb ish
+ isb
+ ret
+END(arm64_aliasing_icache_sync_range)
+
+/*
+ * int arm64_icache_sync_range_checked(vm_offset_t, vm_size_t)
+ */
+ENTRY(arm64_icache_sync_range_checked)
+ adr x5, cache_maint_fault
+ SET_FAULT_HANDLER(x5, x6)
+ /* XXX: See comment in arm64_icache_sync_range */
+ cache_handle_range dcop = cvau
+ ic ialluis
+ dsb ish
+ isb
+ SET_FAULT_HANDLER(xzr, x6)
+ mov x0, #0
+ ret
+END(arm64_icache_sync_range_checked)
+
+ENTRY(cache_maint_fault)
+ SET_FAULT_HANDLER(xzr, x1)
+ mov x0, #EFAULT
+ ret
+END(cache_maint_fault)
diff --git a/sys/arm64/arm64/db_disasm.c b/sys/arm64/arm64/db_disasm.c
new file mode 100644
index 000000000000..73efca0bdee9
--- /dev/null
+++ b/sys/arm64/arm64/db_disasm.c
@@ -0,0 +1,70 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under
+ * the sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#include <sys/param.h>
+#include <ddb/ddb.h>
+#include <ddb/db_access.h>
+#include <ddb/db_sym.h>
+
+#include <machine/disassem.h>
+
+static u_int db_disasm_read_word(vm_offset_t);
+static void db_disasm_printaddr(vm_offset_t);
+
+/* Glue code to interface db_disasm to the generic ARM disassembler */
+static const struct disasm_interface db_disasm_interface = {
+ .di_readword = db_disasm_read_word,
+ .di_printaddr = db_disasm_printaddr,
+ .di_printf = db_printf,
+};
+
+static u_int
+db_disasm_read_word(vm_offset_t address)
+{
+
+ return (db_get_value(address, INSN_SIZE, 0));
+}
+
+static void
+db_disasm_printaddr(vm_offset_t address)
+{
+
+ db_printsym((db_addr_t)address, DB_STGY_ANY);
+}
+
+vm_offset_t
+db_disasm(vm_offset_t loc, bool altfmt)
+{
+
+ return (disasm(&db_disasm_interface, loc, altfmt));
+}
+
+/* End of db_disasm.c */
diff --git a/sys/arm64/arm64/db_interface.c b/sys/arm64/arm64/db_interface.c
new file mode 100644
index 000000000000..5138bf3f1cab
--- /dev/null
+++ b/sys/arm64/arm64/db_interface.c
@@ -0,0 +1,194 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under
+ * the sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+
+#ifdef KDB
+#include <sys/kdb.h>
+#endif
+
+#include <ddb/ddb.h>
+#include <ddb/db_variables.h>
+
+#include <machine/cpu.h>
+#include <machine/pcb.h>
+#include <machine/stack.h>
+#include <machine/vmparam.h>
+
+static int
+db_frame(struct db_variable *vp, db_expr_t *valuep, int op)
+{
+ long *reg;
+
+ if (kdb_frame == NULL)
+ return (0);
+
+ reg = (long *)((uintptr_t)kdb_frame + (db_expr_t)vp->valuep);
+ if (op == DB_VAR_GET)
+ *valuep = *reg;
+ else
+ *reg = *valuep;
+ return (1);
+}
+
+#define DB_OFFSET(x) (db_expr_t *)offsetof(struct trapframe, x)
+struct db_variable db_regs[] = {
+ { "spsr", DB_OFFSET(tf_spsr), db_frame },
+ { "x0", DB_OFFSET(tf_x[0]), db_frame },
+ { "x1", DB_OFFSET(tf_x[1]), db_frame },
+ { "x2", DB_OFFSET(tf_x[2]), db_frame },
+ { "x3", DB_OFFSET(tf_x[3]), db_frame },
+ { "x4", DB_OFFSET(tf_x[4]), db_frame },
+ { "x5", DB_OFFSET(tf_x[5]), db_frame },
+ { "x6", DB_OFFSET(tf_x[6]), db_frame },
+ { "x7", DB_OFFSET(tf_x[7]), db_frame },
+ { "x8", DB_OFFSET(tf_x[8]), db_frame },
+ { "x9", DB_OFFSET(tf_x[9]), db_frame },
+ { "x10", DB_OFFSET(tf_x[10]), db_frame },
+ { "x11", DB_OFFSET(tf_x[11]), db_frame },
+ { "x12", DB_OFFSET(tf_x[12]), db_frame },
+ { "x13", DB_OFFSET(tf_x[13]), db_frame },
+ { "x14", DB_OFFSET(tf_x[14]), db_frame },
+ { "x15", DB_OFFSET(tf_x[15]), db_frame },
+ { "x16", DB_OFFSET(tf_x[16]), db_frame },
+ { "x17", DB_OFFSET(tf_x[17]), db_frame },
+ { "x18", DB_OFFSET(tf_x[18]), db_frame },
+ { "x19", DB_OFFSET(tf_x[19]), db_frame },
+ { "x20", DB_OFFSET(tf_x[20]), db_frame },
+ { "x21", DB_OFFSET(tf_x[21]), db_frame },
+ { "x22", DB_OFFSET(tf_x[22]), db_frame },
+ { "x23", DB_OFFSET(tf_x[23]), db_frame },
+ { "x24", DB_OFFSET(tf_x[24]), db_frame },
+ { "x25", DB_OFFSET(tf_x[25]), db_frame },
+ { "x26", DB_OFFSET(tf_x[26]), db_frame },
+ { "x27", DB_OFFSET(tf_x[27]), db_frame },
+ { "x28", DB_OFFSET(tf_x[28]), db_frame },
+ { "x29", DB_OFFSET(tf_x[29]), db_frame },
+ { "lr", DB_OFFSET(tf_lr), db_frame },
+ { "elr", DB_OFFSET(tf_elr), db_frame },
+ { "sp", DB_OFFSET(tf_sp), db_frame },
+};
+
+struct db_variable *db_eregs = db_regs + nitems(db_regs);
+
+void
+db_show_mdpcpu(struct pcpu *pc)
+{
+}
+
+/*
+ * Read bytes from kernel address space for debugger.
+ */
+int
+db_read_bytes(vm_offset_t addr, size_t size, char *data)
+{
+ jmp_buf jb;
+ void *prev_jb;
+ const char *src;
+ int ret;
+ uint64_t tmp64;
+ uint32_t tmp32;
+ uint16_t tmp16;
+
+ prev_jb = kdb_jmpbuf(jb);
+ ret = setjmp(jb);
+
+ if (ret == 0) {
+ src = (const char *)addr;
+		if (size == 8 && (addr & 7) == 0) {
+			tmp64 = *((const uint64_t *)src);
+			src = (const char *)&tmp64;
+		} else if (size == 4 && (addr & 3) == 0) {
+			tmp32 = *((const uint32_t *)src);
+			src = (const char *)&tmp32;
+		} else if (size == 2 && (addr & 1) == 0) {
+			tmp16 = *((const uint16_t *)src);
+			src = (const char *)&tmp16;
+ }
+ while (size-- > 0)
+ *data++ = *src++;
+ }
+ (void)kdb_jmpbuf(prev_jb);
+
+ return (ret);
+}
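+
+/*
+ * Illustrative sketch only (hypothetical helper, not part of this file):
+ * the recovery pattern used above. kdb_jmpbuf() installs the jump buffer
+ * the fault handler longjmps to, so a bad address makes setjmp() return
+ * non-zero instead of panicking, and the previous buffer is always
+ * restored afterwards. access() is a hypothetical callback.
+ */
+static int
+guarded_access_sketch(void (*access)(void))
+{
+	jmp_buf jb;
+	void *prev_jb;
+	int ret;
+
+	prev_jb = kdb_jmpbuf(jb);
+	ret = setjmp(jb);
+	if (ret == 0)
+		access();			/* may fault */
+	(void)kdb_jmpbuf(prev_jb);		/* restore previous buffer */
+	return (ret);				/* 0 on success */
+}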
+
+/*
+ * Write bytes to kernel address space for debugger.
+ */
+int
+db_write_bytes(vm_offset_t addr, size_t size, char *data)
+{
+ jmp_buf jb;
+ void *prev_jb;
+ char *dst;
+ int ret;
+ uint64_t tmp64;
+ uint32_t tmp32;
+	uint16_t tmp16;
+	size_t orig_size;
+
+ prev_jb = kdb_jmpbuf(jb);
+ ret = setjmp(jb);
+	if (ret == 0) {
+		orig_size = size;
+ if (size == 8 && (addr & 7) == 0) {
+ dst = (char *)&tmp64;
+ while (size-- > 0)
+ *dst++ = *data++;
+ *((uint64_t *)addr) = tmp64;
+ } else if (size == 4 && (addr & 3) == 0) {
+ dst = (char *)&tmp32;
+ while (size-- > 0)
+ *dst++ = *data++;
+ *((uint32_t *)addr) = tmp32;
+ } else if (size == 2 && (addr & 1) == 0) {
+ dst = (char *)&tmp16;
+ while (size-- > 0)
+ *dst++ = *data++;
+			*((uint16_t *)addr) = tmp16;
+ } else {
+ dst = (char *)addr;
+ while (size-- > 0)
+ *dst++ = *data++;
+ }
+ dsb(ish);
+
+		/*
+		 * Clean the D-cache and invalidate the I-cache; the copy
+		 * loops above consumed size, so use the saved length.
+		 */
+		cpu_dcache_wb_range(addr, (vm_size_t)orig_size);
+		cpu_icache_sync_range(addr, (vm_size_t)orig_size);
+ }
+ (void)kdb_jmpbuf(prev_jb);
+
+ return (ret);
+}
diff --git a/sys/arm64/arm64/db_trace.c b/sys/arm64/arm64/db_trace.c
new file mode 100644
index 000000000000..f892935cd13a
--- /dev/null
+++ b/sys/arm64/arm64/db_trace.c
@@ -0,0 +1,133 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under
+ * the sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_ddb.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/kdb.h>
+
+#include <machine/pcb.h>
+#include <ddb/ddb.h>
+#include <ddb/db_sym.h>
+
+#include <machine/armreg.h>
+#include <machine/debug_monitor.h>
+#include <machine/stack.h>
+
+void
+db_md_list_watchpoints()
+{
+
+ dbg_show_watchpoint();
+}
+
+int
+db_md_clr_watchpoint(db_expr_t addr, db_expr_t size)
+{
+
+ return (dbg_remove_watchpoint(NULL, addr, size));
+}
+
+int
+db_md_set_watchpoint(db_expr_t addr, db_expr_t size)
+{
+
+ return (dbg_setup_watchpoint(NULL, addr, size, HW_BREAKPOINT_RW));
+}
+
+static void
+db_stack_trace_cmd(struct unwind_state *frame)
+{
+ c_db_sym_t sym;
+ const char *name;
+ db_expr_t value;
+ db_expr_t offset;
+
+ while (1) {
+ uint64_t pc = frame->pc;
+ int ret;
+
+ ret = unwind_frame(frame);
+ if (ret < 0)
+ break;
+
+ sym = db_search_symbol(pc, DB_STGY_ANY, &offset);
+ if (sym == C_DB_SYM_NULL) {
+ value = 0;
+ name = "(null)";
+ } else
+ db_symbol_values(sym, &name, &value);
+
+ db_printf("%s() at ", name);
+ db_printsym(frame->pc, DB_STGY_PROC);
+ db_printf("\n");
+
+ db_printf("\t pc = 0x%016lx lr = 0x%016lx\n", pc,
+ frame->pc);
+ db_printf("\t sp = 0x%016lx fp = 0x%016lx\n", frame->sp,
+ frame->fp);
+ /* TODO: Show some more registers */
+ db_printf("\n");
+ }
+}
+
+int
+db_trace_thread(struct thread *thr, int count)
+{
+ struct unwind_state frame;
+ struct pcb *ctx;
+
+ if (thr != curthread) {
+ ctx = kdb_thr_ctx(thr);
+
+ frame.sp = (uint64_t)ctx->pcb_sp;
+ frame.fp = (uint64_t)ctx->pcb_x[29];
+ frame.pc = (uint64_t)ctx->pcb_x[30];
+ db_stack_trace_cmd(&frame);
+ } else
+ db_trace_self();
+ return (0);
+}
+
+void
+db_trace_self(void)
+{
+ struct unwind_state frame;
+ uint64_t sp;
+
+ __asm __volatile("mov %0, sp" : "=&r" (sp));
+
+ frame.sp = sp;
+ frame.fp = (uint64_t)__builtin_frame_address(0);
+ frame.pc = (uint64_t)db_trace_self;
+ db_stack_trace_cmd(&frame);
+}
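+
+/*
+ * Illustrative sketch only (not part of this file, and a simplification of
+ * the real unwind_frame()): an AArch64 frame record is a {previous fp,
+ * saved lr} pair, so walking the stack amounts to repeatedly dereferencing
+ * the frame pointer kept in struct unwind_state.
+ */
+static int
+unwind_step_sketch(struct unwind_state *frame)
+{
+	uint64_t fp;
+
+	fp = frame->fp;
+	if (fp == 0)
+		return (-1);			/* end of the chain */
+	frame->fp = ((uint64_t *)fp)[0];	/* caller's frame pointer */
+	frame->pc = ((uint64_t *)fp)[1];	/* saved return address */
+	return (0);
+}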
diff --git a/sys/arm64/arm64/debug_monitor.c b/sys/arm64/arm64/debug_monitor.c
new file mode 100644
index 000000000000..dcb3645cf5d4
--- /dev/null
+++ b/sys/arm64/arm64/debug_monitor.c
@@ -0,0 +1,565 @@
+/*-
+ * Copyright (c) 2014 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under
+ * the sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_ddb.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/kdb.h>
+#include <sys/pcpu.h>
+#include <sys/proc.h>
+#include <sys/systm.h>
+#include <sys/sysent.h>
+
+#include <machine/armreg.h>
+#include <machine/cpu.h>
+#include <machine/debug_monitor.h>
+#include <machine/kdb.h>
+
+#ifdef DDB
+#include <ddb/ddb.h>
+#include <ddb/db_sym.h>
+#endif
+
+enum dbg_t {
+ DBG_TYPE_BREAKPOINT = 0,
+ DBG_TYPE_WATCHPOINT = 1,
+};
+
+static int dbg_watchpoint_num;
+static int dbg_breakpoint_num;
+static struct debug_monitor_state kernel_monitor = {
+ .dbg_flags = DBGMON_KERNEL
+};
+
+/* Called from the exception handlers */
+void dbg_monitor_enter(struct thread *);
+void dbg_monitor_exit(struct thread *, struct trapframe *);
+
+/* Watchpoints/breakpoints control register bitfields */
+#define DBG_WATCH_CTRL_LEN_1 (0x1 << 5)
+#define DBG_WATCH_CTRL_LEN_2 (0x3 << 5)
+#define DBG_WATCH_CTRL_LEN_4 (0xf << 5)
+#define DBG_WATCH_CTRL_LEN_8 (0xff << 5)
+#define DBG_WATCH_CTRL_LEN_MASK(x) ((x) & (0xff << 5))
+#define DBG_WATCH_CTRL_EXEC (0x0 << 3)
+#define DBG_WATCH_CTRL_LOAD (0x1 << 3)
+#define DBG_WATCH_CTRL_STORE (0x2 << 3)
+#define DBG_WATCH_CTRL_ACCESS_MASK(x) ((x) & (0x3 << 3))
+
+/* Common for breakpoint and watchpoint */
+#define DBG_WB_CTRL_EL1 (0x1 << 1)
+#define DBG_WB_CTRL_EL0 (0x2 << 1)
+#define DBG_WB_CTRL_ELX_MASK(x) ((x) & (0x3 << 1))
+#define DBG_WB_CTRL_E (0x1 << 0)
+
+#define DBG_REG_BASE_BVR 0
+#define DBG_REG_BASE_BCR (DBG_REG_BASE_BVR + 16)
+#define DBG_REG_BASE_WVR (DBG_REG_BASE_BCR + 16)
+#define DBG_REG_BASE_WCR (DBG_REG_BASE_WVR + 16)
+
+/* Watchpoint/breakpoint helpers */
+#define DBG_WB_WVR "wvr"
+#define DBG_WB_WCR "wcr"
+#define DBG_WB_BVR "bvr"
+#define DBG_WB_BCR "bcr"
+
+#define DBG_WB_READ(reg, num, val) do { \
+ __asm __volatile("mrs %0, dbg" reg #num "_el1" : "=r" (val)); \
+} while (0)
+
+#define DBG_WB_WRITE(reg, num, val) do { \
+ __asm __volatile("msr dbg" reg #num "_el1, %0" :: "r" (val)); \
+} while (0)
+
+#define READ_WB_REG_CASE(reg, num, offset, val) \
+ case (num + offset): \
+ DBG_WB_READ(reg, num, val); \
+ break
+
+#define WRITE_WB_REG_CASE(reg, num, offset, val) \
+ case (num + offset): \
+ DBG_WB_WRITE(reg, num, val); \
+ break
+
+#define SWITCH_CASES_READ_WB_REG(reg, offset, val) \
+ READ_WB_REG_CASE(reg, 0, offset, val); \
+ READ_WB_REG_CASE(reg, 1, offset, val); \
+ READ_WB_REG_CASE(reg, 2, offset, val); \
+ READ_WB_REG_CASE(reg, 3, offset, val); \
+ READ_WB_REG_CASE(reg, 4, offset, val); \
+ READ_WB_REG_CASE(reg, 5, offset, val); \
+ READ_WB_REG_CASE(reg, 6, offset, val); \
+ READ_WB_REG_CASE(reg, 7, offset, val); \
+ READ_WB_REG_CASE(reg, 8, offset, val); \
+ READ_WB_REG_CASE(reg, 9, offset, val); \
+ READ_WB_REG_CASE(reg, 10, offset, val); \
+ READ_WB_REG_CASE(reg, 11, offset, val); \
+ READ_WB_REG_CASE(reg, 12, offset, val); \
+ READ_WB_REG_CASE(reg, 13, offset, val); \
+ READ_WB_REG_CASE(reg, 14, offset, val); \
+ READ_WB_REG_CASE(reg, 15, offset, val)
+
+#define SWITCH_CASES_WRITE_WB_REG(reg, offset, val) \
+ WRITE_WB_REG_CASE(reg, 0, offset, val); \
+ WRITE_WB_REG_CASE(reg, 1, offset, val); \
+ WRITE_WB_REG_CASE(reg, 2, offset, val); \
+ WRITE_WB_REG_CASE(reg, 3, offset, val); \
+ WRITE_WB_REG_CASE(reg, 4, offset, val); \
+ WRITE_WB_REG_CASE(reg, 5, offset, val); \
+ WRITE_WB_REG_CASE(reg, 6, offset, val); \
+ WRITE_WB_REG_CASE(reg, 7, offset, val); \
+ WRITE_WB_REG_CASE(reg, 8, offset, val); \
+ WRITE_WB_REG_CASE(reg, 9, offset, val); \
+ WRITE_WB_REG_CASE(reg, 10, offset, val); \
+ WRITE_WB_REG_CASE(reg, 11, offset, val); \
+ WRITE_WB_REG_CASE(reg, 12, offset, val); \
+ WRITE_WB_REG_CASE(reg, 13, offset, val); \
+ WRITE_WB_REG_CASE(reg, 14, offset, val); \
+ WRITE_WB_REG_CASE(reg, 15, offset, val)
+
+#ifdef DDB
+static uint64_t
+dbg_wb_read_reg(int reg, int n)
+{
+ uint64_t val = 0;
+
+ switch (reg + n) {
+ SWITCH_CASES_READ_WB_REG(DBG_WB_WVR, DBG_REG_BASE_WVR, val);
+ SWITCH_CASES_READ_WB_REG(DBG_WB_WCR, DBG_REG_BASE_WCR, val);
+ SWITCH_CASES_READ_WB_REG(DBG_WB_BVR, DBG_REG_BASE_BVR, val);
+ SWITCH_CASES_READ_WB_REG(DBG_WB_BCR, DBG_REG_BASE_BCR, val);
+ default:
+ printf("trying to read from wrong debug register %d\n", n);
+ }
+
+ return val;
+}
+#endif /* DDB */
+
+static void
+dbg_wb_write_reg(int reg, int n, uint64_t val)
+{
+ switch (reg + n) {
+ SWITCH_CASES_WRITE_WB_REG(DBG_WB_WVR, DBG_REG_BASE_WVR, val);
+ SWITCH_CASES_WRITE_WB_REG(DBG_WB_WCR, DBG_REG_BASE_WCR, val);
+ SWITCH_CASES_WRITE_WB_REG(DBG_WB_BVR, DBG_REG_BASE_BVR, val);
+ SWITCH_CASES_WRITE_WB_REG(DBG_WB_BCR, DBG_REG_BASE_BCR, val);
+ default:
+ printf("trying to write to wrong debug register %d\n", n);
+ return;
+ }
+ isb();
+}
+
+#ifdef DDB
+void
+kdb_cpu_set_singlestep(void)
+{
+
+ kdb_frame->tf_spsr |= DBG_SPSR_SS;
+ WRITE_SPECIALREG(mdscr_el1, READ_SPECIALREG(mdscr_el1) |
+ DBG_MDSCR_SS | DBG_MDSCR_KDE);
+
+ /*
+	 * Disable breakpoints and watchpoints; otherwise stepping over a
+	 * watched instruction would trigger a breakpoint exception instead
+	 * of a single-step exception and lock the CPU on that instruction
+	 * forever.
+ */
+ if ((kernel_monitor.dbg_flags & DBGMON_ENABLED) != 0) {
+ WRITE_SPECIALREG(mdscr_el1,
+ READ_SPECIALREG(mdscr_el1) & ~DBG_MDSCR_MDE);
+ }
+}
+
+void
+kdb_cpu_clear_singlestep(void)
+{
+
+ WRITE_SPECIALREG(mdscr_el1, READ_SPECIALREG(mdscr_el1) &
+ ~(DBG_MDSCR_SS | DBG_MDSCR_KDE));
+
+ /* Restore breakpoints and watchpoints */
+ if ((kernel_monitor.dbg_flags & DBGMON_ENABLED) != 0) {
+ WRITE_SPECIALREG(mdscr_el1,
+ READ_SPECIALREG(mdscr_el1) | DBG_MDSCR_MDE);
+
+ if ((kernel_monitor.dbg_flags & DBGMON_KERNEL) != 0) {
+ WRITE_SPECIALREG(mdscr_el1,
+ READ_SPECIALREG(mdscr_el1) | DBG_MDSCR_KDE);
+ }
+ }
+}
+
+static const char *
+dbg_watchtype_str(uint32_t type)
+{
+ switch (type) {
+ case DBG_WATCH_CTRL_EXEC:
+ return ("execute");
+ case DBG_WATCH_CTRL_STORE:
+ return ("write");
+ case DBG_WATCH_CTRL_LOAD:
+ return ("read");
+ case DBG_WATCH_CTRL_LOAD | DBG_WATCH_CTRL_STORE:
+ return ("read/write");
+ default:
+ return ("invalid");
+ }
+}
+
+static int
+dbg_watchtype_len(uint32_t len)
+{
+ switch (len) {
+ case DBG_WATCH_CTRL_LEN_1:
+ return (1);
+ case DBG_WATCH_CTRL_LEN_2:
+ return (2);
+ case DBG_WATCH_CTRL_LEN_4:
+ return (4);
+ case DBG_WATCH_CTRL_LEN_8:
+ return (8);
+ default:
+ return (0);
+ }
+}
+
+void
+dbg_show_watchpoint(void)
+{
+ uint32_t wcr, len, type;
+ uint64_t addr;
+ int i;
+
+ db_printf("\nhardware watchpoints:\n");
+ db_printf(" watch status type len address symbol\n");
+ db_printf(" ----- -------- ---------- --- ------------------ ------------------\n");
+ for (i = 0; i < dbg_watchpoint_num; i++) {
+ wcr = dbg_wb_read_reg(DBG_REG_BASE_WCR, i);
+ if ((wcr & DBG_WB_CTRL_E) != 0) {
+ type = DBG_WATCH_CTRL_ACCESS_MASK(wcr);
+ len = DBG_WATCH_CTRL_LEN_MASK(wcr);
+ addr = dbg_wb_read_reg(DBG_REG_BASE_WVR, i);
+ db_printf(" %-5d %-8s %10s %3d 0x%16lx ",
+ i, "enabled", dbg_watchtype_str(type),
+ dbg_watchtype_len(len), addr);
+ db_printsym((db_addr_t)addr, DB_STGY_ANY);
+ db_printf("\n");
+ } else {
+ db_printf(" %-5d disabled\n", i);
+ }
+ }
+}
+#endif /* DDB */
+
+static int
+dbg_find_free_slot(struct debug_monitor_state *monitor, enum dbg_t type)
+{
+ uint64_t *reg;
+ u_int max, i;
+
+ switch(type) {
+ case DBG_TYPE_BREAKPOINT:
+ max = dbg_breakpoint_num;
+ reg = monitor->dbg_bcr;
+ break;
+ case DBG_TYPE_WATCHPOINT:
+ max = dbg_watchpoint_num;
+ reg = monitor->dbg_wcr;
+ break;
+ default:
+ printf("Unsupported debug type\n");
+		return (-1);
+ }
+
+ for (i = 0; i < max; i++) {
+ if ((reg[i] & DBG_WB_CTRL_E) == 0)
+ return (i);
+ }
+
+ return (-1);
+}
+
+static int
+dbg_find_slot(struct debug_monitor_state *monitor, enum dbg_t type,
+ vm_offset_t addr)
+{
+ uint64_t *reg_addr, *reg_ctrl;
+ u_int max, i;
+
+ switch(type) {
+ case DBG_TYPE_BREAKPOINT:
+ max = dbg_breakpoint_num;
+ reg_addr = monitor->dbg_bvr;
+ reg_ctrl = monitor->dbg_bcr;
+ break;
+ case DBG_TYPE_WATCHPOINT:
+ max = dbg_watchpoint_num;
+ reg_addr = monitor->dbg_wvr;
+ reg_ctrl = monitor->dbg_wcr;
+ break;
+ default:
+ printf("Unsupported debug type\n");
+		return (-1);
+ }
+
+ for (i = 0; i < max; i++) {
+ if (reg_addr[i] == addr &&
+ (reg_ctrl[i] & DBG_WB_CTRL_E) != 0)
+ return (i);
+ }
+
+ return (-1);
+}
+
+int
+dbg_setup_watchpoint(struct debug_monitor_state *monitor, vm_offset_t addr,
+ vm_size_t size, enum dbg_access_t access)
+{
+ uint64_t wcr_size, wcr_priv, wcr_access;
+ u_int i;
+
+ if (monitor == NULL)
+ monitor = &kernel_monitor;
+
+ i = dbg_find_free_slot(monitor, DBG_TYPE_WATCHPOINT);
+ if (i == -1) {
+ printf("Can not find slot for watchpoint, max %d"
+ " watchpoints supported\n", dbg_watchpoint_num);
+ return (i);
+ }
+
+ switch(size) {
+ case 1:
+ wcr_size = DBG_WATCH_CTRL_LEN_1;
+ break;
+ case 2:
+ wcr_size = DBG_WATCH_CTRL_LEN_2;
+ break;
+ case 4:
+ wcr_size = DBG_WATCH_CTRL_LEN_4;
+ break;
+ case 8:
+ wcr_size = DBG_WATCH_CTRL_LEN_8;
+ break;
+ default:
+ printf("Unsupported address size for watchpoint\n");
+ return (-1);
+ }
+
+ if ((monitor->dbg_flags & DBGMON_KERNEL) == 0)
+ wcr_priv = DBG_WB_CTRL_EL0;
+ else
+ wcr_priv = DBG_WB_CTRL_EL1;
+
+ switch(access) {
+ case HW_BREAKPOINT_X:
+ wcr_access = DBG_WATCH_CTRL_EXEC;
+ break;
+ case HW_BREAKPOINT_R:
+ wcr_access = DBG_WATCH_CTRL_LOAD;
+ break;
+ case HW_BREAKPOINT_W:
+ wcr_access = DBG_WATCH_CTRL_STORE;
+ break;
+ case HW_BREAKPOINT_RW:
+ wcr_access = DBG_WATCH_CTRL_LOAD | DBG_WATCH_CTRL_STORE;
+ break;
+ default:
+ printf("Unsupported exception level for watchpoint\n");
+ return (-1);
+ }
+
+ monitor->dbg_wvr[i] = addr;
+ monitor->dbg_wcr[i] = wcr_size | wcr_access | wcr_priv | DBG_WB_CTRL_E;
+ monitor->dbg_enable_count++;
+ monitor->dbg_flags |= DBGMON_ENABLED;
+
+ dbg_register_sync(monitor);
+ return (0);
+}
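+
+/*
+ * Illustrative example (not part of this file): the control word the
+ * function above composes for an 8-byte kernel read/write watchpoint,
+ * spelled out with the bitfield macros defined earlier in this file.
+ */
+static const uint64_t dbg_wcr_example =
+    DBG_WATCH_CTRL_LEN_8 |			/* watch all 8 bytes */
+    DBG_WATCH_CTRL_LOAD | DBG_WATCH_CTRL_STORE |/* read and write accesses */
+    DBG_WB_CTRL_EL1 |				/* kernel (EL1) watchpoint */
+    DBG_WB_CTRL_E;				/* enable bit */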
+
+int
+dbg_remove_watchpoint(struct debug_monitor_state *monitor, vm_offset_t addr,
+ vm_size_t size)
+{
+ u_int i;
+
+ if (monitor == NULL)
+ monitor = &kernel_monitor;
+
+ i = dbg_find_slot(monitor, DBG_TYPE_WATCHPOINT, addr);
+ if (i == -1) {
+ printf("Can not find watchpoint for address 0%lx\n", addr);
+ return (i);
+ }
+
+ monitor->dbg_wvr[i] = 0;
+ monitor->dbg_wcr[i] = 0;
+ monitor->dbg_enable_count--;
+ if (monitor->dbg_enable_count == 0)
+ monitor->dbg_flags &= ~DBGMON_ENABLED;
+
+ dbg_register_sync(monitor);
+ return (0);
+}
+
+void
+dbg_register_sync(struct debug_monitor_state *monitor)
+{
+ uint64_t mdscr;
+ int i;
+
+ if (monitor == NULL)
+ monitor = &kernel_monitor;
+
+ mdscr = READ_SPECIALREG(mdscr_el1);
+ if ((monitor->dbg_flags & DBGMON_ENABLED) == 0) {
+ mdscr &= ~(DBG_MDSCR_MDE | DBG_MDSCR_KDE);
+ } else {
+ for (i = 0; i < dbg_breakpoint_num; i++) {
+ dbg_wb_write_reg(DBG_REG_BASE_BCR, i,
+ monitor->dbg_bcr[i]);
+ dbg_wb_write_reg(DBG_REG_BASE_BVR, i,
+ monitor->dbg_bvr[i]);
+ }
+
+ for (i = 0; i < dbg_watchpoint_num; i++) {
+ dbg_wb_write_reg(DBG_REG_BASE_WCR, i,
+ monitor->dbg_wcr[i]);
+ dbg_wb_write_reg(DBG_REG_BASE_WVR, i,
+ monitor->dbg_wvr[i]);
+ }
+ mdscr |= DBG_MDSCR_MDE;
+ if ((monitor->dbg_flags & DBGMON_KERNEL) == DBGMON_KERNEL)
+ mdscr |= DBG_MDSCR_KDE;
+ }
+ WRITE_SPECIALREG(mdscr_el1, mdscr);
+ isb();
+}
+
+void
+dbg_monitor_init(void)
+{
+ u_int i;
+
+	/* Find out how many breakpoints and watchpoints we can use */
+ dbg_watchpoint_num = ((READ_SPECIALREG(id_aa64dfr0_el1) >> 20) & 0xf) + 1;
+ dbg_breakpoint_num = ((READ_SPECIALREG(id_aa64dfr0_el1) >> 12) & 0xf) + 1;
+
+ if (bootverbose && PCPU_GET(cpuid) == 0) {
+ printf("%d watchpoints and %d breakpoints supported\n",
+ dbg_watchpoint_num, dbg_breakpoint_num);
+ }
+
+ /*
+	 * We have a limited number of {watch,break}points, each consisting
+	 * of two registers:
+	 * - the wcr/bcr register configures the corresponding
+	 *   {watch,break}point behaviour
+	 * - the wvr/bvr register holds the address we are watching for
+ *
+ * Reset all breakpoints and watchpoints.
+ */
+ for (i = 0; i < dbg_watchpoint_num; i++) {
+ dbg_wb_write_reg(DBG_REG_BASE_WCR, i, 0);
+ dbg_wb_write_reg(DBG_REG_BASE_WVR, i, 0);
+ }
+
+ for (i = 0; i < dbg_breakpoint_num; i++) {
+ dbg_wb_write_reg(DBG_REG_BASE_BCR, i, 0);
+ dbg_wb_write_reg(DBG_REG_BASE_BVR, i, 0);
+ }
+
+ dbg_enable();
+}
+
+void
+dbg_monitor_enter(struct thread *thread)
+{
+ int i;
+
+ if ((kernel_monitor.dbg_flags & DBGMON_ENABLED) != 0) {
+ /* Install the kernel version of the registers */
+ dbg_register_sync(&kernel_monitor);
+ } else if ((thread->td_pcb->pcb_dbg_regs.dbg_flags & DBGMON_ENABLED) != 0) {
+ /* Disable the user breakpoints until we return to userspace */
+ for (i = 0; i < dbg_watchpoint_num; i++) {
+ dbg_wb_write_reg(DBG_REG_BASE_WCR, i, 0);
+ dbg_wb_write_reg(DBG_REG_BASE_WVR, i, 0);
+ }
+
+ for (i = 0; i < dbg_breakpoint_num; ++i) {
+ dbg_wb_write_reg(DBG_REG_BASE_BCR, i, 0);
+ dbg_wb_write_reg(DBG_REG_BASE_BVR, i, 0);
+ }
+ WRITE_SPECIALREG(mdscr_el1,
+ READ_SPECIALREG(mdscr_el1) &
+ ~(DBG_MDSCR_MDE | DBG_MDSCR_KDE));
+ isb();
+ }
+}
+
+void
+dbg_monitor_exit(struct thread *thread, struct trapframe *frame)
+{
+ int i;
+
+ /*
+ * PSR_D is an aarch64-only flag. On aarch32, it switches
+ * the processor to big-endian, so avoid setting it for
+	 * 32-bit binaries.
+ */
+ if (!(SV_PROC_FLAG(thread->td_proc, SV_ILP32)))
+ frame->tf_spsr |= PSR_D;
+ if ((thread->td_pcb->pcb_dbg_regs.dbg_flags & DBGMON_ENABLED) != 0) {
+		/* Install the thread's version of the registers */
+ dbg_register_sync(&thread->td_pcb->pcb_dbg_regs);
+ frame->tf_spsr &= ~PSR_D;
+ } else if ((kernel_monitor.dbg_flags & DBGMON_ENABLED) != 0) {
+		/* Disable the kernel breakpoints until we re-enter the kernel */
+ for (i = 0; i < dbg_watchpoint_num; i++) {
+ dbg_wb_write_reg(DBG_REG_BASE_WCR, i, 0);
+ dbg_wb_write_reg(DBG_REG_BASE_WVR, i, 0);
+ }
+
+ for (i = 0; i < dbg_breakpoint_num; ++i) {
+ dbg_wb_write_reg(DBG_REG_BASE_BCR, i, 0);
+ dbg_wb_write_reg(DBG_REG_BASE_BVR, i, 0);
+ }
+ WRITE_SPECIALREG(mdscr_el1,
+ READ_SPECIALREG(mdscr_el1) &
+ ~(DBG_MDSCR_MDE | DBG_MDSCR_KDE));
+ isb();
+ }
+}
diff --git a/sys/arm64/arm64/disassem.c b/sys/arm64/arm64/disassem.c
new file mode 100644
index 000000000000..ce0bf7660b02
--- /dev/null
+++ b/sys/arm64/arm64/disassem.c
@@ -0,0 +1,545 @@
+/*-
+ * Copyright (c) 2016 Cavium
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#include <sys/param.h>
+
+#include <sys/systm.h>
+#include <machine/disassem.h>
+#include <machine/armreg.h>
+#include <ddb/ddb.h>
+
+#define ARM64_MAX_TOKEN_LEN 8
+#define ARM64_MAX_TOKEN_CNT 10
+
+#define ARM_INSN_SIZE_OFFSET 30
+#define ARM_INSN_SIZE_MASK 0x3
+
+/* Special options for instruction printing */
+#define OP_SIGN_EXT (1UL << 0) /* Sign-extend immediate value */
+#define OP_LITERAL (1UL << 1) /* Use literal (memory offset) */
+#define OP_MULT_4 (1UL << 2) /* Multiply immediate by 4 */
+#define OP_SF32 (1UL << 3) /* Force 32-bit access */
+#define OP_SF_INV (1UL << 6) /* SF is inverted (1 means 32 bit access) */
+
+static const char *w_reg[] = {
+ "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7",
+ "w8", "w9", "w10", "w11", "w12", "w13", "w14", "w15",
+ "w16", "w17", "w18", "w19", "w20", "w21", "w22", "w23",
+ "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wSP",
+};
+
+static const char *x_reg[] = {
+ "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
+ "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
+ "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
+ "x24", "x25", "x26", "x27", "x28", "x29", "LR", "SP",
+};
+
+static const char *shift_2[] = {
+ "LSL", "LSR", "ASR", "RSV"
+};
+
+/*
+ * Structure representing single token (operand) inside instruction.
+ * name - name of operand
+ * pos - position within the instruction (in bits)
+ * len - operand length (in bits)
+ */
+struct arm64_insn_token {
+ char name[ARM64_MAX_TOKEN_LEN];
+ int pos;
+ int len;
+};
+
+/*
+ * Define generic types for instruction printing.
+ */
+enum arm64_format_type {
+ TYPE_01, /* OP <RD>, <RN>, <RM>{, <shift [LSL, LSR, ASR]> #<imm>} SF32/64
+ OP <RD>, <RN>, #<imm>{, <shift [0, 12]>} SF32/64 */
+	TYPE_02,	/* OP <RT>, [<RN>, #<imm>]{!} SF32/64
+ OP <RT>, [<RN>], #<imm>{!} SF32/64
+ OP <RT>, <RN>, <RM> {, EXTEND AMOUNT } */
+ TYPE_03, /* OP <RT>, #imm SF32/64 */
+};
+
+/*
+ * Structure representing single parsed instruction format.
+ * name - opcode name
+ * format - opcode format in a human-readable way
+ * type - syntax type for printing
+ * special_ops - special options passed to a printer (if any)
+ * mask - bitmask for instruction matching
+ * pattern - pattern to look for
+ * tokens - array of tokens (operands) inside instruction
+ */
+struct arm64_insn {
+ char* name;
+ char* format;
+ enum arm64_format_type type;
+ uint64_t special_ops;
+ uint32_t mask;
+ uint32_t pattern;
+ struct arm64_insn_token tokens[ARM64_MAX_TOKEN_CNT];
+};
+
+/*
+ * Specify instruction opcode format in a human-readable way. Use notation
+ * obtained from ARM Architecture Reference Manual for ARMv8-A.
+ *
+ * Format string description:
+ * Each group must be separated by "|". Group made of 0/1 is used to
+ * generate mask and pattern for instruction matching. Groups containing
+ * an operand token (in format NAME(length_bits)) are used to retrieve any
+ * operand data from the instruction. Names here must be meaningful
+ * and match the ones described in the Manual.
+ *
+ * Token description:
+ * SF - "0" represents 32-bit access, "1" represents 64-bit access
+ * SHIFT - type of shift (instruction dependent)
+ * IMM - immediate value
+ * Rx - register number
+ * OPTION - command specific options
+ * SCALE - scaling of immediate value
+ */
+static struct arm64_insn arm64_i[] = {
+ { "add", "SF(1)|0001011|SHIFT(2)|0|RM(5)|IMM(6)|RN(5)|RD(5)",
+ TYPE_01, 0 },
+ { "mov", "SF(1)|001000100000000000000|RN(5)|RD(5)",
+ TYPE_01, 0 },
+ { "add", "SF(1)|0010001|SHIFT(2)|IMM(12)|RN(5)|RD(5)",
+ TYPE_01, 0 },
+ { "ldr", "1|SF(1)|111000010|IMM(9)|OPTION(2)|RN(5)|RT(5)",
+ TYPE_02, OP_SIGN_EXT }, /* ldr immediate post/pre index */
+ { "ldr", "1|SF(1)|11100101|IMM(12)|RN(5)|RT(5)",
+ TYPE_02, 0 }, /* ldr immediate unsigned */
+ { "ldr", "1|SF(1)|111000011|RM(5)|OPTION(3)|SCALE(1)|10|RN(5)|RT(5)",
+ TYPE_02, 0 }, /* ldr register */
+ { "ldr", "0|SF(1)|011000|IMM(19)|RT(5)",
+ TYPE_03, OP_SIGN_EXT | OP_LITERAL | OP_MULT_4 }, /* ldr literal */
+ { "ldrb", "00|111000010|IMM(9)|OPTION(2)|RN(5)|RT(5)",
+ TYPE_02, OP_SIGN_EXT | OP_SF32 }, /* ldrb immediate post/pre index */
+ { "ldrb", "00|11100101|IMM(12)|RN(5)|RT(5)",
+ TYPE_02, OP_SF32 }, /* ldrb immediate unsigned */
+ { "ldrb", "00|111000011|RM(5)|OPTION(3)|SCALE(1)|10|RN(5)|RT(5)",
+ TYPE_02, OP_SF32 }, /* ldrb register */
+ { "ldrh", "01|111000010|IMM(9)|OPTION(2)|RN(5)|RT(5)", TYPE_02,
+ OP_SIGN_EXT | OP_SF32 }, /* ldrh immediate post/pre index */
+ { "ldrh", "01|11100101|IMM(12)|RN(5)|RT(5)",
+ TYPE_02, OP_SF32 }, /* ldrh immediate unsigned */
+ { "ldrh", "01|111000011|RM(5)|OPTION(3)|SCALE(1)|10|RN(5)|RT(5)",
+ TYPE_02, OP_SF32 }, /* ldrh register */
+ { "ldrsb", "001110001|SF(1)|0|IMM(9)|OPTION(2)|RN(5)|RT(5)",
+ TYPE_02, OP_SIGN_EXT | OP_SF_INV }, /* ldrsb immediate post/pre index */
+ { "ldrsb", "001110011|SF(1)|IMM(12)|RN(5)|RT(5)",\
+ TYPE_02, OP_SF_INV}, /* ldrsb immediate unsigned */
+ { "ldrsb", "001110001|SF(1)|1|RM(5)|OPTION(3)|SCALE(1)|10|RN(5)|RT(5)",
+ TYPE_02, OP_SF_INV }, /* ldrsb register */
+ { "ldrsh", "011110001|SF(1)|0|IMM(9)|OPTION(2)|RN(5)|RT(5)",
+ TYPE_02, OP_SIGN_EXT | OP_SF_INV }, /* ldrsh immediate post/pre index */
+ { "ldrsh", "011110011|SF(1)|IMM(12)|RN(5)|RT(5)",
+ TYPE_02, OP_SF_INV}, /* ldrsh immediate unsigned */
+ { "ldrsh", "011110001|SF(1)|1|RM(5)|OPTION(3)|SCALE(1)|10|RN(5)|RT(5)",
+ TYPE_02, OP_SF_INV }, /* ldrsh register */
+ { "ldrsw", "10111000100|IMM(9)|OPTION(2)|RN(5)|RT(5)",
+ TYPE_02, OP_SIGN_EXT }, /* ldrsw immediate post/pre index */
+ { "ldrsw", "1011100110|IMM(12)|RN(5)|RT(5)",
+ TYPE_02, 0 }, /* ldrsw immediate unsigned */
+ { "ldrsw", "10111000101|RM(5)|OPTION(3)|SCALE(1)|10|RN(5)|RT(5)",
+ TYPE_02, 0 }, /* ldrsw register */
+ { "ldrsw", "10011000|IMM(19)|RT(5)",
+ TYPE_03, OP_SIGN_EXT | OP_LITERAL | OP_MULT_4 }, /* ldr literal */
+ { NULL, NULL }
+};
+
+static void
+arm64_disasm_generate_masks(struct arm64_insn *tab)
+{
+ uint32_t mask, val;
+ int a, i;
+ int len, ret;
+ int token = 0;
+ char *format;
+ int error;
+
+ while (tab->name != NULL) {
+ mask = 0;
+ val = 0;
+ format = tab->format;
+ token = 0;
+ error = 0;
+
+ /*
+ * For each entry analyze format strings from the
+ * left (i.e. from the MSB).
+ */
+ a = (INSN_SIZE * NBBY) - 1;
+ while (*format != '\0' && (a >= 0)) {
+ switch(*format) {
+ case '0':
+ /* Bit is 0, add to mask and pattern */
+ mask |= (1 << a);
+ a--;
+ format++;
+ break;
+ case '1':
+ /* Bit is 1, add to mask and pattern */
+ mask |= (1 << a);
+ val |= (1 << a);
+ a--;
+ format++;
+ break;
+ case '|':
+ /* skip */
+ format++;
+ break;
+ default:
+ /* Token found, copy the name */
+ memset(tab->tokens[token].name, 0,
+ sizeof(tab->tokens[token].name));
+ i = 0;
+ while (*format != '(') {
+ tab->tokens[token].name[i] = *format;
+ i++;
+ format++;
+ if (i >= ARM64_MAX_TOKEN_LEN) {
+ printf("ERROR: token too long in op %s\n",
+ tab->name);
+ error = 1;
+ break;
+ }
+ }
+ if (error != 0)
+ break;
+
+ /* Read the length value */
+ ret = sscanf(format, "(%d)", &len);
+ if (ret == 1) {
+ if (token >= ARM64_MAX_TOKEN_CNT) {
+ printf("ERROR: to many tokens in op %s\n",
+ tab->name);
+ error = 1;
+ break;
+ }
+
+ a -= len;
+ tab->tokens[token].pos = a + 1;
+ tab->tokens[token].len = len;
+ token++;
+ }
+
+ /* Skip to the end of the token */
+ while (*format != 0 && *format != '|')
+ format++;
+ }
+ }
+
+ /* Write mask and pattern to the instruction array */
+ tab->mask = mask;
+ tab->pattern = val;
+
+ /*
+		 * If we got here, the format string has been fully parsed
+		 * and "a" should be -1. If it is not, the format string has
+		 * the wrong number of bits. Mark this entry as invalid and
+		 * prevent it from being matched.
+ */
+ if (*format != 0 || (a != -1) || (error != 0)) {
+ tab->mask = 0;
+ tab->pattern = 0xffffffff;
+ printf("ERROR: skipping instruction op %s\n",
+ tab->name);
+ }
+
+ tab++;
+ }
+}
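+
+/*
+ * Worked example (illustrative, not part of this file): for the "mov"
+ * entry "SF(1)|001000100000000000000|RN(5)|RD(5)" above, only the literal
+ * 0/1 group (bits 30..10) contributes to the mask and pattern, while SF,
+ * RN and RD become tokens at bit positions 31, 5 and 0.
+ */
+#define	MOV_EXAMPLE_MASK	0x7ffffc00u	/* bits 30..10 are fixed */
+#define	MOV_EXAMPLE_PATTERN	0x11000000u	/* "0010001" at bits 30..24 */
+/* An opcode matches when (insn & MOV_EXAMPLE_MASK) == MOV_EXAMPLE_PATTERN. */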
+
+static int
+arm64_disasm_read_token(struct arm64_insn *insn, u_int opcode,
+ const char *token, int *val)
+{
+ int i;
+
+ for (i = 0; i < ARM64_MAX_TOKEN_CNT; i++) {
+ if (strcmp(insn->tokens[i].name, token) == 0) {
+ *val = (opcode >> insn->tokens[i].pos &
+ ((1 << insn->tokens[i].len) - 1));
+ return (0);
+ }
+ }
+
+ return (EINVAL);
+}
+
+static int
+arm64_disasm_read_token_sign_ext(struct arm64_insn *insn, u_int opcode,
+ const char *token, int *val)
+{
+ int i;
+ int msk;
+
+ for (i = 0; i < ARM64_MAX_TOKEN_CNT; i++) {
+ if (strcmp(insn->tokens[i].name, token) == 0) {
+ msk = (1 << insn->tokens[i].len) - 1;
+ *val = ((opcode >> insn->tokens[i].pos) & msk);
+
+ /* If last bit is 1, sign-extend the value */
+ if (*val & (1 << (insn->tokens[i].len - 1)))
+ *val |= ~msk;
+
+ return (0);
+ }
+ }
+
+ return (EINVAL);
+}
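+
+/*
+ * Illustrative sketch only (hypothetical helper, not part of this file):
+ * the extraction both helpers above perform. A token occupying "len" bits
+ * at bit position "pos" is masked out of the opcode; for sign extension,
+ * the bits above the field are filled with ones when the field's top bit
+ * is set.
+ */
+static int
+extract_field_sketch(uint32_t opcode, int pos, int len, int sign_ext)
+{
+	int msk, val;
+
+	msk = (1 << len) - 1;
+	val = (opcode >> pos) & msk;
+	if (sign_ext && (val & (1 << (len - 1))) != 0)
+		val |= ~msk;			/* propagate the sign bit */
+	return (val);
+}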
+
+static const char *
+arm64_reg(int b64, int num)
+{
+
+ if (b64 != 0)
+ return (x_reg[num]);
+
+ return (w_reg[num]);
+}
+
+vm_offset_t
+disasm(const struct disasm_interface *di, vm_offset_t loc, int altfmt)
+{
+ struct arm64_insn *i_ptr = arm64_i;
+ uint32_t insn;
+ int matchp;
+ int ret;
+ int shift, rm, rt, rd, rn, imm, sf, idx, option, scale, amount;
+ int sign_ext;
+ int rm_absent;
+ /* Indicate if immediate should be outside or inside brackets */
+ int inside;
+ /* Print exclamation mark if pre-incremented */
+ int pre;
+
+	/* Initialize defaults; all are 0 except SF, which indicates 64-bit access */
+ shift = rd = rm = rn = imm = idx = option = amount = scale = 0;
+ sign_ext = 0;
+ sf = 1;
+
+ matchp = 0;
+ insn = di->di_readword(loc);
+ while (i_ptr->name) {
+ /* If mask is 0 then the parser was not initialized yet */
+ if ((i_ptr->mask != 0) &&
+ ((insn & i_ptr->mask) == i_ptr->pattern)) {
+ matchp = 1;
+ break;
+ }
+ i_ptr++;
+ }
+ if (matchp == 0)
+ goto undefined;
+
+ /* Global options */
+ if (i_ptr->special_ops & OP_SF32)
+ sf = 0;
+
+ /* Global optional tokens */
+ arm64_disasm_read_token(i_ptr, insn, "SF", &sf);
+ if (i_ptr->special_ops & OP_SF_INV)
+ sf = 1 - sf;
+ if (arm64_disasm_read_token(i_ptr, insn, "SIGN", &sign_ext) == 0)
+ sign_ext = 1 - sign_ext;
+ if (i_ptr->special_ops & OP_SIGN_EXT)
+ sign_ext = 1;
+ if (sign_ext != 0)
+ arm64_disasm_read_token_sign_ext(i_ptr, insn, "IMM", &imm);
+ else
+ arm64_disasm_read_token(i_ptr, insn, "IMM", &imm);
+ if (i_ptr->special_ops & OP_MULT_4)
+ imm <<= 2;
+
+ /* Print opcode by type */
+ switch (i_ptr->type) {
+ case TYPE_01:
+ /* OP <RD>, <RN>, <RM>{, <shift [LSL, LSR, ASR]> #<imm>} SF32/64
+ OP <RD>, <RN>, #<imm>{, <shift [0, 12]>} SF32/64 */
+
+ /* Mandatory tokens */
+ ret = arm64_disasm_read_token(i_ptr, insn, "RD", &rd);
+ ret |= arm64_disasm_read_token(i_ptr, insn, "RN", &rn);
+ if (ret != 0) {
+ printf("ERROR: Missing mandatory token for op %s type %d\n",
+ i_ptr->name, i_ptr->type);
+ goto undefined;
+ }
+
+ /* Optional tokens */
+ arm64_disasm_read_token(i_ptr, insn, "SHIFT", &shift);
+ rm_absent = arm64_disasm_read_token(i_ptr, insn, "RM", &rm);
+
+ di->di_printf("%s\t%s, %s", i_ptr->name, arm64_reg(sf, rd),
+ arm64_reg(sf, rn));
+
+ /* If RM is present use it, otherwise use immediate notation */
+ if (rm_absent == 0) {
+ di->di_printf(", %s", arm64_reg(sf, rm));
+ if (imm != 0)
+ di->di_printf(", %s #%d", shift_2[shift], imm);
+ } else {
+ if (imm != 0 || shift != 0)
+ di->di_printf(", #0x%x", imm);
+ if (shift != 0)
+ di->di_printf(" LSL #12");
+ }
+ break;
+ case TYPE_02:
+		/*
+		 * OP <RT>, [<RN>, #<imm>]{!} SF32/64
+		 * OP <RT>, [<RN>], #<imm> SF32/64
+		 * OP <RT>, <RN>, <RM> {, EXTEND AMOUNT }
+		 */
+
+ /* Mandatory tokens */
+ ret = arm64_disasm_read_token(i_ptr, insn, "RT", &rt);
+ ret |= arm64_disasm_read_token(i_ptr, insn, "RN", &rn);
+ if (ret != 0) {
+ printf("ERROR: Missing mandatory token for op %s type %d\n",
+ i_ptr->name, i_ptr->type);
+ goto undefined;
+ }
+
+ /* Optional tokens */
+ arm64_disasm_read_token(i_ptr, insn, "OPTION", &option);
+ arm64_disasm_read_token(i_ptr, insn, "SCALE", &scale);
+ rm_absent = arm64_disasm_read_token(i_ptr, insn, "RM", &rm);
+
+ if (rm_absent) {
+			/*
+			 * For the unsigned (scaled) form, scale the
+			 * immediate by the access size and force plain
+			 * offset addressing.
+			 */
+ if (sign_ext == 0) {
+ imm = imm << ((insn >> ARM_INSN_SIZE_OFFSET) &
+ ARM_INSN_SIZE_MASK);
+ option = 0;
+ }
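+			/*
+			 * Decode the addressing mode from the option field:
+			 * 0 - base plus offset, 1 - post-indexed,
+			 * 2 - pre-indexed with writeback.
+			 */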
+ switch (option) {
+ case 0x0:
+ pre = 0;
+ inside = 1;
+ break;
+ case 0x1:
+ pre = 0;
+ inside = 0;
+ break;
+ case 0x2:
+ default:
+ pre = 1;
+ inside = 1;
+ break;
+ }
+
+ di->di_printf("%s\t%s, ", i_ptr->name, arm64_reg(sf, rt));
+ if (inside != 0) {
+ di->di_printf("[%s", arm64_reg(1, rn));
+ if (imm != 0)
+ di->di_printf(", #%d", imm);
+ di->di_printf("]");
+ } else {
+ di->di_printf("[%s]", arm64_reg(1, rn));
+ if (imm != 0)
+ di->di_printf(", #%d", imm);
+ }
+ if (pre != 0)
+ di->di_printf("!");
+ } else {
+ /* Last bit of option field determines 32/64 bit offset */
+ di->di_printf("%s\t%s, [%s, %s", i_ptr->name,
+ arm64_reg(sf, rt), arm64_reg(1, rn),
+ arm64_reg(option & 1, rm));
+
+ /* Calculate amount, it's op(31:30) */
+ amount = (insn >> ARM_INSN_SIZE_OFFSET) &
+ ARM_INSN_SIZE_MASK;
+
+ switch (option) {
+ case 0x2:
+ di->di_printf(", uxtw #%d", amount);
+ break;
+ case 0x3:
+ if (scale != 0)
+ di->di_printf(", lsl #%d", amount);
+ break;
+ case 0x6:
+ di->di_printf(", sxtw #%d", amount);
+ break;
+ case 0x7:
+				di->di_printf(", sxtx #%d", amount);
+ break;
+ default:
+ di->di_printf(", RSVD");
+ break;
+ }
+ di->di_printf("]");
+ }
+
+ break;
+
+ case TYPE_03:
+ /* OP <RT>, #imm SF32/64 */
+
+ /* Mandatory tokens */
+ ret = arm64_disasm_read_token(i_ptr, insn, "RT", &rt);
+ if (ret != 0) {
+ printf("ERROR: Missing mandatory token for op %s type %d\n",
+ i_ptr->name, i_ptr->type);
+ goto undefined;
+ }
+
+ di->di_printf("%s\t%s, ", i_ptr->name, arm64_reg(sf, rt));
+ if (i_ptr->special_ops & OP_LITERAL)
+ di->di_printf("0x%lx", loc + imm);
+ else
+ di->di_printf("#%d", imm);
+
+ break;
+ default:
+ goto undefined;
+ }
+
+ di->di_printf("\n");
+	return (loc + INSN_SIZE);
+
+undefined:
+ di->di_printf("undefined\t%08x\n", insn);
+	return (loc + INSN_SIZE);
+}
+
+/* Parse format strings at the very beginning */
+SYSINIT(arm64_disasm_generate_masks, SI_SUB_DDB_SERVICES,
+ SI_ORDER_FIRST, arm64_disasm_generate_masks, arm64_i);
diff --git a/sys/arm64/arm64/dump_machdep.c b/sys/arm64/arm64/dump_machdep.c
new file mode 100644
index 000000000000..d92777fea051
--- /dev/null
+++ b/sys/arm64/arm64/dump_machdep.c
@@ -0,0 +1,73 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/kernel.h>
+#include <sys/kerneldump.h>
+#include <sys/proc.h>
+#include <sys/sysctl.h>
+
+#include <machine/dump.h>
+
+int do_minidump = 1;
+TUNABLE_INT("debug.minidump", &do_minidump);
+SYSCTL_INT(_debug, OID_AUTO, minidump, CTLFLAG_RW, &do_minidump, 0,
+ "Enable mini crash dumps");
+
+void
+dumpsys_wbinv_all(void)
+{
+
+ printf("dumpsys_wbinv_all\n");
+}
+
+void
+dumpsys_map_chunk(vm_paddr_t pa, size_t chunk __unused, void **va)
+{
+
+ printf("dumpsys_map_chunk\n");
+	while (1);
+}
+
+/*
+ * Add a header to be used by libkvm to get the va to pa delta
+ */
+int
+dumpsys_write_aux_headers(struct dumperinfo *di)
+{
+
+	printf("dumpsys_write_aux_headers\n");
+ return (0);
+}
diff --git a/sys/arm64/arm64/efirt_machdep.c b/sys/arm64/arm64/efirt_machdep.c
new file mode 100644
index 000000000000..cd4e5d7bae00
--- /dev/null
+++ b/sys/arm64/arm64/efirt_machdep.c
@@ -0,0 +1,280 @@
+/*-
+ * Copyright (c) 2004 Marcel Moolenaar
+ * Copyright (c) 2001 Doug Rabson
+ * Copyright (c) 2016 The FreeBSD Foundation
+ * Copyright (c) 2017 Andrew Turner
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Konstantin Belousov
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/efi.h>
+#include <sys/kernel.h>
+#include <sys/linker.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/rwlock.h>
+#include <sys/systm.h>
+#include <sys/vmmeter.h>
+
+#include <machine/metadata.h>
+#include <machine/pcb.h>
+#include <machine/pte.h>
+#include <machine/vfp.h>
+#include <machine/vmparam.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pager.h>
+
+static vm_object_t obj_1t1_pt;
+static vm_pindex_t efi_1t1_idx;
+static pd_entry_t *efi_l0;
+static uint64_t efi_ttbr0;
+
+void
+efi_destroy_1t1_map(void)
+{
+ vm_page_t m;
+
+ if (obj_1t1_pt != NULL) {
+ VM_OBJECT_RLOCK(obj_1t1_pt);
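+		/*
+		 * Drop the wiring reference on each page-table page so
+		 * that deallocating the object below frees them.
+		 */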
+ TAILQ_FOREACH(m, &obj_1t1_pt->memq, listq)
+ m->ref_count = VPRC_OBJREF;
+ vm_wire_sub(obj_1t1_pt->resident_page_count);
+ VM_OBJECT_RUNLOCK(obj_1t1_pt);
+ vm_object_deallocate(obj_1t1_pt);
+ }
+
+ obj_1t1_pt = NULL;
+ efi_1t1_idx = 0;
+ efi_l0 = NULL;
+ efi_ttbr0 = 0;
+}
+
+static vm_page_t
+efi_1t1_page(void)
+{
+
+ return (vm_page_grab(obj_1t1_pt, efi_1t1_idx++, VM_ALLOC_NOBUSY |
+ VM_ALLOC_WIRED | VM_ALLOC_ZERO));
+}
+
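+/*
+ * Walk the EFI 1:1 page table for the given VA, allocating any missing
+ * intermediate tables, and return a pointer to the final L3 entry.
+ */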
+static pt_entry_t *
+efi_1t1_l3(vm_offset_t va)
+{
+ pd_entry_t *l0, *l1, *l2;
+ pt_entry_t *l3;
+ vm_pindex_t l0_idx, l1_idx, l2_idx;
+ vm_page_t m;
+ vm_paddr_t mphys;
+
+ l0_idx = pmap_l0_index(va);
+ l0 = &efi_l0[l0_idx];
+ if (*l0 == 0) {
+ m = efi_1t1_page();
+ mphys = VM_PAGE_TO_PHYS(m);
+ *l0 = mphys | L0_TABLE;
+ } else {
+ mphys = *l0 & ~ATTR_MASK;
+ }
+
+ l1 = (pd_entry_t *)PHYS_TO_DMAP(mphys);
+ l1_idx = pmap_l1_index(va);
+ l1 += l1_idx;
+ if (*l1 == 0) {
+ m = efi_1t1_page();
+ mphys = VM_PAGE_TO_PHYS(m);
+ *l1 = mphys | L1_TABLE;
+ } else {
+ mphys = *l1 & ~ATTR_MASK;
+ }
+
+ l2 = (pd_entry_t *)PHYS_TO_DMAP(mphys);
+ l2_idx = pmap_l2_index(va);
+ l2 += l2_idx;
+ if (*l2 == 0) {
+ m = efi_1t1_page();
+ mphys = VM_PAGE_TO_PHYS(m);
+ *l2 = mphys | L2_TABLE;
+ } else {
+ mphys = *l2 & ~ATTR_MASK;
+ }
+
+ l3 = (pt_entry_t *)PHYS_TO_DMAP(mphys);
+ l3 += pmap_l3_index(va);
+ KASSERT(*l3 == 0, ("%s: Already mapped: va %#jx *pt %#jx", __func__,
+ va, *l3));
+
+ return (l3);
+}
+
+/*
+ * Map a physical address from EFI runtime space into KVA space. Returns 0 to
+ * indicate a failed mapping so that the caller may handle the error.
+ */
+vm_offset_t
+efi_phys_to_kva(vm_paddr_t paddr)
+{
+
+ if (!PHYS_IN_DMAP(paddr))
+ return (0);
+ return (PHYS_TO_DMAP(paddr));
+}
+
+/*
+ * Create the 1:1 virtual to physical map for EFI
+ */
+bool
+efi_create_1t1_map(struct efi_md *map, int ndesc, int descsz)
+{
+ struct efi_md *p;
+ pt_entry_t *l3, l3_attr;
+ vm_offset_t va;
+ vm_page_t efi_l0_page;
+ uint64_t idx;
+ int i, mode;
+
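+	/*
+	 * Size the backing object for the worst case: enough pages to
+	 * build page tables at every level for the whole address space.
+	 */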
+ obj_1t1_pt = vm_pager_allocate(OBJT_PHYS, NULL, L0_ENTRIES +
+ L0_ENTRIES * Ln_ENTRIES + L0_ENTRIES * Ln_ENTRIES * Ln_ENTRIES +
+ L0_ENTRIES * Ln_ENTRIES * Ln_ENTRIES * Ln_ENTRIES,
+ VM_PROT_ALL, 0, NULL);
+ VM_OBJECT_WLOCK(obj_1t1_pt);
+ efi_l0_page = efi_1t1_page();
+ VM_OBJECT_WUNLOCK(obj_1t1_pt);
+ efi_l0 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(efi_l0_page));
+ efi_ttbr0 = ASID_TO_OPERAND(ASID_RESERVED_FOR_EFI) |
+ VM_PAGE_TO_PHYS(efi_l0_page);
+
+ for (i = 0, p = map; i < ndesc; i++, p = efi_next_descriptor(p,
+ descsz)) {
+ if ((p->md_attr & EFI_MD_ATTR_RT) == 0)
+ continue;
+ if (p->md_virt != NULL && (uint64_t)p->md_virt != p->md_phys) {
+ if (bootverbose)
+ printf("EFI Runtime entry %d is mapped\n", i);
+ goto fail;
+ }
+ if ((p->md_phys & EFI_PAGE_MASK) != 0) {
+ if (bootverbose)
+ printf("EFI Runtime entry %d is not aligned\n",
+ i);
+ goto fail;
+ }
+ if (p->md_phys + p->md_pages * EFI_PAGE_SIZE < p->md_phys ||
+ p->md_phys + p->md_pages * EFI_PAGE_SIZE >=
+ VM_MAXUSER_ADDRESS) {
+			printf("EFI Runtime entry %d is not mappable for RT:"
+ "base %#016jx %#jx pages\n",
+ i, (uintmax_t)p->md_phys,
+ (uintmax_t)p->md_pages);
+ goto fail;
+ }
+ if ((p->md_attr & EFI_MD_ATTR_WB) != 0)
+ mode = VM_MEMATTR_WRITE_BACK;
+ else if ((p->md_attr & EFI_MD_ATTR_WT) != 0)
+ mode = VM_MEMATTR_WRITE_THROUGH;
+ else if ((p->md_attr & EFI_MD_ATTR_WC) != 0)
+ mode = VM_MEMATTR_WRITE_COMBINING;
+ else
+ mode = VM_MEMATTR_DEVICE;
+
+		printf("MAP %lx mode %x pages %lu\n", p->md_phys, mode,
+		    p->md_pages);
+
+ l3_attr = ATTR_DEFAULT | ATTR_S1_IDX(mode) |
+ ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_nG | L3_PAGE;
+ if (mode == VM_MEMATTR_DEVICE || p->md_attr & EFI_MD_ATTR_XP)
+ l3_attr |= ATTR_S1_XN;
+
+ VM_OBJECT_WLOCK(obj_1t1_pt);
+ for (va = p->md_phys, idx = 0; idx < p->md_pages; idx++,
+ va += PAGE_SIZE) {
+ l3 = efi_1t1_l3(va);
+ *l3 = va | l3_attr;
+ }
+ VM_OBJECT_WUNLOCK(obj_1t1_pt);
+ }
+
+ return (true);
+fail:
+ efi_destroy_1t1_map();
+ return (false);
+}
+
+int
+efi_arch_enter(void)
+{
+
+ CRITICAL_ASSERT(curthread);
+
+ /*
+ * Temporarily switch to EFI's page table. However, we leave curpmap
+ * unchanged in order to prevent its ASID from being reclaimed before
+ * we switch back to its page table in efi_arch_leave().
+ */
+ set_ttbr0(efi_ttbr0);
+ if (PCPU_GET(bcast_tlbi_workaround) != 0)
+ invalidate_local_icache();
+
+ return (0);
+}
+
+void
+efi_arch_leave(void)
+{
+
+ /*
+ * Restore the pcpu pointer. Some UEFI implementations trash it and
+ * we don't store it before calling into them. To fix this we need
+ * to restore it after returning to the kernel context. As reading
+ * curpmap will access x18 we need to restore it before loading
+ * the pmap pointer.
+ */
+ __asm __volatile(
+ "mrs x18, tpidr_el1 \n"
+ );
+ set_ttbr0(pmap_to_ttbr0(PCPU_GET(curpmap)));
+ if (PCPU_GET(bcast_tlbi_workaround) != 0)
+ invalidate_local_icache();
+}
+
+int
+efi_rt_arch_call(struct efirt_callinfo *ec)
+{
+
+ panic("not implemented");
+}
diff --git a/sys/arm64/arm64/elf32_machdep.c b/sys/arm64/arm64/elf32_machdep.c
new file mode 100644
index 000000000000..f99523cb6362
--- /dev/null
+++ b/sys/arm64/arm64/elf32_machdep.c
@@ -0,0 +1,261 @@
+/*-
+ * Copyright (c) 2014, 2015 The FreeBSD Foundation.
+ * Copyright (c) 2014, 2017 Andrew Turner.
+ * Copyright (c) 2018 Olivier Houchard
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Portions of this software were developed by Konstantin Belousov
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#define __ELF_WORD_SIZE 32
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/exec.h>
+#include <sys/imgact.h>
+#include <sys/linker.h>
+#include <sys/proc.h>
+#include <sys/sysent.h>
+#include <sys/imgact_elf.h>
+#include <sys/syscall.h>
+#include <sys/signalvar.h>
+#include <sys/vnode.h>
+
+#include <machine/elf.h>
+
+#include <compat/freebsd32/freebsd32_util.h>
+
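+/*
+ * 32-bit user address space layout: the usable range ends just below 4 GiB,
+ * the shared page sits at the top of that range, and the user stack grows
+ * down from the shared page.
+ */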
+#define FREEBSD32_MINUSER 0x00001000
+#define FREEBSD32_MAXUSER ((1ul << 32) - PAGE_SIZE)
+#define FREEBSD32_SHAREDPAGE (FREEBSD32_MAXUSER - PAGE_SIZE)
+#define FREEBSD32_USRSTACK FREEBSD32_SHAREDPAGE
+
+extern const char *freebsd32_syscallnames[];
+
+extern char aarch32_sigcode[];
+extern int sz_aarch32_sigcode;
+
+static int freebsd32_fetch_syscall_args(struct thread *td);
+static void freebsd32_setregs(struct thread *td, struct image_params *imgp,
+ u_long stack);
+static void freebsd32_set_syscall_retval(struct thread *, int);
+
+static boolean_t elf32_arm_abi_supported(struct image_params *, int32_t *,
+ uint32_t *);
+
+extern void freebsd32_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask);
+
+static struct sysentvec elf32_freebsd_sysvec = {
+ .sv_size = SYS_MAXSYSCALL,
+ .sv_table = freebsd32_sysent,
+ .sv_errsize = 0,
+ .sv_errtbl = NULL,
+ .sv_transtrap = NULL,
+ .sv_fixup = elf32_freebsd_fixup,
+ .sv_sendsig = freebsd32_sendsig,
+ .sv_sigcode = aarch32_sigcode,
+ .sv_szsigcode = &sz_aarch32_sigcode,
+ .sv_name = "FreeBSD ELF32",
+ .sv_coredump = elf32_coredump,
+ .sv_imgact_try = NULL,
+ .sv_minsigstksz = MINSIGSTKSZ,
+ .sv_minuser = FREEBSD32_MINUSER,
+ .sv_maxuser = FREEBSD32_MAXUSER,
+ .sv_usrstack = FREEBSD32_USRSTACK,
+ .sv_psstrings = FREEBSD32_PS_STRINGS,
+ .sv_stackprot = VM_PROT_READ | VM_PROT_WRITE,
+ .sv_copyout_auxargs = elf32_freebsd_copyout_auxargs,
+ .sv_copyout_strings = freebsd32_copyout_strings,
+ .sv_setregs = freebsd32_setregs,
+	.sv_fixlimit	= NULL,		/* XXX */
+ .sv_maxssiz = NULL,
+ .sv_flags = SV_ABI_FREEBSD | SV_ILP32 | SV_SHP | SV_TIMEKEEP,
+ .sv_set_syscall_retval = freebsd32_set_syscall_retval,
+ .sv_fetch_syscall_args = freebsd32_fetch_syscall_args,
+ .sv_syscallnames = freebsd32_syscallnames,
+ .sv_shared_page_base = FREEBSD32_SHAREDPAGE,
+ .sv_shared_page_len = PAGE_SIZE,
+ .sv_schedtail = NULL,
+ .sv_thread_detach = NULL,
+ .sv_trap = NULL,
+};
+INIT_SYSENTVEC(elf32_sysvec, &elf32_freebsd_sysvec);
+
+static Elf32_Brandinfo freebsd32_brand_info = {
+ .brand = ELFOSABI_FREEBSD,
+ .machine = EM_ARM,
+ .compat_3_brand = "FreeBSD",
+ .emul_path = NULL,
+ .interp_path = "/libexec/ld-elf.so.1",
+ .sysvec = &elf32_freebsd_sysvec,
+ .interp_newpath = "/libexec/ld-elf32.so.1",
+ .brand_note = &elf32_freebsd_brandnote,
+ .flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE,
+ .header_supported= elf32_arm_abi_supported,
+};
+
+SYSINIT(elf32, SI_SUB_EXEC, SI_ORDER_FIRST,
+ (sysinit_cfunc_t)elf32_insert_brand_entry, &freebsd32_brand_info);
+
+static boolean_t
+elf32_arm_abi_supported(struct image_params *imgp, int32_t *osrel __unused,
+ uint32_t *fctl0 __unused)
+{
+ const Elf32_Ehdr *hdr;
+
+ /* Check if we support AArch32 */
+ if (ID_AA64PFR0_EL0_VAL(READ_SPECIALREG(id_aa64pfr0_el1)) !=
+ ID_AA64PFR0_EL0_64_32)
+ return (FALSE);
+
+#define EF_ARM_EABI_VERSION(x) (((x) & EF_ARM_EABIMASK) >> 24)
+#define EF_ARM_EABI_FREEBSD_MIN 4
+ hdr = (const Elf32_Ehdr *)imgp->image_header;
+ if (EF_ARM_EABI_VERSION(hdr->e_flags) < EF_ARM_EABI_FREEBSD_MIN) {
+ if (bootverbose)
+			uprintf("Attempting to execute non-EABI binary "
+			    "(rev %d) image %s\n",
+ EF_ARM_EABI_VERSION(hdr->e_flags),
+ imgp->args->fname);
+ return (FALSE);
+ }
+
+ return (TRUE);
+}
+
+static int
+freebsd32_fetch_syscall_args(struct thread *td)
+{
+ struct proc *p;
+ register_t *ap;
+ struct syscall_args *sa;
+ int error, i, nap;
+ unsigned int args[4];
+
+ nap = 4;
+ p = td->td_proc;
+ ap = td->td_frame->tf_x;
+ sa = &td->td_sa;
+
+ /* r7 is the syscall id */
+ sa->code = td->td_frame->tf_x[7];
+
+ if (sa->code == SYS_syscall) {
+ sa->code = *ap++;
+ nap--;
+ } else if (sa->code == SYS___syscall) {
+ sa->code = ap[1];
+ nap -= 2;
+ ap += 2;
+ }
+
+ if (sa->code >= p->p_sysent->sv_size)
+ sa->callp = &p->p_sysent->sv_table[0];
+ else
+ sa->callp = &p->p_sysent->sv_table[sa->code];
+
+ sa->narg = sa->callp->sy_narg;
+ for (i = 0; i < nap; i++)
+ sa->args[i] = ap[i];
+ if (sa->narg > nap) {
+ if ((sa->narg - nap) > nitems(args))
+			panic("Too many system call arguments");
+		error = copyin((void *)td->td_frame->tf_x[13], args,
+		    (sa->narg - nap) * sizeof(int));
+		if (error != 0)
+			return (error);
+		for (i = 0; i < (sa->narg - nap); i++)
+			sa->args[i + nap] = args[i];
+ }
+
+ td->td_retval[0] = 0;
+ td->td_retval[1] = 0;
+
+ return (0);
+}
+
+static void
+freebsd32_set_syscall_retval(struct thread *td, int error)
+{
+ struct trapframe *frame;
+
+ frame = td->td_frame;
+ switch (error) {
+ case 0:
+ frame->tf_x[0] = td->td_retval[0];
+ frame->tf_x[1] = td->td_retval[1];
+ frame->tf_spsr &= ~PSR_C;
+ break;
+ case ERESTART:
+ /*
+ * Reconstruct the pc to point at the swi.
+ */
+ if ((frame->tf_spsr & PSR_T) != 0)
+			frame->tf_elr -= 2;	/* THUMB_INSN_SIZE */
+		else
+			frame->tf_elr -= 4;	/* INSN_SIZE */
+ break;
+ case EJUSTRETURN:
+ /* nothing to do */
+ break;
+ default:
+ frame->tf_x[0] = error;
+ frame->tf_spsr |= PSR_C;
+ break;
+ }
+}
+
+static void
+freebsd32_setregs(struct thread *td, struct image_params *imgp,
+	u_long stack)
+{
+ struct trapframe *tf = td->td_frame;
+
+ memset(tf, 0, sizeof(struct trapframe));
+
+ /*
+ * We need to set x0 for init as it doesn't call
+ * cpu_set_syscall_retval to copy the value. We also
+ * need to set td_retval for the cases where we do.
+ */
+ tf->tf_x[0] = stack;
+ /* SP_usr is mapped to x13 */
+ tf->tf_x[13] = stack;
+ /* LR_usr is mapped to x14 */
+ tf->tf_x[14] = imgp->entry_addr;
+ tf->tf_elr = imgp->entry_addr;
+ tf->tf_spsr = PSR_M_32;
+}
+
+void
+elf32_dump_thread(struct thread *td, void *dst, size_t *off)
+{
+ /* XXX: VFP */
+}
diff --git a/sys/arm64/arm64/elf_machdep.c b/sys/arm64/arm64/elf_machdep.c
new file mode 100644
index 000000000000..392cdfaee246
--- /dev/null
+++ b/sys/arm64/arm64/elf_machdep.c
@@ -0,0 +1,284 @@
+/*-
+ * Copyright (c) 2014, 2015 The FreeBSD Foundation.
+ * Copyright (c) 2014 Andrew Turner.
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Portions of this software were developed by Konstantin Belousov
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/exec.h>
+#include <sys/imgact.h>
+#include <sys/linker.h>
+#include <sys/proc.h>
+#include <sys/sysent.h>
+#include <sys/imgact_elf.h>
+#include <sys/syscall.h>
+#include <sys/signalvar.h>
+#include <sys/vnode.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+
+#include <machine/elf.h>
+#include <machine/md_var.h>
+
+#include "linker_if.h"
+
+u_long elf_hwcap;
+
+static struct sysentvec elf64_freebsd_sysvec = {
+ .sv_size = SYS_MAXSYSCALL,
+ .sv_table = sysent,
+ .sv_errsize = 0,
+ .sv_errtbl = NULL,
+ .sv_transtrap = NULL,
+ .sv_fixup = __elfN(freebsd_fixup),
+ .sv_sendsig = sendsig,
+ .sv_sigcode = sigcode,
+ .sv_szsigcode = &szsigcode,
+ .sv_name = "FreeBSD ELF64",
+ .sv_coredump = __elfN(coredump),
+ .sv_imgact_try = NULL,
+ .sv_minsigstksz = MINSIGSTKSZ,
+ .sv_minuser = VM_MIN_ADDRESS,
+ .sv_maxuser = VM_MAXUSER_ADDRESS,
+ .sv_usrstack = USRSTACK,
+ .sv_psstrings = PS_STRINGS,
+ .sv_stackprot = VM_PROT_READ | VM_PROT_WRITE,
+ .sv_copyout_auxargs = __elfN(freebsd_copyout_auxargs),
+ .sv_copyout_strings = exec_copyout_strings,
+ .sv_setregs = exec_setregs,
+ .sv_fixlimit = NULL,
+ .sv_maxssiz = NULL,
+ .sv_flags = SV_SHP | SV_TIMEKEEP | SV_ABI_FREEBSD | SV_LP64 |
+ SV_ASLR,
+ .sv_set_syscall_retval = cpu_set_syscall_retval,
+ .sv_fetch_syscall_args = cpu_fetch_syscall_args,
+ .sv_syscallnames = syscallnames,
+ .sv_shared_page_base = SHAREDPAGE,
+ .sv_shared_page_len = PAGE_SIZE,
+ .sv_schedtail = NULL,
+ .sv_thread_detach = NULL,
+ .sv_trap = NULL,
+ .sv_hwcap = &elf_hwcap,
+};
+INIT_SYSENTVEC(elf64_sysvec, &elf64_freebsd_sysvec);
+
+static Elf64_Brandinfo freebsd_brand_info = {
+ .brand = ELFOSABI_FREEBSD,
+ .machine = EM_AARCH64,
+ .compat_3_brand = "FreeBSD",
+ .emul_path = NULL,
+ .interp_path = "/libexec/ld-elf.so.1",
+ .sysvec = &elf64_freebsd_sysvec,
+ .interp_newpath = NULL,
+ .brand_note = &elf64_freebsd_brandnote,
+ .flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE
+};
+
+SYSINIT(elf64, SI_SUB_EXEC, SI_ORDER_FIRST,
+ (sysinit_cfunc_t)elf64_insert_brand_entry, &freebsd_brand_info);
+
+void
+elf64_dump_thread(struct thread *td __unused, void *dst __unused,
+ size_t *off __unused)
+{
+
+}
+
+bool
+elf_is_ifunc_reloc(Elf_Size r_info __unused)
+{
+
+ return (ELF_R_TYPE(r_info) == R_AARCH64_IRELATIVE);
+}
+
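+/*
+ * Extract bits [msb:lsb] of "val" and OR them into the instruction word at
+ * "where", failing if the value does not fit in the signed range implied
+ * by msb.
+ */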
+static int
+reloc_instr_imm(Elf32_Addr *where, Elf_Addr val, u_int msb, u_int lsb)
+{
+
+ /* Check bounds: upper bits must be all ones or all zeros. */
+ if ((uint64_t)((int64_t)val >> (msb + 1)) + 1 > 1)
+ return (-1);
+ val >>= lsb;
+ val &= (1 << (msb - lsb + 1)) - 1;
+ *where |= (Elf32_Addr)val;
+ return (0);
+}
+
+/*
+ * Process a relocation. Support for some static relocations is required
+ * in order for the -zifunc-noplt optimization to work.
+ */
+static int
+elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data,
+ int type, int flags, elf_lookup_fn lookup)
+{
+#define ARM64_ELF_RELOC_LOCAL (1 << 0)
+#define ARM64_ELF_RELOC_LATE_IFUNC (1 << 1)
+ Elf_Addr *where, addr, addend, val;
+ Elf_Word rtype, symidx;
+ const Elf_Rel *rel;
+ const Elf_Rela *rela;
+ int error;
+
+ switch (type) {
+ case ELF_RELOC_REL:
+ rel = (const Elf_Rel *)data;
+ where = (Elf_Addr *) (relocbase + rel->r_offset);
+ addend = *where;
+ rtype = ELF_R_TYPE(rel->r_info);
+ symidx = ELF_R_SYM(rel->r_info);
+ break;
+ case ELF_RELOC_RELA:
+ rela = (const Elf_Rela *)data;
+ where = (Elf_Addr *) (relocbase + rela->r_offset);
+ addend = rela->r_addend;
+ rtype = ELF_R_TYPE(rela->r_info);
+ symidx = ELF_R_SYM(rela->r_info);
+ break;
+ default:
+ panic("unknown reloc type %d\n", type);
+ }
+
+ if ((flags & ARM64_ELF_RELOC_LATE_IFUNC) != 0) {
+ KASSERT(type == ELF_RELOC_RELA,
+ ("Only RELA ifunc relocations are supported"));
+ if (rtype != R_AARCH64_IRELATIVE)
+ return (0);
+ }
+
+ if ((flags & ARM64_ELF_RELOC_LOCAL) != 0) {
+ if (rtype == R_AARCH64_RELATIVE)
+ *where = elf_relocaddr(lf, relocbase + addend);
+ return (0);
+ }
+
+ error = 0;
+ switch (rtype) {
+ case R_AARCH64_NONE:
+ case R_AARCH64_RELATIVE:
+ break;
+ case R_AARCH64_TSTBR14:
+ error = lookup(lf, symidx, 1, &addr);
+ if (error != 0)
+ return (-1);
+ error = reloc_instr_imm((Elf32_Addr *)where,
+ addr + addend - (Elf_Addr)where, 15, 2);
+ break;
+ case R_AARCH64_CONDBR19:
+ error = lookup(lf, symidx, 1, &addr);
+ if (error != 0)
+ return (-1);
+ error = reloc_instr_imm((Elf32_Addr *)where,
+ addr + addend - (Elf_Addr)where, 20, 2);
+ break;
+ case R_AARCH64_JUMP26:
+ case R_AARCH64_CALL26:
+ error = lookup(lf, symidx, 1, &addr);
+ if (error != 0)
+ return (-1);
+ error = reloc_instr_imm((Elf32_Addr *)where,
+ addr + addend - (Elf_Addr)where, 27, 2);
+ break;
+ case R_AARCH64_ABS64:
+ case R_AARCH64_GLOB_DAT:
+ case R_AARCH64_JUMP_SLOT:
+ error = lookup(lf, symidx, 1, &addr);
+ if (error != 0)
+ return (-1);
+ *where = addr + addend;
+ break;
+ case R_AARCH64_IRELATIVE:
+ addr = relocbase + addend;
+ val = ((Elf64_Addr (*)(void))addr)();
+ if (*where != val)
+ *where = val;
+ break;
+ default:
+ printf("kldload: unexpected relocation type %d\n", rtype);
+ return (-1);
+ }
+ return (error);
+}
+
+int
+elf_reloc_local(linker_file_t lf, Elf_Addr relocbase, const void *data,
+ int type, elf_lookup_fn lookup)
+{
+
+ return (elf_reloc_internal(lf, relocbase, data, type,
+ ARM64_ELF_RELOC_LOCAL, lookup));
+}
+
+/* Process one elf relocation with addend. */
+int
+elf_reloc(linker_file_t lf, Elf_Addr relocbase, const void *data, int type,
+ elf_lookup_fn lookup)
+{
+
+ return (elf_reloc_internal(lf, relocbase, data, type, 0, lookup));
+}
+
+int
+elf_reloc_late(linker_file_t lf, Elf_Addr relocbase, const void *data,
+ int type, elf_lookup_fn lookup)
+{
+
+ return (elf_reloc_internal(lf, relocbase, data, type,
+ ARM64_ELF_RELOC_LATE_IFUNC, lookup));
+}
+
+int
+elf_cpu_load_file(linker_file_t lf)
+{
+
+ if (lf->id != 1)
+ cpu_icache_sync_range((vm_offset_t)lf->address, lf->size);
+ return (0);
+}
+
+int
+elf_cpu_unload_file(linker_file_t lf __unused)
+{
+
+ return (0);
+}
+
+int
+elf_cpu_parse_dynamic(caddr_t loadbase __unused, Elf_Dyn *dynamic __unused)
+{
+
+ return (0);
+}
diff --git a/sys/arm64/arm64/exception.S b/sys/arm64/arm64/exception.S
new file mode 100644
index 000000000000..123f73b49734
--- /dev/null
+++ b/sys/arm64/arm64/exception.S
@@ -0,0 +1,255 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+#include <machine/armreg.h>
+__FBSDID("$FreeBSD$");
+
+#include "assym.inc"
+
+ .text
+
+.macro save_registers el
+.if \el == 1
+ mov x18, sp
+ sub sp, sp, #128
+.endif
+ sub sp, sp, #(TF_SIZE + 16)
+ stp x29, x30, [sp, #(TF_SIZE)]
+ stp x28, x29, [sp, #(TF_X + 28 * 8)]
+ stp x26, x27, [sp, #(TF_X + 26 * 8)]
+ stp x24, x25, [sp, #(TF_X + 24 * 8)]
+ stp x22, x23, [sp, #(TF_X + 22 * 8)]
+ stp x20, x21, [sp, #(TF_X + 20 * 8)]
+ stp x18, x19, [sp, #(TF_X + 18 * 8)]
+ stp x16, x17, [sp, #(TF_X + 16 * 8)]
+ stp x14, x15, [sp, #(TF_X + 14 * 8)]
+ stp x12, x13, [sp, #(TF_X + 12 * 8)]
+ stp x10, x11, [sp, #(TF_X + 10 * 8)]
+ stp x8, x9, [sp, #(TF_X + 8 * 8)]
+ stp x6, x7, [sp, #(TF_X + 6 * 8)]
+ stp x4, x5, [sp, #(TF_X + 4 * 8)]
+ stp x2, x3, [sp, #(TF_X + 2 * 8)]
+ stp x0, x1, [sp, #(TF_X + 0 * 8)]
+ mrs x10, elr_el1
+ mrs x11, spsr_el1
+ mrs x12, esr_el1
+.if \el == 0
+ mrs x18, sp_el0
+.endif
+ str x10, [sp, #(TF_ELR)]
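+	/* spsr and esr are adjacent 32-bit fields in the trapframe */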
+ stp w11, w12, [sp, #(TF_SPSR)]
+ stp x18, lr, [sp, #(TF_SP)]
+ mrs x18, tpidr_el1
+ add x29, sp, #(TF_SIZE)
+.if \el == 0
+ /* Apply the SSBD (CVE-2018-3639) workaround if needed */
+ ldr x1, [x18, #PC_SSBD]
+ cbz x1, 1f
+ mov w0, #1
+ blr x1
+1:
+
+ ldr x0, [x18, #(PC_CURTHREAD)]
+ bl dbg_monitor_enter
+.endif
+ msr daifclr, #8 /* Enable the debug exception */
+.endm
+
+.macro restore_registers el
+.if \el == 1
+ /*
+ * Disable interrupts and debug exceptions, x18 may change in the
+ * interrupt exception handler. For EL0 exceptions, do_ast already
+ * did this.
+ */
+ msr daifset, #10
+.endif
+.if \el == 0
+ ldr x0, [x18, #PC_CURTHREAD]
+ mov x1, sp
+ bl dbg_monitor_exit
+
+ /* Remove the SSBD (CVE-2018-3639) workaround if needed */
+ ldr x1, [x18, #PC_SSBD]
+ cbz x1, 1f
+ mov w0, #0
+ blr x1
+1:
+.endif
+ ldp x18, lr, [sp, #(TF_SP)]
+ ldp x10, x11, [sp, #(TF_ELR)]
+.if \el == 0
+ msr sp_el0, x18
+.endif
+ msr spsr_el1, x11
+ msr elr_el1, x10
+ ldp x0, x1, [sp, #(TF_X + 0 * 8)]
+ ldp x2, x3, [sp, #(TF_X + 2 * 8)]
+ ldp x4, x5, [sp, #(TF_X + 4 * 8)]
+ ldp x6, x7, [sp, #(TF_X + 6 * 8)]
+ ldp x8, x9, [sp, #(TF_X + 8 * 8)]
+ ldp x10, x11, [sp, #(TF_X + 10 * 8)]
+ ldp x12, x13, [sp, #(TF_X + 12 * 8)]
+ ldp x14, x15, [sp, #(TF_X + 14 * 8)]
+ ldp x16, x17, [sp, #(TF_X + 16 * 8)]
+.if \el == 0
+ /*
+ * We only restore the callee saved registers when returning to
+ * userland as they may have been updated by a system call or signal.
+ */
+ ldp x18, x19, [sp, #(TF_X + 18 * 8)]
+ ldp x20, x21, [sp, #(TF_X + 20 * 8)]
+ ldp x22, x23, [sp, #(TF_X + 22 * 8)]
+ ldp x24, x25, [sp, #(TF_X + 24 * 8)]
+ ldp x26, x27, [sp, #(TF_X + 26 * 8)]
+ ldp x28, x29, [sp, #(TF_X + 28 * 8)]
+.else
+ ldr x29, [sp, #(TF_X + 29 * 8)]
+.endif
+.if \el == 0
+ add sp, sp, #(TF_SIZE + 16)
+.else
+ mov sp, x18
+ mrs x18, tpidr_el1
+.endif
+.endm
+
+.macro do_ast
+ mrs x19, daif
+ /* Make sure the IRQs are enabled before calling ast() */
+ bic x19, x19, #PSR_I
+1:
+ /* Disable interrupts */
+ msr daifset, #10
+
+ /* Read the current thread flags */
+ ldr x1, [x18, #PC_CURTHREAD] /* Load curthread */
+ ldr x2, [x1, #TD_FLAGS]
+
+	/* Check if either bit is set */
+ mov x3, #((TDF_ASTPENDING|TDF_NEEDRESCHED) >> 8)
+ lsl x3, x3, #8
+ and x2, x2, x3
+ cbz x2, 2f
+
+ /* Restore interrupts */
+ msr daif, x19
+
+ /* handle the ast */
+ mov x0, sp
+ bl _C_LABEL(ast)
+
+ /* Re-check for new ast scheduled */
+ b 1b
+2:
+.endm
+
+ENTRY(handle_el1h_sync)
+ save_registers 1
+ ldr x0, [x18, #PC_CURTHREAD]
+ mov x1, sp
+ bl do_el1h_sync
+ restore_registers 1
+ ERET
+END(handle_el1h_sync)
+
+ENTRY(handle_el1h_irq)
+ save_registers 1
+ mov x0, sp
+ bl intr_irq_handler
+ restore_registers 1
+ ERET
+END(handle_el1h_irq)
+
+ENTRY(handle_el0_sync)
+ save_registers 0
+ ldr x0, [x18, #PC_CURTHREAD]
+ mov x1, sp
+ str x1, [x0, #TD_FRAME]
+ bl do_el0_sync
+ do_ast
+ restore_registers 0
+ ERET
+END(handle_el0_sync)
+
+ENTRY(handle_el0_irq)
+ save_registers 0
+ mov x0, sp
+ bl intr_irq_handler
+ do_ast
+ restore_registers 0
+ ERET
+END(handle_el0_irq)
+
+ENTRY(handle_serror)
+ save_registers 0
+ mov x0, sp
+1: bl do_serror
+ b 1b
+END(handle_serror)
+
+ENTRY(handle_empty_exception)
+ save_registers 0
+ mov x0, sp
+1: bl unhandled_exception
+ b 1b
+END(handle_empty_exception)
+
+.macro vempty
+ .align 7
+ b handle_empty_exception
+.endm
+
+.macro vector name
+ .align 7
+ b handle_\name
+.endm
+
+ .align 11
+ .globl exception_vectors
+exception_vectors:
+ vempty /* Synchronous EL1t */
+ vempty /* IRQ EL1t */
+ vempty /* FIQ EL1t */
+ vempty /* Error EL1t */
+
+ vector el1h_sync /* Synchronous EL1h */
+ vector el1h_irq /* IRQ EL1h */
+ vempty /* FIQ EL1h */
+ vector serror /* Error EL1h */
+
+ vector el0_sync /* Synchronous 64-bit EL0 */
+ vector el0_irq /* IRQ 64-bit EL0 */
+ vempty /* FIQ 64-bit EL0 */
+ vector serror /* Error 64-bit EL0 */
+
+ vector el0_sync /* Synchronous 32-bit EL0 */
+ vector el0_irq /* IRQ 32-bit EL0 */
+ vempty /* FIQ 32-bit EL0 */
+ vector serror /* Error 32-bit EL0 */
+
diff --git a/sys/arm64/arm64/freebsd32_machdep.c b/sys/arm64/arm64/freebsd32_machdep.c
new file mode 100644
index 000000000000..b1e070feb4f6
--- /dev/null
+++ b/sys/arm64/arm64/freebsd32_machdep.c
@@ -0,0 +1,438 @@
+/*-
+ * Copyright (c) 2018 Olivier Houchard
+ * Copyright (c) 2017 Nuxi, https://nuxi.nl/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/proc.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/syscallsubr.h>
+#include <sys/ktr.h>
+#include <sys/sysent.h>
+#include <sys/sysproto.h>
+#include <machine/armreg.h>
+#ifdef VFP
+#include <machine/vfp.h>
+#endif
+#include <compat/freebsd32/freebsd32_proto.h>
+#include <compat/freebsd32/freebsd32_signal.h>
+
+extern void freebsd32_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask);
+
+/*
+ * The first two fields of a ucontext_t are the signal mask and the machine
+ * context. The next field is uc_link; we want to avoid destroying the link
+ * when copying out contexts.
+ */
+#define UC32_COPY_SIZE offsetof(ucontext32_t, uc_link)
+
+#ifdef VFP
+static void get_fpcontext32(struct thread *td, mcontext32_vfp_t *);
+#endif
+
+/*
+ * Stubs for machine dependent 32-bits system calls.
+ */
+
+int
+freebsd32_sysarch(struct thread *td, struct freebsd32_sysarch_args *uap)
+{
+ int error;
+
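+/* Operation codes shared with the 32-bit arm sysarch(2) interface */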
+#define ARM_SYNC_ICACHE 0
+#define ARM_DRAIN_WRITEBUF 1
+#define ARM_SET_TP 2
+#define ARM_GET_TP 3
+#define ARM_GET_VFPSTATE 4
+
+	switch (uap->op) {
+ case ARM_SET_TP:
+ WRITE_SPECIALREG(tpidr_el0, uap->parms);
+ WRITE_SPECIALREG(tpidrro_el0, uap->parms);
+		return (0);
+ case ARM_SYNC_ICACHE:
+ {
+ struct {
+ uint32_t addr;
+ uint32_t size;
+ } args;
+
+ if ((error = copyin(uap->parms, &args, sizeof(args))) != 0)
+ return (error);
+ if ((uint64_t)args.addr + (uint64_t)args.size > 0xffffffff)
+ return (EINVAL);
+ cpu_icache_sync_range_checked(args.addr, args.size);
+		return (0);
+ }
+ case ARM_GET_VFPSTATE:
+ {
+ mcontext32_vfp_t mcontext_vfp;
+
+ struct {
+ uint32_t mc_vfp_size;
+ uint32_t mc_vfp;
+ } args;
+ if ((error = copyin(uap->parms, &args, sizeof(args))) != 0)
+ return (error);
+ if (args.mc_vfp_size != sizeof(mcontext_vfp))
+ return (EINVAL);
+#ifdef VFP
+ get_fpcontext32(td, &mcontext_vfp);
+#else
+ bzero(&mcontext_vfp, sizeof(mcontext_vfp));
+#endif
+ error = copyout(&mcontext_vfp,
+ (void *)(uintptr_t)args.mc_vfp,
+ sizeof(mcontext_vfp));
+		return (error);
+ }
+ }
+
+ return (EINVAL);
+}
+
+#ifdef VFP
+static void
+get_fpcontext32(struct thread *td, mcontext32_vfp_t *mcp)
+{
+ struct pcb *curpcb;
+ int i;
+
+ critical_enter();
+ curpcb = curthread->td_pcb;
+
+ if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
+ /*
+		 * If we have just been running VFP instructions we will
+		 * need to save the current state so we can copy it below.
+ */
+ vfp_save_state(td, curpcb);
+
+ KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
+ ("Called get_fpcontext while the kernel is using the VFP"));
+ KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
+ ("Non-userspace FPU flags set in get_fpcontext"));
+ for (i = 0; i < 32; i++)
+ mcp->mcv_reg[i] = (uint64_t)curpcb->pcb_fpustate.vfp_regs[i];
+ mcp->mcv_fpscr = VFP_FPSCR_FROM_SRCR(curpcb->pcb_fpustate.vfp_fpcr,
+ curpcb->pcb_fpustate.vfp_fpsr);
+ }
+ critical_exit();
+}
+
+static void
+set_fpcontext32(struct thread *td, mcontext32_vfp_t *mcp)
+{
+ struct pcb *pcb;
+ int i;
+
+ critical_enter();
+ pcb = td->td_pcb;
+ if (td == curthread)
+ vfp_discard(td);
+ for (i = 0; i < 32; i++)
+ pcb->pcb_fpustate.vfp_regs[i] = mcp->mcv_reg[i];
+ pcb->pcb_fpustate.vfp_fpsr = VFP_FPSR_FROM_FPSCR(mcp->mcv_fpscr);
+	pcb->pcb_fpustate.vfp_fpcr = VFP_FPCR_FROM_FPSCR(mcp->mcv_fpscr);
+ critical_exit();
+}
+#endif
+static void
+get_mcontext32(struct thread *td, mcontext32_t *mcp, int flags)
+{
+ struct pcb *pcb;
+ struct trapframe *tf;
+ int i;
+
+ pcb = td->td_pcb;
+ tf = td->td_frame;
+
+ if ((flags & GET_MC_CLEAR_RET) != 0) {
+ mcp->mc_gregset[0] = 0;
+ mcp->mc_gregset[16] = tf->tf_spsr & ~PSR_C;
+ } else {
+ mcp->mc_gregset[0] = tf->tf_x[0];
+ mcp->mc_gregset[16] = tf->tf_spsr;
+ }
+ for (i = 1; i < 15; i++)
+ mcp->mc_gregset[i] = tf->tf_x[i];
+ mcp->mc_gregset[15] = tf->tf_elr;
+
+ mcp->mc_vfp_size = 0;
+ mcp->mc_vfp_ptr = 0;
+
+ memset(mcp->mc_spare, 0, sizeof(mcp->mc_spare));
+}
+
+static int
+set_mcontext32(struct thread *td, mcontext32_t *mcp)
+{
+ struct trapframe *tf;
+ mcontext32_vfp_t mc_vfp;
+ int i;
+
+ tf = td->td_frame;
+
+ for (i = 0; i < 15; i++)
+ tf->tf_x[i] = mcp->mc_gregset[i];
+ tf->tf_elr = mcp->mc_gregset[15];
+ tf->tf_spsr = mcp->mc_gregset[16];
+#ifdef VFP
+ if (mcp->mc_vfp_size == sizeof(mc_vfp) && mcp->mc_vfp_ptr != 0) {
+ if (copyin((void *)(uintptr_t)mcp->mc_vfp_ptr, &mc_vfp,
+ sizeof(mc_vfp)) != 0)
+ return (EFAULT);
+ set_fpcontext32(td, &mc_vfp);
+ }
+#endif
+
+ return (0);
+}
+
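+/* Amount of a ucontext to copy in and out: everything up to uc_link */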
+#define UC_COPY_SIZE offsetof(ucontext32_t, uc_link)
+
+int
+freebsd32_getcontext(struct thread *td, struct freebsd32_getcontext_args *uap)
+{
+ ucontext32_t uc;
+ int ret;
+
+ if (uap->ucp == NULL)
+ ret = EINVAL;
+ else {
+ memset(&uc, 0, sizeof(uc));
+ get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET);
+ PROC_LOCK(td->td_proc);
+ uc.uc_sigmask = td->td_sigmask;
+ PROC_UNLOCK(td->td_proc);
+ ret = copyout(&uc, uap->ucp, UC_COPY_SIZE);
+ }
+ return (ret);
+}
+
+int
+freebsd32_setcontext(struct thread *td, struct freebsd32_setcontext_args *uap)
+{
+ ucontext32_t uc;
+ int ret;
+
+ if (uap->ucp == NULL)
+ ret = EINVAL;
+ else {
+ ret = copyin(uap->ucp, &uc, UC_COPY_SIZE);
+ if (ret == 0) {
+ ret = set_mcontext32(td, &uc.uc_mcontext);
+ if (ret == 0)
+ kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask,
+ NULL, 0);
+ }
+ }
+ return (ret);
+}
+
+int
+freebsd32_sigreturn(struct thread *td, struct freebsd32_sigreturn_args *uap)
+{
+ ucontext32_t uc;
+ int error;
+
+ if (uap == NULL)
+ return (EFAULT);
+ if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
+ return (EFAULT);
+ error = set_mcontext32(td, &uc.uc_mcontext);
+ if (error != 0)
+		return (error);
+
+ /* Restore signal mask. */
+ kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
+
+ return (EJUSTRETURN);
+}
+
+int
+freebsd32_swapcontext(struct thread *td, struct freebsd32_swapcontext_args *uap)
+{
+ ucontext32_t uc;
+ int ret;
+
+ if (uap->oucp == NULL || uap->ucp == NULL)
+ ret = EINVAL;
+ else {
+ bzero(&uc, sizeof(uc));
+ get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET);
+ PROC_LOCK(td->td_proc);
+ uc.uc_sigmask = td->td_sigmask;
+ PROC_UNLOCK(td->td_proc);
+ ret = copyout(&uc, uap->oucp, UC32_COPY_SIZE);
+ if (ret == 0) {
+ ret = copyin(uap->ucp, &uc, UC32_COPY_SIZE);
+ if (ret == 0) {
+ ret = set_mcontext32(td, &uc.uc_mcontext);
+ kern_sigprocmask(td, SIG_SETMASK,
+ &uc.uc_sigmask, NULL, 0);
+ }
+ }
+ }
+ return (ret);
+}
+
+void
+freebsd32_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
+{
+ struct thread *td;
+ struct proc *p;
+ struct trapframe *tf;
+ struct sigframe32 *fp, frame;
+ struct sigacts *psp;
+ struct siginfo32 siginfo;
+ struct sysentvec *sysent;
+ int onstack;
+ int sig;
+ int code;
+
+ siginfo_to_siginfo32(&ksi->ksi_info, &siginfo);
+ td = curthread;
+ p = td->td_proc;
+ PROC_LOCK_ASSERT(p, MA_OWNED);
+ sig = ksi->ksi_signo;
+ code = ksi->ksi_code;
+ psp = p->p_sigacts;
+ mtx_assert(&psp->ps_mtx, MA_OWNED);
+ tf = td->td_frame;
+ onstack = sigonstack(tf->tf_x[13]);
+
+ CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
+ catcher, sig);
+
+ /* Allocate and validate space for the signal handler context. */
+ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) &&
+ SIGISMEMBER(psp->ps_sigonstack, sig)) {
+ fp = (struct sigframe32 *)((uintptr_t)td->td_sigstk.ss_sp +
+ td->td_sigstk.ss_size);
+#if defined(COMPAT_43)
+ td->td_sigstk.ss_flags |= SS_ONSTACK;
+#endif
+ } else
+ fp = (struct sigframe32 *)td->td_frame->tf_x[13];
+
+ /* make room on the stack */
+ fp--;
+
+ /* make the stack aligned */
+ fp = (struct sigframe32 *)((unsigned long)(fp) &~ (8 - 1));
+ /* Populate the siginfo frame. */
+ get_mcontext32(td, &frame.sf_uc.uc_mcontext, 0);
+#ifdef VFP
+ get_fpcontext32(td, &frame.sf_vfp);
+ frame.sf_uc.uc_mcontext.mc_vfp_size = sizeof(fp->sf_vfp);
+ frame.sf_uc.uc_mcontext.mc_vfp_ptr = (uint32_t)(uintptr_t)&fp->sf_vfp;
+#else
+ frame.sf_uc.uc_mcontext.mc_vfp_size = 0;
+ frame.sf_uc.uc_mcontext.mc_vfp_ptr = (uint32_t)NULL;
+#endif
+ frame.sf_si = siginfo;
+ frame.sf_uc.uc_sigmask = *mask;
+	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
+ ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
+ frame.sf_uc.uc_stack.ss_sp = (uintptr_t)td->td_sigstk.ss_sp;
+ frame.sf_uc.uc_stack.ss_size = td->td_sigstk.ss_size;
+
+ mtx_unlock(&psp->ps_mtx);
+ PROC_UNLOCK(td->td_proc);
+
+ /* Copy the sigframe out to the user's stack. */
+ if (copyout(&frame, fp, sizeof(*fp)) != 0) {
+ /* Process has trashed its stack. Kill it. */
+ CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
+ PROC_LOCK(p);
+ sigexit(td, SIGILL);
+ }
+
+ /*
+ * Build context to run handler in. We invoke the handler
+ * directly, only returning via the trampoline. Note the
+ * trampoline version numbers are coordinated with machine-
+ * dependent code in libc.
+ */
+
+ tf->tf_x[0] = sig;
+ tf->tf_x[1] = (register_t)&fp->sf_si;
+ tf->tf_x[2] = (register_t)&fp->sf_uc;
+
+ /* the trampoline uses r5 as the uc address */
+ tf->tf_x[5] = (register_t)&fp->sf_uc;
+ tf->tf_elr = (register_t)catcher;
+ tf->tf_x[13] = (register_t)fp;
+ sysent = p->p_sysent;
+ if (sysent->sv_sigcode_base != 0)
+ tf->tf_x[14] = (register_t)sysent->sv_sigcode_base;
+ else
+ tf->tf_x[14] = (register_t)(sysent->sv_psstrings -
+ *(sysent->sv_szsigcode));
+ /* Set the mode to enter in the signal handler */
+ if ((register_t)catcher & 1)
+ tf->tf_spsr |= PSR_T;
+ else
+ tf->tf_spsr &= ~PSR_T;
+
+ CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_x[14],
+ tf->tf_x[13]);
+
+ PROC_LOCK(p);
+ mtx_lock(&psp->ps_mtx);
+}
+
+#ifdef COMPAT_43
+/*
+ * COMPAT_FREEBSD32 assumes we have this system call when COMPAT_43 is defined.
+ * FreeBSD/arm provides a similar getpagesize() syscall.
+ */
+#define ARM32_PAGE_SIZE 4096
+int
+ofreebsd32_getpagesize(struct thread *td,
+ struct ofreebsd32_getpagesize_args *uap)
+{
+
+ td->td_retval[0] = ARM32_PAGE_SIZE;
+ return (0);
+}
+
+/*
+ * Mirror the osigreturn definition in kern_sig.c for !i386 platforms, matching
+ * what the FreeBSD/arm syscall is connected to.
+ */
+int
+ofreebsd32_sigreturn(struct thread *td, struct ofreebsd32_sigreturn_args *uap)
+{
+
+ return (nosys(td, (struct nosys_args *)uap));
+}
+#endif
diff --git a/sys/arm64/arm64/genassym.c b/sys/arm64/arm64/genassym.c
new file mode 100644
index 000000000000..3f664d898916
--- /dev/null
+++ b/sys/arm64/arm64/genassym.c
@@ -0,0 +1,79 @@
+/*-
+ * Copyright (c) 2004 Olivier Houchard
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#include <sys/param.h>
+#include <sys/assym.h>
+#include <sys/pcpu.h>
+#include <sys/proc.h>
+
+#include <machine/frame.h>
+#include <machine/machdep.h>
+#include <machine/pcb.h>
+
+/* Sizeof arm64_bootparams, rounded to keep stack alignment */
+ASSYM(BOOTPARAMS_SIZE, roundup2(sizeof(struct arm64_bootparams),
+ STACKALIGNBYTES + 1));
+ASSYM(BP_MODULEP, offsetof(struct arm64_bootparams, modulep));
+ASSYM(BP_KERN_L1PT, offsetof(struct arm64_bootparams, kern_l1pt));
+ASSYM(BP_KERN_DELTA, offsetof(struct arm64_bootparams, kern_delta));
+ASSYM(BP_KERN_STACK, offsetof(struct arm64_bootparams, kern_stack));
+ASSYM(BP_KERN_L0PT, offsetof(struct arm64_bootparams, kern_l0pt));
+ASSYM(BP_BOOT_EL, offsetof(struct arm64_bootparams, boot_el));
+
+ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
+ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);
+
+ASSYM(PCPU_SIZE, sizeof(struct pcpu));
+ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb));
+ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));
+ASSYM(PC_SSBD, offsetof(struct pcpu, pc_ssbd));
+
+/* Size of pcb, rounded to keep stack alignment */
+ASSYM(PCB_SIZE, roundup2(sizeof(struct pcb), STACKALIGNBYTES + 1));
+ASSYM(PCB_SINGLE_STEP_SHIFT, PCB_SINGLE_STEP_SHIFT);
+ASSYM(PCB_REGS, offsetof(struct pcb, pcb_x));
+ASSYM(PCB_SP, offsetof(struct pcb, pcb_sp));
+ASSYM(PCB_TPIDRRO, offsetof(struct pcb, pcb_tpidrro_el0));
+ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
+ASSYM(PCB_FLAGS, offsetof(struct pcb, pcb_flags));
+
+ASSYM(SF_UC, offsetof(struct sigframe, sf_uc));
+
+ASSYM(TD_PROC, offsetof(struct thread, td_proc));
+ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
+ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
+ASSYM(TD_FRAME, offsetof(struct thread, td_frame));
+ASSYM(TD_LOCK, offsetof(struct thread, td_lock));
+
+ASSYM(TF_SIZE, sizeof(struct trapframe));
+ASSYM(TF_SP, offsetof(struct trapframe, tf_sp));
+ASSYM(TF_ELR, offsetof(struct trapframe, tf_elr));
+ASSYM(TF_SPSR, offsetof(struct trapframe, tf_spsr));
+ASSYM(TF_X, offsetof(struct trapframe, tf_x));
diff --git a/sys/arm64/arm64/gic_v3.c b/sys/arm64/arm64/gic_v3.c
new file mode 100644
index 000000000000..a83ef576e30e
--- /dev/null
+++ b/sys/arm64/arm64/gic_v3.c
@@ -0,0 +1,1271 @@
+/*-
+ * Copyright (c) 2015-2016 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * the sponsorship of the FreeBSD Foundation.
+ *
+ * This software was developed by Semihalf under
+ * the sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_acpi.h"
+#include "opt_platform.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bitstring.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/pcpu.h>
+#include <sys/proc.h>
+#include <sys/cpuset.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/smp.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+#include <machine/cpu.h>
+#include <machine/intr.h>
+
+#ifdef FDT
+#include <dev/fdt/fdt_intr.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#endif
+
+#ifdef DEV_ACPI
+#include <contrib/dev/acpica/include/acpi.h>
+#include <dev/acpica/acpivar.h>
+#endif
+
+#include "pic_if.h"
+
+#include <arm/arm/gic_common.h>
+#include "gic_v3_reg.h"
+#include "gic_v3_var.h"
+
+static bus_get_domain_t gic_v3_get_domain;
+static bus_read_ivar_t gic_v3_read_ivar;
+
+static pic_disable_intr_t gic_v3_disable_intr;
+static pic_enable_intr_t gic_v3_enable_intr;
+static pic_map_intr_t gic_v3_map_intr;
+static pic_setup_intr_t gic_v3_setup_intr;
+static pic_teardown_intr_t gic_v3_teardown_intr;
+static pic_post_filter_t gic_v3_post_filter;
+static pic_post_ithread_t gic_v3_post_ithread;
+static pic_pre_ithread_t gic_v3_pre_ithread;
+static pic_bind_intr_t gic_v3_bind_intr;
+#ifdef SMP
+static pic_init_secondary_t gic_v3_init_secondary;
+static pic_ipi_send_t gic_v3_ipi_send;
+static pic_ipi_setup_t gic_v3_ipi_setup;
+#endif
+
+static u_int gic_irq_cpu;
+#ifdef SMP
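+/* Track which IPI each SGI has been handed out to, and the next free SGI */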
+static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
+static u_int sgi_first_unused = GIC_FIRST_SGI;
+#endif
+
+static device_method_t gic_v3_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_detach, gic_v3_detach),
+
+ /* Bus interface */
+ DEVMETHOD(bus_get_domain, gic_v3_get_domain),
+ DEVMETHOD(bus_read_ivar, gic_v3_read_ivar),
+
+ /* Interrupt controller interface */
+ DEVMETHOD(pic_disable_intr, gic_v3_disable_intr),
+ DEVMETHOD(pic_enable_intr, gic_v3_enable_intr),
+ DEVMETHOD(pic_map_intr, gic_v3_map_intr),
+ DEVMETHOD(pic_setup_intr, gic_v3_setup_intr),
+ DEVMETHOD(pic_teardown_intr, gic_v3_teardown_intr),
+ DEVMETHOD(pic_post_filter, gic_v3_post_filter),
+ DEVMETHOD(pic_post_ithread, gic_v3_post_ithread),
+ DEVMETHOD(pic_pre_ithread, gic_v3_pre_ithread),
+#ifdef SMP
+ DEVMETHOD(pic_bind_intr, gic_v3_bind_intr),
+ DEVMETHOD(pic_init_secondary, gic_v3_init_secondary),
+ DEVMETHOD(pic_ipi_send, gic_v3_ipi_send),
+ DEVMETHOD(pic_ipi_setup, gic_v3_ipi_setup),
+#endif
+
+ /* End */
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods,
+ sizeof(struct gic_v3_softc));
+
+/*
+ * Driver-specific definitions.
+ */
+MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR);
+
+/*
+ * Helper functions and definitions.
+ */
+/* Destination registers, either Distributor or Re-Distributor */
+enum gic_v3_xdist {
+ DIST = 0,
+ REDIST,
+};
+
+struct gic_v3_irqsrc {
+ struct intr_irqsrc gi_isrc;
+ uint32_t gi_irq;
+ enum intr_polarity gi_pol;
+ enum intr_trigger gi_trig;
+};
+
+/* Helper routines starting with gic_v3_ */
+static int gic_v3_dist_init(struct gic_v3_softc *);
+static int gic_v3_redist_alloc(struct gic_v3_softc *);
+static int gic_v3_redist_find(struct gic_v3_softc *);
+static int gic_v3_redist_init(struct gic_v3_softc *);
+static int gic_v3_cpu_init(struct gic_v3_softc *);
+static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist);
+
+/* A sequence of init functions for primary (boot) CPU */
+typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *);
+/* Primary CPU initialization sequence */
+static gic_v3_initseq_t gic_v3_primary_init[] = {
+ gic_v3_dist_init,
+ gic_v3_redist_alloc,
+ gic_v3_redist_init,
+ gic_v3_cpu_init,
+ NULL
+};
+
+#ifdef SMP
+/* Secondary CPU initialization sequence */
+static gic_v3_initseq_t gic_v3_secondary_init[] = {
+ gic_v3_redist_init,
+ gic_v3_cpu_init,
+ NULL
+};
+#endif
+
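+/*
+ * Accessors for the current CPU's Re-Distributor frame; the offset is
+ * relative to that per-CPU Re-Distributor region.
+ */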
+uint32_t
+gic_r_read_4(device_t dev, bus_size_t offset)
+{
+ struct gic_v3_softc *sc;
+ struct resource *rdist;
+
+ sc = device_get_softc(dev);
+ rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
+ return (bus_read_4(rdist, offset));
+}
+
+uint64_t
+gic_r_read_8(device_t dev, bus_size_t offset)
+{
+ struct gic_v3_softc *sc;
+ struct resource *rdist;
+
+ sc = device_get_softc(dev);
+ rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
+ return (bus_read_8(rdist, offset));
+}
+
+void
+gic_r_write_4(device_t dev, bus_size_t offset, uint32_t val)
+{
+ struct gic_v3_softc *sc;
+ struct resource *rdist;
+
+ sc = device_get_softc(dev);
+ rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
+ bus_write_4(rdist, offset, val);
+}
+
+void
+gic_r_write_8(device_t dev, bus_size_t offset, uint64_t val)
+{
+ struct gic_v3_softc *sc;
+ struct resource *rdist;
+
+ sc = device_get_softc(dev);
+ rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
+ bus_write_8(rdist, offset, val);
+}
+
+/*
+ * Device interface.
+ */
+int
+gic_v3_attach(device_t dev)
+{
+ struct gic_v3_softc *sc;
+ gic_v3_initseq_t *init_func;
+ uint32_t typer;
+ int rid;
+ int err;
+ size_t i;
+ u_int irq;
+ const char *name;
+
+ sc = device_get_softc(dev);
+ sc->gic_registered = FALSE;
+ sc->dev = dev;
+ err = 0;
+
+ /* Initialize mutex */
+ mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN);
+
+ /*
+ * Allocate array of struct resource.
+ * One entry for Distributor and all remaining for Re-Distributor.
+ */
+ sc->gic_res = malloc(
+ sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1),
+ M_GIC_V3, M_WAITOK);
+
+ /* Now allocate corresponding resources */
+ for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) {
+ sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &rid, RF_ACTIVE);
+ if (sc->gic_res[rid] == NULL)
+ return (ENXIO);
+ }
+
+ /*
+ * Distributor interface
+ */
+ sc->gic_dist = sc->gic_res[0];
+
+ /*
+	 * Re-Distributor interface
+ */
+ /* Allocate space under region descriptions */
+ sc->gic_redists.regions = malloc(
+ sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions,
+ M_GIC_V3, M_WAITOK);
+
+ /* Fill-up bus_space information for each region. */
+ for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++)
+ sc->gic_redists.regions[i] = sc->gic_res[rid];
+
+ /* Get the number of supported SPI interrupts */
+ typer = gic_d_read(sc, 4, GICD_TYPER);
+ sc->gic_nirqs = GICD_TYPER_I_NUM(typer);
+ if (sc->gic_nirqs > GIC_I_NUM_MAX)
+ sc->gic_nirqs = GIC_I_NUM_MAX;
+
+ sc->gic_irqs = malloc(sizeof(*sc->gic_irqs) * sc->gic_nirqs,
+ M_GIC_V3, M_WAITOK | M_ZERO);
+ name = device_get_nameunit(dev);
+ for (irq = 0; irq < sc->gic_nirqs; irq++) {
+ struct intr_irqsrc *isrc;
+
+ sc->gic_irqs[irq].gi_irq = irq;
+ sc->gic_irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
+ sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
+
+ isrc = &sc->gic_irqs[irq].gi_isrc;
+ if (irq <= GIC_LAST_SGI) {
+ err = intr_isrc_register(isrc, sc->dev,
+ INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
+ } else if (irq <= GIC_LAST_PPI) {
+ err = intr_isrc_register(isrc, sc->dev,
+ INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
+ } else {
+ err = intr_isrc_register(isrc, sc->dev, 0,
+ "%s,s%u", name, irq - GIC_FIRST_SPI);
+ }
+ if (err != 0) {
+ /* XXX call intr_isrc_deregister() */
+			free(sc->gic_irqs, M_GIC_V3);
+ return (err);
+ }
+ }
+
+ /*
+ * Read the Peripheral ID2 register. This is an implementation
+ * defined register, but seems to be implemented in all GICv3
+ * parts and Linux expects it to be there.
+ */
+ sc->gic_pidr2 = gic_d_read(sc, 4, GICD_PIDR2);
+
+ /* Get the number of supported interrupt identifier bits */
+ sc->gic_idbits = GICD_TYPER_IDBITS(typer);
+
+ if (bootverbose) {
+ device_printf(dev, "SPIs: %u, IDs: %u\n",
+ sc->gic_nirqs, (1 << sc->gic_idbits) - 1);
+ }
+
+ /* Train init sequence for boot CPU */
+ for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) {
+ err = (*init_func)(sc);
+ if (err != 0)
+ return (err);
+ }
+
+ return (0);
+}
+
+int
+gic_v3_detach(device_t dev)
+{
+ struct gic_v3_softc *sc;
+ size_t i;
+ int rid;
+
+ sc = device_get_softc(dev);
+
+ if (device_is_attached(dev)) {
+ /*
+ * XXX: We should probably deregister PIC
+ */
+ if (sc->gic_registered)
+ panic("Trying to detach registered PIC");
+ }
+ for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++)
+ bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]);
+
+ for (i = 0; i <= mp_maxid; i++)
+ free(sc->gic_redists.pcpu[i], M_GIC_V3);
+
+ free(sc->gic_res, M_GIC_V3);
+ free(sc->gic_redists.regions, M_GIC_V3);
+
+ return (0);
+}
+
+static int
+gic_v3_get_domain(device_t dev, device_t child, int *domain)
+{
+ struct gic_v3_devinfo *di;
+
+ di = device_get_ivars(child);
+ if (di->gic_domain < 0)
+ return (ENOENT);
+
+ *domain = di->gic_domain;
+ return (0);
+}
+
+static int
+gic_v3_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
+{
+ struct gic_v3_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ switch (which) {
+ case GICV3_IVAR_NIRQS:
+ *result = (NIRQ - sc->gic_nirqs) / sc->gic_nchildren;
+ return (0);
+ case GICV3_IVAR_REDIST:
+ *result = (uintptr_t)sc->gic_redists.pcpu[PCPU_GET(cpuid)];
+ return (0);
+ case GIC_IVAR_HW_REV:
+ KASSERT(
+ GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv3 ||
+ GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv4,
+ ("gic_v3_read_ivar: Invalid GIC architecture: %d (%.08X)",
+ GICR_PIDR2_ARCH(sc->gic_pidr2), sc->gic_pidr2));
+ *result = GICR_PIDR2_ARCH(sc->gic_pidr2);
+ return (0);
+ case GIC_IVAR_BUS:
+ KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
+ ("gic_v3_read_ivar: Unknown bus type"));
+ KASSERT(sc->gic_bus <= GIC_BUS_MAX,
+ ("gic_v3_read_ivar: Invalid bus type %u", sc->gic_bus));
+ *result = sc->gic_bus;
+ return (0);
+ }
+
+ return (ENOENT);
+}
+
+int
+arm_gic_v3_intr(void *arg)
+{
+ struct gic_v3_softc *sc = arg;
+ struct gic_v3_irqsrc *gi;
+ struct intr_pic *pic;
+ uint64_t active_irq;
+ struct trapframe *tf;
+
+ pic = sc->gic_pic;
+
+ while (1) {
+ if (CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1) {
+ /*
+ * Hardware: Cavium ThunderX
+ * Chip revision: Pass 1.0 (early version)
+ * Pass 1.1 (production)
+ * ERRATUM: 22978, 23154
+ */
+ __asm __volatile(
+ "nop;nop;nop;nop;nop;nop;nop;nop; \n"
+ "mrs %0, ICC_IAR1_EL1 \n"
+ "nop;nop;nop;nop; \n"
+ "dsb sy \n"
+ : "=&r" (active_irq));
+ } else {
+ active_irq = gic_icc_read(IAR1);
+ }
+
+ if (active_irq >= GIC_FIRST_LPI) {
+ intr_child_irq_handler(pic, active_irq);
+ continue;
+ }
+
+ if (__predict_false(active_irq >= sc->gic_nirqs))
+ return (FILTER_HANDLED);
+
+ tf = curthread->td_intr_frame;
+ gi = &sc->gic_irqs[active_irq];
+ if (active_irq <= GIC_LAST_SGI) {
+ /* Call EOI for all IPI before dispatch. */
+ gic_icc_write(EOIR1, (uint64_t)active_irq);
+#ifdef SMP
+ intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
+#else
+ device_printf(sc->dev, "SGI %ju on UP system detected\n",
+ (uintmax_t)(active_irq - GIC_FIRST_SGI));
+#endif
+ } else if (active_irq >= GIC_FIRST_PPI &&
+ active_irq <= GIC_LAST_SPI) {
+ if (gi->gi_trig == INTR_TRIGGER_EDGE)
+ gic_icc_write(EOIR1, gi->gi_irq);
+
+ if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
+ if (gi->gi_trig != INTR_TRIGGER_EDGE)
+ gic_icc_write(EOIR1, gi->gi_irq);
+ gic_v3_disable_intr(sc->dev, &gi->gi_isrc);
+ device_printf(sc->dev,
+ "Stray irq %lu disabled\n", active_irq);
+ }
+ }
+ }
+}
+
+#ifdef FDT
+static int
+gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
+ enum intr_polarity *polp, enum intr_trigger *trigp)
+{
+ u_int irq;
+
+ if (ncells < 3)
+ return (EINVAL);
+
+ /*
+ * The 1st cell is the interrupt type:
+ * 0 = SPI
+ * 1 = PPI
+ * The 2nd cell contains the interrupt number:
+ * [0 - 987] for SPI
+ * [0 - 15] for PPI
+ * The 3rd cell is the flags, encoded as follows:
+ * bits[3:0] trigger type and level flags
+ * 1 = edge triggered
+ * 2 = edge triggered (PPI only)
+ * 4 = level-sensitive
+ * 8 = level-sensitive (PPI only)
+ */
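+	/*
+	 * For example, a (hypothetical) property "interrupts = <0 42 4>"
+	 * would map to SPI 42, i.e. irq = GIC_FIRST_SPI + 42, configured as
+	 * level-sensitive, active-high.
+	 */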
+ switch (cells[0]) {
+ case 0:
+ irq = GIC_FIRST_SPI + cells[1];
+ /* SPI irq is checked later. */
+ break;
+ case 1:
+ irq = GIC_FIRST_PPI + cells[1];
+ if (irq > GIC_LAST_PPI) {
+ device_printf(dev, "unsupported PPI interrupt "
+ "number %u\n", cells[1]);
+ return (EINVAL);
+ }
+ break;
+ default:
+ device_printf(dev, "unsupported interrupt type "
+ "configuration %u\n", cells[0]);
+ return (EINVAL);
+ }
+
+ switch (cells[2] & FDT_INTR_MASK) {
+ case FDT_INTR_EDGE_RISING:
+ *trigp = INTR_TRIGGER_EDGE;
+ *polp = INTR_POLARITY_HIGH;
+ break;
+ case FDT_INTR_EDGE_FALLING:
+ *trigp = INTR_TRIGGER_EDGE;
+ *polp = INTR_POLARITY_LOW;
+ break;
+ case FDT_INTR_LEVEL_HIGH:
+ *trigp = INTR_TRIGGER_LEVEL;
+ *polp = INTR_POLARITY_HIGH;
+ break;
+ case FDT_INTR_LEVEL_LOW:
+ *trigp = INTR_TRIGGER_LEVEL;
+ *polp = INTR_POLARITY_LOW;
+ break;
+ default:
+ device_printf(dev, "unsupported trigger/polarity "
+ "configuration 0x%02x\n", cells[2]);
+ return (EINVAL);
+ }
+
+ /* Check the interrupt is valid */
+ if (irq >= GIC_FIRST_SPI && *polp != INTR_POLARITY_HIGH)
+ return (EINVAL);
+
+ *irqp = irq;
+ return (0);
+}
+#endif
+
+static int
+gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
+ enum intr_polarity *polp, enum intr_trigger *trigp)
+{
+ struct gic_v3_irqsrc *gi;
+
+ /* SPI-mapped MSI */
+ gi = (struct gic_v3_irqsrc *)msi_data->isrc;
+ if (gi == NULL)
+ return (ENXIO);
+
+ *irqp = gi->gi_irq;
+
+ /* MSI/MSI-X interrupts are always edge triggered with high polarity */
+ *polp = INTR_POLARITY_HIGH;
+ *trigp = INTR_TRIGGER_EDGE;
+
+ return (0);
+}
+
+static int
+do_gic_v3_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
+ enum intr_polarity *polp, enum intr_trigger *trigp)
+{
+ struct gic_v3_softc *sc;
+ enum intr_polarity pol;
+ enum intr_trigger trig;
+ struct intr_map_data_msi *dam;
+#ifdef FDT
+ struct intr_map_data_fdt *daf;
+#endif
+#ifdef DEV_ACPI
+ struct intr_map_data_acpi *daa;
+#endif
+ u_int irq;
+
+ sc = device_get_softc(dev);
+
+ switch (data->type) {
+#ifdef FDT
+ case INTR_MAP_DATA_FDT:
+ daf = (struct intr_map_data_fdt *)data;
+ if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
+ &trig) != 0)
+ return (EINVAL);
+ break;
+#endif
+#ifdef DEV_ACPI
+ case INTR_MAP_DATA_ACPI:
+ daa = (struct intr_map_data_acpi *)data;
+ irq = daa->irq;
+ pol = daa->pol;
+ trig = daa->trig;
+ break;
+#endif
+ case INTR_MAP_DATA_MSI:
+ /* SPI-mapped MSI */
+ dam = (struct intr_map_data_msi *)data;
+ if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
+ return (EINVAL);
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ if (irq >= sc->gic_nirqs)
+ return (EINVAL);
+ switch (pol) {
+ case INTR_POLARITY_CONFORM:
+ case INTR_POLARITY_LOW:
+ case INTR_POLARITY_HIGH:
+ break;
+ default:
+ return (EINVAL);
+ }
+ switch (trig) {
+ case INTR_TRIGGER_CONFORM:
+ case INTR_TRIGGER_EDGE:
+ case INTR_TRIGGER_LEVEL:
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ *irqp = irq;
+ if (polp != NULL)
+ *polp = pol;
+ if (trigp != NULL)
+ *trigp = trig;
+ return (0);
+}
+
+static int
+gic_v3_map_intr(device_t dev, struct intr_map_data *data,
+ struct intr_irqsrc **isrcp)
+{
+ struct gic_v3_softc *sc;
+ int error;
+ u_int irq;
+
+ error = do_gic_v3_map_intr(dev, data, &irq, NULL, NULL);
+ if (error == 0) {
+ sc = device_get_softc(dev);
+ *isrcp = GIC_INTR_ISRC(sc, irq);
+ }
+ return (error);
+}
+
+static int
+gic_v3_setup_intr(device_t dev, struct intr_irqsrc *isrc,
+ struct resource *res, struct intr_map_data *data)
+{
+ struct gic_v3_softc *sc = device_get_softc(dev);
+ struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
+ enum intr_trigger trig;
+ enum intr_polarity pol;
+ uint32_t reg;
+ u_int irq;
+ int error;
+
+ if (data == NULL)
+ return (ENOTSUP);
+
+ error = do_gic_v3_map_intr(dev, data, &irq, &pol, &trig);
+ if (error != 0)
+ return (error);
+
+ if (gi->gi_irq != irq || pol == INTR_POLARITY_CONFORM ||
+ trig == INTR_TRIGGER_CONFORM)
+ return (EINVAL);
+
+ /* Compare config if this is not first setup. */
+ if (isrc->isrc_handlers != 0) {
+ if (pol != gi->gi_pol || trig != gi->gi_trig)
+ return (EINVAL);
+ else
+ return (0);
+ }
+
+ gi->gi_pol = pol;
+ gi->gi_trig = trig;
+
+ /*
+	 * XXX - If a per-CPU interrupt is enabled after SMP has started, an
+	 *       IPI is needed to enable it on the other CPUs as well. This is
+	 *       further complicated because pic_enable_source() and
+	 *       pic_disable_source() should then act on a per-CPU basis only.
+	 *       This still needs to be solved here somehow.
+ */
+ if (isrc->isrc_flags & INTR_ISRCF_PPI)
+ CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
+
+ if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_SPI) {
+ mtx_lock_spin(&sc->gic_mtx);
+
+ /* Set the trigger and polarity */
+ if (irq <= GIC_LAST_PPI)
+ reg = gic_r_read(sc, 4,
+ GICR_SGI_BASE_SIZE + GICD_ICFGR(irq));
+ else
+ reg = gic_d_read(sc, 4, GICD_ICFGR(irq));
+ if (trig == INTR_TRIGGER_LEVEL)
+ reg &= ~(2 << ((irq % 16) * 2));
+ else
+ reg |= 2 << ((irq % 16) * 2);
+
+ if (irq <= GIC_LAST_PPI) {
+ gic_r_write(sc, 4,
+ GICR_SGI_BASE_SIZE + GICD_ICFGR(irq), reg);
+ gic_v3_wait_for_rwp(sc, REDIST);
+ } else {
+ gic_d_write(sc, 4, GICD_ICFGR(irq), reg);
+ gic_v3_wait_for_rwp(sc, DIST);
+ }
+
+ mtx_unlock_spin(&sc->gic_mtx);
+
+ gic_v3_bind_intr(dev, isrc);
+ }
+
+ return (0);
+}
+
+static int
+gic_v3_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
+ struct resource *res, struct intr_map_data *data)
+{
+ struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
+
+ if (isrc->isrc_handlers == 0) {
+ gi->gi_pol = INTR_POLARITY_CONFORM;
+ gi->gi_trig = INTR_TRIGGER_CONFORM;
+ }
+
+ return (0);
+}
+
+static void
+gic_v3_disable_intr(device_t dev, struct intr_irqsrc *isrc)
+{
+ struct gic_v3_softc *sc;
+ struct gic_v3_irqsrc *gi;
+ u_int irq;
+
+ sc = device_get_softc(dev);
+ gi = (struct gic_v3_irqsrc *)isrc;
+ irq = gi->gi_irq;
+
+ if (irq <= GIC_LAST_PPI) {
+ /* SGIs and PPIs in corresponding Re-Distributor */
+ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq),
+ GICD_I_MASK(irq));
+ gic_v3_wait_for_rwp(sc, REDIST);
+ } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
+ /* SPIs in distributor */
+ gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
+ gic_v3_wait_for_rwp(sc, DIST);
+ } else
+ panic("%s: Unsupported IRQ %u", __func__, irq);
+}
+
+static void
+gic_v3_enable_intr(device_t dev, struct intr_irqsrc *isrc)
+{
+ struct gic_v3_softc *sc;
+ struct gic_v3_irqsrc *gi;
+ u_int irq;
+
+ sc = device_get_softc(dev);
+ gi = (struct gic_v3_irqsrc *)isrc;
+ irq = gi->gi_irq;
+
+ if (irq <= GIC_LAST_PPI) {
+ /* SGIs and PPIs in corresponding Re-Distributor */
+ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq),
+ GICD_I_MASK(irq));
+ gic_v3_wait_for_rwp(sc, REDIST);
+ } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
+ /* SPIs in distributor */
+ gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq));
+ gic_v3_wait_for_rwp(sc, DIST);
+ } else
+ panic("%s: Unsupported IRQ %u", __func__, irq);
+}
+
+static void
+gic_v3_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
+{
+ struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
+
+ gic_v3_disable_intr(dev, isrc);
+ gic_icc_write(EOIR1, gi->gi_irq);
+}
+
+static void
+gic_v3_post_ithread(device_t dev, struct intr_irqsrc *isrc)
+{
+
+ gic_v3_enable_intr(dev, isrc);
+}
+
+static void
+gic_v3_post_filter(device_t dev, struct intr_irqsrc *isrc)
+{
+ struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
+
+ if (gi->gi_trig == INTR_TRIGGER_EDGE)
+ return;
+
+ gic_icc_write(EOIR1, gi->gi_irq);
+}
+
+static int
+gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc)
+{
+ struct gic_v3_softc *sc;
+ struct gic_v3_irqsrc *gi;
+ int cpu;
+
+ gi = (struct gic_v3_irqsrc *)isrc;
+ if (gi->gi_irq <= GIC_LAST_PPI)
+ return (EINVAL);
+
+ KASSERT(gi->gi_irq >= GIC_FIRST_SPI && gi->gi_irq <= GIC_LAST_SPI,
+ ("%s: Attempting to bind an invalid IRQ", __func__));
+
+ sc = device_get_softc(dev);
+
+ if (CPU_EMPTY(&isrc->isrc_cpu)) {
+ gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
+ CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
+ gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq),
+ CPU_AFFINITY(gic_irq_cpu));
+ } else {
+ /*
+ * We can only bind to a single CPU so select
+ * the first CPU found.
+ */
+ cpu = CPU_FFS(&isrc->isrc_cpu) - 1;
+ gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(cpu));
+ }
+
+ return (0);
+}
+
+#ifdef SMP
+static void
+gic_v3_init_secondary(device_t dev)
+{
+ device_t child;
+ struct gic_v3_softc *sc;
+ gic_v3_initseq_t *init_func;
+ struct intr_irqsrc *isrc;
+ u_int cpu, irq;
+ int err, i;
+
+ sc = device_get_softc(dev);
+ cpu = PCPU_GET(cpuid);
+
+	/* Run the init sequence on this secondary CPU */
+ for (init_func = gic_v3_secondary_init; *init_func != NULL;
+ init_func++) {
+ err = (*init_func)(sc);
+ if (err != 0) {
+ device_printf(dev,
+ "Could not initialize GIC for CPU%u\n", cpu);
+ return;
+ }
+ }
+
+ /* Unmask attached SGI interrupts. */
+ for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) {
+ isrc = GIC_INTR_ISRC(sc, irq);
+ if (intr_isrc_init_on_cpu(isrc, cpu))
+ gic_v3_enable_intr(dev, isrc);
+ }
+
+ /* Unmask attached PPI interrupts. */
+ for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) {
+ isrc = GIC_INTR_ISRC(sc, irq);
+ if (intr_isrc_init_on_cpu(isrc, cpu))
+ gic_v3_enable_intr(dev, isrc);
+ }
+
+ for (i = 0; i < sc->gic_nchildren; i++) {
+ child = sc->gic_children[i];
+ PIC_INIT_SECONDARY(child);
+ }
+}
+
+static void
+gic_v3_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
+ u_int ipi)
+{
+ struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
+ uint64_t aff, val, irq;
+ int i;
+
+#define GIC_AFF_MASK (CPU_AFF3_MASK | CPU_AFF2_MASK | CPU_AFF1_MASK)
+#define GIC_AFFINITY(i) (CPU_AFFINITY(i) & GIC_AFF_MASK)
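+	/*
+	 * ICC_SGI1R_EL1 addresses targets as a bitmap of Aff0 values within
+	 * one Aff3.Aff2.Aff1 group, so the CPU set is walked one affinity
+	 * group at a time and one register write is issued per group.
+	 */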
+ aff = GIC_AFFINITY(0);
+ irq = gi->gi_irq;
+ val = 0;
+
+ /* Iterate through all CPUs in set */
+ for (i = 0; i <= mp_maxid; i++) {
+ /* Move to the next affinity group */
+ if (aff != GIC_AFFINITY(i)) {
+ /* Send the IPI */
+ if (val != 0) {
+ gic_icc_write(SGI1R, val);
+ val = 0;
+ }
+ aff = GIC_AFFINITY(i);
+ }
+
+ /* Send the IPI to this cpu */
+ if (CPU_ISSET(i, &cpus)) {
+#define ICC_SGI1R_AFFINITY(aff) \
+ (((uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT) | \
+ ((uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT) | \
+ ((uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT))
+			/* Set the affinity when this is the first CPU at this level */
+ if (val == 0)
+ val = ICC_SGI1R_AFFINITY(aff) |
+ irq << ICC_SGI1R_EL1_SGIID_SHIFT;
+			/* Set the bit to send the IPI to this CPU */
+ val |= 1 << CPU_AFF0(CPU_AFFINITY(i));
+ }
+ }
+
+ /* Send the IPI to the last cpu affinity group */
+ if (val != 0)
+ gic_icc_write(SGI1R, val);
+#undef GIC_AFF_MASK
+#undef GIC_AFFINITY
+}
+
+static int
+gic_v3_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
+{
+ struct intr_irqsrc *isrc;
+ struct gic_v3_softc *sc = device_get_softc(dev);
+
+ if (sgi_first_unused > GIC_LAST_SGI)
+ return (ENOSPC);
+
+ isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
+ sgi_to_ipi[sgi_first_unused++] = ipi;
+
+ CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
+
+ *isrcp = isrc;
+ return (0);
+}
+#endif /* SMP */
+
+/*
+ * Helper routines
+ */
+static void
+gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist)
+{
+ struct resource *res;
+ u_int cpuid;
+ size_t us_left = 1000000;
+
+ cpuid = PCPU_GET(cpuid);
+
+ switch (xdist) {
+ case DIST:
+ res = sc->gic_dist;
+ break;
+ case REDIST:
+ res = &sc->gic_redists.pcpu[cpuid]->res;
+ break;
+ default:
+ KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__));
+ return;
+ }
+
+ while ((bus_read_4(res, GICD_CTLR) & GICD_CTLR_RWP) != 0) {
+ DELAY(1);
+ if (us_left-- == 0)
+ panic("GICD Register write pending for too long");
+ }
+}
+
+/* CPU interface. */
+static __inline void
+gic_v3_cpu_priority(uint64_t mask)
+{
+
+	/* Set priority mask */
+ gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK);
+}
+
+static int
+gic_v3_cpu_enable_sre(struct gic_v3_softc *sc)
+{
+ uint64_t sre;
+ u_int cpuid;
+
+ cpuid = PCPU_GET(cpuid);
+ /*
+ * Set the SRE bit to enable access to GIC CPU interface
+ * via system registers.
+ */
+ sre = READ_SPECIALREG(icc_sre_el1);
+ sre |= ICC_SRE_EL1_SRE;
+ WRITE_SPECIALREG(icc_sre_el1, sre);
+ isb();
+ /*
+ * Now ensure that the bit is set.
+ */
+ sre = READ_SPECIALREG(icc_sre_el1);
+ if ((sre & ICC_SRE_EL1_SRE) == 0) {
+		/* Nothing more we can do; SRE access was disabled at EL2 */
+ device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface "
+ "via system registers\n", cpuid);
+ return (ENXIO);
+ } else if (bootverbose) {
+ device_printf(sc->dev,
+ "CPU%u enabled CPU interface via system registers\n",
+ cpuid);
+ }
+
+ return (0);
+}
+
+static int
+gic_v3_cpu_init(struct gic_v3_softc *sc)
+{
+ int err;
+
+ /* Enable access to CPU interface via system registers */
+ err = gic_v3_cpu_enable_sre(sc);
+ if (err != 0)
+ return (err);
+ /* Priority mask to minimum - accept all interrupts */
+ gic_v3_cpu_priority(GIC_PRIORITY_MIN);
+ /* Disable EOI mode */
+ gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE);
+	/* Enable Group 1 (Non-secure) interrupts */
+ gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN);
+
+ return (0);
+}
+
+/* Distributor */
+static int
+gic_v3_dist_init(struct gic_v3_softc *sc)
+{
+ uint64_t aff;
+ u_int i;
+
+ /*
+ * 1. Disable the Distributor
+ */
+ gic_d_write(sc, 4, GICD_CTLR, 0);
+ gic_v3_wait_for_rwp(sc, DIST);
+
+ /*
+ * 2. Configure the Distributor
+ */
+ /* Set all SPIs to be Group 1 Non-secure */
+ for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_IGROUPRn)
+ gic_d_write(sc, 4, GICD_IGROUPR(i), 0xFFFFFFFF);
+
+	/* Set all global interrupts to be level triggered */
+ for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn)
+ gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000);
+
+ /* Set priority to all shared interrupts */
+ for (i = GIC_FIRST_SPI;
+ i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) {
+ /* Set highest priority */
+ gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX);
+ }
+
+ /*
+ * Disable all interrupts. Leave PPI and SGIs as they are enabled in
+ * Re-Distributor registers.
+ */
+ for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn)
+ gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF);
+
+ gic_v3_wait_for_rwp(sc, DIST);
+
+ /*
+ * 3. Enable Distributor
+ */
+ /* Enable Distributor with ARE, Group 1 */
+ gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A |
+ GICD_CTLR_G1);
+
+ /*
+ * 4. Route all interrupts to boot CPU.
+ */
+ aff = CPU_AFFINITY(0);
+ for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++)
+ gic_d_write(sc, 4, GICD_IROUTER(i), aff);
+
+ return (0);
+}
+
+/* Re-Distributor */
+static int
+gic_v3_redist_alloc(struct gic_v3_softc *sc)
+{
+ u_int cpuid;
+
+	/* Allocate struct resource for each CPU's Re-Distributor registers */
+ for (cpuid = 0; cpuid <= mp_maxid; cpuid++)
+ if (CPU_ISSET(cpuid, &all_cpus) != 0)
+ sc->gic_redists.pcpu[cpuid] =
+ malloc(sizeof(*sc->gic_redists.pcpu[0]),
+ M_GIC_V3, M_WAITOK);
+ else
+ sc->gic_redists.pcpu[cpuid] = NULL;
+ return (0);
+}
+
+static int
+gic_v3_redist_find(struct gic_v3_softc *sc)
+{
+ struct resource r_res;
+ bus_space_handle_t r_bsh;
+ uint64_t aff;
+ uint64_t typer;
+ uint32_t pidr2;
+ u_int cpuid;
+ size_t i;
+
+ cpuid = PCPU_GET(cpuid);
+
+ aff = CPU_AFFINITY(cpuid);
+ /* Affinity in format for comparison with typer */
+ aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) |
+ (CPU_AFF1(aff) << 8) | CPU_AFF0(aff);
+
+ if (bootverbose) {
+ device_printf(sc->dev,
+ "Start searching for Re-Distributor\n");
+ }
+ /* Iterate through Re-Distributor regions */
+ for (i = 0; i < sc->gic_redists.nregions; i++) {
+ /* Take a copy of the region's resource */
+ r_res = *sc->gic_redists.regions[i];
+ r_bsh = rman_get_bushandle(&r_res);
+
+ pidr2 = bus_read_4(&r_res, GICR_PIDR2);
+ switch (GICR_PIDR2_ARCH(pidr2)) {
+ case GICR_PIDR2_ARCH_GICv3: /* fall through */
+ case GICR_PIDR2_ARCH_GICv4:
+ break;
+ default:
+ device_printf(sc->dev,
+ "No Re-Distributor found for CPU%u\n", cpuid);
+ return (ENODEV);
+ }
+
+ do {
+ typer = bus_read_8(&r_res, GICR_TYPER);
+ if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) {
+ KASSERT(sc->gic_redists.pcpu[cpuid] != NULL,
+ ("Invalid pointer to per-CPU redistributor"));
+ /* Copy res contents to its final destination */
+ sc->gic_redists.pcpu[cpuid]->res = r_res;
+ sc->gic_redists.pcpu[cpuid]->lpi_enabled = false;
+ if (bootverbose) {
+ device_printf(sc->dev,
+ "CPU%u Re-Distributor has been found\n",
+ cpuid);
+ }
+ return (0);
+ }
+
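+			/*
+			 * Not this CPU's Re-Distributor; step over its RD and
+			 * SGI frames (plus the VLPI and reserved frames when
+			 * virtual LPIs are implemented) to reach the next one.
+			 */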
+ r_bsh += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
+ if ((typer & GICR_TYPER_VLPIS) != 0) {
+ r_bsh +=
+ (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE);
+ }
+
+ rman_set_bushandle(&r_res, r_bsh);
+ } while ((typer & GICR_TYPER_LAST) == 0);
+ }
+
+ device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid);
+ return (ENXIO);
+}
+
+static int
+gic_v3_redist_wake(struct gic_v3_softc *sc)
+{
+ uint32_t waker;
+ size_t us_left = 1000000;
+
+ waker = gic_r_read(sc, 4, GICR_WAKER);
+ /* Wake up Re-Distributor for this CPU */
+ waker &= ~GICR_WAKER_PS;
+ gic_r_write(sc, 4, GICR_WAKER, waker);
+ /*
+ * When clearing ProcessorSleep bit it is required to wait for
+ * ChildrenAsleep to become zero following the processor power-on.
+ */
+ while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) {
+ DELAY(1);
+ if (us_left-- == 0) {
+ panic("Could not wake Re-Distributor for CPU%u",
+ PCPU_GET(cpuid));
+ }
+ }
+
+ if (bootverbose) {
+ device_printf(sc->dev, "CPU%u Re-Distributor woke up\n",
+ PCPU_GET(cpuid));
+ }
+
+ return (0);
+}
+
+static int
+gic_v3_redist_init(struct gic_v3_softc *sc)
+{
+ int err;
+ size_t i;
+
+ err = gic_v3_redist_find(sc);
+ if (err != 0)
+ return (err);
+
+ err = gic_v3_redist_wake(sc);
+ if (err != 0)
+ return (err);
+
+ /* Configure SGIs and PPIs to be Group1 Non-secure */
+ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_IGROUPR0,
+ 0xFFFFFFFF);
+
+	/* Disable PPIs */
+ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0,
+ GICR_I_ENABLER_PPI_MASK);
+ /* Enable SGIs */
+ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0,
+ GICR_I_ENABLER_SGI_MASK);
+
+ /* Set priority for SGIs and PPIs */
+ for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) {
+ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i),
+ GIC_PRIORITY_MAX);
+ }
+
+ gic_v3_wait_for_rwp(sc, REDIST);
+
+ return (0);
+}
diff --git a/sys/arm64/arm64/gic_v3_acpi.c b/sys/arm64/arm64/gic_v3_acpi.c
new file mode 100644
index 000000000000..b54ecfb014e5
--- /dev/null
+++ b/sys/arm64/arm64/gic_v3_acpi.c
@@ -0,0 +1,389 @@
+/*-
+ * Copyright (c) 2016 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * the sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_acpi.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+
+#include <machine/intr.h>
+#include <machine/resource.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <dev/acpica/acpivar.h>
+
+#include "gic_v3_reg.h"
+#include "gic_v3_var.h"
+
+struct gic_v3_acpi_devinfo {
+ struct gic_v3_devinfo di_gic_dinfo;
+ struct resource_list di_rl;
+};
+
+static device_identify_t gic_v3_acpi_identify;
+static device_probe_t gic_v3_acpi_probe;
+static device_attach_t gic_v3_acpi_attach;
+static bus_alloc_resource_t gic_v3_acpi_bus_alloc_res;
+
+static void gic_v3_acpi_bus_attach(device_t);
+
+static device_method_t gic_v3_acpi_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_identify, gic_v3_acpi_identify),
+ DEVMETHOD(device_probe, gic_v3_acpi_probe),
+ DEVMETHOD(device_attach, gic_v3_acpi_attach),
+
+ /* Bus interface */
+ DEVMETHOD(bus_alloc_resource, gic_v3_acpi_bus_alloc_res),
+ DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
+
+ /* End */
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(gic, gic_v3_acpi_driver, gic_v3_acpi_methods,
+ sizeof(struct gic_v3_softc), gic_v3_driver);
+
+static devclass_t gic_v3_acpi_devclass;
+
+EARLY_DRIVER_MODULE(gic_v3, acpi, gic_v3_acpi_driver, gic_v3_acpi_devclass,
+ 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
+
+struct madt_table_data {
+ device_t parent;
+ device_t dev;
+ ACPI_MADT_GENERIC_DISTRIBUTOR *dist;
+ int count;
+};
+
+static void
+madt_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
+{
+ struct madt_table_data *madt_data;
+
+ madt_data = (struct madt_table_data *)arg;
+
+ switch(entry->Type) {
+ case ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR:
+ if (madt_data->dist != NULL) {
+ if (bootverbose)
+ device_printf(madt_data->parent,
+ "gic: Already have a distributor table");
+ break;
+ }
+ madt_data->dist = (ACPI_MADT_GENERIC_DISTRIBUTOR *)entry;
+ break;
+
+ case ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR:
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void
+rdist_map(ACPI_SUBTABLE_HEADER *entry, void *arg)
+{
+ ACPI_MADT_GENERIC_REDISTRIBUTOR *redist;
+ struct madt_table_data *madt_data;
+
+ madt_data = (struct madt_table_data *)arg;
+
+ switch(entry->Type) {
+ case ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR:
+ redist = (ACPI_MADT_GENERIC_REDISTRIBUTOR *)entry;
+
+ madt_data->count++;
+ BUS_SET_RESOURCE(madt_data->parent, madt_data->dev,
+ SYS_RES_MEMORY, madt_data->count, redist->BaseAddress,
+ redist->Length);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void
+gic_v3_acpi_identify(driver_t *driver, device_t parent)
+{
+ struct madt_table_data madt_data;
+ ACPI_TABLE_MADT *madt;
+ vm_paddr_t physaddr;
+ device_t dev;
+
+ physaddr = acpi_find_table(ACPI_SIG_MADT);
+ if (physaddr == 0)
+ return;
+
+ madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
+ if (madt == NULL) {
+ device_printf(parent, "gic: Unable to map the MADT\n");
+ return;
+ }
+
+ madt_data.parent = parent;
+ madt_data.dist = NULL;
+ madt_data.count = 0;
+
+ acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
+ madt_handler, &madt_data);
+ if (madt_data.dist == NULL) {
+ device_printf(parent,
+ "No gic interrupt or distributor table\n");
+ goto out;
+ }
+ /* This is for the wrong GIC version */
+ if (madt_data.dist->Version != ACPI_MADT_GIC_VERSION_V3)
+ goto out;
+
+ dev = BUS_ADD_CHILD(parent, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE,
+ "gic", -1);
+ if (dev == NULL) {
+ device_printf(parent, "add gic child failed\n");
+ goto out;
+ }
+
+ /* Add the MADT data */
+ BUS_SET_RESOURCE(parent, dev, SYS_RES_MEMORY, 0,
+ madt_data.dist->BaseAddress, 128 * 1024);
+
+ madt_data.dev = dev;
+ acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
+ rdist_map, &madt_data);
+
+ acpi_set_private(dev, (void *)(uintptr_t)madt_data.dist->Version);
+
+out:
+ acpi_unmap_table(madt);
+}
+
+static int
+gic_v3_acpi_probe(device_t dev)
+{
+
+ switch((uintptr_t)acpi_get_private(dev)) {
+ case ACPI_MADT_GIC_VERSION_V3:
+ break;
+ default:
+ return (ENXIO);
+ }
+
+ device_set_desc(dev, GIC_V3_DEVSTR);
+ return (BUS_PROBE_NOWILDCARD);
+}
+
+static void
+madt_count_redistrib(ACPI_SUBTABLE_HEADER *entry, void *arg)
+{
+ struct gic_v3_softc *sc = arg;
+
+ if (entry->Type == ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR)
+ sc->gic_redists.nregions++;
+}
+
+static int
+gic_v3_acpi_count_regions(device_t dev)
+{
+ struct gic_v3_softc *sc;
+ ACPI_TABLE_MADT *madt;
+ vm_paddr_t physaddr;
+
+ sc = device_get_softc(dev);
+
+ physaddr = acpi_find_table(ACPI_SIG_MADT);
+ if (physaddr == 0)
+ return (ENXIO);
+
+ madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
+ if (madt == NULL) {
+ device_printf(dev, "Unable to map the MADT\n");
+ return (ENXIO);
+ }
+
+ acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
+ madt_count_redistrib, sc);
+ acpi_unmap_table(madt);
+
+ return (sc->gic_redists.nregions > 0 ? 0 : ENXIO);
+}
+
+static int
+gic_v3_acpi_attach(device_t dev)
+{
+ struct gic_v3_softc *sc;
+ int err;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ sc->gic_bus = GIC_BUS_ACPI;
+
+ err = gic_v3_acpi_count_regions(dev);
+ if (err != 0)
+ goto error;
+
+ err = gic_v3_attach(dev);
+ if (err != 0)
+ goto error;
+
+ sc->gic_pic = intr_pic_register(dev, ACPI_INTR_XREF);
+ if (sc->gic_pic == NULL) {
+ device_printf(dev, "could not register PIC\n");
+ err = ENXIO;
+ goto error;
+ }
+
+ if (intr_pic_claim_root(dev, ACPI_INTR_XREF, arm_gic_v3_intr, sc,
+ GIC_LAST_SGI - GIC_FIRST_SGI + 1) != 0) {
+ err = ENXIO;
+ goto error;
+ }
+
+ /*
+ * Try to register the ITS driver to this GIC. The GIC will act as
+ * a bus in that case. Failure here will not affect the main GIC
+ * functionality.
+ */
+ gic_v3_acpi_bus_attach(dev);
+
+	if (device_get_children(dev, &sc->gic_children, &sc->gic_nchildren) != 0)
+ sc->gic_nchildren = 0;
+
+ return (0);
+
+error:
+ if (bootverbose) {
+ device_printf(dev,
+ "Failed to attach. Error %d\n", err);
+ }
+ /* Failure so free resources */
+ gic_v3_detach(dev);
+
+ return (err);
+}
+
+static void
+gic_v3_add_children(ACPI_SUBTABLE_HEADER *entry, void *arg)
+{
+ ACPI_MADT_GENERIC_TRANSLATOR *gict;
+ struct gic_v3_acpi_devinfo *di;
+ struct gic_v3_softc *sc;
+ device_t child, dev;
+ u_int xref;
+ int err, pxm;
+
+ if (entry->Type == ACPI_MADT_TYPE_GENERIC_TRANSLATOR) {
+ /* We have an ITS, add it as a child */
+ gict = (ACPI_MADT_GENERIC_TRANSLATOR *)entry;
+ dev = arg;
+ sc = device_get_softc(dev);
+
+ child = device_add_child(dev, "its", -1);
+ if (child == NULL)
+ return;
+
+ di = malloc(sizeof(*di), M_GIC_V3, M_WAITOK | M_ZERO);
+ resource_list_init(&di->di_rl);
+ resource_list_add(&di->di_rl, SYS_RES_MEMORY, 0,
+ gict->BaseAddress, gict->BaseAddress + 128 * 1024 - 1,
+ 128 * 1024);
+ err = acpi_iort_its_lookup(gict->TranslationId, &xref, &pxm);
+ if (err == 0) {
+ di->di_gic_dinfo.gic_domain = pxm;
+ di->di_gic_dinfo.msi_xref = xref;
+ } else {
+ di->di_gic_dinfo.gic_domain = -1;
+ di->di_gic_dinfo.msi_xref = ACPI_MSI_XREF;
+ }
+ sc->gic_nchildren++;
+ device_set_ivars(child, di);
+ }
+}
+
+static void
+gic_v3_acpi_bus_attach(device_t dev)
+{
+ ACPI_TABLE_MADT *madt;
+ vm_paddr_t physaddr;
+
+ physaddr = acpi_find_table(ACPI_SIG_MADT);
+ if (physaddr == 0)
+ return;
+
+ madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
+ if (madt == NULL) {
+ device_printf(dev, "Unable to map the MADT to add children\n");
+ return;
+ }
+
+ acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
+ gic_v3_add_children, dev);
+
+ acpi_unmap_table(madt);
+
+ bus_generic_attach(dev);
+}
+
+static struct resource *
+gic_v3_acpi_bus_alloc_res(device_t bus, device_t child, int type, int *rid,
+ rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
+{
+ struct gic_v3_acpi_devinfo *di;
+ struct resource_list_entry *rle;
+
+ /* We only allocate memory */
+ if (type != SYS_RES_MEMORY)
+ return (NULL);
+
+ if (RMAN_IS_DEFAULT_RANGE(start, end)) {
+ if ((di = device_get_ivars(child)) == NULL)
+ return (NULL);
+
+ /* Find defaults for this rid */
+ rle = resource_list_find(&di->di_rl, type, *rid);
+ if (rle == NULL)
+ return (NULL);
+
+ start = rle->start;
+ end = rle->end;
+ count = rle->count;
+ }
+
+ return (bus_generic_alloc_resource(bus, child, type, rid, start, end,
+ count, flags));
+}
diff --git a/sys/arm64/arm64/gic_v3_fdt.c b/sys/arm64/arm64/gic_v3_fdt.c
new file mode 100644
index 000000000000..c8a9615a8a5f
--- /dev/null
+++ b/sys/arm64/arm64/gic_v3_fdt.c
@@ -0,0 +1,331 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under
+ * the sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bitstring.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+
+#include <machine/intr.h>
+#include <machine/resource.h>
+
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <arm/arm/gic_common.h>
+#include "gic_v3_reg.h"
+#include "gic_v3_var.h"
+
+/*
+ * FDT glue.
+ */
+static int gic_v3_fdt_probe(device_t);
+static int gic_v3_fdt_attach(device_t);
+static int gic_v3_fdt_print_child(device_t, device_t);
+
+static struct resource *gic_v3_ofw_bus_alloc_res(device_t, device_t, int, int *,
+ rman_res_t, rman_res_t, rman_res_t, u_int);
+static const struct ofw_bus_devinfo *gic_v3_ofw_get_devinfo(device_t, device_t);
+
+static device_method_t gic_v3_fdt_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, gic_v3_fdt_probe),
+ DEVMETHOD(device_attach, gic_v3_fdt_attach),
+
+ /* Bus interface */
+ DEVMETHOD(bus_print_child, gic_v3_fdt_print_child),
+ DEVMETHOD(bus_alloc_resource, gic_v3_ofw_bus_alloc_res),
+ DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
+
+ /* ofw_bus interface */
+ DEVMETHOD(ofw_bus_get_devinfo, gic_v3_ofw_get_devinfo),
+ DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat),
+ DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model),
+ DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name),
+ DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node),
+ DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type),
+
+ /* End */
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(gic, gic_v3_fdt_driver, gic_v3_fdt_methods,
+ sizeof(struct gic_v3_softc), gic_v3_driver);
+
+static devclass_t gic_v3_fdt_devclass;
+
+EARLY_DRIVER_MODULE(gic_v3, simplebus, gic_v3_fdt_driver, gic_v3_fdt_devclass,
+ 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
+EARLY_DRIVER_MODULE(gic_v3, ofwbus, gic_v3_fdt_driver, gic_v3_fdt_devclass,
+ 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
+
+/*
+ * Helper functions declarations.
+ */
+static int gic_v3_ofw_bus_attach(device_t);
+
+/*
+ * Device interface.
+ */
+static int
+gic_v3_fdt_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (!ofw_bus_is_compatible(dev, "arm,gic-v3"))
+ return (ENXIO);
+
+ device_set_desc(dev, GIC_V3_DEVSTR);
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+gic_v3_fdt_attach(device_t dev)
+{
+ struct gic_v3_softc *sc;
+ pcell_t redist_regions;
+ intptr_t xref;
+ int err;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ sc->gic_bus = GIC_BUS_FDT;
+
+ /*
+	 * Determine the number of Re-Distributor regions from the FDT.
+ */
+ if (OF_getencprop(ofw_bus_get_node(dev), "#redistributor-regions",
+ &redist_regions, sizeof(redist_regions)) <= 0)
+ sc->gic_redists.nregions = 1;
+ else
+ sc->gic_redists.nregions = redist_regions;
+
+ err = gic_v3_attach(dev);
+ if (err != 0)
+ goto error;
+
+ xref = OF_xref_from_node(ofw_bus_get_node(dev));
+ sc->gic_pic = intr_pic_register(dev, xref);
+ if (sc->gic_pic == NULL) {
+ device_printf(dev, "could not register PIC\n");
+ err = ENXIO;
+ goto error;
+ }
+
+ /* Register xref */
+ OF_device_register_xref(xref, dev);
+
+ if (intr_pic_claim_root(dev, xref, arm_gic_v3_intr, sc,
+ GIC_LAST_SGI - GIC_FIRST_SGI + 1) != 0) {
+ err = ENXIO;
+ goto error;
+ }
+
+ /*
+ * Try to register ITS to this GIC.
+ * GIC will act as a bus in that case.
+ * Failure here will not affect main GIC functionality.
+ */
+ if (gic_v3_ofw_bus_attach(dev) != 0) {
+ if (bootverbose) {
+ device_printf(dev,
+ "Failed to attach ITS to this GIC\n");
+ }
+ }
+
+ if (device_get_children(dev, &sc->gic_children, &sc->gic_nchildren) != 0)
+ sc->gic_nchildren = 0;
+
+ return (err);
+
+error:
+ if (bootverbose) {
+ device_printf(dev,
+ "Failed to attach. Error %d\n", err);
+ }
+ /* Failure so free resources */
+ gic_v3_detach(dev);
+
+ return (err);
+}
+
+/* OFW bus interface */
+struct gic_v3_ofw_devinfo {
+ struct gic_v3_devinfo di_gic_dinfo;
+ struct ofw_bus_devinfo di_dinfo;
+ struct resource_list di_rl;
+};
+
+static int
+gic_v3_fdt_print_child(device_t bus, device_t child)
+{
+ struct gic_v3_ofw_devinfo *di = device_get_ivars(child);
+ struct resource_list *rl = &di->di_rl;
+ int retval = 0;
+
+ retval += bus_print_child_header(bus, child);
+ retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx");
+ retval += bus_print_child_footer(bus, child);
+
+ return (retval);
+}
+
+static const struct ofw_bus_devinfo *
+gic_v3_ofw_get_devinfo(device_t bus __unused, device_t child)
+{
+ struct gic_v3_ofw_devinfo *di;
+
+ di = device_get_ivars(child);
+ return (&di->di_dinfo);
+}
+
+static struct resource *
+gic_v3_ofw_bus_alloc_res(device_t bus, device_t child, int type, int *rid,
+ rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
+{
+ struct gic_v3_ofw_devinfo *di;
+ struct resource_list_entry *rle;
+ int ranges_len;
+
+ if (RMAN_IS_DEFAULT_RANGE(start, end)) {
+ if ((di = device_get_ivars(child)) == NULL)
+ return (NULL);
+ if (type != SYS_RES_MEMORY)
+ return (NULL);
+
+ /* Find defaults for this rid */
+ rle = resource_list_find(&di->di_rl, type, *rid);
+ if (rle == NULL)
+ return (NULL);
+
+ start = rle->start;
+ end = rle->end;
+ count = rle->count;
+ }
+ /*
+ * XXX: No ranges remap!
+ * Absolute address is expected.
+ */
+ if (ofw_bus_has_prop(bus, "ranges")) {
+ ranges_len = OF_getproplen(ofw_bus_get_node(bus), "ranges");
+ if (ranges_len != 0) {
+ if (bootverbose) {
+ device_printf(child,
+ "Ranges remap not supported\n");
+ }
+ return (NULL);
+ }
+ }
+ return (bus_generic_alloc_resource(bus, child, type, rid, start, end,
+ count, flags));
+}
+
+/* Helper functions */
+
+/*
+ * Bus capability support for GICv3.
+ * Collects and configures device information and finally adds the ITS
+ * device as a child of the GICv3 in the newbus hierarchy.
+ */
+static int
+gic_v3_ofw_bus_attach(device_t dev)
+{
+ struct gic_v3_ofw_devinfo *di;
+ struct gic_v3_softc *sc;
+ device_t child;
+ phandle_t parent, node;
+ pcell_t addr_cells, size_cells;
+
+ sc = device_get_softc(dev);
+ parent = ofw_bus_get_node(dev);
+ if (parent > 0) {
+ addr_cells = 2;
+ OF_getencprop(parent, "#address-cells", &addr_cells,
+ sizeof(addr_cells));
+ size_cells = 2;
+ OF_getencprop(parent, "#size-cells", &size_cells,
+ sizeof(size_cells));
+ /* Iterate through all GIC subordinates */
+ for (node = OF_child(parent); node > 0; node = OF_peer(node)) {
+ /* Allocate and populate devinfo. */
+ di = malloc(sizeof(*di), M_GIC_V3, M_WAITOK | M_ZERO);
+
+ /* Read the numa node, or -1 if there is none */
+ if (OF_getencprop(node, "numa-node-id",
+ &di->di_gic_dinfo.gic_domain,
+ sizeof(di->di_gic_dinfo.gic_domain)) <= 0) {
+ di->di_gic_dinfo.gic_domain = -1;
+ }
+
+ if (ofw_bus_gen_setup_devinfo(&di->di_dinfo, node)) {
+ if (bootverbose) {
+ device_printf(dev,
+ "Could not set up devinfo for ITS\n");
+ }
+ free(di, M_GIC_V3);
+ continue;
+ }
+
+ /* Initialize and populate resource list. */
+ resource_list_init(&di->di_rl);
+ ofw_bus_reg_to_rl(dev, node, addr_cells, size_cells,
+ &di->di_rl);
+
+ /* Should not have any interrupts, so don't add any */
+
+ /* Add newbus device for this FDT node */
+ child = device_add_child(dev, NULL, -1);
+ if (!child) {
+ if (bootverbose) {
+ device_printf(dev,
+ "Could not add child: %s\n",
+ di->di_dinfo.obd_name);
+ }
+ resource_list_free(&di->di_rl);
+ ofw_bus_gen_destroy_devinfo(&di->di_dinfo);
+ free(di, M_GIC_V3);
+ continue;
+ }
+
+ sc->gic_nchildren++;
+ device_set_ivars(child, di);
+ }
+ }
+
+ return (bus_generic_attach(dev));
+}
diff --git a/sys/arm64/arm64/gic_v3_reg.h b/sys/arm64/arm64/gic_v3_reg.h
new file mode 100644
index 000000000000..34082b1bde0a
--- /dev/null
+++ b/sys/arm64/arm64/gic_v3_reg.h
@@ -0,0 +1,434 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under
+ * the sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _GIC_V3_REG_H_
+#define _GIC_V3_REG_H_
+
+/*
+ * Maximum number of interrupts
+ * supported by GIC (including SGIs, PPIs and SPIs)
+ */
+#define GIC_I_NUM_MAX (1020)
+/*
+ * Priority MAX/MIN values
+ */
+#define GIC_PRIORITY_MAX (0x00UL)
+/* Upper value is determined by LPI max priority */
+#define GIC_PRIORITY_MIN (0xFCUL)
+
+/* Numbers for shared peripheral interrupts */
+#define GIC_LAST_SPI (1019)
+/* Numbers for locality-specific peripheral interrupts (LPIs) */
+#define GIC_FIRST_LPI (8192)
+
+/*
+ * Registers (v2/v3)
+ */
+/* GICD_CTLR */
+#define GICD_CTLR_G1 (1 << 0)
+#define GICD_CTLR_G1A (1 << 1)
+#define GICD_CTLR_ARE_NS (1 << 4)
+#define GICD_CTLR_RWP (1 << 31)
+/* GICD_TYPER */
+#define GICD_TYPER_IDBITS(n) ((((n) >> 19) & 0x1F) + 1)
+
+/*
+ * Registers (v3)
+ */
+#define GICD_IROUTER(n) (0x6000 + ((n) * 8))
+
+#define GICD_PIDR4 0xFFD0
+#define GICD_PIDR5 0xFFD4
+#define GICD_PIDR6 0xFFD8
+#define GICD_PIDR7 0xFFDC
+#define GICD_PIDR0 0xFFE0
+#define GICD_PIDR1 0xFFE4
+#define GICD_PIDR2 0xFFE8
+
+#define GICR_PIDR2_ARCH_SHIFT 4
+#define GICR_PIDR2_ARCH_MASK 0xF0
+#define GICR_PIDR2_ARCH(x) \
+ (((x) & GICR_PIDR2_ARCH_MASK) >> GICR_PIDR2_ARCH_SHIFT)
+#define GICR_PIDR2_ARCH_GICv3 0x3
+#define GICR_PIDR2_ARCH_GICv4 0x4
+
+#define GICD_PIDR3 0xFFEC
+
+/* Redistributor registers */
+#define GICR_CTLR GICD_CTLR
+#define GICR_CTLR_LPI_ENABLE (1 << 0)
+
+#define GICR_PIDR2 GICD_PIDR2
+
+#define GICR_TYPER (0x0008)
+#define GICR_TYPER_PLPIS (1 << 0)
+#define GICR_TYPER_VLPIS (1 << 1)
+#define GICR_TYPER_LAST (1 << 4)
+#define GICR_TYPER_CPUNUM_SHIFT (8)
+#define GICR_TYPER_CPUNUM_MASK (0xFFFUL << GICR_TYPER_CPUNUM_SHIFT)
+#define GICR_TYPER_CPUNUM(x) \
+ (((x) & GICR_TYPER_CPUNUM_MASK) >> GICR_TYPER_CPUNUM_SHIFT)
+#define GICR_TYPER_AFF_SHIFT (32)
+
+#define GICR_WAKER (0x0014)
+#define GICR_WAKER_PS (1 << 1) /* Processor sleep */
+#define GICR_WAKER_CA (1 << 2) /* Children asleep */
+
+#define GICR_PROPBASER (0x0070)
+#define GICR_PROPBASER_IDBITS_MASK 0x1FUL
+/*
+ * Cacheability
+ * 0x0 - Device-nGnRnE
+ * 0x1 - Normal Inner Non-cacheable
+ * 0x2 - Normal Inner Read-allocate, Write-through
+ * 0x3 - Normal Inner Read-allocate, Write-back
+ * 0x4 - Normal Inner Write-allocate, Write-through
+ * 0x5 - Normal Inner Write-allocate, Write-back
+ * 0x6 - Normal Inner Read-allocate, Write-allocate, Write-through
+ * 0x7 - Normal Inner Read-allocate, Write-allocate, Write-back
+ */
+#define GICR_PROPBASER_CACHE_SHIFT 7
+#define GICR_PROPBASER_CACHE_DnGnRnE 0x0UL
+#define GICR_PROPBASER_CACHE_NIN 0x1UL
+#define GICR_PROPBASER_CACHE_NIRAWT 0x2UL
+#define GICR_PROPBASER_CACHE_NIRAWB 0x3UL
+#define GICR_PROPBASER_CACHE_NIWAWT 0x4UL
+#define GICR_PROPBASER_CACHE_NIWAWB 0x5UL
+#define GICR_PROPBASER_CACHE_NIRAWAWT 0x6UL
+#define GICR_PROPBASER_CACHE_NIRAWAWB 0x7UL
+#define GICR_PROPBASER_CACHE_MASK \
+ (0x7UL << GICR_PROPBASER_CACHE_SHIFT)
+
+/*
+ * Shareability
+ * 0x0 - Non-shareable
+ * 0x1 - Inner-shareable
+ * 0x2 - Outer-shareable
+ * 0x3 - Reserved. Treated as 0x0
+ */
+#define GICR_PROPBASER_SHARE_SHIFT 10
+#define GICR_PROPBASER_SHARE_NS 0x0UL
+#define GICR_PROPBASER_SHARE_IS 0x1UL
+#define GICR_PROPBASER_SHARE_OS 0x2UL
+#define GICR_PROPBASER_SHARE_RES 0x3UL
+#define GICR_PROPBASER_SHARE_MASK \
+ (0x3UL << GICR_PROPBASER_SHARE_SHIFT)
+
+#define GICR_PENDBASER (0x0078)
+/*
+ * Cacheability
+ * 0x0 - Device-nGnRnE
+ * 0x1 - Normal Inner Non-cacheable
+ * 0x2 - Normal Inner Read-allocate, Write-through
+ * 0x3 - Normal Inner Read-allocate, Write-back
+ * 0x4 - Normal Inner Write-allocate, Write-through
+ * 0x5 - Normal Inner Write-allocate, Write-back
+ * 0x6 - Normal Inner Read-allocate, Write-allocate, Write-through
+ * 0x7 - Normal Inner Read-allocate, Write-allocate, Write-back
+ */
+#define GICR_PENDBASER_CACHE_SHIFT 7
+#define GICR_PENDBASER_CACHE_DnGnRnE 0x0UL
+#define GICR_PENDBASER_CACHE_NIN 0x1UL
+#define GICR_PENDBASER_CACHE_NIRAWT 0x2UL
+#define GICR_PENDBASER_CACHE_NIRAWB 0x3UL
+#define GICR_PENDBASER_CACHE_NIWAWT 0x4UL
+#define GICR_PENDBASER_CACHE_NIWAWB 0x5UL
+#define GICR_PENDBASER_CACHE_NIRAWAWT 0x6UL
+#define GICR_PENDBASER_CACHE_NIRAWAWB 0x7UL
+#define GICR_PENDBASER_CACHE_MASK \
+ (0x7UL << GICR_PENDBASER_CACHE_SHIFT)
+
+/*
+ * Shareability
+ * 0x0 - Non-shareable
+ * 0x1 - Inner-shareable
+ * 0x2 - Outer-shareable
+ * 0x3 - Reserved. Treated as 0x0
+ */
+#define GICR_PENDBASER_SHARE_SHIFT 10
+#define GICR_PENDBASER_SHARE_NS 0x0UL
+#define GICR_PENDBASER_SHARE_IS 0x1UL
+#define GICR_PENDBASER_SHARE_OS 0x2UL
+#define GICR_PENDBASER_SHARE_RES 0x3UL
+#define GICR_PENDBASER_SHARE_MASK \
+ (0x3UL << GICR_PENDBASER_SHARE_SHIFT)
+
+/* Re-distributor registers for SGIs and PPIs */
+#define GICR_RD_BASE_SIZE PAGE_SIZE_64K
+#define GICR_SGI_BASE_SIZE PAGE_SIZE_64K
+#define GICR_VLPI_BASE_SIZE PAGE_SIZE_64K
+#define GICR_RESERVED_SIZE PAGE_SIZE_64K
+
+#define GICR_IGROUPR0 (0x0080)
+#define GICR_ISENABLER0 (0x0100)
+#define GICR_ICENABLER0 (0x0180)
+#define GICR_I_ENABLER_SGI_MASK (0x0000FFFF)
+#define GICR_I_ENABLER_PPI_MASK (0xFFFF0000)
+
+#define GICR_I_PER_IPRIORITYn (GICD_I_PER_IPRIORITYn)
+
+/* ITS registers */
+#define GITS_PIDR2 GICR_PIDR2
+#define GITS_PIDR2_ARCH_MASK GICR_PIDR2_ARCH_MASK
+#define GITS_PIDR2_ARCH_GICv3 GICR_PIDR2_ARCH_GICv3
+#define GITS_PIDR2_ARCH_GICv4 GICR_PIDR2_ARCH_GICv4
+
+#define GITS_CTLR (0x0000)
+#define GITS_CTLR_EN (1 << 0)
+
+#define GITS_IIDR (0x0004)
+#define GITS_IIDR_PRODUCT_SHIFT 24
+#define GITS_IIDR_PRODUCT_MASK (0xff << GITS_IIDR_PRODUCT_SHIFT)
+#define GITS_IIDR_VARIANT_SHIFT 16
+#define GITS_IIDR_VARIANT_MASK (0xf << GITS_IIDR_VARIANT_SHIFT)
+#define GITS_IIDR_REVISION_SHIFT 12
+#define GITS_IIDR_REVISION_MASK (0xf << GITS_IIDR_REVISION_SHIFT)
+#define GITS_IIDR_IMPLEMENTOR_SHIFT 0
+#define GITS_IIDR_IMPLEMENTOR_MASK (0xfff << GITS_IIDR_IMPLEMENTOR_SHIFT)
+
+#define GITS_IIDR_RAW(impl, prod, var, rev) \
+ ((prod) << GITS_IIDR_PRODUCT_SHIFT | \
+ (var) << GITS_IIDR_VARIANT_SHIFT | \
+ (rev) << GITS_IIDR_REVISION_SHIFT | \
+ (impl) << GITS_IIDR_IMPLEMENTOR_SHIFT)
+
+#define GITS_IIDR_IMPL_ARM (0x43B)
+#define GITS_IIDR_PROD_GIC500 (0x0)
+#define GITS_IIDR_IMPL_CAVIUM (0x34c)
+#define GITS_IIDR_PROD_THUNDER (0xa1)
+#define GITS_IIDR_VAR_THUNDER_1 (0x0)
+
+#define GITS_CBASER (0x0080)
+#define GITS_CBASER_VALID (1UL << 63)
+/*
+ * Cacheability
+ * 0x0 - Device-nGnRnE
+ * 0x1 - Normal Inner Non-cacheable
+ * 0x2 - Normal Inner Read-allocate, Write-through
+ * 0x3 - Normal Inner Read-allocate, Write-back
+ * 0x4 - Normal Inner Write-allocate, Write-through
+ * 0x5 - Normal Inner Write-allocate, Write-back
+ * 0x6 - Normal Inner Read-allocate, Write-allocate, Write-through
+ * 0x7 - Normal Inner Read-allocate, Write-allocate, Write-back
+ */
+#define GITS_CBASER_CACHE_SHIFT 59
+#define GITS_CBASER_CACHE_DnGnRnE 0x0UL
+#define GITS_CBASER_CACHE_NIN 0x1UL
+#define GITS_CBASER_CACHE_NIRAWT 0x2UL
+#define GITS_CBASER_CACHE_NIRAWB 0x3UL
+#define GITS_CBASER_CACHE_NIWAWT 0x4UL
+#define GITS_CBASER_CACHE_NIWAWB 0x5UL
+#define GITS_CBASER_CACHE_NIRAWAWT 0x6UL
+#define GITS_CBASER_CACHE_NIRAWAWB 0x7UL
+#define GITS_CBASER_CACHE_MASK (0x7UL << GITS_CBASER_CACHE_SHIFT)
+/*
+ * Shareability
+ * 0x0 - Non-shareable
+ * 0x1 - Inner-shareable
+ * 0x2 - Outer-shareable
+ * 0x3 - Reserved. Treated as 0x0
+ */
+#define GITS_CBASER_SHARE_SHIFT 10
+#define GITS_CBASER_SHARE_NS 0x0UL
+#define GITS_CBASER_SHARE_IS 0x1UL
+#define GITS_CBASER_SHARE_OS 0x2UL
+#define GITS_CBASER_SHARE_RES 0x3UL
+#define GITS_CBASER_SHARE_MASK \
+ (0x3UL << GITS_CBASER_SHARE_SHIFT)
+
+#define GITS_CBASER_PA_SHIFT 12
+#define GITS_CBASER_PA_MASK (0xFFFFFFFFFUL << GITS_CBASER_PA_SHIFT)
+
+#define GITS_CWRITER (0x0088)
+#define GITS_CREADR (0x0090)
+
+#define GITS_BASER_BASE (0x0100)
+#define GITS_BASER(x) (GITS_BASER_BASE + (x) * 8)
+
+#define GITS_BASER_VALID (1UL << 63)
+
+#define GITS_BASER_TYPE_SHIFT 56
+#define GITS_BASER_TYPE(x) \
+ (((x) & GITS_BASER_TYPE_MASK) >> GITS_BASER_TYPE_SHIFT)
+#define GITS_BASER_TYPE_UNIMPL 0x0UL /* Unimplemented */
+#define GITS_BASER_TYPE_DEV 0x1UL /* Devices */
+#define GITS_BASER_TYPE_VP 0x2UL /* Virtual Processors */
+#define GITS_BASER_TYPE_PP 0x3UL /* Physical Processors */
+#define GITS_BASER_TYPE_IC 0x4UL /* Interrupt Collections */
+#define GITS_BASER_TYPE_RES5 0x5UL /* Reserved */
+#define GITS_BASER_TYPE_RES6 0x6UL /* Reserved */
+#define GITS_BASER_TYPE_RES7 0x7UL /* Reserved */
+#define GITS_BASER_TYPE_MASK (0x7UL << GITS_BASER_TYPE_SHIFT)
+/*
+ * Cacheability
+ * 0x0 - Non-cacheable, non-bufferable
+ * 0x1 - Non-cacheable
+ * 0x2 - Read-allocate, Write-through
+ * 0x3 - Read-allocate, Write-back
+ * 0x4 - Write-allocate, Write-through
+ * 0x5 - Write-allocate, Write-back
+ * 0x6 - Read-allocate, Write-allocate, Write-through
+ * 0x7 - Read-allocate, Write-allocate, Write-back
+ */
+#define GITS_BASER_CACHE_SHIFT 59
+#define GITS_BASER_CACHE_NCNB 0x0UL
+#define GITS_BASER_CACHE_NC 0x1UL
+#define GITS_BASER_CACHE_RAWT 0x2UL
+#define GITS_BASER_CACHE_RAWB 0x3UL
+#define GITS_BASER_CACHE_WAWT 0x4UL
+#define GITS_BASER_CACHE_WAWB 0x5UL
+#define GITS_BASER_CACHE_RAWAWT 0x6UL
+#define GITS_BASER_CACHE_RAWAWB 0x7UL
+#define GITS_BASER_CACHE_MASK (0x7UL << GITS_BASER_CACHE_SHIFT)
+
+#define GITS_BASER_ESIZE_SHIFT 48
+#define GITS_BASER_ESIZE_MASK (0x1FUL << GITS_BASER_ESIZE_SHIFT)
+#define GITS_BASER_ESIZE(x) \
+ ((((x) & GITS_BASER_ESIZE_MASK) >> GITS_BASER_ESIZE_SHIFT) + 1)
+
+#define GITS_BASER_PA_SHIFT 12
+#define GITS_BASER_PA_MASK (0xFFFFFFFFFUL << GITS_BASER_PA_SHIFT)
+
+/*
+ * Shareability
+ * 0x0 - Non-shareable
+ * 0x1 - Inner-shareable
+ * 0x2 - Outer-shareable
+ * 0x3 - Reserved. Treated as 0x0
+ */
+#define GITS_BASER_SHARE_SHIFT 10
+#define GITS_BASER_SHARE_NS 0x0UL
+#define GITS_BASER_SHARE_IS 0x1UL
+#define GITS_BASER_SHARE_OS 0x2UL
+#define GITS_BASER_SHARE_RES 0x3UL
+#define GITS_BASER_SHARE_MASK (0x3UL << GITS_BASER_SHARE_SHIFT)
+
+#define GITS_BASER_PSZ_SHIFT 8
+#define GITS_BASER_PSZ_4K 0x0UL
+#define GITS_BASER_PSZ_16K 0x1UL
+#define GITS_BASER_PSZ_64K 0x2UL
+#define GITS_BASER_PSZ_MASK (0x3UL << GITS_BASER_PSZ_SHIFT)
+
+#define GITS_BASER_SIZE_MASK 0xFFUL
+
+#define GITS_BASER_NUM 8
+
+#define GITS_TYPER (0x0008)
+#define GITS_TYPER_PTA (1UL << 19)
+#define GITS_TYPER_DEVB_SHIFT 13
+#define GITS_TYPER_DEVB_MASK (0x1FUL << GITS_TYPER_DEVB_SHIFT)
+/* Number of DeviceID bits implemented */
+#define GITS_TYPER_DEVB(x) \
+ ((((x) & GITS_TYPER_DEVB_MASK) >> GITS_TYPER_DEVB_SHIFT) + 1)
+#define GITS_TYPER_ITTES_SHIFT 4
+#define GITS_TYPER_ITTES_MASK (0xFUL << GITS_TYPER_ITTES_SHIFT)
+/* Number of bytes per ITT Entry */
+#define GITS_TYPER_ITTES(x) \
+ ((((x) & GITS_TYPER_ITTES_MASK) >> GITS_TYPER_ITTES_SHIFT) + 1)
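+
+/*
+ * Worked example (illustrative): a GITS_TYPER value whose Devbits
+ * field [17:13] reads 0x13 and whose ITT entry size field [7:4]
+ * reads 0x7 yields GITS_TYPER_DEVB() == 20 (20-bit DeviceIDs, i.e.
+ * up to 1 << 20 devices) and GITS_TYPER_ITTES() == 8 (8-byte ITT
+ * entries).
+ */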
+
+#define GITS_TRANSLATER (0x10040)
+/*
+ * LPI related
+ */
+#define LPI_CONF_PRIO_MASK (0xFC)
+#define LPI_CONF_GROUP1 (1 << 1)
+#define LPI_CONF_ENABLE (1 << 0)
+
+/*
+ * GIC 500 ITS tracking facility
+ */
+#define GITS_TRKCTLR 0xC000
+#define GITS_TRKR 0xC004
+#define GITS_TRKDIDR 0xC008
+#define GITS_TRKPIDR 0xC00C
+#define GITS_TRKVIDR 0xC010
+#define GITS_TRKTGTR 0xC014
+#define GITS_TRKICR 0xC018
+#define GITS_TRKLCR 0xC018
+
+/*
+ * CPU interface
+ */
+
+/*
+ * Registers list (ICC_xyz_EL1):
+ *
+ * PMR - Priority Mask Register
+ * * interrupts of priority higher than specified
+ * in this mask will be signalled to the CPU.
+ * (0xff - lowest possible prio., 0x00 - highest prio.)
+ *
+ * CTLR - Control Register
+ * * controls behavior of the CPU interface and displays
+ * implemented features.
+ *
+ * IGRPEN1 - Interrupt Group 1 Enable Register
+ *
+ * IAR1 - Interrupt Acknowledge Register Group 1
+ * * contains number of the highest priority pending
+ * interrupt from the Group 1.
+ *
+ * EOIR1 - End of Interrupt Register Group 1
+ * * Writes inform CPU interface about completed Group 1
+ * interrupts processing.
+ */
+
+#define gic_icc_write(reg, val) \
+do { \
+ WRITE_SPECIALREG(icc_ ##reg ##_el1, val); \
+ isb(); \
+} while (0)
+
+#define gic_icc_read(reg) \
+({ \
+ uint64_t val; \
+ \
+ val = READ_SPECIALREG(icc_ ##reg ##_el1); \
+ (val); \
+})
+
+#define gic_icc_set(reg, mask) \
+do { \
+ uint64_t val; \
+ val = gic_icc_read(reg); \
+ val |= (mask); \
+ gic_icc_write(reg, val); \
+} while (0)
+
+#define gic_icc_clear(reg, mask) \
+do { \
+ uint64_t val; \
+ val = gic_icc_read(reg); \
+ val &= ~(mask); \
+ gic_icc_write(reg, val); \
+} while (0)
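+
+#if 0
+/*
+ * Illustrative only (not part of this header): typical CPU interface
+ * setup using the helpers above, as it would appear in the GIC driver:
+ * unmask all priorities and enable Group 1 interrupts.
+ */
+gic_icc_write(PMR, 0xff); /* 0xff: no interrupt priority is masked */
+gic_icc_set(IGRPEN1, 1); /* Set the Group 1 enable bit */
+#endif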
+
+#endif /* _GIC_V3_REG_H_ */
diff --git a/sys/arm64/arm64/gic_v3_var.h b/sys/arm64/arm64/gic_v3_var.h
new file mode 100644
index 000000000000..f855e425d66d
--- /dev/null
+++ b/sys/arm64/arm64/gic_v3_var.h
@@ -0,0 +1,145 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under
+ * the sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _GIC_V3_VAR_H_
+#define _GIC_V3_VAR_H_
+
+#include <arm/arm/gic_common.h>
+
+#define GIC_V3_DEVSTR "ARM Generic Interrupt Controller v3.0"
+
+DECLARE_CLASS(gic_v3_driver);
+
+struct gic_v3_irqsrc;
+
+struct redist_pcpu {
+ struct resource res; /* mem resource for redist */
+ vm_offset_t pend_base;
+ bool lpi_enabled; /* redist LPI configured? */
+};
+
+struct gic_redists {
+ /*
+ * Re-Distributor region description.
+ * We will have a few of these, depending on
+ * the #redistributor-regions property in the FDT.
+ */
+ struct resource ** regions;
+ /* Number of Re-Distributor regions */
+ u_int nregions;
+ /* Per-CPU Re-Distributor data */
+ struct redist_pcpu *pcpu[MAXCPU];
+};
+
+struct gic_v3_softc {
+ device_t dev;
+ struct resource ** gic_res;
+ struct mtx gic_mtx;
+ /* Distributor */
+ struct resource * gic_dist;
+ /* Re-Distributors */
+ struct gic_redists gic_redists;
+
+ uint32_t gic_pidr2;
+ u_int gic_bus;
+
+ u_int gic_nirqs;
+ u_int gic_idbits;
+
+ boolean_t gic_registered;
+
+ int gic_nchildren;
+ device_t *gic_children;
+ struct intr_pic *gic_pic;
+ struct gic_v3_irqsrc *gic_irqs;
+};
+
+struct gic_v3_devinfo {
+ int gic_domain;
+ int msi_xref;
+};
+
+#define GIC_INTR_ISRC(sc, irq) (&sc->gic_irqs[irq].gi_isrc)
+
+MALLOC_DECLARE(M_GIC_V3);
+
+/* ivars */
+#define GICV3_IVAR_NIRQS 1000
+/* 1001 was GICV3_IVAR_REDIST_VADDR */
+#define GICV3_IVAR_REDIST 1002
+
+__BUS_ACCESSOR(gicv3, nirqs, GICV3, NIRQS, u_int);
+__BUS_ACCESSOR(gicv3, redist, GICV3, REDIST, void *);
+
+/* Device methods */
+int gic_v3_attach(device_t dev);
+int gic_v3_detach(device_t dev);
+int arm_gic_v3_intr(void *);
+
+uint32_t gic_r_read_4(device_t, bus_size_t);
+uint64_t gic_r_read_8(device_t, bus_size_t);
+void gic_r_write_4(device_t, bus_size_t, uint32_t var);
+void gic_r_write_8(device_t, bus_size_t, uint64_t var);
+
+/*
+ * GIC Distributor accessors.
+ * Note that only the GIC softc can be passed.
+ */
+#define gic_d_read(sc, len, reg) \
+({ \
+ bus_read_##len(sc->gic_dist, reg); \
+})
+
+#define gic_d_write(sc, len, reg, val) \
+({ \
+ bus_write_##len(sc->gic_dist, reg, val);\
+})
+
+/* GIC Re-Distributor accessors (per-CPU) */
+#define gic_r_read(sc, len, reg) \
+({ \
+ u_int cpu = PCPU_GET(cpuid); \
+ \
+ bus_read_##len( \
+ &sc->gic_redists.pcpu[cpu]->res, \
+ reg); \
+})
+
+#define gic_r_write(sc, len, reg, val) \
+({ \
+ u_int cpu = PCPU_GET(cpuid); \
+ \
+ bus_write_##len( \
+ &sc->gic_redists.pcpu[cpu]->res, \
+ reg, val); \
+})
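+
+#if 0
+/*
+ * Illustrative use of the accessors above (not part of this header):
+ * read the Distributor control register and enable LPIs on the current
+ * CPU's Re-Distributor.  GICD_CTLR and GICR_CTLR are assumed to come
+ * from gic_common.h and gic_v3_reg.h respectively.
+ */
+uint32_t rctlr;
+
+(void)gic_d_read(sc, 4, GICD_CTLR);
+rctlr = gic_r_read(sc, 4, GICR_CTLR);
+gic_r_write(sc, 4, GICR_CTLR, rctlr | GICR_CTLR_LPI_ENABLE);
+#endif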
+
+#endif /* _GIC_V3_VAR_H_ */
diff --git a/sys/arm64/arm64/gicv3_its.c b/sys/arm64/arm64/gicv3_its.c
new file mode 100644
index 000000000000..bfb069c195a5
--- /dev/null
+++ b/sys/arm64/arm64/gicv3_its.c
@@ -0,0 +1,1960 @@
+/*-
+ * Copyright (c) 2015-2016 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * the sponsorship of the FreeBSD Foundation.
+ *
+ * This software was developed by Semihalf under
+ * the sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_acpi.h"
+#include "opt_platform.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/cpuset.h>
+#include <sys/endian.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/queue.h>
+#include <sys/rman.h>
+#include <sys/sbuf.h>
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+#include <sys/vmem.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+#include <machine/intr.h>
+
+#include <arm/arm/gic_common.h>
+#include <arm64/arm64/gic_v3_reg.h>
+#include <arm64/arm64/gic_v3_var.h>
+
+#ifdef FDT
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#endif
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "pcib_if.h"
+#include "pic_if.h"
+#include "msi_if.h"
+
+MALLOC_DEFINE(M_GICV3_ITS, "GICv3 ITS",
+ "ARM GICv3 Interrupt Translation Service");
+
+#define LPI_NIRQS (64 * 1024)
+
+/* The size and alignment of the command circular buffer */
+#define ITS_CMDQ_SIZE (64 * 1024) /* Must be a multiple of 4K */
+#define ITS_CMDQ_ALIGN (64 * 1024)
+
+#define LPI_CONFTAB_SIZE LPI_NIRQS
+#define LPI_CONFTAB_ALIGN (64 * 1024)
+#define LPI_CONFTAB_MAX_ADDR ((1ul << 48) - 1) /* We need a 47 bit PA */
+
+/* 1 bit per SPI, PPI, and SGI (8k), and 1 bit per LPI (LPI_CONFTAB_SIZE) */
+#define LPI_PENDTAB_SIZE ((LPI_NIRQS + GIC_FIRST_LPI) / 8)
+#define LPI_PENDTAB_ALIGN (64 * 1024)
+#define LPI_PENDTAB_MAX_ADDR ((1ul << 48) - 1) /* We need a 47 bit PA */
+
+#define LPI_INT_TRANS_TAB_ALIGN 256
+#define LPI_INT_TRANS_TAB_MAX_ADDR ((1ul << 48) - 1)
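+
+/*
+ * Sizing notes derived from the constants above: the 64KB command queue
+ * holds 64K / 32 = 2048 commands (each ITS command is 32 bytes), and
+ * each per-CPU pending table is (64K LPIs + 8K SPIs/PPIs/SGIs) / 8 =
+ * 9KB, allocated with 64KB alignment.
+ */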
+
+/* ITS commands encoding */
+#define ITS_CMD_MOVI (0x01)
+#define ITS_CMD_SYNC (0x05)
+#define ITS_CMD_MAPD (0x08)
+#define ITS_CMD_MAPC (0x09)
+#define ITS_CMD_MAPTI (0x0a)
+#define ITS_CMD_MAPI (0x0b)
+#define ITS_CMD_INV (0x0c)
+#define ITS_CMD_INVALL (0x0d)
+/* Command */
+#define CMD_COMMAND_MASK (0xFFUL)
+/* PCI device ID */
+#define CMD_DEVID_SHIFT (32)
+#define CMD_DEVID_MASK (0xFFFFFFFFUL << CMD_DEVID_SHIFT)
+/* Size of IRQ ID bitfield */
+#define CMD_SIZE_MASK (0xFFUL)
+/* Virtual LPI ID */
+#define CMD_ID_MASK (0xFFFFFFFFUL)
+/* Physical LPI ID */
+#define CMD_PID_SHIFT (32)
+#define CMD_PID_MASK (0xFFFFFFFFUL << CMD_PID_SHIFT)
+/* Collection */
+#define CMD_COL_MASK (0xFFFFUL)
+/* Target (CPU or Re-Distributor) */
+#define CMD_TARGET_SHIFT (16)
+#define CMD_TARGET_MASK (0xFFFFFFFFUL << CMD_TARGET_SHIFT)
+/* Interrupt Translation Table address */
+#define CMD_ITT_MASK (0xFFFFFFFFFF00UL)
+/* Valid command bit */
+#define CMD_VALID_SHIFT (63)
+#define CMD_VALID_MASK (1UL << CMD_VALID_SHIFT)
+
+#define ITS_TARGET_NONE 0xFBADBEEF
+
+/* LPI chunk owned by ITS device */
+struct lpi_chunk {
+ u_int lpi_base;
+ u_int lpi_free; /* First free LPI in set */
+ u_int lpi_num; /* Total number of LPIs in chunk */
+ u_int lpi_busy; /* Number of busy LPIs in chunk */
+};
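+
+/*
+ * Illustrative bookkeeping example (not from the GIC spec): a device
+ * granted 8 vectors with lpi_base = 100 that has handed out 3 of them
+ * has lpi_num = 8, lpi_busy = 3 and lpi_free = 5; the next vector
+ * handed out is lpi_base + lpi_num - lpi_free = 103.
+ */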
+
+/* ITS device */
+struct its_dev {
+ TAILQ_ENTRY(its_dev) entry;
+ /* PCI device */
+ device_t pci_dev;
+ /* Device ID (i.e. PCI device ID) */
+ uint32_t devid;
+ /* List of assigned LPIs */
+ struct lpi_chunk lpis;
+ /* Virtual address of ITT */
+ vm_offset_t itt;
+ size_t itt_size;
+};
+
+/*
+ * ITS command descriptor.
+ * Idea for command description passing taken from Linux.
+ */
+struct its_cmd_desc {
+ uint8_t cmd_type;
+
+ union {
+ struct {
+ struct its_dev *its_dev;
+ struct its_col *col;
+ uint32_t id;
+ } cmd_desc_movi;
+
+ struct {
+ struct its_col *col;
+ } cmd_desc_sync;
+
+ struct {
+ struct its_col *col;
+ uint8_t valid;
+ } cmd_desc_mapc;
+
+ struct {
+ struct its_dev *its_dev;
+ struct its_col *col;
+ uint32_t pid;
+ uint32_t id;
+ } cmd_desc_mapvi;
+
+ struct {
+ struct its_dev *its_dev;
+ struct its_col *col;
+ uint32_t pid;
+ } cmd_desc_mapi;
+
+ struct {
+ struct its_dev *its_dev;
+ uint8_t valid;
+ } cmd_desc_mapd;
+
+ struct {
+ struct its_dev *its_dev;
+ struct its_col *col;
+ uint32_t pid;
+ } cmd_desc_inv;
+
+ struct {
+ struct its_col *col;
+ } cmd_desc_invall;
+ };
+};
+
+/* ITS command. Each command is 32 bytes long */
+struct its_cmd {
+ uint64_t cmd_dword[4]; /* ITS command double word */
+};
+
+/* An ITS private table */
+struct its_ptable {
+ vm_offset_t ptab_vaddr;
+ unsigned long ptab_size;
+};
+
+/* ITS collection description. */
+struct its_col {
+ uint64_t col_target; /* Target Re-Distributor */
+ uint64_t col_id; /* Collection ID */
+};
+
+struct gicv3_its_irqsrc {
+ struct intr_irqsrc gi_isrc;
+ u_int gi_id;
+ u_int gi_lpi;
+ struct its_dev *gi_its_dev;
+ TAILQ_ENTRY(gicv3_its_irqsrc) gi_link;
+};
+
+struct gicv3_its_softc {
+ device_t dev;
+ struct intr_pic *sc_pic;
+ struct resource *sc_its_res;
+
+ cpuset_t sc_cpus;
+ u_int gic_irq_cpu;
+
+ struct its_ptable sc_its_ptab[GITS_BASER_NUM];
+ struct its_col *sc_its_cols[MAXCPU]; /* Per-CPU collections */
+
+ /*
+ * TODO: We should get these from the parent as we only want a
+ * single copy of each across the interrupt controller.
+ */
+ uint8_t *sc_conf_base;
+ vm_offset_t sc_pend_base[MAXCPU];
+
+ /* Command handling */
+ struct mtx sc_its_cmd_lock;
+ struct its_cmd *sc_its_cmd_base; /* Command circular buffer address */
+ size_t sc_its_cmd_next_idx;
+
+ vmem_t *sc_irq_alloc;
+ struct gicv3_its_irqsrc **sc_irqs;
+ u_int sc_irq_base;
+ u_int sc_irq_length;
+ u_int sc_irq_count;
+
+ struct mtx sc_its_dev_lock;
+ TAILQ_HEAD(its_dev_list, its_dev) sc_its_dev_list;
+ TAILQ_HEAD(free_irqs, gicv3_its_irqsrc) sc_free_irqs;
+
+#define ITS_FLAGS_CMDQ_FLUSH 0x00000001
+#define ITS_FLAGS_LPI_CONF_FLUSH 0x00000002
+#define ITS_FLAGS_ERRATA_CAVIUM_22375 0x00000004
+ u_int sc_its_flags;
+ bool trace_enable;
+};
+
+static void *conf_base;
+
+typedef void (its_quirk_func_t)(device_t);
+static its_quirk_func_t its_quirk_cavium_22375;
+
+static const struct {
+ const char *desc;
+ uint32_t iidr;
+ uint32_t iidr_mask;
+ its_quirk_func_t *func;
+} its_quirks[] = {
+ {
+ /* Cavium ThunderX Pass 1.x */
+ .desc = "Cavium ThunderX errata: 22375, 24313",
+ .iidr = GITS_IIDR_RAW(GITS_IIDR_IMPL_CAVIUM,
+ GITS_IIDR_PROD_THUNDER, GITS_IIDR_VAR_THUNDER_1, 0),
+ .iidr_mask = ~GITS_IIDR_REVISION_MASK,
+ .func = its_quirk_cavium_22375,
+ },
+};
+
+#define gic_its_read_4(sc, reg) \
+ bus_read_4((sc)->sc_its_res, (reg))
+#define gic_its_read_8(sc, reg) \
+ bus_read_8((sc)->sc_its_res, (reg))
+
+#define gic_its_write_4(sc, reg, val) \
+ bus_write_4((sc)->sc_its_res, (reg), (val))
+#define gic_its_write_8(sc, reg, val) \
+ bus_write_8((sc)->sc_its_res, (reg), (val))
+
+static device_attach_t gicv3_its_attach;
+static device_detach_t gicv3_its_detach;
+
+static pic_disable_intr_t gicv3_its_disable_intr;
+static pic_enable_intr_t gicv3_its_enable_intr;
+static pic_map_intr_t gicv3_its_map_intr;
+static pic_setup_intr_t gicv3_its_setup_intr;
+static pic_post_filter_t gicv3_its_post_filter;
+static pic_post_ithread_t gicv3_its_post_ithread;
+static pic_pre_ithread_t gicv3_its_pre_ithread;
+static pic_bind_intr_t gicv3_its_bind_intr;
+#ifdef SMP
+static pic_init_secondary_t gicv3_its_init_secondary;
+#endif
+static msi_alloc_msi_t gicv3_its_alloc_msi;
+static msi_release_msi_t gicv3_its_release_msi;
+static msi_alloc_msix_t gicv3_its_alloc_msix;
+static msi_release_msix_t gicv3_its_release_msix;
+static msi_map_msi_t gicv3_its_map_msi;
+
+static void its_cmd_movi(device_t, struct gicv3_its_irqsrc *);
+static void its_cmd_mapc(device_t, struct its_col *, uint8_t);
+static void its_cmd_mapti(device_t, struct gicv3_its_irqsrc *);
+static void its_cmd_mapd(device_t, struct its_dev *, uint8_t);
+static void its_cmd_inv(device_t, struct its_dev *, struct gicv3_its_irqsrc *);
+static void its_cmd_invall(device_t, struct its_col *);
+
+static device_method_t gicv3_its_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_detach, gicv3_its_detach),
+
+ /* Interrupt controller interface */
+ DEVMETHOD(pic_disable_intr, gicv3_its_disable_intr),
+ DEVMETHOD(pic_enable_intr, gicv3_its_enable_intr),
+ DEVMETHOD(pic_map_intr, gicv3_its_map_intr),
+ DEVMETHOD(pic_setup_intr, gicv3_its_setup_intr),
+ DEVMETHOD(pic_post_filter, gicv3_its_post_filter),
+ DEVMETHOD(pic_post_ithread, gicv3_its_post_ithread),
+ DEVMETHOD(pic_pre_ithread, gicv3_its_pre_ithread),
+#ifdef SMP
+ DEVMETHOD(pic_bind_intr, gicv3_its_bind_intr),
+ DEVMETHOD(pic_init_secondary, gicv3_its_init_secondary),
+#endif
+
+ /* MSI/MSI-X */
+ DEVMETHOD(msi_alloc_msi, gicv3_its_alloc_msi),
+ DEVMETHOD(msi_release_msi, gicv3_its_release_msi),
+ DEVMETHOD(msi_alloc_msix, gicv3_its_alloc_msix),
+ DEVMETHOD(msi_release_msix, gicv3_its_release_msix),
+ DEVMETHOD(msi_map_msi, gicv3_its_map_msi),
+
+ /* End */
+ DEVMETHOD_END
+};
+
+static DEFINE_CLASS_0(gic, gicv3_its_driver, gicv3_its_methods,
+ sizeof(struct gicv3_its_softc));
+
+static void
+gicv3_its_cmdq_init(struct gicv3_its_softc *sc)
+{
+ vm_paddr_t cmd_paddr;
+ uint64_t reg, tmp;
+
+ /* Set up the command circular buffer */
+ sc->sc_its_cmd_base = contigmalloc(ITS_CMDQ_SIZE, M_GICV3_ITS,
+ M_WAITOK | M_ZERO, 0, (1ul << 48) - 1, ITS_CMDQ_ALIGN, 0);
+ sc->sc_its_cmd_next_idx = 0;
+
+ cmd_paddr = vtophys(sc->sc_its_cmd_base);
+
+ /* Set the base of the command buffer */
+ reg = GITS_CBASER_VALID |
+ (GITS_CBASER_CACHE_NIWAWB << GITS_CBASER_CACHE_SHIFT) |
+ cmd_paddr | (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT) |
+ (ITS_CMDQ_SIZE / 4096 - 1);
+ gic_its_write_8(sc, GITS_CBASER, reg);
+
+ /* Read back to check for fixed value fields */
+ tmp = gic_its_read_8(sc, GITS_CBASER);
+
+ if ((tmp & GITS_CBASER_SHARE_MASK) !=
+ (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT)) {
+ /* Check if the hardware reported non-shareable */
+ if ((tmp & GITS_CBASER_SHARE_MASK) ==
+ (GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT)) {
+ /* If so remove the cache attribute */
+ reg &= ~GITS_CBASER_CACHE_MASK;
+ reg &= ~GITS_CBASER_SHARE_MASK;
+ /* Set to Non-cacheable, Non-shareable */
+ reg |= GITS_CBASER_CACHE_NIN << GITS_CBASER_CACHE_SHIFT;
+ reg |= GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT;
+
+ gic_its_write_8(sc, GITS_CBASER, reg);
+ }
+
+ /* The command queue has to be flushed after each command */
+ sc->sc_its_flags |= ITS_FLAGS_CMDQ_FLUSH;
+ }
+
+ /* Get the next command from the start of the buffer */
+ gic_its_write_8(sc, GITS_CWRITER, 0x0);
+}
+
+static int
+gicv3_its_table_init(device_t dev, struct gicv3_its_softc *sc)
+{
+ vm_offset_t table;
+ vm_paddr_t paddr;
+ uint64_t cache, reg, share, tmp, type;
+ size_t esize, its_tbl_size, nidents, nitspages, npages;
+ int i, page_size;
+ int devbits;
+
+ if ((sc->sc_its_flags & ITS_FLAGS_ERRATA_CAVIUM_22375) != 0) {
+ /*
+ * GITS_TYPER[17:13] of ThunderX reports that device IDs
+ * are to be 21 bits in length. The entry size of the ITS
+ * table can be read from GITS_BASERn[52:48] and on ThunderX
+ * is supposed to be 8 bytes in length (for device table).
+ * Finally the page size that is to be used by ITS to access
+ * this table will be set to 64KB.
+ *
+ * This gives 0x200000 entries of 0x8 bytes each, covered by
+ * 256 pages, each 64KB in size. The number of pages (minus 1)
+ * should then be written to GITS_BASERn[7:0]. In that case the
+ * value would be 0xFF, but the maximum value the ThunderX HW
+ * accepts is 0xFD.
+ *
+ * Arbitrarily set the number of device ID bits to 20 in order
+ * to limit the number of entries in the ITS device table to
+ * 0x100000 and the table size to 8MB.
+ */
+ devbits = 20;
+ cache = 0;
+ } else {
+ devbits = GITS_TYPER_DEVB(gic_its_read_8(sc, GITS_TYPER));
+ cache = GITS_BASER_CACHE_WAWB;
+ }
+ share = GITS_BASER_SHARE_IS;
+ page_size = PAGE_SIZE_64K;
+
+ for (i = 0; i < GITS_BASER_NUM; i++) {
+ reg = gic_its_read_8(sc, GITS_BASER(i));
+ /* The type of table */
+ type = GITS_BASER_TYPE(reg);
+ /* The table entry size */
+ esize = GITS_BASER_ESIZE(reg);
+
+ switch (type) {
+ case GITS_BASER_TYPE_DEV:
+ nidents = (1 << devbits);
+ its_tbl_size = esize * nidents;
+ its_tbl_size = roundup2(its_tbl_size, PAGE_SIZE_64K);
+ break;
+ case GITS_BASER_TYPE_VP:
+ case GITS_BASER_TYPE_PP: /* Undocumented? */
+ case GITS_BASER_TYPE_IC:
+ its_tbl_size = page_size;
+ break;
+ default:
+ continue;
+ }
+ npages = howmany(its_tbl_size, PAGE_SIZE);
+
+ /* Allocate the table */
+ table = (vm_offset_t)contigmalloc(npages * PAGE_SIZE,
+ M_GICV3_ITS, M_WAITOK | M_ZERO, 0, (1ul << 48) - 1,
+ PAGE_SIZE_64K, 0);
+
+ sc->sc_its_ptab[i].ptab_vaddr = table;
+ sc->sc_its_ptab[i].ptab_size = npages * PAGE_SIZE;
+
+ paddr = vtophys(table);
+
+ while (1) {
+ nitspages = howmany(its_tbl_size, page_size);
+
+ /* Clear the fields we will be setting */
+ reg &= ~(GITS_BASER_VALID |
+ GITS_BASER_CACHE_MASK | GITS_BASER_TYPE_MASK |
+ GITS_BASER_ESIZE_MASK | GITS_BASER_PA_MASK |
+ GITS_BASER_SHARE_MASK | GITS_BASER_PSZ_MASK |
+ GITS_BASER_SIZE_MASK);
+ /* Set the new values */
+ reg |= GITS_BASER_VALID |
+ (cache << GITS_BASER_CACHE_SHIFT) |
+ (type << GITS_BASER_TYPE_SHIFT) |
+ ((esize - 1) << GITS_BASER_ESIZE_SHIFT) |
+ paddr | (share << GITS_BASER_SHARE_SHIFT) |
+ (nitspages - 1);
+
+ switch (page_size) {
+ case PAGE_SIZE: /* 4KB */
+ reg |=
+ GITS_BASER_PSZ_4K << GITS_BASER_PSZ_SHIFT;
+ break;
+ case PAGE_SIZE_16K: /* 16KB */
+ reg |=
+ GITS_BASER_PSZ_16K << GITS_BASER_PSZ_SHIFT;
+ break;
+ case PAGE_SIZE_64K: /* 64KB */
+ reg |=
+ GITS_BASER_PSZ_64K << GITS_BASER_PSZ_SHIFT;
+ break;
+ }
+
+ gic_its_write_8(sc, GITS_BASER(i), reg);
+
+ /* Read back to check */
+ tmp = gic_its_read_8(sc, GITS_BASER(i));
+
+ /* Do the shareability masks line up? */
+ if ((tmp & GITS_BASER_SHARE_MASK) !=
+ (reg & GITS_BASER_SHARE_MASK)) {
+ share = (tmp & GITS_BASER_SHARE_MASK) >>
+ GITS_BASER_SHARE_SHIFT;
+ continue;
+ }
+
+ if ((tmp & GITS_BASER_PSZ_MASK) !=
+ (reg & GITS_BASER_PSZ_MASK)) {
+ switch (page_size) {
+ case PAGE_SIZE_16K:
+ page_size = PAGE_SIZE;
+ continue;
+ case PAGE_SIZE_64K:
+ page_size = PAGE_SIZE_16K;
+ continue;
+ }
+ }
+
+ if (tmp != reg) {
+ device_printf(dev, "GITS_BASER%d: "
+ "unable to be updated: %lx != %lx\n",
+ i, reg, tmp);
+ return (ENXIO);
+ }
+
+ /* We should have made all needed changes */
+ break;
+ }
+ }
+
+ return (0);
+}
+
+static void
+gicv3_its_conftable_init(struct gicv3_its_softc *sc)
+{
+ void *conf_table;
+
+ conf_table = atomic_load_ptr(&conf_base);
+ if (conf_table == NULL) {
+ conf_table = contigmalloc(LPI_CONFTAB_SIZE,
+ M_GICV3_ITS, M_WAITOK, 0, LPI_CONFTAB_MAX_ADDR,
+ LPI_CONFTAB_ALIGN, 0);
+
+ if (atomic_cmpset_ptr((uintptr_t *)&conf_base,
+ (uintptr_t)NULL, (uintptr_t)conf_table) == 0) {
+ contigfree(conf_table, LPI_CONFTAB_SIZE, M_GICV3_ITS);
+ conf_table = atomic_load_ptr(&conf_base);
+ }
+ }
+ sc->sc_conf_base = conf_table;
+
+ /* Set the default configuration */
+ memset(sc->sc_conf_base, GIC_PRIORITY_MAX | LPI_CONF_GROUP1,
+ LPI_CONFTAB_SIZE);
+
+ /* Flush the table to memory */
+ cpu_dcache_wb_range((vm_offset_t)sc->sc_conf_base, LPI_CONFTAB_SIZE);
+}
+
+static void
+gicv3_its_pendtables_init(struct gicv3_its_softc *sc)
+{
+ int i;
+
+ for (i = 0; i <= mp_maxid; i++) {
+ if (CPU_ISSET(i, &sc->sc_cpus) == 0)
+ continue;
+
+ sc->sc_pend_base[i] = (vm_offset_t)contigmalloc(
+ LPI_PENDTAB_SIZE, M_GICV3_ITS, M_WAITOK | M_ZERO,
+ 0, LPI_PENDTAB_MAX_ADDR, LPI_PENDTAB_ALIGN, 0);
+
+ /* Flush so the ITS can see the memory */
+ cpu_dcache_wb_range((vm_offset_t)sc->sc_pend_base[i],
+ LPI_PENDTAB_SIZE);
+ }
+}
+
+static void
+its_init_cpu_lpi(device_t dev, struct gicv3_its_softc *sc)
+{
+ device_t gicv3;
+ uint64_t xbaser, tmp;
+ uint32_t ctlr;
+ u_int cpuid;
+
+ gicv3 = device_get_parent(dev);
+ cpuid = PCPU_GET(cpuid);
+
+ /* Disable LPIs */
+ ctlr = gic_r_read_4(gicv3, GICR_CTLR);
+ ctlr &= ~GICR_CTLR_LPI_ENABLE;
+ gic_r_write_4(gicv3, GICR_CTLR, ctlr);
+
+ /* Make sure changes are observable by the GIC */
+ dsb(sy);
+
+ /*
+ * Set the redistributor base
+ */
+ xbaser = vtophys(sc->sc_conf_base) |
+ (GICR_PROPBASER_SHARE_IS << GICR_PROPBASER_SHARE_SHIFT) |
+ (GICR_PROPBASER_CACHE_NIWAWB << GICR_PROPBASER_CACHE_SHIFT) |
+ (flsl(LPI_CONFTAB_SIZE | GIC_FIRST_LPI) - 1);
+ gic_r_write_8(gicv3, GICR_PROPBASER, xbaser);
+
+ /* Check the cache attributes we set */
+ tmp = gic_r_read_8(gicv3, GICR_PROPBASER);
+
+ if ((tmp & GICR_PROPBASER_SHARE_MASK) !=
+ (xbaser & GICR_PROPBASER_SHARE_MASK)) {
+ if ((tmp & GICR_PROPBASER_SHARE_MASK) ==
+ (GICR_PROPBASER_SHARE_NS << GICR_PROPBASER_SHARE_SHIFT)) {
+ /* We need to mark as non-cacheable */
+ xbaser &= ~(GICR_PROPBASER_SHARE_MASK |
+ GICR_PROPBASER_CACHE_MASK);
+ /* Non-cacheable */
+ xbaser |= GICR_PROPBASER_CACHE_NIN <<
+ GICR_PROPBASER_CACHE_SHIFT;
+ /* Non-shareable */
+ xbaser |= GICR_PROPBASER_SHARE_NS <<
+ GICR_PROPBASER_SHARE_SHIFT;
+ gic_r_write_8(gicv3, GICR_PROPBASER, xbaser);
+ }
+ sc->sc_its_flags |= ITS_FLAGS_LPI_CONF_FLUSH;
+ }
+
+ /*
+ * Set the LPI pending table base
+ */
+ xbaser = vtophys(sc->sc_pend_base[cpuid]) |
+ (GICR_PENDBASER_CACHE_NIWAWB << GICR_PENDBASER_CACHE_SHIFT) |
+ (GICR_PENDBASER_SHARE_IS << GICR_PENDBASER_SHARE_SHIFT);
+
+ gic_r_write_8(gicv3, GICR_PENDBASER, xbaser);
+
+ tmp = gic_r_read_8(gicv3, GICR_PENDBASER);
+
+ if ((tmp & GICR_PENDBASER_SHARE_MASK) ==
+ (GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT)) {
+ /* Clear the cache and shareability bits */
+ xbaser &= ~(GICR_PENDBASER_CACHE_MASK |
+ GICR_PENDBASER_SHARE_MASK);
+ /* Mark as non-shareable */
+ xbaser |= GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT;
+ /* And non-cacheable */
+ xbaser |= GICR_PENDBASER_CACHE_NIN <<
+ GICR_PENDBASER_CACHE_SHIFT;
+ }
+
+ /* Enable LPIs */
+ ctlr = gic_r_read_4(gicv3, GICR_CTLR);
+ ctlr |= GICR_CTLR_LPI_ENABLE;
+ gic_r_write_4(gicv3, GICR_CTLR, ctlr);
+
+ /* Make sure the GIC has seen everything */
+ dsb(sy);
+}
+
+static int
+its_init_cpu(device_t dev, struct gicv3_its_softc *sc)
+{
+ device_t gicv3;
+ vm_paddr_t target;
+ u_int cpuid;
+ struct redist_pcpu *rpcpu;
+
+ gicv3 = device_get_parent(dev);
+ cpuid = PCPU_GET(cpuid);
+ if (!CPU_ISSET(cpuid, &sc->sc_cpus))
+ return (0);
+
+ /* Check if the ITS is enabled on this CPU */
+ if ((gic_r_read_4(gicv3, GICR_TYPER) & GICR_TYPER_PLPIS) == 0)
+ return (ENXIO);
+
+ rpcpu = gicv3_get_redist(dev);
+
+ /* Do per-cpu LPI init once */
+ if (!rpcpu->lpi_enabled) {
+ its_init_cpu_lpi(dev, sc);
+ rpcpu->lpi_enabled = true;
+ }
+
+ if ((gic_its_read_8(sc, GITS_TYPER) & GITS_TYPER_PTA) != 0) {
+ /* This ITS wants the redistributor physical address */
+ target = vtophys(rman_get_virtual(&rpcpu->res));
+ } else {
+ /* This ITS wants the unique processor number */
+ target = GICR_TYPER_CPUNUM(gic_r_read_8(gicv3, GICR_TYPER)) <<
+ CMD_TARGET_SHIFT;
+ }
+
+ sc->sc_its_cols[cpuid]->col_target = target;
+ sc->sc_its_cols[cpuid]->col_id = cpuid;
+
+ its_cmd_mapc(dev, sc->sc_its_cols[cpuid], 1);
+ its_cmd_invall(dev, sc->sc_its_cols[cpuid]);
+
+ return (0);
+}
+
+static int
+gicv3_its_sysctl_trace_enable(SYSCTL_HANDLER_ARGS)
+{
+ struct gicv3_its_softc *sc;
+ int rv;
+
+ sc = arg1;
+
+ rv = sysctl_handle_bool(oidp, &sc->trace_enable, 0, req);
+ if (rv != 0 || req->newptr == NULL)
+ return (rv);
+ if (sc->trace_enable)
+ gic_its_write_8(sc, GITS_TRKCTLR, 3);
+ else
+ gic_its_write_8(sc, GITS_TRKCTLR, 0);
+
+ return (0);
+}
+
+static int
+gicv3_its_sysctl_trace_regs(SYSCTL_HANDLER_ARGS)
+{
+ struct gicv3_its_softc *sc;
+ struct sbuf *sb;
+ int err;
+
+ sc = arg1;
+ sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ if (sb == NULL) {
+ device_printf(sc->dev, "Could not allocate sbuf for output.\n");
+ return (ENOMEM);
+ }
+ sbuf_cat(sb, "\n");
+ sbuf_printf(sb, "GITS_TRKCTLR: 0x%08X\n",
+ gic_its_read_4(sc, GITS_TRKCTLR));
+ sbuf_printf(sb, "GITS_TRKR: 0x%08X\n",
+ gic_its_read_4(sc, GITS_TRKR));
+ sbuf_printf(sb, "GITS_TRKDIDR: 0x%08X\n",
+ gic_its_read_4(sc, GITS_TRKDIDR));
+ sbuf_printf(sb, "GITS_TRKPIDR: 0x%08X\n",
+ gic_its_read_4(sc, GITS_TRKPIDR));
+ sbuf_printf(sb, "GITS_TRKVIDR: 0x%08X\n",
+ gic_its_read_4(sc, GITS_TRKVIDR));
+ sbuf_printf(sb, "GITS_TRKTGTR: 0x%08X\n",
+ gic_its_read_4(sc, GITS_TRKTGTR));
+
+ err = sbuf_finish(sb);
+ if (err)
+ device_printf(sc->dev, "Error finishing sbuf: %d\n", err);
+ sbuf_delete(sb);
+ return (err);
+}
+
+static int
+gicv3_its_init_sysctl(struct gicv3_its_softc *sc)
+{
+ struct sysctl_oid *oid, *child;
+ struct sysctl_ctx_list *ctx_list;
+
+ ctx_list = device_get_sysctl_ctx(sc->dev);
+ child = device_get_sysctl_tree(sc->dev);
+ oid = SYSCTL_ADD_NODE(ctx_list,
+ SYSCTL_CHILDREN(child), OID_AUTO, "tracing",
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Messages tracing");
+ if (oid == NULL)
+ return (ENXIO);
+
+ /* Add registers */
+ SYSCTL_ADD_PROC(ctx_list,
+ SYSCTL_CHILDREN(oid), OID_AUTO, "enable",
+ CTLTYPE_U8 | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
+ gicv3_its_sysctl_trace_enable, "CU", "Enable tracing");
+ SYSCTL_ADD_PROC(ctx_list,
+ SYSCTL_CHILDREN(oid), OID_AUTO, "capture",
+ CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
+ gicv3_its_sysctl_trace_regs, "", "Captured tracing registers.");
+
+ return (0);
+}
+
+static int
+gicv3_its_attach(device_t dev)
+{
+ struct gicv3_its_softc *sc;
+ uint32_t iidr;
+ int domain, err, i, rid;
+
+ sc = device_get_softc(dev);
+
+ sc->sc_irq_length = gicv3_get_nirqs(dev);
+ sc->sc_irq_base = GIC_FIRST_LPI;
+ sc->sc_irq_base += device_get_unit(dev) * sc->sc_irq_length;
+
+ rid = 0;
+ sc->sc_its_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
+ RF_ACTIVE);
+ if (sc->sc_its_res == NULL) {
+ device_printf(dev, "Could not allocate memory\n");
+ return (ENXIO);
+ }
+
+ iidr = gic_its_read_4(sc, GITS_IIDR);
+ for (i = 0; i < nitems(its_quirks); i++) {
+ if ((iidr & its_quirks[i].iidr_mask) == its_quirks[i].iidr) {
+ if (bootverbose) {
+ device_printf(dev, "Applying %s\n",
+ its_quirks[i].desc);
+ }
+ its_quirks[i].func(dev);
+ break;
+ }
+ }
+
+ /* Allocate the private tables */
+ err = gicv3_its_table_init(dev, sc);
+ if (err != 0)
+ return (err);
+
+ /* Protects access to the device list */
+ mtx_init(&sc->sc_its_dev_lock, "ITS device lock", NULL, MTX_SPIN);
+
+ /* Protects access to the ITS command circular buffer. */
+ mtx_init(&sc->sc_its_cmd_lock, "ITS cmd lock", NULL, MTX_SPIN);
+
+ CPU_ZERO(&sc->sc_cpus);
+ if (bus_get_domain(dev, &domain) == 0) {
+ if (domain < MAXMEMDOM)
+ CPU_COPY(&cpuset_domain[domain], &sc->sc_cpus);
+ } else {
+ CPU_COPY(&all_cpus, &sc->sc_cpus);
+ }
+
+ /* Allocate the command circular buffer */
+ gicv3_its_cmdq_init(sc);
+
+ /* Allocate the per-CPU collections */
+ for (int cpu = 0; cpu <= mp_maxid; cpu++)
+ if (CPU_ISSET(cpu, &sc->sc_cpus) != 0)
+ sc->sc_its_cols[cpu] = malloc(
+ sizeof(*sc->sc_its_cols[0]), M_GICV3_ITS,
+ M_WAITOK | M_ZERO);
+ else
+ sc->sc_its_cols[cpu] = NULL;
+
+ /* Enable the ITS */
+ gic_its_write_4(sc, GITS_CTLR,
+ gic_its_read_4(sc, GITS_CTLR) | GITS_CTLR_EN);
+
+ /* Create the LPI configuration table */
+ gicv3_its_conftable_init(sc);
+
+ /* And the pending tables */
+ gicv3_its_pendtables_init(sc);
+
+ /* Enable LPIs on this CPU */
+ its_init_cpu(dev, sc);
+
+ TAILQ_INIT(&sc->sc_its_dev_list);
+ TAILQ_INIT(&sc->sc_free_irqs);
+
+ /*
+ * Create the vmem object to allocate INTRNG IRQs from. We try to
+ * use all IRQs not already used by the GICv3.
+ * XXX: This assumes there are no other interrupt controllers in the
+ * system.
+ */
+ sc->sc_irq_alloc = vmem_create(device_get_nameunit(dev), 0,
+ gicv3_get_nirqs(dev), 1, 0, M_FIRSTFIT | M_WAITOK);
+
+ sc->sc_irqs = malloc(sizeof(*sc->sc_irqs) * sc->sc_irq_length,
+ M_GICV3_ITS, M_WAITOK | M_ZERO);
+
+ /* For GIC-500 install tracking sysctls. */
+ if ((iidr & (GITS_IIDR_PRODUCT_MASK | GITS_IIDR_IMPLEMENTOR_MASK)) ==
+ GITS_IIDR_RAW(GITS_IIDR_IMPL_ARM, GITS_IIDR_PROD_GIC500, 0, 0))
+ gicv3_its_init_sysctl(sc);
+
+ return (0);
+}
+
+static int
+gicv3_its_detach(device_t dev)
+{
+
+ return (ENXIO);
+}
+
+static void
+its_quirk_cavium_22375(device_t dev)
+{
+ struct gicv3_its_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->sc_its_flags |= ITS_FLAGS_ERRATA_CAVIUM_22375;
+}
+
+static void
+gicv3_its_disable_intr(device_t dev, struct intr_irqsrc *isrc)
+{
+ struct gicv3_its_softc *sc;
+ struct gicv3_its_irqsrc *girq;
+ uint8_t *conf;
+
+ sc = device_get_softc(dev);
+ girq = (struct gicv3_its_irqsrc *)isrc;
+ conf = sc->sc_conf_base;
+
+ conf[girq->gi_lpi] &= ~LPI_CONF_ENABLE;
+
+ if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
+ /* Clean D-cache under the configuration byte. */
+ cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_lpi], 1);
+ } else {
+ /* DSB inner shareable, store */
+ dsb(ishst);
+ }
+
+ its_cmd_inv(dev, girq->gi_its_dev, girq);
+}
+
+static void
+gicv3_its_enable_intr(device_t dev, struct intr_irqsrc *isrc)
+{
+ struct gicv3_its_softc *sc;
+ struct gicv3_its_irqsrc *girq;
+ uint8_t *conf;
+
+ sc = device_get_softc(dev);
+ girq = (struct gicv3_its_irqsrc *)isrc;
+ conf = sc->sc_conf_base;
+
+ conf[girq->gi_lpi] |= LPI_CONF_ENABLE;
+
+ if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
+ /* Clean D-cache under the configuration byte. */
+ cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_lpi], 1);
+ } else {
+ /* DSB inner shareable, store */
+ dsb(ishst);
+ }
+
+ its_cmd_inv(dev, girq->gi_its_dev, girq);
+}
+
+static int
+gicv3_its_intr(void *arg, uintptr_t irq)
+{
+ struct gicv3_its_softc *sc = arg;
+ struct gicv3_its_irqsrc *girq;
+ struct trapframe *tf;
+
+ irq -= sc->sc_irq_base;
+ girq = sc->sc_irqs[irq];
+ if (girq == NULL)
+ panic("gicv3_its_intr: Invalid interrupt %ld",
+ irq + sc->sc_irq_base);
+
+ tf = curthread->td_intr_frame;
+ intr_isrc_dispatch(&girq->gi_isrc, tf);
+ return (FILTER_HANDLED);
+}
+
+static void
+gicv3_its_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
+{
+ struct gicv3_its_irqsrc *girq;
+ struct gicv3_its_softc *sc;
+
+ sc = device_get_softc(dev);
+ girq = (struct gicv3_its_irqsrc *)isrc;
+ gicv3_its_disable_intr(dev, isrc);
+ gic_icc_write(EOIR1, girq->gi_lpi + GIC_FIRST_LPI);
+}
+
+static void
+gicv3_its_post_ithread(device_t dev, struct intr_irqsrc *isrc)
+{
+
+ gicv3_its_enable_intr(dev, isrc);
+}
+
+static void
+gicv3_its_post_filter(device_t dev, struct intr_irqsrc *isrc)
+{
+ struct gicv3_its_irqsrc *girq;
+ struct gicv3_its_softc *sc;
+
+ sc = device_get_softc(dev);
+ girq = (struct gicv3_its_irqsrc *)isrc;
+ gic_icc_write(EOIR1, girq->gi_lpi + GIC_FIRST_LPI);
+}
+
+static int
+gicv3_its_select_cpu(device_t dev, struct intr_irqsrc *isrc)
+{
+ struct gicv3_its_softc *sc;
+
+ sc = device_get_softc(dev);
+ if (CPU_EMPTY(&isrc->isrc_cpu)) {
+ sc->gic_irq_cpu = intr_irq_next_cpu(sc->gic_irq_cpu,
+ &sc->sc_cpus);
+ CPU_SETOF(sc->gic_irq_cpu, &isrc->isrc_cpu);
+ }
+
+ return (0);
+}
+
+static int
+gicv3_its_bind_intr(device_t dev, struct intr_irqsrc *isrc)
+{
+ struct gicv3_its_irqsrc *girq;
+
+ gicv3_its_select_cpu(dev, isrc);
+
+ girq = (struct gicv3_its_irqsrc *)isrc;
+ its_cmd_movi(dev, girq);
+ return (0);
+}
+
+static int
+gicv3_its_map_intr(device_t dev, struct intr_map_data *data,
+ struct intr_irqsrc **isrcp)
+{
+
+ /*
+ * This should never happen; we only call this function to map
+ * interrupts found before the controller driver is ready.
+ */
+ panic("gicv3_its_map_intr: Unable to map a MSI interrupt");
+}
+
+static int
+gicv3_its_setup_intr(device_t dev, struct intr_irqsrc *isrc,
+ struct resource *res, struct intr_map_data *data)
+{
+
+ /* Bind the interrupt to a CPU */
+ gicv3_its_bind_intr(dev, isrc);
+
+ return (0);
+}
+
+#ifdef SMP
+static void
+gicv3_its_init_secondary(device_t dev)
+{
+ struct gicv3_its_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ /*
+ * This is fatal as otherwise we may bind interrupts to this CPU.
+ * We need a way to tell the interrupt framework to only bind to a
+ * subset of given CPUs when it performs the shuffle.
+ */
+ if (its_init_cpu(dev, sc) != 0)
+ panic("gicv3_its_init_secondary: No usable ITS on CPU%d",
+ PCPU_GET(cpuid));
+}
+#endif
+
+static uint32_t
+its_get_devid(device_t pci_dev)
+{
+ uintptr_t id;
+
+ if (pci_get_id(pci_dev, PCI_ID_MSI, &id) != 0)
+ panic("its_get_devid: Unable to get the MSI DeviceID");
+
+ return (id);
+}
+
+static struct its_dev *
+its_device_find(device_t dev, device_t child)
+{
+ struct gicv3_its_softc *sc;
+ struct its_dev *its_dev = NULL;
+
+ sc = device_get_softc(dev);
+
+ mtx_lock_spin(&sc->sc_its_dev_lock);
+ TAILQ_FOREACH(its_dev, &sc->sc_its_dev_list, entry) {
+ if (its_dev->pci_dev == child)
+ break;
+ }
+ mtx_unlock_spin(&sc->sc_its_dev_lock);
+
+ return (its_dev);
+}
+
+static struct its_dev *
+its_device_get(device_t dev, device_t child, u_int nvecs)
+{
+ struct gicv3_its_softc *sc;
+ struct its_dev *its_dev;
+ vmem_addr_t irq_base;
+ size_t esize;
+
+ sc = device_get_softc(dev);
+
+ its_dev = its_device_find(dev, child);
+ if (its_dev != NULL)
+ return (its_dev);
+
+ its_dev = malloc(sizeof(*its_dev), M_GICV3_ITS, M_NOWAIT | M_ZERO);
+ if (its_dev == NULL)
+ return (NULL);
+
+ its_dev->pci_dev = child;
+ its_dev->devid = its_get_devid(child);
+
+ its_dev->lpis.lpi_busy = 0;
+ its_dev->lpis.lpi_num = nvecs;
+ its_dev->lpis.lpi_free = nvecs;
+
+ if (vmem_alloc(sc->sc_irq_alloc, nvecs, M_FIRSTFIT | M_NOWAIT,
+ &irq_base) != 0) {
+ free(its_dev, M_GICV3_ITS);
+ return (NULL);
+ }
+ its_dev->lpis.lpi_base = irq_base;
+
+ /* Get ITT entry size */
+ esize = GITS_TYPER_ITTES(gic_its_read_8(sc, GITS_TYPER));
+
+ /*
+ * Allocate ITT for this device.
+ * The PA has to be 256-byte aligned, with room for at least two
+ * entries per device.
+ */
+ its_dev->itt_size = roundup2(MAX(nvecs, 2) * esize, 256);
+ its_dev->itt = (vm_offset_t)contigmalloc(its_dev->itt_size,
+ M_GICV3_ITS, M_NOWAIT | M_ZERO, 0, LPI_INT_TRANS_TAB_MAX_ADDR,
+ LPI_INT_TRANS_TAB_ALIGN, 0);
+ if (its_dev->itt == 0) {
+ vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base, nvecs);
+ free(its_dev, M_GICV3_ITS);
+ return (NULL);
+ }
+
+ mtx_lock_spin(&sc->sc_its_dev_lock);
+ TAILQ_INSERT_TAIL(&sc->sc_its_dev_list, its_dev, entry);
+ mtx_unlock_spin(&sc->sc_its_dev_lock);
+
+ /* Map device to its ITT */
+ its_cmd_mapd(dev, its_dev, 1);
+
+ return (its_dev);
+}
+
+static void
+its_device_release(device_t dev, struct its_dev *its_dev)
+{
+ struct gicv3_its_softc *sc;
+
+ KASSERT(its_dev->lpis.lpi_busy == 0,
+ ("its_device_release: Trying to release an inuse ITS device"));
+
+ /* Unmap device in ITS */
+ its_cmd_mapd(dev, its_dev, 0);
+
+ sc = device_get_softc(dev);
+
+ /* Remove the device from the list of devices */
+ mtx_lock_spin(&sc->sc_its_dev_lock);
+ TAILQ_REMOVE(&sc->sc_its_dev_list, its_dev, entry);
+ mtx_unlock_spin(&sc->sc_its_dev_lock);
+
+ /* Free ITT */
+ KASSERT(its_dev->itt != 0, ("Invalid ITT in valid ITS device"));
+ contigfree((void *)its_dev->itt, its_dev->itt_size, M_GICV3_ITS);
+
+ /* Free the IRQ allocation */
+ vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base,
+ its_dev->lpis.lpi_num);
+
+ free(its_dev, M_GICV3_ITS);
+}
+
+static struct gicv3_its_irqsrc *
+gicv3_its_alloc_irqsrc(device_t dev, struct gicv3_its_softc *sc, u_int irq)
+{
+ struct gicv3_its_irqsrc *girq = NULL;
+
+ KASSERT(sc->sc_irqs[irq] == NULL,
+ ("%s: Interrupt %u already allocated", __func__, irq));
+ mtx_lock_spin(&sc->sc_its_dev_lock);
+ if (!TAILQ_EMPTY(&sc->sc_free_irqs)) {
+ girq = TAILQ_FIRST(&sc->sc_free_irqs);
+ TAILQ_REMOVE(&sc->sc_free_irqs, girq, gi_link);
+ }
+ mtx_unlock_spin(&sc->sc_its_dev_lock);
+ if (girq == NULL) {
+ girq = malloc(sizeof(*girq), M_GICV3_ITS,
+ M_NOWAIT | M_ZERO);
+ if (girq == NULL)
+ return (NULL);
+ girq->gi_id = -1;
+ if (intr_isrc_register(&girq->gi_isrc, dev, 0,
+ "%s,%u", device_get_nameunit(dev), irq) != 0) {
+ free(girq, M_GICV3_ITS);
+ return (NULL);
+ }
+ }
+ girq->gi_lpi = irq + sc->sc_irq_base - GIC_FIRST_LPI;
+ sc->sc_irqs[irq] = girq;
+
+ return (girq);
+}
+
+static void
+gicv3_its_release_irqsrc(struct gicv3_its_softc *sc,
+ struct gicv3_its_irqsrc *girq)
+{
+ u_int irq;
+
+ mtx_assert(&sc->sc_its_dev_lock, MA_OWNED);
+
+ irq = girq->gi_lpi + GIC_FIRST_LPI - sc->sc_irq_base;
+ sc->sc_irqs[irq] = NULL;
+
+ girq->gi_id = -1;
+ girq->gi_its_dev = NULL;
+ TAILQ_INSERT_TAIL(&sc->sc_free_irqs, girq, gi_link);
+}
+
+static int
+gicv3_its_alloc_msi(device_t dev, device_t child, int count, int maxcount,
+ device_t *pic, struct intr_irqsrc **srcs)
+{
+ struct gicv3_its_softc *sc;
+ struct gicv3_its_irqsrc *girq;
+ struct its_dev *its_dev;
+ u_int irq;
+ int i;
+
+ its_dev = its_device_get(dev, child, count);
+ if (its_dev == NULL)
+ return (ENXIO);
+
+ KASSERT(its_dev->lpis.lpi_free >= count,
+ ("gicv3_its_alloc_msi: No free LPIs"));
+ sc = device_get_softc(dev);
+ irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num -
+ its_dev->lpis.lpi_free;
+
+ /* Allocate the irqsrc for each MSI */
+ for (i = 0; i < count; i++, irq++) {
+ its_dev->lpis.lpi_free--;
+ srcs[i] = (struct intr_irqsrc *)gicv3_its_alloc_irqsrc(dev,
+ sc, irq);
+ if (srcs[i] == NULL)
+ break;
+ }
+
+ /* The allocation failed, release them */
+ if (i != count) {
+ mtx_lock_spin(&sc->sc_its_dev_lock);
+ for (i = 0; i < count; i++) {
+ girq = (struct gicv3_its_irqsrc *)srcs[i];
+ if (girq == NULL)
+ break;
+ gicv3_its_release_irqsrc(sc, girq);
+ srcs[i] = NULL;
+ }
+ mtx_unlock_spin(&sc->sc_its_dev_lock);
+ return (ENXIO);
+ }
+
+ /* Finish the allocation now we have all MSI irqsrcs */
+ for (i = 0; i < count; i++) {
+ girq = (struct gicv3_its_irqsrc *)srcs[i];
+ girq->gi_id = i;
+ girq->gi_its_dev = its_dev;
+
+ /* Map the message to the given IRQ */
+ gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq);
+ its_cmd_mapti(dev, girq);
+ }
+ its_dev->lpis.lpi_busy += count;
+ *pic = dev;
+
+ return (0);
+}
+
+static int
+gicv3_its_release_msi(device_t dev, device_t child, int count,
+ struct intr_irqsrc **isrc)
+{
+ struct gicv3_its_softc *sc;
+ struct gicv3_its_irqsrc *girq;
+ struct its_dev *its_dev;
+ int i;
+
+ its_dev = its_device_find(dev, child);
+
+ KASSERT(its_dev != NULL,
+ ("gicv3_its_release_msi: Releasing a MSI interrupt with "
+ "no ITS device"));
+ KASSERT(its_dev->lpis.lpi_busy >= count,
+ ("gicv3_its_release_msi: Releasing more interrupts than "
+ "were allocated: releasing %d, allocated %d", count,
+ its_dev->lpis.lpi_busy));
+
+ sc = device_get_softc(dev);
+ mtx_lock_spin(&sc->sc_its_dev_lock);
+ for (i = 0; i < count; i++) {
+ girq = (struct gicv3_its_irqsrc *)isrc[i];
+ gicv3_its_release_irqsrc(sc, girq);
+ }
+ mtx_unlock_spin(&sc->sc_its_dev_lock);
+ its_dev->lpis.lpi_busy -= count;
+
+ if (its_dev->lpis.lpi_busy == 0)
+ its_device_release(dev, its_dev);
+
+ return (0);
+}
+
+static int
+gicv3_its_alloc_msix(device_t dev, device_t child, device_t *pic,
+ struct intr_irqsrc **isrcp)
+{
+ struct gicv3_its_softc *sc;
+ struct gicv3_its_irqsrc *girq;
+ struct its_dev *its_dev;
+ u_int nvecs, irq;
+
+ nvecs = pci_msix_count(child);
+ its_dev = its_device_get(dev, child, nvecs);
+ if (its_dev == NULL)
+ return (ENXIO);
+
+ KASSERT(its_dev->lpis.lpi_free > 0,
+ ("gicv3_its_alloc_msix: No free LPIs"));
+ sc = device_get_softc(dev);
+ irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num -
+ its_dev->lpis.lpi_free;
+
+ girq = gicv3_its_alloc_irqsrc(dev, sc, irq);
+ if (girq == NULL)
+ return (ENXIO);
+ girq->gi_id = its_dev->lpis.lpi_busy;
+ girq->gi_its_dev = its_dev;
+
+ its_dev->lpis.lpi_free--;
+ its_dev->lpis.lpi_busy++;
+
+ /* Map the message to the given IRQ */
+ gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq);
+ its_cmd_mapti(dev, girq);
+
+ *pic = dev;
+ *isrcp = (struct intr_irqsrc *)girq;
+
+ return (0);
+}
+
+static int
+gicv3_its_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
+{
+ struct gicv3_its_softc *sc;
+ struct gicv3_its_irqsrc *girq;
+ struct its_dev *its_dev;
+
+ its_dev = its_device_find(dev, child);
+
+ KASSERT(its_dev != NULL,
+ ("gicv3_its_release_msix: Releasing a MSI-X interrupt with "
+ "no ITS device"));
+ KASSERT(its_dev->lpis.lpi_busy > 0,
+ ("gicv3_its_release_msix: Releasing more interrupts than "
+ "were allocated: allocated %d", its_dev->lpis.lpi_busy));
+
+ sc = device_get_softc(dev);
+ girq = (struct gicv3_its_irqsrc *)isrc;
+ gicv3_its_release_irqsrc(sc, girq);
+ its_dev->lpis.lpi_busy--;
+
+ if (its_dev->lpis.lpi_busy == 0)
+ its_device_release(dev, its_dev);
+
+ return (0);
+}
+
+static int
+gicv3_its_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
+ uint64_t *addr, uint32_t *data)
+{
+ struct gicv3_its_softc *sc;
+ struct gicv3_its_irqsrc *girq;
+
+ sc = device_get_softc(dev);
+ girq = (struct gicv3_its_irqsrc *)isrc;
+
+ *addr = vtophys(rman_get_virtual(sc->sc_its_res)) + GITS_TRANSLATER;
+ *data = girq->gi_id;
+
+ return (0);
+}
+
+/*
+ * Command handling.
+ */
+
+static __inline void
+cmd_format_command(struct its_cmd *cmd, uint8_t cmd_type)
+{
+ /* Command field: DW0 [7:0] */
+ cmd->cmd_dword[0] &= htole64(~CMD_COMMAND_MASK);
+ cmd->cmd_dword[0] |= htole64(cmd_type);
+}
+
+static __inline void
+cmd_format_devid(struct its_cmd *cmd, uint32_t devid)
+{
+ /* Device ID field: DW0 [63:32] */
+ cmd->cmd_dword[0] &= htole64(~CMD_DEVID_MASK);
+ cmd->cmd_dword[0] |= htole64((uint64_t)devid << CMD_DEVID_SHIFT);
+}
+
+static __inline void
+cmd_format_size(struct its_cmd *cmd, uint16_t size)
+{
+ /* Size field: DW1 [4:0] */
+ cmd->cmd_dword[1] &= htole64(~CMD_SIZE_MASK);
+ cmd->cmd_dword[1] |= htole64((size & CMD_SIZE_MASK));
+}
+
+static __inline void
+cmd_format_id(struct its_cmd *cmd, uint32_t id)
+{
+ /* ID field: DW1 [31:0] */
+ cmd->cmd_dword[1] &= htole64(~CMD_ID_MASK);
+ cmd->cmd_dword[1] |= htole64(id);
+}
+
+static __inline void
+cmd_format_pid(struct its_cmd *cmd, uint32_t pid)
+{
+ /* Physical ID field: DW1 [63:32] */
+ cmd->cmd_dword[1] &= htole64(~CMD_PID_MASK);
+ cmd->cmd_dword[1] |= htole64((uint64_t)pid << CMD_PID_SHIFT);
+}
+
+static __inline void
+cmd_format_col(struct its_cmd *cmd, uint16_t col_id)
+{
+ /* Collection field: DW2 [16:0] */
+ cmd->cmd_dword[2] &= htole64(~CMD_COL_MASK);
+ cmd->cmd_dword[2] |= htole64(col_id);
+}
+
+static __inline void
+cmd_format_target(struct its_cmd *cmd, uint64_t target)
+{
+ /* Target Address field: DW2 [47:16] */
+ cmd->cmd_dword[2] &= htole64(~CMD_TARGET_MASK);
+ cmd->cmd_dword[2] |= htole64(target & CMD_TARGET_MASK);
+}
+
+static __inline void
+cmd_format_itt(struct its_cmd *cmd, uint64_t itt)
+{
+ /* ITT Address field: DW2 [47:8] */
+ cmd->cmd_dword[2] &= htole64(~CMD_ITT_MASK);
+ cmd->cmd_dword[2] |= htole64(itt & CMD_ITT_MASK);
+}
+
+static __inline void
+cmd_format_valid(struct its_cmd *cmd, uint8_t valid)
+{
+ /* Valid field: DW2 [63] */
+ cmd->cmd_dword[2] &= htole64(~CMD_VALID_MASK);
+ cmd->cmd_dword[2] |= htole64((uint64_t)valid << CMD_VALID_SHIFT);
+}
+
+static inline bool
+its_cmd_queue_full(struct gicv3_its_softc *sc)
+{
+ size_t read_idx, next_write_idx;
+
+ /* Get the index of the next command */
+ next_write_idx = (sc->sc_its_cmd_next_idx + 1) %
+ (ITS_CMDQ_SIZE / sizeof(struct its_cmd));
+ /* And the index of the current command being read */
+ read_idx = gic_its_read_4(sc, GITS_CREADR) / sizeof(struct its_cmd);
+
+ /*
+ * The queue is full when the write offset points
+ * at the command before the current read offset.
+ */
+ return (next_write_idx == read_idx);
+}
+
+static inline void
+its_cmd_sync(struct gicv3_its_softc *sc, struct its_cmd *cmd)
+{
+
+ if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0) {
+ /* Clean D-cache under command. */
+ cpu_dcache_wb_range((vm_offset_t)cmd, sizeof(*cmd));
+ } else {
+ /* DSB inner shareable, store */
+ dsb(ishst);
+ }
+
+}
+
+static inline uint64_t
+its_cmd_cwriter_offset(struct gicv3_its_softc *sc, struct its_cmd *cmd)
+{
+ uint64_t off;
+
+ off = (cmd - sc->sc_its_cmd_base) * sizeof(*cmd);
+
+ return (off);
+}
+
+static void
+its_cmd_wait_completion(device_t dev, struct its_cmd *cmd_first,
+ struct its_cmd *cmd_last)
+{
+ struct gicv3_its_softc *sc;
+ uint64_t first, last, read;
+ size_t us_left;
+
+ sc = device_get_softc(dev);
+
+ /*
+ * XXX ARM64TODO: This is obviously a significant delay.
+ * The reason for that is that currently the time frames for
+ * the command to complete are not known.
+ */
+ us_left = 1000000;
+
+ first = its_cmd_cwriter_offset(sc, cmd_first);
+ last = its_cmd_cwriter_offset(sc, cmd_last);
+
+ for (;;) {
+ read = gic_its_read_8(sc, GITS_CREADR);
+ if (first < last) {
+ if (read < first || read >= last)
+ break;
+ } else if (read < first && read >= last)
+ break;
+
+ if (us_left-- == 0) {
+ /* This means timeout */
+ device_printf(dev,
+ "Timeout while waiting for CMD completion.\n");
+ return;
+ }
+ DELAY(1);
+ }
+}
+
+static struct its_cmd *
+its_cmd_alloc_locked(device_t dev)
+{
+ struct gicv3_its_softc *sc;
+ struct its_cmd *cmd;
+ size_t us_left;
+
+ sc = device_get_softc(dev);
+
+ /*
+ * XXX ARM64TODO: This is obviously a significant delay.
+ * The reason for that is that currently the time frames for
+ * the command to complete (and therefore free the descriptor)
+ * are not known.
+ */
+ us_left = 1000000;
+
+ mtx_assert(&sc->sc_its_cmd_lock, MA_OWNED);
+ while (its_cmd_queue_full(sc)) {
+ if (us_left-- == 0) {
+ /* Timeout while waiting for free command */
+ device_printf(dev,
+ "Timeout while waiting for free command\n");
+ return (NULL);
+ }
+ DELAY(1);
+ }
+
+ cmd = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx];
+ sc->sc_its_cmd_next_idx++;
+ sc->sc_its_cmd_next_idx %= ITS_CMDQ_SIZE / sizeof(struct its_cmd);
+
+ return (cmd);
+}
+
+static uint64_t
+its_cmd_prepare(struct its_cmd *cmd, struct its_cmd_desc *desc)
+{
+ uint64_t target;
+ uint8_t cmd_type;
+ u_int size;
+
+ cmd_type = desc->cmd_type;
+ target = ITS_TARGET_NONE;
+
+ switch (cmd_type) {
+ case ITS_CMD_MOVI: /* Move interrupt ID to another collection */
+ target = desc->cmd_desc_movi.col->col_target;
+ cmd_format_command(cmd, ITS_CMD_MOVI);
+ cmd_format_id(cmd, desc->cmd_desc_movi.id);
+ cmd_format_col(cmd, desc->cmd_desc_movi.col->col_id);
+ cmd_format_devid(cmd, desc->cmd_desc_movi.its_dev->devid);
+ break;
+ case ITS_CMD_SYNC: /* Wait for previous commands completion */
+ target = desc->cmd_desc_sync.col->col_target;
+ cmd_format_command(cmd, ITS_CMD_SYNC);
+ cmd_format_target(cmd, target);
+ break;
+ case ITS_CMD_MAPD: /* Assign ITT to device */
+ cmd_format_command(cmd, ITS_CMD_MAPD);
+ cmd_format_itt(cmd, vtophys(desc->cmd_desc_mapd.its_dev->itt));
+ /*
+ * Size is the number of bits needed to encode the interrupt IDs
+ * supported by the device, minus one.
+ * When V (valid) bit is zero, this field should be written
+ * as zero.
+ */
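+ /*
+ * Worked example (illustrative): a device granted 32 vectors has
+ * fls(32) = 6, so Size is written as 5 and EventIDs 0..63 are
+ * accepted for it.
+ */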
+ if (desc->cmd_desc_mapd.valid != 0) {
+ size = fls(desc->cmd_desc_mapd.its_dev->lpis.lpi_num);
+ size = MAX(1, size) - 1;
+ } else
+ size = 0;
+
+ cmd_format_size(cmd, size);
+ cmd_format_devid(cmd, desc->cmd_desc_mapd.its_dev->devid);
+ cmd_format_valid(cmd, desc->cmd_desc_mapd.valid);
+ break;
+ case ITS_CMD_MAPC: /* Map collection to Re-Distributor */
+ target = desc->cmd_desc_mapc.col->col_target;
+ cmd_format_command(cmd, ITS_CMD_MAPC);
+ cmd_format_col(cmd, desc->cmd_desc_mapc.col->col_id);
+ cmd_format_valid(cmd, desc->cmd_desc_mapc.valid);
+ cmd_format_target(cmd, target);
+ break;
+ case ITS_CMD_MAPTI:
+ target = desc->cmd_desc_mapvi.col->col_target;
+ cmd_format_command(cmd, ITS_CMD_MAPTI);
+ cmd_format_devid(cmd, desc->cmd_desc_mapvi.its_dev->devid);
+ cmd_format_id(cmd, desc->cmd_desc_mapvi.id);
+ cmd_format_pid(cmd, desc->cmd_desc_mapvi.pid);
+ cmd_format_col(cmd, desc->cmd_desc_mapvi.col->col_id);
+ break;
+ case ITS_CMD_MAPI:
+ target = desc->cmd_desc_mapi.col->col_target;
+ cmd_format_command(cmd, ITS_CMD_MAPI);
+ cmd_format_devid(cmd, desc->cmd_desc_mapi.its_dev->devid);
+ cmd_format_id(cmd, desc->cmd_desc_mapi.pid);
+ cmd_format_col(cmd, desc->cmd_desc_mapi.col->col_id);
+ break;
+ case ITS_CMD_INV:
+ target = desc->cmd_desc_inv.col->col_target;
+ cmd_format_command(cmd, ITS_CMD_INV);
+ cmd_format_devid(cmd, desc->cmd_desc_inv.its_dev->devid);
+ cmd_format_id(cmd, desc->cmd_desc_inv.pid);
+ break;
+ case ITS_CMD_INVALL:
+ cmd_format_command(cmd, ITS_CMD_INVALL);
+ cmd_format_col(cmd, desc->cmd_desc_invall.col->col_id);
+ break;
+ default:
+ panic("its_cmd_prepare: Invalid command: %x", cmd_type);
+ }
+
+ return (target);
+}
+
+static int
+its_cmd_send(device_t dev, struct its_cmd_desc *desc)
+{
+ struct gicv3_its_softc *sc;
+ struct its_cmd *cmd, *cmd_sync, *cmd_write;
+ struct its_col col_sync;
+ struct its_cmd_desc desc_sync;
+ uint64_t target, cwriter;
+
+ sc = device_get_softc(dev);
+ mtx_lock_spin(&sc->sc_its_cmd_lock);
+ cmd = its_cmd_alloc_locked(dev);
+ if (cmd == NULL) {
+ device_printf(dev, "could not allocate ITS command\n");
+ mtx_unlock_spin(&sc->sc_its_cmd_lock);
+ return (EBUSY);
+ }
+
+ target = its_cmd_prepare(cmd, desc);
+ its_cmd_sync(sc, cmd);
+
+ if (target != ITS_TARGET_NONE) {
+ cmd_sync = its_cmd_alloc_locked(dev);
+ if (cmd_sync != NULL) {
+ desc_sync.cmd_type = ITS_CMD_SYNC;
+ col_sync.col_target = target;
+ desc_sync.cmd_desc_sync.col = &col_sync;
+ its_cmd_prepare(cmd_sync, &desc_sync);
+ its_cmd_sync(sc, cmd_sync);
+ }
+ }
+
+ /* Update GITS_CWRITER */
+ cwriter = sc->sc_its_cmd_next_idx * sizeof(struct its_cmd);
+ gic_its_write_8(sc, GITS_CWRITER, cwriter);
+ cmd_write = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx];
+ mtx_unlock_spin(&sc->sc_its_cmd_lock);
+
+ its_cmd_wait_completion(dev, cmd, cmd_write);
+
+ return (0);
+}
+
+/* Handlers to send commands */
+static void
+its_cmd_movi(device_t dev, struct gicv3_its_irqsrc *girq)
+{
+ struct gicv3_its_softc *sc;
+ struct its_cmd_desc desc;
+ struct its_col *col;
+
+ sc = device_get_softc(dev);
+ col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];
+
+ desc.cmd_type = ITS_CMD_MOVI;
+ desc.cmd_desc_movi.its_dev = girq->gi_its_dev;
+ desc.cmd_desc_movi.col = col;
+ desc.cmd_desc_movi.id = girq->gi_id;
+
+ its_cmd_send(dev, &desc);
+}
+
+static void
+its_cmd_mapc(device_t dev, struct its_col *col, uint8_t valid)
+{
+ struct its_cmd_desc desc;
+
+ desc.cmd_type = ITS_CMD_MAPC;
+ desc.cmd_desc_mapc.col = col;
+ /*
+ * Valid bit set - map the collection.
+ * Valid bit cleared - unmap the collection.
+ */
+ desc.cmd_desc_mapc.valid = valid;
+
+ its_cmd_send(dev, &desc);
+}
+
+static void
+its_cmd_mapti(device_t dev, struct gicv3_its_irqsrc *girq)
+{
+ struct gicv3_its_softc *sc;
+ struct its_cmd_desc desc;
+ struct its_col *col;
+ u_int col_id;
+
+ sc = device_get_softc(dev);
+
+ col_id = CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1;
+ col = sc->sc_its_cols[col_id];
+
+ desc.cmd_type = ITS_CMD_MAPTI;
+ desc.cmd_desc_mapvi.its_dev = girq->gi_its_dev;
+ desc.cmd_desc_mapvi.col = col;
+ /* The EventID sent to the device */
+ desc.cmd_desc_mapvi.id = girq->gi_id;
+ /* The physical interrupt presented to software */
+ desc.cmd_desc_mapvi.pid = girq->gi_lpi + GIC_FIRST_LPI;
+
+ its_cmd_send(dev, &desc);
+}
+
+static void
+its_cmd_mapd(device_t dev, struct its_dev *its_dev, uint8_t valid)
+{
+ struct its_cmd_desc desc;
+
+ desc.cmd_type = ITS_CMD_MAPD;
+ desc.cmd_desc_mapd.its_dev = its_dev;
+ desc.cmd_desc_mapd.valid = valid;
+
+ its_cmd_send(dev, &desc);
+}
+
+static void
+its_cmd_inv(device_t dev, struct its_dev *its_dev,
+ struct gicv3_its_irqsrc *girq)
+{
+ struct gicv3_its_softc *sc;
+ struct its_cmd_desc desc;
+ struct its_col *col;
+
+ sc = device_get_softc(dev);
+ col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];
+
+ desc.cmd_type = ITS_CMD_INV;
+ /* The EventID sent to the device */
+ desc.cmd_desc_inv.pid = girq->gi_id;
+ desc.cmd_desc_inv.its_dev = its_dev;
+ desc.cmd_desc_inv.col = col;
+
+ its_cmd_send(dev, &desc);
+}
+
+static void
+its_cmd_invall(device_t dev, struct its_col *col)
+{
+ struct its_cmd_desc desc;
+
+ desc.cmd_type = ITS_CMD_INVALL;
+ desc.cmd_desc_invall.col = col;
+
+ its_cmd_send(dev, &desc);
+}
+
+#ifdef FDT
+static device_probe_t gicv3_its_fdt_probe;
+static device_attach_t gicv3_its_fdt_attach;
+
+static device_method_t gicv3_its_fdt_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, gicv3_its_fdt_probe),
+ DEVMETHOD(device_attach, gicv3_its_fdt_attach),
+
+ /* End */
+ DEVMETHOD_END
+};
+
+#define its_baseclasses its_fdt_baseclasses
+DEFINE_CLASS_1(its, gicv3_its_fdt_driver, gicv3_its_fdt_methods,
+ sizeof(struct gicv3_its_softc), gicv3_its_driver);
+#undef its_baseclasses
+static devclass_t gicv3_its_fdt_devclass;
+
+EARLY_DRIVER_MODULE(its_fdt, gic, gicv3_its_fdt_driver,
+ gicv3_its_fdt_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
+
+static int
+gicv3_its_fdt_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (!ofw_bus_is_compatible(dev, "arm,gic-v3-its"))
+ return (ENXIO);
+
+ device_set_desc(dev, "ARM GIC Interrupt Translation Service");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+gicv3_its_fdt_attach(device_t dev)
+{
+ struct gicv3_its_softc *sc;
+ phandle_t xref;
+ int err;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ err = gicv3_its_attach(dev);
+ if (err != 0)
+ return (err);
+
+	/* Register this device as an interrupt controller */
+ xref = OF_xref_from_node(ofw_bus_get_node(dev));
+ sc->sc_pic = intr_pic_register(dev, xref);
+ intr_pic_add_handler(device_get_parent(dev), sc->sc_pic,
+ gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length);
+
+ /* Register this device to handle MSI interrupts */
+ intr_msi_register(dev, xref);
+
+ return (0);
+}
+#endif
+
+#ifdef DEV_ACPI
+static device_probe_t gicv3_its_acpi_probe;
+static device_attach_t gicv3_its_acpi_attach;
+
+static device_method_t gicv3_its_acpi_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, gicv3_its_acpi_probe),
+ DEVMETHOD(device_attach, gicv3_its_acpi_attach),
+
+ /* End */
+ DEVMETHOD_END
+};
+
+#define its_baseclasses its_acpi_baseclasses
+DEFINE_CLASS_1(its, gicv3_its_acpi_driver, gicv3_its_acpi_methods,
+ sizeof(struct gicv3_its_softc), gicv3_its_driver);
+#undef its_baseclasses
+static devclass_t gicv3_its_acpi_devclass;
+
+EARLY_DRIVER_MODULE(its_acpi, gic, gicv3_its_acpi_driver,
+ gicv3_its_acpi_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
+
+static int
+gicv3_its_acpi_probe(device_t dev)
+{
+
+ if (gic_get_bus(dev) != GIC_BUS_ACPI)
+ return (EINVAL);
+
+ if (gic_get_hw_rev(dev) < 3)
+ return (EINVAL);
+
+ device_set_desc(dev, "ARM GIC Interrupt Translation Service");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+gicv3_its_acpi_attach(device_t dev)
+{
+ struct gicv3_its_softc *sc;
+ struct gic_v3_devinfo *di;
+ int err;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ err = gicv3_its_attach(dev);
+ if (err != 0)
+ return (err);
+
+ di = device_get_ivars(dev);
+ sc->sc_pic = intr_pic_register(dev, di->msi_xref);
+ intr_pic_add_handler(device_get_parent(dev), sc->sc_pic,
+ gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length);
+
+ /* Register this device to handle MSI interrupts */
+ intr_msi_register(dev, di->msi_xref);
+
+ return (0);
+}
+#endif
diff --git a/sys/arm64/arm64/identcpu.c b/sys/arm64/arm64/identcpu.c
new file mode 100644
index 000000000000..d9c5c50fe568
--- /dev/null
+++ b/sys/arm64/arm64/identcpu.c
@@ -0,0 +1,1667 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * Copyright (c) 2014 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Semihalf
+ * under sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/pcpu.h>
+#include <sys/sbuf.h>
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+
+#include <machine/atomic.h>
+#include <machine/cpu.h>
+#include <machine/cpufunc.h>
+#include <machine/undefined.h>
+#include <machine/elf.h>
+
+static void print_cpu_features(u_int cpu);
+static u_long parse_cpu_features_hwcap(void);
+
+char machine[] = "arm64";
+
+#ifdef SCTL_MASK32
+extern int adaptive_machine_arch;
+#endif
+
+static SYSCTL_NODE(_machdep, OID_AUTO, cache, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
+ "Cache management tuning");
+
+static int allow_dic = 1;
+SYSCTL_INT(_machdep_cache, OID_AUTO, allow_dic, CTLFLAG_RDTUN, &allow_dic, 0,
+ "Allow optimizations based on the DIC cache bit");
+
+static int allow_idc = 1;
+SYSCTL_INT(_machdep_cache, OID_AUTO, allow_idc, CTLFLAG_RDTUN, &allow_idc, 0,
+ "Allow optimizations based on the IDC cache bit");
+
+static void check_cpu_regs(u_int cpu);
+
+/*
+ * The default implementation of I-cache sync assumes we have an
+ * aliasing cache until we know otherwise.
+ */
+void (*arm64_icache_sync_range)(vm_offset_t, vm_size_t) =
+ &arm64_aliasing_icache_sync_range;
+
+static int
+sysctl_hw_machine(SYSCTL_HANDLER_ARGS)
+{
+#ifdef SCTL_MASK32
+ static const char machine32[] = "arm";
+#endif
+ int error;
+
+#ifdef SCTL_MASK32
+ if ((req->flags & SCTL_MASK32) != 0 && adaptive_machine_arch)
+ error = SYSCTL_OUT(req, machine32, sizeof(machine32));
+ else
+#endif
+ error = SYSCTL_OUT(req, machine, sizeof(machine));
+ return (error);
+}
+
+SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD |
+ CTLFLAG_MPSAFE, NULL, 0, sysctl_hw_machine, "A", "Machine class");
+
+static char cpu_model[64];
+SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD,
+ cpu_model, sizeof(cpu_model), "Machine model");
+
+/*
+ * Per-CPU affinity as provided in MPIDR_EL1.
+ * Indexed by CPU number in the logical order selected by the system.
+ * Relevant fields can be extracted using the CPU_AFFn macros;
+ * Aff3.Aff2.Aff1.Aff0 together form a unique CPU address in the system.
+ *
+ * Fields used by us:
+ * Aff1 - Cluster number
+ * Aff0 - CPU number in Aff1 cluster
+ */
+uint64_t __cpu_affinity[MAXCPU];
+static u_int cpu_aff_levels;
+
+struct cpu_desc {
+ u_int cpu_impl;
+ u_int cpu_part_num;
+ u_int cpu_variant;
+ u_int cpu_revision;
+ const char *cpu_impl_name;
+ const char *cpu_part_name;
+
+ uint64_t mpidr;
+ uint64_t id_aa64afr0;
+ uint64_t id_aa64afr1;
+ uint64_t id_aa64dfr0;
+ uint64_t id_aa64dfr1;
+ uint64_t id_aa64isar0;
+ uint64_t id_aa64isar1;
+ uint64_t id_aa64mmfr0;
+ uint64_t id_aa64mmfr1;
+ uint64_t id_aa64mmfr2;
+ uint64_t id_aa64pfr0;
+ uint64_t id_aa64pfr1;
+ uint64_t ctr;
+};
+
+static struct cpu_desc cpu_desc[MAXCPU];
+static struct cpu_desc kern_cpu_desc;
+static struct cpu_desc user_cpu_desc;
+static u_int cpu_print_regs;
+#define PRINT_ID_AA64_AFR0 0x00000001
+#define PRINT_ID_AA64_AFR1 0x00000002
+#define PRINT_ID_AA64_DFR0 0x00000010
+#define PRINT_ID_AA64_DFR1 0x00000020
+#define PRINT_ID_AA64_ISAR0 0x00000100
+#define PRINT_ID_AA64_ISAR1 0x00000200
+#define PRINT_ID_AA64_MMFR0 0x00001000
+#define PRINT_ID_AA64_MMFR1 0x00002000
+#define PRINT_ID_AA64_MMFR2 0x00004000
+#define PRINT_ID_AA64_PFR0 0x00010000
+#define PRINT_ID_AA64_PFR1 0x00020000
+#define PRINT_CTR_EL0 0x10000000
+
+struct cpu_parts {
+ u_int part_id;
+ const char *part_name;
+};
+#define CPU_PART_NONE { 0, "Unknown Processor" }
+
+struct cpu_implementers {
+ u_int impl_id;
+ const char *impl_name;
+ /*
+ * Part number is implementation defined
+ * so each vendor will have its own set of values and names.
+ */
+ const struct cpu_parts *cpu_parts;
+};
+#define CPU_IMPLEMENTER_NONE { 0, "Unknown Implementer", cpu_parts_none }
+
+/*
+ * Per-implementer table of (PartNum, CPU Name) pairs.
+ */
+/* ARM Ltd. */
+static const struct cpu_parts cpu_parts_arm[] = {
+ { CPU_PART_FOUNDATION, "Foundation-Model" },
+ { CPU_PART_CORTEX_A35, "Cortex-A35" },
+ { CPU_PART_CORTEX_A53, "Cortex-A53" },
+ { CPU_PART_CORTEX_A55, "Cortex-A55" },
+ { CPU_PART_CORTEX_A57, "Cortex-A57" },
+ { CPU_PART_CORTEX_A65, "Cortex-A65" },
+ { CPU_PART_CORTEX_A72, "Cortex-A72" },
+ { CPU_PART_CORTEX_A73, "Cortex-A73" },
+ { CPU_PART_CORTEX_A75, "Cortex-A75" },
+ { CPU_PART_CORTEX_A76, "Cortex-A76" },
+ { CPU_PART_CORTEX_A76AE, "Cortex-A76AE" },
+ { CPU_PART_CORTEX_A77, "Cortex-A77" },
+ { CPU_PART_NEOVERSE_N1, "Neoverse-N1" },
+ CPU_PART_NONE,
+};
+
+/* Cavium */
+static const struct cpu_parts cpu_parts_cavium[] = {
+ { CPU_PART_THUNDERX, "ThunderX" },
+ { CPU_PART_THUNDERX2, "ThunderX2" },
+ CPU_PART_NONE,
+};
+
+/* APM / Ampere */
+static const struct cpu_parts cpu_parts_apm[] = {
+ { CPU_PART_EMAG8180, "eMAG 8180" },
+ CPU_PART_NONE,
+};
+
+/* Unknown */
+static const struct cpu_parts cpu_parts_none[] = {
+ CPU_PART_NONE,
+};
+
+/*
+ * Implementers table.
+ */
+const struct cpu_implementers cpu_implementers[] = {
+ { CPU_IMPL_ARM, "ARM", cpu_parts_arm },
+ { CPU_IMPL_BROADCOM, "Broadcom", cpu_parts_none },
+ { CPU_IMPL_CAVIUM, "Cavium", cpu_parts_cavium },
+ { CPU_IMPL_DEC, "DEC", cpu_parts_none },
+ { CPU_IMPL_INFINEON, "IFX", cpu_parts_none },
+ { CPU_IMPL_FREESCALE, "Freescale", cpu_parts_none },
+ { CPU_IMPL_NVIDIA, "NVIDIA", cpu_parts_none },
+ { CPU_IMPL_APM, "APM", cpu_parts_apm },
+ { CPU_IMPL_QUALCOMM, "Qualcomm", cpu_parts_none },
+ { CPU_IMPL_MARVELL, "Marvell", cpu_parts_none },
+ { CPU_IMPL_INTEL, "Intel", cpu_parts_none },
+ CPU_IMPLEMENTER_NONE,
+};
+
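+/*
+ * How an ID register field is exposed to userspace: MRS_EXACT pins the
+ * field to the value encoded by MRS_EXACT_VAL (0 when plain MRS_EXACT is
+ * used, hiding the feature), while MRS_LOWER reports the lowest value
+ * seen across all CPUs.
+ */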
+#define MRS_TYPE_MASK 0xf
+#define MRS_INVALID 0
+#define MRS_EXACT 1
+#define MRS_EXACT_VAL(x) (MRS_EXACT | ((x) << 4))
+#define MRS_EXACT_FIELD(x) ((x) >> 4)
+#define MRS_LOWER 2
+
+struct mrs_field_value {
+ uint64_t value;
+ const char *desc;
+};
+
+#define MRS_FIELD_VALUE(_value, _desc) \
+ { \
+ .value = (_value), \
+ .desc = (_desc), \
+ }
+
+#define MRS_FIELD_VALUE_NONE_IMPL(_reg, _field, _none, _impl) \
+ MRS_FIELD_VALUE(_reg ## _ ## _field ## _ ## _none, ""), \
+ MRS_FIELD_VALUE(_reg ## _ ## _field ## _ ## _impl, #_field)
+
+#define MRS_FIELD_VALUE_COUNT(_reg, _field, _desc) \
+ MRS_FIELD_VALUE(0ul << _reg ## _ ## _field ## _SHIFT, "1 " _desc), \
+ MRS_FIELD_VALUE(1ul << _reg ## _ ## _field ## _SHIFT, "2 " _desc "s"), \
+ MRS_FIELD_VALUE(2ul << _reg ## _ ## _field ## _SHIFT, "3 " _desc "s"), \
+ MRS_FIELD_VALUE(3ul << _reg ## _ ## _field ## _SHIFT, "4 " _desc "s"), \
+ MRS_FIELD_VALUE(4ul << _reg ## _ ## _field ## _SHIFT, "5 " _desc "s"), \
+ MRS_FIELD_VALUE(5ul << _reg ## _ ## _field ## _SHIFT, "6 " _desc "s"), \
+ MRS_FIELD_VALUE(6ul << _reg ## _ ## _field ## _SHIFT, "7 " _desc "s"), \
+ MRS_FIELD_VALUE(7ul << _reg ## _ ## _field ## _SHIFT, "8 " _desc "s"), \
+ MRS_FIELD_VALUE(8ul << _reg ## _ ## _field ## _SHIFT, "9 " _desc "s"), \
+ MRS_FIELD_VALUE(9ul << _reg ## _ ## _field ## _SHIFT, "10 "_desc "s"), \
+ MRS_FIELD_VALUE(10ul<< _reg ## _ ## _field ## _SHIFT, "11 "_desc "s"), \
+ MRS_FIELD_VALUE(11ul<< _reg ## _ ## _field ## _SHIFT, "12 "_desc "s"), \
+ MRS_FIELD_VALUE(12ul<< _reg ## _ ## _field ## _SHIFT, "13 "_desc "s"), \
+ MRS_FIELD_VALUE(13ul<< _reg ## _ ## _field ## _SHIFT, "14 "_desc "s"), \
+ MRS_FIELD_VALUE(14ul<< _reg ## _ ## _field ## _SHIFT, "15 "_desc "s"), \
+ MRS_FIELD_VALUE(15ul<< _reg ## _ ## _field ## _SHIFT, "16 "_desc "s")
+
+#define MRS_FIELD_VALUE_END { .desc = NULL }
+
+struct mrs_field {
+ const char *name;
+ struct mrs_field_value *values;
+ uint64_t mask;
+ bool sign;
+ u_int type;
+ u_int shift;
+};
+
+#define MRS_FIELD(_register, _name, _sign, _type, _values) \
+ { \
+ .name = #_name, \
+ .sign = (_sign), \
+ .type = (_type), \
+ .shift = _register ## _ ## _name ## _SHIFT, \
+ .mask = _register ## _ ## _name ## _MASK, \
+ .values = (_values), \
+ }
+
+#define MRS_FIELD_END { .type = MRS_INVALID, }
+
+/* ID_AA64AFR0_EL1 */
+static struct mrs_field id_aa64afr0_fields[] = {
+ MRS_FIELD_END,
+};
+
+
+/* ID_AA64AFR1_EL1 */
+static struct mrs_field id_aa64afr1_fields[] = {
+ MRS_FIELD_END,
+};
+
+
+/* ID_AA64DFR0_EL1 */
+static struct mrs_field_value id_aa64dfr0_pmsver[] = {
+ MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_NONE, ""),
+ MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_V1, "SPE"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64dfr0_ctx_cmps[] = {
+ MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, CTX_CMPs, "CTX BKPT"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64dfr0_wrps[] = {
+ MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, WRPs, "Watchpoint"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64dfr0_brps[] = {
+ MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, BRPs, "Breakpoint"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64dfr0_pmuver[] = {
+ MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_NONE, ""),
+ MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3, "PMUv3"),
+ MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3_1, "PMUv3+16 bit evtCount"),
+ MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_IMPL, "IMPL PMU"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64dfr0_tracever[] = {
+ MRS_FIELD_VALUE(ID_AA64DFR0_TraceVer_NONE, ""),
+ MRS_FIELD_VALUE(ID_AA64DFR0_TraceVer_IMPL, "Trace"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64dfr0_debugver[] = {
+ MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8, "Debugv8"),
+ MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_VHE, "Debugv8_VHE"),
+ MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_2, "Debugv8.2"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field id_aa64dfr0_fields[] = {
+ MRS_FIELD(ID_AA64DFR0, PMSVer, false, MRS_EXACT, id_aa64dfr0_pmsver),
+ MRS_FIELD(ID_AA64DFR0, CTX_CMPs, false, MRS_EXACT,
+ id_aa64dfr0_ctx_cmps),
+ MRS_FIELD(ID_AA64DFR0, WRPs, false, MRS_EXACT, id_aa64dfr0_wrps),
+ MRS_FIELD(ID_AA64DFR0, BRPs, false, MRS_LOWER, id_aa64dfr0_brps),
+ MRS_FIELD(ID_AA64DFR0, PMUVer, false, MRS_EXACT, id_aa64dfr0_pmuver),
+ MRS_FIELD(ID_AA64DFR0, TraceVer, false, MRS_EXACT,
+ id_aa64dfr0_tracever),
+ MRS_FIELD(ID_AA64DFR0, DebugVer, false, MRS_EXACT_VAL(0x6),
+ id_aa64dfr0_debugver),
+ MRS_FIELD_END,
+};
+
+
+/* ID_AA64DFR1 */
+static struct mrs_field id_aa64dfr1_fields[] = {
+ MRS_FIELD_END,
+};
+
+
+/* ID_AA64ISAR0_EL1 */
+static struct mrs_field_value id_aa64isar0_rndr[] = {
+ MRS_FIELD_VALUE(ID_AA64ISAR0_RNDR_NONE, ""),
+ MRS_FIELD_VALUE(ID_AA64ISAR0_RNDR_IMPL, "RNG"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar0_tlb[] = {
+ MRS_FIELD_VALUE(ID_AA64ISAR0_TLB_NONE, ""),
+ MRS_FIELD_VALUE(ID_AA64ISAR0_TLB_TLBIOS, "TLBI-OS"),
+ MRS_FIELD_VALUE(ID_AA64ISAR0_TLB_TLBIOSR, "TLBI-OSR"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar0_ts[] = {
+ MRS_FIELD_VALUE(ID_AA64ISAR0_TS_NONE, ""),
+ MRS_FIELD_VALUE(ID_AA64ISAR0_TS_CondM_8_4, "CondM-8.4"),
+ MRS_FIELD_VALUE(ID_AA64ISAR0_TS_CondM_8_5, "CondM-8.5"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar0_fhm[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, FHM, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar0_dp[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, DP, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar0_sm4[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SM4, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar0_sm3[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SM3, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar0_sha3[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA3, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar0_rdm[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, RDM, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar0_atomic[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, Atomic, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar0_crc32[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, CRC32, NONE, BASE),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar0_sha2[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA2, NONE, BASE),
+ MRS_FIELD_VALUE(ID_AA64ISAR0_SHA2_512, "SHA2+SHA512"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar0_sha1[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA1, NONE, BASE),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar0_aes[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, AES, NONE, BASE),
+ MRS_FIELD_VALUE(ID_AA64ISAR0_AES_PMULL, "AES+PMULL"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field id_aa64isar0_fields[] = {
+ MRS_FIELD(ID_AA64ISAR0, RNDR, false, MRS_LOWER, id_aa64isar0_rndr),
+ MRS_FIELD(ID_AA64ISAR0, TLB, false, MRS_LOWER, id_aa64isar0_tlb),
+ MRS_FIELD(ID_AA64ISAR0, TS, false, MRS_LOWER, id_aa64isar0_ts),
+ MRS_FIELD(ID_AA64ISAR0, FHM, false, MRS_LOWER, id_aa64isar0_fhm),
+ MRS_FIELD(ID_AA64ISAR0, DP, false, MRS_LOWER, id_aa64isar0_dp),
+ MRS_FIELD(ID_AA64ISAR0, SM4, false, MRS_LOWER, id_aa64isar0_sm4),
+ MRS_FIELD(ID_AA64ISAR0, SM3, false, MRS_LOWER, id_aa64isar0_sm3),
+ MRS_FIELD(ID_AA64ISAR0, SHA3, false, MRS_LOWER, id_aa64isar0_sha3),
+ MRS_FIELD(ID_AA64ISAR0, RDM, false, MRS_LOWER, id_aa64isar0_rdm),
+ MRS_FIELD(ID_AA64ISAR0, Atomic, false, MRS_LOWER, id_aa64isar0_atomic),
+ MRS_FIELD(ID_AA64ISAR0, CRC32, false, MRS_LOWER, id_aa64isar0_crc32),
+ MRS_FIELD(ID_AA64ISAR0, SHA2, false, MRS_LOWER, id_aa64isar0_sha2),
+ MRS_FIELD(ID_AA64ISAR0, SHA1, false, MRS_LOWER, id_aa64isar0_sha1),
+ MRS_FIELD(ID_AA64ISAR0, AES, false, MRS_LOWER, id_aa64isar0_aes),
+ MRS_FIELD_END,
+};
+
+
+/* ID_AA64ISAR1_EL1 */
+static struct mrs_field_value id_aa64isar1_i8mm[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, I8MM, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar1_dgh[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, DGH, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar1_bf16[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, BF16, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar1_specres[] = {
+ MRS_FIELD_VALUE(ID_AA64ISAR1_SPECRES_NONE, ""),
+ MRS_FIELD_VALUE(ID_AA64ISAR1_SPECRES_IMPL, "PredInv"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar1_sb[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, SB, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar1_frintts[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, FRINTTS, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar1_gpi[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPI, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar1_gpa[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPA, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar1_lrcpc[] = {
+ MRS_FIELD_VALUE(ID_AA64ISAR1_LRCPC_NONE, ""),
+ MRS_FIELD_VALUE(ID_AA64ISAR1_LRCPC_RCPC_8_3, "RCPC-8.3"),
+ MRS_FIELD_VALUE(ID_AA64ISAR1_LRCPC_RCPC_8_4, "RCPC-8.4"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar1_fcma[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, FCMA, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar1_jscvt[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, JSCVT, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar1_api[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, API, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar1_apa[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, APA, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64isar1_dpb[] = {
+ MRS_FIELD_VALUE(ID_AA64ISAR1_DPB_NONE, ""),
+ MRS_FIELD_VALUE(ID_AA64ISAR1_DPB_DCCVAP, "DCPoP"),
+ MRS_FIELD_VALUE(ID_AA64ISAR1_DPB_DCCVADP, "DCCVADP"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field id_aa64isar1_fields[] = {
+ MRS_FIELD(ID_AA64ISAR1, I8MM, false, MRS_LOWER, id_aa64isar1_i8mm),
+ MRS_FIELD(ID_AA64ISAR1, DGH, false, MRS_LOWER, id_aa64isar1_dgh),
+ MRS_FIELD(ID_AA64ISAR1, BF16, false, MRS_LOWER, id_aa64isar1_bf16),
+ MRS_FIELD(ID_AA64ISAR1, SPECRES, false, MRS_LOWER,
+ id_aa64isar1_specres),
+ MRS_FIELD(ID_AA64ISAR1, SB, false, MRS_LOWER, id_aa64isar1_sb),
+ MRS_FIELD(ID_AA64ISAR1, FRINTTS, false, MRS_LOWER,
+ id_aa64isar1_frintts),
+ MRS_FIELD(ID_AA64ISAR1, GPI, false, MRS_EXACT, id_aa64isar1_gpi),
+ MRS_FIELD(ID_AA64ISAR1, GPA, false, MRS_EXACT, id_aa64isar1_gpa),
+ MRS_FIELD(ID_AA64ISAR1, LRCPC, false, MRS_LOWER, id_aa64isar1_lrcpc),
+ MRS_FIELD(ID_AA64ISAR1, FCMA, false, MRS_LOWER, id_aa64isar1_fcma),
+ MRS_FIELD(ID_AA64ISAR1, JSCVT, false, MRS_LOWER, id_aa64isar1_jscvt),
+ MRS_FIELD(ID_AA64ISAR1, API, false, MRS_EXACT, id_aa64isar1_api),
+ MRS_FIELD(ID_AA64ISAR1, APA, false, MRS_EXACT, id_aa64isar1_apa),
+ MRS_FIELD(ID_AA64ISAR1, DPB, false, MRS_LOWER, id_aa64isar1_dpb),
+ MRS_FIELD_END,
+};
+
+
+/* ID_AA64MMFR0_EL1 */
+static struct mrs_field_value id_aa64mmfr0_tgran4[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran4, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64mmfr0_tgran64[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran64, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64mmfr0_tgran16[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran16, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64mmfr0_bigend_el0[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, BigEndEL0, FIXED, MIXED),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64mmfr0_snsmem[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, SNSMem, NONE, DISTINCT),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64mmfr0_bigend[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, BigEnd, FIXED, MIXED),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64mmfr0_asid_bits[] = {
+ MRS_FIELD_VALUE(ID_AA64MMFR0_ASIDBits_8, "8bit ASID"),
+ MRS_FIELD_VALUE(ID_AA64MMFR0_ASIDBits_16, "16bit ASID"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64mmfr0_parange[] = {
+ MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4G, "4GB PA"),
+ MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_64G, "64GB PA"),
+ MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_1T, "1TB PA"),
+ MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4T, "4TB PA"),
+ MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_16T, "16TB PA"),
+ MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_256T, "256TB PA"),
+ MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4P, "4PB PA"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field id_aa64mmfr0_fields[] = {
+ MRS_FIELD(ID_AA64MMFR0, TGran4, false, MRS_EXACT, id_aa64mmfr0_tgran4),
+ MRS_FIELD(ID_AA64MMFR0, TGran64, false, MRS_EXACT,
+ id_aa64mmfr0_tgran64),
+ MRS_FIELD(ID_AA64MMFR0, TGran16, false, MRS_EXACT,
+ id_aa64mmfr0_tgran16),
+ MRS_FIELD(ID_AA64MMFR0, BigEndEL0, false, MRS_EXACT,
+ id_aa64mmfr0_bigend_el0),
+ MRS_FIELD(ID_AA64MMFR0, SNSMem, false, MRS_EXACT, id_aa64mmfr0_snsmem),
+ MRS_FIELD(ID_AA64MMFR0, BigEnd, false, MRS_EXACT, id_aa64mmfr0_bigend),
+ MRS_FIELD(ID_AA64MMFR0, ASIDBits, false, MRS_EXACT,
+ id_aa64mmfr0_asid_bits),
+ MRS_FIELD(ID_AA64MMFR0, PARange, false, MRS_EXACT,
+ id_aa64mmfr0_parange),
+ MRS_FIELD_END,
+};
+
+
+/* ID_AA64MMFR1_EL1 */
+static struct mrs_field_value id_aa64mmfr1_xnx[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, XNX, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64mmfr1_specsei[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, SpecSEI, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64mmfr1_pan[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, PAN, NONE, IMPL),
+ MRS_FIELD_VALUE(ID_AA64MMFR1_PAN_ATS1E1, "PAN+ATS1E1"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64mmfr1_lo[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, LO, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64mmfr1_hpds[] = {
+ MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_NONE, ""),
+ MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_HPD, "HPD"),
+ MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_TTPBHA, "HPD+TTPBHA"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64mmfr1_vh[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, VH, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64mmfr1_vmidbits[] = {
+ MRS_FIELD_VALUE(ID_AA64MMFR1_VMIDBits_8, "8bit VMID"),
+ MRS_FIELD_VALUE(ID_AA64MMFR1_VMIDBits_16, "16bit VMID"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64mmfr1_hafdbs[] = {
+ MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_NONE, ""),
+ MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_AF, "HAF"),
+ MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_AF_DBS, "HAF+DS"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field id_aa64mmfr1_fields[] = {
+ MRS_FIELD(ID_AA64MMFR1, XNX, false, MRS_EXACT, id_aa64mmfr1_xnx),
+ MRS_FIELD(ID_AA64MMFR1, SpecSEI, false, MRS_EXACT,
+ id_aa64mmfr1_specsei),
+ MRS_FIELD(ID_AA64MMFR1, PAN, false, MRS_EXACT, id_aa64mmfr1_pan),
+ MRS_FIELD(ID_AA64MMFR1, LO, false, MRS_EXACT, id_aa64mmfr1_lo),
+ MRS_FIELD(ID_AA64MMFR1, HPDS, false, MRS_EXACT, id_aa64mmfr1_hpds),
+ MRS_FIELD(ID_AA64MMFR1, VH, false, MRS_EXACT, id_aa64mmfr1_vh),
+ MRS_FIELD(ID_AA64MMFR1, VMIDBits, false, MRS_EXACT,
+ id_aa64mmfr1_vmidbits),
+ MRS_FIELD(ID_AA64MMFR1, HAFDBS, false, MRS_EXACT, id_aa64mmfr1_hafdbs),
+ MRS_FIELD_END,
+};
+
+
+/* ID_AA64MMFR2_EL1 */
+static struct mrs_field_value id_aa64mmfr2_nv[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, NV, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64mmfr2_ccidx[] = {
+ MRS_FIELD_VALUE(ID_AA64MMFR2_CCIDX_32, "32bit CCIDX"),
+ MRS_FIELD_VALUE(ID_AA64MMFR2_CCIDX_64, "64bit CCIDX"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64mmfr2_varange[] = {
+ MRS_FIELD_VALUE(ID_AA64MMFR2_VARange_48, "48bit VA"),
+ MRS_FIELD_VALUE(ID_AA64MMFR2_VARange_52, "52bit VA"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64mmfr2_iesb[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, IESB, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64mmfr2_lsm[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, LSM, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64mmfr2_uao[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, UAO, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64mmfr2_cnp[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, CnP, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field id_aa64mmfr2_fields[] = {
+ MRS_FIELD(ID_AA64MMFR2, NV, false, MRS_EXACT, id_aa64mmfr2_nv),
+ MRS_FIELD(ID_AA64MMFR2, CCIDX, false, MRS_EXACT, id_aa64mmfr2_ccidx),
+ MRS_FIELD(ID_AA64MMFR2, VARange, false, MRS_EXACT,
+ id_aa64mmfr2_varange),
+ MRS_FIELD(ID_AA64MMFR2, IESB, false, MRS_EXACT, id_aa64mmfr2_iesb),
+ MRS_FIELD(ID_AA64MMFR2, LSM, false, MRS_EXACT, id_aa64mmfr2_lsm),
+ MRS_FIELD(ID_AA64MMFR2, UAO, false, MRS_EXACT, id_aa64mmfr2_uao),
+ MRS_FIELD(ID_AA64MMFR2, CnP, false, MRS_EXACT, id_aa64mmfr2_cnp),
+ MRS_FIELD_END,
+};
+
+
+/* ID_AA64PFR0_EL1 */
+static struct mrs_field_value id_aa64pfr0_csv3[] = {
+ MRS_FIELD_VALUE(ID_AA64PFR0_CSV3_NONE, ""),
+ MRS_FIELD_VALUE(ID_AA64PFR0_CSV3_ISOLATED, "CSV3"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64pfr0_csv2[] = {
+ MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_NONE, ""),
+ MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_ISOLATED, "CSV2"),
+ MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_SCXTNUM, "SCXTNUM"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64pfr0_dit[] = {
+ MRS_FIELD_VALUE(ID_AA64PFR0_DIT_NONE, ""),
+ MRS_FIELD_VALUE(ID_AA64PFR0_DIT_PSTATE, "PSTATE.DIT"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64pfr0_amu[] = {
+ MRS_FIELD_VALUE(ID_AA64PFR0_AMU_NONE, ""),
+ MRS_FIELD_VALUE(ID_AA64PFR0_AMU_V1, "AMUv1"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64pfr0_mpam[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, MPAM, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64pfr0_sel2[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, SEL2, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64pfr0_sve[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, SVE, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64pfr0_ras[] = {
+ MRS_FIELD_VALUE(ID_AA64PFR0_RAS_NONE, ""),
+ MRS_FIELD_VALUE(ID_AA64PFR0_RAS_V1, "RASv1"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64pfr0_gic[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, GIC, CPUIF_NONE, CPUIF_EN),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64pfr0_advsimd[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, AdvSIMD, NONE, IMPL),
+ MRS_FIELD_VALUE(ID_AA64PFR0_AdvSIMD_HP, "AdvSIMD+HP"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64pfr0_fp[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, FP, NONE, IMPL),
+ MRS_FIELD_VALUE(ID_AA64PFR0_FP_HP, "FP+HP"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64pfr0_el3[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, EL3, NONE, 64),
+ MRS_FIELD_VALUE(ID_AA64PFR0_EL3_64_32, "EL3 32"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64pfr0_el2[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, EL2, NONE, 64),
+ MRS_FIELD_VALUE(ID_AA64PFR0_EL2_64_32, "EL2 32"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64pfr0_el1[] = {
+ MRS_FIELD_VALUE(ID_AA64PFR0_EL1_64, "EL1"),
+ MRS_FIELD_VALUE(ID_AA64PFR0_EL1_64_32, "EL1 32"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64pfr0_el0[] = {
+ MRS_FIELD_VALUE(ID_AA64PFR0_EL0_64, "EL0"),
+ MRS_FIELD_VALUE(ID_AA64PFR0_EL0_64_32, "EL0 32"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field id_aa64pfr0_fields[] = {
+ MRS_FIELD(ID_AA64PFR0, CSV3, false, MRS_EXACT, id_aa64pfr0_csv3),
+ MRS_FIELD(ID_AA64PFR0, CSV2, false, MRS_EXACT, id_aa64pfr0_csv2),
+ MRS_FIELD(ID_AA64PFR0, DIT, false, MRS_EXACT, id_aa64pfr0_dit),
+ MRS_FIELD(ID_AA64PFR0, AMU, false, MRS_EXACT, id_aa64pfr0_amu),
+ MRS_FIELD(ID_AA64PFR0, MPAM, false, MRS_EXACT, id_aa64pfr0_mpam),
+ MRS_FIELD(ID_AA64PFR0, SEL2, false, MRS_EXACT, id_aa64pfr0_sel2),
+ MRS_FIELD(ID_AA64PFR0, SVE, false, MRS_EXACT, id_aa64pfr0_sve),
+ MRS_FIELD(ID_AA64PFR0, RAS, false, MRS_EXACT, id_aa64pfr0_ras),
+ MRS_FIELD(ID_AA64PFR0, GIC, false, MRS_EXACT, id_aa64pfr0_gic),
+ MRS_FIELD(ID_AA64PFR0, AdvSIMD, true, MRS_LOWER, id_aa64pfr0_advsimd),
+ MRS_FIELD(ID_AA64PFR0, FP, true, MRS_LOWER, id_aa64pfr0_fp),
+ MRS_FIELD(ID_AA64PFR0, EL3, false, MRS_EXACT, id_aa64pfr0_el3),
+ MRS_FIELD(ID_AA64PFR0, EL2, false, MRS_EXACT, id_aa64pfr0_el2),
+ MRS_FIELD(ID_AA64PFR0, EL1, false, MRS_LOWER, id_aa64pfr0_el1),
+ MRS_FIELD(ID_AA64PFR0, EL0, false, MRS_LOWER, id_aa64pfr0_el0),
+ MRS_FIELD_END,
+};
+
+
+/* ID_AA64PFR1_EL1 */
+static struct mrs_field_value id_aa64pfr1_bt[] = {
+ MRS_FIELD_VALUE(ID_AA64PFR1_BT_NONE, ""),
+ MRS_FIELD_VALUE(ID_AA64PFR1_BT_IMPL, "BTI"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64pfr1_ssbs[] = {
+ MRS_FIELD_VALUE(ID_AA64PFR1_SSBS_NONE, ""),
+ MRS_FIELD_VALUE(ID_AA64PFR1_SSBS_PSTATE, "PSTATE.SSBS"),
+ MRS_FIELD_VALUE(ID_AA64PFR1_SSBS_PSTATE_MSR, "PSTATE.SSBS MSR"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field_value id_aa64pfr1_mte[] = {
+ MRS_FIELD_VALUE(ID_AA64PFR1_MTE_NONE, ""),
+ MRS_FIELD_VALUE(ID_AA64PFR1_MTE_IMPL_EL0, "MTE EL0"),
+ MRS_FIELD_VALUE(ID_AA64PFR1_MTE_IMPL, "MTE"),
+ MRS_FIELD_VALUE_END,
+};
+
+static struct mrs_field id_aa64pfr1_fields[] = {
+ MRS_FIELD(ID_AA64PFR1, BT, false, MRS_EXACT, id_aa64pfr1_bt),
+ MRS_FIELD(ID_AA64PFR1, SSBS, false, MRS_EXACT, id_aa64pfr1_ssbs),
+ MRS_FIELD(ID_AA64PFR1, MTE, false, MRS_EXACT, id_aa64pfr1_mte),
+ MRS_FIELD_END,
+};
+
+struct mrs_user_reg {
+ u_int reg;
+ u_int CRm;
+ u_int Op2;
+ size_t offset;
+ struct mrs_field *fields;
+};
+
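+/*
+ * ID registers exposed (with sanitized values) to userspace.  The CRm and
+ * Op2 values match the encodings emulated by user_mrs_handler() below.
+ */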
+static struct mrs_user_reg user_regs[] = {
+ { /* id_aa64isar0_el1 */
+ .reg = ID_AA64ISAR0_EL1,
+ .CRm = 6,
+ .Op2 = 0,
+ .offset = __offsetof(struct cpu_desc, id_aa64isar0),
+ .fields = id_aa64isar0_fields,
+ },
+ { /* id_aa64isar1_el1 */
+ .reg = ID_AA64ISAR1_EL1,
+ .CRm = 6,
+ .Op2 = 1,
+ .offset = __offsetof(struct cpu_desc, id_aa64isar1),
+ .fields = id_aa64isar1_fields,
+ },
+ { /* id_aa64pfr0_el1 */
+ .reg = ID_AA64PFR0_EL1,
+ .CRm = 4,
+ .Op2 = 0,
+ .offset = __offsetof(struct cpu_desc, id_aa64pfr0),
+ .fields = id_aa64pfr0_fields,
+ },
+	{ /* id_aa64pfr1_el1 */
+ .reg = ID_AA64PFR1_EL1,
+ .CRm = 4,
+ .Op2 = 1,
+ .offset = __offsetof(struct cpu_desc, id_aa64pfr1),
+ .fields = id_aa64pfr1_fields,
+ },
+ { /* id_aa64dfr0_el1 */
+ .reg = ID_AA64DFR0_EL1,
+ .CRm = 5,
+ .Op2 = 0,
+ .offset = __offsetof(struct cpu_desc, id_aa64dfr0),
+ .fields = id_aa64dfr0_fields,
+ },
+ { /* id_aa64mmfr0_el1 */
+ .reg = ID_AA64MMFR0_EL1,
+ .CRm = 7,
+ .Op2 = 0,
+ .offset = __offsetof(struct cpu_desc, id_aa64mmfr0),
+ .fields = id_aa64mmfr0_fields,
+ },
+};
+
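+/* Access the ID register value stored in a cpu_desc for user_regs[idx]. */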
+#define CPU_DESC_FIELD(desc, idx) \
+ *(uint64_t *)((char *)&(desc) + user_regs[(idx)].offset)
+
+static int
+user_mrs_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
+ uint32_t esr)
+{
+ uint64_t value;
+ int CRm, Op2, i, reg;
+
+ if ((insn & MRS_MASK) != MRS_VALUE)
+ return (0);
+
+ /*
+ * We only emulate Op0 == 3, Op1 == 0, CRn == 0, CRm == {0, 4-7}.
+ * These are in the EL1 CPU identification space.
+	 * CRm == 0 holds MIDR_EL1, MPIDR_EL1, and REVIDR_EL1.
+ * CRm == {4-7} holds the ID_AA64 registers.
+ *
+ * For full details see the ARMv8 ARM (ARM DDI 0487C.a)
+ * Table D9-2 System instruction encodings for non-Debug System
+ * register accesses.
+ */
+ if (mrs_Op0(insn) != 3 || mrs_Op1(insn) != 0 || mrs_CRn(insn) != 0)
+ return (0);
+
+ CRm = mrs_CRm(insn);
+ if (CRm > 7 || (CRm < 4 && CRm != 0))
+ return (0);
+
+ Op2 = mrs_Op2(insn);
+ value = 0;
+
+ for (i = 0; i < nitems(user_regs); i++) {
+ if (user_regs[i].CRm == CRm && user_regs[i].Op2 == Op2) {
+ value = CPU_DESC_FIELD(user_cpu_desc, i);
+ break;
+ }
+ }
+
+ if (CRm == 0) {
+ switch (Op2) {
+ case 0:
+ value = READ_SPECIALREG(midr_el1);
+ break;
+ case 5:
+ value = READ_SPECIALREG(mpidr_el1);
+ break;
+ case 6:
+ value = READ_SPECIALREG(revidr_el1);
+ break;
+ default:
+ return (0);
+ }
+ }
+
+	/*
+	 * We will handle this instruction; advance to the next one so we
+	 * don't trap here again.
+	 */
+ frame->tf_elr += INSN_SIZE;
+
+ reg = MRS_REGISTER(insn);
+ /* If reg is 31 then write to xzr, i.e. do nothing */
+ if (reg == 31)
+ return (1);
+
+ if (reg < nitems(frame->tf_x))
+ frame->tf_x[reg] = value;
+ else if (reg == 30)
+ frame->tf_lr = value;
+
+ return (1);
+}
+
+bool
+extract_user_id_field(u_int reg, u_int field_shift, uint8_t *val)
+{
+ uint64_t value;
+ int i;
+
+ for (i = 0; i < nitems(user_regs); i++) {
+ if (user_regs[i].reg == reg) {
+ value = CPU_DESC_FIELD(user_cpu_desc, i);
+ *val = value >> field_shift;
+ return (true);
+ }
+ }
+
+ return (false);
+}
+
+bool
+get_kernel_reg(u_int reg, uint64_t *val)
+{
+ int i;
+
+ for (i = 0; i < nitems(user_regs); i++) {
+ if (user_regs[i].reg == reg) {
+ *val = CPU_DESC_FIELD(kern_cpu_desc, i);
+ return (true);
+ }
+ }
+
+ return (false);
+}
+
+static uint64_t
+update_lower_register(uint64_t val, uint64_t new_val, u_int shift,
+ int width, bool sign)
+{
+ uint64_t mask;
+ uint64_t new_field, old_field;
+ bool update;
+
+ KASSERT(width > 0 && width < 64, ("%s: Invalid width %d", __func__,
+ width));
+
+ mask = (1ul << width) - 1;
+ new_field = (new_val >> shift) & mask;
+ old_field = (val >> shift) & mask;
+
+ update = false;
+ if (sign) {
+		/*
+		 * The field is signed.  Toggle the sign bit so the comparison
+		 * works on unsigned values: positive numbers (sign bit clear)
+		 * then compare larger than negative numbers (sign bit set).
+		 */
+ if ((new_field ^ (1ul << (width - 1))) <
+ (old_field ^ (1ul << (width - 1))))
+ update = true;
+ } else {
+ if (new_field < old_field)
+ update = true;
+ }
+
+ if (update) {
+ val &= ~(mask << shift);
+ val |= new_field << shift;
+ }
+
+ return (val);
+}
+
+void
+update_special_regs(u_int cpu)
+{
+ struct mrs_field *fields;
+ uint64_t user_reg, kern_reg, value;
+ int i, j;
+
+ if (cpu == 0) {
+ /* Create a user visible cpu description with safe values */
+ memset(&user_cpu_desc, 0, sizeof(user_cpu_desc));
+ /* Safe values for these registers */
+ user_cpu_desc.id_aa64pfr0 = ID_AA64PFR0_AdvSIMD_NONE |
+ ID_AA64PFR0_FP_NONE | ID_AA64PFR0_EL1_64 |
+ ID_AA64PFR0_EL0_64;
+ user_cpu_desc.id_aa64dfr0 = ID_AA64DFR0_DebugVer_8;
+ }
+
+ for (i = 0; i < nitems(user_regs); i++) {
+ value = CPU_DESC_FIELD(cpu_desc[cpu], i);
+ if (cpu == 0) {
+ kern_reg = value;
+ user_reg = value;
+ } else {
+ kern_reg = CPU_DESC_FIELD(kern_cpu_desc, i);
+ user_reg = CPU_DESC_FIELD(user_cpu_desc, i);
+ }
+
+ fields = user_regs[i].fields;
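+		/*
+		 * The user-visible value of an MRS_EXACT field is pinned to
+		 * the value encoded in the field type; an MRS_LOWER field
+		 * keeps the lowest value seen on any CPU.  The kernel view
+		 * always keeps the lowest value.
+		 */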
+ for (j = 0; fields[j].type != 0; j++) {
+ switch (fields[j].type & MRS_TYPE_MASK) {
+ case MRS_EXACT:
+ user_reg &= ~(0xful << fields[j].shift);
+ user_reg |=
+ (uint64_t)MRS_EXACT_FIELD(fields[j].type) <<
+ fields[j].shift;
+ break;
+ case MRS_LOWER:
+ user_reg = update_lower_register(user_reg,
+ value, fields[j].shift, 4, fields[j].sign);
+ break;
+ default:
+ panic("Invalid field type: %d", fields[j].type);
+ }
+ kern_reg = update_lower_register(kern_reg, value,
+ fields[j].shift, 4, fields[j].sign);
+ }
+
+ CPU_DESC_FIELD(kern_cpu_desc, i) = kern_reg;
+ CPU_DESC_FIELD(user_cpu_desc, i) = user_reg;
+ }
+}
+
+/* HWCAP */
+extern u_long elf_hwcap;
+bool __read_frequently lse_supported = false;
+
+bool __read_frequently icache_aliasing = false;
+bool __read_frequently icache_vmid = false;
+
+int64_t dcache_line_size; /* The minimum D cache line size */
+int64_t icache_line_size; /* The minimum I cache line size */
+int64_t idcache_line_size; /* The minimum cache line size */
+
+static void
+identify_cpu_sysinit(void *dummy __unused)
+{
+ int cpu;
+ bool dic, idc;
+
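+	/* Start from the tunables; clear if any CPU lacks the CTR_EL0 bits. */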
+ dic = (allow_dic != 0);
+ idc = (allow_idc != 0);
+
+ CPU_FOREACH(cpu) {
+ check_cpu_regs(cpu);
+ if (cpu != 0)
+ update_special_regs(cpu);
+
+ if (CTR_DIC_VAL(cpu_desc[cpu].ctr) == 0)
+ dic = false;
+ if (CTR_IDC_VAL(cpu_desc[cpu].ctr) == 0)
+ idc = false;
+ }
+
+ /* Exposed to userspace as AT_HWCAP */
+ elf_hwcap = parse_cpu_features_hwcap();
+
+ if (dic && idc) {
+ arm64_icache_sync_range = &arm64_dic_idc_icache_sync_range;
+ if (bootverbose)
+ printf("Enabling DIC & IDC ICache sync\n");
+ }
+
+ if ((elf_hwcap & HWCAP_ATOMICS) != 0) {
+ lse_supported = true;
+ if (bootverbose)
+ printf("Enabling LSE atomics in the kernel\n");
+ }
+#ifdef LSE_ATOMICS
+ if (!lse_supported)
+ panic("CPU does not support LSE atomic instructions");
+#endif
+
+ install_undef_handler(true, user_mrs_handler);
+}
+SYSINIT(identify_cpu, SI_SUB_CPU, SI_ORDER_ANY, identify_cpu_sysinit, NULL);
+
+static void
+cpu_features_sysinit(void *dummy __unused)
+{
+ u_int cpu;
+
+ CPU_FOREACH(cpu)
+ print_cpu_features(cpu);
+}
+SYSINIT(cpu_features, SI_SUB_SMP, SI_ORDER_ANY, cpu_features_sysinit, NULL);
+
+static u_long
+parse_cpu_features_hwcap(void)
+{
+ u_long hwcap = 0;
+
+ if (ID_AA64ISAR0_DP_VAL(user_cpu_desc.id_aa64isar0) ==
+ ID_AA64ISAR0_DP_IMPL)
+ hwcap |= HWCAP_ASIMDDP;
+
+ if (ID_AA64ISAR0_SM4_VAL(user_cpu_desc.id_aa64isar0) ==
+ ID_AA64ISAR0_SM4_IMPL)
+ hwcap |= HWCAP_SM4;
+
+ if (ID_AA64ISAR0_SM3_VAL(user_cpu_desc.id_aa64isar0) ==
+ ID_AA64ISAR0_SM3_IMPL)
+ hwcap |= HWCAP_SM3;
+
+ if (ID_AA64ISAR0_RDM_VAL(user_cpu_desc.id_aa64isar0) ==
+ ID_AA64ISAR0_RDM_IMPL)
+ hwcap |= HWCAP_ASIMDRDM;
+
+ if (ID_AA64ISAR0_Atomic_VAL(user_cpu_desc.id_aa64isar0) ==
+ ID_AA64ISAR0_Atomic_IMPL)
+ hwcap |= HWCAP_ATOMICS;
+
+ if (ID_AA64ISAR0_CRC32_VAL(user_cpu_desc.id_aa64isar0) ==
+ ID_AA64ISAR0_CRC32_BASE)
+ hwcap |= HWCAP_CRC32;
+
+ switch (ID_AA64ISAR0_SHA2_VAL(user_cpu_desc.id_aa64isar0)) {
+ case ID_AA64ISAR0_SHA2_BASE:
+ hwcap |= HWCAP_SHA2;
+ break;
+ case ID_AA64ISAR0_SHA2_512:
+ hwcap |= HWCAP_SHA2 | HWCAP_SHA512;
+ break;
+ default:
+ break;
+ }
+
+ if (ID_AA64ISAR0_SHA1_VAL(user_cpu_desc.id_aa64isar0))
+ hwcap |= HWCAP_SHA1;
+
+ switch (ID_AA64ISAR0_AES_VAL(user_cpu_desc.id_aa64isar0)) {
+ case ID_AA64ISAR0_AES_BASE:
+ hwcap |= HWCAP_AES;
+ break;
+ case ID_AA64ISAR0_AES_PMULL:
+ hwcap |= HWCAP_PMULL | HWCAP_AES;
+ break;
+ default:
+ break;
+ }
+
+ if (ID_AA64ISAR1_LRCPC_VAL(user_cpu_desc.id_aa64isar1) ==
+ ID_AA64ISAR1_LRCPC_RCPC_8_3)
+ hwcap |= HWCAP_LRCPC;
+
+ if (ID_AA64ISAR1_FCMA_VAL(user_cpu_desc.id_aa64isar1) ==
+ ID_AA64ISAR1_FCMA_IMPL)
+ hwcap |= HWCAP_FCMA;
+
+ if (ID_AA64ISAR1_JSCVT_VAL(user_cpu_desc.id_aa64isar1) ==
+ ID_AA64ISAR1_JSCVT_IMPL)
+ hwcap |= HWCAP_JSCVT;
+
+ if (ID_AA64ISAR1_DPB_VAL(user_cpu_desc.id_aa64isar1) ==
+ ID_AA64ISAR1_DPB_DCCVAP)
+ hwcap |= HWCAP_DCPOP;
+
+ if (ID_AA64PFR0_SVE_VAL(user_cpu_desc.id_aa64pfr0) ==
+ ID_AA64PFR0_SVE_IMPL)
+ hwcap |= HWCAP_SVE;
+
+ switch (ID_AA64PFR0_AdvSIMD_VAL(user_cpu_desc.id_aa64pfr0)) {
+ case ID_AA64PFR0_AdvSIMD_IMPL:
+ hwcap |= HWCAP_ASIMD;
+ break;
+ case ID_AA64PFR0_AdvSIMD_HP:
+		hwcap |= HWCAP_ASIMD | HWCAP_ASIMDHP;
+ break;
+ default:
+ break;
+ }
+
+ switch (ID_AA64PFR0_FP_VAL(user_cpu_desc.id_aa64pfr0)) {
+ case ID_AA64PFR0_FP_IMPL:
+ hwcap |= HWCAP_FP;
+ break;
+ case ID_AA64PFR0_FP_HP:
+ hwcap |= HWCAP_FP | HWCAP_FPHP;
+ break;
+ default:
+ break;
+ }
+
+ return (hwcap);
+}
+
+static void
+print_ctr_fields(struct sbuf *sb, uint64_t reg, void *arg)
+{
+
+ sbuf_printf(sb, "%u byte D-cacheline,", CTR_DLINE_SIZE(reg));
+ sbuf_printf(sb, "%u byte I-cacheline,", CTR_ILINE_SIZE(reg));
+ reg &= ~(CTR_DLINE_MASK | CTR_ILINE_MASK);
+
+ switch(CTR_L1IP_VAL(reg)) {
+ case CTR_L1IP_VPIPT:
+ sbuf_printf(sb, "VPIPT");
+ break;
+ case CTR_L1IP_AIVIVT:
+ sbuf_printf(sb, "AIVIVT");
+ break;
+ case CTR_L1IP_VIPT:
+ sbuf_printf(sb, "VIPT");
+ break;
+ case CTR_L1IP_PIPT:
+ sbuf_printf(sb, "PIPT");
+ break;
+ }
+ sbuf_printf(sb, " ICache,");
+ reg &= ~CTR_L1IP_MASK;
+
+ sbuf_printf(sb, "%d byte ERG,", CTR_ERG_SIZE(reg));
+ sbuf_printf(sb, "%d byte CWG", CTR_CWG_SIZE(reg));
+ reg &= ~(CTR_ERG_MASK | CTR_CWG_MASK);
+
+ if (CTR_IDC_VAL(reg) != 0)
+ sbuf_printf(sb, ",IDC");
+ if (CTR_DIC_VAL(reg) != 0)
+ sbuf_printf(sb, ",DIC");
+ reg &= ~(CTR_IDC_MASK | CTR_DIC_MASK);
+ reg &= ~CTR_RES1;
+
+ if (reg != 0)
+ sbuf_printf(sb, ",%lx", reg);
+}
+
+static void
+print_register(struct sbuf *sb, const char *reg_name, uint64_t reg,
+ void (*print_fields)(struct sbuf *, uint64_t, void *), void *arg)
+{
+
+ sbuf_printf(sb, "%29s = <", reg_name);
+
+ print_fields(sb, reg, arg);
+
+ sbuf_finish(sb);
+ printf("%s>\n", sbuf_data(sb));
+ sbuf_clear(sb);
+}
+
+static void
+print_id_fields(struct sbuf *sb, uint64_t reg, void *arg)
+{
+ struct mrs_field *fields = arg;
+ struct mrs_field_value *fv;
+ int field, i, j, printed;
+
+#define SEP_STR ((printed++) == 0) ? "" : ","
+ printed = 0;
+ for (i = 0; fields[i].type != 0; i++) {
+ fv = fields[i].values;
+
+ /* TODO: Handle with an unknown message */
+ if (fv == NULL)
+ continue;
+
+ field = (reg & fields[i].mask) >> fields[i].shift;
+ for (j = 0; fv[j].desc != NULL; j++) {
+ if ((fv[j].value >> fields[i].shift) != field)
+ continue;
+
+ if (fv[j].desc[0] != '\0')
+ sbuf_printf(sb, "%s%s", SEP_STR, fv[j].desc);
+ break;
+ }
+ if (fv[j].desc == NULL)
+ sbuf_printf(sb, "%sUnknown %s(%x)", SEP_STR,
+ fields[i].name, field);
+
+ reg &= ~(0xful << fields[i].shift);
+ }
+
+ if (reg != 0)
+ sbuf_printf(sb, "%s%#lx", SEP_STR, reg);
+#undef SEP_STR
+}
+
+static void
+print_id_register(struct sbuf *sb, const char *reg_name, uint64_t reg,
+ struct mrs_field *fields)
+{
+
+ print_register(sb, reg_name, reg, print_id_fields, fields);
+}
+
+static void
+print_cpu_features(u_int cpu)
+{
+ struct sbuf *sb;
+
+ sb = sbuf_new_auto();
+ sbuf_printf(sb, "CPU%3d: %s %s r%dp%d", cpu,
+ cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name,
+ cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision);
+
+ sbuf_cat(sb, " affinity:");
+ switch(cpu_aff_levels) {
+ default:
+ case 4:
+ sbuf_printf(sb, " %2d", CPU_AFF3(cpu_desc[cpu].mpidr));
+ /* FALLTHROUGH */
+ case 3:
+ sbuf_printf(sb, " %2d", CPU_AFF2(cpu_desc[cpu].mpidr));
+ /* FALLTHROUGH */
+ case 2:
+ sbuf_printf(sb, " %2d", CPU_AFF1(cpu_desc[cpu].mpidr));
+ /* FALLTHROUGH */
+ case 1:
+ case 0: /* On UP this will be zero */
+ sbuf_printf(sb, " %2d", CPU_AFF0(cpu_desc[cpu].mpidr));
+ break;
+ }
+ sbuf_finish(sb);
+ printf("%s\n", sbuf_data(sb));
+ sbuf_clear(sb);
+
+	/*
+	 * There is a hardware erratum where, if one CPU is performing a TLB
+	 * invalidation while another is performing a store-exclusive, the
+	 * store-exclusive may return the wrong status. A workaround appears
+	 * to be to use an IPI to invalidate on each CPU; however, given the
+	 * limited number of affected units (pass 1.1 is the evaluation
+	 * hardware revision) and the lack of information from Cavium, this
+	 * has not been implemented.
+	 *
+	 * At the time of writing the only information is from:
+	 * https://lkml.org/lkml/2016/8/4/722
+	 */
+ /*
+ * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 on its own also
+ * triggers on pass 2.0+.
+ */
+ if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 &&
+ CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1)
+ printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known "
+ "hardware bugs that may cause the incorrect operation of "
+ "atomic operations.\n");
+
+ /* Cache Type Register */
+ if (cpu == 0 || (cpu_print_regs & PRINT_CTR_EL0) != 0) {
+ print_register(sb, "Cache Type",
+ cpu_desc[cpu].ctr, print_ctr_fields, NULL);
+ }
+
+ /* AArch64 Instruction Set Attribute Register 0 */
+ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0)
+ print_id_register(sb, "Instruction Set Attributes 0",
+ cpu_desc[cpu].id_aa64isar0, id_aa64isar0_fields);
+
+ /* AArch64 Instruction Set Attribute Register 1 */
+ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0)
+ print_id_register(sb, "Instruction Set Attributes 1",
+ cpu_desc[cpu].id_aa64isar1, id_aa64isar1_fields);
+
+ /* AArch64 Processor Feature Register 0 */
+ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0)
+ print_id_register(sb, "Processor Features 0",
+ cpu_desc[cpu].id_aa64pfr0, id_aa64pfr0_fields);
+
+ /* AArch64 Processor Feature Register 1 */
+ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0)
+ print_id_register(sb, "Processor Features 1",
+ cpu_desc[cpu].id_aa64pfr1, id_aa64pfr1_fields);
+
+ /* AArch64 Memory Model Feature Register 0 */
+ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0)
+ print_id_register(sb, "Memory Model Features 0",
+ cpu_desc[cpu].id_aa64mmfr0, id_aa64mmfr0_fields);
+
+ /* AArch64 Memory Model Feature Register 1 */
+ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0)
+ print_id_register(sb, "Memory Model Features 1",
+ cpu_desc[cpu].id_aa64mmfr1, id_aa64mmfr1_fields);
+
+ /* AArch64 Memory Model Feature Register 2 */
+ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR2) != 0)
+ print_id_register(sb, "Memory Model Features 2",
+ cpu_desc[cpu].id_aa64mmfr2, id_aa64mmfr2_fields);
+
+ /* AArch64 Debug Feature Register 0 */
+ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0)
+ print_id_register(sb, "Debug Features 0",
+ cpu_desc[cpu].id_aa64dfr0, id_aa64dfr0_fields);
+
+	/* AArch64 Debug Feature Register 1 */
+ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0)
+ print_id_register(sb, "Debug Features 1",
+ cpu_desc[cpu].id_aa64dfr1, id_aa64dfr1_fields);
+
+ /* AArch64 Auxiliary Feature Register 0 */
+ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0)
+ print_id_register(sb, "Auxiliary Features 0",
+ cpu_desc[cpu].id_aa64afr0, id_aa64afr0_fields);
+
+ /* AArch64 Auxiliary Feature Register 1 */
+ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0)
+ print_id_register(sb, "Auxiliary Features 1",
+ cpu_desc[cpu].id_aa64afr1, id_aa64afr1_fields);
+
+ sbuf_delete(sb);
+ sb = NULL;
+#undef SEP_STR
+}
+
+void
+identify_cache(uint64_t ctr)
+{
+
+ /* Identify the L1 cache type */
+ switch (CTR_L1IP_VAL(ctr)) {
+ case CTR_L1IP_PIPT:
+ break;
+ case CTR_L1IP_VPIPT:
+ icache_vmid = true;
+ break;
+ default:
+ case CTR_L1IP_VIPT:
+ icache_aliasing = true;
+ break;
+ }
+
+ if (dcache_line_size == 0) {
+ KASSERT(icache_line_size == 0, ("%s: i-cacheline size set: %ld",
+ __func__, icache_line_size));
+
+ /* Get the D cache line size */
+ dcache_line_size = CTR_DLINE_SIZE(ctr);
+ /* And the same for the I cache */
+ icache_line_size = CTR_ILINE_SIZE(ctr);
+
+ idcache_line_size = MIN(dcache_line_size, icache_line_size);
+ }
+
+ if (dcache_line_size != CTR_DLINE_SIZE(ctr)) {
+ printf("WARNING: D-cacheline size mismatch %ld != %d\n",
+ dcache_line_size, CTR_DLINE_SIZE(ctr));
+ }
+
+ if (icache_line_size != CTR_ILINE_SIZE(ctr)) {
+ printf("WARNING: I-cacheline size mismatch %ld != %d\n",
+ icache_line_size, CTR_ILINE_SIZE(ctr));
+ }
+}
+
+void
+identify_cpu(u_int cpu)
+{
+ u_int midr;
+ u_int impl_id;
+ u_int part_id;
+ size_t i;
+ const struct cpu_parts *cpu_partsp = NULL;
+
+ midr = get_midr();
+
+ impl_id = CPU_IMPL(midr);
+ for (i = 0; i < nitems(cpu_implementers); i++) {
+ if (impl_id == cpu_implementers[i].impl_id ||
+ cpu_implementers[i].impl_id == 0) {
+ cpu_desc[cpu].cpu_impl = impl_id;
+ cpu_desc[cpu].cpu_impl_name =
+ cpu_implementers[i].impl_name;
+ cpu_partsp = cpu_implementers[i].cpu_parts;
+ break;
+ }
+ }
+
+ part_id = CPU_PART(midr);
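+	/* The parts table ends with CPU_PART_NONE; part_id 0 matches anything. */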
+ for (i = 0; &cpu_partsp[i] != NULL; i++) {
+ if (part_id == cpu_partsp[i].part_id ||
+ cpu_partsp[i].part_id == 0) {
+ cpu_desc[cpu].cpu_part_num = part_id;
+ cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
+ break;
+ }
+ }
+
+ cpu_desc[cpu].cpu_revision = CPU_REV(midr);
+ cpu_desc[cpu].cpu_variant = CPU_VAR(midr);
+
+ snprintf(cpu_model, sizeof(cpu_model), "%s %s r%dp%d",
+ cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name,
+ cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision);
+
+ /* Save affinity for current CPU */
+ cpu_desc[cpu].mpidr = get_mpidr();
+ CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK;
+
+ cpu_desc[cpu].ctr = READ_SPECIALREG(ctr_el0);
+ cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
+ cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(id_aa64dfr1_el1);
+ cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(id_aa64isar0_el1);
+ cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(id_aa64isar1_el1);
+ cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(id_aa64mmfr0_el1);
+ cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
+ cpu_desc[cpu].id_aa64mmfr2 = READ_SPECIALREG(id_aa64mmfr2_el1);
+ cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(id_aa64pfr0_el1);
+ cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(id_aa64pfr1_el1);
+}
+
+static void
+check_cpu_regs(u_int cpu)
+{
+
+ switch (cpu_aff_levels) {
+ case 0:
+ if (CPU_AFF0(cpu_desc[cpu].mpidr) !=
+ CPU_AFF0(cpu_desc[0].mpidr))
+ cpu_aff_levels = 1;
+ /* FALLTHROUGH */
+ case 1:
+ if (CPU_AFF1(cpu_desc[cpu].mpidr) !=
+ CPU_AFF1(cpu_desc[0].mpidr))
+ cpu_aff_levels = 2;
+ /* FALLTHROUGH */
+ case 2:
+ if (CPU_AFF2(cpu_desc[cpu].mpidr) !=
+ CPU_AFF2(cpu_desc[0].mpidr))
+ cpu_aff_levels = 3;
+ /* FALLTHROUGH */
+ case 3:
+ if (CPU_AFF3(cpu_desc[cpu].mpidr) !=
+ CPU_AFF3(cpu_desc[0].mpidr))
+ cpu_aff_levels = 4;
+ break;
+ }
+
+ if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0)
+ cpu_print_regs |= PRINT_ID_AA64_AFR0;
+ if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1)
+ cpu_print_regs |= PRINT_ID_AA64_AFR1;
+
+ if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0)
+ cpu_print_regs |= PRINT_ID_AA64_DFR0;
+ if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1)
+ cpu_print_regs |= PRINT_ID_AA64_DFR1;
+
+ if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0)
+ cpu_print_regs |= PRINT_ID_AA64_ISAR0;
+ if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1)
+ cpu_print_regs |= PRINT_ID_AA64_ISAR1;
+
+ if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0)
+ cpu_print_regs |= PRINT_ID_AA64_MMFR0;
+ if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1)
+ cpu_print_regs |= PRINT_ID_AA64_MMFR1;
+ if (cpu_desc[cpu].id_aa64mmfr2 != cpu_desc[0].id_aa64mmfr2)
+ cpu_print_regs |= PRINT_ID_AA64_MMFR2;
+
+ if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0)
+ cpu_print_regs |= PRINT_ID_AA64_PFR0;
+ if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1)
+ cpu_print_regs |= PRINT_ID_AA64_PFR1;
+
+ if (cpu_desc[cpu].ctr != cpu_desc[0].ctr) {
+ /*
+ * If the cache type register is different we may
+ * have a different l1 cache type.
+ */
+ identify_cache(cpu_desc[cpu].ctr);
+ cpu_print_regs |= PRINT_CTR_EL0;
+ }
+}
diff --git a/sys/arm64/arm64/in_cksum.c b/sys/arm64/arm64/in_cksum.c
new file mode 100644
index 000000000000..ae02e91d9203
--- /dev/null
+++ b/sys/arm64/arm64/in_cksum.c
@@ -0,0 +1,241 @@
+/* $NetBSD: in_cksum.c,v 1.7 1997/09/02 13:18:15 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1988, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ * Copyright (c) 1996
+ * Matt Thomas <matt@3am-software.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in_cksum.c 8.1 (Berkeley) 6/10/93
+ */
+
+#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/mbuf.h>
+#include <sys/systm.h>
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <machine/in_cksum.h>
+
+/*
+ * Checksum routine for Internet Protocol family headers
+ * (Portable Alpha version).
+ *
+ * This routine is very heavily used in the network
+ * code and should be modified for each CPU to be as fast as possible.
+ */
+
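+/*
+ * REDUCE32 folds the 64-bit accumulator to 32 bits by summing its four
+ * 16-bit chunks; REDUCE16 folds it to 16 bits with an end-around carry.
+ */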
+#define ADDCARRY(x) (x > 65535 ? x -= 65535 : x)
+#define REDUCE32 \
+ { \
+ q_util.q = sum; \
+ sum = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
+ }
+#define REDUCE16 \
+ { \
+ q_util.q = sum; \
+ l_util.l = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
+ sum = l_util.s[0] + l_util.s[1]; \
+ ADDCARRY(sum); \
+ }
+
+static const u_int32_t in_masks[] = {
+ /*0 bytes*/ /*1 byte*/ /*2 bytes*/ /*3 bytes*/
+ 0x00000000, 0x000000FF, 0x0000FFFF, 0x00FFFFFF, /* offset 0 */
+ 0x00000000, 0x0000FF00, 0x00FFFF00, 0xFFFFFF00, /* offset 1 */
+ 0x00000000, 0x00FF0000, 0xFFFF0000, 0xFFFF0000, /* offset 2 */
+ 0x00000000, 0xFF000000, 0xFF000000, 0xFF000000, /* offset 3 */
+};
+
+union l_util {
+ u_int16_t s[2];
+ u_int32_t l;
+};
+union q_util {
+ u_int16_t s[4];
+ u_int32_t l[2];
+ u_int64_t q;
+};
+
+static u_int64_t
+in_cksumdata(const void *buf, int len)
+{
+ const u_int32_t *lw = (const u_int32_t *) buf;
+ u_int64_t sum = 0;
+ u_int64_t prefilled;
+ int offset;
+ union q_util q_util;
+
+ if ((3 & (long) lw) == 0 && len == 20) {
+ sum = (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3] + lw[4];
+ REDUCE32;
+ return sum;
+ }
+
+ if ((offset = 3 & (long) lw) != 0) {
+ const u_int32_t *masks = in_masks + (offset << 2);
+ lw = (u_int32_t *) (((long) lw) - offset);
+ sum = *lw++ & masks[len >= 3 ? 3 : len];
+ len -= 4 - offset;
+ if (len <= 0) {
+ REDUCE32;
+ return sum;
+ }
+ }
+#if 0
+ /*
+ * Force to cache line boundary.
+ */
+ offset = 32 - (0x1f & (long) lw);
+ if (offset < 32 && len > offset) {
+ len -= offset;
+ if (4 & offset) {
+ sum += (u_int64_t) lw[0];
+ lw += 1;
+ }
+ if (8 & offset) {
+ sum += (u_int64_t) lw[0] + lw[1];
+ lw += 2;
+ }
+ if (16 & offset) {
+ sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+ lw += 4;
+ }
+ }
+#endif
+ /*
+	 * Touch the first word of the next cache line (prefilling) to start
+	 * its load early, then add in the current cache line and carry the
+	 * prefetched word into the next loop iteration.
+ */
+ prefilled = lw[0];
+ while ((len -= 32) >= 4) {
+ u_int64_t prefilling = lw[8];
+ sum += prefilled + lw[1] + lw[2] + lw[3]
+ + lw[4] + lw[5] + lw[6] + lw[7];
+ lw += 8;
+ prefilled = prefilling;
+ }
+ if (len >= 0) {
+ sum += prefilled + lw[1] + lw[2] + lw[3]
+ + lw[4] + lw[5] + lw[6] + lw[7];
+ lw += 8;
+ } else {
+ len += 32;
+ }
+ while ((len -= 16) >= 0) {
+ sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+ lw += 4;
+ }
+ len += 16;
+ while ((len -= 4) >= 0) {
+ sum += (u_int64_t) *lw++;
+ }
+ len += 4;
+ if (len > 0)
+ sum += (u_int64_t) (in_masks[len] & *lw);
+ REDUCE32;
+ return sum;
+}
+
+u_short
+in_addword(u_short a, u_short b)
+{
+ u_int64_t sum = a + b;
+
+ ADDCARRY(sum);
+ return (sum);
+}
+
+u_short
+in_pseudo(u_int32_t a, u_int32_t b, u_int32_t c)
+{
+ u_int64_t sum;
+ union q_util q_util;
+ union l_util l_util;
+
+ sum = (u_int64_t) a + b + c;
+ REDUCE16;
+ return (sum);
+}
+
+u_short
+in_cksum_skip(struct mbuf *m, int len, int skip)
+{
+ u_int64_t sum = 0;
+ int mlen = 0;
+ int clen = 0;
+ caddr_t addr;
+ union q_util q_util;
+ union l_util l_util;
+
+ len -= skip;
+ for (; skip && m; m = m->m_next) {
+ if (m->m_len > skip) {
+ mlen = m->m_len - skip;
+ addr = mtod(m, caddr_t) + skip;
+ goto skip_start;
+ } else {
+ skip -= m->m_len;
+ }
+ }
+
+ for (; m && len; m = m->m_next) {
+ if (m->m_len == 0)
+ continue;
+ mlen = m->m_len;
+ addr = mtod(m, caddr_t);
+skip_start:
+ if (len < mlen)
+ mlen = len;
+ if ((clen ^ (long) addr) & 1)
+ sum += in_cksumdata(addr, mlen) << 8;
+ else
+ sum += in_cksumdata(addr, mlen);
+
+ clen += mlen;
+ len -= mlen;
+ }
+ REDUCE16;
+ return (~sum & 0xffff);
+}
+
+u_int in_cksum_hdr(const struct ip *ip)
+{
+ u_int64_t sum = in_cksumdata(ip, sizeof(struct ip));
+ union q_util q_util;
+ union l_util l_util;
+ REDUCE16;
+ return (~sum & 0xffff);
+}
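
The checksum path above accumulates 32-bit words into a wide 64-bit sum and only folds the carries at the end (REDUCE32/REDUCE16 plus ADDCARRY). A minimal user-space sketch of that fold, using made-up sample words and ignoring the byte-order and odd-offset handling done by in_cksum_skip():

#include <stdint.h>
#include <stdio.h>

static uint16_t
fold(uint64_t sum)
{
	/* REDUCE16 + ADDCARRY equivalent: fold the wide sum down to 16 bits. */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return ((uint16_t)sum);
}

int
main(void)
{
	/* Arbitrary example "header" of five 32-bit words (20 bytes). */
	uint32_t words[5] = { 0x45000054, 0x1c468340, 0x4006b1e6,
	    0xac100a63, 0xac100a0c };
	uint64_t sum = 0;
	int i;

	for (i = 0; i < 5; i++)
		sum += words[i];	/* wide accumulate; carries handled at the end */
	printf("folded checksum = 0x%04x\n", (unsigned int)(uint16_t)~fold(sum));
	return (0);
}
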
diff --git a/sys/arm64/arm64/locore.S b/sys/arm64/arm64/locore.S
new file mode 100644
index 000000000000..b9147df32815
--- /dev/null
+++ b/sys/arm64/arm64/locore.S
@@ -0,0 +1,859 @@
+/*-
+ * Copyright (c) 2012-2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include "assym.inc"
+#include "opt_kstack_pages.h"
+#include <sys/syscall.h>
+#include <machine/asm.h>
+#include <machine/armreg.h>
+#include <machine/hypervisor.h>
+#include <machine/param.h>
+#include <machine/pte.h>
+#include <machine/vm.h>
+#include <machine/vmparam.h>
+
+#define VIRT_BITS 48
+#define DMAP_TABLES ((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)
+
+ .globl kernbase
+ .set kernbase, KERNBASE
+
+
+/* U-Boot booti related constants. */
+#if defined(LINUX_BOOT_ABI)
+#define FDT_MAGIC 0xEDFE0DD0 /* FDT blob Magic */
+
+#ifndef UBOOT_IMAGE_OFFSET
+#define UBOOT_IMAGE_OFFSET 0 /* Image offset from start of */
+#endif /* 2 MiB page */
+
+#ifndef UBOOT_IMAGE_SIZE /* Total size of image */
+#define UBOOT_IMAGE_SIZE _end - _start
+#endif
+
+#ifndef UBOOT_IMAGE_FLAGS
+#define UBOOT_IMAGE_FLAGS 0 /* LE kernel, unspecified */
+#endif /* page size */
+#endif /* defined(LINUX_BOOT_ABI) */
+
+/*
+ * We assume:
+ * MMU on with an identity map, or off
+ * D-Cache: off
+ * I-Cache: on or off
+ * We are loaded at a 2MiB aligned address
+ */
+
+ .text
+ .globl _start
+_start:
+#if defined(LINUX_BOOT_ABI)
+ /* U-boot image header */
+ b 1f /* code 0 */
+ .long 0 /* code 1 */
+ .quad UBOOT_IMAGE_OFFSET /* Image offset in 2 MiB page, LE */
+ .quad UBOOT_IMAGE_SIZE /* Image size, LE */
+ .quad UBOOT_IMAGE_FLAGS /* Flags for kernel. LE */
+ .quad 0 /* Reserved */
+ .quad 0 /* Reserved */
+ .quad 0 /* Reserved */
+ .long 0x644d5241 /* Magic "ARM\x64", LE */
+ .long 0 /* Reserved for PE COFF offset*/
+1:
+#endif /* defined(LINUX_BOOT_ABI) */
+
+ /* Drop to EL1 */
+ bl drop_to_el1
+
+ /*
+ * Disable the MMU. We may have entered the kernel with it on and
+ * will need to update the tables later. If this has been set up
+ * with anything other than a VA == PA map then this will fail,
+ * but in this case the code to find where we are running from
+ * would have also failed.
+ */
+ dsb sy
+ mrs x2, sctlr_el1
+ bic x2, x2, SCTLR_M
+ msr sctlr_el1, x2
+ isb
+
+ /* Set the context id */
+ msr contextidr_el1, xzr
+
+ /* Get the virt -> phys offset */
+ bl get_virt_delta
+
+ /*
+ * At this point:
+ * x29 = PA - VA
+ * x28 = Our physical load address
+ */
+
+ /* Create the page tables */
+ bl create_pagetables
+
+ /*
+ * At this point:
+ * x27 = TTBR0 table
+ * x26 = Kernel L1 table
+ * x24 = TTBR1 table
+ */
+
+ /* Enable the mmu */
+ bl start_mmu
+
+ /* Load the new ttbr0 pagetable */
+ adr x27, pagetable_l0_ttbr0
+
+ /* Jump to the virtual address space */
+ ldr x15, .Lvirtdone
+ br x15
+
+virtdone:
+ /* Set up the stack */
+ adr x25, initstack_end
+ mov sp, x25
+ sub sp, sp, #PCB_SIZE
+
+ /* Zero the BSS */
+ ldr x15, .Lbss
+ ldr x14, .Lend
+1:
+ str xzr, [x15], #8
+ cmp x15, x14
+ b.lo 1b
+
+ /* Backup the module pointer */
+ mov x1, x0
+
+ /* Make the page table base a virtual address */
+ sub x26, x26, x29
+ sub x24, x24, x29
+
+ sub sp, sp, #BOOTPARAMS_SIZE
+ mov x0, sp
+
+	/* Negate the delta so it is VA -> PA */
+ neg x29, x29
+
+ str x1, [x0, #BP_MODULEP]
+ str x26, [x0, #BP_KERN_L1PT]
+ str x29, [x0, #BP_KERN_DELTA]
+ adr x25, initstack
+ str x25, [x0, #BP_KERN_STACK]
+ str x24, [x0, #BP_KERN_L0PT]
+ str x23, [x0, #BP_BOOT_EL]
+ str x27, [x0, 40] /* kern_ttbr0 */
+
+ /* trace back starts here */
+ mov fp, #0
+ /* Branch to C code */
+ bl initarm
+ bl mi_startup
+
+ /* We should not get here */
+ brk 0
+
+ .align 3
+.Lvirtdone:
+ .quad virtdone
+.Lbss:
+ .quad __bss_start
+.Lend:
+ .quad _end
+
+#ifdef SMP
+/*
+ * mpentry(unsigned long)
+ *
+ * Called by a core when it is being brought online.
+ * The data in x0 is passed straight to init_secondary.
+ */
+ENTRY(mpentry)
+ /* Disable interrupts */
+ msr daifset, #2
+
+ /* Drop to EL1 */
+ bl drop_to_el1
+
+ /* Set the context id */
+ msr contextidr_el1, xzr
+
+ /* Load the kernel page table */
+ adr x24, pagetable_l0_ttbr1
+ /* Load the identity page table */
+	adr	x27, pagetable_l0_ttbr0_bootstrap
+
+ /* Enable the mmu */
+ bl start_mmu
+
+ /* Load the new ttbr0 pagetable */
+ adr x27, pagetable_l0_ttbr0
+
+ /* Jump to the virtual address space */
+ ldr x15, =mp_virtdone
+ br x15
+
+mp_virtdone:
+ /* Start using the AP boot stack */
+ ldr x4, =bootstack
+ ldr x4, [x4]
+ mov sp, x4
+
+ /* Load the kernel ttbr0 pagetable */
+ msr ttbr0_el1, x27
+ isb
+
+ /* Invalidate the TLB */
+ tlbi vmalle1
+ dsb sy
+ isb
+
+ b init_secondary
+END(mpentry)
+#endif
+
+/*
+ * If we are started in EL2, configure the required hypervisor
+ * registers and drop to EL1.
+ */
+drop_to_el1:
+ mrs x23, CurrentEL
+ lsr x23, x23, #2
+ cmp x23, #0x2
+ b.eq 1f
+ ret
+1:
+ /* Configure the Hypervisor */
+ mov x2, #(HCR_RW)
+ msr hcr_el2, x2
+
+ /* Load the Virtualization Process ID Register */
+ mrs x2, midr_el1
+ msr vpidr_el2, x2
+
+ /* Load the Virtualization Multiprocess ID Register */
+ mrs x2, mpidr_el1
+ msr vmpidr_el2, x2
+
+ /* Set the bits that need to be 1 in sctlr_el1 */
+ ldr x2, .Lsctlr_res1
+ msr sctlr_el1, x2
+
+ /* Don't trap to EL2 for exceptions */
+ mov x2, #CPTR_RES1
+ msr cptr_el2, x2
+
+ /* Don't trap to EL2 for CP15 traps */
+ msr hstr_el2, xzr
+
+ /* Enable access to the physical timers at EL1 */
+ mrs x2, cnthctl_el2
+ orr x2, x2, #(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN)
+ msr cnthctl_el2, x2
+
+ /* Set the counter offset to a known value */
+ msr cntvoff_el2, xzr
+
+ /* Hypervisor trap functions */
+ adr x2, hyp_vectors
+ msr vbar_el2, x2
+
+ mov x2, #(PSR_F | PSR_I | PSR_A | PSR_D | PSR_M_EL1h)
+ msr spsr_el2, x2
+
+ /* Configure GICv3 CPU interface */
+ mrs x2, id_aa64pfr0_el1
+ /* Extract GIC bits from the register */
+ ubfx x2, x2, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_BITS
+ /* GIC[3:0] == 0001 - GIC CPU interface via special regs. supported */
+ cmp x2, #(ID_AA64PFR0_GIC_CPUIF_EN >> ID_AA64PFR0_GIC_SHIFT)
+ b.ne 2f
+
+ mrs x2, icc_sre_el2
+ orr x2, x2, #ICC_SRE_EL2_EN /* Enable access from insecure EL1 */
+ orr x2, x2, #ICC_SRE_EL2_SRE /* Enable system registers */
+ msr icc_sre_el2, x2
+2:
+
+ /* Set the address to return to our return address */
+ msr elr_el2, x30
+ isb
+
+ eret
+
+ .align 3
+.Lsctlr_res1:
+ .quad SCTLR_RES1
+
+#define VECT_EMPTY \
+ .align 7; \
+ 1: b 1b
+
+ .align 11
+hyp_vectors:
+ VECT_EMPTY /* Synchronous EL2t */
+ VECT_EMPTY /* IRQ EL2t */
+ VECT_EMPTY /* FIQ EL2t */
+ VECT_EMPTY /* Error EL2t */
+
+ VECT_EMPTY /* Synchronous EL2h */
+ VECT_EMPTY /* IRQ EL2h */
+ VECT_EMPTY /* FIQ EL2h */
+ VECT_EMPTY /* Error EL2h */
+
+ VECT_EMPTY /* Synchronous 64-bit EL1 */
+ VECT_EMPTY /* IRQ 64-bit EL1 */
+ VECT_EMPTY /* FIQ 64-bit EL1 */
+ VECT_EMPTY /* Error 64-bit EL1 */
+
+ VECT_EMPTY /* Synchronous 32-bit EL1 */
+ VECT_EMPTY /* IRQ 32-bit EL1 */
+ VECT_EMPTY /* FIQ 32-bit EL1 */
+ VECT_EMPTY /* Error 32-bit EL1 */
+
+/*
+ * Get the delta between the physical address we were loaded to and the
+ * virtual address we expect to run from. This is used when building the
+ * initial page table.
+ */
+get_virt_delta:
+ /* Load the physical address of virt_map */
+ adr x29, virt_map
+ /* Load the virtual address of virt_map stored in virt_map */
+ ldr x28, [x29]
+ /* Find PA - VA as PA' = VA' - VA + PA = VA' + (PA - VA) = VA' + x29 */
+ sub x29, x29, x28
+ /* Find the load address for the kernel */
+ mov x28, #(KERNBASE)
+ add x28, x28, x29
+ ret
+
+ .align 3
+virt_map:
+ .quad virt_map
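
get_virt_delta works because "adr" yields the physical address of virt_map while the MMU is off or identity-mapped, whereas the .quad stored at virt_map holds its link-time virtual address; the difference is the PA - VA delta that is then applied to KERNBASE. A small C sketch of that arithmetic; every address below is an illustrative example, not the real KERNBASE or load address:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t virt_map_pa = 0x0000000080200040ULL;	/* what "adr x29, virt_map" yields (example) */
	uint64_t virt_map_va = 0xffff000000200040ULL;	/* the .quad stored at virt_map (example) */
	uint64_t kernbase_va = 0xffff000000000000ULL;	/* stand-in for KERNBASE */
	uint64_t delta, kern_load_pa;

	delta = virt_map_pa - virt_map_va;	/* x29 = PA - VA */
	kern_load_pa = kernbase_va + delta;	/* x28 = physical load address */
	printf("delta = %#" PRIx64 ", kernel loaded at %#" PRIx64 "\n",
	    delta, kern_load_pa);
	return (0);
}
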
+
+/*
+ * This builds the page tables containing the identity map, and the kernel
+ * virtual map.
+ *
+ * It relies on:
+ * We were loaded to an address that is on a 2MiB boundary
+ *  All the memory must not cross a 1GiB boundary
+ * x28 contains the physical address we were loaded from
+ *
+ * TODO: This is out of date.
+ * There are at least 5 pages before that address for the page tables
+ * The pages used are:
+ * - The Kernel L2 table
+ * - The Kernel L1 table
+ * - The Kernel L0 table (TTBR1)
+ * - The identity (PA = VA) L1 table
+ * - The identity (PA = VA) L0 table (TTBR0)
+ * - The DMAP L1 tables
+ */
+create_pagetables:
+ /* Save the Link register */
+ mov x5, x30
+
+ /* Clean the page table */
+ adr x6, pagetable
+ mov x26, x6
+ adr x27, pagetable_end
+1:
+ stp xzr, xzr, [x6], #16
+ stp xzr, xzr, [x6], #16
+ stp xzr, xzr, [x6], #16
+ stp xzr, xzr, [x6], #16
+ cmp x6, x27
+ b.lo 1b
+
+ /*
+ * Build the TTBR1 maps.
+ */
+
+ /* Find the size of the kernel */
+ mov x6, #(KERNBASE)
+
+#if defined(LINUX_BOOT_ABI)
+ /* X19 is used as 'map FDT data' flag */
+ mov x19, xzr
+
+ /* No modules or FDT pointer ? */
+ cbz x0, booti_no_fdt
+
+ /* Test if modulep points to modules descriptor or to FDT */
+ ldr w8, [x0]
+ ldr w7, =FDT_MAGIC
+ cmp w7, w8
+ b.eq booti_fdt
+#endif
+
+ /* Booted with modules pointer */
+ /* Find modulep - begin */
+ sub x8, x0, x6
+ /* Add two 2MiB pages for the module data and round up */
+ ldr x7, =(3 * L2_SIZE - 1)
+ add x8, x8, x7
+ b common
+
+#if defined(LINUX_BOOT_ABI)
+booti_fdt:
+ /* Booted by U-Boot booti with FDT data */
+ /* Set 'map FDT data' flag */
+ mov x19, #1
+
+booti_no_fdt:
+	/* Booted by U-Boot booti without FDT data */
+ /* Find the end - begin */
+ ldr x7, .Lend
+ sub x8, x7, x6
+
+ /*
+ * Add one 2MiB page for copy of FDT data (maximum FDT size),
+ * one for metadata and round up
+ */
+ ldr x7, =(3 * L2_SIZE - 1)
+ add x8, x8, x7
+#endif
+
+common:
+ /* Get the number of l2 pages to allocate, rounded down */
+ lsr x10, x8, #(L2_SHIFT)
+
+ /* Create the kernel space L2 table */
+ mov x6, x26
+ mov x7, #VM_MEMATTR_WRITE_BACK
+ mov x8, #(KERNBASE & L2_BLOCK_MASK)
+ mov x9, x28
+ bl build_l2_block_pagetable
+
+ /* Move to the l1 table */
+ add x26, x26, #PAGE_SIZE
+
+ /* Link the l1 -> l2 table */
+ mov x9, x6
+ mov x6, x26
+ bl link_l1_pagetable
+
+ /* Move to the l0 table */
+ add x24, x26, #PAGE_SIZE
+
+ /* Link the l0 -> l1 table */
+ mov x9, x6
+ mov x6, x24
+ mov x10, #1
+ bl link_l0_pagetable
+
+ /* Link the DMAP tables */
+ ldr x8, =DMAP_MIN_ADDRESS
+	adr	x9, pagetable_dmap
+ mov x10, #DMAP_TABLES
+ bl link_l0_pagetable
+
+ /*
+ * Build the TTBR0 maps. As TTBR0 maps, they must specify ATTR_S1_nG.
+ * They are only needed early on, so the VA = PA map is uncached.
+ */
+ add x27, x24, #PAGE_SIZE
+
+ mov x6, x27 /* The initial page table */
+#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
+ /* Create a table for the UART */
+ mov x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_DEVICE))
+ mov x8, #(SOCDEV_VA) /* VA start */
+ mov x9, #(SOCDEV_PA) /* PA start */
+ mov x10, #1
+ bl build_l1_block_pagetable
+#endif
+
+#if defined(LINUX_BOOT_ABI)
+ /* Map FDT data ? */
+ cbz x19, 1f
+
+ /* Create the identity mapping for FDT data (2 MiB max) */
+ mov x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_UNCACHEABLE))
+ mov x9, x0
+ mov x8, x0 /* VA start (== PA start) */
+ mov x10, #1
+ bl build_l1_block_pagetable
+
+1:
+#endif
+
+ /* Create the VA = PA map */
+ mov x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_UNCACHEABLE))
+ mov x9, x27
+ mov x8, x9 /* VA start (== PA start) */
+ mov x10, #1
+ bl build_l1_block_pagetable
+
+ /* Move to the l0 table */
+ add x27, x27, #PAGE_SIZE
+
+ /* Link the l0 -> l1 table */
+ mov x9, x6
+ mov x6, x27
+ mov x10, #1
+ bl link_l0_pagetable
+
+ /* Restore the Link register */
+ mov x30, x5
+ ret
+
+/*
+ * Builds an L0 -> L1 table descriptor
+ *
+ * This is a link for a 512GiB block of memory with up to 1GiB regions mapped
+ * within it by build_l1_block_pagetable.
+ *
+ * x6 = L0 table
+ * x8 = Virtual Address
+ * x9 = L1 PA (trashed)
+ * x10 = Entry count
+ * x11, x12 and x13 are trashed
+ */
+link_l0_pagetable:
+ /*
+ * Link an L0 -> L1 table entry.
+ */
+ /* Find the table index */
+ lsr x11, x8, #L0_SHIFT
+ and x11, x11, #L0_ADDR_MASK
+
+ /* Build the L0 block entry */
+ mov x12, #L0_TABLE
+
+ /* Only use the output address bits */
+ lsr x9, x9, #PAGE_SHIFT
+1: orr x13, x12, x9, lsl #PAGE_SHIFT
+
+ /* Store the entry */
+ str x13, [x6, x11, lsl #3]
+
+ sub x10, x10, #1
+ add x11, x11, #1
+ add x9, x9, #1
+ cbnz x10, 1b
+
+ ret
+
+/*
+ * Builds an L1 -> L2 table descriptor
+ *
+ * This is a link for a 1GiB block of memory with up to 2MiB regions mapped
+ * within it by build_l2_block_pagetable.
+ *
+ * x6 = L1 table
+ * x8 = Virtual Address
+ * x9 = L2 PA (trashed)
+ * x11, x12 and x13 are trashed
+ */
+link_l1_pagetable:
+ /*
+ * Link an L1 -> L2 table entry.
+ */
+ /* Find the table index */
+ lsr x11, x8, #L1_SHIFT
+ and x11, x11, #Ln_ADDR_MASK
+
+ /* Build the L1 block entry */
+ mov x12, #L1_TABLE
+
+ /* Only use the output address bits */
+ lsr x9, x9, #PAGE_SHIFT
+ orr x13, x12, x9, lsl #PAGE_SHIFT
+
+ /* Store the entry */
+ str x13, [x6, x11, lsl #3]
+
+ ret
+
+/*
+ * Builds count 1 GiB block page table entries
+ * x6 = L1 table
+ * x7 = Variable lower block attributes
+ * x8 = VA start
+ * x9 = PA start (trashed)
+ * x10 = Entry count
+ * x11, x12 and x13 are trashed
+ */
+build_l1_block_pagetable:
+ /*
+ * Build the L1 table entry.
+ */
+ /* Find the table index */
+ lsr x11, x8, #L1_SHIFT
+ and x11, x11, #Ln_ADDR_MASK
+
+ /* Build the L1 block entry */
+ orr x12, x7, #L1_BLOCK
+ orr x12, x12, #(ATTR_AF)
+#ifdef SMP
+ orr x12, x12, ATTR_SH(ATTR_SH_IS)
+#endif
+
+ /* Only use the output address bits */
+ lsr x9, x9, #L1_SHIFT
+
+ /* Set the physical address for this virtual address */
+1: orr x13, x12, x9, lsl #L1_SHIFT
+
+ /* Store the entry */
+ str x13, [x6, x11, lsl #3]
+
+ sub x10, x10, #1
+ add x11, x11, #1
+ add x9, x9, #1
+ cbnz x10, 1b
+
+ ret
+
+/*
+ * Builds count 2 MiB block page table entries
+ * x6 = L2 table
+ * x7 = Memory attribute index (VM_MEMATTR_*)
+ * x8 = VA start
+ * x9 = PA start (trashed)
+ * x10 = Entry count
+ * x11, x12 and x13 are trashed
+ */
+build_l2_block_pagetable:
+ /*
+ * Build the L2 table entry.
+ */
+ /* Find the table index */
+ lsr x11, x8, #L2_SHIFT
+ and x11, x11, #Ln_ADDR_MASK
+
+ /* Build the L2 block entry */
+ lsl x12, x7, #2
+ orr x12, x12, #L2_BLOCK
+ orr x12, x12, #(ATTR_AF)
+ orr x12, x12, #(ATTR_S1_UXN)
+#ifdef SMP
+ orr x12, x12, ATTR_SH(ATTR_SH_IS)
+#endif
+
+ /* Only use the output address bits */
+ lsr x9, x9, #L2_SHIFT
+
+ /* Set the physical address for this virtual address */
+1: orr x13, x12, x9, lsl #L2_SHIFT
+
+ /* Store the entry */
+ str x13, [x6, x11, lsl #3]
+
+ sub x10, x10, #1
+ add x11, x11, #1
+ add x9, x9, #1
+ cbnz x10, 1b
+
+ ret
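
Each build_l*_block_pagetable iteration derives the table index from the VA, ORs the block type and attributes into the descriptor, and stores the 8-byte entry at that index. A C sketch of one L2 entry, using simplified stand-ins for the L2_* and ATTR_* macros (the real definitions live in <machine/pte.h>):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the L2_* and ATTR_* macros from <machine/pte.h>. */
#define EX_L2_SHIFT	21			/* 2 MiB blocks */
#define EX_Ln_ADDR_MASK	0x1ffUL			/* 9 index bits per level */
#define EX_L2_BLOCK	0x1UL			/* block descriptor type */
#define EX_ATTR_AF	(1UL << 10)		/* access flag */

static void
ex_map_l2_block(uint64_t *l2, uint64_t va, uint64_t pa, uint64_t attrs)
{
	uint64_t idx, desc;

	idx = (va >> EX_L2_SHIFT) & EX_Ln_ADDR_MASK;	/* table index from the VA */
	desc = attrs | EX_L2_BLOCK | EX_ATTR_AF;	/* lower attributes */
	desc |= (pa >> EX_L2_SHIFT) << EX_L2_SHIFT;	/* 2 MiB-aligned output address */
	l2[idx] = desc;			/* the "str x13, [x6, x11, lsl #3]" step */
}

int
main(void)
{
	static uint64_t l2[512];

	/* Example: map the second 2 MiB of a kernel VA window. */
	ex_map_l2_block(l2, 0xffff000000200000ULL, 0x80200000ULL, 0);
	printf("l2[1] = %#llx\n", (unsigned long long)l2[1]);
	return (0);
}
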
+
+start_mmu:
+ dsb sy
+
+ /* Load the exception vectors */
+ ldr x2, =exception_vectors
+ msr vbar_el1, x2
+
+ /* Load ttbr0 and ttbr1 */
+ msr ttbr0_el1, x27
+ msr ttbr1_el1, x24
+ isb
+
+ /* Clear the Monitor Debug System control register */
+ msr mdscr_el1, xzr
+
+ /* Invalidate the TLB */
+ tlbi vmalle1is
+ dsb ish
+ isb
+
+ ldr x2, mair
+ msr mair_el1, x2
+
+ /*
+ * Setup TCR according to the PARange and ASIDBits fields
+ * from ID_AA64MMFR0_EL1 and the HAFDBS field from the
+ * ID_AA64MMFR1_EL1. More precisely, set TCR_EL1.AS
+ * to 1 only if the ASIDBits field equals 0b0010.
+ */
+ ldr x2, tcr
+ mrs x3, id_aa64mmfr0_el1
+
+ /* Copy the bottom 3 bits from id_aa64mmfr0_el1 into TCR.IPS */
+ bfi x2, x3, #(TCR_IPS_SHIFT), #(TCR_IPS_WIDTH)
+ and x3, x3, #(ID_AA64MMFR0_ASIDBits_MASK)
+
+ /* Check if the HW supports 16 bit ASIDS */
+ cmp x3, #(ID_AA64MMFR0_ASIDBits_16)
+ /* If so x3 == 1, else x3 == 0 */
+ cset x3, eq
+ /* Set TCR.AS with x3 */
+ bfi x2, x3, #(TCR_ASID_SHIFT), #(TCR_ASID_WIDTH)
+
+ /*
+ * Check if the HW supports access flag and dirty state updates,
+ * and set TCR_EL1.HA and TCR_EL1.HD accordingly.
+ */
+ mrs x3, id_aa64mmfr1_el1
+ and x3, x3, #(ID_AA64MMFR1_HAFDBS_MASK)
+ cmp x3, #1
+ b.ne 1f
+ orr x2, x2, #(TCR_HA)
+ b 2f
+1:
+ cmp x3, #2
+ b.ne 2f
+ orr x2, x2, #(TCR_HA | TCR_HD)
+2:
+ msr tcr_el1, x2
+
+ /*
+ * Setup SCTLR.
+ */
+ ldr x2, sctlr_set
+ ldr x3, sctlr_clear
+ mrs x1, sctlr_el1
+ bic x1, x1, x3 /* Clear the required bits */
+ orr x1, x1, x2 /* Set the required bits */
+ msr sctlr_el1, x1
+ isb
+
+ ret
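
The TCR_EL1 probing above copies the PARange field into TCR.IPS, sets TCR.AS only when 16-bit ASIDs are reported, and enables hardware access/dirty updates according to ID_AA64MMFR1.HAFDBS. A C sketch of the same decisions; the bit positions follow the Armv8-A architecture and the register values are made-up examples:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t mmfr0 = 0x1122;	/* example ID_AA64MMFR0_EL1: ASIDBits = 2, PARange = 2 */
	uint64_t mmfr1 = 0x2;		/* example ID_AA64MMFR1_EL1: HAFDBS = 2 */
	uint64_t tcr = 0;

	tcr |= (mmfr0 & 0x7) << 32;		/* TCR.IPS (bits 34:32) <- PARange */
	if (((mmfr0 >> 4) & 0xf) == 2)
		tcr |= 1ULL << 36;		/* TCR.AS: 16-bit ASIDs supported */
	switch (mmfr1 & 0xf) {
	case 1:
		tcr |= 1ULL << 39;		/* TCR.HA: hardware access flag */
		break;
	case 2:
		tcr |= (1ULL << 39) | (1ULL << 40);	/* TCR.HA | TCR.HD */
		break;
	}
	printf("tcr additions = %#llx\n", (unsigned long long)tcr);
	return (0);
}
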
+
+ .align 3
+mair:
+ .quad MAIR_ATTR(MAIR_DEVICE_nGnRnE, VM_MEMATTR_DEVICE) | \
+ MAIR_ATTR(MAIR_NORMAL_NC, VM_MEMATTR_UNCACHEABLE) | \
+ MAIR_ATTR(MAIR_NORMAL_WB, VM_MEMATTR_WRITE_BACK) | \
+ MAIR_ATTR(MAIR_NORMAL_WT, VM_MEMATTR_WRITE_THROUGH)
+tcr:
+ .quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_TG1_4K | \
+ TCR_CACHE_ATTRS | TCR_SMP_ATTRS)
+sctlr_set:
+ /* Bits to set */
+ .quad (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_UCI | SCTLR_SPAN | \
+ SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE | \
+ SCTLR_I | SCTLR_SED | SCTLR_SA0 | SCTLR_SA | SCTLR_C | \
+ SCTLR_M | SCTLR_CP15BEN)
+sctlr_clear:
+ /* Bits to clear */
+ .quad (SCTLR_EE | SCTLR_EOE | SCTLR_IESB | SCTLR_WXN | SCTLR_UMA | \
+ SCTLR_ITD | SCTLR_A)
+
+ .globl abort
+abort:
+ b abort
+
+ //.section .init_pagetable
+ .align 12 /* 4KiB aligned */
+ /*
+ * 6 initial tables (in the following order):
+ * L2 for kernel (High addresses)
+ * L1 for kernel
+ * L0 for kernel
+ * L1 bootstrap for user (Low addresses)
+ * L0 bootstrap for user
+ * L0 for user
+ */
+pagetable:
+ .space PAGE_SIZE
+pagetable_l1_ttbr1:
+ .space PAGE_SIZE
+pagetable_l0_ttbr1:
+ .space PAGE_SIZE
+pagetable_l1_ttbr0_bootstrap:
+ .space PAGE_SIZE
+pagetable_l0_ttbr0_bootstrap:
+ .space PAGE_SIZE
+pagetable_l0_ttbr0:
+ .space PAGE_SIZE
+
+ .globl pagetable_dmap
+pagetable_dmap:
+ .space PAGE_SIZE * DMAP_TABLES
+pagetable_end:
+
+el2_pagetable:
+ .space PAGE_SIZE
+
+ .globl init_pt_va
+init_pt_va:
+ .quad pagetable /* XXX: Keep page tables VA */
+
+ .align 4
+initstack:
+ .space (PAGE_SIZE * KSTACK_PAGES)
+initstack_end:
+
+
+ENTRY(sigcode)
+ mov x0, sp
+ add x0, x0, #SF_UC
+
+1:
+ mov x8, #SYS_sigreturn
+ svc 0
+
+ /* sigreturn failed, exit */
+ mov x8, #SYS_exit
+ svc 0
+
+ b 1b
+END(sigcode)
+ /* This may be copied to the stack, keep it 16-byte aligned */
+ .align 3
+esigcode:
+
+ .data
+ .align 3
+ .global szsigcode
+szsigcode:
+ .quad esigcode - sigcode
+
+ENTRY(aarch32_sigcode)
+ .word 0xe1a0000d // mov r0, sp
+ .word 0xe2800040 // add r0, r0, #SIGF_UC
+ .word 0xe59f700c // ldr r7, [pc, #12]
+ .word 0xef000000 // swi #0
+ .word 0xe59f7008 // ldr r7, [pc, #8]
+ .word 0xef000000 // swi #0
+ .word 0xeafffffa // b . - 16
+END(aarch32_sigcode)
+ .word SYS_sigreturn
+ .word SYS_exit
+ .align 3
+aarch32_esigcode:
+ .data
+ .global sz_aarch32_sigcode
+sz_aarch32_sigcode:
+ .quad aarch32_esigcode - aarch32_sigcode
diff --git a/sys/arm64/arm64/machdep.c b/sys/arm64/arm64/machdep.c
new file mode 100644
index 000000000000..cb8d33ff57d5
--- /dev/null
+++ b/sys/arm64/arm64/machdep.c
@@ -0,0 +1,1375 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include "opt_acpi.h"
+#include "opt_platform.h"
+#include "opt_ddb.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/buf.h>
+#include <sys/bus.h>
+#include <sys/cons.h>
+#include <sys/cpu.h>
+#include <sys/csan.h>
+#include <sys/devmap.h>
+#include <sys/efi.h>
+#include <sys/exec.h>
+#include <sys/imgact.h>
+#include <sys/kdb.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/limits.h>
+#include <sys/linker.h>
+#include <sys/msgbuf.h>
+#include <sys/pcpu.h>
+#include <sys/physmem.h>
+#include <sys/proc.h>
+#include <sys/ptrace.h>
+#include <sys/reboot.h>
+#include <sys/rwlock.h>
+#include <sys/sched.h>
+#include <sys/signalvar.h>
+#include <sys/syscallsubr.h>
+#include <sys/sysent.h>
+#include <sys/sysproto.h>
+#include <sys/ucontext.h>
+#include <sys/vdso.h>
+#include <sys/vmmeter.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_phys.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_pager.h>
+
+#include <machine/armreg.h>
+#include <machine/cpu.h>
+#include <machine/debug_monitor.h>
+#include <machine/kdb.h>
+#include <machine/machdep.h>
+#include <machine/metadata.h>
+#include <machine/md_var.h>
+#include <machine/pcb.h>
+#include <machine/reg.h>
+#include <machine/undefined.h>
+#include <machine/vmparam.h>
+
+#ifdef VFP
+#include <machine/vfp.h>
+#endif
+
+#ifdef DEV_ACPI
+#include <contrib/dev/acpica/include/acpi.h>
+#include <machine/acpica_machdep.h>
+#endif
+
+#ifdef FDT
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/openfirm.h>
+#endif
+
+static void get_fpcontext(struct thread *td, mcontext_t *mcp);
+static void set_fpcontext(struct thread *td, mcontext_t *mcp);
+
+enum arm64_bus arm64_bus_method = ARM64_BUS_NONE;
+
+struct pcpu __pcpu[MAXCPU];
+
+static struct trapframe proc0_tf;
+
+int early_boot = 1;
+int cold = 1;
+static int boot_el;
+
+struct kva_md_info kmi;
+
+int64_t dczva_line_size; /* The size of cache line the dc zva zeroes */
+int has_pan;
+
+/*
+ * Physical address of the EFI System Table. Stashed from the metadata hints
+ * passed into the kernel and used by the EFI code to call runtime services.
+ */
+vm_paddr_t efi_systbl_phys;
+static struct efi_map_header *efihdr;
+
+/* pagezero_* implementations are provided in support.S */
+void pagezero_simple(void *);
+void pagezero_cache(void *);
+
+/* pagezero_simple is default pagezero */
+void (*pagezero)(void *p) = pagezero_simple;
+
+int (*apei_nmi)(void);
+
+static void
+pan_setup(void)
+{
+	uint64_t id_aa64mmfr1;
+
+	id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
+	if (ID_AA64MMFR1_PAN_VAL(id_aa64mmfr1) != ID_AA64MMFR1_PAN_NONE)
+ has_pan = 1;
+}
+
+void
+pan_enable(void)
+{
+
+ /*
+ * The LLVM integrated assembler doesn't understand the PAN
+ * PSTATE field. Because of this we need to manually create
+ * the instruction in an asm block. This is equivalent to:
+ * msr pan, #1
+ *
+ * This sets the PAN bit, stopping the kernel from accessing
+ * memory when userspace can also access it unless the kernel
+ * uses the userspace load/store instructions.
+ */
+ if (has_pan) {
+ WRITE_SPECIALREG(sctlr_el1,
+ READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN);
+ __asm __volatile(".inst 0xd500409f | (0x1 << 8)");
+ }
+}
+
+bool
+has_hyp(void)
+{
+
+ return (boot_el == 2);
+}
+
+static void
+cpu_startup(void *dummy)
+{
+ vm_paddr_t size;
+ int i;
+
+ printf("real memory = %ju (%ju MB)\n", ptoa((uintmax_t)realmem),
+ ptoa((uintmax_t)realmem) / 1024 / 1024);
+
+ if (bootverbose) {
+ printf("Physical memory chunk(s):\n");
+ for (i = 0; phys_avail[i + 1] != 0; i += 2) {
+ size = phys_avail[i + 1] - phys_avail[i];
+ printf("%#016jx - %#016jx, %ju bytes (%ju pages)\n",
+ (uintmax_t)phys_avail[i],
+ (uintmax_t)phys_avail[i + 1] - 1,
+ (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
+ }
+ }
+
+ printf("avail memory = %ju (%ju MB)\n",
+ ptoa((uintmax_t)vm_free_count()),
+ ptoa((uintmax_t)vm_free_count()) / 1024 / 1024);
+
+ undef_init();
+ install_cpu_errata();
+
+ vm_ksubmap_init(&kmi);
+ bufinit();
+ vm_pager_bufferinit();
+}
+
+SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
+
+static void
+late_ifunc_resolve(void *dummy __unused)
+{
+ link_elf_late_ireloc();
+}
+SYSINIT(late_ifunc_resolve, SI_SUB_CPU, SI_ORDER_ANY, late_ifunc_resolve, NULL);
+
+int
+cpu_idle_wakeup(int cpu)
+{
+
+ return (0);
+}
+
+int
+fill_regs(struct thread *td, struct reg *regs)
+{
+ struct trapframe *frame;
+
+ frame = td->td_frame;
+ regs->sp = frame->tf_sp;
+ regs->lr = frame->tf_lr;
+ regs->elr = frame->tf_elr;
+ regs->spsr = frame->tf_spsr;
+
+ memcpy(regs->x, frame->tf_x, sizeof(regs->x));
+
+#ifdef COMPAT_FREEBSD32
+ /*
+	 * We may be called here for a 32-bit process if we're using a
+	 * 64-bit debugger. If so, put the PC and SPSR where it expects them.
+ */
+ if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
+ regs->x[15] = frame->tf_elr;
+ regs->x[16] = frame->tf_spsr;
+ }
+#endif
+ return (0);
+}
+
+int
+set_regs(struct thread *td, struct reg *regs)
+{
+ struct trapframe *frame;
+
+ frame = td->td_frame;
+ frame->tf_sp = regs->sp;
+ frame->tf_lr = regs->lr;
+ frame->tf_elr = regs->elr;
+ frame->tf_spsr &= ~PSR_FLAGS;
+ frame->tf_spsr |= regs->spsr & PSR_FLAGS;
+
+ memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));
+
+#ifdef COMPAT_FREEBSD32
+ if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
+ /*
+		 * We may be called for a 32-bit process if we're using
+		 * a 64-bit debugger. If so, get the PC and SPSR from where
+		 * it put them.
+ */
+ frame->tf_elr = regs->x[15];
+ frame->tf_spsr = regs->x[16] & PSR_FLAGS;
+ }
+#endif
+ return (0);
+}
+
+int
+fill_fpregs(struct thread *td, struct fpreg *regs)
+{
+#ifdef VFP
+ struct pcb *pcb;
+
+ pcb = td->td_pcb;
+ if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
+ /*
+ * If we have just been running VFP instructions we will
+ * need to save the state to memcpy it below.
+ */
+ if (td == curthread)
+ vfp_save_state(td, pcb);
+
+ KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
+ ("Called fill_fpregs while the kernel is using the VFP"));
+ memcpy(regs->fp_q, pcb->pcb_fpustate.vfp_regs,
+ sizeof(regs->fp_q));
+ regs->fp_cr = pcb->pcb_fpustate.vfp_fpcr;
+ regs->fp_sr = pcb->pcb_fpustate.vfp_fpsr;
+ } else
+#endif
+ memset(regs, 0, sizeof(*regs));
+ return (0);
+}
+
+int
+set_fpregs(struct thread *td, struct fpreg *regs)
+{
+#ifdef VFP
+ struct pcb *pcb;
+
+ pcb = td->td_pcb;
+ KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
+ ("Called set_fpregs while the kernel is using the VFP"));
+ memcpy(pcb->pcb_fpustate.vfp_regs, regs->fp_q, sizeof(regs->fp_q));
+ pcb->pcb_fpustate.vfp_fpcr = regs->fp_cr;
+ pcb->pcb_fpustate.vfp_fpsr = regs->fp_sr;
+#endif
+ return (0);
+}
+
+int
+fill_dbregs(struct thread *td, struct dbreg *regs)
+{
+ struct debug_monitor_state *monitor;
+ int count, i;
+ uint8_t debug_ver, nbkpts;
+
+ memset(regs, 0, sizeof(*regs));
+
+ extract_user_id_field(ID_AA64DFR0_EL1, ID_AA64DFR0_DebugVer_SHIFT,
+ &debug_ver);
+ extract_user_id_field(ID_AA64DFR0_EL1, ID_AA64DFR0_BRPs_SHIFT,
+ &nbkpts);
+
+ /*
+ * The BRPs field contains the number of breakpoints - 1. Armv8-A
+ * allows the hardware to provide 2-16 breakpoints so this won't
+ * overflow an 8 bit value.
+ */
+ count = nbkpts + 1;
+
+ regs->db_info = debug_ver;
+ regs->db_info <<= 8;
+ regs->db_info |= count;
+
+ monitor = &td->td_pcb->pcb_dbg_regs;
+ if ((monitor->dbg_flags & DBGMON_ENABLED) != 0) {
+ for (i = 0; i < count; i++) {
+ regs->db_regs[i].dbr_addr = monitor->dbg_bvr[i];
+ regs->db_regs[i].dbr_ctrl = monitor->dbg_bcr[i];
+ }
+ }
+
+ return (0);
+}
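
db_info therefore packs the debug architecture version in bits 15:8 and the breakpoint count in bits 7:0. A small sketch of that packing with example field values:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint8_t debug_ver = 0x6;	/* example ID_AA64DFR0.DebugVer value */
	uint8_t nbkpts = 5;		/* example ID_AA64DFR0.BRPs = breakpoints - 1 */
	uint64_t db_info;

	db_info = debug_ver;
	db_info <<= 8;
	db_info |= nbkpts + 1;		/* low byte: number of breakpoints */
	printf("db_info = %#lx (%d breakpoints)\n",
	    (unsigned long)db_info, (int)(db_info & 0xff));
	return (0);
}
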
+
+int
+set_dbregs(struct thread *td, struct dbreg *regs)
+{
+ struct debug_monitor_state *monitor;
+ int count;
+ int i;
+
+ monitor = &td->td_pcb->pcb_dbg_regs;
+ count = 0;
+ monitor->dbg_enable_count = 0;
+ for (i = 0; i < DBG_BRP_MAX; i++) {
+ /* TODO: Check these values */
+ monitor->dbg_bvr[i] = regs->db_regs[i].dbr_addr;
+ monitor->dbg_bcr[i] = regs->db_regs[i].dbr_ctrl;
+ if ((monitor->dbg_bcr[i] & 1) != 0)
+ monitor->dbg_enable_count++;
+ }
+ if (monitor->dbg_enable_count > 0)
+ monitor->dbg_flags |= DBGMON_ENABLED;
+
+ return (0);
+}
+
+#ifdef COMPAT_FREEBSD32
+int
+fill_regs32(struct thread *td, struct reg32 *regs)
+{
+ int i;
+ struct trapframe *tf;
+
+ tf = td->td_frame;
+ for (i = 0; i < 13; i++)
+ regs->r[i] = tf->tf_x[i];
+ /* For arm32, SP is r13 and LR is r14 */
+ regs->r_sp = tf->tf_x[13];
+ regs->r_lr = tf->tf_x[14];
+ regs->r_pc = tf->tf_elr;
+ regs->r_cpsr = tf->tf_spsr;
+
+ return (0);
+}
+
+int
+set_regs32(struct thread *td, struct reg32 *regs)
+{
+ int i;
+ struct trapframe *tf;
+
+ tf = td->td_frame;
+ for (i = 0; i < 13; i++)
+ tf->tf_x[i] = regs->r[i];
+	/* For arm32, SP is r13 and LR is r14 */
+ tf->tf_x[13] = regs->r_sp;
+ tf->tf_x[14] = regs->r_lr;
+ tf->tf_elr = regs->r_pc;
+ tf->tf_spsr = regs->r_cpsr;
+
+ return (0);
+}
+
+int
+fill_fpregs32(struct thread *td, struct fpreg32 *regs)
+{
+
+ printf("ARM64TODO: fill_fpregs32");
+ return (EDOOFUS);
+}
+
+int
+set_fpregs32(struct thread *td, struct fpreg32 *regs)
+{
+
+ printf("ARM64TODO: set_fpregs32");
+ return (EDOOFUS);
+}
+
+int
+fill_dbregs32(struct thread *td, struct dbreg32 *regs)
+{
+
+ printf("ARM64TODO: fill_dbregs32");
+ return (EDOOFUS);
+}
+
+int
+set_dbregs32(struct thread *td, struct dbreg32 *regs)
+{
+
+ printf("ARM64TODO: set_dbregs32");
+ return (EDOOFUS);
+}
+#endif
+
+int
+ptrace_set_pc(struct thread *td, u_long addr)
+{
+
+ td->td_frame->tf_elr = addr;
+ return (0);
+}
+
+int
+ptrace_single_step(struct thread *td)
+{
+
+ td->td_frame->tf_spsr |= PSR_SS;
+ td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;
+ return (0);
+}
+
+int
+ptrace_clear_single_step(struct thread *td)
+{
+
+ td->td_frame->tf_spsr &= ~PSR_SS;
+ td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
+ return (0);
+}
+
+void
+exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
+{
+ struct trapframe *tf = td->td_frame;
+
+ memset(tf, 0, sizeof(struct trapframe));
+
+ tf->tf_x[0] = stack;
+ tf->tf_sp = STACKALIGN(stack);
+ tf->tf_lr = imgp->entry_addr;
+ tf->tf_elr = imgp->entry_addr;
+}
+
+/* Sanity check these are the same size, they will be memcpy'd to and fro */
+CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
+ sizeof((struct gpregs *)0)->gp_x);
+CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
+ sizeof((struct reg *)0)->x);
+
+int
+get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
+{
+ struct trapframe *tf = td->td_frame;
+
+ if (clear_ret & GET_MC_CLEAR_RET) {
+ mcp->mc_gpregs.gp_x[0] = 0;
+ mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
+ } else {
+ mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
+ mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
+ }
+
+ memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
+ sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));
+
+ mcp->mc_gpregs.gp_sp = tf->tf_sp;
+ mcp->mc_gpregs.gp_lr = tf->tf_lr;
+ mcp->mc_gpregs.gp_elr = tf->tf_elr;
+ get_fpcontext(td, mcp);
+
+ return (0);
+}
+
+int
+set_mcontext(struct thread *td, mcontext_t *mcp)
+{
+ struct trapframe *tf = td->td_frame;
+ uint32_t spsr;
+
+ spsr = mcp->mc_gpregs.gp_spsr;
+ if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
+ (spsr & PSR_AARCH32) != 0 ||
+ (spsr & PSR_DAIF) != (td->td_frame->tf_spsr & PSR_DAIF))
+ return (EINVAL);
+
+ memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));
+
+ tf->tf_sp = mcp->mc_gpregs.gp_sp;
+ tf->tf_lr = mcp->mc_gpregs.gp_lr;
+ tf->tf_elr = mcp->mc_gpregs.gp_elr;
+ tf->tf_spsr = mcp->mc_gpregs.gp_spsr;
+ set_fpcontext(td, mcp);
+
+ return (0);
+}
+
+static void
+get_fpcontext(struct thread *td, mcontext_t *mcp)
+{
+#ifdef VFP
+ struct pcb *curpcb;
+
+ critical_enter();
+
+ curpcb = curthread->td_pcb;
+
+ if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
+ /*
+ * If we have just been running VFP instructions we will
+ * need to save the state to memcpy it below.
+ */
+ vfp_save_state(td, curpcb);
+
+ KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
+ ("Called get_fpcontext while the kernel is using the VFP"));
+ KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
+ ("Non-userspace FPU flags set in get_fpcontext"));
+ memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_fpustate.vfp_regs,
+ sizeof(mcp->mc_fpregs));
+ mcp->mc_fpregs.fp_cr = curpcb->pcb_fpustate.vfp_fpcr;
+ mcp->mc_fpregs.fp_sr = curpcb->pcb_fpustate.vfp_fpsr;
+ mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
+ mcp->mc_flags |= _MC_FP_VALID;
+ }
+
+ critical_exit();
+#endif
+}
+
+static void
+set_fpcontext(struct thread *td, mcontext_t *mcp)
+{
+#ifdef VFP
+ struct pcb *curpcb;
+
+ critical_enter();
+
+ if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
+ curpcb = curthread->td_pcb;
+
+ /*
+ * Discard any vfp state for the current thread, we
+ * are about to override it.
+ */
+ vfp_discard(td);
+
+ KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
+ ("Called set_fpcontext while the kernel is using the VFP"));
+ memcpy(curpcb->pcb_fpustate.vfp_regs, mcp->mc_fpregs.fp_q,
+ sizeof(mcp->mc_fpregs));
+ curpcb->pcb_fpustate.vfp_fpcr = mcp->mc_fpregs.fp_cr;
+ curpcb->pcb_fpustate.vfp_fpsr = mcp->mc_fpregs.fp_sr;
+ curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_USERMASK;
+ }
+
+ critical_exit();
+#endif
+}
+
+void
+cpu_idle(int busy)
+{
+
+ spinlock_enter();
+ if (!busy)
+ cpu_idleclock();
+ if (!sched_runnable())
+ __asm __volatile(
+ "dsb sy \n"
+ "wfi \n");
+ if (!busy)
+ cpu_activeclock();
+ spinlock_exit();
+}
+
+void
+cpu_halt(void)
+{
+
+	/* We should have shut down by now; if not, enter a low-power sleep */
+ intr_disable();
+ while (1) {
+ __asm __volatile("wfi");
+ }
+}
+
+/*
+ * Flush the D-cache for non-DMA I/O so that the I-cache can
+ * be made coherent later.
+ */
+void
+cpu_flush_dcache(void *ptr, size_t len)
+{
+
+ /* ARM64TODO TBD */
+}
+
+/* Get current clock frequency for the given CPU ID. */
+int
+cpu_est_clockrate(int cpu_id, uint64_t *rate)
+{
+ struct pcpu *pc;
+
+ pc = pcpu_find(cpu_id);
+ if (pc == NULL || rate == NULL)
+ return (EINVAL);
+
+ if (pc->pc_clock == 0)
+ return (EOPNOTSUPP);
+
+ *rate = pc->pc_clock;
+ return (0);
+}
+
+void
+cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
+{
+
+ pcpu->pc_acpi_id = 0xffffffff;
+}
+
+void
+spinlock_enter(void)
+{
+ struct thread *td;
+ register_t daif;
+
+ td = curthread;
+ if (td->td_md.md_spinlock_count == 0) {
+ daif = intr_disable();
+ td->td_md.md_spinlock_count = 1;
+ td->td_md.md_saved_daif = daif;
+ critical_enter();
+ } else
+ td->td_md.md_spinlock_count++;
+}
+
+void
+spinlock_exit(void)
+{
+ struct thread *td;
+ register_t daif;
+
+ td = curthread;
+ daif = td->td_md.md_saved_daif;
+ td->td_md.md_spinlock_count--;
+ if (td->td_md.md_spinlock_count == 0) {
+ critical_exit();
+ intr_restore(daif);
+ }
+}
+
+#ifndef _SYS_SYSPROTO_H_
+struct sigreturn_args {
+ ucontext_t *ucp;
+};
+#endif
+
+int
+sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
+{
+ ucontext_t uc;
+ int error;
+
+ if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
+ return (EFAULT);
+
+ error = set_mcontext(td, &uc.uc_mcontext);
+ if (error != 0)
+ return (error);
+
+ /* Restore signal mask. */
+ kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
+
+ return (EJUSTRETURN);
+}
+
+/*
+ * Construct a PCB from a trapframe. This is called from kdb_trap() where
+ * we want to start a backtrace from the function that caused us to enter
+ * the debugger. We have the context in the trapframe, but base the trace
+ * on the PCB. The PCB doesn't have to be perfect, as long as it contains
+ * enough for a backtrace.
+ */
+void
+makectx(struct trapframe *tf, struct pcb *pcb)
+{
+ int i;
+
+ for (i = 0; i < PCB_LR; i++)
+ pcb->pcb_x[i] = tf->tf_x[i];
+
+ pcb->pcb_x[PCB_LR] = tf->tf_lr;
+ pcb->pcb_pc = tf->tf_elr;
+ pcb->pcb_sp = tf->tf_sp;
+}
+
+void
+sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
+{
+ struct thread *td;
+ struct proc *p;
+ struct trapframe *tf;
+ struct sigframe *fp, frame;
+ struct sigacts *psp;
+ struct sysentvec *sysent;
+ int onstack, sig;
+
+ td = curthread;
+ p = td->td_proc;
+ PROC_LOCK_ASSERT(p, MA_OWNED);
+
+ sig = ksi->ksi_signo;
+ psp = p->p_sigacts;
+ mtx_assert(&psp->ps_mtx, MA_OWNED);
+
+ tf = td->td_frame;
+ onstack = sigonstack(tf->tf_sp);
+
+ CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
+ catcher, sig);
+
+ /* Allocate and validate space for the signal handler context. */
+ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
+ SIGISMEMBER(psp->ps_sigonstack, sig)) {
+ fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
+ td->td_sigstk.ss_size);
+#if defined(COMPAT_43)
+ td->td_sigstk.ss_flags |= SS_ONSTACK;
+#endif
+ } else {
+ fp = (struct sigframe *)td->td_frame->tf_sp;
+ }
+
+ /* Make room, keeping the stack aligned */
+ fp--;
+ fp = (struct sigframe *)STACKALIGN(fp);
+
+ /* Fill in the frame to copy out */
+ bzero(&frame, sizeof(frame));
+ get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
+ frame.sf_si = ksi->ksi_info;
+ frame.sf_uc.uc_sigmask = *mask;
+ frame.sf_uc.uc_stack = td->td_sigstk;
+ frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) != 0 ?
+ (onstack ? SS_ONSTACK : 0) : SS_DISABLE;
+ mtx_unlock(&psp->ps_mtx);
+ PROC_UNLOCK(td->td_proc);
+
+ /* Copy the sigframe out to the user's stack. */
+ if (copyout(&frame, fp, sizeof(*fp)) != 0) {
+ /* Process has trashed its stack. Kill it. */
+ CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
+ PROC_LOCK(p);
+ sigexit(td, SIGILL);
+ }
+
+	tf->tf_x[0] = sig;
+ tf->tf_x[1] = (register_t)&fp->sf_si;
+ tf->tf_x[2] = (register_t)&fp->sf_uc;
+
+ tf->tf_elr = (register_t)catcher;
+ tf->tf_sp = (register_t)fp;
+ sysent = p->p_sysent;
+ if (sysent->sv_sigcode_base != 0)
+ tf->tf_lr = (register_t)sysent->sv_sigcode_base;
+ else
+ tf->tf_lr = (register_t)(sysent->sv_psstrings -
+ *(sysent->sv_szsigcode));
+
+ CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
+ tf->tf_sp);
+
+ PROC_LOCK(p);
+ mtx_lock(&psp->ps_mtx);
+}
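
The frame placement above steps the pointer down by one struct sigframe and then rounds it down to a 16-byte boundary, which is what STACKALIGN() does on arm64. A sketch of that placement with an example stack pointer and an illustrative (not real) frame size:

#include <stdint.h>
#include <stdio.h>

struct ex_sigframe {			/* stand-in for struct sigframe */
	unsigned char sf_bytes[808];	/* size is illustrative only */
};

int
main(void)
{
	uintptr_t sp = 0x7fffffffe6c4;		/* example user stack pointer */
	uintptr_t fp;

	fp = sp - sizeof(struct ex_sigframe);	/* the "fp--" step */
	fp &= ~(uintptr_t)0xf;			/* STACKALIGN(): round down to 16 bytes */
	printf("sigframe at %#lx (sp was %#lx)\n",
	    (unsigned long)fp, (unsigned long)sp);
	return (0);
}
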
+
+static void
+init_proc0(vm_offset_t kstack)
+{
+ struct pcpu *pcpup = &__pcpu[0];
+
+ proc_linkup0(&proc0, &thread0);
+ thread0.td_kstack = kstack;
+ thread0.td_kstack_pages = KSTACK_PAGES;
+ thread0.td_pcb = (struct pcb *)(thread0.td_kstack +
+ thread0.td_kstack_pages * PAGE_SIZE) - 1;
+ thread0.td_pcb->pcb_fpflags = 0;
+ thread0.td_pcb->pcb_fpusaved = &thread0.td_pcb->pcb_fpustate;
+ thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
+ thread0.td_frame = &proc0_tf;
+ pcpup->pc_curpcb = thread0.td_pcb;
+}
+
+typedef struct {
+ uint32_t type;
+ uint64_t phys_start;
+ uint64_t virt_start;
+ uint64_t num_pages;
+ uint64_t attr;
+} EFI_MEMORY_DESCRIPTOR;
+
+typedef void (*efi_map_entry_cb)(struct efi_md *);
+
+static void
+foreach_efi_map_entry(struct efi_map_header *efihdr, efi_map_entry_cb cb)
+{
+ struct efi_md *map, *p;
+ size_t efisz;
+ int ndesc, i;
+
+ /*
+ * Memory map data provided by UEFI via the GetMemoryMap
+ * Boot Services API.
+ */
+ efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
+ map = (struct efi_md *)((uint8_t *)efihdr + efisz);
+
+ if (efihdr->descriptor_size == 0)
+ return;
+ ndesc = efihdr->memory_size / efihdr->descriptor_size;
+
+ for (i = 0, p = map; i < ndesc; i++,
+ p = efi_next_descriptor(p, efihdr->descriptor_size)) {
+ cb(p);
+ }
+}
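
foreach_efi_map_entry steps through the map using the descriptor_size reported by the firmware rather than sizeof(struct efi_md), since firmware descriptors may be larger than the structure the kernel knows about. A user-space sketch of that walk over a fabricated map, with a simplified stand-in for struct efi_md:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ex_md {				/* simplified stand-in for struct efi_md */
	uint32_t md_type;
	uint64_t md_phys;
	uint64_t md_pages;
};

int
main(void)
{
	/* Three fake descriptors, 48 bytes apart; 8-byte aligned backing store. */
	uint64_t map[(3 * 48) / sizeof(uint64_t)];
	unsigned char *base = (unsigned char *)map;
	size_t desc_size = 48, ndesc = sizeof(map) / desc_size;
	struct ex_md md = { 7 /* ConventionalMemory */, 0x80000000, 16 };
	size_t i;

	memset(map, 0, sizeof(map));
	for (i = 0; i < ndesc; i++)
		memcpy(base + i * desc_size, &md, sizeof(md));

	/* The stride comes from desc_size, never from sizeof(struct ex_md). */
	for (i = 0; i < ndesc; i++) {
		struct ex_md *p = (struct ex_md *)(base + i * desc_size);
		printf("type %u at %#jx, %ju pages\n", p->md_type,
		    (uintmax_t)p->md_phys, (uintmax_t)p->md_pages);
	}
	return (0);
}
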
+
+static void
+exclude_efi_map_entry(struct efi_md *p)
+{
+
+ switch (p->md_type) {
+ case EFI_MD_TYPE_CODE:
+ case EFI_MD_TYPE_DATA:
+ case EFI_MD_TYPE_BS_CODE:
+ case EFI_MD_TYPE_BS_DATA:
+ case EFI_MD_TYPE_FREE:
+ /*
+ * We're allowed to use any entry with these types.
+ */
+ break;
+ default:
+ physmem_exclude_region(p->md_phys, p->md_pages * PAGE_SIZE,
+ EXFLAG_NOALLOC);
+ }
+}
+
+static void
+exclude_efi_map_entries(struct efi_map_header *efihdr)
+{
+
+ foreach_efi_map_entry(efihdr, exclude_efi_map_entry);
+}
+
+static void
+add_efi_map_entry(struct efi_md *p)
+{
+
+ switch (p->md_type) {
+ case EFI_MD_TYPE_RT_DATA:
+ /*
+ * Runtime data will be excluded after the DMAP
+ * region is created to stop it from being added
+ * to phys_avail.
+ */
+ case EFI_MD_TYPE_CODE:
+ case EFI_MD_TYPE_DATA:
+ case EFI_MD_TYPE_BS_CODE:
+ case EFI_MD_TYPE_BS_DATA:
+ case EFI_MD_TYPE_FREE:
+ /*
+ * We're allowed to use any entry with these types.
+ */
+ physmem_hardware_region(p->md_phys,
+ p->md_pages * PAGE_SIZE);
+ break;
+ }
+}
+
+static void
+add_efi_map_entries(struct efi_map_header *efihdr)
+{
+
+ foreach_efi_map_entry(efihdr, add_efi_map_entry);
+}
+
+static void
+print_efi_map_entry(struct efi_md *p)
+{
+ const char *type;
+ static const char *types[] = {
+ "Reserved",
+ "LoaderCode",
+ "LoaderData",
+ "BootServicesCode",
+ "BootServicesData",
+ "RuntimeServicesCode",
+ "RuntimeServicesData",
+ "ConventionalMemory",
+ "UnusableMemory",
+ "ACPIReclaimMemory",
+ "ACPIMemoryNVS",
+ "MemoryMappedIO",
+ "MemoryMappedIOPortSpace",
+ "PalCode",
+ "PersistentMemory"
+ };
+
+ if (p->md_type < nitems(types))
+ type = types[p->md_type];
+ else
+ type = "<INVALID>";
+ printf("%23s %012lx %12p %08lx ", type, p->md_phys,
+ p->md_virt, p->md_pages);
+ if (p->md_attr & EFI_MD_ATTR_UC)
+ printf("UC ");
+ if (p->md_attr & EFI_MD_ATTR_WC)
+ printf("WC ");
+ if (p->md_attr & EFI_MD_ATTR_WT)
+ printf("WT ");
+ if (p->md_attr & EFI_MD_ATTR_WB)
+ printf("WB ");
+ if (p->md_attr & EFI_MD_ATTR_UCE)
+ printf("UCE ");
+ if (p->md_attr & EFI_MD_ATTR_WP)
+ printf("WP ");
+ if (p->md_attr & EFI_MD_ATTR_RP)
+ printf("RP ");
+ if (p->md_attr & EFI_MD_ATTR_XP)
+ printf("XP ");
+ if (p->md_attr & EFI_MD_ATTR_NV)
+ printf("NV ");
+ if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
+ printf("MORE_RELIABLE ");
+ if (p->md_attr & EFI_MD_ATTR_RO)
+ printf("RO ");
+ if (p->md_attr & EFI_MD_ATTR_RT)
+ printf("RUNTIME");
+ printf("\n");
+}
+
+static void
+print_efi_map_entries(struct efi_map_header *efihdr)
+{
+
+ printf("%23s %12s %12s %8s %4s\n",
+ "Type", "Physical", "Virtual", "#Pages", "Attr");
+ foreach_efi_map_entry(efihdr, print_efi_map_entry);
+}
+
+#ifdef FDT
+static void
+try_load_dtb(caddr_t kmdp)
+{
+ vm_offset_t dtbp;
+
+ dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
+#if defined(FDT_DTB_STATIC)
+ /*
+ * In case the device tree blob was not retrieved (from metadata) try
+ * to use the statically embedded one.
+ */
+ if (dtbp == 0)
+ dtbp = (vm_offset_t)&fdt_static_dtb;
+#endif
+
+ if (dtbp == (vm_offset_t)NULL) {
+ printf("ERROR loading DTB\n");
+ return;
+ }
+
+ if (OF_install(OFW_FDT, 0) == FALSE)
+ panic("Cannot install FDT");
+
+ if (OF_init((void *)dtbp) != 0)
+ panic("OF_init failed with the found device tree");
+
+ parse_fdt_bootargs();
+}
+#endif
+
+static bool
+bus_probe(void)
+{
+ bool has_acpi, has_fdt;
+ char *order, *env;
+
+ has_acpi = has_fdt = false;
+
+#ifdef FDT
+ has_fdt = (OF_peer(0) != 0);
+#endif
+#ifdef DEV_ACPI
+ has_acpi = (acpi_find_table(ACPI_SIG_SPCR) != 0);
+#endif
+
+ env = kern_getenv("kern.cfg.order");
+ if (env != NULL) {
+ order = env;
+ while (order != NULL) {
+ if (has_acpi &&
+ strncmp(order, "acpi", 4) == 0 &&
+ (order[4] == ',' || order[4] == '\0')) {
+ arm64_bus_method = ARM64_BUS_ACPI;
+ break;
+ }
+ if (has_fdt &&
+ strncmp(order, "fdt", 3) == 0 &&
+ (order[3] == ',' || order[3] == '\0')) {
+ arm64_bus_method = ARM64_BUS_FDT;
+ break;
+ }
+			order = strchr(order, ',');
+			if (order != NULL)
+				order++;	/* step past the comma */
+ }
+ freeenv(env);
+
+ /* If we set the bus method it is valid */
+ if (arm64_bus_method != ARM64_BUS_NONE)
+ return (true);
+ }
+ /* If no order or an invalid order was set use the default */
+ if (arm64_bus_method == ARM64_BUS_NONE) {
+ if (has_fdt)
+ arm64_bus_method = ARM64_BUS_FDT;
+ else if (has_acpi)
+ arm64_bus_method = ARM64_BUS_ACPI;
+ }
+
+ /*
+ * If no option was set the default is valid, otherwise we are
+ * setting one to get cninit() working, then calling panic to tell
+ * the user about the invalid bus setup.
+ */
+ return (env == NULL);
+}
+
+static void
+cache_setup(void)
+{
+ int dczva_line_shift;
+ uint32_t dczid_el0;
+
+ identify_cache(READ_SPECIALREG(ctr_el0));
+
+ dczid_el0 = READ_SPECIALREG(dczid_el0);
+
+ /* Check if dc zva is not prohibited */
+ if (dczid_el0 & DCZID_DZP)
+ dczva_line_size = 0;
+ else {
+ /* Same as with above calculations */
+ dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
+ dczva_line_size = sizeof(int) << dczva_line_shift;
+
+ /* Change pagezero function */
+ pagezero = pagezero_cache;
+ }
+}
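
DCZID_EL0.BS (bits 3:0) gives the "dc zva" block size as a power-of-two number of 32-bit words, so the line size in bytes is 4 << BS. A quick sketch of the decode with an example register value:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t dczid_el0 = 0x4;	/* example: DZP clear, BS (bits 3:0) = 4 */
	int shift = dczid_el0 & 0xf;	/* what DCZID_BS_SIZE() extracts */
	long line_size = (long)sizeof(int) << shift;	/* words -> bytes: 4 << 4 = 64 */

	printf("dc zva zeroes %ld bytes per instruction\n", line_size);
	return (0);
}
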
+
+int
+memory_mapping_mode(vm_paddr_t pa)
+{
+ struct efi_md *map, *p;
+ size_t efisz;
+ int ndesc, i;
+
+ if (efihdr == NULL)
+ return (VM_MEMATTR_WRITE_BACK);
+
+ /*
+ * Memory map data provided by UEFI via the GetMemoryMap
+ * Boot Services API.
+ */
+ efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
+ map = (struct efi_md *)((uint8_t *)efihdr + efisz);
+
+ if (efihdr->descriptor_size == 0)
+ return (VM_MEMATTR_WRITE_BACK);
+ ndesc = efihdr->memory_size / efihdr->descriptor_size;
+
+ for (i = 0, p = map; i < ndesc; i++,
+ p = efi_next_descriptor(p, efihdr->descriptor_size)) {
+ if (pa < p->md_phys ||
+ pa >= p->md_phys + p->md_pages * EFI_PAGE_SIZE)
+ continue;
+ if (p->md_type == EFI_MD_TYPE_IOMEM ||
+ p->md_type == EFI_MD_TYPE_IOPORT)
+ return (VM_MEMATTR_DEVICE);
+ else if ((p->md_attr & EFI_MD_ATTR_WB) != 0 ||
+ p->md_type == EFI_MD_TYPE_RECLAIM)
+ return (VM_MEMATTR_WRITE_BACK);
+ else if ((p->md_attr & EFI_MD_ATTR_WT) != 0)
+ return (VM_MEMATTR_WRITE_THROUGH);
+ else if ((p->md_attr & EFI_MD_ATTR_WC) != 0)
+ return (VM_MEMATTR_WRITE_COMBINING);
+ break;
+ }
+
+ return (VM_MEMATTR_DEVICE);
+}
+
+void
+initarm(struct arm64_bootparams *abp)
+{
+ struct efi_fb *efifb;
+ struct pcpu *pcpup;
+ char *env;
+#ifdef FDT
+ struct mem_region mem_regions[FDT_MEM_REGIONS];
+ int mem_regions_sz;
+#endif
+ vm_offset_t lastaddr;
+ caddr_t kmdp;
+ bool valid;
+
+ boot_el = abp->boot_el;
+
+	/* Parse loader or FDT boot parameters. Determine last used address. */
+ lastaddr = parse_boot_param(abp);
+
+ /* Find the kernel address */
+ kmdp = preload_search_by_type("elf kernel");
+ if (kmdp == NULL)
+ kmdp = preload_search_by_type("elf64 kernel");
+
+ identify_cpu(0);
+ update_special_regs(0);
+
+ link_elf_ireloc(kmdp);
+ try_load_dtb(kmdp);
+
+ efi_systbl_phys = MD_FETCH(kmdp, MODINFOMD_FW_HANDLE, vm_paddr_t);
+
+ /* Load the physical memory ranges */
+ efihdr = (struct efi_map_header *)preload_search_info(kmdp,
+ MODINFO_METADATA | MODINFOMD_EFI_MAP);
+ if (efihdr != NULL)
+ add_efi_map_entries(efihdr);
+#ifdef FDT
+ else {
+ /* Grab physical memory regions information from device tree. */
+ if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
+ NULL) != 0)
+ panic("Cannot get physical memory regions");
+ physmem_hardware_regions(mem_regions, mem_regions_sz);
+ }
+ if (fdt_get_reserved_mem(mem_regions, &mem_regions_sz) == 0)
+ physmem_exclude_regions(mem_regions, mem_regions_sz,
+ EXFLAG_NODUMP | EXFLAG_NOALLOC);
+#endif
+
+ /* Exclude the EFI framebuffer from our view of physical memory. */
+ efifb = (struct efi_fb *)preload_search_info(kmdp,
+ MODINFO_METADATA | MODINFOMD_EFI_FB);
+ if (efifb != NULL)
+ physmem_exclude_region(efifb->fb_addr, efifb->fb_size,
+ EXFLAG_NOALLOC);
+
+ /* Set the pcpu data, this is needed by pmap_bootstrap */
+ pcpup = &__pcpu[0];
+ pcpu_init(pcpup, 0, sizeof(struct pcpu));
+
+ /*
+ * Set the pcpu pointer with a backup in tpidr_el1 to be
+ * loaded when entering the kernel from userland.
+ */
+ __asm __volatile(
+ "mov x18, %0 \n"
+ "msr tpidr_el1, %0" :: "r"(pcpup));
+
+ PCPU_SET(curthread, &thread0);
+ PCPU_SET(midr, get_midr());
+
+ /* Do basic tuning, hz etc */
+ init_param1();
+
+ cache_setup();
+ pan_setup();
+
+ /* Bootstrap enough of pmap to enter the kernel proper */
+ pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
+ KERNBASE - abp->kern_delta, lastaddr - KERNBASE);
+	/* Exclude entries needed in the DMAP region, but not phys_avail */
+ if (efihdr != NULL)
+ exclude_efi_map_entries(efihdr);
+ physmem_init_kernel_globals();
+
+ devmap_bootstrap(0, NULL);
+
+ valid = bus_probe();
+
+ cninit();
+ set_ttbr0(abp->kern_ttbr0);
+ cpu_tlb_flushID();
+
+ if (!valid)
+ panic("Invalid bus configuration: %s",
+ kern_getenv("kern.cfg.order"));
+
+ init_proc0(abp->kern_stack);
+ msgbufinit(msgbufp, msgbufsize);
+ mutex_init();
+ init_param2(physmem);
+
+ dbg_init();
+ kdb_init();
+ pan_enable();
+
+ kcsan_cpu_init(0);
+
+ env = kern_getenv("kernelname");
+ if (env != NULL)
+ strlcpy(kernelname, env, sizeof(kernelname));
+
+ if (boothowto & RB_VERBOSE) {
+ print_efi_map_entries(efihdr);
+ physmem_print_tables();
+ }
+
+ early_boot = 0;
+}
+
+void
+dbg_init(void)
+{
+
+ /* Clear OS lock */
+ WRITE_SPECIALREG(oslar_el1, 0);
+
+ /* This permits DDB to use debug registers for watchpoints. */
+ dbg_monitor_init();
+
+ /* TODO: Eventually will need to initialize debug registers here. */
+}
+
+#ifdef DDB
+#include <ddb/ddb.h>
+
+DB_SHOW_COMMAND(specialregs, db_show_spregs)
+{
+#define PRINT_REG(reg) \
+ db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))
+
+ PRINT_REG(actlr_el1);
+ PRINT_REG(afsr0_el1);
+ PRINT_REG(afsr1_el1);
+ PRINT_REG(aidr_el1);
+ PRINT_REG(amair_el1);
+ PRINT_REG(ccsidr_el1);
+ PRINT_REG(clidr_el1);
+ PRINT_REG(contextidr_el1);
+ PRINT_REG(cpacr_el1);
+ PRINT_REG(csselr_el1);
+ PRINT_REG(ctr_el0);
+ PRINT_REG(currentel);
+ PRINT_REG(daif);
+ PRINT_REG(dczid_el0);
+ PRINT_REG(elr_el1);
+ PRINT_REG(esr_el1);
+ PRINT_REG(far_el1);
+#if 0
+ /* ARM64TODO: Enable VFP before reading floating-point registers */
+ PRINT_REG(fpcr);
+ PRINT_REG(fpsr);
+#endif
+ PRINT_REG(id_aa64afr0_el1);
+ PRINT_REG(id_aa64afr1_el1);
+ PRINT_REG(id_aa64dfr0_el1);
+ PRINT_REG(id_aa64dfr1_el1);
+ PRINT_REG(id_aa64isar0_el1);
+ PRINT_REG(id_aa64isar1_el1);
+ PRINT_REG(id_aa64pfr0_el1);
+ PRINT_REG(id_aa64pfr1_el1);
+ PRINT_REG(id_afr0_el1);
+ PRINT_REG(id_dfr0_el1);
+ PRINT_REG(id_isar0_el1);
+ PRINT_REG(id_isar1_el1);
+ PRINT_REG(id_isar2_el1);
+ PRINT_REG(id_isar3_el1);
+ PRINT_REG(id_isar4_el1);
+ PRINT_REG(id_isar5_el1);
+ PRINT_REG(id_mmfr0_el1);
+ PRINT_REG(id_mmfr1_el1);
+ PRINT_REG(id_mmfr2_el1);
+ PRINT_REG(id_mmfr3_el1);
+#if 0
+ /* Missing from llvm */
+ PRINT_REG(id_mmfr4_el1);
+#endif
+ PRINT_REG(id_pfr0_el1);
+ PRINT_REG(id_pfr1_el1);
+ PRINT_REG(isr_el1);
+ PRINT_REG(mair_el1);
+ PRINT_REG(midr_el1);
+ PRINT_REG(mpidr_el1);
+ PRINT_REG(mvfr0_el1);
+ PRINT_REG(mvfr1_el1);
+ PRINT_REG(mvfr2_el1);
+ PRINT_REG(revidr_el1);
+ PRINT_REG(sctlr_el1);
+ PRINT_REG(sp_el0);
+ PRINT_REG(spsel);
+ PRINT_REG(spsr_el1);
+ PRINT_REG(tcr_el1);
+ PRINT_REG(tpidr_el0);
+ PRINT_REG(tpidr_el1);
+ PRINT_REG(tpidrro_el0);
+ PRINT_REG(ttbr0_el1);
+ PRINT_REG(ttbr1_el1);
+ PRINT_REG(vbar_el1);
+#undef PRINT_REG
+}
+
+DB_SHOW_COMMAND(vtop, db_show_vtop)
+{
+ uint64_t phys;
+
+ if (have_addr) {
+ phys = arm64_address_translate_s1e1r(addr);
+ db_printf("EL1 physical address reg (read): 0x%016lx\n", phys);
+ phys = arm64_address_translate_s1e1w(addr);
+ db_printf("EL1 physical address reg (write): 0x%016lx\n", phys);
+ phys = arm64_address_translate_s1e0r(addr);
+ db_printf("EL0 physical address reg (read): 0x%016lx\n", phys);
+ phys = arm64_address_translate_s1e0w(addr);
+ db_printf("EL0 physical address reg (write): 0x%016lx\n", phys);
+ } else
+ db_printf("show vtop <virt_addr>\n");
+}
+#endif
diff --git a/sys/arm64/arm64/machdep_boot.c b/sys/arm64/arm64/machdep_boot.c
new file mode 100644
index 000000000000..9ab4edf616e2
--- /dev/null
+++ b/sys/arm64/arm64/machdep_boot.c
@@ -0,0 +1,232 @@
+/*-
+ * Copyright (c) 2004 Olivier Houchard
+ * Copyright (c) 1994-1998 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_platform.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/ctype.h>
+#include <sys/linker.h>
+#include <sys/reboot.h>
+#include <sys/sysctl.h>
+#ifdef FDT
+#include <sys/boot.h>
+#endif
+
+#include <machine/cpu.h>
+#include <machine/machdep.h>
+#include <machine/metadata.h>
+#include <machine/vmparam.h>
+
+#ifdef FDT
+#include <contrib/libfdt/libfdt.h>
+#include <dev/fdt/fdt_common.h>
+#endif
+
+extern int *end;
+static char *loader_envp;
+static char static_kenv[4096];
+
+#ifdef FDT
+#define CMDLINE_GUARD "FreeBSD:"
+#define LBABI_MAX_COMMAND_LINE 512
+static char linux_command_line[LBABI_MAX_COMMAND_LINE + 1];
+#endif
+
+/*
+ * Fake up a boot descriptor table
+ */
+#define PRELOAD_PUSH_VALUE(type, value) do { \
+ *(type *)(preload_ptr + size) = (value); \
+ size += sizeof(type); \
+} while (0)
+
+#define PRELOAD_PUSH_STRING(str) do { \
+ uint32_t ssize; \
+ ssize = strlen(str) + 1; \
+ PRELOAD_PUSH_VALUE(uint32_t, ssize); \
+ strcpy((char*)(preload_ptr + size), str); \
+ size += ssize; \
+ size = roundup(size, sizeof(u_long)); \
+} while (0)
+
+/* Build a minimal set of metadata. */
+static vm_offset_t
+fake_preload_metadata(void *dtb_ptr, size_t dtb_size)
+{
+ vm_offset_t lastaddr;
+ static char fake_preload[256];
+ caddr_t preload_ptr;
+ size_t size;
+
+ lastaddr = (vm_offset_t)&end;
+ preload_ptr = (caddr_t)&fake_preload[0];
+ size = 0;
+
+ PRELOAD_PUSH_VALUE(uint32_t, MODINFO_NAME);
+ PRELOAD_PUSH_STRING("kernel");
+
+ PRELOAD_PUSH_VALUE(uint32_t, MODINFO_TYPE);
+ PRELOAD_PUSH_STRING("elf kernel");
+
+ PRELOAD_PUSH_VALUE(uint32_t, MODINFO_ADDR);
+ PRELOAD_PUSH_VALUE(uint32_t, sizeof(vm_offset_t));
+ PRELOAD_PUSH_VALUE(uint64_t, VM_MIN_KERNEL_ADDRESS);
+
+ PRELOAD_PUSH_VALUE(uint32_t, MODINFO_SIZE);
+ PRELOAD_PUSH_VALUE(uint32_t, sizeof(size_t));
+ PRELOAD_PUSH_VALUE(uint64_t, (size_t)(&end - VM_MIN_KERNEL_ADDRESS));
+
+ if (dtb_ptr != NULL) {
+ /* Copy DTB to KVA space and insert it into module chain. */
+ lastaddr = roundup(lastaddr, sizeof(int));
+ PRELOAD_PUSH_VALUE(uint32_t, MODINFO_METADATA | MODINFOMD_DTBP);
+ PRELOAD_PUSH_VALUE(uint32_t, sizeof(uint64_t));
+ PRELOAD_PUSH_VALUE(uint64_t, (uint64_t)lastaddr);
+ memmove((void *)lastaddr, dtb_ptr, dtb_size);
+ lastaddr += dtb_size;
+ lastaddr = roundup(lastaddr, sizeof(int));
+ }
+ /* End marker */
+ PRELOAD_PUSH_VALUE(uint32_t, 0);
+ PRELOAD_PUSH_VALUE(uint32_t, 0);
+
+ preload_metadata = (caddr_t)(uintptr_t)fake_preload;
+
+ init_static_kenv(NULL, 0);
+
+ return (lastaddr);
+}
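+
+/*
+ * Illustrative sketch (hypothetical helper, not part of the boot path):
+ * the records built by PRELOAD_PUSH_VALUE/PRELOAD_PUSH_STRING above form
+ * the usual preload chain of a 32-bit tag, a 32-bit size and the payload,
+ * terminated by a zero tag/size pair.  A walker over that layout could
+ * look like this:
+ */
+#if 0
+static void
+walk_fake_preload(const char *p)
+{
+ uint32_t type, size;
+ size_t off;
+
+ for (off = 0;;) {
+ type = *(const uint32_t *)(p + off);
+ size = *(const uint32_t *)(p + off + sizeof(uint32_t));
+ if (type == 0 && size == 0)
+ break; /* End marker. */
+ off += 2 * sizeof(uint32_t);
+ /* The payload starts at p + off and is size bytes long. */
+ off = roundup(off + size, sizeof(u_long));
+ }
+}
+#endif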
+
+#ifdef FDT
+
+/* Convert the U-Boot command line into FreeBSD kenv and boot options. */
+static void
+cmdline_set_env(char *cmdline, const char *guard)
+{
+ size_t guard_len;
+
+ /* Skip leading spaces. */
+ while (isspace(*cmdline))
+ cmdline++;
+
+ /* Test and remove guard. */
+ if (guard != NULL && guard[0] != '\0') {
+ guard_len = strlen(guard);
+ if (strncasecmp(cmdline, guard, guard_len) != 0)
+ return;
+ cmdline += guard_len;
+ }
+
+ boothowto |= boot_parse_cmdline(cmdline);
+}
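+
+/*
+ * Example (illustrative values): a chosen/bootargs string of
+ *
+ *   FreeBSD:-v -s kern.msgbufsize=131072
+ *
+ * passes the guard test above; boot_parse_cmdline() then sets RB_VERBOSE
+ * and RB_SINGLE in boothowto and installs kern.msgbufsize in the kenv.
+ * A string without the "FreeBSD:" prefix is ignored.
+ */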
+
+void
+parse_fdt_bootargs(void)
+{
+
+ if (loader_envp == NULL && fdt_get_chosen_bootargs(linux_command_line,
+ LBABI_MAX_COMMAND_LINE) == 0) {
+ init_static_kenv(static_kenv, sizeof(static_kenv));
+ cmdline_set_env(linux_command_line, CMDLINE_GUARD);
+ }
+}
+
+#endif
+
+#if defined(LINUX_BOOT_ABI) && defined(FDT)
+static vm_offset_t
+linux_parse_boot_param(struct arm64_bootparams *abp)
+{
+ struct fdt_header *dtb_ptr;
+ size_t dtb_size;
+
+ if (abp->modulep == 0)
+ return (0);
+ /* Test if modulep points to a valid DTB. */
+ dtb_ptr = (struct fdt_header *)abp->modulep;
+ if (fdt_check_header(dtb_ptr) != 0)
+ return (0);
+ dtb_size = fdt_totalsize(dtb_ptr);
+ return (fake_preload_metadata(dtb_ptr, dtb_size));
+}
+
+#endif
+
+static vm_offset_t
+freebsd_parse_boot_param(struct arm64_bootparams *abp)
+{
+ vm_offset_t lastaddr = 0;
+ void *kmdp;
+#ifdef DDB
+ vm_offset_t ksym_start;
+ vm_offset_t ksym_end;
+#endif
+
+ if (abp->modulep == 0)
+ return (0);
+
+ preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);
+ kmdp = preload_search_by_type("elf kernel");
+ if (kmdp == NULL)
+ return (0);
+
+ boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
+ loader_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
+ init_static_kenv(loader_envp, 0);
+ lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
+#ifdef DDB
+ ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
+ ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
+ db_fetch_ksymtab(ksym_start, ksym_end, 0);
+#endif
+ return (lastaddr);
+}
+
+vm_offset_t
+parse_boot_param(struct arm64_bootparams *abp)
+{
+ vm_offset_t lastaddr;
+
+#if defined(LINUX_BOOT_ABI) && defined(FDT)
+ lastaddr = linux_parse_boot_param(abp);
+ if (lastaddr != 0)
+ return (lastaddr);
+#endif
+ lastaddr = freebsd_parse_boot_param(abp);
+ if (lastaddr != 0)
+ return (lastaddr);
+
+ /* Fall back to hardcoded metadata. */
+ lastaddr = fake_preload_metadata(NULL, 0);
+
+ return (lastaddr);
+}
diff --git a/sys/arm64/arm64/mem.c b/sys/arm64/arm64/mem.c
new file mode 100644
index 000000000000..d51744c6fbe3
--- /dev/null
+++ b/sys/arm64/arm64/mem.c
@@ -0,0 +1,138 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/memrange.h>
+#include <sys/uio.h>
+
+#include <machine/memdev.h>
+#include <machine/vmparam.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_page.h>
+
+struct mem_range_softc mem_range_softc;
+
+int
+memrw(struct cdev *dev, struct uio *uio, int flags)
+{
+ struct iovec *iov;
+ struct vm_page m;
+ vm_page_t marr;
+ vm_offset_t off, v;
+ u_int cnt;
+ int error;
+
+ error = 0;
+
+ while (uio->uio_resid > 0 && error == 0) {
+ iov = uio->uio_iov;
+ if (iov->iov_len == 0) {
+ uio->uio_iov++;
+ uio->uio_iovcnt--;
+ if (uio->uio_iovcnt < 0)
+ panic("memrw");
+ continue;
+ }
+
+ v = uio->uio_offset;
+ off = v & PAGE_MASK;
+ cnt = ulmin(iov->iov_len, PAGE_SIZE - (u_int)off);
+ if (cnt == 0)
+ continue;
+
+ switch(dev2unit(dev)) {
+ case CDEV_MINOR_KMEM:
+ /* If the address is in the DMAP just copy it */
+ if (VIRT_IN_DMAP(v)) {
+ error = uiomove((void *)v, cnt, uio);
+ break;
+ }
+
+ if (!kernacc((void *)v, cnt, uio->uio_rw == UIO_READ ?
+ VM_PROT_READ : VM_PROT_WRITE)) {
+ error = EFAULT;
+ break;
+ }
+
+ /* Get the physical address to read */
+ v = pmap_extract(kernel_pmap, v);
+ if (v == 0) {
+ error = EFAULT;
+ break;
+ }
+
+ /* FALLTHROUGH */
+ case CDEV_MINOR_MEM:
+ /* If within the DMAP use this to copy from */
+ if (PHYS_IN_DMAP(v)) {
+ v = PHYS_TO_DMAP(v);
+ error = uiomove((void *)v, cnt, uio);
+ break;
+ }
+
+ /* Have uiomove_fromphys handle the data */
+ m.phys_addr = trunc_page(v);
+ marr = &m;
+ uiomove_fromphys(&marr, off, cnt, uio);
+ break;
+ }
+ }
+
+ return (error);
+}
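+
+/*
+ * Usage sketch (userland, illustrative only; the physical address is a
+ * placeholder): memrw() above backs plain read(2)/pread(2) on /dev/mem
+ * and /dev/kmem, splitting each request at page boundaries.
+ *
+ *   #include <fcntl.h>
+ *   #include <unistd.h>
+ *
+ *   int fd = open("/dev/mem", O_RDONLY);
+ *   char buf[4096];
+ *   if (fd >= 0)
+ *           (void)pread(fd, buf, sizeof(buf), 0x80000000UL);
+ */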
+
+/*
+ * allow user processes to MMAP some memory sections
+ * instead of going through read/write
+ */
+/* ARGSUSED */
+int
+memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
+ int prot __unused, vm_memattr_t *memattr __unused)
+{
+ if (dev2unit(dev) == CDEV_MINOR_MEM) {
+ *paddr = offset;
+ return (0);
+ }
+ return (-1);
+}
+
+int
+memioctl_md(struct cdev *dev __unused, u_long cmd __unused,
+ caddr_t data __unused, int flags __unused, struct thread *td __unused)
+{
+ return (ENOTTY);
+}
diff --git a/sys/arm64/arm64/memcpy.S b/sys/arm64/arm64/memcpy.S
new file mode 100644
index 000000000000..f98c2513fa58
--- /dev/null
+++ b/sys/arm64/arm64/memcpy.S
@@ -0,0 +1,219 @@
+/* Copyright (c) 2012, Linaro Limited
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the Linaro nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
+
+/*
+ * Copyright (c) 2015 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/* Assumptions:
+ *
+ * ARMv8-a, AArch64, unaligned accesses.
+ *
+ */
+
+#define dstin x0
+#define src x1
+#define count x2
+#define dst x3
+#define srcend x4
+#define dstend x5
+#define A_l x6
+#define A_lw w6
+#define A_h x7
+#define A_hw w7
+#define B_l x8
+#define B_lw w8
+#define B_h x9
+#define C_l x10
+#define C_h x11
+#define D_l x12
+#define D_h x13
+#define E_l src
+#define E_h count
+#define F_l srcend
+#define F_h dst
+#define tmp1 x9
+
+#define L(l) .L ## l
+
+/* Copies are split into 3 main cases: small copies of up to 16 bytes,
+ medium copies of 17..96 bytes which are fully unrolled. Large copies
+ of more than 96 bytes align the destination and use an unrolled loop
+ processing 64 bytes per iteration.
+ Small and medium copies read all data before writing, allowing any
+ kind of overlap, and memmove tailcalls memcpy for these cases as
+ well as non-overlapping copies.
+*/
+
+ENTRY(memcpy)
+ prfm PLDL1KEEP, [src]
+ add srcend, src, count
+ add dstend, dstin, count
+ cmp count, 16
+ b.ls L(copy16)
+ cmp count, 96
+ b.hi L(copy_long)
+
+ /* Medium copies: 17..96 bytes. */
+ sub tmp1, count, 1
+ ldp A_l, A_h, [src]
+ tbnz tmp1, 6, L(copy96)
+ ldp D_l, D_h, [srcend, -16]
+ tbz tmp1, 5, 1f
+ ldp B_l, B_h, [src, 16]
+ ldp C_l, C_h, [srcend, -32]
+ stp B_l, B_h, [dstin, 16]
+ stp C_l, C_h, [dstend, -32]
+1:
+ stp A_l, A_h, [dstin]
+ stp D_l, D_h, [dstend, -16]
+ ret
+
+ .p2align 4
+ /* Small copies: 0..16 bytes. */
+L(copy16):
+ cmp count, 8
+ b.lo 1f
+ ldr A_l, [src]
+ ldr A_h, [srcend, -8]
+ str A_l, [dstin]
+ str A_h, [dstend, -8]
+ ret
+ .p2align 4
+1:
+ tbz count, 2, 1f
+ ldr A_lw, [src]
+ ldr A_hw, [srcend, -4]
+ str A_lw, [dstin]
+ str A_hw, [dstend, -4]
+ ret
+
+ /* Copy 0..3 bytes. Use a branchless sequence that copies the same
+ byte 3 times if count==1, or the 2nd byte twice if count==2. */
+1:
+ cbz count, 2f
+ lsr tmp1, count, 1
+ ldrb A_lw, [src]
+ ldrb A_hw, [srcend, -1]
+ ldrb B_lw, [src, tmp1]
+ strb A_lw, [dstin]
+ strb B_lw, [dstin, tmp1]
+ strb A_hw, [dstend, -1]
+2: ret
+
+ .p2align 4
+ /* Copy 64..96 bytes. Copy 64 bytes from the start and
+ 32 bytes from the end. */
+L(copy96):
+ ldp B_l, B_h, [src, 16]
+ ldp C_l, C_h, [src, 32]
+ ldp D_l, D_h, [src, 48]
+ ldp E_l, E_h, [srcend, -32]
+ ldp F_l, F_h, [srcend, -16]
+ stp A_l, A_h, [dstin]
+ stp B_l, B_h, [dstin, 16]
+ stp C_l, C_h, [dstin, 32]
+ stp D_l, D_h, [dstin, 48]
+ stp E_l, E_h, [dstend, -32]
+ stp F_l, F_h, [dstend, -16]
+ ret
+
+ /* Align DST to 16 byte alignment so that we don't cross cache line
+ boundaries on both loads and stores. There are at least 96 bytes
+ to copy, so copy 16 bytes unaligned and then align. The loop
+ copies 64 bytes per iteration and prefetches one iteration ahead. */
+
+ .p2align 4
+L(copy_long):
+ and tmp1, dstin, 15
+ bic dst, dstin, 15
+ ldp D_l, D_h, [src]
+ sub src, src, tmp1
+ add count, count, tmp1 /* Count is now 16 too large. */
+ ldp A_l, A_h, [src, 16]
+ stp D_l, D_h, [dstin]
+ ldp B_l, B_h, [src, 32]
+ ldp C_l, C_h, [src, 48]
+ ldp D_l, D_h, [src, 64]!
+ subs count, count, 128 + 16 /* Test and readjust count. */
+ b.ls 2f
+1:
+ stp A_l, A_h, [dst, 16]
+ ldp A_l, A_h, [src, 16]
+ stp B_l, B_h, [dst, 32]
+ ldp B_l, B_h, [src, 32]
+ stp C_l, C_h, [dst, 48]
+ ldp C_l, C_h, [src, 48]
+ stp D_l, D_h, [dst, 64]!
+ ldp D_l, D_h, [src, 64]!
+ subs count, count, 64
+ b.hi 1b
+
+ /* Write the last full set of 64 bytes. The remainder is at most 64
+ bytes, so it is safe to always copy 64 bytes from the end even if
+ there is just 1 byte left. */
+2:
+ ldp E_l, E_h, [srcend, -64]
+ stp A_l, A_h, [dst, 16]
+ ldp A_l, A_h, [srcend, -48]
+ stp B_l, B_h, [dst, 32]
+ ldp B_l, B_h, [srcend, -32]
+ stp C_l, C_h, [dst, 48]
+ ldp C_l, C_h, [srcend, -16]
+ stp D_l, D_h, [dst, 64]
+ stp E_l, E_h, [dstend, -64]
+ stp A_l, A_h, [dstend, -48]
+ stp B_l, B_h, [dstend, -32]
+ stp C_l, C_h, [dstend, -16]
+ ret
+END(memcpy)
diff --git a/sys/arm64/arm64/memmove.S b/sys/arm64/arm64/memmove.S
new file mode 100644
index 000000000000..4b99dccc536e
--- /dev/null
+++ b/sys/arm64/arm64/memmove.S
@@ -0,0 +1,150 @@
+/* Copyright (c) 2013, Linaro Limited
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the Linaro nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
+
+/*
+ * Copyright (c) 2015 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/* Assumptions:
+ *
+ * ARMv8-a, AArch64, unaligned accesses
+ */
+
+/* Parameters and result. */
+#define dstin x0
+#define src x1
+#define count x2
+#define srcend x3
+#define dstend x4
+#define tmp1 x5
+#define A_l x6
+#define A_h x7
+#define B_l x8
+#define B_h x9
+#define C_l x10
+#define C_h x11
+#define D_l x12
+#define D_h x13
+#define E_l count
+#define E_h tmp1
+
+/* All memmoves up to 96 bytes are done by memcpy as it supports overlaps.
+ Larger backwards copies are also handled by memcpy. The only remaining
+ case is forward large copies. The destination is aligned, and an
+ unrolled loop processes 64 bytes per iteration.
+*/
+
+ENTRY(bcopy)
+ /* Switch the input pointers when called as bcopy */
+ mov x3, x1
+ mov x1, x0
+ mov x0, x3
+EENTRY(memmove)
+ sub tmp1, dstin, src
+ cmp count, 96
+ ccmp tmp1, count, 2, hi
+ b.hs memcpy
+
+ cbz tmp1, 3f
+ add dstend, dstin, count
+ add srcend, src, count
+
+ /* Align dstend to 16 byte alignment so that we don't cross cache line
+ boundaries on both loads and stores. There are at least 96 bytes
+ to copy, so copy 16 bytes unaligned and then align. The loop
+ copies 64 bytes per iteration and prefetches one iteration ahead. */
+
+ and tmp1, dstend, 15
+ ldp D_l, D_h, [srcend, -16]
+ sub srcend, srcend, tmp1
+ sub count, count, tmp1
+ ldp A_l, A_h, [srcend, -16]
+ stp D_l, D_h, [dstend, -16]
+ ldp B_l, B_h, [srcend, -32]
+ ldp C_l, C_h, [srcend, -48]
+ ldp D_l, D_h, [srcend, -64]!
+ sub dstend, dstend, tmp1
+ subs count, count, 128
+ b.ls 2f
+ nop
+1:
+ stp A_l, A_h, [dstend, -16]
+ ldp A_l, A_h, [srcend, -16]
+ stp B_l, B_h, [dstend, -32]
+ ldp B_l, B_h, [srcend, -32]
+ stp C_l, C_h, [dstend, -48]
+ ldp C_l, C_h, [srcend, -48]
+ stp D_l, D_h, [dstend, -64]!
+ ldp D_l, D_h, [srcend, -64]!
+ subs count, count, 64
+ b.hi 1b
+
+ /* Write the last full set of 64 bytes. The remainder is at most 64
+ bytes, so it is safe to always copy 64 bytes from the start even if
+ there is just 1 byte left. */
+2:
+ ldp E_l, E_h, [src, 48]
+ stp A_l, A_h, [dstend, -16]
+ ldp A_l, A_h, [src, 32]
+ stp B_l, B_h, [dstend, -32]
+ ldp B_l, B_h, [src, 16]
+ stp C_l, C_h, [dstend, -48]
+ ldp C_l, C_h, [src]
+ stp D_l, D_h, [dstend, -64]
+ stp E_l, E_h, [dstin, 48]
+ stp A_l, A_h, [dstin, 32]
+ stp B_l, B_h, [dstin, 16]
+ stp C_l, C_h, [dstin]
+3: ret
+EEND(memmove)
+END(bcopy)
diff --git a/sys/arm64/arm64/minidump_machdep.c b/sys/arm64/arm64/minidump_machdep.c
new file mode 100644
index 000000000000..ba22f7dfc16f
--- /dev/null
+++ b/sys/arm64/arm64/minidump_machdep.c
@@ -0,0 +1,448 @@
+/*-
+ * Copyright (c) 2006 Peter Wemm
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_watchdog.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/cons.h>
+#include <sys/kernel.h>
+#include <sys/kerneldump.h>
+#include <sys/msgbuf.h>
+#include <sys/watchdog.h>
+#include <sys/vmmeter.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_page.h>
+#include <vm/vm_phys.h>
+#include <vm/pmap.h>
+
+#include <machine/md_var.h>
+#include <machine/pte.h>
+#include <machine/minidump.h>
+
+CTASSERT(sizeof(struct kerneldumpheader) == 512);
+
+uint64_t *vm_page_dump;
+int vm_page_dump_size;
+
+static struct kerneldumpheader kdh;
+
+/* Handle chunked writes. */
+static size_t fragsz;
+static void *dump_va;
+static size_t counter, progress, dumpsize;
+
+static uint64_t tmpbuffer[Ln_ENTRIES];
+
+CTASSERT(sizeof(*vm_page_dump) == 8);
+
+static int
+is_dumpable(vm_paddr_t pa)
+{
+ vm_page_t m;
+ int i;
+
+ if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL)
+ return ((m->flags & PG_NODUMP) == 0);
+ for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
+ if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
+ return (1);
+ }
+ return (0);
+}
+
+static int
+blk_flush(struct dumperinfo *di)
+{
+ int error;
+
+ if (fragsz == 0)
+ return (0);
+
+ error = dump_append(di, dump_va, 0, fragsz);
+ fragsz = 0;
+ return (error);
+}
+
+static struct {
+ int min_per;
+ int max_per;
+ int visited;
+} progress_track[10] = {
+ { 0, 10, 0},
+ { 10, 20, 0},
+ { 20, 30, 0},
+ { 30, 40, 0},
+ { 40, 50, 0},
+ { 50, 60, 0},
+ { 60, 70, 0},
+ { 70, 80, 0},
+ { 80, 90, 0},
+ { 90, 100, 0}
+};
+
+static void
+report_progress(size_t progress, size_t dumpsize)
+{
+ int sofar, i;
+
+ sofar = 100 - ((progress * 100) / dumpsize);
+ for (i = 0; i < nitems(progress_track); i++) {
+ if (sofar < progress_track[i].min_per ||
+ sofar > progress_track[i].max_per)
+ continue;
+ if (progress_track[i].visited)
+ return;
+ progress_track[i].visited = 1;
+ printf("..%d%%", sofar);
+ return;
+ }
+}
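+
+/*
+ * Worked example (illustrative numbers): progress counts down from
+ * dumpsize, so with dumpsize = 1 GiB and 256 MiB still to be written,
+ * sofar = 100 - (256 * 100) / 1024 = 75 and "..75%" is printed once;
+ * the visited flag keeps each 10% bucket from being reported twice.
+ */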
+
+static int
+blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz)
+{
+ size_t len;
+ int error, c;
+ u_int maxdumpsz;
+
+ maxdumpsz = min(di->maxiosize, MAXDUMPPGS * PAGE_SIZE);
+ if (maxdumpsz == 0) /* seatbelt */
+ maxdumpsz = PAGE_SIZE;
+ error = 0;
+ if ((sz % PAGE_SIZE) != 0) {
+ printf("size not page aligned\n");
+ return (EINVAL);
+ }
+ if (ptr != NULL && pa != 0) {
+ printf("cant have both va and pa!\n");
+ return (EINVAL);
+ }
+ if ((((uintptr_t)pa) % PAGE_SIZE) != 0) {
+ printf("address not page aligned %p\n", ptr);
+ return (EINVAL);
+ }
+ if (ptr != NULL) {
+ /*
+ * If we're doing a virtual dump, flush any
+ * pre-existing pa pages.
+ */
+ error = blk_flush(di);
+ if (error)
+ return (error);
+ }
+ while (sz) {
+ len = maxdumpsz - fragsz;
+ if (len > sz)
+ len = sz;
+ counter += len;
+ progress -= len;
+ if (counter >> 22) {
+ report_progress(progress, dumpsize);
+ counter &= (1 << 22) - 1;
+ }
+
+ wdog_kern_pat(WD_LASTVAL);
+
+ if (ptr) {
+ error = dump_append(di, ptr, 0, len);
+ if (error)
+ return (error);
+ ptr += len;
+ sz -= len;
+ } else {
+ dump_va = (void *)PHYS_TO_DMAP(pa);
+ fragsz += len;
+ pa += len;
+ sz -= len;
+ error = blk_flush(di);
+ if (error)
+ return (error);
+ }
+
+ /* Check for user abort. */
+ c = cncheckc();
+ if (c == 0x03)
+ return (ECANCELED);
+ if (c != -1)
+ printf(" (CTRL-C to abort) ");
+ }
+
+ return (0);
+}
+
+int
+minidumpsys(struct dumperinfo *di)
+{
+ struct minidumphdr mdhdr;
+ pd_entry_t *l0, *l1, *l2;
+ pt_entry_t *l3;
+ vm_offset_t va;
+ vm_paddr_t pa;
+ uint64_t bits;
+ uint32_t pmapsize;
+ int bit, error, i, j, retry_count;
+
+ retry_count = 0;
+ retry:
+ retry_count++;
+ error = 0;
+ pmapsize = 0;
+ for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += L2_SIZE) {
+ pmapsize += PAGE_SIZE;
+ if (!pmap_get_tables(pmap_kernel(), va, &l0, &l1, &l2, &l3))
+ continue;
+
+ if ((*l1 & ATTR_DESCR_MASK) == L1_BLOCK) {
+ pa = *l1 & ~ATTR_MASK;
+ for (i = 0; i < Ln_ENTRIES * Ln_ENTRIES;
+ i++, pa += PAGE_SIZE)
+ if (is_dumpable(pa))
+ dump_add_page(pa);
+ pmapsize += (Ln_ENTRIES - 1) * PAGE_SIZE;
+ va += L1_SIZE - L2_SIZE;
+ } else if ((*l2 & ATTR_DESCR_MASK) == L2_BLOCK) {
+ pa = *l2 & ~ATTR_MASK;
+ for (i = 0; i < Ln_ENTRIES; i++, pa += PAGE_SIZE) {
+ if (is_dumpable(pa))
+ dump_add_page(pa);
+ }
+ } else if ((*l2 & ATTR_DESCR_MASK) == L2_TABLE) {
+ for (i = 0; i < Ln_ENTRIES; i++) {
+ if ((l3[i] & ATTR_DESCR_MASK) != L3_PAGE)
+ continue;
+ pa = l3[i] & ~ATTR_MASK;
+ if (is_dumpable(pa))
+ dump_add_page(pa);
+ }
+ }
+ }
+
+ /* Calculate dump size. */
+ dumpsize = pmapsize;
+ dumpsize += round_page(msgbufp->msg_size);
+ dumpsize += round_page(vm_page_dump_size);
+ for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
+ bits = vm_page_dump[i];
+ while (bits) {
+ bit = ffsl(bits) - 1;
+ pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) +
+ bit) * PAGE_SIZE;
+ /* Clear out undumpable pages now if needed */
+ if (is_dumpable(pa))
+ dumpsize += PAGE_SIZE;
+ else
+ dump_drop_page(pa);
+ bits &= ~(1ul << bit);
+ }
+ }
+ dumpsize += PAGE_SIZE;
+
+ progress = dumpsize;
+
+ /* Initialize mdhdr */
+ bzero(&mdhdr, sizeof(mdhdr));
+ strcpy(mdhdr.magic, MINIDUMP_MAGIC);
+ mdhdr.version = MINIDUMP_VERSION;
+ mdhdr.msgbufsize = msgbufp->msg_size;
+ mdhdr.bitmapsize = vm_page_dump_size;
+ mdhdr.pmapsize = pmapsize;
+ mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
+ mdhdr.dmapphys = DMAP_MIN_PHYSADDR;
+ mdhdr.dmapbase = DMAP_MIN_ADDRESS;
+ mdhdr.dmapend = DMAP_MAX_ADDRESS;
+
+ dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_AARCH64_VERSION,
+ dumpsize);
+
+ error = dump_start(di, &kdh);
+ if (error != 0)
+ goto fail;
+
+ printf("Dumping %llu out of %ju MB:", (long long)dumpsize >> 20,
+ ptoa((uintmax_t)physmem) / 1048576);
+
+ /* Dump my header */
+ bzero(&tmpbuffer, sizeof(tmpbuffer));
+ bcopy(&mdhdr, &tmpbuffer, sizeof(mdhdr));
+ error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
+ if (error)
+ goto fail;
+
+ /* Dump msgbuf up front */
+ error = blk_write(di, (char *)msgbufp->msg_ptr, 0,
+ round_page(msgbufp->msg_size));
+ if (error)
+ goto fail;
+
+ /* Dump bitmap */
+ error = blk_write(di, (char *)vm_page_dump, 0,
+ round_page(vm_page_dump_size));
+ if (error)
+ goto fail;
+
+ /* Dump kernel page directory pages */
+ bzero(&tmpbuffer, sizeof(tmpbuffer));
+ for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += L2_SIZE) {
+ if (!pmap_get_tables(pmap_kernel(), va, &l0, &l1, &l2, &l3)) {
+ /* We always write a page, even if it is zero */
+ error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
+ if (error)
+ goto fail;
+ /* flush, in case we reuse tmpbuffer in the same block */
+ error = blk_flush(di);
+ if (error)
+ goto fail;
+ } else if ((*l1 & ATTR_DESCR_MASK) == L1_BLOCK) {
+ /*
+ * Handle a 1GB block mapping: write out 512 fake L2
+ * pages.
+ */
+ pa = (*l1 & ~ATTR_MASK) | (va & L1_OFFSET);
+
+ for (i = 0; i < Ln_ENTRIES; i++) {
+ for (j = 0; j < Ln_ENTRIES; j++) {
+ tmpbuffer[j] = pa + i * L2_SIZE +
+ j * PAGE_SIZE | ATTR_DEFAULT |
+ L3_PAGE;
+ }
+ error = blk_write(di, (char *)&tmpbuffer, 0,
+ PAGE_SIZE);
+ if (error)
+ goto fail;
+ }
+ /* flush, in case we reuse tmpbuffer in the same block */
+ error = blk_flush(di);
+ if (error)
+ goto fail;
+ bzero(&tmpbuffer, sizeof(tmpbuffer));
+ va += L1_SIZE - L2_SIZE;
+ } else if ((*l2 & ATTR_DESCR_MASK) == L2_BLOCK) {
+ pa = (*l2 & ~ATTR_MASK) | (va & L2_OFFSET);
+
+ /* Generate fake l3 entries based upon the l2 entry */
+ for (i = 0; i < Ln_ENTRIES; i++) {
+ tmpbuffer[i] = pa + (i * PAGE_SIZE) |
+ ATTR_DEFAULT | L3_PAGE;
+ }
+ error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
+ if (error)
+ goto fail;
+ /* flush, in case we reuse tmpbuffer in the same block */
+ error = blk_flush(di);
+ if (error)
+ goto fail;
+ bzero(&tmpbuffer, sizeof(tmpbuffer));
+ continue;
+ } else {
+ pa = *l2 & ~ATTR_MASK;
+
+ error = blk_write(di, NULL, pa, PAGE_SIZE);
+ if (error)
+ goto fail;
+ }
+ }
+
+ /* Dump memory chunks */
+ for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
+ bits = vm_page_dump[i];
+ while (bits) {
+ bit = ffsl(bits) - 1;
+ pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) +
+ bit) * PAGE_SIZE;
+ error = blk_write(di, 0, pa, PAGE_SIZE);
+ if (error)
+ goto fail;
+ bits &= ~(1ul << bit);
+ }
+ }
+
+ error = blk_flush(di);
+ if (error)
+ goto fail;
+
+ error = dump_finish(di, &kdh);
+ if (error != 0)
+ goto fail;
+
+ printf("\nDump complete\n");
+ return (0);
+
+fail:
+ if (error < 0)
+ error = -error;
+
+ printf("\n");
+ if (error == ENOSPC) {
+ printf("Dump map grown while dumping. ");
+ if (retry_count < 5) {
+ printf("Retrying...\n");
+ goto retry;
+ }
+ printf("Dump failed.\n");
+ }
+ else if (error == ECANCELED)
+ printf("Dump aborted\n");
+ else if (error == E2BIG) {
+ printf("Dump failed. Partition too small (about %lluMB were "
+ "needed this time).\n", (long long)dumpsize >> 20);
+ } else
+ printf("** DUMP FAILED (ERROR %d) **\n", error);
+ return (error);
+}
+
+void
+dump_add_page(vm_paddr_t pa)
+{
+ int idx, bit;
+
+ pa >>= PAGE_SHIFT;
+ idx = pa >> 6; /* 2^6 = 64 */
+ bit = pa & 63;
+ atomic_set_long(&vm_page_dump[idx], 1ul << bit);
+}
+
+void
+dump_drop_page(vm_paddr_t pa)
+{
+ int idx, bit;
+
+ pa >>= PAGE_SHIFT;
+ idx = pa >> 6; /* 2^6 = 64 */
+ bit = pa & 63;
+ atomic_clear_long(&vm_page_dump[idx], 1ul << bit);
+}
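+
+/*
+ * Illustrative sketch (hypothetical helper, not used by the dump code):
+ * vm_page_dump is indexed as one bit per physical page, 64 pages per
+ * 64-bit word.  A test matching the set/clear pair above would be:
+ */
+#if 0
+static int
+dump_page_is_marked(vm_paddr_t pa)
+{
+ int idx, bit;
+
+ pa >>= PAGE_SHIFT;
+ idx = pa >> 6; /* 2^6 = 64 */
+ bit = pa & 63;
+ return ((vm_page_dump[idx] & (1ul << bit)) != 0);
+}
+#endif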
diff --git a/sys/arm64/arm64/mp_machdep.c b/sys/arm64/arm64/mp_machdep.c
new file mode 100644
index 000000000000..8c8ceafe18e9
--- /dev/null
+++ b/sys/arm64/arm64/mp_machdep.c
@@ -0,0 +1,896 @@
+/*-
+ * Copyright (c) 2015-2016 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include "opt_acpi.h"
+#include "opt_ddb.h"
+#include "opt_kstack_pages.h"
+#include "opt_platform.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/cpu.h>
+#include <sys/csan.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/pcpu.h>
+#include <sys/proc.h>
+#include <sys/sched.h>
+#include <sys/smp.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_map.h>
+
+#include <machine/machdep.h>
+#include <machine/debug_monitor.h>
+#include <machine/intr.h>
+#include <machine/smp.h>
+#ifdef VFP
+#include <machine/vfp.h>
+#endif
+
+#ifdef DEV_ACPI
+#include <contrib/dev/acpica/include/acpi.h>
+#include <dev/acpica/acpivar.h>
+#endif
+
+#ifdef FDT
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/ofw/ofw_cpu.h>
+#endif
+
+#include <dev/psci/psci.h>
+
+#include "pic_if.h"
+
+#define MP_QUIRK_CPULIST 0x01 /* The list of cpus may be wrong, */
+ /* don't panic if one fails to start */
+static uint32_t mp_quirks;
+
+#ifdef FDT
+static struct {
+ const char *compat;
+ uint32_t quirks;
+} fdt_quirks[] = {
+ { "arm,foundation-aarch64", MP_QUIRK_CPULIST },
+ { "arm,fvp-base", MP_QUIRK_CPULIST },
+ /* This is incorrect in some DTS files */
+ { "arm,vfp-base", MP_QUIRK_CPULIST },
+ { NULL, 0 },
+};
+#endif
+
+typedef void intr_ipi_send_t(void *, cpuset_t, u_int);
+typedef void intr_ipi_handler_t(void *);
+
+#define INTR_IPI_NAMELEN (MAXCOMLEN + 1)
+struct intr_ipi {
+ intr_ipi_handler_t * ii_handler;
+ void * ii_handler_arg;
+ intr_ipi_send_t * ii_send;
+ void * ii_send_arg;
+ char ii_name[INTR_IPI_NAMELEN];
+ u_long * ii_count;
+};
+
+static struct intr_ipi ipi_sources[INTR_IPI_COUNT];
+
+static struct intr_ipi *intr_ipi_lookup(u_int);
+static void intr_pic_ipi_setup(u_int, const char *, intr_ipi_handler_t *,
+ void *);
+
+static void ipi_ast(void *);
+static void ipi_hardclock(void *);
+static void ipi_preempt(void *);
+static void ipi_rendezvous(void *);
+static void ipi_stop(void *);
+
+struct pcb stoppcbs[MAXCPU];
+
+/*
+ * Not all systems boot from the first CPU in the device tree. To work around
+ * this we need to find which CPU we booted from so that, when we later
+ * enable the secondary CPUs, we can skip it.
+ */
+static int cpu0 = -1;
+
+void mpentry(unsigned long cpuid);
+void init_secondary(uint64_t);
+
+/* Synchronize AP startup. */
+static struct mtx ap_boot_mtx;
+
+/* Stacks for AP initialization, discarded once idle threads are started. */
+void *bootstack;
+static void *bootstacks[MAXCPU];
+
+/* Count of started APs, used to synchronize access to bootstack. */
+static volatile int aps_started;
+
+/* Set to 1 once we're ready to let the APs out of the pen. */
+static volatile int aps_ready;
+
+/* Temporary variables for init_secondary() */
+void *dpcpu[MAXCPU - 1];
+
+static void
+release_aps(void *dummy __unused)
+{
+ int i, started;
+
+ /* Only release CPUs if they exist */
+ if (mp_ncpus == 1)
+ return;
+
+ intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
+ intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
+ intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
+ intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL);
+ intr_pic_ipi_setup(IPI_STOP_HARD, "stop hard", ipi_stop, NULL);
+ intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);
+
+ atomic_store_rel_int(&aps_ready, 1);
+ /* Wake up the other CPUs */
+ __asm __volatile(
+ "dsb ishst \n"
+ "sev \n"
+ ::: "memory");
+
+ printf("Release APs...");
+
+ started = 0;
+ for (i = 0; i < 2000; i++) {
+ if (smp_started) {
+ printf("done\n");
+ return;
+ }
+ /*
+ * Don't time out while we are making progress. Some large
+ * systems can take a while to start all CPUs.
+ */
+ if (smp_cpus > started) {
+ i = 0;
+ started = smp_cpus;
+ }
+ DELAY(1000);
+ }
+
+ printf("APs not started\n");
+}
+SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
+
+void
+init_secondary(uint64_t cpu)
+{
+ struct pcpu *pcpup;
+ pmap_t pmap0;
+
+ pcpup = &__pcpu[cpu];
+ /*
+ * Set the pcpu pointer with a backup in tpidr_el1 to be
+ * loaded when entering the kernel from userland.
+ */
+ __asm __volatile(
+ "mov x18, %0 \n"
+ "msr tpidr_el1, %0" :: "r"(pcpup));
+
+ /*
+ * Identify current CPU. This is necessary to setup
+ * affinity registers and to provide support for
+ * runtime chip identification.
+ *
+ * We need this before signalling the CPU is ready to
+ * let the boot CPU use the results.
+ */
+ identify_cpu(cpu);
+
+ /* Ensure the stores in identify_cpu have completed */
+ atomic_thread_fence_acq_rel();
+
+ /* Signal the BSP and spin until it has released all APs. */
+ atomic_add_int(&aps_started, 1);
+ while (!atomic_load_int(&aps_ready))
+ __asm __volatile("wfe");
+
+ pcpup->pc_midr = get_midr();
+
+ /* Initialize curthread */
+ KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
+ pcpup->pc_curthread = pcpup->pc_idlethread;
+
+ /* Initialize curpmap to match TTBR0's current setting. */
+ pmap0 = vmspace_pmap(&vmspace0);
+ KASSERT(pmap_to_ttbr0(pmap0) == READ_SPECIALREG(ttbr0_el1),
+ ("pmap0 doesn't match cpu %ld's ttbr0", cpu));
+ pcpup->pc_curpmap = pmap0;
+
+ install_cpu_errata();
+
+ intr_pic_init_secondary();
+
+ /* Start per-CPU event timers. */
+ cpu_initclocks_ap();
+
+#ifdef VFP
+ vfp_init();
+#endif
+
+ dbg_init();
+ pan_enable();
+
+ mtx_lock_spin(&ap_boot_mtx);
+ atomic_add_rel_32(&smp_cpus, 1);
+ if (smp_cpus == mp_ncpus) {
+ /* enable IPIs, TLB shootdown, freezes, etc. */
+ atomic_store_rel_int(&smp_started, 1);
+ }
+ mtx_unlock_spin(&ap_boot_mtx);
+
+ kcsan_cpu_init(cpu);
+
+ /*
+ * Assert that smp_after_idle_runnable condition is reasonable.
+ */
+ MPASS(PCPU_GET(curpcb) == NULL);
+
+ /* Enter the scheduler */
+ sched_throw(NULL);
+
+ panic("scheduler returned us to init_secondary");
+ /* NOTREACHED */
+}
+
+static void
+smp_after_idle_runnable(void *arg __unused)
+{
+ struct pcpu *pc;
+ int cpu;
+
+ for (cpu = 1; cpu < mp_ncpus; cpu++) {
+ if (bootstacks[cpu] != NULL) {
+ pc = pcpu_find(cpu);
+ while (atomic_load_ptr(&pc->pc_curpcb) == NULL)
+ cpu_spinwait();
+ kmem_free((vm_offset_t)bootstacks[cpu], PAGE_SIZE);
+ }
+ }
+}
+SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
+ smp_after_idle_runnable, NULL);
+
+/*
+ * Send IPI thru interrupt controller.
+ */
+static void
+pic_ipi_send(void *arg, cpuset_t cpus, u_int ipi)
+{
+
+ KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
+
+ /*
+ * Ensure that this CPU's stores will be visible to IPI
+ * recipients before starting to send the interrupts.
+ */
+ dsb(ishst);
+
+ PIC_IPI_SEND(intr_irq_root_dev, arg, cpus, ipi);
+}
+
+/*
+ * Setup IPI handler on interrupt controller.
+ *
+ * Not SMP coherent.
+ */
+static void
+intr_pic_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand,
+ void *arg)
+{
+ struct intr_irqsrc *isrc;
+ struct intr_ipi *ii;
+ int error;
+
+ KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
+ KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi));
+
+ error = PIC_IPI_SETUP(intr_irq_root_dev, ipi, &isrc);
+ if (error != 0)
+ return;
+
+ isrc->isrc_handlers++;
+
+ ii = intr_ipi_lookup(ipi);
+ KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi));
+
+ ii->ii_handler = hand;
+ ii->ii_handler_arg = arg;
+ ii->ii_send = pic_ipi_send;
+ ii->ii_send_arg = isrc;
+ strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN);
+ ii->ii_count = intr_ipi_setup_counters(name);
+}
+
+static void
+intr_ipi_send(cpuset_t cpus, u_int ipi)
+{
+ struct intr_ipi *ii;
+
+ ii = intr_ipi_lookup(ipi);
+ if (ii->ii_count == NULL)
+ panic("%s: not setup IPI %u", __func__, ipi);
+
+ ii->ii_send(ii->ii_send_arg, cpus, ipi);
+}
+
+static void
+ipi_ast(void *dummy __unused)
+{
+
+ CTR0(KTR_SMP, "IPI_AST");
+}
+
+static void
+ipi_hardclock(void *dummy __unused)
+{
+
+ CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
+ hardclockintr();
+}
+
+static void
+ipi_preempt(void *dummy __unused)
+{
+ CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
+ sched_preempt(curthread);
+}
+
+static void
+ipi_rendezvous(void *dummy __unused)
+{
+
+ CTR0(KTR_SMP, "IPI_RENDEZVOUS");
+ smp_rendezvous_action();
+}
+
+static void
+ipi_stop(void *dummy __unused)
+{
+ u_int cpu;
+
+ CTR0(KTR_SMP, "IPI_STOP");
+
+ cpu = PCPU_GET(cpuid);
+ savectx(&stoppcbs[cpu]);
+
+ /* Indicate we are stopped */
+ CPU_SET_ATOMIC(cpu, &stopped_cpus);
+
+ /* Wait for restart */
+ while (!CPU_ISSET(cpu, &started_cpus))
+ cpu_spinwait();
+
+#ifdef DDB
+ dbg_register_sync(NULL);
+#endif
+
+ CPU_CLR_ATOMIC(cpu, &started_cpus);
+ CPU_CLR_ATOMIC(cpu, &stopped_cpus);
+ CTR0(KTR_SMP, "IPI_STOP (restart)");
+}
+
+struct cpu_group *
+cpu_topo(void)
+{
+
+ return (smp_topo_none());
+}
+
+/* Determine if we are running on an MP machine */
+int
+cpu_mp_probe(void)
+{
+
+ /* ARM64TODO: Read the u bit of mpidr_el1 to determine this */
+ return (1);
+}
+
+static bool
+start_cpu(u_int id, uint64_t target_cpu)
+{
+ struct pcpu *pcpup;
+ vm_paddr_t pa;
+ u_int cpuid;
+ int err, naps;
+
+ /* Check we are able to start this cpu */
+ if (id > mp_maxid)
+ return (false);
+
+ KASSERT(id < MAXCPU, ("Too many CPUs"));
+
+ /* We are already running on cpu 0 */
+ if (id == cpu0)
+ return (true);
+
+ /*
+ * Rotate the CPU IDs to put the boot CPU as CPU 0. We keep the other
+ * CPUs ordered as they are likely grouped into clusters so it can be
+ * useful to keep that property, e.g. for the GICv3 driver to send
+ * an IPI to all CPUs in the cluster.
+ */
+ cpuid = id;
+ if (cpuid < cpu0)
+ cpuid += mp_maxid + 1;
+ cpuid -= cpu0;
+
+ pcpup = &__pcpu[cpuid];
+ pcpu_init(pcpup, cpuid, sizeof(struct pcpu));
+
+ dpcpu[cpuid - 1] = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
+ dpcpu_init(dpcpu[cpuid - 1], cpuid);
+
+ bootstacks[cpuid] = (void *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
+
+ naps = atomic_load_int(&aps_started);
+ bootstack = (char *)bootstacks[cpuid] + PAGE_SIZE;
+
+ printf("Starting CPU %u (%lx)\n", cpuid, target_cpu);
+ pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry);
+ err = psci_cpu_on(target_cpu, pa, cpuid);
+ if (err != PSCI_RETVAL_SUCCESS) {
+ /*
+ * Panic here if INVARIANTS are enabled and PSCI failed to
+ * start the requested CPU. psci_cpu_on() returns PSCI_MISSING
+ * to indicate we are unable to use it to start the given CPU.
+ */
+ KASSERT(err == PSCI_MISSING ||
+ (mp_quirks & MP_QUIRK_CPULIST) == MP_QUIRK_CPULIST,
+ ("Failed to start CPU %u (%lx), error %d\n",
+ id, target_cpu, err));
+
+ pcpu_destroy(pcpup);
+ kmem_free((vm_offset_t)dpcpu[cpuid - 1], DPCPU_SIZE);
+ dpcpu[cpuid - 1] = NULL;
+ kmem_free((vm_offset_t)bootstacks[cpuid], PAGE_SIZE);
+ bootstacks[cpuid] = NULL;
+ mp_ncpus--;
+
+ /* Notify the user that the CPU failed to start */
+ printf("Failed to start CPU %u (%lx), error %d\n",
+ id, target_cpu, err);
+ } else {
+ /* Wait for the AP to switch to its boot stack. */
+ while (atomic_load_int(&aps_started) < naps + 1)
+ cpu_spinwait();
+ CPU_SET(cpuid, &all_cpus);
+ }
+
+ return (true);
+}
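+
+/*
+ * Sketch of the CPU ID rotation in start_cpu() above (illustrative only,
+ * hypothetical helper).  With cpu0 == 2 and mp_maxid == 3, hardware ids
+ * 2, 3, 0, 1 map to logical cpuids 0, 1, 2, 3: the boot CPU becomes
+ * cpuid 0 and the relative order of the other CPUs is preserved.
+ */
+#if 0
+static u_int
+rotate_cpu_id(u_int id)
+{
+ u_int cpuid;
+
+ cpuid = id;
+ if (cpuid < cpu0)
+ cpuid += mp_maxid + 1;
+ return (cpuid - cpu0);
+}
+#endif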
+
+#ifdef DEV_ACPI
+static void
+madt_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
+{
+ ACPI_MADT_GENERIC_INTERRUPT *intr;
+ u_int *cpuid;
+ u_int id;
+
+ switch(entry->Type) {
+ case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
+ intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
+ cpuid = arg;
+ id = *cpuid;
+ start_cpu(id, intr->ArmMpidr);
+ __pcpu[id].pc_acpi_id = intr->Uid;
+ (*cpuid)++;
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+cpu_init_acpi(void)
+{
+ ACPI_TABLE_MADT *madt;
+ vm_paddr_t physaddr;
+ u_int cpuid;
+
+ physaddr = acpi_find_table(ACPI_SIG_MADT);
+ if (physaddr == 0)
+ return;
+
+ madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
+ if (madt == NULL) {
+ printf("Unable to map the MADT, not starting APs\n");
+ return;
+ }
+
+ cpuid = 0;
+ acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
+ madt_handler, &cpuid);
+
+ acpi_unmap_table(madt);
+
+#if MAXMEMDOM > 1
+ acpi_pxm_set_cpu_locality();
+#endif
+}
+#endif
+
+#ifdef FDT
+static boolean_t
+cpu_init_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
+{
+ uint64_t target_cpu;
+ int domain;
+
+ target_cpu = reg[0];
+ if (addr_size == 2) {
+ target_cpu <<= 32;
+ target_cpu |= reg[1];
+ }
+
+ if (!start_cpu(id, target_cpu))
+ return (FALSE);
+
+ /* Try to read the numa node of this cpu */
+ if (vm_ndomains == 1 ||
+ OF_getencprop(node, "numa-node-id", &domain, sizeof(domain)) <= 0)
+ domain = 0;
+ __pcpu[id].pc_domain = domain;
+ if (domain < MAXMEMDOM)
+ CPU_SET(id, &cpuset_domain[domain]);
+
+ return (TRUE);
+}
+#endif
+
+/* Initialize and fire up non-boot processors */
+void
+cpu_mp_start(void)
+{
+#ifdef FDT
+ phandle_t node;
+ int i;
+#endif
+
+ mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
+
+ CPU_SET(0, &all_cpus);
+
+ switch(arm64_bus_method) {
+#ifdef DEV_ACPI
+ case ARM64_BUS_ACPI:
+ mp_quirks = MP_QUIRK_CPULIST;
+ KASSERT(cpu0 >= 0, ("Current CPU was not found"));
+ cpu_init_acpi();
+ break;
+#endif
+#ifdef FDT
+ case ARM64_BUS_FDT:
+ node = OF_peer(0);
+ for (i = 0; fdt_quirks[i].compat != NULL; i++) {
+ if (ofw_bus_node_is_compatible(node,
+ fdt_quirks[i].compat) != 0) {
+ mp_quirks = fdt_quirks[i].quirks;
+ }
+ }
+ KASSERT(cpu0 >= 0, ("Current CPU was not found"));
+ ofw_cpu_early_foreach(cpu_init_fdt, true);
+ break;
+#endif
+ default:
+ break;
+ }
+}
+
+/* Introduce rest of cores to the world */
+void
+cpu_mp_announce(void)
+{
+}
+
+#ifdef DEV_ACPI
+static void
+cpu_count_acpi_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
+{
+ ACPI_MADT_GENERIC_INTERRUPT *intr;
+ u_int *cores = arg;
+ uint64_t mpidr_reg;
+
+ switch(entry->Type) {
+ case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
+ intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
+ if (cpu0 < 0) {
+ mpidr_reg = READ_SPECIALREG(mpidr_el1);
+ if ((mpidr_reg & 0xff00fffffful) == intr->ArmMpidr)
+ cpu0 = *cores;
+ }
+ (*cores)++;
+ break;
+ default:
+ break;
+ }
+}
+
+static u_int
+cpu_count_acpi(void)
+{
+ ACPI_TABLE_MADT *madt;
+ vm_paddr_t physaddr;
+ u_int cores;
+
+ physaddr = acpi_find_table(ACPI_SIG_MADT);
+ if (physaddr == 0)
+ return (0);
+
+ madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
+ if (madt == NULL) {
+ printf("Unable to map the MADT, not starting APs\n");
+ return (0);
+ }
+
+ cores = 0;
+ acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
+ cpu_count_acpi_handler, &cores);
+
+ acpi_unmap_table(madt);
+
+ return (cores);
+}
+#endif
+
+#ifdef FDT
+static boolean_t
+cpu_find_cpu0_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
+{
+ uint64_t mpidr_fdt, mpidr_reg;
+
+ if (cpu0 < 0) {
+ mpidr_fdt = reg[0];
+ if (addr_size == 2) {
+ mpidr_fdt <<= 32;
+ mpidr_fdt |= reg[1];
+ }
+
+ mpidr_reg = READ_SPECIALREG(mpidr_el1);
+
+ if ((mpidr_reg & 0xff00fffffful) == mpidr_fdt)
+ cpu0 = id;
+ }
+
+ return (TRUE);
+}
+#endif
+
+void
+cpu_mp_setmaxid(void)
+{
+ int cores;
+
+ mp_ncpus = 1;
+ mp_maxid = 0;
+
+ switch(arm64_bus_method) {
+#ifdef DEV_ACPI
+ case ARM64_BUS_ACPI:
+ cores = cpu_count_acpi();
+ if (cores > 0) {
+ cores = MIN(cores, MAXCPU);
+ if (bootverbose)
+ printf("Found %d CPUs in the ACPI tables\n",
+ cores);
+ mp_ncpus = cores;
+ mp_maxid = cores - 1;
+ }
+ break;
+#endif
+#ifdef FDT
+ case ARM64_BUS_FDT:
+ cores = ofw_cpu_early_foreach(cpu_find_cpu0_fdt, false);
+ if (cores > 0) {
+ cores = MIN(cores, MAXCPU);
+ if (bootverbose)
+ printf("Found %d CPUs in the device tree\n",
+ cores);
+ mp_ncpus = cores;
+ mp_maxid = cores - 1;
+ }
+ break;
+#endif
+ default:
+ if (bootverbose)
+ printf("No CPU data, limiting to 1 core\n");
+ break;
+ }
+
+ if (TUNABLE_INT_FETCH("hw.ncpu", &cores)) {
+ if (cores > 0 && cores < mp_ncpus) {
+ mp_ncpus = cores;
+ mp_maxid = cores - 1;
+ }
+ }
+}
+
+/*
+ * Lookup IPI source.
+ */
+static struct intr_ipi *
+intr_ipi_lookup(u_int ipi)
+{
+
+ if (ipi >= INTR_IPI_COUNT)
+ panic("%s: no such IPI %u", __func__, ipi);
+
+ return (&ipi_sources[ipi]);
+}
+
+/*
+ * Interrupt controller dispatch function for IPIs. It should
+ * be called straight from the interrupt controller, when the associated
+ * interrupt source is learned, or from anybody who has an interrupt
+ * source mapped.
+ */
+void
+intr_ipi_dispatch(u_int ipi, struct trapframe *tf)
+{
+ void *arg;
+ struct intr_ipi *ii;
+
+ ii = intr_ipi_lookup(ipi);
+ if (ii->ii_count == NULL)
+ panic("%s: not setup IPI %u", __func__, ipi);
+
+ intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid));
+
+ /*
+ * Supply ipi filter with trapframe argument
+ * if none is registered.
+ */
+ arg = ii->ii_handler_arg != NULL ? ii->ii_handler_arg : tf;
+ ii->ii_handler(arg);
+}
+
+#ifdef notyet
+/*
+ * Map IPI into interrupt controller.
+ *
+ * Not SMP coherent.
+ */
+static int
+ipi_map(struct intr_irqsrc *isrc, u_int ipi)
+{
+ boolean_t is_percpu;
+ int error;
+
+ if (ipi >= INTR_IPI_COUNT)
+ panic("%s: no such IPI %u", __func__, ipi);
+
+ KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
+
+ isrc->isrc_type = INTR_ISRCT_NAMESPACE;
+ isrc->isrc_nspc_type = INTR_IRQ_NSPC_IPI;
+ isrc->isrc_nspc_num = ipi_next_num;
+
+ error = PIC_REGISTER(intr_irq_root_dev, isrc, &is_percpu);
+ if (error == 0) {
+ isrc->isrc_dev = intr_irq_root_dev;
+ ipi_next_num++;
+ }
+ return (error);
+}
+
+/*
+ * Setup IPI handler to interrupt source.
+ *
+ * Note that there could be more ways to send and receive IPIs on a
+ * platform, for example fast interrupts. In that case, one can call
+ * this function with the AISHF_NOALLOC flag set and then call
+ * intr_ipi_dispatch() when appropriate.
+ *
+ * Not SMP coherent.
+ */
+int
+intr_ipi_set_handler(u_int ipi, const char *name, intr_ipi_filter_t *filter,
+ void *arg, u_int flags)
+{
+ struct intr_irqsrc *isrc;
+ int error;
+
+ if (filter == NULL)
+ return(EINVAL);
+
+ isrc = intr_ipi_lookup(ipi);
+ if (isrc->isrc_ipifilter != NULL)
+ return (EEXIST);
+
+ if ((flags & AISHF_NOALLOC) == 0) {
+ error = ipi_map(isrc, ipi);
+ if (error != 0)
+ return (error);
+ }
+
+ isrc->isrc_ipifilter = filter;
+ isrc->isrc_arg = arg;
+ isrc->isrc_handlers = 1;
+ isrc->isrc_count = intr_ipi_setup_counters(name);
+ isrc->isrc_index = 0; /* it should not be used in IPI case */
+
+ if (isrc->isrc_dev != NULL) {
+ PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
+ PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc);
+ }
+ return (0);
+}
+#endif
+
+/* Sending IPI */
+void
+ipi_all_but_self(u_int ipi)
+{
+ cpuset_t cpus;
+
+ cpus = all_cpus;
+ CPU_CLR(PCPU_GET(cpuid), &cpus);
+ CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
+ intr_ipi_send(cpus, ipi);
+}
+
+void
+ipi_cpu(int cpu, u_int ipi)
+{
+ cpuset_t cpus;
+
+ CPU_ZERO(&cpus);
+ CPU_SET(cpu, &cpus);
+
+ CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
+ intr_ipi_send(cpus, ipi);
+}
+
+void
+ipi_selected(cpuset_t cpus, u_int ipi)
+{
+
+ CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
+ intr_ipi_send(cpus, ipi);
+}
diff --git a/sys/arm64/arm64/nexus.c b/sys/arm64/arm64/nexus.c
new file mode 100644
index 000000000000..924496ec7f52
--- /dev/null
+++ b/sys/arm64/arm64/nexus.c
@@ -0,0 +1,549 @@
+/*-
+ * Copyright 1998 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This code implements a `root nexus' for Arm Architecture
+ * machines. The function of the root nexus is to serve as an
+ * attachment point for both processors and buses, and to manage
+ * resources which are common to all of them. In particular,
+ * this code implements the core resource managers for interrupt
+ * requests, DMA requests (which rightfully should be a part of the
+ * ISA code but it's easier to do it here for now), I/O port addresses,
+ * and I/O memory address space.
+ */
+
+#include "opt_acpi.h"
+#include "opt_platform.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <sys/interrupt.h>
+
+#include <machine/machdep.h>
+#include <machine/vmparam.h>
+#include <machine/pcb.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/resource.h>
+#include <machine/intr.h>
+
+#ifdef FDT
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/ofw/openfirm.h>
+#include "ofw_bus_if.h"
+#endif
+#ifdef DEV_ACPI
+#include <contrib/dev/acpica/include/acpi.h>
+#include <dev/acpica/acpivar.h>
+#include "acpi_bus_if.h"
+#include "pcib_if.h"
+#endif
+
+extern struct bus_space memmap_bus;
+
+static MALLOC_DEFINE(M_NEXUSDEV, "nexusdev", "Nexus device");
+
+struct nexus_device {
+ struct resource_list nx_resources;
+};
+
+#define DEVTONX(dev) ((struct nexus_device *)device_get_ivars(dev))
+
+static struct rman mem_rman;
+static struct rman irq_rman;
+
+static int nexus_attach(device_t);
+
+#ifdef FDT
+static device_probe_t nexus_fdt_probe;
+static device_attach_t nexus_fdt_attach;
+#endif
+#ifdef DEV_ACPI
+static device_probe_t nexus_acpi_probe;
+static device_attach_t nexus_acpi_attach;
+#endif
+
+static int nexus_print_child(device_t, device_t);
+static device_t nexus_add_child(device_t, u_int, const char *, int);
+static struct resource *nexus_alloc_resource(device_t, device_t, int, int *,
+ rman_res_t, rman_res_t, rman_res_t, u_int);
+static int nexus_activate_resource(device_t, device_t, int, int,
+ struct resource *);
+static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig,
+ enum intr_polarity pol);
+static struct resource_list *nexus_get_reslist(device_t, device_t);
+static int nexus_set_resource(device_t, device_t, int, int,
+ rman_res_t, rman_res_t);
+static int nexus_deactivate_resource(device_t, device_t, int, int,
+ struct resource *);
+static int nexus_release_resource(device_t, device_t, int, int,
+ struct resource *);
+
+static int nexus_setup_intr(device_t dev, device_t child, struct resource *res,
+ int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep);
+static int nexus_teardown_intr(device_t, device_t, struct resource *, void *);
+static bus_space_tag_t nexus_get_bus_tag(device_t, device_t);
+#ifdef SMP
+static int nexus_bind_intr(device_t, device_t, struct resource *, int);
+#endif
+
+#ifdef FDT
+static int nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent,
+ int icells, pcell_t *intr);
+#endif
+
+static device_method_t nexus_methods[] = {
+ /* Bus interface */
+ DEVMETHOD(bus_print_child, nexus_print_child),
+ DEVMETHOD(bus_add_child, nexus_add_child),
+ DEVMETHOD(bus_alloc_resource, nexus_alloc_resource),
+ DEVMETHOD(bus_activate_resource, nexus_activate_resource),
+ DEVMETHOD(bus_config_intr, nexus_config_intr),
+ DEVMETHOD(bus_get_resource_list, nexus_get_reslist),
+ DEVMETHOD(bus_set_resource, nexus_set_resource),
+ DEVMETHOD(bus_deactivate_resource, nexus_deactivate_resource),
+ DEVMETHOD(bus_release_resource, nexus_release_resource),
+ DEVMETHOD(bus_setup_intr, nexus_setup_intr),
+ DEVMETHOD(bus_teardown_intr, nexus_teardown_intr),
+ DEVMETHOD(bus_get_bus_tag, nexus_get_bus_tag),
+#ifdef SMP
+ DEVMETHOD(bus_bind_intr, nexus_bind_intr),
+#endif
+ { 0, 0 }
+};
+
+static driver_t nexus_driver = {
+ "nexus",
+ nexus_methods,
+ 1 /* no softc */
+};
+
+static int
+nexus_attach(device_t dev)
+{
+
+ mem_rman.rm_start = 0;
+ mem_rman.rm_end = BUS_SPACE_MAXADDR;
+ mem_rman.rm_type = RMAN_ARRAY;
+ mem_rman.rm_descr = "I/O memory addresses";
+ if (rman_init(&mem_rman) ||
+ rman_manage_region(&mem_rman, 0, BUS_SPACE_MAXADDR))
+ panic("nexus_attach mem_rman");
+ irq_rman.rm_start = 0;
+ irq_rman.rm_end = ~0;
+ irq_rman.rm_type = RMAN_ARRAY;
+ irq_rman.rm_descr = "Interrupts";
+ if (rman_init(&irq_rman) || rman_manage_region(&irq_rman, 0, ~0))
+ panic("nexus_attach irq_rman");
+
+ bus_generic_probe(dev);
+ bus_generic_attach(dev);
+
+ return (0);
+}
+
+static int
+nexus_print_child(device_t bus, device_t child)
+{
+ int retval = 0;
+
+ retval += bus_print_child_header(bus, child);
+ retval += printf("\n");
+
+ return (retval);
+}
+
+static device_t
+nexus_add_child(device_t bus, u_int order, const char *name, int unit)
+{
+ device_t child;
+ struct nexus_device *ndev;
+
+ ndev = malloc(sizeof(struct nexus_device), M_NEXUSDEV, M_NOWAIT|M_ZERO);
+	if (ndev == NULL)
+		return (NULL);
+ resource_list_init(&ndev->nx_resources);
+
+ child = device_add_child_ordered(bus, order, name, unit);
+
+ /* should we free this in nexus_child_detached? */
+ device_set_ivars(child, ndev);
+
+ return (child);
+}
+
+/*
+ * Allocate a resource on behalf of child. NB: child is usually going to be a
+ * child of one of our descendants, not a direct child of nexus0.
+ * (Exceptions include footbridge.)
+ */
+static struct resource *
+nexus_alloc_resource(device_t bus, device_t child, int type, int *rid,
+ rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
+{
+ struct nexus_device *ndev = DEVTONX(child);
+ struct resource *rv;
+ struct resource_list_entry *rle;
+ struct rman *rm;
+ int needactivate = flags & RF_ACTIVE;
+
+ /*
+ * If this is an allocation of the "default" range for a given
+ * RID, and we know what the resources for this device are
+ * (ie. they aren't maintained by a child bus), then work out
+ * the start/end values.
+ */
+ if (RMAN_IS_DEFAULT_RANGE(start, end) && (count == 1)) {
+		if (device_get_parent(child) != bus || ndev == NULL)
+			return (NULL);
+		rle = resource_list_find(&ndev->nx_resources, type, *rid);
+		if (rle == NULL)
+			return (NULL);
+ start = rle->start;
+ end = rle->end;
+ count = rle->count;
+ }
+
+ switch (type) {
+ case SYS_RES_IRQ:
+ rm = &irq_rman;
+ break;
+
+ case SYS_RES_MEMORY:
+ case SYS_RES_IOPORT:
+ rm = &mem_rman;
+ break;
+
+ default:
+ return (NULL);
+ }
+
+ rv = rman_reserve_resource(rm, start, end, count, flags, child);
+ if (rv == NULL)
+ return (NULL);
+
+ rman_set_rid(rv, *rid);
+ rman_set_bushandle(rv, rman_get_start(rv));
+
+ if (needactivate) {
+ if (bus_activate_resource(child, type, *rid, rv)) {
+ rman_release_resource(rv);
+ return (NULL);
+ }
+ }
+
+ return (rv);
+}
+
+static int
+nexus_release_resource(device_t bus, device_t child, int type, int rid,
+ struct resource *res)
+{
+ int error;
+
+ if (rman_get_flags(res) & RF_ACTIVE) {
+ error = bus_deactivate_resource(child, type, rid, res);
+ if (error)
+ return (error);
+ }
+ return (rman_release_resource(res));
+}
+
+static int
+nexus_config_intr(device_t dev, int irq, enum intr_trigger trig,
+ enum intr_polarity pol)
+{
+
+ /*
+ * On arm64 (due to INTRNG), ACPI interrupt configuration is
+ * done in nexus_acpi_map_intr().
+ */
+ return (0);
+}
+
+static int
+nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags,
+ driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep)
+{
+ int error;
+
+ if ((rman_get_flags(res) & RF_SHAREABLE) == 0)
+ flags |= INTR_EXCL;
+
+ /* We depend here on rman_activate_resource() being idempotent. */
+ error = rman_activate_resource(res);
+ if (error)
+ return (error);
+
+ error = intr_setup_irq(child, res, filt, intr, arg, flags, cookiep);
+
+ return (error);
+}
+
+static int
+nexus_teardown_intr(device_t dev, device_t child, struct resource *r, void *ih)
+{
+
+ return (intr_teardown_irq(child, r, ih));
+}
+
+#ifdef SMP
+static int
+nexus_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu)
+{
+
+ return (intr_bind_irq(child, irq, cpu));
+}
+#endif
+
+static bus_space_tag_t
+nexus_get_bus_tag(device_t bus __unused, device_t child __unused)
+{
+
+	return (&memmap_bus);
+}
+
+static int
+nexus_activate_resource(device_t bus, device_t child, int type, int rid,
+ struct resource *r)
+{
+ int err;
+ bus_addr_t paddr;
+ bus_size_t psize;
+ bus_space_handle_t vaddr;
+
+ if ((err = rman_activate_resource(r)) != 0)
+ return (err);
+
+ /*
+ * If this is a memory resource, map it into the kernel.
+ */
+ if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) {
+ paddr = (bus_addr_t)rman_get_start(r);
+ psize = (bus_size_t)rman_get_size(r);
+ err = bus_space_map(&memmap_bus, paddr, psize, 0, &vaddr);
+ if (err != 0) {
+ rman_deactivate_resource(r);
+ return (err);
+ }
+ rman_set_bustag(r, &memmap_bus);
+ rman_set_virtual(r, (void *)vaddr);
+ rman_set_bushandle(r, vaddr);
+ } else if (type == SYS_RES_IRQ) {
+ err = intr_activate_irq(child, r);
+ if (err != 0) {
+ rman_deactivate_resource(r);
+ return (err);
+ }
+ }
+ return (0);
+}
+
+static struct resource_list *
+nexus_get_reslist(device_t dev, device_t child)
+{
+ struct nexus_device *ndev = DEVTONX(child);
+
+ return (&ndev->nx_resources);
+}
+
+static int
+nexus_set_resource(device_t dev, device_t child, int type, int rid,
+ rman_res_t start, rman_res_t count)
+{
+ struct nexus_device *ndev = DEVTONX(child);
+ struct resource_list *rl = &ndev->nx_resources;
+
+ /* XXX this should return a success/failure indicator */
+ resource_list_add(rl, type, rid, start, start + count - 1, count);
+
+	return (0);
+}
+
+static int
+nexus_deactivate_resource(device_t bus, device_t child, int type, int rid,
+ struct resource *r)
+{
+ bus_size_t psize;
+ bus_space_handle_t vaddr;
+
+ if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) {
+ psize = (bus_size_t)rman_get_size(r);
+ vaddr = rman_get_bushandle(r);
+
+ if (vaddr != 0) {
+ bus_space_unmap(&memmap_bus, vaddr, psize);
+ rman_set_virtual(r, NULL);
+ rman_set_bushandle(r, 0);
+ }
+ } else if (type == SYS_RES_IRQ) {
+ intr_deactivate_irq(child, r);
+ }
+
+ return (rman_deactivate_resource(r));
+}
+
+#ifdef FDT
+static device_method_t nexus_fdt_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, nexus_fdt_probe),
+ DEVMETHOD(device_attach, nexus_fdt_attach),
+
+ /* OFW interface */
+ DEVMETHOD(ofw_bus_map_intr, nexus_ofw_map_intr),
+
+ DEVMETHOD_END,
+};
+
+#define nexus_baseclasses nexus_fdt_baseclasses
+DEFINE_CLASS_1(nexus, nexus_fdt_driver, nexus_fdt_methods, 1, nexus_driver);
+#undef nexus_baseclasses
+static devclass_t nexus_fdt_devclass;
+
+EARLY_DRIVER_MODULE(nexus_fdt, root, nexus_fdt_driver, nexus_fdt_devclass,
+ 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_FIRST);
+
+static int
+nexus_fdt_probe(device_t dev)
+{
+
+ if (arm64_bus_method != ARM64_BUS_FDT)
+ return (ENXIO);
+
+ device_quiet(dev);
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+nexus_fdt_attach(device_t dev)
+{
+
+ nexus_add_child(dev, 10, "ofwbus", 0);
+ return (nexus_attach(dev));
+}
+
+static int
+nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent, int icells,
+ pcell_t *intr)
+{
+ u_int irq;
+ struct intr_map_data_fdt *fdt_data;
+ size_t len;
+
+ len = sizeof(*fdt_data) + icells * sizeof(pcell_t);
+ fdt_data = (struct intr_map_data_fdt *)intr_alloc_map_data(
+ INTR_MAP_DATA_FDT, len, M_WAITOK | M_ZERO);
+ fdt_data->iparent = iparent;
+ fdt_data->ncells = icells;
+ memcpy(fdt_data->cells, intr, icells * sizeof(pcell_t));
+ irq = intr_map_irq(NULL, iparent, (struct intr_map_data *)fdt_data);
+ return (irq);
+}
+#endif
+
+#ifdef DEV_ACPI
+static int nexus_acpi_map_intr(device_t dev, device_t child, u_int irq, int trig, int pol);
+
+static device_method_t nexus_acpi_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, nexus_acpi_probe),
+ DEVMETHOD(device_attach, nexus_acpi_attach),
+
+ /* ACPI interface */
+ DEVMETHOD(acpi_bus_map_intr, nexus_acpi_map_intr),
+
+ DEVMETHOD_END,
+};
+
+#define nexus_baseclasses nexus_acpi_baseclasses
+DEFINE_CLASS_1(nexus, nexus_acpi_driver, nexus_acpi_methods, 1,
+ nexus_driver);
+#undef nexus_baseclasses
+static devclass_t nexus_acpi_devclass;
+
+EARLY_DRIVER_MODULE(nexus_acpi, root, nexus_acpi_driver, nexus_acpi_devclass,
+ 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_FIRST);
+
+static int
+nexus_acpi_probe(device_t dev)
+{
+
+ if (arm64_bus_method != ARM64_BUS_ACPI || acpi_identify() != 0)
+ return (ENXIO);
+
+ device_quiet(dev);
+ return (BUS_PROBE_LOW_PRIORITY);
+}
+
+static int
+nexus_acpi_attach(device_t dev)
+{
+
+ nexus_add_child(dev, 10, "acpi", 0);
+ return (nexus_attach(dev));
+}
+
+static int
+nexus_acpi_map_intr(device_t dev, device_t child, u_int irq, int trig, int pol)
+{
+ struct intr_map_data_acpi *acpi_data;
+ size_t len;
+
+ len = sizeof(*acpi_data);
+ acpi_data = (struct intr_map_data_acpi *)intr_alloc_map_data(
+ INTR_MAP_DATA_ACPI, len, M_WAITOK | M_ZERO);
+ acpi_data->irq = irq;
+ acpi_data->pol = pol;
+ acpi_data->trig = trig;
+
+ /*
+ * TODO: This will only handle a single interrupt controller.
+ * ACPI will map multiple controllers into a single virtual IRQ
+ * space. Each controller has a System Vector Base to hold the
+ * first irq it handles in this space. As such the correct way
+ * to handle interrupts with ACPI is to search through the
+ * controllers for the largest base value that is no larger than
+ * the IRQ value.
+ */
+ irq = intr_map_irq(NULL, ACPI_INTR_XREF,
+ (struct intr_map_data *)acpi_data);
+ return (irq);
+}
+#endif
diff --git a/sys/arm64/arm64/ofw_machdep.c b/sys/arm64/arm64/ofw_machdep.c
new file mode 100644
index 000000000000..3941c1d35617
--- /dev/null
+++ b/sys/arm64/arm64/ofw_machdep.c
@@ -0,0 +1,58 @@
+/*-
+ * Copyright (c) 2015 Ian Lepore <ian@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+
+#include <machine/bus.h>
+
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_subr.h>
+
+extern struct bus_space memmap_bus;
+
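+/*
+ * Translate the regno'th "reg" entry of the given OF node into a
+ * bus_space mapping using the global memmap bus space, returning the
+ * tag, handle and (optionally) size needed for bus_space access.  A
+ * minimal, hypothetical caller looks like:
+ *
+ *	bus_space_tag_t bst;
+ *	bus_space_handle_t bsh;
+ *
+ *	if (OF_decode_addr(node, 0, &bst, &bsh, NULL) == 0)
+ *		reg = bus_space_read_4(bst, bsh, 0);
+ */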
+int
+OF_decode_addr(phandle_t dev, int regno, bus_space_tag_t *tag,
+ bus_space_handle_t *handle, bus_size_t *sz)
+{
+ bus_addr_t addr;
+ bus_size_t size;
+ int err;
+
+ err = ofw_reg_to_paddr(dev, regno, &addr, &size, NULL);
+ if (err != 0)
+ return (err);
+
+ *tag = &memmap_bus;
+
+ if (sz != NULL)
+ *sz = size;
+
+ return (bus_space_map(*tag, addr, size, 0, handle));
+}
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
new file mode 100644
index 000000000000..df160cc05012
--- /dev/null
+++ b/sys/arm64/arm64/pmap.c
@@ -0,0 +1,6710 @@
+/*-
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ * Copyright (c) 1994 John S. Dyson
+ * All rights reserved.
+ * Copyright (c) 1994 David Greenman
+ * All rights reserved.
+ * Copyright (c) 2003 Peter Wemm
+ * All rights reserved.
+ * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
+ * All rights reserved.
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ * Copyright (c) 2014-2016 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and William Jolitz of UUNET Technologies Inc.
+ *
+ * This software was developed by Andrew Turner under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
+ */
+/*-
+ * Copyright (c) 2003 Networks Associates Technology, Inc.
+ * All rights reserved.
+ *
+ * This software was developed for the FreeBSD Project by Jake Burkholder,
+ * Safeport Network Services, and Network Associates Laboratories, the
+ * Security Research Division of Network Associates, Inc. under
+ * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
+ * CHATS research program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Manages physical address maps.
+ *
+ * Since the information managed by this module is
+ * also stored by the logical address mapping module,
+ * this module may throw away valid virtual-to-physical
+ * mappings at almost any time. However, invalidations
+ * of virtual-to-physical mappings must be done as
+ * requested.
+ *
+ * In order to cope with hardware architectures which
+ * make virtual-to-physical map invalidates expensive,
+ * this module may delay invalidate or reduced protection
+ * operations until such time as they are actually
+ * necessary. This module is given full information as
+ * to which processors are currently using which maps,
+ * and to when physical maps must be made correct.
+ */
+
+#include "opt_vm.h"
+
+#include <sys/param.h>
+#include <sys/bitstring.h>
+#include <sys/bus.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mman.h>
+#include <sys/msgbuf.h>
+#include <sys/mutex.h>
+#include <sys/physmem.h>
+#include <sys/proc.h>
+#include <sys/rwlock.h>
+#include <sys/sbuf.h>
+#include <sys/sx.h>
+#include <sys/vmem.h>
+#include <sys/vmmeter.h>
+#include <sys/sched.h>
+#include <sys/sysctl.h>
+#include <sys/_unrhdr.h>
+#include <sys/smp.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_pageout.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_phys.h>
+#include <vm/vm_radix.h>
+#include <vm/vm_reserv.h>
+#include <vm/uma.h>
+
+#include <machine/machdep.h>
+#include <machine/md_var.h>
+#include <machine/pcb.h>
+
+#define PMAP_ASSERT_STAGE1(pmap) MPASS((pmap)->pm_stage == PM_STAGE1)
+#define PMAP_ASSERT_STAGE2(pmap) MPASS((pmap)->pm_stage == PM_STAGE2)
+
+#define NL0PG (PAGE_SIZE/(sizeof (pd_entry_t)))
+#define NL1PG (PAGE_SIZE/(sizeof (pd_entry_t)))
+#define NL2PG (PAGE_SIZE/(sizeof (pd_entry_t)))
+#define NL3PG (PAGE_SIZE/(sizeof (pt_entry_t)))
+
+#define NUL0E L0_ENTRIES
+#define NUL1E (NUL0E * NL1PG)
+#define NUL2E (NUL1E * NL2PG)
+
+#if !defined(DIAGNOSTIC)
+#ifdef __GNUC_GNU_INLINE__
+#define PMAP_INLINE __attribute__((__gnu_inline__)) inline
+#else
+#define PMAP_INLINE extern inline
+#endif
+#else
+#define PMAP_INLINE
+#endif
+
+#ifdef PV_STATS
+#define PV_STAT(x) do { x ; } while (0)
+#else
+#define PV_STAT(x) do { } while (0)
+#endif
+
+#define pmap_l2_pindex(v) ((v) >> L2_SHIFT)
+#define pa_to_pvh(pa) (&pv_table[pmap_l2_pindex(pa)])
+
+#define NPV_LIST_LOCKS MAXCPU
+
+#define PHYS_TO_PV_LIST_LOCK(pa) \
+ (&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
+
+#define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa) do { \
+ struct rwlock **_lockp = (lockp); \
+ struct rwlock *_new_lock; \
+ \
+ _new_lock = PHYS_TO_PV_LIST_LOCK(pa); \
+ if (_new_lock != *_lockp) { \
+ if (*_lockp != NULL) \
+ rw_wunlock(*_lockp); \
+ *_lockp = _new_lock; \
+ rw_wlock(*_lockp); \
+ } \
+} while (0)
+
+#define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m) \
+ CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
+
+#define RELEASE_PV_LIST_LOCK(lockp) do { \
+ struct rwlock **_lockp = (lockp); \
+ \
+ if (*_lockp != NULL) { \
+ rw_wunlock(*_lockp); \
+ *_lockp = NULL; \
+ } \
+} while (0)
+
+#define VM_PAGE_TO_PV_LIST_LOCK(m) \
+ PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
+
+/*
+ * The presence of this flag indicates that the mapping is writeable.
+ * If the ATTR_S1_AP_RO bit is also set, then the mapping is clean, otherwise
+ * it is dirty. This flag may only be set on managed mappings.
+ *
+ * The DBM bit is reserved on ARMv8.0 but it seems we can safely treat it
+ * as a software managed bit.
+ */
+#define ATTR_SW_DBM ATTR_DBM
+
+struct pmap kernel_pmap_store;
+
+/* Used for mapping ACPI memory before VM is initialized */
+#define PMAP_PREINIT_MAPPING_COUNT 32
+#define PMAP_PREINIT_MAPPING_SIZE (PMAP_PREINIT_MAPPING_COUNT * L2_SIZE)
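+/*
+ * With 4 KiB translation granules (L2_SIZE == 2 MiB) this reserves
+ * 32 * 2 MiB = 64 MiB of pre-init mapping space.
+ */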
+static vm_offset_t preinit_map_va; /* Start VA of pre-init mapping space */
+static int vm_initialized = 0; /* No need to use pre-init maps when set */
+
+/*
+ * Reserve a few L2 blocks starting from 'preinit_map_va' pointer.
+ * Always map entire L2 block for simplicity.
+ * VA of L2 block = preinit_map_va + i * L2_SIZE
+ */
+static struct pmap_preinit_mapping {
+ vm_paddr_t pa;
+ vm_offset_t va;
+ vm_size_t size;
+} pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
+
+vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
+vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
+vm_offset_t kernel_vm_end = 0;
+
+/*
+ * Data for the pv entry allocation mechanism.
+ */
+static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
+static struct mtx pv_chunks_mutex;
+static struct rwlock pv_list_locks[NPV_LIST_LOCKS];
+static struct md_page *pv_table;
+static struct md_page pv_dummy;
+
+vm_paddr_t dmap_phys_base; /* The start of the dmap region */
+vm_paddr_t dmap_phys_max; /* The limit of the dmap region */
+vm_offset_t dmap_max_addr; /* The virtual address limit of the dmap */
+
+/* This code assumes all L1 DMAP entries will be used */
+CTASSERT((DMAP_MIN_ADDRESS & ~L0_OFFSET) == DMAP_MIN_ADDRESS);
+CTASSERT((DMAP_MAX_ADDRESS & ~L0_OFFSET) == DMAP_MAX_ADDRESS);
+
+#define DMAP_TABLES ((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)
+extern pt_entry_t pagetable_dmap[];
+
+#define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1))
+static vm_paddr_t physmap[PHYSMAP_SIZE];
+static u_int physmap_idx;
+
+static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
+ "VM/pmap parameters");
+
+/*
+ * This ASID allocator uses a bit vector ("asid_set") to remember which ASIDs
+ * that it has currently allocated to a pmap, a cursor ("asid_next") to
+ * optimize its search for a free ASID in the bit vector, and an epoch number
+ * ("asid_epoch") to indicate when it has reclaimed all previously allocated
+ * ASIDs that are not currently active on a processor.
+ *
+ * The current epoch number is always in the range [0, INT_MAX). Negative
+ * numbers and INT_MAX are reserved for special cases that are described
+ * below.
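+ *
+ * For example, with an 8-bit ASID space the set tracks 256 ASIDs; once
+ * they are exhausted the allocator advances asid_epoch, rebuilds the
+ * bit vector from the ASIDs still active on processors, and resumes
+ * allocation.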
+ */
+struct asid_set {
+ int asid_bits;
+ bitstr_t *asid_set;
+ int asid_set_size;
+ int asid_next;
+ int asid_epoch;
+ struct mtx asid_set_mutex;
+};
+
+static struct asid_set asids;
+static struct asid_set vmids;
+
+static SYSCTL_NODE(_vm_pmap, OID_AUTO, asid, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
+ "ASID allocator");
+SYSCTL_INT(_vm_pmap_asid, OID_AUTO, bits, CTLFLAG_RD, &asids.asid_bits, 0,
+ "The number of bits in an ASID");
+SYSCTL_INT(_vm_pmap_asid, OID_AUTO, next, CTLFLAG_RD, &asids.asid_next, 0,
+ "The last allocated ASID plus one");
+SYSCTL_INT(_vm_pmap_asid, OID_AUTO, epoch, CTLFLAG_RD, &asids.asid_epoch, 0,
+ "The current epoch number");
+
+static SYSCTL_NODE(_vm_pmap, OID_AUTO, vmid, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
+    "VMID allocator");
+SYSCTL_INT(_vm_pmap_vmid, OID_AUTO, bits, CTLFLAG_RD, &vmids.asid_bits, 0,
+    "The number of bits in a VMID");
+SYSCTL_INT(_vm_pmap_vmid, OID_AUTO, next, CTLFLAG_RD, &vmids.asid_next, 0,
+ "The last allocated VMID plus one");
+SYSCTL_INT(_vm_pmap_vmid, OID_AUTO, epoch, CTLFLAG_RD, &vmids.asid_epoch, 0,
+ "The current epoch number");
+
+void (*pmap_clean_stage2_tlbi)(void);
+void (*pmap_invalidate_vpipt_icache)(void);
+
+/*
+ * A pmap's cookie encodes an ASID and epoch number. Cookies for reserved
+ * ASIDs have a negative epoch number, specifically, INT_MIN. Cookies for
+ * dynamically allocated ASIDs have a non-negative epoch number.
+ *
+ * An invalid ASID is represented by -1.
+ *
+ * There are two special-case cookie values: (1) COOKIE_FROM(-1, INT_MIN),
+ * which indicates that an ASID should never be allocated to the pmap, and
+ * (2) COOKIE_FROM(-1, INT_MAX), which indicates that an ASID should be
+ * allocated when the pmap is next activated.
+ */
+#define COOKIE_FROM(asid, epoch) ((long)((u_int)(asid) | \
+ ((u_long)(epoch) << 32)))
+#define COOKIE_TO_ASID(cookie) ((int)(cookie))
+#define COOKIE_TO_EPOCH(cookie) ((int)((u_long)(cookie) >> 32))
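+
+/*
+ * For example, with a 64-bit long, COOKIE_FROM(5, 2) packs to
+ * 0x0000000200000005; COOKIE_TO_ASID() recovers 5 and
+ * COOKIE_TO_EPOCH() recovers 2.
+ */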
+
+static int superpages_enabled = 1;
+SYSCTL_INT(_vm_pmap, OID_AUTO, superpages_enabled,
+ CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &superpages_enabled, 0,
+ "Are large page mappings enabled?");
+
+/*
+ * Internal flags for pmap_enter()'s helper functions.
+ */
+#define PMAP_ENTER_NORECLAIM 0x1000000 /* Don't reclaim PV entries. */
+#define PMAP_ENTER_NOREPLACE 0x2000000 /* Don't replace mappings. */
+
+static void free_pv_chunk(struct pv_chunk *pc);
+static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
+static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
+static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
+static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
+static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
+ vm_offset_t va);
+
+static void pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte);
+static bool pmap_activate_int(pmap_t pmap);
+static void pmap_alloc_asid(pmap_t pmap);
+static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode);
+static pt_entry_t *pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va);
+static pt_entry_t *pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2,
+ vm_offset_t va, struct rwlock **lockp);
+static pt_entry_t *pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va);
+static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
+ vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
+static int pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2,
+ u_int flags, vm_page_t m, struct rwlock **lockp);
+static int pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
+ pd_entry_t l1e, struct spglist *free, struct rwlock **lockp);
+static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
+ pd_entry_t l2e, struct spglist *free, struct rwlock **lockp);
+static void pmap_reset_asid_set(pmap_t pmap);
+static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
+ vm_page_t m, struct rwlock **lockp);
+
+static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex,
+ struct rwlock **lockp);
+
+static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m,
+ struct spglist *free);
+static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
+static __inline vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
+
+/*
+ * These load the old table data and store the new value.
+ * They need to be atomic as the System MMU may write to the table at
+ * the same time as the CPU.
+ */
+#define pmap_clear(table) atomic_store_64(table, 0)
+#define pmap_clear_bits(table, bits) atomic_clear_64(table, bits)
+#define pmap_load(table) (*table)
+#define pmap_load_clear(table) atomic_swap_64(table, 0)
+#define pmap_load_store(table, entry) atomic_swap_64(table, entry)
+#define pmap_set_bits(table, bits) atomic_set_64(table, bits)
+#define pmap_store(table, entry) atomic_store_64(table, entry)
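+
+/*
+ * For example, pmap_load_store(l3, new) atomically installs "new" and
+ * returns the previous entry, so any status bits the hardware (or an
+ * SMMU) may have set concurrently, such as the access flag, are not
+ * lost.
+ */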
+
+/********************/
+/* Inline functions */
+/********************/
+
+static __inline void
+pagecopy(void *s, void *d)
+{
+
+ memcpy(d, s, PAGE_SIZE);
+}
+
+static __inline pd_entry_t *
+pmap_l0(pmap_t pmap, vm_offset_t va)
+{
+
+ return (&pmap->pm_l0[pmap_l0_index(va)]);
+}
+
+static __inline pd_entry_t *
+pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va)
+{
+ pd_entry_t *l1;
+
+ l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
+ return (&l1[pmap_l1_index(va)]);
+}
+
+static __inline pd_entry_t *
+pmap_l1(pmap_t pmap, vm_offset_t va)
+{
+ pd_entry_t *l0;
+
+ l0 = pmap_l0(pmap, va);
+ if ((pmap_load(l0) & ATTR_DESCR_MASK) != L0_TABLE)
+ return (NULL);
+
+ return (pmap_l0_to_l1(l0, va));
+}
+
+static __inline pd_entry_t *
+pmap_l1_to_l2(pd_entry_t *l1, vm_offset_t va)
+{
+ pd_entry_t *l2;
+
+ l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
+ return (&l2[pmap_l2_index(va)]);
+}
+
+static __inline pd_entry_t *
+pmap_l2(pmap_t pmap, vm_offset_t va)
+{
+ pd_entry_t *l1;
+
+ l1 = pmap_l1(pmap, va);
+ if ((pmap_load(l1) & ATTR_DESCR_MASK) != L1_TABLE)
+ return (NULL);
+
+ return (pmap_l1_to_l2(l1, va));
+}
+
+static __inline pt_entry_t *
+pmap_l2_to_l3(pd_entry_t *l2, vm_offset_t va)
+{
+ pt_entry_t *l3;
+
+ l3 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l2) & ~ATTR_MASK);
+ return (&l3[pmap_l3_index(va)]);
+}
+
+/*
+ * Returns the lowest valid pde for a given virtual address.
+ * The next level may or may not point to a valid page or block.
+ */
+static __inline pd_entry_t *
+pmap_pde(pmap_t pmap, vm_offset_t va, int *level)
+{
+ pd_entry_t *l0, *l1, *l2, desc;
+
+ l0 = pmap_l0(pmap, va);
+ desc = pmap_load(l0) & ATTR_DESCR_MASK;
+ if (desc != L0_TABLE) {
+ *level = -1;
+ return (NULL);
+ }
+
+ l1 = pmap_l0_to_l1(l0, va);
+ desc = pmap_load(l1) & ATTR_DESCR_MASK;
+ if (desc != L1_TABLE) {
+ *level = 0;
+ return (l0);
+ }
+
+ l2 = pmap_l1_to_l2(l1, va);
+ desc = pmap_load(l2) & ATTR_DESCR_MASK;
+ if (desc != L2_TABLE) {
+ *level = 1;
+ return (l1);
+ }
+
+ *level = 2;
+ return (l2);
+}
+
+/*
+ * Returns the lowest valid pte block or table entry for a given virtual
+ * address. If there are no valid entries return NULL and set the level to
+ * the first invalid level.
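+ *
+ * For example, with 4 KiB translation granules a regular page mapping
+ * returns the L3 entry with *level set to 3, while a 2 MiB block
+ * mapping returns the L2 entry with *level set to 2.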
+ */
+static __inline pt_entry_t *
+pmap_pte(pmap_t pmap, vm_offset_t va, int *level)
+{
+ pd_entry_t *l1, *l2, desc;
+ pt_entry_t *l3;
+
+ l1 = pmap_l1(pmap, va);
+ if (l1 == NULL) {
+ *level = 0;
+ return (NULL);
+ }
+ desc = pmap_load(l1) & ATTR_DESCR_MASK;
+ if (desc == L1_BLOCK) {
+ *level = 1;
+ return (l1);
+ }
+
+ if (desc != L1_TABLE) {
+ *level = 1;
+ return (NULL);
+ }
+
+ l2 = pmap_l1_to_l2(l1, va);
+ desc = pmap_load(l2) & ATTR_DESCR_MASK;
+ if (desc == L2_BLOCK) {
+ *level = 2;
+ return (l2);
+ }
+
+ if (desc != L2_TABLE) {
+ *level = 2;
+ return (NULL);
+ }
+
+ *level = 3;
+ l3 = pmap_l2_to_l3(l2, va);
+ if ((pmap_load(l3) & ATTR_DESCR_MASK) != L3_PAGE)
+ return (NULL);
+
+ return (l3);
+}
+
+bool
+pmap_ps_enabled(pmap_t pmap __unused)
+{
+
+ return (superpages_enabled != 0);
+}
+
+bool
+pmap_get_tables(pmap_t pmap, vm_offset_t va, pd_entry_t **l0, pd_entry_t **l1,
+ pd_entry_t **l2, pt_entry_t **l3)
+{
+ pd_entry_t *l0p, *l1p, *l2p;
+
+ if (pmap->pm_l0 == NULL)
+ return (false);
+
+ l0p = pmap_l0(pmap, va);
+ *l0 = l0p;
+
+ if ((pmap_load(l0p) & ATTR_DESCR_MASK) != L0_TABLE)
+ return (false);
+
+ l1p = pmap_l0_to_l1(l0p, va);
+ *l1 = l1p;
+
+ if ((pmap_load(l1p) & ATTR_DESCR_MASK) == L1_BLOCK) {
+ *l2 = NULL;
+ *l3 = NULL;
+ return (true);
+ }
+
+ if ((pmap_load(l1p) & ATTR_DESCR_MASK) != L1_TABLE)
+ return (false);
+
+ l2p = pmap_l1_to_l2(l1p, va);
+ *l2 = l2p;
+
+ if ((pmap_load(l2p) & ATTR_DESCR_MASK) == L2_BLOCK) {
+ *l3 = NULL;
+ return (true);
+ }
+
+ if ((pmap_load(l2p) & ATTR_DESCR_MASK) != L2_TABLE)
+ return (false);
+
+ *l3 = pmap_l2_to_l3(l2p, va);
+
+ return (true);
+}
+
+static __inline int
+pmap_l3_valid(pt_entry_t l3)
+{
+
+ return ((l3 & ATTR_DESCR_MASK) == L3_PAGE);
+}
+
+CTASSERT(L1_BLOCK == L2_BLOCK);
+
+static pt_entry_t
+pmap_pte_memattr(pmap_t pmap, vm_memattr_t memattr)
+{
+ pt_entry_t val;
+
+ if (pmap->pm_stage == PM_STAGE1) {
+ val = ATTR_S1_IDX(memattr);
+ if (memattr == VM_MEMATTR_DEVICE)
+ val |= ATTR_S1_XN;
+ return (val);
+ }
+
+ val = 0;
+
+ switch (memattr) {
+ case VM_MEMATTR_DEVICE:
+ return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_DEVICE_nGnRnE) |
+ ATTR_S2_XN(ATTR_S2_XN_ALL));
+ case VM_MEMATTR_UNCACHEABLE:
+ return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_NC));
+ case VM_MEMATTR_WRITE_BACK:
+ return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_WB));
+ case VM_MEMATTR_WRITE_THROUGH:
+ return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_WT));
+ default:
+ panic("%s: invalid memory attribute %x", __func__, memattr);
+ }
+}
+
+static pt_entry_t
+pmap_pte_prot(pmap_t pmap, vm_prot_t prot)
+{
+ pt_entry_t val;
+
+ val = 0;
+ if (pmap->pm_stage == PM_STAGE1) {
+ if ((prot & VM_PROT_EXECUTE) == 0)
+ val |= ATTR_S1_XN;
+ if ((prot & VM_PROT_WRITE) == 0)
+ val |= ATTR_S1_AP(ATTR_S1_AP_RO);
+ } else {
+ if ((prot & VM_PROT_WRITE) != 0)
+ val |= ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
+ if ((prot & VM_PROT_READ) != 0)
+ val |= ATTR_S2_S2AP(ATTR_S2_S2AP_READ);
+ if ((prot & VM_PROT_EXECUTE) == 0)
+ val |= ATTR_S2_XN(ATTR_S2_XN_ALL);
+ }
+
+ return (val);
+}
+
+/*
+ * Checks if the PTE is dirty.
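+ * A stage 1 PTE is dirty when it is writable, that is, ATTR_SW_DBM is
+ * set and the AP field allows read/write; a stage 2 PTE is dirty when
+ * ATTR_S2_S2AP_WRITE permission is granted.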
+ */
+static inline int
+pmap_pte_dirty(pmap_t pmap, pt_entry_t pte)
+{
+
+ KASSERT((pte & ATTR_SW_MANAGED) != 0, ("pte %#lx is unmanaged", pte));
+
+ if (pmap->pm_stage == PM_STAGE1) {
+ KASSERT((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) != 0,
+ ("pte %#lx is writeable and missing ATTR_SW_DBM", pte));
+
+ return ((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
+ (ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_SW_DBM));
+ }
+
+ return ((pte & ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE)) ==
+ ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE));
+}
+
+static __inline void
+pmap_resident_count_inc(pmap_t pmap, int count)
+{
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ pmap->pm_stats.resident_count += count;
+}
+
+static __inline void
+pmap_resident_count_dec(pmap_t pmap, int count)
+{
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ KASSERT(pmap->pm_stats.resident_count >= count,
+ ("pmap %p resident count underflow %ld %d", pmap,
+ pmap->pm_stats.resident_count, count));
+ pmap->pm_stats.resident_count -= count;
+}
+
+static pt_entry_t *
+pmap_early_page_idx(vm_offset_t l1pt, vm_offset_t va, u_int *l1_slot,
+ u_int *l2_slot)
+{
+ pt_entry_t *l2;
+ pd_entry_t *l1;
+
+ l1 = (pd_entry_t *)l1pt;
+ *l1_slot = (va >> L1_SHIFT) & Ln_ADDR_MASK;
+
+	/* Check that locore used an L1 table mapping */
+ KASSERT((l1[*l1_slot] & ATTR_DESCR_MASK) == L1_TABLE,
+ ("Invalid bootstrap L1 table"));
+ /* Find the address of the L2 table */
+ l2 = (pt_entry_t *)init_pt_va;
+ *l2_slot = pmap_l2_index(va);
+
+ return (l2);
+}
+
+static vm_paddr_t
+pmap_early_vtophys(vm_offset_t l1pt, vm_offset_t va)
+{
+ u_int l1_slot, l2_slot;
+ pt_entry_t *l2;
+
+ l2 = pmap_early_page_idx(l1pt, va, &l1_slot, &l2_slot);
+
+ return ((l2[l2_slot] & ~ATTR_MASK) + (va & L2_OFFSET));
+}
+
+static vm_offset_t
+pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa,
+ vm_offset_t freemempos)
+{
+ pt_entry_t *l2;
+ vm_offset_t va;
+ vm_paddr_t l2_pa, pa;
+ u_int l1_slot, l2_slot, prev_l1_slot;
+ int i;
+
+ dmap_phys_base = min_pa & ~L1_OFFSET;
+ dmap_phys_max = 0;
+ dmap_max_addr = 0;
+ l2 = NULL;
+ prev_l1_slot = -1;
+
+ memset(pagetable_dmap, 0, PAGE_SIZE * DMAP_TABLES);
+
+ for (i = 0; i < (physmap_idx * 2); i += 2) {
+ pa = physmap[i] & ~L2_OFFSET;
+ va = pa - dmap_phys_base + DMAP_MIN_ADDRESS;
+
+ /* Create L2 mappings at the start of the region */
+ if ((pa & L1_OFFSET) != 0) {
+ l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
+ if (l1_slot != prev_l1_slot) {
+ prev_l1_slot = l1_slot;
+ l2 = (pt_entry_t *)freemempos;
+ l2_pa = pmap_early_vtophys(kern_l1,
+ (vm_offset_t)l2);
+ freemempos += PAGE_SIZE;
+
+ pmap_store(&pagetable_dmap[l1_slot],
+ (l2_pa & ~Ln_TABLE_MASK) | L1_TABLE);
+
+ memset(l2, 0, PAGE_SIZE);
+ }
+ KASSERT(l2 != NULL,
+ ("pmap_bootstrap_dmap: NULL l2 map"));
+ for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1];
+ pa += L2_SIZE, va += L2_SIZE) {
+ /*
+				 * Stop at an L1 boundary so a level 1
+				 * block mapping can be created instead.
+ */
+ if ((pa & L1_OFFSET) == 0)
+ break;
+
+ l2_slot = pmap_l2_index(va);
+ KASSERT(l2_slot != 0, ("..."));
+ pmap_store(&l2[l2_slot],
+ (pa & ~L2_OFFSET) | ATTR_DEFAULT |
+ ATTR_S1_XN |
+ ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
+ L2_BLOCK);
+ }
+ KASSERT(va == (pa - dmap_phys_base + DMAP_MIN_ADDRESS),
+ ("..."));
+ }
+
+ for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1] &&
+ (physmap[i + 1] - pa) >= L1_SIZE;
+ pa += L1_SIZE, va += L1_SIZE) {
+ l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
+ pmap_store(&pagetable_dmap[l1_slot],
+ (pa & ~L1_OFFSET) | ATTR_DEFAULT | ATTR_S1_XN |
+ ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L1_BLOCK);
+ }
+
+ /* Create L2 mappings at the end of the region */
+ if (pa < physmap[i + 1]) {
+ l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
+ if (l1_slot != prev_l1_slot) {
+ prev_l1_slot = l1_slot;
+ l2 = (pt_entry_t *)freemempos;
+ l2_pa = pmap_early_vtophys(kern_l1,
+ (vm_offset_t)l2);
+ freemempos += PAGE_SIZE;
+
+ pmap_store(&pagetable_dmap[l1_slot],
+ (l2_pa & ~Ln_TABLE_MASK) | L1_TABLE);
+
+ memset(l2, 0, PAGE_SIZE);
+ }
+ KASSERT(l2 != NULL,
+ ("pmap_bootstrap_dmap: NULL l2 map"));
+ for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1];
+ pa += L2_SIZE, va += L2_SIZE) {
+ l2_slot = pmap_l2_index(va);
+ pmap_store(&l2[l2_slot],
+ (pa & ~L2_OFFSET) | ATTR_DEFAULT |
+ ATTR_S1_XN |
+ ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
+ L2_BLOCK);
+ }
+ }
+
+ if (pa > dmap_phys_max) {
+ dmap_phys_max = pa;
+ dmap_max_addr = va;
+ }
+ }
+
+ cpu_tlb_flushID();
+
+ return (freemempos);
+}
+
+static vm_offset_t
+pmap_bootstrap_l2(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l2_start)
+{
+ vm_offset_t l2pt;
+ vm_paddr_t pa;
+ pd_entry_t *l1;
+ u_int l1_slot;
+
+ KASSERT((va & L1_OFFSET) == 0, ("Invalid virtual address"));
+
+ l1 = (pd_entry_t *)l1pt;
+ l1_slot = pmap_l1_index(va);
+ l2pt = l2_start;
+
+ for (; va < VM_MAX_KERNEL_ADDRESS; l1_slot++, va += L1_SIZE) {
+ KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index"));
+
+ pa = pmap_early_vtophys(l1pt, l2pt);
+ pmap_store(&l1[l1_slot],
+ (pa & ~Ln_TABLE_MASK) | L1_TABLE);
+ l2pt += PAGE_SIZE;
+ }
+
+ /* Clean the L2 page table */
+ memset((void *)l2_start, 0, l2pt - l2_start);
+
+	return (l2pt);
+}
+
+static vm_offset_t
+pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l3_start)
+{
+ vm_offset_t l3pt;
+ vm_paddr_t pa;
+ pd_entry_t *l2;
+ u_int l2_slot;
+
+ KASSERT((va & L2_OFFSET) == 0, ("Invalid virtual address"));
+
+ l2 = pmap_l2(kernel_pmap, va);
+ l2 = (pd_entry_t *)rounddown2((uintptr_t)l2, PAGE_SIZE);
+ l2_slot = pmap_l2_index(va);
+ l3pt = l3_start;
+
+ for (; va < VM_MAX_KERNEL_ADDRESS; l2_slot++, va += L2_SIZE) {
+ KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index"));
+
+ pa = pmap_early_vtophys(l1pt, l3pt);
+ pmap_store(&l2[l2_slot],
+ (pa & ~Ln_TABLE_MASK) | ATTR_S1_UXN | L2_TABLE);
+ l3pt += PAGE_SIZE;
+ }
+
+	/* Clean the L3 page tables */
+ memset((void *)l3_start, 0, l3pt - l3_start);
+
+	return (l3pt);
+}
+
+/*
+ * Bootstrap the system enough to run with virtual memory.
+ */
+void
+pmap_bootstrap(vm_offset_t l0pt, vm_offset_t l1pt, vm_paddr_t kernstart,
+ vm_size_t kernlen)
+{
+ vm_offset_t freemempos;
+ vm_offset_t dpcpu, msgbufpv;
+ vm_paddr_t start_pa, pa, min_pa;
+ uint64_t kern_delta;
+ int i;
+
+ /* Verify that the ASID is set through TTBR0. */
+ KASSERT((READ_SPECIALREG(tcr_el1) & TCR_A1) == 0,
+ ("pmap_bootstrap: TCR_EL1.A1 != 0"));
+
+ kern_delta = KERNBASE - kernstart;
+
+ printf("pmap_bootstrap %lx %lx %lx\n", l1pt, kernstart, kernlen);
+ printf("%lx\n", l1pt);
+ printf("%lx\n", (KERNBASE >> L1_SHIFT) & Ln_ADDR_MASK);
+
+ /* Set this early so we can use the pagetable walking functions */
+ kernel_pmap_store.pm_l0 = (pd_entry_t *)l0pt;
+ PMAP_LOCK_INIT(kernel_pmap);
+ kernel_pmap->pm_l0_paddr = l0pt - kern_delta;
+ kernel_pmap->pm_cookie = COOKIE_FROM(-1, INT_MIN);
+ kernel_pmap->pm_stage = PM_STAGE1;
+ kernel_pmap->pm_asid_set = &asids;
+
+ /* Assume the address we were loaded to is a valid physical address */
+ min_pa = KERNBASE - kern_delta;
+
+ physmap_idx = physmem_avail(physmap, nitems(physmap));
+ physmap_idx /= 2;
+
+ /*
+ * Find the minimum physical address. physmap is sorted,
+ * but may contain empty ranges.
+ */
+ for (i = 0; i < physmap_idx * 2; i += 2) {
+ if (physmap[i] == physmap[i + 1])
+ continue;
+ if (physmap[i] <= min_pa)
+ min_pa = physmap[i];
+ }
+
+ freemempos = KERNBASE + kernlen;
+ freemempos = roundup2(freemempos, PAGE_SIZE);
+
+ /* Create a direct map region early so we can use it for pa -> va */
+ freemempos = pmap_bootstrap_dmap(l1pt, min_pa, freemempos);
+
+ start_pa = pa = KERNBASE - kern_delta;
+
+ /*
+ * Create the l2 tables up to VM_MAX_KERNEL_ADDRESS. We assume that the
+ * loader allocated the first and only l2 page table page used to map
+ * the kernel, preloaded files and module metadata.
+ */
+ freemempos = pmap_bootstrap_l2(l1pt, KERNBASE + L1_SIZE, freemempos);
+ /* And the l3 tables for the early devmap */
+ freemempos = pmap_bootstrap_l3(l1pt,
+ VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE), freemempos);
+
+ cpu_tlb_flushID();
+
+#define alloc_pages(var, np) \
+ (var) = freemempos; \
+	freemempos += ((np) * PAGE_SIZE);				\
+ memset((char *)(var), 0, ((np) * PAGE_SIZE));
+
+ /* Allocate dynamic per-cpu area. */
+ alloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
+ dpcpu_init((void *)dpcpu, 0);
+
+ /* Allocate memory for the msgbuf, e.g. for /sbin/dmesg */
+ alloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
+ msgbufp = (void *)msgbufpv;
+
+ /* Reserve some VA space for early BIOS/ACPI mapping */
+ preinit_map_va = roundup2(freemempos, L2_SIZE);
+
+ virtual_avail = preinit_map_va + PMAP_PREINIT_MAPPING_SIZE;
+ virtual_avail = roundup2(virtual_avail, L1_SIZE);
+ virtual_end = VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE);
+ kernel_vm_end = virtual_avail;
+
+ pa = pmap_early_vtophys(l1pt, freemempos);
+
+ physmem_exclude_region(start_pa, pa - start_pa, EXFLAG_NOALLOC);
+
+ cpu_tlb_flushID();
+}
+
+/*
+ * Initialize a vm_page's machine-dependent fields.
+ */
+void
+pmap_page_init(vm_page_t m)
+{
+
+ TAILQ_INIT(&m->md.pv_list);
+ m->md.pv_memattr = VM_MEMATTR_WRITE_BACK;
+}
+
+static void
+pmap_init_asids(struct asid_set *set, int bits)
+{
+ int i;
+
+ set->asid_bits = bits;
+
+ /*
+ * We may be too early in the overall initialization process to use
+ * bit_alloc().
+ */
+ set->asid_set_size = 1 << set->asid_bits;
+ set->asid_set = (bitstr_t *)kmem_malloc(bitstr_size(set->asid_set_size),
+ M_WAITOK | M_ZERO);
+ for (i = 0; i < ASID_FIRST_AVAILABLE; i++)
+ bit_set(set->asid_set, i);
+ set->asid_next = ASID_FIRST_AVAILABLE;
+ mtx_init(&set->asid_set_mutex, "asid set", NULL, MTX_SPIN);
+}
+
+/*
+ * Initialize the pmap module.
+ * Called by vm_init, to initialize any structures that the pmap
+ * system needs to map virtual memory.
+ */
+void
+pmap_init(void)
+{
+ vm_size_t s;
+ uint64_t mmfr1;
+ int i, pv_npg, vmid_bits;
+
+ /*
+ * Are large page mappings enabled?
+ */
+ TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled);
+ if (superpages_enabled) {
+ KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
+ ("pmap_init: can't assign to pagesizes[1]"));
+ pagesizes[1] = L2_SIZE;
+ }
+
+ /*
+ * Initialize the ASID allocator.
+ */
+ pmap_init_asids(&asids,
+ (READ_SPECIALREG(tcr_el1) & TCR_ASID_16) != 0 ? 16 : 8);
+
+ if (has_hyp()) {
+ mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
+ vmid_bits = 8;
+
+ if (ID_AA64MMFR1_VMIDBits_VAL(mmfr1) ==
+ ID_AA64MMFR1_VMIDBits_16)
+ vmid_bits = 16;
+ pmap_init_asids(&vmids, vmid_bits);
+ }
+
+ /*
+ * Initialize the pv chunk list mutex.
+ */
+ mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
+
+ /*
+ * Initialize the pool of pv list locks.
+ */
+ for (i = 0; i < NPV_LIST_LOCKS; i++)
+ rw_init(&pv_list_locks[i], "pmap pv list");
+
+ /*
+ * Calculate the size of the pv head table for superpages.
+ */
+ pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, L2_SIZE);
+
+ /*
+ * Allocate memory for the pv head table for superpages.
+ */
+ s = (vm_size_t)(pv_npg * sizeof(struct md_page));
+ s = round_page(s);
+ pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
+ for (i = 0; i < pv_npg; i++)
+ TAILQ_INIT(&pv_table[i].pv_list);
+ TAILQ_INIT(&pv_dummy.pv_list);
+
+ vm_initialized = 1;
+}
+
+static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
+ "2MB page mapping counters");
+
+static u_long pmap_l2_demotions;
+SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, demotions, CTLFLAG_RD,
+ &pmap_l2_demotions, 0, "2MB page demotions");
+
+static u_long pmap_l2_mappings;
+SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, mappings, CTLFLAG_RD,
+ &pmap_l2_mappings, 0, "2MB page mappings");
+
+static u_long pmap_l2_p_failures;
+SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, p_failures, CTLFLAG_RD,
+ &pmap_l2_p_failures, 0, "2MB page promotion failures");
+
+static u_long pmap_l2_promotions;
+SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, promotions, CTLFLAG_RD,
+ &pmap_l2_promotions, 0, "2MB page promotions");
+
+/*
+ * Invalidate a single TLB entry.
+ */
+static __inline void
+pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
+{
+ uint64_t r;
+
+ PMAP_ASSERT_STAGE1(pmap);
+
+ dsb(ishst);
+ if (pmap == kernel_pmap) {
+ r = atop(va);
+ __asm __volatile("tlbi vaae1is, %0" : : "r" (r));
+ } else {
+ r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)) | atop(va);
+ __asm __volatile("tlbi vae1is, %0" : : "r" (r));
+ }
+ dsb(ish);
+ isb();
+}
+
+static __inline void
+pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+ uint64_t end, r, start;
+
+ PMAP_ASSERT_STAGE1(pmap);
+
+ dsb(ishst);
+ if (pmap == kernel_pmap) {
+ start = atop(sva);
+ end = atop(eva);
+ for (r = start; r < end; r++)
+ __asm __volatile("tlbi vaae1is, %0" : : "r" (r));
+ } else {
+ start = end = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
+ start |= atop(sva);
+ end |= atop(eva);
+ for (r = start; r < end; r++)
+ __asm __volatile("tlbi vae1is, %0" : : "r" (r));
+ }
+ dsb(ish);
+ isb();
+}
+
+static __inline void
+pmap_invalidate_all(pmap_t pmap)
+{
+ uint64_t r;
+
+ PMAP_ASSERT_STAGE1(pmap);
+
+ dsb(ishst);
+ if (pmap == kernel_pmap) {
+ __asm __volatile("tlbi vmalle1is");
+ } else {
+ r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
+ __asm __volatile("tlbi aside1is, %0" : : "r" (r));
+ }
+ dsb(ish);
+ isb();
+}
+
+/*
+ * Routine: pmap_extract
+ * Function:
+ * Extract the physical page address associated
+ * with the given map/virtual_address pair.
+ */
+vm_paddr_t
+pmap_extract(pmap_t pmap, vm_offset_t va)
+{
+ pt_entry_t *pte, tpte;
+ vm_paddr_t pa;
+ int lvl;
+
+ pa = 0;
+ PMAP_LOCK(pmap);
+ /*
+ * Find the block or page map for this virtual address. pmap_pte
+ * will return either a valid block/page entry, or NULL.
+ */
+ pte = pmap_pte(pmap, va, &lvl);
+ if (pte != NULL) {
+ tpte = pmap_load(pte);
+ pa = tpte & ~ATTR_MASK;
+ switch(lvl) {
+ case 1:
+ KASSERT((tpte & ATTR_DESCR_MASK) == L1_BLOCK,
+ ("pmap_extract: Invalid L1 pte found: %lx",
+ tpte & ATTR_DESCR_MASK));
+ pa |= (va & L1_OFFSET);
+ break;
+ case 2:
+ KASSERT((tpte & ATTR_DESCR_MASK) == L2_BLOCK,
+ ("pmap_extract: Invalid L2 pte found: %lx",
+ tpte & ATTR_DESCR_MASK));
+ pa |= (va & L2_OFFSET);
+ break;
+ case 3:
+ KASSERT((tpte & ATTR_DESCR_MASK) == L3_PAGE,
+ ("pmap_extract: Invalid L3 pte found: %lx",
+ tpte & ATTR_DESCR_MASK));
+ pa |= (va & L3_OFFSET);
+ break;
+ }
+ }
+ PMAP_UNLOCK(pmap);
+ return (pa);
+}
+
+/*
+ * Routine: pmap_extract_and_hold
+ * Function:
+ * Atomically extract and hold the physical page
+ * with the given pmap and virtual address pair
+ * if that mapping permits the given protection.
+ */
+vm_page_t
+pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
+{
+ pt_entry_t *pte, tpte;
+ vm_offset_t off;
+ vm_page_t m;
+ int lvl;
+ bool use;
+
+ m = NULL;
+ PMAP_LOCK(pmap);
+ pte = pmap_pte(pmap, va, &lvl);
+ if (pte != NULL) {
+ tpte = pmap_load(pte);
+
+ KASSERT(lvl > 0 && lvl <= 3,
+ ("pmap_extract_and_hold: Invalid level %d", lvl));
+ CTASSERT(L1_BLOCK == L2_BLOCK);
+ KASSERT((lvl == 3 && (tpte & ATTR_DESCR_MASK) == L3_PAGE) ||
+ (lvl < 3 && (tpte & ATTR_DESCR_MASK) == L1_BLOCK),
+ ("pmap_extract_and_hold: Invalid pte at L%d: %lx", lvl,
+ tpte & ATTR_DESCR_MASK));
+
+ use = false;
+ if ((prot & VM_PROT_WRITE) == 0)
+ use = true;
+ else if (pmap->pm_stage == PM_STAGE1 &&
+ (tpte & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP(ATTR_S1_AP_RW))
+ use = true;
+ else if (pmap->pm_stage == PM_STAGE2 &&
+ ((tpte & ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE)) ==
+ ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE)))
+ use = true;
+
+ if (use) {
+ switch(lvl) {
+ case 1:
+ off = va & L1_OFFSET;
+ break;
+ case 2:
+ off = va & L2_OFFSET;
+ break;
+ case 3:
+ default:
+ off = 0;
+ }
+ m = PHYS_TO_VM_PAGE((tpte & ~ATTR_MASK) | off);
+ if (!vm_page_wire_mapped(m))
+ m = NULL;
+ }
+ }
+ PMAP_UNLOCK(pmap);
+ return (m);
+}
+
+vm_paddr_t
+pmap_kextract(vm_offset_t va)
+{
+ pt_entry_t *pte, tpte;
+
+ if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
+ return (DMAP_TO_PHYS(va));
+ pte = pmap_l1(kernel_pmap, va);
+ if (pte == NULL)
+ return (0);
+
+ /*
+ * A concurrent pmap_update_entry() will clear the entry's valid bit
+ * but leave the rest of the entry unchanged. Therefore, we treat a
+ * non-zero entry as being valid, and we ignore the valid bit when
+ * determining whether the entry maps a block, page, or table.
+ */
+ tpte = pmap_load(pte);
+ if (tpte == 0)
+ return (0);
+ if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK)
+ return ((tpte & ~ATTR_MASK) | (va & L1_OFFSET));
+ pte = pmap_l1_to_l2(&tpte, va);
+ tpte = pmap_load(pte);
+ if (tpte == 0)
+ return (0);
+ if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK)
+ return ((tpte & ~ATTR_MASK) | (va & L2_OFFSET));
+ pte = pmap_l2_to_l3(&tpte, va);
+ tpte = pmap_load(pte);
+ if (tpte == 0)
+ return (0);
+ return ((tpte & ~ATTR_MASK) | (va & L3_OFFSET));
+}
+
+/***************************************************
+ * Low level mapping routines.....
+ ***************************************************/
+
+void
+pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
+{
+ pd_entry_t *pde;
+ pt_entry_t *pte, attr;
+ vm_offset_t va;
+ int lvl;
+
+ KASSERT((pa & L3_OFFSET) == 0,
+ ("pmap_kenter: Invalid physical address"));
+ KASSERT((sva & L3_OFFSET) == 0,
+ ("pmap_kenter: Invalid virtual address"));
+ KASSERT((size & PAGE_MASK) == 0,
+ ("pmap_kenter: Mapping is not page-sized"));
+
+ attr = ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
+ ATTR_S1_IDX(mode) | L3_PAGE;
+ va = sva;
+ while (size != 0) {
+ pde = pmap_pde(kernel_pmap, va, &lvl);
+ KASSERT(pde != NULL,
+ ("pmap_kenter: Invalid page entry, va: 0x%lx", va));
+ KASSERT(lvl == 2, ("pmap_kenter: Invalid level %d", lvl));
+
+ pte = pmap_l2_to_l3(pde, va);
+ pmap_load_store(pte, (pa & ~L3_OFFSET) | attr);
+
+ va += PAGE_SIZE;
+ pa += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ pmap_invalidate_range(kernel_pmap, sva, va);
+}
+
+void
+pmap_kenter_device(vm_offset_t sva, vm_size_t size, vm_paddr_t pa)
+{
+
+ pmap_kenter(sva, size, pa, VM_MEMATTR_DEVICE);
+}
+
+/*
+ * Remove a page from the kernel pagetables.
+ */
+PMAP_INLINE void
+pmap_kremove(vm_offset_t va)
+{
+ pt_entry_t *pte;
+ int lvl;
+
+ pte = pmap_pte(kernel_pmap, va, &lvl);
+ KASSERT(pte != NULL, ("pmap_kremove: Invalid address"));
+ KASSERT(lvl == 3, ("pmap_kremove: Invalid pte level %d", lvl));
+
+ pmap_clear(pte);
+ pmap_invalidate_page(kernel_pmap, va);
+}
+
+void
+pmap_kremove_device(vm_offset_t sva, vm_size_t size)
+{
+ pt_entry_t *pte;
+ vm_offset_t va;
+ int lvl;
+
+ KASSERT((sva & L3_OFFSET) == 0,
+ ("pmap_kremove_device: Invalid virtual address"));
+ KASSERT((size & PAGE_MASK) == 0,
+ ("pmap_kremove_device: Mapping is not page-sized"));
+
+ va = sva;
+ while (size != 0) {
+ pte = pmap_pte(kernel_pmap, va, &lvl);
+ KASSERT(pte != NULL, ("Invalid page table, va: 0x%lx", va));
+ KASSERT(lvl == 3,
+ ("Invalid device pagetable level: %d != 3", lvl));
+ pmap_clear(pte);
+
+ va += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ pmap_invalidate_range(kernel_pmap, sva, va);
+}
+
+/*
+ * Used to map a range of physical addresses into kernel
+ * virtual address space.
+ *
+ * The value passed in '*virt' is a suggested virtual address for
+ * the mapping. Architectures which can support a direct-mapped
+ * physical to virtual region can return the appropriate address
+ * within that region, leaving '*virt' unchanged. Other
+ * architectures should map the pages starting at '*virt' and
+ * update '*virt' with the first usable address after the mapped
+ * region.
+ */
+vm_offset_t
+pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
+{
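+	/*
+	 * The [start, end) range is covered by the direct map, so return
+	 * the DMAP address and leave *virt unchanged.
+	 */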
+	return (PHYS_TO_DMAP(start));
+}
+
+/*
+ * Add a list of wired pages to the kva.  This routine is only used for
+ * temporary kernel mappings that do not need to have page modification
+ * or references recorded.  Note that old mappings are simply written
+ * over.  The page *must* be wired.
+ * Note: SMP coherent.  Uses a ranged, broadcast TLB invalidation.
+ */
+void
+pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
+{
+ pd_entry_t *pde;
+ pt_entry_t *pte, pa;
+ vm_offset_t va;
+ vm_page_t m;
+ int i, lvl;
+
+ va = sva;
+ for (i = 0; i < count; i++) {
+ pde = pmap_pde(kernel_pmap, va, &lvl);
+ KASSERT(pde != NULL,
+ ("pmap_qenter: Invalid page entry, va: 0x%lx", va));
+ KASSERT(lvl == 2,
+ ("pmap_qenter: Invalid level %d", lvl));
+
+ m = ma[i];
+ pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT |
+ ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
+ ATTR_S1_IDX(m->md.pv_memattr) | L3_PAGE;
+ pte = pmap_l2_to_l3(pde, va);
+ pmap_load_store(pte, pa);
+
+ va += L3_SIZE;
+ }
+ pmap_invalidate_range(kernel_pmap, sva, va);
+}
+
+/*
+ * This routine tears out page mappings from the
+ * kernel -- it is meant only for temporary mappings.
+ */
+void
+pmap_qremove(vm_offset_t sva, int count)
+{
+ pt_entry_t *pte;
+ vm_offset_t va;
+ int lvl;
+
+ KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", sva));
+
+ va = sva;
+ while (count-- > 0) {
+ pte = pmap_pte(kernel_pmap, va, &lvl);
+		KASSERT(lvl == 3,
+		    ("pmap_qremove: Invalid pte level %d", lvl));
+		if (pte != NULL)
+			pmap_clear(pte);
+
+ va += PAGE_SIZE;
+ }
+ pmap_invalidate_range(kernel_pmap, sva, va);
+}
+
+/***************************************************
+ * Page table page management routines.....
+ ***************************************************/
+/*
+ * Schedule the specified unused page table page to be freed. Specifically,
+ * add the page to the specified list of pages that will be released to the
+ * physical memory manager after the TLB has been updated.
+ */
+static __inline void
+pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
+ boolean_t set_PG_ZERO)
+{
+
+ if (set_PG_ZERO)
+ m->flags |= PG_ZERO;
+ else
+ m->flags &= ~PG_ZERO;
+ SLIST_INSERT_HEAD(free, m, plinks.s.ss);
+}
+
+/*
+ * Decrements a page table page's reference count, which is used to record the
+ * number of valid page table entries within the page. If the reference count
+ * drops to zero, then the page table page is unmapped. Returns TRUE if the
+ * page table page was unmapped and FALSE otherwise.
+ */
+static inline boolean_t
+pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
+{
+
+ --m->ref_count;
+ if (m->ref_count == 0) {
+ _pmap_unwire_l3(pmap, va, m, free);
+ return (TRUE);
+ } else
+ return (FALSE);
+}
+
+static void
+_pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
+{
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ /*
+ * unmap the page table page
+ */
+ if (m->pindex >= (NUL2E + NUL1E)) {
+ /* l1 page */
+ pd_entry_t *l0;
+
+ l0 = pmap_l0(pmap, va);
+ pmap_clear(l0);
+ } else if (m->pindex >= NUL2E) {
+ /* l2 page */
+ pd_entry_t *l1;
+
+ l1 = pmap_l1(pmap, va);
+ pmap_clear(l1);
+ } else {
+ /* l3 page */
+ pd_entry_t *l2;
+
+ l2 = pmap_l2(pmap, va);
+ pmap_clear(l2);
+ }
+ pmap_resident_count_dec(pmap, 1);
+ if (m->pindex < NUL2E) {
+ /* We just released an l3, unhold the matching l2 */
+ pd_entry_t *l1, tl1;
+ vm_page_t l2pg;
+
+ l1 = pmap_l1(pmap, va);
+ tl1 = pmap_load(l1);
+ l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
+ pmap_unwire_l3(pmap, va, l2pg, free);
+ } else if (m->pindex < (NUL2E + NUL1E)) {
+ /* We just released an l2, unhold the matching l1 */
+ pd_entry_t *l0, tl0;
+ vm_page_t l1pg;
+
+ l0 = pmap_l0(pmap, va);
+ tl0 = pmap_load(l0);
+ l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
+ pmap_unwire_l3(pmap, va, l1pg, free);
+ }
+ pmap_invalidate_page(pmap, va);
+
+ /*
+ * Put the page on a list so that it is released only after
+ * *all* TLB shootdowns have completed.
+ */
+ pmap_add_delayed_free_list(m, free, TRUE);
+}
+
+/*
+ * After removing a page table entry, this routine is used to
+ * conditionally free the page, and manage the reference count.
+ */
+static int
+pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
+ struct spglist *free)
+{
+ vm_page_t mpte;
+
+ if (va >= VM_MAXUSER_ADDRESS)
+ return (0);
+ KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
+ mpte = PHYS_TO_VM_PAGE(ptepde & ~ATTR_MASK);
+ return (pmap_unwire_l3(pmap, va, mpte, free));
+}
+
+/*
+ * Release a page table page reference after a failed attempt to create a
+ * mapping.
+ */
+static void
+pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
+{
+ struct spglist free;
+
+ SLIST_INIT(&free);
+ if (pmap_unwire_l3(pmap, va, mpte, &free)) {
+ /*
+ * Although "va" was never mapped, the TLB could nonetheless
+ * have intermediate entries that refer to the freed page
+ * table pages. Invalidate those entries.
+ *
+ * XXX redundant invalidation (See _pmap_unwire_l3().)
+ */
+ pmap_invalidate_page(pmap, va);
+ vm_page_free_pages_toq(&free, true);
+ }
+}
+
+void
+pmap_pinit0(pmap_t pmap)
+{
+
+ PMAP_LOCK_INIT(pmap);
+ bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
+ pmap->pm_l0_paddr = READ_SPECIALREG(ttbr0_el1);
+ pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);
+ pmap->pm_root.rt_root = 0;
+ pmap->pm_cookie = COOKIE_FROM(ASID_RESERVED_FOR_PID_0, INT_MIN);
+ pmap->pm_stage = PM_STAGE1;
+ pmap->pm_asid_set = &asids;
+
+ PCPU_SET(curpmap, pmap);
+}
+
+int
+pmap_pinit_stage(pmap_t pmap, enum pmap_stage stage)
+{
+ vm_page_t l0pt;
+
+ /*
+ * allocate the l0 page
+ */
+ while ((l0pt = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
+ VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
+ vm_wait(NULL);
+
+ pmap->pm_l0_paddr = VM_PAGE_TO_PHYS(l0pt);
+ pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);
+
+ if ((l0pt->flags & PG_ZERO) == 0)
+ pagezero(pmap->pm_l0);
+
+ pmap->pm_root.rt_root = 0;
+ bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
+ pmap->pm_cookie = COOKIE_FROM(-1, INT_MAX);
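+	/* Start with an invalid ASID/VMID cookie; a real one is allocated below. */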
+
+ pmap->pm_stage = stage;
+ switch (stage) {
+ case PM_STAGE1:
+ pmap->pm_asid_set = &asids;
+ break;
+ case PM_STAGE2:
+ pmap->pm_asid_set = &vmids;
+ break;
+ default:
+ panic("%s: Invalid pmap type %d", __func__, stage);
+ break;
+ }
+
+ /* XXX Temporarily disable deferred ASID allocation. */
+ pmap_alloc_asid(pmap);
+
+ return (1);
+}
+
+int
+pmap_pinit(pmap_t pmap)
+{
+
+ return (pmap_pinit_stage(pmap, PM_STAGE1));
+}
+
+/*
+ * This routine is called if the desired page table page does not exist.
+ *
+ * If page table page allocation fails, this routine may sleep before
+ * returning NULL. It sleeps only if a lock pointer was given.
+ *
+ * Note: If a page allocation fails at page table level two or three,
+ * one or two pages may be held during the wait, only to be released
+ * afterwards.  This conservative approach makes it easy to argue that
+ * no race conditions can occur.
+ */
+static vm_page_t
+_pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
+{
+ vm_page_t m, l1pg, l2pg;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+
+ /*
+ * Allocate a page table page.
+ */
+ if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
+ VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
+ if (lockp != NULL) {
+ RELEASE_PV_LIST_LOCK(lockp);
+ PMAP_UNLOCK(pmap);
+ vm_wait(NULL);
+ PMAP_LOCK(pmap);
+ }
+
+ /*
+ * Indicate the need to retry. While waiting, the page table
+ * page may have been allocated.
+ */
+ return (NULL);
+ }
+ if ((m->flags & PG_ZERO) == 0)
+ pmap_zero_page(m);
+
+ /*
+ * Because of AArch64's weak memory consistency model, we must have a
+ * barrier here to ensure that the stores for zeroing "m", whether by
+ * pmap_zero_page() or an earlier function, are visible before adding
+ * "m" to the page table. Otherwise, a page table walk by another
+ * processor's MMU could see the mapping to "m" and a stale, non-zero
+ * PTE within "m".
+ */
+ dmb(ishst);
+
+ /*
+ * Map the pagetable page into the process address space, if
+ * it isn't already there.
+ */
+
+ if (ptepindex >= (NUL2E + NUL1E)) {
+ pd_entry_t *l0;
+ vm_pindex_t l0index;
+
+ l0index = ptepindex - (NUL2E + NUL1E);
+ l0 = &pmap->pm_l0[l0index];
+ pmap_store(l0, VM_PAGE_TO_PHYS(m) | L0_TABLE);
+ } else if (ptepindex >= NUL2E) {
+ vm_pindex_t l0index, l1index;
+ pd_entry_t *l0, *l1;
+ pd_entry_t tl0;
+
+ l1index = ptepindex - NUL2E;
+ l0index = l1index >> L0_ENTRIES_SHIFT;
+
+ l0 = &pmap->pm_l0[l0index];
+ tl0 = pmap_load(l0);
+ if (tl0 == 0) {
+ /* recurse for allocating page dir */
+ if (_pmap_alloc_l3(pmap, NUL2E + NUL1E + l0index,
+ lockp) == NULL) {
+ vm_page_unwire_noq(m);
+ vm_page_free_zero(m);
+ return (NULL);
+ }
+ } else {
+ l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
+ l1pg->ref_count++;
+ }
+
+ l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
+ l1 = &l1[ptepindex & Ln_ADDR_MASK];
+ pmap_store(l1, VM_PAGE_TO_PHYS(m) | L1_TABLE);
+ } else {
+ vm_pindex_t l0index, l1index;
+ pd_entry_t *l0, *l1, *l2;
+ pd_entry_t tl0, tl1;
+
+ l1index = ptepindex >> Ln_ENTRIES_SHIFT;
+ l0index = l1index >> L0_ENTRIES_SHIFT;
+
+ l0 = &pmap->pm_l0[l0index];
+ tl0 = pmap_load(l0);
+ if (tl0 == 0) {
+ /* recurse for allocating page dir */
+ if (_pmap_alloc_l3(pmap, NUL2E + l1index,
+ lockp) == NULL) {
+ vm_page_unwire_noq(m);
+ vm_page_free_zero(m);
+ return (NULL);
+ }
+ tl0 = pmap_load(l0);
+ l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
+ l1 = &l1[l1index & Ln_ADDR_MASK];
+ } else {
+ l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
+ l1 = &l1[l1index & Ln_ADDR_MASK];
+ tl1 = pmap_load(l1);
+ if (tl1 == 0) {
+ /* recurse for allocating page dir */
+ if (_pmap_alloc_l3(pmap, NUL2E + l1index,
+ lockp) == NULL) {
+ vm_page_unwire_noq(m);
+ vm_page_free_zero(m);
+ return (NULL);
+ }
+ } else {
+ l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
+ l2pg->ref_count++;
+ }
+ }
+
+ l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
+ l2 = &l2[ptepindex & Ln_ADDR_MASK];
+ pmap_store(l2, VM_PAGE_TO_PHYS(m) | L2_TABLE);
+ }
+
+ pmap_resident_count_inc(pmap, 1);
+
+ return (m);
+}
+
+static pd_entry_t *
+pmap_alloc_l2(pmap_t pmap, vm_offset_t va, vm_page_t *l2pgp,
+ struct rwlock **lockp)
+{
+ pd_entry_t *l1, *l2;
+ vm_page_t l2pg;
+ vm_pindex_t l2pindex;
+
+retry:
+ l1 = pmap_l1(pmap, va);
+ if (l1 != NULL && (pmap_load(l1) & ATTR_DESCR_MASK) == L1_TABLE) {
+ l2 = pmap_l1_to_l2(l1, va);
+ if (va < VM_MAXUSER_ADDRESS) {
+ /* Add a reference to the L2 page. */
+ l2pg = PHYS_TO_VM_PAGE(pmap_load(l1) & ~ATTR_MASK);
+ l2pg->ref_count++;
+ } else
+ l2pg = NULL;
+ } else if (va < VM_MAXUSER_ADDRESS) {
+ /* Allocate a L2 page. */
+ l2pindex = pmap_l2_pindex(va) >> Ln_ENTRIES_SHIFT;
+ l2pg = _pmap_alloc_l3(pmap, NUL2E + l2pindex, lockp);
+ if (l2pg == NULL) {
+ if (lockp != NULL)
+ goto retry;
+ else
+ return (NULL);
+ }
+ l2 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(l2pg));
+ l2 = &l2[pmap_l2_index(va)];
+ } else
+ panic("pmap_alloc_l2: missing page table page for va %#lx",
+ va);
+ *l2pgp = l2pg;
+ return (l2);
+}
+
+static vm_page_t
+pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
+{
+ vm_pindex_t ptepindex;
+ pd_entry_t *pde, tpde;
+#ifdef INVARIANTS
+ pt_entry_t *pte;
+#endif
+ vm_page_t m;
+ int lvl;
+
+ /*
+ * Calculate pagetable page index
+ */
+ ptepindex = pmap_l2_pindex(va);
+retry:
+ /*
+ * Get the page directory entry
+ */
+ pde = pmap_pde(pmap, va, &lvl);
+
+ /*
+ * If the page table page is mapped, we just increment the hold count,
+ * and activate it. If we get a level 2 pde it will point to a level 3
+ * table.
+ */
+ switch (lvl) {
+ case -1:
+ break;
+ case 0:
+#ifdef INVARIANTS
+ pte = pmap_l0_to_l1(pde, va);
+ KASSERT(pmap_load(pte) == 0,
+ ("pmap_alloc_l3: TODO: l0 superpages"));
+#endif
+ break;
+ case 1:
+#ifdef INVARIANTS
+ pte = pmap_l1_to_l2(pde, va);
+ KASSERT(pmap_load(pte) == 0,
+ ("pmap_alloc_l3: TODO: l1 superpages"));
+#endif
+ break;
+ case 2:
+ tpde = pmap_load(pde);
+ if (tpde != 0) {
+ m = PHYS_TO_VM_PAGE(tpde & ~ATTR_MASK);
+ m->ref_count++;
+ return (m);
+ }
+ break;
+ default:
+ panic("pmap_alloc_l3: Invalid level %d", lvl);
+ }
+
+ /*
+ * We get here if the pte page isn't mapped, or if it has been deallocated.
+ */
+ m = _pmap_alloc_l3(pmap, ptepindex, lockp);
+ if (m == NULL && lockp != NULL)
+ goto retry;
+
+ return (m);
+}
+
+/***************************************************
+ * Pmap allocation/deallocation routines.
+ ***************************************************/
+
+/*
+ * Release any resources held by the given physical map.
+ * Called when a pmap initialized by pmap_pinit is being released.
+ * Should only be called if the map contains no valid mappings.
+ */
+void
+pmap_release(pmap_t pmap)
+{
+ struct asid_set *set;
+ vm_page_t m;
+ int asid;
+
+ KASSERT(pmap->pm_stats.resident_count == 0,
+ ("pmap_release: pmap resident count %ld != 0",
+ pmap->pm_stats.resident_count));
+ KASSERT(vm_radix_is_empty(&pmap->pm_root),
+ ("pmap_release: pmap has reserved page table page(s)"));
+
+ set = pmap->pm_asid_set;
+ KASSERT(set != NULL, ("%s: NULL asid set", __func__));
+
+ /*
+ * Allow the ASID to be reused.  For stage 2 VMIDs we don't invalidate
+ * the TLB entries when removing them, so we rely on a later TLB
+ * invalidation, which happens when the VMID generation is updated.
+ * Because of this we don't reuse VMIDs within a generation.
+ */
+ if (pmap->pm_stage == PM_STAGE1) {
+ mtx_lock_spin(&set->asid_set_mutex);
+ if (COOKIE_TO_EPOCH(pmap->pm_cookie) == set->asid_epoch) {
+ asid = COOKIE_TO_ASID(pmap->pm_cookie);
+ KASSERT(asid >= ASID_FIRST_AVAILABLE &&
+ asid < set->asid_set_size,
+ ("pmap_release: pmap cookie has out-of-range asid"));
+ bit_clear(set->asid_set, asid);
+ }
+ mtx_unlock_spin(&set->asid_set_mutex);
+ }
+
+ m = PHYS_TO_VM_PAGE(pmap->pm_l0_paddr);
+ vm_page_unwire_noq(m);
+ vm_page_free_zero(m);
+}
+
+static int
+kvm_size(SYSCTL_HANDLER_ARGS)
+{
+ unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
+
+ return sysctl_handle_long(oidp, &ksize, 0, req);
+}
+SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ 0, 0, kvm_size, "LU",
+ "Size of KVM");
+
+static int
+kvm_free(SYSCTL_HANDLER_ARGS)
+{
+ unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
+
+ return sysctl_handle_long(oidp, &kfree, 0, req);
+}
+SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ 0, 0, kvm_free, "LU",
+ "Amount of KVM free");
+
+/*
+ * grow the number of kernel page table entries, if needed
+ */
+void
+pmap_growkernel(vm_offset_t addr)
+{
+ vm_paddr_t paddr;
+ vm_page_t nkpg;
+ pd_entry_t *l0, *l1, *l2;
+
+ mtx_assert(&kernel_map->system_mtx, MA_OWNED);
+
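+	/*
+	 * Kernel VA is grown in 2MB (L2_SIZE) steps; each step allocates a
+	 * new L3 page table page for the corresponding L2 entry.
+	 */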
+ addr = roundup2(addr, L2_SIZE);
+ if (addr - 1 >= vm_map_max(kernel_map))
+ addr = vm_map_max(kernel_map);
+ while (kernel_vm_end < addr) {
+ l0 = pmap_l0(kernel_pmap, kernel_vm_end);
+ KASSERT(pmap_load(l0) != 0,
+ ("pmap_growkernel: No level 0 kernel entry"));
+
+ l1 = pmap_l0_to_l1(l0, kernel_vm_end);
+ if (pmap_load(l1) == 0) {
+ /* We need a new L1 table entry */
+ nkpg = vm_page_alloc(NULL, kernel_vm_end >> L1_SHIFT,
+ VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
+ VM_ALLOC_WIRED | VM_ALLOC_ZERO);
+ if (nkpg == NULL)
+ panic("pmap_growkernel: no memory to grow kernel");
+ if ((nkpg->flags & PG_ZERO) == 0)
+ pmap_zero_page(nkpg);
+ /* See the dmb() in _pmap_alloc_l3(). */
+ dmb(ishst);
+ paddr = VM_PAGE_TO_PHYS(nkpg);
+ pmap_store(l1, paddr | L1_TABLE);
+ continue; /* try again */
+ }
+ l2 = pmap_l1_to_l2(l1, kernel_vm_end);
+ if (pmap_load(l2) != 0) {
+ kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
+ if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+ kernel_vm_end = vm_map_max(kernel_map);
+ break;
+ }
+ continue;
+ }
+
+ nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_SHIFT,
+ VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
+ VM_ALLOC_ZERO);
+ if (nkpg == NULL)
+ panic("pmap_growkernel: no memory to grow kernel");
+ if ((nkpg->flags & PG_ZERO) == 0)
+ pmap_zero_page(nkpg);
+ /* See the dmb() in _pmap_alloc_l3(). */
+ dmb(ishst);
+ paddr = VM_PAGE_TO_PHYS(nkpg);
+ pmap_store(l2, paddr | L2_TABLE);
+
+ kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
+ if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+ kernel_vm_end = vm_map_max(kernel_map);
+ break;
+ }
+ }
+}
+
+/***************************************************
+ * page management routines.
+ ***************************************************/
+
+CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
+CTASSERT(_NPCM == 3);
+CTASSERT(_NPCPV == 168);
+
+static __inline struct pv_chunk *
+pv_to_chunk(pv_entry_t pv)
+{
+
+ return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
+}
+
+#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
+
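+/*
+ * Bitmasks of free pv entries within a chunk: _NPCPV == 168 entries are
+ * tracked in three 64-bit words (64 + 64 + 40 bits).
+ */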
+#define PC_FREE0 0xfffffffffffffffful
+#define PC_FREE1 0xfffffffffffffffful
+#define PC_FREE2 0x000000fffffffffful
+
+static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
+
+#if 0
+#ifdef PV_STATS
+static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
+
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
+ "Current number of pv entry chunks");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
+ "Current number of pv entry chunks allocated");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
+ "Current number of pv entry chunks frees");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
+ "Number of times tried to get a chunk page but failed.");
+
+static long pv_entry_frees, pv_entry_allocs, pv_entry_count;
+static int pv_entry_spare;
+
+SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
+ "Current number of pv entry frees");
+SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
+ "Current number of pv entry allocs");
+SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
+ "Current number of pv entries");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
+ "Current number of spare pv entries");
+#endif
+#endif /* 0 */
+
+/*
+ * We are in a serious low memory condition. Resort to
+ * drastic measures to free some pages so we can allocate
+ * another pv entry chunk.
+ *
+ * Returns NULL if PV entries were reclaimed from the specified pmap.
+ *
+ * We do not, however, unmap 2mpages because subsequent accesses will
+ * allocate per-page pv entries until repromotion occurs, thereby
+ * exacerbating the shortage of free pv entries.
+ */
+static vm_page_t
+reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
+{
+ struct pv_chunk *pc, *pc_marker, *pc_marker_end;
+ struct pv_chunk_header pc_marker_b, pc_marker_end_b;
+ struct md_page *pvh;
+ pd_entry_t *pde;
+ pmap_t next_pmap, pmap;
+ pt_entry_t *pte, tpte;
+ pv_entry_t pv;
+ vm_offset_t va;
+ vm_page_t m, m_pc;
+ struct spglist free;
+ uint64_t inuse;
+ int bit, field, freed, lvl;
+ static int active_reclaims = 0;
+
+ PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
+ KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
+
+ pmap = NULL;
+ m_pc = NULL;
+ SLIST_INIT(&free);
+ bzero(&pc_marker_b, sizeof(pc_marker_b));
+ bzero(&pc_marker_end_b, sizeof(pc_marker_end_b));
+ pc_marker = (struct pv_chunk *)&pc_marker_b;
+ pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
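+	/*
+	 * The markers bracket our traversal of the LRU-ordered pv_chunks
+	 * list: pc_marker tracks our current position across drops of
+	 * pv_chunks_mutex, and pc_marker_end keeps chunks added after we
+	 * start from being scanned.
+	 */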
+
+ mtx_lock(&pv_chunks_mutex);
+ active_reclaims++;
+ TAILQ_INSERT_HEAD(&pv_chunks, pc_marker, pc_lru);
+ TAILQ_INSERT_TAIL(&pv_chunks, pc_marker_end, pc_lru);
+ while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end &&
+ SLIST_EMPTY(&free)) {
+ next_pmap = pc->pc_pmap;
+ if (next_pmap == NULL) {
+ /*
+ * The next chunk is a marker. However, it is
+ * not our marker, so active_reclaims must be
+ * > 1. Consequently, the next_chunk code
+ * will not rotate the pv_chunks list.
+ */
+ goto next_chunk;
+ }
+ mtx_unlock(&pv_chunks_mutex);
+
+ /*
+ * A pv_chunk can only be removed from the pc_lru list
+ * when both pv_chunks_mutex is owned and the
+ * corresponding pmap is locked.
+ */
+ if (pmap != next_pmap) {
+ if (pmap != NULL && pmap != locked_pmap)
+ PMAP_UNLOCK(pmap);
+ pmap = next_pmap;
+ /* Avoid deadlock and lock recursion. */
+ if (pmap > locked_pmap) {
+ RELEASE_PV_LIST_LOCK(lockp);
+ PMAP_LOCK(pmap);
+ mtx_lock(&pv_chunks_mutex);
+ continue;
+ } else if (pmap != locked_pmap) {
+ if (PMAP_TRYLOCK(pmap)) {
+ mtx_lock(&pv_chunks_mutex);
+ continue;
+ } else {
+ pmap = NULL; /* pmap is not locked */
+ mtx_lock(&pv_chunks_mutex);
+ pc = TAILQ_NEXT(pc_marker, pc_lru);
+ if (pc == NULL ||
+ pc->pc_pmap != next_pmap)
+ continue;
+ goto next_chunk;
+ }
+ }
+ }
+
+ /*
+ * Destroy every non-wired, 4 KB page mapping in the chunk.
+ */
+ freed = 0;
+ for (field = 0; field < _NPCM; field++) {
+ for (inuse = ~pc->pc_map[field] & pc_freemask[field];
+ inuse != 0; inuse &= ~(1UL << bit)) {
+ bit = ffsl(inuse) - 1;
+ pv = &pc->pc_pventry[field * 64 + bit];
+ va = pv->pv_va;
+ pde = pmap_pde(pmap, va, &lvl);
+ if (lvl != 2)
+ continue;
+ pte = pmap_l2_to_l3(pde, va);
+ tpte = pmap_load(pte);
+ if ((tpte & ATTR_SW_WIRED) != 0)
+ continue;
+ tpte = pmap_load_clear(pte);
+ m = PHYS_TO_VM_PAGE(tpte & ~ATTR_MASK);
+ if (pmap_pte_dirty(pmap, tpte))
+ vm_page_dirty(m);
+ if ((tpte & ATTR_AF) != 0) {
+ pmap_invalidate_page(pmap, va);
+ vm_page_aflag_set(m, PGA_REFERENCED);
+ }
+ CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
+ m->md.pv_gen++;
+ if (TAILQ_EMPTY(&m->md.pv_list) &&
+ (m->flags & PG_FICTITIOUS) == 0) {
+ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ if (TAILQ_EMPTY(&pvh->pv_list)) {
+ vm_page_aflag_clear(m,
+ PGA_WRITEABLE);
+ }
+ }
+ pc->pc_map[field] |= 1UL << bit;
+ pmap_unuse_pt(pmap, va, pmap_load(pde), &free);
+ freed++;
+ }
+ }
+ if (freed == 0) {
+ mtx_lock(&pv_chunks_mutex);
+ goto next_chunk;
+ }
+ /* Every freed mapping is for a 4 KB page. */
+ pmap_resident_count_dec(pmap, freed);
+ PV_STAT(atomic_add_long(&pv_entry_frees, freed));
+ PV_STAT(atomic_add_int(&pv_entry_spare, freed));
+ PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1 &&
+ pc->pc_map[2] == PC_FREE2) {
+ PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
+ PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
+ PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
+ /* Entire chunk is free; return it. */
+ m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
+ dump_drop_page(m_pc->phys_addr);
+ mtx_lock(&pv_chunks_mutex);
+ TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
+ break;
+ }
+ TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
+ mtx_lock(&pv_chunks_mutex);
+ /* One freed pv entry in locked_pmap is sufficient. */
+ if (pmap == locked_pmap)
+ break;
+
+next_chunk:
+ TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
+ TAILQ_INSERT_AFTER(&pv_chunks, pc, pc_marker, pc_lru);
+ if (active_reclaims == 1 && pmap != NULL) {
+ /*
+ * Rotate the pv chunks list so that we do not
+ * scan the same pv chunks that could not be
+ * freed (because they contained a wired
+ * and/or superpage mapping) on every
+ * invocation of reclaim_pv_chunk().
+ */
+ while ((pc = TAILQ_FIRST(&pv_chunks)) != pc_marker) {
+ MPASS(pc->pc_pmap != NULL);
+ TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
+ TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
+ }
+ }
+ }
+ TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
+ TAILQ_REMOVE(&pv_chunks, pc_marker_end, pc_lru);
+ active_reclaims--;
+ mtx_unlock(&pv_chunks_mutex);
+ if (pmap != NULL && pmap != locked_pmap)
+ PMAP_UNLOCK(pmap);
+ if (m_pc == NULL && !SLIST_EMPTY(&free)) {
+ m_pc = SLIST_FIRST(&free);
+ SLIST_REMOVE_HEAD(&free, plinks.s.ss);
+ /* Recycle a freed page table page. */
+ m_pc->ref_count = 1;
+ }
+ vm_page_free_pages_toq(&free, true);
+ return (m_pc);
+}
+
+/*
+ * free the pv_entry back to the free list
+ */
+static void
+free_pv_entry(pmap_t pmap, pv_entry_t pv)
+{
+ struct pv_chunk *pc;
+ int idx, field, bit;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ PV_STAT(atomic_add_long(&pv_entry_frees, 1));
+ PV_STAT(atomic_add_int(&pv_entry_spare, 1));
+ PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
+ pc = pv_to_chunk(pv);
+ idx = pv - &pc->pc_pventry[0];
+ field = idx / 64;
+ bit = idx % 64;
+ pc->pc_map[field] |= 1ul << bit;
+ if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
+ pc->pc_map[2] != PC_FREE2) {
+ /* 98% of the time, pc is already at the head of the list. */
+ if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
+ }
+ return;
+ }
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ free_pv_chunk(pc);
+}
+
+static void
+free_pv_chunk(struct pv_chunk *pc)
+{
+ vm_page_t m;
+
+ mtx_lock(&pv_chunks_mutex);
+ TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
+ mtx_unlock(&pv_chunks_mutex);
+ PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
+ PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
+ PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
+ /* entire chunk is free, return it */
+ m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
+ dump_drop_page(m->phys_addr);
+ vm_page_unwire_noq(m);
+ vm_page_free(m);
+}
+
+/*
+ * Returns a new PV entry, allocating a new PV chunk from the system when
+ * needed. If this PV chunk allocation fails and a PV list lock pointer was
+ * given, a PV chunk is reclaimed from an arbitrary pmap. Otherwise, NULL is
+ * returned.
+ *
+ * The given PV list lock may be released.
+ */
+static pv_entry_t
+get_pv_entry(pmap_t pmap, struct rwlock **lockp)
+{
+ int bit, field;
+ pv_entry_t pv;
+ struct pv_chunk *pc;
+ vm_page_t m;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
+retry:
+ pc = TAILQ_FIRST(&pmap->pm_pvchunk);
+ if (pc != NULL) {
+ for (field = 0; field < _NPCM; field++) {
+ if (pc->pc_map[field]) {
+ bit = ffsl(pc->pc_map[field]) - 1;
+ break;
+ }
+ }
+ if (field < _NPCM) {
+ pv = &pc->pc_pventry[field * 64 + bit];
+ pc->pc_map[field] &= ~(1ul << bit);
+ /* If this was the last item, move it to tail */
+ if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
+ pc->pc_map[2] == 0) {
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
+ pc_list);
+ }
+ PV_STAT(atomic_add_long(&pv_entry_count, 1));
+ PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
+ return (pv);
+ }
+ }
+ /* No free items, allocate another chunk */
+ m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
+ VM_ALLOC_WIRED);
+ if (m == NULL) {
+ if (lockp == NULL) {
+ PV_STAT(pc_chunk_tryfail++);
+ return (NULL);
+ }
+ m = reclaim_pv_chunk(pmap, lockp);
+ if (m == NULL)
+ goto retry;
+ }
+ PV_STAT(atomic_add_int(&pc_chunk_count, 1));
+ PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
+ dump_add_page(m->phys_addr);
+ pc = (void *)PHYS_TO_DMAP(m->phys_addr);
+ pc->pc_pmap = pmap;
+ pc->pc_map[0] = PC_FREE0 & ~1ul; /* preallocated bit 0 */
+ pc->pc_map[1] = PC_FREE1;
+ pc->pc_map[2] = PC_FREE2;
+ mtx_lock(&pv_chunks_mutex);
+ TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
+ mtx_unlock(&pv_chunks_mutex);
+ pv = &pc->pc_pventry[0];
+ TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
+ PV_STAT(atomic_add_long(&pv_entry_count, 1));
+ PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
+ return (pv);
+}
+
+/*
+ * Ensure that the number of spare PV entries in the specified pmap meets or
+ * exceeds the given count, "needed".
+ *
+ * The given PV list lock may be released.
+ */
+static void
+reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
+{
+ struct pch new_tail;
+ struct pv_chunk *pc;
+ vm_page_t m;
+ int avail, free;
+ bool reclaimed;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
+
+ /*
+ * Newly allocated PV chunks must be kept off the global pv_chunks list
+ * until the required number of PV chunks have been allocated.
+ * Otherwise, reclaim_pv_chunk() could recycle one of these chunks.
+ * They must, however, be added to the pmap's chunk list immediately
+ * upon allocation.
+ */
+ TAILQ_INIT(&new_tail);
+retry:
+ avail = 0;
+ TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
+ bit_count((bitstr_t *)pc->pc_map, 0,
+ sizeof(pc->pc_map) * NBBY, &free);
+ if (free == 0)
+ break;
+ avail += free;
+ if (avail >= needed)
+ break;
+ }
+ for (reclaimed = false; avail < needed; avail += _NPCPV) {
+ m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
+ VM_ALLOC_WIRED);
+ if (m == NULL) {
+ m = reclaim_pv_chunk(pmap, lockp);
+ if (m == NULL)
+ goto retry;
+ reclaimed = true;
+ }
+ PV_STAT(atomic_add_int(&pc_chunk_count, 1));
+ PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
+ dump_add_page(m->phys_addr);
+ pc = (void *)PHYS_TO_DMAP(m->phys_addr);
+ pc->pc_pmap = pmap;
+ pc->pc_map[0] = PC_FREE0;
+ pc->pc_map[1] = PC_FREE1;
+ pc->pc_map[2] = PC_FREE2;
+ TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
+ TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
+ PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
+
+ /*
+ * The reclaim might have freed a chunk from the current pmap.
+ * If that chunk contained available entries, we need to
+ * re-count the number of available entries.
+ */
+ if (reclaimed)
+ goto retry;
+ }
+ if (!TAILQ_EMPTY(&new_tail)) {
+ mtx_lock(&pv_chunks_mutex);
+ TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
+ mtx_unlock(&pv_chunks_mutex);
+ }
+}
+
+/*
+ * First find and then remove the pv entry for the specified pmap and virtual
+ * address from the specified pv list. Returns the pv entry if found and NULL
+ * otherwise. This operation can be performed on pv lists for either 4KB or
+ * 2MB page mappings.
+ */
+static __inline pv_entry_t
+pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
+{
+ pv_entry_t pv;
+
+ TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
+ if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
+ TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
+ pvh->pv_gen++;
+ break;
+ }
+ }
+ return (pv);
+}
+
+/*
+ * After demotion from a 2MB page mapping to 512 4KB page mappings,
+ * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
+ * entries for each of the 4KB page mappings.
+ */
+static void
+pmap_pv_demote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
+ struct rwlock **lockp)
+{
+ struct md_page *pvh;
+ struct pv_chunk *pc;
+ pv_entry_t pv;
+ vm_offset_t va_last;
+ vm_page_t m;
+ int bit, field;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ KASSERT((va & L2_OFFSET) == 0,
+ ("pmap_pv_demote_l2: va is not 2mpage aligned"));
+ KASSERT((pa & L2_OFFSET) == 0,
+ ("pmap_pv_demote_l2: pa is not 2mpage aligned"));
+ CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
+
+ /*
+ * Transfer the 2mpage's pv entry for this mapping to the first
+ * page's pv list. Once this transfer begins, the pv list lock
+ * must not be released until the last pv entry is reinstantiated.
+ */
+ pvh = pa_to_pvh(pa);
+ pv = pmap_pvh_remove(pvh, pmap, va);
+ KASSERT(pv != NULL, ("pmap_pv_demote_l2: pv not found"));
+ m = PHYS_TO_VM_PAGE(pa);
+ TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
+ m->md.pv_gen++;
+ /* Instantiate the remaining Ln_ENTRIES - 1 pv entries. */
+ PV_STAT(atomic_add_long(&pv_entry_allocs, Ln_ENTRIES - 1));
+ va_last = va + L2_SIZE - PAGE_SIZE;
+ for (;;) {
+ pc = TAILQ_FIRST(&pmap->pm_pvchunk);
+ KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0 ||
+ pc->pc_map[2] != 0, ("pmap_pv_demote_l2: missing spare"));
+ for (field = 0; field < _NPCM; field++) {
+ while (pc->pc_map[field]) {
+ bit = ffsl(pc->pc_map[field]) - 1;
+ pc->pc_map[field] &= ~(1ul << bit);
+ pv = &pc->pc_pventry[field * 64 + bit];
+ va += PAGE_SIZE;
+ pv->pv_va = va;
+ m++;
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_pv_demote_l2: page %p is not managed", m));
+ TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
+ m->md.pv_gen++;
+ if (va == va_last)
+ goto out;
+ }
+ }
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
+ }
+out:
+ if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) {
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
+ }
+ PV_STAT(atomic_add_long(&pv_entry_count, Ln_ENTRIES - 1));
+ PV_STAT(atomic_subtract_int(&pv_entry_spare, Ln_ENTRIES - 1));
+}
+
+/*
+ * First find and then destroy the pv entry for the specified pmap and virtual
+ * address. This operation can be performed on pv lists for either 4KB or 2MB
+ * page mappings.
+ */
+static void
+pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
+{
+ pv_entry_t pv;
+
+ pv = pmap_pvh_remove(pvh, pmap, va);
+ KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
+ free_pv_entry(pmap, pv);
+}
+
+/*
+ * Conditionally create the PV entry for a 4KB page mapping if the required
+ * memory can be allocated without resorting to reclamation.
+ */
+static boolean_t
+pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
+ struct rwlock **lockp)
+{
+ pv_entry_t pv;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ /* Pass NULL instead of the lock pointer to disable reclamation. */
+ if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
+ pv->pv_va = va;
+ CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
+ TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
+ m->md.pv_gen++;
+ return (TRUE);
+ } else
+ return (FALSE);
+}
+
+/*
+ * Create the PV entry for a 2MB page mapping. Always returns true unless the
+ * flag PMAP_ENTER_NORECLAIM is specified. If that flag is specified, returns
+ * false if the PV entry cannot be allocated without resorting to reclamation.
+ */
+static bool
+pmap_pv_insert_l2(pmap_t pmap, vm_offset_t va, pd_entry_t l2e, u_int flags,
+ struct rwlock **lockp)
+{
+ struct md_page *pvh;
+ pv_entry_t pv;
+ vm_paddr_t pa;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ /* Pass NULL instead of the lock pointer to disable reclamation. */
+ if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
+ NULL : lockp)) == NULL)
+ return (false);
+ pv->pv_va = va;
+ pa = l2e & ~ATTR_MASK;
+ CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
+ pvh = pa_to_pvh(pa);
+ TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
+ pvh->pv_gen++;
+ return (true);
+}
+
+static void
+pmap_remove_kernel_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
+{
+ pt_entry_t newl2, oldl2;
+ vm_page_t ml3;
+ vm_paddr_t ml3pa;
+
+ KASSERT(!VIRT_IN_DMAP(va), ("removing direct mapping of %#lx", va));
+ KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+
+ ml3 = pmap_remove_pt_page(pmap, va);
+ if (ml3 == NULL)
+ panic("pmap_remove_kernel_l2: Missing pt page");
+
+ ml3pa = VM_PAGE_TO_PHYS(ml3);
+ newl2 = ml3pa | L2_TABLE;
+
+ /*
+ * If this page table page was unmapped by a promotion, then it
+ * contains valid mappings. Zero it to invalidate those mappings.
+ */
+ if (ml3->valid != 0)
+ pagezero((void *)PHYS_TO_DMAP(ml3pa));
+
+ /*
+ * Demote the mapping. The caller must have already invalidated the
+ * mapping (i.e., the "break" in break-before-make).
+ */
+ oldl2 = pmap_load_store(l2, newl2);
+ KASSERT(oldl2 == 0, ("%s: found existing mapping at %p: %#lx",
+ __func__, l2, oldl2));
+}
+
+/*
+ * pmap_remove_l2: Unmap a level 2 (2MB) superpage mapping.
+ */
+static int
+pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
+ pd_entry_t l1e, struct spglist *free, struct rwlock **lockp)
+{
+ struct md_page *pvh;
+ pt_entry_t old_l2;
+ vm_offset_t eva, va;
+ vm_page_t m, ml3;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ KASSERT((sva & L2_OFFSET) == 0, ("pmap_remove_l2: sva is not aligned"));
+ old_l2 = pmap_load_clear(l2);
+ KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK,
+ ("pmap_remove_l2: L2e %lx is not a block mapping", old_l2));
+
+ /*
+ * Since a promotion must break the 4KB page mappings before making
+ * the 2MB page mapping, a pmap_invalidate_page() suffices.
+ */
+ pmap_invalidate_page(pmap, sva);
+
+ if (old_l2 & ATTR_SW_WIRED)
+ pmap->pm_stats.wired_count -= L2_SIZE / PAGE_SIZE;
+ pmap_resident_count_dec(pmap, L2_SIZE / PAGE_SIZE);
+ if (old_l2 & ATTR_SW_MANAGED) {
+ CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, old_l2 & ~ATTR_MASK);
+ pvh = pa_to_pvh(old_l2 & ~ATTR_MASK);
+ pmap_pvh_free(pvh, pmap, sva);
+ eva = sva + L2_SIZE;
+ for (va = sva, m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK);
+ va < eva; va += PAGE_SIZE, m++) {
+ if (pmap_pte_dirty(pmap, old_l2))
+ vm_page_dirty(m);
+ if (old_l2 & ATTR_AF)
+ vm_page_aflag_set(m, PGA_REFERENCED);
+ if (TAILQ_EMPTY(&m->md.pv_list) &&
+ TAILQ_EMPTY(&pvh->pv_list))
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
+ }
+ }
+ if (pmap == kernel_pmap) {
+ pmap_remove_kernel_l2(pmap, l2, sva);
+ } else {
+ ml3 = pmap_remove_pt_page(pmap, sva);
+ if (ml3 != NULL) {
+ KASSERT(ml3->valid == VM_PAGE_BITS_ALL,
+ ("pmap_remove_l2: l3 page not promoted"));
+ pmap_resident_count_dec(pmap, 1);
+ KASSERT(ml3->ref_count == NL3PG,
+ ("pmap_remove_l2: l3 page ref count error"));
+ ml3->ref_count = 0;
+ pmap_add_delayed_free_list(ml3, free, FALSE);
+ }
+ }
+ return (pmap_unuse_pt(pmap, sva, l1e, free));
+}
+
+/*
+ * pmap_remove_l3: Unmap a single 4KB page in a process.
+ */
+static int
+pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
+ pd_entry_t l2e, struct spglist *free, struct rwlock **lockp)
+{
+ struct md_page *pvh;
+ pt_entry_t old_l3;
+ vm_page_t m;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ old_l3 = pmap_load_clear(l3);
+ pmap_invalidate_page(pmap, va);
+ if (old_l3 & ATTR_SW_WIRED)
+ pmap->pm_stats.wired_count -= 1;
+ pmap_resident_count_dec(pmap, 1);
+ if (old_l3 & ATTR_SW_MANAGED) {
+ m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
+ if (pmap_pte_dirty(pmap, old_l3))
+ vm_page_dirty(m);
+ if (old_l3 & ATTR_AF)
+ vm_page_aflag_set(m, PGA_REFERENCED);
+ CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
+ pmap_pvh_free(&m->md, pmap, va);
+ if (TAILQ_EMPTY(&m->md.pv_list) &&
+ (m->flags & PG_FICTITIOUS) == 0) {
+ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ if (TAILQ_EMPTY(&pvh->pv_list))
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
+ }
+ }
+ return (pmap_unuse_pt(pmap, va, l2e, free));
+}
+
+/*
+ * Remove the specified range of addresses from the L3 page table that is
+ * identified by the given L2 entry.
+ */
+static void
+pmap_remove_l3_range(pmap_t pmap, pd_entry_t l2e, vm_offset_t sva,
+ vm_offset_t eva, struct spglist *free, struct rwlock **lockp)
+{
+ struct md_page *pvh;
+ struct rwlock *new_lock;
+ pt_entry_t *l3, old_l3;
+ vm_offset_t va;
+ vm_page_t l3pg, m;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ KASSERT(rounddown2(sva, L2_SIZE) + L2_SIZE == roundup2(eva, L2_SIZE),
+ ("pmap_remove_l3_range: range crosses an L3 page table boundary"));
+ l3pg = sva < VM_MAXUSER_ADDRESS ? PHYS_TO_VM_PAGE(l2e & ~ATTR_MASK) :
+ NULL;
+ va = eva;
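+	/*
+	 * "va" tracks the start of a pending, not-yet-flushed range of
+	 * removed mappings; va == eva means no invalidation is pending.
+	 */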
+ for (l3 = pmap_l2_to_l3(&l2e, sva); sva != eva; l3++, sva += L3_SIZE) {
+ if (!pmap_l3_valid(pmap_load(l3))) {
+ if (va != eva) {
+ pmap_invalidate_range(pmap, va, sva);
+ va = eva;
+ }
+ continue;
+ }
+ old_l3 = pmap_load_clear(l3);
+ if ((old_l3 & ATTR_SW_WIRED) != 0)
+ pmap->pm_stats.wired_count--;
+ pmap_resident_count_dec(pmap, 1);
+ if ((old_l3 & ATTR_SW_MANAGED) != 0) {
+ m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
+ if (pmap_pte_dirty(pmap, old_l3))
+ vm_page_dirty(m);
+ if ((old_l3 & ATTR_AF) != 0)
+ vm_page_aflag_set(m, PGA_REFERENCED);
+ new_lock = PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m));
+ if (new_lock != *lockp) {
+ if (*lockp != NULL) {
+ /*
+ * Pending TLB invalidations must be
+ * performed before the PV list lock is
+ * released. Otherwise, a concurrent
+ * pmap_remove_all() on a physical page
+ * could return while a stale TLB entry
+ * still provides access to that page.
+ */
+ if (va != eva) {
+ pmap_invalidate_range(pmap, va,
+ sva);
+ va = eva;
+ }
+ rw_wunlock(*lockp);
+ }
+ *lockp = new_lock;
+ rw_wlock(*lockp);
+ }
+ pmap_pvh_free(&m->md, pmap, sva);
+ if (TAILQ_EMPTY(&m->md.pv_list) &&
+ (m->flags & PG_FICTITIOUS) == 0) {
+ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ if (TAILQ_EMPTY(&pvh->pv_list))
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
+ }
+ }
+ if (va == eva)
+ va = sva;
+ if (l3pg != NULL && pmap_unwire_l3(pmap, sva, l3pg, free)) {
+ sva += L3_SIZE;
+ break;
+ }
+ }
+ if (va != eva)
+ pmap_invalidate_range(pmap, va, sva);
+}
+
+/*
+ * Remove the given range of addresses from the specified map.
+ *
+ * It is assumed that the start and end are properly
+ * rounded to the page size.
+ */
+void
+pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+ struct rwlock *lock;
+ vm_offset_t va_next;
+ pd_entry_t *l0, *l1, *l2;
+ pt_entry_t l3_paddr;
+ struct spglist free;
+
+ /*
+ * Perform an unsynchronized read. This is, however, safe.
+ */
+ if (pmap->pm_stats.resident_count == 0)
+ return;
+
+ SLIST_INIT(&free);
+
+ PMAP_LOCK(pmap);
+
+ lock = NULL;
+ for (; sva < eva; sva = va_next) {
+ if (pmap->pm_stats.resident_count == 0)
+ break;
+
+ l0 = pmap_l0(pmap, sva);
+ if (pmap_load(l0) == 0) {
+ va_next = (sva + L0_SIZE) & ~L0_OFFSET;
+ if (va_next < sva)
+ va_next = eva;
+ continue;
+ }
+
+ l1 = pmap_l0_to_l1(l0, sva);
+ if (pmap_load(l1) == 0) {
+ va_next = (sva + L1_SIZE) & ~L1_OFFSET;
+ if (va_next < sva)
+ va_next = eva;
+ continue;
+ }
+
+ /*
+ * Calculate index for next page table.
+ */
+ va_next = (sva + L2_SIZE) & ~L2_OFFSET;
+ if (va_next < sva)
+ va_next = eva;
+
+ l2 = pmap_l1_to_l2(l1, sva);
+ if (l2 == NULL)
+ continue;
+
+ l3_paddr = pmap_load(l2);
+
+ if ((l3_paddr & ATTR_DESCR_MASK) == L2_BLOCK) {
+ if (sva + L2_SIZE == va_next && eva >= va_next) {
+ pmap_remove_l2(pmap, l2, sva, pmap_load(l1),
+ &free, &lock);
+ continue;
+ } else if (pmap_demote_l2_locked(pmap, l2, sva,
+ &lock) == NULL)
+ continue;
+ l3_paddr = pmap_load(l2);
+ }
+
+ /*
+ * Weed out invalid mappings.
+ */
+ if ((l3_paddr & ATTR_DESCR_MASK) != L2_TABLE)
+ continue;
+
+ /*
+ * Limit our scan to either the end of the va represented
+ * by the current page table page, or to the end of the
+ * range being removed.
+ */
+ if (va_next > eva)
+ va_next = eva;
+
+ pmap_remove_l3_range(pmap, l3_paddr, sva, va_next, &free,
+ &lock);
+ }
+ if (lock != NULL)
+ rw_wunlock(lock);
+ PMAP_UNLOCK(pmap);
+ vm_page_free_pages_toq(&free, true);
+}
+
+/*
+ * Routine: pmap_remove_all
+ * Function:
+ * Removes this physical page from
+ * all physical maps in which it resides.
+ * Reflects back modify bits to the pager.
+ *
+ * Notes:
+ * Original versions of this routine were very
+ * inefficient because they iteratively called
+ * pmap_remove (slow...)
+ */
+
+void
+pmap_remove_all(vm_page_t m)
+{
+ struct md_page *pvh;
+ pv_entry_t pv;
+ pmap_t pmap;
+ struct rwlock *lock;
+ pd_entry_t *pde, tpde;
+ pt_entry_t *pte, tpte;
+ vm_offset_t va;
+ struct spglist free;
+ int lvl, pvh_gen, md_gen;
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_remove_all: page %p is not managed", m));
+ SLIST_INIT(&free);
+ lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+ pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
+ pa_to_pvh(VM_PAGE_TO_PHYS(m));
+retry:
+ rw_wlock(lock);
+ while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
+ pmap = PV_PMAP(pv);
+ if (!PMAP_TRYLOCK(pmap)) {
+ pvh_gen = pvh->pv_gen;
+ rw_wunlock(lock);
+ PMAP_LOCK(pmap);
+ rw_wlock(lock);
+ if (pvh_gen != pvh->pv_gen) {
+ rw_wunlock(lock);
+ PMAP_UNLOCK(pmap);
+ goto retry;
+ }
+ }
+ va = pv->pv_va;
+ pte = pmap_pte(pmap, va, &lvl);
+ KASSERT(pte != NULL,
+ ("pmap_remove_all: no page table entry found"));
+ KASSERT(lvl == 2,
+ ("pmap_remove_all: invalid pte level %d", lvl));
+
+ pmap_demote_l2_locked(pmap, pte, va, &lock);
+ PMAP_UNLOCK(pmap);
+ }
+ while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
+ pmap = PV_PMAP(pv);
+ PMAP_ASSERT_STAGE1(pmap);
+ if (!PMAP_TRYLOCK(pmap)) {
+ pvh_gen = pvh->pv_gen;
+ md_gen = m->md.pv_gen;
+ rw_wunlock(lock);
+ PMAP_LOCK(pmap);
+ rw_wlock(lock);
+ if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
+ rw_wunlock(lock);
+ PMAP_UNLOCK(pmap);
+ goto retry;
+ }
+ }
+ pmap_resident_count_dec(pmap, 1);
+
+ pde = pmap_pde(pmap, pv->pv_va, &lvl);
+ KASSERT(pde != NULL,
+ ("pmap_remove_all: no page directory entry found"));
+ KASSERT(lvl == 2,
+ ("pmap_remove_all: invalid pde level %d", lvl));
+ tpde = pmap_load(pde);
+
+ pte = pmap_l2_to_l3(pde, pv->pv_va);
+ tpte = pmap_load_clear(pte);
+ if (tpte & ATTR_SW_WIRED)
+ pmap->pm_stats.wired_count--;
+ if ((tpte & ATTR_AF) != 0) {
+ pmap_invalidate_page(pmap, pv->pv_va);
+ vm_page_aflag_set(m, PGA_REFERENCED);
+ }
+
+ /*
+ * Update the vm_page_t clean and reference bits.
+ */
+ if (pmap_pte_dirty(pmap, tpte))
+ vm_page_dirty(m);
+ pmap_unuse_pt(pmap, pv->pv_va, tpde, &free);
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
+ m->md.pv_gen++;
+ free_pv_entry(pmap, pv);
+ PMAP_UNLOCK(pmap);
+ }
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
+ rw_wunlock(lock);
+ vm_page_free_pages_toq(&free, true);
+}
+
+/*
+ * pmap_protect_l2: Apply the requested protection changes to a 2MB page
+ * mapping.
+ */
+static void
+pmap_protect_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva, pt_entry_t mask,
+ pt_entry_t nbits)
+{
+ pd_entry_t old_l2;
+ vm_page_t m, mt;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ PMAP_ASSERT_STAGE1(pmap);
+ KASSERT((sva & L2_OFFSET) == 0,
+ ("pmap_protect_l2: sva is not 2mpage aligned"));
+ old_l2 = pmap_load(l2);
+ KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK,
+ ("pmap_protect_l2: L2e %lx is not a block mapping", old_l2));
+
+ /*
+ * Return if the L2 entry already has the desired access restrictions
+ * in place.
+ */
+retry:
+ if ((old_l2 & mask) == nbits)
+ return;
+
+ /*
+ * When a dirty read/write superpage mapping is write protected,
+ * update the dirty field of each of the superpage's constituent 4KB
+ * pages.
+ */
+ if ((old_l2 & ATTR_SW_MANAGED) != 0 &&
+ (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
+ pmap_pte_dirty(pmap, old_l2)) {
+ m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK);
+ for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
+ vm_page_dirty(mt);
+ }
+
+ if (!atomic_fcmpset_64(l2, &old_l2, (old_l2 & ~mask) | nbits))
+ goto retry;
+
+ /*
+ * Since a promotion must break the 4KB page mappings before making
+ * the 2MB page mapping, a pmap_invalidate_page() suffices.
+ */
+ pmap_invalidate_page(pmap, sva);
+}
+
+/*
+ * Set the physical protection on the
+ * specified range of this map as requested.
+ */
+void
+pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
+{
+ vm_offset_t va, va_next;
+ pd_entry_t *l0, *l1, *l2;
+ pt_entry_t *l3p, l3, mask, nbits;
+
+ PMAP_ASSERT_STAGE1(pmap);
+ KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
+ if (prot == VM_PROT_NONE) {
+ pmap_remove(pmap, sva, eva);
+ return;
+ }
+
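+	/*
+	 * Compute the set of PTE bits to clear (mask) and to set (nbits) in
+	 * order to apply the requested protection.
+	 */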
+ mask = nbits = 0;
+ if ((prot & VM_PROT_WRITE) == 0) {
+ mask |= ATTR_S1_AP_RW_BIT | ATTR_SW_DBM;
+ nbits |= ATTR_S1_AP(ATTR_S1_AP_RO);
+ }
+ if ((prot & VM_PROT_EXECUTE) == 0) {
+ mask |= ATTR_S1_XN;
+ nbits |= ATTR_S1_XN;
+ }
+ if (mask == 0)
+ return;
+
+ PMAP_LOCK(pmap);
+ for (; sva < eva; sva = va_next) {
+ l0 = pmap_l0(pmap, sva);
+ if (pmap_load(l0) == 0) {
+ va_next = (sva + L0_SIZE) & ~L0_OFFSET;
+ if (va_next < sva)
+ va_next = eva;
+ continue;
+ }
+
+ l1 = pmap_l0_to_l1(l0, sva);
+ if (pmap_load(l1) == 0) {
+ va_next = (sva + L1_SIZE) & ~L1_OFFSET;
+ if (va_next < sva)
+ va_next = eva;
+ continue;
+ }
+
+ va_next = (sva + L2_SIZE) & ~L2_OFFSET;
+ if (va_next < sva)
+ va_next = eva;
+
+ l2 = pmap_l1_to_l2(l1, sva);
+ if (pmap_load(l2) == 0)
+ continue;
+
+ if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) {
+ if (sva + L2_SIZE == va_next && eva >= va_next) {
+ pmap_protect_l2(pmap, l2, sva, mask, nbits);
+ continue;
+ } else if (pmap_demote_l2(pmap, l2, sva) == NULL)
+ continue;
+ }
+ KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
+ ("pmap_protect: Invalid L2 entry after demotion"));
+
+ if (va_next > eva)
+ va_next = eva;
+
+ va = va_next;
+ for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
+ sva += L3_SIZE) {
+ l3 = pmap_load(l3p);
+retry:
+ /*
+ * Go to the next L3 entry if the current one is
+ * invalid or already has the desired access
+ * restrictions in place. (The latter case occurs
+ * frequently. For example, in a "buildworld"
+ * workload, almost 1 out of 4 L3 entries already
+ * have the desired restrictions.)
+ */
+ if (!pmap_l3_valid(l3) || (l3 & mask) == nbits) {
+ if (va != va_next) {
+ pmap_invalidate_range(pmap, va, sva);
+ va = va_next;
+ }
+ continue;
+ }
+
+ /*
+ * When a dirty read/write mapping is write protected,
+ * update the page's dirty field.
+ */
+ if ((l3 & ATTR_SW_MANAGED) != 0 &&
+ (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
+ pmap_pte_dirty(pmap, l3))
+ vm_page_dirty(PHYS_TO_VM_PAGE(l3 & ~ATTR_MASK));
+
+ if (!atomic_fcmpset_64(l3p, &l3, (l3 & ~mask) | nbits))
+ goto retry;
+ if (va == va_next)
+ va = sva;
+ }
+ if (va != va_next)
+ pmap_invalidate_range(pmap, va, sva);
+ }
+ PMAP_UNLOCK(pmap);
+}
+
+/*
+ * Inserts the specified page table page into the specified pmap's collection
+ * of idle page table pages. Each of a pmap's page table pages is responsible
+ * for mapping a distinct range of virtual addresses. The pmap's collection is
+ * ordered by this virtual address range.
+ *
+ * If "promoted" is false, then the page table page "mpte" must be zero filled.
+ */
+static __inline int
+pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted)
+{
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ mpte->valid = promoted ? VM_PAGE_BITS_ALL : 0;
+ return (vm_radix_insert(&pmap->pm_root, mpte));
+}
+
+/*
+ * Removes the page table page mapping the specified virtual address from the
+ * specified pmap's collection of idle page table pages, and returns that page.
+ * Returns NULL if there is no page table page corresponding to the specified
+ * virtual address.
+ */
+static __inline vm_page_t
+pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
+{
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ return (vm_radix_remove(&pmap->pm_root, pmap_l2_pindex(va)));
+}
+
+/*
+ * Performs a break-before-make update of a pmap entry. This is needed when
+ * either promoting or demoting pages to ensure the TLB doesn't get into an
+ * inconsistent state.
+ */
+static void
+pmap_update_entry(pmap_t pmap, pd_entry_t *pte, pd_entry_t newpte,
+ vm_offset_t va, vm_size_t size)
+{
+ register_t intr;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+
+ /*
+ * Ensure we don't get switched out with the page table in an
+ * inconsistent state. We also need to ensure no interrupts fire
+ * as they may make use of an address we are about to invalidate.
+ */
+ intr = intr_disable();
+
+ /*
+ * Clear the old mapping's valid bit, but leave the rest of the entry
+ * unchanged, so that a lockless, concurrent pmap_kextract() can still
+ * look up the physical address.
+ */
+ pmap_clear_bits(pte, ATTR_DESCR_VALID);
+ pmap_invalidate_range(pmap, va, va + size);
+
+ /* Create the new mapping */
+ pmap_store(pte, newpte);
+ dsb(ishst);
+
+ intr_restore(intr);
+}
+
+#if VM_NRESERVLEVEL > 0
+/*
+ * After promotion from 512 4KB page mappings to a single 2MB page mapping,
+ * replace the many pv entries for the 4KB page mappings by a single pv entry
+ * for the 2MB page mapping.
+ */
+static void
+pmap_pv_promote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
+ struct rwlock **lockp)
+{
+ struct md_page *pvh;
+ pv_entry_t pv;
+ vm_offset_t va_last;
+ vm_page_t m;
+
+ KASSERT((pa & L2_OFFSET) == 0,
+ ("pmap_pv_promote_l2: pa is not 2mpage aligned"));
+ CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
+
+ /*
+ * Transfer the first page's pv entry for this mapping to the 2mpage's
+ * pv list. Aside from avoiding the cost of a call to get_pv_entry(),
+ * a transfer avoids the possibility that get_pv_entry() calls
+ * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
+ * mappings that is being promoted.
+ */
+ m = PHYS_TO_VM_PAGE(pa);
+ va = va & ~L2_OFFSET;
+ pv = pmap_pvh_remove(&m->md, pmap, va);
+ KASSERT(pv != NULL, ("pmap_pv_promote_l2: pv not found"));
+ pvh = pa_to_pvh(pa);
+ TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
+ pvh->pv_gen++;
+ /* Free the remaining NPTEPG - 1 pv entries. */
+ va_last = va + L2_SIZE - PAGE_SIZE;
+ do {
+ m++;
+ va += PAGE_SIZE;
+ pmap_pvh_free(&m->md, pmap, va);
+ } while (va < va_last);
+}
+
+/*
+ * Tries to promote the 512, contiguous 4KB page mappings that are within a
+ * single level 2 table entry to a single 2MB page mapping. For promotion
+ * to occur, two conditions must be met: (1) the 4KB page mappings must map
+ * aligned, contiguous physical memory and (2) the 4KB page mappings must have
+ * identical characteristics.
+ */
+static void
+pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
+ struct rwlock **lockp)
+{
+ pt_entry_t *firstl3, *l3, newl2, oldl3, pa;
+ vm_page_t mpte;
+ vm_offset_t sva;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ PMAP_ASSERT_STAGE1(pmap);
+
+ sva = va & ~L2_OFFSET;
+ firstl3 = pmap_l2_to_l3(l2, sva);
+ newl2 = pmap_load(firstl3);
+
+setl2:
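+	/*
+	 * The first L3 entry must map a 2MB-aligned physical address and
+	 * have the access flag set for promotion to proceed.
+	 */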
+ if (((newl2 & (~ATTR_MASK | ATTR_AF)) & L2_OFFSET) != ATTR_AF) {
+ atomic_add_long(&pmap_l2_p_failures, 1);
+ CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
+ " in pmap %p", va, pmap);
+ return;
+ }
+
+ if ((newl2 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
+ (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM)) {
+ if (!atomic_fcmpset_64(l2, &newl2, newl2 & ~ATTR_SW_DBM))
+ goto setl2;
+ newl2 &= ~ATTR_SW_DBM;
+ }
+
+ pa = newl2 + L2_SIZE - PAGE_SIZE;
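+	/*
+	 * Walk the remaining L3 entries from last to first, verifying that
+	 * each maps the expected, physically contiguous address with
+	 * attributes identical to the first entry.
+	 */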
+ for (l3 = firstl3 + NL3PG - 1; l3 > firstl3; l3--) {
+ oldl3 = pmap_load(l3);
+setl3:
+ if ((oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
+ (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM)) {
+ if (!atomic_fcmpset_64(l3, &oldl3, oldl3 &
+ ~ATTR_SW_DBM))
+ goto setl3;
+ oldl3 &= ~ATTR_SW_DBM;
+ }
+ if (oldl3 != pa) {
+ atomic_add_long(&pmap_l2_p_failures, 1);
+ CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
+ " in pmap %p", va, pmap);
+ return;
+ }
+ pa -= PAGE_SIZE;
+ }
+
+ /*
+ * Save the page table page in its current state until the L2
+ * mapping the superpage is demoted by pmap_demote_l2() or
+ * destroyed by pmap_remove_l3().
+ */
+ mpte = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
+ KASSERT(mpte >= vm_page_array &&
+ mpte < &vm_page_array[vm_page_array_size],
+ ("pmap_promote_l2: page table page is out of range"));
+ KASSERT(mpte->pindex == pmap_l2_pindex(va),
+ ("pmap_promote_l2: page table page's pindex is wrong"));
+ if (pmap_insert_pt_page(pmap, mpte, true)) {
+ atomic_add_long(&pmap_l2_p_failures, 1);
+ CTR2(KTR_PMAP,
+ "pmap_promote_l2: failure for va %#lx in pmap %p", va,
+ pmap);
+ return;
+ }
+
+ if ((newl2 & ATTR_SW_MANAGED) != 0)
+ pmap_pv_promote_l2(pmap, va, newl2 & ~ATTR_MASK, lockp);
+
+ newl2 &= ~ATTR_DESCR_MASK;
+ newl2 |= L2_BLOCK;
+
+ pmap_update_entry(pmap, l2, newl2, sva, L2_SIZE);
+
+ atomic_add_long(&pmap_l2_promotions, 1);
+ CTR2(KTR_PMAP, "pmap_promote_l2: success for va %#lx in pmap %p", va,
+ pmap);
+}
+#endif /* VM_NRESERVLEVEL > 0 */
+
+/*
+ * Insert the given physical page (p) at
+ * the specified virtual address (v) in the
+ * target physical map with the protection requested.
+ *
+ * If specified, the page will be wired down, meaning
+ * that the related pte can not be reclaimed.
+ *
+ * NB: This is the only routine which MAY NOT lazy-evaluate
+ * or lose information. That is, this routine must actually
+ * insert this page into the given map NOW.
+ */
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ u_int flags, int8_t psind)
+{
+ struct rwlock *lock;
+ pd_entry_t *pde;
+ pt_entry_t new_l3, orig_l3;
+ pt_entry_t *l2, *l3;
+ pv_entry_t pv;
+ vm_paddr_t opa, pa;
+ vm_page_t mpte, om;
+ boolean_t nosleep;
+ int lvl, rv;
+
+ va = trunc_page(va);
+ if ((m->oflags & VPO_UNMANAGED) == 0)
+ VM_PAGE_OBJECT_BUSY_ASSERT(m);
+ pa = VM_PAGE_TO_PHYS(m);
+ new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT | L3_PAGE);
+ new_l3 |= pmap_pte_memattr(pmap, m->md.pv_memattr);
+ new_l3 |= pmap_pte_prot(pmap, prot);
+
+ if ((flags & PMAP_ENTER_WIRED) != 0)
+ new_l3 |= ATTR_SW_WIRED;
+ if (pmap->pm_stage == PM_STAGE1) {
+ if (va < VM_MAXUSER_ADDRESS)
+ new_l3 |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
+ else
+ new_l3 |= ATTR_S1_UXN;
+ if (pmap != kernel_pmap)
+ new_l3 |= ATTR_S1_nG;
+ } else {
+ /*
+ * Clear the access flag on executable mappings; it will be
+ * set later when the page is accessed.  The fault handler is
+ * required to invalidate the I-cache.
+ *
+ * TODO: Switch to the valid flag to allow hardware management
+ * of the access flag. Much of the pmap code assumes the
+ * valid flag is set and fails to destroy the old page tables
+ * correctly if it is clear.
+ */
+ if (prot & VM_PROT_EXECUTE)
+ new_l3 &= ~ATTR_AF;
+ }
+ if ((m->oflags & VPO_UNMANAGED) == 0) {
+ new_l3 |= ATTR_SW_MANAGED;
+ if ((prot & VM_PROT_WRITE) != 0) {
+ new_l3 |= ATTR_SW_DBM;
+ if ((flags & VM_PROT_WRITE) == 0) {
+ if (pmap->pm_stage == PM_STAGE1)
+ new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO);
+ else
+ new_l3 &=
+ ~ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
+ }
+ }
+ }
+
+ CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);
+
+ lock = NULL;
+ PMAP_LOCK(pmap);
+ if (psind == 1) {
+ /* Assert the required virtual and physical alignment. */
+ KASSERT((va & L2_OFFSET) == 0, ("pmap_enter: va unaligned"));
+ KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
+ rv = pmap_enter_l2(pmap, va, (new_l3 & ~L3_PAGE) | L2_BLOCK,
+ flags, m, &lock);
+ goto out;
+ }
+ mpte = NULL;
+
+ /*
+ * In the case that a page table page is not
+ * resident, we are creating it here.
+ */
+retry:
+ pde = pmap_pde(pmap, va, &lvl);
+ if (pde != NULL && lvl == 2) {
+ l3 = pmap_l2_to_l3(pde, va);
+ if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
+ mpte = PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK);
+ mpte->ref_count++;
+ }
+ goto havel3;
+ } else if (pde != NULL && lvl == 1) {
+ l2 = pmap_l1_to_l2(pde, va);
+ if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK &&
+ (l3 = pmap_demote_l2_locked(pmap, l2, va, &lock)) != NULL) {
+ l3 = &l3[pmap_l3_index(va)];
+ if (va < VM_MAXUSER_ADDRESS) {
+ mpte = PHYS_TO_VM_PAGE(
+ pmap_load(l2) & ~ATTR_MASK);
+ mpte->ref_count++;
+ }
+ goto havel3;
+ }
+ /* We need to allocate an L3 table. */
+ }
+ if (va < VM_MAXUSER_ADDRESS) {
+ nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
+
+ /*
+ * We use _pmap_alloc_l3() instead of pmap_alloc_l3() in order
+ * to handle the possibility that a superpage mapping for "va"
+ * was created while we slept.
+ */
+ mpte = _pmap_alloc_l3(pmap, pmap_l2_pindex(va),
+ nosleep ? NULL : &lock);
+ if (mpte == NULL && nosleep) {
+ CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
+ rv = KERN_RESOURCE_SHORTAGE;
+ goto out;
+ }
+ goto retry;
+ } else
+ panic("pmap_enter: missing L3 table for kernel va %#lx", va);
+
+havel3:
+ orig_l3 = pmap_load(l3);
+ opa = orig_l3 & ~ATTR_MASK;
+ pv = NULL;
+
+ /*
+ * Is the specified virtual address already mapped?
+ */
+ if (pmap_l3_valid(orig_l3)) {
+ /*
+ * Only allow adding new entries on stage 2 tables for now.
+ * This simplifies cache invalidation as we may need to call
+ * into EL2 to perform such actions.
+ */
+ PMAP_ASSERT_STAGE1(pmap);
+ /*
+ * Wiring change, just update stats. We don't worry about
+ * wiring PT pages as they remain resident as long as there
+ * are valid mappings in them. Hence, if a user page is wired,
+ * the PT page will be also.
+ */
+ if ((flags & PMAP_ENTER_WIRED) != 0 &&
+ (orig_l3 & ATTR_SW_WIRED) == 0)
+ pmap->pm_stats.wired_count++;
+ else if ((flags & PMAP_ENTER_WIRED) == 0 &&
+ (orig_l3 & ATTR_SW_WIRED) != 0)
+ pmap->pm_stats.wired_count--;
+
+ /*
+ * Remove the extra PT page reference.
+ */
+ if (mpte != NULL) {
+ mpte->ref_count--;
+ KASSERT(mpte->ref_count > 0,
+ ("pmap_enter: missing reference to page table page,"
+ " va: 0x%lx", va));
+ }
+
+ /*
+ * Has the physical page changed?
+ */
+ if (opa == pa) {
+ /*
+ * No, might be a protection or wiring change.
+ */
+ if ((orig_l3 & ATTR_SW_MANAGED) != 0 &&
+ (new_l3 & ATTR_SW_DBM) != 0)
+ vm_page_aflag_set(m, PGA_WRITEABLE);
+ goto validate;
+ }
+
+ /*
+ * The physical page has changed. Temporarily invalidate
+ * the mapping.
+ */
+ orig_l3 = pmap_load_clear(l3);
+ KASSERT((orig_l3 & ~ATTR_MASK) == opa,
+ ("pmap_enter: unexpected pa update for %#lx", va));
+ if ((orig_l3 & ATTR_SW_MANAGED) != 0) {
+ om = PHYS_TO_VM_PAGE(opa);
+
+ /*
+ * The pmap lock is sufficient to synchronize with
+ * concurrent calls to pmap_page_test_mappings() and
+ * pmap_ts_referenced().
+ */
+ if (pmap_pte_dirty(pmap, orig_l3))
+ vm_page_dirty(om);
+ if ((orig_l3 & ATTR_AF) != 0) {
+ pmap_invalidate_page(pmap, va);
+ vm_page_aflag_set(om, PGA_REFERENCED);
+ }
+ CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
+ pv = pmap_pvh_remove(&om->md, pmap, va);
+ if ((m->oflags & VPO_UNMANAGED) != 0)
+ free_pv_entry(pmap, pv);
+ if ((om->a.flags & PGA_WRITEABLE) != 0 &&
+ TAILQ_EMPTY(&om->md.pv_list) &&
+ ((om->flags & PG_FICTITIOUS) != 0 ||
+ TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
+ vm_page_aflag_clear(om, PGA_WRITEABLE);
+ } else {
+ KASSERT((orig_l3 & ATTR_AF) != 0,
+ ("pmap_enter: unmanaged mapping lacks ATTR_AF"));
+ pmap_invalidate_page(pmap, va);
+ }
+ orig_l3 = 0;
+ } else {
+ /*
+ * Increment the counters.
+ */
+ if ((new_l3 & ATTR_SW_WIRED) != 0)
+ pmap->pm_stats.wired_count++;
+ pmap_resident_count_inc(pmap, 1);
+ }
+ /*
+ * Enter on the PV list if part of our managed memory.
+ */
+ if ((m->oflags & VPO_UNMANAGED) == 0) {
+ if (pv == NULL) {
+ pv = get_pv_entry(pmap, &lock);
+ pv->pv_va = va;
+ }
+ CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
+ TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
+ m->md.pv_gen++;
+ if ((new_l3 & ATTR_SW_DBM) != 0)
+ vm_page_aflag_set(m, PGA_WRITEABLE);
+ }
+
+validate:
+ if (pmap->pm_stage == PM_STAGE1) {
+ /*
+ * Sync the icache if the mapping has exec permission and the
+ * attribute VM_MEMATTR_WRITE_BACK. Do it now, before the
+ * mapping is stored and made valid for hardware table walks;
+ * if done later, another CPU could access this page before
+ * the caches are properly synced. Don't do it for kernel
+ * memory, which is mapped with exec permission even when it
+ * isn't going to hold executable code; the only time an
+ * icache sync is needed there is after a kernel module is
+ * loaded and its relocation info is processed, and that is
+ * done in elf_cpu_load_file().
+ */
+ if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
+ m->md.pv_memattr == VM_MEMATTR_WRITE_BACK &&
+ (opa != pa || (orig_l3 & ATTR_S1_XN))) {
+ PMAP_ASSERT_STAGE1(pmap);
+ cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
+ }
+ } else {
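+ /*
+ * Stage 2 (guest) mapping: write back the data cache for the
+ * page so its contents are visible to the guest even if the
+ * guest maps it with different cacheability attributes.
+ */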
+ cpu_dcache_wb_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
+ }
+
+ /*
+ * Update the L3 entry
+ */
+ if (pmap_l3_valid(orig_l3)) {
+ PMAP_ASSERT_STAGE1(pmap);
+ KASSERT(opa == pa, ("pmap_enter: invalid update"));
+ if ((orig_l3 & ~ATTR_AF) != (new_l3 & ~ATTR_AF)) {
+ /* same PA, different attributes */
+ orig_l3 = pmap_load_store(l3, new_l3);
+ pmap_invalidate_page(pmap, va);
+ if ((orig_l3 & ATTR_SW_MANAGED) != 0 &&
+ pmap_pte_dirty(pmap, orig_l3))
+ vm_page_dirty(m);
+ } else {
+ /*
+ * orig_l3 == new_l3
+ * This can happen if multiple threads simultaneously
+ * access a not yet mapped page. It is bad for
+ * performance since it can cause a full
+ * demotion-NOP-promotion cycle.
+ * Other possible reasons are:
+ * - the VM and pmap memory layouts have diverged
+ * - a TLB flush is missing somewhere and the CPU
+ *   doesn't see the actual mapping.
+ */
+ CTR4(KTR_PMAP, "%s: already mapped page - "
+ "pmap %p va %#lx pte %#lx",
+ __func__, pmap, va, new_l3);
+ }
+ } else {
+ /* New mapping */
+ pmap_store(l3, new_l3);
+ dsb(ishst);
+ }
+
+#if VM_NRESERVLEVEL > 0
+ /*
+ * Try to promote from level 3 pages to a level 2 superpage. This
+ * currently only works on stage 1 pmaps as pmap_promote_l2 looks at
+ * stage 1 specific fields and performs a break-before-make sequence
+ * that is incorrect for a stage 2 pmap.
+ */
+ if ((mpte == NULL || mpte->ref_count == NL3PG) &&
+ pmap_ps_enabled(pmap) && pmap->pm_stage == PM_STAGE1 &&
+ (m->flags & PG_FICTITIOUS) == 0 &&
+ vm_reserv_level_iffullpop(m) == 0) {
+ pmap_promote_l2(pmap, pde, va, &lock);
+ }
+#endif
+
+ rv = KERN_SUCCESS;
+out:
+ if (lock != NULL)
+ rw_wunlock(lock);
+ PMAP_UNLOCK(pmap);
+ return (rv);
+}
+
+/*
+ * Tries to create a read- and/or execute-only 2MB page mapping. Returns true
+ * if successful. Returns false if (1) a page table page cannot be allocated
+ * without sleeping, (2) a mapping already exists at the specified virtual
+ * address, or (3) a PV entry cannot be allocated without reclaiming another
+ * PV entry.
+ */
+static bool
+pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ struct rwlock **lockp)
+{
+ pd_entry_t new_l2;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ PMAP_ASSERT_STAGE1(pmap);
+
+ new_l2 = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT |
+ ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
+ L2_BLOCK);
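+ /*
+ * Managed mappings are entered without the access flag; the
+ * first access then takes an access flag fault, so ATTR_AF is
+ * only set for pages that are actually referenced.
+ */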
+ if ((m->oflags & VPO_UNMANAGED) == 0) {
+ new_l2 |= ATTR_SW_MANAGED;
+ new_l2 &= ~ATTR_AF;
+ }
+ if ((prot & VM_PROT_EXECUTE) == 0 ||
+ m->md.pv_memattr == VM_MEMATTR_DEVICE)
+ new_l2 |= ATTR_S1_XN;
+ if (va < VM_MAXUSER_ADDRESS)
+ new_l2 |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
+ else
+ new_l2 |= ATTR_S1_UXN;
+ if (pmap != kernel_pmap)
+ new_l2 |= ATTR_S1_nG;
+ return (pmap_enter_l2(pmap, va, new_l2, PMAP_ENTER_NOSLEEP |
+ PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
+ KERN_SUCCESS);
+}
+
+/*
+ * Returns true if every page table entry in the specified page table is
+ * zero.
+ */
+static bool
+pmap_every_pte_zero(vm_paddr_t pa)
+{
+ pt_entry_t *pt_end, *pte;
+
+ KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned"));
+ pte = (pt_entry_t *)PHYS_TO_DMAP(pa);
+ for (pt_end = pte + Ln_ENTRIES; pte < pt_end; pte++) {
+ if (*pte != 0)
+ return (false);
+ }
+ return (true);
+}
+
+/*
+ * Tries to create the specified 2MB page mapping. Returns KERN_SUCCESS if
+ * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
+ * otherwise. Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
+ * a mapping already exists at the specified virtual address. Returns
+ * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table
+ * page allocation failed. Returns KERN_RESOURCE_SHORTAGE if
+ * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
+ *
+ * The parameter "m" is only used when creating a managed, writeable mapping.
+ */
+static int
+pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
+ vm_page_t m, struct rwlock **lockp)
+{
+ struct spglist free;
+ pd_entry_t *l2, old_l2;
+ vm_page_t l2pg, mt;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+
+ if ((l2 = pmap_alloc_l2(pmap, va, &l2pg, (flags &
+ PMAP_ENTER_NOSLEEP) != 0 ? NULL : lockp)) == NULL) {
+ CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx in pmap %p",
+ va, pmap);
+ return (KERN_RESOURCE_SHORTAGE);
+ }
+
+ /*
+ * If there are existing mappings, either abort or remove them.
+ */
+ if ((old_l2 = pmap_load(l2)) != 0) {
+ KASSERT(l2pg == NULL || l2pg->ref_count > 1,
+ ("pmap_enter_l2: l2pg's ref count is too low"));
+ if ((flags & PMAP_ENTER_NOREPLACE) != 0 && (va <
+ VM_MAXUSER_ADDRESS || (old_l2 & ATTR_DESCR_MASK) ==
+ L2_BLOCK || !pmap_every_pte_zero(old_l2 & ~ATTR_MASK))) {
+ if (l2pg != NULL)
+ l2pg->ref_count--;
+ CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx"
+ " in pmap %p", va, pmap);
+ return (KERN_FAILURE);
+ }
+ SLIST_INIT(&free);
+ if ((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK)
+ (void)pmap_remove_l2(pmap, l2, va,
+ pmap_load(pmap_l1(pmap, va)), &free, lockp);
+ else
+ pmap_remove_l3_range(pmap, old_l2, va, va + L2_SIZE,
+ &free, lockp);
+ if (va < VM_MAXUSER_ADDRESS) {
+ vm_page_free_pages_toq(&free, true);
+ KASSERT(pmap_load(l2) == 0,
+ ("pmap_enter_l2: non-zero L2 entry %p", l2));
+ } else {
+ KASSERT(SLIST_EMPTY(&free),
+ ("pmap_enter_l2: freed kernel page table page"));
+
+ /*
+ * Both pmap_remove_l2() and pmap_remove_l3_range()
+ * will leave the kernel page table page zero filled.
+ * Nonetheless, the TLB could have an intermediate
+ * entry for the kernel page table page.
+ */
+ mt = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
+ if (pmap_insert_pt_page(pmap, mt, false))
+ panic("pmap_enter_l2: trie insert failed");
+ pmap_clear(l2);
+ pmap_invalidate_page(pmap, va);
+ }
+ }
+
+ if ((new_l2 & ATTR_SW_MANAGED) != 0) {
+ /*
+ * Abort this mapping if its PV entry could not be created.
+ */
+ if (!pmap_pv_insert_l2(pmap, va, new_l2, flags, lockp)) {
+ if (l2pg != NULL)
+ pmap_abort_ptp(pmap, va, l2pg);
+ CTR2(KTR_PMAP,
+ "pmap_enter_l2: failure for va %#lx in pmap %p",
+ va, pmap);
+ return (KERN_RESOURCE_SHORTAGE);
+ }
+ if ((new_l2 & ATTR_SW_DBM) != 0)
+ for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
+ vm_page_aflag_set(mt, PGA_WRITEABLE);
+ }
+
+ /*
+ * Increment counters.
+ */
+ if ((new_l2 & ATTR_SW_WIRED) != 0)
+ pmap->pm_stats.wired_count += L2_SIZE / PAGE_SIZE;
+ pmap->pm_stats.resident_count += L2_SIZE / PAGE_SIZE;
+
+ /*
+ * Map the superpage.
+ */
+ pmap_store(l2, new_l2);
+ dsb(ishst);
+
+ atomic_add_long(&pmap_l2_mappings, 1);
+ CTR2(KTR_PMAP, "pmap_enter_l2: success for va %#lx in pmap %p",
+ va, pmap);
+
+ return (KERN_SUCCESS);
+}
+
+/*
+ * Maps a sequence of resident pages belonging to the same object.
+ * The sequence begins with the given page m_start. This page is
+ * mapped at the given virtual address start. Each subsequent page is
+ * mapped at a virtual address that is offset from start by the same
+ * amount as the page is offset from m_start within the object. The
+ * last page in the sequence is the page with the largest offset from
+ * m_start that can be mapped at a virtual address less than the given
+ * virtual address end. Not every virtual page between start and end
+ * is mapped; only those for which a resident page exists with the
+ * corresponding offset from m_start are mapped.
+ */
+void
+pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
+ vm_page_t m_start, vm_prot_t prot)
+{
+ struct rwlock *lock;
+ vm_offset_t va;
+ vm_page_t m, mpte;
+ vm_pindex_t diff, psize;
+
+ VM_OBJECT_ASSERT_LOCKED(m_start->object);
+
+ psize = atop(end - start);
+ mpte = NULL;
+ m = m_start;
+ lock = NULL;
+ PMAP_LOCK(pmap);
+ while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
+ va = start + ptoa(diff);
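+ /*
+ * Attempt a 2MB page mapping when the address and the page run
+ * are suitably aligned; on success, skip ahead to the last 4KB
+ * page of the superpage so the loop continues with the page
+ * that follows it.
+ */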
+ if ((va & L2_OFFSET) == 0 && va + L2_SIZE <= end &&
+ m->psind == 1 && pmap_ps_enabled(pmap) &&
+ pmap_enter_2mpage(pmap, va, m, prot, &lock))
+ m = &m[L2_SIZE / PAGE_SIZE - 1];
+ else
+ mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte,
+ &lock);
+ m = TAILQ_NEXT(m, listq);
+ }
+ if (lock != NULL)
+ rw_wunlock(lock);
+ PMAP_UNLOCK(pmap);
+}
+
+/*
+ * this code makes some *MAJOR* assumptions:
+ * 1. The current pmap and the given pmap exist.
+ * 2. Not wired.
+ * 3. Read access.
+ * 4. No page table pages.
+ * but is *MUCH* faster than pmap_enter...
+ */
+
+void
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
+{
+ struct rwlock *lock;
+
+ lock = NULL;
+ PMAP_LOCK(pmap);
+ (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
+ if (lock != NULL)
+ rw_wunlock(lock);
+ PMAP_UNLOCK(pmap);
+}
+
+static vm_page_t
+pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
+ vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
+{
+ pd_entry_t *pde;
+ pt_entry_t *l2, *l3, l3_val;
+ vm_paddr_t pa;
+ int lvl;
+
+ KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+ (m->oflags & VPO_UNMANAGED) != 0,
+ ("pmap_enter_quick_locked: managed mapping within the clean submap"));
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ PMAP_ASSERT_STAGE1(pmap);
+
+ CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va);
+ /*
+ * In the case that a page table page is not
+ * resident, we are creating it here.
+ */
+ if (va < VM_MAXUSER_ADDRESS) {
+ vm_pindex_t l2pindex;
+
+ /*
+ * Calculate pagetable page index
+ */
+ l2pindex = pmap_l2_pindex(va);
+ if (mpte && (mpte->pindex == l2pindex)) {
+ mpte->ref_count++;
+ } else {
+ /*
+ * Get the l2 entry
+ */
+ pde = pmap_pde(pmap, va, &lvl);
+
+ /*
+ * If the page table page is mapped, we just increment
+ * the hold count, and activate it. Otherwise, we
+ * attempt to allocate a page table page. If this
+ * attempt fails, we don't retry. Instead, we give up.
+ */
+ if (lvl == 1) {
+ l2 = pmap_l1_to_l2(pde, va);
+ if ((pmap_load(l2) & ATTR_DESCR_MASK) ==
+ L2_BLOCK)
+ return (NULL);
+ }
+ if (lvl == 2 && pmap_load(pde) != 0) {
+ mpte =
+ PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK);
+ mpte->ref_count++;
+ } else {
+ /*
+ * Pass NULL instead of the PV list lock
+ * pointer, because we don't intend to sleep.
+ */
+ mpte = _pmap_alloc_l3(pmap, l2pindex, NULL);
+ if (mpte == NULL)
+ return (mpte);
+ }
+ }
+ l3 = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
+ l3 = &l3[pmap_l3_index(va)];
+ } else {
+ mpte = NULL;
+ pde = pmap_pde(kernel_pmap, va, &lvl);
+ KASSERT(pde != NULL,
+ ("pmap_enter_quick_locked: Invalid page entry, va: 0x%lx",
+ va));
+ KASSERT(lvl == 2,
+ ("pmap_enter_quick_locked: Invalid level %d", lvl));
+ l3 = pmap_l2_to_l3(pde, va);
+ }
+
+ /*
+ * Abort if a mapping already exists.
+ */
+ if (pmap_load(l3) != 0) {
+ if (mpte != NULL)
+ mpte->ref_count--;
+ return (NULL);
+ }
+
+ /*
+ * Enter on the PV list if part of our managed memory.
+ */
+ if ((m->oflags & VPO_UNMANAGED) == 0 &&
+ !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
+ if (mpte != NULL)
+ pmap_abort_ptp(pmap, va, mpte);
+ return (NULL);
+ }
+
+ /*
+ * Increment counters
+ */
+ pmap_resident_count_inc(pmap, 1);
+
+ pa = VM_PAGE_TO_PHYS(m);
+ l3_val = pa | ATTR_DEFAULT | ATTR_S1_IDX(m->md.pv_memattr) |
+ ATTR_S1_AP(ATTR_S1_AP_RO) | L3_PAGE;
+ if ((prot & VM_PROT_EXECUTE) == 0 ||
+ m->md.pv_memattr == VM_MEMATTR_DEVICE)
+ l3_val |= ATTR_S1_XN;
+ if (va < VM_MAXUSER_ADDRESS)
+ l3_val |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
+ else
+ l3_val |= ATTR_S1_UXN;
+ if (pmap != kernel_pmap)
+ l3_val |= ATTR_S1_nG;
+
+ /*
+ * Now validate the mapping with RO protection. Managed mappings are
+ * also created without the access flag, so the first access is noted
+ * via an access flag fault.
+ */
+ if ((m->oflags & VPO_UNMANAGED) == 0) {
+ l3_val |= ATTR_SW_MANAGED;
+ l3_val &= ~ATTR_AF;
+ }
+
+ /* Sync icache before the mapping is stored to PTE */
+ if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
+ m->md.pv_memattr == VM_MEMATTR_WRITE_BACK)
+ cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
+
+ pmap_store(l3, l3_val);
+ dsb(ishst);
+
+ return (mpte);
+}
+
+/*
+ * This code maps large physical mmap regions into the
+ * processor address space. Note that some shortcuts
+ * are taken, but the code works.
+ */
+void
+pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
+ vm_pindex_t pindex, vm_size_t size)
+{
+
+ VM_OBJECT_ASSERT_WLOCKED(object);
+ KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
+ ("pmap_object_init_pt: non-device object"));
+}
+
+/*
+ * Clear the wired attribute from the mappings for the specified range of
+ * addresses in the given pmap. Every valid mapping within that range
+ * must have the wired attribute set. In contrast, invalid mappings
+ * cannot have the wired attribute set, so they are ignored.
+ *
+ * The wired attribute of the page table entry is not a hardware feature,
+ * so there is no need to invalidate any TLB entries.
+ */
+void
+pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+ vm_offset_t va_next;
+ pd_entry_t *l0, *l1, *l2;
+ pt_entry_t *l3;
+
+ PMAP_LOCK(pmap);
+ for (; sva < eva; sva = va_next) {
+ l0 = pmap_l0(pmap, sva);
+ if (pmap_load(l0) == 0) {
+ va_next = (sva + L0_SIZE) & ~L0_OFFSET;
+ if (va_next < sva)
+ va_next = eva;
+ continue;
+ }
+
+ l1 = pmap_l0_to_l1(l0, sva);
+ if (pmap_load(l1) == 0) {
+ va_next = (sva + L1_SIZE) & ~L1_OFFSET;
+ if (va_next < sva)
+ va_next = eva;
+ continue;
+ }
+
+ va_next = (sva + L2_SIZE) & ~L2_OFFSET;
+ if (va_next < sva)
+ va_next = eva;
+
+ l2 = pmap_l1_to_l2(l1, sva);
+ if (pmap_load(l2) == 0)
+ continue;
+
+ if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) {
+ if ((pmap_load(l2) & ATTR_SW_WIRED) == 0)
+ panic("pmap_unwire: l2 %#jx is missing "
+ "ATTR_SW_WIRED", (uintmax_t)pmap_load(l2));
+
+ /*
+ * Are we unwiring the entire large page? If not,
+ * demote the mapping and fall through.
+ */
+ if (sva + L2_SIZE == va_next && eva >= va_next) {
+ pmap_clear_bits(l2, ATTR_SW_WIRED);
+ pmap->pm_stats.wired_count -= L2_SIZE /
+ PAGE_SIZE;
+ continue;
+ } else if (pmap_demote_l2(pmap, l2, sva) == NULL)
+ panic("pmap_unwire: demotion failed");
+ }
+ KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
+ ("pmap_unwire: Invalid l2 entry after demotion"));
+
+ if (va_next > eva)
+ va_next = eva;
+ for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
+ sva += L3_SIZE) {
+ if (pmap_load(l3) == 0)
+ continue;
+ if ((pmap_load(l3) & ATTR_SW_WIRED) == 0)
+ panic("pmap_unwire: l3 %#jx is missing "
+ "ATTR_SW_WIRED", (uintmax_t)pmap_load(l3));
+
+ /*
+ * ATTR_SW_WIRED must be cleared atomically. Although
+ * the pmap lock synchronizes access to ATTR_SW_WIRED,
+ * the System MMU may write to the entry concurrently.
+ */
+ pmap_clear_bits(l3, ATTR_SW_WIRED);
+ pmap->pm_stats.wired_count--;
+ }
+ }
+ PMAP_UNLOCK(pmap);
+}
+
+/*
+ * Copy the range specified by src_addr/len
+ * from the source map to the range dst_addr/len
+ * in the destination map.
+ *
+ * This routine is only advisory and need not do anything.
+ *
+ * Because the executable mappings created by this routine are copied,
+ * it should not have to flush the instruction cache.
+ */
+void
+pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
+ vm_offset_t src_addr)
+{
+ struct rwlock *lock;
+ pd_entry_t *l0, *l1, *l2, srcptepaddr;
+ pt_entry_t *dst_pte, mask, nbits, ptetemp, *src_pte;
+ vm_offset_t addr, end_addr, va_next;
+ vm_page_t dst_l2pg, dstmpte, srcmpte;
+
+ PMAP_ASSERT_STAGE1(dst_pmap);
+ PMAP_ASSERT_STAGE1(src_pmap);
+
+ if (dst_addr != src_addr)
+ return;
+ end_addr = src_addr + len;
+ lock = NULL;
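+ /*
+ * Lock the two pmaps in a consistent order (by address) so that
+ * concurrent pmap_copy() calls cannot deadlock on each other.
+ */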
+ if (dst_pmap < src_pmap) {
+ PMAP_LOCK(dst_pmap);
+ PMAP_LOCK(src_pmap);
+ } else {
+ PMAP_LOCK(src_pmap);
+ PMAP_LOCK(dst_pmap);
+ }
+ for (addr = src_addr; addr < end_addr; addr = va_next) {
+ l0 = pmap_l0(src_pmap, addr);
+ if (pmap_load(l0) == 0) {
+ va_next = (addr + L0_SIZE) & ~L0_OFFSET;
+ if (va_next < addr)
+ va_next = end_addr;
+ continue;
+ }
+ l1 = pmap_l0_to_l1(l0, addr);
+ if (pmap_load(l1) == 0) {
+ va_next = (addr + L1_SIZE) & ~L1_OFFSET;
+ if (va_next < addr)
+ va_next = end_addr;
+ continue;
+ }
+ va_next = (addr + L2_SIZE) & ~L2_OFFSET;
+ if (va_next < addr)
+ va_next = end_addr;
+ l2 = pmap_l1_to_l2(l1, addr);
+ srcptepaddr = pmap_load(l2);
+ if (srcptepaddr == 0)
+ continue;
+ if ((srcptepaddr & ATTR_DESCR_MASK) == L2_BLOCK) {
+ if ((addr & L2_OFFSET) != 0 ||
+ addr + L2_SIZE > end_addr)
+ continue;
+ l2 = pmap_alloc_l2(dst_pmap, addr, &dst_l2pg, NULL);
+ if (l2 == NULL)
+ break;
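+ /*
+ * Copy the whole 2MB block mapping only if the destination
+ * L2 entry is free and, for a managed mapping, a PV entry
+ * can be allocated without reclaiming another one.
+ */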
+ if (pmap_load(l2) == 0 &&
+ ((srcptepaddr & ATTR_SW_MANAGED) == 0 ||
+ pmap_pv_insert_l2(dst_pmap, addr, srcptepaddr,
+ PMAP_ENTER_NORECLAIM, &lock))) {
+ mask = ATTR_AF | ATTR_SW_WIRED;
+ nbits = 0;
+ if ((srcptepaddr & ATTR_SW_DBM) != 0)
+ nbits |= ATTR_S1_AP_RW_BIT;
+ pmap_store(l2, (srcptepaddr & ~mask) | nbits);
+ pmap_resident_count_inc(dst_pmap, L2_SIZE /
+ PAGE_SIZE);
+ atomic_add_long(&pmap_l2_mappings, 1);
+ } else
+ pmap_abort_ptp(dst_pmap, addr, dst_l2pg);
+ continue;
+ }
+ KASSERT((srcptepaddr & ATTR_DESCR_MASK) == L2_TABLE,
+ ("pmap_copy: invalid L2 entry"));
+ srcptepaddr &= ~ATTR_MASK;
+ srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
+ KASSERT(srcmpte->ref_count > 0,
+ ("pmap_copy: source page table page is unused"));
+ if (va_next > end_addr)
+ va_next = end_addr;
+ src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
+ src_pte = &src_pte[pmap_l3_index(addr)];
+ dstmpte = NULL;
+ for (; addr < va_next; addr += PAGE_SIZE, src_pte++) {
+ ptetemp = pmap_load(src_pte);
+
+ /*
+ * We only virtual copy managed pages.
+ */
+ if ((ptetemp & ATTR_SW_MANAGED) == 0)
+ continue;
+
+ if (dstmpte != NULL) {
+ KASSERT(dstmpte->pindex == pmap_l2_pindex(addr),
+ ("dstmpte pindex/addr mismatch"));
+ dstmpte->ref_count++;
+ } else if ((dstmpte = pmap_alloc_l3(dst_pmap, addr,
+ NULL)) == NULL)
+ goto out;
+ dst_pte = (pt_entry_t *)
+ PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
+ dst_pte = &dst_pte[pmap_l3_index(addr)];
+ if (pmap_load(dst_pte) == 0 &&
+ pmap_try_insert_pv_entry(dst_pmap, addr,
+ PHYS_TO_VM_PAGE(ptetemp & ~ATTR_MASK), &lock)) {
+ /*
+ * Clear the wired, modified, and accessed
+ * (referenced) bits during the copy.
+ */
+ mask = ATTR_AF | ATTR_SW_WIRED;
+ nbits = 0;
+ if ((ptetemp & ATTR_SW_DBM) != 0)
+ nbits |= ATTR_S1_AP_RW_BIT;
+ pmap_store(dst_pte, (ptetemp & ~mask) | nbits);
+ pmap_resident_count_inc(dst_pmap, 1);
+ } else {
+ pmap_abort_ptp(dst_pmap, addr, dstmpte);
+ goto out;
+ }
+ /* Have we copied all of the valid mappings? */
+ if (dstmpte->ref_count >= srcmpte->ref_count)
+ break;
+ }
+ }
+out:
+ /*
+ * XXX This barrier may not be needed because the destination pmap is
+ * not active.
+ */
+ dsb(ishst);
+
+ if (lock != NULL)
+ rw_wunlock(lock);
+ PMAP_UNLOCK(src_pmap);
+ PMAP_UNLOCK(dst_pmap);
+}
+
+/*
+ * pmap_zero_page zeros the specified hardware page through the
+ * page's direct map address.
+ */
+void
+pmap_zero_page(vm_page_t m)
+{
+ vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+
+ pagezero((void *)va);
+}
+
+/*
+ * pmap_zero_page_area zeros the specified portion of a hardware page
+ * through the page's direct map address.
+ *
+ * off and size may not cover an area beyond a single hardware page.
+ */
+void
+pmap_zero_page_area(vm_page_t m, int off, int size)
+{
+ vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+
+ if (off == 0 && size == PAGE_SIZE)
+ pagezero((void *)va);
+ else
+ bzero((char *)va + off, size);
+}
+
+/*
+ * pmap_copy_page copies the specified (machine independent)
+ * page through the source and destination pages' direct map
+ * addresses.
+ */
+void
+pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
+{
+ vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
+ vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
+
+ pagecopy((void *)src, (void *)dst);
+}
+
+int unmapped_buf_allowed = 1;
+
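+/*
+ * Copy xfersize bytes from offset a_offset in the pages ma[] to offset
+ * b_offset in the pages mb[], splitting the copy at page boundaries and
+ * accessing both pages through the direct map.
+ */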
+void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+ vm_offset_t b_offset, int xfersize)
+{
+ void *a_cp, *b_cp;
+ vm_page_t m_a, m_b;
+ vm_paddr_t p_a, p_b;
+ vm_offset_t a_pg_offset, b_pg_offset;
+ int cnt;
+
+ while (xfersize > 0) {
+ a_pg_offset = a_offset & PAGE_MASK;
+ m_a = ma[a_offset >> PAGE_SHIFT];
+ p_a = m_a->phys_addr;
+ b_pg_offset = b_offset & PAGE_MASK;
+ m_b = mb[b_offset >> PAGE_SHIFT];
+ p_b = m_b->phys_addr;
+ cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+ cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+ if (__predict_false(!PHYS_IN_DMAP(p_a))) {
+ panic("!DMAP a %lx", p_a);
+ } else {
+ a_cp = (char *)PHYS_TO_DMAP(p_a) + a_pg_offset;
+ }
+ if (__predict_false(!PHYS_IN_DMAP(p_b))) {
+ panic("!DMAP b %lx", p_b);
+ } else {
+ b_cp = (char *)PHYS_TO_DMAP(p_b) + b_pg_offset;
+ }
+ bcopy(a_cp, b_cp, cnt);
+ a_offset += cnt;
+ b_offset += cnt;
+ xfersize -= cnt;
+ }
+}
+
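+/*
+ * Transient kernel mappings use the direct map, so entering a page is just
+ * a physical-to-DMAP translation and removal is a no-op.
+ */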
+vm_offset_t
+pmap_quick_enter_page(vm_page_t m)
+{
+
+ return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
+}
+
+void
+pmap_quick_remove_page(vm_offset_t addr)
+{
+}
+
+/*
+ * Returns true if the pmap's pv is one of the first
+ * 16 pvs linked to from this page. This count may
+ * be changed upwards or downwards in the future; it
+ * is only necessary that true be returned for a small
+ * subset of pmaps for proper page aging.
+ */
+boolean_t
+pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
+{
+ struct md_page *pvh;
+ struct rwlock *lock;
+ pv_entry_t pv;
+ int loops = 0;
+ boolean_t rv;
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_page_exists_quick: page %p is not managed", m));
+ rv = FALSE;
+ lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+ rw_rlock(lock);
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
+ if (PV_PMAP(pv) == pmap) {
+ rv = TRUE;
+ break;
+ }
+ loops++;
+ if (loops >= 16)
+ break;
+ }
+ if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
+ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
+ if (PV_PMAP(pv) == pmap) {
+ rv = TRUE;
+ break;
+ }
+ loops++;
+ if (loops >= 16)
+ break;
+ }
+ }
+ rw_runlock(lock);
+ return (rv);
+}
+
+/*
+ * pmap_page_wired_mappings:
+ *
+ * Return the number of managed mappings to the given physical page
+ * that are wired.
+ */
+int
+pmap_page_wired_mappings(vm_page_t m)
+{
+ struct rwlock *lock;
+ struct md_page *pvh;
+ pmap_t pmap;
+ pt_entry_t *pte;
+ pv_entry_t pv;
+ int count, lvl, md_gen, pvh_gen;
+
+ if ((m->oflags & VPO_UNMANAGED) != 0)
+ return (0);
+ lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+ rw_rlock(lock);
+restart:
+ count = 0;
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
+ pmap = PV_PMAP(pv);
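+ /*
+ * The pmap lock is ordered before the PV list lock, so if the
+ * trylock fails the list lock must be dropped before blocking
+ * on the pmap lock; the generation count detects any PV list
+ * changes made in the meantime.
+ */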
+ if (!PMAP_TRYLOCK(pmap)) {
+ md_gen = m->md.pv_gen;
+ rw_runlock(lock);
+ PMAP_LOCK(pmap);
+ rw_rlock(lock);
+ if (md_gen != m->md.pv_gen) {
+ PMAP_UNLOCK(pmap);
+ goto restart;
+ }
+ }
+ pte = pmap_pte(pmap, pv->pv_va, &lvl);
+ if (pte != NULL && (pmap_load(pte) & ATTR_SW_WIRED) != 0)
+ count++;
+ PMAP_UNLOCK(pmap);
+ }
+ if ((m->flags & PG_FICTITIOUS) == 0) {
+ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
+ pmap = PV_PMAP(pv);
+ if (!PMAP_TRYLOCK(pmap)) {
+ md_gen = m->md.pv_gen;
+ pvh_gen = pvh->pv_gen;
+ rw_runlock(lock);
+ PMAP_LOCK(pmap);
+ rw_rlock(lock);
+ if (md_gen != m->md.pv_gen ||
+ pvh_gen != pvh->pv_gen) {
+ PMAP_UNLOCK(pmap);
+ goto restart;
+ }
+ }
+ pte = pmap_pte(pmap, pv->pv_va, &lvl);
+ if (pte != NULL &&
+ (pmap_load(pte) & ATTR_SW_WIRED) != 0)
+ count++;
+ PMAP_UNLOCK(pmap);
+ }
+ }
+ rw_runlock(lock);
+ return (count);
+}
+
+/*
+ * Returns true if the given page is mapped individually or as part of
+ * a 2mpage. Otherwise, returns false.
+ */
+bool
+pmap_page_is_mapped(vm_page_t m)
+{
+ struct rwlock *lock;
+ bool rv;
+
+ if ((m->oflags & VPO_UNMANAGED) != 0)
+ return (false);
+ lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+ rw_rlock(lock);
+ rv = !TAILQ_EMPTY(&m->md.pv_list) ||
+ ((m->flags & PG_FICTITIOUS) == 0 &&
+ !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
+ rw_runlock(lock);
+ return (rv);
+}
+
+/*
+ * Destroy all managed, non-wired mappings in the given user-space
+ * pmap. This pmap cannot be active on any processor besides the
+ * caller.
+ *
+ * This function cannot be applied to the kernel pmap. Moreover, it
+ * is not intended for general use. It is only to be used during
+ * process termination. Consequently, it can be implemented in ways
+ * that make it faster than pmap_remove(). First, it can more quickly
+ * destroy mappings by iterating over the pmap's collection of PV
+ * entries, rather than searching the page table. Second, it doesn't
+ * have to test and clear the page table entries atomically, because
+ * no processor is currently accessing the user address space. In
+ * particular, a page table entry's dirty bit won't change state once
+ * this function starts.
+ */
+void
+pmap_remove_pages(pmap_t pmap)
+{
+ pd_entry_t *pde;
+ pt_entry_t *pte, tpte;
+ struct spglist free;
+ vm_page_t m, ml3, mt;
+ pv_entry_t pv;
+ struct md_page *pvh;
+ struct pv_chunk *pc, *npc;
+ struct rwlock *lock;
+ int64_t bit;
+ uint64_t inuse, bitmask;
+ int allfree, field, freed, idx, lvl;
+ vm_paddr_t pa;
+
+ KASSERT(pmap == PCPU_GET(curpmap), ("non-current pmap %p", pmap));
+
+ lock = NULL;
+
+ SLIST_INIT(&free);
+ PMAP_LOCK(pmap);
+ TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
+ allfree = 1;
+ freed = 0;
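+ /*
+ * Scan the chunk's allocation bitmaps; each in-use bit
+ * identifies a PV entry, and hence a mapping, to destroy.
+ */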
+ for (field = 0; field < _NPCM; field++) {
+ inuse = ~pc->pc_map[field] & pc_freemask[field];
+ while (inuse != 0) {
+ bit = ffsl(inuse) - 1;
+ bitmask = 1UL << bit;
+ idx = field * 64 + bit;
+ pv = &pc->pc_pventry[idx];
+ inuse &= ~bitmask;
+
+ pde = pmap_pde(pmap, pv->pv_va, &lvl);
+ KASSERT(pde != NULL,
+ ("Attempting to remove an unmapped page"));
+
+ switch(lvl) {
+ case 1:
+ pte = pmap_l1_to_l2(pde, pv->pv_va);
+ tpte = pmap_load(pte);
+ KASSERT((tpte & ATTR_DESCR_MASK) ==
+ L2_BLOCK,
+ ("Attempting to remove an invalid "
+ "block: %lx", tpte));
+ break;
+ case 2:
+ pte = pmap_l2_to_l3(pde, pv->pv_va);
+ tpte = pmap_load(pte);
+ KASSERT((tpte & ATTR_DESCR_MASK) ==
+ L3_PAGE,
+ ("Attempting to remove an invalid "
+ "page: %lx", tpte));
+ break;
+ default:
+ panic(
+ "Invalid page directory level: %d",
+ lvl);
+ }
+
+/*
+ * We cannot remove wired pages from a process' mapping at this time
+ */
+ if (tpte & ATTR_SW_WIRED) {
+ allfree = 0;
+ continue;
+ }
+
+ pa = tpte & ~ATTR_MASK;
+
+ m = PHYS_TO_VM_PAGE(pa);
+ KASSERT(m->phys_addr == pa,
+ ("vm_page_t %p phys_addr mismatch %016jx %016jx",
+ m, (uintmax_t)m->phys_addr,
+ (uintmax_t)tpte));
+
+ KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
+ m < &vm_page_array[vm_page_array_size],
+ ("pmap_remove_pages: bad pte %#jx",
+ (uintmax_t)tpte));
+
+ /*
+ * Because this pmap is not active on other
+ * processors, the dirty bit cannot have
+ * changed state since we last loaded pte.
+ */
+ pmap_clear(pte);
+
+ /*
+ * Update the vm_page_t clean/reference bits.
+ */
+ if (pmap_pte_dirty(pmap, tpte)) {
+ switch (lvl) {
+ case 1:
+ for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
+ vm_page_dirty(mt);
+ break;
+ case 2:
+ vm_page_dirty(m);
+ break;
+ }
+ }
+
+ CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
+
+ /* Mark free */
+ pc->pc_map[field] |= bitmask;
+ switch (lvl) {
+ case 1:
+ pmap_resident_count_dec(pmap,
+ L2_SIZE / PAGE_SIZE);
+ pvh = pa_to_pvh(tpte & ~ATTR_MASK);
+ TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
+ pvh->pv_gen++;
+ if (TAILQ_EMPTY(&pvh->pv_list)) {
+ for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
+ if ((mt->a.flags & PGA_WRITEABLE) != 0 &&
+ TAILQ_EMPTY(&mt->md.pv_list))
+ vm_page_aflag_clear(mt, PGA_WRITEABLE);
+ }
+ ml3 = pmap_remove_pt_page(pmap,
+ pv->pv_va);
+ if (ml3 != NULL) {
+ KASSERT(ml3->valid == VM_PAGE_BITS_ALL,
+ ("pmap_remove_pages: l3 page not promoted"));
+ pmap_resident_count_dec(pmap, 1);
+ KASSERT(ml3->ref_count == NL3PG,
+ ("pmap_remove_pages: l3 page ref count error"));
+ ml3->ref_count = 0;
+ pmap_add_delayed_free_list(ml3,
+ &free, FALSE);
+ }
+ break;
+ case 2:
+ pmap_resident_count_dec(pmap, 1);
+ TAILQ_REMOVE(&m->md.pv_list, pv,
+ pv_next);
+ m->md.pv_gen++;
+ if ((m->a.flags & PGA_WRITEABLE) != 0 &&
+ TAILQ_EMPTY(&m->md.pv_list) &&
+ (m->flags & PG_FICTITIOUS) == 0) {
+ pvh = pa_to_pvh(
+ VM_PAGE_TO_PHYS(m));
+ if (TAILQ_EMPTY(&pvh->pv_list))
+ vm_page_aflag_clear(m,
+ PGA_WRITEABLE);
+ }
+ break;
+ }
+ pmap_unuse_pt(pmap, pv->pv_va, pmap_load(pde),
+ &free);
+ freed++;
+ }
+ }
+ PV_STAT(atomic_add_long(&pv_entry_frees, freed));
+ PV_STAT(atomic_add_int(&pv_entry_spare, freed));
+ PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
+ if (allfree) {
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ free_pv_chunk(pc);
+ }
+ }
+ if (lock != NULL)
+ rw_wunlock(lock);
+ pmap_invalidate_all(pmap);
+ PMAP_UNLOCK(pmap);
+ vm_page_free_pages_toq(&free, true);
+}
+
+/*
+ * This is used to check if a page has been accessed or modified.
+ */
+static boolean_t
+pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
+{
+ struct rwlock *lock;
+ pv_entry_t pv;
+ struct md_page *pvh;
+ pt_entry_t *pte, mask, value;
+ pmap_t pmap;
+ int lvl, md_gen, pvh_gen;
+ boolean_t rv;
+
+ rv = FALSE;
+ lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+ rw_rlock(lock);
+restart:
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
+ pmap = PV_PMAP(pv);
+ PMAP_ASSERT_STAGE1(pmap);
+ if (!PMAP_TRYLOCK(pmap)) {
+ md_gen = m->md.pv_gen;
+ rw_runlock(lock);
+ PMAP_LOCK(pmap);
+ rw_rlock(lock);
+ if (md_gen != m->md.pv_gen) {
+ PMAP_UNLOCK(pmap);
+ goto restart;
+ }
+ }
+ pte = pmap_pte(pmap, pv->pv_va, &lvl);
+ KASSERT(lvl == 3,
+ ("pmap_page_test_mappings: Invalid level %d", lvl));
+ mask = 0;
+ value = 0;
+ if (modified) {
+ mask |= ATTR_S1_AP_RW_BIT;
+ value |= ATTR_S1_AP(ATTR_S1_AP_RW);
+ }
+ if (accessed) {
+ mask |= ATTR_AF | ATTR_DESCR_MASK;
+ value |= ATTR_AF | L3_PAGE;
+ }
+ rv = (pmap_load(pte) & mask) == value;
+ PMAP_UNLOCK(pmap);
+ if (rv)
+ goto out;
+ }
+ if ((m->flags & PG_FICTITIOUS) == 0) {
+ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
+ pmap = PV_PMAP(pv);
+ PMAP_ASSERT_STAGE1(pmap);
+ if (!PMAP_TRYLOCK(pmap)) {
+ md_gen = m->md.pv_gen;
+ pvh_gen = pvh->pv_gen;
+ rw_runlock(lock);
+ PMAP_LOCK(pmap);
+ rw_rlock(lock);
+ if (md_gen != m->md.pv_gen ||
+ pvh_gen != pvh->pv_gen) {
+ PMAP_UNLOCK(pmap);
+ goto restart;
+ }
+ }
+ pte = pmap_pte(pmap, pv->pv_va, &lvl);
+ KASSERT(lvl == 2,
+ ("pmap_page_test_mappings: Invalid level %d", lvl));
+ mask = 0;
+ value = 0;
+ if (modified) {
+ mask |= ATTR_S1_AP_RW_BIT;
+ value |= ATTR_S1_AP(ATTR_S1_AP_RW);
+ }
+ if (accessed) {
+ mask |= ATTR_AF | ATTR_DESCR_MASK;
+ value |= ATTR_AF | L2_BLOCK;
+ }
+ rv = (pmap_load(pte) & mask) == value;
+ PMAP_UNLOCK(pmap);
+ if (rv)
+ goto out;
+ }
+ }
+out:
+ rw_runlock(lock);
+ return (rv);
+}
+
+/*
+ * pmap_is_modified:
+ *
+ * Return whether or not the specified physical page was modified
+ * in any physical maps.
+ */
+boolean_t
+pmap_is_modified(vm_page_t m)
+{
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_is_modified: page %p is not managed", m));
+
+ /*
+ * If the page is not busied then this check is racy.
+ */
+ if (!pmap_page_is_write_mapped(m))
+ return (FALSE);
+ return (pmap_page_test_mappings(m, FALSE, TRUE));
+}
+
+/*
+ * pmap_is_prefaultable:
+ *
+ * Return whether or not the specified virtual address is eligible
+ * for prefault.
+ */
+boolean_t
+pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
+{
+ pt_entry_t *pte;
+ boolean_t rv;
+ int lvl;
+
+ rv = FALSE;
+ PMAP_LOCK(pmap);
+ pte = pmap_pte(pmap, addr, &lvl);
+ if (pte != NULL && pmap_load(pte) != 0) {
+ rv = TRUE;
+ }
+ PMAP_UNLOCK(pmap);
+ return (rv);
+}
+
+/*
+ * pmap_is_referenced:
+ *
+ * Return whether or not the specified physical page was referenced
+ * in any physical maps.
+ */
+boolean_t
+pmap_is_referenced(vm_page_t m)
+{
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_is_referenced: page %p is not managed", m));
+ return (pmap_page_test_mappings(m, TRUE, FALSE));
+}
+
+/*
+ * Clear the write and modified bits in each of the given page's mappings.
+ */
+void
+pmap_remove_write(vm_page_t m)
+{
+ struct md_page *pvh;
+ pmap_t pmap;
+ struct rwlock *lock;
+ pv_entry_t next_pv, pv;
+ pt_entry_t oldpte, *pte;
+ vm_offset_t va;
+ int lvl, md_gen, pvh_gen;
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_remove_write: page %p is not managed", m));
+ vm_page_assert_busied(m);
+
+ if (!pmap_page_is_write_mapped(m))
+ return;
+ lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+ pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
+ pa_to_pvh(VM_PAGE_TO_PHYS(m));
+retry_pv_loop:
+ rw_wlock(lock);
+ TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
+ pmap = PV_PMAP(pv);
+ PMAP_ASSERT_STAGE1(pmap);
+ if (!PMAP_TRYLOCK(pmap)) {
+ pvh_gen = pvh->pv_gen;
+ rw_wunlock(lock);
+ PMAP_LOCK(pmap);
+ rw_wlock(lock);
+ if (pvh_gen != pvh->pv_gen) {
+ PMAP_UNLOCK(pmap);
+ rw_wunlock(lock);
+ goto retry_pv_loop;
+ }
+ }
+ va = pv->pv_va;
+ pte = pmap_pte(pmap, pv->pv_va, &lvl);
+ if ((pmap_load(pte) & ATTR_SW_DBM) != 0)
+ (void)pmap_demote_l2_locked(pmap, pte, va, &lock);
+ KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
+ ("inconsistent pv lock %p %p for page %p",
+ lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
+ PMAP_UNLOCK(pmap);
+ }
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
+ pmap = PV_PMAP(pv);
+ PMAP_ASSERT_STAGE1(pmap);
+ if (!PMAP_TRYLOCK(pmap)) {
+ pvh_gen = pvh->pv_gen;
+ md_gen = m->md.pv_gen;
+ rw_wunlock(lock);
+ PMAP_LOCK(pmap);
+ rw_wlock(lock);
+ if (pvh_gen != pvh->pv_gen ||
+ md_gen != m->md.pv_gen) {
+ PMAP_UNLOCK(pmap);
+ rw_wunlock(lock);
+ goto retry_pv_loop;
+ }
+ }
+ pte = pmap_pte(pmap, pv->pv_va, &lvl);
+ oldpte = pmap_load(pte);
+retry:
+ if ((oldpte & ATTR_SW_DBM) != 0) {
+ if (!atomic_fcmpset_long(pte, &oldpte,
+ (oldpte | ATTR_S1_AP_RW_BIT) & ~ATTR_SW_DBM))
+ goto retry;
+ if ((oldpte & ATTR_S1_AP_RW_BIT) ==
+ ATTR_S1_AP(ATTR_S1_AP_RW))
+ vm_page_dirty(m);
+ pmap_invalidate_page(pmap, pv->pv_va);
+ }
+ PMAP_UNLOCK(pmap);
+ }
+ rw_wunlock(lock);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
+}
+
+/*
+ * pmap_ts_referenced:
+ *
+ * Return a count of reference bits for a page, clearing those bits.
+ * It is not necessary for every reference bit to be cleared, but it
+ * is necessary that 0 only be returned when there are truly no
+ * reference bits set.
+ *
+ * As an optimization, update the page's dirty field if a modified bit is
+ * found while counting reference bits. This opportunistic update can be
+ * performed at low cost and can eliminate the need for some future calls
+ * to pmap_is_modified(). However, since this function stops after
+ * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
+ * dirty pages. Those dirty pages will only be detected by a future call
+ * to pmap_is_modified().
+ */
+int
+pmap_ts_referenced(vm_page_t m)
+{
+ struct md_page *pvh;
+ pv_entry_t pv, pvf;
+ pmap_t pmap;
+ struct rwlock *lock;
+ pd_entry_t *pde, tpde;
+ pt_entry_t *pte, tpte;
+ vm_offset_t va;
+ vm_paddr_t pa;
+ int cleared, lvl, md_gen, not_cleared, pvh_gen;
+ struct spglist free;
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_ts_referenced: page %p is not managed", m));
+ SLIST_INIT(&free);
+ cleared = 0;
+ pa = VM_PAGE_TO_PHYS(m);
+ lock = PHYS_TO_PV_LIST_LOCK(pa);
+ pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
+ rw_wlock(lock);
+retry:
+ not_cleared = 0;
+ if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
+ goto small_mappings;
+ pv = pvf;
+ do {
+ if (pvf == NULL)
+ pvf = pv;
+ pmap = PV_PMAP(pv);
+ if (!PMAP_TRYLOCK(pmap)) {
+ pvh_gen = pvh->pv_gen;
+ rw_wunlock(lock);
+ PMAP_LOCK(pmap);
+ rw_wlock(lock);
+ if (pvh_gen != pvh->pv_gen) {
+ PMAP_UNLOCK(pmap);
+ goto retry;
+ }
+ }
+ va = pv->pv_va;
+ pde = pmap_pde(pmap, pv->pv_va, &lvl);
+ KASSERT(pde != NULL, ("pmap_ts_referenced: no l1 table found"));
+ KASSERT(lvl == 1,
+ ("pmap_ts_referenced: invalid pde level %d", lvl));
+ tpde = pmap_load(pde);
+ KASSERT((tpde & ATTR_DESCR_MASK) == L1_TABLE,
+ ("pmap_ts_referenced: found an invalid l1 table"));
+ pte = pmap_l1_to_l2(pde, pv->pv_va);
+ tpte = pmap_load(pte);
+ if (pmap_pte_dirty(pmap, tpte)) {
+ /*
+ * Although "tpte" is mapping a 2MB page, because
+ * this function is called at a 4KB page granularity,
+ * we only update the 4KB page under test.
+ */
+ vm_page_dirty(m);
+ }
+
+ if ((tpte & ATTR_AF) != 0) {
+ /*
+ * Since this reference bit is shared by 512 4KB pages,
+ * it should not be cleared every time it is tested.
+ * Apply a simple "hash" function on the physical page
+ * number, the virtual superpage number, and the pmap
+ * address to select one 4KB page out of the 512 on
+ * which testing the reference bit will result in
+ * clearing that reference bit. This function is
+ * designed to avoid the selection of the same 4KB page
+ * for every 2MB page mapping.
+ *
+ * On demotion, a mapping that hasn't been referenced
+ * is simply destroyed. To avoid the possibility of a
+ * subsequent page fault on a demoted wired mapping,
+ * always leave its reference bit set. Moreover,
+ * since the superpage is wired, the current state of
+ * its reference bit won't affect page replacement.
+ */
+ if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> L2_SHIFT) ^
+ (uintptr_t)pmap) & (Ln_ENTRIES - 1)) == 0 &&
+ (tpte & ATTR_SW_WIRED) == 0) {
+ pmap_clear_bits(pte, ATTR_AF);
+ pmap_invalidate_page(pmap, pv->pv_va);
+ cleared++;
+ } else
+ not_cleared++;
+ }
+ PMAP_UNLOCK(pmap);
+ /* Rotate the PV list if it has more than one entry. */
+ if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
+ TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
+ TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
+ pvh->pv_gen++;
+ }
+ if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
+ goto out;
+ } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
+small_mappings:
+ if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
+ goto out;
+ pv = pvf;
+ do {
+ if (pvf == NULL)
+ pvf = pv;
+ pmap = PV_PMAP(pv);
+ if (!PMAP_TRYLOCK(pmap)) {
+ pvh_gen = pvh->pv_gen;
+ md_gen = m->md.pv_gen;
+ rw_wunlock(lock);
+ PMAP_LOCK(pmap);
+ rw_wlock(lock);
+ if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
+ PMAP_UNLOCK(pmap);
+ goto retry;
+ }
+ }
+ pde = pmap_pde(pmap, pv->pv_va, &lvl);
+ KASSERT(pde != NULL, ("pmap_ts_referenced: no l2 table found"));
+ KASSERT(lvl == 2,
+ ("pmap_ts_referenced: invalid pde level %d", lvl));
+ tpde = pmap_load(pde);
+ KASSERT((tpde & ATTR_DESCR_MASK) == L2_TABLE,
+ ("pmap_ts_referenced: found an invalid l2 table"));
+ pte = pmap_l2_to_l3(pde, pv->pv_va);
+ tpte = pmap_load(pte);
+ if (pmap_pte_dirty(pmap, tpte))
+ vm_page_dirty(m);
+ if ((tpte & ATTR_AF) != 0) {
+ if ((tpte & ATTR_SW_WIRED) == 0) {
+ pmap_clear_bits(pte, ATTR_AF);
+ pmap_invalidate_page(pmap, pv->pv_va);
+ cleared++;
+ } else
+ not_cleared++;
+ }
+ PMAP_UNLOCK(pmap);
+ /* Rotate the PV list if it has more than one entry. */
+ if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
+ TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
+ m->md.pv_gen++;
+ }
+ } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
+ not_cleared < PMAP_TS_REFERENCED_MAX);
+out:
+ rw_wunlock(lock);
+ vm_page_free_pages_toq(&free, true);
+ return (cleared + not_cleared);
+}
+
+/*
+ * Apply the given advice to the specified range of addresses within the
+ * given pmap. Depending on the advice, clear the referenced and/or
+ * modified flags in each mapping and set the mapped page's dirty field.
+ */
+void
+pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
+{
+ struct rwlock *lock;
+ vm_offset_t va, va_next;
+ vm_page_t m;
+ pd_entry_t *l0, *l1, *l2, oldl2;
+ pt_entry_t *l3, oldl3;
+
+ PMAP_ASSERT_STAGE1(pmap);
+
+ if (advice != MADV_DONTNEED && advice != MADV_FREE)
+ return;
+
+ PMAP_LOCK(pmap);
+ for (; sva < eva; sva = va_next) {
+ l0 = pmap_l0(pmap, sva);
+ if (pmap_load(l0) == 0) {
+ va_next = (sva + L0_SIZE) & ~L0_OFFSET;
+ if (va_next < sva)
+ va_next = eva;
+ continue;
+ }
+ l1 = pmap_l0_to_l1(l0, sva);
+ if (pmap_load(l1) == 0) {
+ va_next = (sva + L1_SIZE) & ~L1_OFFSET;
+ if (va_next < sva)
+ va_next = eva;
+ continue;
+ }
+ va_next = (sva + L2_SIZE) & ~L2_OFFSET;
+ if (va_next < sva)
+ va_next = eva;
+ l2 = pmap_l1_to_l2(l1, sva);
+ oldl2 = pmap_load(l2);
+ if (oldl2 == 0)
+ continue;
+ if ((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK) {
+ if ((oldl2 & ATTR_SW_MANAGED) == 0)
+ continue;
+ lock = NULL;
+ if (!pmap_demote_l2_locked(pmap, l2, sva, &lock)) {
+ if (lock != NULL)
+ rw_wunlock(lock);
+
+ /*
+ * The 2MB page mapping was destroyed.
+ */
+ continue;
+ }
+
+ /*
+ * Unless the page mappings are wired, remove the
+ * mapping to a single page so that a subsequent
+ * access may repromote. Choosing the last page
+ * within the address range [sva, min(va_next, eva))
+ * generally results in more repromotions. Since the
+ * underlying page table page is fully populated, this
+ * removal never frees a page table page.
+ */
+ if ((oldl2 & ATTR_SW_WIRED) == 0) {
+ va = eva;
+ if (va > va_next)
+ va = va_next;
+ va -= PAGE_SIZE;
+ KASSERT(va >= sva,
+ ("pmap_advise: no address gap"));
+ l3 = pmap_l2_to_l3(l2, va);
+ KASSERT(pmap_load(l3) != 0,
+ ("pmap_advise: invalid PTE"));
+ pmap_remove_l3(pmap, l3, va, pmap_load(l2),
+ NULL, &lock);
+ }
+ if (lock != NULL)
+ rw_wunlock(lock);
+ }
+ KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
+ ("pmap_advise: invalid L2 entry after demotion"));
+ if (va_next > eva)
+ va_next = eva;
+ va = va_next;
+ for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
+ sva += L3_SIZE) {
+ oldl3 = pmap_load(l3);
+ if ((oldl3 & (ATTR_SW_MANAGED | ATTR_DESCR_MASK)) !=
+ (ATTR_SW_MANAGED | L3_PAGE))
+ goto maybe_invlrng;
+ else if (pmap_pte_dirty(pmap, oldl3)) {
+ if (advice == MADV_DONTNEED) {
+ /*
+ * Future calls to pmap_is_modified()
+ * can be avoided by making the page
+ * dirty now.
+ */
+ m = PHYS_TO_VM_PAGE(oldl3 & ~ATTR_MASK);
+ vm_page_dirty(m);
+ }
+ while (!atomic_fcmpset_long(l3, &oldl3,
+ (oldl3 & ~ATTR_AF) |
+ ATTR_S1_AP(ATTR_S1_AP_RO)))
+ cpu_spinwait();
+ } else if ((oldl3 & ATTR_AF) != 0)
+ pmap_clear_bits(l3, ATTR_AF);
+ else
+ goto maybe_invlrng;
+ if (va == va_next)
+ va = sva;
+ continue;
+maybe_invlrng:
+ if (va != va_next) {
+ pmap_invalidate_range(pmap, va, sva);
+ va = va_next;
+ }
+ }
+ if (va != va_next)
+ pmap_invalidate_range(pmap, va, sva);
+ }
+ PMAP_UNLOCK(pmap);
+}
+
+/*
+ * Clear the modify bits on the specified physical page.
+ */
+void
+pmap_clear_modify(vm_page_t m)
+{
+ struct md_page *pvh;
+ struct rwlock *lock;
+ pmap_t pmap;
+ pv_entry_t next_pv, pv;
+ pd_entry_t *l2, oldl2;
+ pt_entry_t *l3, oldl3;
+ vm_offset_t va;
+ int md_gen, pvh_gen;
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_clear_modify: page %p is not managed", m));
+ vm_page_assert_busied(m);
+
+ if (!pmap_page_is_write_mapped(m))
+ return;
+ pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
+ pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+ rw_wlock(lock);
+restart:
+ TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
+ pmap = PV_PMAP(pv);
+ PMAP_ASSERT_STAGE1(pmap);
+ if (!PMAP_TRYLOCK(pmap)) {
+ pvh_gen = pvh->pv_gen;
+ rw_wunlock(lock);
+ PMAP_LOCK(pmap);
+ rw_wlock(lock);
+ if (pvh_gen != pvh->pv_gen) {
+ PMAP_UNLOCK(pmap);
+ goto restart;
+ }
+ }
+ va = pv->pv_va;
+ l2 = pmap_l2(pmap, va);
+ oldl2 = pmap_load(l2);
+ /* If oldl2 has ATTR_SW_DBM set, then it is also dirty. */
+ if ((oldl2 & ATTR_SW_DBM) != 0 &&
+ pmap_demote_l2_locked(pmap, l2, va, &lock) &&
+ (oldl2 & ATTR_SW_WIRED) == 0) {
+ /*
+ * Write protect the mapping to a single page so that
+ * a subsequent write access may repromote.
+ */
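+ /* Find the 4KB page within the former 2MB mapping that maps "m". */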
+ va += VM_PAGE_TO_PHYS(m) - (oldl2 & ~ATTR_MASK);
+ l3 = pmap_l2_to_l3(l2, va);
+ oldl3 = pmap_load(l3);
+ while (!atomic_fcmpset_long(l3, &oldl3,
+ (oldl3 & ~ATTR_SW_DBM) | ATTR_S1_AP(ATTR_S1_AP_RO)))
+ cpu_spinwait();
+ vm_page_dirty(m);
+ pmap_invalidate_page(pmap, va);
+ }
+ PMAP_UNLOCK(pmap);
+ }
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
+ pmap = PV_PMAP(pv);
+ PMAP_ASSERT_STAGE1(pmap);
+ if (!PMAP_TRYLOCK(pmap)) {
+ md_gen = m->md.pv_gen;
+ pvh_gen = pvh->pv_gen;
+ rw_wunlock(lock);
+ PMAP_LOCK(pmap);
+ rw_wlock(lock);
+ if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
+ PMAP_UNLOCK(pmap);
+ goto restart;
+ }
+ }
+ l2 = pmap_l2(pmap, pv->pv_va);
+ l3 = pmap_l2_to_l3(l2, pv->pv_va);
+ oldl3 = pmap_load(l3);
+ if (pmap_l3_valid(oldl3) &&
+ (oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) == ATTR_SW_DBM){
+ pmap_set_bits(l3, ATTR_S1_AP(ATTR_S1_AP_RO));
+ pmap_invalidate_page(pmap, pv->pv_va);
+ }
+ PMAP_UNLOCK(pmap);
+ }
+ rw_wunlock(lock);
+}
+
+void *
+pmap_mapbios(vm_paddr_t pa, vm_size_t size)
+{
+ struct pmap_preinit_mapping *ppim;
+ vm_offset_t va, offset;
+ pd_entry_t *pde;
+ pt_entry_t *l2;
+ int i, lvl, l2_blocks, free_l2_count, start_idx;
+
+ if (!vm_initialized) {
+ /*
+ * No L3 page tables are available yet, so map entire L2 blocks; the start VA is:
+ * preinit_map_va + start_idx * L2_SIZE
+ * There may be duplicate mappings (multiple VA -> same PA) but
+ * ARM64 dcache is always PIPT so that's acceptable.
+ */
+ if (size == 0)
+ return (NULL);
+
+ /* Calculate how many L2 blocks are needed for the mapping */
+ l2_blocks = (roundup2(pa + size, L2_SIZE) -
+ rounddown2(pa, L2_SIZE)) >> L2_SHIFT;
+
+ offset = pa & L2_OFFSET;
+
+ if (preinit_map_va == 0)
+ return (NULL);
+
+ /* Map 2MiB L2 blocks from reserved VA space */
+
+ free_l2_count = 0;
+ start_idx = -1;
+ /* Find enough free contiguous VA space */
+ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
+ ppim = pmap_preinit_mapping + i;
+ if (free_l2_count > 0 && ppim->pa != 0) {
+ /* Not enough space here */
+ free_l2_count = 0;
+ start_idx = -1;
+ continue;
+ }
+
+ if (ppim->pa == 0) {
+ /* Free L2 block */
+ if (start_idx == -1)
+ start_idx = i;
+ free_l2_count++;
+ if (free_l2_count == l2_blocks)
+ break;
+ }
+ }
+ if (free_l2_count != l2_blocks)
+ panic("%s: too many preinit mappings", __func__);
+
+ va = preinit_map_va + (start_idx * L2_SIZE);
+ for (i = start_idx; i < start_idx + l2_blocks; i++) {
+ /* Mark entries as allocated */
+ ppim = pmap_preinit_mapping + i;
+ ppim->pa = pa;
+ ppim->va = va + offset;
+ ppim->size = size;
+ }
+
+ /* Map L2 blocks */
+ pa = rounddown2(pa, L2_SIZE);
+ for (i = 0; i < l2_blocks; i++) {
+ pde = pmap_pde(kernel_pmap, va, &lvl);
+ KASSERT(pde != NULL,
+ ("pmap_mapbios: Invalid page entry, va: 0x%lx",
+ va));
+ KASSERT(lvl == 1,
+ ("pmap_mapbios: Invalid level %d", lvl));
+
+ /* Insert L2_BLOCK */
+ l2 = pmap_l1_to_l2(pde, va);
+ pmap_load_store(l2,
+ pa | ATTR_DEFAULT | ATTR_S1_XN |
+ ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
+
+ va += L2_SIZE;
+ pa += L2_SIZE;
+ }
+ pmap_invalidate_all(kernel_pmap);
+
+ va = preinit_map_va + (start_idx * L2_SIZE);
+
+ } else {
+ /* kva_alloc may be used to map the pages */
+ offset = pa & PAGE_MASK;
+ size = round_page(offset + size);
+
+ va = kva_alloc(size);
+ if (va == 0)
+ panic("%s: Couldn't allocate KVA", __func__);
+
+ pde = pmap_pde(kernel_pmap, va, &lvl);
+ KASSERT(lvl == 2, ("pmap_mapbios: Invalid level %d", lvl));
+
+ /* L3 table is linked */
+ va = trunc_page(va);
+ pa = trunc_page(pa);
+ pmap_kenter(va, size, pa, memory_mapping_mode(pa));
+ }
+
+ return ((void *)(va + offset));
+}
+
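+/*
+ * Undo a mapping made by pmap_mapbios(): release any preinit L2 block
+ * mappings that cover the range, or otherwise unmap and free the KVA
+ * that was obtained with kva_alloc().
+ */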
+void
+pmap_unmapbios(vm_offset_t va, vm_size_t size)
+{
+ struct pmap_preinit_mapping *ppim;
+ vm_offset_t offset, tmpsize, va_trunc;
+ pd_entry_t *pde;
+ pt_entry_t *l2;
+ int i, lvl, l2_blocks, block;
+ bool preinit_map;
+
+ l2_blocks =
+ (roundup2(va + size, L2_SIZE) - rounddown2(va, L2_SIZE)) >> L2_SHIFT;
+ KASSERT(l2_blocks > 0, ("pmap_unmapbios: invalid size %lx", size));
+
+ /* Remove preinit mapping */
+ preinit_map = false;
+ block = 0;
+ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
+ ppim = pmap_preinit_mapping + i;
+ if (ppim->va == va) {
+ KASSERT(ppim->size == size,
+ ("pmap_unmapbios: size mismatch"));
+ ppim->va = 0;
+ ppim->pa = 0;
+ ppim->size = 0;
+ preinit_map = true;
+ offset = block * L2_SIZE;
+ va_trunc = rounddown2(va, L2_SIZE) + offset;
+
+ /* Remove L2_BLOCK */
+ pde = pmap_pde(kernel_pmap, va_trunc, &lvl);
+ KASSERT(pde != NULL,
+ ("pmap_unmapbios: Invalid page entry, va: 0x%lx",
+ va_trunc));
+ l2 = pmap_l1_to_l2(pde, va_trunc);
+ pmap_clear(l2);
+
+ if (block == (l2_blocks - 1))
+ break;
+ block++;
+ }
+ }
+ if (preinit_map) {
+ pmap_invalidate_all(kernel_pmap);
+ return;
+ }
+
+ /* Unmap the pages reserved with kva_alloc. */
+ if (vm_initialized) {
+ offset = va & PAGE_MASK;
+ size = round_page(offset + size);
+ va = trunc_page(va);
+
+ pde = pmap_pde(kernel_pmap, va, &lvl);
+ KASSERT(pde != NULL,
+ ("pmap_unmapbios: Invalid page entry, va: 0x%lx", va));
+ KASSERT(lvl == 2, ("pmap_unmapbios: Invalid level %d", lvl));
+
+ /* Unmap and invalidate the pages */
+ for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
+ pmap_kremove(va + tmpsize);
+
+ kva_free(va, size);
+ }
+}
+
+/*
+ * Sets the memory attribute for the specified page.
+ */
+void
+pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
+{
+
+ m->md.pv_memattr = ma;
+
+ /*
+ * If "m" is a normal page, update its direct mapping. This update
+ * can be relied upon to perform any cache operations that are
+ * required for data coherence.
+ */
+ if ((m->flags & PG_FICTITIOUS) == 0 &&
+ pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
+ m->md.pv_memattr) != 0)
+ panic("memory attribute change on the direct map failed");
+}
+
+/*
+ * Changes the specified virtual address range's memory type to that given by
+ * the parameter "mode". The specified virtual address range must be
+ * completely contained within either the direct map or the kernel map. If
+ * the virtual address range is contained within the kernel map, then the
+ * memory type for each of the corresponding ranges of the direct map is also
+ * changed. (The corresponding ranges of the direct map are those ranges that
+ * map the same physical pages as the specified virtual address range.) These
+ * changes to the direct map are necessary because having two or more
+ * mappings of the same physical page with mismatched memory attributes can
+ * result in a loss of coherency and unpredictable behavior.
+ *
+ * Returns zero if the change completed successfully, and either EINVAL or
+ * ENOMEM if the change failed. Specifically, EINVAL is returned if some part
+ * of the virtual address range was not mapped, and ENOMEM is returned if
+ * there was insufficient memory available to complete the change. In the
+ * latter case, the memory type may have been changed on some part of the
+ * virtual address range or the direct map.
+ */
+int
+pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
+{
+ int error;
+
+ PMAP_LOCK(kernel_pmap);
+ error = pmap_change_attr_locked(va, size, mode);
+ PMAP_UNLOCK(kernel_pmap);
+ return (error);
+}
+
+static int
+pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
+{
+ vm_offset_t base, offset, tmpva;
+ pt_entry_t l3, *pte, *newpte;
+ int lvl;
+
+ PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
+ base = trunc_page(va);
+ offset = va & PAGE_MASK;
+ size = round_page(offset + size);
+
+ if (!VIRT_IN_DMAP(base) &&
+ !(base >= VM_MIN_KERNEL_ADDRESS && base < VM_MAX_KERNEL_ADDRESS))
+ return (EINVAL);
+
+ for (tmpva = base; tmpva < base + size; ) {
+ pte = pmap_pte(kernel_pmap, tmpva, &lvl);
+ if (pte == NULL)
+ return (EINVAL);
+
+ if ((pmap_load(pte) & ATTR_S1_IDX_MASK) == ATTR_S1_IDX(mode)) {
+ /*
+ * We already have the correct attribute,
+ * ignore this entry.
+ */
+ switch (lvl) {
+ default:
+ panic("Invalid DMAP table level: %d\n", lvl);
+ case 1:
+ tmpva = (tmpva & ~L1_OFFSET) + L1_SIZE;
+ break;
+ case 2:
+ tmpva = (tmpva & ~L2_OFFSET) + L2_SIZE;
+ break;
+ case 3:
+ tmpva += PAGE_SIZE;
+ break;
+ }
+ } else {
+ /*
+ * Split the entry into a level 3 table, then
+ * set the new attribute; each case falls through
+ * to the next, lower level.
+ */
+ switch (lvl) {
+ default:
+ panic("Invalid DMAP table level: %d\n", lvl);
+ case 1:
+ newpte = pmap_demote_l1(kernel_pmap, pte,
+ tmpva & ~L1_OFFSET);
+ if (newpte == NULL)
+ return (EINVAL);
+ pte = pmap_l1_to_l2(pte, tmpva);
+ case 2:
+ newpte = pmap_demote_l2(kernel_pmap, pte,
+ tmpva);
+ if (newpte == NULL)
+ return (EINVAL);
+ pte = pmap_l2_to_l3(pte, tmpva);
+ case 3:
+ /* Update the entry */
+ l3 = pmap_load(pte);
+ l3 &= ~ATTR_S1_IDX_MASK;
+ l3 |= ATTR_S1_IDX(mode);
+ if (mode == VM_MEMATTR_DEVICE)
+ l3 |= ATTR_S1_XN;
+
+ pmap_update_entry(kernel_pmap, pte, l3, tmpva,
+ PAGE_SIZE);
+
+ /*
+ * If moving to a non-cacheable entry flush
+ * the cache.
+ */
+ if (mode == VM_MEMATTR_UNCACHEABLE)
+ cpu_dcache_wbinv_range(tmpva, L3_SIZE);
+
+ break;
+ }
+ tmpva += PAGE_SIZE;
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * Create an L2 table to map all addresses within an L1 mapping.
+ */
+static pt_entry_t *
+pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va)
+{
+ pt_entry_t *l2, newl2, oldl1;
+ vm_offset_t tmpl1;
+ vm_paddr_t l2phys, phys;
+ vm_page_t ml2;
+ int i;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ oldl1 = pmap_load(l1);
+ KASSERT((oldl1 & ATTR_DESCR_MASK) == L1_BLOCK,
+ ("pmap_demote_l1: Demoting a non-block entry"));
+ KASSERT((va & L1_OFFSET) == 0,
+ ("pmap_demote_l1: Invalid virtual address %#lx", va));
+ KASSERT((oldl1 & ATTR_SW_MANAGED) == 0,
+ ("pmap_demote_l1: Level 1 table shouldn't be managed"));
+
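+ /*
+ * If the address range covered by the L1 block maps the L1 table
+ * itself, a temporary mapping of the table is needed so it stays
+ * accessible while its entry is replaced (see the pmap_kenter()
+ * below).
+ */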
+ tmpl1 = 0;
+ if (va <= (vm_offset_t)l1 && va + L1_SIZE > (vm_offset_t)l1) {
+ tmpl1 = kva_alloc(PAGE_SIZE);
+ if (tmpl1 == 0)
+ return (NULL);
+ }
+
+ if ((ml2 = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
+ VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
+ CTR2(KTR_PMAP, "pmap_demote_l1: failure for va %#lx"
+ " in pmap %p", va, pmap);
+ return (NULL);
+ }
+
+ l2phys = VM_PAGE_TO_PHYS(ml2);
+ l2 = (pt_entry_t *)PHYS_TO_DMAP(l2phys);
+
+ /* The physical address the L1 block points at */
+ phys = oldl1 & ~ATTR_MASK;
+ /* The attributes from the old l1 entry to be copied */
+ newl2 = oldl1 & ATTR_MASK;
+
+ /* Create the new entries */
+ for (i = 0; i < Ln_ENTRIES; i++) {
+ l2[i] = newl2 | phys;
+ phys += L2_SIZE;
+ }
+ KASSERT(l2[0] == ((oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK),
+ ("Invalid l2 page (%lx != %lx)", l2[0],
+ (oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK));
+
+ if (tmpl1 != 0) {
+ pmap_kenter(tmpl1, PAGE_SIZE,
+ DMAP_TO_PHYS((vm_offset_t)l1) & ~L3_OFFSET,
+ VM_MEMATTR_WRITE_BACK);
+ l1 = (pt_entry_t *)(tmpl1 + ((vm_offset_t)l1 & PAGE_MASK));
+ }
+
+ pmap_update_entry(pmap, l1, l2phys | L1_TABLE, va, PAGE_SIZE);
+
+ if (tmpl1 != 0) {
+ pmap_kremove(tmpl1);
+ kva_free(tmpl1, PAGE_SIZE);
+ }
+
+ return (l2);
+}
+
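+/*
+ * Populate an L3 page table: each of its Ln_ENTRIES entries maps the next
+ * 4KB (L3_SIZE) of physical memory, starting from the address and using the
+ * attributes encoded in "newl3".
+ */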
+static void
+pmap_fill_l3(pt_entry_t *firstl3, pt_entry_t newl3)
+{
+ pt_entry_t *l3;
+
+ for (l3 = firstl3; l3 - firstl3 < Ln_ENTRIES; l3++) {
+ *l3 = newl3;
+ newl3 += L3_SIZE;
+ }
+}
+
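+/*
+ * Helper for pmap_demote_l2_locked(): destroy the 2MB mapping, and free any
+ * pages released by the removal, when a demotion cannot proceed.
+ */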
+static void
+pmap_demote_l2_abort(pmap_t pmap, vm_offset_t va, pt_entry_t *l2,
+ struct rwlock **lockp)
+{
+ struct spglist free;
+
+ SLIST_INIT(&free);
+ (void)pmap_remove_l2(pmap, l2, va, pmap_load(pmap_l1(pmap, va)), &free,
+ lockp);
+ vm_page_free_pages_toq(&free, true);
+}
+
+/*
+ * Create an L3 table to map all addresses within an L2 mapping.
+ */
+static pt_entry_t *
+pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
+ struct rwlock **lockp)
+{
+ pt_entry_t *l3, newl3, oldl2;
+ vm_offset_t tmpl2;
+ vm_paddr_t l3phys;
+ vm_page_t ml3;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ PMAP_ASSERT_STAGE1(pmap);
+ l3 = NULL;
+ oldl2 = pmap_load(l2);
+ KASSERT((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK,
+ ("pmap_demote_l2: Demoting a non-block entry"));
+ va &= ~L2_OFFSET;
+
+ tmpl2 = 0;
+ if (va <= (vm_offset_t)l2 && va + L2_SIZE > (vm_offset_t)l2) {
+ tmpl2 = kva_alloc(PAGE_SIZE);
+ if (tmpl2 == 0)
+ return (NULL);
+ }
+
+ /*
+ * Invalidate the 2MB page mapping and return "failure" if the
+ * mapping was never accessed.
+ */
+ if ((oldl2 & ATTR_AF) == 0) {
+ KASSERT((oldl2 & ATTR_SW_WIRED) == 0,
+ ("pmap_demote_l2: a wired mapping is missing ATTR_AF"));
+ pmap_demote_l2_abort(pmap, va, l2, lockp);
+ CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx in pmap %p",
+ va, pmap);
+ goto fail;
+ }
+
+ if ((ml3 = pmap_remove_pt_page(pmap, va)) == NULL) {
+ KASSERT((oldl2 & ATTR_SW_WIRED) == 0,
+ ("pmap_demote_l2: page table page for a wired mapping"
+ " is missing"));
+
+ /*
+ * If the page table page is missing and the mapping
+ * is for a kernel address, the mapping must belong to
+ * the direct map. Page table pages are preallocated
+ * for every other part of the kernel address space,
+ * so the direct map region is the only part of the
+ * kernel address space that must be handled here.
+ */
+ KASSERT(va < VM_MAXUSER_ADDRESS || VIRT_IN_DMAP(va),
+ ("pmap_demote_l2: No saved mpte for va %#lx", va));
+
+ /*
+ * If the 2MB page mapping belongs to the direct map
+ * region of the kernel's address space, then the page
+ * allocation request specifies the highest possible
+ * priority (VM_ALLOC_INTERRUPT). Otherwise, the
+ * priority is normal.
+ */
+ ml3 = vm_page_alloc(NULL, pmap_l2_pindex(va),
+ (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
+ VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
+
+ /*
+ * If the allocation of the new page table page fails,
+ * invalidate the 2MB page mapping and return "failure".
+ */
+ if (ml3 == NULL) {
+ pmap_demote_l2_abort(pmap, va, l2, lockp);
+ CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx"
+ " in pmap %p", va, pmap);
+ goto fail;
+ }
+
+ if (va < VM_MAXUSER_ADDRESS) {
+ ml3->ref_count = NL3PG;
+ pmap_resident_count_inc(pmap, 1);
+ }
+ }
+ l3phys = VM_PAGE_TO_PHYS(ml3);
+ l3 = (pt_entry_t *)PHYS_TO_DMAP(l3phys);
+ newl3 = (oldl2 & ~ATTR_DESCR_MASK) | L3_PAGE;
+ KASSERT((oldl2 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) !=
+ (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM),
+ ("pmap_demote_l2: L2 entry is writeable but not dirty"));
+
+ /*
+ * If the page table page is not leftover from an earlier promotion,
+ * or the mapping attributes have changed, (re)initialize the L3 table.
+ *
+ * When pmap_update_entry() clears the old L2 mapping, it (indirectly)
+ * performs a dsb(). That dsb() ensures that the stores for filling
+ * "l3" are visible before "l3" is added to the page table.
+ */
+ if (ml3->valid == 0 || (l3[0] & ATTR_MASK) != (newl3 & ATTR_MASK))
+ pmap_fill_l3(l3, newl3);
+
+ /*
+ * Map the temporary page so we don't lose access to the l2 table.
+ */
+ if (tmpl2 != 0) {
+ pmap_kenter(tmpl2, PAGE_SIZE,
+ DMAP_TO_PHYS((vm_offset_t)l2) & ~L3_OFFSET,
+ VM_MEMATTR_WRITE_BACK);
+ l2 = (pt_entry_t *)(tmpl2 + ((vm_offset_t)l2 & PAGE_MASK));
+ }
+
+ /*
+ * The spare PV entries must be reserved prior to demoting the
+ * mapping, that is, prior to changing the PDE. Otherwise, the state
+ * of the L2 and the PV lists will be inconsistent, which can result
+ * in reclaim_pv_chunk() attempting to remove a PV entry from the
+ * wrong PV list and pmap_pv_demote_l2() failing to find the expected
+ * PV entry for the 2MB page mapping that is being demoted.
+ */
+ if ((oldl2 & ATTR_SW_MANAGED) != 0)
+ reserve_pv_entries(pmap, Ln_ENTRIES - 1, lockp);
+
+ /*
+ * Pass PAGE_SIZE so that a single TLB invalidation is performed on
+ * the 2MB page mapping.
+ */
+ pmap_update_entry(pmap, l2, l3phys | L2_TABLE, va, PAGE_SIZE);
+
+ /*
+ * Demote the PV entry.
+ */
+ if ((oldl2 & ATTR_SW_MANAGED) != 0)
+ pmap_pv_demote_l2(pmap, va, oldl2 & ~ATTR_MASK, lockp);
+
+ atomic_add_long(&pmap_l2_demotions, 1);
+ CTR3(KTR_PMAP, "pmap_demote_l2: success for va %#lx"
+ " in pmap %p %lx", va, pmap, l3[0]);
+
+fail:
+ if (tmpl2 != 0) {
+ pmap_kremove(tmpl2);
+ kva_free(tmpl2, PAGE_SIZE);
+ }
+
+ return (l3);
+}
+
+static pt_entry_t *
+pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
+{
+ struct rwlock *lock;
+ pt_entry_t *l3;
+
+ lock = NULL;
+ l3 = pmap_demote_l2_locked(pmap, l2, va, &lock);
+ if (lock != NULL)
+ rw_wunlock(lock);
+ return (l3);
+}
+
+/*
+ * Perform the pmap work for mincore(2). If the page is not both referenced and
+ * modified by this pmap, returns its physical address so that the caller can
+ * find other mappings.
+ */
+int
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
+{
+ pt_entry_t *pte, tpte;
+ vm_paddr_t mask, pa;
+ int lvl, val;
+ bool managed;
+
+ PMAP_ASSERT_STAGE1(pmap);
+ PMAP_LOCK(pmap);
+ pte = pmap_pte(pmap, addr, &lvl);
+ if (pte != NULL) {
+ tpte = pmap_load(pte);
+
+ switch (lvl) {
+ case 3:
+ mask = L3_OFFSET;
+ break;
+ case 2:
+ mask = L2_OFFSET;
+ break;
+ case 1:
+ mask = L1_OFFSET;
+ break;
+ default:
+ panic("pmap_mincore: invalid level %d", lvl);
+ }
+
+ managed = (tpte & ATTR_SW_MANAGED) != 0;
+ val = MINCORE_INCORE;
+ if (lvl != 3)
+ val |= MINCORE_PSIND(3 - lvl);
+ if ((managed && pmap_pte_dirty(pmap, tpte)) || (!managed &&
+ (tpte & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP(ATTR_S1_AP_RW)))
+ val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
+ if ((tpte & ATTR_AF) == ATTR_AF)
+ val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
+
+ pa = (tpte & ~ATTR_MASK) | (addr & mask);
+ } else {
+ managed = false;
+ val = 0;
+ }
+
+ if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
+ (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
+ *pap = pa;
+ }
+ PMAP_UNLOCK(pmap);
+ return (val);
+}
+
+/*
+ * Garbage collect every ASID that is neither active on a processor nor
+ * reserved.
+ */
+static void
+pmap_reset_asid_set(pmap_t pmap)
+{
+ pmap_t curpmap;
+ int asid, cpuid, epoch;
+ struct asid_set *set;
+ enum pmap_stage stage;
+
+ set = pmap->pm_asid_set;
+ stage = pmap->pm_stage;
+
+ KASSERT(set != NULL, ("%s: NULL asid set", __func__));
+ mtx_assert(&set->asid_set_mutex, MA_OWNED);
+
+ /*
+ * Ensure that the store to asid_epoch is globally visible before the
+ * loads from pc_curpmap are performed.
+ */
+ epoch = set->asid_epoch + 1;
+ if (epoch == INT_MAX)
+ epoch = 0;
+ set->asid_epoch = epoch;
+ dsb(ishst);
+ if (stage == PM_STAGE1) {
+ __asm __volatile("tlbi vmalle1is");
+ } else {
+ KASSERT(pmap_clean_stage2_tlbi != NULL,
+ ("%s: Unset stage 2 tlb invalidation callback\n",
+ __func__));
+ pmap_clean_stage2_tlbi();
+ }
+ dsb(ish);
+ bit_nclear(set->asid_set, ASID_FIRST_AVAILABLE,
+ set->asid_set_size - 1);
+ CPU_FOREACH(cpuid) {
+ if (cpuid == curcpu)
+ continue;
+ if (stage == PM_STAGE1) {
+ curpmap = pcpu_find(cpuid)->pc_curpmap;
+ PMAP_ASSERT_STAGE1(pmap);
+ } else {
+ curpmap = pcpu_find(cpuid)->pc_curvmpmap;
+ if (curpmap == NULL)
+ continue;
+ PMAP_ASSERT_STAGE2(pmap);
+ }
+ KASSERT(curpmap->pm_asid_set == set, ("Incorrect set"));
+ asid = COOKIE_TO_ASID(curpmap->pm_cookie);
+ if (asid == -1)
+ continue;
+ bit_set(set->asid_set, asid);
+ curpmap->pm_cookie = COOKIE_FROM(asid, epoch);
+ }
+}
+
+/*
+ * Allocate a new ASID for the specified pmap.
+ */
+static void
+pmap_alloc_asid(pmap_t pmap)
+{
+ struct asid_set *set;
+ int new_asid;
+
+ set = pmap->pm_asid_set;
+ KASSERT(set != NULL, ("%s: NULL asid set", __func__));
+
+ mtx_lock_spin(&set->asid_set_mutex);
+
+ /*
+ * While this processor was waiting to acquire the asid set mutex,
+ * pmap_reset_asid_set() running on another processor might have
+ * updated this pmap's cookie to the current epoch. In which case, we
+ * don't need to allocate a new ASID.
+ */
+ if (COOKIE_TO_EPOCH(pmap->pm_cookie) == set->asid_epoch)
+ goto out;
+
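+	/*
+	 * Search for a free ASID starting at asid_next.  If that fails, wrap
+	 * around and search from the first available ASID.  If both passes
+	 * fail, every ASID is in use, so reset the set and search again.
+	 */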
+ bit_ffc_at(set->asid_set, set->asid_next, set->asid_set_size,
+ &new_asid);
+ if (new_asid == -1) {
+ bit_ffc_at(set->asid_set, ASID_FIRST_AVAILABLE,
+ set->asid_next, &new_asid);
+ if (new_asid == -1) {
+ pmap_reset_asid_set(pmap);
+ bit_ffc_at(set->asid_set, ASID_FIRST_AVAILABLE,
+ set->asid_set_size, &new_asid);
+ KASSERT(new_asid != -1, ("ASID allocation failure"));
+ }
+ }
+ bit_set(set->asid_set, new_asid);
+ set->asid_next = new_asid + 1;
+ pmap->pm_cookie = COOKIE_FROM(new_asid, set->asid_epoch);
+out:
+ mtx_unlock_spin(&set->asid_set_mutex);
+}
+
+/*
+ * Compute the value that should be stored in ttbr0 to activate the specified
+ * pmap. This value may change from time to time.
+ */
+uint64_t
+pmap_to_ttbr0(pmap_t pmap)
+{
+
+ return (ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)) |
+ pmap->pm_l0_paddr);
+}
+
+static bool
+pmap_activate_int(pmap_t pmap)
+{
+ struct asid_set *set;
+ int epoch;
+
+ KASSERT(PCPU_GET(curpmap) != NULL, ("no active pmap"));
+ KASSERT(pmap != kernel_pmap, ("kernel pmap activation"));
+
+ if ((pmap->pm_stage == PM_STAGE1 && pmap == PCPU_GET(curpmap)) ||
+ (pmap->pm_stage == PM_STAGE2 && pmap == PCPU_GET(curvmpmap))) {
+ /*
+ * Handle the possibility that the old thread was preempted
+ * after an "ic" or "tlbi" instruction but before it performed
+ * a "dsb" instruction. If the old thread migrates to a new
+ * processor, its completion of a "dsb" instruction on that
+ * new processor does not guarantee that the "ic" or "tlbi"
+ * instructions performed on the old processor have completed.
+ */
+ dsb(ish);
+ return (false);
+ }
+
+ set = pmap->pm_asid_set;
+ KASSERT(set != NULL, ("%s: NULL asid set", __func__));
+
+ /*
+ * Ensure that the store to curpmap is globally visible before the
+ * load from asid_epoch is performed.
+ */
+ if (pmap->pm_stage == PM_STAGE1)
+ PCPU_SET(curpmap, pmap);
+ else
+ PCPU_SET(curvmpmap, pmap);
+ dsb(ish);
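+	/*
+	 * A negative epoch marks a reserved cookie whose ASID never needs to
+	 * be reallocated; otherwise allocate a new ASID whenever the cookie's
+	 * epoch is stale.
+	 */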
+ epoch = COOKIE_TO_EPOCH(pmap->pm_cookie);
+ if (epoch >= 0 && epoch != set->asid_epoch)
+ pmap_alloc_asid(pmap);
+
+ if (pmap->pm_stage == PM_STAGE1) {
+ set_ttbr0(pmap_to_ttbr0(pmap));
+ if (PCPU_GET(bcast_tlbi_workaround) != 0)
+ invalidate_local_icache();
+ }
+ return (true);
+}
+
+void
+pmap_activate_vm(pmap_t pmap)
+{
+
+ PMAP_ASSERT_STAGE2(pmap);
+
+ (void)pmap_activate_int(pmap);
+}
+
+void
+pmap_activate(struct thread *td)
+{
+ pmap_t pmap;
+
+ pmap = vmspace_pmap(td->td_proc->p_vmspace);
+ PMAP_ASSERT_STAGE1(pmap);
+ critical_enter();
+ (void)pmap_activate_int(pmap);
+ critical_exit();
+}
+
+/*
+ * To eliminate the unused parameter "old", we would have to add an instruction
+ * to cpu_switch().
+ */
+struct pcb *
+pmap_switch(struct thread *old __unused, struct thread *new)
+{
+ pcpu_bp_harden bp_harden;
+ struct pcb *pcb;
+
+ /* Store the new curthread */
+ PCPU_SET(curthread, new);
+
+ /* And the new pcb */
+ pcb = new->td_pcb;
+ PCPU_SET(curpcb, pcb);
+
+ /*
+ * TODO: We may need to flush the cache here if switching
+ * to a user process.
+ */
+
+ if (pmap_activate_int(vmspace_pmap(new->td_proc->p_vmspace))) {
+ /*
+ * Stop userspace from training the branch predictor against
+ * other processes. This will call into a CPU specific
+ * function that clears the branch predictor state.
+ */
+ bp_harden = PCPU_GET(bp_harden);
+ if (bp_harden != NULL)
+ bp_harden();
+ }
+
+ return (pcb);
+}
+
+void
+pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
+{
+
+ PMAP_ASSERT_STAGE1(pmap);
+ if (va >= VM_MIN_KERNEL_ADDRESS) {
+ cpu_icache_sync_range(va, sz);
+ } else {
+ u_int len, offset;
+ vm_paddr_t pa;
+
+ /* Find the length of data in this page to flush */
+ offset = va & PAGE_MASK;
+ len = imin(PAGE_SIZE - offset, sz);
+
+ while (sz != 0) {
+ /* Extract the physical address & find it in the DMAP */
+ pa = pmap_extract(pmap, va);
+ if (pa != 0)
+ cpu_icache_sync_range(PHYS_TO_DMAP(pa), len);
+
+ /* Move to the next page */
+ sz -= len;
+ va += len;
+ /* Set the length for the next iteration */
+ len = imin(PAGE_SIZE, sz);
+ }
+ }
+}
+
+static int
+pmap_stage2_fault(pmap_t pmap, uint64_t esr, uint64_t far)
+{
+ pd_entry_t *pdep;
+ pt_entry_t *ptep, pte;
+ int rv, lvl, dfsc;
+
+ PMAP_ASSERT_STAGE2(pmap);
+ rv = KERN_FAILURE;
+
+ /* Data and insn aborts use same encoding for FSC field. */
+ dfsc = esr & ISS_DATA_DFSC_MASK;
+ switch (dfsc) {
+ case ISS_DATA_DFSC_TF_L0:
+ case ISS_DATA_DFSC_TF_L1:
+ case ISS_DATA_DFSC_TF_L2:
+ case ISS_DATA_DFSC_TF_L3:
+ PMAP_LOCK(pmap);
+ pdep = pmap_pde(pmap, far, &lvl);
+ if (pdep == NULL || lvl != (dfsc - ISS_DATA_DFSC_TF_L1)) {
+			PMAP_UNLOCK(pmap);
+ break;
+ }
+
+ switch (lvl) {
+ case 0:
+ ptep = pmap_l0_to_l1(pdep, far);
+ break;
+ case 1:
+ ptep = pmap_l1_to_l2(pdep, far);
+ break;
+ case 2:
+ ptep = pmap_l2_to_l3(pdep, far);
+ break;
+ default:
+			panic("%s: Invalid pde level %d", __func__, lvl);
+ }
+ goto fault_exec;
+
+ case ISS_DATA_DFSC_AFF_L1:
+ case ISS_DATA_DFSC_AFF_L2:
+ case ISS_DATA_DFSC_AFF_L3:
+ PMAP_LOCK(pmap);
+ ptep = pmap_pte(pmap, far, &lvl);
+fault_exec:
+ if (ptep != NULL && (pte = pmap_load(ptep)) != 0) {
+ if (icache_vmid) {
+ pmap_invalidate_vpipt_icache();
+ } else {
+ /*
+ * If accessing an executable page invalidate
+ * the I-cache so it will be valid when we
+ * continue execution in the guest. The D-cache
+ * is assumed to already be clean to the Point
+ * of Coherency.
+ */
+ if ((pte & ATTR_S2_XN_MASK) !=
+ ATTR_S2_XN(ATTR_S2_XN_NONE)) {
+ invalidate_icache();
+ }
+ }
+ pmap_set_bits(ptep, ATTR_AF | ATTR_DESCR_VALID);
+ rv = KERN_SUCCESS;
+ }
+ PMAP_UNLOCK(pmap);
+ break;
+ }
+
+ return (rv);
+}
+
+int
+pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far)
+{
+ pt_entry_t pte, *ptep;
+ register_t intr;
+ uint64_t ec, par;
+ int lvl, rv;
+
+ rv = KERN_FAILURE;
+
+ ec = ESR_ELx_EXCEPTION(esr);
+ switch (ec) {
+ case EXCP_INSN_ABORT_L:
+ case EXCP_INSN_ABORT:
+ case EXCP_DATA_ABORT_L:
+ case EXCP_DATA_ABORT:
+ break;
+ default:
+ return (rv);
+ }
+
+ if (pmap->pm_stage == PM_STAGE2)
+ return (pmap_stage2_fault(pmap, esr, far));
+
+ /* Data and insn aborts use same encoding for FSC field. */
+ switch (esr & ISS_DATA_DFSC_MASK) {
+ case ISS_DATA_DFSC_AFF_L1:
+ case ISS_DATA_DFSC_AFF_L2:
+ case ISS_DATA_DFSC_AFF_L3:
+ PMAP_LOCK(pmap);
+ ptep = pmap_pte(pmap, far, &lvl);
+ if (ptep != NULL) {
+ pmap_set_bits(ptep, ATTR_AF);
+ rv = KERN_SUCCESS;
+ /*
+ * XXXMJ as an optimization we could mark the entry
+ * dirty if this is a write fault.
+ */
+ }
+ PMAP_UNLOCK(pmap);
+ break;
+ case ISS_DATA_DFSC_PF_L1:
+ case ISS_DATA_DFSC_PF_L2:
+ case ISS_DATA_DFSC_PF_L3:
+ if ((ec != EXCP_DATA_ABORT_L && ec != EXCP_DATA_ABORT) ||
+ (esr & ISS_DATA_WnR) == 0)
+ return (rv);
+ PMAP_LOCK(pmap);
+ ptep = pmap_pte(pmap, far, &lvl);
+ if (ptep != NULL &&
+ ((pte = pmap_load(ptep)) & ATTR_SW_DBM) != 0) {
+ if ((pte & ATTR_S1_AP_RW_BIT) ==
+ ATTR_S1_AP(ATTR_S1_AP_RO)) {
+ pmap_clear_bits(ptep, ATTR_S1_AP_RW_BIT);
+ pmap_invalidate_page(pmap, far);
+ }
+ rv = KERN_SUCCESS;
+ }
+ PMAP_UNLOCK(pmap);
+ break;
+ case ISS_DATA_DFSC_TF_L0:
+ case ISS_DATA_DFSC_TF_L1:
+ case ISS_DATA_DFSC_TF_L2:
+ case ISS_DATA_DFSC_TF_L3:
+ /*
+ * Retry the translation. A break-before-make sequence can
+ * produce a transient fault.
+ */
+ if (pmap == kernel_pmap) {
+ /*
+ * The translation fault may have occurred within a
+ * critical section. Therefore, we must check the
+ * address without acquiring the kernel pmap's lock.
+ */
+ if (pmap_kextract(far) != 0)
+ rv = KERN_SUCCESS;
+ } else {
+ PMAP_LOCK(pmap);
+ /* Ask the MMU to check the address. */
+ intr = intr_disable();
+ par = arm64_address_translate_s1e0r(far);
+ intr_restore(intr);
+ PMAP_UNLOCK(pmap);
+
+ /*
+ * If the translation was successful, then we can
+ * return success to the trap handler.
+ */
+ if (PAR_SUCCESS(par))
+ rv = KERN_SUCCESS;
+ }
+ break;
+ }
+
+ return (rv);
+}
+
+/*
+ * Increase the starting virtual address of the given mapping if a
+ * different alignment might result in more superpage mappings.
+ */
+void
+pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
+ vm_offset_t *addr, vm_size_t size)
+{
+ vm_offset_t superpage_offset;
+
+ if (size < L2_SIZE)
+ return;
+ if (object != NULL && (object->flags & OBJ_COLORED) != 0)
+ offset += ptoa(object->pg_color);
+ superpage_offset = offset & L2_OFFSET;
+ if (size - ((L2_SIZE - superpage_offset) & L2_OFFSET) < L2_SIZE ||
+ (*addr & L2_OFFSET) == superpage_offset)
+ return;
+ if ((*addr & L2_OFFSET) < superpage_offset)
+ *addr = (*addr & ~L2_OFFSET) + superpage_offset;
+ else
+ *addr = ((*addr + L2_OFFSET) & ~L2_OFFSET) + superpage_offset;
+}
+
+/**
+ * Get the kernel virtual address of a set of physical pages. If there are
+ * physical addresses not covered by the DMAP perform a transient mapping
+ * that will be removed when calling pmap_unmap_io_transient.
+ *
+ * \param page The pages the caller wishes to obtain the virtual
+ * address on the kernel memory map.
+ * \param vaddr On return contains the kernel virtual memory address
+ * of the pages passed in the page parameter.
+ * \param count Number of pages passed in.
+ * \param can_fault TRUE if the thread using the mapped pages can take
+ * page faults, FALSE otherwise.
+ *
+ * \returns TRUE if the caller must call pmap_unmap_io_transient when
+ * finished or FALSE otherwise.
+ *
+ */
+boolean_t
+pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
+ boolean_t can_fault)
+{
+ vm_paddr_t paddr;
+ boolean_t needs_mapping;
+ int error, i;
+
+ /*
+ * Allocate any KVA space that we need, this is done in a separate
+ * loop to prevent calling vmem_alloc while pinned.
+ */
+ needs_mapping = FALSE;
+ for (i = 0; i < count; i++) {
+ paddr = VM_PAGE_TO_PHYS(page[i]);
+ if (__predict_false(!PHYS_IN_DMAP(paddr))) {
+ error = vmem_alloc(kernel_arena, PAGE_SIZE,
+ M_BESTFIT | M_WAITOK, &vaddr[i]);
+ KASSERT(error == 0, ("vmem_alloc failed: %d", error));
+ needs_mapping = TRUE;
+ } else {
+ vaddr[i] = PHYS_TO_DMAP(paddr);
+ }
+ }
+
+ /* Exit early if everything is covered by the DMAP */
+ if (!needs_mapping)
+ return (FALSE);
+
+ if (!can_fault)
+ sched_pin();
+ for (i = 0; i < count; i++) {
+ paddr = VM_PAGE_TO_PHYS(page[i]);
+ if (!PHYS_IN_DMAP(paddr)) {
+ panic(
+ "pmap_map_io_transient: TODO: Map out of DMAP data");
+ }
+ }
+
+ return (needs_mapping);
+}
+
+void
+pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
+ boolean_t can_fault)
+{
+ vm_paddr_t paddr;
+ int i;
+
+ if (!can_fault)
+ sched_unpin();
+ for (i = 0; i < count; i++) {
+ paddr = VM_PAGE_TO_PHYS(page[i]);
+ if (!PHYS_IN_DMAP(paddr)) {
+ panic("ARM64TODO: pmap_unmap_io_transient: Unmap data");
+ }
+ }
+}
+
+boolean_t
+pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
+{
+
+ return (mode >= VM_MEMATTR_DEVICE && mode <= VM_MEMATTR_WRITE_THROUGH);
+}
+
+/*
+ * Track a range of the kernel's virtual address space that is contiguous
+ * in various mapping attributes.
+ */
+struct pmap_kernel_map_range {
+ vm_offset_t sva;
+ pt_entry_t attrs;
+ int l3pages;
+ int l3contig;
+ int l2blocks;
+ int l1blocks;
+};
+
+static void
+sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range,
+ vm_offset_t eva)
+{
+ const char *mode;
+ int index;
+
+ if (eva <= range->sva)
+ return;
+
+ index = range->attrs & ATTR_S1_IDX_MASK;
+ switch (index) {
+ case ATTR_S1_IDX(VM_MEMATTR_DEVICE):
+ mode = "DEV";
+ break;
+ case ATTR_S1_IDX(VM_MEMATTR_UNCACHEABLE):
+ mode = "UC";
+ break;
+ case ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK):
+ mode = "WB";
+ break;
+ case ATTR_S1_IDX(VM_MEMATTR_WRITE_THROUGH):
+ mode = "WT";
+ break;
+ default:
+ printf(
+ "%s: unknown memory type %x for range 0x%016lx-0x%016lx\n",
+ __func__, index, range->sva, eva);
+ mode = "??";
+ break;
+ }
+
+ sbuf_printf(sb, "0x%016lx-0x%016lx r%c%c%c %3s %d %d %d %d\n",
+ range->sva, eva,
+ (range->attrs & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP_RW ? 'w' : '-',
+ (range->attrs & ATTR_S1_PXN) != 0 ? '-' : 'x',
+ (range->attrs & ATTR_S1_AP_USER) != 0 ? 'u' : 's',
+ mode, range->l1blocks, range->l2blocks, range->l3contig,
+ range->l3pages);
+
+ /* Reset to sentinel value. */
+ range->sva = 0xfffffffffffffffful;
+}
+
+/*
+ * Determine whether the attributes specified by a page table entry match those
+ * being tracked by the current range.
+ */
+static bool
+sysctl_kmaps_match(struct pmap_kernel_map_range *range, pt_entry_t attrs)
+{
+
+ return (range->attrs == attrs);
+}
+
+static void
+sysctl_kmaps_reinit(struct pmap_kernel_map_range *range, vm_offset_t va,
+ pt_entry_t attrs)
+{
+
+ memset(range, 0, sizeof(*range));
+ range->sva = va;
+ range->attrs = attrs;
+}
+
+/*
+ * Given a leaf PTE, derive the mapping's attributes. If they do not match
+ * those of the current run, dump the address range and its attributes, and
+ * begin a new run.
+ */
+static void
+sysctl_kmaps_check(struct sbuf *sb, struct pmap_kernel_map_range *range,
+ vm_offset_t va, pd_entry_t l0e, pd_entry_t l1e, pd_entry_t l2e,
+ pt_entry_t l3e)
+{
+ pt_entry_t attrs;
+
+ attrs = l0e & (ATTR_S1_AP_MASK | ATTR_S1_XN);
+ attrs |= l1e & (ATTR_S1_AP_MASK | ATTR_S1_XN);
+ if ((l1e & ATTR_DESCR_MASK) == L1_BLOCK)
+ attrs |= l1e & ATTR_S1_IDX_MASK;
+ attrs |= l2e & (ATTR_S1_AP_MASK | ATTR_S1_XN);
+ if ((l2e & ATTR_DESCR_MASK) == L2_BLOCK)
+ attrs |= l2e & ATTR_S1_IDX_MASK;
+ attrs |= l3e & (ATTR_S1_AP_MASK | ATTR_S1_XN | ATTR_S1_IDX_MASK);
+
+ if (range->sva > va || !sysctl_kmaps_match(range, attrs)) {
+ sysctl_kmaps_dump(sb, range, va);
+ sysctl_kmaps_reinit(range, va, attrs);
+ }
+}
+
+static int
+sysctl_kmaps(SYSCTL_HANDLER_ARGS)
+{
+ struct pmap_kernel_map_range range;
+ struct sbuf sbuf, *sb;
+ pd_entry_t l0e, *l1, l1e, *l2, l2e;
+ pt_entry_t *l3, l3e;
+ vm_offset_t sva;
+ vm_paddr_t pa;
+ int error, i, j, k, l;
+
+ error = sysctl_wire_old_buffer(req, 0);
+ if (error != 0)
+ return (error);
+ sb = &sbuf;
+ sbuf_new_for_sysctl(sb, NULL, PAGE_SIZE, req);
+
+ /* Sentinel value. */
+ range.sva = 0xfffffffffffffffful;
+
+ /*
+ * Iterate over the kernel page tables without holding the kernel pmap
+ * lock. Kernel page table pages are never freed, so at worst we will
+ * observe inconsistencies in the output.
+ */
+ for (sva = 0xffff000000000000ul, i = pmap_l0_index(sva); i < Ln_ENTRIES;
+ i++) {
+ if (i == pmap_l0_index(DMAP_MIN_ADDRESS))
+ sbuf_printf(sb, "\nDirect map:\n");
+ else if (i == pmap_l0_index(VM_MIN_KERNEL_ADDRESS))
+ sbuf_printf(sb, "\nKernel map:\n");
+
+ l0e = kernel_pmap->pm_l0[i];
+ if ((l0e & ATTR_DESCR_VALID) == 0) {
+ sysctl_kmaps_dump(sb, &range, sva);
+ sva += L0_SIZE;
+ continue;
+ }
+ pa = l0e & ~ATTR_MASK;
+ l1 = (pd_entry_t *)PHYS_TO_DMAP(pa);
+
+ for (j = pmap_l1_index(sva); j < Ln_ENTRIES; j++) {
+ l1e = l1[j];
+ if ((l1e & ATTR_DESCR_VALID) == 0) {
+ sysctl_kmaps_dump(sb, &range, sva);
+ sva += L1_SIZE;
+ continue;
+ }
+ if ((l1e & ATTR_DESCR_MASK) == L1_BLOCK) {
+ sysctl_kmaps_check(sb, &range, sva, l0e, l1e,
+ 0, 0);
+ range.l1blocks++;
+ sva += L1_SIZE;
+ continue;
+ }
+ pa = l1e & ~ATTR_MASK;
+ l2 = (pd_entry_t *)PHYS_TO_DMAP(pa);
+
+ for (k = pmap_l2_index(sva); k < Ln_ENTRIES; k++) {
+ l2e = l2[k];
+ if ((l2e & ATTR_DESCR_VALID) == 0) {
+ sysctl_kmaps_dump(sb, &range, sva);
+ sva += L2_SIZE;
+ continue;
+ }
+ if ((l2e & ATTR_DESCR_MASK) == L2_BLOCK) {
+ sysctl_kmaps_check(sb, &range, sva,
+ l0e, l1e, l2e, 0);
+ range.l2blocks++;
+ sva += L2_SIZE;
+ continue;
+ }
+ pa = l2e & ~ATTR_MASK;
+ l3 = (pt_entry_t *)PHYS_TO_DMAP(pa);
+
+ for (l = pmap_l3_index(sva); l < Ln_ENTRIES;
+ l++, sva += L3_SIZE) {
+ l3e = l3[l];
+ if ((l3e & ATTR_DESCR_VALID) == 0) {
+ sysctl_kmaps_dump(sb, &range,
+ sva);
+ continue;
+ }
+ sysctl_kmaps_check(sb, &range, sva,
+ l0e, l1e, l2e, l3e);
+ if ((l3e & ATTR_CONTIGUOUS) != 0)
+ range.l3contig += l % 16 == 0 ?
+ 1 : 0;
+ else
+ range.l3pages++;
+ }
+ }
+ }
+ }
+
+ error = sbuf_finish(sb);
+ sbuf_delete(sb);
+ return (error);
+}
+SYSCTL_OID(_vm_pmap, OID_AUTO, kernel_maps,
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ NULL, 0, sysctl_kmaps, "A",
+ "Dump kernel address layout");
diff --git a/sys/arm64/arm64/stack_machdep.c b/sys/arm64/arm64/stack_machdep.c
new file mode 100644
index 000000000000..5af5dde2d461
--- /dev/null
+++ b/sys/arm64/arm64/stack_machdep.c
@@ -0,0 +1,93 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/stack.h>
+
+#include <machine/vmparam.h>
+#include <machine/pcb.h>
+#include <machine/stack.h>
+
+static void
+stack_capture(struct stack *st, struct unwind_state *frame)
+{
+
+ stack_zero(st);
+ while (1) {
+ unwind_frame(frame);
+ if (!INKERNEL((vm_offset_t)frame->fp) ||
+ !INKERNEL((vm_offset_t)frame->pc))
+ break;
+ if (stack_put(st, frame->pc) == -1)
+ break;
+ }
+}
+
+int
+stack_save_td(struct stack *st, struct thread *td)
+{
+ struct unwind_state frame;
+
+ THREAD_LOCK_ASSERT(td, MA_OWNED);
+ KASSERT(!TD_IS_SWAPPED(td),
+ ("stack_save_td: thread %p is swapped", td));
+
+ if (TD_IS_RUNNING(td))
+ return (EOPNOTSUPP);
+
+ frame.sp = td->td_pcb->pcb_sp;
+ frame.fp = td->td_pcb->pcb_x[29];
+ frame.pc = td->td_pcb->pcb_x[30];
+
+ stack_capture(st, &frame);
+ return (0);
+}
+
+void
+stack_save(struct stack *st)
+{
+ struct unwind_state frame;
+ uint64_t sp;
+
+ __asm __volatile("mov %0, sp" : "=&r" (sp));
+
+ frame.sp = sp;
+ frame.fp = (uint64_t)__builtin_frame_address(0);
+ frame.pc = (uint64_t)stack_save;
+
+ stack_capture(st, &frame);
+}
diff --git a/sys/arm64/arm64/support.S b/sys/arm64/arm64/support.S
new file mode 100644
index 000000000000..c5aba58c95f1
--- /dev/null
+++ b/sys/arm64/arm64/support.S
@@ -0,0 +1,290 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * Copyright (c) 2014-2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Andrew Turner
+ * under sponsorship from the FreeBSD Foundation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+#include <machine/setjmp.h>
+#include <machine/param.h>
+#include <machine/vmparam.h>
+
+#include "assym.inc"
+
+/*
+ * One of the fu* or su* functions failed, return -1.
+ */
+ENTRY(fsu_fault)
+ SET_FAULT_HANDLER(xzr, x1) /* Reset the handler function */
+ EXIT_USER_ACCESS_CHECK(w0, x1)
+fsu_fault_nopcb:
+ mov x0, #-1
+ ret
+END(fsu_fault)
+
+/*
+ * int casueword32(volatile uint32_t *, uint32_t, uint32_t *, uint32_t)
+ */
+ENTRY(casueword32)
+ ldr x4, =(VM_MAXUSER_ADDRESS-3)
+ cmp x0, x4
+ b.cs fsu_fault_nopcb
+ adr x6, fsu_fault /* Load the fault handler */
+ mov w5, #1
+ SET_FAULT_HANDLER(x6, x4) /* And set it */
+ ENTER_USER_ACCESS(w6, x4)
+1: ldxr w4, [x0] /* Load-exclusive the data */
+ cmp w4, w1 /* Compare */
+ b.ne 2f /* Not equal, exit */
+ stxr w5, w3, [x0] /* Store the new data */
+2: EXIT_USER_ACCESS(w6)
+ SET_FAULT_HANDLER(xzr, x6) /* Reset the fault handler */
+ str w4, [x2] /* Store the read data */
+ mov w0, w5 /* Result same as store status */
+ ret /* Return */
+END(casueword32)
+
+/*
+ * int casueword(volatile u_long *, u_long, u_long *, u_long)
+ */
+ENTRY(casueword)
+ ldr x4, =(VM_MAXUSER_ADDRESS-7)
+ cmp x0, x4
+ b.cs fsu_fault_nopcb
+ adr x6, fsu_fault /* Load the fault handler */
+ mov w5, #1
+ SET_FAULT_HANDLER(x6, x4) /* And set it */
+ ENTER_USER_ACCESS(w6, x4)
+1: ldxr x4, [x0] /* Load-exclusive the data */
+ cmp x4, x1 /* Compare */
+ b.ne 2f /* Not equal, exit */
+ stxr w5, x3, [x0] /* Store the new data */
+2: EXIT_USER_ACCESS(w6)
+ SET_FAULT_HANDLER(xzr, x6) /* Reset the fault handler */
+ str x4, [x2] /* Store the read data */
+ mov w0, w5 /* Result same as store status */
+ ret /* Return */
+END(casueword)
+
+/*
+ * int fubyte(volatile const void *)
+ */
+ENTRY(fubyte)
+ ldr x1, =VM_MAXUSER_ADDRESS
+ cmp x0, x1
+ b.cs fsu_fault_nopcb
+ adr x6, fsu_fault /* Load the fault handler */
+ SET_FAULT_HANDLER(x6, x1) /* And set it */
+ ldtrb w0, [x0] /* Try loading the data */
+ SET_FAULT_HANDLER(xzr, x1) /* Reset the fault handler */
+ ret /* Return */
+END(fubyte)
+
+/*
+ * int fuword16(volatile const void *)
+ */
+ENTRY(fuword16)
+ ldr x1, =(VM_MAXUSER_ADDRESS-1)
+ cmp x0, x1
+ b.cs fsu_fault_nopcb
+ adr x6, fsu_fault /* Load the fault handler */
+ SET_FAULT_HANDLER(x6, x1) /* And set it */
+ ldtrh w0, [x0] /* Try loading the data */
+ SET_FAULT_HANDLER(xzr, x1) /* Reset the fault handler */
+ ret /* Return */
+END(fuword16)
+
+/*
+ * int fueword32(volatile const void *, int32_t *)
+ */
+ENTRY(fueword32)
+ ldr x2, =(VM_MAXUSER_ADDRESS-3)
+ cmp x0, x2
+ b.cs fsu_fault_nopcb
+ adr x6, fsu_fault /* Load the fault handler */
+ SET_FAULT_HANDLER(x6, x2) /* And set it */
+ ldtr w0, [x0] /* Try loading the data */
+ SET_FAULT_HANDLER(xzr, x2) /* Reset the fault handler */
+ str w0, [x1] /* Save the data in kernel space */
+ mov w0, #0 /* Success */
+ ret /* Return */
+END(fueword32)
+
+/*
+ * int fueword(volatile const void *, long *)
+ * int fueword64(volatile const void *, int64_t *)
+ */
+ENTRY(fueword)
+EENTRY(fueword64)
+ ldr x2, =(VM_MAXUSER_ADDRESS-7)
+ cmp x0, x2
+ b.cs fsu_fault_nopcb
+ adr x6, fsu_fault /* Load the fault handler */
+ SET_FAULT_HANDLER(x6, x2) /* And set it */
+ ldtr x0, [x0] /* Try loading the data */
+ SET_FAULT_HANDLER(xzr, x2) /* Reset the fault handler */
+ str x0, [x1] /* Save the data in kernel space */
+ mov x0, #0 /* Success */
+ ret /* Return */
+EEND(fueword64)
+END(fueword)
+
+/*
+ * int subyte(volatile void *, int)
+ */
+ENTRY(subyte)
+ ldr x2, =VM_MAXUSER_ADDRESS
+ cmp x0, x2
+ b.cs fsu_fault_nopcb
+ adr x6, fsu_fault /* Load the fault handler */
+ SET_FAULT_HANDLER(x6, x2) /* And set it */
+ sttrb w1, [x0] /* Try storing the data */
+ SET_FAULT_HANDLER(xzr, x2) /* Reset the fault handler */
+ mov x0, #0 /* Success */
+ ret /* Return */
+END(subyte)
+
+/*
+ * int suword16(volatile void *, int)
+ */
+ENTRY(suword16)
+ ldr x2, =(VM_MAXUSER_ADDRESS-1)
+ cmp x0, x2
+ b.cs fsu_fault_nopcb
+ adr x6, fsu_fault /* Load the fault handler */
+ SET_FAULT_HANDLER(x6, x2) /* And set it */
+ sttrh w1, [x0] /* Try storing the data */
+ SET_FAULT_HANDLER(xzr, x2) /* Reset the fault handler */
+ mov x0, #0 /* Success */
+ ret /* Return */
+END(suword16)
+
+/*
+ * int suword32(volatile void *, int)
+ */
+ENTRY(suword32)
+ ldr x2, =(VM_MAXUSER_ADDRESS-3)
+ cmp x0, x2
+ b.cs fsu_fault_nopcb
+ adr x6, fsu_fault /* Load the fault handler */
+ SET_FAULT_HANDLER(x6, x2) /* And set it */
+ sttr w1, [x0] /* Try storing the data */
+ SET_FAULT_HANDLER(xzr, x2) /* Reset the fault handler */
+ mov x0, #0 /* Success */
+ ret /* Return */
+END(suword32)
+
+/*
+ * int suword(volatile void *, long)
+ */
+ENTRY(suword)
+EENTRY(suword64)
+ ldr x2, =(VM_MAXUSER_ADDRESS-7)
+ cmp x0, x2
+ b.cs fsu_fault_nopcb
+ adr x6, fsu_fault /* Load the fault handler */
+ SET_FAULT_HANDLER(x6, x2) /* And set it */
+ sttr x1, [x0] /* Try storing the data */
+ SET_FAULT_HANDLER(xzr, x2) /* Reset the fault handler */
+ mov x0, #0 /* Success */
+ ret /* Return */
+EEND(suword64)
+END(suword)
+
+ENTRY(setjmp)
+ /* Store the stack pointer */
+ mov x8, sp
+ str x8, [x0], #8
+
+ /* Store the general purpose registers and lr */
+ stp x19, x20, [x0], #16
+ stp x21, x22, [x0], #16
+ stp x23, x24, [x0], #16
+ stp x25, x26, [x0], #16
+ stp x27, x28, [x0], #16
+ stp x29, lr, [x0], #16
+
+ /* Return value */
+ mov x0, #0
+ ret
+END(setjmp)
+
+ENTRY(longjmp)
+ /* Restore the stack pointer */
+ ldr x8, [x0], #8
+ mov sp, x8
+
+ /* Restore the general purpose registers and lr */
+ ldp x19, x20, [x0], #16
+ ldp x21, x22, [x0], #16
+ ldp x23, x24, [x0], #16
+ ldp x25, x26, [x0], #16
+ ldp x27, x28, [x0], #16
+ ldp x29, lr, [x0], #16
+
+ /* Load the return value */
+ mov x0, x1
+ ret
+END(longjmp)
+
+/*
+ * pagezero, simple implementation
+ */
+ENTRY(pagezero_simple)
+ add x1, x0, #PAGE_SIZE
+
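+	/* Zero 64 bytes per iteration: four pairs of 8-byte stores. */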
+1:
+ stp xzr, xzr, [x0], #0x10
+ stp xzr, xzr, [x0], #0x10
+ stp xzr, xzr, [x0], #0x10
+ stp xzr, xzr, [x0], #0x10
+ cmp x0, x1
+ b.ne 1b
+ ret
+
+END(pagezero_simple)
+
+/*
+ * pagezero, cache assisted
+ */
+ENTRY(pagezero_cache)
+ add x1, x0, #PAGE_SIZE
+
+ ldr x2, =dczva_line_size
+ ldr x2, [x2]
+
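+	/*
+	 * "dc zva" zeroes a whole block of dczva_line_size bytes, so each
+	 * iteration advances by that amount rather than a single store.
+	 */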
+1:
+ dc zva, x0
+ add x0, x0, x2
+ cmp x0, x1
+ b.ne 1b
+ ret
+
+END(pagezero_cache)
diff --git a/sys/arm64/arm64/swtch.S b/sys/arm64/arm64/swtch.S
new file mode 100644
index 000000000000..144cc0873f68
--- /dev/null
+++ b/sys/arm64/arm64/swtch.S
@@ -0,0 +1,292 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * Copyright (c) 2014 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include "assym.inc"
+#include "opt_kstack_pages.h"
+#include "opt_sched.h"
+
+#include <machine/asm.h>
+
+__FBSDID("$FreeBSD$");
+
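+/*
+ * Set or clear the MDSCR_EL1 software step (SS) bit when the thread's pcb
+ * has PCB_SINGLE_STEP set, so hardware single stepping follows the thread
+ * across context switches.
+ */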
+.macro clear_step_flag pcbflags, tmp
+ tbz \pcbflags, #PCB_SINGLE_STEP_SHIFT, 999f
+ mrs \tmp, mdscr_el1
+ bic \tmp, \tmp, #1
+ msr mdscr_el1, \tmp
+ isb
+999:
+.endm
+
+.macro set_step_flag pcbflags, tmp
+ tbz \pcbflags, #PCB_SINGLE_STEP_SHIFT, 999f
+ mrs \tmp, mdscr_el1
+ orr \tmp, \tmp, #1
+ msr mdscr_el1, \tmp
+ isb
+999:
+.endm
+
+/*
+ * void cpu_throw(struct thread *old, struct thread *new)
+ */
+ENTRY(cpu_throw)
+	/* If old is NULL, skip disabling single stepping */
+ cbz x0, 1f
+
+ /* If we were single stepping, disable it */
+ ldr x4, [x0, #TD_PCB]
+ ldr w5, [x4, #PCB_FLAGS]
+ clear_step_flag w5, x6
+1:
+
+#ifdef VFP
+ /* Backup the new thread pointer around a call to C code */
+ mov x19, x0
+ mov x20, x1
+ bl vfp_discard
+ mov x1, x20
+ mov x0, x19
+#endif
+
+ bl pmap_switch
+ mov x4, x0
+
+ /* If we are single stepping, enable it */
+ ldr w5, [x4, #PCB_FLAGS]
+ set_step_flag w5, x6
+
+ /* Restore the registers */
+ ldp x5, x6, [x4, #PCB_SP]
+ mov sp, x5
+ msr tpidr_el0, x6
+ ldr x6, [x4, #PCB_TPIDRRO]
+ msr tpidrro_el0, x6
+ ldp x8, x9, [x4, #PCB_REGS + 8 * 8]
+ ldp x10, x11, [x4, #PCB_REGS + 10 * 8]
+ ldp x12, x13, [x4, #PCB_REGS + 12 * 8]
+ ldp x14, x15, [x4, #PCB_REGS + 14 * 8]
+ ldp x16, x17, [x4, #PCB_REGS + 16 * 8]
+ ldr x19, [x4, #PCB_REGS + 19 * 8]
+ ldp x20, x21, [x4, #PCB_REGS + 20 * 8]
+ ldp x22, x23, [x4, #PCB_REGS + 22 * 8]
+ ldp x24, x25, [x4, #PCB_REGS + 24 * 8]
+ ldp x26, x27, [x4, #PCB_REGS + 26 * 8]
+ ldp x28, x29, [x4, #PCB_REGS + 28 * 8]
+ ldr x30, [x4, #PCB_REGS + 30 * 8]
+
+ ret
+END(cpu_throw)
+
+/*
+ * void cpu_switch(struct thread *old, struct thread *new, struct mtx *mtx)
+ *
+ * x0 = old
+ * x1 = new
+ * x2 = mtx
+ * x3 to x7, x16 and x17 are caller saved
+ */
+ENTRY(cpu_switch)
+ /*
+ * Save the old context.
+ */
+ ldr x4, [x0, #TD_PCB]
+
+ /* Store the callee-saved registers */
+ stp x8, x9, [x4, #PCB_REGS + 8 * 8]
+ stp x10, x11, [x4, #PCB_REGS + 10 * 8]
+ stp x12, x13, [x4, #PCB_REGS + 12 * 8]
+ stp x14, x15, [x4, #PCB_REGS + 14 * 8]
+ stp x16, x17, [x4, #PCB_REGS + 16 * 8]
+ stp x18, x19, [x4, #PCB_REGS + 18 * 8]
+ stp x20, x21, [x4, #PCB_REGS + 20 * 8]
+ stp x22, x23, [x4, #PCB_REGS + 22 * 8]
+ stp x24, x25, [x4, #PCB_REGS + 24 * 8]
+ stp x26, x27, [x4, #PCB_REGS + 26 * 8]
+ stp x28, x29, [x4, #PCB_REGS + 28 * 8]
+ str x30, [x4, #PCB_REGS + 30 * 8]
+ /* And the old stack pointer */
+ mov x5, sp
+ mrs x6, tpidrro_el0
+ str x6, [x4, #PCB_TPIDRRO]
+ mrs x6, tpidr_el0
+ stp x5, x6, [x4, #PCB_SP]
+
+ /* If we were single stepping, disable it */
+ ldr w5, [x4, #PCB_FLAGS]
+ clear_step_flag w5, x6
+
+ mov x19, x0
+ mov x20, x1
+ mov x21, x2
+
+#ifdef VFP
+ /* Load the pcb address */
+ mov x1, x4
+ bl vfp_save_state
+ mov x1, x20
+ mov x0, x19
+#endif
+
+ bl pmap_switch
+ /* Move the new pcb out of the way */
+ mov x4, x0
+
+ mov x2, x21
+ mov x1, x20
+ mov x0, x19
+
+ /*
+ * Release the old thread.
+ */
+ stlr x2, [x0, #TD_LOCK]
+#if defined(SCHED_ULE) && defined(SMP)
+ /* Spin if TD_LOCK points to a blocked_lock */
+ ldr x2, =_C_LABEL(blocked_lock)
+1:
+ ldar x3, [x1, #TD_LOCK]
+ cmp x3, x2
+ b.eq 1b
+#endif
+
+ /* If we are single stepping, enable it */
+ ldr w5, [x4, #PCB_FLAGS]
+ set_step_flag w5, x6
+
+ /* Restore the registers */
+ ldp x5, x6, [x4, #PCB_SP]
+ mov sp, x5
+ msr tpidr_el0, x6
+ ldr x6, [x4, #PCB_TPIDRRO]
+ msr tpidrro_el0, x6
+ ldp x8, x9, [x4, #PCB_REGS + 8 * 8]
+ ldp x10, x11, [x4, #PCB_REGS + 10 * 8]
+ ldp x12, x13, [x4, #PCB_REGS + 12 * 8]
+ ldp x14, x15, [x4, #PCB_REGS + 14 * 8]
+ ldp x16, x17, [x4, #PCB_REGS + 16 * 8]
+ ldr x19, [x4, #PCB_REGS + 19 * 8]
+ ldp x20, x21, [x4, #PCB_REGS + 20 * 8]
+ ldp x22, x23, [x4, #PCB_REGS + 22 * 8]
+ ldp x24, x25, [x4, #PCB_REGS + 24 * 8]
+ ldp x26, x27, [x4, #PCB_REGS + 26 * 8]
+ ldp x28, x29, [x4, #PCB_REGS + 28 * 8]
+ ldr x30, [x4, #PCB_REGS + 30 * 8]
+
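+	/*
+	 * x18 holds the per-CPU pointer and is deliberately not restored
+	 * above; clear its saved slot so a stale value is never reused.
+	 */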
+ str xzr, [x4, #PCB_REGS + 18 * 8]
+ ret
+.Lcpu_switch_panic_str:
+ .asciz "cpu_switch: %p\0"
+END(cpu_switch)
+
+ENTRY(fork_trampoline)
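+	/*
+	 * x8 and x9 were loaded into the new thread's pcb by cpu_fork() /
+	 * cpu_copy_thread() with the function and argument for fork_exit();
+	 * the child's trapframe is at the top of the stack.
+	 */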
+ mov x0, x8
+ mov x1, x9
+ mov x2, sp
+ mov fp, #0 /* Stack traceback stops here. */
+ bl _C_LABEL(fork_exit)
+
+ /* Restore the registers other than x0 and x1 */
+ ldp x2, x3, [sp, #TF_X + 2 * 8]
+ ldp x4, x5, [sp, #TF_X + 4 * 8]
+ ldp x6, x7, [sp, #TF_X + 6 * 8]
+ ldp x8, x9, [sp, #TF_X + 8 * 8]
+ ldp x10, x11, [sp, #TF_X + 10 * 8]
+ ldp x12, x13, [sp, #TF_X + 12 * 8]
+ ldp x14, x15, [sp, #TF_X + 14 * 8]
+ ldp x16, x17, [sp, #TF_X + 16 * 8]
+ ldr x19, [sp, #TF_X + 19 * 8]
+ ldp x20, x21, [sp, #TF_X + 20 * 8]
+ ldp x22, x23, [sp, #TF_X + 22 * 8]
+ ldp x24, x25, [sp, #TF_X + 24 * 8]
+ ldp x26, x27, [sp, #TF_X + 26 * 8]
+ ldp x28, x29, [sp, #TF_X + 28 * 8]
+
+ /*
+ * Disable interrupts to avoid
+ * overwriting spsr_el1 and sp_el0 by an IRQ exception.
+ */
+ msr daifset, #2
+
+ /* Restore sp and lr */
+ ldp x0, x1, [sp, #TF_SP]
+ msr sp_el0, x0
+ mov lr, x1
+
+ /* Restore elr and spsr */
+ ldp x0, x1, [sp, #TF_ELR]
+ msr elr_el1, x0
+ msr spsr_el1, x1
+
+ /* Finally x0 and x1 */
+ ldp x0, x1, [sp, #TF_X + 0 * 8]
+ ldr x18, [sp, #TF_X + 18 * 8]
+
+ /*
+ * No need for interrupts reenabling since PSR
+ * will be set to the desired value anyway.
+ */
+ ERET
+
+END(fork_trampoline)
+
+ENTRY(savectx)
+ /* Store the callee-saved registers */
+ stp x8, x9, [x0, #PCB_REGS + 8 * 8]
+ stp x10, x11, [x0, #PCB_REGS + 10 * 8]
+ stp x12, x13, [x0, #PCB_REGS + 12 * 8]
+ stp x14, x15, [x0, #PCB_REGS + 14 * 8]
+ stp x16, x17, [x0, #PCB_REGS + 16 * 8]
+ stp x18, x19, [x0, #PCB_REGS + 18 * 8]
+ stp x20, x21, [x0, #PCB_REGS + 20 * 8]
+ stp x22, x23, [x0, #PCB_REGS + 22 * 8]
+ stp x24, x25, [x0, #PCB_REGS + 24 * 8]
+ stp x26, x27, [x0, #PCB_REGS + 26 * 8]
+ stp x28, x29, [x0, #PCB_REGS + 28 * 8]
+ str x30, [x0, #PCB_REGS + 30 * 8]
+ /* And the old stack pointer */
+ mov x5, sp
+ mrs x6, tpidrro_el0
+ str x6, [x0, #PCB_TPIDRRO]
+ mrs x6, tpidr_el0
+ stp x5, x6, [x0, #PCB_SP]
+
+ /* Store the VFP registers */
+#ifdef VFP
+ mov x28, lr
+ mov x1, x0 /* move pcb to the correct register */
+ mov x0, xzr /* td = NULL */
+ bl vfp_save_state
+ mov lr, x28
+#endif
+
+ ret
+END(savectx)
+
diff --git a/sys/arm64/arm64/sys_machdep.c b/sys/arm64/arm64/sys_machdep.c
new file mode 100644
index 000000000000..dfb2c4ad85b8
--- /dev/null
+++ b/sys/arm64/arm64/sys_machdep.c
@@ -0,0 +1,45 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/sysproto.h>
+
+#include <machine/sysarch.h>
+
+int
+sysarch(struct thread *td, struct sysarch_args *uap)
+{
+
+ return (ENOTSUP);
+}
diff --git a/sys/arm64/arm64/trap.c b/sys/arm64/arm64/trap.c
new file mode 100644
index 000000000000..9856a35d0010
--- /dev/null
+++ b/sys/arm64/arm64/trap.c
@@ -0,0 +1,567 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/ptrace.h>
+#include <sys/syscall.h>
+#include <sys/sysent.h>
+#ifdef KDB
+#include <sys/kdb.h>
+#endif
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_map.h>
+#include <vm/vm_param.h>
+#include <vm/vm_extern.h>
+
+#include <machine/frame.h>
+#include <machine/pcb.h>
+#include <machine/pcpu.h>
+#include <machine/undefined.h>
+
+#ifdef KDTRACE_HOOKS
+#include <sys/dtrace_bsd.h>
+#endif
+
+#ifdef VFP
+#include <machine/vfp.h>
+#endif
+
+#ifdef KDB
+#include <machine/db_machdep.h>
+#endif
+
+#ifdef DDB
+#include <ddb/db_output.h>
+#endif
+
+extern register_t fsu_intr_fault;
+
+/* Called from exception.S */
+void do_el1h_sync(struct thread *, struct trapframe *);
+void do_el0_sync(struct thread *, struct trapframe *);
+void do_el0_error(struct trapframe *);
+void do_serror(struct trapframe *);
+void unhandled_exception(struct trapframe *);
+
+static void print_registers(struct trapframe *frame);
+
+int (*dtrace_invop_jump_addr)(struct trapframe *);
+
+typedef void (abort_handler)(struct thread *, struct trapframe *, uint64_t,
+ uint64_t, int);
+
+static abort_handler align_abort;
+static abort_handler data_abort;
+
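+/*
+ * Abort handlers indexed by the DFSC (fault status code) field of the ESR.
+ */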
+static abort_handler *abort_handlers[] = {
+ [ISS_DATA_DFSC_TF_L0] = data_abort,
+ [ISS_DATA_DFSC_TF_L1] = data_abort,
+ [ISS_DATA_DFSC_TF_L2] = data_abort,
+ [ISS_DATA_DFSC_TF_L3] = data_abort,
+ [ISS_DATA_DFSC_AFF_L1] = data_abort,
+ [ISS_DATA_DFSC_AFF_L2] = data_abort,
+ [ISS_DATA_DFSC_AFF_L3] = data_abort,
+ [ISS_DATA_DFSC_PF_L1] = data_abort,
+ [ISS_DATA_DFSC_PF_L2] = data_abort,
+ [ISS_DATA_DFSC_PF_L3] = data_abort,
+ [ISS_DATA_DFSC_ALIGN] = align_abort,
+};
+
+static __inline void
+call_trapsignal(struct thread *td, int sig, int code, void *addr, int trapno)
+{
+ ksiginfo_t ksi;
+
+ ksiginfo_init_trap(&ksi);
+ ksi.ksi_signo = sig;
+ ksi.ksi_code = code;
+ ksi.ksi_addr = addr;
+ ksi.ksi_trapno = trapno;
+ trapsignal(td, &ksi);
+}
+
+int
+cpu_fetch_syscall_args(struct thread *td)
+{
+ struct proc *p;
+ register_t *ap;
+ struct syscall_args *sa;
+ int nap;
+
+ nap = 8;
+ p = td->td_proc;
+ ap = td->td_frame->tf_x;
+ sa = &td->td_sa;
+
+ sa->code = td->td_frame->tf_x[8];
+
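+	/*
+	 * For the indirect system calls (syscall(2) and __syscall(2)) the
+	 * actual syscall number is passed as the first argument, so shift
+	 * the argument array by one entry.
+	 */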
+ if (sa->code == SYS_syscall || sa->code == SYS___syscall) {
+ sa->code = *ap++;
+ nap--;
+ }
+
+ if (sa->code >= p->p_sysent->sv_size)
+ sa->callp = &p->p_sysent->sv_table[0];
+ else
+ sa->callp = &p->p_sysent->sv_table[sa->code];
+
+ sa->narg = sa->callp->sy_narg;
+ memcpy(sa->args, ap, nap * sizeof(register_t));
+ if (sa->narg > nap)
+ panic("ARM64TODO: Could we have more than 8 args?");
+
+ td->td_retval[0] = 0;
+ td->td_retval[1] = 0;
+
+ return (0);
+}
+
+#include "../../kern/subr_syscall.c"
+
+static void
+svc_handler(struct thread *td, struct trapframe *frame)
+{
+
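+	/*
+	 * The immediate operand of the SVC instruction is reported in the
+	 * ISS field of the ESR; only "svc #0" is a system call, any other
+	 * value raises SIGILL.
+	 */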
+ if ((frame->tf_esr & ESR_ELx_ISS_MASK) == 0) {
+ syscallenter(td);
+ syscallret(td);
+ } else {
+ call_trapsignal(td, SIGILL, ILL_ILLOPN, (void *)frame->tf_elr,
+ ESR_ELx_EXCEPTION(frame->tf_esr));
+ userret(td, frame);
+ }
+}
+
+static void
+align_abort(struct thread *td, struct trapframe *frame, uint64_t esr,
+ uint64_t far, int lower)
+{
+ if (!lower) {
+ print_registers(frame);
+ printf(" far: %16lx\n", far);
+ printf(" esr: %.8lx\n", esr);
+ panic("Misaligned access from kernel space!");
+ }
+
+ call_trapsignal(td, SIGBUS, BUS_ADRALN, (void *)frame->tf_elr,
+ ESR_ELx_EXCEPTION(frame->tf_esr));
+ userret(td, frame);
+}
+
+static void
+data_abort(struct thread *td, struct trapframe *frame, uint64_t esr,
+ uint64_t far, int lower)
+{
+ struct vm_map *map;
+ struct proc *p;
+ struct pcb *pcb;
+ vm_prot_t ftype;
+ int error, sig, ucode;
+#ifdef KDB
+ bool handled;
+#endif
+
+ /*
+ * According to the ARMv8-A rev. A.g, B2.10.5 "Load-Exclusive
+	 * and Store-Exclusive instruction usage restrictions", the state
+	 * of the exclusive monitors after a data abort exception is unknown.
+ */
+ clrex();
+
+#ifdef KDB
+ if (kdb_active) {
+ kdb_reenter();
+ return;
+ }
+#endif
+
+ pcb = td->td_pcb;
+ p = td->td_proc;
+ if (lower)
+ map = &p->p_vmspace->vm_map;
+ else {
+ intr_enable();
+
+ /* The top bit tells us which range to use */
+ if (far >= VM_MAXUSER_ADDRESS) {
+ map = kernel_map;
+ } else {
+ map = &p->p_vmspace->vm_map;
+ if (map == NULL)
+ map = kernel_map;
+ }
+ }
+
+ /*
+ * Try to handle translation, access flag, and permission faults.
+ * Translation faults may occur as a result of the required
+ * break-before-make sequence used when promoting or demoting
+ * superpages. Such faults must not occur while holding the pmap lock,
+ * or pmap_fault() will recurse on that lock.
+ */
+ if ((lower || map == kernel_map || pcb->pcb_onfault != 0) &&
+ pmap_fault(map->pmap, esr, far) == KERN_SUCCESS)
+ return;
+
+ KASSERT(td->td_md.md_spinlock_count == 0,
+ ("data abort with spinlock held"));
+ if (td->td_critnest != 0 || WITNESS_CHECK(WARN_SLEEPOK |
+ WARN_GIANTOK, NULL, "Kernel page fault") != 0) {
+ print_registers(frame);
+ printf(" far: %16lx\n", far);
+ printf(" esr: %.8lx\n", esr);
+ panic("data abort in critical section or under mutex");
+ }
+
+ switch (ESR_ELx_EXCEPTION(esr)) {
+ case EXCP_INSN_ABORT:
+ case EXCP_INSN_ABORT_L:
+ ftype = VM_PROT_EXECUTE;
+ break;
+ default:
+ ftype = (esr & ISS_DATA_WnR) == 0 ? VM_PROT_READ :
+ VM_PROT_READ | VM_PROT_WRITE;
+ break;
+ }
+
+ /* Fault in the page. */
+ error = vm_fault_trap(map, far, ftype, VM_FAULT_NORMAL, &sig, &ucode);
+ if (error != KERN_SUCCESS) {
+ if (lower) {
+ call_trapsignal(td, sig, ucode, (void *)far,
+ ESR_ELx_EXCEPTION(esr));
+ } else {
+ if (td->td_intr_nesting_level == 0 &&
+ pcb->pcb_onfault != 0) {
+ frame->tf_x[0] = error;
+ frame->tf_elr = pcb->pcb_onfault;
+ return;
+ }
+
+ printf("Fatal data abort:\n");
+ print_registers(frame);
+ printf(" far: %16lx\n", far);
+ printf(" esr: %.8lx\n", esr);
+
+#ifdef KDB
+ if (debugger_on_trap) {
+ kdb_why = KDB_WHY_TRAP;
+ handled = kdb_trap(ESR_ELx_EXCEPTION(esr), 0,
+ frame);
+ kdb_why = KDB_WHY_UNSET;
+ if (handled)
+ return;
+ }
+#endif
+ panic("vm_fault failed: %lx", frame->tf_elr);
+ }
+ }
+
+ if (lower)
+ userret(td, frame);
+}
+
+static void
+print_registers(struct trapframe *frame)
+{
+ u_int reg;
+
+ for (reg = 0; reg < nitems(frame->tf_x); reg++) {
+ printf(" %sx%d: %16lx\n", (reg < 10) ? " " : "", reg,
+ frame->tf_x[reg]);
+ }
+ printf(" sp: %16lx\n", frame->tf_sp);
+ printf(" lr: %16lx\n", frame->tf_lr);
+ printf(" elr: %16lx\n", frame->tf_elr);
+ printf("spsr: %8x\n", frame->tf_spsr);
+}
+
+void
+do_el1h_sync(struct thread *td, struct trapframe *frame)
+{
+ struct trapframe *oframe;
+ uint32_t exception;
+ uint64_t esr, far;
+ int dfsc;
+
+ /* Read the esr register to get the exception details */
+ esr = frame->tf_esr;
+ exception = ESR_ELx_EXCEPTION(esr);
+
+#ifdef KDTRACE_HOOKS
+ if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, exception))
+ return;
+#endif
+
+ CTR4(KTR_TRAP,
+ "do_el1_sync: curthread: %p, esr %lx, elr: %lx, frame: %p", td,
+ esr, frame->tf_elr, frame);
+
+ oframe = td->td_frame;
+
+ switch (exception) {
+ case EXCP_BRK:
+ case EXCP_WATCHPT_EL1:
+ case EXCP_SOFTSTP_EL1:
+ break;
+ default:
+ td->td_frame = frame;
+ break;
+ }
+
+ switch (exception) {
+ case EXCP_FP_SIMD:
+ case EXCP_TRAP_FP:
+#ifdef VFP
+ if ((td->td_pcb->pcb_fpflags & PCB_FP_KERN) != 0) {
+ vfp_restore_state();
+ } else
+#endif
+ {
+ print_registers(frame);
+ printf(" esr: %.8lx\n", esr);
+ panic("VFP exception in the kernel");
+ }
+ break;
+ case EXCP_INSN_ABORT:
+ case EXCP_DATA_ABORT:
+ far = READ_SPECIALREG(far_el1);
+ dfsc = esr & ISS_DATA_DFSC_MASK;
+ if (dfsc < nitems(abort_handlers) &&
+ abort_handlers[dfsc] != NULL) {
+ abort_handlers[dfsc](td, frame, esr, far, 0);
+ } else {
+ print_registers(frame);
+ printf(" far: %16lx\n", far);
+ printf(" esr: %.8lx\n", esr);
+ panic("Unhandled EL1 %s abort: %x",
+ exception == EXCP_INSN_ABORT ? "instruction" :
+ "data", dfsc);
+ }
+ break;
+ case EXCP_BRK:
+#ifdef KDTRACE_HOOKS
+		if ((esr & ESR_ELx_ISS_MASK) == 0x40d &&
+ dtrace_invop_jump_addr != 0) {
+ dtrace_invop_jump_addr(frame);
+ break;
+ }
+#endif
+#ifdef KDB
+ kdb_trap(exception, 0,
+ (td->td_frame != NULL) ? td->td_frame : frame);
+#else
+ panic("No debugger in kernel.\n");
+#endif
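+		/* Step over the 4-byte BRK instruction before returning. */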
+ frame->tf_elr += 4;
+ break;
+ case EXCP_WATCHPT_EL1:
+ case EXCP_SOFTSTP_EL1:
+#ifdef KDB
+ kdb_trap(exception, 0,
+ (td->td_frame != NULL) ? td->td_frame : frame);
+#else
+ panic("No debugger in kernel.\n");
+#endif
+ break;
+ case EXCP_UNKNOWN:
+ if (undef_insn(1, frame))
+ break;
+ /* FALLTHROUGH */
+ default:
+ print_registers(frame);
+ printf(" far: %16lx\n", READ_SPECIALREG(far_el1));
+ panic("Unknown kernel exception %x esr_el1 %lx\n", exception,
+ esr);
+ }
+
+ td->td_frame = oframe;
+}
+
+void
+do_el0_sync(struct thread *td, struct trapframe *frame)
+{
+ pcpu_bp_harden bp_harden;
+ uint32_t exception;
+ uint64_t esr, far;
+ int dfsc;
+
+ /* Check we have a sane environment when entering from userland */
+ KASSERT((uintptr_t)get_pcpu() >= VM_MIN_KERNEL_ADDRESS,
+ ("Invalid pcpu address from userland: %p (tpidr %lx)",
+ get_pcpu(), READ_SPECIALREG(tpidr_el1)));
+
+ esr = frame->tf_esr;
+ exception = ESR_ELx_EXCEPTION(esr);
+ switch (exception) {
+ case EXCP_INSN_ABORT_L:
+ far = READ_SPECIALREG(far_el1);
+
+ /*
+ * Userspace may be trying to train the branch predictor to
+ * attack the kernel. If we are on a CPU affected by this
+ * call the handler to clear the branch predictor state.
+ */
+ if (far > VM_MAXUSER_ADDRESS) {
+ bp_harden = PCPU_GET(bp_harden);
+ if (bp_harden != NULL)
+ bp_harden();
+ }
+ break;
+ case EXCP_UNKNOWN:
+ case EXCP_DATA_ABORT_L:
+ case EXCP_DATA_ABORT:
+ far = READ_SPECIALREG(far_el1);
+ break;
+ }
+ intr_enable();
+
+ CTR4(KTR_TRAP,
+ "do_el0_sync: curthread: %p, esr %lx, elr: %lx, frame: %p", td, esr,
+ frame->tf_elr, frame);
+
+ switch (exception) {
+ case EXCP_FP_SIMD:
+ case EXCP_TRAP_FP:
+#ifdef VFP
+ vfp_restore_state();
+#else
+ panic("VFP exception in userland");
+#endif
+ break;
+ case EXCP_SVC32:
+ case EXCP_SVC64:
+ svc_handler(td, frame);
+ break;
+ case EXCP_INSN_ABORT_L:
+ case EXCP_DATA_ABORT_L:
+ case EXCP_DATA_ABORT:
+ dfsc = esr & ISS_DATA_DFSC_MASK;
+ if (dfsc < nitems(abort_handlers) &&
+ abort_handlers[dfsc] != NULL)
+ abort_handlers[dfsc](td, frame, esr, far, 1);
+ else {
+ print_registers(frame);
+ printf(" far: %16lx\n", far);
+ printf(" esr: %.8lx\n", esr);
+ panic("Unhandled EL0 %s abort: %x",
+ exception == EXCP_INSN_ABORT_L ? "instruction" :
+ "data", dfsc);
+ }
+ break;
+ case EXCP_UNKNOWN:
+ if (!undef_insn(0, frame))
+ call_trapsignal(td, SIGILL, ILL_ILLTRP, (void *)far,
+ exception);
+ userret(td, frame);
+ break;
+ case EXCP_SP_ALIGN:
+ call_trapsignal(td, SIGBUS, BUS_ADRALN, (void *)frame->tf_sp,
+ exception);
+ userret(td, frame);
+ break;
+ case EXCP_PC_ALIGN:
+ call_trapsignal(td, SIGBUS, BUS_ADRALN, (void *)frame->tf_elr,
+ exception);
+ userret(td, frame);
+ break;
+ case EXCP_BRKPT_EL0:
+ case EXCP_BRK:
+ call_trapsignal(td, SIGTRAP, TRAP_BRKPT, (void *)frame->tf_elr,
+ exception);
+ userret(td, frame);
+ break;
+ case EXCP_MSR:
+ call_trapsignal(td, SIGILL, ILL_PRVOPC, (void *)frame->tf_elr,
+ exception);
+ userret(td, frame);
+ break;
+ case EXCP_SOFTSTP_EL0:
+ td->td_frame->tf_spsr &= ~PSR_SS;
+ td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
+ WRITE_SPECIALREG(mdscr_el1,
+ READ_SPECIALREG(mdscr_el1) & ~DBG_MDSCR_SS);
+ call_trapsignal(td, SIGTRAP, TRAP_TRACE,
+ (void *)frame->tf_elr, exception);
+ userret(td, frame);
+ break;
+ default:
+ call_trapsignal(td, SIGBUS, BUS_OBJERR, (void *)frame->tf_elr,
+ exception);
+ userret(td, frame);
+ break;
+ }
+
+ KASSERT((td->td_pcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
+ ("Kernel VFP flags set while entering userspace"));
+ KASSERT(
+ td->td_pcb->pcb_fpusaved == &td->td_pcb->pcb_fpustate,
+ ("Kernel VFP state in use when entering userspace"));
+}
+
+/*
+ * TODO: We will need to handle these later when we support ARMv8.2 RAS.
+ */
+void
+do_serror(struct trapframe *frame)
+{
+ uint64_t esr, far;
+
+ far = READ_SPECIALREG(far_el1);
+ esr = frame->tf_esr;
+
+ print_registers(frame);
+ printf(" far: %16lx\n", far);
+ printf(" esr: %.8lx\n", esr);
+ panic("Unhandled System Error");
+}
+
+void
+unhandled_exception(struct trapframe *frame)
+{
+ uint64_t esr, far;
+
+ far = READ_SPECIALREG(far_el1);
+ esr = frame->tf_esr;
+
+ print_registers(frame);
+ printf(" far: %16lx\n", far);
+ printf(" esr: %.8lx\n", esr);
+ panic("Unhandled exception");
+}
diff --git a/sys/arm64/arm64/uio_machdep.c b/sys/arm64/arm64/uio_machdep.c
new file mode 100644
index 000000000000..11ed239fa9dd
--- /dev/null
+++ b/sys/arm64/arm64/uio_machdep.c
@@ -0,0 +1,134 @@
+/*-
+ * Copyright (c) 2004 Alan L. Cox <alc@cs.rice.edu>
+ * Copyright (c) 1982, 1986, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)kern_subr.c 8.3 (Berkeley) 1/21/94
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+
+#include <machine/vmparam.h>
+
+/*
+ * Implement uiomove(9) from physical memory using the direct map to
+ * avoid the creation and destruction of ephemeral mappings.
+ */
+int
+uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
+{
+ struct thread *td = curthread;
+ struct iovec *iov;
+ void *cp;
+ vm_offset_t page_offset, vaddr;
+ size_t cnt;
+ int error = 0;
+ int save = 0;
+ boolean_t mapped;
+
+ KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
+ ("uiomove_fromphys: mode"));
+ KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
+ ("uiomove_fromphys proc"));
+ save = td->td_pflags & TDP_DEADLKTREAT;
+ td->td_pflags |= TDP_DEADLKTREAT;
+ mapped = FALSE;
+ while (n > 0 && uio->uio_resid) {
+ iov = uio->uio_iov;
+ cnt = iov->iov_len;
+ if (cnt == 0) {
+ uio->uio_iov++;
+ uio->uio_iovcnt--;
+ continue;
+ }
+ if (cnt > n)
+ cnt = n;
+ page_offset = offset & PAGE_MASK;
+ cnt = min(cnt, PAGE_SIZE - page_offset);
+ if (uio->uio_segflg != UIO_NOCOPY) {
+ mapped = pmap_map_io_transient(
+ &ma[offset >> PAGE_SHIFT], &vaddr, 1, TRUE);
+ cp = (char *)vaddr + page_offset;
+ }
+ switch (uio->uio_segflg) {
+ case UIO_USERSPACE:
+ maybe_yield();
+ if (uio->uio_rw == UIO_READ)
+ error = copyout(cp, iov->iov_base, cnt);
+ else
+ error = copyin(iov->iov_base, cp, cnt);
+ if (error)
+ goto out;
+ break;
+ case UIO_SYSSPACE:
+ if (uio->uio_rw == UIO_READ)
+ bcopy(cp, iov->iov_base, cnt);
+ else
+ bcopy(iov->iov_base, cp, cnt);
+ break;
+ case UIO_NOCOPY:
+ break;
+ }
+ if (__predict_false(mapped)) {
+ pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT],
+ &vaddr, 1, TRUE);
+ mapped = FALSE;
+ }
+ iov->iov_base = (char *)iov->iov_base + cnt;
+ iov->iov_len -= cnt;
+ uio->uio_resid -= cnt;
+ uio->uio_offset += cnt;
+ offset += cnt;
+ n -= cnt;
+ }
+out:
+ if (__predict_false(mapped)) {
+ panic("ARM64TODO: uiomove_fromphys");
+ pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT], &vaddr, 1,
+ TRUE);
+ }
+ if (save == 0)
+ td->td_pflags &= ~TDP_DEADLKTREAT;
+ return (error);
+}
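+
+/*
+ * Illustrative sketch, not part of this change: a caller typically wires the
+ * backing pages, builds a struct uio describing the destination buffer, and
+ * lets uiomove_fromphys() copy through the direct map.  The helper name below
+ * is hypothetical.
+ *
+ *	static int
+ *	foo_copy_pages(vm_page_t *pages, vm_offset_t off, int len,
+ *	    struct uio *uio)
+ *	{
+ *		return (uiomove_fromphys(pages, off, len, uio));
+ *	}
+ */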
diff --git a/sys/arm64/arm64/uma_machdep.c b/sys/arm64/arm64/uma_machdep.c
new file mode 100644
index 000000000000..4ab256ed2179
--- /dev/null
+++ b/sys/arm64/arm64/uma_machdep.c
@@ -0,0 +1,77 @@
+/*-
+ * Copyright (c) 2003 Alan L. Cox <alc@cs.rice.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/systm.h>
+#include <sys/vmmeter.h>
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
+#include <vm/uma.h>
+#include <vm/uma_int.h>
+#include <machine/md_var.h>
+#include <machine/vmparam.h>
+
+void *
+uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
+ int wait)
+{
+ vm_page_t m;
+ vm_paddr_t pa;
+ void *va;
+
+ *flags = UMA_SLAB_PRIV;
+ m = vm_page_alloc_domain(NULL, 0, domain,
+ malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
+ if (m == NULL)
+ return (NULL);
+ pa = m->phys_addr;
+ if ((wait & M_NODUMP) == 0)
+ dump_add_page(pa);
+ va = (void *)PHYS_TO_DMAP(pa);
+ if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
+ bzero(va, PAGE_SIZE);
+ return (va);
+}
+
+void
+uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
+{
+ vm_page_t m;
+ vm_paddr_t pa;
+
+ pa = DMAP_TO_PHYS((vm_offset_t)mem);
+ dump_drop_page(pa);
+ m = PHYS_TO_VM_PAGE(pa);
+ vm_page_unwire_noq(m);
+ vm_page_free(m);
+}
diff --git a/sys/arm64/arm64/undefined.c b/sys/arm64/arm64/undefined.c
new file mode 100644
index 000000000000..2cdb5f9a97fb
--- /dev/null
+++ b/sys/arm64/arm64/undefined.c
@@ -0,0 +1,177 @@
+/*-
+ * Copyright (c) 2017 Andrew Turner
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/queue.h>
+#include <sys/signal.h>
+#include <sys/signalvar.h>
+#include <sys/sysent.h>
+
+#include <machine/frame.h>
+#include <machine/undefined.h>
+#include <machine/vmparam.h>
+
+MALLOC_DEFINE(M_UNDEF, "undefhandler", "Undefined instruction handler data");
+
+struct undef_handler {
+ LIST_ENTRY(undef_handler) uh_link;
+ undef_handler_t uh_handler;
+};
+
+/*
+ * Create two undefined instruction handler lists, one for userspace, one for
+ * the kernel. This allows us to handle instructions that will trap
+ */
+LIST_HEAD(, undef_handler) undef_handlers[2];
+
+/*
+ * Work around a bug in QEMU prior to 2.5.1 where reading unknown ID
+ * registers would raise an exception when they should return 0.
+ */
+static int
+id_aa64mmfr2_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
+ uint32_t esr)
+{
+ int reg;
+
+#define MRS_ID_AA64MMFR2_EL0_MASK (MRS_MASK | 0x000fffe0)
+#define MRS_ID_AA64MMFR2_EL0_VALUE (MRS_VALUE | 0x00080740)
+
+	/* mrs xn, id_aa64mmfr2_el1 */
+ if ((insn & MRS_ID_AA64MMFR2_EL0_MASK) == MRS_ID_AA64MMFR2_EL0_VALUE) {
+ reg = MRS_REGISTER(insn);
+
+ frame->tf_elr += INSN_SIZE;
+ if (reg < nitems(frame->tf_x)) {
+ frame->tf_x[reg] = 0;
+ } else if (reg == 30) {
+ frame->tf_lr = 0;
+ }
+		/* If reg is 31 then write to xzr, i.e. do nothing */
+
+ return (1);
+ }
+ return (0);
+}
+
+#ifdef COMPAT_FREEBSD32
+/* arm32 GDB breakpoints */
+#define GDB_BREAKPOINT 0xe6000011
+#define GDB5_BREAKPOINT 0xe7ffdefe
+static int
+gdb_trapper(vm_offset_t va, uint32_t insn, struct trapframe *frame,
+ uint32_t esr)
+{
+ struct thread *td = curthread;
+
+ if (insn == GDB_BREAKPOINT || insn == GDB5_BREAKPOINT) {
+ if (SV_PROC_FLAG(td->td_proc, SV_ILP32) &&
+ va < VM_MAXUSER_ADDRESS) {
+ ksiginfo_t ksi;
+
+ ksiginfo_init_trap(&ksi);
+ ksi.ksi_signo = SIGTRAP;
+ ksi.ksi_code = TRAP_TRACE;
+ ksi.ksi_addr = (void *)va;
+ trapsignal(td, &ksi);
+ return 1;
+ }
+ }
+ return 0;
+}
+#endif
+
+void
+undef_init(void)
+{
+
+ LIST_INIT(&undef_handlers[0]);
+ LIST_INIT(&undef_handlers[1]);
+
+ install_undef_handler(false, id_aa64mmfr2_handler);
+#ifdef COMPAT_FREEBSD32
+ install_undef_handler(true, gdb_trapper);
+#endif
+}
+
+void *
+install_undef_handler(bool user, undef_handler_t func)
+{
+ struct undef_handler *uh;
+
+ uh = malloc(sizeof(*uh), M_UNDEF, M_WAITOK);
+ uh->uh_handler = func;
+ LIST_INSERT_HEAD(&undef_handlers[user ? 0 : 1], uh, uh_link);
+
+ return (uh);
+}
+
+void
+remove_undef_handler(void *handle)
+{
+ struct undef_handler *uh;
+
+ uh = handle;
+ LIST_REMOVE(uh, uh_link);
+ free(handle, M_UNDEF);
+}
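+
+/*
+ * Illustrative sketch, not part of this change: a subsystem that wants to
+ * emulate an instruction that traps registers a handler on the kernel list
+ * (user == false) and removes it again on teardown.  The foo_* names are
+ * hypothetical.
+ *
+ *	static int
+ *	foo_undef(vm_offset_t va, uint32_t insn, struct trapframe *frame,
+ *	    uint32_t esr)
+ *	{
+ *		if (!foo_matches(insn))
+ *			return (0);
+ *		foo_emulate(insn, frame);
+ *		frame->tf_elr += INSN_SIZE;
+ *		return (1);
+ *	}
+ *
+ *	handle = install_undef_handler(false, foo_undef);
+ *	...
+ *	remove_undef_handler(handle);
+ */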
+
+int
+undef_insn(u_int el, struct trapframe *frame)
+{
+ struct undef_handler *uh;
+ uint32_t insn;
+ int ret;
+
+ KASSERT(el < 2, ("Invalid exception level %u", el));
+
+ if (el == 0) {
+ ret = fueword32((uint32_t *)frame->tf_elr, &insn);
+ if (ret != 0)
+ panic("Unable to read userspace faulting instruction");
+ } else {
+ insn = *(uint32_t *)frame->tf_elr;
+ }
+
+ LIST_FOREACH(uh, &undef_handlers[el], uh_link) {
+ ret = uh->uh_handler(frame->tf_elr, insn, frame, frame->tf_esr);
+ if (ret)
+ return (1);
+ }
+
+ return (0);
+}
diff --git a/sys/arm64/arm64/unwind.c b/sys/arm64/arm64/unwind.c
new file mode 100644
index 000000000000..bef9c6fa31f1
--- /dev/null
+++ b/sys/arm64/arm64/unwind.c
@@ -0,0 +1,53 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under
+ * the sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#include <sys/param.h>
+
+#include <machine/stack.h>
+#include <machine/vmparam.h>
+
+int
+unwind_frame(struct unwind_state *frame)
+{
+ uint64_t fp;
+
+ fp = frame->fp;
+ if (!INKERNEL(fp))
+ return (-1);
+
+ frame->sp = fp + 0x10;
+ /* FP to previous frame (X29) */
+ frame->fp = *(uint64_t *)(fp);
+ /* LR (X30) */
+ frame->pc = *(uint64_t *)(fp + 8) - 4;
+
+ return (0);
+}
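+
+/*
+ * Illustrative sketch, not part of this change: a backtrace seeds the state
+ * from the current frame pointer and walks until unwind_frame() rejects a
+ * frame outside the kernel.  The seed values are assumptions for the example.
+ *
+ *	struct unwind_state state;
+ *
+ *	state.fp = (uint64_t)__builtin_frame_address(0);
+ *	state.pc = (uint64_t)unwind_frame;
+ *	while (unwind_frame(&state) == 0)
+ *		printf("pc %lx\n", state.pc);
+ */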
diff --git a/sys/arm64/arm64/vfp.c b/sys/arm64/arm64/vfp.c
new file mode 100644
index 000000000000..51fba7a8a300
--- /dev/null
+++ b/sys/arm64/arm64/vfp.c
@@ -0,0 +1,380 @@
+/*-
+ * Copyright (c) 2015-2016 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifdef VFP
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/pcpu.h>
+#include <sys/proc.h>
+
+#include <machine/armreg.h>
+#include <machine/pcb.h>
+#include <machine/vfp.h>
+
+/* Sanity check we can store all the VFP registers */
+CTASSERT(sizeof(((struct pcb *)0)->pcb_fpustate.vfp_regs) == 16 * 32);
+
+static MALLOC_DEFINE(M_FPUKERN_CTX, "fpukern_ctx",
+ "Kernel contexts for VFP state");
+
+struct fpu_kern_ctx {
+ struct vfpstate *prev;
+#define FPU_KERN_CTX_DUMMY 0x01 /* avoided save for the kern thread */
+#define FPU_KERN_CTX_INUSE 0x02
+ uint32_t flags;
+ struct vfpstate state;
+};
+
+static void
+vfp_enable(void)
+{
+ uint32_t cpacr;
+
+ cpacr = READ_SPECIALREG(cpacr_el1);
+ cpacr = (cpacr & ~CPACR_FPEN_MASK) | CPACR_FPEN_TRAP_NONE;
+ WRITE_SPECIALREG(cpacr_el1, cpacr);
+ isb();
+}
+
+static void
+vfp_disable(void)
+{
+ uint32_t cpacr;
+
+ cpacr = READ_SPECIALREG(cpacr_el1);
+ cpacr = (cpacr & ~CPACR_FPEN_MASK) | CPACR_FPEN_TRAP_ALL1;
+ WRITE_SPECIALREG(cpacr_el1, cpacr);
+ isb();
+}
+
+/*
+ * Called when the thread is dying or when discarding the kernel VFP state.
+ * If the thread was the last to use the VFP unit, mark it as unused to tell
+ * the kernel the fp state is unowned. Ensure the VFP unit is off so we get
+ * an exception on the next access.
+ */
+void
+vfp_discard(struct thread *td)
+{
+
+#ifdef INVARIANTS
+ if (td != NULL)
+ CRITICAL_ASSERT(td);
+#endif
+ if (PCPU_GET(fpcurthread) == td)
+ PCPU_SET(fpcurthread, NULL);
+
+ vfp_disable();
+}
+
+static void
+vfp_store(struct vfpstate *state)
+{
+ __int128_t *vfp_state;
+ uint64_t fpcr, fpsr;
+
+ vfp_state = state->vfp_regs;
+ __asm __volatile(
+ "mrs %0, fpcr \n"
+ "mrs %1, fpsr \n"
+ "stp q0, q1, [%2, #16 * 0]\n"
+ "stp q2, q3, [%2, #16 * 2]\n"
+ "stp q4, q5, [%2, #16 * 4]\n"
+ "stp q6, q7, [%2, #16 * 6]\n"
+ "stp q8, q9, [%2, #16 * 8]\n"
+ "stp q10, q11, [%2, #16 * 10]\n"
+ "stp q12, q13, [%2, #16 * 12]\n"
+ "stp q14, q15, [%2, #16 * 14]\n"
+ "stp q16, q17, [%2, #16 * 16]\n"
+ "stp q18, q19, [%2, #16 * 18]\n"
+ "stp q20, q21, [%2, #16 * 20]\n"
+ "stp q22, q23, [%2, #16 * 22]\n"
+ "stp q24, q25, [%2, #16 * 24]\n"
+ "stp q26, q27, [%2, #16 * 26]\n"
+ "stp q28, q29, [%2, #16 * 28]\n"
+ "stp q30, q31, [%2, #16 * 30]\n"
+ : "=&r"(fpcr), "=&r"(fpsr) : "r"(vfp_state));
+
+ state->vfp_fpcr = fpcr;
+ state->vfp_fpsr = fpsr;
+}
+
+static void
+vfp_restore(struct vfpstate *state)
+{
+ __int128_t *vfp_state;
+ uint64_t fpcr, fpsr;
+
+ vfp_state = state->vfp_regs;
+ fpcr = state->vfp_fpcr;
+ fpsr = state->vfp_fpsr;
+
+ __asm __volatile(
+ "ldp q0, q1, [%2, #16 * 0]\n"
+ "ldp q2, q3, [%2, #16 * 2]\n"
+ "ldp q4, q5, [%2, #16 * 4]\n"
+ "ldp q6, q7, [%2, #16 * 6]\n"
+ "ldp q8, q9, [%2, #16 * 8]\n"
+ "ldp q10, q11, [%2, #16 * 10]\n"
+ "ldp q12, q13, [%2, #16 * 12]\n"
+ "ldp q14, q15, [%2, #16 * 14]\n"
+ "ldp q16, q17, [%2, #16 * 16]\n"
+ "ldp q18, q19, [%2, #16 * 18]\n"
+ "ldp q20, q21, [%2, #16 * 20]\n"
+ "ldp q22, q23, [%2, #16 * 22]\n"
+ "ldp q24, q25, [%2, #16 * 24]\n"
+ "ldp q26, q27, [%2, #16 * 26]\n"
+ "ldp q28, q29, [%2, #16 * 28]\n"
+ "ldp q30, q31, [%2, #16 * 30]\n"
+ "msr fpcr, %0 \n"
+ "msr fpsr, %1 \n"
+ : : "r"(fpcr), "r"(fpsr), "r"(vfp_state));
+}
+
+void
+vfp_save_state(struct thread *td, struct pcb *pcb)
+{
+ uint32_t cpacr;
+
+ KASSERT(pcb != NULL, ("NULL vfp pcb"));
+ KASSERT(td == NULL || td->td_pcb == pcb, ("Invalid vfp pcb"));
+
+ /*
+	 * savectx() will be called on panic with dumppcb as an argument;
+ * dumppcb doesn't have pcb_fpusaved set, so set it to save
+ * the VFP registers.
+ */
+ if (pcb->pcb_fpusaved == NULL)
+ pcb->pcb_fpusaved = &pcb->pcb_fpustate;
+
+ if (td == NULL)
+ td = curthread;
+
+ critical_enter();
+ /*
+ * Only store the registers if the VFP is enabled,
+ * i.e. return if we are trapping on FP access.
+ */
+ cpacr = READ_SPECIALREG(cpacr_el1);
+ if ((cpacr & CPACR_FPEN_MASK) == CPACR_FPEN_TRAP_NONE) {
+ KASSERT(PCPU_GET(fpcurthread) == td,
+ ("Storing an invalid VFP state"));
+
+ vfp_store(pcb->pcb_fpusaved);
+ dsb(ish);
+ vfp_disable();
+ }
+ critical_exit();
+}
+
+void
+vfp_restore_state(void)
+{
+ struct pcb *curpcb;
+ u_int cpu;
+
+ critical_enter();
+
+ cpu = PCPU_GET(cpuid);
+ curpcb = curthread->td_pcb;
+ curpcb->pcb_fpflags |= PCB_FP_STARTED;
+
+ vfp_enable();
+
+ /*
+ * If the previous thread on this cpu to use the VFP was not the
+ * current thread, or the current thread last used it on a different
+	 * cpu, we need to restore the old state.
+ */
+ if (PCPU_GET(fpcurthread) != curthread || cpu != curpcb->pcb_vfpcpu) {
+ vfp_restore(curthread->td_pcb->pcb_fpusaved);
+ PCPU_SET(fpcurthread, curthread);
+ curpcb->pcb_vfpcpu = cpu;
+ }
+
+ critical_exit();
+}
+
+void
+vfp_init(void)
+{
+ uint64_t pfr;
+
+ /* Check if there is a vfp unit present */
+ pfr = READ_SPECIALREG(id_aa64pfr0_el1);
+ if ((pfr & ID_AA64PFR0_FP_MASK) == ID_AA64PFR0_FP_NONE)
+ return;
+
+ /* Disable to be enabled when it's used */
+ vfp_disable();
+}
+
+SYSINIT(vfp, SI_SUB_CPU, SI_ORDER_ANY, vfp_init, NULL);
+
+struct fpu_kern_ctx *
+fpu_kern_alloc_ctx(u_int flags)
+{
+ struct fpu_kern_ctx *res;
+ size_t sz;
+
+ sz = sizeof(struct fpu_kern_ctx);
+ res = malloc(sz, M_FPUKERN_CTX, ((flags & FPU_KERN_NOWAIT) ?
+ M_NOWAIT : M_WAITOK) | M_ZERO);
+ return (res);
+}
+
+void
+fpu_kern_free_ctx(struct fpu_kern_ctx *ctx)
+{
+
+ KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) == 0, ("free'ing inuse ctx"));
+ /* XXXAndrew clear the memory ? */
+ free(ctx, M_FPUKERN_CTX);
+}
+
+void
+fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags)
+{
+ struct pcb *pcb;
+
+ pcb = td->td_pcb;
+ KASSERT((flags & FPU_KERN_NOCTX) != 0 || ctx != NULL,
+ ("ctx is required when !FPU_KERN_NOCTX"));
+ KASSERT(ctx == NULL || (ctx->flags & FPU_KERN_CTX_INUSE) == 0,
+ ("using inuse ctx"));
+ KASSERT((pcb->pcb_fpflags & PCB_FP_NOSAVE) == 0,
+ ("recursive fpu_kern_enter while in PCB_FP_NOSAVE state"));
+
+ if ((flags & FPU_KERN_NOCTX) != 0) {
+ critical_enter();
+ if (curthread == PCPU_GET(fpcurthread)) {
+ vfp_save_state(curthread, pcb);
+ }
+ PCPU_SET(fpcurthread, NULL);
+
+ vfp_enable();
+ pcb->pcb_fpflags |= PCB_FP_KERN | PCB_FP_NOSAVE |
+ PCB_FP_STARTED;
+ return;
+ }
+
+ if ((flags & FPU_KERN_KTHR) != 0 && is_fpu_kern_thread(0)) {
+ ctx->flags = FPU_KERN_CTX_DUMMY | FPU_KERN_CTX_INUSE;
+ return;
+ }
+ /*
+	 * Check that either we are already using the VFP in the kernel, or
+	 * the saved state points to the default user space.
+ */
+ KASSERT((pcb->pcb_fpflags & PCB_FP_KERN) != 0 ||
+ pcb->pcb_fpusaved == &pcb->pcb_fpustate,
+ ("Mangled pcb_fpusaved %x %p %p", pcb->pcb_fpflags, pcb->pcb_fpusaved, &pcb->pcb_fpustate));
+ ctx->flags = FPU_KERN_CTX_INUSE;
+ vfp_save_state(curthread, pcb);
+ ctx->prev = pcb->pcb_fpusaved;
+ pcb->pcb_fpusaved = &ctx->state;
+ pcb->pcb_fpflags |= PCB_FP_KERN;
+ pcb->pcb_fpflags &= ~PCB_FP_STARTED;
+
+ return;
+}
+
+int
+fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx)
+{
+ struct pcb *pcb;
+
+ pcb = td->td_pcb;
+
+ if ((pcb->pcb_fpflags & PCB_FP_NOSAVE) != 0) {
+ KASSERT(ctx == NULL, ("non-null ctx after FPU_KERN_NOCTX"));
+ KASSERT(PCPU_GET(fpcurthread) == NULL,
+ ("non-NULL fpcurthread for PCB_FP_NOSAVE"));
+ CRITICAL_ASSERT(td);
+
+ vfp_disable();
+ pcb->pcb_fpflags &= ~(PCB_FP_NOSAVE | PCB_FP_STARTED);
+ critical_exit();
+ } else {
+ KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) != 0,
+ ("FPU context not inuse"));
+ ctx->flags &= ~FPU_KERN_CTX_INUSE;
+
+ if (is_fpu_kern_thread(0) &&
+ (ctx->flags & FPU_KERN_CTX_DUMMY) != 0)
+ return (0);
+ KASSERT((ctx->flags & FPU_KERN_CTX_DUMMY) == 0, ("dummy ctx"));
+ critical_enter();
+ vfp_discard(td);
+ critical_exit();
+ pcb->pcb_fpflags &= ~PCB_FP_STARTED;
+ pcb->pcb_fpusaved = ctx->prev;
+ }
+
+ if (pcb->pcb_fpusaved == &pcb->pcb_fpustate) {
+ pcb->pcb_fpflags &= ~PCB_FP_KERN;
+ } else {
+ KASSERT((pcb->pcb_fpflags & PCB_FP_KERN) != 0,
+ ("unpaired fpu_kern_leave"));
+ }
+
+ return (0);
+}
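+
+/*
+ * Illustrative sketch, not part of this change: kernel code that needs the
+ * SIMD registers brackets the work with fpu_kern_enter()/fpu_kern_leave(),
+ * typically with a context allocated up front.  FPU_KERN_NORMAL is assumed
+ * to be the default flag here.
+ *
+ *	struct fpu_kern_ctx *ctx;
+ *
+ *	ctx = fpu_kern_alloc_ctx(0);
+ *	fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL);
+ *	... use the q registers ...
+ *	fpu_kern_leave(curthread, ctx);
+ *	fpu_kern_free_ctx(ctx);
+ */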
+
+int
+fpu_kern_thread(u_int flags)
+{
+ struct pcb *pcb = curthread->td_pcb;
+
+ KASSERT((curthread->td_pflags & TDP_KTHREAD) != 0,
+ ("Only kthread may use fpu_kern_thread"));
+ KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
+ ("Mangled pcb_fpusaved"));
+ KASSERT((pcb->pcb_fpflags & PCB_FP_KERN) == 0,
+ ("Thread already setup for the VFP"));
+ pcb->pcb_fpflags |= PCB_FP_KERN;
+ return (0);
+}
+
+int
+is_fpu_kern_thread(u_int flags)
+{
+ struct pcb *curpcb;
+
+ if ((curthread->td_pflags & TDP_KTHREAD) == 0)
+ return (0);
+ curpcb = curthread->td_pcb;
+ return ((curpcb->pcb_fpflags & PCB_FP_KERN) != 0);
+}
+#endif
diff --git a/sys/arm64/arm64/vm_machdep.c b/sys/arm64/arm64/vm_machdep.c
new file mode 100644
index 000000000000..3b928ad7cabf
--- /dev/null
+++ b/sys/arm64/arm64/vm_machdep.c
@@ -0,0 +1,300 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include "opt_platform.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/limits.h>
+#include <sys/proc.h>
+#include <sys/sf_buf.h>
+#include <sys/signal.h>
+#include <sys/sysent.h>
+#include <sys/unistd.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/uma.h>
+#include <vm/uma_int.h>
+
+#include <machine/armreg.h>
+#include <machine/cpu.h>
+#include <machine/md_var.h>
+#include <machine/pcb.h>
+#include <machine/frame.h>
+
+#ifdef VFP
+#include <machine/vfp.h>
+#endif
+
+#include <dev/psci/psci.h>
+
+/*
+ * Finish a fork operation, with process p2 nearly set up.
+ * Copy and update the pcb, and set up the stack so that the child
+ * is ready to run and return to user mode.
+ */
+void
+cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
+{
+ struct pcb *pcb2;
+ struct trapframe *tf;
+
+ if ((flags & RFPROC) == 0)
+ return;
+
+ if (td1 == curthread) {
+ /*
+		 * Save tpidr_el0 and the VFP state; these are normally saved
+		 * in cpu_switch, but if userland changes them and then forks
+		 * this may not have happened yet.
+ */
+ td1->td_pcb->pcb_tpidr_el0 = READ_SPECIALREG(tpidr_el0);
+ td1->td_pcb->pcb_tpidrro_el0 = READ_SPECIALREG(tpidrro_el0);
+#ifdef VFP
+ if ((td1->td_pcb->pcb_fpflags & PCB_FP_STARTED) != 0)
+ vfp_save_state(td1, td1->td_pcb);
+#endif
+ }
+
+ pcb2 = (struct pcb *)(td2->td_kstack +
+ td2->td_kstack_pages * PAGE_SIZE) - 1;
+
+ td2->td_pcb = pcb2;
+ bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
+
+ tf = (struct trapframe *)STACKALIGN((struct trapframe *)pcb2 - 1);
+ bcopy(td1->td_frame, tf, sizeof(*tf));
+ tf->tf_x[0] = 0;
+ tf->tf_x[1] = 0;
+ tf->tf_spsr = td1->td_frame->tf_spsr & (PSR_M_32 | PSR_DAIF);
+
+ td2->td_frame = tf;
+
+ /* Set the return value registers for fork() */
+ td2->td_pcb->pcb_x[8] = (uintptr_t)fork_return;
+ td2->td_pcb->pcb_x[9] = (uintptr_t)td2;
+ td2->td_pcb->pcb_x[PCB_LR] = (uintptr_t)fork_trampoline;
+ td2->td_pcb->pcb_sp = (uintptr_t)td2->td_frame;
+ td2->td_pcb->pcb_fpusaved = &td2->td_pcb->pcb_fpustate;
+ td2->td_pcb->pcb_vfpcpu = UINT_MAX;
+
+ /* Setup to release spin count in fork_exit(). */
+ td2->td_md.md_spinlock_count = 1;
+ td2->td_md.md_saved_daif = td1->td_md.md_saved_daif & ~DAIF_I_MASKED;
+}
+
+void
+cpu_reset(void)
+{
+
+ psci_reset();
+
+ printf("cpu_reset failed");
+ while(1)
+ __asm volatile("wfi" ::: "memory");
+}
+
+void
+cpu_thread_swapin(struct thread *td)
+{
+}
+
+void
+cpu_thread_swapout(struct thread *td)
+{
+}
+
+void
+cpu_set_syscall_retval(struct thread *td, int error)
+{
+ struct trapframe *frame;
+
+ frame = td->td_frame;
+
+ switch (error) {
+ case 0:
+ frame->tf_x[0] = td->td_retval[0];
+ frame->tf_x[1] = td->td_retval[1];
+ frame->tf_spsr &= ~PSR_C; /* carry bit */
+ break;
+ case ERESTART:
+ frame->tf_elr -= 4;
+ break;
+ case EJUSTRETURN:
+ break;
+ default:
+ frame->tf_spsr |= PSR_C; /* carry bit */
+ frame->tf_x[0] = SV_ABI_ERRNO(td->td_proc, error);
+ break;
+ }
+}
+
+/*
+ * Initialize machine state, mostly pcb and trap frame for a new
+ * thread, about to return to userspace. Put enough state in the new
+ * thread's PCB to get it to go back to the fork_return(), which
+ * finalizes the thread state and handles peculiarities of the first
+ * return to userspace for the new thread.
+ */
+void
+cpu_copy_thread(struct thread *td, struct thread *td0)
+{
+ bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
+ bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb));
+
+ td->td_pcb->pcb_x[8] = (uintptr_t)fork_return;
+ td->td_pcb->pcb_x[9] = (uintptr_t)td;
+ td->td_pcb->pcb_x[PCB_LR] = (uintptr_t)fork_trampoline;
+ td->td_pcb->pcb_sp = (uintptr_t)td->td_frame;
+ td->td_pcb->pcb_fpusaved = &td->td_pcb->pcb_fpustate;
+ td->td_pcb->pcb_vfpcpu = UINT_MAX;
+
+ /* Setup to release spin count in fork_exit(). */
+ td->td_md.md_spinlock_count = 1;
+ td->td_md.md_saved_daif = td0->td_md.md_saved_daif & ~DAIF_I_MASKED;
+}
+
+/*
+ * Set that machine state for performing an upcall that starts
+ * the entry function with the given argument.
+ */
+void
+cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
+ stack_t *stack)
+{
+ struct trapframe *tf = td->td_frame;
+
+	/* 32-bit processes use r13 for sp */
+ if (td->td_frame->tf_spsr & PSR_M_32)
+ tf->tf_x[13] = STACKALIGN((uintptr_t)stack->ss_sp + stack->ss_size);
+ else
+ tf->tf_sp = STACKALIGN((uintptr_t)stack->ss_sp + stack->ss_size);
+ tf->tf_elr = (register_t)entry;
+ tf->tf_x[0] = (register_t)arg;
+}
+
+int
+cpu_set_user_tls(struct thread *td, void *tls_base)
+{
+ struct pcb *pcb;
+
+ if ((uintptr_t)tls_base >= VM_MAXUSER_ADDRESS)
+ return (EINVAL);
+
+ pcb = td->td_pcb;
+ if (td->td_frame->tf_spsr & PSR_M_32) {
+		/* 32-bit arm stores the user TLS in tpidrro */
+ pcb->pcb_tpidrro_el0 = (register_t)tls_base;
+ pcb->pcb_tpidr_el0 = (register_t)tls_base;
+ if (td == curthread) {
+ WRITE_SPECIALREG(tpidrro_el0, tls_base);
+ WRITE_SPECIALREG(tpidr_el0, tls_base);
+ }
+ } else {
+ pcb->pcb_tpidr_el0 = (register_t)tls_base;
+ if (td == curthread)
+ WRITE_SPECIALREG(tpidr_el0, tls_base);
+ }
+
+ return (0);
+}
+
+void
+cpu_thread_exit(struct thread *td)
+{
+}
+
+void
+cpu_thread_alloc(struct thread *td)
+{
+
+ td->td_pcb = (struct pcb *)(td->td_kstack +
+ td->td_kstack_pages * PAGE_SIZE) - 1;
+ td->td_frame = (struct trapframe *)STACKALIGN(
+ (struct trapframe *)td->td_pcb - 1);
+}
+
+void
+cpu_thread_free(struct thread *td)
+{
+}
+
+void
+cpu_thread_clean(struct thread *td)
+{
+}
+
+/*
+ * Intercept the return address from a freshly forked process that has NOT
+ * been scheduled yet.
+ *
+ * This is needed to make kernel threads stay in kernel mode.
+ */
+void
+cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
+{
+
+ td->td_pcb->pcb_x[8] = (uintptr_t)func;
+ td->td_pcb->pcb_x[9] = (uintptr_t)arg;
+ td->td_pcb->pcb_x[PCB_LR] = (uintptr_t)fork_trampoline;
+ td->td_pcb->pcb_sp = (uintptr_t)td->td_frame;
+ td->td_pcb->pcb_fpusaved = &td->td_pcb->pcb_fpustate;
+ td->td_pcb->pcb_vfpcpu = UINT_MAX;
+}
+
+void
+cpu_exit(struct thread *td)
+{
+}
+
+bool
+cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
+{
+
+ return (true);
+}
+
+int
+cpu_procctl(struct thread *td __unused, int idtype __unused, id_t id __unused,
+ int com __unused, void *data __unused)
+{
+
+ return (EINVAL);
+}
+
+void
+swi_vm(void *v)
+{
+
+ if (busdma_swi_pending != 0)
+ busdma_swi();
+}
diff --git a/sys/arm64/broadcom/brcmmdio/mdio_mux_iproc.c b/sys/arm64/broadcom/brcmmdio/mdio_mux_iproc.c
new file mode 100644
index 000000000000..51930d88e43f
--- /dev/null
+++ b/sys/arm64/broadcom/brcmmdio/mdio_mux_iproc.c
@@ -0,0 +1,399 @@
+/*-
+ * Copyright (c) 2019 Juniper Networks, Inc.
+ * Copyright (c) 2019 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/systm.h>
+
+#include <dev/fdt/simplebus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/ofw/ofw_bus.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include "mdio_if.h"
+
+#define REG_BASE_RID 0
+
+#define MDIO_RATE_ADJ_EXT_OFFSET 0x000
+#define MDIO_RATE_ADJ_INT_OFFSET 0x004
+#define MDIO_RATE_ADJ_DIVIDENT_SHIFT 16
+
+#define MDIO_SCAN_CTRL_OFFSET 0x008
+#define MDIO_SCAN_CTRL_OVRIDE_EXT_MSTR 28
+
+#define MDIO_PARAM_OFFSET 0x23c
+#define MDIO_PARAM_MIIM_CYCLE 29
+#define MDIO_PARAM_INTERNAL_SEL 25
+#define MDIO_PARAM_BUS_ID 22
+#define MDIO_PARAM_C45_SEL 21
+#define MDIO_PARAM_PHY_ID 16
+#define MDIO_PARAM_PHY_DATA 0
+
+#define MDIO_READ_OFFSET 0x240
+#define MDIO_READ_DATA_MASK 0xffff
+#define MDIO_ADDR_OFFSET 0x244
+
+#define MDIO_CTRL_OFFSET 0x248
+#define MDIO_CTRL_WRITE_OP 0x1
+#define MDIO_CTRL_READ_OP 0x2
+
+#define MDIO_STAT_OFFSET 0x24c
+#define MDIO_STAT_DONE 1
+
+#define BUS_MAX_ADDR 32
+#define EXT_BUS_START_ADDR 16
+
+#define MDIO_REG_ADDR_SPACE_SIZE 0x250
+
+#define MDIO_OPERATING_FREQUENCY 11000000
+#define MDIO_RATE_ADJ_DIVIDENT 1
+
+#define MII_ADDR_C45 (1<<30)
+
+static int brcm_iproc_mdio_probe(device_t);
+static int brcm_iproc_mdio_attach(device_t);
+static int brcm_iproc_mdio_detach(device_t);
+
+/* OFW bus interface */
+struct brcm_mdio_ofw_devinfo {
+ struct ofw_bus_devinfo di_dinfo;
+ struct resource_list di_rl;
+};
+
+struct brcm_iproc_mdio_softc {
+ struct simplebus_softc sbus;
+ device_t dev;
+ struct resource * reg_base;
+ uint32_t clock_rate;
+};
+
+MALLOC_DEFINE(M_BRCM_IPROC_MDIO, "Broadcom IPROC MDIO",
+ "Broadcom IPROC MDIO dynamic memory");
+
+static int brcm_iproc_config(struct brcm_iproc_mdio_softc*);
+static const struct ofw_bus_devinfo *
+brcm_iproc_mdio_get_devinfo(device_t, device_t);
+static int brcm_iproc_mdio_write_mux(device_t, int, int, int, int);
+static int brcm_iproc_mdio_read_mux(device_t, int, int, int);
+
+static device_method_t brcm_iproc_mdio_fdt_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, brcm_iproc_mdio_probe),
+ DEVMETHOD(device_attach, brcm_iproc_mdio_attach),
+ DEVMETHOD(device_detach, brcm_iproc_mdio_detach),
+
+ /* Bus interface */
+ DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource),
+ DEVMETHOD(bus_release_resource, bus_generic_release_resource),
+ DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
+
+ /* ofw_bus interface */
+ DEVMETHOD(ofw_bus_get_devinfo, brcm_iproc_mdio_get_devinfo),
+ DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat),
+ DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model),
+ DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name),
+ DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node),
+ DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type),
+
+ /* MDIO interface */
+ DEVMETHOD(mdio_writereg_mux, brcm_iproc_mdio_write_mux),
+ DEVMETHOD(mdio_readreg_mux, brcm_iproc_mdio_read_mux),
+
+ /* End */
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(brcm_iproc_mdio, brcm_iproc_mdio_driver,
+ brcm_iproc_mdio_fdt_methods, sizeof(struct brcm_iproc_mdio_softc));
+
+static devclass_t brcm_iproc_mdio_fdt_devclass;
+
+EARLY_DRIVER_MODULE(brcm_iproc_mdio, ofwbus, brcm_iproc_mdio_driver,
+ brcm_iproc_mdio_fdt_devclass, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
+EARLY_DRIVER_MODULE(brcm_iproc_mdio, simplebus, brcm_iproc_mdio_driver,
+ brcm_iproc_mdio_fdt_devclass, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
+
+static struct ofw_compat_data mdio_compat_data[] = {
+ {"brcm,mdio-mux-iproc", true},
+ {NULL, false}
+};
+
+static int
+brcm_iproc_switch(struct brcm_iproc_mdio_softc *sc, int child)
+{
+ uint32_t param, bus_id;
+ uint32_t bus_dir;
+
+ /* select bus and its properties */
+ bus_dir = (child < EXT_BUS_START_ADDR);
+ bus_id = bus_dir ? child : (child - EXT_BUS_START_ADDR);
+
+ param = (bus_dir ? 1 : 0) << MDIO_PARAM_INTERNAL_SEL;
+ param |= (bus_id << MDIO_PARAM_BUS_ID);
+
+ bus_write_4(sc->reg_base, MDIO_PARAM_OFFSET, param);
+
+ return (0);
+}
+
+static int
+iproc_mdio_wait_for_idle(struct brcm_iproc_mdio_softc *sc, uint32_t result)
+{
+ unsigned int timeout = 1000; /* loop for 1s */
+ uint32_t val;
+
+ do {
+ val = bus_read_4(sc->reg_base, MDIO_STAT_OFFSET);
+ if ((val & MDIO_STAT_DONE) == result)
+ return (0);
+
+ pause("BRCM MDIO SLEEP", 1000 / hz);
+ } while (timeout--);
+
+ return (ETIMEDOUT);
+}
+
+/*
+ * brcm_iproc_mdio_op - Program and start an MDIO transaction over the mdio bus.
+ * @sc: Device softc.
+ * @phyid: phyid on the selected bus.
+ * @reg: register offset to be read/written.
+ * @val: 0 if read op, else the value to be written to @reg.
+ * @op: Operation that needs to be carried out.
+ *      MDIO_CTRL_READ_OP: Read transaction.
+ *      MDIO_CTRL_WRITE_OP: Write transaction.
+ *
+ * Return value: a successful read operation returns the read register value
+ * and a write operation returns 0.  A failed operation returns an error code.
+ */
+static int
+brcm_iproc_mdio_op(struct brcm_iproc_mdio_softc *sc,
+ uint16_t phyid, uint32_t reg, uint32_t val, uint32_t op)
+{
+ uint32_t param;
+ int ret;
+
+ bus_write_4(sc->reg_base, MDIO_CTRL_OFFSET, 0);
+ bus_read_4(sc->reg_base, MDIO_STAT_OFFSET);
+ ret = iproc_mdio_wait_for_idle(sc, 0);
+ if (ret)
+ goto err;
+
+ param = bus_read_4(sc->reg_base, MDIO_PARAM_OFFSET);
+ param |= phyid << MDIO_PARAM_PHY_ID;
+ param |= val << MDIO_PARAM_PHY_DATA;
+ if (reg & MII_ADDR_C45)
+ param |= (1 << MDIO_PARAM_C45_SEL);
+
+ bus_write_4(sc->reg_base, MDIO_PARAM_OFFSET, param);
+
+ bus_write_4(sc->reg_base, MDIO_ADDR_OFFSET, reg);
+
+ bus_write_4(sc->reg_base, MDIO_CTRL_OFFSET, op);
+
+ ret = iproc_mdio_wait_for_idle(sc, 1);
+ if (ret)
+ goto err;
+
+ if (op == MDIO_CTRL_READ_OP)
+ ret = bus_read_4(sc->reg_base, MDIO_READ_OFFSET) & MDIO_READ_DATA_MASK;
+err:
+	return (ret);
+}
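+
+/*
+ * Illustrative sketch, not part of this change: a PHY register read is a
+ * single call once the bus mux has been selected (the _mux wrappers below do
+ * this via brcm_iproc_switch()).  The register offset used is an assumption.
+ *
+ *	val = brcm_iproc_mdio_op(sc, phyid, 0x1, 0, MDIO_CTRL_READ_OP);
+ */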
+
+static int
+brcm_iproc_config(struct brcm_iproc_mdio_softc *sc)
+{
+ uint32_t divisor;
+ uint32_t val;
+
+ /* Disable external mdio master access */
+ val = bus_read_4(sc->reg_base, MDIO_SCAN_CTRL_OFFSET);
+ val |= 1 << MDIO_SCAN_CTRL_OVRIDE_EXT_MSTR;
+ bus_write_4(sc->reg_base, MDIO_SCAN_CTRL_OFFSET, val);
+
+ if (sc->clock_rate) {
+		/*
+		 * Use the rate adjust regs to derive the mdio's operating
+		 * frequency from the specified core clock.
+		 */
+ divisor = sc->clock_rate / MDIO_OPERATING_FREQUENCY;
+ divisor = divisor / (MDIO_RATE_ADJ_DIVIDENT + 1);
+ val = divisor;
+ val |= MDIO_RATE_ADJ_DIVIDENT << MDIO_RATE_ADJ_DIVIDENT_SHIFT;
+ bus_write_4(sc->reg_base, MDIO_RATE_ADJ_EXT_OFFSET, val);
+ bus_write_4(sc->reg_base, MDIO_RATE_ADJ_INT_OFFSET, val);
+ }
+
+ return (0);
+}
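+
+/*
+ * Worked example with an assumed 250 MHz core clock (not part of this
+ * change): divisor = 250000000 / 11000000 = 22, then
+ * 22 / (MDIO_RATE_ADJ_DIVIDENT + 1) = 11, which is written to both rate
+ * adjust registers along with the MDIO_RATE_ADJ_DIVIDENT field at bit 16.
+ */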
+
+static int
+brcm_iproc_mdio_write_mux(device_t dev, int bus, int phy, int reg, int val)
+{
+ struct brcm_iproc_mdio_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ if (brcm_iproc_switch(sc, bus) != 0) {
+ device_printf(dev, "Failed to set BUS MUX\n");
+ return (EINVAL);
+ }
+
+ return (brcm_iproc_mdio_op(sc, phy, reg, val, MDIO_CTRL_WRITE_OP));
+}
+
+static int
+brcm_iproc_mdio_read_mux(device_t dev, int bus, int phy, int reg)
+{
+ struct brcm_iproc_mdio_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ if (brcm_iproc_switch(sc, bus) != 0) {
+ device_printf(dev, "Failed to set BUS MUX\n");
+ return (EINVAL);
+ }
+
+ return (brcm_iproc_mdio_op(sc, phy, reg, 0, MDIO_CTRL_READ_OP));
+}
+
+static int
+brcm_iproc_mdio_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+ if (!ofw_bus_search_compatible(dev, mdio_compat_data)->ocd_data)
+ return (ENXIO);
+
+ device_set_desc(dev, "Broadcom MDIO MUX driver");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+brcm_iproc_mdio_attach(device_t dev)
+{
+ struct brcm_iproc_mdio_softc *sc;
+ phandle_t node, parent;
+ struct brcm_mdio_ofw_devinfo *di;
+ int rid;
+ device_t child;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ /* Allocate memory resources */
+ rid = REG_BASE_RID;
+ sc->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
+ RF_ACTIVE);
+ if (sc->reg_base == NULL) {
+ device_printf(dev, "Could not allocate memory\n");
+ return (ENXIO);
+ }
+
+	/* Configure the MDIO controller */
+ if (brcm_iproc_config(sc) < 0) {
+ device_printf(dev, "Unable to initialize IPROC MDIO\n");
+ goto error;
+ }
+
+ parent = ofw_bus_get_node(dev);
+ simplebus_init(dev, parent);
+
+ /* Iterate through all bus subordinates */
+ for (node = OF_child(parent); node > 0; node = OF_peer(node)) {
+ /* Allocate and populate devinfo. */
+ di = malloc(sizeof(*di), M_BRCM_IPROC_MDIO, M_WAITOK | M_ZERO);
+ if (ofw_bus_gen_setup_devinfo(&di->di_dinfo, node) != 0) {
+ free(di, M_BRCM_IPROC_MDIO);
+ continue;
+ }
+
+ /* Initialize and populate resource list. */
+ resource_list_init(&di->di_rl);
+ ofw_bus_reg_to_rl(dev, node, sc->sbus.acells, sc->sbus.scells,
+ &di->di_rl);
+ ofw_bus_intr_to_rl(dev, node, &di->di_rl, NULL);
+
+ /* Add newbus device for this FDT node */
+ child = device_add_child(dev, NULL, -1);
+ if (child == NULL) {
+ printf("Failed to add child\n");
+ resource_list_free(&di->di_rl);
+ ofw_bus_gen_destroy_devinfo(&di->di_dinfo);
+ free(di, M_BRCM_IPROC_MDIO);
+ continue;
+ }
+
+ device_set_ivars(child, di);
+ }
+
+ /*
+	 * Register the device with this node/xref so that the device_t
+	 * structure can later be retrieved while holding only the node
+	 * reference acquired from the FDT.
+ */
+ node = ofw_bus_get_node(dev);
+ OF_device_register_xref(OF_xref_from_node(node), dev);
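+
+	/*
+	 * Illustrative sketch, not part of this change: consumers can later
+	 * map the FDT node back to this device, e.g.
+	 *
+	 *	dev = OF_device_from_xref(OF_xref_from_node(node));
+	 */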
+
+ return (bus_generic_attach(dev));
+
+error:
+ brcm_iproc_mdio_detach(dev);
+ return (ENXIO);
+}
+
+static const struct ofw_bus_devinfo *
+brcm_iproc_mdio_get_devinfo(device_t bus __unused, device_t child)
+{
+ struct brcm_mdio_ofw_devinfo *di;
+
+ di = device_get_ivars(child);
+ return (&di->di_dinfo);
+}
+
+static int
+brcm_iproc_mdio_detach(device_t dev)
+{
+ struct brcm_iproc_mdio_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ if (sc->reg_base != NULL) {
+ bus_release_resource(dev, SYS_RES_MEMORY, REG_BASE_RID,
+ sc->reg_base);
+ }
+
+ return (0);
+}
diff --git a/sys/arm64/broadcom/brcmmdio/mdio_nexus_iproc.c b/sys/arm64/broadcom/brcmmdio/mdio_nexus_iproc.c
new file mode 100644
index 000000000000..7385f6896db4
--- /dev/null
+++ b/sys/arm64/broadcom/brcmmdio/mdio_nexus_iproc.c
@@ -0,0 +1,234 @@
+/*-
+ * Copyright (c) 2019 Juniper Networks, Inc.
+ * Copyright (c) 2019 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/systm.h>
+
+#include <dev/fdt/simplebus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/ofw/ofw_bus.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include "mdio_if.h"
+
+MALLOC_DEFINE(M_BRCM_IPROC_NEXUS, "Broadcom IPROC MDIO NEXUS",
+ "Broadcom IPROC MDIO NEXUS dynamic memory");
+
+struct brcm_mdionexus_softc {
+ struct simplebus_softc simplebus_sc;
+ uint32_t mux_id;
+};
+
+/* OFW bus interface */
+struct brcm_mdionexus_ofw_devinfo {
+ struct ofw_bus_devinfo di_dinfo;
+ struct resource_list di_rl;
+};
+
+static device_probe_t brcm_mdionexus_fdt_probe;
+static device_attach_t brcm_mdionexus_fdt_attach;
+
+static const struct ofw_bus_devinfo * brcm_mdionexus_ofw_get_devinfo(device_t,
+ device_t);
+static int brcm_mdionexus_mdio_readreg(device_t, int, int);
+static int brcm_mdionexus_mdio_writereg(device_t, int, int, int);
+
+static device_method_t brcm_mdionexus_fdt_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, brcm_mdionexus_fdt_probe),
+ DEVMETHOD(device_attach, brcm_mdionexus_fdt_attach),
+
+ /* Bus interface */
+ DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource),
+ DEVMETHOD(bus_release_resource, bus_generic_release_resource),
+ DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
+
+ /* ofw_bus interface */
+ DEVMETHOD(ofw_bus_get_devinfo, brcm_mdionexus_ofw_get_devinfo),
+ DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat),
+ DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model),
+ DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name),
+ DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node),
+ DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type),
+
+ /* MDIO interface */
+ DEVMETHOD(mdio_readreg, brcm_mdionexus_mdio_readreg),
+ DEVMETHOD(mdio_writereg, brcm_mdionexus_mdio_writereg),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(brcm_mdionexus, brcm_mdionexus_fdt_driver, brcm_mdionexus_fdt_methods,
+ sizeof(struct brcm_mdionexus_softc));
+
+static devclass_t brcm_mdionexus_fdt_devclass;
+
+static driver_t brcm_mdionexus_driver = {
+ "brcm_mdionexus",
+ brcm_mdionexus_fdt_methods,
+ sizeof(struct brcm_mdionexus_softc)
+};
+EARLY_DRIVER_MODULE(brcm_mdionexus, brcm_iproc_mdio, brcm_mdionexus_driver,
+ brcm_mdionexus_fdt_devclass, NULL, NULL, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
+
+static int brcm_mdionexus_ofw_bus_attach(device_t);
+
+static int
+brcm_mdionexus_mdio_readreg(device_t dev, int phy, int reg)
+{
+ struct brcm_mdionexus_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ return (MDIO_READREG_MUX(device_get_parent(dev),
+ sc->mux_id, phy, reg));
+}
+
+static int
+brcm_mdionexus_mdio_writereg(device_t dev, int phy, int reg, int val)
+{
+ struct brcm_mdionexus_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ return (MDIO_WRITEREG_MUX(device_get_parent(dev),
+ sc->mux_id, phy, reg, val));
+}
+
+static __inline void
+get_addr_size_cells(phandle_t node, pcell_t *addr_cells, pcell_t *size_cells)
+{
+
+ *addr_cells = 2;
+ /* Find address cells if present */
+ OF_getencprop(node, "#address-cells", addr_cells, sizeof(*addr_cells));
+
+ *size_cells = 2;
+ /* Find size cells if present */
+ OF_getencprop(node, "#size-cells", size_cells, sizeof(*size_cells));
+}
+
+static int
+brcm_mdionexus_fdt_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+ device_set_desc(dev, "Broadcom MDIO nexus");
+ return (BUS_PROBE_SPECIFIC);
+}
+
+static int
+brcm_mdionexus_fdt_attach(device_t dev)
+{
+ struct brcm_mdionexus_softc *sc;
+ int err;
+ pcell_t addr_cells, size_cells, buf[2];
+ phandle_t node;
+
+ sc = device_get_softc(dev);
+
+ node = ofw_bus_get_node(dev);
+ get_addr_size_cells(node, &addr_cells, &size_cells);
+ if ((addr_cells != 1) || (size_cells != 0)) {
+ device_printf(dev, "Only addr_cells=1 and size_cells=0 are supported\n");
+ return (EINVAL);
+ }
+
+ if (OF_getencprop(node, "reg", buf, sizeof(pcell_t)) < 0)
+ return (ENXIO);
+
+ sc->mux_id = buf[0];
+
+ err = brcm_mdionexus_ofw_bus_attach(dev);
+ if (err != 0)
+ return (err);
+
+ return (bus_generic_attach(dev));
+}
+
+static const struct ofw_bus_devinfo *
+brcm_mdionexus_ofw_get_devinfo(device_t bus __unused, device_t child)
+{
+ struct brcm_mdionexus_ofw_devinfo *di;
+
+ di = device_get_ivars(child);
+ return (&di->di_dinfo);
+}
+
+static int
+brcm_mdionexus_ofw_bus_attach(device_t dev)
+{
+ struct simplebus_softc *sc;
+ struct brcm_mdionexus_ofw_devinfo *di;
+ device_t child;
+ phandle_t parent, node;
+
+ parent = ofw_bus_get_node(dev);
+ simplebus_init(dev, parent);
+
+ sc = (struct simplebus_softc *)device_get_softc(dev);
+
+ /* Iterate through all bus subordinates */
+ for (node = OF_child(parent); node > 0; node = OF_peer(node)) {
+ /* Allocate and populate devinfo. */
+ di = malloc(sizeof(*di), M_BRCM_IPROC_NEXUS, M_WAITOK | M_ZERO);
+ if (ofw_bus_gen_setup_devinfo(&di->di_dinfo, node) != 0) {
+ free(di, M_BRCM_IPROC_NEXUS);
+ continue;
+ }
+
+ /* Initialize and populate resource list. */
+ resource_list_init(&di->di_rl);
+ ofw_bus_reg_to_rl(dev, node, sc->acells, sc->scells,
+ &di->di_rl);
+ ofw_bus_intr_to_rl(dev, node, &di->di_rl, NULL);
+
+ /* Add newbus device for this FDT node */
+ child = device_add_child(dev, NULL, -1);
+ if (child == NULL) {
+ resource_list_free(&di->di_rl);
+ ofw_bus_gen_destroy_devinfo(&di->di_dinfo);
+ free(di, M_BRCM_IPROC_NEXUS);
+ continue;
+ }
+
+ device_set_ivars(child, di);
+ }
+
+ return (0);
+}
diff --git a/sys/arm64/broadcom/brcmmdio/mdio_ns2_pcie_phy.c b/sys/arm64/broadcom/brcmmdio/mdio_ns2_pcie_phy.c
new file mode 100644
index 000000000000..e2e29ad17815
--- /dev/null
+++ b/sys/arm64/broadcom/brcmmdio/mdio_ns2_pcie_phy.c
@@ -0,0 +1,162 @@
+/*-
+ * Copyright (c) 2019 Juniper Networks, Inc.
+ * Copyright (c) 2019 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/systm.h>
+
+#include <dev/fdt/simplebus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/ofw/ofw_bus.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include "mdio_if.h"
+
+#define BLK_ADDR_REG_OFFSET 0x1f
+#define PLL_AFE1_100MHZ_BLK 0x2100
+#define PLL_CLK_AMP_OFFSET 0x03
+#define PLL_CLK_AMP_2P05V 0x2b18
+
+struct ns2_pcie_phy_softc {
+ uint32_t phy_id;
+};
+
+static device_probe_t ns2_pcie_phy_fdt_probe;
+static device_attach_t ns2_pcie_phy_fdt_attach;
+
+static int ns2_pci_phy_init(device_t dev);
+
+static device_method_t ns2_pcie_phy_fdt_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, ns2_pcie_phy_fdt_probe),
+ DEVMETHOD(device_attach, ns2_pcie_phy_fdt_attach),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(ns2_pcie_phy, ns2_pcie_phy_fdt_driver, ns2_pcie_phy_fdt_methods,
+ sizeof(struct ns2_pcie_phy_softc));
+
+static devclass_t ns2_pcie_phy_fdt_devclass;
+
+static driver_t ns2_pcie_phy_driver = {
+ "ns2_pcie_phy",
+ ns2_pcie_phy_fdt_methods,
+ sizeof(struct ns2_pcie_phy_softc)
+};
+EARLY_DRIVER_MODULE(ns2_pcie_phy, brcm_mdionexus, ns2_pcie_phy_driver,
+ ns2_pcie_phy_fdt_devclass, NULL, NULL, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
+
+static int
+ns2_pci_phy_init(device_t dev)
+{
+ struct ns2_pcie_phy_softc *sc;
+ int err;
+
+ sc = device_get_softc(dev);
+
+ /* select the AFE 100MHz block page */
+ err = MDIO_WRITEREG(device_get_parent(dev), sc->phy_id,
+ BLK_ADDR_REG_OFFSET, PLL_AFE1_100MHZ_BLK);
+ if (err)
+ goto err;
+
+ /* set the 100 MHz reference clock amplitude to 2.05 v */
+ err = MDIO_WRITEREG(device_get_parent(dev), sc->phy_id,
+ PLL_CLK_AMP_OFFSET, PLL_CLK_AMP_2P05V);
+ if (err)
+ goto err;
+
+	return (0);
+
+err:
+ device_printf(dev, "Error %d writing to phy\n", err);
+ return (err);
+}
+
+static __inline void
+get_addr_size_cells(phandle_t node, pcell_t *addr_cells, pcell_t *size_cells)
+{
+
+ *addr_cells = 2;
+ /* Find address cells if present */
+ OF_getencprop(node, "#address-cells", addr_cells, sizeof(*addr_cells));
+
+ *size_cells = 2;
+ /* Find size cells if present */
+ OF_getencprop(node, "#size-cells", size_cells, sizeof(*size_cells));
+}
+
+static int
+ns2_pcie_phy_fdt_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (!ofw_bus_is_compatible(dev, "brcm,ns2-pcie-phy"))
+ return (ENXIO);
+
+ device_set_desc(dev, "Broadcom NS2 PCIe PHY");
+ return (BUS_PROBE_SPECIFIC);
+}
+
+static int
+ns2_pcie_phy_fdt_attach(device_t dev)
+{
+ struct ns2_pcie_phy_softc *sc;
+ pcell_t addr_cells, size_cells, buf[2];
+ phandle_t node;
+
+ sc = device_get_softc(dev);
+
+ node = ofw_bus_get_node(dev);
+ get_addr_size_cells(OF_parent(node), &addr_cells, &size_cells);
+ if ((addr_cells != 1) || (size_cells != 0)) {
+ device_printf(dev,
+ "Only addr_cells=1 and size_cells=0 are supported\n");
+ return (EINVAL);
+ }
+
+ if (OF_getencprop(node, "reg", buf, sizeof(pcell_t)) < 0)
+ return (ENXIO);
+
+ sc->phy_id = buf[0];
+
+ if (ns2_pci_phy_init(dev) < 0)
+ return (EINVAL);
+
+ return (bus_generic_attach(dev));
+}
diff --git a/sys/arm64/broadcom/genet/if_genet.c b/sys/arm64/broadcom/genet/if_genet.c
new file mode 100644
index 000000000000..9c8b322dee87
--- /dev/null
+++ b/sys/arm64/broadcom/genet/if_genet.c
@@ -0,0 +1,1762 @@
+/*-
+ * Copyright (c) 2020 Michael J Karels
+ * Copyright (c) 2016, 2020 Jared McNeill <jmcneill@invisible.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * RPi4 (BCM 2711) Gigabit Ethernet ("GENET") controller
+ *
+ * This driver is derived in large part from bcmgenet.c from NetBSD by
+ * Jared McNeill. Parts of the structure and other common code in
+ * this driver have been copied from if_awg.c for the Allwinner EMAC,
+ * also by Jared McNeill.
+ */
+
+#include "opt_device_polling.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/endian.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/module.h>
+#include <sys/taskqueue.h>
+#include <sys/gpio.h>
+
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/ethernet.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+#include <net/if_var.h>
+
+#include <machine/bus.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
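+/* if_genetreg.h uses the NetBSD-style __BIT() macro; provide it here. */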
+#define __BIT(_x) (1U << (_x))
+#include "if_genetreg.h"
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+#include <dev/mii/mii_fdt.h>
+
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#define ICMPV6_HACK /* workaround for chip issue */
+#ifdef ICMPV6_HACK
+#include <netinet/icmp6.h>
+#endif
+
+#include "syscon_if.h"
+#include "miibus_if.h"
+#include "gpio_if.h"
+
+#define RD4(sc, reg) bus_read_4((sc)->res[_RES_MAC], (reg))
+#define WR4(sc, reg, val) bus_write_4((sc)->res[_RES_MAC], (reg), (val))
+
+#define GEN_LOCK(sc) mtx_lock(&(sc)->mtx)
+#define GEN_UNLOCK(sc) mtx_unlock(&(sc)->mtx)
+#define GEN_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED)
+#define GEN_ASSERT_UNLOCKED(sc) mtx_assert(&(sc)->mtx, MA_NOTOWNED)
+
+#define TX_DESC_COUNT GENET_DMA_DESC_COUNT
+#define RX_DESC_COUNT GENET_DMA_DESC_COUNT
+
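+/* Ring indices wrap with a mask, so the descriptor counts must be powers of two. */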
+#define TX_NEXT(n, count) (((n) + 1) & ((count) - 1))
+#define RX_NEXT(n, count) (((n) + 1) & ((count) - 1))
+
+#define TX_MAX_SEGS 20
+
+/* Maximum number of mbufs to send to if_input */
+static int gen_rx_batch = 16 /* RX_BATCH_DEFAULT */;
+TUNABLE_INT("hw.gen.rx_batch", &gen_rx_batch);
+
+static struct ofw_compat_data compat_data[] = {
+ { "brcm,genet-v1", 1 },
+ { "brcm,genet-v2", 2 },
+ { "brcm,genet-v3", 3 },
+ { "brcm,genet-v4", 4 },
+ { "brcm,genet-v5", 5 },
+ { NULL, 0 }
+};
+
+enum {
+ _RES_MAC, /* controller register block */
+ _RES_IRQ1,
+ _RES_IRQ2,
+ _RES_NITEMS
+};
+
+static struct resource_spec gen_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { SYS_RES_IRQ, 0, RF_ACTIVE },
+ { SYS_RES_IRQ, 1, RF_ACTIVE },
+ { -1, 0 }
+};
+
+/* structure per ring entry */
+struct gen_ring_ent {
+ bus_dmamap_t map;
+ struct mbuf *mbuf;
+};
+
+struct tx_queue {
+ int hwindex; /* hardware index */
+ int nentries;
+ u_int queued; /* number of descriptors in use */
+ u_int cur;
+ u_int next;
+ u_int prod_idx;
+ u_int cons_idx;
+ struct gen_ring_ent *entries;
+};
+
+struct rx_queue {
+ int hwindex; /* hardware index */
+ int nentries;
+ u_int cur;
+ u_int prod_idx;
+ u_int cons_idx;
+ struct gen_ring_ent *entries;
+};
+
+struct gen_softc {
+ struct resource *res[_RES_NITEMS];
+ struct mtx mtx;
+ if_t ifp;
+ device_t dev;
+ device_t miibus;
+ mii_contype_t phy_mode;
+
+ struct callout stat_ch;
+ struct task link_task;
+ void *ih;
+ void *ih2;
+ int type;
+ int if_flags;
+ int link;
+ bus_dma_tag_t tx_buf_tag;
+ /*
+ * The genet chip has multiple queues for transmit and receive.
+ * This driver uses only one (queue 16, the default), but is structured
+ * to support multiple rings. The additional rings are used for different
+ * priorities.
+ */
+#define DEF_TXQUEUE 0
+#define NTXQUEUE 1
+ struct tx_queue tx_queue[NTXQUEUE];
+ struct gen_ring_ent tx_ring_ent[TX_DESC_COUNT]; /* ring entries */
+
+ bus_dma_tag_t rx_buf_tag;
+#define DEF_RXQUEUE 0
+#define NRXQUEUE 1
+ struct rx_queue rx_queue[NRXQUEUE];
+ struct gen_ring_ent rx_ring_ent[RX_DESC_COUNT]; /* ring entries */
+};
+
+static void gen_init(void *softc);
+static void gen_start(if_t ifp);
+static void gen_destroy(struct gen_softc *sc);
+static int gen_encap(struct gen_softc *sc, struct mbuf **mp);
+static int gen_parse_tx(struct mbuf *m, int csum_flags);
+static int gen_ioctl(if_t ifp, u_long cmd, caddr_t data);
+static int gen_get_phy_mode(device_t dev);
+static bool gen_get_eaddr(device_t dev, struct ether_addr *eaddr);
+static void gen_set_enaddr(struct gen_softc *sc);
+static void gen_setup_rxfilter(struct gen_softc *sc);
+static void gen_reset(struct gen_softc *sc);
+static void gen_enable(struct gen_softc *sc);
+static void gen_dma_disable(device_t dev);
+static int gen_bus_dma_init(struct gen_softc *sc);
+static void gen_bus_dma_teardown(struct gen_softc *sc);
+static void gen_enable_intr(struct gen_softc *sc);
+static void gen_init_txrings(struct gen_softc *sc);
+static void gen_init_rxrings(struct gen_softc *sc);
+static void gen_intr(void *softc);
+static int gen_rxintr(struct gen_softc *sc, struct rx_queue *q);
+static void gen_txintr(struct gen_softc *sc, struct tx_queue *q);
+static void gen_intr2(void *softc);
+static int gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index);
+static int gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
+ struct mbuf *m);
+static void gen_link_task(void *arg, int pending);
+static void gen_media_status(if_t ifp, struct ifmediareq *ifmr);
+static int gen_media_change(if_t ifp);
+static void gen_tick(void *softc);
+
+static int
+gen_probe(device_t dev)
+{
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "RPi4 Gigabit Ethernet");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+gen_attach(device_t dev)
+{
+ struct ether_addr eaddr;
+ struct gen_softc *sc;
+ int major, minor, error, mii_flags;
+ bool eaddr_found;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
+
+ if (bus_alloc_resources(dev, gen_spec, sc->res) != 0) {
+ device_printf(dev, "cannot allocate resources for device\n");
+ error = ENXIO;
+ goto fail;
+ }
+
+ major = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MAJOR) >> REV_MAJOR_SHIFT;
+ if (major != REV_MAJOR_V5) {
+ device_printf(dev, "version %d is not supported\n", major);
+ error = ENXIO;
+ goto fail;
+ }
+ minor = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MINOR) >> REV_MINOR_SHIFT;
+ device_printf(dev, "GENET version 5.%d phy 0x%04x\n", minor,
+ RD4(sc, GENET_SYS_REV_CTRL) & REV_PHY);
+
+ mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
+ callout_init_mtx(&sc->stat_ch, &sc->mtx, 0);
+ TASK_INIT(&sc->link_task, 0, gen_link_task, sc);
+
+ error = gen_get_phy_mode(dev);
+ if (error != 0)
+ goto fail;
+
+ bzero(&eaddr, sizeof(eaddr));
+ eaddr_found = gen_get_eaddr(dev, &eaddr);
+
+ /* reset core */
+ gen_reset(sc);
+
+ gen_dma_disable(dev);
+
+ /* Setup DMA */
+ error = gen_bus_dma_init(sc);
+ if (error != 0) {
+ device_printf(dev, "cannot setup bus dma\n");
+ goto fail;
+ }
+
+ /* Install interrupt handlers */
+ error = bus_setup_intr(dev, sc->res[_RES_IRQ1],
+ INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr, sc, &sc->ih);
+ if (error != 0) {
+ device_printf(dev, "cannot setup interrupt handler1\n");
+ goto fail;
+ }
+
+ error = bus_setup_intr(dev, sc->res[_RES_IRQ2],
+ INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr2, sc, &sc->ih2);
+ if (error != 0) {
+ device_printf(dev, "cannot setup interrupt handler2\n");
+ goto fail;
+ }
+
+ /* Setup ethernet interface */
+ sc->ifp = if_alloc(IFT_ETHER);
+ if_setsoftc(sc->ifp, sc);
+ if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev));
+ if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
+ if_setstartfn(sc->ifp, gen_start);
+ if_setioctlfn(sc->ifp, gen_ioctl);
+ if_setinitfn(sc->ifp, gen_init);
+ if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1);
+ if_setsendqready(sc->ifp);
+#define GEN_CSUM_FEATURES (CSUM_UDP | CSUM_TCP)
+ if_sethwassist(sc->ifp, GEN_CSUM_FEATURES);
+ if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM |
+ IFCAP_HWCSUM_IPV6);
+ if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));
+
+ /* Attach MII driver */
+ mii_flags = 0;
+ switch (sc->phy_mode) {
+ case MII_CONTYPE_RGMII_ID:
+ mii_flags |= MIIF_RX_DELAY | MIIF_TX_DELAY;
+ break;
+ case MII_CONTYPE_RGMII_RXID:
+ mii_flags |= MIIF_RX_DELAY;
+ break;
+ case MII_CONTYPE_RGMII_TXID:
+ mii_flags |= MIIF_TX_DELAY;
+ break;
+ default:
+ break;
+ }
+ error = mii_attach(dev, &sc->miibus, sc->ifp, gen_media_change,
+ gen_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
+ mii_flags);
+ if (error != 0) {
+ device_printf(dev, "cannot attach PHY\n");
+ goto fail;
+ }
+
+ /* If address was not found, create one based on the hostid and name. */
+ if (!eaddr_found)
+ ether_gen_addr(sc->ifp, &eaddr);
+ /* Attach ethernet interface */
+ ether_ifattach(sc->ifp, eaddr.octet);
+
+fail:
+ if (error)
+ gen_destroy(sc);
+ return (error);
+}
+
+/* Free resources after failed attach. This is not a complete detach. */
+static void
+gen_destroy(struct gen_softc *sc)
+{
+
+ if (sc->miibus) { /* can't happen */
+ device_delete_child(sc->dev, sc->miibus);
+ sc->miibus = NULL;
+ }
+ bus_teardown_intr(sc->dev, sc->res[_RES_IRQ1], sc->ih);
+ bus_teardown_intr(sc->dev, sc->res[_RES_IRQ2], sc->ih2);
+ gen_bus_dma_teardown(sc);
+ callout_drain(&sc->stat_ch);
+ if (mtx_initialized(&sc->mtx))
+ mtx_destroy(&sc->mtx);
+ bus_release_resources(sc->dev, gen_spec, sc->res);
+ if (sc->ifp != NULL) {
+ if_free(sc->ifp);
+ sc->ifp = NULL;
+ }
+}
+
+static int
+gen_get_phy_mode(device_t dev)
+{
+ struct gen_softc *sc;
+ phandle_t node;
+ mii_contype_t type;
+ int error = 0;
+
+ sc = device_get_softc(dev);
+ node = ofw_bus_get_node(dev);
+ type = mii_fdt_get_contype(node);
+
+ switch (type) {
+ case MII_CONTYPE_RGMII:
+ case MII_CONTYPE_RGMII_ID:
+ case MII_CONTYPE_RGMII_RXID:
+ case MII_CONTYPE_RGMII_TXID:
+ sc->phy_mode = type;
+ break;
+ default:
+ device_printf(dev, "unknown phy-mode '%s'\n",
+ mii_fdt_contype_to_name(type));
+ error = ENXIO;
+ break;
+ }
+
+ return (error);
+}
+
+static bool
+gen_get_eaddr(device_t dev, struct ether_addr *eaddr)
+{
+ struct gen_softc *sc;
+ uint32_t maclo, machi, val;
+ phandle_t node;
+
+ sc = device_get_softc(dev);
+
+ node = ofw_bus_get_node(dev);
+ if (OF_getprop(node, "mac-address", eaddr->octet,
+ ETHER_ADDR_LEN) != -1 ||
+ OF_getprop(node, "local-mac-address", eaddr->octet,
+ ETHER_ADDR_LEN) != -1 ||
+ OF_getprop(node, "address", eaddr->octet, ETHER_ADDR_LEN) != -1)
+ return (true);
+
+ device_printf(dev, "No Ethernet address found in fdt!\n");
+ maclo = machi = 0;
+
+ val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
+ if ((val & GENET_SYS_RBUF_FLUSH_RESET) == 0) {
+ maclo = htobe32(RD4(sc, GENET_UMAC_MAC0));
+ machi = htobe16(RD4(sc, GENET_UMAC_MAC1) & 0xffff);
+ }
+
+ if (maclo == 0 && machi == 0) {
+ if (bootverbose)
+ device_printf(dev,
+ "No Ethernet address found in controller\n");
+ return (false);
+ } else {
+ eaddr->octet[0] = maclo & 0xff;
+ eaddr->octet[1] = (maclo >> 8) & 0xff;
+ eaddr->octet[2] = (maclo >> 16) & 0xff;
+ eaddr->octet[3] = (maclo >> 24) & 0xff;
+ eaddr->octet[4] = machi & 0xff;
+ eaddr->octet[5] = (machi >> 8) & 0xff;
+ return (true);
+ }
+}
+
+static void
+gen_reset(struct gen_softc *sc)
+{
+ uint32_t val;
+
+ val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
+ val |= GENET_SYS_RBUF_FLUSH_RESET;
+ WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
+ DELAY(10);
+
+ val &= ~GENET_SYS_RBUF_FLUSH_RESET;
+ WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
+ DELAY(10);
+
+ WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, 0);
+ DELAY(10);
+
+ WR4(sc, GENET_UMAC_CMD, 0);
+ WR4(sc, GENET_UMAC_CMD,
+ GENET_UMAC_CMD_LCL_LOOP_EN | GENET_UMAC_CMD_SW_RESET);
+ DELAY(10);
+ WR4(sc, GENET_UMAC_CMD, 0);
+
+ WR4(sc, GENET_UMAC_MIB_CTRL, GENET_UMAC_MIB_RESET_RUNT |
+ GENET_UMAC_MIB_RESET_RX | GENET_UMAC_MIB_RESET_TX);
+ WR4(sc, GENET_UMAC_MIB_CTRL, 0);
+
+ WR4(sc, GENET_UMAC_MAX_FRAME_LEN, 1536);
+
+ val = RD4(sc, GENET_RBUF_CTRL);
+ val |= GENET_RBUF_ALIGN_2B;
+ WR4(sc, GENET_RBUF_CTRL, val);
+
+ WR4(sc, GENET_RBUF_TBUF_SIZE_CTRL, 1);
+}
+
+static void
+gen_enable(struct gen_softc *sc)
+{
+ u_int val;
+
+ /* Enable transmitter and receiver */
+ val = RD4(sc, GENET_UMAC_CMD);
+ val |= GENET_UMAC_CMD_TXEN;
+ val |= GENET_UMAC_CMD_RXEN;
+ WR4(sc, GENET_UMAC_CMD, val);
+
+ /* Enable interrupts */
+ gen_enable_intr(sc);
+ WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
+ GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
+}
+
+static void
+gen_enable_offload(struct gen_softc *sc)
+{
+ uint32_t check_ctrl, buf_ctrl;
+
+ check_ctrl = RD4(sc, GENET_RBUF_CHECK_CTRL);
+ buf_ctrl = RD4(sc, GENET_RBUF_CTRL);
+ if ((if_getcapenable(sc->ifp) & IFCAP_RXCSUM) != 0) {
+ check_ctrl |= GENET_RBUF_CHECK_CTRL_EN;
+ buf_ctrl |= GENET_RBUF_64B_EN;
+ } else {
+ check_ctrl &= ~GENET_RBUF_CHECK_CTRL_EN;
+ buf_ctrl &= ~GENET_RBUF_64B_EN;
+ }
+ WR4(sc, GENET_RBUF_CHECK_CTRL, check_ctrl);
+ WR4(sc, GENET_RBUF_CTRL, buf_ctrl);
+
+ buf_ctrl = RD4(sc, GENET_TBUF_CTRL);
+ if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
+ 0)
+ buf_ctrl |= GENET_RBUF_64B_EN;
+ else
+ buf_ctrl &= ~GENET_RBUF_64B_EN;
+ WR4(sc, GENET_TBUF_CTRL, buf_ctrl);
+}
+
+static void
+gen_dma_disable(device_t dev)
+{
+ struct gen_softc *sc = device_get_softc(dev);
+ int val;
+
+ val = RD4(sc, GENET_TX_DMA_CTRL);
+ val &= ~GENET_TX_DMA_CTRL_EN;
+ val &= ~GENET_TX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
+ WR4(sc, GENET_TX_DMA_CTRL, val);
+
+ val = RD4(sc, GENET_RX_DMA_CTRL);
+ val &= ~GENET_RX_DMA_CTRL_EN;
+ val &= ~GENET_RX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
+ WR4(sc, GENET_RX_DMA_CTRL, val);
+}
+
+static int
+gen_bus_dma_init(struct gen_softc *sc)
+{
+ device_t dev = sc->dev;
+ int i, error;
+
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(dev), /* Parent tag */
+ 4, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_40BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MCLBYTES, TX_MAX_SEGS, /* maxsize, nsegs */
+ MCLBYTES, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->tx_buf_tag);
+ if (error != 0) {
+ device_printf(dev, "cannot create TX buffer tag\n");
+ return (error);
+ }
+
+ for (i = 0; i < TX_DESC_COUNT; i++) {
+ error = bus_dmamap_create(sc->tx_buf_tag, 0,
+ &sc->tx_ring_ent[i].map);
+ if (error != 0) {
+ device_printf(dev, "cannot create TX buffer map\n");
+ return (error);
+ }
+ }
+
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(dev), /* Parent tag */
+ 4, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_40BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MCLBYTES, 1, /* maxsize, nsegs */
+ MCLBYTES, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->rx_buf_tag);
+ if (error != 0) {
+ device_printf(dev, "cannot create RX buffer tag\n");
+ return (error);
+ }
+
+ for (i = 0; i < RX_DESC_COUNT; i++) {
+ error = bus_dmamap_create(sc->rx_buf_tag, 0,
+ &sc->rx_ring_ent[i].map);
+ if (error != 0) {
+ device_printf(dev, "cannot create RX buffer map\n");
+ return (error);
+ }
+ }
+ return (0);
+}
+
+static void
+gen_bus_dma_teardown(struct gen_softc *sc)
+{
+ int i, error;
+
+ if (sc->tx_buf_tag != NULL) {
+ for (i = 0; i < TX_DESC_COUNT; i++) {
+ error = bus_dmamap_destroy(sc->tx_buf_tag,
+ sc->tx_ring_ent[i].map);
+ sc->tx_ring_ent[i].map = NULL;
+ if (error)
+ device_printf(sc->dev,
+ "%s: bus_dmamap_destroy failed: %d\n",
+ __func__, error);
+ }
+ error = bus_dma_tag_destroy(sc->tx_buf_tag);
+ sc->tx_buf_tag = NULL;
+ if (error)
+ device_printf(sc->dev,
+ "%s: bus_dma_tag_destroy failed: %d\n", __func__,
+ error);
+ }
+
+ if (sc->rx_buf_tag != NULL) {
+ for (i = 0; i < RX_DESC_COUNT; i++) {
+ error = bus_dmamap_destroy(sc->rx_buf_tag,
+ sc->rx_ring_ent[i].map);
+ sc->rx_ring_ent[i].map = NULL;
+ if (error)
+ device_printf(sc->dev,
+ "%s: bus_dmamap_destroy failed: %d\n",
+ __func__, error);
+ }
+ error = bus_dma_tag_destroy(sc->rx_buf_tag);
+ sc->rx_buf_tag = NULL;
+ if (error)
+ device_printf(sc->dev,
+ "%s: bus_dma_tag_destroy failed: %d\n", __func__,
+ error);
+ }
+}
+
+static void
+gen_enable_intr(struct gen_softc *sc)
+{
+
+ WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
+ GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
+}
+
+/*
+ * "queue" is the software queue index (0-4); "qid" is the hardware index
+ * (0-16). "base" is the starting index in the ring array.
+ */
+static void
+gen_init_txring(struct gen_softc *sc, int queue, int qid, int base,
+ int nentries)
+{
+ struct tx_queue *q;
+ uint32_t val;
+
+ q = &sc->tx_queue[queue];
+ q->entries = &sc->tx_ring_ent[base];
+ q->hwindex = qid;
+ q->nentries = nentries;
+
+ /* TX ring */
+
+ q->queued = 0;
+ q->cons_idx = q->prod_idx = 0;
+
+ WR4(sc, GENET_TX_SCB_BURST_SIZE, 0x08);
+
+ WR4(sc, GENET_TX_DMA_READ_PTR_LO(qid), 0);
+ WR4(sc, GENET_TX_DMA_READ_PTR_HI(qid), 0);
+ WR4(sc, GENET_TX_DMA_CONS_INDEX(qid), 0);
+ WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), 0);
+ WR4(sc, GENET_TX_DMA_RING_BUF_SIZE(qid),
+ (nentries << GENET_TX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
+ (MCLBYTES & GENET_TX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
+ WR4(sc, GENET_TX_DMA_START_ADDR_LO(qid), 0);
+ WR4(sc, GENET_TX_DMA_START_ADDR_HI(qid), 0);
+ WR4(sc, GENET_TX_DMA_END_ADDR_LO(qid),
+ TX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
+ WR4(sc, GENET_TX_DMA_END_ADDR_HI(qid), 0);
+ WR4(sc, GENET_TX_DMA_MBUF_DONE_THRES(qid), 1);
+ WR4(sc, GENET_TX_DMA_FLOW_PERIOD(qid), 0);
+ WR4(sc, GENET_TX_DMA_WRITE_PTR_LO(qid), 0);
+ WR4(sc, GENET_TX_DMA_WRITE_PTR_HI(qid), 0);
+
+ WR4(sc, GENET_TX_DMA_RING_CFG, __BIT(qid)); /* enable */
+
+ /* Enable transmit DMA */
+ val = RD4(sc, GENET_TX_DMA_CTRL);
+ val |= GENET_TX_DMA_CTRL_EN;
+ val |= GENET_TX_DMA_CTRL_RBUF_EN(qid);
+ WR4(sc, GENET_TX_DMA_CTRL, val);
+}
+
+/*
+ * "queue" is the software queue index (0-4); "qid" is the hardware index
+ * (0-16). "base" is the starting index in the ring array.
+ */
+static void
+gen_init_rxring(struct gen_softc *sc, int queue, int qid, int base,
+ int nentries)
+{
+ struct rx_queue *q;
+ uint32_t val;
+ int i;
+
+ q = &sc->rx_queue[queue];
+ q->entries = &sc->rx_ring_ent[base];
+ q->hwindex = qid;
+ q->nentries = nentries;
+ q->cons_idx = q->prod_idx = 0;
+
+ WR4(sc, GENET_RX_SCB_BURST_SIZE, 0x08);
+
+ WR4(sc, GENET_RX_DMA_WRITE_PTR_LO(qid), 0);
+ WR4(sc, GENET_RX_DMA_WRITE_PTR_HI(qid), 0);
+ WR4(sc, GENET_RX_DMA_PROD_INDEX(qid), 0);
+ WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), 0);
+ WR4(sc, GENET_RX_DMA_RING_BUF_SIZE(qid),
+ (nentries << GENET_RX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
+ (MCLBYTES & GENET_RX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
+ WR4(sc, GENET_RX_DMA_START_ADDR_LO(qid), 0);
+ WR4(sc, GENET_RX_DMA_START_ADDR_HI(qid), 0);
+ WR4(sc, GENET_RX_DMA_END_ADDR_LO(qid),
+ RX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
+ WR4(sc, GENET_RX_DMA_END_ADDR_HI(qid), 0);
+ WR4(sc, GENET_RX_DMA_XON_XOFF_THRES(qid),
+ (5 << GENET_RX_DMA_XON_XOFF_THRES_LO_SHIFT) | (RX_DESC_COUNT >> 4));
+ WR4(sc, GENET_RX_DMA_READ_PTR_LO(qid), 0);
+ WR4(sc, GENET_RX_DMA_READ_PTR_HI(qid), 0);
+
+ WR4(sc, GENET_RX_DMA_RING_CFG, __BIT(qid)); /* enable */
+
+ /* fill ring */
+ for (i = 0; i < RX_DESC_COUNT; i++)
+ gen_newbuf_rx(sc, &sc->rx_queue[DEF_RXQUEUE], i);
+
+ /* Enable receive DMA */
+ val = RD4(sc, GENET_RX_DMA_CTRL);
+ val |= GENET_RX_DMA_CTRL_EN;
+ val |= GENET_RX_DMA_CTRL_RBUF_EN(qid);
+ WR4(sc, GENET_RX_DMA_CTRL, val);
+}
+
+static void
+gen_init_txrings(struct gen_softc *sc)
+{
+ int base = 0;
+#ifdef PRI_RINGS
+ int i;
+
+ /* init priority rings */
+ for (i = 0; i < PRI_RINGS; i++) {
+ gen_init_txring(sc, i, i, base, TX_DESC_PRICOUNT);
+ sc->tx_queue[i].queue = i;
+ base += TX_DESC_PRICOUNT;
+ dma_ring_conf |= 1 << i;
+ dma_control |= DMA_RENABLE(i);
+ }
+#endif
+
+ /* init GENET_DMA_DEFAULT_QUEUE (16) */
+ gen_init_txring(sc, DEF_TXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
+ TX_DESC_COUNT);
+ sc->tx_queue[DEF_TXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
+}
+
+static void
+gen_init_rxrings(struct gen_softc *sc)
+{
+ int base = 0;
+#ifdef PRI_RINGS
+ int i;
+
+ /* init priority rings */
+ for (i = 0; i < PRI_RINGS; i++) {
+ gen_init_rxring(sc, i, i, base, TX_DESC_PRICOUNT);
+ sc->rx_queue[i].queue = i;
+ base += TX_DESC_PRICOUNT;
+ dma_ring_conf |= 1 << i;
+ dma_control |= DMA_RENABLE(i);
+ }
+#endif
+
+ /* init GENET_DMA_DEFAULT_QUEUE (16) */
+ gen_init_rxring(sc, DEF_RXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
+ RX_DESC_COUNT);
+ sc->rx_queue[DEF_RXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
+}
+
+static void
+gen_init_locked(struct gen_softc *sc)
+{
+ struct mii_data *mii;
+ if_t ifp;
+
+ mii = device_get_softc(sc->miibus);
+ ifp = sc->ifp;
+
+ GEN_ASSERT_LOCKED(sc);
+
+ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
+ return;
+
+ switch (sc->phy_mode) {
+ case MII_CONTYPE_RGMII:
+ case MII_CONTYPE_RGMII_ID:
+ case MII_CONTYPE_RGMII_RXID:
+ case MII_CONTYPE_RGMII_TXID:
+ WR4(sc, GENET_SYS_PORT_CTRL, GENET_SYS_PORT_MODE_EXT_GPHY);
+ break;
+ default:
+ WR4(sc, GENET_SYS_PORT_CTRL, 0);
+ }
+
+ gen_set_enaddr(sc);
+
+ /* Setup RX filter */
+ gen_setup_rxfilter(sc);
+
+ gen_init_txrings(sc);
+ gen_init_rxrings(sc);
+ gen_enable(sc);
+ gen_enable_offload(sc);
+
+ if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
+
+ mii_mediachg(mii);
+ callout_reset(&sc->stat_ch, hz, gen_tick, sc);
+}
+
+static void
+gen_init(void *softc)
+{
+ struct gen_softc *sc;
+
+ sc = softc;
+ GEN_LOCK(sc);
+ gen_init_locked(sc);
+ GEN_UNLOCK(sc);
+}
+
+static uint8_t ether_broadcastaddr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+static void
+gen_setup_rxfilter_mdf(struct gen_softc *sc, u_int n, const uint8_t *ea)
+{
+ uint32_t addr0 = (ea[0] << 8) | ea[1];
+ uint32_t addr1 = (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5];
+
+ WR4(sc, GENET_UMAC_MDF_ADDR0(n), addr0);
+ WR4(sc, GENET_UMAC_MDF_ADDR1(n), addr1);
+}
+
+static u_int
+gen_setup_multi(void *arg, struct sockaddr_dl *sdl, u_int count)
+{
+ struct gen_softc *sc = arg;
+
+ /* "count + 2" to account for unicast and broadcast */
+ gen_setup_rxfilter_mdf(sc, count + 2, LLADDR(sdl));
+ return (1); /* count this address */
+}
+
+static void
+gen_setup_rxfilter(struct gen_softc *sc)
+{
+ struct ifnet *ifp = sc->ifp;
+ uint32_t cmd, mdf_ctrl;
+ u_int n;
+
+ GEN_ASSERT_LOCKED(sc);
+
+ cmd = RD4(sc, GENET_UMAC_CMD);
+
+ /*
+ * Count the required number of hardware filters. We need one
+ * for each multicast address, plus one for our own address and
+ * the broadcast address.
+ */
+ n = if_llmaddr_count(ifp) + 2;
+
+ if (n > GENET_MAX_MDF_FILTER)
+ ifp->if_flags |= IFF_ALLMULTI;
+ else
+ ifp->if_flags &= ~IFF_ALLMULTI;
+
+ if ((ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0) {
+ cmd |= GENET_UMAC_CMD_PROMISC;
+ mdf_ctrl = 0;
+ } else {
+ cmd &= ~GENET_UMAC_CMD_PROMISC;
+ gen_setup_rxfilter_mdf(sc, 0, ether_broadcastaddr);
+ gen_setup_rxfilter_mdf(sc, 1, IF_LLADDR(ifp));
+ (void) if_foreach_llmaddr(ifp, gen_setup_multi, sc);
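+ /*
+ * Enable the n filters just programmed; the enable mask fills the
+ * 17-bit MDF_CTRL field from its top bit downward.
+ */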
+ mdf_ctrl = (__BIT(GENET_MAX_MDF_FILTER) - 1) &~
+ (__BIT(GENET_MAX_MDF_FILTER - n) - 1);
+ }
+
+ WR4(sc, GENET_UMAC_CMD, cmd);
+ WR4(sc, GENET_UMAC_MDF_CTRL, mdf_ctrl);
+}
+
+static void
+gen_set_enaddr(struct gen_softc *sc)
+{
+ uint8_t *enaddr;
+ uint32_t val;
+ if_t ifp;
+
+ GEN_ASSERT_LOCKED(sc);
+
+ ifp = sc->ifp;
+
+ /* Write our unicast address */
+ enaddr = IF_LLADDR(ifp);
+ /* Write hardware address */
+ val = enaddr[3] | (enaddr[2] << 8) | (enaddr[1] << 16) |
+ (enaddr[0] << 24);
+ WR4(sc, GENET_UMAC_MAC0, val);
+ val = enaddr[5] | (enaddr[4] << 8);
+ WR4(sc, GENET_UMAC_MAC1, val);
+}
+
+static void
+gen_start_locked(struct gen_softc *sc)
+{
+ struct mbuf *m;
+ if_t ifp;
+ int cnt, err;
+
+ GEN_ASSERT_LOCKED(sc);
+
+ if (!sc->link)
+ return;
+
+ ifp = sc->ifp;
+
+ if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
+ IFF_DRV_RUNNING)
+ return;
+
+ for (cnt = 0; ; cnt++) {
+ m = if_dequeue(ifp);
+ if (m == NULL)
+ break;
+
+ err = gen_encap(sc, &m);
+ if (err != 0) {
+ if (err == ENOBUFS)
+ if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
+ if (m != NULL)
+ if_sendq_prepend(ifp, m);
+ break;
+ }
+ if_bpfmtap(ifp, m);
+ }
+}
+
+static void
+gen_start(if_t ifp)
+{
+ struct gen_softc *sc;
+
+ sc = if_getsoftc(ifp);
+
+ GEN_LOCK(sc);
+ gen_start_locked(sc);
+ GEN_UNLOCK(sc);
+}
+
+/* Test for any delayed checksum */
+#define CSUM_DELAY_ANY (CSUM_TCP | CSUM_UDP | CSUM_IP6_TCP | CSUM_IP6_UDP)
+
+static int
+gen_encap(struct gen_softc *sc, struct mbuf **mp)
+{
+ bus_dmamap_t map;
+ bus_dma_segment_t segs[TX_MAX_SEGS];
+ int error, nsegs, cur, first, i, index, offset;
+ uint32_t csuminfo, length_status, csum_flags = 0, csumdata;
+ struct mbuf *m;
+ struct statusblock *sb = NULL;
+ struct tx_queue *q;
+ struct gen_ring_ent *ent;
+
+ GEN_ASSERT_LOCKED(sc);
+
+ q = &sc->tx_queue[DEF_TXQUEUE];
+
+ m = *mp;
+#ifdef ICMPV6_HACK
+ /*
+ * Reflected ICMPv6 packets, e.g. echo replies, tend to get laid
+ * out with only the Ethernet header in the first mbuf, and this
+ * doesn't seem to work.
+ */
+#define ICMP6_LEN (sizeof(struct ether_header) + sizeof(struct ip6_hdr) + \
+ sizeof(struct icmp6_hdr))
+ if (m->m_len == sizeof(struct ether_header)) {
+ int ether_type = mtod(m, struct ether_header *)->ether_type;
+ if (ntohs(ether_type) == ETHERTYPE_IPV6 &&
+ m->m_next->m_len >= sizeof(struct ip6_hdr)) {
+ struct ip6_hdr *ip6;
+
+ ip6 = mtod(m->m_next, struct ip6_hdr *);
+ if (ip6->ip6_nxt == IPPROTO_ICMPV6) {
+ m = m_pullup(m,
+ MIN(m->m_pkthdr.len, ICMP6_LEN));
+ if (m == NULL) {
+ if (sc->ifp->if_flags & IFF_DEBUG)
+ device_printf(sc->dev,
+ "ICMPV6 pullup fail\n");
+ *mp = NULL;
+ return (ENOMEM);
+ }
+ }
+ }
+ }
+#undef ICMP6_LEN
+#endif
+ if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
+ 0) {
+ csum_flags = m->m_pkthdr.csum_flags;
+ csumdata = m->m_pkthdr.csum_data;
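+ /* Prepend space for the hardware status block used for TX checksum offload. */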
+ M_PREPEND(m, sizeof(struct statusblock), M_NOWAIT);
+ if (m == NULL) {
+ if (sc->ifp->if_flags & IFF_DEBUG)
+ device_printf(sc->dev, "prepend fail\n");
+ *mp = NULL;
+ return (ENOMEM);
+ }
+ offset = gen_parse_tx(m, csum_flags);
+ sb = mtod(m, struct statusblock *);
+ if ((csum_flags & CSUM_DELAY_ANY) != 0) {
+ csuminfo = (offset << TXCSUM_OFF_SHIFT) |
+ (offset + csumdata);
+ csuminfo |= TXCSUM_LEN_VALID;
+ if (csum_flags & (CSUM_UDP | CSUM_IP6_UDP))
+ csuminfo |= TXCSUM_UDP;
+ sb->txcsuminfo = csuminfo;
+ } else
+ sb->txcsuminfo = 0;
+ }
+
+ *mp = m;
+
+ cur = first = q->cur;
+ ent = &q->entries[cur];
+ map = ent->map;
+ error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m, segs,
+ &nsegs, BUS_DMA_NOWAIT);
+ if (error == EFBIG) {
+ m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS);
+ if (m == NULL) {
+ device_printf(sc->dev,
+ "gen_encap: m_collapse failed\n");
+ m_freem(*mp);
+ *mp = NULL;
+ return (ENOMEM);
+ }
+ *mp = m;
+ error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m,
+ segs, &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ m_freem(*mp);
+ *mp = NULL;
+ }
+ }
+ if (error != 0) {
+ device_printf(sc->dev,
+ "gen_encap: bus_dmamap_load_mbuf_sg failed\n");
+ return (error);
+ }
+ if (nsegs == 0) {
+ m_freem(*mp);
+ *mp = NULL;
+ return (EIO);
+ }
+
+ /* Remove statusblock after mapping, before possible requeue or bpf. */
+ if (sb != NULL) {
+ m->m_data += sizeof(struct statusblock);
+ m->m_len -= sizeof(struct statusblock);
+ m->m_pkthdr.len -= sizeof(struct statusblock);
+ }
+ if (q->queued + nsegs > q->nentries) {
+ bus_dmamap_unload(sc->tx_buf_tag, map);
+ return (ENOBUFS);
+ }
+
+ bus_dmamap_sync(sc->tx_buf_tag, map, BUS_DMASYNC_PREWRITE);
+
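+ /* The descriptor slot is the free-running producer index modulo the ring size. */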
+ index = q->prod_idx & (q->nentries - 1);
+ for (i = 0; i < nsegs; i++) {
+ ent = &q->entries[cur];
+ length_status = GENET_TX_DESC_STATUS_QTAG_MASK;
+ if (i == 0) {
+ length_status |= GENET_TX_DESC_STATUS_SOP |
+ GENET_TX_DESC_STATUS_CRC;
+ if ((csum_flags & CSUM_DELAY_ANY) != 0)
+ length_status |= GENET_TX_DESC_STATUS_CKSUM;
+ }
+ if (i == nsegs - 1)
+ length_status |= GENET_TX_DESC_STATUS_EOP;
+
+ length_status |= segs[i].ds_len <<
+ GENET_TX_DESC_STATUS_BUFLEN_SHIFT;
+
+ WR4(sc, GENET_TX_DESC_ADDRESS_LO(index),
+ (uint32_t)segs[i].ds_addr);
+ WR4(sc, GENET_TX_DESC_ADDRESS_HI(index),
+ (uint32_t)(segs[i].ds_addr >> 32));
+ WR4(sc, GENET_TX_DESC_STATUS(index), length_status);
+
+ ++q->queued;
+ cur = TX_NEXT(cur, q->nentries);
+ index = TX_NEXT(index, q->nentries);
+ }
+
+ q->prod_idx += nsegs;
+ q->prod_idx &= GENET_TX_DMA_PROD_CONS_MASK;
+ /* We probably don't need to write the producer index on every iter */
+ if (nsegs != 0)
+ WR4(sc, GENET_TX_DMA_PROD_INDEX(q->hwindex), q->prod_idx);
+ q->cur = cur;
+
+ /* Store mbuf in the first segment so it can be freed when the chain completes */
+ q->entries[first].mbuf = m;
+
+ return (0);
+}
+
+/*
+ * Parse a packet to find the offset of the transport header for checksum
+ * offload. Ensure that the link and network headers are contiguous with
+ * the status block, or transmission fails.
+ */
+static int
+gen_parse_tx(struct mbuf *m, int csum_flags)
+{
+ int offset, off_in_m;
+ bool copy = false, shift = false;
+ u_char *p, *copy_p = NULL;
+ struct mbuf *m0 = m;
+ uint16_t ether_type;
+
+ if (m->m_len == sizeof(struct statusblock)) {
+ /* M_PREPEND placed statusblock at end; move to beginning */
+ m->m_data = m->m_pktdat;
+ copy_p = mtodo(m, sizeof(struct statusblock));
+ m = m->m_next;
+ off_in_m = 0;
+ p = mtod(m, u_char *);
+ copy = true;
+ } else {
+ /*
+ * If statusblock is not at beginning of mbuf (likely),
+ * then remember to move mbuf contents down before copying
+ * after them.
+ */
+ if ((m->m_flags & M_EXT) == 0 && m->m_data != m->m_pktdat)
+ shift = true;
+ p = mtodo(m, sizeof(struct statusblock));
+ off_in_m = sizeof(struct statusblock);
+ }
+
+/*
+ * If headers need to be copied contiguous to statusblock, do so.
+ * If copying to the internal mbuf data area, and the status block
+ * is not at the beginning of that area, shift the status block (which
+ * is empty) and following data.
+ */
+#define COPY(size) { \
+ int hsize = size; \
+ if (copy) { \
+ if (shift) { \
+ u_char *p0; \
+ shift = false; \
+ p0 = mtodo(m0, sizeof(struct statusblock)); \
+ m0->m_data = m0->m_pktdat; \
+ bcopy(p0, mtodo(m0, sizeof(struct statusblock)),\
+ m0->m_len - sizeof(struct statusblock)); \
+ copy_p = mtodo(m0, sizeof(struct statusblock)); \
+ } \
+ bcopy(p, copy_p, hsize); \
+ m0->m_len += hsize; \
+ m0->m_pkthdr.len += hsize; /* unneeded */ \
+ m->m_len -= hsize; \
+ m->m_data += hsize; \
+ } \
+ copy_p += hsize; \
+}
+
+ KASSERT((sizeof(struct statusblock) + sizeof(struct ether_vlan_header) +
+ sizeof(struct ip6_hdr) <= MLEN), ("%s: mbuf too small", __func__));
+
+ if (((struct ether_header *)p)->ether_type == htons(ETHERTYPE_VLAN)) {
+ offset = sizeof(struct ether_vlan_header);
+ ether_type = ntohs(((struct ether_vlan_header *)p)->evl_proto);
+ COPY(sizeof(struct ether_vlan_header));
+ if (m->m_len == off_in_m + sizeof(struct ether_vlan_header)) {
+ m = m->m_next;
+ off_in_m = 0;
+ p = mtod(m, u_char *);
+ copy = true;
+ } else {
+ off_in_m += sizeof(struct ether_vlan_header);
+ p += sizeof(struct ether_vlan_header);
+ }
+ } else {
+ offset = sizeof(struct ether_header);
+ ether_type = ntohs(((struct ether_header *)p)->ether_type);
+ COPY(sizeof(struct ether_header));
+ if (m->m_len == off_in_m + sizeof(struct ether_header)) {
+ m = m->m_next;
+ off_in_m = 0;
+ p = mtod(m, u_char *);
+ copy = true;
+ } else {
+ off_in_m += sizeof(struct ether_header);
+ p += sizeof(struct ether_header);
+ }
+ }
+ if (ether_type == ETHERTYPE_IP) {
+ COPY(((struct ip *)p)->ip_hl << 2);
+ offset += ((struct ip *)p)->ip_hl << 2;
+ } else if (ether_type == ETHERTYPE_IPV6) {
+ COPY(sizeof(struct ip6_hdr));
+ offset += sizeof(struct ip6_hdr);
+ } else {
+ /*
+ * Unknown whether other cases require moving a header;
+ * ARP works without.
+ */
+ }
+ return (offset);
+#undef COPY
+}
+
+static void
+gen_intr(void *arg)
+{
+ struct gen_softc *sc = arg;
+ uint32_t val;
+
+ GEN_LOCK(sc);
+
+ val = RD4(sc, GENET_INTRL2_CPU_STAT);
+ val &= ~RD4(sc, GENET_INTRL2_CPU_STAT_MASK);
+ WR4(sc, GENET_INTRL2_CPU_CLEAR, val);
+
+ if (val & GENET_IRQ_RXDMA_DONE)
+ gen_rxintr(sc, &sc->rx_queue[DEF_RXQUEUE]);
+
+ if (val & GENET_IRQ_TXDMA_DONE) {
+ gen_txintr(sc, &sc->tx_queue[DEF_TXQUEUE]);
+ if (!if_sendq_empty(sc->ifp))
+ gen_start_locked(sc);
+ }
+
+ GEN_UNLOCK(sc);
+}
+
+static int
+gen_rxintr(struct gen_softc *sc, struct rx_queue *q)
+{
+ if_t ifp;
+ struct mbuf *m, *mh, *mt;
+ struct statusblock *sb = NULL;
+ int error, index, len, cnt, npkt, n;
+ uint32_t status, prod_idx, total;
+
+ ifp = sc->ifp;
+ mh = mt = NULL;
+ cnt = 0;
+ npkt = 0;
+
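+ /* See how many descriptors the hardware has completed since the last pass. */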
+ prod_idx = RD4(sc, GENET_RX_DMA_PROD_INDEX(q->hwindex)) &
+ GENET_RX_DMA_PROD_CONS_MASK;
+ total = (prod_idx - q->cons_idx) & GENET_RX_DMA_PROD_CONS_MASK;
+
+ index = q->cons_idx & (RX_DESC_COUNT - 1);
+ for (n = 0; n < total; n++) {
+ bus_dmamap_sync(sc->rx_buf_tag, q->entries[index].map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->rx_buf_tag, q->entries[index].map);
+
+ m = q->entries[index].mbuf;
+
+ if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
+ sb = mtod(m, struct statusblock *);
+ status = sb->status_buflen;
+ } else
+ status = RD4(sc, GENET_RX_DESC_STATUS(index));
+
+ len = (status & GENET_RX_DESC_STATUS_BUFLEN_MASK) >>
+ GENET_RX_DESC_STATUS_BUFLEN_SHIFT;
+
+ /* check for errors */
+ if ((status &
+ (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP |
+ GENET_RX_DESC_STATUS_RX_ERROR)) !=
+ (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP)) {
+ if (ifp->if_flags & IFF_DEBUG)
+ device_printf(sc->dev,
+ "error/frag %x csum %x\n", status,
+ sb != NULL ? sb->rxcsum : 0);
+ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ continue;
+ }
+
+ error = gen_newbuf_rx(sc, q, index);
+ if (error != 0) {
+ if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
+ if (ifp->if_flags & IFF_DEBUG)
+ device_printf(sc->dev, "gen_newbuf_rx %d\n",
+ error);
+ /* reuse previous mbuf */
+ (void) gen_mapbuf_rx(sc, q, index, m);
+ continue;
+ }
+
+ if (sb != NULL) {
+ if (status & GENET_RX_DESC_STATUS_CKSUM_OK) {
+ /* L4 checksum checked; not sure about L3. */
+ m->m_pkthdr.csum_flags = CSUM_DATA_VALID |
+ CSUM_PSEUDO_HDR;
+ m->m_pkthdr.csum_data = 0xffff;
+ }
+ m->m_data += sizeof(struct statusblock);
+ m->m_len -= sizeof(struct statusblock);
+ len -= sizeof(struct statusblock);
+ }
+ if (len > ETHER_ALIGN) {
+ m_adj(m, ETHER_ALIGN);
+ len -= ETHER_ALIGN;
+ }
+
+ m->m_pkthdr.rcvif = ifp;
+ m->m_pkthdr.len = len;
+ m->m_len = len;
+ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
+
+ m->m_nextpkt = NULL;
+ if (mh == NULL)
+ mh = m;
+ else
+ mt->m_nextpkt = m;
+ mt = m;
+ ++cnt;
+ ++npkt;
+
+ index = RX_NEXT(index, q->nentries);
+
+ q->cons_idx = (q->cons_idx + 1) & GENET_RX_DMA_PROD_CONS_MASK;
+ WR4(sc, GENET_RX_DMA_CONS_INDEX(q->hwindex), q->cons_idx);
+
+ if (cnt == gen_rx_batch) {
+ GEN_UNLOCK(sc);
+ if_input(ifp, mh);
+ GEN_LOCK(sc);
+ mh = mt = NULL;
+ cnt = 0;
+ }
+ }
+
+ if (mh != NULL) {
+ GEN_UNLOCK(sc);
+ if_input(ifp, mh);
+ GEN_LOCK(sc);
+ }
+
+ return (npkt);
+}
+
+static void
+gen_txintr(struct gen_softc *sc, struct tx_queue *q)
+{
+ uint32_t cons_idx, total;
+ struct gen_ring_ent *ent;
+ if_t ifp;
+ int i, prog;
+
+ GEN_ASSERT_LOCKED(sc);
+
+ ifp = sc->ifp;
+
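+ /* See how many descriptors the hardware has completed since the last reclaim. */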
+ cons_idx = RD4(sc, GENET_TX_DMA_CONS_INDEX(q->hwindex)) &
+ GENET_TX_DMA_PROD_CONS_MASK;
+ total = (cons_idx - q->cons_idx) & GENET_TX_DMA_PROD_CONS_MASK;
+
+ prog = 0;
+ for (i = q->next; q->queued > 0 && total > 0;
+ i = TX_NEXT(i, q->nentries), total--) {
+ /* XXX check for errors */
+
+ ent = &q->entries[i];
+ if (ent->mbuf != NULL) {
+ bus_dmamap_sync(sc->tx_buf_tag, ent->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->tx_buf_tag, ent->map);
+ m_freem(ent->mbuf);
+ ent->mbuf = NULL;
+ if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
+ }
+
+ prog++;
+ --q->queued;
+ }
+
+ if (prog > 0) {
+ q->next = i;
+ if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
+ }
+
+ q->cons_idx = cons_idx;
+}
+
+static void
+gen_intr2(void *arg)
+{
+ struct gen_softc *sc = arg;
+
+ device_printf(sc->dev, "gen_intr2\n");
+}
+
+static int
+gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index)
+{
+ struct mbuf *m;
+
+ m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
+ if (m == NULL)
+ return (ENOBUFS);
+
+ m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
+ m_adj(m, ETHER_ALIGN);
+
+ return (gen_mapbuf_rx(sc, q, index, m));
+}
+
+static int
+gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
+ struct mbuf *m)
+{
+ bus_dma_segment_t seg;
+ bus_dmamap_t map;
+ int nsegs;
+
+ map = q->entries[index].map;
+ if (bus_dmamap_load_mbuf_sg(sc->rx_buf_tag, map, m, &seg, &nsegs,
+ BUS_DMA_NOWAIT) != 0) {
+ m_freem(m);
+ return (ENOBUFS);
+ }
+
+ bus_dmamap_sync(sc->rx_buf_tag, map, BUS_DMASYNC_PREREAD);
+
+ q->entries[index].mbuf = m;
+ WR4(sc, GENET_RX_DESC_ADDRESS_LO(index), (uint32_t)seg.ds_addr);
+ WR4(sc, GENET_RX_DESC_ADDRESS_HI(index), (uint32_t)(seg.ds_addr >> 32));
+
+ return (0);
+}
+
+static int
+gen_ioctl(if_t ifp, u_long cmd, caddr_t data)
+{
+ struct gen_softc *sc;
+ struct mii_data *mii;
+ struct ifreq *ifr;
+ int flags, enable, error;
+
+ sc = if_getsoftc(ifp);
+ mii = device_get_softc(sc->miibus);
+ ifr = (struct ifreq *)data;
+ error = 0;
+
+ switch (cmd) {
+ case SIOCSIFFLAGS:
+ GEN_LOCK(sc);
+ if (if_getflags(ifp) & IFF_UP) {
+ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
+ flags = if_getflags(ifp) ^ sc->if_flags;
+ if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0)
+ gen_setup_rxfilter(sc);
+ } else
+ gen_init_locked(sc);
+ } else {
+ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
+ gen_reset(sc);
+ }
+ sc->if_flags = if_getflags(ifp);
+ GEN_UNLOCK(sc);
+ break;
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
+ GEN_LOCK(sc);
+ gen_setup_rxfilter(sc);
+ GEN_UNLOCK(sc);
+ }
+ break;
+
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
+ break;
+
+ case SIOCSIFCAP:
+ enable = if_getcapenable(ifp);
+ flags = ifr->ifr_reqcap ^ enable;
+ if (flags & IFCAP_RXCSUM)
+ enable ^= IFCAP_RXCSUM;
+ if (flags & IFCAP_RXCSUM_IPV6)
+ enable ^= IFCAP_RXCSUM_IPV6;
+ if (flags & IFCAP_TXCSUM)
+ enable ^= IFCAP_TXCSUM;
+ if (flags & IFCAP_TXCSUM_IPV6)
+ enable ^= IFCAP_TXCSUM_IPV6;
+ if (enable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6))
+ if_sethwassist(ifp, GEN_CSUM_FEATURES);
+ else
+ if_sethwassist(ifp, 0);
+ if_setcapenable(ifp, enable);
+ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
+ gen_enable_offload(sc);
+ break;
+
+ default:
+ error = ether_ioctl(ifp, cmd, data);
+ break;
+ }
+ return (error);
+}
+
+static void
+gen_tick(void *softc)
+{
+ struct gen_softc *sc;
+ struct mii_data *mii;
+ if_t ifp;
+ int link;
+
+ sc = softc;
+ ifp = sc->ifp;
+ mii = device_get_softc(sc->miibus);
+
+ GEN_ASSERT_LOCKED(sc);
+
+ if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
+ return;
+
+ link = sc->link;
+ mii_tick(mii);
+ if (sc->link && !link)
+ gen_start_locked(sc);
+
+ callout_reset(&sc->stat_ch, hz, gen_tick, sc);
+}
+
+#define MII_BUSY_RETRY 1000
+
+static int
+gen_miibus_readreg(device_t dev, int phy, int reg)
+{
+ struct gen_softc *sc;
+ int retry, val;
+
+ sc = device_get_softc(dev);
+ val = 0;
+
+ WR4(sc, GENET_MDIO_CMD, GENET_MDIO_READ |
+ (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT));
+ val = RD4(sc, GENET_MDIO_CMD);
+ WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
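+ /* Poll for completion of the MDIO transaction. */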
+ for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
+ if (((val = RD4(sc, GENET_MDIO_CMD)) &
+ GENET_MDIO_START_BUSY) == 0) {
+ if (val & GENET_MDIO_READ_FAILED)
+ return (0); /* -1? */
+ val &= GENET_MDIO_VAL_MASK;
+ break;
+ }
+ DELAY(10);
+ }
+
+ if (retry == 0)
+ device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
+ phy, reg);
+
+ return (val);
+}
+
+static int
+gen_miibus_writereg(device_t dev, int phy, int reg, int val)
+{
+ struct gen_softc *sc;
+ int retry;
+
+ sc = device_get_softc(dev);
+
+ WR4(sc, GENET_MDIO_CMD, GENET_MDIO_WRITE |
+ (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT) |
+ (val & GENET_MDIO_VAL_MASK));
+ val = RD4(sc, GENET_MDIO_CMD);
+ WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
+ for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
+ val = RD4(sc, GENET_MDIO_CMD);
+ if ((val & GENET_MDIO_START_BUSY) == 0)
+ break;
+ DELAY(10);
+ }
+ if (retry == 0)
+ device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
+ phy, reg);
+
+ return (0);
+}
+
+static void
+gen_update_link_locked(struct gen_softc *sc)
+{
+ struct mii_data *mii;
+ uint32_t val;
+ u_int speed;
+
+ GEN_ASSERT_LOCKED(sc);
+
+ if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
+ return;
+ mii = device_get_softc(sc->miibus);
+
+ if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
+ (IFM_ACTIVE | IFM_AVALID)) {
+ switch (IFM_SUBTYPE(mii->mii_media_active)) {
+ case IFM_1000_T:
+ case IFM_1000_SX:
+ speed = GENET_UMAC_CMD_SPEED_1000;
+ sc->link = 1;
+ break;
+ case IFM_100_TX:
+ speed = GENET_UMAC_CMD_SPEED_100;
+ sc->link = 1;
+ break;
+ case IFM_10_T:
+ speed = GENET_UMAC_CMD_SPEED_10;
+ sc->link = 1;
+ break;
+ default:
+ sc->link = 0;
+ break;
+ }
+ } else
+ sc->link = 0;
+
+ if (sc->link == 0)
+ return;
+
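+ /* Program the RGMII out-of-band control and MAC speed to match the link. */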
+ val = RD4(sc, GENET_EXT_RGMII_OOB_CTRL);
+ val &= ~GENET_EXT_RGMII_OOB_OOB_DISABLE;
+ val |= GENET_EXT_RGMII_OOB_RGMII_LINK;
+ val |= GENET_EXT_RGMII_OOB_RGMII_MODE_EN;
+ if (sc->phy_mode == MII_CONTYPE_RGMII)
+ val |= GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
+ else
+ val &= ~GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
+ WR4(sc, GENET_EXT_RGMII_OOB_CTRL, val);
+
+ val = RD4(sc, GENET_UMAC_CMD);
+ val &= ~GENET_UMAC_CMD_SPEED;
+ val |= speed;
+ WR4(sc, GENET_UMAC_CMD, val);
+}
+
+static void
+gen_link_task(void *arg, int pending)
+{
+ struct gen_softc *sc;
+
+ sc = arg;
+
+ GEN_LOCK(sc);
+ gen_update_link_locked(sc);
+ GEN_UNLOCK(sc);
+}
+
+static void
+gen_miibus_statchg(device_t dev)
+{
+ struct gen_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ taskqueue_enqueue(taskqueue_swi, &sc->link_task);
+}
+
+static void
+gen_media_status(if_t ifp, struct ifmediareq *ifmr)
+{
+ struct gen_softc *sc;
+ struct mii_data *mii;
+
+ sc = if_getsoftc(ifp);
+ mii = device_get_softc(sc->miibus);
+
+ GEN_LOCK(sc);
+ mii_pollstat(mii);
+ ifmr->ifm_active = mii->mii_media_active;
+ ifmr->ifm_status = mii->mii_media_status;
+ GEN_UNLOCK(sc);
+}
+
+static int
+gen_media_change(if_t ifp)
+{
+ struct gen_softc *sc;
+ struct mii_data *mii;
+ int error;
+
+ sc = if_getsoftc(ifp);
+ mii = device_get_softc(sc->miibus);
+
+ GEN_LOCK(sc);
+ error = mii_mediachg(mii);
+ GEN_UNLOCK(sc);
+
+ return (error);
+}
+
+static device_method_t gen_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, gen_probe),
+ DEVMETHOD(device_attach, gen_attach),
+
+ /* MII interface */
+ DEVMETHOD(miibus_readreg, gen_miibus_readreg),
+ DEVMETHOD(miibus_writereg, gen_miibus_writereg),
+ DEVMETHOD(miibus_statchg, gen_miibus_statchg),
+
+ DEVMETHOD_END
+};
+
+static driver_t gen_driver = {
+ "genet",
+ gen_methods,
+ sizeof(struct gen_softc),
+};
+
+static devclass_t gen_devclass;
+
+DRIVER_MODULE(genet, simplebus, gen_driver, gen_devclass, 0, 0);
+DRIVER_MODULE(miibus, genet, miibus_driver, miibus_devclass, 0, 0);
+MODULE_DEPEND(genet, ether, 1, 1, 1);
+MODULE_DEPEND(genet, miibus, 1, 1, 1);
diff --git a/sys/arm64/broadcom/genet/if_genetreg.h b/sys/arm64/broadcom/genet/if_genetreg.h
new file mode 100644
index 000000000000..3b6e709d1938
--- /dev/null
+++ b/sys/arm64/broadcom/genet/if_genetreg.h
@@ -0,0 +1,223 @@
+/* $NetBSD: bcmgenetreg.h,v 1.2 2020/02/22 13:41:41 jmcneill Exp $ */
+
+/* derived from NetBSD's bcmgenetreg.h */
+
+/*-
+ * Copyright (c) 2020 Michael J Karels
+ * Copyright (c) 2020 Jared McNeill <jmcneill@invisible.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Broadcom GENETv5
+ */
+
+#ifndef _BCMGENETREG_H
+#define _BCMGENETREG_H
+
+#define GENET_SYS_REV_CTRL 0x000
+#define SYS_REV_MAJOR __BITS(27,24)
+#define SYS_REV_MINOR __BITS(19,16)
+#define REV_MAJOR 0xf000000
+#define REV_MAJOR_SHIFT 24
+#define REV_MAJOR_V5 6
+#define REV_MINOR 0xf0000
+#define REV_MINOR_SHIFT 16
+#define REV_PHY 0xffff
+#define GENET_SYS_PORT_CTRL 0x004
+#define GENET_SYS_PORT_MODE_EXT_GPHY 3
+#define GENET_SYS_RBUF_FLUSH_CTRL 0x008
+#define GENET_SYS_RBUF_FLUSH_RESET __BIT(1)
+#define GENET_SYS_TBUF_FLUSH_CTRL 0x00c
+#define GENET_EXT_RGMII_OOB_CTRL 0x08c
+#define GENET_EXT_RGMII_OOB_ID_MODE_DISABLE __BIT(16)
+#define GENET_EXT_RGMII_OOB_RGMII_MODE_EN __BIT(6)
+#define GENET_EXT_RGMII_OOB_OOB_DISABLE __BIT(5)
+#define GENET_EXT_RGMII_OOB_RGMII_LINK __BIT(4)
+#define GENET_INTRL2_CPU_STAT 0x200
+#define GENET_INTRL2_CPU_CLEAR 0x208
+#define GENET_INTRL2_CPU_STAT_MASK 0x20c
+#define GENET_INTRL2_CPU_SET_MASK 0x210
+#define GENET_INTRL2_CPU_CLEAR_MASK 0x214
+#define GENET_IRQ_MDIO_ERROR __BIT(24)
+#define GENET_IRQ_MDIO_DONE __BIT(23)
+#define GENET_IRQ_TXDMA_DONE __BIT(16)
+#define GENET_IRQ_RXDMA_DONE __BIT(13)
+#define GENET_RBUF_CTRL 0x300
+#define GENET_RBUF_BAD_DIS __BIT(2)
+#define GENET_RBUF_ALIGN_2B __BIT(1)
+#define GENET_RBUF_64B_EN __BIT(0)
+#define GENET_RBUF_CHECK_CTRL 0x314
+#define GENET_RBUF_CHECK_CTRL_EN __BIT(0)
+#define GENET_RBUF_CHECK_SKIP_FCS __BIT(4)
+#define GENET_RBUF_TBUF_SIZE_CTRL 0x3b4
+#define GENET_TBUF_CTRL 0x600
+#define GENET_UMAC_CMD 0x808
+#define GENET_UMAC_CMD_LCL_LOOP_EN __BIT(15)
+#define GENET_UMAC_CMD_SW_RESET __BIT(13)
+#define GENET_UMAC_CMD_PROMISC __BIT(4)
+#ifdef __BITS
+#define GENET_UMAC_CMD_SPEED __BITS(3,2)
+#define GENET_UMAC_CMD_SPEED_10 0
+#define GENET_UMAC_CMD_SPEED_100 1
+#define GENET_UMAC_CMD_SPEED_1000 2
+#else
+#define GENET_UMAC_CMD_SPEED (3 << 2)
+#define GENET_UMAC_CMD_SPEED_10 (0 << 2)
+#define GENET_UMAC_CMD_SPEED_100 (1 << 2)
+#define GENET_UMAC_CMD_SPEED_1000 (2 << 2)
+#define GENET_UMAC_CMD_CRC_FWD __BIT(6)
+#endif
+#define GENET_UMAC_CMD_RXEN __BIT(1)
+#define GENET_UMAC_CMD_TXEN __BIT(0)
+#define GENET_UMAC_MAC0 0x80c
+#define GENET_UMAC_MAC1 0x810
+#define GENET_UMAC_MAX_FRAME_LEN 0x814
+#define GENET_UMAC_TX_FLUSH 0xb34
+#define GENET_UMAC_MIB_CTRL 0xd80
+#define GENET_UMAC_MIB_RESET_TX __BIT(2)
+#define GENET_UMAC_MIB_RESET_RUNT __BIT(1)
+#define GENET_UMAC_MIB_RESET_RX __BIT(0)
+#define GENET_MDIO_CMD 0xe14
+#define GENET_MDIO_START_BUSY __BIT(29)
+#define GENET_MDIO_READ_FAILED __BIT(28)
+#define GENET_MDIO_READ __BIT(27)
+#define GENET_MDIO_WRITE __BIT(26)
+#define GENET_MDIO_PMD __BITS(25,21)
+#define GENET_MDIO_REG __BITS(20,16)
+#define GENET_MDIO_ADDR_SHIFT 21
+#define GENET_MDIO_REG_SHIFT 16
+#define GENET_MDIO_VAL_MASK 0xffff
+#define GENET_UMAC_MDF_CTRL 0xe50
+#define GENET_UMAC_MDF_ADDR0(n) (0xe54 + (n) * 0x8)
+#define GENET_UMAC_MDF_ADDR1(n) (0xe58 + (n) * 0x8)
+#define GENET_MAX_MDF_FILTER 17
+
+#define GENET_DMA_DESC_COUNT 256
+#define GENET_DMA_DESC_SIZE 12
+#define GENET_DMA_DEFAULT_QUEUE 16
+
+#define GENET_DMA_RING_SIZE 0x40
+#define GENET_DMA_RINGS_SIZE (GENET_DMA_RING_SIZE * (GENET_DMA_DEFAULT_QUEUE + 1))
+
+#define GENET_RX_BASE 0x2000
+#define GENET_TX_BASE 0x4000
+
+#define GENET_RX_DMA_RINGBASE(qid) (GENET_RX_BASE + 0xc00 + GENET_DMA_RING_SIZE * (qid))
+#define GENET_RX_DMA_WRITE_PTR_LO(qid) (GENET_RX_DMA_RINGBASE(qid) + 0x00)
+#define GENET_RX_DMA_WRITE_PTR_HI(qid) (GENET_RX_DMA_RINGBASE(qid) + 0x04)
+#define GENET_RX_DMA_PROD_INDEX(qid) (GENET_RX_DMA_RINGBASE(qid) + 0x08)
+#define GENET_RX_DMA_CONS_INDEX(qid) (GENET_RX_DMA_RINGBASE(qid) + 0x0c)
+#define GENET_RX_DMA_PROD_CONS_MASK 0xffff
+#define GENET_RX_DMA_RING_BUF_SIZE(qid) (GENET_RX_DMA_RINGBASE(qid) + 0x10)
+#define GENET_RX_DMA_RING_BUF_SIZE_DESC_COUNT __BITS(31,16)
+#define GENET_RX_DMA_RING_BUF_SIZE_BUF_LENGTH __BITS(15,0)
+#define GENET_RX_DMA_RING_BUF_SIZE_DESC_SHIFT 16
+#define GENET_RX_DMA_RING_BUF_SIZE_BUF_LEN_MASK 0xffff
+#define GENET_RX_DMA_START_ADDR_LO(qid) (GENET_RX_DMA_RINGBASE(qid) + 0x14)
+#define GENET_RX_DMA_START_ADDR_HI(qid) (GENET_RX_DMA_RINGBASE(qid) + 0x18)
+#define GENET_RX_DMA_END_ADDR_LO(qid) (GENET_RX_DMA_RINGBASE(qid) + 0x1c)
+#define GENET_RX_DMA_END_ADDR_HI(qid) (GENET_RX_DMA_RINGBASE(qid) + 0x20)
+#define GENET_RX_DMA_XON_XOFF_THRES(qid) (GENET_RX_DMA_RINGBASE(qid) + 0x28)
+#define GENET_RX_DMA_XON_XOFF_THRES_LO __BITS(31,16)
+#define GENET_RX_DMA_XON_XOFF_THRES_HI __BITS(15,0)
+#define GENET_RX_DMA_XON_XOFF_THRES_LO_SHIFT 16
+#define GENET_RX_DMA_READ_PTR_LO(qid) (GENET_RX_DMA_RINGBASE(qid) + 0x2c)
+#define GENET_RX_DMA_READ_PTR_HI(qid) (GENET_RX_DMA_RINGBASE(qid) + 0x30)
+
+#define GENET_TX_DMA_RINGBASE(qid) (GENET_TX_BASE + 0xc00 + GENET_DMA_RING_SIZE * (qid))
+#define GENET_TX_DMA_READ_PTR_LO(qid) (GENET_TX_DMA_RINGBASE(qid) + 0x00)
+#define GENET_TX_DMA_READ_PTR_HI(qid) (GENET_TX_DMA_RINGBASE(qid) + 0x04)
+#define GENET_TX_DMA_CONS_INDEX(qid) (GENET_TX_DMA_RINGBASE(qid) + 0x08)
+#define GENET_TX_DMA_PROD_INDEX(qid) (GENET_TX_DMA_RINGBASE(qid) + 0x0c)
+#define GENET_TX_DMA_PROD_CONS_MASK 0xffff
+#define GENET_TX_DMA_RING_BUF_SIZE(qid) (GENET_TX_DMA_RINGBASE(qid) + 0x10)
+#define GENET_TX_DMA_RING_BUF_SIZE_DESC_COUNT __BITS(31,16)
+#define GENET_TX_DMA_RING_BUF_SIZE_BUF_LENGTH __BITS(15,0)
+#define GENET_TX_DMA_RING_BUF_SIZE_DESC_SHIFT 16
+#define GENET_TX_DMA_RING_BUF_SIZE_BUF_LEN_MASK 0xffff
+#define GENET_TX_DMA_START_ADDR_LO(qid) (GENET_TX_DMA_RINGBASE(qid) + 0x14)
+#define GENET_TX_DMA_START_ADDR_HI(qid) (GENET_TX_DMA_RINGBASE(qid) + 0x18)
+#define GENET_TX_DMA_END_ADDR_LO(qid) (GENET_TX_DMA_RINGBASE(qid) + 0x1c)
+#define GENET_TX_DMA_END_ADDR_HI(qid) (GENET_TX_DMA_RINGBASE(qid) + 0x20)
+#define GENET_TX_DMA_MBUF_DONE_THRES(qid) (GENET_TX_DMA_RINGBASE(qid) + 0x24)
+#define GENET_TX_DMA_FLOW_PERIOD(qid) (GENET_TX_DMA_RINGBASE(qid) + 0x28)
+#define GENET_TX_DMA_WRITE_PTR_LO(qid) (GENET_TX_DMA_RINGBASE(qid) + 0x2c)
+#define GENET_TX_DMA_WRITE_PTR_HI(qid) (GENET_TX_DMA_RINGBASE(qid) + 0x30)
+
+#define GENET_RX_DESC_STATUS(idx) (GENET_RX_BASE + GENET_DMA_DESC_SIZE * (idx) + 0x00)
+#define GENET_RX_DESC_STATUS_BUFLEN __BITS(27,16)
+#define GENET_RX_DESC_STATUS_BUFLEN_MASK 0xfff0000
+#define GENET_RX_DESC_STATUS_BUFLEN_SHIFT 16
+#define GENET_RX_DESC_STATUS_OWN __BIT(15) /* ??? */
+#define GENET_RX_DESC_STATUS_CKSUM_OK __BIT(15)
+#define GENET_RX_DESC_STATUS_EOP __BIT(14)
+#define GENET_RX_DESC_STATUS_SOP __BIT(13)
+#define GENET_RX_DESC_STATUS_RX_ERROR __BIT(2)
+#define GENET_RX_DESC_ADDRESS_LO(idx) (GENET_RX_BASE + GENET_DMA_DESC_SIZE * (idx) + 0x04)
+#define GENET_RX_DESC_ADDRESS_HI(idx) (GENET_RX_BASE + GENET_DMA_DESC_SIZE * (idx) + 0x08)
+
+#define GENET_TX_DESC_STATUS(idx) (GENET_TX_BASE + GENET_DMA_DESC_SIZE * (idx) + 0x00)
+#define GENET_TX_DESC_STATUS_BUFLEN __BITS(27,16)
+#define GENET_TX_DESC_STATUS_OWN __BIT(15)
+#define GENET_TX_DESC_STATUS_EOP __BIT(14)
+#define GENET_TX_DESC_STATUS_SOP __BIT(13)
+#define GENET_TX_DESC_STATUS_QTAG __BITS(12,7)
+#define GENET_TX_DESC_STATUS_CRC __BIT(6)
+#define GENET_TX_DESC_STATUS_CKSUM __BIT(4)
+#define GENET_TX_DESC_STATUS_BUFLEN_SHIFT 16
+#define GENET_TX_DESC_STATUS_BUFLEN_MASK 0x7ff0000
+#define GENET_TX_DESC_STATUS_QTAG_MASK 0x1f80
+#define GENET_TX_DESC_ADDRESS_LO(idx) (GENET_TX_BASE + GENET_DMA_DESC_SIZE * (idx) + 0x04)
+#define GENET_TX_DESC_ADDRESS_HI(idx) (GENET_TX_BASE + GENET_DMA_DESC_SIZE * (idx) + 0x08)
+
+/* Status block prepended to tx/rx packets (optional) */
+struct statusblock {
+ u_int32_t status_buflen;
+ u_int32_t extstatus;
+ u_int32_t rxcsum;
+ u_int32_t spare1[9];
+ u_int32_t txcsuminfo;
+ u_int32_t spare2[3];
+};
+
+/* bits in txcsuminfo */
+#define TXCSUM_LEN_VALID __BIT(31)
+#define TXCSUM_OFF_SHIFT 16
+#define TXCSUM_UDP __BIT(15)
+
+#define GENET_RX_DMA_RING_CFG (GENET_RX_BASE + 0x1040 + 0x00)
+#define GENET_RX_DMA_CTRL (GENET_RX_BASE + 0x1040 + 0x04)
+#define GENET_RX_DMA_CTRL_RBUF_EN(qid) __BIT((qid) + 1)
+#define GENET_RX_DMA_CTRL_EN __BIT(0)
+#define GENET_RX_SCB_BURST_SIZE (GENET_RX_BASE + 0x1040 + 0x0c)
+
+#define GENET_TX_DMA_RING_CFG (GENET_TX_BASE + 0x1040 + 0x00)
+#define GENET_TX_DMA_CTRL (GENET_TX_BASE + 0x1040 + 0x04)
+#define GENET_TX_DMA_CTRL_RBUF_EN(qid) __BIT((qid) + 1)
+#define GENET_TX_DMA_CTRL_EN __BIT(0)
+#define GENET_TX_SCB_BURST_SIZE (GENET_TX_BASE + 0x1040 + 0x0c)
+
+#endif /* !_BCMGENETREG_H */
diff --git a/sys/arm64/cavium/thunder_pcie_common.c b/sys/arm64/cavium/thunder_pcie_common.c
new file mode 100644
index 000000000000..8f1d4834b1a5
--- /dev/null
+++ b/sys/arm64/cavium/thunder_pcie_common.c
@@ -0,0 +1,209 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under
+ * the sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* Common PCIe functions for Cavium Thunder SOC */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_platform.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+
+#include <machine/bus.h>
+#include <machine/cpu.h>
+#include <machine/intr.h>
+
+#ifdef FDT
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/ofw/ofw_pci.h>
+#endif
+
+#include <sys/pciio.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pci_private.h>
+#include <dev/pci/pcib_private.h>
+#include <dev/pci/pci_host_generic.h>
+#ifdef FDT
+#include <dev/pci/pci_host_generic_fdt.h>
+#endif
+
+#include "thunder_pcie_common.h"
+
+MALLOC_DEFINE(M_THUNDER_PCIE, "Thunder PCIe driver", "Thunder PCIe driver memory");
+
+#define THUNDER_CFG_BASE_TO_ECAM(x) ((((x) >> 36UL) & 0x3) | (((x) >> 42UL) & 0x4))
+
+uint32_t
+range_addr_is_pci(struct pcie_range *ranges, uint64_t addr, uint64_t size)
+{
+ struct pcie_range *r;
+ int tuple;
+
+ for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
+ r = &ranges[tuple];
+ if (addr >= r->pci_base &&
+ addr < (r->pci_base + r->size) &&
+ size < r->size) {
+ /* Address is within PCI range */
+ return (1);
+ }
+ }
+
+ /* Address is outside PCI range */
+ return (0);
+}
+
+uint32_t
+range_addr_is_phys(struct pcie_range *ranges, uint64_t addr, uint64_t size)
+{
+ struct pcie_range *r;
+ int tuple;
+
+ for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
+ r = &ranges[tuple];
+ if (addr >= r->phys_base &&
+ addr < (r->phys_base + r->size) &&
+ size < r->size) {
+ /* Address is within Physical range */
+ return (1);
+ }
+ }
+
+ /* Address is outside Physical range */
+ return (0);
+}
+
+uint64_t
+range_addr_phys_to_pci(struct pcie_range *ranges, uint64_t phys_addr)
+{
+ struct pcie_range *r;
+ uint64_t offset;
+ int tuple;
+
+	/* Find the PCI bus address corresponding to the given physical address */
+ for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
+ r = &ranges[tuple];
+ if (phys_addr >= r->phys_base &&
+ phys_addr < (r->phys_base + r->size)) {
+ /* Given phys addr is in this range.
+ * Translate phys addr to bus addr.
+ */
+ offset = phys_addr - r->phys_base;
+ return (r->pci_base + offset);
+ }
+ }
+ return (0);
+}
+
+uint64_t
+range_addr_pci_to_phys(struct pcie_range *ranges, uint64_t pci_addr)
+{
+ struct pcie_range *r;
+ uint64_t offset;
+ int tuple;
+
+ /* Find physical address corresponding to given bus address */
+ for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
+ r = &ranges[tuple];
+ if (pci_addr >= r->pci_base &&
+ pci_addr < (r->pci_base + r->size)) {
+ /* Given pci addr is in this range.
+ * Translate bus addr to phys addr.
+ */
+ offset = pci_addr - r->pci_base;
+ return (r->phys_base + offset);
+ }
+ }
+ return (0);
+}
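+
Editorial note, not part of the patch: for an address that falls inside one of
the configured windows, range_addr_phys_to_pci() and range_addr_pci_to_phys()
are inverses over the same table, and addresses outside every window translate
to 0. A minimal sketch, with a made-up caller name and table contents:

static void
example_range_round_trip(struct pcie_range *ranges)
{
	uint64_t phys, pci;

	/* Pick an address 4 KiB into the first window. */
	phys = ranges[0].phys_base + 0x1000;
	pci = range_addr_phys_to_pci(ranges, phys);

	/* Inside a window the two translations round-trip. */
	KASSERT(range_addr_pci_to_phys(ranges, pci) == phys,
	    ("range translation did not round-trip"));
}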
+
+int
+thunder_pcie_identify_ecam(device_t dev, int *ecam)
+{
+ rman_res_t start;
+
+ /* Check if we're running on Cavium ThunderX */
+ if (!CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK,
+ CPU_IMPL_CAVIUM, CPU_PART_THUNDERX, 0, 0))
+ return (EINVAL);
+
+ start = bus_get_resource_start(dev, SYS_RES_MEMORY, 0);
+ *ecam = THUNDER_CFG_BASE_TO_ECAM(start);
+
+ device_printf(dev, "ThunderX quirk, setting ECAM to %d\n", *ecam);
+
+ return (0);
+}
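
Editorial note: THUNDER_CFG_BASE_TO_ECAM() above recovers the ECAM index from
the controller's CPU base address. Bits 37:36 select one of the four ECAMs on
a node, and the "((x) >> 42UL) & 0x4" term folds bit 44 in as bit 2 to select
the second node. Assuming, for illustration, a node-0 window based at
0x849000000000: bits 37:36 are 01 and bit 44 is clear, so the quirk reports
ECAM 1; the matching node-1 window, with bit 44 set, would report ECAM 5.
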
+
+#ifdef THUNDERX_PASS_1_1_ERRATA
+struct resource *
+thunder_pcie_alloc_resource(device_t dev, device_t child, int type, int *rid,
+ rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
+{
+ pci_addr_t map, testval;
+
+ /*
+	 * If Enhanced Allocation is not used, we can't allocate any random
+	 * range. All internal devices have a hardcoded place where they can
+	 * be located within the PCI address space. Fortunately, we can read
+	 * this location from the BAR.
+ */
+ if (((type == SYS_RES_IOPORT) || (type == SYS_RES_MEMORY)) &&
+ RMAN_IS_DEFAULT_RANGE(start, end)) {
+ /* Read BAR manually to get resource address and size */
+ pci_read_bar(child, *rid, &map, &testval, NULL);
+
+ /* Mask the information bits */
+ if (PCI_BAR_MEM(map))
+ map &= PCIM_BAR_MEM_BASE;
+ else
+ map &= PCIM_BAR_IO_BASE;
+
+ if (PCI_BAR_MEM(testval))
+ testval &= PCIM_BAR_MEM_BASE;
+ else
+ testval &= PCIM_BAR_IO_BASE;
+
+ start = map;
+ end = start + count - 1;
+ }
+
+ return (pci_host_generic_core_alloc_resource(dev, child, type, rid,
+ start, end, count, flags));
+}
+#endif
diff --git a/sys/arm64/cavium/thunder_pcie_common.h b/sys/arm64/cavium/thunder_pcie_common.h
new file mode 100644
index 000000000000..057dff5d3b4b
--- /dev/null
+++ b/sys/arm64/cavium/thunder_pcie_common.h
@@ -0,0 +1,48 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _CAVIUM_THUNDER_PCIE_COMMON_H_
+#define _CAVIUM_THUNDER_PCIE_COMMON_H_
+
+DECLARE_CLASS(thunder_pcie_driver);
+DECLARE_CLASS(thunder_pem_driver);
+
+MALLOC_DECLARE(M_THUNDER_PCIE);
+
+uint32_t range_addr_is_pci(struct pcie_range *, uint64_t, uint64_t);
+uint32_t range_addr_is_phys(struct pcie_range *, uint64_t, uint64_t);
+uint64_t range_addr_phys_to_pci(struct pcie_range *, uint64_t);
+uint64_t range_addr_pci_to_phys(struct pcie_range *, uint64_t);
+
+int thunder_pcie_identify_ecam(device_t, int *);
+#ifdef THUNDERX_PASS_1_1_ERRATA
+struct resource *thunder_pcie_alloc_resource(device_t,
+ device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int);
+#endif
+
+#endif /* _CAVIUM_THUNDER_PCIE_COMMON_H_ */
diff --git a/sys/arm64/cavium/thunder_pcie_fdt.c b/sys/arm64/cavium/thunder_pcie_fdt.c
new file mode 100644
index 000000000000..115b89b67b88
--- /dev/null
+++ b/sys/arm64/cavium/thunder_pcie_fdt.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2016 Cavium Inc.
+ * All rights reserved.
+ *
+ * Developed by Semihalf.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include "opt_platform.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <sys/kernel.h>
+#include <sys/rman.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/endian.h>
+#include <sys/cpuset.h>
+
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pci_host_generic.h>
+#include <dev/pci/pci_host_generic_fdt.h>
+#include <dev/pci/pcib_private.h>
+
+#include "thunder_pcie_common.h"
+
+#include "pcib_if.h"
+
+#ifdef THUNDERX_PASS_1_1_ERRATA
+static struct resource * thunder_pcie_fdt_alloc_resource(device_t, device_t,
+ int, int *, rman_res_t, rman_res_t, rman_res_t, u_int);
+#endif
+static int thunder_pcie_fdt_attach(device_t);
+static int thunder_pcie_fdt_probe(device_t);
+static int thunder_pcie_fdt_get_id(device_t, device_t, enum pci_id_type,
+ uintptr_t *);
+
+static device_method_t thunder_pcie_fdt_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, thunder_pcie_fdt_probe),
+ DEVMETHOD(device_attach, thunder_pcie_fdt_attach),
+#ifdef THUNDERX_PASS_1_1_ERRATA
+ DEVMETHOD(bus_alloc_resource, thunder_pcie_fdt_alloc_resource),
+#endif
+
+ /* pcib interface */
+ DEVMETHOD(pcib_get_id, thunder_pcie_fdt_get_id),
+
+ /* End */
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(pcib, thunder_pcie_fdt_driver, thunder_pcie_fdt_methods,
+ sizeof(struct generic_pcie_fdt_softc), generic_pcie_fdt_driver);
+
+static devclass_t thunder_pcie_fdt_devclass;
+
+DRIVER_MODULE(thunder_pcib, simplebus, thunder_pcie_fdt_driver,
+ thunder_pcie_fdt_devclass, 0, 0);
+DRIVER_MODULE(thunder_pcib, ofwbus, thunder_pcie_fdt_driver,
+ thunder_pcie_fdt_devclass, 0, 0);
+
+static int
+thunder_pcie_fdt_probe(device_t dev)
+{
+
+ /* Check if we're running on Cavium ThunderX */
+ if (!CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK,
+ CPU_IMPL_CAVIUM, CPU_PART_THUNDERX, 0, 0))
+ return (ENXIO);
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_is_compatible(dev, "pci-host-ecam-generic") ||
+ ofw_bus_is_compatible(dev, "cavium,thunder-pcie") ||
+ ofw_bus_is_compatible(dev, "cavium,pci-host-thunder-ecam")) {
+ device_set_desc(dev, "Cavium Integrated PCI/PCI-E Controller");
+ return (BUS_PROBE_DEFAULT);
+ }
+
+ return (ENXIO);
+}
+
+static int
+thunder_pcie_fdt_attach(device_t dev)
+{
+ struct generic_pcie_fdt_softc *sc;
+
+ sc = device_get_softc(dev);
+ thunder_pcie_identify_ecam(dev, &sc->base.ecam);
+ sc->base.coherent = 1;
+
+ return (pci_host_generic_attach(dev));
+}
+
+static int
+thunder_pcie_fdt_get_id(device_t pci, device_t child, enum pci_id_type type,
+ uintptr_t *id)
+{
+ phandle_t node;
+ int bsf;
+
+ if (type != PCI_ID_MSI)
+ return (pcib_get_id(pci, child, type, id));
+
+ node = ofw_bus_get_node(pci);
+ if (OF_hasprop(node, "msi-map"))
+ return (generic_pcie_get_id(pci, child, type, id));
+
+ bsf = pci_get_rid(child);
+ *id = (pci_get_domain(child) << PCI_RID_DOMAIN_SHIFT) | bsf;
+
+ return (0);
+}
+
+#ifdef THUNDERX_PASS_1_1_ERRATA
+static struct resource *
+thunder_pcie_fdt_alloc_resource(device_t dev, device_t child, int type, int *rid,
+ rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
+{
+
+ if ((int)ofw_bus_get_node(child) > 0)
+ return (pci_host_generic_alloc_resource(dev, child,
+ type, rid, start, end, count, flags));
+
+ return (thunder_pcie_alloc_resource(dev, child,
+ type, rid, start, end, count, flags));
+}
+#endif
diff --git a/sys/arm64/cavium/thunder_pcie_pem.c b/sys/arm64/cavium/thunder_pcie_pem.c
new file mode 100644
index 000000000000..9296280ad95f
--- /dev/null
+++ b/sys/arm64/cavium/thunder_pcie_pem.c
@@ -0,0 +1,921 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under
+ * the sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* PCIe external MAC root complex driver (PEM) for Cavium Thunder SOC */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_platform.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/endian.h>
+
+#ifdef FDT
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/ofw/ofw_pci.h>
+#endif
+
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pci_host_generic.h>
+#include <dev/pci/pcib_private.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/smp.h>
+#include <machine/intr.h>
+
+#include <arm64/cavium/thunder_pcie_common.h>
+#include <arm64/cavium/thunder_pcie_pem.h>
+#include "pcib_if.h"
+
+#define THUNDER_PEM_DEVICE_ID 0xa020
+#define THUNDER_PEM_VENDOR_ID 0x177d
+
+/* ThunderX specific defines */
+#define THUNDER_PEMn_REG_BASE(unit) (0x87e0c0000000UL | ((unit) << 24))
+#define PCIERC_CFG002 0x08
+#define PCIERC_CFG006 0x18
+#define PCIERC_CFG032 0x80
+#define PCIERC_CFG006_SEC_BUS(reg) (((reg) >> 8) & 0xFF)
+#define PEM_CFG_RD_REG_ALIGN(reg) ((reg) & ~0x3)
+#define PEM_CFG_RD_REG_DATA(val) (((val) >> 32) & 0xFFFFFFFF)
+#define PEM_CFG_RD 0x30
+#define PEM_CFG_LINK_MASK 0x3
+#define PEM_CFG_LINK_RDY 0x3
+#define PEM_CFG_SLIX_TO_REG(slix) ((slix) << 4)
+#define SBNUM_OFFSET 0x8
+#define SBNUM_MASK 0xFF
+#define PEM_ON_REG 0x420
+#define PEM_CTL_STATUS 0x0
+#define PEM_LINK_ENABLE (1 << 4)
+#define PEM_LINK_DLLA (1 << 29)
+#define PEM_LINK_LT (1 << 27)
+#define PEM_BUS_SHIFT (24)
+#define PEM_SLOT_SHIFT (19)
+#define PEM_FUNC_SHIFT (16)
+#define SLIX_S2M_REGX_ACC 0x874001000000UL
+#define SLIX_S2M_REGX_ACC_SIZE 0x1000
+#define SLIX_S2M_REGX_ACC_SPACING 0x001000000000UL
+#define SLI_BASE 0x880000000000UL
+#define SLI_WINDOW_SPACING 0x004000000000UL
+#define SLI_PCI_OFFSET 0x001000000000UL
+#define SLI_NODE_SHIFT (44)
+#define SLI_NODE_MASK (3)
+#define SLI_GROUP_SHIFT (40)
+#define SLI_ID_SHIFT (24)
+#define SLI_ID_MASK (7)
+#define SLI_PEMS_PER_GROUP (3)
+#define SLI_GROUPS_PER_NODE (2)
+#define SLI_PEMS_PER_NODE (SLI_PEMS_PER_GROUP * SLI_GROUPS_PER_NODE)
+#define SLI_ACC_REG_CNT (256)
+
+/*
+ * Each PEM device creates its own bus with its own address
+ * translation, so we can assign bus addresses as we like.
+ * To support 32-bit cards, assume the PCI window assignment
+ * looks as follows:
+ *
+ * 0x00000000 - 0x000FFFFF IO
+ * 0x00100000 - 0xFFFFFFFF Memory
+ */
+#define PCI_IO_BASE 0x00000000UL
+#define PCI_IO_SIZE 0x00100000UL
+#define PCI_MEMORY_BASE PCI_IO_SIZE
+#define PCI_MEMORY_SIZE 0xFFF00000UL
+
+#define RID_PEM_SPACE 1
+
+static int thunder_pem_activate_resource(device_t, device_t, int, int,
+ struct resource *);
+static int thunder_pem_adjust_resource(device_t, device_t, int,
+ struct resource *, rman_res_t, rman_res_t);
+static struct resource * thunder_pem_alloc_resource(device_t, device_t, int,
+ int *, rman_res_t, rman_res_t, rman_res_t, u_int);
+static int thunder_pem_alloc_msi(device_t, device_t, int, int, int *);
+static int thunder_pem_release_msi(device_t, device_t, int, int *);
+static int thunder_pem_alloc_msix(device_t, device_t, int *);
+static int thunder_pem_release_msix(device_t, device_t, int);
+static int thunder_pem_map_msi(device_t, device_t, int, uint64_t *, uint32_t *);
+static int thunder_pem_get_id(device_t, device_t, enum pci_id_type,
+ uintptr_t *);
+static int thunder_pem_attach(device_t);
+static int thunder_pem_deactivate_resource(device_t, device_t, int, int,
+ struct resource *);
+static bus_dma_tag_t thunder_pem_get_dma_tag(device_t, device_t);
+static int thunder_pem_detach(device_t);
+static uint64_t thunder_pem_config_reg_read(struct thunder_pem_softc *, int);
+static int thunder_pem_link_init(struct thunder_pem_softc *);
+static int thunder_pem_maxslots(device_t);
+static int thunder_pem_probe(device_t);
+static uint32_t thunder_pem_read_config(device_t, u_int, u_int, u_int, u_int,
+ int);
+static int thunder_pem_read_ivar(device_t, device_t, int, uintptr_t *);
+static void thunder_pem_release_all(device_t);
+static int thunder_pem_release_resource(device_t, device_t, int, int,
+ struct resource *);
+static struct rman * thunder_pem_rman(struct thunder_pem_softc *, int);
+static void thunder_pem_slix_s2m_regx_acc_modify(struct thunder_pem_softc *,
+ int, int);
+static void thunder_pem_write_config(device_t, u_int, u_int, u_int, u_int,
+ uint32_t, int);
+static int thunder_pem_write_ivar(device_t, device_t, int, uintptr_t);
+
+/* Global handlers for SLI interface */
+static bus_space_handle_t sli0_s2m_regx_base = 0;
+static bus_space_handle_t sli1_s2m_regx_base = 0;
+
+static device_method_t thunder_pem_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, thunder_pem_probe),
+ DEVMETHOD(device_attach, thunder_pem_attach),
+ DEVMETHOD(device_detach, thunder_pem_detach),
+
+ /* Bus interface */
+ DEVMETHOD(bus_read_ivar, thunder_pem_read_ivar),
+ DEVMETHOD(bus_write_ivar, thunder_pem_write_ivar),
+ DEVMETHOD(bus_alloc_resource, thunder_pem_alloc_resource),
+ DEVMETHOD(bus_release_resource, thunder_pem_release_resource),
+ DEVMETHOD(bus_adjust_resource, thunder_pem_adjust_resource),
+ DEVMETHOD(bus_activate_resource, thunder_pem_activate_resource),
+ DEVMETHOD(bus_deactivate_resource, thunder_pem_deactivate_resource),
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+
+ DEVMETHOD(bus_get_dma_tag, thunder_pem_get_dma_tag),
+
+ /* pcib interface */
+ DEVMETHOD(pcib_maxslots, thunder_pem_maxslots),
+ DEVMETHOD(pcib_read_config, thunder_pem_read_config),
+ DEVMETHOD(pcib_write_config, thunder_pem_write_config),
+ DEVMETHOD(pcib_alloc_msix, thunder_pem_alloc_msix),
+ DEVMETHOD(pcib_release_msix, thunder_pem_release_msix),
+ DEVMETHOD(pcib_alloc_msi, thunder_pem_alloc_msi),
+ DEVMETHOD(pcib_release_msi, thunder_pem_release_msi),
+ DEVMETHOD(pcib_map_msi, thunder_pem_map_msi),
+ DEVMETHOD(pcib_get_id, thunder_pem_get_id),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(pcib, thunder_pem_driver, thunder_pem_methods,
+ sizeof(struct thunder_pem_softc));
+
+static devclass_t thunder_pem_devclass;
+extern struct bus_space memmap_bus;
+
+DRIVER_MODULE(thunder_pem, pci, thunder_pem_driver, thunder_pem_devclass, 0, 0);
+MODULE_DEPEND(thunder_pem, pci, 1, 1, 1);
+
+static int
+thunder_pem_maxslots(device_t dev)
+{
+
+#if 0
+ /* max slots per bus acc. to standard */
+ return (PCI_SLOTMAX);
+#else
+ /*
+ * ARM64TODO Workaround - otherwise an em(4) interface appears to be
+ * present on every PCI function on the bus to which it is connected
+ */
+ return (0);
+#endif
+}
+
+static int
+thunder_pem_read_ivar(device_t dev, device_t child, int index,
+ uintptr_t *result)
+{
+ struct thunder_pem_softc *sc;
+ int secondary_bus = 0;
+
+ sc = device_get_softc(dev);
+
+ if (index == PCIB_IVAR_BUS) {
+ secondary_bus = thunder_pem_config_reg_read(sc, PCIERC_CFG006);
+ *result = PCIERC_CFG006_SEC_BUS(secondary_bus);
+ return (0);
+ }
+ if (index == PCIB_IVAR_DOMAIN) {
+ *result = sc->id;
+ return (0);
+ }
+
+ return (ENOENT);
+}
+
+static int
+thunder_pem_write_ivar(device_t dev, device_t child, int index,
+ uintptr_t value)
+{
+
+ return (ENOENT);
+}
+
+static int
+thunder_pem_activate_resource(device_t dev, device_t child, int type, int rid,
+ struct resource *r)
+{
+ int err;
+ bus_addr_t paddr;
+ bus_size_t psize;
+ bus_space_handle_t vaddr;
+ struct thunder_pem_softc *sc;
+
+ if ((err = rman_activate_resource(r)) != 0)
+ return (err);
+
+ sc = device_get_softc(dev);
+
+ /*
+ * If this is a memory resource, map it into the kernel.
+ */
+ if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) {
+ paddr = (bus_addr_t)rman_get_start(r);
+ psize = (bus_size_t)rman_get_size(r);
+
+ paddr = range_addr_pci_to_phys(sc->ranges, paddr);
+
+ err = bus_space_map(&memmap_bus, paddr, psize, 0, &vaddr);
+ if (err != 0) {
+ rman_deactivate_resource(r);
+ return (err);
+ }
+ rman_set_bustag(r, &memmap_bus);
+ rman_set_virtual(r, (void *)vaddr);
+ rman_set_bushandle(r, vaddr);
+ }
+ return (0);
+}
+
+/*
+ * This function is an exact copy of nexus_deactivate_resource().
+ * Keep it up-to-date with all changes in nexus. To be removed
+ * once a bus-mapping interface is developed.
+ */
+static int
+thunder_pem_deactivate_resource(device_t bus, device_t child, int type, int rid,
+ struct resource *r)
+{
+ bus_size_t psize;
+ bus_space_handle_t vaddr;
+
+ psize = (bus_size_t)rman_get_size(r);
+ vaddr = rman_get_bushandle(r);
+
+ if (vaddr != 0) {
+ bus_space_unmap(&memmap_bus, vaddr, psize);
+ rman_set_virtual(r, NULL);
+ rman_set_bushandle(r, 0);
+ }
+
+ return (rman_deactivate_resource(r));
+}
+
+static int
+thunder_pem_adjust_resource(device_t dev, device_t child, int type,
+ struct resource *res, rman_res_t start, rman_res_t end)
+{
+ struct thunder_pem_softc *sc;
+ struct rman *rm;
+
+ sc = device_get_softc(dev);
+#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
+ if (type == PCI_RES_BUS)
+ return (pci_domain_adjust_bus(sc->id, child, res, start, end));
+#endif
+
+ rm = thunder_pem_rman(sc, type);
+ if (rm == NULL)
+ return (bus_generic_adjust_resource(dev, child, type, res,
+ start, end));
+ if (!rman_is_region_manager(res, rm))
+ /*
+		 * This means a child device has a memory or I/O resource
+		 * not allocated from this rman, which shouldn't happen.
+ */
+ return (EINVAL);
+ return (rman_adjust_resource(res, start, end));
+}
+
+static bus_dma_tag_t
+thunder_pem_get_dma_tag(device_t dev, device_t child)
+{
+ struct thunder_pem_softc *sc;
+
+ sc = device_get_softc(dev);
+ return (sc->dmat);
+}
+
+static int
+thunder_pem_alloc_msi(device_t pci, device_t child, int count, int maxcount,
+ int *irqs)
+{
+ device_t bus;
+
+ bus = device_get_parent(pci);
+ return (PCIB_ALLOC_MSI(device_get_parent(bus), child, count, maxcount,
+ irqs));
+}
+
+static int
+thunder_pem_release_msi(device_t pci, device_t child, int count, int *irqs)
+{
+ device_t bus;
+
+ bus = device_get_parent(pci);
+ return (PCIB_RELEASE_MSI(device_get_parent(bus), child, count, irqs));
+}
+
+static int
+thunder_pem_alloc_msix(device_t pci, device_t child, int *irq)
+{
+ device_t bus;
+
+ bus = device_get_parent(pci);
+ return (PCIB_ALLOC_MSIX(device_get_parent(bus), child, irq));
+}
+
+static int
+thunder_pem_release_msix(device_t pci, device_t child, int irq)
+{
+ device_t bus;
+
+ bus = device_get_parent(pci);
+ return (PCIB_RELEASE_MSIX(device_get_parent(bus), child, irq));
+}
+
+static int
+thunder_pem_map_msi(device_t pci, device_t child, int irq, uint64_t *addr,
+ uint32_t *data)
+{
+ device_t bus;
+
+ bus = device_get_parent(pci);
+ return (PCIB_MAP_MSI(device_get_parent(bus), child, irq, addr, data));
+}
+
+static int
+thunder_pem_get_id(device_t pci, device_t child, enum pci_id_type type,
+ uintptr_t *id)
+{
+ int bsf;
+ int pem;
+
+ if (type != PCI_ID_MSI)
+ return (pcib_get_id(pci, child, type, id));
+
+ bsf = pci_get_rid(child);
+
+ /* PEM (PCIe MAC/root complex) number is equal to domain */
+ pem = pci_get_domain(child);
+
+ /*
+ * Set appropriate device ID (passed by the HW along with
+ * the transaction to memory) for different root complex
+	 * numbers, using a hard-coded domain portion for each group.
+ */
+ if (pem < 3)
+ *id = (0x1 << PCI_RID_DOMAIN_SHIFT) | bsf;
+ else if (pem < 6)
+ *id = (0x3 << PCI_RID_DOMAIN_SHIFT) | bsf;
+ else if (pem < 9)
+ *id = (0x9 << PCI_RID_DOMAIN_SHIFT) | bsf;
+ else if (pem < 12)
+ *id = (0xB << PCI_RID_DOMAIN_SHIFT) | bsf;
+ else
+ return (ENXIO);
+
+ return (0);
+}
+
+static int
+thunder_pem_identify(device_t dev)
+{
+ struct thunder_pem_softc *sc;
+ rman_res_t start;
+
+ sc = device_get_softc(dev);
+ start = rman_get_start(sc->reg);
+
+ /* Calculate PEM designations from its address */
+ sc->node = (start >> SLI_NODE_SHIFT) & SLI_NODE_MASK;
+ sc->id = ((start >> SLI_ID_SHIFT) & SLI_ID_MASK) +
+ (SLI_PEMS_PER_NODE * sc->node);
+ sc->sli = sc->id % SLI_PEMS_PER_GROUP;
+ sc->sli_group = (sc->id / SLI_PEMS_PER_GROUP) % SLI_GROUPS_PER_NODE;
+ sc->sli_window_base = SLI_BASE |
+ (((uint64_t)sc->node) << SLI_NODE_SHIFT) |
+ ((uint64_t)sc->sli_group << SLI_GROUP_SHIFT);
+ sc->sli_window_base += SLI_WINDOW_SPACING * sc->sli;
+
+ return (0);
+}
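
Editorial note, worked example of the arithmetic above: for the register
window THUNDER_PEMn_REG_BASE(1) = 0x87e0c1000000, bits 45:44 give node 0 and
bits 26:24 give unit 1, so sc->id = 1, sc->sli = 1 and sc->sli_group = 0; the
SLI window base is then SLI_BASE for node 0, group 0, advanced by one
SLI_WINDOW_SPACING, i.e. 0x884000000000.
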
+
+static void
+thunder_pem_slix_s2m_regx_acc_modify(struct thunder_pem_softc *sc,
+ int sli_group, int slix)
+{
+ uint64_t regval;
+ bus_space_handle_t handle = 0;
+
+	KASSERT(slix >= 0 && slix < SLI_ACC_REG_CNT, ("Invalid SLI index"));
+
+ if (sli_group == 0)
+ handle = sli0_s2m_regx_base;
+ else if (sli_group == 1)
+ handle = sli1_s2m_regx_base;
+ else
+ device_printf(sc->dev, "SLI group is not correct\n");
+
+ if (handle) {
+ /* Clear lower 32-bits of the SLIx register */
+ regval = bus_space_read_8(sc->reg_bst, handle,
+ PEM_CFG_SLIX_TO_REG(slix));
+ regval &= ~(0xFFFFFFFFUL);
+ bus_space_write_8(sc->reg_bst, handle,
+ PEM_CFG_SLIX_TO_REG(slix), regval);
+ }
+}
+
+static int
+thunder_pem_link_init(struct thunder_pem_softc *sc)
+{
+ uint64_t regval;
+
+ /* check whether PEM is safe to access. */
+ regval = bus_space_read_8(sc->reg_bst, sc->reg_bsh, PEM_ON_REG);
+ if ((regval & PEM_CFG_LINK_MASK) != PEM_CFG_LINK_RDY) {
+ device_printf(sc->dev, "PEM%d is not ON\n", sc->id);
+ return (ENXIO);
+ }
+
+ regval = bus_space_read_8(sc->reg_bst, sc->reg_bsh, PEM_CTL_STATUS);
+ regval |= PEM_LINK_ENABLE;
+ bus_space_write_8(sc->reg_bst, sc->reg_bsh, PEM_CTL_STATUS, regval);
+
+ /* Wait 1ms as per Cavium specification */
+ DELAY(1000);
+
+ regval = thunder_pem_config_reg_read(sc, PCIERC_CFG032);
+
+ if (((regval & PEM_LINK_DLLA) == 0) || ((regval & PEM_LINK_LT) != 0)) {
+ device_printf(sc->dev, "PCIe RC: Port %d Link Timeout\n",
+ sc->id);
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static int
+thunder_pem_init(struct thunder_pem_softc *sc)
+{
+ int i, retval = 0;
+
+ retval = thunder_pem_link_init(sc);
+ if (retval) {
+ device_printf(sc->dev, "%s failed\n", __func__);
+		return (retval);
+ }
+
+ /* To support 32-bit PCIe devices, set S2M_REGx_ACC[BA]=0x0 */
+ for (i = 0; i < SLI_ACC_REG_CNT; i++) {
+ thunder_pem_slix_s2m_regx_acc_modify(sc, sc->sli_group, i);
+ }
+
+ return (retval);
+}
+
+static uint64_t
+thunder_pem_config_reg_read(struct thunder_pem_softc *sc, int reg)
+{
+ uint64_t data;
+
+ /* Write to ADDR register */
+ bus_space_write_8(sc->reg_bst, sc->reg_bsh, PEM_CFG_RD,
+ PEM_CFG_RD_REG_ALIGN(reg));
+ bus_space_barrier(sc->reg_bst, sc->reg_bsh, PEM_CFG_RD, 8,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
+ /* Read from DATA register */
+ data = PEM_CFG_RD_REG_DATA(bus_space_read_8(sc->reg_bst, sc->reg_bsh,
+ PEM_CFG_RD));
+
+ return (data);
+}
+
+static uint32_t
+thunder_pem_read_config(device_t dev, u_int bus, u_int slot,
+ u_int func, u_int reg, int bytes)
+{
+ uint64_t offset;
+ uint32_t data;
+ struct thunder_pem_softc *sc;
+ bus_space_tag_t t;
+ bus_space_handle_t h;
+
+ if ((bus > PCI_BUSMAX) || (slot > PCI_SLOTMAX) ||
+ (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX))
+ return (~0U);
+
+ sc = device_get_softc(dev);
+
+ /* Calculate offset */
+ offset = (bus << PEM_BUS_SHIFT) | (slot << PEM_SLOT_SHIFT) |
+ (func << PEM_FUNC_SHIFT);
+ t = sc->reg_bst;
+ h = sc->pem_sli_base;
+
+ bus_space_map(sc->reg_bst, sc->sli_window_base + offset,
+ PCIE_REGMAX, 0, &h);
+
+ switch (bytes) {
+ case 1:
+ data = bus_space_read_1(t, h, reg);
+ break;
+ case 2:
+ data = le16toh(bus_space_read_2(t, h, reg));
+ break;
+ case 4:
+ data = le32toh(bus_space_read_4(t, h, reg));
+ break;
+ default:
+ data = ~0U;
+ break;
+ }
+
+ bus_space_unmap(sc->reg_bst, h, PCIE_REGMAX);
+
+ return (data);
+}
+
+static void
+thunder_pem_write_config(device_t dev, u_int bus, u_int slot,
+ u_int func, u_int reg, uint32_t val, int bytes)
+{
+ uint64_t offset;
+ struct thunder_pem_softc *sc;
+ bus_space_tag_t t;
+ bus_space_handle_t h;
+
+ if ((bus > PCI_BUSMAX) || (slot > PCI_SLOTMAX) ||
+ (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX))
+ return;
+
+ sc = device_get_softc(dev);
+
+ /* Calculate offset */
+ offset = (bus << PEM_BUS_SHIFT) | (slot << PEM_SLOT_SHIFT) |
+ (func << PEM_FUNC_SHIFT);
+ t = sc->reg_bst;
+ h = sc->pem_sli_base;
+
+ bus_space_map(sc->reg_bst, sc->sli_window_base + offset,
+ PCIE_REGMAX, 0, &h);
+
+ switch (bytes) {
+ case 1:
+ bus_space_write_1(t, h, reg, val);
+ break;
+ case 2:
+ bus_space_write_2(t, h, reg, htole16(val));
+ break;
+ case 4:
+ bus_space_write_4(t, h, reg, htole32(val));
+ break;
+ default:
+ break;
+ }
+
+ bus_space_unmap(sc->reg_bst, h, PCIE_REGMAX);
+}
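
Editorial note: both configuration-space accessors above locate a function's
configuration space at offset (bus << PEM_BUS_SHIFT) | (slot << PEM_SLOT_SHIFT) |
(func << PEM_FUNC_SHIFT) within the SLI window; for example, bus 1, slot 0,
function 0 starts 0x1000000 bytes into sc->sli_window_base. That region is
bus_space_map()ed for PCIE_REGMAX bytes for each access and unmapped
afterwards.
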
+
+static struct resource *
+thunder_pem_alloc_resource(device_t dev, device_t child, int type, int *rid,
+ rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
+{
+ struct thunder_pem_softc *sc = device_get_softc(dev);
+ struct rman *rm = NULL;
+ struct resource *res;
+ device_t parent_dev;
+
+#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
+ if (type == PCI_RES_BUS)
+ return (pci_domain_alloc_bus(sc->id, child, rid, start, end,
+ count, flags));
+#endif
+ rm = thunder_pem_rman(sc, type);
+ if (rm == NULL) {
+ /* Find parent device. On ThunderX we know an exact path. */
+ parent_dev = device_get_parent(device_get_parent(dev));
+ return (BUS_ALLOC_RESOURCE(parent_dev, dev, type, rid, start,
+ end, count, flags));
+ }
+
+ if (!RMAN_IS_DEFAULT_RANGE(start, end)) {
+ /*
+ * We might get PHYS addresses here inherited from EFI.
+ * Convert to PCI if necessary.
+ */
+ if (range_addr_is_phys(sc->ranges, start, count)) {
+ start = range_addr_phys_to_pci(sc->ranges, start);
+ end = start + count - 1;
+ }
+ }
+
+ if (bootverbose) {
+ device_printf(dev,
+ "thunder_pem_alloc_resource: start=%#lx, end=%#lx, count=%#lx\n",
+ start, end, count);
+ }
+
+ res = rman_reserve_resource(rm, start, end, count, flags, child);
+ if (res == NULL)
+ goto fail;
+
+ rman_set_rid(res, *rid);
+
+ if (flags & RF_ACTIVE)
+ if (bus_activate_resource(child, type, *rid, res)) {
+ rman_release_resource(res);
+ goto fail;
+ }
+
+ return (res);
+
+fail:
+ if (bootverbose) {
+ device_printf(dev, "%s FAIL: type=%d, rid=%d, "
+ "start=%016lx, end=%016lx, count=%016lx, flags=%x\n",
+ __func__, type, *rid, start, end, count, flags);
+ }
+
+ return (NULL);
+}
+
+static int
+thunder_pem_release_resource(device_t dev, device_t child, int type, int rid,
+ struct resource *res)
+{
+ device_t parent_dev;
+#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
+ struct thunder_pem_softc *sc = device_get_softc(dev);
+
+ if (type == PCI_RES_BUS)
+ return (pci_domain_release_bus(sc->id, child, rid, res));
+#endif
+ /* Find parent device. On ThunderX we know an exact path. */
+ parent_dev = device_get_parent(device_get_parent(dev));
+
+ if ((type != SYS_RES_MEMORY) && (type != SYS_RES_IOPORT))
+ return (BUS_RELEASE_RESOURCE(parent_dev, child,
+ type, rid, res));
+
+ return (rman_release_resource(res));
+}
+
+static struct rman *
+thunder_pem_rman(struct thunder_pem_softc *sc, int type)
+{
+
+ switch (type) {
+ case SYS_RES_IOPORT:
+ return (&sc->io_rman);
+ case SYS_RES_MEMORY:
+ return (&sc->mem_rman);
+ default:
+ break;
+ }
+
+ return (NULL);
+}
+
+static int
+thunder_pem_probe(device_t dev)
+{
+ uint16_t pci_vendor_id;
+ uint16_t pci_device_id;
+
+ pci_vendor_id = pci_get_vendor(dev);
+ pci_device_id = pci_get_device(dev);
+
+ if ((pci_vendor_id == THUNDER_PEM_VENDOR_ID) &&
+ (pci_device_id == THUNDER_PEM_DEVICE_ID)) {
+ device_set_desc_copy(dev, THUNDER_PEM_DESC);
+ return (0);
+ }
+
+ return (ENXIO);
+}
+
+static int
+thunder_pem_attach(device_t dev)
+{
+ devclass_t pci_class;
+ device_t parent;
+ struct thunder_pem_softc *sc;
+ int error;
+ int rid;
+ int tuple;
+ uint64_t base, size;
+ struct rman *rman;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ /* Allocate memory for resource */
+ pci_class = devclass_find("pci");
+ parent = device_get_parent(dev);
+ if (device_get_devclass(parent) == pci_class)
+ rid = PCIR_BAR(0);
+ else
+ rid = RID_PEM_SPACE;
+
+ sc->reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &rid, RF_ACTIVE);
+ if (sc->reg == NULL) {
+ device_printf(dev, "Failed to allocate resource\n");
+ return (ENXIO);
+ }
+ sc->reg_bst = rman_get_bustag(sc->reg);
+ sc->reg_bsh = rman_get_bushandle(sc->reg);
+
+ /* Create the parent DMA tag to pass down the coherent flag */
+ error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
+ 1, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ BUS_SPACE_MAXSIZE, /* maxsize */
+ BUS_SPACE_UNRESTRICTED, /* nsegments */
+ BUS_SPACE_MAXSIZE, /* maxsegsize */
+ BUS_DMA_COHERENT, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->dmat);
+ if (error != 0)
+ return (error);
+
+ /* Map SLI, do it only once */
+ if (!sli0_s2m_regx_base) {
+ bus_space_map(sc->reg_bst, SLIX_S2M_REGX_ACC,
+ SLIX_S2M_REGX_ACC_SIZE, 0, &sli0_s2m_regx_base);
+ }
+ if (!sli1_s2m_regx_base) {
+ bus_space_map(sc->reg_bst, SLIX_S2M_REGX_ACC +
+ SLIX_S2M_REGX_ACC_SPACING, SLIX_S2M_REGX_ACC_SIZE, 0,
+ &sli1_s2m_regx_base);
+ }
+
+ if ((sli0_s2m_regx_base == 0) || (sli1_s2m_regx_base == 0)) {
+ device_printf(dev,
+ "bus_space_map failed to map slix_s2m_regx_base\n");
+ goto fail;
+ }
+
+ /* Identify PEM */
+ if (thunder_pem_identify(dev) != 0)
+ goto fail;
+
+ /* Initialize rman and allocate regions */
+ sc->mem_rman.rm_type = RMAN_ARRAY;
+ sc->mem_rman.rm_descr = "PEM PCIe Memory";
+ error = rman_init(&sc->mem_rman);
+ if (error != 0) {
+ device_printf(dev, "memory rman_init() failed. error = %d\n",
+ error);
+ goto fail;
+ }
+ sc->io_rman.rm_type = RMAN_ARRAY;
+ sc->io_rman.rm_descr = "PEM PCIe IO";
+ error = rman_init(&sc->io_rman);
+ if (error != 0) {
+ device_printf(dev, "IO rman_init() failed. error = %d\n",
+ error);
+ goto fail_mem;
+ }
+
+ /*
+ * We ignore the values that may have been provided in FDT
+ * and configure ranges according to the below formula
+ * for all types of devices. This is because some DTBs provided
+	 * by EFI do not have a proper ranges property, or lack it
+	 * entirely.
+ */
+ /* Fill memory window */
+ sc->ranges[0].pci_base = PCI_MEMORY_BASE;
+ sc->ranges[0].size = PCI_MEMORY_SIZE;
+ sc->ranges[0].phys_base = sc->sli_window_base + SLI_PCI_OFFSET +
+ sc->ranges[0].pci_base;
+ sc->ranges[0].flags = SYS_RES_MEMORY;
+
+ /* Fill IO window */
+ sc->ranges[1].pci_base = PCI_IO_BASE;
+ sc->ranges[1].size = PCI_IO_SIZE;
+ sc->ranges[1].phys_base = sc->sli_window_base + SLI_PCI_OFFSET +
+ sc->ranges[1].pci_base;
+ sc->ranges[1].flags = SYS_RES_IOPORT;
+
+ for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
+ base = sc->ranges[tuple].pci_base;
+ size = sc->ranges[tuple].size;
+ if (size == 0)
+ continue; /* empty range element */
+
+ rman = thunder_pem_rman(sc, sc->ranges[tuple].flags);
+ if (rman != NULL)
+ error = rman_manage_region(rman, base,
+ base + size - 1);
+ else
+ error = EINVAL;
+ if (error) {
+ device_printf(dev,
+ "rman_manage_region() failed. error = %d\n", error);
+ rman_fini(&sc->mem_rman);
+ return (error);
+ }
+ if (bootverbose) {
+ device_printf(dev,
+ "\tPCI addr: 0x%jx, CPU addr: 0x%jx, Size: 0x%jx, Flags:0x%jx\n",
+ sc->ranges[tuple].pci_base,
+ sc->ranges[tuple].phys_base,
+ sc->ranges[tuple].size,
+ sc->ranges[tuple].flags);
+ }
+ }
+
+ if (thunder_pem_init(sc)) {
+ device_printf(dev, "Failure during PEM init\n");
+ goto fail_io;
+ }
+
+ device_add_child(dev, "pci", -1);
+
+ return (bus_generic_attach(dev));
+
+fail_io:
+ rman_fini(&sc->io_rman);
+fail_mem:
+ rman_fini(&sc->mem_rman);
+fail:
+ bus_free_resource(dev, SYS_RES_MEMORY, sc->reg);
+ return (ENXIO);
+}
+
+static void
+thunder_pem_release_all(device_t dev)
+{
+ struct thunder_pem_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ rman_fini(&sc->io_rman);
+ rman_fini(&sc->mem_rman);
+
+ if (sc->reg != NULL)
+ bus_free_resource(dev, SYS_RES_MEMORY, sc->reg);
+}
+
+static int
+thunder_pem_detach(device_t dev)
+{
+
+ thunder_pem_release_all(dev);
+
+ return (0);
+}
diff --git a/sys/arm64/cavium/thunder_pcie_pem.h b/sys/arm64/cavium/thunder_pcie_pem.h
new file mode 100644
index 000000000000..2fc8df1b0d6a
--- /dev/null
+++ b/sys/arm64/cavium/thunder_pcie_pem.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2016 Cavium Inc.
+ * All rights reserved.
+ *
+ * Developed by Semihalf.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __THUNDER_PCIE_PEM_H__
+#define __THUNDER_PCIE_PEM_H__
+
+#define THUNDER_PEM_DESC "ThunderX PEM"
+
+struct thunder_pem_softc {
+ device_t dev;
+ struct resource *reg;
+ bus_space_tag_t reg_bst;
+ bus_space_handle_t reg_bsh;
+ bus_dma_tag_t dmat;
+ struct pcie_range ranges[MAX_RANGES_TUPLES];
+ struct rman mem_rman;
+ struct rman io_rman;
+ bus_space_handle_t pem_sli_base;
+ uint32_t node;
+ uint32_t id;
+ uint32_t sli;
+ uint32_t sli_group;
+ uint64_t sli_window_base;
+};
+
+#endif
diff --git a/sys/arm64/cavium/thunder_pcie_pem_fdt.c b/sys/arm64/cavium/thunder_pcie_pem_fdt.c
new file mode 100644
index 000000000000..a605b3c56efd
--- /dev/null
+++ b/sys/arm64/cavium/thunder_pcie_pem_fdt.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2016 Cavium Inc.
+ * All rights reserved.
+ *
+ * Developed by Semihalf.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_platform.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <sys/kernel.h>
+#include <sys/rman.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/endian.h>
+#include <sys/cpuset.h>
+
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcib_private.h>
+#include <dev/pci/pci_host_generic.h>
+
+#include <machine/intr.h>
+
+#include "thunder_pcie_common.h"
+#include "thunder_pcie_pem.h"
+
+#include "pcib_if.h"
+
+static int thunder_pem_fdt_probe(device_t);
+static int thunder_pem_fdt_alloc_msix(device_t, device_t, int *);
+static int thunder_pem_fdt_release_msix(device_t, device_t, int);
+static int thunder_pem_fdt_alloc_msi(device_t, device_t, int, int, int *);
+static int thunder_pem_fdt_release_msi(device_t, device_t, int, int *);
+static int thunder_pem_fdt_map_msi(device_t, device_t, int, uint64_t *,
+ uint32_t *);
+static int thunder_pem_fdt_get_id(device_t, device_t, enum pci_id_type,
+ uintptr_t *);
+
+static device_method_t thunder_pem_fdt_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, thunder_pem_fdt_probe),
+
+ /* pcib interface */
+ DEVMETHOD(pcib_alloc_msix, thunder_pem_fdt_alloc_msix),
+ DEVMETHOD(pcib_release_msix, thunder_pem_fdt_release_msix),
+ DEVMETHOD(pcib_alloc_msi, thunder_pem_fdt_alloc_msi),
+ DEVMETHOD(pcib_release_msi, thunder_pem_fdt_release_msi),
+ DEVMETHOD(pcib_map_msi, thunder_pem_fdt_map_msi),
+ DEVMETHOD(pcib_get_id, thunder_pem_fdt_get_id),
+
+ /* End */
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(pcib, thunder_pem_fdt_driver, thunder_pem_fdt_methods,
+ sizeof(struct thunder_pem_softc), thunder_pem_driver);
+
+static devclass_t thunder_pem_fdt_devclass;
+
+DRIVER_MODULE(thunder_pem, simplebus, thunder_pem_fdt_driver,
+ thunder_pem_fdt_devclass, 0, 0);
+DRIVER_MODULE(thunder_pem, ofwbus, thunder_pem_fdt_driver,
+ thunder_pem_fdt_devclass, 0, 0);
+
+static int
+thunder_pem_fdt_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_is_compatible(dev, "cavium,pci-host-thunder-pem")) {
+ device_set_desc(dev, THUNDER_PEM_DESC);
+ return (BUS_PROBE_DEFAULT);
+ }
+
+ return (ENXIO);
+}
+
+static int
+thunder_pem_fdt_alloc_msi(device_t pci, device_t child, int count, int maxcount,
+ int *irqs)
+{
+ phandle_t msi_parent;
+ int err;
+
+ err = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
+ &msi_parent, NULL);
+ if (err != 0)
+ return (err);
+ return (intr_alloc_msi(pci, child, msi_parent, count, maxcount,
+ irqs));
+}
+
+static int
+thunder_pem_fdt_release_msi(device_t pci, device_t child, int count, int *irqs)
+{
+ phandle_t msi_parent;
+ int err;
+
+ err = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
+ &msi_parent, NULL);
+ if (err != 0)
+ return (err);
+ return (intr_release_msi(pci, child, msi_parent, count, irqs));
+}
+
+static int
+thunder_pem_fdt_alloc_msix(device_t pci, device_t child, int *irq)
+{
+ phandle_t msi_parent;
+ int err;
+
+ err = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
+ &msi_parent, NULL);
+ if (err != 0)
+ return (err);
+ return (intr_alloc_msix(pci, child, msi_parent, irq));
+}
+
+static int
+thunder_pem_fdt_release_msix(device_t pci, device_t child, int irq)
+{
+ phandle_t msi_parent;
+ int err;
+
+ err = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
+ &msi_parent, NULL);
+ if (err != 0)
+ return (err);
+ return (intr_release_msix(pci, child, msi_parent, irq));
+}
+
+static int
+thunder_pem_fdt_map_msi(device_t pci, device_t child, int irq, uint64_t *addr,
+ uint32_t *data)
+{
+ phandle_t msi_parent;
+ int err;
+
+ err = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
+ &msi_parent, NULL);
+ if (err != 0)
+ return (err);
+ return (intr_map_msi(pci, child, msi_parent, irq, addr, data));
+}
+
+static int
+thunder_pem_fdt_get_id(device_t dev, device_t child, enum pci_id_type type,
+ uintptr_t *id)
+{
+ phandle_t node;
+ int err;
+ uint32_t rid;
+ uint16_t pci_rid;
+
+ if (type != PCI_ID_MSI)
+ return (pcib_get_id(dev, child, type, id));
+
+ node = ofw_bus_get_node(dev);
+ pci_rid = pci_get_rid(child);
+
+ err = ofw_bus_msimap(node, pci_rid, NULL, &rid);
+ if (err != 0)
+ return (err);
+ *id = rid;
+
+ return (0);
+}
diff --git a/sys/arm64/cloudabi32/cloudabi32_sysvec.c b/sys/arm64/cloudabi32/cloudabi32_sysvec.c
new file mode 100644
index 000000000000..96d9432ff368
--- /dev/null
+++ b/sys/arm64/cloudabi32/cloudabi32_sysvec.c
@@ -0,0 +1,204 @@
+/*-
+ * Copyright (c) 2015-2017 Nuxi, https://nuxi.nl/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/imgact.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/sysent.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/frame.h>
+#include <machine/pcb.h>
+#include <machine/vmparam.h>
+
+#include <compat/cloudabi/cloudabi_util.h>
+
+#include <compat/cloudabi32/cloudabi32_syscall.h>
+#include <compat/cloudabi32/cloudabi32_util.h>
+
+extern const char *cloudabi32_syscallnames[];
+extern struct sysent cloudabi32_sysent[];
+
+static void
+cloudabi32_proc_setregs(struct thread *td, struct image_params *imgp,
+ uintptr_t stack)
+{
+ struct trapframe *regs;
+
+ regs = td->td_frame;
+ memset(regs, 0, sizeof(*regs));
+ regs->tf_x[0] =
+ stack + roundup(sizeof(cloudabi32_tcb_t), sizeof(register_t));
+ regs->tf_x[13] = STACKALIGN(stack);
+ regs->tf_elr = imgp->entry_addr;
+ regs->tf_spsr |= PSR_AARCH32;
+ (void)cpu_set_user_tls(td, TO_PTR(stack));
+}
+
+static int
+cloudabi32_fetch_syscall_args(struct thread *td)
+{
+ struct trapframe *frame;
+ struct syscall_args *sa;
+ int error;
+
+ frame = td->td_frame;
+ sa = &td->td_sa;
+
+ /* Obtain system call number. */
+ sa->code = frame->tf_x[0];
+ if (sa->code >= CLOUDABI32_SYS_MAXSYSCALL)
+ return (ENOSYS);
+ sa->callp = &cloudabi32_sysent[sa->code];
+ sa->narg = sa->callp->sy_narg;
+
+ /*
+ * Fetch system call arguments.
+ *
+ * The vDSO has already made sure that the arguments are
+ * eight-byte aligned. Pointers and size_t parameters are
+ * zero-extended. This makes it possible to copy in the
+ * arguments directly. As long as the call doesn't use 32-bit
+ * data structures, we can just invoke the same system call
+ * implementation used by 64-bit processes.
+ */
+ error = copyin((void *)frame->tf_x[2], sa->args,
+ sa->narg * sizeof(sa->args[0]));
+ if (error != 0)
+ return (error);
+
+ /* Default system call return values. */
+ td->td_retval[0] = 0;
+ td->td_retval[1] = 0;
+ return (0);
+}
+
+static void
+cloudabi32_set_syscall_retval(struct thread *td, int error)
+{
+ struct trapframe *frame = td->td_frame;
+
+ switch (error) {
+ case 0:
+ /*
+ * System call succeeded.
+ *
+ * Simply copy out the 64-bit return values into the
+ * same buffer provided for system call arguments. The
+ * vDSO will copy them to the right spot, truncating
+ * pointers and size_t values to 32 bits.
+ */
+ if (copyout(td->td_retval, (void *)frame->tf_x[2],
+ sizeof(td->td_retval)) == 0) {
+ frame->tf_x[0] = 0;
+ frame->tf_spsr &= ~PSR_C;
+ } else {
+ frame->tf_x[0] = CLOUDABI_EFAULT;
+ frame->tf_spsr |= PSR_C;
+ }
+ break;
+ case ERESTART:
+ /* Restart system call. */
+ frame->tf_elr -= 4;
+ break;
+ case EJUSTRETURN:
+ break;
+ default:
+ /* System call returned an error. */
+ frame->tf_x[0] = cloudabi_convert_errno(error);
+ frame->tf_spsr |= PSR_C;
+ break;
+ }
+}
+
+static void
+cloudabi32_schedtail(struct thread *td)
+{
+ struct trapframe *frame = td->td_frame;
+ register_t retval[2];
+
+ /* Return values for processes returning from fork. */
+ if ((td->td_pflags & TDP_FORKING) != 0) {
+ retval[0] = CLOUDABI_PROCESS_CHILD;
+ retval[1] = td->td_tid;
+ copyout(retval, (void *)frame->tf_x[2], sizeof(retval));
+ }
+ frame->tf_spsr |= PSR_AARCH32;
+}
+
+int
+cloudabi32_thread_setregs(struct thread *td,
+ const cloudabi32_threadattr_t *attr, uint32_t tcb)
+{
+ struct trapframe *frame;
+
+ /*
+	 * Pass the thread ID of the new thread and the argument
+	 * pointer provided by the parent thread as arguments to
+	 * the entry point.
+ */
+ frame = td->td_frame;
+ memset(frame, 0, sizeof(*frame));
+ frame->tf_x[0] = td->td_tid;
+ frame->tf_x[1] = attr->argument;
+ frame->tf_x[13] = STACKALIGN(attr->stack + attr->stack_len);
+ frame->tf_elr = attr->entry_point;
+
+ /* Set up TLS. */
+ return (cpu_set_user_tls(td, TO_PTR(tcb)));
+}
+
+static struct sysentvec cloudabi32_elf_sysvec = {
+ .sv_size = CLOUDABI32_SYS_MAXSYSCALL,
+ .sv_table = cloudabi32_sysent,
+ .sv_fixup = cloudabi32_fixup,
+ .sv_name = "CloudABI ELF32",
+ .sv_coredump = elf32_coredump,
+ .sv_minuser = VM_MIN_ADDRESS,
+ .sv_maxuser = (uintmax_t)1 << 32,
+ .sv_stackprot = VM_PROT_READ | VM_PROT_WRITE,
+ .sv_copyout_strings = cloudabi32_copyout_strings,
+ .sv_setregs = cloudabi32_proc_setregs,
+ .sv_flags = SV_ABI_CLOUDABI | SV_CAPSICUM | SV_ILP32,
+ .sv_set_syscall_retval = cloudabi32_set_syscall_retval,
+ .sv_fetch_syscall_args = cloudabi32_fetch_syscall_args,
+ .sv_syscallnames = cloudabi32_syscallnames,
+ .sv_schedtail = cloudabi32_schedtail,
+};
+
+INIT_SYSENTVEC(elf_sysvec, &cloudabi32_elf_sysvec);
+
+Elf32_Brandinfo cloudabi32_brand = {
+ .brand = ELFOSABI_CLOUDABI,
+ .machine = EM_ARM,
+ .sysvec = &cloudabi32_elf_sysvec,
+ .flags = BI_BRAND_ONLY_STATIC,
+};
diff --git a/sys/arm64/cloudabi64/cloudabi64_sysvec.c b/sys/arm64/cloudabi64/cloudabi64_sysvec.c
new file mode 100644
index 000000000000..a493774551cf
--- /dev/null
+++ b/sys/arm64/cloudabi64/cloudabi64_sysvec.c
@@ -0,0 +1,188 @@
+/*-
+ * Copyright (c) 2015 Nuxi, https://nuxi.nl/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/imgact.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/sysent.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/frame.h>
+#include <machine/pcb.h>
+#include <machine/vmparam.h>
+
+#include <compat/cloudabi/cloudabi_util.h>
+
+#include <compat/cloudabi64/cloudabi64_syscall.h>
+#include <compat/cloudabi64/cloudabi64_util.h>
+
+extern const char *cloudabi64_syscallnames[];
+extern struct sysent cloudabi64_sysent[];
+
+static void
+cloudabi64_proc_setregs(struct thread *td, struct image_params *imgp,
+ uintptr_t stack)
+{
+ struct trapframe *regs;
+
+ exec_setregs(td, imgp, stack);
+
+ /*
+ * The stack now contains a pointer to the TCB and the auxiliary
+ * vector. Let x0 point to the auxiliary vector, and set
+ * tpidr_el0 to the TCB.
+ */
+ regs = td->td_frame;
+ regs->tf_x[0] =
+ stack + roundup(sizeof(cloudabi64_tcb_t), sizeof(register_t));
+ (void)cpu_set_user_tls(td, TO_PTR(stack));
+}
+
+static int
+cloudabi64_fetch_syscall_args(struct thread *td)
+{
+ struct trapframe *frame;
+ struct syscall_args *sa;
+ int i;
+
+ frame = td->td_frame;
+ sa = &td->td_sa;
+
+ /* Obtain system call number. */
+ sa->code = frame->tf_x[8];
+ if (sa->code >= CLOUDABI64_SYS_MAXSYSCALL)
+ return (ENOSYS);
+ sa->callp = &cloudabi64_sysent[sa->code];
+ sa->narg = sa->callp->sy_narg;
+
+ /* Fetch system call arguments. */
+ for (i = 0; i < MAXARGS; i++)
+ sa->args[i] = frame->tf_x[i];
+
+ /* Default system call return values. */
+ td->td_retval[0] = 0;
+ td->td_retval[1] = frame->tf_x[1];
+ return (0);
+}
+
+static void
+cloudabi64_set_syscall_retval(struct thread *td, int error)
+{
+ struct trapframe *frame = td->td_frame;
+
+ switch (error) {
+ case 0:
+ /* System call succeeded. */
+ frame->tf_x[0] = td->td_retval[0];
+ frame->tf_x[1] = td->td_retval[1];
+ frame->tf_spsr &= ~PSR_C;
+ break;
+ case ERESTART:
+ /* Restart system call. */
+ frame->tf_elr -= 4;
+ break;
+ case EJUSTRETURN:
+ break;
+ default:
+ /* System call returned an error. */
+ frame->tf_x[0] = cloudabi_convert_errno(error);
+ frame->tf_spsr |= PSR_C;
+ break;
+ }
+}
+
+static void
+cloudabi64_schedtail(struct thread *td)
+{
+ struct trapframe *frame = td->td_frame;
+
+ /*
+ * Initial register values for processes returning from fork.
+ * Make sure that we only set these values when forking, not
+ * when creating a new thread.
+ */
+ if ((td->td_pflags & TDP_FORKING) != 0) {
+ frame->tf_x[0] = CLOUDABI_PROCESS_CHILD;
+ frame->tf_x[1] = td->td_tid;
+ }
+}
+
+int
+cloudabi64_thread_setregs(struct thread *td,
+ const cloudabi64_threadattr_t *attr, uint64_t tcb)
+{
+ struct trapframe *frame;
+ stack_t stack;
+
+ /* Perform standard register initialization. */
+ stack.ss_sp = TO_PTR(attr->stack);
+ stack.ss_size = attr->stack_len;
+ cpu_set_upcall(td, TO_PTR(attr->entry_point), NULL, &stack);
+
+ /*
+	 * Pass the thread ID of the new thread and the argument
+	 * pointer provided by the parent thread as arguments to
+	 * the entry point.
+ */
+ frame = td->td_frame;
+ frame->tf_x[0] = td->td_tid;
+ frame->tf_x[1] = attr->argument;
+
+ /* Set up TLS. */
+ return (cpu_set_user_tls(td, TO_PTR(tcb)));
+}
+
+static struct sysentvec cloudabi64_elf_sysvec = {
+ .sv_size = CLOUDABI64_SYS_MAXSYSCALL,
+ .sv_table = cloudabi64_sysent,
+ .sv_fixup = cloudabi64_fixup,
+ .sv_name = "CloudABI ELF64",
+ .sv_coredump = elf64_coredump,
+ .sv_minuser = VM_MIN_ADDRESS,
+ .sv_maxuser = VM_MAXUSER_ADDRESS,
+ .sv_stackprot = VM_PROT_READ | VM_PROT_WRITE,
+ .sv_copyout_strings = cloudabi64_copyout_strings,
+ .sv_setregs = cloudabi64_proc_setregs,
+ .sv_flags = SV_ABI_CLOUDABI | SV_CAPSICUM | SV_LP64,
+ .sv_set_syscall_retval = cloudabi64_set_syscall_retval,
+ .sv_fetch_syscall_args = cloudabi64_fetch_syscall_args,
+ .sv_syscallnames = cloudabi64_syscallnames,
+ .sv_schedtail = cloudabi64_schedtail,
+};
+
+INIT_SYSENTVEC(elf_sysvec, &cloudabi64_elf_sysvec);
+
+Elf64_Brandinfo cloudabi64_brand = {
+ .brand = ELFOSABI_CLOUDABI,
+ .machine = EM_AARCH64,
+ .sysvec = &cloudabi64_elf_sysvec,
+ .flags = BI_CAN_EXEC_DYN | BI_BRAND_ONLY_STATIC,
+};
diff --git a/sys/arm64/conf/DEFAULTS b/sys/arm64/conf/DEFAULTS
new file mode 100644
index 000000000000..ffc08435380f
--- /dev/null
+++ b/sys/arm64/conf/DEFAULTS
@@ -0,0 +1,16 @@
+#
+# DEFAULTS -- Default kernel configuration file for FreeBSD/arm64
+#
+# $FreeBSD$
+
+machine arm64 aarch64
+
+# Pseudo devices.
+device mem # Memory and kernel memory devices
+
+# Default partitioning schemes
+options GEOM_PART_BSD
+options GEOM_PART_MBR
+options GEOM_PART_GPT
+
+options NEW_PCIB
diff --git a/sys/arm64/conf/GENERIC b/sys/arm64/conf/GENERIC
new file mode 100644
index 000000000000..7cf5831a39fa
--- /dev/null
+++ b/sys/arm64/conf/GENERIC
@@ -0,0 +1,364 @@
+#
+# GENERIC -- Generic kernel configuration file for FreeBSD/arm64
+#
+# For more information on this file, please read the config(5) manual page,
+# and/or the handbook section on Kernel Configuration Files:
+#
+# https://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html
+#
+# The handbook is also available locally in /usr/share/doc/handbook
+# if you've installed the doc distribution, otherwise always see the
+# FreeBSD World Wide Web server (https://www.FreeBSD.org/) for the
+# latest information.
+#
+# An exhaustive list of options and more detailed explanations of the
+# device lines is also present in the ../../conf/NOTES and NOTES files.
+# If you are in doubt as to the purpose or necessity of a line, check first
+# in NOTES.
+#
+# $FreeBSD$
+
+cpu ARM64
+ident GENERIC
+
+makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols
+makeoptions WITH_CTF=1 # Run ctfconvert(1) for DTrace support
+
+options SCHED_ULE # ULE scheduler
+options NUMA # Non-Uniform Memory Architecture support
+options PREEMPTION # Enable kernel thread preemption
+options VIMAGE # Subsystem virtualization, e.g. VNET
+options INET # InterNETworking
+options INET6 # IPv6 communications protocols
+options IPSEC_SUPPORT # Allow kldload of ipsec and tcpmd5
+options TCP_HHOOK # hhook(9) framework for TCP
+options TCP_OFFLOAD # TCP offload
+options TCP_RFC7413 # TCP Fast Open
+options SCTP_SUPPORT # Allow kldload of SCTP
+options FFS # Berkeley Fast Filesystem
+options SOFTUPDATES # Enable FFS soft updates support
+options UFS_ACL # Support for access control lists
+options UFS_DIRHASH # Improve performance on big directories
+options UFS_GJOURNAL # Enable gjournal-based UFS journaling
+options QUOTA # Enable disk quotas for UFS
+options MD_ROOT # MD is a potential root device
+options NFSCL # Network Filesystem Client
+options NFSD # Network Filesystem Server
+options NFSLOCKD # Network Lock Manager
+options NFS_ROOT # NFS usable as /, requires NFSCL
+options MSDOSFS # MSDOS Filesystem
+options CD9660 # ISO 9660 Filesystem
+options PROCFS # Process filesystem (requires PSEUDOFS)
+options PSEUDOFS # Pseudo-filesystem framework
+options TMPFS # Efficient memory filesystem
+options GEOM_RAID # Soft RAID functionality.
+options GEOM_LABEL # Provides labelization
+options COMPAT_FREEBSD32 # Compatible with FreeBSD/arm
+options COMPAT_FREEBSD11 # Compatible with FreeBSD11
+options COMPAT_FREEBSD12 # Compatible with FreeBSD12
+options SCSI_DELAY=5000 # Delay (in ms) before probing SCSI
+options KTRACE # ktrace(1) support
+options STACK # stack(9) support
+options SYSVSHM # SYSV-style shared memory
+options SYSVMSG # SYSV-style message queues
+options SYSVSEM # SYSV-style semaphores
+options _KPOSIX_PRIORITY_SCHEDULING # POSIX P1003_1B real-time extensions
+options PRINTF_BUFR_SIZE=128 # Prevent printf output from being interspersed.
+options KBD_INSTALL_CDEV # install a CDEV entry in /dev
+options HWPMC_HOOKS # Necessary kernel hooks for hwpmc(4)
+options AUDIT # Security event auditing
+options CAPABILITY_MODE # Capsicum capability mode
+options CAPABILITIES # Capsicum capabilities
+options MAC # TrustedBSD MAC Framework
+options KDTRACE_FRAME # Ensure frames are compiled in
+options KDTRACE_HOOKS # Kernel DTrace hooks
+options DDB_CTF # Kernel ELF linker loads CTF data
+options VFP # Floating-point support
+options RACCT # Resource accounting framework
+options RACCT_DEFAULT_TO_DISABLED # Set kern.racct.enable=0 by default
+options RCTL # Resource limits
+options SMP
+options INTRNG
+
+# Debugging support. Always need this:
+options KDB # Enable kernel debugger support.
+options KDB_TRACE # Print a stack trace for a panic.
+# For full debugger support use (turn off in stable branch):
+options DDB # Support DDB.
+#options GDB # Support remote GDB.
+options DEADLKRES # Enable the deadlock resolver
+options INVARIANTS # Enable extra run-time sanity checks
+options INVARIANT_SUPPORT # Extra sanity checks of internal structures, required by INVARIANTS
+options WITNESS # Enable checks to detect deadlocks and cycles
+options WITNESS_SKIPSPIN # Don't run witness on spinlocks for speed
+options MALLOC_DEBUG_MAXZONES=8 # Separate malloc(9) zones
+options ALT_BREAK_TO_DEBUGGER # Enter debugger on keyboard escape sequence
+options USB_DEBUG # enable debug msgs
+options VERBOSE_SYSINIT=0 # Support debug.verbose_sysinit, off by default
+
+# Kernel Sanitizers
+#options COVERAGE # Generic kernel coverage. Used by KCOV
+#options KCOV # Kernel Coverage Sanitizer
+# Warning: KUBSAN can result in a kernel too large for loader to load
+#options KUBSAN # Kernel Undefined Behavior Sanitizer
+#options KCSAN # Kernel Concurrency Sanitizer
+
+# Kernel dump features.
+options EKCD # Support for encrypted kernel dumps
+options GZIO # gzip-compressed kernel and user dumps
+options ZSTDIO # zstd-compressed kernel and user dumps
+options DEBUGNET # debugnet networking
+options NETDUMP # netdump(4) client support
+
+# SoC support
+options SOC_ALLWINNER_A64
+options SOC_ALLWINNER_H5
+options SOC_ALLWINNER_H6
+options SOC_CAVM_THUNDERX
+options SOC_FREESCALE_IMX8
+options SOC_HISI_HI6220
+options SOC_INTEL_STRATIX10
+options SOC_BRCM_BCM2837
+options SOC_BRCM_BCM2838
+options SOC_MARVELL_8K
+options SOC_NXP_LS
+options SOC_ROCKCHIP_RK3328
+options SOC_ROCKCHIP_RK3399
+options SOC_XILINX_ZYNQ
+
+# Timer drivers
+device a10_timer
+
+# Annapurna Alpine drivers
+device al_ccu # Alpine Cache Coherency Unit
+device al_nb_service # Alpine North Bridge Service
+device al_iofic # I/O Fabric Interrupt Controller
+device al_serdes # Serializer/Deserializer
+device al_udma # Universal DMA
+
+# Qualcomm Snapdragon drivers
+device qcom_gcc # Global Clock Controller
+
+# VirtIO support
+device virtio
+device virtio_pci
+device virtio_mmio
+device virtio_blk
+device vtnet
+
+# CPU frequency control
+device cpufreq
+
+# Bus drivers
+device pci
+device pci_n1sdp # ARM Neoverse N1 SDP PCI
+device al_pci # Annapurna Alpine PCI-E
+options PCI_HP # PCI-Express native HotPlug
+options PCI_IOV # PCI SR-IOV support
+
+# PCI/PCI-X/PCIe Ethernet NICs that use iflib infrastructure
+device iflib
+device em # Intel PRO/1000 Gigabit Ethernet Family
+device ix # Intel 10Gb Ethernet Family
+
+# Ethernet NICs
+device mdio
+device mii
+device miibus # MII bus support
+device awg # Allwinner EMAC Gigabit Ethernet
+device axgbe # AMD Opteron A1100 integrated NIC
+device msk # Marvell/SysKonnect Yukon II Gigabit Ethernet
+device neta # Marvell Armada 370/38x/XP/3700 NIC
+device smc # SMSC LAN91C111
+device vnic # Cavium ThunderX NIC
+device al_eth # Annapurna Alpine Ethernet NIC
+device dwc_rk # Rockchip Designware
+device dwc_socfpga # Altera SOCFPGA Ethernet MAC
+device genet # Broadcom on RPi4
+device ffec # iMX FFEC
+
+# Etherswitch devices
+device etherswitch # Enable etherswitch support
+device miiproxy # Required for etherswitch
+device e6000sw # Marvell mv88e6085 based switches
+
+# Block devices
+device ahci
+device scbus
+device da
+
+# ATA/SCSI peripherals
+device pass # Passthrough device (direct ATA/SCSI access)
+
+# NVM Express (NVMe) support
+device nvme # base NVMe driver
+options NVME_USE_NVD=0 # prefer the cam(4) based nda(4) driver
+device nvd # expose NVMe namespaces as disks, depends on nvme
+
+# MMC/SD/SDIO Card slot support
+device sdhci
+device sdhci_xenon # Marvell Xenon SD/MMC controller
+device aw_mmc # Allwinner SD/MMC controller
+device mmc # mmc/sd bus
+device mmcsd # mmc/sd flash cards
+device dwmmc
+device dwmmc_altera
+device dwmmc_hisi
+device rk_dwmmc
+device rk_emmcphy
+
+# Serial (COM) ports
+device uart # Generic UART driver
+device uart_imx # iMX8 UART
+device uart_msm # Qualcomm MSM UART driver
+device uart_mu # RPI3 aux port
+device uart_mvebu # Armada 3700 UART driver
+device uart_ns8250 # ns8250-type UART driver
+device uart_snps
+device pl011
+
+# USB support
+device aw_usbphy # Allwinner USB PHY
+device rk_usb2phy # Rockchip USB2PHY
+device rk_typec_phy # Rockchip TypeC PHY
+device dwcotg # DWC OTG controller
+device ohci # OHCI USB interface
+device ehci # EHCI USB interface (USB 2.0)
+device ehci_mv # Marvell EHCI USB interface
+device xhci # XHCI USB interface (USB 3.0)
+device dwc3 # Synopsys DWC controller
+device aw_dwc3 # Allwinner DWC3 controller
+device rk_dwc3 # Rockchip DWC3 controller
+device usb # USB Bus (required)
+device ukbd # Keyboard
+device umass # Disks/Mass storage - Requires scbus and da
+
+# USB ethernet support
+device muge
+device smcphy
+device smsc
+
+# Sound support
+device sound
+device a10_codec
+
+# DMA controller
+device a31_dmac
+
+# GPIO / PINCTRL
+device a37x0_gpio # Marvell Armada 37x0 GPIO controller
+device aw_gpio # Allwinner GPIO controller
+device dwgpio # Synopsys DesignWare APB GPIO Controller
+device gpio
+device gpioled
+device fdt_pinctrl
+device gpioregulator
+device ls1046_gpio # LS1046A GPIO controller
+device mv_gpio # Marvell GPIO controller
+device mvebu_pinctrl # Marvell Pinmux Controller
+device rk_gpio # RockChip GPIO Controller
+device rk_pinctrl # RockChip Pinmux Controller
+
+# I2C
+device a37x0_iic # Armada 37x0 I2C controller
+device aw_rsb # Allwinner Reduced Serial Bus
+device bcm2835_bsc # Broadcom BCM283x I2C bus
+device iicbus
+device iic
+device twsi # Allwinner I2C controller
+device rk_i2c # RockChip I2C controller
+device syr827 # Silergy SYR827 PMIC
+device sy8106a # SY8106A Buck Regulator
+device vf_i2c # Freescale Vybrid I2C controller
+device fsliic # Freescale iMX I2C controller
+
+# Clock and reset controllers
+device aw_ccu # Allwinner clock controller
+
+# Interrupt controllers
+device aw_nmi # Allwinner NMI support
+device mv_cp110_icu # Marvell CP110 ICU
+device mv_ap806_gicp # Marvell AP806 GICP
+device mv_ap806_sei # Marvell AP806 SEI
+
+# Real-time clock support
+device aw_rtc # Allwinner Real-time Clock
+device mv_rtc # Marvell Real-time Clock
+
+# Crypto accelerators
+device safexcel # Inside Secure EIP-97
+
+# Watchdog controllers
+device aw_wdog # Allwinner Watchdog
+
+# Power management controllers
+device axp81x # X-Powers AXP81x PMIC
+device rk805 # RockChip RK805 PMIC
+
+# EFUSE
+device aw_sid # Allwinner Secure ID EFUSE
+
+# Thermal sensors
+device aw_thermal # Allwinner Thermal Sensor Controller
+device mv_thermal # Marvell Thermal Sensor Controller
+
+# SPI
+device spibus
+device a37x0_spi # Marvell Armada 37x0 SPI Controller
+device bcm2835_spi # Broadcom BCM283x SPI bus
+device rk_spi # RockChip SPI controller
+
+# PWM
+device pwm
+device aw_pwm
+device rk_pwm
+
+# Console
+device vt
+device kbdmux
+
+device vt_efifb
+
+# EVDEV support
+device evdev # input event device support
+options EVDEV_SUPPORT # evdev support in legacy drivers
+device uinput # install /dev/uinput cdev
+device aw_cir
+
+# Pseudo devices.
+device crypto # core crypto support
+device loop # Network loopback
+device ether # Ethernet support
+device vlan # 802.1Q VLAN support
+device tuntap # Packet tunnel.
+device md # Memory "disks"
+device gif # IPv6 and IPv4 tunneling
+device firmware # firmware assist module
+options EFIRT # EFI Runtime Services
+
+# EXT_RESOURCES pseudo devices
+options EXT_RESOURCES
+device clk
+device phy
+device hwreset
+device nvmem
+device regulator
+device syscon
+device aw_syscon
+
+# IO Domains
+device rk_iodomain
+
+# The `bpf' device enables the Berkeley Packet Filter.
+# Be aware of the administrative consequences of enabling this!
+# Note that 'bpf' is required for DHCP.
+device bpf # Berkeley packet filter
+
+# Chip-specific errata
+options THUNDERX_PASS_1_1_ERRATA
+
+options FDT
+device acpi
+
+# DTBs
+makeoptions MODULES_EXTRA="dtb/allwinner dtb/imx8 dtb/mv dtb/rockchip dtb/rpi"
diff --git a/sys/arm64/conf/GENERIC-MMCCAM b/sys/arm64/conf/GENERIC-MMCCAM
new file mode 100644
index 000000000000..ab45fcb8168d
--- /dev/null
+++ b/sys/arm64/conf/GENERIC-MMCCAM
@@ -0,0 +1,23 @@
+#
+# GENERIC-MMCCAM
+#
+# Custom kernel for arm64 that uses MMCCAM instead of the prior MMC stack. It
+# is kept in the tree so the configuration continues to build, since it
+# wouldn't work in LINT.
+#
+# $FreeBSD$
+
+#NO_UNIVERSE
+
+include GENERIC
+ident GENERIC-MMCCAM
+
+# Add CAMDEBUG stuff
+options CAMDEBUG
+options CAM_DEBUG_FLAGS=(CAM_DEBUG_INFO|CAM_DEBUG_PROBE|CAM_DEBUG_PERIPH)
+
+# pass(4) device
+device pass
+options MMCCAM
+
+nodevice mmc
+nodevice mmcsd
diff --git a/sys/arm64/conf/GENERIC-NODEBUG b/sys/arm64/conf/GENERIC-NODEBUG
new file mode 100644
index 000000000000..ccd519734180
--- /dev/null
+++ b/sys/arm64/conf/GENERIC-NODEBUG
@@ -0,0 +1,41 @@
+#
+# GENERIC-NODEBUG -- WITNESS and INVARIANTS free kernel configuration file
+# for FreeBSD/arm64
+#
+# This configuration file removes several debugging options, including
+# WITNESS and INVARIANTS checking, which are known to have significant
+# performance impact on running systems. When benchmarking new features
+# this kernel should be used instead of the standard GENERIC.
+# This kernel configuration should never appear outside of the HEAD
+# of the FreeBSD tree.
+#
+# For more information on this file, please read the config(5) manual page,
+# and/or the handbook section on Kernel Configuration Files:
+#
+# https://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html
+#
+# The handbook is also available locally in /usr/share/doc/handbook
+# if you've installed the doc distribution, otherwise always see the
+# FreeBSD World Wide Web server (https://www.FreeBSD.org/) for the
+# latest information.
+#
+# An exhaustive list of options and more detailed explanations of the
+# device lines is also present in the ../../conf/NOTES and NOTES files.
+# If you are in doubt as to the purpose or necessity of a line, check first
+# in NOTES.
+#
+# $FreeBSD$
+
+include GENERIC
+
+ident GENERIC-NODEBUG
+
+nooptions INVARIANTS
+nooptions INVARIANT_SUPPORT
+nooptions WITNESS
+nooptions WITNESS_SKIPSPIN
+nooptions DEADLKRES
+nooptions USB_DEBUG
+nooptions COVERAGE
+nooptions KCOV
+nooptions MALLOC_DEBUG_MAXZONES
diff --git a/sys/arm64/conf/GENERIC-UP b/sys/arm64/conf/GENERIC-UP
new file mode 100644
index 000000000000..f6c4e6edeca9
--- /dev/null
+++ b/sys/arm64/conf/GENERIC-UP
@@ -0,0 +1,23 @@
+#
+# GENERIC-UP -- Generic kernel configuration file for FreeBSD/arm64 with SMP disabled
+#
+# For more information on this file, please read the config(5) manual page,
+# and/or the handbook section on Kernel Configuration Files:
+#
+# https://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html
+#
+# The handbook is also available locally in /usr/share/doc/handbook
+# if you've installed the doc distribution, otherwise always see the
+# FreeBSD World Wide Web server (https://www.FreeBSD.org/) for the
+# latest information.
+#
+# An exhaustive list of options and more detailed explanations of the
+# device lines is also present in the ../../conf/NOTES and NOTES files.
+# If you are in doubt as to the purpose or necessity of a line, check first
+# in NOTES.
+#
+# $FreeBSD$
+
+include GENERIC
+ident GENERIC-UP
+nooptions SMP
diff --git a/sys/arm64/conf/Makefile b/sys/arm64/conf/Makefile
new file mode 100644
index 000000000000..5b01233d605c
--- /dev/null
+++ b/sys/arm64/conf/Makefile
@@ -0,0 +1,5 @@
+# $FreeBSD$
+
+TARGET=arm64
+
+.include "${.CURDIR}/../../conf/makeLINT.mk"
diff --git a/sys/arm64/conf/NOTES b/sys/arm64/conf/NOTES
new file mode 100644
index 000000000000..7d42cd2f08bb
--- /dev/null
+++ b/sys/arm64/conf/NOTES
@@ -0,0 +1,238 @@
+#
+# NOTES -- Lines that can be cut/pasted into kernel and hints configs.
+#
+# This file contains machine dependent kernel configuration notes. For
+# machine independent notes, look in /sys/conf/NOTES.
+#
+# $FreeBSD$
+#
+
+#
+# We want LINT to cover profiling as well.
+# Except it's broken.
+#profile 2
+
+#
+# Enable the kernel DTrace hooks which are required to load the DTrace
+# kernel modules.
+#
+options KDTRACE_HOOKS
+
+#
+# Most of the following is copied from ARM64 GENERIC.
+cpu ARM64
+
+makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols
+makeoptions WITH_CTF=1 # Run ctfconvert(1) for DTrace support
+
+options PRINTF_BUFR_SIZE=128 # Prevent printf output from being interspersed.
+options KDTRACE_FRAME # Ensure frames are compiled in
+options VFP # Floating-point support
+options RACCT_DEFAULT_TO_DISABLED # Set kern.racct.enable=0 by default
+options INTRNG
+
+nooptions GDB # Support remote GDB -- not supported
+
+# SoC support
+options SOC_ALLWINNER_A64
+options SOC_ALLWINNER_H5
+options SOC_CAVM_THUNDERX
+options SOC_HISI_HI6220
+options SOC_BRCM_BCM2837
+options SOC_BRCM_BCM2838
+options SOC_MARVELL_8K
+options SOC_ROCKCHIP_RK3328
+options SOC_ROCKCHIP_RK3399
+options SOC_XILINX_ZYNQ
+
+# Timer drivers
+device a10_timer
+
+# Annapurna Alpine drivers
+device al_ccu # Alpine Cache Coherency Unit
+device al_nb_service # Alpine North Bridge Service
+device al_iofic # I/O Fabric Interrupt Controller
+device al_serdes # Serializer/Deserializer
+device al_udma # Universal DMA
+
+# Qualcomm Snapdragon drivers
+device qcom_gcc # Global Clock Controller
+
+# VirtIO support
+device virtio
+device virtio_pci
+device virtio_mmio
+device virtio_blk
+device vtnet
+
+# CPU frequency control
+device cpufreq
+
+# Bus drivers
+device al_pci # Annapurna Alpine PCI-E
+options PCI_HP # PCI-Express native HotPlug
+options PCI_IOV # PCI SR-IOV support
+
+# Ethernet NICs
+device mdio
+device awg # Allwinner EMAC Gigabit Ethernet
+device axgbe # AMD Opteron A1100 integrated NIC
+device neta # Marvell Armada 370/38x/XP/3700 NIC
+device smc # SMSC LAN91C111
+device vnic # Cavium ThunderX NIC
+device al_eth # Annapurna Alpine Ethernet NIC
+device dwc_rk # Rockchip Designware
+device dwc_socfpga # Altera SOCFPGA Ethernet MAC
+device ice # Intel 800 Series Physical Function
+device ice_ddp # Intel 800 Series DDP Package
+
+# Etherswitch devices
+device e6000sw # Marvell mv88e6085 based switches
+
+# NVM Express (NVMe) support
+device nvme # base NVMe driver
+options NVME_USE_NVD=0 # prefer the cam(4) based nda(4) driver
+device nvd # expose NVMe namespaces as disks, depends on nvme
+
+# MMC/SD/SDIO Card slot support
+device sdhci_xenon # Marvell Xenon SD/MMC controller
+device aw_mmc # Allwinner SD/MMC controller
+device dwmmc
+device dwmmc_altera
+device rk_emmcphy
+
+# Serial (COM) ports
+device uart_msm # Qualcomm MSM UART driver
+device uart_mu # RPI3 aux port
+device uart_mvebu # Armada 3700 UART driver
+device uart_ns8250 # ns8250-type UART driver
+device uart_snps
+device pl011
+
+# USB support
+device aw_usbphy # Allwinner USB PHY
+device dwcotg # DWC OTG controller
+device ehci_mv # Marvell EHCI USB interface
+
+# USB ethernet support
+device muge
+device smsc
+
+# Sound support
+device a10_codec
+
+# DMA controller
+device a31_dmac
+
+# GPIO / PINCTRL
+device a37x0_gpio # Marvell Armada 37x0 GPIO controller
+device aw_gpio # Allwinner GPIO controller
+device fdt_pinctrl
+device mv_gpio # Marvell GPIO controller
+device mvebu_pinctrl # Marvell Pinmux Controller
+device rk_gpio # RockChip GPIO Controller
+device rk_pinctrl # RockChip Pinmux Controller
+
+# I2C
+device aw_rsb # Allwinner Reduced Serial Bus
+device bcm2835_bsc # Broadcom BCM283x I2C bus
+device twsi # Allwinner I2C controller
+device rk_i2c # RockChip I2C controller
+
+# Clock and reset controllers
+device aw_ccu # Allwinner clock controller
+
+# Interrupt controllers
+device aw_nmi # Allwinner NMI support
+device mv_cp110_icu # Marvell CP110 ICU
+device mv_ap806_gicp # Marvell AP806 GICP
+
+# Real-time clock support
+device aw_rtc # Allwinner Real-time Clock
+device mv_rtc # Marvell Real-time Clock
+
+# Watchdog controllers
+device aw_wdog # Allwinner Watchdog
+
+# Power management controllers
+device axp81x # X-Powers AXP81x PMIC
+device rk805 # RockChip RK805 PMIC
+
+# EFUSE
+device aw_sid # Allwinner Secure ID EFUSE
+
+# Thermal sensors
+device aw_thermal # Allwinner Thermal Sensor Controller
+device mv_thermal # Marvell Thermal Sensor Controller
+
+# SPI
+device bcm2835_spi # Broadcom BCM283x SPI bus
+
+# PWM
+device pwm
+device aw_pwm
+
+device vt_efifb
+
+# EVDEV support
+options EVDEV_SUPPORT # evdev support in legacy drivers
+device aw_cir
+
+# Pseudo devices.
+options EFIRT # EFI Runtime Services
+
+# EXT_RESOURCES pseudo devices
+options EXT_RESOURCES
+device clk
+device phy
+device hwreset
+device nvmem
+device regulator
+device syscon
+device aw_syscon
+
+# Misc devices.
+device pl330 # ARM PL330 dma controller
+device xdma # xDMA framework for SoC on-chip dma controllers
+
+# Chip-specific errata
+options THUNDERX_PASS_1_1_ERRATA
+
+options FDT
+device acpi
+
+# DTBs
+makeoptions MODULES_EXTRA="dtb/allwinner dtb/rockchip dtb/rpi"
+
+# Add CAMDEBUG stuff
+options CAMDEBUG
+options CAM_DEBUG_FLAGS=(CAM_DEBUG_INFO|CAM_DEBUG_PROBE|CAM_DEBUG_PERIPH)
+
+# bring in camified MMC too
+options MMCCAM
+
+# arm64 doesn't support inb/outb, so disable chipset probing which needs it
+nooptions PPC_PROBE_CHIPSET
+
+# These cause problems that have not been diagnosed, so keep them disabled
+nooptions DEBUG
+
+# Makes assumptions about bus tags that aren't true on arm64
+nodevice snd_cmi
+
+# arm64 didn't exist for these releases, so doesn't have the required compat
+# support. Just disable them because they are meaningless.
+nooptions COMPAT_FREEBSD4
+nooptions COMPAT_FREEBSD5
+nooptions COMPAT_FREEBSD6
+nooptions COMPAT_FREEBSD7
+nooptions COMPAT_FREEBSD9
+nooptions COMPAT_FREEBSD10
+
+# arm64 supports 32-bit FreeBSD/arm binaries (armv[67] ABIs)
+options COMPAT_FREEBSD32 # Compatible with FreeBSD/arm
+
+#####################################################################
+# ZFS support
+
+options ZFS
diff --git a/sys/arm64/coresight/coresight.c b/sys/arm64/coresight/coresight.c
new file mode 100644
index 000000000000..38a47c0c3b0f
--- /dev/null
+++ b/sys/arm64/coresight/coresight.c
@@ -0,0 +1,126 @@
+/*-
+ * Copyright (c) 2018-2020 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <machine/bus.h>
+
+#include <arm64/coresight/coresight.h>
+
+static struct mtx cs_mtx;
+struct coresight_device_list cs_devs;
+
+int
+coresight_register(struct coresight_desc *desc)
+{
+ struct coresight_device *cs_dev;
+
+ cs_dev = malloc(sizeof(struct coresight_device),
+ M_CORESIGHT, M_WAITOK | M_ZERO);
+ cs_dev->dev = desc->dev;
+ cs_dev->pdata = desc->pdata;
+ cs_dev->dev_type = desc->dev_type;
+
+ mtx_lock(&cs_mtx);
+ TAILQ_INSERT_TAIL(&cs_devs, cs_dev, link);
+ mtx_unlock(&cs_mtx);
+
+ return (0);
+}
+
+struct endpoint *
+coresight_get_output_endpoint(struct coresight_platform_data *pdata)
+{
+ struct endpoint *endp;
+
+ if (pdata->out_ports != 1)
+ return (NULL);
+
+ TAILQ_FOREACH(endp, &pdata->endpoints, link) {
+ if (endp->input == 0)
+ return (endp);
+ }
+
+ return (NULL);
+}
+
+struct coresight_device *
+coresight_get_output_device(struct endpoint *endp, struct endpoint **out_endp)
+{
+ struct coresight_platform_data *pdata;
+ struct coresight_device *cs_dev;
+ struct endpoint *endp2;
+
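+ /*
+ * Match the remote endpoint reference (an FDT phandle or an ACPI
+ * handle) against the local endpoints of every registered device to
+ * find the device on the other end of this link.
+ */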
+ TAILQ_FOREACH(cs_dev, &cs_devs, link) {
+ pdata = cs_dev->pdata;
+ TAILQ_FOREACH(endp2, &cs_dev->pdata->endpoints, link) {
+ switch (pdata->bus_type) {
+ case CORESIGHT_BUS_FDT:
+#ifdef FDT
+ if (endp->their_node == endp2->my_node) {
+ *out_endp = endp2;
+ return (cs_dev);
+ }
+#endif
+ break;
+
+ case CORESIGHT_BUS_ACPI:
+#ifdef DEV_ACPI
+ if (endp->their_handle == endp2->my_handle) {
+ *out_endp = endp2;
+ return (cs_dev);
+ }
+#endif
+ break;
+ }
+ }
+ }
+
+ return (NULL);
+}
+
+static void
+coresight_init(void)
+{
+
+ mtx_init(&cs_mtx, "ARM Coresight", NULL, MTX_DEF);
+ TAILQ_INIT(&cs_devs);
+}
+
+SYSINIT(coresight, SI_SUB_DRIVERS, SI_ORDER_FIRST, coresight_init, NULL);
diff --git a/sys/arm64/coresight/coresight.h b/sys/arm64/coresight/coresight.h
new file mode 100644
index 000000000000..a3aa71cb207f
--- /dev/null
+++ b/sys/arm64/coresight/coresight.h
@@ -0,0 +1,163 @@
+/*-
+ * Copyright (c) 2018-2020 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ARM64_CORESIGHT_CORESIGHT_H_
+#define _ARM64_CORESIGHT_CORESIGHT_H_
+
+#include "opt_acpi.h"
+#include "opt_platform.h"
+
+#include <sys/bus.h>
+
+#ifdef FDT
+#include <dev/ofw/openfirm.h>
+#endif
+
+#ifdef DEV_ACPI
+#include <contrib/dev/acpica/include/acpi.h>
+#include <dev/acpica/acpivar.h>
+#endif
+
+#define CORESIGHT_ITCTRL 0xf00
+#define CORESIGHT_CLAIMSET 0xfa0
+#define CORESIGHT_CLAIMCLR 0xfa4
+#define CORESIGHT_LAR 0xfb0
+#define CORESIGHT_UNLOCK 0xc5acce55
+#define CORESIGHT_LSR 0xfb4
+#define CORESIGHT_AUTHSTATUS 0xfb8
+#define CORESIGHT_DEVID 0xfc8
+#define CORESIGHT_DEVTYPE 0xfcc
+
+enum cs_dev_type {
+ CORESIGHT_ETMV4,
+ CORESIGHT_TMC,
+ CORESIGHT_DYNAMIC_REPLICATOR,
+ CORESIGHT_FUNNEL,
+ CORESIGHT_CPU_DEBUG,
+};
+
+enum cs_bus_type {
+ CORESIGHT_BUS_ACPI,
+ CORESIGHT_BUS_FDT,
+};
+
+struct coresight_device {
+ TAILQ_ENTRY(coresight_device) link;
+ device_t dev;
+ enum cs_dev_type dev_type;
+ struct coresight_platform_data *pdata;
+};
+
+struct endpoint {
+ TAILQ_ENTRY(endpoint) link;
+#ifdef FDT
+ phandle_t my_node;
+ phandle_t their_node;
+ phandle_t dev_node;
+#endif
+#ifdef DEV_ACPI
+ ACPI_HANDLE their_handle;
+ ACPI_HANDLE my_handle;
+#endif
+ boolean_t input;
+ int reg;
+ struct coresight_device *cs_dev;
+ LIST_ENTRY(endpoint) endplink;
+};
+
+struct coresight_platform_data {
+ int cpu;
+ int in_ports;
+ int out_ports;
+ struct mtx mtx_lock;
+ TAILQ_HEAD(endpoint_list, endpoint) endpoints;
+ enum cs_bus_type bus_type;
+};
+
+struct coresight_desc {
+ struct coresight_platform_data *pdata;
+ device_t dev;
+ enum cs_dev_type dev_type;
+};
+
+TAILQ_HEAD(coresight_device_list, coresight_device);
+
+#define ETM_N_COMPRATOR 16
+
+struct etm_state {
+ uint32_t trace_id;
+};
+
+struct etr_state {
+ boolean_t started;
+ uint32_t cycle;
+ uint32_t offset;
+ uint32_t low;
+ uint32_t high;
+ uint32_t bufsize;
+ uint32_t flags;
+#define ETR_FLAG_ALLOCATE (1 << 0)
+#define ETR_FLAG_RELEASE (1 << 1)
+};
+
+struct coresight_event {
+ LIST_HEAD(, endpoint) endplist;
+
+ uint64_t addr[ETM_N_COMPRATOR];
+ uint32_t naddr;
+ uint8_t excp_level;
+ enum cs_dev_type src;
+ enum cs_dev_type sink;
+
+ struct etr_state etr;
+ struct etm_state etm;
+};
+
+struct etm_config {
+ uint64_t addr[ETM_N_COMPRATOR];
+ uint32_t naddr;
+ uint8_t excp_level;
+};
+
+static MALLOC_DEFINE(M_CORESIGHT, "coresight", "ARM Coresight");
+
+struct coresight_platform_data *coresight_fdt_get_platform_data(device_t dev);
+struct coresight_platform_data *coresight_acpi_get_platform_data(device_t dev);
+struct endpoint * coresight_get_output_endpoint(struct coresight_platform_data *pdata);
+struct coresight_device * coresight_get_output_device(struct endpoint *endp, struct endpoint **);
+int coresight_register(struct coresight_desc *desc);
+int coresight_init_event(int cpu, struct coresight_event *event);
+void coresight_enable(int cpu, struct coresight_event *event);
+void coresight_disable(int cpu, struct coresight_event *event);
+void coresight_read(int cpu, struct coresight_event *event);
+
+#endif /* !_ARM64_CORESIGHT_CORESIGHT_H_ */
diff --git a/sys/arm64/coresight/coresight_acpi.c b/sys/arm64/coresight/coresight_acpi.c
new file mode 100644
index 000000000000..57fda8b97666
--- /dev/null
+++ b/sys/arm64/coresight/coresight_acpi.c
@@ -0,0 +1,373 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory (Department of Computer Science and
+ * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
+ * DARPA SSITH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/uuid.h>
+#include <machine/bus.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <dev/acpica/acpivar.h>
+
+#include <arm64/coresight/coresight.h>
+
+#define ACPI_CORESIGHT_LINK_OUTPUT 1
+#define ACPI_CORESIGHT_LINK_INPUT 0
+
+static const struct uuid acpi_graph_uuid = {
+ 0xab02a46b, 0x74c7, 0x45a2, 0xbd, 0x68,
+ { 0xf7, 0xd3, 0x44, 0xef, 0x21, 0x53 },
+};
+
+static const struct uuid coresight_graph_uuid = {
+ 0x3ecbc8b6, 0x1d0e, 0x4fb3, 0x81, 0x07,
+ { 0xe6, 0x27, 0xf8, 0x05, 0xc6, 0xcd },
+};
+
+static inline bool
+cs_acpi_validate_dsd_graph(const union acpi_object *graph)
+{
+ const union acpi_object *rev, *nr_graphs;
+ const union acpi_object *obj;
+ int i, n;
+
+ if (graph->Package.Count < 2)
+ return (false);
+
+ rev = &graph->Package.Elements[0];
+ nr_graphs = &graph->Package.Elements[1];
+
+ if (rev->Type != ACPI_TYPE_INTEGER ||
+ nr_graphs->Type != ACPI_TYPE_INTEGER)
+ return (false);
+
+ /* Revision 0 supported only. */
+ if (rev->Integer.Value != 0)
+ return (false);
+
+ /* We are looking for a single graph. */
+ n = nr_graphs->Integer.Value;
+ if (n != 1)
+ return (false);
+
+ /* Check the number of elements. */
+ if (graph->Package.Count != (n + 2))
+ return (false);
+
+ for (i = 2; i < n + 2; i++) {
+ obj = &graph->Package.Elements[i];
+ if (obj->Type != ACPI_TYPE_PACKAGE || obj->Package.Count < 3)
+ return (false);
+ }
+
+ return (true);
+}
+
+static inline bool
+cs_is_acpi_guid(const union acpi_object *obj)
+{
+
+ return (obj->Type == ACPI_TYPE_BUFFER) && (obj->Buffer.Length == 16);
+}
+
+static inline bool
+cs_guid_equal(const struct uuid *u1, const struct uuid *u2)
+{
+
+ if (memcmp(u1, u2, 16) == 0)
+ return (true);
+
+ return (false);
+}
+
+static inline bool
+cs_acpi_guid_matches(const union acpi_object *obj, const struct uuid *guid)
+{
+
+ if (cs_is_acpi_guid(obj) &&
+ cs_guid_equal((struct uuid *)obj->Buffer.Pointer, guid))
+ return (true);
+
+ return (false);
+}
+
+static inline bool
+is_acpi_dsd_graph_guid(const union acpi_object *obj)
+{
+
+ return (cs_acpi_guid_matches(obj, &acpi_graph_uuid));
+}
+
+static inline bool
+cs_is_acpi_coresight_graph_guid(const union acpi_object *obj)
+{
+
+ return (cs_acpi_guid_matches(obj, &coresight_graph_uuid));
+}
+
+static inline bool
+cs_is_acpi_coresight_graph(const union acpi_object *obj)
+{
+ const union acpi_object *graphid, *guid, *links;
+
+ if (obj->Type != ACPI_TYPE_PACKAGE ||
+ obj->Package.Count < 3)
+ return (false);
+
+ graphid = &obj->Package.Elements[0];
+ guid = &obj->Package.Elements[1];
+ links = &obj->Package.Elements[2];
+
+ if (graphid->Type != ACPI_TYPE_INTEGER ||
+ links->Type != ACPI_TYPE_INTEGER)
+ return (false);
+
+ if (cs_is_acpi_coresight_graph_guid(guid))
+ return (true);
+
+ return (false);
+}
+
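+/*
+ * Evaluate the device's _DSD object and return the package that follows the
+ * ACPI graph UUID, provided that it passes basic validation.
+ */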
+static const union acpi_object *
+cs_get_dsd_graph(device_t dev)
+{
+ const union acpi_object *guid, *package;
+ union acpi_object *dsd;
+ ACPI_STATUS status;
+ ACPI_BUFFER buf;
+ device_t bus;
+ int i;
+
+ buf.Length = PAGE_SIZE;
+ buf.Pointer = malloc(buf.Length, M_TEMP, M_NOWAIT | M_ZERO);
+ if (buf.Pointer == NULL) {
+ printf("Failed to allocate memory.\n");
+ return (NULL);
+ }
+
+ bus = device_get_parent(dev);
+ status = ACPI_EVALUATE_OBJECT(bus, dev, "_DSD", NULL, &buf);
+ if (ACPI_FAILURE(status)) {
+ printf("Failed to evaluate object.\n");
+ return (NULL);
+ }
+
+ dsd = buf.Pointer;
+
+ for (i = 0; i + 1 < dsd->Package.Count; i += 2) {
+ guid = &dsd->Package.Elements[i];
+ package = &dsd->Package.Elements[i + 1];
+
+ if (!cs_is_acpi_guid(guid) ||
+ package->Type != ACPI_TYPE_PACKAGE)
+ break;
+
+ if (!is_acpi_dsd_graph_guid(guid))
+ continue;
+
+ if (cs_acpi_validate_dsd_graph(package))
+ return (package);
+ }
+
+ return (NULL);
+}
+
+static inline bool
+cs_acpi_validate_coresight_graph(const union acpi_object *cs_graph)
+{
+ int nlinks;
+
+ nlinks = cs_graph->Package.Elements[2].Integer.Value;
+ if (cs_graph->Package.Count != (nlinks + 3))
+ return (false);
+
+ return (true);
+}
+
+static const union acpi_object *
+cs_get_coresight_graph(device_t dev)
+{
+ const union acpi_object *graph_list, *graph;
+ int i, nr_graphs;
+
+ graph_list = cs_get_dsd_graph(dev);
+ if (!graph_list) {
+ printf("failed to get graph list\n");
+ return (NULL);
+ }
+
+ nr_graphs = graph_list->Package.Elements[1].Integer.Value;
+ for (i = 2; i < nr_graphs + 2; i++) {
+ graph = &graph_list->Package.Elements[i];
+ if (!cs_is_acpi_coresight_graph(graph))
+ continue;
+ if (cs_acpi_validate_coresight_graph(graph))
+ return (graph);
+ break;
+ }
+
+ return (NULL);
+}
+
+static int
+cs_acpi_record_endpoint(device_t dev,
+ struct coresight_platform_data *pdata,
+ const union acpi_object *link)
+{
+ const union acpi_object *fields;
+ struct endpoint *endp;
+ ACPI_HANDLE handle;
+ int dir;
+
+ if (link->Type != ACPI_TYPE_PACKAGE ||
+ link->Package.Count != 4)
+ return (ENXIO);
+
+ fields = link->Package.Elements;
+ if (fields[0].Type != ACPI_TYPE_INTEGER ||
+ fields[1].Type != ACPI_TYPE_INTEGER ||
+ fields[2].Type != ACPI_TYPE_LOCAL_REFERENCE ||
+ fields[3].Type != ACPI_TYPE_INTEGER)
+ return (ENXIO);
+
+ handle = fields[2].Reference.Handle;
+ dir = fields[3].Integer.Value;
+
+ endp = malloc(sizeof(struct endpoint),
+ M_CORESIGHT, M_WAITOK | M_ZERO);
+ if (endp == NULL) {
+ device_printf(dev, "Failed to allocate memory.\n");
+ return (ENXIO);
+ }
+
+ endp->their_handle = handle;
+ endp->my_handle = acpi_get_handle(dev);
+
+ mtx_lock(&pdata->mtx_lock);
+ TAILQ_INSERT_TAIL(&pdata->endpoints, endp, link);
+ mtx_unlock(&pdata->mtx_lock);
+
+ if (dir == ACPI_CORESIGHT_LINK_OUTPUT) {
+ pdata->out_ports++;
+ } else {
+ endp->input = true;
+ pdata->in_ports++;
+ }
+
+ return (0);
+}
+
+static int
+coresight_acpi_get_ports(device_t dev,
+ struct coresight_platform_data *pdata)
+{
+ const union acpi_object *graph;
+ const union acpi_object *link;
+ int nlinks;
+ int error;
+ int i;
+
+ graph = cs_get_coresight_graph(dev);
+ if (graph == NULL) {
+ device_printf(dev, "Coresight graph not found.\n");
+ return (ENXIO);
+ }
+
+ nlinks = graph->Package.Elements[2].Integer.Value;
+ if (!nlinks)
+ return (0);
+
+ for (i = 0; i < nlinks; i++) {
+ link = &graph->Package.Elements[3 + i];
+ error = cs_acpi_record_endpoint(dev, pdata, link);
+ if (error != 0)
+ return (error);
+ }
+
+ return (0);
+}
+
+static int
+coresight_acpi_get_cpu(device_t dev, struct coresight_platform_data *pdata)
+{
+ ACPI_HANDLE handle, parent;
+ ACPI_STATUS status;
+ int cpuid;
+
+ handle = acpi_get_handle(dev);
+
+ status = AcpiGetParent(handle, &parent);
+ if (!ACPI_SUCCESS(status))
+ return (ENXIO);
+
+ if (!acpi_MatchHid(parent, "ACPI0007"))
+ return (ENXIO);
+
+ status = acpi_GetInteger(parent, "_UID", &cpuid);
+ if (ACPI_SUCCESS(status)) {
+ pdata->cpu = cpuid;
+ return (0);
+ }
+
+ return (ENXIO);
+}
+
+struct coresight_platform_data *
+coresight_acpi_get_platform_data(device_t dev)
+{
+ struct coresight_platform_data *pdata;
+
+ pdata = malloc(sizeof(struct coresight_platform_data),
+ M_CORESIGHT, M_WAITOK | M_ZERO);
+ pdata->bus_type = CORESIGHT_BUS_ACPI;
+
+ mtx_init(&pdata->mtx_lock, "Coresight Platform Data", NULL, MTX_DEF);
+ TAILQ_INIT(&pdata->endpoints);
+
+ coresight_acpi_get_cpu(dev, pdata);
+ coresight_acpi_get_ports(dev, pdata);
+
+ if (bootverbose)
+ printf("Total ports: in %d out %d\n",
+ pdata->in_ports, pdata->out_ports);
+
+ return (pdata);
+}
diff --git a/sys/arm64/coresight/coresight_cmd.c b/sys/arm64/coresight/coresight_cmd.c
new file mode 100644
index 000000000000..c25f512f5aa7
--- /dev/null
+++ b/sys/arm64/coresight/coresight_cmd.c
@@ -0,0 +1,159 @@
+/*-
+ * Copyright (c) 2018-2020 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <machine/bus.h>
+
+#include <arm64/coresight/coresight.h>
+
+#include "coresight_if.h"
+
+extern struct coresight_device_list cs_devs;
+
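+/*
+ * Follow one output link of cs_dev: find the device attached to one of its
+ * output endpoints, record both ends of the link on the event's endpoint
+ * list, and return the downstream device (NULL when the path ends).
+ */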
+static struct coresight_device *
+coresight_next_device(struct coresight_device *cs_dev,
+ struct coresight_event *event)
+{
+ struct coresight_device *out;
+ struct endpoint *out_endp;
+ struct endpoint *endp;
+
+ TAILQ_FOREACH(endp, &cs_dev->pdata->endpoints, link) {
+ if (endp->input != 0)
+ continue;
+
+ out = coresight_get_output_device(endp, &out_endp);
+ if (out != NULL) {
+ if (LIST_EMPTY(&event->endplist)) {
+ /* Add source device */
+ endp->cs_dev = cs_dev;
+ LIST_INSERT_HEAD(&event->endplist, endp,
+ endplink);
+ }
+
+ /* Add output device */
+ if (bootverbose)
+ printf("Adding device %s to the chain\n",
+ device_get_nameunit(out->dev));
+ out_endp->cs_dev = out;
+ LIST_INSERT_HEAD(&event->endplist, out_endp, endplink);
+
+ return (out);
+ }
+ }
+
+ return (NULL);
+}
+
+static int
+coresight_build_list(struct coresight_device *cs_dev,
+ struct coresight_event *event)
+{
+ struct coresight_device *out;
+
+ out = cs_dev;
+ while (out != NULL)
+ out = coresight_next_device(out, event);
+
+ return (0);
+}
+
+int
+coresight_init_event(int cpu, struct coresight_event *event)
+{
+ struct coresight_device *cs_dev;
+ struct endpoint *endp;
+
+ /* Start building path from source device */
+ TAILQ_FOREACH(cs_dev, &cs_devs, link) {
+ if (cs_dev->dev_type == event->src &&
+ cs_dev->pdata->cpu == cpu) {
+ LIST_INIT(&event->endplist);
+ coresight_build_list(cs_dev, event);
+ break;
+ }
+ }
+
+ /* Ensure Coresight is initialized for the CPU */
+ TAILQ_FOREACH(cs_dev, &cs_devs, link) {
+ if (cs_dev->dev_type == CORESIGHT_CPU_DEBUG &&
+ cs_dev->pdata->cpu == cpu)
+ CORESIGHT_INIT(cs_dev->dev);
+ }
+
+ /* Init all devices in the path */
+ LIST_FOREACH(endp, &event->endplist, endplink) {
+ cs_dev = endp->cs_dev;
+ CORESIGHT_INIT(cs_dev->dev);
+ }
+
+ return (0);
+}
+
+void
+coresight_enable(int cpu, struct coresight_event *event)
+{
+ struct coresight_device *cs_dev;
+ struct endpoint *endp;
+
+ LIST_FOREACH(endp, &event->endplist, endplink) {
+ cs_dev = endp->cs_dev;
+ CORESIGHT_ENABLE(cs_dev->dev, endp, event);
+ }
+}
+
+void
+coresight_disable(int cpu, struct coresight_event *event)
+{
+ struct coresight_device *cs_dev;
+ struct endpoint *endp;
+
+ LIST_FOREACH(endp, &event->endplist, endplink) {
+ cs_dev = endp->cs_dev;
+ CORESIGHT_DISABLE(cs_dev->dev, endp, event);
+ }
+}
+
+void
+coresight_read(int cpu, struct coresight_event *event)
+{
+ struct endpoint *endp;
+
+ LIST_FOREACH(endp, &event->endplist, endplink)
+ CORESIGHT_READ(endp->cs_dev->dev, endp, event);
+}
diff --git a/sys/arm64/coresight/coresight_cpu_debug.c b/sys/arm64/coresight/coresight_cpu_debug.c
new file mode 100644
index 000000000000..c28db5fd51c7
--- /dev/null
+++ b/sys/arm64/coresight/coresight_cpu_debug.c
@@ -0,0 +1,164 @@
+/*-
+ * Copyright (c) 2018-2020 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by BAE Systems, the University of Cambridge
+ * Computer Laboratory, and Memorial University under DARPA/AFRL contract
+ * FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent Computing
+ * (TC) research program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <machine/bus.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <arm64/coresight/coresight.h>
+
+#include "coresight_if.h"
+
+#define EDPCSR 0x0a0
+#define EDCIDSR 0x0a4
+#define EDVIDSR 0x0a8
+#define EDPCSR_HI 0x0ac
+#define EDOSLAR 0x300
+#define EDPRCR 0x310
+#define EDPRCR_COREPURQ (1 << 3)
+#define EDPRCR_CORENPDRQ (1 << 0)
+#define EDPRSR 0x314
+#define EDDEVID1 0xfc4
+#define EDDEVID 0xfc8
+
+static struct ofw_compat_data compat_data[] = {
+ { "arm,coresight-cpu-debug", 1 },
+ { NULL, 0 }
+};
+
+struct debug_softc {
+ struct resource *res;
+ struct coresight_platform_data *pdata;
+};
+
+static struct resource_spec debug_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { -1, 0 }
+};
+
+static int
+debug_init(device_t dev)
+{
+ struct debug_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+
+ /* Unlock Coresight */
+ bus_write_4(sc->res, CORESIGHT_LAR, CORESIGHT_UNLOCK);
+
+ /* Unlock Debug */
+ bus_write_4(sc->res, EDOSLAR, 0);
+
+ /* Already initialized? */
+ reg = bus_read_4(sc->res, EDPRCR);
+ if (reg & EDPRCR_CORENPDRQ)
+ return (0);
+
+ /* Enable power */
+ reg |= EDPRCR_COREPURQ;
+ bus_write_4(sc->res, EDPRCR, reg);
+
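+ /* Wait for EDPRSR to report that the core power domain is up. */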
+ do {
+ reg = bus_read_4(sc->res, EDPRSR);
+ } while ((reg & EDPRCR_CORENPDRQ) == 0);
+
+ return (0);
+}
+
+static int
+debug_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "Coresight CPU Debug");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+debug_attach(device_t dev)
+{
+ struct coresight_desc desc;
+ struct debug_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ if (bus_alloc_resources(dev, debug_spec, &sc->res) != 0) {
+ device_printf(dev, "cannot allocate resources for device\n");
+ return (ENXIO);
+ }
+
+ sc->pdata = coresight_fdt_get_platform_data(dev);
+ desc.pdata = sc->pdata;
+ desc.dev = dev;
+ desc.dev_type = CORESIGHT_CPU_DEBUG;
+ coresight_register(&desc);
+
+ return (0);
+}
+
+static device_method_t debug_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, debug_probe),
+ DEVMETHOD(device_attach, debug_attach),
+
+ /* Coresight interface */
+ DEVMETHOD(coresight_init, debug_init),
+ DEVMETHOD_END
+};
+
+static driver_t debug_driver = {
+ "debug",
+ debug_methods,
+ sizeof(struct debug_softc),
+};
+
+static devclass_t debug_devclass;
+
+EARLY_DRIVER_MODULE(debug, simplebus, debug_driver, debug_devclass,
+ 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_LATE);
+MODULE_VERSION(debug, 1);
diff --git a/sys/arm64/coresight/coresight_etm4x.c b/sys/arm64/coresight/coresight_etm4x.c
new file mode 100644
index 000000000000..1b9b3ed71fd6
--- /dev/null
+++ b/sys/arm64/coresight/coresight_etm4x.c
@@ -0,0 +1,266 @@
+/*-
+ * Copyright (c) 2018-2020 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by BAE Systems, the University of Cambridge
+ * Computer Laboratory, and Memorial University under DARPA/AFRL contract
+ * FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent Computing
+ * (TC) research program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <machine/bus.h>
+
+#include <arm64/coresight/coresight.h>
+#include <arm64/coresight/coresight_etm4x.h>
+
+#include "coresight_if.h"
+
+#define ETM_DEBUG
+#undef ETM_DEBUG
+
+#ifdef ETM_DEBUG
+#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#define dprintf(fmt, ...)
+#endif
+
+/*
+ * Typical trace flow:
+ *
+ * CPU0 -> ETM0 -> funnel1 -> funnel0 -> ETF -> replicator -> ETR -> DRAM
+ * CPU1 -> ETM1 -> funnel1 -^
+ * CPU2 -> ETM2 -> funnel1 -^
+ * CPU3 -> ETM3 -> funnel1 -^
+ */
+
+static struct resource_spec etm_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { -1, 0 }
+};
+
+static int
+etm_prepare(device_t dev, struct coresight_event *event)
+{
+ struct etm_softc *sc;
+ uint32_t reg;
+ int i;
+
+ sc = device_get_softc(dev);
+
+ /* Configure ETM */
+
+ /*
+ * Enable the return stack, global timestamping,
+ * Context ID, and Virtual context identifier tracing.
+ */
+ reg = TRCCONFIGR_RS | TRCCONFIGR_TS;
+ reg |= TRCCONFIGR_CID | TRCCONFIGR_VMID;
+ reg |= TRCCONFIGR_INSTP0_LDRSTR;
+ reg |= TRCCONFIGR_COND_ALL;
+ bus_write_4(sc->res, TRCCONFIGR, reg);
+
+ /* Disable all event tracing. */
+ bus_write_4(sc->res, TRCEVENTCTL0R, 0);
+ bus_write_4(sc->res, TRCEVENTCTL1R, 0);
+
+ /* Disable stalling, if implemented. */
+ bus_write_4(sc->res, TRCSTALLCTLR, 0);
+
+ /* Enable trace synchronization every 4096 bytes of trace. */
+ bus_write_4(sc->res, TRCSYNCPR, TRCSYNCPR_4K);
+
+ /* Set a value for the trace ID */
+ bus_write_4(sc->res, TRCTRACEIDR, event->etm.trace_id);
+
+ /*
+ * Disable the timestamp event. The trace unit still generates
+ * timestamps due to other reasons such as trace synchronization.
+ */
+ bus_write_4(sc->res, TRCTSCTLR, 0);
+
+ /*
+ * Enable ViewInst to trace everything, with the start/stop
+ * logic started.
+ */
+ reg = TRCVICTLR_SSSTATUS;
+
+ /* The number of the single resource used to activate the event. */
+ reg |= (1 << EVENT_SEL_S);
+
+ if (event->excp_level > 2)
+ return (-1);
+
+ reg |= TRCVICTLR_EXLEVEL_NS_M;
+ reg &= ~TRCVICTLR_EXLEVEL_NS(event->excp_level);
+ reg |= TRCVICTLR_EXLEVEL_S_M;
+ reg &= ~TRCVICTLR_EXLEVEL_S(event->excp_level);
+ bus_write_4(sc->res, TRCVICTLR, reg);
+
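+ /*
+ * Program the address range comparators: each range uses a pair of
+ * TRCACVR registers and is added to the ViewInst include mask.
+ */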
+ for (i = 0; i < event->naddr * 2; i++) {
+ dprintf("configure range %d, address %lx\n",
+ i, event->addr[i]);
+ bus_write_8(sc->res, TRCACVR(i), event->addr[i]);
+
+ reg = 0;
+ /* Secure state */
+ reg |= TRCACATR_EXLEVEL_S_M;
+ reg &= ~TRCACATR_EXLEVEL_S(event->excp_level);
+ /* Non-secure state */
+ reg |= TRCACATR_EXLEVEL_NS_M;
+ reg &= ~TRCACATR_EXLEVEL_NS(event->excp_level);
+ bus_write_4(sc->res, TRCACATR(i), reg);
+
+ /* Address range is included */
+ reg = bus_read_4(sc->res, TRCVIIECTLR);
+ reg |= (1 << (TRCVIIECTLR_INCLUDE_S + i / 2));
+ bus_write_4(sc->res, TRCVIIECTLR, reg);
+ }
+
+ /* No address filtering for ViewData. */
+ bus_write_4(sc->res, TRCVDARCCTLR, 0);
+
+ /* Clear the STATUS bit to zero */
+ bus_write_4(sc->res, TRCSSCSR(0), 0);
+
+ if (event->naddr == 0) {
+ /* No address range filtering for ViewInst. */
+ bus_write_4(sc->res, TRCVIIECTLR, 0);
+ }
+
+ /* No start or stop points for ViewInst. */
+ bus_write_4(sc->res, TRCVISSCTLR, 0);
+
+ /* Disable ViewData */
+ bus_write_4(sc->res, TRCVDCTLR, 0);
+
+ /* No address filtering for ViewData. */
+ bus_write_4(sc->res, TRCVDSACCTLR, 0);
+
+ return (0);
+}
+
+static int
+etm_init(device_t dev)
+{
+ struct etm_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+
+ /* Unlocking Coresight */
+ bus_write_4(sc->res, CORESIGHT_LAR, CORESIGHT_UNLOCK);
+
+ /* Unlocking ETM */
+ bus_write_4(sc->res, TRCOSLAR, 0);
+
+ reg = bus_read_4(sc->res, TRCIDR(1));
+ dprintf("ETM Version: %d.%d\n",
+ (reg & TRCIDR1_TRCARCHMAJ_M) >> TRCIDR1_TRCARCHMAJ_S,
+ (reg & TRCIDR1_TRCARCHMIN_M) >> TRCIDR1_TRCARCHMIN_S);
+
+ return (0);
+}
+
+static int
+etm_enable(device_t dev, struct endpoint *endp,
+ struct coresight_event *event)
+{
+ struct etm_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+
+ if (etm_prepare(dev, event) != 0)
+ return (-1);
+
+ /* Enable the trace unit */
+ bus_write_4(sc->res, TRCPRGCTLR, TRCPRGCTLR_EN);
+
+ /* Wait for the IDLE bit to go low. */
+ do {
+ reg = bus_read_4(sc->res, TRCSTATR);
+ } while ((reg & TRCSTATR_IDLE) != 0);
+
+ if ((bus_read_4(sc->res, TRCPRGCTLR) & TRCPRGCTLR_EN) == 0)
+ panic("etm is not enabled");
+
+ return (0);
+}
+
+static void
+etm_disable(device_t dev, struct endpoint *endp,
+ struct coresight_event *event)
+{
+ struct etm_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+
+ /* Disable the trace unit */
+ bus_write_4(sc->res, TRCPRGCTLR, 0);
+
+ /* Wait for the IDLE bit to be set */
+ do {
+ reg = bus_read_4(sc->res, TRCSTATR);
+ } while ((reg & TRCSTATR_IDLE) == 0);
+}
+
+int
+etm_attach(device_t dev)
+{
+ struct coresight_desc desc;
+ struct etm_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ if (bus_alloc_resources(dev, etm_spec, &sc->res) != 0) {
+ device_printf(dev, "cannot allocate resources for device\n");
+ return (ENXIO);
+ }
+
+ desc.pdata = sc->pdata;
+ desc.dev = dev;
+ desc.dev_type = CORESIGHT_ETMV4;
+ coresight_register(&desc);
+
+ return (0);
+}
+
+static device_method_t etm_methods[] = {
+ /* Coresight interface */
+ DEVMETHOD(coresight_init, etm_init),
+ DEVMETHOD(coresight_enable, etm_enable),
+ DEVMETHOD(coresight_disable, etm_disable),
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(etm, etm_driver, etm_methods, sizeof(struct etm_softc));
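
For reference, a hedged sketch of how a caller might populate the coresight_event fields consumed by etm_prepare() and etm_enable(). Only fields actually referenced in the driver above are touched; struct coresight_event itself is defined in coresight.h outside this hunk, so the field widths and the helper are assumptions.

/*
 * Illustrative sketch (not part of the patch): filling in the
 * coresight_event fields that etm_prepare() consumes.  The structure may
 * require more setup than shown here.
 */
static void
etm_event_example(struct coresight_event *event, uint64_t start,
    uint64_t end)
{
	event->excp_level = 1;		/* trace EL1 only */
	event->etm.trace_id = 0x10;	/* arbitrary example trace ID */

	/* One address range: TRCACVR comparators are written in (start, end) pairs. */
	event->naddr = 1;
	event->addr[0] = start;
	event->addr[1] = end;
}
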
diff --git a/sys/arm64/coresight/coresight_etm4x.h b/sys/arm64/coresight/coresight_etm4x.h
new file mode 100644
index 000000000000..ce84a6c30e70
--- /dev/null
+++ b/sys/arm64/coresight/coresight_etm4x.h
@@ -0,0 +1,184 @@
+/*-
+ * Copyright (c) 2018-2020 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by BAE Systems, the University of Cambridge
+ * Computer Laboratory, and Memorial University under DARPA/AFRL contract
+ * FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent Computing
+ * (TC) research program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ARM64_CORESIGHT_ETM4X_H_
+#define _ARM64_CORESIGHT_ETM4X_H_
+
+#define TRCPRGCTLR 0x004 /* Trace Programming Control Register */
+#define TRCPRGCTLR_EN (1 << 0) /* Trace unit enable bit */
+#define TRCPROCSELR 0x008 /* Trace PE Select Control Register */
+#define TRCSTATR 0x00C /* Trace Trace Status Register */
+#define TRCSTATR_PMSTABLE (1 << 1) /* The programmers' model is stable. */
+#define TRCSTATR_IDLE (1 << 0) /* The trace unit is idle. */
+#define TRCCONFIGR 0x010 /* Trace Trace Configuration Register */
+#define TRCCONFIGR_DV (1 << 17) /* Data value tracing is enabled when INSTP0 is not 0b00 */
+#define TRCCONFIGR_DA (1 << 16) /* Data address tracing is enabled when INSTP0 is not 0b00. */
+#define TRCCONFIGR_VMIDOPT (1 << 15) /* Control bit to configure the Virtual context identifier value */
+#define TRCCONFIGR_QE_S 13 /* Q element enable field */
+#define TRCCONFIGR_QE_M (0x3 << TRCCONFIGR_QE_S)
+#define TRCCONFIGR_RS (1 << 12) /* Return stack enable bit */
+#define TRCCONFIGR_TS (1 << 11) /* Global timestamp tracing is enabled. */
+#define TRCCONFIGR_COND_S 8 /* Conditional instruction tracing bit. */
+#define TRCCONFIGR_COND_M (0x7 << TRCCONFIGR_COND_S)
+#define TRCCONFIGR_COND_DIS 0
+#define TRCCONFIGR_COND_LDR (1 << TRCCONFIGR_COND_S) /* Conditional load instructions are traced. */
+#define TRCCONFIGR_COND_STR (2 << TRCCONFIGR_COND_S) /* Conditional store instructions are traced. */
+#define TRCCONFIGR_COND_LDRSTR (3 << TRCCONFIGR_COND_S) /* Conditional load and store instructions are traced. */
+#define TRCCONFIGR_COND_ALL (7 << TRCCONFIGR_COND_S) /* All conditional instructions are traced. */
+#define TRCCONFIGR_VMID (1 << 7) /* Virtual context identifier tracing is enabled. */
+#define TRCCONFIGR_CID (1 << 6) /* Context ID tracing is enabled. */
+#define TRCCONFIGR_CCI (1 << 4) /* Cycle counting in the instruction trace is enabled. */
+#define TRCCONFIGR_BB (1 << 3) /* Branch broadcast mode is enabled. */
+#define TRCCONFIGR_INSTP0_S 1 /* Instruction P0 field. */
+#define TRCCONFIGR_INSTP0_M (0x3 << TRCCONFIGR_INSTP0_S)
+#define TRCCONFIGR_INSTP0_NONE 0 /* Do not trace load and store instructions as P0 instructions. */
+#define TRCCONFIGR_INSTP0_LDR (1 << TRCCONFIGR_INSTP0_S) /* Trace load instructions as P0 instructions. */
+#define TRCCONFIGR_INSTP0_STR (2 << TRCCONFIGR_INSTP0_S) /* Trace store instructions as P0 instructions. */
+#define TRCCONFIGR_INSTP0_LDRSTR (3 << TRCCONFIGR_INSTP0_S) /* Trace load and store instructions as P0 instr. */
+#define TRCAUXCTLR 0x018 /* Trace Auxiliary Control Register */
+#define TRCEVENTCTL0R 0x020 /* Trace Event Control 0 Register */
+#define TRCEVENTCTL1R 0x024 /* Trace Event Control 1 Register */
+#define TRCSTALLCTLR 0x02C /* Trace Stall Control Register */
+#define TRCTSCTLR 0x030 /* Trace Global Timestamp Control Register */
+#define TRCSYNCPR 0x034 /* Trace Synchronization Period Register */
+#define TRCSYNCPR_PERIOD_S 0
+#define TRCSYNCPR_PERIOD_M 0x1f
+#define TRCSYNCPR_1K (10 << TRCSYNCPR_PERIOD_S)
+#define TRCSYNCPR_2K (11 << TRCSYNCPR_PERIOD_S)
+#define TRCSYNCPR_4K (12 << TRCSYNCPR_PERIOD_S)
+#define TRCCCCTLR 0x038 /* Trace Cycle Count Control Register */
+#define TRCBBCTLR 0x03C /* Trace Branch Broadcast Control Register */
+#define TRCTRACEIDR 0x040 /* Trace Trace ID Register */
+#define TRCQCTLR 0x044 /* Trace Q Element Control Register */
+#define TRCQCTLR_MODE_INC (1 << 8) /* Include mode. */
+#define TRCVICTLR 0x080 /* Trace ViewInst Main Control Register */
+#define TRCVICTLR_SSSTATUS (1 << 9) /* The start/stop logic is in the started state. */
+#define TRCVICTLR_EXLEVEL_NS_S 20
+#define TRCVICTLR_EXLEVEL_NS_M (0xf << TRCVICTLR_EXLEVEL_NS_S)
+#define TRCVICTLR_EXLEVEL_NS(n) (0x1 << ((n) + TRCVICTLR_EXLEVEL_NS_S))
+#define TRCVICTLR_EXLEVEL_S_S 16
+#define TRCVICTLR_EXLEVEL_S_M (0xf << TRCVICTLR_EXLEVEL_S_S)
+#define TRCVICTLR_EXLEVEL_S(n) (0x1 << ((n) + TRCVICTLR_EXLEVEL_S_S))
+#define EVENT_SEL_S 0
+#define EVENT_SEL_M (0x1f << EVENT_SEL_S)
+#define TRCVIIECTLR 0x084 /* Trace ViewInst Include/Exclude Control Register */
+#define TRCVIIECTLR_INCLUDE_S 0
+#define TRCVISSCTLR 0x088 /* Trace ViewInst Start/Stop Control Register */
+#define TRCVIPCSSCTLR 0x08C /* Trace ViewInst Start/Stop PE Comparator Control Register */
+#define TRCVDCTLR 0x0A0 /* Trace ViewData Main Control Register */
+#define TRCVDCTLR_TRCEXDATA (1 << 12) /* Exception and exception return data transfers are traced */
+#define TRCVDCTLR_TBI (1 << 11) /* The trace unit assigns bits[63:56] to have the same value as bits[63:56] of the data address. */
+#define TRCVDCTLR_PCREL (1 << 10) /* The trace unit does not trace the address or value portions of PC-relative transfers. */
+#define TRCVDCTLR_SPREL_S 8
+#define TRCVDCTLR_SPREL_M (0x3 << TRCVDCTLR_SPREL_S)
+#define TRCVDCTLR_EVENT_S 0
+#define TRCVDCTLR_EVENT_M (0xff << TRCVDCTLR_EVENT_S)
+#define TRCVDSACCTLR 0x0A4 /* Trace ViewData Include/Exclude Single Address Comparator Control Register */
+#define TRCVDARCCTLR 0x0A8 /* Trace ViewData Include/Exclude Address Range Comparator Control Register */
+#define TRCSEQEVR(n) (0x100 + (n) * 0x4) /* Trace Sequencer State Transition Control Register [n=0-2] */
+#define TRCSEQRSTEVR 0x118 /* Trace Sequencer Reset Control Register */
+#define TRCSEQSTR 0x11C /* Trace Sequencer State Register */
+#define TRCEXTINSELR 0x120 /* Trace External Input Select Register */
+#define TRCCNTRLDVR(n) (0x140 + (n) * 0x4) /* 32 Trace Counter Reload Value Register [n=0-3] */
+#define TRCCNTCTLR(n) (0x150 + (n) * 0x4) /* 32 Trace Counter Control Register [n=0-3] */
+#define TRCCNTVR(n) (0x160 + (n) * 0x4) /* 32 Trace Counter Value Register [n=0-3] */
+#define TRCIMSPEC(n) (0x1C0 + (n) * 0x4) /* Trace IMPLEMENTATION DEFINED register [n=0-7] */
+
+#define TRCIDR0(n) (0x1E0 + 0x4 * (n))
+#define TRCIDR8(n) (0x180 + 0x4 * ((n) - 8))
+#define TRCIDR(n) (((n) > 7) ? TRCIDR8(n) : TRCIDR0(n))
+#define TRCIDR1_TRCARCHMAJ_S 8
+#define TRCIDR1_TRCARCHMAJ_M (0xf << TRCIDR1_TRCARCHMAJ_S)
+#define TRCIDR1_TRCARCHMIN_S 4
+#define TRCIDR1_TRCARCHMIN_M (0xf << TRCIDR1_TRCARCHMIN_S)
+
+#define TRCRSCTLR(n) (0x200 + (n) * 0x4) /* Trace Resource Selection Control Register [n=2-31] */
+#define TRCSSCCR(n) (0x280 + (n) * 0x4) /* Trace Single-shot Comparator Control Register [n=0-7] */
+#define TRCSSCSR(n) (0x2A0 + (n) * 0x4) /* Trace Single-shot Comparator Status Register [n=0-7] */
+#define TRCSSPCICR(n) (0x2C0 + (n) * 0x4) /* Trace Single-shot PE Comparator Input Control [n=0-7] */
+#define TRCOSLAR 0x300 /* Management OS Lock Access Register */
+#define TRCOSLSR 0x304 /* Management OS Lock Status Register */
+#define TRCPDCR 0x310 /* Management PowerDown Control Register */
+#define TRCPDSR 0x314 /* Management PowerDown Status Register */
+#define TRCACVR(n) (0x400 + (n) * 0x8) /* Trace Address Comparator Value Register [n=0-15] */
+#define TRCACATR(n) (0x480 + (n) * 0x8) /* Trace Address Comparator Access Type Register [n=0-15] */
+#define TRCACATR_DTBM (1 << 21)
+#define TRCACATR_DATARANGE (1 << 20)
+#define TRCACATR_DATASIZE_S 18
+#define TRCACATR_DATASIZE_M (0x3 << TRCACATR_DATASIZE_S)
+#define TRCACATR_DATASIZE_B (0x0 << TRCACATR_DATASIZE_S)
+#define TRCACATR_DATASIZE_HW (0x1 << TRCACATR_DATASIZE_S)
+#define TRCACATR_DATASIZE_W (0x2 << TRCACATR_DATASIZE_S)
+#define TRCACATR_DATASIZE_DW (0x3 << TRCACATR_DATASIZE_S)
+#define TRCACATR_DATAMATCH_S 16
+#define TRCACATR_DATAMATCH_M (0x3 << TRCACATR_DATAMATCH_S)
+#define TRCACATR_EXLEVEL_S_S 8
+#define TRCACATR_EXLEVEL_S_M (0xf << TRCACATR_EXLEVEL_S_S)
+#define TRCACATR_EXLEVEL_S(n) (0x1 << ((n) + TRCACATR_EXLEVEL_S_S))
+#define TRCACATR_EXLEVEL_NS_S 12
+#define TRCACATR_EXLEVEL_NS_M (0xf << TRCACATR_EXLEVEL_NS_S)
+#define TRCACATR_EXLEVEL_NS(n) (0x1 << ((n) + TRCACATR_EXLEVEL_NS_S))
+#define TRCDVCVR(n) (0x500 + (n) * 0x8) /* Trace Data Value Comparator Value Register [n=0-7] */
+#define TRCDVCMR(n) (0x580 + (n) * 0x8) /* Trace Data Value Comparator Mask Register [n=0-7] */
+#define TRCCIDCVR(n) (0x600 + (n) * 0x8) /* Trace Context ID Comparator Value Register [n=0-7] */
+#define TRCVMIDCVR(n) (0x640 + (n) * 0x8) /* Trace Virtual context identifier Comparator Value [n=0-7] */
+#define TRCCIDCCTLR0 0x680 /* Trace Context ID Comparator Control Register 0 */
+#define TRCCIDCCTLR1 0x684 /* Trace Context ID Comparator Control Register 1 */
+#define TRCVMIDCCTLR0 0x688 /* Trace Virtual context identifier Comparator Control Register 0 */
+#define TRCVMIDCCTLR1 0x68C /* Trace Virtual context identifier Comparator Control Register 1 */
+#define TRCITCTRL 0xF00 /* Management Integration Mode Control register */
+#define TRCCLAIMSET 0xFA0 /* Trace Claim Tag Set register */
+#define TRCCLAIMCLR 0xFA4 /* Trace Claim Tag Clear register */
+#define TRCDEVAFF0 0xFA8 /* Management Device Affinity register 0 */
+#define TRCDEVAFF1 0xFAC /* Management Device Affinity register 1 */
+#define TRCLAR 0xFB0 /* Management Software Lock Access Register */
+#define TRCLSR 0xFB4 /* Management Software Lock Status Register */
+#define TRCAUTHSTATUS 0xFB8 /* Management Authentication Status register */
+#define TRCDEVARCH 0xFBC /* Management Device Architecture register */
+#define TRCDEVID 0xFC8 /* Management Device ID register */
+#define TRCDEVTYPE 0xFCC /* Management Device Type register */
+#define TRCPIDR4 0xFD0 /* Management Peripheral ID4 Register */
+#define TRCPIDR(n) (0xFE0 + (n) * 0x4) /* Management Peripheral IDn Register [n=0-3] */
+#define TRCPIDR567(n) (0xFD4 + ((n) - 5) * 0x4) /* Management Peripheral ID5 to Peripheral ID7 Registers */
+#define TRCCIDR(n) (0xFF0 + (n) * 0x4) /* Management Component IDn Register [n=0-3] */
+
+DECLARE_CLASS(etm_driver);
+
+struct etm_softc {
+ struct resource *res;
+ struct coresight_platform_data *pdata;
+};
+
+int etm_attach(device_t dev);
+
+#endif /* !_ARM64_CORESIGHT_ETM4X_H_ */
diff --git a/sys/arm64/coresight/coresight_etm4x_acpi.c b/sys/arm64/coresight/coresight_etm4x_acpi.c
new file mode 100644
index 000000000000..93dfd7a4f880
--- /dev/null
+++ b/sys/arm64/coresight/coresight_etm4x_acpi.c
@@ -0,0 +1,92 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory (Department of Computer Science and
+ * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
+ * DARPA SSITH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_acpi.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/memdesc.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <dev/acpica/acpivar.h>
+
+#include <arm64/coresight/coresight.h>
+#include <arm64/coresight/coresight_etm4x.h>
+
+static int
+etm_acpi_probe(device_t dev)
+{
+ static char *etm_ids[] = { "ARMHC500", NULL };
+ int error;
+
+ error = ACPI_ID_PROBE(device_get_parent(dev), dev, etm_ids, NULL);
+ if (error <= 0)
+ device_set_desc(dev, "ARM Embedded Trace Macrocell");
+
+ return (error);
+}
+
+static int
+etm_acpi_attach(device_t dev)
+{
+ struct etm_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->pdata = coresight_acpi_get_platform_data(dev);
+
+ return (etm_attach(dev));
+}
+
+static device_method_t etm_acpi_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, etm_acpi_probe),
+ DEVMETHOD(device_attach, etm_acpi_attach),
+
+ /* End */
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(etm, etm_acpi_driver, etm_acpi_methods,
+ sizeof(struct etm_softc), etm_driver);
+
+static devclass_t etm_acpi_devclass;
+
+EARLY_DRIVER_MODULE(etm, acpi, etm_acpi_driver, etm_acpi_devclass,
+ 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
diff --git a/sys/arm64/coresight/coresight_etm4x_fdt.c b/sys/arm64/coresight/coresight_etm4x_fdt.c
new file mode 100644
index 000000000000..b3418e538a47
--- /dev/null
+++ b/sys/arm64/coresight/coresight_etm4x_fdt.c
@@ -0,0 +1,94 @@
+/*-
+ * Copyright (c) 2018-2020 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by BAE Systems, the University of Cambridge
+ * Computer Laboratory, and Memorial University under DARPA/AFRL contract
+ * FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent Computing
+ * (TC) research program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <machine/bus.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <arm64/coresight/coresight.h>
+#include <arm64/coresight/coresight_etm4x.h>
+
+#include "coresight_if.h"
+
+static struct ofw_compat_data compat_data[] = {
+ { "arm,coresight-etm4x", 1 },
+ { NULL, 0 }
+};
+
+static int
+etm_fdt_probe(device_t dev)
+{
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "AArch64 Embedded Trace Macrocell");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+etm_fdt_attach(device_t dev)
+{
+ struct etm_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->pdata = coresight_fdt_get_platform_data(dev);
+
+ return (etm_attach(dev));
+}
+
+static device_method_t etm_fdt_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, etm_fdt_probe),
+ DEVMETHOD(device_attach, etm_fdt_attach),
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(etm, etm_fdt_driver, etm_fdt_methods,
+ sizeof(struct etm_softc), etm_driver);
+
+static devclass_t etm_fdt_devclass;
+
+EARLY_DRIVER_MODULE(etm, simplebus, etm_fdt_driver, etm_fdt_devclass,
+ 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
diff --git a/sys/arm64/coresight/coresight_fdt.c b/sys/arm64/coresight/coresight_fdt.c
new file mode 100644
index 000000000000..6e31a9f002ab
--- /dev/null
+++ b/sys/arm64/coresight/coresight_fdt.c
@@ -0,0 +1,153 @@
+/*-
+ * Copyright (c) 2018-2020 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <machine/bus.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <arm64/coresight/coresight.h>
+
+static int
+coresight_fdt_get_ports(phandle_t dev_node,
+ struct coresight_platform_data *pdata)
+{
+ phandle_t node, child;
+ pcell_t port_reg;
+ phandle_t xref;
+ char *name;
+ int ret;
+ phandle_t endpoint_child;
+ struct endpoint *endp;
+
+ child = ofw_bus_find_child(dev_node, "ports");
+ if (child)
+ node = child;
+ else
+ node = dev_node;
+
+ for (child = OF_child(node); child != 0; child = OF_peer(child)) {
+ ret = OF_getprop_alloc(child, "name", (void **)&name);
+ if (ret == -1)
+ continue;
+
+ if (strcasecmp(name, "port") == 0 ||
+ strncasecmp(name, "port@", 5) == 0) {
+ port_reg = -1;
+ OF_getencprop(child, "reg", (void *)&port_reg,
+ sizeof(port_reg));
+
+ endpoint_child = ofw_bus_find_child(child, "endpoint");
+ if (endpoint_child) {
+ if (OF_getencprop(endpoint_child,
+ "remote-endpoint", &xref,
+ sizeof(xref)) == -1) {
+ printf("failed\n");
+ continue;
+ }
+ endp = malloc(sizeof(struct endpoint),
+ M_CORESIGHT, M_WAITOK | M_ZERO);
+ endp->my_node = endpoint_child;
+ endp->their_node = OF_node_from_xref(xref);
+ endp->dev_node = dev_node;
+ endp->reg = port_reg;
+ if (OF_getproplen(endpoint_child,
+ "slave-mode") >= 0) {
+ pdata->in_ports++;
+ endp->input = 1;
+ } else
+ pdata->out_ports++;
+
+ mtx_lock(&pdata->mtx_lock);
+ TAILQ_INSERT_TAIL(&pdata->endpoints,
+ endp, link);
+ mtx_unlock(&pdata->mtx_lock);
+ }
+ }
+ }
+
+ return (0);
+}
+
+static int
+coresight_fdt_get_cpu(phandle_t node,
+ struct coresight_platform_data *pdata)
+{
+ phandle_t cpu_node;
+ pcell_t xref;
+ pcell_t cpu_reg;
+
+ if (OF_getencprop(node, "cpu", &xref, sizeof(xref)) != -1) {
+ cpu_node = OF_node_from_xref(xref);
+ if (OF_getencprop(cpu_node, "reg", (void *)&cpu_reg,
+ sizeof(cpu_reg)) > 0) {
+ pdata->cpu = cpu_reg;
+ return (0);
+ }
+ }
+
+ return (-1);
+}
+
+struct coresight_platform_data *
+coresight_fdt_get_platform_data(device_t dev)
+{
+ struct coresight_platform_data *pdata;
+ phandle_t node;
+
+ node = ofw_bus_get_node(dev);
+
+ pdata = malloc(sizeof(struct coresight_platform_data),
+ M_CORESIGHT, M_WAITOK | M_ZERO);
+ pdata->bus_type = CORESIGHT_BUS_FDT;
+
+ mtx_init(&pdata->mtx_lock, "Coresight Platform Data", NULL, MTX_DEF);
+ TAILQ_INIT(&pdata->endpoints);
+
+ coresight_fdt_get_cpu(node, pdata);
+ coresight_fdt_get_ports(node, pdata);
+
+ if (bootverbose)
+ printf("Total ports: in %d out %d\n",
+ pdata->in_ports, pdata->out_ports);
+
+ return (pdata);
+}
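
The parser above records each graph connection as a struct endpoint on pdata->endpoints, marking input ports via the legacy "slave-mode" property. A small sketch of how a consumer might walk that list, using only the fields and lock referenced by the code above; the helper itself is hypothetical.

/*
 * Illustrative sketch (not part of the patch): walk the endpoint list
 * built by coresight_fdt_get_platform_data() and count output ports.
 * Locking mirrors the mtx usage in coresight_fdt_get_ports().
 */
static int
coresight_count_out_endpoints(struct coresight_platform_data *pdata)
{
	struct endpoint *endp;
	int count;

	count = 0;
	mtx_lock(&pdata->mtx_lock);
	TAILQ_FOREACH(endp, &pdata->endpoints, link) {
		if (endp->input == 0)
			count++;
	}
	mtx_unlock(&pdata->mtx_lock);

	return (count);
}
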
diff --git a/sys/arm64/coresight/coresight_funnel.c b/sys/arm64/coresight/coresight_funnel.c
new file mode 100644
index 000000000000..2eb588f18366
--- /dev/null
+++ b/sys/arm64/coresight/coresight_funnel.c
@@ -0,0 +1,144 @@
+/*-
+ * Copyright (c) 2018-2020 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by BAE Systems, the University of Cambridge
+ * Computer Laboratory, and Memorial University under DARPA/AFRL contract
+ * FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent Computing
+ * (TC) research program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <machine/bus.h>
+
+#include <arm64/coresight/coresight.h>
+#include <arm64/coresight/coresight_funnel.h>
+
+#include "coresight_if.h"
+
+#define FUNNEL_DEBUG
+#undef FUNNEL_DEBUG
+
+#ifdef FUNNEL_DEBUG
+#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#define dprintf(fmt, ...)
+#endif
+
+static struct resource_spec funnel_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { -1, 0 }
+};
+
+static int
+funnel_init(device_t dev)
+{
+ struct funnel_softc *sc;
+
+ sc = device_get_softc(dev);
+ if (sc->hwtype == HWTYPE_STATIC_FUNNEL)
+ return (0);
+
+ /* Unlock Coresight */
+ bus_write_4(sc->res, CORESIGHT_LAR, CORESIGHT_UNLOCK);
+ dprintf("Device ID: %x\n", bus_read_4(sc->res, FUNNEL_DEVICEID));
+
+ return (0);
+}
+
+static int
+funnel_enable(device_t dev, struct endpoint *endp,
+ struct coresight_event *event)
+{
+ struct funnel_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+ if (sc->hwtype == HWTYPE_STATIC_FUNNEL)
+ return (0);
+
+ reg = bus_read_4(sc->res, FUNNEL_FUNCTL);
+ reg &= ~(FUNCTL_HOLDTIME_MASK);
+ reg |= (7 << FUNCTL_HOLDTIME_SHIFT);
+ reg |= (1 << endp->reg);
+ bus_write_4(sc->res, FUNNEL_FUNCTL, reg);
+
+ return (0);
+}
+
+static void
+funnel_disable(device_t dev, struct endpoint *endp,
+ struct coresight_event *event)
+{
+ struct funnel_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+ if (sc->hwtype == HWTYPE_STATIC_FUNNEL)
+ return;
+
+ reg = bus_read_4(sc->res, FUNNEL_FUNCTL);
+ reg &= ~(1 << endp->reg);
+ bus_write_4(sc->res, FUNNEL_FUNCTL, reg);
+}
+
+int
+funnel_attach(device_t dev)
+{
+ struct coresight_desc desc;
+ struct funnel_softc *sc;
+
+ sc = device_get_softc(dev);
+ if (sc->hwtype == HWTYPE_FUNNEL &&
+ bus_alloc_resources(dev, funnel_spec, &sc->res) != 0) {
+ device_printf(dev, "cannot allocate resources for device\n");
+ return (ENXIO);
+ }
+
+ desc.pdata = sc->pdata;
+ desc.dev = dev;
+ desc.dev_type = CORESIGHT_FUNNEL;
+ coresight_register(&desc);
+
+ return (0);
+}
+
+static device_method_t funnel_methods[] = {
+ /* Coresight interface */
+ DEVMETHOD(coresight_init, funnel_init),
+ DEVMETHOD(coresight_enable, funnel_enable),
+ DEVMETHOD(coresight_disable, funnel_disable),
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(funnel, funnel_driver, funnel_methods,
+ sizeof(struct funnel_softc));
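
funnel_enable() programs FUNNEL_FUNCTL by inserting a hold time into bits [11:8] and setting the enable bit for the slave port given by the endpoint's reg value. A worked sketch of the resulting register value follows; the helper and the hand-computed constant are illustrations, not part of the patch, and the sketch assumes the includes of coresight_funnel.c above.

/*
 * Illustrative sketch (not part of the patch): the FUNNEL_FUNCTL value
 * funnel_enable() would program for a given slave port, starting from a
 * register value of 0.
 */
static uint32_t
funnel_functl_for_port(int port)
{
	uint32_t reg;

	reg = 0;
	reg &= ~FUNCTL_HOLDTIME_MASK;
	reg |= (7 << FUNCTL_HOLDTIME_SHIFT);	/* hold time, as in funnel_enable() */
	reg |= (1 << port);			/* enable this slave port */

	/* For port == 2 this evaluates to 0x704 (hand-computed). */
	return (reg);
}
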
diff --git a/sys/arm64/coresight/coresight_funnel.h b/sys/arm64/coresight/coresight_funnel.h
new file mode 100644
index 000000000000..a064c187b5bb
--- /dev/null
+++ b/sys/arm64/coresight/coresight_funnel.h
@@ -0,0 +1,80 @@
+/*-
+ * Copyright (c) 2018-2020 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by BAE Systems, the University of Cambridge
+ * Computer Laboratory, and Memorial University under DARPA/AFRL contract
+ * FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent Computing
+ * (TC) research program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ARM64_CORESIGHT_CORESIGHT_FUNNEL_H_
+#define _ARM64_CORESIGHT_CORESIGHT_FUNNEL_H_
+
+#define FUNNEL_FUNCTL 0x000 /* Funnel Control Register */
+#define FUNCTL_HOLDTIME_SHIFT 8
+#define FUNCTL_HOLDTIME_MASK (0xf << FUNCTL_HOLDTIME_SHIFT)
+#define FUNNEL_PRICTL 0x004 /* Priority Control Register */
+#define FUNNEL_ITATBDATA0 0xEEC /* Integration Register, ITATBDATA0 */
+#define FUNNEL_ITATBCTR2 0xEF0 /* Integration Register, ITATBCTR2 */
+#define FUNNEL_ITATBCTR1 0xEF4 /* Integration Register, ITATBCTR1 */
+#define FUNNEL_ITATBCTR0 0xEF8 /* Integration Register, ITATBCTR0 */
+#define FUNNEL_IMCR 0xF00 /* Integration Mode Control Register */
+#define FUNNEL_CTSR 0xFA0 /* Claim Tag Set Register */
+#define FUNNEL_CTCR 0xFA4 /* Claim Tag Clear Register */
+#define FUNNEL_LOCKACCESS 0xFB0 /* Lock Access */
+#define FUNNEL_LOCKSTATUS 0xFB4 /* Lock Status */
+#define FUNNEL_AUTHSTATUS 0xFB8 /* Authentication status */
+#define FUNNEL_DEVICEID 0xFC8 /* Device ID */
+#define FUNNEL_DEVICETYPE 0xFCC /* Device Type Identifier */
+#define FUNNEL_PERIPH4 0xFD0 /* Peripheral ID4 */
+#define FUNNEL_PERIPH5 0xFD4 /* Peripheral ID5 */
+#define FUNNEL_PERIPH6 0xFD8 /* Peripheral ID6 */
+#define FUNNEL_PERIPH7 0xFDC /* Peripheral ID7 */
+#define FUNNEL_PERIPH0 0xFE0 /* Peripheral ID0 */
+#define FUNNEL_PERIPH1 0xFE4 /* Peripheral ID1 */
+#define FUNNEL_PERIPH2 0xFE8 /* Peripheral ID2 */
+#define FUNNEL_PERIPH3 0xFEC /* Peripheral ID3 */
+#define FUNNEL_COMP0 0xFF0 /* Component ID0 */
+#define FUNNEL_COMP1 0xFF4 /* Component ID1 */
+#define FUNNEL_COMP2 0xFF8 /* Component ID2 */
+#define FUNNEL_COMP3 0xFFC /* Component ID3 */
+
+#define HWTYPE_NONE 0
+#define HWTYPE_FUNNEL 1
+#define HWTYPE_STATIC_FUNNEL 2
+
+DECLARE_CLASS(funnel_driver);
+
+struct funnel_softc {
+ struct resource *res;
+ struct coresight_platform_data *pdata;
+ int hwtype;
+};
+
+int funnel_attach(device_t dev);
+
+#endif /* !_ARM64_CORESIGHT_CORESIGHT_FUNNEL_H_ */
diff --git a/sys/arm64/coresight/coresight_funnel_acpi.c b/sys/arm64/coresight/coresight_funnel_acpi.c
new file mode 100644
index 000000000000..0757af3d1ce0
--- /dev/null
+++ b/sys/arm64/coresight/coresight_funnel_acpi.c
@@ -0,0 +1,108 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory (Department of Computer Science and
+ * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
+ * DARPA SSITH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_acpi.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/memdesc.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <dev/acpica/acpivar.h>
+
+#include <arm64/coresight/coresight.h>
+#include <arm64/coresight/coresight_funnel.h>
+
+static int
+funnel_acpi_probe(device_t dev)
+{
+ struct funnel_softc *sc;
+ static char *static_funnel_ids[] = { "ARMHC9FE", NULL };
+ static char *funnel_ids[] = { "ARMHC9FF", NULL };
+ int error;
+
+ sc = device_get_softc(dev);
+
+ error = ACPI_ID_PROBE(device_get_parent(dev), dev,
+ static_funnel_ids, NULL);
+ if (error <= 0) {
+ sc->hwtype = HWTYPE_STATIC_FUNNEL;
+ device_set_desc(dev, "ARM Coresight Static Funnel");
+ return (error);
+ }
+
+ error = ACPI_ID_PROBE(device_get_parent(dev), dev,
+ funnel_ids, NULL);
+ if (error <= 0) {
+ sc->hwtype = HWTYPE_FUNNEL;
+ device_set_desc(dev, "ARM Coresight Funnel");
+ return (error);
+ }
+
+ return (ENXIO);
+}
+
+static int
+funnel_acpi_attach(device_t dev)
+{
+ struct funnel_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->pdata = coresight_acpi_get_platform_data(dev);
+
+ return (funnel_attach(dev));
+}
+
+static device_method_t funnel_acpi_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, funnel_acpi_probe),
+ DEVMETHOD(device_attach, funnel_acpi_attach),
+
+ /* End */
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(funnel, funnel_acpi_driver, funnel_acpi_methods,
+ sizeof(struct funnel_softc), funnel_driver);
+
+static devclass_t funnel_acpi_devclass;
+
+EARLY_DRIVER_MODULE(funnel, acpi, funnel_acpi_driver, funnel_acpi_devclass,
+ 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
diff --git a/sys/arm64/coresight/coresight_funnel_fdt.c b/sys/arm64/coresight/coresight_funnel_fdt.c
new file mode 100644
index 000000000000..c3cbad2224d8
--- /dev/null
+++ b/sys/arm64/coresight/coresight_funnel_fdt.c
@@ -0,0 +1,106 @@
+/*-
+ * Copyright (c) 2018-2020 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by BAE Systems, the University of Cambridge
+ * Computer Laboratory, and Memorial University under DARPA/AFRL contract
+ * FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent Computing
+ * (TC) research program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <machine/bus.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <arm64/coresight/coresight.h>
+#include <arm64/coresight/coresight_funnel.h>
+
+#include "coresight_if.h"
+
+static struct ofw_compat_data compat_data[] = {
+ { "arm,coresight-funnel", HWTYPE_FUNNEL },
+ { "arm,coresight-static-funnel", HWTYPE_STATIC_FUNNEL },
+ { NULL, HWTYPE_NONE }
+};
+
+static int
+funnel_fdt_probe(device_t dev)
+{
+ struct funnel_softc *sc;
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ sc = device_get_softc(dev);
+
+ sc->hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
+ switch (sc->hwtype) {
+ case HWTYPE_FUNNEL:
+ device_set_desc(dev, "Coresight Funnel");
+ break;
+ case HWTYPE_STATIC_FUNNEL:
+ device_set_desc(dev, "Coresight Static Funnel");
+ break;
+ default:
+ return (ENXIO);
+ }
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+funnel_fdt_attach(device_t dev)
+{
+ struct funnel_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->pdata = coresight_fdt_get_platform_data(dev);
+
+ return (funnel_attach(dev));
+}
+
+static device_method_t funnel_fdt_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, funnel_fdt_probe),
+ DEVMETHOD(device_attach, funnel_fdt_attach),
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(funnel, funnel_fdt_driver, funnel_fdt_methods,
+ sizeof(struct funnel_softc), funnel_driver);
+
+static devclass_t funnel_fdt_devclass;
+
+EARLY_DRIVER_MODULE(funnel, simplebus, funnel_fdt_driver, funnel_fdt_devclass,
+ 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
diff --git a/sys/arm64/coresight/coresight_if.m b/sys/arm64/coresight/coresight_if.m
new file mode 100644
index 000000000000..e311c4c8f5cb
--- /dev/null
+++ b/sys/arm64/coresight/coresight_if.m
@@ -0,0 +1,58 @@
+#-
+# Copyright (c) 2018 Ruslan Bukin <br@bsdpad.com>
+# All rights reserved.
+#
+# This software was developed by SRI International and the University of
+# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+# ("CTSRD"), as part of the DARPA CRASH research programme.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $FreeBSD$
+#
+
+#include <machine/bus.h>
+#include <arm64/coresight/coresight.h>
+
+INTERFACE coresight;
+
+METHOD int init {
+ device_t dev;
+};
+
+METHOD int enable {
+ device_t dev;
+ struct endpoint *endp;
+ struct coresight_event *event;
+};
+
+METHOD void disable {
+ device_t dev;
+ struct endpoint *endp;
+ struct coresight_event *event;
+};
+
+METHOD int read {
+ device_t dev;
+ struct endpoint *endp;
+ struct coresight_event *event;
+};
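
The interface description above is compiled by the kernel build into a generated coresight_if.h, which provides CORESIGHT_INIT(), CORESIGHT_ENABLE(), CORESIGHT_DISABLE() and CORESIGHT_READ() wrappers around these methods. A hedged sketch of how the coresight core might drive a registered component through those wrappers; the helper and its error handling are assumptions, not code from this patch.

/*
 * Illustrative sketch (not part of the patch): invoking the generated
 * coresight interface methods on a component device.
 */
#include "coresight_if.h"

static int
coresight_component_start(device_t dev, struct endpoint *endp,
    struct coresight_event *event)
{
	int error;

	error = CORESIGHT_INIT(dev);
	if (error != 0)
		return (error);

	return (CORESIGHT_ENABLE(dev, endp, event));
}
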
diff --git a/sys/arm64/coresight/coresight_replicator.c b/sys/arm64/coresight/coresight_replicator.c
new file mode 100644
index 000000000000..e7b4fdb770f6
--- /dev/null
+++ b/sys/arm64/coresight/coresight_replicator.c
@@ -0,0 +1,128 @@
+/*-
+ * Copyright (c) 2018-2020 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by BAE Systems, the University of Cambridge
+ * Computer Laboratory, and Memorial University under DARPA/AFRL contract
+ * FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent Computing
+ * (TC) research program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <machine/bus.h>
+
+#include <arm64/coresight/coresight.h>
+#include <arm64/coresight/coresight_replicator.h>
+
+#include "coresight_if.h"
+
+static struct resource_spec replicator_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { -1, 0 }
+};
+
+static int
+replicator_init(device_t dev)
+{
+ struct replicator_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ /* Unlock Coresight */
+ bus_write_4(sc->res, CORESIGHT_LAR, CORESIGHT_UNLOCK);
+
+ return (0);
+}
+
+static int
+replicator_enable(device_t dev, struct endpoint *endp,
+ struct coresight_event *event)
+{
+ struct replicator_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ /* Enable the port. Keep the other port disabled */
+ if (endp->reg == 0) {
+ bus_write_4(sc->res, REPLICATOR_IDFILTER0, 0x00);
+ bus_write_4(sc->res, REPLICATOR_IDFILTER1, 0xff);
+ } else {
+ bus_write_4(sc->res, REPLICATOR_IDFILTER0, 0xff);
+ bus_write_4(sc->res, REPLICATOR_IDFILTER1, 0x00);
+ }
+
+ return (0);
+}
+
+static void
+replicator_disable(device_t dev, struct endpoint *endp,
+ struct coresight_event *event)
+{
+ struct replicator_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ bus_write_4(sc->res, REPLICATOR_IDFILTER0, 0xff);
+ bus_write_4(sc->res, REPLICATOR_IDFILTER1, 0xff);
+}
+
+int
+replicator_attach(device_t dev)
+{
+ struct replicator_softc *sc;
+ struct coresight_desc desc;
+
+ sc = device_get_softc(dev);
+
+ if (bus_alloc_resources(dev, replicator_spec, &sc->res) != 0) {
+ device_printf(dev, "cannot allocate resources for device\n");
+ return (ENXIO);
+ }
+
+ desc.pdata = sc->pdata;
+ desc.dev = dev;
+ desc.dev_type = CORESIGHT_DYNAMIC_REPLICATOR;
+ coresight_register(&desc);
+
+ return (0);
+}
+
+static device_method_t replicator_methods[] = {
+ /* Coresight interface */
+ DEVMETHOD(coresight_init, replicator_init),
+ DEVMETHOD(coresight_enable, replicator_enable),
+ DEVMETHOD(coresight_disable, replicator_disable),
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(replicator, replicator_driver, replicator_methods,
+ sizeof(struct replicator_softc));
diff --git a/sys/arm64/coresight/coresight_replicator.h b/sys/arm64/coresight/coresight_replicator.h
new file mode 100644
index 000000000000..0b47eb5c9cd7
--- /dev/null
+++ b/sys/arm64/coresight/coresight_replicator.h
@@ -0,0 +1,48 @@
+/*-
+ * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ARM64_CORESIGHT_CORESIGHT_REPLICATOR_H_
+#define _ARM64_CORESIGHT_CORESIGHT_REPLICATOR_H_
+
+#define REPLICATOR_IDFILTER0 0x00
+#define REPLICATOR_IDFILTER1 0x04
+
+DECLARE_CLASS(replicator_driver);
+
+struct replicator_softc {
+ struct resource *res;
+ struct coresight_platform_data *pdata;
+};
+
+int replicator_attach(device_t dev);
+
+#endif /* !_ARM64_CORESIGHT_CORESIGHT_REPLICATOR_H_ */
diff --git a/sys/arm64/coresight/coresight_replicator_acpi.c b/sys/arm64/coresight/coresight_replicator_acpi.c
new file mode 100644
index 000000000000..7cc08930c81f
--- /dev/null
+++ b/sys/arm64/coresight/coresight_replicator_acpi.c
@@ -0,0 +1,94 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory (Department of Computer Science and
+ * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
+ * DARPA SSITH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_acpi.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/memdesc.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <dev/acpica/acpivar.h>
+
+#include <arm64/coresight/coresight.h>
+#include <arm64/coresight/coresight_replicator.h>
+
+static int
+replicator_acpi_probe(device_t dev)
+{
+ static char *replicator_ids[] = { "ARMHC98D", NULL };
+ int error;
+
+ error = ACPI_ID_PROBE(device_get_parent(dev), dev,
+ replicator_ids, NULL);
+ if (error <= 0)
+ device_set_desc(dev, "ARM Coresight Replicator");
+
+ return (error);
+}
+
+static int
+replicator_acpi_attach(device_t dev)
+{
+ struct replicator_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->pdata = coresight_acpi_get_platform_data(dev);
+
+ return (replicator_attach(dev));
+}
+
+static device_method_t replicator_acpi_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, replicator_acpi_probe),
+ DEVMETHOD(device_attach, replicator_acpi_attach),
+
+ /* End */
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(replicator, replicator_acpi_driver, replicator_acpi_methods,
+ sizeof(struct replicator_softc), replicator_driver);
+
+static devclass_t replicator_acpi_devclass;
+
+EARLY_DRIVER_MODULE(replicator, acpi, replicator_acpi_driver,
+ replicator_acpi_devclass, 0, 0,
+ BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
diff --git a/sys/arm64/coresight/coresight_replicator_fdt.c b/sys/arm64/coresight/coresight_replicator_fdt.c
new file mode 100644
index 000000000000..6744880369b1
--- /dev/null
+++ b/sys/arm64/coresight/coresight_replicator_fdt.c
@@ -0,0 +1,95 @@
+/*-
+ * Copyright (c) 2018-2020 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by BAE Systems, the University of Cambridge
+ * Computer Laboratory, and Memorial University under DARPA/AFRL contract
+ * FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent Computing
+ * (TC) research program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <machine/bus.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <arm64/coresight/coresight.h>
+#include <arm64/coresight/coresight_replicator.h>
+
+#include "coresight_if.h"
+
+static struct ofw_compat_data compat_data[] = {
+ { "arm,coresight-dynamic-replicator", 1 },
+ { NULL, 0 }
+};
+
+static int
+replicator_fdt_probe(device_t dev)
+{
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "ARM Coresight Replicator");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+replicator_fdt_attach(device_t dev)
+{
+ struct replicator_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->pdata = coresight_fdt_get_platform_data(dev);
+
+ return (replicator_attach(dev));
+}
+
+static device_method_t replicator_fdt_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, replicator_fdt_probe),
+ DEVMETHOD(device_attach, replicator_fdt_attach),
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(replicator, replicator_fdt_driver, replicator_fdt_methods,
+ sizeof(struct replicator_softc), replicator_driver);
+
+static devclass_t replicator_fdt_devclass;
+
+EARLY_DRIVER_MODULE(replicator, simplebus, replicator_fdt_driver,
+ replicator_fdt_devclass, 0, 0,
+ BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
diff --git a/sys/arm64/coresight/coresight_tmc.c b/sys/arm64/coresight/coresight_tmc.c
new file mode 100644
index 000000000000..262e70ea4495
--- /dev/null
+++ b/sys/arm64/coresight/coresight_tmc.c
@@ -0,0 +1,349 @@
+/*-
+ * Copyright (c) 2018-2020 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <machine/bus.h>
+
+#include <arm64/coresight/coresight.h>
+#include <arm64/coresight/coresight_tmc.h>
+
+#include "coresight_if.h"
+
+#define TMC_DEBUG
+#undef TMC_DEBUG
+
+#ifdef TMC_DEBUG
+#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#define dprintf(fmt, ...)
+#endif
+
+static struct resource_spec tmc_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { -1, 0 }
+};
+
+static int
+tmc_start(device_t dev)
+{
+ struct tmc_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+
+ if (bus_read_4(sc->res, TMC_CTL) & CTL_TRACECAPTEN)
+ return (-1);
+
+ /* Enable TMC */
+ bus_write_4(sc->res, TMC_CTL, CTL_TRACECAPTEN);
+ if ((bus_read_4(sc->res, TMC_CTL) & CTL_TRACECAPTEN) == 0)
+ panic("Not enabled\n");
+
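+	/*
+	 * STS.TMCReady is clear while trace capture is running; poll until
+	 * the TMC has actually left the disabled state.
+	 */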
+ do {
+ reg = bus_read_4(sc->res, TMC_STS);
+	} while ((reg & STS_TMCREADY) != 0);
+
+ if ((bus_read_4(sc->res, TMC_CTL) & CTL_TRACECAPTEN) == 0)
+ panic("Not enabled\n");
+
+ return (0);
+}
+
+static int
+tmc_stop(device_t dev)
+{
+ struct tmc_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+
+ reg = bus_read_4(sc->res, TMC_CTL);
+ reg &= ~CTL_TRACECAPTEN;
+ bus_write_4(sc->res, TMC_CTL, reg);
+
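+	/*
+	 * Once TraceCaptEn is cleared the TMC drains its FIFO; STS.TMCReady
+	 * is set when the drain and flush have completed.
+	 */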
+ do {
+ reg = bus_read_4(sc->res, TMC_STS);
+	} while ((reg & STS_TMCREADY) == 0);
+
+ return (0);
+}
+
+static int
+tmc_configure_etf(device_t dev)
+{
+ struct tmc_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+
+ do {
+ reg = bus_read_4(sc->res, TMC_STS);
+ } while ((reg & STS_TMCREADY) == 0);
+
+ bus_write_4(sc->res, TMC_MODE, MODE_HW_FIFO);
+ bus_write_4(sc->res, TMC_FFCR, FFCR_EN_FMT | FFCR_EN_TI);
+
+ tmc_start(dev);
+
+ dprintf("%s: STS %x, CTL %x, RSZ %x, RRP %x, RWP %x, "
+ "LBUFLEVEL %x, CBUFLEVEL %x\n", __func__,
+ bus_read_4(sc->res, TMC_STS),
+ bus_read_4(sc->res, TMC_CTL),
+ bus_read_4(sc->res, TMC_RSZ),
+ bus_read_4(sc->res, TMC_RRP),
+ bus_read_4(sc->res, TMC_RWP),
+ bus_read_4(sc->res, TMC_CBUFLEVEL),
+ bus_read_4(sc->res, TMC_LBUFLEVEL));
+
+ return (0);
+}
+
+static int
+tmc_configure_etr(device_t dev, struct endpoint *endp,
+ struct coresight_event *event)
+{
+ struct tmc_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+
+ tmc_stop(dev);
+
+ do {
+ reg = bus_read_4(sc->res, TMC_STS);
+ } while ((reg & STS_TMCREADY) == 0);
+
+ /* Configure TMC */
+ bus_write_4(sc->res, TMC_MODE, MODE_CIRCULAR_BUFFER);
+
+ reg = AXICTL_PROT_CTRL_BIT1;
+ reg |= AXICTL_WRBURSTLEN_16;
+
+ /*
+ * SG operation is broken on DragonBoard 410c
+ * reg |= AXICTL_SG_MODE;
+ */
+
+ reg |= AXICTL_AXCACHE_OS;
+ bus_write_4(sc->res, TMC_AXICTL, reg);
+
+ reg = FFCR_EN_FMT | FFCR_EN_TI | FFCR_FON_FLIN |
+ FFCR_FON_TRIG_EVT | FFCR_TRIGON_TRIGIN;
+ bus_write_4(sc->res, TMC_FFCR, reg);
+
+ bus_write_4(sc->res, TMC_TRG, 8);
+
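+	/* Program the DMA buffer: base address and size in 32-bit words. */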
+ bus_write_4(sc->res, TMC_DBALO, event->etr.low);
+ bus_write_4(sc->res, TMC_DBAHI, event->etr.high);
+ bus_write_4(sc->res, TMC_RSZ, event->etr.bufsize / 4);
+
+ bus_write_4(sc->res, TMC_RRP, event->etr.low);
+ bus_write_4(sc->res, TMC_RWP, event->etr.low);
+
+ reg = bus_read_4(sc->res, TMC_STS);
+ reg &= ~STS_FULL;
+ bus_write_4(sc->res, TMC_STS, reg);
+
+ tmc_start(dev);
+
+ return (0);
+}
+
+static int
+tmc_init(device_t dev)
+{
+ struct tmc_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+
+ /* Unlock Coresight */
+ bus_write_4(sc->res, CORESIGHT_LAR, CORESIGHT_UNLOCK);
+
+ /* Unlock TMC */
+ bus_write_4(sc->res, TMC_LAR, CORESIGHT_UNLOCK);
+
+ reg = bus_read_4(sc->res, TMC_DEVID);
+ reg &= DEVID_CONFIGTYPE_M;
+ switch (reg) {
+ case DEVID_CONFIGTYPE_ETR:
+ sc->dev_type = CORESIGHT_ETR;
+		dprintf("%s: ETR configuration found\n", __func__);
+ break;
+ case DEVID_CONFIGTYPE_ETF:
+ sc->dev_type = CORESIGHT_ETF;
+		dprintf("%s: ETF configuration found\n", __func__);
+ if (sc->etf_configured == false) {
+ tmc_configure_etf(dev);
+ sc->etf_configured = true;
+ }
+ break;
+ default:
+ sc->dev_type = CORESIGHT_UNKNOWN;
+ break;
+ }
+
+ return (0);
+}
+
+static int
+tmc_enable(device_t dev, struct endpoint *endp,
+ struct coresight_event *event)
+{
+ struct tmc_softc *sc;
+ uint32_t nev;
+
+ sc = device_get_softc(dev);
+
+ if (sc->dev_type == CORESIGHT_ETF)
+ return (0);
+
+ KASSERT(sc->dev_type == CORESIGHT_ETR,
+ ("Wrong dev_type"));
+
+ /*
+	 * Multiple CPUs can call this at the same time, but only one
+	 * running configuration is allowed.
+ */
+
+ if (event->etr.flags & ETR_FLAG_ALLOCATE) {
+ event->etr.flags &= ~ETR_FLAG_ALLOCATE;
+ nev = atomic_fetchadd_int(&sc->nev, 1);
+ if (nev == 0) {
+ sc->event = event;
+ tmc_stop(dev);
+ tmc_configure_etr(dev, endp, event);
+ tmc_start(dev);
+ }
+ }
+
+ return (0);
+}
+
+static void
+tmc_disable(device_t dev, struct endpoint *endp,
+ struct coresight_event *event)
+{
+ struct tmc_softc *sc;
+ uint32_t nev;
+
+ sc = device_get_softc(dev);
+
+ /* ETF configuration is static */
+ if (sc->dev_type == CORESIGHT_ETF)
+ return;
+
+ KASSERT(sc->dev_type == CORESIGHT_ETR, ("Wrong dev_type"));
+
+ if (event->etr.flags & ETR_FLAG_RELEASE) {
+ event->etr.flags &= ~ETR_FLAG_RELEASE;
+ nev = atomic_fetchadd_int(&sc->nev, -1);
+ if (nev == 1) {
+ tmc_stop(dev);
+ sc->event = NULL;
+ }
+ }
+}
+
+static int
+tmc_read(device_t dev, struct endpoint *endp,
+ struct coresight_event *event)
+{
+ struct tmc_softc *sc;
+ uint32_t cur_ptr;
+
+ sc = device_get_softc(dev);
+
+ if (sc->dev_type == CORESIGHT_ETF)
+ return (0);
+
+ /*
+	 * Ensure the event we are reading information for is the
+	 * currently configured one.
+ */
+ if (sc->event != event)
+ return (0);
+
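+	/*
+	 * If the buffer has wrapped, start a new cycle; otherwise report the
+	 * offset of the current write pointer from the buffer base.
+	 */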
+ if (bus_read_4(sc->res, TMC_STS) & STS_FULL) {
+ event->etr.offset = 0;
+ event->etr.cycle++;
+ tmc_stop(dev);
+ tmc_start(dev);
+ } else {
+ cur_ptr = bus_read_4(sc->res, TMC_RWP);
+ event->etr.offset = (cur_ptr - event->etr.low);
+ }
+
+ return (0);
+}
+
+int
+tmc_attach(device_t dev)
+{
+ struct coresight_desc desc;
+ struct tmc_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ if (bus_alloc_resources(dev, tmc_spec, &sc->res) != 0) {
+ device_printf(dev, "cannot allocate resources for device\n");
+ return (ENXIO);
+ }
+
+ desc.pdata = sc->pdata;
+ desc.dev = dev;
+ desc.dev_type = CORESIGHT_TMC;
+ coresight_register(&desc);
+
+ return (0);
+}
+
+static device_method_t tmc_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_attach, tmc_attach),
+
+ /* Coresight interface */
+ DEVMETHOD(coresight_init, tmc_init),
+ DEVMETHOD(coresight_enable, tmc_enable),
+ DEVMETHOD(coresight_disable, tmc_disable),
+ DEVMETHOD(coresight_read, tmc_read),
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(tmc, tmc_driver, tmc_methods, sizeof(struct tmc_softc));
diff --git a/sys/arm64/coresight/coresight_tmc.h b/sys/arm64/coresight/coresight_tmc.h
new file mode 100644
index 000000000000..051fb63f58f9
--- /dev/null
+++ b/sys/arm64/coresight/coresight_tmc.h
@@ -0,0 +1,137 @@
+/*-
+ * Copyright (c) 2018-2020 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ARM64_CORESIGHT_CORESIGHT_TMC_H_
+#define _ARM64_CORESIGHT_CORESIGHT_TMC_H_
+
+#define TMC_RSZ 0x004 /* RAM Size Register */
+#define TMC_STS 0x00C /* Status Register */
+#define STS_MEMERR (1 << 5)
+#define STS_EMPTY (1 << 4)
+#define STS_FTEMPTY (1 << 3)
+#define STS_TMCREADY (1 << 2)
+#define STS_TRIGGERED (1 << 1)
+#define STS_FULL (1 << 0)
+#define TMC_RRD 0x010 /* RAM Read Data Register */
+#define TMC_RRP 0x014 /* RAM Read Pointer Register */
+#define TMC_RWP 0x018 /* RAM Write Pointer Register */
+#define TMC_TRG 0x01C /* Trigger Counter Register */
+#define TMC_CTL 0x020 /* Control Register */
+#define CTL_TRACECAPTEN (1 << 0) /* Controls trace capture. */
+#define TMC_RWD 0x024 /* RAM Write Data Register */
+#define TMC_MODE 0x028 /* Mode Register */
+#define MODE_HW_FIFO 2
+#define MODE_SW_FIFO 1
+#define MODE_CIRCULAR_BUFFER 0
+#define TMC_LBUFLEVEL 0x02C /* Latched Buffer Fill Level */
+#define TMC_CBUFLEVEL 0x030 /* Current Buffer Fill Level */
+#define TMC_BUFWM 0x034 /* Buffer Level Water Mark */
+#define TMC_RRPHI 0x038 /* RAM Read Pointer High Register */
+#define TMC_RWPHI 0x03C /* RAM Write Pointer High Register */
+#define TMC_AXICTL 0x110 /* AXI Control Register */
+#define AXICTL_WRBURSTLEN_S 8
+#define AXICTL_WRBURSTLEN_M (0xf << AXICTL_WRBURSTLEN_S)
+#define AXICTL_WRBURSTLEN_16 (0xf << AXICTL_WRBURSTLEN_S)
+#define AXICTL_SG_MODE (1 << 7) /* Scatter Gather Mode */
+#define AXICTL_CACHE_CTRL_BIT3 (1 << 5)
+#define AXICTL_CACHE_CTRL_BIT2 (1 << 4)
+#define AXICTL_CACHE_CTRL_BIT1 (1 << 3)
+#define AXICTL_CACHE_CTRL_BIT0 (1 << 2)
+#define AXICTL_AXCACHE_OS (0xf << 2)
+#define AXICTL_PROT_CTRL_BIT1 (1 << 1)
+#define AXICTL_PROT_CTRL_BIT0 (1 << 0)
+#define TMC_DBALO 0x118 /* Data Buffer Address Low Register */
+#define TMC_DBAHI 0x11C /* Data Buffer Address High Register */
+#define TMC_FFSR 0x300 /* Formatter and Flush Status Register */
+#define TMC_FFCR 0x304 /* Formatter and Flush Control Register */
+#define FFCR_EN_FMT (1 << 0)
+#define FFCR_EN_TI (1 << 1)
+#define FFCR_FON_FLIN (1 << 4)
+#define FFCR_FON_TRIG_EVT (1 << 5)
+#define FFCR_FLUSH_MAN (1 << 6)
+#define FFCR_TRIGON_TRIGIN (1 << 8)
+#define TMC_PSCR 0x308 /* Periodic Synchronization Counter Register */
+#define TMC_ITATBMDATA0 0xED0 /* Integration Test ATB Master Data Register 0 */
+#define TMC_ITATBMCTR2 0xED4 /* Integration Test ATB Master Interface Control 2 Register */
+#define TMC_ITATBMCTR1 0xED8 /* Integration Test ATB Master Control Register 1 */
+#define TMC_ITATBMCTR0 0xEDC /* Integration Test ATB Master Interface Control 0 Register */
+#define TMC_ITMISCOP0 0xEE0 /* Integration Test Miscellaneous Output Register 0 */
+#define TMC_ITTRFLIN 0xEE8 /* Integration Test Trigger In and Flush In Register */
+#define TMC_ITATBDATA0 0xEEC /* Integration Test ATB Data Register 0 */
+#define TMC_ITATBCTR2 0xEF0 /* Integration Test ATB Control 2 Register */
+#define TMC_ITATBCTR1 0xEF4 /* Integration Test ATB Control 1 Register */
+#define TMC_ITATBCTR0 0xEF8 /* Integration Test ATB Control 0 Register */
+#define TMC_ITCTRL 0xF00 /* Integration Mode Control Register */
+#define TMC_CLAIMSET 0xFA0 /* Claim Tag Set Register */
+#define TMC_CLAIMCLR 0xFA4 /* Claim Tag Clear Register */
+#define TMC_LAR 0xFB0 /* Lock Access Register */
+#define TMC_LSR 0xFB4 /* Lock Status Register */
+#define TMC_AUTHSTATUS 0xFB8 /* Authentication Status Register */
+#define TMC_DEVID 0xFC8 /* Device Configuration Register */
+#define DEVID_CONFIGTYPE_S 6
+#define DEVID_CONFIGTYPE_M (0x3 << DEVID_CONFIGTYPE_S)
+#define DEVID_CONFIGTYPE_ETB (0 << DEVID_CONFIGTYPE_S)
+#define DEVID_CONFIGTYPE_ETR (1 << DEVID_CONFIGTYPE_S)
+#define DEVID_CONFIGTYPE_ETF (2 << DEVID_CONFIGTYPE_S)
+#define TMC_DEVTYPE 0xFCC /* Device Type Identifier Register */
+#define TMC_PERIPHID4 0xFD0 /* Peripheral ID4 Register */
+#define TMC_PERIPHID5 0xFD4 /* Peripheral ID5 Register */
+#define TMC_PERIPHID6 0xFD8 /* Peripheral ID6 Register */
+#define TMC_PERIPHID7 0xFDC /* Peripheral ID7 Register */
+#define TMC_PERIPHID0 0xFE0 /* Peripheral ID0 Register */
+#define TMC_PERIPHID1 0xFE4 /* Peripheral ID1 Register */
+#define TMC_PERIPHID2 0xFE8 /* Peripheral ID2 Register */
+#define TMC_PERIPHID3 0xFEC /* Peripheral ID3 Register */
+#define TMC_COMPID0 0xFF0 /* Component ID0 Register */
+#define TMC_COMPID1 0xFF4 /* Component ID1 Register */
+#define TMC_COMPID2 0xFF8 /* Component ID2 Register */
+#define TMC_COMPID3 0xFFC /* Component ID3 Register */
+
+DECLARE_CLASS(tmc_driver);
+
+struct tmc_softc {
+ struct resource *res;
+ device_t dev;
+ uint64_t cycle;
+ struct coresight_platform_data *pdata;
+ uint32_t dev_type;
+#define CORESIGHT_UNKNOWN 0
+#define CORESIGHT_ETR 1
+#define CORESIGHT_ETF 2
+ uint32_t nev;
+ struct coresight_event *event;
+ boolean_t etf_configured;
+};
+
+int tmc_attach(device_t dev);
+
+#endif /* !_ARM64_CORESIGHT_CORESIGHT_TMC_H_ */
diff --git a/sys/arm64/coresight/coresight_tmc_acpi.c b/sys/arm64/coresight/coresight_tmc_acpi.c
new file mode 100644
index 000000000000..020306593fc8
--- /dev/null
+++ b/sys/arm64/coresight/coresight_tmc_acpi.c
@@ -0,0 +1,92 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory (Department of Computer Science and
+ * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
+ * DARPA SSITH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_acpi.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/memdesc.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <dev/acpica/acpivar.h>
+
+#include <arm64/coresight/coresight.h>
+#include <arm64/coresight/coresight_tmc.h>
+
+static int
+tmc_acpi_probe(device_t dev)
+{
+ static char *tmc_ids[] = { "ARMHC97C", NULL };
+ int error;
+
+ error = ACPI_ID_PROBE(device_get_parent(dev), dev, tmc_ids, NULL);
+ if (error <= 0)
+ device_set_desc(dev, "ARM Coresight TMC");
+
+ return (error);
+}
+
+static int
+tmc_acpi_attach(device_t dev)
+{
+ struct tmc_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->pdata = coresight_acpi_get_platform_data(dev);
+
+ return (tmc_attach(dev));
+}
+
+static device_method_t tmc_acpi_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, tmc_acpi_probe),
+ DEVMETHOD(device_attach, tmc_acpi_attach),
+
+ /* End */
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(tmc, tmc_acpi_driver, tmc_acpi_methods,
+ sizeof(struct tmc_softc), tmc_driver);
+
+static devclass_t tmc_acpi_devclass;
+
+EARLY_DRIVER_MODULE(tmc, acpi, tmc_acpi_driver, tmc_acpi_devclass,
+ 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
diff --git a/sys/arm64/coresight/coresight_tmc_fdt.c b/sys/arm64/coresight/coresight_tmc_fdt.c
new file mode 100644
index 000000000000..6b057231e208
--- /dev/null
+++ b/sys/arm64/coresight/coresight_tmc_fdt.c
@@ -0,0 +1,94 @@
+/*-
+ * Copyright (c) 2018-2020 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by BAE Systems, the University of Cambridge
+ * Computer Laboratory, and Memorial University under DARPA/AFRL contract
+ * FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent Computing
+ * (TC) research program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <machine/bus.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <arm64/coresight/coresight.h>
+#include <arm64/coresight/coresight_tmc.h>
+
+#include "coresight_if.h"
+
+static struct ofw_compat_data compat_data[] = {
+ { "arm,coresight-tmc", 1 },
+ { NULL, 0 }
+};
+
+static int
+tmc_fdt_probe(device_t dev)
+{
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "ARM Coresight TMC");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+tmc_fdt_attach(device_t dev)
+{
+ struct tmc_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->pdata = coresight_fdt_get_platform_data(dev);
+
+ return (tmc_attach(dev));
+}
+
+static device_method_t tmc_fdt_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, tmc_fdt_probe),
+ DEVMETHOD(device_attach, tmc_fdt_attach),
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(tmc, tmc_fdt_driver, tmc_fdt_methods,
+ sizeof(struct tmc_softc), tmc_driver);
+
+static devclass_t tmc_fdt_devclass;
+
+EARLY_DRIVER_MODULE(tmc, simplebus, tmc_fdt_driver, tmc_fdt_devclass,
+ 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
diff --git a/sys/arm64/freescale/imx/clk/imx_clk_composite.c b/sys/arm64/freescale/imx/clk/imx_clk_composite.c
new file mode 100644
index 000000000000..982ca2c6332f
--- /dev/null
+++ b/sys/arm64/freescale/imx/clk/imx_clk_composite.c
@@ -0,0 +1,309 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2018 Emmanuel Vadot <manu@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <dev/extres/clk/clk.h>
+
+#include <arm64/freescale/imx/clk/imx_clk_composite.h>
+
+#include "clkdev_if.h"
+
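+/*
+ * Layout of a CCM clock root target register: an enable bit, a 3-bit
+ * mux select and 3-bit pre / 6-bit post dividers (stored minus one).
+ */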
+#define TARGET_ROOT_ENABLE (1 << 28)
+#define TARGET_ROOT_MUX(n) ((n) << 24)
+#define TARGET_ROOT_MUX_MASK (7 << 24)
+#define TARGET_ROOT_MUX_SHIFT 24
+#define TARGET_ROOT_PRE_PODF(n) ((((n) - 1) & 0x7) << 16)
+#define TARGET_ROOT_PRE_PODF_MASK (0x7 << 16)
+#define TARGET_ROOT_PRE_PODF_SHIFT 16
+#define TARGET_ROOT_PRE_PODF_MAX 7
+#define TARGET_ROOT_POST_PODF(n) ((((n) - 1) & 0x3f) << 0)
+#define TARGET_ROOT_POST_PODF_MASK (0x3f << 0)
+#define TARGET_ROOT_POST_PODF_SHIFT 0
+#define TARGET_ROOT_POST_PODF_MAX 0x3f
+
+struct imx_clk_composite_sc {
+ uint32_t offset;
+ uint32_t flags;
+};
+
+#define WRITE4(_clk, off, val) \
+ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val)
+#define READ4(_clk, off, val) \
+ CLKDEV_READ_4(clknode_get_device(_clk), off, val)
+#define DEVICE_LOCK(_clk) \
+ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk))
+#define DEVICE_UNLOCK(_clk) \
+ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk))
+
+#define IMX_CLK_COMPOSITE_MASK_SHIFT 16
+
+#if 0
+#define dprintf(format, arg...) \
+ printf("%s:(%s)" format, __func__, clknode_get_name(clk), arg)
+#else
+#define dprintf(format, arg...)
+#endif
+
+static int
+imx_clk_composite_init(struct clknode *clk, device_t dev)
+{
+ struct imx_clk_composite_sc *sc;
+ uint32_t val, idx;
+
+ sc = clknode_get_softc(clk);
+
+ DEVICE_LOCK(clk);
+ READ4(clk, sc->offset, &val);
+ DEVICE_UNLOCK(clk);
+ idx = (val & TARGET_ROOT_MUX_MASK) >> TARGET_ROOT_MUX_SHIFT;
+
+ clknode_init_parent_idx(clk, idx);
+
+ return (0);
+}
+
+static int
+imx_clk_composite_set_gate(struct clknode *clk, bool enable)
+{
+ struct imx_clk_composite_sc *sc;
+ uint32_t val = 0;
+
+ sc = clknode_get_softc(clk);
+
+ dprintf("%sabling gate\n", enable ? "En" : "Dis");
+ DEVICE_LOCK(clk);
+ READ4(clk, sc->offset, &val);
+ if (enable)
+ val |= TARGET_ROOT_ENABLE;
+ else
+ val &= ~(TARGET_ROOT_ENABLE);
+ WRITE4(clk, sc->offset, val);
+ DEVICE_UNLOCK(clk);
+
+ return (0);
+}
+
+static int
+imx_clk_composite_set_mux(struct clknode *clk, int index)
+{
+ struct imx_clk_composite_sc *sc;
+ uint32_t val = 0;
+
+ sc = clknode_get_softc(clk);
+
+ dprintf("Set mux to %d\n", index);
+ DEVICE_LOCK(clk);
+ READ4(clk, sc->offset, &val);
+ val &= ~(TARGET_ROOT_MUX_MASK);
+ val |= TARGET_ROOT_MUX(index);
+ WRITE4(clk, sc->offset, val);
+ DEVICE_UNLOCK(clk);
+
+ return (0);
+}
+
+static int
+imx_clk_composite_recalc(struct clknode *clk, uint64_t *freq)
+{
+ struct imx_clk_composite_sc *sc;
+ uint32_t reg, pre_div, post_div;
+
+ sc = clknode_get_softc(clk);
+
+ DEVICE_LOCK(clk);
+ READ4(clk, sc->offset, &reg);
+ DEVICE_UNLOCK(clk);
+
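+	/* Output rate is parent / (pre_podf + 1) / (post_podf + 1). */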
+ pre_div = ((reg & TARGET_ROOT_PRE_PODF_MASK)
+ >> TARGET_ROOT_PRE_PODF_SHIFT) + 1;
+ post_div = ((reg & TARGET_ROOT_POST_PODF_MASK)
+ >> TARGET_ROOT_POST_PODF_SHIFT) + 1;
+
+	dprintf("parent_freq=%ju, pre_div=%u, post_div=%u\n", *freq,
+	    pre_div, post_div);
+ *freq = *freq / pre_div / post_div;
+ dprintf("Final freq=%ju\n", *freq);
+ return (0);
+}
+
+static int
+imx_clk_composite_find_best(uint64_t fparent, uint64_t ftarget,
+ uint32_t *pre_div, uint32_t *post_div, int flags)
+{
+ uint32_t prediv, postdiv, best_prediv, best_postdiv;
+ int64_t diff, best_diff;
+ uint64_t cur;
+
+ best_diff = INT64_MAX;
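+	/*
+	 * Exhaustively try every pre/post divider pair and keep the one
+	 * whose output is closest to the target, honouring the rounding
+	 * policy requested by the caller.
+	 */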
+ for (prediv = 1; prediv <= TARGET_ROOT_PRE_PODF_MAX + 1; prediv++) {
+ for (postdiv = 1; postdiv <= TARGET_ROOT_POST_PODF_MAX + 1; postdiv++) {
+			cur = fparent / prediv / postdiv;
+ diff = (int64_t)ftarget - (int64_t)cur;
+ if (flags & CLK_SET_ROUND_DOWN) {
+ if (diff >= 0 && diff < best_diff) {
+ best_diff = diff;
+ best_prediv = prediv;
+ best_postdiv = postdiv;
+ }
+ }
+ else if (flags & CLK_SET_ROUND_UP) {
+ if (diff <= 0 && abs(diff) < best_diff) {
+					best_diff = abs(diff);
+ best_prediv = prediv;
+ best_postdiv = postdiv;
+ }
+ }
+ else {
+ if (abs(diff) < best_diff) {
+ best_diff = abs(diff);
+ best_prediv = prediv;
+ best_postdiv = postdiv;
+ }
+ }
+ }
+ }
+
+ if (best_diff == INT64_MAX)
+ return (ERANGE);
+
+ *pre_div = best_prediv;
+ *post_div = best_postdiv;
+
+ return (0);
+}
+
+static int
+imx_clk_composite_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout,
+ int flags, int *stop)
+{
+ struct imx_clk_composite_sc *sc;
+ struct clknode *p_clk;
+ const char **p_names;
+ int p_idx, best_parent;
+ int64_t best_diff, diff;
+ int32_t best_pre_div, best_post_div, pre_div, post_div;
+ uint64_t cur, best;
+ uint32_t val;
+
+ sc = clknode_get_softc(clk);
+ dprintf("Finding best parent/div for target freq of %ju\n", *fout);
+ p_names = clknode_get_parent_names(clk);
+
+	best_diff = INT64_MAX;
+
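+	/*
+	 * Walk every possible parent at its current rate and remember the
+	 * parent/divider combination that lands closest to the request.
+	 */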
+ for (p_idx = 0; p_idx != clknode_get_parents_num(clk); p_idx++) {
+ p_clk = clknode_find_by_name(p_names[p_idx]);
+ clknode_get_freq(p_clk, &fparent);
+ dprintf("Testing with parent %s (%d) at freq %ju\n",
+ clknode_get_name(p_clk), p_idx, fparent);
+
+		if (imx_clk_composite_find_best(fparent, *fout, &pre_div,
+		    &post_div, sc->flags) != 0)
+			continue;
+ cur = fparent / pre_div / post_div;
+ diff = abs((int64_t)*fout - (int64_t)cur);
+ if (diff < best_diff) {
+ best = cur;
+ best_diff = diff;
+ best_pre_div = pre_div;
+			best_post_div = post_div;
+ best_parent = p_idx;
+ dprintf("Best parent so far %s (%d) with best freq at "
+ "%ju\n", clknode_get_name(p_clk), p_idx, best);
+ }
+ }
+
+ *stop = 1;
+ if (best_diff == INT64_MAX)
+ return (ERANGE);
+
+ if ((flags & CLK_SET_DRYRUN) != 0) {
+ *fout = best;
+ return (0);
+ }
+
+ p_idx = clknode_get_parent_idx(clk);
+ if (p_idx != best_parent) {
+ dprintf("Switching parent index from %d to %d\n", p_idx,
+ best_parent);
+ clknode_set_parent_by_idx(clk, best_parent);
+ }
+
+ dprintf("Setting dividers to pre=%d, post=%d\n", best_pre_div, best_post_div);
+
+ DEVICE_LOCK(clk);
+ READ4(clk, sc->offset, &val);
+ val &= ~(TARGET_ROOT_PRE_PODF_MASK | TARGET_ROOT_POST_PODF_MASK);
+	val |= TARGET_ROOT_PRE_PODF(best_pre_div);
+	val |= TARGET_ROOT_POST_PODF(best_post_div);
+	WRITE4(clk, sc->offset, val);
+	DEVICE_UNLOCK(clk);
+
+ *fout = best;
+ return (0);
+}
+
+static clknode_method_t imx_clk_composite_clknode_methods[] = {
+ /* Device interface */
+ CLKNODEMETHOD(clknode_init, imx_clk_composite_init),
+ CLKNODEMETHOD(clknode_set_gate, imx_clk_composite_set_gate),
+ CLKNODEMETHOD(clknode_set_mux, imx_clk_composite_set_mux),
+ CLKNODEMETHOD(clknode_recalc_freq, imx_clk_composite_recalc),
+ CLKNODEMETHOD(clknode_set_freq, imx_clk_composite_set_freq),
+ CLKNODEMETHOD_END
+};
+
+DEFINE_CLASS_1(imx_clk_composite_clknode, imx_clk_composite_clknode_class,
+ imx_clk_composite_clknode_methods, sizeof(struct imx_clk_composite_sc),
+ clknode_class);
+
+int
+imx_clk_composite_register(struct clkdom *clkdom,
+ struct imx_clk_composite_def *clkdef)
+{
+ struct clknode *clk;
+ struct imx_clk_composite_sc *sc;
+
+ clk = clknode_create(clkdom, &imx_clk_composite_clknode_class,
+ &clkdef->clkdef);
+ if (clk == NULL)
+ return (1);
+
+ sc = clknode_get_softc(clk);
+
+ sc->offset = clkdef->offset;
+ sc->flags = clkdef->flags;
+
+ clknode_register(clkdom, clk);
+
+ return (0);
+}
diff --git a/sys/arm64/freescale/imx/clk/imx_clk_composite.h b/sys/arm64/freescale/imx/clk/imx_clk_composite.h
new file mode 100644
index 000000000000..56f8e707d92d
--- /dev/null
+++ b/sys/arm64/freescale/imx/clk/imx_clk_composite.h
@@ -0,0 +1,45 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright 2018 Emmanuel Vadot <manu@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IMX_CLK_COMPOSITE_H_
+#define _IMX_CLK_COMPOSITE_H_
+
+#include <dev/extres/clk/clk.h>
+
+struct imx_clk_composite_def {
+ struct clknode_init_def clkdef;
+
+ uint32_t offset;
+ uint32_t flags;
+};
+
+int imx_clk_composite_register(struct clkdom *clkdom,
+ struct imx_clk_composite_def *clkdef);
+
+#endif /* _IMX_CLK_COMPOSITE_H_ */
diff --git a/sys/arm64/freescale/imx/clk/imx_clk_frac_pll.c b/sys/arm64/freescale/imx/clk/imx_clk_frac_pll.c
new file mode 100644
index 000000000000..9742b691f73e
--- /dev/null
+++ b/sys/arm64/freescale/imx/clk/imx_clk_frac_pll.c
@@ -0,0 +1,177 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Oleksandr Tymoshenko <gonzo@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <dev/extres/clk/clk.h>
+
+#include <arm64/freescale/imx/clk/imx_clk_frac_pll.h>
+
+#include "clkdev_if.h"
+
+struct imx_clk_frac_pll_sc {
+ uint32_t offset;
+};
+
+#define WRITE4(_clk, off, val) \
+ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val)
+#define READ4(_clk, off, val) \
+ CLKDEV_READ_4(clknode_get_device(_clk), off, val)
+#define DEVICE_LOCK(_clk) \
+ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk))
+#define DEVICE_UNLOCK(_clk) \
+ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk))
+
+#define CFG0 0
+#define CFG0_PLL_LOCK (1 << 31)
+#define CFG0_PD (1 << 19)
+#define CFG0_BYPASS (1 << 14)
+#define CFG0_NEWDIV_VAL (1 << 12)
+#define CFG0_NEWDIV_ACK (1 << 11)
+#define CFG0_OUTPUT_DIV_MASK (0x1f << 0)
+#define CFG0_OUTPUT_DIV_SHIFT 0
+#define CFG1 4
+#define CFG1_FRAC_DIV_MASK (0xffffff << 7)
+#define CFG1_FRAC_DIV_SHIFT 7
+#define CFG1_INT_DIV_MASK (0x7f << 0)
+#define CFG1_INT_DIV_SHIFT 0
+
+#if 0
+#define dprintf(format, arg...) \
+ printf("%s:(%s)" format, __func__, clknode_get_name(clk), arg)
+#else
+#define dprintf(format, arg...)
+#endif
+
+static int
+imx_clk_frac_pll_init(struct clknode *clk, device_t dev)
+{
+
+ clknode_init_parent_idx(clk, 0);
+ return (0);
+}
+
+static int
+imx_clk_frac_pll_set_gate(struct clknode *clk, bool enable)
+{
+ struct imx_clk_frac_pll_sc *sc;
+ uint32_t cfg0;
+ int timeout;
+
+ sc = clknode_get_softc(clk);
+
+ DEVICE_LOCK(clk);
+ READ4(clk, sc->offset + CFG0, &cfg0);
+ if (enable)
+ cfg0 &= ~(CFG0_PD);
+ else
+ cfg0 |= CFG0_PD;
+ WRITE4(clk, sc->offset + CFG0, cfg0);
+
+ /* Wait for PLL to lock */
+ if (enable && ((cfg0 & CFG0_BYPASS) == 0)) {
+ for (timeout = 1000; timeout; timeout--) {
+ READ4(clk, sc->offset + CFG0, &cfg0);
+ if (cfg0 & CFG0_PLL_LOCK)
+ break;
+ DELAY(1);
+ }
+ }
+
+ DEVICE_UNLOCK(clk);
+
+ return (0);
+}
+
+static int
+imx_clk_frac_pll_recalc(struct clknode *clk, uint64_t *freq)
+{
+ struct imx_clk_frac_pll_sc *sc;
+ uint32_t cfg0, cfg1;
+ uint64_t div, divfi, divff, divf_val;
+
+ sc = clknode_get_softc(clk);
+
+ DEVICE_LOCK(clk);
+ READ4(clk, sc->offset + CFG0, &cfg0);
+ READ4(clk, sc->offset + CFG1, &cfg1);
+ DEVICE_UNLOCK(clk);
+
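+	/*
+	 * The output divider field encodes (div / 2) - 1; the feedback
+	 * divider has an integer part (divfi) and a 24-bit fraction (divff).
+	 */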
+ div = (cfg0 & CFG0_OUTPUT_DIV_MASK) >> CFG0_OUTPUT_DIV_SHIFT;
+ div = (div + 1) * 2;
+ divff = (cfg1 & CFG1_FRAC_DIV_MASK) >> CFG1_FRAC_DIV_SHIFT;
+ divfi = (cfg1 & CFG1_INT_DIV_MASK) >> CFG1_INT_DIV_SHIFT;
+
+ /* PLL is bypassed */
+ if (cfg0 & CFG0_BYPASS)
+ return (0);
+
+ divf_val = 1 + divfi + (divff/0x1000000);
+ *freq = *freq * 8 * divf_val / div;
+
+ return (0);
+}
+
+static clknode_method_t imx_clk_frac_pll_clknode_methods[] = {
+ /* Device interface */
+ CLKNODEMETHOD(clknode_init, imx_clk_frac_pll_init),
+ CLKNODEMETHOD(clknode_set_gate, imx_clk_frac_pll_set_gate),
+ CLKNODEMETHOD(clknode_recalc_freq, imx_clk_frac_pll_recalc),
+ CLKNODEMETHOD_END
+};
+
+DEFINE_CLASS_1(imx_clk_frac_pll_clknode, imx_clk_frac_pll_clknode_class,
+ imx_clk_frac_pll_clknode_methods, sizeof(struct imx_clk_frac_pll_sc),
+ clknode_class);
+
+int
+imx_clk_frac_pll_register(struct clkdom *clkdom,
+ struct imx_clk_frac_pll_def *clkdef)
+{
+ struct clknode *clk;
+ struct imx_clk_frac_pll_sc *sc;
+
+ clk = clknode_create(clkdom, &imx_clk_frac_pll_clknode_class,
+ &clkdef->clkdef);
+ if (clk == NULL)
+ return (1);
+
+ sc = clknode_get_softc(clk);
+
+ sc->offset = clkdef->offset;
+
+ clknode_register(clkdom, clk);
+
+ return (0);
+}
diff --git a/sys/arm64/freescale/imx/clk/imx_clk_frac_pll.h b/sys/arm64/freescale/imx/clk/imx_clk_frac_pll.h
new file mode 100644
index 000000000000..e80a34d113fc
--- /dev/null
+++ b/sys/arm64/freescale/imx/clk/imx_clk_frac_pll.h
@@ -0,0 +1,42 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Oleksandr Tymoshenko <gonzo@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IMX_CLK_FRAC_PLL_H_
+#define _IMX_CLK_FRAC_PLL_H_
+
+#include <dev/extres/clk/clk.h>
+
+struct imx_clk_frac_pll_def {
+ struct clknode_init_def clkdef;
+ uint32_t offset;
+};
+
+int imx_clk_frac_pll_register(struct clkdom *clkdom, struct imx_clk_frac_pll_def *clkdef);
+
+#endif /* _IMX_CLK_FRAC_PLL_H_ */
diff --git a/sys/arm64/freescale/imx/clk/imx_clk_gate.c b/sys/arm64/freescale/imx/clk/imx_clk_gate.c
new file mode 100644
index 000000000000..2ec9f39279a0
--- /dev/null
+++ b/sys/arm64/freescale/imx/clk/imx_clk_gate.c
@@ -0,0 +1,117 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright 2016 Michal Meloun <mmel@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <dev/extres/clk/clk.h>
+
+#include <arm64/freescale/imx/clk/imx_clk_gate.h>
+
+#include "clkdev_if.h"
+
+#define WR4(_clk, off, val) \
+ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val)
+#define RD4(_clk, off, val) \
+ CLKDEV_READ_4(clknode_get_device(_clk), off, val)
+#define MD4(_clk, off, clr, set ) \
+ CLKDEV_MODIFY_4(clknode_get_device(_clk), off, clr, set)
+#define DEVICE_LOCK(_clk) \
+ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk))
+#define DEVICE_UNLOCK(_clk) \
+ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk))
+
+static int imx_clk_gate_init(struct clknode *clk, device_t dev);
+static int imx_clk_gate_set_gate(struct clknode *clk, bool enable);
+struct imx_clk_gate_sc {
+ uint32_t offset;
+ uint32_t shift;
+ uint32_t mask;
+ int gate_flags;
+};
+
+static clknode_method_t imx_clk_gate_methods[] = {
+ /* Device interface */
+ CLKNODEMETHOD(clknode_init, imx_clk_gate_init),
+ CLKNODEMETHOD(clknode_set_gate, imx_clk_gate_set_gate),
+ CLKNODEMETHOD_END
+};
+DEFINE_CLASS_1(imx_clk_gate, imx_clk_gate_class, imx_clk_gate_methods,
+ sizeof(struct imx_clk_gate_sc), clknode_class);
+
+static int
+imx_clk_gate_init(struct clknode *clk, device_t dev)
+{
+
+ clknode_init_parent_idx(clk, 0);
+	return (0);
+}
+
+static int
+imx_clk_gate_set_gate(struct clknode *clk, bool enable)
+{
+ uint32_t reg;
+ struct imx_clk_gate_sc *sc;
+ int rv;
+
+ sc = clknode_get_softc(clk);
+ DEVICE_LOCK(clk);
+ rv = MD4(clk, sc->offset, sc->mask << sc->shift,
+ (enable ? sc->mask : 0) << sc->shift);
+ if (rv != 0) {
+ DEVICE_UNLOCK(clk);
+ return (rv);
+ }
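+	/* Read back so the gate write is posted before dropping the lock. */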
+ RD4(clk, sc->offset, &reg);
+ DEVICE_UNLOCK(clk);
+	return (0);
+}
+
+int
+imx_clk_gate_register(struct clkdom *clkdom, struct imx_clk_gate_def *clkdef)
+{
+ struct clknode *clk;
+ struct imx_clk_gate_sc *sc;
+
+ clk = clknode_create(clkdom, &imx_clk_gate_class, &clkdef->clkdef);
+ if (clk == NULL)
+ return (1);
+
+ sc = clknode_get_softc(clk);
+ sc->offset = clkdef->offset;
+ sc->shift = clkdef->shift;
+ sc->mask = clkdef->mask;
+ sc->gate_flags = clkdef->gate_flags;
+
+ clknode_register(clkdom, clk);
+ return (0);
+}
diff --git a/sys/arm64/freescale/imx/clk/imx_clk_gate.h b/sys/arm64/freescale/imx/clk/imx_clk_gate.h
new file mode 100644
index 000000000000..3eaf3d51da23
--- /dev/null
+++ b/sys/arm64/freescale/imx/clk/imx_clk_gate.h
@@ -0,0 +1,45 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright 2018 Emmanuel Vadot <manu@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IMX_CLK_GATE_H_
+#define _IMX_CLK_GATE_H_
+
+#include <dev/extres/clk/clk.h>
+
+struct imx_clk_gate_def {
+ struct clknode_init_def clkdef;
+ uint32_t offset;
+ uint32_t shift;
+ uint32_t mask;
+ int gate_flags;
+};
+
+int imx_clk_gate_register(struct clkdom *clkdom, struct imx_clk_gate_def *clkdef);
+
+#endif /* _IMX_CLK_GATE_H_ */
diff --git a/sys/arm64/freescale/imx/clk/imx_clk_mux.c b/sys/arm64/freescale/imx/clk/imx_clk_mux.c
new file mode 100644
index 000000000000..8cb6b19daa1c
--- /dev/null
+++ b/sys/arm64/freescale/imx/clk/imx_clk_mux.c
@@ -0,0 +1,136 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright 2016 Michal Meloun <mmel@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+
+#include <machine/bus.h>
+
+#include <dev/extres/clk/clk.h>
+
+#include <arm64/freescale/imx/clk/imx_clk_mux.h>
+
+#include "clkdev_if.h"
+
+#define WR4(_clk, off, val) \
+ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val)
+#define RD4(_clk, off, val) \
+ CLKDEV_READ_4(clknode_get_device(_clk), off, val)
+#define MD4(_clk, off, clr, set ) \
+ CLKDEV_MODIFY_4(clknode_get_device(_clk), off, clr, set)
+#define DEVICE_LOCK(_clk) \
+ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk))
+#define DEVICE_UNLOCK(_clk) \
+ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk))
+
+static int imx_clk_mux_init(struct clknode *clk, device_t dev);
+static int imx_clk_mux_set_mux(struct clknode *clk, int idx);
+
+struct imx_clk_mux_sc {
+ uint32_t offset;
+ uint32_t shift;
+ uint32_t mask;
+ int mux_flags;
+};
+
+static clknode_method_t imx_clk_mux_methods[] = {
+ /* Device interface */
+ CLKNODEMETHOD(clknode_init, imx_clk_mux_init),
+ CLKNODEMETHOD(clknode_set_mux, imx_clk_mux_set_mux),
+ CLKNODEMETHOD_END
+};
+DEFINE_CLASS_1(imx_clk_mux, imx_clk_mux_class, imx_clk_mux_methods,
+ sizeof(struct imx_clk_mux_sc), clknode_class);
+
+static int
+imx_clk_mux_init(struct clknode *clk, device_t dev)
+{
+ uint32_t reg;
+ struct imx_clk_mux_sc *sc;
+ int rv;
+
+ sc = clknode_get_softc(clk);
+
+ DEVICE_LOCK(clk);
+ rv = RD4(clk, sc->offset, &reg);
+ DEVICE_UNLOCK(clk);
+ if (rv != 0) {
+ return (rv);
+ }
+ reg = (reg >> sc->shift) & sc->mask;
+ clknode_init_parent_idx(clk, reg);
+	return (0);
+}
+
+static int
+imx_clk_mux_set_mux(struct clknode *clk, int idx)
+{
+ uint32_t reg;
+ struct imx_clk_mux_sc *sc;
+ int rv;
+
+ sc = clknode_get_softc(clk);
+
+ DEVICE_LOCK(clk);
+ rv = MD4(clk, sc->offset, sc->mask << sc->shift,
+ ((idx & sc->mask) << sc->shift));
+ if (rv != 0) {
+ DEVICE_UNLOCK(clk);
+ return (rv);
+ }
+ RD4(clk, sc->offset, &reg);
+ DEVICE_UNLOCK(clk);
+
+	return (0);
+}
+
+int
+imx_clk_mux_register(struct clkdom *clkdom, struct imx_clk_mux_def *clkdef)
+{
+ struct clknode *clk;
+ struct imx_clk_mux_sc *sc;
+
+ clk = clknode_create(clkdom, &imx_clk_mux_class, &clkdef->clkdef);
+ if (clk == NULL)
+ return (1);
+
+ sc = clknode_get_softc(clk);
+ sc->offset = clkdef->offset;
+ sc->shift = clkdef->shift;
+ sc->mask = (1 << clkdef->width) - 1;
+ sc->mux_flags = clkdef->mux_flags;
+
+ clknode_register(clkdom, clk);
+ return (0);
+}
diff --git a/sys/arm64/freescale/imx/clk/imx_clk_mux.h b/sys/arm64/freescale/imx/clk/imx_clk_mux.h
new file mode 100644
index 000000000000..e3e32c12a939
--- /dev/null
+++ b/sys/arm64/freescale/imx/clk/imx_clk_mux.h
@@ -0,0 +1,45 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright 2016 Michal Meloun <mmel@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _IMX_CLK_MUX_H_
+#define _IMX_CLK_MUX_H_
+
+#include <dev/extres/clk/clk.h>
+
+struct imx_clk_mux_def {
+ struct clknode_init_def clkdef;
+ uint32_t offset;
+ uint32_t shift;
+ uint32_t width;
+ int mux_flags;
+};
+
+int imx_clk_mux_register(struct clkdom *clkdom, struct imx_clk_mux_def *clkdef);
+
+#endif /* _IMX_CLK_MUX_H_ */
diff --git a/sys/arm64/freescale/imx/clk/imx_clk_sscg_pll.c b/sys/arm64/freescale/imx/clk/imx_clk_sscg_pll.c
new file mode 100644
index 000000000000..2b1532e1498e
--- /dev/null
+++ b/sys/arm64/freescale/imx/clk/imx_clk_sscg_pll.c
@@ -0,0 +1,195 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Oleksandr Tymoshenko <gonzo@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <dev/extres/clk/clk.h>
+
+#include <arm64/freescale/imx/clk/imx_clk_sscg_pll.h>
+
+#include "clkdev_if.h"
+
+struct imx_clk_sscg_pll_sc {
+ uint32_t offset;
+};
+
+#define WRITE4(_clk, off, val) \
+ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val)
+#define READ4(_clk, off, val) \
+ CLKDEV_READ_4(clknode_get_device(_clk), off, val)
+#define DEVICE_LOCK(_clk) \
+ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk))
+#define DEVICE_UNLOCK(_clk) \
+ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk))
+
+#define CFG0 0x00
+#define CFG0_PLL_LOCK (1 << 31)
+#define CFG0_PD (1 << 7)
+#define CFG0_BYPASS2 (1 << 5)
+#define CFG0_BYPASS1 (1 << 4)
+#define CFG1 0x04
+#define CFG2 0x08
+#define CFG2_DIVR1_MASK (7 << 25)
+#define CFG2_DIVR1_SHIFT 25
+#define CFG2_DIVR2_MASK (0x3f << 19)
+#define CFG2_DIVR2_SHIFT 19
+#define CFG2_DIVF1_MASK (0x3f << 13)
+#define CFG2_DIVF1_SHIFT 13
+#define CFG2_DIVF2_MASK (0x3f << 7)
+#define CFG2_DIVF2_SHIFT 7
+#define CFG2_DIV_MASK (0x3f << 1)
+#define CFG2_DIV_SHIFT 1
+
+#if 0
+#define dprintf(format, arg...) \
+ printf("%s:(%s)" format, __func__, clknode_get_name(clk), arg)
+#else
+#define dprintf(format, arg...)
+#endif
+
+static int
+imx_clk_sscg_pll_init(struct clknode *clk, device_t dev)
+{
+ struct imx_clk_sscg_pll_sc *sc;
+
+ sc = clknode_get_softc(clk);
+ if (clknode_get_parents_num(clk) > 1) {
+ device_printf(clknode_get_device(clk),
+ "error: SSCG PLL does not support more than one parent yet\n");
+ return (EINVAL);
+ }
+ clknode_init_parent_idx(clk, 0);
+
+ return (0);
+}
+
+static int
+imx_clk_sscg_pll_set_gate(struct clknode *clk, bool enable)
+{
+ struct imx_clk_sscg_pll_sc *sc;
+ uint32_t cfg0;
+ int timeout;
+
+ sc = clknode_get_softc(clk);
+
+ DEVICE_LOCK(clk);
+ READ4(clk, sc->offset + CFG0, &cfg0);
+ if (enable)
+ cfg0 &= ~(CFG0_PD);
+ else
+ cfg0 |= CFG0_PD;
+ WRITE4(clk, sc->offset + CFG0, cfg0);
+
+ /* Reading lock */
+ if (enable) {
+ for (timeout = 1000; timeout; timeout--) {
+ READ4(clk, sc->offset + CFG0, &cfg0);
+ if (cfg0 & CFG0_PLL_LOCK)
+ break;
+ DELAY(1);
+ }
+ }
+
+ DEVICE_UNLOCK(clk);
+
+ return (0);
+}
+
+static int
+imx_clk_sscg_pll_recalc(struct clknode *clk, uint64_t *freq)
+{
+ struct imx_clk_sscg_pll_sc *sc;
+ uint32_t cfg0, cfg2;
+ int divr1, divr2, divf1, divf2, div;
+
+ sc = clknode_get_softc(clk);
+
+ DEVICE_LOCK(clk);
+ READ4(clk, sc->offset + CFG0, &cfg0);
+ READ4(clk, sc->offset + CFG2, &cfg2);
+ DEVICE_UNLOCK(clk);
+
+ /* PLL is bypassed */
+ if (cfg0 & CFG0_BYPASS2)
+ return (0);
+
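+	/* Two reference dividers, two feedback dividers and one output divider. */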
+ divr1 = (cfg2 & CFG2_DIVR1_MASK) >> CFG2_DIVR1_SHIFT;
+ divr2 = (cfg2 & CFG2_DIVR2_MASK) >> CFG2_DIVR2_SHIFT;
+ divf1 = (cfg2 & CFG2_DIVF1_MASK) >> CFG2_DIVF1_SHIFT;
+ divf2 = (cfg2 & CFG2_DIVF2_MASK) >> CFG2_DIVF2_SHIFT;
+ div = (cfg2 & CFG2_DIV_MASK) >> CFG2_DIV_SHIFT;
+
+ if (cfg0 & CFG0_BYPASS1) {
+ *freq = *freq / ((divr2 + 1) * (div + 1));
+ return (0);
+ }
+
+ *freq *= 2 * (divf1 + 1) * (divf2 + 1);
+ *freq /= (divr1 + 1) * (divr2 + 1) * (div + 1);
+
+ return (0);
+}
+
+static clknode_method_t imx_clk_sscg_pll_clknode_methods[] = {
+ /* Device interface */
+ CLKNODEMETHOD(clknode_init, imx_clk_sscg_pll_init),
+ CLKNODEMETHOD(clknode_set_gate, imx_clk_sscg_pll_set_gate),
+ CLKNODEMETHOD(clknode_recalc_freq, imx_clk_sscg_pll_recalc),
+ CLKNODEMETHOD_END
+};
+
+DEFINE_CLASS_1(imx_clk_sscg_pll_clknode, imx_clk_sscg_pll_clknode_class,
+ imx_clk_sscg_pll_clknode_methods, sizeof(struct imx_clk_sscg_pll_sc),
+ clknode_class);
+
+int
+imx_clk_sscg_pll_register(struct clkdom *clkdom,
+ struct imx_clk_sscg_pll_def *clkdef)
+{
+ struct clknode *clk;
+ struct imx_clk_sscg_pll_sc *sc;
+
+ clk = clknode_create(clkdom, &imx_clk_sscg_pll_clknode_class,
+ &clkdef->clkdef);
+ if (clk == NULL)
+ return (1);
+
+ sc = clknode_get_softc(clk);
+
+ sc->offset = clkdef->offset;
+
+ clknode_register(clkdom, clk);
+
+ return (0);
+}
diff --git a/sys/arm64/freescale/imx/clk/imx_clk_sscg_pll.h b/sys/arm64/freescale/imx/clk/imx_clk_sscg_pll.h
new file mode 100644
index 000000000000..eb0d3e9efbec
--- /dev/null
+++ b/sys/arm64/freescale/imx/clk/imx_clk_sscg_pll.h
@@ -0,0 +1,42 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Oleksandr Tymoshenko <gonzo@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IMX_CLK_SSCG_PLL_H_
+#define _IMX_CLK_SSCG_PLL_H_
+
+#include <dev/extres/clk/clk.h>
+
+struct imx_clk_sscg_pll_def {
+ struct clknode_init_def clkdef;
+ uint32_t offset;
+};
+
+int imx_clk_sscg_pll_register(struct clkdom *clkdom, struct imx_clk_sscg_pll_def *clkdef);
+
+#endif /* _IMX_CLK_SSCG_PLL_H_ */
diff --git a/sys/arm64/freescale/imx/imx7gpc.c b/sys/arm64/freescale/imx/imx7gpc.c
new file mode 100644
index 000000000000..b73f0acf864b
--- /dev/null
+++ b/sys/arm64/freescale/imx/imx7gpc.c
@@ -0,0 +1,261 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Oleksandr Tymoshenko <gonzo@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
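+
+/*
+ * General Power Controller (GPC) found on i.MX7 and i.MX8MQ.  The GPC
+ * manages power domains and can mask interrupts for wakeup purposes;
+ * this driver registers it as an interrupt controller and currently
+ * forwards every PIC operation unchanged to the parent controller.
+ */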
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/proc.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+
+#include <machine/bus.h>
+#include <machine/intr.h>
+
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include "pic_if.h"
+
+struct imx7gpc_softc {
+ device_t dev;
+ struct resource *memres;
+ device_t parent;
+};
+
+static struct ofw_compat_data compat_data[] = {
+ { "fsl,imx7gpc", 1},
+ { "fsl,imx8mq-gpc", 1},
+ { NULL, 0}
+};
+
+static inline uint32_t
+imx7gpc_read_4(struct imx7gpc_softc *sc, int reg)
+{
+
+ return (bus_read_4(sc->memres, reg));
+}
+
+static inline void
+imx7gpc_write_4(struct imx7gpc_softc *sc, int reg, uint32_t val)
+{
+
+ bus_write_4(sc->memres, reg, val);
+}
+
+static int
+imx7gpc_activate_intr(device_t dev, struct intr_irqsrc *isrc,
+ struct resource *res, struct intr_map_data *data)
+{
+ struct imx7gpc_softc *sc = device_get_softc(dev);
+
+ return (PIC_ACTIVATE_INTR(sc->parent, isrc, res, data));
+}
+
+static void
+imx7gpc_disable_intr(device_t dev, struct intr_irqsrc *isrc)
+{
+ struct imx7gpc_softc *sc = device_get_softc(dev);
+
+ PIC_DISABLE_INTR(sc->parent, isrc);
+}
+
+static void
+imx7gpc_enable_intr(device_t dev, struct intr_irqsrc *isrc)
+{
+ struct imx7gpc_softc *sc = device_get_softc(dev);
+
+ PIC_ENABLE_INTR(sc->parent, isrc);
+}
+
+static int
+imx7gpc_map_intr(device_t dev, struct intr_map_data *data,
+ struct intr_irqsrc **isrcp)
+{
+ struct imx7gpc_softc *sc = device_get_softc(dev);
+
+ return (PIC_MAP_INTR(sc->parent, data, isrcp));
+}
+
+static int
+imx7gpc_deactivate_intr(device_t dev, struct intr_irqsrc *isrc,
+ struct resource *res, struct intr_map_data *data)
+{
+ struct imx7gpc_softc *sc = device_get_softc(dev);
+
+ return (PIC_DEACTIVATE_INTR(sc->parent, isrc, res, data));
+}
+
+static int
+imx7gpc_setup_intr(device_t dev, struct intr_irqsrc *isrc,
+ struct resource *res, struct intr_map_data *data)
+{
+ struct imx7gpc_softc *sc = device_get_softc(dev);
+
+ return (PIC_SETUP_INTR(sc->parent, isrc, res, data));
+}
+
+static int
+imx7gpc_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
+ struct resource *res, struct intr_map_data *data)
+{
+ struct imx7gpc_softc *sc = device_get_softc(dev);
+
+ return (PIC_TEARDOWN_INTR(sc->parent, isrc, res, data));
+}
+
+static void
+imx7gpc_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
+{
+ struct imx7gpc_softc *sc = device_get_softc(dev);
+
+ PIC_PRE_ITHREAD(sc->parent, isrc);
+}
+
+static void
+imx7gpc_post_ithread(device_t dev, struct intr_irqsrc *isrc)
+{
+ struct imx7gpc_softc *sc = device_get_softc(dev);
+
+ PIC_POST_ITHREAD(sc->parent, isrc);
+}
+
+static void
+imx7gpc_post_filter(device_t dev, struct intr_irqsrc *isrc)
+{
+ struct imx7gpc_softc *sc = device_get_softc(dev);
+
+ PIC_POST_FILTER(sc->parent, isrc);
+}
+
+#ifdef SMP
+static int
+imx7gpc_bind_intr(device_t dev, struct intr_irqsrc *isrc)
+{
+ struct imx7gpc_softc *sc = device_get_softc(dev);
+
+ return (PIC_BIND_INTR(sc->parent, isrc));
+}
+#endif
+
+static int
+imx7gpc_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "General Power Controller");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+imx7gpc_attach(device_t dev)
+{
+ struct imx7gpc_softc *sc = device_get_softc(dev);
+ phandle_t node;
+ phandle_t parent_xref;
+ int i, rv;
+
+ sc->dev = dev;
+
+ node = ofw_bus_get_node(dev);
+
+ rv = OF_getencprop(node, "interrupt-parent", &parent_xref,
+ sizeof(parent_xref));
+ if (rv <= 0) {
+ device_printf(dev, "Can't read parent node property\n");
+ return (ENXIO);
+ }
+ sc->parent = OF_device_from_xref(parent_xref);
+ if (sc->parent == NULL) {
+ device_printf(dev, "Can't find parent controller\n");
+ return (ENXIO);
+ }
+
+ i = 0;
+ sc->memres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i,
+ RF_ACTIVE);
+ if (sc->memres == NULL) {
+ device_printf(dev, "could not allocate resources\n");
+ return (ENXIO);
+ }
+
+ /* TODO: power up OTG domain and unmask all interrupts */
+
+ if (intr_pic_register(dev, OF_xref_from_node(node)) == NULL) {
+ bus_release_resource(dev, SYS_RES_MEMORY, i, sc->memres);
+ device_printf(dev, "Cannot register PIC\n");
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static device_method_t imx7gpc_methods[] = {
+ DEVMETHOD(device_probe, imx7gpc_probe),
+ DEVMETHOD(device_attach, imx7gpc_attach),
+
+ /* Interrupt controller interface */
+ DEVMETHOD(pic_activate_intr, imx7gpc_activate_intr),
+ DEVMETHOD(pic_disable_intr, imx7gpc_disable_intr),
+ DEVMETHOD(pic_enable_intr, imx7gpc_enable_intr),
+ DEVMETHOD(pic_map_intr, imx7gpc_map_intr),
+ DEVMETHOD(pic_deactivate_intr, imx7gpc_deactivate_intr),
+ DEVMETHOD(pic_setup_intr, imx7gpc_setup_intr),
+ DEVMETHOD(pic_teardown_intr, imx7gpc_teardown_intr),
+ DEVMETHOD(pic_pre_ithread, imx7gpc_pre_ithread),
+ DEVMETHOD(pic_post_ithread, imx7gpc_post_ithread),
+ DEVMETHOD(pic_post_filter, imx7gpc_post_filter),
+#ifdef SMP
+ DEVMETHOD(pic_bind_intr, imx7gpc_bind_intr),
+#endif
+
+ DEVMETHOD_END
+};
+
+static driver_t imx7gpc_driver = {
+ "imx7gpc",
+ imx7gpc_methods,
+ sizeof(struct imx7gpc_softc),
+};
+
+static devclass_t imx7gpc_devclass;
+
+EARLY_DRIVER_MODULE(imx7gpc, ofwbus, imx7gpc_driver, imx7gpc_devclass, 0, 0,
+ BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
+EARLY_DRIVER_MODULE(imx7gpc, simplebus, imx7gpc_driver, imx7gpc_devclass, 0, 0,
+ BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
diff --git a/sys/arm64/freescale/imx/imx8mq_ccm.c b/sys/arm64/freescale/imx/imx8mq_ccm.c
new file mode 100644
index 000000000000..5c9751791415
--- /dev/null
+++ b/sys/arm64/freescale/imx/imx8mq_ccm.c
@@ -0,0 +1,484 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Oleksandr Tymoshenko <gonzo@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Clocks driver for Freescale i.MX8MQ SoC
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <machine/bus.h>
+
+#include <arm64/freescale/imx/imx_ccm_clk.h>
+#include <arm64/freescale/imx/imx8mq_ccm.h>
+#include <arm64/freescale/imx/clk/imx_clk_gate.h>
+#include <arm64/freescale/imx/clk/imx_clk_mux.h>
+#include <arm64/freescale/imx/clk/imx_clk_composite.h>
+#include <arm64/freescale/imx/clk/imx_clk_sscg_pll.h>
+#include <arm64/freescale/imx/clk/imx_clk_frac_pll.h>
+
+#include "clkdev_if.h"
+
+static const char *pll_ref_p[] = {
+ "osc_25m", "osc_27m", "dummy", "dummy"
+};
+static const char *sys3_pll_out_p[] = {
+ "sys3_pll1_ref_sel"
+};
+static const char *arm_pll_bypass_p[] = {
+ "arm_pll", "arm_pll_ref_sel"
+};
+static const char *gpu_pll_bypass_p[] = {
+ "gpu_pll", "gpu_pll_ref_sel"
+};
+static const char *vpu_pll_bypass_p[] = {
+ "vpu_pll", "vpu_pll_ref_sel"
+};
+static const char *audio_pll1_bypass_p[] = {
+ "audio_pll1", "audio_pll1_ref_sel"
+};
+static const char *audio_pll2_bypass_p[] = {
+ "audio_pll2", "audio_pll2_ref_sel"
+};
+static const char *video_pll1_bypass_p[] = {
+ "video_pll1", "video_pll1_ref_sel"
+};
+static const char *uart_p[] = {
+ "osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m", "sys3_pll_out",
+ "clk_ext2", "clk_ext4", "audio_pll2_out"
+};
+static const char *usdhc_p[] = {
+ "osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m", "audio_pll2_out",
+ "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m"
+};
+static const char *enet_axi_p[] = {
+ "osc_25m", "sys1_pll_266m", "sys1_pll_800m", "sys2_pll_250m", "sys2_pll_200m",
+ "audio_pll1_out", "video_pll1_out", "sys3_pll_out"
+};
+static const char *enet_ref_p[] = {
+ "osc_25m", "sys2_pll_125m", "sys2_pll_500m", "sys2_pll_100m", "sys1_pll_160m",
+ "audio_pll1_out", "video_pll1_out", "clk_ext4"
+};
+static const char *enet_timer_p[] = {
+ "osc_25m", "sys2_pll_100m", "audio_pll1_out", "clk_ext1", "clk_ext2", "clk_ext3",
+ "clk_ext4", "video_pll1_out"
+};
+static const char *enet_phy_ref_p[] = {
+ "osc_25m", "sys2_pll_50m", "sys2_pll_125m", "sys2_pll_500m", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out"
+};
+static const char *usb_bus_p[] = {
+ "osc_25m", "sys2_pll_500m", "sys1_pll_800m", "sys2_pll_100m", "sys2_pll_200m",
+ "clk_ext2", "clk_ext4", "audio_pll2_out"
+};
+static const char *usb_core_phy_p[] = {
+ "osc_25m", "sys1_pll_100m", "sys1_pll_40m", "sys2_pll_100m", "sys2_pll_200m",
+ "clk_ext2", "clk_ext3", "audio_pll2_out"
+};
+static const char *i2c_p[] = {
+ "osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll_out", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", "sys1_pll_133m"
+};
+static const char *ahb_p[] = {
+ "osc_25m", "sys1_pll_133m", "sys1_pll_800m", "sys1_pll_400m", "sys2_pll_125m",
+ "sys3_pll_out", "audio_pll1_out", "video_pll1_out"
+};
+
+static struct imx_clk imx_clks[] = {
+ FIXED(IMX8MQ_CLK_DUMMY, "dummy", 0),
+
+ LINK(IMX8MQ_CLK_32K, "ckil"),
+ LINK(IMX8MQ_CLK_25M, "osc_25m"),
+ LINK(IMX8MQ_CLK_27M, "osc_27m"),
+ LINK(IMX8MQ_CLK_EXT1, "clk_ext1"),
+ LINK(IMX8MQ_CLK_EXT2, "clk_ext2"),
+ LINK(IMX8MQ_CLK_EXT3, "clk_ext3"),
+ LINK(IMX8MQ_CLK_EXT4, "clk_ext4"),
+
+ FIXED(IMX8MQ_SYS1_PLL_OUT, "sys1_pll_out", 800000000),
+ FIXED(IMX8MQ_SYS2_PLL_OUT, "sys2_pll_out", 1000000000),
+ SSCG_PLL(IMX8MQ_SYS3_PLL_OUT, "sys3_pll_out", sys3_pll_out_p, 0x48),
+
+ MUX(IMX8MQ_ARM_PLL_REF_SEL, "arm_pll_ref_sel", pll_ref_p, 0, 0x28, 16, 2),
+ MUX(IMX8MQ_GPU_PLL_REF_SEL, "gpu_pll_ref_sel", pll_ref_p, 0, 0x18, 16, 2),
+ MUX(IMX8MQ_VPU_PLL_REF_SEL, "vpu_pll_ref_sel", pll_ref_p, 0, 0x20, 16, 2),
+ MUX(IMX8MQ_AUDIO_PLL1_REF_SEL, "audio_pll1_ref_sel", pll_ref_p, 0, 0x0, 16, 2),
+ MUX(IMX8MQ_AUDIO_PLL2_REF_SEL, "audio_pll2_ref_sel", pll_ref_p, 0, 0x8, 16, 2),
+ MUX(IMX8MQ_VIDEO_PLL1_REF_SEL, "video_pll1_ref_sel", pll_ref_p, 0, 0x10, 16, 2),
+ MUX(IMX8MQ_SYS3_PLL1_REF_SEL, "sys3_pll1_ref_sel", pll_ref_p, 0, 0x48, 0, 2),
+ MUX(IMX8MQ_DRAM_PLL1_REF_SEL, "dram_pll1_ref_sel", pll_ref_p, 0, 0x60, 0, 2),
+ MUX(IMX8MQ_VIDEO2_PLL1_REF_SEL, "video2_pll1_ref_sel", pll_ref_p, 0, 0x54, 0, 2),
+
+ DIV(IMX8MQ_ARM_PLL_REF_DIV, "arm_pll_ref_div", "arm_pll_ref_sel", 0x28, 5, 6),
+ DIV(IMX8MQ_GPU_PLL_REF_DIV, "gpu_pll_ref_div", "gpu_pll_ref_sel", 0x18, 5, 6),
+ DIV(IMX8MQ_VPU_PLL_REF_DIV, "vpu_pll_ref_div", "vpu_pll_ref_sel", 0x20, 5, 6),
+ DIV(IMX8MQ_AUDIO_PLL1_REF_DIV, "audio_pll1_ref_div", "audio_pll1_ref_sel", 0x0, 5, 6),
+ DIV(IMX8MQ_AUDIO_PLL2_REF_DIV, "audio_pll2_ref_div", "audio_pll2_ref_sel", 0x8, 5, 6),
+ DIV(IMX8MQ_VIDEO_PLL1_REF_DIV, "video_pll1_ref_div", "video_pll1_ref_sel", 0x10, 5, 6),
+
+ FRAC_PLL(IMX8MQ_ARM_PLL, "arm_pll", "arm_pll_ref_div", 0x28),
+ FRAC_PLL(IMX8MQ_GPU_PLL, "gpu_pll", "gpu_pll_ref_div", 0x18),
+ FRAC_PLL(IMX8MQ_VPU_PLL, "vpu_pll", "vpu_pll_ref_div", 0x20),
+ FRAC_PLL(IMX8MQ_AUDIO_PLL1, "audio_pll1", "audio_pll1_ref_div", 0x0),
+ FRAC_PLL(IMX8MQ_AUDIO_PLL2, "audio_pll2", "audio_pll2_ref_div", 0x8),
+ FRAC_PLL(IMX8MQ_VIDEO_PLL1, "video_pll1", "video_pll1_ref_div", 0x10),
+
+ /* ARM_PLL needs SET_PARENT flag */
+ MUX(IMX8MQ_ARM_PLL_BYPASS, "arm_pll_bypass", arm_pll_bypass_p, 0, 0x28, 14, 1),
+ MUX(IMX8MQ_GPU_PLL_BYPASS, "gpu_pll_bypass", gpu_pll_bypass_p, 0, 0x18, 14, 1),
+ MUX(IMX8MQ_VPU_PLL_BYPASS, "vpu_pll_bypass", vpu_pll_bypass_p, 0, 0x20, 14, 1),
+ MUX(IMX8MQ_AUDIO_PLL1_BYPASS, "audio_pll1_bypass", audio_pll1_bypass_p, 0, 0x0, 14, 1),
+ MUX(IMX8MQ_AUDIO_PLL2_BYPASS, "audio_pll2_bypass", audio_pll2_bypass_p, 0, 0x8, 14, 1),
+ MUX(IMX8MQ_VIDEO_PLL1_BYPASS, "video_pll1_bypass", video_pll1_bypass_p, 0, 0x10, 14, 1),
+
+ GATE(IMX8MQ_ARM_PLL_OUT, "arm_pll_out", "arm_pll_bypass", 0x28, 21),
+ GATE(IMX8MQ_GPU_PLL_OUT, "gpu_pll_out", "gpu_pll_bypass", 0x18, 21),
+ GATE(IMX8MQ_VPU_PLL_OUT, "vpu_pll_out", "vpu_pll_bypass", 0x20, 21),
+ GATE(IMX8MQ_AUDIO_PLL1_OUT, "audio_pll1_out", "audio_pll1_bypass", 0x0, 21),
+ GATE(IMX8MQ_AUDIO_PLL2_OUT, "audio_pll2_out", "audio_pll2_bypass", 0x8, 21),
+ GATE(IMX8MQ_VIDEO_PLL1_OUT, "video_pll1_out", "video_pll1_bypass", 0x10, 21),
+
+ GATE(IMX8MQ_SYS1_PLL_40M_CG, "sys1_pll_40m_cg", "sys1_pll_out", 0x30, 9),
+ GATE(IMX8MQ_SYS1_PLL_80M_CG, "sys1_pll_80m_cg", "sys1_pll_out", 0x30, 11),
+ GATE(IMX8MQ_SYS1_PLL_100M_CG, "sys1_pll_100m_cg", "sys1_pll_out", 0x30, 13),
+ GATE(IMX8MQ_SYS1_PLL_133M_CG, "sys1_pll_133m_cg", "sys1_pll_out", 0x30, 15),
+ GATE(IMX8MQ_SYS1_PLL_160M_CG, "sys1_pll_160m_cg", "sys1_pll_out", 0x30, 17),
+ GATE(IMX8MQ_SYS1_PLL_200M_CG, "sys1_pll_200m_cg", "sys1_pll_out", 0x30, 19),
+ GATE(IMX8MQ_SYS1_PLL_266M_CG, "sys1_pll_266m_cg", "sys1_pll_out", 0x30, 21),
+ GATE(IMX8MQ_SYS1_PLL_400M_CG, "sys1_pll_400m_cg", "sys1_pll_out", 0x30, 23),
+ GATE(IMX8MQ_SYS1_PLL_800M_CG, "sys1_pll_800m_cg", "sys1_pll_out", 0x30, 25),
+
+ FFACT(IMX8MQ_SYS1_PLL_40M, "sys1_pll_40m", "sys1_pll_40m_cg", 1, 20),
+ FFACT(IMX8MQ_SYS1_PLL_80M, "sys1_pll_80m", "sys1_pll_80m_cg", 1, 10),
+ FFACT(IMX8MQ_SYS1_PLL_100M, "sys1_pll_100m", "sys1_pll_100m_cg", 1, 8),
+ FFACT(IMX8MQ_SYS1_PLL_133M, "sys1_pll_133m", "sys1_pll_133m_cg", 1, 6),
+ FFACT(IMX8MQ_SYS1_PLL_160M, "sys1_pll_160m", "sys1_pll_160m_cg", 1, 5),
+ FFACT(IMX8MQ_SYS1_PLL_200M, "sys1_pll_200m", "sys1_pll_200m_cg", 1, 4),
+ FFACT(IMX8MQ_SYS1_PLL_266M, "sys1_pll_266m", "sys1_pll_266m_cg", 1, 3),
+ FFACT(IMX8MQ_SYS1_PLL_400M, "sys1_pll_400m", "sys1_pll_400m_cg", 1, 2),
+ FFACT(IMX8MQ_SYS1_PLL_800M, "sys1_pll_800m", "sys1_pll_800m_cg", 1, 1),
+
+ GATE(IMX8MQ_SYS2_PLL_50M_CG, "sys2_pll_50m_cg", "sys2_pll_out", 0x3c, 9),
+ GATE(IMX8MQ_SYS2_PLL_100M_CG, "sys2_pll_100m_cg", "sys2_pll_out", 0x3c, 11),
+ GATE(IMX8MQ_SYS2_PLL_125M_CG, "sys2_pll_125m_cg", "sys2_pll_out", 0x3c, 13),
+ GATE(IMX8MQ_SYS2_PLL_166M_CG, "sys2_pll_166m_cg", "sys2_pll_out", 0x3c, 15),
+ GATE(IMX8MQ_SYS2_PLL_200M_CG, "sys2_pll_200m_cg", "sys2_pll_out", 0x3c, 17),
+ GATE(IMX8MQ_SYS2_PLL_250M_CG, "sys2_pll_250m_cg", "sys2_pll_out", 0x3c, 19),
+ GATE(IMX8MQ_SYS2_PLL_333M_CG, "sys2_pll_333m_cg", "sys2_pll_out", 0x3c, 21),
+ GATE(IMX8MQ_SYS2_PLL_500M_CG, "sys2_pll_500m_cg", "sys2_pll_out", 0x3c, 23),
+ GATE(IMX8MQ_SYS2_PLL_1000M_CG, "sys2_pll_1000m_cg", "sys2_pll_out", 0x3c, 25),
+
+ FFACT(IMX8MQ_SYS2_PLL_50M, "sys2_pll_50m", "sys2_pll_50m_cg", 1, 20),
+ FFACT(IMX8MQ_SYS2_PLL_100M, "sys2_pll_100m", "sys2_pll_100m_cg", 1, 10),
+ FFACT(IMX8MQ_SYS2_PLL_125M, "sys2_pll_125m", "sys2_pll_125m_cg", 1, 8),
+ FFACT(IMX8MQ_SYS2_PLL_166M, "sys2_pll_166m", "sys2_pll_166m_cg", 1, 6),
+ FFACT(IMX8MQ_SYS2_PLL_200M, "sys2_pll_200m", "sys2_pll_200m_cg", 1, 5),
+ FFACT(IMX8MQ_SYS2_PLL_250M, "sys2_pll_250m", "sys2_pll_250m_cg", 1, 4),
+ FFACT(IMX8MQ_SYS2_PLL_333M, "sys2_pll_333m", "sys2_pll_333m_cg", 1, 3),
+ FFACT(IMX8MQ_SYS2_PLL_500M, "sys2_pll_500m", "sys2_pll_500m_cg", 1, 2),
+ FFACT(IMX8MQ_SYS2_PLL_1000M, "sys2_pll_1000m", "sys2_pll_1000m_cg", 1, 1),
+
+ COMPOSITE(IMX8MQ_CLK_AHB, "ahb", ahb_p, 0x9000, 0),
+ DIV(IMX8MQ_CLK_IPG_ROOT, "ipg_root", "ahb", 0x9080, 0, 1),
+
+ COMPOSITE(IMX8MQ_CLK_UART1, "uart1", uart_p, 0xaf00, 0),
+ COMPOSITE(IMX8MQ_CLK_UART2, "uart2", uart_p, 0xaf80, 0),
+ COMPOSITE(IMX8MQ_CLK_UART3, "uart3", uart_p, 0xb000, 0),
+ COMPOSITE(IMX8MQ_CLK_UART4, "uart4", uart_p, 0xb080, 0),
+
+ ROOT_GATE(IMX8MQ_CLK_UART1_ROOT, "uart1_root_clk", "uart1", 0x4490),
+ ROOT_GATE(IMX8MQ_CLK_UART2_ROOT, "uart2_root_clk", "uart2", 0x44a0),
+ ROOT_GATE(IMX8MQ_CLK_UART3_ROOT, "uart3_root_clk", "uart3", 0x44b0),
+ ROOT_GATE(IMX8MQ_CLK_UART4_ROOT, "uart4_root_clk", "uart4", 0x44c0),
+
+ COMPOSITE(IMX8MQ_CLK_USDHC1, "usdhc1", usdhc_p, 0xac00, CLK_SET_ROUND_DOWN),
+ COMPOSITE(IMX8MQ_CLK_USDHC2, "usdhc2", usdhc_p, 0xac80, CLK_SET_ROUND_DOWN),
+
+ ROOT_GATE(IMX8MQ_CLK_USDHC1_ROOT, "usdhc1_root_clk", "usdhc1", 0x4510),
+ ROOT_GATE(IMX8MQ_CLK_USDHC2_ROOT, "usdhc2_root_clk", "usdhc2", 0x4520),
+
+ COMPOSITE(IMX8MQ_CLK_ENET_AXI, "enet_axi", enet_axi_p, 0x8800, 0),
+ COMPOSITE(IMX8MQ_CLK_ENET_REF, "enet_ref", enet_ref_p, 0xa980, 0),
+ COMPOSITE(IMX8MQ_CLK_ENET_TIMER, "enet_timer", enet_timer_p, 0xaa00, 0),
+ COMPOSITE(IMX8MQ_CLK_ENET_PHY_REF, "enet_phy_ref", enet_phy_ref_p, 0xaa80, 0),
+
+ ROOT_GATE(IMX8MQ_CLK_ENET1_ROOT, "enet1_root_clk", "enet_axi", 0x40a0),
+
+ COMPOSITE(IMX8MQ_CLK_USB_BUS, "usb_bus", usb_bus_p, 0x8b80, 0),
+ COMPOSITE(IMX8MQ_CLK_USB_CORE_REF, "usb_core_ref", usb_core_phy_p, 0xb100, 0),
+ COMPOSITE(IMX8MQ_CLK_USB_PHY_REF, "usb_phy_ref", usb_core_phy_p, 0xb180, 0),
+
+ ROOT_GATE(IMX8MQ_CLK_USB1_CTRL_ROOT, "usb1_ctrl_root_clk", "usb_bus", 0x44d0),
+ ROOT_GATE(IMX8MQ_CLK_USB2_CTRL_ROOT, "usb2_ctrl_root_clk", "usb_bus", 0x44e0),
+ ROOT_GATE(IMX8MQ_CLK_USB1_PHY_ROOT, "usb1_phy_root_clk", "usb_phy_ref", 0x44f0),
+ ROOT_GATE(IMX8MQ_CLK_USB2_PHY_ROOT, "usb2_phy_root_clk", "usb_phy_ref", 0x4500),
+
+ COMPOSITE(IMX8MQ_CLK_I2C1, "i2c1", i2c_p, 0xad00, 0),
+ COMPOSITE(IMX8MQ_CLK_I2C2, "i2c2", i2c_p, 0xad80, 0),
+ COMPOSITE(IMX8MQ_CLK_I2C3, "i2c3", i2c_p, 0xae00, 0),
+ COMPOSITE(IMX8MQ_CLK_I2C4, "i2c4", i2c_p, 0xae80, 0),
+
+ ROOT_GATE(IMX8MQ_CLK_I2C1_ROOT, "i2c1_root_clk", "i2c1", 0x4170),
+ ROOT_GATE(IMX8MQ_CLK_I2C2_ROOT, "i2c2_root_clk", "i2c2", 0x4180),
+ ROOT_GATE(IMX8MQ_CLK_I2C3_ROOT, "i2c3_root_clk", "i2c3", 0x4190),
+ ROOT_GATE(IMX8MQ_CLK_I2C4_ROOT, "i2c4_root_clk", "i2c4", 0x41a0),
+
+ ROOT_GATE(IMX8MQ_CLK_GPIO1_ROOT, "gpio1_root_clk", "ipg_root", 0x40b0),
+ ROOT_GATE(IMX8MQ_CLK_GPIO2_ROOT, "gpio2_root_clk", "ipg_root", 0x40c0),
+ ROOT_GATE(IMX8MQ_CLK_GPIO3_ROOT, "gpio3_root_clk", "ipg_root", 0x40d0),
+ ROOT_GATE(IMX8MQ_CLK_GPIO4_ROOT, "gpio4_root_clk", "ipg_root", 0x40e0),
+ ROOT_GATE(IMX8MQ_CLK_GPIO5_ROOT, "gpio5_root_clk", "ipg_root", 0x40f0),
+};
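As a quick sanity check of the table above: the fixed-factor entries derive their nominal rates from the fixed SYS1/SYS2 PLL outputs, e.g. sys1_pll_133m = 800 MHz / 6 ≈ 133.3 MHz, sys1_pll_400m = 800 MHz / 2 = 400 MHz, and sys2_pll_125m = 1000 MHz / 8 = 125 MHz, which is consistent with the parent names listed for the uart, usdhc and enet muxes.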
+
+struct ccm_softc {
+ device_t dev;
+ struct resource *mem_res;
+ struct clkdom *clkdom;
+ struct mtx mtx;
+ struct imx_clk *clks;
+ int nclks;
+};
+
+static inline uint32_t
+CCU_READ4(struct ccm_softc *sc, bus_size_t off)
+{
+
+ return (bus_read_4(sc->mem_res, off));
+}
+
+static inline void
+CCU_WRITE4(struct ccm_softc *sc, bus_size_t off, uint32_t val)
+{
+
+ bus_write_4(sc->mem_res, off, val);
+}
+
+static int
+ccm_detach(device_t dev)
+{
+ struct ccm_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ if (sc->mem_res != NULL)
+ bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res);
+
+ return (0);
+}
+
+static int
+ccm_attach(device_t dev)
+{
+ struct ccm_softc *sc;
+ int err, rid;
+ phandle_t node;
+ int i;
+
+ sc = device_get_softc(dev);
+ err = 0;
+
+ /* Allocate bus_space resources. */
+ rid = 0;
+ sc->clks = imx_clks;
+ sc->nclks = nitems(imx_clks);
+ sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
+ RF_ACTIVE);
+ if (sc->mem_res == NULL) {
+ device_printf(dev, "Cannot allocate memory resources\n");
+ err = ENXIO;
+ goto out;
+ }
+
+ mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF);
+
+ sc->clkdom = clkdom_create(dev);
+ if (sc->clkdom == NULL)
+ panic("Cannot create clkdom\n");
+
+ for (i = 0; i < sc->nclks; i++) {
+ switch (sc->clks[i].type) {
+ case IMX_CLK_UNDEFINED:
+ break;
+ case IMX_CLK_LINK:
+ clknode_link_register(sc->clkdom,
+ sc->clks[i].clk.link);
+ break;
+ case IMX_CLK_FIXED:
+ clknode_fixed_register(sc->clkdom,
+ sc->clks[i].clk.fixed);
+ break;
+ case IMX_CLK_MUX:
+ imx_clk_mux_register(sc->clkdom, sc->clks[i].clk.mux);
+ break;
+ case IMX_CLK_GATE:
+ imx_clk_gate_register(sc->clkdom, sc->clks[i].clk.gate);
+ break;
+ case IMX_CLK_COMPOSITE:
+ imx_clk_composite_register(sc->clkdom, sc->clks[i].clk.composite);
+ break;
+ case IMX_CLK_SSCG_PLL:
+ imx_clk_sscg_pll_register(sc->clkdom, sc->clks[i].clk.sscg_pll);
+ break;
+ case IMX_CLK_FRAC_PLL:
+ imx_clk_frac_pll_register(sc->clkdom, sc->clks[i].clk.frac_pll);
+ break;
+ case IMX_CLK_DIV:
+ clknode_div_register(sc->clkdom, sc->clks[i].clk.div);
+ break;
+ default:
+ device_printf(dev, "Unknown clock type %d\n", sc->clks[i].type);
+ return (ENXIO);
+ }
+ }
+
+ if (clkdom_finit(sc->clkdom) != 0)
+ panic("cannot finalize clkdom initialization\n");
+
+ if (bootverbose)
+ clkdom_dump(sc->clkdom);
+
+ node = ofw_bus_get_node(dev);
+ clk_set_assigned(dev, node);
+
+ err = 0;
+
+out:
+
+ if (err != 0)
+ ccm_detach(dev);
+
+ return (err);
+}
+
+static int
+ccm_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_is_compatible(dev, "fsl,imx8mq-ccm") == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "Freescale i.MX8 Clock Control Module");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+imx_ccm_write_4(device_t dev, bus_addr_t addr, uint32_t val)
+{
+ struct ccm_softc *sc;
+
+ sc = device_get_softc(dev);
+ CCU_WRITE4(sc, addr, val);
+ return (0);
+}
+
+static int
+imx_ccm_read_4(device_t dev, bus_addr_t addr, uint32_t *val)
+{
+ struct ccm_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ *val = CCU_READ4(sc, addr);
+ return (0);
+}
+
+static int
+imx_ccm_modify_4(device_t dev, bus_addr_t addr, uint32_t clr, uint32_t set)
+{
+ struct ccm_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+
+ reg = CCU_READ4(sc, addr);
+ reg &= ~clr;
+ reg |= set;
+ CCU_WRITE4(sc, addr, reg);
+
+ return (0);
+}
+
+static void
+imx_ccm_device_lock(device_t dev)
+{
+ struct ccm_softc *sc;
+
+ sc = device_get_softc(dev);
+ mtx_lock(&sc->mtx);
+}
+
+static void
+imx_ccm_device_unlock(device_t dev)
+{
+ struct ccm_softc *sc;
+
+ sc = device_get_softc(dev);
+ mtx_unlock(&sc->mtx);
+}
+
+static device_method_t ccm_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, ccm_probe),
+ DEVMETHOD(device_attach, ccm_attach),
+ DEVMETHOD(device_detach, ccm_detach),
+
+ /* clkdev interface */
+ DEVMETHOD(clkdev_write_4, imx_ccm_write_4),
+ DEVMETHOD(clkdev_read_4, imx_ccm_read_4),
+ DEVMETHOD(clkdev_modify_4, imx_ccm_modify_4),
+ DEVMETHOD(clkdev_device_lock, imx_ccm_device_lock),
+ DEVMETHOD(clkdev_device_unlock, imx_ccm_device_unlock),
+
+ DEVMETHOD_END
+};
+
+static driver_t ccm_driver = {
+ "ccm",
+ ccm_methods,
+ sizeof(struct ccm_softc)
+};
+
+static devclass_t ccm_devclass;
+
+EARLY_DRIVER_MODULE(ccm, simplebus, ccm_driver, ccm_devclass, 0, 0,
+ BUS_PASS_CPU + BUS_PASS_ORDER_EARLY);
diff --git a/sys/arm64/freescale/imx/imx8mq_ccm.h b/sys/arm64/freescale/imx/imx8mq_ccm.h
new file mode 100644
index 000000000000..dcd016fc729f
--- /dev/null
+++ b/sys/arm64/freescale/imx/imx8mq_ccm.h
@@ -0,0 +1,173 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Oleksandr Tymoshenko <gonzo@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __IMX8MQ_CCM_H__
+#define __IMX8MQ_CCM_H__
+
+#define IMX8MQ_CLK_DUMMY 0
+#define IMX8MQ_CLK_32K 1
+#define IMX8MQ_CLK_25M 2
+#define IMX8MQ_CLK_27M 3
+#define IMX8MQ_CLK_EXT1 4
+#define IMX8MQ_CLK_EXT2 5
+#define IMX8MQ_CLK_EXT3 6
+#define IMX8MQ_CLK_EXT4 7
+
+#define IMX8MQ_ARM_PLL_REF_SEL 8
+#define IMX8MQ_ARM_PLL_REF_DIV 9
+#define IMX8MQ_ARM_PLL 10
+#define IMX8MQ_ARM_PLL_BYPASS 11
+#define IMX8MQ_ARM_PLL_OUT 12
+
+#define IMX8MQ_GPU_PLL_REF_SEL 13
+#define IMX8MQ_GPU_PLL_REF_DIV 14
+#define IMX8MQ_GPU_PLL 15
+#define IMX8MQ_GPU_PLL_BYPASS 16
+#define IMX8MQ_GPU_PLL_OUT 17
+
+#define IMX8MQ_VPU_PLL_REF_SEL 18
+#define IMX8MQ_VPU_PLL_REF_DIV 19
+#define IMX8MQ_VPU_PLL 20
+#define IMX8MQ_VPU_PLL_BYPASS 21
+#define IMX8MQ_VPU_PLL_OUT 22
+
+#define IMX8MQ_AUDIO_PLL1_REF_SEL 23
+#define IMX8MQ_AUDIO_PLL1_REF_DIV 24
+#define IMX8MQ_AUDIO_PLL1 25
+#define IMX8MQ_AUDIO_PLL1_BYPASS 26
+#define IMX8MQ_AUDIO_PLL1_OUT 27
+
+#define IMX8MQ_AUDIO_PLL2_REF_SEL 28
+#define IMX8MQ_AUDIO_PLL2_REF_DIV 29
+#define IMX8MQ_AUDIO_PLL2 30
+#define IMX8MQ_AUDIO_PLL2_BYPASS 31
+#define IMX8MQ_AUDIO_PLL2_OUT 32
+
+#define IMX8MQ_VIDEO_PLL1_REF_SEL 33
+#define IMX8MQ_VIDEO_PLL1_REF_DIV 34
+#define IMX8MQ_VIDEO_PLL1 35
+#define IMX8MQ_VIDEO_PLL1_BYPASS 36
+#define IMX8MQ_VIDEO_PLL1_OUT 37
+
+#define IMX8MQ_SYS3_PLL1_REF_SEL 54
+#define IMX8MQ_SYS3_PLL1 56
+
+#define IMX8MQ_DRAM_PLL1_REF_SEL 62
+
+#define IMX8MQ_SYS1_PLL_40M 70
+#define IMX8MQ_SYS1_PLL_80M 71
+#define IMX8MQ_SYS1_PLL_100M 72
+#define IMX8MQ_SYS1_PLL_133M 73
+#define IMX8MQ_SYS1_PLL_160M 74
+#define IMX8MQ_SYS1_PLL_200M 75
+#define IMX8MQ_SYS1_PLL_266M 76
+#define IMX8MQ_SYS1_PLL_400M 77
+#define IMX8MQ_SYS1_PLL_800M 78
+
+#define IMX8MQ_SYS2_PLL_50M 79
+#define IMX8MQ_SYS2_PLL_100M 80
+#define IMX8MQ_SYS2_PLL_125M 81
+#define IMX8MQ_SYS2_PLL_166M 82
+#define IMX8MQ_SYS2_PLL_200M 83
+#define IMX8MQ_SYS2_PLL_250M 84
+#define IMX8MQ_SYS2_PLL_333M 85
+#define IMX8MQ_SYS2_PLL_500M 86
+#define IMX8MQ_SYS2_PLL_1000M 87
+
+#define IMX8MQ_CLK_ENET_AXI 104
+#define IMX8MQ_CLK_USB_BUS 110
+
+#define IMX8MQ_CLK_AHB 116
+
+#define IMX8MQ_CLK_ENET_REF 137
+#define IMX8MQ_CLK_ENET_TIMER 138
+#define IMX8MQ_CLK_ENET_PHY_REF 139
+#define IMX8MQ_CLK_USDHC1 142
+#define IMX8MQ_CLK_USDHC2 143
+#define IMX8MQ_CLK_I2C1 144
+#define IMX8MQ_CLK_I2C2 145
+#define IMX8MQ_CLK_I2C3 146
+#define IMX8MQ_CLK_I2C4 147
+#define IMX8MQ_CLK_UART1 148
+#define IMX8MQ_CLK_UART2 149
+#define IMX8MQ_CLK_UART3 150
+#define IMX8MQ_CLK_UART4 151
+#define IMX8MQ_CLK_USB_CORE_REF 152
+#define IMX8MQ_CLK_USB_PHY_REF 153
+
+#define IMX8MQ_CLK_ENET1_ROOT 182
+#define IMX8MQ_CLK_I2C1_ROOT 184
+#define IMX8MQ_CLK_I2C2_ROOT 185
+#define IMX8MQ_CLK_I2C3_ROOT 186
+#define IMX8MQ_CLK_I2C4_ROOT 187
+#define IMX8MQ_CLK_UART1_ROOT 202
+#define IMX8MQ_CLK_UART2_ROOT 203
+#define IMX8MQ_CLK_UART3_ROOT 204
+#define IMX8MQ_CLK_UART4_ROOT 205
+#define IMX8MQ_CLK_USB1_CTRL_ROOT 206
+#define IMX8MQ_CLK_USB2_CTRL_ROOT 207
+#define IMX8MQ_CLK_USB1_PHY_ROOT 208
+#define IMX8MQ_CLK_USB2_PHY_ROOT 209
+#define IMX8MQ_CLK_USDHC1_ROOT 210
+#define IMX8MQ_CLK_USDHC2_ROOT 211
+
+#define IMX8MQ_SYS1_PLL_OUT 231
+#define IMX8MQ_SYS2_PLL_OUT 232
+#define IMX8MQ_SYS3_PLL_OUT 233
+
+#define IMX8MQ_CLK_IPG_ROOT 236
+
+#define IMX8MQ_CLK_GPIO1_ROOT 259
+#define IMX8MQ_CLK_GPIO2_ROOT 260
+#define IMX8MQ_CLK_GPIO3_ROOT 261
+#define IMX8MQ_CLK_GPIO4_ROOT 262
+#define IMX8MQ_CLK_GPIO5_ROOT 263
+
+#define IMX8MQ_VIDEO2_PLL1_REF_SEL 266
+
+#define IMX8MQ_SYS1_PLL_40M_CG 267
+#define IMX8MQ_SYS1_PLL_80M_CG 268
+#define IMX8MQ_SYS1_PLL_100M_CG 269
+#define IMX8MQ_SYS1_PLL_133M_CG 270
+#define IMX8MQ_SYS1_PLL_160M_CG 271
+#define IMX8MQ_SYS1_PLL_200M_CG 272
+#define IMX8MQ_SYS1_PLL_266M_CG 273
+#define IMX8MQ_SYS1_PLL_400M_CG 274
+#define IMX8MQ_SYS1_PLL_800M_CG 275
+#define IMX8MQ_SYS2_PLL_50M_CG 276
+#define IMX8MQ_SYS2_PLL_100M_CG 277
+#define IMX8MQ_SYS2_PLL_125M_CG 278
+#define IMX8MQ_SYS2_PLL_166M_CG 279
+#define IMX8MQ_SYS2_PLL_200M_CG 280
+#define IMX8MQ_SYS2_PLL_250M_CG 281
+#define IMX8MQ_SYS2_PLL_333M_CG 282
+#define IMX8MQ_SYS2_PLL_500M_CG 283
+#define IMX8MQ_SYS2_PLL_1000M_CG 284
+
+#endif
diff --git a/sys/arm64/freescale/imx/imx_ccm_clk.h b/sys/arm64/freescale/imx/imx_ccm_clk.h
new file mode 100644
index 000000000000..6f17c389bad3
--- /dev/null
+++ b/sys/arm64/freescale/imx/imx_ccm_clk.h
@@ -0,0 +1,212 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Oleksandr Tymoshenko <gonzo@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef IMX_CCM_CLK_H
+#define IMX_CCM_CLK_H
+
+#include <dev/extres/clk/clk.h>
+#include <dev/extres/clk/clk_div.h>
+#include <dev/extres/clk/clk_fixed.h>
+#include <dev/extres/clk/clk_gate.h>
+#include <dev/extres/clk/clk_link.h>
+
+enum imx_clk_type {
+ IMX_CLK_UNDEFINED = 0,
+ IMX_CLK_FIXED,
+ IMX_CLK_LINK,
+ IMX_CLK_MUX,
+ IMX_CLK_GATE,
+ IMX_CLK_COMPOSITE,
+ IMX_CLK_SSCG_PLL,
+ IMX_CLK_FRAC_PLL,
+ IMX_CLK_DIV,
+};
+
+struct imx_clk {
+ enum imx_clk_type type;
+ union {
+ struct clk_fixed_def *fixed;
+ struct clk_link_def *link;
+ struct imx_clk_mux_def *mux;
+ struct imx_clk_gate_def *gate;
+ struct imx_clk_composite_def *composite;
+ struct imx_clk_sscg_pll_def *sscg_pll;
+ struct imx_clk_frac_pll_def *frac_pll;
+ struct clk_div_def *div;
+ } clk;
+};
+
+/* Linked clock. */
+#define LINK(_id, _name) \
+{ \
+ .type = IMX_CLK_LINK, \
+ .clk.link = &(struct clk_link_def) { \
+ .clkdef.id = _id, \
+ .clkdef.name = _name, \
+ .clkdef.parent_names = NULL, \
+ .clkdef.parent_cnt = 0, \
+ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \
+ }, \
+}
+
+/* Complex clock without divider (multiplexer only). */
+#define MUX(_id, _name, _pn, _f, _mo, _ms, _mw) \
+{ \
+ .type = IMX_CLK_MUX, \
+ .clk.mux = &(struct imx_clk_mux_def) { \
+ .clkdef.id = _id, \
+ .clkdef.name = _name, \
+ .clkdef.parent_names = _pn, \
+ .clkdef.parent_cnt = nitems(_pn), \
+ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \
+ .offset = _mo, \
+ .shift = _ms, \
+ .width = _mw, \
+ .mux_flags = _f, \
+ }, \
+}
+
+/* Fixed frequency clock */
+#define FIXED(_id, _name, _freq) \
+{ \
+ .type = IMX_CLK_FIXED, \
+ .clk.fixed = &(struct clk_fixed_def) { \
+ .clkdef.id = _id, \
+ .clkdef.name = _name, \
+ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \
+ .freq = _freq, \
+ }, \
+}
+
+/* Fixed factor multiplier/divider. */
+#define FFACT(_id, _name, _pname, _mult, _div) \
+{ \
+ .type = IMX_CLK_FIXED, \
+ .clk.fixed = &(struct clk_fixed_def) { \
+ .clkdef.id = _id, \
+ .clkdef.name = _name, \
+ .clkdef.parent_names = (const char *[]){_pname}, \
+ .clkdef.parent_cnt = 1, \
+ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \
+ .mult = _mult, \
+ .div = _div, \
+ }, \
+}
+
+/* Clock gate */
+#define GATE(_id, _name, _pname, _o, _shift) \
+{ \
+ .type = IMX_CLK_GATE, \
+ .clk.gate = &(struct imx_clk_gate_def) { \
+ .clkdef.id = _id, \
+ .clkdef.name = _name, \
+ .clkdef.parent_names = (const char *[]){_pname}, \
+ .clkdef.parent_cnt = 1, \
+ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \
+ .offset = _o, \
+ .shift = _shift, \
+ .mask = 1, \
+ }, \
+}
+
+/* Root clock gate */
+#define ROOT_GATE(_id, _name, _pname, _reg) \
+{ \
+ .type = IMX_CLK_GATE, \
+ .clk.gate = &(struct imx_clk_gate_def) { \
+ .clkdef.id = _id, \
+ .clkdef.name = _name, \
+ .clkdef.parent_names = (const char *[]){_pname}, \
+ .clkdef.parent_cnt = 1, \
+ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \
+ .offset = _reg, \
+ .shift = 0, \
+ .mask = 3, \
+ }, \
+}
+
+/* Composite clock with GATE, MUX, PRE_DIV, and POST_DIV */
+#define COMPOSITE(_id, _name, _pn, _o, _flags) \
+{ \
+ .type = IMX_CLK_COMPOSITE, \
+ .clk.composite = &(struct imx_clk_composite_def) { \
+ .clkdef.id = _id, \
+ .clkdef.name = _name, \
+ .clkdef.parent_names = _pn, \
+ .clkdef.parent_cnt = nitems(_pn), \
+ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \
+ .offset = _o, \
+ .flags = _flags, \
+ }, \
+}
+
+/* SSCG PLL */
+#define SSCG_PLL(_id, _name, _pn, _o) \
+{ \
+ .type = IMX_CLK_SSCG_PLL, \
+ .clk.sscg_pll = &(struct imx_clk_sscg_pll_def) { \
+ .clkdef.id = _id, \
+ .clkdef.name = _name, \
+ .clkdef.parent_names = _pn, \
+ .clkdef.parent_cnt = nitems(_pn), \
+ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \
+ .offset = _o, \
+ }, \
+}
+
+/* Fractional PLL */
+#define FRAC_PLL(_id, _name, _pname, _o) \
+{ \
+ .type = IMX_CLK_FRAC_PLL, \
+ .clk.frac_pll = &(struct imx_clk_frac_pll_def) { \
+ .clkdef.id = _id, \
+ .clkdef.name = _name, \
+ .clkdef.parent_names = (const char *[]){_pname}, \
+ .clkdef.parent_cnt = 1, \
+ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \
+ .offset = _o, \
+ }, \
+}
+
+#define DIV(_id, _name, _pname, _o, _shift, _width) \
+{ \
+ .type = IMX_CLK_DIV, \
+ .clk.div = &(struct clk_div_def) { \
+ .clkdef.id = _id, \
+ .clkdef.name = _name, \
+ .clkdef.parent_names = (const char *[]){_pname}, \
+ .clkdef.parent_cnt = 1, \
+ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \
+ .offset = _o, \
+ .i_shift = _shift, \
+ .i_width = _width, \
+ }, \
+}
+
+#endif
diff --git a/sys/arm64/include/_align.h b/sys/arm64/include/_align.h
new file mode 100644
index 000000000000..3844133ffb68
--- /dev/null
+++ b/sys/arm64/include/_align.h
@@ -0,0 +1,44 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)param.h 5.8 (Berkeley) 6/28/91
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE__ALIGN_H_
+#define _MACHINE__ALIGN_H_
+
+/*
+ * Round p (pointer or byte index) up to a correctly-aligned value
+ * for all data types (int, long, ...). The result is unsigned int
+ * and must be cast to any desired pointer type.
+ */
+#define _ALIGNBYTES (sizeof(long long) - 1)
+#define _ALIGN(p) (((u_long)(p) + _ALIGNBYTES) & ~_ALIGNBYTES)
+
+#endif /* !_MACHINE__ALIGN_H_ */
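A worked example of the macro above: on arm64 sizeof(long long) is 8, so _ALIGNBYTES is 7 and _ALIGN(13) evaluates to (13 + 7) & ~7 = 16, while a value that is already 8-byte aligned, e.g. _ALIGN(24) = 24, is returned unchanged.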
diff --git a/sys/arm64/include/_bus.h b/sys/arm64/include/_bus.h
new file mode 100644
index 000000000000..e2d3069323d5
--- /dev/null
+++ b/sys/arm64/include/_bus.h
@@ -0,0 +1,45 @@
+/*-
+ * Copyright (c) 2005 M. Warner Losh <imp@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE__BUS_H_
+#define _MACHINE__BUS_H_
+
+/*
+ * Addresses (in bus space).
+ */
+typedef u_long bus_addr_t;
+typedef u_long bus_size_t;
+
+/*
+ * Access methods for bus space.
+ */
+typedef u_long bus_space_handle_t;
+typedef struct bus_space *bus_space_tag_t;
+
+#endif /* !_MACHINE__BUS_H_ */
diff --git a/sys/arm64/include/_inttypes.h b/sys/arm64/include/_inttypes.h
new file mode 100644
index 000000000000..df1af0b25df0
--- /dev/null
+++ b/sys/arm64/include/_inttypes.h
@@ -0,0 +1,213 @@
+/*-
+ * Copyright (c) 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Klaus Klein.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * From: $NetBSD: int_fmtio.h,v 1.4 2008/04/28 20:23:36 martin Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE__INTTYPES_H_
+#define _MACHINE__INTTYPES_H_
+
+/*
+ * Macros for format specifiers.
+ */
+
+/* fprintf(3) macros for signed integers. */
+
+#define PRId8 "d" /* int8_t */
+#define PRId16 "d" /* int16_t */
+#define PRId32 "d" /* int32_t */
+#define PRId64 "ld" /* int64_t */
+#define PRIdLEAST8 "d" /* int_least8_t */
+#define PRIdLEAST16 "d" /* int_least16_t */
+#define PRIdLEAST32 "d" /* int_least32_t */
+#define PRIdLEAST64 "ld" /* int_least64_t */
+#define PRIdFAST8 "d" /* int_fast8_t */
+#define PRIdFAST16 "d" /* int_fast16_t */
+#define PRIdFAST32 "d" /* int_fast32_t */
+#define PRIdFAST64 "ld" /* int_fast64_t */
+#define PRIdMAX "jd" /* intmax_t */
+#define PRIdPTR "ld" /* intptr_t */
+
+#define PRIi8 "i" /* int8_t */
+#define PRIi16 "i" /* int16_t */
+#define PRIi32 "i" /* int32_t */
+#define PRIi64 "li" /* int64_t */
+#define PRIiLEAST8 "i" /* int_least8_t */
+#define PRIiLEAST16 "i" /* int_least16_t */
+#define PRIiLEAST32 "i" /* int_least32_t */
+#define PRIiLEAST64 "li" /* int_least64_t */
+#define PRIiFAST8 "i" /* int_fast8_t */
+#define PRIiFAST16 "i" /* int_fast16_t */
+#define PRIiFAST32 "i" /* int_fast32_t */
+#define PRIiFAST64 "li" /* int_fast64_t */
+#define PRIiMAX "ji" /* intmax_t */
+#define PRIiPTR "li" /* intptr_t */
+
+/* fprintf(3) macros for unsigned integers. */
+
+#define PRIo8 "o" /* uint8_t */
+#define PRIo16 "o" /* uint16_t */
+#define PRIo32 "o" /* uint32_t */
+#define PRIo64 "lo" /* uint64_t */
+#define PRIoLEAST8 "o" /* uint_least8_t */
+#define PRIoLEAST16 "o" /* uint_least16_t */
+#define PRIoLEAST32 "o" /* uint_least32_t */
+#define PRIoLEAST64 "lo" /* uint_least64_t */
+#define PRIoFAST8 "o" /* uint_fast8_t */
+#define PRIoFAST16 "o" /* uint_fast16_t */
+#define PRIoFAST32 "o" /* uint_fast32_t */
+#define PRIoFAST64 "lo" /* uint_fast64_t */
+#define PRIoMAX "jo" /* uintmax_t */
+#define PRIoPTR "lo" /* uintptr_t */
+
+#define PRIu8 "u" /* uint8_t */
+#define PRIu16 "u" /* uint16_t */
+#define PRIu32 "u" /* uint32_t */
+#define PRIu64 "lu" /* uint64_t */
+#define PRIuLEAST8 "u" /* uint_least8_t */
+#define PRIuLEAST16 "u" /* uint_least16_t */
+#define PRIuLEAST32 "u" /* uint_least32_t */
+#define PRIuLEAST64 "lu" /* uint_least64_t */
+#define PRIuFAST8 "u" /* uint_fast8_t */
+#define PRIuFAST16 "u" /* uint_fast16_t */
+#define PRIuFAST32 "u" /* uint_fast32_t */
+#define PRIuFAST64 "lu" /* uint_fast64_t */
+#define PRIuMAX "ju" /* uintmax_t */
+#define PRIuPTR "lu" /* uintptr_t */
+
+#define PRIx8 "x" /* uint8_t */
+#define PRIx16 "x" /* uint16_t */
+#define PRIx32 "x" /* uint32_t */
+#define PRIx64 "lx" /* uint64_t */
+#define PRIxLEAST8 "x" /* uint_least8_t */
+#define PRIxLEAST16 "x" /* uint_least16_t */
+#define PRIxLEAST32 "x" /* uint_least32_t */
+#define PRIxLEAST64 "lx" /* uint_least64_t */
+#define PRIxFAST8 "x" /* uint_fast8_t */
+#define PRIxFAST16 "x" /* uint_fast16_t */
+#define PRIxFAST32 "x" /* uint_fast32_t */
+#define PRIxFAST64 "lx" /* uint_fast64_t */
+#define PRIxMAX "jx" /* uintmax_t */
+#define PRIxPTR "lx" /* uintptr_t */
+
+#define PRIX8 "X" /* uint8_t */
+#define PRIX16 "X" /* uint16_t */
+#define PRIX32 "X" /* uint32_t */
+#define PRIX64 "lX" /* uint64_t */
+#define PRIXLEAST8 "X" /* uint_least8_t */
+#define PRIXLEAST16 "X" /* uint_least16_t */
+#define PRIXLEAST32 "X" /* uint_least32_t */
+#define PRIXLEAST64 "lX" /* uint_least64_t */
+#define PRIXFAST8 "X" /* uint_fast8_t */
+#define PRIXFAST16 "X" /* uint_fast16_t */
+#define PRIXFAST32 "X" /* uint_fast32_t */
+#define PRIXFAST64 "lX" /* uint_fast64_t */
+#define PRIXMAX "jX" /* uintmax_t */
+#define PRIXPTR "lX" /* uintptr_t */
+
+/* fscanf(3) macros for signed integers. */
+
+#define SCNd8 "hhd" /* int8_t */
+#define SCNd16 "hd" /* int16_t */
+#define SCNd32 "d" /* int32_t */
+#define SCNd64 "ld" /* int64_t */
+#define SCNdLEAST8 "hhd" /* int_least8_t */
+#define SCNdLEAST16 "hd" /* int_least16_t */
+#define SCNdLEAST32 "d" /* int_least32_t */
+#define SCNdLEAST64 "ld" /* int_least64_t */
+#define SCNdFAST8 "d" /* int_fast8_t */
+#define SCNdFAST16 "d" /* int_fast16_t */
+#define SCNdFAST32 "d" /* int_fast32_t */
+#define SCNdFAST64 "ld" /* int_fast64_t */
+#define SCNdMAX "jd" /* intmax_t */
+#define SCNdPTR "ld" /* intptr_t */
+
+#define SCNi8 "hhi" /* int8_t */
+#define SCNi16 "hi" /* int16_t */
+#define SCNi32 "i" /* int32_t */
+#define SCNi64 "li" /* int64_t */
+#define SCNiLEAST8 "hhi" /* int_least8_t */
+#define SCNiLEAST16 "hi" /* int_least16_t */
+#define SCNiLEAST32 "i" /* int_least32_t */
+#define SCNiLEAST64 "li" /* int_least64_t */
+#define SCNiFAST8 "i" /* int_fast8_t */
+#define SCNiFAST16 "i" /* int_fast16_t */
+#define SCNiFAST32 "i" /* int_fast32_t */
+#define SCNiFAST64 "li" /* int_fast64_t */
+#define SCNiMAX "ji" /* intmax_t */
+#define SCNiPTR "li" /* intptr_t */
+
+/* fscanf(3) macros for unsigned integers. */
+
+#define SCNo8 "hho" /* uint8_t */
+#define SCNo16 "ho" /* uint16_t */
+#define SCNo32 "o" /* uint32_t */
+#define SCNo64 "lo" /* uint64_t */
+#define SCNoLEAST8 "hho" /* uint_least8_t */
+#define SCNoLEAST16 "ho" /* uint_least16_t */
+#define SCNoLEAST32 "o" /* uint_least32_t */
+#define SCNoLEAST64 "lo" /* uint_least64_t */
+#define SCNoFAST8 "o" /* uint_fast8_t */
+#define SCNoFAST16 "o" /* uint_fast16_t */
+#define SCNoFAST32 "o" /* uint_fast32_t */
+#define SCNoFAST64 "lo" /* uint_fast64_t */
+#define SCNoMAX "jo" /* uintmax_t */
+#define SCNoPTR "lo" /* uintptr_t */
+
+#define SCNu8 "hhu" /* uint8_t */
+#define SCNu16 "hu" /* uint16_t */
+#define SCNu32 "u" /* uint32_t */
+#define SCNu64 "lu" /* uint64_t */
+#define SCNuLEAST8 "hhu" /* uint_least8_t */
+#define SCNuLEAST16 "hu" /* uint_least16_t */
+#define SCNuLEAST32 "u" /* uint_least32_t */
+#define SCNuLEAST64 "lu" /* uint_least64_t */
+#define SCNuFAST8 "u" /* uint_fast8_t */
+#define SCNuFAST16 "u" /* uint_fast16_t */
+#define SCNuFAST32 "u" /* uint_fast32_t */
+#define SCNuFAST64 "lu" /* uint_fast64_t */
+#define SCNuMAX "ju" /* uintmax_t */
+#define SCNuPTR "lu" /* uintptr_t */
+
+#define SCNx8 "hhx" /* uint8_t */
+#define SCNx16 "hx" /* uint16_t */
+#define SCNx32 "x" /* uint32_t */
+#define SCNx64 "lx" /* uint64_t */
+#define SCNxLEAST8 "hhx" /* uint_least8_t */
+#define SCNxLEAST16 "hx" /* uint_least16_t */
+#define SCNxLEAST32 "x" /* uint_least32_t */
+#define SCNxLEAST64 "lx" /* uint_least64_t */
+#define SCNxFAST8 "x" /* uint_fast8_t */
+#define SCNxFAST16 "x" /* uint_fast16_t */
+#define SCNxFAST32 "x" /* uint_fast32_t */
+#define SCNxFAST64 "lx" /* uint_fast64_t */
+#define SCNxMAX "jx" /* uintmax_t */
+#define SCNxPTR "lx" /* uintptr_t */
+
+#endif /* !_MACHINE__INTTYPES_H_ */
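A small sketch (ordinary C, not part of the patch) showing how these specifiers are used; on this LP64 target int64_t is long, which is why the 64-bit variants carry the "l" length modifier:

    #include <inttypes.h>
    #include <stdio.h>

    int
    main(void)
    {
            uint64_t nbytes = 1048576;

            /* PRIu64 expands to "lu" here, so the format matches uint64_t. */
            printf("copied %" PRIu64 " bytes\n", nbytes);
            return (0);
    }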
diff --git a/sys/arm64/include/_limits.h b/sys/arm64/include/_limits.h
new file mode 100644
index 000000000000..39f0bcf0f5b2
--- /dev/null
+++ b/sys/arm64/include/_limits.h
@@ -0,0 +1,85 @@
+/*-
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)limits.h 8.3 (Berkeley) 1/4/94
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE__LIMITS_H_
+#define _MACHINE__LIMITS_H_
+
+/*
+ * According to ANSI (section 2.2.4.2), the values below must be usable by
+ * #if preprocessing directives. Additionally, the expression must have the
+ * same type as would an expression that is an object of the corresponding
+ * type converted according to the integral promotions. The subtraction for
+ * INT_MIN, etc., is so the value is not unsigned; e.g., 0x80000000 is an
+ * unsigned int for 32-bit two's complement ANSI compilers (section 3.1.3.2).
+ */
+
+#define __CHAR_BIT 8 /* number of bits in a char */
+
+#define __SCHAR_MAX 0x7f /* max value for a signed char */
+#define __SCHAR_MIN (-0x7f - 1) /* min value for a signed char */
+
+#define __UCHAR_MAX 0xff /* max value for an unsigned char */
+
+#define __USHRT_MAX 0xffff /* max value for an unsigned short */
+#define __SHRT_MAX 0x7fff /* max value for a short */
+#define __SHRT_MIN (-0x7fff - 1) /* min value for a short */
+
+#define __UINT_MAX 0xffffffff /* max value for an unsigned int */
+#define __INT_MAX 0x7fffffff /* max value for an int */
+#define __INT_MIN (-0x7fffffff - 1) /* min value for an int */
+
+#define __ULONG_MAX 0xffffffffffffffffUL /* max for an unsigned long */
+#define __LONG_MAX 0x7fffffffffffffffL /* max for a long */
+#define __LONG_MIN (-0x7fffffffffffffffL - 1) /* min for a long */
+
+/* Long longs have the same size but not the same type as longs. */
+ /* max for an unsigned long long */
+#define __ULLONG_MAX 0xffffffffffffffffULL
+#define __LLONG_MAX 0x7fffffffffffffffLL /* max for a long long */
+#define __LLONG_MIN (-0x7fffffffffffffffLL - 1) /* min for a long long */
+
+#define __SSIZE_MAX __LONG_MAX /* max value for a ssize_t */
+
+#define __SIZE_T_MAX __ULONG_MAX /* max value for a size_t */
+
+#define __OFF_MAX __LONG_MAX /* max value for an off_t */
+#define __OFF_MIN __LONG_MIN /* min value for an off_t */
+
+/* Quads and longs are the same size. Ensure they stay in sync. */
+#define __UQUAD_MAX (__ULONG_MAX) /* max value for a uquad_t */
+#define __QUAD_MAX (__LONG_MAX) /* max value for a quad_t */
+#define __QUAD_MIN (__LONG_MIN) /* min value for a quad_t */
+
+#define __LONG_BIT 64
+#define __WORD_BIT 32
+
+/* Minimum signal stack size. */
+#define __MINSIGSTKSZ (1024 * 4)
+
+#endif /* !_MACHINE__LIMITS_H_ */
diff --git a/sys/arm64/include/_stdint.h b/sys/arm64/include/_stdint.h
new file mode 100644
index 000000000000..32e5b6fd081e
--- /dev/null
+++ b/sys/arm64/include/_stdint.h
@@ -0,0 +1,158 @@
+/*-
+ * Copyright (c) 2001, 2002 Mike Barcroft <mike@FreeBSD.org>
+ * Copyright (c) 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Klaus Klein.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE__STDINT_H_
+#define _MACHINE__STDINT_H_
+
+#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS)
+
+#define INT8_C(c) (c)
+#define INT16_C(c) (c)
+#define INT32_C(c) (c)
+#define INT64_C(c) (c ## L)
+
+#define UINT8_C(c) (c)
+#define UINT16_C(c) (c)
+#define UINT32_C(c) (c ## U)
+#define UINT64_C(c) (c ## UL)
+
+#define INTMAX_C(c) INT64_C(c)
+#define UINTMAX_C(c) UINT64_C(c)
+
+#endif /* !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) */
+
+#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS)
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.1 Limits of exact-width integer types
+ */
+/* Minimum values of exact-width signed integer types. */
+#define INT8_MIN (-0x7f-1)
+#define INT16_MIN (-0x7fff-1)
+#define INT32_MIN (-0x7fffffff-1)
+#define INT64_MIN (-0x7fffffffffffffffL-1)
+
+/* Maximum values of exact-width signed integer types. */
+#define INT8_MAX 0x7f
+#define INT16_MAX 0x7fff
+#define INT32_MAX 0x7fffffff
+#define INT64_MAX 0x7fffffffffffffffL
+
+/* Maximum values of exact-width unsigned integer types. */
+#define UINT8_MAX 0xff
+#define UINT16_MAX 0xffff
+#define UINT32_MAX 0xffffffffU
+#define UINT64_MAX 0xffffffffffffffffUL
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.2 Limits of minimum-width integer types
+ */
+/* Minimum values of minimum-width signed integer types. */
+#define INT_LEAST8_MIN INT8_MIN
+#define INT_LEAST16_MIN INT16_MIN
+#define INT_LEAST32_MIN INT32_MIN
+#define INT_LEAST64_MIN INT64_MIN
+
+/* Maximum values of minimum-width signed integer types. */
+#define INT_LEAST8_MAX INT8_MAX
+#define INT_LEAST16_MAX INT16_MAX
+#define INT_LEAST32_MAX INT32_MAX
+#define INT_LEAST64_MAX INT64_MAX
+
+/* Maximum values of minimum-width unsigned integer types. */
+#define UINT_LEAST8_MAX UINT8_MAX
+#define UINT_LEAST16_MAX UINT16_MAX
+#define UINT_LEAST32_MAX UINT32_MAX
+#define UINT_LEAST64_MAX UINT64_MAX
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.3 Limits of fastest minimum-width integer types
+ */
+/* Minimum values of fastest minimum-width signed integer types. */
+#define INT_FAST8_MIN INT32_MIN
+#define INT_FAST16_MIN INT32_MIN
+#define INT_FAST32_MIN INT32_MIN
+#define INT_FAST64_MIN INT64_MIN
+
+/* Maximum values of fastest minimum-width signed integer types. */
+#define INT_FAST8_MAX INT32_MAX
+#define INT_FAST16_MAX INT32_MAX
+#define INT_FAST32_MAX INT32_MAX
+#define INT_FAST64_MAX INT64_MAX
+
+/* Maximum values of fastest minimum-width unsigned integer types. */
+#define UINT_FAST8_MAX UINT32_MAX
+#define UINT_FAST16_MAX UINT32_MAX
+#define UINT_FAST32_MAX UINT32_MAX
+#define UINT_FAST64_MAX UINT64_MAX
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.4 Limits of integer types capable of holding object pointers
+ */
+#define INTPTR_MIN INT64_MIN
+#define INTPTR_MAX INT64_MAX
+#define UINTPTR_MAX UINT64_MAX
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.5 Limits of greatest-width integer types
+ */
+#define INTMAX_MIN INT64_MIN
+#define INTMAX_MAX INT64_MAX
+#define UINTMAX_MAX UINT64_MAX
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.3 Limits of other integer types
+ */
+/* Limits of ptrdiff_t. */
+#define PTRDIFF_MIN INT64_MIN
+#define PTRDIFF_MAX INT64_MAX
+
+/* Limits of sig_atomic_t. */
+#define SIG_ATOMIC_MIN INT32_MIN
+#define SIG_ATOMIC_MAX INT32_MAX
+
+/* Limit of size_t. */
+#define SIZE_MAX UINT64_MAX
+
+/* Limits of wint_t. */
+#define WINT_MIN INT32_MIN
+#define WINT_MAX INT32_MAX
+
+#endif /* !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) */
+
+#endif /* !_MACHINE__STDINT_H_ */
diff --git a/sys/arm64/include/_types.h b/sys/arm64/include/_types.h
new file mode 100644
index 000000000000..b54a17d25024
--- /dev/null
+++ b/sys/arm64/include/_types.h
@@ -0,0 +1,100 @@
+/*-
+ * Copyright (c) 2002 Mike Barcroft <mike@FreeBSD.org>
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * From: @(#)ansi.h 8.2 (Berkeley) 1/4/94
+ * From: @(#)types.h 8.3 (Berkeley) 1/5/94
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE__TYPES_H_
+#define _MACHINE__TYPES_H_
+
+#ifndef _SYS_CDEFS_H_
+#error this file needs sys/cdefs.h as a prerequisite
+#endif
+
+/*
+ * Basic types upon which most other types are built.
+ */
+typedef signed char __int8_t;
+typedef unsigned char __uint8_t;
+typedef short __int16_t;
+typedef unsigned short __uint16_t;
+typedef int __int32_t;
+typedef unsigned int __uint32_t;
+typedef long __int64_t;
+typedef unsigned long __uint64_t;
+
+/*
+ * Standard type definitions.
+ */
+typedef __int32_t __clock_t; /* clock()... */
+typedef __int64_t __critical_t;
+#ifndef _STANDALONE
+typedef double __double_t;
+typedef float __float_t;
+#endif
+typedef __int64_t __intfptr_t;
+typedef __int64_t __intmax_t;
+typedef __int64_t __intptr_t;
+typedef __int32_t __int_fast8_t;
+typedef __int32_t __int_fast16_t;
+typedef __int32_t __int_fast32_t;
+typedef __int64_t __int_fast64_t;
+typedef __int8_t __int_least8_t;
+typedef __int16_t __int_least16_t;
+typedef __int32_t __int_least32_t;
+typedef __int64_t __int_least64_t;
+typedef __int64_t __ptrdiff_t; /* ptr1 - ptr2 */
+typedef __int64_t __register_t;
+typedef __int64_t __segsz_t; /* segment size (in pages) */
+typedef __uint64_t __size_t; /* sizeof() */
+typedef __int64_t __ssize_t; /* byte count or error */
+typedef __int64_t __time_t; /* time()... */
+typedef __uint64_t __uintfptr_t;
+typedef __uint64_t __uintmax_t;
+typedef __uint64_t __uintptr_t;
+typedef __uint32_t __uint_fast8_t;
+typedef __uint32_t __uint_fast16_t;
+typedef __uint32_t __uint_fast32_t;
+typedef __uint64_t __uint_fast64_t;
+typedef __uint8_t __uint_least8_t;
+typedef __uint16_t __uint_least16_t;
+typedef __uint32_t __uint_least32_t;
+typedef __uint64_t __uint_least64_t;
+typedef __uint64_t __u_register_t;
+typedef __uint64_t __vm_offset_t;
+typedef __uint64_t __vm_paddr_t;
+typedef __uint64_t __vm_size_t;
+typedef unsigned int ___wchar_t;
+
+#define __WCHAR_MIN 0 /* min value for a wchar_t */
+#define __WCHAR_MAX __UINT_MAX /* max value for a wchar_t */
+
+#endif /* !_MACHINE__TYPES_H_ */
diff --git a/sys/arm64/include/acpica_machdep.h b/sys/arm64/include/acpica_machdep.h
new file mode 100644
index 000000000000..f17e5dcfdbf1
--- /dev/null
+++ b/sys/arm64/include/acpica_machdep.h
@@ -0,0 +1,63 @@
+/*-
+ * Copyright (c) 2002 Mitsuru IWASAKI
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/******************************************************************************
+ *
+ * Name: acpica_machdep.h - arch-specific defines, etc.
+ * $Revision$
+ *
+ *****************************************************************************/
+
+#ifndef __ACPICA_MACHDEP_H__
+#define __ACPICA_MACHDEP_H__
+
+#ifdef _KERNEL
+
+#include <machine/_bus.h>
+
+/* Only use the reduced hardware model */
+#define ACPI_REDUCED_HARDWARE 1
+
+/* Section 5.2.10.1: global lock acquire/release functions */
+int acpi_acquire_global_lock(volatile uint32_t *);
+int acpi_release_global_lock(volatile uint32_t *);
+
+void *acpi_map_table(vm_paddr_t pa, const char *sig);
+void acpi_unmap_table(void *table);
+vm_paddr_t acpi_find_table(const char *sig);
+
+struct acpi_generic_address;
+
+int acpi_map_addr(struct acpi_generic_address *, bus_space_tag_t *,
+ bus_space_handle_t *, bus_size_t);
+
+extern int (*apei_nmi)(void);
+
+#endif /* _KERNEL */
+
+#endif /* __ACPICA_MACHDEP_H__ */
diff --git a/sys/arm64/include/armreg.h b/sys/arm64/include/armreg.h
new file mode 100644
index 000000000000..6d8b42bbe0e2
--- /dev/null
+++ b/sys/arm64/include/armreg.h
@@ -0,0 +1,930 @@
+/*-
+ * Copyright (c) 2013, 2014 Andrew Turner
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_ARMREG_H_
+#define _MACHINE_ARMREG_H_
+
+#define INSN_SIZE 4
+
+#define MRS_MASK 0xfff00000
+#define MRS_VALUE 0xd5300000
+#define MRS_SPECIAL(insn) ((insn) & 0x000fffe0)
+#define MRS_REGISTER(insn) ((insn) & 0x0000001f)
+#define MRS_Op0_SHIFT 19
+#define MRS_Op0_MASK 0x00080000
+#define MRS_Op1_SHIFT 16
+#define MRS_Op1_MASK 0x00070000
+#define MRS_CRn_SHIFT 12
+#define MRS_CRn_MASK 0x0000f000
+#define MRS_CRm_SHIFT 8
+#define MRS_CRm_MASK 0x00000f00
+#define MRS_Op2_SHIFT 5
+#define MRS_Op2_MASK 0x000000e0
+#define MRS_Rt_SHIFT 0
+#define MRS_Rt_MASK 0x0000001f
+#define MRS_REG(op0, op1, crn, crm, op2) \
+ (((op0) << MRS_Op0_SHIFT) | ((op1) << MRS_Op1_SHIFT) | \
+ ((crn) << MRS_CRn_SHIFT) | ((crm) << MRS_CRm_SHIFT) | \
+ ((op2) << MRS_Op2_SHIFT))
+
+#define READ_SPECIALREG(reg) \
+({ uint64_t _val; \
+ __asm __volatile("mrs %0, " __STRING(reg) : "=&r" (_val)); \
+ _val; \
+})
+#define WRITE_SPECIALREG(reg, _val) \
+ __asm __volatile("msr " __STRING(reg) ", %0" : : "r"((uint64_t)_val))
+
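+/*
+ * Usage sketch (illustrative only): the macros above take the register
+ * name exactly as it is written in assembly, e.g.
+ *
+ *	uint64_t midr = READ_SPECIALREG(midr_el1);
+ *	WRITE_SPECIALREG(cpacr_el1, CPACR_FPEN_TRAP_NONE);
+ *
+ * CPACR_FPEN_TRAP_NONE is defined later in this file; the register choice
+ * here is only an example.
+ */
+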
+#define UL(x) UINT64_C(x)
+
+/* CNTHCTL_EL2 - Counter-timer Hypervisor Control register */
+#define CNTHCTL_EVNTI_MASK (0xf << 4) /* Bit to trigger event stream */
+#define CNTHCTL_EVNTDIR (1 << 3) /* Control transition trigger bit */
+#define CNTHCTL_EVNTEN (1 << 2) /* Enable event stream */
+#define CNTHCTL_EL1PCEN (1 << 1) /* Allow EL0/1 physical timer access */
+#define CNTHCTL_EL1PCTEN (1 << 0) /*Allow EL0/1 physical counter access*/
+
+/* CNTP_CTL_EL0 - Counter-timer Physical Timer Control register */
+#define CNTP_CTL_ENABLE (1 << 0)
+#define CNTP_CTL_IMASK (1 << 1)
+#define CNTP_CTL_ISTATUS (1 << 2)
+
+/* CPACR_EL1 */
+#define CPACR_FPEN_MASK (0x3 << 20)
+#define CPACR_FPEN_TRAP_ALL1 (0x0 << 20) /* Traps from EL0 and EL1 */
+#define CPACR_FPEN_TRAP_EL0 (0x1 << 20) /* Traps from EL0 */
+#define CPACR_FPEN_TRAP_ALL2 (0x2 << 20) /* Traps from EL0 and EL1 */
+#define CPACR_FPEN_TRAP_NONE (0x3 << 20) /* No traps */
+#define CPACR_TTA (0x1 << 28)
+
+/* CTR_EL0 - Cache Type Register */
+#define CTR_RES1 (1 << 31)
+#define CTR_TminLine_SHIFT 32
+#define CTR_TminLine_MASK (UL(0x3f) << CTR_TminLine_SHIFT)
+#define CTR_TminLine_VAL(reg) ((reg) & CTR_TminLine_MASK)
+#define CTR_DIC_SHIFT 29
+#define CTR_DIC_MASK (0x1 << CTR_DIC_SHIFT)
+#define CTR_DIC_VAL(reg) ((reg) & CTR_DIC_MASK)
+#define CTR_IDC_SHIFT 28
+#define CTR_IDC_MASK (0x1 << CTR_IDC_SHIFT)
+#define CTR_IDC_VAL(reg) ((reg) & CTR_IDC_MASK)
+#define CTR_CWG_SHIFT 24
+#define CTR_CWG_MASK (0xf << CTR_CWG_SHIFT)
+#define CTR_CWG_VAL(reg) ((reg) & CTR_CWG_MASK)
+#define CTR_CWG_SIZE(reg) (4 << (CTR_CWG_VAL(reg) >> CTR_CWG_SHIFT))
+#define CTR_ERG_SHIFT 20
+#define CTR_ERG_MASK (0xf << CTR_ERG_SHIFT)
+#define CTR_ERG_VAL(reg) ((reg) & CTR_ERG_MASK)
+#define CTR_ERG_SIZE(reg) (4 << (CTR_ERG_VAL(reg) >> CTR_ERG_SHIFT))
+#define CTR_DLINE_SHIFT 16
+#define CTR_DLINE_MASK (0xf << CTR_DLINE_SHIFT)
+#define CTR_DLINE_VAL(reg) ((reg) & CTR_DLINE_MASK)
+#define CTR_DLINE_SIZE(reg) (4 << (CTR_DLINE_VAL(reg) >> CTR_DLINE_SHIFT))
+#define CTR_L1IP_SHIFT 14
+#define CTR_L1IP_MASK (0x3 << CTR_L1IP_SHIFT)
+#define CTR_L1IP_VAL(reg) ((reg) & CTR_L1IP_MASK)
+#define CTR_L1IP_VPIPT (0 << CTR_L1IP_SHIFT)
+#define CTR_L1IP_AIVIVT (1 << CTR_L1IP_SHIFT)
+#define CTR_L1IP_VIPT (2 << CTR_L1IP_SHIFT)
+#define CTR_L1IP_PIPT (3 << CTR_L1IP_SHIFT)
+#define CTR_ILINE_SHIFT 0
+#define CTR_ILINE_MASK (0xf << CTR_ILINE_SHIFT)
+#define CTR_ILINE_VAL(reg) ((reg) & CTR_ILINE_MASK)
+#define CTR_ILINE_SIZE(reg) (4 << (CTR_ILINE_VAL(reg) >> CTR_ILINE_SHIFT))
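+
+/*
+ * Example (sketch): the *_SIZE macros convert the log2 word counts held in
+ * CTR_EL0 into byte sizes, e.g. the smallest D-cache line on this CPU:
+ *
+ *	uint64_t ctr = READ_SPECIALREG(ctr_el0);
+ *	u_int dcache_line_size = CTR_DLINE_SIZE(ctr);
+ */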
+
+/* DAIF - Interrupt Mask Bits */
+#define DAIF_D_MASKED (1 << 9)
+#define DAIF_A_MASKED (1 << 8)
+#define DAIF_I_MASKED (1 << 7)
+#define DAIF_F_MASKED (1 << 6)
+
+/* DCZID_EL0 - Data Cache Zero ID register */
+#define DCZID_DZP (1 << 4) /* DC ZVA prohibited if non-0 */
+#define DCZID_BS_SHIFT 0
+#define DCZID_BS_MASK (0xf << DCZID_BS_SHIFT)
+#define DCZID_BS_SIZE(reg) (((reg) & DCZID_BS_MASK) >> DCZID_BS_SHIFT)
+
+/* ESR_ELx */
+#define ESR_ELx_ISS_MASK 0x01ffffff
+#define ISS_INSN_FnV (0x01 << 10)
+#define ISS_INSN_EA (0x01 << 9)
+#define ISS_INSN_S1PTW (0x01 << 7)
+#define ISS_INSN_IFSC_MASK (0x1f << 0)
+
+#define ISS_MSR_DIR_SHIFT 0
+#define ISS_MSR_DIR (0x01 << ISS_MSR_DIR_SHIFT)
+#define ISS_MSR_Rt_SHIFT 5
+#define ISS_MSR_Rt_MASK (0x1f << ISS_MSR_Rt_SHIFT)
+#define ISS_MSR_Rt(x) (((x) & ISS_MSR_Rt_MASK) >> ISS_MSR_Rt_SHIFT)
+#define ISS_MSR_CRm_SHIFT 1
+#define ISS_MSR_CRm_MASK (0xf << ISS_MSR_CRm_SHIFT)
+#define ISS_MSR_CRm(x) (((x) & ISS_MSR_CRm_MASK) >> ISS_MSR_CRm_SHIFT)
+#define ISS_MSR_CRn_SHIFT 10
+#define ISS_MSR_CRn_MASK (0xf << ISS_MSR_CRn_SHIFT)
+#define ISS_MSR_CRn(x) (((x) & ISS_MSR_CRn_MASK) >> ISS_MSR_CRn_SHIFT)
+#define ISS_MSR_OP1_SHIFT 14
+#define ISS_MSR_OP1_MASK (0x7 << ISS_MSR_OP1_SHIFT)
+#define ISS_MSR_OP1(x) (((x) & ISS_MSR_OP1_MASK) >> ISS_MSR_OP1_SHIFT)
+#define ISS_MSR_OP2_SHIFT 17
+#define ISS_MSR_OP2_MASK (0x7 << ISS_MSR_OP2_SHIFT)
+#define ISS_MSR_OP2(x) (((x) & ISS_MSR_OP2_MASK) >> ISS_MSR_OP2_SHIFT)
+#define ISS_MSR_OP0_SHIFT 20
+#define ISS_MSR_OP0_MASK (0x3 << ISS_MSR_OP0_SHIFT)
+#define ISS_MSR_OP0(x) (((x) & ISS_MSR_OP0_MASK) >> ISS_MSR_OP0_SHIFT)
+#define ISS_MSR_REG_MASK \
+ (ISS_MSR_OP0_MASK | ISS_MSR_OP2_MASK | ISS_MSR_OP1_MASK | \
+ ISS_MSR_CRn_MASK | ISS_MSR_CRm_MASK)
+
+#define ISS_DATA_ISV_SHIFT 24
+#define ISS_DATA_ISV (0x01 << ISS_DATA_ISV_SHIFT)
+#define ISS_DATA_SAS_SHIFT 22
+#define ISS_DATA_SAS_MASK (0x03 << ISS_DATA_SAS_SHIFT)
+#define ISS_DATA_SSE_SHIFT 21
+#define ISS_DATA_SSE (0x01 << ISS_DATA_SSE_SHIFT)
+#define ISS_DATA_SRT_SHIFT 16
+#define ISS_DATA_SRT_MASK (0x1f << ISS_DATA_SRT_SHIFT)
+#define ISS_DATA_SF (0x01 << 15)
+#define ISS_DATA_AR (0x01 << 14)
+#define ISS_DATA_FnV (0x01 << 10)
+#define ISS_DATA_EA (0x01 << 9)
+#define ISS_DATA_CM (0x01 << 8)
+#define ISS_DATA_S1PTW (0x01 << 7)
+#define ISS_DATA_WnR_SHIFT 6
+#define ISS_DATA_WnR (0x01 << ISS_DATA_WnR_SHIFT)
+#define ISS_DATA_DFSC_MASK (0x3f << 0)
+#define ISS_DATA_DFSC_ASF_L0 (0x00 << 0)
+#define ISS_DATA_DFSC_ASF_L1 (0x01 << 0)
+#define ISS_DATA_DFSC_ASF_L2 (0x02 << 0)
+#define ISS_DATA_DFSC_ASF_L3 (0x03 << 0)
+#define ISS_DATA_DFSC_TF_L0 (0x04 << 0)
+#define ISS_DATA_DFSC_TF_L1 (0x05 << 0)
+#define ISS_DATA_DFSC_TF_L2 (0x06 << 0)
+#define ISS_DATA_DFSC_TF_L3 (0x07 << 0)
+#define ISS_DATA_DFSC_AFF_L1 (0x09 << 0)
+#define ISS_DATA_DFSC_AFF_L2 (0x0a << 0)
+#define ISS_DATA_DFSC_AFF_L3 (0x0b << 0)
+#define ISS_DATA_DFSC_PF_L1 (0x0d << 0)
+#define ISS_DATA_DFSC_PF_L2 (0x0e << 0)
+#define ISS_DATA_DFSC_PF_L3 (0x0f << 0)
+#define ISS_DATA_DFSC_EXT (0x10 << 0)
+#define ISS_DATA_DFSC_EXT_L0 (0x14 << 0)
+#define ISS_DATA_DFSC_EXT_L1 (0x15 << 0)
+#define ISS_DATA_DFSC_EXT_L2 (0x16 << 0)
+#define ISS_DATA_DFSC_EXT_L3 (0x17 << 0)
+#define ISS_DATA_DFSC_ECC (0x18 << 0)
+#define ISS_DATA_DFSC_ECC_L0 (0x1c << 0)
+#define ISS_DATA_DFSC_ECC_L1 (0x1d << 0)
+#define ISS_DATA_DFSC_ECC_L2 (0x1e << 0)
+#define ISS_DATA_DFSC_ECC_L3 (0x1f << 0)
+#define ISS_DATA_DFSC_ALIGN (0x21 << 0)
+#define ISS_DATA_DFSC_TLB_CONFLICT (0x30 << 0)
+#define ESR_ELx_IL (0x01 << 25)
+#define ESR_ELx_EC_SHIFT 26
+#define ESR_ELx_EC_MASK (0x3f << 26)
+#define ESR_ELx_EXCEPTION(esr) (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)
+#define EXCP_UNKNOWN 0x00 /* Unknown exception */
+#define EXCP_TRAP_WFI_WFE 0x01 /* Trapped WFI or WFE */
+#define EXCP_FP_SIMD 0x07 /* VFP/SIMD trap */
+#define EXCP_ILL_STATE 0x0e /* Illegal execution state */
+#define EXCP_SVC32 0x11 /* SVC trap for AArch32 */
+#define EXCP_SVC64 0x15 /* SVC trap for AArch64 */
+#define EXCP_HVC 0x16 /* HVC trap */
+#define EXCP_MSR 0x18 /* MSR/MRS trap */
+#define EXCP_INSN_ABORT_L 0x20 /* Instruction abort, from lower EL */
+#define EXCP_INSN_ABORT 0x21 /* Instruction abort, from same EL */
+#define EXCP_PC_ALIGN 0x22 /* PC alignment fault */
+#define EXCP_DATA_ABORT_L 0x24 /* Data abort, from lower EL */
+#define EXCP_DATA_ABORT 0x25 /* Data abort, from same EL */
+#define EXCP_SP_ALIGN 0x26 /* SP alignment fault */
+#define EXCP_TRAP_FP 0x2c /* Trapped FP exception */
+#define EXCP_SERROR 0x2f /* SError interrupt */
+#define EXCP_BRKPT_EL0 0x30 /* Hardware breakpoint, from lower EL */
+#define EXCP_SOFTSTP_EL0 0x32 /* Software Step, from lower EL */
+#define EXCP_SOFTSTP_EL1 0x33 /* Software Step, from same EL */
+#define EXCP_WATCHPT_EL1 0x35 /* Watchpoint, from same EL */
+#define EXCP_BRK 0x3c /* Breakpoint */
+
+/* ICC_CTLR_EL1 */
+#define ICC_CTLR_EL1_EOIMODE (1U << 1)
+
+/* ICC_IAR1_EL1 */
+#define ICC_IAR1_EL1_SPUR (0x03ff)
+
+/* ICC_IGRPEN0_EL1 */
+#define ICC_IGRPEN0_EL1_EN (1U << 0)
+
+/* ICC_PMR_EL1 */
+#define ICC_PMR_EL1_PRIO_MASK (0xFFUL)
+
+/* ICC_SGI1R_EL1 */
+#define ICC_SGI1R_EL1_TL_MASK 0xffffUL
+#define ICC_SGI1R_EL1_AFF1_SHIFT 16
+#define ICC_SGI1R_EL1_SGIID_SHIFT 24
+#define ICC_SGI1R_EL1_AFF2_SHIFT 32
+#define ICC_SGI1R_EL1_AFF3_SHIFT 48
+#define ICC_SGI1R_EL1_SGIID_MASK 0xfUL
+#define ICC_SGI1R_EL1_IRM (0x1UL << 40)
+
+/* ICC_SRE_EL1 */
+#define ICC_SRE_EL1_SRE (1U << 0)
+
+/* ID_AA64DFR0_EL1 */
+#define ID_AA64DFR0_EL1 MRS_REG(3, 0, 0, 5, 0)
+#define ID_AA64DFR0_DebugVer_SHIFT 0
+#define ID_AA64DFR0_DebugVer_MASK (UL(0xf) << ID_AA64DFR0_DebugVer_SHIFT)
+#define ID_AA64DFR0_DebugVer_VAL(x) ((x) & ID_AA64DFR0_DebugVer_MASK)
+#define ID_AA64DFR0_DebugVer_8 (UL(0x6) << ID_AA64DFR0_DebugVer_SHIFT)
+#define ID_AA64DFR0_DebugVer_8_VHE (UL(0x7) << ID_AA64DFR0_DebugVer_SHIFT)
+#define ID_AA64DFR0_DebugVer_8_2 (UL(0x8) << ID_AA64DFR0_DebugVer_SHIFT)
+#define ID_AA64DFR0_TraceVer_SHIFT 4
+#define ID_AA64DFR0_TraceVer_MASK (UL(0xf) << ID_AA64DFR0_TraceVer_SHIFT)
+#define ID_AA64DFR0_TraceVer_VAL(x) ((x) & ID_AA64DFR0_TraceVer_MASK)
+#define ID_AA64DFR0_TraceVer_NONE (UL(0x0) << ID_AA64DFR0_TraceVer_SHIFT)
+#define ID_AA64DFR0_TraceVer_IMPL (UL(0x1) << ID_AA64DFR0_TraceVer_SHIFT)
+#define ID_AA64DFR0_PMUVer_SHIFT 8
+#define ID_AA64DFR0_PMUVer_MASK (UL(0xf) << ID_AA64DFR0_PMUVer_SHIFT)
+#define ID_AA64DFR0_PMUVer_VAL(x) ((x) & ID_AA64DFR0_PMUVer_MASK)
+#define ID_AA64DFR0_PMUVer_NONE (UL(0x0) << ID_AA64DFR0_PMUVer_SHIFT)
+#define ID_AA64DFR0_PMUVer_3 (UL(0x1) << ID_AA64DFR0_PMUVer_SHIFT)
+#define ID_AA64DFR0_PMUVer_3_1 (UL(0x4) << ID_AA64DFR0_PMUVer_SHIFT)
+#define ID_AA64DFR0_PMUVer_IMPL (UL(0xf) << ID_AA64DFR0_PMUVer_SHIFT)
+#define ID_AA64DFR0_BRPs_SHIFT 12
+#define ID_AA64DFR0_BRPs_MASK (UL(0xf) << ID_AA64DFR0_BRPs_SHIFT)
+#define ID_AA64DFR0_BRPs_VAL(x) \
+ ((((x) >> ID_AA64DFR0_BRPs_SHIFT) & 0xf) + 1)
+#define ID_AA64DFR0_WRPs_SHIFT 20
+#define ID_AA64DFR0_WRPs_MASK (UL(0xf) << ID_AA64DFR0_WRPs_SHIFT)
+#define ID_AA64DFR0_WRPs_VAL(x) \
+ ((((x) >> ID_AA64DFR0_WRPs_SHIFT) & 0xf) + 1)
+#define ID_AA64DFR0_CTX_CMPs_SHIFT 28
+#define ID_AA64DFR0_CTX_CMPs_MASK (UL(0xf) << ID_AA64DFR0_CTX_CMPs_SHIFT)
+#define ID_AA64DFR0_CTX_CMPs_VAL(x) \
+ ((((x) >> ID_AA64DFR0_CTX_CMPs_SHIFT) & 0xf) + 1)
+#define ID_AA64DFR0_PMSVer_SHIFT 32
+#define ID_AA64DFR0_PMSVer_MASK (UL(0xf) << ID_AA64DFR0_PMSVer_SHIFT)
+#define ID_AA64DFR0_PMSVer_VAL(x) ((x) & ID_AA64DFR0_PMSVer_MASK)
+#define ID_AA64DFR0_PMSVer_NONE (UL(0x0) << ID_AA64DFR0_PMSVer_SHIFT)
+#define ID_AA64DFR0_PMSVer_V1 (UL(0x1) << ID_AA64DFR0_PMSVer_SHIFT)
+
+/* ID_AA64ISAR0_EL1 */
+#define ID_AA64ISAR0_EL1 MRS_REG(3, 0, 0, 6, 0)
+#define ID_AA64ISAR0_AES_SHIFT 4
+#define ID_AA64ISAR0_AES_MASK (UL(0xf) << ID_AA64ISAR0_AES_SHIFT)
+#define ID_AA64ISAR0_AES_VAL(x) ((x) & ID_AA64ISAR0_AES_MASK)
+#define ID_AA64ISAR0_AES_NONE (UL(0x0) << ID_AA64ISAR0_AES_SHIFT)
+#define ID_AA64ISAR0_AES_BASE (UL(0x1) << ID_AA64ISAR0_AES_SHIFT)
+#define ID_AA64ISAR0_AES_PMULL (UL(0x2) << ID_AA64ISAR0_AES_SHIFT)
+#define ID_AA64ISAR0_SHA1_SHIFT 8
+#define ID_AA64ISAR0_SHA1_MASK (UL(0xf) << ID_AA64ISAR0_SHA1_SHIFT)
+#define ID_AA64ISAR0_SHA1_VAL(x) ((x) & ID_AA64ISAR0_SHA1_MASK)
+#define ID_AA64ISAR0_SHA1_NONE (UL(0x0) << ID_AA64ISAR0_SHA1_SHIFT)
+#define ID_AA64ISAR0_SHA1_BASE (UL(0x1) << ID_AA64ISAR0_SHA1_SHIFT)
+#define ID_AA64ISAR0_SHA2_SHIFT 12
+#define ID_AA64ISAR0_SHA2_MASK (UL(0xf) << ID_AA64ISAR0_SHA2_SHIFT)
+#define ID_AA64ISAR0_SHA2_VAL(x) ((x) & ID_AA64ISAR0_SHA2_MASK)
+#define ID_AA64ISAR0_SHA2_NONE (UL(0x0) << ID_AA64ISAR0_SHA2_SHIFT)
+#define ID_AA64ISAR0_SHA2_BASE (UL(0x1) << ID_AA64ISAR0_SHA2_SHIFT)
+#define ID_AA64ISAR0_SHA2_512 (UL(0x2) << ID_AA64ISAR0_SHA2_SHIFT)
+#define ID_AA64ISAR0_CRC32_SHIFT 16
+#define ID_AA64ISAR0_CRC32_MASK (UL(0xf) << ID_AA64ISAR0_CRC32_SHIFT)
+#define ID_AA64ISAR0_CRC32_VAL(x) ((x) & ID_AA64ISAR0_CRC32_MASK)
+#define ID_AA64ISAR0_CRC32_NONE (UL(0x0) << ID_AA64ISAR0_CRC32_SHIFT)
+#define ID_AA64ISAR0_CRC32_BASE (UL(0x1) << ID_AA64ISAR0_CRC32_SHIFT)
+#define ID_AA64ISAR0_Atomic_SHIFT 20
+#define ID_AA64ISAR0_Atomic_MASK (UL(0xf) << ID_AA64ISAR0_Atomic_SHIFT)
+#define ID_AA64ISAR0_Atomic_VAL(x) ((x) & ID_AA64ISAR0_Atomic_MASK)
+#define ID_AA64ISAR0_Atomic_NONE (UL(0x0) << ID_AA64ISAR0_Atomic_SHIFT)
+#define ID_AA64ISAR0_Atomic_IMPL (UL(0x2) << ID_AA64ISAR0_Atomic_SHIFT)
+#define ID_AA64ISAR0_RDM_SHIFT 28
+#define ID_AA64ISAR0_RDM_MASK (UL(0xf) << ID_AA64ISAR0_RDM_SHIFT)
+#define ID_AA64ISAR0_RDM_VAL(x) ((x) & ID_AA64ISAR0_RDM_MASK)
+#define ID_AA64ISAR0_RDM_NONE (UL(0x0) << ID_AA64ISAR0_RDM_SHIFT)
+#define ID_AA64ISAR0_RDM_IMPL (UL(0x1) << ID_AA64ISAR0_RDM_SHIFT)
+#define ID_AA64ISAR0_SHA3_SHIFT 32
+#define ID_AA64ISAR0_SHA3_MASK (UL(0xf) << ID_AA64ISAR0_SHA3_SHIFT)
+#define ID_AA64ISAR0_SHA3_VAL(x) ((x) & ID_AA64ISAR0_SHA3_MASK)
+#define ID_AA64ISAR0_SHA3_NONE (UL(0x0) << ID_AA64ISAR0_SHA3_SHIFT)
+#define ID_AA64ISAR0_SHA3_IMPL (UL(0x1) << ID_AA64ISAR0_SHA3_SHIFT)
+#define ID_AA64ISAR0_SM3_SHIFT 36
+#define ID_AA64ISAR0_SM3_MASK (UL(0xf) << ID_AA64ISAR0_SM3_SHIFT)
+#define ID_AA64ISAR0_SM3_VAL(x) ((x) & ID_AA64ISAR0_SM3_MASK)
+#define ID_AA64ISAR0_SM3_NONE (UL(0x0) << ID_AA64ISAR0_SM3_SHIFT)
+#define ID_AA64ISAR0_SM3_IMPL (UL(0x1) << ID_AA64ISAR0_SM3_SHIFT)
+#define ID_AA64ISAR0_SM4_SHIFT 40
+#define ID_AA64ISAR0_SM4_MASK (UL(0xf) << ID_AA64ISAR0_SM4_SHIFT)
+#define ID_AA64ISAR0_SM4_VAL(x) ((x) & ID_AA64ISAR0_SM4_MASK)
+#define ID_AA64ISAR0_SM4_NONE (UL(0x0) << ID_AA64ISAR0_SM4_SHIFT)
+#define ID_AA64ISAR0_SM4_IMPL (UL(0x1) << ID_AA64ISAR0_SM4_SHIFT)
+#define ID_AA64ISAR0_DP_SHIFT 44
+#define ID_AA64ISAR0_DP_MASK (UL(0xf) << ID_AA64ISAR0_DP_SHIFT)
+#define ID_AA64ISAR0_DP_VAL(x) ((x) & ID_AA64ISAR0_DP_MASK)
+#define ID_AA64ISAR0_DP_NONE (UL(0x0) << ID_AA64ISAR0_DP_SHIFT)
+#define ID_AA64ISAR0_DP_IMPL (UL(0x1) << ID_AA64ISAR0_DP_SHIFT)
+#define ID_AA64ISAR0_FHM_SHIFT 48
+#define ID_AA64ISAR0_FHM_MASK (UL(0xf) << ID_AA64ISAR0_FHM_SHIFT)
+#define ID_AA64ISAR0_FHM_VAL(x) ((x) & ID_AA64ISAR0_FHM_MASK)
+#define ID_AA64ISAR0_FHM_NONE (UL(0x0) << ID_AA64ISAR0_FHM_SHIFT)
+#define ID_AA64ISAR0_FHM_IMPL (UL(0x1) << ID_AA64ISAR0_FHM_SHIFT)
+#define ID_AA64ISAR0_TS_SHIFT 52
+#define ID_AA64ISAR0_TS_MASK (UL(0xf) << ID_AA64ISAR0_TS_SHIFT)
+#define ID_AA64ISAR0_TS_VAL(x) ((x) & ID_AA64ISAR0_TS_MASK)
+#define ID_AA64ISAR0_TS_NONE (UL(0x0) << ID_AA64ISAR0_TS_SHIFT)
+#define ID_AA64ISAR0_TS_CondM_8_4 (UL(0x1) << ID_AA64ISAR0_TS_SHIFT)
+#define ID_AA64ISAR0_TS_CondM_8_5 (UL(0x2) << ID_AA64ISAR0_TS_SHIFT)
+#define ID_AA64ISAR0_TLB_SHIFT 56
+#define ID_AA64ISAR0_TLB_MASK (UL(0xf) << ID_AA64ISAR0_TLB_SHIFT)
+#define ID_AA64ISAR0_TLB_VAL(x) ((x) & ID_AA64ISAR0_TLB_MASK)
+#define ID_AA64ISAR0_TLB_NONE (UL(0x0) << ID_AA64ISAR0_TLB_SHIFT)
+#define ID_AA64ISAR0_TLB_TLBIOS (UL(0x1) << ID_AA64ISAR0_TLB_SHIFT)
+#define ID_AA64ISAR0_TLB_TLBIOSR (UL(0x2) << ID_AA64ISAR0_TLB_SHIFT)
+#define ID_AA64ISAR0_RNDR_SHIFT 60
+#define ID_AA64ISAR0_RNDR_MASK (UL(0xf) << ID_AA64ISAR0_RNDR_SHIFT)
+#define ID_AA64ISAR0_RNDR_VAL(x) ((x) & ID_AA64ISAR0_RNDR_MASK)
+#define ID_AA64ISAR0_RNDR_NONE (UL(0x0) << ID_AA64ISAR0_RNDR_SHIFT)
+#define ID_AA64ISAR0_RNDR_IMPL (UL(0x1) << ID_AA64ISAR0_RNDR_SHIFT)
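+
+/*
+ * Example (sketch): the *_VAL() macros keep the field in place, so feature
+ * checks compare against the shifted constants, e.g.
+ *
+ *	uint64_t isar0 = READ_SPECIALREG(id_aa64isar0_el1);
+ *	if (ID_AA64ISAR0_CRC32_VAL(isar0) != ID_AA64ISAR0_CRC32_NONE)
+ *		have_crc32 = true;
+ *
+ * "have_crc32" is a placeholder variable used only for the example.
+ */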
+
+/* ID_AA64ISAR1_EL1 */
+#define ID_AA64ISAR1_EL1 MRS_REG(3, 0, 0, 6, 1)
+#define ID_AA64ISAR1_DPB_SHIFT 0
+#define ID_AA64ISAR1_DPB_MASK (UL(0xf) << ID_AA64ISAR1_DPB_SHIFT)
+#define ID_AA64ISAR1_DPB_VAL(x) ((x) & ID_AA64ISAR1_DPB_MASK)
+#define ID_AA64ISAR1_DPB_NONE (UL(0x0) << ID_AA64ISAR1_DPB_SHIFT)
+#define ID_AA64ISAR1_DPB_DCCVAP (UL(0x1) << ID_AA64ISAR1_DPB_SHIFT)
+#define ID_AA64ISAR1_DPB_DCCVADP (UL(0x2) << ID_AA64ISAR1_DPB_SHIFT)
+#define ID_AA64ISAR1_APA_SHIFT 4
+#define ID_AA64ISAR1_APA_MASK (UL(0xf) << ID_AA64ISAR1_APA_SHIFT)
+#define ID_AA64ISAR1_APA_VAL(x) ((x) & ID_AA64ISAR1_APA_MASK)
+#define ID_AA64ISAR1_APA_NONE (UL(0x0) << ID_AA64ISAR1_APA_SHIFT)
+#define ID_AA64ISAR1_APA_IMPL (UL(0x1) << ID_AA64ISAR1_APA_SHIFT)
+#define ID_AA64ISAR1_API_SHIFT 8
+#define ID_AA64ISAR1_API_MASK (UL(0xf) << ID_AA64ISAR1_API_SHIFT)
+#define ID_AA64ISAR1_API_VAL(x) ((x) & ID_AA64ISAR1_API_MASK)
+#define ID_AA64ISAR1_API_NONE (UL(0x0) << ID_AA64ISAR1_API_SHIFT)
+#define ID_AA64ISAR1_API_IMPL (UL(0x1) << ID_AA64ISAR1_API_SHIFT)
+#define ID_AA64ISAR1_JSCVT_SHIFT 12
+#define ID_AA64ISAR1_JSCVT_MASK (UL(0xf) << ID_AA64ISAR1_JSCVT_SHIFT)
+#define ID_AA64ISAR1_JSCVT_VAL(x) ((x) & ID_AA64ISAR1_JSCVT_MASK)
+#define ID_AA64ISAR1_JSCVT_NONE (UL(0x0) << ID_AA64ISAR1_JSCVT_SHIFT)
+#define ID_AA64ISAR1_JSCVT_IMPL (UL(0x1) << ID_AA64ISAR1_JSCVT_SHIFT)
+#define ID_AA64ISAR1_FCMA_SHIFT 16
+#define ID_AA64ISAR1_FCMA_MASK (UL(0xf) << ID_AA64ISAR1_FCMA_SHIFT)
+#define ID_AA64ISAR1_FCMA_VAL(x) ((x) & ID_AA64ISAR1_FCMA_MASK)
+#define ID_AA64ISAR1_FCMA_NONE (UL(0x0) << ID_AA64ISAR1_FCMA_SHIFT)
+#define ID_AA64ISAR1_FCMA_IMPL (UL(0x1) << ID_AA64ISAR1_FCMA_SHIFT)
+#define ID_AA64ISAR1_LRCPC_SHIFT 20
+#define ID_AA64ISAR1_LRCPC_MASK (UL(0xf) << ID_AA64ISAR1_LRCPC_SHIFT)
+#define ID_AA64ISAR1_LRCPC_VAL(x) ((x) & ID_AA64ISAR1_LRCPC_MASK)
+#define ID_AA64ISAR1_LRCPC_NONE (UL(0x0) << ID_AA64ISAR1_LRCPC_SHIFT)
+#define ID_AA64ISAR1_LRCPC_RCPC_8_3 (UL(0x1) << ID_AA64ISAR1_LRCPC_SHIFT)
+#define ID_AA64ISAR1_LRCPC_RCPC_8_4 (UL(0x2) << ID_AA64ISAR1_LRCPC_SHIFT)
+#define ID_AA64ISAR1_GPA_SHIFT 24
+#define ID_AA64ISAR1_GPA_MASK (UL(0xf) << ID_AA64ISAR1_GPA_SHIFT)
+#define ID_AA64ISAR1_GPA_VAL(x) ((x) & ID_AA64ISAR1_GPA_MASK)
+#define ID_AA64ISAR1_GPA_NONE (UL(0x0) << ID_AA64ISAR1_GPA_SHIFT)
+#define ID_AA64ISAR1_GPA_IMPL (UL(0x1) << ID_AA64ISAR1_GPA_SHIFT)
+#define ID_AA64ISAR1_GPI_SHIFT 28
+#define ID_AA64ISAR1_GPI_MASK (UL(0xf) << ID_AA64ISAR1_GPI_SHIFT)
+#define ID_AA64ISAR1_GPI_VAL(x) ((x) & ID_AA64ISAR1_GPI_MASK)
+#define ID_AA64ISAR1_GPI_NONE (UL(0x0) << ID_AA64ISAR1_GPI_SHIFT)
+#define ID_AA64ISAR1_GPI_IMPL (UL(0x1) << ID_AA64ISAR1_GPI_SHIFT)
+#define ID_AA64ISAR1_FRINTTS_SHIFT 32
+#define ID_AA64ISAR1_FRINTTS_MASK (UL(0xf) << ID_AA64ISAR1_FRINTTS_SHIFT)
+#define ID_AA64ISAR1_FRINTTS_VAL(x) ((x) & ID_AA64ISAR1_FRINTTS_MASK)
+#define ID_AA64ISAR1_FRINTTS_NONE (UL(0x0) << ID_AA64ISAR1_FRINTTS_SHIFT)
+#define ID_AA64ISAR1_FRINTTS_IMPL (UL(0x1) << ID_AA64ISAR1_FRINTTS_SHIFT)
+#define ID_AA64ISAR1_SB_SHIFT 36
+#define ID_AA64ISAR1_SB_MASK (UL(0xf) << ID_AA64ISAR1_SB_SHIFT)
+#define ID_AA64ISAR1_SB_VAL(x) ((x) & ID_AA64ISAR1_SB_MASK)
+#define ID_AA64ISAR1_SB_NONE (UL(0x0) << ID_AA64ISAR1_SB_SHIFT)
+#define ID_AA64ISAR1_SB_IMPL (UL(0x1) << ID_AA64ISAR1_SB_SHIFT)
+#define ID_AA64ISAR1_SPECRES_SHIFT 40
+#define ID_AA64ISAR1_SPECRES_MASK (UL(0xf) << ID_AA64ISAR1_SPECRES_SHIFT)
+#define ID_AA64ISAR1_SPECRES_VAL(x) ((x) & ID_AA64ISAR1_SPECRES_MASK)
+#define ID_AA64ISAR1_SPECRES_NONE (UL(0x0) << ID_AA64ISAR1_SPECRES_SHIFT)
+#define ID_AA64ISAR1_SPECRES_IMPL (UL(0x1) << ID_AA64ISAR1_SPECRES_SHIFT)
+#define ID_AA64ISAR1_BF16_SHIFT 44
+#define ID_AA64ISAR1_BF16_MASK (UL(0xf) << ID_AA64ISAR1_BF16_SHIFT)
+#define ID_AA64ISAR1_BF16_VAL(x) ((x) & ID_AA64ISAR1_BF16_MASK)
+#define ID_AA64ISAR1_BF16_NONE (UL(0x0) << ID_AA64ISAR1_BF16_SHIFT)
+#define ID_AA64ISAR1_BF16_IMPL (UL(0x1) << ID_AA64ISAR1_BF16_SHIFT)
+#define ID_AA64ISAR1_DGH_SHIFT 48
+#define ID_AA64ISAR1_DGH_MASK (UL(0xf) << ID_AA64ISAR1_DGH_SHIFT)
+#define ID_AA64ISAR1_DGH_VAL(x) ((x) & ID_AA64ISAR1_DGH_MASK)
+#define ID_AA64ISAR1_DGH_NONE (UL(0x0) << ID_AA64ISAR1_DGH_SHIFT)
+#define ID_AA64ISAR1_DGH_IMPL (UL(0x1) << ID_AA64ISAR1_DGH_SHIFT)
+#define ID_AA64ISAR1_I8MM_SHIFT 52
+#define ID_AA64ISAR1_I8MM_MASK (UL(0xf) << ID_AA64ISAR1_I8MM_SHIFT)
+#define ID_AA64ISAR1_I8MM_VAL(x) ((x) & ID_AA64ISAR1_I8MM_MASK)
+#define ID_AA64ISAR1_I8MM_NONE (UL(0x0) << ID_AA64ISAR1_I8MM_SHIFT)
+#define ID_AA64ISAR1_I8MM_IMPL (UL(0x1) << ID_AA64ISAR1_I8MM_SHIFT)
+
+/* ID_AA64MMFR0_EL1 */
+#define ID_AA64MMFR0_EL1 MRS_REG(3, 0, 0, 7, 0)
+#define ID_AA64MMFR0_PARange_SHIFT 0
+#define ID_AA64MMFR0_PARange_MASK (UL(0xf) << ID_AA64MMFR0_PARange_SHIFT)
+#define ID_AA64MMFR0_PARange_VAL(x) ((x) & ID_AA64MMFR0_PARange_MASK)
+#define ID_AA64MMFR0_PARange_4G (UL(0x0) << ID_AA64MMFR0_PARange_SHIFT)
+#define ID_AA64MMFR0_PARange_64G (UL(0x1) << ID_AA64MMFR0_PARange_SHIFT)
+#define ID_AA64MMFR0_PARange_1T (UL(0x2) << ID_AA64MMFR0_PARange_SHIFT)
+#define ID_AA64MMFR0_PARange_4T (UL(0x3) << ID_AA64MMFR0_PARange_SHIFT)
+#define ID_AA64MMFR0_PARange_16T (UL(0x4) << ID_AA64MMFR0_PARange_SHIFT)
+#define ID_AA64MMFR0_PARange_256T (UL(0x5) << ID_AA64MMFR0_PARange_SHIFT)
+#define ID_AA64MMFR0_PARange_4P (UL(0x6) << ID_AA64MMFR0_PARange_SHIFT)
+#define ID_AA64MMFR0_ASIDBits_SHIFT 4
+#define ID_AA64MMFR0_ASIDBits_MASK (UL(0xf) << ID_AA64MMFR0_ASIDBits_SHIFT)
+#define ID_AA64MMFR0_ASIDBits_VAL(x) ((x) & ID_AA64MMFR0_ASIDBits_MASK)
+#define ID_AA64MMFR0_ASIDBits_8 (UL(0x0) << ID_AA64MMFR0_ASIDBits_SHIFT)
+#define ID_AA64MMFR0_ASIDBits_16 (UL(0x2) << ID_AA64MMFR0_ASIDBits_SHIFT)
+#define ID_AA64MMFR0_BigEnd_SHIFT 8
+#define ID_AA64MMFR0_BigEnd_MASK (UL(0xf) << ID_AA64MMFR0_BigEnd_SHIFT)
+#define ID_AA64MMFR0_BigEnd_VAL(x) ((x) & ID_AA64MMFR0_BigEnd_MASK)
+#define ID_AA64MMFR0_BigEnd_FIXED (UL(0x0) << ID_AA64MMFR0_BigEnd_SHIFT)
+#define ID_AA64MMFR0_BigEnd_MIXED (UL(0x1) << ID_AA64MMFR0_BigEnd_SHIFT)
+#define ID_AA64MMFR0_SNSMem_SHIFT 12
+#define ID_AA64MMFR0_SNSMem_MASK (UL(0xf) << ID_AA64MMFR0_SNSMem_SHIFT)
+#define ID_AA64MMFR0_SNSMem_VAL(x) ((x) & ID_AA64MMFR0_SNSMem_MASK)
+#define ID_AA64MMFR0_SNSMem_NONE (UL(0x0) << ID_AA64MMFR0_SNSMem_SHIFT)
+#define ID_AA64MMFR0_SNSMem_DISTINCT (UL(0x1) << ID_AA64MMFR0_SNSMem_SHIFT)
+#define ID_AA64MMFR0_BigEndEL0_SHIFT 16
+#define ID_AA64MMFR0_BigEndEL0_MASK (UL(0xf) << ID_AA64MMFR0_BigEndEL0_SHIFT)
+#define ID_AA64MMFR0_BigEndEL0_VAL(x) ((x) & ID_AA64MMFR0_BigEndEL0_MASK)
+#define ID_AA64MMFR0_BigEndEL0_FIXED (UL(0x0) << ID_AA64MMFR0_BigEndEL0_SHIFT)
+#define ID_AA64MMFR0_BigEndEL0_MIXED (UL(0x1) << ID_AA64MMFR0_BigEndEL0_SHIFT)
+#define ID_AA64MMFR0_TGran16_SHIFT 20
+#define ID_AA64MMFR0_TGran16_MASK (UL(0xf) << ID_AA64MMFR0_TGran16_SHIFT)
+#define ID_AA64MMFR0_TGran16_VAL(x) ((x) & ID_AA64MMFR0_TGran16_MASK)
+#define ID_AA64MMFR0_TGran16_NONE (UL(0x0) << ID_AA64MMFR0_TGran16_SHIFT)
+#define ID_AA64MMFR0_TGran16_IMPL (UL(0x1) << ID_AA64MMFR0_TGran16_SHIFT)
+#define ID_AA64MMFR0_TGran64_SHIFT 24
+#define ID_AA64MMFR0_TGran64_MASK (UL(0xf) << ID_AA64MMFR0_TGran64_SHIFT)
+#define ID_AA64MMFR0_TGran64_VAL(x) ((x) & ID_AA64MMFR0_TGran64_MASK)
+#define ID_AA64MMFR0_TGran64_IMPL (UL(0x0) << ID_AA64MMFR0_TGran64_SHIFT)
+#define ID_AA64MMFR0_TGran64_NONE (UL(0xf) << ID_AA64MMFR0_TGran64_SHIFT)
+#define ID_AA64MMFR0_TGran4_SHIFT 28
+#define ID_AA64MMFR0_TGran4_MASK (UL(0xf) << ID_AA64MMFR0_TGran4_SHIFT)
+#define ID_AA64MMFR0_TGran4_VAL(x) ((x) & ID_AA64MMFR0_TGran4_MASK)
+#define ID_AA64MMFR0_TGran4_IMPL (UL(0x0) << ID_AA64MMFR0_TGran4_SHIFT)
+#define ID_AA64MMFR0_TGran4_NONE (UL(0xf) << ID_AA64MMFR0_TGran4_SHIFT)
+
+/* ID_AA64MMFR1_EL1 */
+#define ID_AA64MMFR1_EL1 MRS_REG(3, 0, 0, 7, 1)
+#define ID_AA64MMFR1_HAFDBS_SHIFT 0
+#define ID_AA64MMFR1_HAFDBS_MASK (UL(0xf) << ID_AA64MMFR1_HAFDBS_SHIFT)
+#define ID_AA64MMFR1_HAFDBS_VAL(x) ((x) & ID_AA64MMFR1_HAFDBS_MASK)
+#define ID_AA64MMFR1_HAFDBS_NONE (UL(0x0) << ID_AA64MMFR1_HAFDBS_SHIFT)
+#define ID_AA64MMFR1_HAFDBS_AF (UL(0x1) << ID_AA64MMFR1_HAFDBS_SHIFT)
+#define ID_AA64MMFR1_HAFDBS_AF_DBS (UL(0x2) << ID_AA64MMFR1_HAFDBS_SHIFT)
+#define ID_AA64MMFR1_VMIDBits_SHIFT 4
+#define ID_AA64MMFR1_VMIDBits_MASK (UL(0xf) << ID_AA64MMFR1_VMIDBits_SHIFT)
+#define ID_AA64MMFR1_VMIDBits_VAL(x) ((x) & ID_AA64MMFR1_VMIDBits_MASK)
+#define ID_AA64MMFR1_VMIDBits_8 (UL(0x0) << ID_AA64MMFR1_VMIDBits_SHIFT)
+#define ID_AA64MMFR1_VMIDBits_16 (UL(0x2) << ID_AA64MMFR1_VMIDBits_SHIFT)
+#define ID_AA64MMFR1_VH_SHIFT 8
+#define ID_AA64MMFR1_VH_MASK (UL(0xf) << ID_AA64MMFR1_VH_SHIFT)
+#define ID_AA64MMFR1_VH_VAL(x) ((x) & ID_AA64MMFR1_VH_MASK)
+#define ID_AA64MMFR1_VH_NONE (UL(0x0) << ID_AA64MMFR1_VH_SHIFT)
+#define ID_AA64MMFR1_VH_IMPL (UL(0x1) << ID_AA64MMFR1_VH_SHIFT)
+#define ID_AA64MMFR1_HPDS_SHIFT 12
+#define ID_AA64MMFR1_HPDS_MASK (UL(0xf) << ID_AA64MMFR1_HPDS_SHIFT)
+#define ID_AA64MMFR1_HPDS_VAL(x) ((x) & ID_AA64MMFR1_HPDS_MASK)
+#define ID_AA64MMFR1_HPDS_NONE (UL(0x0) << ID_AA64MMFR1_HPDS_SHIFT)
+#define ID_AA64MMFR1_HPDS_HPD (UL(0x1) << ID_AA64MMFR1_HPDS_SHIFT)
+#define ID_AA64MMFR1_HPDS_TTPBHA (UL(0x2) << ID_AA64MMFR1_HPDS_SHIFT)
+#define ID_AA64MMFR1_LO_SHIFT 16
+#define ID_AA64MMFR1_LO_MASK (UL(0xf) << ID_AA64MMFR1_LO_SHIFT)
+#define ID_AA64MMFR1_LO_VAL(x) ((x) & ID_AA64MMFR1_LO_MASK)
+#define ID_AA64MMFR1_LO_NONE (UL(0x0) << ID_AA64MMFR1_LO_SHIFT)
+#define ID_AA64MMFR1_LO_IMPL (UL(0x1) << ID_AA64MMFR1_LO_SHIFT)
+#define ID_AA64MMFR1_PAN_SHIFT 20
+#define ID_AA64MMFR1_PAN_MASK (UL(0xf) << ID_AA64MMFR1_PAN_SHIFT)
+#define ID_AA64MMFR1_PAN_VAL(x) ((x) & ID_AA64MMFR1_PAN_MASK)
+#define ID_AA64MMFR1_PAN_NONE (UL(0x0) << ID_AA64MMFR1_PAN_SHIFT)
+#define ID_AA64MMFR1_PAN_IMPL (UL(0x1) << ID_AA64MMFR1_PAN_SHIFT)
+#define ID_AA64MMFR1_PAN_ATS1E1 (UL(0x2) << ID_AA64MMFR1_PAN_SHIFT)
+#define ID_AA64MMFR1_SpecSEI_SHIFT 24
+#define ID_AA64MMFR1_SpecSEI_MASK (UL(0xf) << ID_AA64MMFR1_SpecSEI_SHIFT)
+#define ID_AA64MMFR1_SpecSEI_VAL(x) ((x) & ID_AA64MMFR1_SpecSEI_MASK)
+#define ID_AA64MMFR1_SpecSEI_NONE (UL(0x0) << ID_AA64MMFR1_SpecSEI_SHIFT)
+#define ID_AA64MMFR1_SpecSEI_IMPL (UL(0x1) << ID_AA64MMFR1_SpecSEI_SHIFT)
+#define ID_AA64MMFR1_XNX_SHIFT 28
+#define ID_AA64MMFR1_XNX_MASK (UL(0xf) << ID_AA64MMFR1_XNX_SHIFT)
+#define ID_AA64MMFR1_XNX_VAL(x) ((x) & ID_AA64MMFR1_XNX_MASK)
+#define ID_AA64MMFR1_XNX_NONE (UL(0x0) << ID_AA64MMFR1_XNX_SHIFT)
+#define ID_AA64MMFR1_XNX_IMPL (UL(0x1) << ID_AA64MMFR1_XNX_SHIFT)
+
+/* ID_AA64MMFR2_EL1 */
+#define ID_AA64MMFR2_EL1 MRS_REG(3, 0, 0, 7, 2)
+#define ID_AA64MMFR2_CnP_SHIFT 0
+#define ID_AA64MMFR2_CnP_MASK (UL(0xf) << ID_AA64MMFR2_CnP_SHIFT)
+#define ID_AA64MMFR2_CnP_VAL(x) ((x) & ID_AA64MMFR2_CnP_MASK)
+#define ID_AA64MMFR2_CnP_NONE (UL(0x0) << ID_AA64MMFR2_CnP_SHIFT)
+#define ID_AA64MMFR2_CnP_IMPL (UL(0x1) << ID_AA64MMFR2_CnP_SHIFT)
+#define ID_AA64MMFR2_UAO_SHIFT 4
+#define ID_AA64MMFR2_UAO_MASK (UL(0xf) << ID_AA64MMFR2_UAO_SHIFT)
+#define ID_AA64MMFR2_UAO_VAL(x) ((x) & ID_AA64MMFR2_UAO_MASK)
+#define ID_AA64MMFR2_UAO_NONE (UL(0x0) << ID_AA64MMFR2_UAO_SHIFT)
+#define ID_AA64MMFR2_UAO_IMPL (UL(0x1) << ID_AA64MMFR2_UAO_SHIFT)
+#define ID_AA64MMFR2_LSM_SHIFT 8
+#define ID_AA64MMFR2_LSM_MASK (UL(0xf) << ID_AA64MMFR2_LSM_SHIFT)
+#define ID_AA64MMFR2_LSM_VAL(x) ((x) & ID_AA64MMFR2_LSM_MASK)
+#define ID_AA64MMFR2_LSM_NONE (UL(0x0) << ID_AA64MMFR2_LSM_SHIFT)
+#define ID_AA64MMFR2_LSM_IMPL (UL(0x1) << ID_AA64MMFR2_LSM_SHIFT)
+#define ID_AA64MMFR2_IESB_SHIFT 12
+#define ID_AA64MMFR2_IESB_MASK (UL(0xf) << ID_AA64MMFR2_IESB_SHIFT)
+#define ID_AA64MMFR2_IESB_VAL(x) ((x) & ID_AA64MMFR2_IESB_MASK)
+#define ID_AA64MMFR2_IESB_NONE (UL(0x0) << ID_AA64MMFR2_IESB_SHIFT)
+#define ID_AA64MMFR2_IESB_IMPL (UL(0x1) << ID_AA64MMFR2_IESB_SHIFT)
+#define ID_AA64MMFR2_VARange_SHIFT 16
+#define ID_AA64MMFR2_VARange_MASK (UL(0xf) << ID_AA64MMFR2_VARange_SHIFT)
+#define ID_AA64MMFR2_VARange_VAL(x) ((x) & ID_AA64MMFR2_VARange_MASK)
+#define ID_AA64MMFR2_VARange_48 (UL(0x0) << ID_AA64MMFR2_VARange_SHIFT)
+#define ID_AA64MMFR2_VARange_52 (UL(0x1) << ID_AA64MMFR2_VARange_SHIFT)
+#define ID_AA64MMFR2_CCIDX_SHIFT 20
+#define ID_AA64MMFR2_CCIDX_MASK (UL(0xf) << ID_AA64MMFR2_CCIDX_SHIFT)
+#define ID_AA64MMFR2_CCIDX_VAL(x) ((x) & ID_AA64MMFR2_CCIDX_MASK)
+#define ID_AA64MMFR2_CCIDX_32 (UL(0x0) << ID_AA64MMFR2_CCIDX_SHIFT)
+#define ID_AA64MMFR2_CCIDX_64 (UL(0x1) << ID_AA64MMFR2_CCIDX_SHIFT)
+#define ID_AA64MMFR2_NV_SHIFT 24
+#define ID_AA64MMFR2_NV_MASK (UL(0xf) << ID_AA64MMFR2_NV_SHIFT)
+#define ID_AA64MMFR2_NV_VAL(x) ((x) & ID_AA64MMFR2_NV_MASK)
+#define ID_AA64MMFR2_NV_NONE (UL(0x0) << ID_AA64MMFR2_NV_SHIFT)
+#define ID_AA64MMFR2_NV_IMPL (UL(0x1) << ID_AA64MMFR2_NV_SHIFT)
+
+/* ID_AA64PFR0_EL1 */
+#define ID_AA64PFR0_EL1 MRS_REG(3, 0, 0, 4, 0)
+#define ID_AA64PFR0_EL0_SHIFT 0
+#define ID_AA64PFR0_EL0_MASK (UL(0xf) << ID_AA64PFR0_EL0_SHIFT)
+#define ID_AA64PFR0_EL0_VAL(x) ((x) & ID_AA64PFR0_EL0_MASK)
+#define ID_AA64PFR0_EL0_64 (UL(0x1) << ID_AA64PFR0_EL0_SHIFT)
+#define ID_AA64PFR0_EL0_64_32 (UL(0x2) << ID_AA64PFR0_EL0_SHIFT)
+#define ID_AA64PFR0_EL1_SHIFT 4
+#define ID_AA64PFR0_EL1_MASK (UL(0xf) << ID_AA64PFR0_EL1_SHIFT)
+#define ID_AA64PFR0_EL1_VAL(x) ((x) & ID_AA64PFR0_EL1_MASK)
+#define ID_AA64PFR0_EL1_64 (UL(0x1) << ID_AA64PFR0_EL1_SHIFT)
+#define ID_AA64PFR0_EL1_64_32 (UL(0x2) << ID_AA64PFR0_EL1_SHIFT)
+#define ID_AA64PFR0_EL2_SHIFT 8
+#define ID_AA64PFR0_EL2_MASK (UL(0xf) << ID_AA64PFR0_EL2_SHIFT)
+#define ID_AA64PFR0_EL2_VAL(x) ((x) & ID_AA64PFR0_EL2_MASK)
+#define ID_AA64PFR0_EL2_NONE (UL(0x0) << ID_AA64PFR0_EL2_SHIFT)
+#define ID_AA64PFR0_EL2_64 (UL(0x1) << ID_AA64PFR0_EL2_SHIFT)
+#define ID_AA64PFR0_EL2_64_32 (UL(0x2) << ID_AA64PFR0_EL2_SHIFT)
+#define ID_AA64PFR0_EL3_SHIFT 12
+#define ID_AA64PFR0_EL3_MASK (UL(0xf) << ID_AA64PFR0_EL3_SHIFT)
+#define ID_AA64PFR0_EL3_VAL(x) ((x) & ID_AA64PFR0_EL3_MASK)
+#define ID_AA64PFR0_EL3_NONE (UL(0x0) << ID_AA64PFR0_EL3_SHIFT)
+#define ID_AA64PFR0_EL3_64 (UL(0x1) << ID_AA64PFR0_EL3_SHIFT)
+#define ID_AA64PFR0_EL3_64_32 (UL(0x2) << ID_AA64PFR0_EL3_SHIFT)
+#define ID_AA64PFR0_FP_SHIFT 16
+#define ID_AA64PFR0_FP_MASK (UL(0xf) << ID_AA64PFR0_FP_SHIFT)
+#define ID_AA64PFR0_FP_VAL(x) ((x) & ID_AA64PFR0_FP_MASK)
+#define ID_AA64PFR0_FP_IMPL (UL(0x0) << ID_AA64PFR0_FP_SHIFT)
+#define ID_AA64PFR0_FP_HP (UL(0x1) << ID_AA64PFR0_FP_SHIFT)
+#define ID_AA64PFR0_FP_NONE (UL(0xf) << ID_AA64PFR0_FP_SHIFT)
+#define ID_AA64PFR0_AdvSIMD_SHIFT 20
+#define ID_AA64PFR0_AdvSIMD_MASK (UL(0xf) << ID_AA64PFR0_AdvSIMD_SHIFT)
+#define ID_AA64PFR0_AdvSIMD_VAL(x) ((x) & ID_AA64PFR0_AdvSIMD_MASK)
+#define ID_AA64PFR0_AdvSIMD_IMPL (UL(0x0) << ID_AA64PFR0_AdvSIMD_SHIFT)
+#define ID_AA64PFR0_AdvSIMD_HP (UL(0x1) << ID_AA64PFR0_AdvSIMD_SHIFT)
+#define ID_AA64PFR0_AdvSIMD_NONE (UL(0xf) << ID_AA64PFR0_AdvSIMD_SHIFT)
+#define ID_AA64PFR0_GIC_BITS 0x4 /* Number of bits in GIC field */
+#define ID_AA64PFR0_GIC_SHIFT 24
+#define ID_AA64PFR0_GIC_MASK (UL(0xf) << ID_AA64PFR0_GIC_SHIFT)
+#define ID_AA64PFR0_GIC_VAL(x) ((x) & ID_AA64PFR0_GIC_MASK)
+#define ID_AA64PFR0_GIC_CPUIF_NONE (UL(0x0) << ID_AA64PFR0_GIC_SHIFT)
+#define ID_AA64PFR0_GIC_CPUIF_EN (UL(0x1) << ID_AA64PFR0_GIC_SHIFT)
+#define ID_AA64PFR0_RAS_SHIFT 28
+#define ID_AA64PFR0_RAS_MASK (UL(0xf) << ID_AA64PFR0_RAS_SHIFT)
+#define ID_AA64PFR0_RAS_VAL(x) ((x) & ID_AA64PFR0_RAS_MASK)
+#define ID_AA64PFR0_RAS_NONE (UL(0x0) << ID_AA64PFR0_RAS_SHIFT)
+#define ID_AA64PFR0_RAS_V1 (UL(0x1) << ID_AA64PFR0_RAS_SHIFT)
+#define ID_AA64PFR0_SVE_SHIFT 32
+#define ID_AA64PFR0_SVE_MASK (UL(0xf) << ID_AA64PFR0_SVE_SHIFT)
+#define ID_AA64PFR0_SVE_VAL(x) ((x) & ID_AA64PFR0_SVE_MASK)
+#define ID_AA64PFR0_SVE_NONE (UL(0x0) << ID_AA64PFR0_SVE_SHIFT)
+#define ID_AA64PFR0_SVE_IMPL (UL(0x1) << ID_AA64PFR0_SVE_SHIFT)
+#define ID_AA64PFR0_SEL2_SHIFT 36
+#define ID_AA64PFR0_SEL2_MASK (UL(0xf) << ID_AA64PFR0_SEL2_SHIFT)
+#define ID_AA64PFR0_SEL2_VAL(x) ((x) & ID_AA64PFR0_SEL2_MASK)
+#define ID_AA64PFR0_SEL2_NONE (UL(0x0) << ID_AA64PFR0_SEL2_SHIFT)
+#define ID_AA64PFR0_SEL2_IMPL (UL(0x1) << ID_AA64PFR0_SEL2_SHIFT)
+#define ID_AA64PFR0_MPAM_SHIFT 40
+#define ID_AA64PFR0_MPAM_MASK (UL(0xf) << ID_AA64PFR0_MPAM_SHIFT)
+#define ID_AA64PFR0_MPAM_VAL(x) ((x) & ID_AA64PFR0_MPAM_MASK)
+#define ID_AA64PFR0_MPAM_NONE (UL(0x0) << ID_AA64PFR0_MPAM_SHIFT)
+#define ID_AA64PFR0_MPAM_IMPL (UL(0x1) << ID_AA64PFR0_MPAM_SHIFT)
+#define ID_AA64PFR0_AMU_SHIFT 44
+#define ID_AA64PFR0_AMU_MASK (UL(0xf) << ID_AA64PFR0_AMU_SHIFT)
+#define ID_AA64PFR0_AMU_VAL(x) ((x) & ID_AA64PFR0_AMU_MASK)
+#define ID_AA64PFR0_AMU_NONE (UL(0x0) << ID_AA64PFR0_AMU_SHIFT)
+#define ID_AA64PFR0_AMU_V1 (UL(0x1) << ID_AA64PFR0_AMU_SHIFT)
+#define ID_AA64PFR0_DIT_SHIFT 48
+#define ID_AA64PFR0_DIT_MASK (UL(0xf) << ID_AA64PFR0_DIT_SHIFT)
+#define ID_AA64PFR0_DIT_VAL(x) ((x) & ID_AA64PFR0_DIT_MASK)
+#define ID_AA64PFR0_DIT_NONE (UL(0x0) << ID_AA64PFR0_DIT_SHIFT)
+#define ID_AA64PFR0_DIT_PSTATE (UL(0x1) << ID_AA64PFR0_DIT_SHIFT)
+#define ID_AA64PFR0_CSV2_SHIFT 56
+#define ID_AA64PFR0_CSV2_MASK (UL(0xf) << ID_AA64PFR0_CSV2_SHIFT)
+#define ID_AA64PFR0_CSV2_VAL(x) ((x) & ID_AA64PFR0_CSV2_MASK)
+#define ID_AA64PFR0_CSV2_NONE (UL(0x0) << ID_AA64PFR0_CSV2_SHIFT)
+#define ID_AA64PFR0_CSV2_ISOLATED (UL(0x1) << ID_AA64PFR0_CSV2_SHIFT)
+#define ID_AA64PFR0_CSV2_SCXTNUM (UL(0x2) << ID_AA64PFR0_CSV2_SHIFT)
+#define ID_AA64PFR0_CSV3_SHIFT 60
+#define ID_AA64PFR0_CSV3_MASK (UL(0xf) << ID_AA64PFR0_CSV3_SHIFT)
+#define ID_AA64PFR0_CSV3_VAL(x) ((x) & ID_AA64PFR0_CSV3_MASK)
+#define ID_AA64PFR0_CSV3_NONE (UL(0x0) << ID_AA64PFR0_CSV3_SHIFT)
+#define ID_AA64PFR0_CSV3_ISOLATED (UL(0x1) << ID_AA64PFR0_CSV3_SHIFT)
+
+/* ID_AA64PFR1_EL1 */
+#define ID_AA64PFR1_EL1 MRS_REG(3, 0, 0, 4, 1)
+#define ID_AA64PFR1_BT_SHIFT 0
+#define ID_AA64PFR1_BT_MASK (UL(0xf) << ID_AA64PFR1_BT_SHIFT)
+#define ID_AA64PFR1_BT_VAL(x) ((x) & ID_AA64PFR1_BT_MASK)
+#define ID_AA64PFR1_BT_NONE (UL(0x0) << ID_AA64PFR1_BT_SHIFT)
+#define ID_AA64PFR1_BT_IMPL (UL(0x1) << ID_AA64PFR1_BT_SHIFT)
+#define ID_AA64PFR1_SSBS_SHIFT 4
+#define ID_AA64PFR1_SSBS_MASK (UL(0xf) << ID_AA64PFR1_SSBS_SHIFT)
+#define ID_AA64PFR1_SSBS_VAL(x) ((x) & ID_AA64PFR1_SSBS_MASK)
+#define ID_AA64PFR1_SSBS_NONE (UL(0x0) << ID_AA64PFR1_SSBS_SHIFT)
+#define ID_AA64PFR1_SSBS_PSTATE (UL(0x1) << ID_AA64PFR1_SSBS_SHIFT)
+#define ID_AA64PFR1_SSBS_PSTATE_MSR (UL(0x2) << ID_AA64PFR1_SSBS_SHIFT)
+#define ID_AA64PFR1_MTE_SHIFT 8
+#define ID_AA64PFR1_MTE_MASK (UL(0xf) << ID_AA64PFR1_MTE_SHIFT)
+#define ID_AA64PFR1_MTE_VAL(x) ((x) & ID_AA64PFR1_MTE_MASK)
+#define ID_AA64PFR1_MTE_NONE (UL(0x0) << ID_AA64PFR1_MTE_SHIFT)
+#define ID_AA64PFR1_MTE_IMPL_EL0 (UL(0x1) << ID_AA64PFR1_MTE_SHIFT)
+#define ID_AA64PFR1_MTE_IMPL (UL(0x2) << ID_AA64PFR1_MTE_SHIFT)
+#define ID_AA64PFR1_RAS_frac_SHIFT 12
+#define ID_AA64PFR1_RAS_frac_MASK (UL(0xf) << ID_AA64PFR1_RAS_frac_SHIFT)
+#define ID_AA64PFR1_RAS_frac_VAL(x) ((x) & ID_AA64PFR1_RAS_frac_MASK)
+#define ID_AA64PFR1_RAS_frac_V1 (UL(0x0) << ID_AA64PFR1_RAS_frac_SHIFT)
+#define ID_AA64PFR1_RAS_frac_V2 (UL(0x1) << ID_AA64PFR1_RAS_frac_SHIFT)
+
+/* MAIR_EL1 - Memory Attribute Indirection Register */
+#define MAIR_ATTR_MASK(idx) (0xff << ((idx) * 8))
+#define MAIR_ATTR(attr, idx) ((attr) << ((idx) * 8))
+#define MAIR_DEVICE_nGnRnE 0x00
+#define MAIR_NORMAL_NC 0x44
+#define MAIR_NORMAL_WT 0xbb
+#define MAIR_NORMAL_WB 0xff
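+
+/*
+ * Sketch (illustrative only): a MAIR_EL1 value is built by placing the
+ * attribute encodings above into numbered slots, e.g. with index 0 as
+ * Device-nGnRnE and index 1 as Normal Write-Back:
+ *
+ *	mair = MAIR_ATTR(MAIR_DEVICE_nGnRnE, 0) | MAIR_ATTR(MAIR_NORMAL_WB, 1);
+ *
+ * The slot assignment here is arbitrary and only for the example.
+ */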
+
+/* PAR_EL1 - Physical Address Register */
+#define PAR_F_SHIFT 0
+#define PAR_F (0x1 << PAR_F_SHIFT)
+#define PAR_SUCCESS(x) (((x) & PAR_F) == 0)
+/* When PAR_F == 0 (success) */
+#define PAR_SH_SHIFT 7
+#define PAR_SH_MASK (0x3 << PAR_SH_SHIFT)
+#define PAR_NS_SHIFT 9
+#define PAR_NS_MASK (0x3 << PAR_NS_SHIFT)
+#define PAR_PA_SHIFT 12
+#define PAR_PA_MASK 0x0000fffffffff000
+#define PAR_ATTR_SHIFT 56
+#define PAR_ATTR_MASK (0xffUL << PAR_ATTR_SHIFT)
+/* When PAR_F == 1 (aborted) */
+#define PAR_FST_SHIFT 1
+#define PAR_FST_MASK (0x3f << PAR_FST_SHIFT)
+#define PAR_PTW_SHIFT 8
+#define PAR_PTW_MASK (0x1 << PAR_PTW_SHIFT)
+#define PAR_S_SHIFT 9
+#define PAR_S_MASK (0x1 << PAR_S_SHIFT)
+
+/* SCTLR_EL1 - System Control Register */
+#define SCTLR_RES0 0xc8222440 /* Reserved ARMv8.0, write 0 */
+#define SCTLR_RES1 0x30d00800 /* Reserved ARMv8.0, write 1 */
+
+#define SCTLR_M 0x00000001
+#define SCTLR_A 0x00000002
+#define SCTLR_C 0x00000004
+#define SCTLR_SA 0x00000008
+#define SCTLR_SA0 0x00000010
+#define SCTLR_CP15BEN 0x00000020
+/* Bit 6 is reserved */
+#define SCTLR_ITD 0x00000080
+#define SCTLR_SED 0x00000100
+#define SCTLR_UMA 0x00000200
+/* Bit 10 is reserved */
+/* Bit 11 is reserved */
+#define SCTLR_I 0x00001000
+#define SCTLR_EnDB 0x00002000 /* ARMv8.3 */
+#define SCTLR_DZE 0x00004000
+#define SCTLR_UCT 0x00008000
+#define SCTLR_nTWI 0x00010000
+/* Bit 17 is reserved */
+#define SCTLR_nTWE 0x00040000
+#define SCTLR_WXN 0x00080000
+/* Bit 20 is reserved */
+#define SCTLR_IESB 0x00200000 /* ARMv8.2 */
+/* Bit 22 is reserved */
+#define SCTLR_SPAN 0x00800000 /* ARMv8.1 */
+#define SCTLR_EOE 0x01000000
+#define SCTLR_EE 0x02000000
+#define SCTLR_UCI 0x04000000
+#define SCTLR_EnDA 0x08000000 /* ARMv8.3 */
+#define SCTLR_nTLSMD 0x10000000 /* ARMv8.2 */
+#define SCTLR_LSMAOE 0x20000000 /* ARMv8.2 */
+#define SCTLR_EnIB 0x40000000 /* ARMv8.3 */
+#define SCTLR_EnIA 0x80000000 /* ARMv8.3 */
+
+/* SPSR_EL1 */
+/*
+ * When the exception is taken in AArch64:
+ * M[3:2] is the exception level
+ * M[1] is unused
+ * M[0] is the SP select:
+ * 0: always SP0
+ *  1: current EL's SP
+ */
+#define PSR_M_EL0t 0x00000000
+#define PSR_M_EL1t 0x00000004
+#define PSR_M_EL1h 0x00000005
+#define PSR_M_EL2t 0x00000008
+#define PSR_M_EL2h 0x00000009
+#define PSR_M_64 0x00000000
+#define PSR_M_32 0x00000010
+#define PSR_M_MASK 0x0000000f
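+
+/*
+ * Worked example: PSR_M_EL1h (0x5) decodes as M[3:2] = 1 (EL1) and
+ * M[0] = 1 (use the current EL's stack pointer, i.e. SP_EL1).
+ */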
+
+#define PSR_T 0x00000020
+
+#define PSR_AARCH32 0x00000010
+#define PSR_F 0x00000040
+#define PSR_I 0x00000080
+#define PSR_A 0x00000100
+#define PSR_D 0x00000200
+#define PSR_DAIF (PSR_D | PSR_A | PSR_I | PSR_F)
+#define PSR_IL 0x00100000
+#define PSR_SS 0x00200000
+#define PSR_V 0x10000000
+#define PSR_C 0x20000000
+#define PSR_Z 0x40000000
+#define PSR_N 0x80000000
+#define PSR_FLAGS 0xf0000000
+
+/* TCR_EL1 - Translation Control Register */
+/* Bits 63:59 are reserved */
+#define TCR_TCMA1_SHIFT 58
+#define TCR_TCMA1 (1UL << TCR_TCMA1_SHIFT)
+#define TCR_TCMA0_SHIFT 57
+#define TCR_TCMA0 (1UL << TCR_TCMA0_SHIFT)
+#define TCR_E0PD1_SHIFT 56
+#define TCR_E0PD1 (1UL << TCR_E0PD1_SHIFT)
+#define TCR_E0PD0_SHIFT 55
+#define TCR_E0PD0 (1UL << TCR_E0PD0_SHIFT)
+#define TCR_NFD1_SHIFT 54
+#define TCR_NFD1 (1UL << TCR_NFD1_SHIFT)
+#define TCR_NFD0_SHIFT 53
+#define TCR_NFD0 (1UL << TCR_NFD0_SHIFT)
+#define TCR_TBID1_SHIFT 52
+#define TCR_TBID1 (1UL << TCR_TBID1_SHIFT)
+#define TCR_TBID0_SHIFT 51
+#define TCR_TBID0 (1UL << TCR_TBID0_SHIFT)
+#define TCR_HWU162_SHIFT 50
+#define TCR_HWU162 (1UL << TCR_HWU162_SHIFT)
+#define TCR_HWU161_SHIFT 49
+#define TCR_HWU161 (1UL << TCR_HWU161_SHIFT)
+#define TCR_HWU160_SHIFT 48
+#define TCR_HWU160 (1UL << TCR_HWU160_SHIFT)
+#define TCR_HWU159_SHIFT 47
+#define TCR_HWU159 (1UL << TCR_HWU159_SHIFT)
+#define TCR_HWU1 \
+ (TCR_HWU159 | TCR_HWU160 | TCR_HWU161 | TCR_HWU162)
+#define TCR_HWU062_SHIFT 46
+#define TCR_HWU062 (1UL << TCR_HWU062_SHIFT)
+#define TCR_HWU061_SHIFT 45
+#define TCR_HWU061 (1UL << TCR_HWU061_SHIFT)
+#define TCR_HWU060_SHIFT 44
+#define TCR_HWU060 (1UL << TCR_HWU060_SHIFT)
+#define TCR_HWU059_SHIFT 43
+#define TCR_HWU059 (1UL << TCR_HWU059_SHIFT)
+#define TCR_HWU0 \
+ (TCR_HWU059 | TCR_HWU060 | TCR_HWU061 | TCR_HWU062)
+#define TCR_HPD1_SHIFT 42
+#define TCR_HPD1 (1UL << TCR_HPD1_SHIFT)
+#define TCR_HPD0_SHIFT 41
+#define TCR_HPD0 (1UL << TCR_HPD0_SHIFT)
+#define TCR_HD_SHIFT 40
+#define TCR_HD (1UL << TCR_HD_SHIFT)
+#define TCR_HA_SHIFT 39
+#define TCR_HA (1UL << TCR_HA_SHIFT)
+#define TCR_TBI1_SHIFT 38
+#define TCR_TBI1 (1UL << TCR_TBI1_SHIFT)
+#define TCR_TBI0_SHIFT 37
+#define TCR_TBI0 (1UL << TCR_TBI0_SHIFT)
+#define TCR_ASID_SHIFT 36
+#define TCR_ASID_WIDTH 1
+#define TCR_ASID_16 (1UL << TCR_ASID_SHIFT)
+/* Bit 35 is reserved */
+#define TCR_IPS_SHIFT 32
+#define TCR_IPS_WIDTH 3
+#define TCR_IPS_32BIT (0UL << TCR_IPS_SHIFT)
+#define TCR_IPS_36BIT (1UL << TCR_IPS_SHIFT)
+#define TCR_IPS_40BIT (2UL << TCR_IPS_SHIFT)
+#define TCR_IPS_42BIT (3UL << TCR_IPS_SHIFT)
+#define TCR_IPS_44BIT (4UL << TCR_IPS_SHIFT)
+#define TCR_IPS_48BIT (5UL << TCR_IPS_SHIFT)
+#define TCR_TG1_SHIFT 30
+#define TCR_TG1_16K (1UL << TCR_TG1_SHIFT)
+#define TCR_TG1_4K (2UL << TCR_TG1_SHIFT)
+#define TCR_TG1_64K (3UL << TCR_TG1_SHIFT)
+#define TCR_SH1_SHIFT 28
+#define TCR_SH1_IS (3UL << TCR_SH1_SHIFT)
+#define TCR_ORGN1_SHIFT 26
+#define TCR_ORGN1_WBWA (1UL << TCR_ORGN1_SHIFT)
+#define TCR_IRGN1_SHIFT 24
+#define TCR_IRGN1_WBWA (1UL << TCR_IRGN1_SHIFT)
+#define TCR_EPD1_SHIFT 23
+#define TCR_EPD1 (1UL << TCR_EPD1_SHIFT)
+#define TCR_A1_SHIFT 22
+#define TCR_A1 (0x1UL << TCR_A1_SHIFT)
+#define TCR_T1SZ_SHIFT 16
+#define TCR_T1SZ(x) ((x) << TCR_T1SZ_SHIFT)
+#define TCR_TG0_SHIFT 14
+#define TCR_TG0_16K (1UL << TCR_TG0_SHIFT)
+#define TCR_TG0_4K (2UL << TCR_TG0_SHIFT)
+#define TCR_TG0_64K (3UL << TCR_TG0_SHIFT)
+#define TCR_SH0_SHIFT 12
+#define TCR_SH0_IS (3UL << TCR_SH0_SHIFT)
+#define TCR_ORGN0_SHIFT 10
+#define TCR_ORGN0_WBWA (1UL << TCR_ORGN0_SHIFT)
+#define TCR_IRGN0_SHIFT 8
+#define TCR_IRGN0_WBWA (1UL << TCR_IRGN0_SHIFT)
+#define TCR_EPD0_SHIFT 7
+#define TCR_EPD0 (1UL << TCR_EPD0_SHIFT)
+/* Bit 6 is reserved */
+#define TCR_T0SZ_SHIFT 0
+#define TCR_T0SZ_MASK 0x3f
+#define TCR_T0SZ(x) ((x) << TCR_T0SZ_SHIFT)
+#define TCR_TxSZ(x) (TCR_T1SZ(x) | TCR_T0SZ(x))
+
+#define TCR_CACHE_ATTRS ((TCR_IRGN0_WBWA | TCR_IRGN1_WBWA) |\
+ (TCR_ORGN0_WBWA | TCR_ORGN1_WBWA))
+#ifdef SMP
+#define TCR_SMP_ATTRS (TCR_SH0_IS | TCR_SH1_IS)
+#else
+#define TCR_SMP_ATTRS 0
+#endif
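+
+/*
+ * Illustrative sketch: a TCR_EL1 value is typically composed from the
+ * helpers above together with the size, granule and ASID fields, e.g.
+ *
+ *	tcr = TCR_TxSZ(64 - 48) | TCR_TG1_4K | TCR_ASID_16 |
+ *	    TCR_CACHE_ATTRS | TCR_SMP_ATTRS;
+ *
+ * The field values are placeholders for the example, not necessarily the
+ * settings the kernel uses.
+ */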
+
+/* Saved Program Status Register */
+#define DBG_SPSR_SS (0x1 << 21)
+
+/* Monitor Debug System Control Register */
+#define DBG_MDSCR_SS (0x1 << 0)
+#define DBG_MDSCR_KDE (0x1 << 13)
+#define DBG_MDSCR_MDE (0x1 << 15)
+
+/* Performance Monitoring Counters */
+#define PMCR_E (1 << 0) /* Enable all counters */
+#define PMCR_P (1 << 1) /* Reset all counters */
+#define PMCR_C (1 << 2) /* Clock counter reset */
+#define PMCR_D (1 << 3) /* CNTR counts every 64 clk cycles */
+#define PMCR_X (1 << 4) /* Export to ext. monitoring (ETM) */
+#define PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
+#define PMCR_LC (1 << 6) /* Long cycle count enable */
+#define PMCR_IMP_SHIFT 24 /* Implementer code */
+#define PMCR_IMP_MASK (0xff << PMCR_IMP_SHIFT)
+#define PMCR_IMP_ARM 0x41
+#define PMCR_IDCODE_SHIFT 16 /* Identification code */
+#define PMCR_IDCODE_MASK (0xff << PMCR_IDCODE_SHIFT)
+#define PMCR_IDCODE_CORTEX_A57 0x01
+#define PMCR_IDCODE_CORTEX_A72 0x02
+#define PMCR_IDCODE_CORTEX_A53 0x03
+#define PMCR_IDCODE_CORTEX_A73 0x04
+#define PMCR_IDCODE_CORTEX_A35 0x0a
+#define PMCR_IDCODE_CORTEX_A76 0x0b
+#define PMCR_IDCODE_NEOVERSE_N1 0x0c
+#define PMCR_IDCODE_CORTEX_A77 0x10
+#define PMCR_IDCODE_CORTEX_A55 0x45
+#define PMCR_IDCODE_NEOVERSE_E1 0x46
+#define PMCR_IDCODE_CORTEX_A75 0x4a
+#define PMCR_N_SHIFT 11 /* Number of counters implemented */
+#define PMCR_N_MASK (0x1f << PMCR_N_SHIFT)
+
+#endif /* !_MACHINE_ARMREG_H_ */
diff --git a/sys/arm64/include/asm.h b/sys/arm64/include/asm.h
new file mode 100644
index 000000000000..d947301d5865
--- /dev/null
+++ b/sys/arm64/include/asm.h
@@ -0,0 +1,105 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_ASM_H_
+#define _MACHINE_ASM_H_
+
+#undef __FBSDID
+#if !defined(lint) && !defined(STRIP_FBSDID)
+#define __FBSDID(s) .ident s
+#else
+#define __FBSDID(s) /* nothing */
+#endif
+
+#define _C_LABEL(x) x
+
+#define ENTRY(sym) \
+ .text; .globl sym; .align 2; .type sym,#function; sym:
+#define EENTRY(sym) \
+ .globl sym; sym:
+#define END(sym) .size sym, . - sym
+#define EEND(sym)
+
+#define WEAK_REFERENCE(sym, alias) \
+ .weak alias; \
+ .set alias,sym
+
+#define UINT64_C(x) (x)
+
+#if defined(PIC)
+#define PIC_SYM(x,y) x ## @ ## y
+#else
+#define PIC_SYM(x,y) x
+#endif
+
+/* Alias for link register x30 */
+#define lr x30
+
+/*
+ * Sets the trap fault handler. On a data abort the exception handler will
+ * return to the address in the handler register; pass the xzr register to
+ * clear the handler. The tmp parameter should be a register able to hold
+ * the temporary data.
+ */
+#define SET_FAULT_HANDLER(handler, tmp) \
+ ldr tmp, [x18, #PC_CURTHREAD]; /* Load curthread */ \
+ ldr tmp, [tmp, #TD_PCB]; /* Load the pcb */ \
+ str handler, [tmp, #PCB_ONFAULT] /* Set the handler */
+
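+/*
+ * Usage sketch (illustrative): a copyin-style routine points the handler at
+ * a local error label before touching user memory and clears it afterwards:
+ *
+ *	adr	x6, copyerr
+ *	SET_FAULT_HANDLER(x6, x7)
+ *	...user accesses...
+ *	SET_FAULT_HANDLER(xzr, x7)
+ *
+ * The register numbers and the "copyerr" label are placeholders.
+ */
+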
+#define ENTER_USER_ACCESS(reg, tmp) \
+ ldr tmp, =has_pan; /* Get the addr of has_pan */ \
+ ldr reg, [tmp]; /* Read it */ \
+ cbz reg, 997f; /* If no PAN skip */ \
+ .inst 0xd500409f | (0 << 8); /* Clear PAN */ \
+ 997:
+
+#define EXIT_USER_ACCESS(reg) \
+ cbz reg, 998f; /* If no PAN skip */ \
+ .inst 0xd500409f | (1 << 8); /* Set PAN */ \
+ 998:
+
+#define EXIT_USER_ACCESS_CHECK(reg, tmp) \
+ ldr tmp, =has_pan; /* Get the addr of has_pan */ \
+ ldr reg, [tmp]; /* Read it */ \
+ cbz reg, 999f; /* If no PAN skip */ \
+ .inst 0xd500409f | (1 << 8); /* Set PAN */ \
+ 999:
+
+/*
+ * Some AArch64 CPUs speculate past an eret instruction. As the user may
+ * control the registers at this point, add a speculation barrier usable on
+ * all AArch64 CPUs after the eret instruction.
+ * TODO: ARMv8.5 adds a specific instruction for this, we could use that
+ * if we know we are running on something that supports it.
+ */
+#define ERET \
+ eret; \
+ dsb sy; \
+ isb
+
+#endif /* _MACHINE_ASM_H_ */
diff --git a/sys/arm64/include/atomic.h b/sys/arm64/include/atomic.h
new file mode 100644
index 000000000000..99dd73d4f85f
--- /dev/null
+++ b/sys/arm64/include/atomic.h
@@ -0,0 +1,609 @@
+/*-
+ * Copyright (c) 2013 Andrew Turner <andrew@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_ATOMIC_H_
+#define _MACHINE_ATOMIC_H_
+
+#define isb() __asm __volatile("isb" : : : "memory")
+
+/*
+ * Options for DMB and DSB:
+ * oshld Outer Shareable, load
+ * oshst Outer Shareable, store
+ * osh Outer Shareable, all
+ * nshld Non-shareable, load
+ * nshst Non-shareable, store
+ * nsh Non-shareable, all
+ * ishld Inner Shareable, load
+ * ishst Inner Shareable, store
+ * ish Inner Shareable, all
+ * ld Full system, load
+ * st Full system, store
+ * sy Full system, all
+ */
+#define dsb(opt) __asm __volatile("dsb " __STRING(opt) : : : "memory")
+#define dmb(opt) __asm __volatile("dmb " __STRING(opt) : : : "memory")
+
+#define mb() dmb(sy) /* Full system memory barrier all */
+#define wmb() dmb(st) /* Full system memory barrier store */
+#define rmb() dmb(ld) /* Full system memory barrier load */
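As a hedged illustration of the load/store barrier split listed above (not part of this header), the classic producer/consumer flag pattern; shared_data and shared_flag stand for suitably volatile shared variables, and compute_value()/consume_value() are placeholders:

    /* Producer: make the data visible before publishing the flag. */
    shared_data = compute_value();
    wmb();                  /* order the data store before the flag store */
    shared_flag = 1;

    /* Consumer: observe the flag before reading the data it guards. */
    while (shared_flag == 0)
            cpu_spinwait();
    rmb();                  /* order the flag load before the data loads */
    consume_value(shared_data);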
+
+#if defined(KCSAN) && !defined(KCSAN_RUNTIME)
+#include <sys/_cscan_atomic.h>
+#else
+
+#include <sys/atomic_common.h>
+
+#ifdef _KERNEL
+extern bool lse_supported;
+
+#ifdef LSE_ATOMICS
+#define _ATOMIC_LSE_SUPPORTED 1
+#else
+#define _ATOMIC_LSE_SUPPORTED lse_supported
+#endif
+#else
+#define _ATOMIC_LSE_SUPPORTED 0
+#endif
+
+#define _ATOMIC_OP_PROTO(t, op, bar, flav) \
+static __inline void \
+atomic_##op##_##bar##t##flav(volatile uint##t##_t *p, uint##t##_t val)
+
+#define _ATOMIC_OP_IMPL(t, w, s, op, llsc_asm_op, lse_asm_op, pre, bar, a, l) \
+_ATOMIC_OP_PROTO(t, op, bar, _llsc) \
+{ \
+ uint##t##_t tmp; \
+ int res; \
+ \
+ pre; \
+ __asm __volatile( \
+ "1: ld"#a"xr"#s" %"#w"0, [%2]\n" \
+ " "#llsc_asm_op" %"#w"0, %"#w"0, %"#w"3\n" \
+ " st"#l"xr"#s" %w1, %"#w"0, [%2]\n" \
+ " cbnz %w1, 1b\n" \
+ : "=&r"(tmp), "=&r"(res) \
+ : "r" (p), "r" (val) \
+ : "memory" \
+ ); \
+} \
+ \
+_ATOMIC_OP_PROTO(t, op, bar, _lse) \
+{ \
+ uint##t##_t tmp; \
+ \
+ pre; \
+ __asm __volatile( \
+ ".arch_extension lse\n" \
+ "ld"#lse_asm_op#a#l#s" %"#w"2, %"#w"0, [%1]\n" \
+ ".arch_extension nolse\n" \
+ : "=r" (tmp) \
+ : "r" (p), "r" (val) \
+ : "memory" \
+ ); \
+} \
+ \
+_ATOMIC_OP_PROTO(t, op, bar, ) \
+{ \
+ if (_ATOMIC_LSE_SUPPORTED) \
+ atomic_##op##_##bar##t##_lse(p, val); \
+ else \
+ atomic_##op##_##bar##t##_llsc(p, val); \
+}
+
+#define __ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre, bar, a, l) \
+ _ATOMIC_OP_IMPL(8, w, b, op, llsc_asm_op, lse_asm_op, pre, \
+ bar, a, l) \
+ _ATOMIC_OP_IMPL(16, w, h, op, llsc_asm_op, lse_asm_op, pre, \
+ bar, a, l) \
+ _ATOMIC_OP_IMPL(32, w, , op, llsc_asm_op, lse_asm_op, pre, \
+ bar, a, l) \
+ _ATOMIC_OP_IMPL(64, , , op, llsc_asm_op, lse_asm_op, pre, \
+ bar, a, l)
+
+#define _ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre) \
+ __ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre, , , ) \
+ __ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre, acq_, a, ) \
+ __ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre, rel_, , l)
+
+_ATOMIC_OP(add, add, add, )
+_ATOMIC_OP(clear, bic, clr, )
+_ATOMIC_OP(set, orr, set, )
+_ATOMIC_OP(subtract, add, add, val = -val)
+
+#define _ATOMIC_CMPSET_PROTO(t, bar, flav) \
+static __inline int \
+atomic_cmpset_##bar##t##flav(volatile uint##t##_t *p, \
+ uint##t##_t cmpval, uint##t##_t newval)
+
+#define _ATOMIC_FCMPSET_PROTO(t, bar, flav) \
+static __inline int \
+atomic_fcmpset_##bar##t##flav(volatile uint##t##_t *p, \
+ uint##t##_t *cmpval, uint##t##_t newval)
+
+#define _ATOMIC_CMPSET_IMPL(t, w, s, bar, a, l) \
+_ATOMIC_CMPSET_PROTO(t, bar, _llsc) \
+{ \
+ uint##t##_t tmp; \
+ int res; \
+ \
+ __asm __volatile( \
+ "1: mov %w1, #1\n" \
+ " ld"#a"xr"#s" %"#w"0, [%2]\n" \
+ " cmp %"#w"0, %"#w"3\n" \
+ " b.ne 2f\n" \
+ " st"#l"xr"#s" %w1, %"#w"4, [%2]\n" \
+ " cbnz %w1, 1b\n" \
+ "2:" \
+ : "=&r"(tmp), "=&r"(res) \
+ : "r" (p), "r" (cmpval), "r" (newval) \
+ : "cc", "memory" \
+ ); \
+ \
+ return (!res); \
+} \
+ \
+_ATOMIC_CMPSET_PROTO(t, bar, _lse) \
+{ \
+ uint##t##_t oldval; \
+ int res; \
+ \
+ oldval = cmpval; \
+ __asm __volatile( \
+ ".arch_extension lse\n" \
+ "cas"#a#l#s" %"#w"1, %"#w"4, [%3]\n" \
+ "cmp %"#w"1, %"#w"2\n" \
+ "cset %w0, eq\n" \
+ ".arch_extension nolse\n" \
+ : "=r" (res), "+&r" (cmpval) \
+ : "r" (oldval), "r" (p), "r" (newval) \
+ : "cc", "memory" \
+ ); \
+ \
+ return (res); \
+} \
+ \
+_ATOMIC_CMPSET_PROTO(t, bar, ) \
+{ \
+ if (_ATOMIC_LSE_SUPPORTED) \
+ return (atomic_cmpset_##bar##t##_lse(p, cmpval, \
+ newval)); \
+ else \
+ return (atomic_cmpset_##bar##t##_llsc(p, cmpval, \
+ newval)); \
+} \
+ \
+_ATOMIC_FCMPSET_PROTO(t, bar, _llsc) \
+{ \
+ uint##t##_t _cmpval, tmp; \
+ int res; \
+ \
+ _cmpval = *cmpval; \
+ __asm __volatile( \
+ " mov %w1, #1\n" \
+ " ld"#a"xr"#s" %"#w"0, [%2]\n" \
+ " cmp %"#w"0, %"#w"3\n" \
+ " b.ne 1f\n" \
+ " st"#l"xr"#s" %w1, %"#w"4, [%2]\n" \
+ "1:" \
+ : "=&r"(tmp), "=&r"(res) \
+ : "r" (p), "r" (_cmpval), "r" (newval) \
+ : "cc", "memory" \
+ ); \
+ *cmpval = tmp; \
+ \
+ return (!res); \
+} \
+ \
+_ATOMIC_FCMPSET_PROTO(t, bar, _lse) \
+{ \
+ uint##t##_t _cmpval, tmp; \
+ int res; \
+ \
+ _cmpval = tmp = *cmpval; \
+ __asm __volatile( \
+ ".arch_extension lse\n" \
+ "cas"#a#l#s" %"#w"1, %"#w"4, [%3]\n" \
+ "cmp %"#w"1, %"#w"2\n" \
+ "cset %w0, eq\n" \
+ ".arch_extension nolse\n" \
+ : "=r" (res), "+&r" (tmp) \
+ : "r" (_cmpval), "r" (p), "r" (newval) \
+ : "cc", "memory" \
+ ); \
+ *cmpval = tmp; \
+ \
+ return (res); \
+} \
+ \
+_ATOMIC_FCMPSET_PROTO(t, bar, ) \
+{ \
+ if (_ATOMIC_LSE_SUPPORTED) \
+ return (atomic_fcmpset_##bar##t##_lse(p, cmpval, \
+ newval)); \
+ else \
+ return (atomic_fcmpset_##bar##t##_llsc(p, cmpval, \
+ newval)); \
+}
+
+#define _ATOMIC_CMPSET(bar, a, l) \
+ _ATOMIC_CMPSET_IMPL(8, w, b, bar, a, l) \
+ _ATOMIC_CMPSET_IMPL(16, w, h, bar, a, l) \
+ _ATOMIC_CMPSET_IMPL(32, w, , bar, a, l) \
+ _ATOMIC_CMPSET_IMPL(64, , , bar, a, l)
+
+#define atomic_cmpset_8 atomic_cmpset_8
+#define atomic_fcmpset_8 atomic_fcmpset_8
+#define atomic_cmpset_16 atomic_cmpset_16
+#define atomic_fcmpset_16 atomic_fcmpset_16
+
+_ATOMIC_CMPSET( , , )
+_ATOMIC_CMPSET(acq_, a, )
+_ATOMIC_CMPSET(rel_, ,l)
+
+#define _ATOMIC_FETCHADD_PROTO(t, flav) \
+static __inline uint##t##_t \
+atomic_fetchadd_##t##flav(volatile uint##t##_t *p, uint##t##_t val)
+
+#define _ATOMIC_FETCHADD_IMPL(t, w) \
+_ATOMIC_FETCHADD_PROTO(t, _llsc) \
+{ \
+ uint##t##_t ret, tmp; \
+ int res; \
+ \
+ __asm __volatile( \
+ "1: ldxr %"#w"2, [%3]\n" \
+ " add %"#w"0, %"#w"2, %"#w"4\n" \
+ " stxr %w1, %"#w"0, [%3]\n" \
+ " cbnz %w1, 1b\n" \
+ : "=&r" (tmp), "=&r" (res), "=&r" (ret) \
+ : "r" (p), "r" (val) \
+ : "memory" \
+ ); \
+ \
+ return (ret); \
+} \
+ \
+_ATOMIC_FETCHADD_PROTO(t, _lse) \
+{ \
+ uint##t##_t ret; \
+ \
+ __asm __volatile( \
+ ".arch_extension lse\n" \
+ "ldadd %"#w"2, %"#w"0, [%1]\n" \
+ ".arch_extension nolse\n" \
+ : "=r" (ret) \
+ : "r" (p), "r" (val) \
+ : "memory" \
+ ); \
+ \
+ return (ret); \
+} \
+ \
+_ATOMIC_FETCHADD_PROTO(t, ) \
+{ \
+ if (_ATOMIC_LSE_SUPPORTED) \
+ return (atomic_fetchadd_##t##_lse(p, val)); \
+ else \
+ return (atomic_fetchadd_##t##_llsc(p, val)); \
+}
+
+_ATOMIC_FETCHADD_IMPL(32, w)
+_ATOMIC_FETCHADD_IMPL(64, )
+
+#define _ATOMIC_SWAP_PROTO(t, flav) \
+static __inline uint##t##_t \
+atomic_swap_##t##flav(volatile uint##t##_t *p, uint##t##_t val)
+
+#define _ATOMIC_READANDCLEAR_PROTO(t, flav) \
+static __inline uint##t##_t \
+atomic_readandclear_##t##flav(volatile uint##t##_t *p)
+
+#define _ATOMIC_SWAP_IMPL(t, w, zreg) \
+_ATOMIC_SWAP_PROTO(t, _llsc) \
+{ \
+ uint##t##_t ret; \
+ int res; \
+ \
+ __asm __volatile( \
+ "1: ldxr %"#w"1, [%2]\n" \
+ " stxr %w0, %"#w"3, [%2]\n" \
+ " cbnz %w0, 1b\n" \
+ : "=&r" (res), "=&r" (ret) \
+ : "r" (p), "r" (val) \
+ : "memory" \
+ ); \
+ \
+ return (ret); \
+} \
+ \
+_ATOMIC_SWAP_PROTO(t, _lse) \
+{ \
+ uint##t##_t ret; \
+ \
+ __asm __volatile( \
+ ".arch_extension lse\n" \
+ "swp %"#w"2, %"#w"0, [%1]\n" \
+ ".arch_extension nolse\n" \
+ : "=r" (ret) \
+ : "r" (p), "r" (val) \
+ : "memory" \
+ ); \
+ \
+ return (ret); \
+} \
+ \
+_ATOMIC_SWAP_PROTO(t, ) \
+{ \
+ if (_ATOMIC_LSE_SUPPORTED) \
+ return (atomic_swap_##t##_lse(p, val)); \
+ else \
+ return (atomic_swap_##t##_llsc(p, val)); \
+} \
+ \
+_ATOMIC_READANDCLEAR_PROTO(t, _llsc) \
+{ \
+ uint##t##_t ret; \
+ int res; \
+ \
+ __asm __volatile( \
+ "1: ldxr %"#w"1, [%2]\n" \
+ " stxr %w0, "#zreg", [%2]\n" \
+ " cbnz %w0, 1b\n" \
+ : "=&r" (res), "=&r" (ret) \
+ : "r" (p) \
+ : "memory" \
+ ); \
+ \
+ return (ret); \
+} \
+ \
+_ATOMIC_READANDCLEAR_PROTO(t, _lse) \
+{ \
+ return (atomic_swap_##t##_lse(p, 0)); \
+} \
+ \
+_ATOMIC_READANDCLEAR_PROTO(t, ) \
+{ \
+ if (_ATOMIC_LSE_SUPPORTED) \
+ return (atomic_readandclear_##t##_lse(p)); \
+ else \
+ return (atomic_readandclear_##t##_llsc(p)); \
+}
+
+_ATOMIC_SWAP_IMPL(32, w, wzr)
+_ATOMIC_SWAP_IMPL(64, , xzr)
+
+#define _ATOMIC_TEST_OP_PROTO(t, op, flav) \
+static __inline int \
+atomic_testand##op##_##t##flav(volatile uint##t##_t *p, u_int val)
+
+#define _ATOMIC_TEST_OP_IMPL(t, w, op, llsc_asm_op, lse_asm_op) \
+_ATOMIC_TEST_OP_PROTO(t, op, _llsc) \
+{ \
+ uint##t##_t mask, old, tmp; \
+ int res; \
+ \
+ mask = 1u << (val & 0x1f); \
+ __asm __volatile( \
+ "1: ldxr %"#w"2, [%3]\n" \
+ " "#llsc_asm_op" %"#w"0, %"#w"2, %"#w"4\n" \
+ " stxr %w1, %"#w"0, [%3]\n" \
+ " cbnz %w1, 1b\n" \
+ : "=&r" (tmp), "=&r" (res), "=&r" (old) \
+ : "r" (p), "r" (mask) \
+ : "memory" \
+ ); \
+ \
+ return ((old & mask) != 0); \
+} \
+ \
+_ATOMIC_TEST_OP_PROTO(t, op, _lse) \
+{ \
+ uint##t##_t mask, old; \
+ \
+ mask = 1u << (val & 0x1f); \
+ __asm __volatile( \
+ ".arch_extension lse\n" \
+ "ld"#lse_asm_op" %"#w"2, %"#w"0, [%1]\n" \
+ ".arch_extension nolse\n" \
+ : "=r" (old) \
+ : "r" (p), "r" (mask) \
+ : "memory" \
+ ); \
+ \
+ return ((old & mask) != 0); \
+} \
+ \
+_ATOMIC_TEST_OP_PROTO(t, op, ) \
+{ \
+ if (_ATOMIC_LSE_SUPPORTED) \
+ return (atomic_testand##op##_##t##_lse(p, val)); \
+ else \
+ return (atomic_testand##op##_##t##_llsc(p, val)); \
+}
+
+#define _ATOMIC_TEST_OP(op, llsc_asm_op, lse_asm_op) \
+ _ATOMIC_TEST_OP_IMPL(32, w, op, llsc_asm_op, lse_asm_op) \
+ _ATOMIC_TEST_OP_IMPL(64, , op, llsc_asm_op, lse_asm_op)
+
+_ATOMIC_TEST_OP(clear, bic, clr)
+_ATOMIC_TEST_OP(set, orr, set)
+
+#define _ATOMIC_LOAD_ACQ_IMPL(t, w, s) \
+static __inline uint##t##_t \
+atomic_load_acq_##t(volatile uint##t##_t *p) \
+{ \
+ uint##t##_t ret; \
+ \
+ __asm __volatile( \
+ "ldar"#s" %"#w"0, [%1]\n" \
+ : "=&r" (ret) \
+ : "r" (p) \
+ : "memory"); \
+ \
+ return (ret); \
+}
+
+#define atomic_load_acq_8 atomic_load_acq_8
+#define atomic_load_acq_16 atomic_load_acq_16
+_ATOMIC_LOAD_ACQ_IMPL(8, w, b)
+_ATOMIC_LOAD_ACQ_IMPL(16, w, h)
+_ATOMIC_LOAD_ACQ_IMPL(32, w, )
+_ATOMIC_LOAD_ACQ_IMPL(64, , )
+
+#define _ATOMIC_STORE_REL_IMPL(t, w, s) \
+static __inline void \
+atomic_store_rel_##t(volatile uint##t##_t *p, uint##t##_t val) \
+{ \
+ __asm __volatile( \
+ "stlr"#s" %"#w"0, [%1]\n" \
+ : \
+ : "r" (val), "r" (p) \
+ : "memory"); \
+}
+
+_ATOMIC_STORE_REL_IMPL(8, w, b)
+_ATOMIC_STORE_REL_IMPL(16, w, h)
+_ATOMIC_STORE_REL_IMPL(32, w, )
+_ATOMIC_STORE_REL_IMPL(64, , )
+
+#define atomic_add_int atomic_add_32
+#define atomic_fcmpset_int atomic_fcmpset_32
+#define atomic_clear_int atomic_clear_32
+#define atomic_cmpset_int atomic_cmpset_32
+#define atomic_fetchadd_int atomic_fetchadd_32
+#define atomic_readandclear_int atomic_readandclear_32
+#define atomic_set_int atomic_set_32
+#define atomic_swap_int atomic_swap_32
+#define atomic_subtract_int atomic_subtract_32
+#define atomic_testandclear_int atomic_testandclear_32
+#define atomic_testandset_int atomic_testandset_32
+
+#define atomic_add_acq_int atomic_add_acq_32
+#define atomic_fcmpset_acq_int atomic_fcmpset_acq_32
+#define atomic_clear_acq_int atomic_clear_acq_32
+#define atomic_cmpset_acq_int atomic_cmpset_acq_32
+#define atomic_load_acq_int atomic_load_acq_32
+#define atomic_set_acq_int atomic_set_acq_32
+#define atomic_subtract_acq_int atomic_subtract_acq_32
+
+#define atomic_add_rel_int atomic_add_rel_32
+#define atomic_fcmpset_rel_int atomic_fcmpset_rel_32
+#define atomic_clear_rel_int atomic_clear_rel_32
+#define atomic_cmpset_rel_int atomic_cmpset_rel_32
+#define atomic_set_rel_int atomic_set_rel_32
+#define atomic_subtract_rel_int atomic_subtract_rel_32
+#define atomic_store_rel_int atomic_store_rel_32
+
+#define atomic_add_long atomic_add_64
+#define atomic_fcmpset_long atomic_fcmpset_64
+#define atomic_clear_long atomic_clear_64
+#define atomic_cmpset_long atomic_cmpset_64
+#define atomic_fetchadd_long atomic_fetchadd_64
+#define atomic_readandclear_long atomic_readandclear_64
+#define atomic_set_long atomic_set_64
+#define atomic_swap_long atomic_swap_64
+#define atomic_subtract_long atomic_subtract_64
+#define atomic_testandclear_long atomic_testandclear_64
+#define atomic_testandset_long atomic_testandset_64
+
+#define atomic_add_ptr atomic_add_64
+#define atomic_fcmpset_ptr atomic_fcmpset_64
+#define atomic_clear_ptr atomic_clear_64
+#define atomic_cmpset_ptr atomic_cmpset_64
+#define atomic_fetchadd_ptr atomic_fetchadd_64
+#define atomic_readandclear_ptr atomic_readandclear_64
+#define atomic_set_ptr atomic_set_64
+#define atomic_swap_ptr atomic_swap_64
+#define atomic_subtract_ptr atomic_subtract_64
+
+#define atomic_add_acq_long atomic_add_acq_64
+#define atomic_fcmpset_acq_long atomic_fcmpset_acq_64
+#define atomic_clear_acq_long atomic_clear_acq_64
+#define atomic_cmpset_acq_long atomic_cmpset_acq_64
+#define atomic_load_acq_long atomic_load_acq_64
+#define atomic_set_acq_long atomic_set_acq_64
+#define atomic_subtract_acq_long atomic_subtract_acq_64
+
+#define atomic_add_acq_ptr atomic_add_acq_64
+#define atomic_fcmpset_acq_ptr atomic_fcmpset_acq_64
+#define atomic_clear_acq_ptr atomic_clear_acq_64
+#define atomic_cmpset_acq_ptr atomic_cmpset_acq_64
+#define atomic_load_acq_ptr atomic_load_acq_64
+#define atomic_set_acq_ptr atomic_set_acq_64
+#define atomic_subtract_acq_ptr atomic_subtract_acq_64
+
+#define atomic_add_rel_long atomic_add_rel_64
+#define atomic_fcmpset_rel_long atomic_fcmpset_rel_64
+#define atomic_clear_rel_long atomic_clear_rel_64
+#define atomic_cmpset_rel_long atomic_cmpset_rel_64
+#define atomic_set_rel_long atomic_set_rel_64
+#define atomic_subtract_rel_long atomic_subtract_rel_64
+#define atomic_store_rel_long atomic_store_rel_64
+
+#define atomic_add_rel_ptr atomic_add_rel_64
+#define atomic_fcmpset_rel_ptr atomic_fcmpset_rel_64
+#define atomic_clear_rel_ptr atomic_clear_rel_64
+#define atomic_cmpset_rel_ptr atomic_cmpset_rel_64
+#define atomic_set_rel_ptr atomic_set_rel_64
+#define atomic_subtract_rel_ptr atomic_subtract_rel_64
+#define atomic_store_rel_ptr atomic_store_rel_64
+
+static __inline void
+atomic_thread_fence_acq(void)
+{
+
+ dmb(ld);
+}
+
+static __inline void
+atomic_thread_fence_rel(void)
+{
+
+ dmb(sy);
+}
+
+static __inline void
+atomic_thread_fence_acq_rel(void)
+{
+
+ dmb(sy);
+}
+
+static __inline void
+atomic_thread_fence_seq_cst(void)
+{
+
+ dmb(sy);
+}
+
+#include <sys/_atomic_subword.h>
+
+#endif /* KCSAN && !KCSAN_RUNTIME */
+#endif /* _MACHINE_ATOMIC_H_ */
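A hedged usage sketch of the primitives defined above, using the fcmpset retry idiom; inc_bounded and its arguments are illustrative, not part of the header:

    /* Lock-free bounded increment: retry until the CAS succeeds. */
    static void
    inc_bounded(volatile uint32_t *p, uint32_t limit)
    {
            uint32_t old, new;

            old = atomic_load_acq_32(p);
            do {
                    if (old >= limit)
                            return;         /* already at the cap */
                    new = old + 1;
                    /* On failure, atomic_fcmpset_32() refreshes old. */
            } while (atomic_fcmpset_32(p, &old, new) == 0);
    }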
diff --git a/sys/arm64/include/bus.h b/sys/arm64/include/bus.h
new file mode 100644
index 000000000000..60dcfb6fd84c
--- /dev/null
+++ b/sys/arm64/include/bus.h
@@ -0,0 +1,464 @@
+/* $NetBSD: bus.h,v 1.11 2003/07/28 17:35:54 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1996, 1997, 1998, 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*-
+ * Copyright (c) 1996 Charles M. Hannum. All rights reserved.
+ * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Christopher G. Demetriou
+ * for the NetBSD Project.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * From: sys/arm/include/bus.h
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_BUS_H_
+#define _MACHINE_BUS_H_
+
+#include <machine/_bus.h>
+
+#define BUS_SPACE_ALIGNED_POINTER(p, t) ALIGNED_POINTER(p, t)
+
+#define BUS_SPACE_MAXADDR_24BIT 0xFFFFFFUL
+#define BUS_SPACE_MAXADDR_32BIT 0xFFFFFFFFUL
+#define BUS_SPACE_MAXADDR_40BIT 0xFFFFFFFFFFUL
+#define BUS_SPACE_MAXSIZE_24BIT 0xFFFFFFUL
+#define BUS_SPACE_MAXSIZE_32BIT 0xFFFFFFFFUL
+#define BUS_SPACE_MAXSIZE_40BIT 0xFFFFFFFFFFUL
+
+#define BUS_SPACE_MAXADDR 0xFFFFFFFFFFFFFFFFUL
+#define BUS_SPACE_MAXSIZE 0xFFFFFFFFFFFFFFFFUL
+
+#define BUS_SPACE_MAP_CACHEABLE 0x01
+#define BUS_SPACE_MAP_LINEAR 0x02
+#define BUS_SPACE_MAP_PREFETCHABLE 0x04
+
+#define BUS_SPACE_UNRESTRICTED (~0)
+
+#define BUS_SPACE_BARRIER_READ 0x01
+#define BUS_SPACE_BARRIER_WRITE 0x02
+
+#if defined(KCSAN) && !defined(KCSAN_RUNTIME)
+#include <sys/_cscan_bus.h>
+#else
+
+struct bus_space {
+ /* cookie */
+ void *bs_cookie;
+
+ /* mapping/unmapping */
+ int (*bs_map) (void *, bus_addr_t, bus_size_t,
+ int, bus_space_handle_t *);
+ void (*bs_unmap) (void *, bus_space_handle_t, bus_size_t);
+ int (*bs_subregion) (void *, bus_space_handle_t,
+ bus_size_t, bus_size_t, bus_space_handle_t *);
+
+ /* allocation/deallocation */
+ int (*bs_alloc) (void *, bus_addr_t, bus_addr_t,
+ bus_size_t, bus_size_t, bus_size_t, int,
+ bus_addr_t *, bus_space_handle_t *);
+ void (*bs_free) (void *, bus_space_handle_t,
+ bus_size_t);
+
+ /* get kernel virtual address */
+ /* barrier */
+ void (*bs_barrier) (void *, bus_space_handle_t,
+ bus_size_t, bus_size_t, int);
+
+ /* read single */
+ u_int8_t (*bs_r_1) (void *, bus_space_handle_t, bus_size_t);
+ u_int16_t (*bs_r_2) (void *, bus_space_handle_t, bus_size_t);
+ u_int32_t (*bs_r_4) (void *, bus_space_handle_t, bus_size_t);
+ u_int64_t (*bs_r_8) (void *, bus_space_handle_t, bus_size_t);
+
+ /* read multiple */
+ void (*bs_rm_1) (void *, bus_space_handle_t, bus_size_t,
+ u_int8_t *, bus_size_t);
+ void (*bs_rm_2) (void *, bus_space_handle_t, bus_size_t,
+ u_int16_t *, bus_size_t);
+ void (*bs_rm_4) (void *, bus_space_handle_t,
+ bus_size_t, u_int32_t *, bus_size_t);
+ void (*bs_rm_8) (void *, bus_space_handle_t,
+ bus_size_t, u_int64_t *, bus_size_t);
+
+ /* read region */
+ void (*bs_rr_1) (void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t);
+ void (*bs_rr_2) (void *, bus_space_handle_t,
+ bus_size_t, u_int16_t *, bus_size_t);
+ void (*bs_rr_4) (void *, bus_space_handle_t,
+ bus_size_t, u_int32_t *, bus_size_t);
+ void (*bs_rr_8) (void *, bus_space_handle_t,
+ bus_size_t, u_int64_t *, bus_size_t);
+
+ /* write single */
+ void (*bs_w_1) (void *, bus_space_handle_t,
+ bus_size_t, u_int8_t);
+ void (*bs_w_2) (void *, bus_space_handle_t,
+ bus_size_t, u_int16_t);
+ void (*bs_w_4) (void *, bus_space_handle_t,
+ bus_size_t, u_int32_t);
+ void (*bs_w_8) (void *, bus_space_handle_t,
+ bus_size_t, u_int64_t);
+
+ /* write multiple */
+ void (*bs_wm_1) (void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t);
+ void (*bs_wm_2) (void *, bus_space_handle_t,
+ bus_size_t, const u_int16_t *, bus_size_t);
+ void (*bs_wm_4) (void *, bus_space_handle_t,
+ bus_size_t, const u_int32_t *, bus_size_t);
+ void (*bs_wm_8) (void *, bus_space_handle_t,
+ bus_size_t, const u_int64_t *, bus_size_t);
+
+ /* write region */
+ void (*bs_wr_1) (void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t);
+ void (*bs_wr_2) (void *, bus_space_handle_t,
+ bus_size_t, const u_int16_t *, bus_size_t);
+ void (*bs_wr_4) (void *, bus_space_handle_t,
+ bus_size_t, const u_int32_t *, bus_size_t);
+ void (*bs_wr_8) (void *, bus_space_handle_t,
+ bus_size_t, const u_int64_t *, bus_size_t);
+
+ /* set multiple */
+ void (*bs_sm_1) (void *, bus_space_handle_t,
+ bus_size_t, u_int8_t, bus_size_t);
+ void (*bs_sm_2) (void *, bus_space_handle_t,
+ bus_size_t, u_int16_t, bus_size_t);
+ void (*bs_sm_4) (void *, bus_space_handle_t,
+ bus_size_t, u_int32_t, bus_size_t);
+ void (*bs_sm_8) (void *, bus_space_handle_t,
+ bus_size_t, u_int64_t, bus_size_t);
+
+ /* set region */
+ void (*bs_sr_1) (void *, bus_space_handle_t,
+ bus_size_t, u_int8_t, bus_size_t);
+ void (*bs_sr_2) (void *, bus_space_handle_t,
+ bus_size_t, u_int16_t, bus_size_t);
+ void (*bs_sr_4) (void *, bus_space_handle_t,
+ bus_size_t, u_int32_t, bus_size_t);
+ void (*bs_sr_8) (void *, bus_space_handle_t,
+ bus_size_t, u_int64_t, bus_size_t);
+
+ /* copy */
+ void (*bs_c_1) (void *, bus_space_handle_t, bus_size_t,
+ bus_space_handle_t, bus_size_t, bus_size_t);
+ void (*bs_c_2) (void *, bus_space_handle_t, bus_size_t,
+ bus_space_handle_t, bus_size_t, bus_size_t);
+ void (*bs_c_4) (void *, bus_space_handle_t, bus_size_t,
+ bus_space_handle_t, bus_size_t, bus_size_t);
+ void (*bs_c_8) (void *, bus_space_handle_t, bus_size_t,
+ bus_space_handle_t, bus_size_t, bus_size_t);
+
+ /* read single stream */
+ u_int8_t (*bs_r_1_s) (void *, bus_space_handle_t, bus_size_t);
+ u_int16_t (*bs_r_2_s) (void *, bus_space_handle_t, bus_size_t);
+ u_int32_t (*bs_r_4_s) (void *, bus_space_handle_t, bus_size_t);
+ u_int64_t (*bs_r_8_s) (void *, bus_space_handle_t, bus_size_t);
+
+ /* read multiple stream */
+ void (*bs_rm_1_s) (void *, bus_space_handle_t, bus_size_t,
+ u_int8_t *, bus_size_t);
+ void (*bs_rm_2_s) (void *, bus_space_handle_t, bus_size_t,
+ u_int16_t *, bus_size_t);
+ void (*bs_rm_4_s) (void *, bus_space_handle_t,
+ bus_size_t, u_int32_t *, bus_size_t);
+ void (*bs_rm_8_s) (void *, bus_space_handle_t,
+ bus_size_t, u_int64_t *, bus_size_t);
+
+ /* read region stream */
+ void (*bs_rr_1_s) (void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t);
+ void (*bs_rr_2_s) (void *, bus_space_handle_t,
+ bus_size_t, u_int16_t *, bus_size_t);
+ void (*bs_rr_4_s) (void *, bus_space_handle_t,
+ bus_size_t, u_int32_t *, bus_size_t);
+ void (*bs_rr_8_s) (void *, bus_space_handle_t,
+ bus_size_t, u_int64_t *, bus_size_t);
+
+ /* write single stream */
+ void (*bs_w_1_s) (void *, bus_space_handle_t,
+ bus_size_t, u_int8_t);
+ void (*bs_w_2_s) (void *, bus_space_handle_t,
+ bus_size_t, u_int16_t);
+ void (*bs_w_4_s) (void *, bus_space_handle_t,
+ bus_size_t, u_int32_t);
+ void (*bs_w_8_s) (void *, bus_space_handle_t,
+ bus_size_t, u_int64_t);
+
+ /* write multiple stream */
+ void (*bs_wm_1_s) (void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t);
+ void (*bs_wm_2_s) (void *, bus_space_handle_t,
+ bus_size_t, const u_int16_t *, bus_size_t);
+ void (*bs_wm_4_s) (void *, bus_space_handle_t,
+ bus_size_t, const u_int32_t *, bus_size_t);
+ void (*bs_wm_8_s) (void *, bus_space_handle_t,
+ bus_size_t, const u_int64_t *, bus_size_t);
+
+ /* write region stream */
+ void (*bs_wr_1_s) (void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t);
+ void (*bs_wr_2_s) (void *, bus_space_handle_t,
+ bus_size_t, const u_int16_t *, bus_size_t);
+ void (*bs_wr_4_s) (void *, bus_space_handle_t,
+ bus_size_t, const u_int32_t *, bus_size_t);
+ void (*bs_wr_8_s) (void *, bus_space_handle_t,
+ bus_size_t, const u_int64_t *, bus_size_t);
+};
+
+/*
+ * Utility macros; INTERNAL USE ONLY.
+ */
+#define __bs_c(a,b) __CONCAT(a,b)
+#define __bs_opname(op,size) __bs_c(__bs_c(__bs_c(bs_,op),_),size)
+
+#define __bs_rs(sz, t, h, o) \
+ (*(t)->__bs_opname(r,sz))((t)->bs_cookie, h, o)
+#define __bs_ws(sz, t, h, o, v) \
+ (*(t)->__bs_opname(w,sz))((t)->bs_cookie, h, o, v)
+#define __bs_nonsingle(type, sz, t, h, o, a, c) \
+ (*(t)->__bs_opname(type,sz))((t)->bs_cookie, h, o, a, c)
+#define __bs_set(type, sz, t, h, o, v, c) \
+ (*(t)->__bs_opname(type,sz))((t)->bs_cookie, h, o, v, c)
+#define __bs_copy(sz, t, h1, o1, h2, o2, cnt) \
+ (*(t)->__bs_opname(c,sz))((t)->bs_cookie, h1, o1, h2, o2, cnt)
+
+#define __bs_opname_s(op,size) __bs_c(__bs_c(__bs_c(__bs_c(bs_,op),_),size),_s)
+#define __bs_rs_s(sz, t, h, o) \
+ (*(t)->__bs_opname_s(r,sz))((t)->bs_cookie, h, o)
+#define __bs_ws_s(sz, t, h, o, v) \
+ (*(t)->__bs_opname_s(w,sz))((t)->bs_cookie, h, o, v)
+#define __bs_nonsingle_s(type, sz, t, h, o, a, c) \
+ (*(t)->__bs_opname_s(type,sz))((t)->bs_cookie, h, o, a, c)
+
+/*
+ * Mapping and unmapping operations.
+ */
+#define bus_space_map(t, a, s, c, hp) \
+ (*(t)->bs_map)((t)->bs_cookie, (a), (s), (c), (hp))
+#define bus_space_unmap(t, h, s) \
+ (*(t)->bs_unmap)((t)->bs_cookie, (h), (s))
+#define bus_space_subregion(t, h, o, s, hp) \
+ (*(t)->bs_subregion)((t)->bs_cookie, (h), (o), (s), (hp))
+
+/*
+ * Allocation and deallocation operations.
+ */
+#define bus_space_alloc(t, rs, re, s, a, b, c, ap, hp) \
+ (*(t)->bs_alloc)((t)->bs_cookie, (rs), (re), (s), (a), (b), \
+ (c), (ap), (hp))
+#define bus_space_free(t, h, s) \
+ (*(t)->bs_free)((t)->bs_cookie, (h), (s))
+
+/*
+ * Bus barrier operations.
+ */
+#define bus_space_barrier(t, h, o, l, f) \
+ (*(t)->bs_barrier)((t)->bs_cookie, (h), (o), (l), (f))
+
+/*
+ * Bus read (single) operations.
+ */
+#define bus_space_read_1(t, h, o) __bs_rs(1,(t),(h),(o))
+#define bus_space_read_2(t, h, o) __bs_rs(2,(t),(h),(o))
+#define bus_space_read_4(t, h, o) __bs_rs(4,(t),(h),(o))
+#define bus_space_read_8(t, h, o) __bs_rs(8,(t),(h),(o))
+
+#define bus_space_read_stream_1(t, h, o) __bs_rs_s(1,(t), (h), (o))
+#define bus_space_read_stream_2(t, h, o) __bs_rs_s(2,(t), (h), (o))
+#define bus_space_read_stream_4(t, h, o) __bs_rs_s(4,(t), (h), (o))
+#define bus_space_read_stream_8(t, h, o) __bs_rs_s(8,(t), (h), (o))
+
+/*
+ * Bus read multiple operations.
+ */
+#define bus_space_read_multi_1(t, h, o, a, c) \
+ __bs_nonsingle(rm,1,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_2(t, h, o, a, c) \
+ __bs_nonsingle(rm,2,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_4(t, h, o, a, c) \
+ __bs_nonsingle(rm,4,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_8(t, h, o, a, c) \
+ __bs_nonsingle(rm,8,(t),(h),(o),(a),(c))
+
+#define bus_space_read_multi_stream_1(t, h, o, a, c) \
+ __bs_nonsingle_s(rm,1,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_stream_2(t, h, o, a, c) \
+ __bs_nonsingle_s(rm,2,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_stream_4(t, h, o, a, c) \
+ __bs_nonsingle_s(rm,4,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_stream_8(t, h, o, a, c) \
+ __bs_nonsingle_s(rm,8,(t),(h),(o),(a),(c))
+
+/*
+ * Bus read region operations.
+ */
+#define bus_space_read_region_1(t, h, o, a, c) \
+ __bs_nonsingle(rr,1,(t),(h),(o),(a),(c))
+#define bus_space_read_region_2(t, h, o, a, c) \
+ __bs_nonsingle(rr,2,(t),(h),(o),(a),(c))
+#define bus_space_read_region_4(t, h, o, a, c) \
+ __bs_nonsingle(rr,4,(t),(h),(o),(a),(c))
+#define bus_space_read_region_8(t, h, o, a, c) \
+ __bs_nonsingle(rr,8,(t),(h),(o),(a),(c))
+
+#define bus_space_read_region_stream_1(t, h, o, a, c) \
+ __bs_nonsingle_s(rr,1,(t),(h),(o),(a),(c))
+#define bus_space_read_region_stream_2(t, h, o, a, c) \
+ __bs_nonsingle_s(rr,2,(t),(h),(o),(a),(c))
+#define bus_space_read_region_stream_4(t, h, o, a, c) \
+ __bs_nonsingle_s(rr,4,(t),(h),(o),(a),(c))
+#define bus_space_read_region_stream_8(t, h, o, a, c) \
+ __bs_nonsingle_s(rr,8,(t),(h),(o),(a),(c))
+
+/*
+ * Bus write (single) operations.
+ */
+#define bus_space_write_1(t, h, o, v) __bs_ws(1,(t),(h),(o),(v))
+#define bus_space_write_2(t, h, o, v) __bs_ws(2,(t),(h),(o),(v))
+#define bus_space_write_4(t, h, o, v) __bs_ws(4,(t),(h),(o),(v))
+#define bus_space_write_8(t, h, o, v) __bs_ws(8,(t),(h),(o),(v))
+
+#define bus_space_write_stream_1(t, h, o, v) __bs_ws_s(1,(t),(h),(o),(v))
+#define bus_space_write_stream_2(t, h, o, v) __bs_ws_s(2,(t),(h),(o),(v))
+#define bus_space_write_stream_4(t, h, o, v) __bs_ws_s(4,(t),(h),(o),(v))
+#define bus_space_write_stream_8(t, h, o, v) __bs_ws_s(8,(t),(h),(o),(v))
+
+/*
+ * Bus write multiple operations.
+ */
+#define bus_space_write_multi_1(t, h, o, a, c) \
+ __bs_nonsingle(wm,1,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_2(t, h, o, a, c) \
+ __bs_nonsingle(wm,2,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_4(t, h, o, a, c) \
+ __bs_nonsingle(wm,4,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_8(t, h, o, a, c) \
+ __bs_nonsingle(wm,8,(t),(h),(o),(a),(c))
+
+#define bus_space_write_multi_stream_1(t, h, o, a, c) \
+ __bs_nonsingle_s(wm,1,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_stream_2(t, h, o, a, c) \
+ __bs_nonsingle_s(wm,2,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_stream_4(t, h, o, a, c) \
+ __bs_nonsingle_s(wm,4,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_stream_8(t, h, o, a, c) \
+ __bs_nonsingle_s(wm,8,(t),(h),(o),(a),(c))
+
+/*
+ * Bus write region operations.
+ */
+#define bus_space_write_region_1(t, h, o, a, c) \
+ __bs_nonsingle(wr,1,(t),(h),(o),(a),(c))
+#define bus_space_write_region_2(t, h, o, a, c) \
+ __bs_nonsingle(wr,2,(t),(h),(o),(a),(c))
+#define bus_space_write_region_4(t, h, o, a, c) \
+ __bs_nonsingle(wr,4,(t),(h),(o),(a),(c))
+#define bus_space_write_region_8(t, h, o, a, c) \
+ __bs_nonsingle(wr,8,(t),(h),(o),(a),(c))
+
+#define bus_space_write_region_stream_1(t, h, o, a, c) \
+ __bs_nonsingle_s(wr,1,(t),(h),(o),(a),(c))
+#define bus_space_write_region_stream_2(t, h, o, a, c) \
+ __bs_nonsingle_s(wr,2,(t),(h),(o),(a),(c))
+#define bus_space_write_region_stream_4(t, h, o, a, c) \
+ __bs_nonsingle_s(wr,4,(t),(h),(o),(a),(c))
+#define bus_space_write_region_stream_8(t, h, o, a, c) \
+ __bs_nonsingle_s(wr,8,(t),(h),(o),(a),(c))
+
+/*
+ * Set multiple operations.
+ */
+#define bus_space_set_multi_1(t, h, o, v, c) \
+ __bs_set(sm,1,(t),(h),(o),(v),(c))
+#define bus_space_set_multi_2(t, h, o, v, c) \
+ __bs_set(sm,2,(t),(h),(o),(v),(c))
+#define bus_space_set_multi_4(t, h, o, v, c) \
+ __bs_set(sm,4,(t),(h),(o),(v),(c))
+#define bus_space_set_multi_8(t, h, o, v, c) \
+ __bs_set(sm,8,(t),(h),(o),(v),(c))
+
+/*
+ * Set region operations.
+ */
+#define bus_space_set_region_1(t, h, o, v, c) \
+ __bs_set(sr,1,(t),(h),(o),(v),(c))
+#define bus_space_set_region_2(t, h, o, v, c) \
+ __bs_set(sr,2,(t),(h),(o),(v),(c))
+#define bus_space_set_region_4(t, h, o, v, c) \
+ __bs_set(sr,4,(t),(h),(o),(v),(c))
+#define bus_space_set_region_8(t, h, o, v, c) \
+ __bs_set(sr,8,(t),(h),(o),(v),(c))
+
+/*
+ * Copy operations.
+ */
+#define bus_space_copy_region_1(t, h1, o1, h2, o2, c) \
+ __bs_copy(1, t, h1, o1, h2, o2, c)
+#define bus_space_copy_region_2(t, h1, o1, h2, o2, c) \
+ __bs_copy(2, t, h1, o1, h2, o2, c)
+#define bus_space_copy_region_4(t, h1, o1, h2, o2, c) \
+ __bs_copy(4, t, h1, o1, h2, o2, c)
+#define bus_space_copy_region_8(t, h1, o1, h2, o2, c) \
+ __bs_copy(8, t, h1, o1, h2, o2, c)
+
+#endif
+
+#include <machine/bus_dma.h>
+
+#endif /* _MACHINE_BUS_H_ */
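A hedged sketch of how a driver uses the accessors above once it has a tag and handle (typically from bus_alloc_resource_any()); the 0x10 control-register offset and function name are hypothetical:

    /* Read-modify-write a 32-bit device register through bus_space. */
    static void
    ctrl_set_enable(bus_space_tag_t bst, bus_space_handle_t bsh)
    {
            uint32_t reg;

            reg = bus_space_read_4(bst, bsh, 0x10);
            reg |= 0x1;
            bus_space_write_4(bst, bsh, 0x10, reg);
            bus_space_barrier(bst, bsh, 0x10, 4,
                BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
    }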
diff --git a/sys/arm64/include/bus_dma.h b/sys/arm64/include/bus_dma.h
new file mode 100644
index 000000000000..1b2d36086e4f
--- /dev/null
+++ b/sys/arm64/include/bus_dma.h
@@ -0,0 +1,153 @@
+/* $FreeBSD$ */
+
+#ifndef _MACHINE_BUS_DMA_H_
+#define _MACHINE_BUS_DMA_H_
+
+#define WANT_INLINE_DMAMAP
+#include <sys/bus_dma.h>
+
+#include <machine/bus_dma_impl.h>
+
+/*
+ * Is the DMA address a 1:1 mapping of the physical address?
+ */
+static inline bool
+bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ return (tc->impl->id_mapped(dmat, buf, buflen));
+}
+
+/*
+ * Allocate a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+static inline int
+bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ return (tc->impl->map_create(dmat, flags, mapp));
+}
+
+/*
+ * Destroy a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+static inline int
+bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ return (tc->impl->map_destroy(dmat, map));
+}
+
+/*
+ * Allocate a piece of memory that can be efficiently mapped into
+ * bus device space based on the constraints listed in the dma tag.
+ * A dmamap for use with dmamap_load is also allocated.
+ */
+static inline int
+bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
+ bus_dmamap_t *mapp)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ return (tc->impl->mem_alloc(dmat, vaddr, flags, mapp));
+}
+
+/*
+ * Free a piece of memory and its associated dmamap that were allocated
+ * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
+ */
+static inline void
+bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ tc->impl->mem_free(dmat, vaddr, map);
+}
+
+/*
+ * Release the mapping held by map.
+ */
+static inline void
+bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ tc->impl->map_unload(dmat, map);
+}
+
+static inline void
+bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ tc->impl->map_sync(dmat, map, op);
+}
+
+static inline int
+_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
+ bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ return (tc->impl->load_phys(dmat, map, buf, buflen, flags, segs,
+ segp));
+}
+
+static inline int
+_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
+ bus_size_t tlen, int ma_offs, int flags, bus_dma_segment_t *segs,
+ int *segp)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ return (tc->impl->load_ma(dmat, map, ma, tlen, ma_offs, flags,
+ segs, segp));
+}
+
+static inline int
+_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+ bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs,
+ int *segp)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ return (tc->impl->load_buffer(dmat, map, buf, buflen, pmap, flags, segs,
+ segp));
+}
+
+static inline void
+_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ tc->impl->map_waitok(dmat, map, mem, callback, callback_arg);
+}
+
+static inline bus_dma_segment_t *
+_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
+ bus_dma_segment_t *segs, int nsegs, int error)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ return (tc->impl->map_complete(dmat, map, segs, nsegs, error));
+}
+
+#endif /* !_MACHINE_BUS_DMA_H_ */
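A hedged sketch of the usual consumer-side sequence built on these inlines (the bus_dma_tag_create() prototype itself lives in sys/bus_dma.h); the softc fields and sizes are illustrative:

    /* Create a tag for a 4 KB, 32-bit addressable control block and
     * allocate DMA-safe memory for it. */
    static int
    alloc_ctrl_block(device_t dev, struct mydev_softc *sc)
    {
            int error;

            error = bus_dma_tag_create(bus_get_dma_tag(dev),
                4096, 0,                    /* alignment, boundary */
                BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
                BUS_SPACE_MAXADDR,          /* highaddr */
                NULL, NULL,                 /* filter, filterarg */
                4096, 1, 4096,              /* maxsize, nsegments, maxsegsz */
                0, NULL, NULL, &sc->ctrl_tag);
            if (error != 0)
                    return (error);
            return (bus_dmamem_alloc(sc->ctrl_tag, &sc->ctrl_vaddr,
                BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->ctrl_map));
    }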
diff --git a/sys/arm64/include/bus_dma_impl.h b/sys/arm64/include/bus_dma_impl.h
new file mode 100644
index 000000000000..ae0758c81337
--- /dev/null
+++ b/sys/arm64/include/bus_dma_impl.h
@@ -0,0 +1,97 @@
+/*-
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_BUS_DMA_IMPL_H_
+#define _MACHINE_BUS_DMA_IMPL_H_
+
+struct bus_dma_tag_common {
+ struct bus_dma_impl *impl;
+ struct bus_dma_tag_common *parent;
+ bus_size_t alignment;
+ bus_addr_t boundary;
+ bus_addr_t lowaddr;
+ bus_addr_t highaddr;
+ bus_dma_filter_t *filter;
+ void *filterarg;
+ bus_size_t maxsize;
+ u_int nsegments;
+ bus_size_t maxsegsz;
+ int flags;
+ bus_dma_lock_t *lockfunc;
+ void *lockfuncarg;
+ int ref_count;
+};
+
+struct bus_dma_impl {
+ int (*tag_create)(bus_dma_tag_t parent,
+ bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr,
+ bus_addr_t highaddr, bus_dma_filter_t *filter,
+ void *filterarg, bus_size_t maxsize, int nsegments,
+ bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+ void *lockfuncarg, bus_dma_tag_t *dmat);
+ int (*tag_destroy)(bus_dma_tag_t dmat);
+ bool (*id_mapped)(bus_dma_tag_t, vm_paddr_t, bus_size_t);
+ int (*map_create)(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp);
+ int (*map_destroy)(bus_dma_tag_t dmat, bus_dmamap_t map);
+ int (*mem_alloc)(bus_dma_tag_t dmat, void** vaddr, int flags,
+ bus_dmamap_t *mapp);
+ void (*mem_free)(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map);
+ int (*load_ma)(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
+ bus_dma_segment_t *segs, int *segp);
+ int (*load_phys)(bus_dma_tag_t dmat, bus_dmamap_t map,
+ vm_paddr_t buf, bus_size_t buflen, int flags,
+ bus_dma_segment_t *segs, int *segp);
+ int (*load_buffer)(bus_dma_tag_t dmat, bus_dmamap_t map,
+ void *buf, bus_size_t buflen, struct pmap *pmap, int flags,
+ bus_dma_segment_t *segs, int *segp);
+ void (*map_waitok)(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct memdesc *mem, bus_dmamap_callback_t *callback,
+ void *callback_arg);
+ bus_dma_segment_t *(*map_complete)(bus_dma_tag_t dmat, bus_dmamap_t map,
+ bus_dma_segment_t *segs, int nsegs, int error);
+ void (*map_unload)(bus_dma_tag_t dmat, bus_dmamap_t map);
+ void (*map_sync)(bus_dma_tag_t dmat, bus_dmamap_t map,
+ bus_dmasync_op_t op);
+};
+
+void bus_dma_dflt_lock(void *arg, bus_dma_lock_op_t op);
+int bus_dma_run_filter(struct bus_dma_tag_common *dmat, bus_addr_t paddr);
+int common_bus_dma_tag_create(struct bus_dma_tag_common *parent,
+ bus_size_t alignment,
+ bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
+ bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
+ int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+ void *lockfuncarg, size_t sz, void **dmat);
+
+extern struct bus_dma_impl bus_dma_bounce_impl;
+
+#endif
diff --git a/sys/arm64/include/clock.h b/sys/arm64/include/clock.h
new file mode 100644
index 000000000000..da23dbe43a4f
--- /dev/null
+++ b/sys/arm64/include/clock.h
@@ -0,0 +1 @@
+/* $FreeBSD$ */
diff --git a/sys/arm64/include/counter.h b/sys/arm64/include/counter.h
new file mode 100644
index 000000000000..333015cc7139
--- /dev/null
+++ b/sys/arm64/include/counter.h
@@ -0,0 +1,87 @@
+/*-
+ * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_COUNTER_H_
+#define _MACHINE_COUNTER_H_
+
+#include <sys/pcpu.h>
+#include <machine/atomic.h>
+
+#define EARLY_COUNTER &__pcpu[0].pc_early_dummy_counter
+
+#define counter_enter() do {} while (0)
+#define counter_exit() do {} while (0)
+
+#ifdef IN_SUBR_COUNTER_C
+static inline uint64_t
+counter_u64_read_one(uint64_t *p, int cpu)
+{
+
+ return (*(uint64_t *)((char *)p + UMA_PCPU_ALLOC_SIZE * cpu));
+}
+
+static inline uint64_t
+counter_u64_fetch_inline(uint64_t *p)
+{
+ uint64_t r;
+ int i;
+
+ r = 0;
+ CPU_FOREACH(i)
+ r += counter_u64_read_one((uint64_t *)p, i);
+
+ return (r);
+}
+
+static void
+counter_u64_zero_one_cpu(void *arg)
+{
+
+ *((uint64_t *)((char *)arg + UMA_PCPU_ALLOC_SIZE *
+ PCPU_GET(cpuid))) = 0;
+}
+
+static inline void
+counter_u64_zero_inline(counter_u64_t c)
+{
+
+ smp_rendezvous(smp_no_rendezvous_barrier, counter_u64_zero_one_cpu,
+ smp_no_rendezvous_barrier, c);
+}
+#endif
+
+#define counter_u64_add_protected(c, inc) counter_u64_add(c, inc)
+
+static inline void
+counter_u64_add(counter_u64_t c, int64_t inc)
+{
+
+ atomic_add_64((uint64_t *)zpcpu_get(c), inc);
+}
+
+#endif /* ! _MACHINE_COUNTER_H_ */
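A hedged sketch of the consumer-facing counter(9) API that these inlines back (the counter_u64_alloc/fetch wrappers are declared in sys/counter.h, not here); pkt_drops and the helpers are placeholders:

    /* Per-CPU statistics counter: unlocked per-CPU adds, summed on fetch. */
    static counter_u64_t pkt_drops;

    static void
    drops_init(void)
    {
            pkt_drops = counter_u64_alloc(M_WAITOK);
    }

    static void
    drop_one(void)
    {
            counter_u64_add(pkt_drops, 1);
    }

    static uint64_t
    drops_total(void)
    {
            return (counter_u64_fetch(pkt_drops));
    }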
diff --git a/sys/arm64/include/cpu.h b/sys/arm64/include/cpu.h
new file mode 100644
index 000000000000..529f156f2e96
--- /dev/null
+++ b/sys/arm64/include/cpu.h
@@ -0,0 +1,212 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * Copyright (c) 2014-2016 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Portions of this software were developed by Andrew Turner
+ * under sponsorship from the FreeBSD Foundation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)cpu.h 5.4 (Berkeley) 5/9/91
+ * from: FreeBSD: src/sys/i386/include/cpu.h,v 1.62 2001/06/29
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CPU_H_
+#define _MACHINE_CPU_H_
+
+#include <machine/atomic.h>
+#include <machine/frame.h>
+#include <machine/armreg.h>
+
+#define TRAPF_PC(tfp) ((tfp)->tf_lr)
+#define TRAPF_USERMODE(tfp) (((tfp)->tf_spsr & PSR_M_MASK) == PSR_M_EL0t)
+
+#define cpu_getstack(td) ((td)->td_frame->tf_sp)
+#define cpu_setstack(td, sp) ((td)->td_frame->tf_sp = (sp))
+#define cpu_spinwait() __asm __volatile("yield" ::: "memory")
+#define cpu_lock_delay() DELAY(1)
+
+/* Extract CPU affinity levels 0-3 */
+#define CPU_AFF0(mpidr) (u_int)(((mpidr) >> 0) & 0xff)
+#define CPU_AFF1(mpidr) (u_int)(((mpidr) >> 8) & 0xff)
+#define CPU_AFF2(mpidr) (u_int)(((mpidr) >> 16) & 0xff)
+#define CPU_AFF3(mpidr) (u_int)(((mpidr) >> 32) & 0xff)
+#define CPU_AFF0_MASK 0xffUL
+#define CPU_AFF1_MASK 0xff00UL
+#define CPU_AFF2_MASK 0xff0000UL
+#define CPU_AFF3_MASK 0xff00000000UL
+#define CPU_AFF_MASK (CPU_AFF0_MASK | CPU_AFF1_MASK | \
+	 CPU_AFF2_MASK | CPU_AFF3_MASK)	/* Mask affinity fields in MPIDR_EL1 */
+
+#ifdef _KERNEL
+
+#define CPU_IMPL_ARM 0x41
+#define CPU_IMPL_BROADCOM 0x42
+#define CPU_IMPL_CAVIUM 0x43
+#define CPU_IMPL_DEC 0x44
+#define CPU_IMPL_INFINEON 0x49
+#define CPU_IMPL_FREESCALE 0x4D
+#define CPU_IMPL_NVIDIA 0x4E
+#define CPU_IMPL_APM 0x50
+#define CPU_IMPL_QUALCOMM 0x51
+#define CPU_IMPL_MARVELL 0x56
+#define CPU_IMPL_INTEL 0x69
+
+/* ARM Part numbers */
+#define CPU_PART_FOUNDATION 0xD00
+#define CPU_PART_CORTEX_A53 0xD03
+#define CPU_PART_CORTEX_A35 0xD04
+#define CPU_PART_CORTEX_A55 0xD05
+#define CPU_PART_CORTEX_A65 0xD06
+#define CPU_PART_CORTEX_A57 0xD07
+#define CPU_PART_CORTEX_A72 0xD08
+#define CPU_PART_CORTEX_A73 0xD09
+#define CPU_PART_CORTEX_A75 0xD0A
+#define CPU_PART_CORTEX_A76 0xD0B
+#define CPU_PART_NEOVERSE_N1 0xD0C
+#define CPU_PART_CORTEX_A77 0xD0D
+#define CPU_PART_CORTEX_A76AE 0xD0E
+
+/* Cavium Part numbers */
+#define CPU_PART_THUNDERX 0x0A1
+#define CPU_PART_THUNDERX_81XX 0x0A2
+#define CPU_PART_THUNDERX_83XX 0x0A3
+#define CPU_PART_THUNDERX2 0x0AF
+
+#define CPU_REV_THUNDERX_1_0 0x00
+#define CPU_REV_THUNDERX_1_1 0x01
+
+#define CPU_REV_THUNDERX2_0 0x00
+
+/* APM / Ampere Part Number */
+#define CPU_PART_EMAG8180 0x000
+
+#define CPU_IMPL(midr) (((midr) >> 24) & 0xff)
+#define CPU_PART(midr) (((midr) >> 4) & 0xfff)
+#define CPU_VAR(midr) (((midr) >> 20) & 0xf)
+#define CPU_REV(midr) (((midr) >> 0) & 0xf)
+
+#define CPU_IMPL_TO_MIDR(val) (((val) & 0xff) << 24)
+#define CPU_PART_TO_MIDR(val) (((val) & 0xfff) << 4)
+#define CPU_VAR_TO_MIDR(val) (((val) & 0xf) << 20)
+#define CPU_REV_TO_MIDR(val) (((val) & 0xf) << 0)
+
+#define CPU_IMPL_MASK (0xff << 24)
+#define CPU_PART_MASK (0xfff << 4)
+#define CPU_VAR_MASK (0xf << 20)
+#define CPU_REV_MASK (0xf << 0)
+
+#define CPU_ID_RAW(impl, part, var, rev) \
+ (CPU_IMPL_TO_MIDR((impl)) | \
+ CPU_PART_TO_MIDR((part)) | CPU_VAR_TO_MIDR((var)) | \
+ CPU_REV_TO_MIDR((rev)))
+
+#define CPU_MATCH(mask, impl, part, var, rev) \
+ (((mask) & PCPU_GET(midr)) == \
+ ((mask) & CPU_ID_RAW((impl), (part), (var), (rev))))
+
+#define CPU_MATCH_RAW(mask, devid) \
+ (((mask) & PCPU_GET(midr)) == ((mask) & (devid)))
+
+/*
+ * Chip-specific errata. These defines are intended to be
+ * booleans used within if statements. When an appropriate
+ * kernel option is disabled, these defines must be defined
+ * as 0 to allow the compiler to remove the dead code and thus
+ * produce a better optimized kernel image.
+ */
+/*
+ * Vendor: Cavium
+ * Chip: ThunderX
+ * Revision(s): Pass 1.0, Pass 1.1
+ */
+#ifdef THUNDERX_PASS_1_1_ERRATA
+#define CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 \
+ (CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK | CPU_REV_MASK, \
+ CPU_IMPL_CAVIUM, CPU_PART_THUNDERX, 0, CPU_REV_THUNDERX_1_0) || \
+ CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK | CPU_REV_MASK, \
+ CPU_IMPL_CAVIUM, CPU_PART_THUNDERX, 0, CPU_REV_THUNDERX_1_1))
+#else
+#define CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 0
+#endif
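A hedged sketch of how such an errata define is meant to be consumed; when the kernel option is off the branch folds away, as the comment above describes:

    static void
    apply_chip_workarounds(void)
    {

            if (CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1) {
                    /* ThunderX pass 1.0/1.1 workaround path. */
            }
            /* With THUNDERX_PASS_1_1_ERRATA disabled the define is 0 and
             * the compiler removes the block entirely. */
    }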
+
+extern char btext[];
+extern char etext[];
+
+extern uint64_t __cpu_affinity[];
+
+void cpu_halt(void) __dead2;
+void cpu_reset(void) __dead2;
+void fork_trampoline(void);
+void identify_cache(uint64_t);
+void identify_cpu(u_int);
+void install_cpu_errata(void);
+void swi_vm(void *v);
+
+/* Functions to read the sanitised view of the special registers */
+void update_special_regs(u_int);
+bool extract_user_id_field(u_int, u_int, uint8_t *);
+bool get_kernel_reg(u_int, uint64_t *);
+
+#define CPU_AFFINITY(cpu) __cpu_affinity[(cpu)]
+#define CPU_CURRENT_SOCKET \
+ (CPU_AFF2(CPU_AFFINITY(PCPU_GET(cpuid))))
+
+static __inline uint64_t
+get_cyclecount(void)
+{
+ uint64_t ret;
+
+ ret = READ_SPECIALREG(cntvct_el0);
+
+ return (ret);
+}
+
+#define ADDRESS_TRANSLATE_FUNC(stage) \
+static inline uint64_t \
+arm64_address_translate_ ##stage (uint64_t addr) \
+{ \
+ uint64_t ret; \
+ \
+ __asm __volatile( \
+ "at " __STRING(stage) ", %1 \n" \
+ "mrs %0, par_el1" : "=r"(ret) : "r"(addr)); \
+ \
+ return (ret); \
+}
+
+ADDRESS_TRANSLATE_FUNC(s1e0r)
+ADDRESS_TRANSLATE_FUNC(s1e0w)
+ADDRESS_TRANSLATE_FUNC(s1e1r)
+ADDRESS_TRANSLATE_FUNC(s1e1w)
+
+#endif
+
+#endif /* !_MACHINE_CPU_H_ */
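A hedged example of the generated address-translation helpers; PAR_EL1 reports the result, with bit 0 (the F flag) set when the walk faulted, and the 0x1 mask is written out here rather than using an armreg.h name:

    /* Ask the MMU whether a kernel VA has a readable stage-1 mapping. */
    static bool
    kva_is_mapped(uint64_t va)
    {
            uint64_t par;

            par = arm64_address_translate_s1e1r(va);
            return ((par & 0x1) == 0);   /* F == 0: translation succeeded */
    }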
diff --git a/sys/arm64/include/cpufunc.h b/sys/arm64/include/cpufunc.h
new file mode 100644
index 000000000000..5400f253f9a2
--- /dev/null
+++ b/sys/arm64/include/cpufunc.h
@@ -0,0 +1,244 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CPUFUNC_H_
+#define _MACHINE_CPUFUNC_H_
+
+static __inline void
+breakpoint(void)
+{
+
+ __asm("brk #0");
+}
+
+#ifdef _KERNEL
+
+#define HAVE_INLINE_FFS
+
+static __inline __pure2 int
+ffs(int mask)
+{
+
+ return (__builtin_ffs(mask));
+}
+
+#define HAVE_INLINE_FFSL
+
+static __inline __pure2 int
+ffsl(long mask)
+{
+
+ return (__builtin_ffsl(mask));
+}
+
+#define HAVE_INLINE_FFSLL
+
+static __inline __pure2 int
+ffsll(long long mask)
+{
+
+ return (__builtin_ffsll(mask));
+}
+
+#define HAVE_INLINE_FLS
+
+static __inline __pure2 int
+fls(int mask)
+{
+
+ return (mask == 0 ? 0 :
+ 8 * sizeof(mask) - __builtin_clz((u_int)mask));
+}
+
+#define HAVE_INLINE_FLSL
+
+static __inline __pure2 int
+flsl(long mask)
+{
+
+ return (mask == 0 ? 0 :
+ 8 * sizeof(mask) - __builtin_clzl((u_long)mask));
+}
+
+#define HAVE_INLINE_FLSLL
+
+static __inline __pure2 int
+flsll(long long mask)
+{
+
+ return (mask == 0 ? 0 :
+ 8 * sizeof(mask) - __builtin_clzll((unsigned long long)mask));
+}
+
+#include <machine/armreg.h>
+
+void pan_enable(void);
+
+static __inline register_t
+dbg_disable(void)
+{
+ uint32_t ret;
+
+ __asm __volatile(
+ "mrs %x0, daif \n"
+ "msr daifset, #8 \n"
+ : "=&r" (ret));
+
+ return (ret);
+}
+
+static __inline void
+dbg_enable(void)
+{
+
+ __asm __volatile("msr daifclr, #8");
+}
+
+static __inline register_t
+intr_disable(void)
+{
+ /* DAIF is a 32-bit register */
+ uint32_t ret;
+
+ __asm __volatile(
+ "mrs %x0, daif \n"
+ "msr daifset, #2 \n"
+ : "=&r" (ret));
+
+ return (ret);
+}
+
+static __inline void
+intr_restore(register_t s)
+{
+
+ WRITE_SPECIALREG(daif, s);
+}
+
+static __inline void
+intr_enable(void)
+{
+
+ __asm __volatile("msr daifclr, #2");
+}
+
+static __inline register_t
+get_midr(void)
+{
+ uint64_t midr;
+
+ midr = READ_SPECIALREG(midr_el1);
+
+ return (midr);
+}
+
+static __inline register_t
+get_mpidr(void)
+{
+ uint64_t mpidr;
+
+ mpidr = READ_SPECIALREG(mpidr_el1);
+
+ return (mpidr);
+}
+
+static __inline void
+clrex(void)
+{
+
+	/*
+	 * Ensure a compiler barrier; otherwise the monitor clear could be
+	 * reordered and take effect too late.
+	 */
+ __asm __volatile("clrex" : : : "memory");
+}
+
+static __inline void
+set_ttbr0(uint64_t ttbr0)
+{
+
+ __asm __volatile(
+ "msr ttbr0_el1, %0 \n"
+ "isb \n"
+ :
+ : "r" (ttbr0));
+}
+
+static __inline void
+invalidate_icache(void)
+{
+
+ __asm __volatile(
+ "ic ialluis \n"
+ "dsb ish \n"
+ "isb \n");
+}
+
+static __inline void
+invalidate_local_icache(void)
+{
+
+ __asm __volatile(
+ "ic iallu \n"
+ "dsb nsh \n"
+ "isb \n");
+}
+
+extern bool icache_aliasing;
+extern bool icache_vmid;
+
+extern int64_t dcache_line_size;
+extern int64_t icache_line_size;
+extern int64_t idcache_line_size;
+extern int64_t dczva_line_size;
+
+#define cpu_nullop() arm64_nullop()
+#define cpufunc_nullop() arm64_nullop()
+
+#define cpu_tlb_flushID() arm64_tlb_flushID()
+
+#define cpu_dcache_wbinv_range(a, s) arm64_dcache_wbinv_range((a), (s))
+#define cpu_dcache_inv_range(a, s) arm64_dcache_inv_range((a), (s))
+#define cpu_dcache_wb_range(a, s) arm64_dcache_wb_range((a), (s))
+
+extern void (*arm64_icache_sync_range)(vm_offset_t, vm_size_t);
+
+#define cpu_icache_sync_range(a, s) arm64_icache_sync_range((a), (s))
+#define cpu_icache_sync_range_checked(a, s) arm64_icache_sync_range_checked((a), (s))
+
+void arm64_nullop(void);
+void arm64_tlb_flushID(void);
+void arm64_dic_idc_icache_sync_range(vm_offset_t, vm_size_t);
+void arm64_aliasing_icache_sync_range(vm_offset_t, vm_size_t);
+int arm64_icache_sync_range_checked(vm_offset_t, vm_size_t);
+void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t);
+void arm64_dcache_inv_range(vm_offset_t, vm_size_t);
+void arm64_dcache_wb_range(vm_offset_t, vm_size_t);
+
+#endif /* _KERNEL */
+#endif /* _MACHINE_CPUFUNC_H_ */
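intr_disable() returns the previous DAIF value and intr_restore() writes it back, so disabled sections nest naturally. A minimal save/mask/restore sketch, illustrative only and not part of this patch:

static void
example_critical_section(volatile uint64_t *counter)
{
	register_t daif;

	daif = intr_disable();	/* mask IRQs, remember the old DAIF value */
	(*counter)++;		/* work that must not be interrupted */
	intr_restore(daif);	/* put DAIF back exactly as it was */
}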
diff --git a/sys/arm64/include/csan.h b/sys/arm64/include/csan.h
new file mode 100644
index 000000000000..bace3866eb66
--- /dev/null
+++ b/sys/arm64/include/csan.h
@@ -0,0 +1,110 @@
+/* $NetBSD: csan.h,v 1.2 2019/11/06 06:57:22 maxv Exp $ */
+
+/*
+ * Copyright (c) 2019 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Maxime Villard.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <machine/cpufunc.h>
+#include <machine/stack.h>
+#include <machine/vmparam.h>
+
+static inline bool
+kcsan_md_unsupported(vm_offset_t addr)
+{
+ return false;
+}
+
+static inline bool
+kcsan_md_is_avail(void)
+{
+ return true;
+}
+
+static inline void
+kcsan_md_disable_intrs(uint64_t *state)
+{
+
+ *state = intr_disable();
+}
+
+static inline void
+kcsan_md_enable_intrs(uint64_t *state)
+{
+
+ intr_restore(*state);
+}
+
+static inline void
+kcsan_md_delay(uint64_t us)
+{
+ DELAY(us);
+}
+
+static void
+kcsan_md_unwind(void)
+{
+#ifdef DDB
+ c_db_sym_t sym;
+ db_expr_t offset;
+ const char *symname;
+#endif
+ struct unwind_state frame;
+ uint64_t sp;
+ int nsym;
+
+ __asm __volatile("mov %0, sp" : "=&r" (sp));
+
+ frame.sp = sp;
+ frame.fp = (uint64_t)__builtin_frame_address(0);
+ frame.pc = (uint64_t)kcsan_md_unwind;
+ nsym = 0;
+
+ while (1) {
+ unwind_frame(&frame);
+ if (!INKERNEL((vm_offset_t)frame.fp) ||
+ !INKERNEL((vm_offset_t)frame.pc))
+ break;
+
+#ifdef DDB
+ sym = db_search_symbol((vm_offset_t)frame.pc, DB_STGY_PROC,
+ &offset);
+ db_symbol_values(sym, &symname, NULL);
+ printf("#%d %p in %s+%#lx\n", nsym, (void *)frame.pc,
+ symname, offset);
+#else
+ printf("#%d %p\n", nsym, (void *)frame.pc);
+#endif
+ nsym++;
+
+ if (nsym >= 15) {
+ break;
+ }
+ }
+}
diff --git a/sys/arm64/include/db_machdep.h b/sys/arm64/include/db_machdep.h
new file mode 100644
index 000000000000..45d548c750bc
--- /dev/null
+++ b/sys/arm64/include/db_machdep.h
@@ -0,0 +1,123 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * Copyright (c) 2014-2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_DB_MACHDEP_H_
+#define _MACHINE_DB_MACHDEP_H_
+
+#include <machine/armreg.h>
+#include <machine/frame.h>
+#include <machine/trap.h>
+
+#define T_BREAKPOINT (EXCP_BRK)
+#define T_WATCHPOINT (EXCP_WATCHPT_EL1)
+
+typedef vm_offset_t db_addr_t;
+typedef long db_expr_t;
+
+#define PC_REGS() ((db_addr_t)kdb_thrctx->pcb_pc)
+
+#define BKPT_INST (0xd4200000)
+#define BKPT_SIZE (4)
+#define BKPT_SET(inst) (BKPT_INST)
+
+#define BKPT_SKIP do { \
+ kdb_frame->tf_elr += BKPT_SIZE; \
+} while (0)
+
+#define db_clear_single_step kdb_cpu_clear_singlestep
+#define db_set_single_step kdb_cpu_set_singlestep
+
+#define IS_BREAKPOINT_TRAP(type, code) (type == T_BREAKPOINT)
+#define IS_WATCHPOINT_TRAP(type, code) (type == T_WATCHPOINT)
+
+#define inst_trap_return(ins) (0)
+/* ret */
+#define inst_return(ins) (((ins) & 0xfffffc1fu) == 0xd65f0000)
+#define inst_call(ins) (((ins) & 0xfc000000u) == 0x94000000u || /* BL */ \
+ ((ins) & 0xfffffc1fu) == 0xd63f0000u) /* BLR */
+
+#define inst_load(ins) ({ \
+ uint32_t tmp_instr = db_get_value(PC_REGS(), sizeof(uint32_t), FALSE); \
+ is_load_instr(tmp_instr); \
+})
+
+#define inst_store(ins) ({ \
+ uint32_t tmp_instr = db_get_value(PC_REGS(), sizeof(uint32_t), FALSE); \
+ is_store_instr(tmp_instr); \
+})
+
+#define is_load_instr(ins) ((((ins) & 0x3b000000u) == 0x18000000u) || /* literal */ \
+ (((ins) & 0x3f400000u) == 0x08400000u) || /* exclusive */ \
+ (((ins) & 0x3bc00000u) == 0x28400000u) || /* no-allocate pair */ \
+ ((((ins) & 0x3b200c00u) == 0x38000400u) && \
+ (((ins) & 0x3be00c00u) != 0x38000400u) && \
+ (((ins) & 0xffe00c00u) != 0x3c800400u)) || /* immediate post-indexed */ \
+ ((((ins) & 0x3b200c00u) == 0x38000c00u) && \
+ (((ins) & 0x3be00c00u) != 0x38000c00u) && \
+ (((ins) & 0xffe00c00u) != 0x3c800c00u)) || /* immediate pre-indexed */ \
+ ((((ins) & 0x3b200c00u) == 0x38200800u) && \
+ (((ins) & 0x3be00c00u) != 0x38200800u) && \
+ (((ins) & 0xffe00c00u) != 0x3ca00c80u)) || /* register offset */ \
+ ((((ins) & 0x3b200c00u) == 0x38000800u) && \
+ (((ins) & 0x3be00c00u) != 0x38000800u)) || /* unprivileged */ \
+ ((((ins) & 0x3b200c00u) == 0x38000000u) && \
+ (((ins) & 0x3be00c00u) != 0x38000000u) && \
+ (((ins) & 0xffe00c00u) != 0x3c800000u)) || /* unscaled immediate */ \
+ ((((ins) & 0x3b000000u) == 0x39000000u) && \
+ (((ins) & 0x3bc00000u) != 0x39000000u) && \
+ (((ins) & 0xffc00000u) != 0x3d800000u)) && /* unsigned immediate */ \
+ (((ins) & 0x3bc00000u) == 0x28400000u) || /* pair (offset) */ \
+ (((ins) & 0x3bc00000u) == 0x28c00000u) || /* pair (post-indexed) */ \
+ (((ins) & 0x3bc00000u) == 0x29800000u)) /* pair (pre-indexed) */
+
+#define is_store_instr(ins) ((((ins) & 0x3f400000u) == 0x08000000u) || /* exclusive */ \
+ (((ins) & 0x3bc00000u) == 0x28000000u) || /* no-allocate pair */ \
+ ((((ins) & 0x3be00c00u) == 0x38000400u) || \
+ (((ins) & 0xffe00c00u) == 0x3c800400u)) || /* immediate post-indexed */ \
+ ((((ins) & 0x3be00c00u) == 0x38000c00u) || \
+ (((ins) & 0xffe00c00u) == 0x3c800c00u)) || /* immediate pre-indexed */ \
+ ((((ins) & 0x3be00c00u) == 0x38200800u) || \
+ (((ins) & 0xffe00c00u) == 0x3ca00800u)) || /* register offset */ \
+ (((ins) & 0x3be00c00u) == 0x38000800u) || /* unprivileged */ \
+ ((((ins) & 0x3be00c00u) == 0x38000000u) || \
+ (((ins) & 0xffe00c00u) == 0x3c800000u)) || /* unscaled immediate */ \
+ ((((ins) & 0x3bc00000u) == 0x39000000u) || \
+ (((ins) & 0xffc00000u) == 0x3d800000u)) || /* unsigned immediate */ \
+ (((ins) & 0x3bc00000u) == 0x28000000u) || /* pair (offset) */ \
+ (((ins) & 0x3bc00000u) == 0x28800000u) || /* pair (post-indexed) */ \
+ (((ins) & 0x3bc00000u) == 0x29800000u)) /* pair (pre-indexed) */
+
+#define next_instr_address(pc, bd) ((bd) ? (pc) : ((pc) + 4))
+
+#define DB_ELFSIZE 64
+
+#endif /* !_MACHINE_DB_MACHDEP_H_ */
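The inst_return()/inst_call() encodings can be sanity-checked against known instruction words; a small sketch (not part of this patch) using the canonical "ret" (0xd65f03c0) and "bl #0" (0x94000000) encodings:

static void
check_branch_predicates(void)
{

	KASSERT(inst_return(0xd65f03c0), ("ret not recognized"));
	KASSERT(inst_call(0x94000000), ("bl not recognized"));
	KASSERT(!inst_return(0x94000000), ("bl misclassified as a return"));
}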
diff --git a/sys/arm64/include/debug_monitor.h b/sys/arm64/include/debug_monitor.h
new file mode 100644
index 000000000000..4ca1a4c9248e
--- /dev/null
+++ b/sys/arm64/include/debug_monitor.h
@@ -0,0 +1,70 @@
+/*-
+ * Copyright (c) 2014 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under
+ * the sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_DEBUG_MONITOR_H_
+#define _MACHINE_DEBUG_MONITOR_H_
+
+#define DBG_BRP_MAX 16
+#define DBG_WRP_MAX 16
+
+struct debug_monitor_state {
+ uint32_t dbg_enable_count;
+ uint32_t dbg_flags;
+#define DBGMON_ENABLED (1 << 0)
+#define DBGMON_KERNEL (1 << 1)
+ uint64_t dbg_bcr[DBG_BRP_MAX];
+ uint64_t dbg_bvr[DBG_BRP_MAX];
+ uint64_t dbg_wcr[DBG_WRP_MAX];
+ uint64_t dbg_wvr[DBG_WRP_MAX];
+};
+
+#ifdef _KERNEL
+
+enum dbg_access_t {
+ HW_BREAKPOINT_X = 0,
+ HW_BREAKPOINT_R = 1,
+ HW_BREAKPOINT_W = 2,
+ HW_BREAKPOINT_RW = HW_BREAKPOINT_R | HW_BREAKPOINT_W,
+};
+
+void dbg_monitor_init(void);
+void dbg_register_sync(struct debug_monitor_state *);
+int dbg_setup_watchpoint(struct debug_monitor_state *, vm_offset_t, vm_size_t,
+ enum dbg_access_t);
+int dbg_remove_watchpoint(struct debug_monitor_state *, vm_offset_t, vm_size_t);
+
+#ifdef DDB
+void dbg_show_watchpoint(void);
+#endif
+
+#endif /* _KERNEL */
+
+#endif /* _MACHINE_DEBUG_MONITOR_H_ */
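A hedged sketch of driving the watchpoint interface above; the state object and the choice of an 8-byte write watchpoint are illustrative, not part of this patch:

static struct debug_monitor_state example_state;

static int
watch_kernel_word(uint64_t *p)
{

	/* Arm an 8-byte write watchpoint covering *p. */
	return (dbg_setup_watchpoint(&example_state, (vm_offset_t)p,
	    sizeof(*p), HW_BREAKPOINT_W));
}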
diff --git a/sys/arm64/include/disassem.h b/sys/arm64/include/disassem.h
new file mode 100644
index 000000000000..1a555117a066
--- /dev/null
+++ b/sys/arm64/include/disassem.h
@@ -0,0 +1,42 @@
+/*-
+ * Copyright (c) 2016 Cavium
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __DISASSEM_H_
+#define __DISASSEM_H_
+
+struct disasm_interface {
+ u_int (*di_readword)(vm_offset_t);
+ void (*di_printaddr)(vm_offset_t);
+ int (*di_printf)(const char *, ...) __printflike(1, 2);
+};
+
+vm_offset_t disasm(const struct disasm_interface *, vm_offset_t, int);
+
+#endif /* __DISASSEM_H_ */
diff --git a/sys/arm64/include/dump.h b/sys/arm64/include/dump.h
new file mode 100644
index 000000000000..6f2537550c42
--- /dev/null
+++ b/sys/arm64/include/dump.h
@@ -0,0 +1,74 @@
+/*-
+ * Copyright (c) 2014 EMC Corp.
+ * Author: Conrad Meyer <conrad.meyer@isilon.com>
+ * Copyright (c) 2015 The FreeBSD Foundation.
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Andrew Turner
+ * under sponsorship from the FreeBSD Foundation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_DUMP_H_
+#define _MACHINE_DUMP_H_
+
+#define KERNELDUMP_ARCH_VERSION KERNELDUMP_AARCH64_VERSION
+#define EM_VALUE EM_AARCH64
+/* XXX: I suppose 20 should be enough. */
+#define DUMPSYS_MD_PA_NPAIRS 20
+#define DUMPSYS_NUM_AUX_HDRS 1
+
+void dumpsys_wbinv_all(void);
+int dumpsys_write_aux_headers(struct dumperinfo *di);
+
+static inline void
+dumpsys_pa_init(void)
+{
+
+ dumpsys_gen_pa_init();
+}
+
+static inline struct dump_pa *
+dumpsys_pa_next(struct dump_pa *p)
+{
+
+ return (dumpsys_gen_pa_next(p));
+}
+
+static inline void
+dumpsys_unmap_chunk(vm_paddr_t pa, size_t s, void *va)
+{
+
+ dumpsys_gen_unmap_chunk(pa, s, va);
+}
+
+static inline int
+dumpsys(struct dumperinfo *di)
+{
+
+ return (dumpsys_generic(di));
+}
+
+#endif /* !_MACHINE_DUMP_H_ */
diff --git a/sys/arm64/include/efi.h b/sys/arm64/include/efi.h
new file mode 100644
index 000000000000..a8fddfad8d0f
--- /dev/null
+++ b/sys/arm64/include/efi.h
@@ -0,0 +1,58 @@
+/*-
+ * Copyright (c) 2017 Andrew Turner
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __ARM64_INCLUDE_EFI_H_
+#define __ARM64_INCLUDE_EFI_H_
+
+#define EFIABI_ATTR
+
+#ifdef _KERNEL
+#define EFI_TIME_LOCK()
+#define EFI_TIME_UNLOCK()
+#define EFI_TIME_OWNED()
+
+#define EFI_RT_HANDLE_FAULTS_DEFAULT 0
+#endif
+
+struct efirt_callinfo {
+ const char *ec_name;
+ register_t ec_efi_status;
+ register_t ec_fptr;
+ register_t ec_argcnt;
+ register_t ec_arg1;
+ register_t ec_arg2;
+ register_t ec_arg3;
+ register_t ec_arg4;
+ register_t ec_arg5;
+};
+
+#endif /* __ARM64_INCLUDE_EFI_H_ */
diff --git a/sys/arm64/include/elf.h b/sys/arm64/include/elf.h
new file mode 100644
index 000000000000..9b182b762fd2
--- /dev/null
+++ b/sys/arm64/include/elf.h
@@ -0,0 +1,149 @@
+/*-
+ * Copyright (c) 1996-1997 John D. Polstra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_ELF_H_
+#define _MACHINE_ELF_H_
+
+/*
+ * ELF definitions for the AArch64 architecture.
+ */
+
+#include <sys/elf32.h> /* Definitions common to all 32 bit architectures. */
+#include <sys/elf64.h> /* Definitions common to all 64 bit architectures. */
+
+#ifndef __ELF_WORD_SIZE
+#define __ELF_WORD_SIZE 64 /* Used by <sys/elf_generic.h> */
+#endif
+
+#include <sys/elf_generic.h>
+
+/*
+ * Auxiliary vector entries for passing information to the interpreter.
+ */
+
+typedef struct { /* Auxiliary vector entry on initial stack */
+ int a_type; /* Entry type. */
+ union {
+ int a_val; /* Integer value. */
+ } a_un;
+} Elf32_Auxinfo;
+
+typedef struct { /* Auxiliary vector entry on initial stack */
+ long a_type; /* Entry type. */
+ union {
+ long a_val; /* Integer value. */
+ void *a_ptr; /* Address. */
+ void (*a_fcn)(void); /* Function pointer (not used). */
+ } a_un;
+} Elf64_Auxinfo;
+
+__ElfType(Auxinfo);
+
+#ifdef _MACHINE_ELF_WANT_32BIT
+#define ELF_ARCH EM_ARM
+#else
+#define ELF_ARCH EM_AARCH64
+#endif
+
+#define ELF_MACHINE_OK(x) ((x) == (ELF_ARCH))
+
+/* Define "machine" characteristics */
+#if __ELF_WORD_SIZE == 64
+#define ELF_TARG_CLASS ELFCLASS64
+#define ELF_TARG_DATA ELFDATA2LSB
+#define ELF_TARG_MACH EM_AARCH64
+#define ELF_TARG_VER 1
+#else
+#define ELF_TARG_CLASS ELFCLASS32
+#define ELF_TARG_DATA ELFDATA2LSB
+#define ELF_TARG_MACH EM_ARM
+#define ELF_TARG_VER 1
+#endif
+
+#if __ELF_WORD_SIZE == 32
+#define ET_DYN_LOAD_ADDR 0x12000
+#else
+#define ET_DYN_LOAD_ADDR 0x100000
+#endif
+
+/* HWCAP */
+#define HWCAP_FP 0x00000001
+#define HWCAP_ASIMD 0x00000002
+#define HWCAP_EVTSTRM 0x00000004
+#define HWCAP_AES 0x00000008
+#define HWCAP_PMULL 0x00000010
+#define HWCAP_SHA1 0x00000020
+#define HWCAP_SHA2 0x00000040
+#define HWCAP_CRC32 0x00000080
+#define HWCAP_ATOMICS 0x00000100
+#define HWCAP_FPHP 0x00000200
+/* XXX: The following bits don't match the Linux definitions */
+#define HWCAP_CPUID 0x00000400
+#define HWCAP_ASIMDRDM 0x00000800
+#define HWCAP_JSCVT 0x00001000
+#define HWCAP_FCMA 0x00002000
+#define HWCAP_LRCPC 0x00004000
+#define HWCAP_DCPOP 0x00008000
+#define HWCAP_SHA3 0x00010000
+#define HWCAP_SM3 0x00020000
+#define HWCAP_SM4 0x00040000
+#define HWCAP_ASIMDDP 0x00080000
+#define HWCAP_SHA512 0x00100000
+#define HWCAP_SVE 0x00200000
+#define HWCAP_ASIMDFHM 0x00400000
+#define HWCAP_DIT 0x00800000
+#define HWCAP_USCAT 0x01000000
+#define HWCAP_ILRCPC 0x02000000
+#define HWCAP_FLAGM 0x04000000
+/* XXX: end of incorrect definitions */
+#define HWCAP_SSBS 0x10000000
+#define HWCAP_SB 0x20000000
+#define HWCAP_PACA 0x40000000
+#define HWCAP_PACG 0x80000000
+
+/* HWCAP2 */
+#define HWCAP2_DCPODP 0x00000001
+#define HWCAP2_SVE2 0x00000002
+#define HWCAP2_SVEAES 0x00000004
+#define HWCAP2_SVEPMULL 0x00000008
+#define HWCAP2_SVEBITPERM 0x00000010
+#define HWCAP2_SVESHA3 0x00000020
+#define HWCAP2_SVESM4 0x00000040
+#define HWCAP2_FLAGM2 0x00000080
+#define HWCAP2_FRINT 0x00000100
+#define HWCAP2_SVEI8MM 0x00000200
+#define HWCAP2_SVEF32MM 0x00000400
+#define HWCAP2_SVEF64MM 0x00000800
+#define HWCAP2_SVEBF16 0x00001000
+#define HWCAP2_I8MM 0x00002000
+#define HWCAP2_BF16 0x00004000
+#define HWCAP2_DGH 0x00008000
+#define HWCAP2_RNG 0x00010000
+#define HWCAP2_BTI 0x00020000
+
+#endif /* !_MACHINE_ELF_H_ */
diff --git a/sys/arm64/include/endian.h b/sys/arm64/include/endian.h
new file mode 100644
index 000000000000..8cb5c6976b37
--- /dev/null
+++ b/sys/arm64/include/endian.h
@@ -0,0 +1,122 @@
+/*-
+ * Copyright (c) 2001 David E. O'Brien
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)endian.h 8.1 (Berkeley) 6/10/93
+ * $NetBSD: endian.h,v 1.7 1999/08/21 05:53:51 simonb Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_ENDIAN_H_
+#define _MACHINE_ENDIAN_H_
+
+#include <sys/_types.h>
+
+/*
+ * Definitions for byte order, according to byte significance from low
+ * address to high.
+ */
+#define _LITTLE_ENDIAN 1234 /* LSB first: i386, vax */
+#define _BIG_ENDIAN 4321 /* MSB first: 68000, ibm, net */
+#define _PDP_ENDIAN 3412 /* LSB first in word, MSW first in long */
+
+#define _BYTE_ORDER _LITTLE_ENDIAN
+
+#if __BSD_VISIBLE
+#define LITTLE_ENDIAN _LITTLE_ENDIAN
+#define BIG_ENDIAN _BIG_ENDIAN
+#define PDP_ENDIAN _PDP_ENDIAN
+#define BYTE_ORDER _BYTE_ORDER
+#endif
+
+#define _QUAD_HIGHWORD 1
+#define _QUAD_LOWWORD 0
+#define __ntohl(x) (__bswap32(x))
+#define __ntohs(x) (__bswap16(x))
+#define __htonl(x) (__bswap32(x))
+#define __htons(x) (__bswap16(x))
+
+static __inline __uint64_t
+__bswap64(__uint64_t x)
+{
+ __uint64_t ret;
+
+ __asm __volatile("rev %0, %1\n"
+ : "=&r" (ret), "+r" (x));
+
+ return (ret);
+}
+
+static __inline __uint32_t
+__bswap32_var(__uint32_t v)
+{
+ __uint32_t ret;
+
+ __asm __volatile("rev32 %x0, %x1\n"
+ : "=&r" (ret), "+r" (v));
+
+ return (ret);
+}
+
+static __inline __uint16_t
+__bswap16_var(__uint16_t v)
+{
+ __uint32_t ret;
+
+ __asm __volatile("rev16 %w0, %w1\n"
+ : "=&r" (ret), "+r" (v));
+
+ return ((__uint16_t)ret);
+}
+
+#ifdef __OPTIMIZE__
+
+#define __bswap32_constant(x) \
+ ((((x) & 0xff000000U) >> 24) | \
+ (((x) & 0x00ff0000U) >> 8) | \
+ (((x) & 0x0000ff00U) << 8) | \
+ (((x) & 0x000000ffU) << 24))
+
+#define __bswap16_constant(x) \
+ ((((x) & 0xff00) >> 8) | \
+ (((x) & 0x00ff) << 8))
+
+#define __bswap16(x) \
+ ((__uint16_t)(__builtin_constant_p(x) ? \
+ __bswap16_constant((__uint16_t)(x)) : \
+ __bswap16_var(x)))
+
+#define __bswap32(x) \
+ ((__uint32_t)(__builtin_constant_p(x) ? \
+ __bswap32_constant((__uint32_t)(x)) : \
+ __bswap32_var(x)))
+
+#else
+#define __bswap16(x) __bswap16_var(x)
+#define __bswap32(x) __bswap32_var(x)
+
+#endif /* __OPTIMIZE__ */
+#endif /* !_MACHINE_ENDIAN_H_ */
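With __OPTIMIZE__ defined, __bswap32() folds compile-time constants through the shift/mask form and emits a single rev32 for run-time values. An illustrative sketch, not part of this patch:

static uint32_t
example_bswap(uint32_t v)
{
	uint32_t a = __bswap32(0x11223344U);	/* constant-folded to 0x44332211 */
	uint32_t b = __bswap32(v);		/* becomes a rev32 instruction */

	return (a ^ b);
}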
diff --git a/sys/arm64/include/exec.h b/sys/arm64/include/exec.h
new file mode 100644
index 000000000000..da23dbe43a4f
--- /dev/null
+++ b/sys/arm64/include/exec.h
@@ -0,0 +1 @@
+/* $FreeBSD$ */
diff --git a/sys/arm64/include/float.h b/sys/arm64/include/float.h
new file mode 100644
index 000000000000..0829f6f52aa9
--- /dev/null
+++ b/sys/arm64/include/float.h
@@ -0,0 +1,94 @@
+/*-
+ * Copyright (c) 1989 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)float.h 7.1 (Berkeley) 5/8/90
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_FLOAT_H_
+#define _MACHINE_FLOAT_H_
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+extern int __flt_rounds(void);
+__END_DECLS
+
+#define FLT_RADIX 2 /* b */
+#define FLT_ROUNDS __flt_rounds()
+#if __ISO_C_VISIBLE >= 1999
+#define FLT_EVAL_METHOD 0
+#define DECIMAL_DIG 17 /* max precision in decimal digits */
+#endif
+
+#define FLT_MANT_DIG 24 /* p */
+#define FLT_EPSILON 1.19209290E-07F /* b**(1-p) */
+#define FLT_DIG 6 /* floor((p-1)*log10(b))+(b == 10) */
+#define FLT_MIN_EXP (-125) /* emin */
+#define FLT_MIN 1.17549435E-38F /* b**(emin-1) */
+#define FLT_MIN_10_EXP (-37) /* ceil(log10(b**(emin-1))) */
+#define FLT_MAX_EXP 128 /* emax */
+#define FLT_MAX 3.40282347E+38F /* (1-b**(-p))*b**emax */
+#define FLT_MAX_10_EXP 38 /* floor(log10((1-b**(-p))*b**emax)) */
+#if __ISO_C_VISIBLE >= 2011
+#define FLT_TRUE_MIN 1.40129846E-45F /* b**(emin-p) */
+#define FLT_DECIMAL_DIG 9 /* ceil(1+p*log10(b)) */
+#define FLT_HAS_SUBNORM 1
+#endif /* __ISO_C_VISIBLE >= 2011 */
+
+#define DBL_MANT_DIG 53
+#define DBL_EPSILON 2.2204460492503131E-16
+#define DBL_DIG 15
+#define DBL_MIN_EXP (-1021)
+#define DBL_MIN 2.2250738585072014E-308
+#define DBL_MIN_10_EXP (-307)
+#define DBL_MAX_EXP 1024
+#define DBL_MAX 1.7976931348623157E+308
+#define DBL_MAX_10_EXP 308
+#if __ISO_C_VISIBLE >= 2011
+#define DBL_TRUE_MIN 4.9406564584124654E-324
+#define DBL_DECIMAL_DIG 17
+#define DBL_HAS_SUBNORM 1
+#endif /* __ISO_C_VISIBLE >= 2011 */
+
+#define LDBL_MANT_DIG 113
+#define LDBL_EPSILON 1.925929944387235853055977942584927319E-34L
+#define LDBL_DIG 33
+#define LDBL_MIN_EXP (-16381)
+#define LDBL_MIN 3.362103143112093506262677817321752603E-4932L
+#define LDBL_MIN_10_EXP (-4931)
+#define LDBL_MAX_EXP (+16384)
+#define LDBL_MAX 1.189731495357231765085759326628007016E+4932L
+#define LDBL_MAX_10_EXP (+4932)
+#if __ISO_C_VISIBLE >= 2011
+#define LDBL_TRUE_MIN 6.475175119438025110924438958227646552E-4966L
+#define LDBL_DECIMAL_DIG 36
+#define LDBL_HAS_SUBNORM 1
+#endif /* __ISO_C_VISIBLE >= 2011 */
+
+#endif /* _MACHINE_FLOAT_H_ */
diff --git a/sys/arm64/include/floatingpoint.h b/sys/arm64/include/floatingpoint.h
new file mode 100644
index 000000000000..da2e005915fc
--- /dev/null
+++ b/sys/arm64/include/floatingpoint.h
@@ -0,0 +1,3 @@
+/* $FreeBSD$ */
+
+#include <machine/ieeefp.h>
diff --git a/sys/arm64/include/fpu.h b/sys/arm64/include/fpu.h
new file mode 100644
index 000000000000..bd543d0d8e61
--- /dev/null
+++ b/sys/arm64/include/fpu.h
@@ -0,0 +1,6 @@
+/*-
+ * This file is in the public domain.
+ *
+ * $FreeBSD$
+ */
+#include <machine/vfp.h>
diff --git a/sys/arm64/include/frame.h b/sys/arm64/include/frame.h
new file mode 100644
index 000000000000..0a8b53ebb01e
--- /dev/null
+++ b/sys/arm64/include/frame.h
@@ -0,0 +1,83 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * Copyright (c) 2014 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_FRAME_H_
+#define _MACHINE_FRAME_H_
+
+#ifndef LOCORE
+
+#include <sys/signal.h>
+#include <sys/ucontext.h>
+
+/*
+ * NOTE: keep this structure in sync with struct reg and struct mcontext.
+ */
+struct trapframe {
+ uint64_t tf_sp;
+ uint64_t tf_lr;
+ uint64_t tf_elr;
+ uint32_t tf_spsr;
+ uint32_t tf_esr;
+ uint64_t tf_x[30];
+};
+
+struct arm64_frame {
+ struct arm64_frame *f_frame;
+ u_long f_retaddr;
+};
+
+/*
+ * Signal frame, pushed onto the user stack.
+ */
+struct sigframe {
+ siginfo_t sf_si; /* actual saved siginfo */
+ ucontext_t sf_uc; /* actual saved ucontext */
+};
+
+/*
+ * There is no fixed frame layout, other than to be 16-byte aligned.
+ */
+struct frame {
+ int dummy;
+};
+
+#ifdef COMPAT_FREEBSD32
+struct sigframe32 {
+ struct siginfo32 sf_si;
+ ucontext32_t sf_uc;
+ mcontext32_vfp_t sf_vfp;
+};
+#endif /* COMPAT_FREEBSD32 */
+
+#endif /* !LOCORE */
+
+#endif /* !_MACHINE_FRAME_H_ */
diff --git a/sys/arm64/include/hypervisor.h b/sys/arm64/include/hypervisor.h
new file mode 100644
index 000000000000..eab43b29a89e
--- /dev/null
+++ b/sys/arm64/include/hypervisor.h
@@ -0,0 +1,185 @@
+/*-
+ * Copyright (c) 2013, 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_HYPERVISOR_H_
+#define _MACHINE_HYPERVISOR_H_
+
+/*
+ * These registers are only useful when in hypervisor context,
+ * e.g. specific to EL2, or controlling the hypervisor.
+ */
+
+/* CNTHCTL_EL2 - Counter-timer Hypervisor Control register */
+#define CNTHCTL_EVNTI_MASK (0xf << 4) /* Bit to trigger event stream */
+#define CNTHCTL_EVNTDIR (1 << 3) /* Control transition trigger bit */
+#define CNTHCTL_EVNTEN (1 << 2) /* Enable event stream */
+#define CNTHCTL_EL1PCEN (1 << 1) /* Allow EL0/1 physical timer access */
+#define CNTHCTL_EL1PCTEN (1 << 0) /*Allow EL0/1 physical counter access*/
+
+/* CPTR_EL2 - Architecture feature trap register */
+#define CPTR_RES0 0x7fefc800
+#define CPTR_RES1 0x000033ff
+#define CPTR_TFP 0x00000400
+#define CPTR_TTA 0x00100000
+#define CPTR_TCPAC 0x80000000
+
+/* HCR_EL2 - Hypervisor Config Register */
+#define HCR_VM 0x0000000000000001
+#define HCR_SWIO 0x0000000000000002
+#define HCR_PTW 0x0000000000000004
+#define HCR_FMO 0x0000000000000008
+#define HCR_IMO 0x0000000000000010
+#define HCR_AMO 0x0000000000000020
+#define HCR_VF 0x0000000000000040
+#define HCR_VI 0x0000000000000080
+#define HCR_VSE 0x0000000000000100
+#define HCR_FB 0x0000000000000200
+#define HCR_BSU_MASK 0x0000000000000c00
+#define HCR_BSU_IS 0x0000000000000400
+#define HCR_BSU_OS 0x0000000000000800
+#define HCR_BSU_FS 0x0000000000000c00
+#define HCR_DC 0x0000000000001000
+#define HCR_TWI 0x0000000000002000
+#define HCR_TWE 0x0000000000004000
+#define HCR_TID0 0x0000000000008000
+#define HCR_TID1 0x0000000000010000
+#define HCR_TID2 0x0000000000020000
+#define HCR_TID3 0x0000000000040000
+#define HCR_TSC 0x0000000000080000
+#define HCR_TIDCP 0x0000000000100000
+#define HCR_TACR 0x0000000000200000
+#define HCR_TSW 0x0000000000400000
+#define HCR_TPCP 0x0000000000800000
+#define HCR_TPU 0x0000000001000000
+#define HCR_TTLB 0x0000000002000000
+#define HCR_TVM 0x0000000004000000
+#define HCR_TGE 0x0000000008000000
+#define HCR_TDZ 0x0000000010000000
+#define HCR_HCD 0x0000000020000000
+#define HCR_TRVM 0x0000000040000000
+#define HCR_RW 0x0000000080000000
+#define HCR_CD 0x0000000100000000
+#define HCR_ID 0x0000000200000000
+#define HCR_E2H 0x0000000400000000
+#define HCR_TLOR 0x0000000800000000
+#define HCR_TERR 0x0000001000000000
+#define HCR_TEA 0x0000002000000000
+#define HCR_MIOCNCE 0x0000004000000000
+/* Bit 39 is reserved */
+#define HCR_APK 0x0000010000000000
+#define HCR_API 0x0000020000000000
+#define HCR_NV 0x0000040000000000
+#define HCR_NV1 0x0000080000000000
+#define HCR_AT 0x0000100000000000
+
+/* HPFAR_EL2 - Hypervisor IPA Fault Address Register */
+#define HPFAR_EL2_FIPA_SHIFT 4
+#define HPFAR_EL2_FIPA_MASK 0xfffffffff0
+
+/* ICC_SRE_EL2 */
+#define ICC_SRE_EL2_SRE (1U << 0)
+#define ICC_SRE_EL2_EN (1U << 3)
+
+/* SCTLR_EL2 - System Control Register */
+#define SCTLR_EL2_RES1 0x30c50830
+#define SCTLR_EL2_M_SHIFT 0
+#define SCTLR_EL2_M (0x1 << SCTLR_EL2_M_SHIFT)
+#define SCTLR_EL2_A_SHIFT 1
+#define SCTLR_EL2_A (0x1 << SCTLR_EL2_A_SHIFT)
+#define SCTLR_EL2_C_SHIFT 2
+#define SCTLR_EL2_C (0x1 << SCTLR_EL2_C_SHIFT)
+#define SCTLR_EL2_SA_SHIFT 3
+#define SCTLR_EL2_SA (0x1 << SCTLR_EL2_SA_SHIFT)
+#define SCTLR_EL2_I_SHIFT 12
+#define SCTLR_EL2_I (0x1 << SCTLR_EL2_I_SHIFT)
+#define SCTLR_EL2_WXN_SHIFT 19
+#define SCTLR_EL2_WXN (0x1 << SCTLR_EL2_WXN_SHIFT)
+#define SCTLR_EL2_EE_SHIFT 25
+#define SCTLR_EL2_EE (0x1 << SCTLR_EL2_EE_SHIFT)
+
+/* TCR_EL2 - Translation Control Register */
+#define TCR_EL2_RES1 ((0x1UL << 31) | (0x1UL << 23))
+#define TCR_EL2_T0SZ_SHIFT 0
+#define TCR_EL2_T0SZ_MASK (0x3f << TCR_EL2_T0SZ_SHIFT)
+#define TCR_EL2_T0SZ(x) ((x) << TCR_EL2_T0SZ_SHIFT)
+/* Bits 7:6 are reserved */
+#define TCR_EL2_IRGN0_SHIFT 8
+#define TCR_EL2_IRGN0_MASK (0x3 << TCR_EL2_IRGN0_SHIFT)
+#define TCR_EL2_ORGN0_SHIFT 10
+#define TCR_EL2_ORGN0_MASK (0x3 << TCR_EL2_ORGN0_SHIFT)
+#define TCR_EL2_SH0_SHIFT 12
+#define TCR_EL2_SH0_MASK (0x3 << TCR_EL2_SH0_SHIFT)
+#define TCR_EL2_TG0_SHIFT 14
+#define TCR_EL2_TG0_MASK (0x3 << TCR_EL2_TG0_SHIFT)
+#define TCR_EL2_PS_SHIFT 16
+#define TCR_EL2_PS_32BITS (0 << TCR_EL2_PS_SHIFT)
+#define TCR_EL2_PS_36BITS (1 << TCR_EL2_PS_SHIFT)
+#define TCR_EL2_PS_40BITS (2 << TCR_EL2_PS_SHIFT)
+#define TCR_EL2_PS_42BITS (3 << TCR_EL2_PS_SHIFT)
+#define TCR_EL2_PS_44BITS (4 << TCR_EL2_PS_SHIFT)
+#define TCR_EL2_PS_48BITS (5 << TCR_EL2_PS_SHIFT)
+#define TCR_EL2_PS_52BITS (6 << TCR_EL2_PS_SHIFT) /* ARMv8.2-LPA */
+
+/* VMPIDR_EL2 - Virtualization Multiprocessor ID Register */
+#define VMPIDR_EL2_U 0x0000000040000000
+#define VMPIDR_EL2_MT 0x0000000001000000
+#define VMPIDR_EL2_RES1 0x0000000080000000
+
+/* VTCR_EL2 - Virtualization Translation Control Register */
+#define VTCR_EL2_RES1 (0x1 << 31)
+#define VTCR_EL2_T0SZ_MASK 0x3f
+#define VTCR_EL2_SL0_SHIFT 6
+#define VTCR_EL2_SL0_4K_LVL2 (0x0 << VTCR_EL2_SL0_SHIFT)
+#define VTCR_EL2_SL0_4K_LVL1 (0x1 << VTCR_EL2_SL0_SHIFT)
+#define VTCR_EL2_SL0_4K_LVL0 (0x2 << VTCR_EL2_SL0_SHIFT)
+#define VTCR_EL2_IRGN0_SHIFT 8
+#define VTCR_EL2_IRGN0_WBWA (0x1 << VTCR_EL2_IRGN0_SHIFT)
+#define VTCR_EL2_ORGN0_SHIFT 10
+#define VTCR_EL2_ORGN0_WBWA (0x1 << VTCR_EL2_ORGN0_SHIFT)
+#define VTCR_EL2_SH0_SHIFT 12
+#define VTCR_EL2_SH0_NS (0x0 << VTCR_EL2_SH0_SHIFT)
+#define VTCR_EL2_SH0_OS (0x2 << VTCR_EL2_SH0_SHIFT)
+#define VTCR_EL2_SH0_IS (0x3 << VTCR_EL2_SH0_SHIFT)
+#define VTCR_EL2_TG0_SHIFT 14
+#define VTCR_EL2_TG0_4K (0x0 << VTCR_EL2_TG0_SHIFT)
+#define VTCR_EL2_TG0_64K (0x1 << VTCR_EL2_TG0_SHIFT)
+#define VTCR_EL2_TG0_16K (0x2 << VTCR_EL2_TG0_SHIFT)
+#define VTCR_EL2_PS_SHIFT 16
+#define VTCR_EL2_PS_32BIT (0x0 << VTCR_EL2_PS_SHIFT)
+#define VTCR_EL2_PS_36BIT (0x1 << VTCR_EL2_PS_SHIFT)
+#define VTCR_EL2_PS_40BIT (0x2 << VTCR_EL2_PS_SHIFT)
+#define VTCR_EL2_PS_42BIT (0x3 << VTCR_EL2_PS_SHIFT)
+#define VTCR_EL2_PS_44BIT (0x4 << VTCR_EL2_PS_SHIFT)
+#define VTCR_EL2_PS_48BIT (0x5 << VTCR_EL2_PS_SHIFT)
+
+/* VTTBR_EL2 - Virtualization Translation Table Base Register */
+#define VTTBR_VMID_MASK 0xffff000000000000
+#define VTTBR_VMID_SHIFT 48
+#define VTTBR_HOST 0x0000000000000000
+
+#endif /* !_MACHINE_HYPERVISOR_H_ */
diff --git a/sys/arm64/include/ieeefp.h b/sys/arm64/include/ieeefp.h
new file mode 100644
index 000000000000..178721a65b1e
--- /dev/null
+++ b/sys/arm64/include/ieeefp.h
@@ -0,0 +1,43 @@
+/*-
+ * Based on sys/sparc64/include/ieeefp.h
+ * Public domain.
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_IEEEFP_H_
+#define _MACHINE_IEEEFP_H_
+
+/* Deprecated FPU control interface */
+
+/* FP exception codes */
+#define FP_EXCEPT_INV 8
+#define FP_EXCEPT_DZ 9
+#define FP_EXCEPT_OFL 10
+#define FP_EXCEPT_UFL 11
+#define FP_EXCEPT_IMP 12
+#define FP_EXCEPT_DNML 15
+
+typedef int fp_except_t;
+
+#define FP_X_INV (1 << FP_EXCEPT_INV) /* invalid operation exception */
+#define FP_X_DZ (1 << FP_EXCEPT_DZ) /* divide-by-zero exception */
+#define FP_X_OFL (1 << FP_EXCEPT_OFL) /* overflow exception */
+#define FP_X_UFL (1 << FP_EXCEPT_UFL) /* underflow exception */
+#define FP_X_IMP (1 << FP_EXCEPT_IMP) /* imprecise (loss of precision) */
+#define FP_X_DNML (1 << FP_EXCEPT_DNML) /* denormal exception */
+
+typedef enum {
+ FP_RN = (0 << 22), /* round to nearest representable number */
+ FP_RP = (1 << 22), /* round toward positive infinity */
+ FP_RM = (2 << 22), /* round toward negative infinity */
+ FP_RZ = (3 << 22) /* round to zero (truncate) */
+} fp_rnd_t;
+
+__BEGIN_DECLS
+extern fp_rnd_t fpgetround(void);
+extern fp_rnd_t fpsetround(fp_rnd_t);
+extern fp_except_t fpgetmask(void);
+extern fp_except_t fpsetmask(fp_except_t);
+__END_DECLS
+
+#endif /* _MACHINE_IEEEFP_H_ */
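The fp_rnd_t values above encode the FPCR.RMode field (bits 23:22). A userland sketch of the deprecated interface, illustrative only:

#include <ieeefp.h>

void
round_toward_zero_briefly(void)
{
	fp_rnd_t saved;

	saved = fpsetround(FP_RZ);	/* truncate results toward zero */
	/* ... floating-point work ... */
	fpsetround(saved);		/* restore the previous rounding mode */
}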
diff --git a/sys/arm64/include/ifunc.h b/sys/arm64/include/ifunc.h
new file mode 100644
index 000000000000..cf89af7c7142
--- /dev/null
+++ b/sys/arm64/include/ifunc.h
@@ -0,0 +1,51 @@
+/*-
+ * Copyright (c) 2015-2018 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __ARM64_IFUNC_H
+#define __ARM64_IFUNC_H
+
+#define DEFINE_IFUNC(qual, ret_type, name, args) \
+ static ret_type (*name##_resolver(void))args __used; \
+ qual ret_type name args __attribute__((ifunc(#name "_resolver"))); \
+ static ret_type (*name##_resolver(void))args
+
+#define DEFINE_UIFUNC(qual, ret_type, name, args) \
+ static ret_type (*name##_resolver(uint64_t, uint64_t, \
+ uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, \
+ uint64_t))args __used; \
+ qual ret_type name args __attribute__((ifunc(#name "_resolver"))); \
+ static ret_type (*name##_resolver(uint64_t _arg1 __unused, \
+ uint64_t _arg2 __unused, uint64_t _arg3 __unused, \
+ uint64_t _arg4 __unused, uint64_t _arg5 __unused, \
+ uint64_t _arg6 __unused, uint64_t _arg7 __unused, \
+ uint64_t _arg8 __unused))args
+
+#endif
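DEFINE_IFUNC() declares a GNU indirect function whose resolver body runs once and returns the implementation to bind. A hedged sketch; example_cpu_has_lse() is a made-up placeholder, not a real kernel API:

static size_t copy_generic(void *dst, const void *src, size_t len);
static size_t copy_lse(void *dst, const void *src, size_t len);

DEFINE_IFUNC(static, size_t, example_copy, (void *dst, const void *src,
    size_t len))
{

	/* Select an implementation from a hypothetical CPU feature probe. */
	return (example_cpu_has_lse() ? copy_lse : copy_generic);
}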
diff --git a/sys/arm64/include/in_cksum.h b/sys/arm64/include/in_cksum.h
new file mode 100644
index 000000000000..522ba005a0e4
--- /dev/null
+++ b/sys/arm64/include/in_cksum.h
@@ -0,0 +1,52 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from tahoe: in_cksum.c 1.2 86/01/05
+ * from: @(#)in_cksum.c 1.3 (Berkeley) 1/19/91
+ * from: Id: in_cksum.c,v 1.8 1995/12/03 18:35:19 bde Exp
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_IN_CKSUM_H_
+#define _MACHINE_IN_CKSUM_H_ 1
+
+#include <sys/cdefs.h>
+
+#ifdef _KERNEL
+#define in_cksum(m, len) in_cksum_skip(m, len, 0)
+u_short in_addword(u_short sum, u_short b);
+u_short in_cksum_skip(struct mbuf *m, int len, int skip);
+u_int do_cksum(const void *, int);
+#if defined(IPVERSION) && (IPVERSION == 4)
+u_int in_cksum_hdr(const struct ip *);
+#endif
+
+u_short in_pseudo(u_int sum, u_int b, u_int c);
+
+#endif /* _KERNEL */
+#endif /* _MACHINE_IN_CKSUM_H_ */
diff --git a/sys/arm64/include/intr.h b/sys/arm64/include/intr.h
new file mode 100644
index 000000000000..b653bb98b8b6
--- /dev/null
+++ b/sys/arm64/include/intr.h
@@ -0,0 +1,56 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner <andrew@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_INTR_H_
+#define _MACHINE_INTR_H_
+
+#ifdef FDT
+#include <dev/ofw/openfirm.h>
+#endif
+
+#include <sys/intr.h>
+
+#ifndef NIRQ
+#define NIRQ 2048 /* XXX - It should be an option. */
+#endif
+
+static inline void
+arm_irq_memory_barrier(uintptr_t irq)
+{
+}
+
+#ifdef SMP
+void intr_ipi_dispatch(u_int, struct trapframe *);
+#endif
+
+#ifdef DEV_ACPI
+#define ACPI_INTR_XREF 1
+#define ACPI_MSI_XREF 2
+#endif
+
+#endif /* _MACHINE_INTR_H_ */
diff --git a/sys/arm64/include/iodev.h b/sys/arm64/include/iodev.h
new file mode 100644
index 000000000000..5521ff71bc5f
--- /dev/null
+++ b/sys/arm64/include/iodev.h
@@ -0,0 +1,65 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_IODEV_H_
+#define _MACHINE_IODEV_H_
+
+#define iodev_read_1(a) \
+({ \
+ uint8_t val; \
+ __asm __volatile("ldrb %w0, [%1]" : "=&r" (val) : "r"(a)); \
+ val; \
+})
+
+#define iodev_read_2(a) \
+({ \
+ uint16_t val; \
+ __asm __volatile("ldrh %w0, [%1]" : "=&r" (val) : "r"(a)); \
+ val; \
+})
+
+#define iodev_read_4(a) \
+({ \
+ uint32_t val; \
+ __asm __volatile("ldr %w0, [%1]" : "=&r" (val) : "r"(a)); \
+ val; \
+})
+
+#define iodev_write_1(a, v) \
+ __asm __volatile("strb %w0, [%1]" :: "r" (v), "r"(a))
+
+#define iodev_write_2(a, v) \
+ __asm __volatile("strh %w0, [%1]" :: "r" (v), "r"(a))
+
+#define iodev_write_4(a, v) \
+ __asm __volatile("str %w0, [%1]" :: "r" (v), "r"(a))
+
+#endif /* _MACHINE_IODEV_H_ */
diff --git a/sys/arm64/include/kdb.h b/sys/arm64/include/kdb.h
new file mode 100644
index 000000000000..2f7306ef669b
--- /dev/null
+++ b/sys/arm64/include/kdb.h
@@ -0,0 +1,55 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_KDB_H_
+#define _MACHINE_KDB_H_
+
+#include <machine/cpufunc.h>
+
+#define KDB_STOPPEDPCB(pc) &stoppcbs[pc->pc_cpuid]
+
+void kdb_cpu_clear_singlestep(void);
+void kdb_cpu_set_singlestep(void);
+
+static __inline void
+kdb_cpu_sync_icache(unsigned char *addr, size_t size)
+{
+
+ cpu_icache_sync_range((vm_offset_t)addr, size);
+}
+
+static __inline void
+kdb_cpu_trap(int type, int code)
+{
+}
+
+#endif /* _MACHINE_KDB_H_ */
diff --git a/sys/arm64/include/machdep.h b/sys/arm64/include/machdep.h
new file mode 100644
index 000000000000..54ffcbd46c81
--- /dev/null
+++ b/sys/arm64/include/machdep.h
@@ -0,0 +1,65 @@
+/*-
+ * Copyright (c) 2013 Andrew Turner <andrew@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_MACHDEP_H_
+#define _MACHINE_MACHDEP_H_
+
+#ifdef _KERNEL
+
+struct arm64_bootparams {
+ vm_offset_t modulep;
+ vm_offset_t kern_l1pt; /* L1 page table for the kernel */
+ uint64_t kern_delta;
+ vm_offset_t kern_stack;
+	vm_offset_t kern_l0pt;	/* L0 page table for the kernel */
+ vm_paddr_t kern_ttbr0;
+ int boot_el; /* EL the kernel booted from */
+ int pad;
+};
+
+enum arm64_bus {
+ ARM64_BUS_NONE,
+ ARM64_BUS_FDT,
+ ARM64_BUS_ACPI,
+};
+
+extern enum arm64_bus arm64_bus_method;
+
+void dbg_init(void);
+bool has_hyp(void);
+void initarm(struct arm64_bootparams *);
+vm_offset_t parse_boot_param(struct arm64_bootparams *abp);
+#ifdef FDT
+void parse_fdt_bootargs(void);
+#endif
+int memory_mapping_mode(vm_paddr_t pa);
+extern void (*pagezero)(void *);
+
+#endif /* _KERNEL */
+
+#endif /* _MACHINE_MACHDEP_H_ */
diff --git a/sys/arm64/include/md_var.h b/sys/arm64/include/md_var.h
new file mode 100644
index 000000000000..ef64920e6bb2
--- /dev/null
+++ b/sys/arm64/include/md_var.h
@@ -0,0 +1,50 @@
+/*-
+ * Copyright (c) 1995 Bruce D. Evans.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: FreeBSD: src/sys/i386/include/md_var.h,v 1.40 2001/07/12
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_MD_VAR_H_
+#define _MACHINE_MD_VAR_H_
+
+extern long Maxmem;
+extern char sigcode[];
+extern int szsigcode;
+extern uint64_t *vm_page_dump;
+extern int vm_page_dump_size;
+
+struct dumperinfo;
+
+extern int busdma_swi_pending;
+void busdma_swi(void);
+void dump_add_page(vm_paddr_t);
+void dump_drop_page(vm_paddr_t);
+int minidumpsys(struct dumperinfo *);
+
+#endif /* !_MACHINE_MD_VAR_H_ */
diff --git a/sys/arm64/include/memdev.h b/sys/arm64/include/memdev.h
new file mode 100644
index 000000000000..8fe431bc1fe3
--- /dev/null
+++ b/sys/arm64/include/memdev.h
@@ -0,0 +1,40 @@
+/*-
+ * Copyright (c) 2004 Mark R V Murray
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in this position and unchanged.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_MEMDEV_H_
+#define _MACHINE_MEMDEV_H_
+
+#define CDEV_MINOR_MEM 0
+#define CDEV_MINOR_KMEM 1
+
+d_open_t memopen;
+d_read_t memrw;
+d_ioctl_t memioctl_md;
+d_mmap_t memmmap;
+
+#endif /* _MACHINE_MEMDEV_H_ */
diff --git a/sys/arm64/include/metadata.h b/sys/arm64/include/metadata.h
new file mode 100644
index 000000000000..dc98620304e4
--- /dev/null
+++ b/sys/arm64/include/metadata.h
@@ -0,0 +1,54 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner <andrew@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_METADATA_H_
+#define _MACHINE_METADATA_H_
+
+#define MODINFOMD_EFI_MAP 0x1001
+#define MODINFOMD_DTBP 0x1002
+#define MODINFOMD_EFI_FB 0x1003
+
+struct efi_map_header {
+ size_t memory_size;
+ size_t descriptor_size;
+ uint32_t descriptor_version;
+};
+
+struct efi_fb {
+ uint64_t fb_addr;
+ uint64_t fb_size;
+ uint32_t fb_height;
+ uint32_t fb_width;
+ uint32_t fb_stride;
+ uint32_t fb_mask_red;
+ uint32_t fb_mask_green;
+ uint32_t fb_mask_blue;
+ uint32_t fb_mask_reserved;
+};
+
+#endif /* !_MACHINE_METADATA_H_ */
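struct efi_map_header stores only the total map size, the per-descriptor stride, and the descriptor version; the UEFI memory descriptors follow the header in the preloaded metadata and must be walked with descriptor_size, which may be larger than the compiled-in descriptor layout. A sketch under that assumption (hdr is a hypothetical pointer to the header; struct efi_md and roundup2 come from sys/efi.h and sys/param.h):

	/* Sketch: iterate the EFI memory descriptors that follow the header. */
	char *p = (char *)hdr + roundup2(sizeof(struct efi_map_header), 16);
	char *end = p + hdr->memory_size;

	for (; p < end; p += hdr->descriptor_size) {
		struct efi_md *md = (struct efi_md *)p;
		/* inspect md->md_type, md->md_phys, md->md_pages, ... */
	}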
diff --git a/sys/arm64/include/minidump.h b/sys/arm64/include/minidump.h
new file mode 100644
index 000000000000..240d5c3f74c8
--- /dev/null
+++ b/sys/arm64/include/minidump.h
@@ -0,0 +1,48 @@
+/*-
+ * Copyright (c) 2006 Peter Wemm
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * From i386: FreeBSD: 157909 2006-04-21 04:28:43Z peter
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_MINIDUMP_H_
+#define _MACHINE_MINIDUMP_H_ 1
+
+#define MINIDUMP_MAGIC "minidump FreeBSD/arm64"
+#define MINIDUMP_VERSION 1
+
+struct minidumphdr {
+ char magic[24];
+ uint32_t version;
+ uint32_t msgbufsize;
+ uint32_t bitmapsize;
+ uint32_t pmapsize;
+ uint64_t kernbase;
+ uint64_t dmapphys;
+ uint64_t dmapbase;
+ uint64_t dmapend;
+};
+
+#endif /* _MACHINE_MINIDUMP_H_ */
diff --git a/sys/arm64/include/ofw_machdep.h b/sys/arm64/include/ofw_machdep.h
new file mode 100644
index 000000000000..511fc8d71c19
--- /dev/null
+++ b/sys/arm64/include/ofw_machdep.h
@@ -0,0 +1,44 @@
+/*-
+ * Copyright (c) 2009 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_OFW_MACHDEP_H_
+#define _MACHINE_OFW_MACHDEP_H_
+
+#include <vm/vm.h>
+
+typedef uint32_t cell_t;
+
+struct mem_region {
+ vm_offset_t mr_start;
+ vm_size_t mr_size;
+};
+
+#endif /* _MACHINE_OFW_MACHDEP_H_ */
diff --git a/sys/arm64/include/param.h b/sys/arm64/include/param.h
new file mode 100644
index 000000000000..1164965ca12f
--- /dev/null
+++ b/sys/arm64/include/param.h
@@ -0,0 +1,130 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)param.h 5.8 (Berkeley) 6/28/91
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PARAM_H_
+#define _MACHINE_PARAM_H_
+
+/*
+ * Machine dependent constants for arm64.
+ */
+
+#include <machine/_align.h>
+
+#define STACKALIGNBYTES (16 - 1)
+#define STACKALIGN(p) ((uint64_t)(p) & ~STACKALIGNBYTES)
+
+#define __PCI_REROUTE_INTERRUPT
+
+#ifndef MACHINE
+#define MACHINE "arm64"
+#endif
+#ifndef MACHINE_ARCH
+#define MACHINE_ARCH "aarch64"
+#endif
+#ifndef MACHINE_ARCH32
+#define MACHINE_ARCH32 "armv7"
+#endif
+
+#ifdef SMP
+#ifndef MAXCPU
+#define MAXCPU 256
+#endif
+#else
+#define MAXCPU 1
+#endif
+
+#ifndef MAXMEMDOM
+#define MAXMEMDOM 2
+#endif
+
+#define ALIGNBYTES _ALIGNBYTES
+#define ALIGN(p) _ALIGN(p)
+/*
+ * ALIGNED_POINTER is a boolean macro that checks whether an address
+ * is valid to fetch data elements of type t from on this architecture.
+ * This does not reflect the optimal alignment, just the possibility
+ * (within reasonable limits).
+ */
+#define ALIGNED_POINTER(p, t) ((((u_long)(p)) & (sizeof(t) - 1)) == 0)
+
+/*
+ * CACHE_LINE_SIZE is the compile-time maximum cache line size for an
+ * architecture. It should be used with appropriate caution.
+ */
+#define CACHE_LINE_SHIFT 7
+#define CACHE_LINE_SIZE (1 << CACHE_LINE_SHIFT)
+
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1 << PAGE_SHIFT) /* Page size */
+#define PAGE_MASK (PAGE_SIZE - 1)
+
+#define PAGE_SHIFT_16K 14
+#define PAGE_SIZE_16K (1 << PAGE_SHIFT_16K)
+#define PAGE_MASK_16K (PAGE_SIZE_16K - 1)
+
+#define PAGE_SHIFT_64K 16
+#define PAGE_SIZE_64K (1 << PAGE_SHIFT_64K)
+#define PAGE_MASK_64K (PAGE_SIZE_64K - 1)
+
+#define MAXPAGESIZES 2 /* maximum number of supported page sizes */
+
+#ifndef KSTACK_PAGES
+#define KSTACK_PAGES 4 /* pages of kernel stack (with pcb) */
+#endif
+
+#define KSTACK_GUARD_PAGES 1 /* pages of kstack guard; 0 disables */
+#define PCPU_PAGES 1
+
+/*
+ * Ceiling on the size of the buffer cache (this really only affects write
+ * queueing; the VM page cache is not affected). It can be changed via the
+ * kern.maxbcache /boot/loader.conf variable.
+ */
+#ifndef VM_BCACHE_SIZE_MAX
+#define VM_BCACHE_SIZE_MAX (400 * 1024 * 1024)
+#endif
+
+/*
+ * Mach derived conversion macros
+ */
+#define round_page(x) (((unsigned long)(x) + PAGE_MASK) & ~PAGE_MASK)
+#define trunc_page(x) ((unsigned long)(x) & ~PAGE_MASK)
+
+#define atop(x) ((unsigned long)(x) >> PAGE_SHIFT)
+#define ptoa(x) ((unsigned long)(x) << PAGE_SHIFT)
+
+#define arm64_btop(x) ((unsigned long)(x) >> PAGE_SHIFT)
+#define arm64_ptob(x) ((unsigned long)(x) << PAGE_SHIFT)
+
+#define pgtok(x) ((unsigned long)(x) * (PAGE_SIZE / 1024))
+
+#endif /* !_MACHINE_PARAM_H_ */
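With PAGE_SHIFT fixed at 12, the Mach-derived conversion macros above reduce to shifts and masks; a small worked example with an arbitrary value:

	/* PAGE_SIZE = 0x1000, PAGE_MASK = 0xfff */
	round_page(0x12345);	/* -> 0x13000 */
	trunc_page(0x12345);	/* -> 0x12000 */
	atop(0x13000);		/* -> 0x13    (bytes to pages) */
	ptoa(0x13);		/* -> 0x13000 (pages to bytes) */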
diff --git a/sys/arm64/include/pcb.h b/sys/arm64/include/pcb.h
new file mode 100644
index 000000000000..25c1a5a7aff4
--- /dev/null
+++ b/sys/arm64/include/pcb.h
@@ -0,0 +1,81 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PCB_H_
+#define _MACHINE_PCB_H_
+
+#ifndef LOCORE
+
+#include <machine/debug_monitor.h>
+#include <machine/vfp.h>
+
+struct trapframe;
+
+#define PCB_LR 30
+struct pcb {
+ uint64_t pcb_x[31];
+ uint64_t pcb_pc;
+ /* These two need to be in order as we access them together */
+ uint64_t pcb_sp;
+ uint64_t pcb_tpidr_el0;
+ uint64_t pcb_tpidrro_el0;
+
+ /* Fault handler, the error value is passed in x0 */
+ vm_offset_t pcb_onfault;
+
+ u_int pcb_flags;
+#define PCB_SINGLE_STEP_SHIFT 0
+#define PCB_SINGLE_STEP (1 << PCB_SINGLE_STEP_SHIFT)
+
+ struct vfpstate *pcb_fpusaved;
+ int pcb_fpflags;
+#define PCB_FP_STARTED 0x01
+#define PCB_FP_KERN 0x02
+#define PCB_FP_NOSAVE 0x04
+/* The bits passed to userspace in get_fpcontext */
+#define PCB_FP_USERMASK (PCB_FP_STARTED)
+ u_int pcb_vfpcpu; /* Last cpu this thread ran VFP code */
+
+ /*
+ * The userspace VFP state. The pcb_fpusaved pointer will point to
+ * this unless the kernel has allocated a VFP context.
+	 * Place last to simplify the asm to access the rest of the struct.
+ */
+ struct vfpstate pcb_fpustate;
+
+ struct debug_monitor_state pcb_dbg_regs;
+};
+
+#ifdef _KERNEL
+void makectx(struct trapframe *tf, struct pcb *pcb);
+int savectx(struct pcb *pcb) __returns_twice;
+#endif
+
+#endif /* !LOCORE */
+
+#endif /* !_MACHINE_PCB_H_ */
diff --git a/sys/arm64/include/pci_cfgreg.h b/sys/arm64/include/pci_cfgreg.h
new file mode 100644
index 000000000000..68b38aaeb5bb
--- /dev/null
+++ b/sys/arm64/include/pci_cfgreg.h
@@ -0,0 +1,36 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PCI_CFGREG_H
+#define _MACHINE_PCI_CFGREG_H
+
+int pci_cfgregopen(void);
+uint32_t pci_cfgregread(int, int, int, int, int);
+void	pci_cfgregwrite(int, int, int, int, uint32_t, int);
+
+#endif /* !_MACHINE_PCI_CFGREG_H */
diff --git a/sys/arm64/include/pcpu.h b/sys/arm64/include/pcpu.h
new file mode 100644
index 000000000000..c13de37c7141
--- /dev/null
+++ b/sys/arm64/include/pcpu.h
@@ -0,0 +1,86 @@
+/*-
+ * Copyright (c) 1999 Luoqi Chen <luoqi@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: FreeBSD: src/sys/i386/include/globaldata.h,v 1.27 2001/04/27
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PCPU_H_
+#define _MACHINE_PCPU_H_
+
+#include <machine/cpu.h>
+#include <machine/cpufunc.h>
+
+#define ALT_STACK_SIZE 128
+
+typedef int (*pcpu_bp_harden)(void);
+typedef int (*pcpu_ssbd)(int);
+struct debug_monitor_state;
+
+#define PCPU_MD_FIELDS \
+ u_int pc_acpi_id; /* ACPI CPU id */ \
+ u_int pc_midr; /* stored MIDR value */ \
+ uint64_t pc_clock; \
+ pcpu_bp_harden pc_bp_harden; \
+ pcpu_ssbd pc_ssbd; \
+ struct pmap *pc_curpmap; \
+ struct pmap *pc_curvmpmap; \
+ u_int pc_bcast_tlbi_workaround; \
+ char __pad[205]
+
+#ifdef _KERNEL
+
+struct pcb;
+struct pcpu;
+
+static inline struct pcpu *
+get_pcpu(void)
+{
+ struct pcpu *pcpu;
+
+ __asm __volatile("mov %0, x18" : "=&r"(pcpu));
+ return (pcpu);
+}
+
+static inline struct thread *
+get_curthread(void)
+{
+ struct thread *td;
+
+ __asm __volatile("ldr %0, [x18]" : "=&r"(td));
+ return (td);
+}
+
+#define curthread get_curthread()
+
+#define PCPU_GET(member) (get_pcpu()->pc_ ## member)
+#define PCPU_ADD(member, value) (get_pcpu()->pc_ ## member += (value))
+#define PCPU_INC(member) PCPU_ADD(member, 1)
+#define PCPU_PTR(member) (&get_pcpu()->pc_ ## member)
+#define PCPU_SET(member,value) (get_pcpu()->pc_ ## member = (value))
+
+#endif /* _KERNEL */
+
+#endif /* !_MACHINE_PCPU_H_ */
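Because the per-CPU data pointer is kept in x18 and the current thread pointer is the first field it points at, the accessors above cost a register move or a single load. A brief usage sketch (the fields are just examples from PCPU_MD_FIELDS):

	u_int midr = PCPU_GET(midr);	/* expands to get_pcpu()->pc_midr */
	PCPU_SET(acpi_id, 42);		/* get_pcpu()->pc_acpi_id = 42    */
	struct thread *td = curthread;	/* one ldr from [x18]             */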
diff --git a/sys/arm64/include/pcpu_aux.h b/sys/arm64/include/pcpu_aux.h
new file mode 100644
index 000000000000..3d4c70c491d6
--- /dev/null
+++ b/sys/arm64/include/pcpu_aux.h
@@ -0,0 +1,52 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 The FreeBSD Foundation
+ *
+ * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PCPU_AUX_H_
+#define _MACHINE_PCPU_AUX_H_
+
+#ifndef _KERNEL
+#error "Not for userspace"
+#endif
+
+#ifndef _SYS_PCPU_H_
+#error "Do not include machine/pcpu_aux.h directly"
+#endif
+
+/*
+ * To minimize memory waste in per-cpu UMA zones, the page size should
+ * be a multiple of the size of struct pcpu.
+ */
+_Static_assert(PAGE_SIZE % sizeof(struct pcpu) == 0, "fix pcpu size");
+
+extern struct pcpu __pcpu[];
+
+#endif /* _MACHINE_PCPU_AUX_H_ */
diff --git a/sys/arm64/include/pmap.h b/sys/arm64/include/pmap.h
new file mode 100644
index 000000000000..e354d52ae901
--- /dev/null
+++ b/sys/arm64/include/pmap.h
@@ -0,0 +1,206 @@
+/*-
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and William Jolitz of UUNET Technologies Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PMAP_H_
+#define _MACHINE_PMAP_H_
+
+#include <machine/pte.h>
+
+#ifndef LOCORE
+
+#include <sys/queue.h>
+#include <sys/_lock.h>
+#include <sys/_mutex.h>
+
+#include <vm/_vm_radix.h>
+
+#ifdef _KERNEL
+
+#define vtophys(va) pmap_kextract((vm_offset_t)(va))
+
+#endif
+
+#define pmap_page_get_memattr(m) ((m)->md.pv_memattr)
+#define pmap_page_is_write_mapped(m) (((m)->a.flags & PGA_WRITEABLE) != 0)
+void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
+
+/*
+ * Pmap stuff
+ */
+
+struct md_page {
+ TAILQ_HEAD(,pv_entry) pv_list;
+ int pv_gen;
+ vm_memattr_t pv_memattr;
+};
+
+/*
+ * This structure is used to hold a virtual<->physical address
+ * association and is used mostly by bootstrap code
+ */
+struct pv_addr {
+ SLIST_ENTRY(pv_addr) pv_list;
+ vm_offset_t pv_va;
+ vm_paddr_t pv_pa;
+};
+
+enum pmap_stage {
+ PM_INVALID,
+ PM_STAGE1,
+ PM_STAGE2,
+};
+
+struct pmap {
+ struct mtx pm_mtx;
+ struct pmap_statistics pm_stats; /* pmap statistics */
+ vm_paddr_t pm_l0_paddr;
+ pd_entry_t *pm_l0;
+ TAILQ_HEAD(,pv_chunk) pm_pvchunk; /* list of mappings in pmap */
+ struct vm_radix pm_root; /* spare page table pages */
+ long pm_cookie; /* encodes the pmap's ASID */
+ struct asid_set *pm_asid_set; /* The ASID/VMID set to use */
+ enum pmap_stage pm_stage;
+};
+typedef struct pmap *pmap_t;
+
+typedef struct pv_entry {
+ vm_offset_t pv_va; /* virtual address for mapping */
+ TAILQ_ENTRY(pv_entry) pv_next;
+} *pv_entry_t;
+
+/*
+ * pv_entries are allocated in chunks per-process. This avoids the
+ * need to track per-pmap assignments.
+ */
+#define _NPCM 3
+#define _NPCPV 168
+#define PV_CHUNK_HEADER \
+ pmap_t pc_pmap; \
+ TAILQ_ENTRY(pv_chunk) pc_list; \
+ uint64_t pc_map[_NPCM]; /* bitmap; 1 = free */ \
+ TAILQ_ENTRY(pv_chunk) pc_lru;
+
+struct pv_chunk_header {
+ PV_CHUNK_HEADER
+};
+
+struct pv_chunk {
+ PV_CHUNK_HEADER
+ struct pv_entry pc_pventry[_NPCPV];
+};
+
+struct thread;
+
+#ifdef _KERNEL
+extern struct pmap kernel_pmap_store;
+#define kernel_pmap (&kernel_pmap_store)
+#define pmap_kernel() kernel_pmap
+
+#define PMAP_ASSERT_LOCKED(pmap) \
+ mtx_assert(&(pmap)->pm_mtx, MA_OWNED)
+#define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx)
+#define PMAP_LOCK_ASSERT(pmap, type) \
+ mtx_assert(&(pmap)->pm_mtx, (type))
+#define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx)
+#define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, "pmap", \
+ NULL, MTX_DEF | MTX_DUPOK)
+#define PMAP_OWNED(pmap) mtx_owned(&(pmap)->pm_mtx)
+#define PMAP_MTX(pmap) (&(pmap)->pm_mtx)
+#define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx)
+#define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx)
+
+#define ASID_RESERVED_FOR_PID_0 0
+#define ASID_RESERVED_FOR_EFI 1
+#define ASID_FIRST_AVAILABLE (ASID_RESERVED_FOR_EFI + 1)
+#define ASID_TO_OPERAND_SHIFT 48
+#define ASID_TO_OPERAND(asid) ({ \
+ KASSERT((asid) != -1, ("invalid ASID")); \
+ (uint64_t)(asid) << ASID_TO_OPERAND_SHIFT; \
+})
+
+extern vm_offset_t virtual_avail;
+extern vm_offset_t virtual_end;
+
+/*
+ * Macros to test if a mapping is mappable with an L1 Section mapping
+ * or an L2 Large Page mapping.
+ */
+#define L1_MAPPABLE_P(va, pa, size) \
+ ((((va) | (pa)) & L1_OFFSET) == 0 && (size) >= L1_SIZE)
+
+void pmap_activate_vm(pmap_t);
+void pmap_bootstrap(vm_offset_t, vm_offset_t, vm_paddr_t, vm_size_t);
+int pmap_change_attr(vm_offset_t va, vm_size_t size, int mode);
+void pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode);
+void pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t);
+vm_paddr_t pmap_kextract(vm_offset_t va);
+void pmap_kremove(vm_offset_t);
+void pmap_kremove_device(vm_offset_t, vm_size_t);
+void *pmap_mapdev_attr(vm_offset_t pa, vm_size_t size, vm_memattr_t ma);
+bool pmap_page_is_mapped(vm_page_t m);
+int pmap_pinit_stage(pmap_t, enum pmap_stage);
+bool pmap_ps_enabled(pmap_t pmap);
+uint64_t pmap_to_ttbr0(pmap_t pmap);
+
+void *pmap_mapdev(vm_offset_t, vm_size_t);
+void *pmap_mapbios(vm_paddr_t, vm_size_t);
+void pmap_unmapdev(vm_offset_t, vm_size_t);
+void pmap_unmapbios(vm_offset_t, vm_size_t);
+
+boolean_t pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
+void pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
+
+bool pmap_get_tables(pmap_t, vm_offset_t, pd_entry_t **, pd_entry_t **,
+ pd_entry_t **, pt_entry_t **);
+
+int pmap_fault(pmap_t, uint64_t, uint64_t);
+
+struct pcb *pmap_switch(struct thread *, struct thread *);
+
+extern void (*pmap_clean_stage2_tlbi)(void);
+extern void (*pmap_invalidate_vpipt_icache)(void);
+
+static inline int
+pmap_vmspace_copy(pmap_t dst_pmap __unused, pmap_t src_pmap __unused)
+{
+
+ return (0);
+}
+
+#endif /* _KERNEL */
+
+#endif /* !LOCORE */
+
+#endif /* !_MACHINE_PMAP_H_ */
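The PMAP_LOCK family above are thin wrappers around the per-pmap mutex; the usual pattern around code that reads or modifies a pmap's page tables looks like this (sketch only):

	PMAP_LOCK(pmap);
	/* ... walk or update page-table entries for this pmap ... */
	PMAP_ASSERT_LOCKED(pmap);
	PMAP_UNLOCK(pmap);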
diff --git a/sys/arm64/include/pmc_mdep.h b/sys/arm64/include/pmc_mdep.h
new file mode 100644
index 000000000000..5d6f40d0e6c0
--- /dev/null
+++ b/sys/arm64/include/pmc_mdep.h
@@ -0,0 +1,67 @@
+/*-
+ * Copyright (c) 2009 Rui Paulo <rpaulo@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PMC_MDEP_H_
+#define _MACHINE_PMC_MDEP_H_
+
+#define PMC_MDEP_CLASS_INDEX_ARMV8 1
+/*
+ * On the ARMv8 platform we support the following PMCs.
+ *
+ * ARMV8 ARM Cortex-A53/57/72 processors
+ */
+#include <dev/hwpmc/hwpmc_arm64.h>
+
+union pmc_md_op_pmcallocate {
+ uint64_t __pad[4];
+};
+
+/* Logging */
+#define PMCLOG_READADDR PMCLOG_READ64
+#define PMCLOG_EMITADDR PMCLOG_EMIT64
+
+#ifdef _KERNEL
+union pmc_md_pmc {
+ struct pmc_md_arm64_pmc pm_arm64;
+};
+
+#define PMC_IN_KERNEL_STACK(S,START,END) \
+ ((S) >= (START) && (S) < (END))
+#define PMC_IN_KERNEL(va) INKERNEL((va))
+#define PMC_IN_USERSPACE(va) ((va) <= VM_MAXUSER_ADDRESS)
+#define PMC_TRAPFRAME_TO_PC(TF) ((TF)->tf_lr)
+#define PMC_TRAPFRAME_TO_FP(TF) ((TF)->tf_x[29])
+
+/*
+ * Prototypes
+ */
+struct pmc_mdep *pmc_arm64_initialize(void);
+void pmc_arm64_finalize(struct pmc_mdep *_md);
+#endif /* _KERNEL */
+
+#endif /* !_MACHINE_PMC_MDEP_H_ */
diff --git a/sys/arm64/include/proc.h b/sys/arm64/include/proc.h
new file mode 100644
index 000000000000..a844bfc64bcc
--- /dev/null
+++ b/sys/arm64/include/proc.h
@@ -0,0 +1,71 @@
+/*-
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)proc.h 7.1 (Berkeley) 5/15/91
+ * from: FreeBSD: src/sys/i386/include/proc.h,v 1.11 2001/06/29
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PROC_H_
+#define _MACHINE_PROC_H_
+
+struct mdthread {
+ int md_spinlock_count; /* (k) */
+ register_t md_saved_daif; /* (k) */
+};
+
+struct mdproc {
+ long md_dummy;
+};
+
+#define KINFO_PROC_SIZE 1088
+#define KINFO_PROC32_SIZE 816
+
+#define MAXARGS 8
+struct syscall_args {
+ u_int code;
+ struct sysent *callp;
+ register_t args[MAXARGS];
+ int narg;
+};
+
+#ifdef _KERNEL
+
+#include <machine/pcb.h>
+
+#define GET_STACK_USAGE(total, used) do { \
+ struct thread *td = curthread; \
+ (total) = td->td_kstack_pages * PAGE_SIZE - sizeof(struct pcb); \
+ (used) = (char *)td->td_kstack + \
+ td->td_kstack_pages * PAGE_SIZE - \
+ (char *)&td; \
+} while (0)
+
+#endif
+
+#endif /* !_MACHINE_PROC_H_ */
diff --git a/sys/arm64/include/procctl.h b/sys/arm64/include/procctl.h
new file mode 100644
index 000000000000..5221cfcd7be1
--- /dev/null
+++ b/sys/arm64/include/procctl.h
@@ -0,0 +1,4 @@
+/*-
+ * This file is in the public domain.
+ */
+/* $FreeBSD$ */
diff --git a/sys/arm64/include/profile.h b/sys/arm64/include/profile.h
new file mode 100644
index 000000000000..afbfbdb44c0b
--- /dev/null
+++ b/sys/arm64/include/profile.h
@@ -0,0 +1,80 @@
+/*-
+ * SPDX-License-Identifier: MIT-CMU
+ *
+ * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ *
+ * from: NetBSD: profile.h,v 1.9 1997/04/06 08:47:37 cgd Exp
+ * from: FreeBSD: src/sys/alpha/include/profile.h,v 1.4 1999/12/29
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PROFILE_H_
+#define _MACHINE_PROFILE_H_
+
+#if !defined(_KERNEL) && !defined(_SYS_CDEFS_H_)
+#error this file needs sys/cdefs.h as a prerequisite
+#endif
+
+#define FUNCTION_ALIGNMENT 32
+
+typedef u_long fptrdiff_t;
+
+#ifdef _KERNEL
+
+#include <machine/cpufunc.h>
+
+#define _MCOUNT_DECL void mcount
+#define MCOUNT
+
+#define MCOUNT_DECL(s) register_t s;
+#define MCOUNT_ENTER(s) {s = intr_disable(); }
+#define MCOUNT_EXIT(s) {intr_restore(s); }
+
+void bintr(void);
+void btrap(void);
+void eintr(void);
+void user(void);
+
+#define MCOUNT_FROMPC_USER(pc) \
+ ((pc < (uintfptr_t)VM_MAXUSER_ADDRESS) ? (uintfptr_t)user : pc)
+
+#define MCOUNT_FROMPC_INTR(pc) \
+ ((pc >= (uintfptr_t)btrap && pc < (uintfptr_t)eintr) ? \
+ ((pc >= (uintfptr_t)bintr) ? (uintfptr_t)bintr : \
+ (uintfptr_t)btrap) : ~0UL)
+
+void mcount(uintfptr_t frompc, uintfptr_t selfpc);
+
+#else /* !_KERNEL */
+
+typedef __uintfptr_t uintfptr_t;
+
+#define _MCOUNT_DECL void mcount
+#define MCOUNT
+
+#endif /* _KERNEL */
+
+#endif /* !_MACHINE_PROFILE_H_ */
diff --git a/sys/arm64/include/psl.h b/sys/arm64/include/psl.h
new file mode 100644
index 000000000000..da23dbe43a4f
--- /dev/null
+++ b/sys/arm64/include/psl.h
@@ -0,0 +1 @@
+/* $FreeBSD$ */
diff --git a/sys/arm64/include/pte.h b/sys/arm64/include/pte.h
new file mode 100644
index 000000000000..16a72be65fd0
--- /dev/null
+++ b/sys/arm64/include/pte.h
@@ -0,0 +1,158 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * Copyright (c) 2014-2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PTE_H_
+#define _MACHINE_PTE_H_
+
+#ifndef LOCORE
+typedef uint64_t pd_entry_t; /* page directory entry */
+typedef uint64_t pt_entry_t; /* page table entry */
+#endif
+
+/* Block and Page attributes */
+#define ATTR_MASK_H UINT64_C(0xfffc000000000000)
+#define ATTR_MASK_L UINT64_C(0x0000000000000fff)
+#define ATTR_MASK (ATTR_MASK_H | ATTR_MASK_L)
+/* Bits 58:55 are reserved for software */
+#define ATTR_SW_UNUSED2 (1UL << 58)
+#define ATTR_SW_UNUSED1 (1UL << 57)
+#define ATTR_SW_MANAGED (1UL << 56)
+#define ATTR_SW_WIRED (1UL << 55)
+
+#define ATTR_S1_UXN (1UL << 54)
+#define ATTR_S1_PXN (1UL << 53)
+#define ATTR_S1_XN (ATTR_S1_PXN | ATTR_S1_UXN)
+
+#define ATTR_S2_XN(x) ((x) << 53)
+#define ATTR_S2_XN_MASK ATTR_S2_XN(3UL)
+#define ATTR_S2_XN_NONE 0UL /* Allow execution at EL0 & EL1 */
+#define ATTR_S2_XN_EL1 1UL /* Allow execution at EL0 */
+#define ATTR_S2_XN_ALL 2UL /* No execution */
+#define ATTR_S2_XN_EL0 3UL /* Allow execution at EL1 */
+
+#define ATTR_CONTIGUOUS (1UL << 52)
+#define ATTR_DBM (1UL << 51)
+#define ATTR_S1_nG (1 << 11)
+#define ATTR_AF (1 << 10)
+#define ATTR_SH(x) ((x) << 8)
+#define ATTR_SH_MASK ATTR_SH(3)
+#define ATTR_SH_NS 0 /* Non-shareable */
+#define ATTR_SH_OS 2 /* Outer-shareable */
+#define ATTR_SH_IS 3 /* Inner-shareable */
+
+#define ATTR_S1_AP_RW_BIT (1 << 7)
+#define ATTR_S1_AP(x) ((x) << 6)
+#define ATTR_S1_AP_MASK ATTR_S1_AP(3)
+#define ATTR_S1_AP_RW (0 << 1)
+#define ATTR_S1_AP_RO (1 << 1)
+#define ATTR_S1_AP_USER (1 << 0)
+#define ATTR_S1_NS (1 << 5)
+#define ATTR_S1_IDX(x) ((x) << 2)
+#define ATTR_S1_IDX_MASK (7 << 2)
+
+#define ATTR_S2_S2AP(x) ((x) << 6)
+#define ATTR_S2_S2AP_MASK 3
+#define ATTR_S2_S2AP_READ 1
+#define ATTR_S2_S2AP_WRITE 2
+
+#define ATTR_S2_MEMATTR(x) ((x) << 2)
+#define ATTR_S2_MEMATTR_MASK ATTR_S2_MEMATTR(0xf)
+#define ATTR_S2_MEMATTR_DEVICE_nGnRnE 0x0
+#define	 ATTR_S2_MEMATTR_NC		0x5
+#define ATTR_S2_MEMATTR_WT 0xa
+#define ATTR_S2_MEMATTR_WB 0xf
+
+#define ATTR_DEFAULT (ATTR_AF | ATTR_SH(ATTR_SH_IS))
+
+#define ATTR_DESCR_MASK 3
+#define ATTR_DESCR_VALID 1
+#define ATTR_DESCR_TYPE_MASK 2
+#define ATTR_DESCR_TYPE_TABLE 2
+#define ATTR_DESCR_TYPE_PAGE 2
+#define ATTR_DESCR_TYPE_BLOCK 0
+
+/* Level 0 table, 512GiB per entry */
+#define L0_SHIFT 39
+#define L0_SIZE (1ul << L0_SHIFT)
+#define L0_OFFSET (L0_SIZE - 1ul)
+#define L0_INVAL 0x0 /* An invalid address */
+ /* 0x1 Level 0 doesn't support block translation */
+ /* 0x2 also marks an invalid address */
+#define L0_TABLE 0x3 /* A next-level table */
+
+/* Level 1 table, 1GiB per entry */
+#define L1_SHIFT 30
+#define L1_SIZE (1 << L1_SHIFT)
+#define L1_OFFSET (L1_SIZE - 1)
+#define L1_INVAL L0_INVAL
+#define L1_BLOCK 0x1
+#define L1_TABLE L0_TABLE
+
+/* Level 2 table, 2MiB per entry */
+#define L2_SHIFT 21
+#define L2_SIZE (1 << L2_SHIFT)
+#define L2_OFFSET (L2_SIZE - 1)
+#define L2_INVAL L1_INVAL
+#define L2_BLOCK L1_BLOCK
+#define L2_TABLE L1_TABLE
+
+#define L2_BLOCK_MASK UINT64_C(0xffffffe00000)
+
+/* Level 3 table, 4KiB per entry */
+#define L3_SHIFT 12
+#define L3_SIZE (1 << L3_SHIFT)
+#define L3_OFFSET (L3_SIZE - 1)
+#define L3_INVAL 0x0
+ /* 0x1 is reserved */
+ /* 0x2 also marks an invalid address */
+#define L3_PAGE 0x3
+
+#define PMAP_MAPDEV_EARLY_SIZE (L2_SIZE * 8)
+
+#define L0_ENTRIES_SHIFT 9
+#define L0_ENTRIES (1 << L0_ENTRIES_SHIFT)
+#define L0_ADDR_MASK (L0_ENTRIES - 1)
+
+#define Ln_ENTRIES_SHIFT 9
+#define Ln_ENTRIES (1 << Ln_ENTRIES_SHIFT)
+#define Ln_ADDR_MASK (Ln_ENTRIES - 1)
+#define Ln_TABLE_MASK ((1 << 12) - 1)
+
+#define pmap_l0_index(va) (((va) >> L0_SHIFT) & L0_ADDR_MASK)
+#define pmap_l1_index(va) (((va) >> L1_SHIFT) & Ln_ADDR_MASK)
+#define pmap_l2_index(va) (((va) >> L2_SHIFT) & Ln_ADDR_MASK)
+#define pmap_l3_index(va) (((va) >> L3_SHIFT) & Ln_ADDR_MASK)
+
+#endif /* !_MACHINE_PTE_H_ */
+
+/* End of pte.h */
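With the 4 KiB granule each table level resolves nine bits of the virtual address, which is all the pmap_l*_index() macros do; a worked example for an arbitrary address (the value is only illustrative):

	/* va = 0xffff000040201000 */
	pmap_l0_index(va);	/* (va >> 39) & 0x1ff */
	pmap_l1_index(va);	/* (va >> 30) & 0x1ff */
	pmap_l2_index(va);	/* (va >> 21) & 0x1ff */
	pmap_l3_index(va);	/* (va >> 12) & 0x1ff */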
diff --git a/sys/arm64/include/ptrace.h b/sys/arm64/include/ptrace.h
new file mode 100644
index 000000000000..da23dbe43a4f
--- /dev/null
+++ b/sys/arm64/include/ptrace.h
@@ -0,0 +1 @@
+/* $FreeBSD$ */
diff --git a/sys/arm64/include/reg.h b/sys/arm64/include/reg.h
new file mode 100644
index 000000000000..aafe02b70925
--- /dev/null
+++ b/sys/arm64/include/reg.h
@@ -0,0 +1,99 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * Copyright (c) 2014-2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_REG_H_
+#define _MACHINE_REG_H_
+
+struct reg {
+ uint64_t x[30];
+ uint64_t lr;
+ uint64_t sp;
+ uint64_t elr;
+ uint32_t spsr;
+};
+
+struct reg32 {
+ unsigned int r[13];
+ unsigned int r_sp;
+ unsigned int r_lr;
+ unsigned int r_pc;
+ unsigned int r_cpsr;
+};
+
+struct fpreg {
+ __uint128_t fp_q[32];
+ uint32_t fp_sr;
+ uint32_t fp_cr;
+};
+
+struct fpreg32 {
+ int dummy;
+};
+
+struct dbreg {
+ uint32_t db_info;
+ uint32_t db_pad;
+
+ struct {
+ uint64_t dbr_addr;
+ uint32_t dbr_ctrl;
+ uint32_t dbr_pad;
+ } db_regs[16];
+};
+
+struct dbreg32 {
+ int dummy;
+};
+
+#define __HAVE_REG32
+
+#ifdef _KERNEL
+/*
+ * XXX these interfaces are MI, so they should be declared in a MI place.
+ */
+int fill_regs(struct thread *, struct reg *);
+int set_regs(struct thread *, struct reg *);
+int fill_fpregs(struct thread *, struct fpreg *);
+int set_fpregs(struct thread *, struct fpreg *);
+int fill_dbregs(struct thread *, struct dbreg *);
+int set_dbregs(struct thread *, struct dbreg *);
+#ifdef COMPAT_FREEBSD32
+int fill_regs32(struct thread *, struct reg32 *);
+int set_regs32(struct thread *, struct reg32 *);
+int fill_fpregs32(struct thread *, struct fpreg32 *);
+int set_fpregs32(struct thread *, struct fpreg32 *);
+int fill_dbregs32(struct thread *, struct dbreg32 *);
+int set_dbregs32(struct thread *, struct dbreg32 *);
+#endif
+#endif
+
+#endif /* !_MACHINE_REG_H_ */
diff --git a/sys/arm64/include/reloc.h b/sys/arm64/include/reloc.h
new file mode 100644
index 000000000000..da23dbe43a4f
--- /dev/null
+++ b/sys/arm64/include/reloc.h
@@ -0,0 +1 @@
+/* $FreeBSD$ */
diff --git a/sys/arm64/include/resource.h b/sys/arm64/include/resource.h
new file mode 100644
index 000000000000..aef4fad4516d
--- /dev/null
+++ b/sys/arm64/include/resource.h
@@ -0,0 +1,49 @@
+/*-
+ * Copyright 1998 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_RESOURCE_H_
+#define _MACHINE_RESOURCE_H_ 1
+
+/*
+ * Definitions of resource types for arm64 machines. The names are
+ * inherited from the legacy ISA resource types used on other platforms.
+ */
+
+#define SYS_RES_IRQ 1 /* interrupt lines */
+#define SYS_RES_DRQ 2 /* isa dma lines */
+#define SYS_RES_MEMORY 3 /* i/o memory */
+#define SYS_RES_IOPORT 4 /* i/o ports */
+#define SYS_RES_GPIO 5 /* general purpose i/o */
+#ifdef NEW_PCIB
+#define PCI_RES_BUS 6 /* PCI bus numbers */
+#endif
+
+#endif /* !_MACHINE_RESOURCE_H_ */
diff --git a/sys/arm64/include/runq.h b/sys/arm64/include/runq.h
new file mode 100644
index 000000000000..eaeb824a6698
--- /dev/null
+++ b/sys/arm64/include/runq.h
@@ -0,0 +1,46 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_RUNQ_H_
+#define _MACHINE_RUNQ_H_
+
+#define RQB_LEN (1) /* Number of priority status words. */
+#define	RQB_L2BPW	(6)		/* Log2(sizeof(rqb_word_t) * NBBY). */
+#define RQB_BPW (1<<RQB_L2BPW) /* Bits in an rqb_word_t. */
+
+#define RQB_BIT(pri) (1ul << ((pri) & (RQB_BPW - 1)))
+#define RQB_WORD(pri) ((pri) >> RQB_L2BPW)
+
+#define RQB_FFS(word) (ffsl(word) - 1)
+
+/*
+ * Type of run queue status word.
+ */
+typedef unsigned long rqb_word_t;
+
+#endif
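RQB_WORD() and RQB_BIT() split a run-queue priority into a status-word index and a bit within that word; with RQB_L2BPW = 6 (64-bit words) a quick worked example:

	RQB_WORD(37);	/* 37 >> 6   -> word 0        */
	RQB_BIT(37);	/* 1ul << 37 -> 0x2000000000  */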
diff --git a/sys/arm64/include/setjmp.h b/sys/arm64/include/setjmp.h
new file mode 100644
index 000000000000..af11a246cd3f
--- /dev/null
+++ b/sys/arm64/include/setjmp.h
@@ -0,0 +1,73 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * Copyright (c) 2014-2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_SETJMP_H_
+#define _MACHINE_SETJMP_H_
+
+#include <sys/cdefs.h>
+
+/*
+ * We need to store:
+ * - A magic value to differentiate the buffers
+ * - The stack pointer
+ * - The link register
+ * - 11 general purpose registers
+ * - 8 floating point registers
+ * - The signal mask (128 bits)
+ * i.e. 24 64-bit words, round this up to 31(+1) 128-bit words to allow for
+ * CPU extensions with larger registers and stronger alignment requirements.
+ *
+ * The registers to save are: r19 to r29, and d8 to d15.
+ */
+#define _JBLEN 31
+#define _JB_SIGMASK 22
+
+/* This should only be needed in libc and may change */
+#ifdef __ASSEMBLER__
+#define _JB_MAGIC__SETJMP 0xfb5d25837d7ff700
+#define _JB_MAGIC_SETJMP 0xfb5d25837d7ff701
+#endif
+
+#ifndef __ASSEMBLER__
+/*
+ * jmp_buf and sigjmp_buf are encapsulated in different structs to force
+ * compile-time diagnostics for mismatches. The structs are the same
+ * internally to avoid some run-time errors for mismatches.
+ */
+#if __BSD_VISIBLE || __POSIX_VISIBLE || __XSI_VISIBLE
+typedef struct _sigjmp_buf { __int128_t _sjb[_JBLEN + 1]; } sigjmp_buf[1];
+#endif
+
+typedef struct _jmp_buf { __int128_t _jb[_JBLEN + 1]; } jmp_buf[1];
+#endif /* __ASSEMBLER__ */
+
+#endif /* !_MACHINE_SETJMP_H_ */
diff --git a/sys/arm64/include/sf_buf.h b/sys/arm64/include/sf_buf.h
new file mode 100644
index 000000000000..59f9009bd086
--- /dev/null
+++ b/sys/arm64/include/sf_buf.h
@@ -0,0 +1,51 @@
+/*-
+ * Copyright (c) 2003, 2005 Alan L. Cox <alc@cs.rice.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_SF_BUF_H_
+#define _MACHINE_SF_BUF_H_
+
+/*
+ * On this machine, the only purpose for which sf_buf is used is to implement
+ * an opaque pointer required by the machine-independent parts of the kernel.
+ * That pointer references the vm_page that is "mapped" by the sf_buf. The
+ * actual mapping is provided by the direct virtual-to-physical mapping.
+ */
+static inline vm_offset_t
+sf_buf_kva(struct sf_buf *sf)
+{
+
+ return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS((vm_page_t)sf)));
+}
+
+static inline vm_page_t
+sf_buf_page(struct sf_buf *sf)
+{
+
+ return ((vm_page_t)sf);
+}
+#endif /* !_MACHINE_SF_BUF_H_ */
diff --git a/sys/arm64/include/sigframe.h b/sys/arm64/include/sigframe.h
new file mode 100644
index 000000000000..9787f579d563
--- /dev/null
+++ b/sys/arm64/include/sigframe.h
@@ -0,0 +1,2 @@
+/* $FreeBSD$ */
+#include <machine/frame.h>
diff --git a/sys/arm64/include/signal.h b/sys/arm64/include/signal.h
new file mode 100644
index 000000000000..6c8ac5cabab5
--- /dev/null
+++ b/sys/arm64/include/signal.h
@@ -0,0 +1,50 @@
+/*-
+ * Copyright (c) 1986, 1989, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)signal.h 8.1 (Berkeley) 6/11/93
+ * from: FreeBSD: src/sys/i386/include/signal.h,v 1.13 2000/11/09
+ * from: FreeBSD: src/sys/sparc64/include/signal.h,v 1.6 2001/09/30 18:52:17
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_SIGNAL_H_
+#define _MACHINE_SIGNAL_H_
+
+#include <sys/cdefs.h>
+
+typedef long sig_atomic_t;
+
+#if __BSD_VISIBLE
+
+struct sigcontext {
+ int _dummy;
+};
+
+#endif
+
+#endif /* !_MACHINE_SIGNAL_H_ */
diff --git a/sys/arm64/include/smp.h b/sys/arm64/include/smp.h
new file mode 100644
index 000000000000..538981a954f0
--- /dev/null
+++ b/sys/arm64/include/smp.h
@@ -0,0 +1,55 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner <andrew@FreeBSD.org>
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_SMP_H_
+#define _MACHINE_SMP_H_
+
+#include <machine/pcb.h>
+
+enum {
+ IPI_AST,
+ IPI_PREEMPT,
+ IPI_RENDEZVOUS,
+ IPI_STOP,
+ IPI_STOP_HARD,
+ IPI_HARDCLOCK,
+ INTR_IPI_COUNT,
+};
+
+void ipi_all_but_self(u_int ipi);
+void ipi_cpu(int cpu, u_int ipi);
+void ipi_selected(cpuset_t cpus, u_int ipi);
+
+/* global data in mp_machdep.c */
+extern struct pcb stoppcbs[];
+
+#endif /* !_MACHINE_SMP_H_ */
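
A hedged sketch of how the IPI enumerators and senders above fit together; it mirrors what the machine-independent scheduler effectively does when it wants another CPU to reschedule, and is illustrative only:

/*
 * Illustrative only: ask a specific CPU to reschedule its current thread.
 */
static void
preempt_cpu_sketch(int cpu)
{

	ipi_cpu(cpu, IPI_PREEMPT);
}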
diff --git a/sys/arm64/include/stack.h b/sys/arm64/include/stack.h
new file mode 100644
index 000000000000..db0d4ab38d4d
--- /dev/null
+++ b/sys/arm64/include/stack.h
@@ -0,0 +1,43 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_STACK_H_
+#define _MACHINE_STACK_H_
+
+#define INKERNEL(va) \
+ ((va) >= VM_MIN_KERNEL_ADDRESS && (va) <= VM_MAX_KERNEL_ADDRESS)
+
+struct unwind_state {
+ uint64_t fp;
+ uint64_t sp;
+ uint64_t pc;
+};
+
+int unwind_frame(struct unwind_state *);
+
+#endif /* !_MACHINE_STACK_H_ */
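
A hedged sketch of how this interface is typically driven, modelled on the kernel's own stack walkers; treating a non-zero return from unwind_frame() as failure is an assumption here, so read the loop as illustrative rather than definitive:

/*
 * Illustrative only: walk saved frames until the frame pointer leaves
 * the kernel address range or unwind_frame() reports failure (assumed
 * to be a non-zero return).
 */
static void
stack_walk_sketch(struct unwind_state *frame)
{

	while (INKERNEL(frame->fp) && INKERNEL(frame->pc)) {
		printf("pc %#lx fp %#lx sp %#lx\n",
		    frame->pc, frame->fp, frame->sp);
		if (unwind_frame(frame) != 0)
			break;
	}
}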
diff --git a/sys/arm64/include/stdarg.h b/sys/arm64/include/stdarg.h
new file mode 100644
index 000000000000..acb526429ac2
--- /dev/null
+++ b/sys/arm64/include/stdarg.h
@@ -0,0 +1,39 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2017 Poul-Henning Kamp. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_STDARG_H_
+#define _MACHINE_STDARG_H_
+
+#include <sys/_stdarg.h>
+
+#ifndef va_start
+ #error this file needs to be ported to your compiler
+#endif
+
+#endif /* !_MACHINE_STDARG_H_ */
diff --git a/sys/arm64/include/sysarch.h b/sys/arm64/include/sysarch.h
new file mode 100644
index 000000000000..4d6c547fd3df
--- /dev/null
+++ b/sys/arm64/include/sysarch.h
@@ -0,0 +1,47 @@
+/*-
+ * Copyright (c) 1993 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: FreeBSD: src/sys/i386/include/sysarch.h,v 1.14 2000/09/21
+ * $FreeBSD$
+ */
+
+/*
+ * Architecture specific syscalls (arm64)
+ */
+#ifndef _MACHINE_SYSARCH_H_
+#define _MACHINE_SYSARCH_H_
+
+#ifndef _KERNEL
+
+__BEGIN_DECLS
+int sysarch(int _number, void *_args);
+__END_DECLS
+
+#endif
+
+#endif /* !_MACHINE_SYSARCH_H_ */
diff --git a/sys/arm64/include/trap.h b/sys/arm64/include/trap.h
new file mode 100644
index 000000000000..da23dbe43a4f
--- /dev/null
+++ b/sys/arm64/include/trap.h
@@ -0,0 +1 @@
+/* $FreeBSD$ */
diff --git a/sys/arm64/include/ucontext.h b/sys/arm64/include/ucontext.h
new file mode 100644
index 000000000000..a81fdf9ad724
--- /dev/null
+++ b/sys/arm64/include/ucontext.h
@@ -0,0 +1,89 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * Copyright (c) 2014-2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_UCONTEXT_H_
+#define _MACHINE_UCONTEXT_H_
+
+struct gpregs {
+ __register_t gp_x[30];
+ __register_t gp_lr;
+ __register_t gp_sp;
+ __register_t gp_elr;
+ __uint32_t gp_spsr;
+ int gp_pad;
+};
+
+struct fpregs {
+ __uint128_t fp_q[32];
+ __uint32_t fp_sr;
+ __uint32_t fp_cr;
+ int fp_flags;
+ int fp_pad;
+};
+
+struct __mcontext {
+ struct gpregs mc_gpregs;
+ struct fpregs mc_fpregs;
+ int mc_flags;
+#define _MC_FP_VALID 0x1 /* Set when mc_fpregs has valid data */
+ int mc_pad; /* Padding */
+ __uint64_t mc_spare[8]; /* Space for expansion, set to zero */
+};
+
+typedef struct __mcontext mcontext_t;
+
+#ifdef COMPAT_FREEBSD32
+#include <compat/freebsd32/freebsd32_signal.h>
+typedef struct __mcontext32 {
+ uint32_t mc_gregset[17];
+ uint32_t mc_vfp_size;
+ uint32_t mc_vfp_ptr;
+ uint32_t mc_spare[33];
+} mcontext32_t;
+
+typedef struct __ucontext32 {
+ sigset_t uc_sigmask;
+ mcontext32_t uc_mcontext;
+ u_int32_t uc_link;
+ struct sigaltstack32 uc_stack;
+ u_int32_t uc_flags;
+ u_int32_t __spare__[4];
+} ucontext32_t;
+
+typedef struct __mcontext32_vfp {
+ __uint64_t mcv_reg[32];
+ __uint32_t mcv_fpscr;
+} mcontext32_vfp_t;
+
+#endif /* COMPAT_FREEBSD32 */
+
+#endif /* !_MACHINE_UCONTEXT_H_ */
diff --git a/sys/arm64/include/undefined.h b/sys/arm64/include/undefined.h
new file mode 100644
index 000000000000..b13aa34cf47c
--- /dev/null
+++ b/sys/arm64/include/undefined.h
@@ -0,0 +1,68 @@
+/*-
+ * Copyright (c) 2017 Andrew Turner
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE__UNDEFINED_H_
+#define _MACHINE__UNDEFINED_H_
+
+#ifdef _KERNEL
+
+typedef int (*undef_handler_t)(vm_offset_t, uint32_t, struct trapframe *,
+ uint32_t);
+
+static inline int
+mrs_Op0(uint32_t insn)
+{
+
+ /* op0 is encoded without its top bit in an MRS instruction */
+ return (2 | ((insn & MRS_Op0_MASK) >> MRS_Op0_SHIFT));
+}
+
+#define MRS_GET(op) \
+static inline int \
+mrs_##op(uint32_t insn) \
+{ \
+ \
+ return ((insn & MRS_##op##_MASK) >> MRS_##op##_SHIFT); \
+}
+MRS_GET(Op1)
+MRS_GET(CRn)
+MRS_GET(CRm)
+MRS_GET(Op2)
+
+void undef_init(void);
+void *install_undef_handler(bool, undef_handler_t);
+void remove_undef_handler(void *);
+int undef_insn(u_int, struct trapframe *);
+
+#endif /* _KERNEL */
+
+#endif
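
A hedged sketch of registering a handler through this interface. MRS_MASK and MRS_VALUE are assumed to come from <machine/armreg.h>, and the meaning of the boolean passed to install_undef_handler() is an assumption; the handler itself only logs the decoded system-register fields:

/*
 * Illustrative only: a handler that decodes the system-register fields
 * of a trapping MRS instruction and declines to handle anything else.
 * Returning 1 is assumed to mean "handled"; 0 re-raises the fault.
 */
static void *undef_sketch_cookie;

static int
undef_mrs_sketch(vm_offset_t va, uint32_t insn, struct trapframe *frame,
    uint32_t esr)
{

	/* MRS_MASK/MRS_VALUE are assumed to be defined in armreg.h. */
	if ((insn & MRS_MASK) != MRS_VALUE)
		return (0);

	printf("undefined MRS at %#lx: op0=%d op1=%d CRn=%d CRm=%d op2=%d\n",
	    va, mrs_Op0(insn), mrs_Op1(insn), mrs_CRn(insn), mrs_CRm(insn),
	    mrs_Op2(insn));
	return (1);
}

static void
undef_sketch_init(void)
{

	/* The boolean is assumed to select the user-mode handler list. */
	undef_sketch_cookie = install_undef_handler(true, undef_mrs_sketch);
}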
diff --git a/sys/arm64/include/vdso.h b/sys/arm64/include/vdso.h
new file mode 100644
index 000000000000..fef74df39663
--- /dev/null
+++ b/sys/arm64/include/vdso.h
@@ -0,0 +1,39 @@
+/*-
+ * Copyright 2012 Konstantin Belousov <kib@FreeBSD.ORG>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_VDSO_H_
+#define _MACHINE_VDSO_H_
+
+#define VDSO_TIMEHANDS_MD \
+ uint32_t th_physical; \
+ uint32_t th_res[7];
+
+#define VDSO_TH_ALGO_ARM_GENTIM VDSO_TH_ALGO_1
+
+#define VDSO_TIMEHANDS_MD32 VDSO_TIMEHANDS_MD
+
+#endif /* !_MACHINE_VDSO_H_ */
diff --git a/sys/arm64/include/vfp.h b/sys/arm64/include/vfp.h
new file mode 100644
index 000000000000..b4b9bb524d30
--- /dev/null
+++ b/sys/arm64/include/vfp.h
@@ -0,0 +1,77 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_VFP_H_
+#define _MACHINE_VFP_H_
+
+#ifndef LOCORE
+struct vfpstate {
+ __uint128_t vfp_regs[32];
+ uint32_t vfp_fpcr;
+ uint32_t vfp_fpsr;
+};
+
+#ifdef _KERNEL
+struct pcb;
+struct thread;
+
+void vfp_init(void);
+void vfp_discard(struct thread *);
+void vfp_restore_state(void);
+void vfp_save_state(struct thread *, struct pcb *);
+
+struct fpu_kern_ctx;
+
+/*
+ * Flags for fpu_kern_alloc_ctx(), fpu_kern_enter() and fpu_kern_thread().
+ */
+#define FPU_KERN_NORMAL 0x0000
+#define FPU_KERN_NOWAIT 0x0001
+#define FPU_KERN_KTHR 0x0002
+#define FPU_KERN_NOCTX 0x0004
+
+struct fpu_kern_ctx *fpu_kern_alloc_ctx(u_int);
+void fpu_kern_free_ctx(struct fpu_kern_ctx *);
+void fpu_kern_enter(struct thread *, struct fpu_kern_ctx *, u_int);
+int fpu_kern_leave(struct thread *, struct fpu_kern_ctx *);
+int fpu_kern_thread(u_int);
+int is_fpu_kern_thread(u_int);
+
+/* Convert between the AArch32 FPSCR and the AArch64 FPCR/FPSR */
+#define VFP_FPSCR_FROM_SRCR(vpsr, vpcr) ((vpsr) | ((vpcr) & 0x7c00000))
+#define VFP_FPSR_FROM_FPSCR(vpscr) ((vpscr) &~ 0x7c00000)
+#define VFP_FPCR_FROM_FPSCR(vpsrc) ((vpsrc) & 0x7c00000)
+
+#endif
+
+#endif
+
+#endif /* !_MACHINE_VFP_H_ */
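
A hedged sketch of the usual bracket for touching the FP/SIMD registers from kernel code using the functions declared above; the context is allocated lazily and the work inside the bracket is a placeholder:

/*
 * Illustrative only: enter an FPU kernel context, do SIMD work, leave.
 */
static struct fpu_kern_ctx *vfp_sketch_ctx;

static void
vfp_kern_sketch(void)
{

	if (vfp_sketch_ctx == NULL)
		vfp_sketch_ctx = fpu_kern_alloc_ctx(FPU_KERN_NORMAL);

	fpu_kern_enter(curthread, vfp_sketch_ctx, FPU_KERN_NORMAL);
	/* ... code that uses the q0-q31/FPCR/FPSR state goes here ... */
	fpu_kern_leave(curthread, vfp_sketch_ctx);
}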
diff --git a/sys/arm64/include/vm.h b/sys/arm64/include/vm.h
new file mode 100644
index 000000000000..dac13980060e
--- /dev/null
+++ b/sys/arm64/include/vm.h
@@ -0,0 +1,45 @@
+/*-
+ * Copyright (c) 2009 Alan L. Cox <alc@cs.rice.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_VM_H_
+#define _MACHINE_VM_H_
+
+/* Memory attribute configuration. */
+#define VM_MEMATTR_DEVICE 0
+#define VM_MEMATTR_UNCACHEABLE 1
+#define VM_MEMATTR_WRITE_BACK 2
+#define VM_MEMATTR_WRITE_THROUGH 3
+
+#ifdef _KERNEL
+/* If defined, vmstat will try to use both of these in a switch statement. */
+#define VM_MEMATTR_WRITE_COMBINING VM_MEMATTR_WRITE_THROUGH
+#endif
+
+#define VM_MEMATTR_DEFAULT VM_MEMATTR_WRITE_BACK
+
+#endif /* !_MACHINE_VM_H_ */
diff --git a/sys/arm64/include/vmparam.h b/sys/arm64/include/vmparam.h
new file mode 100644
index 000000000000..96b46e575483
--- /dev/null
+++ b/sys/arm64/include/vmparam.h
@@ -0,0 +1,246 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ * Copyright (c) 1994 John S. Dyson
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)vmparam.h 5.9 (Berkeley) 5/12/91
+ * from: FreeBSD: src/sys/i386/include/vmparam.h,v 1.33 2000/03/30
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_VMPARAM_H_
+#define _MACHINE_VMPARAM_H_
+
+/*
+ * Virtual memory related constants, all in bytes
+ */
+#ifndef MAXTSIZ
+#define MAXTSIZ (1*1024*1024*1024) /* max text size */
+#endif
+#ifndef DFLDSIZ
+#define DFLDSIZ (128*1024*1024) /* initial data size limit */
+#endif
+#ifndef MAXDSIZ
+#define MAXDSIZ (1*1024*1024*1024) /* max data size */
+#endif
+#ifndef DFLSSIZ
+#define DFLSSIZ (128*1024*1024) /* initial stack size limit */
+#endif
+#ifndef MAXSSIZ
+#define MAXSSIZ (1*1024*1024*1024) /* max stack size */
+#endif
+#ifndef SGROWSIZ
+#define SGROWSIZ (128*1024) /* amount to grow stack */
+#endif
+
+/*
+ * The physical address space is sparsely populated.
+ */
+#define VM_PHYSSEG_SPARSE
+
+/*
+ * The number of PHYSSEG entries.
+ */
+#define VM_PHYSSEG_MAX 64
+
+/*
+ * Create two free page pools: VM_FREEPOOL_DEFAULT is the default pool
+ * from which physical pages are allocated and VM_FREEPOOL_DIRECT is
+ * the pool from which physical pages for small UMA objects are
+ * allocated.
+ */
+#define VM_NFREEPOOL 2
+#define VM_FREEPOOL_DEFAULT 0
+#define VM_FREEPOOL_DIRECT 1
+
+/*
+ * Create one free page list: VM_FREELIST_DEFAULT is for all physical
+ * pages.
+ */
+#define VM_NFREELIST 1
+#define VM_FREELIST_DEFAULT 0
+
+/*
+ * The largest contiguous allocation supported by the physical memory
+ * allocator is 2^(VM_NFREEORDER - 1) base pages.  A comparatively large
+ * maximum optimizes the use of the direct map by UMA: the fewer distinct
+ * large "pages" UMA draws its allocations from, the lower the likelihood
+ * of TLB misses on the direct map and of the cache misses they cause.
+ */
+#define VM_NFREEORDER 12
+
+/*
+ * Enable superpage reservations: 1 level.
+ */
+#ifndef VM_NRESERVLEVEL
+#define VM_NRESERVLEVEL 1
+#endif
+
+/*
+ * Level 0 reservations consist of 512 pages.
+ */
+#ifndef VM_LEVEL_0_ORDER
+#define VM_LEVEL_0_ORDER 9
+#endif
+
+/**
+ * Address space layout.
+ *
+ * ARMv8 implements up to a 48 bit virtual address space. The address space is
+ * split into 2 regions at each end of the 64 bit address space, with an
+ * out of range "hole" in the middle.
+ *
+ * We use the full 48 bits for each region; the kernel, however, may only
+ * use a limited range within this space.
+ *
+ * Upper region: 0xffffffffffffffff Top of virtual memory
+ *
+ * 0xffffff0000000000 End of DMAP (DMAP_MAX_ADDRESS)
+ * 0xffffa00000000000 Start of DMAP (DMAP_MIN_ADDRESS)
+ *
+ * 0xffff007fffffffff End of KVA
+ * 0xffff000000000000 Kernel base address & start of KVA
+ *
+ * Hole: 0xfffeffffffffffff
+ * 0x0001000000000000
+ *
+ * Lower region: 0x0000ffffffffffff End of user address space
+ * 0x0000000000000000 Start of user address space
+ *
+ * We use the upper region for the kernel, and the lower region for userland.
+ *
+ * We define some interesting address constants:
+ *
+ * VM_MIN_ADDRESS and VM_MAX_ADDRESS define the start and end of the entire
+ * 64 bit address space, mostly just for convenience.
+ *
+ * VM_MIN_KERNEL_ADDRESS and VM_MAX_KERNEL_ADDRESS define the start and end of
+ * mappable kernel virtual address space.
+ *
+ * VM_MIN_USER_ADDRESS and VM_MAX_USER_ADDRESS define the start and end of the
+ * user address space.
+ */
+#define VM_MIN_ADDRESS (0x0000000000000000UL)
+#define VM_MAX_ADDRESS (0xffffffffffffffffUL)
+
+/* 512 GiB of kernel addresses */
+#define VM_MIN_KERNEL_ADDRESS (0xffff000000000000UL)
+#define VM_MAX_KERNEL_ADDRESS (0xffff008000000000UL)
+
+/* 95 TiB maximum for the direct map region */
+#define DMAP_MIN_ADDRESS (0xffffa00000000000UL)
+#define DMAP_MAX_ADDRESS (0xffffff0000000000UL)
+
+#define DMAP_MIN_PHYSADDR (dmap_phys_base)
+#define DMAP_MAX_PHYSADDR (dmap_phys_max)
+
+/* True if pa is in the dmap range */
+#define PHYS_IN_DMAP(pa) ((pa) >= DMAP_MIN_PHYSADDR && \
+ (pa) < DMAP_MAX_PHYSADDR)
+/* True if va is in the dmap range */
+#define VIRT_IN_DMAP(va) ((va) >= DMAP_MIN_ADDRESS && \
+ (va) < (dmap_max_addr))
+
+#define PMAP_HAS_DMAP 1
+#define PHYS_TO_DMAP(pa) \
+({ \
+ KASSERT(PHYS_IN_DMAP(pa), \
+ ("%s: PA out of range, PA: 0x%lx", __func__, \
+ (vm_paddr_t)(pa))); \
+ ((pa) - dmap_phys_base) + DMAP_MIN_ADDRESS; \
+})
+
+#define DMAP_TO_PHYS(va) \
+({ \
+ KASSERT(VIRT_IN_DMAP(va), \
+ ("%s: VA out of range, VA: 0x%lx", __func__, \
+ (vm_offset_t)(va))); \
+ ((va) - DMAP_MIN_ADDRESS) + dmap_phys_base; \
+})
+
+#define VM_MIN_USER_ADDRESS (0x0000000000000000UL)
+#define VM_MAX_USER_ADDRESS (0x0001000000000000UL)
+
+#define VM_MINUSER_ADDRESS (VM_MIN_USER_ADDRESS)
+#define VM_MAXUSER_ADDRESS (VM_MAX_USER_ADDRESS)
+
+#define KERNBASE (VM_MIN_KERNEL_ADDRESS)
+#define SHAREDPAGE (VM_MAXUSER_ADDRESS - PAGE_SIZE)
+#define USRSTACK SHAREDPAGE
+
+/*
+ * How many physical pages per kmem arena virtual page.
+ */
+#ifndef VM_KMEM_SIZE_SCALE
+#define VM_KMEM_SIZE_SCALE (3)
+#endif
+
+/*
+ * Optional floor (in bytes) on the size of the kmem arena.
+ */
+#ifndef VM_KMEM_SIZE_MIN
+#define VM_KMEM_SIZE_MIN (16 * 1024 * 1024)
+#endif
+
+/*
+ * Optional ceiling (in bytes) on the size of the kmem arena: 60% of the
+ * kernel map.
+ */
+#ifndef VM_KMEM_SIZE_MAX
+#define VM_KMEM_SIZE_MAX ((VM_MAX_KERNEL_ADDRESS - \
+ VM_MIN_KERNEL_ADDRESS + 1) * 3 / 5)
+#endif
+
+/*
+ * Initial pagein size of beginning of executable file.
+ */
+#ifndef VM_INITIAL_PAGEIN
+#define VM_INITIAL_PAGEIN 16
+#endif
+
+#define UMA_MD_SMALL_ALLOC
+
+#ifndef LOCORE
+
+extern vm_paddr_t dmap_phys_base;
+extern vm_paddr_t dmap_phys_max;
+extern vm_offset_t dmap_max_addr;
+extern vm_offset_t vm_max_kernel_address;
+extern vm_offset_t init_pt_va;
+
+#endif
+
+#define ZERO_REGION_SIZE (64 * 1024) /* 64KB */
+
+#define DEVMAP_MAX_VADDR VM_MAX_KERNEL_ADDRESS
+
+#endif /* !_MACHINE_VMPARAM_H_ */
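
A hedged sketch of how the DMAP macros above are meant to be used: any physical page that falls inside the direct-map window can be addressed through a kernel pointer without creating a temporary mapping, and DMAP_TO_PHYS() inverts the translation. The helper below is illustrative only:

/*
 * Illustrative only: translate a physical address into a directly
 * usable kernel virtual address, if it is covered by the DMAP.
 */
static void *
dmap_lookup_sketch(vm_paddr_t pa)
{
	vm_offset_t va;

	if (!PHYS_IN_DMAP(pa))
		return (NULL);
	va = PHYS_TO_DMAP(pa);
	KASSERT(DMAP_TO_PHYS(va) == pa, ("DMAP round trip failed"));
	return ((void *)va);
}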
diff --git a/sys/arm64/intel/firmware.c b/sys/arm64/intel/firmware.c
new file mode 100644
index 000000000000..083caaf94755
--- /dev/null
+++ b/sys/arm64/intel/firmware.c
@@ -0,0 +1,122 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory (Department of Computer Science and
+ * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
+ * DARPA SSITH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+
+#include <dev/fdt/simplebus.h>
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+struct firmware_softc {
+ struct simplebus_softc simplebus_sc;
+ device_t dev;
+};
+
+static int
+firmware_probe(device_t dev)
+{
+ phandle_t node;
+
+ node = ofw_bus_get_node(dev);
+
+ /*
+ * The firmware node has no "compatible" property of its own.
+ * Look for a known child instead.
+ */
+ if (!fdt_depth_search_compatible(node, "intel,stratix10-svc", 0))
+ return (ENXIO);
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ device_set_desc(dev, "Firmware node");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+firmware_attach(device_t dev)
+{
+ struct firmware_softc *sc;
+ phandle_t node;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ node = ofw_bus_get_node(dev);
+ if (node == -1)
+ return (ENXIO);
+
+ simplebus_init(dev, node);
+
+ /*
+ * Allow child devices to identify themselves.
+ */
+ bus_generic_probe(dev);
+
+ /*
+ * Now walk the OFW tree and attach top-level devices.
+ */
+ for (node = OF_child(node); node > 0; node = OF_peer(node))
+ simplebus_add_device(dev, node, 0, NULL, -1, NULL);
+
+ return (bus_generic_attach(dev));
+}
+
+static int
+firmware_detach(device_t dev)
+{
+
+ return (0);
+}
+
+static device_method_t firmware_methods[] = {
+ DEVMETHOD(device_probe, firmware_probe),
+ DEVMETHOD(device_attach, firmware_attach),
+ DEVMETHOD(device_detach, firmware_detach),
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(firmware, firmware_driver, firmware_methods,
+ sizeof(struct firmware_softc), simplebus_driver);
+
+static devclass_t firmware_devclass;
+
+EARLY_DRIVER_MODULE(firmware, simplebus, firmware_driver, firmware_devclass,
+ 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
+MODULE_VERSION(firmware, 1);
diff --git a/sys/arm64/intel/intel-smc.h b/sys/arm64/intel/intel-smc.h
new file mode 100644
index 000000000000..5800b332b91c
--- /dev/null
+++ b/sys/arm64/intel/intel-smc.h
@@ -0,0 +1,99 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory (Department of Computer Science and
+ * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
+ * DARPA SSITH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ARM64_INTEL_INTEL_SMC_H_
+#define _ARM64_INTEL_INTEL_SMC_H_
+
+#include <dev/psci/smccc.h>
+
+/*
+ * Intel SiP return values.
+ */
+#define INTEL_SIP_SMC_STATUS_OK 0
+#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY 1
+#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_REJECTED 2
+#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR 4
+#define INTEL_SIP_SMC_REG_ERROR 5
+#define INTEL_SIP_SMC_RSU_ERROR 7
+
+/*
+ * Intel SiP calls.
+ */
+#define INTEL_SIP_SMC_STD_CALL(func) \
+ SMCCC_FUNC_ID(SMCCC_YIELDING_CALL, SMCCC_64BIT_CALL, \
+ SMCCC_SIP_SERVICE_CALLS, (func))
+#define INTEL_SIP_SMC_FAST_CALL(func) \
+ SMCCC_FUNC_ID(SMCCC_FAST_CALL, SMCCC_64BIT_CALL, \
+ SMCCC_SIP_SERVICE_CALLS, (func))
+
+#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_START 1
+#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_WRITE 2
+#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE 3
+#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_ISDONE 4
+#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_GET_MEM 5
+#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_LOOPBACK 6
+#define INTEL_SIP_SMC_FUNCID_REG_READ 7
+#define INTEL_SIP_SMC_FUNCID_REG_WRITE 8
+#define INTEL_SIP_SMC_FUNCID_REG_UPDATE 9
+#define INTEL_SIP_SMC_FUNCID_RSU_STATUS 11
+#define INTEL_SIP_SMC_FUNCID_RSU_UPDATE 12
+
+#define INTEL_SIP_SMC_FPGA_CONFIG_START \
+ INTEL_SIP_SMC_FAST_CALL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_START)
+#define INTEL_SIP_SMC_FPGA_CONFIG_WRITE \
+ INTEL_SIP_SMC_STD_CALL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_WRITE)
+#define INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE \
+ INTEL_SIP_SMC_FAST_CALL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
+#define INTEL_SIP_SMC_FPGA_CONFIG_ISDONE \
+ INTEL_SIP_SMC_FAST_CALL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_ISDONE)
+#define INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM \
+ INTEL_SIP_SMC_FAST_CALL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_GET_MEM)
+#define INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK \
+ INTEL_SIP_SMC_FAST_CALL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_LOOPBACK)
+#define INTEL_SIP_SMC_REG_READ \
+ INTEL_SIP_SMC_FAST_CALL(INTEL_SIP_SMC_FUNCID_REG_READ)
+#define INTEL_SIP_SMC_REG_WRITE \
+ INTEL_SIP_SMC_FAST_CALL(INTEL_SIP_SMC_FUNCID_REG_WRITE)
+#define INTEL_SIP_SMC_REG_UPDATE \
+ INTEL_SIP_SMC_FAST_CALL(INTEL_SIP_SMC_FUNCID_REG_UPDATE)
+#define INTEL_SIP_SMC_RSU_STATUS \
+ INTEL_SIP_SMC_FAST_CALL(INTEL_SIP_SMC_FUNCID_RSU_STATUS)
+#define INTEL_SIP_SMC_RSU_UPDATE \
+ INTEL_SIP_SMC_FAST_CALL(INTEL_SIP_SMC_FUNCID_RSU_UPDATE)
+
+typedef int (*intel_smc_callfn_t)(register_t, register_t, register_t,
+ register_t, register_t, register_t, register_t, register_t,
+ struct arm_smccc_res *res);
+
+#endif /* _ARM64_INTEL_INTEL_SMC_H_ */
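
A hedged sketch of issuing one of these SiP calls through an intel_smc_callfn_t conduit. Treating res.a1/res.a2 as the returned buffer address and size follows the service-layer driver later in this change; the conduit would normally be arm_smccc_smc or arm_smccc_hvc, chosen from the device tree:

/*
 * Illustrative only: ask the secure monitor for the shared FPGA
 * configuration buffer and report it to the caller.
 */
static int
sip_get_mem_sketch(intel_smc_callfn_t callfn, bus_addr_t *addr, size_t *size)
{
	struct arm_smccc_res res;

	callfn(INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM, 0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != INTEL_SIP_SMC_STATUS_OK)
		return (ENXIO);

	*addr = res.a1;
	*size = res.a2;
	return (0);
}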
diff --git a/sys/arm64/intel/stratix10-soc-fpga-mgr.c b/sys/arm64/intel/stratix10-soc-fpga-mgr.c
new file mode 100644
index 000000000000..f4fa600d517f
--- /dev/null
+++ b/sys/arm64/intel/stratix10-soc-fpga-mgr.c
@@ -0,0 +1,290 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory (Department of Computer Science and
+ * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
+ * DARPA SSITH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Intel Stratix 10 FPGA Manager.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/malloc.h>
+#include <sys/rman.h>
+#include <sys/timeet.h>
+#include <sys/timetc.h>
+#include <sys/conf.h>
+#include <sys/uio.h>
+#include <sys/sx.h>
+
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <arm64/intel/stratix10-svc.h>
+
+#include <machine/bus.h>
+#include <machine/cpu.h>
+#include <machine/intr.h>
+
+#define SVC_BUF_SIZE (2 * 1024 * 1024)
+
+struct fpgamgr_s10_softc {
+ struct cdev *mgr_cdev;
+ struct cdev *mgr_cdev_partial;
+ device_t dev;
+ device_t s10_svc_dev;
+ struct s10_svc_mem mem;
+ struct sx sx;
+ int opened;
+};
+
+static int
+fpga_open(struct cdev *dev, int flags __unused,
+ int fmt __unused, struct thread *td __unused)
+{
+ struct fpgamgr_s10_softc *sc;
+ struct s10_svc_msg msg;
+ int ret;
+ int err;
+
+ sc = dev->si_drv1;
+
+ sx_xlock(&sc->sx);
+ if (sc->opened) {
+ sx_xunlock(&sc->sx);
+ return (EBUSY);
+ }
+
+ err = s10_svc_allocate_memory(sc->s10_svc_dev,
+ &sc->mem, SVC_BUF_SIZE);
+ if (err != 0) {
+ sx_xunlock(&sc->sx);
+ return (ENXIO);
+ }
+
+ bzero(&msg, sizeof(struct s10_svc_msg));
+ msg.command = COMMAND_RECONFIG;
+ if (dev == sc->mgr_cdev_partial)
+ msg.flags |= COMMAND_RECONFIG_FLAG_PARTIAL;
+ ret = s10_svc_send(sc->s10_svc_dev, &msg);
+ if (ret != 0) {
+ sx_xunlock(&sc->sx);
+ return (ENXIO);
+ }
+
+ sc->opened = 1;
+ sx_xunlock(&sc->sx);
+
+ return (0);
+}
+
+static int
+fpga_write(struct cdev *dev, struct uio *uio, int ioflag)
+{
+ struct fpgamgr_s10_softc *sc;
+ vm_offset_t addr;
+ int amnt;
+
+ sc = dev->si_drv1;
+
+ sx_xlock(&sc->sx);
+ if (sc->opened == 0) {
+ /* Device closed. */
+ sx_xunlock(&sc->sx);
+ return (ENXIO);
+ }
+
+ while (uio->uio_resid > 0) {
+ addr = sc->mem.vaddr + sc->mem.fill;
+ if (sc->mem.fill >= SVC_BUF_SIZE) {
+ /* The buffer is full; drop the lock before failing. */
+ sx_xunlock(&sc->sx);
+ return (ENOMEM);
+ }
+ amnt = MIN(uio->uio_resid, (SVC_BUF_SIZE - sc->mem.fill));
+ uiomove((void *)addr, amnt, uio);
+ sc->mem.fill += amnt;
+ }
+
+ sx_xunlock(&sc->sx);
+
+ return (0);
+}
+
+static int
+fpga_close(struct cdev *dev, int flags __unused,
+ int fmt __unused, struct thread *td __unused)
+{
+ struct fpgamgr_s10_softc *sc;
+ struct s10_svc_msg msg;
+ int ret;
+
+ sc = dev->si_drv1;
+
+ sx_xlock(&sc->sx);
+ if (sc->opened == 0) {
+ /* Device closed. */
+ sx_xunlock(&sc->sx);
+ return (ENXIO);
+ }
+
+ /* Submit bitstream */
+ bzero(&msg, sizeof(struct s10_svc_msg));
+ msg.command = COMMAND_RECONFIG_DATA_SUBMIT;
+ msg.payload = (void *)sc->mem.paddr;
+ msg.payload_length = sc->mem.fill;
+ ret = s10_svc_send(sc->s10_svc_dev, &msg);
+ if (ret != 0) {
+ device_printf(sc->dev, "Failed to submit data\n");
+ s10_svc_free_memory(sc->s10_svc_dev, &sc->mem);
+ sc->opened = 0;
+ sx_xunlock(&sc->sx);
+ return (0);
+ }
+
+ /* Claim memory buffer back */
+ bzero(&msg, sizeof(struct s10_svc_msg));
+ msg.command = COMMAND_RECONFIG_DATA_CLAIM;
+ s10_svc_send(sc->s10_svc_dev, &msg);
+
+ s10_svc_free_memory(sc->s10_svc_dev, &sc->mem);
+ sc->opened = 0;
+ sx_xunlock(&sc->sx);
+
+ return (0);
+}
+
+static int
+fpga_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
+ struct thread *td)
+{
+
+ return (0);
+}
+
+static struct cdevsw fpga_cdevsw = {
+ .d_version = D_VERSION,
+ .d_open = fpga_open,
+ .d_close = fpga_close,
+ .d_write = fpga_write,
+ .d_ioctl = fpga_ioctl,
+ .d_name = "FPGA Manager",
+};
+
+static int
+fpgamgr_s10_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (!ofw_bus_is_compatible(dev, "intel,stratix10-soc-fpga-mgr"))
+ return (ENXIO);
+
+ device_set_desc(dev, "Stratix 10 SOC FPGA Manager");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+fpgamgr_s10_attach(device_t dev)
+{
+ struct fpgamgr_s10_softc *sc;
+ devclass_t dc;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ dc = devclass_find("s10_svc");
+ if (dc == NULL)
+ return (ENXIO);
+
+ sc->s10_svc_dev = devclass_get_device(dc, 0);
+ if (sc->s10_svc_dev == NULL)
+ return (ENXIO);
+
+ sc->mgr_cdev = make_dev(&fpga_cdevsw, 0, UID_ROOT, GID_WHEEL,
+ 0600, "fpga%d", device_get_unit(sc->dev));
+ if (sc->mgr_cdev == NULL) {
+ device_printf(dev, "Failed to create character device.\n");
+ return (ENXIO);
+ }
+
+ sc->mgr_cdev_partial = make_dev(&fpga_cdevsw, 0, UID_ROOT, GID_WHEEL,
+ 0600, "fpga_partial%d", device_get_unit(sc->dev));
+ if (sc->mgr_cdev_partial == NULL) {
+ device_printf(dev, "Failed to create character device.\n");
+ destroy_dev(sc->mgr_cdev);
+ return (ENXIO);
+ }
+
+ sx_init(&sc->sx, "s10 fpga");
+
+ sc->mgr_cdev->si_drv1 = sc;
+ sc->mgr_cdev_partial->si_drv1 = sc;
+
+ return (0);
+}
+
+static int
+fpgamgr_s10_detach(device_t dev)
+{
+ struct fpgamgr_s10_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ destroy_dev(sc->mgr_cdev);
+ destroy_dev(sc->mgr_cdev_partial);
+
+ sx_destroy(&sc->sx);
+
+ return (0);
+}
+
+static device_method_t fpgamgr_s10_methods[] = {
+ DEVMETHOD(device_probe, fpgamgr_s10_probe),
+ DEVMETHOD(device_attach, fpgamgr_s10_attach),
+ DEVMETHOD(device_detach, fpgamgr_s10_detach),
+ { 0, 0 }
+};
+
+static driver_t fpgamgr_s10_driver = {
+ "fpgamgr_s10",
+ fpgamgr_s10_methods,
+ sizeof(struct fpgamgr_s10_softc),
+};
+
+static devclass_t fpgamgr_s10_devclass;
+
+DRIVER_MODULE(fpgamgr_s10, simplebus, fpgamgr_s10_driver,
+ fpgamgr_s10_devclass, 0, 0);
diff --git a/sys/arm64/intel/stratix10-svc.c b/sys/arm64/intel/stratix10-svc.c
new file mode 100644
index 000000000000..f68ac8600254
--- /dev/null
+++ b/sys/arm64/intel/stratix10-svc.c
@@ -0,0 +1,271 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory (Department of Computer Science and
+ * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
+ * DARPA SSITH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Intel Stratix 10 Service Layer
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/malloc.h>
+#include <sys/rman.h>
+#include <sys/timeet.h>
+#include <sys/timetc.h>
+#include <sys/conf.h>
+#include <sys/uio.h>
+#include <sys/vmem.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <dev/fdt/simplebus.h>
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <arm64/intel/intel-smc.h>
+#include <arm64/intel/stratix10-svc.h>
+
+#include <machine/bus.h>
+#include <machine/cpu.h>
+#include <machine/intr.h>
+
+struct s10_svc_softc {
+ device_t dev;
+ vmem_t *vmem;
+ intel_smc_callfn_t callfn;
+};
+
+static int
+s10_data_claim(struct s10_svc_softc *sc)
+{
+ struct arm_smccc_res res;
+ register_t a0, a1, a2;
+ int ret;
+
+ ret = 0;
+
+ while (1) {
+ a0 = INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE;
+ a1 = 0;
+ a2 = 0;
+
+ ret = sc->callfn(a0, a1, a2, 0, 0, 0, 0, 0, &res);
+ if (ret == INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY)
+ continue;
+
+ break;
+ }
+
+ return (ret);
+}
+
+int
+s10_svc_send(device_t dev, struct s10_svc_msg *msg)
+{
+ struct s10_svc_softc *sc;
+ struct arm_smccc_res res;
+ register_t a0, a1, a2;
+ int ret;
+
+ sc = device_get_softc(dev);
+
+ a0 = 0;
+ a1 = 0;
+ a2 = 0;
+
+ switch (msg->command) {
+ case COMMAND_RECONFIG:
+ a0 = INTEL_SIP_SMC_FPGA_CONFIG_START;
+ a1 = msg->flags;
+ break;
+ case COMMAND_RECONFIG_DATA_SUBMIT:
+ a0 = INTEL_SIP_SMC_FPGA_CONFIG_WRITE;
+ a1 = (uint64_t)msg->payload;
+ a2 = (uint64_t)msg->payload_length;
+ break;
+ case COMMAND_RECONFIG_DATA_CLAIM:
+ ret = s10_data_claim(sc);
+ return (ret);
+ default:
+ return (-1);
+ }
+
+ ret = sc->callfn(a0, a1, a2, 0, 0, 0, 0, 0, &res);
+
+ return (ret);
+}
+
+int
+s10_svc_allocate_memory(device_t dev, struct s10_svc_mem *mem, int size)
+{
+ struct s10_svc_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ if (size <= 0)
+ return (EINVAL);
+
+ if (vmem_alloc(sc->vmem, size,
+ M_FIRSTFIT | M_NOWAIT, &mem->paddr)) {
+ device_printf(dev, "Can't allocate memory\n");
+ return (ENOMEM);
+ }
+
+ mem->size = size;
+ mem->fill = 0;
+ mem->vaddr = (vm_offset_t)pmap_mapdev(mem->paddr, mem->size);
+
+ return (0);
+}
+
+void
+s10_svc_free_memory(device_t dev, struct s10_svc_mem *mem)
+{
+ struct s10_svc_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ vmem_free(sc->vmem, mem->paddr, mem->size);
+}
+
+static int
+s10_get_memory(struct s10_svc_softc *sc)
+{
+ struct arm_smccc_res res;
+ vmem_addr_t addr;
+ vmem_size_t size;
+ vmem_t *vmem;
+
+ sc->callfn(INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM,
+ 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 != INTEL_SIP_SMC_STATUS_OK)
+ return (ENXIO);
+
+ vmem = vmem_create("stratix10 vmem", 0, 0, PAGE_SIZE,
+ PAGE_SIZE, M_BESTFIT | M_WAITOK);
+ if (vmem == NULL)
+ return (ENXIO);
+
+ addr = res.a1;
+ size = res.a2;
+
+ device_printf(sc->dev, "Shared memory address 0x%lx size 0x%lx\n",
+ addr, size);
+
+ vmem_add(vmem, addr, size, 0);
+
+ sc->vmem = vmem;
+
+ return (0);
+}
+
+static intel_smc_callfn_t
+s10_svc_get_callfn(struct s10_svc_softc *sc, phandle_t node)
+{
+ char method[16];
+
+ if ((OF_getprop(node, "method", method, sizeof(method))) > 0) {
+ if (strcmp(method, "hvc") == 0)
+ return (arm_smccc_hvc);
+ else if (strcmp(method, "smc") == 0)
+ return (arm_smccc_smc);
+ else
+ device_printf(sc->dev,
+ "Invalid method \"%s\"\n", method);
+ } else
+ device_printf(sc->dev, "SMC method not provided\n");
+
+ return (NULL);
+}
+
+static int
+s10_svc_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (!ofw_bus_is_compatible(dev, "intel,stratix10-svc"))
+ return (ENXIO);
+
+ device_set_desc(dev, "Stratix 10 SVC");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+s10_svc_attach(device_t dev)
+{
+ struct s10_svc_softc *sc;
+ phandle_t node;
+
+ node = ofw_bus_get_node(dev);
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ if (device_get_unit(dev) != 0)
+ return (ENXIO);
+
+ sc->callfn = s10_svc_get_callfn(sc, node);
+ if (sc->callfn == NULL)
+ return (ENXIO);
+
+ if (s10_get_memory(sc) != 0)
+ return (ENXIO);
+
+ return (0);
+}
+
+static device_method_t s10_svc_methods[] = {
+ DEVMETHOD(device_probe, s10_svc_probe),
+ DEVMETHOD(device_attach, s10_svc_attach),
+ { 0, 0 }
+};
+
+static driver_t s10_svc_driver = {
+ "s10_svc",
+ s10_svc_methods,
+ sizeof(struct s10_svc_softc),
+};
+
+static devclass_t s10_svc_devclass;
+
+EARLY_DRIVER_MODULE(s10_svc, firmware, s10_svc_driver,
+ s10_svc_devclass, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
diff --git a/sys/arm64/intel/stratix10-svc.h b/sys/arm64/intel/stratix10-svc.h
new file mode 100644
index 000000000000..c2f25efe1998
--- /dev/null
+++ b/sys/arm64/intel/stratix10-svc.h
@@ -0,0 +1,60 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory (Department of Computer Science and
+ * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
+ * DARPA SSITH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ARM64_INTEL_STRATIX10_SVC_H_
+#define _ARM64_INTEL_STRATIX10_SVC_H_
+
+struct s10_svc_msg {
+ int command;
+#define COMMAND_RECONFIG (1 << 0)
+#define COMMAND_RECONFIG_DATA_SUBMIT (1 << 1)
+#define COMMAND_RECONFIG_DATA_CLAIM (1 << 2)
+ int flags;
+#define COMMAND_RECONFIG_FLAG_PARTIAL (1 << 0)
+ void *payload;
+ int payload_length;
+};
+
+struct s10_svc_mem {
+ vm_offset_t paddr;
+ vm_offset_t vaddr;
+ int size;
+ int fill;
+};
+
+int s10_svc_send(device_t dev, struct s10_svc_msg *msg);
+int s10_svc_allocate_memory(device_t dev, struct s10_svc_mem *mem, int size);
+void s10_svc_free_memory(device_t dev, struct s10_svc_mem *mem);
+
+#endif /* !_ARM64_INTEL_STRATIX10_SVC_H_ */
diff --git a/sys/arm64/linux/Makefile b/sys/arm64/linux/Makefile
new file mode 100644
index 000000000000..662c7f8fc42f
--- /dev/null
+++ b/sys/arm64/linux/Makefile
@@ -0,0 +1,7 @@
+# Makefile for syscall tables
+#
+# $FreeBSD$
+
+GENERATED_PREFIX= linux_
+
+.include "../../conf/sysent.mk"
diff --git a/sys/arm64/linux/linux.h b/sys/arm64/linux/linux.h
new file mode 100644
index 000000000000..186a0b27cc9c
--- /dev/null
+++ b/sys/arm64/linux/linux.h
@@ -0,0 +1,300 @@
+/*-
+ * Copyright (c) 1994-1996 Søren Schmidt
+ * Copyright (c) 2013 Dmitry Chagin
+ * Copyright (c) 2018 Turing Robotic Industries Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * $FreeBSD$
+ */
+#ifndef _ARM64_LINUX_H_
+#define _ARM64_LINUX_H_
+
+#include <sys/abi_compat.h>
+
+#include <compat/linux/linux.h>
+#include <arm64/linux/linux_syscall.h>
+
+#define LINUX_DTRACE linuxulator
+
+/* Provide a separate set of types for the Linux types */
+typedef int32_t l_int;
+typedef int64_t l_long;
+typedef int16_t l_short;
+typedef uint32_t l_uint;
+typedef uint64_t l_ulong;
+typedef uint16_t l_ushort;
+
+typedef l_ulong l_uintptr_t;
+typedef l_long l_clock_t;
+typedef l_int l_daddr_t;
+typedef l_ulong l_dev_t;
+typedef l_uint l_gid_t;
+typedef l_ushort l_gid16_t; /* XXX */
+typedef l_uint l_uid_t;
+typedef l_ushort l_uid16_t; /* XXX */
+typedef l_ulong l_ino_t;
+typedef l_int l_key_t;
+typedef l_long l_loff_t;
+typedef l_uint l_mode_t;
+typedef l_long l_off_t;
+typedef l_int l_pid_t;
+typedef l_ulong l_size_t;
+typedef l_long l_suseconds_t;
+typedef l_long l_time_t;
+typedef l_int l_timer_t; /* XXX */
+typedef l_int l_mqd_t;
+typedef l_ulong l_fd_mask;
+
+typedef struct {
+ l_int val[2];
+} l_fsid_t;
+
+typedef struct {
+ l_time_t tv_sec;
+ l_suseconds_t tv_usec;
+} l_timeval;
+
+#define l_fd_set fd_set
+
+/* Miscellaneous */
+#define LINUX_AT_COUNT 20
+
+struct l___sysctl_args
+{
+ l_uintptr_t name;
+ l_int nlen;
+ l_uintptr_t oldval;
+ l_uintptr_t oldlenp;
+ l_uintptr_t newval;
+ l_uintptr_t newlen;
+ l_ulong __spare[4];
+};
+
+/* Resource limits */
+#define LINUX_RLIMIT_CPU 0
+#define LINUX_RLIMIT_FSIZE 1
+#define LINUX_RLIMIT_DATA 2
+#define LINUX_RLIMIT_STACK 3
+#define LINUX_RLIMIT_CORE 4
+#define LINUX_RLIMIT_RSS 5
+#define LINUX_RLIMIT_NPROC 6
+#define LINUX_RLIMIT_NOFILE 7
+#define LINUX_RLIMIT_MEMLOCK 8
+#define LINUX_RLIMIT_AS 9 /* Address space limit */
+
+#define LINUX_RLIM_NLIMITS 10
+
+struct l_rlimit {
+ l_ulong rlim_cur;
+ l_ulong rlim_max;
+};
+
+/* stat family of syscalls */
+struct l_timespec {
+ l_time_t tv_sec;
+ l_long tv_nsec;
+};
+
+struct l_newstat {
+ l_dev_t st_dev;
+ l_ino_t st_ino;
+ l_uint st_mode;
+ l_uint st_nlink;
+
+ l_uid_t st_uid;
+ l_gid_t st_gid;
+
+ l_dev_t st_rdev;
+ l_ulong __st_pad1;
+ l_off_t st_size;
+ l_int st_blksize;
+ l_int __st_pad2;
+ l_long st_blocks;
+
+ struct l_timespec st_atim;
+ struct l_timespec st_mtim;
+ struct l_timespec st_ctim;
+ l_uint __unused1;
+ l_uint __unused2;
+};
+
+/* sigaction flags */
+#define LINUX_SA_NOCLDSTOP 0x00000001
+#define LINUX_SA_NOCLDWAIT 0x00000002
+#define LINUX_SA_SIGINFO 0x00000004
+#define LINUX_SA_RESTORER 0x04000000
+#define LINUX_SA_ONSTACK 0x08000000
+#define LINUX_SA_RESTART 0x10000000
+#define LINUX_SA_INTERRUPT 0x20000000 /* XXX */
+#define LINUX_SA_NOMASK 0x40000000 /* SA_NODEFER */
+#define LINUX_SA_ONESHOT 0x80000000 /* SA_RESETHAND */
+
+/* sigprocmask actions */
+#define LINUX_SIG_BLOCK 0
+#define LINUX_SIG_UNBLOCK 1
+#define LINUX_SIG_SETMASK 2
+
+/* sigaltstack */
+#define LINUX_MINSIGSTKSZ 2048 /* XXX */
+
+typedef void (*l_handler_t)(l_int);
+
+typedef struct {
+ l_handler_t lsa_handler;
+ l_sigset_t lsa_mask;
+ l_ulong lsa_flags;
+ l_uintptr_t lsa_restorer;
+} l_sigaction_t; /* XXX */
+
+typedef struct {
+ l_uintptr_t ss_sp;
+ l_int ss_flags;
+ l_size_t ss_size;
+} l_stack_t;
+
+#define LINUX_SI_PREAMBLE_SIZE (4 * sizeof(int))
+#define LINUX_SI_MAX_SIZE 128
+#define LINUX_SI_PAD_SIZE ((LINUX_SI_MAX_SIZE - \
+ LINUX_SI_PREAMBLE_SIZE) / sizeof(l_int))
+typedef union l_sigval {
+ l_int sival_int;
+ l_uintptr_t sival_ptr;
+} l_sigval_t;
+
+typedef struct l_siginfo {
+ l_int lsi_signo;
+ l_int lsi_errno;
+ l_int lsi_code;
+ union {
+ l_int _pad[LINUX_SI_PAD_SIZE];
+
+ struct {
+ l_pid_t _pid;
+ l_uid_t _uid;
+ } _kill;
+
+ struct {
+ l_timer_t _tid;
+ l_int _overrun;
+ char _pad[sizeof(l_uid_t) - sizeof(int)];
+ union l_sigval _sigval;
+ l_uint _sys_private;
+ } _timer;
+
+ struct {
+ l_pid_t _pid; /* sender's pid */
+ l_uid_t _uid; /* sender's uid */
+ union l_sigval _sigval;
+ } _rt;
+
+ struct {
+ l_pid_t _pid; /* which child */
+ l_uid_t _uid; /* sender's uid */
+ l_int _status; /* exit code */
+ l_clock_t _utime;
+ l_clock_t _stime;
+ } _sigchld;
+
+ struct {
+ l_uintptr_t _addr; /* Faulting insn/memory ref. */
+ } _sigfault;
+
+ struct {
+ l_long _band; /* POLL_IN,POLL_OUT,POLL_MSG */
+ l_int _fd;
+ } _sigpoll;
+ } _sifields;
+} l_siginfo_t;
+
+#define lsi_pid _sifields._kill._pid
+#define lsi_uid _sifields._kill._uid
+#define lsi_tid _sifields._timer._tid
+#define lsi_overrun _sifields._timer._overrun
+#define lsi_sys_private _sifields._timer._sys_private
+#define lsi_status _sifields._sigchld._status
+#define lsi_utime _sifields._sigchld._utime
+#define lsi_stime _sifields._sigchld._stime
+#define lsi_value _sifields._rt._sigval
+#define lsi_int _sifields._rt._sigval.sival_int
+#define lsi_ptr _sifields._rt._sigval.sival_ptr
+#define lsi_addr _sifields._sigfault._addr
+#define lsi_band _sifields._sigpoll._band
+#define lsi_fd _sifields._sigpoll._fd
+
+union l_semun {
+ l_int val;
+ l_uintptr_t buf;
+ l_uintptr_t array;
+ l_uintptr_t __buf;
+ l_uintptr_t __pad;
+};
+
+struct l_ifmap {
+ l_ulong mem_start;
+ l_ulong mem_end;
+ l_ushort base_addr;
+ u_char irq;
+ u_char dma;
+ u_char port;
+} __packed;
+
+struct l_ifreq {
+ union {
+ char ifrn_name[LINUX_IFNAMSIZ];
+ } ifr_ifrn;
+
+ union {
+ struct l_sockaddr ifru_addr;
+ struct l_sockaddr ifru_dstaddr;
+ struct l_sockaddr ifru_broadaddr;
+ struct l_sockaddr ifru_netmask;
+ struct l_sockaddr ifru_hwaddr;
+ l_short ifru_flags[1];
+ l_int ifru_ivalue;
+ l_int ifru_mtu;
+ struct l_ifmap ifru_map;
+ char ifru_slave[LINUX_IFNAMSIZ];
+ l_uintptr_t ifru_data;
+ } ifr_ifru;
+} __packed;
+
+#define ifr_name ifr_ifrn.ifrn_name /* Interface name */
+#define ifr_hwaddr ifr_ifru.ifru_hwaddr /* MAC address */
+#define ifr_ifindex ifr_ifru.ifru_ivalue /* Interface index */
+
+#define linux_copyout_rusage(r, u) copyout(r, u, sizeof(*r))
+
+/* robust futexes */
+struct linux_robust_list {
+ l_uintptr_t next;
+};
+
+struct linux_robust_list_head {
+ struct linux_robust_list list;
+ l_long futex_offset;
+ l_uintptr_t pending_list;
+};
+
+#endif /* _ARM64_LINUX_H_ */
diff --git a/sys/arm64/linux/linux_dummy.c b/sys/arm64/linux/linux_dummy.c
new file mode 100644
index 000000000000..ad56a0eec4e9
--- /dev/null
+++ b/sys/arm64/linux/linux_dummy.c
@@ -0,0 +1,167 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2013 Dmitry Chagin
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_compat.h"
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/sdt.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+
+#include <arm64/linux/linux.h>
+#include <arm64/linux/linux_proto.h>
+#include <compat/linux/linux_dtrace.h>
+#include <compat/linux/linux_util.h>
+
+/* LINUXTODO: deduplicate arm64 dummy against other archs? */
+/* LINUXTODO: review/update/add unimplemented syscalls. */
+
+/* DTrace init */
+LIN_SDT_PROVIDER_DECLARE(LINUX_DTRACE);
+
+UNIMPLEMENTED(afs_syscall);
+UNIMPLEMENTED(create_module); /* Added in Linux 1.0, removed in 2.6. */
+UNIMPLEMENTED(epoll_ctl_old);
+UNIMPLEMENTED(epoll_wait_old);
+UNIMPLEMENTED(get_kernel_syms); /* Added in Linux 1.0, removed in 2.6. */
+UNIMPLEMENTED(get_thread_area);
+UNIMPLEMENTED(getpmsg);
+UNIMPLEMENTED(nfsservctl); /* Added in Linux 2.2, removed in 3.1. */
+UNIMPLEMENTED(putpmsg);
+UNIMPLEMENTED(query_module); /* Added in Linux 2.2, removed in 2.6. */
+UNIMPLEMENTED(security);
+UNIMPLEMENTED(set_thread_area);
+UNIMPLEMENTED(tuxcall);
+UNIMPLEMENTED(uselib);
+UNIMPLEMENTED(vserver);
+
+DUMMY(setfsuid);
+DUMMY(setfsgid);
+DUMMY(vhangup);
+DUMMY(pivot_root);
+DUMMY(adjtimex);
+DUMMY(swapoff);
+DUMMY(init_module);
+DUMMY(delete_module);
+DUMMY(lookup_dcookie);
+DUMMY(remap_file_pages);
+DUMMY(semtimedop);
+DUMMY(mbind);
+DUMMY(get_mempolicy);
+DUMMY(set_mempolicy);
+DUMMY(mq_open);
+DUMMY(mq_unlink);
+DUMMY(mq_timedsend);
+DUMMY(mq_timedreceive);
+DUMMY(mq_notify);
+DUMMY(mq_getsetattr);
+DUMMY(kexec_load);
+/* Linux 2.6.11: */
+DUMMY(add_key);
+DUMMY(request_key);
+DUMMY(keyctl);
+/* Linux 2.6.13: */
+DUMMY(ioprio_set);
+DUMMY(ioprio_get);
+DUMMY(inotify_add_watch);
+DUMMY(inotify_rm_watch);
+/* Linux 2.6.16: */
+DUMMY(migrate_pages);
+DUMMY(unshare);
+/* Linux 2.6.17: */
+DUMMY(tee);
+DUMMY(vmsplice);
+/* Linux 2.6.18: */
+DUMMY(move_pages);
+/* Linux 2.6.27: */
+DUMMY(signalfd4);
+DUMMY(inotify_init1);
+/* Linux 2.6.31: */
+DUMMY(perf_event_open);
+/* Linux 2.6.36: */
+DUMMY(fanotify_init);
+DUMMY(fanotify_mark);
+/* Linux 2.6.39: */
+DUMMY(name_to_handle_at);
+DUMMY(open_by_handle_at);
+DUMMY(clock_adjtime);
+/* Linux 3.0: */
+DUMMY(setns);
+/* Linux 3.2: */
+DUMMY(process_vm_readv);
+DUMMY(process_vm_writev);
+/* Linux 3.5: */
+DUMMY(kcmp);
+/* Linux 3.8: */
+DUMMY(finit_module);
+DUMMY(sched_setattr);
+DUMMY(sched_getattr);
+/* Linux 3.17: */
+DUMMY(seccomp);
+/* Linux 3.18: */
+DUMMY(bpf);
+/* Linux 3.19: */
+DUMMY(execveat);
+/* Linux 4.2: */
+DUMMY(userfaultfd);
+/* Linux 4.3: */
+DUMMY(membarrier);
+/* Linux 4.4: */
+DUMMY(mlock2);
+/* Linux 4.6: */
+DUMMY(preadv2);
+DUMMY(pwritev2);
+/* Linux 4.8: */
+DUMMY(pkey_mprotect);
+DUMMY(pkey_alloc);
+DUMMY(pkey_free);
+
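+/*
+ * Extended attribute system calls are stubbed out directly here rather than
+ * through DUMMY(); they simply fail with EOPNOTSUPP.
+ */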
+#define DUMMY_XATTR(s) \
+int \
+linux_ ## s ## xattr( \
+ struct thread *td, struct linux_ ## s ## xattr_args *arg) \
+{ \
+ \
+ return (EOPNOTSUPP); \
+}
+DUMMY_XATTR(set);
+DUMMY_XATTR(lset);
+DUMMY_XATTR(fset);
+DUMMY_XATTR(get);
+DUMMY_XATTR(lget);
+DUMMY_XATTR(fget);
+DUMMY_XATTR(list);
+DUMMY_XATTR(llist);
+DUMMY_XATTR(flist);
+DUMMY_XATTR(remove);
+DUMMY_XATTR(lremove);
+DUMMY_XATTR(fremove);
diff --git a/sys/arm64/linux/linux_genassym.c b/sys/arm64/linux/linux_genassym.c
new file mode 100644
index 000000000000..661f8c9b2046
--- /dev/null
+++ b/sys/arm64/linux/linux_genassym.c
@@ -0,0 +1,2 @@
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
diff --git a/sys/arm64/linux/linux_locore.asm b/sys/arm64/linux/linux_locore.asm
new file mode 100644
index 000000000000..b7e764b6d379
--- /dev/null
+++ b/sys/arm64/linux/linux_locore.asm
@@ -0,0 +1,63 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (C) 2018 Turing Robotic Industries Inc.
+ * Copyright (C) 2020 Andrew Turner <andrew@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * arm64 Linux VDSO implementation.
+ */
+
+#include <machine/asm.h>
+
+#include <arm64/linux/linux_syscall.h>
+
+ .data
+
+ .globl linux_platform
+linux_platform:
+ .asciz "arm64"
+
+ .text
+
+ENTRY(__kernel_rt_sigreturn)
+ brk #0 /* LINUXTODO: implement __kernel_rt_sigreturn */
+ ret
+
+ENTRY(__kernel_gettimeofday)
+ ldr x8, =LINUX_SYS_gettimeofday
+ svc #0
+ ret
+
+ENTRY(__kernel_clock_gettime)
+ ldr x8, =LINUX_SYS_linux_clock_gettime
+ svc #0
+ ret
+
+ENTRY(__kernel_clock_getres)
+ brk #0 /* LINUXTODO: implement __kernel_clock_getres */
+ ret
diff --git a/sys/arm64/linux/linux_machdep.c b/sys/arm64/linux/linux_machdep.c
new file mode 100644
index 000000000000..ee950ffbbbbd
--- /dev/null
+++ b/sys/arm64/linux/linux_machdep.c
@@ -0,0 +1,143 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2018 Turing Robotic Industries Inc.
+ * Copyright (c) 2000 Marcel Moolenaar
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/fcntl.h>
+#include <sys/imgact.h>
+#include <sys/ktr.h>
+#include <sys/proc.h>
+#include <sys/sdt.h>
+
+#include <arm64/linux/linux.h>
+#include <arm64/linux/linux_proto.h>
+#include <compat/linux/linux_dtrace.h>
+#include <compat/linux/linux_emul.h>
+#include <compat/linux/linux_misc.h>
+#include <compat/linux/linux_mmap.h>
+#include <compat/linux/linux_util.h>
+
+/* DTrace init */
+LIN_SDT_PROVIDER_DECLARE(LINUX_DTRACE);
+
+/* DTrace probes */
+LIN_SDT_PROBE_DEFINE0(machdep, linux_set_upcall_kse, todo);
+LIN_SDT_PROBE_DEFINE0(machdep, linux_mmap2, todo);
+LIN_SDT_PROBE_DEFINE0(machdep, linux_rt_sigsuspend, todo);
+LIN_SDT_PROBE_DEFINE0(machdep, linux_sigaltstack, todo);
+LIN_SDT_PROBE_DEFINE0(machdep, linux_set_cloned_tls, todo);
+
+/*
+ * LINUXTODO: deduplicate; linux_execve is common across archs, except that on
+ * amd64 compat linuxulator it calls freebsd32_exec_copyin_args.
+ */
+int
+linux_execve(struct thread *td, struct linux_execve_args *uap)
+{
+ struct image_args eargs;
+ char *path;
+ int error;
+
+ if (!LUSECONVPATH(td)) {
+ error = exec_copyin_args(&eargs, uap->path, UIO_USERSPACE,
+ uap->argp, uap->envp);
+ } else {
+ LCONVPATHEXIST(td, uap->path, &path);
+ error = exec_copyin_args(&eargs, path, UIO_SYSSPACE,
+ uap->argp, uap->envp);
+ LFREEPATH(path);
+ }
+ if (error == 0)
+ error = linux_common_execve(td, &eargs);
+ return (error);
+}
+
+/* LINUXTODO: implement (or deduplicate) arm64 linux_set_upcall_kse */
+int
+linux_set_upcall_kse(struct thread *td, register_t stack)
+{
+
+ LIN_SDT_PROBE0(machdep, linux_set_upcall_kse, todo);
+ return (EDOOFUS);
+}
+
+/* LINUXTODO: deduplicate arm64 linux_mmap2 */
+int
+linux_mmap2(struct thread *td, struct linux_mmap2_args *uap)
+{
+
+ LIN_SDT_PROBE0(machdep, linux_mmap2, todo);
+ return (linux_mmap_common(td, PTROUT(uap->addr), uap->len, uap->prot,
+ uap->flags, uap->fd, uap->pgoff));
+}
+
+int
+linux_mprotect(struct thread *td, struct linux_mprotect_args *uap)
+{
+
+ return (linux_mprotect_common(td, PTROUT(uap->addr), uap->len,
+ uap->prot));
+}
+
+int
+linux_madvise(struct thread *td, struct linux_madvise_args *uap)
+{
+
+ return (linux_madvise_common(td, PTROUT(uap->addr), uap->len,
+ uap->behav));
+}
+
+/* LINUXTODO: implement arm64 linux_rt_sigsuspend */
+int
+linux_rt_sigsuspend(struct thread *td, struct linux_rt_sigsuspend_args *uap)
+{
+
+ LIN_SDT_PROBE0(machdep, linux_rt_sigsuspend, todo);
+ return (EDOOFUS);
+}
+
+/* LINUXTODO: implement arm64 linux_sigaltstack */
+int
+linux_sigaltstack(struct thread *td, struct linux_sigaltstack_args *uap)
+{
+
+ LIN_SDT_PROBE0(machdep, linux_sigaltstack, todo);
+ return (EDOOFUS);
+}
+
+/* LINUXTODO: implement arm64 linux_set_cloned_tls */
+int
+linux_set_cloned_tls(struct thread *td, void *desc)
+{
+
+ LIN_SDT_PROBE0(machdep, linux_set_cloned_tls, todo);
+ return (EDOOFUS);
+}
diff --git a/sys/arm64/linux/linux_proto.h b/sys/arm64/linux/linux_proto.h
new file mode 100644
index 000000000000..42b0ea16e1d6
--- /dev/null
+++ b/sys/arm64/linux/linux_proto.h
@@ -0,0 +1,1613 @@
+/*
+ * System call prototypes.
+ *
+ * DO NOT EDIT-- this file is automatically @generated.
+ * $FreeBSD$
+ */
+
+#ifndef _LINUX_SYSPROTO_H_
+#define _LINUX_SYSPROTO_H_
+
+#include <sys/signal.h>
+#include <sys/acl.h>
+#include <sys/cpuset.h>
+#include <sys/domainset.h>
+#include <sys/_ffcounter.h>
+#include <sys/_semaphore.h>
+#include <sys/ucontext.h>
+#include <sys/wait.h>
+
+#include <bsm/audit_kevents.h>
+
+struct proc;
+
+struct thread;
+
+#define PAD_(t) (sizeof(register_t) <= sizeof(t) ? \
+ 0 : sizeof(register_t) - sizeof(t))
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define PADL_(t) 0
+#define PADR_(t) PAD_(t)
+#else
+#define PADL_(t) PAD_(t)
+#define PADR_(t) 0
+#endif
+
+#define nosys linux_nosys
+struct linux_setxattr_args {
+ char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)];
+ char name_l_[PADL_(const char *)]; const char * name; char name_r_[PADR_(const char *)];
+ char value_l_[PADL_(const char *)]; const char * value; char value_r_[PADR_(const char *)];
+ char size_l_[PADL_(l_size_t)]; l_size_t size; char size_r_[PADR_(l_size_t)];
+ char flags_l_[PADL_(l_int)]; l_int flags; char flags_r_[PADR_(l_int)];
+};
+struct linux_lsetxattr_args {
+ char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)];
+ char name_l_[PADL_(const char *)]; const char * name; char name_r_[PADR_(const char *)];
+ char value_l_[PADL_(const char *)]; const char * value; char value_r_[PADR_(const char *)];
+ char size_l_[PADL_(l_size_t)]; l_size_t size; char size_r_[PADR_(l_size_t)];
+ char flags_l_[PADL_(l_int)]; l_int flags; char flags_r_[PADR_(l_int)];
+};
+struct linux_fsetxattr_args {
+ char fd_l_[PADL_(l_int)]; l_int fd; char fd_r_[PADR_(l_int)];
+ char name_l_[PADL_(const char *)]; const char * name; char name_r_[PADR_(const char *)];
+ char value_l_[PADL_(const char *)]; const char * value; char value_r_[PADR_(const char *)];
+ char size_l_[PADL_(l_size_t)]; l_size_t size; char size_r_[PADR_(l_size_t)];
+ char flags_l_[PADL_(l_int)]; l_int flags; char flags_r_[PADR_(l_int)];
+};
+struct linux_getxattr_args {
+ char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)];
+ char name_l_[PADL_(const char *)]; const char * name; char name_r_[PADR_(const char *)];
+ char value_l_[PADL_(char *)]; char * value; char value_r_[PADR_(char *)];
+ char size_l_[PADL_(l_size_t)]; l_size_t size; char size_r_[PADR_(l_size_t)];
+};
+struct linux_lgetxattr_args {
+ char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)];
+ char name_l_[PADL_(const char *)]; const char * name; char name_r_[PADR_(const char *)];
+ char value_l_[PADL_(char *)]; char * value; char value_r_[PADR_(char *)];
+ char size_l_[PADL_(l_size_t)]; l_size_t size; char size_r_[PADR_(l_size_t)];
+};
+struct linux_fgetxattr_args {
+ char fd_l_[PADL_(l_int)]; l_int fd; char fd_r_[PADR_(l_int)];
+ char name_l_[PADL_(const char *)]; const char * name; char name_r_[PADR_(const char *)];
+ char value_l_[PADL_(char *)]; char * value; char value_r_[PADR_(char *)];
+ char size_l_[PADL_(l_size_t)]; l_size_t size; char size_r_[PADR_(l_size_t)];
+};
+struct linux_listxattr_args {
+ char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)];
+ char list_l_[PADL_(const char *)]; const char * list; char list_r_[PADR_(const char *)];
+ char size_l_[PADL_(l_size_t)]; l_size_t size; char size_r_[PADR_(l_size_t)];
+};
+struct linux_llistxattr_args {
+ char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)];
+ char list_l_[PADL_(const char *)]; const char * list; char list_r_[PADR_(const char *)];
+ char size_l_[PADL_(l_size_t)]; l_size_t size; char size_r_[PADR_(l_size_t)];
+};
+struct linux_flistxattr_args {
+ char fd_l_[PADL_(l_int)]; l_int fd; char fd_r_[PADR_(l_int)];
+ char list_l_[PADL_(const char *)]; const char * list; char list_r_[PADR_(const char *)];
+ char size_l_[PADL_(l_size_t)]; l_size_t size; char size_r_[PADR_(l_size_t)];
+};
+struct linux_removexattr_args {
+ char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)];
+ char name_l_[PADL_(const char *)]; const char * name; char name_r_[PADR_(const char *)];
+};
+struct linux_lremovexattr_args {
+ char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)];
+ char name_l_[PADL_(const char *)]; const char * name; char name_r_[PADR_(const char *)];
+};
+struct linux_fremovexattr_args {
+ char fd_l_[PADL_(l_int)]; l_int fd; char fd_r_[PADR_(l_int)];
+ char name_l_[PADL_(const char *)]; const char * name; char name_r_[PADR_(const char *)];
+};
+struct linux_getcwd_args {
+ char buf_l_[PADL_(char *)]; char * buf; char buf_r_[PADR_(char *)];
+ char bufsize_l_[PADL_(l_ulong)]; l_ulong bufsize; char bufsize_r_[PADR_(l_ulong)];
+};
+struct linux_lookup_dcookie_args {
+ register_t dummy;
+};
+struct linux_eventfd2_args {
+ char initval_l_[PADL_(l_uint)]; l_uint initval; char initval_r_[PADR_(l_uint)];
+ char flags_l_[PADL_(l_int)]; l_int flags; char flags_r_[PADR_(l_int)];
+};
+struct linux_epoll_create1_args {
+ char flags_l_[PADL_(l_int)]; l_int flags; char flags_r_[PADR_(l_int)];
+};
+struct linux_epoll_ctl_args {
+ char epfd_l_[PADL_(l_int)]; l_int epfd; char epfd_r_[PADR_(l_int)];
+ char op_l_[PADL_(l_int)]; l_int op; char op_r_[PADR_(l_int)];
+ char fd_l_[PADL_(l_int)]; l_int fd; char fd_r_[PADR_(l_int)];
+ char event_l_[PADL_(struct epoll_event *)]; struct epoll_event * event; char event_r_[PADR_(struct epoll_event *)];
+};
+struct linux_epoll_pwait_args {
+ char epfd_l_[PADL_(l_int)]; l_int epfd; char epfd_r_[PADR_(l_int)];
+ char events_l_[PADL_(struct epoll_event *)]; struct epoll_event * events; char events_r_[PADR_(struct epoll_event *)];
+ char maxevents_l_[PADL_(l_int)]; l_int maxevents; char maxevents_r_[PADR_(l_int)];
+ char timeout_l_[PADL_(l_int)]; l_int timeout; char timeout_r_[PADR_(l_int)];
+ char mask_l_[PADL_(l_sigset_t *)]; l_sigset_t * mask; char mask_r_[PADR_(l_sigset_t *)];
+ char sigsetsize_l_[PADL_(l_size_t)]; l_size_t sigsetsize; char sigsetsize_r_[PADR_(l_size_t)];
+};
+struct linux_dup3_args {
+ char oldfd_l_[PADL_(l_int)]; l_int oldfd; char oldfd_r_[PADR_(l_int)];
+ char newfd_l_[PADL_(l_int)]; l_int newfd; char newfd_r_[PADR_(l_int)];
+ char flags_l_[PADL_(l_int)]; l_int flags; char flags_r_[PADR_(l_int)];
+};
+struct linux_fcntl_args {
+ char fd_l_[PADL_(l_uint)]; l_uint fd; char fd_r_[PADR_(l_uint)];
+ char cmd_l_[PADL_(l_uint)]; l_uint cmd; char cmd_r_[PADR_(l_uint)];
+ char arg_l_[PADL_(l_ulong)]; l_ulong arg; char arg_r_[PADR_(l_ulong)];
+};
+struct linux_inotify_init1_args {
+ char flags_l_[PADL_(l_int)]; l_int flags; char flags_r_[PADR_(l_int)];
+};
+struct linux_inotify_add_watch_args {
+ register_t dummy;
+};
+struct linux_inotify_rm_watch_args {
+ register_t dummy;
+};
+struct linux_ioctl_args {
+ char fd_l_[PADL_(l_uint)]; l_uint fd; char fd_r_[PADR_(l_uint)];
+ char cmd_l_[PADL_(l_uint)]; l_uint cmd; char cmd_r_[PADR_(l_uint)];
+ char arg_l_[PADL_(l_ulong)]; l_ulong arg; char arg_r_[PADR_(l_ulong)];
+};
+struct linux_ioprio_set_args {
+ register_t dummy;
+};
+struct linux_ioprio_get_args {
+ register_t dummy;
+};
+struct linux_mknodat_args {
+ char dfd_l_[PADL_(l_int)]; l_int dfd; char dfd_r_[PADR_(l_int)];
+ char filename_l_[PADL_(const char *)]; const char * filename; char filename_r_[PADR_(const char *)];
+ char mode_l_[PADL_(l_int)]; l_int mode; char mode_r_[PADR_(l_int)];
+ char dev_l_[PADL_(l_uint)]; l_uint dev; char dev_r_[PADR_(l_uint)];
+};
+struct linux_mkdirat_args {
+ char dfd_l_[PADL_(l_int)]; l_int dfd; char dfd_r_[PADR_(l_int)];
+ char pathname_l_[PADL_(const char *)]; const char * pathname; char pathname_r_[PADR_(const char *)];
+ char mode_l_[PADL_(l_mode_t)]; l_mode_t mode; char mode_r_[PADR_(l_mode_t)];
+};
+struct linux_unlinkat_args {
+ char dfd_l_[PADL_(l_int)]; l_int dfd; char dfd_r_[PADR_(l_int)];
+ char pathname_l_[PADL_(const char *)]; const char * pathname; char pathname_r_[PADR_(const char *)];
+ char flag_l_[PADL_(l_int)]; l_int flag; char flag_r_[PADR_(l_int)];
+};
+struct linux_symlinkat_args {
+ char oldname_l_[PADL_(const char *)]; const char * oldname; char oldname_r_[PADR_(const char *)];
+ char newdfd_l_[PADL_(l_int)]; l_int newdfd; char newdfd_r_[PADR_(l_int)];
+ char newname_l_[PADL_(const char *)]; const char * newname; char newname_r_[PADR_(const char *)];
+};
+struct linux_linkat_args {
+ char olddfd_l_[PADL_(l_int)]; l_int olddfd; char olddfd_r_[PADR_(l_int)];
+ char oldname_l_[PADL_(const char *)]; const char * oldname; char oldname_r_[PADR_(const char *)];
+ char newdfd_l_[PADL_(l_int)]; l_int newdfd; char newdfd_r_[PADR_(l_int)];
+ char newname_l_[PADL_(const char *)]; const char * newname; char newname_r_[PADR_(const char *)];
+ char flag_l_[PADL_(l_int)]; l_int flag; char flag_r_[PADR_(l_int)];
+};
+struct linux_renameat_args {
+ char olddfd_l_[PADL_(l_int)]; l_int olddfd; char olddfd_r_[PADR_(l_int)];
+ char oldname_l_[PADL_(const char *)]; const char * oldname; char oldname_r_[PADR_(const char *)];
+ char newdfd_l_[PADL_(l_int)]; l_int newdfd; char newdfd_r_[PADR_(l_int)];
+ char newname_l_[PADL_(const char *)]; const char * newname; char newname_r_[PADR_(const char *)];
+};
+struct linux_mount_args {
+ char specialfile_l_[PADL_(char *)]; char * specialfile; char specialfile_r_[PADR_(char *)];
+ char dir_l_[PADL_(char *)]; char * dir; char dir_r_[PADR_(char *)];
+ char filesystemtype_l_[PADL_(char *)]; char * filesystemtype; char filesystemtype_r_[PADR_(char *)];
+ char rwflag_l_[PADL_(l_ulong)]; l_ulong rwflag; char rwflag_r_[PADR_(l_ulong)];
+ char data_l_[PADL_(void *)]; void * data; char data_r_[PADR_(void *)];
+};
+struct linux_pivot_root_args {
+ register_t dummy;
+};
+struct linux_statfs_args {
+ char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)];
+ char buf_l_[PADL_(struct l_statfs_buf *)]; struct l_statfs_buf * buf; char buf_r_[PADR_(struct l_statfs_buf *)];
+};
+struct linux_fstatfs_args {
+ char fd_l_[PADL_(l_uint)]; l_uint fd; char fd_r_[PADR_(l_uint)];
+ char buf_l_[PADL_(struct l_statfs_buf *)]; struct l_statfs_buf * buf; char buf_r_[PADR_(struct l_statfs_buf *)];
+};
+struct linux_truncate_args {
+ char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)];
+ char length_l_[PADL_(l_ulong)]; l_ulong length; char length_r_[PADR_(l_ulong)];
+};
+struct linux_ftruncate_args {
+ char fd_l_[PADL_(l_int)]; l_int fd; char fd_r_[PADR_(l_int)];
+ char length_l_[PADL_(l_long)]; l_long length; char length_r_[PADR_(l_long)];
+};
+struct linux_fallocate_args {
+ char fd_l_[PADL_(l_int)]; l_int fd; char fd_r_[PADR_(l_int)];
+ char mode_l_[PADL_(l_int)]; l_int mode; char mode_r_[PADR_(l_int)];
+ char offset_l_[PADL_(l_loff_t)]; l_loff_t offset; char offset_r_[PADR_(l_loff_t)];
+ char len_l_[PADL_(l_loff_t)]; l_loff_t len; char len_r_[PADR_(l_loff_t)];
+};
+struct linux_faccessat_args {
+ char dfd_l_[PADL_(l_int)]; l_int dfd; char dfd_r_[PADR_(l_int)];
+ char filename_l_[PADL_(const char *)]; const char * filename; char filename_r_[PADR_(const char *)];
+ char amode_l_[PADL_(l_int)]; l_int amode; char amode_r_[PADR_(l_int)];
+};
+struct linux_chdir_args {
+ char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)];
+};
+struct linux_fchmodat_args {
+ char dfd_l_[PADL_(l_int)]; l_int dfd; char dfd_r_[PADR_(l_int)];
+ char filename_l_[PADL_(const char *)]; const char * filename; char filename_r_[PADR_(const char *)];
+ char mode_l_[PADL_(l_mode_t)]; l_mode_t mode; char mode_r_[PADR_(l_mode_t)];
+};
+struct linux_fchownat_args {
+ char dfd_l_[PADL_(l_int)]; l_int dfd; char dfd_r_[PADR_(l_int)];
+ char filename_l_[PADL_(const char *)]; const char * filename; char filename_r_[PADR_(const char *)];
+ char uid_l_[PADL_(l_uid_t)]; l_uid_t uid; char uid_r_[PADR_(l_uid_t)];
+ char gid_l_[PADL_(l_gid_t)]; l_gid_t gid; char gid_r_[PADR_(l_gid_t)];
+ char flag_l_[PADL_(l_int)]; l_int flag; char flag_r_[PADR_(l_int)];
+};
+struct linux_openat_args {
+ char dfd_l_[PADL_(l_int)]; l_int dfd; char dfd_r_[PADR_(l_int)];
+ char filename_l_[PADL_(const char *)]; const char * filename; char filename_r_[PADR_(const char *)];
+ char flags_l_[PADL_(l_int)]; l_int flags; char flags_r_[PADR_(l_int)];
+ char mode_l_[PADL_(l_mode_t)]; l_mode_t mode; char mode_r_[PADR_(l_mode_t)];
+};
+struct linux_vhangup_args {
+ register_t dummy;
+};
+struct linux_pipe2_args {
+ char pipefds_l_[PADL_(l_int *)]; l_int * pipefds; char pipefds_r_[PADR_(l_int *)];
+ char flags_l_[PADL_(l_int)]; l_int flags; char flags_r_[PADR_(l_int)];
+};
+struct linux_getdents64_args {
+ char fd_l_[PADL_(l_uint)]; l_uint fd; char fd_r_[PADR_(l_uint)];
+ char dirent_l_[PADL_(void *)]; void * dirent; char dirent_r_[PADR_(void *)];
+ char count_l_[PADL_(l_uint)]; l_uint count; char count_r_[PADR_(l_uint)];
+};
+struct linux_lseek_args {
+ char fdes_l_[PADL_(l_uint)]; l_uint fdes; char fdes_r_[PADR_(l_uint)];
+ char off_l_[PADL_(l_off_t)]; l_off_t off; char off_r_[PADR_(l_off_t)];
+ char whence_l_[PADL_(l_int)]; l_int whence; char whence_r_[PADR_(l_int)];
+};
+struct linux_pread_args {
+ char fd_l_[PADL_(l_uint)]; l_uint fd; char fd_r_[PADR_(l_uint)];
+ char buf_l_[PADL_(char *)]; char * buf; char buf_r_[PADR_(char *)];
+ char nbyte_l_[PADL_(l_size_t)]; l_size_t nbyte; char nbyte_r_[PADR_(l_size_t)];
+ char offset_l_[PADL_(l_loff_t)]; l_loff_t offset; char offset_r_[PADR_(l_loff_t)];
+};
+struct linux_pwrite_args {
+ char fd_l_[PADL_(l_uint)]; l_uint fd; char fd_r_[PADR_(l_uint)];
+ char buf_l_[PADL_(char *)]; char * buf; char buf_r_[PADR_(char *)];
+ char nbyte_l_[PADL_(l_size_t)]; l_size_t nbyte; char nbyte_r_[PADR_(l_size_t)];
+ char offset_l_[PADL_(l_loff_t)]; l_loff_t offset; char offset_r_[PADR_(l_loff_t)];
+};
+struct linux_preadv_args {
+ char fd_l_[PADL_(l_ulong)]; l_ulong fd; char fd_r_[PADR_(l_ulong)];
+ char vec_l_[PADL_(struct iovec *)]; struct iovec * vec; char vec_r_[PADR_(struct iovec *)];
+ char vlen_l_[PADL_(l_ulong)]; l_ulong vlen; char vlen_r_[PADR_(l_ulong)];
+ char pos_l_l_[PADL_(l_ulong)]; l_ulong pos_l; char pos_l_r_[PADR_(l_ulong)];
+ char pos_h_l_[PADL_(l_ulong)]; l_ulong pos_h; char pos_h_r_[PADR_(l_ulong)];
+};
+struct linux_pwritev_args {
+ char fd_l_[PADL_(l_ulong)]; l_ulong fd; char fd_r_[PADR_(l_ulong)];
+ char vec_l_[PADL_(struct iovec *)]; struct iovec * vec; char vec_r_[PADR_(struct iovec *)];
+ char vlen_l_[PADL_(l_ulong)]; l_ulong vlen; char vlen_r_[PADR_(l_ulong)];
+ char pos_l_l_[PADL_(l_ulong)]; l_ulong pos_l; char pos_l_r_[PADR_(l_ulong)];
+ char pos_h_l_[PADL_(l_ulong)]; l_ulong pos_h; char pos_h_r_[PADR_(l_ulong)];
+};
+struct linux_sendfile_args {
+ char out_l_[PADL_(l_int)]; l_int out; char out_r_[PADR_(l_int)];
+ char in_l_[PADL_(l_int)]; l_int in; char in_r_[PADR_(l_int)];
+ char offset_l_[PADL_(l_off_t *)]; l_off_t * offset; char offset_r_[PADR_(l_off_t *)];
+ char count_l_[PADL_(l_size_t)]; l_size_t count; char count_r_[PADR_(l_size_t)];
+};
+struct linux_pselect6_args {
+ char nfds_l_[PADL_(l_int)]; l_int nfds; char nfds_r_[PADR_(l_int)];
+ char readfds_l_[PADL_(l_fd_set *)]; l_fd_set * readfds; char readfds_r_[PADR_(l_fd_set *)];
+ char writefds_l_[PADL_(l_fd_set *)]; l_fd_set * writefds; char writefds_r_[PADR_(l_fd_set *)];
+ char exceptfds_l_[PADL_(l_fd_set *)]; l_fd_set * exceptfds; char exceptfds_r_[PADR_(l_fd_set *)];
+ char tsp_l_[PADL_(struct l_timespec *)]; struct l_timespec * tsp; char tsp_r_[PADR_(struct l_timespec *)];
+ char sig_l_[PADL_(l_uintptr_t *)]; l_uintptr_t * sig; char sig_r_[PADR_(l_uintptr_t *)];
+};
+struct linux_ppoll_args {
+ char fds_l_[PADL_(struct pollfd *)]; struct pollfd * fds; char fds_r_[PADR_(struct pollfd *)];
+ char nfds_l_[PADL_(l_uint)]; l_uint nfds; char nfds_r_[PADR_(l_uint)];
+ char tsp_l_[PADL_(struct l_timespec *)]; struct l_timespec * tsp; char tsp_r_[PADR_(struct l_timespec *)];
+ char sset_l_[PADL_(l_sigset_t *)]; l_sigset_t * sset; char sset_r_[PADR_(l_sigset_t *)];
+ char ssize_l_[PADL_(l_size_t)]; l_size_t ssize; char ssize_r_[PADR_(l_size_t)];
+};
+struct linux_signalfd4_args {
+ register_t dummy;
+};
+struct linux_vmsplice_args {
+ register_t dummy;
+};
+struct linux_splice_args {
+ char fd_in_l_[PADL_(int)]; int fd_in; char fd_in_r_[PADR_(int)];
+ char off_in_l_[PADL_(l_loff_t *)]; l_loff_t * off_in; char off_in_r_[PADR_(l_loff_t *)];
+ char fd_out_l_[PADL_(int)]; int fd_out; char fd_out_r_[PADR_(int)];
+ char off_out_l_[PADL_(l_loff_t *)]; l_loff_t * off_out; char off_out_r_[PADR_(l_loff_t *)];
+ char len_l_[PADL_(l_size_t)]; l_size_t len; char len_r_[PADR_(l_size_t)];
+ char flags_l_[PADL_(l_uint)]; l_uint flags; char flags_r_[PADR_(l_uint)];
+};
+struct linux_tee_args {
+ register_t dummy;
+};
+struct linux_readlinkat_args {
+ char dfd_l_[PADL_(l_int)]; l_int dfd; char dfd_r_[PADR_(l_int)];
+ char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)];
+ char buf_l_[PADL_(char *)]; char * buf; char buf_r_[PADR_(char *)];
+ char bufsiz_l_[PADL_(l_int)]; l_int bufsiz; char bufsiz_r_[PADR_(l_int)];
+};
+struct linux_newfstatat_args {
+ char dfd_l_[PADL_(l_int)]; l_int dfd; char dfd_r_[PADR_(l_int)];
+ char pathname_l_[PADL_(char *)]; char * pathname; char pathname_r_[PADR_(char *)];
+ char statbuf_l_[PADL_(struct l_stat64 *)]; struct l_stat64 * statbuf; char statbuf_r_[PADR_(struct l_stat64 *)];
+ char flag_l_[PADL_(l_int)]; l_int flag; char flag_r_[PADR_(l_int)];
+};
+struct linux_newfstat_args {
+ char fd_l_[PADL_(l_uint)]; l_uint fd; char fd_r_[PADR_(l_uint)];
+ char buf_l_[PADL_(struct l_newstat *)]; struct l_newstat * buf; char buf_r_[PADR_(struct l_newstat *)];
+};
+struct linux_fdatasync_args {
+ char fd_l_[PADL_(l_uint)]; l_uint fd; char fd_r_[PADR_(l_uint)];
+};
+struct linux_sync_file_range_args {
+ char fd_l_[PADL_(l_int)]; l_int fd; char fd_r_[PADR_(l_int)];
+ char offset_l_[PADL_(l_loff_t)]; l_loff_t offset; char offset_r_[PADR_(l_loff_t)];
+ char nbytes_l_[PADL_(l_loff_t)]; l_loff_t nbytes; char nbytes_r_[PADR_(l_loff_t)];
+ char flags_l_[PADL_(l_uint)]; l_uint flags; char flags_r_[PADR_(l_uint)];
+};
+struct linux_timerfd_create_args {
+ char clockid_l_[PADL_(l_int)]; l_int clockid; char clockid_r_[PADR_(l_int)];
+ char flags_l_[PADL_(l_int)]; l_int flags; char flags_r_[PADR_(l_int)];
+};
+struct linux_timerfd_settime_args {
+ char fd_l_[PADL_(l_int)]; l_int fd; char fd_r_[PADR_(l_int)];
+ char flags_l_[PADL_(l_int)]; l_int flags; char flags_r_[PADR_(l_int)];
+ char new_value_l_[PADL_(const struct l_itimerspec *)]; const struct l_itimerspec * new_value; char new_value_r_[PADR_(const struct l_itimerspec *)];
+ char old_value_l_[PADL_(struct l_itimerspec *)]; struct l_itimerspec * old_value; char old_value_r_[PADR_(struct l_itimerspec *)];
+};
+struct linux_timerfd_gettime_args {
+ char fd_l_[PADL_(l_int)]; l_int fd; char fd_r_[PADR_(l_int)];
+ char old_value_l_[PADL_(struct l_itimerspec *)]; struct l_itimerspec * old_value; char old_value_r_[PADR_(struct l_itimerspec *)];
+};
+struct linux_utimensat_args {
+ char dfd_l_[PADL_(l_int)]; l_int dfd; char dfd_r_[PADR_(l_int)];
+ char pathname_l_[PADL_(const char *)]; const char * pathname; char pathname_r_[PADR_(const char *)];
+ char times_l_[PADL_(const struct l_timespec *)]; const struct l_timespec * times; char times_r_[PADR_(const struct l_timespec *)];
+ char flags_l_[PADL_(l_int)]; l_int flags; char flags_r_[PADR_(l_int)];
+};
+struct linux_capget_args {
+ char hdrp_l_[PADL_(struct l_user_cap_header *)]; struct l_user_cap_header * hdrp; char hdrp_r_[PADR_(struct l_user_cap_header *)];
+ char datap_l_[PADL_(struct l_user_cap_data *)]; struct l_user_cap_data * datap; char datap_r_[PADR_(struct l_user_cap_data *)];
+};
+struct linux_capset_args {
+ char hdrp_l_[PADL_(struct l_user_cap_header *)]; struct l_user_cap_header * hdrp; char hdrp_r_[PADR_(struct l_user_cap_header *)];
+ char datap_l_[PADL_(struct l_user_cap_data *)]; struct l_user_cap_data * datap; char datap_r_[PADR_(struct l_user_cap_data *)];
+};
+struct linux_personality_args {
+ char per_l_[PADL_(l_uint)]; l_uint per; char per_r_[PADR_(l_uint)];
+};
+struct linux_exit_args {
+ char rval_l_[PADL_(u_int)]; u_int rval; char rval_r_[PADR_(u_int)];
+};
+struct linux_exit_group_args {
+ char error_code_l_[PADL_(l_int)]; l_int error_code; char error_code_r_[PADR_(l_int)];
+};
+struct linux_waitid_args {
+ char idtype_l_[PADL_(l_int)]; l_int idtype; char idtype_r_[PADR_(l_int)];
+ char id_l_[PADL_(l_pid_t)]; l_pid_t id; char id_r_[PADR_(l_pid_t)];
+ char info_l_[PADL_(l_siginfo_t *)]; l_siginfo_t * info; char info_r_[PADR_(l_siginfo_t *)];
+ char options_l_[PADL_(l_int)]; l_int options; char options_r_[PADR_(l_int)];
+ char rusage_l_[PADL_(struct rusage *)]; struct rusage * rusage; char rusage_r_[PADR_(struct rusage *)];
+};
+struct linux_set_tid_address_args {
+ char tidptr_l_[PADL_(l_int *)]; l_int * tidptr; char tidptr_r_[PADR_(l_int *)];
+};
+struct linux_unshare_args {
+ register_t dummy;
+};
+struct linux_sys_futex_args {
+ char uaddr_l_[PADL_(void *)]; void * uaddr; char uaddr_r_[PADR_(void *)];
+ char op_l_[PADL_(int)]; int op; char op_r_[PADR_(int)];
+ char val_l_[PADL_(int)]; int val; char val_r_[PADR_(int)];
+ char timeout_l_[PADL_(struct l_timespec *)]; struct l_timespec * timeout; char timeout_r_[PADR_(struct l_timespec *)];
+ char uaddr2_l_[PADL_(void *)]; void * uaddr2; char uaddr2_r_[PADR_(void *)];
+ char val3_l_[PADL_(int)]; int val3; char val3_r_[PADR_(int)];
+};
+struct linux_set_robust_list_args {
+ char head_l_[PADL_(struct linux_robust_list_head *)]; struct linux_robust_list_head * head; char head_r_[PADR_(struct linux_robust_list_head *)];
+ char len_l_[PADL_(l_size_t)]; l_size_t len; char len_r_[PADR_(l_size_t)];
+};
+struct linux_get_robust_list_args {
+ char pid_l_[PADL_(l_int)]; l_int pid; char pid_r_[PADR_(l_int)];
+ char head_l_[PADL_(struct linux_robust_list_head **)]; struct linux_robust_list_head ** head; char head_r_[PADR_(struct linux_robust_list_head **)];
+ char len_l_[PADL_(l_size_t *)]; l_size_t * len; char len_r_[PADR_(l_size_t *)];
+};
+struct linux_nanosleep_args {
+ char rqtp_l_[PADL_(const struct l_timespec *)]; const struct l_timespec * rqtp; char rqtp_r_[PADR_(const struct l_timespec *)];
+ char rmtp_l_[PADL_(struct l_timespec *)]; struct l_timespec * rmtp; char rmtp_r_[PADR_(struct l_timespec *)];
+};
+struct linux_getitimer_args {
+ char which_l_[PADL_(l_int)]; l_int which; char which_r_[PADR_(l_int)];
+ char itv_l_[PADL_(struct l_itimerval *)]; struct l_itimerval * itv; char itv_r_[PADR_(struct l_itimerval *)];
+};
+struct linux_setitimer_args {
+ char which_l_[PADL_(l_int)]; l_int which; char which_r_[PADR_(l_int)];
+ char itv_l_[PADL_(struct l_itimerval *)]; struct l_itimerval * itv; char itv_r_[PADR_(struct l_itimerval *)];
+ char oitv_l_[PADL_(struct l_itimerval *)]; struct l_itimerval * oitv; char oitv_r_[PADR_(struct l_itimerval *)];
+};
+struct linux_kexec_load_args {
+ register_t dummy;
+};
+struct linux_init_module_args {
+ register_t dummy;
+};
+struct linux_delete_module_args {
+ register_t dummy;
+};
+struct linux_timer_create_args {
+ char clock_id_l_[PADL_(clockid_t)]; clockid_t clock_id; char clock_id_r_[PADR_(clockid_t)];
+ char evp_l_[PADL_(struct sigevent *)]; struct sigevent * evp; char evp_r_[PADR_(struct sigevent *)];
+ char timerid_l_[PADL_(l_timer_t *)]; l_timer_t * timerid; char timerid_r_[PADR_(l_timer_t *)];
+};
+struct linux_timer_gettime_args {
+ char timerid_l_[PADL_(l_timer_t)]; l_timer_t timerid; char timerid_r_[PADR_(l_timer_t)];
+ char setting_l_[PADL_(struct itimerspec *)]; struct itimerspec * setting; char setting_r_[PADR_(struct itimerspec *)];
+};
+struct linux_timer_getoverrun_args {
+ char timerid_l_[PADL_(l_timer_t)]; l_timer_t timerid; char timerid_r_[PADR_(l_timer_t)];
+};
+struct linux_timer_settime_args {
+ char timerid_l_[PADL_(l_timer_t)]; l_timer_t timerid; char timerid_r_[PADR_(l_timer_t)];
+ char flags_l_[PADL_(l_int)]; l_int flags; char flags_r_[PADR_(l_int)];
+ char new_l_[PADL_(const struct itimerspec *)]; const struct itimerspec * new; char new_r_[PADR_(const struct itimerspec *)];
+ char old_l_[PADL_(struct itimerspec *)]; struct itimerspec * old; char old_r_[PADR_(struct itimerspec *)];
+};
+struct linux_timer_delete_args {
+ char timerid_l_[PADL_(l_timer_t)]; l_timer_t timerid; char timerid_r_[PADR_(l_timer_t)];
+};
+struct linux_clock_settime_args {
+ char which_l_[PADL_(clockid_t)]; clockid_t which; char which_r_[PADR_(clockid_t)];
+ char tp_l_[PADL_(struct l_timespec *)]; struct l_timespec * tp; char tp_r_[PADR_(struct l_timespec *)];
+};
+struct linux_clock_gettime_args {
+ char which_l_[PADL_(clockid_t)]; clockid_t which; char which_r_[PADR_(clockid_t)];
+ char tp_l_[PADL_(struct l_timespec *)]; struct l_timespec * tp; char tp_r_[PADR_(struct l_timespec *)];
+};
+struct linux_clock_getres_args {
+ char which_l_[PADL_(clockid_t)]; clockid_t which; char which_r_[PADR_(clockid_t)];
+ char tp_l_[PADL_(struct l_timespec *)]; struct l_timespec * tp; char tp_r_[PADR_(struct l_timespec *)];
+};
+struct linux_clock_nanosleep_args {
+ char which_l_[PADL_(clockid_t)]; clockid_t which; char which_r_[PADR_(clockid_t)];
+ char flags_l_[PADL_(l_int)]; l_int flags; char flags_r_[PADR_(l_int)];
+ char rqtp_l_[PADL_(struct l_timespec *)]; struct l_timespec * rqtp; char rqtp_r_[PADR_(struct l_timespec *)];
+ char rmtp_l_[PADL_(struct l_timespec *)]; struct l_timespec * rmtp; char rmtp_r_[PADR_(struct l_timespec *)];
+};
+struct linux_syslog_args {
+ char type_l_[PADL_(l_int)]; l_int type; char type_r_[PADR_(l_int)];
+ char buf_l_[PADL_(char *)]; char * buf; char buf_r_[PADR_(char *)];
+ char len_l_[PADL_(l_int)]; l_int len; char len_r_[PADR_(l_int)];
+};
+struct linux_ptrace_args {
+ char req_l_[PADL_(l_long)]; l_long req; char req_r_[PADR_(l_long)];
+ char pid_l_[PADL_(l_long)]; l_long pid; char pid_r_[PADR_(l_long)];
+ char addr_l_[PADL_(l_ulong)]; l_ulong addr; char addr_r_[PADR_(l_ulong)];
+ char data_l_[PADL_(l_ulong)]; l_ulong data; char data_r_[PADR_(l_ulong)];
+};
+struct linux_sched_setparam_args {
+ char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
+ char param_l_[PADL_(struct sched_param *)]; struct sched_param * param; char param_r_[PADR_(struct sched_param *)];
+};
+struct linux_sched_setscheduler_args {
+ char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
+ char policy_l_[PADL_(l_int)]; l_int policy; char policy_r_[PADR_(l_int)];
+ char param_l_[PADL_(struct sched_param *)]; struct sched_param * param; char param_r_[PADR_(struct sched_param *)];
+};
+struct linux_sched_getscheduler_args {
+ char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
+};
+struct linux_sched_getparam_args {
+ char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
+ char param_l_[PADL_(struct sched_param *)]; struct sched_param * param; char param_r_[PADR_(struct sched_param *)];
+};
+struct linux_sched_setaffinity_args {
+ char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
+ char len_l_[PADL_(l_uint)]; l_uint len; char len_r_[PADR_(l_uint)];
+ char user_mask_ptr_l_[PADL_(l_ulong *)]; l_ulong * user_mask_ptr; char user_mask_ptr_r_[PADR_(l_ulong *)];
+};
+struct linux_sched_getaffinity_args {
+ char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
+ char len_l_[PADL_(l_uint)]; l_uint len; char len_r_[PADR_(l_uint)];
+ char user_mask_ptr_l_[PADL_(l_ulong *)]; l_ulong * user_mask_ptr; char user_mask_ptr_r_[PADR_(l_ulong *)];
+};
+struct linux_sched_get_priority_max_args {
+ char policy_l_[PADL_(l_int)]; l_int policy; char policy_r_[PADR_(l_int)];
+};
+struct linux_sched_get_priority_min_args {
+ char policy_l_[PADL_(l_int)]; l_int policy; char policy_r_[PADR_(l_int)];
+};
+struct linux_sched_rr_get_interval_args {
+ char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
+ char interval_l_[PADL_(struct l_timespec *)]; struct l_timespec * interval; char interval_r_[PADR_(struct l_timespec *)];
+};
+struct linux_kill_args {
+ char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
+ char signum_l_[PADL_(l_int)]; l_int signum; char signum_r_[PADR_(l_int)];
+};
+struct linux_tkill_args {
+ char tid_l_[PADL_(l_pid_t)]; l_pid_t tid; char tid_r_[PADR_(l_pid_t)];
+ char sig_l_[PADL_(l_int)]; l_int sig; char sig_r_[PADR_(l_int)];
+};
+struct linux_tgkill_args {
+ char tgid_l_[PADL_(l_pid_t)]; l_pid_t tgid; char tgid_r_[PADR_(l_pid_t)];
+ char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
+ char sig_l_[PADL_(l_int)]; l_int sig; char sig_r_[PADR_(l_int)];
+};
+struct linux_sigaltstack_args {
+ char uss_l_[PADL_(l_stack_t *)]; l_stack_t * uss; char uss_r_[PADR_(l_stack_t *)];
+ char uoss_l_[PADL_(l_stack_t *)]; l_stack_t * uoss; char uoss_r_[PADR_(l_stack_t *)];
+};
+struct linux_rt_sigsuspend_args {
+ char newset_l_[PADL_(l_sigset_t *)]; l_sigset_t * newset; char newset_r_[PADR_(l_sigset_t *)];
+ char sigsetsize_l_[PADL_(l_size_t)]; l_size_t sigsetsize; char sigsetsize_r_[PADR_(l_size_t)];
+};
+struct linux_rt_sigaction_args {
+ char sig_l_[PADL_(l_int)]; l_int sig; char sig_r_[PADR_(l_int)];
+ char act_l_[PADL_(l_sigaction_t *)]; l_sigaction_t * act; char act_r_[PADR_(l_sigaction_t *)];
+ char oact_l_[PADL_(l_sigaction_t *)]; l_sigaction_t * oact; char oact_r_[PADR_(l_sigaction_t *)];
+ char sigsetsize_l_[PADL_(l_size_t)]; l_size_t sigsetsize; char sigsetsize_r_[PADR_(l_size_t)];
+};
+struct linux_rt_sigprocmask_args {
+ char how_l_[PADL_(l_int)]; l_int how; char how_r_[PADR_(l_int)];
+ char mask_l_[PADL_(l_sigset_t *)]; l_sigset_t * mask; char mask_r_[PADR_(l_sigset_t *)];
+ char omask_l_[PADL_(l_sigset_t *)]; l_sigset_t * omask; char omask_r_[PADR_(l_sigset_t *)];
+ char sigsetsize_l_[PADL_(l_size_t)]; l_size_t sigsetsize; char sigsetsize_r_[PADR_(l_size_t)];
+};
+struct linux_rt_sigpending_args {
+ char set_l_[PADL_(l_sigset_t *)]; l_sigset_t * set; char set_r_[PADR_(l_sigset_t *)];
+ char sigsetsize_l_[PADL_(l_size_t)]; l_size_t sigsetsize; char sigsetsize_r_[PADR_(l_size_t)];
+};
+struct linux_rt_sigtimedwait_args {
+ char mask_l_[PADL_(l_sigset_t *)]; l_sigset_t * mask; char mask_r_[PADR_(l_sigset_t *)];
+ char ptr_l_[PADL_(l_siginfo_t *)]; l_siginfo_t * ptr; char ptr_r_[PADR_(l_siginfo_t *)];
+ char timeout_l_[PADL_(struct l_timeval *)]; struct l_timeval * timeout; char timeout_r_[PADR_(struct l_timeval *)];
+ char sigsetsize_l_[PADL_(l_size_t)]; l_size_t sigsetsize; char sigsetsize_r_[PADR_(l_size_t)];
+};
+struct linux_rt_sigqueueinfo_args {
+ char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
+ char sig_l_[PADL_(l_int)]; l_int sig; char sig_r_[PADR_(l_int)];
+ char info_l_[PADL_(l_siginfo_t *)]; l_siginfo_t * info; char info_r_[PADR_(l_siginfo_t *)];
+};
+struct linux_rt_sigreturn_args {
+ char ucp_l_[PADL_(struct l_ucontext *)]; struct l_ucontext * ucp; char ucp_r_[PADR_(struct l_ucontext *)];
+};
+struct linux_getpriority_args {
+ char which_l_[PADL_(l_int)]; l_int which; char which_r_[PADR_(l_int)];
+ char who_l_[PADL_(l_int)]; l_int who; char who_r_[PADR_(l_int)];
+};
+struct linux_reboot_args {
+ char magic1_l_[PADL_(l_int)]; l_int magic1; char magic1_r_[PADR_(l_int)];
+ char magic2_l_[PADL_(l_int)]; l_int magic2; char magic2_r_[PADR_(l_int)];
+ char cmd_l_[PADL_(l_uint)]; l_uint cmd; char cmd_r_[PADR_(l_uint)];
+ char arg_l_[PADL_(void *)]; void * arg; char arg_r_[PADR_(void *)];
+};
+struct linux_setfsuid_args {
+ char uid_l_[PADL_(l_uid_t)]; l_uid_t uid; char uid_r_[PADR_(l_uid_t)];
+};
+struct linux_setfsgid_args {
+ char gid_l_[PADL_(l_gid_t)]; l_gid_t gid; char gid_r_[PADR_(l_gid_t)];
+};
+struct linux_times_args {
+ char buf_l_[PADL_(struct l_times_argv *)]; struct l_times_argv * buf; char buf_r_[PADR_(struct l_times_argv *)];
+};
+struct linux_getsid_args {
+ char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
+};
+struct linux_getgroups_args {
+ char gidsetsize_l_[PADL_(l_int)]; l_int gidsetsize; char gidsetsize_r_[PADR_(l_int)];
+ char grouplist_l_[PADL_(l_gid_t *)]; l_gid_t * grouplist; char grouplist_r_[PADR_(l_gid_t *)];
+};
+struct linux_setgroups_args {
+ char gidsetsize_l_[PADL_(l_int)]; l_int gidsetsize; char gidsetsize_r_[PADR_(l_int)];
+ char grouplist_l_[PADL_(l_gid_t *)]; l_gid_t * grouplist; char grouplist_r_[PADR_(l_gid_t *)];
+};
+struct linux_newuname_args {
+ char buf_l_[PADL_(struct l_new_utsname *)]; struct l_new_utsname * buf; char buf_r_[PADR_(struct l_new_utsname *)];
+};
+struct linux_sethostname_args {
+ char hostname_l_[PADL_(char *)]; char * hostname; char hostname_r_[PADR_(char *)];
+ char len_l_[PADL_(l_uint)]; l_uint len; char len_r_[PADR_(l_uint)];
+};
+struct linux_setdomainname_args {
+ char name_l_[PADL_(char *)]; char * name; char name_r_[PADR_(char *)];
+ char len_l_[PADL_(l_int)]; l_int len; char len_r_[PADR_(l_int)];
+};
+struct linux_getrlimit_args {
+ char resource_l_[PADL_(l_uint)]; l_uint resource; char resource_r_[PADR_(l_uint)];
+ char rlim_l_[PADL_(struct l_rlimit *)]; struct l_rlimit * rlim; char rlim_r_[PADR_(struct l_rlimit *)];
+};
+struct linux_setrlimit_args {
+ char resource_l_[PADL_(l_uint)]; l_uint resource; char resource_r_[PADR_(l_uint)];
+ char rlim_l_[PADL_(struct l_rlimit *)]; struct l_rlimit * rlim; char rlim_r_[PADR_(struct l_rlimit *)];
+};
+struct linux_prctl_args {
+ char option_l_[PADL_(l_int)]; l_int option; char option_r_[PADR_(l_int)];
+ char arg2_l_[PADL_(l_uintptr_t)]; l_uintptr_t arg2; char arg2_r_[PADR_(l_uintptr_t)];
+ char arg3_l_[PADL_(l_uintptr_t)]; l_uintptr_t arg3; char arg3_r_[PADR_(l_uintptr_t)];
+ char arg4_l_[PADL_(l_uintptr_t)]; l_uintptr_t arg4; char arg4_r_[PADR_(l_uintptr_t)];
+ char arg5_l_[PADL_(l_uintptr_t)]; l_uintptr_t arg5; char arg5_r_[PADR_(l_uintptr_t)];
+};
+struct linux_getcpu_args {
+ char cpu_l_[PADL_(l_uint *)]; l_uint * cpu; char cpu_r_[PADR_(l_uint *)];
+ char node_l_[PADL_(l_uint *)]; l_uint * node; char node_r_[PADR_(l_uint *)];
+ char cache_l_[PADL_(void *)]; void * cache; char cache_r_[PADR_(void *)];
+};
+struct linux_adjtimex_args {
+ register_t dummy;
+};
+struct linux_getpid_args {
+ register_t dummy;
+};
+struct linux_getppid_args {
+ register_t dummy;
+};
+struct linux_getuid_args {
+ register_t dummy;
+};
+struct linux_getgid_args {
+ register_t dummy;
+};
+struct linux_gettid_args {
+ register_t dummy;
+};
+struct linux_sysinfo_args {
+ char info_l_[PADL_(struct l_sysinfo *)]; struct l_sysinfo * info; char info_r_[PADR_(struct l_sysinfo *)];
+};
+struct linux_mq_open_args {
+ char name_l_[PADL_(const char *)]; const char * name; char name_r_[PADR_(const char *)];
+ char oflag_l_[PADL_(l_int)]; l_int oflag; char oflag_r_[PADR_(l_int)];
+ char mode_l_[PADL_(l_mode_t)]; l_mode_t mode; char mode_r_[PADR_(l_mode_t)];
+ char attr_l_[PADL_(struct mq_attr *)]; struct mq_attr * attr; char attr_r_[PADR_(struct mq_attr *)];
+};
+struct linux_mq_unlink_args {
+ char name_l_[PADL_(const char *)]; const char * name; char name_r_[PADR_(const char *)];
+};
+struct linux_mq_timedsend_args {
+ char mqd_l_[PADL_(l_mqd_t)]; l_mqd_t mqd; char mqd_r_[PADR_(l_mqd_t)];
+ char msg_ptr_l_[PADL_(const char *)]; const char * msg_ptr; char msg_ptr_r_[PADR_(const char *)];
+ char msg_len_l_[PADL_(l_size_t)]; l_size_t msg_len; char msg_len_r_[PADR_(l_size_t)];
+ char msg_prio_l_[PADL_(l_uint)]; l_uint msg_prio; char msg_prio_r_[PADR_(l_uint)];
+ char abs_timeout_l_[PADL_(const struct l_timespec *)]; const struct l_timespec * abs_timeout; char abs_timeout_r_[PADR_(const struct l_timespec *)];
+};
+struct linux_mq_timedreceive_args {
+ char mqd_l_[PADL_(l_mqd_t)]; l_mqd_t mqd; char mqd_r_[PADR_(l_mqd_t)];
+ char msg_ptr_l_[PADL_(char *)]; char * msg_ptr; char msg_ptr_r_[PADR_(char *)];
+ char msg_len_l_[PADL_(l_size_t)]; l_size_t msg_len; char msg_len_r_[PADR_(l_size_t)];
+ char msg_prio_l_[PADL_(l_uint *)]; l_uint * msg_prio; char msg_prio_r_[PADR_(l_uint *)];
+ char abs_timeout_l_[PADL_(const struct l_timespec *)]; const struct l_timespec * abs_timeout; char abs_timeout_r_[PADR_(const struct l_timespec *)];
+};
+struct linux_mq_notify_args {
+ char mqd_l_[PADL_(l_mqd_t)]; l_mqd_t mqd; char mqd_r_[PADR_(l_mqd_t)];
+ char abs_timeout_l_[PADL_(const struct l_timespec *)]; const struct l_timespec * abs_timeout; char abs_timeout_r_[PADR_(const struct l_timespec *)];
+};
+struct linux_mq_getsetattr_args {
+ char mqd_l_[PADL_(l_mqd_t)]; l_mqd_t mqd; char mqd_r_[PADR_(l_mqd_t)];
+ char attr_l_[PADL_(const struct mq_attr *)]; const struct mq_attr * attr; char attr_r_[PADR_(const struct mq_attr *)];
+ char oattr_l_[PADL_(struct mq_attr *)]; struct mq_attr * oattr; char oattr_r_[PADR_(struct mq_attr *)];
+};
+struct linux_msgget_args {
+ char key_l_[PADL_(l_key_t)]; l_key_t key; char key_r_[PADR_(l_key_t)];
+ char msgflg_l_[PADL_(l_int)]; l_int msgflg; char msgflg_r_[PADR_(l_int)];
+};
+struct linux_msgctl_args {
+ char msqid_l_[PADL_(l_int)]; l_int msqid; char msqid_r_[PADR_(l_int)];
+ char cmd_l_[PADL_(l_int)]; l_int cmd; char cmd_r_[PADR_(l_int)];
+ char buf_l_[PADL_(struct l_msqid_ds *)]; struct l_msqid_ds * buf; char buf_r_[PADR_(struct l_msqid_ds *)];
+};
+struct linux_msgrcv_args {
+ char msqid_l_[PADL_(l_int)]; l_int msqid; char msqid_r_[PADR_(l_int)];
+ char msgp_l_[PADL_(struct l_msgbuf *)]; struct l_msgbuf * msgp; char msgp_r_[PADR_(struct l_msgbuf *)];
+ char msgsz_l_[PADL_(l_size_t)]; l_size_t msgsz; char msgsz_r_[PADR_(l_size_t)];
+ char msgtyp_l_[PADL_(l_long)]; l_long msgtyp; char msgtyp_r_[PADR_(l_long)];
+ char msgflg_l_[PADL_(l_int)]; l_int msgflg; char msgflg_r_[PADR_(l_int)];
+};
+struct linux_msgsnd_args {
+ char msqid_l_[PADL_(l_int)]; l_int msqid; char msqid_r_[PADR_(l_int)];
+ char msgp_l_[PADL_(struct l_msgbuf *)]; struct l_msgbuf * msgp; char msgp_r_[PADR_(struct l_msgbuf *)];
+ char msgsz_l_[PADL_(l_size_t)]; l_size_t msgsz; char msgsz_r_[PADR_(l_size_t)];
+ char msgflg_l_[PADL_(l_int)]; l_int msgflg; char msgflg_r_[PADR_(l_int)];
+};
+struct linux_semget_args {
+ char key_l_[PADL_(l_key_t)]; l_key_t key; char key_r_[PADR_(l_key_t)];
+ char nsems_l_[PADL_(l_int)]; l_int nsems; char nsems_r_[PADR_(l_int)];
+ char semflg_l_[PADL_(l_int)]; l_int semflg; char semflg_r_[PADR_(l_int)];
+};
+struct linux_semctl_args {
+ char semid_l_[PADL_(l_int)]; l_int semid; char semid_r_[PADR_(l_int)];
+ char semnum_l_[PADL_(l_int)]; l_int semnum; char semnum_r_[PADR_(l_int)];
+ char cmd_l_[PADL_(l_int)]; l_int cmd; char cmd_r_[PADR_(l_int)];
+ char arg_l_[PADL_(union l_semun)]; union l_semun arg; char arg_r_[PADR_(union l_semun)];
+};
+struct linux_semtimedop_args {
+ register_t dummy;
+};
+struct linux_semop_args {
+ char semid_l_[PADL_(l_int)]; l_int semid; char semid_r_[PADR_(l_int)];
+ char tsops_l_[PADL_(struct l_sembuf *)]; struct l_sembuf * tsops; char tsops_r_[PADR_(struct l_sembuf *)];
+ char nsops_l_[PADL_(l_uint)]; l_uint nsops; char nsops_r_[PADR_(l_uint)];
+};
+struct linux_shmget_args {
+ char key_l_[PADL_(l_key_t)]; l_key_t key; char key_r_[PADR_(l_key_t)];
+ char size_l_[PADL_(l_size_t)]; l_size_t size; char size_r_[PADR_(l_size_t)];
+ char shmflg_l_[PADL_(l_int)]; l_int shmflg; char shmflg_r_[PADR_(l_int)];
+};
+struct linux_shmctl_args {
+ char shmid_l_[PADL_(l_int)]; l_int shmid; char shmid_r_[PADR_(l_int)];
+ char cmd_l_[PADL_(l_int)]; l_int cmd; char cmd_r_[PADR_(l_int)];
+ char buf_l_[PADL_(struct l_shmid_ds *)]; struct l_shmid_ds * buf; char buf_r_[PADR_(struct l_shmid_ds *)];
+};
+struct linux_shmat_args {
+ char shmid_l_[PADL_(l_int)]; l_int shmid; char shmid_r_[PADR_(l_int)];
+ char shmaddr_l_[PADL_(char *)]; char * shmaddr; char shmaddr_r_[PADR_(char *)];
+ char shmflg_l_[PADL_(l_int)]; l_int shmflg; char shmflg_r_[PADR_(l_int)];
+};
+struct linux_shmdt_args {
+ char shmaddr_l_[PADL_(char *)]; char * shmaddr; char shmaddr_r_[PADR_(char *)];
+};
+struct linux_socket_args {
+ char domain_l_[PADL_(l_int)]; l_int domain; char domain_r_[PADR_(l_int)];
+ char type_l_[PADL_(l_int)]; l_int type; char type_r_[PADR_(l_int)];
+ char protocol_l_[PADL_(l_int)]; l_int protocol; char protocol_r_[PADR_(l_int)];
+};
+struct linux_socketpair_args {
+ char domain_l_[PADL_(l_int)]; l_int domain; char domain_r_[PADR_(l_int)];
+ char type_l_[PADL_(l_int)]; l_int type; char type_r_[PADR_(l_int)];
+ char protocol_l_[PADL_(l_int)]; l_int protocol; char protocol_r_[PADR_(l_int)];
+ char rsv_l_[PADL_(l_uintptr_t)]; l_uintptr_t rsv; char rsv_r_[PADR_(l_uintptr_t)];
+};
+struct linux_bind_args {
+ char s_l_[PADL_(l_int)]; l_int s; char s_r_[PADR_(l_int)];
+ char name_l_[PADL_(l_uintptr_t)]; l_uintptr_t name; char name_r_[PADR_(l_uintptr_t)];
+ char namelen_l_[PADL_(l_int)]; l_int namelen; char namelen_r_[PADR_(l_int)];
+};
+struct linux_listen_args {
+ char s_l_[PADL_(l_int)]; l_int s; char s_r_[PADR_(l_int)];
+ char backlog_l_[PADL_(l_int)]; l_int backlog; char backlog_r_[PADR_(l_int)];
+};
+struct linux_accept_args {
+ char s_l_[PADL_(l_int)]; l_int s; char s_r_[PADR_(l_int)];
+ char addr_l_[PADL_(l_uintptr_t)]; l_uintptr_t addr; char addr_r_[PADR_(l_uintptr_t)];
+ char namelen_l_[PADL_(l_uintptr_t)]; l_uintptr_t namelen; char namelen_r_[PADR_(l_uintptr_t)];
+};
+struct linux_connect_args {
+ char s_l_[PADL_(l_int)]; l_int s; char s_r_[PADR_(l_int)];
+ char name_l_[PADL_(l_uintptr_t)]; l_uintptr_t name; char name_r_[PADR_(l_uintptr_t)];
+ char namelen_l_[PADL_(l_int)]; l_int namelen; char namelen_r_[PADR_(l_int)];
+};
+struct linux_getsockname_args {
+ char s_l_[PADL_(l_int)]; l_int s; char s_r_[PADR_(l_int)];
+ char addr_l_[PADL_(l_uintptr_t)]; l_uintptr_t addr; char addr_r_[PADR_(l_uintptr_t)];
+ char namelen_l_[PADL_(l_uintptr_t)]; l_uintptr_t namelen; char namelen_r_[PADR_(l_uintptr_t)];
+};
+struct linux_getpeername_args {
+ char s_l_[PADL_(l_int)]; l_int s; char s_r_[PADR_(l_int)];
+ char addr_l_[PADL_(l_uintptr_t)]; l_uintptr_t addr; char addr_r_[PADR_(l_uintptr_t)];
+ char namelen_l_[PADL_(l_uintptr_t)]; l_uintptr_t namelen; char namelen_r_[PADR_(l_uintptr_t)];
+};
+struct linux_sendto_args {
+ char s_l_[PADL_(l_int)]; l_int s; char s_r_[PADR_(l_int)];
+ char msg_l_[PADL_(l_uintptr_t)]; l_uintptr_t msg; char msg_r_[PADR_(l_uintptr_t)];
+ char len_l_[PADL_(l_size_t)]; l_size_t len; char len_r_[PADR_(l_size_t)];
+ char flags_l_[PADL_(l_uint)]; l_uint flags; char flags_r_[PADR_(l_uint)];
+ char to_l_[PADL_(l_uintptr_t)]; l_uintptr_t to; char to_r_[PADR_(l_uintptr_t)];
+ char tolen_l_[PADL_(l_int)]; l_int tolen; char tolen_r_[PADR_(l_int)];
+};
+struct linux_recvfrom_args {
+ char s_l_[PADL_(l_int)]; l_int s; char s_r_[PADR_(l_int)];
+ char buf_l_[PADL_(l_uintptr_t)]; l_uintptr_t buf; char buf_r_[PADR_(l_uintptr_t)];
+ char len_l_[PADL_(l_size_t)]; l_size_t len; char len_r_[PADR_(l_size_t)];
+ char flags_l_[PADL_(l_uint)]; l_uint flags; char flags_r_[PADR_(l_uint)];
+ char from_l_[PADL_(l_uintptr_t)]; l_uintptr_t from; char from_r_[PADR_(l_uintptr_t)];
+ char fromlen_l_[PADL_(l_uintptr_t)]; l_uintptr_t fromlen; char fromlen_r_[PADR_(l_uintptr_t)];
+};
+struct linux_setsockopt_args {
+ char s_l_[PADL_(l_int)]; l_int s; char s_r_[PADR_(l_int)];
+ char level_l_[PADL_(l_int)]; l_int level; char level_r_[PADR_(l_int)];
+ char optname_l_[PADL_(l_int)]; l_int optname; char optname_r_[PADR_(l_int)];
+ char optval_l_[PADL_(l_uintptr_t)]; l_uintptr_t optval; char optval_r_[PADR_(l_uintptr_t)];
+ char optlen_l_[PADL_(l_int)]; l_int optlen; char optlen_r_[PADR_(l_int)];
+};
+struct linux_getsockopt_args {
+ char s_l_[PADL_(l_int)]; l_int s; char s_r_[PADR_(l_int)];
+ char level_l_[PADL_(l_int)]; l_int level; char level_r_[PADR_(l_int)];
+ char optname_l_[PADL_(l_int)]; l_int optname; char optname_r_[PADR_(l_int)];
+ char optval_l_[PADL_(l_uintptr_t)]; l_uintptr_t optval; char optval_r_[PADR_(l_uintptr_t)];
+ char optlen_l_[PADL_(l_uintptr_t)]; l_uintptr_t optlen; char optlen_r_[PADR_(l_uintptr_t)];
+};
+struct linux_shutdown_args {
+ char s_l_[PADL_(l_int)]; l_int s; char s_r_[PADR_(l_int)];
+ char how_l_[PADL_(l_int)]; l_int how; char how_r_[PADR_(l_int)];
+};
+struct linux_sendmsg_args {
+ char s_l_[PADL_(l_int)]; l_int s; char s_r_[PADR_(l_int)];
+ char msg_l_[PADL_(l_uintptr_t)]; l_uintptr_t msg; char msg_r_[PADR_(l_uintptr_t)];
+ char flags_l_[PADL_(l_uint)]; l_uint flags; char flags_r_[PADR_(l_uint)];
+};
+struct linux_recvmsg_args {
+ char s_l_[PADL_(l_int)]; l_int s; char s_r_[PADR_(l_int)];
+ char msg_l_[PADL_(l_uintptr_t)]; l_uintptr_t msg; char msg_r_[PADR_(l_uintptr_t)];
+ char flags_l_[PADL_(l_uint)]; l_uint flags; char flags_r_[PADR_(l_uint)];
+};
+struct linux_brk_args {
+ char dsend_l_[PADL_(l_ulong)]; l_ulong dsend; char dsend_r_[PADR_(l_ulong)];
+};
+struct linux_mremap_args {
+ char addr_l_[PADL_(l_ulong)]; l_ulong addr; char addr_r_[PADR_(l_ulong)];
+ char old_len_l_[PADL_(l_ulong)]; l_ulong old_len; char old_len_r_[PADR_(l_ulong)];
+ char new_len_l_[PADL_(l_ulong)]; l_ulong new_len; char new_len_r_[PADR_(l_ulong)];
+ char flags_l_[PADL_(l_ulong)]; l_ulong flags; char flags_r_[PADR_(l_ulong)];
+ char new_addr_l_[PADL_(l_ulong)]; l_ulong new_addr; char new_addr_r_[PADR_(l_ulong)];
+};
+struct linux_add_key_args {
+ register_t dummy;
+};
+struct linux_request_key_args {
+ register_t dummy;
+};
+struct linux_keyctl_args {
+ register_t dummy;
+};
+struct linux_clone_args {
+ char flags_l_[PADL_(l_ulong)]; l_ulong flags; char flags_r_[PADR_(l_ulong)];
+ char stack_l_[PADL_(void *)]; void * stack; char stack_r_[PADR_(void *)];
+ char parent_tidptr_l_[PADL_(void *)]; void * parent_tidptr; char parent_tidptr_r_[PADR_(void *)];
+ char tls_l_[PADL_(void *)]; void * tls; char tls_r_[PADR_(void *)];
+ char child_tidptr_l_[PADL_(void *)]; void * child_tidptr; char child_tidptr_r_[PADR_(void *)];
+};
+struct linux_execve_args {
+ char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)];
+ char argp_l_[PADL_(char **)]; char ** argp; char argp_r_[PADR_(char **)];
+ char envp_l_[PADL_(char **)]; char ** envp; char envp_r_[PADR_(char **)];
+};
+struct linux_mmap2_args {
+ char addr_l_[PADL_(l_ulong)]; l_ulong addr; char addr_r_[PADR_(l_ulong)];
+ char len_l_[PADL_(l_ulong)]; l_ulong len; char len_r_[PADR_(l_ulong)];
+ char prot_l_[PADL_(l_ulong)]; l_ulong prot; char prot_r_[PADR_(l_ulong)];
+ char flags_l_[PADL_(l_ulong)]; l_ulong flags; char flags_r_[PADR_(l_ulong)];
+ char fd_l_[PADL_(l_ulong)]; l_ulong fd; char fd_r_[PADR_(l_ulong)];
+ char pgoff_l_[PADL_(l_ulong)]; l_ulong pgoff; char pgoff_r_[PADR_(l_ulong)];
+};
+struct linux_fadvise64_args {
+ char fd_l_[PADL_(l_int)]; l_int fd; char fd_r_[PADR_(l_int)];
+ char offset_l_[PADL_(l_loff_t)]; l_loff_t offset; char offset_r_[PADR_(l_loff_t)];
+ char len_l_[PADL_(l_size_t)]; l_size_t len; char len_r_[PADR_(l_size_t)];
+ char advice_l_[PADL_(l_int)]; l_int advice; char advice_r_[PADR_(l_int)];
+};
+struct linux_swapoff_args {
+ register_t dummy;
+};
+struct linux_mprotect_args {
+ char addr_l_[PADL_(l_ulong)]; l_ulong addr; char addr_r_[PADR_(l_ulong)];
+ char len_l_[PADL_(l_size_t)]; l_size_t len; char len_r_[PADR_(l_size_t)];
+ char prot_l_[PADL_(l_ulong)]; l_ulong prot; char prot_r_[PADR_(l_ulong)];
+};
+struct linux_msync_args {
+ char addr_l_[PADL_(l_ulong)]; l_ulong addr; char addr_r_[PADR_(l_ulong)];
+ char len_l_[PADL_(l_size_t)]; l_size_t len; char len_r_[PADR_(l_size_t)];
+ char fl_l_[PADL_(l_int)]; l_int fl; char fl_r_[PADR_(l_int)];
+};
+struct linux_mincore_args {
+ char start_l_[PADL_(l_ulong)]; l_ulong start; char start_r_[PADR_(l_ulong)];
+ char len_l_[PADL_(l_size_t)]; l_size_t len; char len_r_[PADR_(l_size_t)];
+ char vec_l_[PADL_(u_char *)]; u_char * vec; char vec_r_[PADR_(u_char *)];
+};
+struct linux_madvise_args {
+ char addr_l_[PADL_(l_ulong)]; l_ulong addr; char addr_r_[PADR_(l_ulong)];
+ char len_l_[PADL_(l_size_t)]; l_size_t len; char len_r_[PADR_(l_size_t)];
+ char behav_l_[PADL_(l_int)]; l_int behav; char behav_r_[PADR_(l_int)];
+};
+struct linux_remap_file_pages_args {
+ register_t dummy;
+};
+struct linux_mbind_args {
+ register_t dummy;
+};
+struct linux_get_mempolicy_args {
+ register_t dummy;
+};
+struct linux_set_mempolicy_args {
+ register_t dummy;
+};
+struct linux_migrate_pages_args {
+ register_t dummy;
+};
+struct linux_move_pages_args {
+ register_t dummy;
+};
+struct linux_rt_tgsigqueueinfo_args {
+ char tgid_l_[PADL_(l_pid_t)]; l_pid_t tgid; char tgid_r_[PADR_(l_pid_t)];
+ char tid_l_[PADL_(l_pid_t)]; l_pid_t tid; char tid_r_[PADR_(l_pid_t)];
+ char sig_l_[PADL_(l_int)]; l_int sig; char sig_r_[PADR_(l_int)];
+ char uinfo_l_[PADL_(l_siginfo_t *)]; l_siginfo_t * uinfo; char uinfo_r_[PADR_(l_siginfo_t *)];
+};
+struct linux_perf_event_open_args {
+ register_t dummy;
+};
+struct linux_accept4_args {
+ char s_l_[PADL_(l_int)]; l_int s; char s_r_[PADR_(l_int)];
+ char addr_l_[PADL_(l_uintptr_t)]; l_uintptr_t addr; char addr_r_[PADR_(l_uintptr_t)];
+ char namelen_l_[PADL_(l_uintptr_t)]; l_uintptr_t namelen; char namelen_r_[PADR_(l_uintptr_t)];
+ char flags_l_[PADL_(l_int)]; l_int flags; char flags_r_[PADR_(l_int)];
+};
+struct linux_recvmmsg_args {
+ char s_l_[PADL_(l_int)]; l_int s; char s_r_[PADR_(l_int)];
+ char msg_l_[PADL_(struct l_mmsghdr *)]; struct l_mmsghdr * msg; char msg_r_[PADR_(struct l_mmsghdr *)];
+ char vlen_l_[PADL_(l_uint)]; l_uint vlen; char vlen_r_[PADR_(l_uint)];
+ char flags_l_[PADL_(l_uint)]; l_uint flags; char flags_r_[PADR_(l_uint)];
+ char timeout_l_[PADL_(struct l_timespec *)]; struct l_timespec * timeout; char timeout_r_[PADR_(struct l_timespec *)];
+};
+struct linux_wait4_args {
+ char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
+ char status_l_[PADL_(l_int *)]; l_int * status; char status_r_[PADR_(l_int *)];
+ char options_l_[PADL_(l_int)]; l_int options; char options_r_[PADR_(l_int)];
+ char rusage_l_[PADL_(struct rusage *)]; struct rusage * rusage; char rusage_r_[PADR_(struct rusage *)];
+};
+struct linux_prlimit64_args {
+ char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
+ char resource_l_[PADL_(l_uint)]; l_uint resource; char resource_r_[PADR_(l_uint)];
+ char new_l_[PADL_(struct rlimit *)]; struct rlimit * new; char new_r_[PADR_(struct rlimit *)];
+ char old_l_[PADL_(struct rlimit *)]; struct rlimit * old; char old_r_[PADR_(struct rlimit *)];
+};
+struct linux_fanotify_init_args {
+ register_t dummy;
+};
+struct linux_fanotify_mark_args {
+ register_t dummy;
+};
+struct linux_name_to_handle_at_args {
+ register_t dummy;
+};
+struct linux_open_by_handle_at_args {
+ register_t dummy;
+};
+struct linux_clock_adjtime_args {
+ register_t dummy;
+};
+struct linux_syncfs_args {
+ char fd_l_[PADL_(l_int)]; l_int fd; char fd_r_[PADR_(l_int)];
+};
+struct linux_setns_args {
+ char fd_l_[PADL_(l_int)]; l_int fd; char fd_r_[PADR_(l_int)];
+ char nstype_l_[PADL_(l_int)]; l_int nstype; char nstype_r_[PADR_(l_int)];
+};
+struct linux_sendmmsg_args {
+ char s_l_[PADL_(l_int)]; l_int s; char s_r_[PADR_(l_int)];
+ char msg_l_[PADL_(struct l_mmsghdr *)]; struct l_mmsghdr * msg; char msg_r_[PADR_(struct l_mmsghdr *)];
+ char vlen_l_[PADL_(l_uint)]; l_uint vlen; char vlen_r_[PADR_(l_uint)];
+ char flags_l_[PADL_(l_uint)]; l_uint flags; char flags_r_[PADR_(l_uint)];
+};
+struct linux_process_vm_readv_args {
+ char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
+ char lvec_l_[PADL_(const struct iovec *)]; const struct iovec * lvec; char lvec_r_[PADR_(const struct iovec *)];
+ char liovcnt_l_[PADL_(l_ulong)]; l_ulong liovcnt; char liovcnt_r_[PADR_(l_ulong)];
+ char rvec_l_[PADL_(const struct iovec *)]; const struct iovec * rvec; char rvec_r_[PADR_(const struct iovec *)];
+ char riovcnt_l_[PADL_(l_ulong)]; l_ulong riovcnt; char riovcnt_r_[PADR_(l_ulong)];
+ char flags_l_[PADL_(l_ulong)]; l_ulong flags; char flags_r_[PADR_(l_ulong)];
+};
+struct linux_process_vm_writev_args {
+ char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
+ char lvec_l_[PADL_(const struct iovec *)]; const struct iovec * lvec; char lvec_r_[PADR_(const struct iovec *)];
+ char liovcnt_l_[PADL_(l_ulong)]; l_ulong liovcnt; char liovcnt_r_[PADR_(l_ulong)];
+ char rvec_l_[PADL_(const struct iovec *)]; const struct iovec * rvec; char rvec_r_[PADR_(const struct iovec *)];
+ char riovcnt_l_[PADL_(l_ulong)]; l_ulong riovcnt; char riovcnt_r_[PADR_(l_ulong)];
+ char flags_l_[PADL_(l_ulong)]; l_ulong flags; char flags_r_[PADR_(l_ulong)];
+};
+struct linux_kcmp_args {
+ char pid1_l_[PADL_(l_pid_t)]; l_pid_t pid1; char pid1_r_[PADR_(l_pid_t)];
+ char pid2_l_[PADL_(l_pid_t)]; l_pid_t pid2; char pid2_r_[PADR_(l_pid_t)];
+ char type_l_[PADL_(l_int)]; l_int type; char type_r_[PADR_(l_int)];
+ char idx1_l_[PADL_(l_ulong)]; l_ulong idx1; char idx1_r_[PADR_(l_ulong)];
+ char idx_l_[PADL_(l_ulong)]; l_ulong idx; char idx_r_[PADR_(l_ulong)];
+};
+struct linux_finit_module_args {
+ char fd_l_[PADL_(l_int)]; l_int fd; char fd_r_[PADR_(l_int)];
+ char uargs_l_[PADL_(const char *)]; const char * uargs; char uargs_r_[PADR_(const char *)];
+ char flags_l_[PADL_(l_int)]; l_int flags; char flags_r_[PADR_(l_int)];
+};
+struct linux_sched_setattr_args {
+ char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
+ char attr_l_[PADL_(void *)]; void * attr; char attr_r_[PADR_(void *)];
+ char flags_l_[PADL_(l_uint)]; l_uint flags; char flags_r_[PADR_(l_uint)];
+};
+struct linux_sched_getattr_args {
+ char pid_l_[PADL_(l_pid_t)]; l_pid_t pid; char pid_r_[PADR_(l_pid_t)];
+ char attr_l_[PADL_(void *)]; void * attr; char attr_r_[PADR_(void *)];
+ char size_l_[PADL_(l_uint)]; l_uint size; char size_r_[PADR_(l_uint)];
+ char flags_l_[PADL_(l_uint)]; l_uint flags; char flags_r_[PADR_(l_uint)];
+};
+struct linux_renameat2_args {
+ char olddfd_l_[PADL_(l_int)]; l_int olddfd; char olddfd_r_[PADR_(l_int)];
+ char oldname_l_[PADL_(const char *)]; const char * oldname; char oldname_r_[PADR_(const char *)];
+ char newdfd_l_[PADL_(l_int)]; l_int newdfd; char newdfd_r_[PADR_(l_int)];
+ char newname_l_[PADL_(const char *)]; const char * newname; char newname_r_[PADR_(const char *)];
+ char flags_l_[PADL_(l_uint)]; l_uint flags; char flags_r_[PADR_(l_uint)];
+};
+struct linux_seccomp_args {
+ char op_l_[PADL_(l_uint)]; l_uint op; char op_r_[PADR_(l_uint)];
+ char flags_l_[PADL_(l_uint)]; l_uint flags; char flags_r_[PADR_(l_uint)];
+ char uargs_l_[PADL_(const char *)]; const char * uargs; char uargs_r_[PADR_(const char *)];
+};
+struct linux_getrandom_args {
+ char buf_l_[PADL_(char *)]; char * buf; char buf_r_[PADR_(char *)];
+ char count_l_[PADL_(l_size_t)]; l_size_t count; char count_r_[PADR_(l_size_t)];
+ char flags_l_[PADL_(l_uint)]; l_uint flags; char flags_r_[PADR_(l_uint)];
+};
+struct linux_memfd_create_args {
+ char uname_ptr_l_[PADL_(const char *)]; const char * uname_ptr; char uname_ptr_r_[PADR_(const char *)];
+ char flags_l_[PADL_(l_uint)]; l_uint flags; char flags_r_[PADR_(l_uint)];
+};
+struct linux_bpf_args {
+ char cmd_l_[PADL_(l_int)]; l_int cmd; char cmd_r_[PADR_(l_int)];
+ char attr_l_[PADL_(void *)]; void * attr; char attr_r_[PADR_(void *)];
+ char size_l_[PADL_(l_uint)]; l_uint size; char size_r_[PADR_(l_uint)];
+};
+struct linux_execveat_args {
+ char dfd_l_[PADL_(l_int)]; l_int dfd; char dfd_r_[PADR_(l_int)];
+ char filename_l_[PADL_(const char *)]; const char * filename; char filename_r_[PADR_(const char *)];
+ char argv_l_[PADL_(const char **)]; const char ** argv; char argv_r_[PADR_(const char **)];
+ char envp_l_[PADL_(const char **)]; const char ** envp; char envp_r_[PADR_(const char **)];
+ char flags_l_[PADL_(l_int)]; l_int flags; char flags_r_[PADR_(l_int)];
+};
+struct linux_userfaultfd_args {
+ char flags_l_[PADL_(l_int)]; l_int flags; char flags_r_[PADR_(l_int)];
+};
+struct linux_membarrier_args {
+ char cmd_l_[PADL_(l_int)]; l_int cmd; char cmd_r_[PADR_(l_int)];
+ char flags_l_[PADL_(l_int)]; l_int flags; char flags_r_[PADR_(l_int)];
+};
+struct linux_mlock2_args {
+ char start_l_[PADL_(l_ulong)]; l_ulong start; char start_r_[PADR_(l_ulong)];
+ char len_l_[PADL_(l_size_t)]; l_size_t len; char len_r_[PADR_(l_size_t)];
+ char flags_l_[PADL_(l_int)]; l_int flags; char flags_r_[PADR_(l_int)];
+};
+struct linux_copy_file_range_args {
+ char fd_in_l_[PADL_(l_int)]; l_int fd_in; char fd_in_r_[PADR_(l_int)];
+ char off_in_l_[PADL_(l_loff_t *)]; l_loff_t * off_in; char off_in_r_[PADR_(l_loff_t *)];
+ char fd_out_l_[PADL_(l_int)]; l_int fd_out; char fd_out_r_[PADR_(l_int)];
+ char off_out_l_[PADL_(l_loff_t *)]; l_loff_t * off_out; char off_out_r_[PADR_(l_loff_t *)];
+ char len_l_[PADL_(l_size_t)]; l_size_t len; char len_r_[PADR_(l_size_t)];
+ char flags_l_[PADL_(l_uint)]; l_uint flags; char flags_r_[PADR_(l_uint)];
+};
+struct linux_preadv2_args {
+ char fd_l_[PADL_(l_ulong)]; l_ulong fd; char fd_r_[PADR_(l_ulong)];
+ char vec_l_[PADL_(const struct iovec *)]; const struct iovec * vec; char vec_r_[PADR_(const struct iovec *)];
+ char vlen_l_[PADL_(l_ulong)]; l_ulong vlen; char vlen_r_[PADR_(l_ulong)];
+ char pos_l_l_[PADL_(l_ulong)]; l_ulong pos_l; char pos_l_r_[PADR_(l_ulong)];
+ char pos_h_l_[PADL_(l_ulong)]; l_ulong pos_h; char pos_h_r_[PADR_(l_ulong)];
+ char flags_l_[PADL_(l_int)]; l_int flags; char flags_r_[PADR_(l_int)];
+};
+struct linux_pwritev2_args {
+ char fd_l_[PADL_(l_ulong)]; l_ulong fd; char fd_r_[PADR_(l_ulong)];
+ char vec_l_[PADL_(const struct iovec *)]; const struct iovec * vec; char vec_r_[PADR_(const struct iovec *)];
+ char vlen_l_[PADL_(l_ulong)]; l_ulong vlen; char vlen_r_[PADR_(l_ulong)];
+ char pos_l_l_[PADL_(l_ulong)]; l_ulong pos_l; char pos_l_r_[PADR_(l_ulong)];
+ char pos_h_l_[PADL_(l_ulong)]; l_ulong pos_h; char pos_h_r_[PADR_(l_ulong)];
+ char flags_l_[PADL_(l_int)]; l_int flags; char flags_r_[PADR_(l_int)];
+};
+struct linux_pkey_mprotect_args {
+ char start_l_[PADL_(l_ulong)]; l_ulong start; char start_r_[PADR_(l_ulong)];
+ char len_l_[PADL_(l_size_t)]; l_size_t len; char len_r_[PADR_(l_size_t)];
+ char prot_l_[PADL_(l_ulong)]; l_ulong prot; char prot_r_[PADR_(l_ulong)];
+ char pkey_l_[PADL_(l_int)]; l_int pkey; char pkey_r_[PADR_(l_int)];
+};
+struct linux_pkey_alloc_args {
+ char flags_l_[PADL_(l_ulong)]; l_ulong flags; char flags_r_[PADR_(l_ulong)];
+ char init_val_l_[PADL_(l_ulong)]; l_ulong init_val; char init_val_r_[PADR_(l_ulong)];
+};
+struct linux_pkey_free_args {
+ char pkey_l_[PADL_(l_int)]; l_int pkey; char pkey_r_[PADR_(l_int)];
+};
+#define nosys linux_nosys
+int linux_setxattr(struct thread *, struct linux_setxattr_args *);
+int linux_lsetxattr(struct thread *, struct linux_lsetxattr_args *);
+int linux_fsetxattr(struct thread *, struct linux_fsetxattr_args *);
+int linux_getxattr(struct thread *, struct linux_getxattr_args *);
+int linux_lgetxattr(struct thread *, struct linux_lgetxattr_args *);
+int linux_fgetxattr(struct thread *, struct linux_fgetxattr_args *);
+int linux_listxattr(struct thread *, struct linux_listxattr_args *);
+int linux_llistxattr(struct thread *, struct linux_llistxattr_args *);
+int linux_flistxattr(struct thread *, struct linux_flistxattr_args *);
+int linux_removexattr(struct thread *, struct linux_removexattr_args *);
+int linux_lremovexattr(struct thread *, struct linux_lremovexattr_args *);
+int linux_fremovexattr(struct thread *, struct linux_fremovexattr_args *);
+int linux_getcwd(struct thread *, struct linux_getcwd_args *);
+int linux_lookup_dcookie(struct thread *, struct linux_lookup_dcookie_args *);
+int linux_eventfd2(struct thread *, struct linux_eventfd2_args *);
+int linux_epoll_create1(struct thread *, struct linux_epoll_create1_args *);
+int linux_epoll_ctl(struct thread *, struct linux_epoll_ctl_args *);
+int linux_epoll_pwait(struct thread *, struct linux_epoll_pwait_args *);
+int linux_dup3(struct thread *, struct linux_dup3_args *);
+int linux_fcntl(struct thread *, struct linux_fcntl_args *);
+int linux_inotify_init1(struct thread *, struct linux_inotify_init1_args *);
+int linux_inotify_add_watch(struct thread *, struct linux_inotify_add_watch_args *);
+int linux_inotify_rm_watch(struct thread *, struct linux_inotify_rm_watch_args *);
+int linux_ioctl(struct thread *, struct linux_ioctl_args *);
+int linux_ioprio_set(struct thread *, struct linux_ioprio_set_args *);
+int linux_ioprio_get(struct thread *, struct linux_ioprio_get_args *);
+int linux_mknodat(struct thread *, struct linux_mknodat_args *);
+int linux_mkdirat(struct thread *, struct linux_mkdirat_args *);
+int linux_unlinkat(struct thread *, struct linux_unlinkat_args *);
+int linux_symlinkat(struct thread *, struct linux_symlinkat_args *);
+int linux_linkat(struct thread *, struct linux_linkat_args *);
+int linux_renameat(struct thread *, struct linux_renameat_args *);
+int linux_mount(struct thread *, struct linux_mount_args *);
+int linux_pivot_root(struct thread *, struct linux_pivot_root_args *);
+int linux_statfs(struct thread *, struct linux_statfs_args *);
+int linux_fstatfs(struct thread *, struct linux_fstatfs_args *);
+int linux_truncate(struct thread *, struct linux_truncate_args *);
+int linux_ftruncate(struct thread *, struct linux_ftruncate_args *);
+int linux_fallocate(struct thread *, struct linux_fallocate_args *);
+int linux_faccessat(struct thread *, struct linux_faccessat_args *);
+int linux_chdir(struct thread *, struct linux_chdir_args *);
+int linux_fchmodat(struct thread *, struct linux_fchmodat_args *);
+int linux_fchownat(struct thread *, struct linux_fchownat_args *);
+int linux_openat(struct thread *, struct linux_openat_args *);
+int linux_vhangup(struct thread *, struct linux_vhangup_args *);
+int linux_pipe2(struct thread *, struct linux_pipe2_args *);
+int linux_getdents64(struct thread *, struct linux_getdents64_args *);
+int linux_lseek(struct thread *, struct linux_lseek_args *);
+int linux_pread(struct thread *, struct linux_pread_args *);
+int linux_pwrite(struct thread *, struct linux_pwrite_args *);
+int linux_preadv(struct thread *, struct linux_preadv_args *);
+int linux_pwritev(struct thread *, struct linux_pwritev_args *);
+int linux_sendfile(struct thread *, struct linux_sendfile_args *);
+int linux_pselect6(struct thread *, struct linux_pselect6_args *);
+int linux_ppoll(struct thread *, struct linux_ppoll_args *);
+int linux_signalfd4(struct thread *, struct linux_signalfd4_args *);
+int linux_vmsplice(struct thread *, struct linux_vmsplice_args *);
+int linux_splice(struct thread *, struct linux_splice_args *);
+int linux_tee(struct thread *, struct linux_tee_args *);
+int linux_readlinkat(struct thread *, struct linux_readlinkat_args *);
+int linux_newfstatat(struct thread *, struct linux_newfstatat_args *);
+int linux_newfstat(struct thread *, struct linux_newfstat_args *);
+int linux_fdatasync(struct thread *, struct linux_fdatasync_args *);
+int linux_sync_file_range(struct thread *, struct linux_sync_file_range_args *);
+int linux_timerfd_create(struct thread *, struct linux_timerfd_create_args *);
+int linux_timerfd_settime(struct thread *, struct linux_timerfd_settime_args *);
+int linux_timerfd_gettime(struct thread *, struct linux_timerfd_gettime_args *);
+int linux_utimensat(struct thread *, struct linux_utimensat_args *);
+int linux_capget(struct thread *, struct linux_capget_args *);
+int linux_capset(struct thread *, struct linux_capset_args *);
+int linux_personality(struct thread *, struct linux_personality_args *);
+int linux_exit(struct thread *, struct linux_exit_args *);
+int linux_exit_group(struct thread *, struct linux_exit_group_args *);
+int linux_waitid(struct thread *, struct linux_waitid_args *);
+int linux_set_tid_address(struct thread *, struct linux_set_tid_address_args *);
+int linux_unshare(struct thread *, struct linux_unshare_args *);
+int linux_sys_futex(struct thread *, struct linux_sys_futex_args *);
+int linux_set_robust_list(struct thread *, struct linux_set_robust_list_args *);
+int linux_get_robust_list(struct thread *, struct linux_get_robust_list_args *);
+int linux_nanosleep(struct thread *, struct linux_nanosleep_args *);
+int linux_getitimer(struct thread *, struct linux_getitimer_args *);
+int linux_setitimer(struct thread *, struct linux_setitimer_args *);
+int linux_kexec_load(struct thread *, struct linux_kexec_load_args *);
+int linux_init_module(struct thread *, struct linux_init_module_args *);
+int linux_delete_module(struct thread *, struct linux_delete_module_args *);
+int linux_timer_create(struct thread *, struct linux_timer_create_args *);
+int linux_timer_gettime(struct thread *, struct linux_timer_gettime_args *);
+int linux_timer_getoverrun(struct thread *, struct linux_timer_getoverrun_args *);
+int linux_timer_settime(struct thread *, struct linux_timer_settime_args *);
+int linux_timer_delete(struct thread *, struct linux_timer_delete_args *);
+int linux_clock_settime(struct thread *, struct linux_clock_settime_args *);
+int linux_clock_gettime(struct thread *, struct linux_clock_gettime_args *);
+int linux_clock_getres(struct thread *, struct linux_clock_getres_args *);
+int linux_clock_nanosleep(struct thread *, struct linux_clock_nanosleep_args *);
+int linux_syslog(struct thread *, struct linux_syslog_args *);
+int linux_ptrace(struct thread *, struct linux_ptrace_args *);
+int linux_sched_setparam(struct thread *, struct linux_sched_setparam_args *);
+int linux_sched_setscheduler(struct thread *, struct linux_sched_setscheduler_args *);
+int linux_sched_getscheduler(struct thread *, struct linux_sched_getscheduler_args *);
+int linux_sched_getparam(struct thread *, struct linux_sched_getparam_args *);
+int linux_sched_setaffinity(struct thread *, struct linux_sched_setaffinity_args *);
+int linux_sched_getaffinity(struct thread *, struct linux_sched_getaffinity_args *);
+int linux_sched_get_priority_max(struct thread *, struct linux_sched_get_priority_max_args *);
+int linux_sched_get_priority_min(struct thread *, struct linux_sched_get_priority_min_args *);
+int linux_sched_rr_get_interval(struct thread *, struct linux_sched_rr_get_interval_args *);
+int linux_kill(struct thread *, struct linux_kill_args *);
+int linux_tkill(struct thread *, struct linux_tkill_args *);
+int linux_tgkill(struct thread *, struct linux_tgkill_args *);
+int linux_sigaltstack(struct thread *, struct linux_sigaltstack_args *);
+int linux_rt_sigsuspend(struct thread *, struct linux_rt_sigsuspend_args *);
+int linux_rt_sigaction(struct thread *, struct linux_rt_sigaction_args *);
+int linux_rt_sigprocmask(struct thread *, struct linux_rt_sigprocmask_args *);
+int linux_rt_sigpending(struct thread *, struct linux_rt_sigpending_args *);
+int linux_rt_sigtimedwait(struct thread *, struct linux_rt_sigtimedwait_args *);
+int linux_rt_sigqueueinfo(struct thread *, struct linux_rt_sigqueueinfo_args *);
+int linux_rt_sigreturn(struct thread *, struct linux_rt_sigreturn_args *);
+int linux_getpriority(struct thread *, struct linux_getpriority_args *);
+int linux_reboot(struct thread *, struct linux_reboot_args *);
+int linux_setfsuid(struct thread *, struct linux_setfsuid_args *);
+int linux_setfsgid(struct thread *, struct linux_setfsgid_args *);
+int linux_times(struct thread *, struct linux_times_args *);
+int linux_getsid(struct thread *, struct linux_getsid_args *);
+int linux_getgroups(struct thread *, struct linux_getgroups_args *);
+int linux_setgroups(struct thread *, struct linux_setgroups_args *);
+int linux_newuname(struct thread *, struct linux_newuname_args *);
+int linux_sethostname(struct thread *, struct linux_sethostname_args *);
+int linux_setdomainname(struct thread *, struct linux_setdomainname_args *);
+int linux_getrlimit(struct thread *, struct linux_getrlimit_args *);
+int linux_setrlimit(struct thread *, struct linux_setrlimit_args *);
+int linux_prctl(struct thread *, struct linux_prctl_args *);
+int linux_getcpu(struct thread *, struct linux_getcpu_args *);
+int linux_adjtimex(struct thread *, struct linux_adjtimex_args *);
+int linux_getpid(struct thread *, struct linux_getpid_args *);
+int linux_getppid(struct thread *, struct linux_getppid_args *);
+int linux_getuid(struct thread *, struct linux_getuid_args *);
+int linux_getgid(struct thread *, struct linux_getgid_args *);
+int linux_gettid(struct thread *, struct linux_gettid_args *);
+int linux_sysinfo(struct thread *, struct linux_sysinfo_args *);
+int linux_mq_open(struct thread *, struct linux_mq_open_args *);
+int linux_mq_unlink(struct thread *, struct linux_mq_unlink_args *);
+int linux_mq_timedsend(struct thread *, struct linux_mq_timedsend_args *);
+int linux_mq_timedreceive(struct thread *, struct linux_mq_timedreceive_args *);
+int linux_mq_notify(struct thread *, struct linux_mq_notify_args *);
+int linux_mq_getsetattr(struct thread *, struct linux_mq_getsetattr_args *);
+int linux_msgget(struct thread *, struct linux_msgget_args *);
+int linux_msgctl(struct thread *, struct linux_msgctl_args *);
+int linux_msgrcv(struct thread *, struct linux_msgrcv_args *);
+int linux_msgsnd(struct thread *, struct linux_msgsnd_args *);
+int linux_semget(struct thread *, struct linux_semget_args *);
+int linux_semctl(struct thread *, struct linux_semctl_args *);
+int linux_semtimedop(struct thread *, struct linux_semtimedop_args *);
+int linux_semop(struct thread *, struct linux_semop_args *);
+int linux_shmget(struct thread *, struct linux_shmget_args *);
+int linux_shmctl(struct thread *, struct linux_shmctl_args *);
+int linux_shmat(struct thread *, struct linux_shmat_args *);
+int linux_shmdt(struct thread *, struct linux_shmdt_args *);
+int linux_socket(struct thread *, struct linux_socket_args *);
+int linux_socketpair(struct thread *, struct linux_socketpair_args *);
+int linux_bind(struct thread *, struct linux_bind_args *);
+int linux_listen(struct thread *, struct linux_listen_args *);
+int linux_accept(struct thread *, struct linux_accept_args *);
+int linux_connect(struct thread *, struct linux_connect_args *);
+int linux_getsockname(struct thread *, struct linux_getsockname_args *);
+int linux_getpeername(struct thread *, struct linux_getpeername_args *);
+int linux_sendto(struct thread *, struct linux_sendto_args *);
+int linux_recvfrom(struct thread *, struct linux_recvfrom_args *);
+int linux_setsockopt(struct thread *, struct linux_setsockopt_args *);
+int linux_getsockopt(struct thread *, struct linux_getsockopt_args *);
+int linux_shutdown(struct thread *, struct linux_shutdown_args *);
+int linux_sendmsg(struct thread *, struct linux_sendmsg_args *);
+int linux_recvmsg(struct thread *, struct linux_recvmsg_args *);
+int linux_brk(struct thread *, struct linux_brk_args *);
+int linux_mremap(struct thread *, struct linux_mremap_args *);
+int linux_add_key(struct thread *, struct linux_add_key_args *);
+int linux_request_key(struct thread *, struct linux_request_key_args *);
+int linux_keyctl(struct thread *, struct linux_keyctl_args *);
+int linux_clone(struct thread *, struct linux_clone_args *);
+int linux_execve(struct thread *, struct linux_execve_args *);
+int linux_mmap2(struct thread *, struct linux_mmap2_args *);
+int linux_fadvise64(struct thread *, struct linux_fadvise64_args *);
+int linux_swapoff(struct thread *, struct linux_swapoff_args *);
+int linux_mprotect(struct thread *, struct linux_mprotect_args *);
+int linux_msync(struct thread *, struct linux_msync_args *);
+int linux_mincore(struct thread *, struct linux_mincore_args *);
+int linux_madvise(struct thread *, struct linux_madvise_args *);
+int linux_remap_file_pages(struct thread *, struct linux_remap_file_pages_args *);
+int linux_mbind(struct thread *, struct linux_mbind_args *);
+int linux_get_mempolicy(struct thread *, struct linux_get_mempolicy_args *);
+int linux_set_mempolicy(struct thread *, struct linux_set_mempolicy_args *);
+int linux_migrate_pages(struct thread *, struct linux_migrate_pages_args *);
+int linux_move_pages(struct thread *, struct linux_move_pages_args *);
+int linux_rt_tgsigqueueinfo(struct thread *, struct linux_rt_tgsigqueueinfo_args *);
+int linux_perf_event_open(struct thread *, struct linux_perf_event_open_args *);
+int linux_accept4(struct thread *, struct linux_accept4_args *);
+int linux_recvmmsg(struct thread *, struct linux_recvmmsg_args *);
+int linux_wait4(struct thread *, struct linux_wait4_args *);
+int linux_prlimit64(struct thread *, struct linux_prlimit64_args *);
+int linux_fanotify_init(struct thread *, struct linux_fanotify_init_args *);
+int linux_fanotify_mark(struct thread *, struct linux_fanotify_mark_args *);
+int linux_name_to_handle_at(struct thread *, struct linux_name_to_handle_at_args *);
+int linux_open_by_handle_at(struct thread *, struct linux_open_by_handle_at_args *);
+int linux_clock_adjtime(struct thread *, struct linux_clock_adjtime_args *);
+int linux_syncfs(struct thread *, struct linux_syncfs_args *);
+int linux_setns(struct thread *, struct linux_setns_args *);
+int linux_sendmmsg(struct thread *, struct linux_sendmmsg_args *);
+int linux_process_vm_readv(struct thread *, struct linux_process_vm_readv_args *);
+int linux_process_vm_writev(struct thread *, struct linux_process_vm_writev_args *);
+int linux_kcmp(struct thread *, struct linux_kcmp_args *);
+int linux_finit_module(struct thread *, struct linux_finit_module_args *);
+int linux_sched_setattr(struct thread *, struct linux_sched_setattr_args *);
+int linux_sched_getattr(struct thread *, struct linux_sched_getattr_args *);
+int linux_renameat2(struct thread *, struct linux_renameat2_args *);
+int linux_seccomp(struct thread *, struct linux_seccomp_args *);
+int linux_getrandom(struct thread *, struct linux_getrandom_args *);
+int linux_memfd_create(struct thread *, struct linux_memfd_create_args *);
+int linux_bpf(struct thread *, struct linux_bpf_args *);
+int linux_execveat(struct thread *, struct linux_execveat_args *);
+int linux_userfaultfd(struct thread *, struct linux_userfaultfd_args *);
+int linux_membarrier(struct thread *, struct linux_membarrier_args *);
+int linux_mlock2(struct thread *, struct linux_mlock2_args *);
+int linux_copy_file_range(struct thread *, struct linux_copy_file_range_args *);
+int linux_preadv2(struct thread *, struct linux_preadv2_args *);
+int linux_pwritev2(struct thread *, struct linux_pwritev2_args *);
+int linux_pkey_mprotect(struct thread *, struct linux_pkey_mprotect_args *);
+int linux_pkey_alloc(struct thread *, struct linux_pkey_alloc_args *);
+int linux_pkey_free(struct thread *, struct linux_pkey_free_args *);
+
+#ifdef COMPAT_43
+
+#define nosys linux_nosys
+
+#endif /* COMPAT_43 */
+
+#ifdef COMPAT_FREEBSD4
+
+#define nosys linux_nosys
+
+#endif /* COMPAT_FREEBSD4 */
+
+#ifdef COMPAT_FREEBSD6
+
+#define nosys linux_nosys
+
+#endif /* COMPAT_FREEBSD6 */
+
+#ifdef COMPAT_FREEBSD7
+
+#define nosys linux_nosys
+
+#endif /* COMPAT_FREEBSD7 */
+
+#ifdef COMPAT_FREEBSD10
+
+#define nosys linux_nosys
+
+#endif /* COMPAT_FREEBSD10 */
+
+#ifdef COMPAT_FREEBSD11
+
+#define nosys linux_nosys
+
+#endif /* COMPAT_FREEBSD11 */
+
+#ifdef COMPAT_FREEBSD12
+
+#define nosys linux_nosys
+
+#endif /* COMPAT_FREEBSD12 */
+
+#define LINUX_SYS_AUE_linux_setxattr AUE_NULL
+#define LINUX_SYS_AUE_linux_lsetxattr AUE_NULL
+#define LINUX_SYS_AUE_linux_fsetxattr AUE_NULL
+#define LINUX_SYS_AUE_linux_getxattr AUE_NULL
+#define LINUX_SYS_AUE_linux_lgetxattr AUE_NULL
+#define LINUX_SYS_AUE_linux_fgetxattr AUE_NULL
+#define LINUX_SYS_AUE_linux_listxattr AUE_NULL
+#define LINUX_SYS_AUE_linux_llistxattr AUE_NULL
+#define LINUX_SYS_AUE_linux_flistxattr AUE_NULL
+#define LINUX_SYS_AUE_linux_removexattr AUE_NULL
+#define LINUX_SYS_AUE_linux_lremovexattr AUE_NULL
+#define LINUX_SYS_AUE_linux_fremovexattr AUE_NULL
+#define LINUX_SYS_AUE_linux_getcwd AUE_GETCWD
+#define LINUX_SYS_AUE_linux_lookup_dcookie AUE_NULL
+#define LINUX_SYS_AUE_linux_eventfd2 AUE_NULL
+#define LINUX_SYS_AUE_linux_epoll_create1 AUE_NULL
+#define LINUX_SYS_AUE_linux_epoll_ctl AUE_NULL
+#define LINUX_SYS_AUE_linux_epoll_pwait AUE_NULL
+#define LINUX_SYS_AUE_linux_dup3 AUE_NULL
+#define LINUX_SYS_AUE_linux_fcntl AUE_FCNTL
+#define LINUX_SYS_AUE_linux_inotify_init1 AUE_NULL
+#define LINUX_SYS_AUE_linux_inotify_add_watch AUE_NULL
+#define LINUX_SYS_AUE_linux_inotify_rm_watch AUE_NULL
+#define LINUX_SYS_AUE_linux_ioctl AUE_IOCTL
+#define LINUX_SYS_AUE_linux_ioprio_set AUE_NULL
+#define LINUX_SYS_AUE_linux_ioprio_get AUE_NULL
+#define LINUX_SYS_AUE_linux_mknodat AUE_MKNODAT
+#define LINUX_SYS_AUE_linux_mkdirat AUE_MKDIRAT
+#define LINUX_SYS_AUE_linux_unlinkat AUE_UNLINKAT
+#define LINUX_SYS_AUE_linux_symlinkat AUE_SYMLINKAT
+#define LINUX_SYS_AUE_linux_linkat AUE_LINKAT
+#define LINUX_SYS_AUE_linux_renameat AUE_RENAMEAT
+#define LINUX_SYS_AUE_linux_mount AUE_MOUNT
+#define LINUX_SYS_AUE_linux_pivot_root AUE_PIVOT_ROOT
+#define LINUX_SYS_AUE_linux_statfs AUE_STATFS
+#define LINUX_SYS_AUE_linux_fstatfs AUE_FSTATFS
+#define LINUX_SYS_AUE_linux_truncate AUE_TRUNCATE
+#define LINUX_SYS_AUE_linux_ftruncate AUE_FTRUNCATE
+#define LINUX_SYS_AUE_linux_fallocate AUE_NULL
+#define LINUX_SYS_AUE_linux_faccessat AUE_FACCESSAT
+#define LINUX_SYS_AUE_linux_chdir AUE_CHDIR
+#define LINUX_SYS_AUE_linux_fchmodat AUE_FCHMODAT
+#define LINUX_SYS_AUE_linux_fchownat AUE_FCHOWNAT
+#define LINUX_SYS_AUE_linux_openat AUE_OPEN_RWTC
+#define LINUX_SYS_AUE_linux_vhangup AUE_NULL
+#define LINUX_SYS_AUE_linux_pipe2 AUE_NULL
+#define LINUX_SYS_AUE_linux_getdents64 AUE_GETDIRENTRIES
+#define LINUX_SYS_AUE_linux_lseek AUE_LSEEK
+#define LINUX_SYS_AUE_linux_pread AUE_PREAD
+#define LINUX_SYS_AUE_linux_pwrite AUE_PWRITE
+#define LINUX_SYS_AUE_linux_preadv AUE_NULL
+#define LINUX_SYS_AUE_linux_pwritev AUE_NULL
+#define LINUX_SYS_AUE_linux_sendfile AUE_SENDFILE
+#define LINUX_SYS_AUE_linux_pselect6 AUE_SELECT
+#define LINUX_SYS_AUE_linux_ppoll AUE_POLL
+#define LINUX_SYS_AUE_linux_signalfd4 AUE_NULL
+#define LINUX_SYS_AUE_linux_vmsplice AUE_NULL
+#define LINUX_SYS_AUE_linux_splice AUE_NULL
+#define LINUX_SYS_AUE_linux_tee AUE_NULL
+#define LINUX_SYS_AUE_linux_readlinkat AUE_READLINKAT
+#define LINUX_SYS_AUE_linux_newfstatat AUE_FSTATAT
+#define LINUX_SYS_AUE_linux_newfstat AUE_FSTAT
+#define LINUX_SYS_AUE_linux_fdatasync AUE_NULL
+#define LINUX_SYS_AUE_linux_sync_file_range AUE_NULL
+#define LINUX_SYS_AUE_linux_timerfd_create AUE_NULL
+#define LINUX_SYS_AUE_linux_timerfd_settime AUE_NULL
+#define LINUX_SYS_AUE_linux_timerfd_gettime AUE_NULL
+#define LINUX_SYS_AUE_linux_utimensat AUE_FUTIMESAT
+#define LINUX_SYS_AUE_linux_capget AUE_CAPGET
+#define LINUX_SYS_AUE_linux_capset AUE_CAPSET
+#define LINUX_SYS_AUE_linux_personality AUE_PERSONALITY
+#define LINUX_SYS_AUE_linux_exit AUE_EXIT
+#define LINUX_SYS_AUE_linux_exit_group AUE_EXIT
+#define LINUX_SYS_AUE_linux_waitid AUE_WAIT6
+#define LINUX_SYS_AUE_linux_set_tid_address AUE_NULL
+#define LINUX_SYS_AUE_linux_unshare AUE_NULL
+#define LINUX_SYS_AUE_linux_sys_futex AUE_NULL
+#define LINUX_SYS_AUE_linux_set_robust_list AUE_NULL
+#define LINUX_SYS_AUE_linux_get_robust_list AUE_NULL
+#define LINUX_SYS_AUE_linux_nanosleep AUE_NULL
+#define LINUX_SYS_AUE_linux_getitimer AUE_GETITIMER
+#define LINUX_SYS_AUE_linux_setitimer AUE_SETITIMER
+#define LINUX_SYS_AUE_linux_kexec_load AUE_NULL
+#define LINUX_SYS_AUE_linux_init_module AUE_NULL
+#define LINUX_SYS_AUE_linux_delete_module AUE_NULL
+#define LINUX_SYS_AUE_linux_timer_create AUE_NULL
+#define LINUX_SYS_AUE_linux_timer_gettime AUE_NULL
+#define LINUX_SYS_AUE_linux_timer_getoverrun AUE_NULL
+#define LINUX_SYS_AUE_linux_timer_settime AUE_NULL
+#define LINUX_SYS_AUE_linux_timer_delete AUE_NULL
+#define LINUX_SYS_AUE_linux_clock_settime AUE_CLOCK_SETTIME
+#define LINUX_SYS_AUE_linux_clock_gettime AUE_NULL
+#define LINUX_SYS_AUE_linux_clock_getres AUE_NULL
+#define LINUX_SYS_AUE_linux_clock_nanosleep AUE_NULL
+#define LINUX_SYS_AUE_linux_syslog AUE_NULL
+#define LINUX_SYS_AUE_linux_ptrace AUE_PTRACE
+#define LINUX_SYS_AUE_linux_sched_setparam AUE_SCHED_SETPARAM
+#define LINUX_SYS_AUE_linux_sched_setscheduler AUE_SCHED_SETSCHEDULER
+#define LINUX_SYS_AUE_linux_sched_getscheduler AUE_SCHED_GETSCHEDULER
+#define LINUX_SYS_AUE_linux_sched_getparam AUE_SCHED_GETPARAM
+#define LINUX_SYS_AUE_linux_sched_setaffinity AUE_NULL
+#define LINUX_SYS_AUE_linux_sched_getaffinity AUE_NULL
+#define LINUX_SYS_AUE_linux_sched_get_priority_max AUE_SCHED_GET_PRIORITY_MAX
+#define LINUX_SYS_AUE_linux_sched_get_priority_min AUE_SCHED_GET_PRIORITY_MIN
+#define LINUX_SYS_AUE_linux_sched_rr_get_interval AUE_SCHED_RR_GET_INTERVAL
+#define LINUX_SYS_AUE_linux_kill AUE_KILL
+#define LINUX_SYS_AUE_linux_tkill AUE_NULL
+#define LINUX_SYS_AUE_linux_tgkill AUE_NULL
+#define LINUX_SYS_AUE_linux_sigaltstack AUE_NULL
+#define LINUX_SYS_AUE_linux_rt_sigsuspend AUE_NULL
+#define LINUX_SYS_AUE_linux_rt_sigaction AUE_NULL
+#define LINUX_SYS_AUE_linux_rt_sigprocmask AUE_NULL
+#define LINUX_SYS_AUE_linux_rt_sigpending AUE_NULL
+#define LINUX_SYS_AUE_linux_rt_sigtimedwait AUE_NULL
+#define LINUX_SYS_AUE_linux_rt_sigqueueinfo AUE_NULL
+#define LINUX_SYS_AUE_linux_rt_sigreturn AUE_NULL
+#define LINUX_SYS_AUE_linux_getpriority AUE_GETPRIORITY
+#define LINUX_SYS_AUE_linux_reboot AUE_REBOOT
+#define LINUX_SYS_AUE_linux_setfsuid AUE_SETFSUID
+#define LINUX_SYS_AUE_linux_setfsgid AUE_SETFSGID
+#define LINUX_SYS_AUE_linux_times AUE_NULL
+#define LINUX_SYS_AUE_linux_getsid AUE_GETSID
+#define LINUX_SYS_AUE_linux_getgroups AUE_GETGROUPS
+#define LINUX_SYS_AUE_linux_setgroups AUE_SETGROUPS
+#define LINUX_SYS_AUE_linux_newuname AUE_NULL
+#define LINUX_SYS_AUE_linux_sethostname AUE_SYSCTL
+#define LINUX_SYS_AUE_linux_setdomainname AUE_SYSCTL
+#define LINUX_SYS_AUE_linux_getrlimit AUE_GETRLIMIT
+#define LINUX_SYS_AUE_linux_setrlimit AUE_SETRLIMIT
+#define LINUX_SYS_AUE_linux_prctl AUE_PRCTL
+#define LINUX_SYS_AUE_linux_getcpu AUE_NULL
+#define LINUX_SYS_AUE_linux_adjtimex AUE_ADJTIME
+#define LINUX_SYS_AUE_linux_getpid AUE_GETPID
+#define LINUX_SYS_AUE_linux_getppid AUE_GETPPID
+#define LINUX_SYS_AUE_linux_getuid AUE_GETUID
+#define LINUX_SYS_AUE_linux_getgid AUE_GETGID
+#define LINUX_SYS_AUE_linux_gettid AUE_NULL
+#define LINUX_SYS_AUE_linux_sysinfo AUE_NULL
+#define LINUX_SYS_AUE_linux_mq_open AUE_NULL
+#define LINUX_SYS_AUE_linux_mq_unlink AUE_NULL
+#define LINUX_SYS_AUE_linux_mq_timedsend AUE_NULL
+#define LINUX_SYS_AUE_linux_mq_timedreceive AUE_NULL
+#define LINUX_SYS_AUE_linux_mq_notify AUE_NULL
+#define LINUX_SYS_AUE_linux_mq_getsetattr AUE_NULL
+#define LINUX_SYS_AUE_linux_msgget AUE_NULL
+#define LINUX_SYS_AUE_linux_msgctl AUE_NULL
+#define LINUX_SYS_AUE_linux_msgrcv AUE_NULL
+#define LINUX_SYS_AUE_linux_msgsnd AUE_NULL
+#define LINUX_SYS_AUE_linux_semget AUE_NULL
+#define LINUX_SYS_AUE_linux_semctl AUE_NULL
+#define LINUX_SYS_AUE_linux_semtimedop AUE_NULL
+#define LINUX_SYS_AUE_linux_semop AUE_NULL
+#define LINUX_SYS_AUE_linux_shmget AUE_NULL
+#define LINUX_SYS_AUE_linux_shmctl AUE_NULL
+#define LINUX_SYS_AUE_linux_shmat AUE_NULL
+#define LINUX_SYS_AUE_linux_shmdt AUE_NULL
+#define LINUX_SYS_AUE_linux_socket AUE_SOCKET
+#define LINUX_SYS_AUE_linux_socketpair AUE_SOCKETPAIR
+#define LINUX_SYS_AUE_linux_bind AUE_BIND
+#define LINUX_SYS_AUE_linux_listen AUE_LISTEN
+#define LINUX_SYS_AUE_linux_accept AUE_ACCEPT
+#define LINUX_SYS_AUE_linux_connect AUE_CONNECT
+#define LINUX_SYS_AUE_linux_getsockname AUE_GETSOCKNAME
+#define LINUX_SYS_AUE_linux_getpeername AUE_GETPEERNAME
+#define LINUX_SYS_AUE_linux_sendto AUE_SENDTO
+#define LINUX_SYS_AUE_linux_recvfrom AUE_RECVFROM
+#define LINUX_SYS_AUE_linux_setsockopt AUE_SETSOCKOPT
+#define LINUX_SYS_AUE_linux_getsockopt AUE_GETSOCKOPT
+#define LINUX_SYS_AUE_linux_shutdown AUE_NULL
+#define LINUX_SYS_AUE_linux_sendmsg AUE_SENDMSG
+#define LINUX_SYS_AUE_linux_recvmsg AUE_RECVMSG
+#define LINUX_SYS_AUE_linux_brk AUE_NULL
+#define LINUX_SYS_AUE_linux_mremap AUE_NULL
+#define LINUX_SYS_AUE_linux_add_key AUE_NULL
+#define LINUX_SYS_AUE_linux_request_key AUE_NULL
+#define LINUX_SYS_AUE_linux_keyctl AUE_NULL
+#define LINUX_SYS_AUE_linux_clone AUE_RFORK
+#define LINUX_SYS_AUE_linux_execve AUE_EXECVE
+#define LINUX_SYS_AUE_linux_mmap2 AUE_MMAP
+#define LINUX_SYS_AUE_linux_fadvise64 AUE_NULL
+#define LINUX_SYS_AUE_linux_swapoff AUE_SWAPOFF
+#define LINUX_SYS_AUE_linux_mprotect AUE_MPROTECT
+#define LINUX_SYS_AUE_linux_msync AUE_MSYNC
+#define LINUX_SYS_AUE_linux_mincore AUE_MINCORE
+#define LINUX_SYS_AUE_linux_madvise AUE_MADVISE
+#define LINUX_SYS_AUE_linux_remap_file_pages AUE_NULL
+#define LINUX_SYS_AUE_linux_mbind AUE_NULL
+#define LINUX_SYS_AUE_linux_get_mempolicy AUE_NULL
+#define LINUX_SYS_AUE_linux_set_mempolicy AUE_NULL
+#define LINUX_SYS_AUE_linux_migrate_pages AUE_NULL
+#define LINUX_SYS_AUE_linux_move_pages AUE_NULL
+#define LINUX_SYS_AUE_linux_rt_tgsigqueueinfo AUE_NULL
+#define LINUX_SYS_AUE_linux_perf_event_open AUE_NULL
+#define LINUX_SYS_AUE_linux_accept4 AUE_ACCEPT
+#define LINUX_SYS_AUE_linux_recvmmsg AUE_NULL
+#define LINUX_SYS_AUE_linux_wait4 AUE_WAIT4
+#define LINUX_SYS_AUE_linux_prlimit64 AUE_NULL
+#define LINUX_SYS_AUE_linux_fanotify_init AUE_NULL
+#define LINUX_SYS_AUE_linux_fanotify_mark AUE_NULL
+#define LINUX_SYS_AUE_linux_name_to_handle_at AUE_NULL
+#define LINUX_SYS_AUE_linux_open_by_handle_at AUE_NULL
+#define LINUX_SYS_AUE_linux_clock_adjtime AUE_NULL
+#define LINUX_SYS_AUE_linux_syncfs AUE_SYNC
+#define LINUX_SYS_AUE_linux_setns AUE_NULL
+#define LINUX_SYS_AUE_linux_sendmmsg AUE_NULL
+#define LINUX_SYS_AUE_linux_process_vm_readv AUE_NULL
+#define LINUX_SYS_AUE_linux_process_vm_writev AUE_NULL
+#define LINUX_SYS_AUE_linux_kcmp AUE_NULL
+#define LINUX_SYS_AUE_linux_finit_module AUE_NULL
+#define LINUX_SYS_AUE_linux_sched_setattr AUE_NULL
+#define LINUX_SYS_AUE_linux_sched_getattr AUE_NULL
+#define LINUX_SYS_AUE_linux_renameat2 AUE_NULL
+#define LINUX_SYS_AUE_linux_seccomp AUE_NULL
+#define LINUX_SYS_AUE_linux_getrandom AUE_NULL
+#define LINUX_SYS_AUE_linux_memfd_create AUE_NULL
+#define LINUX_SYS_AUE_linux_bpf AUE_NULL
+#define LINUX_SYS_AUE_linux_execveat AUE_NULL
+#define LINUX_SYS_AUE_linux_userfaultfd AUE_NULL
+#define LINUX_SYS_AUE_linux_membarrier AUE_NULL
+#define LINUX_SYS_AUE_linux_mlock2 AUE_NULL
+#define LINUX_SYS_AUE_linux_copy_file_range AUE_NULL
+#define LINUX_SYS_AUE_linux_preadv2 AUE_NULL
+#define LINUX_SYS_AUE_linux_pwritev2 AUE_NULL
+#define LINUX_SYS_AUE_linux_pkey_mprotect AUE_NULL
+#define LINUX_SYS_AUE_linux_pkey_alloc AUE_NULL
+#define LINUX_SYS_AUE_linux_pkey_free AUE_NULL
+
+#undef PAD_
+#undef PADL_
+#undef PADR_
+
+#endif /* !_LINUX_SYSPROTO_H_ */
diff --git a/sys/arm64/linux/linux_ptrace.c b/sys/arm64/linux/linux_ptrace.c
new file mode 100644
index 000000000000..a7d53470a12d
--- /dev/null
+++ b/sys/arm64/linux/linux_ptrace.c
@@ -0,0 +1,56 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (C) 2018 Turing Robotic Industries Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/sdt.h>
+#include <sys/proc.h>
+#include <sys/ptrace.h>
+
+#include <arm64/linux/linux.h>
+#include <arm64/linux/linux_proto.h>
+#include <compat/linux/linux_dtrace.h>
+
+/* DTrace init */
+LIN_SDT_PROVIDER_DECLARE(LINUX_DTRACE);
+
+/* DTrace probes */
+LIN_SDT_PROBE_DEFINE0(ptrace, linux_ptrace, todo);
+
+int
+linux_ptrace(struct thread *td, struct linux_ptrace_args *uap)
+{
+
+ /* LINUXTODO: implement arm64 linux_ptrace */
+ LIN_SDT_PROBE0(ptrace, linux_ptrace, todo);
+ return (EDOOFUS);
+}
diff --git a/sys/arm64/linux/linux_support.s b/sys/arm64/linux/linux_support.s
new file mode 100644
index 000000000000..c7a87f616812
--- /dev/null
+++ b/sys/arm64/linux/linux_support.s
@@ -0,0 +1,57 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (C) 2018 Turing Robotic Industries Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include "linux_assym.h"
+#include <machine/asm.h>
+
+#include "assym.inc"
+
+/*
+ * LINUXTODO: implement futex_*
+ */
+
+ENTRY(futex_xchgl)
+ brk #0
+ ret
+
+ENTRY(futex_addl)
+ brk #0
+ ret
+
+ENTRY(futex_orl)
+ brk #0
+ ret
+
+ENTRY(futex_andl)
+ brk #0
+ ret
+
+ENTRY(futex_xorl)
+ brk #0
+ ret
diff --git a/sys/arm64/linux/linux_syscall.h b/sys/arm64/linux/linux_syscall.h
new file mode 100644
index 000000000000..975229304af6
--- /dev/null
+++ b/sys/arm64/linux/linux_syscall.h
@@ -0,0 +1,272 @@
+/*
+ * System call numbers.
+ *
+ * DO NOT EDIT-- this file is automatically @generated.
+ * $FreeBSD$
+ */
+
+#define LINUX_SYS_linux_setxattr 5
+#define LINUX_SYS_linux_lsetxattr 6
+#define LINUX_SYS_linux_fsetxattr 7
+#define LINUX_SYS_linux_getxattr 8
+#define LINUX_SYS_linux_lgetxattr 9
+#define LINUX_SYS_linux_fgetxattr 10
+#define LINUX_SYS_linux_listxattr 11
+#define LINUX_SYS_linux_llistxattr 12
+#define LINUX_SYS_linux_flistxattr 13
+#define LINUX_SYS_linux_removexattr 14
+#define LINUX_SYS_linux_lremovexattr 15
+#define LINUX_SYS_linux_fremovexattr 16
+#define LINUX_SYS_linux_getcwd 17
+#define LINUX_SYS_linux_lookup_dcookie 18
+#define LINUX_SYS_linux_eventfd2 19
+#define LINUX_SYS_linux_epoll_create1 20
+#define LINUX_SYS_linux_epoll_ctl 21
+#define LINUX_SYS_linux_epoll_pwait 22
+#define LINUX_SYS_dup 23
+#define LINUX_SYS_linux_dup3 24
+#define LINUX_SYS_linux_fcntl 25
+#define LINUX_SYS_linux_inotify_init1 26
+#define LINUX_SYS_linux_inotify_add_watch 27
+#define LINUX_SYS_linux_inotify_rm_watch 28
+#define LINUX_SYS_linux_ioctl 29
+#define LINUX_SYS_linux_ioprio_set 30
+#define LINUX_SYS_linux_ioprio_get 31
+#define LINUX_SYS_flock 32
+#define LINUX_SYS_linux_mknodat 33
+#define LINUX_SYS_linux_mkdirat 34
+#define LINUX_SYS_linux_unlinkat 35
+#define LINUX_SYS_linux_symlinkat 36
+#define LINUX_SYS_linux_linkat 37
+#define LINUX_SYS_linux_renameat 38
+#define LINUX_SYS_linux_mount 40
+#define LINUX_SYS_linux_pivot_root 41
+#define LINUX_SYS_linux_statfs 43
+#define LINUX_SYS_linux_fstatfs 44
+#define LINUX_SYS_linux_truncate 45
+#define LINUX_SYS_linux_ftruncate 46
+#define LINUX_SYS_linux_fallocate 47
+#define LINUX_SYS_linux_faccessat 48
+#define LINUX_SYS_linux_chdir 49
+#define LINUX_SYS_fchdir 50
+#define LINUX_SYS_chroot 51
+#define LINUX_SYS_fchmod 52
+#define LINUX_SYS_linux_fchmodat 53
+#define LINUX_SYS_linux_fchownat 54
+#define LINUX_SYS_fchown 55
+#define LINUX_SYS_linux_openat 56
+#define LINUX_SYS_close 57
+#define LINUX_SYS_linux_vhangup 58
+#define LINUX_SYS_linux_pipe2 59
+#define LINUX_SYS_linux_getdents64 61
+#define LINUX_SYS_linux_lseek 62
+#define LINUX_SYS_read 63
+#define LINUX_SYS_write 64
+#define LINUX_SYS_readv 65
+#define LINUX_SYS_writev 66
+#define LINUX_SYS_linux_pread 67
+#define LINUX_SYS_linux_pwrite 68
+#define LINUX_SYS_linux_preadv 69
+#define LINUX_SYS_linux_pwritev 70
+#define LINUX_SYS_linux_sendfile 71
+#define LINUX_SYS_linux_pselect6 72
+#define LINUX_SYS_linux_ppoll 73
+#define LINUX_SYS_linux_signalfd4 74
+#define LINUX_SYS_linux_vmsplice 75
+#define LINUX_SYS_linux_splice 76
+#define LINUX_SYS_linux_tee 77
+#define LINUX_SYS_linux_readlinkat 78
+#define LINUX_SYS_linux_newfstatat 79
+#define LINUX_SYS_linux_newfstat 80
+#define LINUX_SYS_fsync 82
+#define LINUX_SYS_linux_fdatasync 83
+#define LINUX_SYS_linux_sync_file_range 84
+#define LINUX_SYS_linux_timerfd_create 85
+#define LINUX_SYS_linux_timerfd_settime 86
+#define LINUX_SYS_linux_timerfd_gettime 87
+#define LINUX_SYS_linux_utimensat 88
+#define LINUX_SYS_acct 89
+#define LINUX_SYS_linux_capget 90
+#define LINUX_SYS_linux_capset 91
+#define LINUX_SYS_linux_personality 92
+#define LINUX_SYS_linux_exit 93
+#define LINUX_SYS_linux_exit_group 94
+#define LINUX_SYS_linux_waitid 95
+#define LINUX_SYS_linux_set_tid_address 96
+#define LINUX_SYS_linux_unshare 97
+#define LINUX_SYS_linux_sys_futex 98
+#define LINUX_SYS_linux_set_robust_list 99
+#define LINUX_SYS_linux_get_robust_list 100
+#define LINUX_SYS_linux_nanosleep 101
+#define LINUX_SYS_linux_getitimer 102
+#define LINUX_SYS_linux_setitimer 103
+#define LINUX_SYS_linux_kexec_load 104
+#define LINUX_SYS_linux_init_module 105
+#define LINUX_SYS_linux_delete_module 106
+#define LINUX_SYS_linux_timer_create 107
+#define LINUX_SYS_linux_timer_gettime 108
+#define LINUX_SYS_linux_timer_getoverrun 109
+#define LINUX_SYS_linux_timer_settime 110
+#define LINUX_SYS_linux_timer_delete 111
+#define LINUX_SYS_linux_clock_settime 112
+#define LINUX_SYS_linux_clock_gettime 113
+#define LINUX_SYS_linux_clock_getres 114
+#define LINUX_SYS_linux_clock_nanosleep 115
+#define LINUX_SYS_linux_syslog 116
+#define LINUX_SYS_linux_ptrace 117
+#define LINUX_SYS_linux_sched_setparam 118
+#define LINUX_SYS_linux_sched_setscheduler 119
+#define LINUX_SYS_linux_sched_getscheduler 120
+#define LINUX_SYS_linux_sched_getparam 121
+#define LINUX_SYS_linux_sched_setaffinity 122
+#define LINUX_SYS_linux_sched_getaffinity 123
+#define LINUX_SYS_sched_yield 124
+#define LINUX_SYS_linux_sched_get_priority_max 125
+#define LINUX_SYS_linux_sched_get_priority_min 126
+#define LINUX_SYS_linux_sched_rr_get_interval 127
+#define LINUX_SYS_linux_kill 129
+#define LINUX_SYS_linux_tkill 130
+#define LINUX_SYS_linux_tgkill 131
+#define LINUX_SYS_linux_sigaltstack 132
+#define LINUX_SYS_linux_rt_sigsuspend 133
+#define LINUX_SYS_linux_rt_sigaction 134
+#define LINUX_SYS_linux_rt_sigprocmask 135
+#define LINUX_SYS_linux_rt_sigpending 136
+#define LINUX_SYS_linux_rt_sigtimedwait 137
+#define LINUX_SYS_linux_rt_sigqueueinfo 138
+#define LINUX_SYS_linux_rt_sigreturn 139
+#define LINUX_SYS_setpriority 140
+#define LINUX_SYS_linux_getpriority 141
+#define LINUX_SYS_linux_reboot 142
+#define LINUX_SYS_setregid 143
+#define LINUX_SYS_setgid 144
+#define LINUX_SYS_setreuid 145
+#define LINUX_SYS_setuid 146
+#define LINUX_SYS_setresuid 147
+#define LINUX_SYS_getresuid 148
+#define LINUX_SYS_setresgid 149
+#define LINUX_SYS_getresgid 150
+#define LINUX_SYS_linux_setfsuid 151
+#define LINUX_SYS_linux_setfsgid 152
+#define LINUX_SYS_linux_times 153
+#define LINUX_SYS_setpgid 154
+#define LINUX_SYS_getpgid 155
+#define LINUX_SYS_linux_getsid 156
+#define LINUX_SYS_setsid 157
+#define LINUX_SYS_linux_getgroups 158
+#define LINUX_SYS_linux_setgroups 159
+#define LINUX_SYS_linux_newuname 160
+#define LINUX_SYS_linux_sethostname 161
+#define LINUX_SYS_linux_setdomainname 162
+#define LINUX_SYS_linux_getrlimit 163
+#define LINUX_SYS_linux_setrlimit 164
+#define LINUX_SYS_getrusage 165
+#define LINUX_SYS_umask 166
+#define LINUX_SYS_linux_prctl 167
+#define LINUX_SYS_linux_getcpu 168
+#define LINUX_SYS_gettimeofday 169
+#define LINUX_SYS_settimeofday 170
+#define LINUX_SYS_linux_adjtimex 171
+#define LINUX_SYS_linux_getpid 172
+#define LINUX_SYS_linux_getppid 173
+#define LINUX_SYS_linux_getuid 174
+#define LINUX_SYS_geteuid 175
+#define LINUX_SYS_linux_getgid 176
+#define LINUX_SYS_getegid 177
+#define LINUX_SYS_linux_gettid 178
+#define LINUX_SYS_linux_sysinfo 179
+#define LINUX_SYS_linux_mq_open 180
+#define LINUX_SYS_linux_mq_unlink 181
+#define LINUX_SYS_linux_mq_timedsend 182
+#define LINUX_SYS_linux_mq_timedreceive 183
+#define LINUX_SYS_linux_mq_notify 184
+#define LINUX_SYS_linux_mq_getsetattr 185
+#define LINUX_SYS_linux_msgget 186
+#define LINUX_SYS_linux_msgctl 187
+#define LINUX_SYS_linux_msgrcv 188
+#define LINUX_SYS_linux_msgsnd 189
+#define LINUX_SYS_linux_semget 190
+#define LINUX_SYS_linux_semctl 191
+#define LINUX_SYS_linux_semtimedop 192
+#define LINUX_SYS_linux_semop 193
+#define LINUX_SYS_linux_shmget 194
+#define LINUX_SYS_linux_shmctl 195
+#define LINUX_SYS_linux_shmat 196
+#define LINUX_SYS_linux_shmdt 197
+#define LINUX_SYS_linux_socket 198
+#define LINUX_SYS_linux_socketpair 199
+#define LINUX_SYS_linux_bind 200
+#define LINUX_SYS_linux_listen 201
+#define LINUX_SYS_linux_accept 202
+#define LINUX_SYS_linux_connect 203
+#define LINUX_SYS_linux_getsockname 204
+#define LINUX_SYS_linux_getpeername 205
+#define LINUX_SYS_linux_sendto 206
+#define LINUX_SYS_linux_recvfrom 207
+#define LINUX_SYS_linux_setsockopt 208
+#define LINUX_SYS_linux_getsockopt 209
+#define LINUX_SYS_linux_shutdown 210
+#define LINUX_SYS_linux_sendmsg 211
+#define LINUX_SYS_linux_recvmsg 212
+#define LINUX_SYS_linux_brk 214
+#define LINUX_SYS_munmap 215
+#define LINUX_SYS_linux_mremap 216
+#define LINUX_SYS_linux_add_key 217
+#define LINUX_SYS_linux_request_key 218
+#define LINUX_SYS_linux_keyctl 219
+#define LINUX_SYS_linux_clone 220
+#define LINUX_SYS_linux_execve 221
+#define LINUX_SYS_linux_mmap2 222
+#define LINUX_SYS_linux_fadvise64 223
+#define LINUX_SYS_swapon 224
+#define LINUX_SYS_linux_swapoff 225
+#define LINUX_SYS_linux_mprotect 226
+#define LINUX_SYS_linux_msync 227
+#define LINUX_SYS_mlock 228
+#define LINUX_SYS_munlock 229
+#define LINUX_SYS_mlockall 230
+#define LINUX_SYS_munlockall 231
+#define LINUX_SYS_linux_mincore 232
+#define LINUX_SYS_linux_madvise 233
+#define LINUX_SYS_linux_remap_file_pages 234
+#define LINUX_SYS_linux_mbind 235
+#define LINUX_SYS_linux_get_mempolicy 236
+#define LINUX_SYS_linux_set_mempolicy 237
+#define LINUX_SYS_linux_migrate_pages 238
+#define LINUX_SYS_linux_move_pages 239
+#define LINUX_SYS_linux_rt_tgsigqueueinfo 240
+#define LINUX_SYS_linux_perf_event_open 241
+#define LINUX_SYS_linux_accept4 242
+#define LINUX_SYS_linux_recvmmsg 243
+#define LINUX_SYS_linux_wait4 260
+#define LINUX_SYS_linux_prlimit64 261
+#define LINUX_SYS_linux_fanotify_init 262
+#define LINUX_SYS_linux_fanotify_mark 263
+#define LINUX_SYS_linux_name_to_handle_at 264
+#define LINUX_SYS_linux_open_by_handle_at 265
+#define LINUX_SYS_linux_clock_adjtime 266
+#define LINUX_SYS_linux_syncfs 267
+#define LINUX_SYS_linux_setns 268
+#define LINUX_SYS_linux_sendmmsg 269
+#define LINUX_SYS_linux_process_vm_readv 270
+#define LINUX_SYS_linux_process_vm_writev 271
+#define LINUX_SYS_linux_kcmp 272
+#define LINUX_SYS_linux_finit_module 273
+#define LINUX_SYS_linux_sched_setattr 274
+#define LINUX_SYS_linux_sched_getattr 275
+#define LINUX_SYS_linux_renameat2 276
+#define LINUX_SYS_linux_seccomp 277
+#define LINUX_SYS_linux_getrandom 278
+#define LINUX_SYS_linux_memfd_create 279
+#define LINUX_SYS_linux_bpf 280
+#define LINUX_SYS_linux_execveat 281
+#define LINUX_SYS_linux_userfaultfd 282
+#define LINUX_SYS_linux_membarrier 283
+#define LINUX_SYS_linux_mlock2 284
+#define LINUX_SYS_linux_copy_file_range 285
+#define LINUX_SYS_linux_preadv2 286
+#define LINUX_SYS_linux_pwritev2 287
+#define LINUX_SYS_linux_pkey_mprotect 288
+#define LINUX_SYS_linux_pkey_alloc 289
+#define LINUX_SYS_linux_pkey_free 290
+#define LINUX_SYS_MAXSYSCALL 292
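
These numbers follow the arm64 (asm-generic) Linux syscall ABI, where the syscall number is passed in x8, up to six arguments in x0-x5, and the kernel is entered with an svc #0 trap; the gaps (39, 42, 60, 81, 128, 244-259, ...) are slots the table does not wire up yet, and LINUX_SYS_MAXSYSCALL bounds the dispatch tables that follow. As a purely illustrative userland fragment (code a Linux arm64 binary would contain, not kernel code from this patch), a raw write(2) under this ABI looks roughly like:

    /* Illustrative Linux arm64 userland snippet, not part of the patch. */
    register long nr  __asm__("x8") = 64;           /* LINUX_SYS_write */
    register long fd  __asm__("x0") = 1;            /* stdout */
    register long buf __asm__("x1") = (long)"hi\n";
    register long len __asm__("x2") = 3;
    __asm__ volatile("svc #0"
        : "+r"(fd)                    /* the return value comes back in x0 */
        : "r"(nr), "r"(buf), "r"(len)
        : "memory");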
diff --git a/sys/arm64/linux/linux_syscalls.c b/sys/arm64/linux/linux_syscalls.c
new file mode 100644
index 000000000000..9ce335174db8
--- /dev/null
+++ b/sys/arm64/linux/linux_syscalls.c
@@ -0,0 +1,302 @@
+/*
+ * System call names.
+ *
+ * DO NOT EDIT-- this file is automatically @generated.
+ * $FreeBSD$
+ */
+
+const char *linux_syscallnames[] = {
+#define nosys linux_nosys
+ "#0", /* 0 = linux_io_setup */
+ "#1", /* 1 = linux_io_destroy */
+ "#2", /* 2 = linux_io_submit */
+ "#3", /* 3 = linux_io_cancel */
+ "#4", /* 4 = linux_io_getevents */
+ "linux_setxattr", /* 5 = linux_setxattr */
+ "linux_lsetxattr", /* 6 = linux_lsetxattr */
+ "linux_fsetxattr", /* 7 = linux_fsetxattr */
+ "linux_getxattr", /* 8 = linux_getxattr */
+ "linux_lgetxattr", /* 9 = linux_lgetxattr */
+ "linux_fgetxattr", /* 10 = linux_fgetxattr */
+ "linux_listxattr", /* 11 = linux_listxattr */
+ "linux_llistxattr", /* 12 = linux_llistxattr */
+ "linux_flistxattr", /* 13 = linux_flistxattr */
+ "linux_removexattr", /* 14 = linux_removexattr */
+ "linux_lremovexattr", /* 15 = linux_lremovexattr */
+ "linux_fremovexattr", /* 16 = linux_fremovexattr */
+ "linux_getcwd", /* 17 = linux_getcwd */
+ "linux_lookup_dcookie", /* 18 = linux_lookup_dcookie */
+ "linux_eventfd2", /* 19 = linux_eventfd2 */
+ "linux_epoll_create1", /* 20 = linux_epoll_create1 */
+ "linux_epoll_ctl", /* 21 = linux_epoll_ctl */
+ "linux_epoll_pwait", /* 22 = linux_epoll_pwait */
+ "dup", /* 23 = dup */
+ "linux_dup3", /* 24 = linux_dup3 */
+ "linux_fcntl", /* 25 = linux_fcntl */
+ "linux_inotify_init1", /* 26 = linux_inotify_init1 */
+ "linux_inotify_add_watch", /* 27 = linux_inotify_add_watch */
+ "linux_inotify_rm_watch", /* 28 = linux_inotify_rm_watch */
+ "linux_ioctl", /* 29 = linux_ioctl */
+ "linux_ioprio_set", /* 30 = linux_ioprio_set */
+ "linux_ioprio_get", /* 31 = linux_ioprio_get */
+ "flock", /* 32 = flock */
+ "linux_mknodat", /* 33 = linux_mknodat */
+ "linux_mkdirat", /* 34 = linux_mkdirat */
+ "linux_unlinkat", /* 35 = linux_unlinkat */
+ "linux_symlinkat", /* 36 = linux_symlinkat */
+ "linux_linkat", /* 37 = linux_linkat */
+ "linux_renameat", /* 38 = linux_renameat */
+ "#39", /* 39 = linux_umount2 */
+ "linux_mount", /* 40 = linux_mount */
+ "linux_pivot_root", /* 41 = linux_pivot_root */
+ "#42", /* 42 = nfsservctl */
+ "linux_statfs", /* 43 = linux_statfs */
+ "linux_fstatfs", /* 44 = linux_fstatfs */
+ "linux_truncate", /* 45 = linux_truncate */
+ "linux_ftruncate", /* 46 = linux_ftruncate */
+ "linux_fallocate", /* 47 = linux_fallocate */
+ "linux_faccessat", /* 48 = linux_faccessat */
+ "linux_chdir", /* 49 = linux_chdir */
+ "fchdir", /* 50 = fchdir */
+ "chroot", /* 51 = chroot */
+ "fchmod", /* 52 = fchmod */
+ "linux_fchmodat", /* 53 = linux_fchmodat */
+ "linux_fchownat", /* 54 = linux_fchownat */
+ "fchown", /* 55 = fchown */
+ "linux_openat", /* 56 = linux_openat */
+ "close", /* 57 = close */
+ "linux_vhangup", /* 58 = linux_vhangup */
+ "linux_pipe2", /* 59 = linux_pipe2 */
+ "#60", /* 60 = linux_quotactl */
+ "linux_getdents64", /* 61 = linux_getdents64 */
+ "linux_lseek", /* 62 = linux_lseek */
+ "read", /* 63 = read */
+ "write", /* 64 = write */
+ "readv", /* 65 = readv */
+ "writev", /* 66 = writev */
+ "linux_pread", /* 67 = linux_pread */
+ "linux_pwrite", /* 68 = linux_pwrite */
+ "linux_preadv", /* 69 = linux_preadv */
+ "linux_pwritev", /* 70 = linux_pwritev */
+ "linux_sendfile", /* 71 = linux_sendfile */
+ "linux_pselect6", /* 72 = linux_pselect6 */
+ "linux_ppoll", /* 73 = linux_ppoll */
+ "linux_signalfd4", /* 74 = linux_signalfd4 */
+ "linux_vmsplice", /* 75 = linux_vmsplice */
+ "linux_splice", /* 76 = linux_splice */
+ "linux_tee", /* 77 = linux_tee */
+ "linux_readlinkat", /* 78 = linux_readlinkat */
+ "linux_newfstatat", /* 79 = linux_newfstatat */
+ "linux_newfstat", /* 80 = linux_newfstat */
+ "#81", /* 81 = linux_sync */
+ "fsync", /* 82 = fsync */
+ "linux_fdatasync", /* 83 = linux_fdatasync */
+ "linux_sync_file_range", /* 84 = linux_sync_file_range */
+ "linux_timerfd_create", /* 85 = linux_timerfd_create */
+ "linux_timerfd_settime", /* 86 = linux_timerfd_settime */
+ "linux_timerfd_gettime", /* 87 = linux_timerfd_gettime */
+ "linux_utimensat", /* 88 = linux_utimensat */
+ "acct", /* 89 = acct */
+ "linux_capget", /* 90 = linux_capget */
+ "linux_capset", /* 91 = linux_capset */
+ "linux_personality", /* 92 = linux_personality */
+ "linux_exit", /* 93 = linux_exit */
+ "linux_exit_group", /* 94 = linux_exit_group */
+ "linux_waitid", /* 95 = linux_waitid */
+ "linux_set_tid_address", /* 96 = linux_set_tid_address */
+ "linux_unshare", /* 97 = linux_unshare */
+ "linux_sys_futex", /* 98 = linux_sys_futex */
+ "linux_set_robust_list", /* 99 = linux_set_robust_list */
+ "linux_get_robust_list", /* 100 = linux_get_robust_list */
+ "linux_nanosleep", /* 101 = linux_nanosleep */
+ "linux_getitimer", /* 102 = linux_getitimer */
+ "linux_setitimer", /* 103 = linux_setitimer */
+ "linux_kexec_load", /* 104 = linux_kexec_load */
+ "linux_init_module", /* 105 = linux_init_module */
+ "linux_delete_module", /* 106 = linux_delete_module */
+ "linux_timer_create", /* 107 = linux_timer_create */
+ "linux_timer_gettime", /* 108 = linux_timer_gettime */
+ "linux_timer_getoverrun", /* 109 = linux_timer_getoverrun */
+ "linux_timer_settime", /* 110 = linux_timer_settime */
+ "linux_timer_delete", /* 111 = linux_timer_delete */
+ "linux_clock_settime", /* 112 = linux_clock_settime */
+ "linux_clock_gettime", /* 113 = linux_clock_gettime */
+ "linux_clock_getres", /* 114 = linux_clock_getres */
+ "linux_clock_nanosleep", /* 115 = linux_clock_nanosleep */
+ "linux_syslog", /* 116 = linux_syslog */
+ "linux_ptrace", /* 117 = linux_ptrace */
+ "linux_sched_setparam", /* 118 = linux_sched_setparam */
+ "linux_sched_setscheduler", /* 119 = linux_sched_setscheduler */
+ "linux_sched_getscheduler", /* 120 = linux_sched_getscheduler */
+ "linux_sched_getparam", /* 121 = linux_sched_getparam */
+ "linux_sched_setaffinity", /* 122 = linux_sched_setaffinity */
+ "linux_sched_getaffinity", /* 123 = linux_sched_getaffinity */
+ "sched_yield", /* 124 = sched_yield */
+ "linux_sched_get_priority_max", /* 125 = linux_sched_get_priority_max */
+ "linux_sched_get_priority_min", /* 126 = linux_sched_get_priority_min */
+ "linux_sched_rr_get_interval", /* 127 = linux_sched_rr_get_interval */
+ "#128", /* 128 = restart_syscall */
+ "linux_kill", /* 129 = linux_kill */
+ "linux_tkill", /* 130 = linux_tkill */
+ "linux_tgkill", /* 131 = linux_tgkill */
+ "linux_sigaltstack", /* 132 = linux_sigaltstack */
+ "linux_rt_sigsuspend", /* 133 = linux_rt_sigsuspend */
+ "linux_rt_sigaction", /* 134 = linux_rt_sigaction */
+ "linux_rt_sigprocmask", /* 135 = linux_rt_sigprocmask */
+ "linux_rt_sigpending", /* 136 = linux_rt_sigpending */
+ "linux_rt_sigtimedwait", /* 137 = linux_rt_sigtimedwait */
+ "linux_rt_sigqueueinfo", /* 138 = linux_rt_sigqueueinfo */
+ "linux_rt_sigreturn", /* 139 = linux_rt_sigreturn */
+ "setpriority", /* 140 = setpriority */
+ "linux_getpriority", /* 141 = linux_getpriority */
+ "linux_reboot", /* 142 = linux_reboot */
+ "setregid", /* 143 = setregid */
+ "setgid", /* 144 = setgid */
+ "setreuid", /* 145 = setreuid */
+ "setuid", /* 146 = setuid */
+ "setresuid", /* 147 = setresuid */
+ "getresuid", /* 148 = getresuid */
+ "setresgid", /* 149 = setresgid */
+ "getresgid", /* 150 = getresgid */
+ "linux_setfsuid", /* 151 = linux_setfsuid */
+ "linux_setfsgid", /* 152 = linux_setfsgid */
+ "linux_times", /* 153 = linux_times */
+ "setpgid", /* 154 = setpgid */
+ "getpgid", /* 155 = getpgid */
+ "linux_getsid", /* 156 = linux_getsid */
+ "setsid", /* 157 = setsid */
+ "linux_getgroups", /* 158 = linux_getgroups */
+ "linux_setgroups", /* 159 = linux_setgroups */
+ "linux_newuname", /* 160 = linux_newuname */
+ "linux_sethostname", /* 161 = linux_sethostname */
+ "linux_setdomainname", /* 162 = linux_setdomainname */
+ "linux_getrlimit", /* 163 = linux_getrlimit */
+ "linux_setrlimit", /* 164 = linux_setrlimit */
+ "getrusage", /* 165 = getrusage */
+ "umask", /* 166 = umask */
+ "linux_prctl", /* 167 = linux_prctl */
+ "linux_getcpu", /* 168 = linux_getcpu */
+ "gettimeofday", /* 169 = gettimeofday */
+ "settimeofday", /* 170 = settimeofday */
+ "linux_adjtimex", /* 171 = linux_adjtimex */
+ "linux_getpid", /* 172 = linux_getpid */
+ "linux_getppid", /* 173 = linux_getppid */
+ "linux_getuid", /* 174 = linux_getuid */
+ "geteuid", /* 175 = geteuid */
+ "linux_getgid", /* 176 = linux_getgid */
+ "getegid", /* 177 = getegid */
+ "linux_gettid", /* 178 = linux_gettid */
+ "linux_sysinfo", /* 179 = linux_sysinfo */
+ "linux_mq_open", /* 180 = linux_mq_open */
+ "linux_mq_unlink", /* 181 = linux_mq_unlink */
+ "linux_mq_timedsend", /* 182 = linux_mq_timedsend */
+ "linux_mq_timedreceive", /* 183 = linux_mq_timedreceive */
+ "linux_mq_notify", /* 184 = linux_mq_notify */
+ "linux_mq_getsetattr", /* 185 = linux_mq_getsetattr */
+ "linux_msgget", /* 186 = linux_msgget */
+ "linux_msgctl", /* 187 = linux_msgctl */
+ "linux_msgrcv", /* 188 = linux_msgrcv */
+ "linux_msgsnd", /* 189 = linux_msgsnd */
+ "linux_semget", /* 190 = linux_semget */
+ "linux_semctl", /* 191 = linux_semctl */
+ "linux_semtimedop", /* 192 = linux_semtimedop */
+ "linux_semop", /* 193 = linux_semop */
+ "linux_shmget", /* 194 = linux_shmget */
+ "linux_shmctl", /* 195 = linux_shmctl */
+ "linux_shmat", /* 196 = linux_shmat */
+ "linux_shmdt", /* 197 = linux_shmdt */
+ "linux_socket", /* 198 = linux_socket */
+ "linux_socketpair", /* 199 = linux_socketpair */
+ "linux_bind", /* 200 = linux_bind */
+ "linux_listen", /* 201 = linux_listen */
+ "linux_accept", /* 202 = linux_accept */
+ "linux_connect", /* 203 = linux_connect */
+ "linux_getsockname", /* 204 = linux_getsockname */
+ "linux_getpeername", /* 205 = linux_getpeername */
+ "linux_sendto", /* 206 = linux_sendto */
+ "linux_recvfrom", /* 207 = linux_recvfrom */
+ "linux_setsockopt", /* 208 = linux_setsockopt */
+ "linux_getsockopt", /* 209 = linux_getsockopt */
+ "linux_shutdown", /* 210 = linux_shutdown */
+ "linux_sendmsg", /* 211 = linux_sendmsg */
+ "linux_recvmsg", /* 212 = linux_recvmsg */
+ "#213", /* 213 = linux_readahead */
+ "linux_brk", /* 214 = linux_brk */
+ "munmap", /* 215 = munmap */
+ "linux_mremap", /* 216 = linux_mremap */
+ "linux_add_key", /* 217 = linux_add_key */
+ "linux_request_key", /* 218 = linux_request_key */
+ "linux_keyctl", /* 219 = linux_keyctl */
+ "linux_clone", /* 220 = linux_clone */
+ "linux_execve", /* 221 = linux_execve */
+ "linux_mmap2", /* 222 = linux_mmap2 */
+ "linux_fadvise64", /* 223 = linux_fadvise64 */
+ "swapon", /* 224 = swapon */
+ "linux_swapoff", /* 225 = linux_swapoff */
+ "linux_mprotect", /* 226 = linux_mprotect */
+ "linux_msync", /* 227 = linux_msync */
+ "mlock", /* 228 = mlock */
+ "munlock", /* 229 = munlock */
+ "mlockall", /* 230 = mlockall */
+ "munlockall", /* 231 = munlockall */
+ "linux_mincore", /* 232 = linux_mincore */
+ "linux_madvise", /* 233 = linux_madvise */
+ "linux_remap_file_pages", /* 234 = linux_remap_file_pages */
+ "linux_mbind", /* 235 = linux_mbind */
+ "linux_get_mempolicy", /* 236 = linux_get_mempolicy */
+ "linux_set_mempolicy", /* 237 = linux_set_mempolicy */
+ "linux_migrate_pages", /* 238 = linux_migrate_pages */
+ "linux_move_pages", /* 239 = linux_move_pages */
+ "linux_rt_tgsigqueueinfo", /* 240 = linux_rt_tgsigqueueinfo */
+ "linux_perf_event_open", /* 241 = linux_perf_event_open */
+ "linux_accept4", /* 242 = linux_accept4 */
+ "linux_recvmmsg", /* 243 = linux_recvmmsg */
+ "#244", /* 244 = unimpl_md_syscall */
+ "#245", /* 245 = unimpl_md_syscall */
+ "#246", /* 246 = unimpl_md_syscall */
+ "#247", /* 247 = unimpl_md_syscall */
+ "#248", /* 248 = unimpl_md_syscall */
+ "#249", /* 249 = unimpl_md_syscall */
+ "#250", /* 250 = unimpl_md_syscall */
+ "#251", /* 251 = unimpl_md_syscall */
+ "#252", /* 252 = unimpl_md_syscall */
+ "#253", /* 253 = unimpl_md_syscall */
+ "#254", /* 254 = unimpl_md_syscall */
+ "#255", /* 255 = unimpl_md_syscall */
+ "#256", /* 256 = unimpl_md_syscall */
+ "#257", /* 257 = unimpl_md_syscall */
+ "#258", /* 258 = unimpl_md_syscall */
+ "#259", /* 259 = unimpl_md_syscall */
+ "linux_wait4", /* 260 = linux_wait4 */
+ "linux_prlimit64", /* 261 = linux_prlimit64 */
+ "linux_fanotify_init", /* 262 = linux_fanotify_init */
+ "linux_fanotify_mark", /* 263 = linux_fanotify_mark */
+ "linux_name_to_handle_at", /* 264 = linux_name_to_handle_at */
+ "linux_open_by_handle_at", /* 265 = linux_open_by_handle_at */
+ "linux_clock_adjtime", /* 266 = linux_clock_adjtime */
+ "linux_syncfs", /* 267 = linux_syncfs */
+ "linux_setns", /* 268 = linux_setns */
+ "linux_sendmmsg", /* 269 = linux_sendmmsg */
+ "linux_process_vm_readv", /* 270 = linux_process_vm_readv */
+ "linux_process_vm_writev", /* 271 = linux_process_vm_writev */
+ "linux_kcmp", /* 272 = linux_kcmp */
+ "linux_finit_module", /* 273 = linux_finit_module */
+ "linux_sched_setattr", /* 274 = linux_sched_setattr */
+ "linux_sched_getattr", /* 275 = linux_sched_getattr */
+ "linux_renameat2", /* 276 = linux_renameat2 */
+ "linux_seccomp", /* 277 = linux_seccomp */
+ "linux_getrandom", /* 278 = linux_getrandom */
+ "linux_memfd_create", /* 279 = linux_memfd_create */
+ "linux_bpf", /* 280 = linux_bpf */
+ "linux_execveat", /* 281 = linux_execveat */
+ "linux_userfaultfd", /* 282 = linux_userfaultfd */
+ "linux_membarrier", /* 283 = linux_membarrier */
+ "linux_mlock2", /* 284 = linux_mlock2 */
+ "linux_copy_file_range", /* 285 = linux_copy_file_range */
+ "linux_preadv2", /* 286 = linux_preadv2 */
+ "linux_pwritev2", /* 287 = linux_pwritev2 */
+ "linux_pkey_mprotect", /* 288 = linux_pkey_mprotect */
+ "linux_pkey_alloc", /* 289 = linux_pkey_alloc */
+ "linux_pkey_free", /* 290 = linux_pkey_free */
+ "#291", /* 291 = nosys */
+};
diff --git a/sys/arm64/linux/linux_sysent.c b/sys/arm64/linux/linux_sysent.c
new file mode 100644
index 000000000000..1e8d5a195919
--- /dev/null
+++ b/sys/arm64/linux/linux_sysent.c
@@ -0,0 +1,312 @@
+/*
+ * System call switch table.
+ *
+ * DO NOT EDIT-- this file is automatically @generated.
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+#include <sys/sysent.h>
+#include <sys/sysproto.h>
+#include <compat/linux/linux_sysproto.h>
+#include <arm64/linux/linux.h>
+#include <arm64/linux/linux_proto.h>
+
+#define AS(name) (sizeof(struct name) / sizeof(register_t))
+
+/* The casts are bogus but will do for now. */
+struct sysent linux_sysent[] = {
+#define nosys linux_nosys
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 0 = linux_io_setup */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 1 = linux_io_destroy */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 2 = linux_io_submit */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 3 = linux_io_cancel */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 4 = linux_io_getevents */
+ { AS(linux_setxattr_args), (sy_call_t *)linux_setxattr, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 5 = linux_setxattr */
+ { AS(linux_lsetxattr_args), (sy_call_t *)linux_lsetxattr, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 6 = linux_lsetxattr */
+ { AS(linux_fsetxattr_args), (sy_call_t *)linux_fsetxattr, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 7 = linux_fsetxattr */
+ { AS(linux_getxattr_args), (sy_call_t *)linux_getxattr, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 8 = linux_getxattr */
+ { AS(linux_lgetxattr_args), (sy_call_t *)linux_lgetxattr, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 9 = linux_lgetxattr */
+ { AS(linux_fgetxattr_args), (sy_call_t *)linux_fgetxattr, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 10 = linux_fgetxattr */
+ { AS(linux_listxattr_args), (sy_call_t *)linux_listxattr, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 11 = linux_listxattr */
+ { AS(linux_llistxattr_args), (sy_call_t *)linux_llistxattr, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 12 = linux_llistxattr */
+ { AS(linux_flistxattr_args), (sy_call_t *)linux_flistxattr, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 13 = linux_flistxattr */
+ { AS(linux_removexattr_args), (sy_call_t *)linux_removexattr, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 14 = linux_removexattr */
+ { AS(linux_lremovexattr_args), (sy_call_t *)linux_lremovexattr, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 15 = linux_lremovexattr */
+ { AS(linux_fremovexattr_args), (sy_call_t *)linux_fremovexattr, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 16 = linux_fremovexattr */
+ { AS(linux_getcwd_args), (sy_call_t *)linux_getcwd, AUE_GETCWD, NULL, 0, 0, 0, SY_THR_STATIC }, /* 17 = linux_getcwd */
+ { 0, (sy_call_t *)linux_lookup_dcookie, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 18 = linux_lookup_dcookie */
+ { AS(linux_eventfd2_args), (sy_call_t *)linux_eventfd2, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 19 = linux_eventfd2 */
+ { AS(linux_epoll_create1_args), (sy_call_t *)linux_epoll_create1, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 20 = linux_epoll_create1 */
+ { AS(linux_epoll_ctl_args), (sy_call_t *)linux_epoll_ctl, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 21 = linux_epoll_ctl */
+ { AS(linux_epoll_pwait_args), (sy_call_t *)linux_epoll_pwait, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 22 = linux_epoll_pwait */
+ { AS(dup_args), (sy_call_t *)sys_dup, AUE_DUP, NULL, 0, 0, 0, SY_THR_STATIC }, /* 23 = dup */
+ { AS(linux_dup3_args), (sy_call_t *)linux_dup3, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 24 = linux_dup3 */
+ { AS(linux_fcntl_args), (sy_call_t *)linux_fcntl, AUE_FCNTL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 25 = linux_fcntl */
+ { AS(linux_inotify_init1_args), (sy_call_t *)linux_inotify_init1, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 26 = linux_inotify_init1 */
+ { 0, (sy_call_t *)linux_inotify_add_watch, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 27 = linux_inotify_add_watch */
+ { 0, (sy_call_t *)linux_inotify_rm_watch, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 28 = linux_inotify_rm_watch */
+ { AS(linux_ioctl_args), (sy_call_t *)linux_ioctl, AUE_IOCTL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 29 = linux_ioctl */
+ { 0, (sy_call_t *)linux_ioprio_set, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 30 = linux_ioprio_set */
+ { 0, (sy_call_t *)linux_ioprio_get, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 31 = linux_ioprio_get */
+ { AS(flock_args), (sy_call_t *)sys_flock, AUE_FLOCK, NULL, 0, 0, 0, SY_THR_STATIC }, /* 32 = flock */
+ { AS(linux_mknodat_args), (sy_call_t *)linux_mknodat, AUE_MKNODAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 33 = linux_mknodat */
+ { AS(linux_mkdirat_args), (sy_call_t *)linux_mkdirat, AUE_MKDIRAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 34 = linux_mkdirat */
+ { AS(linux_unlinkat_args), (sy_call_t *)linux_unlinkat, AUE_UNLINKAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 35 = linux_unlinkat */
+ { AS(linux_symlinkat_args), (sy_call_t *)linux_symlinkat, AUE_SYMLINKAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 36 = linux_symlinkat */
+ { AS(linux_linkat_args), (sy_call_t *)linux_linkat, AUE_LINKAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 37 = linux_linkat */
+ { AS(linux_renameat_args), (sy_call_t *)linux_renameat, AUE_RENAMEAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 38 = linux_renameat */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 39 = linux_umount2 */
+ { AS(linux_mount_args), (sy_call_t *)linux_mount, AUE_MOUNT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 40 = linux_mount */
+ { 0, (sy_call_t *)linux_pivot_root, AUE_PIVOT_ROOT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 41 = linux_pivot_root */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 42 = nfsservctl */
+ { AS(linux_statfs_args), (sy_call_t *)linux_statfs, AUE_STATFS, NULL, 0, 0, 0, SY_THR_STATIC }, /* 43 = linux_statfs */
+ { AS(linux_fstatfs_args), (sy_call_t *)linux_fstatfs, AUE_FSTATFS, NULL, 0, 0, 0, SY_THR_STATIC }, /* 44 = linux_fstatfs */
+ { AS(linux_truncate_args), (sy_call_t *)linux_truncate, AUE_TRUNCATE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 45 = linux_truncate */
+ { AS(linux_ftruncate_args), (sy_call_t *)linux_ftruncate, AUE_FTRUNCATE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 46 = linux_ftruncate */
+ { AS(linux_fallocate_args), (sy_call_t *)linux_fallocate, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 47 = linux_fallocate */
+ { AS(linux_faccessat_args), (sy_call_t *)linux_faccessat, AUE_FACCESSAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 48 = linux_faccessat */
+ { AS(linux_chdir_args), (sy_call_t *)linux_chdir, AUE_CHDIR, NULL, 0, 0, 0, SY_THR_STATIC }, /* 49 = linux_chdir */
+ { AS(fchdir_args), (sy_call_t *)sys_fchdir, AUE_FCHDIR, NULL, 0, 0, 0, SY_THR_STATIC }, /* 50 = fchdir */
+ { AS(chroot_args), (sy_call_t *)sys_chroot, AUE_CHROOT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 51 = chroot */
+ { AS(fchmod_args), (sy_call_t *)sys_fchmod, AUE_FCHMOD, NULL, 0, 0, 0, SY_THR_STATIC }, /* 52 = fchmod */
+ { AS(linux_fchmodat_args), (sy_call_t *)linux_fchmodat, AUE_FCHMODAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 53 = linux_fchmodat */
+ { AS(linux_fchownat_args), (sy_call_t *)linux_fchownat, AUE_FCHOWNAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 54 = linux_fchownat */
+ { AS(fchown_args), (sy_call_t *)sys_fchown, AUE_FCHOWN, NULL, 0, 0, 0, SY_THR_STATIC }, /* 55 = fchown */
+ { AS(linux_openat_args), (sy_call_t *)linux_openat, AUE_OPEN_RWTC, NULL, 0, 0, 0, SY_THR_STATIC }, /* 56 = linux_openat */
+ { AS(close_args), (sy_call_t *)sys_close, AUE_CLOSE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 57 = close */
+ { 0, (sy_call_t *)linux_vhangup, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 58 = linux_vhangup */
+ { AS(linux_pipe2_args), (sy_call_t *)linux_pipe2, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 59 = linux_pipe2 */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 60 = linux_quotactl */
+ { AS(linux_getdents64_args), (sy_call_t *)linux_getdents64, AUE_GETDIRENTRIES, NULL, 0, 0, 0, SY_THR_STATIC }, /* 61 = linux_getdents64 */
+ { AS(linux_lseek_args), (sy_call_t *)linux_lseek, AUE_LSEEK, NULL, 0, 0, 0, SY_THR_STATIC }, /* 62 = linux_lseek */
+ { AS(read_args), (sy_call_t *)sys_read, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 63 = read */
+ { AS(write_args), (sy_call_t *)sys_write, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 64 = write */
+ { AS(readv_args), (sy_call_t *)sys_readv, AUE_READV, NULL, 0, 0, 0, SY_THR_STATIC }, /* 65 = readv */
+ { AS(writev_args), (sy_call_t *)sys_writev, AUE_WRITEV, NULL, 0, 0, 0, SY_THR_STATIC }, /* 66 = writev */
+ { AS(linux_pread_args), (sy_call_t *)linux_pread, AUE_PREAD, NULL, 0, 0, 0, SY_THR_STATIC }, /* 67 = linux_pread */
+ { AS(linux_pwrite_args), (sy_call_t *)linux_pwrite, AUE_PWRITE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 68 = linux_pwrite */
+ { AS(linux_preadv_args), (sy_call_t *)linux_preadv, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 69 = linux_preadv */
+ { AS(linux_pwritev_args), (sy_call_t *)linux_pwritev, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 70 = linux_pwritev */
+ { AS(linux_sendfile_args), (sy_call_t *)linux_sendfile, AUE_SENDFILE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 71 = linux_sendfile */
+ { AS(linux_pselect6_args), (sy_call_t *)linux_pselect6, AUE_SELECT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 72 = linux_pselect6 */
+ { AS(linux_ppoll_args), (sy_call_t *)linux_ppoll, AUE_POLL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 73 = linux_ppoll */
+ { 0, (sy_call_t *)linux_signalfd4, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 74 = linux_signalfd4 */
+ { 0, (sy_call_t *)linux_vmsplice, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 75 = linux_vmsplice */
+ { AS(linux_splice_args), (sy_call_t *)linux_splice, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 76 = linux_splice */
+ { 0, (sy_call_t *)linux_tee, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 77 = linux_tee */
+ { AS(linux_readlinkat_args), (sy_call_t *)linux_readlinkat, AUE_READLINKAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 78 = linux_readlinkat */
+ { AS(linux_newfstatat_args), (sy_call_t *)linux_newfstatat, AUE_FSTATAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 79 = linux_newfstatat */
+ { AS(linux_newfstat_args), (sy_call_t *)linux_newfstat, AUE_FSTAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 80 = linux_newfstat */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 81 = linux_sync */
+ { AS(fsync_args), (sy_call_t *)sys_fsync, AUE_FSYNC, NULL, 0, 0, 0, SY_THR_STATIC }, /* 82 = fsync */
+ { AS(linux_fdatasync_args), (sy_call_t *)linux_fdatasync, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 83 = linux_fdatasync */
+ { AS(linux_sync_file_range_args), (sy_call_t *)linux_sync_file_range, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 84 = linux_sync_file_range */
+ { AS(linux_timerfd_create_args), (sy_call_t *)linux_timerfd_create, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 85 = linux_timerfd_create */
+ { AS(linux_timerfd_settime_args), (sy_call_t *)linux_timerfd_settime, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 86 = linux_timerfd_settime */
+ { AS(linux_timerfd_gettime_args), (sy_call_t *)linux_timerfd_gettime, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 87 = linux_timerfd_gettime */
+ { AS(linux_utimensat_args), (sy_call_t *)linux_utimensat, AUE_FUTIMESAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 88 = linux_utimensat */
+ { AS(acct_args), (sy_call_t *)sys_acct, AUE_ACCT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 89 = acct */
+ { AS(linux_capget_args), (sy_call_t *)linux_capget, AUE_CAPGET, NULL, 0, 0, 0, SY_THR_STATIC }, /* 90 = linux_capget */
+ { AS(linux_capset_args), (sy_call_t *)linux_capset, AUE_CAPSET, NULL, 0, 0, 0, SY_THR_STATIC }, /* 91 = linux_capset */
+ { AS(linux_personality_args), (sy_call_t *)linux_personality, AUE_PERSONALITY, NULL, 0, 0, 0, SY_THR_STATIC }, /* 92 = linux_personality */
+ { AS(linux_exit_args), (sy_call_t *)linux_exit, AUE_EXIT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 93 = linux_exit */
+ { AS(linux_exit_group_args), (sy_call_t *)linux_exit_group, AUE_EXIT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 94 = linux_exit_group */
+ { AS(linux_waitid_args), (sy_call_t *)linux_waitid, AUE_WAIT6, NULL, 0, 0, 0, SY_THR_STATIC }, /* 95 = linux_waitid */
+ { AS(linux_set_tid_address_args), (sy_call_t *)linux_set_tid_address, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 96 = linux_set_tid_address */
+ { 0, (sy_call_t *)linux_unshare, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 97 = linux_unshare */
+ { AS(linux_sys_futex_args), (sy_call_t *)linux_sys_futex, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 98 = linux_sys_futex */
+ { AS(linux_set_robust_list_args), (sy_call_t *)linux_set_robust_list, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 99 = linux_set_robust_list */
+ { AS(linux_get_robust_list_args), (sy_call_t *)linux_get_robust_list, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 100 = linux_get_robust_list */
+ { AS(linux_nanosleep_args), (sy_call_t *)linux_nanosleep, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 101 = linux_nanosleep */
+ { AS(linux_getitimer_args), (sy_call_t *)linux_getitimer, AUE_GETITIMER, NULL, 0, 0, 0, SY_THR_STATIC }, /* 102 = linux_getitimer */
+ { AS(linux_setitimer_args), (sy_call_t *)linux_setitimer, AUE_SETITIMER, NULL, 0, 0, 0, SY_THR_STATIC }, /* 103 = linux_setitimer */
+ { 0, (sy_call_t *)linux_kexec_load, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 104 = linux_kexec_load */
+ { 0, (sy_call_t *)linux_init_module, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 105 = linux_init_module */
+ { 0, (sy_call_t *)linux_delete_module, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 106 = linux_delete_module */
+ { AS(linux_timer_create_args), (sy_call_t *)linux_timer_create, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 107 = linux_timer_create */
+ { AS(linux_timer_gettime_args), (sy_call_t *)linux_timer_gettime, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 108 = linux_timer_gettime */
+ { AS(linux_timer_getoverrun_args), (sy_call_t *)linux_timer_getoverrun, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 109 = linux_timer_getoverrun */
+ { AS(linux_timer_settime_args), (sy_call_t *)linux_timer_settime, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 110 = linux_timer_settime */
+ { AS(linux_timer_delete_args), (sy_call_t *)linux_timer_delete, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 111 = linux_timer_delete */
+ { AS(linux_clock_settime_args), (sy_call_t *)linux_clock_settime, AUE_CLOCK_SETTIME, NULL, 0, 0, 0, SY_THR_STATIC }, /* 112 = linux_clock_settime */
+ { AS(linux_clock_gettime_args), (sy_call_t *)linux_clock_gettime, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 113 = linux_clock_gettime */
+ { AS(linux_clock_getres_args), (sy_call_t *)linux_clock_getres, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 114 = linux_clock_getres */
+ { AS(linux_clock_nanosleep_args), (sy_call_t *)linux_clock_nanosleep, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 115 = linux_clock_nanosleep */
+ { AS(linux_syslog_args), (sy_call_t *)linux_syslog, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 116 = linux_syslog */
+ { AS(linux_ptrace_args), (sy_call_t *)linux_ptrace, AUE_PTRACE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 117 = linux_ptrace */
+ { AS(linux_sched_setparam_args), (sy_call_t *)linux_sched_setparam, AUE_SCHED_SETPARAM, NULL, 0, 0, 0, SY_THR_STATIC }, /* 118 = linux_sched_setparam */
+ { AS(linux_sched_setscheduler_args), (sy_call_t *)linux_sched_setscheduler, AUE_SCHED_SETSCHEDULER, NULL, 0, 0, 0, SY_THR_STATIC }, /* 119 = linux_sched_setscheduler */
+ { AS(linux_sched_getscheduler_args), (sy_call_t *)linux_sched_getscheduler, AUE_SCHED_GETSCHEDULER, NULL, 0, 0, 0, SY_THR_STATIC }, /* 120 = linux_sched_getscheduler */
+ { AS(linux_sched_getparam_args), (sy_call_t *)linux_sched_getparam, AUE_SCHED_GETPARAM, NULL, 0, 0, 0, SY_THR_STATIC }, /* 121 = linux_sched_getparam */
+ { AS(linux_sched_setaffinity_args), (sy_call_t *)linux_sched_setaffinity, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 122 = linux_sched_setaffinity */
+ { AS(linux_sched_getaffinity_args), (sy_call_t *)linux_sched_getaffinity, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 123 = linux_sched_getaffinity */
+ { 0, (sy_call_t *)sys_sched_yield, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 124 = sched_yield */
+ { AS(linux_sched_get_priority_max_args), (sy_call_t *)linux_sched_get_priority_max, AUE_SCHED_GET_PRIORITY_MAX, NULL, 0, 0, 0, SY_THR_STATIC }, /* 125 = linux_sched_get_priority_max */
+ { AS(linux_sched_get_priority_min_args), (sy_call_t *)linux_sched_get_priority_min, AUE_SCHED_GET_PRIORITY_MIN, NULL, 0, 0, 0, SY_THR_STATIC }, /* 126 = linux_sched_get_priority_min */
+ { AS(linux_sched_rr_get_interval_args), (sy_call_t *)linux_sched_rr_get_interval, AUE_SCHED_RR_GET_INTERVAL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 127 = linux_sched_rr_get_interval */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 128 = restart_syscall */
+ { AS(linux_kill_args), (sy_call_t *)linux_kill, AUE_KILL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 129 = linux_kill */
+ { AS(linux_tkill_args), (sy_call_t *)linux_tkill, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 130 = linux_tkill */
+ { AS(linux_tgkill_args), (sy_call_t *)linux_tgkill, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 131 = linux_tgkill */
+ { AS(linux_sigaltstack_args), (sy_call_t *)linux_sigaltstack, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 132 = linux_sigaltstack */
+ { AS(linux_rt_sigsuspend_args), (sy_call_t *)linux_rt_sigsuspend, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 133 = linux_rt_sigsuspend */
+ { AS(linux_rt_sigaction_args), (sy_call_t *)linux_rt_sigaction, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 134 = linux_rt_sigaction */
+ { AS(linux_rt_sigprocmask_args), (sy_call_t *)linux_rt_sigprocmask, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 135 = linux_rt_sigprocmask */
+ { AS(linux_rt_sigpending_args), (sy_call_t *)linux_rt_sigpending, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 136 = linux_rt_sigpending */
+ { AS(linux_rt_sigtimedwait_args), (sy_call_t *)linux_rt_sigtimedwait, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 137 = linux_rt_sigtimedwait */
+ { AS(linux_rt_sigqueueinfo_args), (sy_call_t *)linux_rt_sigqueueinfo, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 138 = linux_rt_sigqueueinfo */
+ { AS(linux_rt_sigreturn_args), (sy_call_t *)linux_rt_sigreturn, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 139 = linux_rt_sigreturn */
+ { AS(setpriority_args), (sy_call_t *)sys_setpriority, AUE_SETPRIORITY, NULL, 0, 0, 0, SY_THR_STATIC }, /* 140 = setpriority */
+ { AS(linux_getpriority_args), (sy_call_t *)linux_getpriority, AUE_GETPRIORITY, NULL, 0, 0, 0, SY_THR_STATIC }, /* 141 = linux_getpriority */
+ { AS(linux_reboot_args), (sy_call_t *)linux_reboot, AUE_REBOOT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 142 = linux_reboot */
+ { AS(setregid_args), (sy_call_t *)sys_setregid, AUE_SETREGID, NULL, 0, 0, 0, SY_THR_STATIC }, /* 143 = setregid */
+ { AS(setgid_args), (sy_call_t *)sys_setgid, AUE_SETGID, NULL, 0, 0, 0, SY_THR_STATIC }, /* 144 = setgid */
+ { AS(setreuid_args), (sy_call_t *)sys_setreuid, AUE_SETREUID, NULL, 0, 0, 0, SY_THR_STATIC }, /* 145 = setreuid */
+ { AS(setuid_args), (sy_call_t *)sys_setuid, AUE_SETUID, NULL, 0, 0, 0, SY_THR_STATIC }, /* 146 = setuid */
+ { AS(setresuid_args), (sy_call_t *)sys_setresuid, AUE_SETRESUID, NULL, 0, 0, 0, SY_THR_STATIC }, /* 147 = setresuid */
+ { AS(getresuid_args), (sy_call_t *)sys_getresuid, AUE_GETRESUID, NULL, 0, 0, 0, SY_THR_STATIC }, /* 148 = getresuid */
+ { AS(setresgid_args), (sy_call_t *)sys_setresgid, AUE_SETRESGID, NULL, 0, 0, 0, SY_THR_STATIC }, /* 149 = setresgid */
+ { AS(getresgid_args), (sy_call_t *)sys_getresgid, AUE_GETRESGID, NULL, 0, 0, 0, SY_THR_STATIC }, /* 150 = getresgid */
+ { AS(linux_setfsuid_args), (sy_call_t *)linux_setfsuid, AUE_SETFSUID, NULL, 0, 0, 0, SY_THR_STATIC }, /* 151 = linux_setfsuid */
+ { AS(linux_setfsgid_args), (sy_call_t *)linux_setfsgid, AUE_SETFSGID, NULL, 0, 0, 0, SY_THR_STATIC }, /* 152 = linux_setfsgid */
+ { AS(linux_times_args), (sy_call_t *)linux_times, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 153 = linux_times */
+ { AS(setpgid_args), (sy_call_t *)sys_setpgid, AUE_SETPGRP, NULL, 0, 0, 0, SY_THR_STATIC }, /* 154 = setpgid */
+ { AS(getpgid_args), (sy_call_t *)sys_getpgid, AUE_GETPGID, NULL, 0, 0, 0, SY_THR_STATIC }, /* 155 = getpgid */
+ { AS(linux_getsid_args), (sy_call_t *)linux_getsid, AUE_GETSID, NULL, 0, 0, 0, SY_THR_STATIC }, /* 156 = linux_getsid */
+ { 0, (sy_call_t *)sys_setsid, AUE_SETSID, NULL, 0, 0, 0, SY_THR_STATIC }, /* 157 = setsid */
+ { AS(linux_getgroups_args), (sy_call_t *)linux_getgroups, AUE_GETGROUPS, NULL, 0, 0, 0, SY_THR_STATIC }, /* 158 = linux_getgroups */
+ { AS(linux_setgroups_args), (sy_call_t *)linux_setgroups, AUE_SETGROUPS, NULL, 0, 0, 0, SY_THR_STATIC }, /* 159 = linux_setgroups */
+ { AS(linux_newuname_args), (sy_call_t *)linux_newuname, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 160 = linux_newuname */
+ { AS(linux_sethostname_args), (sy_call_t *)linux_sethostname, AUE_SYSCTL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 161 = linux_sethostname */
+ { AS(linux_setdomainname_args), (sy_call_t *)linux_setdomainname, AUE_SYSCTL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 162 = linux_setdomainname */
+ { AS(linux_getrlimit_args), (sy_call_t *)linux_getrlimit, AUE_GETRLIMIT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 163 = linux_getrlimit */
+ { AS(linux_setrlimit_args), (sy_call_t *)linux_setrlimit, AUE_SETRLIMIT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 164 = linux_setrlimit */
+ { AS(getrusage_args), (sy_call_t *)sys_getrusage, AUE_GETRUSAGE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 165 = getrusage */
+ { AS(umask_args), (sy_call_t *)sys_umask, AUE_UMASK, NULL, 0, 0, 0, SY_THR_STATIC }, /* 166 = umask */
+ { AS(linux_prctl_args), (sy_call_t *)linux_prctl, AUE_PRCTL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 167 = linux_prctl */
+ { AS(linux_getcpu_args), (sy_call_t *)linux_getcpu, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 168 = linux_getcpu */
+ { AS(gettimeofday_args), (sy_call_t *)sys_gettimeofday, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 169 = gettimeofday */
+ { AS(settimeofday_args), (sy_call_t *)sys_settimeofday, AUE_SETTIMEOFDAY, NULL, 0, 0, 0, SY_THR_STATIC }, /* 170 = settimeofday */
+ { 0, (sy_call_t *)linux_adjtimex, AUE_ADJTIME, NULL, 0, 0, 0, SY_THR_STATIC }, /* 171 = linux_adjtimex */
+ { 0, (sy_call_t *)linux_getpid, AUE_GETPID, NULL, 0, 0, 0, SY_THR_STATIC }, /* 172 = linux_getpid */
+ { 0, (sy_call_t *)linux_getppid, AUE_GETPPID, NULL, 0, 0, 0, SY_THR_STATIC }, /* 173 = linux_getppid */
+ { 0, (sy_call_t *)linux_getuid, AUE_GETUID, NULL, 0, 0, 0, SY_THR_STATIC }, /* 174 = linux_getuid */
+ { 0, (sy_call_t *)sys_geteuid, AUE_GETEUID, NULL, 0, 0, 0, SY_THR_STATIC }, /* 175 = geteuid */
+ { 0, (sy_call_t *)linux_getgid, AUE_GETGID, NULL, 0, 0, 0, SY_THR_STATIC }, /* 176 = linux_getgid */
+ { 0, (sy_call_t *)sys_getegid, AUE_GETEGID, NULL, 0, 0, 0, SY_THR_STATIC }, /* 177 = getegid */
+ { 0, (sy_call_t *)linux_gettid, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 178 = linux_gettid */
+ { AS(linux_sysinfo_args), (sy_call_t *)linux_sysinfo, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 179 = linux_sysinfo */
+ { AS(linux_mq_open_args), (sy_call_t *)linux_mq_open, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 180 = linux_mq_open */
+ { AS(linux_mq_unlink_args), (sy_call_t *)linux_mq_unlink, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 181 = linux_mq_unlink */
+ { AS(linux_mq_timedsend_args), (sy_call_t *)linux_mq_timedsend, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 182 = linux_mq_timedsend */
+ { AS(linux_mq_timedreceive_args), (sy_call_t *)linux_mq_timedreceive, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 183 = linux_mq_timedreceive */
+ { AS(linux_mq_notify_args), (sy_call_t *)linux_mq_notify, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 184 = linux_mq_notify */
+ { AS(linux_mq_getsetattr_args), (sy_call_t *)linux_mq_getsetattr, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 185 = linux_mq_getsetattr */
+ { AS(linux_msgget_args), (sy_call_t *)linux_msgget, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 186 = linux_msgget */
+ { AS(linux_msgctl_args), (sy_call_t *)linux_msgctl, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 187 = linux_msgctl */
+ { AS(linux_msgrcv_args), (sy_call_t *)linux_msgrcv, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 188 = linux_msgrcv */
+ { AS(linux_msgsnd_args), (sy_call_t *)linux_msgsnd, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 189 = linux_msgsnd */
+ { AS(linux_semget_args), (sy_call_t *)linux_semget, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 190 = linux_semget */
+ { AS(linux_semctl_args), (sy_call_t *)linux_semctl, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 191 = linux_semctl */
+ { 0, (sy_call_t *)linux_semtimedop, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 192 = linux_semtimedop */
+ { AS(linux_semop_args), (sy_call_t *)linux_semop, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 193 = linux_semop */
+ { AS(linux_shmget_args), (sy_call_t *)linux_shmget, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 194 = linux_shmget */
+ { AS(linux_shmctl_args), (sy_call_t *)linux_shmctl, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 195 = linux_shmctl */
+ { AS(linux_shmat_args), (sy_call_t *)linux_shmat, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 196 = linux_shmat */
+ { AS(linux_shmdt_args), (sy_call_t *)linux_shmdt, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 197 = linux_shmdt */
+ { AS(linux_socket_args), (sy_call_t *)linux_socket, AUE_SOCKET, NULL, 0, 0, 0, SY_THR_STATIC }, /* 198 = linux_socket */
+ { AS(linux_socketpair_args), (sy_call_t *)linux_socketpair, AUE_SOCKETPAIR, NULL, 0, 0, 0, SY_THR_STATIC }, /* 199 = linux_socketpair */
+ { AS(linux_bind_args), (sy_call_t *)linux_bind, AUE_BIND, NULL, 0, 0, 0, SY_THR_STATIC }, /* 200 = linux_bind */
+ { AS(linux_listen_args), (sy_call_t *)linux_listen, AUE_LISTEN, NULL, 0, 0, 0, SY_THR_STATIC }, /* 201 = linux_listen */
+ { AS(linux_accept_args), (sy_call_t *)linux_accept, AUE_ACCEPT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 202 = linux_accept */
+ { AS(linux_connect_args), (sy_call_t *)linux_connect, AUE_CONNECT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 203 = linux_connect */
+ { AS(linux_getsockname_args), (sy_call_t *)linux_getsockname, AUE_GETSOCKNAME, NULL, 0, 0, 0, SY_THR_STATIC }, /* 204 = linux_getsockname */
+ { AS(linux_getpeername_args), (sy_call_t *)linux_getpeername, AUE_GETPEERNAME, NULL, 0, 0, 0, SY_THR_STATIC }, /* 205 = linux_getpeername */
+ { AS(linux_sendto_args), (sy_call_t *)linux_sendto, AUE_SENDTO, NULL, 0, 0, 0, SY_THR_STATIC }, /* 206 = linux_sendto */
+ { AS(linux_recvfrom_args), (sy_call_t *)linux_recvfrom, AUE_RECVFROM, NULL, 0, 0, 0, SY_THR_STATIC }, /* 207 = linux_recvfrom */
+ { AS(linux_setsockopt_args), (sy_call_t *)linux_setsockopt, AUE_SETSOCKOPT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 208 = linux_setsockopt */
+ { AS(linux_getsockopt_args), (sy_call_t *)linux_getsockopt, AUE_GETSOCKOPT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 209 = linux_getsockopt */
+ { AS(linux_shutdown_args), (sy_call_t *)linux_shutdown, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 210 = linux_shutdown */
+ { AS(linux_sendmsg_args), (sy_call_t *)linux_sendmsg, AUE_SENDMSG, NULL, 0, 0, 0, SY_THR_STATIC }, /* 211 = linux_sendmsg */
+ { AS(linux_recvmsg_args), (sy_call_t *)linux_recvmsg, AUE_RECVMSG, NULL, 0, 0, 0, SY_THR_STATIC }, /* 212 = linux_recvmsg */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 213 = linux_readahead */
+ { AS(linux_brk_args), (sy_call_t *)linux_brk, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 214 = linux_brk */
+ { AS(munmap_args), (sy_call_t *)sys_munmap, AUE_MUNMAP, NULL, 0, 0, 0, SY_THR_STATIC }, /* 215 = munmap */
+ { AS(linux_mremap_args), (sy_call_t *)linux_mremap, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 216 = linux_mremap */
+ { 0, (sy_call_t *)linux_add_key, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 217 = linux_add_key */
+ { 0, (sy_call_t *)linux_request_key, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 218 = linux_request_key */
+ { 0, (sy_call_t *)linux_keyctl, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 219 = linux_keyctl */
+ { AS(linux_clone_args), (sy_call_t *)linux_clone, AUE_RFORK, NULL, 0, 0, 0, SY_THR_STATIC }, /* 220 = linux_clone */
+ { AS(linux_execve_args), (sy_call_t *)linux_execve, AUE_EXECVE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 221 = linux_execve */
+ { AS(linux_mmap2_args), (sy_call_t *)linux_mmap2, AUE_MMAP, NULL, 0, 0, 0, SY_THR_STATIC }, /* 222 = linux_mmap2 */
+ { AS(linux_fadvise64_args), (sy_call_t *)linux_fadvise64, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 223 = linux_fadvise64 */
+ { AS(swapon_args), (sy_call_t *)sys_swapon, AUE_SWAPON, NULL, 0, 0, 0, SY_THR_STATIC }, /* 224 = swapon */
+ { 0, (sy_call_t *)linux_swapoff, AUE_SWAPOFF, NULL, 0, 0, 0, SY_THR_STATIC }, /* 225 = linux_swapoff */
+ { AS(linux_mprotect_args), (sy_call_t *)linux_mprotect, AUE_MPROTECT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 226 = linux_mprotect */
+ { AS(linux_msync_args), (sy_call_t *)linux_msync, AUE_MSYNC, NULL, 0, 0, 0, SY_THR_STATIC }, /* 227 = linux_msync */
+ { AS(mlock_args), (sy_call_t *)sys_mlock, AUE_MLOCK, NULL, 0, 0, 0, SY_THR_STATIC }, /* 228 = mlock */
+ { AS(munlock_args), (sy_call_t *)sys_munlock, AUE_MUNLOCK, NULL, 0, 0, 0, SY_THR_STATIC }, /* 229 = munlock */
+ { AS(mlockall_args), (sy_call_t *)sys_mlockall, AUE_MLOCKALL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 230 = mlockall */
+ { 0, (sy_call_t *)sys_munlockall, AUE_MUNLOCKALL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 231 = munlockall */
+ { AS(linux_mincore_args), (sy_call_t *)linux_mincore, AUE_MINCORE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 232 = linux_mincore */
+ { AS(linux_madvise_args), (sy_call_t *)linux_madvise, AUE_MADVISE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 233 = linux_madvise */
+ { 0, (sy_call_t *)linux_remap_file_pages, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 234 = linux_remap_file_pages */
+ { 0, (sy_call_t *)linux_mbind, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 235 = linux_mbind */
+ { 0, (sy_call_t *)linux_get_mempolicy, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 236 = linux_get_mempolicy */
+ { 0, (sy_call_t *)linux_set_mempolicy, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 237 = linux_set_mempolicy */
+ { 0, (sy_call_t *)linux_migrate_pages, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 238 = linux_migrate_pages */
+ { 0, (sy_call_t *)linux_move_pages, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 239 = linux_move_pages */
+ { AS(linux_rt_tgsigqueueinfo_args), (sy_call_t *)linux_rt_tgsigqueueinfo, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 240 = linux_rt_tgsigqueueinfo */
+ { 0, (sy_call_t *)linux_perf_event_open, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 241 = linux_perf_event_open */
+ { AS(linux_accept4_args), (sy_call_t *)linux_accept4, AUE_ACCEPT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 242 = linux_accept4 */
+ { AS(linux_recvmmsg_args), (sy_call_t *)linux_recvmmsg, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 243 = linux_recvmmsg */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 244 = unimpl_md_syscall */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 245 = unimpl_md_syscall */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 246 = unimpl_md_syscall */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 247 = unimpl_md_syscall */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 248 = unimpl_md_syscall */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 249 = unimpl_md_syscall */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 250 = unimpl_md_syscall */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 251 = unimpl_md_syscall */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 252 = unimpl_md_syscall */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 253 = unimpl_md_syscall */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 254 = unimpl_md_syscall */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 255 = unimpl_md_syscall */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 256 = unimpl_md_syscall */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 257 = unimpl_md_syscall */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 258 = unimpl_md_syscall */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 259 = unimpl_md_syscall */
+ { AS(linux_wait4_args), (sy_call_t *)linux_wait4, AUE_WAIT4, NULL, 0, 0, 0, SY_THR_STATIC }, /* 260 = linux_wait4 */
+ { AS(linux_prlimit64_args), (sy_call_t *)linux_prlimit64, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 261 = linux_prlimit64 */
+ { 0, (sy_call_t *)linux_fanotify_init, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 262 = linux_fanotify_init */
+ { 0, (sy_call_t *)linux_fanotify_mark, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 263 = linux_fanotify_mark */
+ { 0, (sy_call_t *)linux_name_to_handle_at, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 264 = linux_name_to_handle_at */
+ { 0, (sy_call_t *)linux_open_by_handle_at, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 265 = linux_open_by_handle_at */
+ { 0, (sy_call_t *)linux_clock_adjtime, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 266 = linux_clock_adjtime */
+ { AS(linux_syncfs_args), (sy_call_t *)linux_syncfs, AUE_SYNC, NULL, 0, 0, 0, SY_THR_STATIC }, /* 267 = linux_syncfs */
+ { AS(linux_setns_args), (sy_call_t *)linux_setns, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 268 = linux_setns */
+ { AS(linux_sendmmsg_args), (sy_call_t *)linux_sendmmsg, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 269 = linux_sendmmsg */
+ { AS(linux_process_vm_readv_args), (sy_call_t *)linux_process_vm_readv, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 270 = linux_process_vm_readv */
+ { AS(linux_process_vm_writev_args), (sy_call_t *)linux_process_vm_writev, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 271 = linux_process_vm_writev */
+ { AS(linux_kcmp_args), (sy_call_t *)linux_kcmp, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 272 = linux_kcmp */
+ { AS(linux_finit_module_args), (sy_call_t *)linux_finit_module, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 273 = linux_finit_module */
+ { AS(linux_sched_setattr_args), (sy_call_t *)linux_sched_setattr, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 274 = linux_sched_setattr */
+ { AS(linux_sched_getattr_args), (sy_call_t *)linux_sched_getattr, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 275 = linux_sched_getattr */
+ { AS(linux_renameat2_args), (sy_call_t *)linux_renameat2, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 276 = linux_renameat2 */
+ { AS(linux_seccomp_args), (sy_call_t *)linux_seccomp, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 277 = linux_seccomp */
+ { AS(linux_getrandom_args), (sy_call_t *)linux_getrandom, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 278 = linux_getrandom */
+ { AS(linux_memfd_create_args), (sy_call_t *)linux_memfd_create, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 279 = linux_memfd_create */
+ { AS(linux_bpf_args), (sy_call_t *)linux_bpf, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 280 = linux_bpf */
+ { AS(linux_execveat_args), (sy_call_t *)linux_execveat, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 281 = linux_execveat */
+ { AS(linux_userfaultfd_args), (sy_call_t *)linux_userfaultfd, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 282 = linux_userfaultfd */
+ { AS(linux_membarrier_args), (sy_call_t *)linux_membarrier, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 283 = linux_membarrier */
+ { AS(linux_mlock2_args), (sy_call_t *)linux_mlock2, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 284 = linux_mlock2 */
+ { AS(linux_copy_file_range_args), (sy_call_t *)linux_copy_file_range, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 285 = linux_copy_file_range */
+ { AS(linux_preadv2_args), (sy_call_t *)linux_preadv2, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 286 = linux_preadv2 */
+ { AS(linux_pwritev2_args), (sy_call_t *)linux_pwritev2, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 287 = linux_pwritev2 */
+ { AS(linux_pkey_mprotect_args), (sy_call_t *)linux_pkey_mprotect, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 288 = linux_pkey_mprotect */
+ { AS(linux_pkey_alloc_args), (sy_call_t *)linux_pkey_alloc, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 289 = linux_pkey_alloc */
+ { AS(linux_pkey_free_args), (sy_call_t *)linux_pkey_free, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 290 = linux_pkey_free */
+ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 291 = nosys */
+};
diff --git a/sys/arm64/linux/linux_systrace_args.c b/sys/arm64/linux/linux_systrace_args.c
new file mode 100644
index 000000000000..636307c1c1fe
--- /dev/null
+++ b/sys/arm64/linux/linux_systrace_args.c
@@ -0,0 +1,7055 @@
+/*
+ * System call argument to DTrace register array conversion.
+ *
+ * DO NOT EDIT-- this file is automatically @generated.
+ * $FreeBSD$
+ * This file is part of the DTrace syscall provider.
+ */
+
+static void
+systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
+{
+ int64_t *iarg = (int64_t *) uarg;
+ switch (sysnum) {
+#define nosys linux_nosys
+ /* linux_setxattr */
+ case 5: {
+ struct linux_setxattr_args *p = params;
+ uarg[0] = (intptr_t) p->path; /* const char * */
+ uarg[1] = (intptr_t) p->name; /* const char * */
+ uarg[2] = (intptr_t) p->value; /* const char * */
+ iarg[3] = p->size; /* l_size_t */
+ iarg[4] = p->flags; /* l_int */
+ *n_args = 5;
+ break;
+ }
+ /* linux_lsetxattr */
+ case 6: {
+ struct linux_lsetxattr_args *p = params;
+ uarg[0] = (intptr_t) p->path; /* const char * */
+ uarg[1] = (intptr_t) p->name; /* const char * */
+ uarg[2] = (intptr_t) p->value; /* const char * */
+ iarg[3] = p->size; /* l_size_t */
+ iarg[4] = p->flags; /* l_int */
+ *n_args = 5;
+ break;
+ }
+ /* linux_fsetxattr */
+ case 7: {
+ struct linux_fsetxattr_args *p = params;
+ iarg[0] = p->fd; /* l_int */
+ uarg[1] = (intptr_t) p->name; /* const char * */
+ uarg[2] = (intptr_t) p->value; /* const char * */
+ iarg[3] = p->size; /* l_size_t */
+ iarg[4] = p->flags; /* l_int */
+ *n_args = 5;
+ break;
+ }
+ /* linux_getxattr */
+ case 8: {
+ struct linux_getxattr_args *p = params;
+ uarg[0] = (intptr_t) p->path; /* const char * */
+ uarg[1] = (intptr_t) p->name; /* const char * */
+ uarg[2] = (intptr_t) p->value; /* char * */
+ iarg[3] = p->size; /* l_size_t */
+ *n_args = 4;
+ break;
+ }
+ /* linux_lgetxattr */
+ case 9: {
+ struct linux_lgetxattr_args *p = params;
+ uarg[0] = (intptr_t) p->path; /* const char * */
+ uarg[1] = (intptr_t) p->name; /* const char * */
+ uarg[2] = (intptr_t) p->value; /* char * */
+ iarg[3] = p->size; /* l_size_t */
+ *n_args = 4;
+ break;
+ }
+ /* linux_fgetxattr */
+ case 10: {
+ struct linux_fgetxattr_args *p = params;
+ iarg[0] = p->fd; /* l_int */
+ uarg[1] = (intptr_t) p->name; /* const char * */
+ uarg[2] = (intptr_t) p->value; /* char * */
+ iarg[3] = p->size; /* l_size_t */
+ *n_args = 4;
+ break;
+ }
+ /* linux_listxattr */
+ case 11: {
+ struct linux_listxattr_args *p = params;
+ uarg[0] = (intptr_t) p->path; /* const char * */
+ uarg[1] = (intptr_t) p->list; /* const char * */
+ iarg[2] = p->size; /* l_size_t */
+ *n_args = 3;
+ break;
+ }
+ /* linux_llistxattr */
+ case 12: {
+ struct linux_llistxattr_args *p = params;
+ uarg[0] = (intptr_t) p->path; /* const char * */
+ uarg[1] = (intptr_t) p->list; /* const char * */
+ iarg[2] = p->size; /* l_size_t */
+ *n_args = 3;
+ break;
+ }
+ /* linux_flistxattr */
+ case 13: {
+ struct linux_flistxattr_args *p = params;
+ iarg[0] = p->fd; /* l_int */
+ uarg[1] = (intptr_t) p->list; /* const char * */
+ iarg[2] = p->size; /* l_size_t */
+ *n_args = 3;
+ break;
+ }
+ /* linux_removexattr */
+ case 14: {
+ struct linux_removexattr_args *p = params;
+ uarg[0] = (intptr_t) p->path; /* const char * */
+ uarg[1] = (intptr_t) p->name; /* const char * */
+ *n_args = 2;
+ break;
+ }
+ /* linux_lremovexattr */
+ case 15: {
+ struct linux_lremovexattr_args *p = params;
+ uarg[0] = (intptr_t) p->path; /* const char * */
+ uarg[1] = (intptr_t) p->name; /* const char * */
+ *n_args = 2;
+ break;
+ }
+ /* linux_fremovexattr */
+ case 16: {
+ struct linux_fremovexattr_args *p = params;
+ iarg[0] = p->fd; /* l_int */
+ uarg[1] = (intptr_t) p->name; /* const char * */
+ *n_args = 2;
+ break;
+ }
+ /* linux_getcwd */
+ case 17: {
+ struct linux_getcwd_args *p = params;
+ uarg[0] = (intptr_t) p->buf; /* char * */
+ iarg[1] = p->bufsize; /* l_ulong */
+ *n_args = 2;
+ break;
+ }
+ /* linux_lookup_dcookie */
+ case 18: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_eventfd2 */
+ case 19: {
+ struct linux_eventfd2_args *p = params;
+ iarg[0] = p->initval; /* l_uint */
+ iarg[1] = p->flags; /* l_int */
+ *n_args = 2;
+ break;
+ }
+ /* linux_epoll_create1 */
+ case 20: {
+ struct linux_epoll_create1_args *p = params;
+ iarg[0] = p->flags; /* l_int */
+ *n_args = 1;
+ break;
+ }
+ /* linux_epoll_ctl */
+ case 21: {
+ struct linux_epoll_ctl_args *p = params;
+ iarg[0] = p->epfd; /* l_int */
+ iarg[1] = p->op; /* l_int */
+ iarg[2] = p->fd; /* l_int */
+ uarg[3] = (intptr_t) p->event; /* struct epoll_event * */
+ *n_args = 4;
+ break;
+ }
+ /* linux_epoll_pwait */
+ case 22: {
+ struct linux_epoll_pwait_args *p = params;
+ iarg[0] = p->epfd; /* l_int */
+ uarg[1] = (intptr_t) p->events; /* struct epoll_event * */
+ iarg[2] = p->maxevents; /* l_int */
+ iarg[3] = p->timeout; /* l_int */
+ uarg[4] = (intptr_t) p->mask; /* l_sigset_t * */
+ iarg[5] = p->sigsetsize; /* l_size_t */
+ *n_args = 6;
+ break;
+ }
+ /* dup */
+ case 23: {
+ struct dup_args *p = params;
+ uarg[0] = p->fd; /* u_int */
+ *n_args = 1;
+ break;
+ }
+ /* linux_dup3 */
+ case 24: {
+ struct linux_dup3_args *p = params;
+ iarg[0] = p->oldfd; /* l_int */
+ iarg[1] = p->newfd; /* l_int */
+ iarg[2] = p->flags; /* l_int */
+ *n_args = 3;
+ break;
+ }
+ /* linux_fcntl */
+ case 25: {
+ struct linux_fcntl_args *p = params;
+ iarg[0] = p->fd; /* l_uint */
+ iarg[1] = p->cmd; /* l_uint */
+ iarg[2] = p->arg; /* l_ulong */
+ *n_args = 3;
+ break;
+ }
+ /* linux_inotify_init1 */
+ case 26: {
+ struct linux_inotify_init1_args *p = params;
+ iarg[0] = p->flags; /* l_int */
+ *n_args = 1;
+ break;
+ }
+ /* linux_inotify_add_watch */
+ case 27: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_inotify_rm_watch */
+ case 28: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_ioctl */
+ case 29: {
+ struct linux_ioctl_args *p = params;
+ iarg[0] = p->fd; /* l_uint */
+ iarg[1] = p->cmd; /* l_uint */
+ iarg[2] = p->arg; /* l_ulong */
+ *n_args = 3;
+ break;
+ }
+ /* linux_ioprio_set */
+ case 30: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_ioprio_get */
+ case 31: {
+ *n_args = 0;
+ break;
+ }
+ /* flock */
+ case 32: {
+ struct flock_args *p = params;
+ iarg[0] = p->fd; /* int */
+ iarg[1] = p->how; /* int */
+ *n_args = 2;
+ break;
+ }
+ /* linux_mknodat */
+ case 33: {
+ struct linux_mknodat_args *p = params;
+ iarg[0] = p->dfd; /* l_int */
+ uarg[1] = (intptr_t) p->filename; /* const char * */
+ iarg[2] = p->mode; /* l_int */
+ iarg[3] = p->dev; /* l_uint */
+ *n_args = 4;
+ break;
+ }
+ /* linux_mkdirat */
+ case 34: {
+ struct linux_mkdirat_args *p = params;
+ iarg[0] = p->dfd; /* l_int */
+ uarg[1] = (intptr_t) p->pathname; /* const char * */
+ iarg[2] = p->mode; /* l_mode_t */
+ *n_args = 3;
+ break;
+ }
+ /* linux_unlinkat */
+ case 35: {
+ struct linux_unlinkat_args *p = params;
+ iarg[0] = p->dfd; /* l_int */
+ uarg[1] = (intptr_t) p->pathname; /* const char * */
+ iarg[2] = p->flag; /* l_int */
+ *n_args = 3;
+ break;
+ }
+ /* linux_symlinkat */
+ case 36: {
+ struct linux_symlinkat_args *p = params;
+ uarg[0] = (intptr_t) p->oldname; /* const char * */
+ iarg[1] = p->newdfd; /* l_int */
+ uarg[2] = (intptr_t) p->newname; /* const char * */
+ *n_args = 3;
+ break;
+ }
+ /* linux_linkat */
+ case 37: {
+ struct linux_linkat_args *p = params;
+ iarg[0] = p->olddfd; /* l_int */
+ uarg[1] = (intptr_t) p->oldname; /* const char * */
+ iarg[2] = p->newdfd; /* l_int */
+ uarg[3] = (intptr_t) p->newname; /* const char * */
+ iarg[4] = p->flag; /* l_int */
+ *n_args = 5;
+ break;
+ }
+ /* linux_renameat */
+ case 38: {
+ struct linux_renameat_args *p = params;
+ iarg[0] = p->olddfd; /* l_int */
+ uarg[1] = (intptr_t) p->oldname; /* const char * */
+ iarg[2] = p->newdfd; /* l_int */
+ uarg[3] = (intptr_t) p->newname; /* const char * */
+ *n_args = 4;
+ break;
+ }
+ /* linux_mount */
+ case 40: {
+ struct linux_mount_args *p = params;
+ uarg[0] = (intptr_t) p->specialfile; /* char * */
+ uarg[1] = (intptr_t) p->dir; /* char * */
+ uarg[2] = (intptr_t) p->filesystemtype; /* char * */
+ iarg[3] = p->rwflag; /* l_ulong */
+ uarg[4] = (intptr_t) p->data; /* void * */
+ *n_args = 5;
+ break;
+ }
+ /* linux_pivot_root */
+ case 41: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_statfs */
+ case 43: {
+ struct linux_statfs_args *p = params;
+ uarg[0] = (intptr_t) p->path; /* char * */
+ uarg[1] = (intptr_t) p->buf; /* struct l_statfs_buf * */
+ *n_args = 2;
+ break;
+ }
+ /* linux_fstatfs */
+ case 44: {
+ struct linux_fstatfs_args *p = params;
+ iarg[0] = p->fd; /* l_uint */
+ uarg[1] = (intptr_t) p->buf; /* struct l_statfs_buf * */
+ *n_args = 2;
+ break;
+ }
+ /* linux_truncate */
+ case 45: {
+ struct linux_truncate_args *p = params;
+ uarg[0] = (intptr_t) p->path; /* char * */
+ iarg[1] = p->length; /* l_ulong */
+ *n_args = 2;
+ break;
+ }
+ /* linux_ftruncate */
+ case 46: {
+ struct linux_ftruncate_args *p = params;
+ iarg[0] = p->fd; /* l_int */
+ iarg[1] = p->length; /* l_long */
+ *n_args = 2;
+ break;
+ }
+ /* linux_fallocate */
+ case 47: {
+ struct linux_fallocate_args *p = params;
+ iarg[0] = p->fd; /* l_int */
+ iarg[1] = p->mode; /* l_int */
+ iarg[2] = p->offset; /* l_loff_t */
+ iarg[3] = p->len; /* l_loff_t */
+ *n_args = 4;
+ break;
+ }
+ /* linux_faccessat */
+ case 48: {
+ struct linux_faccessat_args *p = params;
+ iarg[0] = p->dfd; /* l_int */
+ uarg[1] = (intptr_t) p->filename; /* const char * */
+ iarg[2] = p->amode; /* l_int */
+ *n_args = 3;
+ break;
+ }
+ /* linux_chdir */
+ case 49: {
+ struct linux_chdir_args *p = params;
+ uarg[0] = (intptr_t) p->path; /* char * */
+ *n_args = 1;
+ break;
+ }
+ /* fchdir */
+ case 50: {
+ struct fchdir_args *p = params;
+ iarg[0] = p->fd; /* int */
+ *n_args = 1;
+ break;
+ }
+ /* chroot */
+ case 51: {
+ struct chroot_args *p = params;
+ uarg[0] = (intptr_t) p->path; /* char * */
+ *n_args = 1;
+ break;
+ }
+ /* fchmod */
+ case 52: {
+ struct fchmod_args *p = params;
+ iarg[0] = p->fd; /* int */
+ iarg[1] = p->mode; /* int */
+ *n_args = 2;
+ break;
+ }
+ /* linux_fchmodat */
+ case 53: {
+ struct linux_fchmodat_args *p = params;
+ iarg[0] = p->dfd; /* l_int */
+ uarg[1] = (intptr_t) p->filename; /* const char * */
+ iarg[2] = p->mode; /* l_mode_t */
+ *n_args = 3;
+ break;
+ }
+ /* linux_fchownat */
+ case 54: {
+ struct linux_fchownat_args *p = params;
+ iarg[0] = p->dfd; /* l_int */
+ uarg[1] = (intptr_t) p->filename; /* const char * */
+ iarg[2] = p->uid; /* l_uid_t */
+ iarg[3] = p->gid; /* l_gid_t */
+ iarg[4] = p->flag; /* l_int */
+ *n_args = 5;
+ break;
+ }
+ /* fchown */
+ case 55: {
+ struct fchown_args *p = params;
+ iarg[0] = p->fd; /* int */
+ iarg[1] = p->uid; /* int */
+ iarg[2] = p->gid; /* int */
+ *n_args = 3;
+ break;
+ }
+ /* linux_openat */
+ case 56: {
+ struct linux_openat_args *p = params;
+ iarg[0] = p->dfd; /* l_int */
+ uarg[1] = (intptr_t) p->filename; /* const char * */
+ iarg[2] = p->flags; /* l_int */
+ iarg[3] = p->mode; /* l_mode_t */
+ *n_args = 4;
+ break;
+ }
+ /* close */
+ case 57: {
+ struct close_args *p = params;
+ iarg[0] = p->fd; /* int */
+ *n_args = 1;
+ break;
+ }
+ /* linux_vhangup */
+ case 58: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_pipe2 */
+ case 59: {
+ struct linux_pipe2_args *p = params;
+ uarg[0] = (intptr_t) p->pipefds; /* l_int * */
+ iarg[1] = p->flags; /* l_int */
+ *n_args = 2;
+ break;
+ }
+ /* linux_getdents64 */
+ case 61: {
+ struct linux_getdents64_args *p = params;
+ iarg[0] = p->fd; /* l_uint */
+ uarg[1] = (intptr_t) p->dirent; /* void * */
+ iarg[2] = p->count; /* l_uint */
+ *n_args = 3;
+ break;
+ }
+ /* linux_lseek */
+ case 62: {
+ struct linux_lseek_args *p = params;
+ iarg[0] = p->fdes; /* l_uint */
+ iarg[1] = p->off; /* l_off_t */
+ iarg[2] = p->whence; /* l_int */
+ *n_args = 3;
+ break;
+ }
+ /* read */
+ case 63: {
+ struct read_args *p = params;
+ iarg[0] = p->fd; /* int */
+ uarg[1] = (intptr_t) p->buf; /* char * */
+ iarg[2] = p->nbyte; /* l_size_t */
+ *n_args = 3;
+ break;
+ }
+ /* write */
+ case 64: {
+ struct write_args *p = params;
+ iarg[0] = p->fd; /* int */
+ uarg[1] = (intptr_t) p->buf; /* char * */
+ iarg[2] = p->nbyte; /* l_size_t */
+ *n_args = 3;
+ break;
+ }
+ /* readv */
+ case 65: {
+ struct readv_args *p = params;
+ iarg[0] = p->fd; /* int */
+ uarg[1] = (intptr_t) p->iovp; /* struct iovec * */
+ uarg[2] = p->iovcnt; /* u_int */
+ *n_args = 3;
+ break;
+ }
+ /* writev */
+ case 66: {
+ struct writev_args *p = params;
+ iarg[0] = p->fd; /* int */
+ uarg[1] = (intptr_t) p->iovp; /* struct iovec * */
+ uarg[2] = p->iovcnt; /* u_int */
+ *n_args = 3;
+ break;
+ }
+ /* linux_pread */
+ case 67: {
+ struct linux_pread_args *p = params;
+ iarg[0] = p->fd; /* l_uint */
+ uarg[1] = (intptr_t) p->buf; /* char * */
+ iarg[2] = p->nbyte; /* l_size_t */
+ iarg[3] = p->offset; /* l_loff_t */
+ *n_args = 4;
+ break;
+ }
+ /* linux_pwrite */
+ case 68: {
+ struct linux_pwrite_args *p = params;
+ iarg[0] = p->fd; /* l_uint */
+ uarg[1] = (intptr_t) p->buf; /* char * */
+ iarg[2] = p->nbyte; /* l_size_t */
+ iarg[3] = p->offset; /* l_loff_t */
+ *n_args = 4;
+ break;
+ }
+ /* linux_preadv */
+ case 69: {
+ struct linux_preadv_args *p = params;
+ iarg[0] = p->fd; /* l_ulong */
+ uarg[1] = (intptr_t) p->vec; /* struct iovec * */
+ iarg[2] = p->vlen; /* l_ulong */
+ iarg[3] = p->pos_l; /* l_ulong */
+ iarg[4] = p->pos_h; /* l_ulong */
+ *n_args = 5;
+ break;
+ }
+ /* linux_pwritev */
+ case 70: {
+ struct linux_pwritev_args *p = params;
+ iarg[0] = p->fd; /* l_ulong */
+ uarg[1] = (intptr_t) p->vec; /* struct iovec * */
+ iarg[2] = p->vlen; /* l_ulong */
+ iarg[3] = p->pos_l; /* l_ulong */
+ iarg[4] = p->pos_h; /* l_ulong */
+ *n_args = 5;
+ break;
+ }
+ /* linux_sendfile */
+ case 71: {
+ struct linux_sendfile_args *p = params;
+ iarg[0] = p->out; /* l_int */
+ iarg[1] = p->in; /* l_int */
+ uarg[2] = (intptr_t) p->offset; /* l_off_t * */
+ iarg[3] = p->count; /* l_size_t */
+ *n_args = 4;
+ break;
+ }
+ /* linux_pselect6 */
+ case 72: {
+ struct linux_pselect6_args *p = params;
+ iarg[0] = p->nfds; /* l_int */
+ uarg[1] = (intptr_t) p->readfds; /* l_fd_set * */
+ uarg[2] = (intptr_t) p->writefds; /* l_fd_set * */
+ uarg[3] = (intptr_t) p->exceptfds; /* l_fd_set * */
+ uarg[4] = (intptr_t) p->tsp; /* struct l_timespec * */
+ uarg[5] = (intptr_t) p->sig; /* l_uintptr_t * */
+ *n_args = 6;
+ break;
+ }
+ /* linux_ppoll */
+ case 73: {
+ struct linux_ppoll_args *p = params;
+ uarg[0] = (intptr_t) p->fds; /* struct pollfd * */
+ iarg[1] = p->nfds; /* l_uint */
+ uarg[2] = (intptr_t) p->tsp; /* struct l_timespec * */
+ uarg[3] = (intptr_t) p->sset; /* l_sigset_t * */
+ iarg[4] = p->ssize; /* l_size_t */
+ *n_args = 5;
+ break;
+ }
+ /* linux_signalfd4 */
+ case 74: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_vmsplice */
+ case 75: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_splice */
+ case 76: {
+ struct linux_splice_args *p = params;
+ iarg[0] = p->fd_in; /* int */
+ uarg[1] = (intptr_t) p->off_in; /* l_loff_t * */
+ iarg[2] = p->fd_out; /* int */
+ uarg[3] = (intptr_t) p->off_out; /* l_loff_t * */
+ iarg[4] = p->len; /* l_size_t */
+ iarg[5] = p->flags; /* l_uint */
+ *n_args = 6;
+ break;
+ }
+ /* linux_tee */
+ case 77: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_readlinkat */
+ case 78: {
+ struct linux_readlinkat_args *p = params;
+ iarg[0] = p->dfd; /* l_int */
+ uarg[1] = (intptr_t) p->path; /* const char * */
+ uarg[2] = (intptr_t) p->buf; /* char * */
+ iarg[3] = p->bufsiz; /* l_int */
+ *n_args = 4;
+ break;
+ }
+ /* linux_newfstatat */
+ case 79: {
+ struct linux_newfstatat_args *p = params;
+ iarg[0] = p->dfd; /* l_int */
+ uarg[1] = (intptr_t) p->pathname; /* char * */
+ uarg[2] = (intptr_t) p->statbuf; /* struct l_stat64 * */
+ iarg[3] = p->flag; /* l_int */
+ *n_args = 4;
+ break;
+ }
+ /* linux_newfstat */
+ case 80: {
+ struct linux_newfstat_args *p = params;
+ iarg[0] = p->fd; /* l_uint */
+ uarg[1] = (intptr_t) p->buf; /* struct l_newstat * */
+ *n_args = 2;
+ break;
+ }
+ /* fsync */
+ case 82: {
+ struct fsync_args *p = params;
+ iarg[0] = p->fd; /* int */
+ *n_args = 1;
+ break;
+ }
+ /* linux_fdatasync */
+ case 83: {
+ struct linux_fdatasync_args *p = params;
+ iarg[0] = p->fd; /* l_uint */
+ *n_args = 1;
+ break;
+ }
+ /* linux_sync_file_range */
+ case 84: {
+ struct linux_sync_file_range_args *p = params;
+ iarg[0] = p->fd; /* l_int */
+ iarg[1] = p->offset; /* l_loff_t */
+ iarg[2] = p->nbytes; /* l_loff_t */
+ iarg[3] = p->flags; /* l_uint */
+ *n_args = 4;
+ break;
+ }
+ /* linux_timerfd_create */
+ case 85: {
+ struct linux_timerfd_create_args *p = params;
+ iarg[0] = p->clockid; /* l_int */
+ iarg[1] = p->flags; /* l_int */
+ *n_args = 2;
+ break;
+ }
+ /* linux_timerfd_settime */
+ case 86: {
+ struct linux_timerfd_settime_args *p = params;
+ iarg[0] = p->fd; /* l_int */
+ iarg[1] = p->flags; /* l_int */
+ uarg[2] = (intptr_t) p->new_value; /* const struct l_itimerspec * */
+ uarg[3] = (intptr_t) p->old_value; /* struct l_itimerspec * */
+ *n_args = 4;
+ break;
+ }
+ /* linux_timerfd_gettime */
+ case 87: {
+ struct linux_timerfd_gettime_args *p = params;
+ iarg[0] = p->fd; /* l_int */
+ uarg[1] = (intptr_t) p->old_value; /* struct l_itimerspec * */
+ *n_args = 2;
+ break;
+ }
+ /* linux_utimensat */
+ case 88: {
+ struct linux_utimensat_args *p = params;
+ iarg[0] = p->dfd; /* l_int */
+ uarg[1] = (intptr_t) p->pathname; /* const char * */
+ uarg[2] = (intptr_t) p->times; /* const struct l_timespec * */
+ iarg[3] = p->flags; /* l_int */
+ *n_args = 4;
+ break;
+ }
+ /* acct */
+ case 89: {
+ struct acct_args *p = params;
+ uarg[0] = (intptr_t) p->path; /* char * */
+ *n_args = 1;
+ break;
+ }
+ /* linux_capget */
+ case 90: {
+ struct linux_capget_args *p = params;
+ uarg[0] = (intptr_t) p->hdrp; /* struct l_user_cap_header * */
+ uarg[1] = (intptr_t) p->datap; /* struct l_user_cap_data * */
+ *n_args = 2;
+ break;
+ }
+ /* linux_capset */
+ case 91: {
+ struct linux_capset_args *p = params;
+ uarg[0] = (intptr_t) p->hdrp; /* struct l_user_cap_header * */
+ uarg[1] = (intptr_t) p->datap; /* struct l_user_cap_data * */
+ *n_args = 2;
+ break;
+ }
+ /* linux_personality */
+ case 92: {
+ struct linux_personality_args *p = params;
+ iarg[0] = p->per; /* l_uint */
+ *n_args = 1;
+ break;
+ }
+ /* linux_exit */
+ case 93: {
+ struct linux_exit_args *p = params;
+ uarg[0] = p->rval; /* u_int */
+ *n_args = 1;
+ break;
+ }
+ /* linux_exit_group */
+ case 94: {
+ struct linux_exit_group_args *p = params;
+ iarg[0] = p->error_code; /* l_int */
+ *n_args = 1;
+ break;
+ }
+ /* linux_waitid */
+ case 95: {
+ struct linux_waitid_args *p = params;
+ iarg[0] = p->idtype; /* l_int */
+ iarg[1] = p->id; /* l_pid_t */
+ uarg[2] = (intptr_t) p->info; /* l_siginfo_t * */
+ iarg[3] = p->options; /* l_int */
+ uarg[4] = (intptr_t) p->rusage; /* struct rusage * */
+ *n_args = 5;
+ break;
+ }
+ /* linux_set_tid_address */
+ case 96: {
+ struct linux_set_tid_address_args *p = params;
+ uarg[0] = (intptr_t) p->tidptr; /* l_int * */
+ *n_args = 1;
+ break;
+ }
+ /* linux_unshare */
+ case 97: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_sys_futex */
+ case 98: {
+ struct linux_sys_futex_args *p = params;
+ uarg[0] = (intptr_t) p->uaddr; /* void * */
+ iarg[1] = p->op; /* int */
+ iarg[2] = p->val; /* int */
+ uarg[3] = (intptr_t) p->timeout; /* struct l_timespec * */
+ uarg[4] = (intptr_t) p->uaddr2; /* void * */
+ iarg[5] = p->val3; /* int */
+ *n_args = 6;
+ break;
+ }
+ /* linux_set_robust_list */
+ case 99: {
+ struct linux_set_robust_list_args *p = params;
+ uarg[0] = (intptr_t) p->head; /* struct linux_robust_list_head * */
+ iarg[1] = p->len; /* l_size_t */
+ *n_args = 2;
+ break;
+ }
+ /* linux_get_robust_list */
+ case 100: {
+ struct linux_get_robust_list_args *p = params;
+ iarg[0] = p->pid; /* l_int */
+ uarg[1] = (intptr_t) p->head; /* struct linux_robust_list_head ** */
+ uarg[2] = (intptr_t) p->len; /* l_size_t * */
+ *n_args = 3;
+ break;
+ }
+ /* linux_nanosleep */
+ case 101: {
+ struct linux_nanosleep_args *p = params;
+ uarg[0] = (intptr_t) p->rqtp; /* const struct l_timespec * */
+ uarg[1] = (intptr_t) p->rmtp; /* struct l_timespec * */
+ *n_args = 2;
+ break;
+ }
+ /* linux_getitimer */
+ case 102: {
+ struct linux_getitimer_args *p = params;
+ iarg[0] = p->which; /* l_int */
+ uarg[1] = (intptr_t) p->itv; /* struct l_itimerval * */
+ *n_args = 2;
+ break;
+ }
+ /* linux_setitimer */
+ case 103: {
+ struct linux_setitimer_args *p = params;
+ iarg[0] = p->which; /* l_int */
+ uarg[1] = (intptr_t) p->itv; /* struct l_itimerval * */
+ uarg[2] = (intptr_t) p->oitv; /* struct l_itimerval * */
+ *n_args = 3;
+ break;
+ }
+ /* linux_kexec_load */
+ case 104: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_init_module */
+ case 105: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_delete_module */
+ case 106: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_timer_create */
+ case 107: {
+ struct linux_timer_create_args *p = params;
+ iarg[0] = p->clock_id; /* clockid_t */
+ uarg[1] = (intptr_t) p->evp; /* struct sigevent * */
+ uarg[2] = (intptr_t) p->timerid; /* l_timer_t * */
+ *n_args = 3;
+ break;
+ }
+ /* linux_timer_gettime */
+ case 108: {
+ struct linux_timer_gettime_args *p = params;
+ iarg[0] = p->timerid; /* l_timer_t */
+ uarg[1] = (intptr_t) p->setting; /* struct itimerspec * */
+ *n_args = 2;
+ break;
+ }
+ /* linux_timer_getoverrun */
+ case 109: {
+ struct linux_timer_getoverrun_args *p = params;
+ iarg[0] = p->timerid; /* l_timer_t */
+ *n_args = 1;
+ break;
+ }
+ /* linux_timer_settime */
+ case 110: {
+ struct linux_timer_settime_args *p = params;
+ iarg[0] = p->timerid; /* l_timer_t */
+ iarg[1] = p->flags; /* l_int */
+ uarg[2] = (intptr_t) p->new; /* const struct itimerspec * */
+ uarg[3] = (intptr_t) p->old; /* struct itimerspec * */
+ *n_args = 4;
+ break;
+ }
+ /* linux_timer_delete */
+ case 111: {
+ struct linux_timer_delete_args *p = params;
+ iarg[0] = p->timerid; /* l_timer_t */
+ *n_args = 1;
+ break;
+ }
+ /* linux_clock_settime */
+ case 112: {
+ struct linux_clock_settime_args *p = params;
+ iarg[0] = p->which; /* clockid_t */
+ uarg[1] = (intptr_t) p->tp; /* struct l_timespec * */
+ *n_args = 2;
+ break;
+ }
+ /* linux_clock_gettime */
+ case 113: {
+ struct linux_clock_gettime_args *p = params;
+ iarg[0] = p->which; /* clockid_t */
+ uarg[1] = (intptr_t) p->tp; /* struct l_timespec * */
+ *n_args = 2;
+ break;
+ }
+ /* linux_clock_getres */
+ case 114: {
+ struct linux_clock_getres_args *p = params;
+ iarg[0] = p->which; /* clockid_t */
+ uarg[1] = (intptr_t) p->tp; /* struct l_timespec * */
+ *n_args = 2;
+ break;
+ }
+ /* linux_clock_nanosleep */
+ case 115: {
+ struct linux_clock_nanosleep_args *p = params;
+ iarg[0] = p->which; /* clockid_t */
+ iarg[1] = p->flags; /* l_int */
+ uarg[2] = (intptr_t) p->rqtp; /* struct l_timespec * */
+ uarg[3] = (intptr_t) p->rmtp; /* struct l_timespec * */
+ *n_args = 4;
+ break;
+ }
+ /* linux_syslog */
+ case 116: {
+ struct linux_syslog_args *p = params;
+ iarg[0] = p->type; /* l_int */
+ uarg[1] = (intptr_t) p->buf; /* char * */
+ iarg[2] = p->len; /* l_int */
+ *n_args = 3;
+ break;
+ }
+ /* linux_ptrace */
+ case 117: {
+ struct linux_ptrace_args *p = params;
+ iarg[0] = p->req; /* l_long */
+ iarg[1] = p->pid; /* l_long */
+ iarg[2] = p->addr; /* l_ulong */
+ iarg[3] = p->data; /* l_ulong */
+ *n_args = 4;
+ break;
+ }
+ /* linux_sched_setparam */
+ case 118: {
+ struct linux_sched_setparam_args *p = params;
+ iarg[0] = p->pid; /* l_pid_t */
+ uarg[1] = (intptr_t) p->param; /* struct sched_param * */
+ *n_args = 2;
+ break;
+ }
+ /* linux_sched_setscheduler */
+ case 119: {
+ struct linux_sched_setscheduler_args *p = params;
+ iarg[0] = p->pid; /* l_pid_t */
+ iarg[1] = p->policy; /* l_int */
+ uarg[2] = (intptr_t) p->param; /* struct sched_param * */
+ *n_args = 3;
+ break;
+ }
+ /* linux_sched_getscheduler */
+ case 120: {
+ struct linux_sched_getscheduler_args *p = params;
+ iarg[0] = p->pid; /* l_pid_t */
+ *n_args = 1;
+ break;
+ }
+ /* linux_sched_getparam */
+ case 121: {
+ struct linux_sched_getparam_args *p = params;
+ iarg[0] = p->pid; /* l_pid_t */
+ uarg[1] = (intptr_t) p->param; /* struct sched_param * */
+ *n_args = 2;
+ break;
+ }
+ /* linux_sched_setaffinity */
+ case 122: {
+ struct linux_sched_setaffinity_args *p = params;
+ iarg[0] = p->pid; /* l_pid_t */
+ iarg[1] = p->len; /* l_uint */
+ uarg[2] = (intptr_t) p->user_mask_ptr; /* l_ulong * */
+ *n_args = 3;
+ break;
+ }
+ /* linux_sched_getaffinity */
+ case 123: {
+ struct linux_sched_getaffinity_args *p = params;
+ iarg[0] = p->pid; /* l_pid_t */
+ iarg[1] = p->len; /* l_uint */
+ uarg[2] = (intptr_t) p->user_mask_ptr; /* l_ulong * */
+ *n_args = 3;
+ break;
+ }
+ /* sched_yield */
+ case 124: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_sched_get_priority_max */
+ case 125: {
+ struct linux_sched_get_priority_max_args *p = params;
+ iarg[0] = p->policy; /* l_int */
+ *n_args = 1;
+ break;
+ }
+ /* linux_sched_get_priority_min */
+ case 126: {
+ struct linux_sched_get_priority_min_args *p = params;
+ iarg[0] = p->policy; /* l_int */
+ *n_args = 1;
+ break;
+ }
+ /* linux_sched_rr_get_interval */
+ case 127: {
+ struct linux_sched_rr_get_interval_args *p = params;
+ iarg[0] = p->pid; /* l_pid_t */
+ uarg[1] = (intptr_t) p->interval; /* struct l_timespec * */
+ *n_args = 2;
+ break;
+ }
+ /* linux_kill */
+ case 129: {
+ struct linux_kill_args *p = params;
+ iarg[0] = p->pid; /* l_pid_t */
+ iarg[1] = p->signum; /* l_int */
+ *n_args = 2;
+ break;
+ }
+ /* linux_tkill */
+ case 130: {
+ struct linux_tkill_args *p = params;
+ iarg[0] = p->tid; /* l_pid_t */
+ iarg[1] = p->sig; /* l_int */
+ *n_args = 2;
+ break;
+ }
+ /* linux_tgkill */
+ case 131: {
+ struct linux_tgkill_args *p = params;
+ iarg[0] = p->tgid; /* l_pid_t */
+ iarg[1] = p->pid; /* l_pid_t */
+ iarg[2] = p->sig; /* l_int */
+ *n_args = 3;
+ break;
+ }
+ /* linux_sigaltstack */
+ case 132: {
+ struct linux_sigaltstack_args *p = params;
+ uarg[0] = (intptr_t) p->uss; /* l_stack_t * */
+ uarg[1] = (intptr_t) p->uoss; /* l_stack_t * */
+ *n_args = 2;
+ break;
+ }
+ /* linux_rt_sigsuspend */
+ case 133: {
+ struct linux_rt_sigsuspend_args *p = params;
+ uarg[0] = (intptr_t) p->newset; /* l_sigset_t * */
+ iarg[1] = p->sigsetsize; /* l_size_t */
+ *n_args = 2;
+ break;
+ }
+ /* linux_rt_sigaction */
+ case 134: {
+ struct linux_rt_sigaction_args *p = params;
+ iarg[0] = p->sig; /* l_int */
+ uarg[1] = (intptr_t) p->act; /* l_sigaction_t * */
+ uarg[2] = (intptr_t) p->oact; /* l_sigaction_t * */
+ iarg[3] = p->sigsetsize; /* l_size_t */
+ *n_args = 4;
+ break;
+ }
+ /* linux_rt_sigprocmask */
+ case 135: {
+ struct linux_rt_sigprocmask_args *p = params;
+ iarg[0] = p->how; /* l_int */
+ uarg[1] = (intptr_t) p->mask; /* l_sigset_t * */
+ uarg[2] = (intptr_t) p->omask; /* l_sigset_t * */
+ iarg[3] = p->sigsetsize; /* l_size_t */
+ *n_args = 4;
+ break;
+ }
+ /* linux_rt_sigpending */
+ case 136: {
+ struct linux_rt_sigpending_args *p = params;
+ uarg[0] = (intptr_t) p->set; /* l_sigset_t * */
+ iarg[1] = p->sigsetsize; /* l_size_t */
+ *n_args = 2;
+ break;
+ }
+ /* linux_rt_sigtimedwait */
+ case 137: {
+ struct linux_rt_sigtimedwait_args *p = params;
+ uarg[0] = (intptr_t) p->mask; /* l_sigset_t * */
+ uarg[1] = (intptr_t) p->ptr; /* l_siginfo_t * */
+ uarg[2] = (intptr_t) p->timeout; /* struct l_timeval * */
+ iarg[3] = p->sigsetsize; /* l_size_t */
+ *n_args = 4;
+ break;
+ }
+ /* linux_rt_sigqueueinfo */
+ case 138: {
+ struct linux_rt_sigqueueinfo_args *p = params;
+ iarg[0] = p->pid; /* l_pid_t */
+ iarg[1] = p->sig; /* l_int */
+ uarg[2] = (intptr_t) p->info; /* l_siginfo_t * */
+ *n_args = 3;
+ break;
+ }
+ /* linux_rt_sigreturn */
+ case 139: {
+ struct linux_rt_sigreturn_args *p = params;
+ uarg[0] = (intptr_t) p->ucp; /* struct l_ucontext * */
+ *n_args = 1;
+ break;
+ }
+ /* setpriority */
+ case 140: {
+ struct setpriority_args *p = params;
+ iarg[0] = p->which; /* int */
+ iarg[1] = p->who; /* int */
+ iarg[2] = p->prio; /* int */
+ *n_args = 3;
+ break;
+ }
+ /* linux_getpriority */
+ case 141: {
+ struct linux_getpriority_args *p = params;
+ iarg[0] = p->which; /* l_int */
+ iarg[1] = p->who; /* l_int */
+ *n_args = 2;
+ break;
+ }
+ /* linux_reboot */
+ case 142: {
+ struct linux_reboot_args *p = params;
+ iarg[0] = p->magic1; /* l_int */
+ iarg[1] = p->magic2; /* l_int */
+ iarg[2] = p->cmd; /* l_uint */
+ uarg[3] = (intptr_t) p->arg; /* void * */
+ *n_args = 4;
+ break;
+ }
+ /* setregid */
+ case 143: {
+ struct setregid_args *p = params;
+ iarg[0] = p->rgid; /* gid_t */
+ iarg[1] = p->egid; /* gid_t */
+ *n_args = 2;
+ break;
+ }
+ /* setgid */
+ case 144: {
+ struct setgid_args *p = params;
+ iarg[0] = p->gid; /* gid_t */
+ *n_args = 1;
+ break;
+ }
+ /* setreuid */
+ case 145: {
+ struct setreuid_args *p = params;
+ uarg[0] = p->ruid; /* uid_t */
+ uarg[1] = p->euid; /* uid_t */
+ *n_args = 2;
+ break;
+ }
+ /* setuid */
+ case 146: {
+ struct setuid_args *p = params;
+ uarg[0] = p->uid; /* uid_t */
+ *n_args = 1;
+ break;
+ }
+ /* setresuid */
+ case 147: {
+ struct setresuid_args *p = params;
+ uarg[0] = p->ruid; /* uid_t */
+ uarg[1] = p->euid; /* uid_t */
+ uarg[2] = p->suid; /* uid_t */
+ *n_args = 3;
+ break;
+ }
+ /* getresuid */
+ case 148: {
+ struct getresuid_args *p = params;
+ uarg[0] = (intptr_t) p->ruid; /* uid_t * */
+ uarg[1] = (intptr_t) p->euid; /* uid_t * */
+ uarg[2] = (intptr_t) p->suid; /* uid_t * */
+ *n_args = 3;
+ break;
+ }
+ /* setresgid */
+ case 149: {
+ struct setresgid_args *p = params;
+ iarg[0] = p->rgid; /* gid_t */
+ iarg[1] = p->egid; /* gid_t */
+ iarg[2] = p->sgid; /* gid_t */
+ *n_args = 3;
+ break;
+ }
+ /* getresgid */
+ case 150: {
+ struct getresgid_args *p = params;
+ uarg[0] = (intptr_t) p->rgid; /* gid_t * */
+ uarg[1] = (intptr_t) p->egid; /* gid_t * */
+ uarg[2] = (intptr_t) p->sgid; /* gid_t * */
+ *n_args = 3;
+ break;
+ }
+ /* linux_setfsuid */
+ case 151: {
+ struct linux_setfsuid_args *p = params;
+ iarg[0] = p->uid; /* l_uid_t */
+ *n_args = 1;
+ break;
+ }
+ /* linux_setfsgid */
+ case 152: {
+ struct linux_setfsgid_args *p = params;
+ iarg[0] = p->gid; /* l_gid_t */
+ *n_args = 1;
+ break;
+ }
+ /* linux_times */
+ case 153: {
+ struct linux_times_args *p = params;
+ uarg[0] = (intptr_t) p->buf; /* struct l_times_argv * */
+ *n_args = 1;
+ break;
+ }
+ /* setpgid */
+ case 154: {
+ struct setpgid_args *p = params;
+ iarg[0] = p->pid; /* int */
+ iarg[1] = p->pgid; /* int */
+ *n_args = 2;
+ break;
+ }
+ /* getpgid */
+ case 155: {
+ struct getpgid_args *p = params;
+ iarg[0] = p->pid; /* int */
+ *n_args = 1;
+ break;
+ }
+ /* linux_getsid */
+ case 156: {
+ struct linux_getsid_args *p = params;
+ iarg[0] = p->pid; /* l_pid_t */
+ *n_args = 1;
+ break;
+ }
+ /* setsid */
+ case 157: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_getgroups */
+ case 158: {
+ struct linux_getgroups_args *p = params;
+ iarg[0] = p->gidsetsize; /* l_int */
+ uarg[1] = (intptr_t) p->grouplist; /* l_gid_t * */
+ *n_args = 2;
+ break;
+ }
+ /* linux_setgroups */
+ case 159: {
+ struct linux_setgroups_args *p = params;
+ iarg[0] = p->gidsetsize; /* l_int */
+ uarg[1] = (intptr_t) p->grouplist; /* l_gid_t * */
+ *n_args = 2;
+ break;
+ }
+ /* linux_newuname */
+ case 160: {
+ struct linux_newuname_args *p = params;
+ uarg[0] = (intptr_t) p->buf; /* struct l_new_utsname * */
+ *n_args = 1;
+ break;
+ }
+ /* linux_sethostname */
+ case 161: {
+ struct linux_sethostname_args *p = params;
+ uarg[0] = (intptr_t) p->hostname; /* char * */
+ iarg[1] = p->len; /* l_uint */
+ *n_args = 2;
+ break;
+ }
+ /* linux_setdomainname */
+ case 162: {
+ struct linux_setdomainname_args *p = params;
+ uarg[0] = (intptr_t) p->name; /* char * */
+ iarg[1] = p->len; /* l_int */
+ *n_args = 2;
+ break;
+ }
+ /* linux_getrlimit */
+ case 163: {
+ struct linux_getrlimit_args *p = params;
+ iarg[0] = p->resource; /* l_uint */
+ uarg[1] = (intptr_t) p->rlim; /* struct l_rlimit * */
+ *n_args = 2;
+ break;
+ }
+ /* linux_setrlimit */
+ case 164: {
+ struct linux_setrlimit_args *p = params;
+ iarg[0] = p->resource; /* l_uint */
+ uarg[1] = (intptr_t) p->rlim; /* struct l_rlimit * */
+ *n_args = 2;
+ break;
+ }
+ /* getrusage */
+ case 165: {
+ struct getrusage_args *p = params;
+ iarg[0] = p->who; /* int */
+ uarg[1] = (intptr_t) p->rusage; /* struct rusage * */
+ *n_args = 2;
+ break;
+ }
+ /* umask */
+ case 166: {
+ struct umask_args *p = params;
+ iarg[0] = p->newmask; /* int */
+ *n_args = 1;
+ break;
+ }
+ /* linux_prctl */
+ case 167: {
+ struct linux_prctl_args *p = params;
+ iarg[0] = p->option; /* l_int */
+ iarg[1] = p->arg2; /* l_uintptr_t */
+ iarg[2] = p->arg3; /* l_uintptr_t */
+ iarg[3] = p->arg4; /* l_uintptr_t */
+ iarg[4] = p->arg5; /* l_uintptr_t */
+ *n_args = 5;
+ break;
+ }
+ /* linux_getcpu */
+ case 168: {
+ struct linux_getcpu_args *p = params;
+ uarg[0] = (intptr_t) p->cpu; /* l_uint * */
+ uarg[1] = (intptr_t) p->node; /* l_uint * */
+ uarg[2] = (intptr_t) p->cache; /* void * */
+ *n_args = 3;
+ break;
+ }
+ /* gettimeofday */
+ case 169: {
+ struct gettimeofday_args *p = params;
+ uarg[0] = (intptr_t) p->tp; /* struct l_timeval * */
+ uarg[1] = (intptr_t) p->tzp; /* struct timezone * */
+ *n_args = 2;
+ break;
+ }
+ /* settimeofday */
+ case 170: {
+ struct settimeofday_args *p = params;
+ uarg[0] = (intptr_t) p->tv; /* struct l_timeval * */
+ uarg[1] = (intptr_t) p->tzp; /* struct timezone * */
+ *n_args = 2;
+ break;
+ }
+ /* linux_adjtimex */
+ case 171: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_getpid */
+ case 172: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_getppid */
+ case 173: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_getuid */
+ case 174: {
+ *n_args = 0;
+ break;
+ }
+ /* geteuid */
+ case 175: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_getgid */
+ case 176: {
+ *n_args = 0;
+ break;
+ }
+ /* getegid */
+ case 177: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_gettid */
+ case 178: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_sysinfo */
+ case 179: {
+ struct linux_sysinfo_args *p = params;
+ uarg[0] = (intptr_t) p->info; /* struct l_sysinfo * */
+ *n_args = 1;
+ break;
+ }
+ /* linux_mq_open */
+ case 180: {
+ struct linux_mq_open_args *p = params;
+ uarg[0] = (intptr_t) p->name; /* const char * */
+ iarg[1] = p->oflag; /* l_int */
+ iarg[2] = p->mode; /* l_mode_t */
+ uarg[3] = (intptr_t) p->attr; /* struct mq_attr * */
+ *n_args = 4;
+ break;
+ }
+ /* linux_mq_unlink */
+ case 181: {
+ struct linux_mq_unlink_args *p = params;
+ uarg[0] = (intptr_t) p->name; /* const char * */
+ *n_args = 1;
+ break;
+ }
+ /* linux_mq_timedsend */
+ case 182: {
+ struct linux_mq_timedsend_args *p = params;
+ iarg[0] = p->mqd; /* l_mqd_t */
+ uarg[1] = (intptr_t) p->msg_ptr; /* const char * */
+ iarg[2] = p->msg_len; /* l_size_t */
+ iarg[3] = p->msg_prio; /* l_uint */
+ uarg[4] = (intptr_t) p->abs_timeout; /* const struct l_timespec * */
+ *n_args = 5;
+ break;
+ }
+ /* linux_mq_timedreceive */
+ case 183: {
+ struct linux_mq_timedreceive_args *p = params;
+ iarg[0] = p->mqd; /* l_mqd_t */
+ uarg[1] = (intptr_t) p->msg_ptr; /* char * */
+ iarg[2] = p->msg_len; /* l_size_t */
+ uarg[3] = (intptr_t) p->msg_prio; /* l_uint * */
+ uarg[4] = (intptr_t) p->abs_timeout; /* const struct l_timespec * */
+ *n_args = 5;
+ break;
+ }
+ /* linux_mq_notify */
+ case 184: {
+ struct linux_mq_notify_args *p = params;
+ iarg[0] = p->mqd; /* l_mqd_t */
+ uarg[1] = (intptr_t) p->abs_timeout; /* const struct l_timespec * */
+ *n_args = 2;
+ break;
+ }
+ /* linux_mq_getsetattr */
+ case 185: {
+ struct linux_mq_getsetattr_args *p = params;
+ iarg[0] = p->mqd; /* l_mqd_t */
+ uarg[1] = (intptr_t) p->attr; /* const struct mq_attr * */
+ uarg[2] = (intptr_t) p->oattr; /* struct mq_attr * */
+ *n_args = 3;
+ break;
+ }
+ /* linux_msgget */
+ case 186: {
+ struct linux_msgget_args *p = params;
+ iarg[0] = p->key; /* l_key_t */
+ iarg[1] = p->msgflg; /* l_int */
+ *n_args = 2;
+ break;
+ }
+ /* linux_msgctl */
+ case 187: {
+ struct linux_msgctl_args *p = params;
+ iarg[0] = p->msqid; /* l_int */
+ iarg[1] = p->cmd; /* l_int */
+ uarg[2] = (intptr_t) p->buf; /* struct l_msqid_ds * */
+ *n_args = 3;
+ break;
+ }
+ /* linux_msgrcv */
+ case 188: {
+ struct linux_msgrcv_args *p = params;
+ iarg[0] = p->msqid; /* l_int */
+ uarg[1] = (intptr_t) p->msgp; /* struct l_msgbuf * */
+ iarg[2] = p->msgsz; /* l_size_t */
+ iarg[3] = p->msgtyp; /* l_long */
+ iarg[4] = p->msgflg; /* l_int */
+ *n_args = 5;
+ break;
+ }
+ /* linux_msgsnd */
+ case 189: {
+ struct linux_msgsnd_args *p = params;
+ iarg[0] = p->msqid; /* l_int */
+ uarg[1] = (intptr_t) p->msgp; /* struct l_msgbuf * */
+ iarg[2] = p->msgsz; /* l_size_t */
+ iarg[3] = p->msgflg; /* l_int */
+ *n_args = 4;
+ break;
+ }
+ /* linux_semget */
+ case 190: {
+ struct linux_semget_args *p = params;
+ iarg[0] = p->key; /* l_key_t */
+ iarg[1] = p->nsems; /* l_int */
+ iarg[2] = p->semflg; /* l_int */
+ *n_args = 3;
+ break;
+ }
+ /* linux_semctl */
+ case 191: {
+ struct linux_semctl_args *p = params;
+ iarg[0] = p->semid; /* l_int */
+ iarg[1] = p->semnum; /* l_int */
+ iarg[2] = p->cmd; /* l_int */
+ uarg[3] = p->arg.buf; /* union l_semun */
+ *n_args = 4;
+ break;
+ }
+ /* linux_semtimedop */
+ case 192: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_semop */
+ case 193: {
+ struct linux_semop_args *p = params;
+ iarg[0] = p->semid; /* l_int */
+ uarg[1] = (intptr_t) p->tsops; /* struct l_sembuf * */
+ iarg[2] = p->nsops; /* l_uint */
+ *n_args = 3;
+ break;
+ }
+ /* linux_shmget */
+ case 194: {
+ struct linux_shmget_args *p = params;
+ iarg[0] = p->key; /* l_key_t */
+ iarg[1] = p->size; /* l_size_t */
+ iarg[2] = p->shmflg; /* l_int */
+ *n_args = 3;
+ break;
+ }
+ /* linux_shmctl */
+ case 195: {
+ struct linux_shmctl_args *p = params;
+ iarg[0] = p->shmid; /* l_int */
+ iarg[1] = p->cmd; /* l_int */
+ uarg[2] = (intptr_t) p->buf; /* struct l_shmid_ds * */
+ *n_args = 3;
+ break;
+ }
+ /* linux_shmat */
+ case 196: {
+ struct linux_shmat_args *p = params;
+ iarg[0] = p->shmid; /* l_int */
+ uarg[1] = (intptr_t) p->shmaddr; /* char * */
+ iarg[2] = p->shmflg; /* l_int */
+ *n_args = 3;
+ break;
+ }
+ /* linux_shmdt */
+ case 197: {
+ struct linux_shmdt_args *p = params;
+ uarg[0] = (intptr_t) p->shmaddr; /* char * */
+ *n_args = 1;
+ break;
+ }
+ /* linux_socket */
+ case 198: {
+ struct linux_socket_args *p = params;
+ iarg[0] = p->domain; /* l_int */
+ iarg[1] = p->type; /* l_int */
+ iarg[2] = p->protocol; /* l_int */
+ *n_args = 3;
+ break;
+ }
+ /* linux_socketpair */
+ case 199: {
+ struct linux_socketpair_args *p = params;
+ iarg[0] = p->domain; /* l_int */
+ iarg[1] = p->type; /* l_int */
+ iarg[2] = p->protocol; /* l_int */
+ iarg[3] = p->rsv; /* l_uintptr_t */
+ *n_args = 4;
+ break;
+ }
+ /* linux_bind */
+ case 200: {
+ struct linux_bind_args *p = params;
+ iarg[0] = p->s; /* l_int */
+ iarg[1] = p->name; /* l_uintptr_t */
+ iarg[2] = p->namelen; /* l_int */
+ *n_args = 3;
+ break;
+ }
+ /* linux_listen */
+ case 201: {
+ struct linux_listen_args *p = params;
+ iarg[0] = p->s; /* l_int */
+ iarg[1] = p->backlog; /* l_int */
+ *n_args = 2;
+ break;
+ }
+ /* linux_accept */
+ case 202: {
+ struct linux_accept_args *p = params;
+ iarg[0] = p->s; /* l_int */
+ iarg[1] = p->addr; /* l_uintptr_t */
+ iarg[2] = p->namelen; /* l_uintptr_t */
+ *n_args = 3;
+ break;
+ }
+ /* linux_connect */
+ case 203: {
+ struct linux_connect_args *p = params;
+ iarg[0] = p->s; /* l_int */
+ iarg[1] = p->name; /* l_uintptr_t */
+ iarg[2] = p->namelen; /* l_int */
+ *n_args = 3;
+ break;
+ }
+ /* linux_getsockname */
+ case 204: {
+ struct linux_getsockname_args *p = params;
+ iarg[0] = p->s; /* l_int */
+ iarg[1] = p->addr; /* l_uintptr_t */
+ iarg[2] = p->namelen; /* l_uintptr_t */
+ *n_args = 3;
+ break;
+ }
+ /* linux_getpeername */
+ case 205: {
+ struct linux_getpeername_args *p = params;
+ iarg[0] = p->s; /* l_int */
+ iarg[1] = p->addr; /* l_uintptr_t */
+ iarg[2] = p->namelen; /* l_uintptr_t */
+ *n_args = 3;
+ break;
+ }
+ /* linux_sendto */
+ case 206: {
+ struct linux_sendto_args *p = params;
+ iarg[0] = p->s; /* l_int */
+ iarg[1] = p->msg; /* l_uintptr_t */
+ iarg[2] = p->len; /* l_size_t */
+ iarg[3] = p->flags; /* l_uint */
+ iarg[4] = p->to; /* l_uintptr_t */
+ iarg[5] = p->tolen; /* l_int */
+ *n_args = 6;
+ break;
+ }
+ /* linux_recvfrom */
+ case 207: {
+ struct linux_recvfrom_args *p = params;
+ iarg[0] = p->s; /* l_int */
+ iarg[1] = p->buf; /* l_uintptr_t */
+ iarg[2] = p->len; /* l_size_t */
+ iarg[3] = p->flags; /* l_uint */
+ iarg[4] = p->from; /* l_uintptr_t */
+ iarg[5] = p->fromlen; /* l_uintptr_t */
+ *n_args = 6;
+ break;
+ }
+ /* linux_setsockopt */
+ case 208: {
+ struct linux_setsockopt_args *p = params;
+ iarg[0] = p->s; /* l_int */
+ iarg[1] = p->level; /* l_int */
+ iarg[2] = p->optname; /* l_int */
+ iarg[3] = p->optval; /* l_uintptr_t */
+ iarg[4] = p->optlen; /* l_int */
+ *n_args = 5;
+ break;
+ }
+ /* linux_getsockopt */
+ case 209: {
+ struct linux_getsockopt_args *p = params;
+ iarg[0] = p->s; /* l_int */
+ iarg[1] = p->level; /* l_int */
+ iarg[2] = p->optname; /* l_int */
+ iarg[3] = p->optval; /* l_uintptr_t */
+ iarg[4] = p->optlen; /* l_uintptr_t */
+ *n_args = 5;
+ break;
+ }
+ /* linux_shutdown */
+ case 210: {
+ struct linux_shutdown_args *p = params;
+ iarg[0] = p->s; /* l_int */
+ iarg[1] = p->how; /* l_int */
+ *n_args = 2;
+ break;
+ }
+ /* linux_sendmsg */
+ case 211: {
+ struct linux_sendmsg_args *p = params;
+ iarg[0] = p->s; /* l_int */
+ iarg[1] = p->msg; /* l_uintptr_t */
+ iarg[2] = p->flags; /* l_uint */
+ *n_args = 3;
+ break;
+ }
+ /* linux_recvmsg */
+ case 212: {
+ struct linux_recvmsg_args *p = params;
+ iarg[0] = p->s; /* l_int */
+ iarg[1] = p->msg; /* l_uintptr_t */
+ iarg[2] = p->flags; /* l_uint */
+ *n_args = 3;
+ break;
+ }
+ /* linux_brk */
+ case 214: {
+ struct linux_brk_args *p = params;
+ iarg[0] = p->dsend; /* l_ulong */
+ *n_args = 1;
+ break;
+ }
+ /* munmap */
+ case 215: {
+ struct munmap_args *p = params;
+ uarg[0] = (intptr_t) p->addr; /* void * */
+ iarg[1] = p->len; /* l_size_t */
+ *n_args = 2;
+ break;
+ }
+ /* linux_mremap */
+ case 216: {
+ struct linux_mremap_args *p = params;
+ iarg[0] = p->addr; /* l_ulong */
+ iarg[1] = p->old_len; /* l_ulong */
+ iarg[2] = p->new_len; /* l_ulong */
+ iarg[3] = p->flags; /* l_ulong */
+ iarg[4] = p->new_addr; /* l_ulong */
+ *n_args = 5;
+ break;
+ }
+ /* linux_add_key */
+ case 217: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_request_key */
+ case 218: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_keyctl */
+ case 219: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_clone */
+ case 220: {
+ struct linux_clone_args *p = params;
+ iarg[0] = p->flags; /* l_ulong */
+ uarg[1] = (intptr_t) p->stack; /* void * */
+ uarg[2] = (intptr_t) p->parent_tidptr; /* void * */
+ uarg[3] = (intptr_t) p->tls; /* void * */
+ uarg[4] = (intptr_t) p->child_tidptr; /* void * */
+ *n_args = 5;
+ break;
+ }
+ /* linux_execve */
+ case 221: {
+ struct linux_execve_args *p = params;
+ uarg[0] = (intptr_t) p->path; /* char * */
+ uarg[1] = (intptr_t) p->argp; /* char ** */
+ uarg[2] = (intptr_t) p->envp; /* char ** */
+ *n_args = 3;
+ break;
+ }
+ /* linux_mmap2 */
+ case 222: {
+ struct linux_mmap2_args *p = params;
+ iarg[0] = p->addr; /* l_ulong */
+ iarg[1] = p->len; /* l_ulong */
+ iarg[2] = p->prot; /* l_ulong */
+ iarg[3] = p->flags; /* l_ulong */
+ iarg[4] = p->fd; /* l_ulong */
+ iarg[5] = p->pgoff; /* l_ulong */
+ *n_args = 6;
+ break;
+ }
+ /* linux_fadvise64 */
+ case 223: {
+ struct linux_fadvise64_args *p = params;
+ iarg[0] = p->fd; /* l_int */
+ iarg[1] = p->offset; /* l_loff_t */
+ iarg[2] = p->len; /* l_size_t */
+ iarg[3] = p->advice; /* l_int */
+ *n_args = 4;
+ break;
+ }
+ /* swapon */
+ case 224: {
+ struct swapon_args *p = params;
+ uarg[0] = (intptr_t) p->name; /* char * */
+ *n_args = 1;
+ break;
+ }
+ /* linux_swapoff */
+ case 225: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_mprotect */
+ case 226: {
+ struct linux_mprotect_args *p = params;
+ iarg[0] = p->addr; /* l_ulong */
+ iarg[1] = p->len; /* l_size_t */
+ iarg[2] = p->prot; /* l_ulong */
+ *n_args = 3;
+ break;
+ }
+ /* linux_msync */
+ case 227: {
+ struct linux_msync_args *p = params;
+ iarg[0] = p->addr; /* l_ulong */
+ iarg[1] = p->len; /* l_size_t */
+ iarg[2] = p->fl; /* l_int */
+ *n_args = 3;
+ break;
+ }
+ /* mlock */
+ case 228: {
+ struct mlock_args *p = params;
+ uarg[0] = (intptr_t) p->addr; /* const void * */
+ uarg[1] = p->len; /* size_t */
+ *n_args = 2;
+ break;
+ }
+ /* munlock */
+ case 229: {
+ struct munlock_args *p = params;
+ uarg[0] = (intptr_t) p->addr; /* const void * */
+ uarg[1] = p->len; /* size_t */
+ *n_args = 2;
+ break;
+ }
+ /* mlockall */
+ case 230: {
+ struct mlockall_args *p = params;
+ iarg[0] = p->how; /* int */
+ *n_args = 1;
+ break;
+ }
+ /* munlockall */
+ case 231: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_mincore */
+ case 232: {
+ struct linux_mincore_args *p = params;
+ iarg[0] = p->start; /* l_ulong */
+ iarg[1] = p->len; /* l_size_t */
+ uarg[2] = (intptr_t) p->vec; /* u_char * */
+ *n_args = 3;
+ break;
+ }
+ /* linux_madvise */
+ case 233: {
+ struct linux_madvise_args *p = params;
+ iarg[0] = p->addr; /* l_ulong */
+ iarg[1] = p->len; /* l_size_t */
+ iarg[2] = p->behav; /* l_int */
+ *n_args = 3;
+ break;
+ }
+ /* linux_remap_file_pages */
+ case 234: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_mbind */
+ case 235: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_get_mempolicy */
+ case 236: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_set_mempolicy */
+ case 237: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_migrate_pages */
+ case 238: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_move_pages */
+ case 239: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_rt_tgsigqueueinfo */
+ case 240: {
+ struct linux_rt_tgsigqueueinfo_args *p = params;
+ iarg[0] = p->tgid; /* l_pid_t */
+ iarg[1] = p->tid; /* l_pid_t */
+ iarg[2] = p->sig; /* l_int */
+ uarg[3] = (intptr_t) p->uinfo; /* l_siginfo_t * */
+ *n_args = 4;
+ break;
+ }
+ /* linux_perf_event_open */
+ case 241: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_accept4 */
+ case 242: {
+ struct linux_accept4_args *p = params;
+ iarg[0] = p->s; /* l_int */
+ iarg[1] = p->addr; /* l_uintptr_t */
+ iarg[2] = p->namelen; /* l_uintptr_t */
+ iarg[3] = p->flags; /* l_int */
+ *n_args = 4;
+ break;
+ }
+ /* linux_recvmmsg */
+ case 243: {
+ struct linux_recvmmsg_args *p = params;
+ iarg[0] = p->s; /* l_int */
+ uarg[1] = (intptr_t) p->msg; /* struct l_mmsghdr * */
+ iarg[2] = p->vlen; /* l_uint */
+ iarg[3] = p->flags; /* l_uint */
+ uarg[4] = (intptr_t) p->timeout; /* struct l_timespec * */
+ *n_args = 5;
+ break;
+ }
+ /* linux_wait4 */
+ case 260: {
+ struct linux_wait4_args *p = params;
+ iarg[0] = p->pid; /* l_pid_t */
+ uarg[1] = (intptr_t) p->status; /* l_int * */
+ iarg[2] = p->options; /* l_int */
+ uarg[3] = (intptr_t) p->rusage; /* struct rusage * */
+ *n_args = 4;
+ break;
+ }
+ /* linux_prlimit64 */
+ case 261: {
+ struct linux_prlimit64_args *p = params;
+ iarg[0] = p->pid; /* l_pid_t */
+ iarg[1] = p->resource; /* l_uint */
+ uarg[2] = (intptr_t) p->new; /* struct rlimit * */
+ uarg[3] = (intptr_t) p->old; /* struct rlimit * */
+ *n_args = 4;
+ break;
+ }
+ /* linux_fanotify_init */
+ case 262: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_fanotify_mark */
+ case 263: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_name_to_handle_at */
+ case 264: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_open_by_handle_at */
+ case 265: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_clock_adjtime */
+ case 266: {
+ *n_args = 0;
+ break;
+ }
+ /* linux_syncfs */
+ case 267: {
+ struct linux_syncfs_args *p = params;
+ iarg[0] = p->fd; /* l_int */
+ *n_args = 1;
+ break;
+ }
+ /* linux_setns */
+ case 268: {
+ struct linux_setns_args *p = params;
+ iarg[0] = p->fd; /* l_int */
+ iarg[1] = p->nstype; /* l_int */
+ *n_args = 2;
+ break;
+ }
+ /* linux_sendmmsg */
+ case 269: {
+ struct linux_sendmmsg_args *p = params;
+ iarg[0] = p->s; /* l_int */
+ uarg[1] = (intptr_t) p->msg; /* struct l_mmsghdr * */
+ iarg[2] = p->vlen; /* l_uint */
+ iarg[3] = p->flags; /* l_uint */
+ *n_args = 4;
+ break;
+ }
+ /* linux_process_vm_readv */
+ case 270: {
+ struct linux_process_vm_readv_args *p = params;
+ iarg[0] = p->pid; /* l_pid_t */
+ uarg[1] = (intptr_t) p->lvec; /* const struct iovec * */
+ iarg[2] = p->liovcnt; /* l_ulong */
+ uarg[3] = (intptr_t) p->rvec; /* const struct iovec * */
+ iarg[4] = p->riovcnt; /* l_ulong */
+ iarg[5] = p->flags; /* l_ulong */
+ *n_args = 6;
+ break;
+ }
+ /* linux_process_vm_writev */
+ case 271: {
+ struct linux_process_vm_writev_args *p = params;
+ iarg[0] = p->pid; /* l_pid_t */
+ uarg[1] = (intptr_t) p->lvec; /* const struct iovec * */
+ iarg[2] = p->liovcnt; /* l_ulong */
+ uarg[3] = (intptr_t) p->rvec; /* const struct iovec * */
+ iarg[4] = p->riovcnt; /* l_ulong */
+ iarg[5] = p->flags; /* l_ulong */
+ *n_args = 6;
+ break;
+ }
+ /* linux_kcmp */
+ case 272: {
+ struct linux_kcmp_args *p = params;
+ iarg[0] = p->pid1; /* l_pid_t */
+ iarg[1] = p->pid2; /* l_pid_t */
+ iarg[2] = p->type; /* l_int */
+ iarg[3] = p->idx1; /* l_ulong */
+ iarg[4] = p->idx; /* l_ulong */
+ *n_args = 5;
+ break;
+ }
+ /* linux_finit_module */
+ case 273: {
+ struct linux_finit_module_args *p = params;
+ iarg[0] = p->fd; /* l_int */
+ uarg[1] = (intptr_t) p->uargs; /* const char * */
+ iarg[2] = p->flags; /* l_int */
+ *n_args = 3;
+ break;
+ }
+ /* linux_sched_setattr */
+ case 274: {
+ struct linux_sched_setattr_args *p = params;
+ iarg[0] = p->pid; /* l_pid_t */
+ uarg[1] = (intptr_t) p->attr; /* void * */
+ iarg[2] = p->flags; /* l_uint */
+ *n_args = 3;
+ break;
+ }
+ /* linux_sched_getattr */
+ case 275: {
+ struct linux_sched_getattr_args *p = params;
+ iarg[0] = p->pid; /* l_pid_t */
+ uarg[1] = (intptr_t) p->attr; /* void * */
+ iarg[2] = p->size; /* l_uint */
+ iarg[3] = p->flags; /* l_uint */
+ *n_args = 4;
+ break;
+ }
+ /* linux_renameat2 */
+ case 276: {
+ struct linux_renameat2_args *p = params;
+ iarg[0] = p->olddfd; /* l_int */
+ uarg[1] = (intptr_t) p->oldname; /* const char * */
+ iarg[2] = p->newdfd; /* l_int */
+ uarg[3] = (intptr_t) p->newname; /* const char * */
+ iarg[4] = p->flags; /* l_uint */
+ *n_args = 5;
+ break;
+ }
+ /* linux_seccomp */
+ case 277: {
+ struct linux_seccomp_args *p = params;
+ iarg[0] = p->op; /* l_uint */
+ iarg[1] = p->flags; /* l_uint */
+ uarg[2] = (intptr_t) p->uargs; /* const char * */
+ *n_args = 3;
+ break;
+ }
+ /* linux_getrandom */
+ case 278: {
+ struct linux_getrandom_args *p = params;
+ uarg[0] = (intptr_t) p->buf; /* char * */
+ iarg[1] = p->count; /* l_size_t */
+ iarg[2] = p->flags; /* l_uint */
+ *n_args = 3;
+ break;
+ }
+ /* linux_memfd_create */
+ case 279: {
+ struct linux_memfd_create_args *p = params;
+ uarg[0] = (intptr_t) p->uname_ptr; /* const char * */
+ iarg[1] = p->flags; /* l_uint */
+ *n_args = 2;
+ break;
+ }
+ /* linux_bpf */
+ case 280: {
+ struct linux_bpf_args *p = params;
+ iarg[0] = p->cmd; /* l_int */
+ uarg[1] = (intptr_t) p->attr; /* void * */
+ iarg[2] = p->size; /* l_uint */
+ *n_args = 3;
+ break;
+ }
+ /* linux_execveat */
+ case 281: {
+ struct linux_execveat_args *p = params;
+ iarg[0] = p->dfd; /* l_int */
+ uarg[1] = (intptr_t) p->filename; /* const char * */
+ uarg[2] = (intptr_t) p->argv; /* const char ** */
+ uarg[3] = (intptr_t) p->envp; /* const char ** */
+ iarg[4] = p->flags; /* l_int */
+ *n_args = 5;
+ break;
+ }
+ /* linux_userfaultfd */
+ case 282: {
+ struct linux_userfaultfd_args *p = params;
+ iarg[0] = p->flags; /* l_int */
+ *n_args = 1;
+ break;
+ }
+ /* linux_membarrier */
+ case 283: {
+ struct linux_membarrier_args *p = params;
+ iarg[0] = p->cmd; /* l_int */
+ iarg[1] = p->flags; /* l_int */
+ *n_args = 2;
+ break;
+ }
+ /* linux_mlock2 */
+ case 284: {
+ struct linux_mlock2_args *p = params;
+ iarg[0] = p->start; /* l_ulong */
+ iarg[1] = p->len; /* l_size_t */
+ iarg[2] = p->flags; /* l_int */
+ *n_args = 3;
+ break;
+ }
+ /* linux_copy_file_range */
+ case 285: {
+ struct linux_copy_file_range_args *p = params;
+ iarg[0] = p->fd_in; /* l_int */
+ uarg[1] = (intptr_t) p->off_in; /* l_loff_t * */
+ iarg[2] = p->fd_out; /* l_int */
+ uarg[3] = (intptr_t) p->off_out; /* l_loff_t * */
+ iarg[4] = p->len; /* l_size_t */
+ iarg[5] = p->flags; /* l_uint */
+ *n_args = 6;
+ break;
+ }
+ /* linux_preadv2 */
+ case 286: {
+ struct linux_preadv2_args *p = params;
+ iarg[0] = p->fd; /* l_ulong */
+ uarg[1] = (intptr_t) p->vec; /* const struct iovec * */
+ iarg[2] = p->vlen; /* l_ulong */
+ iarg[3] = p->pos_l; /* l_ulong */
+ iarg[4] = p->pos_h; /* l_ulong */
+ iarg[5] = p->flags; /* l_int */
+ *n_args = 6;
+ break;
+ }
+ /* linux_pwritev2 */
+ case 287: {
+ struct linux_pwritev2_args *p = params;
+ iarg[0] = p->fd; /* l_ulong */
+ uarg[1] = (intptr_t) p->vec; /* const struct iovec * */
+ iarg[2] = p->vlen; /* l_ulong */
+ iarg[3] = p->pos_l; /* l_ulong */
+ iarg[4] = p->pos_h; /* l_ulong */
+ iarg[5] = p->flags; /* l_int */
+ *n_args = 6;
+ break;
+ }
+ /* linux_pkey_mprotect */
+ case 288: {
+ struct linux_pkey_mprotect_args *p = params;
+ iarg[0] = p->start; /* l_ulong */
+ iarg[1] = p->len; /* l_size_t */
+ iarg[2] = p->prot; /* l_ulong */
+ iarg[3] = p->pkey; /* l_int */
+ *n_args = 4;
+ break;
+ }
+ /* linux_pkey_alloc */
+ case 289: {
+ struct linux_pkey_alloc_args *p = params;
+ iarg[0] = p->flags; /* l_ulong */
+ iarg[1] = p->init_val; /* l_ulong */
+ *n_args = 2;
+ break;
+ }
+ /* linux_pkey_free */
+ case 290: {
+ struct linux_pkey_free_args *p = params;
+ iarg[0] = p->pkey; /* l_int */
+ *n_args = 1;
+ break;
+ }
+ default:
+ *n_args = 0;
+ break;
+ };
+}
+static void
+systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
+{
+ const char *p = NULL;
+ switch (sysnum) {
+#define nosys linux_nosys
+ /* linux_setxattr */
+ case 5:
+ switch(ndx) {
+ case 0:
+ p = "userland const char *";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ case 2:
+ p = "userland const char *";
+ break;
+ case 3:
+ p = "l_size_t";
+ break;
+ case 4:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_lsetxattr */
+ case 6:
+ switch(ndx) {
+ case 0:
+ p = "userland const char *";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ case 2:
+ p = "userland const char *";
+ break;
+ case 3:
+ p = "l_size_t";
+ break;
+ case 4:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_fsetxattr */
+ case 7:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ case 2:
+ p = "userland const char *";
+ break;
+ case 3:
+ p = "l_size_t";
+ break;
+ case 4:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_getxattr */
+ case 8:
+ switch(ndx) {
+ case 0:
+ p = "userland const char *";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ case 2:
+ p = "userland char *";
+ break;
+ case 3:
+ p = "l_size_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_lgetxattr */
+ case 9:
+ switch(ndx) {
+ case 0:
+ p = "userland const char *";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ case 2:
+ p = "userland char *";
+ break;
+ case 3:
+ p = "l_size_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_fgetxattr */
+ case 10:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ case 2:
+ p = "userland char *";
+ break;
+ case 3:
+ p = "l_size_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_listxattr */
+ case 11:
+ switch(ndx) {
+ case 0:
+ p = "userland const char *";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ case 2:
+ p = "l_size_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_llistxattr */
+ case 12:
+ switch(ndx) {
+ case 0:
+ p = "userland const char *";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ case 2:
+ p = "l_size_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_flistxattr */
+ case 13:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ case 2:
+ p = "l_size_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_removexattr */
+ case 14:
+ switch(ndx) {
+ case 0:
+ p = "userland const char *";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_lremovexattr */
+ case 15:
+ switch(ndx) {
+ case 0:
+ p = "userland const char *";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_fremovexattr */
+ case 16:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_getcwd */
+ case 17:
+ switch(ndx) {
+ case 0:
+ p = "userland char *";
+ break;
+ case 1:
+ p = "l_ulong";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_lookup_dcookie */
+ case 18:
+ break;
+ /* linux_eventfd2 */
+ case 19:
+ switch(ndx) {
+ case 0:
+ p = "l_uint";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_epoll_create1 */
+ case 20:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_epoll_ctl */
+ case 21:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ case 3:
+ p = "userland struct epoll_event *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_epoll_pwait */
+ case 22:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland struct epoll_event *";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ case 3:
+ p = "l_int";
+ break;
+ case 4:
+ p = "userland l_sigset_t *";
+ break;
+ case 5:
+ p = "l_size_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* dup */
+ case 23:
+ switch(ndx) {
+ case 0:
+ p = "u_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_dup3 */
+ case 24:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_fcntl */
+ case 25:
+ switch(ndx) {
+ case 0:
+ p = "l_uint";
+ break;
+ case 1:
+ p = "l_uint";
+ break;
+ case 2:
+ p = "l_ulong";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_inotify_init1 */
+ case 26:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_inotify_add_watch */
+ case 27:
+ break;
+ /* linux_inotify_rm_watch */
+ case 28:
+ break;
+ /* linux_ioctl */
+ case 29:
+ switch(ndx) {
+ case 0:
+ p = "l_uint";
+ break;
+ case 1:
+ p = "l_uint";
+ break;
+ case 2:
+ p = "l_ulong";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_ioprio_set */
+ case 30:
+ break;
+ /* linux_ioprio_get */
+ case 31:
+ break;
+ /* flock */
+ case 32:
+ switch(ndx) {
+ case 0:
+ p = "int";
+ break;
+ case 1:
+ p = "int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_mknodat */
+ case 33:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ case 3:
+ p = "l_uint";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_mkdirat */
+ case 34:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ case 2:
+ p = "l_mode_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_unlinkat */
+ case 35:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_symlinkat */
+ case 36:
+ switch(ndx) {
+ case 0:
+ p = "userland const char *";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ case 2:
+ p = "userland const char *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_linkat */
+ case 37:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ case 3:
+ p = "userland const char *";
+ break;
+ case 4:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_renameat */
+ case 38:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ case 3:
+ p = "userland const char *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_mount */
+ case 40:
+ switch(ndx) {
+ case 0:
+ p = "userland char *";
+ break;
+ case 1:
+ p = "userland char *";
+ break;
+ case 2:
+ p = "userland char *";
+ break;
+ case 3:
+ p = "l_ulong";
+ break;
+ case 4:
+ p = "userland void *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_pivot_root */
+ case 41:
+ break;
+ /* linux_statfs */
+ case 43:
+ switch(ndx) {
+ case 0:
+ p = "userland char *";
+ break;
+ case 1:
+ p = "userland struct l_statfs_buf *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_fstatfs */
+ case 44:
+ switch(ndx) {
+ case 0:
+ p = "l_uint";
+ break;
+ case 1:
+ p = "userland struct l_statfs_buf *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_truncate */
+ case 45:
+ switch(ndx) {
+ case 0:
+ p = "userland char *";
+ break;
+ case 1:
+ p = "l_ulong";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_ftruncate */
+ case 46:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_long";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_fallocate */
+ case 47:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ case 2:
+ p = "l_loff_t";
+ break;
+ case 3:
+ p = "l_loff_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_faccessat */
+ case 48:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_chdir */
+ case 49:
+ switch(ndx) {
+ case 0:
+ p = "userland char *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* fchdir */
+ case 50:
+ switch(ndx) {
+ case 0:
+ p = "int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* chroot */
+ case 51:
+ switch(ndx) {
+ case 0:
+ p = "userland char *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* fchmod */
+ case 52:
+ switch(ndx) {
+ case 0:
+ p = "int";
+ break;
+ case 1:
+ p = "int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_fchmodat */
+ case 53:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ case 2:
+ p = "l_mode_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_fchownat */
+ case 54:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ case 2:
+ p = "l_uid_t";
+ break;
+ case 3:
+ p = "l_gid_t";
+ break;
+ case 4:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* fchown */
+ case 55:
+ switch(ndx) {
+ case 0:
+ p = "int";
+ break;
+ case 1:
+ p = "int";
+ break;
+ case 2:
+ p = "int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_openat */
+ case 56:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ case 3:
+ p = "l_mode_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* close */
+ case 57:
+ switch(ndx) {
+ case 0:
+ p = "int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_vhangup */
+ case 58:
+ break;
+ /* linux_pipe2 */
+ case 59:
+ switch(ndx) {
+ case 0:
+ p = "userland l_int *";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_getdents64 */
+ case 61:
+ switch(ndx) {
+ case 0:
+ p = "l_uint";
+ break;
+ case 1:
+ p = "userland void *";
+ break;
+ case 2:
+ p = "l_uint";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_lseek */
+ case 62:
+ switch(ndx) {
+ case 0:
+ p = "l_uint";
+ break;
+ case 1:
+ p = "l_off_t";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* read */
+ case 63:
+ switch(ndx) {
+ case 0:
+ p = "int";
+ break;
+ case 1:
+ p = "userland char *";
+ break;
+ case 2:
+ p = "l_size_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* write */
+ case 64:
+ switch(ndx) {
+ case 0:
+ p = "int";
+ break;
+ case 1:
+ p = "userland char *";
+ break;
+ case 2:
+ p = "l_size_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* readv */
+ case 65:
+ switch(ndx) {
+ case 0:
+ p = "int";
+ break;
+ case 1:
+ p = "userland struct iovec *";
+ break;
+ case 2:
+ p = "u_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* writev */
+ case 66:
+ switch(ndx) {
+ case 0:
+ p = "int";
+ break;
+ case 1:
+ p = "userland struct iovec *";
+ break;
+ case 2:
+ p = "u_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_pread */
+ case 67:
+ switch(ndx) {
+ case 0:
+ p = "l_uint";
+ break;
+ case 1:
+ p = "userland char *";
+ break;
+ case 2:
+ p = "l_size_t";
+ break;
+ case 3:
+ p = "l_loff_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_pwrite */
+ case 68:
+ switch(ndx) {
+ case 0:
+ p = "l_uint";
+ break;
+ case 1:
+ p = "userland char *";
+ break;
+ case 2:
+ p = "l_size_t";
+ break;
+ case 3:
+ p = "l_loff_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_preadv */
+ case 69:
+ switch(ndx) {
+ case 0:
+ p = "l_ulong";
+ break;
+ case 1:
+ p = "userland struct iovec *";
+ break;
+ case 2:
+ p = "l_ulong";
+ break;
+ case 3:
+ p = "l_ulong";
+ break;
+ case 4:
+ p = "l_ulong";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_pwritev */
+ case 70:
+ switch(ndx) {
+ case 0:
+ p = "l_ulong";
+ break;
+ case 1:
+ p = "userland struct iovec *";
+ break;
+ case 2:
+ p = "l_ulong";
+ break;
+ case 3:
+ p = "l_ulong";
+ break;
+ case 4:
+ p = "l_ulong";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_sendfile */
+ case 71:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ case 2:
+ p = "userland l_off_t *";
+ break;
+ case 3:
+ p = "l_size_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_pselect6 */
+ case 72:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland l_fd_set *";
+ break;
+ case 2:
+ p = "userland l_fd_set *";
+ break;
+ case 3:
+ p = "userland l_fd_set *";
+ break;
+ case 4:
+ p = "userland struct l_timespec *";
+ break;
+ case 5:
+ p = "userland l_uintptr_t *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_ppoll */
+ case 73:
+ switch(ndx) {
+ case 0:
+ p = "userland struct pollfd *";
+ break;
+ case 1:
+ p = "l_uint";
+ break;
+ case 2:
+ p = "userland struct l_timespec *";
+ break;
+ case 3:
+ p = "userland l_sigset_t *";
+ break;
+ case 4:
+ p = "l_size_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_signalfd4 */
+ case 74:
+ break;
+ /* linux_vmsplice */
+ case 75:
+ break;
+ /* linux_splice */
+ case 76:
+ switch(ndx) {
+ case 0:
+ p = "int";
+ break;
+ case 1:
+ p = "userland l_loff_t *";
+ break;
+ case 2:
+ p = "int";
+ break;
+ case 3:
+ p = "userland l_loff_t *";
+ break;
+ case 4:
+ p = "l_size_t";
+ break;
+ case 5:
+ p = "l_uint";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_tee */
+ case 77:
+ break;
+ /* linux_readlinkat */
+ case 78:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ case 2:
+ p = "userland char *";
+ break;
+ case 3:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_newfstatat */
+ case 79:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland char *";
+ break;
+ case 2:
+ p = "userland struct l_stat64 *";
+ break;
+ case 3:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_newfstat */
+ case 80:
+ switch(ndx) {
+ case 0:
+ p = "l_uint";
+ break;
+ case 1:
+ p = "userland struct l_newstat *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* fsync */
+ case 82:
+ switch(ndx) {
+ case 0:
+ p = "int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_fdatasync */
+ case 83:
+ switch(ndx) {
+ case 0:
+ p = "l_uint";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_sync_file_range */
+ case 84:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_loff_t";
+ break;
+ case 2:
+ p = "l_loff_t";
+ break;
+ case 3:
+ p = "l_uint";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_timerfd_create */
+ case 85:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_timerfd_settime */
+ case 86:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ case 2:
+ p = "userland const struct l_itimerspec *";
+ break;
+ case 3:
+ p = "userland struct l_itimerspec *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_timerfd_gettime */
+ case 87:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland struct l_itimerspec *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_utimensat */
+ case 88:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ case 2:
+ p = "userland const struct l_timespec *";
+ break;
+ case 3:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* acct */
+ case 89:
+ switch(ndx) {
+ case 0:
+ p = "userland char *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_capget */
+ case 90:
+ switch(ndx) {
+ case 0:
+ p = "userland struct l_user_cap_header *";
+ break;
+ case 1:
+ p = "userland struct l_user_cap_data *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_capset */
+ case 91:
+ switch(ndx) {
+ case 0:
+ p = "userland struct l_user_cap_header *";
+ break;
+ case 1:
+ p = "userland struct l_user_cap_data *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_personality */
+ case 92:
+ switch(ndx) {
+ case 0:
+ p = "l_uint";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_exit */
+ case 93:
+ switch(ndx) {
+ case 0:
+ p = "u_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_exit_group */
+ case 94:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_waitid */
+ case 95:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_pid_t";
+ break;
+ case 2:
+ p = "userland l_siginfo_t *";
+ break;
+ case 3:
+ p = "l_int";
+ break;
+ case 4:
+ p = "userland struct rusage *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_set_tid_address */
+ case 96:
+ switch(ndx) {
+ case 0:
+ p = "userland l_int *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_unshare */
+ case 97:
+ break;
+ /* linux_sys_futex */
+ case 98:
+ switch(ndx) {
+ case 0:
+ p = "userland void *";
+ break;
+ case 1:
+ p = "int";
+ break;
+ case 2:
+ p = "int";
+ break;
+ case 3:
+ p = "userland struct l_timespec *";
+ break;
+ case 4:
+ p = "userland void *";
+ break;
+ case 5:
+ p = "int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_set_robust_list */
+ case 99:
+ switch(ndx) {
+ case 0:
+ p = "userland struct linux_robust_list_head *";
+ break;
+ case 1:
+ p = "l_size_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_get_robust_list */
+ case 100:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland struct linux_robust_list_head **";
+ break;
+ case 2:
+ p = "userland l_size_t *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_nanosleep */
+ case 101:
+ switch(ndx) {
+ case 0:
+ p = "userland const struct l_timespec *";
+ break;
+ case 1:
+ p = "userland struct l_timespec *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_getitimer */
+ case 102:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland struct l_itimerval *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_setitimer */
+ case 103:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland struct l_itimerval *";
+ break;
+ case 2:
+ p = "userland struct l_itimerval *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_kexec_load */
+ case 104:
+ break;
+ /* linux_init_module */
+ case 105:
+ break;
+ /* linux_delete_module */
+ case 106:
+ break;
+ /* linux_timer_create */
+ case 107:
+ switch(ndx) {
+ case 0:
+ p = "clockid_t";
+ break;
+ case 1:
+ p = "userland struct sigevent *";
+ break;
+ case 2:
+ p = "userland l_timer_t *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_timer_gettime */
+ case 108:
+ switch(ndx) {
+ case 0:
+ p = "l_timer_t";
+ break;
+ case 1:
+ p = "userland struct itimerspec *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_timer_getoverrun */
+ case 109:
+ switch(ndx) {
+ case 0:
+ p = "l_timer_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_timer_settime */
+ case 110:
+ switch(ndx) {
+ case 0:
+ p = "l_timer_t";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ case 2:
+ p = "userland const struct itimerspec *";
+ break;
+ case 3:
+ p = "userland struct itimerspec *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_timer_delete */
+ case 111:
+ switch(ndx) {
+ case 0:
+ p = "l_timer_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_clock_settime */
+ case 112:
+ switch(ndx) {
+ case 0:
+ p = "clockid_t";
+ break;
+ case 1:
+ p = "userland struct l_timespec *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_clock_gettime */
+ case 113:
+ switch(ndx) {
+ case 0:
+ p = "clockid_t";
+ break;
+ case 1:
+ p = "userland struct l_timespec *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_clock_getres */
+ case 114:
+ switch(ndx) {
+ case 0:
+ p = "clockid_t";
+ break;
+ case 1:
+ p = "userland struct l_timespec *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_clock_nanosleep */
+ case 115:
+ switch(ndx) {
+ case 0:
+ p = "clockid_t";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ case 2:
+ p = "userland struct l_timespec *";
+ break;
+ case 3:
+ p = "userland struct l_timespec *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_syslog */
+ case 116:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland char *";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_ptrace */
+ case 117:
+ switch(ndx) {
+ case 0:
+ p = "l_long";
+ break;
+ case 1:
+ p = "l_long";
+ break;
+ case 2:
+ p = "l_ulong";
+ break;
+ case 3:
+ p = "l_ulong";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_sched_setparam */
+ case 118:
+ switch(ndx) {
+ case 0:
+ p = "l_pid_t";
+ break;
+ case 1:
+ p = "userland struct sched_param *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_sched_setscheduler */
+ case 119:
+ switch(ndx) {
+ case 0:
+ p = "l_pid_t";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ case 2:
+ p = "userland struct sched_param *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_sched_getscheduler */
+ case 120:
+ switch(ndx) {
+ case 0:
+ p = "l_pid_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_sched_getparam */
+ case 121:
+ switch(ndx) {
+ case 0:
+ p = "l_pid_t";
+ break;
+ case 1:
+ p = "userland struct sched_param *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_sched_setaffinity */
+ case 122:
+ switch(ndx) {
+ case 0:
+ p = "l_pid_t";
+ break;
+ case 1:
+ p = "l_uint";
+ break;
+ case 2:
+ p = "userland l_ulong *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_sched_getaffinity */
+ case 123:
+ switch(ndx) {
+ case 0:
+ p = "l_pid_t";
+ break;
+ case 1:
+ p = "l_uint";
+ break;
+ case 2:
+ p = "userland l_ulong *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* sched_yield */
+ case 124:
+ break;
+ /* linux_sched_get_priority_max */
+ case 125:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_sched_get_priority_min */
+ case 126:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_sched_rr_get_interval */
+ case 127:
+ switch(ndx) {
+ case 0:
+ p = "l_pid_t";
+ break;
+ case 1:
+ p = "userland struct l_timespec *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_kill */
+ case 129:
+ switch(ndx) {
+ case 0:
+ p = "l_pid_t";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_tkill */
+ case 130:
+ switch(ndx) {
+ case 0:
+ p = "l_pid_t";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_tgkill */
+ case 131:
+ switch(ndx) {
+ case 0:
+ p = "l_pid_t";
+ break;
+ case 1:
+ p = "l_pid_t";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_sigaltstack */
+ case 132:
+ switch(ndx) {
+ case 0:
+ p = "userland l_stack_t *";
+ break;
+ case 1:
+ p = "userland l_stack_t *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_rt_sigsuspend */
+ case 133:
+ switch(ndx) {
+ case 0:
+ p = "userland l_sigset_t *";
+ break;
+ case 1:
+ p = "l_size_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_rt_sigaction */
+ case 134:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland l_sigaction_t *";
+ break;
+ case 2:
+ p = "userland l_sigaction_t *";
+ break;
+ case 3:
+ p = "l_size_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_rt_sigprocmask */
+ case 135:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland l_sigset_t *";
+ break;
+ case 2:
+ p = "userland l_sigset_t *";
+ break;
+ case 3:
+ p = "l_size_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_rt_sigpending */
+ case 136:
+ switch(ndx) {
+ case 0:
+ p = "userland l_sigset_t *";
+ break;
+ case 1:
+ p = "l_size_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_rt_sigtimedwait */
+ case 137:
+ switch(ndx) {
+ case 0:
+ p = "userland l_sigset_t *";
+ break;
+ case 1:
+ p = "userland l_siginfo_t *";
+ break;
+ case 2:
+ p = "userland struct l_timeval *";
+ break;
+ case 3:
+ p = "l_size_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_rt_sigqueueinfo */
+ case 138:
+ switch(ndx) {
+ case 0:
+ p = "l_pid_t";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ case 2:
+ p = "userland l_siginfo_t *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_rt_sigreturn */
+ case 139:
+ switch(ndx) {
+ case 0:
+ p = "userland struct l_ucontext *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* setpriority */
+ case 140:
+ switch(ndx) {
+ case 0:
+ p = "int";
+ break;
+ case 1:
+ p = "int";
+ break;
+ case 2:
+ p = "int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_getpriority */
+ case 141:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_reboot */
+ case 142:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ case 2:
+ p = "l_uint";
+ break;
+ case 3:
+ p = "userland void *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* setregid */
+ case 143:
+ switch(ndx) {
+ case 0:
+ p = "gid_t";
+ break;
+ case 1:
+ p = "gid_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* setgid */
+ case 144:
+ switch(ndx) {
+ case 0:
+ p = "gid_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* setreuid */
+ case 145:
+ switch(ndx) {
+ case 0:
+ p = "uid_t";
+ break;
+ case 1:
+ p = "uid_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* setuid */
+ case 146:
+ switch(ndx) {
+ case 0:
+ p = "uid_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* setresuid */
+ case 147:
+ switch(ndx) {
+ case 0:
+ p = "uid_t";
+ break;
+ case 1:
+ p = "uid_t";
+ break;
+ case 2:
+ p = "uid_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* getresuid */
+ case 148:
+ switch(ndx) {
+ case 0:
+ p = "userland uid_t *";
+ break;
+ case 1:
+ p = "userland uid_t *";
+ break;
+ case 2:
+ p = "userland uid_t *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* setresgid */
+ case 149:
+ switch(ndx) {
+ case 0:
+ p = "gid_t";
+ break;
+ case 1:
+ p = "gid_t";
+ break;
+ case 2:
+ p = "gid_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* getresgid */
+ case 150:
+ switch(ndx) {
+ case 0:
+ p = "userland gid_t *";
+ break;
+ case 1:
+ p = "userland gid_t *";
+ break;
+ case 2:
+ p = "userland gid_t *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_setfsuid */
+ case 151:
+ switch(ndx) {
+ case 0:
+ p = "l_uid_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_setfsgid */
+ case 152:
+ switch(ndx) {
+ case 0:
+ p = "l_gid_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_times */
+ case 153:
+ switch(ndx) {
+ case 0:
+ p = "userland struct l_times_argv *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* setpgid */
+ case 154:
+ switch(ndx) {
+ case 0:
+ p = "int";
+ break;
+ case 1:
+ p = "int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* getpgid */
+ case 155:
+ switch(ndx) {
+ case 0:
+ p = "int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_getsid */
+ case 156:
+ switch(ndx) {
+ case 0:
+ p = "l_pid_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* setsid */
+ case 157:
+ break;
+ /* linux_getgroups */
+ case 158:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland l_gid_t *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_setgroups */
+ case 159:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland l_gid_t *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_newuname */
+ case 160:
+ switch(ndx) {
+ case 0:
+ p = "userland struct l_new_utsname *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_sethostname */
+ case 161:
+ switch(ndx) {
+ case 0:
+ p = "userland char *";
+ break;
+ case 1:
+ p = "l_uint";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_setdomainname */
+ case 162:
+ switch(ndx) {
+ case 0:
+ p = "userland char *";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_getrlimit */
+ case 163:
+ switch(ndx) {
+ case 0:
+ p = "l_uint";
+ break;
+ case 1:
+ p = "userland struct l_rlimit *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_setrlimit */
+ case 164:
+ switch(ndx) {
+ case 0:
+ p = "l_uint";
+ break;
+ case 1:
+ p = "userland struct l_rlimit *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* getrusage */
+ case 165:
+ switch(ndx) {
+ case 0:
+ p = "int";
+ break;
+ case 1:
+ p = "userland struct rusage *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* umask */
+ case 166:
+ switch(ndx) {
+ case 0:
+ p = "int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_prctl */
+ case 167:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_uintptr_t";
+ break;
+ case 2:
+ p = "l_uintptr_t";
+ break;
+ case 3:
+ p = "l_uintptr_t";
+ break;
+ case 4:
+ p = "l_uintptr_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_getcpu */
+ case 168:
+ switch(ndx) {
+ case 0:
+ p = "userland l_uint *";
+ break;
+ case 1:
+ p = "userland l_uint *";
+ break;
+ case 2:
+ p = "userland void *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* gettimeofday */
+ case 169:
+ switch(ndx) {
+ case 0:
+ p = "userland struct l_timeval *";
+ break;
+ case 1:
+ p = "userland struct timezone *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* settimeofday */
+ case 170:
+ switch(ndx) {
+ case 0:
+ p = "userland struct l_timeval *";
+ break;
+ case 1:
+ p = "userland struct timezone *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_adjtimex */
+ case 171:
+ break;
+ /* linux_getpid */
+ case 172:
+ break;
+ /* linux_getppid */
+ case 173:
+ break;
+ /* linux_getuid */
+ case 174:
+ break;
+ /* geteuid */
+ case 175:
+ break;
+ /* linux_getgid */
+ case 176:
+ break;
+ /* getegid */
+ case 177:
+ break;
+ /* linux_gettid */
+ case 178:
+ break;
+ /* linux_sysinfo */
+ case 179:
+ switch(ndx) {
+ case 0:
+ p = "userland struct l_sysinfo *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_mq_open */
+ case 180:
+ switch(ndx) {
+ case 0:
+ p = "userland const char *";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ case 2:
+ p = "l_mode_t";
+ break;
+ case 3:
+ p = "userland struct mq_attr *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_mq_unlink */
+ case 181:
+ switch(ndx) {
+ case 0:
+ p = "userland const char *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_mq_timedsend */
+ case 182:
+ switch(ndx) {
+ case 0:
+ p = "l_mqd_t";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ case 2:
+ p = "l_size_t";
+ break;
+ case 3:
+ p = "l_uint";
+ break;
+ case 4:
+ p = "userland const struct l_timespec *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_mq_timedreceive */
+ case 183:
+ switch(ndx) {
+ case 0:
+ p = "l_mqd_t";
+ break;
+ case 1:
+ p = "userland char *";
+ break;
+ case 2:
+ p = "l_size_t";
+ break;
+ case 3:
+ p = "userland l_uint *";
+ break;
+ case 4:
+ p = "userland const struct l_timespec *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_mq_notify */
+ case 184:
+ switch(ndx) {
+ case 0:
+ p = "l_mqd_t";
+ break;
+ case 1:
+ p = "userland const struct l_timespec *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_mq_getsetattr */
+ case 185:
+ switch(ndx) {
+ case 0:
+ p = "l_mqd_t";
+ break;
+ case 1:
+ p = "userland const struct mq_attr *";
+ break;
+ case 2:
+ p = "userland struct mq_attr *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_msgget */
+ case 186:
+ switch(ndx) {
+ case 0:
+ p = "l_key_t";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_msgctl */
+ case 187:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ case 2:
+ p = "userland struct l_msqid_ds *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_msgrcv */
+ case 188:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland struct l_msgbuf *";
+ break;
+ case 2:
+ p = "l_size_t";
+ break;
+ case 3:
+ p = "l_long";
+ break;
+ case 4:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_msgsnd */
+ case 189:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland struct l_msgbuf *";
+ break;
+ case 2:
+ p = "l_size_t";
+ break;
+ case 3:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_semget */
+ case 190:
+ switch(ndx) {
+ case 0:
+ p = "l_key_t";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_semctl */
+ case 191:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ case 3:
+ p = "union l_semun";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_semtimedop */
+ case 192:
+ break;
+ /* linux_semop */
+ case 193:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland struct l_sembuf *";
+ break;
+ case 2:
+ p = "l_uint";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_shmget */
+ case 194:
+ switch(ndx) {
+ case 0:
+ p = "l_key_t";
+ break;
+ case 1:
+ p = "l_size_t";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_shmctl */
+ case 195:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ case 2:
+ p = "userland struct l_shmid_ds *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_shmat */
+ case 196:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland char *";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_shmdt */
+ case 197:
+ switch(ndx) {
+ case 0:
+ p = "userland char *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_socket */
+ case 198:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_socketpair */
+ case 199:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ case 3:
+ p = "l_uintptr_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_bind */
+ case 200:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_uintptr_t";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_listen */
+ case 201:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_accept */
+ case 202:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_uintptr_t";
+ break;
+ case 2:
+ p = "l_uintptr_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_connect */
+ case 203:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_uintptr_t";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_getsockname */
+ case 204:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_uintptr_t";
+ break;
+ case 2:
+ p = "l_uintptr_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_getpeername */
+ case 205:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_uintptr_t";
+ break;
+ case 2:
+ p = "l_uintptr_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_sendto */
+ case 206:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_uintptr_t";
+ break;
+ case 2:
+ p = "l_size_t";
+ break;
+ case 3:
+ p = "l_uint";
+ break;
+ case 4:
+ p = "l_uintptr_t";
+ break;
+ case 5:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_recvfrom */
+ case 207:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_uintptr_t";
+ break;
+ case 2:
+ p = "l_size_t";
+ break;
+ case 3:
+ p = "l_uint";
+ break;
+ case 4:
+ p = "l_uintptr_t";
+ break;
+ case 5:
+ p = "l_uintptr_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_setsockopt */
+ case 208:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ case 3:
+ p = "l_uintptr_t";
+ break;
+ case 4:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_getsockopt */
+ case 209:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ case 3:
+ p = "l_uintptr_t";
+ break;
+ case 4:
+ p = "l_uintptr_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_shutdown */
+ case 210:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_sendmsg */
+ case 211:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_uintptr_t";
+ break;
+ case 2:
+ p = "l_uint";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_recvmsg */
+ case 212:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_uintptr_t";
+ break;
+ case 2:
+ p = "l_uint";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_brk */
+ case 214:
+ switch(ndx) {
+ case 0:
+ p = "l_ulong";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* munmap */
+ case 215:
+ switch(ndx) {
+ case 0:
+ p = "userland void *";
+ break;
+ case 1:
+ p = "l_size_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_mremap */
+ case 216:
+ switch(ndx) {
+ case 0:
+ p = "l_ulong";
+ break;
+ case 1:
+ p = "l_ulong";
+ break;
+ case 2:
+ p = "l_ulong";
+ break;
+ case 3:
+ p = "l_ulong";
+ break;
+ case 4:
+ p = "l_ulong";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_add_key */
+ case 217:
+ break;
+ /* linux_request_key */
+ case 218:
+ break;
+ /* linux_keyctl */
+ case 219:
+ break;
+ /* linux_clone */
+ case 220:
+ switch(ndx) {
+ case 0:
+ p = "l_ulong";
+ break;
+ case 1:
+ p = "userland void *";
+ break;
+ case 2:
+ p = "userland void *";
+ break;
+ case 3:
+ p = "userland void *";
+ break;
+ case 4:
+ p = "userland void *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_execve */
+ case 221:
+ switch(ndx) {
+ case 0:
+ p = "userland char *";
+ break;
+ case 1:
+ p = "userland char **";
+ break;
+ case 2:
+ p = "userland char **";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_mmap2 */
+ case 222:
+ switch(ndx) {
+ case 0:
+ p = "l_ulong";
+ break;
+ case 1:
+ p = "l_ulong";
+ break;
+ case 2:
+ p = "l_ulong";
+ break;
+ case 3:
+ p = "l_ulong";
+ break;
+ case 4:
+ p = "l_ulong";
+ break;
+ case 5:
+ p = "l_ulong";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_fadvise64 */
+ case 223:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_loff_t";
+ break;
+ case 2:
+ p = "l_size_t";
+ break;
+ case 3:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* swapon */
+ case 224:
+ switch(ndx) {
+ case 0:
+ p = "userland char *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_swapoff */
+ case 225:
+ break;
+ /* linux_mprotect */
+ case 226:
+ switch(ndx) {
+ case 0:
+ p = "l_ulong";
+ break;
+ case 1:
+ p = "l_size_t";
+ break;
+ case 2:
+ p = "l_ulong";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_msync */
+ case 227:
+ switch(ndx) {
+ case 0:
+ p = "l_ulong";
+ break;
+ case 1:
+ p = "l_size_t";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* mlock */
+ case 228:
+ switch(ndx) {
+ case 0:
+ p = "userland const void *";
+ break;
+ case 1:
+ p = "size_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* munlock */
+ case 229:
+ switch(ndx) {
+ case 0:
+ p = "userland const void *";
+ break;
+ case 1:
+ p = "size_t";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* mlockall */
+ case 230:
+ switch(ndx) {
+ case 0:
+ p = "int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* munlockall */
+ case 231:
+ break;
+ /* linux_mincore */
+ case 232:
+ switch(ndx) {
+ case 0:
+ p = "l_ulong";
+ break;
+ case 1:
+ p = "l_size_t";
+ break;
+ case 2:
+ p = "userland u_char *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_madvise */
+ case 233:
+ switch(ndx) {
+ case 0:
+ p = "l_ulong";
+ break;
+ case 1:
+ p = "l_size_t";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_remap_file_pages */
+ case 234:
+ break;
+ /* linux_mbind */
+ case 235:
+ break;
+ /* linux_get_mempolicy */
+ case 236:
+ break;
+ /* linux_set_mempolicy */
+ case 237:
+ break;
+ /* linux_migrate_pages */
+ case 238:
+ break;
+ /* linux_move_pages */
+ case 239:
+ break;
+ /* linux_rt_tgsigqueueinfo */
+ case 240:
+ switch(ndx) {
+ case 0:
+ p = "l_pid_t";
+ break;
+ case 1:
+ p = "l_pid_t";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ case 3:
+ p = "userland l_siginfo_t *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_perf_event_open */
+ case 241:
+ break;
+ /* linux_accept4 */
+ case 242:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_uintptr_t";
+ break;
+ case 2:
+ p = "l_uintptr_t";
+ break;
+ case 3:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_recvmmsg */
+ case 243:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland struct l_mmsghdr *";
+ break;
+ case 2:
+ p = "l_uint";
+ break;
+ case 3:
+ p = "l_uint";
+ break;
+ case 4:
+ p = "userland struct l_timespec *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_wait4 */
+ case 260:
+ switch(ndx) {
+ case 0:
+ p = "l_pid_t";
+ break;
+ case 1:
+ p = "userland l_int *";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ case 3:
+ p = "userland struct rusage *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_prlimit64 */
+ case 261:
+ switch(ndx) {
+ case 0:
+ p = "l_pid_t";
+ break;
+ case 1:
+ p = "l_uint";
+ break;
+ case 2:
+ p = "userland struct rlimit *";
+ break;
+ case 3:
+ p = "userland struct rlimit *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_fanotify_init */
+ case 262:
+ break;
+ /* linux_fanotify_mark */
+ case 263:
+ break;
+ /* linux_name_to_handle_at */
+ case 264:
+ break;
+ /* linux_open_by_handle_at */
+ case 265:
+ break;
+ /* linux_clock_adjtime */
+ case 266:
+ break;
+ /* linux_syncfs */
+ case 267:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_setns */
+ case 268:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_sendmmsg */
+ case 269:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland struct l_mmsghdr *";
+ break;
+ case 2:
+ p = "l_uint";
+ break;
+ case 3:
+ p = "l_uint";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_process_vm_readv */
+ case 270:
+ switch(ndx) {
+ case 0:
+ p = "l_pid_t";
+ break;
+ case 1:
+ p = "userland const struct iovec *";
+ break;
+ case 2:
+ p = "l_ulong";
+ break;
+ case 3:
+ p = "userland const struct iovec *";
+ break;
+ case 4:
+ p = "l_ulong";
+ break;
+ case 5:
+ p = "l_ulong";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_process_vm_writev */
+ case 271:
+ switch(ndx) {
+ case 0:
+ p = "l_pid_t";
+ break;
+ case 1:
+ p = "userland const struct iovec *";
+ break;
+ case 2:
+ p = "l_ulong";
+ break;
+ case 3:
+ p = "userland const struct iovec *";
+ break;
+ case 4:
+ p = "l_ulong";
+ break;
+ case 5:
+ p = "l_ulong";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_kcmp */
+ case 272:
+ switch(ndx) {
+ case 0:
+ p = "l_pid_t";
+ break;
+ case 1:
+ p = "l_pid_t";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ case 3:
+ p = "l_ulong";
+ break;
+ case 4:
+ p = "l_ulong";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_finit_module */
+ case 273:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_sched_setattr */
+ case 274:
+ switch(ndx) {
+ case 0:
+ p = "l_pid_t";
+ break;
+ case 1:
+ p = "userland void *";
+ break;
+ case 2:
+ p = "l_uint";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_sched_getattr */
+ case 275:
+ switch(ndx) {
+ case 0:
+ p = "l_pid_t";
+ break;
+ case 1:
+ p = "userland void *";
+ break;
+ case 2:
+ p = "l_uint";
+ break;
+ case 3:
+ p = "l_uint";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_renameat2 */
+ case 276:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ case 3:
+ p = "userland const char *";
+ break;
+ case 4:
+ p = "l_uint";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_seccomp */
+ case 277:
+ switch(ndx) {
+ case 0:
+ p = "l_uint";
+ break;
+ case 1:
+ p = "l_uint";
+ break;
+ case 2:
+ p = "userland const char *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_getrandom */
+ case 278:
+ switch(ndx) {
+ case 0:
+ p = "userland char *";
+ break;
+ case 1:
+ p = "l_size_t";
+ break;
+ case 2:
+ p = "l_uint";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_memfd_create */
+ case 279:
+ switch(ndx) {
+ case 0:
+ p = "userland const char *";
+ break;
+ case 1:
+ p = "l_uint";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_bpf */
+ case 280:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland void *";
+ break;
+ case 2:
+ p = "l_uint";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_execveat */
+ case 281:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland const char *";
+ break;
+ case 2:
+ p = "userland const char **";
+ break;
+ case 3:
+ p = "userland const char **";
+ break;
+ case 4:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_userfaultfd */
+ case 282:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_membarrier */
+ case 283:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_mlock2 */
+ case 284:
+ switch(ndx) {
+ case 0:
+ p = "l_ulong";
+ break;
+ case 1:
+ p = "l_size_t";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_copy_file_range */
+ case 285:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ case 1:
+ p = "userland l_loff_t *";
+ break;
+ case 2:
+ p = "l_int";
+ break;
+ case 3:
+ p = "userland l_loff_t *";
+ break;
+ case 4:
+ p = "l_size_t";
+ break;
+ case 5:
+ p = "l_uint";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_preadv2 */
+ case 286:
+ switch(ndx) {
+ case 0:
+ p = "l_ulong";
+ break;
+ case 1:
+ p = "userland const struct iovec *";
+ break;
+ case 2:
+ p = "l_ulong";
+ break;
+ case 3:
+ p = "l_ulong";
+ break;
+ case 4:
+ p = "l_ulong";
+ break;
+ case 5:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_pwritev2 */
+ case 287:
+ switch(ndx) {
+ case 0:
+ p = "l_ulong";
+ break;
+ case 1:
+ p = "userland const struct iovec *";
+ break;
+ case 2:
+ p = "l_ulong";
+ break;
+ case 3:
+ p = "l_ulong";
+ break;
+ case 4:
+ p = "l_ulong";
+ break;
+ case 5:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_pkey_mprotect */
+ case 288:
+ switch(ndx) {
+ case 0:
+ p = "l_ulong";
+ break;
+ case 1:
+ p = "l_size_t";
+ break;
+ case 2:
+ p = "l_ulong";
+ break;
+ case 3:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_pkey_alloc */
+ case 289:
+ switch(ndx) {
+ case 0:
+ p = "l_ulong";
+ break;
+ case 1:
+ p = "l_ulong";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* linux_pkey_free */
+ case 290:
+ switch(ndx) {
+ case 0:
+ p = "l_int";
+ break;
+ default:
+ break;
+ };
+ break;
+ default:
+ break;
+ };
+ if (p != NULL)
+ strlcpy(desc, p, descsz);
+}
+static void
+systrace_return_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
+{
+ const char *p = NULL;
+ switch (sysnum) {
+#define nosys linux_nosys
+ /* linux_setxattr */
+ case 5:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_lsetxattr */
+ case 6:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_fsetxattr */
+ case 7:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_getxattr */
+ case 8:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_lgetxattr */
+ case 9:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_fgetxattr */
+ case 10:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_listxattr */
+ case 11:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_llistxattr */
+ case 12:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_flistxattr */
+ case 13:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_removexattr */
+ case 14:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_lremovexattr */
+ case 15:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_fremovexattr */
+ case 16:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_getcwd */
+ case 17:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_lookup_dcookie */
+ case 18:
+ /* linux_eventfd2 */
+ case 19:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_epoll_create1 */
+ case 20:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_epoll_ctl */
+ case 21:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_epoll_pwait */
+ case 22:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* dup */
+ case 23:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_dup3 */
+ case 24:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_fcntl */
+ case 25:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_inotify_init1 */
+ case 26:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_inotify_add_watch */
+ case 27:
+ /* linux_inotify_rm_watch */
+ case 28:
+ /* linux_ioctl */
+ case 29:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_ioprio_set */
+ case 30:
+ /* linux_ioprio_get */
+ case 31:
+ /* flock */
+ case 32:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_mknodat */
+ case 33:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_mkdirat */
+ case 34:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_unlinkat */
+ case 35:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_symlinkat */
+ case 36:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_linkat */
+ case 37:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_renameat */
+ case 38:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_mount */
+ case 40:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_pivot_root */
+ case 41:
+ /* linux_statfs */
+ case 43:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_fstatfs */
+ case 44:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_truncate */
+ case 45:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_ftruncate */
+ case 46:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_fallocate */
+ case 47:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_faccessat */
+ case 48:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_chdir */
+ case 49:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* fchdir */
+ case 50:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* chroot */
+ case 51:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* fchmod */
+ case 52:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_fchmodat */
+ case 53:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_fchownat */
+ case 54:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* fchown */
+ case 55:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_openat */
+ case 56:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* close */
+ case 57:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_vhangup */
+ case 58:
+ /* linux_pipe2 */
+ case 59:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_getdents64 */
+ case 61:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_lseek */
+ case 62:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* read */
+ case 63:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* write */
+ case 64:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* readv */
+ case 65:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* writev */
+ case 66:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_pread */
+ case 67:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_pwrite */
+ case 68:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_preadv */
+ case 69:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_pwritev */
+ case 70:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_sendfile */
+ case 71:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_pselect6 */
+ case 72:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_ppoll */
+ case 73:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_signalfd4 */
+ case 74:
+ /* linux_vmsplice */
+ case 75:
+ /* linux_splice */
+ case 76:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_tee */
+ case 77:
+ /* linux_readlinkat */
+ case 78:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_newfstatat */
+ case 79:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_newfstat */
+ case 80:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* fsync */
+ case 82:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_fdatasync */
+ case 83:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_sync_file_range */
+ case 84:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_timerfd_create */
+ case 85:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_timerfd_settime */
+ case 86:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_timerfd_gettime */
+ case 87:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_utimensat */
+ case 88:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* acct */
+ case 89:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_capget */
+ case 90:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_capset */
+ case 91:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_personality */
+ case 92:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_exit */
+ case 93:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_exit_group */
+ case 94:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_waitid */
+ case 95:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_set_tid_address */
+ case 96:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_unshare */
+ case 97:
+ /* linux_sys_futex */
+ case 98:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_set_robust_list */
+ case 99:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_get_robust_list */
+ case 100:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_nanosleep */
+ case 101:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_getitimer */
+ case 102:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_setitimer */
+ case 103:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_kexec_load */
+ case 104:
+ /* linux_init_module */
+ case 105:
+ /* linux_delete_module */
+ case 106:
+ /* linux_timer_create */
+ case 107:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_timer_gettime */
+ case 108:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_timer_getoverrun */
+ case 109:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_timer_settime */
+ case 110:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_timer_delete */
+ case 111:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_clock_settime */
+ case 112:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_clock_gettime */
+ case 113:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_clock_getres */
+ case 114:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_clock_nanosleep */
+ case 115:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_syslog */
+ case 116:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_ptrace */
+ case 117:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_sched_setparam */
+ case 118:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_sched_setscheduler */
+ case 119:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_sched_getscheduler */
+ case 120:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_sched_getparam */
+ case 121:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_sched_setaffinity */
+ case 122:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_sched_getaffinity */
+ case 123:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* sched_yield */
+ case 124:
+ /* linux_sched_get_priority_max */
+ case 125:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_sched_get_priority_min */
+ case 126:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_sched_rr_get_interval */
+ case 127:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_kill */
+ case 129:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_tkill */
+ case 130:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_tgkill */
+ case 131:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_sigaltstack */
+ case 132:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_rt_sigsuspend */
+ case 133:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_rt_sigaction */
+ case 134:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_rt_sigprocmask */
+ case 135:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_rt_sigpending */
+ case 136:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_rt_sigtimedwait */
+ case 137:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_rt_sigqueueinfo */
+ case 138:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_rt_sigreturn */
+ case 139:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* setpriority */
+ case 140:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_getpriority */
+ case 141:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_reboot */
+ case 142:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* setregid */
+ case 143:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* setgid */
+ case 144:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* setreuid */
+ case 145:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* setuid */
+ case 146:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* setresuid */
+ case 147:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* getresuid */
+ case 148:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* setresgid */
+ case 149:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* getresgid */
+ case 150:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_setfsuid */
+ case 151:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_setfsgid */
+ case 152:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_times */
+ case 153:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* setpgid */
+ case 154:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* getpgid */
+ case 155:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_getsid */
+ case 156:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* setsid */
+ case 157:
+ /* linux_getgroups */
+ case 158:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_setgroups */
+ case 159:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_newuname */
+ case 160:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_sethostname */
+ case 161:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_setdomainname */
+ case 162:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_getrlimit */
+ case 163:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_setrlimit */
+ case 164:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* getrusage */
+ case 165:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* umask */
+ case 166:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_prctl */
+ case 167:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_getcpu */
+ case 168:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* gettimeofday */
+ case 169:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* settimeofday */
+ case 170:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_adjtimex */
+ case 171:
+ /* linux_getpid */
+ case 172:
+ /* linux_getppid */
+ case 173:
+ /* linux_getuid */
+ case 174:
+ /* geteuid */
+ case 175:
+ /* linux_getgid */
+ case 176:
+ /* getegid */
+ case 177:
+ /* linux_gettid */
+ case 178:
+ /* linux_sysinfo */
+ case 179:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_mq_open */
+ case 180:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_mq_unlink */
+ case 181:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_mq_timedsend */
+ case 182:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_mq_timedreceive */
+ case 183:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_mq_notify */
+ case 184:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_mq_getsetattr */
+ case 185:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_msgget */
+ case 186:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_msgctl */
+ case 187:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_msgrcv */
+ case 188:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_msgsnd */
+ case 189:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_semget */
+ case 190:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_semctl */
+ case 191:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_semtimedop */
+ case 192:
+ /* linux_semop */
+ case 193:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_shmget */
+ case 194:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_shmctl */
+ case 195:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_shmat */
+ case 196:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_shmdt */
+ case 197:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_socket */
+ case 198:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_socketpair */
+ case 199:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_bind */
+ case 200:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_listen */
+ case 201:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_accept */
+ case 202:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_connect */
+ case 203:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_getsockname */
+ case 204:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_getpeername */
+ case 205:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_sendto */
+ case 206:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_recvfrom */
+ case 207:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_setsockopt */
+ case 208:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_getsockopt */
+ case 209:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_shutdown */
+ case 210:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_sendmsg */
+ case 211:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_recvmsg */
+ case 212:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_brk */
+ case 214:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* munmap */
+ case 215:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_mremap */
+ case 216:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_add_key */
+ case 217:
+ /* linux_request_key */
+ case 218:
+ /* linux_keyctl */
+ case 219:
+ /* linux_clone */
+ case 220:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_execve */
+ case 221:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_mmap2 */
+ case 222:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_fadvise64 */
+ case 223:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* swapon */
+ case 224:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_swapoff */
+ case 225:
+ /* linux_mprotect */
+ case 226:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_msync */
+ case 227:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* mlock */
+ case 228:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* munlock */
+ case 229:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* mlockall */
+ case 230:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* munlockall */
+ case 231:
+ /* linux_mincore */
+ case 232:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_madvise */
+ case 233:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_remap_file_pages */
+ case 234:
+ /* linux_mbind */
+ case 235:
+ /* linux_get_mempolicy */
+ case 236:
+ /* linux_set_mempolicy */
+ case 237:
+ /* linux_migrate_pages */
+ case 238:
+ /* linux_move_pages */
+ case 239:
+ /* linux_rt_tgsigqueueinfo */
+ case 240:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_perf_event_open */
+ case 241:
+ /* linux_accept4 */
+ case 242:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_recvmmsg */
+ case 243:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_wait4 */
+ case 260:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_prlimit64 */
+ case 261:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_fanotify_init */
+ case 262:
+ /* linux_fanotify_mark */
+ case 263:
+ /* linux_name_to_handle_at */
+ case 264:
+ /* linux_open_by_handle_at */
+ case 265:
+ /* linux_clock_adjtime */
+ case 266:
+ /* linux_syncfs */
+ case 267:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_setns */
+ case 268:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_sendmmsg */
+ case 269:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_process_vm_readv */
+ case 270:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_process_vm_writev */
+ case 271:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_kcmp */
+ case 272:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_finit_module */
+ case 273:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_sched_setattr */
+ case 274:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_sched_getattr */
+ case 275:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_renameat2 */
+ case 276:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_seccomp */
+ case 277:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_getrandom */
+ case 278:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_memfd_create */
+ case 279:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_bpf */
+ case 280:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_execveat */
+ case 281:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_userfaultfd */
+ case 282:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_membarrier */
+ case 283:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_mlock2 */
+ case 284:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_copy_file_range */
+ case 285:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_preadv2 */
+ case 286:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_pwritev2 */
+ case 287:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_pkey_mprotect */
+ case 288:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_pkey_alloc */
+ case 289:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* linux_pkey_free */
+ case 290:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ default:
+ break;
+ };
+ if (p != NULL)
+ strlcpy(desc, p, descsz);
+}
diff --git a/sys/arm64/linux/linux_sysvec.c b/sys/arm64/linux/linux_sysvec.c
new file mode 100644
index 000000000000..5b33b63fba92
--- /dev/null
+++ b/sys/arm64/linux/linux_sysvec.c
@@ -0,0 +1,566 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 1994-1996 Søren Schmidt
+ * Copyright (c) 2018 Turing Robotic Industries Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/elf.h>
+#include <sys/exec.h>
+#include <sys/imgact.h>
+#include <sys/imgact_elf.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/signalvar.h>
+#include <sys/sysctl.h>
+#include <sys/sysent.h>
+
+#include <vm/vm_param.h>
+
+#include <arm64/linux/linux.h>
+#include <arm64/linux/linux_proto.h>
+#include <compat/linux/linux_dtrace.h>
+#include <compat/linux/linux_emul.h>
+#include <compat/linux/linux_ioctl.h>
+#include <compat/linux/linux_mib.h>
+#include <compat/linux/linux_misc.h>
+#include <compat/linux/linux_util.h>
+#include <compat/linux/linux_vdso.h>
+
+MODULE_VERSION(linux64elf, 1);
+
+const char *linux_kplatform;
+static int linux_szsigcode;
+static vm_object_t linux_shared_page_obj;
+static char *linux_shared_page_mapping;
+extern char _binary_linux_locore_o_start;
+extern char _binary_linux_locore_o_end;
+
+extern struct sysent linux_sysent[LINUX_SYS_MAXSYSCALL];
+
+SET_DECLARE(linux_ioctl_handler_set, struct linux_ioctl_handler);
+
+static int linux_copyout_strings(struct image_params *imgp,
+ uintptr_t *stack_base);
+static int linux_elf_fixup(uintptr_t *stack_base,
+ struct image_params *iparams);
+static bool linux_trans_osrel(const Elf_Note *note, int32_t *osrel);
+static void linux_vdso_install(const void *param);
+static void linux_vdso_deinstall(const void *param);
+static void linux_set_syscall_retval(struct thread *td, int error);
+static int linux_fetch_syscall_args(struct thread *td);
+static void linux_exec_setregs(struct thread *td, struct image_params *imgp,
+ uintptr_t stack);
+static int linux_vsyscall(struct thread *td);
+
+/* DTrace init */
+LIN_SDT_PROVIDER_DECLARE(LINUX_DTRACE);
+
+/* DTrace probes */
+LIN_SDT_PROBE_DEFINE2(sysvec, linux_translate_traps, todo, "int", "int");
+LIN_SDT_PROBE_DEFINE0(sysvec, linux_exec_setregs, todo);
+LIN_SDT_PROBE_DEFINE0(sysvec, linux_copyout_auxargs, todo);
+LIN_SDT_PROBE_DEFINE0(sysvec, linux_elf_fixup, todo);
+LIN_SDT_PROBE_DEFINE0(sysvec, linux_rt_sigreturn, todo);
+LIN_SDT_PROBE_DEFINE0(sysvec, linux_rt_sendsig, todo);
+LIN_SDT_PROBE_DEFINE0(sysvec, linux_vsyscall, todo);
+LIN_SDT_PROBE_DEFINE0(sysvec, linux_vdso_install, todo);
+LIN_SDT_PROBE_DEFINE0(sysvec, linux_vdso_deinstall, todo);
+
+/* LINUXTODO: do we have traps to translate? */
+static int
+linux_translate_traps(int signal, int trap_code)
+{
+
+ LIN_SDT_PROBE2(sysvec, linux_translate_traps, todo, signal, trap_code);
+ return (signal);
+}
+
+LINUX_VDSO_SYM_CHAR(linux_platform);
+
+static int
+linux_fetch_syscall_args(struct thread *td)
+{
+ struct proc *p;
+ struct syscall_args *sa;
+ register_t *ap;
+
+ p = td->td_proc;
+ ap = td->td_frame->tf_x;
+ sa = &td->td_sa;
+
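+	/* The Linux arm64 ABI passes the system call number in x8. */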
+ sa->code = td->td_frame->tf_x[8];
+ /* LINUXTODO: generic syscall? */
+ if (sa->code >= p->p_sysent->sv_size)
+ sa->callp = &p->p_sysent->sv_table[0];
+ else
+ sa->callp = &p->p_sysent->sv_table[sa->code];
+
+ sa->narg = sa->callp->sy_narg;
+ if (sa->narg > 8)
+ panic("ARM64TODO: Could we have more than 8 args?");
+ memcpy(sa->args, ap, 8 * sizeof(register_t));
+
+ td->td_retval[0] = 0;
+ return (0);
+}
+
+static void
+linux_set_syscall_retval(struct thread *td, int error)
+{
+
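+	/*
+	 * Linux system calls return a single value in x0; copy the current
+	 * x1 into td_retval[1] so cpu_set_syscall_retval() writes it back
+	 * unchanged.
+	 */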
+ td->td_retval[1] = td->td_frame->tf_x[1];
+ cpu_set_syscall_retval(td, error);
+}
+
+static int
+linux_copyout_auxargs(struct image_params *imgp, uintptr_t base)
+{
+ Elf_Auxargs *args;
+ Elf_Auxinfo *argarray, *pos;
+ struct proc *p;
+ int error, issetugid;
+
+ LIN_SDT_PROBE0(sysvec, linux_copyout_auxargs, todo);
+ p = imgp->proc;
+
+ args = (Elf64_Auxargs *)imgp->auxargs;
+ argarray = pos = malloc(LINUX_AT_COUNT * sizeof(*pos), M_TEMP,
+ M_WAITOK | M_ZERO);
+
+ issetugid = p->p_flag & P_SUGID ? 1 : 0;
+ AUXARGS_ENTRY(pos, LINUX_AT_SYSINFO_EHDR,
+ imgp->proc->p_sysent->sv_shared_page_base);
+#if 0 /* LINUXTODO: implement arm64 LINUX_AT_HWCAP */
+ AUXARGS_ENTRY(pos, LINUX_AT_HWCAP, cpu_feature);
+#endif
+ AUXARGS_ENTRY(pos, LINUX_AT_CLKTCK, stclohz);
+ AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
+ AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
+ AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
+ AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
+ AUXARGS_ENTRY(pos, AT_BASE, args->base);
+ AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
+ AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
+ AUXARGS_ENTRY(pos, AT_UID, imgp->proc->p_ucred->cr_ruid);
+ AUXARGS_ENTRY(pos, AT_EUID, imgp->proc->p_ucred->cr_svuid);
+ AUXARGS_ENTRY(pos, AT_GID, imgp->proc->p_ucred->cr_rgid);
+ AUXARGS_ENTRY(pos, AT_EGID, imgp->proc->p_ucred->cr_svgid);
+ AUXARGS_ENTRY(pos, LINUX_AT_SECURE, issetugid);
+#if 0 /* LINUXTODO: implement arm64 LINUX_AT_PLATFORM */
+ AUXARGS_ENTRY(pos, LINUX_AT_PLATFORM, PTROUT(linux_platform));
+#endif
+ AUXARGS_ENTRY_PTR(pos, LINUX_AT_RANDOM, imgp->canary);
+ if (imgp->execpathp != 0)
+ AUXARGS_ENTRY_PTR(pos, LINUX_AT_EXECFN, imgp->execpathp);
+ if (args->execfd != -1)
+ AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
+ AUXARGS_ENTRY(pos, AT_NULL, 0);
+ free(imgp->auxargs, M_TEMP);
+ imgp->auxargs = NULL;
+ KASSERT(pos - argarray <= LINUX_AT_COUNT, ("Too many auxargs"));
+
+ error = copyout(argarray, (void *)base,
+ sizeof(*argarray) * LINUX_AT_COUNT);
+ free(argarray, M_TEMP);
+ return (error);
+}
+
+static int
+linux_elf_fixup(uintptr_t *stack_base, struct image_params *imgp)
+{
+
+ LIN_SDT_PROBE0(sysvec, linux_elf_fixup, todo);
+
+ return (0);
+}
+
+/*
+ * Copy strings out to the new process address space, constructing new arg
+ * and env vector tables. Return a pointer to the base so that it can be used
+ * as the initial stack pointer.
+ * LINUXTODO: deduplicate against other linuxulator archs
+ */
+static int
+linux_copyout_strings(struct image_params *imgp, uintptr_t *stack_base)
+{
+ char **vectp;
+ char *stringp;
+ uintptr_t destp, ustringp;
+ struct ps_strings *arginfo;
+ char canary[LINUX_AT_RANDOM_LEN];
+ size_t execpath_len;
+ struct proc *p;
+ int argc, envc, error;
+
+ /* Calculate string base and vector table pointers. */
+ if (imgp->execpath != NULL && imgp->auxargs != NULL)
+ execpath_len = strlen(imgp->execpath) + 1;
+ else
+ execpath_len = 0;
+
+ p = imgp->proc;
+ arginfo = (struct ps_strings *)p->p_sysent->sv_psstrings;
+ destp = (uintptr_t)arginfo;
+
+ if (execpath_len != 0) {
+ destp -= execpath_len;
+ destp = rounddown2(destp, sizeof(void *));
+ imgp->execpathp = (void *)destp;
+ error = copyout(imgp->execpath, imgp->execpathp, execpath_len);
+ if (error != 0)
+ return (error);
+ }
+
+ /* Prepare the canary for SSP. */
+ arc4rand(canary, sizeof(canary), 0);
+ destp -= roundup(sizeof(canary), sizeof(void *));
+ imgp->canary = (void *)destp;
+ error = copyout(canary, imgp->canary, sizeof(canary));
+ if (error != 0)
+ return (error);
+
+ /* Allocate room for the argument and environment strings. */
+ destp -= ARG_MAX - imgp->args->stringspace;
+ destp = rounddown2(destp, sizeof(void *));
+ ustringp = destp;
+
+ if (imgp->auxargs) {
+ /*
+ * Allocate room on the stack for the ELF auxargs
+ * array. It has up to LINUX_AT_COUNT entries.
+ */
+ destp -= LINUX_AT_COUNT * sizeof(Elf64_Auxinfo);
+ destp = rounddown2(destp, sizeof(void *));
+ }
+
+ vectp = (char **)destp;
+
+ /*
+ * Allocate room for argc and the argv[] and env vectors including the
+ * terminating NULL pointers.
+ */
+ vectp -= 1 + imgp->args->argc + 1 + imgp->args->envc + 1;
+ vectp = (char **)STACKALIGN(vectp);
+
+ /* vectp also becomes our initial stack base. */
+ *stack_base = (uintptr_t)vectp;
+
+ stringp = imgp->args->begin_argv;
+ argc = imgp->args->argc;
+ envc = imgp->args->envc;
+
+ /* Copy out strings - arguments and environment. */
+ error = copyout(stringp, (void *)ustringp,
+ ARG_MAX - imgp->args->stringspace);
+ if (error != 0)
+ return (error);
+
+ /* Fill in "ps_strings" struct for ps, w, etc. */
+ if (suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp) != 0 ||
+ suword(&arginfo->ps_nargvstr, argc) != 0)
+ return (EFAULT);
+
+ if (suword(vectp++, argc) != 0)
+ return (EFAULT);
+
+ /* Fill in argument portion of vector table. */
+ for (; argc > 0; --argc) {
+ if (suword(vectp++, ustringp) != 0)
+ return (EFAULT);
+ while (*stringp++ != 0)
+ ustringp++;
+ ustringp++;
+ }
+
+ /* A null vector table pointer separates the argp's from the envp's. */
+ if (suword(vectp++, 0) != 0)
+ return (EFAULT);
+
+ if (suword(&arginfo->ps_envstr, (long)(intptr_t)vectp) != 0 ||
+ suword(&arginfo->ps_nenvstr, envc) != 0)
+ return (EFAULT);
+
+ /* Fill in environment portion of vector table. */
+ for (; envc > 0; --envc) {
+ if (suword(vectp++, ustringp) != 0)
+ return (EFAULT);
+ while (*stringp++ != 0)
+ ustringp++;
+ ustringp++;
+ }
+
+ /* The end of the vector table is a null pointer. */
+ if (suword(vectp, 0) != 0)
+ return (EFAULT);
+
+ if (imgp->auxargs) {
+ vectp++;
+ error = imgp->sysent->sv_copyout_auxargs(imgp,
+ (uintptr_t)vectp);
+ if (error != 0)
+ return (error);
+ }
+
+ return (0);
+}
+
+/*
+ * Reset registers to default values on exec.
+ */
+static void
+linux_exec_setregs(struct thread *td, struct image_params *imgp,
+ uintptr_t stack)
+{
+ struct trapframe *regs = td->td_frame;
+
+ /* LINUXTODO: validate */
+ LIN_SDT_PROBE0(sysvec, linux_exec_setregs, todo);
+
+ memset(regs, 0, sizeof(*regs));
+	/* glibc's start.S registers the function pointer in x0 with atexit(). */
+ regs->tf_sp = stack;
+#if 0 /* LINUXTODO: See if this is used. */
+ regs->tf_lr = imgp->entry_addr;
+#else
+ regs->tf_lr = 0xffffffffffffffff;
+#endif
+ regs->tf_elr = imgp->entry_addr;
+}
+
+int
+linux_rt_sigreturn(struct thread *td, struct linux_rt_sigreturn_args *args)
+{
+
+ /* LINUXTODO: implement */
+ LIN_SDT_PROBE0(sysvec, linux_rt_sigreturn, todo);
+ return (EDOOFUS);
+}
+
+static void
+linux_rt_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
+{
+
+ /* LINUXTODO: implement */
+ LIN_SDT_PROBE0(sysvec, linux_rt_sendsig, todo);
+}
+
+static int
+linux_vsyscall(struct thread *td)
+{
+
+ /* LINUXTODO: implement */
+ LIN_SDT_PROBE0(sysvec, linux_vsyscall, todo);
+ return (EDOOFUS);
+}
+
+struct sysentvec elf_linux_sysvec = {
+ .sv_size = LINUX_SYS_MAXSYSCALL,
+ .sv_table = linux_sysent,
+ .sv_errsize = ELAST + 1,
+ .sv_errtbl = linux_errtbl,
+ .sv_transtrap = linux_translate_traps,
+ .sv_fixup = linux_elf_fixup,
+ .sv_sendsig = linux_rt_sendsig,
+ .sv_sigcode = &_binary_linux_locore_o_start,
+ .sv_szsigcode = &linux_szsigcode,
+ .sv_name = "Linux ELF64",
+ .sv_coredump = elf64_coredump,
+ .sv_imgact_try = linux_exec_imgact_try,
+ .sv_minsigstksz = LINUX_MINSIGSTKSZ,
+ .sv_minuser = VM_MIN_ADDRESS,
+ .sv_maxuser = VM_MAXUSER_ADDRESS,
+ .sv_usrstack = USRSTACK,
+ .sv_psstrings = PS_STRINGS, /* XXX */
+ .sv_stackprot = VM_PROT_READ | VM_PROT_WRITE,
+ .sv_copyout_auxargs = linux_copyout_auxargs,
+ .sv_copyout_strings = linux_copyout_strings,
+ .sv_setregs = linux_exec_setregs,
+ .sv_fixlimit = NULL,
+ .sv_maxssiz = NULL,
+ .sv_flags = SV_ABI_LINUX | SV_LP64 | SV_SHP,
+ .sv_set_syscall_retval = linux_set_syscall_retval,
+ .sv_fetch_syscall_args = linux_fetch_syscall_args,
+ .sv_syscallnames = NULL,
+ .sv_shared_page_base = SHAREDPAGE,
+ .sv_shared_page_len = PAGE_SIZE,
+ .sv_schedtail = linux_schedtail,
+ .sv_thread_detach = linux_thread_detach,
+ .sv_trap = linux_vsyscall,
+};
+
+static void
+linux_vdso_install(const void *param)
+{
+
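+	/*
+	 * Copy the prebuilt vDSO/sigcode object into the shared page and
+	 * apply its relocations against the shared page base.
+	 */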
+ linux_szsigcode = (&_binary_linux_locore_o_end -
+ &_binary_linux_locore_o_start);
+
+ if (linux_szsigcode > elf_linux_sysvec.sv_shared_page_len)
+ panic("invalid Linux VDSO size\n");
+
+ __elfN(linux_vdso_fixup)(&elf_linux_sysvec);
+
+ linux_shared_page_obj = __elfN(linux_shared_page_init)
+ (&linux_shared_page_mapping);
+
+ __elfN(linux_vdso_reloc)(&elf_linux_sysvec);
+
+ memcpy(linux_shared_page_mapping, elf_linux_sysvec.sv_sigcode,
+ linux_szsigcode);
+ elf_linux_sysvec.sv_shared_page_obj = linux_shared_page_obj;
+
+ printf("LINUXTODO: %s: fix linux_kplatform\n", __func__);
+#if 0
+ linux_kplatform = linux_shared_page_mapping +
+ (linux_platform - (caddr_t)elf_linux_sysvec.sv_shared_page_base);
+#else
+ linux_kplatform = "arm64";
+#endif
+}
+SYSINIT(elf_linux_vdso_init, SI_SUB_EXEC, SI_ORDER_ANY,
+ linux_vdso_install, NULL);
+
+static void
+linux_vdso_deinstall(const void *param)
+{
+
+ LIN_SDT_PROBE0(sysvec, linux_vdso_deinstall, todo);
+ __elfN(linux_shared_page_fini)(linux_shared_page_obj);
+}
+SYSUNINIT(elf_linux_vdso_uninit, SI_SUB_EXEC, SI_ORDER_FIRST,
+ linux_vdso_deinstall, NULL);
+
+static char GNU_ABI_VENDOR[] = "GNU";
+static int GNU_ABI_LINUX = 0;
+
+/* LINUXTODO: deduplicate */
+static bool
+linux_trans_osrel(const Elf_Note *note, int32_t *osrel)
+{
+ const Elf32_Word *desc;
+ uintptr_t p;
+
+ p = (uintptr_t)(note + 1);
+ p += roundup2(note->n_namesz, sizeof(Elf32_Addr));
+
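+	/*
+	 * desc[0] is the ABI OS tag (0 == Linux); desc[1..3] encode the
+	 * minimum kernel version the binary expects.
+	 */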
+ desc = (const Elf32_Word *)p;
+ if (desc[0] != GNU_ABI_LINUX)
+ return (false);
+
+ *osrel = LINUX_KERNVER(desc[1], desc[2], desc[3]);
+ return (true);
+}
+
+static Elf_Brandnote linux64_brandnote = {
+ .hdr.n_namesz = sizeof(GNU_ABI_VENDOR),
+ .hdr.n_descsz = 16,
+ .hdr.n_type = 1,
+ .vendor = GNU_ABI_VENDOR,
+ .flags = BN_TRANSLATE_OSREL,
+ .trans_osrel = linux_trans_osrel
+};
+
+static Elf64_Brandinfo linux_glibc2brand = {
+ .brand = ELFOSABI_LINUX,
+ .machine = EM_AARCH64,
+ .compat_3_brand = "Linux",
+ .emul_path = linux_emul_path,
+	.interp_path		= "/lib/ld-linux-aarch64.so.1",
+ .sysvec = &elf_linux_sysvec,
+ .interp_newpath = NULL,
+ .brand_note = &linux64_brandnote,
+ .flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE
+};
+
+Elf64_Brandinfo *linux_brandlist[] = {
+ &linux_glibc2brand,
+ NULL
+};
+
+static int
+linux64_elf_modevent(module_t mod, int type, void *data)
+{
+ Elf64_Brandinfo **brandinfo;
+	struct linux_ioctl_handler **lihp;
+ int error;
+
+ error = 0;
+	switch (type) {
+ case MOD_LOAD:
+ for (brandinfo = &linux_brandlist[0]; *brandinfo != NULL;
+ ++brandinfo)
+ if (elf64_insert_brand_entry(*brandinfo) < 0)
+ error = EINVAL;
+ if (error == 0) {
+ SET_FOREACH(lihp, linux_ioctl_handler_set)
+ linux_ioctl_register_handler(*lihp);
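+			/* stclohz is reported to Linux binaries via LINUX_AT_CLKTCK. */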
+ stclohz = (stathz ? stathz : hz);
+ if (bootverbose)
+ printf("Linux arm64 ELF exec handler installed\n");
+ }
+ break;
+ case MOD_UNLOAD:
+ for (brandinfo = &linux_brandlist[0]; *brandinfo != NULL;
+ ++brandinfo)
+ if (elf64_brand_inuse(*brandinfo))
+ error = EBUSY;
+ if (error == 0) {
+ for (brandinfo = &linux_brandlist[0];
+ *brandinfo != NULL; ++brandinfo)
+ if (elf64_remove_brand_entry(*brandinfo) < 0)
+ error = EINVAL;
+ }
+ if (error == 0) {
+ SET_FOREACH(lihp, linux_ioctl_handler_set)
+ linux_ioctl_unregister_handler(*lihp);
+ if (bootverbose)
+				printf("Linux arm64 ELF exec handler removed\n");
+ } else
+ printf("Could not deinstall ELF interpreter entry\n");
+ break;
+ default:
+ return (EOPNOTSUPP);
+ }
+ return (error);
+}
+
+static moduledata_t linux64_elf_mod = {
+ "linux64elf",
+ linux64_elf_modevent,
+ 0
+};
+
+DECLARE_MODULE_TIED(linux64elf, linux64_elf_mod, SI_SUB_EXEC, SI_ORDER_ANY);
+MODULE_DEPEND(linux64elf, linux_common, 1, 1, 1);
+FEATURE(linux64, "AArch64 Linux 64bit support");
diff --git a/sys/arm64/linux/linux_vdso.lds.s b/sys/arm64/linux/linux_vdso.lds.s
new file mode 100644
index 000000000000..86f8de91bf60
--- /dev/null
+++ b/sys/arm64/linux/linux_vdso.lds.s
@@ -0,0 +1,22 @@
+/*
+ * Stub arm64 vdso linker script.
+ * LINUXTODO: update along with VDSO implementation
+ *
+ * $FreeBSD$
+ */
+
+SECTIONS
+{
+ . = . + SIZEOF_HEADERS;
+ .text : { *(.text*) }
+ .rodata : { *(.rodata*) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .data : { *(.data*) }
+ .dynamic : { *(.dynamic) }
+}
diff --git a/sys/arm64/linux/syscalls.conf b/sys/arm64/linux/syscalls.conf
new file mode 100644
index 000000000000..29f37920bba6
--- /dev/null
+++ b/sys/arm64/linux/syscalls.conf
@@ -0,0 +1,11 @@
+# $FreeBSD$
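+# Output files produced by the syscall table generator for the arm64
+# Linuxulator; sysmk is unused and therefore routed to /dev/null.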
+sysnames="linux_syscalls.c"
+sysproto="linux_proto.h"
+sysproto_h=_LINUX_SYSPROTO_H_
+syshdr="linux_syscall.h"
+syssw="linux_sysent.c"
+sysmk="/dev/null"
+syscallprefix="LINUX_SYS_"
+switchname="linux_sysent"
+namesname="linux_syscallnames"
+systrace="linux_systrace_args.c"
diff --git a/sys/arm64/linux/syscalls.master b/sys/arm64/linux/syscalls.master
new file mode 100644
index 000000000000..81d888e44492
--- /dev/null
+++ b/sys/arm64/linux/syscalls.master
@@ -0,0 +1,1669 @@
+ $FreeBSD$
+
+; Linux ABI system call generic name/number map, based on Linux file
+; include/uapi/asm-generic/unistd.h
+
+#include <sys/param.h>
+#include <sys/sysent.h>
+#include <sys/sysproto.h>
+#include <compat/linux/linux_sysproto.h>
+#include <arm64/linux/linux.h>
+#include <arm64/linux/linux_proto.h>
+
+; Isn't pretty, but there seems to be no other way to trap nosys
+#define nosys linux_nosys
+
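+; Each entry: syscall number, audit event, entry type (STD, NOPROTO, UNIMPL),
+; and the prototype block used to generate linux_proto.h.
+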
+0 AUE_NULL UNIMPL linux_io_setup
+1 AUE_NULL UNIMPL linux_io_destroy
+2 AUE_NULL UNIMPL linux_io_submit
+3 AUE_NULL UNIMPL linux_io_cancel
+4 AUE_NULL UNIMPL linux_io_getevents
+5 AUE_NULL STD {
+ int linux_setxattr(
+ const char *path,
+ const char *name,
+ const char *value,
+ l_size_t size,
+ l_int flags
+ );
+ }
+6 AUE_NULL STD {
+ int linux_lsetxattr(
+ const char *path,
+ const char *name,
+ const char *value,
+ l_size_t size,
+ l_int flags
+ );
+ }
+7 AUE_NULL STD {
+ int linux_fsetxattr(
+ l_int fd,
+ const char *name,
+ const char *value,
+ l_size_t size,
+ l_int flags
+ );
+ }
+8 AUE_NULL STD {
+ int linux_getxattr(
+ const char *path,
+ const char *name,
+ char *value,
+ l_size_t size
+ );
+ }
+9 AUE_NULL STD {
+ int linux_lgetxattr(
+ const char *path,
+ const char *name,
+ char *value,
+ l_size_t size
+ );
+ }
+10 AUE_NULL STD {
+ int linux_fgetxattr(
+ l_int fd,
+ const char *name,
+ char *value,
+ l_size_t size
+ );
+ }
+11 AUE_NULL STD {
+ int linux_listxattr(
+ const char *path,
+ const char *list,
+ l_size_t size
+ );
+ }
+12 AUE_NULL STD {
+ int linux_llistxattr(
+ const char *path,
+ const char *list,
+ l_size_t size
+ );
+ }
+13 AUE_NULL STD {
+ int linux_flistxattr(
+ l_int fd,
+ const char *list,
+ l_size_t size
+ );
+ }
+14 AUE_NULL STD {
+ int linux_removexattr(
+ const char *path,
+ const char *name
+ );
+ }
+15 AUE_NULL STD {
+ int linux_lremovexattr(
+ const char *path,
+ const char *name
+ );
+ }
+16 AUE_NULL STD {
+ int linux_fremovexattr(
+ l_int fd,
+ const char *name
+ );
+ }
+17 AUE_GETCWD STD {
+ int linux_getcwd(
+ char *buf,
+ l_ulong bufsize
+ );
+ }
+18 AUE_NULL STD {
+ int linux_lookup_dcookie(void);
+ }
+19 AUE_NULL STD {
+ int linux_eventfd2(
+ l_uint initval,
+ l_int flags
+ );
+ }
+20 AUE_NULL STD {
+ int linux_epoll_create1(
+ l_int flags
+ );
+ }
+21 AUE_NULL STD {
+ int linux_epoll_ctl(
+ l_int epfd,
+ l_int op,
+ l_int fd,
+ struct epoll_event *event
+ );
+ }
+22 AUE_NULL STD {
+ int linux_epoll_pwait(
+ l_int epfd,
+ struct epoll_event *events,
+ l_int maxevents,
+ l_int timeout,
+ l_sigset_t *mask,
+ l_size_t sigsetsize
+ );
+ }
+23 AUE_DUP NOPROTO {
+ int dup(
+ u_int fd);
+ }
+24 AUE_NULL STD {
+ int linux_dup3(
+ l_int oldfd,
+ l_int newfd,
+ l_int flags
+ );
+ }
+25 AUE_FCNTL STD {
+ int linux_fcntl(
+ l_uint fd,
+ l_uint cmd,
+ l_ulong arg
+ );
+ }
+26 AUE_NULL STD {
+ int linux_inotify_init1(
+ l_int flags
+ );
+ }
+27 AUE_NULL STD {
+ int linux_inotify_add_watch(void);
+ }
+28 AUE_NULL STD {
+ int linux_inotify_rm_watch(void);
+ }
+29 AUE_IOCTL STD {
+ int linux_ioctl(
+ l_uint fd,
+ l_uint cmd,
+ l_ulong arg
+ );
+ }
+30 AUE_NULL STD {
+ int linux_ioprio_set(void);
+ }
+31 AUE_NULL STD {
+ int linux_ioprio_get(void);
+ }
+32 AUE_FLOCK NOPROTO {
+ int flock(
+ int fd,
+ int how
+ );
+ }
+33 AUE_MKNODAT STD {
+ int linux_mknodat(
+ l_int dfd,
+ const char *filename,
+ l_int mode,
+ l_uint dev
+ );
+ }
+34 AUE_MKDIRAT STD {
+ int linux_mkdirat(
+ l_int dfd,
+ const char *pathname,
+ l_mode_t mode
+ );
+ }
+35 AUE_UNLINKAT STD {
+ int linux_unlinkat(
+ l_int dfd,
+ const char *pathname,
+ l_int flag
+ );
+ }
+36 AUE_SYMLINKAT STD {
+ int linux_symlinkat(
+ const char *oldname,
+ l_int newdfd,
+ const char *newname
+ );
+ }
+37 AUE_LINKAT STD {
+ int linux_linkat(
+ l_int olddfd,
+ const char *oldname,
+ l_int newdfd,
+ const char *newname,
+ l_int flag
+ );
+ }
+38 AUE_RENAMEAT STD {
+ int linux_renameat(
+ l_int olddfd,
+ const char *oldname,
+ l_int newdfd,
+ const char *newname
+ );
+ }
+39 AUE_NULL UNIMPL linux_umount2
+40 AUE_MOUNT STD {
+ int linux_mount(
+ char *specialfile,
+ char *dir,
+ char *filesystemtype,
+ l_ulong rwflag,
+ void *data
+ );
+ }
+41 AUE_PIVOT_ROOT STD {
+ int linux_pivot_root(void);
+ }
+42 AUE_NULL UNIMPL nfsservctl
+43 AUE_STATFS STD {
+ int linux_statfs(
+ char *path,
+ struct l_statfs_buf *buf
+ );
+ }
+44 AUE_FSTATFS STD {
+ int linux_fstatfs(
+ l_uint fd,
+ struct l_statfs_buf *buf
+ );
+ }
+45 AUE_TRUNCATE STD {
+ int linux_truncate(
+ char *path,
+ l_ulong length
+ );
+ }
+46 AUE_FTRUNCATE STD {
+ int linux_ftruncate(
+ l_int fd,
+ l_long length
+ );
+ }
+47 AUE_NULL STD {
+ int linux_fallocate(
+ l_int fd,
+ l_int mode,
+ l_loff_t offset,
+ l_loff_t len
+ );
+ }
+48 AUE_FACCESSAT STD {
+ int linux_faccessat(
+ l_int dfd,
+ const char *filename,
+ l_int amode
+ );
+ }
+49 AUE_CHDIR STD {
+ int linux_chdir(
+ char *path
+ );
+ }
+50 AUE_FCHDIR NOPROTO {
+ int fchdir(
+ int fd);
+ }
+51 AUE_CHROOT NOPROTO {
+ int chroot(
+ char *path
+ );
+ }
+52 AUE_FCHMOD NOPROTO {
+ int fchmod(
+ int fd,
+ int mode
+ );
+ }
+53 AUE_FCHMODAT STD {
+ int linux_fchmodat(
+ l_int dfd,
+ const char *filename,
+ l_mode_t mode
+ );
+ }
+54 AUE_FCHOWNAT STD {
+ int linux_fchownat(
+ l_int dfd,
+ const char *filename,
+ l_uid_t uid,
+ l_gid_t gid,
+ l_int flag
+ );
+ }
+55 AUE_FCHOWN NOPROTO {
+ int fchown(
+ int fd,
+ int uid,
+ int gid);
+ }
+56 AUE_OPEN_RWTC STD {
+ int linux_openat(
+ l_int dfd,
+ const char *filename,
+ l_int flags,
+ l_mode_t mode
+ );
+ }
+57 AUE_CLOSE NOPROTO {
+ int close(
+ int fd);
+ }
+58 AUE_NULL STD {
+ int linux_vhangup(void);
+ }
+59 AUE_NULL STD {
+ int linux_pipe2(
+ l_int *pipefds,
+ l_int flags
+ );
+ }
+60 AUE_NULL UNIMPL linux_quotactl
+61 AUE_GETDIRENTRIES STD {
+ int linux_getdents64(
+ l_uint fd,
+ void *dirent,
+ l_uint count
+ );
+ }
+62 AUE_LSEEK STD {
+ int linux_lseek(
+ l_uint fdes,
+ l_off_t off,
+ l_int whence
+ );
+ }
+63 AUE_NULL NOPROTO {
+ int read(
+ int fd,
+ char *buf,
+ l_size_t nbyte
+ );
+ }
+64 AUE_NULL NOPROTO {
+ int write(
+ int fd,
+ char *buf,
+ l_size_t nbyte
+ );
+ }
+65 AUE_READV NOPROTO {
+ int readv(
+ int fd,
+ struct iovec *iovp,
+ u_int iovcnt
+ );
+ }
+66 AUE_WRITEV NOPROTO {
+ int writev(
+ int fd,
+ struct iovec *iovp,
+ u_int iovcnt
+ );
+ }
+67 AUE_PREAD STD {
+ int linux_pread(
+ l_uint fd,
+ char *buf,
+ l_size_t nbyte,
+ l_loff_t offset
+ );
+ }
+68 AUE_PWRITE STD {
+ int linux_pwrite(
+ l_uint fd,
+ char *buf,
+ l_size_t nbyte,
+ l_loff_t offset
+ );
+ }
+69 AUE_NULL STD {
+ int linux_preadv(
+ l_ulong fd,
+ struct iovec *vec,
+ l_ulong vlen,
+ l_ulong pos_l,
+ l_ulong pos_h
+ );
+ }
+70 AUE_NULL STD {
+ int linux_pwritev(
+ l_ulong fd,
+ struct iovec *vec,
+ l_ulong vlen,
+ l_ulong pos_l,
+ l_ulong pos_h
+ );
+ }
+71 AUE_SENDFILE STD {
+ int linux_sendfile(
+ l_int out,
+ l_int in,
+ l_off_t *offset,
+ l_size_t count
+ );
+ }
+72 AUE_SELECT STD {
+ int linux_pselect6(
+ l_int nfds,
+ l_fd_set *readfds,
+ l_fd_set *writefds,
+ l_fd_set *exceptfds,
+ struct l_timespec *tsp,
+ l_uintptr_t *sig
+ );
+ }
+73 AUE_POLL STD {
+ int linux_ppoll(
+ struct pollfd *fds,
+ l_uint nfds,
+ struct l_timespec *tsp,
+ l_sigset_t *sset,
+ l_size_t ssize
+ );
+ }
+74 AUE_NULL STD {
+ int linux_signalfd4(void);
+ }
+75 AUE_NULL STD {
+ int linux_vmsplice(void);
+ }
+76 AUE_NULL STD {
+ int linux_splice(
+ int fd_in,
+ l_loff_t *off_in,
+ int fd_out,
+ l_loff_t *off_out,
+ l_size_t len,
+ l_uint flags
+ );
+ }
+77 AUE_NULL STD {
+ int linux_tee(void);
+ }
+78 AUE_READLINKAT STD {
+ int linux_readlinkat(
+ l_int dfd,
+ const char *path,
+ char *buf,
+ l_int bufsiz
+ );
+ }
+79 AUE_FSTATAT STD {
+ int linux_newfstatat(
+ l_int dfd,
+ char *pathname,
+ struct l_stat64 *statbuf,
+ l_int flag
+ );
+ }
+80 AUE_FSTAT STD {
+ int linux_newfstat(
+ l_uint fd,
+ struct l_newstat *buf
+ );
+ }
+81 AUE_NULL UNIMPL linux_sync
+82 AUE_FSYNC NOPROTO {
+ int fsync(
+ int fd);
+ }
+83 AUE_NULL STD {
+ int linux_fdatasync(
+ l_uint fd);
+ }
+84 AUE_NULL STD {
+ int linux_sync_file_range(
+ l_int fd,
+ l_loff_t offset,
+ l_loff_t nbytes,
+ l_uint flags
+ );
+ }
+85 AUE_NULL STD {
+ int linux_timerfd_create(
+ l_int clockid,
+ l_int flags
+ );
+ }
+86 AUE_NULL STD {
+ int linux_timerfd_settime(
+ l_int fd,
+ l_int flags,
+ const struct l_itimerspec *new_value,
+ struct l_itimerspec *old_value
+ );
+ }
+87 AUE_NULL STD {
+ int linux_timerfd_gettime(
+ l_int fd,
+ struct l_itimerspec *old_value
+ );
+ }
+88 AUE_FUTIMESAT STD {
+ int linux_utimensat(
+ l_int dfd,
+ const char *pathname,
+ const struct l_timespec *times,
+ l_int flags
+ );
+ }
+89 AUE_ACCT NOPROTO {
+ int acct(
+ char *path
+ );
+ }
+90 AUE_CAPGET STD {
+ int linux_capget(
+ struct l_user_cap_header *hdrp,
+ struct l_user_cap_data *datap
+ );
+ }
+91 AUE_CAPSET STD {
+ int linux_capset(
+ struct l_user_cap_header *hdrp,
+ struct l_user_cap_data *datap
+ );
+ }
+92 AUE_PERSONALITY STD {
+ int linux_personality(
+ l_uint per
+ );
+ }
+93 AUE_EXIT STD {
+ int linux_exit(
+ u_int rval
+ );
+ }
+94 AUE_EXIT STD {
+ int linux_exit_group(
+ l_int error_code
+ );
+ }
+95 AUE_WAIT6 STD {
+ int linux_waitid(
+ l_int idtype,
+ l_pid_t id,
+ l_siginfo_t *info,
+ l_int options,
+ struct rusage *rusage
+ );
+ }
+96 AUE_NULL STD {
+ int linux_set_tid_address(
+ l_int *tidptr
+ );
+ }
+97 AUE_NULL STD {
+ int linux_unshare(void);
+ }
+98 AUE_NULL STD {
+ int linux_sys_futex(void *uaddr,
+ int op,
+ int val,
+ struct l_timespec *timeout,
+ void *uaddr2,
+ int val3
+ );
+ }
+99 AUE_NULL STD {
+ int linux_set_robust_list(
+ struct linux_robust_list_head *head,
+ l_size_t len
+ );
+ }
+100 AUE_NULL STD {
+ int linux_get_robust_list(
+ l_int pid,
+ struct linux_robust_list_head **head,
+ l_size_t *len
+ );
+ }
+101 AUE_NULL STD {
+ int linux_nanosleep(
+ const struct l_timespec *rqtp,
+ struct l_timespec *rmtp
+ );
+ }
+102 AUE_GETITIMER STD {
+ int linux_getitimer(
+ l_int which,
+ struct l_itimerval *itv
+ );
+ }
+103 AUE_SETITIMER STD {
+ int linux_setitimer(
+ l_int which,
+ struct l_itimerval *itv,
+ struct l_itimerval *oitv
+ );
+ }
+104 AUE_NULL STD {
+ int linux_kexec_load(void);
+ }
+105 AUE_NULL STD {
+ int linux_init_module(void);
+ }
+106 AUE_NULL STD {
+ int linux_delete_module(void);
+ }
+107 AUE_NULL STD {
+ int linux_timer_create(
+ clockid_t clock_id,
+ struct sigevent *evp,
+ l_timer_t *timerid);
+ }
+108 AUE_NULL STD {
+ int linux_timer_gettime(
+ l_timer_t timerid,
+ struct itimerspec *setting
+ );
+ }
+109 AUE_NULL STD {
+ int linux_timer_getoverrun(
+ l_timer_t timerid);
+ }
+110 AUE_NULL STD {
+ int linux_timer_settime(
+ l_timer_t timerid,
+ l_int flags,
+ const struct itimerspec *new,
+ struct itimerspec *old);
+ }
+111 AUE_NULL STD {
+ int linux_timer_delete(
+ l_timer_t timerid);
+ }
+112 AUE_CLOCK_SETTIME STD {
+ int linux_clock_settime(
+ clockid_t which,
+ struct l_timespec *tp
+ );
+ }
+113 AUE_NULL STD {
+ int linux_clock_gettime(
+ clockid_t which,
+ struct l_timespec *tp
+ );
+ }
+114 AUE_NULL STD {
+ int linux_clock_getres(
+ clockid_t which,
+ struct l_timespec *tp
+ );
+ }
+115 AUE_NULL STD {
+ int linux_clock_nanosleep(
+ clockid_t which,
+ l_int flags,
+ struct l_timespec *rqtp,
+ struct l_timespec *rmtp
+ );
+ }
+116 AUE_NULL STD {
+ int linux_syslog(
+ l_int type,
+ char *buf,
+ l_int len
+ );
+ }
+117 AUE_PTRACE STD {
+ int linux_ptrace(
+ l_long req,
+ l_long pid,
+ l_ulong addr,
+ l_ulong data
+ );
+ }
+118 AUE_SCHED_SETPARAM STD {
+ int linux_sched_setparam(
+ l_pid_t pid,
+ struct sched_param *param
+ );
+ }
+119 AUE_SCHED_SETSCHEDULER STD {
+ int linux_sched_setscheduler(
+ l_pid_t pid,
+ l_int policy,
+ struct sched_param *param
+ );
+ }
+120 AUE_SCHED_GETSCHEDULER STD {
+ int linux_sched_getscheduler(
+ l_pid_t pid);
+ }
+121 AUE_SCHED_GETPARAM STD {
+ int linux_sched_getparam(
+ l_pid_t pid,
+ struct sched_param *param
+ );
+ }
+122 AUE_NULL STD {
+ int linux_sched_setaffinity(
+ l_pid_t pid,
+ l_uint len,
+ l_ulong *user_mask_ptr
+ );
+ }
+123 AUE_NULL STD {
+ int linux_sched_getaffinity(
+ l_pid_t pid,
+ l_uint len,
+ l_ulong *user_mask_ptr
+ );
+ }
+124 AUE_NULL NOPROTO {
+ int sched_yield(void);
+ }
+125 AUE_SCHED_GET_PRIORITY_MAX STD {
+ int linux_sched_get_priority_max(
+ l_int policy
+ );
+ }
+126 AUE_SCHED_GET_PRIORITY_MIN STD {
+ int linux_sched_get_priority_min(
+ l_int policy
+ );
+ }
+127 AUE_SCHED_RR_GET_INTERVAL STD {
+ int linux_sched_rr_get_interval(
+ l_pid_t pid,
+ struct l_timespec *interval
+ );
+ }
+128 AUE_NULL UNIMPL restart_syscall
+129 AUE_KILL STD {
+ int linux_kill(
+ l_pid_t pid,
+ l_int signum
+ );
+ }
+130 AUE_NULL STD {
+ int linux_tkill(
+ l_pid_t tid,
+ l_int sig
+ );
+ }
+131 AUE_NULL STD {
+ int linux_tgkill(
+ l_pid_t tgid,
+ l_pid_t pid,
+ l_int sig
+ );
+ }
+132 AUE_NULL STD {
+ int linux_sigaltstack(
+ l_stack_t *uss,
+ l_stack_t *uoss
+ );
+ }
+133 AUE_NULL STD {
+ int linux_rt_sigsuspend(
+ l_sigset_t *newset,
+ l_size_t sigsetsize
+ );
+ }
+134 AUE_NULL STD {
+ int linux_rt_sigaction(
+ l_int sig,
+ l_sigaction_t *act,
+ l_sigaction_t *oact,
+ l_size_t sigsetsize
+ );
+ }
+135 AUE_NULL STD {
+ int linux_rt_sigprocmask(
+ l_int how,
+ l_sigset_t *mask,
+ l_sigset_t *omask,
+ l_size_t sigsetsize
+ );
+ }
+136 AUE_NULL STD {
+ int linux_rt_sigpending(
+ l_sigset_t *set,
+ l_size_t sigsetsize
+ );
+ }
+137 AUE_NULL STD {
+ int linux_rt_sigtimedwait(
+ l_sigset_t *mask,
+ l_siginfo_t *ptr,
+ struct l_timeval *timeout,
+ l_size_t sigsetsize
+ );
+ }
+138 AUE_NULL STD {
+ int linux_rt_sigqueueinfo(
+ l_pid_t pid,
+ l_int sig,
+ l_siginfo_t *info
+ );
+ }
+139 AUE_NULL STD {
+ int linux_rt_sigreturn(
+ struct l_ucontext *ucp
+ );
+ }
+140 AUE_SETPRIORITY NOPROTO {
+ int setpriority(
+ int which,
+ int who,
+ int prio
+ );
+ }
+141 AUE_GETPRIORITY STD {
+ int linux_getpriority(
+ l_int which,
+ l_int who
+ );
+ }
+142 AUE_REBOOT STD {
+ int linux_reboot(
+ l_int magic1,
+ l_int magic2,
+ l_uint cmd,
+ void *arg
+ );
+ }
+143 AUE_SETREGID NOPROTO {
+ int setregid(
+ gid_t rgid,
+ gid_t egid);
+ }
+144 AUE_SETGID NOPROTO {
+ int setgid(
+ gid_t gid);
+ }
+145 AUE_SETREUID NOPROTO {
+ int setreuid(
+ uid_t ruid,
+ uid_t euid);
+ }
+146 AUE_SETUID NOPROTO {
+ int setuid(
+ uid_t uid);
+ }
+147 AUE_SETRESUID NOPROTO {
+ int setresuid(
+ uid_t ruid,
+ uid_t euid,
+ uid_t suid);
+ }
+148 AUE_GETRESUID NOPROTO {
+ int getresuid(
+ uid_t *ruid,
+ uid_t *euid,
+ uid_t *suid);
+ }
+149 AUE_SETRESGID NOPROTO {
+ int setresgid(
+ gid_t rgid,
+ gid_t egid,
+ gid_t sgid);
+ }
+150 AUE_GETRESGID NOPROTO {
+ int getresgid(
+ gid_t *rgid,
+ gid_t *egid,
+ gid_t *sgid);
+ }
+151 AUE_SETFSUID STD {
+ int linux_setfsuid(
+ l_uid_t uid);
+ }
+152 AUE_SETFSGID STD {
+ int linux_setfsgid(
+ l_gid_t gid);
+ }
+153 AUE_NULL STD {
+ int linux_times(
+ struct l_times_argv *buf
+ );
+ }
+154 AUE_SETPGRP NOPROTO {
+ int setpgid(
+ int pid,
+ int pgid);
+ }
+155 AUE_GETPGID NOPROTO {
+ int getpgid(
+ int pid);
+ }
+156 AUE_GETSID STD {
+ int linux_getsid(
+ l_pid_t pid);
+ }
+157 AUE_SETSID NOPROTO {
+ int setsid(void);
+ }
+158 AUE_GETGROUPS STD {
+ int linux_getgroups(
+ l_int gidsetsize,
+ l_gid_t *grouplist
+ );
+ }
+159 AUE_SETGROUPS STD {
+ int linux_setgroups(
+ l_int gidsetsize,
+ l_gid_t *grouplist
+ );
+ }
+160 AUE_NULL STD {
+ int linux_newuname(
+ struct l_new_utsname *buf
+ );
+ }
+161 AUE_SYSCTL STD {
+ int linux_sethostname(
+ char *hostname,
+ l_uint len
+ );
+ }
+162 AUE_SYSCTL STD {
+ int linux_setdomainname(
+ char *name,
+ l_int len
+ );
+ }
+163 AUE_GETRLIMIT STD {
+ int linux_getrlimit(
+ l_uint resource,
+ struct l_rlimit *rlim
+ );
+ }
+164 AUE_SETRLIMIT STD {
+ int linux_setrlimit(
+ l_uint resource,
+ struct l_rlimit *rlim
+ );
+ }
+165 AUE_GETRUSAGE NOPROTO {
+ int getrusage(
+ int who,
+ struct rusage *rusage
+ );
+ }
+166 AUE_UMASK NOPROTO {
+ int umask(
+ int newmask
+ );
+ }
+167 AUE_PRCTL STD {
+ int linux_prctl(
+ l_int option,
+ l_uintptr_t arg2,
+ l_uintptr_t arg3,
+ l_uintptr_t arg4,
+ l_uintptr_t arg5
+ );
+ }
+168 AUE_NULL STD {
+ int linux_getcpu(
+ l_uint *cpu,
+ l_uint *node,
+ void *cache
+ );
+ }
+169 AUE_NULL NOPROTO {
+ int gettimeofday(
+ struct l_timeval *tp,
+ struct timezone *tzp
+ );
+ }
+170 AUE_SETTIMEOFDAY NOPROTO {
+ int settimeofday(
+ struct l_timeval *tv,
+ struct timezone *tzp
+ );
+ }
+171 AUE_ADJTIME STD {
+ int linux_adjtimex(void);
+ }
+172 AUE_GETPID STD {
+ int linux_getpid(void);
+ }
+173 AUE_GETPPID STD {
+ int linux_getppid(void);
+ }
+174 AUE_GETUID STD {
+ int linux_getuid(void);
+ }
+175 AUE_GETEUID NOPROTO {
+ int geteuid(void);
+ }
+176 AUE_GETGID STD {
+ int linux_getgid(void);
+ }
+177 AUE_GETEGID NOPROTO {
+ int getegid(void);
+ }
+178 AUE_NULL STD {
+ int linux_gettid(void);
+ }
+179 AUE_NULL STD {
+ int linux_sysinfo(
+ struct l_sysinfo *info
+ );
+ }
+180 AUE_NULL STD {
+ int linux_mq_open(
+ const char *name,
+ l_int oflag,
+ l_mode_t mode,
+ struct mq_attr *attr
+ );
+ }
+181 AUE_NULL STD {
+ int linux_mq_unlink(
+ const char *name
+ );
+ }
+182 AUE_NULL STD {
+ int linux_mq_timedsend(
+ l_mqd_t mqd,
+ const char *msg_ptr,
+ l_size_t msg_len,
+ l_uint msg_prio,
+ const struct l_timespec *abs_timeout
+ );
+ }
+183 AUE_NULL STD {
+ int linux_mq_timedreceive(
+ l_mqd_t mqd,
+ char *msg_ptr,
+ l_size_t msg_len,
+ l_uint *msg_prio,
+ const struct l_timespec *abs_timeout
+ );
+ }
+184 AUE_NULL STD {
+ int linux_mq_notify(
+ l_mqd_t mqd,
+ const struct l_timespec *abs_timeout
+ );
+ }
+185 AUE_NULL STD {
+ int linux_mq_getsetattr(
+ l_mqd_t mqd,
+ const struct mq_attr *attr,
+ struct mq_attr *oattr
+ );
+ }
+186 AUE_NULL STD {
+ int linux_msgget(
+ l_key_t key,
+ l_int msgflg
+ );
+ }
+187 AUE_NULL STD {
+ int linux_msgctl(
+ l_int msqid,
+ l_int cmd,
+ struct l_msqid_ds *buf
+ );
+ }
+188 AUE_NULL STD {
+ int linux_msgrcv(
+ l_int msqid,
+ struct l_msgbuf *msgp,
+ l_size_t msgsz,
+ l_long msgtyp,
+ l_int msgflg
+ );
+ }
+189 AUE_NULL STD {
+ int linux_msgsnd(
+ l_int msqid,
+ struct l_msgbuf *msgp,
+ l_size_t msgsz,
+ l_int msgflg
+ );
+ }
+190 AUE_NULL STD {
+ int linux_semget(
+ l_key_t key,
+ l_int nsems,
+ l_int semflg
+ );
+ }
+191 AUE_NULL STD {
+ int linux_semctl(
+ l_int semid,
+ l_int semnum,
+ l_int cmd,
+ union l_semun arg
+ );
+ }
+192 AUE_NULL STD {
+ int linux_semtimedop(void);
+ }
+193 AUE_NULL STD {
+ int linux_semop(
+ l_int semid,
+ struct l_sembuf *tsops,
+ l_uint nsops
+ );
+ }
+194 AUE_NULL STD {
+ int linux_shmget(
+ l_key_t key,
+ l_size_t size,
+ l_int shmflg
+ );
+ }
+195 AUE_NULL STD {
+ int linux_shmctl(
+ l_int shmid,
+ l_int cmd,
+ struct l_shmid_ds *buf
+ );
+ }
+196 AUE_NULL STD {
+ int linux_shmat(
+ l_int shmid,
+ char *shmaddr,
+ l_int shmflg
+ );
+ }
+197 AUE_NULL STD {
+ int linux_shmdt(
+ char *shmaddr
+ );
+ }
+198 AUE_SOCKET STD {
+ int linux_socket(
+ l_int domain,
+ l_int type,
+ l_int protocol
+ );
+ }
+199 AUE_SOCKETPAIR STD {
+ int linux_socketpair(
+ l_int domain,
+ l_int type,
+ l_int protocol,
+ l_uintptr_t rsv
+ );
+ }
+200 AUE_BIND STD {
+ int linux_bind(
+ l_int s,
+ l_uintptr_t name,
+ l_int namelen
+ );
+ }
+201 AUE_LISTEN STD {
+ int linux_listen(
+ l_int s,
+ l_int backlog
+ );
+ }
+202 AUE_ACCEPT STD {
+ int linux_accept(
+ l_int s,
+ l_uintptr_t addr,
+ l_uintptr_t namelen
+ );
+ }
+203 AUE_CONNECT STD {
+ int linux_connect(
+ l_int s,
+ l_uintptr_t name,
+ l_int namelen
+ );
+ }
+204 AUE_GETSOCKNAME STD {
+ int linux_getsockname(
+ l_int s,
+ l_uintptr_t addr,
+ l_uintptr_t namelen
+ );
+ }
+205 AUE_GETPEERNAME STD {
+ int linux_getpeername(
+ l_int s,
+ l_uintptr_t addr,
+ l_uintptr_t namelen
+ );
+ }
+206 AUE_SENDTO STD {
+ int linux_sendto(
+ l_int s,
+ l_uintptr_t msg,
+ l_size_t len,
+ l_uint flags,
+ l_uintptr_t to,
+ l_int tolen
+ );
+ }
+207 AUE_RECVFROM STD {
+ int linux_recvfrom(
+ l_int s,
+ l_uintptr_t buf,
+ l_size_t len,
+ l_uint flags,
+ l_uintptr_t from,
+ l_uintptr_t fromlen
+ );
+ }
+208 AUE_SETSOCKOPT STD {
+ int linux_setsockopt(
+ l_int s,
+ l_int level,
+ l_int optname,
+ l_uintptr_t optval,
+ l_int optlen
+ );
+ }
+209 AUE_GETSOCKOPT STD {
+ int linux_getsockopt(
+ l_int s,
+ l_int level,
+ l_int optname,
+ l_uintptr_t optval,
+ l_uintptr_t optlen
+ );
+ }
+210 AUE_NULL STD {
+ int linux_shutdown(
+ l_int s,
+ l_int how
+ );
+ }
+211 AUE_SENDMSG STD {
+ int linux_sendmsg(
+ l_int s,
+ l_uintptr_t msg,
+ l_uint flags
+ );
+ }
+212 AUE_RECVMSG STD {
+ int linux_recvmsg(
+ l_int s,
+ l_uintptr_t msg,
+ l_uint flags
+ );
+ }
+213 AUE_NULL UNIMPL linux_readahead
+214 AUE_NULL STD {
+ int linux_brk(
+ l_ulong dsend);
+ }
+215 AUE_MUNMAP NOPROTO {
+ int munmap(
+ void *addr,
+ l_size_t len
+ );
+ }
+216 AUE_NULL STD {
+ int linux_mremap(
+ l_ulong addr,
+ l_ulong old_len,
+ l_ulong new_len,
+ l_ulong flags,
+ l_ulong new_addr
+ );
+ }
+217 AUE_NULL STD {
+ int linux_add_key(void);
+ }
+218 AUE_NULL STD {
+ int linux_request_key(void);
+ }
+219 AUE_NULL STD {
+ int linux_keyctl(void);
+ }
+220 AUE_RFORK STD {
+ int linux_clone(
+ l_ulong flags,
+ void *stack,
+ void *parent_tidptr,
+ void *tls,
+ void *child_tidptr
+ );
+ }
+221 AUE_EXECVE STD {
+ int linux_execve(
+ char *path,
+ char **argp,
+ char **envp
+ );
+ }
+222 AUE_MMAP STD {
+ int linux_mmap2(
+ l_ulong addr,
+ l_ulong len,
+ l_ulong prot,
+ l_ulong flags,
+ l_ulong fd,
+ l_ulong pgoff
+ );
+ }
+223 AUE_NULL STD {
+ int linux_fadvise64(
+ l_int fd,
+ l_loff_t offset,
+ l_size_t len,
+ l_int advice
+ );
+ }
+224 AUE_SWAPON NOPROTO {
+ int swapon(
+ char *name
+ );
+ }
+225 AUE_SWAPOFF STD {
+ int linux_swapoff(void);
+ }
+226 AUE_MPROTECT STD {
+ int linux_mprotect(
+ l_ulong addr,
+ l_size_t len,
+ l_ulong prot
+ );
+ }
+227 AUE_MSYNC STD {
+ int linux_msync(
+ l_ulong addr,
+ l_size_t len,
+ l_int fl
+ );
+ }
+228 AUE_MLOCK NOPROTO {
+ int mlock(
+ const void *addr,
+ size_t len
+ );
+ }
+229 AUE_MUNLOCK NOPROTO {
+ int munlock(
+ const void *addr,
+ size_t len
+ );
+ }
+230 AUE_MLOCKALL NOPROTO {
+ int mlockall(
+ int how
+ );
+ }
+231 AUE_MUNLOCKALL NOPROTO {
+ int munlockall(void);
+ }
+232 AUE_MINCORE STD {
+ int linux_mincore(
+ l_ulong start,
+ l_size_t len,
+ u_char *vec
+ );
+ }
+233 AUE_MADVISE STD {
+ int linux_madvise(
+ l_ulong addr,
+ l_size_t len,
+ l_int behav
+ );
+ }
+234 AUE_NULL STD {
+ int linux_remap_file_pages(void);
+ }
+235 AUE_NULL STD {
+ int linux_mbind(void);
+ }
+236 AUE_NULL STD {
+ int linux_get_mempolicy(void);
+ }
+237 AUE_NULL STD {
+ int linux_set_mempolicy(void);
+ }
+238 AUE_NULL STD {
+ int linux_migrate_pages(void);
+ }
+239 AUE_NULL STD {
+ int linux_move_pages(void);
+ }
+240 AUE_NULL STD {
+ int linux_rt_tgsigqueueinfo(
+ l_pid_t tgid,
+ l_pid_t tid,
+ l_int sig,
+ l_siginfo_t *uinfo
+ );
+ }
+241 AUE_NULL STD {
+ int linux_perf_event_open(void);
+ }
+242 AUE_ACCEPT STD {
+ int linux_accept4(
+ l_int s,
+ l_uintptr_t addr,
+ l_uintptr_t namelen,
+ l_int flags
+ );
+ }
+243 AUE_NULL STD {
+ int linux_recvmmsg(
+ l_int s,
+ struct l_mmsghdr *msg,
+ l_uint vlen,
+ l_uint flags,
+ struct l_timespec *timeout
+ );
+ }
+244-259 AUE_NULL UNIMPL unimpl_md_syscall
+260 AUE_WAIT4 STD {
+ int linux_wait4(
+ l_pid_t pid,
+ l_int *status,
+ l_int options,
+ struct rusage *rusage
+ );
+ }
+261 AUE_NULL STD {
+ int linux_prlimit64(
+ l_pid_t pid,
+ l_uint resource,
+ struct rlimit *new,
+ struct rlimit *old);
+ }
+262 AUE_NULL STD {
+ int linux_fanotify_init(void);
+ }
+263 AUE_NULL STD {
+ int linux_fanotify_mark(void);
+ }
+264 AUE_NULL STD {
+ int linux_name_to_handle_at(void);
+ }
+265 AUE_NULL STD {
+ int linux_open_by_handle_at(void);
+ }
+266 AUE_NULL STD {
+ int linux_clock_adjtime(void);
+ }
+267 AUE_SYNC STD {
+ int linux_syncfs(
+ l_int fd);
+ }
+268 AUE_NULL STD {
+ int linux_setns(
+ l_int fd,
+ l_int nstype
+ );
+ }
+269 AUE_NULL STD {
+ int linux_sendmmsg(
+ l_int s,
+ struct l_mmsghdr *msg,
+ l_uint vlen,
+ l_uint flags
+ );
+ }
+270 AUE_NULL STD {
+ int linux_process_vm_readv(
+ l_pid_t pid,
+ const struct iovec *lvec,
+ l_ulong liovcnt,
+ const struct iovec *rvec,
+ l_ulong riovcnt,
+ l_ulong flags
+ );
+ }
+271 AUE_NULL STD {
+ int linux_process_vm_writev(
+ l_pid_t pid,
+ const struct iovec *lvec,
+ l_ulong liovcnt,
+ const struct iovec *rvec,
+ l_ulong riovcnt,
+ l_ulong flags
+ );
+ }
+272 AUE_NULL STD {
+ int linux_kcmp(
+ l_pid_t pid1,
+ l_pid_t pid2,
+ l_int type,
+ l_ulong idx1,
+ l_ulong idx
+ );
+ }
+273 AUE_NULL STD {
+ int linux_finit_module(
+ l_int fd,
+ const char *uargs,
+ l_int flags
+ );
+ }
+274 AUE_NULL STD {
+ int linux_sched_setattr(
+ l_pid_t pid,
+ void *attr,
+ l_uint flags
+ );
+ }
+275 AUE_NULL STD {
+ int linux_sched_getattr(
+ l_pid_t pid,
+ void *attr,
+ l_uint size,
+ l_uint flags
+ );
+ }
+276 AUE_NULL STD {
+ int linux_renameat2(
+ l_int olddfd,
+ const char *oldname,
+ l_int newdfd,
+ const char *newname,
+ l_uint flags
+ );
+ }
+277 AUE_NULL STD {
+ int linux_seccomp(
+ l_uint op,
+ l_uint flags,
+ const char *uargs
+ );
+ }
+278 AUE_NULL STD {
+ int linux_getrandom(
+ char *buf,
+ l_size_t count,
+ l_uint flags
+ );
+ }
+279 AUE_NULL STD {
+ int linux_memfd_create(
+ const char *uname_ptr,
+ l_uint flags
+ );
+ }
+280 AUE_NULL STD {
+ int linux_bpf(
+ l_int cmd,
+ void *attr,
+ l_uint size
+ );
+ }
+281 AUE_NULL STD {
+ int linux_execveat(
+ l_int dfd,
+ const char *filename,
+ const char **argv,
+ const char **envp,
+ l_int flags
+ );
+ }
+282 AUE_NULL STD {
+ int linux_userfaultfd(
+ l_int flags
+ );
+ }
+283 AUE_NULL STD {
+ int linux_membarrier(
+ l_int cmd,
+ l_int flags
+ );
+ }
+284 AUE_NULL STD {
+ int linux_mlock2(
+ l_ulong start,
+ l_size_t len,
+ l_int flags
+ );
+ }
+285 AUE_NULL STD {
+ int linux_copy_file_range(
+ l_int fd_in,
+ l_loff_t *off_in,
+ l_int fd_out,
+ l_loff_t *off_out,
+ l_size_t len,
+ l_uint flags
+ );
+ }
+286 AUE_NULL STD {
+ int linux_preadv2(
+ l_ulong fd,
+ const struct iovec *vec,
+ l_ulong vlen,
+ l_ulong pos_l,
+ l_ulong pos_h,
+ l_int flags
+ );
+ }
+287 AUE_NULL STD {
+ int linux_pwritev2(
+ l_ulong fd,
+ const struct iovec *vec,
+ l_ulong vlen,
+ l_ulong pos_l,
+ l_ulong pos_h,
+ l_int flags
+ );
+ }
+288 AUE_NULL STD {
+ int linux_pkey_mprotect(
+ l_ulong start,
+ l_size_t len,
+ l_ulong prot,
+ l_int pkey
+ );
+ }
+289 AUE_NULL STD {
+ int linux_pkey_alloc(
+ l_ulong flags,
+ l_ulong init_val
+ );
+ }
+290 AUE_NULL STD {
+ int linux_pkey_free(
+ l_int pkey
+ );
+ }
+
+; please, keep this line at the end.
+291 AUE_NULL UNIMPL nosys
+
+; vim: syntax=off
diff --git a/sys/arm64/qoriq/clk/ls1046a_clkgen.c b/sys/arm64/qoriq/clk/ls1046a_clkgen.c
new file mode 100644
index 000000000000..3270201f4f3d
--- /dev/null
+++ b/sys/arm64/qoriq/clk/ls1046a_clkgen.c
@@ -0,0 +1,255 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Alstom Group.
+ * Copyright (c) 2020 Semihalf.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/rman.h>
+#include <machine/bus.h>
+
+#include <dev/fdt/simplebus.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/extres/clk/clk_fixed.h>
+
+#include <arm64/qoriq/clk/qoriq_clkgen.h>
+
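+/* Supported PLL output dividers; each list is terminated by 0. */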
+static uint8_t ls1046a_pltfrm_pll_divs[] = {
+ 2, 4, 0
+};
+
+static struct qoriq_clk_pll_def ls1046a_pltfrm_pll = {
+ .clkdef = {
+ .name = "ls1046a_platform_pll",
+ .id = QORIQ_CLK_ID(QORIQ_TYPE_PLATFORM_PLL, 0),
+ .flags = 0
+ },
+ .offset = 0xC00,
+ .shift = 1,
+ .mask = 0x7E,
+ .dividers = ls1046a_pltfrm_pll_divs,
+ .flags = 0
+};
+
+static const uint8_t ls1046a_cga1_pll_divs[] = {
+ 2, 3, 4, 0
+};
+
+static struct qoriq_clk_pll_def ls1046a_cga1_pll = {
+ .clkdef = {
+ .name = "ls1046a_cga_pll1",
+ .id = QORIQ_CLK_ID(QORIQ_TYPE_INTERNAL, 0),
+ .flags = 0
+ },
+ .offset = 0x800,
+ .shift = 1,
+ .mask = 0x1FE,
+ .dividers = ls1046a_cga1_pll_divs,
+ .flags = QORIQ_CLK_PLL_HAS_KILL_BIT
+};
+
+static struct qoriq_clk_pll_def ls1046a_cga2_pll = {
+ .clkdef = {
+ .name = "ls1046a_cga_pll2",
+ .id = QORIQ_CLK_ID(QORIQ_TYPE_INTERNAL, 20),
+ .flags = 0
+ },
+ .offset = 0x820,
+ .shift = 1,
+ .mask = 0x1FE,
+ .dividers = ls1046a_cga1_pll_divs,
+ .flags = QORIQ_CLK_PLL_HAS_KILL_BIT
+};
+
+static struct qoriq_clk_pll_def *ls1046a_cga_plls[] = {
+ &ls1046a_cga1_pll,
+ &ls1046a_cga2_pll
+};
+
+static const char *ls1046a_cmux0_parent_names[] = {
+ "ls1046a_cga_pll1",
+ NULL,
+ "ls1046a_cga_pll1_div2",
+ NULL,
+ "ls1046a_cga_pll2",
+ NULL,
+ "ls1046a_cga_pll2_div2"
+};
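+/*
+ * The NULL entries above appear to correspond to reserved mux selector
+ * encodings with no usable clock source on this SoC; the clock framework
+ * simply reports no parent for those indices.
+ */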
+
+static struct clk_mux_def ls1046a_cmux0 = {
+ .clkdef = {
+ .name = "ls1046a_cmux0",
+ .id = QORIQ_CLK_ID(QORIQ_TYPE_CMUX, 0),
+ .parent_names = ls1046a_cmux0_parent_names,
+ .parent_cnt = nitems(ls1046a_cmux0_parent_names),
+ .flags = 0
+ },
+ .offset = 0,
+ .shift = 27,
+ .width = 4,
+ .mux_flags = 0
+};
+
+static const char *ls1046a_hwaccel1_parent_names[] = {
+ NULL,
+ NULL,
+ "ls1046a_cga_pll1_div2",
+ "ls1046a_cga_pll1_div3",
+ "ls1046a_cga_pll1_div4",
+ "ls1046a_platform_pll",
+ "ls1046a_cga_pll2_div2",
+ "ls1046a_cga_pll2_div3"
+};
+
+static const char *ls1046a_hwaccel2_parent_names[] = {
+ NULL,
+ "ls1046a_cga_pll2",
+ "ls1046a_cga_pll2_div2",
+ "ls1046a_cga_pll2_div3",
+ NULL,
+ NULL,
+ "ls1046a_cga_pll1_div2"
+};
+
+static struct clk_mux_def ls1046a_hwaccel1 = {
+ .clkdef = {
+ .name = "ls1046a_hwaccel1",
+ .id = QORIQ_CLK_ID(QORIQ_TYPE_HWACCEL, 0),
+ .parent_names = ls1046a_hwaccel1_parent_names,
+ .parent_cnt = nitems(ls1046a_hwaccel1_parent_names),
+ .flags = 0
+ },
+ .offset = 0x10,
+ .shift = 27,
+ .width = 4,
+ .mux_flags = 0
+};
+
+static struct clk_mux_def ls1046a_hwaccel2 = {
+ .clkdef = {
+ .name = "ls1046a_hwaccel2",
+ .id = QORIQ_CLK_ID(QORIQ_TYPE_HWACCEL, 1),
+ .parent_names = ls1046a_hwaccel2_parent_names,
+ .parent_cnt = nitems(ls1046a_hwaccel2_parent_names),
+ .flags = 0
+ },
+ .offset = 0x30,
+ .shift = 27,
+ .width = 4,
+ .mux_flags = 0
+};
+
+static struct clk_mux_def *ls1046a_mux_nodes[] = {
+ &ls1046a_cmux0,
+ &ls1046a_hwaccel1,
+ &ls1046a_hwaccel2
+};
+
+static const char *ls1046a_fman_srcs[] = {
+ "ls1046a_hwaccel1"
+};
+
+static int ls1046a_clkgen_probe(device_t);
+static int ls1046a_clkgen_attach(device_t);
+
+static device_method_t ls1046a_clkgen_methods[] = {
+ DEVMETHOD(device_probe, ls1046a_clkgen_probe),
+ DEVMETHOD(device_attach, ls1046a_clkgen_attach),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(ls1046a_clkgen, ls1046a_clkgen_driver, ls1046a_clkgen_methods,
+ sizeof(struct qoriq_clkgen_softc), qoriq_clkgen_driver);
+
+static devclass_t ls1046a_clkgen_devclass;
+
+EARLY_DRIVER_MODULE(ls1046a_clkgen, simplebus, ls1046a_clkgen_driver,
+ ls1046a_clkgen_devclass, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
+
+static int
+ls1046a_fman_init(device_t dev)
+{
+ struct qoriq_clkgen_softc *sc;
+ struct clk_fixed_def def;
+ int error;
+
+ sc = device_get_softc(dev);
+
+	def.clkdef.name = "ls1046a_fman";
+	def.clkdef.id = QORIQ_CLK_ID(QORIQ_TYPE_FMAN, 0);
+ def.clkdef.parent_names = ls1046a_fman_srcs;
+ def.clkdef.parent_cnt = nitems(ls1046a_fman_srcs);
+ def.clkdef.flags = 0;
+ def.freq = 0;
+ def.mult = 1;
+ def.div = 1;
+ def.fixed_flags = 0;
+
+ error = clknode_fixed_register(sc->clkdom, &def);
+ return (error);
+}
+
+static int
+ls1046a_clkgen_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+	if (!ofw_bus_is_compatible(dev, "fsl,ls1046a-clockgen"))
+ return (ENXIO);
+
+ device_set_desc(dev, "LS1046A clockgen");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+ls1046a_clkgen_attach(device_t dev)
+{
+ struct qoriq_clkgen_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ sc->pltfrm_pll_def = &ls1046a_pltfrm_pll;
+ sc->cga_pll = ls1046a_cga_plls;
+ sc->cga_pll_num = nitems(ls1046a_cga_plls);
+ sc->mux = ls1046a_mux_nodes;
+ sc->mux_num = nitems(ls1046a_mux_nodes);
+ sc->init_func = ls1046a_fman_init;
+ sc->flags = 0;
+
+ return (qoriq_clkgen_attach(dev));
+}
diff --git a/sys/arm64/qoriq/clk/qoriq_clk_pll.c b/sys/arm64/qoriq/clk/qoriq_clk_pll.c
new file mode 100644
index 000000000000..813bc76c3349
--- /dev/null
+++ b/sys/arm64/qoriq/clk/qoriq_clk_pll.c
@@ -0,0 +1,152 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Alstom Group.
+ * Copyright (c) 2020 Semihalf.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <dev/extres/clk/clk.h>
+#include <dev/extres/clk/clk_fixed.h>
+
+#include <arm64/qoriq/clk/qoriq_clkgen.h>
+
+#include "clkdev_if.h"
+
+struct qoriq_clk_pll_softc {
+ bus_addr_t offset;
+
+ uint32_t mask;
+ uint32_t shift;
+
+ uint32_t flags;
+};
+
+#define WR4(_clk, offset, val) \
+ CLKDEV_WRITE_4(clknode_get_device(_clk), offset, val)
+#define RD4(_clk, offset, val) \
+ CLKDEV_READ_4(clknode_get_device(_clk), offset, val)
+#define DEVICE_LOCK(_clk) \
+ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk))
+#define DEVICE_UNLOCK(_clk) \
+ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk))
+
+#define QORIQ_PLL_KILL_BIT (1 << 31)
+
+static int
+qoriq_clk_pll_init(struct clknode *clk, device_t dev)
+{
+
+ clknode_init_parent_idx(clk, 0);
+
+ return (0);
+}
+
+static int
+qoriq_clk_pll_recalc_freq(struct clknode *clk, uint64_t *freq)
+{
+ struct qoriq_clk_pll_softc *sc;
+ uint32_t mul;
+
+ sc = clknode_get_softc(clk);
+
+ RD4(clk, sc->offset, &mul);
+
+	if ((sc->flags & QORIQ_CLK_PLL_HAS_KILL_BIT) &&
+	    (mul & QORIQ_PLL_KILL_BIT))
+		return (0);
+
+ mul &= sc->mask;
+ mul >>= sc->shift;
+
+ *freq = *freq * mul;
+
+ return (0);
+}
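+/*
+ * Worked example of the computation above (illustrative numbers only): with
+ * a 100 MHz reference clock and a multiplier field value of 4 read from the
+ * register at 'offset', the PLL node reports 100 MHz * 4 = 400 MHz, and the
+ * fixed "<pll>_divN" children registered in qoriq_clk_pll_register() below
+ * report that rate divided by each entry of the divider table.
+ */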
+
+static clknode_method_t qoriq_clk_pll_clknode_methods[] = {
+ CLKNODEMETHOD(clknode_init, qoriq_clk_pll_init),
+ CLKNODEMETHOD(clknode_recalc_freq, qoriq_clk_pll_recalc_freq),
+
+ CLKNODEMETHOD_END
+};
+
+DEFINE_CLASS_1(qoriq_clk_pll_clknode, qoriq_clk_pll_clknode_class,
+ qoriq_clk_pll_clknode_methods, sizeof(struct qoriq_clk_pll_softc),
+ clknode_class);
+
+int
+qoriq_clk_pll_register(struct clkdom *clkdom,
+ const struct qoriq_clk_pll_def *clkdef)
+{
+ char namebuf[QORIQ_CLK_NAME_MAX_LEN];
+ struct qoriq_clk_pll_softc *sc;
+ struct clk_fixed_def def;
+ const char *parent_name;
+ struct clknode *clk;
+ int error;
+ int i;
+
+ clk = clknode_create(clkdom, &qoriq_clk_pll_clknode_class,
+ &clkdef->clkdef);
+ if (clk == NULL)
+ return (1);
+
+ sc = clknode_get_softc(clk);
+ sc->mask = clkdef->mask;
+ sc->shift = clkdef->shift;
+ sc->flags = clkdef->flags;
+ sc->offset = clkdef->offset;
+
+ clknode_register(clkdom, clk);
+
+ parent_name = clkdef->clkdef.name;
+
+	memset(&def, 0, sizeof(def));
+	def.clkdef.parent_names = &parent_name;
+ def.clkdef.parent_cnt = 1;
+ def.clkdef.name = namebuf;
+ def.mult = 1;
+ def.freq = 0;
+
+ i = 0;
+ while (clkdef->dividers[i] != 0) {
+ def.div = clkdef->dividers[i];
+ def.clkdef.id = clkdef->clkdef.id + i;
+ snprintf(namebuf, QORIQ_CLK_NAME_MAX_LEN, "%s_div%d",
+ parent_name, clkdef->dividers[i]);
+
+ error = clknode_fixed_register(clkdom, &def);
+ if (error != 0)
+ return (error);
+
+ i++;
+ }
+
+ return (0);
+}
diff --git a/sys/arm64/qoriq/clk/qoriq_clk_pll.h b/sys/arm64/qoriq/clk/qoriq_clk_pll.h
new file mode 100644
index 000000000000..c9600f19e205
--- /dev/null
+++ b/sys/arm64/qoriq/clk/qoriq_clk_pll.h
@@ -0,0 +1,52 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Alstom Group.
+ * Copyright (c) 2020 Semihalf.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef _QORIQ_CLK_PLL_H_
+#define _QORIQ_CLK_PLL_H_
+
+#include <dev/extres/clk/clk.h>
+
+#define QORIQ_CLK_PLL_HAS_KILL_BIT 0x01
+
+struct qoriq_clk_pll_def {
+ struct clknode_init_def clkdef;
+
+ bus_addr_t offset;
+ uint32_t mask;
+ uint8_t shift;
+ const uint8_t *dividers;
+ uint8_t flags;
+};
+
+int qoriq_clk_pll_register(struct clkdom *clkdom,
+ const struct qoriq_clk_pll_def *clkdef);
+
+#endif /* _QORIQ_CLK_PLL_H_ */
diff --git a/sys/arm64/qoriq/clk/qoriq_clkgen.c b/sys/arm64/qoriq/clk/qoriq_clkgen.c
new file mode 100644
index 000000000000..67fdf5d89624
--- /dev/null
+++ b/sys/arm64/qoriq/clk/qoriq_clkgen.c
@@ -0,0 +1,319 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Alstom Group.
+ * Copyright (c) 2020 Semihalf.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/endian.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <machine/bus.h>
+
+#include <dev/fdt/simplebus.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/extres/clk/clk_fixed.h>
+
+#include <arm64/qoriq/clk/qoriq_clkgen.h>
+
+#include "clkdev_if.h"
+
+MALLOC_DEFINE(M_QORIQ_CLKGEN, "qoriq_clkgen", "qoriq_clkgen");
+
+static struct resource_spec qoriq_clkgen_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { -1, 0 }
+};
+
+static const char *qoriq_pll_parents_coreclk[] = {
+ QORIQ_CORECLK_NAME
+};
+
+static const char *qoriq_pll_parents_sysclk[] = {
+ QORIQ_SYSCLK_NAME
+};
+
+static int
+qoriq_clkgen_ofw_mapper(struct clkdom *clkdom, uint32_t ncells,
+ phandle_t *cells, struct clknode **clk)
+{
+
+ if (ncells != 2)
+ return (EINVAL);
+
+ if (cells[0] > 5)
+ return (EINVAL);
+
+ if (cells[0] == QORIQ_TYPE_SYSCLK || cells[0] == QORIQ_TYPE_CORECLK)
+ if (cells[1] != 0)
+ return (EINVAL);
+
+ *clk = clknode_find_by_id(clkdom, QORIQ_CLK_ID(cells[0], cells[1]));
+
+	if (*clk == NULL)
+ return (EINVAL);
+
+ return (0);
+}
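+/*
+ * With the mapping above, a device-tree consumer references a clock as a
+ * <type index> pair; for instance "clocks = <&clockgen 4 0>" would resolve
+ * to the platform PLL (QORIQ_TYPE_PLATFORM_PLL) and "<&clockgen 1 0>" to the
+ * first CMUX output.  This is an illustration of the decoding done here, not
+ * a binding reference.
+ */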
+
+static int
+qoriq_clkgen_write_4(device_t dev, bus_addr_t addr, uint32_t val)
+{
+ struct qoriq_clkgen_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ if (sc->flags & QORIQ_LITTLE_ENDIAN)
+ bus_write_4(sc->res, addr, htole32(val));
+ else
+ bus_write_4(sc->res, addr, htobe32(val));
+ return (0);
+}
+
+static int
+qoriq_clkgen_read_4(device_t dev, bus_addr_t addr, uint32_t *val)
+{
+ struct qoriq_clkgen_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ if (sc->flags & QORIQ_LITTLE_ENDIAN)
+ *val = le32toh(bus_read_4(sc->res, addr));
+ else
+ *val = be32toh(bus_read_4(sc->res, addr));
+ return (0);
+}
+
+static int
+qoriq_clkgen_modify_4(device_t dev, bus_addr_t addr, uint32_t clr,
+ uint32_t set)
+{
+ struct qoriq_clkgen_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+
+ if (sc->flags & QORIQ_LITTLE_ENDIAN)
+ reg = le32toh(bus_read_4(sc->res, addr));
+ else
+ reg = be32toh(bus_read_4(sc->res, addr));
+
+ reg &= ~clr;
+ reg |= set;
+
+ if (sc->flags & QORIQ_LITTLE_ENDIAN)
+ bus_write_4(sc->res, addr, htole32(reg));
+ else
+ bus_write_4(sc->res, addr, htobe32(reg));
+
+ return (0);
+}
+
+static void
+qoriq_clkgen_device_lock(device_t dev)
+{
+ struct qoriq_clkgen_softc *sc;
+
+ sc = device_get_softc(dev);
+ mtx_lock(&sc->mtx);
+}
+
+static void
+qoriq_clkgen_device_unlock(device_t dev)
+{
+ struct qoriq_clkgen_softc *sc;
+
+ sc = device_get_softc(dev);
+ mtx_unlock(&sc->mtx);
+}
+
+static device_method_t qoriq_clkgen_methods[] = {
+ DEVMETHOD(clkdev_write_4, qoriq_clkgen_write_4),
+ DEVMETHOD(clkdev_read_4, qoriq_clkgen_read_4),
+ DEVMETHOD(clkdev_modify_4, qoriq_clkgen_modify_4),
+ DEVMETHOD(clkdev_device_lock, qoriq_clkgen_device_lock),
+ DEVMETHOD(clkdev_device_unlock, qoriq_clkgen_device_unlock),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(qoriq_clkgen, qoriq_clkgen_driver, qoriq_clkgen_methods,
+ sizeof(struct qoriq_clkgen_softc));
+
+static int
+qoriq_clkgen_create_sysclk(device_t dev)
+{
+ struct qoriq_clkgen_softc *sc;
+ struct clk_fixed_def def;
+ const char *clkname;
+ phandle_t node;
+ uint32_t freq;
+ clk_t clock;
+ int rv;
+
+ sc = device_get_softc(dev);
+ node = ofw_bus_get_node(dev);
+ sc->has_coreclk = false;
+
+ memset(&def, 0, sizeof(def));
+
+ rv = OF_getencprop(node, "clock-frequency", &freq, sizeof(freq));
+ if (rv > 0) {
+ def.clkdef.name = QORIQ_SYSCLK_NAME;
+ def.clkdef.id = QORIQ_CLK_ID(QORIQ_TYPE_SYSCLK, 0);
+ def.freq = freq;
+
+ rv = clknode_fixed_register(sc->clkdom, &def);
+ return (rv);
+ } else {
+ /*
+ * As both sysclk and coreclk need to be accessible from
+ * device tree, create internal 1:1 divider nodes.
+ */
+ def.clkdef.parent_cnt = 1;
+ def.freq = 0;
+ def.mult = 1;
+ def.div = 1;
+
+ rv = clk_get_by_ofw_name(dev, node, "coreclk", &clock);
+ if (rv == 0) {
+ def.clkdef.name = QORIQ_CORECLK_NAME;
+ clkname = clk_get_name(clock);
+ def.clkdef.parent_names = &clkname;
+ def.clkdef.id = QORIQ_CLK_ID(QORIQ_TYPE_CORECLK, 0);
+
+ rv = clknode_fixed_register(sc->clkdom, &def);
+ if (rv)
+ return (rv);
+
+ sc->has_coreclk = true;
+ }
+
+ rv = clk_get_by_ofw_name(dev, node, "sysclk", &clock);
+ if (rv != 0) {
+ rv = clk_get_by_ofw_index(dev, node, 0, &clock);
+ if (rv != 0)
+ return (rv);
+ }
+
+ clkname = clk_get_name(clock);
+ def.clkdef.name = QORIQ_SYSCLK_NAME;
+ def.clkdef.id = QORIQ_CLK_ID(QORIQ_TYPE_SYSCLK, 0);
+ def.clkdef.parent_names = &clkname;
+
+ rv = clknode_fixed_register(sc->clkdom, &def);
+ return (rv);
+ }
+}
+
+int
+qoriq_clkgen_attach(device_t dev)
+{
+ struct qoriq_clkgen_softc *sc;
+ int i, error;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ if (bus_alloc_resources(dev, qoriq_clkgen_spec, &sc->res) != 0) {
+ device_printf(dev, "Cannot allocate resources.\n");
+ return (ENXIO);
+ }
+
+ mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF);
+
+ sc->clkdom = clkdom_create(dev);
+ if (sc->clkdom == NULL)
+ panic("Cannot create clock domain.\n");
+
+ error = qoriq_clkgen_create_sysclk(dev);
+ if (error != 0) {
+ device_printf(dev, "Cannot create sysclk.\n");
+ return (error);
+ }
+
+ sc->pltfrm_pll_def->clkdef.parent_names = qoriq_pll_parents_sysclk;
+ sc->pltfrm_pll_def->clkdef.parent_cnt = 1;
+ error = qoriq_clk_pll_register(sc->clkdom, sc->pltfrm_pll_def);
+ if (error != 0) {
+ device_printf(dev, "Cannot create platform PLL.\n");
+ return (error);
+ }
+
+ for (i = 0; i < sc->cga_pll_num; i++) {
+		if (sc->has_coreclk)
+			sc->cga_pll[i]->clkdef.parent_names =
+			    qoriq_pll_parents_coreclk;
+		else
+			sc->cga_pll[i]->clkdef.parent_names =
+			    qoriq_pll_parents_sysclk;
+ sc->cga_pll[i]->clkdef.parent_cnt = 1;
+
+ error = qoriq_clk_pll_register(sc->clkdom, sc->cga_pll[i]);
+ if (error != 0) {
+			device_printf(dev, "Cannot create CGA PLLs.\n");
+ return (error);
+ }
+ }
+
+ /*
+ * Both CMUX and HWACCEL multiplexer nodes can be represented
+ * by using built in clk_mux nodes.
+ */
+ for (i = 0; i < sc->mux_num; i++) {
+ error = clknode_mux_register(sc->clkdom, sc->mux[i]);
+ if (error != 0) {
+ device_printf(dev, "Cannot create MUX nodes.\n");
+ return (error);
+ }
+ }
+
+ if (sc->init_func != NULL) {
+ error = sc->init_func(dev);
+ if (error) {
+ device_printf(dev, "Clock init function failed.\n");
+ return (error);
+ }
+ }
+
+ clkdom_set_ofw_mapper(sc->clkdom, qoriq_clkgen_ofw_mapper);
+
+ if (clkdom_finit(sc->clkdom) != 0)
+ panic("Cannot finalize clock domain initialization.\n");
+
+ if (bootverbose)
+ clkdom_dump(sc->clkdom);
+
+ return (0);
+}
diff --git a/sys/arm64/qoriq/clk/qoriq_clkgen.h b/sys/arm64/qoriq/clk/qoriq_clkgen.h
new file mode 100644
index 000000000000..b08b5d23bebb
--- /dev/null
+++ b/sys/arm64/qoriq/clk/qoriq_clkgen.h
@@ -0,0 +1,96 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Alstom Group.
+ * Copyright (c) 2020 Semihalf.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef _QORIQ_CLKGEN_H_
+#define _QORIQ_CLKGEN_H_
+
+#include <dev/extres/clk/clk.h>
+#include <dev/extres/clk/clk_mux.h>
+
+#include <arm64/qoriq/clk/qoriq_clk_pll.h>
+
+#define QORIQ_CLK_NAME_MAX_LEN 32
+
+#define QORIQ_LITTLE_ENDIAN 0x01
+
+#define QORIQ_TYPE_SYSCLK 0
+#define QORIQ_TYPE_CMUX 1
+#define QORIQ_TYPE_HWACCEL 2
+#define QORIQ_TYPE_FMAN 3
+#define QORIQ_TYPE_PLATFORM_PLL 4
+#define QORIQ_TYPE_CORECLK 5
+#define QORIQ_TYPE_INTERNAL 6
+
+#define PLL_DIV1 0
+#define PLL_DIV2 1
+#define PLL_DIV3 2
+#define PLL_DIV4 3
+#define PLL_DIV5 4
+#define PLL_DIV6 5
+#define PLL_DIV7 6
+#define PLL_DIV8 7
+#define PLL_DIV9 8
+#define PLL_DIV10 9
+#define PLL_DIV11 10
+#define PLL_DIV12 11
+#define PLL_DIV13 12
+#define PLL_DIV14 13
+#define PLL_DIV15 14
+#define PLL_DIV16 15
+
+#define	QORIQ_CLK_ID(_type, _index)	(((_type) << 8) + (_index))
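+/*
+ * For example, QORIQ_CLK_ID(QORIQ_TYPE_CMUX, 1) encodes to 0x101 and
+ * QORIQ_CLK_ID(QORIQ_TYPE_PLATFORM_PLL, 0) to 0x400: the high byte carries
+ * the clock type and the low-order bits the index within that type.
+ */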
+
+#define QORIQ_SYSCLK_NAME "clockgen_sysclk"
+#define QORIQ_CORECLK_NAME "clockgen_coreclk"
+
+typedef int (*qoriq_init_func_t)(device_t);
+
+struct qoriq_clkgen_softc {
+ device_t dev;
+ struct resource *res;
+ struct clkdom *clkdom;
+ struct mtx mtx;
+ struct qoriq_clk_pll_def *pltfrm_pll_def;
+ struct qoriq_clk_pll_def **cga_pll;
+ int cga_pll_num;
+ struct clk_mux_def **mux;
+ int mux_num;
+ qoriq_init_func_t init_func;
+ uint32_t flags;
+ bool has_coreclk;
+};
+
+MALLOC_DECLARE(M_QORIQ_CLKGEN);
+DECLARE_CLASS(qoriq_clkgen_driver);
+
+int qoriq_clkgen_attach(device_t);
+
+#endif /* _QORIQ_CLKGEN_H_ */
diff --git a/sys/arm64/qoriq/ls1046_gpio.c b/sys/arm64/qoriq/ls1046_gpio.c
new file mode 100644
index 000000000000..7e72a2c9f5ed
--- /dev/null
+++ b/sys/arm64/qoriq/ls1046_gpio.c
@@ -0,0 +1,585 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Alstom Group.
+ * Copyright (c) 2020 Semihalf.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+
+#include <sys/bus.h>
+#include <sys/endian.h>
+#include <sys/gpio.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+
+#include <dev/gpio/gpiobusvar.h>
+#include <dev/ofw/ofw_bus.h>
+#include <machine/bus.h>
+
+#include "gpio_if.h"
+
+/* constants */
+enum {
+ DIRECTION = 0x0,
+ OPEN_DRAIN = 0x4,
+ DATA = 0x8,
+ INT_EV = 0xC,
+ INT_MASK = 0x10,
+ INT_CTRL = 0x14
+};
+
+#define PIN_COUNT 32
+#define DEFAULT_CAPS \
+ (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT | \
+ GPIO_PIN_OPENDRAIN | GPIO_PIN_PUSHPULL)
+#define GPIO(n) (1 << (31 - (n)))
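+/*
+ * Register bits are numbered MSB-first, so GPIO(0) is bit 31 of the
+ * big-endian register and GPIO(31) is bit 0; the single-pin helpers below
+ * use this macro when addressing a pin.
+ */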
+
+struct gpio_res {
+ int mem_rid;
+ struct resource *mem_res;
+};
+
+/* software context */
+struct gpio_softc {
+ device_t dev;
+ device_t busdev;
+ struct gpio_res res;
+ struct gpio_pin setup[PIN_COUNT];
+ struct mtx mutex;
+};
+
+#define QORIQ_GPIO_LOCK(_sc) mtx_lock_spin(&(_sc)->mutex)
+#define QORIQ_GPIO_UNLOCK(_sc) mtx_unlock_spin(&(_sc)->mutex)
+#define QORIQ_GPIO_ASSERT_LOCKED(_sc) mtx_assert(&(_sc)->mutex, MA_OWNED)
+
+/* prototypes */
+/* helpers */
+static int qoriq_make_gpio_res(device_t, struct gpio_res*);
+static uint32_t qoriq_gpio_reg_read(device_t, uint32_t);
+static void qoriq_gpio_reg_write(device_t, uint32_t, uint32_t);
+static void qoriq_gpio_reg_set(device_t, uint32_t, uint32_t);
+static void qoriq_gpio_reg_clear(device_t, uint32_t, uint32_t);
+static void qoriq_gpio_out_en(device_t, uint32_t, uint8_t);
+static void qoriq_gpio_value_set(device_t, uint32_t, uint8_t);
+static uint32_t qoriq_gpio_value_get(device_t, uint32_t);
+static void qoriq_gpio_open_drain_set(device_t, uint32_t, uint8_t);
+static int qoriq_gpio_configure(device_t, uint32_t, uint32_t);
+
+/* GPIO API */
+static int qoriq_gpio_probe(device_t);
+static int qoriq_gpio_attach(device_t);
+static device_t qoriq_gpio_get_bus(device_t);
+static int qoriq_gpio_pin_max(device_t, int*);
+static int qoriq_gpio_pin_getname(device_t, uint32_t, char*);
+static int qoriq_gpio_pin_getflags(device_t, uint32_t, uint32_t*);
+static int qoriq_gpio_pin_setflags(device_t, uint32_t, uint32_t);
+static int qoriq_gpio_pin_getcaps(device_t, uint32_t, uint32_t*);
+static int qoriq_gpio_pin_get(device_t, uint32_t, uint32_t*);
+static int qoriq_gpio_pin_set(device_t, uint32_t, uint32_t);
+static int qoriq_gpio_pin_toggle(device_t, uint32_t);
+static int qoriq_gpio_map_gpios(device_t, phandle_t, phandle_t,
+ int, pcell_t*, uint32_t*, uint32_t*);
+static int qoriq_gpio_pin_access_32(device_t, uint32_t, uint32_t, uint32_t,
+ uint32_t*);
+static int qoriq_gpio_pin_config_32(device_t, uint32_t, uint32_t, uint32_t*);
+
+static device_method_t qoriq_gpio_methods[] = {
+ DEVMETHOD(device_probe, qoriq_gpio_probe),
+ DEVMETHOD(device_attach, qoriq_gpio_attach),
+
+ /* GPIO protocol */
+ DEVMETHOD(gpio_get_bus, qoriq_gpio_get_bus),
+ DEVMETHOD(gpio_pin_max, qoriq_gpio_pin_max),
+ DEVMETHOD(gpio_pin_getname, qoriq_gpio_pin_getname),
+ DEVMETHOD(gpio_pin_getflags, qoriq_gpio_pin_getflags),
+ DEVMETHOD(gpio_pin_setflags, qoriq_gpio_pin_setflags),
+ DEVMETHOD(gpio_pin_getcaps, qoriq_gpio_pin_getcaps),
+ DEVMETHOD(gpio_pin_get, qoriq_gpio_pin_get),
+ DEVMETHOD(gpio_pin_set, qoriq_gpio_pin_set),
+ DEVMETHOD(gpio_pin_toggle, qoriq_gpio_pin_toggle),
+ DEVMETHOD(gpio_map_gpios, qoriq_gpio_map_gpios),
+ DEVMETHOD(gpio_pin_access_32, qoriq_gpio_pin_access_32),
+ DEVMETHOD(gpio_pin_config_32, qoriq_gpio_pin_config_32),
+
+ DEVMETHOD_END
+};
+
+static driver_t gpio_driver = {
+ "gpio",
+ qoriq_gpio_methods,
+ sizeof(struct gpio_softc),
+};
+
+static devclass_t gpio_devclass;
+
+DRIVER_MODULE(gpio, simplebus, gpio_driver, gpio_devclass, 0, 0);
+MODULE_VERSION(gpio, 1);
+
+/*
+ * helpers
+ */
+static int
+qoriq_make_gpio_res(device_t dev, struct gpio_res *out)
+{
+
+ out->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &out->mem_rid, RF_ACTIVE | RF_SHAREABLE);
+
+	if (out->mem_res == NULL)
+		return (1);
+
+	return (0);
+}
+
+static uint32_t
+qoriq_gpio_reg_read(device_t dev, uint32_t reg)
+{
+ struct gpio_softc *sc = device_get_softc(dev);
+ uint32_t result;
+
+ QORIQ_GPIO_ASSERT_LOCKED(sc);
+ result = bus_read_4(sc->res.mem_res, reg);
+	return (be32toh(result));
+}
+
+static void
+qoriq_gpio_reg_write(device_t dev, uint32_t reg, uint32_t val)
+{
+ struct gpio_softc *sc = device_get_softc(dev);
+
+ QORIQ_GPIO_ASSERT_LOCKED(sc);
+ val = htobe32(val);
+
+ bus_write_4(sc->res.mem_res, reg, val);
+ bus_barrier(sc->res.mem_res, reg, 4, BUS_SPACE_BARRIER_READ
+ | BUS_SPACE_BARRIER_WRITE);
+}
+
+static void
+qoriq_gpio_reg_set(device_t dev, uint32_t reg, uint32_t pin)
+{
+ uint32_t reg_val;
+
+ reg_val = qoriq_gpio_reg_read(dev, reg);
+ reg_val |= GPIO(pin);
+ qoriq_gpio_reg_write(dev, reg, reg_val);
+}
+
+static void
+qoriq_gpio_reg_clear(device_t dev, uint32_t reg, uint32_t pin)
+{
+ uint32_t reg_val;
+
+ reg_val = qoriq_gpio_reg_read(dev, reg);
+ reg_val &= ~(GPIO(pin));
+ qoriq_gpio_reg_write(dev, reg, reg_val);
+}
+
+static void
+qoriq_gpio_out_en(device_t dev, uint32_t pin, uint8_t enable)
+{
+
+ if (pin >= PIN_COUNT)
+ return;
+
+ if (enable) {
+ qoriq_gpio_reg_set(dev, DIRECTION, pin);
+ } else {
+ qoriq_gpio_reg_clear(dev, DIRECTION, pin);
+ }
+}
+
+static void
+qoriq_gpio_value_set(device_t dev, uint32_t pin, uint8_t val)
+{
+
+ if (pin >= PIN_COUNT)
+ return;
+
+ if (val) {
+ qoriq_gpio_reg_set(dev, DATA, pin);
+ } else {
+ qoriq_gpio_reg_clear(dev, DATA, pin);
+ }
+}
+
+static uint32_t
+qoriq_gpio_value_get(device_t dev, uint32_t pin)
+{
+ uint32_t reg_val;
+
+ if (pin >= PIN_COUNT)
+ return (0);
+
+ reg_val = qoriq_gpio_reg_read(dev, DATA);
+ return ((reg_val & GPIO(pin)) == 0 ? 0 : 1);
+}
+
+static void
+qoriq_gpio_open_drain_set(device_t dev, uint32_t pin, uint8_t val)
+{
+
+ if (pin >= PIN_COUNT) {
+ return;
+ }
+
+ if (val) {
+ qoriq_gpio_reg_set(dev, OPEN_DRAIN, pin);
+ } else {
+ qoriq_gpio_reg_clear(dev, OPEN_DRAIN, pin);
+ }
+}
+
+static int
+qoriq_gpio_configure(device_t dev, uint32_t pin, uint32_t flags)
+{
+ struct gpio_softc *sc = device_get_softc(dev);
+	uint32_t newflags = 0;
+
+ if (pin >= PIN_COUNT) {
+ return (EINVAL);
+ }
+
+ /*
+ * Pin cannot function as input and output at the same time.
+ * The same applies to open-drain and push-pull functionality.
+ */
+ if (((flags & GPIO_PIN_INPUT) && (flags & GPIO_PIN_OUTPUT))
+ || ((flags & GPIO_PIN_OPENDRAIN) && (flags & GPIO_PIN_PUSHPULL))) {
+ return (EINVAL);
+ }
+
+ QORIQ_GPIO_ASSERT_LOCKED(sc);
+
+ if (flags & GPIO_PIN_INPUT) {
+ newflags = GPIO_PIN_INPUT;
+ qoriq_gpio_out_en(dev, pin, 0);
+ }
+
+ if (flags & GPIO_PIN_OUTPUT) {
+ newflags = GPIO_PIN_OUTPUT;
+ qoriq_gpio_out_en(dev, pin, 1);
+
+ if (flags & GPIO_PIN_OPENDRAIN) {
+ newflags |= GPIO_PIN_OPENDRAIN;
+ qoriq_gpio_open_drain_set(dev, pin, 1);
+ } else {
+ newflags |= GPIO_PIN_PUSHPULL;
+ qoriq_gpio_open_drain_set(dev, pin, 0);
+ }
+ }
+
+ sc->setup[pin].gp_flags = newflags;
+
+ return (0);
+}
+
+/* GPIO API */
+static int
+qoriq_gpio_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev)) {
+ return (ENXIO);
+ }
+
+ if (!ofw_bus_is_compatible(dev, "fsl,qoriq-gpio")) {
+ return (ENXIO);
+ }
+
+ device_set_desc(dev, "Integrated GPIO Controller");
+ return (0);
+}
+
+static int
+qoriq_gpio_attach(device_t dev)
+{
+ struct gpio_softc *sc = device_get_softc(dev);
+ int i;
+
+	if (qoriq_make_gpio_res(dev, &sc->res) != 0)
+		return (ENXIO);
+
+	for (i = 0; i < PIN_COUNT; i++)
+		sc->setup[i].gp_caps = DEFAULT_CAPS;
+
+	sc->dev = dev;
+	mtx_init(&sc->mutex, device_get_nameunit(dev), NULL, MTX_SPIN);
+
+	sc->busdev = gpiobus_attach_bus(dev);
+	if (sc->busdev == NULL) {
+		mtx_destroy(&sc->mutex);
+		return (ENXIO);
+	}
+
+	return (0);
+}
+
+static device_t
+qoriq_gpio_get_bus(device_t dev)
+{
+ struct gpio_softc *softc = device_get_softc(dev);
+
+ return (softc->busdev);
+}
+
+static int
+qoriq_gpio_pin_max(device_t dev, int *maxpin)
+{
+
+	if (maxpin == NULL) {
+ return (EINVAL);
+ }
+
+ *maxpin = PIN_COUNT - 1;
+ return (0);
+}
+
+static int
+qoriq_gpio_pin_getname(device_t dev, uint32_t pin, char *name)
+{
+
+	if (name == NULL || pin >= PIN_COUNT) {
+ return (EINVAL);
+ }
+
+ snprintf(name, GPIOMAXNAME, "pin %d", pin);
+
+ return (0);
+}
+
+static int
+qoriq_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *pflags)
+{
+ struct gpio_softc *sc = device_get_softc(dev);
+
+ if (pflags == NULL || pin >= PIN_COUNT) {
+ return (EINVAL);
+ }
+
+ QORIQ_GPIO_LOCK(sc);
+ *pflags = sc->setup[pin].gp_flags;
+ QORIQ_GPIO_UNLOCK(sc);
+
+ return (0);
+}
+
+static int
+qoriq_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags)
+{
+ struct gpio_softc *sc = device_get_softc(dev);
+ int ret;
+
+ if (pin >= PIN_COUNT)
+ return (EINVAL);
+
+ /* Check for unwanted flags. */
+ QORIQ_GPIO_LOCK(sc);
+ if ((flags & sc->setup[pin].gp_caps) != flags) {
+ QORIQ_GPIO_UNLOCK(sc);
+ return (EINVAL);
+ }
+
+ ret = qoriq_gpio_configure(dev, pin, flags);
+
+ QORIQ_GPIO_UNLOCK(sc);
+ return (ret);
+}
+
+static int
+qoriq_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps)
+{
+ struct gpio_softc *sc = device_get_softc(dev);
+
+ if (caps == NULL || pin >= PIN_COUNT) {
+ return (EINVAL);
+ }
+
+ QORIQ_GPIO_LOCK(sc);
+ *caps = sc->setup[pin].gp_caps;
+ QORIQ_GPIO_UNLOCK(sc);
+
+ return (0);
+}
+
+static int
+qoriq_gpio_pin_get(device_t dev, uint32_t pin, uint32_t *value)
+{
+ struct gpio_softc *sc = device_get_softc(dev);
+
+ if (value == NULL || pin >= PIN_COUNT) {
+ return (EINVAL);
+ }
+
+ QORIQ_GPIO_LOCK(sc);
+ *value = qoriq_gpio_value_get(dev, pin);
+ QORIQ_GPIO_UNLOCK(sc);
+ return (0);
+}
+
+static int
+qoriq_gpio_pin_set(device_t dev, uint32_t pin, uint32_t value)
+{
+ struct gpio_softc *sc = device_get_softc(dev);
+
+ if (pin >= PIN_COUNT) {
+ return (EINVAL);
+ }
+
+ QORIQ_GPIO_LOCK(sc);
+ qoriq_gpio_value_set(dev, pin, value);
+ QORIQ_GPIO_UNLOCK(sc);
+ return (0);
+}
+
+static int
+qoriq_gpio_pin_toggle(device_t dev, uint32_t pin)
+{
+ struct gpio_softc *sc;
+ uint32_t value;
+
+ if (pin >= PIN_COUNT) {
+ return (EINVAL);
+ }
+
+ sc = device_get_softc(dev);
+
+ QORIQ_GPIO_LOCK(sc);
+ value = qoriq_gpio_reg_read(dev, DATA);
+	if (value & GPIO(pin))
+		value &= ~GPIO(pin);
+	else
+		value |= GPIO(pin);
+ qoriq_gpio_reg_write(dev, DATA, value);
+ QORIQ_GPIO_UNLOCK(sc);
+
+ return (0);
+}
+
+static int
+qoriq_gpio_map_gpios(device_t bus, phandle_t dev, phandle_t gparent, int gcells,
+ pcell_t *gpios, uint32_t *pin, uint32_t *flags)
+{
+ struct gpio_softc *sc = device_get_softc(bus);
+ int err;
+
+ if (gpios[0] >= PIN_COUNT)
+ return (EINVAL);
+
+ QORIQ_GPIO_LOCK(sc);
+ err = qoriq_gpio_configure(bus, gpios[0], gpios[1]);
+ QORIQ_GPIO_UNLOCK(sc);
+
+	if (err == 0) {
+ *pin = gpios[0];
+ *flags = gpios[1];
+ }
+
+ return (err);
+}
+
+static int
+qoriq_gpio_pin_access_32(device_t dev, uint32_t first_pin, uint32_t clear_pins,
+ uint32_t change_pins, uint32_t *orig_pins)
+{
+ struct gpio_softc *sc;
+ uint32_t hwstate;
+
+ sc = device_get_softc(dev);
+
+ if (first_pin != 0)
+ return (EINVAL);
+
+ QORIQ_GPIO_LOCK(sc);
+ hwstate = qoriq_gpio_reg_read(dev, DATA);
+ qoriq_gpio_reg_write(dev, DATA, (hwstate & ~clear_pins) ^ change_pins);
+ QORIQ_GPIO_UNLOCK(sc);
+
+ if (orig_pins != NULL)
+ *orig_pins = hwstate;
+
+ return (0);
+}
+
+static int
+qoriq_gpio_pin_config_32(device_t dev, uint32_t first_pin, uint32_t num_pins,
+ uint32_t *pin_flags)
+{
+ uint32_t dir, odr, mask, reg;
+ struct gpio_softc *sc;
+ uint32_t newflags[32];
+ int i;
+
+ if (first_pin != 0 || num_pins > PIN_COUNT)
+ return (EINVAL);
+
+ sc = device_get_softc(dev);
+
+ dir = 0;
+ odr = 0;
+ mask = 0;
+
+ for (i = 0; i < num_pins; i++) {
+ newflags[i] = 0;
+ mask |= (1 << i);
+
+ if (pin_flags[i] & GPIO_PIN_INPUT) {
+ newflags[i] = GPIO_PIN_INPUT;
+ dir &= ~(1 << i);
+ } else {
+ newflags[i] = GPIO_PIN_OUTPUT;
+ dir |= (1 << i);
+
+ if (pin_flags[i] & GPIO_PIN_OPENDRAIN) {
+ newflags[i] |= GPIO_PIN_OPENDRAIN;
+ odr |= (1 << i);
+ } else {
+ newflags[i] |= GPIO_PIN_PUSHPULL;
+ odr &= ~(1 << i);
+ }
+ }
+ }
+
+ QORIQ_GPIO_LOCK(sc);
+ reg = qoriq_gpio_reg_read(dev, DIRECTION);
+ reg &= ~mask;
+ reg |= dir;
+ qoriq_gpio_reg_write(dev, DIRECTION, reg);
+ reg = qoriq_gpio_reg_read(dev, OPEN_DRAIN);
+ reg &= ~mask;
+ reg |= odr;
+ qoriq_gpio_reg_write(dev, OPEN_DRAIN, reg);
+ for (i = 0; i < num_pins; i++) {
+ sc->setup[i].gp_flags = newflags[i];
+ }
+ QORIQ_GPIO_UNLOCK(sc);
+
+ return (0);
+}
diff --git a/sys/arm64/qualcomm/qcom_gcc.c b/sys/arm64/qualcomm/qcom_gcc.c
new file mode 100644
index 000000000000..34ff41ce42e6
--- /dev/null
+++ b/sys/arm64/qualcomm/qcom_gcc.c
@@ -0,0 +1,148 @@
+/*-
+ * Copyright (c) 2018 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by BAE Systems, the University of Cambridge
+ * Computer Laboratory, and Memorial University under DARPA/AFRL contract
+ * FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent Computing
+ * (TC) research program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kthread.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <machine/bus.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#define GCC_QDSS_BCR 0x29000
+#define GCC_QDSS_BCR_BLK_ARES (1 << 0) /* Async software reset. */
+#define GCC_QDSS_CFG_AHB_CBCR 0x29008
+#define AHB_CBCR_CLK_ENABLE (1 << 0) /* AHB clk branch ctrl */
+#define GCC_QDSS_ETR_USB_CBCR 0x29028
+#define ETR_USB_CBCR_CLK_ENABLE (1 << 0) /* ETR USB clk branch ctrl */
+#define GCC_QDSS_DAP_CBCR 0x29084
+#define DAP_CBCR_CLK_ENABLE (1 << 0) /* DAP clk branch ctrl */
+
+static struct ofw_compat_data compat_data[] = {
+ { "qcom,gcc-msm8916", 1 },
+ { NULL, 0 }
+};
+
+struct qcom_gcc_softc {
+ struct resource *res;
+};
+
+static struct resource_spec qcom_gcc_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { -1, 0 }
+};
+
+/*
+ * Qualcomm Debug Subsystem (QDSS)
+ * block enabling routine.
+ */
+static void
+qcom_qdss_enable(struct qcom_gcc_softc *sc)
+{
+
+ /* Put QDSS block to reset */
+ bus_write_4(sc->res, GCC_QDSS_BCR, GCC_QDSS_BCR_BLK_ARES);
+
+ /* Enable AHB clock branch */
+ bus_write_4(sc->res, GCC_QDSS_CFG_AHB_CBCR, AHB_CBCR_CLK_ENABLE);
+
+ /* Enable DAP clock branch */
+ bus_write_4(sc->res, GCC_QDSS_DAP_CBCR, DAP_CBCR_CLK_ENABLE);
+
+ /* Enable ETR USB clock branch */
+ bus_write_4(sc->res, GCC_QDSS_ETR_USB_CBCR, ETR_USB_CBCR_CLK_ENABLE);
+
+ /* Out of reset */
+ bus_write_4(sc->res, GCC_QDSS_BCR, 0);
+}
+
+static int
+qcom_gcc_probe(device_t dev)
+{
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "Qualcomm Global Clock Controller");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+qcom_gcc_attach(device_t dev)
+{
+ struct qcom_gcc_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ if (bus_alloc_resources(dev, qcom_gcc_spec, &sc->res) != 0) {
+ device_printf(dev, "cannot allocate resources for device\n");
+ return (ENXIO);
+ }
+
+ /*
+ * Enable debug unit.
+ * This is required for Coresight operation.
+ * This also enables USB clock branch.
+ */
+ qcom_qdss_enable(sc);
+
+ return (0);
+}
+
+static device_method_t qcom_gcc_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, qcom_gcc_probe),
+ DEVMETHOD(device_attach, qcom_gcc_attach),
+
+ DEVMETHOD_END
+};
+
+static driver_t qcom_gcc_driver = {
+ "qcom_gcc",
+ qcom_gcc_methods,
+ sizeof(struct qcom_gcc_softc),
+};
+
+static devclass_t qcom_gcc_devclass;
+
+EARLY_DRIVER_MODULE(qcom_gcc, simplebus, qcom_gcc_driver, qcom_gcc_devclass,
+ 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
+MODULE_VERSION(qcom_gcc, 1);
diff --git a/sys/arm64/rockchip/clk/rk3328_cru.c b/sys/arm64/rockchip/clk/rk3328_cru.c
new file mode 100644
index 000000000000..69ad2dac873c
--- /dev/null
+++ b/sys/arm64/rockchip/clk/rk3328_cru.c
@@ -0,0 +1,1515 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2018 Emmanuel Vadot <manu@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <machine/bus.h>
+
+#include <dev/fdt/simplebus.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/extres/clk/clk_div.h>
+#include <dev/extres/clk/clk_fixed.h>
+#include <dev/extres/clk/clk_mux.h>
+
+#include <arm64/rockchip/clk/rk_cru.h>
+
+/* Registers */
+#define RK3328_GRF_SOC_CON4 0x410
+#define RK3328_GRF_MAC_CON1 0x904
+#define RK3328_GRF_MAC_CON2 0x908
+
+/* GATES */
+
+#define SCLK_MAC2PHY_RXTX 83
+#define SCLK_MAC2PHY_SRC 84
+#define SCLK_MAC2PHY_REF 85
+#define SCLK_MAC2PHY_OUT 86
+#define SCLK_MAC2IO_RX 87
+#define SCLK_MAC2IO_TX 88
+#define SCLK_MAC2IO_REFOUT 89
+#define SCLK_MAC2IO_REF 90
+#define SCLK_MAC2IO_OUT 91
+#define SCLK_USB3OTG_REF 96
+#define SCLK_MAC2IO_SRC 99
+#define SCLK_MAC2IO 100
+#define SCLK_MAC2PHY 101
+#define SCLK_MAC2IO_EXT 102
+#define ACLK_USB3OTG 132
+#define ACLK_GMAC 146
+#define ACLK_MAC2PHY 149
+#define ACLK_MAC2IO 150
+#define ACLK_PERI 153
+#define PCLK_GPIO0 200
+#define PCLK_GPIO1 201
+#define PCLK_GPIO2 202
+#define PCLK_GPIO3 203
+#define PCLK_I2C0 205
+#define PCLK_I2C1 206
+#define PCLK_I2C2 207
+#define PCLK_I2C3 208
+#define PCLK_TSADC 213
+#define PCLK_GMAC 220
+#define PCLK_MAC2PHY 222
+#define PCLK_MAC2IO 223
+#define PCLK_USB3PHY_OTG 224
+#define PCLK_USB3PHY_PIPE 225
+#define PCLK_USB3_GRF 226
+#define HCLK_SDMMC 317
+#define HCLK_SDIO 318
+#define HCLK_EMMC 319
+#define HCLK_SDMMC_EXT 320
+
+static struct rk_cru_gate rk3328_gates[] = {
+ /* CRU_CLKGATE_CON0 */
+ CRU_GATE(0, "apll_core", "apll", 0x200, 0)
+ CRU_GATE(0, "dpll_core", "dpll", 0x200, 1)
+ CRU_GATE(0, "gpll_core", "gpll", 0x200, 2)
+ CRU_GATE(0, "npll_core", "npll", 0x200, 12)
+
+ /* CRU_CLKGATE_CON4 */
+ CRU_GATE(0, "gpll_peri", "gpll", 0x210, 0)
+ CRU_GATE(0, "cpll_peri", "cpll", 0x210, 1)
+ CRU_GATE(SCLK_USB3OTG_REF, "clk_usb3otg_ref", "xin24m", 0x210, 7)
+
+ /* CRU_CLKGATE_CON8 */
+ CRU_GATE(0, "pclk_bus", "pclk_bus_pre", 0x220, 3)
+ CRU_GATE(0, "pclk_phy_pre", "pclk_bus_pre", 0x220, 4)
+
+	/* CRU_CLKGATE_CON9 */
+ CRU_GATE(SCLK_MAC2IO_REF, "clk_mac2io_ref", "clk_mac2io", 0x224, 7)
+ CRU_GATE(SCLK_MAC2IO_REFOUT, "clk_mac2io_refout", "clk_mac2io", 0x224, 6)
+ CRU_GATE(SCLK_MAC2IO_TX, "clk_mac2io_tx", "clk_mac2io", 0x224, 5)
+ CRU_GATE(SCLK_MAC2IO_RX, "clk_mac2io_rx", "clk_mac2io", 0x224, 4)
+ CRU_GATE(SCLK_MAC2PHY_REF, "clk_mac2phy_ref", "clk_mac2phy", 0x224, 3)
+ CRU_GATE(SCLK_MAC2PHY_RXTX, "clk_mac2phy_rxtx", "clk_mac2phy", 0x224, 1)
+
+ /* CRU_CLKGATE_CON10 */
+ CRU_GATE(ACLK_PERI, "aclk_peri", "aclk_peri_pre", 0x228, 0)
+
+	/* CRU_CLKGATE_CON15 */
+ CRU_GATE(PCLK_I2C0, "pclk_i2c0", "pclk_bus", 0x23C, 10)
+
+ /* CRU_CLKGATE_CON16 */
+ CRU_GATE(PCLK_I2C1, "pclk_i2c1", "pclk_bus", 0x23C, 0)
+ CRU_GATE(PCLK_I2C2, "pclk_i2c2", "pclk_bus", 0x23C, 1)
+ CRU_GATE(PCLK_I2C3, "pclk_i2c3", "pclk_bus", 0x23C, 2)
+ CRU_GATE(PCLK_TSADC, "pclk_tsadc", "pclk_bus", 0x23C, 14)
+
+ CRU_GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_bus", 0x240, 7)
+ CRU_GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_bus", 0x240, 8)
+ CRU_GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_bus", 0x240, 9)
+ CRU_GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_bus", 0x240, 10)
+
+ /* CRU_CLKGATE_CON17 */
+ CRU_GATE(PCLK_USB3_GRF, "pclk_usb3_grf", "pclk_phy_pre", 0x244, 2)
+
+ /* CRU_CLKGATE_CON19 */
+ CRU_GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0x24C, 0)
+ CRU_GATE(HCLK_SDIO, "hclk_sdio", "hclk_peri", 0x24C, 1)
+ CRU_GATE(HCLK_EMMC, "hclk_emmc", "hclk_peri", 0x24C, 2)
+ CRU_GATE(0, "hclk_peri_niu", "hclk_peri", 0x24C, 12)
+ CRU_GATE(0, "pclk_peri_niu", "hclk_peri", 0x24C, 13)
+ CRU_GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri", 0x24C, 14)
+ CRU_GATE(HCLK_SDMMC_EXT, "hclk_sdmmc_ext", "hclk_peri", 0x24C, 15)
+
+ /* CRU_CLKGATE_CON26 */
+ CRU_GATE(ACLK_MAC2PHY, "aclk_mac2phy", "aclk_gmac", 0x268, 0)
+ CRU_GATE(PCLK_MAC2PHY, "pclk_mac2phy", "pclk_gmac", 0x268, 1)
+ CRU_GATE(ACLK_MAC2IO, "aclk_mac2io", "aclk_gmac", 0x268, 2)
+ CRU_GATE(PCLK_MAC2IO, "pclk_mac2io", "pclk_gmac", 0x268, 3)
+
+ /* CRU_CLKGATE_CON28 */
+ CRU_GATE(PCLK_USB3PHY_OTG, "pclk_usb3phy_otg", "pclk_phy_pre", 0x270, 1)
+ CRU_GATE(PCLK_USB3PHY_PIPE, "pclk_usb3phy_pipe", "pclk_phy_pre", 0x270, 2)
+};
+
+/*
+ * PLLs
+ */
+
+#define PLL_APLL 1
+#define PLL_DPLL 2
+#define PLL_CPLL 3
+#define PLL_GPLL 4
+#define PLL_NPLL 5
+
+static struct rk_clk_pll_rate rk3328_pll_rates[] = {
+ {
+ .freq = 1608000000,
+ .refdiv = 1,
+ .fbdiv = 67,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1584000000,
+ .refdiv = 1,
+ .fbdiv = 66,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1560000000,
+ .refdiv = 1,
+ .fbdiv = 65,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1536000000,
+ .refdiv = 1,
+ .fbdiv = 64,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1512000000,
+ .refdiv = 1,
+ .fbdiv = 63,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1488000000,
+ .refdiv = 1,
+ .fbdiv = 62,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1464000000,
+ .refdiv = 1,
+ .fbdiv = 61,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1440000000,
+ .refdiv = 1,
+ .fbdiv = 60,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1416000000,
+ .refdiv = 1,
+ .fbdiv = 59,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1392000000,
+ .refdiv = 1,
+ .fbdiv = 58,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1368000000,
+ .refdiv = 1,
+ .fbdiv = 57,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1344000000,
+ .refdiv = 1,
+ .fbdiv = 56,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1320000000,
+ .refdiv = 1,
+ .fbdiv = 55,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1296000000,
+ .refdiv = 1,
+ .fbdiv = 54,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1272000000,
+ .refdiv = 1,
+ .fbdiv = 53,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1248000000,
+ .refdiv = 1,
+ .fbdiv = 52,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1200000000,
+ .refdiv = 1,
+ .fbdiv = 50,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1188000000,
+ .refdiv = 2,
+ .fbdiv = 99,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1104000000,
+ .refdiv = 1,
+ .fbdiv = 46,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1100000000,
+ .refdiv = 12,
+ .fbdiv = 550,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1008000000,
+ .refdiv = 1,
+ .fbdiv = 84,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1000000000,
+ .refdiv = 6,
+ .fbdiv = 500,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 984000000,
+ .refdiv = 1,
+ .fbdiv = 82,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 960000000,
+ .refdiv = 1,
+ .fbdiv = 80,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 936000000,
+ .refdiv = 1,
+ .fbdiv = 78,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 912000000,
+ .refdiv = 1,
+ .fbdiv = 76,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 900000000,
+ .refdiv = 4,
+ .fbdiv = 300,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 888000000,
+ .refdiv = 1,
+ .fbdiv = 74,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 864000000,
+ .refdiv = 1,
+ .fbdiv = 72,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 840000000,
+ .refdiv = 1,
+ .fbdiv = 70,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 816000000,
+ .refdiv = 1,
+ .fbdiv = 68,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 800000000,
+ .refdiv = 6,
+ .fbdiv = 400,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 700000000,
+ .refdiv = 6,
+ .fbdiv = 350,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 696000000,
+ .refdiv = 1,
+ .fbdiv = 58,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 600000000,
+ .refdiv = 1,
+ .fbdiv = 75,
+ .postdiv1 = 3,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 594000000,
+ .refdiv = 2,
+ .fbdiv = 99,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 504000000,
+ .refdiv = 1,
+ .fbdiv = 63,
+ .postdiv1 = 3,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 500000000,
+ .refdiv = 6,
+ .fbdiv = 250,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 408000000,
+ .refdiv = 1,
+ .fbdiv = 68,
+ .postdiv1 = 2,
+ .postdiv2 = 2,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 312000000,
+ .refdiv = 1,
+ .fbdiv = 52,
+ .postdiv1 = 2,
+ .postdiv2 = 2,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 216000000,
+ .refdiv = 1,
+ .fbdiv = 72,
+ .postdiv1 = 4,
+ .postdiv2 = 2,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 96000000,
+ .refdiv = 1,
+ .fbdiv = 64,
+ .postdiv1 = 4,
+ .postdiv2 = 4,
+ .dsmpd = 1,
+ },
+ {},
+};
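+/*
+ * The table above appears to follow the usual Rockchip integer-mode PLL
+ * relation freq = 24 MHz / refdiv * fbdiv / (postdiv1 * postdiv2); e.g. the
+ * first entry gives 24 MHz / 1 * 67 / (1 * 1) = 1608 MHz.  The fractional
+ * table below additionally adds frac / 2^24 to fbdiv before the
+ * post-dividers.
+ */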
+
+static struct rk_clk_pll_rate rk3328_pll_frac_rates[] = {
+ {
+ .freq = 1016064000,
+ .refdiv = 3,
+ .fbdiv = 127,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 0,
+ .frac = 134217,
+ },
+ {
+ .freq = 983040000,
+ .refdiv = 24,
+ .fbdiv = 983,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 0,
+ .frac = 671088,
+ },
+ {
+ .freq = 491520000,
+ .refdiv = 24,
+ .fbdiv = 983,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 0,
+ .frac = 671088,
+ },
+ {
+ .freq = 61440000,
+ .refdiv = 6,
+ .fbdiv = 215,
+ .postdiv1 = 7,
+ .postdiv2 = 2,
+ .dsmpd = 0,
+ .frac = 671088,
+ },
+ {
+ .freq = 56448000,
+ .refdiv = 12,
+ .fbdiv = 451,
+ .postdiv1 = 4,
+ .postdiv2 = 4,
+ .dsmpd = 0,
+ .frac = 9797894,
+ },
+ {
+ .freq = 40960000,
+ .refdiv = 12,
+ .fbdiv = 409,
+ .postdiv1 = 4,
+ .postdiv2 = 5,
+ .dsmpd = 0,
+ .frac = 10066329,
+ },
+ {},
+};
+
+static const char *pll_parents[] = {"xin24m"};
+static struct rk_clk_pll_def apll = {
+ .clkdef = {
+ .id = PLL_APLL,
+ .name = "apll",
+ .parent_names = pll_parents,
+ .parent_cnt = nitems(pll_parents),
+ },
+ .base_offset = 0x00,
+ .gate_offset = 0x200,
+ .gate_shift = 0,
+ .mode_reg = 0x80,
+ .mode_shift = 1,
+ .flags = RK_CLK_PLL_HAVE_GATE,
+ .frac_rates = rk3328_pll_frac_rates,
+};
+
+static struct rk_clk_pll_def dpll = {
+ .clkdef = {
+ .id = PLL_DPLL,
+ .name = "dpll",
+ .parent_names = pll_parents,
+ .parent_cnt = nitems(pll_parents),
+ },
+ .base_offset = 0x20,
+ .gate_offset = 0x200,
+ .gate_shift = 1,
+ .mode_reg = 0x80,
+ .mode_shift = 4,
+ .flags = RK_CLK_PLL_HAVE_GATE,
+};
+
+static struct rk_clk_pll_def cpll = {
+ .clkdef = {
+ .id = PLL_CPLL,
+ .name = "cpll",
+ .parent_names = pll_parents,
+ .parent_cnt = nitems(pll_parents),
+ },
+ .base_offset = 0x40,
+ .mode_reg = 0x80,
+ .mode_shift = 8,
+ .rates = rk3328_pll_rates,
+};
+
+static struct rk_clk_pll_def gpll = {
+ .clkdef = {
+ .id = PLL_GPLL,
+ .name = "gpll",
+ .parent_names = pll_parents,
+ .parent_cnt = nitems(pll_parents),
+ },
+ .base_offset = 0x60,
+ .gate_offset = 0x200,
+ .gate_shift = 2,
+ .mode_reg = 0x80,
+ .mode_shift = 12,
+ .flags = RK_CLK_PLL_HAVE_GATE,
+ .frac_rates = rk3328_pll_frac_rates,
+};
+
+static struct rk_clk_pll_def npll = {
+ .clkdef = {
+ .id = PLL_NPLL,
+ .name = "npll",
+ .parent_names = pll_parents,
+ .parent_cnt = nitems(pll_parents),
+ },
+ .base_offset = 0xa0,
+ .gate_offset = 0x200,
+ .gate_shift = 12,
+ .mode_reg = 0x80,
+ .mode_shift = 1,
+ .flags = RK_CLK_PLL_HAVE_GATE,
+ .rates = rk3328_pll_rates,
+};
+
+/* CRU_CLKSEL_CON0 */
+#define ACLK_BUS_PRE 136
+
+/* Needs hdmiphy as parent too */
+static const char *aclk_bus_pre_parents[] = {"cpll", "gpll"};
+static struct rk_clk_composite_def aclk_bus_pre = {
+ .clkdef = {
+ .id = ACLK_BUS_PRE,
+ .name = "aclk_bus_pre",
+ .parent_names = aclk_bus_pre_parents,
+ .parent_cnt = nitems(aclk_bus_pre_parents),
+ },
+ .muxdiv_offset = 0x100,
+ .mux_shift = 13,
+ .mux_width = 2,
+
+ .div_shift = 8,
+ .div_width = 5,
+
+ .gate_offset = 0x220,
+ .gate_shift = 0,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_MUX | RK_CLK_COMPOSITE_HAVE_GATE,
+};
+
+static struct rk_clk_armclk_rates rk3328_armclk_rates[] = {
+ {
+ .freq = 1296000000,
+ .div = 1,
+ },
+ {
+ .freq = 1200000000,
+ .div = 1,
+ },
+ {
+ .freq = 1104000000,
+ .div = 1,
+ },
+ {
+ .freq = 1008000000,
+ .div = 1,
+ },
+ {
+ .freq = 912000000,
+ .div = 1,
+ },
+ {
+ .freq = 816000000,
+ .div = 1,
+ },
+ {
+ .freq = 696000000,
+ .div = 1,
+ },
+ {
+ .freq = 600000000,
+ .div = 1,
+ },
+ {
+ .freq = 408000000,
+ .div = 1,
+ },
+ {
+ .freq = 312000000,
+ .div = 1,
+ },
+ {
+ .freq = 216000000,
+ .div = 1,
+ },
+ {
+ .freq = 96000000,
+ .div = 1,
+ },
+};
+
+#define ARMCLK 6
+static const char *armclk_parents[] = {"apll", "gpll", "dpll", "npll" };
+static struct rk_clk_armclk_def armclk = {
+ .clkdef = {
+ .id = ARMCLK,
+ .name = "armclk",
+ .parent_names = armclk_parents,
+ .parent_cnt = nitems(armclk_parents),
+ },
+ .muxdiv_offset = 0x100,
+ .mux_shift = 6,
+ .mux_width = 2,
+
+ .div_shift = 0,
+ .div_width = 5,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_MUX,
+ .main_parent = 3, /* npll */
+ .alt_parent = 0, /* apll */
+
+ .rates = rk3328_armclk_rates,
+ .nrates = nitems(rk3328_armclk_rates),
+};
+
+/* CRU_CLKSEL_CON1 */
+
+#define PCLK_BUS_PRE 216
+#define HCLK_BUS_PRE 328
+
+static const char *hclk_bus_pre_parents[] = {"aclk_bus_pre"};
+static struct rk_clk_composite_def hclk_bus_pre = {
+ .clkdef = {
+ .id = HCLK_BUS_PRE,
+ .name = "hclk_bus_pre",
+ .parent_names = hclk_bus_pre_parents,
+ .parent_cnt = nitems(hclk_bus_pre_parents),
+ },
+ .muxdiv_offset = 0x104,
+
+ .div_shift = 8,
+ .div_width = 2,
+
+ .gate_offset = 0x220,
+ .gate_shift = 1,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_GATE,
+};
+
+static const char *pclk_bus_pre_parents[] = {"aclk_bus_pre"};
+static struct rk_clk_composite_def pclk_bus_pre = {
+ .clkdef = {
+ .id = PCLK_BUS_PRE,
+ .name = "pclk_bus_pre",
+ .parent_names = pclk_bus_pre_parents,
+ .parent_cnt = nitems(pclk_bus_pre_parents),
+ },
+ .muxdiv_offset = 0x104,
+
+ .div_shift = 12,
+ .div_width = 3,
+
+ .gate_offset = 0x220,
+ .gate_shift = 2,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_GATE,
+};
+
+/* CRU_CLKSEL_CON22 */
+
+#define SCLK_TSADC 36
+
+static const char *clk_tsadc_parents[] = {"xin24m"};
+static struct rk_clk_composite_def clk_tsadc = {
+ .clkdef = {
+ .id = SCLK_TSADC,
+ .name = "clk_tsadc",
+ .parent_names = clk_tsadc_parents,
+ .parent_cnt = nitems(clk_tsadc_parents),
+ },
+ .div_shift = 0,
+ .div_width = 9,
+};
+
+/* CRU_CLKSEL_CON28 */
+
+#define ACLK_PERI_PRE 137
+
+static const char *aclk_peri_pre_parents[] = {"cpll", "gpll"/* , "hdmiphy" */};
+static struct rk_clk_composite_def aclk_peri_pre = {
+ .clkdef = {
+ .id = ACLK_PERI_PRE,
+ .name = "aclk_peri_pre",
+ .parent_names = aclk_peri_pre_parents,
+ .parent_cnt = nitems(aclk_peri_pre_parents),
+ },
+ .muxdiv_offset = 0x170,
+
+ .mux_shift = 6,
+ .mux_width = 2,
+
+ .div_shift = 0,
+ .div_width = 5,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_MUX,
+};
+
+/* CRU_CLKSEL_CON29 */
+
+#define PCLK_PERI 230
+#define HCLK_PERI 308
+
+static const char *phclk_peri_parents[] = {"aclk_peri_pre"};
+static struct rk_clk_composite_def pclk_peri = {
+ .clkdef = {
+ .id = PCLK_PERI,
+ .name = "pclk_peri",
+ .parent_names = phclk_peri_parents,
+ .parent_cnt = nitems(phclk_peri_parents),
+ },
+
+ .muxdiv_offset = 0x174, /* CRU_CLKSEL_CON29 */
+
+ .div_shift = 0,
+ .div_width = 2,
+
+ /* CRU_CLKGATE_CON10 */
+ .gate_offset = 0x228,
+ .gate_shift = 2,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_GATE,
+};
+
+static struct rk_clk_composite_def hclk_peri = {
+ .clkdef = {
+ .id = HCLK_PERI,
+ .name = "hclk_peri",
+ .parent_names = phclk_peri_parents,
+ .parent_cnt = nitems(phclk_peri_parents),
+ },
+
+ .muxdiv_offset = 0x174, /* CRU_CLKSEL_CON29 */
+
+ .div_shift = 4,
+ .div_width = 3,
+
+ /* CRU_CLKGATE_CON10 */
+ .gate_offset = 0x228,
+ .gate_shift = 1,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_GATE,
+};
+
+/* CRU_CLKSEL_CON30 */
+
+#define SCLK_SDMMC 33
+
+static const char *mmc_parents[] = {"cpll", "gpll", "xin24m"/* , "usb480m" */};
+static struct rk_clk_composite_def sdmmc = {
+ .clkdef = {
+ .id = SCLK_SDMMC,
+ .name = "clk_sdmmc",
+ .parent_names = mmc_parents,
+ .parent_cnt = nitems(mmc_parents),
+ },
+ .muxdiv_offset = 0x178,
+
+ .mux_shift = 8,
+ .mux_width = 2,
+
+ .div_shift = 0,
+ .div_width = 8,
+
+ /* CRU_CLKGATE_CON4 */
+ .gate_offset = 0x210,
+ .gate_shift = 3,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_MUX | RK_CLK_COMPOSITE_HAVE_GATE,
+};
+
+/* CRU_CLKSEL_CON31 */
+#define SCLK_SDIO 34
+
+static struct rk_clk_composite_def sdio = {
+ .clkdef = {
+ .id = SCLK_SDIO,
+ .name = "clk_sdio",
+ .parent_names = mmc_parents,
+ .parent_cnt = nitems(mmc_parents),
+ },
+ .muxdiv_offset = 0x17C,
+
+ .mux_shift = 8,
+ .mux_width = 2,
+
+ .div_shift = 0,
+ .div_width = 8,
+
+ /* CRU_CLKGATE_CON4 */
+ .gate_offset = 0x210,
+ .gate_shift = 4,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_MUX | RK_CLK_COMPOSITE_HAVE_GATE,
+};
+
+/* CRU_CLKSEL_CON32 */
+#define SCLK_EMMC 35
+
+static struct rk_clk_composite_def emmc = {
+ .clkdef = {
+ .id = SCLK_EMMC,
+ .name = "clk_emmc",
+ .parent_names = mmc_parents,
+ .parent_cnt = nitems(mmc_parents),
+ },
+ .muxdiv_offset = 0x180,
+
+ .mux_shift = 8,
+ .mux_width = 2,
+
+ .div_shift = 0,
+ .div_width = 8,
+
+ /* CRU_CLKGATE_CON4 */
+ .gate_offset = 0x210,
+ .gate_shift = 5,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_MUX | RK_CLK_COMPOSITE_HAVE_GATE,
+};
+
+/* CRU_CLKSEL_CON34 */
+#define SCLK_I2C0 55
+#define SCLK_I2C1 56
+
+static const char *i2c_parents[] = {"cpll", "gpll"};
+
+static struct rk_clk_composite_def i2c0 = {
+ .clkdef = {
+ .id = SCLK_I2C0,
+ .name = "clk_i2c0",
+ .parent_names = i2c_parents,
+ .parent_cnt = nitems(i2c_parents),
+ },
+ .muxdiv_offset = 0x188,
+
+ .mux_shift = 7,
+ .mux_width = 1,
+
+ .div_shift = 0,
+ .div_width = 6,
+
+ /* CRU_CLKGATE_CON2 */
+ .gate_offset = 0x208,
+ .gate_shift = 9,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_MUX | RK_CLK_COMPOSITE_HAVE_GATE,
+};
+
+static struct rk_clk_composite_def i2c1 = {
+ .clkdef = {
+ .id = SCLK_I2C1,
+ .name = "clk_i2c1",
+ .parent_names = i2c_parents,
+ .parent_cnt = nitems(i2c_parents),
+ },
+ .muxdiv_offset = 0x188,
+
+ .mux_shift = 15,
+ .mux_width = 1,
+
+ .div_shift = 8,
+ .div_width = 6,
+
+ /* CRU_CLKGATE_CON2 */
+ .gate_offset = 0x208,
+ .gate_shift = 10,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_MUX | RK_CLK_COMPOSITE_HAVE_GATE,
+};
+
+/* CRU_CLKSEL_CON35 */
+#define SCLK_I2C2 57
+#define SCLK_I2C3 58
+
+static struct rk_clk_composite_def i2c2 = {
+ .clkdef = {
+ .id = SCLK_I2C2,
+ .name = "clk_i2c2",
+ .parent_names = i2c_parents,
+ .parent_cnt = nitems(i2c_parents),
+ },
+ .muxdiv_offset = 0x18C,
+
+ .mux_shift = 7,
+ .mux_width = 1,
+
+ .div_shift = 0,
+ .div_width = 6,
+
+ /* CRU_CLKGATE_CON2 */
+ .gate_offset = 0x208,
+ .gate_shift = 11,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_MUX | RK_CLK_COMPOSITE_HAVE_GATE,
+};
+
+static struct rk_clk_composite_def i2c3 = {
+ .clkdef = {
+ .id = SCLK_I2C3,
+ .name = "clk_i2c3",
+ .parent_names = i2c_parents,
+ .parent_cnt = nitems(i2c_parents),
+ },
+ .muxdiv_offset = 0x18C,
+
+ .mux_shift = 15,
+ .mux_width = 1,
+
+ .div_shift = 8,
+ .div_width = 6,
+
+ /* CRU_CLKGATE_CON2 */
+ .gate_offset = 0x208,
+ .gate_shift = 12,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_MUX | RK_CLK_COMPOSITE_HAVE_GATE,
+};
+
+#define SCLK_USB3_REF 72
+#define SCLK_USB3_SUSPEND 73
+#define SCLK_USB3PHY_REF 94
+#define SCLK_REF_USB3OTG 95
+#define SCLK_USB3OTG_SUSPEND 97
+#define SCLK_REF_USB3OTG_SRC 98
+
+static const char *ref_usb3otg_parents[] = { "xin24m", "clk_usb3otg_ref" };
+
+static struct rk_clk_composite_def ref_usb3otg = {
+ .clkdef = {
+ .id = SCLK_REF_USB3OTG,
+ .name = "clk_ref_usb3otg",
+ .parent_names = ref_usb3otg_parents,
+ .parent_cnt = nitems(ref_usb3otg_parents),
+ },
+ .muxdiv_offset = 0x1B4,
+
+ .mux_shift = 8,
+ .mux_width = 1,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_MUX,
+};
+
+static const char *usb3otg_suspend_parents[] = { "xin24m"/*, "clk_rtc32k" */};
+
+static struct rk_clk_composite_def usb3otg_suspend = {
+ .clkdef = {
+ .id = SCLK_USB3OTG_SUSPEND,
+ .name = "clk_usb3otg_suspend",
+ .parent_names = usb3otg_suspend_parents,
+ .parent_cnt = nitems(usb3otg_suspend_parents),
+ },
+ .muxdiv_offset = 0x184,
+
+ .mux_shift = 15,
+ .mux_width = 1,
+
+ .div_shift = 0,
+ .div_width = 10,
+
+ /* CRU_CLKGATE_CON4 */
+ .gate_offset = 0x210,
+ .gate_shift = 8,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_GATE,
+};
+
+static const char *ref_usb3otg_src_parents[] = { "cpll", "gpll" };
+
+static struct rk_clk_composite_def ref_usb3otg_src = {
+ .clkdef = {
+ .id = SCLK_REF_USB3OTG_SRC,
+ .name = "clk_ref_usb3otg_src",
+ .parent_names = ref_usb3otg_src_parents,
+ .parent_cnt = nitems(ref_usb3otg_src_parents),
+ },
+ .muxdiv_offset = 0x1B4,
+
+ .mux_shift = 7,
+ .mux_width = 1,
+
+ .div_shift = 0,
+ .div_width = 7,
+
+ /* CRU_CLKGATE_CON4 */
+ .gate_offset = 0x210,
+ .gate_shift = 9,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_GATE,
+};
+
+static const char *mac2io_src_parents[] = { "cpll", "gpll" };
+
+static struct rk_clk_composite_def mac2io_src = {
+ .clkdef = {
+ .id = SCLK_MAC2IO_SRC,
+ .name = "clk_mac2io_src",
+ .parent_names = mac2io_src_parents,
+ .parent_cnt = nitems(mac2io_src_parents),
+ },
+ /* CRU_CLKSEL_CON27 */
+ .muxdiv_offset = 0x16c,
+
+ .mux_shift = 7,
+ .mux_width = 1,
+
+ .div_shift = 0,
+ .div_width = 5,
+
+ /* CRU_CLKGATE_CON3 */
+ .gate_offset = 0x20c,
+ .gate_shift = 1,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_GATE | RK_CLK_COMPOSITE_HAVE_MUX,
+};
+
+static const char *mac2io_out_parents[] = { "cpll", "gpll" };
+
+static struct rk_clk_composite_def mac2io_out = {
+ .clkdef = {
+ .id = SCLK_MAC2IO_OUT,
+ .name = "clk_mac2io_out",
+ .parent_names = mac2io_out_parents,
+ .parent_cnt = nitems(mac2io_out_parents),
+ },
+ /* CRU_CLKSEL_CON27 */
+ .muxdiv_offset = 0x16c,
+
+ .mux_shift = 15,
+ .mux_width = 1,
+
+ .div_shift = 8,
+ .div_width = 5,
+
+ /* CRU_CLKGATE_CON3 */
+ .gate_offset = 0x20c,
+ .gate_shift = 5,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_GATE | RK_CLK_COMPOSITE_HAVE_MUX,
+};
+
+static const char *mac2io_parents[] = { "clk_mac2io_src", "gmac_clkin" };
+
+static struct rk_clk_composite_def mac2io = {
+ .clkdef = {
+ .id = SCLK_MAC2IO,
+ .name = "clk_mac2io",
+ .parent_names = mac2io_parents,
+ .parent_cnt = nitems(mac2io_parents),
+ },
+ .muxdiv_offset = RK3328_GRF_MAC_CON1,
+
+ .mux_shift = 10,
+ .mux_width = 1,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_MUX | RK_CLK_COMPOSITE_GRF
+};
+
+static const char *mac2io_ext_parents[] = { "clk_mac2io", "gmac_clkin" };
+
+static struct rk_clk_composite_def mac2io_ext = {
+ .clkdef = {
+ .id = SCLK_MAC2IO_EXT,
+ .name = "clk_mac2io_ext",
+ .parent_names = mac2io_ext_parents,
+ .parent_cnt = nitems(mac2io_ext_parents),
+ },
+ .muxdiv_offset = RK3328_GRF_SOC_CON4,
+
+ .mux_shift = 14,
+ .mux_width = 1,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_MUX | RK_CLK_COMPOSITE_GRF
+};
+
+static const char *mac2phy_src_parents[] = { "cpll", "gpll" };
+
+static struct rk_clk_composite_def mac2phy_src = {
+ .clkdef = {
+ .id = SCLK_MAC2PHY_SRC,
+ .name = "clk_mac2phy_src",
+ .parent_names = mac2phy_src_parents,
+ .parent_cnt = nitems(mac2phy_src_parents),
+ },
+ /* CRU_CLKSEL_CON26 */
+ .muxdiv_offset = 0x168,
+
+ .mux_shift = 7,
+ .mux_width = 1,
+
+ .div_shift = 0,
+ .div_width = 5,
+
+ /* CRU_CLKGATE_CON3 */
+ .gate_offset = 0x20c,
+ .gate_shift = 0,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_GATE | RK_CLK_COMPOSITE_HAVE_MUX,
+};
+
+static const char *mac2phy_parents[] = { "clk_mac2phy_src", "phy_50m_out" };
+
+static struct rk_clk_composite_def mac2phy = {
+ .clkdef = {
+ .id = SCLK_MAC2PHY,
+ .name = "clk_mac2phy",
+ .parent_names = mac2phy_parents,
+ .parent_cnt = nitems(mac2phy_parents),
+ },
+ .muxdiv_offset = RK3328_GRF_MAC_CON2,
+
+ .mux_shift = 10,
+ .mux_width = 1,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_MUX | RK_CLK_COMPOSITE_GRF
+};
+
+static const char *mac2phy_out_parents[] = { "clk_mac2phy" };
+
+static struct rk_clk_composite_def mac2phy_out = {
+ .clkdef = {
+ .id = SCLK_MAC2PHY_OUT,
+ .name = "clk_mac2phy_out",
+ .parent_names = mac2phy_out_parents,
+ .parent_cnt = nitems(mac2phy_out_parents),
+ },
+ /* CRU_CLKSEL_CON26 */
+ .muxdiv_offset = 0x168,
+
+ .div_shift = 8,
+ .div_width = 2,
+
+ /* CRU_CLKGATE_CON9 */
+ .gate_offset = 0x224,
+ .gate_shift = 2,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_GATE
+};
+
+static struct clk_fixed_def phy_50m_out = {
+ .clkdef.name = "phy_50m_out",
+ .freq = 50000000,
+};
+
+static struct clk_link_def gmac_clkin = {
+ .clkdef.name = "gmac_clkin",
+};
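+
+/*
+ * "gmac_clkin" is supplied from outside the CRU (board provided GMAC clock
+ * input); declaring it as a link clock lets the framework resolve it once the
+ * provider registers it (assumption based on the clk_link type, not stated in
+ * this file).
+ */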
+
+static const char *aclk_gmac_parents[] = { "cpll", "gpll" };
+
+static struct rk_clk_composite_def aclk_gmac = {
+ .clkdef = {
+ .id = ACLK_GMAC,
+ .name = "aclk_gmac",
+ .parent_names = aclk_gmac_parents,
+ .parent_cnt = nitems(aclk_gmac_parents),
+ },
+ /* CRU_CLKSEL_CON35 */
+ .muxdiv_offset = 0x18c,
+
+ .mux_shift = 6,
+ .mux_width = 2,
+
+ .div_shift = 0,
+ .div_width = 5,
+
+ /* CRU_CLKGATE_CON3 */
+ .gate_offset = 0x20c,
+ .gate_shift = 2,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_GATE | RK_CLK_COMPOSITE_HAVE_MUX,
+};
+
+static const char *pclk_gmac_parents[] = { "aclk_gmac" };
+
+static struct rk_clk_composite_def pclk_gmac = {
+ .clkdef = {
+ .id = PCLK_GMAC,
+ .name = "pclk_gmac",
+ .parent_names = pclk_gmac_parents,
+ .parent_cnt = nitems(pclk_gmac_parents),
+ },
+ /* CRU_CLKSEL_CON25 */
+ .muxdiv_offset = 0x164,
+
+ .div_shift = 8,
+ .div_width = 3,
+
+ /* CRU_CLKGATE_CON9 */
+ .gate_offset = 0x224,
+ .gate_shift = 0,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_GATE
+};
+
+static struct rk_clk rk3328_clks[] = {
+ {
+ .type = RK3328_CLK_PLL,
+ .clk.pll = &apll
+ },
+ {
+ .type = RK3328_CLK_PLL,
+ .clk.pll = &dpll
+ },
+ {
+ .type = RK3328_CLK_PLL,
+ .clk.pll = &cpll
+ },
+ {
+ .type = RK3328_CLK_PLL,
+ .clk.pll = &gpll
+ },
+ {
+ .type = RK3328_CLK_PLL,
+ .clk.pll = &npll
+ },
+
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &aclk_bus_pre
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &hclk_bus_pre
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &pclk_bus_pre
+ },
+
+ {
+ .type = RK_CLK_ARMCLK,
+ .clk.armclk = &armclk,
+ },
+
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &clk_tsadc,
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &aclk_peri_pre,
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &pclk_peri,
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &hclk_peri,
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &sdmmc
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &sdio
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &emmc
+ },
+
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &i2c0
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &i2c1
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &i2c2
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &i2c3
+ },
+
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &ref_usb3otg
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &ref_usb3otg_src
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &usb3otg_suspend
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &mac2io_src
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &mac2io
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &mac2io_out
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &mac2io_ext
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &mac2phy_src
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &mac2phy
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &mac2phy_out
+ },
+ {
+ .type = RK_CLK_FIXED,
+ .clk.fixed = &phy_50m_out
+ },
+ {
+ .type = RK_CLK_LINK,
+ .clk.link = &gmac_clkin
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &aclk_gmac
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &pclk_gmac
+ },
+};
+
+static int
+rk3328_cru_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_is_compatible(dev, "rockchip,rk3328-cru")) {
+ device_set_desc(dev, "Rockchip RK3328 Clock and Reset Unit");
+ return (BUS_PROBE_DEFAULT);
+ }
+
+ return (ENXIO);
+}
+
+static int
+rk3328_cru_attach(device_t dev)
+{
+ struct rk_cru_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ sc->gates = rk3328_gates;
+ sc->ngates = nitems(rk3328_gates);
+
+ sc->clks = rk3328_clks;
+ sc->nclks = nitems(rk3328_clks);
+
+ sc->reset_offset = 0x300;
+ sc->reset_num = 184;
+
+ return (rk_cru_attach(dev));
+}
+
+static device_method_t rk3328_cru_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, rk3328_cru_probe),
+ DEVMETHOD(device_attach, rk3328_cru_attach),
+
+ DEVMETHOD_END
+};
+
+static devclass_t rk3328_cru_devclass;
+
+DEFINE_CLASS_1(rk3328_cru, rk3328_cru_driver, rk3328_cru_methods,
+ sizeof(struct rk_cru_softc), rk_cru_driver);
+
+EARLY_DRIVER_MODULE(rk3328_cru, simplebus, rk3328_cru_driver,
+ rk3328_cru_devclass, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
diff --git a/sys/arm64/rockchip/clk/rk3399_cru.c b/sys/arm64/rockchip/clk/rk3399_cru.c
new file mode 100644
index 000000000000..317b11edf26c
--- /dev/null
+++ b/sys/arm64/rockchip/clk/rk3399_cru.c
@@ -0,0 +1,1273 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2018 Emmanuel Vadot <manu@freebsd.org>
+ * Copyright (c) 2018 Greg V <greg@unrelenting.technology>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <machine/bus.h>
+
+#include <dev/fdt/simplebus.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/extres/clk/clk_div.h>
+#include <dev/extres/clk/clk_fixed.h>
+#include <dev/extres/clk/clk_mux.h>
+
+#include <arm64/rockchip/clk/rk_cru.h>
+
+#include <arm64/rockchip/clk/rk3399_cru_dt.h>
+
+#define CRU_CLKSEL_CON(x) (0x100 + (x) * 0x4)
+#define CRU_CLKGATE_CON(x) (0x300 + (x) * 0x4)
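+
+/*
+ * In the GATE() entries below, GATE(id, name, parent, n, bit) controls bit
+ * <bit> of CRU_CLKGATE_CON(n), i.e. offset 0x300 + n * 4 per the macro above
+ * (argument order inferred from the register comments in the table).
+ */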
+
+/* GATES */
+
+static struct rk_cru_gate rk3399_gates[] = {
+ /* CRU_CLKGATE_CON0 */
+ /* 15-8 unused */
+ GATE(SCLK_PVTM_CORE_L, "clk_pvtm_core_l", "xin24m", 0, 7),
+ GATE(0, "pclk_dbg_core_l", "pclk_dbg_core_l_c", 0, 6),
+ GATE(0, "atclk_core_l", "atclk_core_l_c", 0, 5),
+ GATE(0, "aclkm_core_l", "aclkm_core_l_c", 0, 4),
+ GATE(0, "clk_core_l_gpll_src", "gpll", 0, 3),
+ GATE(0, "clk_core_l_dpll_src", "dpll", 0, 2),
+ GATE(0, "clk_core_l_bpll_src", "bpll", 0, 1),
+ GATE(0, "clk_core_l_lpll_src", "lpll", 0, 0),
+
+ /* CRU_CLKGATE_CON1 */
+ /* 15 - 8 unused */
+ GATE(SCLK_PVTM_CORE_B, "clk_pvtm_core_b", "xin24m", 1, 7),
+ GATE(0, "pclk_dbg_core_b","pclk_dbg_core_b_c", 1, 6),
+ GATE(0, "atclk_core_b", "atclk_core_b_c", 1, 5),
+ GATE(0, "aclkm_core_b", "aclkm_core_b_c", 1, 4),
+ GATE(0, "clk_core_b_gpll_src", "gpll", 1, 3),
+ GATE(0, "clk_core_b_dpll_src", "dpll", 1, 2),
+ GATE(0, "clk_core_b_bpll_src", "bpll", 1, 1),
+ GATE(0, "clk_core_b_lpll_src", "lpll", 1, 0),
+
+ /* CRU_CLKGATE_CON2 */
+ /* 15 - 11 unused */
+ GATE(0, "npll_cs", "npll", 2, 10),
+ GATE(0, "gpll_cs", "gpll", 2, 9),
+ GATE(0, "cpll_cs", "cpll", 2, 8),
+ GATE(SCLK_CCI_TRACE, "clk_cci_trace", "clk_cci_trace_c", 2, 7),
+ GATE(0, "gpll_cci_trace", "gpll", 2, 6),
+ GATE(0, "cpll_cci_trace", "cpll", 2, 5),
+ GATE(0, "aclk_cci_pre", "aclk_cci_pre_c", 2, 4),
+ GATE(0, "vpll_aclk_cci_src", "vpll", 2, 3),
+ GATE(0, "npll_aclk_cci_src", "npll", 2, 2),
+ GATE(0, "gpll_aclk_cci_src", "gpll", 2, 1),
+ GATE(0, "cpll_aclk_cci_src", "cpll", 2, 0),
+
+ /* CRU_CLKGATE_CON3 */
+ /* 15 - 8 unused */
+ GATE(0, "aclk_center", "aclk_center_c", 3, 7),
+ /* 6 unused */
+ /* 5 unused */
+ GATE(PCLK_DDR, "pclk_ddr", "pclk_ddr_c", 3, 4),
+ GATE(0, "clk_ddrc_gpll_src", "gpll", 3, 3),
+ GATE(0, "clk_ddrc_dpll_src", "dpll", 3, 2),
+ GATE(0, "clk_ddrc_bpll_src", "bpll", 3, 1),
+ GATE(0, "clk_ddrc_lpll_src", "lpll", 3, 0),
+
+ /* CRU_CLKGATE_CON4 */
+ /* 15 - 12 unused */
+ GATE(SCLK_PVTM_DDR, "clk_pvtm_ddr", "xin24m", 4, 11),
+ GATE(0, "clk_rga_core", "clk_rga_core_c", 4, 10),
+ GATE(0, "hclk_rga_pre", "hclk_rga_pre_c", 4, 9),
+ GATE(0, "aclk_rga_pre", "aclk_rga_pre_c", 4, 8),
+ GATE(0, "hclk_iep_pre", "hclk_iep_pre_c", 4, 7),
+ GATE(0, "aclk_iep_pre", "aclk_iep_pre_c", 4, 6),
+ GATE(SCLK_VDU_CA, "clk_vdu_ca", "clk_vdu_ca_c", 4, 5),
+ GATE(SCLK_VDU_CORE, "clk_vdu_core", "clk_vdu_core_c", 4, 4),
+ GATE(0, "hclk_vdu_pre", "hclk_vdu_pre_c", 4, 3),
+ GATE(0, "aclk_vdu_pre", "aclk_vdu_pre_c", 4, 2),
+ GATE(0, "hclk_vcodec_pre", "hclk_vcodec_pre_c", 4, 1),
+ GATE(0, "aclk_vcodec_pre", "aclk_vcodec_pre_c", 4, 0),
+
+ /* CRU_CLKGATE_CON5 */
+ /* 15 - 10 unused */
+ GATE(SCLK_MAC_TX, "clk_rmii_tx", "clk_rmii_src", 5, 9),
+ GATE(SCLK_MAC_RX, "clk_rmii_rx", "clk_rmii_src", 5, 8),
+ GATE(SCLK_MACREF, "clk_mac_ref", "clk_rmii_src", 5, 7),
+ GATE(SCLK_MACREF_OUT, "clk_mac_refout", "clk_rmii_src", 5, 6),
+ GATE(SCLK_MAC, "clk_gmac", "clk_gmac_c", 5, 5),
+ GATE(PCLK_PERIHP, "pclk_perihp", "pclk_perihp_c", 5, 4),
+ GATE(HCLK_PERIHP, "hclk_perihp", "hclk_perihp_c", 5, 3),
+ GATE(ACLK_PERIHP, "aclk_perihp", "aclk_perihp_c", 5, 2),
+ GATE(0, "cpll_aclk_perihp_src", "cpll", 5, 1),
+ GATE(0, "gpll_aclk_perihp_src", "gpll", 5, 0),
+
+ /* CRU_CLKGATE_CON6 */
+ /* 15 unused */
+ GATE(SCLK_EMMC, "clk_emmc", "clk_emmc_c", 6, 14),
+ GATE(0, "cpll_aclk_emmc_src", "cpll", 6, 13),
+ GATE(0, "gpll_aclk_emmc_src", "gpll", 6, 12),
+ GATE(0, "pclk_gmac_pre", "pclk_gmac_pre_c", 6, 11),
+ GATE(0, "aclk_gmac_pre", "aclk_gmac_pre_c", 6, 10),
+ GATE(0, "cpll_aclk_gmac_src", "cpll", 6, 9),
+ GATE(0, "gpll_aclk_gmac_src", "gpll", 6, 8),
+ /* 7 unused */
+ GATE(SCLK_USB2PHY1_REF, "clk_usb2phy1_ref", "xin24m", 6, 6),
+ GATE(SCLK_USB2PHY0_REF, "clk_usb2phy0_ref", "xin24m", 6, 5),
+ GATE(SCLK_HSICPHY, "clk_hsicphy", "clk_hsicphy_c", 6, 4),
+ GATE(0, "clk_pcie_core_cru", "clk_pcie_core_cru_c", 6, 3),
+ GATE(SCLK_PCIE_PM, "clk_pcie_pm", "clk_pcie_pm_c", 6, 2),
+ GATE(SCLK_SDMMC, "clk_sdmmc", "clk_sdmmc_c", 6, 1),
+ GATE(SCLK_SDIO, "clk_sdio", "clk_sdio_c", 6, 0),
+
+ /* CRU_CLKGATE_CON7 */
+ /* 15 - 10 unused */
+ GATE(FCLK_CM0S, "fclk_cm0s", "fclk_cm0s_c", 7, 9),
+ GATE(SCLK_CRYPTO1, "clk_crypto1", "clk_crypto1_c", 7, 8),
+ GATE(SCLK_CRYPTO0, "clk_crypto0", "clk_crypto0_c", 7, 7),
+ GATE(0, "cpll_fclk_cm0s_src", "cpll", 7, 6),
+ GATE(0, "gpll_fclk_cm0s_src", "gpll", 7, 5),
+ GATE(PCLK_PERILP0, "pclk_perilp0", "pclk_perilp0_c", 7, 4),
+ GATE(HCLK_PERILP0, "hclk_perilp0", "hclk_perilp0_c", 7, 3),
+ GATE(ACLK_PERILP0, "aclk_perilp0", "aclk_perilp0_c", 7, 2),
+ GATE(0, "cpll_aclk_perilp0_src", "cpll", 7, 1),
+ GATE(0, "gpll_aclk_perilp0_src", "gpll", 7, 0),
+
+ /* CRU_CLKGATE_CON8 */
+ GATE(SCLK_SPDIF_8CH, "clk_spdif", "clk_spdif_mux", 8, 15),
+ GATE(0, "clk_spdif_frac", "clk_spdif_frac_c", 8, 14),
+ GATE(0, "clk_spdif_div", "clk_spdif_div_c", 8, 13),
+ GATE(SCLK_I2S_8CH_OUT, "clk_i2sout", "clk_i2sout_c", 8, 12),
+ GATE(SCLK_I2S2_8CH, "clk_i2s2", "clk_i2s2_mux", 8, 11),
+ GATE(0, "clk_i2s2_frac", "clk_i2s2_frac_c", 8, 10),
+ GATE(0, "clk_i2s2_div", "clk_i2s2_div_c", 8, 9),
+ GATE(SCLK_I2S1_8CH, "clk_i2s1", "clk_i2s1_mux", 8, 8),
+ GATE(0, "clk_i2s1_frac", "clk_i2s1_frac_c", 8, 7),
+ GATE(0, "clk_i2s1_div", "clk_i2s1_div_c", 8, 6),
+ GATE(SCLK_I2S0_8CH, "clk_i2s0", "clk_i2s0_mux", 8, 5),
+ GATE(0, "clk_i2s0_frac","clk_i2s0_frac_c", 8, 4),
+ GATE(0, "clk_i2s0_div","clk_i2s0_div_c", 8, 3),
+ GATE(PCLK_PERILP1, "pclk_perilp1", "pclk_perilp1_c", 8, 2),
+ GATE(HCLK_PERILP1, "cpll_hclk_perilp1_src", "cpll", 8, 1),
+ GATE(0, "gpll_hclk_perilp1_src", "gpll", 8, 0),
+
+ /* CRU_CLKGATE_CON9 */
+ GATE(SCLK_SPI4, "clk_spi4", "clk_spi4_c", 9, 15),
+ GATE(SCLK_SPI2, "clk_spi2", "clk_spi2_c", 9, 14),
+ GATE(SCLK_SPI1, "clk_spi1", "clk_spi1_c", 9, 13),
+ GATE(SCLK_SPI0, "clk_spi0", "clk_spi0_c", 9, 12),
+ GATE(SCLK_SARADC, "clk_saradc", "clk_saradc_c", 9, 11),
+ GATE(SCLK_TSADC, "clk_tsadc", "clk_tsadc_c", 9, 10),
+ /* 9 - 8 unused */
+ GATE(0, "clk_uart3_frac", "clk_uart3_frac_c", 9, 7),
+ GATE(0, "clk_uart3_div", "clk_uart3_div_c", 9, 6),
+ GATE(0, "clk_uart2_frac", "clk_uart2_frac_c", 9, 5),
+ GATE(0, "clk_uart2_div", "clk_uart2_div_c", 9, 4),
+ GATE(0, "clk_uart1_frac", "clk_uart1_frac_c", 9, 3),
+ GATE(0, "clk_uart1_div", "clk_uart1_div_c", 9, 2),
+ GATE(0, "clk_uart0_frac", "clk_uart0_frac_c", 9, 1),
+ GATE(0, "clk_uart0_div", "clk_uart0_div_c", 9, 0),
+
+ /* CRU_CLKGATE_CON10 */
+ GATE(SCLK_VOP1_PWM, "clk_vop1_pwm", "clk_vop1_pwm_c", 10, 15),
+ GATE(SCLK_VOP0_PWM, "clk_vop0_pwm", "clk_vop0_pwm_c", 10, 14),
+ GATE(DCLK_VOP0_DIV, "dclk_vop0_div", "dclk_vop0_div_c", 10, 12),
+ GATE(DCLK_VOP1_DIV, "dclk_vop1_div", "dclk_vop1_div_c", 10, 13),
+ GATE(0, "hclk_vop1_pre", "hclk_vop1_pre_c", 10, 11),
+ GATE(ACLK_VOP1_PRE, "aclk_vop1_pre", "aclk_vop1_pre_c", 10, 10),
+ GATE(0, "hclk_vop0_pre", "hclk_vop0_pre_c", 10, 9),
+ GATE(ACLK_VOP0_PRE, "aclk_vop0_pre", "aclk_vop0_pre_c", 10, 8),
+ GATE(0, "clk_cifout_src", "clk_cifout_src_c", 10, 7),
+ GATE(SCLK_SPDIF_REC_DPTX, "clk_spdif_rec_dptx", "clk_spdif_rec_dptx_c", 10, 6),
+ GATE(SCLK_I2C7, "clk_i2c7", "clk_i2c7_c", 10, 5),
+ GATE(SCLK_I2C3, "clk_i2c3", "clk_i2c3_c", 10, 4),
+ GATE(SCLK_I2C6, "clk_i2c6", "clk_i2c6_c", 10, 3),
+ GATE(SCLK_I2C2, "clk_i2c2", "clk_i2c2_c", 10, 2),
+ GATE(SCLK_I2C5, "clk_i2c5", "clk_i2c5_c", 10, 1),
+ GATE(SCLK_I2C1, "clk_i2c1", "clk_i2c1_c", 10, 0),
+
+ /* CRU_CLKGATE_CON11 */
+ GATE(SCLK_MIPIDPHY_CFG, "clk_mipidphy_cfg", "xin24m", 11, 15),
+ GATE(SCLK_MIPIDPHY_REF, "clk_mipidphy_ref", "xin24m", 11, 14),
+ /* 13-12 unused */
+ GATE(PCLK_EDP, "pclk_edp", "pclk_edp_c", 11, 11),
+ GATE(PCLK_HDCP, "pclk_hdcp", "pclk_hdcp_c", 11, 10),
+ /* 9 unused */
+ GATE(SCLK_DP_CORE, "clk_dp_core", "clk_dp_core_c", 11, 8),
+ GATE(SCLK_HDMI_CEC, "clk_hdmi_cec", "clk_hdmi_cec_c", 11, 7),
+ GATE(SCLK_HDMI_SFR, "clk_hdmi_sfr", "xin24m", 11, 6),
+ GATE(SCLK_ISP1, "clk_isp1", "clk_isp1_c", 11, 5),
+ GATE(SCLK_ISP0, "clk_isp0", "clk_isp0_c", 11, 4),
+ GATE(HCLK_HDCP, "hclk_hdcp", "hclk_hdcp_c", 11, 3),
+ GATE(ACLK_HDCP, "aclk_hdcp", "aclk_hdcp_c", 11, 2),
+ GATE(PCLK_VIO, "pclk_vio", "pclk_vio_c", 11, 1),
+ GATE(ACLK_VIO, "aclk_vio", "aclk_vio_c", 11, 0),
+
+ /* CRU_CLKGATE_CON12 */
+ /* 15 - 14 unused */
+ GATE(HCLK_SD, "hclk_sd", "hclk_sd_c", 12, 13),
+ GATE(ACLK_GIC_PRE, "aclk_gic_pre", "aclk_gic_pre_c", 12, 12),
+ GATE(HCLK_ISP1, "hclk_isp1", "hclk_isp1_c", 12, 11),
+ GATE(ACLK_ISP1, "aclk_isp1", "aclk_isp1_c", 12, 10),
+ GATE(HCLK_ISP0, "hclk_isp0", "hclk_isp0_c", 12, 9),
+ GATE(ACLK_ISP0, "aclk_isp0", "aclk_isp0_c", 12, 8),
+ /* 7 unused */
+ GATE(SCLK_PCIEPHY_REF100M, "clk_pciephy_ref100m", "clk_pciephy_ref100m_c", 12, 6),
+ /* 5 unused */
+ GATE(SCLK_USB3OTG1_SUSPEND, "clk_usb3otg1_suspend", "clk_usb3otg1_suspend_c", 12, 4),
+ GATE(SCLK_USB3OTG0_SUSPEND, "clk_usb3otg0_suspend", "clk_usb3otg0_suspend_c", 12, 3),
+ GATE(SCLK_USB3OTG1_REF, "clk_usb3otg1_ref", "xin24m", 12, 2),
+ GATE(SCLK_USB3OTG0_REF, "clk_usb3otg0_ref", "xin24m", 12, 1),
+ GATE(ACLK_USB3, "aclk_usb3", "aclk_usb3_c", 12, 0),
+
+ /* CRU_CLKGATE_CON13 */
+ GATE(SCLK_TESTCLKOUT2, "clk_testout2", "clk_testout2_c", 13, 15),
+ GATE(SCLK_TESTCLKOUT1, "clk_testout1", "clk_testout1_c", 13, 14),
+ GATE(SCLK_SPI5, "clk_spi5", "clk_spi5_c", 13, 13),
+ GATE(0, "clk_usbphy0_480m_src", "clk_usbphy0_480m", 13, 12),
+ GATE(0, "clk_usbphy1_480m_src", "clk_usbphy1_480m", 13, 12),
+ GATE(0, "clk_test", "clk_test_c", 13, 11),
+ /* 10 unused */
+ GATE(0, "clk_test_frac", "clk_test_frac_c", 13, 9),
+ /* 8 unused */
+ GATE(SCLK_UPHY1_TCPDCORE, "clk_uphy1_tcpdcore", "clk_uphy1_tcpdcore_c", 13, 7),
+ GATE(SCLK_UPHY1_TCPDPHY_REF, "clk_uphy1_tcpdphy_ref", "clk_uphy1_tcpdphy_ref_c", 13, 6),
+ GATE(SCLK_UPHY0_TCPDCORE, "clk_uphy0_tcpdcore", "clk_uphy0_tcpdcore_c", 13, 5),
+ GATE(SCLK_UPHY0_TCPDPHY_REF, "clk_uphy0_tcpdphy_ref", "clk_uphy0_tcpdphy_ref_c", 13, 4),
+ /* 3 - 2 unused */
+ GATE(SCLK_PVTM_GPU, "aclk_pvtm_gpu", "xin24m", 13, 1),
+ GATE(0, "aclk_gpu_pre", "aclk_gpu_pre_c", 13, 0),
+
+ /* CRU_CLKGATE_CON14 */
+ /* 15 - 14 unused */
+ GATE(ACLK_PERF_CORE_L, "aclk_perf_core_l", "aclkm_core_l", 14, 13),
+ GATE(ACLK_CORE_ADB400_CORE_L_2_CCI500, "aclk_core_adb400_core_l_2_cci500", "aclkm_core_l", 14, 12),
+ GATE(ACLK_GIC_ADB400_CORE_L_2_GIC, "aclk_core_adb400_core_l_2_gic", "armclkl", 14, 11),
+ GATE(ACLK_GIC_ADB400_GIC_2_CORE_L, "aclk_core_adb400_gic_2_core_l", "armclkl", 14, 10),
+ GATE(0, "clk_dbg_pd_core_l", "armclkl", 14, 9),
+ /* 8 - 7 unused */
+ GATE(ACLK_PERF_CORE_B, "aclk_perf_core_b", "aclkm_core_b", 14, 6),
+ GATE(ACLK_CORE_ADB400_CORE_B_2_CCI500, "aclk_core_adb400_core_b_2_cci500", "aclkm_core_b", 14, 5),
+ GATE(ACLK_GIC_ADB400_CORE_B_2_GIC, "aclk_core_adb400_core_b_2_gic", "armclkb", 14, 4),
+ GATE(ACLK_GIC_ADB400_GIC_2_CORE_B, "aclk_core_adb400_gic_2_core_b", "armclkb", 14, 3),
+ GATE(0, "pclk_dbg_cxcs_pd_core_b", "pclk_dbg_core_b", 14, 2),
+ GATE(0, "clk_dbg_pd_core_b", "armclkb", 14, 1),
+ /* 0 unused */
+
+ /* CRU_CLKGATE_CON15 */
+ /* 15 - 8 unused */
+ GATE(ACLK_CCI_GRF, "aclk_cci_grf", "aclk_cci_pre", 15, 7),
+ GATE(0, "clk_dbg_noc", "clk_cs", 15, 6),
+ GATE(0, "clk_dbg_cxcs", "clk_cs", 15, 5),
+ GATE(ACLK_CCI_NOC1, "aclk_cci_noc1", "aclk_cci_pre", 15, 4),
+ GATE(ACLK_CCI_NOC0, "aclk_cci_noc0", "aclk_cci_pre", 15, 3),
+ GATE(ACLK_CCI, "aclk_cci", "aclk_cci_pre", 15, 2),
+ GATE(ACLK_ADB400M_PD_CORE_B, "aclk_adb400m_pd_core_b", "aclk_cci_pre", 15, 1),
+ GATE(ACLK_ADB400M_PD_CORE_L, "aclk_adb400m_pd_core_l", "aclk_cci_pre", 15, 0),
+
+ /* CRU_CLKGATE_CON16 */
+ /* 15 - 12 unused */
+ GATE(HCLK_RGA_NOC, "hclk_rga_noc", "hclk_rga_pre", 16, 11),
+ GATE(HCLK_RGA, "hclk_rga", "hclk_rga_pre", 16, 10),
+ GATE(ACLK_RGA_NOC, "aclk_rga_noc", "aclk_rga_pre", 16, 9),
+ GATE(ACLK_RGA, "aclk_rga", "aclk_rga_pre", 16, 8),
+ /* 7 - 4 unused */
+ GATE(HCLK_IEP_NOC, "hclk_iep_noc", "hclk_iep_pre", 16, 3),
+ GATE(HCLK_IEP, "hclk_iep", "hclk_iep_pre", 16, 2),
+ GATE(ACLK_IEP_NOC, "aclk_iep_noc", "aclk_iep_pre", 16, 1),
+ GATE(ACLK_IEP, "aclk_iep", "aclk_iep_pre", 16, 0),
+
+ /* CRU_CLKGATE_CON17 */
+ /* 15 - 12 unused */
+ GATE(HCLK_VDU_NOC, "hclk_vdu_noc", "hclk_vdu_pre", 17, 11),
+ GATE(HCLK_VDU, "hclk_vdu", "hclk_vdu_pre", 17, 10),
+ GATE(ACLK_VDU_NOC, "aclk_vdu_noc", "aclk_vdu_pre", 17, 9),
+ GATE(ACLK_VDU, "aclk_vdu", "aclk_vdu_pre", 17, 8),
+ GATE(0, "hclk_vcodec_noc", "hclk_vcodec_pre", 17, 3),
+ GATE(HCLK_VCODEC, "hclk_vcodec", "hclk_vcodec_pre", 17, 2),
+ GATE(0, "aclk_vcodec_noc", "aclk_vcodec_pre", 17, 1),
+ GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vcodec_pre", 17, 0),
+
+ /* CRU_CLKGATE_CON18 */
+ GATE(PCLK_CIC, "pclk_cic", "pclk_ddr", 18, 15),
+ GATE(0, "clk_ddr_mon_timer", "xin24m", 18, 14),
+ GATE(0, "clk_ddr_mon", "clk_ddrc_div2", 18, 13),
+ GATE(PCLK_DDR_MON, "pclk_ddr_mon", "pclk_ddr", 18, 12),
+ GATE(0, "clk_ddr_cic", "clk_ddrc_div2", 18, 11),
+ GATE(PCLK_CENTER_MAIN_NOC, "pclk_center_main_noc", "pclk_ddr", 18, 10),
+ GATE(0, "clk_ddrcfg_msch1", "clk_ddrc_div2", 18, 9),
+ GATE(0, "clk_ddrphy1", "clk_ddrc_div2", 18, 8),
+ GATE(0, "clk_ddrphy_ctrl1", "clk_ddrc_div2", 18, 7),
+ GATE(0, "clk_ddrc1", "clk_ddrc_div2", 18, 6),
+ GATE(0, "clk_ddr1_msch", "clk_ddrc_div2", 18, 5),
+ GATE(0, "clk_ddrcfg_msch0", "clk_ddrc_div2", 18, 4),
+ GATE(0, "clk_ddrphy0", "clk_ddrc_div2", 18, 3),
+ GATE(0, "clk_ddrphy_ctrl0", "clk_ddrc_div2", 18, 2),
+ GATE(0, "clk_ddrc0", "clk_ddrc_div2", 18, 1),
+
+ /* CRU_CLKGATE_CON19 */
+ /* 15 - 3 unused */
+ GATE(PCLK_DDR_SGRF, "pclk_ddr_sgrf", "pclk_ddr", 19, 2),
+ GATE(ACLK_CENTER_PERI_NOC, "aclk_center_peri_noc", "aclk_center", 19, 1),
+ GATE(ACLK_CENTER_MAIN_NOC, "aclk_center_main_noc", "aclk_center", 19, 0),
+
+ /* CRU_CLKGATE_CON20 */
+ GATE(0, "hclk_ahb1tom", "hclk_perihp", 20, 15),
+ GATE(0, "pclk_perihp_noc", "pclk_perihp", 20, 14),
+ GATE(0, "hclk_perihp_noc", "hclk_perihp", 20, 13),
+ GATE(0, "aclk_perihp_noc", "aclk_perihp", 20, 12),
+ GATE(PCLK_PCIE, "pclk_pcie", "pclk_perihp", 20, 11),
+ GATE(ACLK_PCIE, "aclk_pcie", "aclk_perihp", 20, 10),
+ GATE(HCLK_HSIC, "hclk_hsic", "hclk_perihp", 20, 9),
+ GATE(HCLK_HOST1_ARB, "hclk_host1_arb", "hclk_perihp", 20, 8),
+ GATE(HCLK_HOST1, "hclk_host1", "hclk_perihp", 20, 7),
+ GATE(HCLK_HOST0_ARB, "hclk_host0_arb", "hclk_perihp", 20, 6),
+ GATE(HCLK_HOST0, "hclk_host0", "hclk_perihp", 20, 5),
+ GATE(PCLK_PERIHP_GRF, "pclk_perihp_grf", "pclk_perihp", 20, 4),
+ GATE(ACLK_PERF_PCIE, "aclk_perf_pcie", "aclk_perihp", 20, 2),
+ /* 1 - 0 unused */
+
+ /* CRU_CLKGATE_CON21 */
+ /* 15 - 10 unused */
+ GATE(PCLK_UPHY1_TCPD_G, "pclk_uphy1_tcpd_g", "pclk_alive", 21, 9),
+ GATE(PCLK_UPHY1_TCPHY_G, "pclk_uphy1_tcphy_g", "pclk_alive", 21, 8),
+ /* 7 unused */
+ GATE(PCLK_UPHY0_TCPD_G, "pclk_uphy0_tcpd_g", "pclk_alive", 21, 6),
+ GATE(PCLK_UPHY0_TCPHY_G, "pclk_uphy0_tcphy_g", "pclk_alive", 21, 5),
+ GATE(PCLK_USBPHY_MUX_G, "pclk_usbphy_mux_g", "pclk_alive", 21, 4),
+ GATE(SCLK_DPHY_RX0_CFG, "clk_dphy_rx0_cfg", "clk_mipidphy_cfg", 21, 3),
+ GATE(SCLK_DPHY_TX1RX1_CFG, "clk_dphy_tx1rx1_cfg", "clk_mipidphy_cfg", 21, 2),
+ GATE(SCLK_DPHY_TX0_CFG, "clk_dphy_tx0_cfg", "clk_mipidphy_cfg", 21, 1),
+ GATE(SCLK_DPHY_PLL, "clk_dphy_pll", "clk_mipidphy_ref", 21, 0),
+
+ /* CRU_CLKGATE_CON22 */
+ GATE(PCLK_EFUSE1024S, "pclk_efuse1024s", "pclk_perilp1", 22, 15),
+ GATE(PCLK_EFUSE1024NS, "pclk_efuse1024ns", "pclk_perilp1", 22, 14),
+ GATE(PCLK_TSADC, "pclk_tsadc", "pclk_perilp1", 22, 13),
+ GATE(PCLK_SARADC, "pclk_saradc", "pclk_perilp1", 22, 12),
+ GATE(PCLK_MAILBOX0, "pclk_mailbox0", "pclk_perilp1", 22, 11),
+ GATE(PCLK_I2C3, "pclk_i2c3", "pclk_perilp1", 22, 10),
+ GATE(PCLK_I2C2, "pclk_i2c2", "pclk_perilp1", 22, 9),
+ GATE(PCLK_I2C6, "pclk_i2c6", "pclk_perilp1", 22, 8),
+ GATE(PCLK_I2C5, "pclk_i2c5", "pclk_perilp1", 22, 7),
+ GATE(PCLK_I2C1, "pclk_i2c1", "pclk_perilp1", 22, 6),
+ GATE(PCLK_I2C7, "pclk_i2c7", "pclk_perilp1", 22, 5),
+ GATE(PCLK_UART3, "pclk_uart3", "pclk_perilp1", 22, 3),
+ GATE(PCLK_UART2, "pclk_uart2", "pclk_perilp1", 22, 2),
+ GATE(PCLK_UART1, "pclk_uart1", "pclk_perilp1", 22, 1),
+ GATE(PCLK_UART0, "pclk_uart0", "pclk_perilp1", 22, 0),
+
+ /* CRU_CLKGATE_CON23 */
+ /* 15 - 14 unused */
+ GATE(PCLK_SPI4, "pclk_spi4", "pclk_perilp1", 23, 13),
+ GATE(PCLK_SPI2, "pclk_spi2", "pclk_perilp1", 23, 12),
+ GATE(PCLK_SPI1, "pclk_spi1", "pclk_perilp1", 23, 11),
+ GATE(PCLK_SPI0, "pclk_spi0", "pclk_perilp1", 23, 10),
+ GATE(PCLK_DCF, "pclk_dcf", "pclk_perilp0", 23, 9),
+ GATE(ACLK_DCF, "aclk_dcf", "aclk_perilp0", 23, 8),
+ GATE(SCLK_INTMEM5, "clk_intmem5", "aclk_perilp0", 23, 7),
+ GATE(SCLK_INTMEM4, "clk_intmem4", "aclk_perilp0", 23, 6),
+ GATE(SCLK_INTMEM3, "clk_intmem3", "aclk_perilp0", 23, 5),
+ GATE(SCLK_INTMEM2, "clk_intmem2", "aclk_perilp0", 23, 4),
+ GATE(SCLK_INTMEM1, "clk_intmem1", "aclk_perilp0", 23, 3),
+ GATE(SCLK_INTMEM0, "clk_intmem0", "aclk_perilp0", 23, 2),
+ GATE(ACLK_TZMA, "aclk_tzma", "aclk_perilp0", 23, 1),
+ GATE(ACLK_INTMEM, "aclk_intmem", "aclk_perilp0", 23, 0),
+
+ /* CRU_CLKGATE_CON24 */
+ GATE(HCLK_S_CRYPTO1, "hclk_s_crypto1", "hclk_perilp0", 24, 15),
+ GATE(HCLK_M_CRYPTO1, "hclk_m_crypto1", "hclk_perilp0", 24, 14),
+ GATE(PCLK_PERIHP_GRF, "pclk_perilp_sgrf", "pclk_perilp1", 24, 13),
+ GATE(SCLK_M0_PERILP_DEC, "clk_m0_perilp_dec", "fclk_cm0s", 24, 11),
+ GATE(DCLK_M0_PERILP, "dclk_m0_perilp", "fclk_cm0s", 24, 10),
+ GATE(HCLK_M0_PERILP, "hclk_m0_perilp", "fclk_cm0s", 24, 9),
+ GATE(SCLK_M0_PERILP, "sclk_m0_perilp", "fclk_cm0s", 24, 8),
+ /* 7 unused */
+ GATE(HCLK_S_CRYPTO0, "hclk_s_crypto0", "hclk_perilp0", 24, 6),
+ GATE(HCLK_M_CRYPTO0, "hclk_m_crypto0", "hclk_perilp0", 24, 5),
+ GATE(HCLK_ROM, "hclk_rom", "hclk_perilp0", 24, 4),
+ /* 3 - 0 unused */
+
+ /* CRU_CLKGATE_CON25 */
+ /* 15 - 13 unused */
+ GATE(0, "hclk_sdio_noc", "hclk_perilp1", 25, 12),
+ GATE(HCLK_M0_PERILP_NOC, "hclk_m0_perilp_noc", "fclk_cm0s", 25, 11),
+ GATE(0, "pclk_perilp1_noc", "pclk_perilp1", 25, 10),
+ GATE(0, "hclk_perilp1_noc", "hclk_perilp1", 25, 9),
+ GATE(HCLK_PERILP0_NOC, "hclk_perilp0_noc", "hclk_perilp0", 25, 8),
+ GATE(ACLK_PERILP0_NOC, "aclk_perilp0_noc", "aclk_perilp0", 25, 7),
+ GATE(ACLK_DMAC1_PERILP, "aclk_dmac1_perilp", "aclk_perilp0", 25, 6),
+ GATE(ACLK_DMAC0_PERILP, "aclk_dmac0_perilp", "aclk_perilp0", 25, 5),
+ /* 4 - 0 unused */
+
+ /* CRU_CLKGATE_CON26 */
+ /* 15 - 12 unused */
+ GATE(SCLK_TIMER11, "clk_timer11", "xin24m", 26, 11),
+ GATE(SCLK_TIMER10, "clk_timer10", "xin24m", 26, 10),
+ GATE(SCLK_TIMER09, "clk_timer09", "xin24m", 26, 9),
+ GATE(SCLK_TIMER08, "clk_timer08", "xin24m", 26, 8),
+ GATE(SCLK_TIMER07, "clk_timer07", "xin24m", 26, 7),
+ GATE(SCLK_TIMER06, "clk_timer06", "xin24m", 26, 6),
+ GATE(SCLK_TIMER05, "clk_timer05", "xin24m", 26, 5),
+ GATE(SCLK_TIMER04, "clk_timer04", "xin24m", 26, 4),
+ GATE(SCLK_TIMER03, "clk_timer03", "xin24m", 26, 3),
+ GATE(SCLK_TIMER02, "clk_timer02", "xin24m", 26, 2),
+ GATE(SCLK_TIMER01, "clk_timer01", "xin24m", 26, 1),
+ GATE(SCLK_TIMER00, "clk_timer00", "xin24m", 26, 0),
+
+ /* CRU_CLKGATE_CON27 */
+ /* 15 - 9 unused */
+ GATE(ACLK_ISP1_WRAPPER, "aclk_isp1_wrapper", "hclk_isp1", 27, 8),
+ GATE(HCLK_ISP1_WRAPPER, "hclk_isp1_wrapper", "aclk_isp0", 27, 7),
+ GATE(PCLK_ISP1_WRAPPER, "pclkin_isp1_wrapper", "pclkin_cif", 27, 6),
+ GATE(ACLK_ISP0_WRAPPER, "aclk_isp0_wrapper", "aclk_isp0", 27, 5),
+ GATE(HCLK_ISP0_WRAPPER, "hclk_isp0_wrapper", "hclk_isp0", 27, 4),
+ GATE(ACLK_ISP1_NOC, "aclk_isp1_noc", "aclk_isp1", 27, 3),
+ GATE(HCLK_ISP1_NOC, "hclk_isp1_noc", "hclk_isp1", 27, 2),
+ GATE(ACLK_ISP0_NOC, "aclk_isp0_noc", "aclk_isp0", 27, 1),
+ GATE(HCLK_ISP0_NOC, "hclk_isp0_noc", "hclk_isp0", 27, 0),
+
+ /* CRU_CLKGATE_CON28 */
+ /* 15 - 8 unused */
+ GATE(ACLK_VOP1, "aclk_vop1", "aclk_vop1_pre", 28, 7),
+ GATE(HCLK_VOP1, "hclk_vop1", "hclk_vop1_pre", 28, 6),
+ GATE(ACLK_VOP1_NOC, "aclk_vop1_noc", "aclk_vop1_pre", 28, 5),
+ GATE(HCLK_VOP1_NOC, "hclk_vop1_noc", "hclk_vop1_pre", 28, 4),
+ GATE(ACLK_VOP0, "aclk_vop0", "aclk_vop0_pre", 28, 3),
+ GATE(HCLK_VOP0, "hclk_vop0", "hclk_vop0_pre", 28, 2),
+ GATE(ACLK_VOP0_NOC, "aclk_vop0_noc", "aclk_vop0_pre", 28, 1),
+ GATE(HCLK_VOP0_NOC, "hclk_vop0_noc", "hclk_vop0_pre", 28, 0),
+
+ /* CRU_CLKGATE_CON29 */
+ /* 15 - 13 unused */
+ GATE(PCLK_VIO_GRF, "pclk_vio_grf", "pclk_vio", 29, 12),
+ GATE(PCLK_GASKET, "pclk_gasket", "pclk_hdcp", 29, 11),
+ GATE(ACLK_HDCP22, "aclk_hdcp22", "aclk_hdcp", 29, 10),
+ GATE(HCLK_HDCP22, "hclk_hdcp22", "hclk_hdcp", 29, 9),
+ GATE(PCLK_HDCP22, "pclk_hdcp22", "pclk_hdcp", 29, 8),
+ GATE(PCLK_DP_CTRL, "pclk_dp_ctrl", "pclk_hdcp", 29, 7),
+ GATE(PCLK_HDMI_CTRL, "pclk_hdmi_ctrl", "pclk_hdcp", 29, 6),
+ GATE(HCLK_HDCP_NOC, "hclk_hdcp_noc", "hclk_hdcp", 29, 5),
+ GATE(ACLK_HDCP_NOC, "aclk_hdcp_noc", "aclk_hdcp", 29, 4),
+ GATE(PCLK_HDCP_NOC, "pclk_hdcp_noc", "pclk_hdcp", 29, 3),
+ GATE(PCLK_MIPI_DSI1, "pclk_mipi_dsi1", "pclk_vio", 29, 2),
+ GATE(PCLK_MIPI_DSI0, "pclk_mipi_dsi0", "pclk_vio", 29, 1),
+ GATE(ACLK_VIO_NOC, "aclk_vio_noc", "aclk_vio", 29, 0),
+
+ /* CRU_CLKGATE_CON30 */
+ /* 15 - 12 unused */
+ GATE(ACLK_GPU_GRF, "aclk_gpu_grf", "aclk_gpu_pre", 30, 11),
+ GATE(ACLK_PERF_GPU, "aclk_perf_gpu", "aclk_gpu_pre", 30, 10),
+ /* 9 unused */
+ GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 30, 8),
+ /* 7 - 5 unused */
+ GATE(ACLK_USB3_GRF, "aclk_usb3_grf", "aclk_usb3", 30, 4),
+ GATE(ACLK_USB3_RKSOC_AXI_PERF, "aclk_usb3_rksoc_axi_perf", "aclk_usb3", 30, 3),
+ GATE(ACLK_USB3OTG1, "aclk_usb3otg1", "aclk_usb3", 30, 2),
+ GATE(ACLK_USB3OTG0, "aclk_usb3otg0", "aclk_usb3", 30, 1),
+ GATE(ACLK_USB3_NOC, "aclk_usb3_noc", "aclk_usb3", 30, 0),
+
+ /* CRU_CLKGATE_CON31 */
+ /* 15 - 11 unused */
+ GATE(PCLK_SGRF, "pclk_sgrf", "pclk_alive", 31, 10),
+ GATE(PCLK_PMU_INTR_ARB, "pclk_pmu_intr_arb", "pclk_alive", 31, 9),
+ GATE(PCLK_HSICPHY, "pclk_hsicphy", "pclk_perihp", 31, 8),
+ GATE(PCLK_TIMER1, "pclk_timer1", "pclk_alive", 31, 7),
+ GATE(PCLK_TIMER0, "pclk_timer0", "pclk_alive", 31, 6),
+ GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_alive", 31, 5),
+ GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_alive", 31, 4),
+ GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_alive", 31, 3),
+ GATE(PCLK_INTR_ARB, "pclk_intr_arb", "pclk_alive", 31, 2),
+ GATE(PCLK_GRF, "pclk_grf", "pclk_alive", 31, 1),
+ /* 0 unused */
+
+ /* CRU_CLKGATE_CON32 */
+ /* 15 - 14 unused */
+ GATE(PCLK_EDP_CTRL, "pclk_edp_ctrl", "pclk_edp", 32, 13),
+ GATE(PCLK_EDP_NOC, "pclk_edp_noc", "pclk_edp", 32, 12),
+ /* 11 unused */
+ GATE(ACLK_EMMC_GRF, "aclk_emmcgrf", "aclk_emmc", 32, 10),
+ GATE(ACLK_EMMC_NOC, "aclk_emmc_noc", "aclk_emmc", 32, 9),
+ GATE(ACLK_EMMC_CORE, "aclk_emmccore", "aclk_emmc", 32, 8),
+ /* 7 - 5 unused */
+ GATE(ACLK_PERF_GMAC, "aclk_perf_gmac", "aclk_gmac_pre", 32, 4),
+ GATE(PCLK_GMAC_NOC, "pclk_gmac_noc", "pclk_gmac_pre", 32, 3),
+ GATE(PCLK_GMAC, "pclk_gmac", "pclk_gmac_pre", 32, 2),
+ GATE(ACLK_GMAC_NOC, "aclk_gmac_noc", "aclk_gmac_pre", 32, 1),
+ GATE(ACLK_GMAC, "aclk_gmac", "aclk_gmac_pre", 32, 0),
+
+ /* CRU_CLKGATE_CON33 */
+ /* 15 - 10 unused */
+ GATE(0, "hclk_sdmmc_noc", "hclk_sd", 33, 9),
+ GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_sd", 33, 8),
+ GATE(ACLK_GIC_ADB400_GIC_2_CORE_B, "aclk_gic_adb400_gic_2_core_b", "aclk_gic_pre", 33, 5),
+ GATE(ACLK_GIC_ADB400_GIC_2_CORE_L, "aclk_gic_adb400_gic_2_core_l", "aclk_gic_pre", 33, 4),
+ GATE(ACLK_GIC_ADB400_CORE_B_2_GIC, "aclk_gic_adb400_core_b_2_gic", "aclk_gic_pre", 33, 3),
+ GATE(ACLK_GIC_ADB400_CORE_L_2_GIC, "aclk_gic_adb400_core_l_2_gic", "aclk_gic_pre", 33, 2),
+ GATE(ACLK_GIC_NOC, "aclk_gic_noc", "aclk_gic_pre", 33, 1),
+ GATE(ACLK_GIC, "aclk_gic", "aclk_gic_pre", 33, 0),
+
+ /* CRU_CLKGATE_CON34 */
+ /* 15 - 7 unused */
+ GATE(0, "hclk_sdioaudio_noc", "hclk_perilp1", 34, 6),
+ GATE(PCLK_SPI5, "pclk_spi5", "hclk_perilp1", 34, 5),
+ GATE(HCLK_SDIO, "hclk_sdio", "hclk_perilp1", 34, 4),
+ GATE(HCLK_SPDIF, "hclk_spdif", "hclk_perilp1", 34, 3),
+ GATE(HCLK_I2S2_8CH, "hclk_i2s2", "hclk_perilp1", 34, 2),
+ GATE(HCLK_I2S1_8CH, "hclk_i2s1", "hclk_perilp1", 34, 1),
+ GATE(HCLK_I2S0_8CH, "hclk_i2s0", "hclk_perilp1", 34, 0),
+};
+
+#define PLL_RATE(_hz, _ref, _fb, _post1, _post2, _dsmpd) \
+{ \
+ .freq = _hz, \
+ .refdiv = _ref, \
+ .fbdiv = _fb, \
+ .postdiv1 = _post1, \
+ .postdiv2 = _post2, \
+ .dsmpd = _dsmpd, \
+}
+
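+/*
+ * With dsmpd = 1 (integer mode) each entry below satisfies the usual Rockchip
+ * PLL formula freq = 24 MHz / refdiv * fbdiv / (postdiv1 * postdiv2),
+ * e.g. 1600000000 = 24000000 / 3 * 200 / (1 * 1) and
+ * 533250000 = 24000000 / 8 * 711 / (4 * 1).
+ */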
+static struct rk_clk_pll_rate rk3399_pll_rates[] = {
+ /* _hz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd */
+ PLL_RATE(2208000000, 1, 92, 1, 1, 1),
+ PLL_RATE(2184000000, 1, 91, 1, 1, 1),
+ PLL_RATE(2160000000, 1, 90, 1, 1, 1),
+ PLL_RATE(2136000000, 1, 89, 1, 1, 1),
+ PLL_RATE(2112000000, 1, 88, 1, 1, 1),
+ PLL_RATE(2088000000, 1, 87, 1, 1, 1),
+ PLL_RATE(2064000000, 1, 86, 1, 1, 1),
+ PLL_RATE(2040000000, 1, 85, 1, 1, 1),
+ PLL_RATE(2016000000, 1, 84, 1, 1, 1),
+ PLL_RATE(1992000000, 1, 83, 1, 1, 1),
+ PLL_RATE(1968000000, 1, 82, 1, 1, 1),
+ PLL_RATE(1944000000, 1, 81, 1, 1, 1),
+ PLL_RATE(1920000000, 1, 80, 1, 1, 1),
+ PLL_RATE(1896000000, 1, 79, 1, 1, 1),
+ PLL_RATE(1872000000, 1, 78, 1, 1, 1),
+ PLL_RATE(1848000000, 1, 77, 1, 1, 1),
+ PLL_RATE(1824000000, 1, 76, 1, 1, 1),
+ PLL_RATE(1800000000, 1, 75, 1, 1, 1),
+ PLL_RATE(1776000000, 1, 74, 1, 1, 1),
+ PLL_RATE(1752000000, 1, 73, 1, 1, 1),
+ PLL_RATE(1728000000, 1, 72, 1, 1, 1),
+ PLL_RATE(1704000000, 1, 71, 1, 1, 1),
+ PLL_RATE(1680000000, 1, 70, 1, 1, 1),
+ PLL_RATE(1656000000, 1, 69, 1, 1, 1),
+ PLL_RATE(1632000000, 1, 68, 1, 1, 1),
+ PLL_RATE(1608000000, 1, 67, 1, 1, 1),
+ PLL_RATE(1600000000, 3, 200, 1, 1, 1),
+ PLL_RATE(1584000000, 1, 66, 1, 1, 1),
+ PLL_RATE(1560000000, 1, 65, 1, 1, 1),
+ PLL_RATE(1536000000, 1, 64, 1, 1, 1),
+ PLL_RATE(1512000000, 1, 63, 1, 1, 1),
+ PLL_RATE(1488000000, 1, 62, 1, 1, 1),
+ PLL_RATE(1464000000, 1, 61, 1, 1, 1),
+ PLL_RATE(1440000000, 1, 60, 1, 1, 1),
+ PLL_RATE(1416000000, 1, 59, 1, 1, 1),
+ PLL_RATE(1392000000, 1, 58, 1, 1, 1),
+ PLL_RATE(1368000000, 1, 57, 1, 1, 1),
+ PLL_RATE(1344000000, 1, 56, 1, 1, 1),
+ PLL_RATE(1320000000, 1, 55, 1, 1, 1),
+ PLL_RATE(1296000000, 1, 54, 1, 1, 1),
+ PLL_RATE(1272000000, 1, 53, 1, 1, 1),
+ PLL_RATE(1248000000, 1, 52, 1, 1, 1),
+ PLL_RATE(1200000000, 1, 50, 1, 1, 1),
+ PLL_RATE(1188000000, 2, 99, 1, 1, 1),
+ PLL_RATE(1104000000, 1, 46, 1, 1, 1),
+ PLL_RATE(1100000000, 12, 550, 1, 1, 1),
+ PLL_RATE(1008000000, 1, 84, 2, 1, 1),
+ PLL_RATE(1000000000, 1, 125, 3, 1, 1),
+ PLL_RATE( 984000000, 1, 82, 2, 1, 1),
+ PLL_RATE( 960000000, 1, 80, 2, 1, 1),
+ PLL_RATE( 936000000, 1, 78, 2, 1, 1),
+ PLL_RATE( 912000000, 1, 76, 2, 1, 1),
+ PLL_RATE( 900000000, 4, 300, 2, 1, 1),
+ PLL_RATE( 888000000, 1, 74, 2, 1, 1),
+ PLL_RATE( 864000000, 1, 72, 2, 1, 1),
+ PLL_RATE( 840000000, 1, 70, 2, 1, 1),
+ PLL_RATE( 816000000, 1, 68, 2, 1, 1),
+ PLL_RATE( 800000000, 1, 100, 3, 1, 1),
+ PLL_RATE( 700000000, 6, 350, 2, 1, 1),
+ PLL_RATE( 696000000, 1, 58, 2, 1, 1),
+ PLL_RATE( 676000000, 3, 169, 2, 1, 1),
+ PLL_RATE( 600000000, 1, 75, 3, 1, 1),
+ PLL_RATE( 594000000, 1, 99, 4, 1, 1),
+ PLL_RATE( 533250000, 8, 711, 4, 1, 1),
+ PLL_RATE( 504000000, 1, 63, 3, 1, 1),
+ PLL_RATE( 500000000, 6, 250, 2, 1, 1),
+ PLL_RATE( 408000000, 1, 68, 2, 2, 1),
+ PLL_RATE( 312000000, 1, 52, 2, 2, 1),
+ PLL_RATE( 297000000, 1, 99, 4, 2, 1),
+ PLL_RATE( 216000000, 1, 72, 4, 2, 1),
+ PLL_RATE( 148500000, 1, 99, 4, 4, 1),
+ PLL_RATE( 106500000, 1, 71, 4, 4, 1),
+ PLL_RATE( 96000000, 1, 64, 4, 4, 1),
+ PLL_RATE( 74250000, 2, 99, 4, 4, 1),
+ PLL_RATE( 65000000, 1, 65, 6, 4, 1),
+ PLL_RATE( 54000000, 1, 54, 6, 4, 1),
+ PLL_RATE( 27000000, 1, 27, 6, 4, 1),
+ {},
+};
+
+static struct rk_clk_armclk_rates rk3399_cpu_l_rates[] = {
+ {1800000000, 1},
+ {1704000000, 1},
+ {1608000000, 1},
+ {1512000000, 1},
+ {1488000000, 1},
+ {1416000000, 1},
+ {1200000000, 1},
+ {1008000000, 1},
+ { 816000000, 1},
+ { 696000000, 1},
+ { 600000000, 1},
+ { 408000000, 1},
+ { 312000000, 1},
+ { 216000000, 1},
+ { 96000000, 1},
+};
+
+static struct rk_clk_armclk_rates rk3399_cpu_b_rates[] = {
+ {2208000000, 1},
+ {2184000000, 1},
+ {2088000000, 1},
+ {2040000000, 1},
+ {2016000000, 1},
+ {1992000000, 1},
+ {1896000000, 1},
+ {1800000000, 1},
+ {1704000000, 1},
+ {1608000000, 1},
+ {1512000000, 1},
+ {1488000000, 1},
+ {1416000000, 1},
+ {1200000000, 1},
+ {1008000000, 1},
+ { 816000000, 1},
+ { 696000000, 1},
+ { 600000000, 1},
+ { 408000000, 1},
+ { 312000000, 1},
+ { 216000000, 1},
+ { 96000000, 1},
+};
+
+/* Standard PLL. */
+#define PLL(_id, _name, _base) \
+{ \
+ .type = RK3399_CLK_PLL, \
+ .clk.pll = &(struct rk_clk_pll_def) { \
+ .clkdef.id = _id, \
+ .clkdef.name = _name, \
+ .clkdef.parent_names = pll_src_p, \
+ .clkdef.parent_cnt = nitems(pll_src_p), \
+ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \
+ .base_offset = _base, \
+ .rates = rk3399_pll_rates, \
+ }, \
+}
+
+#define PLIST(_name) static const char *_name[]
+PLIST(pll_src_p) = {"xin24m", "xin32k"};
+
+PLIST(armclkl_p) = {"clk_core_l_lpll_src", "clk_core_l_bpll_src",
+ "clk_core_l_dpll_src", "clk_core_l_gpll_src"};
+PLIST(armclkb_p) = {"clk_core_b_lpll_src", "clk_core_b_bpll_src",
+ "clk_core_b_dpll_src", "clk_core_b_gpll_src"};
+PLIST(ddrclk_p) = {"clk_ddrc_lpll_src", "clk_ddrc_bpll_src",
+ "clk_ddrc_dpll_src", "clk_ddrc_gpll_src"};
+PLIST(pll_src_cpll_gpll_p) = {"cpll", "gpll"};
+PLIST(pll_src_cpll_gpll_ppll_p) = {"cpll", "gpll", "ppll"};
+PLIST(pll_src_cpll_gpll_upll_p) = {"cpll", "gpll", "upll"};
+PLIST(pll_src_npll_cpll_gpll_p) = {"npll", "cpll", "gpll"};
+PLIST(pll_src_cpll_gpll_npll_npll_p) = {"cpll", "gpll", "npll", "npll"};
+PLIST(pll_src_cpll_gpll_npll_ppll_p) = {"cpll", "gpll", "npll", "ppll" };
+PLIST(pll_src_cpll_gpll_npll_24m_p) = {"cpll", "gpll", "npll", "xin24m" };
+PLIST(pll_src_cpll_gpll_npll_usbphy480m_p)= {"cpll", "gpll", "npll", "clk_usbphy_480m" };
+PLIST(pll_src_ppll_cpll_gpll_npll_upll_p) = { "ppll", "cpll", "gpll", "npll", "upll" };
+PLIST(pll_src_cpll_gpll_npll_upll_24m_p)= { "cpll", "gpll", "npll", "upll", "xin24m" };
+PLIST(pll_src_cpll_gpll_npll_ppll_upll_24m_p) = { "cpll", "gpll", "npll", "ppll", "upll", "xin24m" };
+PLIST(pll_src_vpll_cpll_gpll_gpll_p) = {"vpll", "cpll", "gpll", "gpll"};
+PLIST(pll_src_vpll_cpll_gpll_npll_p) = {"vpll", "cpll", "gpll", "npll"};
+
+PLIST(aclk_cci_p) = {"cpll_aclk_cci_src", "gpll_aclk_cci_src",
+ "npll_aclk_cci_src", "vpll_aclk_cci_src"};
+PLIST(cci_trace_p) = {"cpll_cci_trace", "gpll_cci_trace"};
+PLIST(cs_p) = {"cpll_cs", "gpll_cs", "npll_cs", "npll_cs"};
+PLIST(aclk_perihp_p) = {"cpll_aclk_perihp_src", "gpll_aclk_perihp_src"};
+PLIST(dclk_vop0_p) = {"dclk_vop0_div", "dclk_vop0_frac"};
+PLIST(dclk_vop1_p) = {"dclk_vop1_div", "dclk_vop1_frac"};
+
+PLIST(clk_cif_p) = {"clk_cifout_src", "xin24m"};
+
+PLIST(pll_src_24m_usbphy480m_p) = { "xin24m", "clk_usbphy_480m"};
+PLIST(pll_src_24m_pciephy_p) = { "xin24m", "clk_pciephy_ref100m"};
+PLIST(pll_src_24m_32k_cpll_gpll_p)= {"xin24m", "xin32k", "cpll", "gpll"};
+PLIST(pciecore_cru_phy_p) = {"clk_pcie_core_cru", "clk_pcie_core_phy"};
+
+PLIST(aclk_emmc_p) = { "cpll_aclk_emmc_src", "gpll_aclk_emmc_src"};
+
+PLIST(aclk_perilp0_p) = { "cpll_aclk_perilp0_src",
+ "gpll_aclk_perilp0_src" };
+
+PLIST(fclk_cm0s_p) = { "cpll_fclk_cm0s_src",
+ "gpll_fclk_cm0s_src" };
+
+PLIST(hclk_perilp1_p) = { "cpll_hclk_perilp1_src",
+ "gpll_hclk_perilp1_src" };
+
+PLIST(clk_testout1_p) = { "clk_testout1_pll_src", "xin24m" };
+PLIST(clk_testout2_p) = { "clk_testout2_pll_src", "xin24m" };
+
+PLIST(usbphy_480m_p) = { "clk_usbphy0_480m_src",
+ "clk_usbphy1_480m_src" };
+PLIST(aclk_gmac_p) = { "cpll_aclk_gmac_src",
+ "gpll_aclk_gmac_src" };
+PLIST(rmii_p) = { "clk_gmac", "clkin_gmac" };
+PLIST(spdif_p) = { "clk_spdif_div", "clk_spdif_frac",
+ "clkin_i2s", "xin12m" };
+PLIST(i2s0_p) = { "clk_i2s0_div", "clk_i2s0_frac",
+ "clkin_i2s", "xin12m" };
+PLIST(i2s1_p) = { "clk_i2s1_div", "clk_i2s1_frac",
+ "clkin_i2s", "xin12m" };
+PLIST(i2s2_p) = { "clk_i2s2_div", "clk_i2s2_frac",
+ "clkin_i2s", "xin12m" };
+PLIST(i2sch_p) = {"clk_i2s0", "clk_i2s1", "clk_i2s2"};
+PLIST(i2sout_p) = {"clk_i2sout_src", "xin12m"};
+
+PLIST(uart0_p)= {"clk_uart0_div", "clk_uart0_frac", "xin24m"};
+PLIST(uart1_p)= {"clk_uart1_div", "clk_uart1_frac", "xin24m"};
+PLIST(uart2_p)= {"clk_uart2_div", "clk_uart2_frac", "xin24m"};
+PLIST(uart3_p)= {"clk_uart3_div", "clk_uart3_frac", "xin24m"};
+
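+/*
+ * Shorthand used below (macros presumably from rk_cru.h; argument order
+ * inferred from usage and the register comments): COMP(id, name, parents,
+ * flags, CLKSEL_CON index, div_shift, div_width, mux_shift, mux_width);
+ * CDIV() is the divider-only form, MUX() the mux-only form and FRACT() a
+ * fractional divider identified by its CLKSEL_CON index.
+ */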
+static struct rk_clk rk3399_clks[] = {
+ /* External clocks */
+ LINK("xin24m"),
+ FRATE(0, "xin32k", 32768),
+ FFACT(0, "xin12m", "xin24m", 1, 2),
+ FRATE(0, "clkin_i2s", 0),
+ FRATE(0, "pclkin_cif", 0),
+ LINK("clk_usbphy0_480m"),
+ LINK("clk_usbphy1_480m"),
+ LINK("clkin_gmac"),
+ FRATE(0, "clk_pcie_core_phy", 0),
+ FFACT(0, "clk_ddrc_div2", "clk_ddrc", 1, 2),
+
+ /* PLLs */
+ PLL(PLL_APLLL, "lpll", 0x00),
+ PLL(PLL_APLLB, "bpll", 0x20),
+ PLL(PLL_DPLL, "dpll", 0x40),
+ PLL(PLL_CPLL, "cpll", 0x60),
+ PLL(PLL_GPLL, "gpll", 0x80),
+ PLL(PLL_NPLL, "npll", 0xA0),
+ PLL(PLL_VPLL, "vpll", 0xC0),
+
+ /* CRU_CLKSEL_CON0 */
+ CDIV(0, "aclkm_core_l_c", "armclkl", 0,
+ 0, 8, 5),
+ ARMDIV(ARMCLKL, "armclkl", armclkl_p, rk3399_cpu_l_rates,
+ 0, 0, 5, 6, 2, 0, 3),
+ /* CRU_CLKSEL_CON1 */
+ CDIV(0, "pclk_dbg_core_l_c", "armclkl", 0,
+ 1, 8, 5),
+ CDIV(0, "atclk_core_l_c", "armclkl", 0,
+ 1, 0, 5),
+
+ /* CRU_CLKSEL_CON2 */
+ CDIV(0, "aclkm_core_b_c", "armclkb", 0,
+ 2, 8, 5),
+ ARMDIV(ARMCLKB, "armclkb", armclkb_p, rk3399_cpu_b_rates,
+ 2, 0, 5, 6, 2, 1, 3),
+
+ /* CRU_CLKSEL_CON3 */
+ CDIV(0, "pclken_dbg_core_b", "pclk_dbg_core_b", 0,
+ 3, 13, 2),
+ CDIV(0, "pclk_dbg_core_b_c", "armclkb", 0,
+ 3, 8, 5),
+ CDIV(0, "atclk_core_b_c", "armclkb", 0,
+ 3, 0, 5),
+
+ /* CRU_CLKSEL_CON4 */
+ COMP(0, "clk_cs", cs_p, 0,
+ 4, 0, 5, 6, 2),
+
+ /* CRU_CLKSEL_CON5 */
+ COMP(0, "clk_cci_trace_c", cci_trace_p, 0,
+ 5, 8, 5, 15, 1),
+ COMP(0, "aclk_cci_pre_c", aclk_cci_p, 0,
+ 5, 0, 5, 6, 2),
+
+ /* CRU_CLKSEL_CON6 */
+ COMP(0, "pclk_ddr_c", pll_src_cpll_gpll_p, 0,
+ 6, 8, 5, 15, 1),
+ COMP(SCLK_DDRC, "clk_ddrc", ddrclk_p, 0,
+ 6, 0, 3, 4, 2),
+
+ /* CRU_CLKSEL_CON7 */
+ CDIV(0, "hclk_vcodec_pre_c", "aclk_vcodec_pre", 0,
+ 7, 8, 5),
+ COMP(0, "aclk_vcodec_pre_c", pll_src_cpll_gpll_npll_ppll_p, 0,
+ 7, 0, 5, 6, 2),
+
+ /* CRU_CLKSEL_CON8 */
+ CDIV(0, "hclk_vdu_pre_c", "aclk_vdu_pre", 0,
+ 8, 8, 5),
+ COMP(0, "aclk_vdu_pre_c", pll_src_cpll_gpll_npll_ppll_p, 0,
+ 8, 0, 5, 6, 2),
+
+ /* CRU_CLKSEL_CON9 */
+ COMP(0, "clk_vdu_ca_c", pll_src_cpll_gpll_npll_npll_p, 0,
+ 9, 8, 5, 14, 2),
+ COMP(0, "clk_vdu_core_c", pll_src_cpll_gpll_npll_npll_p, 0,
+ 9, 0, 5, 6, 2),
+
+ /* CRU_CLKSEL_CON10 */
+ CDIV(0, "hclk_iep_pre_c", "aclk_iep_pre", 0,
+ 10, 8, 5),
+ COMP(0, "aclk_iep_pre_c", pll_src_cpll_gpll_npll_ppll_p, 0,
+ 10, 0, 5, 6, 2),
+
+ /* CRU_CLKSEL_CON11 */
+ CDIV(0, "hclk_rga_pre_c", "aclk_rga_pre", 0,
+ 11, 8, 5),
+ COMP(0, "aclk_rga_pre_c", pll_src_cpll_gpll_npll_ppll_p, 0,
+ 11, 0, 5, 6, 2),
+
+ /* CRU_CLKSEL_CON12 */
+ COMP(0, "aclk_center_c", pll_src_cpll_gpll_npll_npll_p, 0,
+ 12, 8, 5, 14, 2),
+ COMP(SCLK_RGA_CORE, "clk_rga_core_c", pll_src_cpll_gpll_npll_ppll_p, 0,
+ 12, 0, 5, 6, 2),
+
+ /* CRU_CLKSEL_CON13 */
+ COMP(0, "hclk_sd_c", pll_src_cpll_gpll_p, 0,
+ 13, 8, 5, 15, 1),
+ COMP(0, "aclk_gpu_pre_c", pll_src_ppll_cpll_gpll_npll_upll_p, 0,
+ 13, 0, 5, 5, 3),
+
+ /* CRU_CLKSEL_CON14 */
+ MUX(0, "upll", pll_src_24m_usbphy480m_p, 0,
+ 14, 15, 1),
+ CDIV(0, "pclk_perihp_c", "aclk_perihp", 0,
+ 14, 12, 2),
+ CDIV(0, "hclk_perihp_c", "aclk_perihp", 0,
+ 14, 8, 2),
+ MUX(0, "clk_usbphy_480m", usbphy_480m_p, 0,
+ 14, 6, 1),
+ COMP(0, "aclk_perihp_c", aclk_perihp_p, 0,
+ 14, 0, 5, 7, 1),
+
+ /* CRU_CLKSEL_CON15 */
+ COMP(0, "clk_sdio_c", pll_src_cpll_gpll_npll_ppll_upll_24m_p, 0,
+ 15, 0, 7, 8, 3),
+
+ /* CRU_CLKSEL_CON16 */
+ COMP(0, "clk_sdmmc_c", pll_src_cpll_gpll_npll_ppll_upll_24m_p, 0,
+ 16, 0, 7, 8, 3),
+
+ /* CRU_CLKSEL_CON17 */
+ COMP(0, "clk_pcie_pm_c", pll_src_cpll_gpll_npll_24m_p, 0,
+ 17, 0, 7, 8, 3),
+
+ /* CRU_CLKSEL_CON18 */
+ CDIV(0, "clk_pciephy_ref100m_c", "npll", 0,
+ 18, 11, 5),
+ MUX(SCLK_PCIEPHY_REF, "clk_pciephy_ref", pll_src_24m_pciephy_p, 0,
+ 18, 10, 1),
+ MUX(SCLK_PCIE_CORE, "clk_pcie_core", pciecore_cru_phy_p, 0,
+ 18, 7, 1),
+ COMP(0, "clk_pcie_core_cru_c", pll_src_cpll_gpll_npll_npll_p, 0,
+ 18, 0, 7, 8, 2),
+
+ /* CRU_CLKSEL_CON19 */
+ CDIV(0, "pclk_gmac_pre_c", "aclk_gmac_pre", 0,
+ 19, 8, 3),
+ MUX(SCLK_RMII_SRC, "clk_rmii_src",rmii_p, 0,
+ 19, 4, 1),
+ MUX(SCLK_HSICPHY, "clk_hsicphy_c", pll_src_cpll_gpll_npll_usbphy480m_p, 0,
+ 19, 0, 2),
+
+ /* CRU_CLKSEL_CON20 */
+ COMP(0, "clk_gmac_c", pll_src_cpll_gpll_npll_npll_p, 0,
+ 20, 8, 5, 14, 2),
+ COMP(0, "aclk_gmac_pre_c", aclk_gmac_p, 0,
+ 20, 0, 5, 7, 1),
+
+ /* CRU_CLKSEL_CON21 */
+ COMP(ACLK_EMMC, "aclk_emmc", aclk_emmc_p, 0,
+ 21, 0, 5, 7, 1),
+
+ /* CRU_CLKSEL_CON22 */
+ COMP(0, "clk_emmc_c", pll_src_cpll_gpll_npll_upll_24m_p, 0,
+ 22, 0, 7, 8, 3),
+
+ /* CRU_CLKSEL_CON23 */
+ CDIV(0, "pclk_perilp0_c", "aclk_perilp0", 0,
+ 23, 12, 3),
+ CDIV(0, "hclk_perilp0_c", "aclk_perilp0", 0,
+ 23, 8, 2),
+ COMP(0, "aclk_perilp0_c", aclk_perilp0_p, 0,
+ 23, 0, 5, 7, 1),
+
+ /* CRU_CLKSEL_CON24 */
+ COMP(0, "fclk_cm0s_c", fclk_cm0s_p, 0,
+ 24, 8, 5, 15, 1),
+ COMP(0, "clk_crypto0_c", pll_src_cpll_gpll_ppll_p, 0,
+ 24, 0, 5, 6, 2),
+
+ /* CRU_CLKSEL_CON25 */
+ CDIV(0, "pclk_perilp1_c", "hclk_perilp1", 0,
+ 25, 8, 3),
+ COMP(HCLK_PERILP1, "hclk_perilp1", hclk_perilp1_p, 0,
+ 25, 0, 5, 7, 1),
+
+ /* CRU_CLKSEL_CON26 */
+ CDIV(0, "clk_saradc_c", "xin24m", 0,
+ 26, 8, 8),
+ COMP(0, "clk_crypto1_c", pll_src_cpll_gpll_ppll_p, 0,
+ 26, 0, 5, 6, 2),
+
+ /* CRU_CLKSEL_CON27 */
+ COMP(0, "clk_tsadc_c", pll_src_p, 0,
+ 27, 0, 10, 15, 1),
+
+ /* CRU_CLKSEL_CON28 */
+ MUX(0, "clk_i2s0_mux", i2s0_p, 0,
+ 28, 8, 2),
+ COMP(0, "clk_i2s0_div_c", pll_src_cpll_gpll_p, 0,
+ 28, 0, 7, 7, 1),
+
+ /* CRU_CLKSEL_CON29 */
+ MUX(0, "clk_i2s1_mux", i2s1_p, 0,
+ 29, 8, 2),
+ COMP(0, "clk_i2s1_div_c", pll_src_cpll_gpll_p, 0,
+ 29, 0, 7, 7, 1),
+
+ /* CRU_CLKSEL_CON30 */
+ MUX(0, "clk_i2s2_mux", i2s2_p, 0,
+ 30, 8, 2),
+ COMP(0, "clk_i2s2_div_c", pll_src_cpll_gpll_p, 0,
+ 30, 0, 7, 7, 1),
+
+ /* CRU_CLKSEL_CON31 */
+ MUX(0, "clk_i2sout_c", i2sout_p, 0,
+ 31, 2, 1),
+ MUX(0, "clk_i2sout_src", i2sch_p, 0,
+ 31, 0, 2),
+
+ /* CRU_CLKSEL_CON32 */
+ COMP(0, "clk_spdif_rec_dptx_c", pll_src_cpll_gpll_p, 0,
+ 32, 8, 5, 15, 1),
+ MUX(0, "clk_spdif_mux", spdif_p, 0,
+ 32, 13, 2),
+ COMP(0, "clk_spdif_div_c", pll_src_cpll_gpll_p, 0,
+ 32, 0, 7, 7, 1),
+
+ /* CRU_CLKSEL_CON33 */
+ MUX(0, "clk_uart_src", pll_src_cpll_gpll_p, 0,
+ 33, 15, 1),
+ MUX(0, "clk_uart0_src", pll_src_cpll_gpll_upll_p, 0,
+ 33, 12, 2),
+ MUX(SCLK_UART0, "clk_uart0", uart0_p, 0,
+ 33, 8, 2),
+ CDIV(0, "clk_uart0_div_c", "clk_uart0_src", 0,
+ 33, 0, 7),
+
+ /* CRU_CLKSEL_CON34 */
+ MUX(SCLK_UART1, "clk_uart1", uart1_p, 0,
+ 34, 8, 2),
+ CDIV(0, "clk_uart1_div_c", "clk_uart_src", 0,
+ 34, 0, 7),
+
+ /* CRU_CLKSEL_CON35 */
+ MUX(SCLK_UART2, "clk_uart2", uart2_p, 0,
+ 35, 8, 2),
+ CDIV(0, "clk_uart2_div_c", "clk_uart_src", 0,
+ 35, 0, 7),
+
+ /* CRU_CLKSEL_CON36 */
+ MUX(SCLK_UART3, "clk_uart3", uart3_p, 0,
+ 36, 8, 2),
+ CDIV(0, "clk_uart3_div_c", "clk_uart_src", 0,
+ 36, 0, 7),
+
+ /* CRU_CLKSEL_CON37 */
+ /* unused */
+
+ /* CRU_CLKSEL_CON38 */
+ MUX(0, "clk_testout2_pll_src", pll_src_cpll_gpll_npll_npll_p, 0,
+ 38, 14, 2),
+ COMP(0, "clk_testout2_c", clk_testout2_p, 0,
+ 38, 8, 5, 13, 1),
+ MUX(0, "clk_testout1_pll_src", pll_src_cpll_gpll_npll_npll_p, 0,
+ 38, 6, 2),
+ COMP(0, "clk_testout1_c", clk_testout1_p, 0,
+ 38, 0, 5, 5, 1),
+
+ /* CRU_CLKSEL_CON39 */
+ COMP(0, "aclk_usb3_c", pll_src_cpll_gpll_npll_npll_p, 0,
+ 39, 0, 5, 6, 2),
+
+ /* CRU_CLKSEL_CON40 */
+ COMP(0, "clk_usb3otg0_suspend_c", pll_src_p, 0,
+ 40, 0, 10, 15, 1),
+
+ /* CRU_CLKSEL_CON41 */
+ COMP(0, "clk_usb3otg1_suspend_c", pll_src_p, 0,
+ 41, 0, 10, 15, 1),
+
+ /* CRU_CLKSEL_CON42 */
+ COMP(0, "aclk_hdcp_c", pll_src_cpll_gpll_ppll_p, 0,
+ 42, 8, 5, 14, 2),
+ COMP(0, "aclk_vio_c", pll_src_cpll_gpll_ppll_p, 0,
+ 42, 0, 5, 6, 2),
+
+ /* CRU_CLKSEL_CON43 */
+ CDIV(0, "pclk_hdcp_c", "aclk_hdcp", 0,
+ 43, 10, 5),
+ CDIV(0, "hclk_hdcp_c", "aclk_hdcp", 0,
+ 43, 5, 5),
+ CDIV(0, "pclk_vio_c", "aclk_vio", 0,
+ 43, 0, 5),
+
+ /* CRU_CLKSEL_CON44 */
+ COMP(0, "pclk_edp_c", pll_src_cpll_gpll_p, 0,
+ 44, 8, 6, 15, 1),
+
+ /* CRU_CLKSEL_CON45 - XXX clocks in mux are reversed in TRM !!!*/
+ COMP(0, "clk_hdmi_cec_c", pll_src_p, 0,
+ 45, 0, 10, 15, 1),
+
+ /* CRU_CLKSEL_CON46 */
+ COMP(0, "clk_dp_core_c", pll_src_npll_cpll_gpll_p, 0,
+ 46, 0, 5, 6, 2),
+
+ /* CRU_CLKSEL_CON47 */
+ CDIV(0, "hclk_vop0_pre_c", "aclk_vop0_pre_c", 0,
+ 47, 8, 5),
+ COMP(0, "aclk_vop0_pre_c", pll_src_vpll_cpll_gpll_npll_p, 0,
+ 47, 0, 5, 6, 2),
+
+ /* CRU_CLKSEL_CON48 */
+ CDIV(0, "hclk_vop1_pre_c", "aclk_vop1_pre", 0,
+ 48, 8, 5),
+ COMP(0, "aclk_vop1_pre_c", pll_src_vpll_cpll_gpll_npll_p, 0,
+ 48, 0, 5, 6, 2),
+
+ /* CRU_CLKSEL_CON49 */
+ MUX(DCLK_VOP0, "dclk_vop0", dclk_vop0_p, 0,
+ 49, 11, 1),
+ COMP(0, "dclk_vop0_div_c", pll_src_vpll_cpll_gpll_gpll_p, 0,
+ 49, 0, 8, 8, 2),
+
+ /* CRU_CLKSEL_CON50 */
+ MUX(DCLK_VOP1, "dclk_vop1", dclk_vop1_p, 0,
+ 50, 11, 1),
+ COMP(0, "dclk_vop1_div_c", pll_src_vpll_cpll_gpll_gpll_p, 0,
+ 50, 0, 8, 8, 2),
+
+ /* CRU_CLKSEL_CON51 */
+ COMP(0, "clk_vop0_pwm_c", pll_src_vpll_cpll_gpll_gpll_p, 0,
+ 51, 0, 5, 6, 2),
+
+ /* CRU_CLKSEL_CON52 */
+ COMP(0, "clk_vop1_pwm_c", pll_src_vpll_cpll_gpll_gpll_p, 0,
+ 52, 0, 5, 6, 2),
+
+ /* CRU_CLKSEL_CON53 */
+ CDIV(0, "hclk_isp0_c", "aclk_isp0", 0,
+ 53, 8, 5),
+ COMP(0, "aclk_isp0_c", pll_src_cpll_gpll_ppll_p, 0,
+ 53, 0, 5, 6, 2),
+
+ /* CRU_CLKSEL_CON54 */
+ CDIV(0, "hclk_isp1_c", "aclk_isp1", 0,
+ 54, 8, 5),
+ COMP(0, "aclk_isp1_c", pll_src_cpll_gpll_ppll_p, 0,
+ 54, 0, 5, 6, 2),
+
+ /* CRU_CLKSEL_CON55 */
+ COMP(0, "clk_isp1_c", pll_src_cpll_gpll_npll_npll_p, 0,
+ 55, 8, 5, 14, 2),
+ COMP(0, "clk_isp0_c", pll_src_cpll_gpll_npll_npll_p, 0,
+ 55, 0, 5, 6, 2),
+
+ /* CRU_CLKSEL_CON56 */
+ COMP(0, "aclk_gic_pre_c", pll_src_cpll_gpll_p, 0,
+ 56, 8, 5, 15, 1),
+ MUX(0, "clk_cifout_src_c", pll_src_cpll_gpll_npll_npll_p, 0,
+ 56, 6, 2),
+ COMP(SCLK_CIF_OUT, "clk_cifout", clk_cif_p, 0,
+ 56, 0, 5, 5, 1),
+
+ /* CRU_CLKSEL_CON57 */
+ CDIV(0, "clk_test_24m", "xin24m", 0,
+ 57, 6, 10),
+ CDIV(PCLK_ALIVE, "pclk_alive", "gpll", 0,
+ 57, 0, 5),
+
+ /* CRU_CLKSEL_CON58 */
+ COMP(0, "clk_spi5_c", pll_src_cpll_gpll_p, 0,
+ 58, 8, 7, 15, 1),
+ MUX(0, "clk_test_pre", pll_src_cpll_gpll_p, 0,
+ 58, 7, 1),
+ CDIV(0, "clk_test_c", "clk_test_pre", 0,
+ 58, 0, 5),
+
+ /* CRU_CLKSEL_CON59 */
+ COMP(0, "clk_spi1_c", pll_src_cpll_gpll_p, 0,
+ 59, 8, 7, 15, 1),
+ COMP(0, "clk_spi0_c", pll_src_cpll_gpll_p, 0,
+ 59, 0, 7, 7, 1),
+
+ /* CRU_CLKSEL_CON60 */
+ COMP(0, "clk_spi4_c", pll_src_cpll_gpll_p, 0,
+ 60, 8, 7, 15, 1),
+ COMP(0, "clk_spi2_c", pll_src_cpll_gpll_p, 0,
+ 60, 0, 7, 7, 1),
+
+ /* CRU_CLKSEL_CON61 */
+ COMP(0, "clk_i2c5_c", pll_src_cpll_gpll_p, 0,
+ 61, 8, 7, 15, 1),
+ COMP(0, "clk_i2c1_c", pll_src_cpll_gpll_p, 0,
+ 61, 0, 7, 7, 1),
+
+ /* CRU_CLKSEL_CON62 */
+ COMP(0, "clk_i2c6_c", pll_src_cpll_gpll_p, 0,
+ 62, 8, 7, 15, 1),
+ COMP(0, "clk_i2c2_c", pll_src_cpll_gpll_p, 0,
+ 62, 0, 7, 7, 1),
+
+ /* CRU_CLKSEL_CON63 */
+ COMP(0, "clk_i2c7_c", pll_src_cpll_gpll_p, 0,
+ 63, 8, 7, 15, 1),
+ COMP(0, "clk_i2c3_c", pll_src_cpll_gpll_p, 0,
+ 63, 0, 7, 7, 1),
+
+ /* CRU_CLKSEL_CON64 */
+ COMP(0, "clk_uphy0_tcpdphy_ref_c", pll_src_p, 0,
+ 64, 8, 5, 15, 1),
+ COMP(0, "clk_uphy0_tcpdcore_c", pll_src_24m_32k_cpll_gpll_p, 0,
+ 64, 0, 5, 6, 2),
+
+ /* CRU_CLKSEL_CON65 */
+ COMP(0, "clk_uphy1_tcpdphy_ref_c", pll_src_p, 0,
+ 65, 8, 5, 15, 1),
+ COMP(0, "clk_uphy1_tcpdcore_c", pll_src_24m_32k_cpll_gpll_p, 0,
+ 65, 0, 5, 6, 2),
+
+ /* CRU_CLKSEL_CON96 - 107 */
+ FRACT(0, "clk_spdif_frac_c", "clk_spdif_div", 0,
+ 99),
+ FRACT(0, "clk_i2s0_frac_c", "clk_i2s0_div", 0,
+ 96),
+ FRACT(0, "clk_i2s1_frac_c", "clk_i2s1_div", 0,
+ 97),
+ FRACT(0, "clk_i2s2_frac_c", "clk_i2s2_div", 0,
+ 98),
+ FRACT(0, "clk_uart0_frac_c", "clk_uart0_div", 0,
+ 100),
+ FRACT(0, "clk_uart1_frac_c", "clk_uart1_div", 0,
+ 101),
+ FRACT(0, "clk_uart2_frac_c", "clk_uart2_div", 0,
+ 102),
+ FRACT(0, "clk_uart3_frac_c", "clk_uart3_div", 0,
+ 103),
+ FRACT(0, "clk_test_frac_c", "clk_test_pre", 0,
+ 105),
+ FRACT(DCLK_VOP0_FRAC, "dclk_vop0_frac", "dclk_vop0_div", 0,
+ 106),
+ FRACT(DCLK_VOP1_FRAC, "dclk_vop1_frac", "dclk_vop1_div", 0,
+ 107),
+
+/* Not yet implemented
+ * MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "clk_sdmmc", RK3399_SDMMC_CON0, 1),
+ * MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "clk_sdmmc", RK3399_SDMMC_CON1, 1),
+ * MMC(SCLK_SDIO_DRV, "sdio_drv", "clk_sdio", RK3399_SDIO_CON0, 1),
+ * MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "clk_sdio", RK3399_SDIO_CON1, 1),
+ */
+
+};
+
+static int
+rk3399_cru_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_is_compatible(dev, "rockchip,rk3399-cru")) {
+ device_set_desc(dev, "Rockchip RK3399 Clock and Reset Unit");
+ return (BUS_PROBE_DEFAULT);
+ }
+
+ return (ENXIO);
+}
+
+static int
+rk3399_cru_attach(device_t dev)
+{
+ struct rk_cru_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ sc->gates = rk3399_gates;
+ sc->ngates = nitems(rk3399_gates);
+
+ sc->clks = rk3399_clks;
+ sc->nclks = nitems(rk3399_clks);
+
+ sc->reset_offset = 0x400;
+ sc->reset_num = 335;
+
+ return (rk_cru_attach(dev));
+}
+
+static device_method_t rk3399_cru_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, rk3399_cru_probe),
+ DEVMETHOD(device_attach, rk3399_cru_attach),
+
+ DEVMETHOD_END
+};
+
+static devclass_t rk3399_cru_devclass;
+
+DEFINE_CLASS_1(rk3399_cru, rk3399_cru_driver, rk3399_cru_methods,
+ sizeof(struct rk_cru_softc), rk_cru_driver);
+
+EARLY_DRIVER_MODULE(rk3399_cru, simplebus, rk3399_cru_driver,
+ rk3399_cru_devclass, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
diff --git a/sys/arm64/rockchip/clk/rk3399_cru_dt.h b/sys/arm64/rockchip/clk/rk3399_cru_dt.h
new file mode 100644
index 000000000000..f652f45129ca
--- /dev/null
+++ b/sys/arm64/rockchip/clk/rk3399_cru_dt.h
@@ -0,0 +1,320 @@
+#ifndef _RK3399_DT_H_
+#define _RK3399_DT_H_
+
+#define PLL_APLLL 1
+#define PLL_APLLB 2
+#define PLL_DPLL 3
+#define PLL_CPLL 4
+#define PLL_GPLL 5
+#define PLL_NPLL 6
+#define PLL_VPLL 7
+#define ARMCLKL 8
+#define ARMCLKB 9
+#define SCLK_I2C1 65
+#define SCLK_I2C2 66
+#define SCLK_I2C3 67
+#define SCLK_I2C5 68
+#define SCLK_I2C6 69
+#define SCLK_I2C7 70
+#define SCLK_SPI0 71
+#define SCLK_SPI1 72
+#define SCLK_SPI2 73
+#define SCLK_SPI4 74
+#define SCLK_SPI5 75
+#define SCLK_SDMMC 76
+#define SCLK_SDIO 77
+#define SCLK_EMMC 78
+#define SCLK_TSADC 79
+#define SCLK_SARADC 80
+#define SCLK_UART0 81
+#define SCLK_UART1 82
+#define SCLK_UART2 83
+#define SCLK_UART3 84
+#define SCLK_SPDIF_8CH 85
+#define SCLK_I2S0_8CH 86
+#define SCLK_I2S1_8CH 87
+#define SCLK_I2S2_8CH 88
+#define SCLK_I2S_8CH_OUT 89
+#define SCLK_TIMER00 90
+#define SCLK_TIMER01 91
+#define SCLK_TIMER02 92
+#define SCLK_TIMER03 93
+#define SCLK_TIMER04 94
+#define SCLK_TIMER05 95
+#define SCLK_TIMER06 96
+#define SCLK_TIMER07 97
+#define SCLK_TIMER08 98
+#define SCLK_TIMER09 99
+#define SCLK_TIMER10 100
+#define SCLK_TIMER11 101
+#define SCLK_MACREF 102
+#define SCLK_MAC_RX 103
+#define SCLK_MAC_TX 104
+#define SCLK_MAC 105
+#define SCLK_MACREF_OUT 106
+#define SCLK_VOP0_PWM 107
+#define SCLK_VOP1_PWM 108
+#define SCLK_RGA_CORE 109
+#define SCLK_ISP0 110
+#define SCLK_ISP1 111
+#define SCLK_HDMI_CEC 112
+#define SCLK_HDMI_SFR 113
+#define SCLK_DP_CORE 114
+#define SCLK_PVTM_CORE_L 115
+#define SCLK_PVTM_CORE_B 116
+#define SCLK_PVTM_GPU 117
+#define SCLK_PVTM_DDR 118
+#define SCLK_MIPIDPHY_REF 119
+#define SCLK_MIPIDPHY_CFG 120
+#define SCLK_HSICPHY 121
+#define SCLK_USBPHY480M 122
+#define SCLK_USB2PHY0_REF 123
+#define SCLK_USB2PHY1_REF 124
+#define SCLK_UPHY0_TCPDPHY_REF 125
+#define SCLK_UPHY0_TCPDCORE 126
+#define SCLK_UPHY1_TCPDPHY_REF 127
+#define SCLK_UPHY1_TCPDCORE 128
+#define SCLK_USB3OTG0_REF 129
+#define SCLK_USB3OTG1_REF 130
+#define SCLK_USB3OTG0_SUSPEND 131
+#define SCLK_USB3OTG1_SUSPEND 132
+#define SCLK_CRYPTO0 133
+#define SCLK_CRYPTO1 134
+#define SCLK_CCI_TRACE 135
+#define SCLK_CS 136
+#define SCLK_CIF_OUT 137
+#define SCLK_PCIEPHY_REF 138
+#define SCLK_PCIE_CORE 139
+#define SCLK_M0_PERILP 140
+#define SCLK_M0_PERILP_DEC 141
+#define SCLK_CM0S 142
+#define SCLK_DBG_NOC 143
+#define SCLK_DBG_PD_CORE_B 144
+#define SCLK_DBG_PD_CORE_L 145
+#define SCLK_DFIMON0_TIMER 146
+#define SCLK_DFIMON1_TIMER 147
+#define SCLK_INTMEM0 148
+#define SCLK_INTMEM1 149
+#define SCLK_INTMEM2 150
+#define SCLK_INTMEM3 151
+#define SCLK_INTMEM4 152
+#define SCLK_INTMEM5 153
+#define SCLK_SDMMC_DRV 154
+#define SCLK_SDMMC_SAMPLE 155
+#define SCLK_SDIO_DRV 156
+#define SCLK_SDIO_SAMPLE 157
+#define SCLK_VDU_CORE 158
+#define SCLK_VDU_CA 159
+#define SCLK_PCIE_PM 160
+#define SCLK_SPDIF_REC_DPTX 161
+#define SCLK_DPHY_PLL 162
+#define SCLK_DPHY_TX0_CFG 163
+#define SCLK_DPHY_TX1RX1_CFG 164
+#define SCLK_DPHY_RX0_CFG 165
+#define SCLK_RMII_SRC 166
+#define SCLK_PCIEPHY_REF100M 167
+#define SCLK_DDRC 168
+#define SCLK_TESTCLKOUT1 169
+#define SCLK_TESTCLKOUT2 170
+#define DCLK_VOP0 180
+#define DCLK_VOP1 181
+#define DCLK_VOP0_DIV 182
+#define DCLK_VOP1_DIV 183
+#define DCLK_M0_PERILP 184
+#define DCLK_VOP0_FRAC 185
+#define DCLK_VOP1_FRAC 186
+#define FCLK_CM0S 190
+#define ACLK_PERIHP 192
+#define ACLK_PERIHP_NOC 193
+#define ACLK_PERILP0 194
+#define ACLK_PERILP0_NOC 195
+#define ACLK_PERF_PCIE 196
+#define ACLK_PCIE 197
+#define ACLK_INTMEM 198
+#define ACLK_TZMA 199
+#define ACLK_DCF 200
+#define ACLK_CCI 201
+#define ACLK_CCI_NOC0 202
+#define ACLK_CCI_NOC1 203
+#define ACLK_CCI_GRF 204
+#define ACLK_CENTER 205
+#define ACLK_CENTER_MAIN_NOC 206
+#define ACLK_CENTER_PERI_NOC 207
+#define ACLK_GPU 208
+#define ACLK_PERF_GPU 209
+#define ACLK_GPU_GRF 210
+#define ACLK_DMAC0_PERILP 211
+#define ACLK_DMAC1_PERILP 212
+#define ACLK_GMAC 213
+#define ACLK_GMAC_NOC 214
+#define ACLK_PERF_GMAC 215
+#define ACLK_VOP0_NOC 216
+#define ACLK_VOP0 217
+#define ACLK_VOP1_NOC 218
+#define ACLK_VOP1 219
+#define ACLK_RGA 220
+#define ACLK_RGA_NOC 221
+#define ACLK_HDCP 222
+#define ACLK_HDCP_NOC 223
+#define ACLK_HDCP22 224
+#define ACLK_IEP 225
+#define ACLK_IEP_NOC 226
+#define ACLK_VIO 227
+#define ACLK_VIO_NOC 228
+#define ACLK_ISP0 229
+#define ACLK_ISP1 230
+#define ACLK_ISP0_NOC 231
+#define ACLK_ISP1_NOC 232
+#define ACLK_ISP0_WRAPPER 233
+#define ACLK_ISP1_WRAPPER 234
+#define ACLK_VCODEC 235
+#define ACLK_VCODEC_NOC 236
+#define ACLK_VDU 237
+#define ACLK_VDU_NOC 238
+#define ACLK_PERI 239
+#define ACLK_EMMC 240
+#define ACLK_EMMC_CORE 241
+#define ACLK_EMMC_NOC 242
+#define ACLK_EMMC_GRF 243
+#define ACLK_USB3 244
+#define ACLK_USB3_NOC 245
+#define ACLK_USB3OTG0 246
+#define ACLK_USB3OTG1 247
+#define ACLK_USB3_RKSOC_AXI_PERF 248
+#define ACLK_USB3_GRF 249
+#define ACLK_GIC 250
+#define ACLK_GIC_NOC 251
+#define ACLK_GIC_ADB400_CORE_L_2_GIC 252
+#define ACLK_GIC_ADB400_CORE_B_2_GIC 253
+#define ACLK_GIC_ADB400_GIC_2_CORE_L 254
+#define ACLK_GIC_ADB400_GIC_2_CORE_B 255
+#define ACLK_CORE_ADB400_CORE_L_2_CCI500 256
+#define ACLK_CORE_ADB400_CORE_B_2_CCI500 257
+#define ACLK_ADB400M_PD_CORE_L 258
+#define ACLK_ADB400M_PD_CORE_B 259
+#define ACLK_PERF_CORE_L 260
+#define ACLK_PERF_CORE_B 261
+#define ACLK_GIC_PRE 262
+#define ACLK_VOP0_PRE 263
+#define ACLK_VOP1_PRE 264
+#define PCLK_PERIHP 320
+#define PCLK_PERIHP_NOC 321
+#define PCLK_PERILP0 322
+#define PCLK_PERILP1 323
+#define PCLK_PERILP1_NOC 324
+#define PCLK_PERILP_SGRF 325
+#define PCLK_PERIHP_GRF 326
+#define PCLK_PCIE 327
+#define PCLK_SGRF 328
+#define PCLK_INTR_ARB 329
+#define PCLK_CENTER_MAIN_NOC 330
+#define PCLK_CIC 331
+#define PCLK_COREDBG_B 332
+#define PCLK_COREDBG_L 333
+#define PCLK_DBG_CXCS_PD_CORE_B 334
+#define PCLK_DCF 335
+#define PCLK_GPIO2 336
+#define PCLK_GPIO3 337
+#define PCLK_GPIO4 338
+#define PCLK_GRF 339
+#define PCLK_HSICPHY 340
+#define PCLK_I2C1 341
+#define PCLK_I2C2 342
+#define PCLK_I2C3 343
+#define PCLK_I2C5 344
+#define PCLK_I2C6 345
+#define PCLK_I2C7 346
+#define PCLK_SPI0 347
+#define PCLK_SPI1 348
+#define PCLK_SPI2 349
+#define PCLK_SPI4 350
+#define PCLK_SPI5 351
+#define PCLK_UART0 352
+#define PCLK_UART1 353
+#define PCLK_UART2 354
+#define PCLK_UART3 355
+#define PCLK_TSADC 356
+#define PCLK_SARADC 357
+#define PCLK_GMAC 358
+#define PCLK_GMAC_NOC 359
+#define PCLK_TIMER0 360
+#define PCLK_TIMER1 361
+#define PCLK_EDP 362
+#define PCLK_EDP_NOC 363
+#define PCLK_EDP_CTRL 364
+#define PCLK_VIO 365
+#define PCLK_VIO_NOC 366
+#define PCLK_VIO_GRF 367
+#define PCLK_MIPI_DSI0 368
+#define PCLK_MIPI_DSI1 369
+#define PCLK_HDCP 370
+#define PCLK_HDCP_NOC 371
+#define PCLK_HDMI_CTRL 372
+#define PCLK_DP_CTRL 373
+#define PCLK_HDCP22 374
+#define PCLK_GASKET 375
+#define PCLK_DDR 376
+#define PCLK_DDR_MON 377
+#define PCLK_DDR_SGRF 378
+#define PCLK_ISP1_WRAPPER 379
+#define PCLK_WDT 380
+#define PCLK_EFUSE1024NS 381
+#define PCLK_EFUSE1024S 382
+#define PCLK_PMU_INTR_ARB 383
+#define PCLK_MAILBOX0 384
+#define PCLK_USBPHY_MUX_G 385
+#define PCLK_UPHY0_TCPHY_G 386
+#define PCLK_UPHY0_TCPD_G 387
+#define PCLK_UPHY1_TCPHY_G 388
+#define PCLK_UPHY1_TCPD_G 389
+#define PCLK_ALIVE 390
+#define HCLK_PERIHP 448
+#define HCLK_PERILP0 449
+#define HCLK_PERILP1 450
+#define HCLK_PERILP0_NOC 451
+#define HCLK_PERILP1_NOC 452
+#define HCLK_M0_PERILP 453
+#define HCLK_M0_PERILP_NOC 454
+#define HCLK_AHB1TOM 455
+#define HCLK_HOST0 456
+#define HCLK_HOST0_ARB 457
+#define HCLK_HOST1 458
+#define HCLK_HOST1_ARB 459
+#define HCLK_HSIC 460
+#define HCLK_SD 461
+#define HCLK_SDMMC 462
+#define HCLK_SDMMC_NOC 463
+#define HCLK_M_CRYPTO0 464
+#define HCLK_M_CRYPTO1 465
+#define HCLK_S_CRYPTO0 466
+#define HCLK_S_CRYPTO1 467
+#define HCLK_I2S0_8CH 468
+#define HCLK_I2S1_8CH 469
+#define HCLK_I2S2_8CH 470
+#define HCLK_SPDIF 471
+#define HCLK_VOP0_NOC 472
+#define HCLK_VOP0 473
+#define HCLK_VOP1_NOC 474
+#define HCLK_VOP1 475
+#define HCLK_ROM 476
+#define HCLK_IEP 477
+#define HCLK_IEP_NOC 478
+#define HCLK_ISP0 479
+#define HCLK_ISP1 480
+#define HCLK_ISP0_NOC 481
+#define HCLK_ISP1_NOC 482
+#define HCLK_ISP0_WRAPPER 483
+#define HCLK_ISP1_WRAPPER 484
+#define HCLK_RGA 485
+#define HCLK_RGA_NOC 486
+#define HCLK_HDCP 487
+#define HCLK_HDCP_NOC 488
+#define HCLK_HDCP22 489
+#define HCLK_VCODEC 490
+#define HCLK_VCODEC_NOC 491
+#define HCLK_VDU 492
+#define HCLK_VDU_NOC 493
+#define HCLK_SDIO 494
+#define HCLK_SDIO_NOC 495
+#define HCLK_SDIOAUDIO_NOC 496
+#endif
diff --git a/sys/arm64/rockchip/clk/rk3399_pmucru.c b/sys/arm64/rockchip/clk/rk3399_pmucru.c
new file mode 100644
index 000000000000..3327117b98a1
--- /dev/null
+++ b/sys/arm64/rockchip/clk/rk3399_pmucru.c
@@ -0,0 +1,869 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2018 Emmanuel Vadot <manu@freebsd.org>
+ * Copyright (c) 2018 Greg V <greg@unrelenting.technology>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <machine/bus.h>
+
+#include <dev/fdt/simplebus.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/extres/clk/clk_div.h>
+#include <dev/extres/clk/clk_fixed.h>
+#include <dev/extres/clk/clk_mux.h>
+
+#include <arm64/rockchip/clk/rk_cru.h>
+
+/* GATES */
+
+#define PCLK_PMU 20
+#define PCLK_GPIO0_PMU 23
+#define PCLK_GPIO1_PMU 24
+#define PCLK_I2C0_PMU 27
+#define PCLK_I2C4_PMU 28
+#define PCLK_I2C8_PMU 29
+#define PCLK_RKPWM_PMU 30
+
+static struct rk_cru_gate rk3399_pmu_gates[] = {
+ /* PMUCRU_CLKGATE_CON1 */
+ CRU_GATE(PCLK_PMU, "pclk_pmu", "pclk_pmu_src", 0x104, 0)
+ CRU_GATE(PCLK_GPIO0_PMU, "pclk_gpio0_pmu", "pclk_pmu_src", 0x104, 3)
+ CRU_GATE(PCLK_GPIO1_PMU, "pclk_gpio1_pmu", "pclk_pmu_src", 0x104, 4)
+ CRU_GATE(PCLK_I2C0_PMU, "pclk_i2c0_pmu", "pclk_pmu_src", 0x104, 7)
+ CRU_GATE(PCLK_I2C4_PMU, "pclk_i2c4_pmu", "pclk_pmu_src", 0x104, 8)
+ CRU_GATE(PCLK_I2C8_PMU, "pclk_i2c8_pmu", "pclk_pmu_src", 0x104, 9)
+ CRU_GATE(PCLK_RKPWM_PMU, "pclk_rkpwm_pmu", "pclk_pmu_src", 0x104, 10)
+};
+
+/*
+ * PLLs
+ */
+
+#define PLL_PPLL 1
+
+static struct rk_clk_pll_rate rk3399_pll_rates[] = {
+ {
+ .freq = 2208000000,
+ .refdiv = 1,
+ .fbdiv = 92,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 2184000000,
+ .refdiv = 1,
+ .fbdiv = 91,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 2160000000,
+ .refdiv = 1,
+ .fbdiv = 90,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 2136000000,
+ .refdiv = 1,
+ .fbdiv = 89,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 2112000000,
+ .refdiv = 1,
+ .fbdiv = 88,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 2088000000,
+ .refdiv = 1,
+ .fbdiv = 87,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 2064000000,
+ .refdiv = 1,
+ .fbdiv = 86,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 2040000000,
+ .refdiv = 1,
+ .fbdiv = 85,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 2016000000,
+ .refdiv = 1,
+ .fbdiv = 84,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1992000000,
+ .refdiv = 1,
+ .fbdiv = 83,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1968000000,
+ .refdiv = 1,
+ .fbdiv = 82,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1944000000,
+ .refdiv = 1,
+ .fbdiv = 81,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1920000000,
+ .refdiv = 1,
+ .fbdiv = 80,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1896000000,
+ .refdiv = 1,
+ .fbdiv = 79,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1872000000,
+ .refdiv = 1,
+ .fbdiv = 78,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1848000000,
+ .refdiv = 1,
+ .fbdiv = 77,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1824000000,
+ .refdiv = 1,
+ .fbdiv = 76,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1800000000,
+ .refdiv = 1,
+ .fbdiv = 75,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1776000000,
+ .refdiv = 1,
+ .fbdiv = 74,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1752000000,
+ .refdiv = 1,
+ .fbdiv = 73,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1728000000,
+ .refdiv = 1,
+ .fbdiv = 72,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1704000000,
+ .refdiv = 1,
+ .fbdiv = 71,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1680000000,
+ .refdiv = 1,
+ .fbdiv = 70,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1656000000,
+ .refdiv = 1,
+ .fbdiv = 69,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1632000000,
+ .refdiv = 1,
+ .fbdiv = 68,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1608000000,
+ .refdiv = 1,
+ .fbdiv = 67,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1600000000,
+ .refdiv = 3,
+ .fbdiv = 200,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1584000000,
+ .refdiv = 1,
+ .fbdiv = 66,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1560000000,
+ .refdiv = 1,
+ .fbdiv = 65,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1536000000,
+ .refdiv = 1,
+ .fbdiv = 64,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1512000000,
+ .refdiv = 1,
+ .fbdiv = 63,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1488000000,
+ .refdiv = 1,
+ .fbdiv = 62,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1464000000,
+ .refdiv = 1,
+ .fbdiv = 61,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1440000000,
+ .refdiv = 1,
+ .fbdiv = 60,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1416000000,
+ .refdiv = 1,
+ .fbdiv = 59,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1392000000,
+ .refdiv = 1,
+ .fbdiv = 58,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1368000000,
+ .refdiv = 1,
+ .fbdiv = 57,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1344000000,
+ .refdiv = 1,
+ .fbdiv = 56,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1320000000,
+ .refdiv = 1,
+ .fbdiv = 55,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1296000000,
+ .refdiv = 1,
+ .fbdiv = 54,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1272000000,
+ .refdiv = 1,
+ .fbdiv = 53,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1248000000,
+ .refdiv = 1,
+ .fbdiv = 52,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1200000000,
+ .refdiv = 1,
+ .fbdiv = 50,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1188000000,
+ .refdiv = 2,
+ .fbdiv = 99,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1104000000,
+ .refdiv = 1,
+ .fbdiv = 46,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1100000000,
+ .refdiv = 12,
+ .fbdiv = 550,
+ .postdiv1 = 1,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1008000000,
+ .refdiv = 1,
+ .fbdiv = 84,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 1000000000,
+ .refdiv = 1,
+ .fbdiv = 125,
+ .postdiv1 = 3,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 984000000,
+ .refdiv = 1,
+ .fbdiv = 82,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 960000000,
+ .refdiv = 1,
+ .fbdiv = 80,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 936000000,
+ .refdiv = 1,
+ .fbdiv = 78,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 912000000,
+ .refdiv = 1,
+ .fbdiv = 76,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 900000000,
+ .refdiv = 4,
+ .fbdiv = 300,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 888000000,
+ .refdiv = 1,
+ .fbdiv = 74,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 864000000,
+ .refdiv = 1,
+ .fbdiv = 72,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 840000000,
+ .refdiv = 1,
+ .fbdiv = 70,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 816000000,
+ .refdiv = 1,
+ .fbdiv = 68,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 800000000,
+ .refdiv = 1,
+ .fbdiv = 100,
+ .postdiv1 = 3,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 700000000,
+ .refdiv = 6,
+ .fbdiv = 350,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 696000000,
+ .refdiv = 1,
+ .fbdiv = 58,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 676000000,
+ .refdiv = 3,
+ .fbdiv = 169,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 600000000,
+ .refdiv = 1,
+ .fbdiv = 75,
+ .postdiv1 = 3,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 594000000,
+ .refdiv = 1,
+ .fbdiv = 99,
+ .postdiv1 = 4,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 533250000,
+ .refdiv = 8,
+ .fbdiv = 711,
+ .postdiv1 = 4,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 504000000,
+ .refdiv = 1,
+ .fbdiv = 63,
+ .postdiv1 = 3,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 500000000,
+ .refdiv = 6,
+ .fbdiv = 250,
+ .postdiv1 = 2,
+ .postdiv2 = 1,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 408000000,
+ .refdiv = 1,
+ .fbdiv = 68,
+ .postdiv1 = 2,
+ .postdiv2 = 2,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 312000000,
+ .refdiv = 1,
+ .fbdiv = 52,
+ .postdiv1 = 2,
+ .postdiv2 = 2,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 297000000,
+ .refdiv = 1,
+ .fbdiv = 99,
+ .postdiv1 = 4,
+ .postdiv2 = 2,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 216000000,
+ .refdiv = 1,
+ .fbdiv = 72,
+ .postdiv1 = 4,
+ .postdiv2 = 2,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 148500000,
+ .refdiv = 1,
+ .fbdiv = 99,
+ .postdiv1 = 4,
+ .postdiv2 = 4,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 106500000,
+ .refdiv = 1,
+ .fbdiv = 71,
+ .postdiv1 = 4,
+ .postdiv2 = 4,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 96000000,
+ .refdiv = 1,
+ .fbdiv = 64,
+ .postdiv1 = 4,
+ .postdiv2 = 4,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 74250000,
+ .refdiv = 2,
+ .fbdiv = 99,
+ .postdiv1 = 4,
+ .postdiv2 = 4,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 65000000,
+ .refdiv = 1,
+ .fbdiv = 65,
+ .postdiv1 = 6,
+ .postdiv2 = 4,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 54000000,
+ .refdiv = 1,
+ .fbdiv = 54,
+ .postdiv1 = 6,
+ .postdiv2 = 4,
+ .dsmpd = 1,
+ },
+ {
+ .freq = 27000000,
+ .refdiv = 1,
+ .fbdiv = 27,
+ .postdiv1 = 6,
+ .postdiv2 = 4,
+ .dsmpd = 1,
+ },
+ {},
+};
+
+static const char *pll_parents[] = {"xin24m"};
+
+static struct rk_clk_pll_def ppll = {
+ .clkdef = {
+ .id = PLL_PPLL,
+ .name = "ppll",
+ .parent_names = pll_parents,
+ .parent_cnt = nitems(pll_parents),
+ },
+ .base_offset = 0x00,
+
+ .rates = rk3399_pll_rates,
+};
+
+static const char *pmu_parents[] = {"ppll"};
+
+#define PCLK_PMU_SRC 19
+
+static struct rk_clk_composite_def pclk_pmu_src = {
+ .clkdef = {
+ .id = PCLK_PMU_SRC,
+ .name = "pclk_pmu_src",
+ .parent_names = pmu_parents,
+ .parent_cnt = nitems(pmu_parents),
+ },
+ /* PMUCRU_CLKSEL_CON0 */
+ .muxdiv_offset = 0x80,
+
+ .div_shift = 0,
+ .div_width = 5,
+};
+
+#define SCLK_I2C0_PMU 9
+#define SCLK_I2C4_PMU 10
+#define SCLK_I2C8_PMU 11
+
+static struct rk_clk_composite_def i2c0 = {
+ .clkdef = {
+ .id = SCLK_I2C0_PMU,
+ .name = "clk_i2c0_pmu",
+ .parent_names = pmu_parents,
+ .parent_cnt = nitems(pmu_parents),
+ },
+ /* PMUCRU_CLKSEL_CON2 */
+ .muxdiv_offset = 0x88,
+
+ .div_shift = 0,
+ .div_width = 7,
+
+ /* PMUCRU_CLKGATE_CON0 */
+ .gate_offset = 0x100,
+ .gate_shift = 9,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_GATE,
+};
+
+static struct rk_clk_composite_def i2c8 = {
+ .clkdef = {
+ .id = SCLK_I2C8_PMU,
+ .name = "clk_i2c8_pmu",
+ .parent_names = pmu_parents,
+ .parent_cnt = nitems(pmu_parents),
+ },
+ /* PMUCRU_CLKSEL_CON2 */
+ .muxdiv_offset = 0x88,
+
+ .div_shift = 8,
+ .div_width = 7,
+
+ /* PMUCRU_CLKGATE_CON0 */
+ .gate_offset = 0x100,
+ .gate_shift = 11,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_GATE,
+};
+
+static struct rk_clk_composite_def i2c4 = {
+ .clkdef = {
+ .id = SCLK_I2C4_PMU,
+ .name = "clk_i2c4_pmu",
+ .parent_names = pmu_parents,
+ .parent_cnt = nitems(pmu_parents),
+ },
+ /* PMUCRU_CLKSEL_CON3 */
+ .muxdiv_offset = 0x8c,
+
+ .div_shift = 0,
+ .div_width = 7,
+
+ /* PMUCRU_CLKGATE_CON0 */
+ .gate_offset = 0x100,
+ .gate_shift = 10,
+
+ .flags = RK_CLK_COMPOSITE_HAVE_GATE,
+};
+
+static struct rk_clk rk3399_pmu_clks[] = {
+ {
+ .type = RK3399_CLK_PLL,
+ .clk.pll = &ppll
+ },
+
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &pclk_pmu_src
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &i2c0
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &i2c4
+ },
+ {
+ .type = RK_CLK_COMPOSITE,
+ .clk.composite = &i2c8
+ },
+};
+
+static int
+rk3399_pmucru_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_is_compatible(dev, "rockchip,rk3399-pmucru")) {
+ device_set_desc(dev, "Rockchip RK3399 PMU Clock and Reset Unit");
+ return (BUS_PROBE_DEFAULT);
+ }
+
+ return (ENXIO);
+}
+
+static int
+rk3399_pmucru_attach(device_t dev)
+{
+ struct rk_cru_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ sc->gates = rk3399_pmu_gates;
+ sc->ngates = nitems(rk3399_pmu_gates);
+
+ sc->clks = rk3399_pmu_clks;
+ sc->nclks = nitems(rk3399_pmu_clks);
+
+ sc->reset_offset = 0x110;
+ sc->reset_num = 30;
+
+ return (rk_cru_attach(dev));
+}
+
+static device_method_t rk3399_pmucru_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, rk3399_pmucru_probe),
+ DEVMETHOD(device_attach, rk3399_pmucru_attach),
+
+ DEVMETHOD_END
+};
+
+static devclass_t rk3399_pmucru_devclass;
+
+DEFINE_CLASS_1(rk3399_pmucru, rk3399_pmucru_driver, rk3399_pmucru_methods,
+ sizeof(struct rk_cru_softc), rk_cru_driver);
+
+EARLY_DRIVER_MODULE(rk3399_pmucru, simplebus, rk3399_pmucru_driver,
+ rk3399_pmucru_devclass, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
diff --git a/sys/arm64/rockchip/clk/rk_clk_armclk.c b/sys/arm64/rockchip/clk/rk_clk_armclk.c
new file mode 100644
index 000000000000..548539541fde
--- /dev/null
+++ b/sys/arm64/rockchip/clk/rk_clk_armclk.c
@@ -0,0 +1,257 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2018 Emmanuel Vadot <manu@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <dev/extres/clk/clk.h>
+
+#include <arm64/rockchip/clk/rk_clk_armclk.h>
+
+#include "clkdev_if.h"
+
+struct rk_clk_armclk_sc {
+ uint32_t muxdiv_offset;
+ uint32_t mux_shift;
+ uint32_t mux_width;
+ uint32_t mux_mask;
+
+ uint32_t div_shift;
+ uint32_t div_width;
+ uint32_t div_mask;
+
+ uint32_t gate_offset;
+ uint32_t gate_shift;
+
+ uint32_t flags;
+
+ uint32_t main_parent;
+ uint32_t alt_parent;
+
+ struct rk_clk_armclk_rates *rates;
+ int nrates;
+};
+
+#define WRITE4(_clk, off, val) \
+ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val)
+#define READ4(_clk, off, val) \
+ CLKDEV_READ_4(clknode_get_device(_clk), off, val)
+#define DEVICE_LOCK(_clk) \
+ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk))
+#define DEVICE_UNLOCK(_clk) \
+ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk))
+
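+/*
+ * Writes to the CRU select registers carry a write-enable mask in the
+ * upper 16 bits; only the fields whose mask bits are set are modified,
+ * so no read-modify-write cycle is needed.
+ */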
+#define RK_ARMCLK_WRITE_MASK_SHIFT 16
+
+#if 0
+#define dprintf(format, arg...) \
+ printf("%s:(%s)" format, __func__, clknode_get_name(clk), arg)
+#else
+#define dprintf(format, arg...)
+#endif
+
+static int
+rk_clk_armclk_init(struct clknode *clk, device_t dev)
+{
+ struct rk_clk_armclk_sc *sc;
+ uint32_t val, idx;
+
+ sc = clknode_get_softc(clk);
+
+ idx = 0;
+ DEVICE_LOCK(clk);
+ READ4(clk, sc->muxdiv_offset, &val);
+ DEVICE_UNLOCK(clk);
+
+ idx = (val & sc->mux_mask) >> sc->mux_shift;
+
+ clknode_init_parent_idx(clk, idx);
+
+ return (0);
+}
+
+static int
+rk_clk_armclk_set_mux(struct clknode *clk, int index)
+{
+ struct rk_clk_armclk_sc *sc;
+ uint32_t val = 0;
+
+ sc = clknode_get_softc(clk);
+
+ dprintf("Set mux to %d\n", index);
+ DEVICE_LOCK(clk);
+ val |= index << sc->mux_shift;
+ val |= sc->mux_mask << RK_ARMCLK_WRITE_MASK_SHIFT;
+ dprintf("Write: muxdiv_offset=%x, val=%x\n", sc->muxdiv_offset, val);
+ WRITE4(clk, sc->muxdiv_offset, val);
+ DEVICE_UNLOCK(clk);
+
+ return (0);
+}
+
+static int
+rk_clk_armclk_recalc(struct clknode *clk, uint64_t *freq)
+{
+ struct rk_clk_armclk_sc *sc;
+ uint32_t reg, div;
+
+ sc = clknode_get_softc(clk);
+
+ DEVICE_LOCK(clk);
+
+ READ4(clk, sc->muxdiv_offset, &reg);
+ dprintf("Read: muxdiv_offset=%x, val=%x\n", sc->muxdiv_offset, reg);
+
+ DEVICE_UNLOCK(clk);
+
+ div = ((reg & sc->div_mask) >> sc->div_shift) + 1;
+ dprintf("parent_freq=%ju, div=%u\n", *freq, div);
+
+ *freq = *freq / div;
+
+ return (0);
+}
+
+static int
+rk_clk_armclk_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout,
+ int flags, int *stop)
+{
+ struct rk_clk_armclk_sc *sc;
+ struct clknode *p_main;
+ const char **p_names;
+ uint64_t best = 0, best_p = 0;
+ uint32_t div = 0, val = 0;
+ int err, i, rate = -1;
+
+ sc = clknode_get_softc(clk);
+
+ dprintf("Finding best parent/div for target freq of %ju\n", *fout);
+ p_names = clknode_get_parent_names(clk);
+ p_main = clknode_find_by_name(p_names[sc->main_parent]);
+
+ for (i = 0; i < sc->nrates; i++) {
+ if (sc->rates[i].freq == *fout) {
+ best = sc->rates[i].freq;
+ div = sc->rates[i].div;
+ best_p = best * div;
+ rate = i;
+ dprintf("Best parent %s (%d) with best freq at %ju\n",
+ clknode_get_name(p_main),
+ sc->main_parent,
+ best);
+ break;
+ }
+ }
+
+ if (rate == -1)
+ return (0);
+
+ if ((flags & CLK_SET_DRYRUN) != 0) {
+ *fout = best;
+ *stop = 1;
+ return (0);
+ }
+
+ dprintf("Changing parent (%s) freq to %ju\n", clknode_get_name(p_main),
+ best_p);
+ err = clknode_set_freq(p_main, best_p, 0, 1);
+ if (err != 0)
+ printf("Cannot set %s to %ju\n",
+ clknode_get_name(p_main),
+ best_p);
+
+ clknode_set_parent_by_idx(clk, sc->main_parent);
+
+ clknode_get_freq(p_main, &best_p);
+ dprintf("main parent freq at %ju\n", best_p);
+ DEVICE_LOCK(clk);
+ val |= (div - 1) << sc->div_shift;
+ val |= sc->div_mask << RK_ARMCLK_WRITE_MASK_SHIFT;
+ dprintf("Write: muxdiv_offset=%x, val=%x\n", sc->muxdiv_offset, val);
+ WRITE4(clk, sc->muxdiv_offset, val);
+ DEVICE_UNLOCK(clk);
+
+ *fout = best;
+ *stop = 1;
+
+ return (0);
+}
+
+static clknode_method_t rk_clk_armclk_clknode_methods[] = {
+ /* Device interface */
+ CLKNODEMETHOD(clknode_init, rk_clk_armclk_init),
+ CLKNODEMETHOD(clknode_set_mux, rk_clk_armclk_set_mux),
+ CLKNODEMETHOD(clknode_recalc_freq, rk_clk_armclk_recalc),
+ CLKNODEMETHOD(clknode_set_freq, rk_clk_armclk_set_freq),
+ CLKNODEMETHOD_END
+};
+
+DEFINE_CLASS_1(rk_clk_armclk_clknode, rk_clk_armclk_clknode_class,
+ rk_clk_armclk_clknode_methods, sizeof(struct rk_clk_armclk_sc),
+ clknode_class);
+
+int
+rk_clk_armclk_register(struct clkdom *clkdom, struct rk_clk_armclk_def *clkdef)
+{
+ struct clknode *clk;
+ struct rk_clk_armclk_sc *sc;
+
+ clk = clknode_create(clkdom, &rk_clk_armclk_clknode_class,
+ &clkdef->clkdef);
+ if (clk == NULL)
+ return (1);
+
+ sc = clknode_get_softc(clk);
+
+ sc->muxdiv_offset = clkdef->muxdiv_offset;
+
+ sc->mux_shift = clkdef->mux_shift;
+ sc->mux_width = clkdef->mux_width;
+ sc->mux_mask = ((1 << clkdef->mux_width) - 1) << sc->mux_shift;
+
+ sc->div_shift = clkdef->div_shift;
+ sc->div_width = clkdef->div_width;
+ sc->div_mask = ((1 << clkdef->div_width) - 1) << sc->div_shift;
+
+ sc->flags = clkdef->flags;
+
+ sc->main_parent = clkdef->main_parent;
+ sc->alt_parent = clkdef->alt_parent;
+
+ sc->rates = clkdef->rates;
+ sc->nrates = clkdef->nrates;
+
+ clknode_register(clkdom, clk);
+
+ return (0);
+}
diff --git a/sys/arm64/rockchip/clk/rk_clk_armclk.h b/sys/arm64/rockchip/clk/rk_clk_armclk.h
new file mode 100644
index 000000000000..355f5031574b
--- /dev/null
+++ b/sys/arm64/rockchip/clk/rk_clk_armclk.h
@@ -0,0 +1,63 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright 2018 Emmanuel Vadot <manu@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _RK_CLK_ARMCLK_H_
+#define _RK_CLK_ARMCLK_H_
+
+#include <dev/extres/clk/clk.h>
+
+struct rk_clk_armclk_rates {
+ uint64_t freq;
+ uint32_t div;
+};
+
+struct rk_clk_armclk_def {
+ struct clknode_init_def clkdef;
+
+ uint32_t muxdiv_offset;
+
+ uint32_t mux_shift;
+ uint32_t mux_width;
+
+ uint32_t div_shift;
+ uint32_t div_width;
+
+ uint32_t flags;
+
+ uint32_t main_parent;
+ uint32_t alt_parent;
+
+ struct rk_clk_armclk_rates *rates;
+ int nrates;
+};
+
+int rk_clk_armclk_register(struct clkdom *clkdom,
+ struct rk_clk_armclk_def *clkdef);
+
+#endif /* _RK_CLK_ARMCLK_H_ */
diff --git a/sys/arm64/rockchip/clk/rk_clk_composite.c b/sys/arm64/rockchip/clk/rk_clk_composite.c
new file mode 100644
index 000000000000..17d258e02985
--- /dev/null
+++ b/sys/arm64/rockchip/clk/rk_clk_composite.c
@@ -0,0 +1,370 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2018 Emmanuel Vadot <manu@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <dev/extres/clk/clk.h>
+#include <dev/extres/syscon/syscon.h>
+
+#include <arm64/rockchip/clk/rk_clk_composite.h>
+
+#include "clkdev_if.h"
+#include "syscon_if.h"
+
+struct rk_clk_composite_sc {
+ uint32_t muxdiv_offset;
+ uint32_t mux_shift;
+ uint32_t mux_width;
+ uint32_t mux_mask;
+
+ uint32_t div_shift;
+ uint32_t div_width;
+ uint32_t div_mask;
+
+ uint32_t gate_offset;
+ uint32_t gate_shift;
+
+ uint32_t flags;
+
+ struct syscon *grf;
+};
+
+#define WRITE4(_clk, off, val) \
+ rk_clk_composite_write_4(_clk, off, val)
+#define READ4(_clk, off, val) \
+ rk_clk_composite_read_4(_clk, off, val)
+#define DEVICE_LOCK(_clk) \
+ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk))
+#define DEVICE_UNLOCK(_clk) \
+ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk))
+
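+/*
+ * Rockchip CRU registers use their upper halfword as a write mask: a field
+ * only changes when its mask bits are set, which is why the register writes
+ * below OR in the field mask shifted by RK_CLK_COMPOSITE_MASK_SHIFT.
+ */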
+#define RK_CLK_COMPOSITE_MASK_SHIFT 16
+
+#if 0
+#define dprintf(format, arg...) \
+ printf("%s:(%s)" format, __func__, clknode_get_name(clk), arg)
+#else
+#define dprintf(format, arg...)
+#endif
+
+static void
+rk_clk_composite_read_4(struct clknode *clk, bus_addr_t addr, uint32_t *val)
+{
+ struct rk_clk_composite_sc *sc;
+
+ sc = clknode_get_softc(clk);
+ if (sc->grf)
+ *val = SYSCON_READ_4(sc->grf, addr);
+ else
+ CLKDEV_READ_4(clknode_get_device(clk), addr, val);
+}
+
+static void
+rk_clk_composite_write_4(struct clknode *clk, bus_addr_t addr, uint32_t val)
+{
+ struct rk_clk_composite_sc *sc;
+
+ sc = clknode_get_softc(clk);
+ if (sc->grf)
+ SYSCON_WRITE_4(sc->grf, addr, val | (0xffff << 16));
+ else
+ CLKDEV_WRITE_4(clknode_get_device(clk), addr, val);
+}
+
+static struct syscon *
+rk_clk_composite_get_grf(struct clknode *clk)
+{
+ device_t dev;
+ phandle_t node;
+ struct syscon *grf;
+
+ grf = NULL;
+ dev = clknode_get_device(clk);
+ node = ofw_bus_get_node(dev);
+ if (OF_hasprop(node, "rockchip,grf") &&
+ syscon_get_by_ofw_property(dev, node,
+ "rockchip,grf", &grf) != 0) {
+ return (NULL);
+ }
+
+ return (grf);
+}
+
+static int
+rk_clk_composite_init(struct clknode *clk, device_t dev)
+{
+ struct rk_clk_composite_sc *sc;
+ uint32_t val, idx;
+
+ sc = clknode_get_softc(clk);
+ if ((sc->flags & RK_CLK_COMPOSITE_GRF) != 0) {
+ sc->grf = rk_clk_composite_get_grf(clk);
+ if (sc->grf == NULL)
+ panic("clock %s has GRF flag set but no syscon is available",
+ clknode_get_name(clk));
+ }
+
+ idx = 0;
+ if ((sc->flags & RK_CLK_COMPOSITE_HAVE_MUX) != 0) {
+ DEVICE_LOCK(clk);
+ READ4(clk, sc->muxdiv_offset, &val);
+ DEVICE_UNLOCK(clk);
+
+ idx = (val & sc->mux_mask) >> sc->mux_shift;
+ }
+
+ clknode_init_parent_idx(clk, idx);
+
+ return (0);
+}
+
+static int
+rk_clk_composite_set_gate(struct clknode *clk, bool enable)
+{
+ struct rk_clk_composite_sc *sc;
+ uint32_t val = 0;
+
+ sc = clknode_get_softc(clk);
+
+ if ((sc->flags & RK_CLK_COMPOSITE_HAVE_GATE) == 0)
+ return (0);
+
+ dprintf("%sabling gate\n", enable ? "En" : "Dis");
+ if (!enable)
+ val |= 1 << sc->gate_shift;
+ dprintf("sc->gate_shift: %x\n", sc->gate_shift);
+ val |= (1 << sc->gate_shift) << RK_CLK_COMPOSITE_MASK_SHIFT;
+ dprintf("Write: gate_offset=%x, val=%x\n", sc->gate_offset, val);
+ DEVICE_LOCK(clk);
+ WRITE4(clk, sc->gate_offset, val);
+ DEVICE_UNLOCK(clk);
+
+ return (0);
+}
+
+static int
+rk_clk_composite_set_mux(struct clknode *clk, int index)
+{
+ struct rk_clk_composite_sc *sc;
+ uint32_t val = 0;
+
+ sc = clknode_get_softc(clk);
+
+ if ((sc->flags & RK_CLK_COMPOSITE_HAVE_MUX) == 0)
+ return (0);
+
+ dprintf("Set mux to %d\n", index);
+ DEVICE_LOCK(clk);
+ val |= (index << sc->mux_shift);
+ val |= sc->mux_mask << RK_CLK_COMPOSITE_MASK_SHIFT;
+ dprintf("Write: muxdiv_offset=%x, val=%x\n", sc->muxdiv_offset, val);
+ WRITE4(clk, sc->muxdiv_offset, val);
+ DEVICE_UNLOCK(clk);
+
+ return (0);
+}
+
+static int
+rk_clk_composite_recalc(struct clknode *clk, uint64_t *freq)
+{
+ struct rk_clk_composite_sc *sc;
+ uint32_t reg, div;
+
+ sc = clknode_get_softc(clk);
+
+ DEVICE_LOCK(clk);
+
+ READ4(clk, sc->muxdiv_offset, &reg);
+ dprintf("Read: muxdiv_offset=%x, val=%x\n", sc->muxdiv_offset, reg);
+
+ DEVICE_UNLOCK(clk);
+
+ div = ((reg & sc->div_mask) >> sc->div_shift);
+ if (sc->flags & RK_CLK_COMPOSITE_DIV_EXP)
+ div = 1 << div;
+ else
+ div += 1;
+ dprintf("parent_freq=%ju, div=%u\n", *freq, div);
+ *freq = *freq / div;
+ dprintf("Final freq=%ju\n", *freq);
+ return (0);
+}
+
+static uint32_t
+rk_clk_composite_find_best(struct rk_clk_composite_sc *sc, uint64_t fparent,
+ uint64_t freq, uint32_t *reg)
+{
+ uint64_t best, cur;
+ uint32_t best_div, best_div_reg;
+ uint32_t div, div_reg;
+
+ best = 0;
+ best_div = 0;
+ best_div_reg = 0;
+
+ for (div_reg = 0; div_reg <= ((sc->div_mask >> sc->div_shift) + 1);
+ div_reg++) {
+ if (sc->flags & RK_CLK_COMPOSITE_DIV_EXP)
+ div = 1 << div_reg;
+ else
+ div = div_reg + 1;
+ cur = fparent / div;
+ if ((freq - cur) < (freq - best)) {
+ best = cur;
+ best_div = div;
+ best_div_reg = div_reg;
+ break;
+ }
+ }
+ *reg = div_reg;
+ return (best_div);
+}
+
+static int
+rk_clk_composite_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout,
+ int flags, int *stop)
+{
+ struct rk_clk_composite_sc *sc;
+ struct clknode *p_clk;
+ const char **p_names;
+ uint64_t best, cur;
+ uint32_t div, div_reg, best_div, best_div_reg, val;
+ int p_idx, best_parent;
+
+ sc = clknode_get_softc(clk);
+ dprintf("Finding best parent/div for target freq of %ju\n", *fout);
+ p_names = clknode_get_parent_names(clk);
+ for (best_div = 0, best = 0, p_idx = 0;
+ p_idx != clknode_get_parents_num(clk); p_idx++) {
+ p_clk = clknode_find_by_name(p_names[p_idx]);
+ clknode_get_freq(p_clk, &fparent);
+ dprintf("Testing with parent %s (%d) at freq %ju\n",
+ clknode_get_name(p_clk), p_idx, fparent);
+ div = rk_clk_composite_find_best(sc, fparent, *fout, &div_reg);
+ cur = fparent / div;
+ if ((*fout - cur) < (*fout - best)) {
+ best = cur;
+ best_div = div;
+ best_div_reg = div_reg;
+ best_parent = p_idx;
+ dprintf("Best parent so far %s (%d) with best freq at "
+ "%ju\n", clknode_get_name(p_clk), p_idx, best);
+ }
+ }
+
+ *stop = 1;
+ if (best_div == 0)
+ return (ERANGE);
+
+ if ((best < *fout) && ((flags & CLK_SET_ROUND_DOWN) == 0))
+ return (ERANGE);
+
+ if ((best > *fout) && ((flags & CLK_SET_ROUND_UP) == 0)) {
+ return (ERANGE);
+ }
+
+ if ((flags & CLK_SET_DRYRUN) != 0) {
+ *fout = best;
+ return (0);
+ }
+
+ p_idx = clknode_get_parent_idx(clk);
+ if (p_idx != best_parent) {
+ dprintf("Switching parent index from %d to %d\n", p_idx,
+ best_parent);
+ clknode_set_parent_by_idx(clk, best_parent);
+ }
+
+ dprintf("Setting divider to %d (reg: %d)\n", best_div, best_div_reg);
+ dprintf(" div_mask: 0x%X, div_shift: %d\n", sc->div_mask,
+ sc->div_shift);
+
+ DEVICE_LOCK(clk);
+ val = best_div_reg << sc->div_shift;
+ val |= sc->div_mask << RK_CLK_COMPOSITE_MASK_SHIFT;
+ dprintf("Write: muxdiv_offset=%x, val=%x\n", sc->muxdiv_offset, val);
+ WRITE4(clk, sc->muxdiv_offset, val);
+ DEVICE_UNLOCK(clk);
+
+ *fout = best;
+ return (0);
+}
+
+static clknode_method_t rk_clk_composite_clknode_methods[] = {
+ /* Device interface */
+ CLKNODEMETHOD(clknode_init, rk_clk_composite_init),
+ CLKNODEMETHOD(clknode_set_gate, rk_clk_composite_set_gate),
+ CLKNODEMETHOD(clknode_set_mux, rk_clk_composite_set_mux),
+ CLKNODEMETHOD(clknode_recalc_freq, rk_clk_composite_recalc),
+ CLKNODEMETHOD(clknode_set_freq, rk_clk_composite_set_freq),
+ CLKNODEMETHOD_END
+};
+
+DEFINE_CLASS_1(rk_clk_composite_clknode, rk_clk_composite_clknode_class,
+ rk_clk_composite_clknode_methods, sizeof(struct rk_clk_composite_sc),
+ clknode_class);
+
+int
+rk_clk_composite_register(struct clkdom *clkdom,
+ struct rk_clk_composite_def *clkdef)
+{
+ struct clknode *clk;
+ struct rk_clk_composite_sc *sc;
+
+ clk = clknode_create(clkdom, &rk_clk_composite_clknode_class,
+ &clkdef->clkdef);
+ if (clk == NULL)
+ return (1);
+
+ sc = clknode_get_softc(clk);
+
+ sc->muxdiv_offset = clkdef->muxdiv_offset;
+
+ sc->mux_shift = clkdef->mux_shift;
+ sc->mux_width = clkdef->mux_width;
+ sc->mux_mask = ((1 << clkdef->mux_width) - 1) << sc->mux_shift;
+
+ sc->div_shift = clkdef->div_shift;
+ sc->div_width = clkdef->div_width;
+ sc->div_mask = ((1 << clkdef->div_width) - 1) << sc->div_shift;
+
+ sc->gate_offset = clkdef->gate_offset;
+ sc->gate_shift = clkdef->gate_shift;
+
+ sc->flags = clkdef->flags;
+
+ clknode_register(clkdom, clk);
+
+ return (0);
+}
diff --git a/sys/arm64/rockchip/clk/rk_clk_composite.h b/sys/arm64/rockchip/clk/rk_clk_composite.h
new file mode 100644
index 000000000000..3f221572bcb9
--- /dev/null
+++ b/sys/arm64/rockchip/clk/rk_clk_composite.h
@@ -0,0 +1,60 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright 2018 Emmanuel Vadot <manu@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _RK_CLK_COMPOSITE_H_
+#define _RK_CLK_COMPOSITE_H_
+
+#include <dev/extres/clk/clk.h>
+
+struct rk_clk_composite_def {
+ struct clknode_init_def clkdef;
+
+ uint32_t muxdiv_offset;
+
+ uint32_t mux_shift;
+ uint32_t mux_width;
+
+ uint32_t div_shift;
+ uint32_t div_width;
+
+ uint32_t gate_offset;
+ uint32_t gate_shift;
+
+ uint32_t flags;
+};
+
+#define RK_CLK_COMPOSITE_HAVE_MUX 0x0001
+#define RK_CLK_COMPOSITE_HAVE_GATE 0x0002
+#define RK_CLK_COMPOSITE_DIV_EXP 0x0004 /* Register 0, 1, 2, 3, ... */
+ /* Divider 1, 2, 4, 8, ... */
+#define RK_CLK_COMPOSITE_GRF 0x0008 /* Use syscon registers instead of CRU's */
+
+int rk_clk_composite_register(struct clkdom *clkdom,
+ struct rk_clk_composite_def *clkdef);
+
+#endif /* _RK_CLK_COMPOSITE_H_ */
diff --git a/sys/arm64/rockchip/clk/rk_clk_fract.c b/sys/arm64/rockchip/clk/rk_clk_fract.c
new file mode 100644
index 000000000000..9bb4e169fb97
--- /dev/null
+++ b/sys/arm64/rockchip/clk/rk_clk_fract.c
@@ -0,0 +1,246 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright 2019 Michal Meloun <mmel@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <dev/extres/clk/clk.h>
+
+#include <arm64/rockchip/clk/rk_clk_fract.h>
+
+#include "clkdev_if.h"
+
+#define WR4(_clk, off, val) \
+ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val)
+#define RD4(_clk, off, val) \
+ CLKDEV_READ_4(clknode_get_device(_clk), off, val)
+#define MD4(_clk, off, clr, set) \
+ CLKDEV_MODIFY_4(clknode_get_device(_clk), off, clr, set)
+#define DEVICE_LOCK(_clk) \
+ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk))
+#define DEVICE_UNLOCK(_clk) \
+ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk))
+
+static int rk_clk_fract_init(struct clknode *clk, device_t dev);
+static int rk_clk_fract_recalc(struct clknode *clk, uint64_t *req);
+static int rk_clk_fract_set_freq(struct clknode *clknode, uint64_t fin,
+ uint64_t *fout, int flag, int *stop);
+
+struct rk_clk_fract_sc {
+ uint32_t flags;
+ uint32_t offset;
+ uint32_t numerator;
+ uint32_t denominator;
+};
+
+static clknode_method_t rk_clk_fract_methods[] = {
+ /* Device interface */
+ CLKNODEMETHOD(clknode_init, rk_clk_fract_init),
+ CLKNODEMETHOD(clknode_recalc_freq, rk_clk_fract_recalc),
+ CLKNODEMETHOD(clknode_set_freq, rk_clk_fract_set_freq),
+ CLKNODEMETHOD_END
+};
+DEFINE_CLASS_1(rk_clk_fract, rk_clk_fract_class, rk_clk_fract_methods,
+ sizeof(struct rk_clk_fract_sc), clknode_class);
+
+/*
+ * Compute best rational approximation of input fraction
+ * for fixed sized fractional divider registers.
+ * http://en.wikipedia.org/wiki/Continued_fraction
+ *
+ * - n_input, d_input Given input fraction
+ * - n_max, d_max Maximum values of divider registers
+ * - n_out, d_out Computed approximation
+ */
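+/*
+ * Illustrative example (values chosen for this comment, not taken from the
+ * TRM): with a 400 MHz parent and a 256 * 44.1 kHz = 11.2896 MHz target,
+ * the convergents settle on 441 / 15625.  Both values fit the 16-bit
+ * register fields, so the resulting output, 400 MHz * 441 / 15625, is
+ * exactly 11.2896 MHz.
+ */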
+
+static void
+clk_compute_fract_div(
+ uint64_t n_input, uint64_t d_input,
+ uint64_t n_max, uint64_t d_max,
+ uint64_t *n_out, uint64_t *d_out)
+{
+ uint64_t n_prev, d_prev; /* previous convergents */
+ uint64_t n_cur, d_cur; /* current convergents */
+ uint64_t n_rem, d_rem; /* fraction remainder */
+ uint64_t tmp, fact;
+
+ /* Initialize fraction remainder */
+ n_rem = n_input;
+ d_rem = d_input;
+
+ /* Init convergents to 0/1 and 1/0 */
+ n_prev = 0;
+ d_prev = 1;
+ n_cur = 1;
+ d_cur = 0;
+
+ while (d_rem != 0 && n_cur < n_max && d_cur < d_max) {
+ /* Factor for this step. */
+ fact = n_rem / d_rem;
+
+ /* Adjust fraction remainder */
+ tmp = d_rem;
+ d_rem = n_rem % d_rem;
+ n_rem = tmp;
+
+ /* Compute new numerator and save last one */
+ tmp = n_prev + fact * n_cur;
+ n_prev = n_cur;
+ n_cur = tmp;
+
+ /* Compute new denominator and save last one */
+ tmp = d_prev + fact * d_cur;
+ d_prev = d_cur;
+ d_cur = tmp;
+ }
+
+ if (n_cur > n_max || d_cur > d_max) {
+ *n_out = n_prev;
+ *d_out = d_prev;
+ } else {
+ *n_out = n_cur;
+ *d_out = d_cur;
+ }
+}
+
+static int
+rk_clk_fract_init(struct clknode *clk, device_t dev)
+{
+ uint32_t reg;
+ struct rk_clk_fract_sc *sc;
+
+ sc = clknode_get_softc(clk);
+ DEVICE_LOCK(clk);
+ RD4(clk, sc->offset, &reg);
+ DEVICE_UNLOCK(clk);
+
+ sc->numerator = (reg >> 16) & 0xFFFF;
+ sc->denominator = reg & 0xFFFF;
+ clknode_init_parent_idx(clk, 0);
+
+ return (0);
+}
+
+static int
+rk_clk_fract_recalc(struct clknode *clk, uint64_t *freq)
+{
+ struct rk_clk_fract_sc *sc;
+
+ sc = clknode_get_softc(clk);
+ if (sc->denominator == 0) {
+ printf("%s: %s denominator is zero!\n", clknode_get_name(clk),
+ __func__);
+ *freq = 0;
+ return (EINVAL);
+ }
+
+ *freq *= sc->numerator;
+ *freq /= sc->denominator;
+
+ return (0);
+}
+
+static int
+rk_clk_fract_set_freq(struct clknode *clk, uint64_t fin, uint64_t *fout,
+ int flags, int *stop)
+{
+ struct rk_clk_fract_sc *sc;
+ uint64_t div_n, div_d, _fout;
+
+ sc = clknode_get_softc(clk);
+
+ clk_compute_fract_div(*fout, fin, 0xFFFF, 0xFFFF, &div_n, &div_d);
+ _fout = fin * div_n;
+ _fout /= div_d;
+
+ /* Rounding. */
+ if ((flags & CLK_SET_ROUND_UP) && (_fout < *fout)) {
+ if (div_n > div_d && div_d > 1)
+ div_n++;
+ else
+ div_d--;
+ } else if ((flags & CLK_SET_ROUND_DOWN) && (_fout > *fout)) {
+ if (div_n > div_d && div_n > 1)
+ div_n--;
+ else
+ div_d++;
+ }
+
+ /* Check range after rounding */
+ if (div_n > 0xFFFF || div_d > 0xFFFF)
+ return (ERANGE);
+
+ if (div_d == 0) {
+ printf("%s: %s divider is zero!\n",
+ clknode_get_name(clk), __func__);
+ return (EINVAL);
+ }
+ /* Recompute final output frequency */
+ _fout = fin * div_n;
+ _fout /= div_d;
+
+ *stop = 1;
+
+ if ((flags & CLK_SET_DRYRUN) == 0) {
+ if (*stop != 0 &&
+ (flags & (CLK_SET_ROUND_UP | CLK_SET_ROUND_DOWN)) == 0 &&
+ *fout != _fout)
+ return (ERANGE);
+
+ sc->numerator = (uint32_t)div_n;
+ sc->denominator = (uint32_t)div_d;
+
+ DEVICE_LOCK(clk);
+ WR4(clk, sc->offset, sc->numerator << 16 | sc->denominator);
+ DEVICE_UNLOCK(clk);
+ }
+
+ *fout = _fout;
+ return (0);
+}
+
+int
+rk_clk_fract_register(struct clkdom *clkdom, struct rk_clk_fract_def *clkdef)
+{
+ struct clknode *clk;
+ struct rk_clk_fract_sc *sc;
+
+ clk = clknode_create(clkdom, &rk_clk_fract_class, &clkdef->clkdef);
+ if (clk == NULL)
+ return (1);
+
+ sc = clknode_get_softc(clk);
+ sc->flags = clkdef->flags;
+ sc->offset = clkdef->offset;
+
+ clknode_register(clkdom, clk);
+ return (0);
+}
diff --git a/sys/arm64/rockchip/clk/rk_clk_fract.h b/sys/arm64/rockchip/clk/rk_clk_fract.h
new file mode 100644
index 000000000000..2fe8f47586e5
--- /dev/null
+++ b/sys/arm64/rockchip/clk/rk_clk_fract.h
@@ -0,0 +1,44 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright 2019 Michal Meloun <mmel@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _RK_CLK_FRACT_H_
+#define _RK_CLK_FRACT_H_
+
+#include <dev/extres/clk/clk.h>
+
+struct rk_clk_fract_def {
+ struct clknode_init_def clkdef;
+ uint32_t offset;
+ uint32_t flags;
+};
+
+int rk_clk_fract_register(struct clkdom *clkdom,
+ struct rk_clk_fract_def *clkdef);
+
+#endif /* _RK_CLK_FRACT_H_ */
diff --git a/sys/arm64/rockchip/clk/rk_clk_gate.c b/sys/arm64/rockchip/clk/rk_clk_gate.c
new file mode 100644
index 000000000000..363d21c05372
--- /dev/null
+++ b/sys/arm64/rockchip/clk/rk_clk_gate.c
@@ -0,0 +1,135 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright 2016 Michal Meloun <mmel@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <dev/extres/clk/clk.h>
+
+#include <arm64/rockchip/clk/rk_clk_gate.h>
+
+#include "clkdev_if.h"
+
+#define WR4(_clk, off, val) \
+ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val)
+#define RD4(_clk, off, val) \
+ CLKDEV_READ_4(clknode_get_device(_clk), off, val)
+#define MD4(_clk, off, clr, set ) \
+ CLKDEV_MODIFY_4(clknode_get_device(_clk), off, clr, set)
+#define DEVICE_LOCK(_clk) \
+ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk))
+#define DEVICE_UNLOCK(_clk) \
+ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk))
+
+static int rk_clk_gate_init(struct clknode *clk, device_t dev);
+static int rk_clk_gate_set_gate(struct clknode *clk, bool enable);
+struct rk_clk_gate_sc {
+ uint32_t offset;
+ uint32_t shift;
+ uint32_t mask;
+ uint32_t on_value;
+ uint32_t off_value;
+ int gate_flags;
+ bool ungated;
+};
+
+static clknode_method_t rk_clk_gate_methods[] = {
+ /* Device interface */
+ CLKNODEMETHOD(clknode_init, rk_clk_gate_init),
+ CLKNODEMETHOD(clknode_set_gate, rk_clk_gate_set_gate),
+ CLKNODEMETHOD_END
+};
+DEFINE_CLASS_1(rk_clk_gate, rk_clk_gate_class, rk_clk_gate_methods,
+ sizeof(struct rk_clk_gate_sc), clknode_class);
+
+static int
+rk_clk_gate_init(struct clknode *clk, device_t dev)
+{
+ uint32_t reg;
+ struct rk_clk_gate_sc *sc;
+ int rv;
+
+ sc = clknode_get_softc(clk);
+ DEVICE_LOCK(clk);
+ rv = RD4(clk, sc->offset, &reg);
+ DEVICE_UNLOCK(clk);
+ if (rv != 0)
+ return (rv);
+ reg = (reg >> sc->shift) & sc->mask;
+ sc->ungated = reg == sc->on_value ? 1 : 0;
+ clknode_init_parent_idx(clk, 0);
+ return(0);
+}
+
+static int
+rk_clk_gate_set_gate(struct clknode *clk, bool enable)
+{
+ uint32_t reg;
+ struct rk_clk_gate_sc *sc;
+ int rv;
+
+ sc = clknode_get_softc(clk);
+ sc->ungated = enable;
+ DEVICE_LOCK(clk);
+ rv = MD4(clk, sc->offset, sc->mask << sc->shift,
+ ((sc->ungated ? sc->on_value : sc->off_value) << sc->shift) |
+ RK_CLK_GATE_MASK);
+ if (rv != 0) {
+ DEVICE_UNLOCK(clk);
+ return (rv);
+ }
+ RD4(clk, sc->offset, &reg);
+ DEVICE_UNLOCK(clk);
+ return(0);
+}
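Like most Rockchip CRU registers, the gate registers treat the upper 16 bits as per-bit write enables; the method above simply raises all of them (RK_CLK_GATE_MASK) before writing the gate bit, so no read-modify-write of unrelated fields is needed. A tiny standalone illustration of the convention, using a per-field enable instead (values are made up):

#include <stdint.h>
#include <stdio.h>

/* Compose a Rockchip "hi-word mask" write: change only (mask << shift). */
static uint32_t
rk_hiword_write(uint32_t mask, uint32_t shift, uint32_t value)
{
	return (((mask << shift) << 16) | ((value & mask) << shift));
}

int
main(void)
{
	/* Gate a one-bit field at bit 5: write-enable bit 21, data bit 5. */
	printf("0x%08x\n", rk_hiword_write(0x1, 5, 1));	/* 0x00200020 */
	/* Ungate it (gates in this commit use 0 as the "on" value). */
	printf("0x%08x\n", rk_hiword_write(0x1, 5, 0));	/* 0x00200000 */
	return (0);
}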
+
+int
+rk_clk_gate_register(struct clkdom *clkdom, struct rk_clk_gate_def *clkdef)
+{
+ struct clknode *clk;
+ struct rk_clk_gate_sc *sc;
+
+ clk = clknode_create(clkdom, &rk_clk_gate_class, &clkdef->clkdef);
+ if (clk == NULL)
+ return (1);
+
+ sc = clknode_get_softc(clk);
+ sc->offset = clkdef->offset;
+ sc->shift = clkdef->shift;
+ sc->mask = clkdef->mask;
+ sc->on_value = clkdef->on_value;
+ sc->off_value = clkdef->off_value;
+ sc->gate_flags = clkdef->gate_flags;
+
+ clknode_register(clkdom, clk);
+ return (0);
+}
diff --git a/sys/arm64/rockchip/clk/rk_clk_gate.h b/sys/arm64/rockchip/clk/rk_clk_gate.h
new file mode 100644
index 000000000000..c90993899b6e
--- /dev/null
+++ b/sys/arm64/rockchip/clk/rk_clk_gate.h
@@ -0,0 +1,49 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright 2018 Emmanuel Vadot <manu@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _RK_CLK_GATE_H_
+#define _RK_CLK_GATE_H_
+
+#include <dev/extres/clk/clk.h>
+
+struct rk_clk_gate_def {
+ struct clknode_init_def clkdef;
+ uint32_t offset;
+ uint32_t shift;
+ uint32_t mask;
+ uint32_t on_value;
+ uint32_t off_value;
+ int gate_flags;
+};
+
+#define RK_CLK_GATE_MASK 0xFFFF0000
+
+int rk_clk_gate_register(struct clkdom *clkdom, struct rk_clk_gate_def *clkdef);
+
+#endif /* _RK_CLK_GATE_H_ */
diff --git a/sys/arm64/rockchip/clk/rk_clk_mux.c b/sys/arm64/rockchip/clk/rk_clk_mux.c
new file mode 100644
index 000000000000..94d31d34c47b
--- /dev/null
+++ b/sys/arm64/rockchip/clk/rk_clk_mux.c
@@ -0,0 +1,137 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright 2016 Michal Meloun <mmel@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+
+#include <machine/bus.h>
+
+#include <dev/extres/clk/clk.h>
+
+#include <arm64/rockchip/clk/rk_cru.h>
+#include <arm64/rockchip/clk/rk_clk_mux.h>
+
+#include "clkdev_if.h"
+
+#define WR4(_clk, off, val) \
+ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val)
+#define RD4(_clk, off, val) \
+ CLKDEV_READ_4(clknode_get_device(_clk), off, val)
+#define MD4(_clk, off, clr, set ) \
+ CLKDEV_MODIFY_4(clknode_get_device(_clk), off, clr, set)
+#define DEVICE_LOCK(_clk) \
+ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk))
+#define DEVICE_UNLOCK(_clk) \
+ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk))
+
+static int rk_clk_mux_init(struct clknode *clk, device_t dev);
+static int rk_clk_mux_set_mux(struct clknode *clk, int idx);
+
+struct rk_clk_mux_sc {
+ uint32_t offset;
+ uint32_t shift;
+ uint32_t mask;
+ int mux_flags;
+};
+
+static clknode_method_t rk_clk_mux_methods[] = {
+ /* Device interface */
+ CLKNODEMETHOD(clknode_init, rk_clk_mux_init),
+ CLKNODEMETHOD(clknode_set_mux, rk_clk_mux_set_mux),
+ CLKNODEMETHOD_END
+};
+DEFINE_CLASS_1(rk_clk_mux, rk_clk_mux_class, rk_clk_mux_methods,
+ sizeof(struct rk_clk_mux_sc), clknode_class);
+
+static int
+rk_clk_mux_init(struct clknode *clk, device_t dev)
+{
+ uint32_t reg;
+ struct rk_clk_mux_sc *sc;
+ int rv;
+
+ sc = clknode_get_softc(clk);
+
+ DEVICE_LOCK(clk);
+ rv = RD4(clk, sc->offset, &reg);
+ DEVICE_UNLOCK(clk);
+ if (rv != 0) {
+ return (rv);
+ }
+ reg = (reg >> sc->shift) & sc->mask;
+ clknode_init_parent_idx(clk, reg);
+ return(0);
+}
+
+static int
+rk_clk_mux_set_mux(struct clknode *clk, int idx)
+{
+ uint32_t reg;
+ struct rk_clk_mux_sc *sc;
+ int rv;
+
+ sc = clknode_get_softc(clk);
+
+ DEVICE_LOCK(clk);
+ rv = MD4(clk, sc->offset, sc->mask << sc->shift,
+ ((idx & sc->mask) << sc->shift) | RK_CLK_MUX_MASK);
+ if (rv != 0) {
+ DEVICE_UNLOCK(clk);
+ return (rv);
+ }
+ RD4(clk, sc->offset, &reg);
+ DEVICE_UNLOCK(clk);
+
+ return(0);
+}
+
+int
+rk_clk_mux_register(struct clkdom *clkdom, struct rk_clk_mux_def *clkdef)
+{
+ struct clknode *clk;
+ struct rk_clk_mux_sc *sc;
+
+ clk = clknode_create(clkdom, &rk_clk_mux_class, &clkdef->clkdef);
+ if (clk == NULL)
+ return (1);
+
+ sc = clknode_get_softc(clk);
+ sc->offset = clkdef->offset;
+ sc->shift = clkdef->shift;
+ sc->mask = (1 << clkdef->width) - 1;
+ sc->mux_flags = clkdef->mux_flags;
+
+ clknode_register(clkdom, clk);
+ return (0);
+}
diff --git a/sys/arm64/rockchip/clk/rk_clk_mux.h b/sys/arm64/rockchip/clk/rk_clk_mux.h
new file mode 100644
index 000000000000..f44443790b18
--- /dev/null
+++ b/sys/arm64/rockchip/clk/rk_clk_mux.h
@@ -0,0 +1,47 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright 2016 Michal Meloun <mmel@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _RK_CLK_MUX_H_
+#define _RK_CLK_MUX_H_
+
+#include <dev/extres/clk/clk.h>
+
+struct rk_clk_mux_def {
+ struct clknode_init_def clkdef;
+ uint32_t offset;
+ uint32_t shift;
+ uint32_t width;
+ int mux_flags;
+};
+
+#define RK_CLK_MUX_MASK 0xFFFF0000
+
+int rk_clk_mux_register(struct clkdom *clkdom, struct rk_clk_mux_def *clkdef);
+
+#endif /* _RK_CLK_MUX_H_ */
diff --git a/sys/arm64/rockchip/clk/rk_clk_pll.c b/sys/arm64/rockchip/clk/rk_clk_pll.c
new file mode 100644
index 000000000000..887ee03c2851
--- /dev/null
+++ b/sys/arm64/rockchip/clk/rk_clk_pll.c
@@ -0,0 +1,543 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2018 Emmanuel Vadot <manu@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <dev/extres/clk/clk.h>
+
+#include <arm64/rockchip/clk/rk_clk_pll.h>
+
+#include "clkdev_if.h"
+
+struct rk_clk_pll_sc {
+ uint32_t base_offset;
+
+ uint32_t gate_offset;
+ uint32_t gate_shift;
+
+ uint32_t mode_reg;
+ uint32_t mode_shift;
+
+ uint32_t flags;
+
+ struct rk_clk_pll_rate *rates;
+ struct rk_clk_pll_rate *frac_rates;
+};
+
+#define WRITE4(_clk, off, val) \
+ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val)
+#define READ4(_clk, off, val) \
+ CLKDEV_READ_4(clknode_get_device(_clk), off, val)
+#define DEVICE_LOCK(_clk) \
+ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk))
+#define DEVICE_UNLOCK(_clk) \
+ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk))
+
+#define RK_CLK_PLL_MASK_SHIFT 16
+
+#if 0
+#define dprintf(format, arg...) \
+ printf("%s:(%s)" format, __func__, clknode_get_name(clk), arg)
+#else
+#define dprintf(format, arg...)
+#endif
+
+static int
+rk_clk_pll_set_gate(struct clknode *clk, bool enable)
+{
+ struct rk_clk_pll_sc *sc;
+ uint32_t val = 0;
+
+ sc = clknode_get_softc(clk);
+
+ if ((sc->flags & RK_CLK_PLL_HAVE_GATE) == 0)
+ return (0);
+
+ dprintf("%sabling gate\n", enable ? "En" : "Dis");
+ if (!enable)
+ val |= 1 << sc->gate_shift;
+ dprintf("sc->gate_shift: %x\n", sc->gate_shift);
+ val |= (1 << sc->gate_shift) << RK_CLK_PLL_MASK_SHIFT;
+ dprintf("Write: gate_offset=%x, val=%x\n", sc->gate_offset, val);
+ DEVICE_LOCK(clk);
+ WRITE4(clk, sc->gate_offset, val);
+ DEVICE_UNLOCK(clk);
+
+ return (0);
+}
+
+#define RK3328_CLK_PLL_FBDIV_OFFSET 0
+#define RK3328_CLK_PLL_FBDIV_SHIFT 0
+#define RK3328_CLK_PLL_FBDIV_MASK 0xFFF
+
+#define RK3328_CLK_PLL_POSTDIV1_OFFSET 0
+#define RK3328_CLK_PLL_POSTDIV1_SHIFT 12
+#define RK3328_CLK_PLL_POSTDIV1_MASK 0x7000
+
+#define RK3328_CLK_PLL_DSMPD_OFFSET 4
+#define RK3328_CLK_PLL_DSMPD_SHIFT 12
+#define RK3328_CLK_PLL_DSMPD_MASK 0x1000
+
+#define RK3328_CLK_PLL_REFDIV_OFFSET 4
+#define RK3328_CLK_PLL_REFDIV_SHIFT 0
+#define RK3328_CLK_PLL_REFDIV_MASK 0x3F
+
+#define RK3328_CLK_PLL_POSTDIV2_OFFSET 4
+#define RK3328_CLK_PLL_POSTDIV2_SHIFT 6
+#define RK3328_CLK_PLL_POSTDIV2_MASK 0x1C0
+
+#define RK3328_CLK_PLL_FRAC_OFFSET 8
+#define RK3328_CLK_PLL_FRAC_SHIFT 0
+#define RK3328_CLK_PLL_FRAC_MASK 0xFFFFFF
+
+#define RK3328_CLK_PLL_LOCK_MASK 0x400
+
+#define RK3328_CLK_PLL_MODE_SLOW 0
+#define RK3328_CLK_PLL_MODE_NORMAL 1
+#define RK3328_CLK_PLL_MODE_MASK 0x1
+
+static int
+rk3328_clk_pll_init(struct clknode *clk, device_t dev)
+{
+ struct rk_clk_pll_sc *sc;
+
+ sc = clknode_get_softc(clk);
+
+ clknode_init_parent_idx(clk, 0);
+
+ return (0);
+}
+
+static int
+rk3328_clk_pll_recalc(struct clknode *clk, uint64_t *freq)
+{
+ struct rk_clk_pll_sc *sc;
+ uint64_t rate;
+ uint32_t dsmpd, refdiv, fbdiv;
+ uint32_t postdiv1, postdiv2, frac;
+ uint32_t raw1, raw2, raw3;
+
+ sc = clknode_get_softc(clk);
+
+ DEVICE_LOCK(clk);
+
+ READ4(clk, sc->base_offset, &raw1);
+ READ4(clk, sc->base_offset + 4, &raw2);
+ READ4(clk, sc->base_offset + 8, &raw3);
+
+ fbdiv = (raw1 & RK3328_CLK_PLL_FBDIV_MASK) >> RK3328_CLK_PLL_FBDIV_SHIFT;
+ postdiv1 = (raw1 & RK3328_CLK_PLL_POSTDIV1_MASK) >> RK3328_CLK_PLL_POSTDIV1_SHIFT;
+
+ dsmpd = (raw2 & RK3328_CLK_PLL_DSMPD_MASK) >> RK3328_CLK_PLL_DSMPD_SHIFT;
+ refdiv = (raw2 & RK3328_CLK_PLL_REFDIV_MASK) >> RK3328_CLK_PLL_REFDIV_SHIFT;
+ postdiv2 = (raw2 & RK3328_CLK_PLL_POSTDIV2_MASK) >> RK3328_CLK_PLL_POSTDIV2_SHIFT;
+
+ frac = (raw3 & RK3328_CLK_PLL_FRAC_MASK) >> RK3328_CLK_PLL_FRAC_SHIFT;
+
+ DEVICE_UNLOCK(clk);
+
+ rate = *freq * fbdiv / refdiv;
+ if (dsmpd == 0) {
+ /* Fractional mode */
+ uint64_t frac_rate;
+
+ frac_rate = *freq * frac / refdiv;
+ rate += frac_rate >> 24;
+ }
+
+ *freq = rate / postdiv1 / postdiv2;
+
+ if (*freq % 2)
+ *freq = *freq + 1;
+
+ return (0);
+}
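In integer mode (dsmpd set) the recalculated rate is simply fref * fbdiv / refdiv divided by both post-dividers, with the 24-bit fraction added in fractional mode. A quick standalone check of the integer-mode arithmetic for one plausible divider setting (the values are illustrative, not taken from any rate table in this commit):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t fref = 24000000;	/* 24 MHz crystal */
	uint64_t fbdiv = 50, refdiv = 1, postdiv1 = 2, postdiv2 = 1;
	uint64_t rate;

	rate = fref * fbdiv / refdiv;		/* VCO: 1200 MHz */
	rate = rate / postdiv1 / postdiv2;	/* output: 600 MHz */
	printf("%ju Hz\n", (uintmax_t)rate);
	return (0);
}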
+
+static int
+rk3328_clk_pll_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout,
+ int flags, int *stop)
+{
+ struct rk_clk_pll_rate *rates;
+ struct rk_clk_pll_sc *sc;
+ uint32_t reg;
+ int timeout;
+
+ sc = clknode_get_softc(clk);
+
+ if (sc->rates)
+ rates = sc->rates;
+ else if (sc->frac_rates)
+ rates = sc->frac_rates;
+ else
+ return (EINVAL);
+
+ for (; rates->freq; rates++) {
+ if (rates->freq == *fout)
+ break;
+ }
+ if (rates->freq == 0) {
+ *stop = 1;
+ return (EINVAL);
+ }
+
+ DEVICE_LOCK(clk);
+
+ /* Setting to slow mode during frequency change */
+ reg = (RK3328_CLK_PLL_MODE_MASK << sc->mode_shift) <<
+ RK_CLK_PLL_MASK_SHIFT;
+ dprintf("Set PLL_MODEREG to %x\n", reg);
+ WRITE4(clk, sc->mode_reg, reg);
+
+ /* Setting postdiv1 and fbdiv */
+ reg = (rates->postdiv1 << RK3328_CLK_PLL_POSTDIV1_SHIFT) |
+ (rates->fbdiv << RK3328_CLK_PLL_FBDIV_SHIFT);
+ reg |= (RK3328_CLK_PLL_POSTDIV1_MASK | RK3328_CLK_PLL_FBDIV_MASK) << 16;
+ dprintf("Set PLL_CON0 to %x\n", reg);
+ WRITE4(clk, sc->base_offset, reg);
+
+ /* Setting dsmpd, postdiv2 and refdiv */
+ reg = (rates->dsmpd << RK3328_CLK_PLL_DSMPD_SHIFT) |
+ (rates->postdiv2 << RK3328_CLK_PLL_POSTDIV2_SHIFT) |
+ (rates->refdiv << RK3328_CLK_PLL_REFDIV_SHIFT);
+ reg |= (RK3328_CLK_PLL_DSMPD_MASK |
+ RK3328_CLK_PLL_POSTDIV2_MASK |
+ RK3328_CLK_PLL_REFDIV_MASK) << RK_CLK_PLL_MASK_SHIFT;
+ dprintf("Set PLL_CON1 to %x\n", reg);
+ WRITE4(clk, sc->base_offset + 0x4, reg);
+
+ /* Setting frac */
+ READ4(clk, sc->base_offset + 0x8, &reg);
+ reg &= ~RK3328_CLK_PLL_FRAC_MASK;
+ reg |= rates->frac << RK3328_CLK_PLL_FRAC_SHIFT;
+ dprintf("Set PLL_CON2 to %x\n", reg);
+ WRITE4(clk, sc->base_offset + 0x8, reg);
+
+	/* Poll the lock bit */
+ for (timeout = 1000; timeout; timeout--) {
+ READ4(clk, sc->base_offset + 0x4, &reg);
+ if ((reg & RK3328_CLK_PLL_LOCK_MASK) == 0)
+ break;
+ DELAY(1);
+ }
+
+ /* Set back to normal mode */
+ reg = (RK3328_CLK_PLL_MODE_NORMAL << sc->mode_shift);
+ reg |= (RK3328_CLK_PLL_MODE_MASK << sc->mode_shift) <<
+ RK_CLK_PLL_MASK_SHIFT;
+ dprintf("Set PLL_MODEREG to %x\n", reg);
+ WRITE4(clk, sc->mode_reg, reg);
+
+ DEVICE_UNLOCK(clk);
+
+ *stop = 1;
+ return (0);
+}
+
+static clknode_method_t rk3328_clk_pll_clknode_methods[] = {
+ /* Device interface */
+ CLKNODEMETHOD(clknode_init, rk3328_clk_pll_init),
+ CLKNODEMETHOD(clknode_set_gate, rk_clk_pll_set_gate),
+ CLKNODEMETHOD(clknode_recalc_freq, rk3328_clk_pll_recalc),
+ CLKNODEMETHOD(clknode_set_freq, rk3328_clk_pll_set_freq),
+ CLKNODEMETHOD_END
+};
+
+DEFINE_CLASS_1(rk3328_clk_pll_clknode, rk3328_clk_pll_clknode_class,
+ rk3328_clk_pll_clknode_methods, sizeof(struct rk_clk_pll_sc), clknode_class);
+
+int
+rk3328_clk_pll_register(struct clkdom *clkdom, struct rk_clk_pll_def *clkdef)
+{
+ struct clknode *clk;
+ struct rk_clk_pll_sc *sc;
+
+ clk = clknode_create(clkdom, &rk3328_clk_pll_clknode_class,
+ &clkdef->clkdef);
+ if (clk == NULL)
+ return (1);
+
+ sc = clknode_get_softc(clk);
+
+ sc->base_offset = clkdef->base_offset;
+ sc->gate_offset = clkdef->gate_offset;
+ sc->gate_shift = clkdef->gate_shift;
+ sc->mode_reg = clkdef->mode_reg;
+ sc->mode_shift = clkdef->mode_shift;
+ sc->flags = clkdef->flags;
+ sc->rates = clkdef->rates;
+ sc->frac_rates = clkdef->frac_rates;
+
+ clknode_register(clkdom, clk);
+
+ return (0);
+}
+
+#define RK3399_CLK_PLL_FBDIV_OFFSET 0
+#define RK3399_CLK_PLL_FBDIV_SHIFT 0
+#define RK3399_CLK_PLL_FBDIV_MASK 0xFFF
+
+#define RK3399_CLK_PLL_POSTDIV2_OFFSET 4
+#define RK3399_CLK_PLL_POSTDIV2_SHIFT 12
+#define RK3399_CLK_PLL_POSTDIV2_MASK 0x7000
+
+#define RK3399_CLK_PLL_POSTDIV1_OFFSET 4
+#define RK3399_CLK_PLL_POSTDIV1_SHIFT 8
+#define RK3399_CLK_PLL_POSTDIV1_MASK 0x700
+
+#define RK3399_CLK_PLL_REFDIV_OFFSET 4
+#define RK3399_CLK_PLL_REFDIV_SHIFT 0
+#define RK3399_CLK_PLL_REFDIV_MASK 0x3F
+
+#define RK3399_CLK_PLL_FRAC_OFFSET 8
+#define RK3399_CLK_PLL_FRAC_SHIFT 0
+#define RK3399_CLK_PLL_FRAC_MASK 0xFFFFFF
+
+#define RK3399_CLK_PLL_DSMPD_OFFSET 0xC
+#define RK3399_CLK_PLL_DSMPD_SHIFT 3
+#define RK3399_CLK_PLL_DSMPD_MASK 0x8
+
+#define RK3399_CLK_PLL_LOCK_OFFSET 8
+#define RK3399_CLK_PLL_LOCK_MASK 0x400
+
+#define RK3399_CLK_PLL_MODE_OFFSET 0xC
+#define RK3399_CLK_PLL_MODE_MASK 0x300
+#define RK3399_CLK_PLL_MODE_SLOW 0
+#define RK3399_CLK_PLL_MODE_NORMAL 1
+#define RK3399_CLK_PLL_MODE_DEEPSLOW 2
+#define RK3399_CLK_PLL_MODE_SHIFT 8
+
+#define RK3399_CLK_PLL_WRITE_MASK 0xFFFF0000
+
+static int
+rk3399_clk_pll_init(struct clknode *clk, device_t dev)
+{
+ struct rk_clk_pll_sc *sc;
+
+ sc = clknode_get_softc(clk);
+ clknode_init_parent_idx(clk, 0);
+
+ return (0);
+}
+
+static int
+rk3399_clk_pll_recalc(struct clknode *clk, uint64_t *freq)
+{
+ struct rk_clk_pll_sc *sc;
+ uint32_t dsmpd, refdiv, fbdiv;
+ uint32_t postdiv1, postdiv2, fracdiv;
+ uint32_t con1, con2, con3, con4;
+ uint64_t foutvco;
+ uint32_t mode;
+ sc = clknode_get_softc(clk);
+
+ DEVICE_LOCK(clk);
+ READ4(clk, sc->base_offset, &con1);
+ READ4(clk, sc->base_offset + 4, &con2);
+ READ4(clk, sc->base_offset + 8, &con3);
+ READ4(clk, sc->base_offset + 0xC, &con4);
+ DEVICE_UNLOCK(clk);
+
+	/*
+	 * If we are in slow mode the output frequency is the parent one,
+	 * i.e. the 24 MHz external oscillator.  If we are in deep slow mode
+	 * the output frequency is 32.768 kHz.
+	 */
+ mode = (con4 & RK3399_CLK_PLL_MODE_MASK) >> RK3399_CLK_PLL_MODE_SHIFT;
+ if (mode == RK3399_CLK_PLL_MODE_SLOW) {
+ dprintf("pll in slow mode, con4=%x\n", con4);
+ return (0);
+ } else if (mode == RK3399_CLK_PLL_MODE_DEEPSLOW) {
+ dprintf("pll in deep slow, con4=%x\n", con4);
+ *freq = 32768;
+ return (0);
+ }
+
+ dprintf("con0: %x\n", con1);
+ dprintf("con1: %x\n", con2);
+ dprintf("con2: %x\n", con3);
+ dprintf("con3: %x\n", con4);
+
+ fbdiv = (con1 & RK3399_CLK_PLL_FBDIV_MASK)
+ >> RK3399_CLK_PLL_FBDIV_SHIFT;
+
+ postdiv1 = (con2 & RK3399_CLK_PLL_POSTDIV1_MASK)
+ >> RK3399_CLK_PLL_POSTDIV1_SHIFT;
+ postdiv2 = (con2 & RK3399_CLK_PLL_POSTDIV2_MASK)
+ >> RK3399_CLK_PLL_POSTDIV2_SHIFT;
+ refdiv = (con2 & RK3399_CLK_PLL_REFDIV_MASK)
+ >> RK3399_CLK_PLL_REFDIV_SHIFT;
+
+ fracdiv = (con3 & RK3399_CLK_PLL_FRAC_MASK)
+ >> RK3399_CLK_PLL_FRAC_SHIFT;
+ fracdiv >>= 24;
+
+ dsmpd = (con4 & RK3399_CLK_PLL_DSMPD_MASK) >> RK3399_CLK_PLL_DSMPD_SHIFT;
+
+ dprintf("fbdiv: %d\n", fbdiv);
+ dprintf("postdiv1: %d\n", postdiv1);
+ dprintf("postdiv2: %d\n", postdiv2);
+ dprintf("refdiv: %d\n", refdiv);
+ dprintf("fracdiv: %d\n", fracdiv);
+ dprintf("dsmpd: %d\n", dsmpd);
+
+ dprintf("parent freq=%ju\n", *freq);
+
+ if (dsmpd == 0) {
+ /* Fractional mode */
+ foutvco = *freq / refdiv * (fbdiv + fracdiv);
+ } else {
+ /* Integer mode */
+ foutvco = *freq / refdiv * fbdiv;
+ }
+ dprintf("foutvco: %ju\n", foutvco);
+
+ *freq = foutvco / postdiv1 / postdiv2;
+ dprintf("freq: %ju\n", *freq);
+
+ return (0);
+}
+
+static int
+rk3399_clk_pll_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout,
+ int flags, int *stop)
+{
+ struct rk_clk_pll_rate *rates;
+ struct rk_clk_pll_sc *sc;
+ uint32_t reg;
+ int timeout;
+
+ sc = clknode_get_softc(clk);
+
+ if (sc->rates)
+ rates = sc->rates;
+ else if (sc->frac_rates)
+ rates = sc->frac_rates;
+ else
+ return (EINVAL);
+
+ for (; rates->freq; rates++) {
+ if (rates->freq == *fout)
+ break;
+ }
+ if (rates->freq == 0) {
+ *stop = 1;
+ return (EINVAL);
+ }
+
+ DEVICE_LOCK(clk);
+
+ /* Set to slow mode during frequency change */
+ reg = RK3399_CLK_PLL_MODE_SLOW << RK3399_CLK_PLL_MODE_SHIFT;
+ reg |= RK3399_CLK_PLL_MODE_MASK << RK_CLK_PLL_MASK_SHIFT;
+ WRITE4(clk, sc->base_offset + 0xC, reg);
+
+ /* Setting fbdiv */
+ reg = rates->fbdiv << RK3399_CLK_PLL_FBDIV_SHIFT;
+ reg |= RK3399_CLK_PLL_FBDIV_MASK << RK_CLK_PLL_MASK_SHIFT;
+ WRITE4(clk, sc->base_offset, reg);
+
+ /* Setting postdiv1, postdiv2 and refdiv */
+ reg = rates->postdiv1 << RK3399_CLK_PLL_POSTDIV1_SHIFT;
+ reg |= rates->postdiv2 << RK3399_CLK_PLL_POSTDIV2_SHIFT;
+ reg |= rates->refdiv << RK3399_CLK_PLL_REFDIV_SHIFT;
+ reg |= (RK3399_CLK_PLL_POSTDIV1_MASK | RK3399_CLK_PLL_POSTDIV2_MASK |
+ RK3399_CLK_PLL_REFDIV_MASK) << RK_CLK_PLL_MASK_SHIFT;
+ WRITE4(clk, sc->base_offset + 0x4, reg);
+
+ /* Setting frac */
+ READ4(clk, sc->base_offset + 0x8, &reg);
+ reg &= ~RK3399_CLK_PLL_FRAC_MASK;
+ reg |= rates->frac << RK3399_CLK_PLL_FRAC_SHIFT;
+ WRITE4(clk, sc->base_offset + 0x8, reg | RK3399_CLK_PLL_WRITE_MASK);
+
+ /* Set dsmpd */
+ reg = rates->dsmpd << RK3399_CLK_PLL_DSMPD_SHIFT;
+ reg |= RK3399_CLK_PLL_DSMPD_MASK << RK_CLK_PLL_MASK_SHIFT;
+ WRITE4(clk, sc->base_offset + 0xC, reg);
+
+	/* Poll the lock bit */
+ for (timeout = 1000; timeout; timeout--) {
+ READ4(clk, sc->base_offset + RK3399_CLK_PLL_LOCK_OFFSET, &reg);
+ if ((reg & RK3399_CLK_PLL_LOCK_MASK) == 0)
+ break;
+ DELAY(1);
+ }
+
+ /* Set back to normal mode */
+ reg = RK3399_CLK_PLL_MODE_NORMAL << RK3399_CLK_PLL_MODE_SHIFT;
+ reg |= RK3399_CLK_PLL_MODE_MASK << RK_CLK_PLL_MASK_SHIFT;
+ WRITE4(clk, sc->base_offset + 0xC, reg);
+
+ DEVICE_UNLOCK(clk);
+
+ *stop = 1;
+ return (0);
+}
+
+static clknode_method_t rk3399_clk_pll_clknode_methods[] = {
+ /* Device interface */
+ CLKNODEMETHOD(clknode_init, rk3399_clk_pll_init),
+ CLKNODEMETHOD(clknode_set_gate, rk_clk_pll_set_gate),
+ CLKNODEMETHOD(clknode_recalc_freq, rk3399_clk_pll_recalc),
+ CLKNODEMETHOD(clknode_set_freq, rk3399_clk_pll_set_freq),
+ CLKNODEMETHOD_END
+};
+
+DEFINE_CLASS_1(rk3399_clk_pll_clknode, rk3399_clk_pll_clknode_class,
+ rk3399_clk_pll_clknode_methods, sizeof(struct rk_clk_pll_sc), clknode_class);
+
+int
+rk3399_clk_pll_register(struct clkdom *clkdom, struct rk_clk_pll_def *clkdef)
+{
+ struct clknode *clk;
+ struct rk_clk_pll_sc *sc;
+
+ clk = clknode_create(clkdom, &rk3399_clk_pll_clknode_class,
+ &clkdef->clkdef);
+ if (clk == NULL)
+ return (1);
+
+ sc = clknode_get_softc(clk);
+
+ sc->base_offset = clkdef->base_offset;
+ sc->gate_offset = clkdef->gate_offset;
+ sc->gate_shift = clkdef->gate_shift;
+ sc->flags = clkdef->flags;
+ sc->rates = clkdef->rates;
+ sc->frac_rates = clkdef->frac_rates;
+
+ clknode_register(clkdom, clk);
+
+ return (0);
+}
diff --git a/sys/arm64/rockchip/clk/rk_clk_pll.h b/sys/arm64/rockchip/clk/rk_clk_pll.h
new file mode 100644
index 000000000000..d04e0f31f848
--- /dev/null
+++ b/sys/arm64/rockchip/clk/rk_clk_pll.h
@@ -0,0 +1,66 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright 2018 Emmanuel Vadot <manu@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _RK_CLK_PLL_H_
+#define _RK_CLK_PLL_H_
+
+#include <dev/extres/clk/clk.h>
+
+struct rk_clk_pll_rate {
+ uint32_t freq;
+ uint32_t refdiv;
+ uint32_t fbdiv;
+ uint32_t postdiv1;
+ uint32_t postdiv2;
+ uint32_t dsmpd;
+ uint32_t frac;
+};
+
+struct rk_clk_pll_def {
+ struct clknode_init_def clkdef;
+ uint32_t base_offset;
+
+ uint32_t gate_offset;
+ uint32_t gate_shift;
+
+ uint32_t mode_reg;
+ uint32_t mode_shift;
+
+ uint32_t flags;
+
+ struct rk_clk_pll_rate *rates;
+ struct rk_clk_pll_rate *frac_rates;
+};
+
+#define RK_CLK_PLL_HAVE_GATE 0x1
+
+int rk3328_clk_pll_register(struct clkdom *clkdom, struct rk_clk_pll_def *clkdef);
+int rk3399_clk_pll_register(struct clkdom *clkdom, struct rk_clk_pll_def *clkdef);
+
+#endif /* _RK_CLK_PLL_H_ */
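The set_freq methods above only look up exact matches in a rate table terminated by a zero frequency. A hypothetical integer-mode entry for 600 MHz from a 24 MHz reference, chosen so that 24 MHz * fbdiv / refdiv / postdiv1 / postdiv2 equals the frequency (not copied from any SoC driver):

/* Hypothetical rate table, for illustration only. */
static struct rk_clk_pll_rate example_pll_rates[] = {
	{
		.freq = 600000000,
		.refdiv = 1,
		.fbdiv = 50,
		.postdiv1 = 2,
		.postdiv2 = 1,
		.dsmpd = 1,	/* integer mode */
		.frac = 0,
	},
	{ 0 },
};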
diff --git a/sys/arm64/rockchip/clk/rk_cru.c b/sys/arm64/rockchip/clk/rk_cru.c
new file mode 100644
index 000000000000..45d3d304b3fd
--- /dev/null
+++ b/sys/arm64/rockchip/clk/rk_cru.c
@@ -0,0 +1,306 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2018 Emmanuel Vadot <manu@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Rockchip Clock and Reset Unit
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <machine/bus.h>
+
+#include <dev/fdt/simplebus.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/extres/clk/clk.h>
+#include <dev/extres/clk/clk_gate.h>
+#include <dev/extres/clk/clk_fixed.h>
+#include <dev/extres/clk/clk_link.h>
+#include <dev/extres/hwreset/hwreset.h>
+
+#include <arm64/rockchip/clk/rk_clk_composite.h>
+#include <arm64/rockchip/clk/rk_clk_gate.h>
+#include <arm64/rockchip/clk/rk_clk_mux.h>
+#include <arm64/rockchip/clk/rk_clk_pll.h>
+#include <arm64/rockchip/clk/rk_cru.h>
+
+#include "clkdev_if.h"
+#include "hwreset_if.h"
+
+static struct resource_spec rk_cru_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { -1, 0 }
+};
+
+#define CCU_READ4(sc, reg) bus_read_4((sc)->res, (reg))
+#define CCU_WRITE4(sc, reg, val) bus_write_4((sc)->res, (reg), (val))
+
+void rk3328_cru_register_clocks(struct rk_cru_softc *sc);
+
+static int
+rk_cru_write_4(device_t dev, bus_addr_t addr, uint32_t val)
+{
+ struct rk_cru_softc *sc;
+
+ sc = device_get_softc(dev);
+ CCU_WRITE4(sc, addr, val);
+ return (0);
+}
+
+static int
+rk_cru_read_4(device_t dev, bus_addr_t addr, uint32_t *val)
+{
+ struct rk_cru_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ *val = CCU_READ4(sc, addr);
+ return (0);
+}
+
+static int
+rk_cru_modify_4(device_t dev, bus_addr_t addr, uint32_t clr, uint32_t set)
+{
+ struct rk_cru_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+
+ reg = CCU_READ4(sc, addr);
+ reg &= ~clr;
+ reg |= set;
+ CCU_WRITE4(sc, addr, reg);
+
+ return (0);
+}
+
+static int
+rk_cru_reset_assert(device_t dev, intptr_t id, bool reset)
+{
+ struct rk_cru_softc *sc;
+ uint32_t reg;
+ int bit;
+ uint32_t val;
+
+ sc = device_get_softc(dev);
+
+ if (id > sc->reset_num)
+ return (ENXIO);
+
+ reg = sc->reset_offset + id / 16 * 4;
+ bit = id % 16;
+
+ mtx_lock(&sc->mtx);
+ val = 0;
+ if (reset)
+ val = (1 << bit);
+ CCU_WRITE4(sc, reg, val | ((1 << bit) << 16));
+ mtx_unlock(&sc->mtx);
+
+ return (0);
+}
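Each softreset register carries 16 reset lanes, so the id is split into a register index (id / 16) and a bit position (id % 16), and the matching write-enable bit is raised in the upper half-word. A small standalone check of that mapping (the base offset is a placeholder):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t reset_offset = 0x300;	/* placeholder softreset base */
	uint32_t id = 37;
	uint32_t reg = reset_offset + id / 16 * 4;	/* 0x308 */
	uint32_t bit = id % 16;				/* 5 */

	/* Assert lane 37: data bit 5 plus its write-enable bit 21. */
	printf("reg=0x%x val=0x%08x\n", reg, (1u << bit) | ((1u << bit) << 16));
	return (0);
}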
+
+static int
+rk_cru_reset_is_asserted(device_t dev, intptr_t id, bool *reset)
+{
+ struct rk_cru_softc *sc;
+ uint32_t reg;
+ int bit;
+ uint32_t val;
+
+ sc = device_get_softc(dev);
+
+ if (id > sc->reset_num)
+ return (ENXIO);
+ reg = sc->reset_offset + id / 16 * 4;
+ bit = id % 16;
+
+ mtx_lock(&sc->mtx);
+ val = CCU_READ4(sc, reg);
+ mtx_unlock(&sc->mtx);
+
+ *reset = false;
+ if (val & (1 << bit))
+ *reset = true;
+
+ return (0);
+}
+
+static void
+rk_cru_device_lock(device_t dev)
+{
+ struct rk_cru_softc *sc;
+
+ sc = device_get_softc(dev);
+ mtx_lock(&sc->mtx);
+}
+
+static void
+rk_cru_device_unlock(device_t dev)
+{
+ struct rk_cru_softc *sc;
+
+ sc = device_get_softc(dev);
+ mtx_unlock(&sc->mtx);
+}
+
+static int
+rk_cru_register_gates(struct rk_cru_softc *sc)
+{
+ struct rk_clk_gate_def def;
+ int i;
+
+ for (i = 0; i < sc->ngates; i++) {
+ if (sc->gates[i].name == NULL)
+ continue;
+ memset(&def, 0, sizeof(def));
+ def.clkdef.id = sc->gates[i].id;
+ def.clkdef.name = sc->gates[i].name;
+ def.clkdef.parent_names = &sc->gates[i].parent_name;
+ def.clkdef.parent_cnt = 1;
+ def.offset = sc->gates[i].offset;
+ def.shift = sc->gates[i].shift;
+ def.mask = 1;
+ def.on_value = 0;
+ def.off_value = 1;
+ rk_clk_gate_register(sc->clkdom, &def);
+ }
+
+ return (0);
+}
+
+int
+rk_cru_attach(device_t dev)
+{
+ struct rk_cru_softc *sc;
+ phandle_t node;
+ int i;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ node = ofw_bus_get_node(dev);
+
+ if (bus_alloc_resources(dev, rk_cru_spec, &sc->res) != 0) {
+ device_printf(dev, "cannot allocate resources for device\n");
+ return (ENXIO);
+ }
+
+ mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF);
+
+ sc->clkdom = clkdom_create(dev);
+ if (sc->clkdom == NULL)
+ panic("Cannot create clkdom\n");
+
+ for (i = 0; i < sc->nclks; i++) {
+ switch (sc->clks[i].type) {
+ case RK_CLK_UNDEFINED:
+ break;
+ case RK3328_CLK_PLL:
+ rk3328_clk_pll_register(sc->clkdom,
+ sc->clks[i].clk.pll);
+ break;
+ case RK3399_CLK_PLL:
+ rk3399_clk_pll_register(sc->clkdom,
+ sc->clks[i].clk.pll);
+ break;
+ case RK_CLK_COMPOSITE:
+ rk_clk_composite_register(sc->clkdom,
+ sc->clks[i].clk.composite);
+ break;
+ case RK_CLK_MUX:
+ rk_clk_mux_register(sc->clkdom, sc->clks[i].clk.mux);
+ break;
+ case RK_CLK_ARMCLK:
+ rk_clk_armclk_register(sc->clkdom,
+ sc->clks[i].clk.armclk);
+ break;
+ case RK_CLK_FIXED:
+ clknode_fixed_register(sc->clkdom,
+ sc->clks[i].clk.fixed);
+ break;
+ case RK_CLK_FRACT:
+ rk_clk_fract_register(sc->clkdom,
+ sc->clks[i].clk.fract);
+ break;
+ case RK_CLK_LINK:
+ clknode_link_register(sc->clkdom,
+ sc->clks[i].clk.link);
+ break;
+ default:
+ device_printf(dev, "Unknown clock type\n");
+ return (ENXIO);
+ }
+ }
+
+ if (sc->gates)
+ rk_cru_register_gates(sc);
+
+ if (clkdom_finit(sc->clkdom) != 0)
+ panic("cannot finalize clkdom initialization\n");
+
+ if (bootverbose)
+ clkdom_dump(sc->clkdom);
+
+ clk_set_assigned(dev, node);
+
+	/* Register ourselves as a reset provider. */
+ hwreset_register_ofw_provider(dev);
+
+ return (0);
+}
+
+static device_method_t rk_cru_methods[] = {
+ /* clkdev interface */
+ DEVMETHOD(clkdev_write_4, rk_cru_write_4),
+ DEVMETHOD(clkdev_read_4, rk_cru_read_4),
+ DEVMETHOD(clkdev_modify_4, rk_cru_modify_4),
+ DEVMETHOD(clkdev_device_lock, rk_cru_device_lock),
+ DEVMETHOD(clkdev_device_unlock, rk_cru_device_unlock),
+
+ /* Reset interface */
+ DEVMETHOD(hwreset_assert, rk_cru_reset_assert),
+ DEVMETHOD(hwreset_is_asserted, rk_cru_reset_is_asserted),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(rk_cru, rk_cru_driver, rk_cru_methods,
+ sizeof(struct rk_cru_softc));
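A SoC-specific CRU front end is expected to fill in the clock, gate and reset description fields of struct rk_cru_softc and then call rk_cru_attach(). A heavily abridged, hypothetical sketch of such glue (every table entry, offset and count below is a placeholder):

/* Hypothetical SoC glue; all names, offsets and counts are placeholders. */
static struct rk_cru_gate example_gates[] = {
	{ .id = 1, .name = "example_gate", .parent_name = "example_parent",
	    .offset = 0x0200, .shift = 0 },
};

static struct rk_clk example_clks[] = {
	FRATE(0, "xin24m", 24000000),
};

static int
example_cru_attach(device_t dev)
{
	struct rk_cru_softc *sc;

	sc = device_get_softc(dev);

	sc->gates = example_gates;
	sc->ngates = nitems(example_gates);
	sc->clks = example_clks;
	sc->nclks = nitems(example_clks);
	sc->reset_offset = 0x0300;	/* base of the softreset registers */
	sc->reset_num = 12 * 16;	/* number of reset lanes */

	return (rk_cru_attach(dev));
}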
diff --git a/sys/arm64/rockchip/clk/rk_cru.h b/sys/arm64/rockchip/clk/rk_cru.h
new file mode 100644
index 000000000000..a3e8b0017453
--- /dev/null
+++ b/sys/arm64/rockchip/clk/rk_cru.h
@@ -0,0 +1,252 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2018 Emmanuel Vadot <manu@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __RK_CRU_H__
+#define __RK_CRU_H__
+
+#include <dev/extres/clk/clk.h>
+#include <dev/extres/clk/clk_div.h>
+#include <dev/extres/clk/clk_gate.h>
+#include <dev/extres/clk/clk_fixed.h>
+#include <dev/extres/clk/clk_link.h>
+
+#include <arm64/rockchip/clk/rk_clk_armclk.h>
+#include <arm64/rockchip/clk/rk_clk_composite.h>
+#include <arm64/rockchip/clk/rk_clk_fract.h>
+#include <arm64/rockchip/clk/rk_clk_gate.h>
+#include <arm64/rockchip/clk/rk_clk_mux.h>
+#include <arm64/rockchip/clk/rk_clk_pll.h>
+
+/* Macros for defining various types of clocks. */
+/* Pure gate */
+#define GATE(_idx, _clkname, _pname, _o, _s) \
+{ \
+ .id = _idx, \
+ .name = _clkname, \
+ .parent_name = _pname, \
+ .offset = CRU_CLKGATE_CON(_o), \
+ .shift = _s, \
+}
+
+/* Fixed rate clock. */
+#define FRATE(_id, _name, _freq) \
+{ \
+ .type = RK_CLK_FIXED, \
+ .clk.fixed = &(struct clk_fixed_def) { \
+ .clkdef.id = _id, \
+ .clkdef.name = _name, \
+ .clkdef.parent_names = NULL, \
+ .clkdef.parent_cnt = 0, \
+ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \
+ .freq = _freq, \
+ }, \
+}
+
+/* Fixed factor multiplier/divider. */
+#define FFACT(_id, _name, _pname, _mult, _div) \
+{ \
+ .type = RK_CLK_FIXED, \
+ .clk.fixed = &(struct clk_fixed_def) { \
+ .clkdef.id = _id, \
+ .clkdef.name = _name, \
+ .clkdef.parent_names = (const char *[]){_pname}, \
+ .clkdef.parent_cnt = 1, \
+ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \
+ .mult = _mult, \
+ .div = _div, \
+ }, \
+}
+
+/* Linked clock. */
+#define LINK(_name) \
+{ \
+ .type = RK_CLK_LINK, \
+ .clk.link = &(struct clk_link_def) { \
+ .clkdef.id = 0, \
+ .clkdef.name = _name, \
+ .clkdef.parent_names = NULL, \
+ .clkdef.parent_cnt = 0, \
+ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \
+ }, \
+}
+
+/* Complex clock for ARM cores. */
+#define ARMDIV(_id, _name, _pn, _r, _o, _ds, _dw, _ms, _mw, _mp, _ap) \
+{ \
+ .type = RK_CLK_ARMCLK, \
+ .clk.armclk = &(struct rk_clk_armclk_def) { \
+ .clkdef.id = _id, \
+ .clkdef.name = _name, \
+ .clkdef.parent_names = _pn, \
+ .clkdef.parent_cnt = nitems(_pn), \
+ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \
+ .muxdiv_offset = CRU_CLKSEL_CON(_o), \
+ .mux_shift = _ms, \
+ .mux_width = _mw, \
+ .div_shift = _ds, \
+ .div_width = _dw, \
+ .main_parent = _mp, \
+ .alt_parent = _ap, \
+ .rates = _r, \
+ .nrates = nitems(_r), \
+ }, \
+}
+
+/* Fractional rate multiplier/divider. */
+#define FRACT(_id, _name, _pname, _f, _o) \
+{ \
+ .type = RK_CLK_FRACT, \
+ .clk.fract = &(struct rk_clk_fract_def) { \
+ .clkdef.id = _id, \
+ .clkdef.name = _name, \
+ .clkdef.parent_names = (const char *[]){_pname}, \
+ .clkdef.parent_cnt = 1, \
+ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \
+ .offset = CRU_CLKSEL_CON(_o), \
+ .flags = _f, \
+ }, \
+}
+
+/* Full composite clock. */
+#define COMP(_id, _name, _pnames, _f, _o, _ds, _dw, _ms, _mw) \
+{ \
+ .type = RK_CLK_COMPOSITE, \
+ .clk.composite = &(struct rk_clk_composite_def) { \
+ .clkdef.id = _id, \
+ .clkdef.name = _name, \
+ .clkdef.parent_names = _pnames, \
+ .clkdef.parent_cnt = nitems(_pnames), \
+ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \
+ .muxdiv_offset = CRU_CLKSEL_CON(_o), \
+ .mux_shift = _ms, \
+ .mux_width = _mw, \
+ .div_shift = _ds, \
+ .div_width = _dw, \
+ .flags = RK_CLK_COMPOSITE_HAVE_MUX | _f, \
+ }, \
+}
+
+/* Composite clock without mux (divider only). */
+#define CDIV(_id, _name, _pname, _f, _o, _ds, _dw) \
+{ \
+ .type = RK_CLK_COMPOSITE, \
+ .clk.composite = &(struct rk_clk_composite_def) { \
+ .clkdef.id = _id, \
+ .clkdef.name = _name, \
+ .clkdef.parent_names = (const char *[]){_pname}, \
+ .clkdef.parent_cnt = 1, \
+ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \
+ .muxdiv_offset = CRU_CLKSEL_CON(_o), \
+ .div_shift = _ds, \
+ .div_width = _dw, \
+ .flags = _f, \
+ }, \
+}
+
+/* Complex clock without divider (multiplexer only). */
+#define MUX(_id, _name, _pn, _f, _mo, _ms, _mw) \
+{ \
+ .type = RK_CLK_MUX, \
+ .clk.mux = &(struct rk_clk_mux_def) { \
+ .clkdef.id = _id, \
+ .clkdef.name = _name, \
+ .clkdef.parent_names = _pn, \
+ .clkdef.parent_cnt = nitems(_pn), \
+ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \
+ .offset = CRU_CLKSEL_CON(_mo), \
+ .shift = _ms, \
+ .width = _mw, \
+ .mux_flags = _f, \
+ }, \
+}
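These helpers are meant to build a per-SoC clock table. A short hypothetical fragment showing how they compose (all ids, names, parents and register offsets are placeholders, and CRU_CLKSEL_CON() is assumed to be provided by the SoC header):

/* Hypothetical table fragment; every value is a placeholder. */
static const char *example_uart_parents[] = { "example_gpll", "example_cpll" };

static struct rk_clk example_soc_clks[] = {
	FRATE(0, "xin24m", 24000000),
	FFACT(0, "example_gpll_div2", "example_gpll", 1, 2),
	COMP(10, "example_uart_src", example_uart_parents, 0, 14, 0, 7, 8, 2),
	FRACT(11, "example_uart_frac", "example_uart_src", 0, 15),
};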
+
+struct rk_cru_gate {
+ const char *name;
+ const char *parent_name;
+ uint32_t id;
+ uint32_t offset;
+ uint32_t shift;
+};
+
+#define CRU_GATE(idx, clkname, pname, o, s) \
+ { \
+ .id = idx, \
+ .name = clkname, \
+ .parent_name = pname, \
+ .offset = o, \
+ .shift = s, \
+ },
+
+enum rk_clk_type {
+ RK_CLK_UNDEFINED = 0,
+ RK3328_CLK_PLL,
+ RK3399_CLK_PLL,
+ RK_CLK_COMPOSITE,
+ RK_CLK_FIXED,
+ RK_CLK_FRACT,
+ RK_CLK_MUX,
+ RK_CLK_ARMCLK,
+ RK_CLK_LINK,
+};
+
+struct rk_clk {
+ enum rk_clk_type type;
+ union {
+ struct rk_clk_pll_def *pll;
+ struct rk_clk_composite_def *composite;
+ struct rk_clk_mux_def *mux;
+ struct rk_clk_armclk_def *armclk;
+ struct clk_fixed_def *fixed;
+ struct rk_clk_fract_def *fract;
+ struct clk_link_def *link;
+ } clk;
+};
+
+struct rk_cru_softc {
+ device_t dev;
+ struct resource *res;
+ struct clkdom *clkdom;
+ struct mtx mtx;
+ int type;
+ uint32_t reset_offset;
+ uint32_t reset_num;
+ struct rk_cru_gate *gates;
+ int ngates;
+ struct rk_clk *clks;
+ int nclks;
+ struct rk_clk_armclk_def *armclk;
+ struct rk_clk_armclk_rates *armclk_rates;
+ int narmclk_rates;
+};
+
+DECLARE_CLASS(rk_cru_driver);
+
+int rk_cru_attach(device_t dev);
+
+#endif /* __RK_CRU_H__ */
diff --git a/sys/arm64/rockchip/if_dwc_rk.c b/sys/arm64/rockchip/if_dwc_rk.c
new file mode 100644
index 000000000000..eafd9e00caf0
--- /dev/null
+++ b/sys/arm64/rockchip/if_dwc_rk.c
@@ -0,0 +1,625 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2018 Emmanuel Vadot <manu@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+*/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/socket.h>
+
+#include <machine/bus.h>
+
+#include <net/if.h>
+#include <net/if_media.h>
+
+#include <dev/dwc/if_dwc.h>
+#include <dev/dwc/if_dwcvar.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/extres/clk/clk.h>
+#include <dev/extres/hwreset/hwreset.h>
+#include <dev/extres/regulator/regulator.h>
+#include <dev/extres/syscon/syscon.h>
+
+#include "if_dwc_if.h"
+#include "syscon_if.h"
+
+#define RK3328_GRF_MAC_CON0 0x0900
+#define MAC_CON0_GMAC2IO_TX_DL_CFG_MASK 0x7F
+#define MAC_CON0_GMAC2IO_TX_DL_CFG_SHIFT 0
+#define MAC_CON0_GMAC2IO_RX_DL_CFG_MASK 0x7F
+#define MAC_CON0_GMAC2IO_RX_DL_CFG_SHIFT 7
+
+#define RK3328_GRF_MAC_CON1 0x0904
+#define MAC_CON1_GMAC2IO_GMAC_TXCLK_DLY_ENA (1 << 0)
+#define MAC_CON1_GMAC2IO_GMAC_RXCLK_DLY_ENA (1 << 1)
+#define MAC_CON1_GMAC2IO_GMII_CLK_SEL_MASK (3 << 11)
+#define MAC_CON1_GMAC2IO_GMII_CLK_SEL_125 (0 << 11)
+#define MAC_CON1_GMAC2IO_GMII_CLK_SEL_25 (3 << 11)
+#define MAC_CON1_GMAC2IO_GMII_CLK_SEL_2_5 (2 << 11)
+#define MAC_CON1_GMAC2IO_RMII_MODE_MASK (1 << 9)
+#define MAC_CON1_GMAC2IO_RMII_MODE (1 << 9)
+#define MAC_CON1_GMAC2IO_INTF_SEL_MASK (7 << 4)
+#define MAC_CON1_GMAC2IO_INTF_RMII (4 << 4)
+#define MAC_CON1_GMAC2IO_INTF_RGMII (1 << 4)
+#define MAC_CON1_GMAC2IO_RMII_CLK_SEL_MASK (1 << 7)
+#define MAC_CON1_GMAC2IO_RMII_CLK_SEL_25 (1 << 7)
+#define MAC_CON1_GMAC2IO_RMII_CLK_SEL_2_5 (0 << 7)
+#define MAC_CON1_GMAC2IO_MAC_SPEED_MASK (1 << 2)
+#define MAC_CON1_GMAC2IO_MAC_SPEED_100 (1 << 2)
+#define MAC_CON1_GMAC2IO_MAC_SPEED_10 (0 << 2)
+#define RK3328_GRF_MAC_CON2 0x0908
+#define RK3328_GRF_MACPHY_CON0 0x0B00
+#define MACPHY_CON0_CLK_50M_MASK (1 << 14)
+#define MACPHY_CON0_CLK_50M (1 << 14)
+#define MACPHY_CON0_RMII_MODE_MASK (3 << 6)
+#define MACPHY_CON0_RMII_MODE (1 << 6)
+#define RK3328_GRF_MACPHY_CON1 0x0B04
+#define MACPHY_CON1_RMII_MODE_MASK (1 << 9)
+#define MACPHY_CON1_RMII_MODE (1 << 9)
+#define RK3328_GRF_MACPHY_CON2 0x0B08
+#define RK3328_GRF_MACPHY_CON3 0x0B0C
+#define RK3328_GRF_MACPHY_STATUS 0x0B10
+
+#define RK3399_GRF_SOC_CON5 0xc214
+#define SOC_CON5_GMAC_CLK_SEL_MASK (3 << 4)
+#define SOC_CON5_GMAC_CLK_SEL_125 (0 << 4)
+#define SOC_CON5_GMAC_CLK_SEL_25 (3 << 4)
+#define SOC_CON5_GMAC_CLK_SEL_2_5 (2 << 4)
+#define RK3399_GRF_SOC_CON6 0xc218
+#define SOC_CON6_GMAC_TXCLK_DLY_ENA (1 << 7)
+#define SOC_CON6_TX_DL_CFG_MASK 0x7F
+#define SOC_CON6_TX_DL_CFG_SHIFT 0
+#define SOC_CON6_RX_DL_CFG_MASK 0x7F
+#define SOC_CON6_GMAC_RXCLK_DLY_ENA (1 << 15)
+#define SOC_CON6_RX_DL_CFG_SHIFT 8
+
+struct if_dwc_rk_softc;
+
+typedef void (*if_dwc_rk_set_delaysfn_t)(struct if_dwc_rk_softc *);
+typedef int (*if_dwc_rk_set_speedfn_t)(struct if_dwc_rk_softc *, int);
+typedef void (*if_dwc_rk_set_phy_modefn_t)(struct if_dwc_rk_softc *);
+typedef void (*if_dwc_rk_phy_powerupfn_t)(struct if_dwc_rk_softc *);
+
+struct if_dwc_rk_ops {
+ if_dwc_rk_set_delaysfn_t set_delays;
+ if_dwc_rk_set_speedfn_t set_speed;
+ if_dwc_rk_set_phy_modefn_t set_phy_mode;
+ if_dwc_rk_phy_powerupfn_t phy_powerup;
+};
+
+struct if_dwc_rk_softc {
+ struct dwc_softc base;
+ uint32_t tx_delay;
+ uint32_t rx_delay;
+ bool integrated_phy;
+ bool clock_in;
+ phandle_t phy_node;
+ struct syscon *grf;
+ struct if_dwc_rk_ops *ops;
+ /* Common clocks */
+ clk_t mac_clk_rx;
+ clk_t mac_clk_tx;
+ clk_t aclk_mac;
+ clk_t pclk_mac;
+ clk_t clk_stmmaceth;
+ /* RMII clocks */
+ clk_t clk_mac_ref;
+ clk_t clk_mac_refout;
+ /* PHY clock */
+ clk_t clk_phy;
+};
+
+static void rk3328_set_delays(struct if_dwc_rk_softc *sc);
+static int rk3328_set_speed(struct if_dwc_rk_softc *sc, int speed);
+static void rk3328_set_phy_mode(struct if_dwc_rk_softc *sc);
+static void rk3328_phy_powerup(struct if_dwc_rk_softc *sc);
+
+static void rk3399_set_delays(struct if_dwc_rk_softc *sc);
+static int rk3399_set_speed(struct if_dwc_rk_softc *sc, int speed);
+
+static struct if_dwc_rk_ops rk3288_ops = {
+};
+
+static struct if_dwc_rk_ops rk3328_ops = {
+ .set_delays = rk3328_set_delays,
+ .set_speed = rk3328_set_speed,
+ .set_phy_mode = rk3328_set_phy_mode,
+ .phy_powerup = rk3328_phy_powerup,
+};
+
+static struct if_dwc_rk_ops rk3399_ops = {
+ .set_delays = rk3399_set_delays,
+ .set_speed = rk3399_set_speed,
+};
+
+static struct ofw_compat_data compat_data[] = {
+ {"rockchip,rk3288-gmac", (uintptr_t)&rk3288_ops},
+ {"rockchip,rk3328-gmac", (uintptr_t)&rk3328_ops},
+ {"rockchip,rk3399-gmac", (uintptr_t)&rk3399_ops},
+ {NULL, 0}
+};
+
+static void
+rk3328_set_delays(struct if_dwc_rk_softc *sc)
+{
+ uint32_t reg;
+ uint32_t tx, rx;
+
+ if (sc->base.phy_mode != PHY_MODE_RGMII)
+ return;
+
+ reg = SYSCON_READ_4(sc->grf, RK3328_GRF_MAC_CON0);
+ tx = ((reg >> MAC_CON0_GMAC2IO_TX_DL_CFG_SHIFT) & MAC_CON0_GMAC2IO_TX_DL_CFG_MASK);
+ rx = ((reg >> MAC_CON0_GMAC2IO_RX_DL_CFG_SHIFT) & MAC_CON0_GMAC2IO_RX_DL_CFG_MASK);
+
+ reg = SYSCON_READ_4(sc->grf, RK3328_GRF_MAC_CON1);
+ if (bootverbose) {
+		device_printf(sc->base.dev, "current delay settings: tx=%u(%s) rx=%u(%s)\n",
+ tx, ((reg & MAC_CON1_GMAC2IO_GMAC_TXCLK_DLY_ENA) ? "enabled" : "disabled"),
+ rx, ((reg & MAC_CON1_GMAC2IO_GMAC_RXCLK_DLY_ENA) ? "enabled" : "disabled"));
+
+		device_printf(sc->base.dev, "setting new RK3328 TX/RX delays: %d/%d\n",
+ sc->tx_delay, sc->rx_delay);
+ }
+
+ reg = (MAC_CON1_GMAC2IO_GMAC_TXCLK_DLY_ENA | MAC_CON1_GMAC2IO_GMAC_RXCLK_DLY_ENA) << 16;
+ reg |= (MAC_CON1_GMAC2IO_GMAC_TXCLK_DLY_ENA | MAC_CON1_GMAC2IO_GMAC_RXCLK_DLY_ENA);
+ SYSCON_WRITE_4(sc->grf, RK3328_GRF_MAC_CON1, reg);
+
+ reg = 0xffff << 16;
+ reg |= ((sc->tx_delay & MAC_CON0_GMAC2IO_TX_DL_CFG_MASK) <<
+ MAC_CON0_GMAC2IO_TX_DL_CFG_SHIFT);
+ reg |= ((sc->rx_delay & MAC_CON0_GMAC2IO_TX_DL_CFG_MASK) <<
+ MAC_CON0_GMAC2IO_RX_DL_CFG_SHIFT);
+ SYSCON_WRITE_4(sc->grf, RK3328_GRF_MAC_CON0, reg);
+}
+
+static int
+rk3328_set_speed(struct if_dwc_rk_softc *sc, int speed)
+{
+ uint32_t reg;
+
+ switch (sc->base.phy_mode) {
+ case PHY_MODE_RGMII:
+ switch (speed) {
+ case IFM_1000_T:
+ case IFM_1000_SX:
+ reg = MAC_CON1_GMAC2IO_GMII_CLK_SEL_125;
+ break;
+ case IFM_100_TX:
+ reg = MAC_CON1_GMAC2IO_GMII_CLK_SEL_25;
+ break;
+ case IFM_10_T:
+ reg = MAC_CON1_GMAC2IO_GMII_CLK_SEL_2_5;
+ break;
+ default:
+ device_printf(sc->base.dev, "unsupported RGMII media %u\n", speed);
+ return (-1);
+ }
+
+ SYSCON_WRITE_4(sc->grf, RK3328_GRF_MAC_CON1,
+ ((MAC_CON1_GMAC2IO_GMII_CLK_SEL_MASK << 16) | reg));
+ break;
+ case PHY_MODE_RMII:
+ switch (speed) {
+ case IFM_100_TX:
+ reg = MAC_CON1_GMAC2IO_RMII_CLK_SEL_25 |
+ MAC_CON1_GMAC2IO_MAC_SPEED_100;
+ break;
+ case IFM_10_T:
+ reg = MAC_CON1_GMAC2IO_RMII_CLK_SEL_2_5 |
+ MAC_CON1_GMAC2IO_MAC_SPEED_10;
+ break;
+ default:
+ device_printf(sc->base.dev, "unsupported RMII media %u\n", speed);
+ return (-1);
+ }
+
+ SYSCON_WRITE_4(sc->grf,
+ sc->integrated_phy ? RK3328_GRF_MAC_CON2 : RK3328_GRF_MAC_CON1,
+ reg |
+ ((MAC_CON1_GMAC2IO_RMII_CLK_SEL_MASK | MAC_CON1_GMAC2IO_MAC_SPEED_MASK) << 16));
+ break;
+ }
+
+ return (0);
+}
+
+static void
+rk3328_set_phy_mode(struct if_dwc_rk_softc *sc)
+{
+
+ switch (sc->base.phy_mode) {
+ case PHY_MODE_RGMII:
+ SYSCON_WRITE_4(sc->grf, RK3328_GRF_MAC_CON1,
+ ((MAC_CON1_GMAC2IO_INTF_SEL_MASK | MAC_CON1_GMAC2IO_RMII_MODE_MASK) << 16) |
+ MAC_CON1_GMAC2IO_INTF_RGMII);
+ break;
+ case PHY_MODE_RMII:
+ SYSCON_WRITE_4(sc->grf, sc->integrated_phy ? RK3328_GRF_MAC_CON2 : RK3328_GRF_MAC_CON1,
+ ((MAC_CON1_GMAC2IO_INTF_SEL_MASK | MAC_CON1_GMAC2IO_RMII_MODE_MASK) << 16) |
+ MAC_CON1_GMAC2IO_INTF_RMII | MAC_CON1_GMAC2IO_RMII_MODE);
+ break;
+ }
+}
+
+static void
+rk3328_phy_powerup(struct if_dwc_rk_softc *sc)
+{
+ SYSCON_WRITE_4(sc->grf, RK3328_GRF_MACPHY_CON1,
+ (MACPHY_CON1_RMII_MODE_MASK << 16) |
+ MACPHY_CON1_RMII_MODE);
+}
+
+static void
+rk3399_set_delays(struct if_dwc_rk_softc *sc)
+{
+ uint32_t reg, tx, rx;
+
+ if (sc->base.phy_mode != PHY_MODE_RGMII)
+ return;
+
+ reg = SYSCON_READ_4(sc->grf, RK3399_GRF_SOC_CON6);
+ tx = ((reg >> SOC_CON6_TX_DL_CFG_SHIFT) & SOC_CON6_TX_DL_CFG_MASK);
+ rx = ((reg >> SOC_CON6_RX_DL_CFG_SHIFT) & SOC_CON6_RX_DL_CFG_MASK);
+
+ if (bootverbose) {
+		device_printf(sc->base.dev, "current delay settings: tx=%u(%s) rx=%u(%s)\n",
+ tx, ((reg & SOC_CON6_GMAC_TXCLK_DLY_ENA) ? "enabled" : "disabled"),
+ rx, ((reg & SOC_CON6_GMAC_RXCLK_DLY_ENA) ? "enabled" : "disabled"));
+
+ device_printf(sc->base.dev, "setting new RK3399 RX/TX delays: %d/%d\n",
+ sc->rx_delay, sc->tx_delay);
+ }
+
+ reg = 0xFFFF << 16;
+ reg |= ((sc->tx_delay & SOC_CON6_TX_DL_CFG_MASK) <<
+ SOC_CON6_TX_DL_CFG_SHIFT);
+ reg |= ((sc->rx_delay & SOC_CON6_RX_DL_CFG_MASK) <<
+ SOC_CON6_RX_DL_CFG_SHIFT);
+ reg |= SOC_CON6_GMAC_TXCLK_DLY_ENA | SOC_CON6_GMAC_RXCLK_DLY_ENA;
+
+ SYSCON_WRITE_4(sc->grf, RK3399_GRF_SOC_CON6, reg);
+}
+
+static int
+rk3399_set_speed(struct if_dwc_rk_softc *sc, int speed)
+{
+ uint32_t reg;
+
+ switch (speed) {
+ case IFM_1000_T:
+ case IFM_1000_SX:
+ reg = SOC_CON5_GMAC_CLK_SEL_125;
+ break;
+ case IFM_100_TX:
+ reg = SOC_CON5_GMAC_CLK_SEL_25;
+ break;
+ case IFM_10_T:
+ reg = SOC_CON5_GMAC_CLK_SEL_2_5;
+ break;
+ default:
+ device_printf(sc->base.dev, "unsupported media %u\n", speed);
+ return (-1);
+ }
+
+ SYSCON_WRITE_4(sc->grf, RK3399_GRF_SOC_CON5,
+ ((SOC_CON5_GMAC_CLK_SEL_MASK << 16) | reg));
+ return (0);
+}
+
+static int
+if_dwc_rk_sysctl_delays(SYSCTL_HANDLER_ARGS)
+{
+ struct if_dwc_rk_softc *sc;
+ int rv;
+ uint32_t rxtx;
+
+ sc = arg1;
+ rxtx = ((sc->rx_delay << 8) | sc->tx_delay);
+
+ rv = sysctl_handle_int(oidp, &rxtx, 0, req);
+ if (rv != 0 || req->newptr == NULL)
+ return (rv);
+ sc->tx_delay = rxtx & 0xff;
+ sc->rx_delay = (rxtx >> 8) & 0xff;
+
+ if (sc->ops->set_delays)
+ sc->ops->set_delays(sc);
+
+ return (0);
+}
+
+static int
+if_dwc_rk_init_sysctl(struct if_dwc_rk_softc *sc)
+{
+ struct sysctl_oid *child;
+ struct sysctl_ctx_list *ctx_list;
+
+ ctx_list = device_get_sysctl_ctx(sc->base.dev);
+ child = device_get_sysctl_tree(sc->base.dev);
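+ /*
+ * The value is encoded as ((rx << 8) | tx); for example (assuming
+ * the default "dwc" unit name) "sysctl dev.dwc.0.delays=0x1030"
+ * requests rx=0x10 and tx=0x30.
+ */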
+ SYSCTL_ADD_PROC(ctx_list,
+ SYSCTL_CHILDREN(child), OID_AUTO, "delays",
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, sc, 0,
+ if_dwc_rk_sysctl_delays, "", "RGMII RX/TX delays: ((rx << 8) | tx)");
+
+ return (0);
+}
+
+static int
+if_dwc_rk_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+ device_set_desc(dev, "Rockchip Gigabit Ethernet Controller");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+if_dwc_rk_init_clocks(device_t dev)
+{
+ struct if_dwc_rk_softc *sc;
+ int error;
+
+ sc = device_get_softc(dev);
+ error = clk_set_assigned(dev, ofw_bus_get_node(dev));
+ if (error != 0) {
+ device_printf(dev, "clk_set_assigned failed\n");
+ return (error);
+ }
+
+ /* Enable clocks */
+ error = clk_get_by_ofw_name(dev, 0, "stmmaceth", &sc->clk_stmmaceth);
+ if (error != 0) {
+ device_printf(dev, "could not find clock stmmaceth\n");
+ return (error);
+ }
+
+ if (clk_get_by_ofw_name(dev, 0, "mac_clk_rx", &sc->mac_clk_rx) != 0) {
+ device_printf(sc->base.dev, "could not get mac_clk_rx clock\n");
+ sc->mac_clk_rx = NULL;
+ }
+
+ if (clk_get_by_ofw_name(dev, 0, "mac_clk_tx", &sc->mac_clk_tx) != 0) {
+ device_printf(sc->base.dev, "could not get mac_clk_tx clock\n");
+ sc->mac_clk_tx = NULL;
+ }
+
+ if (clk_get_by_ofw_name(dev, 0, "aclk_mac", &sc->aclk_mac) != 0) {
+ device_printf(sc->base.dev, "could not get aclk_mac clock\n");
+ sc->aclk_mac = NULL;
+ }
+
+ if (clk_get_by_ofw_name(dev, 0, "pclk_mac", &sc->pclk_mac) != 0) {
+ device_printf(sc->base.dev, "could not get pclk_mac clock\n");
+ sc->pclk_mac = NULL;
+ }
+
+ if (sc->base.phy_mode == PHY_MODE_RGMII) {
+ if (clk_get_by_ofw_name(dev, 0, "clk_mac_ref", &sc->clk_mac_ref) != 0) {
+ device_printf(sc->base.dev, "could not get clk_mac_ref clock\n");
+ sc->clk_mac_ref = NULL;
+ }
+
+ if (!sc->clock_in) {
+ if (clk_get_by_ofw_name(dev, 0, "clk_mac_refout", &sc->clk_mac_refout) != 0) {
+ device_printf(sc->base.dev, "could not get clk_mac_refout clock\n");
+ sc->clk_mac_refout = NULL;
+ }
+
+ clk_set_freq(sc->clk_stmmaceth, 50000000, 0);
+ }
+ }
+
+ if ((sc->phy_node != 0) && sc->integrated_phy) {
+ if (clk_get_by_ofw_index(dev, sc->phy_node, 0, &sc->clk_phy) != 0) {
+ device_printf(sc->base.dev, "could not get PHY clock\n");
+ sc->clk_phy = NULL;
+ }
+
+ if (sc->clk_phy) {
+ clk_set_freq(sc->clk_phy, 50000000, 0);
+ }
+ }
+
+ if (sc->base.phy_mode == PHY_MODE_RMII) {
+ if (sc->mac_clk_rx)
+ clk_enable(sc->mac_clk_rx);
+ if (sc->clk_mac_ref)
+ clk_enable(sc->clk_mac_ref);
+ if (sc->clk_mac_refout)
+ clk_enable(sc->clk_mac_refout);
+ }
+ if (sc->clk_phy)
+ clk_enable(sc->clk_phy);
+ if (sc->aclk_mac)
+ clk_enable(sc->aclk_mac);
+ if (sc->pclk_mac)
+ clk_enable(sc->pclk_mac);
+ if (sc->mac_clk_tx)
+ clk_enable(sc->mac_clk_tx);
+
+ DELAY(50);
+
+ return (0);
+}
+
+static int
+if_dwc_rk_init(device_t dev)
+{
+ struct if_dwc_rk_softc *sc;
+ phandle_t node;
+ uint32_t rx, tx;
+ int err;
+ pcell_t phy_handle;
+ char *clock_in_out;
+ hwreset_t phy_reset;
+ regulator_t phy_supply;
+
+ sc = device_get_softc(dev);
+ node = ofw_bus_get_node(dev);
+ sc->ops = (struct if_dwc_rk_ops *)ofw_bus_search_compatible(dev, compat_data)->ocd_data;
+ if (OF_hasprop(node, "rockchip,grf") &&
+ syscon_get_by_ofw_property(dev, node,
+ "rockchip,grf", &sc->grf) != 0) {
+ device_printf(dev, "cannot get grf driver handle\n");
+ return (ENXIO);
+ }
+
+ if (OF_getencprop(node, "tx_delay", &tx, sizeof(tx)) <= 0)
+ tx = 0x30;
+ if (OF_getencprop(node, "rx_delay", &rx, sizeof(rx)) <= 0)
+ rx = 0x10;
+ sc->tx_delay = tx;
+ sc->rx_delay = rx;
+
+ sc->clock_in = true;
+ if (OF_getprop_alloc(node, "clock_in_out", (void **)&clock_in_out) > 0) {
+ if (strcmp(clock_in_out, "input") == 0)
+ sc->clock_in = true;
+ else
+ sc->clock_in = false;
+ OF_prop_free(clock_in_out);
+ }
+
+ if (OF_getencprop(node, "phy-handle", (void *)&phy_handle,
+ sizeof(phy_handle)) > 0)
+ sc->phy_node = OF_node_from_xref(phy_handle);
+
+ if (sc->phy_node)
+ sc->integrated_phy = OF_hasprop(sc->phy_node, "phy-is-integrated");
+
+ if (sc->integrated_phy)
+ device_printf(sc->base.dev, "PHY is integrated\n");
+
+ if_dwc_rk_init_clocks(dev);
+
+ if (sc->ops->set_phy_mode)
+ sc->ops->set_phy_mode(sc);
+
+ if (sc->ops->set_delays)
+ sc->ops->set_delays(sc);
+
+ /*
+ * This also applies the delays when the corresponding loader
+ * tunable is set (the sysctl is created with CTLFLAG_RWTUN).
+ */
+ err = if_dwc_rk_init_sysctl(sc);
+ if (err != 0)
+ return (err);
+
+ if (regulator_get_by_ofw_property(sc->base.dev, 0,
+ "phy-supply", &phy_supply) == 0) {
+ if (regulator_enable(phy_supply)) {
+ device_printf(sc->base.dev,
+ "cannot enable 'phy' regulator\n");
+ }
+ } else
+ device_printf(sc->base.dev, "no phy-supply property\n");
+
+ /* Power up */
+ if (sc->integrated_phy) {
+ if (sc->ops->phy_powerup)
+ sc->ops->phy_powerup(sc);
+
+ SYSCON_WRITE_4(sc->grf, RK3328_GRF_MACPHY_CON0,
+ (MACPHY_CON0_CLK_50M_MASK << 16) |
+ MACPHY_CON0_CLK_50M);
+ SYSCON_WRITE_4(sc->grf, RK3328_GRF_MACPHY_CON0,
+ (MACPHY_CON0_RMII_MODE_MASK << 16) |
+ MACPHY_CON0_RMII_MODE);
+ SYSCON_WRITE_4(sc->grf, RK3328_GRF_MACPHY_CON2, 0xffff1234);
+ SYSCON_WRITE_4(sc->grf, RK3328_GRF_MACPHY_CON3, 0x003f0035);
+
+ if (hwreset_get_by_ofw_idx(dev, sc->phy_node, 0, &phy_reset) == 0) {
+ hwreset_assert(phy_reset);
+ DELAY(20);
+ hwreset_deassert(phy_reset);
+ DELAY(20);
+ }
+ }
+
+ return (0);
+}
+
+static int
+if_dwc_rk_mac_type(device_t dev)
+{
+
+ return (DWC_GMAC_NORMAL_DESC);
+}
+
+static int
+if_dwc_rk_mii_clk(device_t dev)
+{
+
+ /* Should be calculated from the clock */
+ return (GMAC_MII_CLK_150_250M_DIV102);
+}
+
+static int
+if_dwc_rk_set_speed(device_t dev, int speed)
+{
+ struct if_dwc_rk_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ if (sc->ops->set_speed)
+ return sc->ops->set_speed(sc, speed);
+
+ return (0);
+}
+
+static device_method_t if_dwc_rk_methods[] = {
+ DEVMETHOD(device_probe, if_dwc_rk_probe),
+
+ DEVMETHOD(if_dwc_init, if_dwc_rk_init),
+ DEVMETHOD(if_dwc_mac_type, if_dwc_rk_mac_type),
+ DEVMETHOD(if_dwc_mii_clk, if_dwc_rk_mii_clk),
+ DEVMETHOD(if_dwc_set_speed, if_dwc_rk_set_speed),
+
+ DEVMETHOD_END
+};
+
+static devclass_t dwc_rk_devclass;
+
+extern driver_t dwc_driver;
+
+DEFINE_CLASS_1(dwc, dwc_rk_driver, if_dwc_rk_methods,
+ sizeof(struct if_dwc_rk_softc), dwc_driver);
+DRIVER_MODULE(dwc_rk, simplebus, dwc_rk_driver, dwc_rk_devclass, 0, 0);
+MODULE_DEPEND(dwc_rk, dwc, 1, 1, 1);
diff --git a/sys/arm64/rockchip/rk3399_emmcphy.c b/sys/arm64/rockchip/rk3399_emmcphy.c
new file mode 100644
index 000000000000..533ccb0b2c8f
--- /dev/null
+++ b/sys/arm64/rockchip/rk3399_emmcphy.c
@@ -0,0 +1,341 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Ganbold Tsagaankhuu <ganbold@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Rockchip RK3399 eMMC PHY
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/gpio.h>
+#include <machine/bus.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/extres/clk/clk.h>
+#include <dev/extres/syscon/syscon.h>
+#include <dev/extres/phy/phy.h>
+
+#include "syscon_if.h"
+
+#define GRF_EMMCPHY_BASE 0xf780
+#define GRF_EMMCPHY_CON0 (GRF_EMMCPHY_BASE + 0x00)
+#define PHYCTRL_FRQSEL ((1 << 13) | (1 << 12))
+#define PHYCTRL_FRQSEL_200M 0
+#define PHYCTRL_FRQSEL_50M 1
+#define PHYCTRL_FRQSEL_100M 2
+#define PHYCTRL_FRQSEL_150M 3
+#define PHYCTRL_OTAPDLYENA (1 << 11)
+#define PHYCTRL_OTAPDLYSEL ((1 << 10) | (1 << 9) | (1 << 8) | (1 << 7))
+#define PHYCTRL_ITAPCHGWIN (1 << 6)
+#define PHYCTRL_ITAPDLYSEL ((1 << 5) | (1 << 4) | (1 << 3) | (1 << 2) | \
+ (1 << 1))
+#define PHYCTRL_ITAPDLYENA (1 << 0)
+#define GRF_EMMCPHY_CON1 (GRF_EMMCPHY_BASE + 0x04)
+#define PHYCTRL_CLKBUFSEL ((1 << 8) | (1 << 7) | (1 << 6))
+#define PHYCTRL_SELDLYTXCLK (1 << 5)
+#define PHYCTRL_SELDLYRXCLK (1 << 4)
+#define PHYCTRL_STRBSEL 0xf
+#define GRF_EMMCPHY_CON2 (GRF_EMMCPHY_BASE + 0x08)
+#define PHYCTRL_REN_STRB (1 << 9)
+#define PHYCTRL_REN_CMD (1 << 8)
+#define PHYCTRL_REN_DAT 0xff
+#define GRF_EMMCPHY_CON3 (GRF_EMMCPHY_BASE + 0x0c)
+#define PHYCTRL_PU_STRB (1 << 9)
+#define PHYCTRL_PU_CMD (1 << 8)
+#define PHYCTRL_PU_DAT 0xff
+#define GRF_EMMCPHY_CON4 (GRF_EMMCPHY_BASE + 0x10)
+#define PHYCTRL_OD_RELEASE_CMD (1 << 9)
+#define PHYCTRL_OD_RELEASE_STRB (1 << 8)
+#define PHYCTRL_OD_RELEASE_DAT 0xff
+#define GRF_EMMCPHY_CON5 (GRF_EMMCPHY_BASE + 0x14)
+#define PHYCTRL_ODEN_STRB (1 << 9)
+#define PHYCTRL_ODEN_CMD (1 << 8)
+#define PHYCTRL_ODEN_DAT 0xff
+#define GRF_EMMCPHY_CON6 (GRF_EMMCPHY_BASE + 0x18)
+#define PHYCTRL_DLL_TRM_ICP ((1 << 12) | (1 << 11) | (1 << 10) | (1 << 9))
+#define PHYCTRL_EN_RTRIM (1 << 8)
+#define PHYCTRL_RETRIM (1 << 7)
+#define PHYCTRL_DR_TY ((1 << 6) | (1 << 5) | (1 << 4))
+#define PHYCTRL_RETENB (1 << 3)
+#define PHYCTRL_RETEN (1 << 2)
+#define PHYCTRL_ENDLL (1 << 1)
+#define PHYCTRL_PDB (1 << 0)
+#define GRF_EMMCPHY_STATUS (GRF_EMMCPHY_BASE + 0x20)
+#define PHYCTRL_CALDONE (1 << 6)
+#define PHYCTRL_DLLRDY (1 << 5)
+#define PHYCTRL_RTRIM ((1 << 4) | (1 << 3) | (1 << 2) | (1 << 1))
+#define PHYCTRL_EXR_NINST (1 << 0)
+
+static struct ofw_compat_data compat_data[] = {
+ { "rockchip,rk3399-emmc-phy", 1 },
+ { NULL, 0 }
+};
+
+struct rk_emmcphy_softc {
+ struct syscon *syscon;
+ struct rk_emmcphy_conf *phy_conf;
+ clk_t clk;
+};
+
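+/*
+ * LOWEST_SET_BIT() isolates the least significant set bit of a mask
+ * (e.g. 0x0060 -> 0x0020) and SHIFTIN() multiplies by it to place a
+ * field value at the mask's bit position without needing a separate
+ * shift constant.
+ */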
+#define LOWEST_SET_BIT(mask) ((((mask) - 1) & (mask)) ^ (mask))
+#define SHIFTIN(x, mask) ((x) * LOWEST_SET_BIT(mask))
+
+/* Phy class and methods. */
+static int rk_emmcphy_enable(struct phynode *phynode, bool enable);
+static phynode_method_t rk_emmcphy_phynode_methods[] = {
+ PHYNODEMETHOD(phynode_enable, rk_emmcphy_enable),
+ PHYNODEMETHOD_END
+};
+
+DEFINE_CLASS_1(rk_emmcphy_phynode, rk_emmcphy_phynode_class,
+ rk_emmcphy_phynode_methods, 0, phynode_class);
+
+static int
+rk_emmcphy_enable(struct phynode *phynode, bool enable)
+{
+ struct rk_emmcphy_softc *sc;
+ device_t dev;
+ intptr_t phy;
+ uint64_t rate, frqsel;
+ uint32_t mask, val;
+ int error;
+
+ dev = phynode_get_device(phynode);
+ phy = phynode_get_id(phynode);
+ sc = device_get_softc(dev);
+
+ if (bootverbose)
+ device_printf(dev, "Phy id: %ld\n", phy);
+
+ if (phy != 0) {
+ device_printf(dev, "Unknown phy: %ld\n", phy);
+ return (ERANGE);
+ }
+ if (enable) {
+ /* Drive strength */
+ mask = PHYCTRL_DR_TY;
+ val = SHIFTIN(0, PHYCTRL_DR_TY);
+ SYSCON_WRITE_4(sc->syscon, GRF_EMMCPHY_CON6,
+ (mask << 16) | val);
+
+ /* Enable output tap delay */
+ mask = PHYCTRL_OTAPDLYENA | PHYCTRL_OTAPDLYSEL;
+ val = PHYCTRL_OTAPDLYENA | SHIFTIN(4, PHYCTRL_OTAPDLYSEL);
+ SYSCON_WRITE_4(sc->syscon, GRF_EMMCPHY_CON0,
+ (mask << 16) | val);
+ }
+
+ /* Power down PHY and disable DLL before making changes */
+ mask = PHYCTRL_ENDLL | PHYCTRL_PDB;
+ val = 0;
+ SYSCON_WRITE_4(sc->syscon, GRF_EMMCPHY_CON6, (mask << 16) | val);
+
+ if (enable == false)
+ return (0);
+
+ sc->phy_conf = (struct rk_emmcphy_conf *)ofw_bus_search_compatible(dev,
+ compat_data)->ocd_data;
+
+ /* Get clock */
+ error = clk_get_by_ofw_name(dev, 0, "emmcclk", &sc->clk);
+ if (error != 0) {
+ device_printf(dev, "cannot get emmcclk clock, continue\n");
+ sc->clk = NULL;
+ } else
+ device_printf(dev, "got emmcclk clock\n");
+
+ if (sc->clk) {
+ error = clk_get_freq(sc->clk, &rate);
+ if (error != 0) {
+ device_printf(dev, "cannot get clock frequency\n");
+ return (ENXIO);
+ }
+ } else
+ rate = 0;
+
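+ /*
+ * Pick the DLL frequency range nearest the card clock; the
+ * thresholds fall midway between the supported 50/100/150/200 MHz
+ * settings.
+ */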
+ if (rate != 0) {
+ if (rate < 75000000)
+ frqsel = PHYCTRL_FRQSEL_50M;
+ else if (rate < 125000000)
+ frqsel = PHYCTRL_FRQSEL_100M;
+ else if (rate < 175000000)
+ frqsel = PHYCTRL_FRQSEL_150M;
+ else
+ frqsel = PHYCTRL_FRQSEL_200M;
+ } else
+ frqsel = PHYCTRL_FRQSEL_200M;
+
+ DELAY(3);
+
+ /* Power up PHY */
+ mask = PHYCTRL_PDB;
+ val = PHYCTRL_PDB;
+ SYSCON_WRITE_4(sc->syscon, GRF_EMMCPHY_CON6, (mask << 16) | val);
+
+ /* Wait for calibration */
+ DELAY(10);
+ val = SYSCON_READ_4(sc->syscon, GRF_EMMCPHY_STATUS);
+ if ((val & PHYCTRL_CALDONE) == 0) {
+ device_printf(dev, "PHY calibration did not complete\n");
+ return (ENXIO);
+ }
+
+ /* Set DLL frequency */
+ mask = PHYCTRL_FRQSEL;
+ val = SHIFTIN(frqsel, PHYCTRL_FRQSEL);
+ SYSCON_WRITE_4(sc->syscon, GRF_EMMCPHY_CON0, (mask << 16) | val);
+
+ /* Enable DLL */
+ mask = PHYCTRL_ENDLL;
+ val = PHYCTRL_ENDLL;
+ SYSCON_WRITE_4(sc->syscon, GRF_EMMCPHY_CON6, (mask << 16) | val);
+
+ if (rate != 0) {
+ /*
+ * Rockchip RK3399 TRM V1.3 Part2.pdf says on page 698:
+ * After the DLL control loop reaches steady state a DLL
+ * ready signal is generated by the DLL circuits
+ * 'phyctrl_dllrdy'.
+ * The time from 'phyctrl_endll' to DLL ready signal
+ * 'phyctrl_dllrdy' varies with the clock frequency.
+ * At 200MHz clock frequency the DLL ready delay is 2.56us,
+ * at 100MHz clock frequency the DLL ready delay is 5.112us and
+ * at 50 MHz clock frequency the DLL ready delay is 10.231us.
+ * We could use safe wait values of 12us, 8us, 6us and 4us
+ * respectively.
+ * However, for some unknown reason that is not sufficient and the
+ * DLL seems to take much longer to lock.
+ * So use the safer value of 50ms here.
+ */
+
+ /* Wait for DLL ready */
+ DELAY(50000);
+ val = SYSCON_READ_4(sc->syscon, GRF_EMMCPHY_STATUS);
+ if ((val & PHYCTRL_DLLRDY) == 0) {
+ device_printf(dev, "DLL loop failed to lock\n");
+ return (ENXIO);
+ }
+ }
+
+ return (0);
+}
+
+static int
+rk_emmcphy_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "Rockchip RK3399 eMMC PHY");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+rk_emmcphy_attach(device_t dev)
+{
+ struct phynode_init_def phy_init;
+ struct phynode *phynode;
+ struct rk_emmcphy_softc *sc;
+ phandle_t node;
+ phandle_t xnode;
+ pcell_t handle;
+ intptr_t phy;
+
+ sc = device_get_softc(dev);
+ node = ofw_bus_get_node(dev);
+
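+ /*
+ * The GRF syscon is found by following our "clocks" xref (typically
+ * the eMMC host controller node) and reading its
+ * "arasan,soc-ctl-syscon" property.
+ */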
+ if (OF_getencprop(node, "clocks", (void *)&handle,
+ sizeof(handle)) <= 0) {
+ device_printf(dev, "cannot get clocks handle\n");
+ return (ENXIO);
+ }
+ xnode = OF_node_from_xref(handle);
+ if (OF_hasprop(xnode, "arasan,soc-ctl-syscon") &&
+ syscon_get_by_ofw_property(dev, xnode,
+ "arasan,soc-ctl-syscon", &sc->syscon) != 0) {
+ device_printf(dev, "cannot get grf driver handle\n");
+ return (ENXIO);
+ }
+
+ if (sc->syscon == NULL) {
+ device_printf(dev, "failed to get syscon\n");
+ return (ENXIO);
+ }
+
+ /* Create and register phy */
+ bzero(&phy_init, sizeof(phy_init));
+ phy_init.id = 0;
+ phy_init.ofw_node = ofw_bus_get_node(dev);
+ phynode = phynode_create(dev, &rk_emmcphy_phynode_class, &phy_init);
+ if (phynode == NULL) {
+ device_printf(dev, "failed to create eMMC PHY\n");
+ return (ENXIO);
+ }
+ if (phynode_register(phynode) == NULL) {
+ device_printf(dev, "failed to register eMMC PHY\n");
+ return (ENXIO);
+ }
+ if (bootverbose) {
+ phy = phynode_get_id(phynode);
+ device_printf(dev, "Attached phy id: %ld\n", phy);
+ }
+ return (0);
+}
+
+static device_method_t rk_emmcphy_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, rk_emmcphy_probe),
+ DEVMETHOD(device_attach, rk_emmcphy_attach),
+
+ DEVMETHOD_END
+};
+
+static driver_t rk_emmcphy_driver = {
+ "rk_emmcphy",
+ rk_emmcphy_methods,
+ sizeof(struct rk_emmcphy_softc)
+};
+
+static devclass_t rk_emmcphy_devclass;
+EARLY_DRIVER_MODULE(rk_emmcphy, simplebus, rk_emmcphy_driver,
+ rk_emmcphy_devclass, 0, 0, BUS_PASS_SUPPORTDEV + BUS_PASS_ORDER_MIDDLE);
+MODULE_VERSION(rk_emmcphy, 1);
diff --git a/sys/arm64/rockchip/rk805.c b/sys/arm64/rockchip/rk805.c
new file mode 100644
index 000000000000..19397627a8b0
--- /dev/null
+++ b/sys/arm64/rockchip/rk805.c
@@ -0,0 +1,741 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2018 Emmanuel Vadot <manu@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/rman.h>
+#include <machine/bus.h>
+
+#include <dev/iicbus/iiconf.h>
+#include <dev/iicbus/iicbus.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/extres/regulator/regulator.h>
+
+#include <arm64/rockchip/rk805reg.h>
+
+#include "regdev_if.h"
+
+MALLOC_DEFINE(M_RK805_REG, "RK805 regulator", "RK805 power regulator");
+
+/* #define dprintf(sc, format, arg...) device_printf(sc->base_dev, "%s: " format, __func__, arg) */
+#define dprintf(sc, format, arg...)
+
+enum rk_pmic_type {
+ RK805 = 1,
+ RK808,
+};
+
+static struct ofw_compat_data compat_data[] = {
+ {"rockchip,rk805", RK805},
+ {"rockchip,rk808", RK808},
+ {NULL, 0}
+};
+
+struct rk805_regdef {
+ intptr_t id;
+ char *name;
+ uint8_t enable_reg;
+ uint8_t enable_mask;
+ uint8_t voltage_reg;
+ uint8_t voltage_mask;
+ int voltage_min;
+ int voltage_max;
+ int voltage_step;
+ int voltage_nstep;
+};
+
+struct rk805_reg_sc {
+ struct regnode *regnode;
+ device_t base_dev;
+ struct rk805_regdef *def;
+ phandle_t xref;
+ struct regnode_std_param *param;
+};
+
+struct reg_list {
+ TAILQ_ENTRY(reg_list) next;
+ struct rk805_reg_sc *reg;
+};
+
+struct rk805_softc {
+ device_t dev;
+ struct mtx mtx;
+ struct resource * res[1];
+ void * intrcookie;
+ struct intr_config_hook intr_hook;
+ enum rk_pmic_type type;
+
+ TAILQ_HEAD(, reg_list) regs;
+ int nregs;
+};
+
+static int rk805_regnode_status(struct regnode *regnode, int *status);
+static int rk805_regnode_set_voltage(struct regnode *regnode, int min_uvolt,
+ int max_uvolt, int *udelay);
+static int rk805_regnode_get_voltage(struct regnode *regnode, int *uvolt);
+
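+/*
+ * On the RK805 the enable registers appear to use the upper nibble as a
+ * write-enable mask for the lower nibble, hence masks such as 0x11
+ * (enable bit plus its mask bit); the RK808 uses plain single-bit
+ * enables.
+ */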
+static struct rk805_regdef rk805_regdefs[] = {
+ {
+ .id = RK805_DCDC1,
+ .name = "DCDC_REG1",
+ .enable_reg = RK805_DCDC_EN,
+ .enable_mask = 0x11,
+ .voltage_reg = RK805_DCDC1_ON_VSEL,
+ .voltage_mask = 0x3F,
+ .voltage_min = 712500,
+ .voltage_max = 1450000,
+ .voltage_step = 12500,
+ .voltage_nstep = 64,
+ },
+ {
+ .id = RK805_DCDC2,
+ .name = "DCDC_REG2",
+ .enable_reg = RK805_DCDC_EN,
+ .enable_mask = 0x22,
+ .voltage_reg = RK805_DCDC2_ON_VSEL,
+ .voltage_mask = 0x3F,
+ .voltage_min = 712500,
+ .voltage_max = 1450000,
+ .voltage_step = 12500,
+ .voltage_nstep = 64,
+ },
+ {
+ .id = RK805_DCDC3,
+ .name = "DCDC_REG3",
+ .enable_reg = RK805_DCDC_EN,
+ .enable_mask = 0x44,
+ },
+ {
+ .id = RK805_DCDC4,
+ .name = "DCDC_REG4",
+ .enable_reg = RK805_DCDC_EN,
+ .enable_mask = 0x88,
+ .voltage_reg = RK805_DCDC4_ON_VSEL,
+ .voltage_mask = 0x3F,
+ .voltage_min = 800000,
+ .voltage_max = 3500000,
+ .voltage_step = 100000,
+ .voltage_nstep = 28,
+ },
+ {
+ .id = RK805_LDO1,
+ .name = "LDO_REG1",
+ .enable_reg = RK805_LDO_EN,
+ .enable_mask = 0x11,
+ .voltage_reg = RK805_LDO1_ON_VSEL,
+ .voltage_mask = 0x1F,
+ .voltage_min = 800000,
+ .voltage_max = 3400000,
+ .voltage_step = 100000,
+ .voltage_nstep = 27,
+ },
+ {
+ .id = RK805_LDO2,
+ .name = "LDO_REG2",
+ .enable_reg = RK805_LDO_EN,
+ .enable_mask = 0x22,
+ .voltage_reg = RK805_LDO2_ON_VSEL,
+ .voltage_mask = 0x1F,
+ .voltage_min = 800000,
+ .voltage_max = 3400000,
+ .voltage_step = 100000,
+ .voltage_nstep = 27,
+ },
+ {
+ .id = RK805_LDO3,
+ .name = "LDO_REG3",
+ .enable_reg = RK805_LDO_EN,
+ .enable_mask = 0x44,
+ .voltage_reg = RK805_LDO3_ON_VSEL,
+ .voltage_mask = 0x1F,
+ .voltage_min = 800000,
+ .voltage_max = 3400000,
+ .voltage_step = 100000,
+ .voltage_nstep = 27,
+ },
+};
+
+static struct rk805_regdef rk808_regdefs[] = {
+ {
+ .id = RK805_DCDC1,
+ .name = "DCDC_REG1",
+ .enable_reg = RK805_DCDC_EN,
+ .enable_mask = 0x1,
+ .voltage_reg = RK805_DCDC1_ON_VSEL,
+ .voltage_mask = 0x3F,
+ .voltage_min = 712500,
+ .voltage_max = 1500000,
+ .voltage_step = 12500,
+ .voltage_nstep = 64,
+ },
+ {
+ .id = RK805_DCDC2,
+ .name = "DCDC_REG2",
+ .enable_reg = RK805_DCDC_EN,
+ .enable_mask = 0x2,
+ .voltage_reg = RK805_DCDC2_ON_VSEL,
+ .voltage_mask = 0x3F,
+ .voltage_min = 712500,
+ .voltage_max = 1500000,
+ .voltage_step = 12500,
+ .voltage_nstep = 64,
+ },
+ {
+ /* BUCK3 voltage is determined by an external resistor */
+ .id = RK805_DCDC3,
+ .name = "DCDC_REG3",
+ .enable_reg = RK805_DCDC_EN,
+ .enable_mask = 0x4,
+ },
+ {
+ .id = RK805_DCDC4,
+ .name = "DCDC_REG4",
+ .enable_reg = RK805_DCDC_EN,
+ .enable_mask = 0x8,
+ .voltage_reg = RK805_DCDC4_ON_VSEL,
+ .voltage_mask = 0xF,
+ .voltage_min = 1800000,
+ .voltage_max = 3300000,
+ .voltage_step = 100000,
+ .voltage_nstep = 16,
+ },
+ {
+ .id = RK808_LDO1,
+ .name = "LDO_REG1",
+ .enable_reg = RK808_LDO_EN,
+ .enable_mask = 0x1,
+ .voltage_reg = RK805_LDO1_ON_VSEL,
+ .voltage_mask = 0x1F,
+ .voltage_min = 1800000,
+ .voltage_max = 3400000,
+ .voltage_step = 100000,
+ .voltage_nstep = 17,
+ },
+ {
+ .id = RK808_LDO2,
+ .name = "LDO_REG2",
+ .enable_reg = RK808_LDO_EN,
+ .enable_mask = 0x2,
+ .voltage_reg = RK805_LDO2_ON_VSEL,
+ .voltage_mask = 0x1F,
+ .voltage_min = 1800000,
+ .voltage_max = 3400000,
+ .voltage_step = 100000,
+ .voltage_nstep = 17,
+ },
+ {
+ .id = RK808_LDO3,
+ .name = "LDO_REG3",
+ .enable_reg = RK808_LDO_EN,
+ .enable_mask = 0x4,
+ .voltage_reg = RK805_LDO3_ON_VSEL,
+ .voltage_mask = 0xF,
+ .voltage_min = 800000,
+ .voltage_max = 2500000,
+ .voltage_step = 100000,
+ .voltage_nstep = 18,
+ },
+ {
+ .id = RK808_LDO4,
+ .name = "LDO_REG4",
+ .enable_reg = RK808_LDO_EN,
+ .enable_mask = 0x8,
+ .voltage_reg = RK808_LDO4_ON_VSEL,
+ .voltage_mask = 0x1F,
+ .voltage_min = 1800000,
+ .voltage_max = 3400000,
+ .voltage_step = 100000,
+ .voltage_nstep = 17,
+ },
+ {
+ .id = RK808_LDO5,
+ .name = "LDO_REG5",
+ .enable_reg = RK808_LDO_EN,
+ .enable_mask = 0x10,
+ .voltage_reg = RK808_LDO5_ON_VSEL,
+ .voltage_mask = 0x1F,
+ .voltage_min = 1800000,
+ .voltage_max = 3400000,
+ .voltage_step = 100000,
+ .voltage_nstep = 17,
+ },
+ {
+ .id = RK808_LDO6,
+ .name = "LDO_REG6",
+ .enable_reg = RK808_LDO_EN,
+ .enable_mask = 0x20,
+ .voltage_reg = RK808_LDO6_ON_VSEL,
+ .voltage_mask = 0x1F,
+ .voltage_min = 800000,
+ .voltage_max = 2500000,
+ .voltage_step = 100000,
+ .voltage_nstep = 18,
+ },
+ {
+ .id = RK808_LDO7,
+ .name = "LDO_REG7",
+ .enable_reg = RK808_LDO_EN,
+ .enable_mask = 0x40,
+ .voltage_reg = RK808_LDO7_ON_VSEL,
+ .voltage_mask = 0x1F,
+ .voltage_min = 800000,
+ .voltage_max = 2500000,
+ .voltage_step = 100000,
+ .voltage_nstep = 18,
+ },
+ {
+ .id = RK808_LDO8,
+ .name = "LDO_REG8",
+ .enable_reg = RK808_LDO_EN,
+ .enable_mask = 0x80,
+ .voltage_reg = RK808_LDO8_ON_VSEL,
+ .voltage_mask = 0x1F,
+ .voltage_min = 1800000,
+ .voltage_max = 3400000,
+ .voltage_step = 100000,
+ .voltage_nstep = 17,
+ },
+ {
+ .id = RK808_SWITCH1,
+ .name = "SWITCH_REG1",
+ .enable_reg = RK805_DCDC_EN,
+ .enable_mask = 0x20,
+ .voltage_min = 3000000,
+ .voltage_max = 3000000,
+ },
+ {
+ .id = RK808_SWITCH2,
+ .name = "SWITCH_REG2",
+ .enable_reg = RK805_DCDC_EN,
+ .enable_mask = 0x40,
+ .voltage_min = 3000000,
+ .voltage_max = 3000000,
+ },
+};
+
+static int
+rk805_read(device_t dev, uint8_t reg, uint8_t *data, uint8_t size)
+{
+ int err;
+
+ err = iicdev_readfrom(dev, reg, data, size, IIC_INTRWAIT);
+ return (err);
+}
+
+static int
+rk805_write(device_t dev, uint8_t reg, uint8_t data)
+{
+
+ return (iicdev_writeto(dev, reg, &data, 1, IIC_INTRWAIT));
+}
+
+static int
+rk805_regnode_init(struct regnode *regnode)
+{
+ struct rk805_reg_sc *sc;
+ struct regnode_std_param *param;
+ int rv, udelay, uvolt, status;
+
+ sc = regnode_get_softc(regnode);
+ dprintf(sc, "Regulator %s init called\n", sc->def->name);
+ param = regnode_get_stdparam(regnode);
+ if (param->min_uvolt == 0)
+ return (0);
+
+ /* Check that the regulator is preset to the correct voltage */
+ rv = rk805_regnode_get_voltage(regnode, &uvolt);
+ if (rv != 0)
+ return(rv);
+
+ if (uvolt >= param->min_uvolt && uvolt <= param->max_uvolt)
+ return(0);
+ /*
+ * Set the regulator to the correct voltage if it is not enabled.
+ * Do not enable it here; that will be done either by a
+ * consumer or by regnode_set_constraint() if boot_on is true.
+ */
+ rv = rk805_regnode_status(regnode, &status);
+ if (rv != 0 || status == REGULATOR_STATUS_ENABLED)
+ return (rv);
+
+ rv = rk805_regnode_set_voltage(regnode, param->min_uvolt,
+ param->max_uvolt, &udelay);
+ if (rv == 0 && udelay != 0)
+ DELAY(udelay);
+
+ return (rv);
+}
+
+static int
+rk805_regnode_enable(struct regnode *regnode, bool enable, int *udelay)
+{
+ struct rk805_reg_sc *sc;
+ uint8_t val;
+
+ sc = regnode_get_softc(regnode);
+
+ dprintf(sc, "%sabling regulator %s\n",
+ enable ? "En" : "Dis",
+ sc->def->name);
+ rk805_read(sc->base_dev, sc->def->enable_reg, &val, 1);
+ if (enable)
+ val |= sc->def->enable_mask;
+ else
+ val &= ~sc->def->enable_mask;
+ rk805_write(sc->base_dev, sc->def->enable_reg, val);
+
+ *udelay = 0;
+
+ return (0);
+}
+
+static void
+rk805_regnode_reg_to_voltage(struct rk805_reg_sc *sc, uint8_t val, int *uv)
+{
+ if (val < sc->def->voltage_nstep)
+ *uv = sc->def->voltage_min + val * sc->def->voltage_step;
+ else
+ *uv = sc->def->voltage_min +
+ (sc->def->voltage_nstep * sc->def->voltage_step);
+}
+
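+/*
+ * Walk the selector values from voltage_min in voltage_step increments
+ * until min_uvolt is reached, failing if that step exceeds max_uvolt.
+ * E.g. for DCDC1 (712500 uV min, 12500 uV step) a request for
+ * 1000000 uV yields selector 23.
+ */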
+static int
+rk805_regnode_voltage_to_reg(struct rk805_reg_sc *sc, int min_uvolt,
+ int max_uvolt, uint8_t *val)
+{
+ uint8_t nval;
+ int nstep, uvolt;
+
+ nval = 0;
+ uvolt = sc->def->voltage_min;
+
+ for (nstep = 0; nstep < sc->def->voltage_nstep && uvolt < min_uvolt;
+ nstep++) {
+ ++nval;
+ uvolt += sc->def->voltage_step;
+ }
+ if (uvolt > max_uvolt)
+ return (EINVAL);
+
+ *val = nval;
+ return (0);
+}
+
+static int
+rk805_regnode_status(struct regnode *regnode, int *status)
+{
+ struct rk805_reg_sc *sc;
+ uint8_t val;
+
+ sc = regnode_get_softc(regnode);
+
+ *status = 0;
+ rk805_read(sc->base_dev, sc->def->enable_reg, &val, 1);
+ if (val & sc->def->enable_mask)
+ *status = REGULATOR_STATUS_ENABLED;
+
+ return (0);
+}
+
+static int
+rk805_regnode_set_voltage(struct regnode *regnode, int min_uvolt,
+ int max_uvolt, int *udelay)
+{
+ struct rk805_reg_sc *sc;
+ uint8_t val;
+ int uvolt;
+
+ sc = regnode_get_softc(regnode);
+
+ if (!sc->def->voltage_step)
+ return (ENXIO);
+
+ dprintf(sc, "Setting %s to %d<->%d uvolts\n",
+ sc->def->name,
+ min_uvolt,
+ max_uvolt);
+ rk805_read(sc->base_dev, sc->def->voltage_reg, &val, 1);
+ if (rk805_regnode_voltage_to_reg(sc, min_uvolt, max_uvolt, &val) != 0)
+ return (ERANGE);
+
+ rk805_write(sc->base_dev, sc->def->voltage_reg, val);
+
+ rk805_read(sc->base_dev, sc->def->voltage_reg, &val, 1);
+
+ *udelay = 0;
+
+ rk805_regnode_reg_to_voltage(sc, val, &uvolt);
+ dprintf(sc, "Regulator %s set to %d uvolt\n",
+ sc->def->name,
+ uvolt);
+
+ return (0);
+}
+
+static int
+rk805_regnode_get_voltage(struct regnode *regnode, int *uvolt)
+{
+ struct rk805_reg_sc *sc;
+ uint8_t val;
+
+ sc = regnode_get_softc(regnode);
+
+ if (sc->def->voltage_min == sc->def->voltage_max) {
+ *uvolt = sc->def->voltage_min;
+ return (0);
+ }
+
+ if (!sc->def->voltage_step)
+ return (ENXIO);
+
+ rk805_read(sc->base_dev, sc->def->voltage_reg, &val, 1);
+ rk805_regnode_reg_to_voltage(sc, val & sc->def->voltage_mask, uvolt);
+
+ dprintf(sc, "Regulator %s is at %d uvolt\n",
+ sc->def->name,
+ *uvolt);
+
+ return (0);
+}
+
+static regnode_method_t rk805_regnode_methods[] = {
+ /* Regulator interface */
+ REGNODEMETHOD(regnode_init, rk805_regnode_init),
+ REGNODEMETHOD(regnode_enable, rk805_regnode_enable),
+ REGNODEMETHOD(regnode_status, rk805_regnode_status),
+ REGNODEMETHOD(regnode_set_voltage, rk805_regnode_set_voltage),
+ REGNODEMETHOD(regnode_get_voltage, rk805_regnode_get_voltage),
+ REGNODEMETHOD(regnode_check_voltage, regnode_method_check_voltage),
+ REGNODEMETHOD_END
+};
+DEFINE_CLASS_1(rk805_regnode, rk805_regnode_class, rk805_regnode_methods,
+ sizeof(struct rk805_reg_sc), regnode_class);
+
+static struct rk805_reg_sc *
+rk805_reg_attach(device_t dev, phandle_t node,
+ struct rk805_regdef *def)
+{
+ struct rk805_reg_sc *reg_sc;
+ struct regnode_init_def initdef;
+ struct regnode *regnode;
+
+ memset(&initdef, 0, sizeof(initdef));
+ if (regulator_parse_ofw_stdparam(dev, node, &initdef) != 0) {
+ device_printf(dev, "cannot create regulator\n");
+ return (NULL);
+ }
+ if (initdef.std_param.min_uvolt == 0)
+ initdef.std_param.min_uvolt = def->voltage_min;
+ if (initdef.std_param.max_uvolt == 0)
+ initdef.std_param.max_uvolt = def->voltage_max;
+ initdef.id = def->id;
+ initdef.ofw_node = node;
+
+ regnode = regnode_create(dev, &rk805_regnode_class, &initdef);
+ if (regnode == NULL) {
+ device_printf(dev, "cannot create regulator\n");
+ return (NULL);
+ }
+
+ reg_sc = regnode_get_softc(regnode);
+ reg_sc->regnode = regnode;
+ reg_sc->base_dev = dev;
+ reg_sc->def = def;
+ reg_sc->xref = OF_xref_from_node(node);
+ reg_sc->param = regnode_get_stdparam(regnode);
+
+ regnode_register(regnode);
+
+ return (reg_sc);
+}
+
+static int
+rk805_probe(device_t dev)
+{
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "RockChip RK805 PMIC");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static void
+rk805_start(void *pdev)
+{
+ struct rk805_softc *sc;
+ device_t dev;
+ uint8_t data[2];
+ int err;
+
+ dev = pdev;
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ /* No version register in RK808 */
+ if (bootverbose && sc->type == RK805) {
+ err = rk805_read(dev, RK805_CHIP_NAME, data, 1);
+ if (err != 0) {
+ device_printf(dev, "Cannot read chip name reg\n");
+ return;
+ }
+ err = rk805_read(dev, RK805_CHIP_VER, data + 1, 1);
+ if (err != 0) {
+ device_printf(dev, "Cannot read chip version reg\n");
+ return;
+ }
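+ /*
+ * The chip name is spread across the NAME register and the high
+ * nibble of the VERSION register (e.g. 0x805 for an RK805), while
+ * the low nibble of VERSION holds the chip version.
+ */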
+ device_printf(dev, "Chip Name: %x\n",
+ data[0] << 4 | ((data[1] >> 4) & 0xf));
+ device_printf(dev, "Chip Version: %x\n", data[1] & 0xf);
+ }
+
+ config_intrhook_disestablish(&sc->intr_hook);
+}
+
+static int
+rk805_attach(device_t dev)
+{
+ struct rk805_softc *sc;
+ struct rk805_reg_sc *reg;
+ struct rk805_regdef *regdefs;
+ struct reg_list *regp;
+ phandle_t rnode, child;
+ int i;
+
+ sc = device_get_softc(dev);
+
+ sc->intr_hook.ich_func = rk805_start;
+ sc->intr_hook.ich_arg = dev;
+
+ if (config_intrhook_establish(&sc->intr_hook) != 0)
+ return (ENOMEM);
+
+ sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
+ switch (sc->type) {
+ case RK805:
+ regdefs = rk805_regdefs;
+ sc->nregs = nitems(rk805_regdefs);
+ break;
+ case RK808:
+ regdefs = rk808_regdefs;
+ sc->nregs = nitems(rk808_regdefs);
+ break;
+ default:
+ device_printf(dev, "Unknown type %d\n", sc->type);
+ return (ENXIO);
+ }
+
+ TAILQ_INIT(&sc->regs);
+
+ rnode = ofw_bus_find_child(ofw_bus_get_node(dev), "regulators");
+ if (rnode > 0) {
+ for (i = 0; i < sc->nregs; i++) {
+ child = ofw_bus_find_child(rnode,
+ regdefs[i].name);
+ if (child == 0)
+ continue;
+ if (OF_hasprop(child, "regulator-name") != 1)
+ continue;
+ reg = rk805_reg_attach(dev, child, &regdefs[i]);
+ if (reg == NULL) {
+ device_printf(dev,
+ "cannot attach regulator %s\n",
+ regdefs[i].name);
+ continue;
+ }
+ regp = malloc(sizeof(*regp), M_DEVBUF, M_WAITOK | M_ZERO);
+ regp->reg = reg;
+ TAILQ_INSERT_TAIL(&sc->regs, regp, next);
+ if (bootverbose)
+ device_printf(dev, "Regulator %s attached\n",
+ regdefs[i].name);
+ }
+ }
+
+ return (0);
+}
+
+static int
+rk805_detach(device_t dev)
+{
+
+ /* We cannot detach regulators */
+ return (EBUSY);
+}
+
+static int
+rk805_map(device_t dev, phandle_t xref, int ncells,
+ pcell_t *cells, intptr_t *id)
+{
+ struct rk805_softc *sc;
+ struct reg_list *regp;
+
+ sc = device_get_softc(dev);
+
+ TAILQ_FOREACH(regp, &sc->regs, next) {
+ if (regp->reg->xref == xref) {
+ *id = regp->reg->def->id;
+ return (0);
+ }
+ }
+
+ return (ERANGE);
+}
+
+static device_method_t rk805_methods[] = {
+ DEVMETHOD(device_probe, rk805_probe),
+ DEVMETHOD(device_attach, rk805_attach),
+ DEVMETHOD(device_detach, rk805_detach),
+
+ /* regdev interface */
+ DEVMETHOD(regdev_map, rk805_map),
+ DEVMETHOD_END
+};
+
+static driver_t rk805_driver = {
+ "rk805_pmu",
+ rk805_methods,
+ sizeof(struct rk805_softc),
+};
+
+static devclass_t rk805_devclass;
+
+EARLY_DRIVER_MODULE(rk805, iicbus, rk805_driver, rk805_devclass, 0, 0,
+ BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LAST);
+MODULE_DEPEND(rk805, iicbus, IICBUS_MINVER, IICBUS_PREFVER, IICBUS_MAXVER);
+MODULE_VERSION(rk805, 1);
diff --git a/sys/arm64/rockchip/rk805reg.h b/sys/arm64/rockchip/rk805reg.h
new file mode 100644
index 000000000000..db489d77c26e
--- /dev/null
+++ b/sys/arm64/rockchip/rk805reg.h
@@ -0,0 +1,98 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2018 Emmanuel Vadot <manu@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _RK805REG_H_
+#define _RK805REG_H_
+
+#define RK805_CHIP_NAME 0x17
+#define RK805_CHIP_VER 0x18
+#define RK805_OTP_VER 0x19
+
+#define RK805_DCDC_EN 0x23
+#define RK808_LDO_EN 0x24
+#define RK805_SLEEP_DCDC_EN 0x25
+#define RK805_SLEEP_LDO_EN 0x26
+#define RK805_LDO_EN 0x27
+#define RK805_SLEEP_LDO_LP_EN 0x2A
+
+#define RK805_DCDC1_CONFIG 0x2E
+#define RK805_DCDC1_ON_VSEL 0x2F
+#define RK805_DCDC1_SLEEP_VSEL 0x30
+#define RK805_DCDC2_CONFIG 0x32
+#define RK805_DCDC2_ON_VSEL 0x33
+#define RK805_DCDC2_SLEEP_VSEL 0x34
+#define RK805_DCDC3_CONFIG 0x36
+#define RK805_DCDC4_CONFIG 0x37
+#define RK805_DCDC4_ON_VSEL 0x38
+#define RK805_DCDC4_SLEEP_VSEL 0x39
+#define RK805_LDO1_ON_VSEL 0x3B
+#define RK805_LDO1_SLEEP_VSEL 0x3C
+#define RK805_LDO2_ON_VSEL 0x3D
+#define RK805_LDO2_SLEEP_VSEL 0x3E
+#define RK805_LDO3_ON_VSEL 0x3F
+#define RK805_LDO3_SLEEP_VSEL 0x40
+#define RK808_LDO4_ON_VSEL 0x41
+#define RK808_LDO4_SLEEP_VSEL 0x42
+#define RK808_LDO5_ON_VSEL 0x43
+#define RK808_LDO5_SLEEP_VSEL 0x44
+#define RK808_LDO6_ON_VSEL 0x45
+#define RK808_LDO6_SLEEP_VSEL 0x46
+#define RK808_LDO7_ON_VSEL 0x47
+#define RK808_LDO7_SLEEP_VSEL 0x48
+#define RK808_LDO8_ON_VSEL 0x49
+#define RK808_LDO8_SLEEP_VSEL 0x4A
+
+enum rk805_regulator {
+ RK805_DCDC1 = 0,
+ RK805_DCDC2,
+ RK805_DCDC3,
+ RK805_DCDC4,
+ RK805_LDO1,
+ RK805_LDO2,
+ RK805_LDO3,
+};
+
+enum rk808_regulator {
+ RK808_DCDC1 = 0,
+ RK808_DCDC2,
+ RK808_DCDC3,
+ RK808_DCDC4,
+ RK808_LDO1,
+ RK808_LDO2,
+ RK808_LDO3,
+ RK808_LDO4,
+ RK808_LDO5,
+ RK808_LDO6,
+ RK808_LDO7,
+ RK808_LDO8,
+ RK808_SWITCH1,
+ RK808_SWITCH2,
+};
+
+#endif /* _RK805REG_H_ */
diff --git a/sys/arm64/rockchip/rk_dwc3.c b/sys/arm64/rockchip/rk_dwc3.c
new file mode 100644
index 000000000000..d63ba67907f8
--- /dev/null
+++ b/sys/arm64/rockchip/rk_dwc3.c
@@ -0,0 +1,209 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Emmanuel Vadot <manu@FreeBSD.Org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Rockchip DWC3 glue
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/gpio.h>
+#include <machine/bus.h>
+
+#include <dev/fdt/simplebus.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/ofw/ofw_subr.h>
+
+#include <dev/extres/clk/clk.h>
+#include <dev/extres/hwreset/hwreset.h>
+#include <dev/extres/phy/phy_usb.h>
+#include <dev/extres/syscon/syscon.h>
+
+enum rk_dwc3_type {
+ RK3328 = 1,
+ RK3399,
+};
+
+static struct ofw_compat_data compat_data[] = {
+ { "rockchip,rk3328-dwc3", RK3328 },
+ { "rockchip,rk3399-dwc3", RK3399 },
+ { NULL, 0 }
+};
+
+struct rk_dwc3_softc {
+ struct simplebus_softc sc;
+ device_t dev;
+ clk_t clk_ref;
+ clk_t clk_suspend;
+ clk_t clk_bus;
+ clk_t clk_axi_perf;
+ clk_t clk_usb3;
+ clk_t clk_grf;
+ hwreset_t rst_usb3;
+ enum rk_dwc3_type type;
+};
+
+static int
+rk_dwc3_probe(device_t dev)
+{
+ phandle_t node;
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ /* Binding says that we need a child node for the actual dwc3 controller */
+ node = ofw_bus_get_node(dev);
+ if (OF_child(node) <= 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "Rockchip RK3399 DWC3");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+rk_dwc3_attach(device_t dev)
+{
+ struct rk_dwc3_softc *sc;
+ device_t cdev;
+ phandle_t node, child;
+ int err;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ node = ofw_bus_get_node(dev);
+ sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
+
+ /* Mandatory clocks */
+ if (clk_get_by_ofw_name(dev, 0, "ref_clk", &sc->clk_ref) != 0) {
+ device_printf(dev, "Cannot get ref_clk clock\n");
+ return (ENXIO);
+ }
+ err = clk_enable(sc->clk_ref);
+ if (err != 0) {
+ device_printf(dev, "Could not enable clock %s\n",
+ clk_get_name(sc->clk_ref));
+ return (ENXIO);
+ }
+ if (clk_get_by_ofw_name(dev, 0, "suspend_clk", &sc->clk_suspend) != 0) {
+ device_printf(dev, "Cannot get suspend_clk clock\n");
+ return (ENXIO);
+ }
+ err = clk_enable(sc->clk_suspend);
+ if (err != 0) {
+ device_printf(dev, "Could not enable clock %s\n",
+ clk_get_name(sc->clk_suspend));
+ return (ENXIO);
+ }
+ if (clk_get_by_ofw_name(dev, 0, "bus_clk", &sc->clk_bus) != 0) {
+ device_printf(dev, "Cannot get bus_clk clock\n");
+ return (ENXIO);
+ }
+ err = clk_enable(sc->clk_bus);
+ if (err != 0) {
+ device_printf(dev, "Could not enable clock %s\n",
+ clk_get_name(sc->clk_bus));
+ return (ENXIO);
+ }
+ if (sc->type == RK3399) {
+ if (clk_get_by_ofw_name(dev, 0, "grf_clk", &sc->clk_grf) != 0) {
+ device_printf(dev, "Cannot get grf_clk clock\n");
+ return (ENXIO);
+ }
+ err = clk_enable(sc->clk_grf);
+ if (err != 0) {
+ device_printf(dev, "Could not enable clock %s\n",
+ clk_get_name(sc->clk_grf));
+ return (ENXIO);
+ }
+ }
+ /* Optional clocks */
+ if (clk_get_by_ofw_name(dev, 0, "aclk_usb3_rksoc_axi_perf", &sc->clk_axi_perf) == 0) {
+ err = clk_enable(sc->clk_axi_perf);
+ if (err != 0) {
+ device_printf(dev, "Could not enable clock %s\n",
+ clk_get_name(sc->clk_axi_perf));
+ return (ENXIO);
+ }
+ }
+ if (clk_get_by_ofw_name(dev, 0, "aclk_usb3", &sc->clk_usb3) == 0) {
+ err = clk_enable(sc->clk_usb3);
+ if (err != 0) {
+ device_printf(dev, "Could not enable clock %s\n",
+ clk_get_name(sc->clk_usb3));
+ return (ENXIO);
+ }
+ }
+
+ /* Put module out of reset */
+ if (hwreset_get_by_ofw_name(dev, node, "usb3-otg", &sc->rst_usb3) == 0) {
+ if (hwreset_deassert(sc->rst_usb3) != 0) {
+ device_printf(dev, "Cannot deassert reset\n");
+ return (ENXIO);
+ }
+ }
+
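+ /*
+ * The glue node is itself a simple bus; the generic DWC3 controller
+ * sits in the child node and is attached through the normal
+ * simplebus path below.
+ */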
+ simplebus_init(dev, node);
+ if (simplebus_fill_ranges(node, &sc->sc) < 0) {
+ device_printf(dev, "could not get ranges\n");
+ return (ENXIO);
+ }
+
+ for (child = OF_child(node); child > 0; child = OF_peer(child)) {
+ cdev = simplebus_add_device(dev, child, 0, NULL, -1, NULL);
+ if (cdev != NULL)
+ device_probe_and_attach(cdev);
+ }
+
+ return (bus_generic_attach(dev));
+}
+
+static device_method_t rk_dwc3_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, rk_dwc3_probe),
+ DEVMETHOD(device_attach, rk_dwc3_attach),
+
+ DEVMETHOD_END
+};
+
+static devclass_t rk_dwc3_devclass;
+
+DEFINE_CLASS_1(rk_dwc3, rk_dwc3_driver, rk_dwc3_methods,
+ sizeof(struct rk_dwc3_softc), simplebus_driver);
+DRIVER_MODULE(rk_dwc3, simplebus, rk_dwc3_driver, rk_dwc3_devclass, 0, 0);
diff --git a/sys/arm64/rockchip/rk_gpio.c b/sys/arm64/rockchip/rk_gpio.c
new file mode 100644
index 000000000000..aa44a6bc9e09
--- /dev/null
+++ b/sys/arm64/rockchip/rk_gpio.c
@@ -0,0 +1,474 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2018 Emmanuel Vadot <manu@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/gpio.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/intr.h>
+
+#include <dev/gpio/gpiobusvar.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/extres/clk/clk.h>
+
+#include "gpio_if.h"
+
+#include "fdt_pinctrl_if.h"
+
+#define RK_GPIO_SWPORTA_DR 0x00 /* Data register */
+#define RK_GPIO_SWPORTA_DDR 0x04 /* Data direction register */
+
+#define RK_GPIO_INTEN 0x30 /* Interrupt enable register */
+#define RK_GPIO_INTMASK 0x34 /* Interrupt mask register */
+#define RK_GPIO_INTTYPE_LEVEL 0x38 /* Interrupt level register */
+#define RK_GPIO_INT_POLARITY 0x3C /* Interrupt polarity register */
+#define RK_GPIO_INT_STATUS 0x40 /* Interrupt status register */
+#define RK_GPIO_INT_RAWSTATUS 0x44 /* Raw Interrupt status register */
+
+#define RK_GPIO_DEBOUNCE 0x48 /* Debounce enable register */
+
+#define RK_GPIO_PORTA_EOI 0x4C /* Clear interrupt register */
+#define RK_GPIO_EXT_PORTA 0x50 /* External port register */
+
+#define RK_GPIO_LS_SYNC 0x60 /* Level sensitive synchronization enable register */
+
+#define RK_GPIO_DEFAULT_CAPS (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT | \
+ GPIO_PIN_PULLUP | GPIO_PIN_PULLDOWN)
+
+struct rk_gpio_softc {
+ device_t sc_dev;
+ device_t sc_busdev;
+ struct mtx sc_mtx;
+ struct resource *sc_res[2];
+ bus_space_tag_t sc_bst;
+ bus_space_handle_t sc_bsh;
+ clk_t clk;
+ device_t pinctrl;
+};
+
+static struct ofw_compat_data compat_data[] = {
+ {"rockchip,gpio-bank", 1},
+ {NULL, 0}
+};
+
+static struct resource_spec rk_gpio_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { SYS_RES_IRQ, 0, RF_ACTIVE },
+ { -1, 0 }
+};
+
+static int rk_gpio_detach(device_t dev);
+
+#define RK_GPIO_LOCK(_sc) mtx_lock_spin(&(_sc)->sc_mtx)
+#define RK_GPIO_UNLOCK(_sc) mtx_unlock_spin(&(_sc)->sc_mtx)
+#define RK_GPIO_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
+
+#define RK_GPIO_WRITE(_sc, _off, _val) \
+ bus_space_write_4(_sc->sc_bst, _sc->sc_bsh, _off, _val)
+#define RK_GPIO_READ(_sc, _off) \
+ bus_space_read_4(_sc->sc_bst, _sc->sc_bsh, _off)
+
+static int
+rk_gpio_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "RockChip GPIO Bank controller");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+rk_gpio_attach(device_t dev)
+{
+ struct rk_gpio_softc *sc;
+ phandle_t node;
+ int err;
+
+ sc = device_get_softc(dev);
+ sc->sc_dev = dev;
+ sc->pinctrl = device_get_parent(dev);
+
+ node = ofw_bus_get_node(sc->sc_dev);
+ if (!OF_hasprop(node, "gpio-controller"))
+ return (ENXIO);
+
+ mtx_init(&sc->sc_mtx, "rk gpio", "gpio", MTX_SPIN);
+
+ if (bus_alloc_resources(dev, rk_gpio_spec, sc->sc_res)) {
+ device_printf(dev, "could not allocate resources\n");
+ bus_release_resources(dev, rk_gpio_spec, sc->sc_res);
+ mtx_destroy(&sc->sc_mtx);
+ return (ENXIO);
+ }
+
+ sc->sc_bst = rman_get_bustag(sc->sc_res[0]);
+ sc->sc_bsh = rman_get_bushandle(sc->sc_res[0]);
+
+ if (clk_get_by_ofw_index(dev, 0, 0, &sc->clk) != 0) {
+ device_printf(dev, "Cannot get clock\n");
+ rk_gpio_detach(dev);
+ return (ENXIO);
+ }
+ err = clk_enable(sc->clk);
+ if (err != 0) {
+ device_printf(dev, "Could not enable clock %s\n",
+ clk_get_name(sc->clk));
+ rk_gpio_detach(dev);
+ return (ENXIO);
+ }
+
+ sc->sc_busdev = gpiobus_attach_bus(dev);
+ if (sc->sc_busdev == NULL) {
+ rk_gpio_detach(dev);
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static int
+rk_gpio_detach(device_t dev)
+{
+ struct rk_gpio_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ if (sc->sc_busdev)
+ gpiobus_detach_bus(dev);
+ bus_release_resources(dev, rk_gpio_spec, sc->sc_res);
+ mtx_destroy(&sc->sc_mtx);
+ clk_disable(sc->clk);
+
+ return(0);
+}
+
+static device_t
+rk_gpio_get_bus(device_t dev)
+{
+ struct rk_gpio_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ return (sc->sc_busdev);
+}
+
+static int
+rk_gpio_pin_max(device_t dev, int *maxpin)
+{
+
+ /* Each bank always has 32 pins */
+ /* XXX not true */
+ *maxpin = 31;
+ return (0);
+}
+
+static int
+rk_gpio_pin_getname(device_t dev, uint32_t pin, char *name)
+{
+ struct rk_gpio_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ if (pin >= 32)
+ return (EINVAL);
+
+ RK_GPIO_LOCK(sc);
+ snprintf(name, GPIOMAXNAME, "gpio%d", pin);
+ RK_GPIO_UNLOCK(sc);
+
+ return (0);
+}
+
+static int
+rk_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags)
+{
+ struct rk_gpio_softc *sc;
+ uint32_t reg;
+ int rv;
+ bool is_gpio;
+
+ sc = device_get_softc(dev);
+
+ rv = FDT_PINCTRL_IS_GPIO(sc->pinctrl, dev, pin, &is_gpio);
+ if (rv != 0)
+ return (rv);
+ if (!is_gpio)
+ return (EINVAL);
+
+ *flags = 0;
+ rv = FDT_PINCTRL_GET_FLAGS(sc->pinctrl, dev, pin, flags);
+ if (rv != 0)
+ return (rv);
+
+ RK_GPIO_LOCK(sc);
+ reg = RK_GPIO_READ(sc, RK_GPIO_SWPORTA_DDR);
+ RK_GPIO_UNLOCK(sc);
+
+ if (reg & (1 << pin))
+ *flags |= GPIO_PIN_OUTPUT;
+ else
+ *flags |= GPIO_PIN_INPUT;
+
+ return (0);
+}
+
+static int
+rk_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps)
+{
+
+ *caps = RK_GPIO_DEFAULT_CAPS;
+ return (0);
+}
+
+static int
+rk_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags)
+{
+ struct rk_gpio_softc *sc;
+ uint32_t reg;
+ int rv;
+ bool is_gpio;
+
+ sc = device_get_softc(dev);
+
+ rv = FDT_PINCTRL_IS_GPIO(sc->pinctrl, dev, pin, &is_gpio);
+ if (rv != 0)
+ return (rv);
+ if (!is_gpio)
+ return (EINVAL);
+
+ rv = FDT_PINCTRL_SET_FLAGS(sc->pinctrl, dev, pin, flags);
+ if (rv != 0)
+ return (rv);
+
+ RK_GPIO_LOCK(sc);
+
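+ /* A set bit in SWPORTA_DDR makes the pin an output, a clear bit an input. */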
+ reg = RK_GPIO_READ(sc, RK_GPIO_SWPORTA_DDR);
+ if (flags & GPIO_PIN_INPUT)
+ reg &= ~(1 << pin);
+ else if (flags & GPIO_PIN_OUTPUT)
+ reg |= (1 << pin);
+
+ RK_GPIO_WRITE(sc, RK_GPIO_SWPORTA_DDR, reg);
+ RK_GPIO_UNLOCK(sc);
+
+ return (0);
+}
+
+static int
+rk_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *val)
+{
+ struct rk_gpio_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+
+ RK_GPIO_LOCK(sc);
+ reg = RK_GPIO_READ(sc, RK_GPIO_EXT_PORTA);
+ RK_GPIO_UNLOCK(sc);
+
+ *val = reg & (1 << pin) ? 1 : 0;
+
+ return (0);
+}
+
+static int
+rk_gpio_pin_set(device_t dev, uint32_t pin, unsigned int value)
+{
+ struct rk_gpio_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+
+ RK_GPIO_LOCK(sc);
+ reg = RK_GPIO_READ(sc, RK_GPIO_SWPORTA_DR);
+ if (value)
+ reg |= (1 << pin);
+ else
+ reg &= ~(1 << pin);
+ RK_GPIO_WRITE(sc, RK_GPIO_SWPORTA_DR, reg);
+ RK_GPIO_UNLOCK(sc);
+
+ return (0);
+}
+
+static int
+rk_gpio_pin_toggle(device_t dev, uint32_t pin)
+{
+ struct rk_gpio_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+
+ RK_GPIO_LOCK(sc);
+ reg = RK_GPIO_READ(sc, RK_GPIO_SWPORTA_DR);
+ if (reg & (1 << pin))
+ reg &= ~(1 << pin);
+ else
+ reg |= (1 << pin);
+ RK_GPIO_WRITE(sc, RK_GPIO_SWPORTA_DR, reg);
+ RK_GPIO_UNLOCK(sc);
+
+ return (0);
+}
+
+static int
+rk_gpio_pin_access_32(device_t dev, uint32_t first_pin, uint32_t clear_pins,
+ uint32_t change_pins, uint32_t *orig_pins)
+{
+ struct rk_gpio_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+
+ RK_GPIO_LOCK(sc);
+ reg = RK_GPIO_READ(sc, RK_GPIO_SWPORTA_DR);
+ if (orig_pins)
+ *orig_pins = reg;
+
+ if ((clear_pins | change_pins) != 0) {
+ reg = (reg & ~clear_pins) ^ change_pins;
+ RK_GPIO_WRITE(sc, RK_GPIO_SWPORTA_DR, reg);
+ }
+ RK_GPIO_UNLOCK(sc);
+
+ return (0);
+}
+
+static int
+rk_gpio_pin_config_32(device_t dev, uint32_t first_pin, uint32_t num_pins,
+ uint32_t *pin_flags)
+{
+ struct rk_gpio_softc *sc;
+ uint32_t reg, set, mask, flags;
+ int i;
+
+ sc = device_get_softc(dev);
+
+ if (first_pin != 0 || num_pins > 32)
+ return (EINVAL);
+
+ set = 0;
+ mask = 0;
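+ /*
+ * Build an LSB-first mask of the pins being configured and collect
+ * the requested direction bits in 'set'.
+ */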
+ for (i = 0; i < num_pins; i++) {
+ mask = (mask << 1) | 1;
+ flags = pin_flags[i];
+ if (flags & GPIO_PIN_INPUT) {
+ set &= ~(1 << i);
+ } else if (flags & GPIO_PIN_OUTPUT) {
+ set |= (1 << i);
+ }
+ }
+
+ RK_GPIO_LOCK(sc);
+ reg = RK_GPIO_READ(sc, RK_GPIO_SWPORTA_DDR);
+ reg &= ~mask;
+ reg |= set;
+ RK_GPIO_WRITE(sc, RK_GPIO_SWPORTA_DDR, reg);
+ RK_GPIO_UNLOCK(sc);
+
+ return (0);
+}
+
+static int
+rk_gpio_map_gpios(device_t bus, phandle_t dev, phandle_t gparent, int gcells,
+ pcell_t *gpios, uint32_t *pin, uint32_t *flags)
+{
+
+ /* The gpios are mapped as <pin flags> */
+ *pin = gpios[0];
+ *flags = gpios[1];
+ return (0);
+}
+
+static phandle_t
+rk_gpio_get_node(device_t bus, device_t dev)
+{
+
+ /* We only have one child, the GPIO bus, which needs our own node. */
+ return (ofw_bus_get_node(bus));
+}
+
+static device_method_t rk_gpio_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, rk_gpio_probe),
+ DEVMETHOD(device_attach, rk_gpio_attach),
+ DEVMETHOD(device_detach, rk_gpio_detach),
+
+ /* GPIO protocol */
+ DEVMETHOD(gpio_get_bus, rk_gpio_get_bus),
+ DEVMETHOD(gpio_pin_max, rk_gpio_pin_max),
+ DEVMETHOD(gpio_pin_getname, rk_gpio_pin_getname),
+ DEVMETHOD(gpio_pin_getflags, rk_gpio_pin_getflags),
+ DEVMETHOD(gpio_pin_getcaps, rk_gpio_pin_getcaps),
+ DEVMETHOD(gpio_pin_setflags, rk_gpio_pin_setflags),
+ DEVMETHOD(gpio_pin_get, rk_gpio_pin_get),
+ DEVMETHOD(gpio_pin_set, rk_gpio_pin_set),
+ DEVMETHOD(gpio_pin_toggle, rk_gpio_pin_toggle),
+ DEVMETHOD(gpio_pin_access_32, rk_gpio_pin_access_32),
+ DEVMETHOD(gpio_pin_config_32, rk_gpio_pin_config_32),
+ DEVMETHOD(gpio_map_gpios, rk_gpio_map_gpios),
+
+ /* ofw_bus interface */
+ DEVMETHOD(ofw_bus_get_node, rk_gpio_get_node),
+
+ DEVMETHOD_END
+};
+
+static driver_t rk_gpio_driver = {
+ "gpio",
+ rk_gpio_methods,
+ sizeof(struct rk_gpio_softc),
+};
+
+static devclass_t rk_gpio_devclass;
+
+/*
+ * GPIO driver is always a child of rk_pinctrl driver and should be probed
+ * and attached within rk_pinctrl_attach function. Due to this, bus pass order
+ * must be same as bus pass order of rk_pinctrl driver.
+ */
+EARLY_DRIVER_MODULE(rk_gpio, simplebus, rk_gpio_driver,
+ rk_gpio_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
diff --git a/sys/arm64/rockchip/rk_grf.c b/sys/arm64/rockchip/rk_grf.c
new file mode 100644
index 000000000000..d55bdd04e861
--- /dev/null
+++ b/sys/arm64/rockchip/rk_grf.c
@@ -0,0 +1,79 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2018 Emmanuel Vadot <manu@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/rman.h>
+#include <machine/bus.h>
+
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/extres/syscon/syscon.h>
+#include <dev/fdt/simple_mfd.h>
+
+static struct ofw_compat_data compat_data[] = {
+ {"rockchip,rk3288-grf", 1},
+ {"rockchip,rk3328-grf", 1},
+ {"rockchip,rk3399-grf", 1},
+ {"rockchip,rk3399-pmugrf", 1},
+ {NULL, 0}
+};
+
+static int
+rk_grf_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "RockChip General Register Files");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static device_method_t rk_grf_methods[] = {
+ DEVMETHOD(device_probe, rk_grf_probe),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(rk_grf, rk_grf_driver, rk_grf_methods,
+ sizeof(struct simple_mfd_softc), simple_mfd_driver);
+
+static devclass_t rk_grf_devclass;
+EARLY_DRIVER_MODULE(rk_grf, simplebus, rk_grf_driver, rk_grf_devclass,
+ 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
+MODULE_VERSION(rk_grf, 1);
diff --git a/sys/arm64/rockchip/rk_i2c.c b/sys/arm64/rockchip/rk_i2c.c
new file mode 100644
index 000000000000..fa824c76003b
--- /dev/null
+++ b/sys/arm64/rockchip/rk_i2c.c
@@ -0,0 +1,700 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2018 Emmanuel Vadot <manu@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/rman.h>
+#include <machine/bus.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/iicbus/iiconf.h>
+#include <dev/iicbus/iicbus.h>
+
+#include <dev/extres/clk/clk.h>
+
+#include "iicbus_if.h"
+
+#define RK_I2C_CON 0x00
+#define RK_I2C_CON_EN (1 << 0)
+#define RK_I2C_CON_MODE_SHIFT 1
+#define RK_I2C_CON_MODE_TX 0
+#define RK_I2C_CON_MODE_RRX 1
+#define RK_I2C_CON_MODE_RX 2
+#define RK_I2C_CON_MODE_RTX 3
+#define RK_I2C_CON_MODE_MASK 0x6
+#define RK_I2C_CON_START (1 << 3)
+#define RK_I2C_CON_STOP (1 << 4)
+#define RK_I2C_CON_LASTACK (1 << 5)
+#define RK_I2C_CON_NAKSTOP (1 << 6)
+#define RK_I2C_CON_CTRL_MASK 0xFF
+
+#define RK_I2C_CLKDIV 0x04
+#define RK_I2C_CLKDIVL_MASK 0xFFFF
+#define RK_I2C_CLKDIVL_SHIFT 0
+#define RK_I2C_CLKDIVH_MASK 0xFFFF0000
+#define RK_I2C_CLKDIVH_SHIFT 16
+#define RK_I2C_CLKDIV_MUL 8
+
+#define RK_I2C_MRXADDR 0x08
+#define RK_I2C_MRXADDR_SADDR_MASK 0xFFFFFF
+#define RK_I2C_MRXADDR_VALID(x) (1 << (24 + x))
+
+#define RK_I2C_MRXRADDR 0x0C
+#define RK_I2C_MRXRADDR_SRADDR_MASK 0xFFFFFF
+#define RK_I2C_MRXRADDR_VALID(x) (1 << (24 + x))
+
+#define RK_I2C_MTXCNT 0x10
+#define RK_I2C_MTXCNT_MASK 0x3F
+
+#define RK_I2C_MRXCNT 0x14
+#define RK_I2C_MRXCNT_MASK 0x3F
+
+#define RK_I2C_IEN 0x18
+#define RK_I2C_IEN_BTFIEN (1 << 0)
+#define RK_I2C_IEN_BRFIEN (1 << 1)
+#define RK_I2C_IEN_MBTFIEN (1 << 2)
+#define RK_I2C_IEN_MBRFIEN (1 << 3)
+#define RK_I2C_IEN_STARTIEN (1 << 4)
+#define RK_I2C_IEN_STOPIEN (1 << 5)
+#define RK_I2C_IEN_NAKRCVIEN (1 << 6)
+#define RK_I2C_IEN_ALL (RK_I2C_IEN_MBTFIEN | RK_I2C_IEN_MBRFIEN | \
+ RK_I2C_IEN_STARTIEN | RK_I2C_IEN_STOPIEN | RK_I2C_IEN_NAKRCVIEN)
+
+#define RK_I2C_IPD 0x1C
+#define RK_I2C_IPD_BTFIPD (1 << 0)
+#define RK_I2C_IPD_BRFIPD (1 << 1)
+#define RK_I2C_IPD_MBTFIPD (1 << 2)
+#define RK_I2C_IPD_MBRFIPD (1 << 3)
+#define RK_I2C_IPD_STARTIPD (1 << 4)
+#define RK_I2C_IPD_STOPIPD (1 << 5)
+#define RK_I2C_IPD_NAKRCVIPD (1 << 6)
+#define RK_I2C_IPD_ALL (RK_I2C_IPD_MBTFIPD | RK_I2C_IPD_MBRFIPD | \
+ RK_I2C_IPD_STARTIPD | RK_I2C_IPD_STOPIPD | RK_I2C_IPD_NAKRCVIPD)
+
+#define RK_I2C_FNCT 0x20
+#define RK_I2C_FNCT_MASK 0x3F
+
+#define RK_I2C_TXDATA_BASE 0x100
+
+#define RK_I2C_RXDATA_BASE 0x200
+
+enum rk_i2c_state {
+ STATE_IDLE = 0,
+ STATE_START,
+ STATE_READ,
+ STATE_WRITE,
+ STATE_STOP
+};
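+/*
+ * Explanatory note (derived from the code below, not from a datasheet): the
+ * driver runs a small per-message state machine.  A message with a start
+ * condition goes IDLE -> START -> READ or WRITE -> STOP -> IDLE, driven by
+ * rk_i2c_intr_locked(); continuation messages (IIC_M_NOSTART) enter READ or
+ * WRITE directly, see rk_i2c_start_xfer().
+ */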
+
+struct rk_i2c_softc {
+ device_t dev;
+ struct resource *res[2];
+ struct mtx mtx;
+ clk_t sclk;
+ clk_t pclk;
+ int busy;
+ void * intrhand;
+ uint32_t intr;
+ uint32_t ipd;
+ struct iic_msg *msg;
+ size_t cnt;
+ int msg_len;
+ bool transfer_done;
+ bool nak_recv;
+ bool tx_slave_addr;
+ uint8_t mode;
+ uint8_t state;
+
+ device_t iicbus;
+};
+
+static struct ofw_compat_data compat_data[] = {
+ {"rockchip,rk3288-i2c", 1},
+ {"rockchip,rk3328-i2c", 1},
+ {"rockchip,rk3399-i2c", 1},
+ {NULL, 0}
+};
+
+static struct resource_spec rk_i2c_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
+ { -1, 0 }
+};
+
+static int rk_i2c_probe(device_t dev);
+static int rk_i2c_attach(device_t dev);
+static int rk_i2c_detach(device_t dev);
+
+#define RK_I2C_LOCK(sc) mtx_lock(&(sc)->mtx)
+#define RK_I2C_UNLOCK(sc) mtx_unlock(&(sc)->mtx)
+#define RK_I2C_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED)
+#define RK_I2C_READ(sc, reg) bus_read_4((sc)->res[0], (reg))
+#define RK_I2C_WRITE(sc, reg, val) bus_write_4((sc)->res[0], (reg), (val))
+
+static uint32_t
+rk_i2c_get_clkdiv(struct rk_i2c_softc *sc, uint32_t speed)
+{
+ uint64_t sclk_freq;
+ uint32_t clkdiv;
+ int err;
+
+ err = clk_get_freq(sc->sclk, &sclk_freq);
+ if (err != 0)
+ return (err);
+
+ clkdiv = (sclk_freq / speed / RK_I2C_CLKDIV_MUL / 2) - 1;
+ clkdiv &= RK_I2C_CLKDIVL_MASK;
+
+ clkdiv = clkdiv << RK_I2C_CLKDIVH_SHIFT | clkdiv;
+
+ return (clkdiv);
+}
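+/*
+ * Worked example (illustrative values, not a fixed configuration): if the
+ * "i2c" clock ran at 198 MHz and a 400 kHz bus were requested, the integer
+ * arithmetic above would give 198000000 / 400000 / 8 / 2 - 1 = 29, and the
+ * returned value would carry 29 in both the high and low divider fields.
+ */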
+
+static int
+rk_i2c_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr)
+{
+ struct rk_i2c_softc *sc;
+ uint32_t clkdiv;
+ u_int busfreq;
+
+ sc = device_get_softc(dev);
+
+ busfreq = IICBUS_GET_FREQUENCY(sc->iicbus, speed);
+
+ clkdiv = rk_i2c_get_clkdiv(sc, busfreq);
+
+ RK_I2C_LOCK(sc);
+
+ /* Set the clock divider */
+ RK_I2C_WRITE(sc, RK_I2C_CLKDIV, clkdiv);
+
+ /* Disable the module */
+ RK_I2C_WRITE(sc, RK_I2C_CON, 0);
+
+ RK_I2C_UNLOCK(sc);
+
+ return (0);
+}
+
+static uint8_t
+rk_i2c_fill_tx(struct rk_i2c_softc *sc)
+{
+ uint32_t buf32;
+ uint8_t buf;
+ int i, j, len;
+
+ if (sc->msg == NULL || sc->msg->len == sc->cnt)
+ return (0);
+
+ len = sc->msg->len - sc->cnt;
+ if (len > 8)
+ len = 8;
+
+ for (i = 0; i < len; i++) {
+ buf32 = 0;
+ for (j = 0; j < 4 ; j++) {
+ if (sc->cnt == sc->msg->len)
+ break;
+
+ /* Fill the addr if needed */
+ if (sc->cnt == 0 && sc->tx_slave_addr) {
+ buf = sc->msg->slave;
+ sc->tx_slave_addr = false;
+ } else {
+ buf = sc->msg->buf[sc->cnt];
+ sc->cnt++;
+ }
+ buf32 |= buf << (j * 8);
+ }
+ RK_I2C_WRITE(sc, RK_I2C_TXDATA_BASE + 4 * i, buf32);
+
+ if (sc->cnt == sc->msg->len)
+ break;
+ }
+
+ return (uint8_t)len;
+}
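+/*
+ * Note (explanatory, derived from the code above): the TXDATA_BASE area
+ * exposes the transmit FIFO as consecutive 32-bit registers; message bytes
+ * are packed four per register, least-significant byte first, and the slave
+ * address is prepended as the first byte when tx_slave_addr is set.
+ */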
+
+static void
+rk_i2c_drain_rx(struct rk_i2c_softc *sc)
+{
+ uint32_t buf32 = 0;
+ uint8_t buf8;
+ int len;
+ int i;
+
+ if (sc->msg == NULL) {
+ device_printf(sc->dev, "No current iic msg\n");
+ return;
+ }
+
+ len = sc->msg->len - sc->cnt;
+ if (len > 32)
+ len = 32;
+
+ for (i = 0; i < len; i++) {
+ if (i % 4 == 0)
+ buf32 = RK_I2C_READ(sc, RK_I2C_RXDATA_BASE + (i / 4) * 4);
+
+ buf8 = (buf32 >> ((i % 4) * 8)) & 0xFF;
+ sc->msg->buf[sc->cnt++] = buf8;
+ }
+}
+
+static void
+rk_i2c_send_stop(struct rk_i2c_softc *sc)
+{
+ uint32_t reg;
+
+ RK_I2C_WRITE(sc, RK_I2C_IEN, RK_I2C_IEN_STOPIEN);
+
+ sc->state = STATE_STOP;
+
+ reg = RK_I2C_READ(sc, RK_I2C_CON);
+ reg |= RK_I2C_CON_STOP;
+ RK_I2C_WRITE(sc, RK_I2C_CON, reg);
+}
+
+static void
+rk_i2c_intr_locked(struct rk_i2c_softc *sc)
+{
+ uint32_t reg;
+
+ sc->ipd = RK_I2C_READ(sc, RK_I2C_IPD);
+
+ /* Something to handle? */
+ if ((sc->ipd & RK_I2C_IPD_ALL) == 0)
+ return;
+
+ RK_I2C_WRITE(sc, RK_I2C_IPD, sc->ipd);
+ sc->ipd &= RK_I2C_IPD_ALL;
+
+ if (sc->ipd & RK_I2C_IPD_NAKRCVIPD) {
+ /* NACK received */
+ sc->ipd &= ~RK_I2C_IPD_NAKRCVIPD;
+ sc->nak_recv = 1;
+ /* XXXX last byte !!!, signal error !!! */
+ sc->transfer_done = 1;
+ sc->state = STATE_IDLE;
+ goto err;
+ }
+
+ switch (sc->state) {
+ case STATE_START:
+ /* Disable start bit */
+ reg = RK_I2C_READ(sc, RK_I2C_CON);
+ reg &= ~RK_I2C_CON_START;
+ RK_I2C_WRITE(sc, RK_I2C_CON, reg);
+
+ if (sc->mode == RK_I2C_CON_MODE_RRX ||
+ sc->mode == RK_I2C_CON_MODE_RX) {
+ sc->state = STATE_READ;
+ RK_I2C_WRITE(sc, RK_I2C_IEN, RK_I2C_IEN_MBRFIEN |
+ RK_I2C_IEN_NAKRCVIEN);
+
+ reg = RK_I2C_READ(sc, RK_I2C_CON);
+ reg |= RK_I2C_CON_LASTACK;
+ RK_I2C_WRITE(sc, RK_I2C_CON, reg);
+
+ RK_I2C_WRITE(sc, RK_I2C_MRXCNT, sc->msg->len);
+ } else {
+ sc->state = STATE_WRITE;
+ RK_I2C_WRITE(sc, RK_I2C_IEN, RK_I2C_IEN_MBTFIEN |
+ RK_I2C_IEN_NAKRCVIEN);
+
+ sc->msg->len += 1;
+ rk_i2c_fill_tx(sc);
+ RK_I2C_WRITE(sc, RK_I2C_MTXCNT, sc->msg->len);
+ }
+ break;
+ case STATE_READ:
+ rk_i2c_drain_rx(sc);
+
+ if (sc->cnt == sc->msg->len)
+ rk_i2c_send_stop(sc);
+
+ break;
+ case STATE_WRITE:
+ if (sc->cnt == sc->msg->len &&
+ !(sc->msg->flags & IIC_M_NOSTOP)) {
+ rk_i2c_send_stop(sc);
+ break;
+ }
+		/* FALLTHROUGH */
+ case STATE_STOP:
+ /* Disable stop bit */
+ reg = RK_I2C_READ(sc, RK_I2C_CON);
+ reg &= ~RK_I2C_CON_STOP;
+ RK_I2C_WRITE(sc, RK_I2C_CON, reg);
+
+ sc->transfer_done = 1;
+ sc->state = STATE_IDLE;
+ break;
+ case STATE_IDLE:
+ break;
+ }
+
+err:
+ wakeup(sc);
+}
+
+static void
+rk_i2c_intr(void *arg)
+{
+ struct rk_i2c_softc *sc;
+
+ sc = (struct rk_i2c_softc *)arg;
+
+ RK_I2C_LOCK(sc);
+ rk_i2c_intr_locked(sc);
+ RK_I2C_UNLOCK(sc);
+}
+
+static void
+rk_i2c_start_xfer(struct rk_i2c_softc *sc, struct iic_msg *msg, boolean_t last)
+{
+ uint32_t reg;
+ uint8_t len;
+
+ sc->transfer_done = false;
+ sc->nak_recv = false;
+ sc->tx_slave_addr = false;
+ sc->cnt = 0;
+ sc->state = STATE_IDLE;
+ sc->msg = msg;
+ sc->msg_len = sc->msg->len;
+
+ reg = RK_I2C_READ(sc, RK_I2C_CON) & ~RK_I2C_CON_CTRL_MASK;
+ if (!(sc->msg->flags & IIC_M_NOSTART)) {
+		/* Standard message */
+ if (sc->mode == RK_I2C_CON_MODE_TX) {
+			sc->msg_len++;	/* Take the slave address into account. */
+ sc->tx_slave_addr = true;
+ }
+ sc->state = STATE_START;
+ reg |= RK_I2C_CON_START;
+
+ RK_I2C_WRITE(sc, RK_I2C_IEN, RK_I2C_IEN_STARTIEN);
+ } else {
+ /* Continuation message */
+ if (sc->mode == RK_I2C_CON_MODE_RX) {
+ sc->state = STATE_READ;
+ if (last)
+ reg |= RK_I2C_CON_LASTACK;
+
+ RK_I2C_WRITE(sc, RK_I2C_MRXCNT, sc->msg->len);
+ RK_I2C_WRITE(sc, RK_I2C_IEN, RK_I2C_IEN_MBRFIEN |
+ RK_I2C_IEN_NAKRCVIEN);
+ } else {
+ sc->state = STATE_WRITE;
+ len = rk_i2c_fill_tx(sc);
+
+ RK_I2C_WRITE(sc, RK_I2C_MTXCNT, len);
+
+ RK_I2C_WRITE(sc, RK_I2C_IEN, RK_I2C_IEN_MBTFIEN |
+ RK_I2C_IEN_NAKRCVIEN);
+ }
+ }
+ reg |= sc->mode << RK_I2C_CON_MODE_SHIFT;
+ reg |= RK_I2C_CON_EN;
+ RK_I2C_WRITE(sc, RK_I2C_CON, reg);
+}
+
+static int
+rk_i2c_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs)
+{
+ struct rk_i2c_softc *sc;
+ uint32_t reg;
+ bool last_msg;
+ int i, j, timeout, err;
+
+ sc = device_get_softc(dev);
+
+ RK_I2C_LOCK(sc);
+
+ while (sc->busy)
+ mtx_sleep(sc, &sc->mtx, 0, "i2cbuswait", 0);
+ sc->busy = 1;
+
+ /* Disable the module and interrupts */
+ RK_I2C_WRITE(sc, RK_I2C_CON, 0);
+ RK_I2C_WRITE(sc, RK_I2C_IEN, 0);
+
+	/* Clear stale interrupts */
+ RK_I2C_WRITE(sc, RK_I2C_IPD, RK_I2C_IPD_ALL);
+
+ err = 0;
+ for (i = 0; i < nmsgs; i++) {
+ /* Validate parameters. */
+ if (msgs == NULL || msgs[i].buf == NULL ||
+ msgs[i].len == 0) {
+ err = EINVAL;
+ break;
+ }
+		/*
+		 * If the next message has the NOSTART flag, then both
+		 * messages must be of the same type (read/write) and
+		 * address the same slave.
+		 */
+ if (i < nmsgs - 1) {
+ if ((msgs[i + 1].flags & IIC_M_NOSTART) &&
+ ((msgs[i].flags & IIC_M_RD) !=
+ (msgs[i + 1].flags & IIC_M_RD) ||
+ (msgs[i].slave != msgs[i + 1].slave))) {
+ err = EINVAL;
+ break;
+ }
+ }
+		/*
+		 * Detect the simple register read case: the first
+		 * message must be IIC_M_WR | IIC_M_NOSTOP and the next
+		 * a pure IIC_M_RD (no other flags allowed), both
+		 * addressed to the same slave.
+		 */
+
+ if (nmsgs - i >= 2 && msgs[i].len < 4 &&
+ msgs[i].flags == (IIC_M_WR | IIC_M_NOSTOP) &&
+ msgs[i + 1].flags == IIC_M_RD &&
+ (msgs[i].slave & ~LSB) == (msgs[i + 1].slave & ~LSB)) {
+ sc->mode = RK_I2C_CON_MODE_RRX;
+
+ /* Write slave address */
+ reg = msgs[i].slave & ~LSB;
+ reg |= RK_I2C_MRXADDR_VALID(0);
+ RK_I2C_WRITE(sc, RK_I2C_MRXADDR, reg);
+
+ /* Write slave register address */
+ reg = 0;
+ for (j = 0; j < msgs[i].len ; j++) {
+ reg |= (msgs[i].buf[j] & 0xff) << (j * 8);
+ reg |= RK_I2C_MRXADDR_VALID(j);
+ }
+ RK_I2C_WRITE(sc, RK_I2C_MRXRADDR, reg);
+
+ i++;
+ } else {
+ if (msgs[i].flags & IIC_M_RD) {
+ if (msgs[i].flags & IIC_M_NOSTART) {
+ sc->mode = RK_I2C_CON_MODE_RX;
+ } else {
+ sc->mode = RK_I2C_CON_MODE_RRX;
+ reg = msgs[i].slave & LSB;
+ reg |= RK_I2C_MRXADDR_VALID(0);
+ RK_I2C_WRITE(sc, RK_I2C_MRXADDR, reg);
+ RK_I2C_WRITE(sc, RK_I2C_MRXRADDR, 0);
+ }
+ } else {
+ sc->mode = RK_I2C_CON_MODE_TX;
+ }
+ }
+		/* Is this the last message? */
+		last_msg = (i >= nmsgs - 1) ||
+		    !(msgs[i + 1].flags & IIC_M_NOSTART);
+ rk_i2c_start_xfer(sc, msgs + i, last_msg);
+
+ if (cold) {
+ for(timeout = 10000; timeout > 0; timeout--) {
+ rk_i2c_intr_locked(sc);
+ if (sc->transfer_done != 0)
+ break;
+ DELAY(1000);
+ }
+ if (timeout <= 0)
+ err = ETIMEDOUT;
+ } else {
+ while (err == 0 && sc->transfer_done != 1) {
+ err = msleep(sc, &sc->mtx, PZERO, "rk_i2c",
+ 10 * hz);
+ }
+ }
+ }
+
+ /* Disable the module and interrupts */
+ RK_I2C_WRITE(sc, RK_I2C_CON, 0);
+ RK_I2C_WRITE(sc, RK_I2C_IEN, 0);
+
+ sc->busy = 0;
+
+ RK_I2C_UNLOCK(sc);
+ return (err);
+}
+
+static int
+rk_i2c_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "RockChip I2C");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+rk_i2c_attach(device_t dev)
+{
+ struct rk_i2c_softc *sc;
+ int error;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ mtx_init(&sc->mtx, device_get_nameunit(dev), "rk_i2c", MTX_DEF);
+
+ if (bus_alloc_resources(dev, rk_i2c_spec, sc->res) != 0) {
+ device_printf(dev, "cannot allocate resources for device\n");
+ error = ENXIO;
+ goto fail;
+ }
+
+ if (bus_setup_intr(dev, sc->res[1],
+ INTR_TYPE_MISC | INTR_MPSAFE, NULL, rk_i2c_intr, sc,
+ &sc->intrhand)) {
+ bus_release_resources(dev, rk_i2c_spec, sc->res);
+ device_printf(dev, "cannot setup interrupt handler\n");
+ return (ENXIO);
+ }
+
+ clk_set_assigned(dev, ofw_bus_get_node(dev));
+
+ /* Activate the module clocks. */
+ error = clk_get_by_ofw_name(dev, 0, "i2c", &sc->sclk);
+ if (error != 0) {
+ device_printf(dev, "cannot get i2c clock\n");
+ goto fail;
+ }
+ error = clk_enable(sc->sclk);
+ if (error != 0) {
+ device_printf(dev, "cannot enable i2c clock\n");
+ goto fail;
+ }
+ /* pclk clock is optional. */
+ error = clk_get_by_ofw_name(dev, 0, "pclk", &sc->pclk);
+ if (error != 0 && error != ENOENT) {
+ device_printf(dev, "cannot get pclk clock\n");
+ goto fail;
+ }
+ if (sc->pclk != NULL) {
+ error = clk_enable(sc->pclk);
+ if (error != 0) {
+ device_printf(dev, "cannot enable pclk clock\n");
+ goto fail;
+ }
+ }
+
+ sc->iicbus = device_add_child(dev, "iicbus", -1);
+ if (sc->iicbus == NULL) {
+ device_printf(dev, "cannot add iicbus child device\n");
+ error = ENXIO;
+ goto fail;
+ }
+
+ bus_generic_attach(dev);
+
+ return (0);
+
+fail:
+ if (rk_i2c_detach(dev) != 0)
+ device_printf(dev, "Failed to detach\n");
+ return (error);
+}
+
+static int
+rk_i2c_detach(device_t dev)
+{
+ struct rk_i2c_softc *sc;
+ int error;
+
+ sc = device_get_softc(dev);
+
+ if ((error = bus_generic_detach(dev)) != 0)
+ return (error);
+
+ if (sc->iicbus != NULL)
+ if ((error = device_delete_child(dev, sc->iicbus)) != 0)
+ return (error);
+
+ if (sc->sclk != NULL)
+ clk_release(sc->sclk);
+ if (sc->pclk != NULL)
+ clk_release(sc->pclk);
+
+ if (sc->intrhand != NULL)
+ bus_teardown_intr(sc->dev, sc->res[1], sc->intrhand);
+
+ bus_release_resources(dev, rk_i2c_spec, sc->res);
+
+ mtx_destroy(&sc->mtx);
+
+ return (0);
+}
+
+static phandle_t
+rk_i2c_get_node(device_t bus, device_t dev)
+{
+
+ return ofw_bus_get_node(bus);
+}
+
+static device_method_t rk_i2c_methods[] = {
+ DEVMETHOD(device_probe, rk_i2c_probe),
+ DEVMETHOD(device_attach, rk_i2c_attach),
+ DEVMETHOD(device_detach, rk_i2c_detach),
+
+ /* OFW methods */
+ DEVMETHOD(ofw_bus_get_node, rk_i2c_get_node),
+
+ DEVMETHOD(iicbus_callback, iicbus_null_callback),
+ DEVMETHOD(iicbus_reset, rk_i2c_reset),
+ DEVMETHOD(iicbus_transfer, rk_i2c_transfer),
+
+ DEVMETHOD_END
+};
+
+static driver_t rk_i2c_driver = {
+ "rk_i2c",
+ rk_i2c_methods,
+ sizeof(struct rk_i2c_softc),
+};
+
+static devclass_t rk_i2c_devclass;
+
+EARLY_DRIVER_MODULE(rk_i2c, simplebus, rk_i2c_driver, rk_i2c_devclass, 0, 0,
+ BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE);
+EARLY_DRIVER_MODULE(ofw_iicbus, rk_i2c, ofw_iicbus_driver, ofw_iicbus_devclass,
+ 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE);
+MODULE_DEPEND(rk_i2c, iicbus, 1, 1, 1);
+MODULE_VERSION(rk_i2c, 1);
diff --git a/sys/arm64/rockchip/rk_iodomain.c b/sys/arm64/rockchip/rk_iodomain.c
new file mode 100644
index 000000000000..df773012fe4e
--- /dev/null
+++ b/sys/arm64/rockchip/rk_iodomain.c
@@ -0,0 +1,222 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Emmanuel Vadot <manu@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/extres/syscon/syscon.h>
+#include <dev/extres/regulator/regulator.h>
+
+#include "syscon_if.h"
+
+#define RK3288_GRF_IO_VSEL 0x380
+#define RK3399_GRF_IO_VSEL 0xe640
+#define RK3399_PMUGRF_SOC_CON0 0x180
+
+struct rk_iodomain_supply {
+ char *name;
+ uint32_t bit;
+};
+
+struct rk_iodomain_softc;
+
+struct rk_iodomain_conf {
+ struct rk_iodomain_supply *supply;
+ int nsupply;
+ uint32_t grf_reg;
+ void (*init)(struct rk_iodomain_softc *sc);
+};
+
+struct rk_iodomain_softc {
+ device_t dev;
+ struct syscon *grf;
+ phandle_t node;
+ struct rk_iodomain_conf *conf;
+};
+
+static struct rk_iodomain_supply rk3288_supply[] = {
+ {"lcdc-supply", 0},
+ {"dvp-supply", 1},
+ {"flash0-supply", 2},
+ {"flash1-supply", 3},
+ {"wifi-supply", 4},
+ {"bb-supply", 5},
+ {"audio-supply", 6},
+ {"sdcard-supply", 7},
+ {"gpio30-supply", 8},
+ {"gpio1830-supply", 9},
+};
+
+static struct rk_iodomain_conf rk3288_conf = {
+ .supply = rk3288_supply,
+ .nsupply = nitems(rk3288_supply),
+ .grf_reg = RK3288_GRF_IO_VSEL,
+};
+
+static struct rk_iodomain_supply rk3399_supply[] = {
+ {"bt656-supply", 0},
+ {"audio-supply", 1},
+ {"sdmmc-supply", 2},
+ {"gpio1830-supply", 3},
+};
+
+static struct rk_iodomain_conf rk3399_conf = {
+ .supply = rk3399_supply,
+ .nsupply = nitems(rk3399_supply),
+ .grf_reg = RK3399_GRF_IO_VSEL,
+};
+
+static struct rk_iodomain_supply rk3399_pmu_supply[] = {
+ {"pmu1830-supply", 9},
+};
+
+static void rk3399_pmu_init(struct rk_iodomain_softc *sc);
+static struct rk_iodomain_conf rk3399_pmu_conf = {
+ .supply = rk3399_pmu_supply,
+ .nsupply = nitems(rk3399_pmu_supply),
+ .grf_reg = RK3399_PMUGRF_SOC_CON0,
+ .init = rk3399_pmu_init,
+};
+
+static struct ofw_compat_data compat_data[] = {
+ {"rockchip,rk3288-io-voltage-domain", (uintptr_t)&rk3288_conf},
+ {"rockchip,rk3399-io-voltage-domain", (uintptr_t)&rk3399_conf},
+ {"rockchip,rk3399-pmu-io-voltage-domain", (uintptr_t)&rk3399_pmu_conf},
+ {NULL, 0}
+};
+
+static void
+rk3399_pmu_init(struct rk_iodomain_softc *sc)
+{
+
+ SYSCON_WRITE_4(sc->grf, RK3399_PMUGRF_SOC_CON0,
+ (1 << 8) | (1 << (8 + 16))); /* set pmu1830_volsel */
+}
+
+static void
+rk_iodomain_set(struct rk_iodomain_softc *sc)
+{
+ regulator_t supply;
+ uint32_t reg = 0;
+ uint32_t mask = 0;
+ int uvolt, i;
+
+ for (i = 0; i < sc->conf->nsupply; i++) {
+ mask |= (1 << sc->conf->supply[i].bit) << 16;
+ if (regulator_get_by_ofw_property(sc->dev, sc->node,
+ sc->conf->supply[i].name, &supply) == 0) {
+ if (regulator_get_voltage(supply, &uvolt) == 0) {
+ if (uvolt == 1800000)
+ reg |= (1 << sc->conf->supply[i].bit);
+ else if (uvolt != 3000000)
+ device_printf(sc->dev,
+ "%s regulator is at %duV, ignoring\n",
+ sc->conf->supply[i].name, uvolt);
+ } else
+ device_printf(sc->dev, "Cannot get current "
+ "voltage for regulator %s\n",
+ sc->conf->supply[i].name);
+ }
+ }
+
+ SYSCON_WRITE_4(sc->grf, sc->conf->grf_reg, reg | mask);
+ if (sc->conf->init != NULL)
+ sc->conf->init(sc);
+}
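+/*
+ * Note on the register write above (explanatory, based on the usual Rockchip
+ * GRF convention): the upper 16 bits of a GRF register act as a write-enable
+ * mask for the corresponding lower bits, which is why every supply bit is
+ * also accumulated shifted by 16 into "mask" before writing "reg | mask".
+ */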
+
+static int
+rk_iodomain_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "RockChip IO Voltage Domain");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+rk_iodomain_attach(device_t dev)
+{
+ struct rk_iodomain_softc *sc;
+ int rv;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ sc->node = ofw_bus_get_node(dev);
+
+ rv = syscon_get_handle_default(dev, &sc->grf);
+ if (rv != 0) {
+ device_printf(dev, "Cannot get grf handle\n");
+ return (ENXIO);
+ }
+
+ sc->conf = (struct rk_iodomain_conf *)ofw_bus_search_compatible(dev, compat_data)->ocd_data;
+ rk_iodomain_set(sc);
+
+ return (0);
+}
+
+static int
+rk_iodomain_detach(device_t dev)
+{
+
+ return (0);
+}
+
+static device_method_t rk_iodomain_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, rk_iodomain_probe),
+ DEVMETHOD(device_attach, rk_iodomain_attach),
+ DEVMETHOD(device_detach, rk_iodomain_detach),
+
+ DEVMETHOD_END
+};
+
+static driver_t rk_iodomain_driver = {
+ "rk_iodomain",
+ rk_iodomain_methods,
+ sizeof(struct rk_iodomain_softc),
+};
+
+static devclass_t rk_iodomain_devclass;
+
+EARLY_DRIVER_MODULE(rk_iodomain, simplebus, rk_iodomain_driver,
+ rk_iodomain_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
diff --git a/sys/arm64/rockchip/rk_pcie.c b/sys/arm64/rockchip/rk_pcie.c
new file mode 100644
index 000000000000..9c1e6b04d8e2
--- /dev/null
+++ b/sys/arm64/rockchip/rk_pcie.c
@@ -0,0 +1,1402 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Michal Meloun <mmel@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+/* Rockchip PCIe controller driver */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/gpio.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/rman.h>
+
+#include <machine/bus.h>
+#include <machine/intr.h>
+#include <machine/resource.h>
+
+#include <dev/extres/clk/clk.h>
+#include <dev/extres/hwreset/hwreset.h>
+#include <dev/extres/phy/phy.h>
+#include <dev/extres/regulator/regulator.h>
+#include <dev/gpio/gpiobusvar.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/ofw/ofw_pci.h>
+#include <dev/ofw/ofwpci.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcib_private.h>
+
+#include <dev/ofw/ofw_bus.h>
+
+#include "pcib_if.h"
+
+#define ATU_CFG_BUS(x) (((x) & 0x0ff) << 20)
+#define ATU_CFG_SLOT(x) (((x) & 0x01f) << 15)
+#define ATU_CFG_FUNC(x) (((x) & 0x007) << 12)
+#define ATU_CFG_REG(x) (((x) & 0xfff) << 0)
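+/*
+ * Illustrative example (not taken from the TRM): with these macros a config
+ * access to bus 1, slot 0, function 0, register 0x10 is encoded as
+ * ATU_CFG_BUS(1) | ATU_CFG_REG(0x10) == 0x00100010, which is then used as an
+ * offset into the AXI config window.
+ */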
+
+#define ATU_TYPE_MEM 0x2
+#define ATU_TYPE_IO 0x6
+#define ATU_TYPE_CFG0 0xA
+#define ATU_TYPE_CFG1 0xB
+#define ATY_TYPE_NOR_MSG 0xC
+
+#define ATU_OB_REGIONS 33
+#define ATU_OB_REGION_SHIFT 20
+#define ATU_OB_REGION_SIZE (1 << ATU_OB_REGION_SHIFT)
+#define ATU_OB_REGION_0_SIZE (( ATU_OB_REGIONS - 1) * ATU_OB_REGION_SIZE)
+
+#define ATU_IB_REGIONS 3
+
+#define PCIE_CLIENT_BASIC_STRAP_CONF 0x000000
+#define STRAP_CONF_GEN_2 (1 << 7)
+#define STRAP_CONF_MODE_RC (1 << 6)
+#define STRAP_CONF_LANES(n) ((((n) / 2) & 0x3) << 4)
+#define STRAP_CONF_ARI_EN (1 << 3)
+#define STRAP_CONF_SR_IOV_EN (1 << 2)
+#define STRAP_CONF_LINK_TRAIN_EN (1 << 1)
+#define STRAP_CONF_CONF_EN (1 << 0)
+#define PCIE_CLIENT_HOT_RESET_CTRL 0x000018
+#define HOT_RESET_CTRL_LINK_DOWN_RESET (1 << 1)
+#define HOT_RESET_CTRL_HOT_RESET_IN (1 << 0)
+#define PCIE_CLIENT_BASIC_STATUS0 0x000044
+#define PCIE_CLIENT_BASIC_STATUS1 0x000048
+#define STATUS1_LINK_ST_GET(x) (((x) >> 20) & 0x3)
+#define STATUS1_LINK_ST_UP 3
+#define PCIE_CLIENT_INT_MASK 0x00004C
+#define PCIE_CLIENT_INT_STATUS 0x000050
+#define PCIE_CLIENT_INT_LEGACY_DONE (1 << 15)
+#define PCIE_CLIENT_INT_MSG (1 << 14)
+#define PCIE_CLIENT_INT_HOT_RST (1 << 13)
+#define PCIE_CLIENT_INT_DPA (1 << 12)
+#define PCIE_CLIENT_INT_FATAL_ERR (1 << 11)
+#define PCIE_CLIENT_INT_NFATAL_ERR (1 << 10)
+#define PCIE_CLIENT_INT_CORR_ERR (1 << 9)
+#define PCIE_CLIENT_INT_INTD (1 << 8)
+#define PCIE_CLIENT_INT_INTC (1 << 7)
+#define PCIE_CLIENT_INT_INTB (1 << 6)
+#define PCIE_CLIENT_INT_INTA (1 << 5)
+#define PCIE_CLIENT_INT_LOCAL (1 << 4)
+#define PCIE_CLIENT_INT_UDMA (1 << 3)
+#define PCIE_CLIENT_INT_PHY (1 << 2)
+#define PCIE_CLIENT_INT_HOT_PLUG (1 << 1)
+#define PCIE_CLIENT_INT_PWR_STCG (1 << 0)
+#define PCIE_CLIENT_INT_LEGACY (PCIE_CLIENT_INT_INTA | \
+ PCIE_CLIENT_INT_INTB | \
+ PCIE_CLIENT_INT_INTC | \
+ PCIE_CLIENT_INT_INTD)
+
+#define PCIE_CORE_CTRL0 0x900000
+#define CORE_CTRL_LANES_GET(x) (((x) >> 20) & 0x3)
+#define PCIE_CORE_CTRL1 0x900004
+#define PCIE_CORE_CONFIG_VENDOR 0x900044
+#define PCIE_CORE_INT_STATUS 0x90020c
+#define PCIE_CORE_INT_PRFPE (1 << 0)
+#define PCIE_CORE_INT_CRFPE (1 << 1)
+#define PCIE_CORE_INT_RRPE (1 << 2)
+#define PCIE_CORE_INT_PRFO (1 << 3)
+#define PCIE_CORE_INT_CRFO (1 << 4)
+#define PCIE_CORE_INT_RT (1 << 5)
+#define PCIE_CORE_INT_RTR (1 << 6)
+#define PCIE_CORE_INT_PE (1 << 7)
+#define PCIE_CORE_INT_MTR (1 << 8)
+#define PCIE_CORE_INT_UCR (1 << 9)
+#define PCIE_CORE_INT_FCE (1 << 10)
+#define PCIE_CORE_INT_CT (1 << 11)
+#define PCIE_CORE_INT_UTC (1 << 18)
+#define PCIE_CORE_INT_MMVC (1 << 19)
+#define PCIE_CORE_INT_MASK 0x900210
+#define PCIE_CORE_PHY_FUNC_CONF 0x9002C0
+#define PCIE_CORE_RC_BAR_CONF 0x900300
+
+#define PCIE_RC_CONFIG_STD_BASE 0x800000
+#define PCIE_RC_CONFIG_PRIV_BASE 0xA00000
+#define PCIE_RC_CONFIG_DCSR 0xA000C8
+#define PCIE_RC_CONFIG_DCSR_MPS_MASK (0x7 << 5)
+#define PCIE_RC_CONFIG_DCSR_MPS_128 (0 << 5)
+#define PCIE_RC_CONFIG_DCSR_MPS_256 (1 << 5)
+#define PCIE_RC_CONFIG_LINK_CAP 0xA00CC
+#define PCIE_RC_CONFIG_LINK_CAP_L0S (1 << 10)
+
+#define PCIE_RC_CONFIG_LCS 0xA000D0
+#define PCIE_RC_CONFIG_THP_CAP 0xA00274
+#define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK 0xFFF00000
+
+#define PCIE_CORE_OB_ADDR0(n) (0xC00000 + 0x20 * (n) + 0x00)
+#define PCIE_CORE_OB_ADDR1(n) (0xC00000 + 0x20 * (n) + 0x04)
+#define PCIE_CORE_OB_DESC0(n) (0xC00000 + 0x20 * (n) + 0x08)
+#define PCIE_CORE_OB_DESC1(n) (0xC00000 + 0x20 * (n) + 0x0C)
+#define PCIE_CORE_OB_DESC2(n) (0xC00000 + 0x20 * (n) + 0x10)
+#define PCIE_CORE_OB_DESC3(n) (0xC00000 + 0x20 * (n) + 0x14)
+
+#define PCIE_CORE_IB_ADDR0(n) (0xC00800 + 0x8 * (n) + 0x00)
+#define PCIE_CORE_IB_ADDR1(n) (0xC00800 + 0x8 * (n) + 0x04)
+
+#define PRIV_CFG_RD4(sc, reg) \
+ (uint32_t)rk_pcie_local_cfg_read(sc, true, reg, 4)
+#define PRIV_CFG_RD2(sc, reg) \
+ (uint16_t)rk_pcie_local_cfg_read(sc, true, reg, 2)
+#define PRIV_CFG_RD1(sc, reg) \
+ (uint8_t)rk_pcie_local_cfg_read(sc, true, reg, 1)
+#define PRIV_CFG_WR4(sc, reg, val) \
+ rk_pcie_local_cfg_write(sc, true, reg, val, 4)
+#define PRIV_CFG_WR2(sc, reg, val) \
+ rk_pcie_local_cfg_write(sc, true, reg, val, 2)
+#define PRIV_CFG_WR1(sc, reg, val) \
+ rk_pcie_local_cfg_write(sc, true, reg, val, 1)
+
+#define APB_WR4(_sc, _r, _v) bus_write_4((_sc)->apb_mem_res, (_r), (_v))
+#define APB_RD4(_sc, _r) bus_read_4((_sc)->apb_mem_res, (_r))
+
+#define MAX_LANES 4
+
+#define RK_PCIE_ENABLE_MSI
+#define RK_PCIE_ENABLE_MSIX
+
+struct rk_pcie_softc {
+ struct ofw_pci_softc ofw_pci; /* Must be first */
+
+ struct resource *axi_mem_res;
+ struct resource *apb_mem_res;
+ struct resource *client_irq_res;
+ struct resource *legacy_irq_res;
+ struct resource *sys_irq_res;
+ void *client_irq_cookie;
+ void *legacy_irq_cookie;
+ void *sys_irq_cookie;
+
+ device_t dev;
+ phandle_t node;
+ struct mtx mtx;
+
+ struct ofw_pci_range mem_range;
+ struct ofw_pci_range pref_mem_range;
+ struct ofw_pci_range io_range;
+
+ bool coherent;
+ bus_dma_tag_t dmat;
+
+ int num_lanes;
+ bool link_is_gen2;
+ bool no_l0s;
+
+ u_int bus_start;
+ u_int bus_end;
+ u_int root_bus;
+ u_int sub_bus;
+
+ regulator_t supply_12v;
+ regulator_t supply_3v3;
+ regulator_t supply_1v8;
+ regulator_t supply_0v9;
+ hwreset_t hwreset_core;
+ hwreset_t hwreset_mgmt;
+ hwreset_t hwreset_mgmt_sticky;
+ hwreset_t hwreset_pipe;
+ hwreset_t hwreset_pm;
+ hwreset_t hwreset_aclk;
+ hwreset_t hwreset_pclk;
+ clk_t clk_aclk;
+ clk_t clk_aclk_perf;
+ clk_t clk_hclk;
+ clk_t clk_pm;
+ phy_t phys[MAX_LANES];
+ gpio_pin_t gpio_ep;
+};
+
+/* Compatible devices. */
+static struct ofw_compat_data compat_data[] = {
+ {"rockchip,rk3399-pcie", 1},
+ {NULL, 0},
+};
+
+static uint32_t
+rk_pcie_local_cfg_read(struct rk_pcie_softc *sc, bool priv, u_int reg,
+ int bytes)
+{
+ uint32_t val;
+ bus_addr_t base;
+
+ if (priv)
+ base = PCIE_RC_CONFIG_PRIV_BASE;
+ else
+ base = PCIE_RC_CONFIG_STD_BASE;
+
+ switch (bytes) {
+ case 4:
+ val = bus_read_4(sc->apb_mem_res, base + reg);
+ break;
+ case 2:
+ val = bus_read_2(sc->apb_mem_res, base + reg);
+ break;
+ case 1:
+ val = bus_read_1(sc->apb_mem_res, base + reg);
+ break;
+ default:
+ val = 0xFFFFFFFF;
+ }
+ return (val);
+}
+
+static void
+rk_pcie_local_cfg_write(struct rk_pcie_softc *sc, bool priv, u_int reg,
+ uint32_t val, int bytes)
+{
+ uint32_t val2;
+ bus_addr_t base;
+
+ if (priv)
+ base = PCIE_RC_CONFIG_PRIV_BASE;
+ else
+ base = PCIE_RC_CONFIG_STD_BASE;
+
+ switch (bytes) {
+ case 4:
+ bus_write_4(sc->apb_mem_res, base + reg, val);
+ break;
+ case 2:
+ val2 = bus_read_4(sc->apb_mem_res, base + (reg & ~3));
+ val2 &= ~(0xffff << ((reg & 3) << 3));
+ val2 |= ((val & 0xffff) << ((reg & 3) << 3));
+ bus_write_4(sc->apb_mem_res, base + (reg & ~3), val2);
+ break;
+ case 1:
+ val2 = bus_read_4(sc->apb_mem_res, base + (reg & ~3));
+ val2 &= ~(0xff << ((reg & 3) << 3));
+ val2 |= ((val & 0xff) << ((reg & 3) << 3));
+ bus_write_4(sc->apb_mem_res, base + (reg & ~3), val2);
+ break;
+ }
+}
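+/*
+ * Note (explanatory): 1- and 2-byte writes above are emulated with a 32-bit
+ * read-modify-write of the containing word, while reads use native sub-word
+ * accesses; presumably sub-word stores to this window are not reliable.
+ */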
+
+static bool
+rk_pcie_check_dev(struct rk_pcie_softc *sc, u_int bus, u_int slot, u_int func,
+ u_int reg)
+{
+ uint32_t val;
+
+ if (bus < sc->bus_start || bus > sc->bus_end || slot > PCI_SLOTMAX ||
+ func > PCI_FUNCMAX || reg > PCI_REGMAX)
+ return (false);
+
+ if (bus == sc->root_bus) {
+		/* Only the root port (one device, one function) is on the root bus. */
+ if (slot > 0 || func > 0)
+ return (false);
+ return (true);
+ }
+
+ /* link is needed for accessing non-root busses */
+ val = APB_RD4(sc, PCIE_CLIENT_BASIC_STATUS1);
+ if (STATUS1_LINK_ST_GET(val) != STATUS1_LINK_ST_UP)
+ return (false);
+
+ /* only one device is on first subordinate bus */
+ if (bus == sc->sub_bus && slot)
+ return (false);
+ return (true);
+}
+
+static void
+rk_pcie_map_out_atu(struct rk_pcie_softc *sc, int idx, int type,
+ int num_bits, uint64_t pa)
+{
+ uint32_t addr0;
+ uint64_t max_size;
+
+	/* Check HW constraints */
+ max_size = idx == 0 ? ATU_OB_REGION_0_SIZE: ATU_OB_REGION_SIZE;
+ KASSERT(idx < ATU_OB_REGIONS, ("Invalid region index: %d\n", idx));
+ KASSERT(num_bits >= 7 && num_bits <= 63,
+ ("Bit width of region is invalid: %d\n", num_bits));
+ KASSERT(max_size <= (1ULL << (num_bits + 1)),
+ ("Bit width is invalid for given region[%d]: %d\n", idx, num_bits));
+
+ addr0 = (uint32_t)pa & 0xFFFFFF00;
+ addr0 |= num_bits;
+ APB_WR4(sc, PCIE_CORE_OB_ADDR0(idx), addr0);
+ APB_WR4(sc, PCIE_CORE_OB_ADDR1(idx), (uint32_t)(pa >> 32));
+ APB_WR4(sc, PCIE_CORE_OB_DESC0(idx), 1 << 23 | type);
+ APB_WR4(sc, PCIE_CORE_OB_DESC1(idx), sc->root_bus);
+
+ /* Readback for sync */
+ APB_RD4(sc, PCIE_CORE_OB_DESC1(idx));
+}
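+/*
+ * Note (explanatory, derived from the assertions above and the callers in
+ * rk_pcie_setup_sw()): a region programmed with "num_bits" N translates
+ * 2^(N+1) bytes, so the 1 MB memory/IO windows pass ATU_OB_REGION_SHIFT - 1
+ * (19) and the 32 MB config window passes 25 - 1.
+ */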
+
+static void
+rk_pcie_map_cfg_atu(struct rk_pcie_softc *sc, int idx, int type)
+{
+
+	/* Check HW constraints */
+ KASSERT(idx < ATU_OB_REGIONS, ("Invalid region index: %d\n", idx));
+
+	/*
+	 * The config window is only 25 bits wide, so the full bus range
+	 * cannot be encoded in it. The remaining bits of the bus number
+	 * are taken from the DESC1 field.
+	 */
+ APB_WR4(sc, PCIE_CORE_OB_ADDR0(idx), 25 - 1);
+ APB_WR4(sc, PCIE_CORE_OB_ADDR1(idx), 0);
+ APB_WR4(sc, PCIE_CORE_OB_DESC0(idx), 1 << 23 | type);
+ APB_WR4(sc, PCIE_CORE_OB_DESC1(idx), sc->root_bus);
+
+ /* Readback for sync */
+ APB_RD4(sc, PCIE_CORE_OB_DESC1(idx));
+
+}
+
+static void
+rk_pcie_map_in_atu(struct rk_pcie_softc *sc, int idx, int num_bits, uint64_t pa)
+{
+ uint32_t addr0;
+
+	/* Check HW constraints */
+ KASSERT(idx < ATU_IB_REGIONS, ("Invalid region index: %d\n", idx));
+ KASSERT(num_bits >= 7 && num_bits <= 63,
+ ("Bit width of region is invalid: %d\n", num_bits));
+
+ addr0 = (uint32_t)pa & 0xFFFFFF00;
+ addr0 |= num_bits;
+ APB_WR4(sc, PCIE_CORE_IB_ADDR0(idx), addr0);
+ APB_WR4(sc, PCIE_CORE_IB_ADDR1(idx), (uint32_t)(pa >> 32));
+
+ /* Readback for sync */
+ APB_RD4(sc, PCIE_CORE_IB_ADDR1(idx));
+}
+
+static int
+rk_pcie_decode_ranges(struct rk_pcie_softc *sc, struct ofw_pci_range *ranges,
+ int nranges)
+{
+ int i;
+
+ for (i = 0; i < nranges; i++) {
+ if ((ranges[i].pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) ==
+ OFW_PCI_PHYS_HI_SPACE_IO) {
+ if (sc->io_range.size != 0) {
+ device_printf(sc->dev,
+ "Duplicated IO range found in DT\n");
+ return (ENXIO);
+ }
+ sc->io_range = ranges[i];
+ }
+ if (((ranges[i].pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) ==
+ OFW_PCI_PHYS_HI_SPACE_MEM64)) {
+ if (ranges[i].pci_hi & OFW_PCI_PHYS_HI_PREFETCHABLE) {
+ if (sc->pref_mem_range.size != 0) {
+ device_printf(sc->dev,
+ "Duplicated memory range found "
+ "in DT\n");
+ return (ENXIO);
+ }
+ sc->pref_mem_range = ranges[i];
+ } else {
+ if (sc->mem_range.size != 0) {
+ device_printf(sc->dev,
+ "Duplicated memory range found "
+ "in DT\n");
+ return (ENXIO);
+ }
+ sc->mem_range = ranges[i];
+ }
+ }
+ }
+ if (sc->mem_range.size == 0) {
+		device_printf(sc->dev,
+		    "At least one memory range must be defined in DT.\n");
+ return (ENXIO);
+ }
+ return (0);
+}
+
+/*-----------------------------------------------------------------------------
+ *
+ * P C I B I N T E R F A C E
+ */
+static uint32_t
+rk_pcie_read_config(device_t dev, u_int bus, u_int slot,
+ u_int func, u_int reg, int bytes)
+{
+ struct rk_pcie_softc *sc;
+ uint32_t data;
+ uint64_t addr;
+ int type;
+
+ sc = device_get_softc(dev);
+
+ if (!rk_pcie_check_dev(sc, bus, slot, func, reg))
+ return (0xFFFFFFFFU);
+
+ if (bus == sc->root_bus)
+ return (rk_pcie_local_cfg_read(sc, false, reg, bytes));
+
+ addr = ATU_CFG_BUS(bus) | ATU_CFG_SLOT(slot) | ATU_CFG_FUNC(func) |
+ ATU_CFG_REG(reg);
+ if (bus == sc->sub_bus) {
+ type = ATU_TYPE_CFG0;
+ } else {
+ type = ATU_TYPE_CFG1;
+		/*
+		 * XXX FIXME: any attempt to generate a type 1 configuration
+		 * access causes an external data abort.
+		 */
+ return (0xFFFFFFFFU);
+ }
+ rk_pcie_map_cfg_atu(sc, 0, type);
+
+ switch (bytes) {
+ case 1:
+ data = bus_read_1(sc->axi_mem_res, addr);
+ break;
+ case 2:
+ data = bus_read_2(sc->axi_mem_res, addr);
+ break;
+ case 4:
+ data = bus_read_4(sc->axi_mem_res, addr);
+ break;
+ default:
+ data = 0xFFFFFFFFU;
+ }
+ return (data);
+}
+
+static void
+rk_pcie_write_config(device_t dev, u_int bus, u_int slot,
+ u_int func, u_int reg, uint32_t val, int bytes)
+{
+ struct rk_pcie_softc *sc;
+ uint64_t addr;
+ int type;
+
+ sc = device_get_softc(dev);
+
+ if (!rk_pcie_check_dev(sc, bus, slot, func, reg))
+ return;
+
+ if (bus == sc->root_bus)
+ return (rk_pcie_local_cfg_write(sc, false, reg, val, bytes));
+
+ addr = ATU_CFG_BUS(bus) | ATU_CFG_SLOT(slot) | ATU_CFG_FUNC(func) |
+ ATU_CFG_REG(reg);
+ if (bus == sc->sub_bus){
+ type = ATU_TYPE_CFG0;
+ } else {
+ type = ATU_TYPE_CFG1;
+		/*
+		 * XXX FIXME: any attempt to generate a type 1 configuration
+		 * access causes an external data abort.
+		 */
+ return;
+ }
+ rk_pcie_map_cfg_atu(sc, 0, type);
+
+ switch (bytes) {
+ case 1:
+ bus_write_1(sc->axi_mem_res, addr, val);
+ break;
+ case 2:
+ bus_write_2(sc->axi_mem_res, addr, val);
+ break;
+ case 4:
+ bus_write_4(sc->axi_mem_res, addr, val);
+ break;
+ default:
+ break;
+ }
+}
+
+#ifdef RK_PCIE_ENABLE_MSI
+static int
+rk_pcie_alloc_msi(device_t pci, device_t child, int count,
+ int maxcount, int *irqs)
+{
+ phandle_t msi_parent;
+ int rv;
+
+ rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
+ &msi_parent, NULL);
+ if (rv != 0)
+ return (rv);
+
+ rv = intr_alloc_msi(pci, child, msi_parent, count, maxcount,irqs);
+ return (rv);
+}
+
+static int
+rk_pcie_release_msi(device_t pci, device_t child, int count, int *irqs)
+{
+ phandle_t msi_parent;
+ int rv;
+
+ rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
+ &msi_parent, NULL);
+ if (rv != 0)
+ return (rv);
+ rv = intr_release_msi(pci, child, msi_parent, count, irqs);
+ return (rv);
+}
+#endif
+
+static int
+rk_pcie_map_msi(device_t pci, device_t child, int irq, uint64_t *addr,
+ uint32_t *data)
+{
+ phandle_t msi_parent;
+ int rv;
+
+ rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
+ &msi_parent, NULL);
+ if (rv != 0)
+ return (rv);
+ rv = intr_map_msi(pci, child, msi_parent, irq, addr, data);
+ return (rv);
+}
+
+#ifdef RK_PCIE_ENABLE_MSIX
+static int
+rk_pcie_alloc_msix(device_t pci, device_t child, int *irq)
+{
+ phandle_t msi_parent;
+ int rv;
+
+ rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
+ &msi_parent, NULL);
+ if (rv != 0)
+ return (rv);
+ rv = intr_alloc_msix(pci, child, msi_parent, irq);
+ return (rv);
+}
+
+static int
+rk_pcie_release_msix(device_t pci, device_t child, int irq)
+{
+ phandle_t msi_parent;
+ int rv;
+
+ rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
+ &msi_parent, NULL);
+ if (rv != 0)
+ return (rv);
+ rv = intr_release_msix(pci, child, msi_parent, irq);
+ return (rv);
+}
+#endif
+
+static int
+rk_pcie_get_id(device_t pci, device_t child, enum pci_id_type type,
+ uintptr_t *id)
+{
+ phandle_t node;
+ int rv;
+ uint32_t rid;
+ uint16_t pci_rid;
+
+ if (type != PCI_ID_MSI)
+ return (pcib_get_id(pci, child, type, id));
+
+ node = ofw_bus_get_node(pci);
+ pci_rid = pci_get_rid(child);
+
+ rv = ofw_bus_msimap(node, pci_rid, NULL, &rid);
+ if (rv != 0)
+ return (rv);
+
+ *id = rid;
+ return (0);
+}
+
+static int
+rk_pcie_route_interrupt(device_t bus, device_t dev, int pin)
+{
+ struct rk_pcie_softc *sc;
+ u_int irq;
+
+ sc = device_get_softc(bus);
+ irq = intr_map_clone_irq(rman_get_start(sc->legacy_irq_res));
+ device_printf(bus, "route pin %d for device %d.%d to %u\n",
+ pin, pci_get_slot(dev), pci_get_function(dev), irq);
+
+ return (irq);
+}
+
+/*-----------------------------------------------------------------------------
+ *
+ * B U S / D E V I C E I N T E R F A C E
+ */
+static int
+rk_pcie_parse_fdt_resources(struct rk_pcie_softc *sc)
+{
+ int i, rv;
+ char buf[16];
+
+ /* Regulators. All are optional. */
+ rv = regulator_get_by_ofw_property(sc->dev, 0,
+ "vpcie12v-supply", &sc->supply_12v);
+ if (rv != 0 && rv != ENOENT) {
+ device_printf(sc->dev,"Cannot get 'vpcie12' regulator\n");
+ return (ENXIO);
+ }
+ rv = regulator_get_by_ofw_property(sc->dev, 0,
+ "vpcie3v3-supply", &sc->supply_3v3);
+ if (rv != 0 && rv != ENOENT) {
+ device_printf(sc->dev,"Cannot get 'vpcie3v3' regulator\n");
+ return (ENXIO);
+ }
+ rv = regulator_get_by_ofw_property(sc->dev, 0,
+ "vpcie1v8-supply", &sc->supply_1v8);
+ if (rv != 0 && rv != ENOENT) {
+ device_printf(sc->dev,"Cannot get 'vpcie1v8' regulator\n");
+ return (ENXIO);
+ }
+ rv = regulator_get_by_ofw_property(sc->dev, 0,
+ "vpcie0v9-supply", &sc->supply_0v9);
+ if (rv != 0 && rv != ENOENT) {
+ device_printf(sc->dev,"Cannot get 'vpcie0v9' regulator\n");
+ return (ENXIO);
+ }
+
+ /* Resets. */
+ rv = hwreset_get_by_ofw_name(sc->dev, 0, "core", &sc->hwreset_core);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot get 'core' reset\n");
+ return (ENXIO);
+ }
+ rv = hwreset_get_by_ofw_name(sc->dev, 0, "mgmt", &sc->hwreset_mgmt);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot get 'mgmt' reset\n");
+ return (ENXIO);
+ }
+ rv = hwreset_get_by_ofw_name(sc->dev, 0, "mgmt-sticky",
+ &sc->hwreset_mgmt_sticky);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot get 'mgmt-sticky' reset\n");
+ return (ENXIO);
+ }
+ rv = hwreset_get_by_ofw_name(sc->dev, 0, "pipe", &sc->hwreset_pipe);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot get 'pipe' reset\n");
+ return (ENXIO);
+ }
+ rv = hwreset_get_by_ofw_name(sc->dev, 0, "pm", &sc->hwreset_pm);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot get 'pm' reset\n");
+ return (ENXIO);
+ }
+ rv = hwreset_get_by_ofw_name(sc->dev, 0, "aclk", &sc->hwreset_aclk);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot get 'aclk' reset\n");
+ return (ENXIO);
+ }
+ rv = hwreset_get_by_ofw_name(sc->dev, 0, "pclk", &sc->hwreset_pclk);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot get 'pclk' reset\n");
+ return (ENXIO);
+ }
+
+ /* Clocks. */
+ rv = clk_get_by_ofw_name(sc->dev, 0, "aclk", &sc->clk_aclk);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot get 'aclk' clock\n");
+ return (ENXIO);
+ }
+ rv = clk_get_by_ofw_name(sc->dev, 0, "aclk-perf", &sc->clk_aclk_perf);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot get 'aclk-perf' clock\n");
+ return (ENXIO);
+ }
+ rv = clk_get_by_ofw_name(sc->dev, 0, "hclk", &sc->clk_hclk);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot get 'hclk' clock\n");
+ return (ENXIO);
+ }
+ rv = clk_get_by_ofw_name(sc->dev, 0, "pm", &sc->clk_pm);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot get 'pm' clock\n");
+ return (ENXIO);
+ }
+
+ /* Phys. */
+ for (i = 0; i < MAX_LANES; i++ ) {
+ sprintf (buf, "pcie-phy-%d", i);
+ rv = phy_get_by_ofw_name(sc->dev, 0, buf, sc->phys + i);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot get '%s' phy\n", buf);
+ return (ENXIO);
+ }
+ }
+
+ /* GPIO for PERST#. Optional */
+ rv = gpio_pin_get_by_ofw_property(sc->dev, sc->node, "ep-gpios",
+ &sc->gpio_ep);
+ if (rv != 0 && rv != ENOENT) {
+ device_printf(sc->dev, "Cannot get 'ep-gpios' gpio\n");
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static int
+rk_pcie_enable_resources(struct rk_pcie_softc *sc)
+{
+ int i, rv;
+ uint32_t val;
+
+ /* Assert all resets */
+ rv = hwreset_assert(sc->hwreset_pclk);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot assert 'pclk' reset\n");
+ return (rv);
+ }
+ rv = hwreset_assert(sc->hwreset_aclk);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot assert 'aclk' reset\n");
+ return (rv);
+ }
+ rv = hwreset_assert(sc->hwreset_pm);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot assert 'pm' reset\n");
+ return (rv);
+ }
+ rv = hwreset_assert(sc->hwreset_pipe);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot assert 'pipe' reset\n");
+ return (rv);
+ }
+ rv = hwreset_assert(sc->hwreset_mgmt_sticky);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot assert 'mgmt_sticky' reset\n");
+ return (rv);
+ }
+ rv = hwreset_assert(sc->hwreset_mgmt);
+ if (rv != 0) {
+		device_printf(sc->dev, "Cannot assert 'mgmt' reset\n");
+ return (rv);
+ }
+ rv = hwreset_assert(sc->hwreset_core);
+ if (rv != 0) {
+		device_printf(sc->dev, "Cannot assert 'core' reset\n");
+ return (rv);
+ }
+ DELAY(10000);
+
+	/* Enable clocks */
+ rv = clk_enable(sc->clk_aclk);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot enable 'aclk' clock\n");
+ return (rv);
+ }
+ rv = clk_enable(sc->clk_aclk_perf);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot enable 'aclk_perf' clock\n");
+ return (rv);
+ }
+ rv = clk_enable(sc->clk_hclk);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot enable 'hclk' clock\n");
+ return (rv);
+ }
+ rv = clk_enable(sc->clk_pm);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot enable 'pm' clock\n");
+ return (rv);
+ }
+
+ /* Power up regulators */
+ if (sc->supply_12v != NULL) {
+ rv = regulator_enable(sc->supply_12v);
+ if (rv != 0) {
+ device_printf(sc->dev,
+ "Cannot enable 'vpcie12' regulator\n");
+ return (rv);
+ }
+ }
+ if (sc->supply_3v3 != NULL) {
+ rv = regulator_enable(sc->supply_3v3);
+ if (rv != 0) {
+ device_printf(sc->dev,
+ "Cannot enable 'vpcie3v3' regulator\n");
+ return (rv);
+ }
+ }
+ if (sc->supply_1v8 != NULL) {
+ rv = regulator_enable(sc->supply_1v8);
+ if (rv != 0) {
+ device_printf(sc->dev,
+ "Cannot enable 'vpcie1v8' regulator\n");
+ return (rv);
+ }
+ }
+ if (sc->supply_0v9 != NULL) {
+ rv = regulator_enable(sc->supply_0v9);
+ if (rv != 0) {
+ device_printf(sc->dev,
+			    "Cannot enable 'vpcie0v9' regulator\n");
+ return (rv);
+ }
+ }
+ DELAY(1000);
+
+	/* Deassert basic resets */
+ rv = hwreset_deassert(sc->hwreset_pm);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot deassert 'pm' reset\n");
+ return (rv);
+ }
+ rv = hwreset_deassert(sc->hwreset_aclk);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot deassert 'aclk' reset\n");
+ return (rv);
+ }
+ rv = hwreset_deassert(sc->hwreset_pclk);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot deassert 'pclk' reset\n");
+ return (rv);
+ }
+
+ /* Set basic PCIe core mode (RC, lanes, gen1 or 2) */
+ val = STRAP_CONF_GEN_2 << 16 |
+ (sc->link_is_gen2 ? STRAP_CONF_GEN_2: 0);
+ val |= STRAP_CONF_MODE_RC << 16 | STRAP_CONF_MODE_RC;
+ val |= STRAP_CONF_LANES(~0) << 16 | STRAP_CONF_LANES(sc->num_lanes);
+ val |= STRAP_CONF_ARI_EN << 16 | STRAP_CONF_ARI_EN;
+ val |= STRAP_CONF_CONF_EN << 16 | STRAP_CONF_CONF_EN;
+ APB_WR4(sc, PCIE_CLIENT_BASIC_STRAP_CONF, val);
+
+ for (i = 0; i < MAX_LANES; i++) {
+ rv = phy_enable(sc->phys[i]);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot enable phy %d\n", i);
+ return (rv);
+ }
+ }
+
+	/* Deassert the rest of the resets - order is important! */
+ rv = hwreset_deassert(sc->hwreset_mgmt_sticky);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot deassert 'mgmt_sticky' reset\n");
+ return (rv);
+ }
+ rv = hwreset_deassert(sc->hwreset_core);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot deassert 'core' reset\n");
+ return (rv);
+ }
+ rv = hwreset_deassert(sc->hwreset_mgmt);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot deassert 'mgmt' reset\n");
+ return (rv);
+ }
+ rv = hwreset_deassert(sc->hwreset_pipe);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot deassert 'pipe' reset\n");
+ return (rv);
+ }
+ return (0);
+}
+
+static int
+rk_pcie_setup_hw(struct rk_pcie_softc *sc)
+{
+ uint32_t val;
+ int i, rv;
+
+ /* Assert PERST# if defined */
+ if (sc->gpio_ep != NULL) {
+ rv = gpio_pin_set_active(sc->gpio_ep, 0);
+ if (rv != 0) {
+ device_printf(sc->dev,
+ "Cannot clear 'gpio-ep' gpio\n");
+ return (rv);
+ }
+ }
+
+ rv = rk_pcie_enable_resources(sc);
+ if (rv != 0)
+ return(rv);
+
+	/* Fix the wrong default value for transmitted FTS for L0s exit */
+ val = APB_RD4(sc, PCIE_CORE_CTRL1);
+ val |= 0xFFFF << 8;
+ APB_WR4(sc, PCIE_CORE_CTRL1, val);
+
+ /* Setup PCIE Link Status & Control register */
+ val = APB_RD4(sc, PCIE_RC_CONFIG_LCS);
+ val |= PCIEM_LINK_CTL_COMMON_CLOCK;
+ APB_WR4(sc, PCIE_RC_CONFIG_LCS, val);
+ val = APB_RD4(sc, PCIE_RC_CONFIG_LCS);
+ val |= PCIEM_LINK_CTL_RCB;
+ APB_WR4(sc, PCIE_RC_CONFIG_LCS, val);
+
+ /* Enable training for GEN1 */
+ APB_WR4(sc, PCIE_CLIENT_BASIC_STRAP_CONF,
+ STRAP_CONF_LINK_TRAIN_EN << 16 | STRAP_CONF_LINK_TRAIN_EN);
+
+ /* Deassert PERST# if defined */
+ if (sc->gpio_ep != NULL) {
+ rv = gpio_pin_set_active(sc->gpio_ep, 1);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot set 'gpio-ep' gpio\n");
+ return (rv);
+ }
+ }
+
+ /* Wait for link */
+ for (i = 500; i > 0; i--) {
+ val = APB_RD4(sc, PCIE_CLIENT_BASIC_STATUS1);
+ if (STATUS1_LINK_ST_GET(val) == STATUS1_LINK_ST_UP)
+ break;
+ DELAY(1000);
+ }
+ if (i <= 0) {
+ device_printf(sc->dev,
+		    "Gen1 link training timed out: 0x%08X.\n", val);
+ return (0);
+ }
+
+ if (sc->link_is_gen2) {
+ val = APB_RD4(sc, PCIE_RC_CONFIG_LCS);
+ val |= PCIEM_LINK_CTL_RETRAIN_LINK;
+ APB_WR4(sc, PCIE_RC_CONFIG_LCS, val);
+
+ /* Wait for link */
+ for (i = 500; i > 0; i--) {
+ val = APB_RD4(sc, PCIE_CLIENT_BASIC_STATUS1);
+ if (STATUS1_LINK_ST_GET(val) ==
+ STATUS1_LINK_ST_UP)
+ break;
+ DELAY(1000);
+ }
+ if (i <= 0)
+ device_printf(sc->dev, "Gen2 link training "
+			    "timed out: 0x%08X.\n", val);
+ }
+
+ val = APB_RD4(sc, PCIE_CORE_CTRL0);
+ val = CORE_CTRL_LANES_GET(val);
+ if (bootverbose)
+ device_printf(sc->dev, "Link width: %d\n", 1 << val);
+
+ return (0);
+}
+
+static int
+rk_pcie_setup_sw(struct rk_pcie_softc *sc)
+{
+ uint32_t val;
+ int i, region;
+
+ pcib_bridge_init(sc->dev);
+
+ /* Setup config registers */
+	APB_WR4(sc, PCIE_CORE_CONFIG_VENDOR, 0x1D87); /* Rockchip vendor ID */
+ PRIV_CFG_WR1(sc, PCIR_CLASS, PCIC_BRIDGE);
+ PRIV_CFG_WR1(sc, PCIR_SUBCLASS, PCIS_BRIDGE_PCI);
+ PRIV_CFG_WR1(sc, PCIR_PRIBUS_1, sc->root_bus);
+ PRIV_CFG_WR1(sc, PCIR_SECBUS_1, sc->sub_bus);
+ PRIV_CFG_WR1(sc, PCIR_SUBBUS_1, sc->bus_end);
+ PRIV_CFG_WR2(sc, PCIR_COMMAND, PCIM_CMD_MEMEN |
+ PCIM_CMD_BUSMASTEREN | PCIM_CMD_SERRESPEN);
+
+ /* Don't advertise L1 power substate */
+ val = APB_RD4(sc, PCIE_RC_CONFIG_THP_CAP);
+ val &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK;
+ APB_WR4(sc, PCIE_RC_CONFIG_THP_CAP, val);
+
+ /* Don't advertise L0s */
+ if (sc->no_l0s) {
+ val = APB_RD4(sc, PCIE_RC_CONFIG_LINK_CAP);
+ val &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK;
+ APB_WR4(sc, PCIE_RC_CONFIG_LINK_CAP_L0S, val);
+ }
+
+	/* Adjust maximum payload size */
+ val = APB_RD4(sc, PCIE_RC_CONFIG_DCSR);
+ val &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
+ val |= PCIE_RC_CONFIG_DCSR_MPS_128;
+ APB_WR4(sc, PCIE_RC_CONFIG_DCSR, val);
+
+	/*
+	 * Prepare the IB ATU:
+	 * map the whole address range as a single 1:1 mapping.
+	 */
+ rk_pcie_map_in_atu(sc, 2, 64 - 1, 0);
+
+ /* Prepare OB ATU */
+ /* - region 0 (32 MB) is used for config access */
+ region = 0;
+ rk_pcie_map_out_atu(sc, region++, ATU_TYPE_CFG0, 25 - 1, 0);
+
+	/* - then map memory (using 1 MB regions) */
+ for (i = 0; i < sc->mem_range.size / ATU_OB_REGION_SIZE; i++) {
+ rk_pcie_map_out_atu(sc, region++, ATU_TYPE_MEM,
+ ATU_OB_REGION_SHIFT - 1,
+ sc->mem_range.pci + ATU_OB_REGION_SIZE * i);
+ }
+
+	/* - IO space is next, typically one region */
+ for (i = 0; i < sc->io_range.size / ATU_OB_REGION_SIZE; i++) {
+ rk_pcie_map_out_atu(sc, region++, ATU_TYPE_IO,
+ ATU_OB_REGION_SHIFT - 1,
+ sc->io_range.pci + ATU_OB_REGION_SIZE * i);
+ }
+ APB_WR4(sc, PCIE_CORE_RC_BAR_CONF, 0);
+ return (0);
+}
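+/*
+ * Resulting outbound ATU layout (summary added for clarity): region 0 is the
+ * 32 MB configuration window, followed by one 1 MB region per megabyte of
+ * the memory range and then of the optional IO range, each region targeting
+ * successive 1 MB chunks of the PCI addresses from the DT "ranges" property.
+ */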
+
+static int
+rk_pcie_sys_irq(void *arg)
+{
+ struct rk_pcie_softc *sc;
+ uint32_t irq;
+
+ sc = (struct rk_pcie_softc *)arg;
+ irq = APB_RD4(sc, PCIE_CLIENT_INT_STATUS);
+ if (irq & PCIE_CLIENT_INT_LOCAL) {
+ irq = APB_RD4(sc, PCIE_CORE_INT_STATUS);
+ APB_WR4(sc, PCIE_CORE_INT_STATUS, irq);
+ APB_WR4(sc, PCIE_CLIENT_INT_STATUS, PCIE_CLIENT_INT_LOCAL);
+
+ device_printf(sc->dev, "'sys' interrupt received: 0x%04X\n",
+ irq);
+ }
+
+ return (FILTER_HANDLED);
+}
+
+static int
+rk_pcie_client_irq(void *arg)
+{
+ struct rk_pcie_softc *sc;
+ uint32_t irq;
+
+ sc = (struct rk_pcie_softc *)arg;
+ irq = APB_RD4(sc, PCIE_CLIENT_INT_STATUS);
+	/* Clear causes handled by other interrupts */
+ irq &= ~PCIE_CLIENT_INT_LOCAL;
+ irq &= ~PCIE_CLIENT_INT_LEGACY;
+ APB_WR4(sc, PCIE_CLIENT_INT_STATUS, irq);
+
+ device_printf(sc->dev, "'client' interrupt received: 0x%04X\n", irq);
+
+ return (FILTER_HANDLED);
+}
+
+static int
+rk_pcie_legacy_irq(void *arg)
+{
+ struct rk_pcie_softc *sc;
+ uint32_t irq;
+
+ sc = (struct rk_pcie_softc *)arg;
+ irq = APB_RD4(sc, PCIE_CLIENT_INT_STATUS);
+ irq &= PCIE_CLIENT_INT_LEGACY;
+ APB_WR4(sc, PCIE_CLIENT_INT_STATUS, irq);
+
+ /* All legacy interrupts are shared, do nothing. */
+ return (FILTER_STRAY);
+}
+
+static bus_dma_tag_t
+rk_pcie_get_dma_tag(device_t dev, device_t child)
+{
+ struct rk_pcie_softc *sc;
+
+ sc = device_get_softc(dev);
+ return (sc->dmat);
+}
+
+static int
+rk_pcie_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "Rockchip PCIe controller");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+rk_pcie_attach(device_t dev)
+{
+ struct rk_pcie_softc *sc;
+ uint32_t val;
+ int rv, rid, max_speed;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ sc->node = ofw_bus_get_node(dev);
+
+ mtx_init(&sc->mtx, "rk_pcie_mtx", NULL, MTX_DEF);
+
+ /* XXX Shouldn't this be configurable? */
+ sc->bus_start = 0;
+ sc->bus_end = 0x1F;
+ sc->root_bus = sc->bus_start;
+ sc->sub_bus = 1;
+
+ /* Read FDT properties */
+ rv = rk_pcie_parse_fdt_resources(sc);
+ if (rv != 0)
+ return (rv);
+
+ sc->coherent = OF_hasprop(sc->node, "dma-coherent");
+ sc->no_l0s = OF_hasprop(sc->node, "aspm-no-l0s");
+ rv = OF_getencprop(sc->node, "num-lanes", &sc->num_lanes,
+ sizeof(sc->num_lanes));
+ if (rv != sizeof(sc->num_lanes))
+ sc->num_lanes = 1;
+ if (sc->num_lanes != 1 && sc->num_lanes != 2 && sc->num_lanes != 4) {
+ device_printf(dev,
+ "invalid number of lanes: %d\n",sc->num_lanes);
+ sc->num_lanes = 0;
+ rv = ENXIO;
+ goto out;
+ }
+
+ rv = OF_getencprop(sc->node, "max-link-speed", &max_speed,
+ sizeof(max_speed));
+ if (rv != sizeof(max_speed) || max_speed != 1)
+ sc->link_is_gen2 = true;
+ else
+ sc->link_is_gen2 = false;
+
+ rv = ofw_bus_find_string_index(sc->node, "reg-names", "axi-base", &rid);
+ if (rv != 0) {
+ device_printf(dev, "Cannot get 'axi-base' memory\n");
+ rv = ENXIO;
+ goto out;
+ }
+ sc->axi_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
+ RF_ACTIVE);
+ if (sc->axi_mem_res == NULL) {
+ device_printf(dev, "Cannot allocate 'axi-base' (rid: %d)\n",
+ rid);
+ rv = ENXIO;
+ goto out;
+ }
+ rv = ofw_bus_find_string_index(sc->node, "reg-names", "apb-base", &rid);
+ if (rv != 0) {
+ device_printf(dev, "Cannot get 'apb-base' memory\n");
+ rv = ENXIO;
+ goto out;
+ }
+ sc->apb_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
+ RF_ACTIVE);
+ if (sc->apb_mem_res == NULL) {
+ device_printf(dev, "Cannot allocate 'apb-base' (rid: %d)\n",
+ rid);
+ rv = ENXIO;
+ goto out;
+ }
+
+ rv = ofw_bus_find_string_index(sc->node, "interrupt-names",
+ "client", &rid);
+ if (rv != 0) {
+ device_printf(dev, "Cannot get 'client' IRQ\n");
+ rv = ENXIO;
+ goto out;
+ }
+ sc->client_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+ RF_ACTIVE | RF_SHAREABLE);
+ if (sc->client_irq_res == NULL) {
+ device_printf(dev, "Cannot allocate 'client' IRQ resource\n");
+ rv = ENXIO;
+ goto out;
+ }
+
+ rv = ofw_bus_find_string_index(sc->node, "interrupt-names",
+ "legacy", &rid);
+ if (rv != 0) {
+ device_printf(dev, "Cannot get 'legacy' IRQ\n");
+ rv = ENXIO;
+ goto out;
+ }
+ sc->legacy_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+ RF_ACTIVE | RF_SHAREABLE);
+ if (sc->legacy_irq_res == NULL) {
+ device_printf(dev, "Cannot allocate 'legacy' IRQ resource\n");
+ rv = ENXIO;
+ goto out;
+ }
+
+ rv = ofw_bus_find_string_index(sc->node, "interrupt-names",
+ "sys", &rid);
+ if (rv != 0) {
+ device_printf(dev, "Cannot get 'sys' IRQ\n");
+ rv = ENXIO;
+ goto out;
+ }
+ sc->sys_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+ RF_ACTIVE | RF_SHAREABLE);
+ if (sc->sys_irq_res == NULL) {
+ device_printf(dev, "Cannot allocate 'sys' IRQ resource\n");
+ rv = ENXIO;
+ goto out;
+ }
+
+ if (bootverbose)
+ device_printf(dev, "Bus is%s cache-coherent\n",
+ sc->coherent ? "" : " not");
+ rv = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
+ 1, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ BUS_SPACE_MAXSIZE, /* maxsize */
+ BUS_SPACE_UNRESTRICTED, /* nsegments */
+ BUS_SPACE_MAXSIZE, /* maxsegsize */
+ sc->coherent ? BUS_DMA_COHERENT : 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->dmat);
+ if (rv != 0)
+ goto out;
+
+ rv = ofw_pci_init(dev);
+ if (rv != 0)
+ goto out;
+
+ rv = rk_pcie_decode_ranges(sc, sc->ofw_pci.sc_range,
+ sc->ofw_pci.sc_nrange);
+ if (rv != 0)
+ goto out;
+ rv = rk_pcie_setup_hw(sc);
+ if (rv != 0)
+ goto out;
+
+ rv = rk_pcie_setup_sw(sc);
+ if (rv != 0)
+ goto out;
+
+ rv = bus_setup_intr(dev, sc->client_irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
+ rk_pcie_client_irq, NULL, sc, &sc->client_irq_cookie);
+ if (rv != 0) {
+ device_printf(dev, "cannot setup client interrupt handler\n");
+ rv = ENXIO;
+ goto out;
+ }
+
+ rv = bus_setup_intr(dev, sc->legacy_irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
+ rk_pcie_legacy_irq, NULL, sc, &sc->legacy_irq_cookie);
+ if (rv != 0) {
+ device_printf(dev, "cannot setup client interrupt handler\n");
+ rv = ENXIO;
+ goto out;
+ }
+
+ rv = bus_setup_intr(dev, sc->sys_irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
+ rk_pcie_sys_irq, NULL, sc, &sc->sys_irq_cookie);
+ if (rv != 0) {
+ device_printf(dev, "cannot setup client interrupt handler\n");
+ rv = ENXIO;
+ goto out;
+ }
+
+ /* Enable interrupts */
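+ /*
+ * The client interrupt mask register follows the Rockchip hi-word
+ * write-mask scheme: the upper 16 bits of the write select which mask
+ * bits are updated, hence the shift of the value into the upper half.
+ */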
+ val =
+ PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR |
+ PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA |
+ PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG |
+ PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_INTA |
+ PCIE_CLIENT_INT_INTB | PCIE_CLIENT_INT_INTC |
+ PCIE_CLIENT_INT_INTD | PCIE_CLIENT_INT_PHY;
+
+ APB_WR4(sc, PCIE_CLIENT_INT_MASK, (val << 16) & ~val);
+
+ val =
+ PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE |
+ PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO |
+ PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR |
+ PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR |
+ PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE |
+ PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC |
+ PCIE_CORE_INT_MMVC;
+ APB_WR4(sc, PCIE_CORE_INT_MASK, ~(val));
+
+ val = APB_RD4(sc, PCIE_RC_CONFIG_LCS);
+ val |= PCIEM_LINK_CTL_LBMIE | PCIEM_LINK_CTL_LABIE;
+ APB_WR4(sc, PCIE_RC_CONFIG_LCS, val);
+
+ DELAY(250000);
+ device_add_child(dev, "pci", -1);
+ return (bus_generic_attach(dev));
+out:
+ /* XXX Cleanup */
+ return (rv);
+}
+
+static device_method_t rk_pcie_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, rk_pcie_probe),
+ DEVMETHOD(device_attach, rk_pcie_attach),
+
+ /* Bus interface */
+ DEVMETHOD(bus_get_dma_tag, rk_pcie_get_dma_tag),
+
+ /* pcib interface */
+ DEVMETHOD(pcib_read_config, rk_pcie_read_config),
+ DEVMETHOD(pcib_write_config, rk_pcie_write_config),
+ DEVMETHOD(pcib_route_interrupt, rk_pcie_route_interrupt),
+#ifdef RK_PCIE_ENABLE_MSI
+ DEVMETHOD(pcib_alloc_msi, rk_pcie_alloc_msi),
+ DEVMETHOD(pcib_release_msi, rk_pcie_release_msi),
+#endif
+#ifdef RK_PCIE_ENABLE_MSIX
+ DEVMETHOD(pcib_alloc_msix, rk_pcie_alloc_msix),
+ DEVMETHOD(pcib_release_msix, rk_pcie_release_msix),
+#endif
+ DEVMETHOD(pcib_map_msi, rk_pcie_map_msi),
+ DEVMETHOD(pcib_get_id, rk_pcie_get_id),
+
+ /* OFW bus interface */
+ DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat),
+ DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model),
+ DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name),
+ DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node),
+ DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(pcib, rk_pcie_driver, rk_pcie_methods,
+ sizeof(struct rk_pcie_softc), ofw_pci_driver);
+static devclass_t rk_pcie_devclass;
+DRIVER_MODULE(rk_pcie, simplebus, rk_pcie_driver, rk_pcie_devclass,
+ NULL, NULL);
diff --git a/sys/arm64/rockchip/rk_pcie_phy.c b/sys/arm64/rockchip/rk_pcie_phy.c
new file mode 100644
index 000000000000..75bd213bfd23
--- /dev/null
+++ b/sys/arm64/rockchip/rk_pcie_phy.c
@@ -0,0 +1,364 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Michal Meloun <mmel@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Rockchip RK3399 PCIe PHY driver
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/gpio.h>
+#include <machine/bus.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/ofw/ofw_subr.h>
+
+#include <dev/extres/clk/clk.h>
+#include <dev/extres/phy/phy.h>
+#include <dev/extres/phy/phy_internal.h>
+#include <dev/extres/syscon/syscon.h>
+#include <dev/extres/hwreset/hwreset.h>
+
+#include "syscon_if.h"
+
+#define GRF_HIWORD_SHIFT 16
+#define GRF_SOC_CON_5_PCIE 0xE214
+#define CON_5_PCIE_IDLE_OFF(x) (1 << (((x) & 0x3) + 3))
+#define GRF_SOC_CON8 0xE220
+#define GRF_SOC_STATUS1 0xE2A4
+
+/* PHY config registers - write */
+#define PHY_CFG_CLK_TEST 0x10
+#define CLK_TEST_SEPE_RATE (1 << 3)
+#define PHY_CFG_CLK_SCC 0x12
+#define CLK_SCC_PLL_100M (1 << 3)
+
+/* PHY config registers - read */
+#define PHY_CFG_PLL_LOCK 0x10
+#define CLK_PLL_LOCKED (1 << 1)
+#define PHY_CFG_SCC_LOCK 0x12
+#define CLK_SCC_100M_GATE (1 << 2)
+
+#define STATUS1_PLL_LOCKED (1 << 9)
+
+static struct ofw_compat_data compat_data[] = {
+ {"rockchip,rk3399-pcie-phy", 1},
+ {NULL, 0}
+};
+
+struct rk_pcie_phy_softc {
+ device_t dev;
+ struct syscon *syscon;
+ struct mtx mtx;
+ clk_t clk_ref;
+ hwreset_t hwreset_phy;
+ int enable_count;
+};
+
+#define PHY_LOCK(_sc) mtx_lock(&(_sc)->mtx)
+#define PHY_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx)
+#define PHY_LOCK_INIT(_sc) mtx_init(&(_sc)->mtx, \
+ device_get_nameunit(_sc->dev), "rk_pcie_phyc", MTX_DEF)
+#define PHY_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx);
+#define PHY_ASSERT_LOCKED(_sc) mtx_assert(&(_sc)->mtx, MA_OWNED);
+#define PHY_ASSERT_UNLOCKED(_sc) mtx_assert(&(_sc)->mtx, MA_NOTOWNED);
+
+#define RD4(sc, reg) SYSCON_READ_4((sc)->syscon, (reg))
+#define WR4(sc, reg, mask, val) \
+ SYSCON_WRITE_4((sc)->syscon, (reg), ((mask) << GRF_HIWORD_SHIFT) | (val))
+
+#define MAX_LANE 4
+
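+/*
+ * The PHY configuration registers are not memory mapped; they are reached
+ * indirectly through the GRF.  A write latches the register address and data
+ * in GRF_SOC_CON8 and then pulses the write-enable bit; a read selects the
+ * register in GRF_SOC_CON8 and picks the value up from GRF_SOC_STATUS1.
+ */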
+static void
+cfg_write(struct rk_pcie_phy_softc *sc, uint32_t reg, uint32_t data)
+{
+ /* setup register address and data first */
+ WR4(sc, GRF_SOC_CON8, 0x7FF,
+ (reg & 0x3F) << 1 | (data & 0x0F) << 7);
+ /* dummy readback for sync */
+ RD4(sc, GRF_SOC_CON8);
+
+ /* Do write pulse */
+ WR4(sc, GRF_SOC_CON8, 1, 1);
+ RD4(sc, GRF_SOC_CON8);
+ DELAY(10);
+ WR4(sc, GRF_SOC_CON8, 1, 0);
+ RD4(sc, GRF_SOC_CON8);
+ DELAY(10);
+}
+
+static uint32_t
+cfg_read(struct rk_pcie_phy_softc *sc, uint32_t reg)
+{
+ uint32_t val;
+
+ WR4(sc, GRF_SOC_CON8, 0x3FF, reg << 1);
+ RD4(sc, GRF_SOC_CON8);
+ DELAY(10);
+ val = RD4(sc, GRF_SOC_STATUS1);
+ return ((val >> 8) & 0x0f);
+}
+
+static int
+rk_pcie_phy_up(struct rk_pcie_phy_softc *sc, int id)
+{
+ uint32_t val;
+ int i, rv;
+
+ PHY_LOCK(sc);
+
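+ /*
+ * All lanes share one PLL and one reset, so the PHY is powered up only
+ * on the first enable; subsequent calls just bump the reference count.
+ */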
+ sc->enable_count++;
+ if (sc->enable_count != 1) {
+ PHY_UNLOCK(sc);
+ return (0);
+ }
+
+ rv = hwreset_deassert(sc->hwreset_phy);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot deassert 'phy' reset\n");
+ PHY_UNLOCK(sc);
+ return (rv);
+ }
+ /* Un-idle all lanes */
+ for (i = 0; i < MAX_LANE; i++)
+ WR4(sc, GRF_SOC_CON_5_PCIE, CON_5_PCIE_IDLE_OFF(i), 0);
+
+ /* Wait for PLL lock */
+ for (i = 100; i > 0; i--) {
+ val = cfg_read(sc, PHY_CFG_PLL_LOCK);
+ if (val & CLK_PLL_LOCKED)
+ break;
+ DELAY(1000);
+ }
+ if (i <= 0) {
+ device_printf(sc->dev, "PLL lock timeouted, 0x%02X\n", val);
+ PHY_UNLOCK(sc);
+ return (ETIMEDOUT);
+ }
+ /* Switch PLL to stable 5GHz, rate adjustment is done by divider */
+ cfg_write(sc, PHY_CFG_CLK_TEST, CLK_TEST_SEPE_RATE);
+ /* Enable 100MHz output for PCIe ref clock */
+ cfg_write(sc, PHY_CFG_CLK_SCC, CLK_SCC_PLL_100M);
+
+ /* Wait for ungating of ref clock */
+ for (i = 100; i > 0; i--) {
+ val = cfg_read(sc, PHY_CFG_SCC_LOCK);
+ if ((val & CLK_SCC_100M_GATE) == 0)
+ break;
+ DELAY(1000);
+ }
+ if (i <= 0) {
+ device_printf(sc->dev, "PLL output enable timeouted\n");
+ PHY_UNLOCK(sc);
+ return (ETIMEDOUT);
+ }
+
+ /* Wait for PLL relock (to 5GHz) */
+ for (i = 100; i > 0; i--) {
+ val = cfg_read(sc, PHY_CFG_PLL_LOCK);
+ if (val & CLK_PLL_LOCKED)
+ break;
+ DELAY(1000);
+ }
+ if (i <= 0) {
+ device_printf(sc->dev, "PLL relock timeouted\n");
+ PHY_UNLOCK(sc);
+ return (ETIMEDOUT);
+ }
+
+ PHY_UNLOCK(sc);
+ return (rv);
+}
+
+static int
+rk_pcie_phy_down(struct rk_pcie_phy_softc *sc, int id)
+{
+ int rv;
+
+ PHY_LOCK(sc);
+
+ rv = 0;
+ if (sc->enable_count <= 0)
+ panic("unpaired enable/disable");
+
+ sc->enable_count--;
+
+ /* Idle given lane */
+ WR4(sc, GRF_SOC_CON_5_PCIE,
+ CON_5_PCIE_IDLE_OFF(id),
+ CON_5_PCIE_IDLE_OFF(id));
+
+ if (sc->enable_count == 0) {
+ rv = hwreset_assert(sc->hwreset_phy);
+ if (rv != 0)
+ device_printf(sc->dev, "Cannot assert 'phy' reset\n");
+ }
+ PHY_UNLOCK(sc);
+ return (rv);
+}
+
+static int
+rk_pcie_phy_enable(struct phynode *phynode, bool enable)
+{
+ struct rk_pcie_phy_softc *sc;
+ device_t dev;
+ intptr_t phy;
+ int rv;
+
+ dev = phynode_get_device(phynode);
+ phy = phynode_get_id(phynode);
+ sc = device_get_softc(dev);
+
+ if (enable)
+ rv = rk_pcie_phy_up(sc, (int)phy);
+ else
+ rv = rk_pcie_phy_down(sc, (int) phy);
+
+ return (rv);
+}
+
+/* Phy class and methods. */
+static int rk_pcie_phy_enable(struct phynode *phynode, bool enable);
+static phynode_method_t rk_pcie_phy_phynode_methods[] = {
+ PHYNODEMETHOD(phynode_enable, rk_pcie_phy_enable),
+
+ PHYNODEMETHOD_END
+};
+
+DEFINE_CLASS_1(rk_pcie_phy_phynode, rk_pcie_phy_phynode_class,
+ rk_pcie_phy_phynode_methods, 0, phynode_class);
+
+static int
+rk_pcie_phy_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "Rockchip RK3399 PCIe PHY");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+rk_pcie_phy_attach(device_t dev)
+{
+ struct rk_pcie_phy_softc *sc;
+ struct phynode_init_def phy_init;
+ struct phynode *phynode;
+ phandle_t node;
+ int i, rv;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ node = ofw_bus_get_node(dev);
+ PHY_LOCK_INIT(sc);
+
+ if (SYSCON_GET_HANDLE(sc->dev, &sc->syscon) != 0 ||
+ sc->syscon == NULL) {
+ device_printf(dev, "cannot get syscon for device\n");
+ rv = ENXIO;
+ goto fail;
+ }
+
+ rv = clk_get_by_ofw_name(sc->dev, 0, "refclk", &sc->clk_ref);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot get 'refclk' clock\n");
+ rv = ENXIO;
+ goto fail;
+ }
+ rv = hwreset_get_by_ofw_name(sc->dev, 0, "phy", &sc->hwreset_phy);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot get 'phy' reset\n");
+ rv = ENXIO;
+ goto fail;
+ }
+
+ rv = hwreset_assert(sc->hwreset_phy);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot assert 'phy' reset\n");
+ rv = ENXIO;
+ goto fail;
+ }
+
+ rv = clk_enable(sc->clk_ref);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot enable 'ref' clock\n");
+ rv = ENXIO;
+ goto fail;
+ }
+
+ for (i = 0; i < MAX_LANE; i++) {
+ phy_init.id = i;
+ phy_init.ofw_node = node;
+ phynode = phynode_create(dev, &rk_pcie_phy_phynode_class,
+ &phy_init);
+ if (phynode == NULL) {
+ device_printf(dev, "Cannot create phy[%d]\n", i);
+ rv = ENXIO;
+ goto fail;
+ }
+ if (phynode_register(phynode) == NULL) {
+ device_printf(dev, "Cannot register phy[%d]\n", i);
+ rv = ENXIO;
+ goto fail;
+ }
+ }
+
+ return (0);
+
+fail:
+ return (rv);
+}
+
+static device_method_t rk_pcie_phy_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, rk_pcie_phy_probe),
+ DEVMETHOD(device_attach, rk_pcie_phy_attach),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(rk_pcie_phy, rk_pcie_phy_driver, rk_pcie_phy_methods,
+ sizeof(struct rk_pcie_phy_softc));
+
+static devclass_t rk_pcie_phy_devclass;
+EARLY_DRIVER_MODULE(rk_pcie_phy, simplebus, rk_pcie_phy_driver,
+ rk_pcie_phy_devclass, NULL, NULL,
+ BUS_PASS_SUPPORTDEV + BUS_PASS_ORDER_MIDDLE);
diff --git a/sys/arm64/rockchip/rk_pinctrl.c b/sys/arm64/rockchip/rk_pinctrl.c
new file mode 100644
index 000000000000..24ad7798eef0
--- /dev/null
+++ b/sys/arm64/rockchip/rk_pinctrl.c
@@ -0,0 +1,1348 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2018 Emmanuel Vadot <manu@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <sys/gpio.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/rman.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/intr.h>
+
+#include <dev/fdt/simplebus.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/fdt/fdt_pinctrl.h>
+
+#include <dev/extres/syscon/syscon.h>
+
+#include "gpio_if.h"
+#include "syscon_if.h"
+#include "fdt_pinctrl_if.h"
+
+struct rk_pinctrl_pin_drive {
+ uint32_t bank;
+ uint32_t subbank;
+ uint32_t offset;
+ uint32_t value;
+ uint32_t ma;
+};
+
+struct rk_pinctrl_bank {
+ uint32_t bank;
+ uint32_t subbank;
+ uint32_t offset;
+ uint32_t nbits;
+};
+
+struct rk_pinctrl_pin_fixup {
+ uint32_t bank;
+ uint32_t subbank;
+ uint32_t pin;
+ uint32_t reg;
+ uint32_t bit;
+ uint32_t mask;
+};
+
+struct rk_pinctrl_gpio {
+ uint32_t bank;
+ char *gpio_name;
+ device_t gpio_dev;
+};
+
+struct rk_pinctrl_softc;
+
+struct rk_pinctrl_conf {
+ struct rk_pinctrl_bank *iomux_conf;
+ uint32_t iomux_nbanks;
+ struct rk_pinctrl_pin_fixup *pin_fixup;
+ uint32_t npin_fixup;
+ struct rk_pinctrl_pin_drive *pin_drive;
+ uint32_t npin_drive;
+ struct rk_pinctrl_gpio *gpio_bank;
+ uint32_t ngpio_bank;
+ uint32_t (*get_pd_offset)(struct rk_pinctrl_softc *, uint32_t);
+ struct syscon *(*get_syscon)(struct rk_pinctrl_softc *, uint32_t);
+ int (*parse_bias)(phandle_t, int);
+ int (*resolv_bias_value)(int, int);
+ int (*get_bias_value)(int, int);
+};
+
+struct rk_pinctrl_softc {
+ struct simplebus_softc simplebus_sc;
+ device_t dev;
+ struct syscon *grf;
+ struct syscon *pmu;
+ struct rk_pinctrl_conf *conf;
+ struct mtx mtx;
+};
+
+#define RK_PINCTRL_LOCK(_sc) mtx_lock_spin(&(_sc)->mtx)
+#define RK_PINCTRL_UNLOCK(_sc) mtx_unlock_spin(&(_sc)->mtx)
+#define RK_PINCTRL_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->mtx, MA_OWNED)
+
+#define RK_IOMUX(_bank, _subbank, _offset, _nbits) \
+{ \
+ .bank = _bank, \
+ .subbank = _subbank, \
+ .offset = _offset, \
+ .nbits = _nbits, \
+}
+
+#define RK_PINFIX(_bank, _pin, _reg, _bit, _mask) \
+{ \
+ .bank = _bank, \
+ .pin = _pin, \
+ .reg = _reg, \
+ .bit = _bit, \
+ .mask = _mask, \
+}
+
+#define RK_PINDRIVE(_bank, _subbank, _offset, _value, _ma) \
+{ \
+ .bank = _bank, \
+ .subbank = _subbank, \
+ .offset = _offset, \
+ .value = _value, \
+ .ma = _ma, \
+}
+#define RK_GPIO(_bank, _name) \
+{ \
+ .bank = _bank, \
+ .gpio_name = _name, \
+}
+
+static struct rk_pinctrl_gpio rk3288_gpio_bank[] = {
+ RK_GPIO(0, "gpio0"),
+ RK_GPIO(1, "gpio1"),
+ RK_GPIO(2, "gpio2"),
+ RK_GPIO(3, "gpio3"),
+ RK_GPIO(4, "gpio4"),
+ RK_GPIO(5, "gpio5"),
+ RK_GPIO(6, "gpio6"),
+ RK_GPIO(7, "gpio7"),
+ RK_GPIO(8, "gpio8"),
+};
+
+static struct rk_pinctrl_bank rk3288_iomux_bank[] = {
+ /* bank sub offs nbits */
+ /* PMU */
+ RK_IOMUX(0, 0, 0x0084, 2),
+ RK_IOMUX(0, 1, 0x0088, 2),
+ RK_IOMUX(0, 2, 0x008C, 2),
+ /* GRF */
+ RK_IOMUX(1, 3, 0x000C, 2),
+ RK_IOMUX(2, 0, 0x0010, 2),
+ RK_IOMUX(2, 1, 0x0014, 2),
+ RK_IOMUX(2, 2, 0x0018, 2),
+ RK_IOMUX(2, 3, 0x001C, 2),
+ RK_IOMUX(3, 0, 0x0020, 2),
+ RK_IOMUX(3, 1, 0x0024, 2),
+ RK_IOMUX(3, 2, 0x0028, 2),
+ RK_IOMUX(3, 3, 0x002C, 4),
+ RK_IOMUX(4, 0, 0x0034, 4),
+ RK_IOMUX(4, 1, 0x003C, 4),
+ RK_IOMUX(4, 2, 0x0044, 2),
+ RK_IOMUX(4, 3, 0x0048, 2),
+ /* 5,0 - Empty */
+ RK_IOMUX(5, 1, 0x0050, 2),
+ RK_IOMUX(5, 2, 0x0054, 2),
+ /* 5,3 - Empty */
+ RK_IOMUX(6, 0, 0x005C, 2),
+ RK_IOMUX(6, 1, 0x0060, 2),
+ RK_IOMUX(6, 2, 0x0064, 2),
+ /* 6,3 - Empty */
+ RK_IOMUX(7, 0, 0x006C, 2),
+ RK_IOMUX(7, 1, 0x0070, 2),
+ RK_IOMUX(7, 2, 0x0074, 4),
+ /* 7,3 - Empty */
+ RK_IOMUX(8, 0, 0x0080, 2),
+ RK_IOMUX(8, 1, 0x0084, 2),
+ /* 8,2 - Empty */
+ /* 8,3 - Empty */
+
+};
+
+static struct rk_pinctrl_pin_fixup rk3288_pin_fixup[] = {
+};
+
+static struct rk_pinctrl_pin_drive rk3288_pin_drive[] = {
+ /* bank sub offs val ma */
+ /* GPIO0A (PMU)*/
+ RK_PINDRIVE(0, 0, 0x070, 0, 2),
+ RK_PINDRIVE(0, 0, 0x070, 1, 4),
+ RK_PINDRIVE(0, 0, 0x070, 2, 8),
+ RK_PINDRIVE(0, 0, 0x070, 3, 12),
+
+ /* GPIO0B (PMU)*/
+ RK_PINDRIVE(0, 1, 0x074, 0, 2),
+ RK_PINDRIVE(0, 1, 0x074, 1, 4),
+ RK_PINDRIVE(0, 1, 0x074, 2, 8),
+ RK_PINDRIVE(0, 1, 0x074, 3, 12),
+
+ /* GPIO0C (PMU)*/
+ RK_PINDRIVE(0, 2, 0x078, 0, 2),
+ RK_PINDRIVE(0, 2, 0x078, 1, 4),
+ RK_PINDRIVE(0, 2, 0x078, 2, 8),
+ RK_PINDRIVE(0, 2, 0x078, 3, 12),
+
+ /* GPIO1D */
+ RK_PINDRIVE(1, 3, 0x1CC, 0, 2),
+ RK_PINDRIVE(1, 3, 0x1CC, 1, 4),
+ RK_PINDRIVE(1, 3, 0x1CC, 2, 8),
+ RK_PINDRIVE(1, 3, 0x1CC, 3, 12),
+
+ /* GPIO2A */
+ RK_PINDRIVE(2, 0, 0x1D0, 0, 2),
+ RK_PINDRIVE(2, 0, 0x1D0, 1, 4),
+ RK_PINDRIVE(2, 0, 0x1D0, 2, 8),
+ RK_PINDRIVE(2, 0, 0x1D0, 3, 12),
+
+ /* GPIO2B */
+ RK_PINDRIVE(2, 1, 0x1D4, 0, 2),
+ RK_PINDRIVE(2, 1, 0x1D4, 1, 4),
+ RK_PINDRIVE(2, 1, 0x1D4, 2, 8),
+ RK_PINDRIVE(2, 1, 0x1D4, 3, 12),
+
+ /* GPIO2C */
+ RK_PINDRIVE(2, 2, 0x1D8, 0, 2),
+ RK_PINDRIVE(2, 2, 0x1D8, 1, 4),
+ RK_PINDRIVE(2, 2, 0x1D8, 2, 8),
+ RK_PINDRIVE(2, 2, 0x1D8, 3, 12),
+
+ /* GPIO2D */
+ RK_PINDRIVE(2, 3, 0x1DC, 0, 2),
+ RK_PINDRIVE(2, 3, 0x1DC, 1, 4),
+ RK_PINDRIVE(2, 3, 0x1DC, 2, 8),
+ RK_PINDRIVE(2, 3, 0x1DC, 3, 12),
+
+ /* GPIO3A */
+ RK_PINDRIVE(3, 0, 0x1E0, 0, 2),
+ RK_PINDRIVE(3, 0, 0x1E0, 1, 4),
+ RK_PINDRIVE(3, 0, 0x1E0, 2, 8),
+ RK_PINDRIVE(3, 0, 0x1E0, 3, 12),
+
+ /* GPIO3B */
+ RK_PINDRIVE(3, 1, 0x1E4, 0, 2),
+ RK_PINDRIVE(3, 1, 0x1E4, 1, 4),
+ RK_PINDRIVE(3, 1, 0x1E4, 2, 8),
+ RK_PINDRIVE(3, 1, 0x1E4, 3, 12),
+
+ /* GPIO3C */
+ RK_PINDRIVE(3, 2, 0x1E8, 0, 2),
+ RK_PINDRIVE(3, 2, 0x1E8, 1, 4),
+ RK_PINDRIVE(3, 2, 0x1E8, 2, 8),
+ RK_PINDRIVE(3, 2, 0x1E8, 3, 12),
+
+ /* GPIO3D */
+ RK_PINDRIVE(3, 3, 0x1EC, 0, 2),
+ RK_PINDRIVE(3, 3, 0x1EC, 1, 4),
+ RK_PINDRIVE(3, 3, 0x1EC, 2, 8),
+ RK_PINDRIVE(3, 3, 0x1EC, 3, 12),
+
+ /* GPIO4A */
+ RK_PINDRIVE(4, 0, 0x1F0, 0, 2),
+ RK_PINDRIVE(4, 0, 0x1F0, 1, 4),
+ RK_PINDRIVE(4, 0, 0x1F0, 2, 8),
+ RK_PINDRIVE(4, 0, 0x1F0, 3, 12),
+
+ /* GPIO4B */
+ RK_PINDRIVE(4, 1, 0x1F4, 0, 2),
+ RK_PINDRIVE(4, 1, 0x1F4, 1, 4),
+ RK_PINDRIVE(4, 1, 0x1F4, 2, 8),
+ RK_PINDRIVE(4, 1, 0x1F4, 3, 12),
+
+ /* GPIO4C */
+ RK_PINDRIVE(4, 2, 0x1F8, 0, 2),
+ RK_PINDRIVE(4, 2, 0x1F8, 1, 4),
+ RK_PINDRIVE(4, 2, 0x1F8, 2, 8),
+ RK_PINDRIVE(4, 2, 0x1F8, 3, 12),
+
+ /* GPIO4D */
+ RK_PINDRIVE(4, 3, 0x1FC, 0, 2),
+ RK_PINDRIVE(4, 3, 0x1FC, 1, 4),
+ RK_PINDRIVE(4, 3, 0x1FC, 2, 8),
+ RK_PINDRIVE(4, 3, 0x1FC, 3, 12),
+
+ /* GPIO5B */
+ RK_PINDRIVE(5, 1, 0x204, 0, 2),
+ RK_PINDRIVE(5, 1, 0x204, 1, 4),
+ RK_PINDRIVE(5, 1, 0x204, 2, 8),
+ RK_PINDRIVE(5, 1, 0x204, 3, 12),
+
+ /* GPIO5C */
+ RK_PINDRIVE(5, 2, 0x208, 0, 2),
+ RK_PINDRIVE(5, 2, 0x208, 1, 4),
+ RK_PINDRIVE(5, 2, 0x208, 2, 8),
+ RK_PINDRIVE(5, 2, 0x208, 3, 12),
+
+ /* GPIO6A */
+ RK_PINDRIVE(6, 0, 0x210, 0, 2),
+ RK_PINDRIVE(6, 0, 0x210, 1, 4),
+ RK_PINDRIVE(6, 0, 0x210, 2, 8),
+ RK_PINDRIVE(6, 0, 0x210, 3, 12),
+
+ /* GPIO6B */
+ RK_PINDRIVE(6, 1, 0x214, 0, 2),
+ RK_PINDRIVE(6, 1, 0x214, 1, 4),
+ RK_PINDRIVE(6, 1, 0x214, 2, 8),
+ RK_PINDRIVE(6, 1, 0x214, 3, 12),
+
+ /* GPIO6C */
+ RK_PINDRIVE(6, 2, 0x218, 0, 2),
+ RK_PINDRIVE(6, 2, 0x218, 1, 4),
+ RK_PINDRIVE(6, 2, 0x218, 2, 8),
+ RK_PINDRIVE(6, 2, 0x218, 3, 12),
+
+ /* GPIO7A */
+ RK_PINDRIVE(7, 0, 0x220, 0, 2),
+ RK_PINDRIVE(7, 0, 0x220, 1, 4),
+ RK_PINDRIVE(7, 0, 0x220, 2, 8),
+ RK_PINDRIVE(7, 0, 0x220, 3, 12),
+
+ /* GPIO7B */
+ RK_PINDRIVE(7, 1, 0x224, 0, 2),
+ RK_PINDRIVE(7, 1, 0x224, 1, 4),
+ RK_PINDRIVE(7, 1, 0x224, 2, 8),
+ RK_PINDRIVE(7, 1, 0x224, 3, 12),
+
+ /* GPIO7C */
+ RK_PINDRIVE(7, 2, 0x228, 0, 2),
+ RK_PINDRIVE(7, 2, 0x228, 1, 4),
+ RK_PINDRIVE(7, 2, 0x228, 2, 8),
+ RK_PINDRIVE(7, 2, 0x228, 3, 12),
+
+ /* GPIO8A */
+ RK_PINDRIVE(8, 0, 0x230, 0, 2),
+ RK_PINDRIVE(8, 0, 0x230, 1, 4),
+ RK_PINDRIVE(8, 0, 0x230, 2, 8),
+ RK_PINDRIVE(8, 0, 0x230, 3, 12),
+
+ /* GPIO8B */
+ RK_PINDRIVE(8, 1, 0x234, 0, 2),
+ RK_PINDRIVE(8, 1, 0x234, 1, 4),
+ RK_PINDRIVE(8, 1, 0x234, 2, 8),
+ RK_PINDRIVE(8, 1, 0x234, 3, 12),
+};
+
+static uint32_t
+rk3288_get_pd_offset(struct rk_pinctrl_softc *sc, uint32_t bank)
+{
+ if (bank == 0)
+ return (0x064); /* PMU */
+ return (0x130);
+}
+
+static struct syscon *
+rk3288_get_syscon(struct rk_pinctrl_softc *sc, uint32_t bank)
+{
+ if (bank == 0)
+ return (sc->pmu);
+ return (sc->grf);
+}
+
+static int
+rk3288_parse_bias(phandle_t node, int bank)
+{
+ if (OF_hasprop(node, "bias-disable"))
+ return (0);
+ if (OF_hasprop(node, "bias-pull-up"))
+ return (1);
+ if (OF_hasprop(node, "bias-pull-down"))
+ return (2);
+
+ return (-1);
+}
+
+static int
+rk3288_resolv_bias_value(int bank, int bias)
+{
+ int rv = 0;
+
+ if (bias == 1)
+ rv = GPIO_PIN_PULLUP;
+ else if (bias == 2)
+ rv = GPIO_PIN_PULLDOWN;
+
+ return (rv);
+}
+
+static int
+rk3288_get_bias_value(int bank, int bias)
+{
+ int rv = 0;
+
+ if (bias & GPIO_PIN_PULLUP)
+ rv = 1;
+ else if (bias & GPIO_PIN_PULLDOWN)
+ rv = 2;
+
+ return (rv);
+}
+
+struct rk_pinctrl_conf rk3288_conf = {
+ .iomux_conf = rk3288_iomux_bank,
+ .iomux_nbanks = nitems(rk3288_iomux_bank),
+ .pin_fixup = rk3288_pin_fixup,
+ .npin_fixup = nitems(rk3288_pin_fixup),
+ .pin_drive = rk3288_pin_drive,
+ .npin_drive = nitems(rk3288_pin_drive),
+ .gpio_bank = rk3288_gpio_bank,
+ .ngpio_bank = nitems(rk3288_gpio_bank),
+ .get_pd_offset = rk3288_get_pd_offset,
+ .get_syscon = rk3288_get_syscon,
+ .parse_bias = rk3288_parse_bias,
+ .resolv_bias_value = rk3288_resolv_bias_value,
+ .get_bias_value = rk3288_get_bias_value,
+};
+
+static struct rk_pinctrl_gpio rk3328_gpio_bank[] = {
+ RK_GPIO(0, "gpio0"),
+ RK_GPIO(1, "gpio1"),
+ RK_GPIO(2, "gpio2"),
+ RK_GPIO(3, "gpio3"),
+};
+
+static struct rk_pinctrl_bank rk3328_iomux_bank[] = {
+ /* bank sub offs nbits */
+ RK_IOMUX(0, 0, 0x0000, 2),
+ RK_IOMUX(0, 1, 0x0004, 2),
+ RK_IOMUX(0, 2, 0x0008, 2),
+ RK_IOMUX(0, 3, 0x000C, 2),
+ RK_IOMUX(1, 0, 0x0010, 2),
+ RK_IOMUX(1, 1, 0x0014, 2),
+ RK_IOMUX(1, 2, 0x0018, 2),
+ RK_IOMUX(1, 3, 0x001C, 2),
+ RK_IOMUX(2, 0, 0x0020, 2),
+ RK_IOMUX(2, 1, 0x0024, 3),
+ RK_IOMUX(2, 2, 0x002c, 3),
+ RK_IOMUX(2, 3, 0x0034, 2),
+ RK_IOMUX(3, 0, 0x0038, 3),
+ RK_IOMUX(3, 1, 0x0040, 3),
+ RK_IOMUX(3, 2, 0x0048, 2),
+ RK_IOMUX(3, 3, 0x004c, 2),
+};
+
+static struct rk_pinctrl_pin_fixup rk3328_pin_fixup[] = {
+ /* bank pin reg bit mask */
+ RK_PINFIX(2, 12, 0x24, 8, 0x300),
+ RK_PINFIX(2, 15, 0x28, 0, 0x7),
+ RK_PINFIX(2, 23, 0x30, 14, 0x6000),
+};
+
+static struct rk_pinctrl_pin_drive rk3328_pin_drive[] = {
+ /* bank sub offs val ma */
+ RK_PINDRIVE(0, 0, 0x200, 0, 2),
+ RK_PINDRIVE(0, 0, 0x200, 1, 4),
+ RK_PINDRIVE(0, 0, 0x200, 2, 8),
+ RK_PINDRIVE(0, 0, 0x200, 3, 12),
+
+ RK_PINDRIVE(0, 1, 0x204, 0, 2),
+ RK_PINDRIVE(0, 1, 0x204, 1, 4),
+ RK_PINDRIVE(0, 1, 0x204, 2, 8),
+ RK_PINDRIVE(0, 1, 0x204, 3, 12),
+
+ RK_PINDRIVE(0, 2, 0x208, 0, 2),
+ RK_PINDRIVE(0, 2, 0x208, 1, 4),
+ RK_PINDRIVE(0, 2, 0x208, 2, 8),
+ RK_PINDRIVE(0, 2, 0x208, 3, 12),
+
+ RK_PINDRIVE(0, 3, 0x20C, 0, 2),
+ RK_PINDRIVE(0, 3, 0x20C, 1, 4),
+ RK_PINDRIVE(0, 3, 0x20C, 2, 8),
+ RK_PINDRIVE(0, 3, 0x20C, 3, 12),
+
+ RK_PINDRIVE(1, 0, 0x210, 0, 2),
+ RK_PINDRIVE(1, 0, 0x210, 1, 4),
+ RK_PINDRIVE(1, 0, 0x210, 2, 8),
+ RK_PINDRIVE(1, 0, 0x210, 3, 12),
+
+ RK_PINDRIVE(1, 1, 0x214, 0, 2),
+ RK_PINDRIVE(1, 1, 0x214, 1, 4),
+ RK_PINDRIVE(1, 1, 0x214, 2, 8),
+ RK_PINDRIVE(1, 1, 0x214, 3, 12),
+
+ RK_PINDRIVE(1, 2, 0x218, 0, 2),
+ RK_PINDRIVE(1, 2, 0x218, 1, 4),
+ RK_PINDRIVE(1, 2, 0x218, 2, 8),
+ RK_PINDRIVE(1, 2, 0x218, 3, 12),
+
+ RK_PINDRIVE(1, 3, 0x21C, 0, 2),
+ RK_PINDRIVE(1, 3, 0x21C, 1, 4),
+ RK_PINDRIVE(1, 3, 0x21C, 2, 8),
+ RK_PINDRIVE(1, 3, 0x21C, 3, 12),
+
+ RK_PINDRIVE(2, 0, 0x220, 0, 2),
+ RK_PINDRIVE(2, 0, 0x220, 1, 4),
+ RK_PINDRIVE(2, 0, 0x220, 2, 8),
+ RK_PINDRIVE(2, 0, 0x220, 3, 12),
+
+ RK_PINDRIVE(2, 1, 0x224, 0, 2),
+ RK_PINDRIVE(2, 1, 0x224, 1, 4),
+ RK_PINDRIVE(2, 1, 0x224, 2, 8),
+ RK_PINDRIVE(2, 1, 0x224, 3, 12),
+
+ RK_PINDRIVE(2, 2, 0x228, 0, 2),
+ RK_PINDRIVE(2, 2, 0x228, 1, 4),
+ RK_PINDRIVE(2, 2, 0x228, 2, 8),
+ RK_PINDRIVE(2, 2, 0x228, 3, 12),
+
+ RK_PINDRIVE(2, 3, 0x22C, 0, 2),
+ RK_PINDRIVE(2, 3, 0x22C, 1, 4),
+ RK_PINDRIVE(2, 3, 0x22C, 2, 8),
+ RK_PINDRIVE(2, 3, 0x22C, 3, 12),
+
+ RK_PINDRIVE(3, 0, 0x230, 0, 2),
+ RK_PINDRIVE(3, 0, 0x230, 1, 4),
+ RK_PINDRIVE(3, 0, 0x230, 2, 8),
+ RK_PINDRIVE(3, 0, 0x230, 3, 12),
+
+ RK_PINDRIVE(3, 1, 0x234, 0, 2),
+ RK_PINDRIVE(3, 1, 0x234, 1, 4),
+ RK_PINDRIVE(3, 1, 0x234, 2, 8),
+ RK_PINDRIVE(3, 1, 0x234, 3, 12),
+
+ RK_PINDRIVE(3, 2, 0x238, 0, 2),
+ RK_PINDRIVE(3, 2, 0x238, 1, 4),
+ RK_PINDRIVE(3, 2, 0x238, 2, 8),
+ RK_PINDRIVE(3, 2, 0x238, 3, 12),
+
+ RK_PINDRIVE(3, 3, 0x23C, 0, 2),
+ RK_PINDRIVE(3, 3, 0x23C, 1, 4),
+ RK_PINDRIVE(3, 3, 0x23C, 2, 8),
+ RK_PINDRIVE(3, 3, 0x23C, 3, 12),
+};
+
+static uint32_t
+rk3328_get_pd_offset(struct rk_pinctrl_softc *sc, uint32_t bank)
+{
+ return (0x100);
+}
+
+static struct syscon *
+rk3328_get_syscon(struct rk_pinctrl_softc *sc, uint32_t bank)
+{
+ return (sc->grf);
+}
+
+struct rk_pinctrl_conf rk3328_conf = {
+ .iomux_conf = rk3328_iomux_bank,
+ .iomux_nbanks = nitems(rk3328_iomux_bank),
+ .pin_fixup = rk3328_pin_fixup,
+ .npin_fixup = nitems(rk3328_pin_fixup),
+ .pin_drive = rk3328_pin_drive,
+ .npin_drive = nitems(rk3328_pin_drive),
+ .gpio_bank = rk3328_gpio_bank,
+ .ngpio_bank = nitems(rk3328_gpio_bank),
+ .get_pd_offset = rk3328_get_pd_offset,
+ .get_syscon = rk3328_get_syscon,
+ .parse_bias = rk3288_parse_bias,
+ .resolv_bias_value = rk3288_resolv_bias_value,
+ .get_bias_value = rk3288_get_bias_value,
+};
+
+static struct rk_pinctrl_gpio rk3399_gpio_bank[] = {
+ RK_GPIO(0, "gpio0"),
+ RK_GPIO(1, "gpio1"),
+ RK_GPIO(2, "gpio2"),
+ RK_GPIO(3, "gpio3"),
+ RK_GPIO(4, "gpio4"),
+};
+
+static struct rk_pinctrl_bank rk3399_iomux_bank[] = {
+ /* bank sub offs nbits */
+ RK_IOMUX(0, 0, 0x0000, 2),
+ RK_IOMUX(0, 1, 0x0004, 2),
+ RK_IOMUX(0, 2, 0x0008, 2),
+ RK_IOMUX(0, 3, 0x000C, 2),
+ RK_IOMUX(1, 0, 0x0010, 2),
+ RK_IOMUX(1, 1, 0x0014, 2),
+ RK_IOMUX(1, 2, 0x0018, 2),
+ RK_IOMUX(1, 3, 0x001C, 2),
+ RK_IOMUX(2, 0, 0xE000, 2),
+ RK_IOMUX(2, 1, 0xE004, 2),
+ RK_IOMUX(2, 2, 0xE008, 2),
+ RK_IOMUX(2, 3, 0xE00C, 2),
+ RK_IOMUX(3, 0, 0xE010, 2),
+ RK_IOMUX(3, 1, 0xE014, 2),
+ RK_IOMUX(3, 2, 0xE018, 2),
+ RK_IOMUX(3, 3, 0xE01C, 2),
+ RK_IOMUX(4, 0, 0xE020, 2),
+ RK_IOMUX(4, 1, 0xE024, 2),
+ RK_IOMUX(4, 2, 0xE028, 2),
+ RK_IOMUX(4, 3, 0xE02C, 2),
+};
+
+static struct rk_pinctrl_pin_fixup rk3399_pin_fixup[] = {};
+
+static struct rk_pinctrl_pin_drive rk3399_pin_drive[] = {
+ /* bank sub offs val ma */
+ /* GPIO0A */
+ RK_PINDRIVE(0, 0, 0x80, 0, 5),
+ RK_PINDRIVE(0, 0, 0x80, 1, 10),
+ RK_PINDRIVE(0, 0, 0x80, 2, 15),
+ RK_PINDRIVE(0, 0, 0x80, 3, 20),
+
+ /* GPIO0B */
+ RK_PINDRIVE(0, 1, 0x88, 0, 5),
+ RK_PINDRIVE(0, 1, 0x88, 1, 10),
+ RK_PINDRIVE(0, 1, 0x88, 2, 15),
+ RK_PINDRIVE(0, 1, 0x88, 3, 20),
+
+ /* GPIO1A */
+ RK_PINDRIVE(1, 0, 0xA0, 0, 3),
+ RK_PINDRIVE(1, 0, 0xA0, 1, 6),
+ RK_PINDRIVE(1, 0, 0xA0, 2, 9),
+ RK_PINDRIVE(1, 0, 0xA0, 3, 12),
+
+ /* GPIO1B */
+ RK_PINDRIVE(1, 1, 0xA8, 0, 3),
+ RK_PINDRIVE(1, 1, 0xA8, 1, 6),
+ RK_PINDRIVE(1, 1, 0xA8, 2, 9),
+ RK_PINDRIVE(1, 1, 0xA8, 3, 12),
+
+ /* GPIO1C */
+ RK_PINDRIVE(1, 2, 0xB0, 0, 3),
+ RK_PINDRIVE(1, 2, 0xB0, 1, 6),
+ RK_PINDRIVE(1, 2, 0xB0, 2, 9),
+ RK_PINDRIVE(1, 2, 0xB0, 3, 12),
+
+ /* GPIO1D */
+ RK_PINDRIVE(1, 3, 0xB8, 0, 3),
+ RK_PINDRIVE(1, 3, 0xB8, 1, 6),
+ RK_PINDRIVE(1, 3, 0xB8, 2, 9),
+ RK_PINDRIVE(1, 3, 0xB8, 3, 12),
+};
+
+static uint32_t
+rk3399_get_pd_offset(struct rk_pinctrl_softc *sc, uint32_t bank)
+{
+ if (bank < 2)
+ return (0x40);
+
+ return (0xE040);
+}
+
+static struct syscon *
+rk3399_get_syscon(struct rk_pinctrl_softc *sc, uint32_t bank)
+{
+ if (bank < 2)
+ return (sc->pmu);
+
+ return (sc->grf);
+}
+
+static int
+rk3399_parse_bias(phandle_t node, int bank)
+{
+ int pullup, pulldown;
+
+ if (OF_hasprop(node, "bias-disable"))
+ return (0);
+
+ switch (bank) {
+ case 0:
+ case 2:
+ pullup = 3;
+ pulldown = 1;
+ break;
+ case 1:
+ case 3:
+ case 4:
+ pullup = 1;
+ pulldown = 2;
+ break;
+ }
+
+ if (OF_hasprop(node, "bias-pull-up"))
+ return (pullup);
+ if (OF_hasprop(node, "bias-pull-down"))
+ return (pulldown);
+
+ return (-1);
+}
+
+static int
+rk3399_resolv_bias_value(int bank, int bias)
+{
+ int rv = 0;
+
+ switch (bank) {
+ case 0:
+ case 2:
+ if (bias == 3)
+ rv = GPIO_PIN_PULLUP;
+ else if (bias == 1)
+ rv = GPIO_PIN_PULLDOWN;
+ break;
+ case 1:
+ case 3:
+ case 4:
+ if (bias == 1)
+ rv = GPIO_PIN_PULLUP;
+ else if (bias == 2)
+ rv = GPIO_PIN_PULLDOWN;
+ break;
+ }
+
+ return (rv);
+}
+
+static int
+rk3399_get_bias_value(int bank, int bias)
+{
+ int rv = 0;
+
+ switch (bank) {
+ case 0:
+ case 2:
+ if (bias & GPIO_PIN_PULLUP)
+ rv = 3;
+ else if (bias & GPIO_PIN_PULLDOWN)
+ rv = 1;
+ break;
+ case 1:
+ case 3:
+ case 4:
+ if (bias & GPIO_PIN_PULLUP)
+ rv = 1;
+ else if (bias & GPIO_PIN_PULLDOWN)
+ rv = 2;
+ break;
+ }
+
+ return (rv);
+}
+
+struct rk_pinctrl_conf rk3399_conf = {
+ .iomux_conf = rk3399_iomux_bank,
+ .iomux_nbanks = nitems(rk3399_iomux_bank),
+ .pin_fixup = rk3399_pin_fixup,
+ .npin_fixup = nitems(rk3399_pin_fixup),
+ .pin_drive = rk3399_pin_drive,
+ .npin_drive = nitems(rk3399_pin_drive),
+ .gpio_bank = rk3399_gpio_bank,
+ .ngpio_bank = nitems(rk3399_gpio_bank),
+ .get_pd_offset = rk3399_get_pd_offset,
+ .get_syscon = rk3399_get_syscon,
+ .parse_bias = rk3399_parse_bias,
+ .resolv_bias_value = rk3399_resolv_bias_value,
+ .get_bias_value = rk3399_get_bias_value,
+};
+
+static struct ofw_compat_data compat_data[] = {
+ {"rockchip,rk3288-pinctrl", (uintptr_t)&rk3288_conf},
+ {"rockchip,rk3328-pinctrl", (uintptr_t)&rk3328_conf},
+ {"rockchip,rk3399-pinctrl", (uintptr_t)&rk3399_conf},
+ {NULL, 0}
+};
+
+static int
+rk_pinctrl_parse_drive(struct rk_pinctrl_softc *sc, phandle_t node,
+ uint32_t bank, uint32_t subbank, uint32_t *drive, uint32_t *offset)
+{
+ uint32_t value;
+ int i;
+
+ if (OF_getencprop(node, "drive-strength", &value,
+ sizeof(value)) != 0)
+ return (-1);
+
+ /* Map to the correct drive value */
+ for (i = 0; i < sc->conf->npin_drive; i++) {
+ if (sc->conf->pin_drive[i].bank != bank &&
+ sc->conf->pin_drive[i].subbank != subbank)
+ continue;
+ if (sc->conf->pin_drive[i].ma == value) {
+ *drive = sc->conf->pin_drive[i].value;
+ return (0);
+ }
+ }
+
+ return (-1);
+}
+
+static void
+rk_pinctrl_get_fixup(struct rk_pinctrl_softc *sc, uint32_t bank, uint32_t pin,
+ uint32_t *reg, uint32_t *mask, uint32_t *bit)
+{
+ int i;
+
+ for (i = 0; i < sc->conf->npin_fixup; i++)
+ if (sc->conf->pin_fixup[i].bank == bank &&
+ sc->conf->pin_fixup[i].pin == pin) {
+ *reg = sc->conf->pin_fixup[i].reg;
+ *mask = sc->conf->pin_fixup[i].mask;
+ *bit = sc->conf->pin_fixup[i].bit;
+
+ return;
+ }
+}
+
+static int
+rk_pinctrl_handle_io(struct rk_pinctrl_softc *sc, phandle_t node, uint32_t bank,
+uint32_t pin)
+{
+ bool have_cfg, have_direction, have_value;
+ uint32_t direction_value, pin_value;
+ struct rk_pinctrl_gpio *gpio;
+ int i, rv;
+
+ have_cfg = false;
+ have_direction = false;
+ have_value = false;
+
+ /* Get (subset of) GPIO pin properties. */
+ if (OF_hasprop(node, "output-disable")) {
+ have_cfg = true;
+ have_direction = true;
+ direction_value = GPIO_PIN_INPUT;
+ }
+
+ if (OF_hasprop(node, "output-enable")) {
+ have_cfg = true;
+ have_direction = true;
+ direction_value = GPIO_PIN_OUTPUT;
+ }
+
+ if (OF_hasprop(node, "output-low")) {
+ have_cfg = true;
+ have_direction = true;
+ direction_value = GPIO_PIN_OUTPUT;
+ have_value = true;
+ pin_value = 0;
+ }
+
+ if (OF_hasprop(node, "output-high")) {
+ have_cfg = true;
+ have_direction = true;
+ direction_value = GPIO_PIN_OUTPUT;
+ have_value = true;
+ pin_value = 1;
+ }
+
+ if (!have_cfg)
+ return (0);
+
+ /* Find gpio */
+ gpio = NULL;
+ for (i = 0; i < sc->conf->ngpio_bank; i++) {
+ if (bank == sc->conf->gpio_bank[i].bank) {
+ gpio = sc->conf->gpio_bank + i;
+ break;
+ }
+ }
+ if (gpio == NULL) {
+ device_printf(sc->dev, "Cannot find GPIO bank %d\n", bank);
+ return (ENXIO);
+ }
+ if (gpio->gpio_dev == NULL) {
+ device_printf(sc->dev,
+ "No GPIO subdevice found for bank %d\n", bank);
+ return (ENXIO);
+ }
+
+ rv = 0;
+ if (have_value) {
+ rv = GPIO_PIN_SET(gpio->gpio_dev, pin, pin_value);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot set GPIO value: %d\n",
+ rv);
+ return (rv);
+ }
+ }
+
+ if (have_direction) {
+ rv = GPIO_PIN_SETFLAGS(gpio->gpio_dev, pin, direction_value);
+ if (rv != 0) {
+ device_printf(sc->dev,
+ "Cannot set GPIO direction: %d\n", rv);
+ return (rv);
+ }
+ }
+
+ return (0);
+}
+
+static void
+rk_pinctrl_configure_pin(struct rk_pinctrl_softc *sc, uint32_t *pindata)
+{
+ phandle_t pin_conf;
+ struct syscon *syscon;
+ uint32_t bank, subbank, pin, function;
+ uint32_t bit, mask, reg, drive;
+ int i, rv, bias;
+
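+ /*
+ * Each entry of the "rockchip,pins" property is a 4-tuple:
+ * <bank pin function &pin-config-node>.
+ */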
+ bank = pindata[0];
+ pin = pindata[1];
+ function = pindata[2];
+ pin_conf = OF_node_from_xref(pindata[3]);
+ subbank = pin / 8;
+
+ for (i = 0; i < sc->conf->iomux_nbanks; i++)
+ if (sc->conf->iomux_conf[i].bank == bank &&
+ sc->conf->iomux_conf[i].subbank == subbank)
+ break;
+
+ if (i == sc->conf->iomux_nbanks) {
+ device_printf(sc->dev, "Unknown pin %d in bank %d\n", pin,
+ bank);
+ return;
+ }
+
+ /* Find syscon */
+ syscon = sc->conf->get_syscon(sc, bank);
+
+ /* Setup GPIO properties first */
+ rv = rk_pinctrl_handle_io(sc, pin_conf, bank, pin);
+
+ /* Then pin pull-up/down */
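+ /*
+ * The pull registers hold 2 bits per pin, one 32-bit register per
+ * subbank of 8 pins, four registers per bank, starting at the SoC
+ * specific pull offset.
+ */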
+ bias = sc->conf->parse_bias(pin_conf, bank);
+ if (bias >= 0) {
+ reg = sc->conf->get_pd_offset(sc, bank);
+ reg += bank * 0x10 + ((pin / 8) * 0x4);
+ bit = (pin % 8) * 2;
+ mask = (0x3 << bit);
+ SYSCON_MODIFY_4(syscon, reg, mask, bias << bit | (mask << 16));
+ }
+
+ /* Then drive strength */
+ rv = rk_pinctrl_parse_drive(sc, pin_conf, bank, subbank, &drive, &reg);
+ if (rv == 0) {
+ bit = (pin % 8) * 2;
+ mask = (0x3 << bit);
+ SYSCON_MODIFY_4(syscon, reg, mask, drive << bit | (mask << 16));
+ }
+
+ /* Finally set the pin function */
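+ /*
+ * IOMUX registers pack 2, 3 or 4 bits per pin, so a subbank of 8 pins
+ * spans one or two 32-bit registers depending on the field width.
+ */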
+ reg = sc->conf->iomux_conf[i].offset;
+ switch (sc->conf->iomux_conf[i].nbits) {
+ case 4:
+ if ((pin % 8) >= 4)
+ reg += 0x4;
+ bit = (pin % 4) * 4;
+ mask = (0xF << bit);
+ break;
+ case 3:
+ if ((pin % 8) >= 5)
+ reg += 4;
+ bit = (pin % 8 % 5) * 3;
+ mask = (0x7 << bit);
+ break;
+ case 2:
+ bit = (pin % 8) * 2;
+ mask = (0x3 << bit);
+ break;
+ default:
+ device_printf(sc->dev,
+ "Unknown pin stride width %d in bank %d\n",
+ sc->conf->iomux_conf[i].nbits, bank);
+ return;
+ }
+ rk_pinctrl_get_fixup(sc, bank, pin, &reg, &mask, &bit);
+
+ /*
+ * NOTE: Not all syscon registers use the hi-word write mask, so the
+ * register modify method is used here.
+ * XXX We should not pass the write mask to syscon registers that lack
+ * the hi-word write mask.
+ */
+ SYSCON_MODIFY_4(syscon, reg, mask, function << bit | (mask << 16));
+}
+
+static int
+rk_pinctrl_configure_pins(device_t dev, phandle_t cfgxref)
+{
+ struct rk_pinctrl_softc *sc;
+ phandle_t node;
+ uint32_t *pins;
+ int i, npins;
+
+ sc = device_get_softc(dev);
+ node = OF_node_from_xref(cfgxref);
+
+ npins = OF_getencprop_alloc_multi(node, "rockchip,pins", sizeof(*pins),
+ (void **)&pins);
+ if (npins <= 0)
+ return (ENOENT);
+
+ for (i = 0; i != npins; i += 4)
+ rk_pinctrl_configure_pin(sc, pins + i);
+
+ return (0);
+}
+
+static int
+rk_pinctrl_is_gpio_locked(struct rk_pinctrl_softc *sc, struct syscon *syscon,
+ int bank, uint32_t pin, bool *is_gpio)
+{
+ uint32_t subbank, bit, mask, reg;
+ uint32_t pinfunc;
+ int i;
+
+ RK_PINCTRL_LOCK_ASSERT(sc);
+
+ subbank = pin / 8;
+ *is_gpio = false;
+
+ for (i = 0; i < sc->conf->iomux_nbanks; i++)
+ if (sc->conf->iomux_conf[i].bank == bank &&
+ sc->conf->iomux_conf[i].subbank == subbank)
+ break;
+
+ if (i == sc->conf->iomux_nbanks) {
+ device_printf(sc->dev, "Unknown pin %d in bank %d\n", pin,
+ bank);
+ return (EINVAL);
+ }
+
+ syscon = sc->conf->get_syscon(sc, bank);
+
+ /* Parse pin function */
+ reg = sc->conf->iomux_conf[i].offset;
+ switch (sc->conf->iomux_conf[i].nbits) {
+ case 4:
+ if ((pin % 8) >= 4)
+ reg += 0x4;
+ bit = (pin % 4) * 4;
+ mask = (0xF << bit);
+ break;
+ case 3:
+ if ((pin % 8) >= 5)
+ reg += 4;
+ bit = (pin % 8 % 5) * 3;
+ mask = (0x7 << bit);
+ break;
+ case 2:
+ bit = (pin % 8) * 2;
+ mask = (0x3 << bit);
+ break;
+ default:
+ device_printf(sc->dev,
+ "Unknown pin stride width %d in bank %d\n",
+ sc->conf->iomux_conf[i].nbits, bank);
+ return (EINVAL);
+ }
+ rk_pinctrl_get_fixup(sc, bank, pin, &reg, &mask, &bit);
+
+ reg = SYSCON_READ_4(syscon, reg);
+ pinfunc = (reg & mask) >> bit;
+
+ /* Test if the pin is in gpio mode */
+ if (pinfunc == 0)
+ *is_gpio = true;
+
+ return (0);
+}
+
+static int
+rk_pinctrl_get_bank(struct rk_pinctrl_softc *sc, device_t gpio, int *bank)
+{
+ int i;
+
+ for (i = 0; i < sc->conf->ngpio_bank; i++) {
+ if (sc->conf->gpio_bank[i].gpio_dev == gpio)
+ break;
+ }
+ if (i == sc->conf->ngpio_bank)
+ return (EINVAL);
+
+ *bank = i;
+ return (0);
+}
+
+static int
+rk_pinctrl_is_gpio(device_t pinctrl, device_t gpio, uint32_t pin, bool *is_gpio)
+{
+ struct rk_pinctrl_softc *sc;
+ struct syscon *syscon;
+ int bank;
+ int rv;
+
+ sc = device_get_softc(pinctrl);
+ RK_PINCTRL_LOCK(sc);
+
+ rv = rk_pinctrl_get_bank(sc, gpio, &bank);
+ if (rv != 0)
+ goto done;
+ syscon = sc->conf->get_syscon(sc, bank);
+ rv = rk_pinctrl_is_gpio_locked(sc, syscon, bank, pin, is_gpio);
+
+done:
+ RK_PINCTRL_UNLOCK(sc);
+
+ return (rv);
+}
+
+static int
+rk_pinctrl_get_flags(device_t pinctrl, device_t gpio, uint32_t pin,
+ uint32_t *flags)
+{
+ struct rk_pinctrl_softc *sc;
+ struct syscon *syscon;
+ uint32_t reg, mask, bit;
+ uint32_t bias;
+ int bank;
+ int rv = 0;
+ bool is_gpio;
+
+ sc = device_get_softc(pinctrl);
+ RK_PINCTRL_LOCK(sc);
+
+ rv = rk_pinctrl_get_bank(sc, gpio, &bank);
+ if (rv != 0)
+ goto done;
+ syscon = sc->conf->get_syscon(sc, bank);
+ rv = rk_pinctrl_is_gpio_locked(sc, syscon, bank, pin, &is_gpio);
+ if (rv != 0)
+ goto done;
+ if (!is_gpio) {
+ rv = EINVAL;
+ goto done;
+ }
+ /* Get the pullup/pulldown configuration */
+ reg = sc->conf->get_pd_offset(sc, bank);
+ reg += bank * 0x10 + ((pin / 8) * 0x4);
+ bit = (pin % 8) * 2;
+ mask = (0x3 << bit) << 16;
+ reg = SYSCON_READ_4(syscon, reg);
+ reg = (reg >> bit) & 0x3;
+ bias = sc->conf->resolv_bias_value(bank, reg);
+ *flags = bias;
+
+done:
+ RK_PINCTRL_UNLOCK(sc);
+ return (rv);
+}
+
+static int
+rk_pinctrl_set_flags(device_t pinctrl, device_t gpio, uint32_t pin,
+ uint32_t flags)
+{
+ struct rk_pinctrl_softc *sc;
+ struct syscon *syscon;
+ uint32_t bit, mask, reg;
+ uint32_t bias;
+ int bank;
+ int rv = 0;
+ bool is_gpio;
+
+ sc = device_get_softc(pinctrl);
+ RK_PINCTRL_LOCK(sc);
+
+ rv = rk_pinctrl_get_bank(sc, gpio, &bank);
+ if (rv != 0)
+ goto done;
+ syscon = sc->conf->get_syscon(sc, bank);
+ rv = rk_pinctrl_is_gpio_locked(sc, syscon, bank, pin, &is_gpio);
+ if (rv != 0)
+ goto done;
+ if (!is_gpio) {
+ rv = EINVAL;
+ goto done;
+ }
+ /* Get the pullup/pulldown configuration */
+ reg = sc->conf->get_pd_offset(sc, bank);
+ reg += bank * 0x10 + ((pin / 8) * 0x4);
+ bit = (pin % 8) * 2;
+ mask = (0x3 << bit);
+ bias = sc->conf->get_bias_value(bank, flags);
+ SYSCON_MODIFY_4(syscon, reg, mask, bias << bit | (mask << 16));
+
+done:
+ RK_PINCTRL_UNLOCK(sc);
+ return (rv);
+}
+
+static int
+rk_pinctrl_register_gpio(struct rk_pinctrl_softc *sc, char *gpio_name,
+ device_t gpio_dev)
+{
+ int i;
+
+ for (i = 0; i < sc->conf->ngpio_bank; i++) {
+ if (strcmp(gpio_name, sc->conf->gpio_bank[i].gpio_name) != 0)
+ continue;
+ sc->conf->gpio_bank[i].gpio_dev = gpio_dev;
+ return(0);
+ }
+ return (ENXIO);
+}
+
+static int
+rk_pinctrl_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "RockChip Pinctrl controller");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+rk_pinctrl_attach(device_t dev)
+{
+ struct rk_pinctrl_softc *sc;
+ phandle_t node;
+ device_t cdev;
+ char *gpio_name, *eptr;
+ int rv;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ node = ofw_bus_get_node(dev);
+
+ if (OF_hasprop(node, "rockchip,grf") &&
+ syscon_get_by_ofw_property(dev, node,
+ "rockchip,grf", &sc->grf) != 0) {
+ device_printf(dev, "cannot get grf driver handle\n");
+ return (ENXIO);
+ }
+
+ /* RK3399 and RK3288 have banks in the PMU. RK3328 does not have a PMU. */
+ if (ofw_bus_node_is_compatible(node, "rockchip,rk3399-pinctrl") ||
+ ofw_bus_node_is_compatible(node, "rockchip,rk3288-pinctrl")) {
+ if (OF_hasprop(node, "rockchip,pmu") &&
+ syscon_get_by_ofw_property(dev, node,
+ "rockchip,pmu", &sc->pmu) != 0) {
+ device_printf(dev, "cannot get pmu driver handle\n");
+ return (ENXIO);
+ }
+ }
+
+ mtx_init(&sc->mtx, "rk pinctrl", "pinctrl", MTX_SPIN);
+
+ sc->conf = (struct rk_pinctrl_conf *)ofw_bus_search_compatible(dev,
+ compat_data)->ocd_data;
+
+ fdt_pinctrl_register(dev, "rockchip,pins");
+
+ simplebus_init(dev, node);
+
+ bus_generic_probe(dev);
+
+ /* Attach child devices */
+ for (node = OF_child(node); node > 0; node = OF_peer(node)) {
+ if (!ofw_bus_node_is_compatible(node, "rockchip,gpio-bank"))
+ continue;
+
+ rv = OF_getprop_alloc(node, "name", (void **)&gpio_name);
+ if (rv <= 0) {
+ device_printf(sc->dev, "Cannot GPIO subdevice name.\n");
+ continue;
+ }
+
+ cdev = simplebus_add_device(dev, node, 0, NULL, -1, NULL);
+ if (cdev == NULL) {
+ device_printf(dev, " Cannot add GPIO subdevice: %s\n",
+ gpio_name);
+ OF_prop_free(gpio_name);
+ continue;
+ }
+
+ rv = device_probe_and_attach(cdev);
+ if (rv != 0) {
+ device_printf(sc->dev,
+ "Cannot attach GPIO subdevice: %s\n", gpio_name);
+ OF_prop_free(gpio_name);
+ continue;
+ }
+
+ /* Get the device name from the "name" property */
+ eptr = gpio_name;
+ strsep(&eptr, "@");
+ if (gpio_name == eptr) {
+ device_printf(sc->dev,
+ "Unrecognized format of GPIO subdevice name: %s\n",
+ gpio_name);
+ OF_prop_free(gpio_name);
+ continue;
+ }
+ rv = rk_pinctrl_register_gpio(sc, gpio_name, cdev);
+ if (rv != 0) {
+ device_printf(sc->dev,
+ "Cannot register GPIO subdevice %s: %d\n",
+ gpio_name, rv);
+ OF_prop_free(gpio_name);
+ continue;
+ }
+ OF_prop_free(gpio_name);
+ }
+
+ fdt_pinctrl_configure_tree(dev);
+
+ return (bus_generic_attach(dev));
+}
+
+static int
+rk_pinctrl_detach(device_t dev)
+{
+
+ return (EBUSY);
+}
+
+static device_method_t rk_pinctrl_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, rk_pinctrl_probe),
+ DEVMETHOD(device_attach, rk_pinctrl_attach),
+ DEVMETHOD(device_detach, rk_pinctrl_detach),
+
+ /* fdt_pinctrl interface */
+ DEVMETHOD(fdt_pinctrl_configure, rk_pinctrl_configure_pins),
+ DEVMETHOD(fdt_pinctrl_is_gpio, rk_pinctrl_is_gpio),
+ DEVMETHOD(fdt_pinctrl_get_flags, rk_pinctrl_get_flags),
+ DEVMETHOD(fdt_pinctrl_set_flags, rk_pinctrl_set_flags),
+
+ DEVMETHOD_END
+};
+
+static devclass_t rk_pinctrl_devclass;
+
+DEFINE_CLASS_1(rk_pinctrl, rk_pinctrl_driver, rk_pinctrl_methods,
+ sizeof(struct rk_pinctrl_softc), simplebus_driver);
+
+EARLY_DRIVER_MODULE(rk_pinctrl, simplebus, rk_pinctrl_driver,
+ rk_pinctrl_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
+MODULE_VERSION(rk_pinctrl, 1);
diff --git a/sys/arm64/rockchip/rk_pwm.c b/sys/arm64/rockchip/rk_pwm.c
new file mode 100644
index 000000000000..2244700ee98b
--- /dev/null
+++ b/sys/arm64/rockchip/rk_pwm.c
@@ -0,0 +1,403 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2018 Emmanuel Vadot <manu@FreeBSD.org>
+ * Copyright (c) 2019 Brandon Bergren <git@bdragon.rtk0.net>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/resource.h>
+#include <machine/bus.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/extres/clk/clk.h>
+
+#include "pwmbus_if.h"
+
+/* Register offsets. */
+#define RK_PWM_COUNTER 0x00
+#define RK_PWM_PERIOD 0x04
+#define RK_PWM_DUTY 0x08
+#define RK_PWM_CTRL 0x0c
+
+#define SET(reg,mask,val) reg = ((reg & ~mask) | val)
+
+#define RK_PWM_CTRL_ENABLE_MASK (1 << 0)
+#define RK_PWM_CTRL_ENABLED (1 << 0)
+#define RK_PWM_CTRL_DISABLED (0)
+
+#define RK_PWM_CTRL_MODE_MASK (3 << 1)
+#define RK_PWM_CTRL_MODE_ONESHOT (0)
+#define RK_PWM_CTRL_MODE_CONTINUOUS (1 << 1)
+#define RK_PWM_CTRL_MODE_CAPTURE (1 << 2)
+
+#define RK_PWM_CTRL_DUTY_MASK (1 << 3)
+#define RK_PWM_CTRL_DUTY_POSITIVE (1 << 3)
+#define RK_PWM_CTRL_DUTY_NEGATIVE (0)
+
+#define RK_PWM_CTRL_INACTIVE_MASK (1 << 4)
+#define RK_PWM_CTRL_INACTIVE_POSITIVE (1 << 4)
+#define RK_PWM_CTRL_INACTIVE_NEGATIVE (0)
+
+/* PWM Output Alignment */
+#define RK_PWM_CTRL_ALIGN_MASK (1 << 5)
+#define RK_PWM_CTRL_ALIGN_CENTER (1 << 5)
+#define RK_PWM_CTRL_ALIGN_LEFT (0)
+
+/* Low power mode: disable prescaler when inactive */
+#define RK_PWM_CTRL_LP_MASK (1 << 8)
+#define RK_PWM_CTRL_LP_ENABLE (1 << 8)
+#define RK_PWM_CTRL_LP_DISABLE (0)
+
+/* Clock source: bypass the scaler or not */
+#define RK_PWM_CTRL_CLOCKSRC_MASK (1 << 9)
+#define RK_PWM_CTRL_CLOCKSRC_NONSCALED (0)
+#define RK_PWM_CTRL_CLOCKSRC_SCALED (1 << 9)
+
+#define RK_PWM_CTRL_PRESCALE_MASK (7 << 12)
+#define RK_PWM_CTRL_PRESCALE_SHIFT 12
+
+#define RK_PWM_CTRL_SCALE_MASK (0xFF << 16)
+#define RK_PWM_CTRL_SCALE_SHIFT 16
+
+#define RK_PWM_CTRL_REPEAT_MASK (0xFF << 24)
+#define RK_PWM_CTRL_REPEAT_SHIFT 24
+
+#define NS_PER_SEC 1000000000
+
+static struct ofw_compat_data compat_data[] = {
+ { "rockchip,rk3399-pwm", 1 },
+ { NULL, 0 }
+};
+
+static struct resource_spec rk_pwm_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { -1, 0 }
+};
+
+struct rk_pwm_softc {
+ device_t dev;
+ device_t busdev;
+ clk_t clk;
+ struct resource *res;
+
+ uint64_t clk_freq;
+ unsigned int period;
+ unsigned int duty;
+ uint32_t flags;
+ uint8_t prescaler;
+ uint8_t scaler;
+ bool using_scaler;
+ bool enabled;
+};
+
+#define RK_PWM_READ(sc, reg) bus_read_4((sc)->res, (reg))
+#define RK_PWM_WRITE(sc, reg, val) bus_write_4((sc)->res, (reg), (val))
+
+static int rk_pwm_probe(device_t dev);
+static int rk_pwm_attach(device_t dev);
+static int rk_pwm_detach(device_t dev);
+
+static int
+rk_pwm_probe(device_t dev)
+{
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
+ return (ENXIO);
+
+ device_set_desc(dev, "Rockchip PWM");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+rk_pwm_attach(device_t dev)
+{
+ struct rk_pwm_softc *sc;
+ phandle_t node;
+ uint64_t clk_freq;
+ uint32_t reg;
+ int error;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ error = clk_get_by_ofw_index(dev, 0, 0, &sc->clk);
+ if (error != 0) {
+ device_printf(dev, "cannot get clock\n");
+ goto fail;
+ }
+ error = clk_enable(sc->clk);
+ if (error != 0) {
+ device_printf(dev, "cannot enable clock\n");
+ goto fail;
+ }
+ error = clk_get_freq(sc->clk, &sc->clk_freq);
+ if (error != 0) {
+ device_printf(dev, "cannot get base frequency\n");
+ goto fail;
+ }
+
+ if (bus_alloc_resources(dev, rk_pwm_spec, &sc->res) != 0) {
+ device_printf(dev, "cannot allocate resources for device\n");
+ error = ENXIO;
+ goto fail;
+ }
+
+ /* Read the configuration left by U-Boot */
+ reg = RK_PWM_READ(sc, RK_PWM_CTRL);
+ if ((reg & RK_PWM_CTRL_ENABLE_MASK) == RK_PWM_CTRL_ENABLED)
+ sc->enabled = true;
+
+ reg = RK_PWM_READ(sc, RK_PWM_CTRL);
+ reg &= RK_PWM_CTRL_PRESCALE_MASK;
+ sc->prescaler = reg >> RK_PWM_CTRL_PRESCALE_SHIFT;
+
+ reg = RK_PWM_READ(sc, RK_PWM_CTRL);
+ reg &= RK_PWM_CTRL_SCALE_MASK;
+ sc->scaler = reg >> RK_PWM_CTRL_SCALE_SHIFT;
+
+ reg = RK_PWM_READ(sc, RK_PWM_CTRL);
+ if ((reg & RK_PWM_CTRL_CLOCKSRC_MASK) == RK_PWM_CTRL_CLOCKSRC_SCALED)
+ sc->using_scaler = true;
+ else
+ sc->using_scaler = false;
+
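+ /*
+ * Compute the effective PWM clock U-Boot left configured: the input
+ * clock is divided by 2^prescale and, when the scaled source is
+ * selected, further divided by 2 * scale (512 when scale is 0).
+ */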
+ clk_freq = sc->clk_freq / (1 << sc->prescaler);
+
+ if (sc->using_scaler) {
+ if (sc->scaler == 0)
+ clk_freq /= 512;
+ else
+ clk_freq /= (sc->scaler * 2);
+ }
+
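+ /*
+ * The PERIOD and DUTY registers hold counts of the PWM clock; convert
+ * them back to nanoseconds.
+ */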
+ reg = RK_PWM_READ(sc, RK_PWM_PERIOD);
+ sc->period = NS_PER_SEC /
+ (clk_freq / reg);
+ reg = RK_PWM_READ(sc, RK_PWM_DUTY);
+ sc->duty = NS_PER_SEC /
+ (clk_freq / reg);
+
+ node = ofw_bus_get_node(dev);
+ OF_device_register_xref(OF_xref_from_node(node), dev);
+
+ sc->busdev = device_add_child(dev, "pwmbus", -1);
+
+ return (bus_generic_attach(dev));
+
+fail:
+ rk_pwm_detach(dev);
+ return (error);
+}
+
+static int
+rk_pwm_detach(device_t dev)
+{
+ struct rk_pwm_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ bus_generic_detach(sc->dev);
+
+ bus_release_resources(dev, rk_pwm_spec, &sc->res);
+
+ return (0);
+}
+
+static phandle_t
+rk_pwm_get_node(device_t bus, device_t dev)
+{
+
+ /*
+ * Share our controller node with our pwmbus child; it instantiates
+ * devices by walking the children contained within our node.
+ */
+ return ofw_bus_get_node(bus);
+}
+
+static int
+rk_pwm_channel_count(device_t dev, u_int *nchannel)
+{
+ /*
+ * The device supports 4 channels, but attaches multiple times via the
+ * device tree. This interferes with advanced usage, as the interrupt
+ * capability and the channel 3 FIFO register offsets do not work right
+ * in this situation. Since we do not support those features yet,
+ * pretend we are single-channel.
+ */
+ *nchannel = 1;
+
+ return (0);
+}
+
+static int
+rk_pwm_channel_config(device_t dev, u_int channel, u_int period, u_int duty)
+{
+ struct rk_pwm_softc *sc;
+ uint64_t period_freq, duty_freq;
+ uint32_t reg;
+ uint32_t period_out;
+ uint32_t duty_out;
+ uint8_t prescaler;
+ uint8_t scaler;
+ bool using_scaler;
+
+ sc = device_get_softc(dev);
+
+ period_freq = NS_PER_SEC / period;
+ /* The datasheet does not define a maximum, so limit to the Nyquist frequency. */
+ if (period_freq > (sc->clk_freq / 2))
+ return (EINVAL);
+ duty_freq = NS_PER_SEC / duty;
+ if (duty_freq < period_freq) {
+ device_printf(sc->dev, "duty < period\n");
+ return (EINVAL);
+ }
+
+ /*
+ * Assuming a 24 MHz reference, we should never actually have
+ * to use the divider due to pwm API limitations.
+ */
+ prescaler = 0;
+ scaler = 0;
+ using_scaler = false;
+
+ /* XXX Expand API to allow for 64 bit period/duty. */
+ period_out = (sc->clk_freq * period) / NS_PER_SEC;
+ duty_out = (sc->clk_freq * duty) / NS_PER_SEC;
+
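+ /*
+ * Reprogram the control register: continuous mode, left-aligned
+ * output, undivided clock source.
+ */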
+ reg = RK_PWM_READ(sc, RK_PWM_CTRL);
+
+ if ((reg & RK_PWM_CTRL_MODE_MASK) != RK_PWM_CTRL_MODE_CONTINUOUS) {
+ /* Switching modes, disable just in case. */
+ SET(reg, RK_PWM_CTRL_ENABLE_MASK, RK_PWM_CTRL_DISABLED);
+ RK_PWM_WRITE(sc, RK_PWM_CTRL, reg);
+ }
+
+ RK_PWM_WRITE(sc, RK_PWM_PERIOD, period_out);
+ RK_PWM_WRITE(sc, RK_PWM_DUTY, duty_out);
+
+ SET(reg, RK_PWM_CTRL_ENABLE_MASK, RK_PWM_CTRL_ENABLED);
+ SET(reg, RK_PWM_CTRL_MODE_MASK, RK_PWM_CTRL_MODE_CONTINUOUS);
+ SET(reg, RK_PWM_CTRL_ALIGN_MASK, RK_PWM_CTRL_ALIGN_LEFT);
+ SET(reg, RK_PWM_CTRL_CLOCKSRC_MASK, using_scaler);
+ SET(reg, RK_PWM_CTRL_PRESCALE_MASK,
+ prescaler << RK_PWM_CTRL_PRESCALE_SHIFT);
+ SET(reg, RK_PWM_CTRL_SCALE_MASK,
+ scaler << RK_PWM_CTRL_SCALE_SHIFT);
+
+ RK_PWM_WRITE(sc, RK_PWM_CTRL, reg);
+
+ sc->period = period;
+ sc->duty = duty;
+
+ return (0);
+}
+
+static int
+rk_pwm_channel_get_config(device_t dev, u_int channel, u_int *period, u_int *duty)
+{
+ struct rk_pwm_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ *period = sc->period;
+ *duty = sc->duty;
+
+ return (0);
+}
+
+static int
+rk_pwm_channel_enable(device_t dev, u_int channel, bool enable)
+{
+ struct rk_pwm_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+
+ if (enable && sc->enabled)
+ return (0);
+
+ reg = RK_PWM_READ(sc, RK_PWM_CTRL);
+ SET(reg, RK_PWM_CTRL_ENABLE_MASK, enable);
+
+ RK_PWM_WRITE(sc, RK_PWM_CTRL, reg);
+
+ sc->enabled = enable;
+
+ return (0);
+}
+
+static int
+rk_pwm_channel_is_enabled(device_t dev, u_int channel, bool *enabled)
+{
+ struct rk_pwm_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ *enabled = sc->enabled;
+
+ return (0);
+}
+
+static device_method_t rk_pwm_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, rk_pwm_probe),
+ DEVMETHOD(device_attach, rk_pwm_attach),
+ DEVMETHOD(device_detach, rk_pwm_detach),
+
+ /* ofw_bus interface */
+ DEVMETHOD(ofw_bus_get_node, rk_pwm_get_node),
+
+ /* pwm interface */
+ DEVMETHOD(pwmbus_channel_count, rk_pwm_channel_count),
+ DEVMETHOD(pwmbus_channel_config, rk_pwm_channel_config),
+ DEVMETHOD(pwmbus_channel_get_config, rk_pwm_channel_get_config),
+ DEVMETHOD(pwmbus_channel_enable, rk_pwm_channel_enable),
+ DEVMETHOD(pwmbus_channel_is_enabled, rk_pwm_channel_is_enabled),
+
+ DEVMETHOD_END
+};
+
+static driver_t rk_pwm_driver = {
+ "pwm",
+ rk_pwm_methods,
+ sizeof(struct rk_pwm_softc),
+};
+
+static devclass_t rk_pwm_devclass;
+
+DRIVER_MODULE(rk_pwm, simplebus, rk_pwm_driver, rk_pwm_devclass, 0, 0);
+SIMPLEBUS_PNP_INFO(compat_data);
diff --git a/sys/arm64/rockchip/rk_spi.c b/sys/arm64/rockchip/rk_spi.c
new file mode 100644
index 000000000000..13e3abb80085
--- /dev/null
+++ b/sys/arm64/rockchip/rk_spi.c
@@ -0,0 +1,483 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Oleksandr Tymoshenko <gonzo@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/rman.h>
+#include <sys/resource.h>
+#include <machine/bus.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/spibus/spi.h>
+#include <dev/spibus/spibusvar.h>
+
+#include <dev/extres/clk/clk.h>
+#include <dev/extres/hwreset/hwreset.h>
+
+#include "spibus_if.h"
+
+#define RK_SPI_CTRLR0 0x0000
+#define CTRLR0_OPM_MASTER (0 << 20)
+#define CTRLR0_XFM_TR (0 << 18)
+#define CTRLR0_FRF_MOTO (0 << 16)
+#define CTRLR0_BHT_8BIT (1 << 13)
+#define CTRLR0_EM_BIG (1 << 11)
+#define CTRLR0_SSD_ONE (1 << 10)
+#define CTRLR0_SCPOL (1 << 7)
+#define CTRLR0_SCPH (1 << 6)
+#define CTRLR0_DFS_8BIT (1 << 0)
+#define RK_SPI_CTRLR1 0x0004
+#define RK_SPI_ENR 0x0008
+#define RK_SPI_SER 0x000c
+#define RK_SPI_BAUDR 0x0010
+#define RK_SPI_TXFTLR 0x0014
+#define RK_SPI_RXFTLR 0x0018
+#define RK_SPI_TXFLR 0x001c
+#define RK_SPI_RXFLR 0x0020
+#define RK_SPI_SR 0x0024
+#define SR_BUSY (1 << 0)
+#define RK_SPI_IPR 0x0028
+#define RK_SPI_IMR 0x002c
+#define IMR_RFFIM (1 << 4)
+#define IMR_TFEIM (1 << 0)
+#define RK_SPI_ISR 0x0030
+#define ISR_RFFIS (1 << 4)
+#define ISR_TFEIS (1 << 0)
+#define RK_SPI_RISR 0x0034
+#define RK_SPI_ICR 0x0038
+#define RK_SPI_DMACR 0x003c
+#define RK_SPI_DMATDLR 0x0040
+#define RK_SPI_DMARDLR 0x0044
+#define RK_SPI_TXDR 0x0400
+#define RK_SPI_RXDR 0x0800
+
+#define CS_MAX 1
+
+static struct ofw_compat_data compat_data[] = {
+ { "rockchip,rk3399-spi", 1 },
+ { NULL, 0 }
+};
+
+static struct resource_spec rk_spi_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
+ { -1, 0 }
+};
+
+struct rk_spi_softc {
+ device_t dev;
+ device_t spibus;
+ struct resource *res[2];
+ struct mtx mtx;
+ clk_t clk_apb;
+ clk_t clk_spi;
+ void * intrhand;
+ int transfer;
+ uint32_t fifo_size;
+ uint64_t max_freq;
+
+ uint32_t intreg;
+ uint8_t *rxbuf;
+ uint32_t rxidx;
+ uint8_t *txbuf;
+ uint32_t txidx;
+ uint32_t txlen;
+ uint32_t rxlen;
+};
+
+#define RK_SPI_LOCK(sc) mtx_lock(&(sc)->mtx)
+#define RK_SPI_UNLOCK(sc) mtx_unlock(&(sc)->mtx)
+#define RK_SPI_READ_4(sc, reg) bus_read_4((sc)->res[0], (reg))
+#define RK_SPI_WRITE_4(sc, reg, val) bus_write_4((sc)->res[0], (reg), (val))
+
+static int rk_spi_probe(device_t dev);
+static int rk_spi_attach(device_t dev);
+static int rk_spi_detach(device_t dev);
+static void rk_spi_intr(void *arg);
+
+static void
+rk_spi_enable_chip(struct rk_spi_softc *sc, int enable)
+{
+
+ RK_SPI_WRITE_4(sc, RK_SPI_ENR, enable ? 1 : 0);
+}
+
+static int
+rk_spi_set_cs(struct rk_spi_softc *sc, uint32_t cs, bool active)
+{
+ uint32_t reg;
+
+ if (cs & SPIBUS_CS_HIGH) {
+ device_printf(sc->dev, "SPIBUS_CS_HIGH is not supported\n");
+ return (EINVAL);
+ }
+
+ if (cs > CS_MAX)
+ return (EINVAL);
+
+ reg = RK_SPI_READ_4(sc, RK_SPI_SER);
+ if (active)
+ reg |= (1 << cs);
+ else
+ reg &= ~(1 << cs);
+ RK_SPI_WRITE_4(sc, RK_SPI_SER, reg);
+
+ return (0);
+}
+
+static void
+rk_spi_hw_setup(struct rk_spi_softc *sc, uint32_t mode, uint32_t freq)
+{
+ uint32_t cr0;
+ uint32_t div;
+
+ cr0 = CTRLR0_OPM_MASTER | CTRLR0_XFM_TR | CTRLR0_FRF_MOTO |
+ CTRLR0_BHT_8BIT | CTRLR0_EM_BIG | CTRLR0_SSD_ONE |
+ CTRLR0_DFS_8BIT;
+
+ if (mode & SPIBUS_MODE_CPHA)
+ cr0 |= CTRLR0_SCPH;
+ if (mode & SPIBUS_MODE_CPOL)
+ cr0 |= CTRLR0_SCPOL;
+
+ /* minimum divider is 2 */
+ if (sc->max_freq < freq * 2) {
+ clk_set_freq(sc->clk_spi, 2 * freq, CLK_SET_ROUND_DOWN);
+ clk_get_freq(sc->clk_spi, &sc->max_freq);
+ }
+
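+ /*
+ * Compute the clock divider, rounding up so the bus clock does not
+ * exceed the requested frequency. BAUDR is assumed to require an
+ * even divider value, hence the rounding to the next even number.
+ */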
+ div = ((sc->max_freq + freq - 1) / freq);
+ div = (div + 1) & 0xfffe;
+ RK_SPI_WRITE_4(sc, RK_SPI_BAUDR, div);
+
+ RK_SPI_WRITE_4(sc, RK_SPI_CTRLR0, cr0);
+}
+
+static uint32_t
+rk_spi_fifo_size(struct rk_spi_softc *sc)
+{
+ uint32_t txftlr, reg;
+
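+ /*
+ * Probe the TX FIFO depth by raising the threshold until a write
+ * no longer sticks; the last accepted value is the depth.
+ */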
+ for (txftlr = 2; txftlr < 32; txftlr++) {
+ RK_SPI_WRITE_4(sc, RK_SPI_TXFTLR, txftlr);
+ reg = RK_SPI_READ_4(sc, RK_SPI_TXFTLR);
+ if (reg != txftlr)
+ break;
+ }
+ RK_SPI_WRITE_4(sc, RK_SPI_TXFTLR, 0);
+
+ if (txftlr == 31)
+ return (0);
+
+ return (txftlr);
+}
+
+static void
+rk_spi_empty_rxfifo(struct rk_spi_softc *sc)
+{
+ uint32_t rxlevel;
+
+ rxlevel = RK_SPI_READ_4(sc, RK_SPI_RXFLR);
+ while (sc->rxidx < sc->rxlen && rxlevel-- > 0) {
+ sc->rxbuf[sc->rxidx++] =
+ (uint8_t)RK_SPI_READ_4(sc, RK_SPI_RXDR);
+ }
+}
+
+static void
+rk_spi_fill_txfifo(struct rk_spi_softc *sc)
+{
+ uint32_t txlevel;
+
+ txlevel = RK_SPI_READ_4(sc, RK_SPI_TXFLR);
+ while (sc->txidx < sc->txlen && txlevel < sc->fifo_size) {
+ RK_SPI_WRITE_4(sc, RK_SPI_TXDR, sc->txbuf[sc->txidx++]);
+ txlevel++;
+ }
+
+ if (sc->txidx != sc->txlen)
+ sc->intreg |= (IMR_TFEIM | IMR_RFFIM);
+}
+
+static int
+rk_spi_xfer_buf(struct rk_spi_softc *sc, void *rxbuf, void *txbuf, uint32_t len)
+{
+ int err;
+
+ if (len == 0)
+ return (0);
+
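+ /*
+ * Prime the TX FIFO and let the interrupt handler keep it fed;
+ * sleep until the handler reports that all bytes were pushed.
+ */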
+ sc->rxbuf = rxbuf;
+ sc->rxlen = len;
+ sc->rxidx = 0;
+ sc->txbuf = txbuf;
+ sc->txlen = len;
+ sc->txidx = 0;
+ sc->intreg = 0;
+ rk_spi_fill_txfifo(sc);
+
+ RK_SPI_WRITE_4(sc, RK_SPI_IMR, sc->intreg);
+
+ err = 0;
+ while (err == 0 && sc->intreg != 0)
+ err = msleep(sc, &sc->mtx, 0, "rk_spi", 10 * hz);
+
+ /* Drain any residual data left in the RX FIFO. */
+ while (err == 0 && sc->rxidx != sc->txidx)
+ rk_spi_empty_rxfifo(sc);
+
+ if (sc->rxidx != sc->rxlen || sc->txidx != sc->txlen)
+ err = EIO;
+
+ return (err);
+}
+
+static int
+rk_spi_probe(device_t dev)
+{
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
+ return (ENXIO);
+
+ device_set_desc(dev, "Rockchip SPI");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+rk_spi_attach(device_t dev)
+{
+ struct rk_spi_softc *sc;
+ int error;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF);
+
+ if (bus_alloc_resources(dev, rk_spi_spec, sc->res) != 0) {
+ device_printf(dev, "cannot allocate resources for device\n");
+ error = ENXIO;
+ goto fail;
+ }
+
+ if (bus_setup_intr(dev, sc->res[1],
+ INTR_TYPE_MISC | INTR_MPSAFE, NULL, rk_spi_intr, sc,
+ &sc->intrhand)) {
+ device_printf(dev, "cannot setup interrupt handler\n");
+ error = ENXIO;
+ goto fail;
+ }
+
+ /* Activate the module clock. */
+ error = clk_get_by_ofw_name(dev, 0, "apb_pclk", &sc->clk_apb);
+ if (error != 0) {
+ device_printf(dev, "cannot get apb_pclk clock\n");
+ goto fail;
+ }
+ error = clk_get_by_ofw_name(dev, 0, "spiclk", &sc->clk_spi);
+ if (error != 0) {
+ device_printf(dev, "cannot get spiclk clock\n");
+ goto fail;
+ }
+ error = clk_enable(sc->clk_apb);
+ if (error != 0) {
+ device_printf(dev, "cannot enable ahb clock\n");
+ goto fail;
+ }
+ error = clk_enable(sc->clk_spi);
+ if (error != 0) {
+ device_printf(dev, "cannot enable spiclk clock\n");
+ goto fail;
+ }
+ clk_get_freq(sc->clk_spi, &sc->max_freq);
+
+ sc->fifo_size = rk_spi_fifo_size(sc);
+ if (sc->fifo_size == 0) {
+ device_printf(dev, "failed to get fifo size\n");
+ goto fail;
+ }
+
+ sc->spibus = device_add_child(dev, "spibus", -1);
+
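+ /* Mask all interrupts and set both FIFO watermarks to half the FIFO depth. */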
+ RK_SPI_WRITE_4(sc, RK_SPI_IMR, 0);
+ RK_SPI_WRITE_4(sc, RK_SPI_TXFTLR, sc->fifo_size / 2 - 1);
+ RK_SPI_WRITE_4(sc, RK_SPI_RXFTLR, sc->fifo_size / 2 - 1);
+
+ return (bus_generic_attach(dev));
+
+fail:
+ rk_spi_detach(dev);
+ return (error);
+}
+
+static int
+rk_spi_detach(device_t dev)
+{
+ struct rk_spi_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ bus_generic_detach(sc->dev);
+ if (sc->spibus != NULL)
+ device_delete_child(dev, sc->spibus);
+
+ if (sc->clk_spi != NULL)
+ clk_release(sc->clk_spi);
+ if (sc->clk_apb != NULL)
+ clk_release(sc->clk_apb);
+
+ if (sc->intrhand != NULL)
+ bus_teardown_intr(sc->dev, sc->res[1], sc->intrhand);
+
+ bus_release_resources(dev, rk_spi_spec, sc->res);
+ mtx_destroy(&sc->mtx);
+
+ return (0);
+}
+
+static void
+rk_spi_intr(void *arg)
+{
+ struct rk_spi_softc *sc;
+ uint32_t intreg, isr;
+
+ sc = arg;
+
+ RK_SPI_LOCK(sc);
+ intreg = RK_SPI_READ_4(sc, RK_SPI_IMR);
+ isr = RK_SPI_READ_4(sc, RK_SPI_ISR);
+ RK_SPI_WRITE_4(sc, RK_SPI_ICR, isr);
+
+ if (isr & ISR_RFFIS)
+ rk_spi_empty_rxfifo(sc);
+
+ if (isr & ISR_TFEIS)
+ rk_spi_fill_txfifo(sc);
+
+ /* no bytes left, disable interrupt */
+ if (sc->txidx == sc->txlen) {
+ sc->intreg = 0;
+ wakeup(sc);
+ }
+
+ if (sc->intreg != intreg) {
+ (void)RK_SPI_WRITE_4(sc, RK_SPI_IMR, sc->intreg);
+ (void)RK_SPI_READ_4(sc, RK_SPI_IMR);
+ }
+
+ RK_SPI_UNLOCK(sc);
+}
+
+static phandle_t
+rk_spi_get_node(device_t bus, device_t dev)
+{
+
+ return (ofw_bus_get_node(bus));
+}
+
+static int
+rk_spi_transfer(device_t dev, device_t child, struct spi_command *cmd)
+{
+ struct rk_spi_softc *sc;
+ uint32_t cs, mode, clock;
+ int err = 0;
+
+ sc = device_get_softc(dev);
+
+ spibus_get_cs(child, &cs);
+ spibus_get_clock(child, &clock);
+ spibus_get_mode(child, &mode);
+
+ RK_SPI_LOCK(sc);
+ rk_spi_hw_setup(sc, mode, clock);
+ rk_spi_enable_chip(sc, 1);
+ err = rk_spi_set_cs(sc, cs, true);
+ if (err != 0) {
+ rk_spi_enable_chip(sc, 0);
+ RK_SPI_UNLOCK(sc);
+ return (err);
+ }
+
+ /* Transfer command then data bytes. */
+ err = 0;
+ if (cmd->tx_cmd_sz > 0)
+ err = rk_spi_xfer_buf(sc, cmd->rx_cmd, cmd->tx_cmd,
+ cmd->tx_cmd_sz);
+ if (cmd->tx_data_sz > 0 && err == 0)
+ err = rk_spi_xfer_buf(sc, cmd->rx_data, cmd->tx_data,
+ cmd->tx_data_sz);
+
+ rk_spi_set_cs(sc, cs, false);
+ rk_spi_enable_chip(sc, 0);
+ RK_SPI_UNLOCK(sc);
+
+ return (err);
+}
+
+static device_method_t rk_spi_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, rk_spi_probe),
+ DEVMETHOD(device_attach, rk_spi_attach),
+ DEVMETHOD(device_detach, rk_spi_detach),
+
+ /* spibus_if */
+ DEVMETHOD(spibus_transfer, rk_spi_transfer),
+
+ /* ofw_bus_if */
+ DEVMETHOD(ofw_bus_get_node, rk_spi_get_node),
+
+ DEVMETHOD_END
+};
+
+static driver_t rk_spi_driver = {
+ "spi",
+ rk_spi_methods,
+ sizeof(struct rk_spi_softc),
+};
+
+static devclass_t rk_spi_devclass;
+
+DRIVER_MODULE(rk_spi, simplebus, rk_spi_driver, rk_spi_devclass, 0, 0);
+DRIVER_MODULE(ofw_spibus, rk_spi, ofw_spibus_driver, ofw_spibus_devclass, 0, 0);
+MODULE_DEPEND(rk_spi, ofw_spibus, 1, 1, 1);
+OFWBUS_PNP_INFO(compat_data);
diff --git a/sys/arm64/rockchip/rk_tsadc.c b/sys/arm64/rockchip/rk_tsadc.c
new file mode 100644
index 000000000000..37134772da00
--- /dev/null
+++ b/sys/arm64/rockchip/rk_tsadc.c
@@ -0,0 +1,792 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Michal Meloun <mmel@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Thermometer and thermal zones driver for RockChip SoCs.
+ * Calibration data are taken from Linux, because this part of SoC
+ * is undocumented in TRM.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/gpio.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/malloc.h>
+#include <sys/rman.h>
+#include <sys/sysctl.h>
+
+#include <machine/bus.h>
+
+#include <dev/extres/clk/clk.h>
+#include <dev/extres/hwreset/hwreset.h>
+#include <dev/extres/syscon/syscon.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include "syscon_if.h"
+#include "rk_tsadc_if.h"
+
+/* Global registers */
+#define TSADC_USER_CON 0x000
+#define TSADC_AUTO_CON 0x004
+#define TSADC_AUTO_CON_POL_HI (1 << 8)
+#define TSADC_AUTO_SRC_EN(x) (1 << (4 + (x)))
+#define TSADC_AUTO_Q_SEL (1 << 1) /* V3 only */
+#define TSADC_AUTO_CON_AUTO (1 << 0)
+
+#define TSADC_INT_EN 0x008
+#define TSADC_INT_EN_2CRU_EN_SRC(x) (1 << (8 + (x)))
+#define TSADC_INT_EN_2GPIO_EN_SRC(x) (1 << (4 + (x)))
+#define TSADC_INT_PD 0x00c
+#define TSADC_DATA(x) (0x20 + (x) * 0x04)
+#define TSADC_COMP_INT(x) (0x30 + (x) * 0x04)
+#define TSADC_COMP_INT_SRC_EN(x) (1 << (0 + (x)))
+#define TSADC_COMP_SHUT(x) (0x40 + (x) * 0x04)
+#define TSADC_HIGHT_INT_DEBOUNCE 0x060
+#define TSADC_HIGHT_TSHUT_DEBOUNCE 0x064
+#define TSADC_AUTO_PERIOD 0x068
+#define TSADC_AUTO_PERIOD_HT 0x06c
+#define TSADC_COMP0_LOW_INT 0x080 /* V3 only */
+#define TSADC_COMP1_LOW_INT 0x084 /* V3 only */
+
+/* GRF bits */
+#define GRF_SARADC_TESTBIT 0x0e644
+#define GRF_SARADC_TESTBIT_ON (0x10001 << 2)
+#define GRF_TSADC_TESTBIT_L 0x0e648
+#define GRF_TSADC_VCM_EN_L (0x10001 << 7)
+#define GRF_TSADC_TESTBIT_H 0x0e64c
+#define GRF_TSADC_VCM_EN_H (0x10001 << 7)
+#define GRF_TSADC_TESTBIT_H_ON (0x10001 << 2)
+
+#define WR4(_sc, _r, _v) bus_write_4((_sc)->mem_res, (_r), (_v))
+#define RD4(_sc, _r) bus_read_4((_sc)->mem_res, (_r))
+
+static struct sysctl_ctx_list tsadc_sysctl_ctx;
+
+struct tsensor {
+ char *name;
+ int id;
+ int channel;
+};
+
+struct rk_calib_entry {
+ uint32_t raw;
+ int temp;
+};
+
+struct tsadc_calib_info {
+ struct rk_calib_entry *table;
+ int nentries;
+};
+
+struct tsadc_conf {
+ int use_syscon;
+ int q_sel_ntc;
+ int shutdown_temp;
+ int shutdown_mode;
+ int shutdown_pol;
+ struct tsensor *tsensors;
+ int ntsensors;
+ struct tsadc_calib_info calib_info;
+};
+
+struct tsadc_softc {
+ device_t dev;
+ struct resource *mem_res;
+ struct resource *irq_res;
+ void *irq_ih;
+
+ clk_t tsadc_clk;
+ clk_t apb_pclk_clk;
+ hwreset_t hwreset;
+ struct syscon *grf;
+
+ struct tsadc_conf *conf;
+
+ int shutdown_temp;
+ int shutdown_mode;
+ int shutdown_pol;
+
+ int alarm_temp;
+};
+
+static struct rk_calib_entry rk3288_calib_data[] = {
+ {3800, -40000},
+ {3792, -35000},
+ {3783, -30000},
+ {3774, -25000},
+ {3765, -20000},
+ {3756, -15000},
+ {3747, -10000},
+ {3737, -5000},
+ {3728, 0},
+ {3718, 5000},
+ {3708, 10000},
+ {3698, 15000},
+ {3688, 20000},
+ {3678, 25000},
+ {3667, 30000},
+ {3656, 35000},
+ {3645, 40000},
+ {3634, 45000},
+ {3623, 50000},
+ {3611, 55000},
+ {3600, 60000},
+ {3588, 65000},
+ {3575, 70000},
+ {3563, 75000},
+ {3550, 80000},
+ {3537, 85000},
+ {3524, 90000},
+ {3510, 95000},
+ {3496, 100000},
+ {3482, 105000},
+ {3467, 110000},
+ {3452, 115000},
+ {3437, 120000},
+ {3421, 125000},
+};
+
+struct tsensor rk3288_tsensors[] = {
+ { .channel = 0, .id = 2, .name = "reserved"},
+ { .channel = 1, .id = 0, .name = "CPU"},
+ { .channel = 2, .id = 1, .name = "GPU"},
+};
+
+struct tsadc_conf rk3288_tsadc_conf = {
+ .use_syscon = 0,
+ .q_sel_ntc = 0,
+ .shutdown_temp = 95000,
+ .shutdown_mode = 1, /* GPIO */
+ .shutdown_pol = 0, /* Low */
+ .tsensors = rk3288_tsensors,
+ .ntsensors = nitems(rk3288_tsensors),
+ .calib_info = {
+ .table = rk3288_calib_data,
+ .nentries = nitems(rk3288_calib_data),
+ }
+};
+
+static struct rk_calib_entry rk3328_calib_data[] = {
+ {296, -40000},
+ {304, -35000},
+ {313, -30000},
+ {331, -20000},
+ {340, -15000},
+ {349, -10000},
+ {359, -5000},
+ {368, 0},
+ {378, 5000},
+ {388, 10000},
+ {398, 15000},
+ {408, 20000},
+ {418, 25000},
+ {429, 30000},
+ {440, 35000},
+ {451, 40000},
+ {462, 45000},
+ {473, 50000},
+ {485, 55000},
+ {496, 60000},
+ {508, 65000},
+ {521, 70000},
+ {533, 75000},
+ {546, 80000},
+ {559, 85000},
+ {572, 90000},
+ {586, 95000},
+ {600, 100000},
+ {614, 105000},
+ {629, 110000},
+ {644, 115000},
+ {659, 120000},
+ {675, 125000},
+};
+
+static struct tsensor rk3328_tsensors[] = {
+ { .channel = 0, .id = 0, .name = "CPU"},
+};
+
+static struct tsadc_conf rk3328_tsadc_conf = {
+ .use_syscon = 0,
+ .q_sel_ntc = 1,
+ .shutdown_temp = 95000,
+ .shutdown_mode = 0, /* CRU */
+ .shutdown_pol = 0, /* Low */
+ .tsensors = rk3328_tsensors,
+ .ntsensors = nitems(rk3328_tsensors),
+ .calib_info = {
+ .table = rk3328_calib_data,
+ .nentries = nitems(rk3328_calib_data),
+ }
+};
+
+static struct rk_calib_entry rk3399_calib_data[] = {
+ {402, -40000},
+ {410, -35000},
+ {419, -30000},
+ {427, -25000},
+ {436, -20000},
+ {444, -15000},
+ {453, -10000},
+ {461, -5000},
+ {470, 0},
+ {478, 5000},
+ {487, 10000},
+ {496, 15000},
+ {504, 20000},
+ {513, 25000},
+ {521, 30000},
+ {530, 35000},
+ {538, 40000},
+ {547, 45000},
+ {555, 50000},
+ {564, 55000},
+ {573, 60000},
+ {581, 65000},
+ {590, 70000},
+ {599, 75000},
+ {607, 80000},
+ {616, 85000},
+ {624, 90000},
+ {633, 95000},
+ {642, 100000},
+ {650, 105000},
+ {659, 110000},
+ {668, 115000},
+ {677, 120000},
+ {685, 125000},
+};
+
+static struct tsensor rk3399_tsensors[] = {
+ { .channel = 0, .id = 0, .name = "CPU"},
+ { .channel = 1, .id = 1, .name = "GPU"},
+};
+
+static struct tsadc_conf rk3399_tsadc_conf = {
+ .use_syscon = 1,
+ .q_sel_ntc = 1,
+ .shutdown_temp = 95000,
+ .shutdown_mode = 1, /* GPIO */
+ .shutdown_pol = 0, /* Low */
+ .tsensors = rk3399_tsensors,
+ .ntsensors = nitems(rk3399_tsensors),
+ .calib_info = {
+ .table = rk3399_calib_data,
+ .nentries = nitems(rk3399_calib_data),
+ }
+};
+
+static struct ofw_compat_data compat_data[] = {
+ {"rockchip,rk3288-tsadc", (uintptr_t)&rk3288_tsadc_conf},
+ {"rockchip,rk3328-tsadc", (uintptr_t)&rk3328_tsadc_conf},
+ {"rockchip,rk3399-tsadc", (uintptr_t)&rk3399_tsadc_conf},
+ {NULL, 0}
+};
+
+static uint32_t
+tsadc_temp_to_raw(struct tsadc_softc *sc, int temp)
+{
+ struct rk_calib_entry *tbl;
+ int denom, ntbl, raw, i;
+
+ tbl = sc->conf->calib_info.table;
+ ntbl = sc->conf->calib_info.nentries;
+
+ if (temp <= tbl[0].temp)
+ return (tbl[0].raw);
+
+ if (temp >= tbl[ntbl - 1].temp)
+ return (tbl[ntbl - 1].raw);
+
+ for (i = 1; i < (ntbl - 1); i++) {
+ /* Exact match */
+ if (temp == tbl[i].temp)
+ return (tbl[i].raw);
+ if (temp < tbl[i].temp)
+ break;
+ }
+
+ /*
+ * Translated value is between i and i - 1 table entries.
+ * Do linear interpolation for it.
+ */
+ raw = (int)tbl[i - 1].raw - (int)tbl[i].raw;
+ raw *= temp - tbl[i - 1].temp;
+ denom = tbl[i - 1].temp - tbl[i].temp;
+ raw = tbl[i - 1].raw + raw / denom;
+ return (raw);
+}
+
+static int
+tsadc_raw_to_temp(struct tsadc_softc *sc, uint32_t raw)
+{
+ struct rk_calib_entry *tbl;
+ int denom, ntbl, temp, i;
+ bool descending;
+
+ tbl = sc->conf->calib_info.table;
+ ntbl = sc->conf->calib_info.nentries;
+ descending = tbl[0].raw > tbl[1].raw;
+
+ if (descending) {
+ /* Raw column is in descending order. */
+ if (raw >= tbl[0].raw)
+ return (tbl[0].temp);
+ if (raw <= tbl[ntbl - 1].raw)
+ return (tbl[ntbl - 1].temp);
+
+ for (i = ntbl - 2; i > 0; i--) {
+ /* Exact match */
+ if (raw == tbl[i].raw)
+ return (tbl[i].temp);
+ if (raw < tbl[i].raw)
+ break;
+ }
+ } else {
+ /* Raw column is in ascending order. */
+ if (raw <= tbl[0].raw)
+ return (tbl[0].temp);
+ if (raw >= tbl[ntbl - 1].raw)
+ return (tbl[ntbl - 1].temp);
+ for (i = 1; i < (ntbl - 1); i++) {
+ /* Exact match */
+ if (raw == tbl[i].raw)
+ return (tbl[i].temp);
+ if (raw < tbl[i].raw)
+ break;
+ }
+ }
+
+ /*
+ * Translated value is between i and i - 1 table entries.
+ * Do linear interpolation for it.
+ */
+ temp = (int)tbl[i - 1].temp - (int)tbl[i].temp;
+ temp *= raw - tbl[i - 1].raw;
+ denom = tbl[i - 1].raw - tbl[i].raw;
+ temp = tbl[i - 1].temp + temp / denom;
+ return (temp);
+}
+
+static void
+tsadc_init_tsensor(struct tsadc_softc *sc, struct tsensor *sensor)
+{
+ uint32_t val;
+
+ /* Shutdown mode */
+ val = RD4(sc, TSADC_INT_EN);
+ if (sc->shutdown_mode != 0) {
+ /* Route the shutdown signal to the GPIO (TSHUT) pin, not the CRU. */
+ val &= ~TSADC_INT_EN_2CRU_EN_SRC(sensor->channel);
+ val |= TSADC_INT_EN_2GPIO_EN_SRC(sensor->channel);
+ } else {
+ val |= TSADC_INT_EN_2CRU_EN_SRC(sensor->channel);
+ val &= ~TSADC_INT_EN_2GPIO_EN_SRC(sensor->channel);
+ }
+ WR4(sc, TSADC_INT_EN, val);
+
+ /* Shutdown temperature */
+ val = tsadc_temp_to_raw(sc, sc->shutdown_temp);
+ WR4(sc, TSADC_COMP_SHUT(sensor->channel), val);
+ val = RD4(sc, TSADC_AUTO_CON);
+ val |= TSADC_AUTO_SRC_EN(sensor->channel);
+ WR4(sc, TSADC_AUTO_CON, val);
+
+ /* Alarm temperature */
+ val = tsadc_temp_to_raw(sc, sc->alarm_temp);
+ WR4(sc, TSADC_COMP_INT(sensor->channel), val);
+ val = RD4(sc, TSADC_INT_EN);
+ val |= TSADC_COMP_INT_SRC_EN(sensor->channel);
+ WR4(sc, TSADC_INT_EN, val);
+}
+
+static void
+tsadc_init(struct tsadc_softc *sc)
+{
+ uint32_t val;
+
+ /* Common part */
+ val = 0; /* XXX Should the existing TSADC_AUTO_CON bits be preserved? */
+ if (sc->shutdown_pol != 0)
+ val |= TSADC_AUTO_CON_POL_HI;
+ else
+ val &= ~TSADC_AUTO_CON_POL_HI;
+ if (sc->conf->q_sel_ntc)
+ val |= TSADC_AUTO_Q_SEL;
+ WR4(sc, TSADC_AUTO_CON, val);
+
+ if (!sc->conf->use_syscon) {
+ /* V2 init */
+ WR4(sc, TSADC_AUTO_PERIOD, 250); /* 250 ms */
+ WR4(sc, TSADC_AUTO_PERIOD_HT, 50); /* 50 ms */
+ WR4(sc, TSADC_HIGHT_INT_DEBOUNCE, 4);
+ WR4(sc, TSADC_HIGHT_TSHUT_DEBOUNCE, 4);
+ } else {
+ /* V3 init */
+ if (sc->grf == NULL) {
+ /* Errata: adjust interleave to working value */
+ WR4(sc, TSADC_USER_CON, 13 << 6); /* 13 clks */
+ } else {
+ SYSCON_WRITE_4(sc->grf, GRF_TSADC_TESTBIT_L,
+ GRF_TSADC_VCM_EN_L);
+ SYSCON_WRITE_4(sc->grf, GRF_TSADC_TESTBIT_H,
+ GRF_TSADC_VCM_EN_H);
+ DELAY(30); /* 15 usec min */
+
+ SYSCON_WRITE_4(sc->grf, GRF_SARADC_TESTBIT,
+ GRF_SARADC_TESTBIT_ON);
+ SYSCON_WRITE_4(sc->grf, GRF_TSADC_TESTBIT_H,
+ GRF_TSADC_TESTBIT_H_ON);
+ DELAY(180); /* 90 usec min */
+ }
+ WR4(sc, TSADC_AUTO_PERIOD, 1875); /* 2.5 ms */
+ WR4(sc, TSADC_AUTO_PERIOD_HT, 1875); /* 2.5 ms */
+ WR4(sc, TSADC_HIGHT_INT_DEBOUNCE, 4);
+ WR4(sc, TSADC_HIGHT_TSHUT_DEBOUNCE, 4);
+ }
+}
+
+static int
+tsadc_read_temp(struct tsadc_softc *sc, struct tsensor *sensor, int *temp)
+{
+ uint32_t val;
+
+ val = RD4(sc, TSADC_DATA(sensor->channel));
+ *temp = tsadc_raw_to_temp(sc, val);
+
+#ifdef DEBUG
+ printf("%s: Sensor(id: %d, ch: %d), temp: %d\n", __func__,
+ sensor->id, sensor->channel, *temp);
+ printf(" status: 0x%08X, 0x%08X\n",
+ RD4(sc, TSADC_USER_CON),
+ RD4(sc, TSADC_AUTO_CON));
+ printf(" Data: 0x%08X, 0x%08X, 0x%08X\n",
+ RD4(sc, TSADC_DATA(sensor->channel)),
+ RD4(sc, TSADC_COMP_INT(sensor->channel)),
+ RD4(sc, TSADC_COMP_SHUT(sensor->channel)));
+#endif
+ return (0);
+}
+
+static int
+tsadc_get_temp(device_t dev, device_t cdev, uintptr_t id, int *val)
+{
+ struct tsadc_softc *sc;
+ int i, rv;
+
+ sc = device_get_softc(dev);
+
+ if (id >= sc->conf->ntsensors)
+ return (ERANGE);
+
+ for (i = 0; i < sc->conf->ntsensors; i++) {
+ if (sc->conf->tsensors[i].id == id) {
+ rv = tsadc_read_temp(sc, sc->conf->tsensors + i, val);
+ return (rv);
+ }
+ }
+ return (ERANGE);
+}
+
+static int
+tsadc_sysctl_temperature(SYSCTL_HANDLER_ARGS)
+{
+ struct tsadc_softc *sc;
+ int val;
+ int rv;
+ int id;
+
+ /* Write request */
+ if (req->newptr != NULL)
+ return (EINVAL);
+
+ sc = arg1;
+ id = arg2;
+
+ if (id >= sc->conf->ntsensors)
+ return (ERANGE);
+ rv = tsadc_read_temp(sc, sc->conf->tsensors + id, &val);
+ if (rv != 0)
+ return (rv);
+
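+ /* Convert millidegrees Celsius to the deci-Kelvin units of sysctl's "IK" format. */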
+ val = val / 100;
+ val += 2731;
+ rv = sysctl_handle_int(oidp, &val, 0, req);
+ return (rv);
+}
+
+static int
+tsadc_init_sysctl(struct tsadc_softc *sc)
+{
+ int i;
+ struct sysctl_oid *oid, *tmp;
+
+ sysctl_ctx_init(&tsadc_sysctl_ctx);
+ /* create node for hw.temp */
+ oid = SYSCTL_ADD_NODE(&tsadc_sysctl_ctx,
+ SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, "temperature",
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
+ if (oid == NULL)
+ return (ENXIO);
+
+ /* Add sensors */
+ for (i = sc->conf->ntsensors - 1; i >= 0; i--) {
+ tmp = SYSCTL_ADD_PROC(&tsadc_sysctl_ctx,
+ SYSCTL_CHILDREN(oid), OID_AUTO, sc->conf->tsensors[i].name,
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, i,
+ tsadc_sysctl_temperature, "IK", "SoC Temperature");
+ if (tmp == NULL)
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static int
+tsadc_intr(void *arg)
+{
+ struct tsadc_softc *sc;
+ uint32_t val;
+
+ sc = (struct tsadc_softc *)arg;
+
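+ /* Read and acknowledge all pending interrupt sources. */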
+ val = RD4(sc, TSADC_INT_PD);
+ WR4(sc, TSADC_INT_PD, val);
+
+ /* XXX Handle shutdown and alarm interrupts. */
+ if (val & 0x00F0) {
+ device_printf(sc->dev, "Alarm: device temperature "
+ "is above the shutdown level.\n");
+ } else if (val & 0x000F) {
+ device_printf(sc->dev, "Alarm: device temperature "
+ "is above the alarm level.\n");
+ }
+ return (FILTER_HANDLED);
+}
+
+static int
+tsadc_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "RockChip temperature sensors");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+tsadc_attach(device_t dev)
+{
+ struct tsadc_softc *sc;
+ phandle_t node;
+ uint32_t val;
+ int i, rid, rv;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ node = ofw_bus_get_node(sc->dev);
+ sc->conf = (struct tsadc_conf *)
+ ofw_bus_search_compatible(dev, compat_data)->ocd_data;
+ sc->alarm_temp = 90000;
+
+ rid = 0;
+ sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
+ RF_ACTIVE);
+ if (sc->mem_res == NULL) {
+ device_printf(dev, "Cannot allocate memory resources\n");
+ goto fail;
+ }
+
+ rid = 0;
+ sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE);
+ if (sc->irq_res == NULL) {
+ device_printf(dev, "Cannot allocate IRQ resources\n");
+ goto fail;
+ }
+
+ if ((bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
+ tsadc_intr, NULL, sc, &sc->irq_ih))) {
+ device_printf(dev,
+ "WARNING: unable to register interrupt handler\n");
+ goto fail;
+ }
+
+ /* FDT resources */
+ rv = hwreset_get_by_ofw_name(dev, 0, "tsadc-apb", &sc->hwreset);
+ if (rv != 0) {
+ device_printf(dev, "Cannot get 'tsadc-apb' reset\n");
+ goto fail;
+ }
+ rv = clk_get_by_ofw_name(dev, 0, "tsadc", &sc->tsadc_clk);
+ if (rv != 0) {
+ device_printf(dev, "Cannot get 'tsadc' clock: %d\n", rv);
+ goto fail;
+ }
+ rv = clk_get_by_ofw_name(dev, 0, "apb_pclk", &sc->apb_pclk_clk);
+ if (rv != 0) {
+ device_printf(dev, "Cannot get 'apb_pclk' clock: %d\n", rv);
+ goto fail;
+ }
+
+ /* grf is optional */
+ rv = syscon_get_by_ofw_property(dev, node, "rockchip,grf", &sc->grf);
+ if (rv != 0 && rv != ENOENT) {
+ device_printf(dev, "Cannot get 'grf' syscon: %d\n", rv);
+ goto fail;
+ }
+
+ rv = OF_getencprop(node, "rockchip,hw-tshut-temp",
+ &sc->shutdown_temp, sizeof(sc->shutdown_temp));
+ if (rv <= 0)
+ sc->shutdown_temp = sc->conf->shutdown_temp;
+
+ rv = OF_getencprop(node, "rockchip,hw-tshut-mode",
+ &sc->shutdown_mode, sizeof(sc->shutdown_mode));
+ if (rv <= 0)
+ sc->shutdown_mode = sc->conf->shutdown_mode;
+
+ rv = OF_getencprop(node, "rockchip,hw-tshut-polarity",
+ &sc->shutdown_pol, sizeof(sc->shutdown_pol));
+ if (rv <= 0)
+ sc->shutdown_pol = sc->conf->shutdown_pol;
+
+ /* Hold the controller in reset while the clocks are configured. */
+ rv = hwreset_assert(sc->hwreset);
+ if (rv != 0) {
+ device_printf(dev, "Cannot assert reset\n");
+ goto fail;
+ }
+
+ /* Set the assigned clocks parent and freq */
+ if (clk_set_assigned(sc->dev, node) != 0) {
+ device_printf(dev, "clk_set_assigned failed\n");
+ goto fail;
+ }
+
+ rv = clk_enable(sc->tsadc_clk);
+ if (rv != 0) {
+ device_printf(dev, "Cannot enable 'tsadc_clk' clock: %d\n", rv);
+ goto fail;
+ }
+ rv = clk_enable(sc->apb_pclk_clk);
+ if (rv != 0) {
+ device_printf(dev, "Cannot enable 'apb_pclk' clock: %d\n", rv);
+ goto fail;
+ }
+ rv = hwreset_deassert(sc->hwreset);
+ if (rv != 0) {
+ device_printf(dev, "Cannot deassert reset\n");
+ goto fail;
+ }
+
+ tsadc_init(sc);
+ for (i = 0; i < sc->conf->ntsensors; i++)
+ tsadc_init_tsensor(sc, sc->conf->tsensors + i);
+
+ /* Enable auto mode */
+ val = RD4(sc, TSADC_AUTO_CON);
+ val |= TSADC_AUTO_CON_AUTO;
+ WR4(sc, TSADC_AUTO_CON, val);
+
+ rv = tsadc_init_sysctl(sc);
+ if (rv != 0) {
+ device_printf(sc->dev, "Cannot initialize sysctls\n");
+ goto fail_sysctl;
+ }
+
+ OF_device_register_xref(OF_xref_from_node(node), dev);
+ return (bus_generic_attach(dev));
+
+fail_sysctl:
+ sysctl_ctx_free(&tsadc_sysctl_ctx);
+fail:
+ if (sc->irq_ih != NULL)
+ bus_teardown_intr(dev, sc->irq_res, sc->irq_ih);
+ if (sc->tsadc_clk != NULL)
+ clk_release(sc->tsadc_clk);
+ if (sc->apb_pclk_clk != NULL)
+ clk_release(sc->apb_pclk_clk);
+ if (sc->hwreset != NULL)
+ hwreset_release(sc->hwreset);
+ if (sc->irq_res != NULL)
+ bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res);
+ if (sc->mem_res != NULL)
+ bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res);
+
+ return (ENXIO);
+}
+
+static int
+tsadc_detach(device_t dev)
+{
+ struct tsadc_softc *sc;
+ sc = device_get_softc(dev);
+
+ if (sc->irq_ih != NULL)
+ bus_teardown_intr(dev, sc->irq_res, sc->irq_ih);
+ sysctl_ctx_free(&tsadc_sysctl_ctx);
+ if (sc->tsadc_clk != NULL)
+ clk_release(sc->tsadc_clk);
+ if (sc->apb_pclk_clk != NULL)
+ clk_release(sc->apb_pclk_clk);
+ if (sc->hwreset != NULL)
+ hwreset_release(sc->hwreset);
+ if (sc->irq_res != NULL)
+ bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res);
+ if (sc->mem_res != NULL)
+ bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res);
+
+ return (0);
+}
+
+static device_method_t rk_tsadc_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, tsadc_probe),
+ DEVMETHOD(device_attach, tsadc_attach),
+ DEVMETHOD(device_detach, tsadc_detach),
+
+ /* TSADC interface */
+ DEVMETHOD(rk_tsadc_get_temperature, tsadc_get_temp),
+
+ DEVMETHOD_END
+};
+
+static devclass_t rk_tsadc_devclass;
+static DEFINE_CLASS_0(rk_tsadc, rk_tsadc_driver, rk_tsadc_methods,
+ sizeof(struct tsadc_softc));
+EARLY_DRIVER_MODULE(rk_tsadc, simplebus, rk_tsadc_driver,
+ rk_tsadc_devclass, NULL, NULL, BUS_PASS_TIMER + BUS_PASS_ORDER_LAST);
diff --git a/sys/arm64/rockchip/rk_tsadc_if.m b/sys/arm64/rockchip/rk_tsadc_if.m
new file mode 100644
index 000000000000..890448f4d7a0
--- /dev/null
+++ b/sys/arm64/rockchip/rk_tsadc_if.m
@@ -0,0 +1,43 @@
+#-
+# SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+#
+# Copyright (c) 2019 Michal Meloun <mmel@FreeBSD.org>
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $FreeBSD$
+#
+
+#include <machine/bus.h>
+
+INTERFACE rk_tsadc;
+
+
+/**
+ * Read temperature
+ */
+METHOD int get_temperature {
+ device_t dev;
+ device_t consumer;
+ uintptr_t id;
+ int *val;
+};
diff --git a/sys/arm64/rockchip/rk_typec_phy.c b/sys/arm64/rockchip/rk_typec_phy.c
new file mode 100644
index 000000000000..35e677e3bfee
--- /dev/null
+++ b/sys/arm64/rockchip/rk_typec_phy.c
@@ -0,0 +1,474 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Emmanuel Vadot <manu@FreeBSD.Org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Rockchip PHY TYPEC
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/gpio.h>
+#include <machine/bus.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/ofw/ofw_subr.h>
+
+#include <dev/extres/clk/clk.h>
+#include <dev/extres/phy/phy_usb.h>
+#include <dev/extres/syscon/syscon.h>
+#include <dev/extres/hwreset/hwreset.h>
+
+#include "syscon_if.h"
+
+#define GRF_USB3OTG_BASE(x) (0x2430 + (0x10 * (x)))
+#define GRF_USB3OTG_CON0(x) (GRF_USB3OTG_BASE(x) + 0x0)
+#define GRF_USB3OTG_CON1(x) (GRF_USB3OTG_BASE(x) + 0x4)
+#define USB3OTG_CON1_U3_DIS (1 << 0)
+
+#define GRF_USB3PHY_BASE(x) (0x0e580 + (0xc * (x)))
+#define GRF_USB3PHY_CON0(x) (GRF_USB3PHY_BASE(x) + 0x0)
+#define USB3PHY_CON0_USB2_ONLY (1 << 3)
+#define GRF_USB3PHY_CON1(x) (GRF_USB3PHY_BASE(x) + 0x4)
+#define GRF_USB3PHY_CON2(x) (GRF_USB3PHY_BASE(x) + 0x8)
+#define GRF_USB3PHY_STATUS0 0x0e5c0
+#define GRF_USB3PHY_STATUS1 0x0e5c4
+
+#define CMN_PLL0_VCOCAL_INIT (0x84 << 2)
+#define CMN_PLL0_VCOCAL_ITER (0x85 << 2)
+#define CMN_PLL0_INTDIV (0x94 << 2)
+#define CMN_PLL0_FRACDIV (0x95 << 2)
+#define CMN_PLL0_HIGH_THR (0x96 << 2)
+#define CMN_PLL0_DSM_DIAG (0x97 << 2)
+#define CMN_PLL0_SS_CTRL1 (0x98 << 2)
+#define CMN_PLL0_SS_CTRL2 (0x99 << 2)
+#define CMN_DIAG_PLL0_FBH_OVRD (0x1c0 << 2)
+#define CMN_DIAG_PLL0_FBL_OVRD (0x1c1 << 2)
+#define CMN_DIAG_PLL0_OVRD (0x1c2 << 2)
+#define CMN_DIAG_PLL0_V2I_TUNE (0x1c5 << 2)
+#define CMN_DIAG_PLL0_CP_TUNE (0x1c6 << 2)
+#define CMN_DIAG_PLL0_LF_PROG (0x1c7 << 2)
+#define CMN_DIAG_HSCLK_SEL (0x1e0 << 2)
+#define CMN_DIAG_HSCLK_SEL_PLL_CONFIG 0x30
+#define CMN_DIAG_HSCLK_SEL_PLL_MASK 0x33
+
+#define TX_TXCC_MGNFS_MULT_000(lane) ((0x4050 | ((lane) << 9)) << 2)
+#define XCVR_DIAG_BIDI_CTRL(lane) ((0x40e8 | ((lane) << 9)) << 2)
+#define XCVR_DIAG_LANE_FCM_EN_MGN(lane) ((0x40f2 | ((lane) << 9)) << 2)
+#define TX_PSC_A0(lane) ((0x4100 | ((lane) << 9)) << 2)
+#define TX_PSC_A1(lane) ((0x4101 | ((lane) << 9)) << 2)
+#define TX_PSC_A2(lane) ((0x4102 | ((lane) << 9)) << 2)
+#define TX_PSC_A3(lane) ((0x4103 | ((lane) << 9)) << 2)
+#define TX_RCVDET_EN_TMR(lane) ((0x4122 | ((lane) << 9)) << 2)
+#define TX_RCVDET_ST_TMR(lane) ((0x4123 | ((lane) << 9)) << 2)
+
+#define RX_PSC_A0(lane) ((0x8000 | ((lane) << 9)) << 2)
+#define RX_PSC_A1(lane) ((0x8001 | ((lane) << 9)) << 2)
+#define RX_PSC_A2(lane) ((0x8002 | ((lane) << 9)) << 2)
+#define RX_PSC_A3(lane) ((0x8003 | ((lane) << 9)) << 2)
+#define RX_PSC_CAL(lane) ((0x8006 | ((lane) << 9)) << 2)
+#define RX_PSC_RDY(lane) ((0x8007 | ((lane) << 9)) << 2)
+#define RX_SIGDET_HL_FILT_TMR(lane) ((0x8090 | ((lane) << 9)) << 2)
+#define RX_REE_CTRL_DATA_MASK(lane) ((0x81bb | ((lane) << 9)) << 2)
+#define RX_DIAG_SIGDET_TUNE(lane) ((0x81dc | ((lane) << 9)) << 2)
+
+#define PMA_LANE_CFG (0xc000 << 2)
+#define PIN_ASSIGN_D_F 0x5100
+#define DP_MODE_CTL (0xc008 << 2)
+#define DP_MODE_ENTER_A2 0xc104
+#define PMA_CMN_CTRL1 (0xc800 << 2)
+#define PMA_CMN_CTRL1_READY (1 << 0)
+
+static struct ofw_compat_data compat_data[] = {
+ { "rockchip,rk3399-typec-phy", 1 },
+ { NULL, 0 }
+};
+
+static struct resource_spec rk_typec_phy_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { -1, 0 }
+};
+
+struct rk_typec_phy_softc {
+ device_t dev;
+ struct resource *res;
+ struct syscon *grf;
+ clk_t tcpdcore;
+ clk_t tcpdphy_ref;
+ hwreset_t rst_uphy;
+ hwreset_t rst_pipe;
+ hwreset_t rst_tcphy;
+ int mode;
+ int phy_ctrl_id;
+};
+
+#define RK_TYPEC_PHY_READ(sc, reg) bus_read_4(sc->res, (reg))
+#define RK_TYPEC_PHY_WRITE(sc, reg, val) bus_write_4(sc->res, (reg), (val))
+
+/* Phy class and methods. */
+static int rk_typec_phy_enable(struct phynode *phynode, bool enable);
+static int rk_typec_phy_get_mode(struct phynode *phy, int *mode);
+static int rk_typec_phy_set_mode(struct phynode *phy, int mode);
+static phynode_method_t rk_typec_phy_phynode_methods[] = {
+ PHYNODEMETHOD(phynode_enable, rk_typec_phy_enable),
+ PHYNODEMETHOD(phynode_usb_get_mode, rk_typec_phy_get_mode),
+ PHYNODEMETHOD(phynode_usb_set_mode, rk_typec_phy_set_mode),
+
+ PHYNODEMETHOD_END
+};
+
+DEFINE_CLASS_1(rk_typec_phy_phynode, rk_typec_phy_phynode_class,
+ rk_typec_phy_phynode_methods,
+ sizeof(struct phynode_usb_sc), phynode_usb_class);
+
+enum RK3399_USBPHY {
+ RK3399_TYPEC_PHY_DP = 0,
+ RK3399_TYPEC_PHY_USB3,
+};
+
+static void
+rk_typec_phy_set_usb2_only(struct rk_typec_phy_softc *sc, bool usb2only)
+{
+ uint32_t reg;
+
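+ /*
+ * The GRF registers use the upper 16 bits as a write-enable mask
+ * for the corresponding lower 16 bits, hence the "<< 16" below.
+ */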
+ /* Set or clear USB2-only mode. */
+ reg = SYSCON_READ_4(sc->grf, GRF_USB3PHY_CON0(sc->phy_ctrl_id));
+ if (usb2only)
+ reg |= USB3PHY_CON0_USB2_ONLY;
+ else
+ reg &= ~USB3PHY_CON0_USB2_ONLY;
+ /* Write Mask */
+ reg |= (USB3PHY_CON0_USB2_ONLY) << 16;
+ SYSCON_WRITE_4(sc->grf, GRF_USB3PHY_CON0(sc->phy_ctrl_id), reg);
+
+ /* Enable or disable the USB3 SuperSpeed port accordingly. */
+ reg = SYSCON_READ_4(sc->grf, GRF_USB3OTG_CON1(sc->phy_ctrl_id));
+ if (usb2only)
+ reg |= USB3OTG_CON1_U3_DIS;
+ else
+ reg &= ~USB3OTG_CON1_U3_DIS;
+ /* Write Mask */
+ reg |= (USB3OTG_CON1_U3_DIS) << 16;
+ SYSCON_WRITE_4(sc->grf, GRF_USB3OTG_CON1(sc->phy_ctrl_id), reg);
+}
+
+static int
+rk_typec_phy_enable(struct phynode *phynode, bool enable)
+{
+ struct rk_typec_phy_softc *sc;
+ device_t dev;
+ intptr_t phy;
+ uint32_t reg;
+ int err, retry;
+
+ dev = phynode_get_device(phynode);
+ phy = phynode_get_id(phynode);
+ sc = device_get_softc(dev);
+
+ if (phy != RK3399_TYPEC_PHY_USB3)
+ return (ERANGE);
+
+ rk_typec_phy_set_usb2_only(sc, false);
+
+ err = clk_enable(sc->tcpdcore);
+ if (err != 0) {
+ device_printf(dev, "Could not enable clock %s\n",
+ clk_get_name(sc->tcpdcore));
+ return (ENXIO);
+ }
+ err = clk_enable(sc->tcpdphy_ref);
+ if (err != 0) {
+ device_printf(dev, "Could not enable clock %s\n",
+ clk_get_name(sc->tcpdphy_ref));
+ clk_disable(sc->tcpdcore);
+ return (ENXIO);
+ }
+
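+ /*
+ * Deassert the resets in stages: the TCPHY block first, the UPHY
+ * after the PLL and lane registers are programmed, and the PIPE
+ * reset only once the PMA reports ready.
+ */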
+ hwreset_deassert(sc->rst_tcphy);
+
+ /* 24M configuration, magic values from rockchip */
+ RK_TYPEC_PHY_WRITE(sc, PMA_CMN_CTRL1, 0x830);
+ for (int i = 0; i < 4; i++) {
+ RK_TYPEC_PHY_WRITE(sc, XCVR_DIAG_LANE_FCM_EN_MGN(i), 0x90);
+ RK_TYPEC_PHY_WRITE(sc, TX_RCVDET_EN_TMR(i), 0x960);
+ RK_TYPEC_PHY_WRITE(sc, TX_RCVDET_ST_TMR(i), 0x30);
+ }
+ reg = RK_TYPEC_PHY_READ(sc, CMN_DIAG_HSCLK_SEL);
+ reg &= ~CMN_DIAG_HSCLK_SEL_PLL_MASK;
+ reg |= CMN_DIAG_HSCLK_SEL_PLL_CONFIG;
+ RK_TYPEC_PHY_WRITE(sc, CMN_DIAG_HSCLK_SEL, reg);
+
+ /* PLL configuration, magic values from rockchip */
+ RK_TYPEC_PHY_WRITE(sc, CMN_PLL0_VCOCAL_INIT, 0xf0);
+ RK_TYPEC_PHY_WRITE(sc, CMN_PLL0_VCOCAL_ITER, 0x18);
+ RK_TYPEC_PHY_WRITE(sc, CMN_PLL0_INTDIV, 0xd0);
+ RK_TYPEC_PHY_WRITE(sc, CMN_PLL0_FRACDIV, 0x4a4a);
+ RK_TYPEC_PHY_WRITE(sc, CMN_PLL0_HIGH_THR, 0x34);
+ RK_TYPEC_PHY_WRITE(sc, CMN_PLL0_SS_CTRL1, 0x1ee);
+ RK_TYPEC_PHY_WRITE(sc, CMN_PLL0_SS_CTRL2, 0x7f03);
+ RK_TYPEC_PHY_WRITE(sc, CMN_PLL0_DSM_DIAG, 0x20);
+ RK_TYPEC_PHY_WRITE(sc, CMN_DIAG_PLL0_OVRD, 0);
+ RK_TYPEC_PHY_WRITE(sc, CMN_DIAG_PLL0_FBH_OVRD, 0);
+ RK_TYPEC_PHY_WRITE(sc, CMN_DIAG_PLL0_FBL_OVRD, 0);
+ RK_TYPEC_PHY_WRITE(sc, CMN_DIAG_PLL0_V2I_TUNE, 0x7);
+ RK_TYPEC_PHY_WRITE(sc, CMN_DIAG_PLL0_CP_TUNE, 0x45);
+ RK_TYPEC_PHY_WRITE(sc, CMN_DIAG_PLL0_LF_PROG, 0x8);
+
+ /* Configure the TX and RX line, magic values from rockchip */
+ RK_TYPEC_PHY_WRITE(sc, TX_PSC_A0(0), 0x7799);
+ RK_TYPEC_PHY_WRITE(sc, TX_PSC_A1(0), 0x7798);
+ RK_TYPEC_PHY_WRITE(sc, TX_PSC_A2(0), 0x5098);
+ RK_TYPEC_PHY_WRITE(sc, TX_PSC_A3(0), 0x5098);
+ RK_TYPEC_PHY_WRITE(sc, TX_TXCC_MGNFS_MULT_000(0), 0x0);
+ RK_TYPEC_PHY_WRITE(sc, XCVR_DIAG_BIDI_CTRL(0), 0xbf);
+
+ RK_TYPEC_PHY_WRITE(sc, RX_PSC_A0(1), 0xa6fd);
+ RK_TYPEC_PHY_WRITE(sc, RX_PSC_A1(1), 0xa6fd);
+ RK_TYPEC_PHY_WRITE(sc, RX_PSC_A2(1), 0xa410);
+ RK_TYPEC_PHY_WRITE(sc, RX_PSC_A3(1), 0x2410);
+ RK_TYPEC_PHY_WRITE(sc, RX_PSC_CAL(1), 0x23ff);
+ RK_TYPEC_PHY_WRITE(sc, RX_SIGDET_HL_FILT_TMR(1), 0x13);
+ RK_TYPEC_PHY_WRITE(sc, RX_REE_CTRL_DATA_MASK(1), 0x03e7);
+ RK_TYPEC_PHY_WRITE(sc, RX_DIAG_SIGDET_TUNE(1), 0x1004);
+ RK_TYPEC_PHY_WRITE(sc, RX_PSC_RDY(1), 0x2010);
+ RK_TYPEC_PHY_WRITE(sc, XCVR_DIAG_BIDI_CTRL(1), 0xfb);
+
+ RK_TYPEC_PHY_WRITE(sc, PMA_LANE_CFG, PIN_ASSIGN_D_F);
+
+ RK_TYPEC_PHY_WRITE(sc, DP_MODE_CTL, DP_MODE_ENTER_A2);
+
+ hwreset_deassert(sc->rst_uphy);
+
+ for (retry = 10000; retry > 0; retry--) {
+ reg = RK_TYPEC_PHY_READ(sc, PMA_CMN_CTRL1);
+ if (reg & PMA_CMN_CTRL1_READY)
+ break;
+ DELAY(10);
+ }
+ if (retry == 0) {
+ device_printf(sc->dev, "Timeout waiting for PMA\n");
+ return (ENXIO);
+ }
+
+ hwreset_deassert(sc->rst_pipe);
+
+ return (0);
+}
+
+static int
+rk_typec_phy_get_mode(struct phynode *phynode, int *mode)
+{
+ struct rk_typec_phy_softc *sc;
+ intptr_t phy;
+ device_t dev;
+
+ dev = phynode_get_device(phynode);
+ phy = phynode_get_id(phynode);
+ sc = device_get_softc(dev);
+
+ if (phy != RK3399_TYPEC_PHY_USB3)
+ return (ERANGE);
+
+ *mode = sc->mode;
+
+ return (0);
+}
+
+static int
+rk_typec_phy_set_mode(struct phynode *phynode, int mode)
+{
+ struct rk_typec_phy_softc *sc;
+ intptr_t phy;
+ device_t dev;
+
+ dev = phynode_get_device(phynode);
+ phy = phynode_get_id(phynode);
+ sc = device_get_softc(dev);
+
+ if (phy != RK3399_TYPEC_PHY_USB3)
+ return (ERANGE);
+
+ sc->mode = mode;
+
+ return (0);
+}
+
+static int
+rk_typec_phy_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "Rockchip RK3399 PHY TYPEC");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+rk_typec_phy_attach(device_t dev)
+{
+ struct rk_typec_phy_softc *sc;
+ struct phynode_init_def phy_init;
+ struct phynode *phynode;
+ phandle_t node, usb3;
+ phandle_t reg_prop[4];
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ node = ofw_bus_get_node(dev);
+
+ /*
+ * Find out which phy we are.
+ * There is no property for this, so we need to use the unit
+ * address to pick the correct GRF registers.
+ */
+ if (OF_getencprop(node, "reg", reg_prop, sizeof(reg_prop)) <= 0) {
+ device_printf(dev, "Cannot guess phy controller id\n");
+ return (ENXIO);
+ }
+ switch (reg_prop[1]) {
+ case 0xff7c0000:
+ sc->phy_ctrl_id = 0;
+ break;
+ case 0xff800000:
+ sc->phy_ctrl_id = 1;
+ break;
+ default:
+ device_printf(dev, "Unknown address %x for typec-phy\n", reg_prop[1]);
+ return (ENXIO);
+ }
+
+ if (bus_alloc_resources(dev, rk_typec_phy_spec, &sc->res) != 0) {
+ device_printf(dev, "cannot allocate resources for device\n");
+ goto fail;
+ }
+
+ if (syscon_get_by_ofw_property(dev, node,
+ "rockchip,grf", &sc->grf) != 0) {
+ device_printf(dev, "Cannot get syscon handle\n");
+ goto fail;
+ }
+
+ if (clk_get_by_ofw_name(dev, 0, "tcpdcore", &sc->tcpdcore) != 0) {
+ device_printf(dev, "Cannot get tcpdcore clock\n");
+ goto fail;
+ }
+ if (clk_get_by_ofw_name(dev, 0, "tcpdphy-ref", &sc->tcpdphy_ref) != 0) {
+ device_printf(dev, "Cannot get tcpdphy-ref clock\n");
+ goto fail;
+ }
+
+ if (hwreset_get_by_ofw_name(dev, 0, "uphy", &sc->rst_uphy) != 0) {
+ device_printf(dev, "Cannot get uphy reset\n");
+ goto fail;
+ }
+ if (hwreset_get_by_ofw_name(dev, 0, "uphy-pipe", &sc->rst_pipe) != 0) {
+ device_printf(dev, "Cannot get uphy-pipe reset\n");
+ goto fail;
+ }
+ if (hwreset_get_by_ofw_name(dev, 0, "uphy-tcphy", &sc->rst_tcphy) != 0) {
+ device_printf(dev, "Cannot get uphy-tcphy reset\n");
+ goto fail;
+ }
+
+ /*
+ * Make sure that the resets are asserted; they must be
+ * deasserted in a specific order when the PHY is enabled.
+ */
+ hwreset_assert(sc->rst_uphy);
+ hwreset_assert(sc->rst_pipe);
+ hwreset_assert(sc->rst_tcphy);
+
+ /* Set the assigned clocks parent and freq */
+ if (clk_set_assigned(dev, node) != 0) {
+ device_printf(dev, "clk_set_assigned failed\n");
+ goto fail;
+ }
+
+ /* Only usb3 port is supported right now */
+ usb3 = ofw_bus_find_child(node, "usb3-port");
+ if (usb3 == 0) {
+ device_printf(dev, "Cannot find usb3-port child node\n");
+ goto fail;
+ }
+ /*
+ * If the child isn't enabled, attach the driver
+ * but do not register the PHY.
+ */
+ if (!ofw_bus_node_status_okay(usb3))
+ return (0);
+
+ phy_init.id = RK3399_TYPEC_PHY_USB3;
+ phy_init.ofw_node = usb3;
+ phynode = phynode_create(dev, &rk_typec_phy_phynode_class, &phy_init);
+ if (phynode == NULL) {
+ device_printf(dev, "failed to create phy usb3-port\n");
+ goto fail;
+ }
+ if (phynode_register(phynode) == NULL) {
+ device_printf(dev, "failed to register phy usb3-port\n");
+ goto fail;
+ }
+
+ OF_device_register_xref(OF_xref_from_node(usb3), dev);
+
+ return (0);
+
+fail:
+ bus_release_resources(dev, rk_typec_phy_spec, &sc->res);
+
+ return (ENXIO);
+}
+
+static device_method_t rk_typec_phy_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, rk_typec_phy_probe),
+ DEVMETHOD(device_attach, rk_typec_phy_attach),
+
+ DEVMETHOD_END
+};
+
+static driver_t rk_typec_phy_driver = {
+ "rk_typec_phy",
+ rk_typec_phy_methods,
+ sizeof(struct rk_typec_phy_softc)
+};
+
+static devclass_t rk_typec_phy_devclass;
+EARLY_DRIVER_MODULE(rk_typec_phy, simplebus, rk_typec_phy_driver,
+ rk_typec_phy_devclass, 0, 0, BUS_PASS_SUPPORTDEV + BUS_PASS_ORDER_MIDDLE);
+MODULE_VERSION(rk_typec_phy, 1);
diff --git a/sys/arm64/rockchip/rk_usb2phy.c b/sys/arm64/rockchip/rk_usb2phy.c
new file mode 100644
index 000000000000..72315f1475ab
--- /dev/null
+++ b/sys/arm64/rockchip/rk_usb2phy.c
@@ -0,0 +1,417 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Emmanuel Vadot <manu@FreeBSD.Org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Rockchip USB2PHY
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/gpio.h>
+#include <machine/bus.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/ofw/ofw_subr.h>
+
+#include <dev/extres/clk/clk.h>
+#include <dev/extres/phy/phy_usb.h>
+#include <dev/extres/regulator/regulator.h>
+#include <dev/extres/syscon/syscon.h>
+
+#include "clkdev_if.h"
+#include "syscon_if.h"
+
+#define RK3399_GRF_USB20_PHY0_CON0 0x0
+#define RK3399_GRF_USB20_PHY0_CON1 0x4
+#define RK3399_GRF_USB20_PHY0_CON2 0x8
+#define RK3399_GRF_USB20_PHY0_CON3 0xC
+
+struct rk_usb2phy_reg {
+ uint32_t offset;
+ uint32_t enable_mask;
+ uint32_t disable_mask;
+};
+
+struct rk_usb2phy_regs {
+ struct rk_usb2phy_reg clk_ctl;
+};
+
+struct rk_usb2phy_regs rk3399_regs = {
+ .clk_ctl = {
+ /* Bit 4 puts the PLL in suspend; the upper half-word is the write mask. */
+ .enable_mask = 0x100000,
+ .disable_mask = 0x100010,
+ }
+};
+
+static struct ofw_compat_data compat_data[] = {
+ { "rockchip,rk3399-usb2phy", (uintptr_t)&rk3399_regs },
+ { NULL, 0 }
+};
+
+struct rk_usb2phy_softc {
+ device_t dev;
+ struct syscon *grf;
+ regulator_t phy_supply;
+ clk_t clk;
+ int mode;
+};
+
+/* Phy class and methods. */
+static int rk_usb2phy_enable(struct phynode *phynode, bool enable);
+static int rk_usb2phy_get_mode(struct phynode *phy, int *mode);
+static int rk_usb2phy_set_mode(struct phynode *phy, int mode);
+static phynode_method_t rk_usb2phy_phynode_methods[] = {
+ PHYNODEMETHOD(phynode_enable, rk_usb2phy_enable),
+ PHYNODEMETHOD(phynode_usb_get_mode, rk_usb2phy_get_mode),
+ PHYNODEMETHOD(phynode_usb_set_mode, rk_usb2phy_set_mode),
+
+ PHYNODEMETHOD_END
+};
+
+DEFINE_CLASS_1(rk_usb2phy_phynode, rk_usb2phy_phynode_class,
+ rk_usb2phy_phynode_methods,
+ sizeof(struct phynode_usb_sc), phynode_usb_class);
+
+enum RK3399_USBPHY {
+ RK3399_USBPHY_HOST = 0,
+ RK3399_USBPHY_OTG,
+};
+
+static int
+rk_usb2phy_enable(struct phynode *phynode, bool enable)
+{
+ struct rk_usb2phy_softc *sc;
+ device_t dev;
+ intptr_t phy;
+ int error;
+
+ dev = phynode_get_device(phynode);
+ phy = phynode_get_id(phynode);
+ sc = device_get_softc(dev);
+
+ if (phy != RK3399_USBPHY_HOST)
+ return (ERANGE);
+
+ if (sc->phy_supply) {
+ if (enable)
+ error = regulator_enable(sc->phy_supply);
+ else
+ error = regulator_disable(sc->phy_supply);
+ if (error != 0) {
+ device_printf(dev, "Cannot %sable the regulator\n",
+ enable ? "En" : "Dis");
+ goto fail;
+ }
+ }
+
+ return (0);
+fail:
+ return (ENXIO);
+}
+
+static int
+rk_usb2phy_get_mode(struct phynode *phynode, int *mode)
+{
+ struct rk_usb2phy_softc *sc;
+ intptr_t phy;
+ device_t dev;
+
+ dev = phynode_get_device(phynode);
+ phy = phynode_get_id(phynode);
+ sc = device_get_softc(dev);
+
+ if (phy != RK3399_USBPHY_HOST)
+ return (ERANGE);
+
+ *mode = sc->mode;
+
+ return (0);
+}
+
+static int
+rk_usb2phy_set_mode(struct phynode *phynode, int mode)
+{
+ struct rk_usb2phy_softc *sc;
+ intptr_t phy;
+ device_t dev;
+
+ dev = phynode_get_device(phynode);
+ phy = phynode_get_id(phynode);
+ sc = device_get_softc(dev);
+
+ if (phy != RK3399_USBPHY_HOST)
+ return (ERANGE);
+
+ sc->mode = mode;
+
+ return (0);
+}
+
+/* Clock class and methods. */
+struct rk_usb2phy_clk_sc {
+ device_t clkdev;
+ struct syscon *grf;
+ struct rk_usb2phy_regs *regs;
+};
+
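+/* The exported clock has a single, fixed parent. */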
+static int
+rk_usb2phy_clk_init(struct clknode *clk, device_t dev)
+{
+
+ clknode_init_parent_idx(clk, 0);
+ return (0);
+}
+
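+/*
+ * Gate the PHY output clock by suspending or resuming the PHY PLL through
+ * the GRF clock-control register.
+ */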
+static int
+rk_usb2phy_clk_set_gate(struct clknode *clk, bool enable)
+{
+ struct rk_usb2phy_clk_sc *sc;
+
+ sc = clknode_get_softc(clk);
+
+ if (enable)
+ SYSCON_WRITE_4(sc->grf, sc->regs->clk_ctl.offset,
+ sc->regs->clk_ctl.enable_mask);
+ else
+ SYSCON_WRITE_4(sc->grf, sc->regs->clk_ctl.offset,
+ sc->regs->clk_ctl.disable_mask);
+ return (0);
+}
+
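+/* The exported clock runs at a fixed 480 MHz. */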
+static int
+rk_usb2phy_clk_recalc(struct clknode *clk, uint64_t *freq)
+{
+
+ *freq = 480000000;
+
+ return (0);
+}
+
+static clknode_method_t rk_usb2phy_clk_clknode_methods[] = {
+ /* Clock node interface */
+ CLKNODEMETHOD(clknode_init, rk_usb2phy_clk_init),
+ CLKNODEMETHOD(clknode_set_gate, rk_usb2phy_clk_set_gate),
+ CLKNODEMETHOD(clknode_recalc_freq, rk_usb2phy_clk_recalc),
+ CLKNODEMETHOD_END
+};
+
+DEFINE_CLASS_1(rk_usb2phy_clk_clknode, rk_usb2phy_clk_clknode_class,
+ rk_usb2phy_clk_clknode_methods, sizeof(struct rk_usb2phy_clk_sc),
+ clknode_class);
+
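+/*
+ * The node's #clock-cells is 0, so every consumer reference maps to the
+ * single clock exported by this driver.
+ */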
+static int
+rk_usb2phy_clk_ofw_map(struct clkdom *clkdom, uint32_t ncells,
+ phandle_t *cells, struct clknode **clk)
+{
+
+ if (ncells != 0)
+ return (ERANGE);
+
+ *clk = clknode_find_by_id(clkdom, 0);
+
+ if (*clk == NULL)
+ return (ENXIO);
+ return (0);
+}
+
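+/*
+ * Export the PHY's 480 MHz output as a one-clock domain. The parent is the
+ * first entry of the node's "clocks" property, the exported name comes from
+ * "clock-output-names", and the GRF register offset is taken from "reg".
+ */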
+static int
+rk_usb2phy_export_clock(struct rk_usb2phy_softc *devsc)
+{
+ struct clknode_init_def def;
+ struct rk_usb2phy_clk_sc *sc;
+ const char **clknames;
+ struct clkdom *clkdom;
+ struct clknode *clk;
+ clk_t clk_parent;
+ phandle_t node;
+ phandle_t regs[2];
+ int i, nclocks, ncells, error;
+
+ node = ofw_bus_get_node(devsc->dev);
+
+ error = ofw_bus_parse_xref_list_get_length(node, "clocks",
+ "#clock-cells", &ncells);
+ if (error != 0 || ncells != 1) {
+ device_printf(devsc->dev, "couldn't find parent clock\n");
+ return (ENXIO);
+ }
+
+ nclocks = ofw_bus_string_list_to_array(node, "clock-output-names",
+ &clknames);
+ if (nclocks != 1)
+ return (ENXIO);
+
+ clkdom = clkdom_create(devsc->dev);
+ clkdom_set_ofw_mapper(clkdom, rk_usb2phy_clk_ofw_map);
+
+ memset(&def, 0, sizeof(def));
+ def.id = 0;
+ def.name = clknames[0];
+ def.parent_names = malloc(sizeof(char *) * ncells, M_OFWPROP, M_WAITOK);
+ for (i = 0; i < ncells; i++) {
+ error = clk_get_by_ofw_index(devsc->dev, 0, i, &clk_parent);
+ if (error != 0) {
+ device_printf(devsc->dev,
+ "cannot get parent clock %d (error %d)\n", i, error);
+ return (ENXIO);
+ }
+ def.parent_names[i] = clk_get_name(clk_parent);
+ clk_release(clk_parent);
+ }
+ def.parent_cnt = ncells;
+
+ clk = clknode_create(clkdom, &rk_usb2phy_clk_clknode_class, &def);
+ if (clk == NULL) {
+ device_printf(devsc->dev, "cannot create clknode\n");
+ return (ENXIO);
+ }
+
+ sc = clknode_get_softc(clk);
+ sc->clkdev = device_get_parent(devsc->dev);
+ sc->grf = devsc->grf;
+ sc->regs = (struct rk_usb2phy_regs *)ofw_bus_search_compatible(devsc->dev,
+ compat_data)->ocd_data;
+ if (OF_getencprop(node, "reg", regs, sizeof(regs)) <= 0)
+ return (ENXIO);
+ sc->regs->clk_ctl.offset = regs[0];
+ clknode_register(clkdom, clk);
+
+ if (clkdom_finit(clkdom) != 0) {
+ device_printf(devsc->dev,
+ "cannot finalize clkdom initialization\n");
+ return (ENXIO);
+ }
+
+ if (bootverbose)
+ clkdom_dump(clkdom);
+
+ return (0);
+}
+
+static int
+rk_usb2phy_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "Rockchip RK3399 USB2PHY");
+ return (BUS_PROBE_DEFAULT);
+}
+
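+/*
+ * Attach: look up the GRF syscon handle and the "phyclk" input clock, export
+ * the PHY output clock, then register a phy node for the "host-port" child
+ * (the only port supported for now).
+ */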
+static int
+rk_usb2phy_attach(device_t dev)
+{
+ struct rk_usb2phy_softc *sc;
+ struct phynode_init_def phy_init;
+ struct phynode *phynode;
+ phandle_t node, host;
+ int err;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ node = ofw_bus_get_node(dev);
+
+ if (syscon_get_handle_default(dev, &sc->grf) != 0) {
+ device_printf(dev, "Cannot get syscon handle\n");
+ return (ENXIO);
+ }
+
+ if (clk_get_by_ofw_name(dev, 0, "phyclk", &sc->clk) != 0) {
+ device_printf(dev, "Cannot get clock\n");
+ return (ENXIO);
+ }
+ err = clk_enable(sc->clk);
+ if (err != 0) {
+ device_printf(dev, "Could not enable clock %s\n",
+ clk_get_name(sc->clk));
+ return (ENXIO);
+ }
+
+ err = rk_usb2phy_export_clock(sc);
+ if (err != 0)
+ return (err);
+
+ /* Only host is supported right now */
+
+ host = ofw_bus_find_child(node, "host-port");
+ if (host == 0) {
+ device_printf(dev, "Cannot find host-port child node\n");
+ return (ENXIO);
+ }
+
+ if (!ofw_bus_node_status_okay(host)) {
+ device_printf(dev, "host-port isn't okay\n");
+ return (0);
+ }
+
+ regulator_get_by_ofw_property(dev, host, "phy-supply", &sc->phy_supply);
+ phy_init.id = RK3399_USBPHY_HOST;
+ phy_init.ofw_node = host;
+ phynode = phynode_create(dev, &rk_usb2phy_phynode_class, &phy_init);
+ if (phynode == NULL) {
+ device_printf(dev, "failed to create host USB2PHY\n");
+ return (ENXIO);
+ }
+ if (phynode_register(phynode) == NULL) {
+ device_printf(dev, "failed to register host USB2PHY\n");
+ return (ENXIO);
+ }
+
+ OF_device_register_xref(OF_xref_from_node(host), dev);
+
+ return (0);
+}
+
+static device_method_t rk_usb2phy_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, rk_usb2phy_probe),
+ DEVMETHOD(device_attach, rk_usb2phy_attach),
+
+ DEVMETHOD_END
+};
+
+static driver_t rk_usb2phy_driver = {
+ "rk_usb2phy",
+ rk_usb2phy_methods,
+ sizeof(struct rk_usb2phy_softc)
+};
+
+static devclass_t rk_usb2phy_devclass;
+EARLY_DRIVER_MODULE(rk_usb2phy, simplebus, rk_usb2phy_driver,
+ rk_usb2phy_devclass, 0, 0, BUS_PASS_SUPPORTDEV + BUS_PASS_ORDER_MIDDLE);
+MODULE_VERSION(rk_usb2phy, 1);