Diffstat (limited to 'sys/amd64/amd64/mem.c')
-rw-r--r--	sys/amd64/amd64/mem.c	245
1 file changed, 245 insertions, 0 deletions
diff --git a/sys/amd64/amd64/mem.c b/sys/amd64/amd64/mem.c
new file mode 100644
index 000000000000..851f2df0e6e1
--- /dev/null
+++ b/sys/amd64/amd64/mem.c
@@ -0,0 +1,245 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department, and code derived from software contributed to
+ * Berkeley by William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah $Hdr: mem.c 1.13 89/10/08$
+ */
+
+#include <sys/cdefs.h>
+/*
+ * Memory special file
+ */
+
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/fcntl.h>
+#include <sys/ioccom.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/memrange.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/signalvar.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+
+#include <machine/md_var.h>
+#include <machine/specialreg.h>
+#include <machine/vmparam.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_extern.h>
+
+#include <machine/memdev.h>
+
+/*
+ * Used in /dev/mem drivers and elsewhere
+ */
+MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");
+
+/* ARGSUSED */
+int
+memrw(struct cdev *dev, struct uio *uio, int flags)
+{
+ struct iovec *iov;
+ void *p;
+ ssize_t orig_resid;
+ vm_prot_t prot;
+ u_long v, vd;
+ u_int c;
+ int error;
+
+ error = 0;
+ orig_resid = uio->uio_resid;
+ while (uio->uio_resid > 0 && error == 0) {
+ iov = uio->uio_iov;
+ if (iov->iov_len == 0) {
+ uio->uio_iov++;
+ uio->uio_iovcnt--;
+ if (uio->uio_iovcnt < 0)
+ panic("memrw");
+ continue;
+ }
+ v = uio->uio_offset;
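+		/* Limit the transfer to the remainder of the current page. */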
+ c = ulmin(iov->iov_len, PAGE_SIZE - (u_int)(v & PAGE_MASK));
+
+ switch (dev2unit(dev)) {
+ case CDEV_MINOR_KMEM:
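+			/* For /dev/kmem, the offset v is a kernel VA. */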
+ /*
+			 * Since c is clamped to be less than or equal to
+ * PAGE_SIZE, the uiomove() call does not
+ * access past the end of the direct map.
+ */
+ if (v >= kva_layout.dmap_low &&
+ v < kva_layout.dmap_high) {
+ error = uiomove((void *)v, c, uio);
+ break;
+ }
+
+ switch (uio->uio_rw) {
+ case UIO_READ:
+ prot = VM_PROT_READ;
+ break;
+ case UIO_WRITE:
+ prot = VM_PROT_WRITE;
+				break;
+			default:
+				panic("memrw");
+			}
+
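+			/*
+			 * Verify that the whole range is mapped with the
+			 * required protection before touching it.
+			 */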
+ if (!kernacc((void *)v, c, prot)) {
+ error = EFAULT;
+ break;
+ }
+
+ /*
+ * If the extracted address is not accessible
+ * through the direct map, then we make a
+ * private (uncached) mapping because we can't
+ * depend on the existing kernel mapping
+ * remaining valid until the completion of
+ * uiomove().
+ *
+ * XXX We cannot provide access to the
+ * physical page 0 mapped into KVA.
+ */
+ v = pmap_extract(kernel_pmap, v);
+ if (v == 0) {
+ error = EFAULT;
+ break;
+ }
+ /* FALLTHROUGH */
+ case CDEV_MINOR_MEM:
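+			/*
+			 * v is now a physical address: the device offset
+			 * for /dev/mem, or the address extracted above on
+			 * the /dev/kmem fall-through path.
+			 */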
+ if (v < dmaplimit) {
+ vd = PHYS_TO_DMAP(v);
+ error = uiomove((void *)vd, c, uio);
+ break;
+ }
+ if (v > cpu_getmaxphyaddr()) {
+ error = EFAULT;
+ break;
+ }
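+			/*
+			 * Past dmaplimit but within the physical address
+			 * space: map the page transiently for the copy.
+			 */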
+ p = pmap_mapdev(v, PAGE_SIZE);
+ error = uiomove(p, c, uio);
+ pmap_unmapdev(p, PAGE_SIZE);
+ break;
+ }
+ }
+ /*
+	 * Don't return an error if any bytes were transferred; read(2)
+	 * and write(2) can return an error only if no I/O was performed.
+ */
+ if (uio->uio_resid != orig_resid)
+ error = 0;
+ return (error);
+}
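+
+/*
+ * Illustrative only (not part of the driver): a sufficiently privileged
+ * process reaches memrw() through plain read(2)/pread(2) on /dev/mem,
+ * with the file offset naming the physical address, e.g.:
+ *
+ *	char buf[64];
+ *	int fd = open("/dev/mem", O_RDONLY);
+ *
+ *	if (fd != -1 &&
+ *	    pread(fd, buf, sizeof(buf), 0x1000) == (ssize_t)sizeof(buf)) {
+ *		... buf holds the 64 bytes at physical address 0x1000 ...
+ *	}
+ */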
+
+/*
+ * Allow user processes to mmap(2) some memory sections
+ * instead of going through read(2)/write(2).
+ */
+/* ARGSUSED */
+int
+memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
+ int prot __unused, vm_memattr_t *memattr __unused)
+{
+ if (dev2unit(dev) == CDEV_MINOR_MEM) {
+ if (offset > cpu_getmaxphyaddr())
+ return (-1);
+ *paddr = offset;
+ return (0);
+ }
+ return (-1);
+}
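+
+/*
+ * Illustrative only: the handler above services the classic userland
+ * idiom of mapping a page of physical memory; the offset passed to
+ * mmap(2) is the physical address (0xfee00000 here is just an example):
+ *
+ *	int fd = open("/dev/mem", O_RDWR);
+ *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *	    fd, 0xfee00000);
+ */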
+
+/*
+ * Operations for changing memory attributes.
+ *
+ * This is basically just an ioctl shim for mem_range_attr_get
+ * and mem_range_attr_set.
+ */
+int
+memioctl_md(struct cdev *dev __unused, u_long cmd, caddr_t data, int flags,
+ struct thread *td)
+{
+ int nd, error = 0;
+ struct mem_range_op *mo = (struct mem_range_op *)data;
+ struct mem_range_desc *md;
+
+ /* is this for us? */
+ if ((cmd != MEMRANGE_GET) &&
+ (cmd != MEMRANGE_SET))
+ return (ENOTTY);
+
+ /* any chance we can handle this? */
+ if (mem_range_softc.mr_op == NULL)
+ return (EOPNOTSUPP);
+
+ /* do we have any descriptors? */
+ if (mem_range_softc.mr_ndesc == 0)
+ return (ENXIO);
+
+ switch (cmd) {
+ case MEMRANGE_GET:
+ nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
+ if (nd > 0) {
+ md = (struct mem_range_desc *)
+ malloc(nd * sizeof(struct mem_range_desc),
+ M_MEMDESC, M_WAITOK);
+ error = mem_range_attr_get(md, &nd);
+ if (!error)
+ error = copyout(md, mo->mo_desc,
+ nd * sizeof(struct mem_range_desc));
+ free(md, M_MEMDESC);
+		} else
+			nd = mem_range_softc.mr_ndesc;
+ mo->mo_arg[0] = nd;
+ break;
+
+ case MEMRANGE_SET:
+ md = (struct mem_range_desc *)malloc(sizeof(struct mem_range_desc),
+ M_MEMDESC, M_WAITOK);
+ error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
+ /* clamp description string */
+ md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
+ if (error == 0)
+ error = mem_range_attr_set(md, &mo->mo_arg[0]);
+ free(md, M_MEMDESC);
+ break;
+ }
+ return (error);
+}
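+
+/*
+ * Illustrative only: userland reaches memioctl_md() via the
+ * MEMRANGE_GET/MEMRANGE_SET ioctls (memcontrol(8) is the stock
+ * consumer).  As the GET handler above shows, passing mo_arg[0] == 0
+ * simply reports how many descriptors are available:
+ *
+ *	struct mem_range_op mo;
+ *	int fd = open("/dev/mem", O_RDONLY);
+ *
+ *	mo.mo_desc = NULL;
+ *	mo.mo_arg[0] = 0;
+ *	if (fd != -1 && ioctl(fd, MEMRANGE_GET, &mo) == 0)
+ *		printf("%d memory range descriptors\n", mo.mo_arg[0]);
+ */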