aboutsummaryrefslogtreecommitdiff
path: root/sys
diff options
context:
space:
mode:
authorConrad Meyer <cem@FreeBSD.org>2015-08-24 19:32:03 +0000
committerConrad Meyer <cem@FreeBSD.org>2015-08-24 19:32:03 +0000
commite974f91c38cfb7a97b684082089d6dae948a68fd (patch)
treee39b5d8a8f10f490748aeb45dc074c2ddc3eb06b /sys
parent3166be0fb42f6d600da7dd67bb1b2c6ce5bda529 (diff)
downloadsrc-e974f91c38cfb7a97b684082089d6dae948a68fd.tar.gz
src-e974f91c38cfb7a97b684082089d6dae948a68fd.zip
Import ioat(4) driver
I/OAT is also referred to as Crystal Beach DMA and is a Platform Storage Extension (PSE) on some Intel server platforms. This driver currently supports DMA descriptors only and is part of a larger effort to upstream an interconnect between multiple systems using the Non-Transparent Bridge (NTB) PSE. For now, this driver is only built on AMD64 platforms. It may be ported to work on i386 later, if that is desired. The hardware is exclusive to x86. Further documentation on ioat(4), including API documentation and usage, can be found in the new manual page. Bring in a test tool, ioatcontrol(8), in tools/tools/ioat. The test tool is not hooked up to the build and is not intended for end users. Submitted by: jimharris, Carl Delsey <carl.r.delsey@intel.com> Reviewed by: jimharris (reviewed my changes) Approved by: markj (mentor) Relnotes: yes Sponsored by: Intel Sponsored by: EMC / Isilon Storage Division Differential Revision: https://reviews.freebsd.org/D3456
Notes
Notes: svn path=/head/; revision=287117
Diffstat (limited to 'sys')
-rw-r--r--sys/amd64/conf/NOTES1
-rw-r--r--sys/conf/files.amd642
-rw-r--r--sys/dev/ioat/ioat.c1009
-rw-r--r--sys/dev/ioat/ioat.h77
-rw-r--r--sys/dev/ioat/ioat_hw.h104
-rw-r--r--sys/dev/ioat/ioat_internal.h447
-rw-r--r--sys/dev/ioat/ioat_test.c256
-rw-r--r--sys/dev/ioat/ioat_test.h46
-rw-r--r--sys/modules/Makefile2
-rw-r--r--sys/modules/ioat/Makefile15
10 files changed, 1959 insertions, 0 deletions
diff --git a/sys/amd64/conf/NOTES b/sys/amd64/conf/NOTES
index bfcf9635f98a..3e7876aaaa44 100644
--- a/sys/amd64/conf/NOTES
+++ b/sys/amd64/conf/NOTES
@@ -553,6 +553,7 @@ device tpm
device padlock_rng # VIA Padlock RNG
device rdrand_rng # Intel Bull Mountain RNG
device aesni # AES-NI OpenCrypto module
+device ioat # Intel I/OAT DMA engine
#
# Laptop/Notebook options:
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index 8451e0071fa3..bd708254a9b5 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -205,6 +205,8 @@ dev/if_ndis/if_ndis_pccard.c optional ndis pccard
dev/if_ndis/if_ndis_pci.c optional ndis cardbus | ndis pci
dev/if_ndis/if_ndis_usb.c optional ndis usb
dev/io/iodev.c optional io
+dev/ioat/ioat.c optional ioat pci
+dev/ioat/ioat_test.c optional ioat pci
dev/ipmi/ipmi.c optional ipmi
dev/ipmi/ipmi_acpi.c optional ipmi acpi
dev/ipmi/ipmi_isa.c optional ipmi isa
diff --git a/sys/dev/ioat/ioat.c b/sys/dev/ioat/ioat.c
new file mode 100644
index 000000000000..a3bd5ce40670
--- /dev/null
+++ b/sys/dev/ioat/ioat.c
@@ -0,0 +1,1009 @@
+/*-
+ * Copyright (C) 2012 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/ioccom.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/rman.h>
+#include <sys/sysctl.h>
+#include <sys/time.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/stdarg.h>
+
+#include "ioat.h"
+#include "ioat_hw.h"
+#include "ioat_internal.h"
+
+static int ioat_probe(device_t device);
+static int ioat_attach(device_t device);
+static int ioat_detach(device_t device);
+static int ioat3_attach(device_t device);
+static int ioat_map_pci_bar(struct ioat_softc *ioat);
+static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
+ int error);
+static int ioat_interrupt_setup(struct ioat_softc *ioat);
+static void ioat_interrupt_handler(void *arg);
+static void ioat_process_events(struct ioat_softc *ioat);
+static inline uint32_t ioat_get_active(struct ioat_softc *ioat);
+static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat);
+static void ioat_free_ring_entry(struct ioat_softc *ioat,
+ struct ioat_descriptor *desc);
+static struct ioat_descriptor * ioat_alloc_ring_entry(struct ioat_softc *ioat);
+static int ioat_reserve_space_and_lock(struct ioat_softc *ioat, int num_descs);
+static struct ioat_descriptor * ioat_get_ring_entry(struct ioat_softc *ioat,
+ uint32_t index);
+static boolean_t resize_ring(struct ioat_softc *ioat, int order);
+static void ioat_timer_callback(void *arg);
+static void dump_descriptor(void *hw_desc);
+static void ioat_submit_single(struct ioat_softc *ioat);
+static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg,
+ int error);
+static int ioat_reset_hw(struct ioat_softc *ioat);
+static void ioat_setup_sysctl(device_t device);
+
+/* Malloc type for all driver allocations (ring array, descriptors). */
+MALLOC_DEFINE(M_IOAT, "ioat", "ioat driver memory allocations");
+/* Root of the hw.ioat sysctl tree; per-device nodes hang off dev.ioat. */
+SYSCTL_NODE(_hw, OID_AUTO, ioat, CTLFLAG_RD, 0, "ioat node");
+
+static int g_force_legacy_interrupts;
+SYSCTL_INT(_hw_ioat, OID_AUTO, force_legacy_interrupts, CTLFLAG_RDTUN,
+ &g_force_legacy_interrupts, 0, "Set to non-zero to force MSI-X disabled");
+
+static int g_ioat_debug_level = 0;
+SYSCTL_INT(_hw_ioat, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ioat_debug_level,
+ 0, "Set log level (0-3) for ioat(4). Higher is more verbose.");
+
+/*
+ * OS <-> Driver interface structures
+ */
+/* newbus method table: probe/attach/detach only; no suspend/resume. */
+static device_method_t ioat_pci_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, ioat_probe),
+ DEVMETHOD(device_attach, ioat_attach),
+ DEVMETHOD(device_detach, ioat_detach),
+ { 0, 0 }
+};
+
+static driver_t ioat_pci_driver = {
+ "ioat",
+ ioat_pci_methods,
+ sizeof(struct ioat_softc),
+};
+
+static devclass_t ioat_devclass;
+DRIVER_MODULE(ioat, pci, ioat_pci_driver, ioat_devclass, 0, 0);
+
+/*
+ * Private data structures
+ */
+/* Global channel table, filled in attach order; indexed by ioat_get_dmaengine(). */
+static struct ioat_softc *ioat_channel[IOAT_MAX_CHANNELS];
+/* Count of attached channels; also exported read-only via sysctl. */
+static int ioat_channel_index = 0;
+SYSCTL_INT(_hw_ioat, OID_AUTO, channels, CTLFLAG_RD, &ioat_channel_index, 0,
+ "Number of IOAT channels attached");
+
+/*
+ * PCI ID table for supported I/OAT channels.  Each entry's type field is
+ * the combined devid/vendor dword as returned by pci_get_devid() (Intel
+ * vendor 0x8086 in the low 16 bits).  Terminated by an all-zero sentinel.
+ * Platform codenames: TBG=Tylersburg, JSF=Jasper Forest, SNB=Sandy Bridge,
+ * IVB=Ivy Bridge, HSW=Haswell, BWD=Broadwell-DE?, BDXDE=Broadwell-DE --
+ * NOTE(review): codename expansions inferred from the strings; confirm.
+ */
+static struct _pcsid
+{
+ u_int32_t type;
+ const char *desc;
+} pci_ids[] = {
+ { 0x34308086, "TBG IOAT Ch0" },
+ { 0x34318086, "TBG IOAT Ch1" },
+ { 0x34328086, "TBG IOAT Ch2" },
+ { 0x34338086, "TBG IOAT Ch3" },
+ { 0x34298086, "TBG IOAT Ch4" },
+ { 0x342a8086, "TBG IOAT Ch5" },
+ { 0x342b8086, "TBG IOAT Ch6" },
+ { 0x342c8086, "TBG IOAT Ch7" },
+
+ { 0x37108086, "JSF IOAT Ch0" },
+ { 0x37118086, "JSF IOAT Ch1" },
+ { 0x37128086, "JSF IOAT Ch2" },
+ { 0x37138086, "JSF IOAT Ch3" },
+ { 0x37148086, "JSF IOAT Ch4" },
+ { 0x37158086, "JSF IOAT Ch5" },
+ { 0x37168086, "JSF IOAT Ch6" },
+ { 0x37178086, "JSF IOAT Ch7" },
+ { 0x37188086, "JSF IOAT Ch0 (RAID)" },
+ { 0x37198086, "JSF IOAT Ch1 (RAID)" },
+
+ { 0x3c208086, "SNB IOAT Ch0" },
+ { 0x3c218086, "SNB IOAT Ch1" },
+ { 0x3c228086, "SNB IOAT Ch2" },
+ { 0x3c238086, "SNB IOAT Ch3" },
+ { 0x3c248086, "SNB IOAT Ch4" },
+ { 0x3c258086, "SNB IOAT Ch5" },
+ { 0x3c268086, "SNB IOAT Ch6" },
+ { 0x3c278086, "SNB IOAT Ch7" },
+ { 0x3c2e8086, "SNB IOAT Ch0 (RAID)" },
+ { 0x3c2f8086, "SNB IOAT Ch1 (RAID)" },
+
+ { 0x0e208086, "IVB IOAT Ch0" },
+ { 0x0e218086, "IVB IOAT Ch1" },
+ { 0x0e228086, "IVB IOAT Ch2" },
+ { 0x0e238086, "IVB IOAT Ch3" },
+ { 0x0e248086, "IVB IOAT Ch4" },
+ { 0x0e258086, "IVB IOAT Ch5" },
+ { 0x0e268086, "IVB IOAT Ch6" },
+ { 0x0e278086, "IVB IOAT Ch7" },
+ { 0x0e2e8086, "IVB IOAT Ch0 (RAID)" },
+ { 0x0e2f8086, "IVB IOAT Ch1 (RAID)" },
+
+ { 0x2f208086, "HSW IOAT Ch0" },
+ { 0x2f218086, "HSW IOAT Ch1" },
+ { 0x2f228086, "HSW IOAT Ch2" },
+ { 0x2f238086, "HSW IOAT Ch3" },
+ { 0x2f248086, "HSW IOAT Ch4" },
+ { 0x2f258086, "HSW IOAT Ch5" },
+ { 0x2f268086, "HSW IOAT Ch6" },
+ { 0x2f278086, "HSW IOAT Ch7" },
+ { 0x2f2e8086, "HSW IOAT Ch0 (RAID)" },
+ { 0x2f2f8086, "HSW IOAT Ch1 (RAID)" },
+
+ { 0x0c508086, "BWD IOAT Ch0" },
+ { 0x0c518086, "BWD IOAT Ch1" },
+ { 0x0c528086, "BWD IOAT Ch2" },
+ { 0x0c538086, "BWD IOAT Ch3" },
+
+ { 0x6f508086, "BDXDE IOAT Ch0" },
+ { 0x6f518086, "BDXDE IOAT Ch1" },
+ { 0x6f528086, "BDXDE IOAT Ch2" },
+ { 0x6f538086, "BDXDE IOAT Ch3" },
+
+ { 0x00000000, NULL }
+};
+
+/*
+ * OS <-> Driver linkage functions
+ */
+/*
+ * newbus probe: match the device's PCI ID against the pci_ids table and
+ * set the probe description on a hit.  Returns 0 on a match, ENXIO
+ * otherwise.
+ */
+static int
+ioat_probe(device_t device)
+{
+	struct _pcsid *entry;
+	u_int32_t devid;
+
+	devid = pci_get_devid(device);
+	entry = pci_ids;
+	while (entry->type != 0) {
+		if (entry->type == devid) {
+			device_set_desc(device, entry->desc);
+			return (0);
+		}
+		entry++;
+	}
+	return (ENXIO);
+}
+
+/*
+ * newbus attach: map BAR 0, read the hardware version, set up the
+ * interrupt, perform version-3 initialization, and publish the channel
+ * in the global table.  On any failure the partially-initialized device
+ * is torn down via ioat_detach().  Returns 0 or an errno.
+ */
+static int
+ioat_attach(device_t device)
+{
+	struct ioat_softc *ioat;
+	int error;
+
+	ioat = DEVICE2SOFTC(device);
+	ioat->device = device;
+
+	error = ioat_map_pci_bar(ioat);
+	if (error != 0)
+		goto err;
+
+	ioat->version = ioat_read_cbver(ioat);
+	/* Propagate interrupt-setup failures instead of ignoring them. */
+	error = ioat_interrupt_setup(ioat);
+	if (error != 0)
+		goto err;
+
+	/* Pre-3.0 hardware uses a different descriptor model; unsupported. */
+	if (ioat->version < IOAT_VER_3_0) {
+		error = ENODEV;
+		goto err;
+	}
+
+	error = ioat3_attach(device);
+	if (error != 0)
+		goto err;
+
+	error = pci_enable_busmaster(device);
+	if (error != 0)
+		goto err;
+
+	/*
+	 * Bound the global channel table: without this check the 33rd
+	 * channel would write past ioat_channel[IOAT_MAX_CHANNELS].
+	 */
+	if (ioat_channel_index >= IOAT_MAX_CHANNELS) {
+		ioat_log_message(0, "Too many I/OAT channels\n");
+		error = ENXIO;
+		goto err;
+	}
+	ioat_channel[ioat_channel_index++] = ioat;
+
+err:
+	if (error != 0)
+		ioat_detach(device);
+	return (error);
+}
+
+/*
+ * newbus detach: release every resource attach (or a partial attach) may
+ * have acquired.  Each teardown step is guarded so this is safe to call
+ * from the attach error path with only some resources allocated.
+ */
+static int
+ioat_detach(device_t device)
+{
+ struct ioat_softc *ioat;
+ uint32_t i;
+
+ ioat = DEVICE2SOFTC(device);
+ /* Wait for any in-flight watchdog callout before freeing state. */
+ callout_drain(&ioat->timer);
+
+ pci_disable_busmaster(device);
+
+ if (ioat->pci_resource != NULL)
+ bus_release_resource(device, SYS_RES_MEMORY,
+ ioat->pci_resource_id, ioat->pci_resource);
+
+ /* Free each ring slot, then the pointer array itself. */
+ if (ioat->ring != NULL) {
+ for (i = 0; i < (1 << ioat->ring_size_order); i++)
+ ioat_free_ring_entry(ioat, ioat->ring[i]);
+ free(ioat->ring, M_IOAT);
+ }
+
+ /* Completion-status area: unload, free, then destroy its tag. */
+ if (ioat->comp_update != NULL) {
+ bus_dmamap_unload(ioat->comp_update_tag, ioat->comp_update_map);
+ bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update,
+ ioat->comp_update_map);
+ bus_dma_tag_destroy(ioat->comp_update_tag);
+ }
+
+ bus_dma_tag_destroy(ioat->hw_desc_tag);
+
+ /* Tear down the interrupt handler before releasing its IRQ. */
+ if (ioat->tag != NULL)
+ bus_teardown_intr(device, ioat->res, ioat->tag);
+
+ if (ioat->res != NULL)
+ bus_release_resource(device, SYS_RES_IRQ,
+ rman_get_rid(ioat->res), ioat->res);
+
+ pci_release_msi(device);
+
+ return (0);
+}
+
+/*
+ * Post-reset sanity check: submit a single null descriptor and poll
+ * (up to 100 x 1us) for the channel to report idle.  Returns 0 on
+ * success; logs CHANERR and returns ENXIO if the channel never starts.
+ */
+static int
+ioat3_selftest(struct ioat_softc *ioat)
+{
+ uint64_t status;
+ uint32_t chanerr;
+ int i;
+
+ ioat_acquire(&ioat->dmaengine);
+ ioat_null(&ioat->dmaengine, NULL, NULL, 0);
+ ioat_release(&ioat->dmaengine);
+
+ for (i = 0; i < 100; i++) {
+ DELAY(1);
+ status = ioat_get_chansts(ioat);
+ if (is_ioat_idle(status))
+ return (0);
+ }
+
+ /* Channel did not go idle; report its status and error register. */
+ chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
+ ioat_log_message(0, "could not start channel: "
+ "status = %#jx error = %x\n", (uintmax_t)status, chanerr);
+ return (ENXIO);
+}
+
+/*
+ * Initialize Hardware
+ */
+/*
+ * Version-3 hardware initialization: read capabilities, create the DMA
+ * memory for the completion-status area and the descriptor ring, link
+ * the hardware descriptors into a circle, reset the channel, and run a
+ * selftest.  Errors are returned without local cleanup -- the caller
+ * (ioat_attach) invokes ioat_detach() on failure, which frees whatever
+ * was allocated here.
+ *
+ * NOTE(review): both bus_dma_tag_create() return values are ignored;
+ * a failure would surface later as a NULL tag -- consider checking.
+ */
+static int
+ioat3_attach(device_t device)
+{
+ struct ioat_softc *ioat;
+ struct ioat_descriptor **ring;
+ struct ioat_descriptor *next;
+ struct ioat_dma_hw_descriptor *dma_hw_desc;
+ uint32_t capabilities;
+ int i, num_descriptors;
+ int error;
+ uint8_t xfercap;
+
+ error = 0;
+ ioat = DEVICE2SOFTC(device);
+ capabilities = ioat_read_dmacapability(ioat);
+
+ xfercap = ioat_read_xfercap(ioat);
+
+ /* Only bits [4:0] are valid. */
+ xfercap &= 0x1f;
+ /* Hardware transfer cap is encoded as a power of two. */
+ ioat->max_xfer_size = 1 << xfercap;
+
+ /* TODO: need to check DCA here if we ever do XOR/PQ */
+
+ mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF);
+ mtx_init(&ioat->cleanup_lock, "ioat_process_events", NULL, MTX_DEF);
+ callout_init(&ioat->timer, CALLOUT_MPSAFE);
+
+ ioat->is_resize_pending = FALSE;
+ ioat->is_completion_pending = FALSE;
+ ioat->is_reset_pending = FALSE;
+ ioat->is_channel_running = FALSE;
+ ioat->is_waiting_for_ack = FALSE;
+
+ /* One 8-byte, 8-byte-aligned area the HW writes completion status to. */
+ bus_dma_tag_create(bus_get_dma_tag(ioat->device), sizeof(uint64_t), 0x0,
+ BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
+ sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL,
+ &ioat->comp_update_tag);
+
+ error = bus_dmamem_alloc(ioat->comp_update_tag,
+ (void **)&ioat->comp_update, BUS_DMA_ZERO, &ioat->comp_update_map);
+ if (ioat->comp_update == NULL)
+ return (ENOMEM);
+
+ error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map,
+ ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat,
+ 0);
+ if (error != 0)
+ return (error);
+
+ ioat->ring_size_order = IOAT_MIN_ORDER;
+
+ num_descriptors = 1 << ioat->ring_size_order;
+
+ /* Hardware descriptors must be 64-byte (0x40) aligned. */
+ bus_dma_tag_create(bus_get_dma_tag(ioat->device), 0x40, 0x0,
+ BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
+ sizeof(struct ioat_dma_hw_descriptor), 1,
+ sizeof(struct ioat_dma_hw_descriptor), 0, NULL, NULL,
+ &ioat->hw_desc_tag);
+
+ ioat->ring = malloc(num_descriptors * sizeof(*ring), M_IOAT,
+ M_ZERO | M_NOWAIT);
+ if (ioat->ring == NULL)
+ return (ENOMEM);
+
+ ring = ioat->ring;
+ for (i = 0; i < num_descriptors; i++) {
+ ring[i] = ioat_alloc_ring_entry(ioat);
+ if (ring[i] == NULL)
+ return (ENOMEM);
+
+ ring[i]->id = i;
+ }
+
+ /* Chain each hardware descriptor to its successor's bus address... */
+ for (i = 0; i < num_descriptors - 1; i++) {
+ next = ring[i + 1];
+ dma_hw_desc = ring[i]->u.dma;
+
+ dma_hw_desc->next = next->hw_desc_bus_addr;
+ }
+
+ /* ...and close the circle: last descriptor points back to the first. */
+ ring[i]->u.dma->next = ring[0]->hw_desc_bus_addr;
+
+ ioat->head = 0;
+ ioat->tail = 0;
+ ioat->last_seen = 0;
+
+ error = ioat_reset_hw(ioat);
+ if (error != 0)
+ return (error);
+
+ /* Point the channel at the completion area and the ring, then start. */
+ ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
+ ioat_write_chancmp(ioat, ioat->comp_update_bus_addr);
+ ioat_write_chainaddr(ioat, ring[0]->hw_desc_bus_addr);
+
+ error = ioat3_selftest(ioat);
+ if (error != 0)
+ return (error);
+
+ ioat_process_events(ioat);
+ ioat_setup_sysctl(device);
+ return (0);
+}
+
+/*
+ * Map the device's BAR 0 register window and cache the bus-space tag
+ * and handle used by the register accessors.  Returns 0 or ENODEV.
+ */
+static int
+ioat_map_pci_bar(struct ioat_softc *ioat)
+{
+
+	ioat->pci_resource_id = PCIR_BAR(0);
+	ioat->pci_resource = bus_alloc_resource(ioat->device, SYS_RES_MEMORY,
+	    &ioat->pci_resource_id, 0, ~0, 1, RF_ACTIVE);
+	if (ioat->pci_resource == NULL) {
+		ioat_log_message(0, "unable to allocate pci resource\n");
+		return (ENODEV);
+	}
+
+	ioat->pci_bus_tag = rman_get_bustag(ioat->pci_resource);
+	ioat->pci_bus_handle = rman_get_bushandle(ioat->pci_resource);
+	return (0);
+}
+
+/* busdma load callback: record the completion area's bus address. */
+static void
+ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
+{
+	struct ioat_softc *sc;
+
+	sc = arg;
+	sc->comp_update_bus_addr = seg[0].ds_addr;
+}
+
+/* Generic busdma load callback: store the first segment's bus address. */
+static void
+ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+	bus_addr_t *addrp;
+
+	addrp = arg;
+	*addrp = segs[0].ds_addr;
+}
+
+/*
+ * Interrupt setup and handlers
+ */
+/*
+ * Allocate and wire the channel interrupt.  Prefers a single MSI-X
+ * vector unless the hw.ioat.force_legacy_interrupts tunable is set, in
+ * which case (or if MSI-X allocation fails) a shareable legacy INTx is
+ * used.  Finishes by enabling the master interrupt in INTRCTRL.
+ */
+static int
+ioat_interrupt_setup(struct ioat_softc *ioat)
+{
+ uint32_t num_vectors;
+ int error;
+ boolean_t use_msix;
+ boolean_t force_legacy_interrupts;
+
+ use_msix = FALSE;
+ /* NOTE(review): set but never read below; candidate for removal. */
+ force_legacy_interrupts = FALSE;
+
+ if (!g_force_legacy_interrupts && pci_msix_count(ioat->device) >= 1) {
+ num_vectors = 1;
+ pci_alloc_msix(ioat->device, &num_vectors);
+ if (num_vectors == 1)
+ use_msix = TRUE;
+ }
+
+ if (use_msix) {
+ /* MSI-X vectors are rid 1..n; exclusive to this device. */
+ ioat->rid = 1;
+ ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
+ &ioat->rid, RF_ACTIVE);
+ } else {
+ /* Legacy INTx is rid 0 and may be shared with other devices. */
+ ioat->rid = 0;
+ ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
+ &ioat->rid, RF_SHAREABLE | RF_ACTIVE);
+ }
+ if (ioat->res == NULL) {
+ ioat_log_message(0, "bus_alloc_resource failed\n");
+ return (ENOMEM);
+ }
+
+ ioat->tag = NULL;
+ error = bus_setup_intr(ioat->device, ioat->res, INTR_MPSAFE |
+ INTR_TYPE_MISC, NULL, ioat_interrupt_handler, ioat, &ioat->tag);
+ if (error != 0) {
+ ioat_log_message(0, "bus_setup_intr failed\n");
+ return (error);
+ }
+
+ ioat_write_intrctrl(ioat, IOAT_INTRCTRL_MASTER_INT_EN);
+ return (0);
+}
+
+/* Interrupt handler: all completion work happens in the event loop. */
+static void
+ioat_interrupt_handler(void *arg)
+{
+
+	ioat_process_events(arg);
+}
+
+/*
+ * Completion processing (runs from the interrupt handler and the timer).
+ * Reads the hardware-written completion-status area, then walks the ring
+ * from tail forward, invoking each descriptor's callback, until the
+ * descriptor whose bus address matches the reported completion status.
+ * Holds cleanup_lock for the whole walk; callbacks run with it held.
+ */
+static void
+ioat_process_events(struct ioat_softc *ioat)
+{
+ struct ioat_descriptor *desc;
+ struct bus_dmadesc *dmadesc;
+ uint64_t comp_update, status;
+ /* NOTE(review): 'completed' is assigned once and never used. */
+ uint32_t completed;
+
+ mtx_lock(&ioat->cleanup_lock);
+
+ completed = 0;
+ /* The hardware DMA-writes this quadword; low bits are flags. */
+ comp_update = *ioat->comp_update;
+ status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;
+
+ ioat_log_message(3, "%s\n", __func__);
+
+ /* Nothing new completed since the last pass. */
+ if (status == ioat->last_seen) {
+ mtx_unlock(&ioat->cleanup_lock);
+ return;
+ }
+
+ /*
+ * Retire descriptors up to and including the one the hardware
+ * reported as last completed.  NOTE(review): if 'status' never
+ * matches a descriptor's bus address this loop would walk past
+ * head; relies on the hardware reporting a valid ring address.
+ */
+ while (1) {
+ desc = ioat_get_ring_entry(ioat, ioat->tail);
+ dmadesc = &desc->bus_dmadesc;
+ ioat_log_message(3, "completing desc %d\n", ioat->tail);
+
+ if (dmadesc->callback_fn)
+ (*dmadesc->callback_fn)(dmadesc->callback_arg);
+
+ ioat->tail++;
+ if (desc->hw_desc_bus_addr == status)
+ break;
+ }
+
+ ioat->last_seen = desc->hw_desc_bus_addr;
+
+ /* Ring drained: stop expecting completions, slow the watchdog. */
+ if (ioat->head == ioat->tail) {
+ ioat->is_completion_pending = FALSE;
+ callout_reset(&ioat->timer, 5 * hz, ioat_timer_callback, ioat);
+ }
+
+ ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
+ mtx_unlock(&ioat->cleanup_lock);
+}
+
+/*
+ * User API functions
+ */
+/*
+ * Public API: look up an attached channel by index.  Returns NULL when
+ * the index is beyond the number of attached channels.
+ */
+bus_dmaengine_t
+ioat_get_dmaengine(uint32_t index)
+{
+
+	if (index >= ioat_channel_index)
+		return (NULL);
+	return (&ioat_channel[index]->dmaengine);
+}
+
+/*
+ * Public API: enter the submission critical section.  Must be paired
+ * with ioat_release(); operations queued in between are submitted as a
+ * batch when the lock is dropped.
+ */
+void
+ioat_acquire(bus_dmaengine_t dmaengine)
+{
+	struct ioat_softc *sc;
+
+	sc = to_ioat_softc(dmaengine);
+	mtx_lock(&sc->submit_lock);
+	ioat_log_message(3, "%s\n", __func__);
+}
+
+/*
+ * Public API: leave the submission critical section.  Writes the current
+ * head index to DMACOUNT (the hardware doorbell) before unlocking, which
+ * kicks off everything queued since ioat_acquire().
+ */
+void
+ioat_release(bus_dmaengine_t dmaengine)
+{
+	struct ioat_softc *sc;
+
+	ioat_log_message(3, "%s\n", __func__);
+	sc = to_ioat_softc(dmaengine);
+	ioat_write_2(sc, IOAT_DMACOUNT_OFFSET, (uint16_t)sc->head);
+	mtx_unlock(&sc->submit_lock);
+}
+
+/*
+ * Public API: queue a null (no-op) descriptor.  The hardware processes
+ * it without moving data -- used by the selftest and to generate an
+ * interrupt/completion point.  Caller must hold the engine via
+ * ioat_acquire().  Returns the descriptor, or NULL if no ring space.
+ */
+struct bus_dmadesc *
+ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn,
+ void *callback_arg, uint32_t flags)
+{
+ struct ioat_softc *ioat;
+ struct ioat_descriptor *desc;
+ struct ioat_dma_hw_descriptor *hw_desc;
+
+ KASSERT((flags & ~DMA_ALL_FLAGS) == 0, ("Unrecognized flag(s): %#x",
+ flags & ~DMA_ALL_FLAGS));
+
+ ioat = to_ioat_softc(dmaengine);
+
+ if (ioat_reserve_space_and_lock(ioat, 1) != 0)
+ return (NULL);
+
+ ioat_log_message(3, "%s\n", __func__);
+
+ desc = ioat_get_ring_entry(ioat, ioat->head);
+ hw_desc = desc->u.dma;
+
+ /* Null op with completion-status writeback enabled. */
+ hw_desc->u.control_raw = 0;
+ hw_desc->u.control.null = 1;
+ hw_desc->u.control.completion_update = 1;
+
+ if ((flags & DMA_INT_EN) != 0)
+ hw_desc->u.control.int_enable = 1;
+
+ /* Size/addresses are ignored for null ops but set to safe values. */
+ hw_desc->size = 8;
+ hw_desc->src_addr = 0;
+ hw_desc->dest_addr = 0;
+
+ desc->bus_dmadesc.callback_fn = callback_fn;
+ desc->bus_dmadesc.callback_arg = callback_arg;
+
+ ioat_submit_single(ioat);
+ return (&desc->bus_dmadesc);
+}
+
+/*
+ * Public API: queue a DMA memory copy of 'len' bytes from bus address
+ * 'src' to 'dst'.  Caller must hold the engine via ioat_acquire() and
+ * len must not exceed the hardware's max_xfer_size.  Returns the
+ * descriptor, or NULL on oversize requests or when no ring space is
+ * available.
+ */
+struct bus_dmadesc *
+ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst,
+ bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn,
+ void *callback_arg, uint32_t flags)
+{
+ struct ioat_descriptor *desc;
+ struct ioat_dma_hw_descriptor *hw_desc;
+ struct ioat_softc *ioat;
+
+ KASSERT((flags & ~DMA_ALL_FLAGS) == 0, ("Unrecognized flag(s): %#x",
+ flags & ~DMA_ALL_FLAGS));
+
+ ioat = to_ioat_softc(dmaengine);
+
+ if (len > ioat->max_xfer_size) {
+ ioat_log_message(0, "%s: max_xfer_size = %d, requested = %d\n",
+ __func__, ioat->max_xfer_size, (int)len);
+ return (NULL);
+ }
+
+ if (ioat_reserve_space_and_lock(ioat, 1) != 0)
+ return (NULL);
+
+ ioat_log_message(3, "%s\n", __func__);
+
+ desc = ioat_get_ring_entry(ioat, ioat->head);
+ hw_desc = desc->u.dma;
+
+ /* Plain copy op (null bit clear) with completion writeback. */
+ hw_desc->u.control_raw = 0;
+ hw_desc->u.control.completion_update = 1;
+
+ if ((flags & DMA_INT_EN) != 0)
+ hw_desc->u.control.int_enable = 1;
+
+ hw_desc->size = len;
+ hw_desc->src_addr = src;
+ hw_desc->dest_addr = dst;
+
+ if (g_ioat_debug_level >= 3)
+ dump_descriptor(hw_desc);
+
+ desc->bus_dmadesc.callback_fn = callback_fn;
+ desc->bus_dmadesc.callback_arg = callback_arg;
+
+ ioat_submit_single(ioat);
+ return (&desc->bus_dmadesc);
+}
+
+/*
+ * Ring Management
+ */
+/* Number of descriptors submitted (head) but not yet retired (tail). */
+static inline uint32_t
+ioat_get_active(struct ioat_softc *ioat)
+{
+	uint32_t mask;
+
+	mask = (1 << ioat->ring_size_order) - 1;
+	return ((ioat->head - ioat->tail) & mask);
+}
+
+/*
+ * Free slots in the ring.  One slot is held back so a full ring is
+ * distinguishable from an empty one.
+ */
+static inline uint32_t
+ioat_get_ring_space(struct ioat_softc *ioat)
+{
+	uint32_t size;
+
+	size = 1 << ioat->ring_size_order;
+	return (size - ioat_get_active(ioat) - 1);
+}
+
+/*
+ * Allocate one software descriptor plus its DMA-able hardware
+ * descriptor, load the latter to learn its bus address, and link the
+ * two.  Returns NULL on allocation failure (M_NOWAIT context).
+ *
+ * NOTE(review): every call stores its dmamap into the single softc-wide
+ * ioat->hw_desc_map, overwriting the previous descriptor's map -- the
+ * earlier maps appear to be leaked; a per-descriptor map field would
+ * fix this but requires changing struct ioat_descriptor.
+ */
+static struct ioat_descriptor *
+ioat_alloc_ring_entry(struct ioat_softc *ioat)
+{
+ struct ioat_dma_hw_descriptor *hw_desc;
+ struct ioat_descriptor *desc;
+
+ desc = malloc(sizeof(struct ioat_descriptor), M_IOAT, M_NOWAIT);
+ if (desc == NULL)
+ return (NULL);
+
+ bus_dmamem_alloc(ioat->hw_desc_tag, (void **)&hw_desc, BUS_DMA_ZERO,
+ &ioat->hw_desc_map);
+ if (hw_desc == NULL) {
+ free(desc, M_IOAT);
+ return (NULL);
+ }
+
+ /* Synchronous load (flag 0): callback fills hw_desc_bus_addr. */
+ bus_dmamap_load(ioat->hw_desc_tag, ioat->hw_desc_map, hw_desc,
+ sizeof(*hw_desc), ioat_dmamap_cb, &desc->hw_desc_bus_addr, 0);
+
+ desc->u.dma = hw_desc;
+ return (desc);
+}
+
+/*
+ * Free one software descriptor and its DMA-backed hardware descriptor.
+ * NULL is accepted and ignored.
+ */
+static void
+ioat_free_ring_entry(struct ioat_softc *ioat, struct ioat_descriptor *desc)
+{
+
+	if (desc == NULL)
+		return;
+	if (desc->u.dma != NULL)
+		bus_dmamem_free(ioat->hw_desc_tag, desc->u.dma,
+		    ioat->hw_desc_map);
+	free(desc, M_IOAT);
+}
+
+/*
+ * Ensure at least num_descs free slots exist in the ring, growing the
+ * ring (doubling its order) until they do.  Returns 0 on success or
+ * ENOMEM if the ring cannot grow further.
+ *
+ * Despite the name, this does not take a lock itself: it is called with
+ * submit_lock already held (via ioat_acquire()) and only briefly takes
+ * cleanup_lock around the resize.
+ */
+static int
+ioat_reserve_space_and_lock(struct ioat_softc *ioat, int num_descs)
+{
+ boolean_t retry;
+
+ while (1) {
+ if (ioat_get_ring_space(ioat) >= num_descs)
+ return (0);
+
+ mtx_lock(&ioat->cleanup_lock);
+ retry = resize_ring(ioat, ioat->ring_size_order + 1);
+ mtx_unlock(&ioat->cleanup_lock);
+
+ if (!retry)
+ return (ENOMEM);
+ }
+}
+
+/* Map a free-running head/tail index onto a slot of the software ring. */
+static struct ioat_descriptor *
+ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index)
+{
+	uint32_t slot;
+
+	slot = index % (1 << ioat->ring_size_order);
+	return (ioat->ring[slot]);
+}
+
+/*
+ * Grow or shrink the descriptor ring to 2^order entries, preserving the
+ * currently active [tail, head) window and re-linking the hardware
+ * descriptors' next pointers.  Caller holds cleanup_lock (and, on the
+ * submit path, submit_lock).  Returns TRUE on success, FALSE if the
+ * order is out of range, the active set would not fit, or allocation
+ * fails (in which case the old ring is left untouched).
+ */
+static boolean_t
+resize_ring(struct ioat_softc *ioat, int order)
+{
+ struct ioat_descriptor **ring;
+ struct ioat_descriptor *next;
+ struct ioat_dma_hw_descriptor *hw;
+ struct ioat_descriptor *ent;
+ uint32_t current_size, active, new_size, i, new_idx, current_idx;
+ uint32_t new_idx2;
+
+ current_size = 1 << ioat->ring_size_order;
+ active = (ioat->head - ioat->tail) & (current_size - 1);
+ new_size = 1 << order;
+
+ if (order > IOAT_MAX_ORDER)
+ return (FALSE);
+
+ /*
+ * when shrinking, verify that we can hold the current active
+ * set in the new ring
+ */
+ if (active >= new_size)
+ return (FALSE);
+
+ /* allocate the array to hold the software ring */
+ ring = malloc(new_size * sizeof(*ring), M_IOAT, M_ZERO | M_NOWAIT);
+ if (ring == NULL)
+ return (FALSE);
+
+ ioat_log_message(2, "ring resize: new: %d old: %d\n",
+ new_size, current_size);
+
+ /* allocate/trim descriptors as needed */
+ if (new_size > current_size) {
+ /* copy current descriptors to the new ring */
+ for (i = 0; i < current_size; i++) {
+ current_idx = (ioat->tail + i) & (current_size - 1);
+ new_idx = (ioat->tail + i) & (new_size - 1);
+
+ ring[new_idx] = ioat->ring[current_idx];
+ ring[new_idx]->id = new_idx;
+ }
+
+ /* add new descriptors to the ring */
+ for (i = current_size; i < new_size; i++) {
+ new_idx = (ioat->tail + i) & (new_size - 1);
+
+ ring[new_idx] = ioat_alloc_ring_entry(ioat);
+ if (!ring[new_idx]) {
+ /* Unwind: free only the entries added above. */
+ while (i--) {
+ new_idx2 = (ioat->tail + i) &
+ (new_size - 1);
+
+ ioat_free_ring_entry(ioat,
+ ring[new_idx2]);
+ }
+ free(ring, M_IOAT);
+ return (FALSE);
+ }
+ ring[new_idx]->id = new_idx;
+ }
+
+ /*
+ * Re-link hardware next pointers from the last pre-existing
+ * descriptor through all the new ones (and back to the start).
+ */
+ for (i = current_size - 1; i < new_size; i++) {
+ new_idx = (ioat->tail + i) & (new_size - 1);
+ next = ring[(new_idx + 1) & (new_size - 1)];
+ hw = ring[new_idx]->u.dma;
+
+ hw->next = next->hw_desc_bus_addr;
+ }
+ } else {
+ /*
+ * copy current descriptors to the new ring, dropping the
+ * removed descriptors
+ */
+ for (i = 0; i < new_size; i++) {
+ current_idx = (ioat->tail + i) & (current_size - 1);
+ new_idx = (ioat->tail + i) & (new_size - 1);
+
+ ring[new_idx] = ioat->ring[current_idx];
+ ring[new_idx]->id = new_idx;
+ }
+
+ /* free deleted descriptors */
+ for (i = new_size; i < current_size; i++) {
+ ent = ioat_get_ring_entry(ioat, ioat->tail + i);
+ ioat_free_ring_entry(ioat, ent);
+ }
+
+ /* fix up hardware ring */
+ hw = ring[(ioat->tail + new_size - 1) & (new_size - 1)]->u.dma;
+ next = ring[(ioat->tail + new_size) & (new_size - 1)];
+ hw->next = next->hw_desc_bus_addr;
+ }
+
+ /* Swap in the new software ring; old pointer array is released. */
+ free(ioat->ring, M_IOAT);
+ ioat->ring = ring;
+ ioat->ring_size_order = order;
+
+ return (TRUE);
+}
+
+/*
+ * Watchdog / housekeeping callout.  With completions outstanding it
+ * checks for a halted channel (dumping the descriptors at tail on
+ * error) and reruns completion processing.  When idle it opportunistically
+ * shrinks the ring back toward IOAT_MIN_ORDER and re-arms itself while
+ * the ring remains above minimum size.
+ */
+static void
+ioat_timer_callback(void *arg)
+{
+ struct ioat_descriptor *desc;
+ struct ioat_softc *ioat;
+ uint64_t status;
+ uint32_t chanerr;
+
+ ioat = arg;
+ ioat_log_message(2, "%s\n", __func__);
+
+ if (ioat->is_completion_pending) {
+ status = ioat_get_chansts(ioat);
+
+ /*
+ * When halted due to errors, check for channel programming
+ * errors before advancing the completion state.
+ */
+ if (is_ioat_halted(status)) {
+ chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
+ ioat_log_message(0, "Channel halted (%x)\n", chanerr);
+
+ /* Dump the two descriptors at tail for diagnosis. */
+ desc = ioat_get_ring_entry(ioat, ioat->tail + 0);
+ dump_descriptor(desc->u.raw);
+
+ desc = ioat_get_ring_entry(ioat, ioat->tail + 1);
+ dump_descriptor(desc->u.raw);
+ }
+ ioat_process_events(ioat);
+ } else {
+ /* Idle: shrink the ring under both locks (submit before cleanup). */
+ mtx_lock(&ioat->submit_lock);
+ mtx_lock(&ioat->cleanup_lock);
+
+ if (ioat_get_active(ioat) == 0 &&
+ ioat->ring_size_order > IOAT_MIN_ORDER)
+ resize_ring(ioat, ioat->ring_size_order - 1);
+
+ mtx_unlock(&ioat->cleanup_lock);
+ mtx_unlock(&ioat->submit_lock);
+
+ /* Keep shrinking on later ticks until minimum size is reached. */
+ if (ioat->ring_size_order > IOAT_MIN_ORDER)
+ callout_reset(&ioat->timer, 5 * hz,
+ ioat_timer_callback, ioat);
+ }
+}
+
+/*
+ * Support Functions
+ */
+/*
+ * Account one descriptor as submitted.  The release-semantics atomic
+ * ensures the descriptor writes are visible before head advances; the
+ * doorbell itself is rung later in ioat_release().  Also arms the
+ * watchdog the first time work becomes outstanding.
+ */
+static void
+ioat_submit_single(struct ioat_softc *ioat)
+{
+
+ atomic_add_rel_int(&ioat->head, 1);
+
+ if (!ioat->is_completion_pending) {
+ ioat->is_completion_pending = TRUE;
+ callout_reset(&ioat->timer, 10 * hz, ioat_timer_callback,
+ ioat);
+ }
+}
+
+/*
+ * Full channel reset: suspend the channel if running, wait for it to
+ * quiesce, clear and acknowledge the error registers, apply the v3
+ * errata mask, issue the reset, and wait for it to complete.  Each wait
+ * is bounded at 20 ms; returns 0 or ETIMEDOUT.
+ */
+static int
+ioat_reset_hw(struct ioat_softc *ioat)
+{
+ uint64_t status;
+ uint32_t chanerr;
+ int timeout;
+
+ status = ioat_get_chansts(ioat);
+ if (is_ioat_active(status) || is_ioat_idle(status))
+ ioat_suspend(ioat);
+
+ /* Wait at most 20 ms */
+ for (timeout = 0; (is_ioat_active(status) || is_ioat_idle(status)) &&
+ timeout < 20; timeout++) {
+ DELAY(1000);
+ status = ioat_get_chansts(ioat);
+ }
+ if (timeout == 20)
+ return (ETIMEDOUT);
+
+ /* CHANERR is write-1-to-clear: acknowledge whatever is pending. */
+ chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
+ ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);
+
+ /*
+ * IOAT v3 workaround - write 3E07h to CHANERRMSK_INT to mask out
+ * errors that can cause stability issues for IOAT v3.
+ */
+ pci_write_config(ioat->device, IOAT_CFG_CHANERRMASK_INT_OFFSET, 0x3e07,
+ 4);
+ /* Clear the config-space error register the same way (write-1-to-clear). */
+ chanerr = pci_read_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, 4);
+ pci_write_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, chanerr, 4);
+
+ ioat_reset(ioat);
+
+ /* Wait at most 20 ms */
+ for (timeout = 0; ioat_reset_pending(ioat) && timeout < 20; timeout++)
+ DELAY(1000);
+ if (timeout == 20)
+ return (ETIMEDOUT);
+
+ return (0);
+}
+
+/* Hex-dump a 64-byte hardware descriptor as two rows of eight dwords. */
+static void
+dump_descriptor(void *hw_desc)
+{
+	uint32_t *dword;
+	int idx;
+
+	dword = hw_desc;
+	for (idx = 0; idx < 16; idx++) {
+		printf("%08x ", dword[idx]);
+		if ((idx % 8) == 7)
+			printf("\n");
+	}
+}
+
+/*
+ * Register read-only per-device sysctl nodes (under the device's own
+ * tree) exposing the ring order and the head/tail indices.
+ */
+static void
+ioat_setup_sysctl(device_t device)
+{
+ struct sysctl_ctx_list *sysctl_ctx;
+ struct sysctl_oid *sysctl_tree;
+ struct ioat_softc *ioat;
+
+ ioat = DEVICE2SOFTC(device);
+ sysctl_ctx = device_get_sysctl_ctx(device);
+ sysctl_tree = device_get_sysctl_tree(device);
+
+ SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
+ "ring_size_order", CTLFLAG_RD, &ioat->ring_size_order,
+ 0, "HW descriptor ring size order");
+ SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
+ "head", CTLFLAG_RD, &ioat->head,
+ 0, "HW descriptor head pointer index");
+ SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
+ "tail", CTLFLAG_RD, &ioat->tail,
+ 0, "HW descriptor tail pointer index");
+}
+
+/*
+ * Driver logging helper: drop messages whose verbosity exceeds the
+ * hw.ioat.debug_level sysctl, otherwise format them and print with an
+ * uptime prefix.  verbosity 0 is always printed when reached.
+ */
+void
+ioat_log_message(int verbosity, char *fmt, ...)
+{
+	va_list argp;
+	char buffer[512];
+	struct timeval tv;
+
+	if (verbosity > g_ioat_debug_level)
+		return;
+
+	va_start(argp, fmt);
+	/*
+	 * vsnprintf() NUL-terminates within the size it is given, so pass
+	 * the full buffer size; the previous "sizeof(buffer) - 1" wasted
+	 * the final byte.
+	 */
+	vsnprintf(buffer, sizeof(buffer), fmt, argp);
+	va_end(argp);
+	microuptime(&tv);
+
+	printf("[%d:%06d] ioat: %s", (int)tv.tv_sec, (int)tv.tv_usec, buffer);
+}
diff --git a/sys/dev/ioat/ioat.h b/sys/dev/ioat/ioat.h
new file mode 100644
index 000000000000..dce4142fa214
--- /dev/null
+++ b/sys/dev/ioat/ioat.h
@@ -0,0 +1,77 @@
+/*-
+ * Copyright (C) 2012 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+__FBSDID("$FreeBSD$");
+
+#ifndef __IOAT_H__
+#define __IOAT_H__
+
+#include <sys/param.h>
+#include <machine/bus.h>
+
+/*
+ * This file defines the public interface to the IOAT driver.
+ */
+
+/*
+ * Enables an interrupt for this operation. Typically, you would only enable
+ * this on the last operation in a group
+ */
+#define DMA_INT_EN 0x1
+#define DMA_ALL_FLAGS (DMA_INT_EN)
+
+typedef void *bus_dmaengine_t;
+struct bus_dmadesc;
+typedef void (*bus_dmaengine_callback_t)(void *arg);
+
+/*
+ * Called first to acquire a reference to the DMA channel
+ */
+bus_dmaengine_t ioat_get_dmaengine(uint32_t channel_index);
+
+/*
+ * Acquire must be called before issuing an operation to perform. Release is
+ * called after. Multiple operations can be issued within the context of one
+ * acquire and release
+ */
+void ioat_acquire(bus_dmaengine_t dmaengine);
+void ioat_release(bus_dmaengine_t dmaengine);
+
+/* Issues the copy data operation */
+struct bus_dmadesc *ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst,
+ bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn,
+ void *callback_arg, uint32_t flags);
+
+/*
+ * Issues a null operation. This issues the operation to the hardware, but the
+ * hardware doesn't do anything with it.
+ */
+struct bus_dmadesc *ioat_null(bus_dmaengine_t dmaengine,
+ bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags);
+
+
+#endif /* __IOAT_H__ */
+
diff --git a/sys/dev/ioat/ioat_hw.h b/sys/dev/ioat/ioat_hw.h
new file mode 100644
index 000000000000..8e9952af184f
--- /dev/null
+++ b/sys/dev/ioat/ioat_hw.h
@@ -0,0 +1,104 @@
+/*-
+ * Copyright (C) 2012 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+/*
+ * __FBSDID() removed: it is for .c files only — in a header it emits
+ * duplicate ident data into every includer and requires <sys/cdefs.h>.
+ */
+
+#ifndef __IOAT_HW_H__
+#define __IOAT_HW_H__
+
+#define IOAT_MAX_CHANNELS 32
+
+/* Per-function (global) register offsets, relative to BAR0. */
+#define IOAT_CHANCNT_OFFSET 0x00
+
+#define IOAT_XFERCAP_OFFSET 0x01
+
+#define IOAT_GENCTRL_OFFSET 0x02
+
+#define IOAT_INTRCTRL_OFFSET 0x03
+#define IOAT_INTRCTRL_MASTER_INT_EN 0x01
+
+#define IOAT_ATTNSTATUS_OFFSET 0x04
+
+#define IOAT_CBVER_OFFSET 0x08
+
+/* Crystal Beach version register values (major/minor nibbles). */
+#define IOAT_VER_3_0 0x30
+#define IOAT_VER_3_3 0x33
+
+#define IOAT_INTRDELAY_OFFSET 0x0C
+
+#define IOAT_CS_STATUS_OFFSET 0x0E
+
+#define IOAT_DMACAPABILITY_OFFSET 0x10
+
+/* DMA Channel Registers */
+#define IOAT_CHANCTRL_OFFSET 0x80
+#define IOAT_CHANCTRL_CHANNEL_PRIORITY_MASK 0xF000
+#define IOAT_CHANCTRL_COMPL_DCA_EN 0x0200
+#define IOAT_CHANCTRL_CHANNEL_IN_USE 0x0100
+#define IOAT_CHANCTRL_DESCRIPTOR_ADDR_SNOOP_CONTROL 0x0020
+#define IOAT_CHANCTRL_ERR_INT_EN 0x0010
+#define IOAT_CHANCTRL_ANY_ERR_ABORT_EN 0x0008
+#define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004
+#define IOAT_CHANCTRL_INT_REARM 0x0001
+/* Default CHANCTRL value used to (re)start a channel. */
+#define IOAT_CHANCTRL_RUN (IOAT_CHANCTRL_INT_REARM |\
+	IOAT_CHANCTRL_ANY_ERR_ABORT_EN)
+
+#define IOAT_CHANCMD_OFFSET 0x84
+#define IOAT_CHANCMD_RESET 0x20
+#define IOAT_CHANCMD_SUSPEND 0x04
+
+#define IOAT_DMACOUNT_OFFSET 0x86
+
+#define IOAT_CHANSTS_OFFSET_LOW 0x88
+#define IOAT_CHANSTS_OFFSET_HIGH 0x8C
+#define IOAT_CHANSTS_OFFSET 0x88
+
+/* Low bits of CHANSTS encode the channel state... */
+#define IOAT_CHANSTS_STATUS 0x7ULL
+#define IOAT_CHANSTS_ACTIVE 0x0
+#define IOAT_CHANSTS_IDLE 0x1
+#define IOAT_CHANSTS_SUSPENDED 0x2
+#define IOAT_CHANSTS_HALTED 0x3
+
+#define IOAT_CHANSTS_UNAFFILIATED_ERROR 0x8ULL
+#define IOAT_CHANSTS_SOFT_ERROR 0x10ULL
+
+/* ...while the upper bits hold the last completed descriptor address. */
+#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK (~0x3FULL)
+
+#define IOAT_CHAINADDR_OFFSET_LOW 0x90
+#define IOAT_CHAINADDR_OFFSET_HIGH 0x94
+
+#define IOAT_CHANCMP_OFFSET_LOW 0x98
+#define IOAT_CHANCMP_OFFSET_HIGH 0x9C
+
+#define IOAT_CHANERR_OFFSET 0xA8
+
+#define IOAT_CFG_CHANERR_INT_OFFSET 0x180
+#define IOAT_CFG_CHANERRMASK_INT_OFFSET 0x184
+
+/* Bounds on the descriptor ring size, as log2 of the entry count. */
+#define IOAT_MIN_ORDER 4
+#define IOAT_MAX_ORDER 16
+
+#endif /* __IOAT_HW_H__ */
diff --git a/sys/dev/ioat/ioat_internal.h b/sys/dev/ioat/ioat_internal.h
new file mode 100644
index 000000000000..4f398c6747ae
--- /dev/null
+++ b/sys/dev/ioat/ioat_internal.h
@@ -0,0 +1,447 @@
+/*-
+ * Copyright (C) 2012 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+/*
+ * __FBSDID() removed: headers must not emit ident data (it would be
+ * duplicated in every includer) and cannot assume <sys/cdefs.h>.
+ */
+
+#ifndef __IOAT_INTERNAL_H__
+#define __IOAT_INTERNAL_H__
+
+#define DEVICE2SOFTC(dev) ((struct ioat_softc *) device_get_softc(dev))
+
+/* Named accessors for individual global registers. */
+#define ioat_read_chancnt(ioat) \
+	ioat_read_1((ioat), IOAT_CHANCNT_OFFSET)
+
+#define ioat_read_xfercap(ioat) \
+	ioat_read_1((ioat), IOAT_XFERCAP_OFFSET)
+
+#define ioat_write_intrctrl(ioat, value) \
+	ioat_write_1((ioat), IOAT_INTRCTRL_OFFSET, (value))
+
+#define ioat_read_cbver(ioat) \
+	(ioat_read_1((ioat), IOAT_CBVER_OFFSET) & 0xFF)
+
+#define ioat_read_dmacapability(ioat) \
+	ioat_read_4((ioat), IOAT_DMACAPABILITY_OFFSET)
+
+#define ioat_write_chanctrl(ioat, value) \
+	ioat_write_2((ioat), IOAT_CHANCTRL_OFFSET, (value))
+
+/*
+ * Read a 64-bit register as two 32-bit reads, lower dword first, for
+ * hardware that does not support 8-byte accesses.
+ *
+ * The reads are sequenced with separate statements: the original
+ * single-expression form ("read(ofs) | read(ofs+4) << 32") left the
+ * evaluation order of the two bus_space_read_4() calls unspecified,
+ * so the compiler was free to read the upper dword first — exactly
+ * what this helper exists to prevent.
+ */
+static __inline uint64_t
+ioat_bus_space_read_8_lower_first(bus_space_tag_t tag,
+    bus_space_handle_t handle, bus_size_t offset)
+{
+	uint64_t lo, hi;
+
+	lo = bus_space_read_4(tag, handle, offset);
+	hi = bus_space_read_4(tag, handle, offset + 4);
+	return (lo | (hi << 32));
+}
+
+/*
+ * Write a 64-bit register as two 32-bit stores, lower dword first, for
+ * hardware that does not support 8-byte accesses.
+ */
+static __inline void
+ioat_bus_space_write_8_lower_first(bus_space_tag_t tag,
+    bus_space_handle_t handle, bus_size_t offset, uint64_t val)
+{
+	uint32_t lo, hi;
+
+	lo = (uint32_t)val;
+	hi = (uint32_t)(val >> 32);
+	bus_space_write_4(tag, handle, offset, lo);
+	bus_space_write_4(tag, handle, offset + 4, hi);
+}
+
+/*
+ * i386 cannot perform atomic 64-bit MMIO accesses, so fall back to the
+ * paired 32-bit helpers there.  Test __i386__ (always defined by the
+ * compiler for that target) rather than bare "i386", which is only
+ * defined in traditional/GNU language modes and so would silently
+ * select the wrong branch under -std builds.
+ */
+#ifdef __i386__
+#define ioat_bus_space_read_8 ioat_bus_space_read_8_lower_first
+#define ioat_bus_space_write_8 ioat_bus_space_write_8_lower_first
+#else
+#define ioat_bus_space_read_8(tag, handle, offset) \
+	bus_space_read_8((tag), (handle), (offset))
+#define ioat_bus_space_write_8(tag, handle, offset, val) \
+	bus_space_write_8((tag), (handle), (offset), (val))
+#endif
+
+/* Width-suffixed register accessors over the softc's BAR0 mapping. */
+#define ioat_read_1(ioat, offset) \
+	bus_space_read_1((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
+	    (offset))
+
+#define ioat_read_2(ioat, offset) \
+	bus_space_read_2((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
+	    (offset))
+
+#define ioat_read_4(ioat, offset) \
+	bus_space_read_4((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
+	    (offset))
+
+/* 8-byte variants: single access where supported (see ifdef above). */
+#define ioat_read_8(ioat, offset) \
+	ioat_bus_space_read_8((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
+	    (offset))
+
+/* Always two 4-byte accesses, lower dword first (hardware requirement
+ * for pre-3.3 CHANSTS/CHANCMP/CHAINADDR registers). */
+#define ioat_read_double_4(ioat, offset) \
+	ioat_bus_space_read_8_lower_first((ioat)->pci_bus_tag, \
+	    (ioat)->pci_bus_handle, (offset))
+
+#define ioat_write_1(ioat, offset, value) \
+	bus_space_write_1((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
+	    (offset), (value))
+
+#define ioat_write_2(ioat, offset, value) \
+	bus_space_write_2((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
+	    (offset), (value))
+
+#define ioat_write_4(ioat, offset, value) \
+	bus_space_write_4((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
+	    (offset), (value))
+
+#define ioat_write_8(ioat, offset, value) \
+	ioat_bus_space_write_8((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
+	    (offset), (value))
+
+#define ioat_write_double_4(ioat, offset, value) \
+	ioat_bus_space_write_8_lower_first((ioat)->pci_bus_tag, \
+	    (ioat)->pci_bus_handle, (offset), (value))
+
+MALLOC_DECLARE(M_IOAT);
+
+SYSCTL_DECL(_hw_ioat);
+
+/* Driver log output.  NOTE(review): presumably gated by a verbosity
+ * sysctl in ioat.c — confirm there; fmt could also be const-qualified
+ * once the definition in ioat.c is updated to match. */
+void ioat_log_message(int verbosity, char *fmt, ...);
+
+/*
+ * Hardware descriptor layouts.  These mirror the device's in-memory
+ * descriptor formats exactly — field order, widths, and the bitfield
+ * packing are ABI with the hardware; do not reorder or repack.
+ */
+
+/* Plain memory-copy (DMA) descriptor. */
+struct ioat_dma_hw_descriptor {
+	uint32_t size;
+	union {
+		uint32_t control_raw;
+		struct {
+			uint32_t int_enable:1;
+			uint32_t src_snoop_disable:1;
+			uint32_t dest_snoop_disable:1;
+			uint32_t completion_update:1;
+			uint32_t fence:1;
+			uint32_t null:1;
+			uint32_t src_page_break:1;
+			uint32_t dest_page_break:1;
+			uint32_t bundle:1;
+			uint32_t dest_dca:1;
+			uint32_t hint:1;
+			uint32_t reserved:13;
+			#define IOAT_OP_COPY 0x00
+			uint32_t op:8;
+		} control;
+	} u;
+	uint64_t src_addr;
+	uint64_t dest_addr;
+	uint64_t next;
+	uint64_t reserved;
+	uint64_t reserved2;
+	uint64_t user1;
+	uint64_t user2;
+};
+
+/* Block-fill descriptor: replicates src_data over the destination. */
+struct ioat_fill_hw_descriptor {
+	uint32_t size;
+	union {
+		uint32_t control_raw;
+		struct {
+			uint32_t int_enable:1;
+			uint32_t reserved:1;
+			uint32_t dest_snoop_disable:1;
+			uint32_t completion_update:1;
+			uint32_t fence:1;
+			uint32_t reserved2:2;
+			uint32_t dest_page_break:1;
+			uint32_t bundle:1;
+			uint32_t reserved3:15;
+			#define IOAT_OP_FILL 0x01
+			uint32_t op:8;
+		} control;
+	} u;
+	uint64_t src_data;
+	uint64_t dest_addr;
+	uint64_t next;
+	uint64_t reserved;
+	uint64_t next_dest_addr;
+	uint64_t user1;
+	uint64_t user2;
+};
+
+/* XOR / XOR-validate descriptor (up to 5 sources inline). */
+struct ioat_xor_hw_descriptor {
+	uint32_t size;
+	union {
+		uint32_t control_raw;
+		struct {
+			uint32_t int_enable:1;
+			uint32_t src_snoop_disable:1;
+			uint32_t dest_snoop_disable:1;
+			uint32_t completion_update:1;
+			uint32_t fence:1;
+			uint32_t src_count:3;
+			uint32_t bundle:1;
+			uint32_t dest_dca:1;
+			uint32_t hint:1;
+			uint32_t reserved:13;
+			#define IOAT_OP_XOR 0x87
+			#define IOAT_OP_XOR_VAL 0x88
+			uint32_t op:8;
+		} control;
+	} u;
+	uint64_t src_addr;
+	uint64_t dest_addr;
+	uint64_t next;
+	uint64_t src_addr2;
+	uint64_t src_addr3;
+	uint64_t src_addr4;
+	uint64_t src_addr5;
+};
+
+/* Extension block for XOR operations needing sources 6-8. */
+struct ioat_xor_ext_hw_descriptor {
+	uint64_t src_addr6;
+	uint64_t src_addr7;
+	uint64_t src_addr8;
+	uint64_t next;
+	uint64_t reserved[4];
+};
+
+/* P+Q (RAID-6 parity) generate/validate descriptor. */
+struct ioat_pq_hw_descriptor {
+	uint32_t size;
+	union {
+		uint32_t control_raw;
+		struct {
+			uint32_t int_enable:1;
+			uint32_t src_snoop_disable:1;
+			uint32_t dest_snoop_disable:1;
+			uint32_t completion_update:1;
+			uint32_t fence:1;
+			uint32_t src_count:3;
+			uint32_t bundle:1;
+			uint32_t dest_dca:1;
+			uint32_t hint:1;
+			uint32_t p_disable:1;
+			uint32_t q_disable:1;
+			uint32_t reserved:11;
+			#define IOAT_OP_PQ 0x89
+			#define IOAT_OP_PQ_VAL 0x8a
+			uint32_t op:8;
+		} control;
+	} u;
+	uint64_t src_addr;
+	uint64_t p_addr;
+	uint64_t next;
+	uint64_t src_addr2;
+	uint64_t src_addr3;
+	uint8_t coef[8];
+	uint64_t q_addr;
+};
+
+/* Extension block for P+Q operations needing sources 4-8. */
+struct ioat_pq_ext_hw_descriptor {
+	uint64_t src_addr4;
+	uint64_t src_addr5;
+	uint64_t src_addr6;
+	uint64_t next;
+	uint64_t src_addr7;
+	uint64_t src_addr8;
+	uint64_t reserved[2];
+};
+
+/* Incremental P+Q update descriptor. */
+struct ioat_pq_update_hw_descriptor {
+	uint32_t size;
+	union {
+		uint32_t control_raw;
+		struct {
+			uint32_t int_enable:1;
+			uint32_t src_snoop_disable:1;
+			uint32_t dest_snoop_disable:1;
+			uint32_t completion_update:1;
+			uint32_t fence:1;
+			uint32_t src_cnt:3;
+			uint32_t bundle:1;
+			uint32_t dest_dca:1;
+			uint32_t hint:1;
+			uint32_t p_disable:1;
+			uint32_t q_disable:1;
+			uint32_t reserved:3;
+			uint32_t coef:8;
+			#define IOAT_OP_PQ_UP 0x8b
+			uint32_t op:8;
+		} control;
+	} u;
+	uint64_t src_addr;
+	uint64_t p_addr;
+	uint64_t next;
+	uint64_t src_addr2;
+	uint64_t p_src;
+	uint64_t q_src;
+	uint64_t q_addr;
+};
+
+/* Untyped view of a descriptor: eight 64-bit words. */
+struct ioat_raw_hw_descriptor {
+	uint64_t field[8];
+};
+
+/* Per-operation completion record returned to the API consumer. */
+struct bus_dmadesc {
+	bus_dmaengine_callback_t callback_fn;
+	void *callback_arg;
+};
+
+/* Software ring entry: ties a hardware descriptor (one of the layouts
+ * above) to its callback state and ring position. */
+struct ioat_descriptor {
+	struct bus_dmadesc bus_dmadesc;
+	union {
+		struct ioat_dma_hw_descriptor *dma;
+		struct ioat_fill_hw_descriptor *fill;
+		struct ioat_xor_hw_descriptor *xor;
+		struct ioat_xor_ext_hw_descriptor *xor_ext;
+		struct ioat_pq_hw_descriptor *pq;
+		struct ioat_pq_ext_hw_descriptor *pq_ext;
+		struct ioat_raw_hw_descriptor *raw;
+	} u;
+	uint32_t id;
+	uint32_t length;
+	/* NOTE(review): enum validate_flags is not declared anywhere in
+	 * this header — confirm where it is defined before use. */
+	enum validate_flags *validate_result;
+	bus_addr_t hw_desc_bus_addr;
+};
+
+/* One of these per allocated PCI device. */
+struct ioat_softc {
+	/* Public handle; must stay first-field-addressable via the
+	 * container-of macro below. */
+	bus_dmaengine_t dmaengine;
+#define to_ioat_softc(_dmaeng) \
+({ \
+	bus_dmaengine_t *_p = (_dmaeng); \
+	(struct ioat_softc *)((char *)_p - \
+	    offsetof(struct ioat_softc, dmaengine)); \
+})
+
+	/* Hardware version from the CBVER register (IOAT_VER_*). */
+	int version;
+
+	struct mtx submit_lock;
+	int num_interrupts;
+	device_t device;
+	/* BAR0 register mapping used by the ioat_read/write macros. */
+	bus_space_tag_t pci_bus_tag;
+	bus_space_handle_t pci_bus_handle;
+	int pci_resource_id;
+	struct resource *pci_resource;
+	uint32_t max_xfer_size;
+
+	/* Interrupt resource and handler cookie. */
+	struct resource *res;
+	int rid;
+	void *tag;
+
+	bus_dma_tag_t hw_desc_tag;
+	bus_dmamap_t hw_desc_map;
+
+	/* DMA-able word the hardware writes completion status into. */
+	bus_dma_tag_t comp_update_tag;
+	bus_dmamap_t comp_update_map;
+	uint64_t *comp_update;
+	bus_addr_t comp_update_bus_addr;
+
+	struct callout timer;
+
+	boolean_t is_resize_pending;
+	boolean_t is_completion_pending;
+	boolean_t is_reset_pending;
+	boolean_t is_channel_running;
+	boolean_t is_waiting_for_ack;
+
+	/* log2 of the maximum transfer size (from XFERCAP). */
+	uint32_t xfercap_log;
+	/* Ring indices: head = next submit slot, tail = next completion. */
+	uint32_t head;
+	uint32_t tail;
+	uint16_t reserved;
+	/* log2 of the ring size; bounded by IOAT_MIN/MAX_ORDER. */
+	uint32_t ring_size_order;
+	bus_addr_t last_seen;
+
+	struct ioat_descriptor **ring;
+
+	struct mtx cleanup_lock;
+};
+
+/*
+ * Read the 64-bit channel status register.  Hardware at version 3.3 or
+ * later supports a single 8-byte access; older parts require two 4-byte
+ * reads, lower dword first.
+ */
+static inline uint64_t
+ioat_get_chansts(struct ioat_softc *ioat)
+{
+
+	if (ioat->version >= IOAT_VER_3_3)
+		return (ioat_read_8(ioat, IOAT_CHANSTS_OFFSET));
+	return (ioat_read_double_4(ioat, IOAT_CHANSTS_OFFSET));
+}
+
+/*
+ * Program the channel completion address register with the bus address
+ * the hardware writes completion status to.  Pre-3.3 hardware needs two
+ * 4-byte writes, lower dword first.
+ */
+static inline void
+ioat_write_chancmp(struct ioat_softc *ioat, uint64_t addr)
+{
+
+	if (ioat->version >= IOAT_VER_3_3)
+		ioat_write_8(ioat, IOAT_CHANCMP_OFFSET_LOW, addr);
+	else
+		ioat_write_double_4(ioat, IOAT_CHANCMP_OFFSET_LOW, addr);
+}
+
+/*
+ * Program the chain address register with the bus address of the first
+ * hardware descriptor to execute.
+ */
+static inline void
+ioat_write_chainaddr(struct ioat_softc *ioat, uint64_t addr)
+{
+
+	if (ioat->version >= IOAT_VER_3_3)
+		ioat_write_8(ioat, IOAT_CHAINADDR_OFFSET_LOW, addr);
+	else
+		ioat_write_double_4(ioat, IOAT_CHAINADDR_OFFSET_LOW, addr);
+}
+
+/* Predicates over ioat_get_chansts() values: the low CHANSTS bits
+ * (IOAT_CHANSTS_STATUS) encode the channel's current state. */
+static inline boolean_t
+is_ioat_active(uint64_t status)
+{
+	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
+}
+
+static inline boolean_t
+is_ioat_idle(uint64_t status)
+{
+	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_IDLE);
+}
+
+static inline boolean_t
+is_ioat_halted(uint64_t status)
+{
+	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
+}
+
+static inline boolean_t
+is_ioat_suspended(uint64_t status)
+{
+	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
+}
+
+/* Ask the channel to suspend (takes effect after the current descriptor). */
+static inline void
+ioat_suspend(struct ioat_softc *ioat)
+{
+	ioat_write_1(ioat, IOAT_CHANCMD_OFFSET, IOAT_CHANCMD_SUSPEND);
+}
+
+/* Initiate a channel reset; poll ioat_reset_pending() for completion. */
+static inline void
+ioat_reset(struct ioat_softc *ioat)
+{
+	ioat_write_1(ioat, IOAT_CHANCMD_OFFSET, IOAT_CHANCMD_RESET);
+}
+
+/*
+ * The RESET bit in CHANCMD reads back as set while the hardware is
+ * still resetting; report whether a reset is in flight.
+ */
+static inline boolean_t
+ioat_reset_pending(struct ioat_softc *ioat)
+{
+
+	return ((ioat_read_1(ioat, IOAT_CHANCMD_OFFSET) &
+	    IOAT_CHANCMD_RESET) != 0);
+}
+
+#endif /* __IOAT_INTERNAL_H__ */
diff --git a/sys/dev/ioat/ioat_test.c b/sys/dev/ioat/ioat_test.c
new file mode 100644
index 000000000000..8352e7428a54
--- /dev/null
+++ b/sys/dev/ioat/ioat_test.c
@@ -0,0 +1,256 @@
+/*-
+ * Copyright (C) 2012 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/ioccom.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/rman.h>
+#include <sys/sysctl.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include "ioat.h"
+#include "ioat_hw.h"
+#include "ioat_internal.h"
+#include "ioat_test.h"
+
+MALLOC_DEFINE(M_IOAT_TEST, "ioat_test", "ioat test allocations");
+
+/* Size of each test buffer, in bytes (256 KB). */
+#define IOAT_TEST_SIZE 0x40000
+/* Maximum number of buffers one transaction may carry. */
+#define IOAT_MAX_BUFS 8
+
+/* One copy test: num_buffers contiguous buffers of 'length' bytes. */
+struct test_transaction {
+	uint8_t num_buffers;
+	void *buf[IOAT_MAX_BUFS];
+	uint32_t length;
+	struct ioat_test *test;
+};
+
+/* Monotonic id given to each test run, for log messages only. */
+static int g_thread_index = 1;
+/* Non-NULL while /dev/ioat_test exists (see sysctl handler below). */
+static struct cdev *g_ioat_cdev = NULL;
+
+/*
+ * Free every buffer attached to a test transaction, then the transaction
+ * itself.  Safe on a partially-constructed transaction: the transaction
+ * is allocated M_ZERO, so unused slots are NULL.
+ */
+static void
+ioat_test_transaction_destroy(struct test_transaction *tx)
+{
+	int i;
+
+	for (i = 0; i < IOAT_MAX_BUFS; i++) {
+		if (tx->buf[i] != NULL) {
+			/*
+			 * Free with the recorded allocation size, not the
+			 * hard-coded IOAT_TEST_SIZE: contigfree() must be
+			 * given the size the buffer was allocated with, and
+			 * ioat_test_transaction_create() accepts an
+			 * arbitrary buffer_size (stored in tx->length).
+			 */
+			contigfree(tx->buf[i], tx->length, M_IOAT_TEST);
+			tx->buf[i] = NULL;
+		}
+	}
+
+	free(tx, M_IOAT_TEST);
+}
+
+/*
+ * Allocate a test transaction carrying num_buffers physically contiguous
+ * buffers of buffer_size bytes each.  Returns NULL on any allocation
+ * failure; partially-allocated buffers are released before returning.
+ */
+static struct test_transaction *
+ioat_test_transaction_create(uint8_t num_buffers, uint32_t buffer_size)
+{
+	struct test_transaction *tx;
+	int i;
+
+	tx = malloc(sizeof(*tx), M_IOAT_TEST, M_NOWAIT | M_ZERO);
+	if (tx == NULL)
+		return (NULL);
+
+	tx->num_buffers = num_buffers;
+	tx->length = buffer_size;
+
+	for (i = 0; i < num_buffers; i++) {
+		tx->buf[i] = contigmalloc(buffer_size, M_IOAT_TEST, M_NOWAIT,
+		    0, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
+		if (tx->buf[i] == NULL) {
+			ioat_test_transaction_destroy(tx);
+			return (NULL);
+		}
+	}
+	return (tx);
+}
+
+/*
+ * DMA completion callback: verify the destination now matches the
+ * source, record any miscompare, free the transaction, and wake the
+ * submitting thread once every loop has completed.
+ */
+static void
+ioat_dma_test_callback(void *arg)
+{
+	struct test_transaction *tx;
+	struct ioat_test *test;
+
+	tx = arg;
+	test = tx->test;
+
+	if (memcmp(tx->buf[0], tx->buf[1], tx->length) != 0) {
+		ioat_log_message(0, "miscompare found\n");
+		test->status = IOAT_TEST_MISCOMPARE;
+	}
+	atomic_add_32(&test->num_completions, 1);
+	ioat_test_transaction_destroy(tx);
+	/* NOTE(review): plain (non-atomic) re-read of the counter; the
+	 * waiter's 5-second tsleep timeout covers a lost wakeup. */
+	if (test->num_completions == test->num_loops)
+		wakeup(test);
+}
+
+/*
+ * Run test->num_loops copy transactions of IOAT_TEST_SIZE bytes on the
+ * channel named by test->channel_index, then wait for all completion
+ * callbacks.  The outcome is reported through test->status.
+ */
+static void
+ioat_dma_test(void *arg)
+{
+	struct test_transaction *tx;
+	struct ioat_test *test;
+	bus_dmaengine_t dmaengine;
+	uint32_t loops;
+	int index, i;
+
+	test = arg;
+
+	test->status = IOAT_TEST_OK;
+	test->num_completions = 0;
+
+	/* NOTE(review): unlocked increment of a global; racy if two
+	 * tests run concurrently — confirm callers are serialized. */
+	index = g_thread_index++;
+	dmaengine = ioat_get_dmaengine(test->channel_index);
+
+	if (dmaengine == NULL) {
+		ioat_log_message(0, "Couldn't acquire dmaengine\n");
+		test->status = IOAT_TEST_NO_DMA_ENGINE;
+		return;
+	}
+
+	ioat_log_message(0, "Thread %d: num_loops remaining: 0x%07x\n", index,
+	    test->num_loops);
+
+	/*
+	 * (Removed a dead store: "loops" was assigned test->num_loops at
+	 * the top of the function and immediately reinitialized here.)
+	 */
+	for (loops = 0; loops < test->num_loops; loops++) {
+		bus_addr_t src, dest;
+
+		/* Periodic progress report. */
+		if (loops % 0x10000 == 0) {
+			ioat_log_message(0, "Thread %d: "
+			    "num_loops remaining: 0x%07x\n", index,
+			    test->num_loops - loops);
+		}
+
+		tx = ioat_test_transaction_create(2, IOAT_TEST_SIZE);
+		if (tx == NULL) {
+			ioat_log_message(0, "tx == NULL - memory exhausted\n");
+			/* Count the failed loop as complete so the wait
+			 * below still terminates. */
+			atomic_add_32(&test->num_completions, 1);
+			test->status = IOAT_TEST_NO_MEMORY;
+			continue;
+		}
+
+		tx->test = test;
+		/* Make tx fully visible before the callback can run. */
+		wmb();
+
+		/* Fill source and destination with complementary patterns
+		 * so only a successful copy makes them compare equal. */
+		for (i = 0; i < (IOAT_TEST_SIZE / sizeof(uint32_t)); i++) {
+			uint32_t val = i + (loops << 16) + (index << 28);
+			((uint32_t *)tx->buf[0])[i] = ~val;
+			((uint32_t *)tx->buf[1])[i] = val;
+		}
+
+		src = pmap_kextract((vm_offset_t)tx->buf[0]);
+		dest = pmap_kextract((vm_offset_t)tx->buf[1]);
+
+		ioat_acquire(dmaengine);
+		ioat_copy(dmaengine, src, dest, IOAT_TEST_SIZE,
+		    ioat_dma_test_callback, tx, DMA_INT_EN);
+		ioat_release(dmaengine);
+	}
+
+	/* Sleep until every submitted loop has completed; the 5 second
+	 * timeout re-checks the count in case a wakeup was lost. */
+	while (test->num_completions < test->num_loops)
+		tsleep(test, 0, "compl", 5 * hz);
+}
+
+/* /dev/ioat_test open/close: no per-open state is kept. */
+static int
+ioat_test_open(struct cdev *dev, int flags, int fmt, struct thread *td)
+{
+
+	return (0);
+}
+
+static int
+ioat_test_close(struct cdev *dev, int flags, int fmt, struct thread *td)
+{
+
+	return (0);
+}
+
+/*
+ * Dispatch /dev/ioat_test ioctls.  For IOAT_DMATEST, 'arg' is the
+ * kernel copy of the caller's struct ioat_test (IOAT_DMATEST is _IOWR,
+ * so results written into it are copied back out); the test runs
+ * synchronously in the caller's thread.
+ */
+static int
+ioat_test_ioctl(struct cdev *dev, unsigned long cmd, caddr_t arg, int flag,
+    struct thread *td)
+{
+
+	switch (cmd) {
+	case IOAT_DMATEST:
+		ioat_dma_test(arg);
+		break;
+	default:
+		return (EINVAL);
+	}
+	return (0);
+}
+
+/* Character-device entry points for /dev/ioat_test. */
+static struct cdevsw ioat_cdevsw = {
+	.d_version =	D_VERSION,
+	.d_flags =	0,
+	.d_open =	ioat_test_open,
+	.d_close =	ioat_test_close,
+	.d_ioctl =	ioat_test_ioctl,
+	.d_name =	"ioat_test",
+};
+
+/*
+ * hw.ioat.enable_ioat_test handler: writing non-zero creates
+ * /dev/ioat_test, writing zero destroys it; reads report whether the
+ * device currently exists.
+ * NOTE(review): g_ioat_cdev is not locked against concurrent sysctl
+ * writers — confirm sysctl-handler serialization suffices here.
+ */
+static int
+sysctl_enable_ioat_test(SYSCTL_HANDLER_ARGS)
+{
+	int error, enabled;
+
+	enabled = (g_ioat_cdev != NULL);
+	error = sysctl_handle_int(oidp, &enabled, 0, req);
+	/* Read-only access (or error): nothing further to do. */
+	if (error != 0 || req->newptr == NULL)
+		return (error);
+
+	if (enabled != 0 && g_ioat_cdev == NULL) {
+		g_ioat_cdev = make_dev(&ioat_cdevsw, 0, UID_ROOT, GID_WHEEL,
+		    0600, "ioat_test");
+	} else if (enabled == 0 && g_ioat_cdev != NULL) {
+		destroy_dev(g_ioat_cdev);
+		g_ioat_cdev = NULL;
+	}
+	return (0);
+}
+SYSCTL_PROC(_hw_ioat, OID_AUTO, enable_ioat_test, CTLTYPE_INT | CTLFLAG_RW,
+    0, 0, sysctl_enable_ioat_test, "I",
+    "Non-zero: Enable the /dev/ioat_test device");
diff --git a/sys/dev/ioat/ioat_test.h b/sys/dev/ioat/ioat_test.h
new file mode 100644
index 000000000000..636a97132d54
--- /dev/null
+++ b/sys/dev/ioat/ioat_test.h
@@ -0,0 +1,46 @@
+/*-
+ * Copyright (C) 2012 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+/*
+ * __FBSDID() removed: it belongs in .c files only; in a header — and
+ * before the include guard — it injects duplicate ident data into every
+ * includer and assumes <sys/cdefs.h> was already included.
+ */
+
+#ifndef __IOAT_TEST_H__
+#define __IOAT_TEST_H__
+
+/*
+ * Request block for the IOAT_DMATEST ioctl; also serves as the running
+ * test's state while the ioctl executes.  Field order is ABI with
+ * ioatcontrol(8) — do not reorder.
+ */
+struct ioat_test {
+	uint32_t channel_index;		/* in: channel to exercise */
+	uint32_t num_loops;		/* in: number of copies to run */
+	volatile uint32_t num_completions; /* out: callbacks observed */
+	uint32_t status;		/* out: IOAT_TEST_* result code */
+};
+
+#define IOAT_TEST_OK 0
+#define IOAT_TEST_NO_DMA_ENGINE 1
+#define IOAT_TEST_NO_MEMORY 2
+#define IOAT_TEST_MISCOMPARE 3
+
+#define IOAT_DMATEST _IOWR('i', 0, struct ioat_test)
+
+#endif /* __IOAT_TEST_H__ */
diff --git a/sys/modules/Makefile b/sys/modules/Makefile
index 21009a9a4ec5..7fa73dd557b3 100644
--- a/sys/modules/Makefile
+++ b/sys/modules/Makefile
@@ -158,6 +158,7 @@ SUBDIR= \
${_iir} \
imgact_binmisc \
${_io} \
+ ${_ioat} \
${_ipoib} \
${_ipdivert} \
${_ipfilter} \
@@ -630,6 +631,7 @@ _x86bios= x86bios
.if ${MACHINE_CPUARCH} == "amd64"
_cloudabi64= cloudabi64
+_ioat= ioat
_ixl= ixl
_ixlv= ixlv
_linux64= linux64
diff --git a/sys/modules/ioat/Makefile b/sys/modules/ioat/Makefile
new file mode 100644
index 000000000000..5a2c4177437d
--- /dev/null
+++ b/sys/modules/ioat/Makefile
@@ -0,0 +1,15 @@
+# ioat Loadable Kernel Module
+#
+# $FreeBSD$
+
+# Root of the kernel source tree (sys/), relative to this directory.
+IOAT_SRC_PATH = ${.CURDIR}/../..
+
+.PATH: ${IOAT_SRC_PATH}/dev/ioat
+
+KMOD=	ioat
+SRCS=	ioat.c ioat_test.c
+# Generated kobj interface headers the driver sources include.
+SRCS+=	device_if.h bus_if.h pci_if.h
+
+CFLAGS+= -I${IOAT_SRC_PATH}
+
+.include <bsd.kmod.mk>