author     Mark Johnston <markj@FreeBSD.org>	2020-11-05 15:55:23 +0000
committer  Mark Johnston <markj@FreeBSD.org>	2020-11-05 15:55:23 +0000
commit     72143e89bb430c3b1406b399b810806904f6c882 (patch)
tree       53bdf1de310fd9fbfa5e1bdf4a88997f1cce732c
parent     2dee296a3d67da017ab81cab010a55b098a8daa1 (diff)
download   src-72143e89bb43.tar.gz / src-72143e89bb43.zip
Add qat(4)
This provides an OpenCrypto driver for Intel QuickAssist devices.

The driver was initially ported from NetBSD and comes with a few
improvements:
- support for GMAC/AES-GCM, AES-CTR and AES-XTS, and support for
  SHA/HMAC-authenticated encryption
- support for detaching the driver
- various bug fixes
- DH895X support

Discussed with:		jhb
MFC after:		3 days
Sponsored by:		Rubicon Communications, LLC (Netgate)
Differential Revision:	https://reviews.freebsd.org/D26963
Notes:
    svn path=/head/; revision=367386
-rw-r--r--  share/man/man4/Makefile        |    2
-rw-r--r--  share/man/man4/qat.4           |   99
-rw-r--r--  sys/amd64/conf/NOTES           |    4
-rw-r--r--  sys/conf/files.x86             |    9
-rw-r--r--  sys/dev/qat/qat.c              | 2140
-rw-r--r--  sys/dev/qat/qat_ae.c           | 3456
-rw-r--r--  sys/dev/qat/qat_aevar.h        |   73
-rw-r--r--  sys/dev/qat/qat_c2xxx.c        |  217
-rw-r--r--  sys/dev/qat/qat_c2xxxreg.h     |  177
-rw-r--r--  sys/dev/qat/qat_c3xxx.c        |  298
-rw-r--r--  sys/dev/qat/qat_c3xxxreg.h     |  178
-rw-r--r--  sys/dev/qat/qat_c62x.c         |  314
-rw-r--r--  sys/dev/qat/qat_c62xreg.h      |  201
-rw-r--r--  sys/dev/qat/qat_d15xx.c        |  314
-rw-r--r--  sys/dev/qat/qat_d15xxreg.h     |  201
-rw-r--r--  sys/dev/qat/qat_dh895xcc.c     |  271
-rw-r--r--  sys/dev/qat/qat_dh895xccreg.h  |  119
-rw-r--r--  sys/dev/qat/qat_hw15.c         |  953
-rw-r--r--  sys/dev/qat/qat_hw15reg.h      |  635
-rw-r--r--  sys/dev/qat/qat_hw15var.h      |  105
-rw-r--r--  sys/dev/qat/qat_hw17.c         |  662
-rw-r--r--  sys/dev/qat/qat_hw17reg.h      | 2460
-rw-r--r--  sys/dev/qat/qat_hw17var.h      |   80
-rw-r--r--  sys/dev/qat/qatreg.h           | 1582
-rw-r--r--  sys/dev/qat/qatvar.h           | 1073
-rw-r--r--  sys/modules/Makefile           |    2
-rw-r--r--  sys/modules/qat/Makefile       |   19
27 files changed, 15644 insertions, 0 deletions
diff --git a/share/man/man4/Makefile b/share/man/man4/Makefile
index efc4d64c62ca..c4647c7d4b56 100644
--- a/share/man/man4/Makefile
+++ b/share/man/man4/Makefile
@@ -431,6 +431,7 @@ MAN= aac.4 \
pty.4 \
puc.4 \
pwmc.4 \
+ ${_qat.4} \
${_qlxge.4} \
${_qlxgb.4} \
${_qlxgbe.4} \
@@ -823,6 +824,7 @@ _nvram.4= nvram.4
_ossl.4= ossl.4
_padlock.4= padlock.4
_pchtherm.4= pchtherm.4
+_qat.4= qat.4
_rr232x.4= rr232x.4
_speaker.4= speaker.4
_spkr.4= spkr.4
diff --git a/share/man/man4/qat.4 b/share/man/man4/qat.4
new file mode 100644
index 000000000000..9e9491f22aea
--- /dev/null
+++ b/share/man/man4/qat.4
@@ -0,0 +1,99 @@
+.\"-
+.\" Copyright (c) 2020 Rubicon Communications, LLC (Netgate)
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd November 5, 2020
+.Dt QAT 4
+.Os
+.Sh NAME
+.Nm qat
+.Nd Intel QuickAssist Technology (QAT) driver
+.Sh SYNOPSIS
+To compile this driver into the kernel,
+place the following lines in your
+kernel configuration file:
+.Bd -ragged -offset indent
+.Cd "device crypto"
+.Cd "device cryptodev"
+.Cd "device qat"
+.Ed
+.Pp
+Alternatively, to load the driver as a
+module at boot time, place the following lines in
+.Xr loader.conf 5 :
+.Bd -literal -offset indent
+qat_load="YES"
+qat_c2xxxfw_load="YES"
+qat_c3xxxfw_load="YES"
+qat_c62xfw_load="YES"
+qat_d15xxfw_load="YES"
+qat_dh895xccfw_load="YES"
+.Ed
+.Sh DESCRIPTION
+The
+.Nm
+driver implements
+.Xr crypto 4
+support for some of the cryptographic acceleration functions of the Intel
+QuickAssist device.
+The
+.Nm
+driver supports the QAT devices integrated with Atom C2000 and C3000 and Xeon
+C620 and D-1500 chipsets, and the Intel QAT Adapter 8950.
+It can accelerate AES in CBC, CTR, XTS (except for the C2000) and GCM modes,
+and can perform authenticated encryption combining the CBC, CTR and XTS modes
+with SHA1-HMAC and SHA2-HMAC.
+The
+.Nm
+driver can also compute SHA1 and SHA2 digests.
+.Sh SEE ALSO
+.Xr crypto 4 ,
+.Xr ipsec 4 ,
+.Xr pci 4 ,
+.Xr random 4 ,
+.Xr crypto 7 ,
+.Xr crypto 9
+.Sh HISTORY
+The
+.Nm
+driver first appeared in
+.Fx 13.0 .
+.Sh AUTHORS
+The
+.Nm
+driver was written for
+.Nx
+by
+.An Hikaru Abe Aq Mt hikaru@iij.ad.jp
+and ported to
+.Fx
+by
+.An Mark Johnston Aq Mt markj@FreeBSD.org .
+.Sh BUGS
+Some Atom C2000 QAT devices have two acceleration engines instead of one.
+The
+.Nm
+driver currently misbehaves when both are enabled and thus does not enable
+the second acceleration engine if one is present.
diff --git a/sys/amd64/conf/NOTES b/sys/amd64/conf/NOTES
index 4bee0f804970..4ee84a591fc8 100644
--- a/sys/amd64/conf/NOTES
+++ b/sys/amd64/conf/NOTES
@@ -471,6 +471,10 @@ device vmd_bus # bus for VMD children
device pmspcv
#
+# Intel QuickAssist
+device qat
+
+#
# SafeNet crypto driver: can be moved to the MI NOTES as soon as
# it's tested on a big-endian machine
#
diff --git a/sys/conf/files.x86 b/sys/conf/files.x86
index 1ac490b46517..968807bd8950 100644
--- a/sys/conf/files.x86
+++ b/sys/conf/files.x86
@@ -291,6 +291,15 @@ dev/isci/scil/scif_sas_task_request_state_handlers.c optional isci
dev/isci/scil/scif_sas_task_request_states.c optional isci
dev/isci/scil/scif_sas_timer.c optional isci
dev/itwd/itwd.c optional itwd
+dev/qat/qat.c optional qat
+dev/qat/qat_ae.c optional qat
+dev/qat/qat_c2xxx.c optional qat
+dev/qat/qat_c3xxx.c optional qat
+dev/qat/qat_c62x.c optional qat
+dev/qat/qat_d15xx.c optional qat
+dev/qat/qat_dh895xcc.c optional qat
+dev/qat/qat_hw15.c optional qat
+dev/qat/qat_hw17.c optional qat
libkern/x86/crc32_sse42.c standard
#
# x86 shared code between IA32 and AMD64 architectures
diff --git a/sys/dev/qat/qat.c b/sys/dev/qat/qat.c
new file mode 100644
index 000000000000..de07f8db5642
--- /dev/null
+++ b/sys/dev/qat/qat.c
@@ -0,0 +1,2140 @@
+/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
+/* $NetBSD: qat.c,v 1.6 2020/06/14 23:23:12 riastradh Exp $ */
+
+/*
+ * Copyright (c) 2019 Internet Initiative Japan, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#if 0
+__KERNEL_RCSID(0, "$NetBSD: qat.c,v 1.6 2020/06/14 23:23:12 riastradh Exp $");
+#endif
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/cpu.h>
+#include <sys/firmware.h>
+#include <sys/kernel.h>
+#include <sys/mbuf.h>
+#include <sys/md5.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+#include <sys/rman.h>
+
+#include <machine/bus.h>
+
+#include <opencrypto/cryptodev.h>
+#include <opencrypto/xform.h>
+
+#include "cryptodev_if.h"
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "qatreg.h"
+#include "qatvar.h"
+#include "qat_aevar.h"
+
+extern struct qat_hw qat_hw_c2xxx;
+extern struct qat_hw qat_hw_c3xxx;
+extern struct qat_hw qat_hw_c62x;
+extern struct qat_hw qat_hw_d15xx;
+extern struct qat_hw qat_hw_dh895xcc;
+
+#define PCI_VENDOR_INTEL 0x8086
+#define PCI_PRODUCT_INTEL_C2000_IQIA_PHYS 0x1f18
+#define PCI_PRODUCT_INTEL_C3K_QAT 0x19e2
+#define PCI_PRODUCT_INTEL_C3K_QAT_VF 0x19e3
+#define PCI_PRODUCT_INTEL_C620_QAT 0x37c8
+#define PCI_PRODUCT_INTEL_C620_QAT_VF 0x37c9
+#define PCI_PRODUCT_INTEL_XEOND_QAT 0x6f54
+#define PCI_PRODUCT_INTEL_XEOND_QAT_VF 0x6f55
+#define PCI_PRODUCT_INTEL_DH895XCC_QAT 0x0435
+#define PCI_PRODUCT_INTEL_DH895XCC_QAT_VF 0x0443
+
+static const struct qat_product {
+ uint16_t qatp_vendor;
+ uint16_t qatp_product;
+ const char *qatp_name;
+ enum qat_chip_type qatp_chip;
+ const struct qat_hw *qatp_hw;
+} qat_products[] = {
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_IQIA_PHYS,
+ "Intel C2000 QuickAssist PF",
+ QAT_CHIP_C2XXX, &qat_hw_c2xxx },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C3K_QAT,
+ "Intel C3000 QuickAssist PF",
+ QAT_CHIP_C3XXX, &qat_hw_c3xxx },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C620_QAT,
+ "Intel C620/Xeon D-2100 QuickAssist PF",
+ QAT_CHIP_C62X, &qat_hw_c62x },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XEOND_QAT,
+ "Intel Xeon D-1500 QuickAssist PF",
+ QAT_CHIP_D15XX, &qat_hw_d15xx },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH895XCC_QAT,
+ "Intel 8950 QuickAssist PCIe Adapter PF",
+ QAT_CHIP_DH895XCC, &qat_hw_dh895xcc },
+ { 0, 0, NULL, 0, NULL },
+};
+
+/* Hash Algorithm specific structure */
+
+/* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
+static const uint8_t sha1_initial_state[QAT_HASH_SHA1_STATE_SIZE] = {
+ 0x67, 0x45, 0x23, 0x01,
+ 0xef, 0xcd, 0xab, 0x89,
+ 0x98, 0xba, 0xdc, 0xfe,
+ 0x10, 0x32, 0x54, 0x76,
+ 0xc3, 0xd2, 0xe1, 0xf0
+};
+
+/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
+static const uint8_t sha256_initial_state[QAT_HASH_SHA256_STATE_SIZE] = {
+ 0x6a, 0x09, 0xe6, 0x67,
+ 0xbb, 0x67, 0xae, 0x85,
+ 0x3c, 0x6e, 0xf3, 0x72,
+ 0xa5, 0x4f, 0xf5, 0x3a,
+ 0x51, 0x0e, 0x52, 0x7f,
+ 0x9b, 0x05, 0x68, 0x8c,
+ 0x1f, 0x83, 0xd9, 0xab,
+ 0x5b, 0xe0, 0xcd, 0x19
+};
+
+/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
+static const uint8_t sha384_initial_state[QAT_HASH_SHA384_STATE_SIZE] = {
+ 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8,
+ 0x62, 0x9a, 0x29, 0x2a, 0x36, 0x7c, 0xd5, 0x07,
+ 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
+ 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39,
+ 0x67, 0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31,
+ 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11,
+ 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7,
+ 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4
+};
+
+/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
+static const uint8_t sha512_initial_state[QAT_HASH_SHA512_STATE_SIZE] = {
+ 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08,
+ 0xbb, 0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b,
+ 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, 0xf8, 0x2b,
+ 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1,
+ 0x51, 0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1,
+ 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
+ 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b,
+ 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79
+};
+
+static const struct qat_sym_hash_alg_info sha1_info = {
+ .qshai_digest_len = QAT_HASH_SHA1_DIGEST_SIZE,
+ .qshai_block_len = QAT_HASH_SHA1_BLOCK_SIZE,
+ .qshai_state_size = QAT_HASH_SHA1_STATE_SIZE,
+ .qshai_init_state = sha1_initial_state,
+ .qshai_sah = &auth_hash_hmac_sha1,
+ .qshai_state_offset = 0,
+ .qshai_state_word = 4,
+};
+
+static const struct qat_sym_hash_alg_info sha256_info = {
+ .qshai_digest_len = QAT_HASH_SHA256_DIGEST_SIZE,
+ .qshai_block_len = QAT_HASH_SHA256_BLOCK_SIZE,
+ .qshai_state_size = QAT_HASH_SHA256_STATE_SIZE,
+ .qshai_init_state = sha256_initial_state,
+ .qshai_sah = &auth_hash_hmac_sha2_256,
+ .qshai_state_offset = offsetof(SHA256_CTX, state),
+ .qshai_state_word = 4,
+};
+
+static const struct qat_sym_hash_alg_info sha384_info = {
+ .qshai_digest_len = QAT_HASH_SHA384_DIGEST_SIZE,
+ .qshai_block_len = QAT_HASH_SHA384_BLOCK_SIZE,
+ .qshai_state_size = QAT_HASH_SHA384_STATE_SIZE,
+ .qshai_init_state = sha384_initial_state,
+ .qshai_sah = &auth_hash_hmac_sha2_384,
+ .qshai_state_offset = offsetof(SHA384_CTX, state),
+ .qshai_state_word = 8,
+};
+
+static const struct qat_sym_hash_alg_info sha512_info = {
+ .qshai_digest_len = QAT_HASH_SHA512_DIGEST_SIZE,
+ .qshai_block_len = QAT_HASH_SHA512_BLOCK_SIZE,
+ .qshai_state_size = QAT_HASH_SHA512_STATE_SIZE,
+ .qshai_init_state = sha512_initial_state,
+ .qshai_sah = &auth_hash_hmac_sha2_512,
+ .qshai_state_offset = offsetof(SHA512_CTX, state),
+ .qshai_state_word = 8,
+};
+
+static const struct qat_sym_hash_alg_info aes_gcm_info = {
+ .qshai_digest_len = QAT_HASH_AES_GCM_DIGEST_SIZE,
+ .qshai_block_len = QAT_HASH_AES_GCM_BLOCK_SIZE,
+ .qshai_state_size = QAT_HASH_AES_GCM_STATE_SIZE,
+ .qshai_sah = &auth_hash_nist_gmac_aes_128,
+};
+
+/* Hash QAT specific structures */
+
+static const struct qat_sym_hash_qat_info sha1_config = {
+ .qshqi_algo_enc = HW_AUTH_ALGO_SHA1,
+ .qshqi_auth_counter = QAT_HASH_SHA1_BLOCK_SIZE,
+ .qshqi_state1_len = HW_SHA1_STATE1_SZ,
+ .qshqi_state2_len = HW_SHA1_STATE2_SZ,
+};
+
+static const struct qat_sym_hash_qat_info sha256_config = {
+ .qshqi_algo_enc = HW_AUTH_ALGO_SHA256,
+ .qshqi_auth_counter = QAT_HASH_SHA256_BLOCK_SIZE,
+ .qshqi_state1_len = HW_SHA256_STATE1_SZ,
+ .qshqi_state2_len = HW_SHA256_STATE2_SZ
+};
+
+static const struct qat_sym_hash_qat_info sha384_config = {
+ .qshqi_algo_enc = HW_AUTH_ALGO_SHA384,
+ .qshqi_auth_counter = QAT_HASH_SHA384_BLOCK_SIZE,
+ .qshqi_state1_len = HW_SHA384_STATE1_SZ,
+ .qshqi_state2_len = HW_SHA384_STATE2_SZ
+};
+
+static const struct qat_sym_hash_qat_info sha512_config = {
+ .qshqi_algo_enc = HW_AUTH_ALGO_SHA512,
+ .qshqi_auth_counter = QAT_HASH_SHA512_BLOCK_SIZE,
+ .qshqi_state1_len = HW_SHA512_STATE1_SZ,
+ .qshqi_state2_len = HW_SHA512_STATE2_SZ
+};
+
+static const struct qat_sym_hash_qat_info aes_gcm_config = {
+ .qshqi_algo_enc = HW_AUTH_ALGO_GALOIS_128,
+ .qshqi_auth_counter = QAT_HASH_AES_GCM_BLOCK_SIZE,
+ .qshqi_state1_len = HW_GALOIS_128_STATE1_SZ,
+ .qshqi_state2_len =
+ HW_GALOIS_H_SZ + HW_GALOIS_LEN_A_SZ + HW_GALOIS_E_CTR0_SZ,
+};
+
+static const struct qat_sym_hash_def qat_sym_hash_defs[] = {
+ [QAT_SYM_HASH_SHA1] = { &sha1_info, &sha1_config },
+ [QAT_SYM_HASH_SHA256] = { &sha256_info, &sha256_config },
+ [QAT_SYM_HASH_SHA384] = { &sha384_info, &sha384_config },
+ [QAT_SYM_HASH_SHA512] = { &sha512_info, &sha512_config },
+ [QAT_SYM_HASH_AES_GCM] = { &aes_gcm_info, &aes_gcm_config },
+};
+
+static const struct qat_product *qat_lookup(device_t);
+static int qat_probe(device_t);
+static int qat_attach(device_t);
+static int qat_init(struct device *);
+static int qat_start(struct device *);
+static int qat_detach(device_t);
+
+static int qat_newsession(device_t dev, crypto_session_t cses,
+ const struct crypto_session_params *csp);
+static void qat_freesession(device_t dev, crypto_session_t cses);
+
+static int qat_setup_msix_intr(struct qat_softc *);
+
+static void qat_etr_init(struct qat_softc *);
+static void qat_etr_deinit(struct qat_softc *);
+static void qat_etr_bank_init(struct qat_softc *, int);
+static void qat_etr_bank_deinit(struct qat_softc *sc, int);
+
+static void qat_etr_ap_bank_init(struct qat_softc *);
+static void qat_etr_ap_bank_set_ring_mask(uint32_t *, uint32_t, int);
+static void qat_etr_ap_bank_set_ring_dest(struct qat_softc *, uint32_t *,
+ uint32_t, int);
+static void qat_etr_ap_bank_setup_ring(struct qat_softc *,
+ struct qat_ring *);
+static int qat_etr_verify_ring_size(uint32_t, uint32_t);
+
+static int qat_etr_ring_intr(struct qat_softc *, struct qat_bank *,
+ struct qat_ring *);
+static void qat_etr_bank_intr(void *);
+
+static void qat_arb_update(struct qat_softc *, struct qat_bank *);
+
+static struct qat_sym_cookie *qat_crypto_alloc_sym_cookie(
+ struct qat_crypto_bank *);
+static void qat_crypto_free_sym_cookie(struct qat_crypto_bank *,
+ struct qat_sym_cookie *);
+static int qat_crypto_setup_ring(struct qat_softc *,
+ struct qat_crypto_bank *);
+static int qat_crypto_bank_init(struct qat_softc *,
+ struct qat_crypto_bank *);
+static int qat_crypto_init(struct qat_softc *);
+static void qat_crypto_deinit(struct qat_softc *);
+static int qat_crypto_start(struct qat_softc *);
+static void qat_crypto_stop(struct qat_softc *);
+static int qat_crypto_sym_rxintr(struct qat_softc *, void *, void *);
+
+static MALLOC_DEFINE(M_QAT, "qat", "Intel QAT driver");
+
+static const struct qat_product *
+qat_lookup(device_t dev)
+{
+ const struct qat_product *qatp;
+
+ for (qatp = qat_products; qatp->qatp_name != NULL; qatp++) {
+ if (pci_get_vendor(dev) == qatp->qatp_vendor &&
+ pci_get_device(dev) == qatp->qatp_product)
+ return qatp;
+ }
+ return NULL;
+}
+
+static int
+qat_probe(device_t dev)
+{
+ const struct qat_product *prod;
+
+ prod = qat_lookup(dev);
+ if (prod != NULL) {
+ device_set_desc(dev, prod->qatp_name);
+ return BUS_PROBE_DEFAULT;
+ }
+ return ENXIO;
+}
+
+static int
+qat_attach(device_t dev)
+{
+ struct qat_softc *sc = device_get_softc(dev);
+ const struct qat_product *qatp;
+ bus_size_t msixtbl_offset;
+ int bar, count, error, i, msixoff, msixtbl_bar;
+
+ sc->sc_dev = dev;
+ sc->sc_rev = pci_get_revid(dev);
+
+ qatp = qat_lookup(dev);
+ memcpy(&sc->sc_hw, qatp->qatp_hw, sizeof(struct qat_hw));
+
+ /* Determine active accelerators and engines */
+ sc->sc_accel_mask = sc->sc_hw.qhw_get_accel_mask(sc);
+ sc->sc_ae_mask = sc->sc_hw.qhw_get_ae_mask(sc);
+
+ sc->sc_accel_num = 0;
+ for (i = 0; i < sc->sc_hw.qhw_num_accel; i++) {
+ if (sc->sc_accel_mask & (1 << i))
+ sc->sc_accel_num++;
+ }
+ sc->sc_ae_num = 0;
+ for (i = 0; i < sc->sc_hw.qhw_num_engines; i++) {
+ if (sc->sc_ae_mask & (1 << i))
+ sc->sc_ae_num++;
+ }
+
+ if (!sc->sc_accel_mask || (sc->sc_ae_mask & 0x01) == 0) {
+ device_printf(sc->sc_dev, "couldn't find acceleration");
+ goto fail;
+ }
+
+ MPASS(sc->sc_accel_num <= MAX_NUM_ACCEL);
+ MPASS(sc->sc_ae_num <= MAX_NUM_AE);
+
+ /* Determine SKU and capabilities */
+ sc->sc_sku = sc->sc_hw.qhw_get_sku(sc);
+ sc->sc_accel_cap = sc->sc_hw.qhw_get_accel_cap(sc);
+ sc->sc_fw_uof_name = sc->sc_hw.qhw_get_fw_uof_name(sc);
+
+ /* Map BARs */
+ msixtbl_bar = 0;
+ msixtbl_offset = 0;
+ if (pci_find_cap(dev, PCIY_MSIX, &msixoff) == 0) {
+ uint32_t msixtbl;
+ msixtbl = pci_read_config(dev, msixoff + PCIR_MSIX_TABLE, 4);
+ msixtbl_offset = msixtbl & ~PCIM_MSIX_BIR_MASK;
+ msixtbl_bar = PCIR_BAR(msixtbl & PCIM_MSIX_BIR_MASK);
+ }
+
+ i = 0;
+ if (sc->sc_hw.qhw_sram_bar_id != NO_PCI_REG) {
+ MPASS(sc->sc_hw.qhw_sram_bar_id == 0);
+ uint32_t fusectl = pci_read_config(dev, FUSECTL_REG, 4);
+ /* Skip SRAM BAR */
+ i = (fusectl & FUSECTL_MASK) ? 1 : 0;
+ }
+ for (bar = 0; bar < PCIR_MAX_BAR_0; bar++) {
+ uint32_t val = pci_read_config(dev, PCIR_BAR(bar), 4);
+ if (val == 0 || !PCI_BAR_MEM(val))
+ continue;
+
+ sc->sc_rid[i] = PCIR_BAR(bar);
+ sc->sc_res[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &sc->sc_rid[i], RF_ACTIVE);
+ if (sc->sc_res[i] == NULL) {
+ device_printf(dev, "couldn't map BAR %d\n", bar);
+ goto fail;
+ }
+
+ sc->sc_csrt[i] = rman_get_bustag(sc->sc_res[i]);
+ sc->sc_csrh[i] = rman_get_bushandle(sc->sc_res[i]);
+
+ i++;
+ if ((val & PCIM_BAR_MEM_TYPE) == PCIM_BAR_MEM_64)
+ bar++;
+ }
+
+ pci_enable_busmaster(dev);
+
+ count = sc->sc_hw.qhw_num_banks + 1;
+ if (pci_msix_count(dev) < count) {
+ device_printf(dev, "insufficient MSI-X vectors (%d vs. %d)\n",
+ pci_msix_count(dev), count);
+ goto fail;
+ }
+ error = pci_alloc_msix(dev, &count);
+ if (error != 0) {
+ device_printf(dev, "failed to allocate MSI-X vectors\n");
+ goto fail;
+ }
+
+ error = qat_init(dev);
+ if (error == 0)
+ return 0;
+
+fail:
+ qat_detach(dev);
+ return ENXIO;
+}
+
+static int
+qat_init(device_t dev)
+{
+ struct qat_softc *sc = device_get_softc(dev);
+ int error;
+
+ qat_etr_init(sc);
+
+ if (sc->sc_hw.qhw_init_admin_comms != NULL &&
+ (error = sc->sc_hw.qhw_init_admin_comms(sc)) != 0) {
+ device_printf(sc->sc_dev,
+ "Could not initialize admin comms: %d\n", error);
+ return error;
+ }
+
+ if (sc->sc_hw.qhw_init_arb != NULL &&
+ (error = sc->sc_hw.qhw_init_arb(sc)) != 0) {
+ device_printf(sc->sc_dev,
+ "Could not initialize hw arbiter: %d\n", error);
+ return error;
+ }
+
+ error = qat_ae_init(sc);
+ if (error) {
+ device_printf(sc->sc_dev,
+ "Could not initialize Acceleration Engine: %d\n", error);
+ return error;
+ }
+
+ error = qat_aefw_load(sc);
+ if (error) {
+ device_printf(sc->sc_dev,
+ "Could not load firmware: %d\n", error);
+ return error;
+ }
+
+ error = qat_setup_msix_intr(sc);
+ if (error) {
+ device_printf(sc->sc_dev,
+ "Could not setup interrupts: %d\n", error);
+ return error;
+ }
+
+ sc->sc_hw.qhw_enable_intr(sc);
+
+ error = qat_crypto_init(sc);
+ if (error) {
+ device_printf(sc->sc_dev,
+ "Could not initialize service: %d\n", error);
+ return error;
+ }
+
+ if (sc->sc_hw.qhw_enable_error_correction != NULL)
+ sc->sc_hw.qhw_enable_error_correction(sc);
+
+ if (sc->sc_hw.qhw_set_ssm_wdtimer != NULL &&
+ (error = sc->sc_hw.qhw_set_ssm_wdtimer(sc)) != 0) {
+ device_printf(sc->sc_dev,
+ "Could not initialize watchdog timer: %d\n", error);
+ return error;
+ }
+
+ error = qat_start(dev);
+ if (error) {
+ device_printf(sc->sc_dev,
+ "Could not start: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int
+qat_start(device_t dev)
+{
+ struct qat_softc *sc = device_get_softc(dev);
+ int error;
+
+ error = qat_ae_start(sc);
+ if (error)
+ return error;
+
+ if (sc->sc_hw.qhw_send_admin_init != NULL &&
+ (error = sc->sc_hw.qhw_send_admin_init(sc)) != 0) {
+ return error;
+ }
+
+ error = qat_crypto_start(sc);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int
+qat_detach(device_t dev)
+{
+ struct qat_softc *sc;
+ int bar, i;
+
+ sc = device_get_softc(dev);
+
+ qat_crypto_stop(sc);
+ qat_crypto_deinit(sc);
+ qat_aefw_unload(sc);
+
+ if (sc->sc_etr_banks != NULL) {
+ for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) {
+ struct qat_bank *qb = &sc->sc_etr_banks[i];
+
+ if (qb->qb_ih_cookie != NULL)
+ (void)bus_teardown_intr(dev, qb->qb_ih,
+ qb->qb_ih_cookie);
+ if (qb->qb_ih != NULL)
+ (void)bus_release_resource(dev, SYS_RES_IRQ,
+ i + 1, qb->qb_ih);
+ }
+ }
+ if (sc->sc_ih_cookie != NULL) {
+ (void)bus_teardown_intr(dev, sc->sc_ih, sc->sc_ih_cookie);
+ sc->sc_ih_cookie = NULL;
+ }
+ if (sc->sc_ih != NULL) {
+ (void)bus_release_resource(dev, SYS_RES_IRQ, i + 1, sc->sc_ih);
+ sc->sc_ih = NULL;
+ }
+ pci_release_msi(dev);
+
+ qat_etr_deinit(sc);
+
+ for (bar = 0; bar < MAX_BARS; bar++) {
+ if (sc->sc_res[bar] != NULL) {
+ (void)bus_release_resource(dev, SYS_RES_MEMORY,
+ sc->sc_rid[bar], sc->sc_res[bar]);
+ sc->sc_res[bar] = NULL;
+ }
+ }
+
+ return 0;
+}
+
+void *
+qat_alloc_mem(size_t size)
+{
+ return (malloc(size, M_QAT, M_WAITOK | M_ZERO));
+}
+
+void
+qat_free_mem(void *ptr)
+{
+ free(ptr, M_QAT);
+}
+
+static void
+qat_alloc_dmamem_cb(void *arg, bus_dma_segment_t *segs, int nseg,
+ int error)
+{
+ struct qat_dmamem *qdm;
+
+ if (error != 0)
+ return;
+
+ KASSERT(nseg == 1, ("%s: nsegs is %d", __func__, nseg));
+ qdm = arg;
+ qdm->qdm_dma_seg = segs[0];
+}
+
+int
+qat_alloc_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm,
+ int nseg, bus_size_t size, bus_size_t alignment)
+{
+ int error;
+
+ KASSERT(qdm->qdm_dma_vaddr == NULL,
+ ("%s: DMA memory descriptor in use", __func__));
+
+ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),
+ alignment, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ size, /* maxsize */
+ nseg, /* nsegments */
+ size, /* maxsegsize */
+ BUS_DMA_COHERENT, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &qdm->qdm_dma_tag);
+ if (error != 0)
+ return error;
+
+ error = bus_dmamem_alloc(qdm->qdm_dma_tag, &qdm->qdm_dma_vaddr,
+ BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
+ &qdm->qdm_dma_map);
+ if (error != 0) {
+ device_printf(sc->sc_dev,
+ "couldn't allocate dmamem, error = %d\n", error);
+ goto fail_0;
+ }
+
+ error = bus_dmamap_load(qdm->qdm_dma_tag, qdm->qdm_dma_map,
+ qdm->qdm_dma_vaddr, size, qat_alloc_dmamem_cb, qdm,
+ BUS_DMA_NOWAIT);
+ if (error) {
+ device_printf(sc->sc_dev,
+ "couldn't load dmamem map, error = %d\n", error);
+ goto fail_1;
+ }
+
+ return 0;
+fail_1:
+ bus_dmamem_free(qdm->qdm_dma_tag, qdm->qdm_dma_vaddr, qdm->qdm_dma_map);
+fail_0:
+ bus_dma_tag_destroy(qdm->qdm_dma_tag);
+ return error;
+}
+
+void
+qat_free_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm)
+{
+ if (qdm->qdm_dma_tag != NULL) {
+ bus_dmamap_unload(qdm->qdm_dma_tag, qdm->qdm_dma_map);
+ bus_dmamem_free(qdm->qdm_dma_tag, qdm->qdm_dma_vaddr,
+ qdm->qdm_dma_map);
+ bus_dma_tag_destroy(qdm->qdm_dma_tag);
+ explicit_bzero(qdm, sizeof(*qdm));
+ }
+}
+
+static int
+qat_setup_msix_intr(struct qat_softc *sc)
+{
+ device_t dev;
+ int error, i, rid;
+
+ dev = sc->sc_dev;
+
+ for (i = 1; i <= sc->sc_hw.qhw_num_banks; i++) {
+ struct qat_bank *qb = &sc->sc_etr_banks[i - 1];
+
+ rid = i;
+ qb->qb_ih = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+ RF_ACTIVE);
+ if (qb->qb_ih == NULL) {
+ device_printf(dev,
+ "failed to allocate bank intr resource\n");
+ return ENXIO;
+ }
+ error = bus_setup_intr(dev, qb->qb_ih,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL, qat_etr_bank_intr, qb,
+ &qb->qb_ih_cookie);
+ if (error != 0) {
+ device_printf(dev, "failed to set up bank intr\n");
+ return error;
+ }
+ error = bus_bind_intr(dev, qb->qb_ih, (i - 1) % mp_ncpus);
+ if (error != 0)
+ device_printf(dev, "failed to bind intr %d\n", i);
+ }
+
+ rid = i;
+ sc->sc_ih = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+ RF_ACTIVE);
+ if (sc->sc_ih == NULL)
+ return ENXIO;
+ error = bus_setup_intr(dev, sc->sc_ih, INTR_TYPE_NET | INTR_MPSAFE,
+ NULL, qat_ae_cluster_intr, sc, &sc->sc_ih_cookie);
+
+ return error;
+}
+
+static void
+qat_etr_init(struct qat_softc *sc)
+{
+ int i;
+
+ sc->sc_etr_banks = qat_alloc_mem(
+ sizeof(struct qat_bank) * sc->sc_hw.qhw_num_banks);
+
+ for (i = 0; i < sc->sc_hw.qhw_num_banks; i++)
+ qat_etr_bank_init(sc, i);
+
+ if (sc->sc_hw.qhw_num_ap_banks) {
+ sc->sc_etr_ap_banks = qat_alloc_mem(
+ sizeof(struct qat_ap_bank) * sc->sc_hw.qhw_num_ap_banks);
+ qat_etr_ap_bank_init(sc);
+ }
+}
+
+static void
+qat_etr_deinit(struct qat_softc *sc)
+{
+ int i;
+
+ if (sc->sc_etr_banks != NULL) {
+ for (i = 0; i < sc->sc_hw.qhw_num_banks; i++)
+ qat_etr_bank_deinit(sc, i);
+ qat_free_mem(sc->sc_etr_banks);
+ sc->sc_etr_banks = NULL;
+ }
+ if (sc->sc_etr_ap_banks != NULL) {
+ qat_free_mem(sc->sc_etr_ap_banks);
+ sc->sc_etr_ap_banks = NULL;
+ }
+}
+
+static void
+qat_etr_bank_init(struct qat_softc *sc, int bank)
+{
+ struct qat_bank *qb = &sc->sc_etr_banks[bank];
+ int i, tx_rx_gap = sc->sc_hw.qhw_tx_rx_gap;
+
+ MPASS(bank < sc->sc_hw.qhw_num_banks);
+
+ mtx_init(&qb->qb_bank_mtx, "qb bank", NULL, MTX_DEF);
+
+ qb->qb_sc = sc;
+ qb->qb_bank = bank;
+ qb->qb_coalescing_time = COALESCING_TIME_INTERVAL_DEFAULT;
+
+ /* Clean CSRs for all rings within the bank */
+ for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank; i++) {
+ struct qat_ring *qr = &qb->qb_et_rings[i];
+
+ qat_etr_bank_ring_write_4(sc, bank, i,
+ ETR_RING_CONFIG, 0);
+ qat_etr_bank_ring_base_write_8(sc, bank, i, 0);
+
+ if (sc->sc_hw.qhw_tx_rings_mask & (1 << i)) {
+ qr->qr_inflight = qat_alloc_mem(sizeof(uint32_t));
+ } else if (sc->sc_hw.qhw_tx_rings_mask &
+ (1 << (i - tx_rx_gap))) {
+ /* Share inflight counter with rx and tx */
+ qr->qr_inflight =
+ qb->qb_et_rings[i - tx_rx_gap].qr_inflight;
+ }
+ }
+
+ if (sc->sc_hw.qhw_init_etr_intr != NULL) {
+ sc->sc_hw.qhw_init_etr_intr(sc, bank);
+ } else {
+ /* common code in qat 1.7 */
+ qat_etr_bank_write_4(sc, bank, ETR_INT_REG,
+ ETR_INT_REG_CLEAR_MASK);
+ for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank /
+ ETR_RINGS_PER_INT_SRCSEL; i++) {
+ qat_etr_bank_write_4(sc, bank, ETR_INT_SRCSEL +
+ (i * ETR_INT_SRCSEL_NEXT_OFFSET),
+ ETR_INT_SRCSEL_MASK);
+ }
+ }
+}
+
+static void
+qat_etr_bank_deinit(struct qat_softc *sc, int bank)
+{
+ struct qat_bank *qb;
+ struct qat_ring *qr;
+ int i;
+
+ qb = &sc->sc_etr_banks[bank];
+ for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank; i++) {
+ if (sc->sc_hw.qhw_tx_rings_mask & (1 << i)) {
+ qr = &qb->qb_et_rings[i];
+ qat_free_mem(qr->qr_inflight);
+ }
+ }
+}
+
+static void
+qat_etr_ap_bank_init(struct qat_softc *sc)
+{
+ int ap_bank;
+
+ for (ap_bank = 0; ap_bank < sc->sc_hw.qhw_num_ap_banks; ap_bank++) {
+ struct qat_ap_bank *qab = &sc->sc_etr_ap_banks[ap_bank];
+
+ qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_MASK,
+ ETR_AP_NF_MASK_INIT);
+ qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST, 0);
+ qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_MASK,
+ ETR_AP_NE_MASK_INIT);
+ qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST, 0);
+
+ memset(qab, 0, sizeof(*qab));
+ }
+}
+
+static void
+qat_etr_ap_bank_set_ring_mask(uint32_t *ap_mask, uint32_t ring, int set_mask)
+{
+ if (set_mask)
+ *ap_mask |= (1 << ETR_RING_NUMBER_IN_AP_BANK(ring));
+ else
+ *ap_mask &= ~(1 << ETR_RING_NUMBER_IN_AP_BANK(ring));
+}
+
+static void
+qat_etr_ap_bank_set_ring_dest(struct qat_softc *sc, uint32_t *ap_dest,
+ uint32_t ring, int set_dest)
+{
+ uint32_t ae_mask;
+ uint8_t mailbox, ae, nae;
+ uint8_t *dest = (uint8_t *)ap_dest;
+
+ mailbox = ETR_RING_AP_MAILBOX_NUMBER(ring);
+
+ nae = 0;
+ ae_mask = sc->sc_ae_mask;
+ for (ae = 0; ae < sc->sc_hw.qhw_num_engines; ae++) {
+ if ((ae_mask & (1 << ae)) == 0)
+ continue;
+
+ if (set_dest) {
+ dest[nae] = __SHIFTIN(ae, ETR_AP_DEST_AE) |
+ __SHIFTIN(mailbox, ETR_AP_DEST_MAILBOX) |
+ ETR_AP_DEST_ENABLE;
+ } else {
+ dest[nae] = 0;
+ }
+ nae++;
+ if (nae == ETR_MAX_AE_PER_MAILBOX)
+ break;
+ }
+}
+
+static void
+qat_etr_ap_bank_setup_ring(struct qat_softc *sc, struct qat_ring *qr)
+{
+ struct qat_ap_bank *qab;
+ int ap_bank;
+
+ if (sc->sc_hw.qhw_num_ap_banks == 0)
+ return;
+
+ ap_bank = ETR_RING_AP_BANK_NUMBER(qr->qr_ring);
+ MPASS(ap_bank < sc->sc_hw.qhw_num_ap_banks);
+ qab = &sc->sc_etr_ap_banks[ap_bank];
+
+ if (qr->qr_cb == NULL) {
+ qat_etr_ap_bank_set_ring_mask(&qab->qab_ne_mask, qr->qr_ring, 1);
+ if (!qab->qab_ne_dest) {
+ qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_ne_dest,
+ qr->qr_ring, 1);
+ qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST,
+ qab->qab_ne_dest);
+ }
+ } else {
+ qat_etr_ap_bank_set_ring_mask(&qab->qab_nf_mask, qr->qr_ring, 1);
+ if (!qab->qab_nf_dest) {
+ qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_nf_dest,
+ qr->qr_ring, 1);
+ qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST,
+ qab->qab_nf_dest);
+ }
+ }
+}
+
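+/*
+ * Find the ring size encoding whose size in bytes equals msg_size * num_msgs,
+ * falling back to the default ring size if there is no exact match.
+ */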
+static int
+qat_etr_verify_ring_size(uint32_t msg_size, uint32_t num_msgs)
+{
+ int i = QAT_MIN_RING_SIZE;
+
+ for (; i <= QAT_MAX_RING_SIZE; i++)
+ if ((msg_size * num_msgs) == QAT_SIZE_TO_RING_SIZE_IN_BYTES(i))
+ return i;
+
+ return QAT_DEFAULT_RING_SIZE;
+}
+
+int
+qat_etr_setup_ring(struct qat_softc *sc, int bank, uint32_t ring,
+ uint32_t num_msgs, uint32_t msg_size, qat_cb_t cb, void *cb_arg,
+ const char *name, struct qat_ring **rqr)
+{
+ struct qat_bank *qb;
+ struct qat_ring *qr = NULL;
+ int error;
+ uint32_t ring_size_bytes, ring_config;
+ uint64_t ring_base;
+ uint32_t wm_nf = ETR_RING_CONFIG_NEAR_WM_512;
+ uint32_t wm_ne = ETR_RING_CONFIG_NEAR_WM_0;
+
+ MPASS(bank < sc->sc_hw.qhw_num_banks);
+
+ /* Allocate a ring from specified bank */
+ qb = &sc->sc_etr_banks[bank];
+
+ if (ring >= sc->sc_hw.qhw_num_rings_per_bank)
+ return EINVAL;
+ if (qb->qb_allocated_rings & (1 << ring))
+ return ENOENT;
+ qr = &qb->qb_et_rings[ring];
+ qb->qb_allocated_rings |= 1 << ring;
+
+ /* Initialize allocated ring */
+ qr->qr_ring = ring;
+ qr->qr_bank = bank;
+ qr->qr_name = name;
+ qr->qr_ring_id = qr->qr_bank * sc->sc_hw.qhw_num_rings_per_bank + ring;
+ qr->qr_ring_mask = (1 << ring);
+ qr->qr_cb = cb;
+ qr->qr_cb_arg = cb_arg;
+
+ /* Setup the shadow variables */
+ qr->qr_head = 0;
+ qr->qr_tail = 0;
+ qr->qr_msg_size = QAT_BYTES_TO_MSG_SIZE(msg_size);
+ qr->qr_ring_size = qat_etr_verify_ring_size(msg_size, num_msgs);
+
+ /*
+ * To make sure that the ring is aligned to the ring size, allocate
+ * at least 4k and then tell the user it is smaller.
+ */
+ ring_size_bytes = QAT_SIZE_TO_RING_SIZE_IN_BYTES(qr->qr_ring_size);
+ ring_size_bytes = QAT_RING_SIZE_BYTES_MIN(ring_size_bytes);
+ error = qat_alloc_dmamem(sc, &qr->qr_dma, 1, ring_size_bytes,
+ ring_size_bytes);
+ if (error)
+ return error;
+
+ qr->qr_ring_vaddr = qr->qr_dma.qdm_dma_vaddr;
+ qr->qr_ring_paddr = qr->qr_dma.qdm_dma_seg.ds_addr;
+
+ memset(qr->qr_ring_vaddr, QAT_RING_PATTERN,
+ qr->qr_dma.qdm_dma_seg.ds_len);
+
+ bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map,
+ BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+
+ if (cb == NULL) {
+ ring_config = ETR_RING_CONFIG_BUILD(qr->qr_ring_size);
+ } else {
+ ring_config =
+ ETR_RING_CONFIG_BUILD_RESP(qr->qr_ring_size, wm_nf, wm_ne);
+ }
+ qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_CONFIG, ring_config);
+
+ ring_base = ETR_RING_BASE_BUILD(qr->qr_ring_paddr, qr->qr_ring_size);
+ qat_etr_bank_ring_base_write_8(sc, bank, ring, ring_base);
+
+ if (sc->sc_hw.qhw_init_arb != NULL)
+ qat_arb_update(sc, qb);
+
+ mtx_init(&qr->qr_ring_mtx, "qr ring", NULL, MTX_DEF);
+
+ qat_etr_ap_bank_setup_ring(sc, qr);
+
+ if (cb != NULL) {
+ uint32_t intr_mask;
+
+ qb->qb_intr_mask |= qr->qr_ring_mask;
+ intr_mask = qb->qb_intr_mask;
+
+ qat_etr_bank_write_4(sc, bank, ETR_INT_COL_EN, intr_mask);
+ qat_etr_bank_write_4(sc, bank, ETR_INT_COL_CTL,
+ ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time);
+ }
+
+ *rqr = qr;
+
+ return 0;
+}
+
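+/*
+ * Return data % (1 << shift) without a division; ring head and tail
+ * offsets wrap around the power-of-two ring size using this helper.
+ */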
+static inline u_int
+qat_modulo(u_int data, u_int shift)
+{
+ u_int div = data >> shift;
+ u_int mult = div << shift;
+ return data - mult;
+}
+
+int
+qat_etr_put_msg(struct qat_softc *sc, struct qat_ring *qr, uint32_t *msg)
+{
+ uint32_t inflight;
+ uint32_t *addr;
+
+ mtx_lock(&qr->qr_ring_mtx);
+
+ inflight = atomic_fetchadd_32(qr->qr_inflight, 1) + 1;
+ if (inflight > QAT_MAX_INFLIGHTS(qr->qr_ring_size, qr->qr_msg_size)) {
+ atomic_subtract_32(qr->qr_inflight, 1);
+ qr->qr_need_wakeup = true;
+ mtx_unlock(&qr->qr_ring_mtx);
+ counter_u64_add(sc->sc_ring_full_restarts, 1);
+ return ERESTART;
+ }
+
+ addr = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_tail);
+
+ memcpy(addr, msg, QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size));
+
+ bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map,
+ BUS_DMASYNC_PREWRITE);
+
+ qr->qr_tail = qat_modulo(qr->qr_tail +
+ QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
+ QAT_RING_SIZE_MODULO(qr->qr_ring_size));
+
+ qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring,
+ ETR_RING_TAIL_OFFSET, qr->qr_tail);
+
+ mtx_unlock(&qr->qr_ring_mtx);
+
+ return 0;
+}
+
+static int
+qat_etr_ring_intr(struct qat_softc *sc, struct qat_bank *qb,
+ struct qat_ring *qr)
+{
+ uint32_t *msg, nmsg = 0;
+ int handled = 0;
+ bool blocked = false;
+
+ mtx_lock(&qr->qr_ring_mtx);
+
+ msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head);
+
+ bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ while (atomic_load_32(msg) != ETR_RING_EMPTY_ENTRY_SIG) {
+ atomic_subtract_32(qr->qr_inflight, 1);
+
+ if (qr->qr_cb != NULL) {
+ mtx_unlock(&qr->qr_ring_mtx);
+ handled |= qr->qr_cb(sc, qr->qr_cb_arg, msg);
+ mtx_lock(&qr->qr_ring_mtx);
+ }
+
+ atomic_store_32(msg, ETR_RING_EMPTY_ENTRY_SIG);
+
+ qr->qr_head = qat_modulo(qr->qr_head +
+ QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
+ QAT_RING_SIZE_MODULO(qr->qr_ring_size));
+ nmsg++;
+
+ msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head);
+ }
+
+ bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map,
+ BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+
+ if (nmsg > 0) {
+ qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring,
+ ETR_RING_HEAD_OFFSET, qr->qr_head);
+ if (qr->qr_need_wakeup) {
+ blocked = true;
+ qr->qr_need_wakeup = false;
+ }
+ }
+
+ mtx_unlock(&qr->qr_ring_mtx);
+
+ if (blocked)
+ crypto_unblock(sc->sc_crypto.qcy_cid, CRYPTO_SYMQ);
+
+ return handled;
+}
+
+static void
+qat_etr_bank_intr(void *arg)
+{
+ struct qat_bank *qb = arg;
+ struct qat_softc *sc = qb->qb_sc;
+ uint32_t estat;
+ int i, handled = 0;
+
+ mtx_lock(&qb->qb_bank_mtx);
+
+ qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL, 0);
+
+ /* Now handle all the responses */
+ estat = ~qat_etr_bank_read_4(sc, qb->qb_bank, ETR_E_STAT);
+ estat &= qb->qb_intr_mask;
+
+ qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL,
+ ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time);
+
+ mtx_unlock(&qb->qb_bank_mtx);
+
+ while ((i = ffs(estat)) != 0) {
+ struct qat_ring *qr = &qb->qb_et_rings[--i];
+ estat &= ~(1 << i);
+ handled |= qat_etr_ring_intr(sc, qb, qr);
+ }
+}
+
+void
+qat_arb_update(struct qat_softc *sc, struct qat_bank *qb)
+{
+
+ qat_arb_ringsrvarben_write_4(sc, qb->qb_bank,
+ qb->qb_allocated_rings & 0xff);
+}
+
+static struct qat_sym_cookie *
+qat_crypto_alloc_sym_cookie(struct qat_crypto_bank *qcb)
+{
+ struct qat_sym_cookie *qsc;
+
+ mtx_lock(&qcb->qcb_bank_mtx);
+
+ if (qcb->qcb_symck_free_count == 0) {
+ mtx_unlock(&qcb->qcb_bank_mtx);
+ return NULL;
+ }
+
+ qsc = qcb->qcb_symck_free[--qcb->qcb_symck_free_count];
+
+ mtx_unlock(&qcb->qcb_bank_mtx);
+
+ return qsc;
+}
+
+static void
+qat_crypto_free_sym_cookie(struct qat_crypto_bank *qcb,
+ struct qat_sym_cookie *qsc)
+{
+
+ explicit_bzero(qsc->qsc_iv_buf, sizeof(qsc->qsc_iv_buf));
+ explicit_bzero(qsc->qsc_auth_res, sizeof(qsc->qsc_auth_res));
+
+ mtx_lock(&qcb->qcb_bank_mtx);
+ qcb->qcb_symck_free[qcb->qcb_symck_free_count++] = qsc;
+ mtx_unlock(&qcb->qcb_bank_mtx);
+}
+
+void
+qat_memcpy_htobe64(void *dst, const void *src, size_t len)
+{
+ uint64_t *dst0 = dst;
+ const uint64_t *src0 = src;
+ size_t i;
+
+ MPASS(len % sizeof(*dst0) == 0);
+
+ for (i = 0; i < len / sizeof(*dst0); i++)
+ *(dst0 + i) = htobe64(*(src0 + i));
+}
+
+void
+qat_memcpy_htobe32(void *dst, const void *src, size_t len)
+{
+ uint32_t *dst0 = dst;
+ const uint32_t *src0 = src;
+ size_t i;
+
+ MPASS(len % sizeof(*dst0) == 0);
+
+ for (i = 0; i < len / sizeof(*dst0); i++)
+ *(dst0 + i) = htobe32(*(src0 + i));
+}
+
+void
+qat_memcpy_htobe(void *dst, const void *src, size_t len, uint32_t wordbyte)
+{
+ switch (wordbyte) {
+ case 4:
+ qat_memcpy_htobe32(dst, src, len);
+ break;
+ case 8:
+ qat_memcpy_htobe64(dst, src, len);
+ break;
+ default:
+ panic("invalid word size %u", wordbyte);
+ }
+}
+
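+/*
+ * Derive the GHASH hash key H = E_K(0^128) by encrypting an all-zero
+ * block with the session key.
+ */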
+void
+qat_crypto_gmac_precompute(const struct qat_crypto_desc *desc,
+ const uint8_t *key, int klen, const struct qat_sym_hash_def *hash_def,
+ uint8_t *state)
+{
+ uint32_t ks[4 * (RIJNDAEL_MAXNR + 1)];
+ char zeros[AES_BLOCK_LEN];
+ int rounds;
+
+ memset(zeros, 0, sizeof(zeros));
+ rounds = rijndaelKeySetupEnc(ks, key, klen * NBBY);
+ rijndaelEncrypt(ks, rounds, zeros, state);
+ explicit_bzero(ks, sizeof(ks));
+}
+
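+/*
+ * Precompute the HMAC inner (ipad) and outer (opad) hash states for the
+ * given key, storing them in big-endian form in state1 and state2.
+ */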
+void
+qat_crypto_hmac_precompute(const struct qat_crypto_desc *desc,
+ const uint8_t *key, int klen, const struct qat_sym_hash_def *hash_def,
+ uint8_t *state1, uint8_t *state2)
+{
+ union authctx ctx;
+ const struct auth_hash *sah = hash_def->qshd_alg->qshai_sah;
+ uint32_t state_offset = hash_def->qshd_alg->qshai_state_offset;
+ uint32_t state_size = hash_def->qshd_alg->qshai_state_size;
+ uint32_t state_word = hash_def->qshd_alg->qshai_state_word;
+
+ hmac_init_ipad(sah, key, klen, &ctx);
+ qat_memcpy_htobe(state1, (uint8_t *)&ctx + state_offset, state_size,
+ state_word);
+ hmac_init_opad(sah, key, klen, &ctx);
+ qat_memcpy_htobe(state2, (uint8_t *)&ctx + state_offset, state_size,
+ state_word);
+ explicit_bzero(&ctx, sizeof(ctx));
+}
+
+static enum hw_cipher_algo
+qat_aes_cipher_algo(int klen)
+{
+ switch (klen) {
+ case HW_AES_128_KEY_SZ:
+ return HW_CIPHER_ALGO_AES128;
+ case HW_AES_192_KEY_SZ:
+ return HW_CIPHER_ALGO_AES192;
+ case HW_AES_256_KEY_SZ:
+ return HW_CIPHER_ALGO_AES256;
+ default:
+ panic("invalid key length %d", klen);
+ }
+}
+
+uint16_t
+qat_crypto_load_cipher_session(const struct qat_crypto_desc *desc,
+ const struct qat_session *qs)
+{
+ enum hw_cipher_algo algo;
+ enum hw_cipher_dir dir;
+ enum hw_cipher_convert key_convert;
+ enum hw_cipher_mode mode;
+
+ dir = desc->qcd_cipher_dir;
+ key_convert = HW_CIPHER_NO_CONVERT;
+ mode = qs->qs_cipher_mode;
+ switch (mode) {
+ case HW_CIPHER_CBC_MODE:
+ case HW_CIPHER_XTS_MODE:
+ algo = qs->qs_cipher_algo;
+
+ /*
+ * AES decrypt key needs to be reversed.
+ * Instead of reversing the key at session registration,
+ * it is instead reversed on-the-fly by setting the KEY_CONVERT
+ * bit here.
+ */
+ if (desc->qcd_cipher_dir == HW_CIPHER_DECRYPT)
+ key_convert = HW_CIPHER_KEY_CONVERT;
+ break;
+ case HW_CIPHER_CTR_MODE:
+ algo = qs->qs_cipher_algo;
+ dir = HW_CIPHER_ENCRYPT;
+ break;
+ default:
+ panic("unhandled cipher mode %d", mode);
+ break;
+ }
+
+ return HW_CIPHER_CONFIG_BUILD(mode, algo, key_convert, dir);
+}
+
+uint16_t
+qat_crypto_load_auth_session(const struct qat_crypto_desc *desc,
+ const struct qat_session *qs, const struct qat_sym_hash_def **hash_def)
+{
+ enum qat_sym_hash_algorithm algo;
+
+ switch (qs->qs_auth_algo) {
+ case HW_AUTH_ALGO_SHA1:
+ algo = QAT_SYM_HASH_SHA1;
+ break;
+ case HW_AUTH_ALGO_SHA256:
+ algo = QAT_SYM_HASH_SHA256;
+ break;
+ case HW_AUTH_ALGO_SHA384:
+ algo = QAT_SYM_HASH_SHA384;
+ break;
+ case HW_AUTH_ALGO_SHA512:
+ algo = QAT_SYM_HASH_SHA512;
+ break;
+ case HW_AUTH_ALGO_GALOIS_128:
+ algo = QAT_SYM_HASH_AES_GCM;
+ break;
+ default:
+ panic("unhandled auth algorithm %d", qs->qs_auth_algo);
+ break;
+ }
+ *hash_def = &qat_sym_hash_defs[algo];
+
+ return HW_AUTH_CONFIG_BUILD(qs->qs_auth_mode,
+ (*hash_def)->qshd_qat->qshqi_algo_enc,
+ (*hash_def)->qshd_alg->qshai_digest_len);
+}
+
+struct qat_crypto_load_cb_arg {
+ struct qat_session *qs;
+ struct qat_sym_cookie *qsc;
+ struct cryptop *crp;
+ int error;
+};
+
+static void
+qat_crypto_load_cb(void *_arg, bus_dma_segment_t *segs, int nseg,
+ int error)
+{
+ struct cryptop *crp;
+ struct flat_buffer_desc *flatbuf;
+ struct qat_crypto_load_cb_arg *arg;
+ struct qat_session *qs;
+ struct qat_sym_cookie *qsc;
+ bus_addr_t addr;
+ bus_size_t len;
+ int iseg, oseg, skip;
+
+ arg = _arg;
+ if (error != 0) {
+ arg->error = error;
+ return;
+ }
+
+ crp = arg->crp;
+ qs = arg->qs;
+ qsc = arg->qsc;
+
+ if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
+ /*
+ * The firmware expects AAD to be in a contiguous buffer and
+ * padded to a multiple of 16 bytes. To satisfy these
+ * constraints we bounce the AAD into a per-request buffer.
+ */
+ crypto_copydata(crp, crp->crp_aad_start, crp->crp_aad_length,
+ qsc->qsc_gcm_aad);
+ memset(qsc->qsc_gcm_aad + crp->crp_aad_length, 0,
+ roundup2(crp->crp_aad_length, QAT_AES_GCM_AAD_ALIGN) -
+ crp->crp_aad_length);
+ skip = crp->crp_payload_start;
+ } else if (crp->crp_aad_length > 0) {
+ skip = crp->crp_aad_start;
+ } else {
+ skip = crp->crp_payload_start;
+ }
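+ /*
+ * Build the flat buffer list for the firmware, skipping the bytes that
+ * precede the region of interest: the AAD when present, otherwise the
+ * payload (GCM AAD was already bounced into a separate buffer above).
+ */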
+
+ for (iseg = oseg = 0; iseg < nseg; iseg++) {
+ addr = segs[iseg].ds_addr;
+ len = segs[iseg].ds_len;
+
+ if (skip > 0) {
+ if (skip < len) {
+ addr += skip;
+ len -= skip;
+ skip = 0;
+ } else {
+ skip -= len;
+ continue;
+ }
+ }
+
+ flatbuf = &qsc->qsc_flat_bufs[oseg++];
+ flatbuf->data_len_in_bytes = (uint32_t)len;
+ flatbuf->phy_buffer = (uint64_t)addr;
+ }
+ qsc->qsc_buf_list.num_buffers = oseg;
+}
+
+static int
+qat_crypto_load(struct qat_session *qs, struct qat_sym_cookie *qsc,
+ struct qat_crypto_desc const *desc, struct cryptop *crp)
+{
+ struct qat_crypto_load_cb_arg arg;
+ int error;
+
+ crypto_read_iv(crp, qsc->qsc_iv_buf);
+
+ arg.crp = crp;
+ arg.qs = qs;
+ arg.qsc = qsc;
+ arg.error = 0;
+ error = bus_dmamap_load_crp(qsc->qsc_buf_dma_tag, qsc->qsc_buf_dmamap,
+ crp, qat_crypto_load_cb, &arg, BUS_DMA_NOWAIT);
+ if (error == 0)
+ error = arg.error;
+ return error;
+}
+
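+/*
+ * Spread requests across the crypto banks using the submitting CPU's ID.
+ */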
+static inline struct qat_crypto_bank *
+qat_crypto_select_bank(struct qat_crypto *qcy)
+{
+ u_int cpuid = PCPU_GET(cpuid);
+
+ return &qcy->qcy_banks[cpuid % qcy->qcy_num_banks];
+}
+
+static int
+qat_crypto_setup_ring(struct qat_softc *sc, struct qat_crypto_bank *qcb)
+{
+ int error, i, bank;
+ int curname = 0;
+ char *name;
+
+ bank = qcb->qcb_bank;
+
+ name = qcb->qcb_ring_names[curname++];
+ snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_tx", bank);
+ error = qat_etr_setup_ring(sc, qcb->qcb_bank,
+ sc->sc_hw.qhw_ring_sym_tx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_req_size,
+ NULL, NULL, name, &qcb->qcb_sym_tx);
+ if (error)
+ return error;
+
+ name = qcb->qcb_ring_names[curname++];
+ snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_rx", bank);
+ error = qat_etr_setup_ring(sc, qcb->qcb_bank,
+ sc->sc_hw.qhw_ring_sym_rx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_resp_size,
+ qat_crypto_sym_rxintr, qcb, name, &qcb->qcb_sym_rx);
+ if (error)
+ return error;
+
+ for (i = 0; i < QAT_NSYMCOOKIE; i++) {
+ struct qat_dmamem *qdm = &qcb->qcb_symck_dmamems[i];
+ struct qat_sym_cookie *qsc;
+
+ error = qat_alloc_dmamem(sc, qdm, 1,
+ sizeof(struct qat_sym_cookie), QAT_OPTIMAL_ALIGN);
+ if (error)
+ return error;
+
+ qsc = qdm->qdm_dma_vaddr;
+ qsc->qsc_self_dmamap = qdm->qdm_dma_map;
+ qsc->qsc_self_dma_tag = qdm->qdm_dma_tag;
+ qsc->qsc_bulk_req_params_buf_paddr =
+ qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
+ u.qsc_bulk_cookie.qsbc_req_params_buf);
+ qsc->qsc_buffer_list_desc_paddr =
+ qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
+ qsc_buf_list);
+ qsc->qsc_iv_buf_paddr =
+ qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
+ qsc_iv_buf);
+ qsc->qsc_auth_res_paddr =
+ qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
+ qsc_auth_res);
+ qsc->qsc_gcm_aad_paddr =
+ qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
+ qsc_gcm_aad);
+ qsc->qsc_content_desc_paddr =
+ qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
+ qsc_content_desc);
+ qcb->qcb_symck_free[i] = qsc;
+ qcb->qcb_symck_free_count++;
+
+ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),
+ 1, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ QAT_MAXLEN, /* maxsize */
+ QAT_MAXSEG, /* nsegments */
+ QAT_MAXLEN, /* maxsegsize */
+ BUS_DMA_COHERENT, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &qsc->qsc_buf_dma_tag);
+ if (error != 0)
+ return error;
+
+ error = bus_dmamap_create(qsc->qsc_buf_dma_tag,
+ BUS_DMA_COHERENT, &qsc->qsc_buf_dmamap);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+static int
+qat_crypto_bank_init(struct qat_softc *sc, struct qat_crypto_bank *qcb)
+{
+ mtx_init(&qcb->qcb_bank_mtx, "qcb bank", NULL, MTX_DEF);
+
+ return qat_crypto_setup_ring(sc, qcb);
+}
+
+static void
+qat_crypto_bank_deinit(struct qat_softc *sc, struct qat_crypto_bank *qcb)
+{
+ struct qat_dmamem *qdm;
+ int i;
+
+ for (i = 0; i < QAT_NSYMCOOKIE; i++) {
+ qdm = &qcb->qcb_symck_dmamems[i];
+ qat_free_dmamem(sc, qdm);
+ }
+ qat_free_dmamem(sc, &qcb->qcb_sym_tx->qr_dma);
+ qat_free_dmamem(sc, &qcb->qcb_sym_rx->qr_dma);
+
+ mtx_destroy(&qcb->qcb_bank_mtx);
+}
+
+static int
+qat_crypto_init(struct qat_softc *sc)
+{
+ struct qat_crypto *qcy = &sc->sc_crypto;
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid *oid;
+ struct sysctl_oid_list *children;
+ int bank, error, num_banks;
+
+ qcy->qcy_sc = sc;
+
+ if (sc->sc_hw.qhw_init_arb != NULL)
+ num_banks = imin(mp_ncpus, sc->sc_hw.qhw_num_banks);
+ else
+ num_banks = sc->sc_ae_num;
+
+ qcy->qcy_num_banks = num_banks;
+
+ qcy->qcy_banks =
+ qat_alloc_mem(sizeof(struct qat_crypto_bank) * num_banks);
+
+ for (bank = 0; bank < num_banks; bank++) {
+ struct qat_crypto_bank *qcb = &qcy->qcy_banks[bank];
+ qcb->qcb_bank = bank;
+ error = qat_crypto_bank_init(sc, qcb);
+ if (error)
+ return error;
+ }
+
+ mtx_init(&qcy->qcy_crypto_mtx, "qcy crypto", NULL, MTX_DEF);
+
+ ctx = device_get_sysctl_ctx(sc->sc_dev);
+ oid = device_get_sysctl_tree(sc->sc_dev);
+ children = SYSCTL_CHILDREN(oid);
+ oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats",
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics");
+ children = SYSCTL_CHILDREN(oid);
+
+ sc->sc_gcm_aad_restarts = counter_u64_alloc(M_WAITOK);
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_aad_restarts",
+ CTLFLAG_RD, &sc->sc_gcm_aad_restarts,
+ "GCM requests deferred due to AAD size change");
+ sc->sc_gcm_aad_updates = counter_u64_alloc(M_WAITOK);
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_aad_updates",
+ CTLFLAG_RD, &sc->sc_gcm_aad_updates,
+ "GCM requests that required session state update");
+ sc->sc_ring_full_restarts = counter_u64_alloc(M_WAITOK);
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ring_full",
+ CTLFLAG_RD, &sc->sc_ring_full_restarts,
+ "Requests deferred due to in-flight max reached");
+
+ return 0;
+}
+
+static void
+qat_crypto_deinit(struct qat_softc *sc)
+{
+ struct qat_crypto *qcy = &sc->sc_crypto;
+ struct qat_crypto_bank *qcb;
+ int bank;
+
+ if (qcy->qcy_banks != NULL) {
+ for (bank = 0; bank < qcy->qcy_num_banks; bank++) {
+ qcb = &qcy->qcy_banks[bank];
+ qat_crypto_bank_deinit(sc, qcb);
+ }
+ qat_free_mem(qcy->qcy_banks);
+ mtx_destroy(&qcy->qcy_crypto_mtx);
+ }
+}
+
+static int
+qat_crypto_start(struct qat_softc *sc)
+{
+ struct qat_crypto *qcy;
+
+ qcy = &sc->sc_crypto;
+ qcy->qcy_cid = crypto_get_driverid(sc->sc_dev,
+ sizeof(struct qat_session), CRYPTOCAP_F_HARDWARE);
+ if (qcy->qcy_cid < 0) {
+ device_printf(sc->sc_dev,
+ "could not get opencrypto driver id\n");
+ return ENOENT;
+ }
+
+ return 0;
+}
+
+static void
+qat_crypto_stop(struct qat_softc *sc)
+{
+ struct qat_crypto *qcy;
+
+ qcy = &sc->sc_crypto;
+ if (qcy->qcy_cid >= 0)
+ (void)crypto_unregister_all(qcy->qcy_cid);
+}
+
+static int
+qat_crypto_sym_rxintr(struct qat_softc *sc, void *arg, void *msg)
+{
+ char icv[QAT_SYM_HASH_BUFFER_LEN];
+ struct qat_crypto_bank *qcb = arg;
+ struct qat_crypto *qcy;
+ struct qat_session *qs;
+ struct qat_sym_cookie *qsc;
+ struct qat_sym_bulk_cookie *qsbc;
+ struct cryptop *crp;
+ int error;
+ uint16_t auth_sz;
+ bool blocked;
+
+ qsc = *(void **)((uintptr_t)msg + sc->sc_hw.qhw_crypto_opaque_offset);
+
+ qsbc = &qsc->u.qsc_bulk_cookie;
+ qcy = qsbc->qsbc_crypto;
+ qs = qsbc->qsbc_session;
+ crp = qsbc->qsbc_cb_tag;
+
+ bus_dmamap_sync(qsc->qsc_self_dma_tag, qsc->qsc_self_dmamap,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_sync(qsc->qsc_buf_dma_tag, qsc->qsc_buf_dmamap,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(qsc->qsc_buf_dma_tag, qsc->qsc_buf_dmamap);
+
+ error = 0;
+ if ((auth_sz = qs->qs_auth_mlen) != 0) {
+ if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) != 0) {
+ crypto_copydata(crp, crp->crp_digest_start,
+ auth_sz, icv);
+ if (timingsafe_bcmp(icv, qsc->qsc_auth_res,
+ auth_sz) != 0) {
+ error = EBADMSG;
+ }
+ } else {
+ crypto_copyback(crp, crp->crp_digest_start,
+ auth_sz, qsc->qsc_auth_res);
+ }
+ }
+
+ qat_crypto_free_sym_cookie(qcb, qsc);
+
+ blocked = false;
+ mtx_lock(&qs->qs_session_mtx);
+ MPASS(qs->qs_status & QAT_SESSION_STATUS_ACTIVE);
+ qs->qs_inflight--;
+ if (__predict_false(qs->qs_need_wakeup && qs->qs_inflight == 0)) {
+ blocked = true;
+ qs->qs_need_wakeup = false;
+ }
+ mtx_unlock(&qs->qs_session_mtx);
+
+ crp->crp_etype = error;
+ crypto_done(crp);
+
+ if (blocked)
+ crypto_unblock(qcy->qcy_cid, CRYPTO_SYMQ);
+
+ return 1;
+}
+
+static int
+qat_probesession(device_t dev, const struct crypto_session_params *csp)
+{
+ if (csp->csp_cipher_alg == CRYPTO_AES_XTS &&
+ qat_lookup(dev)->qatp_chip == QAT_CHIP_C2XXX) {
+ /*
+ * AES-XTS is not supported by the NanoQAT.
+ */
+ return EINVAL;
+ }
+
+ switch (csp->csp_mode) {
+ case CSP_MODE_CIPHER:
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_CBC:
+ case CRYPTO_AES_ICM:
+ if (csp->csp_ivlen != AES_BLOCK_LEN)
+ return EINVAL;
+ break;
+ case CRYPTO_AES_XTS:
+ if (csp->csp_ivlen != AES_XTS_IV_LEN)
+ return EINVAL;
+ break;
+ default:
+ return EINVAL;
+ }
+ break;
+ case CSP_MODE_DIGEST:
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_SHA1:
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_256:
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384:
+ case CRYPTO_SHA2_384_HMAC:
+ case CRYPTO_SHA2_512:
+ case CRYPTO_SHA2_512_HMAC:
+ break;
+ case CRYPTO_AES_NIST_GMAC:
+ if (csp->csp_ivlen != AES_GCM_IV_LEN)
+ return EINVAL;
+ break;
+ default:
+ return EINVAL;
+ }
+ break;
+ case CSP_MODE_AEAD:
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_NIST_GCM_16:
+ if (csp->csp_ivlen != AES_GCM_IV_LEN)
+ return EINVAL;
+ break;
+ default:
+ return EINVAL;
+ }
+ break;
+ case CSP_MODE_ETA:
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384_HMAC:
+ case CRYPTO_SHA2_512_HMAC:
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_CBC:
+ case CRYPTO_AES_ICM:
+ if (csp->csp_ivlen != AES_BLOCK_LEN)
+ return EINVAL;
+ break;
+ case CRYPTO_AES_XTS:
+ if (csp->csp_ivlen != AES_XTS_IV_LEN)
+ return EINVAL;
+ break;
+ default:
+ return EINVAL;
+ }
+ break;
+ default:
+ return EINVAL;
+ }
+ break;
+ default:
+ return EINVAL;
+ }
+
+ return CRYPTODEV_PROBE_HARDWARE;
+}
+
+static int
+qat_newsession(device_t dev, crypto_session_t cses,
+ const struct crypto_session_params *csp)
+{
+ struct qat_crypto *qcy;
+ struct qat_dmamem *qdm;
+ struct qat_session *qs;
+ struct qat_softc *sc;
+ struct qat_crypto_desc *ddesc, *edesc;
+ int error, slices;
+
+ sc = device_get_softc(dev);
+ qs = crypto_get_driver_session(cses);
+ qcy = &sc->sc_crypto;
+
+ qdm = &qs->qs_desc_mem;
+ error = qat_alloc_dmamem(sc, qdm, QAT_MAXSEG,
+ sizeof(struct qat_crypto_desc) * 2, QAT_OPTIMAL_ALIGN);
+ if (error != 0)
+ return error;
+
+ mtx_init(&qs->qs_session_mtx, "qs session", NULL, MTX_DEF);
+ qs->qs_aad_length = -1;
+
+ qs->qs_dec_desc = ddesc = qdm->qdm_dma_vaddr;
+ qs->qs_enc_desc = edesc = ddesc + 1;
+
+ ddesc->qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr;
+ ddesc->qcd_hash_state_paddr = ddesc->qcd_desc_paddr +
+ offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf);
+ edesc->qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr +
+ sizeof(struct qat_crypto_desc);
+ edesc->qcd_hash_state_paddr = edesc->qcd_desc_paddr +
+ offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf);
+
+ qs->qs_status = QAT_SESSION_STATUS_ACTIVE;
+ qs->qs_inflight = 0;
+
+ qs->qs_cipher_key = csp->csp_cipher_key;
+ qs->qs_cipher_klen = csp->csp_cipher_klen;
+ qs->qs_auth_key = csp->csp_auth_key;
+ qs->qs_auth_klen = csp->csp_auth_klen;
+
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_CBC:
+ qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen);
+ qs->qs_cipher_mode = HW_CIPHER_CBC_MODE;
+ break;
+ case CRYPTO_AES_ICM:
+ qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen);
+ qs->qs_cipher_mode = HW_CIPHER_CTR_MODE;
+ break;
+ case CRYPTO_AES_XTS:
+ qs->qs_cipher_algo =
+ qat_aes_cipher_algo(csp->csp_cipher_klen / 2);
+ qs->qs_cipher_mode = HW_CIPHER_XTS_MODE;
+ break;
+ case CRYPTO_AES_NIST_GCM_16:
+ qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen);
+ qs->qs_cipher_mode = HW_CIPHER_CTR_MODE;
+ qs->qs_auth_algo = HW_AUTH_ALGO_GALOIS_128;
+ qs->qs_auth_mode = HW_AUTH_MODE1;
+ break;
+ case 0:
+ break;
+ default:
+ panic("%s: unhandled cipher algorithm %d", __func__,
+ csp->csp_cipher_alg);
+ }
+
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_SHA1_HMAC:
+ qs->qs_auth_algo = HW_AUTH_ALGO_SHA1;
+ qs->qs_auth_mode = HW_AUTH_MODE1;
+ break;
+ case CRYPTO_SHA1:
+ qs->qs_auth_algo = HW_AUTH_ALGO_SHA1;
+ qs->qs_auth_mode = HW_AUTH_MODE0;
+ break;
+ case CRYPTO_SHA2_256_HMAC:
+ qs->qs_auth_algo = HW_AUTH_ALGO_SHA256;
+ qs->qs_auth_mode = HW_AUTH_MODE1;
+ break;
+ case CRYPTO_SHA2_256:
+ qs->qs_auth_algo = HW_AUTH_ALGO_SHA256;
+ qs->qs_auth_mode = HW_AUTH_MODE0;
+ break;
+ case CRYPTO_SHA2_384_HMAC:
+ qs->qs_auth_algo = HW_AUTH_ALGO_SHA384;
+ qs->qs_auth_mode = HW_AUTH_MODE1;
+ break;
+ case CRYPTO_SHA2_384:
+ qs->qs_auth_algo = HW_AUTH_ALGO_SHA384;
+ qs->qs_auth_mode = HW_AUTH_MODE0;
+ break;
+ case CRYPTO_SHA2_512_HMAC:
+ qs->qs_auth_algo = HW_AUTH_ALGO_SHA512;
+ qs->qs_auth_mode = HW_AUTH_MODE1;
+ break;
+ case CRYPTO_SHA2_512:
+ qs->qs_auth_algo = HW_AUTH_ALGO_SHA512;
+ qs->qs_auth_mode = HW_AUTH_MODE0;
+ break;
+ case CRYPTO_AES_NIST_GMAC:
+ qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_auth_klen);
+ qs->qs_cipher_mode = HW_CIPHER_CTR_MODE;
+ qs->qs_auth_algo = HW_AUTH_ALGO_GALOIS_128;
+ qs->qs_auth_mode = HW_AUTH_MODE1;
+
+ qs->qs_cipher_key = qs->qs_auth_key;
+ qs->qs_cipher_klen = qs->qs_auth_klen;
+ break;
+ case 0:
+ break;
+ default:
+ panic("%s: unhandled auth algorithm %d", __func__,
+ csp->csp_auth_alg);
+ }
+
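+	/*
+	 * Program the firmware slice chains: each descriptor lists the
+	 * processing slices (cipher, auth) in the order the firmware should
+	 * apply them, and is terminated below by a DRAM write slice that
+	 * stores the result.
+	 */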
+ slices = 0;
+ switch (csp->csp_mode) {
+ case CSP_MODE_AEAD:
+ case CSP_MODE_ETA:
+ /* auth then decrypt */
+ ddesc->qcd_slices[0] = FW_SLICE_AUTH;
+ ddesc->qcd_slices[1] = FW_SLICE_CIPHER;
+ ddesc->qcd_cipher_dir = HW_CIPHER_DECRYPT;
+ ddesc->qcd_cmd_id = FW_LA_CMD_HASH_CIPHER;
+ /* encrypt then auth */
+ edesc->qcd_slices[0] = FW_SLICE_CIPHER;
+ edesc->qcd_slices[1] = FW_SLICE_AUTH;
+ edesc->qcd_cipher_dir = HW_CIPHER_ENCRYPT;
+ edesc->qcd_cmd_id = FW_LA_CMD_CIPHER_HASH;
+ slices = 2;
+ break;
+ case CSP_MODE_CIPHER:
+ /* decrypt */
+ ddesc->qcd_slices[0] = FW_SLICE_CIPHER;
+ ddesc->qcd_cipher_dir = HW_CIPHER_DECRYPT;
+ ddesc->qcd_cmd_id = FW_LA_CMD_CIPHER;
+ /* encrypt */
+ edesc->qcd_slices[0] = FW_SLICE_CIPHER;
+ edesc->qcd_cipher_dir = HW_CIPHER_ENCRYPT;
+ edesc->qcd_cmd_id = FW_LA_CMD_CIPHER;
+ slices = 1;
+ break;
+ case CSP_MODE_DIGEST:
+ if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
+ /* auth then decrypt */
+ ddesc->qcd_slices[0] = FW_SLICE_AUTH;
+ ddesc->qcd_slices[1] = FW_SLICE_CIPHER;
+ ddesc->qcd_cipher_dir = HW_CIPHER_DECRYPT;
+ ddesc->qcd_cmd_id = FW_LA_CMD_HASH_CIPHER;
+ /* encrypt then auth */
+ edesc->qcd_slices[0] = FW_SLICE_CIPHER;
+ edesc->qcd_slices[1] = FW_SLICE_AUTH;
+ edesc->qcd_cipher_dir = HW_CIPHER_ENCRYPT;
+ edesc->qcd_cmd_id = FW_LA_CMD_CIPHER_HASH;
+ slices = 2;
+ } else {
+ ddesc->qcd_slices[0] = FW_SLICE_AUTH;
+ ddesc->qcd_cmd_id = FW_LA_CMD_AUTH;
+ edesc->qcd_slices[0] = FW_SLICE_AUTH;
+ edesc->qcd_cmd_id = FW_LA_CMD_AUTH;
+ slices = 1;
+ }
+ break;
+ default:
+ panic("%s: unhandled crypto algorithm %d, %d", __func__,
+ csp->csp_cipher_alg, csp->csp_auth_alg);
+ }
+ ddesc->qcd_slices[slices] = FW_SLICE_DRAM_WR;
+ edesc->qcd_slices[slices] = FW_SLICE_DRAM_WR;
+
+ qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, ddesc);
+ qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, edesc);
+
+ if (csp->csp_auth_mlen != 0)
+ qs->qs_auth_mlen = csp->csp_auth_mlen;
+ else
+ qs->qs_auth_mlen = edesc->qcd_auth_sz;
+
+ /* Compute the GMAC by specifying a null cipher payload. */
+ if (csp->csp_auth_alg == CRYPTO_AES_NIST_GMAC)
+ ddesc->qcd_cmd_id = edesc->qcd_cmd_id = FW_LA_CMD_AUTH;
+
+ return 0;
+}
+
+static void
+qat_crypto_clear_desc(struct qat_crypto_desc *desc)
+{
+ explicit_bzero(desc->qcd_content_desc, sizeof(desc->qcd_content_desc));
+ explicit_bzero(desc->qcd_hash_state_prefix_buf,
+ sizeof(desc->qcd_hash_state_prefix_buf));
+ explicit_bzero(desc->qcd_req_cache, sizeof(desc->qcd_req_cache));
+}
+
+static void
+qat_freesession(device_t dev, crypto_session_t cses)
+{
+ struct qat_session *qs;
+
+ qs = crypto_get_driver_session(cses);
+ KASSERT(qs->qs_inflight == 0,
+ ("%s: session %p has requests in flight", __func__, qs));
+
+ qat_crypto_clear_desc(qs->qs_enc_desc);
+ qat_crypto_clear_desc(qs->qs_dec_desc);
+ qat_free_dmamem(device_get_softc(dev), &qs->qs_desc_mem);
+ mtx_destroy(&qs->qs_session_mtx);
+}
+
+static int
+qat_process(device_t dev, struct cryptop *crp, int hint)
+{
+ struct qat_crypto *qcy;
+ struct qat_crypto_bank *qcb;
+ struct qat_crypto_desc const *desc;
+ struct qat_session *qs;
+ struct qat_softc *sc;
+ struct qat_sym_cookie *qsc;
+ struct qat_sym_bulk_cookie *qsbc;
+ int error;
+
+ sc = device_get_softc(dev);
+ qcy = &sc->sc_crypto;
+ qs = crypto_get_driver_session(crp->crp_session);
+ qsc = NULL;
+
+ if (__predict_false(crypto_buffer_len(&crp->crp_buf) > QAT_MAXLEN)) {
+ error = E2BIG;
+ goto fail1;
+ }
+
+ mtx_lock(&qs->qs_session_mtx);
+ if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
+ if (crp->crp_aad_length > QAT_GCM_AAD_SIZE_MAX) {
+ error = E2BIG;
+ mtx_unlock(&qs->qs_session_mtx);
+ goto fail1;
+ }
+
+ /*
+ * The firmware interface for GCM annoyingly requires the AAD
+ * size to be stored in the session's content descriptor, which
+ * is not really meant to be updated after session
+ * initialization. For IPSec the AAD size is fixed so this is
+ * not much of a problem in practice, but we have to catch AAD
+ * size updates here so that the device code can safely update
+ * the session's recorded AAD size.
+ */
+ if (__predict_false(crp->crp_aad_length != qs->qs_aad_length)) {
+ if (qs->qs_inflight == 0) {
+ if (qs->qs_aad_length != -1) {
+ counter_u64_add(sc->sc_gcm_aad_updates,
+ 1);
+ }
+ qs->qs_aad_length = crp->crp_aad_length;
+ } else {
+ qs->qs_need_wakeup = true;
+ mtx_unlock(&qs->qs_session_mtx);
+ counter_u64_add(sc->sc_gcm_aad_restarts, 1);
+ error = ERESTART;
+ goto fail1;
+ }
+ }
+ }
+ qs->qs_inflight++;
+ mtx_unlock(&qs->qs_session_mtx);
+
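+	/*
+	 * Hand the request to the device: pick a ring bank, allocate a
+	 * symmetric crypto cookie to track the request's DMA state, build
+	 * the firmware request and enqueue it on the bank's tx ring.
+	 */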
+ qcb = qat_crypto_select_bank(qcy);
+
+ qsc = qat_crypto_alloc_sym_cookie(qcb);
+ if (qsc == NULL) {
+ error = ENOBUFS;
+ goto fail2;
+ }
+
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
+ desc = qs->qs_enc_desc;
+ else
+ desc = qs->qs_dec_desc;
+
+ error = qat_crypto_load(qs, qsc, desc, crp);
+ if (error != 0)
+ goto fail2;
+
+ qsbc = &qsc->u.qsc_bulk_cookie;
+ qsbc->qsbc_crypto = qcy;
+ qsbc->qsbc_session = qs;
+ qsbc->qsbc_cb_tag = crp;
+
+ sc->sc_hw.qhw_crypto_setup_req_params(qcb, qs, desc, qsc, crp);
+
+ bus_dmamap_sync(qsc->qsc_buf_dma_tag, qsc->qsc_buf_dmamap,
+ BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+ bus_dmamap_sync(qsc->qsc_self_dma_tag, qsc->qsc_self_dmamap,
+ BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+
+ error = qat_etr_put_msg(sc, qcb->qcb_sym_tx,
+ (uint32_t *)qsbc->qsbc_msg);
+ if (error)
+ goto fail2;
+
+ return 0;
+
+fail2:
+ if (qsc)
+ qat_crypto_free_sym_cookie(qcb, qsc);
+ mtx_lock(&qs->qs_session_mtx);
+ qs->qs_inflight--;
+ mtx_unlock(&qs->qs_session_mtx);
+fail1:
+ crp->crp_etype = error;
+ crypto_done(crp);
+ return 0;
+}
+
+static device_method_t qat_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, qat_probe),
+ DEVMETHOD(device_attach, qat_attach),
+ DEVMETHOD(device_detach, qat_detach),
+
+ /* Cryptodev interface */
+ DEVMETHOD(cryptodev_probesession, qat_probesession),
+ DEVMETHOD(cryptodev_newsession, qat_newsession),
+ DEVMETHOD(cryptodev_freesession, qat_freesession),
+ DEVMETHOD(cryptodev_process, qat_process),
+
+ DEVMETHOD_END
+};
+
+static devclass_t qat_devclass;
+
+static driver_t qat_driver = {
+ .name = "qat",
+ .methods = qat_methods,
+ .size = sizeof(struct qat_softc),
+};
+
+DRIVER_MODULE(qat, pci, qat_driver, qat_devclass, 0, 0);
+MODULE_VERSION(qat, 1);
+MODULE_DEPEND(qat, crypto, 1, 1, 1);
+MODULE_DEPEND(qat, pci, 1, 1, 1);
diff --git a/sys/dev/qat/qat_ae.c b/sys/dev/qat/qat_ae.c
new file mode 100644
index 000000000000..83695355f73d
--- /dev/null
+++ b/sys/dev/qat/qat_ae.c
@@ -0,0 +1,3456 @@
+/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
+/* $NetBSD: qat_ae.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
+
+/*
+ * Copyright (c) 2019 Internet Initiative Japan, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#if 0
+__KERNEL_RCSID(0, "$NetBSD: qat_ae.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
+#endif
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/firmware.h>
+#include <sys/limits.h>
+#include <sys/systm.h>
+
+#include <machine/bus.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "qatreg.h"
+#include "qatvar.h"
+#include "qat_aevar.h"
+
+static int qat_ae_write_4(struct qat_softc *, u_char, bus_size_t,
+ uint32_t);
+static int qat_ae_read_4(struct qat_softc *, u_char, bus_size_t,
+ uint32_t *);
+static void qat_ae_ctx_indr_write(struct qat_softc *, u_char, uint32_t,
+ bus_size_t, uint32_t);
+static int qat_ae_ctx_indr_read(struct qat_softc *, u_char, uint32_t,
+ bus_size_t, uint32_t *);
+
+static u_short qat_aereg_get_10bit_addr(enum aereg_type, u_short);
+static int qat_aereg_rel_data_write(struct qat_softc *, u_char, u_char,
+ enum aereg_type, u_short, uint32_t);
+static int qat_aereg_rel_data_read(struct qat_softc *, u_char, u_char,
+ enum aereg_type, u_short, uint32_t *);
+static int qat_aereg_rel_rdxfer_write(struct qat_softc *, u_char, u_char,
+ enum aereg_type, u_short, uint32_t);
+static int qat_aereg_rel_wrxfer_write(struct qat_softc *, u_char, u_char,
+ enum aereg_type, u_short, uint32_t);
+static int qat_aereg_rel_nn_write(struct qat_softc *, u_char, u_char,
+ enum aereg_type, u_short, uint32_t);
+static int qat_aereg_abs_to_rel(struct qat_softc *, u_char, u_short,
+ u_short *, u_char *);
+static int qat_aereg_abs_data_write(struct qat_softc *, u_char,
+ enum aereg_type, u_short, uint32_t);
+
+static void qat_ae_enable_ctx(struct qat_softc *, u_char, u_int);
+static void qat_ae_disable_ctx(struct qat_softc *, u_char, u_int);
+static void qat_ae_write_ctx_mode(struct qat_softc *, u_char, u_char);
+static void qat_ae_write_nn_mode(struct qat_softc *, u_char, u_char);
+static void qat_ae_write_lm_mode(struct qat_softc *, u_char,
+ enum aereg_type, u_char);
+static void qat_ae_write_shared_cs_mode0(struct qat_softc *, u_char,
+ u_char);
+static void qat_ae_write_shared_cs_mode(struct qat_softc *, u_char, u_char);
+static int qat_ae_set_reload_ustore(struct qat_softc *, u_char, u_int, int,
+ u_int);
+
+static enum qat_ae_status qat_ae_get_status(struct qat_softc *, u_char);
+static int qat_ae_is_active(struct qat_softc *, u_char);
+static int qat_ae_wait_num_cycles(struct qat_softc *, u_char, int, int);
+
+static int qat_ae_clear_reset(struct qat_softc *);
+static int qat_ae_check(struct qat_softc *);
+static int qat_ae_reset_timestamp(struct qat_softc *);
+static void qat_ae_clear_xfer(struct qat_softc *);
+static int qat_ae_clear_gprs(struct qat_softc *);
+
+static void qat_ae_get_shared_ustore_ae(u_char, u_char *);
+static u_int qat_ae_ucode_parity64(uint64_t);
+static uint64_t qat_ae_ucode_set_ecc(uint64_t);
+static int qat_ae_ucode_write(struct qat_softc *, u_char, u_int, u_int,
+ const uint64_t *);
+static int qat_ae_ucode_read(struct qat_softc *, u_char, u_int, u_int,
+ uint64_t *);
+static u_int qat_ae_concat_ucode(uint64_t *, u_int, u_int, u_int, u_int *);
+static int qat_ae_exec_ucode(struct qat_softc *, u_char, u_char,
+ uint64_t *, u_int, int, u_int, u_int *);
+static int qat_ae_exec_ucode_init_lm(struct qat_softc *, u_char, u_char,
+ int *, uint64_t *, u_int,
+ u_int *, u_int *, u_int *, u_int *, u_int *);
+static int qat_ae_restore_init_lm_gprs(struct qat_softc *, u_char, u_char,
+ u_int, u_int, u_int, u_int, u_int);
+static int qat_ae_get_inst_num(int);
+static int qat_ae_batch_put_lm(struct qat_softc *, u_char,
+ struct qat_ae_batch_init_list *, size_t);
+static int qat_ae_write_pc(struct qat_softc *, u_char, u_int, u_int);
+
+static u_int qat_aefw_csum(char *, int);
+static const char *qat_aefw_uof_string(struct qat_softc *, size_t);
+static struct uof_chunk_hdr *qat_aefw_uof_find_chunk(struct qat_softc *,
+ const char *, struct uof_chunk_hdr *);
+
+static int qat_aefw_load_mof(struct qat_softc *);
+static void qat_aefw_unload_mof(struct qat_softc *);
+static int qat_aefw_load_mmp(struct qat_softc *);
+static void qat_aefw_unload_mmp(struct qat_softc *);
+
+static int qat_aefw_mof_find_uof0(struct qat_softc *,
+ struct mof_uof_hdr *, struct mof_uof_chunk_hdr *,
+ u_int, size_t, const char *,
+ size_t *, void **);
+static int qat_aefw_mof_find_uof(struct qat_softc *);
+static int qat_aefw_mof_parse(struct qat_softc *);
+
+static int qat_aefw_uof_parse_image(struct qat_softc *,
+ struct qat_uof_image *, struct uof_chunk_hdr *uch);
+static int qat_aefw_uof_parse_images(struct qat_softc *);
+static int qat_aefw_uof_parse(struct qat_softc *);
+
+static int qat_aefw_alloc_auth_dmamem(struct qat_softc *, char *, size_t,
+ struct qat_dmamem *);
+static int qat_aefw_auth(struct qat_softc *, struct qat_dmamem *);
+static int qat_aefw_suof_load(struct qat_softc *sc,
+ struct qat_dmamem *dma);
+static int qat_aefw_suof_parse_image(struct qat_softc *,
+ struct qat_suof_image *, struct suof_chunk_hdr *);
+static int qat_aefw_suof_parse(struct qat_softc *);
+static int qat_aefw_suof_write(struct qat_softc *);
+
+static int qat_aefw_uof_assign_image(struct qat_softc *, struct qat_ae *,
+ struct qat_uof_image *);
+static int qat_aefw_uof_init_ae(struct qat_softc *, u_char);
+static int qat_aefw_uof_init(struct qat_softc *);
+
+static int qat_aefw_init_memory_one(struct qat_softc *,
+ struct uof_init_mem *);
+static void qat_aefw_free_lm_init(struct qat_softc *, u_char);
+static int qat_aefw_init_ustore(struct qat_softc *);
+static int qat_aefw_init_reg(struct qat_softc *, u_char, u_char,
+ enum aereg_type, u_short, u_int);
+static int qat_aefw_init_reg_sym_expr(struct qat_softc *, u_char,
+ struct qat_uof_image *);
+static int qat_aefw_init_memory(struct qat_softc *);
+static int qat_aefw_init_globals(struct qat_softc *);
+static uint64_t qat_aefw_get_uof_inst(struct qat_softc *,
+ struct qat_uof_page *, u_int);
+static int qat_aefw_do_pagein(struct qat_softc *, u_char,
+ struct qat_uof_page *);
+static int qat_aefw_uof_write_one(struct qat_softc *,
+ struct qat_uof_image *);
+static int qat_aefw_uof_write(struct qat_softc *);
+
+static int
+qat_ae_write_4(struct qat_softc *sc, u_char ae, bus_size_t offset,
+ uint32_t value)
+{
+ int times = TIMEOUT_AE_CSR;
+
+ do {
+ qat_ae_local_write_4(sc, ae, offset, value);
+ if ((qat_ae_local_read_4(sc, ae, LOCAL_CSR_STATUS) &
+ LOCAL_CSR_STATUS_STATUS) == 0)
+ return 0;
+
+ } while (times--);
+
+ device_printf(sc->sc_dev,
+ "couldn't write AE CSR: ae 0x%hhx offset 0x%lx\n", ae, (long)offset);
+ return EFAULT;
+}
+
+static int
+qat_ae_read_4(struct qat_softc *sc, u_char ae, bus_size_t offset,
+ uint32_t *value)
+{
+ int times = TIMEOUT_AE_CSR;
+ uint32_t v;
+
+ do {
+ v = qat_ae_local_read_4(sc, ae, offset);
+ if ((qat_ae_local_read_4(sc, ae, LOCAL_CSR_STATUS) &
+ LOCAL_CSR_STATUS_STATUS) == 0) {
+ *value = v;
+ return 0;
+ }
+ } while (times--);
+
+ device_printf(sc->sc_dev,
+ "couldn't read AE CSR: ae 0x%hhx offset 0x%lx\n", ae, (long)offset);
+ return EFAULT;
+}
+
+static void
+qat_ae_ctx_indr_write(struct qat_softc *sc, u_char ae, uint32_t ctx_mask,
+ bus_size_t offset, uint32_t value)
+{
+ int ctx;
+ uint32_t ctxptr;
+
+ MPASS(offset == CTX_FUTURE_COUNT_INDIRECT ||
+ offset == FUTURE_COUNT_SIGNAL_INDIRECT ||
+ offset == CTX_STS_INDIRECT ||
+ offset == CTX_WAKEUP_EVENTS_INDIRECT ||
+ offset == CTX_SIG_EVENTS_INDIRECT ||
+ offset == LM_ADDR_0_INDIRECT ||
+ offset == LM_ADDR_1_INDIRECT ||
+ offset == INDIRECT_LM_ADDR_0_BYTE_INDEX ||
+ offset == INDIRECT_LM_ADDR_1_BYTE_INDEX);
+
+ qat_ae_read_4(sc, ae, CSR_CTX_POINTER, &ctxptr);
+ for (ctx = 0; ctx < MAX_AE_CTX; ctx++) {
+ if ((ctx_mask & (1 << ctx)) == 0)
+ continue;
+ qat_ae_write_4(sc, ae, CSR_CTX_POINTER, ctx);
+ qat_ae_write_4(sc, ae, offset, value);
+ }
+ qat_ae_write_4(sc, ae, CSR_CTX_POINTER, ctxptr);
+}
+
+static int
+qat_ae_ctx_indr_read(struct qat_softc *sc, u_char ae, uint32_t ctx,
+ bus_size_t offset, uint32_t *value)
+{
+ int error;
+ uint32_t ctxptr;
+
+ MPASS(offset == CTX_FUTURE_COUNT_INDIRECT ||
+ offset == FUTURE_COUNT_SIGNAL_INDIRECT ||
+ offset == CTX_STS_INDIRECT ||
+ offset == CTX_WAKEUP_EVENTS_INDIRECT ||
+ offset == CTX_SIG_EVENTS_INDIRECT ||
+ offset == LM_ADDR_0_INDIRECT ||
+ offset == LM_ADDR_1_INDIRECT ||
+ offset == INDIRECT_LM_ADDR_0_BYTE_INDEX ||
+ offset == INDIRECT_LM_ADDR_1_BYTE_INDEX);
+
+ /* save the ctx ptr */
+ qat_ae_read_4(sc, ae, CSR_CTX_POINTER, &ctxptr);
+ if ((ctxptr & CSR_CTX_POINTER_CONTEXT) !=
+ (ctx & CSR_CTX_POINTER_CONTEXT))
+ qat_ae_write_4(sc, ae, CSR_CTX_POINTER, ctx);
+
+ error = qat_ae_read_4(sc, ae, offset, value);
+
+ /* restore ctx ptr */
+ if ((ctxptr & CSR_CTX_POINTER_CONTEXT) !=
+ (ctx & CSR_CTX_POINTER_CONTEXT))
+ qat_ae_write_4(sc, ae, CSR_CTX_POINTER, ctxptr);
+
+ return error;
+}
+
+static u_short
+qat_aereg_get_10bit_addr(enum aereg_type regtype, u_short reg)
+{
+ u_short addr;
+
+ switch (regtype) {
+ case AEREG_GPA_ABS:
+ case AEREG_GPB_ABS:
+ addr = (reg & 0x7f) | 0x80;
+ break;
+ case AEREG_GPA_REL:
+ case AEREG_GPB_REL:
+ addr = reg & 0x1f;
+ break;
+ case AEREG_SR_RD_REL:
+ case AEREG_SR_WR_REL:
+ case AEREG_SR_REL:
+ addr = 0x180 | (reg & 0x1f);
+ break;
+ case AEREG_SR_INDX:
+ addr = 0x140 | ((reg & 0x3) << 1);
+ break;
+ case AEREG_DR_RD_REL:
+ case AEREG_DR_WR_REL:
+ case AEREG_DR_REL:
+ addr = 0x1c0 | (reg & 0x1f);
+ break;
+ case AEREG_DR_INDX:
+ addr = 0x100 | ((reg & 0x3) << 1);
+ break;
+ case AEREG_NEIGH_INDX:
+ addr = 0x241 | ((reg & 0x3) << 1);
+ break;
+ case AEREG_NEIGH_REL:
+ addr = 0x280 | (reg & 0x1f);
+ break;
+ case AEREG_LMEM0:
+ addr = 0x200;
+ break;
+ case AEREG_LMEM1:
+ addr = 0x220;
+ break;
+ case AEREG_NO_DEST:
+ addr = 0x300 | (reg & 0xff);
+ break;
+ default:
+ addr = AEREG_BAD_REGADDR;
+ break;
+ }
+ return (addr);
+}
+
+static int
+qat_aereg_rel_data_write(struct qat_softc *sc, u_char ae, u_char ctx,
+ enum aereg_type regtype, u_short relreg, uint32_t value)
+{
+ uint16_t srchi, srclo, destaddr, data16hi, data16lo;
+ uint64_t inst[] = {
+ 0x0F440000000ull, /* immed_w1[reg, val_hi16] */
+ 0x0F040000000ull, /* immed_w0[reg, val_lo16] */
+ 0x0F0000C0300ull, /* nop */
+ 0x0E000010000ull /* ctx_arb[kill] */
+ };
+ const int ninst = nitems(inst);
+ const int imm_w1 = 0, imm_w0 = 1;
+ unsigned int ctxen;
+ uint16_t mask;
+
+ /* This logic only works for GPRs and LM index registers,
+ not NN or XFER registers! */
+ MPASS(regtype == AEREG_GPA_REL || regtype == AEREG_GPB_REL ||
+ regtype == AEREG_LMEM0 || regtype == AEREG_LMEM1);
+
+ if ((regtype == AEREG_GPA_REL) || (regtype == AEREG_GPB_REL)) {
+ /* determine the context mode */
+ qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
+ if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) {
+ /* 4-ctx mode */
+ if (ctx & 0x1)
+ return EINVAL;
+ mask = 0x1f;
+ } else {
+ /* 8-ctx mode */
+ mask = 0x0f;
+ }
+ if (relreg & ~mask)
+ return EINVAL;
+ }
+ if ((destaddr = qat_aereg_get_10bit_addr(regtype, relreg)) ==
+ AEREG_BAD_REGADDR) {
+ return EINVAL;
+ }
+
+ data16lo = 0xffff & value;
+ data16hi = 0xffff & (value >> 16);
+ srchi = qat_aereg_get_10bit_addr(AEREG_NO_DEST,
+ (uint16_t)(0xff & data16hi));
+ srclo = qat_aereg_get_10bit_addr(AEREG_NO_DEST,
+ (uint16_t)(0xff & data16lo));
+
+ switch (regtype) {
+ case AEREG_GPA_REL: /* A rel source */
+ inst[imm_w1] = inst[imm_w1] | ((data16hi >> 8) << 20) |
+ ((srchi & 0x3ff) << 10) | (destaddr & 0x3ff);
+ inst[imm_w0] = inst[imm_w0] | ((data16lo >> 8) << 20) |
+ ((srclo & 0x3ff) << 10) | (destaddr & 0x3ff);
+ break;
+ default:
+ inst[imm_w1] = inst[imm_w1] | ((data16hi >> 8) << 20) |
+ ((destaddr & 0x3ff) << 10) | (srchi & 0x3ff);
+ inst[imm_w0] = inst[imm_w0] | ((data16lo >> 8) << 20) |
+ ((destaddr & 0x3ff) << 10) | (srclo & 0x3ff);
+ break;
+ }
+
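+	/*
+	 * Execute the immed_w0/immed_w1 snippet on the target AE so that it
+	 * loads the two 16-bit halves of the value into the destination
+	 * register itself.
+	 */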
+ return qat_ae_exec_ucode(sc, ae, ctx, inst, ninst, 1, ninst * 5, NULL);
+}
+
+static int
+qat_aereg_rel_data_read(struct qat_softc *sc, u_char ae, u_char ctx,
+ enum aereg_type regtype, u_short relreg, uint32_t *value)
+{
+ uint64_t inst, savucode;
+ uint32_t ctxen, misc, nmisc, savctx, ctxarbctl, ulo, uhi;
+ u_int uaddr, ustore_addr;
+ int error;
+ u_short mask, regaddr;
+ u_char nae;
+
+ MPASS(regtype == AEREG_GPA_REL || regtype == AEREG_GPB_REL ||
+ regtype == AEREG_SR_REL || regtype == AEREG_SR_RD_REL ||
+ regtype == AEREG_DR_REL || regtype == AEREG_DR_RD_REL ||
+ regtype == AEREG_LMEM0 || regtype == AEREG_LMEM1);
+
+ if ((regtype == AEREG_GPA_REL) || (regtype == AEREG_GPB_REL) ||
+ (regtype == AEREG_SR_REL) || (regtype == AEREG_SR_RD_REL) ||
+ (regtype == AEREG_DR_REL) || (regtype == AEREG_DR_RD_REL))
+ {
+ /* determine the context mode */
+ qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
+ if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) {
+ /* 4-ctx mode */
+ if (ctx & 0x1)
+ return EINVAL;
+ mask = 0x1f;
+ } else {
+ /* 8-ctx mode */
+ mask = 0x0f;
+ }
+ if (relreg & ~mask)
+ return EINVAL;
+ }
+ if ((regaddr = qat_aereg_get_10bit_addr(regtype, relreg)) ==
+ AEREG_BAD_REGADDR) {
+ return EINVAL;
+ }
+
+ /* instruction -- alu[--, --, B, reg] */
+ switch (regtype) {
+ case AEREG_GPA_REL:
+ /* A rel source */
+ inst = 0xA070000000ull | (regaddr & 0x3ff);
+ break;
+ default:
+ inst = (0xA030000000ull | ((regaddr & 0x3ff) << 10));
+ break;
+ }
+
+	/* back up the shared control store bit, and force the AE to
+	 * non-shared mode before executing the ucode snippet */
+ qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &misc);
+ if (misc & AE_MISC_CONTROL_SHARE_CS) {
+ qat_ae_get_shared_ustore_ae(ae, &nae);
+ if ((1 << nae) & sc->sc_ae_mask && qat_ae_is_active(sc, nae))
+ return EBUSY;
+ }
+
+ nmisc = misc & ~AE_MISC_CONTROL_SHARE_CS;
+ qat_ae_write_4(sc, ae, AE_MISC_CONTROL, nmisc);
+
+ /* read current context */
+ qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &savctx);
+ qat_ae_read_4(sc, ae, CTX_ARB_CNTL, &ctxarbctl);
+
+ qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
+ /* prevent clearing the W1C bits: the breakpoint bit,
+ ECC error bit, and Parity error bit */
+ ctxen &= CTX_ENABLES_IGNORE_W1C_MASK;
+
+ /* change the context */
+ if (ctx != (savctx & ACTIVE_CTX_STATUS_ACNO))
+ qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
+ ctx & ACTIVE_CTX_STATUS_ACNO);
+ /* save a ustore location */
+ if ((error = qat_ae_ucode_read(sc, ae, 0, 1, &savucode)) != 0) {
+ /* restore AE_MISC_CONTROL csr */
+ qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc);
+
+ /* restore the context */
+ if (ctx != (savctx & ACTIVE_CTX_STATUS_ACNO)) {
+ qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
+ savctx & ACTIVE_CTX_STATUS_ACNO);
+ }
+ qat_ae_write_4(sc, ae, CTX_ARB_CNTL, ctxarbctl);
+
+ return (error);
+ }
+
+ /* turn off ustore parity */
+ qat_ae_write_4(sc, ae, CTX_ENABLES,
+ ctxen & (~CTX_ENABLES_CNTL_STORE_PARITY_ENABLE));
+
+ /* save ustore-addr csr */
+ qat_ae_read_4(sc, ae, USTORE_ADDRESS, &ustore_addr);
+
+ /* write the ALU instruction to ustore, enable ecs bit */
+ uaddr = 0 | USTORE_ADDRESS_ECS;
+
+ /* set the uaddress */
+ qat_ae_write_4(sc, ae, USTORE_ADDRESS, uaddr);
+ inst = qat_ae_ucode_set_ecc(inst);
+
+ ulo = (uint32_t)(inst & 0xffffffff);
+ uhi = (uint32_t)(inst >> 32);
+
+ qat_ae_write_4(sc, ae, USTORE_DATA_LOWER, ulo);
+
+ /* this will auto increment the address */
+ qat_ae_write_4(sc, ae, USTORE_DATA_UPPER, uhi);
+
+ /* set the uaddress */
+ qat_ae_write_4(sc, ae, USTORE_ADDRESS, uaddr);
+
+ /* delay for at least 8 cycles */
+ qat_ae_wait_num_cycles(sc, ae, 0x8, 0);
+
+ /* read ALU output -- the instruction should have been executed
+ prior to clearing the ECS in putUwords */
+ qat_ae_read_4(sc, ae, ALU_OUT, value);
+
+ /* restore ustore-addr csr */
+ qat_ae_write_4(sc, ae, USTORE_ADDRESS, ustore_addr);
+
+ /* restore the ustore */
+ error = qat_ae_ucode_write(sc, ae, 0, 1, &savucode);
+
+ /* restore the context */
+ if (ctx != (savctx & ACTIVE_CTX_STATUS_ACNO)) {
+ qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
+ savctx & ACTIVE_CTX_STATUS_ACNO);
+ }
+
+ qat_ae_write_4(sc, ae, CTX_ARB_CNTL, ctxarbctl);
+
+ /* restore AE_MISC_CONTROL csr */
+ qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc);
+
+ qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen);
+
+ return error;
+}
+
+static int
+qat_aereg_rel_rdxfer_write(struct qat_softc *sc, u_char ae, u_char ctx,
+ enum aereg_type regtype, u_short relreg, uint32_t value)
+{
+ bus_size_t addr;
+ int error;
+ uint32_t ctxen;
+ u_short mask;
+ u_short dr_offset;
+
+ MPASS(regtype == AEREG_SR_REL || regtype == AEREG_DR_REL ||
+ regtype == AEREG_SR_RD_REL || regtype == AEREG_DR_RD_REL);
+
+	error = qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
+	if (error != 0)
+		return error;
+	if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) {
+ if (ctx & 0x1) {
+ device_printf(sc->sc_dev,
+			    "bad ctx argument in 4-ctx mode, ctx=0x%x\n", ctx);
+ return EINVAL;
+ }
+ mask = 0x1f;
+ dr_offset = 0x20;
+
+ } else {
+ mask = 0x0f;
+ dr_offset = 0x10;
+ }
+
+ if (relreg & ~mask)
+ return EINVAL;
+
+ addr = relreg + (ctx << 0x5);
+
+ switch (regtype) {
+ case AEREG_SR_REL:
+ case AEREG_SR_RD_REL:
+ qat_ae_xfer_write_4(sc, ae, addr, value);
+ break;
+ case AEREG_DR_REL:
+ case AEREG_DR_RD_REL:
+ qat_ae_xfer_write_4(sc, ae, addr + dr_offset, value);
+ break;
+ default:
+ error = EINVAL;
+ }
+
+ return error;
+}
+
+static int
+qat_aereg_rel_wrxfer_write(struct qat_softc *sc, u_char ae, u_char ctx,
+ enum aereg_type regtype, u_short relreg, uint32_t value)
+{
+
+ panic("notyet");
+
+ return 0;
+}
+
+static int
+qat_aereg_rel_nn_write(struct qat_softc *sc, u_char ae, u_char ctx,
+ enum aereg_type regtype, u_short relreg, uint32_t value)
+{
+
+ panic("notyet");
+
+ return 0;
+}
+
+static int
+qat_aereg_abs_to_rel(struct qat_softc *sc, u_char ae,
+ u_short absreg, u_short *relreg, u_char *ctx)
+{
+ uint32_t ctxen;
+
+ qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
+ if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) {
+ /* 4-ctx mode */
+ *relreg = absreg & 0x1f;
+ *ctx = (absreg >> 0x4) & 0x6;
+ } else {
+ /* 8-ctx mode */
+ *relreg = absreg & 0x0f;
+ *ctx = (absreg >> 0x4) & 0x7;
+ }
+
+ return 0;
+}
+
+static int
+qat_aereg_abs_data_write(struct qat_softc *sc, u_char ae,
+ enum aereg_type regtype, u_short absreg, uint32_t value)
+{
+ int error;
+ u_short relreg;
+ u_char ctx;
+
+ qat_aereg_abs_to_rel(sc, ae, absreg, &relreg, &ctx);
+
+ switch (regtype) {
+ case AEREG_GPA_ABS:
+ MPASS(absreg < MAX_GPR_REG);
+ error = qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPA_REL,
+ relreg, value);
+ break;
+ case AEREG_GPB_ABS:
+ MPASS(absreg < MAX_GPR_REG);
+ error = qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPB_REL,
+ relreg, value);
+ break;
+ case AEREG_DR_RD_ABS:
+ MPASS(absreg < MAX_XFER_REG);
+ error = qat_aereg_rel_rdxfer_write(sc, ae, ctx, AEREG_DR_RD_REL,
+ relreg, value);
+ break;
+ case AEREG_SR_RD_ABS:
+ MPASS(absreg < MAX_XFER_REG);
+ error = qat_aereg_rel_rdxfer_write(sc, ae, ctx, AEREG_SR_RD_REL,
+ relreg, value);
+ break;
+ case AEREG_DR_WR_ABS:
+ MPASS(absreg < MAX_XFER_REG);
+ error = qat_aereg_rel_wrxfer_write(sc, ae, ctx, AEREG_DR_WR_REL,
+ relreg, value);
+ break;
+ case AEREG_SR_WR_ABS:
+ MPASS(absreg < MAX_XFER_REG);
+ error = qat_aereg_rel_wrxfer_write(sc, ae, ctx, AEREG_SR_WR_REL,
+ relreg, value);
+ break;
+ case AEREG_NEIGH_ABS:
+ MPASS(absreg < MAX_NN_REG);
+ if (absreg >= MAX_NN_REG)
+ return EINVAL;
+ error = qat_aereg_rel_nn_write(sc, ae, ctx, AEREG_NEIGH_REL,
+ relreg, value);
+ break;
+ default:
+ panic("Invalid Register Type");
+ }
+
+ return error;
+}
+
+static void
+qat_ae_enable_ctx(struct qat_softc *sc, u_char ae, u_int ctx_mask)
+{
+ uint32_t ctxen;
+
+ qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
+ ctxen &= CTX_ENABLES_IGNORE_W1C_MASK;
+
+ if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) {
+ ctx_mask &= 0x55;
+ } else {
+ ctx_mask &= 0xff;
+ }
+
+ ctxen |= __SHIFTIN(ctx_mask, CTX_ENABLES_ENABLE);
+ qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen);
+}
+
+static void
+qat_ae_disable_ctx(struct qat_softc *sc, u_char ae, u_int ctx_mask)
+{
+ uint32_t ctxen;
+
+ qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
+ ctxen &= CTX_ENABLES_IGNORE_W1C_MASK;
+ ctxen &= ~(__SHIFTIN(ctx_mask & AE_ALL_CTX, CTX_ENABLES_ENABLE));
+ qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen);
+}
+
+static void
+qat_ae_write_ctx_mode(struct qat_softc *sc, u_char ae, u_char mode)
+{
+ uint32_t val, nval;
+
+ qat_ae_read_4(sc, ae, CTX_ENABLES, &val);
+ val &= CTX_ENABLES_IGNORE_W1C_MASK;
+
+ if (mode == 4)
+ nval = val | CTX_ENABLES_INUSE_CONTEXTS;
+ else
+ nval = val & ~CTX_ENABLES_INUSE_CONTEXTS;
+
+ if (val != nval)
+ qat_ae_write_4(sc, ae, CTX_ENABLES, nval);
+}
+
+static void
+qat_ae_write_nn_mode(struct qat_softc *sc, u_char ae, u_char mode)
+{
+ uint32_t val, nval;
+
+ qat_ae_read_4(sc, ae, CTX_ENABLES, &val);
+ val &= CTX_ENABLES_IGNORE_W1C_MASK;
+
+ if (mode)
+ nval = val | CTX_ENABLES_NN_MODE;
+ else
+ nval = val & ~CTX_ENABLES_NN_MODE;
+
+ if (val != nval)
+ qat_ae_write_4(sc, ae, CTX_ENABLES, nval);
+}
+
+static void
+qat_ae_write_lm_mode(struct qat_softc *sc, u_char ae,
+ enum aereg_type lm, u_char mode)
+{
+ uint32_t val, nval;
+ uint32_t bit;
+
+ qat_ae_read_4(sc, ae, CTX_ENABLES, &val);
+ val &= CTX_ENABLES_IGNORE_W1C_MASK;
+
+ switch (lm) {
+ case AEREG_LMEM0:
+ bit = CTX_ENABLES_LMADDR_0_GLOBAL;
+ break;
+ case AEREG_LMEM1:
+ bit = CTX_ENABLES_LMADDR_1_GLOBAL;
+ break;
+ default:
+ panic("invalid lmem reg type");
+ break;
+ }
+
+ if (mode)
+ nval = val | bit;
+ else
+ nval = val & ~bit;
+
+ if (val != nval)
+ qat_ae_write_4(sc, ae, CTX_ENABLES, nval);
+}
+
+static void
+qat_ae_write_shared_cs_mode0(struct qat_softc *sc, u_char ae, u_char mode)
+{
+ uint32_t val, nval;
+
+ qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &val);
+
+ if (mode == 1)
+ nval = val | AE_MISC_CONTROL_SHARE_CS;
+ else
+ nval = val & ~AE_MISC_CONTROL_SHARE_CS;
+
+ if (val != nval)
+ qat_ae_write_4(sc, ae, AE_MISC_CONTROL, nval);
+}
+
+static void
+qat_ae_write_shared_cs_mode(struct qat_softc *sc, u_char ae, u_char mode)
+{
+ u_char nae;
+
+ qat_ae_get_shared_ustore_ae(ae, &nae);
+
+ qat_ae_write_shared_cs_mode0(sc, ae, mode);
+
+ if ((sc->sc_ae_mask & (1 << nae))) {
+ qat_ae_write_shared_cs_mode0(sc, nae, mode);
+ }
+}
+
+static int
+qat_ae_set_reload_ustore(struct qat_softc *sc, u_char ae,
+ u_int reload_size, int shared_mode, u_int ustore_dram_addr)
+{
+ uint32_t val, cs_reload;
+
+ switch (reload_size) {
+ case 0:
+ cs_reload = 0x0;
+ break;
+ case QAT_2K:
+ cs_reload = 0x1;
+ break;
+ case QAT_4K:
+ cs_reload = 0x2;
+ break;
+ case QAT_8K:
+ cs_reload = 0x3;
+ break;
+ default:
+ return EINVAL;
+ }
+
+ if (cs_reload)
+ QAT_AE(sc, ae).qae_ustore_dram_addr = ustore_dram_addr;
+
+ QAT_AE(sc, ae).qae_reload_size = reload_size;
+
+ qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &val);
+ val &= ~(AE_MISC_CONTROL_ONE_CTX_RELOAD |
+ AE_MISC_CONTROL_CS_RELOAD | AE_MISC_CONTROL_SHARE_CS);
+ val |= __SHIFTIN(cs_reload, AE_MISC_CONTROL_CS_RELOAD) |
+ __SHIFTIN(shared_mode, AE_MISC_CONTROL_ONE_CTX_RELOAD);
+ qat_ae_write_4(sc, ae, AE_MISC_CONTROL, val);
+
+ return 0;
+}
+
+static enum qat_ae_status
+qat_ae_get_status(struct qat_softc *sc, u_char ae)
+{
+ int error;
+ uint32_t val = 0;
+
+ error = qat_ae_read_4(sc, ae, CTX_ENABLES, &val);
+ if (error || val & CTX_ENABLES_ENABLE)
+ return QAT_AE_ENABLED;
+
+ qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &val);
+ if (val & ACTIVE_CTX_STATUS_ABO)
+ return QAT_AE_ACTIVE;
+
+ return QAT_AE_DISABLED;
+}
+
+static int
+qat_ae_is_active(struct qat_softc *sc, u_char ae)
+{
+ uint32_t val;
+
+ if (qat_ae_get_status(sc, ae) != QAT_AE_DISABLED)
+ return 1;
+
+ qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &val);
+ if (val & ACTIVE_CTX_STATUS_ABO)
+ return 1;
+ else
+ return 0;
+}
+
+/* returns 1 if actually waited for specified number of cycles */
+static int
+qat_ae_wait_num_cycles(struct qat_softc *sc, u_char ae, int cycles, int check)
+{
+ uint32_t cnt, actx;
+ int pcnt, ccnt, elapsed, times;
+
+ qat_ae_read_4(sc, ae, PROFILE_COUNT, &cnt);
+ pcnt = cnt & 0xffff;
+
+ times = TIMEOUT_AE_CHECK;
+ do {
+ qat_ae_read_4(sc, ae, PROFILE_COUNT, &cnt);
+ ccnt = cnt & 0xffff;
+
+ elapsed = ccnt - pcnt;
+ if (elapsed == 0) {
+ times--;
+ }
+ if (times <= 0) {
+ device_printf(sc->sc_dev,
+ "qat_ae_wait_num_cycles timeout\n");
+ return -1;
+ }
+
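+		/* PROFILE_COUNT is 16 bits wide; correct for wraparound. */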
+ if (elapsed < 0)
+ elapsed += 0x10000;
+
+ if (elapsed >= CYCLES_FROM_READY2EXE && check) {
+ if (qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS,
+ &actx) == 0) {
+ if ((actx & ACTIVE_CTX_STATUS_ABO) == 0)
+ return 0;
+ }
+ }
+ } while (cycles > elapsed);
+
+ if (check && qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &actx) == 0) {
+ if ((actx & ACTIVE_CTX_STATUS_ABO) == 0)
+ return 0;
+ }
+
+ return 1;
+}
+
+int
+qat_ae_init(struct qat_softc *sc)
+{
+ int error;
+ uint32_t mask, val = 0;
+ u_char ae;
+
+ /* XXX adf_initSysMemInfo */
+
+ /* XXX Disable clock gating for some chip if debug mode */
+
+ for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
+ struct qat_ae *qae = &sc->sc_ae[ae];
+ if (!(mask & 1))
+ continue;
+
+ qae->qae_ustore_size = USTORE_SIZE;
+
+ qae->qae_free_addr = 0;
+ qae->qae_free_size = USTORE_SIZE;
+ qae->qae_live_ctx_mask = AE_ALL_CTX;
+ qae->qae_ustore_dram_addr = 0;
+ qae->qae_reload_size = 0;
+ }
+
+ /* XXX Enable attention interrupt */
+
+ error = qat_ae_clear_reset(sc);
+ if (error)
+ return error;
+
+ qat_ae_clear_xfer(sc);
+
+ if (!sc->sc_hw.qhw_fw_auth) {
+ error = qat_ae_clear_gprs(sc);
+ if (error)
+ return error;
+ }
+
+ /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
+ for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+ qat_ae_read_4(sc, ae, SIGNATURE_ENABLE, &val);
+ val |= 0x1;
+ qat_ae_write_4(sc, ae, SIGNATURE_ENABLE, val);
+ }
+
+ error = qat_ae_clear_reset(sc);
+ if (error)
+ return error;
+
+ /* XXX XXX XXX Clean MMP memory if mem scrub is supported */
+ /* halMem_ScrubMMPMemory */
+
+ return 0;
+}
+
+int
+qat_ae_start(struct qat_softc *sc)
+{
+ int error;
+ u_char ae;
+
+ for (ae = 0; ae < sc->sc_ae_num; ae++) {
+ if ((sc->sc_ae_mask & (1 << ae)) == 0)
+ continue;
+
+ error = qat_aefw_start(sc, ae, 0xff);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+void
+qat_ae_cluster_intr(void *arg)
+{
+ /* Nothing to implement until we support SRIOV. */
+ printf("qat_ae_cluster_intr\n");
+}
+
+static int
+qat_ae_clear_reset(struct qat_softc *sc)
+{
+ int error;
+ uint32_t times, reset, clock, reg, mask;
+ u_char ae;
+
+ reset = qat_cap_global_read_4(sc, CAP_GLOBAL_CTL_RESET);
+ reset &= ~(__SHIFTIN(sc->sc_ae_mask, CAP_GLOBAL_CTL_RESET_AE_MASK));
+ reset &= ~(__SHIFTIN(sc->sc_accel_mask, CAP_GLOBAL_CTL_RESET_ACCEL_MASK));
+ times = TIMEOUT_AE_RESET;
+ do {
+ qat_cap_global_write_4(sc, CAP_GLOBAL_CTL_RESET, reset);
+ if ((times--) == 0) {
+ device_printf(sc->sc_dev, "couldn't reset AEs\n");
+ return EBUSY;
+ }
+ reg = qat_cap_global_read_4(sc, CAP_GLOBAL_CTL_RESET);
+ } while ((__SHIFTIN(sc->sc_ae_mask, CAP_GLOBAL_CTL_RESET_AE_MASK) |
+ __SHIFTIN(sc->sc_accel_mask, CAP_GLOBAL_CTL_RESET_ACCEL_MASK))
+ & reg);
+
+ /* Enable clock for AE and QAT */
+ clock = qat_cap_global_read_4(sc, CAP_GLOBAL_CTL_CLK_EN);
+ clock |= __SHIFTIN(sc->sc_ae_mask, CAP_GLOBAL_CTL_CLK_EN_AE_MASK);
+ clock |= __SHIFTIN(sc->sc_accel_mask, CAP_GLOBAL_CTL_CLK_EN_ACCEL_MASK);
+ qat_cap_global_write_4(sc, CAP_GLOBAL_CTL_CLK_EN, clock);
+
+ error = qat_ae_check(sc);
+ if (error)
+ return error;
+
+ /*
+ * Set undefined power-up/reset states to reasonable default values...
+ * just to make sure we're starting from a known point
+ */
+ for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+
+ /* init the ctx_enable */
+ qat_ae_write_4(sc, ae, CTX_ENABLES,
+ CTX_ENABLES_INIT);
+
+ /* initialize the PCs */
+ qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
+ CTX_STS_INDIRECT,
+ UPC_MASK & CTX_STS_INDIRECT_UPC_INIT);
+
+ /* init the ctx_arb */
+ qat_ae_write_4(sc, ae, CTX_ARB_CNTL,
+ CTX_ARB_CNTL_INIT);
+
+ /* enable cc */
+ qat_ae_write_4(sc, ae, CC_ENABLE,
+ CC_ENABLE_INIT);
+ qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
+ CTX_WAKEUP_EVENTS_INDIRECT,
+ CTX_WAKEUP_EVENTS_INDIRECT_INIT);
+ qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
+ CTX_SIG_EVENTS_INDIRECT,
+ CTX_SIG_EVENTS_INDIRECT_INIT);
+ }
+
+ if ((sc->sc_ae_mask != 0) &&
+ sc->sc_flags & QAT_FLAG_ESRAM_ENABLE_AUTO_INIT) {
+ /* XXX XXX XXX init eSram only when this is boot time */
+ }
+
+ if ((sc->sc_ae_mask != 0) &&
+ sc->sc_flags & QAT_FLAG_SHRAM_WAIT_READY) {
+ /* XXX XXX XXX wait shram to complete initialization */
+ }
+
+ qat_ae_reset_timestamp(sc);
+
+ return 0;
+}
+
+static int
+qat_ae_check(struct qat_softc *sc)
+{
+ int error, times, ae;
+ uint32_t cnt, pcnt, mask;
+
+ for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+
+ times = TIMEOUT_AE_CHECK;
+ error = qat_ae_read_4(sc, ae, PROFILE_COUNT, &cnt);
+ if (error) {
+ device_printf(sc->sc_dev,
+ "couldn't access AE %d CSR\n", ae);
+ return error;
+ }
+ pcnt = cnt & 0xffff;
+
+ while (1) {
+ error = qat_ae_read_4(sc, ae,
+ PROFILE_COUNT, &cnt);
+ if (error) {
+ device_printf(sc->sc_dev,
+ "couldn't access AE %d CSR\n", ae);
+ return error;
+ }
+ cnt &= 0xffff;
+ if (cnt == pcnt)
+ times--;
+ else
+ break;
+ if (times <= 0) {
+ device_printf(sc->sc_dev,
+ "AE %d CSR is useless\n", ae);
+ return EFAULT;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+qat_ae_reset_timestamp(struct qat_softc *sc)
+{
+ uint32_t misc, mask;
+ u_char ae;
+
+ /* stop the timestamp timers */
+ misc = qat_cap_global_read_4(sc, CAP_GLOBAL_CTL_MISC);
+ if (misc & CAP_GLOBAL_CTL_MISC_TIMESTAMP_EN) {
+ qat_cap_global_write_4(sc, CAP_GLOBAL_CTL_MISC,
+ misc & (~CAP_GLOBAL_CTL_MISC_TIMESTAMP_EN));
+ }
+
+ for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+ qat_ae_write_4(sc, ae, TIMESTAMP_LOW, 0);
+ qat_ae_write_4(sc, ae, TIMESTAMP_HIGH, 0);
+ }
+
+ /* start timestamp timers */
+ qat_cap_global_write_4(sc, CAP_GLOBAL_CTL_MISC,
+ misc | CAP_GLOBAL_CTL_MISC_TIMESTAMP_EN);
+
+ return 0;
+}
+
+static void
+qat_ae_clear_xfer(struct qat_softc *sc)
+{
+ u_int mask, reg;
+ u_char ae;
+
+ for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+
+ for (reg = 0; reg < MAX_GPR_REG; reg++) {
+ qat_aereg_abs_data_write(sc, ae, AEREG_SR_RD_ABS,
+ reg, 0);
+ qat_aereg_abs_data_write(sc, ae, AEREG_DR_RD_ABS,
+ reg, 0);
+ }
+ }
+}
+
+static int
+qat_ae_clear_gprs(struct qat_softc *sc)
+{
+ uint32_t val;
+ uint32_t saved_ctx = 0;
+ int times = TIMEOUT_AE_CHECK, rv;
+ u_char ae;
+ u_int mask;
+
+ for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+
+		/* turn off the shared control store bit */
+		qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &val);
+ val &= ~AE_MISC_CONTROL_SHARE_CS;
+ qat_ae_write_4(sc, ae, AE_MISC_CONTROL, val);
+
+ /* turn off ucode parity */
+ /* make sure nn_mode is set to self */
+ qat_ae_read_4(sc, ae, CTX_ENABLES, &val);
+ val &= CTX_ENABLES_IGNORE_W1C_MASK;
+ val |= CTX_ENABLES_NN_MODE;
+ val &= ~CTX_ENABLES_CNTL_STORE_PARITY_ENABLE;
+ qat_ae_write_4(sc, ae, CTX_ENABLES, val);
+
+ /* copy instructions to ustore */
+ qat_ae_ucode_write(sc, ae, 0, nitems(ae_clear_gprs_inst),
+ ae_clear_gprs_inst);
+
+ /* set PC */
+ qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX, CTX_STS_INDIRECT,
+ UPC_MASK & CTX_STS_INDIRECT_UPC_INIT);
+
+ /* save current context */
+ qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &saved_ctx);
+ /* change the active context */
+ /* start the context from ctx 0 */
+ qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS, 0);
+
+ /* wakeup-event voluntary */
+ qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
+ CTX_WAKEUP_EVENTS_INDIRECT,
+ CTX_WAKEUP_EVENTS_INDIRECT_VOLUNTARY);
+ /* clean signals */
+ qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
+ CTX_SIG_EVENTS_INDIRECT, 0);
+ qat_ae_write_4(sc, ae, CTX_SIG_EVENTS_ACTIVE, 0);
+
+ qat_ae_enable_ctx(sc, ae, AE_ALL_CTX);
+ }
+
+ for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+ /* wait for AE to finish */
+ do {
+ rv = qat_ae_wait_num_cycles(sc, ae, AE_EXEC_CYCLE, 1);
+ } while (rv && times--);
+ if (times <= 0) {
+ device_printf(sc->sc_dev,
+			    "qat_ae_clear_gprs timeout\n");
+ return ETIMEDOUT;
+ }
+ qat_ae_disable_ctx(sc, ae, AE_ALL_CTX);
+ /* change the active context */
+ qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
+ saved_ctx & ACTIVE_CTX_STATUS_ACNO);
+ /* init the ctx_enable */
+ qat_ae_write_4(sc, ae, CTX_ENABLES, CTX_ENABLES_INIT);
+ /* initialize the PCs */
+ qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
+ CTX_STS_INDIRECT, UPC_MASK & CTX_STS_INDIRECT_UPC_INIT);
+ /* init the ctx_arb */
+ qat_ae_write_4(sc, ae, CTX_ARB_CNTL, CTX_ARB_CNTL_INIT);
+ /* enable cc */
+ qat_ae_write_4(sc, ae, CC_ENABLE, CC_ENABLE_INIT);
+ qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
+ CTX_WAKEUP_EVENTS_INDIRECT, CTX_WAKEUP_EVENTS_INDIRECT_INIT);
+ qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX, CTX_SIG_EVENTS_INDIRECT,
+ CTX_SIG_EVENTS_INDIRECT_INIT);
+ }
+
+ return 0;
+}
+
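+/* AEs share a control store in even/odd pairs; return this AE's partner. */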
+static void
+qat_ae_get_shared_ustore_ae(u_char ae, u_char *nae)
+{
+ if (ae & 0x1)
+ *nae = ae - 1;
+ else
+ *nae = ae + 1;
+}
+
+static u_int
+qat_ae_ucode_parity64(uint64_t ucode)
+{
+
+ ucode ^= ucode >> 1;
+ ucode ^= ucode >> 2;
+ ucode ^= ucode >> 4;
+ ucode ^= ucode >> 8;
+ ucode ^= ucode >> 16;
+ ucode ^= ucode >> 32;
+
+ return ((u_int)(ucode & 1));
+}
+
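+/*
+ * Compute the seven ECC bits of a microword: each bit is the parity of the
+ * instruction bits selected by the corresponding mask.
+ */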
+static uint64_t
+qat_ae_ucode_set_ecc(uint64_t ucode)
+{
+ static const uint64_t
+ bit0mask=0xff800007fffULL, bit1mask=0x1f801ff801fULL,
+ bit2mask=0xe387e0781e1ULL, bit3mask=0x7cb8e388e22ULL,
+ bit4mask=0xaf5b2c93244ULL, bit5mask=0xf56d5525488ULL,
+ bit6mask=0xdaf69a46910ULL;
+
+ /* clear the ecc bits */
+ ucode &= ~(0x7fULL << USTORE_ECC_BIT_0);
+
+ ucode |= (uint64_t)qat_ae_ucode_parity64(bit0mask & ucode) <<
+ USTORE_ECC_BIT_0;
+ ucode |= (uint64_t)qat_ae_ucode_parity64(bit1mask & ucode) <<
+ USTORE_ECC_BIT_1;
+ ucode |= (uint64_t)qat_ae_ucode_parity64(bit2mask & ucode) <<
+ USTORE_ECC_BIT_2;
+ ucode |= (uint64_t)qat_ae_ucode_parity64(bit3mask & ucode) <<
+ USTORE_ECC_BIT_3;
+ ucode |= (uint64_t)qat_ae_ucode_parity64(bit4mask & ucode) <<
+ USTORE_ECC_BIT_4;
+ ucode |= (uint64_t)qat_ae_ucode_parity64(bit5mask & ucode) <<
+ USTORE_ECC_BIT_5;
+ ucode |= (uint64_t)qat_ae_ucode_parity64(bit6mask & ucode) <<
+ USTORE_ECC_BIT_6;
+
+ return (ucode);
+}
+
+static int
+qat_ae_ucode_write(struct qat_softc *sc, u_char ae, u_int uaddr, u_int ninst,
+ const uint64_t *ucode)
+{
+ uint64_t tmp;
+ uint32_t ustore_addr, ulo, uhi;
+ int i;
+
+ qat_ae_read_4(sc, ae, USTORE_ADDRESS, &ustore_addr);
+ uaddr |= USTORE_ADDRESS_ECS;
+
+ qat_ae_write_4(sc, ae, USTORE_ADDRESS, uaddr);
+ for (i = 0; i < ninst; i++) {
+ tmp = qat_ae_ucode_set_ecc(ucode[i]);
+ ulo = (uint32_t)(tmp & 0xffffffff);
+ uhi = (uint32_t)(tmp >> 32);
+
+ qat_ae_write_4(sc, ae, USTORE_DATA_LOWER, ulo);
+ /* this will auto increment the address */
+ qat_ae_write_4(sc, ae, USTORE_DATA_UPPER, uhi);
+ }
+ qat_ae_write_4(sc, ae, USTORE_ADDRESS, ustore_addr);
+
+ return 0;
+}
+
+static int
+qat_ae_ucode_read(struct qat_softc *sc, u_char ae, u_int uaddr, u_int ninst,
+ uint64_t *ucode)
+{
+ uint32_t misc, ustore_addr, ulo, uhi;
+ u_int ii;
+ u_char nae;
+
+ if (qat_ae_get_status(sc, ae) != QAT_AE_DISABLED)
+ return EBUSY;
+
+	/* determine whether the neighbour AE runs in shared control
+	 * store mode */
+ qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &misc);
+ if (misc & AE_MISC_CONTROL_SHARE_CS) {
+ qat_ae_get_shared_ustore_ae(ae, &nae);
+ if ((sc->sc_ae_mask & (1 << nae)) && qat_ae_is_active(sc, nae))
+ return EBUSY;
+ }
+
+ /* if reloadable, then get it all from dram-ustore */
+ if (__SHIFTOUT(misc, AE_MISC_CONTROL_CS_RELOAD))
+ panic("notyet"); /* XXX getReloadUwords */
+
+	/* disable the SHARE_CS bit to work around a silicon bug */
+ qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc & 0xfffffffb);
+
+ MPASS(uaddr + ninst <= USTORE_SIZE);
+
+ /* save ustore-addr csr */
+ qat_ae_read_4(sc, ae, USTORE_ADDRESS, &ustore_addr);
+
+ uaddr |= USTORE_ADDRESS_ECS; /* enable ecs bit */
+ for (ii = 0; ii < ninst; ii++) {
+ qat_ae_write_4(sc, ae, USTORE_ADDRESS, uaddr);
+
+ uaddr++;
+ qat_ae_read_4(sc, ae, USTORE_DATA_LOWER, &ulo);
+ qat_ae_read_4(sc, ae, USTORE_DATA_UPPER, &uhi);
+ ucode[ii] = uhi;
+ ucode[ii] = (ucode[ii] << 32) | ulo;
+ }
+
+	/* restore the SHARE_CS bit to work around a silicon bug */
+ qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc);
+ qat_ae_write_4(sc, ae, USTORE_ADDRESS, ustore_addr);
+
+ return 0;
+}
+
+static u_int
+qat_ae_concat_ucode(uint64_t *ucode, u_int ninst, u_int size, u_int addr,
+ u_int *value)
+{
+ const uint64_t *inst_arr;
+ u_int ninst0, curvalue;
+ int ii, vali, fixup, usize = 0;
+
+ if (size == 0)
+ return 0;
+
+ ninst0 = ninst;
+ vali = 0;
+ curvalue = value[vali++];
+
+ switch (size) {
+ case 0x1:
+ inst_arr = ae_inst_1b;
+ usize = nitems(ae_inst_1b);
+ break;
+ case 0x2:
+ inst_arr = ae_inst_2b;
+ usize = nitems(ae_inst_2b);
+ break;
+ case 0x3:
+ inst_arr = ae_inst_3b;
+ usize = nitems(ae_inst_3b);
+ break;
+ default:
+ inst_arr = ae_inst_4b;
+ usize = nitems(ae_inst_4b);
+ break;
+ }
+
+ fixup = ninst;
+ for (ii = 0; ii < usize; ii++)
+ ucode[ninst++] = inst_arr[ii];
+
+ INSERT_IMMED_GPRA_CONST(ucode[fixup], (addr));
+ fixup++;
+ INSERT_IMMED_GPRA_CONST(ucode[fixup], 0);
+ fixup++;
+ INSERT_IMMED_GPRB_CONST(ucode[fixup], (curvalue >> 0));
+ fixup++;
+ INSERT_IMMED_GPRB_CONST(ucode[fixup], (curvalue >> 16));
+ /* XXX fixup++ ? */
+
+ if (size <= 0x4)
+ return (ninst - ninst0);
+
+ size -= sizeof(u_int);
+ while (size >= sizeof(u_int)) {
+ curvalue = value[vali++];
+ fixup = ninst;
+ ucode[ninst++] = ae_inst_4b[0x2];
+ ucode[ninst++] = ae_inst_4b[0x3];
+ ucode[ninst++] = ae_inst_4b[0x8];
+ INSERT_IMMED_GPRB_CONST(ucode[fixup], (curvalue >> 16));
+ fixup++;
+ INSERT_IMMED_GPRB_CONST(ucode[fixup], (curvalue >> 0));
+ /* XXX fixup++ ? */
+
+ addr += sizeof(u_int);
+ size -= sizeof(u_int);
+ }
+	/* call this function recursively when the remaining size is less than 4 */
+ ninst +=
+ qat_ae_concat_ucode(ucode, ninst, size, addr, value + vali);
+
+ return (ninst - ninst0);
+}
+
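+/*
+ * Execute a microcode snippet on one AE context: save the affected context
+ * state and ustore contents, run the snippet from ustore address 0, and
+ * restore everything that was saved once it completes.
+ */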
+static int
+qat_ae_exec_ucode(struct qat_softc *sc, u_char ae, u_char ctx,
+ uint64_t *ucode, u_int ninst, int cond_code_off, u_int max_cycles,
+ u_int *endpc)
+{
+ int error = 0, share_cs = 0;
+ uint64_t savucode[MAX_EXEC_INST];
+ uint32_t indr_lm_addr_0, indr_lm_addr_1;
+ uint32_t indr_lm_addr_byte_0, indr_lm_addr_byte_1;
+ uint32_t indr_future_cnt_sig;
+ uint32_t indr_sig, active_sig;
+ uint32_t wakeup_ev, savpc, savcc, savctx, ctxarbctl;
+ uint32_t misc, nmisc, ctxen;
+ u_char nae;
+
+ MPASS(ninst <= USTORE_SIZE);
+
+ if (qat_ae_is_active(sc, ae))
+ return EBUSY;
+
+ /* save current LM addr */
+ qat_ae_ctx_indr_read(sc, ae, ctx, LM_ADDR_0_INDIRECT, &indr_lm_addr_0);
+ qat_ae_ctx_indr_read(sc, ae, ctx, LM_ADDR_1_INDIRECT, &indr_lm_addr_1);
+ qat_ae_ctx_indr_read(sc, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
+ &indr_lm_addr_byte_0);
+ qat_ae_ctx_indr_read(sc, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
+ &indr_lm_addr_byte_1);
+
+	/* back up the shared control store bit, and force the AE to
+	   non-shared mode before executing the ucode snippet */
+ qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &misc);
+ if (misc & AE_MISC_CONTROL_SHARE_CS) {
+ share_cs = 1;
+ qat_ae_get_shared_ustore_ae(ae, &nae);
+ if ((sc->sc_ae_mask & (1 << nae)) && qat_ae_is_active(sc, nae))
+ return EBUSY;
+ }
+ nmisc = misc & ~AE_MISC_CONTROL_SHARE_CS;
+ qat_ae_write_4(sc, ae, AE_MISC_CONTROL, nmisc);
+
+ /* save current states: */
+ if (ninst <= MAX_EXEC_INST) {
+ error = qat_ae_ucode_read(sc, ae, 0, ninst, savucode);
+ if (error) {
+ qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc);
+ return error;
+ }
+ }
+
+ /* save wakeup-events */
+ qat_ae_ctx_indr_read(sc, ae, ctx, CTX_WAKEUP_EVENTS_INDIRECT,
+ &wakeup_ev);
+ /* save PC */
+ qat_ae_ctx_indr_read(sc, ae, ctx, CTX_STS_INDIRECT, &savpc);
+ savpc &= UPC_MASK;
+
+ /* save ctx enables */
+ qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
+ ctxen &= CTX_ENABLES_IGNORE_W1C_MASK;
+ /* save conditional-code */
+ qat_ae_read_4(sc, ae, CC_ENABLE, &savcc);
+ /* save current context */
+ qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &savctx);
+ qat_ae_read_4(sc, ae, CTX_ARB_CNTL, &ctxarbctl);
+
+ /* save indirect csrs */
+ qat_ae_ctx_indr_read(sc, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
+ &indr_future_cnt_sig);
+ qat_ae_ctx_indr_read(sc, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &indr_sig);
+ qat_ae_read_4(sc, ae, CTX_SIG_EVENTS_ACTIVE, &active_sig);
+
+ /* turn off ucode parity */
+ qat_ae_write_4(sc, ae, CTX_ENABLES,
+ ctxen & ~CTX_ENABLES_CNTL_STORE_PARITY_ENABLE);
+
+ /* copy instructions to ustore */
+ qat_ae_ucode_write(sc, ae, 0, ninst, ucode);
+ /* set PC */
+ qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_STS_INDIRECT, 0);
+ /* change the active context */
+ qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
+ ctx & ACTIVE_CTX_STATUS_ACNO);
+
+ if (cond_code_off) {
+ /* disable conditional-code*/
+ qat_ae_write_4(sc, ae, CC_ENABLE, savcc & 0xffffdfff);
+ }
+
+ /* wakeup-event voluntary */
+ qat_ae_ctx_indr_write(sc, ae, 1 << ctx,
+ CTX_WAKEUP_EVENTS_INDIRECT, CTX_WAKEUP_EVENTS_INDIRECT_VOLUNTARY);
+
+ /* clean signals */
+ qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_SIG_EVENTS_INDIRECT, 0);
+ qat_ae_write_4(sc, ae, CTX_SIG_EVENTS_ACTIVE, 0);
+
+ /* enable context */
+ qat_ae_enable_ctx(sc, ae, 1 << ctx);
+
+ /* wait for it to finish */
+ if (qat_ae_wait_num_cycles(sc, ae, max_cycles, 1) != 0)
+ error = ETIMEDOUT;
+
+ /* see if we need to get the current PC */
+ if (endpc != NULL) {
+ uint32_t ctx_status;
+
+ qat_ae_ctx_indr_read(sc, ae, ctx, CTX_STS_INDIRECT,
+ &ctx_status);
+ *endpc = ctx_status & UPC_MASK;
+ }
+#if 0
+ {
+ uint32_t ctx_status;
+
+ qat_ae_ctx_indr_read(sc, ae, ctx, CTX_STS_INDIRECT,
+ &ctx_status);
+ printf("%s: endpc 0x%08x\n", __func__,
+ ctx_status & UPC_MASK);
+ }
+#endif
+
+	/* restore previous states: */
+ /* disable context */
+ qat_ae_disable_ctx(sc, ae, 1 << ctx);
+ if (ninst <= MAX_EXEC_INST) {
+ /* instructions */
+ qat_ae_ucode_write(sc, ae, 0, ninst, savucode);
+ }
+ /* wakeup-events */
+ qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_WAKEUP_EVENTS_INDIRECT,
+ wakeup_ev);
+ qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_STS_INDIRECT, savpc);
+
+	/* only restore the shared control store bit; other bits might
+	   have been changed by the AE code snippet */
+ qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &misc);
+ if (share_cs)
+ nmisc = misc | AE_MISC_CONTROL_SHARE_CS;
+ else
+ nmisc = misc & ~AE_MISC_CONTROL_SHARE_CS;
+ qat_ae_write_4(sc, ae, AE_MISC_CONTROL, nmisc);
+ /* conditional-code */
+ qat_ae_write_4(sc, ae, CC_ENABLE, savcc);
+ /* change the active context */
+ qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
+ savctx & ACTIVE_CTX_STATUS_ACNO);
+ /* restore the nxt ctx to run */
+ qat_ae_write_4(sc, ae, CTX_ARB_CNTL, ctxarbctl);
+ /* restore current LM addr */
+ qat_ae_ctx_indr_write(sc, ae, 1 << ctx, LM_ADDR_0_INDIRECT,
+ indr_lm_addr_0);
+ qat_ae_ctx_indr_write(sc, ae, 1 << ctx, LM_ADDR_1_INDIRECT,
+ indr_lm_addr_1);
+ qat_ae_ctx_indr_write(sc, ae, 1 << ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
+ indr_lm_addr_byte_0);
+ qat_ae_ctx_indr_write(sc, ae, 1 << ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
+ indr_lm_addr_byte_1);
+
+ /* restore indirect csrs */
+ qat_ae_ctx_indr_write(sc, ae, 1 << ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
+ indr_future_cnt_sig);
+ qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_SIG_EVENTS_INDIRECT,
+ indr_sig);
+ qat_ae_write_4(sc, ae, CTX_SIG_EVENTS_ACTIVE, active_sig);
+
+ /* ctx-enables */
+ qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen);
+
+ return error;
+}
+
+static int
+qat_ae_exec_ucode_init_lm(struct qat_softc *sc, u_char ae, u_char ctx,
+ int *first_exec, uint64_t *ucode, u_int ninst,
+ u_int *gpr_a0, u_int *gpr_a1, u_int *gpr_a2, u_int *gpr_b0, u_int *gpr_b1)
+{
+
+ if (*first_exec) {
+ qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPA_REL, 0, gpr_a0);
+ qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPA_REL, 1, gpr_a1);
+ qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPA_REL, 2, gpr_a2);
+ qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPB_REL, 0, gpr_b0);
+ qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPB_REL, 1, gpr_b1);
+ *first_exec = 0;
+ }
+
+ return qat_ae_exec_ucode(sc, ae, ctx, ucode, ninst, 1, ninst * 5, NULL);
+}
+
+static int
+qat_ae_restore_init_lm_gprs(struct qat_softc *sc, u_char ae, u_char ctx,
+ u_int gpr_a0, u_int gpr_a1, u_int gpr_a2, u_int gpr_b0, u_int gpr_b1)
+{
+ qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPA_REL, 0, gpr_a0);
+ qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPA_REL, 1, gpr_a1);
+ qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPA_REL, 2, gpr_a2);
+ qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPB_REL, 0, gpr_b0);
+ qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPB_REL, 1, gpr_b1);
+
+ return 0;
+}
+
+static int
+qat_ae_get_inst_num(int lmsize)
+{
+ int ninst, left;
+
+ if (lmsize == 0)
+ return 0;
+
+ left = lmsize % sizeof(u_int);
+
+ if (left) {
+ ninst = nitems(ae_inst_1b) +
+ qat_ae_get_inst_num(lmsize - left);
+ } else {
+		/* 3 instructions are needed for each further 4-byte word */
+ ninst = (lmsize - sizeof(u_int)) * 3 / 4 + nitems(ae_inst_4b);
+ }
+
+ return (ninst);
+}
+
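+/*
+ * Write a batch of local-memory initializers by generating microcode that
+ * stores each value, executing the generated snippet whenever it would no
+ * longer fit in the ustore scratch buffer.
+ */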
+static int
+qat_ae_batch_put_lm(struct qat_softc *sc, u_char ae,
+ struct qat_ae_batch_init_list *qabi_list, size_t nqabi)
+{
+ struct qat_ae_batch_init *qabi;
+ size_t alloc_ninst, ninst;
+ uint64_t *ucode;
+ u_int gpr_a0, gpr_a1, gpr_a2, gpr_b0, gpr_b1;
+ int insnsz, error = 0, execed = 0, first_exec = 1;
+
+ if (STAILQ_FIRST(qabi_list) == NULL)
+ return 0;
+
+ alloc_ninst = min(USTORE_SIZE, nqabi);
+ ucode = qat_alloc_mem(sizeof(uint64_t) * alloc_ninst);
+
+ ninst = 0;
+ STAILQ_FOREACH(qabi, qabi_list, qabi_next) {
+ insnsz = qat_ae_get_inst_num(qabi->qabi_size);
+ if (insnsz + ninst > alloc_ninst) {
+ /* add ctx_arb[kill] */
+ ucode[ninst++] = 0x0E000010000ull;
+ execed = 1;
+
+ error = qat_ae_exec_ucode_init_lm(sc, ae, 0,
+ &first_exec, ucode, ninst,
+ &gpr_a0, &gpr_a1, &gpr_a2, &gpr_b0, &gpr_b1);
+ if (error) {
+ qat_ae_restore_init_lm_gprs(sc, ae, 0,
+ gpr_a0, gpr_a1, gpr_a2, gpr_b0, gpr_b1);
+ qat_free_mem(ucode);
+ return error;
+ }
+ /* the accumulated microcode was executed; start a new batch */
+ ninst = 0;
+ }
+ ninst += qat_ae_concat_ucode(ucode, ninst,
+ qabi->qabi_size, qabi->qabi_addr, qabi->qabi_value);
+ }
+
+ if (ninst > 0) {
+ ucode[ninst++] = 0x0E000010000ull;
+ execed = 1;
+
+ error = qat_ae_exec_ucode_init_lm(sc, ae, 0,
+ &first_exec, ucode, ninst,
+ &gpr_a0, &gpr_a1, &gpr_a2, &gpr_b0, &gpr_b1);
+ }
+ if (execed) {
+ qat_ae_restore_init_lm_gprs(sc, ae, 0,
+ gpr_a0, gpr_a1, gpr_a2, gpr_b0, gpr_b1);
+ }
+
+ qat_free_mem(ucode);
+
+ return error;
+}
+
+static int
+qat_ae_write_pc(struct qat_softc *sc, u_char ae, u_int ctx_mask, u_int upc)
+{
+
+ if (qat_ae_is_active(sc, ae))
+ return EBUSY;
+
+ qat_ae_ctx_indr_write(sc, ae, ctx_mask, CTX_STS_INDIRECT,
+ UPC_MASK & upc);
+ return 0;
+}
+
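+/*
+ * Byte-wise CRC used to validate MOF/UOF/SUOF checksums; qat_aefw_csum()
+ * folds each byte of a buffer into the running value.
+ */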
+static inline u_int
+qat_aefw_csum_calc(u_int reg, int ch)
+{
+ int i;
+ u_int topbit = CRC_BITMASK(CRC_WIDTH - 1);
+ u_int inbyte = (u_int)((reg >> 0x18) ^ ch);
+
+ reg ^= inbyte << (CRC_WIDTH - 0x8);
+ for (i = 0; i < 0x8; i++) {
+ if (reg & topbit)
+ reg = (reg << 1) ^ CRC_POLY;
+ else
+ reg <<= 1;
+ }
+
+ return (reg & CRC_WIDTHMASK(CRC_WIDTH));
+}
+
+static u_int
+qat_aefw_csum(char *buf, int size)
+{
+ u_int csum = 0;
+
+ while (size--) {
+ csum = qat_aefw_csum_calc(csum, *buf++);
+ }
+
+ return csum;
+}
+
+static const char *
+qat_aefw_uof_string(struct qat_softc *sc, size_t offset)
+{
+ if (offset >= sc->sc_aefw_uof.qafu_str_tab_size)
+ return NULL;
+ if (sc->sc_aefw_uof.qafu_str_tab == NULL)
+ return NULL;
+
+ return (const char *)((uintptr_t)sc->sc_aefw_uof.qafu_str_tab + offset);
+}
+
+static struct uof_chunk_hdr *
+qat_aefw_uof_find_chunk(struct qat_softc *sc,
+ const char *id, struct uof_chunk_hdr *cur)
+{
+ struct uof_obj_hdr *uoh = sc->sc_aefw_uof.qafu_obj_hdr;
+ struct uof_chunk_hdr *uch;
+ int i;
+
+ uch = (struct uof_chunk_hdr *)(uoh + 1);
+ for (i = 0; i < uoh->uoh_num_chunks; i++, uch++) {
+ if (uch->uch_offset + uch->uch_size > sc->sc_aefw_uof.qafu_size)
+ return NULL;
+
+ if (cur < uch && !strncmp(uch->uch_id, id, UOF_OBJ_ID_LEN))
+ return uch;
+ }
+
+ return NULL;
+}
+
+static int
+qat_aefw_load_mof(struct qat_softc *sc)
+{
+ const struct firmware *fw;
+
+ fw = firmware_get(sc->sc_hw.qhw_mof_fwname);
+ if (fw == NULL) {
+ device_printf(sc->sc_dev, "couldn't load MOF firmware %s\n",
+ sc->sc_hw.qhw_mof_fwname);
+ return ENXIO;
+ }
+
+ sc->sc_fw_mof = qat_alloc_mem(fw->datasize);
+ sc->sc_fw_mof_size = fw->datasize;
+ memcpy(sc->sc_fw_mof, fw->data, fw->datasize);
+ firmware_put(fw, FIRMWARE_UNLOAD);
+ return 0;
+}
+
+static void
+qat_aefw_unload_mof(struct qat_softc *sc)
+{
+ if (sc->sc_fw_mof != NULL) {
+ qat_free_mem(sc->sc_fw_mof);
+ sc->sc_fw_mof = NULL;
+ }
+}
+
+static int
+qat_aefw_load_mmp(struct qat_softc *sc)
+{
+ const struct firmware *fw;
+
+ fw = firmware_get(sc->sc_hw.qhw_mmp_fwname);
+ if (fw == NULL) {
+ device_printf(sc->sc_dev, "couldn't load MMP firmware %s\n",
+ sc->sc_hw.qhw_mmp_fwname);
+ return ENXIO;
+ }
+
+ sc->sc_fw_mmp = qat_alloc_mem(fw->datasize);
+ sc->sc_fw_mmp_size = fw->datasize;
+ memcpy(sc->sc_fw_mmp, fw->data, fw->datasize);
+ firmware_put(fw, FIRMWARE_UNLOAD);
+ return 0;
+}
+
+static void
+qat_aefw_unload_mmp(struct qat_softc *sc)
+{
+ if (sc->sc_fw_mmp != NULL) {
+ qat_free_mem(sc->sc_fw_mmp);
+ sc->sc_fw_mmp = NULL;
+ }
+}
+
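+/*
+ * Scan a MOF chunk table for the object whose symbol-table name matches
+ * the firmware object selected for this device and return its location
+ * and size.
+ */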
+static int
+qat_aefw_mof_find_uof0(struct qat_softc *sc,
+ struct mof_uof_hdr *muh, struct mof_uof_chunk_hdr *head,
+ u_int nchunk, size_t size, const char *id,
+ size_t *fwsize, void **fwptr)
+{
+ int i;
+ char *uof_name;
+
+ for (i = 0; i < nchunk; i++) {
+ struct mof_uof_chunk_hdr *much = &head[i];
+
+ if (strncmp(much->much_id, id, MOF_OBJ_ID_LEN))
+ return EINVAL;
+
+ if (much->much_offset + much->much_size > size)
+ return EINVAL;
+
+ if (sc->sc_mof.qmf_sym_size <= much->much_name)
+ return EINVAL;
+
+ uof_name = (char *)((uintptr_t)sc->sc_mof.qmf_sym +
+ much->much_name);
+
+ if (!strcmp(uof_name, sc->sc_fw_uof_name)) {
+ *fwptr = (void *)((uintptr_t)muh +
+ (uintptr_t)much->much_offset);
+ *fwsize = (size_t)much->much_size;
+ return 0;
+ }
+ }
+
+ return ENOENT;
+}
+
+static int
+qat_aefw_mof_find_uof(struct qat_softc *sc)
+{
+ struct mof_uof_hdr *uof_hdr, *suof_hdr;
+ u_int nuof_chunks = 0, nsuof_chunks = 0;
+ int error;
+
+ uof_hdr = sc->sc_mof.qmf_uof_objs;
+ suof_hdr = sc->sc_mof.qmf_suof_objs;
+
+ if (uof_hdr != NULL) {
+ if (uof_hdr->muh_max_chunks < uof_hdr->muh_num_chunks) {
+ return EINVAL;
+ }
+ nuof_chunks = uof_hdr->muh_num_chunks;
+ }
+ if (suof_hdr != NULL) {
+ if (suof_hdr->muh_max_chunks < suof_hdr->muh_num_chunks)
+ return EINVAL;
+ nsuof_chunks = suof_hdr->muh_num_chunks;
+ }
+
+ if (nuof_chunks + nsuof_chunks == 0)
+ return EINVAL;
+
+ if (uof_hdr != NULL) {
+ error = qat_aefw_mof_find_uof0(sc, uof_hdr,
+ (struct mof_uof_chunk_hdr *)(uof_hdr + 1), nuof_chunks,
+ sc->sc_mof.qmf_uof_objs_size, UOF_IMAG,
+ &sc->sc_fw_uof_size, &sc->sc_fw_uof);
+ if (error && error != ENOENT)
+ return error;
+ }
+
+ if (suof_hdr != NULL) {
+ error = qat_aefw_mof_find_uof0(sc, suof_hdr,
+ (struct mof_uof_chunk_hdr *)(suof_hdr + 1), nsuof_chunks,
+ sc->sc_mof.qmf_suof_objs_size, SUOF_IMAG,
+ &sc->sc_fw_suof_size, &sc->sc_fw_suof);
+ if (error && error != ENOENT)
+ return error;
+ }
+
+ if (sc->sc_fw_uof == NULL && sc->sc_fw_suof == NULL)
+ return ENOENT;
+
+ return 0;
+}
+
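+/*
+ * Validate the MOF file header and checksum, record the symbol, UOF and
+ * SUOF sections, and locate the firmware object to be loaded.
+ */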
+static int
+qat_aefw_mof_parse(struct qat_softc *sc)
+{
+ const struct mof_file_hdr *mfh;
+ const struct mof_file_chunk_hdr *mfch;
+ size_t size;
+ u_int csum;
+ int error, i;
+
+ size = sc->sc_fw_mof_size;
+
+ if (size < sizeof(struct mof_file_hdr))
+ return EINVAL;
+ size -= sizeof(struct mof_file_hdr);
+
+ mfh = sc->sc_fw_mof;
+
+ if (mfh->mfh_fid != MOF_FID)
+ return EINVAL;
+
+ csum = qat_aefw_csum((char *)((uintptr_t)sc->sc_fw_mof +
+ offsetof(struct mof_file_hdr, mfh_min_ver)),
+ sc->sc_fw_mof_size -
+ offsetof(struct mof_file_hdr, mfh_min_ver));
+ if (mfh->mfh_csum != csum)
+ return EINVAL;
+
+ if (mfh->mfh_min_ver != MOF_MIN_VER ||
+ mfh->mfh_maj_ver != MOF_MAJ_VER)
+ return EINVAL;
+
+ if (mfh->mfh_max_chunks < mfh->mfh_num_chunks)
+ return EINVAL;
+
+ if (size < sizeof(struct mof_file_chunk_hdr) * mfh->mfh_num_chunks)
+ return EINVAL;
+ mfch = (const struct mof_file_chunk_hdr *)(mfh + 1);
+
+ for (i = 0; i < mfh->mfh_num_chunks; i++, mfch++) {
+ if (mfch->mfch_offset + mfch->mfch_size > sc->sc_fw_mof_size)
+ return EINVAL;
+
+ if (!strncmp(mfch->mfch_id, SYM_OBJS, MOF_OBJ_ID_LEN)) {
+ if (sc->sc_mof.qmf_sym != NULL)
+ return EINVAL;
+
+ sc->sc_mof.qmf_sym =
+ (void *)((uintptr_t)sc->sc_fw_mof +
+ (uintptr_t)mfch->mfch_offset + sizeof(u_int));
+ sc->sc_mof.qmf_sym_size =
+ *(u_int *)((uintptr_t)sc->sc_fw_mof +
+ (uintptr_t)mfch->mfch_offset);
+
+ if (sc->sc_mof.qmf_sym_size % sizeof(u_int) != 0)
+ return EINVAL;
+ if (mfch->mfch_size != sc->sc_mof.qmf_sym_size +
+ sizeof(u_int) || mfch->mfch_size == 0)
+ return EINVAL;
+ if (*(char *)((uintptr_t)sc->sc_mof.qmf_sym +
+ sc->sc_mof.qmf_sym_size - 1) != '\0')
+ return EINVAL;
+
+ } else if (!strncmp(mfch->mfch_id, UOF_OBJS, MOF_OBJ_ID_LEN)) {
+ if (sc->sc_mof.qmf_uof_objs != NULL)
+ return EINVAL;
+
+ sc->sc_mof.qmf_uof_objs =
+ (void *)((uintptr_t)sc->sc_fw_mof +
+ (uintptr_t)mfch->mfch_offset);
+ sc->sc_mof.qmf_uof_objs_size = mfch->mfch_size;
+
+ } else if (!strncmp(mfch->mfch_id, SUOF_OBJS, MOF_OBJ_ID_LEN)) {
+ if (sc->sc_mof.qmf_suof_objs != NULL)
+ return EINVAL;
+
+ sc->sc_mof.qmf_suof_objs =
+ (void *)((uintptr_t)sc->sc_fw_mof +
+ (uintptr_t)mfch->mfch_offset);
+ sc->sc_mof.qmf_suof_objs_size = mfch->mfch_size;
+ }
+ }
+
+ if (sc->sc_mof.qmf_sym == NULL ||
+ (sc->sc_mof.qmf_uof_objs == NULL &&
+ sc->sc_mof.qmf_suof_objs == NULL))
+ return EINVAL;
+
+ error = qat_aefw_mof_find_uof(sc);
+ if (error)
+ return error;
+ return 0;
+}
+
+static int
+qat_aefw_uof_parse_image(struct qat_softc *sc,
+ struct qat_uof_image *qui, struct uof_chunk_hdr *uch)
+{
+ struct uof_image *image;
+ struct uof_code_page *page;
+ uintptr_t base = (uintptr_t)sc->sc_aefw_uof.qafu_obj_hdr;
+ size_t lim = uch->uch_offset + uch->uch_size, size;
+ int i, p;
+
+ size = uch->uch_size;
+ if (size < sizeof(struct uof_image))
+ return EINVAL;
+ size -= sizeof(struct uof_image);
+
+ qui->qui_image = image =
+ (struct uof_image *)(base + uch->uch_offset);
+
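+/*
+ * Bounds-check an object table embedded in the UOF and hand back its
+ * entry count and a pointer to the first entry.
+ */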
+#define ASSIGN_OBJ_TAB(np, typep, type, base, off, lim) \
+do { \
+ u_int nent; \
+ nent = ((struct uof_obj_table *)((base) + (off)))->uot_nentries;\
+ if ((lim) < (off) + sizeof(struct uof_obj_table) + \
+ sizeof(type) * nent) \
+ return EINVAL; \
+ *(np) = nent; \
+ if (nent > 0) \
+ *(typep) = (type)((struct uof_obj_table *) \
+ ((base) + (off)) + 1); \
+ else \
+ *(typep) = NULL; \
+} while (0)
+
+ ASSIGN_OBJ_TAB(&qui->qui_num_ae_reg, &qui->qui_ae_reg,
+ struct uof_ae_reg *, base, image->ui_reg_tab, lim);
+ ASSIGN_OBJ_TAB(&qui->qui_num_init_reg_sym, &qui->qui_init_reg_sym,
+ struct uof_init_reg_sym *, base, image->ui_init_reg_sym_tab, lim);
+ ASSIGN_OBJ_TAB(&qui->qui_num_sbreak, &qui->qui_sbreak,
+ struct qui_sbreak *, base, image->ui_sbreak_tab, lim);
+
+ if (size < sizeof(struct uof_code_page) * image->ui_num_pages)
+ return EINVAL;
+ if (nitems(qui->qui_pages) < image->ui_num_pages)
+ return EINVAL;
+
+ page = (struct uof_code_page *)(image + 1);
+
+ for (p = 0; p < image->ui_num_pages; p++, page++) {
+ struct qat_uof_page *qup = &qui->qui_pages[p];
+ struct uof_code_area *uca;
+
+ qup->qup_page_num = page->ucp_page_num;
+ qup->qup_def_page = page->ucp_def_page;
+ qup->qup_page_region = page->ucp_page_region;
+ qup->qup_beg_vaddr = page->ucp_beg_vaddr;
+ qup->qup_beg_paddr = page->ucp_beg_paddr;
+
+ ASSIGN_OBJ_TAB(&qup->qup_num_uc_var, &qup->qup_uc_var,
+ struct uof_uword_fixup *, base,
+ page->ucp_uc_var_tab, lim);
+ ASSIGN_OBJ_TAB(&qup->qup_num_imp_var, &qup->qup_imp_var,
+ struct uof_import_var *, base,
+ page->ucp_imp_var_tab, lim);
+ ASSIGN_OBJ_TAB(&qup->qup_num_imp_expr, &qup->qup_imp_expr,
+ struct uof_uword_fixup *, base,
+ page->ucp_imp_expr_tab, lim);
+ ASSIGN_OBJ_TAB(&qup->qup_num_neigh_reg, &qup->qup_neigh_reg,
+ struct uof_uword_fixup *, base,
+ page->ucp_neigh_reg_tab, lim);
+
+ if (lim < page->ucp_code_area + sizeof(struct uof_code_area))
+ return EINVAL;
+
+ uca = (struct uof_code_area *)(base + page->ucp_code_area);
+ qup->qup_num_micro_words = uca->uca_num_micro_words;
+
+ ASSIGN_OBJ_TAB(&qup->qup_num_uw_blocks, &qup->qup_uw_blocks,
+ struct qat_uof_uword_block *, base,
+ uca->uca_uword_block_tab, lim);
+
+ for (i = 0; i < qup->qup_num_uw_blocks; i++) {
+ u_int uwordoff = ((struct uof_uword_block *)(
+ &qup->qup_uw_blocks[i]))->uub_uword_offset;
+
+ if (lim < uwordoff)
+ return EINVAL;
+
+ qup->qup_uw_blocks[i].quub_micro_words =
+ (base + uwordoff);
+ }
+ }
+
+#undef ASSIGN_OBJ_TAB
+
+ return 0;
+}
+
+static int
+qat_aefw_uof_parse_images(struct qat_softc *sc)
+{
+ struct uof_chunk_hdr *uch = NULL;
+ u_int assigned_ae;
+ int i, error;
+
+ for (i = 0; i < MAX_NUM_AE * MAX_AE_CTX; i++) {
+ uch = qat_aefw_uof_find_chunk(sc, UOF_IMAG, uch);
+ if (uch == NULL)
+ break;
+
+ if (i >= nitems(sc->sc_aefw_uof.qafu_imgs))
+ return ENOENT;
+
+ error = qat_aefw_uof_parse_image(sc, &sc->sc_aefw_uof.qafu_imgs[i], uch);
+ if (error)
+ return error;
+
+ sc->sc_aefw_uof.qafu_num_imgs++;
+ }
+
+ assigned_ae = 0;
+ for (i = 0; i < sc->sc_aefw_uof.qafu_num_imgs; i++) {
+ assigned_ae |= sc->sc_aefw_uof.qafu_imgs[i].qui_image->ui_ae_assigned;
+ }
+
+ return 0;
+}
+
+static int
+qat_aefw_uof_parse(struct qat_softc *sc)
+{
+ struct uof_file_hdr *ufh;
+ struct uof_file_chunk_hdr *ufch;
+ struct uof_obj_hdr *uoh;
+ struct uof_chunk_hdr *uch;
+ void *uof = NULL;
+ size_t size, uof_size, hdr_size;
+ uintptr_t base;
+ u_int csum;
+ int i;
+
+ size = sc->sc_fw_uof_size;
+ if (size < MIN_UOF_SIZE)
+ return EINVAL;
+ size -= sizeof(struct uof_file_hdr);
+
+ ufh = sc->sc_fw_uof;
+
+ if (ufh->ufh_id != UOF_FID)
+ return EINVAL;
+ if (ufh->ufh_min_ver != UOF_MIN_VER || ufh->ufh_maj_ver != UOF_MAJ_VER)
+ return EINVAL;
+
+ if (ufh->ufh_max_chunks < ufh->ufh_num_chunks)
+ return EINVAL;
+ if (size < sizeof(struct uof_file_chunk_hdr) * ufh->ufh_num_chunks)
+ return EINVAL;
+ ufch = (struct uof_file_chunk_hdr *)(ufh + 1);
+
+ uof_size = 0;
+ for (i = 0; i < ufh->ufh_num_chunks; i++, ufch++) {
+ if (ufch->ufch_offset + ufch->ufch_size > sc->sc_fw_uof_size)
+ return EINVAL;
+
+ if (!strncmp(ufch->ufch_id, UOF_OBJS, UOF_OBJ_ID_LEN)) {
+ if (uof != NULL)
+ return EINVAL;
+
+ uof =
+ (void *)((uintptr_t)sc->sc_fw_uof +
+ ufch->ufch_offset);
+ uof_size = ufch->ufch_size;
+
+ csum = qat_aefw_csum(uof, uof_size);
+ if (csum != ufch->ufch_csum)
+ return EINVAL;
+ }
+ }
+
+ if (uof == NULL)
+ return ENOENT;
+
+ size = uof_size;
+ if (size < sizeof(struct uof_obj_hdr))
+ return EINVAL;
+ size -= sizeof(struct uof_obj_hdr);
+
+ uoh = uof;
+
+ if (size < sizeof(struct uof_chunk_hdr) * uoh->uoh_num_chunks)
+ return EINVAL;
+
+ /* Check if the UOF objects are compatible with the chip */
+ if ((uoh->uoh_cpu_type & sc->sc_hw.qhw_prod_type) == 0)
+ return ENOTSUP;
+
+ if (uoh->uoh_min_cpu_ver > sc->sc_rev ||
+ uoh->uoh_max_cpu_ver < sc->sc_rev)
+ return ENOTSUP;
+
+ sc->sc_aefw_uof.qafu_size = uof_size;
+ sc->sc_aefw_uof.qafu_obj_hdr = uoh;
+
+ base = (uintptr_t)sc->sc_aefw_uof.qafu_obj_hdr;
+
+ /* map uof string-table */
+ uch = qat_aefw_uof_find_chunk(sc, UOF_STRT, NULL);
+ if (uch != NULL) {
+ hdr_size = offsetof(struct uof_str_tab, ust_strings);
+ sc->sc_aefw_uof.qafu_str_tab =
+ (void *)(base + uch->uch_offset + hdr_size);
+ sc->sc_aefw_uof.qafu_str_tab_size = uch->uch_size - hdr_size;
+ }
+
+ /* get ustore mem inits table -- should be only one */
+ uch = qat_aefw_uof_find_chunk(sc, UOF_IMEM, NULL);
+ if (uch != NULL) {
+ if (uch->uch_size < sizeof(struct uof_obj_table))
+ return EINVAL;
+ sc->sc_aefw_uof.qafu_num_init_mem = ((struct uof_obj_table *)(base +
+ uch->uch_offset))->uot_nentries;
+ if (sc->sc_aefw_uof.qafu_num_init_mem) {
+ sc->sc_aefw_uof.qafu_init_mem =
+ (struct uof_init_mem *)(base + uch->uch_offset +
+ sizeof(struct uof_obj_table));
+ sc->sc_aefw_uof.qafu_init_mem_size =
+ uch->uch_size - sizeof(struct uof_obj_table);
+ }
+ }
+
+ uch = qat_aefw_uof_find_chunk(sc, UOF_MSEG, NULL);
+ if (uch != NULL) {
+ if (uch->uch_size < sizeof(struct uof_obj_table) +
+ sizeof(struct uof_var_mem_seg))
+ return EINVAL;
+ sc->sc_aefw_uof.qafu_var_mem_seg =
+ (struct uof_var_mem_seg *)(base + uch->uch_offset +
+ sizeof(struct uof_obj_table));
+ }
+
+ return qat_aefw_uof_parse_images(sc);
+}
+
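+/*
+ * Parse one signed image: record the offsets of the CSS header, public
+ * key, signature and image payload, and check that the image targets
+ * this device and revision.
+ */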
+static int
+qat_aefw_suof_parse_image(struct qat_softc *sc, struct qat_suof_image *qsi,
+ struct suof_chunk_hdr *sch)
+{
+ struct qat_aefw_suof *qafs = &sc->sc_aefw_suof;
+ struct simg_ae_mode *ae_mode;
+ u_int maj_ver;
+
+ qsi->qsi_simg_buf = qafs->qafs_suof_buf + sch->sch_offset +
+ sizeof(struct suof_obj_hdr);
+ qsi->qsi_simg_len =
+ ((struct suof_obj_hdr *)
+ (qafs->qafs_suof_buf + sch->sch_offset))->soh_img_length;
+
+ qsi->qsi_css_header = qsi->qsi_simg_buf;
+ qsi->qsi_css_key = qsi->qsi_css_header + sizeof(struct css_hdr);
+ qsi->qsi_css_signature = qsi->qsi_css_key +
+ CSS_FWSK_MODULUS_LEN + CSS_FWSK_EXPONENT_LEN;
+ qsi->qsi_css_simg = qsi->qsi_css_signature + CSS_SIGNATURE_LEN;
+
+ ae_mode = (struct simg_ae_mode *)qsi->qsi_css_simg;
+ qsi->qsi_ae_mask = ae_mode->sam_ae_mask;
+ qsi->qsi_simg_name = (u_long)&ae_mode->sam_simg_name;
+ qsi->qsi_appmeta_data = (u_long)&ae_mode->sam_appmeta_data;
+ qsi->qsi_fw_type = ae_mode->sam_fw_type;
+
+ if (ae_mode->sam_dev_type != sc->sc_hw.qhw_prod_type)
+ return EINVAL;
+
+ maj_ver = (QAT_PID_MAJOR_REV | (sc->sc_rev & QAT_PID_MINOR_REV)) & 0xff;
+ if ((maj_ver > ae_mode->sam_devmax_ver) ||
+ (maj_ver < ae_mode->sam_devmin_ver)) {
+ return EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+qat_aefw_suof_parse(struct qat_softc *sc)
+{
+ struct suof_file_hdr *sfh;
+ struct suof_chunk_hdr *sch;
+ struct qat_aefw_suof *qafs = &sc->sc_aefw_suof;
+ struct qat_suof_image *qsi;
+ size_t size;
+ u_int csum;
+ int ae0_img = MAX_AE;
+ int i, error;
+
+ size = sc->sc_fw_suof_size;
+ if (size < sizeof(struct suof_file_hdr))
+ return EINVAL;
+
+ sfh = sc->sc_fw_suof;
+
+ if (sfh->sfh_file_id != SUOF_FID)
+ return EINVAL;
+ if (sfh->sfh_fw_type != 0)
+ return EINVAL;
+ if (sfh->sfh_num_chunks <= 1)
+ return EINVAL;
+ if (sfh->sfh_min_ver != SUOF_MIN_VER ||
+ sfh->sfh_maj_ver != SUOF_MAJ_VER)
+ return EINVAL;
+
+ csum = qat_aefw_csum((char *)&sfh->sfh_min_ver,
+ size - offsetof(struct suof_file_hdr, sfh_min_ver));
+ if (csum != sfh->sfh_check_sum)
+ return EINVAL;
+
+ size -= sizeof(struct suof_file_hdr);
+
+ qafs->qafs_file_id = SUOF_FID;
+ qafs->qafs_suof_buf = sc->sc_fw_suof;
+ qafs->qafs_suof_size = sc->sc_fw_suof_size;
+ qafs->qafs_check_sum = sfh->sfh_check_sum;
+ qafs->qafs_min_ver = sfh->sfh_min_ver;
+ qafs->qafs_maj_ver = sfh->sfh_maj_ver;
+ qafs->qafs_fw_type = sfh->sfh_fw_type;
+
+ if (size < sizeof(struct suof_chunk_hdr))
+ return EINVAL;
+ sch = (struct suof_chunk_hdr *)(sfh + 1);
+ size -= sizeof(struct suof_chunk_hdr);
+
+ if (size < sizeof(struct suof_str_tab))
+ return EINVAL;
+ size -= offsetof(struct suof_str_tab, sst_strings);
+
+ qafs->qafs_sym_size = ((struct suof_str_tab *)
+ (qafs->qafs_suof_buf + sch->sch_offset))->sst_tab_length;
+ if (size < qafs->qafs_sym_size)
+ return EINVAL;
+ qafs->qafs_sym_str = qafs->qafs_suof_buf + sch->sch_offset +
+ offsetof(struct suof_str_tab, sst_strings);
+
+ qafs->qafs_num_simgs = sfh->sfh_num_chunks - 1;
+ if (qafs->qafs_num_simgs == 0)
+ return EINVAL;
+
+ qsi = qat_alloc_mem(
+ sizeof(struct qat_suof_image) * qafs->qafs_num_simgs);
+ qafs->qafs_simg = qsi;
+
+ for (i = 0; i < qafs->qafs_num_simgs; i++) {
+ error = qat_aefw_suof_parse_image(sc, &qsi[i], &sch[i + 1]);
+ if (error)
+ return error;
+ if ((qsi[i].qsi_ae_mask & 0x1) != 0)
+ ae0_img = i;
+ }
+
+ if (ae0_img != qafs->qafs_num_simgs - 1) {
+ struct qat_suof_image last_qsi;
+
+ memcpy(&last_qsi, &qsi[qafs->qafs_num_simgs - 1],
+ sizeof(struct qat_suof_image));
+ memcpy(&qsi[qafs->qafs_num_simgs - 1], &qsi[ae0_img],
+ sizeof(struct qat_suof_image));
+ memcpy(&qsi[ae0_img], &last_qsi,
+ sizeof(struct qat_suof_image));
+ }
+
+ return 0;
+}
+
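+/*
+ * Build the DMA-visible authentication descriptor for a signed image:
+ * the auth chunk is followed by the CSS header, the zero-padded public
+ * key, the signature and the image payload, with the bus address of each
+ * part recorded in the descriptor for the FCU.
+ */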
+static int
+qat_aefw_alloc_auth_dmamem(struct qat_softc *sc, char *image, size_t size,
+ struct qat_dmamem *dma)
+{
+ struct css_hdr *css = (struct css_hdr *)image;
+ struct auth_chunk *auth_chunk;
+ struct fw_auth_desc *auth_desc;
+ size_t mapsize, simg_offset = sizeof(struct auth_chunk);
+ bus_size_t bus_addr;
+ uintptr_t virt_addr;
+ int error;
+
+ if (size > AE_IMG_OFFSET + CSS_MAX_IMAGE_LEN)
+ return EINVAL;
+
+ mapsize = (css->css_fw_type == CSS_AE_FIRMWARE) ?
+ CSS_AE_SIMG_LEN + simg_offset :
+ size + CSS_FWSK_PAD_LEN + simg_offset;
+ error = qat_alloc_dmamem(sc, dma, 1, mapsize, PAGE_SIZE);
+ if (error)
+ return error;
+
+ memset(dma->qdm_dma_vaddr, 0, mapsize);
+
+ auth_chunk = dma->qdm_dma_vaddr;
+ auth_chunk->ac_chunk_size = mapsize;
+ auth_chunk->ac_chunk_bus_addr = dma->qdm_dma_seg.ds_addr;
+
+ virt_addr = (uintptr_t)dma->qdm_dma_vaddr;
+ virt_addr += simg_offset;
+ bus_addr = auth_chunk->ac_chunk_bus_addr;
+ bus_addr += simg_offset;
+
+ auth_desc = &auth_chunk->ac_fw_auth_desc;
+ auth_desc->fad_css_hdr_high = (uint64_t)bus_addr >> 32;
+ auth_desc->fad_css_hdr_low = bus_addr;
+
+ memcpy((void *)virt_addr, image, sizeof(struct css_hdr));
+ /* pub key */
+ virt_addr += sizeof(struct css_hdr);
+ bus_addr += sizeof(struct css_hdr);
+ image += sizeof(struct css_hdr);
+
+ auth_desc->fad_fwsk_pub_high = (uint64_t)bus_addr >> 32;
+ auth_desc->fad_fwsk_pub_low = bus_addr;
+
+ memcpy((void *)virt_addr, image, CSS_FWSK_MODULUS_LEN);
+ memset((void *)(virt_addr + CSS_FWSK_MODULUS_LEN), 0, CSS_FWSK_PAD_LEN);
+ memcpy((void *)(virt_addr + CSS_FWSK_MODULUS_LEN + CSS_FWSK_PAD_LEN),
+ image + CSS_FWSK_MODULUS_LEN, sizeof(uint32_t));
+
+ virt_addr += CSS_FWSK_PUB_LEN;
+ bus_addr += CSS_FWSK_PUB_LEN;
+ image += CSS_FWSK_MODULUS_LEN + CSS_FWSK_EXPONENT_LEN;
+
+ auth_desc->fad_signature_high = (uint64_t)bus_addr >> 32;
+ auth_desc->fad_signature_low = bus_addr;
+
+ memcpy((void *)virt_addr, image, CSS_SIGNATURE_LEN);
+
+ virt_addr += CSS_SIGNATURE_LEN;
+ bus_addr += CSS_SIGNATURE_LEN;
+ image += CSS_SIGNATURE_LEN;
+
+ auth_desc->fad_img_high = (uint64_t)bus_addr >> 32;
+ auth_desc->fad_img_low = bus_addr;
+ auth_desc->fad_img_len = size - AE_IMG_OFFSET;
+
+ memcpy((void *)virt_addr, image, auth_desc->fad_img_len);
+
+ if (css->css_fw_type == CSS_AE_FIRMWARE) {
+ auth_desc->fad_img_ae_mode_data_high = auth_desc->fad_img_high;
+ auth_desc->fad_img_ae_mode_data_low = auth_desc->fad_img_low;
+
+ bus_addr += sizeof(struct simg_ae_mode);
+
+ auth_desc->fad_img_ae_init_data_high = (uint64_t)bus_addr >> 32;
+ auth_desc->fad_img_ae_init_data_low = bus_addr;
+
+ bus_addr += SIMG_AE_INIT_SEQ_LEN;
+
+ auth_desc->fad_img_ae_insts_high = (uint64_t)bus_addr >> 32;
+ auth_desc->fad_img_ae_insts_low = bus_addr;
+ } else {
+ auth_desc->fad_img_ae_insts_high = auth_desc->fad_img_high;
+ auth_desc->fad_img_ae_insts_low = auth_desc->fad_img_low;
+ }
+
+ bus_dmamap_sync(dma->qdm_dma_tag, dma->qdm_dma_map,
+ BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+
+ return 0;
+}
+
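+/*
+ * Point the FCU at an authentication descriptor and poll until the
+ * firmware signature has been verified.
+ */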
+static int
+qat_aefw_auth(struct qat_softc *sc, struct qat_dmamem *dma)
+{
+ bus_addr_t addr;
+ uint32_t fcu, sts;
+ int retry = 0;
+
+ addr = dma->qdm_dma_seg.ds_addr;
+ qat_cap_global_write_4(sc, FCU_DRAM_ADDR_HI, (uint64_t)addr >> 32);
+ qat_cap_global_write_4(sc, FCU_DRAM_ADDR_LO, addr);
+ qat_cap_global_write_4(sc, FCU_CTRL, FCU_CTRL_CMD_AUTH);
+
+ do {
+ DELAY(FW_AUTH_WAIT_PERIOD * 1000);
+ fcu = qat_cap_global_read_4(sc, FCU_STATUS);
+ sts = __SHIFTOUT(fcu, FCU_STATUS_STS);
+ if (sts == FCU_STATUS_STS_VERI_FAIL)
+ goto fail;
+ if (fcu & FCU_STATUS_AUTHFWLD &&
+ sts == FCU_STATUS_STS_VERI_DONE) {
+ return 0;
+ }
+ } while (retry++ < FW_AUTH_MAX_RETRY);
+
+fail:
+ device_printf(sc->sc_dev,
+ "firmware authentication error: status 0x%08x retry %d\n",
+ fcu, retry);
+ return EINVAL;
+}
+
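+/*
+ * Ask the FCU to load the authenticated image into each AE covered by
+ * the image's AE mask, polling for per-AE load completion.
+ */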
+static int
+qat_aefw_suof_load(struct qat_softc *sc, struct qat_dmamem *dma)
+{
+ struct simg_ae_mode *ae_mode;
+ uint32_t fcu, sts, loaded;
+ u_int mask;
+ u_char ae;
+ int retry = 0;
+
+ ae_mode = (struct simg_ae_mode *)((uintptr_t)dma->qdm_dma_vaddr +
+ sizeof(struct auth_chunk) + sizeof(struct css_hdr) +
+ CSS_FWSK_PUB_LEN + CSS_SIGNATURE_LEN);
+
+ for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+ if (!((ae_mode->sam_ae_mask >> ae) & 0x1))
+ continue;
+ if (qat_ae_is_active(sc, ae)) {
+ device_printf(sc->sc_dev, "AE %d is active\n", ae);
+ return EINVAL;
+ }
+ qat_cap_global_write_4(sc, FCU_CTRL,
+ FCU_CTRL_CMD_LOAD | __SHIFTIN(ae, FCU_CTRL_AE));
+ do {
+ DELAY(FW_AUTH_WAIT_PERIOD * 1000);
+ fcu = qat_cap_global_read_4(sc, FCU_STATUS);
+ sts = __SHIFTOUT(fcu, FCU_STATUS_STS);
+ loaded = __SHIFTOUT(fcu, FCU_STATUS_LOADED_AE);
+ if (sts == FCU_STATUS_STS_LOAD_DONE &&
+ (loaded & (1 << ae))) {
+ break;
+ }
+ } while (retry++ < FW_AUTH_MAX_RETRY);
+
+ if (retry > FW_AUTH_MAX_RETRY) {
+ device_printf(sc->sc_dev,
+ "firmware load timeout: status %08x\n", fcu);
+ return EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int
+qat_aefw_suof_write(struct qat_softc *sc)
+{
+ struct qat_suof_image *qsi = NULL;
+ int i, error = 0;
+
+ for (i = 0; i < sc->sc_aefw_suof.qafs_num_simgs; i++) {
+ qsi = &sc->sc_aefw_suof.qafs_simg[i];
+ error = qat_aefw_alloc_auth_dmamem(sc, qsi->qsi_simg_buf,
+ qsi->qsi_simg_len, &qsi->qsi_dma);
+ if (error)
+ return error;
+ error = qat_aefw_auth(sc, &qsi->qsi_dma);
+ if (error)
+ goto fail;
+ error = qat_aefw_suof_load(sc, &qsi->qsi_dma);
+ if (error)
+ goto fail;
+
+ qat_free_dmamem(sc, &qsi->qsi_dma);
+ }
+ qat_free_mem(sc->sc_aefw_suof.qafs_simg);
+
+ return 0;
+fail:
+ if (qsi != NULL)
+ qat_free_dmamem(sc, &qsi->qsi_dma);
+ return error;
+}
+
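+/*
+ * Bind a UOF image to a free slice of the AE and associate each of its
+ * pages with the corresponding page region.
+ */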
+static int
+qat_aefw_uof_assign_image(struct qat_softc *sc, struct qat_ae *qae,
+ struct qat_uof_image *qui)
+{
+ struct qat_ae_slice *slice;
+ int i, npages, nregions;
+
+ if (qae->qae_num_slices >= nitems(qae->qae_slices))
+ return ENOENT;
+
+ if (qui->qui_image->ui_ae_mode &
+ (AE_MODE_RELOAD_CTX_SHARED | AE_MODE_SHARED_USTORE)) {
+ /* XXX */
+ device_printf(sc->sc_dev,
+ "shared ae mode is not supported yet\n");
+ return ENOTSUP;
+ }
+
+ qae->qae_shareable_ustore = 0; /* XXX */
+ qae->qae_effect_ustore_size = USTORE_SIZE;
+
+ slice = &qae->qae_slices[qae->qae_num_slices];
+
+ slice->qas_image = qui;
+ slice->qas_assigned_ctx_mask = qui->qui_image->ui_ctx_assigned;
+
+ nregions = qui->qui_image->ui_num_page_regions;
+ npages = qui->qui_image->ui_num_pages;
+
+ if (nregions > nitems(slice->qas_regions))
+ return ENOENT;
+ if (npages > nitems(slice->qas_pages))
+ return ENOENT;
+
+ for (i = 0; i < nregions; i++) {
+ STAILQ_INIT(&slice->qas_regions[i].qar_waiting_pages);
+ }
+ for (i = 0; i < npages; i++) {
+ struct qat_ae_page *page = &slice->qas_pages[i];
+ int region;
+
+ page->qap_page = &qui->qui_pages[i];
+ region = page->qap_page->qup_page_region;
+ if (region >= nregions)
+ return EINVAL;
+
+ page->qap_region = &slice->qas_regions[region];
+ }
+
+ qae->qae_num_slices++;
+
+ return 0;
+}
+
+static int
+qat_aefw_uof_init_ae(struct qat_softc *sc, u_char ae)
+{
+ struct uof_image *image;
+ struct qat_ae *qae = &(QAT_AE(sc, ae));
+ int s;
+ u_char nn_mode;
+
+ for (s = 0; s < qae->qae_num_slices; s++) {
+ if (qae->qae_slices[s].qas_image == NULL)
+ continue;
+
+ image = qae->qae_slices[s].qas_image->qui_image;
+ qat_ae_write_ctx_mode(sc, ae,
+ __SHIFTOUT(image->ui_ae_mode, AE_MODE_CTX_MODE));
+
+ nn_mode = __SHIFTOUT(image->ui_ae_mode, AE_MODE_NN_MODE);
+ if (nn_mode != AE_MODE_NN_MODE_DONTCARE)
+ qat_ae_write_nn_mode(sc, ae, nn_mode);
+
+ qat_ae_write_lm_mode(sc, ae, AEREG_LMEM0,
+ __SHIFTOUT(image->ui_ae_mode, AE_MODE_LMEM0));
+ qat_ae_write_lm_mode(sc, ae, AEREG_LMEM1,
+ __SHIFTOUT(image->ui_ae_mode, AE_MODE_LMEM1));
+
+ qat_ae_write_shared_cs_mode(sc, ae,
+ __SHIFTOUT(image->ui_ae_mode, AE_MODE_SHARED_USTORE));
+ qat_ae_set_reload_ustore(sc, ae, image->ui_reloadable_size,
+ __SHIFTOUT(image->ui_ae_mode, AE_MODE_RELOAD_CTX_SHARED),
+ qae->qae_reloc_ustore_dram);
+ }
+
+ return 0;
+}
+
+static int
+qat_aefw_uof_init(struct qat_softc *sc)
+{
+ int ae, i, error;
+ uint32_t mask;
+
+ for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
+ struct qat_ae *qae;
+
+ if (!(mask & 1))
+ continue;
+
+ qae = &(QAT_AE(sc, ae));
+
+ for (i = 0; i < sc->sc_aefw_uof.qafu_num_imgs; i++) {
+ if ((sc->sc_aefw_uof.qafu_imgs[i].qui_image->ui_ae_assigned &
+ (1 << ae)) == 0)
+ continue;
+
+ error = qat_aefw_uof_assign_image(sc, qae,
+ &sc->sc_aefw_uof.qafu_imgs[i]);
+ if (error)
+ return error;
+ }
+
+ /* XXX UcLo_initNumUwordUsed */
+
+ qae->qae_reloc_ustore_dram = UINT_MAX; /* XXX */
+
+ error = qat_aefw_uof_init_ae(sc, ae);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
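+/*
+ * Load the AE firmware: fetch and parse the MOF and MMP images, then
+ * write the firmware through either the signed (SUOF) or the unsigned
+ * (UOF) path, depending on whether the hardware requires authentication.
+ */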
+int
+qat_aefw_load(struct qat_softc *sc)
+{
+ int error;
+
+ error = qat_aefw_load_mof(sc);
+ if (error)
+ return error;
+
+ error = qat_aefw_load_mmp(sc);
+ if (error)
+ return error;
+
+ error = qat_aefw_mof_parse(sc);
+ if (error) {
+ device_printf(sc->sc_dev, "couldn't parse mof: %d\n", error);
+ return error;
+ }
+
+ if (sc->sc_hw.qhw_fw_auth) {
+ error = qat_aefw_suof_parse(sc);
+ if (error) {
+ device_printf(sc->sc_dev, "couldn't parse suof: %d\n",
+ error);
+ return error;
+ }
+
+ error = qat_aefw_suof_write(sc);
+ if (error) {
+ device_printf(sc->sc_dev,
+ "could not write firmware: %d\n", error);
+ return error;
+ }
+
+ } else {
+ error = qat_aefw_uof_parse(sc);
+ if (error) {
+ device_printf(sc->sc_dev, "couldn't parse uof: %d\n",
+ error);
+ return error;
+ }
+
+ error = qat_aefw_uof_init(sc);
+ if (error) {
+ device_printf(sc->sc_dev,
+ "couldn't initialize AE firmware: %d\n", error);
+ return error;
+ }
+
+ error = qat_aefw_uof_write(sc);
+ if (error) {
+ device_printf(sc->sc_dev,
+ "Could not write firmware: %d\n", error);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+void
+qat_aefw_unload(struct qat_softc *sc)
+{
+ qat_aefw_unload_mmp(sc);
+ qat_aefw_unload_mof(sc);
+}
+
+int
+qat_aefw_start(struct qat_softc *sc, u_char ae, u_int ctx_mask)
+{
+ uint32_t fcu;
+ int retry = 0;
+
+ if (sc->sc_hw.qhw_fw_auth) {
+ qat_cap_global_write_4(sc, FCU_CTRL, FCU_CTRL_CMD_START);
+ do {
+ DELAY(FW_AUTH_WAIT_PERIOD * 1000);
+ fcu = qat_cap_global_read_4(sc, FCU_STATUS);
+ if (fcu & FCU_STATUS_DONE)
+ return 0;
+ } while (retry++ < FW_AUTH_MAX_RETRY);
+
+ device_printf(sc->sc_dev,
+ "firmware start timeout: status %08x\n", fcu);
+ return EINVAL;
+ } else {
+ qat_ae_ctx_indr_write(sc, ae, (~ctx_mask) & AE_ALL_CTX,
+ CTX_WAKEUP_EVENTS_INDIRECT,
+ CTX_WAKEUP_EVENTS_INDIRECT_SLEEP);
+ qat_ae_enable_ctx(sc, ae, ctx_mask);
+ }
+
+ return 0;
+}
+
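+/*
+ * Queue batch-init entries for a single UOF memory-init record; only
+ * local memory (LMEM) initialization is supported.
+ */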
+static int
+qat_aefw_init_memory_one(struct qat_softc *sc, struct uof_init_mem *uim)
+{
+ struct qat_aefw_uof *qafu = &sc->sc_aefw_uof;
+ struct qat_ae_batch_init_list *qabi_list;
+ struct uof_mem_val_attr *memattr;
+ size_t *curinit;
+ u_long ael;
+ int i;
+ const char *sym;
+ char *ep;
+
+ memattr = (struct uof_mem_val_attr *)(uim + 1);
+
+ switch (uim->uim_region) {
+ case LMEM_REGION:
+ if ((uim->uim_addr + uim->uim_num_bytes) > MAX_LMEM_REG * 4) {
+ device_printf(sc->sc_dev,
+ "Invalid lmem addr or bytes\n");
+ return ENOBUFS;
+ }
+ if (uim->uim_scope != UOF_SCOPE_LOCAL)
+ return EINVAL;
+ sym = qat_aefw_uof_string(sc, uim->uim_sym_name);
+ ael = strtoul(sym, &ep, 10);
+ if (ep == sym || ael > MAX_AE)
+ return EINVAL;
+ if ((sc->sc_ae_mask & (1 << ael)) == 0)
+ return 0; /* ae is fused out */
+
+ curinit = &qafu->qafu_num_lm_init[ael];
+ qabi_list = &qafu->qafu_lm_init[ael];
+
+ for (i = 0; i < uim->uim_num_val_attr; i++, memattr++) {
+ struct qat_ae_batch_init *qabi;
+
+ qabi = qat_alloc_mem(sizeof(struct qat_ae_batch_init));
+ if (*curinit == 0)
+ STAILQ_INIT(qabi_list);
+ STAILQ_INSERT_TAIL(qabi_list, qabi, qabi_next);
+
+ qabi->qabi_ae = (u_int)ael;
+ qabi->qabi_addr =
+ uim->uim_addr + memattr->umva_byte_offset;
+ qabi->qabi_value = &memattr->umva_value;
+ qabi->qabi_size = 4;
+ qafu->qafu_num_lm_init_inst[ael] +=
+ qat_ae_get_inst_num(qabi->qabi_size);
+ (*curinit)++;
+ if (*curinit >= MAX_LMEM_REG) {
+ device_printf(sc->sc_dev,
+ "Invalid lmem val attr\n");
+ return ENOBUFS;
+ }
+ }
+ break;
+ case SRAM_REGION:
+ case DRAM_REGION:
+ case DRAM1_REGION:
+ case SCRATCH_REGION:
+ case UMEM_REGION:
+ /* XXX */
+ /* fallthrough */
+ default:
+ device_printf(sc->sc_dev,
+ "unsupported memory region to init: %d\n",
+ uim->uim_region);
+ return ENOTSUP;
+ }
+
+ return 0;
+}
+
+static void
+qat_aefw_free_lm_init(struct qat_softc *sc, u_char ae)
+{
+ struct qat_aefw_uof *qafu = &sc->sc_aefw_uof;
+ struct qat_ae_batch_init *qabi;
+
+ while ((qabi = STAILQ_FIRST(&qafu->qafu_lm_init[ae])) != NULL) {
+ STAILQ_REMOVE_HEAD(&qafu->qafu_lm_init[ae], qabi_next);
+ qat_free_mem(qabi);
+ }
+
+ qafu->qafu_num_lm_init[ae] = 0;
+ qafu->qafu_num_lm_init_inst[ae] = 0;
+}
+
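+/*
+ * Write the image's fill pattern into the portions of each assigned AE's
+ * ustore that will not be overwritten by a default page.
+ */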
+static int
+qat_aefw_init_ustore(struct qat_softc *sc)
+{
+ uint64_t *fill;
+ uint32_t dont_init;
+ int a, i, p;
+ int error = 0;
+ int usz, end, start;
+ u_char ae, nae;
+
+ fill = qat_alloc_mem(MAX_USTORE * sizeof(uint64_t));
+
+ for (a = 0; a < sc->sc_aefw_uof.qafu_num_imgs; a++) {
+ struct qat_uof_image *qui = &sc->sc_aefw_uof.qafu_imgs[a];
+ struct uof_image *ui = qui->qui_image;
+
+ for (i = 0; i < MAX_USTORE; i++)
+ memcpy(&fill[i], ui->ui_fill_pattern, sizeof(uint64_t));
+ /*
+ * Compute a do_not_init value that will not be equal to the fill
+ * data when truncated to 32 bits.
+ */
+ dont_init = 0;
+ if (dont_init == (uint32_t)fill[0])
+ dont_init = 0xffffffff;
+
+ for (p = 0; p < ui->ui_num_pages; p++) {
+ struct qat_uof_page *qup = &qui->qui_pages[p];
+ if (!qup->qup_def_page)
+ continue;
+
+ for (i = qup->qup_beg_paddr;
+ i < qup->qup_beg_paddr + qup->qup_num_micro_words;
+ i++ ) {
+ fill[i] = (uint64_t)dont_init;
+ }
+ }
+
+ for (ae = 0; ae < sc->sc_ae_num; ae++) {
+ MPASS(ae < UOF_MAX_NUM_OF_AE);
+ if ((ui->ui_ae_assigned & (1 << ae)) == 0)
+ continue;
+
+ if (QAT_AE(sc, ae).qae_shareable_ustore && (ae & 1)) {
+ qat_ae_get_shared_ustore_ae(ae, &nae);
+ if (ui->ui_ae_assigned & (1 << nae))
+ continue;
+ }
+ usz = QAT_AE(sc, ae).qae_effect_ustore_size;
+
+ /* initialize the areas that will not be overwritten */
+ end = -1;
+ do {
+ /* find next uword that needs to be initialized */
+ for (start = end + 1; start < usz; start++) {
+ if ((uint32_t)fill[start] != dont_init)
+ break;
+ }
+ /* see if there are no more such uwords */
+ if (start >= usz)
+ break;
+ for (end = start + 1; end < usz; end++) {
+ if ((uint32_t)fill[end] == dont_init)
+ break;
+ }
+ if (QAT_AE(sc, ae).qae_shareable_ustore) {
+ error = ENOTSUP; /* XXX */
+ goto out;
+ } else {
+ error = qat_ae_ucode_write(sc, ae,
+ start, end - start, &fill[start]);
+ if (error) {
+ goto out;
+ }
+ }
+
+ } while (end < usz);
+ }
+ }
+
+out:
+ qat_free_mem(fill);
+ return error;
+}
+
+static int
+qat_aefw_init_reg(struct qat_softc *sc, u_char ae, u_char ctx_mask,
+ enum aereg_type regtype, u_short regaddr, u_int value)
+{
+ int error = 0;
+ u_char ctx;
+
+ switch (regtype) {
+ case AEREG_GPA_REL:
+ case AEREG_GPB_REL:
+ case AEREG_SR_REL:
+ case AEREG_SR_RD_REL:
+ case AEREG_SR_WR_REL:
+ case AEREG_DR_REL:
+ case AEREG_DR_RD_REL:
+ case AEREG_DR_WR_REL:
+ case AEREG_NEIGH_REL:
+ /* init for all valid ctx */
+ for (ctx = 0; ctx < MAX_AE_CTX; ctx++) {
+ if ((ctx_mask & (1 << ctx)) == 0)
+ continue;
+ error = qat_aereg_rel_data_write(sc, ae, ctx, regtype,
+ regaddr, value);
+ }
+ break;
+ case AEREG_GPA_ABS:
+ case AEREG_GPB_ABS:
+ case AEREG_SR_ABS:
+ case AEREG_SR_RD_ABS:
+ case AEREG_SR_WR_ABS:
+ case AEREG_DR_ABS:
+ case AEREG_DR_RD_ABS:
+ case AEREG_DR_WR_ABS:
+ error = qat_aereg_abs_data_write(sc, ae, regtype,
+ regaddr, value);
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ return error;
+}
+
+static int
+qat_aefw_init_reg_sym_expr(struct qat_softc *sc, u_char ae,
+ struct qat_uof_image *qui)
+{
+ u_int i, expres;
+ u_char ctx_mask;
+
+ for (i = 0; i < qui->qui_num_init_reg_sym; i++) {
+ struct uof_init_reg_sym *uirs = &qui->qui_init_reg_sym[i];
+
+ if (uirs->uirs_value_type == EXPR_VAL) {
+ /* XXX */
+ device_printf(sc->sc_dev,
+ "does not support initializing EXPR_VAL\n");
+ return ENOTSUP;
+ } else {
+ expres = uirs->uirs_value;
+ }
+
+ switch (uirs->uirs_init_type) {
+ case INIT_REG:
+ if (__SHIFTOUT(qui->qui_image->ui_ae_mode,
+ AE_MODE_CTX_MODE) == MAX_AE_CTX) {
+ ctx_mask = 0xff; /* 8-ctx mode */
+ } else {
+ ctx_mask = 0x55; /* 4-ctx mode */
+ }
+ qat_aefw_init_reg(sc, ae, ctx_mask,
+ (enum aereg_type)uirs->uirs_reg_type,
+ (u_short)uirs->uirs_addr_offset, expres);
+ break;
+ case INIT_REG_CTX:
+ if (__SHIFTOUT(qui->qui_image->ui_ae_mode,
+ AE_MODE_CTX_MODE) == MAX_AE_CTX) {
+ ctx_mask = 0xff; /* 8-ctx mode */
+ } else {
+ ctx_mask = 0x55; /* 4-ctx mode */
+ }
+ if (((1 << uirs->uirs_ctx) & ctx_mask) == 0)
+ return EINVAL;
+ qat_aefw_init_reg(sc, ae, 1 << uirs->uirs_ctx,
+ (enum aereg_type)uirs->uirs_reg_type,
+ (u_short)uirs->uirs_addr_offset, expres);
+ break;
+ case INIT_EXPR:
+ case INIT_EXPR_ENDIAN_SWAP:
+ default:
+ device_printf(sc->sc_dev,
+ "does not support initializing init_type %d\n",
+ uirs->uirs_init_type);
+ return ENOTSUP;
+ }
+ }
+
+ return 0;
+}
+
+static int
+qat_aefw_init_memory(struct qat_softc *sc)
+{
+ struct qat_aefw_uof *qafu = &sc->sc_aefw_uof;
+ size_t uimsz, initmemsz = qafu->qafu_init_mem_size;
+ struct uof_init_mem *uim;
+ int error, i;
+ u_char ae;
+
+ uim = qafu->qafu_init_mem;
+ for (i = 0; i < qafu->qafu_num_init_mem; i++) {
+ uimsz = sizeof(struct uof_init_mem) +
+ sizeof(struct uof_mem_val_attr) * uim->uim_num_val_attr;
+ if (uimsz > initmemsz) {
+ device_printf(sc->sc_dev,
+ "invalid uof_init_mem or uof_mem_val_attr size\n");
+ return EINVAL;
+ }
+
+ if (uim->uim_num_bytes > 0) {
+ error = qat_aefw_init_memory_one(sc, uim);
+ if (error) {
+ device_printf(sc->sc_dev,
+ "Could not init ae memory: %d\n", error);
+ return error;
+ }
+ }
+ uim = (struct uof_init_mem *)((uintptr_t)uim + uimsz);
+ initmemsz -= uimsz;
+ }
+
+ /* run Batch put LM API */
+ for (ae = 0; ae < MAX_AE; ae++) {
+ error = qat_ae_batch_put_lm(sc, ae, &qafu->qafu_lm_init[ae],
+ qafu->qafu_num_lm_init_inst[ae]);
+ if (error)
+ device_printf(sc->sc_dev, "Could not write AE local memory\n");
+
+ qat_aefw_free_lm_init(sc, ae);
+ }
+
+ error = qat_aefw_init_ustore(sc);
+
+ /* XXX run Batch put LM API */
+
+ return error;
+}
+
+static int
+qat_aefw_init_globals(struct qat_softc *sc)
+{
+ struct qat_aefw_uof *qafu = &sc->sc_aefw_uof;
+ int error, i, p, s;
+ u_char ae;
+
+ /* initialize the memory segments */
+ if (qafu->qafu_num_init_mem > 0) {
+ error = qat_aefw_init_memory(sc);
+ if (error)
+ return error;
+ } else {
+ error = qat_aefw_init_ustore(sc);
+ if (error)
+ return error;
+ }
+
+ /* XXX bind import variables with ivd values */
+
+ /* XXX bind the uC global variables;
+ * local variables will be bound on the fly */
+ for (i = 0; i < sc->sc_aefw_uof.qafu_num_imgs; i++) {
+ for (p = 0; p < sc->sc_aefw_uof.qafu_imgs[i].qui_image->ui_num_pages; p++) {
+ struct qat_uof_page *qup =
+ &sc->sc_aefw_uof.qafu_imgs[i].qui_pages[p];
+ if (qup->qup_num_uw_blocks &&
+ (qup->qup_num_uc_var || qup->qup_num_imp_var)) {
+ device_printf(sc->sc_dev,
+ "uC global variables are not supported\n");
+ return ENOTSUP;
+ }
+ }
+ }
+
+ for (ae = 0; ae < sc->sc_ae_num; ae++) {
+ struct qat_ae *qae = &(QAT_AE(sc, ae));
+
+ for (s = 0; s < qae->qae_num_slices; s++) {
+ struct qat_ae_slice *qas = &qae->qae_slices[s];
+
+ if (qas->qas_image == NULL)
+ continue;
+
+ error =
+ qat_aefw_init_reg_sym_expr(sc, ae, qas->qas_image);
+ if (error)
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+static uint64_t
+qat_aefw_get_uof_inst(struct qat_softc *sc, struct qat_uof_page *qup,
+ u_int addr)
+{
+ uint64_t uinst = 0;
+ u_int i;
+
+ /* find the block */
+ for (i = 0; i < qup->qup_num_uw_blocks; i++) {
+ struct qat_uof_uword_block *quub = &qup->qup_uw_blocks[i];
+
+ if ((addr >= quub->quub_start_addr) &&
+ (addr <= (quub->quub_start_addr +
+ (quub->quub_num_words - 1)))) {
+ /*
+ * Unpack the packed bytes and assign them to the 64-bit uword
+ * value; note: the microwords are stored as packed bytes.
+ */
+ addr -= quub->quub_start_addr;
+ addr *= AEV2_PACKED_UWORD_BYTES;
+ memcpy(&uinst,
+ (void *)((uintptr_t)quub->quub_micro_words + addr),
+ AEV2_PACKED_UWORD_BYTES);
+ uinst = uinst & UWORD_MASK;
+
+ return uinst;
+ }
+ }
+
+ return INVLD_UWORD;
+}
+
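+/*
+ * Copy a code page's microwords into the AE's ustore, substituting the
+ * fill pattern for holes in the UOF.
+ */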
+static int
+qat_aefw_do_pagein(struct qat_softc *sc, u_char ae, struct qat_uof_page *qup)
+{
+ struct qat_ae *qae = &(QAT_AE(sc, ae));
+ uint64_t fill, *ucode_cpybuf;
+ u_int error, i, upaddr, uraddr, ninst, cpylen;
+
+ if (qup->qup_num_uc_var || qup->qup_num_neigh_reg ||
+ qup->qup_num_imp_var || qup->qup_num_imp_expr) {
+ device_printf(sc->sc_dev,
+ "does not support fixup locals\n");
+ return ENOTSUP;
+ }
+
+ ucode_cpybuf = qat_alloc_mem(UWORD_CPYBUF_SIZE * sizeof(uint64_t));
+
+ /* XXX get fill-pattern from an image -- they are all the same */
+ memcpy(&fill, sc->sc_aefw_uof.qafu_imgs[0].qui_image->ui_fill_pattern,
+ sizeof(uint64_t));
+
+ upaddr = qup->qup_beg_paddr;
+ uraddr = 0;
+ ninst = qup->qup_num_micro_words;
+ while (ninst > 0) {
+ cpylen = min(ninst, UWORD_CPYBUF_SIZE);
+
+ /* load the buffer */
+ for (i = 0; i < cpylen; i++) {
+ /* keep the code structure below in case shared
+ * ustore scenarios need different handling */
+ if (!qae->qae_shareable_ustore) {
+ /* qat_aefw_get_uof_inst() takes an address that
+ * is relative to the start of the page.
+ * So we don't need to add in the physical
+ * offset of the page. */
+ if (qup->qup_page_region != 0) {
+ /* XXX */
+ device_printf(sc->sc_dev,
+ "region != 0 is not supported\n");
+ qat_free_mem(ucode_cpybuf);
+ return ENOTSUP;
+ } else {
+ /* for the mixed case, the physical
+ * address should be used */
+ ucode_cpybuf[i] = qat_aefw_get_uof_inst(
+ sc, qup, upaddr + i);
+ if (ucode_cpybuf[i] == INVLD_UWORD) {
+ /* fill hole in the uof */
+ ucode_cpybuf[i] = fill;
+ }
+ }
+ } else {
+ /* XXX */
+ qat_free_mem(ucode_cpybuf);
+ return ENOTSUP;
+ }
+ }
+
+ /* copy the buffer to ustore */
+ if (!qae->qae_shareable_ustore) {
+ error = qat_ae_ucode_write(sc, ae, upaddr, cpylen,
+ ucode_cpybuf);
+ if (error)
+ return error;
+ } else {
+ /* XXX */
+ qat_free_mem(ucode_cpybuf);
+ return ENOTSUP;
+ }
+ upaddr += cpylen;
+ uraddr += cpylen;
+ ninst -= cpylen;
+ }
+
+ qat_free_mem(ucode_cpybuf);
+
+ return 0;
+}
+
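+/*
+ * Load an image's default pages into every assigned AE and point the
+ * assigned contexts' program counters at the image entry address.
+ */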
+static int
+qat_aefw_uof_write_one(struct qat_softc *sc, struct qat_uof_image *qui)
+{
+ struct uof_image *ui = qui->qui_image;
+ struct qat_ae_page *qap;
+ u_int s, p, c;
+ int error;
+ u_char ae, ctx_mask;
+
+ if (__SHIFTOUT(ui->ui_ae_mode, AE_MODE_CTX_MODE) == MAX_AE_CTX)
+ ctx_mask = 0xff; /* 8-ctx mode */
+ else
+ ctx_mask = 0x55; /* 4-ctx mode */
+
+ /* load the default page and set assigned CTX PC
+ * to the entrypoint address */
+ for (ae = 0; ae < sc->sc_ae_num; ae++) {
+ struct qat_ae *qae = &(QAT_AE(sc, ae));
+ struct qat_ae_slice *qas;
+ u_int metadata;
+
+ MPASS(ae < UOF_MAX_NUM_OF_AE);
+
+ if ((ui->ui_ae_assigned & (1 << ae)) == 0)
+ continue;
+
+ /* find the slice to which this image is assigned */
+ for (s = 0; s < qae->qae_num_slices; s++) {
+ qas = &qae->qae_slices[s];
+ if (ui->ui_ctx_assigned & qas->qas_assigned_ctx_mask)
+ break;
+ }
+ if (s >= qae->qae_num_slices)
+ continue;
+
+ qas = &qae->qae_slices[s];
+
+ for (p = 0; p < ui->ui_num_pages; p++) {
+ qap = &qas->qas_pages[p];
+
+ /* Only load pages loaded by default */
+ if (!qap->qap_page->qup_def_page)
+ continue;
+
+ error = qat_aefw_do_pagein(sc, ae, qap->qap_page);
+ if (error)
+ return error;
+ }
+
+ metadata = qas->qas_image->qui_image->ui_app_metadata;
+ if (metadata != 0xffffffff && bootverbose) {
+ device_printf(sc->sc_dev,
+ "loaded firmware: %s\n",
+ qat_aefw_uof_string(sc, metadata));
+ }
+
+ /* Assume starting page is page 0 */
+ qap = &qas->qas_pages[0];
+ for (c = 0; c < MAX_AE_CTX; c++) {
+ if (ctx_mask & (1 << c))
+ qas->qas_cur_pages[c] = qap;
+ else
+ qas->qas_cur_pages[c] = NULL;
+ }
+
+ /* set the live context */
+ qae->qae_live_ctx_mask = ui->ui_ctx_assigned;
+
+ /* set context PC to the image entrypoint address */
+ error = qat_ae_write_pc(sc, ae, ui->ui_ctx_assigned,
+ ui->ui_entry_address);
+ if (error)
+ return error;
+ }
+
+ /* XXX store the checksum for convenience */
+
+ return 0;
+}
+
+static int
+qat_aefw_uof_write(struct qat_softc *sc)
+{
+ int error = 0;
+ int i;
+
+ error = qat_aefw_init_globals(sc);
+ if (error) {
+ device_printf(sc->sc_dev,
+ "Could not initialize globals\n");
+ return error;
+ }
+
+ for (i = 0; i < sc->sc_aefw_uof.qafu_num_imgs; i++) {
+ error = qat_aefw_uof_write_one(sc,
+ &sc->sc_aefw_uof.qafu_imgs[i]);
+ if (error)
+ break;
+ }
+
+ /* XXX UcLo_computeFreeUstore */
+
+ return error;
+}
diff --git a/sys/dev/qat/qat_aevar.h b/sys/dev/qat/qat_aevar.h
new file mode 100644
index 000000000000..4aea6a5f8e99
--- /dev/null
+++ b/sys/dev/qat/qat_aevar.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
+/* $NetBSD: qat_aevar.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
+
+/*
+ * Copyright (c) 2019 Internet Initiative Japan, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#ifndef _DEV_PCI_QAT_AEVAR_H_
+#define _DEV_PCI_QAT_AEVAR_H_
+
+int qat_ae_init(struct qat_softc *);
+int qat_ae_start(struct qat_softc *);
+void qat_ae_cluster_intr(void *);
+
+int qat_aefw_load(struct qat_softc *);
+void qat_aefw_unload(struct qat_softc *);
+int qat_aefw_start(struct qat_softc *, u_char, u_int);
+
+#endif
diff --git a/sys/dev/qat/qat_c2xxx.c b/sys/dev/qat/qat_c2xxx.c
new file mode 100644
index 000000000000..60ddafe07576
--- /dev/null
+++ b/sys/dev/qat/qat_c2xxx.c
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
+/* $NetBSD: qat_c2xxx.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
+
+/*
+ * Copyright (c) 2019 Internet Initiative Japan, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright(c) 2007-2013 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#if 0
+__KERNEL_RCSID(0, "$NetBSD: qat_c2xxx.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
+#endif
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/systm.h>
+
+#include <machine/bus.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "qatreg.h"
+#include "qat_hw15reg.h"
+#include "qat_c2xxxreg.h"
+#include "qatvar.h"
+#include "qat_hw15var.h"
+
+static uint32_t
+qat_c2xxx_get_accel_mask(struct qat_softc *sc)
+{
+ uint32_t fusectl;
+
+ fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
+
+ return ((~fusectl) & ACCEL_MASK_C2XXX);
+}
+
+static uint32_t
+qat_c2xxx_get_ae_mask(struct qat_softc *sc)
+{
+ uint32_t fusectl;
+
+ fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
+ if (fusectl & (
+ FUSECTL_C2XXX_PKE_DISABLE |
+ FUSECTL_C2XXX_ATH_DISABLE |
+ FUSECTL_C2XXX_CPH_DISABLE)) {
+ return 0;
+ } else {
+ if ((~fusectl & AE_MASK_C2XXX) == 0x3) {
+ /*
+ * With both AEs enabled we get spurious completions on
+ * ETR rings. Work around that for now by simply
+ * disabling the second AE.
+ */
+ device_printf(sc->sc_dev, "disabling second AE\n");
+ fusectl |= 0x2;
+ }
+ return ((~fusectl) & AE_MASK_C2XXX);
+ }
+}
+
+static enum qat_sku
+qat_c2xxx_get_sku(struct qat_softc *sc)
+{
+ uint32_t fusectl;
+
+ fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
+
+ switch (sc->sc_ae_num) {
+ case 1:
+ if (fusectl & FUSECTL_C2XXX_LOW_SKU)
+ return QAT_SKU_3;
+ else if (fusectl & FUSECTL_C2XXX_MID_SKU)
+ return QAT_SKU_2;
+ break;
+ case MAX_AE_C2XXX:
+ return QAT_SKU_1;
+ }
+
+ return QAT_SKU_UNKNOWN;
+}
+
+static uint32_t
+qat_c2xxx_get_accel_cap(struct qat_softc *sc)
+{
+ return QAT_ACCEL_CAP_CRYPTO_SYMMETRIC |
+ QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC |
+ QAT_ACCEL_CAP_CIPHER |
+ QAT_ACCEL_CAP_AUTHENTICATION;
+}
+
+static const char *
+qat_c2xxx_get_fw_uof_name(struct qat_softc *sc)
+{
+ if (sc->sc_rev < QAT_REVID_C2XXX_B0)
+ return AE_FW_UOF_NAME_C2XXX_A0;
+
+ /* QAT_REVID_C2XXX_B0 and QAT_REVID_C2XXX_C0 */
+ return AE_FW_UOF_NAME_C2XXX_B0;
+}
+
+static void
+qat_c2xxx_enable_intr(struct qat_softc *sc)
+{
+
+ qat_misc_write_4(sc, EP_SMIA_C2XXX, EP_SMIA_MASK_C2XXX);
+}
+
+static void
+qat_c2xxx_init_etr_intr(struct qat_softc *sc, int bank)
+{
+ /*
+ * For now, all rings within the bank are set up so that flag
+ * interrupts are generated when a ring leaves the empty state.
+ * Note that for a ring interrupt to generate an IRQ, the
+ * interrupt must also be enabled for that ring.
+ */
+ qat_etr_bank_write_4(sc, bank, ETR_INT_SRCSEL,
+ ETR_INT_SRCSEL_MASK_0_C2XXX);
+ qat_etr_bank_write_4(sc, bank, ETR_INT_SRCSEL_2,
+ ETR_INT_SRCSEL_MASK_X_C2XXX);
+}
+
+const struct qat_hw qat_hw_c2xxx = {
+ .qhw_sram_bar_id = BAR_SRAM_ID_C2XXX,
+ .qhw_misc_bar_id = BAR_PMISC_ID_C2XXX,
+ .qhw_etr_bar_id = BAR_ETR_ID_C2XXX,
+ .qhw_cap_global_offset = CAP_GLOBAL_OFFSET_C2XXX,
+ .qhw_ae_offset = AE_OFFSET_C2XXX,
+ .qhw_ae_local_offset = AE_LOCAL_OFFSET_C2XXX,
+ .qhw_etr_bundle_size = ETR_BUNDLE_SIZE_C2XXX,
+ .qhw_num_banks = ETR_MAX_BANKS_C2XXX,
+ .qhw_num_ap_banks = ETR_MAX_AP_BANKS_C2XXX,
+ .qhw_num_rings_per_bank = ETR_MAX_RINGS_PER_BANK,
+ .qhw_num_accel = MAX_ACCEL_C2XXX,
+ .qhw_num_engines = MAX_AE_C2XXX,
+ .qhw_tx_rx_gap = ETR_TX_RX_GAP_C2XXX,
+ .qhw_tx_rings_mask = ETR_TX_RINGS_MASK_C2XXX,
+ .qhw_msix_ae_vec_gap = MSIX_AE_VEC_GAP_C2XXX,
+ .qhw_fw_auth = false,
+ .qhw_fw_req_size = FW_REQ_DEFAULT_SZ_HW15,
+ .qhw_fw_resp_size = FW_REQ_DEFAULT_SZ_HW15,
+ .qhw_ring_asym_tx = 2,
+ .qhw_ring_asym_rx = 3,
+ .qhw_ring_sym_tx = 4,
+ .qhw_ring_sym_rx = 5,
+ .qhw_mof_fwname = AE_FW_MOF_NAME_C2XXX,
+ .qhw_mmp_fwname = AE_FW_MMP_NAME_C2XXX,
+ .qhw_prod_type = AE_FW_PROD_TYPE_C2XXX,
+ .qhw_get_accel_mask = qat_c2xxx_get_accel_mask,
+ .qhw_get_ae_mask = qat_c2xxx_get_ae_mask,
+ .qhw_get_sku = qat_c2xxx_get_sku,
+ .qhw_get_accel_cap = qat_c2xxx_get_accel_cap,
+ .qhw_get_fw_uof_name = qat_c2xxx_get_fw_uof_name,
+ .qhw_enable_intr = qat_c2xxx_enable_intr,
+ .qhw_init_etr_intr = qat_c2xxx_init_etr_intr,
+ .qhw_init_admin_comms = qat_adm_ring_init,
+ .qhw_send_admin_init = qat_adm_ring_send_init,
+ .qhw_crypto_setup_desc = qat_hw15_crypto_setup_desc,
+ .qhw_crypto_setup_req_params = qat_hw15_crypto_setup_req_params,
+ .qhw_crypto_opaque_offset =
+ offsetof(struct fw_la_resp, comn_resp.opaque_data),
+};
diff --git a/sys/dev/qat/qat_c2xxxreg.h b/sys/dev/qat/qat_c2xxxreg.h
new file mode 100644
index 000000000000..3d5cae51138e
--- /dev/null
+++ b/sys/dev/qat/qat_c2xxxreg.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
+/* $NetBSD: qat_c2xxxreg.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
+
+/*
+ * Copyright (c) 2019 Internet Initiative Japan, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright(c) 2007-2013 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#ifndef _DEV_PCI_QAT_C2XXXREG_H_
+#define _DEV_PCI_QAT_C2XXXREG_H_
+
+/* PCI revision IDs */
+#define QAT_REVID_C2XXX_A0 0x00
+#define QAT_REVID_C2XXX_B0 0x02
+#define QAT_REVID_C2XXX_C0 0x03
+
+/* Max number of accelerators and engines */
+#define MAX_ACCEL_C2XXX 1
+#define MAX_AE_C2XXX 2
+
+/* PCIe BAR index */
+#define BAR_SRAM_ID_C2XXX NO_PCI_REG
+#define BAR_PMISC_ID_C2XXX 0
+#define BAR_ETR_ID_C2XXX 1
+
+#define ACCEL_MASK_C2XXX 0x1
+#define AE_MASK_C2XXX 0x3
+
+#define MSIX_AE_VEC_GAP_C2XXX 8
+
+/* PCIe configuration space registers */
+/* PESRAM: 512K eSRAM */
+#define BAR_PESRAM_C2XXX NO_PCI_REG
+#define BAR_PESRAM_SIZE_C2XXX 0
+
+/*
+ * PMISC: 16K CAP, 16K Scratch, 32K SSU(QATs),
+ * 32K AE CSRs and transfer registers, 8K CHAP/PMU,
+ * 4K EP CSRs, 4K MSI-X Tables
+ */
+#define BAR_PMISC_C2XXX 0x18
+#define BAR_PMISC_SIZE_C2XXX 0x20000 /* 128K */
+
+/* PETRINGCSR: 8K 16 bundles of ET Ring CSRs */
+#define BAR_PETRINGCSR_C2XXX 0x20
+#define BAR_PETRINGCSR_SIZE_C2XXX 0x4000 /* 16K */
+
+/* Fuse Control */
+#define FUSECTL_C2XXX_PKE_DISABLE (1 << 6)
+#define FUSECTL_C2XXX_ATH_DISABLE (1 << 5)
+#define FUSECTL_C2XXX_CPH_DISABLE (1 << 4)
+#define FUSECTL_C2XXX_LOW_SKU (1 << 3)
+#define FUSECTL_C2XXX_MID_SKU (1 << 2)
+#define FUSECTL_C2XXX_AE1_DISABLE (1 << 1)
+
+/* SINT: Signal Target Raw Interrupt Register */
+#define EP_SINTPF_C2XXX 0x1A024
+
+/* SMIA: Signal Target IA Mask Register */
+#define EP_SMIA_C2XXX 0x1A028
+#define EP_SMIA_BUNDLES_IRQ_MASK_C2XXX 0xFF
+#define EP_SMIA_AE_IRQ_MASK_C2XXX 0x10000
+#define EP_SMIA_MASK_C2XXX \
+ (EP_SMIA_BUNDLES_IRQ_MASK_C2XXX | EP_SMIA_AE_IRQ_MASK_C2XXX)
+
+#define EP_RIMISCCTL_C2XXX 0x1A0C4
+#define EP_RIMISCCTL_MASK_C2XXX 0x40000000
+
+#define PFCGCIOSFPRIR_REG_C2XXX 0x2C0
+#define PFCGCIOSFPRIR_MASK_C2XXX 0xFFFF7FFF
+
+/* BAR sub-regions */
+#define PESRAM_BAR_C2XXX NO_PCI_REG
+#define PESRAM_OFFSET_C2XXX 0x0
+#define PESRAM_SIZE_C2XXX 0x0
+#define CAP_GLOBAL_BAR_C2XXX BAR_PMISC_C2XXX
+#define CAP_GLOBAL_OFFSET_C2XXX 0x00000
+#define CAP_GLOBAL_SIZE_C2XXX 0x04000
+#define CAP_HASH_OFFSET 0x900
+#define SCRATCH_BAR_C2XXX NO_PCI_REG
+#define SCRATCH_OFFSET_C2XXX NO_REG_OFFSET
+#define SCRATCH_SIZE_C2XXX 0x0
+#define SSU_BAR_C2XXX BAR_PMISC_C2XXX
+#define SSU_OFFSET_C2XXX 0x08000
+#define SSU_SIZE_C2XXX 0x08000
+#define AE_BAR_C2XXX BAR_PMISC_C2XXX
+#define AE_OFFSET_C2XXX 0x10000
+#define AE_LOCAL_OFFSET_C2XXX 0x10800
+#define PMU_BAR_C2XXX NO_PCI_REG
+#define PMU_OFFSET_C2XXX NO_REG_OFFSET
+#define PMU_SIZE_C2XXX 0x0
+#define EP_BAR_C2XXX BAR_PMISC_C2XXX
+#define EP_OFFSET_C2XXX 0x1A000
+#define EP_SIZE_C2XXX 0x01000
+#define MSIX_TAB_BAR_C2XXX NO_PCI_REG /* mapped by pci(9) */
+#define MSIX_TAB_OFFSET_C2XXX 0x1B000
+#define MSIX_TAB_SIZE_C2XXX 0x01000
+#define PETRINGCSR_BAR_C2XXX BAR_PETRINGCSR_C2XXX
+#define PETRINGCSR_OFFSET_C2XXX 0x0
+#define PETRINGCSR_SIZE_C2XXX 0x0 /* use size of BAR */
+
+/* ETR */
+#define ETR_MAX_BANKS_C2XXX 8
+#define ETR_MAX_ET_RINGS_C2XXX \
+ (ETR_MAX_BANKS_C2XXX * ETR_MAX_RINGS_PER_BANK_C2XXX)
+#define ETR_MAX_AP_BANKS_C2XXX 4
+
+#define ETR_TX_RX_GAP_C2XXX 1
+#define ETR_TX_RINGS_MASK_C2XXX 0x51
+
+#define ETR_BUNDLE_SIZE_C2XXX 0x0200
+
+/* Initial bank Interrupt Source mask */
+#define ETR_INT_SRCSEL_MASK_0_C2XXX 0x4444444CUL
+#define ETR_INT_SRCSEL_MASK_X_C2XXX 0x44444444UL
+
+/* AE firmware */
+#define AE_FW_PROD_TYPE_C2XXX 0x00800000
+#define AE_FW_MOF_NAME_C2XXX "mof_firmware_c2xxx"
+#define AE_FW_MMP_NAME_C2XXX "mmp_firmware_c2xxx"
+#define AE_FW_UOF_NAME_C2XXX_A0 "icp_qat_nae.uof"
+#define AE_FW_UOF_NAME_C2XXX_B0 "icp_qat_nae_b0.uof"
+
+#endif
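The FUSECTL_C2XXX_* bits above carry both slice-disable and SKU information. A minimal sketch of reading the two SKU bits follows; the helper name and the "high" fallback are assumptions, not the driver's actual qat_c2xxx_get_sku():

static inline const char *
qat_c2xxx_sku_name(uint32_t fusectl)
{

	/* Only one of the SKU fuse bits is expected to be set. */
	if (fusectl & FUSECTL_C2XXX_LOW_SKU)
		return ("low");
	if (fusectl & FUSECTL_C2XXX_MID_SKU)
		return ("mid");
	return ("high");	/* assumed default when neither bit is set */
}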
diff --git a/sys/dev/qat/qat_c3xxx.c b/sys/dev/qat/qat_c3xxx.c
new file mode 100644
index 000000000000..83d39da7d0af
--- /dev/null
+++ b/sys/dev/qat/qat_c3xxx.c
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
+/* $NetBSD: qat_c3xxx.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
+
+/*
+ * Copyright (c) 2019 Internet Initiative Japan, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright(c) 2014 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#if 0
+__KERNEL_RCSID(0, "$NetBSD: qat_c3xxx.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
+#endif
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/systm.h>
+
+#include <machine/bus.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "qatreg.h"
+#include "qat_hw17reg.h"
+#include "qat_c3xxxreg.h"
+#include "qatvar.h"
+#include "qat_hw17var.h"
+
+static uint32_t
+qat_c3xxx_get_accel_mask(struct qat_softc *sc)
+{
+ uint32_t fusectl, strap;
+
+ fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
+ strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_C3XXX, 4);
+
+ return (((~(fusectl | strap)) >> ACCEL_REG_OFFSET_C3XXX) &
+ ACCEL_MASK_C3XXX);
+}
+
+static uint32_t
+qat_c3xxx_get_ae_mask(struct qat_softc *sc)
+{
+ uint32_t fusectl, me_strap, me_disable, ssms_disabled;
+
+ fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
+ me_strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_C3XXX, 4);
+
+ /* If SSMs are disabled, then disable the corresponding MEs */
+ ssms_disabled = (~qat_c3xxx_get_accel_mask(sc)) & ACCEL_MASK_C3XXX;
+ me_disable = 0x3;
+ while (ssms_disabled) {
+ if (ssms_disabled & 1)
+ me_strap |= me_disable;
+ ssms_disabled >>= 1;
+ me_disable <<= 2;
+ }
+
+ return (~(fusectl | me_strap)) & AE_MASK_C3XXX;
+}
+
+static enum qat_sku
+qat_c3xxx_get_sku(struct qat_softc *sc)
+{
+ switch (sc->sc_ae_num) {
+ case MAX_AE_C3XXX:
+ return QAT_SKU_4;
+ }
+
+ return QAT_SKU_UNKNOWN;
+}
+
+static uint32_t
+qat_c3xxx_get_accel_cap(struct qat_softc *sc)
+{
+ uint32_t cap, legfuse, strap;
+
+ legfuse = pci_read_config(sc->sc_dev, LEGFUSE_REG, 4);
+ strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_C3XXX, 4);
+
+ cap = QAT_ACCEL_CAP_CRYPTO_SYMMETRIC +
+ QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC +
+ QAT_ACCEL_CAP_CIPHER +
+ QAT_ACCEL_CAP_AUTHENTICATION +
+ QAT_ACCEL_CAP_COMPRESSION +
+ QAT_ACCEL_CAP_ZUC +
+ QAT_ACCEL_CAP_SHA3;
+
+ if (legfuse & LEGFUSE_ACCEL_MASK_CIPHER_SLICE) {
+ cap &= ~QAT_ACCEL_CAP_CRYPTO_SYMMETRIC;
+ cap &= ~QAT_ACCEL_CAP_CIPHER;
+ }
+ if (legfuse & LEGFUSE_ACCEL_MASK_AUTH_SLICE)
+ cap &= ~QAT_ACCEL_CAP_AUTHENTICATION;
+ if (legfuse & LEGFUSE_ACCEL_MASK_PKE_SLICE)
+ cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
+ if (legfuse & LEGFUSE_ACCEL_MASK_COMPRESS_SLICE)
+ cap &= ~QAT_ACCEL_CAP_COMPRESSION;
+ if (legfuse & LEGFUSE_ACCEL_MASK_EIA3_SLICE)
+ cap &= ~QAT_ACCEL_CAP_ZUC;
+
+ if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_PKE_C3XXX)
+ cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
+ if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_CY_C3XXX)
+ cap &= ~QAT_ACCEL_CAP_COMPRESSION;
+
+ return cap;
+}
+
+static const char *
+qat_c3xxx_get_fw_uof_name(struct qat_softc *sc)
+{
+
+ return AE_FW_UOF_NAME_C3XXX;
+}
+
+static void
+qat_c3xxx_enable_intr(struct qat_softc *sc)
+{
+
+ /* Enable bundle and misc interrupts */
+ qat_misc_write_4(sc, SMIAPF0_C3XXX, SMIA0_MASK_C3XXX);
+ qat_misc_write_4(sc, SMIAPF1_C3XXX, SMIA1_MASK_C3XXX);
+}
+
+/* Per-AE mapping of worker threads to service arbiters */
+static uint32_t thrd_to_arb_map[] = {
+ 0x12222AAA, 0x11222AAA, 0x12222AAA,
+ 0x11222AAA, 0x12222AAA, 0x11222AAA
+};
+
+static void
+qat_c3xxx_get_arb_mapping(struct qat_softc *sc, const uint32_t **arb_map_config)
+{
+ int i;
+
+ for (i = 1; i < MAX_AE_C3XXX; i++) {
+ if ((~sc->sc_ae_mask) & (1 << i))
+ thrd_to_arb_map[i] = 0;
+ }
+ *arb_map_config = thrd_to_arb_map;
+}
+
+static void
+qat_c3xxx_enable_error_interrupts(struct qat_softc *sc)
+{
+ qat_misc_write_4(sc, ERRMSK0, ERRMSK0_CERR_C3XXX); /* ME0-ME3 */
+ qat_misc_write_4(sc, ERRMSK1, ERRMSK1_CERR_C3XXX); /* ME4-ME5 */
+ qat_misc_write_4(sc, ERRMSK5, ERRMSK5_CERR_C3XXX); /* SSM2 */
+
+ /* Reset everything except VFtoPF1_16. */
+ qat_misc_read_write_and_4(sc, ERRMSK3, VF2PF1_16_C3XXX);
+
+ /* RI CPP bus interface error detection and reporting. */
+ qat_misc_write_4(sc, RICPPINTCTL_C3XXX, RICPP_EN_C3XXX);
+
+ /* TI CPP bus interface error detection and reporting. */
+ qat_misc_write_4(sc, TICPPINTCTL_C3XXX, TICPP_EN_C3XXX);
+
+ /* Enable CFC Error interrupts and logging. */
+ qat_misc_write_4(sc, CPP_CFC_ERR_CTRL_C3XXX, CPP_CFC_UE_C3XXX);
+}
+
+static void
+qat_c3xxx_disable_error_interrupts(struct qat_softc *sc)
+{
+ /* ME0-ME3 */
+ qat_misc_write_4(sc, ERRMSK0, ERRMSK0_UERR_C3XXX | ERRMSK0_CERR_C3XXX);
+ /* ME4-ME5 */
+ qat_misc_write_4(sc, ERRMSK1, ERRMSK1_UERR_C3XXX | ERRMSK1_CERR_C3XXX);
+ /* CPP Push Pull, RI, TI, SSM0-SSM1, CFC */
+ qat_misc_write_4(sc, ERRMSK3, ERRMSK3_UERR_C3XXX);
+ /* SSM2 */
+ qat_misc_write_4(sc, ERRMSK5, ERRMSK5_UERR_C3XXX);
+}
+
+static void
+qat_c3xxx_enable_error_correction(struct qat_softc *sc)
+{
+ u_int i, mask;
+
+ /* Enable Accel Engine error detection & correction */
+ for (i = 0, mask = sc->sc_ae_mask; mask; i++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+ qat_misc_read_write_or_4(sc, AE_CTX_ENABLES_C3XXX(i),
+ ENABLE_AE_ECC_ERR_C3XXX);
+ qat_misc_read_write_or_4(sc, AE_MISC_CONTROL_C3XXX(i),
+ ENABLE_AE_ECC_PARITY_CORR_C3XXX);
+ }
+
+ /* Enable shared memory error detection & correction */
+ for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+
+ qat_misc_read_write_or_4(sc, UERRSSMSH(i), ERRSSMSH_EN_C3XXX);
+ qat_misc_read_write_or_4(sc, CERRSSMSH(i), ERRSSMSH_EN_C3XXX);
+ qat_misc_read_write_or_4(sc, PPERR(i), PPERR_EN_C3XXX);
+ }
+
+ qat_c3xxx_enable_error_interrupts(sc);
+}
+
+const struct qat_hw qat_hw_c3xxx = {
+ .qhw_sram_bar_id = BAR_SRAM_ID_C3XXX,
+ .qhw_misc_bar_id = BAR_PMISC_ID_C3XXX,
+ .qhw_etr_bar_id = BAR_ETR_ID_C3XXX,
+ .qhw_cap_global_offset = CAP_GLOBAL_OFFSET_C3XXX,
+ .qhw_ae_offset = AE_OFFSET_C3XXX,
+ .qhw_ae_local_offset = AE_LOCAL_OFFSET_C3XXX,
+ .qhw_etr_bundle_size = ETR_BUNDLE_SIZE_C3XXX,
+ .qhw_num_banks = ETR_MAX_BANKS_C3XXX,
+ .qhw_num_rings_per_bank = ETR_MAX_RINGS_PER_BANK,
+ .qhw_num_accel = MAX_ACCEL_C3XXX,
+ .qhw_num_engines = MAX_AE_C3XXX,
+ .qhw_tx_rx_gap = ETR_TX_RX_GAP_C3XXX,
+ .qhw_tx_rings_mask = ETR_TX_RINGS_MASK_C3XXX,
+ .qhw_clock_per_sec = CLOCK_PER_SEC_C3XXX,
+ .qhw_fw_auth = true,
+ .qhw_fw_req_size = FW_REQ_DEFAULT_SZ_HW17,
+ .qhw_fw_resp_size = FW_RESP_DEFAULT_SZ_HW17,
+ .qhw_ring_asym_tx = 0,
+ .qhw_ring_asym_rx = 8,
+ .qhw_ring_sym_tx = 2,
+ .qhw_ring_sym_rx = 10,
+ .qhw_mof_fwname = AE_FW_MOF_NAME_C3XXX,
+ .qhw_mmp_fwname = AE_FW_MMP_NAME_C3XXX,
+ .qhw_prod_type = AE_FW_PROD_TYPE_C3XXX,
+ .qhw_get_accel_mask = qat_c3xxx_get_accel_mask,
+ .qhw_get_ae_mask = qat_c3xxx_get_ae_mask,
+ .qhw_get_sku = qat_c3xxx_get_sku,
+ .qhw_get_accel_cap = qat_c3xxx_get_accel_cap,
+ .qhw_get_fw_uof_name = qat_c3xxx_get_fw_uof_name,
+ .qhw_enable_intr = qat_c3xxx_enable_intr,
+ .qhw_init_admin_comms = qat_adm_mailbox_init,
+ .qhw_send_admin_init = qat_adm_mailbox_send_init,
+ .qhw_init_arb = qat_arb_init,
+ .qhw_get_arb_mapping = qat_c3xxx_get_arb_mapping,
+ .qhw_enable_error_correction = qat_c3xxx_enable_error_correction,
+ .qhw_disable_error_interrupts = qat_c3xxx_disable_error_interrupts,
+ .qhw_set_ssm_wdtimer = qat_set_ssm_wdtimer,
+ .qhw_check_slice_hang = qat_check_slice_hang,
+ .qhw_crypto_setup_desc = qat_hw17_crypto_setup_desc,
+ .qhw_crypto_setup_req_params = qat_hw17_crypto_setup_req_params,
+ .qhw_crypto_opaque_offset = offsetof(struct fw_la_resp, opaque_data),
+};
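For reference, the SSM-to-ME fan-out loop in qat_c3xxx_get_ae_mask() above widens each disabled accelerator (SSM) bit into a two-bit ME-disable pair, since every SSM feeds two acceleration engines. A standalone sketch with a worked example:

/*
 * With ssms_disabled = 0x5 (SSM0 and SSM2 off) this returns 0x33,
 * masking off ME0-ME1 and ME4-ME5.
 */
static inline uint32_t
ssm_to_me_disable(uint32_t ssms_disabled)
{
	uint32_t me_disable = 0x3, out = 0;

	while (ssms_disabled) {
		if (ssms_disabled & 1)
			out |= me_disable;
		ssms_disabled >>= 1;
		me_disable <<= 2;	/* next SSM owns the next ME pair */
	}
	return (out);
}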
diff --git a/sys/dev/qat/qat_c3xxxreg.h b/sys/dev/qat/qat_c3xxxreg.h
new file mode 100644
index 000000000000..9c4215bc0099
--- /dev/null
+++ b/sys/dev/qat/qat_c3xxxreg.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
+/* $NetBSD: qat_c3xxxreg.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
+
+/*
+ * Copyright (c) 2019 Internet Initiative Japan, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright(c) 2014 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#ifndef _DEV_PCI_QAT_C3XXXREG_H_
+#define _DEV_PCI_QAT_C3XXXREG_H_
+
+/* Max number of accelerators and engines */
+#define MAX_ACCEL_C3XXX 3
+#define MAX_AE_C3XXX 6
+
+/* PCIe BAR index */
+#define BAR_SRAM_ID_C3XXX NO_PCI_REG
+#define BAR_PMISC_ID_C3XXX 0
+#define BAR_ETR_ID_C3XXX 1
+
+/* BAR PMISC sub-regions */
+#define AE_OFFSET_C3XXX 0x20000
+#define AE_LOCAL_OFFSET_C3XXX 0x20800
+#define CAP_GLOBAL_OFFSET_C3XXX 0x30000
+
+#define SOFTSTRAP_REG_C3XXX 0x2EC
+#define SOFTSTRAP_SS_POWERGATE_CY_C3XXX __BIT(23)
+#define SOFTSTRAP_SS_POWERGATE_PKE_C3XXX __BIT(24)
+
+#define ACCEL_REG_OFFSET_C3XXX 16
+#define ACCEL_MASK_C3XXX 0x7
+#define AE_MASK_C3XXX 0x3F
+
+#define SMIAPF0_C3XXX 0x3A028
+#define SMIAPF1_C3XXX 0x3A030
+#define SMIA0_MASK_C3XXX 0xFFFF
+#define SMIA1_MASK_C3XXX 0x1
+
+/* Error detection and correction */
+#define AE_CTX_ENABLES_C3XXX(i) ((i) * 0x1000 + 0x20818)
+#define AE_MISC_CONTROL_C3XXX(i) ((i) * 0x1000 + 0x20960)
+#define ENABLE_AE_ECC_ERR_C3XXX __BIT(28)
+#define ENABLE_AE_ECC_PARITY_CORR_C3XXX (__BIT(24) | __BIT(12))
+#define ERRSSMSH_EN_C3XXX __BIT(3)
+/* BIT(2) enables the logging of push/pull data errors. */
+#define PPERR_EN_C3XXX (__BIT(2))
+
+/* Mask for VF2PF interrupts */
+#define VF2PF1_16_C3XXX (0xFFFF << 9)
+#define ERRSOU3_VF2PF_C3XXX(errsou3) (((errsou3) & 0x01FFFE00) >> 9)
+#define ERRMSK3_VF2PF_C3XXX(vf_mask) (((vf_mask) & 0xFFFF) << 9)
+
+/* Masks for correctable error interrupts. */
+#define ERRMSK0_CERR_C3XXX (__BIT(24) | __BIT(16) | __BIT(8) | __BIT(0))
+#define ERRMSK1_CERR_C3XXX (__BIT(8) | __BIT(0))
+#define ERRMSK5_CERR_C3XXX (0)
+
+/* Masks for uncorrectable error interrupts. */
+#define ERRMSK0_UERR_C3XXX (__BIT(25) | __BIT(17) | __BIT(9) | __BIT(1))
+#define ERRMSK1_UERR_C3XXX (__BIT(9) | __BIT(1))
+#define ERRMSK3_UERR_C3XXX (__BIT(6) | __BIT(5) | __BIT(4) | __BIT(3) | \
+ __BIT(2) | __BIT(0))
+#define ERRMSK5_UERR_C3XXX (__BIT(16))
+
+/* RI CPP control */
+#define RICPPINTCTL_C3XXX (0x3A000 + 0x110)
+/*
+ * BIT(2) enables error detection and reporting on the RI Parity Error.
+ * BIT(1) enables error detection and reporting on the RI CPP Pull interface.
+ * BIT(0) enables error detection and reporting on the RI CPP Push interface.
+ */
+#define RICPP_EN_C3XXX (__BIT(2) | __BIT(1) | __BIT(0))
+
+/* TI CPP control */
+#define TICPPINTCTL_C3XXX (0x3A400 + 0x138)
+/*
+ * BIT(3) enables error detection and reporting on the ETR Parity Error.
+ * BIT(2) enables error detection and reporting on the TI Parity Error.
+ * BIT(1) enables error detection and reporting on the TI CPP Pull interface.
+ * BIT(0) enables error detection and reporting on the TI CPP Push interface.
+ */
+#define TICPP_EN_C3XXX \
+ (__BIT(3) | __BIT(2) | __BIT(1) | __BIT(0))
+
+/* CFC Uncorrectable Errors */
+#define CPP_CFC_ERR_CTRL_C3XXX (0x30000 + 0xC00)
+/*
+ * BIT(1) enables interrupt.
+ * BIT(0) enables detecting and logging of push/pull data errors.
+ */
+#define CPP_CFC_UE_C3XXX (__BIT(1) | __BIT(0))
+
+#define SLICEPWRDOWN_C3XXX(i) ((i) * 0x4000 + 0x2C)
+/* Enabling PKE4-PKE0. */
+#define MMP_PWR_UP_MSK_C3XXX \
+ (__BIT(20) | __BIT(19) | __BIT(18) | __BIT(17) | __BIT(16))
+
+/* CPM Uncorrectable Errors */
+#define INTMASKSSM_C3XXX(i) ((i) * 0x4000 + 0x0)
+/* Disabling interrupts for correctable errors. */
+#define INTMASKSSM_UERR_C3XXX \
+ (__BIT(11) | __BIT(9) | __BIT(7) | __BIT(5) | __BIT(3) | __BIT(1))
+
+/* MMP */
+/* BIT(3) enables correction. */
+#define CERRSSMMMP_EN_C3XXX (__BIT(3))
+
+/* BIT(3) enables logging. */
+#define UERRSSMMMP_EN_C3XXX (__BIT(3))
+
+/* ETR */
+#define ETR_MAX_BANKS_C3XXX 16
+#define ETR_TX_RX_GAP_C3XXX 8
+#define ETR_TX_RINGS_MASK_C3XXX 0xFF
+#define ETR_BUNDLE_SIZE_C3XXX 0x1000
+
+/* AE firmware */
+#define AE_FW_PROD_TYPE_C3XXX 0x02000000
+#define AE_FW_MOF_NAME_C3XXX "qat_c3xxx"
+#define AE_FW_MMP_NAME_C3XXX "qat_c3xxx_mmp"
+#define AE_FW_UOF_NAME_C3XXX "icp_qat_ae.suof"
+
+/* Clock frequency */
+#define CLOCK_PER_SEC_C3XXX (685 * 1000000 / 16)
+
+#endif
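The register masks in the qat_c3xxxreg.h definitions above, and in the c62x/d15xx headers that follow, use the NetBSD-style __BIT() helper. Its real definition lives elsewhere in the driver sources; conceptually it is just a single-bit mask, roughly as follows (the exact expansion is an assumption):

#ifndef __BIT
#define __BIT(n)	(1UL << (n))	/* assumed: single-bit mask at position n */
#endif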
diff --git a/sys/dev/qat/qat_c62x.c b/sys/dev/qat/qat_c62x.c
new file mode 100644
index 000000000000..826c68db9854
--- /dev/null
+++ b/sys/dev/qat/qat_c62x.c
@@ -0,0 +1,314 @@
+/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
+/* $NetBSD: qat_c62x.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
+
+/*
+ * Copyright (c) 2019 Internet Initiative Japan, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright(c) 2014 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#if 0
+__KERNEL_RCSID(0, "$NetBSD: qat_c62x.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
+#endif
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/systm.h>
+
+#include <machine/bus.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "qatreg.h"
+#include "qat_hw17reg.h"
+#include "qat_c62xreg.h"
+#include "qatvar.h"
+#include "qat_hw17var.h"
+
+static uint32_t
+qat_c62x_get_accel_mask(struct qat_softc *sc)
+{
+ uint32_t fusectl, strap;
+
+ fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
+ strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_C62X, 4);
+
+ return (((~(fusectl | strap)) >> ACCEL_REG_OFFSET_C62X) &
+ ACCEL_MASK_C62X);
+}
+
+static uint32_t
+qat_c62x_get_ae_mask(struct qat_softc *sc)
+{
+ uint32_t fusectl, me_strap, me_disable, ssms_disabled;
+
+ fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
+ me_strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_C62X, 4);
+
+ /* If SSMs are disabled, then disable the corresponding MEs */
+ ssms_disabled = (~qat_c62x_get_accel_mask(sc)) & ACCEL_MASK_C62X;
+ me_disable = 0x3;
+ while (ssms_disabled) {
+ if (ssms_disabled & 1)
+ me_strap |= me_disable;
+ ssms_disabled >>= 1;
+ me_disable <<= 2;
+ }
+
+ return (~(fusectl | me_strap)) & AE_MASK_C62X;
+}
+
+static enum qat_sku
+qat_c62x_get_sku(struct qat_softc *sc)
+{
+ switch (sc->sc_ae_num) {
+ case 8:
+ return QAT_SKU_2;
+ case MAX_AE_C62X:
+ return QAT_SKU_4;
+ }
+
+ return QAT_SKU_UNKNOWN;
+}
+
+static uint32_t
+qat_c62x_get_accel_cap(struct qat_softc *sc)
+{
+ uint32_t cap, legfuse, strap;
+
+ legfuse = pci_read_config(sc->sc_dev, LEGFUSE_REG, 4);
+ strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_C62X, 4);
+
+ cap = QAT_ACCEL_CAP_CRYPTO_SYMMETRIC +
+ QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC +
+ QAT_ACCEL_CAP_CIPHER +
+ QAT_ACCEL_CAP_AUTHENTICATION +
+ QAT_ACCEL_CAP_COMPRESSION +
+ QAT_ACCEL_CAP_ZUC +
+ QAT_ACCEL_CAP_SHA3;
+
+ if (legfuse & LEGFUSE_ACCEL_MASK_CIPHER_SLICE) {
+ cap &= ~QAT_ACCEL_CAP_CRYPTO_SYMMETRIC;
+ cap &= ~QAT_ACCEL_CAP_CIPHER;
+ }
+ if (legfuse & LEGFUSE_ACCEL_MASK_AUTH_SLICE)
+ cap &= ~QAT_ACCEL_CAP_AUTHENTICATION;
+ if (legfuse & LEGFUSE_ACCEL_MASK_PKE_SLICE)
+ cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
+ if (legfuse & LEGFUSE_ACCEL_MASK_COMPRESS_SLICE)
+ cap &= ~QAT_ACCEL_CAP_COMPRESSION;
+ if (legfuse & LEGFUSE_ACCEL_MASK_EIA3_SLICE)
+ cap &= ~QAT_ACCEL_CAP_ZUC;
+
+ if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_PKE_C62X)
+ cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
+ if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_CY_C62X)
+ cap &= ~QAT_ACCEL_CAP_COMPRESSION;
+
+ return cap;
+}
+
+static const char *
+qat_c62x_get_fw_uof_name(struct qat_softc *sc)
+{
+
+ return AE_FW_UOF_NAME_C62X;
+}
+
+static void
+qat_c62x_enable_intr(struct qat_softc *sc)
+{
+
+ /* Enable bundle and misc interrupts */
+ qat_misc_write_4(sc, SMIAPF0_C62X, SMIA0_MASK_C62X);
+ qat_misc_write_4(sc, SMIAPF1_C62X, SMIA1_MASK_C62X);
+}
+
+/* Per-AE mapping of worker threads to service arbiters */
+static uint32_t thrd_to_arb_map[] = {
+ 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
+ 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA
+};
+
+static void
+qat_c62x_get_arb_mapping(struct qat_softc *sc, const uint32_t **arb_map_config)
+{
+ int i;
+
+ for (i = 1; i < MAX_AE_C62X; i++) {
+ if ((~sc->sc_ae_mask) & (1 << i))
+ thrd_to_arb_map[i] = 0;
+ }
+ *arb_map_config = thrd_to_arb_map;
+}
+
+static void
+qat_c62x_enable_error_interrupts(struct qat_softc *sc)
+{
+ qat_misc_write_4(sc, ERRMSK0, ERRMSK0_CERR_C62X); /* ME0-ME3 */
+ qat_misc_write_4(sc, ERRMSK1, ERRMSK1_CERR_C62X); /* ME4-ME7 */
+ qat_misc_write_4(sc, ERRMSK4, ERRMSK4_CERR_C62X); /* ME8-ME9 */
+ qat_misc_write_4(sc, ERRMSK5, ERRMSK5_CERR_C62X); /* SSM2-SSM4 */
+
+ /* Reset everything except VFtoPF1_16. */
+ qat_misc_read_write_and_4(sc, ERRMSK3, VF2PF1_16_C62X);
+ /* Disable Secure RAM correctable error interrupt */
+ qat_misc_read_write_or_4(sc, ERRMSK3, ERRMSK3_CERR_C62X);
+
+ /* RI CPP bus interface error detection and reporting. */
+ qat_misc_write_4(sc, RICPPINTCTL_C62X, RICPP_EN_C62X);
+
+ /* TI CPP bus interface error detection and reporting. */
+ qat_misc_write_4(sc, TICPPINTCTL_C62X, TICPP_EN_C62X);
+
+ /* Enable CFC Error interrupts and logging. */
+ qat_misc_write_4(sc, CPP_CFC_ERR_CTRL_C62X, CPP_CFC_UE_C62X);
+
+ /* Enable SecureRAM to fix and log Correctable errors */
+ qat_misc_write_4(sc, SECRAMCERR_C62X, SECRAM_CERR_C62X);
+
+ /* Enable SecureRAM Uncorrectable error interrupts and logging */
+ qat_misc_write_4(sc, SECRAMUERR, SECRAM_UERR_C62X);
+
+ /* Enable Push/Pull Misc Uncorrectable error interrupts and logging */
+ qat_misc_write_4(sc, CPPMEMTGTERR, TGT_UERR_C62X);
+}
+
+static void
+qat_c62x_disable_error_interrupts(struct qat_softc *sc)
+{
+ /* ME0-ME3 */
+ qat_misc_write_4(sc, ERRMSK0, ERRMSK0_UERR_C62X | ERRMSK0_CERR_C62X);
+ /* ME4-ME7 */
+ qat_misc_write_4(sc, ERRMSK1, ERRMSK1_UERR_C62X | ERRMSK1_CERR_C62X);
+ /* Secure RAM, CPP Push Pull, RI, TI, SSM0-SSM1, CFC */
+ qat_misc_write_4(sc, ERRMSK3, ERRMSK3_UERR_C62X | ERRMSK3_CERR_C62X);
+ /* ME8-ME9 */
+ qat_misc_write_4(sc, ERRMSK4, ERRMSK4_UERR_C62X | ERRMSK4_CERR_C62X);
+ /* SSM2-SSM4 */
+ qat_misc_write_4(sc, ERRMSK5, ERRMSK5_UERR_C62X | ERRMSK5_CERR_C62X);
+}
+
+static void
+qat_c62x_enable_error_correction(struct qat_softc *sc)
+{
+ u_int i, mask;
+
+ /* Enable Accel Engine error detection & correction */
+ for (i = 0, mask = sc->sc_ae_mask; mask; i++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+ qat_misc_read_write_or_4(sc, AE_CTX_ENABLES_C62X(i),
+ ENABLE_AE_ECC_ERR_C62X);
+ qat_misc_read_write_or_4(sc, AE_MISC_CONTROL_C62X(i),
+ ENABLE_AE_ECC_PARITY_CORR_C62X);
+ }
+
+ /* Enable shared memory error detection & correction */
+ for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+
+ qat_misc_read_write_or_4(sc, UERRSSMSH(i), ERRSSMSH_EN_C62X);
+ qat_misc_read_write_or_4(sc, CERRSSMSH(i), ERRSSMSH_EN_C62X);
+ qat_misc_read_write_or_4(sc, PPERR(i), PPERR_EN_C62X);
+ }
+
+ qat_c62x_enable_error_interrupts(sc);
+}
+
+const struct qat_hw qat_hw_c62x = {
+ .qhw_sram_bar_id = BAR_SRAM_ID_C62X,
+ .qhw_misc_bar_id = BAR_PMISC_ID_C62X,
+ .qhw_etr_bar_id = BAR_ETR_ID_C62X,
+ .qhw_cap_global_offset = CAP_GLOBAL_OFFSET_C62X,
+ .qhw_ae_offset = AE_OFFSET_C62X,
+ .qhw_ae_local_offset = AE_LOCAL_OFFSET_C62X,
+ .qhw_etr_bundle_size = ETR_BUNDLE_SIZE_C62X,
+ .qhw_num_banks = ETR_MAX_BANKS_C62X,
+ .qhw_num_rings_per_bank = ETR_MAX_RINGS_PER_BANK,
+ .qhw_num_accel = MAX_ACCEL_C62X,
+ .qhw_num_engines = MAX_AE_C62X,
+ .qhw_tx_rx_gap = ETR_TX_RX_GAP_C62X,
+ .qhw_tx_rings_mask = ETR_TX_RINGS_MASK_C62X,
+ .qhw_clock_per_sec = CLOCK_PER_SEC_C62X,
+ .qhw_fw_auth = true,
+ .qhw_fw_req_size = FW_REQ_DEFAULT_SZ_HW17,
+ .qhw_fw_resp_size = FW_RESP_DEFAULT_SZ_HW17,
+ .qhw_ring_asym_tx = 0,
+ .qhw_ring_asym_rx = 8,
+ .qhw_ring_sym_tx = 2,
+ .qhw_ring_sym_rx = 10,
+ .qhw_mof_fwname = AE_FW_MOF_NAME_C62X,
+ .qhw_mmp_fwname = AE_FW_MMP_NAME_C62X,
+ .qhw_prod_type = AE_FW_PROD_TYPE_C62X,
+ .qhw_get_accel_mask = qat_c62x_get_accel_mask,
+ .qhw_get_ae_mask = qat_c62x_get_ae_mask,
+ .qhw_get_sku = qat_c62x_get_sku,
+ .qhw_get_accel_cap = qat_c62x_get_accel_cap,
+ .qhw_get_fw_uof_name = qat_c62x_get_fw_uof_name,
+ .qhw_enable_intr = qat_c62x_enable_intr,
+ .qhw_init_admin_comms = qat_adm_mailbox_init,
+ .qhw_send_admin_init = qat_adm_mailbox_send_init,
+ .qhw_init_arb = qat_arb_init,
+ .qhw_get_arb_mapping = qat_c62x_get_arb_mapping,
+ .qhw_enable_error_correction = qat_c62x_enable_error_correction,
+ .qhw_disable_error_interrupts = qat_c62x_disable_error_interrupts,
+ .qhw_set_ssm_wdtimer = qat_set_ssm_wdtimer,
+ .qhw_check_slice_hang = qat_check_slice_hang,
+ .qhw_crypto_setup_desc = qat_hw17_crypto_setup_desc,
+ .qhw_crypto_setup_req_params = qat_hw17_crypto_setup_req_params,
+ .qhw_crypto_opaque_offset = offsetof(struct fw_la_resp, opaque_data),
+};
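The capability word assembled by qat_c62x_get_accel_cap() is a plain bitmask, so consumers test individual QAT_ACCEL_CAP_* bits. An illustrative, hypothetical helper:

static inline bool
qat_c62x_has_sym_crypto(struct qat_softc *sc)
{

	/* True when neither fuses nor soft straps disabled symmetric crypto. */
	return ((qat_c62x_get_accel_cap(sc) &
	    QAT_ACCEL_CAP_CRYPTO_SYMMETRIC) != 0);
}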
diff --git a/sys/dev/qat/qat_c62xreg.h b/sys/dev/qat/qat_c62xreg.h
new file mode 100644
index 000000000000..e052992fa6f5
--- /dev/null
+++ b/sys/dev/qat/qat_c62xreg.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
+/* $NetBSD: qat_c62xreg.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
+
+/*
+ * Copyright (c) 2019 Internet Initiative Japan, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright(c) 2014 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#ifndef _DEV_PCI_QAT_C62XREG_H_
+#define _DEV_PCI_QAT_C62XREG_H_
+
+/* Max number of accelerators and engines */
+#define MAX_ACCEL_C62X 5
+#define MAX_AE_C62X 10
+
+/* PCIe BAR index */
+#define BAR_SRAM_ID_C62X 0
+#define BAR_PMISC_ID_C62X 1
+#define BAR_ETR_ID_C62X 2
+
+/* BAR PMISC sub-regions */
+#define AE_OFFSET_C62X 0x20000
+#define AE_LOCAL_OFFSET_C62X 0x20800
+#define CAP_GLOBAL_OFFSET_C62X 0x30000
+
+#define SOFTSTRAP_REG_C62X 0x2EC
+#define SOFTSTRAP_SS_POWERGATE_CY_C62X __BIT(23)
+#define SOFTSTRAP_SS_POWERGATE_PKE_C62X __BIT(24)
+
+#define ACCEL_REG_OFFSET_C62X 16
+#define ACCEL_MASK_C62X 0x1F
+#define AE_MASK_C62X 0x3FF
+
+#define SMIAPF0_C62X 0x3A028
+#define SMIAPF1_C62X 0x3A030
+#define SMIA0_MASK_C62X 0xFFFF
+#define SMIA1_MASK_C62X 0x1
+
+/* Error detection and correction */
+#define AE_CTX_ENABLES_C62X(i) ((i) * 0x1000 + 0x20818)
+#define AE_MISC_CONTROL_C62X(i) ((i) * 0x1000 + 0x20960)
+#define ENABLE_AE_ECC_ERR_C62X __BIT(28)
+#define ENABLE_AE_ECC_PARITY_CORR_C62X (__BIT(24) | __BIT(12))
+#define ERRSSMSH_EN_C62X __BIT(3)
+/* BIT(2) enables the logging of push/pull data errors. */
+#define PPERR_EN_C62X (__BIT(2))
+
+/* Mask for VF2PF interrupts */
+#define VF2PF1_16_C62X (0xFFFF << 9)
+#define ERRSOU3_VF2PF_C62X(errsou3) (((errsou3) & 0x01FFFE00) >> 9)
+#define ERRMSK3_VF2PF_C62X(vf_mask) (((vf_mask) & 0xFFFF) << 9)
+
+/* Masks for correctable error interrupts. */
+#define ERRMSK0_CERR_C62X (__BIT(24) | __BIT(16) | __BIT(8) | __BIT(0))
+#define ERRMSK1_CERR_C62X (__BIT(24) | __BIT(16) | __BIT(8) | __BIT(0))
+#define ERRMSK3_CERR_C62X (__BIT(7))
+#define ERRMSK4_CERR_C62X (__BIT(8) | __BIT(0))
+#define ERRMSK5_CERR_C62X (0)
+
+/* Masks for uncorrectable error interrupts. */
+#define ERRMSK0_UERR_C62X (__BIT(25) | __BIT(17) | __BIT(9) | __BIT(1))
+#define ERRMSK1_UERR_C62X (__BIT(25) | __BIT(17) | __BIT(9) | __BIT(1))
+#define ERRMSK3_UERR_C62X (__BIT(8) | __BIT(6) | __BIT(5) | __BIT(4) | \
+ __BIT(3) | __BIT(2) | __BIT(0))
+#define ERRMSK4_UERR_C62X (__BIT(9) | __BIT(1))
+#define ERRMSK5_UERR_C62X (__BIT(18) | __BIT(17) | __BIT(16))
+
+/* RI CPP control */
+#define RICPPINTCTL_C62X (0x3A000 + 0x110)
+/*
+ * BIT(2) enables error detection and reporting on the RI Parity Error.
+ * BIT(1) enables error detection and reporting on the RI CPP Pull interface.
+ * BIT(0) enables error detection and reporting on the RI CPP Push interface.
+ */
+#define RICPP_EN_C62X (__BIT(2) | __BIT(1) | __BIT(0))
+
+/* TI CPP control */
+#define TICPPINTCTL_C62X (0x3A400 + 0x138)
+/*
+ * BIT(3) enables error detection and reporting on the ETR Parity Error.
+ * BIT(2) enables error detection and reporting on the TI Parity Error.
+ * BIT(1) enables error detection and reporting on the TI CPP Pull interface.
+ * BIT(0) enables error detection and reporting on the TI CPP Push interface.
+ */
+#define TICPP_EN_C62X \
+ (__BIT(4) | __BIT(3) | __BIT(2) | __BIT(1) | __BIT(0))
+
+/* CFC Uncorrectable Errors */
+#define CPP_CFC_ERR_CTRL_C62X (0x30000 + 0xC00)
+/*
+ * BIT(1) enables interrupt.
+ * BIT(0) enables detecting and logging of push/pull data errors.
+ */
+#define CPP_CFC_UE_C62X (__BIT(1) | __BIT(0))
+
+/* Correctable SecureRAM Error Reg */
+#define SECRAMCERR_C62X (0x3AC00 + 0x00)
+/* BIT(3) enables fixing and logging of correctable errors. */
+#define SECRAM_CERR_C62X (__BIT(3))
+
+/* Uncorrectable SecureRAM Error Reg */
+/*
+ * BIT(17) enables interrupt.
+ * BIT(3) enables detecting and logging of uncorrectable errors.
+ */
+#define SECRAM_UERR_C62X (__BIT(17) | __BIT(3))
+
+/* Miscellaneous Memory Target Errors Register */
+/*
+ * BIT(3) enables detecting and logging push/pull data errors.
+ * BIT(2) enables interrupt.
+ */
+#define TGT_UERR_C62X (__BIT(3) | __BIT(2))
+
+
+#define SLICEPWRDOWN_C62X(i) ((i) * 0x4000 + 0x2C)
+/* Enabling PKE4-PKE0. */
+#define MMP_PWR_UP_MSK_C62X \
+ (__BIT(20) | __BIT(19) | __BIT(18) | __BIT(17) | __BIT(16))
+
+/* CPM Uncorrectable Errors */
+#define INTMASKSSM_C62X(i) ((i) * 0x4000 + 0x0)
+/* Disabling interrupts for correctable errors. */
+#define INTMASKSSM_UERR_C62X \
+ (__BIT(11) | __BIT(9) | __BIT(7) | __BIT(5) | __BIT(3) | __BIT(1))
+
+/* MMP */
+/* BIT(3) enables correction. */
+#define CERRSSMMMP_EN_C62X (__BIT(3))
+
+/* BIT(3) enables logging. */
+#define UERRSSMMMP_EN_C62X (__BIT(3))
+
+/* ETR */
+#define ETR_MAX_BANKS_C62X 16
+#define ETR_TX_RX_GAP_C62X 8
+#define ETR_TX_RINGS_MASK_C62X 0xFF
+#define ETR_BUNDLE_SIZE_C62X 0x1000
+
+/* AE firmware */
+#define AE_FW_PROD_TYPE_C62X 0x01000000
+#define AE_FW_MOF_NAME_C62X "qat_c62x"
+#define AE_FW_MMP_NAME_C62X "qat_c62x_mmp"
+#define AE_FW_UOF_NAME_C62X "icp_qat_ae.suof"
+
+/* Clock frequency */
+#define CLOCK_PER_SEC_C62X (685 * 1000000 / 16)
+
+#endif
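From the constants above, the C62x exposes 16 ring banks of ETR_BUNDLE_SIZE_C62X (0x1000) bytes each, so the bank CSR windows together span 64 KiB of the ETR BAR. A sketch of the usual bank-offset computation; the layout assumption is not taken from this commit:

static inline bus_size_t
qat_c62x_bank_offset(u_int bank)
{

	/* Each bank is assumed to occupy one ETR_BUNDLE_SIZE_C62X window. */
	return ((bus_size_t)bank * ETR_BUNDLE_SIZE_C62X);
}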
diff --git a/sys/dev/qat/qat_d15xx.c b/sys/dev/qat/qat_d15xx.c
new file mode 100644
index 000000000000..cc0effb75934
--- /dev/null
+++ b/sys/dev/qat/qat_d15xx.c
@@ -0,0 +1,314 @@
+/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
+/* $NetBSD: qat_d15xx.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
+
+/*
+ * Copyright (c) 2019 Internet Initiative Japan, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright(c) 2014 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#if 0
+__KERNEL_RCSID(0, "$NetBSD: qat_d15xx.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
+#endif
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/systm.h>
+
+#include <machine/bus.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "qatreg.h"
+#include "qat_hw17reg.h"
+#include "qat_d15xxreg.h"
+#include "qatvar.h"
+#include "qat_hw17var.h"
+
+static uint32_t
+qat_d15xx_get_accel_mask(struct qat_softc *sc)
+{
+ uint32_t fusectl, strap;
+
+ fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
+ strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_D15XX, 4);
+
+ return (((~(fusectl | strap)) >> ACCEL_REG_OFFSET_D15XX) &
+ ACCEL_MASK_D15XX);
+}
+
+static uint32_t
+qat_d15xx_get_ae_mask(struct qat_softc *sc)
+{
+ uint32_t fusectl, me_strap, me_disable, ssms_disabled;
+
+ fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
+ me_strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_D15XX, 4);
+
+ /* If SSMs are disabled, then disable the corresponding MEs */
+ ssms_disabled = (~qat_d15xx_get_accel_mask(sc)) & ACCEL_MASK_D15XX;
+ me_disable = 0x3;
+ while (ssms_disabled) {
+ if (ssms_disabled & 1)
+ me_strap |= me_disable;
+ ssms_disabled >>= 1;
+ me_disable <<= 2;
+ }
+
+ return (~(fusectl | me_strap)) & AE_MASK_D15XX;
+}
+
+static enum qat_sku
+qat_d15xx_get_sku(struct qat_softc *sc)
+{
+ switch (sc->sc_ae_num) {
+ case 8:
+ return QAT_SKU_2;
+ case MAX_AE_D15XX:
+ return QAT_SKU_4;
+ }
+
+ return QAT_SKU_UNKNOWN;
+}
+
+static uint32_t
+qat_d15xx_get_accel_cap(struct qat_softc *sc)
+{
+ uint32_t cap, legfuse, strap;
+
+ legfuse = pci_read_config(sc->sc_dev, LEGFUSE_REG, 4);
+ strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_D15XX, 4);
+
+ cap = QAT_ACCEL_CAP_CRYPTO_SYMMETRIC +
+ QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC +
+ QAT_ACCEL_CAP_CIPHER +
+ QAT_ACCEL_CAP_AUTHENTICATION +
+ QAT_ACCEL_CAP_COMPRESSION +
+ QAT_ACCEL_CAP_ZUC +
+ QAT_ACCEL_CAP_SHA3;
+
+ if (legfuse & LEGFUSE_ACCEL_MASK_CIPHER_SLICE) {
+ cap &= ~QAT_ACCEL_CAP_CRYPTO_SYMMETRIC;
+ cap &= ~QAT_ACCEL_CAP_CIPHER;
+ }
+ if (legfuse & LEGFUSE_ACCEL_MASK_AUTH_SLICE)
+ cap &= ~QAT_ACCEL_CAP_AUTHENTICATION;
+ if (legfuse & LEGFUSE_ACCEL_MASK_PKE_SLICE)
+ cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
+ if (legfuse & LEGFUSE_ACCEL_MASK_COMPRESS_SLICE)
+ cap &= ~QAT_ACCEL_CAP_COMPRESSION;
+ if (legfuse & LEGFUSE_ACCEL_MASK_EIA3_SLICE)
+ cap &= ~QAT_ACCEL_CAP_ZUC;
+
+ if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_PKE_D15XX)
+ cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
+ if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_CY_D15XX)
+ cap &= ~QAT_ACCEL_CAP_COMPRESSION;
+
+ return cap;
+}
+
+static const char *
+qat_d15xx_get_fw_uof_name(struct qat_softc *sc)
+{
+
+ return AE_FW_UOF_NAME_D15XX;
+}
+
+static void
+qat_d15xx_enable_intr(struct qat_softc *sc)
+{
+
+ /* Enable bundle and misc interrupts */
+ qat_misc_write_4(sc, SMIAPF0_D15XX, SMIA0_MASK_D15XX);
+ qat_misc_write_4(sc, SMIAPF1_D15XX, SMIA1_MASK_D15XX);
+}
+
+/* Per-AE mapping of worker threads to service arbiters */
+static uint32_t thrd_to_arb_map[] = {
+ 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
+ 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA
+};
+
+static void
+qat_d15xx_get_arb_mapping(struct qat_softc *sc, const uint32_t **arb_map_config)
+{
+ int i;
+
+ for (i = 1; i < MAX_AE_D15XX; i++) {
+ if ((~sc->sc_ae_mask) & (1 << i))
+ thrd_to_arb_map[i] = 0;
+ }
+ *arb_map_config = thrd_to_arb_map;
+}
+
+static void
+qat_d15xx_enable_error_interrupts(struct qat_softc *sc)
+{
+ qat_misc_write_4(sc, ERRMSK0, ERRMSK0_CERR_D15XX); /* ME0-ME3 */
+ qat_misc_write_4(sc, ERRMSK1, ERRMSK1_CERR_D15XX); /* ME4-ME7 */
+ qat_misc_write_4(sc, ERRMSK4, ERRMSK4_CERR_D15XX); /* ME8-ME9 */
+ qat_misc_write_4(sc, ERRMSK5, ERRMSK5_CERR_D15XX); /* SSM2-SSM4 */
+
+ /* Reset everything except VFtoPF1_16. */
+ qat_misc_read_write_and_4(sc, ERRMSK3, VF2PF1_16_D15XX);
+ /* Disable Secure RAM correctable error interrupt */
+ qat_misc_read_write_or_4(sc, ERRMSK3, ERRMSK3_CERR_D15XX);
+
+ /* RI CPP bus interface error detection and reporting. */
+ qat_misc_write_4(sc, RICPPINTCTL_D15XX, RICPP_EN_D15XX);
+
+ /* TI CPP bus interface error detection and reporting. */
+ qat_misc_write_4(sc, TICPPINTCTL_D15XX, TICPP_EN_D15XX);
+
+ /* Enable CFC Error interrupts and logging. */
+ qat_misc_write_4(sc, CPP_CFC_ERR_CTRL_D15XX, CPP_CFC_UE_D15XX);
+
+ /* Enable SecureRAM to fix and log Correctable errors */
+ qat_misc_write_4(sc, SECRAMCERR_D15XX, SECRAM_CERR_D15XX);
+
+ /* Enable SecureRAM Uncorrectable error interrupts and logging */
+ qat_misc_write_4(sc, SECRAMUERR, SECRAM_UERR_D15XX);
+
+ /* Enable Push/Pull Misc Uncorrectable error interrupts and logging */
+ qat_misc_write_4(sc, CPPMEMTGTERR, TGT_UERR_D15XX);
+}
+
+static void
+qat_d15xx_disable_error_interrupts(struct qat_softc *sc)
+{
+ /* ME0-ME3 */
+ qat_misc_write_4(sc, ERRMSK0, ERRMSK0_UERR_D15XX | ERRMSK0_CERR_D15XX);
+ /* ME4-ME7 */
+ qat_misc_write_4(sc, ERRMSK1, ERRMSK1_UERR_D15XX | ERRMSK1_CERR_D15XX);
+ /* Secure RAM, CPP Push Pull, RI, TI, SSM0-SSM1, CFC */
+ qat_misc_write_4(sc, ERRMSK3, ERRMSK3_UERR_D15XX | ERRMSK3_CERR_D15XX);
+ /* ME8-ME9 */
+ qat_misc_write_4(sc, ERRMSK4, ERRMSK4_UERR_D15XX | ERRMSK4_CERR_D15XX);
+ /* SSM2-SSM4 */
+ qat_misc_write_4(sc, ERRMSK5, ERRMSK5_UERR_D15XX | ERRMSK5_CERR_D15XX);
+}
+
+static void
+qat_d15xx_enable_error_correction(struct qat_softc *sc)
+{
+ u_int i, mask;
+
+ /* Enable Accel Engine error detection & correction */
+ for (i = 0, mask = sc->sc_ae_mask; mask; i++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+ qat_misc_read_write_or_4(sc, AE_CTX_ENABLES_D15XX(i),
+ ENABLE_AE_ECC_ERR_D15XX);
+ qat_misc_read_write_or_4(sc, AE_MISC_CONTROL_D15XX(i),
+ ENABLE_AE_ECC_PARITY_CORR_D15XX);
+ }
+
+ /* Enable shared memory error detection & correction */
+ for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+
+ qat_misc_read_write_or_4(sc, UERRSSMSH(i), ERRSSMSH_EN_D15XX);
+ qat_misc_read_write_or_4(sc, CERRSSMSH(i), ERRSSMSH_EN_D15XX);
+ qat_misc_read_write_or_4(sc, PPERR(i), PPERR_EN_D15XX);
+ }
+
+ qat_d15xx_enable_error_interrupts(sc);
+}
+
+const struct qat_hw qat_hw_d15xx = {
+ .qhw_sram_bar_id = BAR_SRAM_ID_D15XX,
+ .qhw_misc_bar_id = BAR_PMISC_ID_D15XX,
+ .qhw_etr_bar_id = BAR_ETR_ID_D15XX,
+ .qhw_cap_global_offset = CAP_GLOBAL_OFFSET_D15XX,
+ .qhw_ae_offset = AE_OFFSET_D15XX,
+ .qhw_ae_local_offset = AE_LOCAL_OFFSET_D15XX,
+ .qhw_etr_bundle_size = ETR_BUNDLE_SIZE_D15XX,
+ .qhw_num_banks = ETR_MAX_BANKS_D15XX,
+ .qhw_num_rings_per_bank = ETR_MAX_RINGS_PER_BANK,
+ .qhw_num_accel = MAX_ACCEL_D15XX,
+ .qhw_num_engines = MAX_AE_D15XX,
+ .qhw_tx_rx_gap = ETR_TX_RX_GAP_D15XX,
+ .qhw_tx_rings_mask = ETR_TX_RINGS_MASK_D15XX,
+ .qhw_clock_per_sec = CLOCK_PER_SEC_D15XX,
+ .qhw_fw_auth = true,
+ .qhw_fw_req_size = FW_REQ_DEFAULT_SZ_HW17,
+ .qhw_fw_resp_size = FW_RESP_DEFAULT_SZ_HW17,
+ .qhw_ring_asym_tx = 0,
+ .qhw_ring_asym_rx = 8,
+ .qhw_ring_sym_tx = 2,
+ .qhw_ring_sym_rx = 10,
+ .qhw_mof_fwname = AE_FW_MOF_NAME_D15XX,
+ .qhw_mmp_fwname = AE_FW_MMP_NAME_D15XX,
+ .qhw_prod_type = AE_FW_PROD_TYPE_D15XX,
+ .qhw_get_accel_mask = qat_d15xx_get_accel_mask,
+ .qhw_get_ae_mask = qat_d15xx_get_ae_mask,
+ .qhw_get_sku = qat_d15xx_get_sku,
+ .qhw_get_accel_cap = qat_d15xx_get_accel_cap,
+ .qhw_get_fw_uof_name = qat_d15xx_get_fw_uof_name,
+ .qhw_enable_intr = qat_d15xx_enable_intr,
+ .qhw_init_admin_comms = qat_adm_mailbox_init,
+ .qhw_send_admin_init = qat_adm_mailbox_send_init,
+ .qhw_init_arb = qat_arb_init,
+ .qhw_get_arb_mapping = qat_d15xx_get_arb_mapping,
+ .qhw_enable_error_correction = qat_d15xx_enable_error_correction,
+ .qhw_disable_error_interrupts = qat_d15xx_disable_error_interrupts,
+ .qhw_set_ssm_wdtimer = qat_set_ssm_wdtimer,
+ .qhw_check_slice_hang = qat_check_slice_hang,
+ .qhw_crypto_setup_desc = qat_hw17_crypto_setup_desc,
+ .qhw_crypto_setup_req_params = qat_hw17_crypto_setup_req_params,
+ .qhw_crypto_opaque_offset = offsetof(struct fw_la_resp, opaque_data),
+};
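qhw_clock_per_sec records the SSM clock rate, 685 MHz / 16, or about 42.8 million ticks per second, for the D15xx and C62x parts. A hedged sketch of converting a microsecond interval into device ticks; the helper and its use for the SSM watchdog are assumptions:

static inline uint64_t
qat_usec_to_ticks(const struct qat_hw *qhw, uint64_t us)
{

	/* Scale by ticks-per-second, then divide out the microseconds. */
	return (qhw->qhw_clock_per_sec * us / 1000000);
}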
diff --git a/sys/dev/qat/qat_d15xxreg.h b/sys/dev/qat/qat_d15xxreg.h
new file mode 100644
index 000000000000..aefeb5fca04e
--- /dev/null
+++ b/sys/dev/qat/qat_d15xxreg.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
+/* $NetBSD: qat_d15xxreg.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
+
+/*
+ * Copyright (c) 2019 Internet Initiative Japan, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright(c) 2014 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#ifndef _DEV_PCI_QAT_D15XXREG_H_
+#define _DEV_PCI_QAT_D15XXREG_H_
+
+/* Max number of accelerators and engines */
+#define MAX_ACCEL_D15XX 5
+#define MAX_AE_D15XX 10
+
+/* PCIe BAR index */
+#define BAR_SRAM_ID_D15XX 0
+#define BAR_PMISC_ID_D15XX 1
+#define BAR_ETR_ID_D15XX 2
+
+/* BAR PMISC sub-regions */
+#define AE_OFFSET_D15XX 0x20000
+#define AE_LOCAL_OFFSET_D15XX 0x20800
+#define CAP_GLOBAL_OFFSET_D15XX 0x30000
+
+#define SOFTSTRAP_REG_D15XX 0x2EC
+#define SOFTSTRAP_SS_POWERGATE_CY_D15XX __BIT(23)
+#define SOFTSTRAP_SS_POWERGATE_PKE_D15XX __BIT(24)
+
+#define ACCEL_REG_OFFSET_D15XX 16
+#define ACCEL_MASK_D15XX 0x1F
+#define AE_MASK_D15XX 0x3FF
+
+#define SMIAPF0_D15XX 0x3A028
+#define SMIAPF1_D15XX 0x3A030
+#define SMIA0_MASK_D15XX 0xFFFF
+#define SMIA1_MASK_D15XX 0x1
+
+/* Error detection and correction */
+#define AE_CTX_ENABLES_D15XX(i) ((i) * 0x1000 + 0x20818)
+#define AE_MISC_CONTROL_D15XX(i) ((i) * 0x1000 + 0x20960)
+#define ENABLE_AE_ECC_ERR_D15XX __BIT(28)
+#define ENABLE_AE_ECC_PARITY_CORR_D15XX (__BIT(24) | __BIT(12))
+#define ERRSSMSH_EN_D15XX __BIT(3)
+/* BIT(2) enables the logging of push/pull data errors. */
+#define PPERR_EN_D15XX (__BIT(2))
+
+/* Mask for VF2PF interrupts */
+#define VF2PF1_16_D15XX (0xFFFF << 9)
+#define ERRSOU3_VF2PF_D15XX(errsou3) (((errsou3) & 0x01FFFE00) >> 9)
+#define ERRMSK3_VF2PF_D15XX(vf_mask) (((vf_mask) & 0xFFFF) << 9)
+
+/* Masks for correctable error interrupts. */
+#define ERRMSK0_CERR_D15XX (__BIT(24) | __BIT(16) | __BIT(8) | __BIT(0))
+#define ERRMSK1_CERR_D15XX (__BIT(24) | __BIT(16) | __BIT(8) | __BIT(0))
+#define ERRMSK3_CERR_D15XX (__BIT(7))
+#define ERRMSK4_CERR_D15XX (__BIT(8) | __BIT(0))
+#define ERRMSK5_CERR_D15XX (0)
+
+/* Masks for uncorrectable error interrupts. */
+#define ERRMSK0_UERR_D15XX (__BIT(25) | __BIT(17) | __BIT(9) | __BIT(1))
+#define ERRMSK1_UERR_D15XX (__BIT(25) | __BIT(17) | __BIT(9) | __BIT(1))
+#define ERRMSK3_UERR_D15XX (__BIT(8) | __BIT(6) | __BIT(5) | __BIT(4) | \
+ __BIT(3) | __BIT(2) | __BIT(0))
+#define ERRMSK4_UERR_D15XX (__BIT(9) | __BIT(1))
+#define ERRMSK5_UERR_D15XX (__BIT(18) | __BIT(17) | __BIT(16))
+
+/* RI CPP control */
+#define RICPPINTCTL_D15XX (0x3A000 + 0x110)
+/*
+ * BIT(2) enables error detection and reporting on the RI Parity Error.
+ * BIT(1) enables error detection and reporting on the RI CPP Pull interface.
+ * BIT(0) enables error detection and reporting on the RI CPP Push interface.
+ */
+#define RICPP_EN_D15XX (__BIT(2) | __BIT(1) | __BIT(0))
+
+/* TI CPP control */
+#define TICPPINTCTL_D15XX (0x3A400 + 0x138)
+/*
+ * BIT(3) enables error detection and reporting on the ETR Parity Error.
+ * BIT(2) enables error detection and reporting on the TI Parity Error.
+ * BIT(1) enables error detection and reporting on the TI CPP Pull interface.
+ * BIT(0) enables error detection and reporting on the TI CPP Push interface.
+ */
+#define TICPP_EN_D15XX \
+ (__BIT(4) | __BIT(3) | __BIT(2) | __BIT(1) | __BIT(0))
+
+/* CFC Uncorrectable Errors */
+#define CPP_CFC_ERR_CTRL_D15XX (0x30000 + 0xC00)
+/*
+ * BIT(1) enables interrupt.
+ * BIT(0) enables detecting and logging of push/pull data errors.
+ */
+#define CPP_CFC_UE_D15XX (__BIT(1) | __BIT(0))
+
+/* Correctable SecureRAM Error Reg */
+#define SECRAMCERR_D15XX (0x3AC00 + 0x00)
+/* BIT(3) enables fixing and logging of correctable errors. */
+#define SECRAM_CERR_D15XX (__BIT(3))
+
+/* Uncorrectable SecureRAM Error Reg */
+/*
+ * BIT(17) enables interrupt.
+ * BIT(3) enables detecting and logging of uncorrectable errors.
+ */
+#define SECRAM_UERR_D15XX (__BIT(17) | __BIT(3))
+
+/* Miscellaneous Memory Target Errors Register */
+/*
+ * BIT(3) enables detecting and logging push/pull data errors.
+ * BIT(2) enables interrupt.
+ */
+#define TGT_UERR_D15XX (__BIT(3) | __BIT(2))
+
+
+#define SLICEPWRDOWN_D15XX(i) ((i) * 0x4000 + 0x2C)
+/* Enabling PKE4-PKE0. */
+#define MMP_PWR_UP_MSK_D15XX \
+ (__BIT(20) | __BIT(19) | __BIT(18) | __BIT(17) | __BIT(16))
+
+/* CPM Uncorrectable Errors */
+#define INTMASKSSM_D15XX(i) ((i) * 0x4000 + 0x0)
+/* Disabling interrupts for correctable errors. */
+#define INTMASKSSM_UERR_D15XX \
+ (__BIT(11) | __BIT(9) | __BIT(7) | __BIT(5) | __BIT(3) | __BIT(1))
+
+/* MMP */
+/* BIT(3) enables correction. */
+#define CERRSSMMMP_EN_D15XX (__BIT(3))
+
+/* BIT(3) enables logging. */
+#define UERRSSMMMP_EN_D15XX (__BIT(3))
+
+/* ETR */
+#define ETR_MAX_BANKS_D15XX 16
+#define ETR_TX_RX_GAP_D15XX 8
+#define ETR_TX_RINGS_MASK_D15XX 0xFF
+#define ETR_BUNDLE_SIZE_D15XX 0x1000
+
+/* AE firmware */
+#define AE_FW_PROD_TYPE_D15XX 0x01000000
+#define AE_FW_MOF_NAME_D15XX "qat_d15xx"
+#define AE_FW_MMP_NAME_D15XX "qat_d15xx_mmp"
+#define AE_FW_UOF_NAME_D15XX "icp_qat_ae.suof"
+
+/* Clock frequency */
+#define CLOCK_PER_SEC_D15XX (685 * 1000000 / 16)
+
+#endif
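The __BIT()/__BITS() register helpers used throughout this header come from the driver's NetBSD heritage; a minimal sketch of equivalent definitions, for illustration only, assuming qatreg.h carries NetBSD-compatible macros and ignoring the bit-63 corner case the real helpers handle:

	/* Illustrative only; the driver's actual helpers live in qatreg.h. */
	#define __BIT(n)	(1ULL << (n))
	#define __BITS(hi, lo)	((__BIT((hi) + 1) - 1) & ~(__BIT(lo) - 1))

	/* e.g. SOFTSTRAP_SS_POWERGATE_CY_D15XX == __BIT(23) == 0x00800000 */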
diff --git a/sys/dev/qat/qat_dh895xcc.c b/sys/dev/qat/qat_dh895xcc.c
new file mode 100644
index 000000000000..8b566f3925d4
--- /dev/null
+++ b/sys/dev/qat/qat_dh895xcc.c
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-2-Clause AND BSD-3-Clause */
+/*
+ * Copyright (c) 2020 Rubicon Communications, LLC (Netgate)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Copyright(c) 2014 - 2020 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+
+#include <machine/bus.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "qatreg.h"
+#include "qatvar.h"
+#include "qat_hw17reg.h"
+#include "qat_hw17var.h"
+#include "qat_dh895xccreg.h"
+
+static uint32_t
+qat_dh895xcc_get_accel_mask(struct qat_softc *sc)
+{
+ uint32_t fusectl, strap;
+
+ fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
+ strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_DH895XCC, 4);
+
+ return (((~(fusectl | strap)) >> ACCEL_REG_OFFSET_DH895XCC) &
+ ACCEL_MASK_DH895XCC);
+}
+
+static uint32_t
+qat_dh895xcc_get_ae_mask(struct qat_softc *sc)
+{
+ uint32_t fusectl, strap;
+
+ fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
+ strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_DH895XCC, 4);
+
+ return (~(fusectl | strap)) & AE_MASK_DH895XCC;
+}
+
+static enum qat_sku
+qat_dh895xcc_get_sku(struct qat_softc *sc)
+{
+ uint32_t fusectl, sku;
+
+ fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
+ sku = (fusectl & FUSECTL_SKU_MASK_DH895XCC) >>
+ FUSECTL_SKU_SHIFT_DH895XCC;
+ switch (sku) {
+ case FUSECTL_SKU_1_DH895XCC:
+ return QAT_SKU_1;
+ case FUSECTL_SKU_2_DH895XCC:
+ return QAT_SKU_2;
+ case FUSECTL_SKU_3_DH895XCC:
+ return QAT_SKU_3;
+ case FUSECTL_SKU_4_DH895XCC:
+ return QAT_SKU_4;
+ default:
+ return QAT_SKU_UNKNOWN;
+ }
+}
+
+static uint32_t
+qat_dh895xcc_get_accel_cap(struct qat_softc *sc)
+{
+ uint32_t cap, legfuse;
+
+ legfuse = pci_read_config(sc->sc_dev, LEGFUSE_REG, 4);
+
+ cap = QAT_ACCEL_CAP_CRYPTO_SYMMETRIC +
+ QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC +
+ QAT_ACCEL_CAP_CIPHER +
+ QAT_ACCEL_CAP_AUTHENTICATION +
+ QAT_ACCEL_CAP_COMPRESSION +
+ QAT_ACCEL_CAP_ZUC +
+ QAT_ACCEL_CAP_SHA3;
+
+ if (legfuse & LEGFUSE_ACCEL_MASK_CIPHER_SLICE) {
+ cap &= ~QAT_ACCEL_CAP_CRYPTO_SYMMETRIC;
+ cap &= ~QAT_ACCEL_CAP_CIPHER;
+ }
+ if (legfuse & LEGFUSE_ACCEL_MASK_AUTH_SLICE)
+ cap &= ~QAT_ACCEL_CAP_AUTHENTICATION;
+ if (legfuse & LEGFUSE_ACCEL_MASK_PKE_SLICE)
+ cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
+ if (legfuse & LEGFUSE_ACCEL_MASK_COMPRESS_SLICE)
+ cap &= ~QAT_ACCEL_CAP_COMPRESSION;
+ if (legfuse & LEGFUSE_ACCEL_MASK_EIA3_SLICE)
+ cap &= ~QAT_ACCEL_CAP_ZUC;
+
+ return cap;
+}
+
+static const char *
+qat_dh895xcc_get_fw_uof_name(struct qat_softc *sc)
+{
+ return AE_FW_UOF_NAME_DH895XCC;
+}
+
+static void
+qat_dh895xcc_enable_intr(struct qat_softc *sc)
+{
+ /* Enable bundle and misc interrupts */
+ qat_misc_write_4(sc, SMIAPF0_DH895XCC, SMIA0_MASK_DH895XCC);
+ qat_misc_write_4(sc, SMIAPF1_DH895XCC, SMIA1_MASK_DH895XCC);
+}
+
+/* Worker-thread to service-arbiter mappings, per device SKU */
+static uint32_t thrd_to_arb_map_sku4[] = {
+ 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
+ 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static uint32_t thrd_to_arb_map_sku6[] = {
+ 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
+ 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
+ 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
+};
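+/*
+ * Each 32-bit entry above corresponds to one of the MAX_AE_DH895XCC
+ * engines; qat_dh895xcc_get_arb_mapping() below zeroes the entries of
+ * engines that are absent from sc_ae_mask.
+ */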
+
+static void
+qat_dh895xcc_get_arb_mapping(struct qat_softc *sc,
+ const uint32_t **arb_map_config)
+{
+ uint32_t *map, sku;
+ int i;
+
+ sku = qat_dh895xcc_get_sku(sc);
+ switch (sku) {
+ case QAT_SKU_1:
+ map = thrd_to_arb_map_sku4;
+ break;
+ case QAT_SKU_2:
+ case QAT_SKU_4:
+ map = thrd_to_arb_map_sku6;
+ break;
+ default:
+ *arb_map_config = NULL;
+ return;
+ }
+
+ for (i = 1; i < MAX_AE_DH895XCC; i++) {
+ if ((~sc->sc_ae_mask) & (1 << i))
+ map[i] = 0;
+ }
+ *arb_map_config = map;
+}
+
+static void
+qat_dh895xcc_enable_error_correction(struct qat_softc *sc)
+{
+ uint32_t mask;
+ u_int i;
+
+ /* Enable Accel Engine error detection & correction */
+ for (i = 0, mask = sc->sc_ae_mask; mask; i++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+ qat_misc_read_write_or_4(sc, AE_CTX_ENABLES_DH895XCC(i),
+ ENABLE_AE_ECC_ERR_DH895XCC);
+ qat_misc_read_write_or_4(sc, AE_MISC_CONTROL_DH895XCC(i),
+ ENABLE_AE_ECC_PARITY_CORR_DH895XCC);
+ }
+
+ /* Enable shared memory error detection & correction */
+ for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+
+ qat_misc_read_write_or_4(sc, UERRSSMSH(i), ERRSSMSH_EN_DH895XCC);
+ qat_misc_read_write_or_4(sc, CERRSSMSH(i), ERRSSMSH_EN_DH895XCC);
+ qat_misc_read_write_or_4(sc, PPERR(i), PPERR_EN_DH895XCC);
+ }
+}
+
+const struct qat_hw qat_hw_dh895xcc = {
+ .qhw_sram_bar_id = BAR_SRAM_ID_DH895XCC,
+ .qhw_misc_bar_id = BAR_PMISC_ID_DH895XCC,
+ .qhw_etr_bar_id = BAR_ETR_ID_DH895XCC,
+ .qhw_cap_global_offset = CAP_GLOBAL_OFFSET_DH895XCC,
+ .qhw_ae_offset = AE_OFFSET_DH895XCC,
+ .qhw_ae_local_offset = AE_LOCAL_OFFSET_DH895XCC,
+ .qhw_etr_bundle_size = ETR_BUNDLE_SIZE_DH895XCC,
+ .qhw_num_banks = ETR_MAX_BANKS_DH895XCC,
+ .qhw_num_rings_per_bank = ETR_MAX_RINGS_PER_BANK,
+ .qhw_num_accel = MAX_ACCEL_DH895XCC,
+ .qhw_num_engines = MAX_AE_DH895XCC,
+ .qhw_tx_rx_gap = ETR_TX_RX_GAP_DH895XCC,
+ .qhw_tx_rings_mask = ETR_TX_RINGS_MASK_DH895XCC,
+ .qhw_clock_per_sec = CLOCK_PER_SEC_DH895XCC,
+ .qhw_fw_auth = false,
+ .qhw_fw_req_size = FW_REQ_DEFAULT_SZ_HW17,
+ .qhw_fw_resp_size = FW_RESP_DEFAULT_SZ_HW17,
+ .qhw_ring_asym_tx = 0,
+ .qhw_ring_asym_rx = 8,
+ .qhw_ring_sym_tx = 2,
+ .qhw_ring_sym_rx = 10,
+ .qhw_mof_fwname = AE_FW_MOF_NAME_DH895XCC,
+ .qhw_mmp_fwname = AE_FW_MMP_NAME_DH895XCC,
+ .qhw_prod_type = AE_FW_PROD_TYPE_DH895XCC,
+ .qhw_get_accel_mask = qat_dh895xcc_get_accel_mask,
+ .qhw_get_ae_mask = qat_dh895xcc_get_ae_mask,
+ .qhw_get_sku = qat_dh895xcc_get_sku,
+ .qhw_get_accel_cap = qat_dh895xcc_get_accel_cap,
+ .qhw_get_fw_uof_name = qat_dh895xcc_get_fw_uof_name,
+ .qhw_enable_intr = qat_dh895xcc_enable_intr,
+ .qhw_init_admin_comms = qat_adm_mailbox_init,
+ .qhw_send_admin_init = qat_adm_mailbox_send_init,
+ .qhw_init_arb = qat_arb_init,
+ .qhw_get_arb_mapping = qat_dh895xcc_get_arb_mapping,
+ .qhw_enable_error_correction = qat_dh895xcc_enable_error_correction,
+ .qhw_check_slice_hang = qat_check_slice_hang,
+ .qhw_crypto_setup_desc = qat_hw17_crypto_setup_desc,
+ .qhw_crypto_setup_req_params = qat_hw17_crypto_setup_req_params,
+ .qhw_crypto_opaque_offset = offsetof(struct fw_la_resp, opaque_data),
+};
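To make the fuse arithmetic in qat_dh895xcc_get_accel_mask() and qat_dh895xcc_get_ae_mask() concrete, here is a small worked example; the register values are illustrative, not read from hardware:

	uint32_t fusectl = 0, strap = 0;	/* nothing fused or strapped off */
	uint32_t accel = (~(fusectl | strap) >> ACCEL_REG_OFFSET_DH895XCC) &
	    ACCEL_MASK_DH895XCC;		/* == 0x3F: all 6 accelerators */
	uint32_t ae = ~(fusectl | strap) & AE_MASK_DH895XCC;
						/* == 0xFFF: all 12 engines */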
diff --git a/sys/dev/qat/qat_dh895xccreg.h b/sys/dev/qat/qat_dh895xccreg.h
new file mode 100644
index 000000000000..2657f7ea4211
--- /dev/null
+++ b/sys/dev/qat/qat_dh895xccreg.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
+
+/*
+ * Copyright (c) 2019 Internet Initiative Japan, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright(c) 2014-2020 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#ifndef _DEV_PCI_QAT_DH895XCCREG_H_
+#define _DEV_PCI_QAT_DH895XCCREG_H_
+
+/* Max number of accelerators and engines */
+#define MAX_ACCEL_DH895XCC 6
+#define MAX_AE_DH895XCC 12
+
+/* PCIe BAR index */
+#define BAR_SRAM_ID_DH895XCC 0
+#define BAR_PMISC_ID_DH895XCC 1
+#define BAR_ETR_ID_DH895XCC 2
+
+/* BAR PMISC sub-regions */
+#define AE_OFFSET_DH895XCC 0x20000
+#define AE_LOCAL_OFFSET_DH895XCC 0x20800
+#define CAP_GLOBAL_OFFSET_DH895XCC 0x30000
+
+#define SOFTSTRAP_REG_DH895XCC 0x2EC
+
+#define FUSECTL_SKU_MASK_DH895XCC 0x300000
+#define FUSECTL_SKU_SHIFT_DH895XCC 20
+#define FUSECTL_SKU_1_DH895XCC 0
+#define FUSECTL_SKU_2_DH895XCC 1
+#define FUSECTL_SKU_3_DH895XCC 2
+#define FUSECTL_SKU_4_DH895XCC 3
+
+#define ACCEL_REG_OFFSET_DH895XCC 13
+#define ACCEL_MASK_DH895XCC 0x3F
+#define AE_MASK_DH895XCC 0xFFF
+
+#define SMIAPF0_DH895XCC 0x3A028
+#define SMIAPF1_DH895XCC 0x3A030
+#define SMIA0_MASK_DH895XCC 0xFFFFFFFF
+#define SMIA1_MASK_DH895XCC 0x1
+
+/* Error detection and correction */
+#define AE_CTX_ENABLES_DH895XCC(i) ((i) * 0x1000 + 0x20818)
+#define AE_MISC_CONTROL_DH895XCC(i) ((i) * 0x1000 + 0x20960)
+#define ENABLE_AE_ECC_ERR_DH895XCC __BIT(28)
+#define ENABLE_AE_ECC_PARITY_CORR_DH895XCC (__BIT(24) | __BIT(12))
+#define ERRSSMSH_EN_DH895XCC __BIT(3)
+/* BIT(2) enables the logging of push/pull data errors. */
+#define PPERR_EN_DH895XCC (__BIT(2))
+
+/* ETR */
+#define ETR_MAX_BANKS_DH895XCC 32
+#define ETR_TX_RX_GAP_DH895XCC 8
+#define ETR_TX_RINGS_MASK_DH895XCC 0xFF
+#define ETR_BUNDLE_SIZE_DH895XCC 0x1000
+
+/* AE firmware */
+#define AE_FW_PROD_TYPE_DH895XCC 0x00400000
+#define AE_FW_MOF_NAME_DH895XCC "qat_895xcc"
+#define AE_FW_MMP_NAME_DH895XCC "qat_895xcc_mmp"
+#define AE_FW_UOF_NAME_DH895XCC "icp_qat_ae.uof"
+
+/* Clock frequency */
+#define CLOCK_PER_SEC_DH895XCC (685 * 1000000 / 16)
+
+#endif
diff --git a/sys/dev/qat/qat_hw15.c b/sys/dev/qat/qat_hw15.c
new file mode 100644
index 000000000000..4f823a0a9ae4
--- /dev/null
+++ b/sys/dev/qat/qat_hw15.c
@@ -0,0 +1,953 @@
+/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
+/* $NetBSD: qat_hw15.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
+
+/*
+ * Copyright (c) 2019 Internet Initiative Japan, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright(c) 2007-2013 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#if 0
+__KERNEL_RCSID(0, "$NetBSD: qat_hw15.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
+#endif
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/systm.h>
+
+#include <machine/bus.h>
+
+#include <opencrypto/xform.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "qatreg.h"
+#include "qat_hw15reg.h"
+#include "qatvar.h"
+#include "qat_hw15var.h"
+
+static int qat_adm_ring_init_ring_table(struct qat_softc *);
+static void qat_adm_ring_build_slice_mask(uint16_t *, uint32_t, uint32_t);
+static void qat_adm_ring_build_shram_mask(uint64_t *, uint32_t, uint32_t);
+static int qat_adm_ring_build_ring_table(struct qat_softc *, uint32_t);
+static int qat_adm_ring_build_init_msg(struct qat_softc *,
+ struct fw_init_req *, enum fw_init_cmd_id, uint32_t,
+ struct qat_accel_init_cb *);
+static int qat_adm_ring_send_init_msg_sync(struct qat_softc *,
+ enum fw_init_cmd_id, uint32_t);
+static int qat_adm_ring_send_init_msg(struct qat_softc *,
+ enum fw_init_cmd_id);
+static int qat_adm_ring_intr(struct qat_softc *, void *, void *);
+
+void
+qat_msg_req_type_populate(struct arch_if_req_hdr *msg, enum arch_if_req type,
+ uint32_t rxring)
+{
+
+ memset(msg, 0, sizeof(struct arch_if_req_hdr));
+ msg->flags = ARCH_IF_FLAGS_VALID_FLAG |
+ ARCH_IF_FLAGS_RESP_RING_TYPE_ET | ARCH_IF_FLAGS_RESP_TYPE_S;
+ msg->req_type = type;
+ msg->resp_pipe_id = rxring;
+}
+
+void
+qat_msg_cmn_hdr_populate(struct fw_la_bulk_req *msg, bus_addr_t desc_paddr,
+ uint8_t hdrsz, uint8_t hwblksz, uint16_t comn_req_flags, uint32_t flow_id)
+{
+ struct fw_comn_req_hdr *hdr = &msg->comn_hdr;
+
+ hdr->comn_req_flags = comn_req_flags;
+ hdr->content_desc_params_sz = hwblksz;
+ hdr->content_desc_hdr_sz = hdrsz;
+ hdr->content_desc_addr = desc_paddr;
+ msg->flow_id = flow_id;
+}
+
+void
+qat_msg_service_cmd_populate(struct fw_la_bulk_req *msg, enum fw_la_cmd_id cmdid,
+ uint16_t cmd_flags)
+{
+ msg->comn_la_req.la_cmd_id = cmdid;
+ msg->comn_la_req.u.la_flags = cmd_flags;
+}
+
+void
+qat_msg_cmn_mid_populate(struct fw_comn_req_mid *msg, void *cookie,
+ uint64_t src, uint64_t dst)
+{
+
+ msg->opaque_data = (uint64_t)(uintptr_t)cookie;
+ msg->src_data_addr = src;
+ if (dst == 0)
+ msg->dest_data_addr = src;
+ else
+ msg->dest_data_addr = dst;
+}
+
+void
+qat_msg_req_params_populate(struct fw_la_bulk_req *msg,
+ bus_addr_t req_params_paddr, uint8_t req_params_sz)
+{
+ msg->req_params_addr = req_params_paddr;
+ msg->comn_la_req.u1.req_params_blk_sz = req_params_sz / 8;
+}
+
+void
+qat_msg_cmn_footer_populate(union fw_comn_req_ftr *msg, uint64_t next_addr)
+{
+ msg->next_request_addr = next_addr;
+}
+
+void
+qat_msg_params_populate(struct fw_la_bulk_req *msg,
+ struct qat_crypto_desc *desc, uint8_t req_params_sz,
+ uint16_t service_cmd_flags, uint16_t comn_req_flags)
+{
+ qat_msg_cmn_hdr_populate(msg, desc->qcd_desc_paddr,
+ desc->qcd_hdr_sz, desc->qcd_hw_blk_sz, comn_req_flags, 0);
+ qat_msg_service_cmd_populate(msg, desc->qcd_cmd_id, service_cmd_flags);
+ qat_msg_cmn_mid_populate(&msg->comn_mid, NULL, 0, 0);
+ qat_msg_req_params_populate(msg, 0, req_params_sz);
+ qat_msg_cmn_footer_populate(&msg->comn_ftr, 0);
+}
+
+static int
+qat_adm_ring_init_ring_table(struct qat_softc *sc)
+{
+ struct qat_admin_rings *qadr = &sc->sc_admin_rings;
+
+ if (sc->sc_ae_num == 1) {
+ qadr->qadr_cya_ring_tbl =
+ &qadr->qadr_master_ring_tbl[0];
+ qadr->qadr_srv_mask[0] = QAT_SERVICE_CRYPTO_A;
+ } else if (sc->sc_ae_num == 2 || sc->sc_ae_num == 4) {
+ qadr->qadr_cya_ring_tbl =
+ &qadr->qadr_master_ring_tbl[0];
+ qadr->qadr_srv_mask[0] = QAT_SERVICE_CRYPTO_A;
+ qadr->qadr_cyb_ring_tbl =
+ &qadr->qadr_master_ring_tbl[1];
+ qadr->qadr_srv_mask[1] = QAT_SERVICE_CRYPTO_B;
+ }
+
+ return 0;
+}
+
+int
+qat_adm_ring_init(struct qat_softc *sc)
+{
+ struct qat_admin_rings *qadr = &sc->sc_admin_rings;
+ int error, i, j;
+
+ error = qat_alloc_dmamem(sc, &qadr->qadr_dma, 1, PAGE_SIZE, PAGE_SIZE);
+ if (error)
+ return error;
+
+ qadr->qadr_master_ring_tbl = qadr->qadr_dma.qdm_dma_vaddr;
+
+ MPASS(sc->sc_ae_num *
+ sizeof(struct fw_init_ring_table) <= PAGE_SIZE);
+
+ /* Initialize the Master Ring Table */
+ for (i = 0; i < sc->sc_ae_num; i++) {
+ struct fw_init_ring_table *firt =
+ &qadr->qadr_master_ring_tbl[i];
+
+ for (j = 0; j < INIT_RING_TABLE_SZ; j++) {
+ struct fw_init_ring_params *firp =
+ &firt->firt_bulk_rings[j];
+
+ firp->firp_reserved = 0;
+ firp->firp_curr_weight = QAT_DEFAULT_RING_WEIGHT;
+ firp->firp_init_weight = QAT_DEFAULT_RING_WEIGHT;
+ firp->firp_ring_pvl = QAT_DEFAULT_PVL;
+ }
+ memset(firt->firt_ring_mask, 0, sizeof(firt->firt_ring_mask));
+ }
+
+ error = qat_etr_setup_ring(sc, 0, RING_NUM_ADMIN_TX,
+ ADMIN_RING_SIZE, sc->sc_hw.qhw_fw_req_size,
+ NULL, NULL, "admin_tx", &qadr->qadr_admin_tx);
+ if (error)
+ return error;
+
+ error = qat_etr_setup_ring(sc, 0, RING_NUM_ADMIN_RX,
+ ADMIN_RING_SIZE, sc->sc_hw.qhw_fw_resp_size,
+ qat_adm_ring_intr, qadr, "admin_rx", &qadr->qadr_admin_rx);
+ if (error)
+ return error;
+
+ /*
+ * Finally set up the service indices into the Master Ring Table
+ * and convenient ring table pointers for each service enabled.
+ * Only the Admin rings are initialized.
+ */
+ error = qat_adm_ring_init_ring_table(sc);
+ if (error)
+ return error;
+
+ /*
+ * Calculate the number of active AEs per QAT
+ * needed for Shram partitioning.
+ */
+ for (i = 0; i < sc->sc_ae_num; i++) {
+ if (qadr->qadr_srv_mask[i])
+ qadr->qadr_active_aes_per_accel++;
+ }
+
+ return 0;
+}
+
+static void
+qat_adm_ring_build_slice_mask(uint16_t *slice_mask, uint32_t srv_mask,
+ uint32_t init_shram)
+{
+ uint16_t shram = 0, comn_req = 0;
+
+ if (init_shram)
+ shram = COMN_REQ_SHRAM_INIT_REQUIRED;
+
+ if (srv_mask & QAT_SERVICE_CRYPTO_A)
+ comn_req |= COMN_REQ_CY0_ONLY(shram);
+ if (srv_mask & QAT_SERVICE_CRYPTO_B)
+ comn_req |= COMN_REQ_CY1_ONLY(shram);
+
+ *slice_mask = comn_req;
+}
+
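+/*
+ * Partition the 64-bit SHRAM page-ownership mask among the active AEs:
+ * one active AE owns all 64 pages, two split them 32/32, and three split
+ * them 23/23/18 (bits 0-22, 23-45 and 46-63, respectively).
+ */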
+static void
+qat_adm_ring_build_shram_mask(uint64_t *shram_mask, uint32_t active_aes,
+ uint32_t ae)
+{
+ *shram_mask = 0;
+
+ if (active_aes == 1) {
+ *shram_mask = ~(*shram_mask);
+ } else if (active_aes == 2) {
+ if (ae == 1)
+ *shram_mask = ((~(*shram_mask)) & 0xffffffff);
+ else
+ *shram_mask = ((~(*shram_mask)) & 0xffffffff00000000ull);
+ } else if (active_aes == 3) {
+ if (ae == 0)
+ *shram_mask = ((~(*shram_mask)) & 0x7fffff);
+ else if (ae == 1)
+ *shram_mask = ((~(*shram_mask)) & 0x3fffff800000ull);
+ else
+ *shram_mask = ((~(*shram_mask)) & 0xffffc00000000000ull);
+ } else {
+ panic("Only three services are supported in current version");
+ }
+}
+
+static int
+qat_adm_ring_build_ring_table(struct qat_softc *sc, uint32_t ae)
+{
+ struct qat_admin_rings *qadr = &sc->sc_admin_rings;
+ struct fw_init_ring_table *tbl;
+ struct fw_init_ring_params *param;
+ uint8_t srv_mask = sc->sc_admin_rings.qadr_srv_mask[ae];
+
+ if ((srv_mask & QAT_SERVICE_CRYPTO_A)) {
+ tbl = qadr->qadr_cya_ring_tbl;
+ } else if ((srv_mask & QAT_SERVICE_CRYPTO_B)) {
+ tbl = qadr->qadr_cyb_ring_tbl;
+ } else {
+ device_printf(sc->sc_dev,
+ "Invalid execution engine %d\n", ae);
+ return EINVAL;
+ }
+
+ param = &tbl->firt_bulk_rings[sc->sc_hw.qhw_ring_sym_tx];
+ param->firp_curr_weight = QAT_HI_PRIO_RING_WEIGHT;
+ param->firp_init_weight = QAT_HI_PRIO_RING_WEIGHT;
+ FW_INIT_RING_MASK_SET(tbl, sc->sc_hw.qhw_ring_sym_tx);
+
+ return 0;
+}
+
+static int
+qat_adm_ring_build_init_msg(struct qat_softc *sc,
+ struct fw_init_req *initmsg, enum fw_init_cmd_id cmd, uint32_t ae,
+ struct qat_accel_init_cb *cb)
+{
+ struct fw_init_set_ae_info_hdr *aehdr;
+ struct fw_init_set_ae_info *aeinfo;
+ struct fw_init_set_ring_info_hdr *ringhdr;
+ struct fw_init_set_ring_info *ringinfo;
+ int init_shram = 0, tgt_id, cluster_id;
+ uint32_t srv_mask;
+
+ srv_mask = sc->sc_admin_rings.qadr_srv_mask[
+ ae % sc->sc_ae_num];
+
+ memset(initmsg, 0, sizeof(struct fw_init_req));
+
+ qat_msg_req_type_populate(&initmsg->comn_hdr.arch_if,
+ ARCH_IF_REQ_QAT_FW_INIT,
+ sc->sc_admin_rings.qadr_admin_rx->qr_ring_id);
+
+ qat_msg_cmn_mid_populate(&initmsg->comn_mid, cb, 0, 0);
+
+ switch (cmd) {
+ case FW_INIT_CMD_SET_AE_INFO:
+ if (ae % sc->sc_ae_num == 0)
+ init_shram = 1;
+ if (ae >= sc->sc_ae_num) {
+ tgt_id = 1;
+ cluster_id = 1;
+ } else {
+ cluster_id = 0;
+ if (sc->sc_ae_mask)
+ tgt_id = 0;
+ else
+ tgt_id = 1;
+ }
+ aehdr = &initmsg->u.set_ae_info;
+ aeinfo = &initmsg->u1.set_ae_info;
+
+ aehdr->init_cmd_id = cmd;
+ /* XXX that does not support sparse ae_mask */
+ aehdr->init_trgt_id = ae;
+ aehdr->init_ring_cluster_id = cluster_id;
+ aehdr->init_qat_id = tgt_id;
+
+ qat_adm_ring_build_slice_mask(&aehdr->init_slice_mask, srv_mask,
+ init_shram);
+
+ qat_adm_ring_build_shram_mask(&aeinfo->init_shram_mask,
+ sc->sc_admin_rings.qadr_active_aes_per_accel,
+ ae % sc->sc_ae_num);
+
+ break;
+ case FW_INIT_CMD_SET_RING_INFO:
+ ringhdr = &initmsg->u.set_ring_info;
+ ringinfo = &initmsg->u1.set_ring_info;
+
+ ringhdr->init_cmd_id = cmd;
+ /* XXX that does not support sparse ae_mask */
+ ringhdr->init_trgt_id = ae;
+
+ /* XXX */
+ qat_adm_ring_build_ring_table(sc,
+ ae % sc->sc_ae_num);
+
+ ringhdr->init_ring_tbl_sz = sizeof(struct fw_init_ring_table);
+
+ ringinfo->init_ring_table_ptr =
+ sc->sc_admin_rings.qadr_dma.qdm_dma_seg.ds_addr +
+ ((ae % sc->sc_ae_num) *
+ sizeof(struct fw_init_ring_table));
+
+ break;
+ default:
+ return ENOTSUP;
+ }
+
+ return 0;
+}
+
+static int
+qat_adm_ring_send_init_msg_sync(struct qat_softc *sc,
+ enum fw_init_cmd_id cmd, uint32_t ae)
+{
+ struct fw_init_req initmsg;
+ struct qat_accel_init_cb cb;
+ int error;
+
+ error = qat_adm_ring_build_init_msg(sc, &initmsg, cmd, ae, &cb);
+ if (error)
+ return error;
+
+ error = qat_etr_put_msg(sc, sc->sc_admin_rings.qadr_admin_tx,
+ (uint32_t *)&initmsg);
+ if (error)
+ return error;
+
+ error = tsleep(&cb, PZERO, "qat_init", hz * 3 / 2);
+ if (error) {
+ device_printf(sc->sc_dev,
+		    "Timed out waiting for firmware initialization: %d\n", error);
+ return error;
+ }
+ if (cb.qaic_status) {
+ device_printf(sc->sc_dev, "Failed to initialize firmware\n");
+ return EIO;
+ }
+
+ return error;
+}
+
+static int
+qat_adm_ring_send_init_msg(struct qat_softc *sc,
+ enum fw_init_cmd_id cmd)
+{
+ struct qat_admin_rings *qadr = &sc->sc_admin_rings;
+ uint32_t error, ae;
+
+ for (ae = 0; ae < sc->sc_ae_num; ae++) {
+ uint8_t srv_mask = qadr->qadr_srv_mask[ae];
+ switch (cmd) {
+ case FW_INIT_CMD_SET_AE_INFO:
+ case FW_INIT_CMD_SET_RING_INFO:
+ if (!srv_mask)
+ continue;
+ break;
+ case FW_INIT_CMD_TRNG_ENABLE:
+ case FW_INIT_CMD_TRNG_DISABLE:
+ if (!(srv_mask & QAT_SERVICE_CRYPTO_A))
+ continue;
+ break;
+ default:
+ return ENOTSUP;
+ }
+
+ error = qat_adm_ring_send_init_msg_sync(sc, cmd, ae);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+int
+qat_adm_ring_send_init(struct qat_softc *sc)
+{
+ int error;
+
+ error = qat_adm_ring_send_init_msg(sc, FW_INIT_CMD_SET_AE_INFO);
+ if (error)
+ return error;
+
+ error = qat_adm_ring_send_init_msg(sc, FW_INIT_CMD_SET_RING_INFO);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int
+qat_adm_ring_intr(struct qat_softc *sc, void *arg, void *msg)
+{
+ struct arch_if_resp_hdr *resp;
+ struct fw_init_resp *init_resp;
+ struct qat_accel_init_cb *init_cb;
+ int handled = 0;
+
+ resp = (struct arch_if_resp_hdr *)msg;
+
+ switch (resp->resp_type) {
+ case ARCH_IF_REQ_QAT_FW_INIT:
+ init_resp = (struct fw_init_resp *)msg;
+ init_cb = (struct qat_accel_init_cb *)
+ (uintptr_t)init_resp->comn_resp.opaque_data;
+ init_cb->qaic_status =
+ __SHIFTOUT(init_resp->comn_resp.comn_status,
+ COMN_RESP_INIT_ADMIN_STATUS);
+ wakeup(init_cb);
+ break;
+ default:
+ device_printf(sc->sc_dev,
+ "unknown resp type %d\n", resp->resp_type);
+ break;
+ }
+
+ return handled;
+}
+
+static inline uint16_t
+qat_hw15_get_comn_req_flags(uint8_t ae)
+{
+ if (ae == 0) {
+ return COMN_REQ_ORD_STRICT | COMN_REQ_PTR_TYPE_SGL |
+ COMN_REQ_AUTH0_SLICE_REQUIRED |
+ COMN_REQ_CIPHER0_SLICE_REQUIRED;
+ } else {
+ return COMN_REQ_ORD_STRICT | COMN_REQ_PTR_TYPE_SGL |
+ COMN_REQ_AUTH1_SLICE_REQUIRED |
+ COMN_REQ_CIPHER1_SLICE_REQUIRED;
+ }
+}
+
+static uint32_t
+qat_hw15_crypto_setup_cipher_desc(struct qat_crypto_desc *desc,
+ struct qat_session *qs, struct fw_cipher_hdr *cipher_hdr,
+ uint32_t hw_blk_offset, enum fw_slice next_slice)
+{
+ desc->qcd_cipher_blk_sz = HW_AES_BLK_SZ;
+
+ cipher_hdr->state_padding_sz = 0;
+ cipher_hdr->key_sz = qs->qs_cipher_klen / 8;
+
+ cipher_hdr->state_sz = desc->qcd_cipher_blk_sz / 8;
+
+ cipher_hdr->next_id = next_slice;
+ cipher_hdr->curr_id = FW_SLICE_CIPHER;
+ cipher_hdr->offset = hw_blk_offset / 8;
+ cipher_hdr->resrvd = 0;
+
+ return sizeof(struct hw_cipher_config) + qs->qs_cipher_klen;
+}
+
+static void
+qat_hw15_crypto_setup_cipher_config(const struct qat_crypto_desc *desc,
+ const struct qat_session *qs, const struct cryptop *crp,
+ struct hw_cipher_config *cipher_config)
+{
+ const uint8_t *key;
+ uint8_t *cipher_key;
+
+ cipher_config->val = qat_crypto_load_cipher_session(desc, qs);
+ cipher_config->reserved = 0;
+
+ cipher_key = (uint8_t *)(cipher_config + 1);
+ if (crp != NULL && crp->crp_cipher_key != NULL)
+ key = crp->crp_cipher_key;
+ else
+ key = qs->qs_cipher_key;
+ memcpy(cipher_key, key, qs->qs_cipher_klen);
+}
+
+static uint32_t
+qat_hw15_crypto_setup_auth_desc(struct qat_crypto_desc *desc,
+ struct qat_session *qs, struct fw_auth_hdr *auth_hdr,
+ uint32_t ctrl_blk_offset, uint32_t hw_blk_offset,
+ enum fw_slice next_slice)
+{
+ const struct qat_sym_hash_def *hash_def;
+
+ (void)qat_crypto_load_auth_session(desc, qs, &hash_def);
+
+ auth_hdr->next_id = next_slice;
+ auth_hdr->curr_id = FW_SLICE_AUTH;
+ auth_hdr->offset = hw_blk_offset / 8;
+ auth_hdr->resrvd = 0;
+
+ auth_hdr->hash_flags = FW_AUTH_HDR_FLAG_NO_NESTED;
+ auth_hdr->u.inner_prefix_sz = 0;
+ auth_hdr->outer_prefix_sz = 0;
+ auth_hdr->final_sz = hash_def->qshd_alg->qshai_digest_len;
+ auth_hdr->inner_state1_sz =
+ roundup(hash_def->qshd_qat->qshqi_state1_len, 8);
+ auth_hdr->inner_res_sz = hash_def->qshd_alg->qshai_digest_len;
+ auth_hdr->inner_state2_sz =
+ roundup(hash_def->qshd_qat->qshqi_state2_len, 8);
+ auth_hdr->inner_state2_off = auth_hdr->offset +
+ ((sizeof(struct hw_auth_setup) + auth_hdr->inner_state1_sz) / 8);
+
+ auth_hdr->outer_config_off = 0;
+ auth_hdr->outer_state1_sz = 0;
+ auth_hdr->outer_res_sz = 0;
+ auth_hdr->outer_prefix_off = 0;
+
+ desc->qcd_auth_sz = hash_def->qshd_alg->qshai_sah->hashsize;
+ desc->qcd_state_storage_sz = (sizeof(struct hw_auth_counter) +
+ roundup(hash_def->qshd_alg->qshai_state_size, 8)) / 8;
+ desc->qcd_gcm_aad_sz_offset1 = desc->qcd_auth_offset +
+ sizeof(struct hw_auth_setup) + auth_hdr->inner_state1_sz +
+ AES_BLOCK_LEN;
+ desc->qcd_gcm_aad_sz_offset2 = ctrl_blk_offset +
+ offsetof(struct fw_auth_hdr, u.aad_sz);
+
+ return sizeof(struct hw_auth_setup) + auth_hdr->inner_state1_sz +
+ auth_hdr->inner_state2_sz;
+}
+
+static void
+qat_hw15_crypto_setup_auth_setup(const struct qat_crypto_desc *desc,
+ const struct qat_session *qs, const struct cryptop *crp,
+ struct hw_auth_setup *auth_setup)
+{
+ const struct qat_sym_hash_def *hash_def;
+ const uint8_t *key;
+ uint8_t *state1, *state2;
+ uint32_t state_sz, state1_sz, state2_sz, state1_pad_len, state2_pad_len;
+
+ auth_setup->auth_config.config = qat_crypto_load_auth_session(desc, qs,
+ &hash_def);
+ auth_setup->auth_config.reserved = 0;
+
+ auth_setup->auth_counter.counter =
+ htobe32(hash_def->qshd_qat->qshqi_auth_counter);
+ auth_setup->auth_counter.reserved = 0;
+
+ state1 = (uint8_t *)(auth_setup + 1);
+ state2 = state1 + roundup(hash_def->qshd_qat->qshqi_state1_len, 8);
+ switch (qs->qs_auth_algo) {
+ case HW_AUTH_ALGO_GALOIS_128:
+ qat_crypto_gmac_precompute(desc, qs->qs_cipher_key,
+ qs->qs_cipher_klen, hash_def, state2);
+ break;
+ case HW_AUTH_ALGO_SHA1:
+ state_sz = hash_def->qshd_alg->qshai_state_size;
+ state1_sz = roundup(hash_def->qshd_qat->qshqi_state1_len, 8);
+ state2_sz = roundup(hash_def->qshd_qat->qshqi_state2_len, 8);
+ if (qs->qs_auth_mode == HW_AUTH_MODE1) {
+ state1_pad_len = state1_sz - state_sz;
+ state2_pad_len = state2_sz - state_sz;
+ if (state1_pad_len > 0)
+ memset(state1 + state_sz, 0, state1_pad_len);
+ if (state2_pad_len > 0)
+ memset(state2 + state_sz, 0, state2_pad_len);
+ }
+ /* FALLTHROUGH */
+ case HW_AUTH_ALGO_SHA256:
+ case HW_AUTH_ALGO_SHA384:
+ case HW_AUTH_ALGO_SHA512:
+ switch (qs->qs_auth_mode) {
+ case HW_AUTH_MODE0:
+ memcpy(state1, hash_def->qshd_alg->qshai_init_state,
+ state1_sz);
+ /* Override for mode 0 hashes. */
+ auth_setup->auth_counter.counter = 0;
+ break;
+ case HW_AUTH_MODE1:
+ if (crp != NULL && crp->crp_auth_key != NULL)
+ key = crp->crp_auth_key;
+ else
+ key = qs->qs_auth_key;
+ if (key != NULL) {
+ qat_crypto_hmac_precompute(desc, key,
+ qs->qs_auth_klen, hash_def, state1, state2);
+ }
+ break;
+ default:
+ panic("%s: unhandled auth mode %d", __func__,
+ qs->qs_auth_mode);
+ }
+ break;
+ default:
+ panic("%s: unhandled auth algorithm %d", __func__,
+ qs->qs_auth_algo);
+ }
+}
+
+void
+qat_hw15_crypto_setup_desc(struct qat_crypto *qcy, struct qat_session *qs,
+ struct qat_crypto_desc *desc)
+{
+ struct fw_cipher_hdr *cipher_hdr;
+ struct fw_auth_hdr *auth_hdr;
+ struct fw_la_bulk_req *req_cache;
+ struct hw_auth_setup *auth_setup;
+ struct hw_cipher_config *cipher_config;
+ uint32_t ctrl_blk_sz, ctrl_blk_offset, hw_blk_offset;
+ int i;
+ uint16_t la_cmd_flags;
+ uint8_t req_params_sz;
+ uint8_t *ctrl_blk_ptr, *hw_blk_ptr;
+
+ ctrl_blk_sz = 0;
+ if (qs->qs_cipher_algo != HW_CIPHER_ALGO_NULL)
+ ctrl_blk_sz += sizeof(struct fw_cipher_hdr);
+ if (qs->qs_auth_algo != HW_AUTH_ALGO_NULL)
+ ctrl_blk_sz += sizeof(struct fw_auth_hdr);
+
+ ctrl_blk_ptr = desc->qcd_content_desc;
+ ctrl_blk_offset = 0;
+ hw_blk_ptr = ctrl_blk_ptr + ctrl_blk_sz;
+ hw_blk_offset = 0;
+
+ la_cmd_flags = 0;
+ req_params_sz = 0;
+ for (i = 0; i < MAX_FW_SLICE; i++) {
+ switch (desc->qcd_slices[i]) {
+ case FW_SLICE_CIPHER:
+ cipher_hdr = (struct fw_cipher_hdr *)(ctrl_blk_ptr +
+ ctrl_blk_offset);
+ cipher_config = (struct hw_cipher_config *)(hw_blk_ptr +
+ hw_blk_offset);
+ desc->qcd_cipher_offset = ctrl_blk_sz + hw_blk_offset;
+ hw_blk_offset += qat_hw15_crypto_setup_cipher_desc(desc,
+ qs, cipher_hdr, hw_blk_offset,
+ desc->qcd_slices[i + 1]);
+ qat_hw15_crypto_setup_cipher_config(desc, qs, NULL,
+ cipher_config);
+ ctrl_blk_offset += sizeof(struct fw_cipher_hdr);
+ req_params_sz += sizeof(struct fw_la_cipher_req_params);
+ break;
+ case FW_SLICE_AUTH:
+ auth_hdr = (struct fw_auth_hdr *)(ctrl_blk_ptr +
+ ctrl_blk_offset);
+ auth_setup = (struct hw_auth_setup *)(hw_blk_ptr +
+ hw_blk_offset);
+ desc->qcd_auth_offset = ctrl_blk_sz + hw_blk_offset;
+ hw_blk_offset += qat_hw15_crypto_setup_auth_desc(desc,
+ qs, auth_hdr, ctrl_blk_offset, hw_blk_offset,
+ desc->qcd_slices[i + 1]);
+ qat_hw15_crypto_setup_auth_setup(desc, qs, NULL,
+ auth_setup);
+ ctrl_blk_offset += sizeof(struct fw_auth_hdr);
+ req_params_sz += sizeof(struct fw_la_auth_req_params);
+ la_cmd_flags |= LA_FLAGS_RET_AUTH_RES;
+ /* no digest verify */
+ break;
+ case FW_SLICE_DRAM_WR:
+ i = MAX_FW_SLICE; /* end of chain */
+ break;
+ default:
+ MPASS(0);
+ break;
+ }
+ }
+
+ desc->qcd_hdr_sz = ctrl_blk_offset / 8;
+ desc->qcd_hw_blk_sz = hw_blk_offset / 8;
+
+ req_cache = (struct fw_la_bulk_req *)desc->qcd_req_cache;
+ qat_msg_req_type_populate(
+ &req_cache->comn_hdr.arch_if,
+ ARCH_IF_REQ_QAT_FW_LA, 0);
+
+ if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128)
+ la_cmd_flags |= LA_FLAGS_PROTO_GCM | LA_FLAGS_GCM_IV_LEN_FLAG;
+ else
+ la_cmd_flags |= LA_FLAGS_PROTO_NO;
+
+ qat_msg_params_populate(req_cache, desc, req_params_sz,
+ la_cmd_flags, 0);
+
+ bus_dmamap_sync(qs->qs_desc_mem.qdm_dma_tag,
+ qs->qs_desc_mem.qdm_dma_map, BUS_DMASYNC_PREWRITE);
+}
+
+static void
+qat_hw15_crypto_req_setkey(const struct qat_crypto_desc *desc,
+ const struct qat_session *qs, struct qat_sym_cookie *qsc,
+ struct fw_la_bulk_req *bulk_req, struct cryptop *crp)
+{
+ struct hw_auth_setup *auth_setup;
+ struct hw_cipher_config *cipher_config;
+ uint8_t *cdesc;
+ int i;
+
+ cdesc = qsc->qsc_content_desc;
+ memcpy(cdesc, desc->qcd_content_desc, CONTENT_DESC_MAX_SIZE);
+ for (i = 0; i < MAX_FW_SLICE; i++) {
+ switch (desc->qcd_slices[i]) {
+ case FW_SLICE_CIPHER:
+ cipher_config = (struct hw_cipher_config *)
+ (cdesc + desc->qcd_cipher_offset);
+ qat_hw15_crypto_setup_cipher_config(desc, qs, crp,
+ cipher_config);
+ break;
+ case FW_SLICE_AUTH:
+ auth_setup = (struct hw_auth_setup *)
+ (cdesc + desc->qcd_auth_offset);
+ qat_hw15_crypto_setup_auth_setup(desc, qs, crp,
+ auth_setup);
+ break;
+ case FW_SLICE_DRAM_WR:
+ i = MAX_FW_SLICE; /* end of chain */
+ break;
+ default:
+ MPASS(0);
+ }
+ }
+
+ bulk_req->comn_hdr.content_desc_addr = qsc->qsc_content_desc_paddr;
+}
+
+void
+qat_hw15_crypto_setup_req_params(struct qat_crypto_bank *qcb,
+ struct qat_session *qs, struct qat_crypto_desc const *desc,
+ struct qat_sym_cookie *qsc, struct cryptop *crp)
+{
+ struct qat_sym_bulk_cookie *qsbc;
+ struct fw_la_bulk_req *bulk_req;
+ struct fw_la_cipher_req_params *cipher_req;
+ struct fw_la_auth_req_params *auth_req;
+ bus_addr_t digest_paddr;
+ uint8_t *aad_szp2, *req_params_ptr;
+ uint32_t aad_sz, *aad_szp1;
+ enum fw_la_cmd_id cmd_id = desc->qcd_cmd_id;
+ enum fw_slice next_slice;
+
+ qsbc = &qsc->u.qsc_bulk_cookie;
+
+ bulk_req = (struct fw_la_bulk_req *)qsbc->qsbc_msg;
+ memcpy(bulk_req, &desc->qcd_req_cache, QAT_HW15_SESSION_REQ_CACHE_SIZE);
+ bulk_req->comn_hdr.arch_if.resp_pipe_id = qcb->qcb_sym_rx->qr_ring_id;
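+	/* Pick the cipher/auth slice pair matching the bank's parity. */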
+ bulk_req->comn_hdr.comn_req_flags =
+ qat_hw15_get_comn_req_flags(qcb->qcb_bank % 2);
+ bulk_req->comn_mid.src_data_addr = qsc->qsc_buffer_list_desc_paddr;
+ bulk_req->comn_mid.dest_data_addr = qsc->qsc_buffer_list_desc_paddr;
+ bulk_req->req_params_addr = qsc->qsc_bulk_req_params_buf_paddr;
+ bulk_req->comn_ftr.next_request_addr = 0;
+ bulk_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)qsc;
+ if (__predict_false(crp->crp_cipher_key != NULL ||
+ crp->crp_auth_key != NULL)) {
+ qat_hw15_crypto_req_setkey(desc, qs, qsc, bulk_req, crp);
+ }
+
+ digest_paddr = 0;
+ if (desc->qcd_auth_sz != 0)
+ digest_paddr = qsc->qsc_auth_res_paddr;
+
+ req_params_ptr = qsbc->qsbc_req_params_buf;
+ memset(req_params_ptr, 0, sizeof(qsbc->qsbc_req_params_buf));
+
+ /*
+ * The SG list layout is a bit different for GCM and GMAC, it's simpler
+ * to handle those cases separately.
+ */
+ if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
+ cipher_req = (struct fw_la_cipher_req_params *)req_params_ptr;
+ auth_req = (struct fw_la_auth_req_params *)
+ (req_params_ptr + sizeof(struct fw_la_cipher_req_params));
+
+ cipher_req->cipher_state_sz = desc->qcd_cipher_blk_sz / 8;
+ cipher_req->curr_id = FW_SLICE_CIPHER;
+ if (cmd_id == FW_LA_CMD_HASH_CIPHER || cmd_id == FW_LA_CMD_AUTH)
+ cipher_req->next_id = FW_SLICE_DRAM_WR;
+ else
+ cipher_req->next_id = FW_SLICE_AUTH;
+ cipher_req->state_address = qsc->qsc_iv_buf_paddr;
+
+ if (cmd_id != FW_LA_CMD_AUTH) {
+ /*
+ * Don't fill out the cipher block if we're doing GMAC
+ * only.
+ */
+ cipher_req->cipher_off = 0;
+ cipher_req->cipher_len = crp->crp_payload_length;
+ }
+ auth_req->curr_id = FW_SLICE_AUTH;
+ if (cmd_id == FW_LA_CMD_HASH_CIPHER || cmd_id == FW_LA_CMD_AUTH)
+ auth_req->next_id = FW_SLICE_CIPHER;
+ else
+ auth_req->next_id = FW_SLICE_DRAM_WR;
+
+ auth_req->auth_res_address = digest_paddr;
+ auth_req->auth_res_sz = desc->qcd_auth_sz;
+
+ auth_req->auth_off = 0;
+ auth_req->auth_len = crp->crp_payload_length;
+
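+		/*
+		 * hash_state_sz counts 8-byte words, so the padded AAD
+		 * length is converted from bytes with the shift by 3.
+		 */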
+ auth_req->hash_state_sz =
+ roundup2(crp->crp_aad_length, QAT_AES_GCM_AAD_ALIGN) >> 3;
+ auth_req->u1.aad_addr = crp->crp_aad_length > 0 ?
+ qsc->qsc_gcm_aad_paddr : 0;
+
+ /*
+ * Update the hash state block if necessary. This only occurs
+ * when the AAD length changes between requests in a session and
+ * is synchronized by qat_process().
+ */
+ aad_sz = htobe32(crp->crp_aad_length);
+ aad_szp1 = (uint32_t *)(
+ __DECONST(uint8_t *, desc->qcd_content_desc) +
+ desc->qcd_gcm_aad_sz_offset1);
+ aad_szp2 = __DECONST(uint8_t *, desc->qcd_content_desc) +
+ desc->qcd_gcm_aad_sz_offset2;
+ if (__predict_false(*aad_szp1 != aad_sz)) {
+ *aad_szp1 = aad_sz;
+ *aad_szp2 = (uint8_t)roundup2(crp->crp_aad_length,
+ QAT_AES_GCM_AAD_ALIGN);
+ bus_dmamap_sync(qs->qs_desc_mem.qdm_dma_tag,
+ qs->qs_desc_mem.qdm_dma_map,
+ BUS_DMASYNC_PREWRITE);
+ }
+ } else {
+ cipher_req = (struct fw_la_cipher_req_params *)req_params_ptr;
+ if (cmd_id != FW_LA_CMD_AUTH) {
+ if (cmd_id == FW_LA_CMD_CIPHER ||
+ cmd_id == FW_LA_CMD_HASH_CIPHER)
+ next_slice = FW_SLICE_DRAM_WR;
+ else
+ next_slice = FW_SLICE_AUTH;
+
+ cipher_req->cipher_state_sz =
+ desc->qcd_cipher_blk_sz / 8;
+
+ cipher_req->curr_id = FW_SLICE_CIPHER;
+ cipher_req->next_id = next_slice;
+
+ cipher_req->cipher_off = crp->crp_aad_length == 0 ? 0 :
+ crp->crp_payload_start - crp->crp_aad_start;
+ cipher_req->cipher_len = crp->crp_payload_length;
+ cipher_req->state_address = qsc->qsc_iv_buf_paddr;
+ }
+ if (cmd_id != FW_LA_CMD_CIPHER) {
+ if (cmd_id == FW_LA_CMD_AUTH)
+ auth_req = (struct fw_la_auth_req_params *)
+ req_params_ptr;
+ else
+ auth_req = (struct fw_la_auth_req_params *)
+ (cipher_req + 1);
+ if (cmd_id == FW_LA_CMD_HASH_CIPHER)
+ next_slice = FW_SLICE_CIPHER;
+ else
+ next_slice = FW_SLICE_DRAM_WR;
+
+ auth_req->curr_id = FW_SLICE_AUTH;
+ auth_req->next_id = next_slice;
+
+ auth_req->auth_res_address = digest_paddr;
+ auth_req->auth_res_sz = desc->qcd_auth_sz;
+
+ auth_req->auth_len =
+ crp->crp_payload_length + crp->crp_aad_length;
+ auth_req->auth_off = 0;
+
+ auth_req->hash_state_sz = 0;
+ auth_req->u1.prefix_addr = desc->qcd_hash_state_paddr +
+ desc->qcd_state_storage_sz;
+ }
+ }
+}
diff --git a/sys/dev/qat/qat_hw15reg.h b/sys/dev/qat/qat_hw15reg.h
new file mode 100644
index 000000000000..df3fa14a0d85
--- /dev/null
+++ b/sys/dev/qat/qat_hw15reg.h
@@ -0,0 +1,635 @@
+/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
+/* $NetBSD: qat_hw15reg.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
+
+/*
+ * Copyright (c) 2019 Internet Initiative Japan, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright(c) 2007-2013 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#ifndef _DEV_PCI_QAT_HW15REG_H_
+#define _DEV_PCI_QAT_HW15REG_H_
+
+/* Default message size in bytes */
+#define FW_REQ_DEFAULT_SZ_HW15 64
+#define FW_RESP_DEFAULT_SZ_HW15 64
+
+#define ADMIN_RING_SIZE 256
+#define RING_NUM_ADMIN_TX 0
+#define RING_NUM_ADMIN_RX 1
+
+/* -------------------------------------------------------------------------- */
+/* accel */
+
+#define ARCH_IF_FLAGS_VALID_FLAG __BIT(7)
+#define ARCH_IF_FLAGS_RESP_RING_TYPE __BITS(4, 3)
+#define ARCH_IF_FLAGS_RESP_RING_TYPE_SHIFT 3
+#define ARCH_IF_FLAGS_RESP_RING_TYPE_SCRATCH (0 << ARCH_IF_FLAGS_RESP_RING_TYPE_SHIFT)
+#define ARCH_IF_FLAGS_RESP_RING_TYPE_NN (1 << ARCH_IF_FLAGS_RESP_RING_TYPE_SHIFT)
+#define ARCH_IF_FLAGS_RESP_RING_TYPE_ET (2 << ARCH_IF_FLAGS_RESP_RING_TYPE_SHIFT)
+#define ARCH_IF_FLAGS_RESP_TYPE __BITS(2, 0)
+#define ARCH_IF_FLAGS_RESP_TYPE_SHIFT 0
+#define ARCH_IF_FLAGS_RESP_TYPE_A (0 << ARCH_IF_FLAGS_RESP_TYPE_SHIFT)
+#define ARCH_IF_FLAGS_RESP_TYPE_B (1 << ARCH_IF_FLAGS_RESP_TYPE_SHIFT)
+#define ARCH_IF_FLAGS_RESP_TYPE_C (2 << ARCH_IF_FLAGS_RESP_TYPE_SHIFT)
+#define ARCH_IF_FLAGS_RESP_TYPE_S (3 << ARCH_IF_FLAGS_RESP_TYPE_SHIFT)
+
+enum arch_if_req {
+ ARCH_IF_REQ_NULL, /* NULL request type */
+
+ /* QAT-AE Service Request Type IDs - 01 to 20 */
+ ARCH_IF_REQ_QAT_FW_INIT, /* QAT-FW Initialization Request */
+ ARCH_IF_REQ_QAT_FW_ADMIN, /* QAT-FW Administration Request */
+ ARCH_IF_REQ_QAT_FW_PKE, /* QAT-FW PKE Request */
+ ARCH_IF_REQ_QAT_FW_LA, /* QAT-FW Lookaside Request */
+ ARCH_IF_REQ_QAT_FW_IPSEC, /* QAT-FW IPSec Request */
+ ARCH_IF_REQ_QAT_FW_SSL, /* QAT-FW SSL Request */
+ ARCH_IF_REQ_QAT_FW_DMA, /* QAT-FW DMA Request */
+ ARCH_IF_REQ_QAT_FW_STORAGE, /* QAT-FW Storage Request */
+ ARCH_IF_REQ_QAT_FW_COMPRESS, /* QAT-FW Compression Request */
+ ARCH_IF_REQ_QAT_FW_PATMATCH, /* QAT-FW Pattern Matching Request */
+
+ /* IP Service (Range Match and Exception) Blocks Request Type IDs 21 - 30 */
+ ARCH_IF_REQ_RM_FLOW_MISS = 21, /* RM flow miss request */
+ ARCH_IF_REQ_RM_FLOW_TIMER_EXP, /* RM flow timer exp Request */
+ ARCH_IF_REQ_IP_SERVICES_RFC_LOOKUP_UPDATE, /* RFC Lookup request */
+ ARCH_IF_REQ_IP_SERVICES_CONFIG_UPDATE, /* Config Update request */
+ ARCH_IF_REQ_IP_SERVICES_FCT_CONFIG, /* FCT Config request */
+ ARCH_IF_REQ_IP_SERVICES_NEXT_HOP_TIMER_EXPIRY, /* NH Timer expiry request */
+	ARCH_IF_REQ_IP_SERVICES_EXCEPTION,	/* Exception processing request */
+ ARCH_IF_REQ_IP_SERVICES_STACK_DRIVER, /* Send to SD request */
+ ARCH_IF_REQ_IP_SERVICES_ACTION_HANDLER, /* Send to AH request */
+ ARCH_IF_REQ_IP_SERVICES_EVENT_HANDLER, /* Send to EH request */
+ ARCH_IF_REQ_DELIMITER /* End delimiter */
+};
+
+struct arch_if_req_hdr {
+ uint8_t resp_dest_id;
+	/* Opaque identifier passed from the request to the response to allow
+	 * the response handler to perform any further processing */
+ uint8_t resp_pipe_id;
+ /* Response pipe to write the response associated with this request to */
+ uint8_t req_type;
+ /* Definition of the service described by the request */
+ uint8_t flags;
+ /* Request and response control flags */
+};
+
+struct arch_if_resp_hdr {
+ uint8_t dest_id;
+	/* Opaque identifier passed from the request to the response to allow
+	 * the response handler to perform any further processing */
+ uint8_t serv_id;
+ /* Definition of the service id generating the response */
+ uint8_t resp_type;
+ /* Definition of the service described by the request */
+ uint8_t flags;
+ /* Request and response control flags */
+};
+
+struct fw_comn_req_hdr {
+ struct arch_if_req_hdr arch_if;
+ /* Common arch fields used by all ICP interface requests. Remaining
+ * fields are specific to the common QAT FW service. */
+ uint16_t comn_req_flags;
+ /* Flags used to describe common processing required by the request and
+ * the meaning of parameters in it i.e. differentiating between a buffer
+ * descriptor and a flat buffer pointer in the source (src) and destination
+ * (dest) data address fields. Full definition of the fields is given
+ * below */
+ uint8_t content_desc_params_sz;
+ /* Size of the content descriptor parameters in quad words. These
+ * parameters describe the session setup configuration info for the
+ * slices that this request relies upon i.e. the configuration word and
+ * cipher key needed by the cipher slice if there is a request for cipher
+	 * processing. The format of the parameters is contained in icp_qat_hw.h
+	 * and varies depending on the algorithm and mode being used. It is the
+	 * client's responsibility to ensure this structure is correctly packed */
+ uint8_t content_desc_hdr_sz;
+ /* Size of the content descriptor header in quad words. This information
+ * is read into the QAT AE xfr registers */
+ uint64_t content_desc_addr;
+ /* Address of the content descriptor containing both the content header
+ * the size of which is defined by content_desc_hdr_sz followed by the
+	 * content parameters whose size is described by content_desc_params_sz
+ */
+};
+
+struct fw_comn_req_mid {
+ uint64_t opaque_data;
+ /* Opaque data passed unmodified from the request to response messages
+ * by firmware (fw) */
+ uint64_t src_data_addr;
+ /* Generic definition of the source data supplied to the QAT AE. The
+ * common flags are used to further describe the attributes of this
+ * field */
+ uint64_t dest_data_addr;
+ /* Generic definition of the destination data supplied to the QAT AE.
+ * The common flags are used to further describe the attributes of this
+ * field */
+};
+
+union fw_comn_req_ftr {
+ uint64_t next_request_addr;
+	/* Overloaded field; for stateful requests this is the pointer to the
+	 * next request descriptor */
+ struct {
+ uint32_t src_length;
+ /* Length of the source flat buffer in case the src buffer type is flat */
+ uint32_t dst_length;
+ /* Length of the destination flat buffer in case the dst buffer type is flat */
+ } s;
+};
+
+union fw_comn_error {
+ struct {
+ uint8_t resrvd; /* 8 bit reserved field */
+ uint8_t comn_err_code; /* 8 bit common error code */
+ } s;
+ /* Structure which is used for non-compression responses */
+
+ struct {
+ uint8_t xlat_err_code; /* 8 bit translator error field */
+ uint8_t cmp_err_code; /* 8 bit compression error field */
+ } s1;
+ /* Structure which is used for compression responses */
+};
+
+struct fw_comn_resp_hdr {
+ struct arch_if_resp_hdr arch_if;
+ /* Common arch fields used by all ICP interface response messages. The
+ * remaining fields are specific to the QAT FW */
+ union fw_comn_error comn_error;
+ /* This field is overloaded to allow for one 8 bit common error field
+ * or two 8 bit error fields from compression and translator */
+ uint8_t comn_status;
+ /* Status field which specifies which slice(s) report an error */
+ uint8_t serv_cmd_id;
+ /* For services that define multiple commands this field represents the
+ * command. If only 1 command is supported then this field will be 0 */
+ uint64_t opaque_data;
+ /* Opaque data passed from the request to the response message */
+};
+
+
+#define RING_MASK_TABLE_ENTRY_LOG_SZ (5)
+
+#define FW_INIT_RING_MASK_SET(table, id) \
+ table->firt_ring_mask[id >> RING_MASK_TABLE_ENTRY_LOG_SZ] =\
+ table->firt_ring_mask[id >> RING_MASK_TABLE_ENTRY_LOG_SZ] | \
+ (1 << (id & 0x1f))
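+
+/*
+ * Illustrative sketch (hypothetical, not from the original Intel/NetBSD
+ * sources): FW_INIT_RING_MASK_SET resolves a ring id into a word and bit
+ * of the firt_ring_mask[] array; each 32-bit word covers
+ * 2^RING_MASK_TABLE_ENTRY_LOG_SZ = 32 rings, so ring 37, for example,
+ * sets bit 5 of word 1.  An equivalent helper (hypothetical name) would be:
+ *
+ *	static inline void
+ *	example_ring_mask_set(uint32_t *mask, int id)
+ *	{
+ *		mask[id >> RING_MASK_TABLE_ENTRY_LOG_SZ] |= 1U << (id & 0x1f);
+ *	}
+ */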
+
+struct fw_init_ring_params {
+ uint8_t firp_curr_weight; /* Current ring weight (working copy),
+ * has to be equal to init_weight */
+ uint8_t firp_init_weight; /* Initial ring weight: -1 ... 0
+ * -1 is equal to FF, -2 is equal to FE,
+ * the weighting uses negative logic
+ * where FF means poll the ring once,
+ * -2 is poll the ring twice,
+ * 0 is poll the ring 255 times */
+ uint8_t firp_ring_pvl; /* Ring Privilege Level. */
+ uint8_t firp_reserved; /* Reserved field which must be set
+ * to 0 by the client */
+};
+
+#define INIT_RING_TABLE_SZ 128
+#define INIT_RING_TABLE_LW_SZ 4
+
+struct fw_init_ring_table {
+ struct fw_init_ring_params firt_bulk_rings[INIT_RING_TABLE_SZ];
+ /* array of ring parameters */
+ uint32_t firt_ring_mask[INIT_RING_TABLE_LW_SZ];
+ /* Structure to hold the bit masks for
+ * 128 rings. */
+};
+
+struct fw_init_set_ae_info_hdr {
+ uint16_t init_slice_mask; /* Init time flags to set the ownership of the slices */
+ uint16_t resrvd; /* Reserved field and must be set to 0 by the client */
+ uint8_t init_qat_id; /* Init time qat id described in the request */
+ uint8_t init_ring_cluster_id; /* Init time ring cluster Id */
+ uint8_t init_trgt_id; /* Init time target AE id described in the request */
+ uint8_t init_cmd_id; /* Init time command that is described in the request */
+};
+
+struct fw_init_set_ae_info {
+ uint64_t init_shram_mask; /* Init time shram mask to set the page ownership in the page pool of the AE */
+ uint64_t resrvd; /* Reserved field and must be set to 0 by the client */
+};
+
+struct fw_init_set_ring_info_hdr {
+ uint32_t resrvd; /* Reserved field and must be set to 0 by the client */
+ uint16_t init_ring_tbl_sz; /* Init time information to state size of the ring table */
+ uint8_t init_trgt_id; /* Init time target AE id described in the request */
+ uint8_t init_cmd_id; /* Init time command that is described in the request */
+};
+
+struct fw_init_set_ring_info {
+ uint64_t init_ring_table_ptr; /* Pointer to weighting information for 128 rings */
+ uint64_t resrvd; /* Reserved field and must be set to 0 by the client */
+};
+
+struct fw_init_trng_hdr {
+ uint32_t resrvd; /* Reserved field and must be set to 0 by the client */
+ union {
+ uint8_t resrvd; /* Reserved field set to 0 if cmd type is trng disable */
+ uint8_t init_trng_cfg_sz; /* Size of the trng config word in QW*/
+ } u;
+ uint8_t resrvd1; /* Reserved field and must be set to 0 by the client */
+ uint8_t init_trgt_id; /* Init time target AE id described in the request */
+ uint8_t init_cmd_id; /* Init time command that is described in the request */
+};
+
+struct fw_init_trng {
+ union {
+ uint64_t resrvd; /* Reserved field set to 0 if cmd type is trng disable */
+ uint64_t init_trng_cfg_ptr; /* Pointer to TRNG Slice config word*/
+ } u;
+ uint64_t resrvd; /* Reserved field and must be set to 0 by the client */
+};
+
+struct fw_init_req {
+ struct fw_comn_req_hdr comn_hdr; /* Common request header */
+ union {
+ struct fw_init_set_ae_info_hdr set_ae_info;
+ /* INIT SET_AE_INFO request header structure */
+ struct fw_init_set_ring_info_hdr set_ring_info;
+ /* INIT SET_RING_INFO request header structure */
+ struct fw_init_trng_hdr init_trng;
+ /* INIT TRNG ENABLE/DISABLE request header structure */
+ } u;
+ struct fw_comn_req_mid comn_mid; /* Common request middle section */
+ union {
+ struct fw_init_set_ae_info set_ae_info;
+ /* INIT SET_AE_INFO request data structure */
+ struct fw_init_set_ring_info set_ring_info;
+ /* INIT SET_RING_INFO request data structure */
+ struct fw_init_trng init_trng;
+ /* INIT TRNG ENABLE/DISABLE request data structure */
+ } u1;
+};
+
+enum fw_init_cmd_id {
+ FW_INIT_CMD_SET_AE_INFO, /* Setup AE Info command type */
+ FW_INIT_CMD_SET_RING_INFO, /* Setup Ring Info command type */
+ FW_INIT_CMD_TRNG_ENABLE, /* TRNG Enable command type */
+ FW_INIT_CMD_TRNG_DISABLE, /* TRNG Disable command type */
+ FW_INIT_CMD_DELIMITER /* Delimiter type */
+};
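+
+/*
+ * Illustrative sketch (hypothetical, not from the original Intel/NetBSD
+ * sources): filling out an INIT SET_RING_INFO request.  The helper name
+ * and the ae_id/ring_table_paddr parameters are hypothetical, and the
+ * assumption that init_ring_tbl_sz is expressed in bytes is not confirmed
+ * by this header.
+ *
+ *	static void
+ *	example_init_set_ring_info(struct fw_init_req *req, uint8_t ae_id,
+ *	    uint64_t ring_table_paddr)
+ *	{
+ *		memset(req, 0, sizeof(*req));
+ *		req->u.set_ring_info.init_cmd_id = FW_INIT_CMD_SET_RING_INFO;
+ *		req->u.set_ring_info.init_trgt_id = ae_id;
+ *		req->u.set_ring_info.init_ring_tbl_sz =
+ *		    sizeof(struct fw_init_ring_table);
+ *		req->u1.set_ring_info.init_ring_table_ptr = ring_table_paddr;
+ *	}
+ */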
+
+struct fw_init_resp {
+ struct fw_comn_resp_hdr comn_resp; /* Common interface response */
+ uint8_t resrvd[64 - sizeof(struct fw_comn_resp_hdr)];
+ /* XXX FW_RESP_DEFAULT_SZ_HW15 */
+ /* Reserved padding out to the default response size */
+};
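+
+/*
+ * Illustrative sketch (hypothetical, not from the original Intel/NetBSD
+ * sources): the resrvd[] member pads the response out to the fixed
+ * 64-byte HW1.5 response size, so, assuming the compiler inserts no extra
+ * padding, a compile-time check would be:
+ *
+ *	CTASSERT(sizeof(struct fw_init_resp) == 64);
+ */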
+
+/* -------------------------------------------------------------------------- */
+/* look aside */
+
+#define COMN_REQ_ORD UINT16_C(0x8000)
+#define COMN_REQ_ORD_SHIFT 15
+#define COMN_REQ_ORD_NONE (0 << COMN_REQ_ORD_SHIFT)
+#define COMN_REQ_ORD_STRICT (1 << COMN_REQ_ORD_SHIFT)
+#define COMN_REQ_PTR_TYPE UINT16_C(0x4000)
+#define COMN_REQ_PTR_TYPE_SHIFT 14
+#define COMN_REQ_PTR_TYPE_FLAT (0 << COMN_REQ_PTR_TYPE_SHIFT)
+#define COMN_REQ_PTR_TYPE_SGL (1 << COMN_REQ_PTR_TYPE_SHIFT)
+#define COMN_REQ_RESERVED UINT16_C(0x2000)
+#define COMN_REQ_SHRAM_INIT UINT16_C(0x1000)
+#define COMN_REQ_SHRAM_INIT_SHIFT 12
+#define COMN_REQ_SHRAM_INIT_REQUIRED (1 << COMN_REQ_SHRAM_INIT_SHIFT)
+#define COMN_REQ_REGEX_SLICE UINT16_C(0x0800)
+#define COMN_REQ_REGEX_SLICE_SHIFT 11
+#define COMN_REQ_REGEX_SLICE_REQUIRED (1 << COMN_REQ_REGEX_SLICE_SHIFT)
+#define COMN_REQ_XLAT_SLICE UINT16_C(0x0400)
+#define COMN_REQ_XLAT_SLICE_SHIFT 10
+#define COMN_REQ_XLAT_SLICE_REQUIRED (1 << COMN_REQ_XLAT_SLICE_SHIFT)
+#define COMN_REQ_CPR_SLICE UINT16_C(0x0200)
+#define COMN_REQ_CPR_SLICE_SHIFT 9
+#define COMN_REQ_CPR_SLICE_REQUIRED (1 << COMN_REQ_CPR_SLICE_SHIFT)
+#define COMN_REQ_BULK_SLICE UINT16_C(0x0100)
+#define COMN_REQ_BULK_SLICE_SHIFT 8
+#define COMN_REQ_BULK_SLICE_REQUIRED (1 << COMN_REQ_BULK_SLICE_SHIFT)
+#define COMN_REQ_STORAGE_SLICE UINT16_C(0x0080)
+#define COMN_REQ_STORAGE_SLICE_SHIFT 7
+#define COMN_REQ_STORAGE_SLICE_REQUIRED (1 << COMN_REQ_STORAGE_SLICE_SHIFT)
+#define COMN_REQ_RND_SLICE UINT16_C(0x0040)
+#define COMN_REQ_RND_SLICE_SHIFT 6
+#define COMN_REQ_RND_SLICE_REQUIRED (1 << COMN_REQ_RND_SLICE_SHIFT)
+#define COMN_REQ_PKE1_SLICE UINT16_C(0x0020)
+#define COMN_REQ_PKE1_SLICE_SHIFT 5
+#define COMN_REQ_PKE1_SLICE_REQUIRED (1 << COMN_REQ_PKE1_SLICE_SHIFT)
+#define COMN_REQ_PKE0_SLICE UINT16_C(0x0010)
+#define COMN_REQ_PKE0_SLICE_SHIFT 4
+#define COMN_REQ_PKE0_SLICE_REQUIRED (1 << COMN_REQ_PKE0_SLICE_SHIFT)
+#define COMN_REQ_AUTH1_SLICE UINT16_C(0x0008)
+#define COMN_REQ_AUTH1_SLICE_SHIFT 3
+#define COMN_REQ_AUTH1_SLICE_REQUIRED (1 << COMN_REQ_AUTH1_SLICE_SHIFT)
+#define COMN_REQ_AUTH0_SLICE UINT16_C(0x0004)
+#define COMN_REQ_AUTH0_SLICE_SHIFT 2
+#define COMN_REQ_AUTH0_SLICE_REQUIRED (1 << COMN_REQ_AUTH0_SLICE_SHIFT)
+#define COMN_REQ_CIPHER1_SLICE UINT16_C(0x0002)
+#define COMN_REQ_CIPHER1_SLICE_SHIFT 1
+#define COMN_REQ_CIPHER1_SLICE_REQUIRED (1 << COMN_REQ_CIPHER1_SLICE_SHIFT)
+#define COMN_REQ_CIPHER0_SLICE UINT16_C(0x0001)
+#define COMN_REQ_CIPHER0_SLICE_SHIFT 0
+#define COMN_REQ_CIPHER0_SLICE_REQUIRED (1 << COMN_REQ_CIPHER0_SLICE_SHIFT)
+
+#define COMN_REQ_CY0_ONLY(shram) \
+ COMN_REQ_ORD_STRICT | \
+ COMN_REQ_PTR_TYPE_FLAT | \
+ (shram) | \
+ COMN_REQ_RND_SLICE_REQUIRED | \
+ COMN_REQ_PKE0_SLICE_REQUIRED | \
+ COMN_REQ_AUTH0_SLICE_REQUIRED | \
+ COMN_REQ_CIPHER0_SLICE_REQUIRED;
+#define COMN_REQ_CY1_ONLY(shram) \
+ COMN_REQ_ORD_STRICT | \
+ COMN_REQ_PTR_TYPE_FLAT | \
+ (shram) | \
+ COMN_REQ_PKE1_SLICE_REQUIRED | \
+ COMN_REQ_AUTH1_SLICE_REQUIRED | \
+ COMN_REQ_CIPHER1_SLICE_REQUIRED;
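+
+/*
+ * Illustrative sketch (hypothetical, not from the original Intel/NetBSD
+ * sources): composing the comn_req_flags word for a request that uses
+ * only the "CY0" set of slices.  Note that the COMN_REQ_CY0_ONLY() and
+ * COMN_REQ_CY1_ONLY() macros above already end in a semicolon, so they
+ * are written to terminate an assignment statement.  The helper name and
+ * the shram_init parameter are hypothetical.
+ *
+ *	static inline uint16_t
+ *	example_cy0_flags(int shram_init)
+ *	{
+ *		uint16_t flags;
+ *
+ *		flags = COMN_REQ_CY0_ONLY(shram_init ?
+ *		    COMN_REQ_SHRAM_INIT_REQUIRED : 0)
+ *		return flags;
+ *	}
+ */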
+
+#define COMN_RESP_CRYPTO_STATUS __BIT(7)
+#define COMN_RESP_PKE_STATUS __BIT(6)
+#define COMN_RESP_CMP_STATUS __BIT(5)
+#define COMN_RESP_XLAT_STATUS __BIT(4)
+#define COMN_RESP_PM_STATUS __BIT(3)
+#define COMN_RESP_INIT_ADMIN_STATUS __BIT(2)
+
+#define COMN_STATUS_FLAG_OK 0
+#define COMN_STATUS_FLAG_ERROR 1
+
+struct fw_la_ssl_tls_common {
+ uint8_t out_len; /* Number of bytes of key material to output. */
+ uint8_t label_len; /* Number of bytes of label for SSL and bytes
+ * for TLS key generation */
+};
+
+struct fw_la_mgf_common {
+ uint8_t hash_len;
+ /* Number of bytes of hash output by the QAT per iteration */
+ uint8_t seed_len;
+ /* Number of bytes of seed provided in src buffer for MGF1 */
+};
+
+struct fw_cipher_hdr {
+ uint8_t state_sz;
+ /* State size in quad words of the cipher algorithm used in this session.
+ * Set to zero if the algorithm doesn't provide any state */
+ uint8_t offset;
+ /* Quad word offset from the content descriptor parameters address i.e.
+ * (content_address + (cd_hdr_sz << 3)) to the parameters for the cipher
+ * processing */
+ uint8_t curr_id;
+ /* Initialised with the cipher slice type */
+ uint8_t next_id;
+ /* Set to the next slice to pass the ciphered data through.
+ * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through
+ * any more slices after cipher */
+ uint16_t resrvd;
+ /* Reserved padding bytes to bring the struct to the word boundary. MUST be
+ * set to 0 */
+ uint8_t state_padding_sz;
+ /* State padding size in quad words. Set to 0 if no padding is required. */
+ uint8_t key_sz;
+ /* Key size in quad words of the cipher algorithm used in this session */
+};
+
+struct fw_auth_hdr {
+ uint8_t hash_flags;
+ /* General flags defining the processing to perform. 0 is normal processing
+ * and 1 means there is a nested hash processing loop to go through */
+ uint8_t offset;
+ /* Quad word offset from the content descriptor parameters address to the
+ * parameters for the auth processing */
+ uint8_t curr_id;
+ /* Initialised with the auth slice type */
+ uint8_t next_id;
+ /* Set to the next slice to pass data through.
+ * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through
+ * any more slices after auth */
+ union {
+ uint8_t inner_prefix_sz;
+ /* Size in bytes of the inner prefix data */
+ uint8_t aad_sz;
+ /* Size in bytes of padded AAD data to prefix to the packet for CCM
+ * or GCM processing */
+ } u;
+
+ uint8_t outer_prefix_sz;
+ /* Size in bytes of outer prefix data */
+ uint8_t final_sz;
+ /* Size in bytes of digest to be returned to the client if requested */
+ uint8_t inner_res_sz;
+ /* Size in bytes of the digest from the inner hash algorithm */
+ uint8_t resrvd;
+ /* This field is unused, assumed value is zero. */
+ uint8_t inner_state1_sz;
+ /* Size in bytes of inner hash state1 data. Must be a qword multiple */
+ uint8_t inner_state2_off;
+ /* Quad word offset from the content descriptor parameters pointer to the
+ * inner state2 value */
+ uint8_t inner_state2_sz;
+ /* Size in bytes of inner hash state2 data. Must be a qword multiple */
+ uint8_t outer_config_off;
+ /* Quad word offset from the content descriptor parameters pointer to the
+ * outer configuration information */
+ uint8_t outer_state1_sz;
+ /* Size in bytes of the outer state1 value */
+ uint8_t outer_res_sz;
+ /* Size in bytes of digest from the outer auth algorithm */
+ uint8_t outer_prefix_off;
+ /* Quad word offset from the start of the inner prefix data to the outer
+ * prefix information. Should equal the rounded inner prefix size, converted
+ * to qwords */
+};
+
+#define FW_AUTH_HDR_FLAG_DO_NESTED 1
+#define FW_AUTH_HDR_FLAG_NO_NESTED 0
+
+struct fw_la_comn_req {
+ union {
+ uint16_t la_flags;
+ /* Definition of the common LA processing flags used for the
+ * bulk processing */
+ union {
+ struct fw_la_ssl_tls_common ssl_tls_common;
+ /* For TLS or SSL Key Generation, this field is
+ * overloaded with ssl_tls common information */
+ struct fw_la_mgf_common mgf_common;
+ /* For MGF Key Generation, this field is overloaded with
+ mgf information */
+ } u;
+ } u;
+
+ union {
+ uint8_t resrvd;
+ /* If not used by a request, this field must be set to 0 */
+ uint8_t tls_seed_len;
+ /* Byte Len of tls seed */
+ uint8_t req_params_blk_sz;
+ /* For bulk processing this field represents the request
+ * parameters block size */
+ uint8_t trng_cfg_sz;
+ /* This field is used for TRNG_ENABLE requests to indicate the
+ * size of the TRNG Slice configuration word. Size is in QWs */
+ } u1;
+ uint8_t la_cmd_id;
+ /* Definition of the LA command defined by this request */
+};
+
+#define LA_FLAGS_GCM_IV_LEN_FLAG __BIT(9)
+#define LA_FLAGS_PROTO __BITS(8, 6)
+#define LA_FLAGS_PROTO_SNOW_3G __SHIFTIN(4, LA_FLAGS_PROTO)
+#define LA_FLAGS_PROTO_GCM __SHIFTIN(2, LA_FLAGS_PROTO)
+#define LA_FLAGS_PROTO_CCM __SHIFTIN(1, LA_FLAGS_PROTO)
+#define LA_FLAGS_PROTO_NO __SHIFTIN(0, LA_FLAGS_PROTO)
+#define LA_FLAGS_DIGEST_IN_BUFFER __BIT(5)
+#define LA_FLAGS_CMP_AUTH_RES __BIT(4)
+#define LA_FLAGS_RET_AUTH_RES __BIT(3)
+#define LA_FLAGS_UPDATE_STATE __BIT(2)
+#define LA_FLAGS_PARTIAL __BITS(1, 0)
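+
+/*
+ * Illustrative sketch (hypothetical, not from the original Intel/NetBSD
+ * sources): the protocol and boolean flags above are OR-ed together into
+ * la_flags, e.g. for an AES-GCM request that returns the computed digest
+ * (hypothetical helper name):
+ *
+ *	static inline uint16_t
+ *	example_gcm_la_flags(void)
+ *	{
+ *		return LA_FLAGS_PROTO_GCM | LA_FLAGS_RET_AUTH_RES;
+ *	}
+ */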
+
+struct fw_la_bulk_req {
+ struct fw_comn_req_hdr comn_hdr;
+ /* Common request header */
+ uint32_t flow_id;
+ /* Field used by Firmware to limit the number of stateful requests
+ * for a session being processed at a given point in time */
+ struct fw_la_comn_req comn_la_req;
+ /* Common LA request parameters */
+ struct fw_comn_req_mid comn_mid;
+ /* Common request middle section */
+ uint64_t req_params_addr;
+ /* Memory address of the request parameters */
+ union fw_comn_req_ftr comn_ftr;
+ /* Common request footer */
+};
+
+struct fw_la_resp {
+ struct fw_comn_resp_hdr comn_resp;
+ uint8_t resrvd[64 - sizeof(struct fw_comn_resp_hdr)];
+ /* FW_RESP_DEFAULT_SZ_HW15 */
+};
+
+struct fw_la_cipher_req_params {
+ uint8_t resrvd;
+ /* Reserved field and assumed set to 0 */
+ uint8_t cipher_state_sz;
+ /* Number of quad words of state data for the cipher algorithm */
+ uint8_t curr_id;
+ /* Initialised with the cipher slice type */
+ uint8_t next_id;
+ /* Set to the next slice to pass the ciphered data through.
+ * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through
+ * any more slices after cipher */
+ uint16_t resrvd1;
+ /* Reserved field, should be set to zero */
+ uint8_t resrvd2;
+ /* Reserved field, should be set to zero */
+ uint8_t next_offset;
+ /* Offset in bytes to the next request parameter block */
+ uint32_t cipher_off;
+ /* Byte offset from the start of packet to the cipher data region */
+ uint32_t cipher_len;
+ /* Byte length of the cipher data region */
+ uint64_t state_address;
+ /* Flat buffer address in memory of the cipher state information. Unused
+ * if the state size is 0 */
+};
+
+struct fw_la_auth_req_params {
+ uint8_t auth_res_sz;
+ /* Size in quad words of digest information to validate */
+ uint8_t hash_state_sz;
+ /* Number of quad words of inner and outer hash prefix data to process */
+ uint8_t curr_id;
+ /* Initialised with the auth slice type */
+ uint8_t next_id;
+ /* Set to the next slice to pass the auth data through.
+ * Set to ICP_QAT_FW_SLICE_NULL for in-place auth-only requests
+ * Set to ICP_QAT_FW_SLICE_DRAM_WR for all other request types
+ * if the data is not to go through any more slices after auth */
+ union {
+ uint16_t resrvd;
+ /* Reserved field should be set to zero for bulk services */
+ uint16_t tls_secret_len;
+ /* Length of Secret information for TLS. */
+ } u;
+ uint8_t resrvd;
+ /* Reserved field, should be set to zero */
+ uint8_t next_offset;
+ /* offset in bytes to the next request parameter block */
+ uint32_t auth_off;
+ /* Byte offset from the start of packet to the auth data region */
+ uint32_t auth_len;
+ /* Byte length of the auth data region */
+ union {
+ uint64_t prefix_addr;
+ /* Address of the prefix information */
+ uint64_t aad_addr;
+ /* Address of the AAD info in DRAM. Used for the CCM and GCM
+ * protocols */
+ } u1;
+ uint64_t auth_res_address;
+ /* Address of the auth result information to validate or the location to
+ * write back the digest information to */
+};
+
+#endif
diff --git a/sys/dev/qat/qat_hw15var.h b/sys/dev/qat/qat_hw15var.h
new file mode 100644
index 000000000000..4939aba7ff12
--- /dev/null
+++ b/sys/dev/qat/qat_hw15var.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
+/* $NetBSD: qat_hw15var.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
+
+/*
+ * Copyright (c) 2019 Internet Initiative Japan, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright(c) 2007-2013 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#ifndef _DEV_PCI_QAT_HW15VAR_H_
+#define _DEV_PCI_QAT_HW15VAR_H_
+
+CTASSERT(HASH_CONTENT_DESC_SIZE >=
+ sizeof(struct fw_auth_hdr) + MAX_HASH_SETUP_BLK_SZ);
+CTASSERT(CIPHER_CONTENT_DESC_SIZE >=
+ sizeof(struct fw_cipher_hdr) + MAX_CIPHER_SETUP_BLK_SZ);
+CTASSERT(CONTENT_DESC_MAX_SIZE >=
+ roundup(HASH_CONTENT_DESC_SIZE + CIPHER_CONTENT_DESC_SIZE,
+ QAT_OPTIMAL_ALIGN));
+CTASSERT(QAT_SYM_REQ_PARAMS_SIZE_PADDED >=
+ roundup(sizeof(struct fw_la_cipher_req_params) +
+ sizeof(struct fw_la_auth_req_params), QAT_OPTIMAL_ALIGN));
+
+/* Length of the 5 long words of the request that are stored in the session.
+ * This is rounded up to 32 in order to use the fast memcopy function */
+#define QAT_HW15_SESSION_REQ_CACHE_SIZE (32)
+
+void qat_msg_req_type_populate(struct arch_if_req_hdr *,
+ enum arch_if_req, uint32_t);
+void qat_msg_cmn_hdr_populate(struct fw_la_bulk_req *, bus_addr_t,
+ uint8_t, uint8_t, uint16_t, uint32_t);
+void qat_msg_service_cmd_populate(struct fw_la_bulk_req *,
+ enum fw_la_cmd_id, uint16_t);
+void qat_msg_cmn_mid_populate(struct fw_comn_req_mid *, void *,
+ uint64_t , uint64_t);
+void qat_msg_req_params_populate(struct fw_la_bulk_req *, bus_addr_t,
+ uint8_t);
+void qat_msg_cmn_footer_populate(union fw_comn_req_ftr *, uint64_t);
+void qat_msg_params_populate(struct fw_la_bulk_req *,
+ struct qat_crypto_desc *, uint8_t, uint16_t,
+ uint16_t);
+
+
+int qat_adm_ring_init(struct qat_softc *);
+int qat_adm_ring_send_init(struct qat_softc *);
+
+void qat_hw15_crypto_setup_desc(struct qat_crypto *,
+ struct qat_session *, struct qat_crypto_desc *);
+void qat_hw15_crypto_setup_req_params(struct qat_crypto_bank *,
+ struct qat_session *, struct qat_crypto_desc const *,
+ struct qat_sym_cookie *, struct cryptop *);
+
+#endif
diff --git a/sys/dev/qat/qat_hw17.c b/sys/dev/qat/qat_hw17.c
new file mode 100644
index 000000000000..643b624ba840
--- /dev/null
+++ b/sys/dev/qat/qat_hw17.c
@@ -0,0 +1,662 @@
+/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
+/* $NetBSD: qat_hw17.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
+
+/*
+ * Copyright (c) 2019 Internet Initiative Japan, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright(c) 2014 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#if 0
+__KERNEL_RCSID(0, "$NetBSD: qat_hw17.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
+#endif
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/proc.h>
+
+#include <machine/bus.h>
+
+#include <opencrypto/xform.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "qatreg.h"
+#include "qat_hw17reg.h"
+#include "qatvar.h"
+#include "qat_hw17var.h"
+
+int qat_adm_mailbox_put_msg_sync(struct qat_softc *, uint32_t,
+ void *, void *);
+int qat_adm_mailbox_send(struct qat_softc *,
+ struct fw_init_admin_req *, struct fw_init_admin_resp *);
+int qat_adm_mailbox_send_init_me(struct qat_softc *);
+int qat_adm_mailbox_send_hb_timer(struct qat_softc *);
+int qat_adm_mailbox_send_fw_status(struct qat_softc *);
+int qat_adm_mailbox_send_constants(struct qat_softc *);
+
+int
+qat_adm_mailbox_init(struct qat_softc *sc)
+{
+ uint64_t addr;
+ int error;
+ struct qat_dmamem *qdm;
+
+ error = qat_alloc_dmamem(sc, &sc->sc_admin_comms.qadc_dma, 1,
+ PAGE_SIZE, PAGE_SIZE);
+ if (error)
+ return error;
+
+ qdm = &sc->sc_admin_comms.qadc_const_tbl_dma;
+ error = qat_alloc_dmamem(sc, qdm, 1, PAGE_SIZE, PAGE_SIZE);
+ if (error)
+ return error;
+
+ memcpy(qdm->qdm_dma_vaddr,
+ mailbox_const_tab, sizeof(mailbox_const_tab));
+
+ bus_dmamap_sync(qdm->qdm_dma_tag, qdm->qdm_dma_map,
+ BUS_DMASYNC_PREWRITE);
+
+ error = qat_alloc_dmamem(sc, &sc->sc_admin_comms.qadc_hb_dma, 1,
+ PAGE_SIZE, PAGE_SIZE);
+ if (error)
+ return error;
+
+ addr = (uint64_t)sc->sc_admin_comms.qadc_dma.qdm_dma_seg.ds_addr;
+ qat_misc_write_4(sc, ADMINMSGUR, addr >> 32);
+ qat_misc_write_4(sc, ADMINMSGLR, addr);
+
+ return 0;
+}
+
+int
+qat_adm_mailbox_put_msg_sync(struct qat_softc *sc, uint32_t ae,
+ void *in, void *out)
+{
+ struct qat_dmamem *qdm;
+ uint32_t mailbox;
+ bus_size_t mb_offset = MAILBOX_BASE + (ae * MAILBOX_STRIDE);
+ int offset = ae * ADMINMSG_LEN * 2;
+ int times, received;
+ uint8_t *buf = (uint8_t *)sc->sc_admin_comms.qadc_dma.qdm_dma_vaddr + offset;
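+ /*
+ * Each AE owns two ADMINMSG_LEN slots in the admin DMA buffer: the
+ * request is copied into the first slot and the response is read
+ * back from the second.
+ */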
+
+ mailbox = qat_misc_read_4(sc, mb_offset);
+ if (mailbox == 1)
+ return EAGAIN;
+
+ qdm = &sc->sc_admin_comms.qadc_dma;
+ memcpy(buf, in, ADMINMSG_LEN);
+ bus_dmamap_sync(qdm->qdm_dma_tag, qdm->qdm_dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ qat_misc_write_4(sc, mb_offset, 1);
+
+ received = 0;
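+ /* Poll for up to one second (50 * 20ms) for the firmware to clear the
+ * mailbox, signalling that the response has been written. */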
+ for (times = 0; times < 50; times++) {
+ DELAY(20000);
+ if (qat_misc_read_4(sc, mb_offset) == 0) {
+ received = 1;
+ break;
+ }
+ }
+ if (received) {
+ bus_dmamap_sync(qdm->qdm_dma_tag, qdm->qdm_dma_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ memcpy(out, buf + ADMINMSG_LEN, ADMINMSG_LEN);
+ } else {
+ device_printf(sc->sc_dev,
+ "Failed to send admin msg to accelerator\n");
+ }
+
+ return received ? 0 : EFAULT;
+}
+
+int
+qat_adm_mailbox_send(struct qat_softc *sc,
+ struct fw_init_admin_req *req, struct fw_init_admin_resp *resp)
+{
+ int error;
+ uint32_t mask;
+ uint8_t ae;
+
+ for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+
+ error = qat_adm_mailbox_put_msg_sync(sc, ae, req, resp);
+ if (error)
+ return error;
+ if (resp->init_resp_hdr.status) {
+ device_printf(sc->sc_dev,
+ "Failed to send admin msg: cmd %d\n",
+ req->init_admin_cmd_id);
+ return EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+int
+qat_adm_mailbox_send_init_me(struct qat_softc *sc)
+{
+ struct fw_init_admin_req req;
+ struct fw_init_admin_resp resp;
+
+ memset(&req, 0, sizeof(req));
+ req.init_admin_cmd_id = FW_INIT_ME;
+
+ return qat_adm_mailbox_send(sc, &req, &resp);
+}
+
+int
+qat_adm_mailbox_send_hb_timer(struct qat_softc *sc)
+{
+ struct fw_init_admin_req req;
+ struct fw_init_admin_resp resp;
+
+ memset(&req, 0, sizeof(req));
+ req.init_admin_cmd_id = FW_HEARTBEAT_TIMER_SET;
+
+ req.init_cfg_ptr = sc->sc_admin_comms.qadc_hb_dma.qdm_dma_seg.ds_addr;
+ req.heartbeat_ticks =
+ sc->sc_hw.qhw_clock_per_sec / 1000 * QAT_HB_INTERVAL;
+
+ return qat_adm_mailbox_send(sc, &req, &resp);
+}
+
+int
+qat_adm_mailbox_send_fw_status(struct qat_softc *sc)
+{
+ int error;
+ struct fw_init_admin_req req;
+ struct fw_init_admin_resp resp;
+
+ memset(&req, 0, sizeof(req));
+ req.init_admin_cmd_id = FW_STATUS_GET;
+
+ error = qat_adm_mailbox_send(sc, &req, &resp);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+int
+qat_adm_mailbox_send_constants(struct qat_softc *sc)
+{
+ struct fw_init_admin_req req;
+ struct fw_init_admin_resp resp;
+
+ memset(&req, 0, sizeof(req));
+ req.init_admin_cmd_id = FW_CONSTANTS_CFG;
+
+ req.init_cfg_sz = 1024;
+ req.init_cfg_ptr =
+ sc->sc_admin_comms.qadc_const_tbl_dma.qdm_dma_seg.ds_addr;
+
+ return qat_adm_mailbox_send(sc, &req, &resp);
+}
+
+int
+qat_adm_mailbox_send_init(struct qat_softc *sc)
+{
+ int error;
+
+ error = qat_adm_mailbox_send_init_me(sc);
+ if (error)
+ return error;
+
+ error = qat_adm_mailbox_send_hb_timer(sc);
+ if (error)
+ return error;
+
+ error = qat_adm_mailbox_send_fw_status(sc);
+ if (error)
+ return error;
+
+ return qat_adm_mailbox_send_constants(sc);
+}
+
+int
+qat_arb_init(struct qat_softc *sc)
+{
+ uint32_t arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
+ uint32_t arb, i;
+ const uint32_t *thd_2_arb_cfg;
+
+ /* Service arb configured for 32-byte responses and
+ * ring flow control check enabled. */
+ for (arb = 0; arb < MAX_ARB; arb++)
+ qat_arb_sarconfig_write_4(sc, arb, arb_cfg);
+
+ /* Map worker threads to service arbiters */
+ sc->sc_hw.qhw_get_arb_mapping(sc, &thd_2_arb_cfg);
+
+ if (!thd_2_arb_cfg)
+ return EINVAL;
+
+ for (i = 0; i < sc->sc_hw.qhw_num_engines; i++)
+ qat_arb_wrk_2_ser_map_write_4(sc, i, *(thd_2_arb_cfg + i));
+
+ return 0;
+}
+
+int
+qat_set_ssm_wdtimer(struct qat_softc *sc)
+{
+ uint32_t timer;
+ u_int mask;
+ int i;
+
+ timer = sc->sc_hw.qhw_clock_per_sec / 1000 * QAT_SSM_WDT;
+ for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+ qat_misc_write_4(sc, SSMWDT(i), timer);
+ qat_misc_write_4(sc, SSMWDTPKE(i), timer);
+ }
+
+ return 0;
+}
+
+int
+qat_check_slice_hang(struct qat_softc *sc)
+{
+ int handled = 0;
+
+ return handled;
+}
+
+static uint32_t
+qat_hw17_crypto_setup_cipher_ctrl(struct qat_crypto_desc *desc,
+ struct qat_session *qs, uint32_t cd_blk_offset,
+ struct fw_la_bulk_req *req_tmpl, enum fw_slice next_slice)
+{
+ struct fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
+ (struct fw_cipher_cd_ctrl_hdr *)&req_tmpl->cd_ctrl;
+
+ desc->qcd_cipher_blk_sz = HW_AES_BLK_SZ;
+ desc->qcd_cipher_offset = cd_blk_offset;
+
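+ /* Sizes and offsets in the CD control header are expressed in quad
+ * words, hence the >> 3 conversions from bytes. */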
+ cipher_cd_ctrl->cipher_state_sz = desc->qcd_cipher_blk_sz >> 3;
+ cipher_cd_ctrl->cipher_key_sz = qs->qs_cipher_klen >> 3;
+ cipher_cd_ctrl->cipher_cfg_offset = cd_blk_offset >> 3;
+ FW_COMN_CURR_ID_SET(cipher_cd_ctrl, FW_SLICE_CIPHER);
+ FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, next_slice);
+
+ return roundup(sizeof(struct hw_cipher_config) + qs->qs_cipher_klen, 8);
+}
+
+static void
+qat_hw17_crypto_setup_cipher_cdesc(const struct qat_crypto_desc *desc,
+ const struct qat_session *qs, const struct cryptop *crp,
+ union hw_cipher_algo_blk *cipher)
+{
+ const uint8_t *key;
+
+ cipher->max.cipher_config.val =
+ qat_crypto_load_cipher_session(desc, qs);
+ if (crp != NULL && crp->crp_cipher_key != NULL)
+ key = crp->crp_cipher_key;
+ else
+ key = qs->qs_cipher_key;
+ memcpy(cipher->max.key, key, qs->qs_cipher_klen);
+}
+
+static uint32_t
+qat_hw17_crypto_setup_auth_ctrl(struct qat_crypto_desc *desc,
+ struct qat_session *qs, uint32_t cd_blk_offset,
+ struct fw_la_bulk_req *req_tmpl, enum fw_slice next_slice)
+{
+ struct fw_auth_cd_ctrl_hdr *auth_cd_ctrl =
+ (struct fw_auth_cd_ctrl_hdr *)&req_tmpl->cd_ctrl;
+ struct qat_sym_hash_def const *hash_def;
+
+ (void)qat_crypto_load_auth_session(desc, qs, &hash_def);
+
+ auth_cd_ctrl->hash_cfg_offset = cd_blk_offset >> 3;
+ auth_cd_ctrl->hash_flags = FW_AUTH_HDR_FLAG_NO_NESTED;
+ auth_cd_ctrl->inner_res_sz = hash_def->qshd_alg->qshai_digest_len;
+ auth_cd_ctrl->final_sz = hash_def->qshd_alg->qshai_sah->hashsize;
+
+ auth_cd_ctrl->inner_state1_sz =
+ roundup(hash_def->qshd_qat->qshqi_state1_len, 8);
+ auth_cd_ctrl->inner_state2_sz =
+ roundup(hash_def->qshd_qat->qshqi_state2_len, 8);
+ auth_cd_ctrl->inner_state2_offset =
+ auth_cd_ctrl->hash_cfg_offset +
+ ((sizeof(struct hw_auth_setup) +
+ auth_cd_ctrl->inner_state1_sz) >> 3);
+
+ FW_COMN_CURR_ID_SET(auth_cd_ctrl, FW_SLICE_AUTH);
+ FW_COMN_NEXT_ID_SET(auth_cd_ctrl, next_slice);
+
+ desc->qcd_auth_sz = auth_cd_ctrl->final_sz;
+ desc->qcd_auth_offset = cd_blk_offset;
+ desc->qcd_gcm_aad_sz_offset1 =
+ cd_blk_offset + offsetof(union hw_auth_algo_blk, max.state1) +
+ auth_cd_ctrl->inner_state1_sz + AES_BLOCK_LEN;
+
+ return roundup(auth_cd_ctrl->inner_state1_sz +
+ auth_cd_ctrl->inner_state2_sz +
+ sizeof(struct hw_auth_setup), 8);
+}
+
+static void
+qat_hw17_crypto_setup_auth_cdesc(const struct qat_crypto_desc *desc,
+ const struct qat_session *qs, const struct cryptop *crp,
+ union hw_auth_algo_blk *auth)
+{
+ struct qat_sym_hash_def const *hash_def;
+ uint8_t inner_state1_sz, *state1, *state2;
+ const uint8_t *key;
+
+ auth->max.inner_setup.auth_config.config =
+ qat_crypto_load_auth_session(desc, qs, &hash_def);
+ auth->max.inner_setup.auth_counter.counter =
+ htobe32(hash_def->qshd_qat->qshqi_auth_counter);
+ inner_state1_sz = roundup(hash_def->qshd_qat->qshqi_state1_len, 8);
+
+ state1 = auth->max.state1;
+ state2 = auth->max.state1 + inner_state1_sz;
+ switch (qs->qs_auth_algo) {
+ case HW_AUTH_ALGO_GALOIS_128:
+ key = NULL;
+ if (crp != NULL && crp->crp_cipher_key != NULL)
+ key = crp->crp_cipher_key;
+ else if (qs->qs_cipher_key != NULL)
+ key = qs->qs_cipher_key;
+ if (key != NULL) {
+ qat_crypto_gmac_precompute(desc, key,
+ qs->qs_cipher_klen, hash_def, state2);
+ }
+ break;
+ case HW_AUTH_ALGO_SHA1:
+ case HW_AUTH_ALGO_SHA256:
+ case HW_AUTH_ALGO_SHA384:
+ case HW_AUTH_ALGO_SHA512:
+ switch (qs->qs_auth_mode) {
+ case HW_AUTH_MODE0:
+ memcpy(state1, hash_def->qshd_alg->qshai_init_state,
+ inner_state1_sz);
+ /* Override for mode 0 hashes. */
+ auth->max.inner_setup.auth_counter.counter = 0;
+ break;
+ case HW_AUTH_MODE1:
+ if (crp != NULL && crp->crp_auth_key != NULL)
+ key = crp->crp_auth_key;
+ else
+ key = qs->qs_auth_key;
+ if (key != NULL) {
+ qat_crypto_hmac_precompute(desc, key,
+ qs->qs_auth_klen, hash_def, state1, state2);
+ }
+ break;
+ default:
+ panic("%s: unhandled auth mode %d", __func__,
+ qs->qs_auth_mode);
+ }
+ break;
+ default:
+ panic("%s: unhandled auth algorithm %d", __func__,
+ qs->qs_auth_algo);
+ }
+}
+
+static void
+qat_hw17_init_comn_req_hdr(struct qat_crypto_desc *desc,
+ struct fw_la_bulk_req *req)
+{
+ union fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+ struct fw_comn_req_hdr *req_hdr = &req->comn_hdr;
+
+ req_hdr->service_cmd_id = desc->qcd_cmd_id;
+ req_hdr->hdr_flags = FW_COMN_VALID;
+ req_hdr->service_type = FW_COMN_REQ_CPM_FW_LA;
+ req_hdr->comn_req_flags = FW_COMN_FLAGS_BUILD(
+ COMN_CD_FLD_TYPE_64BIT_ADR, COMN_PTR_TYPE_SGL);
+ req_hdr->serv_specif_flags = 0;
+ cd_pars->s.content_desc_addr = desc->qcd_desc_paddr;
+}
+
+void
+qat_hw17_crypto_setup_desc(struct qat_crypto *qcy, struct qat_session *qs,
+ struct qat_crypto_desc *desc)
+{
+ union hw_cipher_algo_blk *cipher;
+ union hw_auth_algo_blk *auth;
+ struct fw_la_bulk_req *req_tmpl;
+ struct fw_comn_req_hdr *req_hdr;
+ uint32_t cd_blk_offset = 0;
+ int i;
+ uint8_t *cd_blk_ptr;
+
+ req_tmpl = (struct fw_la_bulk_req *)desc->qcd_req_cache;
+ req_hdr = &req_tmpl->comn_hdr;
+ cd_blk_ptr = desc->qcd_content_desc;
+
+ memset(req_tmpl, 0, sizeof(struct fw_la_bulk_req));
+ qat_hw17_init_comn_req_hdr(desc, req_tmpl);
+
+ for (i = 0; i < MAX_FW_SLICE; i++) {
+ switch (desc->qcd_slices[i]) {
+ case FW_SLICE_CIPHER:
+ cipher = (union hw_cipher_algo_blk *)(cd_blk_ptr +
+ cd_blk_offset);
+ cd_blk_offset += qat_hw17_crypto_setup_cipher_ctrl(desc,
+ qs, cd_blk_offset, req_tmpl,
+ desc->qcd_slices[i + 1]);
+ qat_hw17_crypto_setup_cipher_cdesc(desc, qs, NULL,
+ cipher);
+ break;
+ case FW_SLICE_AUTH:
+ auth = (union hw_auth_algo_blk *)(cd_blk_ptr +
+ cd_blk_offset);
+ cd_blk_offset += qat_hw17_crypto_setup_auth_ctrl(desc,
+ qs, cd_blk_offset, req_tmpl,
+ desc->qcd_slices[i + 1]);
+ qat_hw17_crypto_setup_auth_cdesc(desc, qs, NULL, auth);
+ req_hdr->serv_specif_flags |= FW_LA_RET_AUTH_RES;
+ break;
+ case FW_SLICE_DRAM_WR:
+ i = MAX_FW_SLICE; /* end of chain */
+ break;
+ default:
+ MPASS(0);
+ break;
+ }
+ }
+
+ req_tmpl->cd_pars.s.content_desc_params_sz =
+ roundup(cd_blk_offset, QAT_OPTIMAL_ALIGN) >> 3;
+ if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128)
+ req_hdr->serv_specif_flags |=
+ FW_LA_PROTO_GCM | FW_LA_GCM_IV_LEN_12_OCTETS;
+
+ bus_dmamap_sync(qs->qs_desc_mem.qdm_dma_tag,
+ qs->qs_desc_mem.qdm_dma_map, BUS_DMASYNC_PREWRITE);
+}
+
+static void
+qat_hw17_crypto_req_setkey(const struct qat_crypto_desc *desc,
+ const struct qat_session *qs, struct qat_sym_cookie *qsc,
+ struct fw_la_bulk_req *bulk_req, const struct cryptop *crp)
+{
+ union hw_auth_algo_blk *auth;
+ union hw_cipher_algo_blk *cipher;
+ uint8_t *cdesc;
+ int i;
+
+ cdesc = qsc->qsc_content_desc;
+ memcpy(cdesc, desc->qcd_content_desc, CONTENT_DESC_MAX_SIZE);
+ for (i = 0; i < MAX_FW_SLICE; i++) {
+ switch (desc->qcd_slices[i]) {
+ case FW_SLICE_CIPHER:
+ cipher = (union hw_cipher_algo_blk *)
+ (cdesc + desc->qcd_cipher_offset);
+ qat_hw17_crypto_setup_cipher_cdesc(desc, qs, crp,
+ cipher);
+ break;
+ case FW_SLICE_AUTH:
+ auth = (union hw_auth_algo_blk *)
+ (cdesc + desc->qcd_auth_offset);
+ qat_hw17_crypto_setup_auth_cdesc(desc, qs, crp, auth);
+ break;
+ case FW_SLICE_DRAM_WR:
+ i = MAX_FW_SLICE; /* end of chain */
+ break;
+ default:
+ MPASS(0);
+ }
+ }
+
+ bulk_req->cd_pars.s.content_desc_addr = qsc->qsc_content_desc_paddr;
+}
+
+void
+qat_hw17_crypto_setup_req_params(struct qat_crypto_bank *qcb __unused,
+ struct qat_session *qs, const struct qat_crypto_desc *desc,
+ struct qat_sym_cookie *qsc, struct cryptop *crp)
+{
+ struct qat_sym_bulk_cookie *qsbc;
+ struct fw_la_bulk_req *bulk_req;
+ struct fw_la_cipher_req_params *cipher_param;
+ struct fw_la_auth_req_params *auth_param;
+ bus_addr_t digest_paddr;
+ uint32_t aad_sz, *aad_szp;
+ uint8_t *req_params_ptr;
+ enum fw_la_cmd_id cmd_id = desc->qcd_cmd_id;
+
+ qsbc = &qsc->u.qsc_bulk_cookie;
+ bulk_req = (struct fw_la_bulk_req *)qsbc->qsbc_msg;
+
+ memcpy(bulk_req, desc->qcd_req_cache, sizeof(struct fw_la_bulk_req));
+ bulk_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)qsc;
+ bulk_req->comn_mid.src_data_addr = qsc->qsc_buffer_list_desc_paddr;
+ bulk_req->comn_mid.dest_data_addr = qsc->qsc_buffer_list_desc_paddr;
+ if (__predict_false(crp->crp_cipher_key != NULL ||
+ crp->crp_auth_key != NULL))
+ qat_hw17_crypto_req_setkey(desc, qs, qsc, bulk_req, crp);
+
+ digest_paddr = 0;
+ if (desc->qcd_auth_sz != 0)
+ digest_paddr = qsc->qsc_auth_res_paddr;
+
+ req_params_ptr = (uint8_t *)&bulk_req->serv_specif_rqpars;
+ cipher_param = (struct fw_la_cipher_req_params *)req_params_ptr;
+ auth_param = (struct fw_la_auth_req_params *)
+ (req_params_ptr + sizeof(struct fw_la_cipher_req_params));
+
+ cipher_param->u.s.cipher_IV_ptr = qsc->qsc_iv_buf_paddr;
+
+ /*
+ * The SG list layout is a bit different for GCM and GMAC; it's simpler
+ * to handle those cases separately.
+ */
+ if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
+ if (cmd_id != FW_LA_CMD_AUTH) {
+ /*
+ * Don't fill out the cipher block if we're doing GMAC
+ * only.
+ */
+ cipher_param->cipher_offset = 0;
+ cipher_param->cipher_length = crp->crp_payload_length;
+ }
+ auth_param->auth_off = 0;
+ auth_param->auth_len = crp->crp_payload_length;
+ auth_param->auth_res_addr = digest_paddr;
+ auth_param->auth_res_sz = desc->qcd_auth_sz;
+ auth_param->u1.aad_adr =
+ crp->crp_aad_length > 0 ? qsc->qsc_gcm_aad_paddr : 0;
+ auth_param->u2.aad_sz =
+ roundup2(crp->crp_aad_length, QAT_AES_GCM_AAD_ALIGN);
+ auth_param->hash_state_sz = auth_param->u2.aad_sz >> 3;
+
+ /*
+ * Update the hash state block if necessary. This only occurs
+ * when the AAD length changes between requests in a session and
+ * is synchronized by qat_process().
+ */
+ aad_sz = htobe32(crp->crp_aad_length);
+ aad_szp = (uint32_t *)(
+ __DECONST(uint8_t *, desc->qcd_content_desc) +
+ desc->qcd_gcm_aad_sz_offset1);
+ if (__predict_false(*aad_szp != aad_sz)) {
+ *aad_szp = aad_sz;
+ bus_dmamap_sync(qs->qs_desc_mem.qdm_dma_tag,
+ qs->qs_desc_mem.qdm_dma_map,
+ BUS_DMASYNC_PREWRITE);
+ }
+ } else {
+ if (cmd_id != FW_LA_CMD_AUTH) {
+ cipher_param->cipher_offset =
+ crp->crp_aad_length == 0 ? 0 :
+ crp->crp_payload_start - crp->crp_aad_start;
+ cipher_param->cipher_length = crp->crp_payload_length;
+ }
+ if (cmd_id != FW_LA_CMD_CIPHER) {
+ auth_param->auth_off = 0;
+ auth_param->auth_len =
+ crp->crp_payload_length + crp->crp_aad_length;
+ auth_param->auth_res_addr = digest_paddr;
+ auth_param->auth_res_sz = desc->qcd_auth_sz;
+ auth_param->u1.aad_adr = 0;
+ auth_param->u2.aad_sz = 0;
+ auth_param->hash_state_sz = 0;
+ }
+ }
+}
diff --git a/sys/dev/qat/qat_hw17reg.h b/sys/dev/qat/qat_hw17reg.h
new file mode 100644
index 000000000000..6655b905dde0
--- /dev/null
+++ b/sys/dev/qat/qat_hw17reg.h
@@ -0,0 +1,2460 @@
+/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
+/* $NetBSD: qat_hw17reg.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
+
+/*
+ * Copyright (c) 2019 Internet Initiative Japan, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright(c) 2014 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#ifndef _DEV_PCI_QAT_HW17REG_H_
+#define _DEV_PCI_QAT_HW17REG_H_
+
+/* Default message size in bytes */
+#define FW_REQ_DEFAULT_SZ_HW17 128
+#define FW_RESP_DEFAULT_SZ_HW17 32
+
+/* -------------------------------------------------------------------------- */
+/* accel */
+
+enum fw_init_admin_cmd_id {
+ FW_INIT_ME = 0,
+ FW_TRNG_ENABLE = 1,
+ FW_TRNG_DISABLE = 2,
+ FW_CONSTANTS_CFG = 3,
+ FW_STATUS_GET = 4,
+ FW_COUNTERS_GET = 5,
+ FW_LOOPBACK = 6,
+ FW_HEARTBEAT_SYNC = 7,
+ FW_HEARTBEAT_GET = 8,
+ FW_COMP_CAPABILITY_GET = 9,
+ FW_CRYPTO_CAPABILITY_GET = 10,
+ FW_HEARTBEAT_TIMER_SET = 13,
+};
+
+enum fw_init_admin_resp_status {
+ FW_INIT_RESP_STATUS_SUCCESS = 0,
+ FW_INIT_RESP_STATUS_FAIL = 1,
+ FW_INIT_RESP_STATUS_UNSUPPORTED = 4
+};
+
+struct fw_init_admin_req {
+ uint16_t init_cfg_sz;
+ uint8_t resrvd1;
+ uint8_t init_admin_cmd_id;
+ uint32_t resrvd2;
+ uint64_t opaque_data;
+ uint64_t init_cfg_ptr;
+
+ union {
+ struct {
+ uint16_t ibuf_size_in_kb;
+ uint16_t resrvd3;
+ };
+ uint32_t heartbeat_ticks;
+ };
+
+ uint32_t resrvd4;
+};
+
+struct fw_init_admin_resp_hdr {
+ uint8_t flags;
+ uint8_t resrvd1;
+ uint8_t status;
+ uint8_t init_admin_cmd_id;
+};
+
+enum fw_init_admin_init_flag {
+ FW_INIT_FLAG_PKE_DISABLED = 0
+};
+
+struct fw_init_admin_fw_capability_resp_hdr {
+ uint16_t reserved;
+ uint8_t status;
+ uint8_t init_admin_cmd_id;
+};
+
+struct fw_init_admin_capability_resp {
+ struct fw_init_admin_fw_capability_resp_hdr init_resp_hdr;
+ uint32_t extended_features;
+ uint64_t opaque_data;
+ union {
+ struct {
+ uint16_t compression_algos;
+ uint16_t checksum_algos;
+ uint32_t deflate_capabilities;
+ uint32_t resrvd1;
+ uint32_t lzs_capabilities;
+ } compression;
+ struct {
+ uint32_t cipher_algos;
+ uint32_t hash_algos;
+ uint16_t keygen_algos;
+ uint16_t other;
+ uint16_t public_key_algos;
+ uint16_t prime_algos;
+ } crypto;
+ };
+};
+
+struct fw_init_admin_resp_pars {
+ union {
+ uint32_t resrvd1[4];
+ struct {
+ uint32_t version_patch_num;
+ uint8_t context_id;
+ uint8_t ae_id;
+ uint16_t resrvd1;
+ uint64_t resrvd2;
+ } s1;
+ struct {
+ uint64_t req_rec_count;
+ uint64_t resp_sent_count;
+ } s2;
+ } u;
+};
+
+struct fw_init_admin_hb_cnt {
+ uint16_t resp_heartbeat_cnt;
+ uint16_t req_heartbeat_cnt;
+};
+
+#define QAT_NUM_THREADS 8
+
+struct fw_init_admin_hb_stats {
+ struct fw_init_admin_hb_cnt stats[QAT_NUM_THREADS];
+};
+
+struct fw_init_admin_resp {
+ struct fw_init_admin_resp_hdr init_resp_hdr;
+ union {
+ uint32_t resrvd2;
+ struct {
+ uint16_t version_minor_num;
+ uint16_t version_major_num;
+ } s;
+ } u;
+ uint64_t opaque_data;
+ struct fw_init_admin_resp_pars init_resp_pars;
+};
+
+#define FW_COMN_HEARTBEAT_OK 0
+#define FW_COMN_HEARTBEAT_BLOCKED 1
+#define FW_COMN_HEARTBEAT_FLAG_BITPOS 0
+#define FW_COMN_HEARTBEAT_FLAG_MASK 0x1
+#define FW_COMN_STATUS_RESRVD_FLD_MASK 0xFE
+#define FW_COMN_HEARTBEAT_HDR_FLAG_GET(hdr_t) \
+ FW_COMN_HEARTBEAT_FLAG_GET(hdr_t.flags)
+
+#define FW_COMN_HEARTBEAT_HDR_FLAG_SET(hdr_t, val) \
+ FW_COMN_HEARTBEAT_FLAG_SET(hdr_t, val)
+
+#define FW_COMN_HEARTBEAT_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, \
+ FW_COMN_HEARTBEAT_FLAG_BITPOS, \
+ FW_COMN_HEARTBEAT_FLAG_MASK)
+
+/* -------------------------------------------------------------------------- */
+
+/* Big assumptions that both bitpos and mask are constants */
+#define FIELD_SET(flags, val, bitpos, mask) \
+ (flags) = \
+ (((flags) & (~((mask) << (bitpos)))) | (((val) & (mask)) << (bitpos)))
+
+#define FIELD_GET(flags, bitpos, mask) (((flags) >> (bitpos)) & (mask))
+
+#define FLAG_SET(flags, bitpos) (flags) = ((flags) | (1 << (bitpos)))
+
+#define FLAG_CLEAR(flags, bitpos) (flags) = ((flags) & (~(1 << (bitpos))))
+
+#define FLAG_GET(flags, bitpos) (((flags) >> (bitpos)) & 1)
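+
+/*
+ * Illustrative sketch (hypothetical, not from the original Intel/NetBSD
+ * sources): FIELD_SET()/FIELD_GET() round-trip a value through a masked
+ * field; for a 2-bit field at bit position 4, FIELD_SET(flags, 0x3, 4, 0x3)
+ * leaves flags == 0x30 and FIELD_GET(flags, 4, 0x3) then recovers 0x3
+ * (hypothetical helper name):
+ *
+ *	static inline int
+ *	example_field_roundtrip(void)
+ *	{
+ *		uint8_t flags = 0;
+ *
+ *		FIELD_SET(flags, 0x3, 4, 0x3);
+ *		return FIELD_GET(flags, 4, 0x3);
+ *	}
+ */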
+
+/* Default request and response ring size in bytes */
+#define FW_REQ_DEFAULT_SZ 128
+#define FW_RESP_DEFAULT_SZ 32
+
+#define FW_COMN_ONE_BYTE_SHIFT 8
+#define FW_COMN_SINGLE_BYTE_MASK 0xFF
+
+/* Common Request - Block size definitions in multiples of individual long
+ * words */
+#define FW_NUM_LONGWORDS_1 1
+#define FW_NUM_LONGWORDS_2 2
+#define FW_NUM_LONGWORDS_3 3
+#define FW_NUM_LONGWORDS_4 4
+#define FW_NUM_LONGWORDS_5 5
+#define FW_NUM_LONGWORDS_6 6
+#define FW_NUM_LONGWORDS_7 7
+#define FW_NUM_LONGWORDS_10 10
+#define FW_NUM_LONGWORDS_13 13
+
+/* Definition of the associated service Id for NULL service type.
+ Note: the response is expected to use FW_COMN_RESP_SERV_CPM_FW */
+#define FW_NULL_REQ_SERV_ID 1
+
+/*
+ * Definition of the firmware interface service users, for
+ * responses.
+ * Enumeration which is used to indicate the ids of the services
+ * for responses using the external firmware interfaces.
+ */
+
+enum fw_comn_resp_serv_id {
+ FW_COMN_RESP_SERV_NULL, /* NULL service id type */
+ FW_COMN_RESP_SERV_CPM_FW, /* CPM FW Service ID */
+ FW_COMN_RESP_SERV_DELIMITER /* Delimiter service id type */
+};
+
+/*
+ * Definition of the request types
+ * Enumeration which is used to indicate the ids of the request
+ * types used in each of the external firmware interfaces
+ */
+
+enum fw_comn_request_id {
+ FW_COMN_REQ_NULL = 0, /* NULL request type */
+ FW_COMN_REQ_CPM_FW_PKE = 3, /* CPM FW PKE Request */
+ FW_COMN_REQ_CPM_FW_LA = 4, /* CPM FW Lookaside Request */
+ FW_COMN_REQ_CPM_FW_DMA = 7, /* CPM FW DMA Request */
+ FW_COMN_REQ_CPM_FW_COMP = 9, /* CPM FW Compression Request */
+ FW_COMN_REQ_DELIMITER /* End delimiter */
+
+};
+
+/*
+ * Definition of the common QAT FW request content descriptor field -
+ * points to the content descriptor parameters or itself contains service-
+ * specific data. Also specifies content descriptor parameter size.
+ * Contains reserved fields.
+ * Common section of the request used across all of the services exposed
+ * by the QAT FW. Each of the services inherit these common fields
+ */
+union fw_comn_req_hdr_cd_pars {
+ /* LWs 2-5 */
+ struct
+ {
+ uint64_t content_desc_addr;
+ /* Address of the content descriptor */
+
+ uint16_t content_desc_resrvd1;
+ /* Content descriptor reserved field */
+
+ uint8_t content_desc_params_sz;
+ /* Size of the content descriptor parameters in quad words. These
+ * parameters describe the session setup configuration info for the
+ * slices that this request relies upon i.e. the configuration word and
+ * cipher key needed by the cipher slice if there is a request for
+ * cipher processing. */
+
+ uint8_t content_desc_hdr_resrvd2;
+ /* Content descriptor reserved field */
+
+ uint32_t content_desc_resrvd3;
+ /* Content descriptor reserved field */
+ } s;
+
+ struct
+ {
+ uint32_t serv_specif_fields[FW_NUM_LONGWORDS_4];
+
+ } s1;
+
+};
+
+/*
+ * Definition of the common QAT FW request middle block.
+ * Common section of the request used across all of the services exposed
+ * by the QAT FW. Each of the services inherit these common fields
+ */
+struct fw_comn_req_mid
+{
+ /* LWs 6-13 */
+ uint64_t opaque_data;
+ /* Opaque data passed unmodified from the request to response messages by
+ * firmware (fw) */
+
+ uint64_t src_data_addr;
+ /* Generic definition of the source data supplied to the QAT AE. The
+ * common flags are used to further describe the attributes of this
+ * field */
+
+ uint64_t dest_data_addr;
+ /* Generic definition of the destination data supplied to the QAT AE. The
+ * common flags are used to further describe the attributes of this
+ * field */
+
+ uint32_t src_length;
+ /* Length of the source flat buffer in case the src buffer
+ * type is flat */
+
+ uint32_t dst_length;
+ /* Length of the destination flat buffer in case the dst buffer
+ * type is flat */
+
+};
+
+/*
+ * Definition of the common QAT FW request content descriptor control
+ * block.
+ *
+ * Service specific section of the request used across all of the services
+ * exposed by the QAT FW. Each of the services populates this block
+ * uniquely. Refer to the service-specific header structures e.g.
+ * 'fw_cipher_hdr_s' (for Cipher) etc.
+ */
+struct fw_comn_req_cd_ctrl
+{
+ /* LWs 27-31 */
+ uint32_t content_desc_ctrl_lw[FW_NUM_LONGWORDS_5];
+
+};
+
+/*
+ * Definition of the common QAT FW request header.
+ * Common section of the request used across all of the services exposed
+ * by the QAT FW. Each of the services inherit these common fields. The
+ * reserved field of 7 bits and the service command Id field are all
+ * service-specific fields, along with the service specific flags.
+ */
+struct fw_comn_req_hdr
+{
+ /* LW0 */
+ uint8_t resrvd1;
+ /* reserved field */
+
+ uint8_t service_cmd_id;
+ /* Service Command Id - this field is service-specific
+ * Please use service-specific command Id here e.g.Crypto Command Id
+ * or Compression Command Id etc. */
+
+ uint8_t service_type;
+ /* Service type */
+
+ uint8_t hdr_flags;
+ /* This represents a flags field for the Service Request.
+ * The most significant bit is the 'valid' flag and the only
+ * one used. All remaining bit positions are unused and
+ * are therefore reserved and need to be set to 0. */
+
+ /* LW1 */
+ uint16_t serv_specif_flags;
+ /* Common Request service-specific flags
+ * e.g. Symmetric Crypto Command Flags */
+
+ uint16_t comn_req_flags;
+ /* Common Request Flags consisting of
+ * - 14 reserved bits,
+ * - 1 Content Descriptor field type bit and
+ * - 1 Source/destination pointer type bit */
+
+};
+
+/*
+ * Definition of the common QAT FW request parameter field.
+ *
+ * Service specific section of the request used across all of the services
+ * exposed by the QAT FW. Each of the services populates this block
+ * uniquely. Refer to service-specific header structures e.g.
+ * 'fw_comn_req_cipher_rqpars_s' (for Cipher) etc.
+ *
+ */
+struct fw_comn_req_rqpars
+{
+ /* LWs 14-26 */
+ uint32_t serv_specif_rqpars_lw[FW_NUM_LONGWORDS_13];
+
+};
+
+/*
+ * Definition of the common request structure with service specific
+ * fields
+ * This is a definition of the full qat request structure used by all
+ * services. Each service is free to use the service fields in its own
+ * way. This struct is useful as a message passing argument before the
+ * service contained within the request is determined.
+ */
+struct fw_comn_req
+{
+ /* LWs 0-1 */
+ struct fw_comn_req_hdr comn_hdr;
+ /* Common request header */
+
+ /* LWs 2-5 */
+ union fw_comn_req_hdr_cd_pars cd_pars;
+ /* Common Request content descriptor field which points either to a
+ * content descriptor
+ * parameter block or contains the service-specific data itself. */
+
+ /* LWs 6-13 */
+ struct fw_comn_req_mid comn_mid;
+ /* Common request middle section */
+
+ /* LWs 14-26 */
+ struct fw_comn_req_rqpars serv_specif_rqpars;
+ /* Common request service-specific parameter field */
+
+ /* LWs 27-31 */
+ struct fw_comn_req_cd_ctrl cd_ctrl;
+ /* Common request content descriptor control block -
+ * this field is service-specific */
+
+};
+
+/*
+ * Error code field
+ *
+ * Overloaded field with 8 bit common error field or two
+ * 8 bit compression error fields for compression and translator slices
+ */
+union fw_comn_error {
+ struct
+ {
+ uint8_t resrvd;
+ /* 8 bit reserved field */
+
+ uint8_t comn_err_code;
+ /* 8 bit common error code */
+
+ } s;
+ /* Structure which is used for non-compression responses */
+
+ struct
+ {
+ uint8_t xlat_err_code;
+ /* 8 bit translator error field */
+
+ uint8_t cmp_err_code;
+ /* 8 bit compression error field */
+
+ } s1;
+ /* Structure which is used for compression responses */
+
+};
+
+/*
+ * Definition of the common QAT FW response header.
+ * This section of the response is common across all of the services
+ * that generate a firmware interface response
+ */
+struct fw_comn_resp_hdr
+{
+ /* LW0 */
+ uint8_t resrvd1;
+ /* Reserved field - this field is service-specific -
+ * Note: The Response Destination Id has been removed
+ * from first QWord */
+
+ uint8_t service_id;
+ /* Service Id returned by service block */
+
+ uint8_t response_type;
+ /* Response type - copied from the request to
+ * the response message */
+
+ uint8_t hdr_flags;
+ /* This represents a flags field for the Response.
+ * Bit<7> = 'valid' flag
+ * Bit<6> = 'CNV' flag indicating that CNV was executed
+ * on the current request
+ * Bit<5> = 'CNVNR' flag indicating that a recovery happened
+ * on the current request following a CNV error
+ * All remaining bits are unused and are therefore reserved.
+ * They must be set to 0. */
+ */
+
+ /* LW 1 */
+ union fw_comn_error comn_error;
+ /* This field is overloaded to allow for one 8 bit common error field
+ * or two 8 bit error fields from compression and translator */
+
+ uint8_t comn_status;
+ /* Status field which specifies which slice(s) report an error */
+
+ uint8_t cmd_id;
+ /* Command Id - passed from the request to the response message */
+
+};
+
+/*
+ * Definition of the common response structure with service specific
+ * fields
+ * This is a definition of the full qat response structure used by all
+ * services.
+ */
+struct fw_comn_resp
+{
+ /* LWs 0-1 */
+ struct fw_comn_resp_hdr comn_hdr;
+ /* Common header fields */
+
+ /* LWs 2-3 */
+ uint64_t opaque_data;
+ /* Opaque data passed from the request to the response message */
+
+ /* LWs 4-7 */
+ uint32_t resrvd[FW_NUM_LONGWORDS_4];
+ /* Reserved */
+
+};
+
+/* Common QAT FW request header - structure of LW0
+ * + ===== + ---- + ----------- + ----------- + ----------- + ----------- +
+ * | Bit | 31 | 30 - 24 | 23 - 16 | 15 - 8 | 7 - 0 |
+ * + ===== + ---- + ----------- + ----------- + ----------- + ----------- +
+ * | Flags | V | Reserved | Serv Type | Serv Cmd Id | Reserved |
+ * + ===== + ---- + ----------- + ----------- + ----------- + ----------- +
+ */
+
+#define FW_COMN_VALID __BIT(7)
+
+/* Common QAT FW response header - structure of LW0
+ * + ===== + --- + --- + ----- + ----- + --------- + ----------- + ----- +
+ * | Bit | 31 | 30 | 29 | 28-24 | 23 - 16 | 15 - 8 | 7-0 |
+ * + ===== + --- + ----+ ----- + ----- + --------- + ----------- + ----- +
+ * | Flags | V | CNV | CNVNR | Rsvd | Serv Type | Serv Cmd Id | Rsvd |
+ * + ===== + --- + --- + ----- + ----- + --------- + ----------- + ----- + */
+/* Macros defining the bit position and mask of 'CNV' flag
+ * within the hdr_flags field of LW0 (service response only) */
+#define FW_COMN_CNV_FLAG_BITPOS 6
+#define FW_COMN_CNV_FLAG_MASK 0x1
+
+/* Macros defining the bit position and mask of CNVNR flag
+ * within the hdr_flags field of LW0 (service response only) */
+#define FW_COMN_CNVNR_FLAG_BITPOS 5
+#define FW_COMN_CNVNR_FLAG_MASK 0x1
+
+/*
+ * Macro for extraction of Service Type Field
+ *
+ * fw_comn_req_hdr_t Structure from which to extract the
+ * Service Type Field
+ */
+#define FW_COMN_OV_SRV_TYPE_GET(fw_comn_req_hdr_t) \
+ fw_comn_req_hdr_t.service_type
+
+/*
+ * Macro for setting of Service Type Field
+ *
+ * 'fw_comn_req_hdr_t' structure to set the Service
+ * Type Field
+ * val Value of the Service Type Field
+ */
+#define FW_COMN_OV_SRV_TYPE_SET(fw_comn_req_hdr_t, val) \
+ fw_comn_req_hdr_t.service_type = val
+
+/*
+ * Macro for extraction of Service Command Id Field
+ *
+ * fw_comn_req_hdr_t Structure from which to extract the
+ * Service Command Id Field
+ */
+#define FW_COMN_OV_SRV_CMD_ID_GET(fw_comn_req_hdr_t) \
+ fw_comn_req_hdr_t.service_cmd_id
+
+/*
+ * Macro for setting of Service Command Id Field
+ *
+ * 'fw_comn_req_hdr_t' structure to set the
+ * Service Command Id Field
+ * val Value of the Service Command Id Field
+ */
+#define FW_COMN_OV_SRV_CMD_ID_SET(fw_comn_req_hdr_t, val) \
+ fw_comn_req_hdr_t.service_cmd_id = val
+
+/*
+ * Extract the valid flag from the request or response's header flags.
+ *
+ * hdr_t Request or Response 'hdr_t' structure to extract the valid bit
+ * from the 'hdr_flags' field.
+ */
+#define FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
+ FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
+
+/*
+ * Extract the CNVNR flag from the header flags in the response only.
+ *
+ * hdr_flags Response header flags from which to extract the
+ * CNVNR bit.
+ */
+#define FW_COMN_HDR_CNVNR_FLAG_GET(hdr_flags) \
+ FIELD_GET(hdr_flags, \
+ FW_COMN_CNVNR_FLAG_BITPOS, \
+ FW_COMN_CNVNR_FLAG_MASK)
+
+/*
+ * Extract the CNV flag from the header flags in the response only.
+ *
+ * hdr_flags Response header flags from which to extract the
+ * CNV bit.
+ */
+#define FW_COMN_HDR_CNV_FLAG_GET(hdr_flags) \
+ FIELD_GET(hdr_flags, \
+ FW_COMN_CNV_FLAG_BITPOS, \
+ FW_COMN_CNV_FLAG_MASK)
+
+/*
+ * Set the valid bit in the request's header flags.
+ *
+ * hdr_t Request or Response 'hdr_t' structure to set the valid bit
+ * val Value of the valid bit flag.
+ */
+#define FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
+ FW_COMN_VALID_FLAG_SET(hdr_t, val)
+
+/*
+ * Common macro to extract the valid flag from the header flags field
+ * within the header structure (request or response).
+ *
+ * hdr_flags Header flags field (request or response) from which
+ * to extract the valid bit.
+ */
+#define FW_COMN_VALID_FLAG_GET(hdr_flags) \
+ FIELD_GET(hdr_flags, \
+ FW_COMN_VALID_FLAG_BITPOS, \
+ FW_COMN_VALID_FLAG_MASK)
+
+/*
+ * Common macro to extract the remaining reserved flags from the header
+ * flags field within the header structure (request or response).
+ *
+ * hdr_flags Header flags field (request or response) from which
+ * to extract the remaining reserved bits (excluding the
+ * valid flag).
+ */
+#define FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \
+ (hdr_flags & FW_COMN_HDR_RESRVD_FLD_MASK)
+
+/*
+ * Common macro to set the valid bit in the header flags field within
+ * the header structure (request or response).
+ *
+ * hdr_t Structure (request or response) containing the header
+ * flags field, to allow the valid bit to be set.
+ * val Value of the valid bit flag.
+ */
+#define FW_COMN_VALID_FLAG_SET(hdr_t, val) \
+ FIELD_SET((hdr_t.hdr_flags), \
+ (val), \
+ FW_COMN_VALID_FLAG_BITPOS, \
+ FW_COMN_VALID_FLAG_MASK)
+
+/*
+ * Macro that must be used when building the common header flags.
+ * Note that all reserved field bits 0-6 (LW0) need to be forced to 0.
+ *
+ * valid Value of the valid flag
+ */
+
+#define FW_COMN_HDR_FLAGS_BUILD(valid) \
+ (((valid)&FW_COMN_VALID_FLAG_MASK) \
+ << FW_COMN_VALID_FLAG_BITPOS)
+
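+/*
+ * Illustrative sketch, not part of the driver: a request header would
+ * typically be marked valid by building the LW0 flags from the valid bit
+ * alone, leaving every reserved bit cleared, e.g.
+ *
+ *	struct fw_comn_req_hdr hdr = { 0 };
+ *
+ *	hdr.hdr_flags = FW_COMN_HDR_FLAGS_BUILD(1);
+ */
+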
+/*
+ * Common Request Flags Definition
+ * The bit offsets below are within the flags field. These are NOT relative to
+ * the memory word. Unused fields e.g. reserved bits, must be zeroed.
+ *
+ * + ===== + ------ + --- + --- + --- + --- + --- + --- + --- + --- +
+ * | Bits [15:8] | 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 |
+ * + ===== + ------ + --- + --- + --- + --- + --- + --- + --- + --- +
+ * | Flags[15:8] | Rsv | Rsv | Rsv | Rsv | Rsv | Rsv | Rsv | Rsv |
+ * + ===== + ------ + --- + --- + --- + --- + --- + --- + --- + --- +
+ * | Bits [7:0] | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
+ * + ===== + ------ + --- + --- + --- + --- + --- + --- + --- + --- +
+ * | Flags [7:0] | Rsv | Rsv | Rsv | Rsv | Rsv | BnP | Cdt | Ptr |
+ * + ===== + ------ + --- + --- + --- + --- + --- + --- + --- + --- +
+ */
+
+#define COMN_PTR_TYPE_BITPOS 0
+/* Common Request Flags - Starting bit position indicating
+ * Src&Dst Buffer Pointer type */
+
+#define COMN_PTR_TYPE_MASK 0x1
+/* Common Request Flags - One bit mask used to determine
+ * Src&Dst Buffer Pointer type */
+
+#define COMN_CD_FLD_TYPE_BITPOS 1
+/* Common Request Flags - Starting bit position indicating
+ * CD Field type */
+
+#define COMN_CD_FLD_TYPE_MASK 0x1
+/* Common Request Flags - One bit mask used to determine
+ * CD Field type */
+
+#define COMN_BNP_ENABLED_BITPOS 2
+/* Common Request Flags - Starting bit position indicating
+ * the source buffer contains a batch of requests. If this
+ * bit is set, the source buffer is a Batch And Pack OpData List
+ * and the Ptr Type Bit only applies to the Destination buffer. */
+
+#define COMN_BNP_ENABLED_MASK 0x1
+/* Batch And Pack Enabled Flag Mask - One bit mask used to determine
+ * the source buffer is in Batch and Pack OpData Link List Mode. */
+
+/* ========================================================================= */
+/* Pointer Type Flag definitions */
+/* ========================================================================= */
+#define COMN_PTR_TYPE_FLAT 0x0
+/* Constant value indicating Src&Dst Buffer Pointer type is flat
+ * If Batch and Pack mode is enabled, only applies to Destination buffer. */
+
+#define COMN_PTR_TYPE_SGL 0x1
+/* Constant value indicating Src&Dst Buffer Pointer type is SGL type
+ * If Batch and Pack mode is enabled, only applies to Destination buffer. */
+
+#define COMN_PTR_TYPE_BATCH 0x2
+/* Constant value indicating Src is a batch request
+ * and Dst Buffer Pointer type is SGL type */
+
+/* ========================================================================= */
+/* CD Field Flag definitions */
+/* ========================================================================= */
+#define COMN_CD_FLD_TYPE_64BIT_ADR 0x0
+/* Constant value indicating CD Field contains 64-bit address */
+
+#define COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
+/* Constant value indicating CD Field contains 16 bytes of setup data */
+
+/* ========================================================================= */
+/* Batch And Pack Enable/Disable Definitions */
+/* ========================================================================= */
+#define COMN_BNP_ENABLED 0x1
+/* Constant value indicating Source buffer will point to Batch And Pack OpData
+ * List */
+
+#define COMN_BNP_DISABLED 0x0
+/* Constant value indicating Source buffer will not point to Batch And Pack
+ * OpData List */
+
+/*
+ * Macro that must be used when building the common request flags (for all
+ * requests but comp BnP).
+ * Note that all reserved field bits 2-15 (LW1) need to be forced to 0.
+ *
+ * ptr Value of the pointer type flag
+ * cdt Value of the cd field type flag
+*/
+#define FW_COMN_FLAGS_BUILD(cdt, ptr) \
+ ((((cdt)&COMN_CD_FLD_TYPE_MASK) << COMN_CD_FLD_TYPE_BITPOS) | \
+ (((ptr)&COMN_PTR_TYPE_MASK) << COMN_PTR_TYPE_BITPOS))
+
+/*
+ * Macro that must be used when building the common request flags for comp
+ * BnP service.
+ * Note that all reserved field bits 3-15 (LW1) need to be forced to 0.
+ *
+ * ptr Value of the pointer type flag
+ * cdt Value of the cd field type flag
+ * bnp Value of the bnp enabled flag
+ */
+#define FW_COMN_FLAGS_BUILD_BNP(cdt, ptr, bnp) \
+ ((((cdt)&COMN_CD_FLD_TYPE_MASK) << COMN_CD_FLD_TYPE_BITPOS) | \
+ (((ptr)&COMN_PTR_TYPE_MASK) << COMN_PTR_TYPE_BITPOS) | \
+ (((bnp)&COMN_BNP_ENABLED_MASK) << COMN_BNP_ENABLED_BITPOS))
+
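+/*
+ * Illustrative sketch, not part of the driver: for a request using SGL
+ * source/destination buffers and a content descriptor field holding a
+ * 64-bit address, the common flags could be assembled as
+ *
+ *	req.comn_hdr.comn_req_flags =
+ *	    FW_COMN_FLAGS_BUILD(COMN_CD_FLD_TYPE_64BIT_ADR,
+ *		COMN_PTR_TYPE_SGL);
+ *
+ * which leaves the reserved bits 2-15 cleared as required.
+ */
+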
+/*
+ * Macro for extraction of the pointer type bit from the common flags
+ *
+ * flags Flags to extract the pointer type bit from
+ */
+#define FW_COMN_PTR_TYPE_GET(flags) \
+ FIELD_GET(flags, COMN_PTR_TYPE_BITPOS, COMN_PTR_TYPE_MASK)
+
+/*
+ * Macro for extraction of the cd field type bit from the common flags
+ *
+ * flags Flags to extract the cd field type bit from
+ */
+#define FW_COMN_CD_FLD_TYPE_GET(flags) \
+ FIELD_GET(flags, COMN_CD_FLD_TYPE_BITPOS, COMN_CD_FLD_TYPE_MASK)
+
+/*
+ * Macro for extraction of the bnp field type bit from the common flags
+ *
+ * flags Flags to extract the bnp field type bit from
+ *
+ */
+#define FW_COMN_BNP_ENABLED_GET(flags) \
+ FIELD_GET(flags, COMN_BNP_ENABLED_BITPOS, COMN_BNP_ENABLED_MASK)
+
+/*
+ * Macro for setting the pointer type bit in the common flags
+ *
+ * flags Flags in which Pointer Type bit will be set
+ * val Value of the bit to be set in flags
+ *
+ */
+#define FW_COMN_PTR_TYPE_SET(flags, val) \
+ FIELD_SET(flags, val, COMN_PTR_TYPE_BITPOS, COMN_PTR_TYPE_MASK)
+
+/*
+ * Macro for setting the cd field type bit in the common flags
+ *
+ * flags Flags in which Cd Field Type bit will be set
+ * val Value of the bit to be set in flags
+ *
+ */
+#define FW_COMN_CD_FLD_TYPE_SET(flags, val) \
+ FIELD_SET( \
+ flags, val, COMN_CD_FLD_TYPE_BITPOS, COMN_CD_FLD_TYPE_MASK)
+
+/*
+ * Macro for setting the bnp field type bit in the common flags
+ *
+ * flags Flags in which Bnp Field Type bit will be set
+ * val Value of the bit to be set in flags
+ *
+ */
+#define FW_COMN_BNP_ENABLE_SET(flags, val) \
+ FIELD_SET( \
+ flags, val, COMN_BNP_ENABLED_BITPOS, COMN_BNP_ENABLED_MASK)
+
+/*
+ * Macros using the bit position and mask to set/extract the next
+ * and current id nibbles within the next_curr_id field of the
+ * content descriptor header block. Note that these are defined
+ * in the common header file, as they are used by compression, cipher
+ * and authentication.
+ *
+ * cd_ctrl_hdr_t Content descriptor control block header pointer.
+ * val Value of the field being set.
+ */
+#define FW_COMN_NEXT_ID_BITPOS 4
+#define FW_COMN_NEXT_ID_MASK 0xF0
+#define FW_COMN_CURR_ID_BITPOS 0
+#define FW_COMN_CURR_ID_MASK 0x0F
+
+#define FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \
+ ((((cd_ctrl_hdr_t)->next_curr_id) & FW_COMN_NEXT_ID_MASK) >> \
+ (FW_COMN_NEXT_ID_BITPOS))
+
+#define FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+ ((cd_ctrl_hdr_t)->next_curr_id) = \
+ ((((cd_ctrl_hdr_t)->next_curr_id) & FW_COMN_CURR_ID_MASK) | \
+ ((val << FW_COMN_NEXT_ID_BITPOS) & \
+ FW_COMN_NEXT_ID_MASK))
+
+#define FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \
+ (((cd_ctrl_hdr_t)->next_curr_id) & FW_COMN_CURR_ID_MASK)
+
+#define FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+ ((cd_ctrl_hdr_t)->next_curr_id) = \
+ ((((cd_ctrl_hdr_t)->next_curr_id) & FW_COMN_NEXT_ID_MASK) | \
+ ((val)&FW_COMN_CURR_ID_MASK))
+
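+/*
+ * Illustrative sketch, not part of the driver: for a cipher-only session
+ * the two nibbles of next_curr_id would be packed so that the cipher slice
+ * output goes straight to DRAM write-back, e.g.
+ *
+ *	struct fw_cipher_cd_ctrl_hdr *cd_ctrl;
+ *
+ *	FW_COMN_CURR_ID_SET(cd_ctrl, FW_SLICE_CIPHER);
+ *	FW_COMN_NEXT_ID_SET(cd_ctrl, FW_SLICE_DRAM_WR);
+ *
+ * FW_SLICE_CIPHER and FW_SLICE_DRAM_WR are assumed here to be the slice
+ * type constants defined elsewhere in this header set.
+ */
+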
+/*
+ * Common Status Field Definition The bit offsets below are within the COMMON
+ * RESPONSE status field, assumed to be 8 bits wide. In the case of the PKE
+ * response (which follows the CPM 1.5 message format), the status field is 16
+ * bits wide.
+ * The status flags are contained within the most significant byte and align
+ * with the diagram below. Please therefore refer to the service-specific PKE
+ * header file for the appropriate macro definition to extract the PKE status
+ * flag from the PKE response, which assumes that a word is passed to the
+ * macro.
+ * + ===== + ------ + --- + --- + ---- + ---- + -------- + ---- + ---------- +
+ * | Bit | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
+ * + ===== + ------ + --- + --- + ---- + ---- + -------- + ---- + ---------- +
+ * | Flags | Crypto | Pke | Cmp | Xlat | EOLB | UnSupReq | Rsvd | XltWaApply |
+ * + ===== + ------ + --- + --- + ---- + ---- + -------- + ---- + ---------- +
+ * Note:
+ * For the service-specific status bit definitions refer to the service
+ * header files. E.g. the Crypto Status bit refers to Symmetric Crypto, Key
+ * Generation, and NRBG request status. Unused bits, e.g. reserved bits, need
+ * to have been forced to 0.
+ */
+
+#define COMN_RESP_CRYPTO_STATUS_BITPOS 7
+/* Starting bit position indicating Response for Crypto service Flag */
+
+#define COMN_RESP_CRYPTO_STATUS_MASK 0x1
+/* One bit mask used to determine Crypto status mask */
+
+#define COMN_RESP_PKE_STATUS_BITPOS 6
+/* Starting bit position indicating Response for PKE service Flag */
+
+#define COMN_RESP_PKE_STATUS_MASK 0x1
+/* One bit mask used to determine PKE status mask */
+
+#define COMN_RESP_CMP_STATUS_BITPOS 5
+/* Starting bit position indicating Response for Compression service Flag */
+
+#define COMN_RESP_CMP_STATUS_MASK 0x1
+/* One bit mask used to determine Compression status mask */
+
+#define COMN_RESP_XLAT_STATUS_BITPOS 4
+/* Starting bit position indicating Response for Xlat service Flag */
+
+#define COMN_RESP_XLAT_STATUS_MASK 0x1
+/* One bit mask used to determine Translator status mask */
+
+#define COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
+/* Starting bit position indicating the last block in a deflate stream for
+ the compression service Flag */
+
+#define COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
+/* One bit mask used to determine the last block in a deflate stream
+ status mask */
+
+#define COMN_RESP_UNSUPPORTED_REQUEST_BITPOS 2
+/* Starting bit position indicating an unsupported service request Flag */
+
+#define COMN_RESP_UNSUPPORTED_REQUEST_MASK 0x1
+/* One bit mask used to determine the unsupported service request status mask */
+
+#define COMN_RESP_XLT_WA_APPLIED_BITPOS 0
+/* Bit position indicating a firmware workaround was applied to translation */
+
+#define COMN_RESP_XLT_WA_APPLIED_MASK 0x1
+/* One bit mask */
+
+/*
+ * Macro that must be used when building the status
+ * for the common response
+ *
+ * crypto Value of the Crypto Service status flag
+ * pke Value of the PKE Service status flag
+ * comp Value of the Compression Service Status flag
+ * xlat Value of the Xlator Status flag
+ * eolb Value of the Compression End of Last Block Status flag
+ * unsupp Value of the Unsupported Request flag
+ * xlt_wa Value of the Translation WA marker
+ */
+#define FW_COMN_RESP_STATUS_BUILD( \
+ crypto, pke, comp, xlat, eolb, unsupp, xlt_wa) \
+ ((((crypto)&COMN_RESP_CRYPTO_STATUS_MASK) \
+ << COMN_RESP_CRYPTO_STATUS_BITPOS) | \
+ (((pke)&COMN_RESP_PKE_STATUS_MASK) \
+ << COMN_RESP_PKE_STATUS_BITPOS) | \
+ (((xlt_wa)&COMN_RESP_XLT_WA_APPLIED_MASK) \
+ << COMN_RESP_XLT_WA_APPLIED_BITPOS) | \
+ (((comp)&COMN_RESP_CMP_STATUS_MASK) \
+ << COMN_RESP_CMP_STATUS_BITPOS) | \
+ (((xlat)&COMN_RESP_XLAT_STATUS_MASK) \
+ << COMN_RESP_XLAT_STATUS_BITPOS) | \
+ (((eolb)&COMN_RESP_CMP_END_OF_LAST_BLK_MASK) \
+ << COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS) | \
+ (((unsupp)&COMN_RESP_UNSUPPORTED_REQUEST_MASK) \
+ << COMN_RESP_UNSUPPORTED_REQUEST_BITPOS))
+
+/*
+ * Macro for extraction of the Crypto bit from the status
+ *
+ * status Status to extract the status bit from
+ */
+#define FW_COMN_RESP_CRYPTO_STAT_GET(status) \
+ FIELD_GET(status, \
+ COMN_RESP_CRYPTO_STATUS_BITPOS, \
+ COMN_RESP_CRYPTO_STATUS_MASK)
+
+/*
+ * Macro for extraction of the PKE bit from the status
+ *
+ * status Status to extract the status bit from
+ */
+#define FW_COMN_RESP_PKE_STAT_GET(status) \
+ FIELD_GET(status, \
+ COMN_RESP_PKE_STATUS_BITPOS, \
+ COMN_RESP_PKE_STATUS_MASK)
+
+/*
+ * Macro for extraction of the Compression bit from the status
+ *
+ * status Status to extract the status bit from
+ */
+#define FW_COMN_RESP_CMP_STAT_GET(status) \
+ FIELD_GET(status, \
+ COMN_RESP_CMP_STATUS_BITPOS, \
+ COMN_RESP_CMP_STATUS_MASK)
+
+/*
+ * Macro for extraction of the Translator bit from the status
+ *
+ * status Status to extract the status bit from
+ */
+#define FW_COMN_RESP_XLAT_STAT_GET(status) \
+ FIELD_GET(status, \
+ COMN_RESP_XLAT_STATUS_BITPOS, \
+ COMN_RESP_XLAT_STATUS_MASK)
+
+/*
+ * Macro for extraction of the Translation Workaround Applied bit from the
+ * status
+ *
+ * status Status to extract the status bit from
+ */
+#define FW_COMN_RESP_XLT_WA_APPLIED_GET(status) \
+ FIELD_GET(status, \
+ COMN_RESP_XLT_WA_APPLIED_BITPOS, \
+ COMN_RESP_XLT_WA_APPLIED_MASK)
+
+/*
+ * Macro for extraction of the end of compression block bit from the
+ * status
+ *
+ * status
+ * Status to extract the status bit from
+ */
+#define FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
+ FIELD_GET(status, \
+ COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
+ COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
+
+/*
+ * Macro for extraction of the Unsupported request from the status
+ *
+ * status
+ * Status to extract the status bit from
+ */
+#define FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(status) \
+ FIELD_GET(status, \
+ COMN_RESP_UNSUPPORTED_REQUEST_BITPOS, \
+ COMN_RESP_UNSUPPORTED_REQUEST_MASK)
+
+#define FW_COMN_STATUS_FLAG_OK 0
+/* Definition of successful processing of a request */
+
+#define FW_COMN_STATUS_FLAG_ERROR 1
+/* Definition of erroneous processing of a request */
+
+#define FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
+/* Final Deflate block of a compression request not completed */
+
+#define FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1
+/* Final Deflate block of a compression request completed */
+
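+/*
+ * Illustrative sketch, not part of the driver: a completion handler for a
+ * symmetric crypto request would typically test the crypto bit of the
+ * common status byte, e.g.
+ *
+ *	struct fw_comn_resp *resp = ...;	/. response ring entry ./
+ *	int error = 0;
+ *
+ *	if (FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status) ==
+ *	    FW_COMN_STATUS_FLAG_ERROR)
+ *		error = EIO;	/. details are in comn_hdr.comn_error ./
+ */
+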
+#define ERR_CODE_NO_ERROR 0
+/* Error Code constant value for no error */
+
+#define ERR_CODE_INVALID_BLOCK_TYPE -1
+/* Invalid block type (type == 3)*/
+
+#define ERR_CODE_NO_MATCH_ONES_COMP -2
+/* Stored block length does not match one's complement */
+
+#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3
+/* Too many length or distance codes */
+
+#define ERR_CODE_INCOMPLETE_LEN -4
+/* Code lengths codes incomplete */
+
+#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5
+/* Repeat lengths with no first length */
+
+#define ERR_CODE_RPT_GT_SPEC_LEN -6
+/* Repeat more than specified lengths */
+
+#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7
+/* Invalid lit/len code lengths */
+
+#define ERR_CODE_INV_DIS_CODE_LEN -8
+/* Invalid distance code lengths */
+
+#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9
+/* Invalid lit/len or distance code in fixed/dynamic block */
+
+#define ERR_CODE_DIS_TOO_FAR_BACK -10
+/* Distance too far back in fixed or dynamic block */
+
+/* Common Error code definitions */
+#define ERR_CODE_OVERFLOW_ERROR -11
+/* Error Code constant value for overflow error */
+
+#define ERR_CODE_SOFT_ERROR -12
+/* Error Code constant value for soft error */
+
+#define ERR_CODE_FATAL_ERROR -13
+/* Error Code constant value for hard/fatal error */
+
+#define ERR_CODE_COMP_OUTPUT_CORRUPTION -14
+/* Error Code constant for compression output corruption */
+
+#define ERR_CODE_HW_INCOMPLETE_FILE -15
+/* Error Code constant value for incomplete file hardware error */
+
+#define ERR_CODE_SSM_ERROR -16
+/* Error Code constant value for error detected by SSM e.g. slice hang */
+
+#define ERR_CODE_ENDPOINT_ERROR -17
+/* Error Code constant value for error detected by PCIe Endpoint, e.g. push
+ * data error */
+
+#define ERR_CODE_CNV_ERROR -18
+/* Error Code constant value for cnv failure */
+
+#define ERR_CODE_EMPTY_DYM_BLOCK -19
+/* Error Code constant value for submission of empty dynamic stored block to
+ * slice */
+
+#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_HANDLE -20
+/* Error Code constant for invalid handle in kpt crypto service */
+
+#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_HMAC_FAILED -21
+/* Error Code constant for failed hmac in kpt crypto service */
+
+#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_WRAPPING_ALGO -22
+/* Error Code constant for invalid wrapping algo in kpt crypto service */
+
+#define ERR_CODE_KPT_DRNG_SEED_NOT_LOAD -23
+/* Error Code constant for no drng seed is not loaded in kpt ecdsa signrs
+/service */
+
+#define FW_LA_ICV_VER_STATUS_PASS FW_COMN_STATUS_FLAG_OK
+/* Status flag indicating that the ICV verification passed */
+
+#define FW_LA_ICV_VER_STATUS_FAIL FW_COMN_STATUS_FLAG_ERROR
+/* Status flag indicating that the ICV verification failed */
+
+#define FW_LA_TRNG_STATUS_PASS FW_COMN_STATUS_FLAG_OK
+/* Status flag indicating that the TRNG returned valid entropy data */
+
+#define FW_LA_TRNG_STATUS_FAIL FW_COMN_STATUS_FLAG_ERROR
+/* Status flag indicating that the TRNG Command Failed. */
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * Definition of the full bulk processing request structure.
+ * Used for hash, cipher, hash-cipher and authentication-encryption
+ * requests etc.
+ */
+struct fw_la_bulk_req
+{
+ /* LWs 0-1 */
+ struct fw_comn_req_hdr comn_hdr;
+ /* Common request header - for Service Command Id,
+ * use service-specific Crypto Command Id.
+ * Service Specific Flags - use Symmetric Crypto Command Flags
+ * (all of cipher, auth, SSL3, TLS and MGF,
+ * excluding TRNG - field unused) */
+
+ /* LWs 2-5 */
+ union fw_comn_req_hdr_cd_pars cd_pars;
+ /* Common Request content descriptor field which points either to a
+ * content descriptor
+ * parameter block or contains the service-specific data itself. */
+
+ /* LWs 6-13 */
+ struct fw_comn_req_mid comn_mid;
+ /* Common request middle section */
+
+ /* LWs 14-26 */
+ struct fw_comn_req_rqpars serv_specif_rqpars;
+ /* Common request service-specific parameter field */
+
+ /* LWs 27-31 */
+ struct fw_comn_req_cd_ctrl cd_ctrl;
+ /* Common request content descriptor control block -
+ * this field is service-specific */
+
+};
+
+/* clang-format off */
+
+/*
+ * LA BULK (SYMMETRIC CRYPTO) COMMAND FLAGS
+ *
+ * + ===== + ---------- + ----- + ----- + ----- + ----- + ----- + ----- + ----- + ----- + ----- + ----- +
+ * | Bit | [15:13] | 12 | 11 | 10 | 7-9 | 6 | 5 | 4 | 3 | 2 | 1-0 |
+ * + ===== + ---------- + ----- + ----- + ----- + ----- + ----- + ----- + ----- + ----- + ------+ ----- +
+ * | Flags | Resvd Bits | ZUC | GcmIV |Digest | Prot | Cmp | Rtn | Upd | Ciph/ | CiphIV| Part- |
+ * | | =0 | Prot | Len | In Buf| flgs | Auth | Auth | State | Auth | Field | ial |
+ * + ===== + ---------- + ----- + ----- + ----- + ----- + ----- + ----- + ----- + ----- + ------+ ----- +
+ */
+
+/* clang-format on */
+
+/* Private defines */
+
+#define FW_LA_ZUC_3G_PROTO __BIT(12)
+/* Indicating ZUC processing for an encrypt command
+ * Must be set for Cipher-only, Cipher + Auth and Auth-only */
+
+#define FW_LA_GCM_IV_LEN_12_OCTETS __BIT(11)
+/* Indicates the IV Length for GCM protocol is 96 Bits (12 Octets)
+ * If set FW does the padding to compute CTR0 */
+
+#define FW_LA_DIGEST_IN_BUFFER __BIT(10)
+/* Flag representing that authentication digest is stored or is extracted
+ * from the source buffer. Auth Result Pointer will be ignored in this case. */
+
+#define FW_LA_PROTO __BITS(7, 9)
+#define FW_LA_PROTO_SNOW_3G __BIT(9)
+/* Indicates SNOW_3G processing for an encrypt command */
+#define FW_LA_PROTO_GCM __BIT(8)
+/* Indicates GCM processing for an auth_encrypt command */
+#define FW_LA_PROTO_CCM __BIT(7)
+/* Indicates CCM processing for an auth_encrypt command */
+#define FW_LA_PROTO_NONE 0
+/* Indicates no specific protocol processing for the command */
+
+#define FW_LA_CMP_AUTH_RES __BIT(6)
+/* Flag representing the need to compare the auth result data to the expected
+ * value in DRAM at the auth_address. */
+
+#define FW_LA_RET_AUTH_RES __BIT(5)
+/* Flag representing the need to return the auth result data to dram after the
+ * request processing is complete */
+
+#define FW_LA_UPDATE_STATE __BIT(4)
+/* Flag representing the need to update the state data in dram after the
+ * request processing is complete */
+
+#define FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP __BIT(3)
+/* Flag representing Cipher/Auth Config Offset Type, where the offset
+ * is contained in SHRAM constants page. When the SHRAM constants page
+ * is not used for cipher/auth configuration, then the Content Descriptor
+ * pointer field must be a pointer (as opposed to a 16-byte key), since
+ * the block pointed to must contain both the slice config and the key */
+
+#define FW_CIPH_IV_16BYTE_DATA __BIT(2)
+/* Flag representing Cipher IV field contents as 16-byte data array
+ * Otherwise Cipher IV field contents via 64-bit pointer */
+
+#define FW_LA_PARTIAL __BITS(0, 1)
+#define FW_LA_PARTIAL_NONE 0
+/* Flag representing no need for partial processing condition i.e.
+ * entire packet processed in the current command */
+#define FW_LA_PARTIAL_START 1
+/* Flag representing the first chunk of the partial packet */
+#define FW_LA_PARTIAL_MID 3
+/* Flag representing a middle chunk of the partial packet */
+#define FW_LA_PARTIAL_END 2
+/* Flag representing the final/end chunk of the partial packet */
+
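+/*
+ * Illustrative sketch, not part of the driver: the LW1 service-specific
+ * flags for a single-shot AES-GCM request with the digest held in the
+ * source buffer could be built directly from the bit definitions above,
+ * e.g.
+ *
+ *	req.comn_hdr.serv_specif_flags =
+ *	    FW_LA_PROTO_GCM | FW_LA_DIGEST_IN_BUFFER | FW_LA_PARTIAL_NONE;
+ *
+ * All remaining flag bits stay at 0 for this case.
+ */
+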
+/* The table below defines the meaning of the prefix_addr & hash_state_sz in
+ * the case of partial processing. See the HLD for further details
+ *
+ * + ====== + ------------------------- + ----------------------- +
+ * | Partial | Prefix Addr | Hash State Sz |
+ * | State | | |
+ * + ====== + ------------------------- + ----------------------- +
+ * | FULL | Points to the prefix data | Prefix size as below. |
+ * | | | No update of state |
+ * + ====== + ------------------------- + ----------------------- +
+ * | SOP | Points to the prefix | = inner prefix rounded |
+ * | | data. State is updated | to qwrds + outer prefix |
+ * | | at prefix_addr - state_sz | rounded to qwrds. The |
+ * | | - 8 (counter size) | writeback state sz |
+ * | | | comes from the CD |
+ * + ====== + ------------------------- + ----------------------- +
+ * | MOP | Points to the state data | State size rounded to |
+ * | | Updated state written to | num qwrds + 8 (for the |
+ * | | same location | counter) + inner prefix |
+ * | | | rounded to qwrds + |
+ * | | | outer prefix rounded to |
+ * | | | qwrds. |
+ * + ====== + ------------------------- + ----------------------- +
+ * | EOP | Points to the state data | State size rounded to |
+ * | | | num qwrds + 8 (for the |
+ * | | | counter) + inner prefix |
+ * | | | rounded to qwrds + |
+ * | | | outer prefix rounded to |
+ * | | | qwrds. |
+ * + ====== + ------------------------- + ----------------------- +
+ *
+ * Notes:
+ *
+ * - If the EOP is set it is assumed that no state update is to be performed.
+ * However it is the client's responsibility to set the update_state flag
+ * correctly i.e. not set for EOP or Full packet cases. Only set for SOP and
+ * MOP with no EOP flag
+ * - The SOP takes precedence over the MOP and EOP i.e. in the calculation of
+ * the address to write back the state.
+ * - The prefix address must be on at least an 8 byte boundary
+ */
+
+/* Macros for extracting field bits */
+/*
+ * Macro for extraction of the Cipher IV field contents (bit 2)
+ *
+ * flags Flags to extract the Cipher IV field contents
+ *
+ */
+#define FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \
+ FIELD_GET(flags, LA_CIPH_IV_FLD_BITPOS, LA_CIPH_IV_FLD_MASK)
+
+/*
+ * Macro for extraction of the Cipher/Auth Config
+ * offset type (bit 3)
+ *
+ * flags Flags to extract the Cipher/Auth Config offset type
+ *
+ */
+#define FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \
+ FIELD_GET(flags, \
+ LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+ LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+/*
+ * Macro for extraction of the ZUC protocol bit
+ * information (bit 12)
+ *
+ * flags Flags to extract the ZUC protocol bit
+ */
+#define FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \
+ FIELD_GET(flags, \
+ FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+ FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+/*
+ * Macro for extraction of the GCM IV Len is 12 Octets / 96 Bits
+ * information (bit 11)
+ *
+ * flags Flags to extract the GCM IV length
+ */
+#define FW_LA_GCM_IV_LEN_FLAG_GET(flags) \
+ FIELD_GET( \
+ flags, LA_GCM_IV_LEN_FLAG_BITPOS, LA_GCM_IV_LEN_FLAG_MASK)
+
+/*
+ * Macro for extraction of the LA protocol state (bits 9-7)
+ *
+ * flags Flags to extract the protocol state
+ */
+#define FW_LA_PROTO_GET(flags) \
+ FIELD_GET(flags, LA_PROTO_BITPOS, LA_PROTO_MASK)
+
+/*
+ * Macro for extraction of the "compare auth" state (bit 6)
+ *
+ * flags Flags to extract the compare auth result state
+ *
+ */
+#define FW_LA_CMP_AUTH_GET(flags) \
+ FIELD_GET(flags, LA_CMP_AUTH_RES_BITPOS, LA_CMP_AUTH_RES_MASK)
+
+/*
+ * Macro for extraction of the "return auth" state (bit 5)
+ *
+ * flags Flags to extract the return auth result state
+ *
+ */
+#define FW_LA_RET_AUTH_GET(flags) \
+ FIELD_GET(flags, LA_RET_AUTH_RES_BITPOS, LA_RET_AUTH_RES_MASK)
+
+/*
+ * Macro for extraction of the "digest in buffer" state (bit 10)
+ *
+ * flags Flags to extract the digest in buffer state
+ *
+ */
+#define FW_LA_DIGEST_IN_BUFFER_GET(flags) \
+ FIELD_GET( \
+ flags, LA_DIGEST_IN_BUFFER_BITPOS, LA_DIGEST_IN_BUFFER_MASK)
+
+/*
+ * Macro for extraction of the update content state value. (bit 4)
+ *
+ * flags Flags to extract the update content state bit
+ */
+#define FW_LA_UPDATE_STATE_GET(flags) \
+ FIELD_GET(flags, LA_UPDATE_STATE_BITPOS, LA_UPDATE_STATE_MASK)
+
+/*
+ * Macro for extraction of the "partial" packet state (bits 1-0)
+ *
+ * flags Flags to extract the partial state
+ */
+#define FW_LA_PARTIAL_GET(flags) \
+ FIELD_GET(flags, LA_PARTIAL_BITPOS, LA_PARTIAL_MASK)
+
+/* Macros for setting field bits */
+/*
+ * Macro for setting the Cipher IV field contents
+ *
+ * flags Flags to set with the Cipher IV field contents
+ * val Field contents indicator value
+ */
+#define FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \
+ FIELD_SET( \
+ flags, val, LA_CIPH_IV_FLD_BITPOS, LA_CIPH_IV_FLD_MASK)
+
+/*
+ * Macro for setting the Cipher/Auth Config
+ * offset type
+ *
+ * flags Flags to set the Cipher/Auth Config offset type
+ * val Offset type value
+ */
+#define FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \
+ FIELD_SET(flags, \
+ val, \
+ LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+ LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+/*
+ * Macro for setting the ZUC protocol flag
+ *
+ * flags Flags to set the ZUC protocol flag
+ * val Protocol value
+ */
+#define FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \
+ FIELD_SET(flags, \
+ val, \
+ FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+ FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+/*
+ * Macro for setting the GCM IV length flag state
+ *
+ * flags Flags to set the GCM IV length flag state
+ * val Protocol value
+ */
+#define FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \
+ FIELD_SET(flags, \
+ val, \
+ LA_GCM_IV_LEN_FLAG_BITPOS, \
+ LA_GCM_IV_LEN_FLAG_MASK)
+
+/*
+ * Macro for setting the LA protocol flag state
+ *
+ * flags Flags to set the protocol state
+ * val Protocol value
+ */
+#define FW_LA_PROTO_SET(flags, val) \
+ FIELD_SET(flags, val, LA_PROTO_BITPOS, LA_PROTO_MASK)
+
+/*
+ * Macro for setting the "compare auth" flag state
+ *
+ * flags Flags to set the compare auth result state
+ * val Compare Auth value
+ */
+#define FW_LA_CMP_AUTH_SET(flags, val) \
+ FIELD_SET( \
+ flags, val, LA_CMP_AUTH_RES_BITPOS, LA_CMP_AUTH_RES_MASK)
+
+/*
+ * Macro for setting the "return auth" flag state
+ *
+ * flags Flags to set the return auth result state
+ * val Return Auth value
+ */
+#define FW_LA_RET_AUTH_SET(flags, val) \
+ FIELD_SET( \
+ flags, val, LA_RET_AUTH_RES_BITPOS, LA_RET_AUTH_RES_MASK)
+
+/*
+ * Macro for setting the "digest in buffer" flag state
+ *
+ * flags Flags to set the digest in buffer state
+ * val Digest in buffer value
+ */
+#define FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \
+ FIELD_SET(flags, \
+ val, \
+ LA_DIGEST_IN_BUFFER_BITPOS, \
+ LA_DIGEST_IN_BUFFER_MASK)
+
+/*
+ * Macro for setting the "update state" flag value
+ *
+ * flags Flags to set the update content state
+ * val Update Content State flag value
+ */
+#define FW_LA_UPDATE_STATE_SET(flags, val) \
+ FIELD_SET( \
+ flags, val, LA_UPDATE_STATE_BITPOS, LA_UPDATE_STATE_MASK)
+
+/*
+ * Macro for setting the "partial" packet flag state
+ *
+ * flags Flags to set the partial state
+ * val Partial state value
+ */
+#define FW_LA_PARTIAL_SET(flags, val) \
+ FIELD_SET(flags, val, LA_PARTIAL_BITPOS, LA_PARTIAL_MASK)
+
+/*
+ * Definition of the Cipher header Content Descriptor pars block
+ * Definition of the cipher processing header cd pars block.
+ * The structure is a service-specific implementation of the common
+ * 'fw_comn_req_hdr_cd_pars_s' structure.
+ */
+union fw_cipher_req_hdr_cd_pars {
+ /* LWs 2-5 */
+ struct
+ {
+ uint64_t content_desc_addr;
+ /* Address of the content descriptor */
+
+ uint16_t content_desc_resrvd1;
+ /* Content descriptor reserved field */
+
+ uint8_t content_desc_params_sz;
+ /* Size of the content descriptor parameters in quad words. These
+ * parameters describe the session setup configuration info for the
+ * slices that this request relies upon i.e. the configuration word and
+ * cipher key needed by the cipher slice if there is a request for
+ * cipher processing. */
+
+ uint8_t content_desc_hdr_resrvd2;
+ /* Content descriptor reserved field */
+
+ uint32_t content_desc_resrvd3;
+ /* Content descriptor reserved field */
+ } s;
+
+ struct
+ {
+ uint32_t cipher_key_array[FW_NUM_LONGWORDS_4];
+ /* Cipher Key Array */
+
+ } s1;
+
+};
+
+/*
+ * Definition of the Authentication header Content Descriptor pars block
+ * Definition of the authentication processing header cd pars block.
+ */
+/* Note: Authentication uses the common 'fw_comn_req_hdr_cd_pars_s'
+ * structure - similarly, it is also used by SSL3, TLS and MGF. Only cipher
+ * and cipher + authentication require service-specific implementations of
+ * the structure */
+
+/*
+ * Definition of the Cipher + Auth header Content Descriptor pars block
+ * Definition of the cipher + auth processing header cd pars block.
+ * The structure is a service-specific implementation of the common
+ * 'fw_comn_req_hdr_cd_pars_s' structure.
+ */
+union fw_cipher_auth_req_hdr_cd_pars {
+ /* LWs 2-5 */
+ struct
+ {
+ uint64_t content_desc_addr;
+ /* Address of the content descriptor */
+
+ uint16_t content_desc_resrvd1;
+ /* Content descriptor reserved field */
+
+ uint8_t content_desc_params_sz;
+ /* Size of the content descriptor parameters in quad words. These
+ * parameters describe the session setup configuration info for the
+ * slices that this request relies upon i.e. the configuration word and
+ * cipher key needed by the cipher slice if there is a request for
+ * cipher processing. */
+
+ uint8_t content_desc_hdr_resrvd2;
+ /* Content descriptor reserved field */
+
+ uint32_t content_desc_resrvd3;
+ /* Content descriptor reserved field */
+ } s;
+
+ struct
+ {
+ uint32_t cipher_key_array[FW_NUM_LONGWORDS_4];
+ /* Cipher Key Array */
+
+ } sl;
+
+};
+
+/*
+ * Cipher content descriptor control block (header)
+ * Definition of the service-specific cipher control block header
+ * structure. This header forms part of the content descriptor
+ * block incorporating LWs 27-31, as defined by the common base
+ * parameters structure.
+ */
+struct fw_cipher_cd_ctrl_hdr
+{
+ /* LW 27 */
+ uint8_t cipher_state_sz;
+ /* State size in quad words of the cipher algorithm used in this session.
+ * Set to zero if the algorithm doesn't provide any state */
+
+ uint8_t cipher_key_sz;
+ /* Key size in quad words of the cipher algorithm used in this session */
+
+ uint8_t cipher_cfg_offset;
+ /* Quad word offset from the content descriptor parameters address i.e.
+ * (content_address + (cd_hdr_sz << 3)) to the parameters for the cipher
+ * processing */
+
+ uint8_t next_curr_id;
+ /* This field combines the next and current id (each four bits) -
+ * the next id is the most significant nibble.
+ * Next Id: Set to the next slice to pass the ciphered data through.
+ * Set to FW_SLICE_DRAM_WR if the data is not to go through
+ * any more slices after cipher.
+ * Current Id: Initialised with the cipher slice type */
+
+ /* LW 28 */
+ uint8_t cipher_padding_sz;
+ /* State padding size in quad words. Set to 0 if no padding is required.
+ */
+
+ uint8_t resrvd1;
+ uint16_t resrvd2;
+ /* Reserved bytes to bring the struct to the word boundary, used by
+ * authentication. MUST be set to 0 */
+
+ /* LWs 29-31 */
+ uint32_t resrvd3[FW_NUM_LONGWORDS_3];
+ /* Reserved bytes used by authentication. MUST be set to 0 */
+
+};
+
+/*
+ * Authentication content descriptor control block (header)
+ * Definition of the service-specific authentication control block
+ * header structure. This header forms part of the content descriptor
+ * block incorporating LWs 27-31, as defined by the common base
+ * parameters structure, the first portion of which is reserved for
+ * cipher.
+ */
+struct fw_auth_cd_ctrl_hdr
+{
+ /* LW 27 */
+ uint32_t resrvd1;
+ /* Reserved bytes, used by cipher only. MUST be set to 0 */
+
+ /* LW 28 */
+ uint8_t resrvd2;
+ /* Reserved byte, used by cipher only. MUST be set to 0 */
+
+ uint8_t hash_flags;
+ /* General flags defining the processing to perform. 0 is normal
+ * processing
+ * and 1 means there is a nested hash processing loop to go through */
+
+ uint8_t hash_cfg_offset;
+ /* Quad word offset from the content descriptor parameters address to the
+ * parameters for the auth processing */
+
+ uint8_t next_curr_id;
+ /* This field combines the next and current id (each four bits) -
+ * the next id is the most significant nibble.
+ * Next Id: Set to the next slice to pass the authentication data through.
+ * Set to FW_SLICE_DRAM_WR if the data is not to go through
+ * any more slices after authentication.
+ * Current Id: Initialised with the authentication slice type */
+
+ /* LW 29 */
+ uint8_t resrvd3;
+ /* Now a reserved field. MUST be set to 0 */
+
+ uint8_t outer_prefix_sz;
+ /* Size in bytes of outer prefix data */
+
+ uint8_t final_sz;
+ /* Size in bytes of digest to be returned to the client if requested */
+
+ uint8_t inner_res_sz;
+ /* Size in bytes of the digest from the inner hash algorithm */
+
+ /* LW 30 */
+ uint8_t resrvd4;
+ /* Now a reserved field. MUST be set to zero. */
+
+ uint8_t inner_state1_sz;
+ /* Size in bytes of inner hash state1 data. Must be a qword multiple */
+
+ uint8_t inner_state2_offset;
+ /* Quad word offset from the content descriptor parameters pointer to the
+ * inner state2 value */
+
+ uint8_t inner_state2_sz;
+ /* Size in bytes of inner hash state2 data. Must be a qword multiple */
+
+ /* LW 31 */
+ uint8_t outer_config_offset;
+ /* Quad word offset from the content descriptor parameters pointer to the
+ * outer configuration information */
+
+ uint8_t outer_state1_sz;
+ /* Size in bytes of the outer state1 value */
+
+ uint8_t outer_res_sz;
+ /* Size in bytes of digest from the outer auth algorithm */
+
+ uint8_t outer_prefix_offset;
+ /* Quad word offset from the start of the inner prefix data to the outer
+ * prefix information. Should equal the rounded inner prefix size, converted
+ * to qwords */
+
+};
+
+/*
+ * Cipher + Authentication content descriptor control block header
+ * Definition of both service-specific cipher + authentication control
+ * block header structures. This header forms part of the content
+ * descriptor block incorporating LWs 27-31, as defined by the common
+ * base parameters structure.
+ */
+struct fw_cipher_auth_cd_ctrl_hdr
+{
+ /* LW 27 */
+ uint8_t cipher_state_sz;
+ /* State size in quad words of the cipher algorithm used in this session.
+ * Set to zero if the algorithm doesn't provide any state */
+
+ uint8_t cipher_key_sz;
+ /* Key size in quad words of the cipher algorithm used in this session */
+
+ uint8_t cipher_cfg_offset;
+ /* Quad word offset from the content descriptor parameters address i.e.
+ * (content_address + (cd_hdr_sz << 3)) to the parameters for the cipher
+ * processing */
+
+ uint8_t next_curr_id_cipher;
+ /* This field combines the next and current id (each four bits) -
+ * the next id is the most significant nibble.
+ * Next Id: Set to the next slice to pass the ciphered data through.
+ * Set to FW_SLICE_DRAM_WR if the data is not to go through
+ * any more slices after cipher.
+ * Current Id: Initialised with the cipher slice type */
+
+ /* LW 28 */
+ uint8_t cipher_padding_sz;
+ /* State padding size in quad words. Set to 0 if no padding is required.
+ */
+
+ uint8_t hash_flags;
+ /* General flags defining the processing to perform. 0 is normal
+ * processing
+ * and 1 means there is a nested hash processing loop to go through */
+
+ uint8_t hash_cfg_offset;
+ /* Quad word offset from the content descriptor parameters address to the
+ * parameters for the auth processing */
+
+ uint8_t next_curr_id_auth;
+ /* This field combines the next and current id (each four bits) -
+ * the next id is the most significant nibble.
+ * Next Id: Set to the next slice to pass the authentication data through.
+ * Set to FW_SLICE_DRAM_WR if the data is not to go through
+ * any more slices after authentication.
+ * Current Id: Initialised with the authentication slice type */
+
+ /* LW 29 */
+ uint8_t resrvd1;
+ /* Reserved field. MUST be set to 0 */
+
+ uint8_t outer_prefix_sz;
+ /* Size in bytes of outer prefix data */
+
+ uint8_t final_sz;
+ /* Size in bytes of digest to be returned to the client if requested */
+
+ uint8_t inner_res_sz;
+ /* Size in bytes of the digest from the inner hash algorithm */
+
+ /* LW 30 */
+ uint8_t resrvd2;
+ /* Now a reserved field. MUST be set to zero. */
+
+ uint8_t inner_state1_sz;
+ /* Size in bytes of inner hash state1 data. Must be a qword multiple */
+
+ uint8_t inner_state2_offset;
+ /* Quad word offset from the content descriptor parameters pointer to the
+ * inner state2 value */
+
+ uint8_t inner_state2_sz;
+ /* Size in bytes of inner hash state2 data. Must be a qword multiple */
+
+ /* LW 31 */
+ uint8_t outer_config_offset;
+ /* Quad word offset from the content descriptor parameters pointer to the
+ * outer configuration information */
+
+ uint8_t outer_state1_sz;
+ /* Size in bytes of the outer state1 value */
+
+ uint8_t outer_res_sz;
+ /* Size in bytes of digest from the outer auth algorithm */
+
+ uint8_t outer_prefix_offset;
+ /* Quad word offset from the start of the inner prefix data to the outer
+ * prefix information. Should equal the rounded inner prefix size, converted
+ * to qwords */
+
+};
+
+#define FW_AUTH_HDR_FLAG_DO_NESTED 1
+/* Definition of the hash_flags bit of the auth_hdr to indicate the request
+ * requires nested hashing */
+
+#define FW_AUTH_HDR_FLAG_NO_NESTED 0
+/* Definition of the hash_flags bit of the auth_hdr for no nested hashing
+ * required */
+
+#define FW_CCM_GCM_AAD_SZ_MAX 240
+/* Maximum size of AAD data allowed for CCM or GCM processing. AAD data size
+ * is stored in an 8-bit field and must be a multiple of the hash block size.
+ * 240 is the largest value which satisfies both requirements. AAD_SZ_MAX is
+ * in byte units */
+
+/*
+ * request parameter #defines
+ */
+#define FW_HASH_REQUEST_PARAMETERS_OFFSET \
+ (sizeof(fw_la_cipher_req_params_t))
+/* Offset in bytes from the start of the request parameters block to the hash
+ * (auth) request parameters */
+
+#define FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
+/* Offset in bytes from the start of the request parameters block to the cipher
+ * request parameters */
+
+/*
+ * Definition of the cipher request parameters block
+ *
+ * Definition of the cipher processing request parameters block
+ * structure, which forms part of the block incorporating LWs 14-26,
+ * as defined by the common base parameters structure.
+ * Unused fields must be set to 0.
+ */
+struct fw_la_cipher_req_params {
+ /* LW 14 */
+ uint32_t cipher_offset;
+ /* Cipher offset long word. */
+
+ /* LW 15 */
+ uint32_t cipher_length;
+ /* Cipher length long word. */
+
+ /* LWs 16-19 */
+ union {
+ uint32_t cipher_IV_array[FW_NUM_LONGWORDS_4];
+ /* Cipher IV array */
+
+ struct
+ {
+ uint64_t cipher_IV_ptr;
+ /* Cipher IV pointer or Partial State Pointer */
+
+ uint64_t resrvd1;
+ /* reserved */
+
+ } s;
+
+ } u;
+
+};
+
+/*
+ * Definition of the auth request parameters block
+ * Definition of the authentication processing request parameters block
+ * structure, which forms part of the block incorporating LWs 14-26,
+ * as defined by the common base parameters structure. Note:
+ * This structure is used by TLS only.
+ */
+struct fw_la_auth_req_params {
+ /* LW 20 */
+ uint32_t auth_off;
+ /* Byte offset from the start of packet to the auth data region */
+
+ /* LW 21 */
+ uint32_t auth_len;
+ /* Byte length of the auth data region */
+
+ /* LWs 22-23 */
+ union {
+ uint64_t auth_partial_st_prefix;
+ /* Address of the authentication partial state prefix
+ * information */
+
+ uint64_t aad_adr;
+ /* Address of the AAD info in DRAM. Used for the CCM and GCM
+ * protocols */
+
+ } u1;
+
+ /* LWs 24-25 */
+ uint64_t auth_res_addr;
+ /* Address of the authentication result information to validate or
+ * the location to which the digest information can be written back to */
+
+ /* LW 26 */
+ union {
+ uint8_t inner_prefix_sz;
+ /* Size in bytes of the inner prefix data */
+
+ uint8_t aad_sz;
+ /* Size in bytes of padded AAD data to prefix to the packet for CCM
+ * or GCM processing */
+ } u2;
+
+ uint8_t resrvd1;
+ /* reserved */
+
+ uint8_t hash_state_sz;
+ /* Number of quad words of inner and outer hash prefix data to process
+ * Maximum size is 240 */
+
+ uint8_t auth_res_sz;
+ /* Size in bytes of the authentication result */
+
+} __packed;
+
+/*
+ * Definition of the auth request parameters block
+ * Definition of the authentication processing request parameters block
+ * structure, which forms part of the block incorporating LWs 14-26,
+ * as defined by the common base parameters structure. Note:
+ * This structure is used by SSL3 and MGF1 only. All fields other than
+ * inner prefix/ AAD size are unused and therefore reserved.
+ */
+struct fw_la_auth_req_params_resrvd_flds {
+ /* LWs 20-25 */
+ uint32_t resrvd[FW_NUM_LONGWORDS_6];
+
+ /* LW 26 */
+ union {
+ uint8_t inner_prefix_sz;
+ /* Size in bytes of the inner prefix data */
+
+ uint8_t aad_sz;
+ /* Size in bytes of padded AAD data to prefix to the packet for CCM
+ * or GCM processing */
+ } u2;
+
+ uint8_t resrvd1;
+ /* reserved */
+
+ uint16_t resrvd2;
+ /* reserved */
+};
+
+/*
+ * Definition of the shared fields within the parameter block
+ * containing SSL, TLS or MGF information.
+ * This structure defines the shared fields for SSL, TLS or MGF
+ * within the parameter block incorporating LWs 14-26, as defined
+ * by the common base parameters structure.
+ * Unused fields must be set to 0.
+ */
+struct fw_la_key_gen_common {
+ /* LW 14 */
+ union {
+ /* SSL3 */
+ uint16_t secret_lgth_ssl;
+ /* Length of Secret information for SSL. In the case of TLS the
+ * secret is supplied in the content descriptor */
+
+ /* MGF */
+ uint16_t mask_length;
+ /* Size in bytes of the desired output mask for MGF1*/
+
+ /* TLS */
+ uint16_t secret_lgth_tls;
+ /* TLS Secret length */
+
+ } u;
+
+ union {
+ /* SSL3 */
+ struct
+ {
+ uint8_t output_lgth_ssl;
+ /* Output length */
+
+ uint8_t label_lgth_ssl;
+ /* Label length */
+
+ } s1;
+
+ /* MGF */
+ struct
+ {
+ uint8_t hash_length;
+ /* Hash length */
+
+ uint8_t seed_length;
+ /* Seed length */
+
+ } s2;
+
+ /* TLS */
+ struct
+ {
+ uint8_t output_lgth_tls;
+ /* Output length */
+
+ uint8_t label_lgth_tls;
+ /* Label length */
+
+ } s3;
+
+ } u1;
+
+ /* LW 15 */
+ union {
+ /* SSL3 */
+ uint8_t iter_count;
+ /* Iteration count used by the SSL key gen request */
+
+ /* TLS */
+ uint8_t tls_seed_length;
+ /* TLS Seed length */
+
+ uint8_t resrvd1;
+ /* Reserved field set to 0 for MGF1 */
+
+ } u2;
+
+ uint8_t resrvd2;
+ uint16_t resrvd3;
+ /* Reserved space - unused */
+
+};
+
+/*
+ * Definition of the SSL3 request parameters block
+ * This structure contains the SSL3 processing request parameters
+ * incorporating LWs 14-26, as defined by the common base
+ * parameters structure. Unused fields must be set to 0.
+ */
+struct fw_la_ssl3_req_params {
+ /* LWs 14-15 */
+ struct fw_la_key_gen_common keygen_comn;
+ /* For key gen processing this field holds the ssl, tls or mgf
+ * parameters */
+
+ /* LW 16-25 */
+ uint32_t resrvd[FW_NUM_LONGWORDS_10];
+ /* Reserved */
+
+ /* LW 26 */
+ union {
+ uint8_t inner_prefix_sz;
+ /* Size in bytes of the inner prefix data */
+
+ uint8_t aad_sz;
+ /* Size in bytes of padded AAD data to prefix to the packet for CCM
+ * or GCM processing */
+ } u2;
+
+ uint8_t resrvd1;
+ /* reserved */
+
+ uint16_t resrvd2;
+ /* reserved */
+
+};
+
+/*
+ * Definition of the MGF request parameters block
+ * This structure contains the MGF processing request parameters
+ * incorporating LWs 14-26, as defined by the common base parameters
+ * structure. Unused fields must be set to 0.
+ */
+struct fw_la_mgf_req_params {
+ /* LWs 14-15 */
+ struct fw_la_key_gen_common keygen_comn;
+ /* For key gen processing this field holds the ssl or mgf
+ * parameters */
+
+ /* LW 16-25 */
+ uint32_t resrvd[FW_NUM_LONGWORDS_10];
+ /* Reserved */
+
+ /* LW 26 */
+ union {
+ uint8_t inner_prefix_sz;
+ /* Size in bytes of the inner prefix data */
+
+ uint8_t aad_sz;
+ /* Size in bytes of padded AAD data to prefix to the packet for CCM
+ * or GCM processing */
+ } u2;
+
+ uint8_t resrvd1;
+ /* reserved */
+
+ uint16_t resrvd2;
+ /* reserved */
+
+};
+
+/*
+ * Definition of the TLS request parameters block
+ * This structure contains the TLS processing request parameters
+ * incorporating LWs 14-26, as defined by the common base parameters
+ * structure. Unused fields must be set to 0.
+ */
+struct fw_la_tls_req_params {
+ /* LWs 14-15 */
+ struct fw_la_key_gen_common keygen_comn;
+ /* For key gen processing this field holds the ssl, tls or mgf
+ * parameters */
+
+ /* LW 16-19 */
+ uint32_t resrvd[FW_NUM_LONGWORDS_4];
+ /* Reserved */
+
+};
+
+/*
+ * Definition of the common QAT FW request middle block for TRNG.
+ * Common section of the request used across all of the services exposed
+ * by the QAT FW. Each of the services inherit these common fields. TRNG
+ * requires a specific implementation.
+ */
+struct fw_la_trng_req_mid {
+ /* LWs 6-13 */
+ uint64_t opaque_data;
+ /* Opaque data passed unmodified from the request to response messages by
+ * firmware (fw) */
+
+ uint64_t resrvd1;
+ /* Reserved, unused for TRNG */
+
+ uint64_t dest_data_addr;
+ /* Generic definition of the destination data supplied to the QAT AE. The
+ * common flags are used to further describe the attributes of this
+ * field */
+
+ uint32_t resrvd2;
+ /* Reserved, unused for TRNG */
+
+ uint32_t entropy_length;
+ /* Size of the data in bytes to process. Used by the get_random
+ * command. Set to 0 for commands that don't need a length parameter */
+
+};
+
+/*
+ * Definition of the common LA QAT FW TRNG request
+ * Definition of the TRNG processing request type
+ */
+struct fw_la_trng_req {
+ /* LWs 0-1 */
+ struct fw_comn_req_hdr comn_hdr;
+ /* Common request header */
+
+ /* LWs 2-5 */
+ union fw_comn_req_hdr_cd_pars cd_pars;
+ /* Common Request content descriptor field which points either to a
+ * content descriptor
+ * parameter block or contains the service-specific data itself. */
+
+ /* LWs 6-13 */
+ struct fw_la_trng_req_mid comn_mid;
+ /* TRNG request middle section - differs from the common mid-section */
+
+ /* LWs 14-26 */
+ uint32_t resrvd1[FW_NUM_LONGWORDS_13];
+
+ /* LWs 27-31 */
+ uint32_t resrvd2[FW_NUM_LONGWORDS_5];
+
+};
+
+/*
+ * Definition of the Lookaside Eagle Tail Response
+ * This is the response delivered to the ET rings by the Lookaside
+ * QAT FW service for all commands
+ */
+struct fw_la_resp {
+ /* LWs 0-1 */
+ struct fw_comn_resp_hdr comn_resp;
+ /* Common interface response format see fw.h */
+
+ /* LWs 2-3 */
+ uint64_t opaque_data;
+ /* Opaque data passed from the request to the response message */
+
+ /* LWs 4-7 */
+ uint32_t resrvd[FW_NUM_LONGWORDS_4];
+ /* Reserved */
+
+};
+
+/*
+ * Definition of the Lookaside TRNG Test Status Structure
+ * In addition to the FW_LA_TRNG_STATUS pass or fail information in the
+ * common response fields, the response to a TRNG_TEST request carries the
+ * test status, a counter for failed tests and 4 entropy counter values.
+ */
+struct fw_la_trng_test_result {
+ uint32_t test_status_info;
+ /* TRNG comparator health test status & validity information;
+ see Test Status Bit Fields below. */
+
+ uint32_t test_status_fail_count;
+ /* TRNG comparator health test status, 32bit fail counter */
+
+ uint64_t r_ent_ones_cnt;
+ /* Raw Entropy ones counter */
+
+ uint64_t r_ent_zeros_cnt;
+ /* Raw Entropy zeros counter */
+
+ uint64_t c_ent_ones_cnt;
+ /* Conditioned Entropy ones counter */
+
+ uint64_t c_ent_zeros_cnt;
+ /* Conditioned Entropy zeros counter */
+
+ uint64_t resrvd;
+ /* Reserved field must be set to zero */
+
+};
+
+/*
+ * Definition of the Lookaside SSL Key Material Input
+ * This struct defines the layout of input parameters for the
+ * SSL3 key generation (source flat buffer format)
+ */
+struct fw_la_ssl_key_material_input {
+ uint64_t seed_addr;
+ /* Pointer to seed */
+
+ uint64_t label_addr;
+ /* Pointer to label(s) */
+
+ uint64_t secret_addr;
+ /* Pointer to secret */
+
+};
+
+/*
+ * Definition of the Lookaside TLS Key Material Input
+ * This struct defines the layout of input parameters for the
+ * TLS key generation (source flat buffer format)
+ * NOTE:
+ * Secret state value (S split into S1 and S2 parts) is supplied via
+ * Content Descriptor. S1 is placed in an outer prefix buffer, and S2
+ * inside the inner prefix buffer.
+ */
+struct fw_la_tls_key_material_input {
+ uint64_t seed_addr;
+ /* Pointer to seed */
+
+ uint64_t label_addr;
+ /* Pointer to label(s) */
+
+};
+
+/*
+ * Macros using the bit position and mask to set/extract the next
+ * and current id nibbles within the next_curr_id field of the
+ * content descriptor header block, ONLY FOR CIPHER + AUTH COMBINED.
+ * Note that for cipher only or authentication only, the common macros
+ * need to be used. These are defined in the 'fw.h' common header
+ * file, as they are used by compression, cipher and authentication.
+ *
+ * cd_ctrl_hdr_t Content descriptor control block header.
+ * val Value of the field being set.
+ */
+/* Cipher fields within Cipher + Authentication structure */
+#define FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
+ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
+ FW_COMN_NEXT_ID_MASK) >> \
+ (FW_COMN_NEXT_ID_BITPOS))
+
+#define FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
+ FW_COMN_CURR_ID_MASK) | \
+ ((val << FW_COMN_NEXT_ID_BITPOS) & \
+ FW_COMN_NEXT_ID_MASK))
+
+#define FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \
+ (((cd_ctrl_hdr_t)->next_curr_id_cipher) & FW_COMN_CURR_ID_MASK)
+
+#define FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
+ FW_COMN_NEXT_ID_MASK) | \
+ ((val)&FW_COMN_CURR_ID_MASK))
+
+/* Authentication fields within Cipher + Authentication structure */
+#define FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \
+ ((((cd_ctrl_hdr_t)->next_curr_id_auth) & FW_COMN_NEXT_ID_MASK) >> \
+ (FW_COMN_NEXT_ID_BITPOS))
+
+#define FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_auth) & \
+ FW_COMN_CURR_ID_MASK) | \
+ ((val << FW_COMN_NEXT_ID_BITPOS) & \
+ FW_COMN_NEXT_ID_MASK))
+
+#define FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \
+ (((cd_ctrl_hdr_t)->next_curr_id_auth) & FW_COMN_CURR_ID_MASK)
+
+#define FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_auth) & \
+ FW_COMN_NEXT_ID_MASK) | \
+ ((val)&FW_COMN_CURR_ID_MASK))
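An editorial sketch of how the cipher/auth chaining macros above are meant to compose inside a combined cipher + authentication content descriptor follows. The control-header type name and the FW_SLICE_* identifiers are assumptions, taken to be defined alongside the rest of this header; this is not part of the patch.

static void
example_chain_cipher_auth(struct fw_cipher_auth_cd_ctrl_hdr *cd_ctrl)
{
	/* Cipher slice runs first and hands off to the auth slice. */
	FW_CIPHER_CURR_ID_SET(cd_ctrl, FW_SLICE_CIPHER);
	FW_CIPHER_NEXT_ID_SET(cd_ctrl, FW_SLICE_AUTH);

	/* Auth slice runs last; its "next" points at the DRAM write-back. */
	FW_AUTH_CURR_ID_SET(cd_ctrl, FW_SLICE_AUTH);
	FW_AUTH_NEXT_ID_SET(cd_ctrl, FW_SLICE_DRAM_WR);
}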
+
+/* Definitions of the bits in the test_status_info of the TRNG_TEST response.
+ * The values returned by the Lookaside service are given below.
+ * The Test result and Test Fail Count values are only valid if the Test
+ * Results Valid (Tv) is set.
+ *
+ * TRNG Test Status Info
+ * + ===== + ------------------------------------------------ + --- + --- +
+ * | Bit | 31 - 2 | 1 | 0 |
+ * + ===== + ------------------------------------------------ + --- + --- +
+ * | Flags | RESERVED = 0 | Tv | Ts |
+ * + ===== + ------------------------------------------------------------ +
+ */
+/*
+ * Definition of the Lookaside TRNG Test Status Information received as
+ * a part of fw_la_trng_test_result_t
+ *
+ */
+#define FW_LA_TRNG_TEST_STATUS_TS_BITPOS 0
+/* TRNG Test Result t_status field bit pos definition. */
+
+#define FW_LA_TRNG_TEST_STATUS_TS_MASK 0x1
+/* TRNG Test Result t_status field mask definition. */
+
+#define FW_LA_TRNG_TEST_STATUS_TV_BITPOS 1
+/* TRNG Test Result test results valid field bit pos definition. */
+
+#define FW_LA_TRNG_TEST_STATUS_TV_MASK 0x1
+/* TRNG Test Result test results valid field mask definition. */
+
+/*
+ * Definition of the Lookaside TRNG test_status values.
+ *
+ *
+ */
+#define FW_LA_TRNG_TEST_STATUS_TV_VALID 1
+/* TRNG TEST Response Test Results Valid Value. */
+
+#define FW_LA_TRNG_TEST_STATUS_TV_NOT_VALID 0
+/* TRNG TEST Response Test Results are NOT Valid Value. */
+
+#define FW_LA_TRNG_TEST_STATUS_TS_NO_FAILS 1
+/* TRNG Test status value: tests have no failures. */
+
+#define FW_LA_TRNG_TEST_STATUS_TS_HAS_FAILS 0
+/* TRNG Test status value: tests have one or more failures. */
+
+/*
+ * Macro for extraction of the Test Status Field returned in the response
+ * to TRNG TEST command.
+ *
+ * test_status 8 bit test_status value to extract the status bit
+ */
+#define FW_LA_TRNG_TEST_STATUS_TS_FLD_GET(test_status) \
+ FIELD_GET(test_status, \
+ FW_LA_TRNG_TEST_STATUS_TS_BITPOS, \
+ FW_LA_TRNG_TEST_STATUS_TS_MASK)
+/*
+ * Macro for extraction of the Test Results Valid Field returned in the
+ * response to TRNG TEST command.
+ *
+ * test_status 8 bit test_status value to extract the Tests
+ * Results valid bit
+ */
+#define FW_LA_TRNG_TEST_STATUS_TV_FLD_GET(test_status) \
+ FIELD_GET(test_status, \
+ FW_LA_TRNG_TEST_STATUS_TV_BITPOS, \
+ FW_LA_TRNG_TEST_STATUS_TV_MASK)
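As an editorial aid, a sketch of how a TRNG_TEST response might be consumed with the accessors above; the fw_la_trng_test_result layout is the one defined earlier in this header, and FIELD_GET is assumed to come from the common firmware header.

static int
example_trng_test_passed(const struct fw_la_trng_test_result *res)
{
	uint32_t info = res->test_status_info;

	/* Ts and the fail counter are only meaningful when Tv is set. */
	if (FW_LA_TRNG_TEST_STATUS_TV_FLD_GET(info) !=
	    FW_LA_TRNG_TEST_STATUS_TV_VALID)
		return (-1);	/* test results not valid */

	return (FW_LA_TRNG_TEST_STATUS_TS_FLD_GET(info) ==
	    FW_LA_TRNG_TEST_STATUS_TS_NO_FAILS);
}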
+
+/*
+ * MGF Max supported input parameters
+ */
+#define FW_LA_MGF_SEED_LEN_MAX 255
+/* Maximum seed length for MGF1 request in bytes
+ * Typical values may be 48, 64, 128 bytes (or any). */
+
+#define FW_LA_MGF_MASK_LEN_MAX 65528
+/* Maximum mask length for MGF1 request in bytes
+ * Typical values may be 8 (64-bit), 16 (128-bit). MUST be quad word multiple */
+
+/*
+ * SSL Max supported input parameters
+ */
+#define FW_LA_SSL_SECRET_LEN_MAX 512
+/* Maximum secret length for SSL3 Key Gen request (bytes) */
+
+#define FW_LA_SSL_ITERATES_LEN_MAX 16
+/* Maximum iterations for SSL3 Key Gen request (integer) */
+
+#define FW_LA_SSL_LABEL_LEN_MAX 136
+/* Maximum label length for SSL3 Key Gen request (bytes) */
+
+#define FW_LA_SSL_SEED_LEN_MAX 64
+/* Maximum seed length for SSL3 Key Gen request (bytes) */
+
+#define FW_LA_SSL_OUTPUT_LEN_MAX 248
+/* Maximum output length for SSL3 Key Gen request (bytes) */
+
+/*
+ * TLS Max supported input parameters
+ */
+#define FW_LA_TLS_SECRET_LEN_MAX 128
+/* Maximum secret length for TLS Key Gen request (bytes) */
+
+#define FW_LA_TLS_V1_1_SECRET_LEN_MAX 128
+/* Maximum secret length for TLS Key Gen request (bytes) */
+
+#define FW_LA_TLS_V1_2_SECRET_LEN_MAX 64
+/* Maximum secret length for TLS Key Gen request (bytes) */
+
+#define FW_LA_TLS_LABEL_LEN_MAX 255
+/* Maximum label length for TLS Key Gen request (bytes) */
+
+#define FW_LA_TLS_SEED_LEN_MAX 64
+/* Maximum seed length for TLS Key Gen request (bytes) */
+
+#define FW_LA_TLS_OUTPUT_LEN_MAX 248
+/* Maximum output length for TLS Key Gen request (bytes) */
+
+#endif
diff --git a/sys/dev/qat/qat_hw17var.h b/sys/dev/qat/qat_hw17var.h
new file mode 100644
index 000000000000..1f3d62b86f5e
--- /dev/null
+++ b/sys/dev/qat/qat_hw17var.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
+/* $NetBSD: qat_hw17var.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
+
+/*
+ * Copyright (c) 2019 Internet Initiative Japan, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright(c) 2014 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#ifndef _DEV_PCI_QAT_HW17VAR_H_
+#define _DEV_PCI_QAT_HW17VAR_H_
+
+CTASSERT(CONTENT_DESC_MAX_SIZE >=
+ roundup(sizeof(union hw_cipher_algo_blk), 8) +
+ roundup(sizeof(union hw_auth_algo_blk), 8));
+
+int qat_adm_mailbox_init(struct qat_softc *);
+int qat_adm_mailbox_send_init(struct qat_softc *);
+int qat_arb_init(struct qat_softc *);
+int qat_set_ssm_wdtimer(struct qat_softc *);
+int qat_check_slice_hang(struct qat_softc *);
+
+void qat_hw17_crypto_setup_desc(struct qat_crypto *,
+ struct qat_session *, struct qat_crypto_desc *);
+void qat_hw17_crypto_setup_req_params(struct qat_crypto_bank *,
+ struct qat_session *, struct qat_crypto_desc const *,
+ struct qat_sym_cookie *, struct cryptop *);
+
+#endif
diff --git a/sys/dev/qat/qatreg.h b/sys/dev/qat/qatreg.h
new file mode 100644
index 000000000000..2a0be321b4f9
--- /dev/null
+++ b/sys/dev/qat/qatreg.h
@@ -0,0 +1,1582 @@
+/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
+/* $NetBSD: qatreg.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
+
+/*
+ * Copyright (c) 2019 Internet Initiative Japan, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#ifndef _DEV_PCI_QATREG_H_
+#define _DEV_PCI_QATREG_H_
+
+#define __BIT(__n) \
+ (((uintmax_t)(__n) >= NBBY * sizeof(uintmax_t)) ? 0 : \
+ ((uintmax_t)1 << (uintmax_t)((__n) & (NBBY * sizeof(uintmax_t) - 1))))
+#define __BITS(__m, __n) \
+ ((__BIT(MAX((__m), (__n)) + 1) - 1) ^ (__BIT(MIN((__m), (__n))) - 1))
+
+#define __LOWEST_SET_BIT(__mask) ((((__mask) - 1) & (__mask)) ^ (__mask))
+#define __SHIFTOUT(__x, __mask) (((__x) & (__mask)) / __LOWEST_SET_BIT(__mask))
+#define __SHIFTIN(__x, __mask) ((__x) * __LOWEST_SET_BIT(__mask))
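These are the usual NetBSD-style bit-field helpers: __BITS() builds a contiguous mask, __SHIFTIN() places a value into the field described by a mask, and __SHIFTOUT() extracts it again. A tiny editorial example, not part of the patch:

static uint32_t
example_field_roundtrip(void)
{
	uint32_t reg = __SHIFTIN(0x0A, __BITS(9, 5));	/* 0x0A << 5 == 0x140 */

	return (__SHIFTOUT(reg, __BITS(9, 5)));		/* yields 0x0A again */
}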
+
+/* Limits */
+#define MAX_NUM_AE 0x10
+#define MAX_NUM_ACCEL 6
+#define MAX_AE 0x18
+#define MAX_AE_CTX 8
+#define MAX_ARB 4
+
+#define MAX_USTORE_PER_SEG 0x8000 /* 16k * 2 */
+#define MAX_USTORE MAX_USTORE_PER_SEG
+
+#define MAX_AE_PER_ACCEL 4 /* XXX */
+#define MAX_BANK_PER_ACCEL 16 /* XXX */
+#define MAX_RING_PER_BANK 16
+
+#define MAX_XFER_REG 128
+#define MAX_GPR_REG 128
+#define MAX_NN_REG 128
+#define MAX_LMEM_REG 1024
+#define MAX_INP_STATE 16
+#define MAX_CAM_REG 16
+#define MAX_FIFO_QWADDR 160
+
+#define MAX_EXEC_INST 100
+#define UWORD_CPYBUF_SIZE 1024 /* micro-store copy buffer (bytes) */
+#define INVLD_UWORD 0xffffffffffull /* invalid micro-instruction */
+#define AEV2_PACKED_UWORD_BYTES 6 /* version 2 packed uword size */
+#define UWORD_MASK 0xbffffffffffull /* micro-word mask without parity */
+
+#define AE_ALL_CTX 0xff
+
+/* PCIe configuration space parameter */
+#define NO_PCI_REG (-1)
+#define NO_REG_OFFSET 0
+
+#define MAX_BARS 3
+
+/* Fuse Control */
+#define FUSECTL_REG 0x40
+#define FUSECTL_MASK __BIT(31)
+
+#define LEGFUSE_REG 0x4c
+#define LEGFUSE_ACCEL_MASK_CIPHER_SLICE __BIT(0)
+#define LEGFUSE_ACCEL_MASK_AUTH_SLICE __BIT(1)
+#define LEGFUSE_ACCEL_MASK_PKE_SLICE __BIT(2)
+#define LEGFUSE_ACCEL_MASK_COMPRESS_SLICE __BIT(3)
+#define LEGFUSE_ACCEL_MASK_LZS_SLICE __BIT(4)
+#define LEGFUSE_ACCEL_MASK_EIA3_SLICE __BIT(5)
+#define LEGFUSE_ACCEL_MASK_SHA3_SLICE __BIT(6)
+
+/* -------------------------------------------------------------------------- */
+/* PETRINGCSR region */
+
+/* ETR parameters */
+#define ETR_MAX_RINGS_PER_BANK 16
+
+/* ETR registers */
+#define ETR_RING_CONFIG 0x0000
+#define ETR_RING_LBASE 0x0040
+#define ETR_RING_UBASE 0x0080
+#define ETR_RING_HEAD_OFFSET 0x00C0
+#define ETR_RING_TAIL_OFFSET 0x0100
+#define ETR_RING_STAT 0x0140
+#define ETR_UO_STAT 0x0148
+#define ETR_E_STAT 0x014C
+#define ETR_NE_STAT 0x0150
+#define ETR_NF_STAT 0x0154
+#define ETR_F_STAT 0x0158
+#define ETR_C_STAT 0x015C
+#define ETR_INT_EN 0x016C
+#define ETR_INT_REG 0x0170
+#define ETR_INT_SRCSEL 0x0174
+#define ETR_INT_SRCSEL_2 0x0178
+#define ETR_INT_COL_EN 0x017C
+#define ETR_INT_COL_CTL 0x0180
+#define ETR_AP_NF_MASK 0x2000
+#define ETR_AP_NF_DEST 0x2020
+#define ETR_AP_NE_MASK 0x2040
+#define ETR_AP_NE_DEST 0x2060
+#define ETR_AP_DELAY 0x2080
+
+/* ARB registers */
+#define ARB_OFFSET 0x30000
+#define ARB_REG_SIZE 0x4
+#define ARB_WTR_SIZE 0x20
+#define ARB_REG_SLOT 0x1000
+#define ARB_WTR_OFFSET 0x010
+#define ARB_RO_EN_OFFSET 0x090
+#define ARB_WRK_2_SER_MAP_OFFSET 0x180
+#define ARB_RINGSRVARBEN_OFFSET 0x19c
+
+/* Ring Config */
+#define ETR_RING_CONFIG_LATE_HEAD_POINTER_MODE __BIT(31)
+#define ETR_RING_CONFIG_NEAR_FULL_WM __BITS(14, 10)
+#define ETR_RING_CONFIG_NEAR_EMPTY_WM __BITS(9, 5)
+#define ETR_RING_CONFIG_RING_SIZE __BITS(4, 0)
+
+#define ETR_RING_CONFIG_NEAR_WM_0 0x00
+#define ETR_RING_CONFIG_NEAR_WM_4 0x01
+#define ETR_RING_CONFIG_NEAR_WM_8 0x02
+#define ETR_RING_CONFIG_NEAR_WM_16 0x03
+#define ETR_RING_CONFIG_NEAR_WM_32 0x04
+#define ETR_RING_CONFIG_NEAR_WM_64 0x05
+#define ETR_RING_CONFIG_NEAR_WM_128 0x06
+#define ETR_RING_CONFIG_NEAR_WM_256 0x07
+#define ETR_RING_CONFIG_NEAR_WM_512 0x08
+#define ETR_RING_CONFIG_NEAR_WM_1K 0x09
+#define ETR_RING_CONFIG_NEAR_WM_2K 0x0A
+#define ETR_RING_CONFIG_NEAR_WM_4K 0x0B
+#define ETR_RING_CONFIG_NEAR_WM_8K 0x0C
+#define ETR_RING_CONFIG_NEAR_WM_16K 0x0D
+#define ETR_RING_CONFIG_NEAR_WM_32K 0x0E
+#define ETR_RING_CONFIG_NEAR_WM_64K 0x0F
+#define ETR_RING_CONFIG_NEAR_WM_128K 0x10
+#define ETR_RING_CONFIG_NEAR_WM_256K 0x11
+#define ETR_RING_CONFIG_NEAR_WM_512K 0x12
+#define ETR_RING_CONFIG_NEAR_WM_1M 0x13
+#define ETR_RING_CONFIG_NEAR_WM_2M 0x14
+#define ETR_RING_CONFIG_NEAR_WM_4M 0x15
+
+#define ETR_RING_CONFIG_SIZE_64 0x00
+#define ETR_RING_CONFIG_SIZE_128 0x01
+#define ETR_RING_CONFIG_SIZE_256 0x02
+#define ETR_RING_CONFIG_SIZE_512 0x03
+#define ETR_RING_CONFIG_SIZE_1K 0x04
+#define ETR_RING_CONFIG_SIZE_2K 0x05
+#define ETR_RING_CONFIG_SIZE_4K 0x06
+#define ETR_RING_CONFIG_SIZE_8K 0x07
+#define ETR_RING_CONFIG_SIZE_16K 0x08
+#define ETR_RING_CONFIG_SIZE_32K 0x09
+#define ETR_RING_CONFIG_SIZE_64K 0x0A
+#define ETR_RING_CONFIG_SIZE_128K 0x0B
+#define ETR_RING_CONFIG_SIZE_256K 0x0C
+#define ETR_RING_CONFIG_SIZE_512K 0x0D
+#define ETR_RING_CONFIG_SIZE_1M 0x0E
+#define ETR_RING_CONFIG_SIZE_2M 0x0F
+#define ETR_RING_CONFIG_SIZE_4M 0x10
+
+/* Default Ring Config is Nearly Full = Full and Nearly Empty = Empty */
+#define ETR_RING_CONFIG_BUILD(size) \
+ (__SHIFTIN(ETR_RING_CONFIG_NEAR_WM_0, \
+ ETR_RING_CONFIG_NEAR_FULL_WM) | \
+ __SHIFTIN(ETR_RING_CONFIG_NEAR_WM_0, \
+ ETR_RING_CONFIG_NEAR_EMPTY_WM) | \
+ __SHIFTIN((size), ETR_RING_CONFIG_RING_SIZE))
+
+/* Response Ring Configuration */
+#define ETR_RING_CONFIG_BUILD_RESP(size, wm_nf, wm_ne) \
+ (__SHIFTIN((wm_nf), ETR_RING_CONFIG_NEAR_FULL_WM) | \
+ __SHIFTIN((wm_ne), ETR_RING_CONFIG_NEAR_EMPTY_WM) | \
+ __SHIFTIN((size), ETR_RING_CONFIG_RING_SIZE))
+
+/* Ring Base */
+#define ETR_RING_BASE_BUILD(addr, size) \
+ (((addr) >> 6) & (0xFFFFFFFFFFFFFFFFULL << (size)))
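An editorial sketch of how the build macros above might combine when a ring is programmed. example_write_bank_csr() stands in for whatever bank CSR write helper the driver actually uses, and the 4-byte per-ring register stride is an assumption of this sketch.

static void
example_setup_ring(int ring, uint64_t dma_paddr)
{
	/* Hypothetical: describe a 16KB ring whose DMA base is 'dma_paddr'. */
	uint32_t cfg = ETR_RING_CONFIG_BUILD(ETR_RING_CONFIG_SIZE_16K);
	uint64_t base = ETR_RING_BASE_BUILD(dma_paddr, ETR_RING_CONFIG_SIZE_16K);

	example_write_bank_csr(ETR_RING_CONFIG + ring * 4, cfg);
	example_write_bank_csr(ETR_RING_LBASE + ring * 4, base & 0xffffffff);
	example_write_bank_csr(ETR_RING_UBASE + ring * 4, base >> 32);
}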
+
+#define ETR_INT_REG_CLEAR_MASK 0xffff
+
+/* Initial bank Interrupt Source mask */
+#define ETR_INT_SRCSEL_MASK 0x44444444UL
+
+#define ETR_INT_SRCSEL_NEXT_OFFSET 4
+
+#define ETR_RINGS_PER_INT_SRCSEL 8
+
+#define ETR_INT_COL_CTL_ENABLE __BIT(31)
+
+#define ETR_AP_NF_MASK_INIT 0xAAAAAAAA
+#define ETR_AP_NE_MASK_INIT 0x55555555
+
+/* Autopush destination AE bit */
+#define ETR_AP_DEST_ENABLE __BIT(7)
+#define ETR_AP_DEST_AE __BITS(6, 2)
+#define ETR_AP_DEST_MAILBOX __BITS(1, 0)
+
+/* Autopush destination enable bit */
+
+/* Autopush CSR Offset */
+#define ETR_AP_BANK_OFFSET 4
+
+/* Autopush maximum rings per bank */
+#define ETR_MAX_RINGS_PER_AP_BANK 32
+
+/* Maximum mailboxes per accelerator */
+#define ETR_MAX_MAILBOX_PER_ACCELERATOR 4
+
+/* Maximum AEs per mailbox */
+#define ETR_MAX_AE_PER_MAILBOX 4
+
+/* Macro to get the ring's autopush bank number */
+#define ETR_RING_AP_BANK_NUMBER(ring) ((ring) >> 5)
+
+/* Macro to get the ring's autopush mailbox number */
+#define ETR_RING_AP_MAILBOX_NUMBER(ring) \
+ (ETR_RING_AP_BANK_NUMBER(ring) % ETR_MAX_MAILBOX_PER_ACCELERATOR)
+
+/* Macro to get the ring number in the autopush bank */
+#define ETR_RING_NUMBER_IN_AP_BANK(ring) \
+ ((ring) % ETR_MAX_RINGS_PER_AP_BANK)
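Worked example (editorial): for ring 37, ETR_RING_AP_BANK_NUMBER(37) is 1, ETR_RING_NUMBER_IN_AP_BANK(37) is 5, and ETR_RING_AP_MAILBOX_NUMBER(37) is 1, i.e. bank 1 taken modulo the four mailboxes per accelerator.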
+
+#define ETR_RING_EMPTY_ENTRY_SIG (0x7F7F7F7F)
+
+/* -------------------------------------------------------------------------- */
+/* CAP_GLOBAL_CTL region */
+
+#define FCU_CTRL 0x8c0
+#define FCU_CTRL_CMD_NOOP 0
+#define FCU_CTRL_CMD_AUTH 1
+#define FCU_CTRL_CMD_LOAD 2
+#define FCU_CTRL_CMD_START 3
+#define FCU_CTRL_AE __BITS(8, 31)
+
+#define FCU_STATUS 0x8c4
+#define FCU_STATUS_STS __BITS(0, 2)
+#define FCU_STATUS_STS_NO 0
+#define FCU_STATUS_STS_VERI_DONE 1
+#define FCU_STATUS_STS_LOAD_DONE 2
+#define FCU_STATUS_STS_VERI_FAIL 3
+#define FCU_STATUS_STS_LOAD_FAIL 4
+#define FCU_STATUS_STS_BUSY 5
+#define FCU_STATUS_AUTHFWLD __BIT(8)
+#define FCU_STATUS_DONE __BIT(9)
+#define FCU_STATUS_LOADED_AE __BITS(22, 31)
+
+#define FCU_STATUS1 0x8c8
+
+#define FCU_DRAM_ADDR_LO 0x8cc
+#define FCU_DRAM_ADDR_HI 0x8d0
+#define FCU_RAMBASE_ADDR_HI 0x8d4
+#define FCU_RAMBASE_ADDR_LO 0x8d8
+
+#define FW_AUTH_WAIT_PERIOD 10
+#define FW_AUTH_MAX_RETRY 300
+
+#define CAP_GLOBAL_CTL_BASE 0xa00
+#define CAP_GLOBAL_CTL_MISC CAP_GLOBAL_CTL_BASE + 0x04
+#define CAP_GLOBAL_CTL_MISC_TIMESTAMP_EN __BIT(7)
+#define CAP_GLOBAL_CTL_RESET CAP_GLOBAL_CTL_BASE + 0x0c
+#define CAP_GLOBAL_CTL_RESET_MASK __BITS(31, 26)
+#define CAP_GLOBAL_CTL_RESET_ACCEL_MASK __BITS(25, 20)
+#define CAP_GLOBAL_CTL_RESET_AE_MASK __BITS(19, 0)
+#define CAP_GLOBAL_CTL_CLK_EN CAP_GLOBAL_CTL_BASE + 0x50
+#define CAP_GLOBAL_CTL_CLK_EN_ACCEL_MASK __BITS(25, 20)
+#define CAP_GLOBAL_CTL_CLK_EN_AE_MASK __BITS(19, 0)
+
+/* -------------------------------------------------------------------------- */
+/* AE region */
+#define UPC_MASK 0x1ffff
+#define USTORE_SIZE QAT_16K
+
+#define AE_LOCAL_AE_MASK __BITS(31, 12)
+#define AE_LOCAL_CSR_MASK __BITS(9, 0)
+
+/* AE_LOCAL registers */
+/* Control Store Address Register */
+#define USTORE_ADDRESS 0x000
+#define USTORE_ADDRESS_ECS __BIT(31)
+
+#define USTORE_ECC_BIT_0 44
+#define USTORE_ECC_BIT_1 45
+#define USTORE_ECC_BIT_2 46
+#define USTORE_ECC_BIT_3 47
+#define USTORE_ECC_BIT_4 48
+#define USTORE_ECC_BIT_5 49
+#define USTORE_ECC_BIT_6 50
+
+/* Control Store Data Lower Register */
+#define USTORE_DATA_LOWER 0x004
+/* Control Store Data Upper Register */
+#define USTORE_DATA_UPPER 0x008
+/* Control Store Error Status Register */
+#define USTORE_ERROR_STATUS 0x00c
+/* Arithmetic Logic Unit Output Register */
+#define ALU_OUT 0x010
+/* Context Arbiter Control Register */
+#define CTX_ARB_CNTL 0x014
+#define CTX_ARB_CNTL_INIT 0x00000000
+/* Context Enables Register */
+#define CTX_ENABLES 0x018
+#define CTX_ENABLES_INIT 0
+#define CTX_ENABLES_INUSE_CONTEXTS __BIT(31)
+#define CTX_ENABLES_CNTL_STORE_PARITY_ERROR __BIT(29)
+#define CTX_ENABLES_CNTL_STORE_PARITY_ENABLE __BIT(28)
+#define CTX_ENABLES_BREAKPOINT __BIT(27)
+#define CTX_ENABLES_PAR_ERR __BIT(25)
+#define CTX_ENABLES_NN_MODE __BIT(20)
+#define CTX_ENABLES_NN_RING_EMPTY __BIT(18)
+#define CTX_ENABLES_LMADDR_1_GLOBAL __BIT(17)
+#define CTX_ENABLES_LMADDR_0_GLOBAL __BIT(16)
+#define CTX_ENABLES_ENABLE __BITS(15,8)
+
+#define CTX_ENABLES_IGNORE_W1C_MASK \
+ (~(CTX_ENABLES_PAR_ERR | \
+ CTX_ENABLES_BREAKPOINT | \
+ CTX_ENABLES_CNTL_STORE_PARITY_ERROR))
+
+/* cycles from CTX_ENABLE high to CTX entering executing state */
+#define CYCLES_FROM_READY2EXE 8
+
+/* Condition Code Enable Register */
+#define CC_ENABLE 0x01c
+#define CC_ENABLE_INIT 0x2000
+
+/* CSR Context Pointer Register */
+#define CSR_CTX_POINTER 0x020
+#define CSR_CTX_POINTER_CONTEXT __BITS(2,0)
+/* Register Error Status Register */
+#define REG_ERROR_STATUS 0x030
+/* Indirect Context Status Register */
+#define CTX_STS_INDIRECT 0x040
+#define CTX_STS_INDIRECT_UPC_INIT 0x00000000
+
+/* Active Context Status Register */
+#define ACTIVE_CTX_STATUS 0x044
+#define ACTIVE_CTX_STATUS_ABO __BIT(31)
+#define ACTIVE_CTX_STATUS_ACNO __BITS(0, 2)
+/* Indirect Context Signal Events Register */
+#define CTX_SIG_EVENTS_INDIRECT 0x048
+#define CTX_SIG_EVENTS_INDIRECT_INIT 0x00000001
+/* Active Context Signal Events Register */
+#define CTX_SIG_EVENTS_ACTIVE 0x04c
+/* Indirect Context Wakeup Events Register */
+#define CTX_WAKEUP_EVENTS_INDIRECT 0x050
+#define CTX_WAKEUP_EVENTS_INDIRECT_VOLUNTARY 0x00000001
+#define CTX_WAKEUP_EVENTS_INDIRECT_SLEEP 0x00010000
+
+#define CTX_WAKEUP_EVENTS_INDIRECT_INIT 0x00000001
+
+/* Active Context Wakeup Events Register */
+#define CTX_WAKEUP_EVENTS_ACTIVE 0x054
+/* Indirect Context Future Count Register */
+#define CTX_FUTURE_COUNT_INDIRECT 0x058
+/* Active Context Future Count Register */
+#define CTX_FUTURE_COUNT_ACTIVE 0x05c
+/* Indirect Local Memory Address 0 Register */
+#define LM_ADDR_0_INDIRECT 0x060
+/* Active Local Memory Address 0 Register */
+#define LM_ADDR_0_ACTIVE 0x064
+/* Indirect Local Memory Address 1 Register */
+#define LM_ADDR_1_INDIRECT 0x068
+/* Active Local Memory Address 1 Register */
+#define LM_ADDR_1_ACTIVE 0x06c
+/* Byte Index Register */
+#define BYTE_INDEX 0x070
+/* Indirect Local Memory Address 0 Byte Index Register */
+#define INDIRECT_LM_ADDR_0_BYTE_INDEX 0x0e0
+/* Active Local Memory Address 0 Byte Index Register */
+#define ACTIVE_LM_ADDR_0_BYTE_INDEX 0x0e4
+/* Indirect Local Memory Address 1 Byte Index Register */
+#define INDIRECT_LM_ADDR_1_BYTE_INDEX 0x0e8
+/* Active Local Memory Address 1 Byte Index Register */
+#define ACTIVE_LM_ADDR_1_BYTE_INDEX 0x0ec
+/* Transfer Index Concatenated with Byte Index Register */
+#define T_INDEX_BYTE_INDEX 0x0f4
+/* Transfer Index Register */
+#define T_INDEX 0x074
+/* Indirect Future Count Signal Register */
+#define FUTURE_COUNT_SIGNAL_INDIRECT 0x078
+/* Active Future Count Signal Register */
+#define FUTURE_COUNT_SIGNAL_ACTIVE 0x07c
+/* Next Neighbor Put Register */
+#define NN_PUT 0x080
+/* Next Neighbor Get Register */
+#define NN_GET 0x084
+/* Timestamp Low Register */
+#define TIMESTAMP_LOW 0x0c0
+/* Timestamp High Register */
+#define TIMESTAMP_HIGH 0x0c4
+/* Next Neighbor Signal Register */
+#define NEXT_NEIGHBOR_SIGNAL 0x100
+/* Previous Neighbor Signal Register */
+#define PREV_NEIGHBOR_SIGNAL 0x104
+/* Same AccelEngine Signal Register */
+#define SAME_AE_SIGNAL 0x108
+/* Cyclic Redundancy Check Remainder Register */
+#define CRC_REMAINDER 0x140
+/* Profile Count Register */
+#define PROFILE_COUNT 0x144
+/* Pseudorandom Number Register */
+#define PSEUDO_RANDOM_NUMBER 0x148
+/* Signature Enable Register */
+#define SIGNATURE_ENABLE 0x150
+/* Miscellaneous Control Register */
+#define AE_MISC_CONTROL 0x160
+#define AE_MISC_CONTROL_PARITY_ENABLE __BIT(24)
+#define AE_MISC_CONTROL_FORCE_BAD_PARITY __BIT(23)
+#define AE_MISC_CONTROL_ONE_CTX_RELOAD __BIT(22)
+#define AE_MISC_CONTROL_CS_RELOAD __BITS(21, 20)
+#define AE_MISC_CONTROL_SHARE_CS __BIT(2)
+/* Control Store Address 1 Register */
+#define USTORE_ADDRESS1 0x158
+/* Local CSR Status Register */
+#define LOCAL_CSR_STATUS 0x180
+#define LOCAL_CSR_STATUS_STATUS 0x1
+/* NULL Register */
+#define NULL_CSR 0x3fc
+
+/* AE_XFER macros */
+#define AE_XFER_AE_MASK __BITS(31, 12)
+#define AE_XFER_CSR_MASK __BITS(9, 2)
+
+#define AEREG_BAD_REGADDR 0xffff /* bad register address */
+
+/* -------------------------------------------------------------------------- */
+
+#define SSMWDT(i) ((i) * 0x4000 + 0x54)
+#define SSMWDTPKE(i) ((i) * 0x4000 + 0x58)
+#define INTSTATSSM(i) ((i) * 0x4000 + 0x04)
+#define INTSTATSSM_SHANGERR __BIT(13)
+#define PPERR(i) ((i) * 0x4000 + 0x08)
+#define PPERRID(i) ((i) * 0x4000 + 0x0C)
+#define CERRSSMSH(i) ((i) * 0x4000 + 0x10)
+#define UERRSSMSH(i) ((i) * 0x4000 + 0x18)
+#define UERRSSMSHAD(i) ((i) * 0x4000 + 0x1C)
+#define SLICEHANGSTATUS(i) ((i) * 0x4000 + 0x4C)
+#define SLICE_HANG_AUTH0_MASK __BIT(0)
+#define SLICE_HANG_AUTH1_MASK __BIT(1)
+#define SLICE_HANG_CPHR0_MASK __BIT(4)
+#define SLICE_HANG_CPHR1_MASK __BIT(5)
+#define SLICE_HANG_CMP0_MASK __BIT(8)
+#define SLICE_HANG_CMP1_MASK __BIT(9)
+#define SLICE_HANG_XLT0_MASK __BIT(12)
+#define SLICE_HANG_XLT1_MASK __BIT(13)
+#define SLICE_HANG_MMP0_MASK __BIT(16)
+#define SLICE_HANG_MMP1_MASK __BIT(17)
+#define SLICE_HANG_MMP2_MASK __BIT(18)
+#define SLICE_HANG_MMP3_MASK __BIT(19)
+#define SLICE_HANG_MMP4_MASK __BIT(20)
+
+#define SHINTMASKSSM(i) ((i) * 0x4000 + 0x1018)
+#define ENABLE_SLICE_HANG 0x000000
+#define MAX_MMP (5)
+#define MMP_BASE(i) ((i) * 0x1000 % 0x3800)
+#define CERRSSMMMP(i, n) ((i) * 0x4000 + MMP_BASE(n) + 0x380)
+#define UERRSSMMMP(i, n) ((i) * 0x4000 + MMP_BASE(n) + 0x388)
+#define UERRSSMMMPAD(i, n) ((i) * 0x4000 + MMP_BASE(n) + 0x38C)
+
+#define CPP_CFC_ERR_STATUS (0x30000 + 0xC04)
+#define CPP_CFC_ERR_PPID (0x30000 + 0xC08)
+
+#define ERRSOU0 (0x3A000 + 0x00)
+#define ERRSOU1 (0x3A000 + 0x04)
+#define ERRSOU2 (0x3A000 + 0x08)
+#define ERRSOU3 (0x3A000 + 0x0C)
+#define ERRSOU4 (0x3A000 + 0xD0)
+#define ERRSOU5 (0x3A000 + 0xD8)
+#define ERRMSK0 (0x3A000 + 0x10)
+#define ERRMSK1 (0x3A000 + 0x14)
+#define ERRMSK2 (0x3A000 + 0x18)
+#define ERRMSK3 (0x3A000 + 0x1C)
+#define ERRMSK4 (0x3A000 + 0xD4)
+#define ERRMSK5 (0x3A000 + 0xDC)
+#define EMSK3_CPM0_MASK __BIT(2)
+#define EMSK3_CPM1_MASK __BIT(3)
+#define EMSK5_CPM2_MASK __BIT(16)
+#define EMSK5_CPM3_MASK __BIT(17)
+#define EMSK5_CPM4_MASK __BIT(18)
+#define RICPPINTSTS (0x3A000 + 0x114)
+#define RIERRPUSHID (0x3A000 + 0x118)
+#define RIERRPULLID (0x3A000 + 0x11C)
+
+#define TICPPINTSTS (0x3A400 + 0x13C)
+#define TIERRPUSHID (0x3A400 + 0x140)
+#define TIERRPULLID (0x3A400 + 0x144)
+#define SECRAMUERR (0x3AC00 + 0x04)
+#define SECRAMUERRAD (0x3AC00 + 0x0C)
+#define CPPMEMTGTERR (0x3AC00 + 0x10)
+#define ERRPPID (0x3AC00 + 0x14)
+
+#define ADMINMSGUR 0x3a574
+#define ADMINMSGLR 0x3a578
+#define MAILBOX_BASE 0x20970
+#define MAILBOX_STRIDE 0x1000
+#define ADMINMSG_LEN 32
+
+/* -------------------------------------------------------------------------- */
+static const uint8_t mailbox_const_tab[1024] __aligned(1024) = {
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13,
+0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76,
+0x54, 0x32, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab,
+0x89, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0,
+0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x05, 0x9e,
+0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd, 0x17, 0xf7, 0x0e, 0x59, 0x39,
+0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58, 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe,
+0xfa, 0x4f, 0xa4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae,
+0x85, 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f,
+0x9b, 0x05, 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19, 0x05,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
+0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
+0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, 0x33, 0x26, 0x67, 0xff,
+0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c,
+0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f,
+0xa4, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb,
+0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
+0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, 0x0e, 0x52,
+0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
+0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13,
+0x7e, 0x21, 0x79, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+/* -------------------------------------------------------------------------- */
+/* Microcode */
+
+/* Clear GPR of AE */
+static const uint64_t ae_clear_gprs_inst[] = {
+ 0x0F0000C0000ull, /* .0 l0000!val = 0 ; immed[l0000!val, 0x0] */
+ 0x0F000000380ull, /* .1 l0000!count = 128 ; immed[l0000!count, 0x80] */
+ 0x0D805000011ull, /* .2 br!=ctx[0, ctx_init#] */
+ 0x0FC082C0300ull, /* .3 local_csr_wr[nn_put, 0] */
+ 0x0F0000C0300ull, /* .4 nop */
+ 0x0F0000C0300ull, /* .5 nop */
+ 0x0F0000C0300ull, /* .6 nop */
+ 0x0F0000C0300ull, /* .7 nop */
+ 0x0A0643C0000ull, /* .8 init_nn#:alu[*n$index++, --, b, l0000!val] */
+ 0x0BAC0000301ull, /* .9 alu[l0000!count, l0000!count, -, 1] */
+ 0x0D802000101ull, /* .10 bne[init_nn#] */
+ 0x0F0000C0001ull, /* .11 l0000!indx = 0 ; immed[l0000!indx, 0x0] */
+ 0x0FC066C0001ull, /* .12 local_csr_wr[active_lm_addr_0, l0000!indx];
+ * put indx to lm_addr */
+ 0x0F0000C0300ull, /* .13 nop */
+ 0x0F0000C0300ull, /* .14 nop */
+ 0x0F0000C0300ull, /* .15 nop */
+ 0x0F000400300ull, /* .16 l0000!count = 1024 ; immed[l0000!count, 0x400] */
+ 0x0A0610C0000ull, /* .17 init_lm#:alu[*l$index0++, --, b, l0000!val] */
+ 0x0BAC0000301ull, /* .18 alu[l0000!count, l0000!count, -, 1] */
+ 0x0D804400101ull, /* .19 bne[init_lm#] */
+ 0x0A0580C0000ull, /* .20 ctx_init#:alu[$l0000!xfers[0], --, b, l0000!val] */
+ 0x0A0581C0000ull, /* .21 alu[$l0000!xfers[1], --, b, l0000!val] */
+ 0x0A0582C0000ull, /* .22 alu[$l0000!xfers[2], --, b, l0000!val] */
+ 0x0A0583C0000ull, /* .23 alu[$l0000!xfers[3], --, b, l0000!val] */
+ 0x0A0584C0000ull, /* .24 alu[$l0000!xfers[4], --, b, l0000!val] */
+ 0x0A0585C0000ull, /* .25 alu[$l0000!xfers[5], --, b, l0000!val] */
+ 0x0A0586C0000ull, /* .26 alu[$l0000!xfers[6], --, b, l0000!val] */
+ 0x0A0587C0000ull, /* .27 alu[$l0000!xfers[7], --, b, l0000!val] */
+ 0x0A0588C0000ull, /* .28 alu[$l0000!xfers[8], --, b, l0000!val] */
+ 0x0A0589C0000ull, /* .29 alu[$l0000!xfers[9], --, b, l0000!val] */
+ 0x0A058AC0000ull, /* .30 alu[$l0000!xfers[10], --, b, l0000!val] */
+ 0x0A058BC0000ull, /* .31 alu[$l0000!xfers[11], --, b, l0000!val] */
+ 0x0A058CC0000ull, /* .32 alu[$l0000!xfers[12], --, b, l0000!val] */
+ 0x0A058DC0000ull, /* .33 alu[$l0000!xfers[13], --, b, l0000!val] */
+ 0x0A058EC0000ull, /* .34 alu[$l0000!xfers[14], --, b, l0000!val] */
+ 0x0A058FC0000ull, /* .35 alu[$l0000!xfers[15], --, b, l0000!val] */
+ 0x0A05C0C0000ull, /* .36 alu[$l0000!xfers[16], --, b, l0000!val] */
+ 0x0A05C1C0000ull, /* .37 alu[$l0000!xfers[17], --, b, l0000!val] */
+ 0x0A05C2C0000ull, /* .38 alu[$l0000!xfers[18], --, b, l0000!val] */
+ 0x0A05C3C0000ull, /* .39 alu[$l0000!xfers[19], --, b, l0000!val] */
+ 0x0A05C4C0000ull, /* .40 alu[$l0000!xfers[20], --, b, l0000!val] */
+ 0x0A05C5C0000ull, /* .41 alu[$l0000!xfers[21], --, b, l0000!val] */
+ 0x0A05C6C0000ull, /* .42 alu[$l0000!xfers[22], --, b, l0000!val] */
+ 0x0A05C7C0000ull, /* .43 alu[$l0000!xfers[23], --, b, l0000!val] */
+ 0x0A05C8C0000ull, /* .44 alu[$l0000!xfers[24], --, b, l0000!val] */
+ 0x0A05C9C0000ull, /* .45 alu[$l0000!xfers[25], --, b, l0000!val] */
+ 0x0A05CAC0000ull, /* .46 alu[$l0000!xfers[26], --, b, l0000!val] */
+ 0x0A05CBC0000ull, /* .47 alu[$l0000!xfers[27], --, b, l0000!val] */
+ 0x0A05CCC0000ull, /* .48 alu[$l0000!xfers[28], --, b, l0000!val] */
+ 0x0A05CDC0000ull, /* .49 alu[$l0000!xfers[29], --, b, l0000!val] */
+ 0x0A05CEC0000ull, /* .50 alu[$l0000!xfers[30], --, b, l0000!val] */
+ 0x0A05CFC0000ull, /* .51 alu[$l0000!xfers[31], --, b, l0000!val] */
+ 0x0A0400C0000ull, /* .52 alu[l0000!gprega[0], --, b, l0000!val] */
+ 0x0B0400C0000ull, /* .53 alu[l0000!gpregb[0], --, b, l0000!val] */
+ 0x0A0401C0000ull, /* .54 alu[l0000!gprega[1], --, b, l0000!val] */
+ 0x0B0401C0000ull, /* .55 alu[l0000!gpregb[1], --, b, l0000!val] */
+ 0x0A0402C0000ull, /* .56 alu[l0000!gprega[2], --, b, l0000!val] */
+ 0x0B0402C0000ull, /* .57 alu[l0000!gpregb[2], --, b, l0000!val] */
+ 0x0A0403C0000ull, /* .58 alu[l0000!gprega[3], --, b, l0000!val] */
+ 0x0B0403C0000ull, /* .59 alu[l0000!gpregb[3], --, b, l0000!val] */
+ 0x0A0404C0000ull, /* .60 alu[l0000!gprega[4], --, b, l0000!val] */
+ 0x0B0404C0000ull, /* .61 alu[l0000!gpregb[4], --, b, l0000!val] */
+ 0x0A0405C0000ull, /* .62 alu[l0000!gprega[5], --, b, l0000!val] */
+ 0x0B0405C0000ull, /* .63 alu[l0000!gpregb[5], --, b, l0000!val] */
+ 0x0A0406C0000ull, /* .64 alu[l0000!gprega[6], --, b, l0000!val] */
+ 0x0B0406C0000ull, /* .65 alu[l0000!gpregb[6], --, b, l0000!val] */
+ 0x0A0407C0000ull, /* .66 alu[l0000!gprega[7], --, b, l0000!val] */
+ 0x0B0407C0000ull, /* .67 alu[l0000!gpregb[7], --, b, l0000!val] */
+ 0x0A0408C0000ull, /* .68 alu[l0000!gprega[8], --, b, l0000!val] */
+ 0x0B0408C0000ull, /* .69 alu[l0000!gpregb[8], --, b, l0000!val] */
+ 0x0A0409C0000ull, /* .70 alu[l0000!gprega[9], --, b, l0000!val] */
+ 0x0B0409C0000ull, /* .71 alu[l0000!gpregb[9], --, b, l0000!val] */
+ 0x0A040AC0000ull, /* .72 alu[l0000!gprega[10], --, b, l0000!val] */
+ 0x0B040AC0000ull, /* .73 alu[l0000!gpregb[10], --, b, l0000!val] */
+ 0x0A040BC0000ull, /* .74 alu[l0000!gprega[11], --, b, l0000!val] */
+ 0x0B040BC0000ull, /* .75 alu[l0000!gpregb[11], --, b, l0000!val] */
+ 0x0A040CC0000ull, /* .76 alu[l0000!gprega[12], --, b, l0000!val] */
+ 0x0B040CC0000ull, /* .77 alu[l0000!gpregb[12], --, b, l0000!val] */
+ 0x0A040DC0000ull, /* .78 alu[l0000!gprega[13], --, b, l0000!val] */
+ 0x0B040DC0000ull, /* .79 alu[l0000!gpregb[13], --, b, l0000!val] */
+ 0x0A040EC0000ull, /* .80 alu[l0000!gprega[14], --, b, l0000!val] */
+ 0x0B040EC0000ull, /* .81 alu[l0000!gpregb[14], --, b, l0000!val] */
+ 0x0A040FC0000ull, /* .82 alu[l0000!gprega[15], --, b, l0000!val] */
+ 0x0B040FC0000ull, /* .83 alu[l0000!gpregb[15], --, b, l0000!val] */
+ 0x0D81581C010ull, /* .84 br=ctx[7, exit#] */
+ 0x0E000010000ull, /* .85 ctx_arb[kill], any */
+ 0x0E000010000ull, /* .86 exit#:ctx_arb[kill], any */
+};
+
+static const uint64_t ae_inst_4b[] = {
+ 0x0F0400C0000ull, /* .0 immed_w0[l0000!indx, 0] */
+ 0x0F4400C0000ull, /* .1 immed_w1[l0000!indx, 0] */
+ 0x0F040000300ull, /* .2 immed_w0[l0000!myvalue, 0x0] */
+ 0x0F440000300ull, /* .3 immed_w1[l0000!myvalue, 0x0] */
+ 0x0FC066C0000ull, /* .4 local_csr_wr[active_lm_addr_0,
+ l0000!indx]; put indx to lm_addr */
+ 0x0F0000C0300ull, /* .5 nop */
+ 0x0F0000C0300ull, /* .6 nop */
+ 0x0F0000C0300ull, /* .7 nop */
+ 0x0A021000000ull, /* .8 alu[*l$index0++, --, b, l0000!myvalue] */
+};
+
+static const uint64_t ae_inst_1b[] = {
+ 0x0F0400C0000ull, /* .0 immed_w0[l0000!indx, 0] */
+ 0x0F4400C0000ull, /* .1 immed_w1[l0000!indx, 0] */
+ 0x0F040000300ull, /* .2 immed_w0[l0000!myvalue, 0x0] */
+ 0x0F440000300ull, /* .3 immed_w1[l0000!myvalue, 0x0] */
+ 0x0FC066C0000ull, /* .4 local_csr_wr[active_lm_addr_0,
+ l0000!indx]; put indx to lm_addr */
+ 0x0F0000C0300ull, /* .5 nop */
+ 0x0F0000C0300ull, /* .6 nop */
+ 0x0F0000C0300ull, /* .7 nop */
+ 0x0A000180000ull, /* .8 alu[l0000!val, --, b, *l$index0] */
+ 0x09080000200ull, /* .9 alu_shf[l0000!myvalue, --, b,
+ l0000!myvalue, <<24 ] */
+ 0x08180280201ull, /* .10 alu_shf[l0000!val1, --, b, l0000!val, <<8 ] */
+ 0x08080280102ull, /* .11 alu_shf[l0000!val1, --, b, l0000!val1 , >>8 ] */
+ 0x0BA00100002ull, /* .12 alu[l0000!val2, l0000!val1, or, l0000!myvalue] */
+
+};
+
+static const uint64_t ae_inst_2b[] = {
+ 0x0F0400C0000ull, /* .0 immed_w0[l0000!indx, 0] */
+ 0x0F4400C0000ull, /* .1 immed_w1[l0000!indx, 0] */
+ 0x0F040000300ull, /* .2 immed_w0[l0000!myvalue, 0x0] */
+ 0x0F440000300ull, /* .3 immed_w1[l0000!myvalue, 0x0] */
+ 0x0FC066C0000ull, /* .4 local_csr_wr[active_lm_addr_0,
+ l0000!indx]; put indx to lm_addr */
+ 0x0F0000C0300ull, /* .5 nop */
+ 0x0F0000C0300ull, /* .6 nop */
+ 0x0F0000C0300ull, /* .7 nop */
+ 0x0A000180000ull, /* .8 alu[l0000!val, --, b, *l$index0] */
+ 0x09100000200ull, /* .9 alu_shf[l0000!myvalue, --, b,
+ l0000!myvalue, <<16 ] */
+ 0x08100280201ull, /* .10 alu_shf[l0000!val1, --, b, l0000!val, <<16 ] */
+ 0x08100280102ull, /* .11 alu_shf[l0000!val1, --, b, l0000!val1 , >>16 ] */
+ 0x0BA00100002ull, /* .12 alu[l0000!val2, l0000!val1, or, l0000!myvalue] */
+};
+
+static const uint64_t ae_inst_3b[] = {
+ 0x0F0400C0000ull, /* .0 immed_w0[l0000!indx, 0] */
+ 0x0F4400C0000ull, /* .1 immed_w1[l0000!indx, 0] */
+ 0x0F040000300ull, /* .2 immed_w0[l0000!myvalue, 0x0] */
+ 0x0F440000300ull, /* .3 immed_w1[l0000!myvalue, 0x0] */
+ 0x0FC066C0000ull, /* .4 local_csr_wr[active_lm_addr_0,
+ l0000!indx]; put indx to lm_addr */
+ 0x0F0000C0300ull, /* .5 nop */
+ 0x0F0000C0300ull, /* .6 nop */
+ 0x0F0000C0300ull, /* .7 nop */
+ 0x0A000180000ull, /* .8 alu[l0000!val, --, b, *l$index0] */
+ 0x09180000200ull, /* .9 alu_shf[l0000!myvalue, --,
+ b, l0000!myvalue, <<8 ] */
+ 0x08080280201ull, /* .10 alu_shf[l0000!val1, --, b, l0000!val, <<24 ] */
+ 0x08180280102ull, /* .11 alu_shf[l0000!val1, --, b, l0000!val1 , >>24 ] */
+ 0x0BA00100002ull, /* .12 alu[l0000!val2, l0000!val1, or, l0000!myvalue] */
+};
+
+/* micro-instr fixup */
+#define INSERT_IMMED_GPRA_CONST(inst, const_val) \
+ inst = (inst & 0xFFFF00C03FFull) | \
+ ((((const_val) << 12) & 0x0FF00000ull) | \
+ (((const_val) << 10) & 0x0003FC00ull))
+#define INSERT_IMMED_GPRB_CONST(inst, const_val) \
+ inst = (inst & 0xFFFF00FFF00ull) | \
+ ((((const_val) << 12) & 0x0FF00000ull) | \
+ (((const_val) << 0) & 0x000000FFull))
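A hedged sketch of how these fixups are typically applied: one of the instruction templates above is copied to a scratch buffer and the constant is patched into its "immed" words before the snippet is executed. The choice of instruction indices and the split into 16-bit halves are illustrative only, not a statement about what the driver does.

/* Hypothetical: 'ucode' must hold at least nitems(ae_inst_4b) entries. */
static void
example_patch_value(uint64_t *ucode, uint32_t value)
{
	memcpy(ucode, ae_inst_4b, sizeof(ae_inst_4b));
	/* Low half into the immed_w0 word (.2), high half into immed_w1 (.3). */
	INSERT_IMMED_GPRA_CONST(ucode[2], value & 0xffff);
	INSERT_IMMED_GPRA_CONST(ucode[3], (value >> 16) & 0xffff);
}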
+
+enum aereg_type {
+ AEREG_NO_DEST, /* no destination */
+ AEREG_GPA_REL, /* general-purpose A register under relative mode */
+ AEREG_GPA_ABS, /* general-purpose A register under absolute mode */
+ AEREG_GPB_REL, /* general-purpose B register under relative mode */
+ AEREG_GPB_ABS, /* general-purpose B register under absolute mode */
+ AEREG_SR_REL, /* sram register under relative mode */
+ AEREG_SR_RD_REL, /* sram read register under relative mode */
+ AEREG_SR_WR_REL, /* sram write register under relative mode */
+ AEREG_SR_ABS, /* sram register under absolute mode */
+ AEREG_SR_RD_ABS, /* sram read register under absolute mode */
+ AEREG_SR_WR_ABS, /* sram write register under absolute mode */
+ AEREG_SR0_SPILL, /* sram0 spill register */
+ AEREG_SR1_SPILL, /* sram1 spill register */
+ AEREG_SR2_SPILL, /* sram2 spill register */
+ AEREG_SR3_SPILL, /* sram3 spill register */
+ AEREG_SR0_MEM_ADDR, /* sram0 memory address register */
+ AEREG_SR1_MEM_ADDR, /* sram1 memory address register */
+ AEREG_SR2_MEM_ADDR, /* sram2 memory address register */
+ AEREG_SR3_MEM_ADDR, /* sram3 memory address register */
+ AEREG_DR_REL, /* dram register under relative mode */
+ AEREG_DR_RD_REL, /* dram read register under relative mode */
+ AEREG_DR_WR_REL, /* dram write register under relative mode */
+ AEREG_DR_ABS, /* dram register under absolute mode */
+ AEREG_DR_RD_ABS, /* dram read register under absolute mode */
+ AEREG_DR_WR_ABS, /* dram write register under absolute mode */
+ AEREG_DR_MEM_ADDR, /* dram memory address register */
+ AEREG_LMEM, /* local memory */
+ AEREG_LMEM0, /* local memory bank0 */
+ AEREG_LMEM1, /* local memory bank1 */
+ AEREG_LMEM_SPILL, /* local memory spill */
+ AEREG_LMEM_ADDR, /* local memory address */
+ AEREG_NEIGH_REL, /* next neighbour register under relative mode */
+ AEREG_NEIGH_INDX, /* next neighbour register under index mode */
+ AEREG_SIG_REL, /* signal register under relative mode */
+ AEREG_SIG_INDX, /* signal register under index mode */
+ AEREG_SIG_DOUBLE, /* signal register */
+ AEREG_SIG_SINGLE, /* signal register */
+ AEREG_SCRATCH_MEM_ADDR, /* scratch memory address */
+ AEREG_UMEM0, /* ustore memory bank0 */
+ AEREG_UMEM1, /* ustore memory bank1 */
+ AEREG_UMEM_SPILL, /* ustore memory spill */
+ AEREG_UMEM_ADDR, /* ustore memory address */
+ AEREG_DR1_MEM_ADDR, /* dram segment1 address */
+ AEREG_SR0_IMPORTED, /* sram segment0 imported data */
+ AEREG_SR1_IMPORTED, /* sram segment1 imported data */
+ AEREG_SR2_IMPORTED, /* sram segment2 imported data */
+ AEREG_SR3_IMPORTED, /* sram segment3 imported data */
+ AEREG_DR_IMPORTED, /* dram segment0 imported data */
+ AEREG_DR1_IMPORTED, /* dram segment1 imported data */
+ AEREG_SCRATCH_IMPORTED, /* scratch imported data */
+ AEREG_XFER_RD_ABS, /* transfer read register under absolute mode */
+ AEREG_XFER_WR_ABS, /* transfer write register under absolute mode */
+ AEREG_CONST_VALUE, /* const value */
+ AEREG_ADDR_TAKEN, /* address taken */
+ AEREG_OPTIMIZED_AWAY, /* optimized away */
+ AEREG_SHRAM_ADDR, /* shared ram0 address */
+ AEREG_SHRAM1_ADDR, /* shared ram1 address */
+ AEREG_SHRAM2_ADDR, /* shared ram2 address */
+ AEREG_SHRAM3_ADDR, /* shared ram3 address */
+ AEREG_SHRAM4_ADDR, /* shared ram4 address */
+ AEREG_SHRAM5_ADDR, /* shared ram5 address */
+ AEREG_ANY = 0xffff /* any register */
+};
+#define AEREG_SR_INDX AEREG_SR_ABS
+ /* sram transfer register under index mode */
+#define AEREG_DR_INDX AEREG_DR_ABS
+ /* dram transfer register under index mode */
+#define AEREG_NEIGH_ABS AEREG_NEIGH_INDX
+ /* next neighbor register under absolute mode */
+
+
+#define QAT_2K 0x0800
+#define QAT_4K 0x1000
+#define QAT_6K 0x1800
+#define QAT_8K 0x2000
+#define QAT_16K 0x4000
+
+#define MOF_OBJ_ID_LEN 8
+#define MOF_FID 0x00666f6d
+#define MOF_MIN_VER 0x1
+#define MOF_MAJ_VER 0x0
+#define SYM_OBJS "SYM_OBJS" /* symbol object string */
+#define UOF_OBJS "UOF_OBJS" /* uof object string */
+#define SUOF_OBJS "SUF_OBJS" /* suof object string */
+#define SUOF_IMAG "SUF_IMAG" /* suof chunk ID string */
+
+#define UOF_STRT "UOF_STRT" /* string table section ID */
+#define UOF_GTID "UOF_GTID" /* GTID section ID */
+#define UOF_IMAG "UOF_IMAG" /* image section ID */
+#define UOF_IMEM "UOF_IMEM" /* import section ID */
+#define UOF_MSEG "UOF_MSEG" /* memory section ID */
+
+#define CRC_POLY 0x1021
+#define CRC_WIDTH 16
+#define CRC_BITMASK(x) (1L << (x))
+#define CRC_WIDTHMASK(width) ((((1L<<(width-1))-1L)<<1)|1L)
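The four CRC macros describe a 16-bit CRC with the CCITT polynomial. A self-contained, editorial bit-at-a-time implementation using them might look as follows; the 0xffff seed is an assumption of this sketch.

static uint16_t
example_crc16(const uint8_t *buf, size_t len)
{
	u_long crc = 0xffff;
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		/* Feed the next byte into the top of the CRC register. */
		crc ^= (u_long)buf[i] << (CRC_WIDTH - 8);
		for (bit = 0; bit < 8; bit++) {
			if (crc & CRC_BITMASK(CRC_WIDTH - 1))
				crc = (crc << 1) ^ CRC_POLY;
			else
				crc <<= 1;
		}
	}
	return (crc & CRC_WIDTHMASK(CRC_WIDTH));
}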
+
+struct mof_file_hdr {
+ u_int mfh_fid;
+ u_int mfh_csum;
+ char mfh_min_ver;
+ char mfh_maj_ver;
+ u_short mfh_reserved;
+ u_short mfh_max_chunks;
+ u_short mfh_num_chunks;
+};
+
+struct mof_file_chunk_hdr {
+ char mfch_id[MOF_OBJ_ID_LEN];
+ uint64_t mfch_offset;
+ uint64_t mfch_size;
+};
+
+struct mof_uof_hdr {
+ u_short muh_max_chunks;
+ u_short muh_num_chunks;
+ u_int muh_reserved;
+};
+
+struct mof_uof_chunk_hdr {
+ char much_id[MOF_OBJ_ID_LEN]; /* should be UOF_IMAG */
+ uint64_t much_offset; /* uof image */
+ uint64_t much_size; /* uof image size */
+ u_int much_name; /* uof name string-table offset */
+ u_int much_reserved;
+};
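An editorial sketch of walking a MOF image with the two headers above: validate the file ID, then scan the chunk directory for a chunk such as UOF_OBJS. That the chunk headers immediately follow the file header is an assumption of this sketch.

static const struct mof_file_chunk_hdr *
example_mof_find_chunk(const void *mof, const char *id)
{
	const struct mof_file_hdr *hdr = mof;
	const struct mof_file_chunk_hdr *chunk;
	u_short i;

	if (hdr->mfh_fid != MOF_FID)
		return (NULL);
	chunk = (const struct mof_file_chunk_hdr *)(hdr + 1);
	for (i = 0; i < hdr->mfh_num_chunks; i++, chunk++) {
		if (memcmp(chunk->mfch_id, id, MOF_OBJ_ID_LEN) == 0)
			return (chunk);
	}
	return (NULL);
}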
+
+#define UOF_MAX_NUM_OF_AE 16 /* maximum number of AE */
+
+#define UOF_OBJ_ID_LEN 8 /* length of object ID */
+#define UOF_FIELD_POS_SIZE 12 /* field position size */
+#define MIN_UOF_SIZE 24 /* minimum .uof file size */
+#define UOF_FID 0xc6c2 /* uof magic number */
+#define UOF_MIN_VER 0x11
+#define UOF_MAJ_VER 0x4
+
+struct uof_file_hdr {
+ u_short ufh_id; /* file id and endian indicator */
+ u_short ufh_reserved1; /* reserved for future use */
+ char ufh_min_ver; /* file format minor version */
+ char ufh_maj_ver; /* file format major version */
+ u_short ufh_reserved2; /* reserved for future use */
+ u_short ufh_max_chunks; /* max chunks in file */
+ u_short ufh_num_chunks; /* num of actual chunks */
+};
+
+struct uof_file_chunk_hdr {
+ char ufch_id[UOF_OBJ_ID_LEN]; /* chunk identifier */
+ u_int ufch_csum; /* chunk checksum */
+ u_int ufch_offset; /* offset of the chunk in the file */
+ u_int ufch_size; /* size of the chunk */
+};
+
+struct uof_obj_hdr {
+ u_int uoh_cpu_type; /* CPU type */
+ u_short uoh_min_cpu_ver; /* starting CPU version */
+ u_short uoh_max_cpu_ver; /* ending CPU version */
+ short uoh_max_chunks; /* max chunks in chunk obj */
+ short uoh_num_chunks; /* num of actual chunks */
+ u_int uoh_reserved1;
+ u_int uoh_reserved2;
+};
+
+struct uof_chunk_hdr {
+ char uch_id[UOF_OBJ_ID_LEN];
+ u_int uch_offset;
+ u_int uch_size;
+};
+
+struct uof_str_tab {
+ u_int ust_table_len; /* length of table */
+ u_int ust_reserved; /* reserved for future use */
+ uint64_t ust_strings; /* pointer to string table.
+ * NULL terminated strings */
+};
+
+#define AE_MODE_RELOAD_CTX_SHARED __BIT(12)
+#define AE_MODE_SHARED_USTORE __BIT(11)
+#define AE_MODE_LMEM1 __BIT(9)
+#define AE_MODE_LMEM0 __BIT(8)
+#define AE_MODE_NN_MODE __BITS(7, 4)
+#define AE_MODE_CTX_MODE __BITS(3, 0)
+
+#define AE_MODE_NN_MODE_NEIGH 0
+#define AE_MODE_NN_MODE_SELF 1
+#define AE_MODE_NN_MODE_DONTCARE 0xff
+
+struct uof_image {
+ u_int ui_name; /* image name */
+ u_int ui_ae_assigned; /* AccelEngines assigned */
+ u_int ui_ctx_assigned; /* AccelEngine contexts assigned */
+ u_int ui_cpu_type; /* cpu type */
+ u_int ui_entry_address; /* entry uaddress */
+ u_int ui_fill_pattern[2]; /* uword fill value */
+ u_int ui_reloadable_size; /* size of reloadable ustore section */
+
+ u_char ui_sensitivity; /*
+ * case sensitivity: 0 = insensitive,
+ * 1 = sensitive
+ */
+ u_char ui_reserved; /* reserved for future use */
+ u_short ui_ae_mode; /*
+ * unused<15:14>, legacyMode<13>,
+ * reloadCtxShared<12>, sharedUstore<11>,
+ * ecc<10>, locMem1<9>, locMem0<8>,
+ * nnMode<7:4>, ctx<3:0>
+ */
+
+ u_short ui_max_ver; /* max cpu ver on which the image can run */
+ u_short ui_min_ver; /* min cpu ver on which the image can run */
+
+ u_short ui_image_attrib; /* image attributes */
+ u_short ui_reserved2; /* reserved for future use */
+
+ u_short ui_num_page_regions; /* number of page regions */
+ u_short ui_num_pages; /* number of pages */
+
+ u_int ui_reg_tab; /* offset to register table */
+ u_int ui_init_reg_sym_tab; /* reg/sym init table */
+ u_int ui_sbreak_tab; /* offset to sbreak table */
+
+ u_int ui_app_metadata; /* application meta-data */
+ /* ui_npages of code page follows this header */
+};
+
+struct uof_obj_table {
+ u_int uot_nentries; /* number of table entries */
+ /* uot_nentries of object follows */
+};
+
+struct uof_ae_reg {
+ u_int uar_name; /* reg name string-table offset */
+ u_int uar_vis_name; /* reg visible name string-table offset */
+ u_short uar_type; /* reg type */
+ u_short uar_addr; /* reg address */
+ u_short uar_access_mode; /* uof_RegAccessMode_T: read/write/both/undef */
+ u_char uar_visible; /* register visibility */
+ u_char uar_reserved1; /* reserved for future use */
+ u_short uar_ref_count; /* number of contiguous registers allocated */
+ u_short uar_reserved2; /* reserved for future use */
+ u_int uar_xoid; /* xfer order ID */
+};
+
+enum uof_value_kind {
+ UNDEF_VAL, /* undefined value */
+ CHAR_VAL, /* character value */
+ SHORT_VAL, /* short value */
+ INT_VAL, /* integer value */
+ STR_VAL, /* string value */
+ STRTAB_VAL, /* string table value */
+ NUM_VAL, /* number value */
+ EXPR_VAL /* expression value */
+};
+
+enum uof_init_type {
+ INIT_EXPR,
+ INIT_REG,
+ INIT_REG_CTX,
+ INIT_EXPR_ENDIAN_SWAP
+};
+
+struct uof_init_reg_sym {
+ u_int uirs_name; /* symbol name */
+ char uirs_init_type; /* 0=expr, 1=register, 2=ctxReg,
+ * 3=expr_endian_swap */
+ char uirs_value_type; /* EXPR_VAL, STRTAB_VAL */
+ char uirs_reg_type; /* register type: ae_reg_type */
+ u_char uirs_ctx; /* AE context when initType=2 */
+ u_int uirs_addr_offset; /* reg address, or sym-value offset */
+ u_int uirs_value; /* integer value, or expression */
+};
+
+struct uof_sbreak {
+ u_int us_page_num; /* page number */
+ u_int us_virt_uaddr; /* virt uaddress */
+ u_char us_sbreak_type; /* sbreak type */
+ u_char us_reg_type; /* register type: ae_reg_type */
+ u_short us_reserved1; /* reserved for future use */
+ u_int us_addr_offset; /* branch target address or offset
+ * to be used with the reg value to
+ * calculate the target address */
+ u_int us_reg_rddr; /* register address */
+};
+struct uof_code_page {
+ u_int ucp_page_region; /* page associated region */
+ u_int ucp_page_num; /* code-page number */
+ u_char ucp_def_page; /* default page indicator */
+ u_char ucp_reserved2; /* reserved for future use */
+ u_short ucp_reserved1; /* reserved for future use */
+ u_int ucp_beg_vaddr; /* starting virtual uaddr */
+ u_int ucp_beg_paddr; /* starting physical uaddr */
+ u_int ucp_neigh_reg_tab; /* offset to neighbour-reg table */
+ u_int ucp_uc_var_tab; /* offset to uC var table */
+ u_int ucp_imp_var_tab; /* offset to import var table */
+ u_int ucp_imp_expr_tab; /* offset to import expression table */
+ u_int ucp_code_area; /* offset to code area */
+};
+
+struct uof_code_area {
+ u_int uca_num_micro_words; /* number of micro words */
+ u_int uca_uword_block_tab; /* offset to ublock table */
+};
+
+struct uof_uword_block {
+ u_int uub_start_addr; /* start address */
+ u_int uub_num_words; /* number of microwords */
+ u_int uub_uword_offset; /* offset to the uwords */
+ u_int uub_reserved; /* reserved for future use */
+};
+
+struct uof_uword_fixup {
+ u_int uuf_name; /* offset to string table */
+ u_int uuf_uword_address; /* micro word address */
+ u_int uuf_expr_value; /* string table offset of expr string, or value */
+ u_char uuf_val_type; /* VALUE_UNDEF, VALUE_NUM, VALUE_EXPR */
+ u_char uuf_value_attrs; /* bit<0> (Scope: 0=global, 1=local),
+ * bit<1> (init: 0=no, 1=yes) */
+ u_short uuf_reserved1; /* reserved for future use */
+ char uuf_field_attrs[UOF_FIELD_POS_SIZE];
+ /* field pos, size, and right shift value */
+};
+
+struct uof_import_var {
+ u_int uiv_name; /* import var name string-table offset */
+ u_char uiv_value_attrs; /* bit<0> (Scope: 0=global),
+ * bit<1> (init: 0=no, 1=yes) */
+ u_char uiv_reserved1; /* reserved for future use */
+ u_short uiv_reserved2; /* reserved for future use */
+ uint64_t uiv_value; /* 64-bit imported value */
+};
+
+struct uof_mem_val_attr {
+ u_int umva_byte_offset; /* byte-offset from the allocated memory */
+ u_int umva_value; /* memory value */
+};
+
+enum uof_mem_region {
+ SRAM_REGION, /* SRAM region */
+ DRAM_REGION, /* DRAM0 region */
+ DRAM1_REGION, /* DRAM1 region */
+ LMEM_REGION, /* local memory region */
+ SCRATCH_REGION, /* SCRATCH region */
+ UMEM_REGION, /* micro-store region */
+ RAM_REGION, /* RAM region */
+ SHRAM_REGION, /* shared memory-0 region */
+ SHRAM1_REGION, /* shared memory-1 region */
+ SHRAM2_REGION, /* shared memory-2 region */
+ SHRAM3_REGION, /* shared memory-3 region */
+ SHRAM4_REGION, /* shared memory-4 region */
+ SHRAM5_REGION /* shared memory-5 region */
+};
+
+#define UOF_SCOPE_GLOBAL 0
+#define UOF_SCOPE_LOCAL 1
+
+struct uof_init_mem {
+ u_int uim_sym_name; /* symbol name */
+ char uim_region; /* memory region -- uof_mem_region */
+ char uim_scope; /* visibility scope */
+ u_short uim_reserved1; /* reserved for future use */
+ u_int uim_addr; /* memory address */
+ u_int uim_num_bytes; /* number of bytes */
+ u_int uim_num_val_attr; /* number of values attributes */
+
+ /* uim_num_val_attr of uof_mem_val_attr follows this header */
+};
+
+struct uof_var_mem_seg {
+ u_int uvms_sram_base; /* SRAM memory segment base addr */
+ u_int uvms_sram_size; /* SRAM segment size bytes */
+ u_int uvms_sram_alignment; /* SRAM segment alignment bytes */
+ u_int uvms_sdram_base; /* DRAM0 memory segment base addr */
+ u_int uvms_sdram_size; /* DRAM0 segment size bytes */
+ u_int uvms_sdram_alignment; /* DRAM0 segment alignment bytes */
+ u_int uvms_sdram1_base; /* DRAM1 memory segment base addr */
+ u_int uvms_sdram1_size; /* DRAM1 segment size bytes */
+ u_int uvms_sdram1_alignment; /* DRAM1 segment alignment bytes */
+ u_int uvms_scratch_base; /* SCRATCH memory segment base addr */
+ u_int uvms_scratch_size; /* SCRATCH segment size bytes */
+ u_int uvms_scratch_alignment; /* SCRATCH segment alignment bytes */
+};
+
+#define SUOF_OBJ_ID_LEN 8
+#define SUOF_FID 0x53554f46
+#define SUOF_MAJ_VER 0x0
+#define SUOF_MIN_VER 0x1
+#define SIMG_AE_INIT_SEQ_LEN (50 * sizeof(unsigned long long))
+#define SIMG_AE_INSTS_LEN (0x4000 * sizeof(unsigned long long))
+#define CSS_FWSK_MODULUS_LEN 256
+#define CSS_FWSK_EXPONENT_LEN 4
+#define CSS_FWSK_PAD_LEN 252
+#define CSS_FWSK_PUB_LEN (CSS_FWSK_MODULUS_LEN + \
+ CSS_FWSK_EXPONENT_LEN + \
+ CSS_FWSK_PAD_LEN)
+#define CSS_SIGNATURE_LEN 256
+#define CSS_AE_IMG_LEN (sizeof(struct simg_ae_mode) + \
+ SIMG_AE_INIT_SEQ_LEN + \
+ SIMG_AE_INSTS_LEN)
+#define CSS_AE_SIMG_LEN (sizeof(struct css_hdr) + \
+ CSS_FWSK_PUB_LEN + \
+ CSS_SIGNATURE_LEN + \
+ CSS_AE_IMG_LEN)
+#define AE_IMG_OFFSET (sizeof(struct css_hdr) + \
+ CSS_FWSK_MODULUS_LEN + \
+ CSS_FWSK_EXPONENT_LEN + \
+ CSS_SIGNATURE_LEN)
+#define CSS_MAX_IMAGE_LEN 0x40000
+
+struct fw_auth_desc {
+ u_int fad_img_len;
+ u_int fad_reserved;
+ u_int fad_css_hdr_high;
+ u_int fad_css_hdr_low;
+ u_int fad_img_high;
+ u_int fad_img_low;
+ u_int fad_signature_high;
+ u_int fad_signature_low;
+ u_int fad_fwsk_pub_high;
+ u_int fad_fwsk_pub_low;
+ u_int fad_img_ae_mode_data_high;
+ u_int fad_img_ae_mode_data_low;
+ u_int fad_img_ae_init_data_high;
+ u_int fad_img_ae_init_data_low;
+ u_int fad_img_ae_insts_high;
+ u_int fad_img_ae_insts_low;
+};
+
+struct auth_chunk {
+ struct fw_auth_desc ac_fw_auth_desc;
+ uint64_t ac_chunk_size;
+ uint64_t ac_chunk_bus_addr;
+};
+
+enum css_fwtype {
+ CSS_AE_FIRMWARE = 0,
+ CSS_MMP_FIRMWARE = 1
+};
+
+struct css_hdr {
+ u_int css_module_type;
+ u_int css_header_len;
+ u_int css_header_ver;
+ u_int css_module_id;
+ u_int css_module_vendor;
+ u_int css_date;
+ u_int css_size;
+ u_int css_key_size;
+ u_int css_module_size;
+ u_int css_exponent_size;
+ u_int css_fw_type;
+ u_int css_reserved[21];
+};
+
+struct simg_ae_mode {
+ u_int sam_file_id;
+ u_short sam_maj_ver;
+ u_short sam_min_ver;
+ u_int sam_dev_type;
+ u_short sam_devmax_ver;
+ u_short sam_devmin_ver;
+ u_int sam_ae_mask;
+ u_int sam_ctx_enables;
+ char sam_fw_type;
+ char sam_ctx_mode;
+ char sam_nn_mode;
+ char sam_lm0_mode;
+ char sam_lm1_mode;
+ char sam_scs_mode;
+ char sam_lm2_mode;
+ char sam_lm3_mode;
+ char sam_tindex_mode;
+ u_char sam_reserved[7];
+ char sam_simg_name[256];
+ char sam_appmeta_data[256];
+};
+
+struct suof_file_hdr {
+ u_int sfh_file_id;
+ u_int sfh_check_sum;
+ char sfh_min_ver;
+ char sfh_maj_ver;
+ char sfh_fw_type;
+ char sfh_reserved;
+ u_short sfh_max_chunks;
+ u_short sfh_num_chunks;
+};
+
+struct suof_chunk_hdr {
+ char sch_chunk_id[SUOF_OBJ_ID_LEN];
+ uint64_t sch_offset;
+ uint64_t sch_size;
+};
+
+struct suof_str_tab {
+ u_int sst_tab_length;
+ u_int sst_strings;
+};
+
+struct suof_obj_hdr {
+ u_int soh_img_length;
+ u_int soh_reserved;
+};
+
+/* -------------------------------------------------------------------------- */
+/* accel */
+
+enum fw_slice {
+ FW_SLICE_NULL = 0, /* NULL slice type */
+ FW_SLICE_CIPHER = 1, /* CIPHER slice type */
+ FW_SLICE_AUTH = 2, /* AUTH slice type */
+ FW_SLICE_DRAM_RD = 3, /* DRAM_RD Logical slice type */
+ FW_SLICE_DRAM_WR = 4, /* DRAM_WR Logical slice type */
+ FW_SLICE_COMP = 5, /* Compression slice type */
+ FW_SLICE_XLAT = 6, /* Translator slice type */
+ FW_SLICE_DELIMITER /* End delimiter */
+};
+#define MAX_FW_SLICE FW_SLICE_DELIMITER
+
+#define QAT_OPTIMAL_ALIGN_SHIFT 6
+#define QAT_OPTIMAL_ALIGN (1 << QAT_OPTIMAL_ALIGN_SHIFT)
+
+enum hw_auth_algo {
+ HW_AUTH_ALGO_NULL = 0, /* Null hashing */
+ HW_AUTH_ALGO_SHA1 = 1, /* SHA1 hashing */
+ HW_AUTH_ALGO_MD5 = 2, /* MD5 hashing */
+ HW_AUTH_ALGO_SHA224 = 3, /* SHA-224 hashing */
+ HW_AUTH_ALGO_SHA256 = 4, /* SHA-256 hashing */
+ HW_AUTH_ALGO_SHA384 = 5, /* SHA-384 hashing */
+ HW_AUTH_ALGO_SHA512 = 6, /* SHA-512 hashing */
+ HW_AUTH_ALGO_AES_XCBC_MAC = 7, /* AES-XCBC-MAC hashing */
+ HW_AUTH_ALGO_AES_CBC_MAC = 8, /* AES-CBC-MAC hashing */
+ HW_AUTH_ALGO_AES_F9 = 9, /* AES F9 hashing */
+ HW_AUTH_ALGO_GALOIS_128 = 10, /* Galois 128 bit hashing */
+ HW_AUTH_ALGO_GALOIS_64 = 11, /* Galois 64 hashing */
+ HW_AUTH_ALGO_KASUMI_F9 = 12, /* Kasumi F9 hashing */
+	HW_AUTH_ALGO_SNOW_3G_UIA2 = 13,	/* UIA2/SNOW_3G F9 hashing */
+ HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14,
+ HW_AUTH_RESERVED_1 = 15,
+ HW_AUTH_RESERVED_2 = 16,
+ HW_AUTH_ALGO_SHA3_256 = 17,
+ HW_AUTH_RESERVED_3 = 18,
+ HW_AUTH_ALGO_SHA3_512 = 19,
+ HW_AUTH_ALGO_DELIMITER = 20
+};
+
+enum hw_auth_mode {
+ HW_AUTH_MODE0,
+ HW_AUTH_MODE1,
+ HW_AUTH_MODE2,
+ HW_AUTH_MODE_DELIMITER
+};
+
+struct hw_auth_config {
+ uint32_t config;
+ /* Configuration used for setting up the slice */
+ uint32_t reserved;
+ /* Reserved */
+};
+
+#define HW_AUTH_CONFIG_SHA3_ALGO __BITS(22, 23)
+#define HW_AUTH_CONFIG_SHA3_PADDING __BIT(16)
+#define HW_AUTH_CONFIG_CMPLEN __BITS(14, 8)
+	/* Length of the digest if the QAT is to check it */
+#define HW_AUTH_CONFIG_MODE __BITS(7, 4)
+#define HW_AUTH_CONFIG_ALGO __BITS(3, 0)
+
+#define HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
+ __SHIFTIN(mode, HW_AUTH_CONFIG_MODE) | \
+ __SHIFTIN(algo, HW_AUTH_CONFIG_ALGO) | \
+ __SHIFTIN(cmp_len, HW_AUTH_CONFIG_CMPLEN)
+
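+/*
+ * The __BITS()/__SHIFTIN() macros above place each field at its bit
+ * position.  An illustrative use (not a call site in this driver), building
+ * the config word for mode-1 SHA-256 with a 32-byte digest comparison
+ * length:
+ *
+ *	uint32_t cfg = HW_AUTH_CONFIG_BUILD(HW_AUTH_MODE1,
+ *	    HW_AUTH_ALGO_SHA256, 32);
+ */
+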
+struct hw_auth_counter {
+ uint32_t counter; /* Counter value */
+ uint32_t reserved; /* Reserved */
+};
+
+struct hw_auth_setup {
+ struct hw_auth_config auth_config;
+ /* Configuration word for the auth slice */
+ struct hw_auth_counter auth_counter;
+ /* Auth counter value for this request */
+};
+
+#define HW_NULL_STATE1_SZ 32
+#define HW_MD5_STATE1_SZ 16
+#define HW_SHA1_STATE1_SZ 20
+#define HW_SHA224_STATE1_SZ 32
+#define HW_SHA256_STATE1_SZ 32
+#define HW_SHA3_256_STATE1_SZ 32
+#define HW_SHA384_STATE1_SZ 64
+#define HW_SHA512_STATE1_SZ 64
+#define HW_SHA3_512_STATE1_SZ 64
+#define HW_SHA3_224_STATE1_SZ 28
+#define HW_SHA3_384_STATE1_SZ 48
+#define HW_AES_XCBC_MAC_STATE1_SZ 16
+#define HW_AES_CBC_MAC_STATE1_SZ 16
+#define HW_AES_F9_STATE1_SZ 32
+#define HW_KASUMI_F9_STATE1_SZ 16
+#define HW_GALOIS_128_STATE1_SZ 16
+#define HW_SNOW_3G_UIA2_STATE1_SZ 8
+#define HW_ZUC_3G_EIA3_STATE1_SZ 8
+#define HW_NULL_STATE2_SZ 32
+#define HW_MD5_STATE2_SZ 16
+#define HW_SHA1_STATE2_SZ 20
+#define HW_SHA224_STATE2_SZ 32
+#define HW_SHA256_STATE2_SZ 32
+#define HW_SHA3_256_STATE2_SZ 0
+#define HW_SHA384_STATE2_SZ 64
+#define HW_SHA512_STATE2_SZ 64
+#define HW_SHA3_512_STATE2_SZ 0
+#define HW_SHA3_224_STATE2_SZ 0
+#define HW_SHA3_384_STATE2_SZ 0
+#define HW_AES_XCBC_MAC_KEY_SZ 16
+#define HW_AES_CBC_MAC_KEY_SZ 16
+#define HW_AES_CCM_CBC_E_CTR0_SZ 16
+#define HW_F9_IK_SZ 16
+#define HW_F9_FK_SZ 16
+#define HW_KASUMI_F9_STATE2_SZ (HW_F9_IK_SZ + HW_F9_FK_SZ)
+#define HW_AES_F9_STATE2_SZ HW_KASUMI_F9_STATE2_SZ
+#define HW_SNOW_3G_UIA2_STATE2_SZ 24
+#define HW_ZUC_3G_EIA3_STATE2_SZ 32
+#define HW_GALOIS_H_SZ 16
+#define HW_GALOIS_LEN_A_SZ 8
+#define HW_GALOIS_E_CTR0_SZ 16
+
+struct hw_auth_sha512 {
+ struct hw_auth_setup inner_setup;
+ /* Inner loop configuration word for the slice */
+ uint8_t state1[HW_SHA512_STATE1_SZ];
+ /* Slice state1 variable */
+ struct hw_auth_setup outer_setup;
+ /* Outer configuration word for the slice */
+ uint8_t state2[HW_SHA512_STATE2_SZ];
+ /* Slice state2 variable */
+};
+
+union hw_auth_algo_blk {
+ struct hw_auth_sha512 max;
+ /* This is the largest possible auth setup block size */
+};
+
+enum hw_cipher_algo {
+ HW_CIPHER_ALGO_NULL = 0, /* Null ciphering */
+ HW_CIPHER_ALGO_DES = 1, /* DES ciphering */
+ HW_CIPHER_ALGO_3DES = 2, /* 3DES ciphering */
+ HW_CIPHER_ALGO_AES128 = 3, /* AES-128 ciphering */
+ HW_CIPHER_ALGO_AES192 = 4, /* AES-192 ciphering */
+ HW_CIPHER_ALGO_AES256 = 5, /* AES-256 ciphering */
+ HW_CIPHER_ALGO_ARC4 = 6, /* ARC4 ciphering */
+ HW_CIPHER_ALGO_KASUMI = 7, /* Kasumi */
+ HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8, /* Snow_3G */
+ HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
+ HW_CIPHER_DELIMITER = 10 /* Delimiter type */
+};
+
+enum hw_cipher_mode {
+ HW_CIPHER_ECB_MODE = 0, /* ECB mode */
+ HW_CIPHER_CBC_MODE = 1, /* CBC mode */
+ HW_CIPHER_CTR_MODE = 2, /* CTR mode */
+ HW_CIPHER_F8_MODE = 3, /* F8 mode */
+ HW_CIPHER_XTS_MODE = 6,
+ HW_CIPHER_MODE_DELIMITER = 7 /* Delimiter type */
+};
+
+struct hw_cipher_config {
+ uint32_t val; /* Cipher slice configuration */
+ uint32_t reserved; /* Reserved */
+};
+
+#define CIPHER_CONFIG_CONVERT __BIT(9)
+#define CIPHER_CONFIG_DIR __BIT(8)
+#define CIPHER_CONFIG_MODE __BITS(7, 4)
+#define CIPHER_CONFIG_ALGO __BITS(3, 0)
+#define HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
+ __SHIFTIN(mode, CIPHER_CONFIG_MODE) | \
+ __SHIFTIN(algo, CIPHER_CONFIG_ALGO) | \
+ __SHIFTIN(convert, CIPHER_CONFIG_CONVERT) | \
+ __SHIFTIN(dir, CIPHER_CONFIG_DIR)
+
+enum hw_cipher_dir {
+ HW_CIPHER_ENCRYPT = 0, /* encryption is required */
+ HW_CIPHER_DECRYPT = 1, /* decryption is required */
+};
+
+enum hw_cipher_convert {
+	HW_CIPHER_NO_CONVERT = 0,	/* no key conversion is required */
+	HW_CIPHER_KEY_CONVERT = 1,	/* key conversion is required */
+};
+
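+/*
+ * Illustrative example (not taken from the driver code): a cipher config
+ * word selecting AES-256 in CBC mode, encrypt direction, no key conversion:
+ *
+ *	uint32_t cfg = HW_CIPHER_CONFIG_BUILD(HW_CIPHER_CBC_MODE,
+ *	    HW_CIPHER_ALGO_AES256, HW_CIPHER_NO_CONVERT, HW_CIPHER_ENCRYPT);
+ */
+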
+#define CIPHER_MODE_F8_KEY_SZ_MULT 2
+#define CIPHER_MODE_XTS_KEY_SZ_MULT 2
+
+#define HW_DES_BLK_SZ 8
+#define HW_3DES_BLK_SZ 8
+#define HW_NULL_BLK_SZ 8
+#define HW_AES_BLK_SZ 16
+#define HW_KASUMI_BLK_SZ 8
+#define HW_SNOW_3G_BLK_SZ 8
+#define HW_ZUC_3G_BLK_SZ 8
+#define HW_NULL_KEY_SZ 256
+#define HW_DES_KEY_SZ 8
+#define HW_3DES_KEY_SZ 24
+#define HW_AES_128_KEY_SZ 16
+#define HW_AES_192_KEY_SZ 24
+#define HW_AES_256_KEY_SZ 32
+#define HW_AES_128_F8_KEY_SZ (HW_AES_128_KEY_SZ * \
+ CIPHER_MODE_F8_KEY_SZ_MULT)
+#define HW_AES_192_F8_KEY_SZ (HW_AES_192_KEY_SZ * \
+ CIPHER_MODE_F8_KEY_SZ_MULT)
+#define HW_AES_256_F8_KEY_SZ (HW_AES_256_KEY_SZ * \
+ CIPHER_MODE_F8_KEY_SZ_MULT)
+#define HW_AES_128_XTS_KEY_SZ (HW_AES_128_KEY_SZ * \
+ CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define HW_AES_256_XTS_KEY_SZ (HW_AES_256_KEY_SZ * \
+ CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define HW_KASUMI_KEY_SZ 16
+#define HW_KASUMI_F8_KEY_SZ (HW_KASUMI_KEY_SZ * \
+ CIPHER_MODE_F8_KEY_SZ_MULT)
+#define HW_ARC4_KEY_SZ 256
+#define HW_SNOW_3G_UEA2_KEY_SZ 16
+#define HW_SNOW_3G_UEA2_IV_SZ 16
+#define HW_ZUC_3G_EEA3_KEY_SZ 16
+#define HW_ZUC_3G_EEA3_IV_SZ 16
+#define HW_MODE_F8_NUM_REG_TO_CLEAR 2
+
+struct hw_cipher_aes256_f8 {
+ struct hw_cipher_config cipher_config;
+ /* Cipher configuration word for the slice set to
+ * AES-256 and the F8 mode */
+ uint8_t key[HW_AES_256_F8_KEY_SZ];
+ /* Cipher key */
+};
+
+union hw_cipher_algo_blk {
+ struct hw_cipher_aes256_f8 max; /* AES-256 F8 Cipher */
+ /* This is the largest possible cipher setup block size */
+};
+
+struct flat_buffer_desc {
+ uint32_t data_len_in_bytes;
+ uint32_t reserved;
+ uint64_t phy_buffer;
+};
+
+struct buffer_list_desc {
+ uint64_t resrvd;
+ uint32_t num_buffers;
+ uint32_t reserved;
+};
+
+/* -------------------------------------------------------------------------- */
+/* look aside */
+
+enum fw_la_cmd_id {
+ FW_LA_CMD_CIPHER, /* Cipher Request */
+ FW_LA_CMD_AUTH, /* Auth Request */
+ FW_LA_CMD_CIPHER_HASH, /* Cipher-Hash Request */
+ FW_LA_CMD_HASH_CIPHER, /* Hash-Cipher Request */
+ FW_LA_CMD_TRNG_GET_RANDOM, /* TRNG Get Random Request */
+ FW_LA_CMD_TRNG_TEST, /* TRNG Test Request */
+ FW_LA_CMD_SSL3_KEY_DERIVE, /* SSL3 Key Derivation Request */
+ FW_LA_CMD_TLS_V1_1_KEY_DERIVE, /* TLS Key Derivation Request */
+ FW_LA_CMD_TLS_V1_2_KEY_DERIVE, /* TLS Key Derivation Request */
+ FW_LA_CMD_MGF1, /* MGF1 Request */
+ FW_LA_CMD_AUTH_PRE_COMP, /* Auth Pre-Compute Request */
+#if 0 /* incompatible between qat 1.5 and 1.7 */
+ FW_LA_CMD_CIPHER_CIPHER, /* Cipher-Cipher Request */
+ FW_LA_CMD_HASH_HASH, /* Hash-Hash Request */
+ FW_LA_CMD_CIPHER_PRE_COMP, /* Auth Pre-Compute Request */
+#endif
+ FW_LA_CMD_DELIMITER, /* Delimiter type */
+};
+
+#endif
diff --git a/sys/dev/qat/qatvar.h b/sys/dev/qat/qatvar.h
new file mode 100644
index 000000000000..10bc2833ca7d
--- /dev/null
+++ b/sys/dev/qat/qatvar.h
@@ -0,0 +1,1073 @@
+/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
+/* $NetBSD: qatvar.h,v 1.2 2020/03/14 18:08:39 ad Exp $ */
+
+/*
+ * Copyright (c) 2019 Internet Initiative Japan, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#ifndef _DEV_PCI_QATVAR_H_
+#define _DEV_PCI_QATVAR_H_
+
+#include <sys/counter.h>
+#include <sys/malloc.h>
+
+#include <opencrypto/cryptodev.h>
+
+#define QAT_NSYMREQ 256
+#define QAT_NSYMCOOKIE ((QAT_NSYMREQ * 2 + 1) * 2)
+#define QAT_NASYMREQ 64
+#define QAT_BATCH_SUBMIT_FREE_SPACE 2
+
+#define QAT_EV_NAME_SIZE 32
+#define QAT_RING_NAME_SIZE 32
+
+#define QAT_MAXSEG 32 /* max segments for sg dma */
+#define QAT_MAXLEN 65535 /* IP_MAXPACKET */
+
+#define QAT_HB_INTERVAL 500 /* heartbeat msec */
+#define QAT_SSM_WDT 100
+
+enum qat_chip_type {
+ QAT_CHIP_C2XXX = 0, /* NanoQAT: Atom C2000 */
+ QAT_CHIP_C2XXX_IOV,
+ QAT_CHIP_C3XXX, /* Atom C3000 */
+ QAT_CHIP_C3XXX_IOV,
+ QAT_CHIP_C62X,
+ QAT_CHIP_C62X_IOV,
+ QAT_CHIP_D15XX,
+ QAT_CHIP_D15XX_IOV,
+ QAT_CHIP_DH895XCC,
+ QAT_CHIP_DH895XCC_IOV,
+};
+
+enum qat_sku {
+ QAT_SKU_UNKNOWN = 0,
+ QAT_SKU_1,
+ QAT_SKU_2,
+ QAT_SKU_3,
+ QAT_SKU_4,
+ QAT_SKU_VF,
+};
+
+enum qat_ae_status {
+ QAT_AE_ENABLED = 1,
+ QAT_AE_ACTIVE,
+ QAT_AE_DISABLED
+};
+
+#define TIMEOUT_AE_RESET 100
+#define TIMEOUT_AE_CHECK 10000
+#define TIMEOUT_AE_CSR 500
+#define AE_EXEC_CYCLE 20
+
+#define QAT_UOF_MAX_PAGE 1
+#define QAT_UOF_MAX_PAGE_REGION 1
+
+struct qat_dmamem {
+ bus_dma_tag_t qdm_dma_tag;
+ bus_dmamap_t qdm_dma_map;
+ bus_size_t qdm_dma_size;
+ bus_dma_segment_t qdm_dma_seg;
+ void *qdm_dma_vaddr;
+};
+
+/* Valid internal ring size values */
+#define QAT_RING_SIZE_128 0x01
+#define QAT_RING_SIZE_256 0x02
+#define QAT_RING_SIZE_512 0x03
+#define QAT_RING_SIZE_4K 0x06
+#define QAT_RING_SIZE_16K 0x08
+#define QAT_RING_SIZE_4M 0x10
+#define QAT_MIN_RING_SIZE QAT_RING_SIZE_128
+#define QAT_MAX_RING_SIZE QAT_RING_SIZE_4M
+#define QAT_DEFAULT_RING_SIZE QAT_RING_SIZE_16K
+
+/* Valid internal msg size values */
+#define QAT_MSG_SIZE_32 0x01
+#define QAT_MSG_SIZE_64 0x02
+#define QAT_MSG_SIZE_128 0x04
+#define QAT_MIN_MSG_SIZE QAT_MSG_SIZE_32
+#define QAT_MAX_MSG_SIZE QAT_MSG_SIZE_128
+
+/* Size to bytes conversion macros for ring and msg size values */
+#define QAT_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
+#define QAT_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
+#define QAT_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
+#define QAT_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
+
+/* Minimum ring buffer size for memory allocation */
+#define QAT_RING_SIZE_BYTES_MIN(SIZE) \
+ ((SIZE < QAT_SIZE_TO_RING_SIZE_IN_BYTES(QAT_RING_SIZE_4K)) ? \
+ QAT_SIZE_TO_RING_SIZE_IN_BYTES(QAT_RING_SIZE_4K) : SIZE)
+#define QAT_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
+#define QAT_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
+ SIZE) & ~0x4)
+/* Max outstanding requests */
+#define QAT_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
+ ((((1 << (RING_SIZE - 1)) << 3) >> QAT_SIZE_TO_POW(MSG_SIZE)) - 1)
+
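+/*
+ * Worked example: with the default ring size (QAT_RING_SIZE_16K == 0x08)
+ * and 64-byte messages (QAT_MSG_SIZE_64 == 0x02):
+ * QAT_MSG_SIZE_TO_BYTES(0x02) == 64, QAT_SIZE_TO_RING_SIZE_IN_BYTES(0x08)
+ * == 16384, and QAT_MAX_INFLIGHTS(0x08, 0x02) == 255, one less than the
+ * 256 messages that fit in the ring.
+ */
+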
+#define QAT_RING_PATTERN 0x7f
+
+struct qat_softc;
+
+typedef int (*qat_cb_t)(struct qat_softc *, void *, void *);
+
+struct qat_ring {
+ struct mtx qr_ring_mtx; /* Lock per ring */
+ bool qr_need_wakeup;
+ void *qr_ring_vaddr;
+ uint32_t * volatile qr_inflight; /* tx/rx shared */
+ uint32_t qr_head;
+ uint32_t qr_tail;
+ uint8_t qr_msg_size;
+ uint8_t qr_ring_size;
+ uint32_t qr_ring; /* ring number in bank */
+ uint32_t qr_bank; /* bank number in device */
+ uint32_t qr_ring_id;
+ uint32_t qr_ring_mask;
+ qat_cb_t qr_cb;
+ void *qr_cb_arg;
+ struct qat_dmamem qr_dma;
+ bus_addr_t qr_ring_paddr;
+
+ const char *qr_name;
+};
+
+struct qat_bank {
+ struct qat_softc *qb_sc; /* back pointer to softc */
+ uint32_t qb_intr_mask; /* current interrupt mask */
+	uint32_t qb_allocated_rings;	/* currently allocated ring bitfield */
+ uint32_t qb_coalescing_time; /* timer in nano sec, 0: disabled */
+#define COALESCING_TIME_INTERVAL_DEFAULT 10000
+#define COALESCING_TIME_INTERVAL_MIN 500
+#define COALESCING_TIME_INTERVAL_MAX 0xfffff
+ uint32_t qb_bank; /* bank index */
+ struct mtx qb_bank_mtx;
+ struct resource *qb_ih;
+ void *qb_ih_cookie;
+
+ struct qat_ring qb_et_rings[MAX_RING_PER_BANK];
+
+};
+
+struct qat_ap_bank {
+ uint32_t qab_nf_mask;
+ uint32_t qab_nf_dest;
+ uint32_t qab_ne_mask;
+ uint32_t qab_ne_dest;
+};
+
+struct qat_ae_page {
+ struct qat_ae_page *qap_next;
+ struct qat_uof_page *qap_page;
+ struct qat_ae_region *qap_region;
+ u_int qap_flags;
+};
+
+#define QAT_AE_PAGA_FLAG_WAITING (1 << 0)
+
+struct qat_ae_region {
+ struct qat_ae_page *qar_loaded_page;
+ STAILQ_HEAD(, qat_ae_page) qar_waiting_pages;
+};
+
+struct qat_ae_slice {
+ u_int qas_assigned_ctx_mask;
+ struct qat_ae_region qas_regions[QAT_UOF_MAX_PAGE_REGION];
+ struct qat_ae_page qas_pages[QAT_UOF_MAX_PAGE];
+ struct qat_ae_page *qas_cur_pages[MAX_AE_CTX];
+ struct qat_uof_image *qas_image;
+};
+
+#define QAT_AE(sc, ae) \
+ ((sc)->sc_ae[ae])
+
+struct qat_ae {
+ u_int qae_state; /* AE state */
+	u_int qae_ustore_size;		/* micro-store size */
+ u_int qae_free_addr; /* free micro-store address */
+ u_int qae_free_size; /* free micro-store size */
+ u_int qae_live_ctx_mask; /* live context mask */
+	u_int qae_ustore_dram_addr;	/* micro-store DRAM address */
+ u_int qae_reload_size; /* reloadable code size */
+
+ /* aefw */
+ u_int qae_num_slices;
+ struct qat_ae_slice qae_slices[MAX_AE_CTX];
+ u_int qae_reloc_ustore_dram; /* reloadable ustore-dram address */
+ u_int qae_effect_ustore_size; /* effective AE ustore size */
+ u_int qae_shareable_ustore;
+};
+
+struct qat_mof {
+ void *qmf_sym; /* SYM_OBJS in sc_fw_mof */
+ size_t qmf_sym_size;
+ void *qmf_uof_objs; /* UOF_OBJS in sc_fw_mof */
+ size_t qmf_uof_objs_size;
+ void *qmf_suof_objs; /* SUOF_OBJS in sc_fw_mof */
+ size_t qmf_suof_objs_size;
+};
+
+struct qat_ae_batch_init {
+ u_int qabi_ae;
+ u_int qabi_addr;
+ u_int *qabi_value;
+ u_int qabi_size;
+ STAILQ_ENTRY(qat_ae_batch_init) qabi_next;
+};
+
+STAILQ_HEAD(qat_ae_batch_init_list, qat_ae_batch_init);
+
+/* overwritten struct uof_uword_block */
+struct qat_uof_uword_block {
+ u_int quub_start_addr; /* start address */
+ u_int quub_num_words; /* number of microwords */
+ uint64_t quub_micro_words; /* pointer to the uwords */
+};
+
+struct qat_uof_page {
+ u_int qup_page_num; /* page number */
+ u_int qup_def_page; /* default page */
+ u_int qup_page_region; /* region of page */
+ u_int qup_beg_vaddr; /* begin virtual address */
+ u_int qup_beg_paddr; /* begin physical address */
+
+ u_int qup_num_uc_var; /* num of uC var in array */
+ struct uof_uword_fixup *qup_uc_var;
+ /* array of import variables */
+ u_int qup_num_imp_var; /* num of import var in array */
+ struct uof_import_var *qup_imp_var;
+ /* array of import variables */
+ u_int qup_num_imp_expr; /* num of import expr in array */
+ struct uof_uword_fixup *qup_imp_expr;
+ /* array of import expressions */
+ u_int qup_num_neigh_reg; /* num of neigh-reg in array */
+ struct uof_uword_fixup *qup_neigh_reg;
+ /* array of neigh-reg assignments */
+ u_int qup_num_micro_words; /* number of microwords in the seg */
+
+ u_int qup_num_uw_blocks; /* number of uword blocks */
+ struct qat_uof_uword_block *qup_uw_blocks;
+ /* array of uword blocks */
+};
+
+struct qat_uof_image {
+ struct uof_image *qui_image; /* image pointer */
+ struct qat_uof_page qui_pages[QAT_UOF_MAX_PAGE];
+ /* array of pages */
+
+ u_int qui_num_ae_reg; /* num of registers */
+ struct uof_ae_reg *qui_ae_reg; /* array of registers */
+
+ u_int qui_num_init_reg_sym; /* num of reg/sym init values */
+ struct uof_init_reg_sym *qui_init_reg_sym;
+ /* array of reg/sym init values */
+
+ u_int qui_num_sbreak; /* num of sbreak values */
+	struct uof_sbreak *qui_sbreak;	/* array of sbreak values */
+
+ u_int qui_num_uwords_used;
+	/* highest uword address referenced + 1 */
+};
+
+struct qat_aefw_uof {
+ size_t qafu_size; /* uof size */
+ struct uof_obj_hdr *qafu_obj_hdr; /* UOF_OBJS */
+
+ void *qafu_str_tab;
+ size_t qafu_str_tab_size;
+
+ u_int qafu_num_init_mem;
+ struct uof_init_mem *qafu_init_mem;
+ size_t qafu_init_mem_size;
+
+ struct uof_var_mem_seg *qafu_var_mem_seg;
+
+ struct qat_ae_batch_init_list qafu_lm_init[MAX_AE];
+ size_t qafu_num_lm_init[MAX_AE];
+ size_t qafu_num_lm_init_inst[MAX_AE];
+
+	u_int qafu_num_imgs;		/* number of uof images */
+ struct qat_uof_image qafu_imgs[MAX_NUM_AE * MAX_AE_CTX];
+ /* uof images */
+};
+
+#define QAT_SERVICE_CRYPTO_A (1 << 0)
+#define QAT_SERVICE_CRYPTO_B (1 << 1)
+
+struct qat_admin_rings {
+ uint32_t qadr_active_aes_per_accel;
+ uint8_t qadr_srv_mask[MAX_AE_PER_ACCEL];
+
+ struct qat_dmamem qadr_dma;
+ struct fw_init_ring_table *qadr_master_ring_tbl;
+ struct fw_init_ring_table *qadr_cya_ring_tbl;
+ struct fw_init_ring_table *qadr_cyb_ring_tbl;
+
+ struct qat_ring *qadr_admin_tx;
+ struct qat_ring *qadr_admin_rx;
+};
+
+struct qat_accel_init_cb {
+ int qaic_status;
+};
+
+struct qat_admin_comms {
+ struct qat_dmamem qadc_dma;
+ struct qat_dmamem qadc_const_tbl_dma;
+ struct qat_dmamem qadc_hb_dma;
+};
+
+#define QAT_PID_MINOR_REV 0xf
+#define QAT_PID_MAJOR_REV (0xf << 4)
+
+struct qat_suof_image {
+ char *qsi_simg_buf;
+ u_long qsi_simg_len;
+ char *qsi_css_header;
+ char *qsi_css_key;
+ char *qsi_css_signature;
+ char *qsi_css_simg;
+ u_long qsi_simg_size;
+ u_int qsi_ae_num;
+ u_int qsi_ae_mask;
+ u_int qsi_fw_type;
+ u_long qsi_simg_name;
+ u_long qsi_appmeta_data;
+ struct qat_dmamem qsi_dma;
+};
+
+struct qat_aefw_suof {
+ u_int qafs_file_id;
+ u_int qafs_check_sum;
+ char qafs_min_ver;
+ char qafs_maj_ver;
+ char qafs_fw_type;
+ char *qafs_suof_buf;
+ u_int qafs_suof_size;
+ char *qafs_sym_str;
+ u_int qafs_sym_size;
+ u_int qafs_num_simgs;
+ struct qat_suof_image *qafs_simg;
+};
+
+enum qat_sym_hash_algorithm {
+ QAT_SYM_HASH_NONE = 0,
+ QAT_SYM_HASH_MD5 = 1,
+ QAT_SYM_HASH_SHA1 = 2,
+ QAT_SYM_HASH_SHA224 = 3,
+ QAT_SYM_HASH_SHA256 = 4,
+ QAT_SYM_HASH_SHA384 = 5,
+ QAT_SYM_HASH_SHA512 = 6,
+ QAT_SYM_HASH_AES_XCBC = 7,
+ QAT_SYM_HASH_AES_CCM = 8,
+ QAT_SYM_HASH_AES_GCM = 9,
+ QAT_SYM_HASH_KASUMI_F9 = 10,
+ QAT_SYM_HASH_SNOW3G_UIA2 = 11,
+ QAT_SYM_HASH_AES_CMAC = 12,
+ QAT_SYM_HASH_AES_GMAC = 13,
+ QAT_SYM_HASH_AES_CBC_MAC = 14,
+};
+
+#define QAT_HASH_MD5_BLOCK_SIZE 64
+#define QAT_HASH_MD5_DIGEST_SIZE 16
+#define QAT_HASH_MD5_STATE_SIZE 16
+#define QAT_HASH_SHA1_BLOCK_SIZE 64
+#define QAT_HASH_SHA1_DIGEST_SIZE 20
+#define QAT_HASH_SHA1_STATE_SIZE 20
+#define QAT_HASH_SHA224_BLOCK_SIZE 64
+#define QAT_HASH_SHA224_DIGEST_SIZE 28
+#define QAT_HASH_SHA224_STATE_SIZE 32
+#define QAT_HASH_SHA256_BLOCK_SIZE 64
+#define QAT_HASH_SHA256_DIGEST_SIZE 32
+#define QAT_HASH_SHA256_STATE_SIZE 32
+#define QAT_HASH_SHA384_BLOCK_SIZE 128
+#define QAT_HASH_SHA384_DIGEST_SIZE 48
+#define QAT_HASH_SHA384_STATE_SIZE 64
+#define QAT_HASH_SHA512_BLOCK_SIZE 128
+#define QAT_HASH_SHA512_DIGEST_SIZE 64
+#define QAT_HASH_SHA512_STATE_SIZE 64
+#define QAT_HASH_XCBC_PRECOMP_KEY_NUM 3
+#define QAT_HASH_XCBC_MAC_BLOCK_SIZE 16
+#define QAT_HASH_XCBC_MAC_128_DIGEST_SIZE 16
+#define QAT_HASH_CMAC_BLOCK_SIZE 16
+#define QAT_HASH_CMAC_128_DIGEST_SIZE 16
+#define QAT_HASH_AES_CCM_BLOCK_SIZE 16
+#define QAT_HASH_AES_CCM_DIGEST_SIZE 16
+#define QAT_HASH_AES_GCM_BLOCK_SIZE 16
+#define QAT_HASH_AES_GCM_DIGEST_SIZE 16
+#define QAT_HASH_AES_GCM_STATE_SIZE 16
+#define QAT_HASH_KASUMI_F9_BLOCK_SIZE 8
+#define QAT_HASH_KASUMI_F9_DIGEST_SIZE 4
+#define QAT_HASH_SNOW3G_UIA2_BLOCK_SIZE 8
+#define QAT_HASH_SNOW3G_UIA2_DIGEST_SIZE 4
+#define QAT_HASH_AES_CBC_MAC_BLOCK_SIZE 16
+#define QAT_HASH_AES_CBC_MAC_DIGEST_SIZE 16
+#define QAT_HASH_AES_GCM_ICV_SIZE_8 8
+#define QAT_HASH_AES_GCM_ICV_SIZE_12 12
+#define QAT_HASH_AES_GCM_ICV_SIZE_16 16
+#define QAT_HASH_AES_CCM_ICV_SIZE_MIN 4
+#define QAT_HASH_AES_CCM_ICV_SIZE_MAX 16
+#define QAT_HASH_IPAD_BYTE 0x36
+#define QAT_HASH_OPAD_BYTE 0x5c
+#define QAT_HASH_IPAD_4_BYTES 0x36363636
+#define QAT_HASH_OPAD_4_BYTES 0x5c5c5c5c
+#define QAT_HASH_KASUMI_F9_KEY_MODIFIER_4_BYTES 0xAAAAAAAA
+
+#define QAT_SYM_XCBC_STATE_SIZE ((QAT_HASH_XCBC_MAC_BLOCK_SIZE) * 3)
+#define QAT_SYM_CMAC_STATE_SIZE ((QAT_HASH_CMAC_BLOCK_SIZE) * 3)
+
+struct qat_sym_hash_alg_info {
+ uint32_t qshai_digest_len; /* Digest length in bytes */
+ uint32_t qshai_block_len; /* Block length in bytes */
+ uint32_t qshai_state_size; /* size of above state in bytes */
+ const uint8_t *qshai_init_state; /* Initial state */
+
+ const struct auth_hash *qshai_sah; /* software auth hash */
+ uint32_t qshai_state_offset; /* offset to state in *_CTX */
+ uint32_t qshai_state_word;
+};
+
+struct qat_sym_hash_qat_info {
+ uint32_t qshqi_algo_enc; /* QAT Algorithm encoding */
+ uint32_t qshqi_auth_counter; /* Counter value for Auth */
+ uint32_t qshqi_state1_len; /* QAT state1 length in bytes */
+ uint32_t qshqi_state2_len; /* QAT state2 length in bytes */
+};
+
+struct qat_sym_hash_def {
+ const struct qat_sym_hash_alg_info *qshd_alg;
+ const struct qat_sym_hash_qat_info *qshd_qat;
+};
+
+#define QAT_SYM_REQ_PARAMS_SIZE_MAX (24 + 32)
+/* Reserve enough space for cipher and authentication request params */
+/* The basis of these values is guaranteed in qat_hw*var.h with CTASSERT */
+
+#define QAT_SYM_REQ_PARAMS_SIZE_PADDED \
+ roundup(QAT_SYM_REQ_PARAMS_SIZE_MAX, QAT_OPTIMAL_ALIGN)
+/* Pad out to 64-byte multiple to ensure optimal alignment of next field */
+
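+/*
+ * With QAT_OPTIMAL_ALIGN at 64, this rounds the 56-byte maximum (24 + 32)
+ * up to 64 bytes reserved per request.
+ */
+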
+#define QAT_SYM_KEY_TLS_PREFIX_SIZE (128)
+/* Hash Prefix size in bytes for TLS (128 = MAX = SHA2 (384, 512)) */
+
+#define QAT_SYM_KEY_MAX_HASH_STATE_BUFFER \
+ (QAT_SYM_KEY_TLS_PREFIX_SIZE * 2)
+/* hash state prefix buffer structure that holds the maximum sized secret */
+
+#define QAT_SYM_HASH_BUFFER_LEN QAT_HASH_SHA512_STATE_SIZE
+/* Buffer length to hold 16 byte MD5 key and 20 byte SHA1 key */
+
+#define QAT_GCM_AAD_SIZE_MAX 240
+/* Maximum AAD size */
+
+#define QAT_AES_GCM_AAD_ALIGN 16
+
+struct qat_sym_bulk_cookie {
+ uint8_t qsbc_req_params_buf[QAT_SYM_REQ_PARAMS_SIZE_PADDED];
+ /* memory block reserved for request params
+ * NOTE: Field must be correctly aligned in memory for access by QAT
+ * engine */
+ struct qat_crypto *qsbc_crypto;
+ struct qat_session *qsbc_session;
+ /* Session context */
+ void *qsbc_cb_tag;
+ /* correlator supplied by the client */
+ uint8_t qsbc_msg[QAT_MSG_SIZE_TO_BYTES(QAT_MAX_MSG_SIZE)];
+ /* QAT request message */
+} __aligned(QAT_OPTIMAL_ALIGN);
+
+/* The basis of these values is guaranteed in qat_hw*var.h with CTASSERT */
+#define HASH_CONTENT_DESC_SIZE 176
+#define CIPHER_CONTENT_DESC_SIZE 64
+
+#define CONTENT_DESC_MAX_SIZE roundup( \
+ HASH_CONTENT_DESC_SIZE + CIPHER_CONTENT_DESC_SIZE, \
+ QAT_OPTIMAL_ALIGN)
+
+struct qat_sym_cookie {
+ union qat_sym_cookie_u {
+		/* should be 64-byte aligned */
+ struct qat_sym_bulk_cookie qsc_bulk_cookie;
+ /* symmetric bulk cookie */
+#ifdef notyet
+ struct qat_sym_key_cookie qsc_key_cookie;
+ /* symmetric key cookie */
+ struct qat_sym_nrbg_cookie qsc_nrbg_cookie;
+ /* symmetric NRBG cookie */
+#endif
+ } u;
+
+ /* should be 64-byte aligned */
+ struct buffer_list_desc qsc_buf_list;
+	struct flat_buffer_desc qsc_flat_bufs[QAT_MAXSEG]; /* must follow qsc_buf_list */
+
+ bus_dmamap_t qsc_self_dmamap; /* self DMA mapping and
+ end of DMA region */
+ bus_dma_tag_t qsc_self_dma_tag;
+
+ uint8_t qsc_iv_buf[EALG_MAX_BLOCK_LEN];
+ uint8_t qsc_auth_res[QAT_SYM_HASH_BUFFER_LEN];
+ uint8_t qsc_gcm_aad[QAT_GCM_AAD_SIZE_MAX];
+ uint8_t qsc_content_desc[CONTENT_DESC_MAX_SIZE];
+
+ bus_dmamap_t qsc_buf_dmamap; /* qsc_flat_bufs DMA mapping */
+ bus_dma_tag_t qsc_buf_dma_tag;
+ void *qsc_buf;
+
+ bus_addr_t qsc_bulk_req_params_buf_paddr;
+ bus_addr_t qsc_buffer_list_desc_paddr;
+ bus_addr_t qsc_iv_buf_paddr;
+ bus_addr_t qsc_auth_res_paddr;
+ bus_addr_t qsc_gcm_aad_paddr;
+ bus_addr_t qsc_content_desc_paddr;
+};
+
+CTASSERT(offsetof(struct qat_sym_cookie,
+ u.qsc_bulk_cookie.qsbc_req_params_buf) % QAT_OPTIMAL_ALIGN == 0);
+CTASSERT(offsetof(struct qat_sym_cookie, qsc_buf_list) % QAT_OPTIMAL_ALIGN == 0);
+
+#define MAX_CIPHER_SETUP_BLK_SZ \
+ (sizeof(struct hw_cipher_config) + \
+ 2 * HW_KASUMI_KEY_SZ + 2 * HW_KASUMI_BLK_SZ)
+#define MAX_HASH_SETUP_BLK_SZ sizeof(union hw_auth_algo_blk)
+
+struct qat_crypto_desc {
+ uint8_t qcd_content_desc[CONTENT_DESC_MAX_SIZE]; /* must be first */
+	/* used only for qat 1.5 */
+ uint8_t qcd_hash_state_prefix_buf[QAT_GCM_AAD_SIZE_MAX];
+
+ bus_addr_t qcd_desc_paddr;
+ bus_addr_t qcd_hash_state_paddr;
+
+ enum fw_slice qcd_slices[MAX_FW_SLICE];
+ enum fw_la_cmd_id qcd_cmd_id;
+ enum hw_cipher_dir qcd_cipher_dir;
+
+ /* content desc info */
+ uint8_t qcd_hdr_sz; /* in quad words */
+ uint8_t qcd_hw_blk_sz; /* in quad words */
+ uint32_t qcd_cipher_offset;
+ uint32_t qcd_auth_offset;
+ /* hash info */
+ uint8_t qcd_state_storage_sz; /* in quad words */
+ uint32_t qcd_gcm_aad_sz_offset1;
+ uint32_t qcd_gcm_aad_sz_offset2;
+ /* cipher info */
+ uint16_t qcd_cipher_blk_sz; /* in bytes */
+ uint16_t qcd_auth_sz; /* in bytes */
+
+ uint8_t qcd_req_cache[QAT_MSG_SIZE_TO_BYTES(QAT_MAX_MSG_SIZE)];
+} __aligned(QAT_OPTIMAL_ALIGN);
+
+/* should be aligned to 64 bytes */
+struct qat_session {
+	struct qat_crypto_desc *qs_dec_desc;	/* should be at top of struct */
+ /* decrypt or auth then decrypt or auth */
+
+ struct qat_crypto_desc *qs_enc_desc;
+ /* encrypt or encrypt then auth */
+
+ struct qat_dmamem qs_desc_mem;
+
+ enum hw_cipher_algo qs_cipher_algo;
+ enum hw_cipher_mode qs_cipher_mode;
+ enum hw_auth_algo qs_auth_algo;
+ enum hw_auth_mode qs_auth_mode;
+
+ const uint8_t *qs_cipher_key;
+ int qs_cipher_klen;
+ const uint8_t *qs_auth_key;
+ int qs_auth_klen;
+ int qs_auth_mlen;
+
+ uint32_t qs_status;
+#define QAT_SESSION_STATUS_ACTIVE (1 << 0)
+#define QAT_SESSION_STATUS_FREEING (1 << 1)
+ uint32_t qs_inflight;
+ int qs_aad_length;
+ bool qs_need_wakeup;
+
+ struct mtx qs_session_mtx;
+};
+
+struct qat_crypto_bank {
+ uint16_t qcb_bank;
+
+ struct qat_ring *qcb_sym_tx;
+ struct qat_ring *qcb_sym_rx;
+
+ struct qat_dmamem qcb_symck_dmamems[QAT_NSYMCOOKIE];
+ struct qat_sym_cookie *qcb_symck_free[QAT_NSYMCOOKIE];
+ uint32_t qcb_symck_free_count;
+
+ struct mtx qcb_bank_mtx;
+
+ char qcb_ring_names[2][QAT_RING_NAME_SIZE]; /* sym tx,rx */
+};
+
+struct qat_crypto {
+ struct qat_softc *qcy_sc;
+ uint32_t qcy_bank_mask;
+ uint16_t qcy_num_banks;
+
+ int32_t qcy_cid; /* OpenCrypto driver ID */
+
+ struct qat_crypto_bank *qcy_banks; /* array of qat_crypto_bank */
+
+ uint32_t qcy_session_free_count;
+
+ struct mtx qcy_crypto_mtx;
+};
+
+struct qat_hw {
+ int8_t qhw_sram_bar_id;
+ int8_t qhw_misc_bar_id;
+ int8_t qhw_etr_bar_id;
+
+ bus_size_t qhw_cap_global_offset;
+ bus_size_t qhw_ae_offset;
+ bus_size_t qhw_ae_local_offset;
+ bus_size_t qhw_etr_bundle_size;
+
+ /* crypto processing callbacks */
+ size_t qhw_crypto_opaque_offset;
+ void (*qhw_crypto_setup_req_params)(struct qat_crypto_bank *,
+ struct qat_session *, struct qat_crypto_desc const *,
+ struct qat_sym_cookie *, struct cryptop *);
+ void (*qhw_crypto_setup_desc)(struct qat_crypto *, struct qat_session *,
+ struct qat_crypto_desc *);
+
+ uint8_t qhw_num_banks; /* max number of banks */
+ uint8_t qhw_num_ap_banks; /* max number of AutoPush banks */
+ uint8_t qhw_num_rings_per_bank; /* rings per bank */
+ uint8_t qhw_num_accel; /* max number of accelerators */
+ uint8_t qhw_num_engines; /* max number of accelerator engines */
+ uint8_t qhw_tx_rx_gap;
+ uint32_t qhw_tx_rings_mask;
+ uint32_t qhw_clock_per_sec;
+ bool qhw_fw_auth;
+ uint32_t qhw_fw_req_size;
+ uint32_t qhw_fw_resp_size;
+
+ uint8_t qhw_ring_sym_tx;
+ uint8_t qhw_ring_sym_rx;
+ uint8_t qhw_ring_asym_tx;
+ uint8_t qhw_ring_asym_rx;
+
+ /* MSIx */
+ uint32_t qhw_msix_ae_vec_gap; /* gap to ae vec from bank */
+
+ const char *qhw_mof_fwname;
+ const char *qhw_mmp_fwname;
+
+ uint32_t qhw_prod_type; /* cpu type */
+
+ /* setup callbacks */
+ uint32_t (*qhw_get_accel_mask)(struct qat_softc *);
+ uint32_t (*qhw_get_ae_mask)(struct qat_softc *);
+ enum qat_sku (*qhw_get_sku)(struct qat_softc *);
+ uint32_t (*qhw_get_accel_cap)(struct qat_softc *);
+ const char *(*qhw_get_fw_uof_name)(struct qat_softc *);
+ void (*qhw_enable_intr)(struct qat_softc *);
+ void (*qhw_init_etr_intr)(struct qat_softc *, int);
+ int (*qhw_init_admin_comms)(struct qat_softc *);
+ int (*qhw_send_admin_init)(struct qat_softc *);
+ int (*qhw_init_arb)(struct qat_softc *);
+ void (*qhw_get_arb_mapping)(struct qat_softc *, const uint32_t **);
+ void (*qhw_enable_error_correction)(struct qat_softc *);
+ int (*qhw_check_uncorrectable_error)(struct qat_softc *);
+ void (*qhw_print_err_registers)(struct qat_softc *);
+ void (*qhw_disable_error_interrupts)(struct qat_softc *);
+ int (*qhw_check_slice_hang)(struct qat_softc *);
+ int (*qhw_set_ssm_wdtimer)(struct qat_softc *);
+};
+
+
+/* sc_flags */
+#define QAT_FLAG_ESRAM_ENABLE_AUTO_INIT (1 << 0)
+#define QAT_FLAG_SHRAM_WAIT_READY (1 << 1)
+
+/* sc_accel_cap */
+#define QAT_ACCEL_CAP_CRYPTO_SYMMETRIC (1 << 0)
+#define QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC (1 << 1)
+#define QAT_ACCEL_CAP_CIPHER (1 << 2)
+#define QAT_ACCEL_CAP_AUTHENTICATION (1 << 3)
+#define QAT_ACCEL_CAP_REGEX (1 << 4)
+#define QAT_ACCEL_CAP_COMPRESSION (1 << 5)
+#define QAT_ACCEL_CAP_LZS_COMPRESSION (1 << 6)
+#define QAT_ACCEL_CAP_RANDOM_NUMBER (1 << 7)
+#define QAT_ACCEL_CAP_ZUC (1 << 8)
+#define QAT_ACCEL_CAP_SHA3 (1 << 9)
+#define QAT_ACCEL_CAP_KPT (1 << 10)
+
+#define QAT_ACCEL_CAP_BITS \
+ "\177\020" \
+ "b\x0a" "KPT\0" \
+ "b\x09" "SHA3\0" \
+ "b\x08" "ZUC\0" \
+ "b\x07" "RANDOM_NUMBER\0" \
+ "b\x06" "LZS_COMPRESSION\0" \
+ "b\x05" "COMPRESSION\0" \
+ "b\x04" "REGEX\0" \
+ "b\x03" "AUTHENTICATION\0" \
+ "b\x02" "CIPHER\0" \
+ "b\x01" "CRYPTO_ASYMMETRIC\0" \
+ "b\x00" "CRYPTO_SYMMETRIC\0"
+
+#define QAT_HI_PRIO_RING_WEIGHT 0xfc
+#define QAT_LO_PRIO_RING_WEIGHT 0xfe
+#define QAT_DEFAULT_RING_WEIGHT 0xff
+#define QAT_DEFAULT_PVL 0
+
+struct firmware;
+struct resource;
+
+struct qat_softc {
+ device_t sc_dev;
+
+ struct resource *sc_res[MAX_BARS];
+ int sc_rid[MAX_BARS];
+ bus_space_tag_t sc_csrt[MAX_BARS];
+ bus_space_handle_t sc_csrh[MAX_BARS];
+
+ uint32_t sc_ae_num;
+ uint32_t sc_ae_mask;
+
+ struct qat_crypto sc_crypto; /* crypto services */
+
+ struct qat_hw sc_hw;
+
+ uint8_t sc_rev;
+ enum qat_sku sc_sku;
+ uint32_t sc_flags;
+
+ uint32_t sc_accel_num;
+ uint32_t sc_accel_mask;
+ uint32_t sc_accel_cap;
+
+	struct qat_admin_rings sc_admin_rings;	/* used only for qat 1.5 */
+	struct qat_admin_comms sc_admin_comms;	/* used only for qat 1.7 */
+
+ /* ETR */
+ struct qat_bank *sc_etr_banks; /* array of etr banks */
+ struct qat_ap_bank *sc_etr_ap_banks; /* array of etr auto push banks */
+
+ /* AE */
+ struct qat_ae sc_ae[MAX_NUM_AE];
+
+ /* Interrupt */
+ struct resource *sc_ih; /* ae cluster ih */
+ void *sc_ih_cookie; /* ae cluster ih cookie */
+
+ /* Counters */
+ counter_u64_t sc_gcm_aad_restarts;
+ counter_u64_t sc_gcm_aad_updates;
+ counter_u64_t sc_ring_full_restarts;
+
+ /* Firmware */
+ void *sc_fw_mof; /* mof data */
+ size_t sc_fw_mof_size; /* mof size */
+ struct qat_mof sc_mof; /* mof sections */
+
+ const char *sc_fw_uof_name; /* uof/suof name in mof */
+
+ void *sc_fw_uof; /* uof head */
+ size_t sc_fw_uof_size; /* uof size */
+ struct qat_aefw_uof sc_aefw_uof; /* UOF_OBJS in uof */
+
+ void *sc_fw_suof; /* suof head */
+ size_t sc_fw_suof_size; /* suof size */
+ struct qat_aefw_suof sc_aefw_suof; /* suof context */
+
+ void *sc_fw_mmp; /* mmp data */
+ size_t sc_fw_mmp_size; /* mmp size */
+};
+
+static inline void
+qat_bar_write_4(struct qat_softc *sc, int baroff, bus_size_t offset,
+ uint32_t value)
+{
+
+ MPASS(baroff >= 0 && baroff < MAX_BARS);
+
+ bus_space_write_4(sc->sc_csrt[baroff],
+ sc->sc_csrh[baroff], offset, value);
+}
+
+static inline uint32_t
+qat_bar_read_4(struct qat_softc *sc, int baroff, bus_size_t offset)
+{
+
+ MPASS(baroff >= 0 && baroff < MAX_BARS);
+
+ return bus_space_read_4(sc->sc_csrt[baroff],
+ sc->sc_csrh[baroff], offset);
+}
+
+static inline void
+qat_misc_write_4(struct qat_softc *sc, bus_size_t offset, uint32_t value)
+{
+
+ qat_bar_write_4(sc, sc->sc_hw.qhw_misc_bar_id, offset, value);
+}
+
+static inline uint32_t
+qat_misc_read_4(struct qat_softc *sc, bus_size_t offset)
+{
+
+ return qat_bar_read_4(sc, sc->sc_hw.qhw_misc_bar_id, offset);
+}
+
+static inline void
+qat_misc_read_write_or_4(struct qat_softc *sc, bus_size_t offset,
+ uint32_t value)
+{
+ uint32_t reg;
+
+ reg = qat_misc_read_4(sc, offset);
+ reg |= value;
+ qat_misc_write_4(sc, offset, reg);
+}
+
+static inline void
+qat_misc_read_write_and_4(struct qat_softc *sc, bus_size_t offset,
+ uint32_t mask)
+{
+ uint32_t reg;
+
+ reg = qat_misc_read_4(sc, offset);
+ reg &= mask;
+ qat_misc_write_4(sc, offset, reg);
+}
+
+static inline void
+qat_etr_write_4(struct qat_softc *sc, bus_size_t offset, uint32_t value)
+{
+
+ qat_bar_write_4(sc, sc->sc_hw.qhw_etr_bar_id, offset, value);
+}
+
+static inline uint32_t
+qat_etr_read_4(struct qat_softc *sc, bus_size_t offset)
+{
+
+ return qat_bar_read_4(sc, sc->sc_hw.qhw_etr_bar_id, offset);
+}
+
+static inline void
+qat_ae_local_write_4(struct qat_softc *sc, u_char ae, bus_size_t offset,
+ uint32_t value)
+{
+
+ offset = __SHIFTIN(ae & sc->sc_ae_mask, AE_LOCAL_AE_MASK) |
+ (offset & AE_LOCAL_CSR_MASK);
+
+ qat_misc_write_4(sc, sc->sc_hw.qhw_ae_local_offset + offset,
+ value);
+}
+
+static inline uint32_t
+qat_ae_local_read_4(struct qat_softc *sc, u_char ae, bus_size_t offset)
+{
+
+ offset = __SHIFTIN(ae & sc->sc_ae_mask, AE_LOCAL_AE_MASK) |
+ (offset & AE_LOCAL_CSR_MASK);
+
+ return qat_misc_read_4(sc, sc->sc_hw.qhw_ae_local_offset + offset);
+}
+
+static inline void
+qat_ae_xfer_write_4(struct qat_softc *sc, u_char ae, bus_size_t offset,
+ uint32_t value)
+{
+ offset = __SHIFTIN(ae & sc->sc_ae_mask, AE_XFER_AE_MASK) |
+ __SHIFTIN(offset, AE_XFER_CSR_MASK);
+
+ qat_misc_write_4(sc, sc->sc_hw.qhw_ae_offset + offset, value);
+}
+
+static inline void
+qat_cap_global_write_4(struct qat_softc *sc, bus_size_t offset, uint32_t value)
+{
+
+ qat_misc_write_4(sc, sc->sc_hw.qhw_cap_global_offset + offset, value);
+}
+
+static inline uint32_t
+qat_cap_global_read_4(struct qat_softc *sc, bus_size_t offset)
+{
+
+ return qat_misc_read_4(sc, sc->sc_hw.qhw_cap_global_offset + offset);
+}
+
+
+static inline void
+qat_etr_bank_write_4(struct qat_softc *sc, int bank,
+ bus_size_t offset, uint32_t value)
+{
+
+ qat_etr_write_4(sc, sc->sc_hw.qhw_etr_bundle_size * bank + offset,
+ value);
+}
+
+static inline uint32_t
+qat_etr_bank_read_4(struct qat_softc *sc, int bank,
+ bus_size_t offset)
+{
+
+ return qat_etr_read_4(sc,
+ sc->sc_hw.qhw_etr_bundle_size * bank + offset);
+}
+
+static inline void
+qat_etr_ap_bank_write_4(struct qat_softc *sc, int ap_bank,
+ bus_size_t offset, uint32_t value)
+{
+
+ qat_etr_write_4(sc, ETR_AP_BANK_OFFSET * ap_bank + offset, value);
+}
+
+static inline uint32_t
+qat_etr_ap_bank_read_4(struct qat_softc *sc, int ap_bank,
+ bus_size_t offset)
+{
+
+ return qat_etr_read_4(sc, ETR_AP_BANK_OFFSET * ap_bank + offset);
+}
+
+
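+/* Ring CSRs in a bank are arrays with a 4-byte stride, hence (ring << 2). */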
+static inline void
+qat_etr_bank_ring_write_4(struct qat_softc *sc, int bank, int ring,
+ bus_size_t offset, uint32_t value)
+{
+
+ qat_etr_bank_write_4(sc, bank, (ring << 2) + offset, value);
+}
+
+static inline uint32_t
+qat_etr_bank_ring_read_4(struct qat_softc *sc, int bank, int ring,
+ bus_size_t offset)
+{
+
+	return qat_etr_bank_read_4(sc, bank, (ring << 2) + offset);
+}
+
+static inline void
+qat_etr_bank_ring_base_write_8(struct qat_softc *sc, int bank, int ring,
+ uint64_t value)
+{
+ uint32_t lo, hi;
+
+ lo = (uint32_t)(value & 0xffffffff);
+ hi = (uint32_t)((value & 0xffffffff00000000ULL) >> 32);
+ qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_LBASE, lo);
+ qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_UBASE, hi);
+}
+
+static inline void
+qat_arb_ringsrvarben_write_4(struct qat_softc *sc, int index, uint32_t value)
+{
+
+ qat_etr_write_4(sc, ARB_RINGSRVARBEN_OFFSET +
+ (ARB_REG_SLOT * index), value);
+}
+
+static inline void
+qat_arb_sarconfig_write_4(struct qat_softc *sc, int index, uint32_t value)
+{
+
+ qat_etr_write_4(sc, ARB_OFFSET +
+ (ARB_REG_SIZE * index), value);
+}
+
+static inline void
+qat_arb_wrk_2_ser_map_write_4(struct qat_softc *sc, int index, uint32_t value)
+{
+
+ qat_etr_write_4(sc, ARB_OFFSET + ARB_WRK_2_SER_MAP_OFFSET +
+ (ARB_REG_SIZE * index), value);
+}
+
+void * qat_alloc_mem(size_t);
+void qat_free_mem(void *);
+void qat_free_dmamem(struct qat_softc *, struct qat_dmamem *);
+int qat_alloc_dmamem(struct qat_softc *, struct qat_dmamem *, int,
+ bus_size_t, bus_size_t);
+
+int qat_etr_setup_ring(struct qat_softc *, int, uint32_t, uint32_t,
+ uint32_t, qat_cb_t, void *, const char *,
+ struct qat_ring **);
+int qat_etr_put_msg(struct qat_softc *, struct qat_ring *,
+ uint32_t *);
+
+void qat_memcpy_htobe64(void *, const void *, size_t);
+void qat_memcpy_htobe32(void *, const void *, size_t);
+void qat_memcpy_htobe(void *, const void *, size_t, uint32_t);
+void qat_crypto_gmac_precompute(const struct qat_crypto_desc *,
+ const uint8_t *key, int klen,
+ const struct qat_sym_hash_def *, uint8_t *);
+void qat_crypto_hmac_precompute(const struct qat_crypto_desc *,
+ const uint8_t *, int, const struct qat_sym_hash_def *,
+ uint8_t *, uint8_t *);
+uint16_t qat_crypto_load_cipher_session(const struct qat_crypto_desc *,
+ const struct qat_session *);
+uint16_t qat_crypto_load_auth_session(const struct qat_crypto_desc *,
+ const struct qat_session *,
+ struct qat_sym_hash_def const **);
+
+#endif
diff --git a/sys/modules/Makefile b/sys/modules/Makefile
index b0feddbad301..58cdf5071436 100644
--- a/sys/modules/Makefile
+++ b/sys/modules/Makefile
@@ -300,6 +300,7 @@ SUBDIR= \
pty \
puc \
pwm \
+ ${_qat} \
${_qlxge} \
${_qlxgb} \
${_qlxgbe} \
@@ -630,6 +631,7 @@ _ntb= ntb
_ocs_fc= ocs_fc
_ossl= ossl
_pccard= pccard
+_qat= qat
.if ${MK_OFED} != "no" || defined(ALL_MODULES)
_rdma= rdma
.endif
diff --git a/sys/modules/qat/Makefile b/sys/modules/qat/Makefile
new file mode 100644
index 000000000000..57301b5b78e3
--- /dev/null
+++ b/sys/modules/qat/Makefile
@@ -0,0 +1,19 @@
+# $FreeBSD$
+
+.PATH: ${SRCTOP}/sys/dev/qat
+
+KMOD= qat
+
+SRCS= qat.c \
+ qat_ae.c \
+ qat_c2xxx.c \
+ qat_c3xxx.c \
+ qat_c62x.c \
+ qat_d15xx.c \
+ qat_dh895xcc.c \
+ qat_hw15.c \
+ qat_hw17.c
+
+SRCS+= bus_if.h cryptodev_if.h device_if.h pci_if.h
+
+.include <bsd.kmod.mk>