aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDoug Ambrisko <ambrisko@FreeBSD.org>2023-01-27 23:46:17 +0000
committerDoug Ambrisko <ambrisko@FreeBSD.org>2023-02-06 16:46:02 +0000
commit9c067b844f85a224f0416e6eb46ba3ef82aec5c4 (patch)
tree9d7f2b4b676b739a155ed7083247e32f40050deb
parent94e21add45344f0669f910ea77db499e8c892c90 (diff)
downloadsrc-9c067b844f85a224f0416e6eb46ba3ef82aec5c4.tar.gz
src-9c067b844f85a224f0416e6eb46ba3ef82aec5c4.zip
enic: Cisco VIC driver
This driver is based on the enic (Cisco VIC) DPDK driver. It provides basic ethernet functionality. It has been run with various VIC cards to do UEFI PXE boot with NFS root.
-rw-r--r--share/man/man4/Makefile1
-rw-r--r--share/man/man4/enic.490
-rw-r--r--sys/conf/files.amd648
-rw-r--r--sys/dev/enic/cq_desc.h97
-rw-r--r--sys/dev/enic/cq_enet_desc.h244
-rw-r--r--sys/dev/enic/enic.h406
-rw-r--r--sys/dev/enic/enic_compat.h65
-rw-r--r--sys/dev/enic/enic_res.c212
-rw-r--r--sys/dev/enic/enic_res.h73
-rw-r--r--sys/dev/enic/enic_txrx.c485
-rw-r--r--sys/dev/enic/if_enic.c1583
-rw-r--r--sys/dev/enic/rq_enet_desc.h46
-rw-r--r--sys/dev/enic/vnic_cq.c45
-rw-r--r--sys/dev/enic/vnic_cq.h164
-rw-r--r--sys/dev/enic/vnic_dev.c1039
-rw-r--r--sys/dev/enic/vnic_dev.h170
-rw-r--r--sys/dev/enic/vnic_devcmd.h1182
-rw-r--r--sys/dev/enic/vnic_enet.h66
-rw-r--r--sys/dev/enic/vnic_intr.c49
-rw-r--r--sys/dev/enic/vnic_intr.h100
-rw-r--r--sys/dev/enic/vnic_nic.h60
-rw-r--r--sys/dev/enic/vnic_resource.h67
-rw-r--r--sys/dev/enic/vnic_rq.c97
-rw-r--r--sys/dev/enic/vnic_rq.h142
-rw-r--r--sys/dev/enic/vnic_rss.h32
-rw-r--r--sys/dev/enic/vnic_stats.h56
-rw-r--r--sys/dev/enic/vnic_wq.c89
-rw-r--r--sys/dev/enic/vnic_wq.h108
-rw-r--r--sys/dev/enic/wq_enet_desc.h84
-rw-r--r--sys/modules/Makefile2
-rw-r--r--sys/modules/enic/Makefile19
31 files changed, 6881 insertions, 0 deletions
diff --git a/share/man/man4/Makefile b/share/man/man4/Makefile
index 1c7703bdaeb2..897c72d43baf 100644
--- a/share/man/man4/Makefile
+++ b/share/man/man4/Makefile
@@ -143,6 +143,7 @@ MAN= aac.4 \
em.4 \
ena.4 \
enc.4 \
+ enic.4 \
epair.4 \
est.4 \
et.4 \
diff --git a/share/man/man4/enic.4 b/share/man/man4/enic.4
new file mode 100644
index 000000000000..d33df8515f3e
--- /dev/null
+++ b/share/man/man4/enic.4
@@ -0,0 +1,90 @@
+.\" Copyright 2008-2017 Cisco Systems, Inc.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd September 7, 2022
+.Dt ENIC 4
+.Os
+.Sh NAME
+.Nm enic
+.Nd "VIC Ethernet NIC driver"
+.Sh SYNOPSIS
+To compile this driver into the kernel,
+place the following lines in your
+kernel configuration file:
+.Bd -ragged -offset indent
+.Cd "device iflib"
+.Cd "device enic"
+.Ed
+.Pp
+To load the driver as a module at run-time,
+run the following command as root:
+.Bd -literal -offset indent
+kldload if_enic
+.Ed
+.Pp
+To load the driver as a
+module at boot time, place the following lines in
+.Xr loader.conf 5 :
+.Bd -literal -offset indent
+if_enic_load="YES"
+.Ed
+.Sh DESCRIPTION
+The
+.Nm
+driver provides support for Cisco Virtual Interface Cards. Support
+is limited to basic network connectivity. Media is controlled by the
+NIC itself since there can be multiple virtual PCI NIC devices exposed
+to the PCI bus.
+.Sh HARDWARE
+The
+.Nm
+driver supports all known Cisco VIC cards.
+.Sh CONFIGURATION
+The
+.Nm
+network interface is configured using
+.Xr ifconfig 8
+and the
+.Xr sysctl 8
+tree at
+.Dv dev.enic.<N> .
+All configurable entries are also tunables, and can be put directly into the
+.Xr loader.conf 5
+for persistent configuration.
+.Sh SEE ALSO
+.Xr ifconfig 8
+.Sh HISTORY
+The
+.Nm
+device driver first appeared in
+.Fx 14.0 .
+.Sh AUTHORS
+.An -nosplit
+The
+.Nm
+driver was written by
+.An Cisco UCS team
+based on the DPDK driver.
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index f0fba5a88869..75be68543394 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -119,6 +119,14 @@ dev/axgbe/xgbe-txrx.c optional axp
dev/axgbe/xgbe_osdep.c optional axp
dev/axgbe/xgbe-i2c.c optional axp
dev/axgbe/xgbe-phy-v2.c optional axp
+dev/enic/enic_res.c optional enic
+dev/enic/enic_txrx.c optional enic
+dev/enic/if_enic.c optional enic
+dev/enic/vnic_cq.c optional enic
+dev/enic/vnic_dev.c optional enic
+dev/enic/vnic_intr.c optional enic
+dev/enic/vnic_rq.c optional enic
+dev/enic/vnic_wq.c optional enic
dev/hyperv/vmbus/amd64/hyperv_machdep.c optional hyperv
dev/hyperv/vmbus/amd64/vmbus_vector.S optional hyperv
dev/iavf/if_iavf_iflib.c optional iavf pci \
diff --git a/sys/dev/enic/cq_desc.h b/sys/dev/enic/cq_desc.h
new file mode 100644
index 000000000000..ae8847c6d9a1
--- /dev/null
+++ b/sys/dev/enic/cq_desc.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _CQ_DESC_H_
+#define _CQ_DESC_H_
+
+/*
+ * Completion queue descriptor types
+ */
+enum cq_desc_types {
+ CQ_DESC_TYPE_WQ_ENET = 0,
+ CQ_DESC_TYPE_DESC_COPY = 1,
+ CQ_DESC_TYPE_WQ_EXCH = 2,
+ CQ_DESC_TYPE_RQ_ENET = 3,
+ CQ_DESC_TYPE_RQ_FCP = 4,
+ CQ_DESC_TYPE_IOMMU_MISS = 5,
+ CQ_DESC_TYPE_SGL = 6,
+ CQ_DESC_TYPE_CLASSIFIER = 7,
+ CQ_DESC_TYPE_TEST = 127,
+};
+
+/* Completion queue descriptor: 16B
+ *
+ * All completion queues have this basic layout. The
+ * type_specfic area is unique for each completion
+ * queue type.
+ */
+struct cq_desc {
+ __le16 completed_index;
+ __le16 q_number;
+ u8 type_specfic[11];
+ u8 type_color;
+};
+
+#define CQ_DESC_TYPE_BITS 4
+#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
+#define CQ_DESC_COLOR_MASK 1
+#define CQ_DESC_COLOR_SHIFT 7
+#define CQ_DESC_COLOR_MASK_NOSHIFT 0x80
+#define CQ_DESC_Q_NUM_BITS 10
+#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
+#define CQ_DESC_COMP_NDX_BITS 12
+#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
+
+static inline void cq_color_enc(struct cq_desc *desc, const u8 color)
+{
+ if (color)
+ desc->type_color |= (1 << CQ_DESC_COLOR_SHIFT);
+ else
+ desc->type_color &= ~(1 << CQ_DESC_COLOR_SHIFT);
+}
+
+static inline void cq_desc_enc(struct cq_desc *desc,
+ const u8 type, const u8 color, const u16 q_number,
+ const u16 completed_index)
+{
+ desc->type_color = (type & CQ_DESC_TYPE_MASK) |
+ ((color & CQ_DESC_COLOR_MASK) << CQ_DESC_COLOR_SHIFT);
+ desc->q_number = cpu_to_le16(q_number & CQ_DESC_Q_NUM_MASK);
+ desc->completed_index = cpu_to_le16(completed_index &
+ CQ_DESC_COMP_NDX_MASK);
+}
+
+static inline void cq_desc_dec(const struct cq_desc *desc_arg,
+ u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
+{
+ const struct cq_desc *desc = desc_arg;
+ const u8 type_color = desc->type_color;
+
+ *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+
+ /*
+ * Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+
+ rmb();
+
+ *type = type_color & CQ_DESC_TYPE_MASK;
+ *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index) &
+ CQ_DESC_COMP_NDX_MASK;
+}
+
+static inline void cq_color_dec(const struct cq_desc *desc_arg, u8 *color)
+{
+ volatile const struct cq_desc *desc = desc_arg;
+
+ *color = (desc->type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+}
+
+#endif /* _CQ_DESC_H_ */
diff --git a/sys/dev/enic/cq_enet_desc.h b/sys/dev/enic/cq_enet_desc.h
new file mode 100644
index 000000000000..5ced63cb1613
--- /dev/null
+++ b/sys/dev/enic/cq_enet_desc.h
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _CQ_ENET_DESC_H_
+#define _CQ_ENET_DESC_H_
+
+#include "cq_desc.h"
+
+/* Ethernet completion queue descriptor: 16B */
+struct cq_enet_wq_desc {
+ __le16 completed_index;
+ __le16 q_number;
+ u8 reserved[11];
+ u8 type_color;
+};
+
+static inline void cq_enet_wq_desc_enc(struct cq_enet_wq_desc *desc,
+ u8 type, u8 color, u16 q_number, u16 completed_index)
+{
+ cq_desc_enc((struct cq_desc *)desc, type,
+ color, q_number, completed_index);
+}
+
+static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
+ u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
+{
+ cq_desc_dec((struct cq_desc *)desc, type,
+ color, q_number, completed_index);
+}
+
+/* Completion queue descriptor: Ethernet receive queue, 16B */
+struct cq_enet_rq_desc {
+ __le16 completed_index_flags;
+ __le16 q_number_rss_type_flags;
+ __le32 rss_hash;
+ __le16 bytes_written_flags;
+ __le16 vlan;
+ __le16 checksum_fcoe;
+ u8 flags;
+ u8 type_color;
+};
+
+/* Completion queue descriptor: Ethernet receive queue, 16B */
+struct cq_enet_rq_clsf_desc {
+ __le16 completed_index_flags;
+ __le16 q_number_rss_type_flags;
+ __le16 filter_id;
+ __le16 lif;
+ __le16 bytes_written_flags;
+ __le16 vlan;
+ __le16 checksum_fcoe;
+ u8 flags;
+ u8 type_color;
+};
+
+#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
+#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
+#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
+#define CQ_ENET_RQ_DESC_FLAGS_SOP (0x1 << 15)
+
+#define CQ_ENET_RQ_DESC_RSS_TYPE_BITS 4
+#define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \
+ ((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1)
+#define CQ_ENET_RQ_DESC_RSS_TYPE_NONE 0
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4 1
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4 2
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6 3
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6 4
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX 5
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX 6
+
+#define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC (0x1 << 14)
+
+#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS 14
+#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \
+ ((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14)
+#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15)
+
+#define CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_BITS 12
+#define CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK \
+ ((1 << CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_BITS) - 1)
+#define CQ_ENET_RQ_DESC_VLAN_TCI_CFI_MASK (0x1 << 12)
+#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_BITS 3
+#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_MASK \
+ ((1 << CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_BITS) - 1)
+#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_SHIFT 13
+
+#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 8
+#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \
+ ((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8
+#define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \
+ ((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT 8
+
+#define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK (0x1 << 0)
+#define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK (0x1 << 0)
+#define CQ_ENET_RQ_DESC_FLAGS_UDP (0x1 << 1)
+#define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR (0x1 << 1)
+#define CQ_ENET_RQ_DESC_FLAGS_TCP (0x1 << 2)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK (0x1 << 3)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV6 (0x1 << 4)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4 (0x1 << 5)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6)
+#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7)
+
+static inline void cq_enet_rq_desc_enc(struct cq_enet_rq_desc *desc,
+ u8 type, u8 color, u16 q_number, u16 completed_index,
+ u8 ingress_port, u8 fcoe, u8 eop, u8 sop, u8 rss_type, u8 csum_not_calc,
+ u32 rss_hash, u16 bytes_written, u8 packet_error, u8 vlan_stripped,
+ u16 vlan, u16 checksum, u8 fcoe_sof, u8 fcoe_fc_crc_ok,
+ u8 fcoe_enc_error, u8 fcoe_eof, u8 tcp_udp_csum_ok, u8 udp, u8 tcp,
+ u8 ipv4_csum_ok, u8 ipv6, u8 ipv4, u8 ipv4_fragment, u8 fcs_ok)
+{
+ cq_desc_enc((struct cq_desc *)desc, type,
+ color, q_number, completed_index);
+
+ desc->completed_index_flags |= cpu_to_le16(
+ (ingress_port ? CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT : 0) |
+ (fcoe ? CQ_ENET_RQ_DESC_FLAGS_FCOE : 0) |
+ (eop ? CQ_ENET_RQ_DESC_FLAGS_EOP : 0) |
+ (sop ? CQ_ENET_RQ_DESC_FLAGS_SOP : 0));
+
+ desc->q_number_rss_type_flags |= cpu_to_le16(
+ ((rss_type & CQ_ENET_RQ_DESC_RSS_TYPE_MASK) <<
+ CQ_DESC_Q_NUM_BITS) |
+ (csum_not_calc ? CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC : 0));
+
+ desc->rss_hash = cpu_to_le32(rss_hash);
+
+ desc->bytes_written_flags = cpu_to_le16(
+ (bytes_written & CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK) |
+ (packet_error ? CQ_ENET_RQ_DESC_FLAGS_TRUNCATED : 0) |
+ (vlan_stripped ? CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED : 0));
+
+ desc->vlan = cpu_to_le16(vlan);
+
+ if (fcoe) {
+ desc->checksum_fcoe = cpu_to_le16(
+ (fcoe_sof & CQ_ENET_RQ_DESC_FCOE_SOF_MASK) |
+ ((fcoe_eof & CQ_ENET_RQ_DESC_FCOE_EOF_MASK) <<
+ CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT));
+ } else {
+ desc->checksum_fcoe = cpu_to_le16(checksum);
+ }
+
+ desc->flags =
+ (tcp_udp_csum_ok ? CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK : 0) |
+ (udp ? CQ_ENET_RQ_DESC_FLAGS_UDP : 0) |
+ (tcp ? CQ_ENET_RQ_DESC_FLAGS_TCP : 0) |
+ (ipv4_csum_ok ? CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK : 0) |
+ (ipv6 ? CQ_ENET_RQ_DESC_FLAGS_IPV6 : 0) |
+ (ipv4 ? CQ_ENET_RQ_DESC_FLAGS_IPV4 : 0) |
+ (ipv4_fragment ? CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT : 0) |
+ (fcs_ok ? CQ_ENET_RQ_DESC_FLAGS_FCS_OK : 0) |
+ (fcoe_fc_crc_ok ? CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK : 0) |
+ (fcoe_enc_error ? CQ_ENET_RQ_DESC_FCOE_ENC_ERROR : 0);
+}
+
+static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
+ u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
+ u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
+ u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
+ u8 *vlan_stripped, u16 *vlan_tci, u16 *checksum, u8 *fcoe_sof,
+ u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
+ u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
+ u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
+{
+ u16 completed_index_flags;
+ u16 q_number_rss_type_flags;
+ u16 bytes_written_flags;
+
+ cq_desc_dec((struct cq_desc *)desc, type,
+ color, q_number, completed_index);
+
+ completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+ q_number_rss_type_flags =
+ le16_to_cpu(desc->q_number_rss_type_flags);
+ bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+
+ *ingress_port = (completed_index_flags &
+ CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
+ *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
+ 1 : 0;
+ *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
+ 1 : 0;
+ *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
+ 1 : 0;
+
+ *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
+ CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+ *csum_not_calc = (q_number_rss_type_flags &
+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
+
+ *rss_hash = le32_to_cpu(desc->rss_hash);
+
+ *bytes_written = bytes_written_flags &
+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+ *packet_error = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
+ *vlan_stripped = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
+
+ /*
+ * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
+ */
+ *vlan_tci = le16_to_cpu(desc->vlan);
+
+ if (*fcoe) {
+ *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
+ CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
+ *fcoe_fc_crc_ok = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
+ *fcoe_enc_error = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
+ *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >>
+ CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
+ CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
+ *checksum = 0;
+ } else {
+ *fcoe_sof = 0;
+ *fcoe_fc_crc_ok = 0;
+ *fcoe_enc_error = 0;
+ *fcoe_eof = 0;
+ *checksum = le16_to_cpu(desc->checksum_fcoe);
+ }
+
+ *tcp_udp_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
+ *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
+ *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
+ *ipv4_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
+ *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
+ *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
+ *ipv4_fragment =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
+ *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
+}
+
+#endif /* _CQ_ENET_DESC_H_ */
diff --git a/sys/dev/enic/enic.h b/sys/dev/enic/enic.h
new file mode 100644
index 000000000000..9d84ca6ecaf6
--- /dev/null
+++ b/sys/dev/enic/enic.h
@@ -0,0 +1,406 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: $");
+
+#ifndef _ENIC_H
+#define _ENIC_H
+
+#include <sys/param.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+
+#include <machine/bus.h>
+
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/iflib.h>
+
+#define u8 uint8_t
+#define u16 uint16_t
+#define u32 uint32_t
+#define u64 uint64_t
+
+struct enic_bar_info {
+ struct resource *res;
+ bus_space_tag_t tag;
+ bus_space_handle_t handle;
+ bus_size_t size;
+ int rid;
+ int offset;
+};
+
+#define ENIC_BUS_WRITE_8(res, index, value) \
+ bus_space_write_8(res->bar.tag, res->bar.handle, \
+ res->bar.offset + (index), value)
+#define ENIC_BUS_WRITE_4(res, index, value) \
+ bus_space_write_4(res->bar.tag, res->bar.handle, \
+ res->bar.offset + (index), value)
+#define ENIC_BUS_WRITE_REGION_4(res, index, values, count) \
+ bus_space_write_region_4(res->bar.tag, res->bar.handle, \
+ res->bar.offset + (index), values, count);
+
+#define ENIC_BUS_READ_8(res, index) \
+ bus_space_read_8(res->bar.tag, res->bar.handle, \
+ res->bar.offset + (index))
+#define ENIC_BUS_READ_4(res, index) \
+ bus_space_read_4(res->bar.tag, res->bar.handle, \
+ res->bar.offset + (index))
+#define ENIC_BUS_READ_REGION_4(res, type, index, values, count) \
+ bus_space_read_region_4(res->type.tag, res->type.handle, \
+ res->type.offset + (index), values, count);
+
+struct vnic_res {
+ unsigned int count;
+ struct enic_bar_info bar;
+};
+
+#include "vnic_enet.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "vnic_nic.h"
+#include "vnic_rss.h"
+#include "enic_res.h"
+#include "cq_enet_desc.h"
+
+#define ENIC_LOCK(_softc) mtx_lock(&(_softc)->enic_lock)
+#define ENIC_UNLOCK(_softc) mtx_unlock(&(_softc)->enic_lock)
+
+#define DRV_NAME "enic"
+#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC"
+#define DRV_COPYRIGHT "Copyright 2008-2015 Cisco Systems, Inc"
+
+#define ENIC_MAX_MAC_ADDR 64
+
+#define VLAN_ETH_HLEN 18
+
+#define ENICPMD_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
+
+#define ENICPMD_BDF_LENGTH 13 /* 0000:00:00.0'\0' */
+#define ENIC_CALC_IP_CKSUM 1
+#define ENIC_CALC_TCP_UDP_CKSUM 2
+#define ENIC_MAX_MTU 9000
+#define ENIC_PAGE_SIZE 4096
+#define PAGE_ROUND_UP(x) \
+ ((((unsigned long)(x)) + ENIC_PAGE_SIZE-1) & (~(ENIC_PAGE_SIZE-1)))
+
+/* must be >= VNIC_COUNTER_DMA_MIN_PERIOD */
+#define VNIC_FLOW_COUNTER_UPDATE_MSECS 500
+
+/* PCI IDs */
+#define CISCO_VENDOR_ID 0x1137
+
+#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
+#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */
+
+/* Special Filter id for non-specific packet flagging. Don't change value */
+#define ENIC_MAGIC_FILTER_ID 0xffff
+
+#define ENICPMD_FDIR_MAX 64
+
+/* HW default VXLAN port */
+#define ENIC_DEFAULT_VXLAN_PORT 4789
+
+/*
+ * Interrupt 0: LSC and errors
+ * Interrupt 1: rx queue 0
+ * Interrupt 2: rx queue 1
+ * ...
+ */
+#define ENICPMD_LSC_INTR_OFFSET 0
+#define ENICPMD_RXQ_INTR_OFFSET 1
+
+#include "vnic_devcmd.h"
+
+enum vnic_proxy_type {
+ PROXY_NONE,
+ PROXY_BY_BDF,
+ PROXY_BY_INDEX,
+};
+
+struct vnic_intr_coal_timer_info {
+ u32 mul;
+ u32 div;
+ u32 max_usec;
+};
+
+struct enic_softc;
+struct vnic_dev {
+ void *priv;
+ struct rte_pci_device *pdev;
+ struct vnic_res res[RES_TYPE_MAX];
+ enum vnic_dev_intr_mode intr_mode;
+ struct vnic_res __iomem *devcmd;
+ struct vnic_devcmd_notify *notify;
+ struct vnic_devcmd_notify notify_copy;
+ bus_addr_t notify_pa;
+ struct iflib_dma_info notify_res;
+ u32 notify_sz;
+ struct iflib_dma_info linkstatus_res;
+ struct vnic_stats *stats;
+ struct iflib_dma_info stats_res;
+ struct vnic_devcmd_fw_info *fw_info;
+ struct iflib_dma_info fw_info_res;
+ enum vnic_proxy_type proxy;
+ u32 proxy_index;
+ u64 args[VNIC_DEVCMD_NARGS];
+ int in_reset;
+ struct vnic_intr_coal_timer_info intr_coal_timer_info;
+ void *(*alloc_consistent)(void *priv, size_t size,
+ bus_addr_t *dma_handle, struct iflib_dma_info *res, u8 *name);
+ void (*free_consistent)(void *priv, size_t size, void *vaddr,
+ bus_addr_t dma_handle, struct iflib_dma_info *res);
+ struct vnic_counter_counts *flow_counters;
+ struct iflib_dma_info flow_counters_res;
+ u8 flow_counters_dma_active;
+ struct enic_softc *softc;
+};
+
+struct enic_soft_stats {
+ uint64_t rx_nombuf;
+ uint64_t rx_packet_errors;
+ uint64_t tx_oversized;
+};
+
+struct intr_queue {
+ struct if_irq intr_irq;
+ struct resource *res;
+ int rid;
+ struct enic_softc *softc;
+};
+
+struct enic {
+ struct enic *next;
+ struct rte_pci_device *pdev;
+ struct vnic_enet_config config;
+ struct vnic_dev_bar bar0;
+ struct vnic_dev *vdev;
+
+ /*
+ * mbuf_initializer contains 64 bits of mbuf rearm_data, used by
+ * the avx2 handler at this time.
+ */
+ uint64_t mbuf_initializer;
+ unsigned int port_id;
+ bool overlay_offload;
+ char bdf_name[ENICPMD_BDF_LENGTH];
+ int dev_fd;
+ int iommu_group_fd;
+ int iommu_groupid;
+ int eventfd;
+ uint8_t mac_addr[ETH_ALEN];
+ pthread_t err_intr_thread;
+ u8 ig_vlan_strip_en;
+ int link_status;
+ u8 hw_ip_checksum;
+ u16 max_mtu;
+ u8 adv_filters;
+ u32 flow_filter_mode;
+ u8 filter_actions; /* HW supported actions */
+ bool vxlan;
+ bool disable_overlay; /* devargs disable_overlay=1 */
+ uint8_t enable_avx2_rx; /* devargs enable-avx2-rx=1 */
+ bool nic_cfg_chk; /* NIC_CFG_CHK available */
+ bool udp_rss_weak; /* Bodega style UDP RSS */
+ uint8_t ig_vlan_rewrite_mode; /* devargs ig-vlan-rewrite */
+ uint16_t vxlan_port; /* current vxlan port pushed to NIC */
+
+ unsigned int flags;
+ unsigned int priv_flags;
+
+ /* work queue (len = conf_wq_count) */
+ struct vnic_wq *wq;
+ unsigned int wq_count; /* equals eth_dev nb_tx_queues */
+
+ /* receive queue (len = conf_rq_count) */
+ struct vnic_rq *rq;
+ unsigned int rq_count; /* equals eth_dev nb_rx_queues */
+
+ /* completion queue (len = conf_cq_count) */
+ struct vnic_cq *cq;
+ unsigned int cq_count; /* equals rq_count + wq_count */
+
+ /* interrupt vectors (len = conf_intr_count) */
+ struct vnic_intr *intr;
+ struct intr_queue *intr_queues;;
+ unsigned int intr_count; /* equals enabled interrupts (lsc + rxqs) */
+
+
+ /* software counters */
+ struct enic_soft_stats soft_stats;
+
+ /* configured resources on vic */
+ unsigned int conf_rq_count;
+ unsigned int conf_wq_count;
+ unsigned int conf_cq_count;
+ unsigned int conf_intr_count;
+
+ /* linked list storing memory allocations */
+ LIST_HEAD(enic_memzone_list, enic_memzone_entry) memzone_list;
+
+ LIST_HEAD(enic_flows, rte_flow) flows;
+ int max_flow_counter;
+
+ /* RSS */
+ uint16_t reta_size;
+ uint8_t hash_key_size;
+ uint64_t flow_type_rss_offloads; /* 0 indicates RSS not supported */
+ /*
+ * Keep a copy of current RSS config for queries, as we cannot retrieve
+ * it from the NIC.
+ */
+ uint8_t rss_hash_type; /* NIC_CFG_RSS_HASH_TYPE flags */
+ uint8_t rss_enable;
+ uint64_t rss_hf; /* ETH_RSS flags */
+ union vnic_rss_key rss_key;
+ union vnic_rss_cpu rss_cpu;
+
+ uint64_t rx_offload_capa; /* DEV_RX_OFFLOAD flags */
+ uint64_t tx_offload_capa; /* DEV_TX_OFFLOAD flags */
+ uint64_t tx_queue_offload_capa; /* DEV_TX_OFFLOAD flags */
+ uint64_t tx_offload_mask; /* PKT_TX flags accepted */
+ struct enic_softc *softc;
+ int port_mtu;
+};
+
+struct enic_softc {
+ device_t dev;
+ if_ctx_t ctx;
+ if_softc_ctx_t scctx;
+ if_shared_ctx_t sctx;
+ struct ifmedia *media;
+ struct ifnet *ifp;
+
+ struct mtx enic_lock;
+
+ struct enic_bar_info mem;
+ struct enic_bar_info io;
+
+ struct vnic_dev vdev;
+ struct enic enic;
+
+ int ntxqsets;
+ int nrxqsets;
+
+ struct if_irq enic_event_intr_irq;
+ struct if_irq enic_err_intr_irq;
+ uint8_t lladdr[ETHER_ADDR_LEN];
+ int link_active;
+ int stopped;
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+
+ int directed;
+ int multicast;
+ int broadcast;
+ int promisc;
+ int allmulti;
+
+ u_int mc_count;
+ uint8_t *mta;
+};
+
+/* Per-instance private data structure */
+
+static inline unsigned int enic_vnic_rq_count(struct enic *enic)
+{
+ return enic->rq_count;
+}
+
+static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
+{
+ return rq;
+}
+
+static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
+{
+ return enic->rq_count + wq;
+}
+
+static inline uint32_t
+enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
+{
+ uint32_t d = i0 + i1;
+ d -= (d >= n_descriptors) ? n_descriptors : 0;
+ return d;
+}
+
+static inline uint32_t
+enic_ring_sub(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
+{
+ int32_t d = i1 - i0;
+ return (uint32_t)((d < 0) ? ((int32_t)n_descriptors + d) : d);
+}
+
+static inline uint32_t
+enic_ring_incr(uint32_t n_descriptors, uint32_t idx)
+{
+ idx++;
+ if (unlikely(idx == n_descriptors))
+ idx = 0;
+ return idx;
+}
+
+void enic_free_wq(void *txq);
+int enic_alloc_intr_resources(struct enic *enic);
+int enic_setup_finish(struct enic *enic);
+int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
+ unsigned int socket_id, uint16_t nb_desc);
+void enic_start_wq(struct enic *enic, uint16_t queue_idx);
+int enic_stop_wq(struct enic *enic, uint16_t queue_idx);
+void enic_start_rq(struct enic *enic, uint16_t queue_idx);
+void enic_free_rq(void *rxq);
+int enic_set_vnic_res(struct enic *enic);
+int enic_init_rss_nic_cfg(struct enic *enic);
+int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu);
+int enic_set_vlan_strip(struct enic *enic);
+int enic_enable(struct enic *enic);
+int enic_disable(struct enic *enic);
+void enic_remove(struct enic *enic);
+int enic_get_link_status(struct enic *enic);
+void enic_dev_stats_clear(struct enic *enic);
+void enic_add_packet_filter(struct enic *enic);
+int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr);
+int enic_del_mac_address(struct enic *enic, int mac_index);
+unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq);
+
+void enic_post_wq_index(struct vnic_wq *wq);
+int enic_probe(struct enic *enic);
+int enic_clsf_init(struct enic *enic);
+void enic_clsf_destroy(struct enic *enic);
+int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
+int enic_link_update(struct enic *enic);
+bool enic_use_vector_rx_handler(struct enic *enic);
+void enic_fdir_info(struct enic *enic);
+void enic_prep_wq_for_simple_tx(struct enic *, uint16_t);
+
+struct enic_ring {
+ uint64_t paddr;
+ caddr_t vaddr;
+ struct enic_softc *softc;
+ uint32_t ring_size; /* Must be a power of two */
+ uint16_t id; /* Logical ID */
+ uint16_t phys_id;
+};
+
+struct enic_cp_ring {
+ struct enic_ring ring;
+ struct if_irq irq;
+ uint32_t cons;
+ bool v_bit; /* Value of valid bit */
+ struct ctx_hw_stats *stats;
+ uint32_t stats_ctx_id;
+ uint32_t last_idx; /* Used by RX rings only
+ * set to the last read pidx
+ */
+};
+
+#endif /* _ENIC_H_ */
diff --git a/sys/dev/enic/enic_compat.h b/sys/dev/enic/enic_compat.h
new file mode 100644
index 000000000000..d5a10f94febb
--- /dev/null
+++ b/sys/dev/enic/enic_compat.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _ENIC_COMPAT_H_
+#define _ENIC_COMPAT_H_
+
+#include <sys/param.h>
+#include <sys/socket.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/endian.h>
+#include <sys/sockio.h>
+#include <sys/priv.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#define ETH_ALEN ETHER_ADDR_LEN
+
+#define typeof __typeof__
+#define __iomem
+#define unlikely(x) __builtin_expect((x),0)
+
+#define le16_to_cpu
+#define le32_to_cpu
+#define le64_to_cpu
+#define cpu_to_le16
+#define cpu_to_le32
+#define cpu_to_le64
+
+#define pr_err(y, args...) dev_err(0, y, ##args)
+#define pr_warn(y, args...) dev_warning(0, y, ##args)
+#define BUG() pr_err("BUG at %s:%d", __func__, __LINE__)
+
+#define VNIC_ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1)
+#define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask))
+#define udelay(t) DELAY(t)
+#define usleep(x) pause("ENIC usleep", ((x) * 1000000 / hz + 1))
+
+#define dev_printk(level, fmt, args...) \
+ printf(fmt, ## args)
+
+#define dev_err(x, args...) dev_printk(ERR, args)
+/*#define dev_info(x, args...) dev_printk(INFO, args)*/
+#define dev_info(x, args...)
+
+#define __le16 uint16_t
+#define __le32 uint32_t
+#define __le64 uint64_t
+
+#define min_t(type, x, y) ({ \
+ type __min1 = (x); \
+ type __min2 = (y); \
+ __min1 < __min2 ? __min1 : __min2; })
+
+#define max_t(type, x, y) ({ \
+ type __max1 = (x); \
+ type __max2 = (y); \
+ __max1 > __max2 ? __max1 : __max2; })
+
+#endif /* _ENIC_COMPAT_H_ */
diff --git a/sys/dev/enic/enic_res.c b/sys/dev/enic/enic_res.c
new file mode 100644
index 000000000000..d264874557a0
--- /dev/null
+++ b/sys/dev/enic/enic_res.c
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include "enic.h"
+#include "enic_compat.h"
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "cq_enet_desc.h"
+#include "vnic_resource.h"
+#include "vnic_enet.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "vnic_nic.h"
+#include "vnic_rss.h"
+#include "enic_res.h"
+#include "enic.h"
+
+/*
+ * Fetch the vNIC MAC address and enet configuration from firmware into
+ * enic->config, clamp descriptor counts and the coalescing timer to
+ * supported ranges, and record RSS/filter capabilities on *enic.
+ * Returns 0 on success or the firmware devcmd error code.
+ */
+int enic_get_vnic_config(struct enic *enic)
+{
+	struct vnic_enet_config *c = &enic->config;
+	int err;
+	err = vnic_dev_get_mac_addr(enic->vdev, enic->mac_addr);
+	if (err) {
+		dev_err(enic_get_dev(enic),
+			"Error getting MAC addr, %d\n", err);
+		return err;
+	}
+
+/*
+ * Read one field of struct vnic_enet_config from firmware; on devcmd
+ * failure, log and return the error from the enclosing function.
+ */
+#define GET_CONFIG(m) \
+	do { \
+		err = vnic_dev_spec(enic->vdev, \
+			offsetof(struct vnic_enet_config, m), \
+			sizeof(c->m), &c->m); \
+		if (err) { \
+			dev_err(enic_get_dev(enic), \
+				"Error getting %s, %d\n", #m, err); \
+			return err; \
+		} \
+	} while (0)
+
+	GET_CONFIG(flags);
+	GET_CONFIG(wq_desc_count);
+	GET_CONFIG(rq_desc_count);
+	GET_CONFIG(mtu);
+	GET_CONFIG(intr_timer_type);
+	GET_CONFIG(intr_mode);
+	GET_CONFIG(intr_timer_usec);
+	GET_CONFIG(loop_tag);
+	GET_CONFIG(num_arfs);
+	GET_CONFIG(max_pkt_size);
+
+	/* max packet size is only defined in newer VIC firmware
+	 * and will be 0 for legacy firmware and VICs
+	 */
+	/* +4 presumably accounts for an 802.1Q VLAN tag -- TODO confirm */
+	if (c->max_pkt_size > ENIC_DEFAULT_RX_MAX_PKT_SIZE)
+		enic->max_mtu = c->max_pkt_size - (ETHER_HDR_LEN + 4);
+	else
+		enic->max_mtu = ENIC_DEFAULT_RX_MAX_PKT_SIZE
+			- (ETHER_HDR_LEN + 4);
+	/* Legacy firmware may report no MTU; fall back to the classic 1500. */
+	if (c->mtu == 0)
+		c->mtu = 1500;
+
+	enic->adv_filters = vnic_dev_capable_adv_filters(enic->vdev);
+
+	err = vnic_dev_capable_filter_mode(enic->vdev, &enic->flow_filter_mode,
+					   &enic->filter_actions);
+	if (err) {
+		dev_err(enic_get_dev(enic),
+			"Error getting filter modes, %d\n", err);
+		return err;
+	}
+	vnic_dev_capable_udp_rss_weak(enic->vdev, &enic->nic_cfg_chk,
+				      &enic->udp_rss_weak);
+
+	/* Clamp ring sizes into [min, max] and round down to multiples of 32. */
+	c->wq_desc_count =
+		min_t(u32, ENIC_MAX_WQ_DESCS,
+		      max_t(u32, ENIC_MIN_WQ_DESCS,
+			    c->wq_desc_count));
+	c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
+
+	c->rq_desc_count =
+		min_t(u32, ENIC_MAX_RQ_DESCS,
+		      max_t(u32, ENIC_MIN_RQ_DESCS,
+			    c->rq_desc_count));
+	c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
+
+	c->intr_timer_usec = min_t(u32, c->intr_timer_usec,
+				   vnic_dev_get_intr_coal_timer_max(enic->vdev));
+
+	/*
+	 * NOTE(review): "mtu d" below is missing a '%' and its argument is
+	 * commented out; harmless because dev_info() is compiled out in
+	 * enic_compat.h, but fix both if dev_info() is ever re-enabled.
+	 */
+	dev_info(enic_get_dev(enic),
+		"vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x "
+		"wq/rq %d/%d mtu d, max mtu:%d\n",
+		enic->mac_addr[0], enic->mac_addr[1], enic->mac_addr[2],
+		enic->mac_addr[3], enic->mac_addr[4], enic->mac_addr[5],
+		c->wq_desc_count, c->rq_desc_count,
+		/* enic->rte_dev->data->mtu, */ enic->max_mtu);
+	dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s "
+		"rss %s intr mode %s type %s timer %d usec "
+		"loopback tag 0x%04x\n",
+		ENIC_SETTING(enic, TXCSUM) ? "yes" : "no",
+		ENIC_SETTING(enic, RXCSUM) ? "yes" : "no",
+		ENIC_SETTING(enic, RSS) ?
+			(ENIC_SETTING(enic, RSSHASH_UDPIPV4) ? "+UDP" :
+			((enic->udp_rss_weak ? "+udp" :
+			"yes"))) : "no",
+		c->intr_mode == VENET_INTR_MODE_INTX ? "INTx" :
+		c->intr_mode == VENET_INTR_MODE_MSI ? "MSI" :
+		c->intr_mode == VENET_INTR_MODE_ANY ? "any" :
+		"unknown",
+		c->intr_timer_type == VENET_INTR_TYPE_MIN ? "min" :
+		c->intr_timer_type == VENET_INTR_TYPE_IDLE ? "idle" :
+		"unknown",
+		c->intr_timer_usec,
+		c->loop_tag);
+
+	/* RSS settings from vNIC */
+	enic->reta_size = ENIC_RSS_RETA_SIZE;
+	enic->hash_key_size = ENIC_RSS_HASH_KEY_SIZE;
+	enic->flow_type_rss_offloads = 0;
+
+	/* Zero offloads if RSS is not enabled */
+	if (!ENIC_SETTING(enic, RSS))
+		enic->flow_type_rss_offloads = 0;
+
+	enic->vxlan = ENIC_SETTING(enic, VXLAN) &&
+		vnic_dev_capable_vxlan(enic->vdev);
+	/*
+	 * Default hardware capabilities. enic_dev_init() may add additional
+	 * flags if it enables overlay offloads.
+	 */
+	enic->tx_queue_offload_capa = 0;
+	return 0;
+}
+
+/*
+ * Ask firmware to accept frames tagged with `vlanid` on this vNIC.
+ * Returns 0 on success or the devcmd error code (already logged).
+ */
+int enic_add_vlan(struct enic *enic, u16 vlanid)
+{
+	u64 arg0 = vlanid;
+	u64 arg1 = 0;
+	int timeout = 1000;
+	int rc;
+
+	rc = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &arg0, &arg1, timeout);
+	if (rc != 0)
+		dev_err(enic_get_dev(enic), "Can't add vlan id, %d\n", rc);
+
+	return (rc);
+}
+
+/*
+ * Ask firmware to stop accepting frames tagged with `vlanid`.
+ * Returns 0 on success or the devcmd error code (already logged).
+ */
+int enic_del_vlan(struct enic *enic, u16 vlanid)
+{
+	u64 arg0 = vlanid;
+	u64 arg1 = 0;
+	int timeout = 1000;
+	int rc;
+
+	rc = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &arg0, &arg1, timeout);
+	if (rc != 0)
+		dev_err(enic_get_dev(enic), "Can't delete vlan id, %d\n", rc);
+
+	return (rc);
+}
+
+/*
+ * Pack the RSS / TSO / VLAN-strip knobs into a single 32-bit NIC config
+ * word and push it to firmware.  Uses CMD_NIC_CFG_CHK when the device
+ * advertised it (see vnic_dev_capable_udp_rss_weak()), else CMD_NIC_CFG.
+ * Returns the devcmd status.
+ */
+int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
+    u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
+    u8 ig_vlan_strip_en)
+{
+	u32 cfg_word;
+	u64 arg0, arg1;
+	int timeout = 1000;
+	enum vnic_devcmd_cmd opcode;
+
+	vnic_set_nic_cfg(&cfg_word, rss_default_cpu,
+	    rss_hash_type, rss_hash_bits, rss_base_cpu,
+	    rss_enable, tso_ipid_split_en, ig_vlan_strip_en);
+
+	opcode = enic->nic_cfg_chk ? CMD_NIC_CFG_CHK : CMD_NIC_CFG;
+	arg0 = cfg_word;
+	arg1 = 0;
+
+	return (vnic_dev_cmd(enic->vdev, opcode, &arg0, &arg1, timeout));
+}
+
+/*
+ * Query how many work queues (TX), receive queues, completion queues and
+ * interrupt resources the vNIC exposes, then reshape them for iflib:
+ * equal numbers of WQs and RQs, and one CQ for each of them.
+ */
+void enic_get_res_counts(struct enic *enic)
+{
+	enic->conf_wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
+	enic->conf_rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
+	enic->conf_cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
+	enic->conf_intr_count = vnic_dev_get_res_count(enic->vdev,
+	    RES_TYPE_INTR_CTRL);
+
+	dev_info(enic_get_dev(enic),
+	    "vNIC resources avail: wq %d rq %d cq %d intr %d\n",
+	    enic->conf_wq_count, enic->conf_rq_count,
+	    enic->conf_cq_count, enic->conf_intr_count);
+	/* Force a symmetric layout: wq == rq, one CQ per queue. */
+	enic->conf_rq_count = min(enic->conf_rq_count, enic->conf_wq_count);
+	enic->conf_wq_count = enic->conf_rq_count;
+	enic->conf_cq_count = enic->conf_rq_count + enic->conf_wq_count;
+	dev_info(enic_get_dev(enic),
+	    "vNIC resources iflib: wq %d rq %d cq %d intr %d\n",
+	    enic->conf_wq_count, enic->conf_rq_count,
+	    enic->conf_cq_count, enic->conf_intr_count);
+	dev_info(enic_get_dev(enic),
+	    "vNIC resources avail: wq_desc %d rq_desc %d\n",
+	    enic->config.wq_desc_count, enic->config.rq_desc_count);
+
+	/* The active counts start out identical to the configured ones. */
+	enic->wq_count = enic->conf_wq_count;
+	enic->rq_count = enic->conf_rq_count;
+	enic->cq_count = enic->conf_cq_count;
+}
diff --git a/sys/dev/enic/enic_res.h b/sys/dev/enic/enic_res.h
new file mode 100644
index 000000000000..1a6f3a3ca98f
--- /dev/null
+++ b/sys/dev/enic/enic_res.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+/*
+ * Limits, defaults and helpers for the vNIC resources (work queues,
+ * receive queues, RSS) managed by enic_res.c.
+ */
+
+#ifndef _ENIC_RES_H_
+#define _ENIC_RES_H_
+
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+
+/* Hardware descriptor-ring size limits (entries). */
+#define ENIC_MIN_WQ_DESCS		64
+#define ENIC_MAX_WQ_DESCS		4096
+#define ENIC_MIN_RQ_DESCS		64
+#define ENIC_MAX_RQ_DESCS		4096
+
+#define ENIC_MAX_MULTICAST_ADDRESSES	32
+
+/* A descriptor ring has a multiple of 32 descriptors */
+#define ENIC_ALIGN_DESCS		32
+#define ENIC_ALIGN_DESCS_MASK		~(ENIC_ALIGN_DESCS - 1)
+
+/* Request a completion index every 32 buffers (roughly packets) */
+#define ENIC_WQ_CQ_THRESH		32
+
+#define ENIC_MIN_MTU			68
+
+/* Does not include (possible) inserted VLAN tag and FCS */
+#define ENIC_DEFAULT_RX_MAX_PKT_SIZE	9022
+
+/* Does not include (possible) inserted VLAN tag and FCS */
+#define ENIC_TX_MAX_PKT_SIZE		9208
+
+#define ENIC_MULTICAST_PERFECT_FILTERS	32
+#define ENIC_UNICAST_PERFECT_FILTERS	32
+
+#define ENIC_NON_TSO_MAX_DESC		16
+#define ENIC_DEFAULT_RX_FREE_THRESH	32
+#define ENIC_TX_XMIT_MAX		64
+#define ENIC_RX_BURST_MAX		64
+
+/* Defaults for dev_info.default_{rx,tx}portconf */
+#define ENIC_DEFAULT_RX_BURST		32
+#define ENIC_DEFAULT_RX_RINGS		1
+#define ENIC_DEFAULT_RX_RING_SIZE	512
+#define ENIC_DEFAULT_TX_BURST		32
+#define ENIC_DEFAULT_TX_RINGS		1
+#define ENIC_DEFAULT_TX_RING_SIZE	512
+
+/* RSS indirection-table and hash-key geometry. */
+#define ENIC_RSS_DEFAULT_CPU		0
+#define ENIC_RSS_BASE_CPU		0
+#define ENIC_RSS_HASH_BITS		7
+#define ENIC_RSS_RETA_SIZE		(1 << ENIC_RSS_HASH_BITS)
+#define ENIC_RSS_HASH_KEY_SIZE		40
+
+/* Test a VENETF_* capability flag reported by firmware (1 = set). */
+#define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
+
+struct enic;
+
+int enic_get_vnic_config(struct enic *);
+int enic_add_vlan(struct enic *enic, u16 vlanid);
+int enic_del_vlan(struct enic *enic, u16 vlanid);
+int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
+	u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
+	u8 ig_vlan_strip_en);
+void enic_get_res_counts(struct enic *enic);
+void enic_init_vnic_resources(struct enic *enic);
+int enic_alloc_vnic_resources(struct enic *);
+void enic_free_vnic_resources(struct enic *);
+
+#endif /* _ENIC_RES_H_ */
diff --git a/sys/dev/enic/enic_txrx.c b/sys/dev/enic/enic_txrx.c
new file mode 100644
index 000000000000..5a557fc7f94a
--- /dev/null
+++ b/sys/dev/enic/enic_txrx.c
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include "opt_rss.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/endian.h>
+#include <sys/sockio.h>
+#include <sys/mbuf.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+#include <sys/smp.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_arp.h>
+#include <net/if_dl.h>
+#include <net/if_types.h>
+#include <net/if_media.h>
+#include <net/if_vlan_var.h>
+#include <net/iflib.h>
+#ifdef RSS
+#include <net/rss_config.h>
+#endif
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet6/ip6_var.h>
+#include <netinet/udp.h>
+#include <netinet/tcp.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "ifdi_if.h"
+#include "enic.h"
+
+#include "opt_inet.h"
+#include "opt_inet6.h"
+
+static int enic_isc_txd_encap(void *, if_pkt_info_t);
+static void enic_isc_txd_flush(void *, uint16_t, qidx_t);
+static int enic_isc_txd_credits_update(void *, uint16_t, bool);
+static int enic_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t);
+static int enic_isc_rxd_pkt_get(void *, if_rxd_info_t);
+static void enic_isc_rxd_refill(void *, if_rxd_update_t);
+static void enic_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t);
+static int enic_legacy_intr(void *);
+static void enic_initial_post_rx(struct enic *, struct vnic_rq *);
+static int enic_wq_service(struct vnic_dev *, struct cq_desc *, u8, u16, u16,
+ void *);
+static int enic_rq_service(struct vnic_dev *, struct cq_desc *, u8, u16, u16,
+ void *);
+
+/*
+ * iflib TX/RX method table, wired into scctx->isc_txrx by
+ * enic_attach_pre() in if_enic.c.
+ */
+struct if_txrx enic_txrx = {
+	.ift_txd_encap = enic_isc_txd_encap,
+	.ift_txd_flush = enic_isc_txd_flush,
+	.ift_txd_credits_update = enic_isc_txd_credits_update,
+	.ift_rxd_available = enic_isc_rxd_available,
+	.ift_rxd_pkt_get = enic_isc_rxd_pkt_get,
+	.ift_rxd_refill = enic_isc_rxd_refill,
+	.ift_rxd_flush = enic_isc_rxd_flush,
+	.ift_legacy_intr = enic_legacy_intr
+};
+
+/*
+ * iflib ift_txd_encap: encode the DMA segments of one packet into WQ
+ * descriptors starting at wq->head_idx.  The last segment gets EOP and a
+ * completion request (CQ_ENTRY); checksum/TSO offload fields are left
+ * disabled (offload_mode 0) since attach advertises no csum capabilities.
+ * Always returns 0; the descriptor doorbell is rung later by txd_flush.
+ */
+static int
+enic_isc_txd_encap(void *vsc, if_pkt_info_t pi)
+{
+	struct enic_softc *softc;
+	struct enic *enic;
+	struct vnic_wq *wq;
+	int nsegs;
+	int i;
+
+	struct wq_enet_desc *desc;
+	uint64_t bus_addr;
+	/*
+	 * NOTE(review): mss is fixed at 7 even though offload_mode is 0;
+	 * presumably the hardware ignores the mss field when no offload is
+	 * requested -- confirm against the VIC descriptor spec.
+	 */
+	uint16_t mss = 7;
+	uint16_t header_len = 0;
+	uint8_t offload_mode = 0;
+	uint8_t eop = 0, cq;
+	uint8_t vlan_tag_insert = 0;
+	unsigned short vlan_id = 0;
+
+	unsigned int wq_desc_avail;
+	int head_idx;
+	unsigned int desc_count, data_len;
+
+	softc = vsc;
+	enic = &softc->enic;
+
+	wq = &enic->wq[pi->ipi_qsidx];
+	nsegs = pi->ipi_nsegs;
+
+	ENIC_LOCK(softc);
+	/*
+	 * NOTE(review): wq_desc_avail is decremented but never checked here;
+	 * this relies on iflib capping segments per packet (isc_tx_nsegments
+	 * = 31) and on credits_update reclaiming space -- verify it cannot
+	 * overrun the ring.
+	 */
+	wq_desc_avail = vnic_wq_desc_avail(wq);
+	head_idx = wq->head_idx;
+	desc_count = wq->ring.desc_count;
+
+	for (i = 0; i < nsegs; i++) {
+		eop = 0;
+		cq = 0;
+		wq->cq_pend++;
+		/* Last segment: end-of-packet, and ask for a completion. */
+		if (i + 1 == nsegs) {
+			eop = 1;
+			cq = 1;
+			wq->cq_pend = 0;
+		}
+		desc = wq->ring.descs;
+		bus_addr = pi->ipi_segs[i].ds_addr;
+		data_len = pi->ipi_segs[i].ds_len;
+
+		wq_enet_desc_enc(&desc[head_idx], bus_addr, data_len, mss,
+				 header_len, offload_mode, eop, cq, 0,
+				 vlan_tag_insert, vlan_id, 0);
+
+		/* Advance with wraparound at desc_count. */
+		head_idx = enic_ring_incr(desc_count, head_idx);
+		wq_desc_avail--;
+	}
+
+	wq->ring.desc_avail = wq_desc_avail;
+	wq->head_idx = head_idx;
+
+	/* Tell iflib where the producer index now stands. */
+	pi->ipi_new_pidx = head_idx;
+	ENIC_UNLOCK(softc);
+
+	return (0);
+}
+
+/*
+ * iflib ift_txd_flush: ring the TX doorbell by writing the current
+ * producer index (wq->head_idx) to the posted-index register.  The pidx
+ * argument from iflib is ignored; the queue's own head is authoritative.
+ */
+static void
+enic_isc_txd_flush(void *vsc, uint16_t txqid, qidx_t pidx)
+{
+	struct enic_softc *softc = vsc;
+	struct enic *enic = &softc->enic;
+	struct vnic_wq *wq = &enic->wq[txqid];
+
+	ENIC_LOCK(softc);
+	ENIC_BUS_WRITE_4(wq->ctrl, TX_POSTED_INDEX, wq->head_idx);
+	ENIC_UNLOCK(softc);
+}
+
+/*
+ * iflib ift_txd_credits_update: report how many TX descriptors have been
+ * completed by hardware.  With clear == false this is only a peek (did
+ * any work arrive?); with clear == true the completion queue is serviced
+ * and the accumulated wq->processed count is returned and reset.
+ */
+static int
+enic_isc_txd_credits_update(void *vsc, uint16_t txqid, bool clear)
+{
+
+	struct enic_softc *softc;
+	struct enic *enic;
+	struct vnic_wq *wq;
+	struct vnic_cq *cq;
+	int processed;
+	unsigned int cq_wq;
+	unsigned int wq_work_to_do = 10;
+	unsigned int wq_work_avail;
+
+	softc = vsc;
+	enic = &softc->enic;
+	wq = &softc->enic.wq[txqid];
+
+	/* Map the TX queue id to its completion queue. */
+	cq_wq = enic_cq_wq(enic, txqid);
+	cq = &enic->cq[cq_wq];
+
+	ENIC_LOCK(softc);
+	wq_work_avail = vnic_cq_work(cq, wq_work_to_do);
+	ENIC_UNLOCK(softc);
+
+	if (wq_work_avail == 0)
+		return (0);
+
+	/* Peek mode: just report that work exists. */
+	if (!clear)
+		return (1);
+
+	/*
+	 * Consume CQ entries; enic_wq_service() folds the freed descriptor
+	 * count into wq->processed.  (The lock is dropped and retaken above,
+	 * so the work estimate may be slightly stale -- harmless, iflib
+	 * calls back again.)
+	 */
+	ENIC_LOCK(softc);
+	vnic_cq_service(cq, wq_work_to_do,
+	    enic_wq_service, NULL);
+
+	processed = wq->processed;
+	wq->processed = 0;
+
+	ENIC_UNLOCK(softc);
+
+	return (processed);
+}
+
+/*
+ * iflib ift_rxd_available: return how many RX completions (up to
+ * `budget`) are pending on the queue's completion ring.  Lock-free peek;
+ * actual consumption happens in rxd_pkt_get.
+ */
+static int
+enic_isc_rxd_available(void *vsc, uint16_t rxqid, qidx_t idx, qidx_t budget)
+{
+	struct enic_softc *softc = vsc;
+	struct enic *enic = &softc->enic;
+	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rxqid)];
+
+	return (vnic_cq_work(cq, budget));
+}
+
+/*
+ * iflib ift_rxd_pkt_get: consume one RX completion and fill in *ri via
+ * the enic_rq_service() callback.  Returns 0 when a packet was produced
+ * (after returning one interrupt credit with unmask), -1 when the
+ * completion ring was empty.
+ */
+static int
+enic_isc_rxd_pkt_get(void *vsc, if_rxd_info_t ri)
+{
+	struct enic_softc *softc;
+	struct enic *enic;
+	struct vnic_cq *cq;
+	unsigned int rq_work_to_do = 1;
+	unsigned int rq_work_done = 0;
+	unsigned int cq_rq;
+
+	softc = vsc;
+	enic = &softc->enic;
+
+	/* Map the RX queue id to its completion queue (and intr index). */
+	cq_rq = enic_cq_rq(&softc->enic, ri->iri_qsidx);
+	cq = &enic->cq[cq_rq];
+	ENIC_LOCK(softc);
+	rq_work_done = vnic_cq_service(cq, rq_work_to_do, enic_rq_service, ri);
+
+	if (rq_work_done != 0) {
+		/* Return the credit and unmask the interrupt (last arg). */
+		vnic_intr_return_credits(&enic->intr[cq_rq], rq_work_done, 0,
+		    1);
+		ENIC_UNLOCK(softc);
+		return (0);
+	} else {
+		ENIC_UNLOCK(softc);
+		return (-1);
+	}
+
+}
+
+/*
+ * iflib ift_rxd_refill: write one RQ descriptor (address + length) for
+ * each buffer iflib hands us, starting at iru_pidx and wrapping at the
+ * ring end.  Also performs the one-time initial post of the ring to the
+ * NIC via enic_initial_post_rx() (a no-op on subsequent calls).
+ */
+static void
+enic_isc_rxd_refill(void *vsc, if_rxd_update_t iru)
+{
+	struct enic_softc *softc;
+	struct vnic_rq *rq;
+	struct rq_enet_desc *rqd;
+
+	uint64_t *paddrs;
+	int count;
+	uint32_t pidx;
+	int len;
+	int idx;
+	int i;
+
+	count = iru->iru_count;
+	len = iru->iru_buf_size;
+	paddrs = iru->iru_paddrs;
+	pidx = iru->iru_pidx;
+
+	softc = vsc;
+	rq = &softc->enic.rq[iru->iru_qsidx];
+	rqd = rq->ring.descs;
+
+	idx = pidx;
+	for (i = 0; i < count; i++, idx++) {
+
+		/* Wrap the ring index. */
+		if (idx == rq->ring.desc_count)
+			idx = 0;
+		/* Each buffer is a complete packet (SOP == EOP). */
+		rq_enet_desc_enc(&rqd[idx], paddrs[i],
+				 RQ_ENET_TYPE_ONLY_SOP,
+				 len);
+
+	}
+
+	rq->in_use = 1;
+
+	/* First fill after (re)init: reset the hardware fetch index too. */
+	if (rq->need_initial_post) {
+		ENIC_BUS_WRITE_4(rq->ctrl, RX_FETCH_INDEX, 0);
+	}
+
+	enic_initial_post_rx(&softc->enic, rq);
+}
+
+/*
+ * iflib ift_rxd_flush: publish refilled RX descriptors to the NIC by
+ * writing the posted index register.
+ */
+static void
+enic_isc_rxd_flush(void *vsc, uint16_t rxqid, uint8_t flid, qidx_t pidx)
+{
+
+	struct enic_softc *softc;
+	struct vnic_rq *rq;
+
+	softc = vsc;
+	rq = &softc->enic.rq[rxqid];
+
+	/*
+	 * pidx is the last descriptor iflib filled.  NOTE(review): the VIC
+	 * posted index appears to be the index of the last valid descriptor
+	 * rather than one past it -- enic_initial_post_rx() posts
+	 * desc_count - 1 -- which is why pidx is written unmodified here;
+	 * confirm against the VIC register spec.
+	 */
+
+	ENIC_LOCK(softc);
+	ENIC_BUS_WRITE_4(rq->ctrl, RX_POSTED_INDEX, pidx);
+	ENIC_UNLOCK(softc);
+}
+
+/*
+ * Legacy (INTx) interrupt filter stub: this driver uses per-queue MSI-X
+ * handlers instead, so the legacy path does no work and returns -1.
+ */
+static int
+enic_legacy_intr(void *xsc)
+{
+	return (-1);
+}
+
+/*
+ * Account TX descriptors completed up to completed_index: compute the
+ * delta since ring.last_count (handling index wraparound), return that
+ * many descriptors to desc_avail, and accumulate it in wq->processed for
+ * enic_isc_txd_credits_update().  cq_desc, buf_service and opaque are
+ * currently unused -- there is no per-buffer bookkeeping.
+ */
+static inline void
+vnic_wq_service(struct vnic_wq *wq, struct cq_desc *cq_desc,
+    u16 completed_index, void (*buf_service) (struct vnic_wq *wq,
+    struct cq_desc *cq_desc, /* struct vnic_wq_buf * *buf, */ void *opaque),
+    void *opaque)
+{
+	int processed;
+
+	processed = completed_index - wq->ring.last_count;
+	/* completed_index wrapped past the end of the ring. */
+	if (processed < 0)
+		processed += wq->ring.desc_count;
+	/*
+	 * NOTE(review): a zero delta is forced to 1, presumably because a
+	 * CQ entry always implies at least one completed descriptor (or a
+	 * full-ring wrap) -- confirm against the completion semantics.
+	 */
+	if (processed == 0)
+		processed++;
+
+	wq->ring.desc_avail += processed;
+	wq->processed += processed;
+	wq->ring.last_count = completed_index;
+}
+
+/*
+ * Post the Rx buffers for the first time. enic_alloc_rx_queue_mbufs() has
+ * allocated the buffers and filled the RQ descriptor ring. Just need to push
+ * the post index to the NIC.  No-op unless the queue is in use and still
+ * flagged need_initial_post (set at queue init, cleared here).
+ */
+static void
+enic_initial_post_rx(struct enic *enic, struct vnic_rq *rq)
+{
+	struct enic_softc *softc = enic->softc;
+	if (!rq->in_use || !rq->need_initial_post)
+		return;
+
+	ENIC_LOCK(softc);
+	/* make sure all prior writes are complete before doing the PIO write */
+	/* Post all but the last buffer to VIC. */
+	rq->posted_index = rq->ring.desc_count - 1;
+
+	rq->rx_nb_hold = 0;
+
+	ENIC_BUS_WRITE_4(rq->ctrl, RX_POSTED_INDEX, rq->posted_index);
+
+	/* One-shot: later posts go through enic_isc_rxd_flush(). */
+	rq->need_initial_post = false;
+	ENIC_UNLOCK(softc);
+}
+
+/*
+ * vnic_cq_service() callback for TX completions: credit the completed
+ * descriptors back to the owning work queue.  Always returns 0 so the
+ * CQ walk continues up to its work budget.
+ */
+static int
+enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, u8 type,
+    u16 q_number, u16 completed_index, void *opaque)
+{
+	struct enic *sc = vnic_dev_priv(vdev);
+
+	vnic_wq_service(&sc->wq[q_number], cq_desc, completed_index, NULL,
+	    opaque);
+
+	return (0);
+}
+
+/*
+ * Decode one RX completion descriptor and translate it into the iflib
+ * if_rxd_info passed through `opaque`: a single fragment at the current
+ * consumer index with length bytes_written.  All the offload results the
+ * decode produces (checksum validity, VLAN tag, RSS hash, ...) are
+ * currently discarded; desc_return and buf_service are unused.
+ */
+static void
+vnic_rq_service(struct vnic_rq *rq, struct cq_desc *cq_desc,
+    u16 in_completed_index, int desc_return,
+    void(*buf_service)(struct vnic_rq *rq, struct cq_desc *cq_desc,
+    /* struct vnic_rq_buf * *buf, */ int skipped, void *opaque), void *opaque)
+{
+
+	if_rxd_info_t ri = (if_rxd_info_t) opaque;
+	u8 type, color, eop, sop, ingress_port, vlan_stripped;
+	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
+	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
+	u8 packet_error;
+	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
+	u32 rss_hash;
+	int cqidx;
+	if_rxd_frag_t frag;
+
+	/* Unpack every field of the hardware completion descriptor. */
+	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
+	    &type, &color, &q_number, &completed_index,
+	    &ingress_port, &fcoe, &eop, &sop, &rss_type,
+	    &csum_not_calc, &rss_hash, &bytes_written,
+	    &packet_error, &vlan_stripped, &vlan_tci, &checksum,
+	    &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
+	    &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
+	    &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
+	    &fcs_ok);
+
+	cqidx = ri->iri_cidx;
+
+	/* One buffer per packet (SOP==EOP descriptors): one fragment. */
+	frag = &ri->iri_frags[0];
+	frag->irf_idx = cqidx;
+	frag->irf_len = bytes_written;
+
+	/* Advance the consumer index with wraparound. */
+	if (++cqidx == rq->ring.desc_count) {
+		cqidx = 0;
+	}
+
+	ri->iri_cidx = cqidx;
+	ri->iri_nfrags = 1;
+	ri->iri_len = bytes_written;
+}
+
+/*
+ * vnic_cq_service() callback for RX completions: forward the CQ entry to
+ * vnic_rq_service(), which fills the if_rxd_info carried in `opaque`.
+ * Always returns 0 so the CQ walk continues up to its work budget.
+ */
+static int
+enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+    u8 type, u16 q_number, u16 completed_index, void *opaque)
+{
+	if_rxd_info_t ri = opaque;
+	struct enic *sc = vnic_dev_priv(vdev);
+
+	vnic_rq_service(&sc->rq[ri->iri_qsidx], cq_desc, completed_index,
+	    VNIC_RQ_RETURN_DESC, NULL, opaque);
+
+	return (0);
+}
+
+/*
+ * Pre-initialize the WQ descriptor flags that never change: every
+ * descriptor carries exactly one packet (EOP set), and every
+ * ENIC_WQ_CQ_THRESH-th descriptor additionally requests a completion
+ * entry (one CQ update per ~32 packets).
+ */
+void
+enic_prep_wq_for_simple_tx(struct enic *enic, uint16_t queue_idx)
+{
+	struct vnic_wq *wq = &enic->wq[queue_idx];
+	struct wq_enet_desc *desc = (struct wq_enet_desc *)wq->ring.descs;
+	unsigned int idx;
+
+	for (idx = 0; idx < wq->ring.desc_count; idx++, desc++) {
+		uint16_t flags = 1 << WQ_ENET_FLAGS_EOP_SHIFT;
+
+		if ((idx + 1) % ENIC_WQ_CQ_THRESH == 0)
+			flags |= 1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT;
+		desc->header_length_flags = flags;
+	}
+}
+
+/* Enable the given transmit queue in hardware. */
+void
+enic_start_wq(struct enic *enic, uint16_t queue_idx)
+{
+	struct vnic_wq *wq = &enic->wq[queue_idx];
+
+	vnic_wq_enable(wq);
+}
+
+/*
+ * Disable the given transmit queue.  Returns 0 on success or the
+ * nonzero status from vnic_wq_disable() on failure.
+ */
+int
+enic_stop_wq(struct enic *enic, uint16_t queue_idx)
+{
+	return (vnic_wq_disable(&enic->wq[queue_idx]));
+}
+
+/*
+ * Enable the given receive queue and perform the one-time initial post
+ * of its already-filled descriptor ring to the NIC.
+ */
+void
+enic_start_rq(struct enic *enic, uint16_t queue_idx)
+{
+	struct vnic_rq *rq = &enic->rq[queue_idx];
+
+	vnic_rq_enable(rq);
+	enic_initial_post_rx(enic, rq);
+}
diff --git a/sys/dev/enic/if_enic.c b/sys/dev/enic/if_enic.c
new file mode 100644
index 000000000000..d3ac5bb902c6
--- /dev/null
+++ b/sys/dev/enic/if_enic.c
@@ -0,0 +1,1583 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include "opt_rss.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/endian.h>
+#include <sys/sockio.h>
+#include <sys/mbuf.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+#include <sys/smp.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_arp.h>
+#include <net/if_dl.h>
+#include <net/if_types.h>
+#include <net/if_media.h>
+#include <net/if_vlan_var.h>
+#include <net/iflib.h>
+#ifdef RSS
+#include <net/rss_config.h>
+#endif
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet6/ip6_var.h>
+#include <netinet/udp.h>
+#include <netinet/tcp.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "ifdi_if.h"
+#include "enic.h"
+
+#include "opt_inet.h"
+#include "opt_inet6.h"
+
+static SYSCTL_NODE(_hw, OID_AUTO, enic, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+ "ENIC");
+
+/* PCI IDs this driver attaches to: Cisco VIC ethernet PF and VF. */
+static pci_vendor_info_t enic_vendor_info_array[] =
+{
+	PVID(CISCO_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET,
+	    DRV_DESCRIPTION),
+	PVID(CISCO_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF,
+	    DRV_DESCRIPTION " VF"),
+	/* required last entry */
+
+	PVID_END
+};
+
+static void *enic_register(device_t);
+static int enic_attach_pre(if_ctx_t);
+static int enic_msix_intr_assign(if_ctx_t, int);
+
+static int enic_attach_post(if_ctx_t);
+static int enic_detach(if_ctx_t);
+
+static int enic_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
+static int enic_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
+static void enic_queues_free(if_ctx_t);
+static int enic_rxq_intr(void *);
+static int enic_event_intr(void *);
+static int enic_err_intr(void *);
+static void enic_stop(if_ctx_t);
+static void enic_init(if_ctx_t);
+static void enic_multi_set(if_ctx_t);
+static int enic_mtu_set(if_ctx_t, uint32_t);
+static void enic_media_status(if_ctx_t, struct ifmediareq *);
+static int enic_media_change(if_ctx_t);
+static int enic_promisc_set(if_ctx_t, int);
+static uint64_t enic_get_counter(if_ctx_t, ift_counter);
+static void enic_update_admin_status(if_ctx_t);
+static void enic_txq_timer(if_ctx_t, uint16_t);
+static int enic_link_is_up(struct enic_softc *);
+static void enic_link_status(struct enic_softc *);
+static void enic_set_lladdr(struct enic_softc *);
+static void enic_setup_txq_sysctl(struct vnic_wq *, int, struct sysctl_ctx_list *,
+ struct sysctl_oid_list *);
+static void enic_setup_rxq_sysctl(struct vnic_rq *, int, struct sysctl_ctx_list *,
+ struct sysctl_oid_list *);
+static void enic_setup_sysctl(struct enic_softc *);
+static int enic_tx_queue_intr_enable(if_ctx_t, uint16_t);
+static int enic_rx_queue_intr_enable(if_ctx_t, uint16_t);
+static void enic_enable_intr(struct enic_softc *, int);
+static void enic_disable_intr(struct enic_softc *, int);
+static void enic_intr_enable_all(if_ctx_t);
+static void enic_intr_disable_all(if_ctx_t);
+static int enic_dev_open(struct enic *);
+static int enic_dev_init(struct enic *);
+static void *enic_alloc_consistent(void *, size_t, bus_addr_t *,
+ struct iflib_dma_info *, u8 *);
+static void enic_free_consistent(void *, size_t, void *, bus_addr_t,
+ struct iflib_dma_info *);
+static int enic_pci_mapping(struct enic_softc *);
+static void enic_pci_mapping_free(struct enic_softc *);
+static int enic_dev_wait(struct vnic_dev *, int (*) (struct vnic_dev *, int),
+ int (*) (struct vnic_dev *, int *), int arg);
+static int enic_map_bar(struct enic_softc *, struct enic_bar_info *, int, bool);
+static void enic_update_packet_filter(struct enic *enic);
+
+typedef enum {
+ ENIC_BARRIER_RD,
+ ENIC_BARRIER_WR,
+ ENIC_BARRIER_RDWR,
+} enic_barrier_t;
+
+/*
+ * newbus glue: all device methods are delegated to iflib, which calls
+ * back into the IFDI methods below.
+ */
+static device_method_t enic_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_register, enic_register),
+	DEVMETHOD(device_probe, iflib_device_probe),
+	DEVMETHOD(device_attach, iflib_device_attach),
+	DEVMETHOD(device_detach, iflib_device_detach),
+	DEVMETHOD(device_shutdown, iflib_device_shutdown),
+	DEVMETHOD(device_suspend, iflib_device_suspend),
+	DEVMETHOD(device_resume, iflib_device_resume),
+	DEVMETHOD_END
+};
+
+static driver_t enic_driver = {
+	"enic", enic_methods, sizeof(struct enic_softc)
+};
+
+/* Register with the PCI bus and declare module dependencies. */
+DRIVER_MODULE(enic, pci, enic_driver, 0, 0);
+IFLIB_PNP_INFO(pci, enic, enic_vendor_info_array);
+MODULE_VERSION(enic, 2);
+
+MODULE_DEPEND(enic, pci, 1, 1, 1);
+MODULE_DEPEND(enic, ether, 1, 1, 1);
+MODULE_DEPEND(enic, iflib, 1, 1, 1);
+
+/* IFDI (iflib driver interface) methods implemented by this driver. */
+static device_method_t enic_iflib_methods[] = {
+	DEVMETHOD(ifdi_tx_queues_alloc, enic_tx_queues_alloc),
+	DEVMETHOD(ifdi_rx_queues_alloc, enic_rx_queues_alloc),
+	DEVMETHOD(ifdi_queues_free, enic_queues_free),
+
+	DEVMETHOD(ifdi_attach_pre, enic_attach_pre),
+	DEVMETHOD(ifdi_attach_post, enic_attach_post),
+	DEVMETHOD(ifdi_detach, enic_detach),
+
+	DEVMETHOD(ifdi_init, enic_init),
+	DEVMETHOD(ifdi_stop, enic_stop),
+	DEVMETHOD(ifdi_multi_set, enic_multi_set),
+	DEVMETHOD(ifdi_mtu_set, enic_mtu_set),
+	DEVMETHOD(ifdi_media_status, enic_media_status),
+	DEVMETHOD(ifdi_media_change, enic_media_change),
+	DEVMETHOD(ifdi_promisc_set, enic_promisc_set),
+	DEVMETHOD(ifdi_get_counter, enic_get_counter),
+	DEVMETHOD(ifdi_update_admin_status, enic_update_admin_status),
+	DEVMETHOD(ifdi_timer, enic_txq_timer),
+
+	DEVMETHOD(ifdi_tx_queue_intr_enable, enic_tx_queue_intr_enable),
+	DEVMETHOD(ifdi_rx_queue_intr_enable, enic_rx_queue_intr_enable),
+	DEVMETHOD(ifdi_intr_enable, enic_intr_enable_all),
+	DEVMETHOD(ifdi_intr_disable, enic_intr_disable_all),
+	DEVMETHOD(ifdi_msix_intr_assign, enic_msix_intr_assign),
+
+	DEVMETHOD_END
+};
+
+static driver_t enic_iflib_driver = {
+	"enic", enic_iflib_methods, sizeof(struct enic_softc)
+};
+
+extern struct if_txrx enic_txrx;
+
+/*
+ * Static iflib shared-context template returned by enic_register();
+ * per-instance limits (queue counts, descriptor counts) are refined in
+ * enic_attach_pre() from the firmware-reported configuration.
+ */
+static struct if_shared_ctx enic_sctx_init = {
+	.isc_magic = IFLIB_MAGIC,
+	.isc_q_align = 512,
+
+	.isc_tx_maxsize = ENIC_TX_MAX_PKT_SIZE,
+	.isc_tx_maxsegsize = PAGE_SIZE,
+
+	/*
+	 * These values are used to configure the busdma tag used for receive
+	 * descriptors. Each receive descriptor only points to one buffer.
+	 */
+	.isc_rx_maxsize = ENIC_DEFAULT_RX_MAX_PKT_SIZE,	/* One buf per
+							 * descriptor */
+	.isc_rx_nsegments = 1,	/* One mapping per descriptor */
+	.isc_rx_maxsegsize = ENIC_DEFAULT_RX_MAX_PKT_SIZE,
+	.isc_admin_intrcnt = 3,
+	.isc_vendor_info = enic_vendor_info_array,
+	.isc_driver_version = "1",
+	.isc_driver = &enic_iflib_driver,
+	.isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ,
+
+	/*
+	 * Number of receive queues per receive queue set, with associated
+	 * descriptor settings for each.  Two rings per set: the RQ ring and
+	 * its completion ring.
+	 */
+
+	.isc_nrxqs = 2,
+	.isc_nfl = 1,		/* one free list for each receive command
+				 * queue */
+	.isc_nrxd_min = {16, 16},
+	.isc_nrxd_max = {2048, 2048},
+	.isc_nrxd_default = {64, 64},
+
+	/*
+	 * Number of transmit queues per transmit queue set, with associated
+	 * descriptor settings for each.  Two rings per set: the WQ ring and
+	 * its completion ring.
+	 */
+	.isc_ntxqs = 2,
+	.isc_ntxd_min = {16, 16},
+	.isc_ntxd_max = {2048, 2048},
+	.isc_ntxd_default = {64, 64},
+};
+
+/* device_register: hand iflib the shared-context template above. */
+static void *
+enic_register(device_t dev)
+{
+	return (&enic_sctx_init);
+}
+
+/*
+ * IFDI attach_pre: map PCI BARs, register with the vNIC devcmd firmware
+ * interface, read the MAC address and device configuration, open and
+ * initialize the device, then describe the queue layout (counts, ring
+ * sizes) to iflib through the softc context.  Returns 0 on success or a
+ * nonzero error with everything unwound.
+ */
+static int
+enic_attach_pre(if_ctx_t ctx)
+{
+	if_softc_ctx_t scctx;
+	struct enic_softc *softc;
+	struct vnic_dev *vdev;
+	struct enic *enic;
+	device_t dev;
+
+	int err = -1;
+	int rc = 0;
+	int i;
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;	/* devcmd wait, in units used by vnic_dev_cmd() */
+	struct vnic_stats *stats;
+	int ret;
+
+	dev = iflib_get_dev(ctx);
+	softc = iflib_get_softc(ctx);
+	softc->dev = dev;
+	softc->ctx = ctx;
+	softc->sctx = iflib_get_sctx(ctx);
+	softc->scctx = iflib_get_softc_ctx(ctx);
+	softc->ifp = iflib_get_ifp(ctx);
+	softc->media = iflib_get_media(ctx);
+	/* Scratch table for ifdi_multi_set's multicast address list. */
+	softc->mta = malloc(sizeof(u8) * ETHER_ADDR_LEN *
+	    ENIC_MAX_MULTICAST_ADDRESSES, M_DEVBUF,
+	    M_NOWAIT | M_ZERO);
+	if (softc->mta == NULL)
+		return (ENOMEM);
+	scctx = softc->scctx;
+
+	mtx_init(&softc->enic_lock, "ENIC Lock", NULL, MTX_DEF);
+
+	pci_enable_busmaster(softc->dev);
+	if (enic_pci_mapping(softc)) {
+		/* Unwind what was set up above (previously leaked). */
+		free(softc->mta, M_DEVBUF);
+		pci_disable_busmaster(softc->dev);
+		mtx_destroy(&softc->enic_lock);
+		return (ENXIO);
+	}
+
+	enic = &softc->enic;
+	enic->softc = softc;
+	vdev = &softc->vdev;
+	vdev->softc = softc;
+	enic->vdev = vdev;
+	vdev->priv = enic;
+
+	ENIC_LOCK(softc);
+	vnic_dev_register(vdev, &softc->mem, 1);
+	enic->vdev = vdev;
+	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
+
+	/* Bring the devcmd channel up and fetch the MAC address. */
+	vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
+	vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
+
+	bcopy((u_int8_t *) & a0, softc->mac_addr, ETHER_ADDR_LEN);
+	iflib_set_mac(ctx, softc->mac_addr);
+
+	vnic_register_cbacks(enic->vdev, enic_alloc_consistent,
+	    enic_free_consistent);
+
+	/*
+	 * Allocate the consistent memory for stats and counters upfront so
+	 * both primary and secondary processes can access them.
+	 */
+	ENIC_UNLOCK(softc);
+	err = vnic_dev_alloc_stats_mem(enic->vdev);
+	ENIC_LOCK(softc);
+	if (err) {
+		dev_err(enic, "Failed to allocate cmd memory, aborting\n");
+		ENIC_UNLOCK(softc);
+		goto err_out_unregister;
+	}
+	vnic_dev_stats_clear(enic->vdev);
+	ret = vnic_dev_stats_dump(enic->vdev, &stats);
+	if (ret) {
+		dev_err(enic, "Error in getting stats\n");
+		ENIC_UNLOCK(softc);
+		goto err_out_unregister;
+	}
+	err = vnic_dev_alloc_counter_mem(enic->vdev);
+	if (err) {
+		dev_err(enic, "Failed to allocate counter memory, aborting\n");
+		ENIC_UNLOCK(softc);
+		goto err_out_unregister;
+	}
+
+	/* Issue device open to get device in known state */
+	err = enic_dev_open(enic);
+	if (err) {
+		dev_err(enic, "vNIC dev open failed, aborting\n");
+		ENIC_UNLOCK(softc);
+		goto err_out_unregister;
+	}
+
+	/* Set ingress vlan rewrite mode before vnic initialization */
+	enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
+	err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
+	    enic->ig_vlan_rewrite_mode);
+	if (err) {
+		dev_err(enic,
+		    "Failed to set ingress vlan rewrite mode, aborting.\n");
+		ENIC_UNLOCK(softc);
+		goto err_out_dev_close;
+	}
+
+	/*
+	 * Issue device init to initialize the vnic-to-switch link. We'll
+	 * start with carrier off and wait for link UP notification later to
+	 * turn on carrier. We don't need to wait here for the
+	 * vnic-to-switch link initialization to complete; link UP
+	 * notification is the indication that the process is complete.
+	 */
+
+	err = vnic_dev_init(enic->vdev, 0);
+	if (err) {
+		dev_err(enic, "vNIC dev init failed, aborting\n");
+		ENIC_UNLOCK(softc);
+		goto err_out_dev_close;
+	}
+
+	err = enic_dev_init(enic);
+	if (err) {
+		dev_err(enic, "Device initialization failed, aborting\n");
+		ENIC_UNLOCK(softc);
+		goto err_out_dev_close;
+	}
+	ENIC_UNLOCK(softc);
+
+	enic->port_mtu = vnic_dev_mtu(enic->vdev);
+
+	/* Describe queue geometry and capabilities to iflib. */
+	softc->scctx = iflib_get_softc_ctx(ctx);
+	scctx = softc->scctx;
+	scctx->isc_txrx = &enic_txrx;
+	scctx->isc_capabilities = scctx->isc_capenable = 0;
+	scctx->isc_tx_csum_flags = 0;
+	scctx->isc_max_frame_size = enic->config.mtu + ETHER_HDR_LEN + \
+	    ETHER_CRC_LEN;
+	scctx->isc_nrxqsets_max = enic->conf_rq_count;
+	scctx->isc_ntxqsets_max = enic->conf_wq_count;
+	scctx->isc_nrxqsets = enic->conf_rq_count;
+	scctx->isc_ntxqsets = enic->conf_wq_count;
+	/* Ring i is the WQ/RQ; ring i + count is its completion ring. */
+	for (i = 0; i < enic->conf_wq_count; i++) {
+		scctx->isc_ntxd[i] = enic->config.wq_desc_count;
+		scctx->isc_txqsizes[i] = sizeof(struct cq_enet_wq_desc)
+		    * scctx->isc_ntxd[i];
+		scctx->isc_ntxd[i + enic->conf_wq_count] =
+		    enic->config.wq_desc_count;
+		scctx->isc_txqsizes[i + enic->conf_wq_count] =
+		    sizeof(struct cq_desc) * scctx->isc_ntxd[i +
+		    enic->conf_wq_count];
+	}
+	for (i = 0; i < enic->conf_rq_count; i++) {
+		scctx->isc_nrxd[i] = enic->config.rq_desc_count;
+		scctx->isc_rxqsizes[i] = sizeof(struct cq_enet_rq_desc) *
+		    scctx->isc_nrxd[i];
+		scctx->isc_nrxd[i + enic->conf_rq_count] =
+		    enic->config.rq_desc_count;
+		scctx->isc_rxqsizes[i + enic->conf_rq_count] = sizeof(struct
+		    cq_desc) * scctx->isc_nrxd[i + enic->conf_rq_count];
+	}
+	scctx->isc_tx_nsegments = 31;
+
+	scctx->isc_vectors = enic->conf_cq_count;
+	scctx->isc_msix_bar = -1;
+
+	ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+	ifmedia_add(softc->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
+	ifmedia_add(softc->media, IFM_ETHER | IFM_10_FL, 0, NULL);
+
+	/*
+	 * Allocate the CQ here since TX is called first before RX for now
+	 * assume RX and TX are the same.  (Fixed: the size expression
+	 * previously multiplied only wq_count by sizeof(struct vnic_cq)
+	 * due to missing parentheses, under-allocating the array.)
+	 */
+	if (softc->enic.cq == NULL)
+		softc->enic.cq = malloc(sizeof(struct vnic_cq) *
+		    (softc->enic.wq_count + softc->enic.rq_count), M_DEVBUF,
+		    M_NOWAIT | M_ZERO);
+	if (softc->enic.cq == NULL)
+		/* Fixed: previously returned ENOMEM here, leaking all of
+		 * the state set up above. */
+		goto err_out_dev_close;
+
+	softc->enic.cq->ntxqsets = softc->enic.wq_count + softc->enic.rq_count;
+
+	/*
+	 * NOTE(review): this repeats the vnic_dev_alloc_stats_mem() call
+	 * made earlier and deliberately continues on failure (log only).
+	 */
+	err = vnic_dev_alloc_stats_mem(enic->vdev);
+	if (err) {
+		dev_err(enic, "Failed to allocate cmd memory, aborting\n");
+	}
+
+	return (rc);
+
+err_out_dev_close:
+	vnic_dev_close(enic->vdev);
+err_out_unregister:
+	free(softc->vdev.devcmd, M_DEVBUF);
+	free(softc->enic.intr_queues, M_DEVBUF);
+	free(softc->enic.cq, M_DEVBUF);
+	free(softc->mta, M_DEVBUF);
+	rc = -1;
+	pci_disable_busmaster(softc->dev);
+	enic_pci_mapping_free(softc);
+	mtx_destroy(&softc->enic_lock);
+	return (rc);
+}
+
+/*
+ * Allocate MSI-X interrupt vectors: one filter per RX queue, one softirq
+ * per TX queue, then two admin vectors (link events and errors).  Each
+ * hardware vector's control registers are looked up and the vector left
+ * masked; enic_intr_enable_all() unmasks them later.
+ *
+ * Returns 0 on success or an errno on failure.
+ */
+static int
+enic_msix_intr_assign(if_ctx_t ctx, int msix)
+{
+	struct enic_softc *softc;
+	struct enic *enic;
+	if_softc_ctx_t scctx;
+
+	int error;
+	int i;
+	char irq_name[16];
+
+	softc = iflib_get_softc(ctx);
+	enic = &softc->enic;
+	scctx = softc->scctx;
+
+	ENIC_LOCK(softc);
+	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX);
+	ENIC_UNLOCK(softc);
+
+	enic->intr_queues = malloc(sizeof(*enic->intr_queues) *
+	    enic->conf_intr_count, M_DEVBUF, M_NOWAIT | M_ZERO);
+	enic->intr = malloc(sizeof(*enic->intr) * msix, M_DEVBUF, M_NOWAIT
+	    | M_ZERO);
+	/* M_NOWAIT allocations may fail; bail out before dereferencing. */
+	if (enic->intr_queues == NULL || enic->intr == NULL) {
+		free(enic->intr_queues, M_DEVBUF);
+		free(enic->intr, M_DEVBUF);
+		enic->intr_queues = NULL;
+		enic->intr = NULL;
+		return (ENOMEM);
+	}
+
+	for (i = 0; i < scctx->isc_nrxqsets; i++) {
+		snprintf(irq_name, sizeof(irq_name), "erxq%d:%d", i,
+		    device_get_unit(softc->dev));
+
+		error = iflib_irq_alloc_generic(ctx,
+		    &enic->intr_queues[i].intr_irq, i + 1, IFLIB_INTR_RX,
+		    enic_rxq_intr, &enic->rq[i], i, irq_name);
+		if (error) {
+			device_printf(iflib_get_dev(ctx),
+			    "Failed to register rxq %d interrupt handler\n", i);
+			return (error);
+		}
+		enic->intr[i].index = i;
+		enic->intr[i].vdev = enic->vdev;
+		ENIC_LOCK(softc);
+		enic->intr[i].ctrl = vnic_dev_get_res(enic->vdev,
+		    RES_TYPE_INTR_CTRL, i);
+		vnic_intr_mask(&enic->intr[i]);
+		ENIC_UNLOCK(softc);
+	}
+
+	/* TX completions are handled by softirq; no hardware filter needed. */
+	for (i = scctx->isc_nrxqsets; i < scctx->isc_nrxqsets +
+	    scctx->isc_ntxqsets; i++) {
+		snprintf(irq_name, sizeof(irq_name), "etxq%d:%d", i -
+		    scctx->isc_nrxqsets, device_get_unit(softc->dev));
+
+		iflib_softirq_alloc_generic(ctx,
+		    &enic->intr_queues[i].intr_irq, IFLIB_INTR_TX,
+		    &enic->wq[i - scctx->isc_nrxqsets],
+		    i - scctx->isc_nrxqsets, irq_name);
+
+		enic->intr[i].index = i;
+		enic->intr[i].vdev = enic->vdev;
+		ENIC_LOCK(softc);
+		enic->intr[i].ctrl = vnic_dev_get_res(enic->vdev,
+		    RES_TYPE_INTR_CTRL, i);
+		vnic_intr_mask(&enic->intr[i]);
+		ENIC_UNLOCK(softc);
+	}
+
+	/* Admin vector for link-state (notify) events. */
+	i = scctx->isc_nrxqsets + scctx->isc_ntxqsets;
+	error = iflib_irq_alloc_generic(ctx, &softc->enic_event_intr_irq,
+	    i + 1, IFLIB_INTR_ADMIN, enic_event_intr, softc, 0, "event");
+	if (error) {
+		device_printf(iflib_get_dev(ctx),
+		    "Failed to register event interrupt handler\n");
+		return (error);
+	}
+
+	enic->intr[i].index = i;
+	enic->intr[i].vdev = enic->vdev;
+	ENIC_LOCK(softc);
+	enic->intr[i].ctrl = vnic_dev_get_res(enic->vdev, RES_TYPE_INTR_CTRL,
+	    i);
+	vnic_intr_mask(&enic->intr[i]);
+	ENIC_UNLOCK(softc);
+
+	/* Admin vector for error notifications. */
+	i++;
+	error = iflib_irq_alloc_generic(ctx, &softc->enic_err_intr_irq,
+	    i + 1, IFLIB_INTR_ADMIN, enic_err_intr, softc, 0, "err");
+	if (error) {
+		device_printf(iflib_get_dev(ctx),
+		    "Failed to register err interrupt handler\n");
+		return (error);
+	}
+	enic->intr[i].index = i;
+	enic->intr[i].vdev = enic->vdev;
+	ENIC_LOCK(softc);
+	enic->intr[i].ctrl = vnic_dev_get_res(enic->vdev, RES_TYPE_INTR_CTRL,
+	    i);
+	vnic_intr_mask(&enic->intr[i]);
+	ENIC_UNLOCK(softc);
+
+	enic->intr_count = msix;
+
+	return (0);
+}
+
+/*
+ * Release every IRQ allocated by enic_msix_intr_assign() and free the
+ * interrupt bookkeeping arrays.  Vector layout mirrors allocation:
+ * RX queues first, then TX queues, then the event and err admin vectors.
+ */
+static void
+enic_free_irqs(struct enic_softc *softc)
+{
+	if_softc_ctx_t scctx;
+
+	struct enic *enic;
+	int i;
+
+	scctx = softc->scctx;
+	enic = &softc->enic;
+
+	for (i = 0; i < scctx->isc_nrxqsets + scctx->isc_ntxqsets; i++) {
+		iflib_irq_free(softc->ctx, &enic->intr_queues[i].intr_irq);
+	}
+
+	iflib_irq_free(softc->ctx, &softc->enic_event_intr_irq);
+	iflib_irq_free(softc->ctx, &softc->enic_err_intr_irq);
+	free(enic->intr_queues, M_DEVBUF);
+	free(enic->intr, M_DEVBUF);
+}
+
+/*
+ * iflib post-attach hook: create sysctl nodes, program the vNIC queue,
+ * CQ and interrupt resources, apply the default packet filter, and
+ * select the default media.
+ */
+static int
+enic_attach_post(if_ctx_t ctx)
+{
+	struct enic *enic;
+	struct enic_softc *softc;
+	int error = 0;
+
+	softc = iflib_get_softc(ctx);
+	enic = &softc->enic;
+
+	enic_setup_sysctl(softc);
+
+	enic_init_vnic_resources(enic);
+	enic_setup_finish(enic);
+
+	/*
+	 * NOTE(review): IFM_ETHER | IFM_AUTO is also added earlier during
+	 * attach; this second ifmedia_add() looks redundant — confirm.
+	 */
+	ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+	ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);
+
+	return (error);
+}
+
+/*
+ * iflib detach hook: tear down notification, interrupts, the vNIC
+ * device, and the PCI resources acquired during attach.
+ */
+static int
+enic_detach(if_ctx_t ctx)
+{
+	struct enic_softc *softc;
+	struct enic *enic;
+
+	softc = iflib_get_softc(ctx);
+	enic = &softc->enic;
+
+	/* Stop link-state notifications before freeing their IRQ. */
+	vnic_dev_notify_unset(enic->vdev);
+
+	enic_free_irqs(softc);
+
+	ENIC_LOCK(softc);
+	vnic_dev_close(enic->vdev);
+	free(softc->vdev.devcmd, M_DEVBUF);
+	pci_disable_busmaster(softc->dev);
+	enic_pci_mapping_free(softc);
+	ENIC_UNLOCK(softc);
+
+	return 0;
+}
+
+/*
+ * iflib callback: allocate driver state for the transmit queues and
+ * record the DMA descriptor rings iflib has allocated.  Per queue set,
+ * ring 0 is the work queue (WQ) and ring 1 is its completion queue
+ * (CQ), matching the sizes published in scctx at attach time.
+ */
+static int
+enic_tx_queues_alloc(if_ctx_t ctx, caddr_t * vaddrs, uint64_t * paddrs,
+    int ntxqs, int ntxqsets)
+{
+	struct enic_softc *softc;
+	int q;
+
+	softc = iflib_get_softc(ctx);
+	/* Allocate the array of transmit queues */
+	softc->enic.wq = malloc(sizeof(struct vnic_wq) *
+	    ntxqsets, M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (softc->enic.wq == NULL)
+		return (ENOMEM);
+
+	/* Initialize driver state for each transmit queue */
+
+	/*
+	 * Allocate queue state that is shared with the device. This check
+	 * and call is performed in both enic_tx_queues_alloc() and
+	 * enic_rx_queues_alloc() so that we don't have to care which order
+	 * iflib invokes those routines in.
+	 */
+
+	/* Record descriptor ring vaddrs and paddrs */
+	ENIC_LOCK(softc);
+	for (q = 0; q < ntxqsets; q++) {
+		struct vnic_wq *wq;
+		struct vnic_cq *cq;
+		unsigned int cq_wq;
+
+		wq = &softc->enic.wq[q];
+		cq_wq = enic_cq_wq(&softc->enic, q);
+		cq = &softc->enic.cq[cq_wq];
+
+		/* Completion ring */
+		wq->vdev = softc->enic.vdev;
+		wq->index = q;
+		wq->ctrl = vnic_dev_get_res(softc->enic.vdev, RES_TYPE_WQ,
+		    wq->index);
+		/* Keep the WQ quiescent until enic_start_wq(). */
+		vnic_wq_disable(wq);
+
+		wq->ring.desc_size = sizeof(struct wq_enet_desc);
+		wq->ring.desc_count = softc->scctx->isc_ntxd[q];
+		wq->ring.desc_avail = wq->ring.desc_count - 1;
+		wq->ring.last_count = wq->ring.desc_count;
+		wq->head_idx = 0;
+		wq->tail_idx = 0;
+
+		wq->ring.size = wq->ring.desc_count * wq->ring.desc_size;
+		wq->ring.descs = vaddrs[q * ntxqs + 0];
+		wq->ring.base_addr = paddrs[q * ntxqs + 0];
+
+		/* Command ring */
+		cq->vdev = softc->enic.vdev;
+		cq->index = cq_wq;
+		cq->ctrl = vnic_dev_get_res(softc->enic.vdev,
+		    RES_TYPE_CQ, cq->index);
+		cq->ring.desc_size = sizeof(struct cq_enet_wq_desc);
+		cq->ring.desc_count = softc->scctx->isc_ntxd[q];
+		cq->ring.desc_avail = cq->ring.desc_count - 1;
+
+		cq->ring.size = cq->ring.desc_count * cq->ring.desc_size;
+		cq->ring.descs = vaddrs[q * ntxqs + 1];
+		cq->ring.base_addr = paddrs[q * ntxqs + 1];
+
+	}
+
+	ENIC_UNLOCK(softc);
+
+	return (0);
+}
+
+
+
+/*
+ * iflib callback: allocate driver state for the receive queues and
+ * record the DMA descriptor rings iflib has allocated.  Per queue set,
+ * ring 0 is the completion queue (CQ) and ring 1 is the receive queue
+ * (RQ) — note the reverse of the TX layout.
+ */
+static int
+enic_rx_queues_alloc(if_ctx_t ctx, caddr_t * vaddrs, uint64_t * paddrs,
+    int nrxqs, int nrxqsets)
+{
+	struct enic_softc *softc;
+	int q;
+
+	softc = iflib_get_softc(ctx);
+	/* Allocate the array of receive queues */
+	softc->enic.rq = malloc(sizeof(struct vnic_rq) * nrxqsets, M_DEVBUF,
+	    M_NOWAIT | M_ZERO);
+	if (softc->enic.rq == NULL)
+		return (ENOMEM);
+
+	/* Initialize driver state for each receive queue */
+
+	/*
+	 * Allocate queue state that is shared with the device. This check
+	 * and call is performed in both enic_tx_queues_alloc() and
+	 * enic_rx_queues_alloc() so that we don't have to care which order
+	 * iflib invokes those routines in.
+	 */
+
+	/* Record descriptor ring vaddrs and paddrs */
+	ENIC_LOCK(softc);
+	for (q = 0; q < nrxqsets; q++) {
+		struct vnic_rq *rq;
+		struct vnic_cq *cq;
+		unsigned int cq_rq;
+
+		rq = &softc->enic.rq[q];
+		cq_rq = enic_cq_rq(&softc->enic, q);
+		cq = &softc->enic.cq[cq_rq];
+
+		/* Completion ring */
+		cq->vdev = softc->enic.vdev;
+		cq->index = cq_rq;
+		cq->ctrl = vnic_dev_get_res(softc->enic.vdev, RES_TYPE_CQ,
+		    cq->index);
+		/*
+		 * NOTE(review): sized with cq_enet_wq_desc on an RX CQ;
+		 * the attach path sizes RX CQs with cq_enet_rq_desc.
+		 * Confirm both entry formats are the same 16 bytes.
+		 */
+		cq->ring.desc_size = sizeof(struct cq_enet_wq_desc);
+		cq->ring.desc_count = softc->scctx->isc_nrxd[1];
+		cq->ring.desc_avail = cq->ring.desc_count - 1;
+
+		cq->ring.size = cq->ring.desc_count * cq->ring.desc_size;
+		cq->ring.descs = vaddrs[q * nrxqs + 0];
+		cq->ring.base_addr = paddrs[q * nrxqs + 0];
+
+		/* Command ring(s) */
+		rq->vdev = softc->enic.vdev;
+
+		rq->index = q;
+		rq->ctrl = vnic_dev_get_res(softc->enic.vdev,
+		    RES_TYPE_RQ, rq->index);
+		/* Keep the RQ quiescent until enic_start_rq(). */
+		vnic_rq_disable(rq);
+
+		rq->ring.desc_size = sizeof(struct rq_enet_desc);
+		rq->ring.desc_count = softc->scctx->isc_nrxd[0];
+		rq->ring.desc_avail = rq->ring.desc_count - 1;
+
+		rq->ring.size = rq->ring.desc_count * rq->ring.desc_size;
+		rq->ring.descs = vaddrs[q * nrxqs + 1];
+		rq->ring.base_addr = paddrs[q * nrxqs + 1];
+		rq->need_initial_post = true;
+	}
+
+	ENIC_UNLOCK(softc);
+
+	return (0);
+}
+
+/*
+ * iflib callback: free the RQ/WQ/CQ descriptor-state arrays.  Pointers
+ * are cleared so a repeated call, or an overlapping teardown path that
+ * also frees enic.cq, cannot double-free.
+ */
+static void
+enic_queues_free(if_ctx_t ctx)
+{
+	struct enic_softc *softc;
+	softc = iflib_get_softc(ctx);
+
+	free(softc->enic.rq, M_DEVBUF);
+	softc->enic.rq = NULL;
+	free(softc->enic.wq, M_DEVBUF);
+	softc->enic.wq = NULL;
+	free(softc->enic.cq, M_DEVBUF);
+	softc->enic.cq = NULL;
+}
+
+/*
+ * RX queue interrupt filter: hand off to the iflib RX thread while the
+ * interface is running, otherwise acknowledge and drop.
+ */
+static int
+enic_rxq_intr(void *rxq)
+{
+	struct vnic_rq *rq = (struct vnic_rq *)rxq;
+	struct ifnet *ifp = iflib_get_ifp(rq->vdev->softc->ctx);
+
+	return ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 ?
+	    FILTER_SCHEDULE_THREAD : FILTER_HANDLED);
+}
+
+/*
+ * Event (notify) interrupt filter: pick up firmware-side changes to the
+ * port MTU and refresh the link state.
+ */
+static int
+enic_event_intr(void *vsc)
+{
+	struct enic_softc *softc = vsc;
+	struct enic *enic = &softc->enic;
+	uint32_t new_mtu;
+
+	new_mtu = vnic_dev_mtu(enic->vdev);
+	if (new_mtu != 0 && new_mtu != enic->port_mtu)
+		enic->port_mtu = new_mtu;
+
+	enic_link_status(softc);
+
+	return (FILTER_HANDLED);
+}
+
+/*
+ * Error interrupt filter: recover by restarting the data path.
+ *
+ * NOTE(review): enic_stop()/enic_init() are invoked directly from
+ * filter (interrupt) context here — confirm they are safe without
+ * deferring to a taskqueue.
+ */
+static int
+enic_err_intr(void *vsc)
+{
+	struct enic_softc *softc;
+
+	softc = vsc;
+
+	enic_stop(softc->ctx);
+	enic_init(softc->ctx);
+
+	return (FILTER_HANDLED);
+}
+
+/*
+ * Quiesce the adapter: stop and clean every WQ, RQ, their paired CQs,
+ * and the interrupt state.  Idempotent via the 'stopped' flag, which
+ * enic_init() clears on restart.
+ */
+static void
+enic_stop(if_ctx_t ctx)
+{
+	struct enic_softc *softc;
+	struct enic *enic;
+	if_softc_ctx_t scctx;
+	unsigned int index;
+
+	softc = iflib_get_softc(ctx);
+	scctx = softc->scctx;
+	enic = &softc->enic;
+
+	if (softc->stopped)
+		return;
+	softc->link_active = 0;
+	softc->stopped = 1;
+
+	for (index = 0; index < scctx->isc_ntxqsets; index++) {
+		enic_stop_wq(enic, index);
+		vnic_wq_clean(&enic->wq[index]);
+		/* Clean the CQ paired with this WQ (was enic_cq_rq). */
+		vnic_cq_clean(&enic->cq[enic_cq_wq(enic, index)]);
+	}
+
+	for (index = 0; index < scctx->isc_nrxqsets; index++) {
+		vnic_rq_clean(&enic->rq[index]);
+		/* Clean the CQ paired with this RQ (was enic_cq_wq). */
+		vnic_cq_clean(&enic->cq[enic_cq_rq(enic, index)]);
+	}
+
+	for (index = 0; index < scctx->isc_vectors; index++) {
+		vnic_intr_clean(&enic->intr[index]);
+	}
+}
+
+/*
+ * Bring the data path up: prepare and start every WQ and RQ, program
+ * the station MAC address, enable the vNIC, and report link state.
+ */
+static void
+enic_init(if_ctx_t ctx)
+{
+	struct enic_softc *softc;
+	struct enic *enic;
+	if_softc_ctx_t scctx;
+	unsigned int index;
+
+	softc = iflib_get_softc(ctx);
+	scctx = softc->scctx;
+	enic = &softc->enic;
+
+	/* Re-arm enic_stop(): it early-returns while 'stopped' is set. */
+	softc->stopped = 0;
+
+	for (index = 0; index < scctx->isc_ntxqsets; index++)
+		enic_prep_wq_for_simple_tx(&softc->enic, index);
+
+	for (index = 0; index < scctx->isc_ntxqsets; index++)
+		enic_start_wq(enic, index);
+
+	for (index = 0; index < scctx->isc_nrxqsets; index++)
+		enic_start_rq(enic, index);
+
+	/* Use the current MAC address. */
+	bcopy(IF_LLADDR(softc->ifp), softc->lladdr, ETHER_ADDR_LEN);
+	enic_set_lladdr(softc);
+
+	ENIC_LOCK(softc);
+	vnic_dev_enable_wait(enic->vdev);
+	ENIC_UNLOCK(softc);
+
+	enic_link_status(softc);
+}
+
+/* Deprogram every multicast address currently known to the NIC. */
+static void
+enic_del_mcast(struct enic_softc *softc)
+{
+	struct enic *enic = &softc->enic;
+	int idx;
+
+	for (idx = 0; idx < softc->mc_count; idx++)
+		vnic_dev_del_addr(enic->vdev,
+		    &softc->mta[idx * ETHER_ADDR_LEN]);
+	softc->multicast = 0;
+	softc->mc_count = 0;
+}
+
+/* Program every address in the mta[] table into the NIC. */
+static void
+enic_add_mcast(struct enic_softc *softc)
+{
+	struct enic *enic = &softc->enic;
+	int idx;
+
+	for (idx = 0; idx < softc->mc_count; idx++)
+		vnic_dev_add_addr(enic->vdev,
+		    &softc->mta[idx * ETHER_ADDR_LEN]);
+	softc->multicast = 1;
+}
+
+/*
+ * if_foreach_llmaddr() callback: copy one link-level multicast address
+ * into the flat mta[] table, stopping at table capacity.
+ */
+static u_int
+enic_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int idx)
+{
+	uint8_t *mta;
+
+	mta = arg;
+	if (idx == ENIC_MAX_MULTICAST_ADDRESSES)
+		return (0);
+
+	bcopy(LLADDR(sdl), &mta[idx * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
+	return (1);
+}
+
+/*
+ * iflib callback: resynchronize the NIC multicast filter with the
+ * interface's link-level multicast list, then refresh the packet
+ * filter (promisc/allmulti) settings.
+ */
+static void
+enic_multi_set(if_ctx_t ctx)
+{
+	struct ifnet *ifp;
+	struct enic_softc *softc;
+	u_int count;
+
+	softc = iflib_get_softc(ctx);
+	ifp = iflib_get_ifp(ctx);
+
+	ENIC_LOCK(softc);
+	/* Drop the old list, then program the current one. */
+	enic_del_mcast(softc);
+	count = if_foreach_llmaddr(ifp, enic_copy_maddr, softc->mta);
+	softc->mc_count = count;
+	enic_add_mcast(softc);
+	ENIC_UNLOCK(softc);
+
+	if (ifp->if_flags & IFF_PROMISC) {
+		softc->promisc = 1;
+	} else {
+		softc->promisc = 0;
+	}
+	if (ifp->if_flags & IFF_ALLMULTI) {
+		softc->allmulti = 1;
+	} else {
+		softc->allmulti = 0;
+	}
+	enic_update_packet_filter(&softc->enic);
+}
+
+/*
+ * iflib callback: change the interface MTU.  The device cannot exceed
+ * the port MTU provisioned by the fabric, so larger values are rejected.
+ */
+static int
+enic_mtu_set(if_ctx_t ctx, uint32_t mtu)
+{
+	struct enic_softc *softc = iflib_get_softc(ctx);
+	struct enic *enic = &softc->enic;
+	if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx);
+
+	if (mtu > enic->port_mtu)
+		return (EINVAL);
+
+	enic->config.mtu = mtu;
+	scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+	return (0);
+}
+
+/*
+ * iflib callback: report link status and active media.  When the link
+ * is up, the media entry whose baudrate matches the firmware-reported
+ * port speed is advertised as active.
+ */
+static void
+enic_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
+{
+	struct enic_softc *softc;
+	struct ifmedia_entry *next;
+	uint32_t speed;
+	uint64_t target_baudrate;
+
+	softc = iflib_get_softc(ctx);
+
+	ifmr->ifm_status = IFM_AVALID;
+	ifmr->ifm_active = IFM_ETHER;
+
+	if (enic_link_is_up(softc) != 0) {
+		ENIC_LOCK(softc);
+		speed = vnic_dev_port_speed(&softc->vdev);
+		ENIC_UNLOCK(softc);
+		/* Firmware reports speed in kbps; scale to bps. */
+		target_baudrate = 1000ull * speed;
+		LIST_FOREACH(next, &(iflib_get_media(ctx)->ifm_list), ifm_list) {
+			if (ifmedia_baudrate(next->ifm_media) == target_baudrate) {
+				ifmr->ifm_active |= next->ifm_media;
+			}
+		}
+
+		ifmr->ifm_status |= IFM_ACTIVE;
+		ifmr->ifm_active |= IFM_AUTO;
+	} else
+		ifmr->ifm_active |= IFM_NONE;
+}
+
+/* Manual media selection is unsupported; the VIC firmware owns the link. */
+static int
+enic_media_change(if_ctx_t ctx)
+{
+
+	return (ENODEV);
+}
+
+/*
+ * iflib callback: push the interface promiscuous/allmulti flags into
+ * the device packet filter.  The 'flags' argument is unused; current
+ * if_flags are consulted directly.
+ */
+static int
+enic_promisc_set(if_ctx_t ctx, int flags)
+{
+	struct enic_softc *softc = iflib_get_softc(ctx);
+	struct ifnet *ifp = iflib_get_ifp(ctx);
+
+	softc->promisc = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;
+	softc->allmulti = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;
+	enic_update_packet_filter(&softc->enic);
+
+	return (0);
+}
+
+/*
+ * iflib callback: return interface counters.  Only the generic iflib
+ * defaults are supported; out-of-range counters read as zero.
+ */
+static uint64_t
+enic_get_counter(if_ctx_t ctx, ift_counter cnt)
+{
+	if_t ifp = iflib_get_ifp(ctx);
+	uint64_t value = 0;
+
+	if (cnt < IFCOUNTERS)
+		value = if_get_counter_default(ifp, cnt);
+
+	return (value);
+}
+
+/* iflib admin tick: refresh the link state reported to the stack. */
+static void
+enic_update_admin_status(if_ctx_t ctx)
+{
+	enic_link_status(iflib_get_softc(ctx));
+}
+
+uint32_t iflib_get_flags(if_ctx_t ctx);
+
+/*
+ * Per-TX-queue timer callback.  Used only to periodically refresh the
+ * device statistics block read by the sysctl handlers; the queue id is
+ * not needed.
+ */
+static void
+enic_txq_timer(if_ctx_t ctx, uint16_t qid)
+{
+
+	struct enic_softc *softc;
+	struct enic *enic;
+	struct vnic_stats *stats;
+	int ret;
+
+	softc = iflib_get_softc(ctx);
+	enic = &softc->enic;
+
+	ENIC_LOCK(softc);
+	ret = vnic_dev_stats_dump(enic->vdev, &stats);
+	ENIC_UNLOCK(softc);
+	if (ret) {
+		dev_err(enic, "Error in getting stats\n");
+	}
+}
+
+/* Return non-zero when firmware reports the vNIC link as up. */
+static int
+enic_link_is_up(struct enic_softc *softc)
+{
+	int status;
+
+	status = vnic_dev_link_status(&softc->vdev);
+	return (status == 1);
+}
+
+/*
+ * Propagate the firmware-reported link state to iflib, passing the
+ * current port speed on transitions.
+ *
+ * NOTE(review): the raw vnic_dev_port_speed() value is handed to
+ * iflib_link_state_change() as a baudrate, while enic_media_status()
+ * multiplies it by 1000 first — confirm the expected units.
+ */
+static void
+enic_link_status(struct enic_softc *softc)
+{
+	if_ctx_t ctx;
+	uint64_t speed;
+	int link;
+
+	ctx = softc->ctx;
+	link = enic_link_is_up(softc);
+
+	ENIC_LOCK(softc);
+	speed = vnic_dev_port_speed(&softc->vdev);
+	ENIC_UNLOCK(softc);
+
+	if (link != 0 && softc->link_active == 0) {
+		softc->link_active = 1;
+		iflib_link_state_change(ctx, LINK_STATE_UP, speed);
+	} else if (link == 0 && softc->link_active != 0) {
+		softc->link_active = 0;
+		iflib_link_state_change(ctx, LINK_STATE_DOWN, speed);
+	}
+}
+
+/* Program the station MAC address cached in softc->lladdr into the NIC. */
+static void
+enic_set_lladdr(struct enic_softc *softc)
+{
+	struct enic *enic = &softc->enic;
+
+	ENIC_LOCK(softc);
+	vnic_dev_add_addr(enic->vdev, softc->lladdr);
+	ENIC_UNLOCK(softc);
+}
+
+
+/*
+ * Attach TX hardware-statistics sysctl leaves under 'hstats'.  The
+ * counters live in the device-wide stats block reached through queue
+ * i's vdev pointer.
+ */
+static void
+enic_setup_txq_sysctl(struct vnic_wq *wq, int i, struct sysctl_ctx_list *ctx,
+    struct sysctl_oid_list *child)
+{
+	struct sysctl_oid *txsnode;
+	struct sysctl_oid_list *txslist;
+	struct vnic_stats *stats = wq[i].vdev->stats;
+
+	txsnode = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "hstats",
+	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Host Statistics");
+	txslist = SYSCTL_CHILDREN(txsnode);
+
+	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_frames_ok", CTLFLAG_RD,
+	    &stats->tx.tx_frames_ok, "TX Frames OK");
+	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_unicast_frames_ok", CTLFLAG_RD,
+	    &stats->tx.tx_unicast_frames_ok, "TX unicast frames OK");
+	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_multicast_frames_ok", CTLFLAG_RD,
+	    &stats->tx.tx_multicast_frames_ok, "TX multicast frames OK");
+	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_broadcast_frames_ok", CTLFLAG_RD,
+	    &stats->tx.tx_broadcast_frames_ok, "TX Broadcast frames OK");
+	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_bytes_ok", CTLFLAG_RD,
+	    &stats->tx.tx_bytes_ok, "TX bytes OK");
+	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_unicast_bytes_ok", CTLFLAG_RD,
+	    &stats->tx.tx_unicast_bytes_ok, "TX unicast bytes OK");
+	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_multicast_bytes_ok", CTLFLAG_RD,
+	    &stats->tx.tx_multicast_bytes_ok, "TX multicast bytes OK");
+	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_broadcast_bytes_ok", CTLFLAG_RD,
+	    &stats->tx.tx_broadcast_bytes_ok, "TX broadcast bytes OK");
+	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_drops", CTLFLAG_RD,
+	    &stats->tx.tx_drops, "TX drops");
+	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_errors", CTLFLAG_RD,
+	    &stats->tx.tx_errors, "TX errors");
+	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_tso", CTLFLAG_RD,
+	    &stats->tx.tx_tso, "TX TSO");
+}
+
+/*
+ * Attach RX hardware-statistics sysctl leaves under 'hstats'.  The
+ * counters live in the device-wide stats block reached through queue
+ * i's vdev pointer.
+ */
+static void
+enic_setup_rxq_sysctl(struct vnic_rq *rq, int i, struct sysctl_ctx_list *ctx,
+    struct sysctl_oid_list *child)
+{
+	struct sysctl_oid *rxsnode;
+	struct sysctl_oid_list *rxslist;
+	struct vnic_stats *stats = rq[i].vdev->stats;
+
+	rxsnode = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "hstats",
+	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Host Statistics");
+	rxslist = SYSCTL_CHILDREN(rxsnode);
+
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_ok", CTLFLAG_RD,
+	    &stats->rx.rx_frames_ok, "RX Frames OK");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_total", CTLFLAG_RD,
+	    &stats->rx.rx_frames_total, "RX frames total");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_unicast_frames_ok", CTLFLAG_RD,
+	    &stats->rx.rx_unicast_frames_ok, "RX unicast frames ok");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_multicast_frames_ok", CTLFLAG_RD,
+	    &stats->rx.rx_multicast_frames_ok, "RX multicast Frames ok");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_broadcast_frames_ok", CTLFLAG_RD,
+	    &stats->rx.rx_broadcast_frames_ok, "RX broadcast frames ok");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_bytes_ok", CTLFLAG_RD,
+	    &stats->rx.rx_bytes_ok, "RX bytes ok");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_unicast_bytes_ok", CTLFLAG_RD,
+	    &stats->rx.rx_unicast_bytes_ok, "RX unicast bytes ok");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_multicast_bytes_ok", CTLFLAG_RD,
+	    &stats->rx.rx_multicast_bytes_ok, "RX multicast bytes ok");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_broadcast_bytes_ok", CTLFLAG_RD,
+	    &stats->rx.rx_broadcast_bytes_ok, "RX broadcast bytes ok");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_drop", CTLFLAG_RD,
+	    &stats->rx.rx_drop, "RX drop");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_errors", CTLFLAG_RD,
+	    &stats->rx.rx_errors, "RX errors");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_rss", CTLFLAG_RD,
+	    &stats->rx.rx_rss, "RX rss");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_crc_errors", CTLFLAG_RD,
+	    &stats->rx.rx_crc_errors, "RX crc errors");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_64", CTLFLAG_RD,
+	    &stats->rx.rx_frames_64, "RX frames 64");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_127", CTLFLAG_RD,
+	    &stats->rx.rx_frames_127, "RX frames 127");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_255", CTLFLAG_RD,
+	    &stats->rx.rx_frames_255, "RX frames 255");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_511", CTLFLAG_RD,
+	    &stats->rx.rx_frames_511, "RX frames 511");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_1023", CTLFLAG_RD,
+	    &stats->rx.rx_frames_1023, "RX frames 1023");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_1518", CTLFLAG_RD,
+	    &stats->rx.rx_frames_1518, "RX frames 1518");
+	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_to_max", CTLFLAG_RD,
+	    &stats->rx.rx_frames_to_max, "RX frames to max");
+}
+
+/*
+ * Attach TX/RX statistics nodes.  The stats block is reached through
+ * the per-queue vdev pointer, which is device-wide, so only queue 0 is
+ * used here.
+ */
+static void
+enic_setup_queue_sysctl(struct enic_softc *softc, struct sysctl_ctx_list *ctx,
+    struct sysctl_oid_list *child)
+{
+	enic_setup_txq_sysctl(softc->enic.wq, 0, ctx, child);
+	enic_setup_rxq_sysctl(softc->enic.rq, 0, ctx, child);
+}
+
+/* Hang the driver's statistics nodes off the device sysctl tree. */
+static void
+enic_setup_sysctl(struct enic_softc *softc)
+{
+	device_t dev = softc->dev;
+	struct sysctl_ctx_list *sctx = device_get_sysctl_ctx(dev);
+	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
+
+	enic_setup_queue_sysctl(softc, sctx, SYSCTL_CHILDREN(tree));
+}
+
+/*
+ * Unmask one interrupt vector and return any pending credits so the
+ * hardware can raise the next interrupt.
+ */
+static void
+enic_enable_intr(struct enic_softc *softc, int irq)
+{
+	struct enic *enic = &softc->enic;
+
+	vnic_intr_unmask(&enic->intr[irq]);
+	vnic_intr_return_all_credits(&enic->intr[irq]);
+}
+
+/* Mask one interrupt vector, reading back to flush the posted write. */
+static void
+enic_disable_intr(struct enic_softc *softc, int irq)
+{
+	struct enic *enic = &softc->enic;
+
+	vnic_intr_mask(&enic->intr[irq]);
+	vnic_intr_masked(&enic->intr[irq]);	/* flush write */
+}
+
+/*
+ * iflib callback: re-enable a TX queue's interrupt.  TX vectors follow
+ * the RX vectors in the MSI-X table, hence the isc_nrxqsets offset.
+ */
+static int
+enic_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
+{
+	struct enic_softc *softc = iflib_get_softc(ctx);
+
+	enic_enable_intr(softc, qid + softc->scctx->isc_nrxqsets);
+
+	return 0;
+}
+
+/* iflib callback: re-enable an RX queue's interrupt (vector == qid). */
+static int
+enic_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
+{
+	struct enic_softc *softc = iflib_get_softc(ctx);
+
+	enic_enable_intr(softc, qid);
+
+	return 0;
+}
+
+/* Unmask every configured interrupt vector. */
+static void
+enic_intr_enable_all(if_ctx_t ctx)
+{
+	struct enic_softc *softc = iflib_get_softc(ctx);
+	if_softc_ctx_t scctx = softc->scctx;
+	int vec;
+
+	for (vec = 0; vec < scctx->isc_vectors; vec++)
+		enic_enable_intr(softc, vec);
+}
+
+/*
+ * Mask every configured interrupt vector.  iflib may invoke this before
+ * enic_attach_post() has run, i.e. before the top level shared data
+ * area is initialized and the device made aware of it.
+ */
+static void
+enic_intr_disable_all(if_ctx_t ctx)
+{
+	struct enic_softc *softc = iflib_get_softc(ctx);
+	if_softc_ctx_t scctx = softc->scctx;
+	int vec;
+
+	for (vec = 0; vec < scctx->isc_vectors; vec++)
+		enic_disable_intr(softc, vec);
+}
+
+/*
+ * Open the vNIC device (with ingress descriptor caching requested),
+ * polling for completion via enic_dev_wait().  Returns 0 or a devcmd
+ * error.
+ */
+static int
+enic_dev_open(struct enic *enic)
+{
+	int err;
+	int flags = CMD_OPENF_IG_DESCCACHE;
+
+	err = enic_dev_wait(enic->vdev, vnic_dev_open,
+	    vnic_dev_open_done, flags);
+	if (err)
+		dev_err(enic_get_dev(enic),
+		    "vNIC device open failed, err %d\n", err);
+
+	return err;
+}
+
+/*
+ * One-time vNIC initialization: read the device configuration and
+ * resource counts, set up link-state notification, and configure
+ * overlay (VXLAN) offload state.
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int
+enic_dev_init(struct enic *enic)
+{
+	int err;
+
+	vnic_dev_intr_coal_timer_info_default(enic->vdev);
+
+	/*
+	 * Get vNIC configuration
+	 */
+	err = enic_get_vnic_config(enic);
+	if (err) {
+		dev_err(enic, "Get vNIC configuration failed, aborting\n");
+		return err;
+	}
+
+	/* Get available resource counts */
+	enic_get_res_counts(enic);
+
+	/* M_NOWAIT may fail; catch it before the array is dereferenced. */
+	enic->intr_queues = malloc(sizeof(*enic->intr_queues) *
+	    enic->conf_intr_count, M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (enic->intr_queues == NULL && enic->conf_intr_count != 0)
+		return -ENOMEM;
+
+	vnic_dev_set_reset_flag(enic->vdev, 0);
+	enic->max_flow_counter = -1;
+
+	/* set up link status checking */
+	vnic_dev_notify_set(enic->vdev, -1);	/* No Intr for notify */
+
+	enic->overlay_offload = false;
+	if (enic->disable_overlay && enic->vxlan) {
+		/*
+		 * Explicitly disable overlay offload as the setting is
+		 * sticky, and resetting vNIC does not disable it.
+		 */
+		if (vnic_dev_overlay_offload_ctrl(enic->vdev,
+		    OVERLAY_FEATURE_VXLAN, OVERLAY_OFFLOAD_DISABLE)) {
+			dev_err(enic, "failed to disable overlay offload\n");
+		} else {
+			dev_info(enic, "Overlay offload is disabled\n");
+		}
+	}
+	if (!enic->disable_overlay && enic->vxlan &&
+	    /* 'VXLAN feature' enables VXLAN, NVGRE, and GENEVE. */
+	    vnic_dev_overlay_offload_ctrl(enic->vdev,
+	    OVERLAY_FEATURE_VXLAN, OVERLAY_OFFLOAD_ENABLE) == 0) {
+		enic->overlay_offload = true;
+		enic->vxlan_port = ENIC_DEFAULT_VXLAN_PORT;
+		dev_info(enic, "Overlay offload is enabled\n");
+		/*
+		 * Reset the vxlan port to the default, as the NIC firmware
+		 * does not reset it automatically and keeps the old setting.
+		 */
+		if (vnic_dev_overlay_offload_cfg(enic->vdev,
+		    OVERLAY_CFG_VXLAN_PORT_UPDATE, ENIC_DEFAULT_VXLAN_PORT)) {
+			dev_err(enic, "failed to update vxlan port\n");
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * DMA-consistent allocation callback used by the vnic_dev layer.
+ * Allocates 'size' bytes through iflib, returning the virtual address
+ * and storing the bus address in *dma_handle (0 on failure).
+ */
+static void *
+enic_alloc_consistent(void *priv, size_t size, bus_addr_t * dma_handle,
+    struct iflib_dma_info *res, u8 * name)
+{
+	struct enic *enic = (struct enic *)priv;
+	int error;
+
+	*dma_handle = 0;
+	error = iflib_dma_alloc(enic->softc->ctx, size, res, BUS_DMA_NOWAIT);
+	if (error) {
+		pr_err("%s : Failed to allocate memory requested for %s\n",
+		    __func__, name);
+		return NULL;
+	}
+
+	*dma_handle = res->idi_paddr;
+	return (res->idi_vaddr);
+}
+
+/*
+ * Counterpart to enic_alloc_consistent(); releases an iflib DMA area.
+ * size/vaddr/dma_handle are unused here — presumably kept to match the
+ * vnic_dev callback signature; confirm against vnic_dev.c.
+ */
+static void
+enic_free_consistent(void *priv, size_t size, void *vaddr,
+    bus_addr_t dma_handle, struct iflib_dma_info *res)
+{
+	iflib_dma_free(res);
+}
+
+/* Map BAR 0 (shareable) and BAR 2 for register access. */
+static int
+enic_pci_mapping(struct enic_softc *softc)
+{
+	int error;
+
+	error = enic_map_bar(softc, &softc->mem, 0, true);
+	if (error != 0)
+		return (error);
+
+	return (enic_map_bar(softc, &softc->io, 2, false));
+}
+
+/*
+ * Release the BAR mappings taken by enic_pci_mapping().  Safe to call
+ * with either BAR unmapped; pointers are cleared to allow re-entry.
+ */
+static void
+enic_pci_mapping_free(struct enic_softc *softc)
+{
+	if (softc->mem.res != NULL)
+		bus_release_resource(softc->dev, SYS_RES_MEMORY,
+		    softc->mem.rid, softc->mem.res);
+	softc->mem.res = NULL;
+
+	if (softc->io.res != NULL)
+		bus_release_resource(softc->dev, SYS_RES_MEMORY,
+		    softc->io.rid, softc->io.res);
+	softc->io.res = NULL;
+}
+
+/*
+ * Issue a devcmd via start(), then poll finished() once per millisecond
+ * for up to two seconds.
+ *
+ * Returns 0 on completion, a devcmd error from start()/finished(), or
+ * -ETIMEDOUT if the command never finishes.
+ */
+static int
+enic_dev_wait(struct vnic_dev *vdev, int (*start) (struct vnic_dev *, int),
+    int (*finished) (struct vnic_dev *, int *), int arg)
+{
+	int done;
+	int err;
+	int i;
+
+	err = start(vdev, arg);
+	if (err)
+		return err;
+
+	/* Wait for func to complete...2 seconds max */
+	for (i = 0; i < 2000; i++) {
+		err = finished(vdev, &done);
+		if (err)
+			return err;
+		if (done)
+			return 0;
+		usleep(1000);
+	}
+	return -ETIMEDOUT;
+}
+
+/*
+ * Activate one PCI memory BAR and cache its bus tag/handle/size in
+ * 'bar'.  Refuses to map a BAR that is already mapped.
+ *
+ * Returns 0, EDOOFUS if already mapped, or ENXIO on allocation failure.
+ */
+static int
+enic_map_bar(struct enic_softc *softc, struct enic_bar_info *bar, int bar_num,
+    bool shareable)
+{
+	uint32_t flag;
+
+	if (bar->res != NULL) {
+		device_printf(softc->dev, "Bar %d already mapped\n", bar_num);
+		return EDOOFUS;
+	}
+
+	bar->rid = PCIR_BAR(bar_num);
+	flag = RF_ACTIVE;
+	if (shareable)
+		flag |= RF_SHAREABLE;
+
+	if ((bar->res = bus_alloc_resource_any(softc->dev,
+	    SYS_RES_MEMORY, &bar->rid, flag)) == NULL) {
+		device_printf(softc->dev,
+		    "PCI BAR%d mapping failure\n", bar_num);
+		return (ENXIO);
+	}
+	bar->tag = rman_get_bustag(bar->res);
+	bar->handle = rman_get_bushandle(bar->res);
+	bar->size = rman_get_size(bar->res);
+
+	return 0;
+}
+
+/*
+ * Program every interrupt, RQ, WQ, and CQ resource on the vNIC with its
+ * ring addresses and interrupt routing.  Called from enic_attach_post().
+ *
+ * NOTE(review): txq_interrupt_offset is a fixed 1, which places the TX
+ * CQs on the vector after a single RX vector — confirm for multi-queue.
+ */
+void
+enic_init_vnic_resources(struct enic *enic)
+{
+	unsigned int error_interrupt_enable = 1;
+	unsigned int error_interrupt_offset = 0;
+	unsigned int rxq_interrupt_enable = 1;
+	unsigned int rxq_interrupt_offset = 0;
+	unsigned int txq_interrupt_enable = 1;
+	unsigned int txq_interrupt_offset = 1;
+	unsigned int index = 0;
+	unsigned int cq_idx;
+	if_softc_ctx_t scctx;
+
+	scctx = enic->softc->scctx;
+
+	for (index = 0; index < enic->intr_count; index++) {
+		vnic_intr_alloc(enic->vdev, &enic->intr[index], index);
+	}
+
+	for (index = 0; index < scctx->isc_nrxqsets; index++) {
+		cq_idx = enic_cq_rq(enic, index);
+
+		vnic_rq_clean(&enic->rq[index]);
+		vnic_rq_init(&enic->rq[index], cq_idx, error_interrupt_enable,
+		    error_interrupt_offset);
+
+		vnic_cq_clean(&enic->cq[cq_idx]);
+		vnic_cq_init(&enic->cq[cq_idx],
+		    0 /* flow_control_enable */ ,
+		    1 /* color_enable */ ,
+		    0 /* cq_head */ ,
+		    0 /* cq_tail */ ,
+		    1 /* cq_tail_color */ ,
+		    rxq_interrupt_enable,
+		    1 /* cq_entry_enable */ ,
+		    0 /* cq_message_enable */ ,
+		    rxq_interrupt_offset,
+		    0 /* cq_message_addr */ );
+		if (rxq_interrupt_enable)
+			rxq_interrupt_offset++;
+	}
+
+	for (index = 0; index < scctx->isc_ntxqsets; index++) {
+		cq_idx = enic_cq_wq(enic, index);
+		vnic_wq_clean(&enic->wq[index]);
+		vnic_wq_init(&enic->wq[index], cq_idx, error_interrupt_enable,
+		    error_interrupt_offset);
+		/* Compute unsupported ol flags for enic_prep_pkts() */
+		enic->wq[index].tx_offload_notsup_mask = 0;
+
+		vnic_cq_clean(&enic->cq[cq_idx]);
+		vnic_cq_init(&enic->cq[cq_idx],
+		    0 /* flow_control_enable */ ,
+		    1 /* color_enable */ ,
+		    0 /* cq_head */ ,
+		    0 /* cq_tail */ ,
+		    1 /* cq_tail_color */ ,
+		    txq_interrupt_enable,
+		    1 /* cq_entry_enable */ ,
+		    0 /* cq_message_enable */ ,
+		    txq_interrupt_offset,
+		    0 /* (u64)enic->wq[index].cqmsg_rz->iova */ );
+
+	}
+
+	for (index = 0; index < enic->intr_count; index++) {
+		vnic_intr_init(&enic->intr[index], 125,
+		    enic->config.intr_timer_type, /* mask_on_assertion */ 1);
+	}
+}
+
+/*
+ * Push the cached filter settings (directed/multicast/broadcast/
+ * promisc/allmulti) to the device in a single devcmd.
+ */
+static void
+enic_update_packet_filter(struct enic *enic)
+{
+	struct enic_softc *softc = enic->softc;
+
+	ENIC_LOCK(softc);
+	vnic_dev_packet_filter(enic->vdev,
+	    softc->directed,
+	    softc->multicast,
+	    softc->broadcast,
+	    softc->promisc,
+	    softc->allmulti);
+	ENIC_UNLOCK(softc);
+}
+
+/*
+ * Apply the driver's default packet filter: accept directed and
+ * broadcast frames plus all multicast; no promiscuous mode.
+ */
+int
+enic_setup_finish(struct enic *enic)
+{
+	struct enic_softc *softc = enic->softc;
+
+	/* Default conf */
+	softc->directed = 1;
+	softc->multicast = 0;
+	softc->broadcast = 1;
+	softc->promisc = 0;
+	softc->allmulti = 1;
+	enic_update_packet_filter(enic);
+
+	return 0;
+}
diff --git a/sys/dev/enic/rq_enet_desc.h b/sys/dev/enic/rq_enet_desc.h
new file mode 100644
index 000000000000..561ab28d0551
--- /dev/null
+++ b/sys/dev/enic/rq_enet_desc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _RQ_ENET_DESC_H_
+#define _RQ_ENET_DESC_H_
+
+/* Ethernet receive queue descriptor: 16B */
+struct rq_enet_desc {
+	__le64 address;		/* buffer DMA address */
+	__le16 length_type;	/* 14-bit length | 2-bit SOP type */
+	u8 reserved[6];
+};
+
+/* Descriptor type: whether the buffer starts a packet (SOP). */
+enum rq_enet_type_types {
+	RQ_ENET_TYPE_ONLY_SOP = 0,
+	RQ_ENET_TYPE_NOT_SOP = 1,
+	RQ_ENET_TYPE_RESV2 = 2,
+	RQ_ENET_TYPE_RESV3 = 3,
+};
+
+/* Field widths and masks for the packed length_type word. */
+#define RQ_ENET_ADDR_BITS 64
+#define RQ_ENET_LEN_BITS 14
+#define RQ_ENET_LEN_MASK ((1 << RQ_ENET_LEN_BITS) - 1)
+#define RQ_ENET_TYPE_BITS 2
+#define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1)
+
+/* Encode an RX descriptor in place (device-visible, little-endian). */
+static inline void rq_enet_desc_enc(volatile struct rq_enet_desc *desc,
+	u64 address, u8 type, u16 length)
+{
+	desc->address = cpu_to_le64(address);
+	desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) |
+		((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS));
+}
+
+/* Decode an RX descriptor's address, length, and SOP type fields. */
+static inline void rq_enet_desc_dec(struct rq_enet_desc *desc,
+	u64 *address, u8 *type, u16 *length)
+{
+	u16 length_type = le16_to_cpu(desc->length_type);
+
+	*address = le64_to_cpu(desc->address);
+	*length = length_type & RQ_ENET_LEN_MASK;
+	*type = (u8)((length_type >> RQ_ENET_LEN_BITS) &
+		RQ_ENET_TYPE_MASK);
+}
+
+#endif /* _RQ_ENET_DESC_H_ */
diff --git a/sys/dev/enic/vnic_cq.c b/sys/dev/enic/vnic_cq.c
new file mode 100644
index 000000000000..72de29e5a381
--- /dev/null
+++ b/sys/dev/enic/vnic_cq.c
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include "enic.h"
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+/*
+ * Program one completion queue's hardware control registers: ring base
+ * and size, color/flow-control/interrupt configuration and the optional
+ * CQ message address.  The register write sequence mirrors the hardware
+ * layout in struct vnic_cq_ctrl; do not reorder casually.
+ */
+void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
+	unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
+	unsigned int cq_tail_color, unsigned int interrupt_enable,
+	unsigned int cq_entry_enable, unsigned int cq_message_enable,
+	unsigned int interrupt_offset, u64 cq_message_addr)
+{
+	u64 paddr;
+
+	/* Tag the physical ring address for the device's address space */
+	paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
+	ENIC_BUS_WRITE_8(cq->ctrl, CQ_RING_BASE, paddr);
+	ENIC_BUS_WRITE_4(cq->ctrl, CQ_RING_SIZE, cq->ring.desc_count);
+	ENIC_BUS_WRITE_4(cq->ctrl, CQ_FLOW_CONTROL_ENABLE, flow_control_enable);
+	ENIC_BUS_WRITE_4(cq->ctrl, CQ_COLOR_ENABLE, color_enable);
+	ENIC_BUS_WRITE_4(cq->ctrl, CQ_HEAD, cq_head);
+	ENIC_BUS_WRITE_4(cq->ctrl, CQ_TAIL, cq_tail);
+	ENIC_BUS_WRITE_4(cq->ctrl, CQ_TAIL_COLOR, cq_tail_color);
+	ENIC_BUS_WRITE_4(cq->ctrl, CQ_INTR_ENABLE, interrupt_enable);
+	ENIC_BUS_WRITE_4(cq->ctrl, CQ_ENTRY_ENABLE, cq_entry_enable);
+	ENIC_BUS_WRITE_4(cq->ctrl, CQ_MESSAGE_ENABLE, cq_message_enable);
+	ENIC_BUS_WRITE_4(cq->ctrl, CQ_INTR_OFFSET, interrupt_offset);
+	ENIC_BUS_WRITE_8(cq->ctrl, CQ_MESSAGE_ADDR, cq_message_addr);
+
+	cq->interrupt_offset = interrupt_offset;
+}
+
+/*
+ * Reset a completion queue to its initial state: software cursor and
+ * color back to zero, hardware head/tail rewound, and the descriptor
+ * ring zeroed.  Tail color starts at 1 so entry 0 reads as unconsumed.
+ */
+void vnic_cq_clean(struct vnic_cq *cq)
+{
+	cq->to_clean = 0;
+	cq->last_color = 0;
+
+	ENIC_BUS_WRITE_4(cq->ctrl, CQ_HEAD, 0);
+	ENIC_BUS_WRITE_4(cq->ctrl, CQ_TAIL, 0);
+	ENIC_BUS_WRITE_4(cq->ctrl, CQ_TAIL_COLOR, 1);
+
+	vnic_dev_clear_desc_ring(&cq->ring);
+}
diff --git a/sys/dev/enic/vnic_cq.h b/sys/dev/enic/vnic_cq.h
new file mode 100644
index 000000000000..26f9009612c5
--- /dev/null
+++ b/sys/dev/enic/vnic_cq.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _VNIC_CQ_H_
+#define _VNIC_CQ_H_
+
+#include "cq_desc.h"
+#include "vnic_dev.h"
+
+/* Completion queue control */
+/*
+ * Hardware layout of the CQ control registers.  The CQ_* defines are
+ * the byte offsets of each field, used with the ENIC_BUS_READ/WRITE
+ * accessors (see vnic_cq_init()); the struct documents the layout.
+ */
+struct vnic_cq_ctrl {
+	u64 ring_base;			/* 0x00 */
+#define CQ_RING_BASE 0x00
+	u32 ring_size;			/* 0x08 */
+#define CQ_RING_SIZE 0x08
+	u32 pad0;
+	u32 flow_control_enable;	/* 0x10 */
+#define CQ_FLOW_CONTROL_ENABLE 0x10
+	u32 pad1;
+	u32 color_enable;		/* 0x18 */
+#define CQ_COLOR_ENABLE 0x18
+	u32 pad2;
+	u32 cq_head;			/* 0x20 */
+#define CQ_HEAD 0x20
+	u32 pad3;
+	u32 cq_tail;			/* 0x28 */
+#define CQ_TAIL 0x28
+	u32 pad4;
+	u32 cq_tail_color;		/* 0x30 */
+#define CQ_TAIL_COLOR 0x30
+	u32 pad5;
+	u32 interrupt_enable;		/* 0x38 */
+#define CQ_INTR_ENABLE 0x38
+	u32 pad6;
+	u32 cq_entry_enable;		/* 0x40 */
+#define CQ_ENTRY_ENABLE 0x40
+	u32 pad7;
+	u32 cq_message_enable;		/* 0x48 */
+#define CQ_MESSAGE_ENABLE 0x48
+	u32 pad8;
+	u32 interrupt_offset;		/* 0x50 */
+#define CQ_INTR_OFFSET 0x50
+	u32 pad9;
+	u64 cq_message_addr;		/* 0x58 */
+#define CQ_MESSAGE_ADDR 0x58
+	u32 pad10;
+};
+
+#ifdef ENIC_AIC
+/* Byte counters feeding adaptive interrupt coalescing */
+struct vnic_rx_bytes_counter {
+	unsigned int small_pkt_bytes_cnt;
+	unsigned int large_pkt_bytes_cnt;
+};
+#endif
+
+/* Software state for one completion queue */
+struct vnic_cq {
+	unsigned int index;		/* CQ number on the device */
+	struct vnic_dev *vdev;
+	struct vnic_res *ctrl;		/* mapped vnic_cq_ctrl registers */
+	struct vnic_dev_ring ring;	/* descriptor ring backing store */
+	unsigned int to_clean;		/* next descriptor index to service */
+	unsigned int last_color;	/* color value of unconsumed entries */
+	unsigned int interrupt_offset;
+#ifdef ENIC_AIC
+	struct vnic_rx_bytes_counter pkt_size_counter;
+	unsigned int cur_rx_coal_timeval;
+	unsigned int tobe_rx_coal_timeval;
+	ktime_t prev_ts;
+#endif
+	int ntxqsets;
+	int nrxqsets;
+	int ntxqsets_start;
+	int nrxqsets_start;
+};
+
+void vnic_cq_free(struct vnic_cq *cq);
+void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
+ unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
+ unsigned int cq_tail_color, unsigned int interrupt_enable,
+ unsigned int cq_entry_enable, unsigned int message_enable,
+ unsigned int interrupt_offset, u64 message_addr);
+void vnic_cq_clean(struct vnic_cq *cq);
+int vnic_cq_mem_size(struct vnic_cq *cq, unsigned int desc_count,
+ unsigned int desc_size);
+
+/*
+ * Drain up to work_to_do completed descriptors from the CQ, invoking
+ * q_service for each one.  Stops early when q_service returns nonzero
+ * or an unconsumed (wrong-color) entry is reached.  Returns the number
+ * of descriptors handed to q_service.
+ */
+static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
+	unsigned int work_to_do,
+	int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+	u8 type, u16 q_number, u16 completed_index, void *opaque),
+	void *opaque)
+{
+	unsigned int nserviced = 0;
+	struct cq_desc *desc;
+	u16 qnum, cidx;
+	u8 dtype, color;
+
+	desc = (struct cq_desc *)((u8 *)cq->ring.descs +
+	    cq->ring.desc_size * cq->to_clean);
+	cq_desc_dec(desc, &dtype, &color, &qnum, &cidx);
+
+	for (;;) {
+		/* The color bit flips on each ring wrap; a mismatch with
+		 * last_color means this entry has not completed yet. */
+		if (color == cq->last_color)
+			break;
+		if ((*q_service)(cq->vdev, desc, dtype, qnum, cidx, opaque))
+			break;
+
+		if (++cq->to_clean == cq->ring.desc_count) {
+			cq->to_clean = 0;
+			cq->last_color = cq->last_color ? 0 : 1;
+		}
+
+		desc = (struct cq_desc *)((u8 *)cq->ring.descs +
+		    cq->ring.desc_size * cq->to_clean);
+		cq_desc_dec(desc, &dtype, &color, &qnum, &cidx);
+
+		if (++nserviced >= work_to_do)
+			break;
+	}
+
+	return nserviced;
+}
+
+/*
+ * Count, without consuming, how many completed descriptors are pending
+ * on the CQ (capped at work_to_do).  All ring state is copied into
+ * locals so the queue itself is left untouched.
+ */
+static inline unsigned int vnic_cq_work(struct vnic_cq *cq,
+	unsigned int work_to_do)
+{
+	unsigned int avail = 0;
+	u32 idx = cq->to_clean;
+	u32 cur_color = cq->last_color;
+	struct cq_desc *desc;
+	u16 qnum, cidx;
+	u8 dtype, color;
+
+	desc = (struct cq_desc *)((u8 *)cq->ring.descs +
+	    cq->ring.desc_size * idx);
+	cq_desc_dec(desc, &dtype, &color, &qnum, &cidx);
+
+	while (color != cur_color) {
+		if (++idx == cq->ring.desc_count) {
+			idx = 0;
+			cur_color = cur_color ? 0 : 1;
+		}
+
+		desc = (struct cq_desc *)((u8 *)cq->ring.descs +
+		    cq->ring.desc_size * idx);
+		cq_desc_dec(desc, &dtype, &color, &qnum, &cidx);
+
+		if (++avail >= work_to_do)
+			break;
+	}
+
+	return avail;
+}
+
+#endif /* _VNIC_CQ_H_ */
diff --git a/sys/dev/enic/vnic_dev.c b/sys/dev/enic/vnic_dev.c
new file mode 100644
index 000000000000..3425d7372e56
--- /dev/null
+++ b/sys/dev/enic/vnic_dev.c
@@ -0,0 +1,1039 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include "enic.h"
+#include "vnic_dev.h"
+#include "vnic_resource.h"
+#include "vnic_devcmd.h"
+#include "vnic_nic.h"
+#include "vnic_stats.h"
+
+#define VNIC_MAX_RES_HDR_SIZE \
+ (sizeof(struct vnic_resource_header) + \
+ sizeof(struct vnic_resource) * RES_TYPE_MAX)
+#define VNIC_RES_STRIDE 128
+
+#define VNIC_MAX_FLOW_COUNTERS 2048
+
+/* Return the opaque private pointer stored in the vnic_dev. */
+void *vnic_dev_priv(struct vnic_dev *vdev)
+{
+	return vdev->priv;
+}
+
+/*
+ * Register DMA-consistent allocation callbacks on the vnic_dev; the
+ * callbacks are stored verbatim for later use by the vdev's owner.
+ */
+void vnic_register_cbacks(struct vnic_dev *vdev,
+	void *(*alloc_consistent)(void *priv, size_t size,
+	    bus_addr_t *dma_handle, struct iflib_dma_info *res,u8 *name),
+	void (*free_consistent)(void *priv,
+	    size_t size, void *vaddr,
+	    bus_addr_t dma_handle,struct iflib_dma_info *res))
+{
+	vdev->alloc_consistent = alloc_consistent;
+	vdev->free_consistent = free_consistent;
+}
+
+/*
+ * Walk the resource directory in BAR0 (normal or mgmt-vnic layout) and
+ * record the bus-space location and count of each resource type the
+ * driver cares about in vdev->res[].  Returns 0 or -EINVAL on a bad
+ * header or allocation failure.
+ */
+static int vnic_dev_discover_res(struct vnic_dev *vdev,
+	struct vnic_dev_bar *bar, unsigned int num_bars)
+{
+	struct enic_softc *softc = vdev->softc;
+	struct vnic_resource_header __iomem *rh;
+	struct mgmt_barmap_hdr __iomem *mrh;
+	struct vnic_resource __iomem *r;
+	int r_offset;
+	u8 type;
+
+	if (num_bars == 0)
+		return -EINVAL;
+
+	/* Shadow copies of the BAR-resident headers, filled via bus space */
+	rh = malloc(sizeof(*rh), M_DEVBUF, M_NOWAIT | M_ZERO);
+	mrh = malloc(sizeof(*mrh), M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (rh == NULL || mrh == NULL) {
+		/* Previously only rh was checked; a failed mrh allocation
+		 * was dereferenced below. */
+		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
+		free(rh, M_DEVBUF);
+		free(mrh, M_DEVBUF);
+		return -EINVAL;
+	}
+
+	/* Check for mgmt vnic in addition to normal vnic */
+	ENIC_BUS_READ_REGION_4(softc, mem, 0, (void *)rh, sizeof(*rh) / 4);
+	ENIC_BUS_READ_REGION_4(softc, mem, 0, (void *)mrh, sizeof(*mrh) / 4);
+	if ((rh->magic != VNIC_RES_MAGIC) ||
+	    (rh->version != VNIC_RES_VERSION)) {
+		if ((mrh->magic != MGMTVNIC_MAGIC) ||
+		    mrh->version != MGMTVNIC_VERSION) {
+			pr_err("vNIC BAR0 res magic/version error " \
+			    "exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
+			    VNIC_RES_MAGIC, VNIC_RES_VERSION,
+			    MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
+			    rh->magic, rh->version);
+			free(rh, M_DEVBUF);
+			free(mrh, M_DEVBUF);
+			return -EINVAL;
+		}
+	}
+
+	if (mrh->magic == MGMTVNIC_MAGIC)
+		r_offset = sizeof(*mrh);
+	else
+		r_offset = sizeof(*rh);
+
+	r = malloc(sizeof(*r), M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (r == NULL) {
+		/* Previously unchecked; bcopy of BAR data into NULL */
+		free(rh, M_DEVBUF);
+		free(mrh, M_DEVBUF);
+		return -EINVAL;
+	}
+	ENIC_BUS_READ_REGION_4(softc, mem, r_offset, (void *)r, sizeof(*r) / 4);
+	while ((type = r->type) != RES_TYPE_EOL) {
+		u8 bar_num = r->bar;
+		u32 bar_offset = r->bar_offset;
+		u32 count = r->count;
+
+		r_offset += sizeof(*r);
+
+		if (bar_num >= num_bars) {
+			/* Previously this continued without fetching the
+			 * next entry, re-testing the same stale descriptor
+			 * forever.  Read the next entry before looping. */
+			ENIC_BUS_READ_REGION_4(softc, mem, r_offset, (void *)r,
+			    sizeof(*r) / 4);
+			continue;
+		}
+
+		switch (type) {
+		case RES_TYPE_WQ:
+		case RES_TYPE_RQ:
+		case RES_TYPE_CQ:
+		case RES_TYPE_INTR_CTRL:
+		case RES_TYPE_INTR_PBA_LEGACY:
+		case RES_TYPE_DEVCMD:
+			break;
+		default:
+			/* Uninteresting type: skip to the next entry */
+			ENIC_BUS_READ_REGION_4(softc, mem, r_offset, (void *)r,
+			    sizeof(*r) / 4);
+			continue;
+		}
+
+		vdev->res[type].count = count;
+		bcopy(&softc->mem, &vdev->res[type].bar, sizeof(softc->mem));
+		vdev->res[type].bar.offset = bar_offset;
+		ENIC_BUS_READ_REGION_4(softc, mem, r_offset, (void *)r,
+		    sizeof(*r) / 4);
+	}
+
+	free(rh, M_DEVBUF);
+	free(mrh, M_DEVBUF);
+	free(r, M_DEVBUF);
+	return 0;
+}
+
+/* Return the number of resources of the given type found at discovery. */
+unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
+	enum vnic_res_type type)
+{
+	return vdev->res[type].count;
+}
+
+/*
+ * Return a freshly allocated vnic_res describing resource `type` at
+ * `index`.  Stride-indexed resources (WQ/RQ/CQ/INTR_CTRL) get their
+ * bar offset advanced by index * VNIC_RES_STRIDE.  Returns NULL if the
+ * resource was never discovered or allocation fails.  The caller owns
+ * the returned memory (M_DEVBUF).
+ */
+void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
+	unsigned int index)
+{
+	struct vnic_res *res;
+
+	if (!vdev->res[type].bar.tag)
+		return NULL;
+
+	res = malloc(sizeof(*res), M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (res == NULL)	/* previously dereferenced unchecked */
+		return NULL;
+	bcopy(&vdev->res[type], res, sizeof(*res));
+
+	switch (type) {
+	case RES_TYPE_WQ:
+	case RES_TYPE_RQ:
+	case RES_TYPE_CQ:
+	case RES_TYPE_INTR_CTRL:
+		res->bar.offset += index * VNIC_RES_STRIDE;
+		break;		/* was an implicit fallthrough adding 0 */
+	default:
+		/* Singleton resources: index is ignored */
+		break;
+	}
+
+	return res;
+}
+
+/*
+ * Compute and record the ring geometry: descriptor count rounded up to
+ * a multiple of 32 (0 requests the 4096 maximum), descriptor size
+ * rounded up to 16 bytes, and total size padded to allow 512-byte base
+ * alignment.  Returns the unaligned (allocation) size in bytes.
+ */
+unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
+	unsigned int desc_count, unsigned int desc_size)
+{
+	const unsigned int count_align = 32;
+	const unsigned int desc_align = 16;
+
+	ring->base_align = 512;
+
+	ring->desc_count = VNIC_ALIGN(desc_count == 0 ? 4096 : desc_count,
+	    count_align);
+	ring->desc_size = VNIC_ALIGN(desc_size, desc_align);
+	ring->size = ring->desc_count * ring->desc_size;
+	ring->size_unaligned = ring->size + ring->base_align;
+
+	return ring->size_unaligned;
+}
+
+/* Zero the (aligned) descriptor ring contents. */
+void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
+{
+	memset(ring->descs, 0, ring->size);
+}
+
+/*
+ * Post a single devcmd to the firmware mailbox and poll for completion.
+ * Arguments are staged in vdev->args before the call; on success the
+ * results are read back into vdev->args.  `wait` is the poll budget in
+ * 100us steps.  Returns 0, or a negative errno: -ENODEV when the PCI
+ * device reads back all-ones (device gone), -EBUSY if the mailbox is
+ * already busy, -ETIMEDOUT, or the firmware-reported error code.
+ */
+static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+	int wait)
+{
+	struct vnic_res __iomem *devcmd = vdev->devcmd;
+	int delay;
+	u32 status;
+	int err;
+
+	status = ENIC_BUS_READ_4(devcmd, DEVCMD_STATUS);
+	if (status == 0xFFFFFFFF) {
+		/* PCI-e target device is gone */
+		return -ENODEV;
+	}
+	if (status & STAT_BUSY) {
+
+		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
+		return -EBUSY;
+	}
+
+	/* Stage input arguments, then kick the command register */
+	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
+		ENIC_BUS_WRITE_REGION_4(devcmd, DEVCMD_ARGS(0), (void *)&vdev->args[0], VNIC_DEVCMD_NARGS * 2);
+	}
+
+	ENIC_BUS_WRITE_4(devcmd, DEVCMD_CMD, cmd);
+
+	/* Fire-and-forget commands return immediately */
+	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) {
+		return 0;
+	}
+
+	for (delay = 0; delay < wait; delay++) {
+
+		udelay(100);
+
+		status = ENIC_BUS_READ_4(devcmd, DEVCMD_STATUS);
+		if (status == 0xFFFFFFFF) {
+			/* PCI-e target device is gone */
+			return -ENODEV;
+		}
+
+		if (!(status & STAT_BUSY)) {
+			if (status & STAT_ERROR) {
+				/* Firmware reports the error code in arg 0 */
+				err = -(int)ENIC_BUS_READ_8(devcmd, DEVCMD_ARGS(0));
+
+				/* CMD_CAPABILITY probes fail routinely */
+				if (cmd != CMD_CAPABILITY)
+					pr_err("Devcmd %d failed " \
+					    "with error code %d\n",
+					    _CMD_N(cmd), err);
+				return err;
+			}
+
+			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
+				/* NOTE(review): "bar" is a field-name token
+				 * consumed by this accessor macro (it differs
+				 * from the write macro's argument list) —
+				 * confirm against enic_compat.h */
+				ENIC_BUS_READ_REGION_4(devcmd, bar, DEVCMD_ARGS(0), (void *)&vdev->args[0], VNIC_DEVCMD_NARGS * 2);
+			}
+
+			return 0;
+		}
+	}
+
+	pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
+	return -ETIMEDOUT;
+}
+
+/*
+ * Issue a devcmd on behalf of another vnic (by index or BDF).  The
+ * proxy header occupies args[0] (proxy target) and args[1] (proxied
+ * command); the caller's arguments follow.  On success the proxied
+ * results are copied back into args.
+ */
+static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
+	enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
+	u64 *args, int nargs, int wait)
+{
+	u32 stat;
+	int ret;
+
+	/* Two argument slots are reserved for the proxy header */
+	if (nargs > VNIC_DEVCMD_NARGS - 2) {
+		pr_err("number of args %d exceeds the maximum\n", nargs);
+		return -EINVAL;
+	}
+
+	memset(vdev->args, 0, sizeof(vdev->args));
+	vdev->args[0] = vdev->proxy_index;
+	vdev->args[1] = cmd;
+	memcpy(&vdev->args[2], args, nargs * sizeof(args[0]));
+
+	ret = _vnic_dev_cmd(vdev, proxy_cmd, wait);
+	if (ret)
+		return ret;
+
+	/* The proxied command's status comes back in arg 0 */
+	stat = (u32)vdev->args[0];
+	if (stat & STAT_ERROR) {
+		ret = (int)vdev->args[1];
+		if (ret != ERR_ECMDUNKNOWN ||
+		    cmd != CMD_CAPABILITY)
+			pr_err("Error %d proxy devcmd %d\n", ret, _CMD_N(cmd));
+		return ret;
+	}
+
+	memcpy(args, &vdev->args[1], nargs * sizeof(args[0]));
+	return 0;
+}
+
+/* Issue a devcmd directly, marshalling arguments through vdev->args. */
+static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
+	enum vnic_devcmd_cmd cmd, u64 *args, int nargs, int wait)
+{
+	int ret;
+
+	if (nargs > VNIC_DEVCMD_NARGS) {
+		pr_err("number of args %d exceeds the maximum\n", nargs);
+		return -EINVAL;
+	}
+
+	memset(vdev->args, 0, sizeof(vdev->args));
+	memcpy(vdev->args, args, nargs * sizeof(args[0]));
+
+	ret = _vnic_dev_cmd(vdev, cmd, wait);
+
+	/* Results are copied back even on failure, as before */
+	memcpy(args, vdev->args, nargs * sizeof(args[0]));
+	return ret;
+}
+
+/*
+ * Issue a two-argument devcmd, routing through a proxy when one is
+ * configured.  *a0/*a1 carry arguments in and, on success, results out.
+ */
+int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+	u64 *a0, u64 *a1, int wait)
+{
+	u64 argbuf[2] = { *a0, *a1 };
+	int ret;
+
+	memset(vdev->args, 0, sizeof(vdev->args));
+
+	if (vdev->proxy == PROXY_BY_INDEX)
+		ret = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
+		    argbuf, ARRAY_SIZE(argbuf), wait);
+	else if (vdev->proxy == PROXY_BY_BDF)
+		ret = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
+		    argbuf, ARRAY_SIZE(argbuf), wait);
+	else
+		ret = vnic_dev_cmd_no_proxy(vdev, cmd, argbuf, 2, wait);
+
+	if (ret == 0) {
+		*a0 = argbuf[0];
+		*a1 = argbuf[1];
+	}
+
+	return ret;
+}
+
+/*
+ * Multi-argument flavor of vnic_dev_cmd(); routes through the
+ * configured proxy, if any.
+ */
+int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+	u64 *args, int nargs, int wait)
+{
+	if (vdev->proxy == PROXY_BY_INDEX)
+		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
+		    args, nargs, wait);
+	if (vdev->proxy == PROXY_BY_BDF)
+		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
+		    args, nargs, wait);
+	return vnic_dev_cmd_no_proxy(vdev, cmd, args, nargs, wait);
+}
+
+/*
+ * Query advanced-filter capability with the V1 capability-mode flag;
+ * the adapter's answer comes back in args.
+ */
+static int vnic_dev_advanced_filters_cap(struct vnic_dev *vdev, u64 *args,
+	int nargs)
+{
+	memset(args, 0, nargs * sizeof(*args));
+	args[0] = CMD_ADD_ADV_FILTER;
+	args[1] = FILTER_CAP_MODE_V1_FLAG;
+	return vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, nargs, 1000);
+}
+
+/* Return nonzero if the NIC supports FILTER_DPDK_1 advanced filters. */
+int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
+{
+	u64 a0 = CMD_ADD_ADV_FILTER, a1 = 0;
+	int wait = 1000;
+
+	if (vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait) != 0)
+		return 0;
+	return (a1 >= (u32)FILTER_DPDK_1);
+}
+
+/* Determine the "best" filtering mode VIC is capable of. Returns one of 3
+ * values or 0 on error:
+ * FILTER_DPDK_1 - advanced filters available
+ * FILTER_USNIC_IP_FLAG - advanced filters but with the restriction that
+ *	the IP layer must be explicitly specified. I.e. cannot have a UDP
+ *	filter that matches both IPv4 and IPv6.
+ * FILTER_IPV4_5TUPLE - fallback if either of the 2 above aren't available.
+ *	all other filter types are not available.
+ * Return true in filter_tags if supported
+ */
+int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
+	u8 *filter_actions)
+{
+	u64 args[4];
+	int err;
+	u32 max_level = 0;
+
+	err = vnic_dev_advanced_filters_cap(vdev, args, 4);
+
+	/* determine supported filter actions */
+	*filter_actions = FILTER_ACTION_RQ_STEERING_FLAG; /* always available */
+	if (args[2] == FILTER_CAP_MODE_V1)
+		*filter_actions = args[3];
+
+	if (err || ((args[0] == 1) && (args[1] == 0))) {
+		/* Adv filter Command not supported or adv filters available but
+		 * not enabled. Try the normal filter capability command.
+		 */
+		args[0] = CMD_ADD_FILTER;
+		args[1] = 0;
+		err = vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, 2, 1000);
+		if (err)
+			return err;
+		max_level = args[1];
+		goto parse_max_level;
+	} else if (args[2] == FILTER_CAP_MODE_V1) {
+		/* parse filter capability mask in args[1] */
+		if (args[1] & FILTER_DPDK_1_FLAG)
+			*mode = FILTER_DPDK_1;
+		else if (args[1] & FILTER_USNIC_IP_FLAG)
+			*mode = FILTER_USNIC_IP;
+		else if (args[1] & FILTER_IPV4_5TUPLE_FLAG)
+			*mode = FILTER_IPV4_5TUPLE;
+		return 0;
+	}
+	max_level = args[1];
+parse_max_level:
+	if (max_level >= (u32)FILTER_USNIC_IP)
+		*mode = FILTER_USNIC_IP;
+	else
+		*mode = FILTER_IPV4_5TUPLE;
+	return 0;
+}
+
+/*
+ * Probe NIC_CFG capability.  *cfg_chk is set when the capability query
+ * succeeds with nonzero results; *weak reports the UDP-weak-RSS flag
+ * from the upper 32 bits of the response.
+ */
+void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk,
+	bool *weak)
+{
+	u64 a0 = CMD_NIC_CFG, a1 = 0;
+	int wait = 1000;
+
+	*cfg_chk = false;
+	*weak = false;
+	if (vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait) == 0 &&
+	    a0 != 0 && a1 != 0) {
+		*cfg_chk = true;
+		*weak = !!((a1 >> 32) & CMD_NIC_CFG_CAPF_UDP_WEAK);
+	}
+}
+
+/* Return nonzero if the firmware supports the given devcmd. */
+int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
+{
+	u64 a0 = (u32)cmd, a1 = 0;
+	int wait = 1000;
+	int ret;
+
+	ret = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
+	/* Supported when the query succeeds and a0 comes back zero */
+	return (ret == 0 && a0 == 0);
+}
+
+/*
+ * Read `size` bytes (1/2/4/8) of device-specific config at `offset`
+ * via CMD_DEV_SPEC into *value.  Any other size is a programming
+ * error (BUG()).  Returns the devcmd status.
+ */
+int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
+	void *value)
+{
+	u64 a0, a1;
+	int wait = 1000;
+	int err;
+
+	a0 = offset;
+	a1 = size;
+
+	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
+
+	/* NOTE(review): *value is stored even when err != 0 (a0 may be
+	 * stale on failure) — callers should check the return value */
+	switch (size) {
+	case 1:
+		*(u8 *)value = (u8)a0;
+		break;
+	case 2:
+		*(u16 *)value = (u16)a0;
+		break;
+	case 4:
+		*(u32 *)value = (u32)a0;
+		break;
+	case 8:
+		*(u64 *)value = a0;
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	return err;
+}
+
+/* Clear the adapter's hardware statistics counters. */
+int vnic_dev_stats_clear(struct vnic_dev *vdev)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+
+	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
+}
+
+/*
+ * DMA the adapter's statistics into the buffer allocated by
+ * vnic_dev_alloc_stats_mem() and return it via *stats.  Returns
+ * -ENOMEM if that buffer was never allocated.
+ */
+int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
+{
+	u64 a0, a1;
+	int wait = 1000;
+	int rc;
+
+	if (!vdev->stats)
+		return -ENOMEM;
+
+	*stats = vdev->stats;
+	a0 = vdev->stats_res.idi_paddr;	/* device DMA target */
+	a1 = sizeof(struct vnic_stats);
+
+	bus_dmamap_sync(vdev->stats_res.idi_tag,
+	    vdev->stats_res.idi_map,
+	    BUS_DMASYNC_POSTREAD);
+	rc = vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
+	bus_dmamap_sync(vdev->stats_res.idi_tag,
+	    vdev->stats_res.idi_map,
+	    BUS_DMASYNC_PREREAD);
+	return (rc);
+}
+
+/*
+ * Configure counter DMA
+ */
+/*
+ * Ask the adapter to periodically DMA up to num_counters flow counters
+ * into the buffer from vnic_dev_alloc_counter_mem(), every `period`
+ * units (must be >= VNIC_COUNTER_DMA_MIN_PERIOD when nonzero).
+ * num_counters == 0 or period == 0 disables the DMA.  Records whether
+ * DMAs are active so they can be stopped on close.
+ */
+int vnic_dev_counter_dma_cfg(struct vnic_dev *vdev, u32 period,
+	u32 num_counters)
+{
+	u64 args[3];
+	int wait = 1000;
+	int err;
+
+	if (num_counters > VNIC_MAX_FLOW_COUNTERS)
+		return -ENOMEM;
+	if (period > 0 && (period < VNIC_COUNTER_DMA_MIN_PERIOD ||
+	    num_counters == 0))
+		return -EINVAL;
+
+	args[0] = num_counters;
+	args[1] = vdev->flow_counters_res.idi_paddr;
+	args[2] = period;
+	bus_dmamap_sync(vdev->flow_counters_res.idi_tag,
+	    vdev->flow_counters_res.idi_map,
+	    BUS_DMASYNC_POSTREAD);
+	err = vnic_dev_cmd_args(vdev, CMD_COUNTER_DMA_CONFIG, args, 3, wait);
+	bus_dmamap_sync(vdev->flow_counters_res.idi_tag,
+	    vdev->flow_counters_res.idi_map,
+	    BUS_DMASYNC_PREREAD);
+
+	/* record if DMAs need to be stopped on close */
+	if (!err)
+		vdev->flow_counters_dma_active = (num_counters != 0 &&
+		    period != 0);
+
+	return err;
+}
+
+/* Close the vnic (CMD_CLOSE). */
+int vnic_dev_close(struct vnic_dev *vdev)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+
+	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
+}
+
+/* Enable the vnic, preferring CMD_ENABLE_WAIT when firmware has it. */
+int vnic_dev_enable_wait(struct vnic_dev *vdev)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+
+	if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
+		return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
+	else
+		return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
+}
+
+/* Disable the vnic (CMD_DISABLE). */
+int vnic_dev_disable(struct vnic_dev *vdev)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+
+	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
+}
+
+/* Start opening the vnic (CMD_OPEN); poll with vnic_dev_open_done(). */
+int vnic_dev_open(struct vnic_dev *vdev, int arg)
+{
+	u64 a0 = (u32)arg, a1 = 0;
+	int wait = 1000;
+
+	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
+}
+
+/*
+ * Poll whether a previous CMD_OPEN has finished; *done is set to 1
+ * when the open status reads back zero.
+ */
+int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	int err;
+
+	*done = 0;
+
+	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
+	if (err)
+		return err;
+
+	*done = (a0 == 0);
+
+	return 0;
+}
+
+/*
+ * Fetch the factory MAC address into mac_addr (ETH_ALEN bytes, packed
+ * in the low bytes of the devcmd result).
+ */
+int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	int err, i;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		mac_addr[i] = 0;
+
+	err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
+	if (err)
+		return err;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		mac_addr[i] = ((u8 *)&a0)[i];
+
+	return 0;
+}
+
+/*
+ * Program the Rx packet filter: each nonzero flag enables the
+ * corresponding class (unicast, multicast, broadcast, promiscuous,
+ * all-multicast).
+ */
+int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
+	int broadcast, int promisc, int allmulti)
+{
+	u64 a0, a1 = 0;
+	int wait = 1000;
+	int err;
+
+	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
+	    (multicast ? CMD_PFILTER_MULTICAST : 0) |
+	    (broadcast ? CMD_PFILTER_BROADCAST : 0) |
+	    (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
+	    (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
+
+	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
+	if (err)
+		pr_err("Can't set packet filter\n");
+
+	return err;
+}
+
+/* Add a unicast/multicast address to the adapter's filter table. */
+int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	int err;
+	int i;
+
+	/* Pack the 6-byte address into the low bytes of a0 */
+	for (i = 0; i < ETH_ALEN; i++)
+		((u8 *)&a0)[i] = addr[i];
+
+	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
+	if (err)
+		pr_err("Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
+		    addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
+		    err);
+
+	return err;
+}
+
+/* Remove an address previously added with vnic_dev_add_addr(). */
+int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
+{
+	u64 a0 = 0, a1 = 0;
+	int wait = 1000;
+	int err;
+	int i;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		((u8 *)&a0)[i] = addr[i];
+
+	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
+	if (err)
+		pr_err("Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
+		    addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
+		    err);
+
+	return err;
+}
+
+/*
+ * Set the ingress VLAN rewrite mode; silently succeeds (returns 0) on
+ * firmware that lacks the command.
+ */
+int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
+	u8 ig_vlan_rewrite_mode)
+{
+	u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
+	int wait = 1000;
+
+	if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
+		return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
+		    &a0, &a1, wait);
+	else
+		return 0;
+}
+
+/* Record whether the device is undergoing a reset. */
+void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state)
+{
+	vdev->in_reset = state;
+}
+
+/* True while the device is marked in-reset. */
+static inline int vnic_dev_in_reset(struct vnic_dev *vdev)
+{
+	return vdev->in_reset;
+}
+
+/*
+ * Point the firmware at a notify buffer: zero it, remember its
+ * address/paddr (unless in reset, when the saved state is kept), and
+ * issue CMD_NOTIFY.  The intr number rides in the upper half of a1 and
+ * the buffer size in the lower; the returned size is cached in
+ * notify_sz for vnic_dev_notify_ready().
+ */
+int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
+	void *notify_addr, bus_addr_t notify_pa, u16 intr)
+{
+	u64 a0, a1;
+	int wait = 1000;
+	int r;
+
+	bus_dmamap_sync(vdev->notify_res.idi_tag,
+	    vdev->notify_res.idi_map,
+	    BUS_DMASYNC_PREWRITE);
+	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
+	bus_dmamap_sync(vdev->notify_res.idi_tag,
+	    vdev->notify_res.idi_map,
+	    BUS_DMASYNC_POSTWRITE);
+	if (!vnic_dev_in_reset(vdev)) {
+		vdev->notify = notify_addr;
+		vdev->notify_pa = notify_pa;
+	}
+
+	a0 = (u64)notify_pa;
+	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
+	a1 += sizeof(struct vnic_devcmd_notify);
+
+	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+	if (!vnic_dev_in_reset(vdev))
+		vdev->notify_sz = (r == 0) ? (u32)a1 : 0;
+
+	return r;
+}
+
+/*
+ * Allocate (once) the DMA-able notify buffer and register it with the
+ * firmware.  Reuses the existing buffer when already set.
+ */
+int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
+{
+	void *notify_addr = NULL;
+	bus_addr_t notify_pa = 0;
+	char name[NAME_MAX];
+	static u32 instance;
+
+	if (vdev->notify || vdev->notify_pa) {
+		return vnic_dev_notify_setcmd(vdev, vdev->notify,
+		    vdev->notify_pa, intr);
+	}
+	if (!vnic_dev_in_reset(vdev)) {
+		/* NOTE(review): `name` is formatted but never passed to
+		 * iflib_dma_alloc(), and the allocation's return value is
+		 * not checked — confirm intended */
+		snprintf((char *)name, sizeof(name),
+		    "vnic_notify-%u", instance++);
+		iflib_dma_alloc(vdev->softc->ctx,
+		    sizeof(struct vnic_devcmd_notify),
+		    &vdev->notify_res, BUS_DMA_NOWAIT);
+		notify_pa = vdev->notify_res.idi_paddr;
+		notify_addr = vdev->notify_res.idi_vaddr;
+	}
+
+	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
+}
+
+/* Unregister the notify buffer from the firmware and clear the cache. */
+int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
+{
+	u64 a0, a1;
+	int wait = 1000;
+	int err;
+
+	a0 = 0; /* paddr = 0 to unset notify buffer */
+	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
+	a1 += sizeof(struct vnic_devcmd_notify);
+
+	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+	if (!vnic_dev_in_reset(vdev)) {
+		vdev->notify = NULL;
+		vdev->notify_pa = 0;
+		vdev->notify_sz = 0;
+	}
+
+	return err;
+}
+
+/* Free the notify DMA buffer (outside reset) and unregister it. */
+int vnic_dev_notify_unset(struct vnic_dev *vdev)
+{
+	if (vdev->notify && !vnic_dev_in_reset(vdev)) {
+		iflib_dma_free(&vdev->notify_res);
+	}
+
+	return vnic_dev_notify_unsetcmd(vdev);
+}
+
+/*
+ * Snapshot the firmware-updated notify area into notify_copy.  The
+ * first word holds a checksum of the remaining words; re-read until a
+ * consistent (non-torn) snapshot is obtained.  Returns 1 on success,
+ * 0 if no notify buffer is registered.
+ */
+static int vnic_dev_notify_ready(struct vnic_dev *vdev)
+{
+	u32 *words;
+	unsigned int nwords = vdev->notify_sz / 4;
+	unsigned int i;
+	u32 csum;
+
+	if (!vdev->notify || !vdev->notify_sz)
+		return 0;
+
+	do {
+		csum = 0;
+		bus_dmamap_sync(vdev->notify_res.idi_tag,
+		    vdev->notify_res.idi_map,
+		    BUS_DMASYNC_PREREAD);
+		memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
+		bus_dmamap_sync(vdev->notify_res.idi_tag,
+		    vdev->notify_res.idi_map,
+		    BUS_DMASYNC_POSTREAD);
+		words = (u32 *)&vdev->notify_copy;
+		for (i = 1; i < nwords; i++)
+			csum += words[i];
+	} while (csum != words[0]);
+
+	return 1;
+}
+
+/*
+ * Initialize the vnic.  Uses CMD_INIT when available; otherwise falls
+ * back to the legacy CMD_INIT_v1 and manually programs the default MAC
+ * (which v1 firmware did not do itself).
+ */
+int vnic_dev_init(struct vnic_dev *vdev, int arg)
+{
+	u64 a0 = (u32)arg, a1 = 0;
+	int wait = 1000;
+	int r = 0;
+
+	if (vnic_dev_capable(vdev, CMD_INIT))
+		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
+	else {
+		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
+		if (a0 & CMD_INITF_DEFAULT_MAC) {
+			/* Emulate these for old CMD_INIT_v1 which
+			 * didn't pass a0 so no CMD_INITF_*.
+			 */
+			vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
+			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
+		}
+	}
+	return r;
+}
+
+/* Install default interrupt-coalescing timer conversion factors. */
+void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
+{
+	/* Default: hardware intr coal timer is in units of 1.5 usecs */
+	vdev->intr_coal_timer_info.mul = 2;
+	vdev->intr_coal_timer_info.div = 3;
+	vdev->intr_coal_timer_info.max_usec =
+	    vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
+}
+
+/* Link state from the notify snapshot (0 if notify not ready). */
+int vnic_dev_link_status(struct vnic_dev *vdev)
+{
+	if (!vnic_dev_notify_ready(vdev))
+		return 0;
+
+	return vdev->notify_copy.link_state;
+}
+
+/* Port speed from the notify snapshot (0 if notify not ready). */
+u32 vnic_dev_port_speed(struct vnic_dev *vdev)
+{
+	if (!vnic_dev_notify_ready(vdev))
+		return 0;
+
+	return vdev->notify_copy.port_speed;
+}
+
+/* Convert a coalescing interval from microseconds to hardware units. */
+u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
+{
+	return (usec * vdev->intr_coal_timer_info.mul) /
+	    vdev->intr_coal_timer_info.div;
+}
+
+/* Convert a coalescing interval from hardware units to microseconds. */
+u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
+{
+	return (hw_cycles * vdev->intr_coal_timer_info.div) /
+	    vdev->intr_coal_timer_info.mul;
+}
+
+/* Largest coalescing interval (usec) the hardware timer can express. */
+u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
+{
+	return vdev->intr_coal_timer_info.max_usec;
+}
+
+/* MTU from the notify snapshot (0 if notify not ready). */
+u32 vnic_dev_mtu(struct vnic_dev *vdev)
+{
+	if (!vnic_dev_notify_ready(vdev))
+		return 0;
+
+	return vdev->notify_copy.mtu;
+}
+
+/* Record the interrupt mode chosen for the device. */
+void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
+	enum vnic_dev_intr_mode intr_mode)
+{
+	vdev->intr_mode = intr_mode;
+}
+
+/* Return the interrupt mode recorded by vnic_dev_set_intr_mode(). */
+enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
+	struct vnic_dev *vdev)
+{
+	return vdev->intr_mode;
+}
+
+
+/*
+ * Allocate the DMA-able buffer that CMD_STATS_DUMP writes into.
+ * Returns 0 or -ENOMEM.
+ */
+int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev)
+{
+	char name[NAME_MAX];
+	static u32 instance;
+	struct enic_softc *softc;
+
+	softc = vdev->softc;
+
+	/* NOTE(review): `name` is formatted but never passed to
+	 * iflib_dma_alloc() — confirm intended */
+	snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++);
+	iflib_dma_alloc(softc->ctx, sizeof(struct vnic_stats), &vdev->stats_res, 0);
+	vdev->stats = (struct vnic_stats *)vdev->stats_res.idi_vaddr;
+	return vdev->stats == NULL ? -ENOMEM : 0;
+}
+
+/*
+ * Initialize for up to VNIC_MAX_FLOW_COUNTERS
+ */
+/* Allocate the flow-counter DMA buffer.  Returns 0 or -ENOMEM. */
+int vnic_dev_alloc_counter_mem(struct vnic_dev *vdev)
+{
+	char name[NAME_MAX];
+	static u32 instance;
+	struct enic_softc *softc;
+
+	softc = vdev->softc;
+
+	snprintf((char *)name, sizeof(name), "vnic_flow_ctrs-%u", instance++);
+	iflib_dma_alloc(softc->ctx, sizeof(struct vnic_counter_counts) * VNIC_MAX_FLOW_COUNTERS, &vdev->flow_counters_res, 0);
+	vdev->flow_counters = (struct vnic_counter_counts *)vdev->flow_counters_res.idi_vaddr;
+	vdev->flow_counters_dma_active = 0;
+	return vdev->flow_counters == NULL ? -ENOMEM : 0;
+}
+
+/*
+ * Discover the device's resources and locate the devcmd mailbox.
+ * Returns vdev on success, NULL on failure.  The `mem` parameter is
+ * currently unused (discovery reads through vdev->softc->mem).
+ */
+struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
+	struct enic_bar_info *mem, unsigned int num_bars)
+{
+	if (vnic_dev_discover_res(vdev, NULL, num_bars))
+		goto err_out;
+
+	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
+	if (!vdev->devcmd)
+		goto err_out;
+
+	return vdev;
+
+err_out:
+	return NULL;
+}
+
+/*
+ * vnic_dev_classifier: Add/Delete classifier entries
+ * @vdev: vdev of the device
+ * @cmd: CLSF_ADD for Add filter
+ * CLSF_DEL for Delete filter
+ * @entry: In case of ADD filter, the caller passes the RQ number in this
+ * variable.
+ * This function stores the filter_id returned by the
+ * firmware in the same variable before return;
+ *
+ * In case of DEL filter, the caller passes the RQ number. Return
+ * value is irrelevant.
+ * @data: filter data
+ * @action: action data
+ */
+
+/* Enable/disable an overlay offload (e.g. VXLAN) on the adapter. */
+int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config)
+{
+	u64 a0 = overlay;
+	u64 a1 = config;
+	int wait = 1000;
+
+	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
+}
+
+/* Set the UDP port number the adapter treats as the overlay's port. */
+int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
+	u16 vxlan_udp_port_number)
+{
+	u64 a1 = vxlan_udp_port_number;
+	u64 a0 = overlay;
+	int wait = 1000;
+
+	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
+}
+
+/*
+ * Probe VXLAN offload capability; requires both the IPv6 and
+ * multi-WQ feature bits.
+ */
+int vnic_dev_capable_vxlan(struct vnic_dev *vdev)
+{
+	u64 a0 = VIC_FEATURE_VXLAN;
+	u64 a1 = 0;
+	int wait = 1000;
+	int ret;
+
+	ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
+	/* 1 if the NIC can do VXLAN for both IPv4 and IPv6 with multiple WQs */
+	return ret == 0 &&
+	    (a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) ==
+	    (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ);
+}
+
+/* Allocate a flow counter; on success stores its index in *idx. */
+bool vnic_dev_counter_alloc(struct vnic_dev *vdev, uint32_t *idx)
+{
+	u64 a0 = 0;
+	u64 a1 = 0;
+	int wait = 1000;
+
+	if (vnic_dev_cmd(vdev, CMD_COUNTER_ALLOC, &a0, &a1, wait))
+		return false;
+	*idx = (uint32_t)a0;
+	return true;
+}
+
+/* Release a flow counter previously allocated by vnic_dev_counter_alloc(). */
+bool vnic_dev_counter_free(struct vnic_dev *vdev, uint32_t idx)
+{
+	u64 a0 = idx;
+	u64 a1 = 0;
+	int wait = 1000;
+
+	return vnic_dev_cmd(vdev, CMD_COUNTER_FREE, &a0, &a1,
+	    wait) == 0;
+}
+
+/*
+ * Read a flow counter's packet/byte totals.  With reset the values are
+ * fetched (and zeroed) via devcmd; otherwise the DMA'd shadow copy in
+ * flow_counters[] is returned.
+ */
+bool vnic_dev_counter_query(struct vnic_dev *vdev, uint32_t idx,
+	bool reset, uint64_t *packets, uint64_t *bytes)
+{
+	u64 a0 = idx;
+	u64 a1 = reset ? 1 : 0;
+	int wait = 1000;
+
+	if (reset) {
+		/* query/reset returns updated counters */
+		if (vnic_dev_cmd(vdev, CMD_COUNTER_QUERY, &a0, &a1, wait))
+			return false;
+		*packets = a0;
+		*bytes = a1;
+	} else {
+		/* Get values DMA'd from the adapter */
+		*packets = vdev->flow_counters[idx].vcc_packets;
+		*bytes = vdev->flow_counters[idx].vcc_bytes;
+	}
+	return true;
+}
+
+/* Return the device_t backing this vnic_dev. */
+device_t
+dev_from_vnic_dev(struct vnic_dev *vdev)
+{
+	return (vdev->softc->dev);
+}
diff --git a/sys/dev/enic/vnic_dev.h b/sys/dev/enic/vnic_dev.h
new file mode 100644
index 000000000000..f8ca29f4e175
--- /dev/null
+++ b/sys/dev/enic/vnic_dev.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _VNIC_DEV_H_
+#define _VNIC_DEV_H_
+
+#include "enic_compat.h"
+#include "vnic_resource.h"
+#include "vnic_devcmd.h"
+
+#ifndef VNIC_PADDR_TARGET
+#define VNIC_PADDR_TARGET 0x0000000000000000ULL
+#endif
+
+enum vnic_dev_intr_mode {
+ VNIC_DEV_INTR_MODE_UNKNOWN,
+ VNIC_DEV_INTR_MODE_INTX,
+ VNIC_DEV_INTR_MODE_MSI,
+ VNIC_DEV_INTR_MODE_MSIX,
+};
+
+struct vnic_dev_bar {
+ void __iomem *vaddr;
+ unsigned long len;
+};
+
+/*
+ * Bookkeeping for one DMA descriptor ring.  The *_unaligned fields
+ * presumably record the raw allocation from which the aligned
+ * descs/base_addr view is derived per base_align -- TODO(review):
+ * confirm against the ring allocation code in vnic_dev.c.
+ */
+struct vnic_dev_ring {
+	void *descs; /* vaddr */
+	size_t size;
+	bus_addr_t base_addr; /* paddr */
+	size_t base_align;
+	void *descs_unaligned;
+	size_t size_unaligned;
+	bus_addr_t base_addr_unaligned;
+	unsigned int desc_size;
+	unsigned int desc_count;
+	unsigned int desc_avail;
+	unsigned int last_count;
+};
+
+struct vnic_dev_iomap_info {
+ bus_addr_t bus_addr;
+ unsigned long len;
+ void __iomem *vaddr;
+};
+
+struct vnic_dev;
+struct vnic_stats;
+
+void *vnic_dev_priv(struct vnic_dev *vdev);
+unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
+ enum vnic_res_type type);
+void vnic_register_cbacks(struct vnic_dev *vdev,
+ void *(*alloc_consistent)(void *priv, size_t size,
+ bus_addr_t *dma_handle, struct iflib_dma_info *res, u8 *name),
+ void (*free_consistent)(void *priv,
+ size_t size, void *vaddr,
+ bus_addr_t dma_handle, struct iflib_dma_info *res));
+void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
+ unsigned int index);
+uint8_t vnic_dev_get_res_bar(struct vnic_dev *vdev,
+ enum vnic_res_type type);
+uint32_t vnic_dev_get_res_offset(struct vnic_dev *vdev,
+ enum vnic_res_type type, unsigned int index);
+unsigned long vnic_dev_get_res_type_len(struct vnic_dev *vdev,
+ enum vnic_res_type type);
+unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
+ unsigned int desc_count, unsigned int desc_size);
+void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
+int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ u64 *a0, u64 *a1, int wait);
+int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ u64 *args, int nargs, int wait);
+void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index);
+void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf);
+void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev);
+int vnic_dev_fw_info(struct vnic_dev *vdev,
+ struct vnic_devcmd_fw_info **fw_info);
+int vnic_dev_capable_adv_filters(struct vnic_dev *vdev);
+int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd);
+int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
+ u8 *filter_actions);
+void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk,
+ bool *weak);
+int vnic_dev_asic_info(struct vnic_dev *vdev, u16 *asic_type, u16 *asic_rev);
+int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
+ void *value);
+int vnic_dev_stats_clear(struct vnic_dev *vdev);
+int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
+int vnic_dev_counter_dma_cfg(struct vnic_dev *vdev, u32 period,
+ u32 num_counters);
+int vnic_dev_hang_notify(struct vnic_dev *vdev);
+int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
+ int broadcast, int promisc, int allmulti);
+int vnic_dev_packet_filter_all(struct vnic_dev *vdev, int directed,
+ int multicast, int broadcast, int promisc, int allmulti);
+int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
+int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
+int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
+int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr);
+int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
+void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state);
+int vnic_dev_notify_unset(struct vnic_dev *vdev);
+int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
+ void *notify_addr, bus_addr_t notify_pa, u16 intr);
+int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev);
+int vnic_dev_link_status(struct vnic_dev *vdev);
+u32 vnic_dev_port_speed(struct vnic_dev *vdev);
+u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
+u32 vnic_dev_mtu(struct vnic_dev *vdev);
+u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev);
+u32 vnic_dev_notify_status(struct vnic_dev *vdev);
+u32 vnic_dev_uif(struct vnic_dev *vdev);
+int vnic_dev_close(struct vnic_dev *vdev);
+int vnic_dev_enable(struct vnic_dev *vdev);
+int vnic_dev_enable_wait(struct vnic_dev *vdev);
+int vnic_dev_disable(struct vnic_dev *vdev);
+int vnic_dev_open(struct vnic_dev *vdev, int arg);
+int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
+int vnic_dev_init(struct vnic_dev *vdev, int arg);
+int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err);
+int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len);
+int vnic_dev_deinit(struct vnic_dev *vdev);
+void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev);
+int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev);
+int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
+int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
+int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg);
+int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done);
+void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
+ enum vnic_dev_intr_mode intr_mode);
+enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
+u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec);
+u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles);
+u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev);
+int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
+ u8 ig_vlan_rewrite_mode);
+struct enic;
+struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
+ struct enic_bar_info *mem, unsigned int num_bars);
+struct rte_pci_device *vnic_dev_get_pdev(struct vnic_dev *vdev);
+int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev);
+int vnic_dev_alloc_counter_mem(struct vnic_dev *vdev);
+int vnic_dev_cmd_init(struct vnic_dev *vdev, int fallback);
+int vnic_dev_get_size(void);
+int vnic_dev_int13(struct vnic_dev *vdev, u64 arg, u32 op);
+int vnic_dev_perbi(struct vnic_dev *vdev, u64 arg, u32 op);
+u32 vnic_dev_perbi_rebuild_cnt(struct vnic_dev *vdev);
+int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len);
+int vnic_dev_enable2(struct vnic_dev *vdev, int active);
+int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);
+int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
+int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
+int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
+ struct filter_v2 *data, struct filter_action_v2 *action_v2);
+int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev,
+ u8 overlay, u8 config);
+int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
+ u16 vxlan_udp_port_number);
+int vnic_dev_capable_vxlan(struct vnic_dev *vdev);
+bool vnic_dev_counter_alloc(struct vnic_dev *vdev, uint32_t *idx);
+bool vnic_dev_counter_free(struct vnic_dev *vdev, uint32_t idx);
+bool vnic_dev_counter_query(struct vnic_dev *vdev, uint32_t idx,
+ bool reset, uint64_t *packets, uint64_t *bytes);
+
+device_t dev_from_vnic_dev(struct vnic_dev *vdev);
+
+#endif /* _VNIC_DEV_H_ */
diff --git a/sys/dev/enic/vnic_devcmd.h b/sys/dev/enic/vnic_devcmd.h
new file mode 100644
index 000000000000..5142d94f822b
--- /dev/null
+++ b/sys/dev/enic/vnic_devcmd.h
@@ -0,0 +1,1182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _VNIC_DEVCMD_H_
+#define _VNIC_DEVCMD_H_
+
+#define _CMD_NBITS 14
+#define _CMD_VTYPEBITS 10
+#define _CMD_FLAGSBITS 6
+#define _CMD_DIRBITS 2
+
+#define _CMD_NMASK ((1 << _CMD_NBITS)-1)
+#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1)
+#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1)
+#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1)
+
+#define _CMD_NSHIFT 0
+#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS)
+#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS)
+#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS)
+
+/*
+ * Direction bits (from host perspective).
+ */
+#define _CMD_DIR_NONE 0U
+#define _CMD_DIR_WRITE 1U
+#define _CMD_DIR_READ 2U
+#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ)
+
+/*
+ * Flag bits.
+ */
+#define _CMD_FLAGS_NONE 0U
+#define _CMD_FLAGS_NOWAIT 1U
+
+/*
+ * vNIC type bits.
+ */
+#define _CMD_VTYPE_NONE 0U
+#define _CMD_VTYPE_ENET 1U
+#define _CMD_VTYPE_FC 2U
+#define _CMD_VTYPE_SCSI 4U
+#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI)
+
+/*
+ * Used to create cmds..
+ */
+#define _CMDCF(dir, flags, vtype, nr) \
+ (((dir) << _CMD_DIRSHIFT) | \
+ ((flags) << _CMD_FLAGSSHIFT) | \
+ ((vtype) << _CMD_VTYPESHIFT) | \
+ ((nr) << _CMD_NSHIFT))
+#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr)
+#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr)
+
+/*
+ * Used to decode cmds..
+ */
+#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK)
+#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK)
+#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
+#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+enum vnic_devcmd_cmd {
+ CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
+
+ /*
+ * mcpu fw info in mem:
+ * in:
+ * (u64)a0=paddr to struct vnic_devcmd_fw_info
+ * action:
+ * Fills in struct vnic_devcmd_fw_info (128 bytes)
+ * note:
+ * An old definition of CMD_MCPU_FW_INFO
+ */
+ CMD_MCPU_FW_INFO_OLD = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
+
+ /*
+ * mcpu fw info in mem:
+ * in:
+ * (u64)a0=paddr to struct vnic_devcmd_fw_info
+ * (u16)a1=size of the structure
+ * out:
+ * (u16)a1=0 for in:a1 = 0,
+ * data size actually written for other values.
+ * action:
+ * Fills in first 128 bytes of vnic_devcmd_fw_info for in:a1 = 0,
+ * first in:a1 bytes for 0 < in:a1 <= 132,
+ * 132 bytes for other values of in:a1.
+ * note:
+ * CMD_MCPU_FW_INFO and CMD_MCPU_FW_INFO_OLD have the same enum 1
+ * for source compatibility.
+ */
+ CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 1),
+
+ /* dev-specific block member:
+ * in: (u16)a0=offset,(u8)a1=size
+ * out: a0=value
+ */
+ CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
+
+ /* stats clear */
+ CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3),
+
+ /* stats dump in mem: (u64)a0=paddr to stats area,
+ * (u16)a1=sizeof stats area */
+ CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),
+
+ /* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
+ CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),
+
+ /* set Rx packet filter for all: (u32)a0=filters (see CMD_PFILTER_*) */
+ CMD_PACKET_FILTER_ALL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 7),
+
+ /* hang detection notification */
+ CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
+
+ /* MAC address in (u48)a0 */
+ CMD_MAC_ADDR = _CMDC(_CMD_DIR_READ,
+ _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9),
+#define CMD_GET_MAC_ADDR CMD_MAC_ADDR /* some uses are aliased */
+
+ /* add addr from (u48)a0 */
+ CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE,
+ _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12),
+
+ /* del addr from (u48)a0 */
+ CMD_ADDR_DEL = _CMDCNW(_CMD_DIR_WRITE,
+ _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13),
+
+ /* add VLAN id in (u16)a0 */
+ CMD_VLAN_ADD = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14),
+
+ /* del VLAN id in (u16)a0 */
+ CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),
+
+ /*
+ * nic_cfg in (u32)a0
+ *
+ * Capability query:
+ * out: (u64) a0= 1 if a1 is valid
+ * (u64) a1= (NIC_CFG bits supported) | (flags << 32)
+ * (flags are CMD_NIC_CFG_CAPF_xxx)
+ */
+ CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
+
+ /*
+ * nic_cfg_chk (same as nic_cfg, but may return error)
+ * in (u32)a0
+ *
+ * Capability query:
+ * out: (u64) a0= 1 if a1 is valid
+ * (u64) a1= (NIC_CFG bits supported) | (flags << 32)
+ * (flags are CMD_NIC_CFG_CAPF_xxx)
+ */
+ CMD_NIC_CFG_CHK = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
+
+ /* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */
+ CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),
+
+ /* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */
+ CMD_RSS_CPU = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18),
+
+ /* initiate softreset */
+ CMD_SOFT_RESET = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19),
+
+ /* softreset status:
+ * out: a0=0 reset complete, a0=1 reset in progress */
+ CMD_SOFT_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20),
+
+ /* set struct vnic_devcmd_notify buffer in mem:
+ * in:
+ * (u64)a0=paddr to notify (set paddr=0 to unset)
+ * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
+ * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
+ * out:
+ * (u32)a1 = effective size
+ */
+ CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21),
+
+ /* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct,
+ * (u8)a1=PXENV_UNDI_xxx */
+ CMD_UNDI = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22),
+
+ /* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */
+ CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23),
+
+ /* open status:
+ * out: a0=0 open complete, a0=1 open in progress */
+ CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24),
+
+ /* close vnic */
+ CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),
+
+ /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
+/***** Replaced by CMD_INIT *****/
+ CMD_INIT_v1 = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),
+
+ /* variant of CMD_INIT, with provisioning info
+ * (u64)a0=paddr of vnic_devcmd_provinfo
+ * (u32)a1=sizeof provision info */
+ CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27),
+
+ /* enable virtual link */
+ CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
+
+ /* enable virtual link, waiting variant. */
+ CMD_ENABLE_WAIT = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
+
+ /* disable virtual link */
+ CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),
+
+ /* stats dump sum of all vnic stats on same uplink in mem:
+ * (u64)a0=paddr
+ * (u16)a1=sizeof stats area */
+ CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30),
+
+ /* init status:
+ * out: a0=0 init complete, a0=1 init in progress
+ * if a0=0, a1=errno */
+ CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),
+
+ /* INT13 API: (u64)a0=paddr to vnic_int13_params struct
+ * (u32)a1=INT13_CMD_xxx */
+ CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32),
+
+ /* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */
+ CMD_LOGICAL_UPLINK = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33),
+
+ /* undo initialize of virtual link */
+ CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
+
+ /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
+ CMD_INIT = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 35),
+
+ /* check fw capability of a cmd:
+ * in: (u32)a0=cmd
+ * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */
+ CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36),
+
+ /* persistent binding info
+ * in: (u64)a0=paddr of arg
+ * (u32)a1=CMD_PERBI_XXX */
+ CMD_PERBI = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37),
+
+ /* Interrupt Assert Register functionality
+ * in: (u16)a0=interrupt number to assert
+ */
+ CMD_IAR = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38),
+
+ /* initiate hangreset, like softreset after hang detected */
+ CMD_HANG_RESET = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39),
+
+ /* hangreset status:
+ * out: a0=0 reset complete, a0=1 reset in progress */
+ CMD_HANG_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40),
+
+ /*
+ * Set hw ingress packet vlan rewrite mode:
+ * in: (u32)a0=new vlan rewrite mode
+ * out: (u32)a0=old vlan rewrite mode */
+ CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41),
+
+ /*
+ * in: (u16)a0=bdf of target vnic
+ * (u32)a1=cmd to proxy
+ * a2-a15=args to cmd in a1
+ * out: (u32)a0=status of proxied cmd
+ * a1-a15=out args of proxied cmd */
+ CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42),
+
+ /*
+ * As for BY_BDF except a0 is index of hvnlink subordinate vnic
+ * or SR-IOV virtual vnic
+ */
+ CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43),
+
+ /*
+ * For HPP toggle:
+ * adapter-info-get
+	 * in: (u64)a0=physical address of buffer passed in from caller.
+	 * (u16)a1=size of buffer specified in a0.
+	 * out: (u64)a0=physical address of buffer passed in from caller.
+ * (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or
+ * 0 if no VIF-CONFIG-INFO TLV was ever received. */
+ CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
+
+ /*
+ * INT13 API: (u64)a0=paddr to vnic_int13_params struct
+ * (u32)a1=INT13_CMD_xxx
+ */
+ CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45),
+
+ /*
+ * Set default vlan:
+ * in: (u16)a0=new default vlan
+ * (u16)a1=zero for overriding vlan with param a0,
+ * non-zero for resetting vlan to the default
+ * out: (u16)a0=old default vlan
+ */
+ CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46),
+
+ /* init_prov_info2:
+ * Variant of CMD_INIT_PROV_INFO, where it will not try to enable
+ * the vnic until CMD_ENABLE2 is issued.
+ * (u64)a0=paddr of vnic_devcmd_provinfo
+ * (u32)a1=sizeof provision info */
+ CMD_INIT_PROV_INFO2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 47),
+
+ /* enable2:
+ * (u32)a0=0 ==> standby
+ * =CMD_ENABLE2_ACTIVE ==> active
+ */
+ CMD_ENABLE2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 48),
+
+ /*
+ * cmd_status:
+ * Returns the status of the specified command
+ * Input:
+ * a0 = command for which status is being queried.
+ * Possible values are:
+ * CMD_SOFT_RESET
+ * CMD_HANG_RESET
+ * CMD_OPEN
+ * CMD_INIT
+ * CMD_INIT_PROV_INFO
+ * CMD_DEINIT
+ * CMD_INIT_PROV_INFO2
+ * CMD_ENABLE2
+ * Output:
+ * if status == STAT_ERROR
+ * a0 = ERR_ENOTSUPPORTED - status for command in a0 is
+ * not supported
+ * if status == STAT_NONE
+ * a0 = status of the devcmd specified in a0 as follows.
+ * ERR_SUCCESS - command in a0 completed successfully
+ * ERR_EINPROGRESS - command in a0 is still in progress
+ */
+ CMD_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 49),
+
+ /*
+ * Returns interrupt coalescing timer conversion factors.
+ * After calling this devcmd, ENIC driver can convert
+ * interrupt coalescing timer in usec into CPU cycles as follows:
+ *
+ * intr_timer_cycles = intr_timer_usec * multiplier / divisor
+ *
+	 * Interrupt coalescing timer in usecs can be converted/obtained
+ * from CPU cycles as follows:
+ *
+ * intr_timer_usec = intr_timer_cycles * divisor / multiplier
+ *
+ * in: none
+ * out: (u32)a0 = multiplier
+ * (u32)a1 = divisor
+ * (u32)a2 = maximum timer value in usec
+ */
+ CMD_INTR_COAL_CONVERT = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 50),
+
+ /*
+ * ISCSI DUMP API:
+ * in: (u64)a0=paddr of the param or param itself
+ * (u32)a1=ISCSI_CMD_xxx
+ */
+ CMD_ISCSI_DUMP_REQ = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 51),
+
+ /*
+ * ISCSI DUMP STATUS API:
+ * in: (u32)a0=cmd tag
+ * in: (u32)a1=ISCSI_CMD_xxx
+ * out: (u32)a0=cmd status
+ */
+ CMD_ISCSI_DUMP_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 52),
+
+ /*
+ * Subvnic migration from MQ <--> VF.
+ * Enable the LIF migration from MQ to VF and vice versa. MQ and VF
+ * indexes are statically bound at the time of initialization.
+ * Based on the direction of migration, the resources of either MQ or
+ * the VF shall be attached to the LIF.
+ * in: (u32)a0=Direction of Migration
+ * 0=> Migrate to VF
+ * 1=> Migrate to MQ
+ * (u32)a1=VF index (MQ index)
+ */
+ CMD_MIGRATE_SUBVNIC = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 53),
+
+ /*
+ * Register / Deregister the notification block for MQ subvnics
+ * in:
+ * (u64)a0=paddr to notify (set paddr=0 to unset)
+ * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
+ * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
+ * out:
+ * (u32)a1 = effective size
+ */
+ CMD_SUBVNIC_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 54),
+
+ /*
+ * Set the predefined mac address as default
+ * in:
+ * (u48)a0=mac addr
+ */
+ CMD_SET_MAC_ADDR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 55),
+
+ /* Update the provisioning info of the given VIF
+ * (u64)a0=paddr of vnic_devcmd_provinfo
+ * (u32)a1=sizeof provision info */
+ CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56),
+
+ /*
+ * Initialization for the devcmd2 interface.
+ * in: (u64) a0=host result buffer physical address
+ * in: (u16) a1=number of entries in result buffer
+ */
+ CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57),
+
+ /*
+ * Add a filter.
+ * in: (u64) a0= filter address
+ * (u32) a1= size of filter
+ * out: (u32) a0=filter identifier
+ *
+ * Capability query:
+ * out: (u64) a0= 1 if capability query supported
+ * (u64) a1= MAX filter type supported
+ */
+ CMD_ADD_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 58),
+
+ /*
+ * Delete a filter.
+ * in: (u32) a0=filter identifier
+ */
+ CMD_DEL_FILTER = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 59),
+
+ /*
+ * Enable a Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ * (u32) a1= command
+ */
+ CMD_QP_ENABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 60),
+
+ /*
+ * Disable a Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ * (u32) a1= command
+ */
+ CMD_QP_DISABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 61),
+
+ /*
+ * Stats dump Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ * (u64) a1=host buffer addr for status dump
+ * (u32) a2=length of the buffer
+ */
+ CMD_QP_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 62),
+
+ /*
+ * Clear stats for Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ */
+ CMD_QP_STATS_CLEAR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 63),
+
+ /*
+ * UEFI BOOT API: (u64)a0= UEFI FLS_CMD_xxx
+ * (ui64)a1= paddr for the info buffer
+ */
+ CMD_FC_REQ = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 64),
+
+ /*
+ * Return the iSCSI config details required by the EFI Option ROM
+ * in: (u32) a0=0 Get Boot Info for PXE eNIC as per pxe_boot_config_t
+ * a0=1 Get Boot info for iSCSI enic as per
+ * iscsi_boot_efi_cfg_t
+ * in: (u64) a1=Host address where iSCSI config info is returned
+ */
+ CMD_VNIC_BOOT_CONFIG_INFO = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 65),
+
+ /*
+ * Create a Queue Pair (RoCE)
+ * in: (u32) a0 = Queue Pair number
+ * (u32) a1 = Remote QP
+ * (u32) a2 = RDMA-RQ
+ * (u16) a3 = RQ Res Group
+ * (u16) a4 = SQ Res Group
+ * (u32) a5 = Protection Domain
+ * (u64) a6 = Remote MAC
+ * (u32) a7 = start PSN
+ * (u16) a8 = MSS
+ * (u32) a9 = protocol version
+ */
+ CMD_RDMA_QP_CREATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 66),
+
+ /*
+ * Delete a Queue Pair (RoCE)
+ * in: (u32) a0 = Queue Pair number
+ */
+ CMD_RDMA_QP_DELETE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 67),
+
+ /*
+ * Retrieve a Queue Pair's status information (RoCE)
+ * in: (u32) a0 = Queue Pair number
+ * (u64) a1 = host buffer addr for QP status struct
+ * (u32) a2 = length of the buffer
+ */
+ CMD_RDMA_QP_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 68),
+
+ /*
+ * Use this devcmd for agreeing on the highest common version supported
+ * by both driver and fw for by features who need such a facility.
+ * in: (u64) a0 = feature (driver requests for the supported versions
+ * on this feature)
+ * out: (u64) a0 = bitmap of all supported versions for that feature
+ */
+ CMD_GET_SUPP_FEATURE_VER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 69),
+
+ /*
+ * Initialize the RDMA notification work queue
+ * in: (u64) a0 = host buffer address
+ * in: (u16) a1 = number of entries in buffer
+ * in: (u16) a2 = resource group number
+ * in: (u16) a3 = CQ number to post completion
+ */
+ CMD_RDMA_INIT_INFO_BUF = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 70),
+
+ /*
+ * De-init the RDMA notification work queue
+ * in: (u64) a0=resource group number
+ */
+ CMD_RDMA_DEINIT_INFO_BUF = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 71),
+
+ /*
+ * Control (Enable/Disable) overlay offloads on the given vnic
+ * in: (u8) a0 = OVERLAY_FEATURE_NVGRE : NVGRE
+ * a0 = OVERLAY_FEATURE_VXLAN : VxLAN
+ * in: (u8) a1 = OVERLAY_OFFLOAD_ENABLE : Enable or
+ * a1 = OVERLAY_OFFLOAD_DISABLE : Disable or
+ * a1 = OVERLAY_OFFLOAD_ENABLE_V2 : Enable with version 2
+ */
+ CMD_OVERLAY_OFFLOAD_CTRL =
+ _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 72),
+
+ /*
+ * Configuration of overlay offloads feature on a given vNIC
+ * in: (u8) a0 = OVERLAY_CFG_VXLAN_PORT_UPDATE : VxLAN
+ * in: (u16) a1 = unsigned short int port information
+ */
+ CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73),
+
+ /*
+ * Return the configured name for the device
+ * in: (u64) a0=Host address where the name is copied
+ * (u32) a1=Size of the buffer
+ */
+ CMD_GET_CONFIG_NAME = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 74),
+
+ /*
+ * Enable group interrupt for the VF
+ * in: (u32) a0 = GRPINTR_ENABLE : enable
+ * a0 = GRPINTR_DISABLE : disable
+ * a0 = GRPINTR_UPD_VECT: update group vector addr
+ * in: (u32) a1 = interrupt group count
+ * in: (u64) a2 = Start of host buffer address for DMAing group
+ * vector bitmap
+ * in: (u64) a3 = Stride between group vectors
+ */
+ CMD_CONFIG_GRPINTR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 75),
+
+ /*
+	 * Set cq array base and size in a list of consecutive wqs and
+ * rqs for a device
+ * in: (u16) a0 = the wq relative index in the device.
+ * -1 indicates skipping wq configuration
+ * in: (u16) a1 = the wcq relative index in the device
+ * in: (u16) a2 = the rq relative index in the device
+ * -1 indicates skipping rq configuration
+ * in: (u16) a3 = the rcq relative index in the device
+ */
+ CMD_CONFIG_CQ_ARRAY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 76),
+
+ /*
+ * Add an advanced filter.
+ * in: (u64) a0= filter address
+ * (u32) a1= size of filter
+ * out: (u32) a0=filter identifier
+ *
+ * Capability query:
+ * in: (u64) a1= supported filter capability exchange modes
+ * out: (u64) a0= 1 if capability query supported
+ * if (u64) a1 = 0: a1 = MAX filter type supported
+ * if (u64) a1 & FILTER_CAP_MODE_V1_FLAG:
+ * a1 = bitmask of supported filters
+ * a2 = FILTER_CAP_MODE_V1
+ * a3 = bitmask of supported actions
+ */
+ CMD_ADD_ADV_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 77),
+
+ /*
+ * Allocate a counter for use with CMD_ADD_FILTER
+ * out:(u32) a0 = counter index
+ */
+ CMD_COUNTER_ALLOC = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ENET, 85),
+
+ /*
+ * Free a counter
+ * in: (u32) a0 = counter_id
+ */
+ CMD_COUNTER_FREE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 86),
+
+ /*
+ * Read a counter
+ * in: (u32) a0 = counter_id
+ * (u32) a1 = clear counter if non-zero
+ * out:(u64) a0 = packet count
+ * (u64) a1 = byte count
+ */
+ CMD_COUNTER_QUERY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 87),
+
+ /*
+ * Configure periodic counter DMA. This will trigger an immediate
+ * DMA of the counters (unless period == 0), and then schedule a DMA
+	 * of the counters every <period> seconds until disabled.
+ * Each new COUNTER_DMA_CONFIG will override all previous commands on
+ * this vnic.
+ * Setting a2 (period) = 0 will disable periodic DMAs
+ * If a0 (num_counters) != 0, an immediate DMA will always be done,
+ * irrespective of the value in a2.
+ * in: (u32) a0 = number of counters to DMA
+ * (u64) a1 = host target DMA address
+ * (u32) a2 = DMA period in milliseconds (0 to disable)
+ */
+ CMD_COUNTER_DMA_CONFIG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 88),
+#define VNIC_COUNTER_DMA_MIN_PERIOD 500
+
+ /*
+ * Clear all counters on a vnic
+ */
+ CMD_COUNTER_CLEAR_ALL = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ENET, 89),
+};
+
+/* Modes for exchanging advanced filter capabilities. The modes supported by
+ * the driver are passed in the CMD_ADD_ADV_FILTER capability command and the
+ * mode selected is returned.
+ * V0: the maximum filter type supported is returned
+ * V1: bitmasks of supported filters and actions are returned
+ */
+enum filter_cap_mode {
+ FILTER_CAP_MODE_V0 = 0, /* Must always be 0 for legacy drivers */
+ FILTER_CAP_MODE_V1 = 1,
+};
+#define FILTER_CAP_MODE_V1_FLAG (1 << FILTER_CAP_MODE_V1)
+
+/* CMD_ENABLE2 flags */
+#define CMD_ENABLE2_STANDBY 0x0
+#define CMD_ENABLE2_ACTIVE 0x1
+
+/* flags for CMD_OPEN */
+#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
+#define CMD_OPENF_IG_DESCCACHE 0x2 /* Do not flush IG DESC cache */
+
+/* flags for CMD_INIT */
+#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */
+
+/* flags for CMD_NIC_CFG */
+#define CMD_NIC_CFG_CAPF_UDP_WEAK (1ULL << 0) /* Bodega-style UDP RSS */
+
+/* flags for CMD_PACKET_FILTER */
+#define CMD_PFILTER_DIRECTED 0x01
+#define CMD_PFILTER_MULTICAST 0x02
+#define CMD_PFILTER_BROADCAST 0x04
+#define CMD_PFILTER_PROMISCUOUS 0x08
+#define CMD_PFILTER_ALL_MULTICAST 0x10
+
+/* Commands for CMD_QP_ENABLE/CM_QP_DISABLE */
+#define CMD_QP_RQWQ 0x0
+
+/* rewrite modes for CMD_IG_VLAN_REWRITE_MODE */
+#define IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK 0
+#define IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN 1
+#define IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN 2
+#define IG_VLAN_REWRITE_MODE_PASS_THRU 3
+
+enum vnic_devcmd_status {
+ STAT_NONE = 0,
+ STAT_BUSY = 1 << 0, /* cmd in progress */
+ STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */
+ STAT_FAILOVER = 1 << 2, /* always set on vnics in pci standby state
+ * if seen a failover to the standby happened
+ */
+};
+
+enum vnic_devcmd_error {
+ ERR_SUCCESS = 0,
+ ERR_EINVAL = 1,
+ ERR_EFAULT = 2,
+ ERR_EPERM = 3,
+ ERR_EBUSY = 4,
+ ERR_ECMDUNKNOWN = 5,
+ ERR_EBADSTATE = 6,
+ ERR_ENOMEM = 7,
+ ERR_ETIMEDOUT = 8,
+ ERR_ELINKDOWN = 9,
+ ERR_EMAXRES = 10,
+ ERR_ENOTSUPPORTED = 11,
+ ERR_EINPROGRESS = 12,
+ ERR_MAX
+};
+
+/*
+ * note: hw_version and asic_rev refer to the same thing,
+ * but have different formats. hw_version is
+ * a 32-byte string (e.g. "A2") and asic_rev is
+ * a 16-bit integer (e.g. 0xA2).
+ */
+struct vnic_devcmd_fw_info {
+ char fw_version[32];
+ char fw_build[32];
+ char hw_version[32];
+ char hw_serial_number[32];
+ u16 asic_type;
+ u16 asic_rev;
+};
+
+enum fwinfo_asic_type {
+ FWINFO_ASIC_TYPE_UNKNOWN,
+ FWINFO_ASIC_TYPE_PALO,
+ FWINFO_ASIC_TYPE_SERENO,
+ FWINFO_ASIC_TYPE_CRUZ,
+};
+
+struct vnic_devcmd_notify {
+ u32 csum; /* checksum over following words */
+
+ u32 link_state; /* link up == 1 */
+ u32 port_speed; /* effective port speed (rate limit) */
+ u32 mtu; /* MTU */
+ u32 msglvl; /* requested driver msg lvl */
+ u32 uif; /* uplink interface */
+ u32 status; /* status bits (see VNIC_STF_*) */
+ u32 error; /* error code (see ERR_*) for first ERR */
+ u32 link_down_cnt; /* running count of link down transitions */
+ u32 perbi_rebuild_cnt; /* running count of perbi rebuilds */
+};
+#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */
+#define VNIC_STF_STD_PAUSE 0x0002 /* standard link-level pause on */
+#define VNIC_STF_PFC_PAUSE 0x0004 /* priority flow control pause on */
+/* all supported status flags */
+#define VNIC_STF_ALL (VNIC_STF_FATAL_ERR |\
+ VNIC_STF_STD_PAUSE |\
+ VNIC_STF_PFC_PAUSE |\
+ 0)
+
+/* Provisioning info blob passed to CMD_INIT_PROV_INFO and friends. */
+struct vnic_devcmd_provinfo {
+	u8 oui[3];
+	u8 type;
+	/*
+	 * Variable-length payload.  The zero-length array is the
+	 * pre-C99 flexible-array idiom (C99 `u8 data[];` is the
+	 * modern equivalent); kept as-is since this header mirrors
+	 * the shared VIC devcmd definitions.
+	 */
+	u8 data[0];
+};
+
+/*
+ * These are used in flags field of different filters to denote
+ * valid fields used.
+ */
+#define FILTER_FIELD_VALID(fld) (1 << (fld - 1))
+
+#define FILTER_FIELD_USNIC_VLAN FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_USNIC_ETHTYPE FILTER_FIELD_VALID(2)
+#define FILTER_FIELD_USNIC_PROTO FILTER_FIELD_VALID(3)
+#define FILTER_FIELD_USNIC_ID FILTER_FIELD_VALID(4)
+
+#define FILTER_FIELDS_USNIC (FILTER_FIELD_USNIC_VLAN | \
+ FILTER_FIELD_USNIC_ETHTYPE | \
+ FILTER_FIELD_USNIC_PROTO | \
+ FILTER_FIELD_USNIC_ID)
+
+struct filter_usnic_id {
+ u32 flags;
+ u16 vlan;
+ u16 ethtype;
+ u8 proto_version;
+ u32 usnic_id;
+} __attribute__((packed));
+
+#define FILTER_FIELD_5TUP_PROTO FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_5TUP_SRC_AD FILTER_FIELD_VALID(2)
+#define FILTER_FIELD_5TUP_DST_AD FILTER_FIELD_VALID(3)
+#define FILTER_FIELD_5TUP_SRC_PT FILTER_FIELD_VALID(4)
+#define FILTER_FIELD_5TUP_DST_PT FILTER_FIELD_VALID(5)
+
+#define FILTER_FIELDS_IPV4_5TUPLE (FILTER_FIELD_5TUP_PROTO | \
+ FILTER_FIELD_5TUP_SRC_AD | \
+ FILTER_FIELD_5TUP_DST_AD | \
+ FILTER_FIELD_5TUP_SRC_PT | \
+ FILTER_FIELD_5TUP_DST_PT)
+
+/* Enums for the protocol field. */
+enum protocol_e {
+ PROTO_UDP = 0,
+ PROTO_TCP = 1,
+ PROTO_IPV4 = 2,
+ PROTO_IPV6 = 3
+};
+
+struct filter_ipv4_5tuple {
+ u32 flags;
+ u32 protocol;
+ u32 src_addr;
+ u32 dst_addr;
+ u16 src_port;
+ u16 dst_port;
+} __attribute__((packed));
+
+#define FILTER_FIELD_VMQ_VLAN FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_VMQ_MAC FILTER_FIELD_VALID(2)
+
+#define FILTER_FIELDS_MAC_VLAN (FILTER_FIELD_VMQ_VLAN | \
+ FILTER_FIELD_VMQ_MAC)
+
+#define FILTER_FIELDS_NVGRE FILTER_FIELD_VMQ_MAC
+
+struct filter_mac_vlan {
+ u32 flags;
+ u16 vlan;
+ u8 mac_addr[6];
+} __attribute__((packed));
+
+#define FILTER_FIELD_VLAN_IP_3TUP_VLAN FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_VLAN_IP_3TUP_L3_PROTO FILTER_FIELD_VALID(2)
+#define FILTER_FIELD_VLAN_IP_3TUP_DST_AD FILTER_FIELD_VALID(3)
+#define FILTER_FIELD_VLAN_IP_3TUP_L4_PROTO FILTER_FIELD_VALID(4)
+#define FILTER_FIELD_VLAN_IP_3TUP_DST_PT FILTER_FIELD_VALID(5)
+
+#define FILTER_FIELDS_VLAN_IP_3TUP (FILTER_FIELD_VLAN_IP_3TUP_VLAN | \
+ FILTER_FIELD_VLAN_IP_3TUP_L3_PROTO | \
+ FILTER_FIELD_VLAN_IP_3TUP_DST_AD | \
+ FILTER_FIELD_VLAN_IP_3TUP_L4_PROTO | \
+ FILTER_FIELD_VLAN_IP_3TUP_DST_PT)
+
+struct filter_vlan_ip_3tuple {
+ u32 flags;
+ u16 vlan;
+ u16 l3_protocol;
+ union {
+ u32 dst_addr_v4;
+ u8 dst_addr_v6[16];
+ } u;
+ u32 l4_protocol;
+ u16 dst_port;
+} __attribute__((packed));
+
+#define FILTER_GENERIC_1_BYTES 64
+
+enum filter_generic_1_layer {
+ FILTER_GENERIC_1_L2,
+ FILTER_GENERIC_1_L3,
+ FILTER_GENERIC_1_L4,
+ FILTER_GENERIC_1_L5,
+ FILTER_GENERIC_1_NUM_LAYERS
+};
+
+#define FILTER_GENERIC_1_IPV4 (1 << 0)
+#define FILTER_GENERIC_1_IPV6 (1 << 1)
+#define FILTER_GENERIC_1_UDP (1 << 2)
+#define FILTER_GENERIC_1_TCP (1 << 3)
+#define FILTER_GENERIC_1_TCP_OR_UDP (1 << 4)
+#define FILTER_GENERIC_1_IP4SUM_OK (1 << 5)
+#define FILTER_GENERIC_1_L4SUM_OK (1 << 6)
+#define FILTER_GENERIC_1_IPFRAG (1 << 7)
+
+#define FILTER_GENERIC_1_KEY_LEN 64
+
+/*
+ * Version 1 of generic filter specification
+ * position is only 16 bits, reserving positions > 64k to be used by firmware
+ */
+struct filter_generic_1 {
+ u16 position; /* lower position comes first */
+ u32 mask_flags;
+ u32 val_flags;
+ u16 mask_vlan;
+ u16 val_vlan;
+ struct {
+ u8 mask[FILTER_GENERIC_1_KEY_LEN]; /* 0 bit means "don't care"*/
+ u8 val[FILTER_GENERIC_1_KEY_LEN];
+ } __attribute__((packed)) layer[FILTER_GENERIC_1_NUM_LAYERS];
+} __attribute__((packed));
+
+/* Specifies the filter_action type. */
+enum {
+ FILTER_ACTION_RQ_STEERING = 0,
+ FILTER_ACTION_V2 = 1,
+ FILTER_ACTION_MAX
+};
+
+struct filter_action {
+ u32 type;
+ union {
+ u32 rq_idx;
+ } u;
+} __attribute__((packed));
+
+#define FILTER_ACTION_RQ_STEERING_FLAG (1 << 0)
+#define FILTER_ACTION_FILTER_ID_FLAG (1 << 1)
+#define FILTER_ACTION_DROP_FLAG (1 << 2)
+#define FILTER_ACTION_COUNTER_FLAG (1 << 3)
+#define FILTER_ACTION_V2_ALL (FILTER_ACTION_RQ_STEERING_FLAG \
+ | FILTER_ACTION_FILTER_ID_FLAG \
+ | FILTER_ACTION_DROP_FLAG \
+ | FILTER_ACTION_COUNTER_FLAG)
+
+/* Version 2 of filter action must be a strict extension of struct filter_action
+ * where the first fields exactly match in size and meaning.
+ */
+struct filter_action_v2 {
+ u32 type;
+ u32 rq_idx;
+ u32 flags; /* use FILTER_ACTION_XXX_FLAG defines */
+ u16 filter_id;
+ u32 counter_index;
+ uint8_t reserved[28]; /* for future expansion */
+} __attribute__((packed));
+
+/* Specifies the filter type. */
+enum filter_type {
+ FILTER_USNIC_ID = 0,
+ FILTER_IPV4_5TUPLE = 1,
+ FILTER_MAC_VLAN = 2,
+ FILTER_VLAN_IP_3TUPLE = 3,
+ FILTER_NVGRE_VMQ = 4,
+ FILTER_USNIC_IP = 5,
+ FILTER_DPDK_1 = 6,
+ FILTER_MAX
+};
+
+#define FILTER_USNIC_ID_FLAG (1 << FILTER_USNIC_ID)
+#define FILTER_IPV4_5TUPLE_FLAG (1 << FILTER_IPV4_5TUPLE)
+#define FILTER_MAC_VLAN_FLAG (1 << FILTER_MAC_VLAN)
+#define FILTER_VLAN_IP_3TUPLE_FLAG (1 << FILTER_VLAN_IP_3TUPLE)
+#define FILTER_NVGRE_VMQ_FLAG (1 << FILTER_NVGRE_VMQ)
+#define FILTER_USNIC_IP_FLAG (1 << FILTER_USNIC_IP)
+#define FILTER_DPDK_1_FLAG (1 << FILTER_DPDK_1)
+#define FILTER_V1_ALL (FILTER_USNIC_ID_FLAG | \
+ FILTER_IPV4_5TUPLE_FLAG | \
+ FILTER_MAC_VLAN_FLAG | \
+ FILTER_VLAN_IP_3TUPLE_FLAG | \
+ FILTER_NVGRE_VMQ_FLAG | \
+ FILTER_USNIC_IP_FLAG | \
+ FILTER_DPDK_1_FLAG)
+
+struct filter {
+ u32 type;
+ union {
+ struct filter_usnic_id usnic;
+ struct filter_ipv4_5tuple ipv4;
+ struct filter_mac_vlan mac_vlan;
+ struct filter_vlan_ip_3tuple vlan_3tuple;
+ } u;
+} __attribute__((packed));
+
+/*
+ * This is a strict superset of "struct filter" and exists only
+ * because many drivers use "sizeof (struct filter)" in deciding TLV size.
+ * This new, larger struct filter would cause any code that uses that method
+ * to not work with older firmware, so we add filter_v2 to hold the
+ * new filter types. Drivers should use vnic_filter_size() to determine
+ * the TLV size instead of sizeof (struct filter_v2) to guard against future
+ * growth.
+ */
+struct filter_v2 {
+ u32 type;
+ union {
+ struct filter_usnic_id usnic;
+ struct filter_ipv4_5tuple ipv4;
+ struct filter_mac_vlan mac_vlan;
+ struct filter_vlan_ip_3tuple vlan_3tuple;
+ struct filter_generic_1 generic_1;
+ } u;
+} __attribute__((packed));
+
+enum {
+ CLSF_TLV_FILTER = 0,
+ CLSF_TLV_ACTION = 1,
+};
+
+struct filter_tlv {
+ uint32_t type;
+ uint32_t length;
+ uint32_t val[0];
+};
+
+/* Data for CMD_ADD_FILTER is 2 TLV and filter + action structs */
+#define FILTER_MAX_BUF_SIZE 100
+#define FILTER_V2_MAX_BUF_SIZE (sizeof(struct filter_v2) + \
+ sizeof(struct filter_action_v2) + \
+ (2 * sizeof(struct filter_tlv)))
+
+/*
+ * Compute actual structure size given filter type. To be "future-proof,"
+ * drivers should use this instead of "sizeof (struct filter_v2)" when
+ * computing length for TLV.
+ */
+static inline uint32_t
+vnic_filter_size(struct filter_v2 *fp)
+{
+	uint32_t size;
+
+	switch (fp->type) {
+	case FILTER_USNIC_ID:
+		size = sizeof(fp->u.usnic);
+		break;
+	case FILTER_IPV4_5TUPLE:
+		size = sizeof(fp->u.ipv4);
+		break;
+	case FILTER_MAC_VLAN:
+	case FILTER_NVGRE_VMQ:
+		/* NVGRE filters reuse the MAC/VLAN layout */
+		size = sizeof(fp->u.mac_vlan);
+		break;
+	case FILTER_VLAN_IP_3TUPLE:
+		size = sizeof(fp->u.vlan_3tuple);
+		break;
+	case FILTER_USNIC_IP:
+	case FILTER_DPDK_1:
+		/* both are expressed as generic key/mask filters */
+		size = sizeof(fp->u.generic_1);
+		break;
+	default:
+		/* unknown type: be conservative and use the whole union */
+		size = sizeof(fp->u);
+		break;
+	}
+	/* TLV carries the type word followed by the type-specific body */
+	size += sizeof(fp->type);
+	return size;
+}
+
+enum {
+ CLSF_ADD = 0,
+ CLSF_DEL = 1,
+};
+
+/*
+ * Get the action structure size given action type. To be "future-proof,"
+ * drivers should use this instead of "sizeof (struct filter_action_v2)"
+ * when computing length for TLV.
+ */
+static inline uint32_t
+vnic_action_size(struct filter_action_v2 *fap)
+{
+	uint32_t size;
+
+	switch (fap->type) {
+	case FILTER_ACTION_RQ_STEERING:
+		size = sizeof(struct filter_action);
+		break;
+	case FILTER_ACTION_V2:
+		size = sizeof(struct filter_action_v2);
+		break;
+	default:
+		/* unknown type: fall back to the original (v1) action size */
+		size = sizeof(struct filter_action);
+		break;
+	}
+	return size;
+}
+
+/*
+ * Writing cmd register causes STAT_BUSY to get set in status register.
+ * When cmd completes, STAT_BUSY will be cleared.
+ *
+ * If cmd completed successfully STAT_ERROR will be clear
+ * and args registers contain cmd-specific results.
+ *
+ * If cmd error, STAT_ERROR will be set and args[0] contains error code.
+ *
+ * status register is read-only. While STAT_BUSY is set,
+ * all other register contents are read-only.
+ */
+
+/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */
+#define VNIC_DEVCMD_NARGS 15
+struct vnic_devcmd {
+	u32 status;			/* RO */
+	u32 cmd;			/* RW */
+	u64 args[VNIC_DEVCMD_NARGS];	/* RW cmd args (little-endian) */
+};
+
+#define DEVCMD_STATUS	0
+#define DEVCMD_CMD	4
+#define DEVCMD_ARGS(x)		(8 + (VNIC_DEVCMD_NARGS * x))
+/*
+ * NOTE(review): args[] entries are 8-byte u64s, so the byte offset of
+ * args[x] would normally be 8 + (x * 8); the macro above strides by
+ * VNIC_DEVCMD_NARGS (15) instead, and does not parenthesize x.  Confirm
+ * against the devcmd register layout before using DEVCMD_ARGS() for x > 0.
+ */
+
+/*
+ * Version 2 of the interface.
+ *
+ * Some things are carried over, notably the vnic_devcmd_cmd enum.
+ */
+
+/*
+ * Flags for vnic_devcmd2.flags
+ */
+
+#define DEVCMD2_FNORESULT 0x1 /* Don't copy result to host */
+
+#define VNIC_DEVCMD2_NARGS VNIC_DEVCMD_NARGS
+struct vnic_devcmd2 {
+ u16 pad;
+ u16 flags;
+ u32 cmd; /* same command #defines as original */
+ u64 args[VNIC_DEVCMD2_NARGS];
+};
+
+#define VNIC_DEVCMD2_NRESULTS VNIC_DEVCMD_NARGS
+struct devcmd2_result {
+ u64 results[VNIC_DEVCMD2_NRESULTS];
+ u32 pad;
+ u16 completed_index; /* into copy WQ */
+ u8 error; /* same error codes as original */
+ u8 color; /* 0 or 1 as with completion queues */
+};
+
+#define DEVCMD2_RING_SIZE 32
+#define DEVCMD2_DESC_SIZE 128
+
+#define DEVCMD2_RESULTS_SIZE_MAX ((1 << 16) - 1)
+
+/* Overlay related definitions */
+
+/*
+ * This enum lists the flag associated with each of the overlay features
+ */
+typedef enum {
+ OVERLAY_FEATURE_NVGRE = 1,
+ OVERLAY_FEATURE_VXLAN,
+ OVERLAY_FEATURE_MAX,
+} overlay_feature_t;
+
+#define OVERLAY_OFFLOAD_ENABLE 0
+#define OVERLAY_OFFLOAD_DISABLE 1
+#define OVERLAY_OFFLOAD_ENABLE_V2 2
+
+#define OVERLAY_CFG_VXLAN_PORT_UPDATE 0
+
+/*
+ * Use this enum to get the supported versions for each of these features
+ * If you need to use the devcmd_get_supported_feature_version(), add
+ * the new feature into this enum and install function handler in devcmd.c
+ */
+typedef enum {
+ VIC_FEATURE_VXLAN,
+ VIC_FEATURE_RDMA,
+ VIC_FEATURE_MAX,
+} vic_feature_t;
+
+/*
+ * These flags are used in args[1] of devcmd CMD_GET_SUPP_FEATURE_VER
+ * to indicate the host driver about the VxLAN and Multi WQ features
+ * supported
+ */
+#define FEATURE_VXLAN_IPV6_INNER (1 << 0)
+#define FEATURE_VXLAN_IPV6_OUTER (1 << 1)
+#define FEATURE_VXLAN_MULTI_WQ (1 << 2)
+
+#define FEATURE_VXLAN_IPV6 (FEATURE_VXLAN_IPV6_INNER | \
+ FEATURE_VXLAN_IPV6_OUTER)
+
+/*
+ * CMD_CONFIG_GRPINTR subcommands
+ */
+typedef enum {
+ GRPINTR_ENABLE = 1,
+ GRPINTR_DISABLE,
+ GRPINTR_UPD_VECT,
+} grpintr_subcmd_t;
+
+/*
+ * Structure for counter DMA
+ * (DMAed by CMD_COUNTER_DMA_CONFIG)
+ */
+struct vnic_counter_counts {
+ u64 vcc_packets;
+ u64 vcc_bytes;
+};
+
+#endif /* _VNIC_DEVCMD_H_ */
diff --git a/sys/dev/enic/vnic_enet.h b/sys/dev/enic/vnic_enet.h
new file mode 100644
index 000000000000..901f3b46ef5d
--- /dev/null
+++ b/sys/dev/enic/vnic_enet.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _VNIC_ENIC_H_
+#define _VNIC_ENIC_H_
+
+/* Hardware intr coalesce timer is in units of 1.5us */
+#define INTR_COALESCE_USEC_TO_HW(usec) ((usec) * 2 / 3)
+#define INTR_COALESCE_HW_TO_USEC(usec) ((usec) * 3 / 2)
+
+/* Device-specific region: enet configuration */
+struct vnic_enet_config {
+ u32 flags;
+ u32 wq_desc_count;
+ u32 rq_desc_count;
+ u16 mtu;
+ u16 intr_timer_deprecated;
+ u8 intr_timer_type;
+ u8 intr_mode;
+ char devname[16];
+ u32 intr_timer_usec;
+ u16 loop_tag;
+ u16 vf_rq_count;
+ u16 num_arfs;
+ u64 mem_paddr;
+ u16 rdma_qp_id;
+ u16 rdma_qp_count;
+ u16 rdma_resgrp;
+ u32 rdma_mr_id;
+ u32 rdma_mr_count;
+ u32 max_pkt_size;
+};
+
+#define VENETF_TSO 0x1 /* TSO enabled */
+#define VENETF_LRO 0x2 /* LRO enabled */
+#define VENETF_RXCSUM 0x4 /* RX csum enabled */
+#define VENETF_TXCSUM 0x8 /* TX csum enabled */
+#define VENETF_RSS 0x10 /* RSS enabled */
+#define VENETF_RSSHASH_IPV4 0x20 /* Hash on IPv4 fields */
+#define VENETF_RSSHASH_TCPIPV4 0x40 /* Hash on TCP + IPv4 fields */
+#define VENETF_RSSHASH_IPV6 0x80 /* Hash on IPv6 fields */
+#define VENETF_RSSHASH_TCPIPV6 0x100 /* Hash on TCP + IPv6 fields */
+#define VENETF_RSSHASH_IPV6_EX 0x200 /* Hash on IPv6 extended fields */
+#define VENETF_RSSHASH_TCPIPV6_EX 0x400 /* Hash on TCP + IPv6 ext. fields */
+#define VENETF_LOOP 0x800 /* Loopback enabled */
+#define VENETF_FAILOVER 0x1000 /* Fabric failover enabled */
+#define VENETF_USPACE_NIC 0x2000 /* vHPC enabled */
+#define VENETF_VMQ 0x4000 /* VMQ enabled */
+#define VENETF_ARFS 0x8000 /* ARFS enabled */
+#define VENETF_VXLAN 0x10000 /* VxLAN offload */
+#define VENETF_NVGRE 0x20000 /* NVGRE offload */
+#define VENETF_GRPINTR 0x40000 /* group interrupt */
+#define VENETF_NICSWITCH 0x80000 /* NICSWITCH enabled */
+#define VENETF_RSSHASH_UDPIPV4 0x100000 /* Hash on UDP + IPv4 fields */
+#define VENETF_RSSHASH_UDPIPV6 0x200000 /* Hash on UDP + IPv6 fields */
+
+#define VENET_INTR_TYPE_MIN 0 /* Timer specs min interrupt spacing */
+#define VENET_INTR_TYPE_IDLE 1 /* Timer specs idle time before irq */
+
+#define VENET_INTR_MODE_ANY 0 /* Try MSI-X, then MSI, then INTx */
+#define VENET_INTR_MODE_MSI 1 /* Try MSI then INTx */
+#define VENET_INTR_MODE_INTX 2 /* Try INTx only */
+
+#endif /* _VNIC_ENIC_H_ */
diff --git a/sys/dev/enic/vnic_intr.c b/sys/dev/enic/vnic_intr.c
new file mode 100644
index 000000000000..38e2ea6e066b
--- /dev/null
+++ b/sys/dev/enic/vnic_intr.c
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include "enic.h"
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+
+/* Drop the cached pointer to the interrupt control registers. */
+void vnic_intr_free(struct vnic_intr *intr)
+{
+	intr->ctrl = NULL;
+}
+
+/*
+ * Bind intr to the device's INTR_CTRL register block for the given
+ * index.  Returns 0 on success or -EINVAL if the resource is absent.
+ */
+int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
+	unsigned int index)
+{
+	intr->index = index;
+	intr->vdev = vdev;
+
+	intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
+	if (!intr->ctrl) {
+		pr_err("Failed to hook INTR[%d].ctrl resource\n", index);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Program the coalescing timer (caller passes usec; converted to hw
+ * units), coalescing type and mask-on-assertion behavior, and clear
+ * any outstanding interrupt credits.
+ */
+void vnic_intr_init(struct vnic_intr *intr, u32 coalescing_timer,
+	unsigned int coalescing_type, unsigned int mask_on_assertion)
+{
+	vnic_intr_coalescing_timer_set(intr, coalescing_timer);
+	ENIC_BUS_WRITE_4(intr->ctrl, INTR_COALESCING_TYPE, coalescing_type);
+	ENIC_BUS_WRITE_4(intr->ctrl, INTR_MASK_ON_ASSERTION, mask_on_assertion);
+	ENIC_BUS_WRITE_4(intr->ctrl, INTR_CREDITS, 0);
+}
+
+/* Write the coalescing timer; usec input is converted to hw units. */
+void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
+	u32 coalescing_timer)
+{
+	ENIC_BUS_WRITE_4(intr->ctrl, INTR_COALESCING_TIMER,
+	    vnic_dev_intr_coal_timer_usec_to_hw(intr->vdev, coalescing_timer));
+}
+
+/* Discard any pending interrupt credits. */
+void vnic_intr_clean(struct vnic_intr *intr)
+{
+	ENIC_BUS_WRITE_4(intr->ctrl, INTR_CREDITS, 0);
+}
diff --git a/sys/dev/enic/vnic_intr.h b/sys/dev/enic/vnic_intr.h
new file mode 100644
index 000000000000..22db66096aae
--- /dev/null
+++ b/sys/dev/enic/vnic_intr.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _VNIC_INTR_H_
+#define _VNIC_INTR_H_
+
+
+#include "vnic_dev.h"
+
+#define VNIC_INTR_TIMER_TYPE_ABS 0
+#define VNIC_INTR_TIMER_TYPE_QUIET 1
+
+/* Interrupt control */
+struct vnic_intr_ctrl {
+ u32 coalescing_timer; /* 0x00 */
+#define INTR_COALESCING_TIMER 0x00
+ u32 pad0;
+ u32 coalescing_value; /* 0x08 */
+#define INTR_COALESCING_VALUE 0x08
+ u32 pad1;
+ u32 coalescing_type; /* 0x10 */
+#define INTR_COALESCING_TYPE 0x10
+ u32 pad2;
+ u32 mask_on_assertion; /* 0x18 */
+#define INTR_MASK_ON_ASSERTION 0x18
+ u32 pad3;
+ u32 mask; /* 0x20 */
+#define INTR_MASK 0x20
+ u32 pad4;
+ u32 int_credits; /* 0x28 */
+#define INTR_CREDITS 0x28
+ u32 pad5;
+ u32 int_credit_return; /* 0x30 */
+#define INTR_CREDIT_RETURN 0x30
+ u32 pad6;
+};
+
+struct vnic_intr {
+ unsigned int index;
+ struct vnic_dev *vdev;
+ struct vnic_res *ctrl;
+};
+
+/* Mask (disable) the interrupt. */
+static inline void vnic_intr_mask(struct vnic_intr *intr)
+{
+	ENIC_BUS_WRITE_4(intr->ctrl, INTR_MASK, 1);
+}
+
+/* Return nonzero if the interrupt is currently masked. */
+static inline int vnic_intr_masked(struct vnic_intr *intr)
+{
+	int ret;
+
+	ret = ENIC_BUS_READ_4(intr->ctrl, INTR_MASK);
+	return ret;
+}
+
+/* Unmask (enable) the interrupt. */
+static inline void vnic_intr_unmask(struct vnic_intr *intr)
+{
+	ENIC_BUS_WRITE_4(intr->ctrl, INTR_MASK, 0);
+}
+
+/*
+ * Return up to 0xffff processed-event credits to the hardware in one
+ * register write; bit 16 optionally unmasks the interrupt and bit 17
+ * optionally restarts the coalescing timer.
+ */
+static inline void vnic_intr_return_credits(struct vnic_intr *intr,
+	unsigned int credits, int unmask, int reset_timer)
+{
+#define VNIC_INTR_UNMASK_SHIFT		16
+#define VNIC_INTR_RESET_TIMER_SHIFT	17
+
+	u32 int_credit_return = (credits & 0xffff) |
+	    (unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) |
+	    (reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0);
+
+	ENIC_BUS_WRITE_4(intr->ctrl, INTR_CREDIT_RETURN, int_credit_return);
+}
+
+/* Read the count of accumulated (unreturned) interrupt credits. */
+static inline unsigned int vnic_intr_credits(struct vnic_intr *intr)
+{
+	return ENIC_BUS_READ_4(intr->ctrl, INTR_CREDITS);
+}
+
+/* Return every outstanding credit, unmask, and restart the timer. */
+static inline void vnic_intr_return_all_credits(struct vnic_intr *intr)
+{
+	unsigned int credits = vnic_intr_credits(intr);
+	int unmask = 1;
+	int reset_timer = 1;
+
+	vnic_intr_return_credits(intr, credits, unmask, reset_timer);
+}
+
+void vnic_intr_free(struct vnic_intr *intr);
+int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
+ unsigned int index);
+void vnic_intr_init(struct vnic_intr *intr, u32 coalescing_timer,
+ unsigned int coalescing_type, unsigned int mask_on_assertion);
+void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
+ u32 coalescing_timer);
+void vnic_intr_clean(struct vnic_intr *intr);
+
+#endif /* _VNIC_INTR_H_ */
diff --git a/sys/dev/enic/vnic_nic.h b/sys/dev/enic/vnic_nic.h
new file mode 100644
index 000000000000..ebb757744107
--- /dev/null
+++ b/sys/dev/enic/vnic_nic.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _VNIC_NIC_H_
+#define _VNIC_NIC_H_
+
+#define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD 0xffUL
+#define NIC_CFG_RSS_DEFAULT_CPU_SHIFT 0
+#define NIC_CFG_RSS_HASH_TYPE (0xffUL << 8)
+#define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD 0xffUL
+#define NIC_CFG_RSS_HASH_TYPE_SHIFT 8
+#define NIC_CFG_RSS_HASH_BITS (7UL << 16)
+#define NIC_CFG_RSS_HASH_BITS_MASK_FIELD 7UL
+#define NIC_CFG_RSS_HASH_BITS_SHIFT 16
+#define NIC_CFG_RSS_BASE_CPU (7UL << 19)
+#define NIC_CFG_RSS_BASE_CPU_MASK_FIELD 7UL
+#define NIC_CFG_RSS_BASE_CPU_SHIFT 19
+#define NIC_CFG_RSS_ENABLE (1UL << 22)
+#define NIC_CFG_RSS_ENABLE_MASK_FIELD 1UL
+#define NIC_CFG_RSS_ENABLE_SHIFT 22
+#define NIC_CFG_TSO_IPID_SPLIT_EN (1UL << 23)
+#define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD 1UL
+#define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT 23
+#define NIC_CFG_IG_VLAN_STRIP_EN (1UL << 24)
+#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
+#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
+
+#define NIC_CFG_RSS_HASH_TYPE_UDP_IPV4 (1 << 0)
+#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 1)
+#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 2)
+#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 3)
+#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 4)
+#define NIC_CFG_RSS_HASH_TYPE_RSVD1 (1 << 5)
+#define NIC_CFG_RSS_HASH_TYPE_RSVD2 (1 << 6)
+#define NIC_CFG_RSS_HASH_TYPE_UDP_IPV6 (1 << 7)
+
+/*
+ * Pack the RSS / TSO-IPID-split / ingress-VLAN-strip settings into the
+ * single 32-bit NIC config word written to the device: each argument
+ * is masked to its field width and shifted into its NIC_CFG_* position.
+ */
+static inline void vnic_set_nic_cfg(u32 *nic_cfg,
+	u8 rss_default_cpu, u8 rss_hash_type,
+	u8 rss_hash_bits, u8 rss_base_cpu,
+	u8 rss_enable, u8 tso_ipid_split_en,
+	u8 ig_vlan_strip_en)
+{
+	*nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) |
+	    ((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD)
+	    << NIC_CFG_RSS_HASH_TYPE_SHIFT) |
+	    ((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD)
+	    << NIC_CFG_RSS_HASH_BITS_SHIFT) |
+	    ((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD)
+	    << NIC_CFG_RSS_BASE_CPU_SHIFT) |
+	    ((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD)
+	    << NIC_CFG_RSS_ENABLE_SHIFT) |
+	    ((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD)
+	    << NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) |
+	    ((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD)
+	    << NIC_CFG_IG_VLAN_STRIP_EN_SHIFT);
+}
+
+#endif /* _VNIC_NIC_H_ */
diff --git a/sys/dev/enic/vnic_resource.h b/sys/dev/enic/vnic_resource.h
new file mode 100644
index 000000000000..184bfa7401df
--- /dev/null
+++ b/sys/dev/enic/vnic_resource.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _VNIC_RESOURCE_H_
+#define _VNIC_RESOURCE_H_
+
+#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */
+#define VNIC_RES_VERSION 0x00000000L
+#define MGMTVNIC_MAGIC 0x544d474dL /* 'MGMT' */
+#define MGMTVNIC_VERSION 0x00000000L
+
+/* The MAC address assigned to the CFG vNIC is fixed. */
+#define MGMTVNIC_MAC { 0x02, 0x00, 0x54, 0x4d, 0x47, 0x4d }
+
+/* vNIC resource types */
+enum vnic_res_type {
+ RES_TYPE_EOL, /* End-of-list */
+ RES_TYPE_WQ, /* Work queues */
+ RES_TYPE_RQ, /* Receive queues */
+ RES_TYPE_CQ, /* Completion queues */
+ RES_TYPE_MEM, /* Window to dev memory */
+ RES_TYPE_NIC_CFG, /* Enet NIC config registers */
+ RES_TYPE_RSS_KEY, /* Enet RSS secret key */
+ RES_TYPE_RSS_CPU, /* Enet RSS indirection table */
+ RES_TYPE_TX_STATS, /* Netblock Tx statistic regs */
+ RES_TYPE_RX_STATS, /* Netblock Rx statistic regs */
+ RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */
+ RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */
+ RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */
+ RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status */
+ RES_TYPE_DEBUG, /* Debug-only info */
+ RES_TYPE_DEV, /* Device-specific region */
+ RES_TYPE_DEVCMD, /* Device command region */
+ RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */
+ RES_TYPE_SUBVNIC, /* subvnic resource type */
+ RES_TYPE_MQ_WQ, /* MQ Work queues */
+ RES_TYPE_MQ_RQ, /* MQ Receive queues */
+ RES_TYPE_MQ_CQ, /* MQ Completion queues */
+ RES_TYPE_DEPRECATED1, /* Old version of devcmd 2 */
+ RES_TYPE_DEVCMD2, /* Device control region */
+ RES_TYPE_MAX, /* Count of resource types */
+};
+
+struct vnic_resource_header {
+ u32 magic;
+ u32 version;
+};
+
+struct mgmt_barmap_hdr {
+ u32 magic; /* magic number */
+ u32 version; /* header format version */
+ u16 lif; /* loopback lif for mgmt frames */
+ u16 pci_slot; /* installed pci slot */
+ char serial[16]; /* card serial number */
+};
+
+struct vnic_resource {
+ u8 type;
+ u8 bar;
+ u8 pad[2];
+ u32 bar_offset;
+ u32 count;
+};
+
+#endif /* _VNIC_RESOURCE_H_ */
diff --git a/sys/dev/enic/vnic_rq.c b/sys/dev/enic/vnic_rq.c
new file mode 100644
index 000000000000..3720da5f9aa6
--- /dev/null
+++ b/sys/dev/enic/vnic_rq.c
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include "enic.h"
+#include "vnic_dev.h"
+#include "vnic_rq.h"
+
+/*
+ * Program the RQ control registers: ring base/size, completion queue
+ * binding, error interrupt routing, and the initial fetch/posted
+ * indexes.  The descriptor ring must already be allocated.
+ */
+void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
+	unsigned int fetch_index, unsigned int posted_index,
+	unsigned int error_interrupt_enable,
+	unsigned int error_interrupt_offset)
+{
+	u64 paddr;
+	unsigned int count = rq->ring.desc_count;
+
+	paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
+	ENIC_BUS_WRITE_8(rq->ctrl, RX_RING_BASE, paddr);
+	ENIC_BUS_WRITE_4(rq->ctrl, RX_RING_SIZE, count);
+	ENIC_BUS_WRITE_4(rq->ctrl, RX_CQ_INDEX, cq_index);
+	ENIC_BUS_WRITE_4(rq->ctrl, RX_ERROR_INTR_ENABLE, error_interrupt_enable);
+	ENIC_BUS_WRITE_4(rq->ctrl, RX_ERROR_INTR_OFFSET, error_interrupt_offset);
+	ENIC_BUS_WRITE_4(rq->ctrl, RX_ERROR_STATUS, 0);
+	ENIC_BUS_WRITE_4(rq->ctrl, RX_FETCH_INDEX, fetch_index);
+	ENIC_BUS_WRITE_4(rq->ctrl, RX_POSTED_INDEX, posted_index);
+}
+
+/*
+ * (Re)initialize the RQ, restarting the ring at the hardware's current
+ * fetch index (posted == fetch, i.e. empty) and resetting the software
+ * receive-status counters.
+ */
+void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
+	unsigned int error_interrupt_enable,
+	unsigned int error_interrupt_offset)
+{
+	u32 fetch_index = 0;
+
+	/* Use current fetch_index as the ring starting point */
+	fetch_index = ENIC_BUS_READ_4(rq->ctrl, RX_FETCH_INDEX);
+
+	if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
+		/* Hardware surprise removal: reset fetch_index */
+		fetch_index = 0;
+	}
+
+	vnic_rq_init_start(rq, cq_index,
+		fetch_index, fetch_index,
+		error_interrupt_enable,
+		error_interrupt_offset);
+	rq->rxst_idx = 0;
+	rq->tot_pkts = 0;
+}
+
+/* Read and return the RQ error status register. */
+unsigned int vnic_rq_error_status(struct vnic_rq *rq)
+{
+	return ENIC_BUS_READ_4(rq->ctrl, RX_ERROR_STATUS);
+}
+
+/* Enable the receive queue. */
+void vnic_rq_enable(struct vnic_rq *rq)
+{
+	ENIC_BUS_WRITE_4(rq->ctrl, RX_ENABLE, 1);
+}
+
+/*
+ * Request RQ disable and poll (1000 x 10us, ~10ms total) for the
+ * hardware to acknowledge.  Returns 0 on success or -ETIMEDOUT if
+ * RX_RUNNING never clears.
+ */
+int vnic_rq_disable(struct vnic_rq *rq)
+{
+	unsigned int wait;
+
+	ENIC_BUS_WRITE_4(rq->ctrl, RX_ENABLE, 0);
+
+	/* Wait for HW to ACK disable request */
+	for (wait = 0; wait < 1000; wait++) {
+		if (!(ENIC_BUS_READ_4(rq->ctrl, RX_RUNNING)))
+			return 0;
+		udelay(10);
+	}
+
+	pr_err("Failed to disable RQ[%d]\n", rq->index);
+
+	return -ETIMEDOUT;
+}
+
+/*
+ * Reset software ring state and zero the descriptor ring; the posted
+ * index is rewound to the hardware's current fetch index so the ring
+ * restarts empty.
+ */
+void vnic_rq_clean(struct vnic_rq *rq)
+{
+	u32 fetch_index;
+	unsigned int count = rq->ring.desc_count;
+
+	rq->ring.desc_avail = count - 1;
+	rq->rx_nb_hold = 0;
+
+	/* Use current fetch_index as the ring starting point */
+	fetch_index = ENIC_BUS_READ_4(rq->ctrl, RX_FETCH_INDEX);
+	if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
+		/* Hardware surprise removal: reset fetch_index */
+		fetch_index = 0;
+	}
+
+	ENIC_BUS_WRITE_4(rq->ctrl, RX_POSTED_INDEX, fetch_index);
+
+	vnic_dev_clear_desc_ring(&rq->ring);
+}
diff --git a/sys/dev/enic/vnic_rq.h b/sys/dev/enic/vnic_rq.h
new file mode 100644
index 000000000000..ae8c1fdc39bd
--- /dev/null
+++ b/sys/dev/enic/vnic_rq.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _VNIC_RQ_H_
+#define _VNIC_RQ_H_
+
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+/* Receive queue control */
+struct vnic_rq_ctrl {
+ u64 ring_base; /* 0x00 */
+#define RX_RING_BASE 0x00
+ u32 ring_size; /* 0x08 */
+#define RX_RING_SIZE 0x08
+ u32 pad0;
+ u32 posted_index; /* 0x10 */
+#define RX_POSTED_INDEX 0x10
+ u32 pad1;
+ u32 cq_index; /* 0x18 */
+#define RX_CQ_INDEX 0x18
+ u32 pad2;
+ u32 enable; /* 0x20 */
+#define RX_ENABLE 0x20
+ u32 pad3;
+ u32 running; /* 0x28 */
+#define RX_RUNNING 0x28
+ u32 pad4;
+ u32 fetch_index; /* 0x30 */
+#define RX_FETCH_INDEX 0x30
+ u32 pad5;
+ u32 error_interrupt_enable; /* 0x38 */
+#define RX_ERROR_INTR_ENABLE 0x38
+ u32 pad6;
+ u32 error_interrupt_offset; /* 0x40 */
+#define RX_ERROR_INTR_OFFSET 0x40
+ u32 pad7;
+ u32 error_status; /* 0x48 */
+#define RX_ERROR_STATUS 0x48
+ u32 pad8;
+ u32 tcp_sn; /* 0x50 */
+#define RX_TCP_SN 0x50
+ u32 pad9;
+ u32 unused; /* 0x58 */
+ u32 pad10;
+ u32 dca_select; /* 0x60 */
+#define RX_DCA_SELECT 0x60
+ u32 pad11;
+ u32 dca_value; /* 0x68 */
+#define RX_DCA_VALUE 0x68
+ u32 pad12;
+ u32 data_ring; /* 0x70 */
+};
+
+/*
+ * Per-RQ software state.  NOTE(review): several members (mbuf_ring of
+ * rte_mbuf, mp of rte_mempool, socket_id) are carried over from the
+ * DPDK enic driver this code derives from -- confirm which are used
+ * by the FreeBSD/iflib port.
+ */
+struct vnic_rq {
+	unsigned int index;
+	unsigned int posted_index;
+	struct vnic_dev *vdev;
+	struct vnic_res *ctrl;
+	struct vnic_dev_ring ring;
+	int num_free_mbufs;
+	struct rte_mbuf **mbuf_ring;		/* array of allocated mbufs */
+	unsigned int mbuf_next_idx;		/* next mb to consume */
+	void *os_buf_head;
+	unsigned int pkts_outstanding;
+	uint16_t rx_nb_hold;
+	uint16_t rx_free_thresh;
+	unsigned int socket_id;
+	struct rte_mempool *mp;
+	uint16_t rxst_idx;
+	uint32_t tot_pkts;
+	uint8_t in_use;
+	unsigned int max_mbufs_per_pkt;
+	uint16_t tot_nb_desc;
+	bool need_initial_post;
+	struct iflib_dma_info data;
+};
+
+/* Number of descriptors the driver (SW) currently owns. */
+static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
+{
+	/* how many does SW own? */
+	return rq->ring.desc_avail;
+}
+
+/* Number of descriptors currently posted to (owned by) the hardware. */
+static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
+{
+	/* how many does HW own? */
+	return rq->ring.desc_count - rq->ring.desc_avail - 1;
+}
+
+enum desc_return_options {
+ VNIC_RQ_RETURN_DESC,
+ VNIC_RQ_DEFER_RETURN_DESC,
+};
+
+/*
+ * Post buffers via buf_fill until the ring is full or buf_fill fails;
+ * returns 0 or the first buf_fill error.  NOTE: buf_fill must consume
+ * a descriptor (decrement desc_avail) or this loop will not terminate.
+ */
+static inline int vnic_rq_fill(struct vnic_rq *rq,
+	int (*buf_fill)(struct vnic_rq *rq))
+{
+	int err;
+
+	while (vnic_rq_desc_avail(rq) > 0) {
+
+		err = (*buf_fill)(rq);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+/*
+ * Like vnic_rq_fill(), but posts at most count buffers; returns 0 or
+ * the first buf_fill error.
+ */
+static inline int vnic_rq_fill_count(struct vnic_rq *rq,
+	int (*buf_fill)(struct vnic_rq *rq), unsigned int count)
+{
+	int err;
+
+	while ((vnic_rq_desc_avail(rq) > 0) && (count--)) {
+
+		err = (*buf_fill)(rq);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+void vnic_rq_free(struct vnic_rq *rq);
+void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
+ unsigned int fetch_index, unsigned int posted_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
+void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
+void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error);
+unsigned int vnic_rq_error_status(struct vnic_rq *rq);
+void vnic_rq_enable(struct vnic_rq *rq);
+int vnic_rq_disable(struct vnic_rq *rq);
+void vnic_rq_clean(struct vnic_rq *rq);
+
+#endif /* _VNIC_RQ_H_ */
diff --git a/sys/dev/enic/vnic_rss.h b/sys/dev/enic/vnic_rss.h
new file mode 100644
index 000000000000..abd7b9f131aa
--- /dev/null
+++ b/sys/dev/enic/vnic_rss.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _VNIC_RSS_H_
+#define _VNIC_RSS_H_
+
+/* RSS key array */
+union vnic_rss_key {
+ struct {
+ u8 b[10];
+ u8 b_pad[6];
+ } key[4];
+ u64 raw[8];
+};
+
+/* RSS cpu array */
+union vnic_rss_cpu {
+ struct {
+ u8 b[4];
+ u8 b_pad[4];
+ } cpu[32];
+ u64 raw[32];
+};
+
+void vnic_set_rss_key(union vnic_rss_key *rss_key, u8 *key);
+void vnic_set_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
+void vnic_get_rss_key(union vnic_rss_key *rss_key, u8 *key);
+void vnic_get_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
+
+#endif /* _VNIC_RSS_H_ */
diff --git a/sys/dev/enic/vnic_stats.h b/sys/dev/enic/vnic_stats.h
new file mode 100644
index 000000000000..49429cc2ada5
--- /dev/null
+++ b/sys/dev/enic/vnic_stats.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _VNIC_STATS_H_
+#define _VNIC_STATS_H_
+
+/* Tx statistics */
+struct vnic_tx_stats {
+ u64 tx_frames_ok;
+ u64 tx_unicast_frames_ok;
+ u64 tx_multicast_frames_ok;
+ u64 tx_broadcast_frames_ok;
+ u64 tx_bytes_ok;
+ u64 tx_unicast_bytes_ok;
+ u64 tx_multicast_bytes_ok;
+ u64 tx_broadcast_bytes_ok;
+ u64 tx_drops;
+ u64 tx_errors;
+ u64 tx_tso;
+ u64 rsvd[16];
+};
+
+/* Rx statistics */
+struct vnic_rx_stats {
+ u64 rx_frames_ok;
+ u64 rx_frames_total;
+ u64 rx_unicast_frames_ok;
+ u64 rx_multicast_frames_ok;
+ u64 rx_broadcast_frames_ok;
+ u64 rx_bytes_ok;
+ u64 rx_unicast_bytes_ok;
+ u64 rx_multicast_bytes_ok;
+ u64 rx_broadcast_bytes_ok;
+ u64 rx_drop;
+ u64 rx_no_bufs;
+ u64 rx_errors;
+ u64 rx_rss;
+ u64 rx_crc_errors;
+ u64 rx_frames_64;
+ u64 rx_frames_127;
+ u64 rx_frames_255;
+ u64 rx_frames_511;
+ u64 rx_frames_1023;
+ u64 rx_frames_1518;
+ u64 rx_frames_to_max;
+ u64 rsvd[16];
+};
+
+struct vnic_stats {
+ struct vnic_tx_stats tx;
+ struct vnic_rx_stats rx;
+};
+
+#endif /* _VNIC_STATS_H_ */
diff --git a/sys/dev/enic/vnic_wq.c b/sys/dev/enic/vnic_wq.c
new file mode 100644
index 000000000000..b032df3392b2
--- /dev/null
+++ b/sys/dev/enic/vnic_wq.c
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include "enic.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+
+/*
+ * Program the WQ control registers: ring base/size, the starting
+ * fetch and posted indices, the completion queue binding, and the
+ * error-interrupt configuration; the error status register is
+ * cleared.  The software head/tail indices are synchronized to the
+ * supplied fetch index.
+ */
+void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+	unsigned int fetch_index, unsigned int posted_index,
+	unsigned int error_interrupt_enable,
+	unsigned int error_interrupt_offset)
+{
+	u64 paddr;
+	unsigned int count = wq->ring.desc_count;
+
+	/* VNIC_PADDR_TARGET is OR'd into the ring DMA address per the
+	 * device's addressing convention. */
+	paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
+	ENIC_BUS_WRITE_8(wq->ctrl, TX_RING_BASE, paddr);
+	ENIC_BUS_WRITE_4(wq->ctrl, TX_RING_SIZE, count);
+	ENIC_BUS_WRITE_4(wq->ctrl, TX_FETCH_INDEX, fetch_index);
+	ENIC_BUS_WRITE_4(wq->ctrl, TX_POSTED_INDEX, posted_index);
+	ENIC_BUS_WRITE_4(wq->ctrl, TX_CQ_INDEX, cq_index);
+	ENIC_BUS_WRITE_4(wq->ctrl, TX_ERROR_INTR_ENABLE, error_interrupt_enable);
+	ENIC_BUS_WRITE_4(wq->ctrl, TX_ERROR_INTR_OFFSET, error_interrupt_offset);
+	ENIC_BUS_WRITE_4(wq->ctrl, TX_ERROR_STATUS, 0);
+
+	/* Keep the software view of the ring in step with hardware. */
+	wq->head_idx = fetch_index;
+	wq->tail_idx = wq->head_idx;
+}
+
+/*
+ * Initialize a WQ with both indices at 0 and reset the software
+ * completion bookkeeping.  Thin wrapper over vnic_wq_init_start().
+ */
+void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
+	unsigned int error_interrupt_enable,
+	unsigned int error_interrupt_offset)
+{
+	vnic_wq_init_start(wq, cq_index, 0, 0,
+		error_interrupt_enable,
+		error_interrupt_offset);
+	wq->cq_pend = 0;
+	wq->last_completed_index = 0;
+}
+
+/* Read the WQ's hardware error status register. */
+unsigned int vnic_wq_error_status(struct vnic_wq *wq)
+{
+	return ENIC_BUS_READ_4(wq->ctrl, TX_ERROR_STATUS);
+}
+
+/* Set the WQ enable register so hardware starts servicing the ring. */
+void vnic_wq_enable(struct vnic_wq *wq)
+{
+	ENIC_BUS_WRITE_4(wq->ctrl, TX_ENABLE, 1);
+}
+
+/*
+ * Clear the WQ enable register and poll the running register until
+ * hardware acknowledges the stop (up to ~10 ms: 1000 polls, 10 us
+ * apart).  Returns 0 on success, -ETIMEDOUT if it never stops.
+ */
+int vnic_wq_disable(struct vnic_wq *wq)
+{
+	unsigned int wait;
+
+	ENIC_BUS_WRITE_4(wq->ctrl, TX_ENABLE, 0);
+
+	/* Wait for HW to ACK disable request */
+	for (wait = 0; wait < 1000; wait++) {
+		if (!(ENIC_BUS_READ_4(wq->ctrl, TX_RUNNING)))
+			return 0;
+		udelay(10);
+	}
+
+	pr_err("Failed to disable WQ[%d]\n", wq->index);
+
+	return -ETIMEDOUT;
+}
+
+/*
+ * Reset the queue to the empty state: return every descriptor still
+ * accounted as hardware-owned back to software, zero the software and
+ * hardware indices, clear the error status, and wipe the ring memory.
+ *
+ * Unlike the Linux/DPDK originals this port tracks no per-descriptor
+ * buffer pointers here, so there is nothing to free per entry; the
+ * former 'to_clean' index walk was dead code and has been removed.
+ */
+void vnic_wq_clean(struct vnic_wq *wq)
+{
+	/* Hand every in-flight descriptor back to software. */
+	while (vnic_wq_desc_used(wq) > 0)
+		wq->ring.desc_avail++;
+
+	wq->head_idx = 0;
+	wq->tail_idx = 0;
+	wq->last_completed_index = 0;
+
+	ENIC_BUS_WRITE_4(wq->ctrl, TX_FETCH_INDEX, 0);
+	ENIC_BUS_WRITE_4(wq->ctrl, TX_POSTED_INDEX, 0);
+	ENIC_BUS_WRITE_4(wq->ctrl, TX_ERROR_STATUS, 0);
+
+	vnic_dev_clear_desc_ring(&wq->ring);
+}
diff --git a/sys/dev/enic/vnic_wq.h b/sys/dev/enic/vnic_wq.h
new file mode 100644
index 000000000000..c4f551de8441
--- /dev/null
+++ b/sys/dev/enic/vnic_wq.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _VNIC_WQ_H_
+#define _VNIC_WQ_H_
+
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+/*
+ * Work queue control register block as it appears in BAR space.
+ * Registers are 32-bit (64-bit for the ring base) on 8-byte strides;
+ * the padN fields fill each stride.  The TX_* defines duplicate each
+ * field's byte offset for the ENIC_BUS_READ/WRITE accessors — keep
+ * them in sync with the field layout.
+ */
+struct vnic_wq_ctrl {
+	u64 ring_base;			/* 0x00 */
+#define TX_RING_BASE 0x00
+	u32 ring_size;			/* 0x08 */
+#define TX_RING_SIZE 0x08
+	u32 pad0;
+	u32 posted_index;		/* 0x10 */
+#define TX_POSTED_INDEX 0x10
+	u32 pad1;
+	u32 cq_index;			/* 0x18 */
+#define TX_CQ_INDEX 0x18
+	u32 pad2;
+	u32 enable;			/* 0x20 */
+#define TX_ENABLE 0x20
+	u32 pad3;
+	u32 running;			/* 0x28 */
+#define TX_RUNNING 0x28
+	u32 pad4;
+	u32 fetch_index;		/* 0x30 */
+#define TX_FETCH_INDEX 0x30
+	u32 pad5;
+	u32 dca_value;			/* 0x38 */
+#define TX_DCA_VALUE 0x38
+	u32 pad6;
+	u32 error_interrupt_enable;	/* 0x40 */
+#define TX_ERROR_INTR_ENABLE 0x40
+	u32 pad7;
+	u32 error_interrupt_offset;	/* 0x48 */
+#define TX_ERROR_INTR_OFFSET 0x48
+	u32 pad8;
+	u32 error_status;		/* 0x50 */
+#define TX_ERROR_STATUS 0x50
+	u32 pad9;
+};
+
+/* Software state for one transmit work queue. */
+struct vnic_wq {
+	unsigned int index;		/* WQ number within the device */
+	uint64_t tx_offload_notsup_mask;
+	struct vnic_dev *vdev;		/* owning device */
+	struct vnic_res *ctrl;		/* mapped control register block */
+	struct vnic_dev_ring ring;	/* descriptor ring memory/accounting */
+	unsigned int head_idx;		/* next descriptor SW will post */
+	unsigned int cq_pend;
+	unsigned int tail_idx;		/* oldest not-yet-completed descriptor */
+	unsigned int socket_id;
+	unsigned int processed;
+	/* NOTE(review): rte_memzone is a DPDK type; this member looks like
+	 * a leftover from the DPDK port — confirm it is unused here. */
+	const struct rte_memzone *cqmsg_rz;
+	uint16_t last_completed_index;
+	uint64_t offloads;
+};
+
+/* Number of descriptors software currently owns (free to post). */
+static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
+{
+	/* how many does SW own? */
+	return wq->ring.desc_avail;
+}
+
+/*
+ * Number of descriptors hardware currently owns.  One slot is always
+ * kept out of circulation (the "- 1") so a completely full ring can
+ * be distinguished from an empty one.
+ */
+static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
+{
+	/* how many does HW own? */
+	return wq->ring.desc_count - wq->ring.desc_avail - 1;
+}
+
+#define PI_LOG2_CACHE_LINE_SIZE 5
+#define PI_INDEX_BITS 12
+#define PI_INDEX_MASK ((1U << PI_INDEX_BITS) - 1)
+#define PI_PREFETCH_LEN_MASK ((1U << PI_LOG2_CACHE_LINE_SIZE) - 1)
+#define PI_PREFETCH_LEN_OFF 16
+#define PI_PREFETCH_ADDR_BITS 43
+#define PI_PREFETCH_ADDR_MASK ((1ULL << PI_PREFETCH_ADDR_BITS) - 1)
+#define PI_PREFETCH_ADDR_OFF 21
+
+/* Advance a ring index by one, wrapping to 0 at n_descriptors. */
+static inline uint32_t
+buf_idx_incr(uint32_t n_descriptors, uint32_t idx)
+{
+	idx++;
+	if (unlikely(idx == n_descriptors))
+		idx = 0;
+	return idx;
+}
+
+void vnic_wq_free(struct vnic_wq *wq);
+void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int fetch_index, unsigned int posted_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
+void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
+void vnic_wq_error_out(struct vnic_wq *wq, unsigned int error);
+unsigned int vnic_wq_error_status(struct vnic_wq *wq);
+void vnic_wq_enable(struct vnic_wq *wq);
+int vnic_wq_disable(struct vnic_wq *wq);
+void vnic_wq_clean(struct vnic_wq *wq);
+
+#endif /* _VNIC_WQ_H_ */
diff --git a/sys/dev/enic/wq_enet_desc.h b/sys/dev/enic/wq_enet_desc.h
new file mode 100644
index 000000000000..c7edcd67cf98
--- /dev/null
+++ b/sys/dev/enic/wq_enet_desc.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _WQ_ENET_DESC_H_
+#define _WQ_ENET_DESC_H_
+
+/* Ethernet work queue descriptor: 16B, little-endian as seen by HW. */
+struct wq_enet_desc {
+	__le64 address;			/* buffer DMA address */
+	__le16 length;			/* buffer length (low 14 bits) */
+	__le16 mss_loopback;		/* MSS (14 bits) + loopback bit */
+	__le16 header_length_flags;	/* hdr len (10 bits) + OM/EOP/CQ/
+					 * FCoE/VLAN-insert flag bits */
+	__le16 vlan_tag;		/* tag used when VLAN insert is set */
+};
+
+#define WQ_ENET_ADDR_BITS 64
+#define WQ_ENET_LEN_BITS 14
+#define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1)
+#define WQ_ENET_MSS_BITS 14
+#define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1)
+#define WQ_ENET_MSS_SHIFT 2
+#define WQ_ENET_LOOPBACK_SHIFT 1
+#define WQ_ENET_HDRLEN_BITS 10
+#define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1)
+#define WQ_ENET_FLAGS_OM_BITS 2
+#define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1)
+#define WQ_ENET_FLAGS_EOP_SHIFT 12
+#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13
+#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14
+#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15
+
+#define WQ_ENET_OFFLOAD_MODE_CSUM 0
+#define WQ_ENET_OFFLOAD_MODE_RESERVED 1
+#define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2
+#define WQ_ENET_OFFLOAD_MODE_TSO 3
+
+/*
+ * Pack the given fields into a WQ descriptor, converting to the
+ * little-endian layout the hardware expects.  Values are masked to
+ * their field widths (see WQ_ENET_*_MASK/_SHIFT above); single-bit
+ * flags are reduced with "& 1" before shifting into place.
+ */
+static inline void wq_enet_desc_enc(struct wq_enet_desc *desc,
+	u64 address, u16 length, u16 mss, u16 header_length,
+	u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
+	u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)
+{
+	desc->address = cpu_to_le64(address);
+	desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK);
+	desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<
+		WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT);
+	desc->header_length_flags = cpu_to_le16(
+		(header_length & WQ_ENET_HDRLEN_MASK) |
+		(offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS |
+		(eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT |
+		(cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |
+		(fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT |
+		(vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT);
+	desc->vlan_tag = cpu_to_le16(vlan_tag);
+}
+
+/*
+ * Unpack a WQ descriptor into its component fields — the exact
+ * inverse of wq_enet_desc_enc().  Each field is shifted down and
+ * masked to its width after little-endian conversion.
+ */
+static inline void wq_enet_desc_dec(struct wq_enet_desc *desc,
+	u64 *address, u16 *length, u16 *mss, u16 *header_length,
+	u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,
+	u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)
+{
+	*address = le64_to_cpu(desc->address);
+	*length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK;
+	*mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &
+		WQ_ENET_MSS_MASK;
+	*loopback = (u8)((le16_to_cpu(desc->mss_loopback) >>
+		WQ_ENET_LOOPBACK_SHIFT) & 1);
+	*header_length = le16_to_cpu(desc->header_length_flags) &
+		WQ_ENET_HDRLEN_MASK;
+	*offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >>
+		WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK);
+	*eop = (u8)((le16_to_cpu(desc->header_length_flags) >>
+		WQ_ENET_FLAGS_EOP_SHIFT) & 1);
+	*cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >>
+		WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1);
+	*fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >>
+		WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1);
+	*vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >>
+		WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1);
+	*vlan_tag = le16_to_cpu(desc->vlan_tag);
+}
+
+#endif /* _WQ_ENET_DESC_H_ */
diff --git a/sys/modules/Makefile b/sys/modules/Makefile
index 61bbdb2341a1..9c2664460067 100644
--- a/sys/modules/Makefile
+++ b/sys/modules/Makefile
@@ -107,6 +107,7 @@ SUBDIR=	\
	${_efirt} \
	${_em} \
	${_ena} \
	${_enetc} \
+	${_enic} \
	${_et} \
	evdev \
@@ -782,6 +783,7 @@ _x86bios= x86bios
.if ${MACHINE_CPUARCH} == "amd64"
_amdgpio= amdgpio
_ccp= ccp
+_enic= enic
_iavf= iavf
_ioat= ioat
_ixl= ixl
diff --git a/sys/modules/enic/Makefile b/sys/modules/enic/Makefile
new file mode 100644
index 000000000000..62ac0f574a8b
--- /dev/null
+++ b/sys/modules/enic/Makefile
@@ -0,0 +1,20 @@
+# Makefile for the Cisco VIC enic(4) kernel module.
+
+.PATH: ${SRCTOP}/sys/dev/enic
+
+# NOTE(review): debug flags look like bring-up leftovers; drop before release?
+COPTS=-g
+#WITH_DEBUG_FILES=yes
+DEBUG_FLAGS=-g
+
+KMOD = if_enic
+SRCS = device_if.h bus_if.h pci_if.h pci_iov_if.h ifdi_if.h
+SRCS += opt_inet.h opt_inet6.h opt_rss.h
+
+SRCS += if_enic.c enic_txrx.c enic_res.c
+SRCS += vnic_cq.c vnic_dev.c vnic_intr.c vnic_rq.c vnic_wq.c
+
+# Driver headers live alongside the sources in sys/dev/enic (the old
+# -I paths pointed at sys/dev/enic_native, which does not exist).
+CFLAGS+= -I${SRCTOP}/sys/dev/enic
+
+.include <bsd.kmod.mk>