aboutsummaryrefslogtreecommitdiff
path: root/sys
diff options
context:
space:
mode:
Diffstat (limited to 'sys')
-rw-r--r--sys/conf/files.amd646
-rw-r--r--sys/dev/qlxge/README.txt97
-rw-r--r--sys/dev/qlxge/qls_dbg.c307
-rw-r--r--sys/dev/qlxge/qls_dbg.h94
-rw-r--r--sys/dev/qlxge/qls_def.h377
-rw-r--r--sys/dev/qlxge/qls_dump.c1992
-rw-r--r--sys/dev/qlxge/qls_dump.h277
-rw-r--r--sys/dev/qlxge/qls_glbl.h105
-rw-r--r--sys/dev/qlxge/qls_hw.c2443
-rw-r--r--sys/dev/qlxge/qls_hw.h1090
-rw-r--r--sys/dev/qlxge/qls_inline.h113
-rw-r--r--sys/dev/qlxge/qls_ioctl.c127
-rw-r--r--sys/dev/qlxge/qls_ioctl.h51
-rw-r--r--sys/dev/qlxge/qls_isr.c401
-rw-r--r--sys/dev/qlxge/qls_os.c1536
-rw-r--r--sys/dev/qlxge/qls_os.h157
-rw-r--r--sys/dev/qlxge/qls_ver.h41
-rw-r--r--sys/modules/Makefile2
-rw-r--r--sys/modules/qlxge/Makefile50
19 files changed, 9266 insertions, 0 deletions
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index cdb43df4660f..7a41a269c8f1 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -227,6 +227,12 @@ dev/nvme/nvme_test.c optional nvme
dev/nvram/nvram.c optional nvram isa
dev/random/ivy.c optional random rdrand_rng
dev/random/nehemiah.c optional random padlock_rng
+dev/qlxge/qls_dbg.c optional qlxge pci
+dev/qlxge/qls_dump.c optional qlxge pci
+dev/qlxge/qls_hw.c optional qlxge pci
+dev/qlxge/qls_ioctl.c optional qlxge pci
+dev/qlxge/qls_isr.c optional qlxge pci
+dev/qlxge/qls_os.c optional qlxge pci
dev/qlxgb/qla_dbg.c optional qlxgb pci
dev/qlxgb/qla_hw.c optional qlxgb pci
dev/qlxgb/qla_ioctl.c optional qlxgb pci
diff --git a/sys/dev/qlxge/README.txt b/sys/dev/qlxge/README.txt
new file mode 100644
index 000000000000..fb01051382b5
--- /dev/null
+++ b/sys/dev/qlxge/README.txt
@@ -0,0 +1,97 @@
+#$FreeBSD$
+
+ README File
+ QLogic 8100 series Dual Port
+10 Gigabit Ethernet & CNA Adapter Driver for FreeBSD 9.x/10.x
+
+ QLogic Corporation.
+ All rights reserved.
+
+
+Table of Contents
+1. Package Contents
+2. OS Support
+3. Supported Features
+4. Using the Driver
+ 4.1 Installing the driver
+ 4.2 Removing the driver
+5. Driver Parameters
+6. Additional Notes
+7. Contacting Support
+
+1. Package Contents
+ * Documentation
+ - README (this document) version:1.0
+ - Release Notes Version:1.0
+ * Driver (if_qlxge.ko)
+ - FreeBSD 9.x/10.x
+ * Firmware: pre-flashed on QLogic adapter;
+
+2. OS Support
+
+The QLogic 10 Gigabit Ethernet/CNA driver is compatible with the
+following OS platforms:
+ * FreeBSD 9.x/10.x (64-bit) [Intel EM64T, AMD64]
+
+3. Supported Features
+The 10 Gigabit Ethernet NIC/CNA driver supports the following features:
+
+* Large Segment Offload over TCP IPV4
+* Large Segment Offload over TCP IPV6
+* Receive Side Scaling
+* TCP over IPv4 checksum offload
+* UDP over IPv4 checksum offload
+* IPV4 checksum offload
+* TCP over IPv6 checksum offload
+* UDP over IPv6 checksum offload
+* Jumbo frames
+* VLAN Tag
+
+
+4. Using the driver
+
+ 4.1 Installing the driver
+
+ - copy the driver file (if_qlxge.ko) into some directory (say qla_driver)
+ - cd <to qla_driver>
+ - kldload -v ./if_qlxge.ko
+
+ 4.2 Removing the driver
+
+ - kldunload if_qlxge
+
+5. Parameters to set prior to installing the driver
+
+ - Add the following lines to /etc/sysctl.conf and reboot the machine prior
+ to installing the driver
+
+ net.inet.tcp.recvbuf_max=262144
+ net.inet.tcp.recvbuf_inc=16384
+ kern.ipc.nmbclusters=1000000
+ kern.ipc.maxsockbuf=2097152
+ net.inet.tcp.recvspace=131072
+ net.inet.tcp.sendbuf_max=262144
+ net.inet.tcp.sendspace=65536
+
+ - If you do not want to reboot the system please run the following commands
+
+ login or su to root
+
+ sysctl net.inet.tcp.recvbuf_max=262144
+ sysctl net.inet.tcp.recvbuf_inc=16384
+ sysctl kern.ipc.nmbclusters=1000000
+ sysctl kern.ipc.maxsockbuf=2097152
+ sysctl net.inet.tcp.recvspace=131072
+ sysctl net.inet.tcp.sendbuf_max=262144
+ sysctl net.inet.tcp.sendspace=65536
+
+7. Contacting Support
+Please feel free to contact your QLogic approved reseller or QLogic
+Technical Support at any phase of integration for assistance. QLogic
+Technical Support can be reached by the following methods:
+Web: http://support.qlogic.com
+E-mail: support@qlogic.com
+(c) Copyright 2013-14. All rights reserved worldwide. QLogic, the QLogic
+logo, and the Powered by QLogic logo are registered trademarks of
+QLogic Corporation. All other brand and product names are trademarks
+or registered trademarks of their respective owners.
diff --git a/sys/dev/qlxge/qls_dbg.c b/sys/dev/qlxge/qls_dbg.c
new file mode 100644
index 000000000000..04eff20e57e0
--- /dev/null
+++ b/sys/dev/qlxge/qls_dbg.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 2013-2014 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * File : qls_dbg.c
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "qls_os.h"
+#include "qls_hw.h"
+#include "qls_def.h"
+#include "qls_inline.h"
+#include "qls_ver.h"
+#include "qls_glbl.h"
+#include "qls_dbg.h"
+
+
+uint32_t qls_dbg_level = 0 ;
+/*
+ * Name: qls_dump_buf32
+ * Function: dumps a buffer as 32 bit words
+ */
+void
+qls_dump_buf32(qla_host_t *ha, const char *msg, void *dbuf32, uint32_t len32)
+{
+ device_t dev;
+ uint32_t i = 0;
+ uint32_t *buf;
+
+ dev = ha->pci_dev;
+ buf = dbuf32;
+
+ device_printf(dev, "%s: %s dump start\n", __func__, msg);
+
+ while (len32 >= 4) {
+ device_printf(dev,"0x%08x:\t0x%08x, 0x%08x, 0x%08x, 0x%08x,\n",
+ i, buf[0], buf[1], buf[2], buf[3]);
+ i += 4 * 4;
+ len32 -= 4;
+ buf += 4;
+ }
+ switch (len32) {
+ case 1:
+ device_printf(dev,"0x%08x: 0x%08x\n", i, buf[0]);
+ break;
+ case 2:
+ device_printf(dev,"0x%08x: 0x%08x 0x%08x\n", i, buf[0], buf[1]);
+ break;
+ case 3:
+ device_printf(dev,"0x%08x: 0x%08x 0x%08x 0x%08x\n",
+ i, buf[0], buf[1], buf[2]);
+ break;
+ default:
+ break;
+ }
+ device_printf(dev, "%s: %s dump end\n", __func__, msg);
+
+ return;
+}
+
+/*
+ * Name: qls_dump_buf16
+ * Function: dumps a buffer as 16 bit words
+ */
+void
+qls_dump_buf16(qla_host_t *ha, const char *msg, void *dbuf16, uint32_t len16)
+{
+ device_t dev;
+ uint32_t i = 0;
+ uint16_t *buf;
+
+ dev = ha->pci_dev;
+ buf = dbuf16;
+
+ device_printf(dev, "%s: %s dump start\n", __func__, msg);
+
+ while (len16 >= 8) {
+ device_printf(dev,"0x%08x: 0x%04x 0x%04x 0x%04x 0x%04x"
+ " 0x%04x 0x%04x 0x%04x 0x%04x\n", i, buf[0],
+ buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
+ i += 16;
+ len16 -= 8;
+ buf += 8;
+ }
+ switch (len16) {
+ case 1:
+ device_printf(dev,"0x%08x: 0x%04x\n", i, buf[0]);
+ break;
+ case 2:
+ device_printf(dev,"0x%08x: 0x%04x 0x%04x\n", i, buf[0], buf[1]);
+ break;
+ case 3:
+ device_printf(dev,"0x%08x: 0x%04x 0x%04x 0x%04x\n",
+ i, buf[0], buf[1], buf[2]);
+ break;
+ case 4:
+ device_printf(dev,"0x%08x: 0x%04x 0x%04x 0x%04x 0x%04x\n", i,
+ buf[0], buf[1], buf[2], buf[3]);
+ break;
+ case 5:
+ device_printf(dev,"0x%08x:"
+ " 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4]);
+ break;
+ case 6:
+ device_printf(dev,"0x%08x:"
+ " 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
+ break;
+ case 7: /* offset field uses %08x like every other case */
+ device_printf(dev,"0x%08x: 0x%04x 0x%04x 0x%04x 0x%04x"
+ " 0x%04x 0x%04x 0x%04x\n", i, buf[0], buf[1],
+ buf[2], buf[3], buf[4], buf[5], buf[6]);
+ break;
+ default:
+ break;
+ }
+ device_printf(dev, "%s: %s dump end\n", __func__, msg);
+
+ return;
+}
+
+/*
+ * Name: qls_dump_buf8
+ * Function: dumps a buffer as bytes
+ */
+void
+qls_dump_buf8(qla_host_t *ha, const char *msg, void *dbuf, uint32_t len)
+{
+ device_t dev;
+ uint32_t i = 0;
+ uint8_t *buf;
+
+ dev = ha->pci_dev;
+ buf = dbuf;
+
+ device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);
+
+ while (len >= 16) {
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3],
+ buf[4], buf[5], buf[6], buf[7],
+ buf[8], buf[9], buf[10], buf[11],
+ buf[12], buf[13], buf[14], buf[15]);
+ i += 16;
+ len -= 16;
+ buf += 16;
+ }
+ switch (len) {
+ case 1:
+ device_printf(dev,"0x%08x: %02x\n", i, buf[0]);
+ break;
+ case 2:
+ device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]);
+ break;
+ case 3:
+ device_printf(dev,"0x%08x: %02x %02x %02x\n",
+ i, buf[0], buf[1], buf[2]);
+ break;
+ case 4:
+ device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3]);
+ break;
+ case 5:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4]);
+ break;
+ case 6:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
+ break;
+ case 7:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
+ break;
+ case 8:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7]);
+ break;
+ case 9:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7], buf[8]);
+ break;
+ case 10:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7], buf[8], buf[9]);
+ break;
+ case 11:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7], buf[8], buf[9], buf[10]);
+ break;
+ case 12:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7], buf[8], buf[9], buf[10], buf[11]);
+ break;
+ case 13:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]);
+ break;
+ case 14:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x %02x %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
+ buf[13]);
+ break;
+ case 15:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x %02x %02x %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
+ buf[13], buf[14]);
+ break;
+ default:
+ break;
+ }
+
+ device_printf(dev, "%s: %s dump end\n", __func__, msg);
+
+ return;
+}
+
+void
+qls_dump_cq(qla_host_t *ha)
+{
+ qls_dump_buf32(ha, "cq_icb", ha->rx_ring[0].cq_icb_vaddr,
+ (sizeof (q81_cq_icb_t) >> 2));
+
+ device_printf(ha->pci_dev, "%s: lbq_addr_tbl_paddr %p\n", __func__,
+ (void *)ha->rx_ring[0].lbq_addr_tbl_paddr);
+
+ qls_dump_buf32(ha, "lbq_addr_tbl", ha->rx_ring[0].lbq_addr_tbl_vaddr,
+ (PAGE_SIZE >> 2));
+
+ device_printf(ha->pci_dev, "%s: lbq_paddr %p\n", __func__,
+ (void *)ha->rx_ring[0].lbq_paddr);
+
+ qls_dump_buf32(ha, "lbq", ha->rx_ring[0].lbq_vaddr,
+ (QLA_LBQ_SIZE >> 2));
+
+ device_printf(ha->pci_dev, "%s: sbq_addr_tbl_paddr %p\n", __func__,
+ (void *)ha->rx_ring[0].sbq_addr_tbl_paddr);
+
+ qls_dump_buf32(ha, "sbq_addr_tbl", ha->rx_ring[0].sbq_addr_tbl_vaddr,
+ (PAGE_SIZE >> 2));
+
+ device_printf(ha->pci_dev, "%s: sbq_paddr %p\n", __func__,
+ (void *)ha->rx_ring[0].sbq_paddr);
+
+ qls_dump_buf32(ha, "sbq", ha->rx_ring[0].sbq_vaddr,
+ (QLA_SBQ_SIZE >> 2) );
+
+ device_printf(ha->pci_dev, "%s: lb_paddr %p\n", __func__,
+ (void *)ha->rx_ring[0].lb_paddr);
+
+ return;
+}
+
diff --git a/sys/dev/qlxge/qls_dbg.h b/sys/dev/qlxge/qls_dbg.h
new file mode 100644
index 000000000000..e4d7a416285d
--- /dev/null
+++ b/sys/dev/qlxge/qls_dbg.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2013-2014 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * File : qls_dbg.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#ifndef _QL_DBG_H_
+#define _QL_DBG_H_
+
+extern uint32_t qls_dbg_level;
+
+extern void qls_dump_buf8(qla_host_t *ha, const char *str, void *dbuf,
+ uint32_t len);
+extern void qls_dump_buf16(qla_host_t *ha, const char *str, void *dbuf,
+ uint32_t len16);
+extern void qls_dump_buf32(qla_host_t *ha, const char *str, void *dbuf,
+ uint32_t len32);
+
+extern void qls_dump_cq(qla_host_t *ha);
+
+
+#ifdef QL_DBG
+
+#define QL_DPRINT1(x) if (qls_dbg_level & 0x0001) device_printf x
+#define QL_DPRINT2(x) if (qls_dbg_level & 0x0002) device_printf x
+#define QL_DPRINT4(x) if (qls_dbg_level & 0x0004) device_printf x
+#define QL_DPRINT8(x) if (qls_dbg_level & 0x0008) device_printf x
+#define QL_DPRINT10(x) if (qls_dbg_level & 0x0010) device_printf x
+#define QL_DPRINT20(x) if (qls_dbg_level & 0x0020) device_printf x
+#define QL_DPRINT40(x) if (qls_dbg_level & 0x0040) device_printf x
+#define QL_DPRINT80(x) if (qls_dbg_level & 0x0080) device_printf x
+
+#define QL_DUMP_BUFFER8(h, s, b, n) if (qls_dbg_level & 0x08000000)\
+ qls_dump_buf8(h, s, b, n)
+#define QL_DUMP_BUFFER16(h, s, b, n) if (qls_dbg_level & 0x08000000)\
+ qls_dump_buf16(h, s, b, n)
+#define QL_DUMP_BUFFER32(h, s, b, n) if (qls_dbg_level & 0x08000000)\
+ qls_dump_buf32(h, s, b, n)
+
+#define QL_ASSERT(ha, x, y) if (!(x) && !((ha)->err_inject)) panic y /* parenthesize args */
+
+#define QL_DUMP_CQ(ha) if (qls_dbg_level & 0x08000000) qls_dump_cq(ha)
+
+#else
+
+#define QL_DPRINT1(x)
+#define QL_DPRINT2(x)
+#define QL_DPRINT4(x)
+#define QL_DPRINT8(x)
+#define QL_DPRINT10(x)
+#define QL_DPRINT20(x)
+#define QL_DPRINT40(x)
+#define QL_DPRINT80(x)
+
+#define QL_DUMP_BUFFER8(h, s, b, n)
+#define QL_DUMP_BUFFER16(h, s, b, n)
+#define QL_DUMP_BUFFER32(h, s, b, n)
+
+#define QL_ASSERT(ha, x, y)
+
+#define QL_DUMP_CQ(ha)
+
+#endif
+
+
+#endif /* #ifndef _QL_DBG_H_ */
diff --git a/sys/dev/qlxge/qls_def.h b/sys/dev/qlxge/qls_def.h
new file mode 100644
index 000000000000..b486beb29994
--- /dev/null
+++ b/sys/dev/qlxge/qls_def.h
@@ -0,0 +1,377 @@
+/*
+ * Copyright (c) 2013-2014 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * File: qls_def.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#ifndef _QLS_DEF_H_
+#define _QLS_DEF_H_
+
+/*
+ * structure encapsulating a DMA buffer
+ */
+struct qla_dma {
+ bus_size_t alignment;
+ uint32_t size;
+ void *dma_b;
+ bus_addr_t dma_addr;
+ bus_dmamap_t dma_map;
+ bus_dma_tag_t dma_tag;
+};
+typedef struct qla_dma qla_dma_t;
+
+/*
+ * structure encapsulating interrupt vectors
+ */
+struct qla_ivec {
+ uint32_t cq_idx;
+ void *ha;
+ struct resource *irq;
+ void *handle;
+ int irq_rid;
+};
+typedef struct qla_ivec qla_ivec_t;
+
+/*
+ * Transmit Related Definitions
+ */
+
+#define MAX_TX_RINGS 1
+#define NUM_TX_DESCRIPTORS 1024
+
+#define QLA_MAX_SEGMENTS 64 /* maximum # of segs in a sg list */
+#define QLA_OAL_BLK_SIZE (sizeof (q81_txb_desc_t) * QLA_MAX_SEGMENTS)
+
+#define QLA_TX_OALB_TOTAL_SIZE (NUM_TX_DESCRIPTORS * QLA_OAL_BLK_SIZE)
+
+#define QLA_TX_PRIVATE_BSIZE ((QLA_TX_OALB_TOTAL_SIZE + \
+ PAGE_SIZE + \
+ (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1))
+
+#define QLA_MAX_MTU 9000
+#define QLA_STD_FRAME_SIZE 1514
+#define QLA_MAX_TSO_FRAME_SIZE ((64 * 1024 - 1) + 22)
+
+#define QL_FRAME_HDR_SIZE (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +\
+ sizeof (struct ip6_hdr) + sizeof (struct tcphdr) + 16)
+
+struct qla_tx_buf {
+ struct mbuf *m_head;
+ bus_dmamap_t map;
+
+ /* The number of entries in the OAL is determined by QLA_MAX_SEGMENTS */
+ bus_addr_t oal_paddr;
+ void *oal_vaddr;
+};
+typedef struct qla_tx_buf qla_tx_buf_t;
+
+struct qla_tx_ring {
+
+ volatile struct {
+ uint32_t wq_dma:1,
+ privb_dma:1;
+ } flags;
+
+ qla_dma_t privb_dma;
+ qla_dma_t wq_dma;
+
+ qla_tx_buf_t tx_buf[NUM_TX_DESCRIPTORS];
+ uint64_t count;
+
+ struct resource *wq_db_addr;
+ uint32_t wq_db_offset;
+
+ q81_tx_cmd_t *wq_vaddr;
+ bus_addr_t wq_paddr;
+
+ void *wq_icb_vaddr;
+ bus_addr_t wq_icb_paddr;
+
+ uint32_t *txr_cons_vaddr;
+ bus_addr_t txr_cons_paddr;
+
+ volatile uint32_t txr_free; /* # of free entries in tx ring */
+ volatile uint32_t txr_next; /* # next available tx ring entry */
+ volatile uint32_t txr_done;
+
+ uint64_t tx_frames;
+ uint64_t tx_tso_frames;
+ uint64_t tx_vlan_frames;
+};
+typedef struct qla_tx_ring qla_tx_ring_t;
+
+/*
+ * Receive Related Definitions
+ */
+
+#define MAX_RX_RINGS MAX_TX_RINGS
+
+#define NUM_RX_DESCRIPTORS 1024
+#define NUM_CQ_ENTRIES NUM_RX_DESCRIPTORS
+
+#define QLA_LGB_SIZE (12 * 1024)
+#define QLA_NUM_LGB_ENTRIES 32
+
+#define QLA_LBQ_SIZE (QLA_NUM_LGB_ENTRIES * sizeof(q81_bq_addr_e_t))
+
+#define QLA_LGBQ_AND_TABLE_SIZE \
+ ((QLA_LBQ_SIZE + PAGE_SIZE + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1))
+
+
+/* Please note that Small Buffer size is determined by max mtu size */
+#define QLA_NUM_SMB_ENTRIES NUM_RX_DESCRIPTORS
+
+#define QLA_SBQ_SIZE (QLA_NUM_SMB_ENTRIES * sizeof(q81_bq_addr_e_t))
+
+#define QLA_SMBQ_AND_TABLE_SIZE \
+ ((QLA_SBQ_SIZE + PAGE_SIZE + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1))
+
+struct qla_rx_buf {
+ struct mbuf *m_head;
+ bus_dmamap_t map;
+ bus_addr_t paddr;
+ void *next;
+};
+typedef struct qla_rx_buf qla_rx_buf_t;
+
+struct qla_rx_ring {
+ volatile struct {
+ uint32_t cq_dma:1,
+ lbq_dma:1,
+ sbq_dma:1,
+ lb_dma:1;
+ } flags;
+
+ qla_dma_t cq_dma;
+ qla_dma_t lbq_dma;
+ qla_dma_t sbq_dma;
+ qla_dma_t lb_dma;
+
+ struct lro_ctrl lro;
+
+ qla_rx_buf_t rx_buf[NUM_RX_DESCRIPTORS];
+ qla_rx_buf_t *rxb_free;
+ uint32_t rx_free;
+ uint32_t rx_next;
+
+ uint32_t cq_db_offset;
+
+ void *cq_icb_vaddr;
+ bus_addr_t cq_icb_paddr;
+
+ uint32_t *cqi_vaddr;
+ bus_addr_t cqi_paddr;
+
+ void *cq_base_vaddr;
+ bus_addr_t cq_base_paddr;
+ uint32_t cq_next; /* next cq entry to process */
+
+ void *lbq_addr_tbl_vaddr;
+ bus_addr_t lbq_addr_tbl_paddr;
+
+ void *lbq_vaddr;
+ bus_addr_t lbq_paddr;
+ uint32_t lbq_next; /* next entry in LBQ to process */
+ uint32_t lbq_free;/* # of entries in LBQ to arm */
+ uint32_t lbq_in; /* next entry in LBQ to arm */
+
+ void *lb_vaddr;
+ bus_addr_t lb_paddr;
+
+ void *sbq_addr_tbl_vaddr;
+ bus_addr_t sbq_addr_tbl_paddr;
+
+ void *sbq_vaddr;
+ bus_addr_t sbq_paddr;
+ uint32_t sbq_next; /* next entry in SBQ to process */
+ uint32_t sbq_free;/* # of entries in SBQ to arm */
+ uint32_t sbq_in; /* next entry in SBQ to arm */
+
+ uint64_t rx_int;
+ uint64_t rss_int;
+};
+typedef struct qla_rx_ring qla_rx_ring_t;
+
+
+#define QLA_WATCHDOG_CALLOUT_TICKS 1
+
+/*
+ * Multicast Definitions
+ */
+typedef struct _qla_mcast {
+ uint16_t rsrvd;
+ uint8_t addr[6];
+} __packed qla_mcast_t;
+
+/*
+ * Misc. definitions
+ */
+#define QLA_PAGE_SIZE 4096
+
+/*
+ * Adapter structure contains the hardware independent information of the
+ * pci function.
+ */
+struct qla_host {
+ volatile struct {
+ volatile uint32_t
+ mpi_dma :1,
+ rss_dma :1,
+ intr_enable :1,
+ qla_callout_init :1,
+ qla_watchdog_active :1,
+ qla_watchdog_exit :1,
+ qla_watchdog_pause :1,
+ lro_init :1,
+ parent_tag :1,
+ lock_init :1;
+ } flags;
+
+ volatile uint32_t hw_init;
+
+ volatile uint32_t qla_watchdog_exited;
+ volatile uint32_t qla_watchdog_paused;
+ volatile uint32_t qla_initiate_recovery;
+
+ device_t pci_dev;
+
+ uint8_t pci_func;
+ uint16_t watchdog_ticks;
+ uint8_t resvd;
+
+ /* ioctl related */
+ struct cdev *ioctl_dev;
+
+ /* register mapping */
+ struct resource *pci_reg;
+ int reg_rid;
+
+ struct resource *pci_reg1;
+ int reg_rid1;
+
+ int msix_count;
+ qla_ivec_t irq_vec[MAX_RX_RINGS];
+
+ /* parent dma tag */
+ bus_dma_tag_t parent_tag;
+
+ /* interface to o.s */
+ struct ifnet *ifp;
+
+ struct ifmedia media;
+ uint16_t max_frame_size;
+ uint16_t rsrvd0;
+ uint32_t msize;
+ int if_flags;
+
+ /* hardware access lock */
+ struct mtx hw_lock;
+ volatile uint32_t hw_lock_held;
+
+ uint32_t vm_pgsize;
+ /* transmit related */
+ uint32_t num_tx_rings;
+ qla_tx_ring_t tx_ring[MAX_TX_RINGS];
+
+ bus_dma_tag_t tx_tag;
+ struct task tx_task;
+ struct taskqueue *tx_tq;
+ struct callout tx_callout;
+ struct mtx tx_lock;
+
+ /* receive related */
+ uint32_t num_rx_rings;
+ qla_rx_ring_t rx_ring[MAX_RX_RINGS];
+ bus_dma_tag_t rx_tag;
+
+ /* stats */
+ uint32_t err_m_getcl;
+ uint32_t err_m_getjcl;
+ uint32_t err_tx_dmamap_create;
+ uint32_t err_tx_dmamap_load;
+ uint32_t err_tx_defrag;
+
+ /* mac address related */
+ uint8_t mac_rcv_mode;
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+ uint32_t nmcast;
+ qla_mcast_t mcast[Q8_MAX_NUM_MULTICAST_ADDRS];
+
+ /* Link Related */
+ uint8_t link_up;
+ uint32_t link_status;
+ uint32_t link_down_info;
+ uint32_t link_hw_info;
+ uint32_t link_dcbx_counters;
+ uint32_t link_change_counters;
+
+ /* Flash Related */
+ q81_flash_t flash;
+
+ /* debug stuff */
+ volatile const char *qla_lock;
+ volatile const char *qla_unlock;
+
+ /* Error Recovery Related */
+ uint32_t err_inject;
+ struct task err_task;
+ struct taskqueue *err_tq;
+
+ /* Chip related */
+ uint32_t rev_id;
+
+ /* mailbox completions */
+ uint32_t aen[Q81_NUM_AEN_REGISTERS];
+ uint32_t mbox[Q81_NUM_MBX_REGISTERS];
+ volatile uint32_t mbx_done;
+
+ /* mpi dump related */
+ qla_dma_t mpi_dma;
+ qla_dma_t rss_dma;
+
+};
+typedef struct qla_host qla_host_t;
+
+/* note that align has to be a power of 2 */
+#define QL_ALIGN(size, align) (((size) + ((align) - 1)) & ~((align) - 1))
+#define QL_MIN(x, y) (((x) < (y)) ? (x) : (y))
+
+#define QL_RUNNING(ifp) \
+ ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == \
+ IFF_DRV_RUNNING)
+
+/* Return 0, if identical, else 1 */
+
+#define QL_MAC_CMP(mac1, mac2) \
+ ((((*(uint32_t *) mac1) == (*(uint32_t *) mac2) && \
+ (*(uint16_t *)(mac1 + 4)) == (*(uint16_t *)(mac2 + 4)))) ? 0 : 1)
+
+#endif /* #ifndef _QLS_DEF_H_ */
diff --git a/sys/dev/qlxge/qls_dump.c b/sys/dev/qlxge/qls_dump.c
new file mode 100644
index 000000000000..44866ac077a3
--- /dev/null
+++ b/sys/dev/qlxge/qls_dump.c
@@ -0,0 +1,1992 @@
+/*
+ * Copyright (c) 2013-2014 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * File: qls_dump.c
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+
+#include "qls_os.h"
+#include "qls_hw.h"
+#include "qls_def.h"
+#include "qls_glbl.h"
+#include "qls_dump.h"
+
+/* Single global buffer into which the MPI core dump is collected. */
+qls_mpi_coredump_t ql_mpi_coredump;
+
+/*
+ * Segment numbers written into the per-segment headers of the MPI core
+ * dump (see qls_mpid_seg_hdr() below).  The numbering is sparse; gaps
+ * correspond to segments this driver does not emit.
+ */
+#define Q81_CORE_SEG_NUM              1
+#define Q81_TEST_LOGIC_SEG_NUM        2
+#define Q81_RMII_SEG_NUM              3
+#define Q81_FCMAC1_SEG_NUM            4
+#define Q81_FCMAC2_SEG_NUM            5
+#define Q81_FC1_MBOX_SEG_NUM          6
+#define Q81_IDE_SEG_NUM               7
+#define Q81_NIC1_MBOX_SEG_NUM         8
+#define Q81_SMBUS_SEG_NUM             9
+#define Q81_FC2_MBOX_SEG_NUM          10
+#define Q81_NIC2_MBOX_SEG_NUM         11
+#define Q81_I2C_SEG_NUM               12
+#define Q81_MEMC_SEG_NUM              13
+#define Q81_PBUS_SEG_NUM              14
+#define Q81_MDE_SEG_NUM               15
+#define Q81_NIC1_CONTROL_SEG_NUM      16
+#define Q81_NIC2_CONTROL_SEG_NUM      17
+#define Q81_NIC1_XGMAC_SEG_NUM        18
+#define Q81_NIC2_XGMAC_SEG_NUM        19
+#define Q81_WCS_RAM_SEG_NUM           20
+#define Q81_MEMC_RAM_SEG_NUM          21
+#define Q81_XAUI1_AN_SEG_NUM          22
+#define Q81_XAUI1_HSS_PCS_SEG_NUM     23
+#define Q81_XFI1_AN_SEG_NUM           24
+#define Q81_XFI1_TRAIN_SEG_NUM        25
+#define Q81_XFI1_HSS_PCS_SEG_NUM      26
+#define Q81_XFI1_HSS_TX_SEG_NUM       27
+#define Q81_XFI1_HSS_RX_SEG_NUM       28
+#define Q81_XFI1_HSS_PLL_SEG_NUM      29
+#define Q81_INTR_STATES_SEG_NUM       31
+#define Q81_ETS_SEG_NUM               34
+#define Q81_PROBE_DUMP_SEG_NUM        35
+#define Q81_ROUTING_INDEX_SEG_NUM     36
+#define Q81_MAC_PROTOCOL_SEG_NUM      37
+#define Q81_XAUI2_AN_SEG_NUM          38
+#define Q81_XAUI2_HSS_PCS_SEG_NUM     39
+#define Q81_XFI2_AN_SEG_NUM           40
+#define Q81_XFI2_TRAIN_SEG_NUM        41
+#define Q81_XFI2_HSS_PCS_SEG_NUM      42
+#define Q81_XFI2_HSS_TX_SEG_NUM       43
+#define Q81_XFI2_HSS_RX_SEG_NUM       44
+#define Q81_XFI2_HSS_PLL_SEG_NUM      45
+#define Q81_WQC1_SEG_NUM              46
+#define Q81_CQC1_SEG_NUM              47
+#define Q81_WQC2_SEG_NUM              48
+#define Q81_CQC2_SEG_NUM              49
+#define Q81_SEM_REGS_SEG_NUM          50
+
+/*
+ * XGMAC register byte offsets within a NIC function's MAC block.
+ * These registers are accessed indirectly through the
+ * Q81_CTL_XGMAC_ADDR / Q81_CTL_XGMAC_DATA window (see
+ * qls_rd_xgmac_reg() below).  Entries tagged "Control" are bit masks or
+ * shift values that belong to the preceding register, not offsets of
+ * their own.
+ */
+enum
+{
+	Q81_PAUSE_SRC_LO		= 0x00000100,
+	Q81_PAUSE_SRC_HI		= 0x00000104,
+	Q81_GLOBAL_CFG			= 0x00000108,
+	Q81_GLOBAL_CFG_RESET		= (1 << 0),	/*Control*/
+	Q81_GLOBAL_CFG_JUMBO		= (1 << 6),	/*Control*/
+	Q81_GLOBAL_CFG_TX_STAT_EN	= (1 << 10),	/*Control*/
+	Q81_GLOBAL_CFG_RX_STAT_EN	= (1 << 11),	/*Control*/
+	Q81_TX_CFG			= 0x0000010c,
+	Q81_TX_CFG_RESET		= (1 << 0),	/*Control*/
+	Q81_TX_CFG_EN			= (1 << 1),	/*Control*/
+	Q81_TX_CFG_PREAM		= (1 << 2),	/*Control*/
+	Q81_RX_CFG			= 0x00000110,
+	Q81_RX_CFG_RESET		= (1 << 0),	/*Control*/
+	Q81_RX_CFG_EN			= (1 << 1),	/*Control*/
+	Q81_RX_CFG_PREAM		= (1 << 2),	/*Control*/
+	Q81_FLOW_CTL			= 0x0000011c,
+	Q81_PAUSE_OPCODE		= 0x00000120,
+	Q81_PAUSE_TIMER			= 0x00000124,
+	Q81_PAUSE_FRM_DEST_LO		= 0x00000128,
+	Q81_PAUSE_FRM_DEST_HI		= 0x0000012c,
+	Q81_MAC_TX_PARAMS		= 0x00000134,
+	Q81_MAC_TX_PARAMS_JUMBO		= (1 << 31),	/*Control*/
+	Q81_MAC_TX_PARAMS_SIZE_SHIFT	= 16,		/*Control*/
+	Q81_MAC_RX_PARAMS		= 0x00000138,
+	Q81_MAC_SYS_INT			= 0x00000144,
+	Q81_MAC_SYS_INT_MASK		= 0x00000148,
+	Q81_MAC_MGMT_INT		= 0x0000014c,
+	Q81_MAC_MGMT_IN_MASK		= 0x00000150,
+	Q81_EXT_ARB_MODE		= 0x000001fc,
+	Q81_TX_PKTS			= 0x00000200,
+	Q81_TX_PKTS_LO			= 0x00000204,
+	Q81_TX_BYTES			= 0x00000208,
+	Q81_TX_BYTES_LO			= 0x0000020C,
+	Q81_TX_MCAST_PKTS		= 0x00000210,
+	Q81_TX_MCAST_PKTS_LO		= 0x00000214,
+	Q81_TX_BCAST_PKTS		= 0x00000218,
+	Q81_TX_BCAST_PKTS_LO		= 0x0000021C,
+	Q81_TX_UCAST_PKTS		= 0x00000220,
+	Q81_TX_UCAST_PKTS_LO		= 0x00000224,
+	Q81_TX_CTL_PKTS			= 0x00000228,
+	Q81_TX_CTL_PKTS_LO		= 0x0000022c,
+	Q81_TX_PAUSE_PKTS		= 0x00000230,
+	Q81_TX_PAUSE_PKTS_LO		= 0x00000234,
+	Q81_TX_64_PKT			= 0x00000238,
+	Q81_TX_64_PKT_LO		= 0x0000023c,
+	Q81_TX_65_TO_127_PKT		= 0x00000240,
+	Q81_TX_65_TO_127_PKT_LO		= 0x00000244,
+	Q81_TX_128_TO_255_PKT		= 0x00000248,
+	Q81_TX_128_TO_255_PKT_LO	= 0x0000024c,
+	Q81_TX_256_511_PKT		= 0x00000250,
+	Q81_TX_256_511_PKT_LO		= 0x00000254,
+	Q81_TX_512_TO_1023_PKT		= 0x00000258,
+	Q81_TX_512_TO_1023_PKT_LO	= 0x0000025c,
+	Q81_TX_1024_TO_1518_PKT		= 0x00000260,
+	Q81_TX_1024_TO_1518_PKT_LO	= 0x00000264,
+	Q81_TX_1519_TO_MAX_PKT		= 0x00000268,
+	Q81_TX_1519_TO_MAX_PKT_LO	= 0x0000026c,
+	Q81_TX_UNDERSIZE_PKT		= 0x00000270,
+	Q81_TX_UNDERSIZE_PKT_LO		= 0x00000274,
+	Q81_TX_OVERSIZE_PKT		= 0x00000278,
+	Q81_TX_OVERSIZE_PKT_LO		= 0x0000027c,
+	Q81_RX_HALF_FULL_DET		= 0x000002a0,
+	Q81_TX_HALF_FULL_DET_LO		= 0x000002a4,
+	Q81_RX_OVERFLOW_DET		= 0x000002a8,
+	Q81_TX_OVERFLOW_DET_LO		= 0x000002ac,
+	Q81_RX_HALF_FULL_MASK		= 0x000002b0,
+	Q81_TX_HALF_FULL_MASK_LO	= 0x000002b4,
+	Q81_RX_OVERFLOW_MASK		= 0x000002b8,
+	Q81_TX_OVERFLOW_MASK_LO		= 0x000002bc,
+	Q81_STAT_CNT_CTL		= 0x000002c0,
+	Q81_STAT_CNT_CTL_CLEAR_TX	= (1 << 0),	/*Control*/
+	Q81_STAT_CNT_CTL_CLEAR_RX	= (1 << 1),	/*Control*/
+	Q81_AUX_RX_HALF_FULL_DET	= 0x000002d0,
+	Q81_AUX_TX_HALF_FULL_DET	= 0x000002d4,
+	Q81_AUX_RX_OVERFLOW_DET		= 0x000002d8,
+	Q81_AUX_TX_OVERFLOW_DET		= 0x000002dc,
+	Q81_AUX_RX_HALF_FULL_MASK	= 0x000002f0,
+	Q81_AUX_TX_HALF_FULL_MASK	= 0x000002f4,
+	Q81_AUX_RX_OVERFLOW_MASK	= 0x000002f8,
+	Q81_AUX_TX_OVERFLOW_MASK	= 0x000002fc,
+	Q81_RX_BYTES			= 0x00000300,
+	Q81_RX_BYTES_LO			= 0x00000304,
+	Q81_RX_BYTES_OK			= 0x00000308,
+	Q81_RX_BYTES_OK_LO		= 0x0000030c,
+	Q81_RX_PKTS			= 0x00000310,
+	Q81_RX_PKTS_LO			= 0x00000314,
+	Q81_RX_PKTS_OK			= 0x00000318,
+	Q81_RX_PKTS_OK_LO		= 0x0000031c,
+	Q81_RX_BCAST_PKTS		= 0x00000320,
+	Q81_RX_BCAST_PKTS_LO		= 0x00000324,
+	Q81_RX_MCAST_PKTS		= 0x00000328,
+	Q81_RX_MCAST_PKTS_LO		= 0x0000032c,
+	Q81_RX_UCAST_PKTS		= 0x00000330,
+	Q81_RX_UCAST_PKTS_LO		= 0x00000334,
+	Q81_RX_UNDERSIZE_PKTS		= 0x00000338,
+	Q81_RX_UNDERSIZE_PKTS_LO	= 0x0000033c,
+	Q81_RX_OVERSIZE_PKTS		= 0x00000340,
+	Q81_RX_OVERSIZE_PKTS_LO		= 0x00000344,
+	Q81_RX_JABBER_PKTS		= 0x00000348,
+	Q81_RX_JABBER_PKTS_LO		= 0x0000034c,
+	Q81_RX_UNDERSIZE_FCERR_PKTS	= 0x00000350,
+	Q81_RX_UNDERSIZE_FCERR_PKTS_LO	= 0x00000354,
+	Q81_RX_DROP_EVENTS		= 0x00000358,
+	Q81_RX_DROP_EVENTS_LO		= 0x0000035c,
+	Q81_RX_FCERR_PKTS		= 0x00000360,
+	Q81_RX_FCERR_PKTS_LO		= 0x00000364,
+	Q81_RX_ALIGN_ERR		= 0x00000368,
+	Q81_RX_ALIGN_ERR_LO		= 0x0000036c,
+	Q81_RX_SYMBOL_ERR		= 0x00000370,
+	Q81_RX_SYMBOL_ERR_LO		= 0x00000374,
+	Q81_RX_MAC_ERR			= 0x00000378,
+	Q81_RX_MAC_ERR_LO		= 0x0000037c,
+	Q81_RX_CTL_PKTS			= 0x00000380,
+	Q81_RX_CTL_PKTS_LO		= 0x00000384,
+	Q81_RX_PAUSE_PKTS		= 0x00000388,
+	Q81_RX_PAUSE_PKTS_LO		= 0x0000038c,
+	Q81_RX_64_PKTS			= 0x00000390,
+	Q81_RX_64_PKTS_LO		= 0x00000394,
+	Q81_RX_65_TO_127_PKTS		= 0x00000398,
+	Q81_RX_65_TO_127_PKTS_LO	= 0x0000039c,
+	Q81_RX_128_255_PKTS		= 0x000003a0,
+	Q81_RX_128_255_PKTS_LO		= 0x000003a4,
+	Q81_RX_256_511_PKTS		= 0x000003a8,
+	Q81_RX_256_511_PKTS_LO		= 0x000003ac,
+	Q81_RX_512_TO_1023_PKTS		= 0x000003b0,
+	Q81_RX_512_TO_1023_PKTS_LO	= 0x000003b4,
+	Q81_RX_1024_TO_1518_PKTS	= 0x000003b8,
+	Q81_RX_1024_TO_1518_PKTS_LO	= 0x000003bc,
+	Q81_RX_1519_TO_MAX_PKTS		= 0x000003c0,
+	Q81_RX_1519_TO_MAX_PKTS_LO	= 0x000003c4,
+	Q81_RX_LEN_ERR_PKTS		= 0x000003c8,
+	Q81_RX_LEN_ERR_PKTS_LO		= 0x000003cc,
+	Q81_MDIO_TX_DATA		= 0x00000400,
+	Q81_MDIO_RX_DATA		= 0x00000410,
+	Q81_MDIO_CMD			= 0x00000420,
+	Q81_MDIO_PHY_ADDR		= 0x00000430,
+	Q81_MDIO_PORT			= 0x00000440,
+	Q81_MDIO_STATUS			= 0x00000450,
+	Q81_TX_CBFC_PAUSE_FRAMES0	= 0x00000500,
+	Q81_TX_CBFC_PAUSE_FRAMES0_LO	= 0x00000504,
+	Q81_TX_CBFC_PAUSE_FRAMES1	= 0x00000508,
+	Q81_TX_CBFC_PAUSE_FRAMES1_LO	= 0x0000050C,
+	Q81_TX_CBFC_PAUSE_FRAMES2	= 0x00000510,
+	Q81_TX_CBFC_PAUSE_FRAMES2_LO	= 0x00000514,
+	Q81_TX_CBFC_PAUSE_FRAMES3	= 0x00000518,
+	Q81_TX_CBFC_PAUSE_FRAMES3_LO	= 0x0000051C,
+	Q81_TX_CBFC_PAUSE_FRAMES4	= 0x00000520,
+	Q81_TX_CBFC_PAUSE_FRAMES4_LO	= 0x00000524,
+	Q81_TX_CBFC_PAUSE_FRAMES5	= 0x00000528,
+	Q81_TX_CBFC_PAUSE_FRAMES5_LO	= 0x0000052C,
+	Q81_TX_CBFC_PAUSE_FRAMES6	= 0x00000530,
+	Q81_TX_CBFC_PAUSE_FRAMES6_LO	= 0x00000534,
+	Q81_TX_CBFC_PAUSE_FRAMES7	= 0x00000538,
+	Q81_TX_CBFC_PAUSE_FRAMES7_LO	= 0x0000053C,
+	Q81_TX_FCOE_PKTS		= 0x00000540,
+	Q81_TX_FCOE_PKTS_LO		= 0x00000544,
+	Q81_TX_MGMT_PKTS		= 0x00000548,
+	Q81_TX_MGMT_PKTS_LO		= 0x0000054C,
+	Q81_RX_CBFC_PAUSE_FRAMES0	= 0x00000568,
+	Q81_RX_CBFC_PAUSE_FRAMES0_LO	= 0x0000056C,
+	Q81_RX_CBFC_PAUSE_FRAMES1	= 0x00000570,
+	Q81_RX_CBFC_PAUSE_FRAMES1_LO	= 0x00000574,
+	Q81_RX_CBFC_PAUSE_FRAMES2	= 0x00000578,
+	Q81_RX_CBFC_PAUSE_FRAMES2_LO	= 0x0000057C,
+	Q81_RX_CBFC_PAUSE_FRAMES3	= 0x00000580,
+	Q81_RX_CBFC_PAUSE_FRAMES3_LO	= 0x00000584,
+	Q81_RX_CBFC_PAUSE_FRAMES4	= 0x00000588,
+	Q81_RX_CBFC_PAUSE_FRAMES4_LO	= 0x0000058C,
+	Q81_RX_CBFC_PAUSE_FRAMES5	= 0x00000590,
+	Q81_RX_CBFC_PAUSE_FRAMES5_LO	= 0x00000594,
+	Q81_RX_CBFC_PAUSE_FRAMES6	= 0x00000598,
+	Q81_RX_CBFC_PAUSE_FRAMES6_LO	= 0x0000059C,
+	Q81_RX_CBFC_PAUSE_FRAMES7	= 0x000005A0,
+	Q81_RX_CBFC_PAUSE_FRAMES7_LO	= 0x000005A4,
+	Q81_RX_FCOE_PKTS		= 0x000005A8,
+	Q81_RX_FCOE_PKTS_LO		= 0x000005AC,
+	Q81_RX_MGMT_PKTS		= 0x000005B0,
+	Q81_RX_MGMT_PKTS_LO		= 0x000005B4,
+	Q81_RX_NIC_FIFO_DROP		= 0x000005B8,
+	Q81_RX_NIC_FIFO_DROP_LO		= 0x000005BC,
+	Q81_RX_FCOE_FIFO_DROP		= 0x000005C0,
+	Q81_RX_FCOE_FIFO_DROP_LO	= 0x000005C4,
+	Q81_RX_MGMT_FIFO_DROP		= 0x000005C8,
+	Q81_RX_MGMT_FIFO_DROP_LO	= 0x000005CC,
+	Q81_RX_PKTS_PRIORITY0		= 0x00000600,
+	Q81_RX_PKTS_PRIORITY0_LO	= 0x00000604,
+	Q81_RX_PKTS_PRIORITY1		= 0x00000608,
+	Q81_RX_PKTS_PRIORITY1_LO	= 0x0000060C,
+	Q81_RX_PKTS_PRIORITY2		= 0x00000610,
+	Q81_RX_PKTS_PRIORITY2_LO	= 0x00000614,
+	Q81_RX_PKTS_PRIORITY3		= 0x00000618,
+	Q81_RX_PKTS_PRIORITY3_LO	= 0x0000061C,
+	Q81_RX_PKTS_PRIORITY4		= 0x00000620,
+	Q81_RX_PKTS_PRIORITY4_LO	= 0x00000624,
+	Q81_RX_PKTS_PRIORITY5		= 0x00000628,
+	Q81_RX_PKTS_PRIORITY5_LO	= 0x0000062C,
+	Q81_RX_PKTS_PRIORITY6		= 0x00000630,
+	Q81_RX_PKTS_PRIORITY6_LO	= 0x00000634,
+	Q81_RX_PKTS_PRIORITY7		= 0x00000638,
+	Q81_RX_PKTS_PRIORITY7_LO	= 0x0000063C,
+	Q81_RX_OCTETS_PRIORITY0		= 0x00000640,
+	Q81_RX_OCTETS_PRIORITY0_LO	= 0x00000644,
+	Q81_RX_OCTETS_PRIORITY1		= 0x00000648,
+	Q81_RX_OCTETS_PRIORITY1_LO	= 0x0000064C,
+	Q81_RX_OCTETS_PRIORITY2		= 0x00000650,
+	Q81_RX_OCTETS_PRIORITY2_LO	= 0x00000654,
+	Q81_RX_OCTETS_PRIORITY3		= 0x00000658,
+	Q81_RX_OCTETS_PRIORITY3_LO	= 0x0000065C,
+	Q81_RX_OCTETS_PRIORITY4		= 0x00000660,
+	Q81_RX_OCTETS_PRIORITY4_LO	= 0x00000664,
+	Q81_RX_OCTETS_PRIORITY5		= 0x00000668,
+	Q81_RX_OCTETS_PRIORITY5_LO	= 0x0000066C,
+	Q81_RX_OCTETS_PRIORITY6		= 0x00000670,
+	Q81_RX_OCTETS_PRIORITY6_LO	= 0x00000674,
+	Q81_RX_OCTETS_PRIORITY7		= 0x00000678,
+	Q81_RX_OCTETS_PRIORITY7_LO	= 0x0000067C,
+	Q81_TX_PKTS_PRIORITY0		= 0x00000680,
+	Q81_TX_PKTS_PRIORITY0_LO	= 0x00000684,
+	Q81_TX_PKTS_PRIORITY1		= 0x00000688,
+	Q81_TX_PKTS_PRIORITY1_LO	= 0x0000068C,
+	Q81_TX_PKTS_PRIORITY2		= 0x00000690,
+	Q81_TX_PKTS_PRIORITY2_LO	= 0x00000694,
+	Q81_TX_PKTS_PRIORITY3		= 0x00000698,
+	Q81_TX_PKTS_PRIORITY3_LO	= 0x0000069C,
+	Q81_TX_PKTS_PRIORITY4		= 0x000006A0,
+	Q81_TX_PKTS_PRIORITY4_LO	= 0x000006A4,
+	Q81_TX_PKTS_PRIORITY5		= 0x000006A8,
+	Q81_TX_PKTS_PRIORITY5_LO	= 0x000006AC,
+	Q81_TX_PKTS_PRIORITY6		= 0x000006B0,
+	Q81_TX_PKTS_PRIORITY6_LO	= 0x000006B4,
+	Q81_TX_PKTS_PRIORITY7		= 0x000006B8,
+	Q81_TX_PKTS_PRIORITY7_LO	= 0x000006BC,
+	Q81_TX_OCTETS_PRIORITY0		= 0x000006C0,
+	Q81_TX_OCTETS_PRIORITY0_LO	= 0x000006C4,
+	Q81_TX_OCTETS_PRIORITY1		= 0x000006C8,
+	Q81_TX_OCTETS_PRIORITY1_LO	= 0x000006CC,
+	Q81_TX_OCTETS_PRIORITY2		= 0x000006D0,
+	Q81_TX_OCTETS_PRIORITY2_LO	= 0x000006D4,
+	Q81_TX_OCTETS_PRIORITY3		= 0x000006D8,
+	Q81_TX_OCTETS_PRIORITY3_LO	= 0x000006DC,
+	Q81_TX_OCTETS_PRIORITY4		= 0x000006E0,
+	Q81_TX_OCTETS_PRIORITY4_LO	= 0x000006E4,
+	Q81_TX_OCTETS_PRIORITY5		= 0x000006E8,
+	Q81_TX_OCTETS_PRIORITY5_LO	= 0x000006EC,
+	Q81_TX_OCTETS_PRIORITY6		= 0x000006F0,
+	Q81_TX_OCTETS_PRIORITY6_LO	= 0x000006F4,
+	Q81_TX_OCTETS_PRIORITY7		= 0x000006F8,
+	Q81_TX_OCTETS_PRIORITY7_LO	= 0x000006FC,
+	Q81_RX_DISCARD_PRIORITY0	= 0x00000700,
+	Q81_RX_DISCARD_PRIORITY0_LO	= 0x00000704,
+	Q81_RX_DISCARD_PRIORITY1	= 0x00000708,
+	Q81_RX_DISCARD_PRIORITY1_LO	= 0x0000070C,
+	Q81_RX_DISCARD_PRIORITY2	= 0x00000710,
+	Q81_RX_DISCARD_PRIORITY2_LO	= 0x00000714,
+	Q81_RX_DISCARD_PRIORITY3	= 0x00000718,
+	Q81_RX_DISCARD_PRIORITY3_LO	= 0x0000071C,
+	Q81_RX_DISCARD_PRIORITY4	= 0x00000720,
+	Q81_RX_DISCARD_PRIORITY4_LO	= 0x00000724,
+	Q81_RX_DISCARD_PRIORITY5	= 0x00000728,
+	Q81_RX_DISCARD_PRIORITY5_LO	= 0x0000072C,
+	Q81_RX_DISCARD_PRIORITY6	= 0x00000730,
+	Q81_RX_DISCARD_PRIORITY6_LO	= 0x00000734,
+	Q81_RX_DISCARD_PRIORITY7	= 0x00000738,
+	Q81_RX_DISCARD_PRIORITY7_LO	= 0x0000073C
+};
+
+/*
+ * Initialize one MPI-dump segment header: magic cookie, segment number,
+ * segment size and a human-readable description (NUL-terminated).
+ */
+static void
+qls_mpid_seg_hdr(qls_mpid_seg_hdr_t *seg_hdr, uint32_t seg_num,
+	uint32_t seg_size, unsigned char *desc)
+{
+	size_t desc_room;
+
+	/* Start from an all-zero header so desc stays NUL-terminated. */
+	memset(seg_hdr, 0, sizeof(*seg_hdr));
+
+	seg_hdr->cookie = Q81_MPID_COOKIE;
+	seg_hdr->seg_num = seg_num;
+	seg_hdr->seg_size = seg_size;
+
+	/* Copy at most sizeof(desc) - 1 bytes; the final byte remains '\0'. */
+	desc_room = sizeof(seg_hdr->desc) - 1;
+	memcpy(seg_hdr->desc, desc, desc_room);
+}
+
+/*
+ * Poll control register 'reg' until either 'bit' (ready) or 'err_bit'
+ * (error) is set.  Polls up to 10 times with a 10ms delay between reads
+ * (~100ms total).  Returns 0 when 'bit' is observed, -1 on error bit or
+ * timeout.  Pass err_bit = 0 to disable the error check.
+ */
+static int
+qls_wait_reg_rdy(qla_host_t *ha , uint32_t reg, uint32_t bit, uint32_t err_bit)
+{
+	uint32_t data;
+	int count = 10;
+
+	while (count) {
+
+		data = READ_REG32(ha, reg);
+
+		if (data & err_bit)
+			return (-1);
+		else if (data & bit)
+			return (0);
+
+		qls_mdelay(__func__, 10);
+		count--;
+	}
+	return (-1);
+}
+
+/*
+ * Read MPI processor register 'reg' indirectly through the
+ * Q81_CTL_PROC_ADDR / Q81_CTL_PROC_DATA window: wait for the window to
+ * be ready, post the address with the READ command bit, wait for
+ * completion, then fetch the data.  Returns 0 on success, non-zero if
+ * either wait fails (in which case *data is left unmodified).
+ */
+static int
+qls_rd_mpi_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
+{
+	int ret;
+
+	ret = qls_wait_reg_rdy(ha, Q81_CTL_PROC_ADDR, Q81_CTL_PROC_ADDR_RDY,
+			Q81_CTL_PROC_ADDR_ERR);
+
+	if (ret)
+		goto exit_qls_rd_mpi_reg;
+
+	/* post the register address together with the read command */
+	WRITE_REG32(ha, Q81_CTL_PROC_ADDR, reg | Q81_CTL_PROC_ADDR_READ);
+
+	ret = qls_wait_reg_rdy(ha, Q81_CTL_PROC_ADDR, Q81_CTL_PROC_ADDR_RDY,
+			Q81_CTL_PROC_ADDR_ERR);
+
+	if (ret)
+		goto exit_qls_rd_mpi_reg;
+
+	*data = READ_REG32(ha, Q81_CTL_PROC_DATA);
+
+exit_qls_rd_mpi_reg:
+	return (ret);
+}
+
+/*
+ * Write 'data' to MPI processor register 'reg' through the
+ * Q81_CTL_PROC_ADDR / Q81_CTL_PROC_DATA window.  The data register is
+ * loaded first; writing the address (without the READ bit) then
+ * triggers the write.  Returns 0 on success, non-zero on wait failure.
+ */
+static int
+qls_wr_mpi_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
+{
+	int ret = 0;
+
+	ret = qls_wait_reg_rdy(ha, Q81_CTL_PROC_ADDR, Q81_CTL_PROC_ADDR_RDY,
+			Q81_CTL_PROC_ADDR_ERR);
+	if (ret)
+		goto exit_qls_wr_mpi_reg;
+
+	/* data must be staged before the address write kicks off the op */
+	WRITE_REG32(ha, Q81_CTL_PROC_DATA, data);
+
+	WRITE_REG32(ha, Q81_CTL_PROC_ADDR, reg);
+
+	ret = qls_wait_reg_rdy(ha, Q81_CTL_PROC_ADDR, Q81_CTL_PROC_ADDR_RDY,
+			Q81_CTL_PROC_ADDR_ERR);
+exit_qls_wr_mpi_reg:
+	return (ret);
+}
+
+
+/* MPI register holding the per-port NIC function enable/number fields. */
+#define Q81_TEST_LOGIC_FUNC_PORT_CONFIG	0x1002
+#define Q81_INVALID_NUM			0xFFFFFFFF
+
+#define Q81_NIC1_FUNC_ENABLE		0x00000001
+#define Q81_NIC1_FUNC_MASK		0x0000000e
+#define Q81_NIC1_FUNC_SHIFT		1
+#define Q81_NIC2_FUNC_ENABLE		0x00000010
+#define Q81_NIC2_FUNC_MASK		0x000000e0
+#define Q81_NIC2_FUNC_SHIFT		5
+#define Q81_FUNCTION_SHIFT		6
+
+/*
+ * Return the PCI function number of the *other* NIC function on this
+ * dual-port adapter, derived from the MPI test-logic port-config
+ * register.  Returns Q81_INVALID_NUM if the register cannot be read or
+ * the other function is not enabled.
+ */
+static uint32_t
+qls_get_other_fnum(qla_host_t *ha)
+{
+	int ret;
+	uint32_t o_func;
+	uint32_t test_logic;
+	uint32_t nic1_fnum = Q81_INVALID_NUM;
+	uint32_t nic2_fnum = Q81_INVALID_NUM;
+
+	ret = qls_rd_mpi_reg(ha, Q81_TEST_LOGIC_FUNC_PORT_CONFIG, &test_logic);
+	if (ret)
+		return(Q81_INVALID_NUM);
+
+	if (test_logic & Q81_NIC1_FUNC_ENABLE)
+		nic1_fnum = (test_logic & Q81_NIC1_FUNC_MASK) >>
+					Q81_NIC1_FUNC_SHIFT;
+
+	if (test_logic & Q81_NIC2_FUNC_ENABLE)
+		nic2_fnum = (test_logic & Q81_NIC2_FUNC_MASK) >>
+					Q81_NIC2_FUNC_SHIFT;
+
+	/* we are function 0 -> the other function is NIC2, and vice versa */
+	if (ha->pci_func == 0)
+		o_func = nic2_fnum;
+	else
+		o_func = nic1_fnum;
+
+	return(o_func);
+}
+
+/*
+ * Read a (word-indexed) control register of the other NIC function via
+ * the MPI proc-address window.  Returns the register value, or
+ * Q81_INVALID_NUM on failure.
+ *
+ * NOTE(review): a legitimate register value of 0xFFFFFFFF is
+ * indistinguishable from the Q81_INVALID_NUM error sentinel; callers in
+ * this file tolerate that for dump purposes.
+ */
+static uint32_t
+qls_rd_ofunc_reg(qla_host_t *ha, uint32_t reg)
+{
+	uint32_t ofunc;
+	uint32_t data;
+	int ret = 0;
+
+	ofunc = qls_get_other_fnum(ha);
+
+	if (ofunc == Q81_INVALID_NUM)
+		return(Q81_INVALID_NUM);
+
+	/* address the other function's register block */
+	reg = Q81_CTL_PROC_ADDR_REG_BLOCK | (ofunc << Q81_FUNCTION_SHIFT) | reg;
+
+	ret = qls_rd_mpi_reg(ha, reg, &data);
+
+	if (ret != 0)
+		return(Q81_INVALID_NUM);
+
+	return(data);
+}
+
+/*
+ * Write 'value' to a (word-indexed) control register of the other NIC
+ * function via the MPI proc-address window.  Best-effort: failures are
+ * silently ignored (this is only used on the core-dump path).
+ *
+ * The local 'ret' of the original was assigned but never read
+ * (-Wunused-but-set-variable); drop it and discard the status
+ * explicitly.
+ */
+static void
+qls_wr_ofunc_reg(qla_host_t *ha, uint32_t reg, uint32_t value)
+{
+	uint32_t ofunc;
+
+	ofunc = qls_get_other_fnum(ha);
+
+	/* other function not enabled / not identifiable: drop the write */
+	if (ofunc == Q81_INVALID_NUM)
+		return;
+
+	/* address the other function's register block */
+	reg = Q81_CTL_PROC_ADDR_REG_BLOCK | (ofunc << Q81_FUNCTION_SHIFT) | reg;
+
+	(void) qls_wr_mpi_reg(ha, reg, value);
+
+	return;
+}
+
+/*
+ * Poll an other-function register until 'bit' (ready) or 'err_bit'
+ * (error) is set; up to 10 tries, 10ms apart.  Returns 0 on ready, -1
+ * on error bit or timeout.
+ *
+ * NOTE(review): qls_rd_ofunc_reg() returns 0xFFFFFFFF on a failed read,
+ * which has every bit set — a read failure therefore reports as the
+ * error bit (or as ready when err_bit == 0).  Looks intentional for the
+ * dump path; confirm if reused elsewhere.
+ */
+static int
+qls_wait_ofunc_reg_rdy(qla_host_t *ha , uint32_t reg, uint32_t bit,
+	uint32_t err_bit)
+{
+	uint32_t data;
+	int count = 10;
+
+	while (count) {
+
+		data = qls_rd_ofunc_reg(ha, reg);
+
+		if (data & err_bit)
+			return (-1);
+		else if (data & bit)
+			return (0);
+
+		qls_mdelay(__func__, 10);
+		count--;
+	}
+	return (-1);
+}
+
+/* Bits in the XG SERDES address register. */
+#define Q81_XG_SERDES_ADDR_RDY	BIT_31
+#define Q81_XG_SERDES_ADDR_READ	BIT_30
+
+/*
+ * Read XG SERDES register 'reg' of the *other* NIC function through its
+ * SERDES address/data window (itself reached via the MPI proc-address
+ * window; offsets are divided by 4 because that window is word
+ * indexed).  Returns 0 on success, non-zero on wait failure.
+ */
+static int
+qls_rd_ofunc_serdes_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
+{
+	int ret;
+
+	/* wait for reg to come ready */
+	ret = qls_wait_ofunc_reg_rdy(ha, (Q81_CTL_XG_SERDES_ADDR >> 2),
+			Q81_XG_SERDES_ADDR_RDY, 0);
+	if (ret)
+		goto exit_qls_rd_ofunc_serdes_reg;
+
+	/* set up for reg read */
+	qls_wr_ofunc_reg(ha, (Q81_CTL_XG_SERDES_ADDR >> 2),
+		(reg | Q81_XG_SERDES_ADDR_READ));
+
+	/* wait for reg to come ready */
+	ret = qls_wait_ofunc_reg_rdy(ha, (Q81_CTL_XG_SERDES_ADDR >> 2),
+			Q81_XG_SERDES_ADDR_RDY, 0);
+	if (ret)
+		goto exit_qls_rd_ofunc_serdes_reg;
+
+	/* get the data */
+	*data = qls_rd_ofunc_reg(ha, (Q81_CTL_XG_SERDES_DATA >> 2));
+
+exit_qls_rd_ofunc_serdes_reg:
+	return ret;
+}
+
+/* Bits in the XGMAC address register. */
+#define Q81_XGMAC_ADDR_RDY	BIT_31
+#define Q81_XGMAC_ADDR_R	BIT_30
+#define Q81_XGMAC_ADDR_XME	BIT_29
+
+/*
+ * Read XGMAC register 'reg' of the *other* NIC function through its
+ * XGMAC address/data window (reached via the word-indexed MPI
+ * proc-address window, hence the ">> 2" on byte offsets).
+ * Returns 0 on success, non-zero on wait failure.
+ *
+ * Fixes vs. original: the read setup wrote to (Q81_XGMAC_ADDR_RDY >> 2)
+ * — the ready *bit mask* misused as a register offset — instead of the
+ * XGMAC address register; and the data read used the unscaled byte
+ * offset.  Both now match the wait calls here, the sibling
+ * qls_rd_ofunc_serdes_reg(), and the equivalent Linux qlge routine.
+ */
+static int
+qls_rd_ofunc_xgmac_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
+{
+	int ret = 0;
+
+	/* wait for the address register to come ready */
+	ret = qls_wait_ofunc_reg_rdy(ha, (Q81_CTL_XGMAC_ADDR >> 2),
+			Q81_XGMAC_ADDR_RDY, Q81_XGMAC_ADDR_XME);
+
+	if (ret)
+		goto exit_qls_rd_ofunc_xgmac_reg;
+
+	/* set up for reg read */
+	qls_wr_ofunc_reg(ha, (Q81_CTL_XGMAC_ADDR >> 2),
+		(reg | Q81_XGMAC_ADDR_R));
+
+	/* wait for the read to complete */
+	ret = qls_wait_ofunc_reg_rdy(ha, (Q81_CTL_XGMAC_ADDR >> 2),
+			Q81_XGMAC_ADDR_RDY, Q81_XGMAC_ADDR_XME);
+	if (ret)
+		goto exit_qls_rd_ofunc_xgmac_reg;
+
+	/* get the data */
+	*data = qls_rd_ofunc_reg(ha, (Q81_CTL_XGMAC_DATA >> 2));
+
+exit_qls_rd_ofunc_xgmac_reg:
+	return ret;
+}
+
+/*
+ * Read XG SERDES register 'reg' of *this* function through the direct
+ * Q81_CTL_XG_SERDES_ADDR / Q81_CTL_XG_SERDES_DATA window.  Returns 0 on
+ * success, non-zero on wait failure (*data left unmodified).
+ */
+static int
+qls_rd_serdes_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
+{
+	int ret;
+
+	ret = qls_wait_reg_rdy(ha, Q81_CTL_XG_SERDES_ADDR,
+			Q81_XG_SERDES_ADDR_RDY, 0);
+
+	if (ret)
+		goto exit_qls_rd_serdes_reg;
+
+	/* post the address with the read command bit */
+	WRITE_REG32(ha, Q81_CTL_XG_SERDES_ADDR, \
+		(reg | Q81_XG_SERDES_ADDR_READ));
+
+	ret = qls_wait_reg_rdy(ha, Q81_CTL_XG_SERDES_ADDR,
+			Q81_XG_SERDES_ADDR_RDY, 0);
+
+	if (ret)
+		goto exit_qls_rd_serdes_reg;
+
+	*data = READ_REG32(ha, Q81_CTL_XG_SERDES_DATA);
+
+exit_qls_rd_serdes_reg:
+
+	return ret;
+}
+
+/*
+ * Read SERDES register 'addr' for both NIC functions: the direct value
+ * into *dptr (if 'dvalid'), the other function's value into *ind_ptr
+ * (if 'ind_valid').  A side that is invalid, or whose read fails, gets
+ * Q81_BAD_DATA — note 'ret' starts at -1 so the "invalid" and "failed"
+ * cases share the same path.
+ */
+static void
+qls_get_both_serdes(qla_host_t *ha, uint32_t addr, uint32_t *dptr,
+	uint32_t *ind_ptr, uint32_t dvalid, uint32_t ind_valid)
+{
+	int ret = -1;
+
+	if (dvalid)
+		ret = qls_rd_serdes_reg(ha, addr, dptr);
+
+	if (ret)
+		*dptr = Q81_BAD_DATA;
+
+	ret = -1;
+
+	if(ind_valid)
+		ret = qls_rd_ofunc_serdes_reg(ha, addr, ind_ptr);
+
+	if (ret)
+		*ind_ptr = Q81_BAD_DATA;
+}
+
+/* Power-up signatures read from the SERDES power registers. */
+#define Q81_XFI1_POWERED_UP	0x00000005
+#define Q81_XFI2_POWERED_UP	0x0000000A
+#define Q81_XAUI_POWERED_UP	0x00000001
+
+/*
+ * Dump all SERDES register banks (XAUI AN, XAUI HSS-PCS, XFI AN, XFI
+ * train, XFI HSS PCS/TX/RX/PLL) for both NIC functions into mpi_dump.
+ *
+ * First determine which blocks are powered up on the direct side and on
+ * the other-function (indirect) side; qls_get_both_serdes() then fills
+ * Q81_BAD_DATA for any side that is down or fails to read.  The dptr /
+ * indptr destinations are swapped based on ha->pci_func so that the
+ * serdes1_* fields always describe NIC1 and serdes2_* NIC2.
+ * Always returns 0.
+ */
+static int
+qls_rd_serdes_regs(qla_host_t *ha, qls_mpi_coredump_t *mpi_dump)
+{
+	int ret;
+	uint32_t xfi_d_valid, xfi_ind_valid, xaui_d_valid, xaui_ind_valid;
+	uint32_t temp, xaui_reg, i;
+	uint32_t *dptr, *indptr;
+
+	xfi_d_valid = xfi_ind_valid = xaui_d_valid = xaui_ind_valid = 0;
+
+	xaui_reg = 0x800;
+
+	/* is the other function's XAUI powered up? */
+	ret = qls_rd_ofunc_serdes_reg(ha, xaui_reg, &temp);
+	if (ret)
+		temp = 0;
+
+	if ((temp & Q81_XAUI_POWERED_UP) == Q81_XAUI_POWERED_UP)
+		xaui_ind_valid = 1;
+
+	/* is this function's XAUI powered up? */
+	ret = qls_rd_serdes_reg(ha, xaui_reg, &temp);
+	if (ret)
+		temp = 0;
+
+	if ((temp & Q81_XAUI_POWERED_UP) == Q81_XAUI_POWERED_UP)
+		xaui_d_valid = 1;
+
+	/* 0x1E06 reports the power state of both XFI lanes */
+	ret = qls_rd_serdes_reg(ha, 0x1E06, &temp);
+	if (ret)
+		temp = 0;
+
+	if ((temp & Q81_XFI1_POWERED_UP) == Q81_XFI1_POWERED_UP) {
+
+		if (ha->pci_func & 1)
+			xfi_ind_valid = 1; /* NIC 2, so the indirect
+						 (NIC1) xfi is up*/
+		else
+			xfi_d_valid = 1;
+	}
+
+	if((temp & Q81_XFI2_POWERED_UP) == Q81_XFI2_POWERED_UP) {
+
+		if(ha->pci_func & 1)
+			xfi_d_valid = 1; /* NIC 2, so the indirect (NIC1)
+					    xfi is up */
+		else
+			xfi_ind_valid = 1;
+	}
+
+	/* XAUI AN block: 0x000 .. 0x034 */
+	if (ha->pci_func & 1) {
+		dptr = (uint32_t *)(&mpi_dump->serdes2_xaui_an);
+		indptr = (uint32_t *)(&mpi_dump->serdes1_xaui_an);
+	} else {
+		dptr = (uint32_t *)(&mpi_dump->serdes1_xaui_an);
+		indptr = (uint32_t *)(&mpi_dump->serdes2_xaui_an);
+	}
+
+	for (i = 0; i <= 0x000000034; i += 4, dptr ++, indptr ++) {
+		qls_get_both_serdes(ha, i, dptr, indptr,
+			xaui_d_valid, xaui_ind_valid);
+	}
+
+	/* XAUI HSS-PCS block: 0x800 .. 0x880 */
+	if (ha->pci_func & 1) {
+		dptr = (uint32_t *)(&mpi_dump->serdes2_xaui_hss_pcs);
+		indptr = (uint32_t *)(&mpi_dump->serdes1_xaui_hss_pcs);
+	} else {
+		dptr = (uint32_t *)(&mpi_dump->serdes1_xaui_hss_pcs);
+		indptr = (uint32_t *)(&mpi_dump->serdes2_xaui_hss_pcs);
+	}
+
+	for (i = 0x800; i <= 0x880; i += 4, dptr ++, indptr ++) {
+		qls_get_both_serdes(ha, i, dptr, indptr,
+			xaui_d_valid, xaui_ind_valid);
+	}
+
+	/* XFI AN block: 0x1000 .. 0x1034 */
+	if (ha->pci_func & 1) {
+		dptr = (uint32_t *)(&mpi_dump->serdes2_xfi_an);
+		indptr = (uint32_t *)(&mpi_dump->serdes1_xfi_an);
+	} else {
+		dptr = (uint32_t *)(&mpi_dump->serdes1_xfi_an);
+		indptr = (uint32_t *)(&mpi_dump->serdes2_xfi_an);
+	}
+
+	for (i = 0x1000; i <= 0x1034; i += 4, dptr ++, indptr ++) {
+		qls_get_both_serdes(ha, i, dptr, indptr,
+			xfi_d_valid, xfi_ind_valid);
+	}
+
+	/* XFI training block: 0x1050 .. 0x107c */
+	if (ha->pci_func & 1) {
+		dptr = (uint32_t *)(&mpi_dump->serdes2_xfi_train);
+		indptr = (uint32_t *)(&mpi_dump->serdes1_xfi_train);
+	} else {
+		dptr = (uint32_t *)(&mpi_dump->serdes1_xfi_train);
+		indptr = (uint32_t *)(&mpi_dump->serdes2_xfi_train);
+	}
+
+	for (i = 0x1050; i <= 0x107c; i += 4, dptr ++, indptr ++) {
+		qls_get_both_serdes(ha, i, dptr, indptr,
+			xfi_d_valid, xfi_ind_valid);
+	}
+
+	/* XFI HSS-PCS block: 0x1800 .. 0x1838 */
+	if (ha->pci_func & 1) {
+		dptr = (uint32_t *)(&mpi_dump->serdes2_xfi_hss_pcs);
+		indptr = (uint32_t *)(&mpi_dump->serdes1_xfi_hss_pcs);
+	} else {
+		dptr = (uint32_t *)(&mpi_dump->serdes1_xfi_hss_pcs);
+		indptr = (uint32_t *)(&mpi_dump->serdes2_xfi_hss_pcs);
+	}
+
+	for (i = 0x1800; i <= 0x1838; i += 4, dptr++, indptr ++) {
+		qls_get_both_serdes(ha, i, dptr, indptr,
+			xfi_d_valid, xfi_ind_valid);
+	}
+
+	/* XFI HSS-TX block: 0x1c00 .. 0x1c1f (stride 1, not 4) */
+	if (ha->pci_func & 1) {
+		dptr = (uint32_t *)(&mpi_dump->serdes2_xfi_hss_tx);
+		indptr = (uint32_t *)(&mpi_dump->serdes1_xfi_hss_tx);
+	} else {
+		dptr = (uint32_t *)(&mpi_dump->serdes1_xfi_hss_tx);
+		indptr = (uint32_t *)(&mpi_dump->serdes2_xfi_hss_tx);
+	}
+
+	for (i = 0x1c00; i <= 0x1c1f; i++, dptr ++, indptr ++) {
+		qls_get_both_serdes(ha, i, dptr, indptr,
+			xfi_d_valid, xfi_ind_valid);
+	}
+
+	/* XFI HSS-RX block: 0x1c40 .. 0x1c5f (stride 1) */
+	if (ha->pci_func & 1) {
+		dptr = (uint32_t *)(&mpi_dump->serdes2_xfi_hss_rx);
+		indptr = (uint32_t *)(&mpi_dump->serdes1_xfi_hss_rx);
+	} else {
+		dptr = (uint32_t *)(&mpi_dump->serdes1_xfi_hss_rx);
+		indptr = (uint32_t *)(&mpi_dump->serdes2_xfi_hss_rx);
+	}
+
+	for (i = 0x1c40; i <= 0x1c5f; i++, dptr ++, indptr ++) {
+		qls_get_both_serdes(ha, i, dptr, indptr,
+			xfi_d_valid, xfi_ind_valid);
+	}
+
+	/* XFI HSS-PLL block: 0x1e00 .. 0x1e1f (stride 1) */
+	if (ha->pci_func & 1) {
+		dptr = (uint32_t *)(&mpi_dump->serdes2_xfi_hss_pll);
+		indptr = (uint32_t *)(&mpi_dump->serdes1_xfi_hss_pll);
+	} else {
+		dptr = (uint32_t *)(&mpi_dump->serdes1_xfi_hss_pll);
+		indptr = (uint32_t *)(&mpi_dump->serdes2_xfi_hss_pll);
+	}
+
+	for (i = 0x1e00; i <= 0x1e1f; i++, dptr ++, indptr ++) {
+		qls_get_both_serdes(ha, i, dptr, indptr,
+			xfi_d_valid, xfi_ind_valid);
+	}
+
+	return(0);
+}
+
+/*
+ * Resume the MPI RISC processor.  Returns -1 if the RISC was not
+ * actually paused, 0 after issuing the clear-pause command (the command
+ * itself is not waited on).
+ */
+static int
+qls_unpause_mpi_risc(qla_host_t *ha)
+{
+	uint32_t data;
+
+	data = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);
+
+	if (!(data & Q81_CTL_HCS_RISC_PAUSED))
+		return -1;
+
+	WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, \
+		Q81_CTL_HCS_CMD_CLR_RISC_PAUSE);
+
+	return 0;
+}
+
+/*
+ * Pause the MPI RISC processor so its state can be dumped coherently.
+ * Issues the set-pause command and polls for the paused status up to 10
+ * times, 10ms apart.  Returns 0 when paused, -1 on timeout.
+ */
+static int
+qls_pause_mpi_risc(qla_host_t *ha)
+{
+	uint32_t data;
+	int count = 10;
+
+	WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, \
+		Q81_CTL_HCS_CMD_SET_RISC_PAUSE);
+
+	do {
+		data = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);
+
+		if (data & Q81_CTL_HCS_RISC_PAUSED)
+			break;
+
+		qls_mdelay(__func__, 10);
+
+		count--;
+
+	} while (count);
+
+	return ((count == 0) ? -1 : 0);
+}
+
+/*
+ * Capture the interrupt-enable state of every rx ring into 'buf'
+ * (MAX_RX_RINGS entries).  Each iteration writes a select/read command
+ * for ring i to the interrupt-enable register and reads the result
+ * back.  NOTE(review): 0x037f0300 is presumably the "read intr enable"
+ * command/mask encoding with the ring index in the low bits — confirm
+ * against the Q81_CTL_INTR_ENABLE bit definitions in qls_hw.h.
+ */
+static void
+qls_get_intr_states(qla_host_t *ha, uint32_t *buf)
+{
+	int i;
+
+	for (i = 0; i < MAX_RX_RINGS; i++, buf++) {
+
+		WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, (0x037f0300 + i));
+
+		*buf = READ_REG32(ha, Q81_CTL_INTR_ENABLE);
+	}
+}
+
+/*
+ * Read XGMAC register 'reg' of *this* function through the direct
+ * Q81_CTL_XGMAC_ADDR / Q81_CTL_XGMAC_DATA window: wait ready, post the
+ * address with the read bit, wait for completion, fetch the data.
+ * Returns 0 on success, non-zero on wait failure / XME error.
+ */
+static int
+qls_rd_xgmac_reg(qla_host_t *ha, uint32_t reg, uint32_t*data)
+{
+	int ret = 0;
+
+	ret = qls_wait_reg_rdy(ha, Q81_CTL_XGMAC_ADDR, Q81_XGMAC_ADDR_RDY,
+			Q81_XGMAC_ADDR_XME);
+	if (ret)
+		goto exit_qls_rd_xgmac_reg;
+
+	WRITE_REG32(ha, Q81_CTL_XGMAC_ADDR, (reg | Q81_XGMAC_ADDR_R));
+
+	ret = qls_wait_reg_rdy(ha, Q81_CTL_XGMAC_ADDR, Q81_XGMAC_ADDR_RDY,
+			Q81_XGMAC_ADDR_XME);
+	if (ret)
+		goto exit_qls_rd_xgmac_reg;
+
+	*data = READ_REG32(ha, Q81_CTL_XGMAC_DATA);
+
+exit_qls_rd_xgmac_reg:
+	return ret;
+}
+
+/*
+ * Dump the XGMAC register block into 'buf'.
+ *
+ * Walks byte offsets 0 .. Q81_XGMAC_REGISTER_END in steps of 4; for
+ * every offset that is a known XGMAC register (the whitelist below,
+ * mirroring the enum at the top of this file) the register is read —
+ * through this function's window, or through the other NIC function's
+ * window when o_func is non-zero.  'buf' advances every iteration, so
+ * entries for offsets not in the whitelist are left untouched; a failed
+ * read stores Q81_BAD_DATA.  Always returns 0.
+ */
+static int
+qls_rd_xgmac_regs(qla_host_t *ha, uint32_t *buf, uint32_t o_func)
+{
+	int ret = 0;
+	int i;
+
+	for (i = 0; i < Q81_XGMAC_REGISTER_END; i += 4, buf ++) {
+
+		switch (i) {
+		case Q81_PAUSE_SRC_LO			:
+		case Q81_PAUSE_SRC_HI			:
+		case Q81_GLOBAL_CFG			:
+		case Q81_TX_CFG				:
+		case Q81_RX_CFG				:
+		case Q81_FLOW_CTL			:
+		case Q81_PAUSE_OPCODE			:
+		case Q81_PAUSE_TIMER			:
+		case Q81_PAUSE_FRM_DEST_LO		:
+		case Q81_PAUSE_FRM_DEST_HI		:
+		case Q81_MAC_TX_PARAMS			:
+		case Q81_MAC_RX_PARAMS			:
+		case Q81_MAC_SYS_INT			:
+		case Q81_MAC_SYS_INT_MASK		:
+		case Q81_MAC_MGMT_INT			:
+		case Q81_MAC_MGMT_IN_MASK		:
+		case Q81_EXT_ARB_MODE			:
+		case Q81_TX_PKTS			:
+		case Q81_TX_PKTS_LO			:
+		case Q81_TX_BYTES			:
+		case Q81_TX_BYTES_LO			:
+		case Q81_TX_MCAST_PKTS			:
+		case Q81_TX_MCAST_PKTS_LO		:
+		case Q81_TX_BCAST_PKTS			:
+		case Q81_TX_BCAST_PKTS_LO		:
+		case Q81_TX_UCAST_PKTS			:
+		case Q81_TX_UCAST_PKTS_LO		:
+		case Q81_TX_CTL_PKTS			:
+		case Q81_TX_CTL_PKTS_LO			:
+		case Q81_TX_PAUSE_PKTS			:
+		case Q81_TX_PAUSE_PKTS_LO		:
+		case Q81_TX_64_PKT			:
+		case Q81_TX_64_PKT_LO			:
+		case Q81_TX_65_TO_127_PKT		:
+		case Q81_TX_65_TO_127_PKT_LO		:
+		case Q81_TX_128_TO_255_PKT		:
+		case Q81_TX_128_TO_255_PKT_LO		:
+		case Q81_TX_256_511_PKT			:
+		case Q81_TX_256_511_PKT_LO		:
+		case Q81_TX_512_TO_1023_PKT		:
+		case Q81_TX_512_TO_1023_PKT_LO		:
+		case Q81_TX_1024_TO_1518_PKT		:
+		case Q81_TX_1024_TO_1518_PKT_LO		:
+		case Q81_TX_1519_TO_MAX_PKT		:
+		case Q81_TX_1519_TO_MAX_PKT_LO		:
+		case Q81_TX_UNDERSIZE_PKT		:
+		case Q81_TX_UNDERSIZE_PKT_LO		:
+		case Q81_TX_OVERSIZE_PKT		:
+		case Q81_TX_OVERSIZE_PKT_LO		:
+		case Q81_RX_HALF_FULL_DET		:
+		case Q81_TX_HALF_FULL_DET_LO		:
+		case Q81_RX_OVERFLOW_DET		:
+		case Q81_TX_OVERFLOW_DET_LO		:
+		case Q81_RX_HALF_FULL_MASK		:
+		case Q81_TX_HALF_FULL_MASK_LO		:
+		case Q81_RX_OVERFLOW_MASK		:
+		case Q81_TX_OVERFLOW_MASK_LO		:
+		case Q81_STAT_CNT_CTL			:
+		case Q81_AUX_RX_HALF_FULL_DET		:
+		case Q81_AUX_TX_HALF_FULL_DET		:
+		case Q81_AUX_RX_OVERFLOW_DET		:
+		case Q81_AUX_TX_OVERFLOW_DET		:
+		case Q81_AUX_RX_HALF_FULL_MASK		:
+		case Q81_AUX_TX_HALF_FULL_MASK		:
+		case Q81_AUX_RX_OVERFLOW_MASK		:
+		case Q81_AUX_TX_OVERFLOW_MASK		:
+		case Q81_RX_BYTES			:
+		case Q81_RX_BYTES_LO			:
+		case Q81_RX_BYTES_OK			:
+		case Q81_RX_BYTES_OK_LO			:
+		case Q81_RX_PKTS			:
+		case Q81_RX_PKTS_LO			:
+		case Q81_RX_PKTS_OK			:
+		case Q81_RX_PKTS_OK_LO			:
+		case Q81_RX_BCAST_PKTS			:
+		case Q81_RX_BCAST_PKTS_LO		:
+		case Q81_RX_MCAST_PKTS			:
+		case Q81_RX_MCAST_PKTS_LO		:
+		case Q81_RX_UCAST_PKTS			:
+		case Q81_RX_UCAST_PKTS_LO		:
+		case Q81_RX_UNDERSIZE_PKTS		:
+		case Q81_RX_UNDERSIZE_PKTS_LO		:
+		case Q81_RX_OVERSIZE_PKTS		:
+		case Q81_RX_OVERSIZE_PKTS_LO		:
+		case Q81_RX_JABBER_PKTS			:
+		case Q81_RX_JABBER_PKTS_LO		:
+		case Q81_RX_UNDERSIZE_FCERR_PKTS	:
+		case Q81_RX_UNDERSIZE_FCERR_PKTS_LO	:
+		case Q81_RX_DROP_EVENTS			:
+		case Q81_RX_DROP_EVENTS_LO		:
+		case Q81_RX_FCERR_PKTS			:
+		case Q81_RX_FCERR_PKTS_LO		:
+		case Q81_RX_ALIGN_ERR			:
+		case Q81_RX_ALIGN_ERR_LO		:
+		case Q81_RX_SYMBOL_ERR			:
+		case Q81_RX_SYMBOL_ERR_LO		:
+		case Q81_RX_MAC_ERR			:
+		case Q81_RX_MAC_ERR_LO			:
+		case Q81_RX_CTL_PKTS			:
+		case Q81_RX_CTL_PKTS_LO			:
+		case Q81_RX_PAUSE_PKTS			:
+		case Q81_RX_PAUSE_PKTS_LO		:
+		case Q81_RX_64_PKTS			:
+		case Q81_RX_64_PKTS_LO			:
+		case Q81_RX_65_TO_127_PKTS		:
+		case Q81_RX_65_TO_127_PKTS_LO		:
+		case Q81_RX_128_255_PKTS		:
+		case Q81_RX_128_255_PKTS_LO		:
+		case Q81_RX_256_511_PKTS		:
+		case Q81_RX_256_511_PKTS_LO		:
+		case Q81_RX_512_TO_1023_PKTS		:
+		case Q81_RX_512_TO_1023_PKTS_LO		:
+		case Q81_RX_1024_TO_1518_PKTS		:
+		case Q81_RX_1024_TO_1518_PKTS_LO	:
+		case Q81_RX_1519_TO_MAX_PKTS		:
+		case Q81_RX_1519_TO_MAX_PKTS_LO		:
+		case Q81_RX_LEN_ERR_PKTS		:
+		case Q81_RX_LEN_ERR_PKTS_LO		:
+		case Q81_MDIO_TX_DATA			:
+		case Q81_MDIO_RX_DATA			:
+		case Q81_MDIO_CMD			:
+		case Q81_MDIO_PHY_ADDR			:
+		case Q81_MDIO_PORT			:
+		case Q81_MDIO_STATUS			:
+		case Q81_TX_CBFC_PAUSE_FRAMES0		:
+		case Q81_TX_CBFC_PAUSE_FRAMES0_LO	:
+		case Q81_TX_CBFC_PAUSE_FRAMES1		:
+		case Q81_TX_CBFC_PAUSE_FRAMES1_LO	:
+		case Q81_TX_CBFC_PAUSE_FRAMES2		:
+		case Q81_TX_CBFC_PAUSE_FRAMES2_LO	:
+		case Q81_TX_CBFC_PAUSE_FRAMES3		:
+		case Q81_TX_CBFC_PAUSE_FRAMES3_LO	:
+		case Q81_TX_CBFC_PAUSE_FRAMES4		:
+		case Q81_TX_CBFC_PAUSE_FRAMES4_LO	:
+		case Q81_TX_CBFC_PAUSE_FRAMES5		:
+		case Q81_TX_CBFC_PAUSE_FRAMES5_LO	:
+		case Q81_TX_CBFC_PAUSE_FRAMES6		:
+		case Q81_TX_CBFC_PAUSE_FRAMES6_LO	:
+		case Q81_TX_CBFC_PAUSE_FRAMES7		:
+		case Q81_TX_CBFC_PAUSE_FRAMES7_LO	:
+		case Q81_TX_FCOE_PKTS			:
+		case Q81_TX_FCOE_PKTS_LO		:
+		case Q81_TX_MGMT_PKTS			:
+		case Q81_TX_MGMT_PKTS_LO		:
+		case Q81_RX_CBFC_PAUSE_FRAMES0		:
+		case Q81_RX_CBFC_PAUSE_FRAMES0_LO	:
+		case Q81_RX_CBFC_PAUSE_FRAMES1		:
+		case Q81_RX_CBFC_PAUSE_FRAMES1_LO	:
+		case Q81_RX_CBFC_PAUSE_FRAMES2		:
+		case Q81_RX_CBFC_PAUSE_FRAMES2_LO	:
+		case Q81_RX_CBFC_PAUSE_FRAMES3		:
+		case Q81_RX_CBFC_PAUSE_FRAMES3_LO	:
+		case Q81_RX_CBFC_PAUSE_FRAMES4		:
+		case Q81_RX_CBFC_PAUSE_FRAMES4_LO	:
+		case Q81_RX_CBFC_PAUSE_FRAMES5		:
+		case Q81_RX_CBFC_PAUSE_FRAMES5_LO	:
+		case Q81_RX_CBFC_PAUSE_FRAMES6		:
+		case Q81_RX_CBFC_PAUSE_FRAMES6_LO	:
+		case Q81_RX_CBFC_PAUSE_FRAMES7		:
+		case Q81_RX_CBFC_PAUSE_FRAMES7_LO	:
+		case Q81_RX_FCOE_PKTS			:
+		case Q81_RX_FCOE_PKTS_LO		:
+		case Q81_RX_MGMT_PKTS			:
+		case Q81_RX_MGMT_PKTS_LO		:
+		case Q81_RX_NIC_FIFO_DROP		:
+		case Q81_RX_NIC_FIFO_DROP_LO		:
+		case Q81_RX_FCOE_FIFO_DROP		:
+		case Q81_RX_FCOE_FIFO_DROP_LO		:
+		case Q81_RX_MGMT_FIFO_DROP		:
+		case Q81_RX_MGMT_FIFO_DROP_LO		:
+		case Q81_RX_PKTS_PRIORITY0		:
+		case Q81_RX_PKTS_PRIORITY0_LO		:
+		case Q81_RX_PKTS_PRIORITY1		:
+		case Q81_RX_PKTS_PRIORITY1_LO		:
+		case Q81_RX_PKTS_PRIORITY2		:
+		case Q81_RX_PKTS_PRIORITY2_LO		:
+		case Q81_RX_PKTS_PRIORITY3		:
+		case Q81_RX_PKTS_PRIORITY3_LO		:
+		case Q81_RX_PKTS_PRIORITY4		:
+		case Q81_RX_PKTS_PRIORITY4_LO		:
+		case Q81_RX_PKTS_PRIORITY5		:
+		case Q81_RX_PKTS_PRIORITY5_LO		:
+		case Q81_RX_PKTS_PRIORITY6		:
+		case Q81_RX_PKTS_PRIORITY6_LO		:
+		case Q81_RX_PKTS_PRIORITY7		:
+		case Q81_RX_PKTS_PRIORITY7_LO		:
+		case Q81_RX_OCTETS_PRIORITY0		:
+		case Q81_RX_OCTETS_PRIORITY0_LO		:
+		case Q81_RX_OCTETS_PRIORITY1		:
+		case Q81_RX_OCTETS_PRIORITY1_LO		:
+		case Q81_RX_OCTETS_PRIORITY2		:
+		case Q81_RX_OCTETS_PRIORITY2_LO		:
+		case Q81_RX_OCTETS_PRIORITY3		:
+		case Q81_RX_OCTETS_PRIORITY3_LO		:
+		case Q81_RX_OCTETS_PRIORITY4		:
+		case Q81_RX_OCTETS_PRIORITY4_LO		:
+		case Q81_RX_OCTETS_PRIORITY5		:
+		case Q81_RX_OCTETS_PRIORITY5_LO		:
+		case Q81_RX_OCTETS_PRIORITY6		:
+		case Q81_RX_OCTETS_PRIORITY6_LO		:
+		case Q81_RX_OCTETS_PRIORITY7		:
+		case Q81_RX_OCTETS_PRIORITY7_LO		:
+		case Q81_TX_PKTS_PRIORITY0		:
+		case Q81_TX_PKTS_PRIORITY0_LO		:
+		case Q81_TX_PKTS_PRIORITY1		:
+		case Q81_TX_PKTS_PRIORITY1_LO		:
+		case Q81_TX_PKTS_PRIORITY2		:
+		case Q81_TX_PKTS_PRIORITY2_LO		:
+		case Q81_TX_PKTS_PRIORITY3		:
+		case Q81_TX_PKTS_PRIORITY3_LO		:
+		case Q81_TX_PKTS_PRIORITY4		:
+		case Q81_TX_PKTS_PRIORITY4_LO		:
+		case Q81_TX_PKTS_PRIORITY5		:
+		case Q81_TX_PKTS_PRIORITY5_LO		:
+		case Q81_TX_PKTS_PRIORITY6		:
+		case Q81_TX_PKTS_PRIORITY6_LO		:
+		case Q81_TX_PKTS_PRIORITY7		:
+		case Q81_TX_PKTS_PRIORITY7_LO		:
+		case Q81_TX_OCTETS_PRIORITY0		:
+		case Q81_TX_OCTETS_PRIORITY0_LO		:
+		case Q81_TX_OCTETS_PRIORITY1		:
+		case Q81_TX_OCTETS_PRIORITY1_LO		:
+		case Q81_TX_OCTETS_PRIORITY2		:
+		case Q81_TX_OCTETS_PRIORITY2_LO		:
+		case Q81_TX_OCTETS_PRIORITY3		:
+		case Q81_TX_OCTETS_PRIORITY3_LO		:
+		case Q81_TX_OCTETS_PRIORITY4		:
+		case Q81_TX_OCTETS_PRIORITY4_LO		:
+		case Q81_TX_OCTETS_PRIORITY5		:
+		case Q81_TX_OCTETS_PRIORITY5_LO		:
+		case Q81_TX_OCTETS_PRIORITY6		:
+		case Q81_TX_OCTETS_PRIORITY6_LO		:
+		case Q81_TX_OCTETS_PRIORITY7		:
+		case Q81_TX_OCTETS_PRIORITY7_LO		:
+		case Q81_RX_DISCARD_PRIORITY0		:
+		case Q81_RX_DISCARD_PRIORITY0_LO	:
+		case Q81_RX_DISCARD_PRIORITY1		:
+		case Q81_RX_DISCARD_PRIORITY1_LO	:
+		case Q81_RX_DISCARD_PRIORITY2		:
+		case Q81_RX_DISCARD_PRIORITY2_LO	:
+		case Q81_RX_DISCARD_PRIORITY3		:
+		case Q81_RX_DISCARD_PRIORITY3_LO	:
+		case Q81_RX_DISCARD_PRIORITY4		:
+		case Q81_RX_DISCARD_PRIORITY4_LO	:
+		case Q81_RX_DISCARD_PRIORITY5		:
+		case Q81_RX_DISCARD_PRIORITY5_LO	:
+		case Q81_RX_DISCARD_PRIORITY6		:
+		case Q81_RX_DISCARD_PRIORITY6_LO	:
+		case Q81_RX_DISCARD_PRIORITY7		:
+		case Q81_RX_DISCARD_PRIORITY7_LO	:
+
+			if (o_func)
+				ret = qls_rd_ofunc_xgmac_reg(ha,
+						i, buf);
+			else
+				ret = qls_rd_xgmac_reg(ha, i, buf);
+
+			if (ret)
+				*buf = Q81_BAD_DATA;
+
+			break;
+
+		default:
+			break;
+
+		}
+	}
+	return 0;
+}
+
+static int
+qls_get_mpi_regs(qla_host_t *ha, uint32_t *buf, uint32_t offset, uint32_t count)
+{	/* Read "count" consecutive MPI registers starting at "offset" into buf. */
+	int i, ret = 0;
+
+	for (i = 0; i < count; i++, buf++) {
+
+		ret = qls_rd_mpi_reg(ha, (offset + i), buf);
+
+		if (ret)
+			return ret;	/* abort on the first failed register read */
+	}
+
+	return (ret);	/* ret is always 0 here */
+}
+
+static int
+qls_get_mpi_shadow_regs(qla_host_t *ha, uint32_t *buf)
+{	/* Dump the Q81_MPI_CORE_SH_REGS_CNT MPI core shadow registers into buf. */
+	uint32_t i;
+	int ret;
+
+#define Q81_RISC_124 0x0000007c
+#define Q81_RISC_127 0x0000007f
+#define Q81_SHADOW_OFFSET 0xb0000000
+
+	for (i = 0; i < Q81_MPI_CORE_SH_REGS_CNT; i++, buf++) {
+
+		ret = qls_wr_mpi_reg(ha,
+			(Q81_CTL_PROC_ADDR_RISC_INT_REG | Q81_RISC_124),
+			(Q81_SHADOW_OFFSET | i << 20));	/* select shadow index i via RISC reg 124 */
+		if (ret)
+			goto exit_qls_get_mpi_shadow_regs;
+
+		ret = qls_mpi_risc_rd_reg(ha,
+			(Q81_CTL_PROC_ADDR_RISC_INT_REG | Q81_RISC_127),
+			buf);	/* read the selected shadow word back through RISC reg 127 */
+		if (ret)
+			goto exit_qls_get_mpi_shadow_regs;
+	}
+
+exit_qls_get_mpi_shadow_regs:
+	return ret;
+}
+
+#define SYS_CLOCK (0x00)
+#define PCI_CLOCK (0x80)
+#define FC_CLOCK (0x140)
+#define XGM_CLOCK (0x180)
+
+#define Q81_ADDRESS_REGISTER_ENABLE 0x00010000
+#define Q81_UP 0x00008000
+#define Q81_MAX_MUX 0x40
+#define Q81_MAX_MODULES 0x1F
+
+static uint32_t *
+qls_get_probe(qla_host_t *ha, uint32_t clock, uint8_t *valid, uint32_t *buf)
+{	/* Walk every mux of every valid probe module for one clock domain;
+	   stores (probe address, then lo/hi data per mux) and returns the
+	   advanced buf pointer. */
+	uint32_t module, mux_sel, probe, lo_val, hi_val;
+
+	for (module = 0; module < Q81_MAX_MODULES; module ++) {	/* bound is 0x1F: valid[0x1F] is never reached */
+
+		if (valid[module]) {
+
+			for (mux_sel = 0; mux_sel < Q81_MAX_MUX; mux_sel++) {
+
+				probe = clock | Q81_ADDRESS_REGISTER_ENABLE |
+						mux_sel | (module << 9);
+				WRITE_REG32(ha, Q81_CTL_XG_PROBE_MUX_ADDR,\
+					probe);
+
+				lo_val = READ_REG32(ha,\
+						Q81_CTL_XG_PROBE_MUX_DATA);
+
+				if (mux_sel == 0) {	/* record the probe address once per module */
+					*buf = probe;
+					buf ++;
+				}
+
+				probe |= Q81_UP;	/* re-issue with Q81_UP set to fetch the upper word */
+
+				WRITE_REG32(ha, Q81_CTL_XG_PROBE_MUX_ADDR,\
+					probe);
+				hi_val = READ_REG32(ha,\
+						Q81_CTL_XG_PROBE_MUX_DATA);
+
+				*buf = lo_val;
+				buf++;
+				*buf = hi_val;
+				buf++;
+			}
+		}
+	}
+
+	return(buf);
+}
+
+static int
+qls_get_probe_dump(qla_host_t *ha, uint32_t *buf)
+{
+	/*
+	 * Validity maps (one byte per probe module, indices 0x00-0x1F) for
+	 * each clock domain.  Contents are identical to the vendor tables;
+	 * rows group eight modules for readability.
+	 */
+	uint8_t sys_clock_valid_modules[0x20] = {
+		1, 1, 1, 0, 1, 1, 1, 1,		/* 0x00 - 0x07 */
+		1, 1, 1, 1, 1, 1, 1, 0,		/* 0x08 - 0x0F */
+		1, 1, 1, 1, 0, 0, 0, 0,		/* 0x10 - 0x17 */
+		0, 0, 0, 0, 0, 0, 0, 0		/* 0x18 - 0x1F */
+	};
+
+	uint8_t pci_clock_valid_modules[0x20] = {
+		1, 0, 0, 0, 0, 0, 1, 1,		/* 0x00 - 0x07 */
+		0, 0, 0, 0, 0, 0, 1, 0,		/* 0x08 - 0x0F */
+		0, 0, 0, 0, 0, 0, 0, 0,		/* 0x10 - 0x17 */
+		0, 0, 0, 0, 0, 0, 0, 0		/* 0x18 - 0x1F */
+	};
+
+	uint8_t xgm_clock_valid_modules[0x20] = {
+		1, 0, 0, 1, 0, 0, 0, 0,		/* 0x00 - 0x07 */
+		1, 1, 0, 0, 1, 1, 1, 0,		/* 0x08 - 0x0F */
+		1, 1, 0, 0, 0, 0, 0, 0,		/* 0x10 - 0x17 */
+		0, 0, 0, 0, 0, 0, 0, 0		/* 0x18 - 0x1F */
+	};
+
+	uint8_t fc_clock_valid_modules[0x20] = {
+		1, 0, 0, 0, 0, 0, 0, 0,		/* 0x00 - 0x07 */
+		0, 0, 0, 0, 1, 1, 0, 0,		/* 0x08 - 0x0F */
+		0, 0, 0, 0, 0, 0, 0, 0,		/* 0x10 - 0x17 */
+		0, 0, 0, 0, 0, 0, 0, 0		/* 0x18 - 0x1F */
+	};
+
+	/* Undocumented MPI setup write taken from the vendor dump
+	 * sequence — presumably arms the probe logic; TODO confirm. */
+	qls_wr_mpi_reg(ha, 0x100e, 0x18a20000);
+
+	/* Capture every clock domain back to back into the caller's buffer. */
+	buf = qls_get_probe(ha, SYS_CLOCK, sys_clock_valid_modules, buf);
+	buf = qls_get_probe(ha, PCI_CLOCK, pci_clock_valid_modules, buf);
+	buf = qls_get_probe(ha, XGM_CLOCK, xgm_clock_valid_modules, buf);
+	buf = qls_get_probe(ha, FC_CLOCK, fc_clock_valid_modules, buf);
+
+	return(0);
+}
+
+static void
+qls_get_ridx_registers(qla_host_t *ha, uint32_t *buf)
+{	/* Dump routing-index registers: types 0-1 have 8 entries each,
+	   types 2-3 have 16; stores (type, idx, index reg, data reg) tuples. */
+	uint32_t type, idx, idx_max;
+	uint32_t r_idx;
+	uint32_t r_data;
+	uint32_t val;
+
+	for (type = 0; type < 4; type ++) {
+		if (type < 2)
+			idx_max = 8;
+		else
+			idx_max = 16;
+
+		for (idx = 0; idx < idx_max; idx ++) {
+
+			val = 0x04000000 | (type << 16) | (idx << 8);	/* read-select command word */
+			WRITE_REG32(ha, Q81_CTL_ROUTING_INDEX, val);
+
+			r_idx = 0;
+			while ((r_idx & 0x40000000) == 0)	/* busy-wait on ready bit; NOTE(review): no timeout */
+				r_idx = READ_REG32(ha, Q81_CTL_ROUTING_INDEX);
+
+			r_data = READ_REG32(ha, Q81_CTL_ROUTING_DATA);
+
+			*buf = type;
+			buf ++;
+			*buf = idx;
+			buf ++;
+			*buf = r_idx;
+			buf ++;
+			*buf = r_data;
+			buf ++;
+		}
+	}
+}
+
+static void
+qls_get_mac_proto_regs(qla_host_t *ha, uint32_t* buf)
+{
+
+#define Q81_RS_AND_ADR 0x06000000
+#define Q81_RS_ONLY 0x04000000
+#define Q81_NUM_TYPES 10
+
+	/*
+	 * Per-type read command word, number of indices and words per entry.
+	 * Entry order matches the hardware type encoding (bits 16+).
+	 */
+	static const struct {
+		uint32_t init_val;
+		uint32_t max_index;
+		uint32_t max_offset;
+	} mac_tbl[Q81_NUM_TYPES] = {
+		{ Q81_RS_AND_ADR,  512, 3 },	/* 0: CAM */
+		{ Q81_RS_ONLY,      32, 2 },	/* 1: Multicast MAC Address */
+		{ Q81_RS_ONLY,    4096, 1 },	/* 2: VLAN filter mask */
+		{ Q81_RS_ONLY,    4096, 1 },	/* 3: MC filter mask */
+		{ Q81_RS_ONLY,       4, 2 },	/* 4: FC MAC addresses */
+		{ Q81_RS_ONLY,       8, 2 },	/* 5: Mgmt MAC addresses */
+		{ Q81_RS_ONLY,      16, 1 },	/* 6: Mgmt VLAN addresses */
+		{ Q81_RS_ONLY,       4, 1 },	/* 7: Mgmt IPv4 address */
+		{ Q81_RS_ONLY,       4, 4 },	/* 8: Mgmt IPv6 address */
+		{ Q81_RS_ONLY,       4, 1 }	/* 9: Mgmt TCP/UDP Dest port */
+	};
+
+	uint32_t result_index, result_data;
+	uint32_t type;
+	uint32_t index;
+	uint32_t offset;
+	uint32_t val;
+
+	for (type = 0; type < Q81_NUM_TYPES; type ++) {
+
+		for (index = 0; index < mac_tbl[type].max_index; index ++) {
+
+			for (offset = 0; offset < mac_tbl[type].max_offset;
+			    offset ++) {
+
+				val = mac_tbl[type].init_val | (type << 16) |
+					(index << 4) | (offset);
+
+				WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX,\
+					val);
+
+				result_index = 0;
+
+				/* Busy-wait for the ready bit before reading. */
+				while ((result_index & 0x40000000) == 0)
+					result_index =
+						READ_REG32(ha, \
+							Q81_CTL_MAC_PROTO_ADDR_INDEX);
+
+				result_data = READ_REG32(ha,\
+						Q81_CTL_MAC_PROTO_ADDR_DATA);
+
+				*buf = result_index;
+				buf ++;
+
+				*buf = result_data;
+				buf ++;
+			}
+		}
+	}
+}
+
+static int
+qls_get_ets_regs(qla_host_t *ha, uint32_t *buf)
+{	/* Read the 8 NIC + 2 CNA enhanced-TX-scheduler entries into buf. */
+	int ret = 0;
+	int i;
+
+	for(i = 0; i < 8; i ++, buf ++) {
+		WRITE_REG32(ha, Q81_CTL_NIC_ENH_TX_SCHD, \
+			((i << 29) | 0x08000000));	/* select entry i; 0x08000000 presumably the read strobe — TODO confirm */
+		*buf = READ_REG32(ha, Q81_CTL_NIC_ENH_TX_SCHD);
+	}
+
+	for(i = 0; i < 2; i ++, buf ++) {
+		WRITE_REG32(ha, Q81_CTL_CNA_ENH_TX_SCHD, \
+			((i << 29) | 0x08000000));
+		*buf = READ_REG32(ha, Q81_CTL_CNA_ENH_TX_SCHD);
+	}
+
+	return ret;	/* always 0 */
+}
+
+int
+qls_mpi_core_dump(qla_host_t *ha)
+{
+	/*
+	 * Collect a complete MPI firmware core dump into the global
+	 * ql_mpi_coredump image: pause the RISC, snapshot the register
+	 * blocks / serdes / probe / routing / MAC-protocol state, then
+	 * reset the RISC and dump its RAMs.
+	 * Returns 0 on success, -1 if the RISC could not be paused.
+	 */
+	int ret;
+	int i;
+	uint32_t reg, reg_val;
+
+	qls_mpi_coredump_t *mpi_dump = &ql_mpi_coredump;
+
+	ret = qls_pause_mpi_risc(ha);
+	if (ret) {
+		printf("Failed RISC pause. Status = 0x%.08x\n",ret);
+		return(-1);
+	}
+
+	memset(&(mpi_dump->mpi_global_header), 0,
+		sizeof(qls_mpid_glbl_hdr_t));
+
+	mpi_dump->mpi_global_header.cookie = Q81_MPID_COOKIE;
+	mpi_dump->mpi_global_header.hdr_size =
+		sizeof(qls_mpid_glbl_hdr_t);
+	mpi_dump->mpi_global_header.img_size =
+		sizeof(qls_mpi_coredump_t);
+
+	/*
+	 * Copy only the string literal (13 bytes incl. NUL); copying
+	 * sizeof(id) == 16 read 3 bytes past the end of the literal.
+	 * The remainder of id[] is already zero from the memset above.
+	 */
+	memcpy(mpi_dump->mpi_global_header.id, "MPI Coredump",
+		sizeof("MPI Coredump"));
+
+	qls_mpid_seg_hdr(&mpi_dump->nic1_regs_seg_hdr,
+		Q81_NIC1_CONTROL_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->nic1_regs)),
+		"NIC1 Registers");
+
+	qls_mpid_seg_hdr(&mpi_dump->nic2_regs_seg_hdr,
+		Q81_NIC2_CONTROL_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->nic2_regs)),
+		"NIC2 Registers");
+
+	qls_mpid_seg_hdr(&mpi_dump->xgmac1_seg_hdr,
+		Q81_NIC1_XGMAC_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->xgmac1)),
+		"NIC1 XGMac Registers");
+
+	qls_mpid_seg_hdr(&mpi_dump->xgmac2_seg_hdr,
+		Q81_NIC2_XGMAC_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->xgmac2)),
+		"NIC2 XGMac Registers");
+
+	/*
+	 * Read this function's NIC registers directly and the partner
+	 * function's through the other-function window; which set is
+	 * "1" vs "2" depends on the PCI function number.
+	 */
+	if (ha->pci_func & 1) {
+		for (i = 0; i < 64; i++)
+			mpi_dump->nic2_regs[i] =
+				READ_REG32(ha, i * sizeof(uint32_t));
+
+		for (i = 0; i < 64; i++)
+			mpi_dump->nic1_regs[i] =
+				qls_rd_ofunc_reg(ha,
+					(i * sizeof(uint32_t)) / 4);
+
+		qls_rd_xgmac_regs(ha, &mpi_dump->xgmac2[0], 0);
+		qls_rd_xgmac_regs(ha, &mpi_dump->xgmac1[0], 1);
+	} else {
+		for (i = 0; i < 64; i++)
+			mpi_dump->nic1_regs[i] =
+				READ_REG32(ha, i * sizeof(uint32_t));
+
+		for (i = 0; i < 64; i++)
+			mpi_dump->nic2_regs[i] =
+				qls_rd_ofunc_reg(ha,
+					(i * sizeof(uint32_t)) / 4);
+
+		qls_rd_xgmac_regs(ha, &mpi_dump->xgmac1[0], 0);
+		qls_rd_xgmac_regs(ha, &mpi_dump->xgmac2[0], 1);
+	}
+
+	qls_mpid_seg_hdr(&mpi_dump->xaui1_an_hdr,
+		Q81_XAUI1_AN_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) +
+			sizeof(mpi_dump->serdes1_xaui_an)),
+		"XAUI1 AN Registers");
+
+	qls_mpid_seg_hdr(&mpi_dump->xaui1_hss_pcs_hdr,
+		Q81_XAUI1_HSS_PCS_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) +
+			sizeof(mpi_dump->serdes1_xaui_hss_pcs)),
+		"XAUI1 HSS PCS Registers");
+
+	qls_mpid_seg_hdr(&mpi_dump->xfi1_an_hdr,
+		Q81_XFI1_AN_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->serdes1_xfi_an)),
+		"XFI1 AN Registers");
+
+	qls_mpid_seg_hdr(&mpi_dump->xfi1_train_hdr,
+		Q81_XFI1_TRAIN_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) +
+			sizeof(mpi_dump->serdes1_xfi_train)),
+		"XFI1 TRAIN Registers");
+
+	qls_mpid_seg_hdr(&mpi_dump->xfi1_hss_pcs_hdr,
+		Q81_XFI1_HSS_PCS_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) +
+			sizeof(mpi_dump->serdes1_xfi_hss_pcs)),
+		"XFI1 HSS PCS Registers");
+
+	qls_mpid_seg_hdr(&mpi_dump->xfi1_hss_tx_hdr,
+		Q81_XFI1_HSS_TX_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) +
+			sizeof(mpi_dump->serdes1_xfi_hss_tx)),
+		"XFI1 HSS TX Registers");
+
+	qls_mpid_seg_hdr(&mpi_dump->xfi1_hss_rx_hdr,
+		Q81_XFI1_HSS_RX_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) +
+			sizeof(mpi_dump->serdes1_xfi_hss_rx)),
+		"XFI1 HSS RX Registers");
+
+	qls_mpid_seg_hdr(&mpi_dump->xfi1_hss_pll_hdr,
+		Q81_XFI1_HSS_PLL_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) +
+			sizeof(mpi_dump->serdes1_xfi_hss_pll)),
+		"XFI1 HSS PLL Registers");
+
+	qls_mpid_seg_hdr(&mpi_dump->xaui2_an_hdr,
+		Q81_XAUI2_AN_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) +
+			sizeof(mpi_dump->serdes2_xaui_an)),
+		"XAUI2 AN Registers");
+
+	qls_mpid_seg_hdr(&mpi_dump->xaui2_hss_pcs_hdr,
+		Q81_XAUI2_HSS_PCS_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) +
+			sizeof(mpi_dump->serdes2_xaui_hss_pcs)),
+		"XAUI2 HSS PCS Registers");
+
+	qls_mpid_seg_hdr(&mpi_dump->xfi2_an_hdr,
+		Q81_XFI2_AN_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->serdes2_xfi_an)),
+		"XFI2 AN Registers");
+
+	qls_mpid_seg_hdr(&mpi_dump->xfi2_train_hdr,
+		Q81_XFI2_TRAIN_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) +
+			sizeof(mpi_dump->serdes2_xfi_train)),
+		"XFI2 TRAIN Registers");
+
+	qls_mpid_seg_hdr(&mpi_dump->xfi2_hss_pcs_hdr,
+		Q81_XFI2_HSS_PCS_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) +
+			sizeof(mpi_dump->serdes2_xfi_hss_pcs)),
+		"XFI2 HSS PCS Registers");
+
+	qls_mpid_seg_hdr(&mpi_dump->xfi2_hss_tx_hdr,
+		Q81_XFI2_HSS_TX_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) +
+			sizeof(mpi_dump->serdes2_xfi_hss_tx)),
+		"XFI2 HSS TX Registers");
+
+	qls_mpid_seg_hdr(&mpi_dump->xfi2_hss_rx_hdr,
+		Q81_XFI2_HSS_RX_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) +
+			sizeof(mpi_dump->serdes2_xfi_hss_rx)),
+		"XFI2 HSS RX Registers");
+
+	qls_mpid_seg_hdr(&mpi_dump->xfi2_hss_pll_hdr,
+		Q81_XFI2_HSS_PLL_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) +
+			sizeof(mpi_dump->serdes2_xfi_hss_pll)),
+		"XFI2 HSS PLL Registers");
+
+	qls_rd_serdes_regs(ha, mpi_dump);
+
+	qls_mpid_seg_hdr(&mpi_dump->core_regs_seg_hdr,
+		Q81_CORE_SEG_NUM,
+		(sizeof(mpi_dump->core_regs_seg_hdr) +
+			sizeof(mpi_dump->mpi_core_regs) +
+			sizeof(mpi_dump->mpi_core_sh_regs)),
+		"Core Registers");
+
+	/* Register-block reads below are best-effort: a failure leaves the
+	 * segment partially filled but the dump continues. */
+	ret = qls_get_mpi_regs(ha, &mpi_dump->mpi_core_regs[0],
+			Q81_MPI_CORE_REGS_ADDR, Q81_MPI_CORE_REGS_CNT);
+
+	ret = qls_get_mpi_shadow_regs(ha,
+			&mpi_dump->mpi_core_sh_regs[0]);
+
+	qls_mpid_seg_hdr(&mpi_dump->test_logic_regs_seg_hdr,
+		Q81_TEST_LOGIC_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) +
+			sizeof(mpi_dump->test_logic_regs)),
+		"Test Logic Regs");
+
+	ret = qls_get_mpi_regs(ha, &mpi_dump->test_logic_regs[0],
+			Q81_TEST_REGS_ADDR, Q81_TEST_REGS_CNT);
+
+	qls_mpid_seg_hdr(&mpi_dump->rmii_regs_seg_hdr,
+		Q81_RMII_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->rmii_regs)),
+		"RMII Registers");
+
+	ret = qls_get_mpi_regs(ha, &mpi_dump->rmii_regs[0],
+			Q81_RMII_REGS_ADDR, Q81_RMII_REGS_CNT);
+
+	qls_mpid_seg_hdr(&mpi_dump->fcmac1_regs_seg_hdr,
+		Q81_FCMAC1_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->fcmac1_regs)),
+		"FCMAC1 Registers");
+
+	ret = qls_get_mpi_regs(ha, &mpi_dump->fcmac1_regs[0],
+			Q81_FCMAC1_REGS_ADDR, Q81_FCMAC_REGS_CNT);
+
+	qls_mpid_seg_hdr(&mpi_dump->fcmac2_regs_seg_hdr,
+		Q81_FCMAC2_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->fcmac2_regs)),
+		"FCMAC2 Registers");
+
+	ret = qls_get_mpi_regs(ha, &mpi_dump->fcmac2_regs[0],
+			Q81_FCMAC2_REGS_ADDR, Q81_FCMAC_REGS_CNT);
+
+	qls_mpid_seg_hdr(&mpi_dump->fc1_mbx_regs_seg_hdr,
+		Q81_FC1_MBOX_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->fc1_mbx_regs)),
+		"FC1 MBox Regs");
+
+	ret = qls_get_mpi_regs(ha, &mpi_dump->fc1_mbx_regs[0],
+			Q81_FC1_MBX_REGS_ADDR, Q81_FC_MBX_REGS_CNT);
+
+	qls_mpid_seg_hdr(&mpi_dump->ide_regs_seg_hdr,
+		Q81_IDE_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->ide_regs)),
+		"IDE Registers");
+
+	ret = qls_get_mpi_regs(ha, &mpi_dump->ide_regs[0],
+			Q81_IDE_REGS_ADDR, Q81_IDE_REGS_CNT);
+
+	qls_mpid_seg_hdr(&mpi_dump->nic1_mbx_regs_seg_hdr,
+		Q81_NIC1_MBOX_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->nic1_mbx_regs)),
+		"NIC1 MBox Regs");
+
+	ret = qls_get_mpi_regs(ha, &mpi_dump->nic1_mbx_regs[0],
+			Q81_NIC1_MBX_REGS_ADDR, Q81_NIC_MBX_REGS_CNT);
+
+	qls_mpid_seg_hdr(&mpi_dump->smbus_regs_seg_hdr,
+		Q81_SMBUS_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->smbus_regs)),
+		"SMBus Registers");
+
+	ret = qls_get_mpi_regs(ha, &mpi_dump->smbus_regs[0],
+			Q81_SMBUS_REGS_ADDR, Q81_SMBUS_REGS_CNT);
+
+	qls_mpid_seg_hdr(&mpi_dump->fc2_mbx_regs_seg_hdr,
+		Q81_FC2_MBOX_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->fc2_mbx_regs)),
+		"FC2 MBox Regs");
+
+	ret = qls_get_mpi_regs(ha, &mpi_dump->fc2_mbx_regs[0],
+			Q81_FC2_MBX_REGS_ADDR, Q81_FC_MBX_REGS_CNT);
+
+	qls_mpid_seg_hdr(&mpi_dump->nic2_mbx_regs_seg_hdr,
+		Q81_NIC2_MBOX_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->nic2_mbx_regs)),
+		"NIC2 MBox Regs");
+
+	ret = qls_get_mpi_regs(ha, &mpi_dump->nic2_mbx_regs[0],
+			Q81_NIC2_MBX_REGS_ADDR, Q81_NIC_MBX_REGS_CNT);
+
+	qls_mpid_seg_hdr(&mpi_dump->i2c_regs_seg_hdr,
+		Q81_I2C_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) +
+			sizeof(mpi_dump->i2c_regs)),
+		"I2C Registers");
+
+	ret = qls_get_mpi_regs(ha, &mpi_dump->i2c_regs[0],
+			Q81_I2C_REGS_ADDR, Q81_I2C_REGS_CNT);
+
+	qls_mpid_seg_hdr(&mpi_dump->memc_regs_seg_hdr,
+		Q81_MEMC_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->memc_regs)),
+		"MEMC Registers");
+
+	ret = qls_get_mpi_regs(ha, &mpi_dump->memc_regs[0],
+			Q81_MEMC_REGS_ADDR, Q81_MEMC_REGS_CNT);
+
+	qls_mpid_seg_hdr(&mpi_dump->pbus_regs_seg_hdr,
+		Q81_PBUS_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->pbus_regs)),
+		"PBUS Registers");
+
+	ret = qls_get_mpi_regs(ha, &mpi_dump->pbus_regs[0],
+			Q81_PBUS_REGS_ADDR, Q81_PBUS_REGS_CNT);
+
+	qls_mpid_seg_hdr(&mpi_dump->mde_regs_seg_hdr,
+		Q81_MDE_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->mde_regs)),
+		"MDE Registers");
+
+	ret = qls_get_mpi_regs(ha, &mpi_dump->mde_regs[0],
+			Q81_MDE_REGS_ADDR, Q81_MDE_REGS_CNT);
+
+	qls_mpid_seg_hdr(&mpi_dump->intr_states_seg_hdr,
+		Q81_INTR_STATES_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->intr_states)),
+		"INTR States");
+
+	qls_get_intr_states(ha, &mpi_dump->intr_states[0]);
+
+	qls_mpid_seg_hdr(&mpi_dump->probe_dump_seg_hdr,
+		Q81_PROBE_DUMP_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->probe_dump)),
+		"Probe Dump");
+
+	qls_get_probe_dump(ha, &mpi_dump->probe_dump[0]);
+
+	qls_mpid_seg_hdr(&mpi_dump->routing_reg_seg_hdr,
+		Q81_ROUTING_INDEX_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->routing_regs)),
+		"Routing Regs");
+
+	qls_get_ridx_registers(ha, &mpi_dump->routing_regs[0]);
+
+	qls_mpid_seg_hdr(&mpi_dump->mac_prot_reg_seg_hdr,
+		Q81_MAC_PROTOCOL_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->mac_prot_regs)),
+		"MAC Prot Regs");
+
+	qls_get_mac_proto_regs(ha, &mpi_dump->mac_prot_regs[0]);
+
+	qls_mpid_seg_hdr(&mpi_dump->ets_seg_hdr,
+		Q81_ETS_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->ets)),
+		"ETS Registers");
+
+	ret = qls_get_ets_regs(ha, &mpi_dump->ets[0]);
+
+	qls_mpid_seg_hdr(&mpi_dump->sem_regs_seg_hdr,
+		Q81_SEM_REGS_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->sem_regs)),
+		"Sem Registers");
+
+	for(i = 0; i < Q81_MAX_SEMAPHORE_FUNCTIONS ; i ++) {
+
+		reg = Q81_CTL_PROC_ADDR_REG_BLOCK | (i << Q81_FUNCTION_SHIFT) |
+			(Q81_CTL_SEMAPHORE >> 2);
+
+		ret = qls_mpi_risc_rd_reg(ha, reg, &reg_val);
+		mpi_dump->sem_regs[i] = reg_val;
+
+		if (ret != 0)
+			mpi_dump->sem_regs[i] = Q81_BAD_DATA;
+	}
+
+	ret = qls_unpause_mpi_risc(ha);
+	if (ret)
+		printf("Failed RISC unpause. Status = 0x%.08x\n",ret);
+
+	ret = qls_mpi_reset(ha);
+	if (ret)
+		printf("Failed RISC reset. Status = 0x%.08x\n",ret);
+
+	WRITE_REG32(ha, Q81_CTL_FUNC_SPECIFIC, 0x80008000);
+
+	qls_mpid_seg_hdr(&mpi_dump->memc_ram_seg_hdr,
+		Q81_MEMC_RAM_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->memc_ram)),
+		"MEMC RAM");
+
+	ret = qls_mbx_dump_risc_ram(ha, &mpi_dump->memc_ram[0],
+			Q81_MEMC_RAM_ADDR, Q81_MEMC_RAM_CNT);
+	if (ret)
+		printf("Failed Dump of MEMC RAM. Status = 0x%.08x\n",ret);
+
+	qls_mpid_seg_hdr(&mpi_dump->code_ram_seg_hdr,
+		Q81_WCS_RAM_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->code_ram)),
+		"WCS RAM");
+
+	/*
+	 * BUGFIX: the code/WCS RAM must land in code_ram[]; the original
+	 * passed memc_ram[] here, clobbering the MEMC RAM captured above
+	 * and leaving the code_ram segment empty.
+	 */
+	ret = qls_mbx_dump_risc_ram(ha, &mpi_dump->code_ram[0],
+			Q81_CODE_RAM_ADDR, Q81_CODE_RAM_CNT);
+	if (ret)
+		printf("Failed Dump of CODE RAM. Status = 0x%.08x\n",ret);
+
+	qls_mpid_seg_hdr(&mpi_dump->wqc1_seg_hdr,
+		Q81_WQC1_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->wqc1)),
+		"WQC 1");
+
+	qls_mpid_seg_hdr(&mpi_dump->wqc2_seg_hdr,
+		Q81_WQC2_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->wqc2)),
+		"WQC 2");
+
+	qls_mpid_seg_hdr(&mpi_dump->cqc1_seg_hdr,
+		Q81_CQC1_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->cqc1)),
+		"CQC 1");
+
+	qls_mpid_seg_hdr(&mpi_dump->cqc2_seg_hdr,
+		Q81_CQC2_SEG_NUM,
+		(sizeof(qls_mpid_seg_hdr_t) + sizeof(mpi_dump->cqc2)),
+		"CQC 2");
+
+	return 0;
+}
+
diff --git a/sys/dev/qlxge/qls_dump.h b/sys/dev/qlxge/qls_dump.h
new file mode 100644
index 000000000000..9ae3b55e1a48
--- /dev/null
+++ b/sys/dev/qlxge/qls_dump.h
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2013-2014 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * File: qls_dump.h
+ */
+
+#ifndef _QLS_DUMP_H_
+#define _QLS_DUMP_H_
+
+#define Q81_MPID_COOKIE 0x5555aaaa
+
+typedef struct qls_mpid_glbl_hdr
+{	/* Global header at the front of an MPI core dump image. */
+	uint32_t cookie;	/* Q81_MPID_COOKIE magic */
+	uint8_t id[16];		/* image identification string */
+	uint32_t time_lo;
+	uint32_t time_hi;
+	uint32_t img_size;	/* total image size incl. this header */
+	uint32_t hdr_size;	/* size of this header */
+	uint8_t info[220];
+} qls_mpid_glbl_hdr_t;
+
+typedef struct qls_mpid_seg_hdr
+{	/* Per-segment header preceding each register/RAM block in the dump. */
+	uint32_t cookie;
+	uint32_t seg_num;	/* segment identifier (Q81_*_SEG_NUM) */
+	uint32_t seg_size;	/* header + data size in bytes */
+	uint32_t extra;
+	uint8_t desc[16];	/* human-readable segment name */
+} qls_mpid_seg_hdr_t;
+
+enum
+{	/* MPI-addressable register block base addresses and word counts */
+	Q81_MPI_CORE_REGS_ADDR = 0x00030000,
+	Q81_MPI_CORE_REGS_CNT = 127,
+	Q81_MPI_CORE_SH_REGS_CNT = 16,
+	Q81_TEST_REGS_ADDR = 0x00001000,
+	Q81_TEST_REGS_CNT = 23,
+	Q81_RMII_REGS_ADDR = 0x00001040,
+	Q81_RMII_REGS_CNT = 64,
+	Q81_FCMAC1_REGS_ADDR = 0x00001080,
+	Q81_FCMAC2_REGS_ADDR = 0x000010c0,
+	Q81_FCMAC_REGS_CNT = 64,
+	Q81_FC1_MBX_REGS_ADDR = 0x00001100,
+	Q81_FC2_MBX_REGS_ADDR = 0x00001240,
+	Q81_FC_MBX_REGS_CNT = 64,
+	Q81_IDE_REGS_ADDR = 0x00001140,
+	Q81_IDE_REGS_CNT = 64,
+	Q81_NIC1_MBX_REGS_ADDR = 0x00001180,
+	Q81_NIC2_MBX_REGS_ADDR = 0x00001280,
+	Q81_NIC_MBX_REGS_CNT = 64,
+	Q81_SMBUS_REGS_ADDR = 0x00001200,
+	Q81_SMBUS_REGS_CNT = 64,
+	Q81_I2C_REGS_ADDR = 0x00001fc0,
+	Q81_I2C_REGS_CNT = 64,
+	Q81_MEMC_REGS_ADDR = 0x00003000,
+	Q81_MEMC_REGS_CNT = 256,
+	Q81_PBUS_REGS_ADDR = 0x00007c00,
+	Q81_PBUS_REGS_CNT = 256,
+	Q81_MDE_REGS_ADDR = 0x00010000,
+	Q81_MDE_REGS_CNT = 6,
+	Q81_CODE_RAM_ADDR = 0x00020000,
+	Q81_CODE_RAM_CNT = 0x2000,	/* WCS/code RAM, 8K words */
+	Q81_MEMC_RAM_ADDR = 0x00100000,
+	Q81_MEMC_RAM_CNT = 0x2000,	/* MEMC RAM, 8K words */
+	Q81_XGMAC_REGISTER_END = 0x740,
+};
+
+#define Q81_PROBE_DATA_LENGTH_WORDS ((64*2) + 1)
+#define Q81_NUMBER_OF_PROBES 34
+
+#define Q81_PROBE_SIZE \
+ (Q81_PROBE_DATA_LENGTH_WORDS * Q81_NUMBER_OF_PROBES)
+
+#define Q81_NUMBER_ROUTING_REG_ENTRIES 48
+#define Q81_WORDS_PER_ROUTING_REG_ENTRY 4
+
+#define Q81_ROUT_REG_SIZE \
+ (Q81_NUMBER_ROUTING_REG_ENTRIES * Q81_WORDS_PER_ROUTING_REG_ENTRY)
+
+#define Q81_MAC_PROTOCOL_REGISTER_WORDS ((512 * 3) + (32 * 2) + (4096 * 1) +\
+ (4096 * 1) + (4 * 2) +\
+ (8 * 2) + (16 * 1) +\
+ (4 * 1) + (4 * 4) + (4 * 1))
+
+#define Q81_WORDS_PER_MAC_PROT_ENTRY 2
+#define Q81_MAC_REG_SIZE \
+ (Q81_MAC_PROTOCOL_REGISTER_WORDS * Q81_WORDS_PER_MAC_PROT_ENTRY)
+
+#define Q81_MAX_SEMAPHORE_FUNCTIONS 5
+
+#define Q81_WQC_WORD_SIZE 6
+#define Q81_NUMBER_OF_WQCS 128
+#define Q81_WQ_SIZE (Q81_WQC_WORD_SIZE * Q81_NUMBER_OF_WQCS)
+
+#define Q81_CQC_WORD_SIZE 13
+#define Q81_NUMBER_OF_CQCS 128
+#define Q81_CQ_SIZE (Q81_CQC_WORD_SIZE * Q81_NUMBER_OF_CQCS)
+
+struct qls_mpi_coredump {	/* In-memory MPI core dump image: each segment header
+				   is immediately followed by its data; field order
+				   defines the dump format — do not reorder. */
+	qls_mpid_glbl_hdr_t mpi_global_header;
+
+	qls_mpid_seg_hdr_t core_regs_seg_hdr;
+	uint32_t mpi_core_regs[Q81_MPI_CORE_REGS_CNT];
+	uint32_t mpi_core_sh_regs[Q81_MPI_CORE_SH_REGS_CNT];
+
+	qls_mpid_seg_hdr_t test_logic_regs_seg_hdr;
+	uint32_t test_logic_regs[Q81_TEST_REGS_CNT];
+
+	qls_mpid_seg_hdr_t rmii_regs_seg_hdr;
+	uint32_t rmii_regs[Q81_RMII_REGS_CNT];
+
+	qls_mpid_seg_hdr_t fcmac1_regs_seg_hdr;
+	uint32_t fcmac1_regs[Q81_FCMAC_REGS_CNT];
+
+	qls_mpid_seg_hdr_t fcmac2_regs_seg_hdr;
+	uint32_t fcmac2_regs[Q81_FCMAC_REGS_CNT];
+
+	qls_mpid_seg_hdr_t fc1_mbx_regs_seg_hdr;
+	uint32_t fc1_mbx_regs[Q81_FC_MBX_REGS_CNT];
+
+	qls_mpid_seg_hdr_t ide_regs_seg_hdr;
+	uint32_t ide_regs[Q81_IDE_REGS_CNT];
+
+	qls_mpid_seg_hdr_t nic1_mbx_regs_seg_hdr;
+	uint32_t nic1_mbx_regs[Q81_NIC_MBX_REGS_CNT];
+
+	qls_mpid_seg_hdr_t smbus_regs_seg_hdr;
+	uint32_t smbus_regs[Q81_SMBUS_REGS_CNT];
+
+	qls_mpid_seg_hdr_t fc2_mbx_regs_seg_hdr;
+	uint32_t fc2_mbx_regs[Q81_FC_MBX_REGS_CNT];
+
+	qls_mpid_seg_hdr_t nic2_mbx_regs_seg_hdr;
+	uint32_t nic2_mbx_regs[Q81_NIC_MBX_REGS_CNT];
+
+	qls_mpid_seg_hdr_t i2c_regs_seg_hdr;
+	uint32_t i2c_regs[Q81_I2C_REGS_CNT];
+
+	qls_mpid_seg_hdr_t memc_regs_seg_hdr;
+	uint32_t memc_regs[Q81_MEMC_REGS_CNT];
+
+	qls_mpid_seg_hdr_t pbus_regs_seg_hdr;
+	uint32_t pbus_regs[Q81_PBUS_REGS_CNT];
+
+	qls_mpid_seg_hdr_t mde_regs_seg_hdr;
+	uint32_t mde_regs[Q81_MDE_REGS_CNT];
+
+	qls_mpid_seg_hdr_t xaui1_an_hdr;	/* serdes blocks: sizes fixed by hw register maps */
+	uint32_t serdes1_xaui_an[14];
+
+	qls_mpid_seg_hdr_t xaui1_hss_pcs_hdr;
+	uint32_t serdes1_xaui_hss_pcs[33];
+
+	qls_mpid_seg_hdr_t xfi1_an_hdr;
+	uint32_t serdes1_xfi_an[14];
+
+	qls_mpid_seg_hdr_t xfi1_train_hdr;
+	uint32_t serdes1_xfi_train[12];
+
+	qls_mpid_seg_hdr_t xfi1_hss_pcs_hdr;
+	uint32_t serdes1_xfi_hss_pcs[15];
+
+	qls_mpid_seg_hdr_t xfi1_hss_tx_hdr;
+	uint32_t serdes1_xfi_hss_tx[32];
+
+	qls_mpid_seg_hdr_t xfi1_hss_rx_hdr;
+	uint32_t serdes1_xfi_hss_rx[32];
+
+	qls_mpid_seg_hdr_t xfi1_hss_pll_hdr;
+	uint32_t serdes1_xfi_hss_pll[32];
+
+	qls_mpid_seg_hdr_t xaui2_an_hdr;
+	uint32_t serdes2_xaui_an[14];
+
+	qls_mpid_seg_hdr_t xaui2_hss_pcs_hdr;
+	uint32_t serdes2_xaui_hss_pcs[33];
+
+	qls_mpid_seg_hdr_t xfi2_an_hdr;
+	uint32_t serdes2_xfi_an[14];
+
+	qls_mpid_seg_hdr_t xfi2_train_hdr;
+	uint32_t serdes2_xfi_train[12];
+
+	qls_mpid_seg_hdr_t xfi2_hss_pcs_hdr;
+	uint32_t serdes2_xfi_hss_pcs[15];
+
+	qls_mpid_seg_hdr_t xfi2_hss_tx_hdr;
+	uint32_t serdes2_xfi_hss_tx[32];
+
+	qls_mpid_seg_hdr_t xfi2_hss_rx_hdr;
+	uint32_t serdes2_xfi_hss_rx[32];
+
+	qls_mpid_seg_hdr_t xfi2_hss_pll_hdr;
+	uint32_t serdes2_xfi_hss_pll[32];
+
+	qls_mpid_seg_hdr_t nic1_regs_seg_hdr;
+	uint32_t nic1_regs[64];
+
+	qls_mpid_seg_hdr_t nic2_regs_seg_hdr;
+	uint32_t nic2_regs[64];
+
+	qls_mpid_seg_hdr_t intr_states_seg_hdr;
+	uint32_t intr_states[MAX_RX_RINGS];
+
+	qls_mpid_seg_hdr_t xgmac1_seg_hdr;
+	uint32_t xgmac1[Q81_XGMAC_REGISTER_END];
+
+	qls_mpid_seg_hdr_t xgmac2_seg_hdr;
+	uint32_t xgmac2[Q81_XGMAC_REGISTER_END];
+
+	qls_mpid_seg_hdr_t probe_dump_seg_hdr;
+	uint32_t probe_dump[Q81_PROBE_SIZE];
+
+	qls_mpid_seg_hdr_t routing_reg_seg_hdr;
+	uint32_t routing_regs[Q81_ROUT_REG_SIZE];
+
+	qls_mpid_seg_hdr_t mac_prot_reg_seg_hdr;
+	uint32_t mac_prot_regs[Q81_MAC_REG_SIZE];
+
+	qls_mpid_seg_hdr_t sem_regs_seg_hdr;
+	uint32_t sem_regs[Q81_MAX_SEMAPHORE_FUNCTIONS];
+
+	qls_mpid_seg_hdr_t ets_seg_hdr;
+	uint32_t ets[8+2];	/* 8 NIC + 2 CNA scheduler entries */
+
+	qls_mpid_seg_hdr_t wqc1_seg_hdr;
+	uint32_t wqc1[Q81_WQ_SIZE];
+
+	qls_mpid_seg_hdr_t cqc1_seg_hdr;
+	uint32_t cqc1[Q81_CQ_SIZE];
+
+	qls_mpid_seg_hdr_t wqc2_seg_hdr;
+	uint32_t wqc2[Q81_WQ_SIZE];
+
+	qls_mpid_seg_hdr_t cqc2_seg_hdr;
+	uint32_t cqc2[Q81_CQ_SIZE];
+
+	qls_mpid_seg_hdr_t code_ram_seg_hdr;
+	uint32_t code_ram[Q81_CODE_RAM_CNT];
+
+	qls_mpid_seg_hdr_t memc_ram_seg_hdr;
+	uint32_t memc_ram[Q81_MEMC_RAM_CNT];
+};
+typedef struct qls_mpi_coredump qls_mpi_coredump_t;
+
+#define Q81_BAD_DATA 0xDEADBEEF
+
+#endif /* #ifndef _QLS_DUMP_H_ */
+
diff --git a/sys/dev/qlxge/qls_glbl.h b/sys/dev/qlxge/qls_glbl.h
new file mode 100644
index 000000000000..b7527742c043
--- /dev/null
+++ b/sys/dev/qlxge/qls_glbl.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2013-2014 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * File: qls_glbl.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ * Content: Contains prototypes of the exported functions from each file.
+ */
+#ifndef _QLS_GLBL_H_
+#define _QLS_GLBL_H_
+
+/*
+ * from qls_isr.c
+ */
+
+extern void qls_isr(void *arg);
+
+/*
+ * from qls_os.c
+ */
+
+extern int qls_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf);
+extern void qls_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf);
+extern int qls_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp);
+
+/*
+ * from qls_hw.c
+ */
+
+extern int qls_init_host_fw(qla_host_t *ha);
+extern int qls_get_msix_count(qla_host_t *ha);
+
+extern void qls_hw_add_sysctls(qla_host_t *ha);
+
+extern void qls_free_dma(qla_host_t *ha);
+extern int qls_alloc_dma(qla_host_t *ha);
+
+extern int qls_set_promisc(qla_host_t *ha);
+extern void qls_reset_promisc(qla_host_t *ha);
+extern int qls_set_allmulti(qla_host_t *ha);
+extern void qls_reset_allmulti(qla_host_t *ha);
+
+extern int qls_hw_tx_done(qla_host_t *ha, uint32_t txr_idx);
+
+extern int qls_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
+ uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx);
+
+extern void qls_del_hw_if(qla_host_t *ha);
+extern int qls_init_hw_if(qla_host_t *ha);
+
+extern void qls_hw_set_multi(qla_host_t *ha, uint8_t *mta, uint32_t mcnt,
+ uint32_t add_multi);
+
+extern void qls_update_link_state(qla_host_t *ha);
+
+extern int qls_init_hw(qla_host_t *ha);
+
+extern int qls_rd_flash32(qla_host_t *ha, uint32_t addr, uint32_t *data);
+extern int qls_rd_nic_params(qla_host_t *ha);
+
+extern int qls_mbx_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data);
+extern int qls_mbx_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data);
+extern int qls_mpi_risc_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data);
+extern int qls_mpi_risc_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data);
+
+extern int qls_mbx_dump_risc_ram(qla_host_t *ha, void *buf, uint32_t r_addr,
+ uint32_t r_size);
+
+extern int qls_mpi_reset(qla_host_t *ha);
+
+/*
+ * from qls_ioctl.c
+ */
+
+extern int qls_make_cdev(qla_host_t *ha);
+extern void qls_del_cdev(qla_host_t *ha);
+
+extern int qls_mpi_core_dump(qla_host_t *ha);
+
+#endif /* #ifndef _QLS_GLBL_H_ */
diff --git a/sys/dev/qlxge/qls_hw.c b/sys/dev/qlxge/qls_hw.c
new file mode 100644
index 000000000000..181a7b0eb94a
--- /dev/null
+++ b/sys/dev/qlxge/qls_hw.c
@@ -0,0 +1,2443 @@
+/*
+ * Copyright (c) 2013-2014 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * File: qls_hw.c
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ * Content: Contains Hardware dependent functions
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+
+
+#include "qls_os.h"
+#include "qls_hw.h"
+#include "qls_def.h"
+#include "qls_inline.h"
+#include "qls_ver.h"
+#include "qls_glbl.h"
+#include "qls_dbg.h"
+
+/*
+ * Static Functions
+ */
+static int qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op);
+static int qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac);
+static int qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr,
+ uint32_t add_mac, uint32_t index);
+
+static int qls_init_rss(qla_host_t *ha);
+static int qls_init_comp_queue(qla_host_t *ha, int cid);
+static int qls_init_work_queue(qla_host_t *ha, int wid);
+static int qls_init_fw_routing_table(qla_host_t *ha);
+static int qls_hw_add_all_mcast(qla_host_t *ha);
+static int qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta);
+static int qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta);
+static int qls_wait_for_flash_ready(qla_host_t *ha);
+
+static int qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value);
+static void qls_sem_unlock(qla_host_t *ha, uint32_t mask);
+
+static void qls_free_tx_dma(qla_host_t *ha);
+static int qls_alloc_tx_dma(qla_host_t *ha);
+static void qls_free_rx_dma(qla_host_t *ha);
+static int qls_alloc_rx_dma(qla_host_t *ha);
+static void qls_free_mpi_dma(qla_host_t *ha);
+static int qls_alloc_mpi_dma(qla_host_t *ha);
+static void qls_free_rss_dma(qla_host_t *ha);
+static int qls_alloc_rss_dma(qla_host_t *ha);
+
+static int qls_flash_validate(qla_host_t *ha, const char *signature);
+
+
+static int qls_wait_for_proc_addr_ready(qla_host_t *ha);
+static int qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module,
+ uint32_t reg, uint32_t *data);
+static int qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module,
+ uint32_t reg, uint32_t data);
+
+static int qls_hw_reset(qla_host_t *ha);
+
+/*
+ * MPI Related Functions
+ */
+static int qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count,
+ uint32_t *out_mbx, uint32_t o_count);
+static int qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl);
+static int qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status);
+static void qls_mbx_get_link_status(qla_host_t *ha);
+static void qls_mbx_about_fw(qla_host_t *ha);
+
+/*
+ * Name: qls_get_msix_count
+ * Function: Returns the number of MSI-X vectors the driver uses; one per
+ *	completion (rx) ring (each rx ring is enabled/disabled individually
+ *	via Q81_ENABLE_INTR/Q81_DISABLE_INTR).
+ */
+int
+qls_get_msix_count(qla_host_t *ha)
+{
+	return (ha->num_rx_rings);
+}
+
+/*
+ * Sysctl handler for "mpi_dump": writing the value 1 triggers an MPI
+ * core dump via qls_mpi_core_dump(). Reads simply return the handled int.
+ */
+static int
+qls_syctl_mpi_dump(SYSCTL_HANDLER_ARGS)
+{
+	qla_host_t *ha;
+	int error, value;
+
+	error = sysctl_handle_int(oidp, &value, 0, req);
+
+	if ((error != 0) || (req->newptr == NULL))
+		return (error);
+
+	if (value == 1) {
+		ha = (qla_host_t *)arg1;
+		qls_mpi_core_dump(ha);
+	}
+
+	return (error);
+}
+
+/*
+ * Sysctl handler for "link_status": writing the value 1 queries the
+ * firmware for the current link status and firmware version info.
+ */
+static int
+qls_syctl_link_status(SYSCTL_HANDLER_ARGS)
+{
+	qla_host_t *ha;
+	int error, value;
+
+	error = sysctl_handle_int(oidp, &value, 0, req);
+
+	if ((error != 0) || (req->newptr == NULL))
+		return (error);
+
+	if (value == 1) {
+		ha = (qla_host_t *)arg1;
+		qls_mbx_get_link_status(ha);
+		qls_mbx_about_fw(ha);
+	}
+
+	return (error);
+}
+
+/*
+ * Name: qls_hw_add_sysctls
+ * Function: Registers this device's sysctl nodes: read-only rx/tx ring
+ *	counts, plus writable triggers for an MPI core dump and a link
+ *	status query. Also fixes the ring counts to their maximums.
+ */
+void
+qls_hw_add_sysctls(qla_host_t *ha)
+{
+	device_t dev = ha->pci_dev;
+
+	ha->num_rx_rings = MAX_RX_RINGS;
+	ha->num_tx_rings = MAX_TX_RINGS;
+
+	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+	    OID_AUTO, "num_rx_rings", CTLFLAG_RD, &ha->num_rx_rings,
+	    ha->num_rx_rings, "Number of Completion Queues");
+
+	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+	    OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->num_tx_rings,
+	    ha->num_tx_rings, "Number of Transmit Rings");
+
+	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+	    OID_AUTO, "mpi_dump", CTLTYPE_INT | CTLFLAG_RW,
+	    (void *)ha, 0, qls_syctl_mpi_dump, "I", "MPI Dump");
+
+	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+	    OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW,
+	    (void *)ha, 0, qls_syctl_link_status, "I", "Link Status");
+}
+
+/*
+ * Name: qls_free_dma
+ * Function: Frees the DMA'able memory allocated in qls_alloc_dma()
+ */
+void
+qls_free_dma(qla_host_t *ha)
+{
+ qls_free_rss_dma(ha);
+ qls_free_mpi_dma(ha);
+ qls_free_tx_dma(ha);
+ qls_free_rx_dma(ha);
+ return;
+}
+
+/*
+ * Name: qls_alloc_dma
+ * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
+ */
+int
+qls_alloc_dma(qla_host_t *ha)
+{
+ if (qls_alloc_rx_dma(ha))
+ return (-1);
+
+ if (qls_alloc_tx_dma(ha)) {
+ qls_free_rx_dma(ha);
+ return (-1);
+ }
+
+ if (qls_alloc_mpi_dma(ha)) {
+ qls_free_tx_dma(ha);
+ qls_free_rx_dma(ha);
+ return (-1);
+ }
+
+ if (qls_alloc_rss_dma(ha)) {
+ qls_free_mpi_dma(ha);
+ qls_free_tx_dma(ha);
+ qls_free_rx_dma(ha);
+ return (-1);
+ }
+
+ return (0);
+}
+
+
+/*
+ * Polls the MAC Protocol Address Index register up to 3 times (100us
+ * apart) for the requested ready bit(s); flags recovery and returns -1
+ * on timeout, 0 once any requested bit is set.
+ */
+static int
+qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op)
+{
+	uint32_t data32;
+	int i;
+
+	for (i = 0; i < 3; i++) {
+		data32 = READ_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX);
+
+		if (data32 & op)
+			return (0);
+
+		QLA_USEC_DELAY(100);
+	}
+
+	ha->qla_initiate_recovery = 1;
+	return (-1);
+}
+
+/*
+ * Name: qls_config_unicast_mac_addr
+ * Function: binds/unbinds a unicast MAC address to the interface.
+ */
+static int
+qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac)
+{
+ int ret = 0;
+ uint32_t mac_upper = 0;
+ uint32_t mac_lower = 0;
+ uint32_t value = 0, index;
+
+ if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES,
+ Q81_CTL_SEM_SET_MAC_SERDES)) {
+ QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
+ return(-1);
+ }
+
+ if (add_mac) {
+ mac_upper = (ha->mac_addr[0] << 8) | ha->mac_addr[1];
+ mac_lower = (ha->mac_addr[2] << 24) | (ha->mac_addr[3] << 16) |
+ (ha->mac_addr[4] << 8) | ha->mac_addr[5];
+ }
+ ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
+ if (ret)
+ goto qls_config_unicast_mac_addr_exit;
+
+ index = 128 * (ha->pci_func & 0x1); /* index */
+
+ value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
+ Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC;
+
+ WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
+ WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower);
+
+ ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
+ if (ret)
+ goto qls_config_unicast_mac_addr_exit;
+
+ value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
+ Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x1;
+
+ WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
+ WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper);
+
+ ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
+ if (ret)
+ goto qls_config_unicast_mac_addr_exit;
+
+ value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
+ Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x2;
+
+ WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
+
+ value = Q81_CAM_MAC_OFF2_ROUTE_NIC |
+ ((ha->pci_func & 0x1) << Q81_CAM_MAC_OFF2_FUNC_SHIFT) |
+ (0 << Q81_CAM_MAC_OFF2_CQID_SHIFT);
+
+ WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, value);
+
+qls_config_unicast_mac_addr_exit:
+ qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES);
+ return (ret);
+}
+
+/*
+ * Name: qls_config_mcast_mac_addr
+ * Function: binds/unbinds a multicast MAC address to the interface.
+ */
+static int
+qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
+ uint32_t index)
+{
+ int ret = 0;
+ uint32_t mac_upper = 0;
+ uint32_t mac_lower = 0;
+ uint32_t value = 0;
+
+ if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES,
+ Q81_CTL_SEM_SET_MAC_SERDES)) {
+ QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
+ return(-1);
+ }
+
+ if (add_mac) {
+ mac_upper = (mac_addr[0] << 8) | mac_addr[1];
+ mac_lower = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5];
+ }
+ ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
+ if (ret)
+ goto qls_config_mcast_mac_addr_exit;
+
+ value = Q81_CTL_MAC_PROTO_AI_E |
+ (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
+ Q81_CTL_MAC_PROTO_AI_TYPE_MCAST ;
+
+ WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
+ WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower);
+
+ ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
+ if (ret)
+ goto qls_config_mcast_mac_addr_exit;
+
+ value = Q81_CTL_MAC_PROTO_AI_E |
+ (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
+ Q81_CTL_MAC_PROTO_AI_TYPE_MCAST | 0x1;
+
+ WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
+ WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper);
+
+qls_config_mcast_mac_addr_exit:
+ qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES);
+
+ return (ret);
+}
+
+/*
+ * Routing Index/Data register helpers. These program the firmware
+ * routing table, which controls the MAC receive mode (e.g. Promiscuous
+ * and AllMulticast) and frame-to-queue routing.
+ */
+/*
+ * Polls the Routing Index register up to 3 times (100us apart) for the
+ * requested ready bit(s); flags recovery and returns -1 on timeout,
+ * 0 once any requested bit is set.
+ */
+static int
+qls_wait_for_route_idx_ready(qla_host_t *ha, uint32_t op)
+{
+	uint32_t data32;
+	int i;
+
+	for (i = 0; i < 3; i++) {
+		data32 = READ_REG32(ha, Q81_CTL_ROUTING_INDEX);
+
+		if (data32 & op)
+			return (0);
+
+		QLA_USEC_DELAY(100);
+	}
+
+	ha->qla_initiate_recovery = 1;
+	return (-1);
+}
+
+/*
+ * Writes one routing-table entry (index, data) once the Routing Index
+ * register reports ready. Caller must hold the RIDX/DATAREG semaphore.
+ */
+static int
+qls_load_route_idx_reg(qla_host_t *ha, uint32_t index, uint32_t data)
+{
+	int ret;
+
+	ret = qls_wait_for_route_idx_ready(ha, Q81_CTL_RI_MW);
+	if (ret != 0) {
+		device_printf(ha->pci_dev, "%s: [0x%08x, 0x%08x] failed\n",
+			__func__, index, data);
+		return (ret);
+	}
+
+	WRITE_REG32(ha, Q81_CTL_ROUTING_INDEX, index);
+	WRITE_REG32(ha, Q81_CTL_ROUTING_DATA, data);
+
+	return (ret);
+}
+
+/*
+ * Semaphore-wrapped variant of qls_load_route_idx_reg(): acquires the
+ * RIDX/DATAREG semaphore, writes the entry, releases the semaphore.
+ */
+static int
+qls_load_route_idx_reg_locked(qla_host_t *ha, uint32_t index, uint32_t data)
+{
+	int ret;
+
+	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
+		Q81_CTL_SEM_SET_RIDX_DATAREG)) {
+		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
+		return(-1);
+	}
+
+	ret = qls_load_route_idx_reg(ha, index, data);
+
+	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
+
+	return (ret);
+}
+
+/*
+ * Zeroes all 16 NIC-queue routing-table entries; stops at the first
+ * failure and returns its status.
+ */
+static int
+qls_clear_routing_table(qla_host_t *ha)
+{
+	int ret = 0;
+	int i;
+
+	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
+		Q81_CTL_SEM_SET_RIDX_DATAREG)) {
+		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
+		return(-1);
+	}
+
+	for (i = 0; i < 16; i++) {
+		ret = qls_load_route_idx_reg(ha,
+			(Q81_CTL_RI_TYPE_NICQMASK | (i << 8) |
+			 Q81_CTL_RI_DST_DFLTQ), 0);
+		if (ret != 0)
+			break;
+	}
+
+	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
+
+	return (ret);
+}
+
+/*
+ * Name: qls_set_promisc
+ * Function: Enables promiscuous mode by routing all valid packets to
+ *	the default queue.
+ */
+int
+qls_set_promisc(qla_host_t *ha)
+{
+	uint32_t index;
+
+	index = Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK |
+		Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ;
+
+	return (qls_load_route_idx_reg_locked(ha, index,
+			Q81_CTL_RD_VALID_PKT));
+}
+
+/*
+ * Name: qls_reset_promisc
+ * Function: Disables promiscuous mode by clearing the corresponding
+ *	routing-table entry. Best-effort: the register-load status is
+ *	deliberately discarded since the caller cannot act on it.
+ *	(Original kept it in a set-but-unused local `ret`.)
+ */
+void
+qls_reset_promisc(qla_host_t *ha)
+{
+	(void)qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK |
+		Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ), 0);
+}
+
+/*
+ * Name: qls_set_allmulti
+ * Function: Enables AllMulticast mode by routing all multicast packets
+ *	to the default queue.
+ */
+int
+qls_set_allmulti(qla_host_t *ha)
+{
+	uint32_t index;
+
+	index = Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK |
+		Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ;
+
+	return (qls_load_route_idx_reg_locked(ha, index, Q81_CTL_RD_MCAST));
+}
+
+/*
+ * Name: qls_reset_allmulti
+ * Function: Disables AllMulticast mode by clearing the corresponding
+ *	routing-table entry. Best-effort: the register-load status is
+ *	deliberately discarded since the caller cannot act on it.
+ *	(Original kept it in a set-but-unused local `ret`.)
+ */
+void
+qls_reset_allmulti(qla_host_t *ha)
+{
+	(void)qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK |
+		Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ), 0);
+}
+
+
+/*
+ * Name: qls_init_fw_routing_table
+ * Function: Clears and re-programs the firmware routing table:
+ *	drop errored frames; route broadcast, registered-multicast-match
+ *	and CAM-hit frames to the default queue; and, when more than one
+ *	rx ring exists, route RSS matches to the RSS queues.
+ */
+static int
+qls_init_fw_routing_table(qla_host_t *ha)
+{
+	int ret = 0;
+
+	ret = qls_clear_routing_table(ha);
+	if (ret)
+		return (-1);
+
+	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
+		Q81_CTL_SEM_SET_RIDX_DATAREG)) {
+		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
+		return(-1);
+	}
+
+	/* Errored frames: drop */
+	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DROP |
+			Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_ALL_ERROR),
+			Q81_CTL_RD_ERROR_PKT);
+	if (ret)
+		goto qls_init_fw_routing_table_exit;
+
+	/* Broadcast: default queue */
+	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
+			Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_BCAST),
+			Q81_CTL_RD_BCAST);
+	if (ret)
+		goto qls_init_fw_routing_table_exit;
+
+	/* RSS match: only meaningful with multiple rx rings */
+	if (ha->num_rx_rings > 1 ) {
+		ret = qls_load_route_idx_reg(ha,
+				(Q81_CTL_RI_E | Q81_CTL_RI_DST_RSS |
+				Q81_CTL_RI_TYPE_NICQMASK |
+				Q81_CTL_RI_IDX_RSS_MATCH),
+				Q81_CTL_RD_RSS_MATCH);
+		if (ret)
+			goto qls_init_fw_routing_table_exit;
+	}
+
+	/* Registered multicast match: default queue */
+	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
+			Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_MCAST_MATCH),
+			Q81_CTL_RD_MCAST_REG_MATCH);
+	if (ret)
+		goto qls_init_fw_routing_table_exit;
+
+	/* Unicast CAM hit: default queue */
+	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
+			Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_CAM_HIT),
+			Q81_CTL_RD_CAM_HIT);
+	if (ret)
+		goto qls_init_fw_routing_table_exit;
+
+qls_init_fw_routing_table_exit:
+	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
+	return (ret);
+}
+
+/*
+ * Name: qls_tx_tso_chksum
+ * Function: Fills in the TSO/checksum-offload fields of a Tx IOCB from
+ *	the packet headers in *mp. Returns 0 on success, -1 when the
+ *	ethertype is neither IPv4 nor IPv6 (caller then transmits the
+ *	frame without offload).
+ */
+static int
+qls_tx_tso_chksum(qla_host_t *ha, struct mbuf *mp, q81_tx_tso_t *tx_mac)
+{
+	struct ether_vlan_header *eh;
+	struct ip *ip;
+	struct ip6_hdr *ip6;
+	struct tcphdr *th;
+	uint32_t ehdrlen, ip_hlen;
+	int ret = 0;
+	uint16_t etype;
+	device_t dev;
+	uint8_t buf[sizeof(struct ip6_hdr)];
+
+	dev = ha->pci_dev;
+
+	/* Determine ethernet header length and the embedded ethertype. */
+	eh = mtod(mp, struct ether_vlan_header *);
+
+	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+		etype = ntohs(eh->evl_proto);
+	} else {
+		ehdrlen = ETHER_HDR_LEN;
+		etype = ntohs(eh->evl_encap_proto);
+	}
+
+	switch (etype) {
+		case ETHERTYPE_IP:
+			ip = (struct ip *)(mp->m_data + ehdrlen);
+
+			ip_hlen = sizeof (struct ip);
+
+			/*
+			 * NOTE(review): when the IP header is not contiguous
+			 * in the first mbuf, it is copied into the local
+			 * buf[]; the ip_sum/th_sum stores below then modify
+			 * that stack copy, not the packet data -- confirm
+			 * this is what the hardware offload expects.
+			 */
+			if (mp->m_len < (ehdrlen + ip_hlen)) {
+				m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
+				ip = (struct ip *)buf;
+			}
+			tx_mac->opcode = Q81_IOCB_TX_TSO;
+			tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV4 ;
+
+			/* Network-header / transport-header byte offsets. */
+			tx_mac->phdr_offsets = ehdrlen;
+
+			tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) <<
+							Q81_TX_TSO_PHDR_SHIFT);
+
+			ip->ip_sum = 0;
+
+			if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
+				/* LSO: seed the TCP pseudo-header checksum */
+				tx_mac->flags |= Q81_TX_TSO_FLAGS_LSO;
+
+				th = (struct tcphdr *)(ip + 1);
+
+				th->th_sum = in_pseudo(ip->ip_src.s_addr,
+						ip->ip_dst.s_addr,
+						htons(IPPROTO_TCP));
+				tx_mac->mss = mp->m_pkthdr.tso_segsz;
+				tx_mac->phdr_length = ip_hlen + ehdrlen +
+							(th->th_off << 2);
+				break;
+			}
+			/* Plain checksum offload (no LSO) */
+			tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC ;
+
+
+			if (ip->ip_p == IPPROTO_TCP) {
+				tx_mac->flags |= Q81_TX_TSO_FLAGS_TC;
+			} else if (ip->ip_p == IPPROTO_UDP) {
+				tx_mac->flags |= Q81_TX_TSO_FLAGS_UC;
+			}
+			break;
+
+		case ETHERTYPE_IPV6:
+			ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
+
+			ip_hlen = sizeof(struct ip6_hdr);
+
+			/* See the IPv4 NOTE(review) above re: stack copy. */
+			if (mp->m_len < (ehdrlen + ip_hlen)) {
+				m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
+					buf);
+				ip6 = (struct ip6_hdr *)buf;
+			}
+
+			tx_mac->opcode = Q81_IOCB_TX_TSO;
+			tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV6 ;
+			tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC ;
+
+			tx_mac->phdr_offsets = ehdrlen;
+			tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) <<
+							Q81_TX_TSO_PHDR_SHIFT);
+
+			if (ip6->ip6_nxt == IPPROTO_TCP) {
+				tx_mac->flags |= Q81_TX_TSO_FLAGS_TC;
+			} else if (ip6->ip6_nxt == IPPROTO_UDP) {
+				tx_mac->flags |= Q81_TX_TSO_FLAGS_UC;
+			}
+			break;
+
+		default:
+			ret = -1;
+			break;
+	}
+
+	return (ret);
+}
+
+#define QLA_TX_MIN_FREE 2
+/*
+ * Name: qls_hw_tx_done
+ * Function: Recomputes the number of free descriptors in tx ring
+ *	txr_idx from the done/next indices (ring wrap handled).
+ *	Returns 0 when more than QLA_TX_MIN_FREE descriptors remain,
+ *	-1 otherwise.
+ */
+int
+qls_hw_tx_done(qla_host_t *ha, uint32_t txr_idx)
+{
+	uint32_t done, next, avail;
+
+	done = ha->tx_ring[txr_idx].txr_done;
+	next = ha->tx_ring[txr_idx].txr_next;
+
+	if (done == next)
+		avail = NUM_TX_DESCRIPTORS;
+	else if (done > next)
+		avail = done - next;
+	else
+		avail = NUM_TX_DESCRIPTORS + done - next;
+
+	ha->tx_ring[txr_idx].txr_free = avail;
+
+	if (avail <= QLA_TX_MIN_FREE)
+		return (-1);
+
+	return (0);
+}
+
+/*
+ * Name: qls_hw_send
+ * Function: Transmits a packet. It first checks if the packet is a
+ * candidate for Large TCP Segment Offload and then for UDP/TCP checksum
+ * offload. If either of these creteria are not met, it is transmitted
+ * as a regular ethernet frame.
+ */
+int
+qls_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
+ uint32_t txr_next, struct mbuf *mp, uint32_t txr_idx)
+{
+ q81_tx_mac_t *tx_mac;
+ q81_txb_desc_t *tx_desc;
+ uint32_t total_length = 0;
+ uint32_t i;
+ device_t dev;
+ int ret = 0;
+
+ dev = ha->pci_dev;
+
+ total_length = mp->m_pkthdr.len;
+
+ if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
+ device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
+ __func__, total_length);
+ return (-1);
+ }
+
+ if (ha->tx_ring[txr_idx].txr_free <= (NUM_TX_DESCRIPTORS >> 2)) {
+ if (qls_hw_tx_done(ha, txr_idx)) {
+ device_printf(dev, "%s: tx_free[%d] = %d\n",
+ __func__, txr_idx,
+ ha->tx_ring[txr_idx].txr_free);
+ return (-1);
+ }
+ }
+
+ tx_mac = (q81_tx_mac_t *)&ha->tx_ring[txr_idx].wq_vaddr[txr_next];
+
+ bzero(tx_mac, sizeof(q81_tx_mac_t));
+
+ if ((mp->m_pkthdr.csum_flags &
+ (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO)) != 0) {
+
+ ret = qls_tx_tso_chksum(ha, mp, (q81_tx_tso_t *)tx_mac);
+ if (ret)
+ return (EINVAL);
+
+ if (mp->m_pkthdr.csum_flags & CSUM_TSO)
+ ha->tx_ring[txr_idx].tx_tso_frames++;
+ else
+ ha->tx_ring[txr_idx].tx_frames++;
+
+ } else {
+ tx_mac->opcode = Q81_IOCB_TX_MAC;
+ }
+
+ if (mp->m_flags & M_VLANTAG) {
+
+ tx_mac->vlan_tci = mp->m_pkthdr.ether_vtag;
+ tx_mac->vlan_off |= Q81_TX_MAC_VLAN_OFF_V;
+
+ ha->tx_ring[txr_idx].tx_vlan_frames++;
+ }
+
+ tx_mac->frame_length = total_length;
+
+ tx_mac->tid_lo = txr_next;
+
+ if (nsegs <= MAX_TX_MAC_DESC) {
+
+ QL_DPRINT2((dev, "%s: 1 [%d, %d]\n", __func__, total_length,
+ tx_mac->tid_lo));
+
+ for (i = 0; i < nsegs; i++) {
+ tx_mac->txd[i].baddr = segs->ds_addr;
+ tx_mac->txd[i].length = segs->ds_len;
+ segs++;
+ }
+ tx_mac->txd[(nsegs - 1)].flags = Q81_RXB_DESC_FLAGS_E;
+
+ } else {
+ QL_DPRINT2((dev, "%s: 2 [%d, %d]\n", __func__, total_length,
+ tx_mac->tid_lo));
+
+ tx_mac->txd[0].baddr =
+ ha->tx_ring[txr_idx].tx_buf[txr_next].oal_paddr;
+ tx_mac->txd[0].length =
+ nsegs * (sizeof(q81_txb_desc_t));
+ tx_mac->txd[0].flags = Q81_RXB_DESC_FLAGS_C;
+
+ tx_desc = ha->tx_ring[txr_idx].tx_buf[txr_next].oal_vaddr;
+
+ for (i = 0; i < nsegs; i++) {
+ tx_desc->baddr = segs->ds_addr;
+ tx_desc->length = segs->ds_len;
+
+ if (i == (nsegs -1))
+ tx_desc->flags = Q81_RXB_DESC_FLAGS_E;
+ else
+ tx_desc->flags = 0;
+
+ segs++;
+ tx_desc++;
+ }
+ }
+ txr_next = (txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
+ ha->tx_ring[txr_idx].txr_next = txr_next;
+
+ ha->tx_ring[txr_idx].txr_free--;
+
+ Q81_WR_WQ_PROD_IDX(txr_idx, txr_next);
+
+ return (0);
+}
+
+/*
+ * Name: qls_del_hw_if
+ * Function: Destroys the hardware specific entities corresponding to an
+ * Ethernet Interface
+ */
+void
+qls_del_hw_if(qla_host_t *ha)
+{
+ uint32_t value;
+ int i;
+ //int count;
+
+ if (ha->hw_init == 0) {
+ qls_hw_reset(ha);
+ return;
+ }
+
+ for (i = 0; i < ha->num_tx_rings; i++) {
+ Q81_SET_WQ_INVALID(i);
+ }
+ for (i = 0; i < ha->num_rx_rings; i++) {
+ Q81_SET_CQ_INVALID(i);
+ }
+
+ for (i = 0; i < ha->num_rx_rings; i++) {
+ Q81_DISABLE_INTR(ha, i); /* MSI-x i */
+ }
+
+ value = (Q81_CTL_INTRE_IHD << Q81_CTL_INTRE_MASK_SHIFT);
+ WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
+
+ value = (Q81_CTL_INTRE_EI << Q81_CTL_INTRE_MASK_SHIFT);
+ WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
+ ha->flags.intr_enable = 0;
+
+ qls_hw_reset(ha);
+
+ return;
+}
+
+/*
+ * Name: qls_init_hw_if
+ * Function: Creates the hardware specific entities corresponding to an
+ * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
+ * corresponding to the interface. Enables LRO if allowed.
+ */
+int
+qls_init_hw_if(qla_host_t *ha)
+{
+ device_t dev;
+ uint32_t value;
+ int ret = 0;
+ int i;
+
+
+ QL_DPRINT2((ha->pci_dev, "%s:enter\n", __func__));
+
+ dev = ha->pci_dev;
+
+ ret = qls_hw_reset(ha);
+ if (ret)
+ goto qls_init_hw_if_exit;
+
+ ha->vm_pgsize = 4096;
+
+ /* Enable FAE and EFE bits in System Register */
+ value = Q81_CTL_SYSTEM_ENABLE_FAE | Q81_CTL_SYSTEM_ENABLE_EFE;
+ value = (value << Q81_CTL_SYSTEM_MASK_SHIFT) | value;
+
+ WRITE_REG32(ha, Q81_CTL_SYSTEM, value);
+
+ /* Set Default Completion Queue_ID in NIC Rcv Configuration Register */
+ value = (Q81_CTL_NIC_RCVC_DCQ_MASK << Q81_CTL_NIC_RCVC_MASK_SHIFT);
+ WRITE_REG32(ha, Q81_CTL_NIC_RCV_CONFIG, value);
+
+ /* Function Specific Control Register - Set Page Size and Enable NIC */
+ value = Q81_CTL_FUNC_SPECIFIC_FE |
+ Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_MASK |
+ Q81_CTL_FUNC_SPECIFIC_EPC_O |
+ Q81_CTL_FUNC_SPECIFIC_EPC_I |
+ Q81_CTL_FUNC_SPECIFIC_EC;
+ value = (value << Q81_CTL_FUNC_SPECIFIC_MASK_SHIFT) |
+ Q81_CTL_FUNC_SPECIFIC_FE |
+ Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_4K |
+ Q81_CTL_FUNC_SPECIFIC_EPC_O |
+ Q81_CTL_FUNC_SPECIFIC_EPC_I |
+ Q81_CTL_FUNC_SPECIFIC_EC;
+
+ WRITE_REG32(ha, Q81_CTL_FUNC_SPECIFIC, value);
+
+ /* Interrupt Mask Register */
+ value = Q81_CTL_INTRM_PI;
+ value = (value << Q81_CTL_INTRM_MASK_SHIFT) | value;
+
+ WRITE_REG32(ha, Q81_CTL_INTR_MASK, value);
+
+ /* Initialiatize Completion Queue */
+ for (i = 0; i < ha->num_rx_rings; i++) {
+ ret = qls_init_comp_queue(ha, i);
+ if (ret)
+ goto qls_init_hw_if_exit;
+ }
+
+ if (ha->num_rx_rings > 1 ) {
+ ret = qls_init_rss(ha);
+ if (ret)
+ goto qls_init_hw_if_exit;
+ }
+
+ /* Initialize Work Queue */
+
+ for (i = 0; i < ha->num_tx_rings; i++) {
+ ret = qls_init_work_queue(ha, i);
+ if (ret)
+ goto qls_init_hw_if_exit;
+ }
+
+ if (ret)
+ goto qls_init_hw_if_exit;
+
+ /* Set up CAM RAM with MAC Address */
+ ret = qls_config_unicast_mac_addr(ha, 1);
+ if (ret)
+ goto qls_init_hw_if_exit;
+
+ ret = qls_hw_add_all_mcast(ha);
+ if (ret)
+ goto qls_init_hw_if_exit;
+
+ /* Initialize Firmware Routing Table */
+ ret = qls_init_fw_routing_table(ha);
+ if (ret)
+ goto qls_init_hw_if_exit;
+
+ /* Get Chip Revision ID */
+ ha->rev_id = READ_REG32(ha, Q81_CTL_REV_ID);
+
+ /* Enable Global Interrupt */
+ value = Q81_CTL_INTRE_EI;
+ value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value;
+
+ WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
+
+ /* Enable Interrupt Handshake Disable */
+ value = Q81_CTL_INTRE_IHD;
+ value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value;
+
+ WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
+
+ /* Enable Completion Interrupt */
+
+ ha->flags.intr_enable = 1;
+
+ for (i = 0; i < ha->num_rx_rings; i++) {
+ Q81_ENABLE_INTR(ha, i); /* MSI-x i */
+ }
+
+ ha->hw_init = 1;
+
+ qls_mbx_get_link_status(ha);
+
+ QL_DPRINT2((ha->pci_dev, "%s:rxr [0x%08x]\n", __func__,
+ ha->rx_ring[0].cq_db_offset));
+ QL_DPRINT2((ha->pci_dev, "%s:txr [0x%08x]\n", __func__,
+ ha->tx_ring[0].wq_db_offset));
+
+ for (i = 0; i < ha->num_rx_rings; i++) {
+
+ Q81_WR_CQ_CONS_IDX(i, 0);
+ Q81_WR_LBQ_PROD_IDX(i, ha->rx_ring[i].lbq_in);
+ Q81_WR_SBQ_PROD_IDX(i, ha->rx_ring[i].sbq_in);
+
+ QL_DPRINT2((dev, "%s: [wq_idx, cq_idx, lbq_idx, sbq_idx]"
+ "[0x%08x, 0x%08x, 0x%08x, 0x%08x]\n", __func__,
+ Q81_RD_WQ_IDX(i), Q81_RD_CQ_IDX(i), Q81_RD_LBQ_IDX(i),
+ Q81_RD_SBQ_IDX(i)));
+ }
+
+ for (i = 0; i < ha->num_rx_rings; i++) {
+ Q81_SET_CQ_VALID(i);
+ }
+
+qls_init_hw_if_exit:
+ QL_DPRINT2((ha->pci_dev, "%s:exit\n", __func__));
+ return (ret);
+}
+
+/*
+ * Polls the Configuration register up to 3 times (100us apart) until
+ * the masked bits equal `value`; flags recovery and returns -1 on
+ * timeout, 0 on success.
+ */
+static int
+qls_wait_for_config_reg_bits(qla_host_t *ha, uint32_t bits, uint32_t value)
+{
+	uint32_t data32;
+	int i;
+
+	for (i = 0; i < 3; i++) {
+		data32 = READ_REG32(ha, Q81_CTL_CONFIG);
+
+		if ((data32 & bits) == value)
+			return (0);
+
+		QLA_USEC_DELAY(100);
+	}
+
+	ha->qla_initiate_recovery = 1;
+	device_printf(ha->pci_dev, "%s: failed\n", __func__);
+	return (-1);
+}
+
+/*
+ * 40-byte RSS hash key; copied into the RSS ICB by qls_init_rss()
+ * (all 40 bytes for the IPv6 key, first 16 bytes for the IPv4 key).
+ */
+static uint8_t q81_hash_key[] = {
+			0xda, 0x56, 0x5a, 0x6d,
+			0xc2, 0x0e, 0x5b, 0x25,
+			0x3d, 0x25, 0x67, 0x41,
+			0xb0, 0x8f, 0xa3, 0x43,
+			0xcb, 0x2b, 0xca, 0xd0,
+			0xb4, 0x30, 0x7b, 0xae,
+			0xa3, 0x2d, 0xcb, 0x77,
+			0x0c, 0xf2, 0x30, 0x80,
+			0x3b, 0xb7, 0x42, 0x6a,
+			0xfa, 0x01, 0xac, 0xbe };
+
+/*
+ * Name: qls_init_rss
+ * Function: Builds the RSS Initialization Control Block (hash-type
+ *	flags, an indirection table spread round-robin across the rx
+ *	rings, and the IPv4/IPv6 hash keys) and loads it into the chip
+ *	via the ICB access registers; waits for the load to complete.
+ */
+static int
+qls_init_rss(qla_host_t *ha)
+{
+	q81_rss_icb_t	*rss_icb;
+	int		ret = 0;
+	int		i;
+	uint32_t	value;
+
+	rss_icb = ha->rss_dma.dma_b;
+
+	bzero(rss_icb, sizeof (q81_rss_icb_t));
+
+	rss_icb->flags_base_cq_num = Q81_RSS_ICB_FLAGS_L4K |
+				Q81_RSS_ICB_FLAGS_L6K | Q81_RSS_ICB_FLAGS_LI |
+				Q81_RSS_ICB_FLAGS_LB | Q81_RSS_ICB_FLAGS_LM |
+				Q81_RSS_ICB_FLAGS_RT4 | Q81_RSS_ICB_FLAGS_RT6;
+
+	rss_icb->mask = 0x3FF;
+
+	/* Spread indirection-table entries across the rx rings
+	 * (num_rx_rings is assumed to be a power of two here). */
+	for (i = 0; i < Q81_RSS_ICB_NUM_INDTBL_ENTRIES; i++) {
+		rss_icb->cq_id[i] = (i & (ha->num_rx_rings - 1));
+	}
+
+	memcpy(rss_icb->ipv6_rss_hash_key, q81_hash_key, 40);
+	memcpy(rss_icb->ipv4_rss_hash_key, q81_hash_key, 16);
+
+	/* Wait for any previous RSS-ICB load to finish. */
+	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0);
+
+	if (ret)
+		goto qls_init_rss_exit;
+
+	ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);
+
+	if (ret) {
+		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
+		goto qls_init_rss_exit;
+	}
+
+	/* Point the ICB access registers at the RSS ICB in host memory. */
+	value = (uint32_t)ha->rss_dma.dma_addr;
+	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);
+
+	value = (uint32_t)(ha->rss_dma.dma_addr >> 32);
+	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);
+
+	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);
+
+	/* Kick off the load and wait for the LR bit to clear. */
+	value = (Q81_CTL_CONFIG_LR << Q81_CTL_CONFIG_MASK_SHIFT) |
+			Q81_CTL_CONFIG_LR;
+
+	WRITE_REG32(ha, Q81_CTL_CONFIG, value);
+
+	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0);
+
+qls_init_rss_exit:
+	return (ret);
+}
+
+/*
+ * Name: qls_init_comp_queue
+ * Function: Builds the Completion Queue ICB for rx ring `cid` (CQ base,
+ *	consumer-index area, large/small buffer queues, interrupt
+ *	coalescing parameters), loads it into the chip and resets the
+ *	ring's software indices.
+ */
+static int
+qls_init_comp_queue(qla_host_t *ha, int cid)
+{
+	q81_cq_icb_t	*cq_icb;
+	qla_rx_ring_t	*rxr;
+	int		ret = 0;
+	uint32_t	value;
+
+	rxr = &ha->rx_ring[cid];
+
+	rxr->cq_db_offset = ha->vm_pgsize * (128 + cid);
+
+	cq_icb = rxr->cq_icb_vaddr;
+
+	bzero(cq_icb, sizeof (q81_cq_icb_t));
+
+	cq_icb->msix_vector = cid;
+	cq_icb->flags = Q81_CQ_ICB_FLAGS_LC |
+			Q81_CQ_ICB_FLAGS_LI |
+			Q81_CQ_ICB_FLAGS_LL |
+			Q81_CQ_ICB_FLAGS_LS |
+			Q81_CQ_ICB_FLAGS_LV;
+
+	cq_icb->length_v = NUM_CQ_ENTRIES;
+
+	cq_icb->cq_baddr_lo = (rxr->cq_base_paddr & 0xFFFFFFFF);
+	cq_icb->cq_baddr_hi = (rxr->cq_base_paddr >> 32) & 0xFFFFFFFF;
+
+	cq_icb->cqi_addr_lo = (rxr->cqi_paddr & 0xFFFFFFFF);
+	cq_icb->cqi_addr_hi = (rxr->cqi_paddr >> 32) & 0xFFFFFFFF;
+
+	/* Interrupt coalescing: per-packet delay and overall delay. */
+	cq_icb->pkt_idelay = 10;
+	cq_icb->idelay = 100;
+
+	cq_icb->lbq_baddr_lo = (rxr->lbq_addr_tbl_paddr & 0xFFFFFFFF);
+	cq_icb->lbq_baddr_hi = (rxr->lbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF;
+
+	cq_icb->lbq_bsize = QLA_LGB_SIZE;
+	cq_icb->lbq_length = QLA_NUM_LGB_ENTRIES;
+
+	cq_icb->sbq_baddr_lo = (rxr->sbq_addr_tbl_paddr & 0xFFFFFFFF);
+	cq_icb->sbq_baddr_hi = (rxr->sbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF;
+
+	cq_icb->sbq_bsize = (uint16_t)ha->msize;
+	cq_icb->sbq_length = QLA_NUM_SMB_ENTRIES;
+
+	QL_DUMP_CQ(ha);
+
+	/* Wait for any previous CQ-ICB load to finish. */
+	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0);
+
+	if (ret)
+		goto qls_init_comp_queue_exit;
+
+	ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);
+
+	if (ret) {
+		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
+		goto qls_init_comp_queue_exit;
+	}
+
+	/* Point the ICB access registers at this CQ ICB in host memory. */
+	value = (uint32_t)rxr->cq_icb_paddr;
+	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);
+
+	value = (uint32_t)(rxr->cq_icb_paddr >> 32);
+	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);
+
+	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);
+
+	/* Kick off the load for queue `cid` and wait for LCQ to clear. */
+	value = Q81_CTL_CONFIG_LCQ | Q81_CTL_CONFIG_Q_NUM_MASK;
+	value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LCQ;
+	value |= (cid << Q81_CTL_CONFIG_Q_NUM_SHIFT);
+	WRITE_REG32(ha, Q81_CTL_CONFIG, value);
+
+	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0);
+
+	/* Reset the software ring indices. */
+	rxr->cq_next = 0;
+	rxr->lbq_next = rxr->lbq_free = 0;
+	rxr->sbq_next = rxr->sbq_free = 0;
+	rxr->rx_free = rxr->rx_next = 0;
+	rxr->lbq_in = (QLA_NUM_LGB_ENTRIES - 1) & ~0xF;
+	rxr->sbq_in = (QLA_NUM_SMB_ENTRIES - 1) & ~0xF;
+
+qls_init_comp_queue_exit:
+	return (ret);
+}
+
+/*
+ * Name: qls_init_work_queue
+ * Function: Builds the Work Queue ICB for tx ring `wid` (WQ base,
+ *	consumer-index area, doorbell location), loads it into the chip
+ *	and resets the ring's software indices.
+ */
+static int
+qls_init_work_queue(qla_host_t *ha, int wid)
+{
+	q81_wq_icb_t	*wq_icb;
+	qla_tx_ring_t	*txr;
+	int		ret = 0;
+	uint32_t	value;
+
+	txr = &ha->tx_ring[wid];
+
+	/*
+	 * NOTE(review): wq_db_addr stores a raw doorbell address (pci_reg1
+	 * plus a page-sized offset) cast to (struct resource *) -- confirm
+	 * the field's declared type is intentional.
+	 */
+	txr->wq_db_addr = (struct resource *)((uint8_t *)ha->pci_reg1
+						+ (ha->vm_pgsize * wid));
+
+	txr->wq_db_offset = (ha->vm_pgsize * wid);
+
+	wq_icb = txr->wq_icb_vaddr;
+	bzero(wq_icb, sizeof (q81_wq_icb_t));
+
+	wq_icb->length_v = NUM_TX_DESCRIPTORS  |
+				Q81_WQ_ICB_VALID;
+
+	wq_icb->flags = Q81_WQ_ICB_FLAGS_LO | Q81_WQ_ICB_FLAGS_LI |
+			Q81_WQ_ICB_FLAGS_LB | Q81_WQ_ICB_FLAGS_LC;
+
+	wq_icb->wqcqid_rss = wid;
+
+	wq_icb->baddr_lo = txr->wq_paddr & 0xFFFFFFFF;
+	wq_icb->baddr_hi = (txr->wq_paddr >> 32)& 0xFFFFFFFF;
+
+	wq_icb->ci_addr_lo = txr->txr_cons_paddr & 0xFFFFFFFF;
+	wq_icb->ci_addr_hi = (txr->txr_cons_paddr >> 32)& 0xFFFFFFFF;
+
+	/* Wait for any previous WQ-ICB load to finish. */
+	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0);
+
+	if (ret)
+		goto qls_init_wq_exit;
+
+	ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);
+
+	if (ret) {
+		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
+		goto qls_init_wq_exit;
+	}
+
+	/* Point the ICB access registers at this WQ ICB in host memory. */
+	value = (uint32_t)txr->wq_icb_paddr;
+	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);
+
+	value = (uint32_t)(txr->wq_icb_paddr >> 32);
+	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);
+
+	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);
+
+	/* Kick off the load for queue `wid` and wait for LRQ to clear. */
+	value = Q81_CTL_CONFIG_LRQ | Q81_CTL_CONFIG_Q_NUM_MASK;
+	value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LRQ;
+	value |= (wid << Q81_CTL_CONFIG_Q_NUM_SHIFT);
+	WRITE_REG32(ha, Q81_CTL_CONFIG, value);
+
+	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0);
+
+	/* Reset the software ring indices. */
+	txr->txr_free = NUM_TX_DESCRIPTORS;
+	txr->txr_next = 0;
+	txr->txr_done = 0;
+
+qls_init_wq_exit:
+	return (ret);
+}
+
+/*
+ * Re-programs every non-zero entry of the software multicast table into
+ * the hardware; stops early once ha->nmcast entries have been written.
+ * Returns 0 on success, -1 on the first programming failure.
+ */
+static int
+qls_hw_add_all_mcast(qla_host_t *ha)
+{
+	int i, j, remaining;
+
+	remaining = ha->nmcast;
+
+	for (i = 0; (i < Q8_MAX_NUM_MULTICAST_ADDRS) && remaining; i++) {
+		int in_use = 0;
+
+		for (j = 0; j < Q8_MAC_ADDR_LEN; j++) {
+			if (ha->mcast[i].addr[j] != 0) {
+				in_use = 1;
+				break;
+			}
+		}
+
+		if (!in_use)
+			continue;
+
+		if (qls_config_mcast_mac_addr(ha, ha->mcast[i].addr, 1, i)) {
+			device_printf(ha->pci_dev, "%s: failed\n",
+				__func__);
+			return (-1);
+		}
+
+		remaining--;
+	}
+	return (0);
+}
+
+/*
+ * Adds one multicast address: no-op if already present, otherwise
+ * programs it into the first free (all-zero) table slot. Returns -1
+ * only if the hardware programming fails; 0 otherwise (including the
+ * table-full case).
+ */
+static int
+qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta)
+{
+	int i, j;
+
+	/* Already registered? Nothing to do. */
+	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
+		if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
+			return (0);
+	}
+
+	/* Find the first free (all-zero) slot and program it. */
+	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
+		int in_use = 0;
+
+		for (j = 0; j < Q8_MAC_ADDR_LEN; j++) {
+			if (ha->mcast[i].addr[j] != 0) {
+				in_use = 1;
+				break;
+			}
+		}
+
+		if (in_use)
+			continue;
+
+		if (qls_config_mcast_mac_addr(ha, mta, 1, i))
+			return (-1);
+
+		bcopy(mta, ha->mcast[i].addr, Q8_MAC_ADDR_LEN);
+		ha->nmcast++;
+
+		return (0);
+	}
+	return (0);
+}
+
+/*
+ * Deletes one multicast address: clears the matching hardware slot and
+ * zeroes the software table entry. Returns -1 only if the hardware
+ * programming fails; 0 otherwise (including address-not-found).
+ */
+static int
+qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta)
+{
+	int i;
+
+	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
+		if (QL_MAC_CMP(ha->mcast[i].addr, mta) != 0)
+			continue;
+
+		if (qls_config_mcast_mac_addr(ha, mta, 0, i))
+			return (-1);
+
+		bzero(ha->mcast[i].addr, Q8_MAC_ADDR_LEN);
+		ha->nmcast--;
+
+		return (0);
+	}
+	return (0);
+}
+
+/*
+ * Name: qls_hw_set_multi
+ * Function: Sets the Multicast Addresses provided the host O.S into the
+ *	hardware (for the given interface)
+ */
+void
+qls_hw_set_multi(qla_host_t *ha, uint8_t *mta, uint32_t mcnt,
+	uint32_t add_mac)
+{
+	uint32_t n;
+
+	/* Walk the caller's flat list of mcnt 6-byte MAC addresses. */
+	for (n = 0; n < mcnt; n++, mta += Q8_MAC_ADDR_LEN) {
+		int rv;
+
+		rv = add_mac ? qls_hw_add_mcast(ha, mta) :
+			qls_hw_del_mcast(ha, mta);
+
+		/* Stop on the first failure, as before. */
+		if (rv)
+			break;
+	}
+	return;
+}
+
+/*
+ * qls_update_link_state: sample the physical-link status bit for this
+ * PCI function and notify the network stack on a transition.
+ */
+void
+qls_update_link_state(qla_host_t *ha)
+{
+	uint32_t status;
+	uint32_t prev;
+
+	/* An interface that is not running is by definition link-down. */
+	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+		ha->link_up = 0;
+		return;
+	}
+
+	status = READ_REG32(ha, Q81_CTL_STATUS);
+	prev = ha->link_up;
+
+	/* Each PCI function has its own physical-link status bit. */
+	if (ha->pci_func & 0x1)
+		ha->link_up = (status & Q81_CTL_STATUS_PL1) ? 1 : 0;
+	else
+		ha->link_up = (status & Q81_CTL_STATUS_PL0) ? 1 : 0;
+
+	/* Notify the stack only on an actual transition. */
+	if (prev != ha->link_up) {
+		if_link_state_change(ha->ifp,
+			(ha->link_up ? LINK_STATE_UP : LINK_STATE_DOWN));
+	}
+	return;
+}
+
+/*
+ * qls_free_tx_ring_dma: release the two DMA buffers of one tx ring;
+ * each is freed only if its allocated flag is set, so this is safe
+ * to call on a partially-allocated ring.
+ */
+static void
+qls_free_tx_ring_dma(qla_host_t *ha, int r_idx)
+{
+	if (ha->tx_ring[r_idx].flags.wq_dma != 0) {
+		qls_free_dmabuf(ha, &ha->tx_ring[r_idx].wq_dma);
+		ha->tx_ring[r_idx].flags.wq_dma = 0;
+	}
+
+	if (ha->tx_ring[r_idx].flags.privb_dma != 0) {
+		qls_free_dmabuf(ha, &ha->tx_ring[r_idx].privb_dma);
+		ha->tx_ring[r_idx].flags.privb_dma = 0;
+	}
+}
+
+/*
+ * qls_free_tx_dma: tear down all transmit DMA state — per-ring
+ * buffers, per-descriptor DMA maps, and finally the shared tx tag.
+ */
+static void
+qls_free_tx_dma(qla_host_t *ha)
+{
+	int		r, d;
+	qla_tx_buf_t	*buf;
+
+	for (r = 0; r < ha->num_tx_rings; r++) {
+		qls_free_tx_ring_dma(ha, r);
+
+		/* Destroy whichever per-descriptor maps were created. */
+		for (d = 0; d < NUM_TX_DESCRIPTORS; d++) {
+			buf = &ha->tx_ring[r].tx_buf[d];
+
+			if (buf->map != NULL)
+				bus_dmamap_destroy(ha->tx_tag, buf->map);
+		}
+	}
+
+	if (ha->tx_tag != NULL) {
+		bus_dma_tag_destroy(ha->tx_tag);
+		ha->tx_tag = NULL;
+	}
+}
+
+/*
+ * qls_alloc_tx_ring_dma: allocate the DMA memory for one tx ring.
+ * Two buffers are allocated: the work queue (one q81_tx_cmd_t per
+ * descriptor) and a private buffer that is carved up below into the
+ * wq ICB, the consumer-index word, and one OAL block per descriptor.
+ * On failure, buffers allocated so far stay flagged so the caller can
+ * clean up with qls_free_tx_ring_dma(). Returns 0 on success.
+ */
+static int
+qls_alloc_tx_ring_dma(qla_host_t *ha, int ridx)
+{
+	int ret = 0, i;
+	uint8_t *v_addr;
+	bus_addr_t p_addr;
+	qla_tx_buf_t *txb;
+	device_t dev = ha->pci_dev;
+
+	/* Work queue: NUM_TX_DESCRIPTORS hw tx commands, 8-byte aligned. */
+	ha->tx_ring[ridx].wq_dma.alignment = 8;
+	ha->tx_ring[ridx].wq_dma.size =
+		NUM_TX_DESCRIPTORS * (sizeof (q81_tx_cmd_t));
+
+	ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].wq_dma);
+
+	if (ret) {
+		device_printf(dev, "%s: [%d] txr failed\n", __func__, ridx);
+		goto qls_alloc_tx_ring_dma_exit;
+	}
+	ha->tx_ring[ridx].flags.wq_dma = 1;
+
+	/* Private buffer: subdivided below (ICB / cons index / OALs). */
+	ha->tx_ring[ridx].privb_dma.alignment = 8;
+	ha->tx_ring[ridx].privb_dma.size = QLA_TX_PRIVATE_BSIZE;
+
+	ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].privb_dma);
+
+	if (ret) {
+		device_printf(dev, "%s: [%d] oalb failed\n", __func__, ridx);
+		goto qls_alloc_tx_ring_dma_exit;
+	}
+
+	ha->tx_ring[ridx].flags.privb_dma = 1;
+
+	ha->tx_ring[ridx].wq_vaddr = ha->tx_ring[ridx].wq_dma.dma_b;
+	ha->tx_ring[ridx].wq_paddr = ha->tx_ring[ridx].wq_dma.dma_addr;
+
+	v_addr = ha->tx_ring[ridx].privb_dma.dma_b;
+	p_addr = ha->tx_ring[ridx].privb_dma.dma_addr;
+
+	/* The wq ICB lives at the start of the private buffer. */
+	ha->tx_ring[ridx].wq_icb_vaddr = v_addr;
+	ha->tx_ring[ridx].wq_icb_paddr = p_addr;
+
+	/* Consumer-index word sits half a page into the private buffer. */
+	ha->tx_ring[ridx].txr_cons_vaddr =
+		(uint32_t *)(v_addr + (PAGE_SIZE >> 1));
+	ha->tx_ring[ridx].txr_cons_paddr = p_addr + (PAGE_SIZE >> 1);
+
+	v_addr = v_addr + (PAGE_SIZE >> 1);
+	p_addr = p_addr + (PAGE_SIZE >> 1);
+
+	/*
+	 * Hand out one QLA_OAL_BLK_SIZE chunk per tx descriptor.
+	 * NOTE(review): the first OAL block starts at the same offset as
+	 * the consumer-index word above — correct only if the OAL layout
+	 * accounts for that word; confirm against QLA_TX_PRIVATE_BSIZE.
+	 */
+	txb = ha->tx_ring[ridx].tx_buf;
+
+	for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {
+
+		txb[i].oal_vaddr = v_addr;
+		txb[i].oal_paddr = p_addr;
+
+		v_addr = v_addr + QLA_OAL_BLK_SIZE;
+		p_addr = p_addr + QLA_OAL_BLK_SIZE;
+	}
+
+qls_alloc_tx_ring_dma_exit:
+	return (ret);
+}
+
+/*
+ * qls_alloc_tx_dma: create the shared tx DMA tag (up to
+ * QLA_MAX_SEGMENTS page-sized segments per TSO frame), then allocate
+ * every ring's DMA memory and one DMA map per tx descriptor.
+ * On any failure everything allocated so far is released via
+ * qls_free_tx_dma(). Returns 0 on success, errno/-1 otherwise.
+ */
+static int
+qls_alloc_tx_dma(qla_host_t *ha)
+{
+	int i, j;
+	int ret = 0;
+	qla_tx_buf_t *txb;
+
+	if (bus_dma_tag_create(NULL,    /* parent */
+		1, 0,    /* alignment, bounds */
+		BUS_SPACE_MAXADDR,       /* lowaddr */
+		BUS_SPACE_MAXADDR,       /* highaddr */
+		NULL, NULL,      /* filter, filterarg */
+		QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
+		QLA_MAX_SEGMENTS,        /* nsegments */
+		PAGE_SIZE,        /* maxsegsize */
+		BUS_DMA_ALLOCNOW,        /* flags */
+		NULL,    /* lockfunc */
+		NULL,    /* lockfuncarg */
+		&ha->tx_tag)) {
+		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
+			__func__);
+		return (ENOMEM);
+	}
+
+	for (i = 0; i < ha->num_tx_rings; i++) {
+
+		ret = qls_alloc_tx_ring_dma(ha, i);
+
+		if (ret) {
+			qls_free_tx_dma(ha);
+			break;
+		}
+
+		/* One map per descriptor, used to load outgoing mbufs. */
+		for (j = 0; j < NUM_TX_DESCRIPTORS; j++) {
+
+			txb = &ha->tx_ring[i].tx_buf[j];
+
+			ret = bus_dmamap_create(ha->tx_tag,
+				BUS_DMA_NOWAIT, &txb->map);
+			if (ret) {
+				ha->err_tx_dmamap_create++;
+				device_printf(ha->pci_dev,
+				"%s: bus_dmamap_create failed[%d, %d, %d]\n",
+				__func__, ret, i, j);
+
+				qls_free_tx_dma(ha);
+
+				return (ret);
+			}
+		}
+	}
+
+	return (ret);
+}
+
+/*
+ * qls_free_rss_dma: release the RSS table DMA buffer. Guarded by the
+ * rss_dma flag so a teardown path that runs before (or without) a
+ * successful allocation is harmless — consistent with
+ * qls_free_tx_ring_dma()/qls_free_rx_ring_dma(), which guard the
+ * same way. The previous version freed unconditionally.
+ */
+static void
+qls_free_rss_dma(qla_host_t *ha)
+{
+	if (ha->flags.rss_dma) {
+		qls_free_dmabuf(ha, &ha->rss_dma);
+		ha->flags.rss_dma = 0;
+	}
+}
+
+/*
+ * qls_alloc_rss_dma: allocate one page of DMA memory for the RSS
+ * table. Returns 0 and sets flags.rss_dma on success.
+ */
+static int
+qls_alloc_rss_dma(qla_host_t *ha)
+{
+	int rc;
+
+	ha->rss_dma.alignment = 4;
+	ha->rss_dma.size = PAGE_SIZE;
+
+	rc = qls_alloc_dmabuf(ha, &ha->rss_dma);
+
+	if (rc == 0)
+		ha->flags.rss_dma = 1;
+	else
+		device_printf(ha->pci_dev, "%s: failed\n", __func__);
+
+	return (rc);
+}
+
+/*
+ * qls_free_mpi_dma: release the MPI dump DMA buffer. Guarded by the
+ * mpi_dma flag so calling it without a prior successful allocation
+ * is harmless — consistent with the other flag-guarded free
+ * routines. The previous version freed unconditionally.
+ */
+static void
+qls_free_mpi_dma(qla_host_t *ha)
+{
+	if (ha->flags.mpi_dma) {
+		qls_free_dmabuf(ha, &ha->mpi_dma);
+		ha->flags.mpi_dma = 0;
+	}
+}
+
+/*
+ * qls_alloc_mpi_dma: allocate the 64KB (4 x 0x4000-byte) DMA buffer
+ * used to receive MPI RISC RAM dumps from the firmware.
+ * Returns 0 and sets flags.mpi_dma on success.
+ */
+static int
+qls_alloc_mpi_dma(qla_host_t *ha)
+{
+	int rc;
+
+	ha->mpi_dma.alignment = 4;
+	ha->mpi_dma.size = (0x4000 * 4);
+
+	rc = qls_alloc_dmabuf(ha, &ha->mpi_dma);
+
+	if (rc == 0)
+		ha->flags.mpi_dma = 1;
+	else
+		device_printf(ha->pci_dev, "%s: failed\n", __func__);
+
+	return (rc);
+}
+
+/*
+ * qls_free_rx_ring_dma: release the four DMA buffers of one rx ring
+ * (completion queue, large/small buffer queues, large buffers); each
+ * is freed only if its allocated flag is set.
+ */
+static void
+qls_free_rx_ring_dma(qla_host_t *ha, int ridx)
+{
+	if (ha->rx_ring[ridx].flags.cq_dma != 0) {
+		qls_free_dmabuf(ha, &ha->rx_ring[ridx].cq_dma);
+		ha->rx_ring[ridx].flags.cq_dma = 0;
+	}
+
+	if (ha->rx_ring[ridx].flags.lbq_dma != 0) {
+		qls_free_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma);
+		ha->rx_ring[ridx].flags.lbq_dma = 0;
+	}
+
+	if (ha->rx_ring[ridx].flags.sbq_dma != 0) {
+		qls_free_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma);
+		ha->rx_ring[ridx].flags.sbq_dma = 0;
+	}
+
+	if (ha->rx_ring[ridx].flags.lb_dma != 0) {
+		qls_free_dmabuf(ha, &ha->rx_ring[ridx].lb_dma);
+		ha->rx_ring[ridx].flags.lb_dma = 0;
+	}
+}
+
+/*
+ * qls_free_rx_dma: release every ring's rx DMA buffers, then the
+ * shared rx DMA tag.
+ */
+static void
+qls_free_rx_dma(qla_host_t *ha)
+{
+	int r;
+
+	for (r = 0; r < ha->num_rx_rings; r++)
+		qls_free_rx_ring_dma(ha, r);
+
+	if (ha->rx_tag != NULL) {
+		bus_dma_tag_destroy(ha->rx_tag);
+		ha->rx_tag = NULL;
+	}
+}
+
+/*
+ * qls_alloc_rx_ring_dma: allocate the DMA memory for one rx ring:
+ *   cq_dma  - completion queue plus one extra page, carved below into
+ *             the cq ICB and the cq index-register word;
+ *   lbq_dma - large buffer queue and its address table;
+ *   sbq_dma - small buffer queue and its address table;
+ *   lb_dma  - the large receive buffers themselves.
+ * Buffers allocated before a failure stay flagged so the caller can
+ * release them via qls_free_rx_ring_dma(). Returns 0 on success.
+ */
+static int
+qls_alloc_rx_ring_dma(qla_host_t *ha, int ridx)
+{
+	int i, ret = 0;
+	uint8_t *v_addr;
+	bus_addr_t p_addr;
+	volatile q81_bq_addr_e_t *bq_e;
+	device_t dev = ha->pci_dev;
+
+	ha->rx_ring[ridx].cq_dma.alignment = 128;
+	ha->rx_ring[ridx].cq_dma.size =
+		(NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t))) + PAGE_SIZE;
+
+	ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].cq_dma);
+
+	if (ret) {
+		device_printf(dev, "%s: [%d] cq failed\n", __func__, ridx);
+		goto qls_alloc_rx_ring_dma_exit;
+	}
+	ha->rx_ring[ridx].flags.cq_dma = 1;
+
+	ha->rx_ring[ridx].lbq_dma.alignment = 8;
+	ha->rx_ring[ridx].lbq_dma.size = QLA_LGBQ_AND_TABLE_SIZE;
+
+	ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma);
+
+	if (ret) {
+		device_printf(dev, "%s: [%d] lbq failed\n", __func__, ridx);
+		goto qls_alloc_rx_ring_dma_exit;
+	}
+	ha->rx_ring[ridx].flags.lbq_dma = 1;
+
+	ha->rx_ring[ridx].sbq_dma.alignment = 8;
+	ha->rx_ring[ridx].sbq_dma.size = QLA_SMBQ_AND_TABLE_SIZE;
+
+	ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma);
+
+	if (ret) {
+		device_printf(dev, "%s: [%d] sbq failed\n", __func__, ridx);
+		goto qls_alloc_rx_ring_dma_exit;
+	}
+	ha->rx_ring[ridx].flags.sbq_dma = 1;
+
+	ha->rx_ring[ridx].lb_dma.alignment = 8;
+	ha->rx_ring[ridx].lb_dma.size = (QLA_LGB_SIZE * QLA_NUM_LGB_ENTRIES);
+
+	ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lb_dma);
+	if (ret) {
+		device_printf(dev, "%s: [%d] lb failed\n", __func__, ridx);
+		goto qls_alloc_rx_ring_dma_exit;
+	}
+	ha->rx_ring[ridx].flags.lb_dma = 1;
+
+	bzero(ha->rx_ring[ridx].cq_dma.dma_b, ha->rx_ring[ridx].cq_dma.size);
+	bzero(ha->rx_ring[ridx].lbq_dma.dma_b, ha->rx_ring[ridx].lbq_dma.size);
+	bzero(ha->rx_ring[ridx].sbq_dma.dma_b, ha->rx_ring[ridx].sbq_dma.size);
+	bzero(ha->rx_ring[ridx].lb_dma.dma_b, ha->rx_ring[ridx].lb_dma.size);
+
+	/* completion queue */
+	ha->rx_ring[ridx].cq_base_vaddr = ha->rx_ring[ridx].cq_dma.dma_b;
+	ha->rx_ring[ridx].cq_base_paddr = ha->rx_ring[ridx].cq_dma.dma_addr;
+
+	v_addr = ha->rx_ring[ridx].cq_dma.dma_b;
+	p_addr = ha->rx_ring[ridx].cq_dma.dma_addr;
+
+	/* Skip past the CQ entries into the extra page allocated above. */
+	v_addr = v_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t)));
+	p_addr = p_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t)));
+
+	/* completion queue icb */
+	ha->rx_ring[ridx].cq_icb_vaddr = v_addr;
+	ha->rx_ring[ridx].cq_icb_paddr = p_addr;
+
+	/* The index-register word follows a quarter page after the ICB. */
+	v_addr = v_addr + (PAGE_SIZE >> 2);
+	p_addr = p_addr + (PAGE_SIZE >> 2);
+
+	/* completion queue index register */
+	ha->rx_ring[ridx].cqi_vaddr = (uint32_t *)v_addr;
+	ha->rx_ring[ridx].cqi_paddr = p_addr;
+
+	v_addr = ha->rx_ring[ridx].lbq_dma.dma_b;
+	p_addr = ha->rx_ring[ridx].lbq_dma.dma_addr;
+
+	/* large buffer queue address table */
+	ha->rx_ring[ridx].lbq_addr_tbl_vaddr = v_addr;
+	ha->rx_ring[ridx].lbq_addr_tbl_paddr = p_addr;
+
+	/* large buffer queue */
+	ha->rx_ring[ridx].lbq_vaddr = v_addr + PAGE_SIZE;
+	ha->rx_ring[ridx].lbq_paddr = p_addr + PAGE_SIZE;
+
+	v_addr = ha->rx_ring[ridx].sbq_dma.dma_b;
+	p_addr = ha->rx_ring[ridx].sbq_dma.dma_addr;
+
+	/* small buffer queue address table */
+	ha->rx_ring[ridx].sbq_addr_tbl_vaddr = v_addr;
+	ha->rx_ring[ridx].sbq_addr_tbl_paddr = p_addr;
+
+	/* small buffer queue */
+	ha->rx_ring[ridx].sbq_vaddr = v_addr + PAGE_SIZE;
+	ha->rx_ring[ridx].sbq_paddr = p_addr + PAGE_SIZE;
+
+	ha->rx_ring[ridx].lb_vaddr = ha->rx_ring[ridx].lb_dma.dma_b;
+	ha->rx_ring[ridx].lb_paddr = ha->rx_ring[ridx].lb_dma.dma_addr;
+
+	/* Initialize Large Buffer Queue Table */
+
+	/*
+	 * NOTE(review): only the first lbq address-table entry is
+	 * written, while the sbq table below fills all of its entries —
+	 * presumably the large buffer queue fits in a single page;
+	 * confirm against QLA_LGBQ_AND_TABLE_SIZE.
+	 */
+	p_addr = ha->rx_ring[ridx].lbq_paddr;
+	bq_e = ha->rx_ring[ridx].lbq_addr_tbl_vaddr;
+
+	bq_e->addr_lo = p_addr & 0xFFFFFFFF;
+	bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
+
+	/* Point each lbq element at one QLA_LGB_SIZE receive buffer. */
+	p_addr = ha->rx_ring[ridx].lb_paddr;
+	bq_e = ha->rx_ring[ridx].lbq_vaddr;
+
+	for (i = 0; i < QLA_NUM_LGB_ENTRIES; i++) {
+		bq_e->addr_lo = p_addr & 0xFFFFFFFF;
+		bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
+
+		p_addr = p_addr + QLA_LGB_SIZE;
+		bq_e++;
+	}
+
+	/* Initialize Small Buffer Queue Table */
+
+	/* One table entry per page of the small buffer queue. */
+	p_addr = ha->rx_ring[ridx].sbq_paddr;
+	bq_e = ha->rx_ring[ridx].sbq_addr_tbl_vaddr;
+
+	for (i =0; i < (QLA_SBQ_SIZE/QLA_PAGE_SIZE); i++) {
+		bq_e->addr_lo = p_addr & 0xFFFFFFFF;
+		bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
+
+		p_addr = p_addr + QLA_PAGE_SIZE;
+		bq_e++;
+	}
+
+qls_alloc_rx_ring_dma_exit:
+	return (ret);
+}
+
+/*
+ * qls_alloc_rx_dma: create the shared rx DMA tag (single segment of
+ * up to MJUM9BYTES so each jumbo rx buffer is physically contiguous),
+ * then allocate every ring's DMA memory. Returns 0 on success.
+ */
+static int
+qls_alloc_rx_dma(qla_host_t *ha)
+{
+	int r;
+	int rc = 0;
+
+	if (bus_dma_tag_create(NULL,    /* parent */
+			1, 0,    /* alignment, bounds */
+			BUS_SPACE_MAXADDR,	/* lowaddr */
+			BUS_SPACE_MAXADDR,	/* highaddr */
+			NULL, NULL,	/* filter, filterarg */
+			MJUM9BYTES,	/* maxsize */
+			1,	/* nsegments */
+			MJUM9BYTES,	/* maxsegsize */
+			BUS_DMA_ALLOCNOW,	/* flags */
+			NULL,	/* lockfunc */
+			NULL,	/* lockfuncarg */
+			&ha->rx_tag)) {
+		device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
+			__func__);
+		return (ENOMEM);
+	}
+
+	for (r = 0; r < ha->num_rx_rings; r++) {
+		rc = qls_alloc_rx_ring_dma(ha, r);
+
+		if (rc != 0) {
+			/* Unwind everything allocated so far. */
+			qls_free_rx_dma(ha);
+			break;
+		}
+	}
+
+	return (rc);
+}
+
+/*
+ * qls_wait_for_flash_ready: poll the flash address register until it
+ * reports ready (return 0) or error/timeout (return -1). Polls at
+ * most 3 times with a 100us delay between attempts.
+ */
+static int
+qls_wait_for_flash_ready(qla_host_t *ha)
+{
+	uint32_t reg;
+	int tries;
+
+	for (tries = 0; tries < 3; tries++) {
+		reg = READ_REG32(ha, Q81_CTL_FLASH_ADDR);
+
+		/* A latched error ends the wait immediately. */
+		if (reg & Q81_CTL_FLASH_ADDR_ERR)
+			break;
+
+		if (reg & Q81_CTL_FLASH_ADDR_RDY)
+			return (0);
+
+		QLA_USEC_DELAY(100);
+	}
+
+	QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__));
+
+	return (-1);
+}
+
+/*
+ * Name: qls_rd_flash32
+ * Function: Read Flash Memory
+ */
+int
+qls_rd_flash32(qla_host_t *ha, uint32_t addr, uint32_t *data)
+{
+	int rc;
+
+	/* The flash controller must be idle before issuing the read. */
+	if ((rc = qls_wait_for_flash_ready(ha)) != 0)
+		return (rc);
+
+	WRITE_REG32(ha, Q81_CTL_FLASH_ADDR, (addr | Q81_CTL_FLASH_ADDR_R));
+
+	/* Wait for the read to complete, then fetch the data word. */
+	if ((rc = qls_wait_for_flash_ready(ha)) != 0)
+		return (rc);
+
+	*data = READ_REG32(ha, Q81_CTL_FLASH_DATA);
+
+	return 0;
+}
+
+/*
+ * qls_flash_validate: verify the flash parameter block — the 4-byte
+ * id must match the expected signature, and the 16-bit word sum of
+ * the whole structure must come out to zero.
+ * Returns 0 if valid, -1 otherwise.
+ */
+static int
+qls_flash_validate(qla_host_t *ha, const char *signature)
+{
+	uint16_t *word;
+	uint16_t sum = 0;
+	int n;
+
+	if (bcmp(ha->flash.id, signature, 4) != 0) {
+		QL_DPRINT1((ha->pci_dev, "%s: invalid signature "
+			"%x:%x:%x:%x %s\n", __func__, ha->flash.id[0],
+			ha->flash.id[1], ha->flash.id[2], ha->flash.id[3],
+			signature));
+		return(-1);
+	}
+
+	word = (uint16_t *)&ha->flash;
+
+	for (n = 0; n < (sizeof (q81_flash_t) >> 1); n++)
+		sum += *word++;
+
+	if (sum != 0) {
+		QL_DPRINT1((ha->pci_dev, "%s: invalid checksum\n", __func__));
+		return(-1);
+	}
+	return(0);
+}
+
+/*
+ * qls_rd_nic_params: read this PCI function's flash parameter block
+ * (q81_flash_t) word-by-word from flash, validate its signature and
+ * checksum, and copy the factory MAC address into ha->mac_addr.
+ * The flash hardware semaphore is held across the reads.
+ * Returns 0 on success, -1 on failure.
+ */
+int
+qls_rd_nic_params(qla_host_t *ha)
+{
+	int		i, ret = 0;
+	uint32_t	faddr;
+	uint32_t	*qflash;
+
+	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_FLASH, Q81_CTL_SEM_SET_FLASH)) {
+		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
+		return(-1);
+	}
+
+	/* Each function's parameter block lives at its own flash offset. */
+	if ((ha->pci_func & 0x1) == 0)
+		faddr = Q81_F0_FLASH_OFFSET >> 2;
+	else
+		faddr = Q81_F1_FLASH_OFFSET >> 2;
+
+	qflash = (uint32_t *)&ha->flash;
+
+	/* Read the whole structure, one 32-bit word at a time. */
+	for (i = 0; i < (sizeof(q81_flash_t) >> 2) ; i++) {
+
+		ret = qls_rd_flash32(ha, faddr, qflash);
+
+		if (ret)
+			goto qls_rd_flash_data_exit;
+
+		faddr++;
+		qflash++;
+	}
+
+	QL_DUMP_BUFFER8(ha, __func__, (&ha->flash), (sizeof (q81_flash_t)));
+
+	ret = qls_flash_validate(ha, Q81_FLASH_ID);
+
+	if (ret)
+		goto qls_rd_flash_data_exit;
+
+	bcopy(ha->flash.mac_addr0, ha->mac_addr, ETHER_ADDR_LEN);
+
+	QL_DPRINT1((ha->pci_dev, "%s: mac %02x:%02x:%02x:%02x:%02x:%02x\n",
+		__func__, ha->mac_addr[0],  ha->mac_addr[1], ha->mac_addr[2],
+		ha->mac_addr[3], ha->mac_addr[4],  ha->mac_addr[5]));
+
+qls_rd_flash_data_exit:
+
+	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_FLASH);
+
+	return(ret);
+}
+
+/*
+ * qls_sem_lock: try to acquire a hardware semaphore by writing the
+ * mask+value bits and checking that the value bits read back. Retries
+ * up to 30 times with a 100us delay; on failure requests an adapter
+ * recovery and returns -1.
+ */
+static int
+qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value)
+{
+	uint32_t data;
+	int attempts;
+
+	for (attempts = 0; attempts < 30; attempts++) {
+		WRITE_REG32(ha, Q81_CTL_SEMAPHORE, (mask|value));
+
+		data = READ_REG32(ha, Q81_CTL_SEMAPHORE);
+
+		/* Readback shows our value only if we own the semaphore. */
+		if (data & value)
+			return (0);
+
+		QLA_USEC_DELAY(100);
+	}
+
+	ha->qla_initiate_recovery = 1;
+	return (-1);
+}
+
+/*
+ * qls_sem_unlock: release a hardware semaphore by writing the mask
+ * bits with the value bits clear.
+ */
+static void
+qls_sem_unlock(qla_host_t *ha, uint32_t mask)
+{
+	WRITE_REG32(ha, Q81_CTL_SEMAPHORE, mask);
+}
+
+/*
+ * qls_wait_for_proc_addr_ready: poll the indirect processor-address
+ * register until it reports ready (return 0) or error/timeout
+ * (return -1, and request an adapter recovery). Polls at most 3
+ * times with a 100us delay between attempts.
+ */
+static int
+qls_wait_for_proc_addr_ready(qla_host_t *ha)
+{
+	uint32_t reg;
+	int tries;
+
+	for (tries = 0; tries < 3; tries++) {
+		reg = READ_REG32(ha, Q81_CTL_PROC_ADDR);
+
+		/* A latched error ends the wait immediately. */
+		if (reg & Q81_CTL_PROC_ADDR_ERR)
+			break;
+
+		if (reg & Q81_CTL_PROC_ADDR_RDY)
+			return (0);
+
+		QLA_USEC_DELAY(100);
+	}
+
+	QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__));
+
+	ha->qla_initiate_recovery = 1;
+	return (-1);
+}
+
+/*
+ * qls_proc_addr_rd_reg: read one register of an on-chip processor
+ * module through the indirect proc-addr interface.
+ * Returns 0 and stores the value in *data on success.
+ */
+static int
+qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg,
+	uint32_t *data)
+{
+	int rc;
+
+	/* The indirect interface must be idle before the request. */
+	rc = qls_wait_for_proc_addr_ready(ha);
+	if (rc)
+		return (rc);
+
+	/* Post the read, wait for completion, then fetch the data. */
+	WRITE_REG32(ha, Q81_CTL_PROC_ADDR,
+		(addr_module | reg | Q81_CTL_PROC_ADDR_READ));
+
+	rc = qls_wait_for_proc_addr_ready(ha);
+	if (rc)
+		return (rc);
+
+	*data = READ_REG32(ha, Q81_CTL_PROC_DATA);
+
+	return (0);
+}
+
+/*
+ * qls_proc_addr_wr_reg: write one register of an on-chip processor
+ * module through the indirect proc-addr interface.
+ * Returns 0 on success.
+ */
+static int
+qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg,
+	uint32_t data)
+{
+	int rc;
+
+	/* The indirect interface must be idle before the request. */
+	rc = qls_wait_for_proc_addr_ready(ha);
+	if (rc)
+		return (rc);
+
+	/* Data goes first; the address write triggers the transfer. */
+	WRITE_REG32(ha, Q81_CTL_PROC_DATA, data);
+	WRITE_REG32(ha, Q81_CTL_PROC_ADDR, (addr_module | reg));
+
+	return (qls_wait_for_proc_addr_ready(ha));
+}
+
+/*
+ * qls_hw_nic_reset: reset this NIC function and wait (up to 10 x
+ * 10us) for the hardware to clear the function-reset bit.
+ * Returns 0 on success, -1 if the bit never clears.
+ */
+static int
+qls_hw_nic_reset(qla_host_t *ha)
+{
+	int count;
+	uint32_t data;
+	device_t dev = ha->pci_dev;
+
+	ha->hw_init = 0;
+
+	/* Write the mask bit together with the reset bit to arm it. */
+	data = (Q81_CTL_RESET_FUNC << Q81_CTL_RESET_MASK_SHIFT) |
+			Q81_CTL_RESET_FUNC;
+	WRITE_REG32(ha, Q81_CTL_RESET, data);
+
+	count = 10;
+	while (count--) {
+		data = READ_REG32(ha, Q81_CTL_RESET);
+		if ((data & Q81_CTL_RESET_FUNC) == 0)
+			return (0);
+		QLA_USEC_DELAY(10);
+	}
+
+	/*
+	 * Fix: the old "if (count == 0)" test misfired — after
+	 * while(count--) runs dry, count is -1, so a timeout was never
+	 * reported and success on the last try was reported as failure.
+	 */
+	device_printf(dev, "%s: Bit 15 not cleared after Reset\n",
+		__func__);
+	return (-1);
+}
+
+/*
+ * qls_hw_reset: full reset sequence for an already-initialized
+ * adapter — clear the routing table, stop management traffic, wait
+ * for the outbound FIFO to drain, reset the NIC function and resume
+ * management traffic. On first-time init (hw_init == 0) only the
+ * plain function reset is performed. Returns 0 on success.
+ */
+static int
+qls_hw_reset(qla_host_t *ha)
+{
+	device_t dev = ha->pci_dev;
+	int ret;
+	int count;
+	uint32_t data;
+
+	QL_DPRINT2((ha->pci_dev, "%s:enter[%d]\n", __func__, ha->hw_init));
+
+	if (ha->hw_init == 0) {
+		ret = qls_hw_nic_reset(ha);
+		goto qls_hw_reset_exit;
+	}
+
+	ret = qls_clear_routing_table(ha);
+	if (ret)
+		goto qls_hw_reset_exit;
+
+	ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_STOP);
+	if (ret)
+		goto qls_hw_reset_exit;
+
+	/*
+	 * Wait for FIFO to empty
+	 */
+	count = 5;
+	while (count--) {
+		data = READ_REG32(ha, Q81_CTL_STATUS);
+		if (data & Q81_CTL_STATUS_NFE)
+			break;
+		qls_mdelay(__func__, 100);
+	}
+	/*
+	 * Fix: the old "if (count == 0)" test misfired — after
+	 * while(count--) runs dry, count is -1, so a timeout was never
+	 * detected and success on the last try was treated as failure.
+	 * Test the actual status bit instead.
+	 */
+	if (!(data & Q81_CTL_STATUS_NFE)) {
+		device_printf(dev, "%s: NFE bit not set\n", __func__);
+		goto qls_hw_reset_exit;
+	}
+
+	count = 5;
+	while (count--) {
+		(void)qls_mbx_get_mgmt_ctrl(ha, &data);
+
+		if ((data & Q81_MBX_GET_MGMT_CTL_FIFO_EMPTY) &&
+			(data & Q81_MBX_GET_MGMT_CTL_SET_MGMT))
+			break;
+		qls_mdelay(__func__, 100);
+	}
+	/* Same off-by-one fix: count is -1 (not 0) on timeout. */
+	if (count < 0)
+		goto qls_hw_reset_exit;
+
+	/*
+	 * Reset the NIC function
+	 */
+	ret = qls_hw_nic_reset(ha);
+	if (ret)
+		goto qls_hw_reset_exit;
+
+	ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_RESUME);
+
+qls_hw_reset_exit:
+	if (ret)
+		device_printf(dev, "%s: failed\n", __func__);
+
+	return (ret);
+}
+
+/*
+ * MPI Related Functions
+ */
+/*
+ * qls_mpi_risc_rd_reg: read an MPI RISC register via the indirect
+ * proc-addr interface.
+ */
+int
+qls_mpi_risc_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
+{
+	return (qls_proc_addr_rd_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC,
+			reg, data));
+}
+
+/*
+ * qls_mpi_risc_wr_reg: write an MPI RISC register via the indirect
+ * proc-addr interface.
+ */
+int
+qls_mpi_risc_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
+{
+	return (qls_proc_addr_wr_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC,
+			reg, data));
+}
+
+/*
+ * qls_mbx_rd_reg: read outbound (firmware-to-host) mailbox register
+ * "reg" of this PCI function.
+ */
+int
+qls_mbx_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
+{
+	/* Each PCI function has its own bank of outbound mailbox regs. */
+	reg += ((ha->pci_func & 0x1) == 0) ? Q81_FUNC0_MBX_OUT_REG0 :
+			Q81_FUNC1_MBX_OUT_REG0;
+
+	return (qls_mpi_risc_rd_reg(ha, reg, data));
+}
+
+/*
+ * qls_mbx_wr_reg: write inbound (host-to-firmware) mailbox register
+ * "reg" of this PCI function.
+ */
+int
+qls_mbx_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
+{
+	/* Each PCI function has its own bank of inbound mailbox regs. */
+	reg += ((ha->pci_func & 0x1) == 0) ? Q81_FUNC0_MBX_IN_REG0 :
+			Q81_FUNC1_MBX_IN_REG0;
+
+	return (qls_mpi_risc_wr_reg(ha, reg, data));
+}
+
+
+/*
+ * qls_mbx_cmd: issue a mailbox command to the firmware and collect
+ * the response.
+ * In:  in_mbx/i_count  - command words loaded into the inbound regs
+ *      out_mbx/o_count - where the outbound (response) regs go
+ * Out: 0 on success, -1 on failure (also requests adapter recovery).
+ * The response is awaited by polling when interrupts are disabled,
+ * otherwise by waiting for the ISR to fill ha->mbox[]/set mbx_done.
+ */
+static int
+qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count,
+	uint32_t *out_mbx, uint32_t o_count)
+{
+	int i, ret = -1;
+	uint32_t data32;
+	uint32_t count = 50;
+
+	QL_DPRINT2((ha->pci_dev, "%s: enter[0x%08x 0x%08x 0x%08x]\n",
+		__func__, *in_mbx, *(in_mbx + 1), *(in_mbx + 2)));
+
+	/* A pending host-to-risc interrupt means a command is in flight. */
+	data32 = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);
+
+	if (data32 & Q81_CTL_HCS_HTR_INTR) {
+		device_printf(ha->pci_dev, "%s: cmd_status[0x%08x]\n",
+			__func__, data32);
+		goto qls_mbx_cmd_exit;
+	}
+
+	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV,
+		Q81_CTL_SEM_SET_PROC_ADDR_NIC_RCV)) {
+		device_printf(ha->pci_dev, "%s: semlock failed\n", __func__);
+		goto qls_mbx_cmd_exit;
+	}
+
+	ha->mbx_done = 0;
+
+	/* Load the command into the inbound mailbox registers. */
+	for (i = 0; i < i_count; i++) {
+
+		ret = qls_mbx_wr_reg(ha, i, *in_mbx);
+
+		if (ret) {
+			device_printf(ha->pci_dev,
+				"%s: mbx_wr[%d, 0x%08x] failed\n", __func__,
+				i, *in_mbx);
+			qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV);
+			goto qls_mbx_cmd_exit;
+		}
+
+		in_mbx++;
+	}
+	/* Ring the doorbell: raise the host-to-risc interrupt. */
+	WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, Q81_CTL_HCS_CMD_SET_HTR_INTR);
+
+	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV);
+
+	ret = -1;
+	ha->mbx_done = 0;
+
+	while (count--) {
+
+		if (ha->flags.intr_enable == 0) {
+			/* Polled mode: wait for the risc-to-host intr. */
+			data32 = READ_REG32(ha, Q81_CTL_STATUS);
+
+			if (!(data32 & Q81_CTL_STATUS_PI)) {
+				qls_mdelay(__func__, 100);
+				continue;
+			}
+
+			ret = qls_mbx_rd_reg(ha, 0, &data32);
+
+			if (ret == 0 ) {
+				/* 0x4xxx = cmd status; 0x8xxx = AEN. */
+				if ((data32 & 0xF000) == 0x4000) {
+
+					out_mbx[0] = data32;
+
+					for (i = 1; i < o_count; i++) {
+						ret = qls_mbx_rd_reg(ha, i,
+							&data32);
+						if (ret) {
+							device_printf(
+								ha->pci_dev,
+								"%s: mbx_rd[%d]"
+								" failed\n",
+								__func__, i);
+							break;
+						}
+						out_mbx[i] = data32;
+					}
+					break;
+				} else if ((data32 & 0xF000) == 0x8000) {
+					/* Async event: ack, keep waiting. */
+					count = 50;
+					WRITE_REG32(ha,\
+						Q81_CTL_HOST_CMD_STATUS,\
+						Q81_CTL_HCS_CMD_CLR_RTH_INTR);
+				}
+			}
+		} else {
+			/*
+			 * Interrupt mode: the ISR has filled ha->mbox[].
+			 * Fix: copy from index 0 — the old loop started at
+			 * 1, leaving out_mbx[0] stale whenever the caller's
+			 * out buffer is not ha->mbox itself.
+			 */
+			if (ha->mbx_done) {
+				for (i = 0; i < o_count; i++) {
+					out_mbx[i] = ha->mbox[i];
+				}
+				ret = 0;
+				break;
+			}
+		}
+		qls_mdelay(__func__, 1000);
+	}
+
+qls_mbx_cmd_exit:
+
+	if (ha->flags.intr_enable == 0) {
+		WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,\
+			Q81_CTL_HCS_CMD_CLR_RTH_INTR);
+	}
+
+	if (ret) {
+		ha->qla_initiate_recovery = 1;
+	}
+
+	QL_DPRINT2((ha->pci_dev, "%s: exit[%d]\n", __func__, ret));
+	return (ret);
+}
+
+/*
+ * qls_mbx_set_mgmt_ctrl: issue the SET_MGMT_CTL mailbox command to
+ * stop/resume firmware management traffic. Returns 0 on success.
+ */
+static int
+qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl)
+{
+	uint32_t *mbox = ha->mbox;
+	device_t dev = ha->pci_dev;
+
+	bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
+
+	mbox[0] = Q81_MBX_SET_MGMT_CTL;
+	mbox[1] = t_ctrl;
+
+	if (qls_mbx_cmd(ha, mbox, 2, mbox, 1)) {
+		device_printf(dev, "%s failed\n", __func__);
+		return (-1);
+	}
+
+	if (mbox[0] == Q81_MBX_CMD_COMPLETE)
+		return (0);
+
+	/* A STOP request may legitimately come back as CMD_ERROR. */
+	if ((t_ctrl == Q81_MBX_SET_MGMT_CTL_STOP) &&
+		(mbox[0] == Q81_MBX_CMD_ERROR))
+		return (0);
+
+	device_printf(dev, "%s failed [0x%08x]\n", __func__, mbox[0]);
+	return (-1);
+}
+
+/*
+ * qls_mbx_get_mgmt_ctrl: issue GET_MGMT_CTL and return the
+ * management-control status word (mailbox word 1) via *t_status.
+ * Returns 0 on success, -1 on failure (with *t_status = 0).
+ */
+static int
+qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status)
+{
+	uint32_t *mbox = ha->mbox;
+	device_t dev = ha->pci_dev;
+
+	*t_status = 0;
+
+	bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
+
+	mbox[0] = Q81_MBX_GET_MGMT_CTL;
+
+	if (qls_mbx_cmd(ha, mbox, 1, mbox, 2) != 0) {
+		device_printf(dev, "%s failed\n", __func__);
+		return (-1);
+	}
+
+	*t_status = mbox[1];
+
+	return (0);
+}
+
+/*
+ * qls_mbx_get_link_status: issue GET_LNK_STATUS and cache the five
+ * status words the firmware reports in the ha softc.
+ */
+static void
+qls_mbx_get_link_status(qla_host_t *ha)
+{
+	uint32_t *mbox = ha->mbox;
+	device_t dev = ha->pci_dev;
+
+	bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
+
+	mbox[0] = Q81_MBX_GET_LNK_STATUS;
+
+	if (qls_mbx_cmd(ha, mbox, 1, mbox, 6) != 0) {
+		device_printf(dev, "%s failed\n", __func__);
+		return;
+	}
+
+	ha->link_status = mbox[1];
+	ha->link_down_info = mbox[2];
+	ha->link_hw_info = mbox[3];
+	ha->link_dcbx_counters = mbox[4];
+	ha->link_change_counters = mbox[5];
+
+	device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+		__func__, mbox[0],mbox[1],mbox[2],mbox[3],mbox[4],mbox[5]);
+}
+
+/*
+ * qls_mbx_about_fw: issue the ABOUT_FW mailbox command and log the
+ * six words the firmware returns.
+ */
+static void
+qls_mbx_about_fw(qla_host_t *ha)
+{
+	uint32_t *mbox = ha->mbox;
+	device_t dev = ha->pci_dev;
+
+	bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
+
+	mbox[0] = Q81_MBX_ABOUT_FW;
+
+	if (qls_mbx_cmd(ha, mbox, 1, mbox, 6) != 0) {
+		device_printf(dev, "%s failed\n", __func__);
+		return;
+	}
+
+	device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+		__func__, mbox[0],mbox[1],mbox[2],mbox[3],mbox[4],mbox[5]);
+}
+
+/*
+ * qls_mbx_dump_risc_ram: read r_size 32-bit words of MPI RISC RAM
+ * starting at RISC address r_addr into buf. The firmware DMAs the
+ * words into the pre-allocated ha->mpi_dma buffer, which is then
+ * copied out. Returns 0 on success, -1 on failure.
+ */
+int
+qls_mbx_dump_risc_ram(qla_host_t *ha, void *buf, uint32_t r_addr,
+	uint32_t r_size)
+{
+	bus_addr_t b_paddr;
+	uint32_t *mbox;
+	device_t dev = ha->pci_dev;
+
+	mbox = ha->mbox;
+	bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
+
+	bzero(ha->mpi_dma.dma_b,(r_size << 2));
+	b_paddr = ha->mpi_dma.dma_addr;
+
+	/*
+	 * All values are split into 16-bit mailbox words:
+	 *   mbox1/mbox8 = RISC address bits 15:0 / 31:16,
+	 *   mbox3/mbox2 = DMA address bits 15:0 / 31:16,
+	 *   mbox7/mbox6 = DMA address bits 47:32 / 63:48,
+	 *   mbox5/mbox4 = word count bits 15:0 / 31:16.
+	 */
+	mbox[0] = Q81_MBX_DUMP_RISC_RAM;
+	mbox[1] = r_addr & 0xFFFF;
+	mbox[2] = ((uint32_t)(b_paddr >> 16)) & 0xFFFF;
+	mbox[3] = ((uint32_t)b_paddr) & 0xFFFF;
+	mbox[4] = (r_size >> 16) & 0xFFFF;
+	mbox[5] = r_size & 0xFFFF;
+	mbox[6] = ((uint32_t)(b_paddr >> 48)) & 0xFFFF;
+	mbox[7] = ((uint32_t)(b_paddr >> 32)) & 0xFFFF;
+	mbox[8] = (r_addr >> 16) & 0xFFFF;
+
+	bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map,
+		BUS_DMASYNC_PREREAD);
+
+	if (qls_mbx_cmd(ha, mbox, 9, mbox, 1)) {
+		device_printf(dev, "%s failed\n", __func__);
+		return (-1);
+	}
+	/* 0x4000 == Q81_MBX_CMD_COMPLETE status from the firmware. */
+	if (mbox[0] != 0x4000) {
+		device_printf(ha->pci_dev, "%s: failed!\n", __func__);
+		return (-1);
+	} else {
+		bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map,
+			BUS_DMASYNC_POSTREAD);
+		bcopy(ha->mpi_dma.dma_b, buf, (r_size << 2));
+	}
+
+	return (0);
+}
+
+/*
+ * qls_mpi_reset: reset the MPI RISC processor and wait (up to 10 x
+ * 10ms) for the hardware to acknowledge by setting the RISC_RESET
+ * bit, which is then cleared. Returns 0 on success, -1 on timeout.
+ */
+int
+qls_mpi_reset(qla_host_t *ha)
+{
+	int count;
+	uint32_t data;
+	device_t dev = ha->pci_dev;
+
+	WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,\
+		Q81_CTL_HCS_CMD_SET_RISC_RESET);
+
+	count = 10;
+	while (count--) {
+		data = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);
+		if (data & Q81_CTL_HCS_RISC_RESET) {
+			WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,\
+				Q81_CTL_HCS_CMD_CLR_RISC_RESET);
+			return (0);
+		}
+		qls_mdelay(__func__, 10);
+	}
+
+	/*
+	 * Fix: the old "if (count == 0)" test was inverted in practice —
+	 * after while(count--) runs dry, count is -1, so a timeout
+	 * returned success and a last-iteration success returned failure.
+	 */
+	device_printf(dev, "%s: failed\n", __func__);
+	return (-1);
+}
+
diff --git a/sys/dev/qlxge/qls_hw.h b/sys/dev/qlxge/qls_hw.h
new file mode 100644
index 000000000000..6805f1e94a70
--- /dev/null
+++ b/sys/dev/qlxge/qls_hw.h
@@ -0,0 +1,1090 @@
+/*
+ * Copyright (c) 2013-2014 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * File: qls_hw.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+#ifndef _QLS_HW_H_
+#define _QLS_HW_H_
+
+#define Q8_MAX_NUM_MULTICAST_ADDRS 32
+#define Q8_MAC_ADDR_LEN 6
+
+#define BIT_0 (0x1 << 0)
+#define BIT_1 (0x1 << 1)
+#define BIT_2 (0x1 << 2)
+#define BIT_3 (0x1 << 3)
+#define BIT_4 (0x1 << 4)
+#define BIT_5 (0x1 << 5)
+#define BIT_6 (0x1 << 6)
+#define BIT_7 (0x1 << 7)
+#define BIT_8 (0x1 << 8)
+#define BIT_9 (0x1 << 9)
+#define BIT_10 (0x1 << 10)
+#define BIT_11 (0x1 << 11)
+#define BIT_12 (0x1 << 12)
+#define BIT_13 (0x1 << 13)
+#define BIT_14 (0x1 << 14)
+#define BIT_15 (0x1 << 15)
+#define BIT_16 (0x1 << 16)
+#define BIT_17 (0x1 << 17)
+#define BIT_18 (0x1 << 18)
+#define BIT_19 (0x1 << 19)
+#define BIT_20 (0x1 << 20)
+#define BIT_21 (0x1 << 21)
+#define BIT_22 (0x1 << 22)
+#define BIT_23 (0x1 << 23)
+#define BIT_24 (0x1 << 24)
+#define BIT_25 (0x1 << 25)
+#define BIT_11 (0x1 << 11)
+#define BIT_12 (0x1 << 12)
+#define BIT_13 (0x1 << 13)
+#define BIT_14 (0x1 << 14)
+#define BIT_15 (0x1 << 15)
+#define BIT_16 (0x1 << 16)
+#define BIT_17 (0x1 << 17)
+#define BIT_18 (0x1 << 18)
+#define BIT_19 (0x1 << 19)
+#define BIT_20 (0x1 << 20)
+#define BIT_21 (0x1 << 21)
+#define BIT_22 (0x1 << 22)
+#define BIT_23 (0x1 << 23)
+#define BIT_24 (0x1 << 24)
+#define BIT_25 (0x1 << 25)
+#define BIT_26 (0x1 << 26)
+#define BIT_27 (0x1 << 27)
+#define BIT_28 (0x1 << 28)
+#define BIT_29 (0x1 << 29)
+#define BIT_30 (0x1 << 30)
+#define BIT_31 (0x1 << 31)
+
+
+/*
+ * Firmware Interface
+ */
+
+/*********************************************************************
+ * Work Queue Register Map
+ *********************************************************************/
+#define Q81_WRKQ_INDEX_REG 0x00
+#define Q81_WRKQ_CONS_INDEX_MASK 0xFFFF0000
+#define Q81_WRKQ_PROD_INDEX_MASK 0x0000FFFF
+#define Q81_WRKQ_VALID_REG 0x04
+#define Q81_WRKQ_VALID_ONQ BIT_25
+#define Q81_WRKQ_VALID_V BIT_4
+
+/*********************************************************************
+ * Completion Queue Register Map
+ *********************************************************************/
+#define Q81_COMPQ_INDEX_REG 0x00
+#define Q81_COMPQ_PROD_INDEX_MASK 0xFFFF0000
+#define Q81_COMPQ_CONS_INDEX_MASK 0x0000FFFF
+#define Q81_COMPQ_VALID_REG 0x04
+#define Q81_COMPQ_VALID_V BIT_4
+#define Q81_LRGBQ_INDEX_REG 0x18
+#define Q81_LRGBQ_CONS_INDEX_MASK 0xFFFF0000
+#define Q81_LRGBQ_PROD_INDEX_MASK 0x0000FFFF
+#define Q81_SMBQ_INDEX_REG 0x1C
+#define Q81_SMBQ_CONS_INDEX_MASK 0xFFFF0000
+#define Q81_SMBQ_PROD_INDEX_MASK 0x0000FFFF
+
+/*********************************************************************
+ * Control Register Definitions
+ * (Access, Function Specific, Shared via Semaphore, Control by MPI FW)
+ *********************************************************************/
+#define Q81_CTL_PROC_ADDR 0x00 /* R/W - Y - */
+#define Q81_CTL_PROC_DATA 0x04 /* R/W - Y - */
+#define Q81_CTL_SYSTEM 0x08 /* MWR - - - */
+#define Q81_CTL_RESET 0x0C /* MWR Y - - */
+#define Q81_CTL_FUNC_SPECIFIC 0x10 /* MWR Y - - */
+#define Q81_CTL_HOST_CMD_STATUS 0x14 /* R/W Y - - */
+#define Q81_CTL_LED 0x18 /* R/W Y - Y */
+#define Q81_CTL_ICB_ACCESS_ADDR_LO 0x20 /* R/W - Y - */
+#define Q81_CTL_ICB_ACCESS_ADDR_HI 0x24 /* R/W - Y - */
+#define Q81_CTL_CONFIG 0x28 /* MWR - - - */
+#define Q81_CTL_STATUS 0x30 /* MWR Y - - */
+#define Q81_CTL_INTR_ENABLE 0x34 /* MWR Y - - */
+#define Q81_CTL_INTR_MASK 0x38 /* MWR Y - - */
+#define Q81_CTL_INTR_STATUS1 0x3C /* RO Y - - */
+#define Q81_CTL_INTR_STATUS2 0x40 /* RO Y - - */
+#define Q81_CTL_INTR_STATUS3 0x44 /* RO Y - - */
+#define Q81_CTL_INTR_STATUS4 0x48 /* RO Y - - */
+#define Q81_CTL_REV_ID 0x4C /* RO - - - */
+#define Q81_CTL_FATAL_ERR_STATUS 0x54 /* RO Y - - */
+#define Q81_CTL_COR_ECC_ERR_COUNTER 0x60 /* RO Y - - */
+#define Q81_CTL_SEMAPHORE 0x64 /* MWR Y - - */
+#define Q81_CTL_GPIO1 0x68 /* MWR Y - - */
+#define Q81_CTL_GPIO2 0x6C /* MWR Y - - */
+#define Q81_CTL_GPIO3 0x70 /* MWR Y - - */
+#define Q81_CTL_XGMAC_ADDR 0x78 /* R/W Y Y - */
+#define Q81_CTL_XGMAC_DATA 0x7C /* R/W Y Y Y */
+#define Q81_CTL_NIC_ENH_TX_SCHD 0x80 /* R/W Y - Y */
+#define Q81_CTL_CNA_ENH_TX_SCHD 0x84 /* R/W Y - Y */
+#define Q81_CTL_FLASH_ADDR 0x88 /* R/W - Y - */
+#define Q81_CTL_FLASH_DATA 0x8C /* R/W - Y - */
+#define Q81_CTL_STOP_CQ_PROCESSING 0x90 /* MWR Y - - */
+#define Q81_CTL_MAC_PROTO_ADDR_INDEX 0xA8 /* R/W - Y - */
+#define Q81_CTL_MAC_PROTO_ADDR_DATA 0xAC /* R/W - Y - */
+#define Q81_CTL_COS_DEF_CQ1 0xB0 /* R/W Y - - */
+#define Q81_CTL_COS_DEF_CQ2 0xB4 /* R/W Y - - */
+#define Q81_CTL_ETHERTYPE_SKIP_1 0xB8 /* R/W Y - - */
+#define Q81_CTL_ETHERTYPE_SKIP_2 0xBC /* R/W Y - - */
+#define Q81_CTL_SPLIT_HDR 0xC0 /* R/W Y - - */
+#define Q81_CTL_NIC_PAUSE_THRES 0xC8 /* R/W Y - Y */
+#define Q81_CTL_NIC_RCV_CONFIG 0xD4 /* MWR Y - Y */
+#define Q81_CTL_COS_TAGS_IN_NIC_FIFO 0xDC /* R/W Y - Y */
+#define Q81_CTL_MGMT_RCV_CONFIG 0xE0 /* MWR Y - Y */
+#define Q81_CTL_ROUTING_INDEX 0xE4 /* R/W Y Y - */
+#define Q81_CTL_ROUTING_DATA 0xE8 /* R/W Y Y - */
+#define Q81_CTL_XG_SERDES_ADDR 0xF0 /* R/W Y Y Y */
+#define Q81_CTL_XG_SERDES_DATA 0xF4 /* R/W Y Y Y */
+#define Q81_CTL_XG_PROBE_MUX_ADDR 0xF8 /* R/W - Y - */
+#define Q81_CTL_XG_PROBE_MUX_DATA 0xFC /* R/W - Y - */
+
+
+/*
+ * Process Address Register (0x00)
+ */
+#define Q81_CTL_PROC_ADDR_RDY BIT_31
+#define Q81_CTL_PROC_ADDR_READ BIT_30
+#define Q81_CTL_PROC_ADDR_ERR BIT_29
+#define Q81_CTL_PROC_ADDR_MPI_RISC (0x00 << 16)
+#define Q81_CTL_PROC_ADDR_MDE (0x01 << 16)
+#define Q81_CTL_PROC_ADDR_REG_BLOCK (0x02 << 16)
+#define Q81_CTL_PROC_ADDR_RISC_INT_REG (0x03 << 16)
+
+
+/*
+ * System Register (0x08)
+ */
+#define Q81_CTL_SYSTEM_MASK_SHIFT 16
+#define Q81_CTL_SYSTEM_ENABLE_VQM_WR BIT_5
+#define Q81_CTL_SYSTEM_ENABLE_DWC BIT_4
+#define Q81_CTL_SYSTEM_ENABLE_DA_SINGLE_THRD BIT_3
+#define Q81_CTL_SYSTEM_ENABLE_MDC BIT_2
+#define Q81_CTL_SYSTEM_ENABLE_FAE BIT_1
+#define Q81_CTL_SYSTEM_ENABLE_EFE BIT_0
+
+/*
+ * Reset Register (0x0C)
+ */
+#define Q81_CTL_RESET_MASK_SHIFT 16
+#define Q81_CTL_RESET_FUNC BIT_15
+#define Q81_CTL_RESET_RR_SHIFT 1
+
+/*
+ * Function Specific Control Register (0x10)
+ */
+#define Q81_CTL_FUNC_SPECIFIC_MASK_SHIFT 16
+
+#define Q81_CTL_FUNC_SPECIFIC_FE BIT_15
+#define Q81_CTL_FUNC_SPECIFIC_STE BIT_13
+#define Q81_CTL_FUNC_SPECIFIC_DSB BIT_12
+#define Q81_CTL_FUNC_SPECIFIC_SH BIT_11
+
+#define Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_MASK (0x7 << 8)
+#define Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_2K (0x1 << 8)
+#define Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_4K (0x2 << 8)
+#define Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_8K (0x3 << 8)
+#define Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_64K (0x6 << 8)
+
+#define Q81_CTL_FUNC_SPECIFIC_EPC_O BIT_7
+#define Q81_CTL_FUNC_SPECIFIC_EPC_I BIT_6
+#define Q81_CTL_FUNC_SPECIFIC_EC BIT_5
+#define Q81_CTL_FUNC_SPECIFIC_DBL_DBRST (0x00 << 3)
+#define Q81_CTL_FUNC_SPECIFIC_DBL_MAX_PAYLDSZ (0x01 << 3)
+#define Q81_CTL_FUNC_SPECIFIC_DBL_MAX_RDBRSTSZ (0x02 << 3)
+#define Q81_CTL_FUNC_SPECIFIC_DBL_128 (0x03 << 3)
+#define Q81_CTL_FUNC_SPECIFIC_DBRST_256 0x00
+#define Q81_CTL_FUNC_SPECIFIC_DBRST_512 0x01
+#define Q81_CTL_FUNC_SPECIFIC_DBRST_768 0x02
+#define Q81_CTL_FUNC_SPECIFIC_DBRST_1024 0x03
+
+
+/*
+ * Host Command/Status Register (0x14)
+ */
+#define Q81_CTL_HCS_CMD_NOP (0x00 << 28)
+#define Q81_CTL_HCS_CMD_SET_RISC_RESET (0x01 << 28)
+#define Q81_CTL_HCS_CMD_CLR_RISC_RESET (0x02 << 28)
+#define Q81_CTL_HCS_CMD_SET_RISC_PAUSE (0x03 << 28)
+#define Q81_CTL_HCS_CMD_CLR_RISC_PAUSE (0x04 << 28)
+#define Q81_CTL_HCS_CMD_SET_HTR_INTR (0x05 << 28)
+#define Q81_CTL_HCS_CMD_CLR_HTR_INTR (0x06 << 28)
+#define Q81_CTL_HCS_CMD_SET_PARITY_EN (0x07 << 28)
+#define Q81_CTL_HCS_CMD_FORCE_BAD_PARITY (0x08 << 28)
+#define Q81_CTL_HCS_CMD_CLR_BAD_PARITY (0x09 << 28)
+#define Q81_CTL_HCS_CMD_CLR_RTH_INTR (0x0A << 28)
+
+#define Q81_CTL_HCS_CMD_PAR_SHIFT 22
+#define Q81_CTL_HCS_RISC_PAUSED BIT_10
+#define Q81_CTL_HCS_HTR_INTR BIT_9
+#define Q81_CTL_HCS_RISC_RESET BIT_8
+#define Q81_CTL_HCS_ERR_STATUS_MASK 0x3F
+
+
+/*
+ * Configuration Register (0x28)
+ */
+#define Q81_CTL_CONFIG_MASK_SHIFT 16
+#define Q81_CTL_CONFIG_Q_NUM_SHIFT 8
+#define Q81_CTL_CONFIG_Q_NUM_MASK (0x7F << Q81_CTL_CONFIG_Q_NUM_SHIFT)
+#define Q81_CTL_CONFIG_DCQ BIT_7
+#define Q81_CTL_CONFIG_LCQ BIT_6
+#define Q81_CTL_CONFIG_LE BIT_5
+#define Q81_CTL_CONFIG_DR BIT_3
+#define Q81_CTL_CONFIG_LR BIT_2
+#define Q81_CTL_CONFIG_DRQ BIT_1
+#define Q81_CTL_CONFIG_LRQ BIT_0
+
+
+/*
+ * Status Register (0x30)
+ */
+#define Q81_CTL_STATUS_MASK_SHIFT 16
+#define Q81_CTL_STATUS_NFE BIT_12
+#define Q81_CTL_STATUS_F3E BIT_11
+#define Q81_CTL_STATUS_F2E BIT_10
+#define Q81_CTL_STATUS_F1E BIT_9
+#define Q81_CTL_STATUS_F0E BIT_8
+#define Q81_CTL_STATUS_FUNC_SHIFT 6
+#define Q81_CTL_STATUS_PI1 BIT_5
+#define Q81_CTL_STATUS_PI0 BIT_4
+#define Q81_CTL_STATUS_PL1 BIT_3
+#define Q81_CTL_STATUS_PL0 BIT_2
+#define Q81_CTL_STATUS_PI BIT_1
+#define Q81_CTL_STATUS_FE BIT_0
+
+/*
+ * Interrupt Enable Register (0x34)
+ */
+#define Q81_CTL_INTRE_MASK_SHIFT 16
+#define Q81_CTL_INTRE_EN BIT_15
+#define Q81_CTL_INTRE_EI BIT_14
+#define Q81_CTL_INTRE_IHD BIT_13
+#define Q81_CTL_INTRE_RTYPE_MASK (0x3 << 8)
+#define Q81_CTL_INTRE_RTYPE_ENABLE (0x1 << 8)
+#define Q81_CTL_INTRE_RTYPE_DISABLE (0x2 << 8)
+#define Q81_CTL_INTRE_RTYPE_SETUP_TO_RD (0x3 << 8)
+#define Q81_CTL_INTRE_HOST_INTR_MASK 0x7F
+
+/*
+ * Interrupt Mask Register (0x38)
+ */
+#define Q81_CTL_INTRM_MASK_SHIFT 16
+#define Q81_CTL_INTRM_MC BIT_7
+#define Q81_CTL_INTRM_LSC BIT_6
+#define Q81_CTL_INTRM_LH1 BIT_4
+#define Q81_CTL_INTRM_HL1 BIT_3
+#define Q81_CTL_INTRM_LH0 BIT_2
+#define Q81_CTL_INTRM_HL0 BIT_1
+#define Q81_CTL_INTRM_PI BIT_0
+
+/*
+ * Interrupt Status 1 Register (0x3C)
+ * (macro parameter parenthesized so callers may pass expressions)
+ */
+#define Q81_CTL_INTRS1_COMPQ(i) (0x1 << (i))
+
+/*
+ * Interrupt Status 2 Register (0x40)
+ */
+#define Q81_CTL_INTRS2_COMPQ(i) (0x1 << (i))
+
+/*
+ * Interrupt Status 3 Register (0x44)
+ */
+#define Q81_CTL_INTRS3_COMPQ(i) (0x1 << (i))
+
+/*
+ * Interrupt Status 4 Register (0x48)
+ */
+#define Q81_CTL_INTRS4_COMPQ(i) (0x1 << (i))
+
+/*
+ * Revision ID Register (0x4C)
+ */
+#define Q81_CTL_REV_ID_CHIP_REV_MASK (0xF << 28)
+#define Q81_CTL_REV_ID_XGMAC_RCV_MASK (0xF << 16)
+#define Q81_CTL_REV_ID_XGMAC_ROLL_MASK (0xF << 8)
+#define Q81_CTL_REV_ID_NIC_REV_MASK (0xF << 4)
+#define Q81_CTL_REV_ID_NIC_ROLL_MASK (0xF << 0)
+
+/*
+ * Semaphore Register (0x64)
+ */
+
+#define Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV 0xC0000000
+
+#define Q81_CTL_SEM_MASK_RIDX_DATAREG 0x30000000
+
+#define Q81_CTL_SEM_MASK_FLASH 0x03000000
+
+#define Q81_CTL_SEM_MASK_MAC_SERDES 0x00C00000
+
+#define Q81_CTL_SEM_MASK_ICB 0x00300000
+
+#define Q81_CTL_SEM_MASK_XGMAC1 0x000C0000
+
+#define Q81_CTL_SEM_MASK_XGMAC0 0x00030000
+
+#define Q81_CTL_SEM_SET_PROC_ADDR_NIC_RCV 0x4000
+#define Q81_CTL_SEM_SET_RIDX_DATAREG 0x1000
+#define Q81_CTL_SEM_SET_FLASH 0x0100
+#define Q81_CTL_SEM_SET_MAC_SERDES 0x0040
+#define Q81_CTL_SEM_SET_ICB 0x0010
+#define Q81_CTL_SEM_SET_XGMAC1 0x0004
+#define Q81_CTL_SEM_SET_XGMAC0 0x0001
+
+
+/*
+ * Flash Address Register (0x88)
+ */
+#define Q81_CTL_FLASH_ADDR_RDY BIT_31
+#define Q81_CTL_FLASH_ADDR_R BIT_30
+#define Q81_CTL_FLASH_ADDR_ERR BIT_29
+#define Q81_CTL_FLASH_ADDR_MASK 0x7FFFFF
+
+/*
+ * Stop CQ Processing Register (0x90)
+ */
+#define Q81_CTL_STOP_CQ_MASK_SHIFT 16
+#define Q81_CTL_STOP_CQ_EN BIT_15
+#define Q81_CTL_STOP_CQ_RQ_STARTQ (0x1 << 8)
+#define Q81_CTL_STOP_CQ_RQ_STOPQ (0x2 << 8)
+#define Q81_CTL_STOP_CQ_RQ_READ (0x3 << 8)
+#define Q81_CTL_STOP_CQ_MASK 0x7F
+
+/*
+ * MAC Protocol Address Index Register (0xA8)
+ */
+#define Q81_CTL_MAC_PROTO_AI_MW BIT_31
+#define Q81_CTL_MAC_PROTO_AI_MR BIT_30
+#define Q81_CTL_MAC_PROTO_AI_E BIT_27
+#define Q81_CTL_MAC_PROTO_AI_RS BIT_26
+#define Q81_CTL_MAC_PROTO_AI_ADR BIT_25
+#define Q81_CTL_MAC_PROTO_AI_TYPE_SHIFT 16
+#define Q81_CTL_MAC_PROTO_AI_TYPE_MASK 0xF0000
+#define Q81_CTL_MAC_PROTO_AI_IDX_SHIFT 4
+#define Q81_CTL_MAC_PROTO_AI_IDX_MASK 0xFFF0
+#define Q81_CTL_MAC_PROTO_AI_OFF_MASK 0xF
+
+#define Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC (0 << 16)
+#define Q81_CTL_MAC_PROTO_AI_TYPE_MCAST (1 << 16)
+#define Q81_CTL_MAC_PROTO_AI_TYPE_VLAN (2 << 16)
+#define Q81_CTL_MAC_PROTO_AI_TYPE_MCAST_FILTER (3 << 16)
+#define Q81_CTL_MAC_PROTO_AI_TYPE_MGMT_MAC (5 << 16)
+#define Q81_CTL_MAC_PROTO_AI_TYPE_MGMMT_VLAN (6 << 16)
+#define Q81_CTL_MAC_PROTO_AI_TYPE_MGMT_IPV4 (7 << 16)
+#define Q81_CTL_MAC_PROTO_AI_TYPE_MGMT_IPV6 (8 << 16)
+#define Q81_CTL_MAC_PROTO_AI_TYPE_MGMT_PORT (9 << 16) /* TCP/UDP Port */
+
+/*
+ * CAM MAC offset 2 definitions
+ */
+#define Q81_CAM_MAC_OFF2_ROUTE_FC 0x00000000
+#define Q81_CAM_MAC_OFF2_ROUTE_NIC 0x00000001
+#define Q81_CAM_MAC_OFF2_FUNC_SHIFT 2
+#define Q81_CAM_MAC_OFF2_RV 0x00000010
+#define Q81_CAM_MAC_OFF2_CQID_SHIFT 5
+#define Q81_CAM_MAC_OFF2_SH 0x00008000
+#define Q81_CAM_MAC_OFF2_MHT 0x40000000
+#define Q81_CAM_MAC_OFF2_VLD 0x80000000
+
+/*
+ * NIC Pause Threshold Register (0xC8)
+ */
+#define Q81_CTL_NIC_PAUSE_THRES_PAUSE_SHIFT 16
+#define Q81_CTL_NIC_PAUSE_THRES_RESUME_SHIFT 0
+
+/*
+ * NIC Receive Configuration Register (0xD4)
+ */
+#define Q81_CTL_NIC_RCVC_MASK_SHIFT 16
+#define Q81_CTL_NIC_RCVC_DCQ_SHIFT 8
+#define Q81_CTL_NIC_RCVC_DCQ_MASK 0x7F00
+#define Q81_CTL_NIC_RCVC_DTP BIT_5
+#define Q81_CTL_NIC_RCVC_R4T BIT_4
+#define Q81_CTL_NIC_RCVC_RV BIT_3
+#define Q81_CTL_NIC_RCVC_VLAN_ALL (0x0 << 1)
+#define Q81_CTL_NIC_RCVC_VLAN_ONLY (0x1 << 1)
+#define Q81_CTL_NIC_RCVC_VLAN_NON_VLAN (0x2 << 1)
+#define Q81_CTL_NIC_RCVC_VLAN_REJECT (0x3 << 1)
+#define Q81_CTL_NIC_RCVC_PPE BIT_0
+
+
+/*
+ * Routing Index Register (0xE4)
+ */
+#define Q81_CTL_RI_MW BIT_31
+#define Q81_CTL_RI_MR BIT_30
+#define Q81_CTL_RI_E BIT_27
+#define Q81_CTL_RI_RS BIT_26
+
+#define Q81_CTL_RI_DST_RSS (0x00 << 20)
+#define Q81_CTL_RI_DST_CAMQ (0x01 << 20)
+#define Q81_CTL_RI_DST_COSQ (0x02 << 20)
+#define Q81_CTL_RI_DST_DFLTQ (0x03 << 20)
+#define Q81_CTL_RI_DST_DESTQ (0x04 << 20)
+#define Q81_CTL_RI_DST_DROP (0x07 << 20)
+
+#define Q81_CTL_RI_TYPE_RTMASK (0x00 << 16)
+#define Q81_CTL_RI_TYPE_RTINVMASK (0x01 << 16)
+#define Q81_CTL_RI_TYPE_NICQMASK (0x02 << 16)
+#define Q81_CTL_RI_TYPE_NICQINVMASK (0x03 << 16)
+
+/* these indices for the Routing Index Register are user defined */
+#define Q81_CTL_RI_IDX_ALL_ERROR (0x00 << 8)
+#define Q81_CTL_RI_IDX_MAC_ERROR (0x00 << 8)
+#define Q81_CTL_RI_IDX_IPCSUM_ERROR (0x01 << 8)
+#define Q81_CTL_RI_IDX_TCPCSUM_ERROR (0x02 << 8)
+#define Q81_CTL_RI_IDX_BCAST (0x03 << 8)
+#define Q81_CTL_RI_IDX_MCAST_MATCH (0x04 << 8)
+#define Q81_CTL_RI_IDX_ALLMULTI (0x05 << 8)
+#define Q81_CTL_RI_IDX_RSS_MATCH (0x08 << 8)
+#define Q81_CTL_RI_IDX_RSS_IPV4 (0x08 << 8)
+#define Q81_CTL_RI_IDX_RSS_IPV6 (0x09 << 8)
+#define Q81_CTL_RI_IDX_RSS_TCPV4 (0x0A << 8)
+#define Q81_CTL_RI_IDX_RSS_TCPV6 (0x0B << 8)
+#define Q81_CTL_RI_IDX_CAM_HIT (0x0C << 8)
+#define Q81_CTL_RI_IDX_PROMISCUOUS (0x0F << 8)
+
+/* Routing Masks to be loaded into Routing Data Register */
+#define Q81_CTL_RD_BCAST BIT_0
+#define Q81_CTL_RD_MCAST BIT_1
+#define Q81_CTL_RD_MCAST_MATCH BIT_2
+#define Q81_CTL_RD_MCAST_REG_MATCH BIT_3
+#define Q81_CTL_RD_MCAST_HASH_MATCH BIT_4
+#define Q81_CTL_RD_CAM_HIT BIT_7
+#define Q81_CTL_RD_CAM_BIT0 BIT_8
+#define Q81_CTL_RD_CAM_BIT1 BIT_9
+#define Q81_CTL_RD_VLAN_TAG_PRESENT BIT_10
+#define Q81_CTL_RD_VLAN_MATCH BIT_11
+#define Q81_CTL_RD_VLAN_FILTER_PASS BIT_12
+#define Q81_CTL_RD_SKIP_ETHERTYPE_1 BIT_13
+#define Q81_CTL_RD_SKIP_ETHERTYPE_2 BIT_14
+#define Q81_CTL_RD_BCAST_OR_MCAST_MATCH BIT_15
+#define Q81_CTL_RD_802_3_PKT BIT_16
+#define Q81_CTL_RD_LLDP_PKT BIT_17
+#define Q81_CTL_RD_TUNNELED_PKT BIT_18
+#define Q81_CTL_RD_ERROR_PKT BIT_22
+#define Q81_CTL_RD_VALID_PKT BIT_23
+#define Q81_CTL_RD_TCP_UDP_CSUM_ERR BIT_24
+#define Q81_CTL_RD_IPCSUM_ERR BIT_25
+#define Q81_CTL_RD_MAC_ERR BIT_26
+#define Q81_CTL_RD_RSS_TCP_IPV6 BIT_27
+#define Q81_CTL_RD_RSS_TCP_IPV4 BIT_28
+#define Q81_CTL_RD_RSS_IPV6 BIT_29
+#define Q81_CTL_RD_RSS_IPV4 BIT_30
+#define Q81_CTL_RD_RSS_MATCH BIT_31
+
+
+/*********************************************************************
+ * Host Data Structures *
+ *********************************************************************/
+
+/*
+ * Work Queue Initialization Control Block
+ */
+
+typedef struct _q81_wq_icb {
+
+	uint16_t length_v; /* queue length; BIT_4 marks the ICB valid */
+#define Q81_WQ_ICB_VALID BIT_4
+
+	uint8_t pri; /* queue priority (<< Q81_WQ_ICB_PRI_SHIFT) */
+#define Q81_WQ_ICB_PRI_SHIFT 1
+
+	uint8_t flags;
+#define Q81_WQ_ICB_FLAGS_LO BIT_7
+#define Q81_WQ_ICB_FLAGS_LI BIT_6
+#define Q81_WQ_ICB_FLAGS_LB BIT_5
+#define Q81_WQ_ICB_FLAGS_LC BIT_4
+
+	uint16_t wqcqid_rss; /* completion queue id for this work queue */
+#define Q81_WQ_ICB_RSS_V BIT_15
+
+	uint16_t rsrvd;
+
+	/* 64bit DMA base address of the work queue ring */
+	uint32_t baddr_lo;
+	uint32_t baddr_hi;
+
+	/* 64bit DMA address of the consumer-index shadow (presumably
+	 * written back by hardware -- confirm against chip docs) */
+	uint32_t ci_addr_lo;
+	uint32_t ci_addr_hi;
+} __packed q81_wq_icb_t;
+
+
+/*
+ * Completion Queue Initialization Control Block
+ */
+
+typedef struct _q81_cq_icb {
+	uint8_t msix_vector; /* MSI-X vector associated with this CQ */
+	uint16_t rsrvd0;
+	uint8_t flags;
+#define Q81_CQ_ICB_FLAGS_LC BIT_7
+#define Q81_CQ_ICB_FLAGS_LI BIT_6
+#define Q81_CQ_ICB_FLAGS_LL BIT_5
+#define Q81_CQ_ICB_FLAGS_LS BIT_4
+#define Q81_CQ_ICB_FLAGS_LV BIT_3
+
+	uint16_t length_v; /* queue length; BIT_4 marks the ICB valid */
+#define Q81_CQ_ICB_VALID BIT_4
+
+	uint16_t rsrvd1;
+
+	/* 64bit DMA base address of the completion ring */
+	uint32_t cq_baddr_lo;
+	uint32_t cq_baddr_hi;
+
+	/* 64bit DMA address of the CQ index shadow area */
+	uint32_t cqi_addr_lo;
+	uint32_t cqi_addr_hi;
+
+	/* interrupt coalescing: packet-count and timer delays */
+	uint16_t pkt_idelay;
+	uint16_t idelay;
+
+	/* large receive buffer queue (see Q81_LRGBQ_* doorbells) */
+	uint32_t lbq_baddr_lo;
+	uint32_t lbq_baddr_hi;
+	uint16_t lbq_bsize; /* buffer size in bytes */
+	uint16_t lbq_length; /* number of entries */
+
+	/* small receive buffer queue (see Q81_SMBQ_* doorbells) */
+	uint32_t sbq_baddr_lo;
+	uint32_t sbq_baddr_hi;
+	uint16_t sbq_bsize;
+	uint16_t sbq_length;
+} __packed q81_cq_icb_t;
+
+/*
+ * RSS Initialization Control Block
+ */
+typedef struct _q81_rss_icb {
+	uint16_t flags_base_cq_num; /* RSS enable flags + base CQ number */
+#define Q81_RSS_ICB_FLAGS_L4K BIT_7
+#define Q81_RSS_ICB_FLAGS_L6K BIT_8
+#define Q81_RSS_ICB_FLAGS_LI BIT_9
+#define Q81_RSS_ICB_FLAGS_LB BIT_10
+#define Q81_RSS_ICB_FLAGS_LM BIT_11
+#define Q81_RSS_ICB_FLAGS_RI4 BIT_12
+#define Q81_RSS_ICB_FLAGS_RT4 BIT_13
+#define Q81_RSS_ICB_FLAGS_RI6 BIT_14
+#define Q81_RSS_ICB_FLAGS_RT6 BIT_15
+
+	uint16_t mask; /* bits 9-0 are valid */
+
+#define Q81_RSS_ICB_NUM_INDTBL_ENTRIES 1024
+	/* Indirection Table: maps the masked hash to a completion queue */
+	uint8_t cq_id[Q81_RSS_ICB_NUM_INDTBL_ENTRIES];
+
+	/* Hash Keys: 40 bytes for IPv6, 16 bytes for IPv4 */
+	uint32_t ipv6_rss_hash_key[10];
+	uint32_t ipv4_rss_hash_key[4];
+} __packed q81_rss_icb_t;
+
+
+
+/*
+ * Transmit Buffer Descriptor
+ */
+
+typedef struct _q81_txb_desc {
+	uint64_t baddr; /* 64bit DMA address of the TX data segment */
+	uint16_t length; /* segment length in bytes */
+
+	uint16_t flags;
+#define Q81_TXB_DESC_FLAGS_E BIT_15 /* presumably end-of-list -- confirm */
+#define Q81_TXB_DESC_FLAGS_C BIT_14 /* presumably continuation -- confirm */
+
+} __packed q81_txb_desc_t;
+
+
+/*
+ * Receive Buffer Descriptor
+ */
+
+/* Receive buffer descriptor: DMA address + length of one RX buffer */
+typedef struct _q81_rxb_desc {
+	uint32_t baddr_lo;
+#define Q81_RXB_DESC_BADDR_LO_S BIT_1
+
+	uint64_t baddr; /* 64bit DMA address of the receive buffer */
+
+	uint16_t length;
+
+	uint16_t flags;
+#define Q81_RXB_DESC_FLAGS_E BIT_15
+#define Q81_RXB_DESC_FLAGS_C BIT_14
+
+} __packed q81_rxb_desc_t;
+
+/*
+ * IOCB Types
+ */
+
+#define Q81_IOCB_TX_MAC 0x01
+#define Q81_IOCB_TX_TSO 0x02
+#define Q81_IOCB_RX 0x20
+#define Q81_IOCB_MPI 0x21
+#define Q81_IOCB_SYS 0x3F
+
+
+/*
+ * IOCB Definitions
+ */
+
+/*
+ * MAC Tx Frame IOCB
+ * Total Size of each IOCB Entry = 4 * 32 = 128 bytes
+ */
+#define MAX_TX_MAC_DESC 8
+
+typedef struct _q81_tx_mac {
+
+	uint8_t opcode; /* Q81_IOCB_TX_MAC */
+
+	uint16_t flags;
+#define Q81_TX_MAC_FLAGS_D BIT_3
+#define Q81_TX_MAC_FLAGS_I BIT_1
+#define Q81_TX_MAC_FLAGS_OI BIT_0
+
+	uint8_t vlan_off;
+#define Q81_TX_MAC_VLAN_OFF_SHIFT 3
+#define Q81_TX_MAC_VLAN_OFF_V BIT_2
+#define Q81_TX_MAC_VLAN_OFF_DFP BIT_1
+
+	uint32_t rsrvd1;
+	uint32_t rsrvd2;
+
+	uint16_t frame_length; /* only bits0-13 are valid */
+	uint16_t rsrvd3;
+
+	/* 64bit transaction id; echoed back in the TX completion IOCB */
+	uint32_t tid_lo;
+	uint32_t tid_hi;
+
+	uint32_t rsrvd4;
+
+	uint16_t vlan_tci;
+	uint16_t rsrvd5;
+
+	/* up to MAX_TX_MAC_DESC (8) gather buffer descriptors */
+	q81_txb_desc_t txd[MAX_TX_MAC_DESC];
+} __packed q81_tx_mac_t;
+
+
+/*
+ * MAC Tx Frame with TSO IOCB
+ * Total Size of each IOCB Entry = 4 * 32 = 128 bytes
+ */
+typedef struct _q81_tx_tso {
+	uint8_t opcode; /* Q81_IOCB_TX_TSO */
+
+	uint16_t flags;
+#define Q81_TX_TSO_FLAGS_OI BIT_0
+#define Q81_TX_TSO_FLAGS_I BIT_1
+#define Q81_TX_TSO_FLAGS_D BIT_3
+#define Q81_TX_TSO_FLAGS_IPV4 BIT_6
+#define Q81_TX_TSO_FLAGS_IPV6 BIT_7
+#define Q81_TX_TSO_FLAGS_LSO BIT_13
+#define Q81_TX_TSO_FLAGS_UC BIT_14
+#define Q81_TX_TSO_FLAGS_TC BIT_15
+
+	uint8_t vlan_off;
+#define Q81_TX_TSO_VLAN_OFF_SHIFT 3
+#define Q81_TX_TSO_VLAN_OFF_V BIT_2
+#define Q81_TX_TSO_VLAN_OFF_DFP BIT_1
+#define Q81_TX_TSO_VLAN_OFF_IC BIT_0
+
+	uint32_t rsrvd1;
+	uint32_t rsrvd2;
+
+	uint32_t length;
+	/* 64bit transaction id; echoed back in the TSO completion IOCB */
+	uint32_t tid_lo;
+	uint32_t tid_hi;
+
+	uint16_t phdr_length; /* presumably pseudo-header length for csum offload -- confirm */
+
+	uint16_t phdr_offsets;
+#define Q81_TX_TSO_PHDR_SHIFT 6
+
+	uint16_t vlan_tci;
+	uint16_t mss; /* segment size used when FLAGS_LSO is set */
+
+	q81_txb_desc_t txd[MAX_TX_MAC_DESC];
+} __packed q81_tx_tso_t;
+
+/* Generic 128-byte TX IOCB slot (raw view of q81_tx_mac_t/q81_tx_tso_t) */
+typedef struct _q81_tx_cmd {
+	uint8_t bytes[128];
+} __packed q81_tx_cmd_t;
+
+/*
+ * MAC TX Frame Completion
+ * Total Size of each IOCB Entry = 4 * 16 = 64 bytes
+ */
+
+typedef struct _q81_tx_mac_comp {
+	uint8_t opcode; /* Q81_IOCB_TX_MAC */
+
+	uint8_t flags; /* completion status bits */
+#define Q81_TX_MAC_COMP_FLAGS_OI BIT_0
+#define Q81_TX_MAC_COMP_FLAGS_I BIT_1
+#define Q81_TX_MAC_COMP_FLAGS_E BIT_3
+#define Q81_TX_MAC_COMP_FLAGS_S BIT_4
+#define Q81_TX_MAC_COMP_FLAGS_L BIT_5
+#define Q81_TX_MAC_COMP_FLAGS_P BIT_6
+
+	uint8_t rsrvd0;
+
+	uint8_t err;
+#define Q81_TX_MAC_COMP_ERR_B BIT_7
+
+	/* transaction id copied from the originating q81_tx_mac_t */
+	uint32_t tid_lo;
+	uint32_t tid_hi;
+
+	uint32_t rsrvd1[13]; /* pad the IOCB out to 64 bytes */
+} __packed q81_tx_mac_comp_t;
+
+
+/*
+ * MAC TX Frame with LSO Completion
+ * Total Size of each IOCB Entry = 4 * 16 = 64 bytes
+ */
+
+typedef struct _q81_tx_tso_comp {
+	uint8_t opcode; /* Q81_IOCB_TX_TSO */
+
+	uint8_t flags; /* completion status bits */
+#define Q81_TX_TSO_COMP_FLAGS_OI BIT_0
+#define Q81_TX_TSO_COMP_FLAGS_I BIT_1
+#define Q81_TX_TSO_COMP_FLAGS_E BIT_3
+#define Q81_TX_TSO_COMP_FLAGS_S BIT_4
+#define Q81_TX_TSO_COMP_FLAGS_P BIT_6
+
+	uint8_t rsrvd0;
+
+	uint8_t err;
+#define Q81_TX_TSO_COMP_ERR_B BIT_7
+
+	/* transaction id copied from the originating q81_tx_tso_t */
+	uint32_t tid_lo;
+	uint32_t tid_hi;
+
+	uint32_t rsrvd1[13]; /* pad the IOCB out to 64 bytes */
+} __packed q81_tx_tso_comp_t;
+
+
+/*
+ * SYS - Chip Event Notification Completion
+ * Total Size of each IOCB Entry = 4 * 16 = 64 bytes
+ */
+
+typedef struct _q81_sys_comp {
+	uint8_t opcode; /* Q81_IOCB_SYS */
+
+	uint8_t flags;
+#define Q81_SYS_COMP_FLAGS_OI BIT_0
+#define Q81_SYS_COMP_FLAGS_I BIT_1
+
+	uint8_t etype; /* event type code */
+#define Q81_SYS_COMPE_LINK_UP 0x00
+#define Q81_SYS_COMPE_LINK_DOWN 0x01
+#define Q81_SYS_COMPE_MULTI_CAM_LOOKUP 0x06
+#define Q81_SYS_COMPE_SOFT_ECC 0x07
+#define Q81_SYS_COMPE_MPI_FATAL_ERROR 0x08
+#define Q81_SYS_COMPE_MAC_INTR 0x09
+#define Q81_SYS_COMPE_GPI0_HTOL 0x10
+#define Q81_SYS_COMPE_GPI0_LTOH 0x20
+#define Q81_SYS_COMPE_GPI1_HTOL 0x11
+#define Q81_SYS_COMPE_GPI1_LTOH 0x21
+
+	uint8_t q_id; /* only bits 0-6 are valid */
+
+	uint32_t rsrvd1[15]; /* pad the IOCB out to 64 bytes */
+} __packed q81_sys_comp_t;
+
+
+
+/*
+ * Mac Rx Packet Completion
+ * Total Size of each IOCB Entry = 4 * 16 = 64 bytes
+ */
+
+typedef struct _q81_rx {
+	uint8_t opcode; /* Q81_IOCB_RX */
+
+	uint8_t flags0;
+#define Q81_RX_FLAGS0_OI BIT_0
+#define Q81_RX_FLAGS0_I BIT_1
+#define Q81_RX_FLAGS0_TE BIT_2
+#define Q81_RX_FLAGS0_NU BIT_3
+#define Q81_RX_FLAGS0_IE BIT_4
+
+#define Q81_RX_FLAGS0_MCAST_MASK (0x03 << 5)
+#define Q81_RX_FLAGS0_MCAST_NONE (0x00 << 5)
+#define Q81_RX_FLAGS0_MCAST_HASH_MATCH (0x01 << 5)
+#define Q81_RX_FLAGS0_MCAST_REG_MATCH (0x02 << 5)
+#define Q81_RX_FLAGS0_MCAST_PROMISC (0x03 << 5)
+
+#define Q81_RX_FLAGS0_B BIT_7
+
+	uint16_t flags1;
+#define Q81_RX_FLAGS1_P BIT_0
+#define Q81_RX_FLAGS1_V BIT_1
+
+#define Q81_RX_FLAGS1_ERR_NONE (0x00 << 2)
+#define Q81_RX_FLAGS1_ERR_CODE (0x01 << 2)
+#define Q81_RX_FLAGS1_ERR_OSIZE (0x02 << 2)
+#define Q81_RX_FLAGS1_ERR_USIZE (0x04 << 2)
+#define Q81_RX_FLAGS1_ERR_PREAMBLE (0x05 << 2)
+#define Q81_RX_FLAGS1_ERR_FRAMELENGTH (0x06 << 2)
+#define Q81_RX_FLAGS1_ERR_CRC (0x07 << 2)
+#define Q81_RX_FLAGS1_ERR_MASK (0x07 << 2)
+
+#define Q81_RX_FLAGS1_U BIT_5
+#define Q81_RX_FLAGS1_T BIT_6
+#define Q81_RX_FLAGS1_FO BIT_7
+#define Q81_RX_FLAGS1_RSS_NO_MATCH (0x00 << 8)
+#define Q81_RX_FLAGS1_RSS_IPV4_MATCH (0x04 << 8)
+#define Q81_RX_FLAGS1_RSS_IPV6_MATCH (0x02 << 8)
+#define Q81_RX_FLAGS1_RSS_TCPIPV4_MATCH (0x05 << 8)
+/* NOTE(review): a duplicate TCPIPV4 entry was removed here; a TCPIPV6
+ * match code is likely missing -- confirm against the chip docs */
+#define Q81_RX_FLAGS1_RSS_MATCH_MASK (0x07 << 8)
+#define Q81_RX_FLAGS1_V4 BIT_11
+#define Q81_RX_FLAGS1_V6 BIT_12
+#define Q81_RX_FLAGS1_IH BIT_13
+#define Q81_RX_FLAGS1_DS BIT_14
+#define Q81_RX_FLAGS1_DL BIT_15
+
+	uint32_t length; /* received data length */
+	uint64_t b_paddr; /* DMA address of the receive buffer used */
+
+	uint32_t rss; /* RSS hash value */
+	uint16_t vlan_tag;
+	uint16_t rsrvd;
+	uint32_t rsrvd1;
+	uint32_t flags2;
+#define Q81_RX_FLAGS2_HV BIT_13
+#define Q81_RX_FLAGS2_HS BIT_14
+#define Q81_RX_FLAGS2_HL BIT_15
+
+	/* split-header fields (see Q81_CTL_SPLIT_HDR); presumably valid
+	 * only when the header flags in flags2 are set -- confirm */
+	uint32_t hdr_length;
+	uint32_t hdr_baddr_lo;
+	uint32_t hdr_baddr_hi;
+
+} __packed q81_rx_t;
+
+/* Generic 64-byte completion queue entry; opcode selects the real view */
+typedef struct _q81_cq_e {
+	uint8_t opcode;
+	uint8_t bytes[63];
+} __packed q81_cq_e_t;
+
+/* 64bit buffer-address element used in the lbq/sbq rings */
+typedef struct _q81_bq_addr_e {
+	uint32_t addr_lo;
+	uint32_t addr_hi;
+} __packed q81_bq_addr_e_t;
+
+
+/*
+ * Macros for reading and writing registers
+ */
+
+#if defined(__i386__) || defined(__amd64__)
+#define Q8_MB() __asm volatile("mfence" ::: "memory")
+#define Q8_WMB() __asm volatile("sfence" ::: "memory")
+#define Q8_RMB() __asm volatile("lfence" ::: "memory")
+#else
+#define Q8_MB()
+#define Q8_WMB()
+#define Q8_RMB()
+#endif
+
+#define READ_REG32(ha, reg) bus_read_4((ha->pci_reg), reg)
+#define READ_REG64(ha, reg) bus_read_8((ha->pci_reg), reg)
+
+#define WRITE_REG32_ONLY(ha, reg, val) bus_write_4((ha->pci_reg), reg, val)
+
+#define WRITE_REG32(ha, reg, val) bus_write_4((ha->pci_reg), reg, val)
+
+#define Q81_CTL_INTRE_MASK_VALUE \
+ (((Q81_CTL_INTRE_RTYPE_MASK | Q81_CTL_INTRE_HOST_INTR_MASK) << \
+ Q81_CTL_INTRE_MASK_SHIFT) | Q81_CTL_INTRE_RTYPE_ENABLE)
+
+#define Q81_ENABLE_INTR(ha, idx) \
+ WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, (Q81_CTL_INTRE_MASK_VALUE | idx))
+
+#define Q81_CTL_INTRD_MASK_VALUE \
+ (((Q81_CTL_INTRE_RTYPE_MASK | Q81_CTL_INTRE_HOST_INTR_MASK) << \
+ Q81_CTL_INTRE_MASK_SHIFT) | Q81_CTL_INTRE_RTYPE_DISABLE)
+
+#define Q81_DISABLE_INTR(ha, idx) \
+ WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, (Q81_CTL_INTRD_MASK_VALUE | idx))
+
+#define Q81_WR_WQ_PROD_IDX(wq_idx, idx) bus_write_4((ha->pci_reg1),\
+ (ha->tx_ring[wq_idx].wq_db_offset + Q81_WRKQ_INDEX_REG), idx)
+
+#define Q81_RD_WQ_IDX(wq_idx) bus_read_4((ha->pci_reg1),\
+ (ha->tx_ring[wq_idx].wq_db_offset + Q81_WRKQ_INDEX_REG))
+
+
+#define Q81_SET_WQ_VALID(wq_idx) bus_write_4((ha->pci_reg1),\
+ (ha->tx_ring[wq_idx].wq_db_offset + Q81_WRKQ_VALID_REG),\
+ Q81_COMPQ_VALID_V)
+
+#define Q81_SET_WQ_INVALID(wq_idx) bus_write_4((ha->pci_reg1),\
+ (ha->tx_ring[wq_idx].wq_db_offset + Q81_WRKQ_VALID_REG),\
+ (~Q81_COMPQ_VALID_V))
+
+#define Q81_WR_CQ_CONS_IDX(cq_idx, idx) bus_write_4((ha->pci_reg1),\
+ (ha->rx_ring[cq_idx].cq_db_offset + Q81_COMPQ_INDEX_REG), idx)
+
+#define Q81_RD_CQ_IDX(cq_idx) bus_read_4((ha->pci_reg1),\
+ (ha->rx_ring[cq_idx].cq_db_offset + Q81_COMPQ_INDEX_REG))
+
+#define Q81_SET_CQ_VALID(cq_idx) bus_write_4((ha->pci_reg1),\
+ (ha->rx_ring[cq_idx].cq_db_offset + Q81_COMPQ_VALID_REG),\
+ Q81_COMPQ_VALID_V)
+
+#define Q81_SET_CQ_INVALID(cq_idx) bus_write_4((ha->pci_reg1),\
+ (ha->rx_ring[cq_idx].cq_db_offset + Q81_COMPQ_VALID_REG),\
+ ~Q81_COMPQ_VALID_V)
+
+#define Q81_WR_LBQ_PROD_IDX(cq_idx, idx) bus_write_4((ha->pci_reg1),\
+ (ha->rx_ring[cq_idx].cq_db_offset + Q81_LRGBQ_INDEX_REG), idx)
+
+#define Q81_RD_LBQ_IDX(cq_idx) bus_read_4((ha->pci_reg1),\
+ (ha->rx_ring[cq_idx].cq_db_offset + Q81_LRGBQ_INDEX_REG))
+
+#define Q81_WR_SBQ_PROD_IDX(cq_idx, idx) bus_write_4((ha->pci_reg1),\
+ (ha->rx_ring[cq_idx].cq_db_offset + Q81_SMBQ_INDEX_REG), idx)
+
+#define Q81_RD_SBQ_IDX(cq_idx) bus_read_4((ha->pci_reg1),\
+ (ha->rx_ring[cq_idx].cq_db_offset + Q81_SMBQ_INDEX_REG))
+
+
+/*
+ * Flash Related
+ */
+
+#define Q81_F0_FLASH_OFFSET 0x140200
+#define Q81_F1_FLASH_OFFSET 0x140600
+#define Q81_FLASH_ID "8000"
+
+/* Layout of the per-function flash region (Q81_F0/F1_FLASH_OFFSET) */
+typedef struct _q81_flash {
+
+	uint8_t id[4]; /* equal to "8000" (Q81_FLASH_ID) */
+
+	uint16_t version;
+	uint16_t size;
+	uint16_t csum; /* checksum over the region -- verify on read */
+	uint16_t rsrvd0;
+	uint16_t total_size;
+	uint16_t nentries;
+
+	/* (dtype, dsize) tagged entries follow */
+	uint8_t dtype0;
+	uint8_t dsize0;
+	uint8_t mac_addr0[6]; /* factory MAC address, port/function 0 */
+
+	uint8_t dtype1;
+	uint8_t dsize1;
+	uint8_t mac_addr1[6]; /* factory MAC address, port/function 1 */
+
+	uint8_t dtype2;
+	uint8_t dsize2;
+	uint16_t vlan_id;
+
+	uint8_t dtype3;
+	uint8_t dsize3;
+	uint16_t last; /* terminating entry */
+
+	uint8_t rsrvd1[464];
+
+	uint16_t subsys_vid; /* PCI subsystem vendor id */
+	uint16_t subsys_did; /* PCI subsystem device id */
+
+	uint8_t rsrvd2[4];
+} __packed q81_flash_t;
+
+
+/*
+ * MPI Related
+ */
+
+#define Q81_NUM_MBX_REGISTERS 16
+#define Q81_NUM_AEN_REGISTERS 9
+
+#define Q81_FUNC0_MBX_IN_REG0 0x1180
+#define Q81_FUNC0_MBX_OUT_REG0 0x1190
+
+#define Q81_FUNC1_MBX_IN_REG0 0x1280
+#define Q81_FUNC1_MBX_OUT_REG0 0x1290
+
+#define Q81_MBX_NOP 0x0000
+#define Q81_MBX_EXEC_FW 0x0002
+#define Q81_MBX_REG_TEST 0x0006
+#define Q81_MBX_VERIFY_CHKSUM 0x0007
+#define Q81_MBX_ABOUT_FW 0x0008
+#define Q81_MBX_RISC_MEMCPY 0x000A
+#define Q81_MBX_LOAD_RISC_RAM 0x000B
+#define Q81_MBX_DUMP_RISC_RAM 0x000C
+#define Q81_MBX_WR_RAM_WORD 0x000D
+#define Q81_MBX_INIT_RISC_RAM 0x000E
+#define Q81_MBX_RD_RAM_WORD 0x000F
+#define Q81_MBX_STOP_FW 0x0014
+#define Q81_MBX_GEN_SYS_ERR 0x002A
+#define Q81_MBX_WR_SFP_PLUS 0x0030
+#define Q81_MBX_RD_SFP_PLUS 0x0031
+#define Q81_MBX_INIT_FW 0x0060
+#define Q81_MBX_GET_IFCB 0x0061
+#define Q81_MBX_GET_FW_STATE 0x0069
+#define Q81_MBX_IDC_REQ 0x0100
+#define Q81_MBX_IDC_ACK 0x0101
+#define Q81_MBX_IDC_TIME_EXTEND 0x0102
+#define Q81_MBX_WOL_MODE 0x0110
+#define Q81_MBX_SET_WOL_FILTER 0x0111
+#define Q81_MBX_CLR_WOL_FILTER 0x0112
+#define Q81_MBX_SET_WOL_MAGIC 0x0113
+#define Q81_MBX_WOL_MODE_IMM 0x0115
+#define Q81_MBX_PORT_RESET 0x0120
+#define Q81_MBX_SET_PORT_CFG 0x0122
+#define Q81_MBX_GET_PORT_CFG 0x0123
+#define Q81_MBX_GET_LNK_STATUS 0x0124
+#define Q81_MBX_SET_LED_CFG 0x0125
+#define Q81_MBX_GET_LED_CFG 0x0126
+#define Q81_MBX_SET_DCBX_CTLB 0x0130
+#define Q81_MBX_GET_DCBX_CTLB 0x0131
+#define Q81_MBX_GET_DCBX_TLV 0x0132
+#define Q81_MBX_DIAG_CMDS 0x0150
+#define Q81_MBX_SET_MGMT_CTL 0x0160
+#define Q81_MBX_SET_MGMT_CTL_STOP 0x01
+#define Q81_MBX_SET_MGMT_CTL_RESUME 0x02
+#define Q81_MBX_GET_MGMT_CTL 0x0161
+/* parenthesized: a bare "~0x3" expands unsafely into expressions */
+#define Q81_MBX_GET_MGMT_CTL_MASK (~0x3)
+#define Q81_MBX_GET_MGMT_CTL_FIFO_EMPTY 0x02
+#define Q81_MBX_GET_MGMT_CTL_SET_MGMT 0x01
+
+#define Q81_MBX_CMD_COMPLETE 0x4000
+#define Q81_MBX_CMD_INVALID 0x4001
+#define Q81_MBX_CMD_TEST_FAILED 0x4003
+#define Q81_MBX_CMD_ERROR 0x4005
+#define Q81_MBX_CMD_PARAM_ERROR 0x4006
+
+#endif /* #ifndef _QLS_HW_H_ */
diff --git a/sys/dev/qlxge/qls_inline.h b/sys/dev/qlxge/qls_inline.h
new file mode 100644
index 000000000000..6445567dd300
--- /dev/null
+++ b/sys/dev/qlxge/qls_inline.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2013-2014 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * File: qls_inline.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+#ifndef _QLS_INLINE_H_
+#define _QLS_INLINE_H_
+
+/*
+ * Maximum length of the interface send queue: one less than the
+ * number of TX descriptors, so the ring can never completely fill.
+ */
+static __inline int
+qls_get_ifq_snd_maxlen(qla_host_t *ha)
+{
+	return (NUM_TX_DESCRIPTORS - 1);
+}
+
+/*
+ * Map the media code in bits 7:4 of ha->link_hw_info to an ifmedia
+ * optics/speed value.  Returns 0 when the link is down or the media
+ * code is not recognized.
+ * NOTE(review): the code-to-media mapping (0x1-0x3 -> 10G LR/SR,
+ * 0x4-0x6 -> 10G TwinAx, 0x7-0xB -> 1G SX) is presumably from the
+ * adapter firmware spec -- confirm against vendor documentation.
+ */
+static __inline uint32_t
+qls_get_optics(qla_host_t *ha)
+{
+	uint32_t link_speed = 0;
+
+	if (ha->link_up) {
+		switch ((ha->link_hw_info & 0xF0)) {
+		case (0x01 << 4):
+		case (0x02 << 4):
+		case (0x03 << 4):
+			link_speed = (IFM_10G_LR | IFM_10G_SR);
+			break;
+
+		case (0x04 << 4):
+		case (0x05 << 4):
+		case (0x06 << 4):
+			link_speed = IFM_10G_TWINAX;
+			break;
+
+		case (0x07 << 4):
+		case (0x08 << 4):
+		case (0x09 << 4):
+		case (0x0A << 4):
+		case (0x0B << 4):
+			link_speed = IFM_1000_SX;
+			break;
+		}
+	}
+
+	return(link_speed);
+}
+
+/*
+ * Return a pointer to the adapter's MAC address bytes.
+ */
+static __inline uint8_t *
+qls_get_mac_addr(qla_host_t *ha)
+{
+	return (&ha->mac_addr[0]);
+}
+
+/*
+ * Acquire the driver's hardware lock, implemented as a flag
+ * (hw_lock_held) protected by the hw_lock mutex.  If the flag is
+ * busy, either fail immediately (no_delay != 0) or retry every
+ * millisecond until it is free.  The caller's name (str) is recorded
+ * in ha->qla_lock for debugging.
+ * Returns 0 when the lock was taken, -1 when no_delay was set and the
+ * lock was busy.
+ */
+static __inline int
+qls_lock(qla_host_t *ha, const char *str, uint32_t no_delay)
+{
+	int ret = -1;
+
+	while (1) {
+		mtx_lock(&ha->hw_lock);
+		if (!ha->hw_lock_held) {
+			/* Flag is free; claim it under the mutex. */
+			ha->hw_lock_held = 1;
+			ha->qla_lock = str;
+			ret = 0;
+			mtx_unlock(&ha->hw_lock);
+			break;
+		}
+		mtx_unlock(&ha->hw_lock);
+
+		if (no_delay)
+			break;
+		else
+			qls_mdelay(__func__, 1);
+	}
+	return (ret);
+}
+
+/*
+ * Release the hardware lock taken via qls_lock() and record the
+ * releasing caller's name for debugging.
+ */
+static __inline void
+qls_unlock(qla_host_t *ha, const char *str)
+{
+	mtx_lock(&ha->hw_lock);
+	ha->qla_unlock = str;
+	ha->hw_lock_held = 0;
+	mtx_unlock(&ha->hw_lock);
+}
+
+#endif /* #ifndef _QLS_INLINE_H_ */
diff --git a/sys/dev/qlxge/qls_ioctl.c b/sys/dev/qlxge/qls_ioctl.c
new file mode 100644
index 000000000000..5afa77631985
--- /dev/null
+++ b/sys/dev/qlxge/qls_ioctl.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2013-2014 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * File: qls_ioctl.c
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+
+#include "qls_os.h"
+#include "qls_hw.h"
+#include "qls_def.h"
+#include "qls_inline.h"
+#include "qls_glbl.h"
+#include "qls_ioctl.h"
+#include "qls_dump.h"
+extern qls_mpi_coredump_t ql_mpi_coredump;
+
+static int qls_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
+ struct thread *td);
+
+static struct cdevsw qla_cdevsw = {
+ .d_version = D_VERSION,
+ .d_ioctl = qls_eioctl,
+ .d_name = "qlxge",
+};
+
+/*
+ * Create the per-interface control character device (named after the
+ * network interface) through which management tools issue ioctls,
+ * e.g. QLA_MPI_DUMP.  The softc is stashed in si_drv1 for the ioctl
+ * handler.  Returns 0 on success, -1 on failure.
+ */
+int
+qls_make_cdev(qla_host_t *ha)
+{
+        ha->ioctl_dev = make_dev(&qla_cdevsw,
+				ha->ifp->if_dunit,
+                                UID_ROOT,
+                                GID_WHEEL,
+                                0600,
+                                "%s",
+                                if_name(ha->ifp));
+
+	if (ha->ioctl_dev == NULL)
+		return (-1);
+
+        ha->ioctl_dev->si_drv1 = ha;
+
+	return (0);
+}
+
+/*
+ * Destroy the control character device created by qls_make_cdev(),
+ * if one was created.
+ */
+void
+qls_del_cdev(qla_host_t *ha)
+{
+	if (ha->ioctl_dev == NULL)
+		return;
+	destroy_dev(ha->ioctl_dev);
+}
+
+/*
+ * Character-device ioctl handler for the qlxge control device.
+ * Supports QLA_MPI_DUMP: when the caller passes size == 0 the
+ * required buffer size is returned; otherwise the MPI core dump is
+ * captured and copied out to the user buffer.
+ * Returns 0 on success, ENXIO when the device has no softc, EINVAL
+ * when the supplied buffer is too small, or a copyout(9) error code.
+ */
+static int
+qls_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
+	struct thread *td)
+{
+	qla_host_t *ha;
+	int rval = 0;
+	qls_mpi_dump_t *mpi_dump;
+
+	if ((ha = (qla_host_t *)dev->si_drv1) == NULL)
+		return ENXIO;
+
+	switch(cmd) {
+
+	case QLA_MPI_DUMP:
+		mpi_dump = (qls_mpi_dump_t *)data;
+
+		if (mpi_dump->size == 0) {
+			/* Size query: tell the caller how big to allocate. */
+			mpi_dump->size = sizeof (qls_mpi_coredump_t);
+		} else {
+			if (mpi_dump->size < sizeof (qls_mpi_coredump_t))
+				rval = EINVAL;
+			else {
+				qls_mpi_core_dump(ha);
+				/*
+				 * Copy exactly the coredump structure.  The
+				 * original copied mpi_dump->size bytes, which
+				 * reads past ql_mpi_coredump (leaking kernel
+				 * memory) when the user buffer is larger.
+				 */
+				rval = copyout(&ql_mpi_coredump,
+						mpi_dump->dbuf,
+						sizeof (qls_mpi_coredump_t));
+
+				if (rval) {
+					device_printf(ha->pci_dev,
+						"%s: mpidump failed[%d]\n",
+						__func__, rval);
+				}
+			}
+
+		}
+
+		break;
+	default:
+		break;
+	}
+
+	return rval;
+}
+
+
diff --git a/sys/dev/qlxge/qls_ioctl.h b/sys/dev/qlxge/qls_ioctl.h
new file mode 100644
index 000000000000..af3091f1a51b
--- /dev/null
+++ b/sys/dev/qlxge/qls_ioctl.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2013-2014 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * File: qls_ioctl.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#ifndef _QLS_IOCTL_H_
+#define _QLS_IOCTL_H_
+
+#include <sys/ioccom.h>
+
+struct qls_mpi_dump {
+ uint32_t size;
+ void *dbuf;
+};
+typedef struct qls_mpi_dump qls_mpi_dump_t;
+
+/*
+ * Get MPI Dump
+ */
+#define QLA_MPI_DUMP _IOWR('q', 1, qls_mpi_dump_t)
+
+
+#endif /* #ifndef _QLS_IOCTL_H_ */
diff --git a/sys/dev/qlxge/qls_isr.c b/sys/dev/qlxge/qls_isr.c
new file mode 100644
index 000000000000..26e431e628db
--- /dev/null
+++ b/sys/dev/qlxge/qls_isr.c
@@ -0,0 +1,401 @@
+/*
+ * Copyright (c) 2013-2014 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * File: qls_isr.c
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+
+
+#include "qls_os.h"
+#include "qls_hw.h"
+#include "qls_def.h"
+#include "qls_inline.h"
+#include "qls_ver.h"
+#include "qls_glbl.h"
+#include "qls_dbg.h"
+
+
+/*
+ * Process one transmit completion: unload the DMA map and free the
+ * mbuf for the completed descriptor, then advance the ring's done
+ * index (with wrap).  An out-of-range transmit id from the hardware
+ * triggers error recovery.
+ */
+static void
+qls_tx_comp(qla_host_t *ha, uint32_t txr_idx, q81_tx_mac_comp_t *tx_comp)
+{
+	qla_tx_buf_t *txb;
+	uint32_t tx_idx = tx_comp->tid_lo;
+
+	if (tx_idx >= NUM_TX_DESCRIPTORS) {
+		/* Bogus tid from the chip; request recovery. */
+		ha->qla_initiate_recovery = 1;
+		return;
+	}
+
+	txb = &ha->tx_ring[txr_idx].tx_buf[tx_idx];
+
+	if (txb->m_head) {
+		ha->ifp->if_opackets++;
+		bus_dmamap_sync(ha->tx_tag, txb->map,
+			BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_unload(ha->tx_tag, txb->map);
+		m_freem(txb->m_head);
+
+		txb->m_head = NULL;
+	}
+
+	ha->tx_ring[txr_idx].txr_done++;
+
+	if (ha->tx_ring[txr_idx].txr_done == NUM_TX_DESCRIPTORS)
+		ha->tx_ring[txr_idx].txr_done = 0;
+}
+
+/*
+ * Refill the small-buffer queue (SBQ) of receive ring r_idx with new
+ * mbufs for every free descriptor.  The producer index is published
+ * to the hardware in batches of 16 entries.
+ */
+static void
+qls_replenish_rx(qla_host_t *ha, uint32_t r_idx)
+{
+	qla_rx_buf_t *rxb;
+	qla_rx_ring_t *rxr;
+	int count;
+	volatile q81_bq_addr_e_t *sbq_e;
+
+	rxr = &ha->rx_ring[r_idx];
+
+	count = rxr->rx_free;
+	sbq_e = rxr->sbq_vaddr;
+
+	while (count--) {
+
+		rxb = &rxr->rx_buf[rxr->sbq_next];
+
+		if (rxb->m_head == NULL) {
+			/* Allocate a fresh mbuf; stop refilling on failure. */
+			if (qls_get_mbuf(ha, rxb, NULL) != 0) {
+				device_printf(ha->pci_dev,
+					"%s: qls_get_mbuf [0,%d,%d] failed\n",
+					__func__, rxr->sbq_next, r_idx);
+				rxb->m_head = NULL;
+				break;
+			}
+		}
+
+		if (rxb->m_head != NULL) {
+			/* Write the buffer's physical address into the SBQ entry. */
+			sbq_e[rxr->sbq_next].addr_lo = (uint32_t)rxb->paddr;
+			sbq_e[rxr->sbq_next].addr_hi =
+				(uint32_t)(rxb->paddr >> 32);
+
+			rxr->sbq_next++;
+			if (rxr->sbq_next == NUM_RX_DESCRIPTORS)
+				rxr->sbq_next = 0;
+
+			rxr->sbq_free++;
+			rxr->rx_free--;
+		}
+
+		if (rxr->sbq_free == 16) {
+
+			/* Batch of 16 ready: advance and publish producer idx. */
+			rxr->sbq_in += 16;
+			rxr->sbq_in = rxr->sbq_in & (NUM_RX_DESCRIPTORS - 1);
+			rxr->sbq_free = 0;
+
+			Q81_WR_SBQ_PROD_IDX(r_idx, (rxr->sbq_in));
+		}
+	}
+}
+
+/*
+ * Process one receive completion from completion queue cq_idx for rx
+ * ring rxr_idx: validate the completion, strip any VLAN encapsulation,
+ * set RSS flow id and checksum-offload flags, and hand the mbuf to LRO
+ * or directly to the network stack.  Triggers a ring replenish once at
+ * least 16 buffers are consumable.
+ * Returns 0 on success, -1 on a completion/buffer mismatch (which also
+ * requests chip recovery).
+ */
+static int
+qls_rx_comp(qla_host_t *ha, uint32_t rxr_idx, uint32_t cq_idx, q81_rx_t *cq_e)
+{
+	qla_rx_buf_t *rxb;
+	qla_rx_ring_t *rxr;
+	device_t dev = ha->pci_dev;
+	struct mbuf *mp = NULL;
+	struct ifnet *ifp = ha->ifp;
+	struct lro_ctrl *lro;
+	struct ether_vlan_header *eh;
+
+	rxr = &ha->rx_ring[rxr_idx];
+
+	lro = &rxr->lro;
+
+	rxb = &rxr->rx_buf[rxr->rx_next];
+
+	if (!(cq_e->flags1 & Q81_RX_FLAGS1_DS)) {
+		/* Data Segment bit must be set on a valid rx completion. */
+		device_printf(dev, "%s: DS bit not set \n", __func__);
+		return -1;
+	}
+	if (rxb->paddr != cq_e->b_paddr) {
+
+		/* Completion does not match the buffer we posted. */
+		device_printf(dev,
+			"%s: (rxb->paddr != cq_e->b_paddr)[%p, %p] \n",
+			__func__, (void *)rxb->paddr, (void *)cq_e->b_paddr);
+
+		Q81_SET_CQ_INVALID(cq_idx);
+
+		ha->qla_initiate_recovery = 1;
+
+		return(-1);
+	}
+
+	rxr->rx_int++;
+
+	if ((cq_e->flags1 & Q81_RX_FLAGS1_ERR_MASK) == 0) {
+
+		mp = rxb->m_head;
+		rxb->m_head = NULL;
+
+		if (mp == NULL) {
+			device_printf(dev, "%s: mp == NULL\n", __func__);
+		} else {
+			mp->m_flags |= M_PKTHDR;
+			mp->m_pkthdr.len = cq_e->length;
+			mp->m_pkthdr.rcvif = ifp;
+			mp->m_len = cq_e->length;
+
+			eh = mtod(mp, struct ether_vlan_header *);
+
+			if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+				uint32_t *data = (uint32_t *)eh;
+
+				mp->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
+				mp->m_flags |= M_VLANTAG;
+
+				/*
+				 * Shift the ethernet header up by 4 bytes to
+				 * remove the in-line VLAN tag, then trim the
+				 * encapsulation length off the front.
+				 */
+				*(data + 3) = *(data + 2);
+				*(data + 2) = *(data + 1);
+				*(data + 1) = *data;
+
+				m_adj(mp, ETHER_VLAN_ENCAP_LEN);
+			}
+
+			if ((cq_e->flags1 & Q81_RX_FLAGS1_RSS_MATCH_MASK)) {
+				rxr->rss_int++;
+				mp->m_pkthdr.flowid = cq_e->rss;
+				mp->m_flags |= M_FLOWID;
+			}
+			if (cq_e->flags0 & (Q81_RX_FLAGS0_TE |
+				Q81_RX_FLAGS0_NU | Q81_RX_FLAGS0_IE)) {
+				/* TCP/UDP/IP checksum error reported by hw. */
+				mp->m_pkthdr.csum_flags = 0;
+			} else {
+				/* Hardware verified the checksums. */
+				mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED |
+					CSUM_IP_VALID | CSUM_DATA_VALID |
+					CSUM_PSEUDO_HDR;
+				mp->m_pkthdr.csum_data = 0xFFFF;
+			}
+			ifp->if_ipackets++;
+
+			if (lro->lro_cnt && (tcp_lro_rx(lro, mp, 0) == 0)) {
+				/* LRO packet has been successfuly queued */
+			} else {
+				(*ifp->if_input)(ifp, mp);
+			}
+		}
+	} else {
+		device_printf(dev, "%s: err [0%08x]\n", __func__, cq_e->flags1);
+	}
+
+	rxr->rx_free++;
+	rxr->rx_next++;
+
+	if (rxr->rx_next == NUM_RX_DESCRIPTORS)
+		rxr->rx_next = 0;
+
+	if ((rxr->rx_free + rxr->sbq_free) >= 16)
+		qls_replenish_rx(ha, rxr_idx);
+
+	return 0;
+}
+
+/*
+ * Drain completion queue cq_idx: dispatch each entry to the tx or rx
+ * completion handler, re-read the hardware completion index when we
+ * catch up, flush any active LRO sessions, and publish the new
+ * consumer index to the chip (unless an rx error aborted processing).
+ * Pending tx completions wake the transmit taskqueue.
+ */
+static void
+qls_cq_isr(qla_host_t *ha, uint32_t cq_idx)
+{
+	q81_cq_e_t *cq_e, *cq_b;
+	uint32_t i, cq_comp_idx;
+	int ret = 0, tx_comp_done = 0;
+	struct lro_ctrl *lro;
+	struct lro_entry *queued;
+
+	cq_b = ha->rx_ring[cq_idx].cq_base_vaddr;
+	lro = &ha->rx_ring[cq_idx].lro;
+
+	/* Hardware-maintained completion (producer) index. */
+	cq_comp_idx = *(ha->rx_ring[cq_idx].cqi_vaddr);
+
+	i = ha->rx_ring[cq_idx].cq_next;
+
+	while (i != cq_comp_idx) {
+
+		cq_e = &cq_b[i];
+
+		switch (cq_e->opcode) {
+
+		case Q81_IOCB_TX_MAC:
+		case Q81_IOCB_TX_TSO:
+			qls_tx_comp(ha, cq_idx, (q81_tx_mac_comp_t *)cq_e);
+			tx_comp_done++;
+			break;
+
+		case Q81_IOCB_RX:
+			ret = qls_rx_comp(ha, cq_idx, i, (q81_rx_t *)cq_e);
+
+			break;
+
+		case Q81_IOCB_MPI:
+		case Q81_IOCB_SYS:
+		default:
+			device_printf(ha->pci_dev, "%s[%d %d 0x%x]: illegal \n",
+				__func__, i, (*(ha->rx_ring[cq_idx].cqi_vaddr)),
+				cq_e->opcode);
+			qls_dump_buf32(ha, __func__, cq_e,
+				(sizeof (q81_cq_e_t) >> 2));
+			break;
+		}
+
+		i++;
+		if (i == NUM_CQ_ENTRIES)
+			i = 0;
+
+		/* rx error: stop without publishing the consumer index. */
+		if (ret) {
+			break;
+		}
+
+		/* Caught up; see if the hardware produced more entries. */
+		if (i == cq_comp_idx) {
+			cq_comp_idx = *(ha->rx_ring[cq_idx].cqi_vaddr);
+		}
+
+		if (tx_comp_done) {
+			taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
+			tx_comp_done = 0;
+		}
+	}
+
+	/* Flush any LRO sessions accumulated during this pass. */
+	while((!SLIST_EMPTY(&lro->lro_active))) {
+		queued = SLIST_FIRST(&lro->lro_active);
+		SLIST_REMOVE_HEAD(&lro->lro_active, next);
+		tcp_lro_flush(lro, queued);
+	}
+
+	ha->rx_ring[cq_idx].cq_next = cq_comp_idx;
+
+	if (!ret) {
+		Q81_WR_CQ_CONS_IDX(cq_idx, (ha->rx_ring[cq_idx].cq_next));
+	}
+	if (tx_comp_done)
+		taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
+
+	return;
+}
+
+/*
+ * Service a mailbox/firmware interrupt.  Mailbox register 0 values of
+ * the form 0x4xxx signal completion of a mailbox command (the full set
+ * of registers is captured into ha->mbox); 0x8xxx values are
+ * asynchronous event notifications (AENs), captured into ha->aen and
+ * used to track link state and link hardware info.  The routed
+ * interrupt is cleared before returning.
+ */
+static void
+qls_mbx_isr(qla_host_t *ha)
+{
+	uint32_t data;
+	int i;
+	device_t dev = ha->pci_dev;
+
+	if (qls_mbx_rd_reg(ha, 0, &data) == 0) {
+
+		if ((data & 0xF000) == 0x4000) {
+			/* Mailbox command completion status. */
+			ha->mbox[0] = data;
+			for (i = 1; i < Q81_NUM_MBX_REGISTERS; i++) {
+				if (qls_mbx_rd_reg(ha, i, &data))
+					break;
+				ha->mbox[i] = data;
+			}
+			ha->mbx_done = 1;
+		} else if ((data & 0xF000) == 0x8000) {
+
+			/* we have an AEN */
+
+			ha->aen[0] = data;
+			for (i = 1; i < Q81_NUM_AEN_REGISTERS; i++) {
+				if (qls_mbx_rd_reg(ha, i, &data))
+					break;
+				ha->aen[i] = data;
+			}
+			device_printf(dev,"%s: AEN "
+				"[0x%08x 0x%08x 0x%08x 0x%08x 0x%08x"
+				" 0x%08x 0x%08x 0x%08x 0x%08x]\n",
+				__func__,
+				ha->aen[0], ha->aen[1], ha->aen[2],
+				ha->aen[3], ha->aen[4], ha->aen[5],
+				ha->aen[6], ha->aen[7], ha->aen[8]);
+
+			switch ((ha->aen[0] & 0xFFFF)) {
+
+			case 0x8011:
+				/* link up event */
+				ha->link_up = 1;
+				break;
+
+			case 0x8012:
+				/* link down event */
+				ha->link_up = 0;
+				break;
+
+			case 0x8130:
+				/* link hardware info changed */
+				ha->link_hw_info = ha->aen[1];
+				break;
+
+			case 0x8131:
+				ha->link_hw_info = 0;
+				break;
+
+			}
+		}
+	}
+	WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, Q81_CTL_HCS_CMD_CLR_RTH_INTR);
+
+	return;
+}
+
+/*
+ * Per-vector interrupt handler.  Each MSI-X vector is bound to one
+ * completion queue (ivec->cq_idx).  Bails out on a fatal chip error;
+ * vector 0 additionally services mailbox/AEN interrupts.  Processes
+ * the completion queue when its status bit is set, then re-enables
+ * the interrupt for this queue.
+ */
+void
+qls_isr(void *arg)
+{
+	qla_ivec_t *ivec = arg;
+	qla_host_t *ha;
+	uint32_t status;
+	uint32_t cq_idx;
+	device_t dev;
+
+	ha = ivec->ha;
+	cq_idx = ivec->cq_idx;
+	dev = ha->pci_dev;
+
+	status = READ_REG32(ha, Q81_CTL_STATUS);
+
+	if (status & Q81_CTL_STATUS_FE) {
+		/* Fatal error: leave the interrupt masked. */
+		device_printf(dev, "%s fatal error\n", __func__);
+		return;
+	}
+
+	if ((cq_idx == 0) && (status & Q81_CTL_STATUS_PI)) {
+		qls_mbx_isr(ha);
+	}
+
+	status = READ_REG32(ha, Q81_CTL_INTR_STATUS1);
+
+	if (status & ( 0x1 << cq_idx))
+		qls_cq_isr(ha, cq_idx);
+
+	Q81_ENABLE_INTR(ha, cq_idx);
+
+	return;
+}
+
diff --git a/sys/dev/qlxge/qls_os.c b/sys/dev/qlxge/qls_os.c
new file mode 100644
index 000000000000..8b34d23bde52
--- /dev/null
+++ b/sys/dev/qlxge/qls_os.c
@@ -0,0 +1,1536 @@
+/*
+ * Copyright (c) 2013-2014 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * File: qls_os.c
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+
+#include "qls_os.h"
+#include "qls_hw.h"
+#include "qls_def.h"
+#include "qls_inline.h"
+#include "qls_ver.h"
+#include "qls_glbl.h"
+#include "qls_dbg.h"
+#include <sys/smp.h>
+
+/*
+ * Some PCI Configuration Space Related Defines
+ */
+
+#ifndef PCI_VENDOR_QLOGIC
+#define PCI_VENDOR_QLOGIC 0x1077
+#endif
+
+#ifndef PCI_DEVICE_QLOGIC_8000
+#define PCI_DEVICE_QLOGIC_8000 0x8000
+#endif
+
+#define PCI_QLOGIC_DEV8000 \
+ ((PCI_DEVICE_QLOGIC_8000 << 16) | PCI_VENDOR_QLOGIC)
+
+/*
+ * static functions
+ */
+static int qls_alloc_parent_dma_tag(qla_host_t *ha);
+static void qls_free_parent_dma_tag(qla_host_t *ha);
+
+static void qls_flush_xmt_bufs(qla_host_t *ha);
+
+static int qls_alloc_rcv_bufs(qla_host_t *ha);
+static void qls_free_rcv_bufs(qla_host_t *ha);
+
+static void qls_init_ifnet(device_t dev, qla_host_t *ha);
+static void qls_release(qla_host_t *ha);
+static void qls_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
+ int error);
+static void qls_stop(qla_host_t *ha);
+static int qls_send(qla_host_t *ha, struct mbuf **m_headp);
+static void qls_tx_done(void *context, int pending);
+
+static int qls_config_lro(qla_host_t *ha);
+static void qls_free_lro(qla_host_t *ha);
+
+static void qls_error_recovery(void *context, int pending);
+
+/*
+ * Hooks to the Operating Systems
+ */
+static int qls_pci_probe (device_t);
+static int qls_pci_attach (device_t);
+static int qls_pci_detach (device_t);
+
+static void qls_start(struct ifnet *ifp);
+static void qls_init(void *arg);
+static int qls_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
+static int qls_media_change(struct ifnet *ifp);
+static void qls_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
+
+static device_method_t qla_pci_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, qls_pci_probe),
+ DEVMETHOD(device_attach, qls_pci_attach),
+ DEVMETHOD(device_detach, qls_pci_detach),
+ { 0, 0 }
+};
+
+static driver_t qla_pci_driver = {
+ "ql", qla_pci_methods, sizeof (qla_host_t),
+};
+
+static devclass_t qla8000_devclass;
+
+DRIVER_MODULE(qla8000, pci, qla_pci_driver, qla8000_devclass, 0, 0);
+
+MODULE_DEPEND(qla8000, pci, 1, 1, 1);
+MODULE_DEPEND(qla8000, ether, 1, 1, 1);
+
+MALLOC_DEFINE(M_QLA8000BUF, "qla8000buf", "Buffers for qla8000 driver");
+
+static char dev_str[64];
+static char ver_str[64];
+
+/*
+ * Name: qls_pci_probe
+ * Function: Validate the PCI device to be a QLA80XX device
+ * Returns BUS_PROBE_DEFAULT on a vendor/device match, ENXIO otherwise.
+ * NOTE(review): dev_str/ver_str are file-scope buffers shared by all
+ * instances; they are (re)written on every probe.
+ */
+static int
+qls_pci_probe(device_t dev)
+{
+        switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
+        case PCI_QLOGIC_DEV8000:
+		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
+			"Qlogic ISP 8000 PCI CNA Adapter-Ethernet Function",
+			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
+			QLA_VERSION_BUILD);
+		snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
+			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
+			QLA_VERSION_BUILD);
+                device_set_desc(dev, dev_str);
+                break;
+        default:
+                return (ENXIO);
+        }
+
+        if (bootverbose)
+                printf("%s: %s\n ", __func__, dev_str);
+
+        return (BUS_PROBE_DEFAULT);
+}
+
+/*
+ * Sysctl handler for "drvr_stats".  Writing 1 to the sysctl dumps the
+ * driver-maintained per-ring transmit/receive statistics and error
+ * counters to the console via device_printf(); reads are no-ops.
+ */
+static int
+qls_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
+{
+        int err = 0, ret;
+        qla_host_t *ha;
+	uint32_t i;
+
+        err = sysctl_handle_int(oidp, &ret, 0, req);
+
+        if (err || !req->newptr)
+                return (err);
+
+        if (ret == 1) {
+
+                ha = (qla_host_t *)arg1;
+
+		for (i = 0; i < ha->num_tx_rings; i++) {
+
+			device_printf(ha->pci_dev,
+				"%s: tx_ring[%d].tx_frames= %p\n",
+				__func__, i,
+				(void *)ha->tx_ring[i].tx_frames);
+
+			device_printf(ha->pci_dev,
+				"%s: tx_ring[%d].tx_tso_frames= %p\n",
+				__func__, i,
+				(void *)ha->tx_ring[i].tx_tso_frames);
+
+			device_printf(ha->pci_dev,
+				"%s: tx_ring[%d].tx_vlan_frames= %p\n",
+				__func__, i,
+				(void *)ha->tx_ring[i].tx_vlan_frames);
+
+			device_printf(ha->pci_dev,
+				"%s: tx_ring[%d].txr_free= 0x%08x\n",
+				__func__, i,
+				ha->tx_ring[i].txr_free);
+
+			device_printf(ha->pci_dev,
+				"%s: tx_ring[%d].txr_next= 0x%08x\n",
+				__func__, i,
+				ha->tx_ring[i].txr_next);
+
+			device_printf(ha->pci_dev,
+				"%s: tx_ring[%d].txr_done= 0x%08x\n",
+				__func__, i,
+				ha->tx_ring[i].txr_done);
+
+			device_printf(ha->pci_dev,
+				"%s: tx_ring[%d].txr_cons_idx= 0x%08x\n",
+				__func__, i,
+				*(ha->tx_ring[i].txr_cons_vaddr));
+		}
+
+		for (i = 0; i < ha->num_rx_rings; i++) {
+
+			device_printf(ha->pci_dev,
+				"%s: rx_ring[%d].rx_int= %p\n",
+				__func__, i,
+				(void *)ha->rx_ring[i].rx_int);
+
+			device_printf(ha->pci_dev,
+				"%s: rx_ring[%d].rss_int= %p\n",
+				__func__, i,
+				(void *)ha->rx_ring[i].rss_int);
+
+			device_printf(ha->pci_dev,
+				"%s: rx_ring[%d].lbq_next= 0x%08x\n",
+				__func__, i,
+				ha->rx_ring[i].lbq_next);
+
+			device_printf(ha->pci_dev,
+				"%s: rx_ring[%d].lbq_free= 0x%08x\n",
+				__func__, i,
+				ha->rx_ring[i].lbq_free);
+
+			device_printf(ha->pci_dev,
+				"%s: rx_ring[%d].lbq_in= 0x%08x\n",
+				__func__, i,
+				ha->rx_ring[i].lbq_in);
+
+			device_printf(ha->pci_dev,
+				"%s: rx_ring[%d].sbq_next= 0x%08x\n",
+				__func__, i,
+				ha->rx_ring[i].sbq_next);
+
+			device_printf(ha->pci_dev,
+				"%s: rx_ring[%d].sbq_free= 0x%08x\n",
+				__func__, i,
+				ha->rx_ring[i].sbq_free);
+
+			device_printf(ha->pci_dev,
+				"%s: rx_ring[%d].sbq_in= 0x%08x\n",
+				__func__, i,
+				ha->rx_ring[i].sbq_in);
+		}
+
+		device_printf(ha->pci_dev, "%s: err_m_getcl = 0x%08x\n",
+			__func__, ha->err_m_getcl);
+		device_printf(ha->pci_dev, "%s: err_m_getjcl = 0x%08x\n",
+			__func__, ha->err_m_getjcl);
+		device_printf(ha->pci_dev,
+			"%s: err_tx_dmamap_create = 0x%08x\n",
+			__func__, ha->err_tx_dmamap_create);
+		device_printf(ha->pci_dev,
+			"%s: err_tx_dmamap_load = 0x%08x\n",
+			__func__, ha->err_tx_dmamap_load);
+		device_printf(ha->pci_dev,
+			"%s: err_tx_defrag = 0x%08x\n",
+			__func__, ha->err_tx_defrag);
+	}
+	return (err);
+}
+
+/*
+ * Register the driver's sysctl nodes under the device's tree:
+ * "version" (read-only driver version string), "debug" (read-write
+ * debug level, reset to 0 here), and "drvr_stats" (procedural dump
+ * of driver statistics).
+ */
+static void
+qls_add_sysctls(qla_host_t *ha)
+{
+        device_t dev = ha->pci_dev;
+
+	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
+		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+		OID_AUTO, "version", CTLFLAG_RD,
+		ver_str, 0, "Driver Version");
+
+	qls_dbg_level = 0;
+        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+                OID_AUTO, "debug", CTLFLAG_RW,
+                &qls_dbg_level, qls_dbg_level, "Debug Level");
+
+        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+                OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW,
+                (void *)ha, 0,
+                qls_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");
+
+        return;
+}
+
+/*
+ * Name: qls_watchdog
+ * Function: Periodic callout.  Starts error recovery when one has been
+ *	requested, restarts transmit processing when the send queue is
+ *	non-empty and the interface is running, then reschedules itself
+ *	unless asked to exit (qla_watchdog_exit).
+ */
+static void
+qls_watchdog(void *arg)
+{
+	qla_host_t *ha = arg;
+	struct ifnet *ifp;
+
+	ifp = ha->ifp;
+
+	if (ha->flags.qla_watchdog_exit) {
+		/* Tell qls_release() the callout will not rearm itself. */
+		ha->qla_watchdog_exited = 1;
+		return;
+	}
+	ha->qla_watchdog_exited = 0;
+
+	if (!ha->flags.qla_watchdog_pause) {
+
+		if (ha->qla_initiate_recovery) {
+
+			ha->qla_watchdog_paused = 1;
+			ha->qla_initiate_recovery = 0;
+			ha->err_inject = 0;
+			taskqueue_enqueue(ha->err_tq, &ha->err_task);
+
+		} else if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) {
+
+			taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
+		}
+
+		ha->qla_watchdog_paused = 0;
+	} else {
+		ha->qla_watchdog_paused = 1;
+	}
+
+	/*
+	 * Advance the tick counter modulo 1000.  The original
+	 * "x = x++ % 1000" is undefined behavior in C (unsequenced
+	 * modification and read of watchdog_ticks).
+	 */
+	ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000;
+	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
+		qls_watchdog, ha);
+
+	return;
+}
+
+/*
+ * Name: qls_pci_attach
+ * Function: attaches the device to the operating system
+ * Sequence: map BARs 1 and 3, init locks and sysctls, allocate MSI-X
+ * vectors and per-CQ interrupt handlers, read NIC parameters,
+ * allocate DMA resources, create the ifnet, start the tx/error
+ * taskqueues and the watchdog callout, and create the control cdev.
+ * On any failure everything is torn down via qls_release().
+ */
+static int
+qls_pci_attach(device_t dev)
+{
+	qla_host_t *ha = NULL;
+	int i;
+
+	QL_DPRINT2((dev, "%s: enter\n", __func__));
+
+        if ((ha = device_get_softc(dev)) == NULL) {
+                device_printf(dev, "cannot get softc\n");
+                return (ENOMEM);
+        }
+
+        memset(ha, 0, sizeof (qla_host_t));
+
+        if (pci_get_device(dev) != PCI_DEVICE_QLOGIC_8000) {
+                device_printf(dev, "device is not QLE8000\n");
+                return (ENXIO);
+        }
+
+        ha->pci_func = pci_get_function(dev);
+
+        ha->pci_dev = dev;
+
+	pci_enable_busmaster(dev);
+
+	ha->reg_rid = PCIR_BAR(1);
+	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
+				RF_ACTIVE);
+
+        if (ha->pci_reg == NULL) {
+                device_printf(dev, "unable to map any ports\n");
+                goto qls_pci_attach_err;
+        }
+
+	ha->reg_rid1 = PCIR_BAR(3);
+	ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+			&ha->reg_rid1, RF_ACTIVE);
+
+        if (ha->pci_reg1 == NULL) {
+                device_printf(dev, "unable to map any ports\n");
+                goto qls_pci_attach_err;
+        }
+
+	mtx_init(&ha->hw_lock, "qla80xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
+	mtx_init(&ha->tx_lock, "qla80xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);
+
+	qls_add_sysctls(ha);
+	qls_hw_add_sysctls(ha);
+
+	ha->flags.lock_init = 1;
+
+	ha->msix_count = pci_msix_count(dev);
+
+	if (ha->msix_count < qls_get_msix_count(ha)) {
+		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
+			ha->msix_count);
+		goto qls_pci_attach_err;
+	}
+
+	ha->msix_count = qls_get_msix_count(ha);
+
+	device_printf(dev, "\n%s: ha %p pci_func 0x%x msix_count 0x%x"
+		" pci_reg %p pci_reg1 %p\n", __func__, ha,
+		ha->pci_func, ha->msix_count, ha->pci_reg, ha->pci_reg1);
+
+	if (pci_alloc_msix(dev, &ha->msix_count)) {
+		device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__,
+			ha->msix_count);
+		ha->msix_count = 0;
+		goto qls_pci_attach_err;
+	}
+
+	/*
+	 * One MSI-X vector per completion queue.
+	 * NOTE(review): ha->num_rx_rings is read here but
+	 * qls_rd_nic_params() is only called below -- confirm where
+	 * num_rx_rings is initialized (possibly by qls_get_msix_count()).
+	 */
+        for (i = 0; i < ha->num_rx_rings; i++) {
+                ha->irq_vec[i].cq_idx = i;
+                ha->irq_vec[i].ha = ha;
+                ha->irq_vec[i].irq_rid = 1 + i;
+
+                ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
+                                &ha->irq_vec[i].irq_rid,
+                                (RF_ACTIVE | RF_SHAREABLE));
+
+                if (ha->irq_vec[i].irq == NULL) {
+                        device_printf(dev, "could not allocate interrupt\n");
+                        goto qls_pci_attach_err;
+                }
+
+		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
+			(INTR_TYPE_NET | INTR_MPSAFE), NULL, qls_isr,
+			&ha->irq_vec[i], &ha->irq_vec[i].handle)) {
+				device_printf(dev,
+					"could not setup interrupt\n");
+			goto qls_pci_attach_err;
+		}
+	}
+
+	qls_rd_nic_params(ha);
+
+	/* allocate parent dma tag */
+	if (qls_alloc_parent_dma_tag(ha)) {
+		device_printf(dev, "%s: qls_alloc_parent_dma_tag failed\n",
+			__func__);
+		goto qls_pci_attach_err;
+	}
+
+	/* alloc all dma buffers */
+	if (qls_alloc_dma(ha)) {
+		device_printf(dev, "%s: qls_alloc_dma failed\n", __func__);
+		goto qls_pci_attach_err;
+	}
+
+	/* create the o.s ethernet interface */
+	qls_init_ifnet(dev, ha);
+
+	ha->flags.qla_watchdog_active = 1;
+	ha->flags.qla_watchdog_pause = 1;
+
+	TASK_INIT(&ha->tx_task, 0, qls_tx_done, ha);
+	ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
+			taskqueue_thread_enqueue, &ha->tx_tq);
+	taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
+		device_get_nameunit(ha->pci_dev));
+
+	callout_init(&ha->tx_callout, TRUE);
+	ha->flags.qla_callout_init = 1;
+
+	/* create ioctl device interface */
+	if (qls_make_cdev(ha)) {
+		device_printf(dev, "%s: qls_make_cdev failed\n", __func__);
+		goto qls_pci_attach_err;
+	}
+
+	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
+		qls_watchdog, ha);
+
+	TASK_INIT(&ha->err_task, 0, qls_error_recovery, ha);
+	ha->err_tq = taskqueue_create_fast("qla_errq", M_NOWAIT,
+			taskqueue_thread_enqueue, &ha->err_tq);
+	taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
+		device_get_nameunit(ha->pci_dev));
+
+	QL_DPRINT2((dev, "%s: exit 0\n", __func__));
+        return (0);
+
+qls_pci_attach_err:
+
+	qls_release(ha);
+
+	QL_DPRINT2((dev, "%s: exit ENXIO\n", __func__));
+        return (ENXIO);
+}
+
+/*
+ * Name: qls_pci_detach
+ * Function: Unhooks the device from the operating system
+ * Stops the interface under the hardware lock, then releases all
+ * resources via qls_release().
+ */
+static int
+qls_pci_detach(device_t dev)
+{
+	qla_host_t *ha = NULL;
+	struct ifnet *ifp;
+
+	QL_DPRINT2((dev, "%s: enter\n", __func__));
+
+        if ((ha = device_get_softc(dev)) == NULL) {
+                device_printf(dev, "cannot get softc\n");
+                return (ENOMEM);
+        }
+
+	ifp = ha->ifp;
+
+	(void)QLA_LOCK(ha, __func__, 0);
+	qls_stop(ha);
+	QLA_UNLOCK(ha, __func__);
+
+	qls_release(ha);
+
+	QL_DPRINT2((dev, "%s: exit\n", __func__));
+
+        return (0);
+}
+
+/*
+ * Name: qls_release
+ * Function: Releases the resources allocated for the device
+ * Safe to call with partially-initialized state (used both from the
+ * attach error path and from detach): every teardown step checks
+ * whether its resource was actually created.
+ */
+static void
+qls_release(qla_host_t *ha)
+{
+	device_t dev;
+	int i;
+
+	dev = ha->pci_dev;
+
+	if (ha->err_tq) {
+		taskqueue_drain(ha->err_tq, &ha->err_task);
+		taskqueue_free(ha->err_tq);
+	}
+
+	if (ha->tx_tq) {
+		taskqueue_drain(ha->tx_tq, &ha->tx_task);
+		taskqueue_free(ha->tx_tq);
+	}
+
+	qls_del_cdev(ha);
+
+	if (ha->flags.qla_watchdog_active) {
+		/* Ask the watchdog to stop and wait for it to confirm. */
+		ha->flags.qla_watchdog_exit = 1;
+
+		while (ha->qla_watchdog_exited == 0)
+			qls_mdelay(__func__, 1);
+	}
+
+	if (ha->flags.qla_callout_init)
+		callout_stop(&ha->tx_callout);
+
+	if (ha->ifp != NULL)
+		ether_ifdetach(ha->ifp);
+
+	qls_free_dma(ha);
+	qls_free_parent_dma_tag(ha);
+
+	for (i = 0; i < ha->num_rx_rings; i++) {
+
+		if (ha->irq_vec[i].handle) {
+			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
+				ha->irq_vec[i].handle);
+		}
+
+		if (ha->irq_vec[i].irq) {
+			(void)bus_release_resource(dev, SYS_RES_IRQ,
+				ha->irq_vec[i].irq_rid,
+				ha->irq_vec[i].irq);
+		}
+	}
+
+	if (ha->msix_count)
+		pci_release_msi(dev);
+
+	if (ha->flags.lock_init) {
+		mtx_destroy(&ha->tx_lock);
+		mtx_destroy(&ha->hw_lock);
+	}
+
+        if (ha->pci_reg)
+                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
+				ha->pci_reg);
+
+        if (ha->pci_reg1)
+                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
+				ha->pci_reg1);
+}
+
+/*
+ * DMA Related Functions
+ */
+
+/*
+ * bus_dmamap_load() callback: stores the physical address of the first
+ * (and only) segment into *arg, or 0 on error.
+ */
+static void
+qls_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+ *((bus_addr_t *)arg) = 0;
+
+ if (error) {
+ printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
+ return;
+ }
+
+ *((bus_addr_t *)arg) = segs[0].ds_addr;
+
+ return;
+}
+
+/*
+ * Allocate a single-segment DMA buffer described by dma_buf (caller sets
+ * dma_buf->size and dma_buf->alignment). On success fills in dma_tag,
+ * dma_map, dma_b (kva) and dma_addr (physical). Returns 0 or nonzero error.
+ */
+int
+qls_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
+{
+ int ret = 0;
+ device_t dev;
+ bus_addr_t b_addr;
+
+ dev = ha->pci_dev;
+
+ QL_DPRINT2((dev, "%s: enter\n", __func__));
+
+ ret = bus_dma_tag_create(
+ ha->parent_tag,/* parent */
+ dma_buf->alignment,
+ ((bus_size_t)(1ULL << 32)),/* boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ dma_buf->size, /* maxsize */
+ 1, /* nsegments */
+ dma_buf->size, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &dma_buf->dma_tag);
+
+ if (ret) {
+ device_printf(dev, "%s: could not create dma tag\n", __func__);
+ goto qls_alloc_dmabuf_exit;
+ }
+ ret = bus_dmamem_alloc(dma_buf->dma_tag,
+ (void **)&dma_buf->dma_b,
+ (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
+ &dma_buf->dma_map);
+ if (ret) {
+ bus_dma_tag_destroy(dma_buf->dma_tag);
+ device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
+ goto qls_alloc_dmabuf_exit;
+ }
+
+ /* b_addr is filled in (or zeroed on error) by the load callback */
+ ret = bus_dmamap_load(dma_buf->dma_tag,
+ dma_buf->dma_map,
+ dma_buf->dma_b,
+ dma_buf->size,
+ qls_dmamap_callback,
+ &b_addr, BUS_DMA_NOWAIT);
+
+ if (ret || !b_addr) {
+ bus_dma_tag_destroy(dma_buf->dma_tag);
+ bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
+ dma_buf->dma_map);
+ ret = -1;
+ goto qls_alloc_dmabuf_exit;
+ }
+
+ dma_buf->dma_addr = b_addr;
+
+qls_alloc_dmabuf_exit:
+ QL_DPRINT2((dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
+ __func__, ret, (void *)dma_buf->dma_tag,
+ (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
+ dma_buf->size));
+
+ return ret;
+}
+
+/*
+ * Free a DMA buffer allocated by qls_alloc_dmabuf().
+ * NOTE(review): the map is not bus_dmamap_unload()'d before the memory is
+ * freed - confirm against bus_dma(9) expectations.
+ */
+void
+qls_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
+{
+ bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
+ bus_dma_tag_destroy(dma_buf->dma_tag);
+}
+
+/*
+ * Create the parent DMA tag all other driver tags derive from.
+ * Returns 0 on success, -1 on failure; sets ha->flags.parent_tag so
+ * qls_free_parent_dma_tag() knows whether to destroy it.
+ */
+static int
+qls_alloc_parent_dma_tag(qla_host_t *ha)
+{
+ int ret;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ /*
+ * Allocate parent DMA Tag
+ */
+ ret = bus_dma_tag_create(
+ bus_get_dma_tag(dev), /* parent */
+ 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
+ 0, /* nsegments */
+ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &ha->parent_tag);
+
+ if (ret) {
+ device_printf(dev, "%s: could not create parent dma tag\n",
+ __func__);
+ return (-1);
+ }
+
+ ha->flags.parent_tag = 1;
+
+ return (0);
+}
+
+/* Destroy the parent DMA tag if it was created; idempotent via the flag. */
+static void
+qls_free_parent_dma_tag(qla_host_t *ha)
+{
+ if (ha->flags.parent_tag) {
+ bus_dma_tag_destroy(ha->parent_tag);
+ ha->flags.parent_tag = 0;
+ }
+}
+
+/*
+ * Name: qls_init_ifnet
+ * Function: Creates the Network Device Interface and Registers it with the O.S
+ */
+
+static void
+qls_init_ifnet(device_t dev, qla_host_t *ha)
+{
+ struct ifnet *ifp;
+
+ QL_DPRINT2((dev, "%s: enter\n", __func__));
+
+ ifp = ha->ifp = if_alloc(IFT_ETHER);
+
+ if (ifp == NULL)
+ panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
+
+ if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+
+#if __FreeBSD_version >= 1000000
+ if_initbaudrate(ifp, IF_Gbps(10));
+#else
+ /*
+ * NOTE(review): 1 Gbps is advertised here although the adapter is
+ * 10GbE - presumably because pre-10.x if_baudrate cannot hold 10e9;
+ * confirm.
+ */
+ ifp->if_baudrate = 1 * 1000 * 1000 * 1000;
+#endif /* #if (__FreeBSD_version >= 1000000) */
+
+ ifp->if_init = qls_init;
+ ifp->if_softc = ha;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_ioctl = qls_ioctl;
+ ifp->if_start = qls_start;
+
+ IFQ_SET_MAXLEN(&ifp->if_snd, qls_get_ifq_snd_maxlen(ha));
+ ifp->if_snd.ifq_drv_maxlen = qls_get_ifq_snd_maxlen(ha);
+ IFQ_SET_READY(&ifp->if_snd);
+
+ /* choose the mbuf cluster size that fits a full frame */
+ ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ if (ha->max_frame_size <= MCLBYTES) {
+ ha->msize = MCLBYTES;
+ } else if (ha->max_frame_size <= MJUMPAGESIZE) {
+ ha->msize = MJUMPAGESIZE;
+ } else
+ ha->msize = MJUM9BYTES;
+
+ /* NOTE(review): capabilities are set after ether_ifattach - confirm */
+ ether_ifattach(ifp, qls_get_mac_addr(ha));
+
+ ifp->if_capabilities = IFCAP_JUMBO_MTU;
+
+ ifp->if_capabilities |= IFCAP_HWCSUM;
+ ifp->if_capabilities |= IFCAP_VLAN_MTU;
+
+ ifp->if_capabilities |= IFCAP_TSO4;
+ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
+ ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
+ ifp->if_capabilities |= IFCAP_LINKSTATE;
+
+ ifp->if_capenable = ifp->if_capabilities;
+
+ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
+
+ /* register the fixed-optics and autoselect media options */
+ ifmedia_init(&ha->media, IFM_IMASK, qls_media_change, qls_media_status);
+
+ ifmedia_add(&ha->media, (IFM_ETHER | qls_get_optics(ha) | IFM_FDX), 0,
+ NULL);
+ ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
+
+ ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
+
+ QL_DPRINT2((dev, "%s: exit\n", __func__));
+
+ return;
+}
+
+/*
+ * (Re)initialize the interface; caller must hold the driver lock.
+ * On rx-buffer or LRO setup failure it returns silently with the
+ * interface left down.
+ */
+static void
+qls_init_locked(qla_host_t *ha)
+{
+ struct ifnet *ifp = ha->ifp;
+
+ /* stop first so re-init is safe on an already-running interface */
+ qls_stop(ha);
+
+ qls_flush_xmt_bufs(ha);
+
+ if (qls_alloc_rcv_bufs(ha) != 0)
+ return;
+
+ if (qls_config_lro(ha))
+ return;
+
+ /* pick up a MAC address possibly changed via SIOCSIFLLADDR */
+ bcopy(IF_LLADDR(ha->ifp), ha->mac_addr, ETHER_ADDR_LEN);
+
+ ifp->if_hwassist = CSUM_IP;
+ ifp->if_hwassist |= CSUM_TCP;
+ ifp->if_hwassist |= CSUM_UDP;
+ ifp->if_hwassist |= CSUM_TSO;
+
+ /* mark RUNNING and resume the watchdog only if hw init succeeded */
+ if (qls_init_hw_if(ha) == 0) {
+ ifp = ha->ifp;
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ ha->flags.qla_watchdog_pause = 0;
+ }
+
+ return;
+}
+
+/* if_init entry point: take the driver lock and run qls_init_locked(). */
+static void
+qls_init(void *arg)
+{
+ qla_host_t *ha;
+
+ ha = (qla_host_t *)arg;
+
+ QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
+
+ (void)QLA_LOCK(ha, __func__, 0);
+ qls_init_locked(ha);
+ QLA_UNLOCK(ha, __func__);
+
+ QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
+}
+
+/*
+ * Gather up to Q8_MAX_NUM_MULTICAST_ADDRS link-level multicast addresses
+ * from the ifnet and program them into the hardware (add_multi selects
+ * add vs. delete).
+ */
+static void
+qls_set_multi(qla_host_t *ha, uint32_t add_multi)
+{
+ uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
+ struct ifmultiaddr *ifma;
+ int mcnt = 0;
+ struct ifnet *ifp = ha->ifp;
+
+ if_maddr_rlock(ifp);
+
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+
+ /* silently truncate beyond the hardware filter capacity */
+ if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
+ break;
+
+ bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
+ &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);
+
+ mcnt++;
+ }
+
+ if_maddr_runlock(ifp);
+
+ /* no_delay lock attempt: skip the update rather than sleep */
+ if (QLA_LOCK(ha, __func__, 1) == 0) {
+ qls_hw_set_multi(ha, mta, mcnt, add_multi);
+ QLA_UNLOCK(ha, __func__);
+ }
+
+ return;
+}
+
+/* if_ioctl entry point: standard ifnet ioctl dispatch for the driver. */
+static int
+qls_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ int ret = 0;
+ struct ifreq *ifr = (struct ifreq *)data;
+ struct ifaddr *ifa = (struct ifaddr *)data;
+ qla_host_t *ha;
+
+ ha = (qla_host_t *)ifp->if_softc;
+
+ switch (cmd) {
+ case SIOCSIFADDR:
+ QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
+ __func__, cmd));
+
+ /* bring the interface up for IPv4; defer others to ether_ioctl */
+ if (ifa->ifa_addr->sa_family == AF_INET) {
+ ifp->if_flags |= IFF_UP;
+ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ (void)QLA_LOCK(ha, __func__, 0);
+ qls_init_locked(ha);
+ QLA_UNLOCK(ha, __func__);
+ }
+ QL_DPRINT4((ha->pci_dev,
+ "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
+ __func__, cmd,
+ ntohl(IA_SIN(ifa)->sin_addr.s_addr)));
+
+ arp_ifinit(ifp, ifa);
+ } else {
+ ether_ioctl(ifp, cmd, data);
+ }
+ break;
+
+ case SIOCSIFMTU:
+ QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
+ __func__, cmd));
+
+ if (ifr->ifr_mtu > QLA_MAX_MTU) {
+ ret = EINVAL;
+ } else {
+ (void) QLA_LOCK(ha, __func__, 0);
+
+ ifp->if_mtu = ifr->ifr_mtu;
+ ha->max_frame_size =
+ ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+ QLA_UNLOCK(ha, __func__);
+
+ /* NOTE(review): dead code - ret is always 0 here */
+ if (ret)
+ ret = EINVAL;
+ }
+
+ break;
+
+ case SIOCSIFFLAGS:
+ QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
+ __func__, cmd));
+
+ (void)QLA_LOCK(ha, __func__, 0);
+
+ if (ifp->if_flags & IFF_UP) {
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ /* only reprogram what actually changed */
+ if ((ifp->if_flags ^ ha->if_flags) &
+ IFF_PROMISC) {
+ ret = qls_set_promisc(ha);
+ } else if ((ifp->if_flags ^ ha->if_flags) &
+ IFF_ALLMULTI) {
+ ret = qls_set_allmulti(ha);
+ }
+ } else {
+ ha->max_frame_size = ifp->if_mtu +
+ ETHER_HDR_LEN + ETHER_CRC_LEN;
+ qls_init_locked(ha);
+ }
+ } else {
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ qls_stop(ha);
+ ha->if_flags = ifp->if_flags;
+ }
+
+ QLA_UNLOCK(ha, __func__);
+ break;
+
+ case SIOCADDMULTI:
+ QL_DPRINT4((ha->pci_dev,
+ "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));
+
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ qls_set_multi(ha, 1);
+ }
+ break;
+
+ case SIOCDELMULTI:
+ QL_DPRINT4((ha->pci_dev,
+ "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));
+
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ qls_set_multi(ha, 0);
+ }
+ break;
+
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ QL_DPRINT4((ha->pci_dev,
+ "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
+ __func__, cmd));
+ ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
+ break;
+
+ case SIOCSIFCAP:
+ {
+ int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+
+ QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
+ __func__, cmd));
+
+ /* toggle each capability whose requested state differs */
+ if (mask & IFCAP_HWCSUM)
+ ifp->if_capenable ^= IFCAP_HWCSUM;
+ if (mask & IFCAP_TSO4)
+ ifp->if_capenable ^= IFCAP_TSO4;
+ if (mask & IFCAP_VLAN_HWTAGGING)
+ ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+ if (mask & IFCAP_VLAN_HWTSO)
+ ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
+
+ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+ qls_init(ha);
+
+ VLAN_CAPABILITIES(ifp);
+ break;
+ }
+
+ default:
+ QL_DPRINT4((ha->pci_dev, "%s: default (0x%lx)\n",
+ __func__, cmd));
+ ret = ether_ioctl(ifp, cmd, data);
+ break;
+ }
+
+ return (ret);
+}
+
+/*
+ * ifmedia change callback: only validates that the requested media type
+ * is Ethernet; the hardware media itself is fixed.
+ */
+static int
+qls_media_change(struct ifnet *ifp)
+{
+ qla_host_t *ha;
+ struct ifmedia *ifm;
+ int ret = 0;
+
+ ha = (qla_host_t *)ifp->if_softc;
+
+ QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
+
+ ifm = &ha->media;
+
+ if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+ ret = EINVAL;
+
+ QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
+
+ return (ret);
+}
+
+/*
+ * ifmedia status callback: refresh the link state from hardware and
+ * report active/valid plus full-duplex optics when the link is up.
+ */
+static void
+qls_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+ qla_host_t *ha;
+
+ ha = (qla_host_t *)ifp->if_softc;
+
+ QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
+
+ ifmr->ifm_status = IFM_AVALID;
+ ifmr->ifm_active = IFM_ETHER;
+
+ qls_update_link_state(ha);
+ if (ha->link_up) {
+ ifmr->ifm_status |= IFM_ACTIVE;
+ ifmr->ifm_active |= (IFM_FDX | qls_get_optics(ha));
+ }
+
+ QL_DPRINT2((ha->pci_dev, "%s: exit (%s)\n", __func__,\
+ (ha->link_up ? "link_up" : "link_down")));
+
+ return;
+}
+
+/*
+ * if_start entry point: drain the ifnet send queue into the hardware.
+ * Uses mtx_trylock so a concurrent transmitter simply takes over the work.
+ */
+static void
+qls_start(struct ifnet *ifp)
+{
+ int i, ret = 0;
+ struct mbuf *m_head;
+ qla_host_t *ha = (qla_host_t *)ifp->if_softc;
+
+ QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));
+
+ if (!mtx_trylock(&ha->tx_lock)) {
+ QL_DPRINT8((ha->pci_dev,
+ "%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
+ return;
+ }
+
+ /* reap completed tx descriptors; clear OACTIVE if all rings drained */
+ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) ==
+ IFF_DRV_RUNNING) {
+
+ for (i = 0; i < ha->num_tx_rings; i++) {
+ ret |= qls_hw_tx_done(ha, i);
+ }
+
+ if (ret == 0)
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ }
+
+ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+ IFF_DRV_RUNNING) {
+ QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
+ QLA_TX_UNLOCK(ha);
+ return;
+ }
+
+ if (!ha->link_up) {
+ qls_update_link_state(ha);
+ if (!ha->link_up) {
+ QL_DPRINT8((ha->pci_dev, "%s: link down\n", __func__));
+ QLA_TX_UNLOCK(ha);
+ return;
+ }
+ }
+
+ while (ifp->if_snd.ifq_head != NULL) {
+
+ IF_DEQUEUE(&ifp->if_snd, m_head);
+
+ if (m_head == NULL) {
+ QL_DPRINT8((ha->pci_dev, "%s: m_head == NULL\n",
+ __func__));
+ break;
+ }
+
+ /* on transient failure, requeue the frame and mark OACTIVE */
+ if (qls_send(ha, &m_head)) {
+ if (m_head == NULL)
+ break;
+ QL_DPRINT8((ha->pci_dev, "%s: PREPEND\n", __func__));
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ IF_PREPEND(&ifp->if_snd, m_head);
+ break;
+ }
+ /* Send a copy of the frame to the BPF listener */
+ ETHER_BPF_MTAP(ifp, m_head);
+ }
+
+ QLA_TX_UNLOCK(ha);
+ QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
+ return;
+}
+
+/*
+ * Map one frame for DMA and hand it to the hardware.
+ * Returns 0 on success. On failure *m_headp may be freed and NULLed
+ * (caller must not requeue in that case). Caller holds the tx lock.
+ */
+static int
+qls_send(qla_host_t *ha, struct mbuf **m_headp)
+{
+ bus_dma_segment_t segs[QLA_MAX_SEGMENTS];
+ bus_dmamap_t map;
+ int nsegs;
+ int ret = -1;
+ uint32_t tx_idx;
+ struct mbuf *m_head = *m_headp;
+ uint32_t txr_idx = 0;
+
+ QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));
+
+ /* spread flows across tx rings (num_tx_rings must be a power of 2) */
+ if (m_head->m_flags & M_FLOWID)
+ txr_idx = m_head->m_pkthdr.flowid & (ha->num_tx_rings - 1);
+
+ tx_idx = ha->tx_ring[txr_idx].txr_next;
+
+ map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;
+
+ ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
+ BUS_DMA_NOWAIT);
+
+ if (ret == EFBIG) {
+
+ struct mbuf *m;
+
+ QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
+ m_head->m_pkthdr.len));
+
+ /* too many segments: defragment the chain and retry the load */
+ m = m_defrag(m_head, M_DONTWAIT);
+ if (m == NULL) {
+ ha->err_tx_defrag++;
+ m_freem(m_head);
+ *m_headp = NULL;
+ device_printf(ha->pci_dev,
+ "%s: m_defrag() = NULL [%d]\n",
+ __func__, ret);
+ return (ENOBUFS);
+ }
+ m_head = m;
+ *m_headp = m_head;
+
+ if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
+ segs, &nsegs, BUS_DMA_NOWAIT))) {
+
+ ha->err_tx_dmamap_load++;
+
+ device_printf(ha->pci_dev,
+ "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
+ __func__, ret, m_head->m_pkthdr.len);
+
+ /* ENOMEM is transient: keep the mbuf for a retry */
+ if (ret != ENOMEM) {
+ m_freem(m_head);
+ *m_headp = NULL;
+ }
+ return (ret);
+ }
+
+ } else if (ret) {
+
+ ha->err_tx_dmamap_load++;
+
+ device_printf(ha->pci_dev,
+ "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
+ __func__, ret, m_head->m_pkthdr.len);
+
+ if (ret != ENOMEM) {
+ m_freem(m_head);
+ *m_headp = NULL;
+ }
+ return (ret);
+ }
+
+ QL_ASSERT(ha, (nsegs != 0), ("qls_send: empty packet"));
+
+ bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
+
+ /* record the mbuf/map so the completion path can unload and free */
+ if (!(ret = qls_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx))) {
+
+ ha->tx_ring[txr_idx].count++;
+ ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
+ ha->tx_ring[txr_idx].tx_buf[tx_idx].map = map;
+ } else {
+ if (ret == EINVAL) {
+ if (m_head)
+ m_freem(m_head);
+ *m_headp = NULL;
+ }
+ }
+
+ QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
+ return (ret);
+}
+
+/*
+ * Stop the interface: pause the watchdog, shut down the hardware
+ * interface, free LRO state and release all tx/rx buffers.
+ * Caller holds the driver lock.
+ */
+static void
+qls_stop(qla_host_t *ha)
+{
+ struct ifnet *ifp = ha->ifp;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
+
+ /* wait for the watchdog to acknowledge the pause before teardown */
+ ha->flags.qla_watchdog_pause = 1;
+
+ while (!ha->qla_watchdog_paused)
+ qls_mdelay(__func__, 1);
+
+ qls_del_hw_if(ha);
+
+ qls_free_lro(ha);
+
+ qls_flush_xmt_bufs(ha);
+ qls_free_rcv_bufs(ha);
+
+ return;
+}
+
+/*
+ * Buffer Management Functions for Transmit and Receive Rings
+ */
+/*
+ * Release mbuf after it sent on the wire
+ */
+/* Unload the DMA map and free the mbuf of one tx buffer, if present. */
+static void
+qls_flush_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
+{
+ QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
+
+ if (txb->m_head) {
+
+ bus_dmamap_unload(ha->tx_tag, txb->map);
+
+ m_freem(txb->m_head);
+ txb->m_head = NULL;
+ }
+
+ QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
+}
+
+/* Release every pending tx buffer on every tx ring. */
+static void
+qls_flush_xmt_bufs(qla_host_t *ha)
+{
+ int i, j;
+
+ for (j = 0; j < ha->num_tx_rings; j++) {
+ for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
+ qls_flush_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
+ }
+
+ return;
+}
+
+
+/*
+ * For rx ring 'r': create a DMA map per descriptor, attach an mbuf to
+ * each, and write its physical address into the small-buffer queue.
+ * Returns 0 on success, -1 on failure.
+ */
+static int
+qls_alloc_rcv_mbufs(qla_host_t *ha, int r)
+{
+ int i, j, ret = 0;
+ qla_rx_buf_t *rxb;
+ qla_rx_ring_t *rx_ring;
+ volatile q81_bq_addr_e_t *sbq_e;
+
+
+ rx_ring = &ha->rx_ring[r];
+
+ for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
+
+ rxb = &rx_ring->rx_buf[i];
+
+ ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);
+
+ if (ret) {
+ device_printf(ha->pci_dev,
+ "%s: dmamap[%d, %d] failed\n", __func__, r, i);
+
+ /* undo the maps created so far in this ring */
+ for (j = 0; j < i; j++) {
+ rxb = &rx_ring->rx_buf[j];
+ bus_dmamap_destroy(ha->rx_tag, rxb->map);
+ }
+ goto qls_alloc_rcv_mbufs_err;
+ }
+ }
+
+ rx_ring = &ha->rx_ring[r];
+
+ sbq_e = rx_ring->sbq_vaddr;
+
+ rxb = &rx_ring->rx_buf[0];
+
+ for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
+
+ if (!(ret = qls_get_mbuf(ha, rxb, NULL))) {
+
+ /*
+ * set the physical address in the
+ * corresponding descriptor entry in the
+ * receive ring/queue for the hba
+ */
+
+ sbq_e->addr_lo = rxb->paddr & 0xFFFFFFFF;
+ sbq_e->addr_hi = (rxb->paddr >> 32) & 0xFFFFFFFF;
+
+ } else {
+ device_printf(ha->pci_dev,
+ "%s: qls_get_mbuf [%d, %d] failed\n",
+ __func__, r, i);
+ /*
+ * NOTE(review): only the failing descriptor's map is
+ * destroyed here; maps of descriptors > i appear to be
+ * left for qls_free_rcv_bufs() - confirm cleanup path.
+ */
+ bus_dmamap_destroy(ha->rx_tag, rxb->map);
+ goto qls_alloc_rcv_mbufs_err;
+ }
+
+ rxb++;
+ sbq_e++;
+ }
+ return 0;
+
+qls_alloc_rcv_mbufs_err:
+ return (-1);
+}
+
+/*
+ * Free every rx mbuf and its DMA map on every rx ring, then zero the
+ * per-ring buffer arrays.
+ * NOTE(review): descriptors whose m_head is NULL keep their map
+ * undestroyed before the bzero - confirm no map leak on that path.
+ */
+static void
+qls_free_rcv_bufs(qla_host_t *ha)
+{
+ int i, r;
+ qla_rx_buf_t *rxb;
+ qla_rx_ring_t *rxr;
+
+ for (r = 0; r < ha->num_rx_rings; r++) {
+
+ rxr = &ha->rx_ring[r];
+
+ for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
+
+ rxb = &rxr->rx_buf[i];
+
+ if (rxb->m_head != NULL) {
+ bus_dmamap_unload(ha->rx_tag, rxb->map);
+ bus_dmamap_destroy(ha->rx_tag, rxb->map);
+ m_freem(rxb->m_head);
+ }
+ }
+ bzero(rxr->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
+ }
+ return;
+}
+
+/*
+ * Populate all rx rings with mbufs. Returns 0 on success or the first
+ * failing ring's error; on failure all rx buffers are released.
+ */
+static int
+qls_alloc_rcv_bufs(qla_host_t *ha)
+{
+ int r, ret = 0;
+ qla_rx_ring_t *rxr;
+
+ /* start every ring from a clean slate */
+ for (r = 0; r < ha->num_rx_rings; r++) {
+ rxr = &ha->rx_ring[r];
+ bzero(rxr->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
+ }
+
+ for (r = 0; r < ha->num_rx_rings; r++) {
+
+ ret = qls_alloc_rcv_mbufs(ha, r);
+
+ if (ret) {
+ /*
+ * Free everything and stop. The original code kept
+ * iterating after the free, so a later ring could
+ * succeed, overwrite ret with 0 and report success
+ * even though all rx buffers had just been released.
+ */
+ qls_free_rcv_bufs(ha);
+ break;
+ }
+ }
+
+ return (ret);
+}
+
+/*
+ * Attach an mbuf cluster to rx buffer 'rxb' (allocating one when nmp is
+ * NULL), align its data to 8 bytes and DMA-map it, recording the physical
+ * address in rxb->paddr. Returns 0 on success, nonzero on failure.
+ */
+int
+qls_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
+{
+ register struct mbuf *mp = nmp;
+ int ret = 0;
+ uint32_t offset;
+ bus_dma_segment_t segs[1];
+ int nsegs = 0;
+
+ QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
+
+ if (mp == NULL) {
+
+ mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, ha->msize);
+
+ if (mp == NULL) {
+
+ if (ha->msize == MCLBYTES)
+ ha->err_m_getcl++;
+ else
+ ha->err_m_getjcl++;
+
+ ret = ENOBUFS;
+ device_printf(ha->pci_dev,
+ "%s: m_getcl failed\n", __func__);
+ goto exit_qls_get_mbuf;
+ }
+ mp->m_len = mp->m_pkthdr.len = ha->msize;
+ } else {
+ /* recycle the caller's cluster: reset length and data pointer */
+ mp->m_len = mp->m_pkthdr.len = ha->msize;
+ mp->m_data = mp->m_ext.ext_buf;
+ mp->m_next = NULL;
+ }
+
+ /* align the receive buffers to 8 byte boundary */
+ offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
+ if (offset) {
+ offset = 8 - offset;
+ m_adj(mp, offset);
+ }
+
+ /*
+ * Using memory from the mbuf cluster pool, invoke the bus_dma
+ * machinery to arrange the memory mapping.
+ */
+ ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
+ mp, segs, &nsegs, BUS_DMA_NOWAIT);
+
+ /*
+ * segs[]/nsegs are defined only after a successful load; the
+ * original code read segs[0].ds_addr unconditionally, which is an
+ * uninitialized read on failure.
+ */
+ rxb->paddr = (ret == 0) ? segs[0].ds_addr : 0;
+
+ if (ret || !rxb->paddr || (nsegs != 1)) {
+ m_freem(mp);
+ rxb->m_head = NULL;
+ device_printf(ha->pci_dev,
+ "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
+ __func__, ret, (long long unsigned int)rxb->paddr,
+ nsegs);
+ ret = -1;
+ goto exit_qls_get_mbuf;
+ }
+ rxb->m_head = mp;
+ bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);
+
+exit_qls_get_mbuf:
+ QL_DPRINT2((ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
+ return (ret);
+}
+
+/* Tx taskqueue handler: kick the transmit path if the interface runs. */
+static void
+qls_tx_done(void *context, int pending)
+{
+ qla_host_t *ha = context;
+ struct ifnet *ifp;
+
+ ifp = ha->ifp;
+
+ if (!ifp)
+ return;
+
+ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
+ return;
+ }
+
+ qls_start(ha->ifp);
+ return;
+}
+
+/*
+ * Initialize one LRO context per rx ring. Returns 0 on success, -1 on
+ * failure.
+ * NOTE(review): on a mid-loop failure the contexts initialized so far
+ * are not tcp_lro_free()'d here - confirm qls_free_lro() covers them.
+ */
+static int
+qls_config_lro(qla_host_t *ha)
+{
+ int i;
+ struct lro_ctrl *lro;
+
+ for (i = 0; i < ha->num_rx_rings; i++) {
+ lro = &ha->rx_ring[i].lro;
+ if (tcp_lro_init(lro)) {
+ device_printf(ha->pci_dev, "%s: tcp_lro_init failed\n",
+ __func__);
+ return (-1);
+ }
+ lro->ifp = ha->ifp;
+ }
+ ha->flags.lro_init = 1;
+
+ QL_DPRINT2((ha->pci_dev, "%s: LRO initialized\n", __func__));
+ return (0);
+}
+
+/* Free all per-ring LRO contexts; no-op when LRO was never initialized. */
+static void
+qls_free_lro(qla_host_t *ha)
+{
+ int i;
+ struct lro_ctrl *lro;
+
+ if (!ha->flags.lro_init)
+ return;
+
+ for (i = 0; i < ha->num_rx_rings; i++) {
+ lro = &ha->rx_ring[i].lro;
+ tcp_lro_free(lro);
+ }
+ ha->flags.lro_init = 0;
+}
+
+/* Error-recovery taskqueue handler: reinitialize the interface. */
+static void
+qls_error_recovery(void *context, int pending)
+{
+ qla_host_t *ha = context;
+
+ qls_init(ha);
+
+ return;
+}
+
diff --git a/sys/dev/qlxge/qls_os.h b/sys/dev/qlxge/qls_os.h
new file mode 100644
index 000000000000..eba9bf55d93f
--- /dev/null
+++ b/sys/dev/qlxge/qls_os.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2013-2014 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * File: qls_os.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#ifndef _QLS_OS_H_
+#define _QLS_OS_H_
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/mbuf.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/kernel.h>
+#include <sys/sockio.h>
+#include <sys/types.h>
+#include <machine/atomic.h>
+#include <machine/_inttypes.h>
+#include <sys/conf.h>
+
+#if __FreeBSD_version < 900044
+#error FreeBSD Version not supported - use version >= 900044
+#endif
+
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/ethernet.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/bpf.h>
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp.h>
+#include <netinet/udp.h>
+#include <netinet/in_var.h>
+#include <netinet/tcp_lro.h>
+
+#include <sys/bus.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/proc.h>
+#include <sys/sysctl.h>
+#include <sys/endian.h>
+#include <sys/taskqueue.h>
+#include <sys/pcpu.h>
+
+#include <sys/unistd.h>
+#include <sys/kthread.h>
+#include <machine/in_cksum.h>
+
+#define QLA_USEC_DELAY(usec) DELAY(usec)
+
+/*
+ * Convert milliseconds to scheduler ticks via tvtohz(), clamping a
+ * negative result to INT_MAX and a zero result up to 1 tick.
+ */
+static __inline int qls_ms_to_hz(int ms)
+{
+ int qla_hz;
+
+ struct timeval t;
+
+ t.tv_sec = ms / 1000;
+ t.tv_usec = (ms % 1000) * 1000;
+
+ qla_hz = tvtohz(&t);
+
+ if (qla_hz < 0)
+ qla_hz = 0x7fffffff;
+ if (!qla_hz)
+ qla_hz = 1;
+
+ return (qla_hz);
+}
+
+/*
+ * Convert whole seconds to scheduler ticks.
+ * NOTE(review): unlike qls_ms_to_hz() the tvtohz() result is returned
+ * unclamped - confirm callers never pass values that overflow.
+ */
+static __inline int qls_sec_to_hz(int sec)
+{
+ struct timeval t;
+
+ t.tv_sec = sec;
+ t.tv_usec = 0;
+
+ return (tvtohz(&t));
+}
+
+
+/* host <-> little-endian / big-endian conversion wrappers */
+#define qla_host_to_le16(x) htole16(x)
+#define qla_host_to_le32(x) htole32(x)
+#define qla_host_to_le64(x) htole64(x)
+#define qla_host_to_be16(x) htobe16(x)
+#define qla_host_to_be32(x) htobe32(x)
+#define qla_host_to_be64(x) htobe64(x)
+
+#define qla_le16_to_host(x) le16toh(x)
+#define qla_le32_to_host(x) le32toh(x)
+#define qla_le64_to_host(x) le64toh(x)
+#define qla_be16_to_host(x) be16toh(x)
+#define qla_be32_to_host(x) be32toh(x)
+#define qla_be64_to_host(x) be64toh(x)
+
+MALLOC_DECLARE(M_QLA8XXXBUF);
+
+/*
+ * Delay for 'msecs' milliseconds: spin with DELAY() while the system is
+ * still cold (scheduler not running), otherwise sleep via pause().
+ */
+#define qls_mdelay(fn, msecs) \
+ {\
+ if (cold) \
+ DELAY((msecs * 1000)); \
+ else \
+ pause(fn, qls_ms_to_hz(msecs)); \
+ }
+
+/*
+ * Locks
+ */
+#define QLA_LOCK(ha, str, no_delay) qls_lock(ha, str, no_delay)
+#define QLA_UNLOCK(ha, str) qls_unlock(ha, str)
+
+#define QLA_TX_LOCK(ha) mtx_lock(&ha->tx_lock);
+#define QLA_TX_UNLOCK(ha) mtx_unlock(&ha->tx_lock);
+
+#endif /* #ifndef _QLS_OS_H_ */
diff --git a/sys/dev/qlxge/qls_ver.h b/sys/dev/qlxge/qls_ver.h
new file mode 100644
index 000000000000..afac640c8104
--- /dev/null
+++ b/sys/dev/qlxge/qls_ver.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2013-2014 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * File: qls_ver.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#ifndef _QLS_VER_H_
+#define _QLS_VER_H_
+
+/* driver version number components: MAJOR.MINOR.BUILD */
+#define QLA_VERSION_MAJOR 2
+#define QLA_VERSION_MINOR 0
+#define QLA_VERSION_BUILD 0
+
+#endif /* #ifndef _QLS_VER_H_ */
diff --git a/sys/modules/Makefile b/sys/modules/Makefile
index d25ead2d47f7..75cc5bd5bf57 100644
--- a/sys/modules/Makefile
+++ b/sys/modules/Makefile
@@ -271,6 +271,7 @@ SUBDIR= \
${_pst} \
pty \
puc \
+ ${_qlxge} \
${_qlxgb} \
${_qlxgbe} \
ral \
@@ -713,6 +714,7 @@ _opensolaris= opensolaris
_padlock= padlock
.endif
_pccard= pccard
+_qlxge= qlxge
_qlxgb= qlxgb
_qlxgbe= qlxgbe
_rdma= rdma
diff --git a/sys/modules/qlxge/Makefile b/sys/modules/qlxge/Makefile
new file mode 100644
index 000000000000..98b40fdb63da
--- /dev/null
+++ b/sys/modules/qlxge/Makefile
@@ -0,0 +1,50 @@
+#/*
+# * Copyright (c) 2013-2014 Qlogic Corporation
+# * All rights reserved.
+# *
+# * Redistribution and use in source and binary forms, with or without
+# * modification, are permitted provided that the following conditions
+# * are met:
+# *
+# * 1. Redistributions of source code must retain the above copyright
+# * notice, this list of conditions and the following disclaimer.
+# * 2. Redistributions in binary form must reproduce the above copyright
+# * notice, this list of conditions and the following disclaimer in the
+# * documentation and/or other materials provided with the distribution.
+# *
+# * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# * POSSIBILITY OF SUCH DAMAGE.
+# */
+#/*
+# * File : Makefile
+# * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+# */
+#
+# $FreeBSD$
+#
+
+.PATH: ${.CURDIR}/../../dev/qlxge
+
+KMOD=if_qlxge
+SRCS=qls_os.c qls_dbg.c qls_hw.c qls_isr.c qls_dump.c
+SRCS+=qls_ioctl.c
+SRCS+= device_if.h bus_if.h pci_if.h
+
+# NOTE(review): -DQL_DBG enables the driver's debug code unconditionally -
+# confirm this is intended for non-debug builds as well.
+CFLAGS += -DQL_DBG
+
+# NOTE(review): this hand-rolled clean target shadows the one provided by
+# bsd.kmod.mk - confirm the extra files ("@", machine, x86) require it.
+clean:
+ rm -f opt_bdg.h device_if.h bus_if.h pci_if.h export_syms
+ rm -f *.o *.kld *.ko
+ rm -f @ machine x86
+
+.include <bsd.kmod.mk>
+