Diffstat:
-rw-r--r--  share/man/man4/Makefile         5
-rw-r--r--  share/man/man4/qlxgb.4         93
-rw-r--r--  sys/conf/files.amd64            6
-rw-r--r--  sys/dev/qlxgb/README.txt       99
-rw-r--r--  sys/dev/qlxgb/qla_dbg.c       263
-rw-r--r--  sys/dev/qlxgb/qla_dbg.h        85
-rw-r--r--  sys/dev/qlxgb/qla_def.h       208
-rw-r--r--  sys/dev/qlxgb/qla_glbl.h      109
-rw-r--r--  sys/dev/qlxgb/qla_hw.c       1776
-rw-r--r--  sys/dev/qlxgb/qla_hw.h        831
-rw-r--r--  sys/dev/qlxgb/qla_inline.h    229
-rw-r--r--  sys/dev/qlxgb/qla_ioctl.c     119
-rw-r--r--  sys/dev/qlxgb/qla_ioctl.h      64
-rw-r--r--  sys/dev/qlxgb/qla_isr.c       416
-rw-r--r--  sys/dev/qlxgb/qla_misc.c      624
-rw-r--r--  sys/dev/qlxgb/qla_os.c       1481
-rw-r--r--  sys/dev/qlxgb/qla_os.h        176
-rw-r--r--  sys/dev/qlxgb/qla_reg.h       248
-rw-r--r--  sys/dev/qlxgb/qla_ver.h        41
-rw-r--r--  sys/modules/Makefile            2
-rw-r--r--  sys/modules/qlxgb/Makefile     43
21 files changed, 6918 insertions(+), 0 deletions(-)
diff --git a/share/man/man4/Makefile b/share/man/man4/Makefile
index b38a01c0ef46..d2669ddd5fe4 100644
--- a/share/man/man4/Makefile
+++ b/share/man/man4/Makefile
@@ -347,6 +347,7 @@ MAN= aac.4 \
pts.4 \
pty.4 \
puc.4 \
+ ${_qlxgb.4} \
ral.4 \
random.4 \
rc.4 \
@@ -713,6 +714,10 @@ _xen.4= xen.4
MLINKS+=lindev.4 full.4
.endif
+.if ${MACHINE_CPUARCH} == "amd64"
+_qlxgb.4= qlxgb.4
+.endif
+
.if ${MACHINE_CPUARCH} == "powerpc"
_atp.4= atp.4
.endif
diff --git a/share/man/man4/qlxgb.4 b/share/man/man4/qlxgb.4
new file mode 100644
index 000000000000..201ad74d3efa
--- /dev/null
+++ b/share/man/man4/qlxgb.4
@@ -0,0 +1,93 @@
+.\"-
+.\" Copyright (c) 2011 "Bjoern A. Zeeb" <bz@FreeBSD.org>
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd November 3, 2011
+.Dt QLXGB 4
+.Os
+.Sh NAME
+.Nm qlxgb
+.Nd "QLogic 10 Gigabit Ethernet & CNA Adapter Driver"
+.Sh SYNOPSIS
+To compile this driver into the kernel,
+place the following lines in your
+kernel configuration file:
+.Bd -ragged -offset indent
+.Cd "device qlxgb"
+.Ed
+.Pp
+To load the driver as a
+module at boot time, place the following line in
+.Xr loader.conf 5 :
+.Bd -literal -offset indent
+if_qlxgb_load="YES"
+.Ed
+.Sh DESCRIPTION
+The
+.Nm
+driver supports IPv4 checksum offload,
+TCP and UDP checksum offload for both IPv4 and IPv6,
+Large Segment Offload for both IPv4 and IPv6,
+Jumbo frames, VLAN tagging, and
+Receive Side Scaling.
+For further hardware information, see
+.Pa http://www.qlogic.com/ .
+.Sh HARDWARE
+The
+.Nm
+driver supports 10 Gigabit Ethernet & CNA adapters based on the following
+chipsets:
+.Pp
+.Bl -bullet -compact
+.It
+QLogic 3200 series
+.It
+QLogic 8200 series
+.El
+.Sh SUPPORT
+For support questions please contact your QLogic approved reseller or
+QLogic Technical Support at
+.Pa http://support.qlogic.com ,
+or by E-mail at
+.Aq support@qlogic.com .
+.Sh SEE ALSO
+.Xr altq 4 ,
+.Xr arp 4 ,
+.Xr netintro 4 ,
+.Xr ng_ether 4 ,
+.Xr ifconfig 8
+.Sh HISTORY
+The
+.Nm
+device driver first appeared in
+.Fx 10.0 .
+.Sh AUTHORS
+.An -nosplit
+The
+.Nm
+driver was written by
+.An David C Somayajulu
+at QLogic Corporation.
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index 1388d0102228..559b0c31a88b 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -208,6 +208,12 @@ dev/lindev/lindev.c optional lindev
dev/nfe/if_nfe.c optional nfe pci
dev/nve/if_nve.c optional nve pci
dev/nvram/nvram.c optional nvram isa
+dev/qlxgb/qla_dbg.c optional qlxgb pci
+dev/qlxgb/qla_hw.c optional qlxgb pci
+dev/qlxgb/qla_ioctl.c optional qlxgb pci
+dev/qlxgb/qla_isr.c optional qlxgb pci
+dev/qlxgb/qla_misc.c optional qlxgb pci
+dev/qlxgb/qla_os.c optional qlxgb pci
dev/sio/sio.c optional sio
dev/sio/sio_isa.c optional sio isa
dev/sio/sio_pccard.c optional sio pccard
diff --git a/sys/dev/qlxgb/README.txt b/sys/dev/qlxgb/README.txt
new file mode 100644
index 000000000000..d9773ccbd9b5
--- /dev/null
+++ b/sys/dev/qlxgb/README.txt
@@ -0,0 +1,99 @@
+# $FreeBSD$
+
+ README File
+ QLogic 3200 and 8200 series Single/Dual Port
+10 Gigabit Ethernet & CNA Adapter Driver for FreeBSD 7.x/8.x/9.x
+
+ QLogic Corporation.
+ All rights reserved.
+
+
+Table of Contents
+1. Package Contents
+2. OS Support
+3. Supported Features
+4. Using the Driver
+ 4.1 Installing the driver
+ 4.2 Removing the driver
+5. Parameters to set prior to installing the driver
+6. Contacting Support
+
+1. Package Contents
+ * Documentation
+ - README (this document), version 1.0
+ - Release Notes, version 1.0
+ * Driver (if_qlxgb.ko)
+ - FreeBSD 7.x/8.x/9.x
+ * Firmware: pre-flashed on QLogic adapter;
+
+2. OS Support
+
+The QLogic 10 Gigabit Ethernet/CNA driver is compatible with the
+following OS platforms:
+ * FreeBSD 7.x/8.x/9.x (64-bit) [Intel EM64T, AMD64]
+
+3. Supported Features
+The 10 Gigabit Ethernet NIC/CNA driver supports the following features:
+
+* Large Segment Offload over TCP IPV4
+* Large Segment Offload over TCP IPV6
+* Receive Side Scaling
+* TCP over IPv4 checksum offload
+* UDP over IPv4 checksum offload
+* IPV4 checksum offload
+* TCP over IPv6 checksum offload
+* UDP over IPv6 checksum offload
+* Jumbo frames
+* VLAN Tag
+
+
+4. Using the driver
+
+ 4.1 Installing the driver
+
+ - copy the driver file (if_qlxgb.ko) into some directory (say qla_driver)
+ - cd to that directory (e.g. qla_driver)
+ - kldload -v ./if_qlxgb.ko
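+
+     To load the driver automatically at boot (see qlxgb(4)), the
+     following line can instead be added to /boot/loader.conf:
+
+         if_qlxgb_load="YES"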
+
+ 4.2 Removing the driver
+
+ - kldunload if_qlxgb
+
+5. Parameters to set prior to installing the driver
+
+ - Add the following lines to /etc/sysctl.conf and reboot the machine prior
+ to installing the driver
+
+ kern.ipc.nmbjumbo9=262144
+ net.inet.tcp.recvbuf_max=262144
+ net.inet.tcp.recvbuf_inc=16384
+ kern.ipc.nmbclusters=1000000
+ kern.ipc.maxsockbuf=2097152
+ net.inet.tcp.recvspace=131072
+ net.inet.tcp.sendbuf_max=262144
+ net.inet.tcp.sendspace=65536
+
+ - If you do not want to reboot the system, log in (or su) as root and run
+   the following commands
+
+ sysctl kern.ipc.nmbjumbo9=262144
+ sysctl net.inet.tcp.recvbuf_max=262144
+ sysctl net.inet.tcp.recvbuf_inc=16384
+ sysctl kern.ipc.nmbclusters=1000000
+ sysctl kern.ipc.maxsockbuf=2097152
+ sysctl net.inet.tcp.recvspace=131072
+ sysctl net.inet.tcp.sendbuf_max=262144
+ sysctl net.inet.tcp.sendspace=65536
+
+6. Contacting Support
+Please feel free to contact your QLogic approved reseller or QLogic
+Technical Support at any phase of integration for assistance. QLogic
+Technical Support can be reached by the following methods:
+Web: http://support.qlogic.com
+E-mail: support@qlogic.com
+(c) Copyright 2011. All rights reserved worldwide. QLogic, the QLogic
+logo, and the Powered by QLogic logo are registered trademarks of
+QLogic Corporation. All other brand and product names are trademarks
+or registered trademarks of their respective owners.
diff --git a/sys/dev/qlxgb/qla_dbg.c b/sys/dev/qlxgb/qla_dbg.c
new file mode 100644
index 000000000000..5fc6f461e72c
--- /dev/null
+++ b/sys/dev/qlxgb/qla_dbg.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * File : qla_dbg.c
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "qla_os.h"
+#include "qla_reg.h"
+#include "qla_hw.h"
+#include "qla_def.h"
+#include "qla_inline.h"
+#include "qla_ver.h"
+#include "qla_glbl.h"
+#include "qla_dbg.h"
+
+
+uint32_t dbg_level = 0;
+/*
+ * Name: qla_dump_buf32
+ * Function: dumps a buffer as 32 bit words
+ */
+void qla_dump_buf32(qla_host_t *ha, char *msg, void *dbuf32, uint32_t len32)
+{
+ device_t dev;
+ uint32_t i = 0;
+ uint32_t *buf;
+
+ dev = ha->pci_dev;
+ buf = dbuf32;
+
+ device_printf(dev, "%s: %s dump start\n", __func__, msg);
+
+ while (len32 >= 4) {
+ device_printf(dev,"0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, buf[0], buf[1], buf[2], buf[3]);
+ i += 4 * 4;
+ len32 -= 4;
+ buf += 4;
+ }
+ switch (len32) {
+ case 1:
+ device_printf(dev,"0x%08x: 0x%08x\n", i, buf[0]);
+ break;
+ case 2:
+ device_printf(dev,"0x%08x: 0x%08x 0x%08x\n", i, buf[0], buf[1]);
+ break;
+ case 3:
+ device_printf(dev,"0x%08x: 0x%08x 0x%08x 0x%08x\n",
+ i, buf[0], buf[1], buf[2]);
+ break;
+ default:
+ break;
+ }
+ device_printf(dev, "%s: %s dump end\n", __func__, msg);
+}
+
+/*
+ * Name: qla_dump_buf16
+ * Function: dumps a buffer as 16 bit words
+ */
+void qla_dump_buf16(qla_host_t *ha, char *msg, void *dbuf16, uint32_t len16)
+{
+ device_t dev;
+ uint32_t i = 0;
+ uint16_t *buf;
+
+ dev = ha->pci_dev;
+ buf = dbuf16;
+
+ device_printf(dev, "%s: %s dump start\n", __func__, msg);
+
+ while (len16 >= 8) {
+ device_printf(dev,"0x%08x: 0x%04x 0x%04x 0x%04x 0x%04x"
+ " 0x%04x 0x%04x 0x%04x 0x%04x\n", i, buf[0],
+ buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
+ i += 16;
+ len16 -= 8;
+ buf += 8;
+ }
+ switch (len16) {
+ case 1:
+ device_printf(dev,"0x%08x: 0x%04x\n", i, buf[0]);
+ break;
+ case 2:
+ device_printf(dev,"0x%08x: 0x%04x 0x%04x\n", i, buf[0], buf[1]);
+ break;
+ case 3:
+ device_printf(dev,"0x%08x: 0x%04x 0x%04x 0x%04x\n",
+ i, buf[0], buf[1], buf[2]);
+ break;
+ case 4:
+ device_printf(dev,"0x%08x: 0x%04x 0x%04x 0x%04x 0x%04x\n", i,
+ buf[0], buf[1], buf[2], buf[3]);
+ break;
+ case 5:
+ device_printf(dev,"0x%08x:"
+ " 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4]);
+ break;
+ case 6:
+ device_printf(dev,"0x%08x:"
+ " 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
+ break;
+ case 7:
+ device_printf(dev,"0x%04x: 0x%04x 0x%04x 0x%04x 0x%04x"
+ " 0x%04x 0x%04x 0x%04x\n", i, buf[0], buf[1],
+ buf[2], buf[3], buf[4], buf[5], buf[6]);
+ break;
+ default:
+ break;
+ }
+ device_printf(dev, "%s: %s dump end\n", __func__, msg);
+}
+
+/*
+ * Name: qla_dump_buf8
+ * Function: dumps a buffer as bytes
+ */
+void qla_dump_buf8(qla_host_t *ha, char *msg, void *dbuf, uint32_t len)
+{
+ device_t dev;
+ uint32_t i = 0;
+ uint8_t *buf;
+
+ dev = ha->pci_dev;
+ buf = dbuf;
+
+ device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);
+
+ while (len >= 16) {
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3],
+ buf[4], buf[5], buf[6], buf[7],
+ buf[8], buf[9], buf[10], buf[11],
+ buf[12], buf[13], buf[14], buf[15]);
+ i += 16;
+ len -= 16;
+ buf += 16;
+ }
+ switch (len) {
+ case 1:
+ device_printf(dev,"0x%08x: %02x\n", i, buf[0]);
+ break;
+ case 2:
+ device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]);
+ break;
+ case 3:
+ device_printf(dev,"0x%08x: %02x %02x %02x\n",
+ i, buf[0], buf[1], buf[2]);
+ break;
+ case 4:
+ device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3]);
+ break;
+ case 5:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4]);
+ break;
+ case 6:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
+ break;
+ case 7:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
+ break;
+ case 8:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7]);
+ break;
+ case 9:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7], buf[8]);
+ break;
+ case 10:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7], buf[8], buf[9]);
+ break;
+ case 11:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7], buf[8], buf[9], buf[10]);
+ break;
+ case 12:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7], buf[8], buf[9], buf[10], buf[11]);
+ break;
+ case 13:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]);
+ break;
+ case 14:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x %02x %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
+ buf[13]);
+ break;
+ case 15:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x %02x %02x %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
+ buf[13], buf[14]);
+ break;
+ default:
+ break;
+ }
+
+ device_printf(dev, "%s: %s dump end\n", __func__, msg);
+}
diff --git a/sys/dev/qlxgb/qla_dbg.h b/sys/dev/qlxgb/qla_dbg.h
new file mode 100644
index 000000000000..1f0d184cf013
--- /dev/null
+++ b/sys/dev/qlxgb/qla_dbg.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * File : qla_dbg.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#ifndef _QL_DBG_H_
+#define _QL_DBG_H_
+
+extern uint32_t dbg_level;
+
+extern void qla_dump_buf8(qla_host_t *ha, char *str, void *dbuf,
+ uint32_t len);
+extern void qla_dump_buf16(qla_host_t *ha, char *str, void *dbuf,
+ uint32_t len16);
+extern void qla_dump_buf32(qla_host_t *ha, char *str, void *dbuf,
+ uint32_t len32);
+
+
+#define DBG 1
+
+#if DBG
+
+#define QL_DPRINT1(x) if (dbg_level & 0x0001) device_printf x
+#define QL_DPRINT2(x) if (dbg_level & 0x0002) device_printf x
+#define QL_DPRINT4(x) if (dbg_level & 0x0004) device_printf x
+#define QL_DPRINT8(x) if (dbg_level & 0x0008) device_printf x
+#define QL_DPRINT10(x) if (dbg_level & 0x0010) device_printf x
+#define QL_DPRINT20(x) if (dbg_level & 0x0020) device_printf x
+#define QL_DPRINT40(x) if (dbg_level & 0x0040) device_printf x
+#define QL_DPRINT80(x) if (dbg_level & 0x0080) device_printf x
+
+#define QL_DUMP_BUFFER8(h, s, b, n) if (dbg_level & 0x08000000)\
+ qla_dump_buf8(h, s, b, n)
+#define QL_DUMP_BUFFER16(h, s, b, n) if (dbg_level & 0x08000000)\
+ qla_dump_buf16(h, s, b, n)
+#define QL_DUMP_BUFFER32(h, s, b, n) if (dbg_level & 0x08000000)\
+ qla_dump_buf32(h, s, b, n)
+
+#else
+
+#define QL_DPRINT1(x)
+#define QL_DPRINT2(x)
+#define QL_DPRINT4(x)
+#define QL_DPRINT8(x)
+#define QL_DPRINT10(x)
+#define QL_DPRINT20(x)
+#define QL_DPRINT40(x)
+#define QL_DPRINT80(x)
+
+#define QL_DUMP_BUFFER8(h, s, b, n)
+#define QL_DUMP_BUFFER16(h, s, b, n)
+#define QL_DUMP_BUFFER32(h, s, b, n)
+
+#endif
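+
+/*
+ * Usage sketch (illustrative): each macro takes a parenthesized
+ * device_printf() argument list, so callers invoke it with double
+ * parentheses, e.g.:
+ *
+ *     QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
+ *     QL_DUMP_BUFFER8(ha, "rx buffer", buf, len);
+ *
+ * The print macros are gated by the corresponding bit in dbg_level
+ * (0x0001 ... 0x0080); the buffer dumps are gated by bit 0x08000000.
+ * When DBG is 0 they all expand to nothing.
+ */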
+
+#endif /* #ifndef _QL_DBG_H_ */
diff --git a/sys/dev/qlxgb/qla_def.h b/sys/dev/qlxgb/qla_def.h
new file mode 100644
index 000000000000..8a761f483094
--- /dev/null
+++ b/sys/dev/qlxgb/qla_def.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * File: qla_def.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#ifndef _QLA_DEF_H_
+#define _QLA_DEF_H_
+
+#define BIT_0 (0x1 << 0)
+#define BIT_1 (0x1 << 1)
+#define BIT_2 (0x1 << 2)
+#define BIT_3 (0x1 << 3)
+#define BIT_4 (0x1 << 4)
+#define BIT_5 (0x1 << 5)
+#define BIT_6 (0x1 << 6)
+#define BIT_7 (0x1 << 7)
+#define BIT_8 (0x1 << 8)
+#define BIT_9 (0x1 << 9)
+#define BIT_10 (0x1 << 10)
+#define BIT_11 (0x1 << 11)
+#define BIT_12 (0x1 << 12)
+#define BIT_13 (0x1 << 13)
+#define BIT_14 (0x1 << 14)
+#define BIT_15 (0x1 << 15)
+#define BIT_16 (0x1 << 16)
+#define BIT_17 (0x1 << 17)
+#define BIT_18 (0x1 << 18)
+#define BIT_19 (0x1 << 19)
+#define BIT_20 (0x1 << 20)
+#define BIT_21 (0x1 << 21)
+#define BIT_22 (0x1 << 22)
+#define BIT_23 (0x1 << 23)
+#define BIT_24 (0x1 << 24)
+#define BIT_25 (0x1 << 25)
+#define BIT_26 (0x1 << 26)
+#define BIT_27 (0x1 << 27)
+#define BIT_28 (0x1 << 28)
+#define BIT_29 (0x1 << 29)
+#define BIT_30 (0x1 << 30)
+#define BIT_31 (0x1 << 31)
+
+struct qla_rx_buf {
+ struct mbuf *m_head;
+ bus_dmamap_t map;
+ bus_addr_t paddr;
+ uint32_t handle;
+ void *next;
+};
+typedef struct qla_rx_buf qla_rx_buf_t;
+
+struct qla_tx_buf {
+ struct mbuf *m_head;
+ bus_dmamap_t map;
+};
+typedef struct qla_tx_buf qla_tx_buf_t;
+
+#define QLA_MAX_SEGMENTS 63 /* maximum # of segs in a sg list */
+#define QLA_MAX_FRAME_SIZE MJUM9BYTES
+#define QLA_STD_FRAME_SIZE 1514
+#define QLA_MAX_TSO_FRAME_SIZE (64 * 1024 - 1)
+
+/* Number of MSIX/MSI Vectors required */
+#define Q8_MSI_COUNT 4
+
+struct qla_ivec {
+ struct resource *irq;
+ void *handle;
+ int irq_rid;
+ void *ha;
+ struct task rcv_task;
+ struct taskqueue *rcv_tq;
+};
+
+typedef struct qla_ivec qla_ivec_t;
+
+#define QLA_WATCHDOG_CALLOUT_TICKS 1
+
+/*
+ * The adapter structure contains the hardware-independent information of
+ * the PCI function.
+ */
+struct qla_host {
+ volatile struct {
+ volatile uint32_t
+ qla_watchdog_active :1,
+ qla_watchdog_exit :1,
+ qla_watchdog_pause :1,
+ lro_init :1,
+ stop_rcv :1,
+ link_up :1,
+ parent_tag :1,
+ lock_init :1;
+ } flags;
+
+ device_t pci_dev;
+
+ uint8_t pci_func;
+ uint16_t watchdog_ticks;
+ uint8_t resvd;
+
+ /* ioctl related */
+ struct cdev *ioctl_dev;
+
+ /* register mapping */
+ struct resource *pci_reg;
+ int reg_rid;
+
+ /* interrupts */
+ struct resource *irq;
+ int msix_count;
+ void *intr_handle;
+ qla_ivec_t irq_vec[Q8_MSI_COUNT];
+
+ /* parent dma tag */
+ bus_dma_tag_t parent_tag;
+
+ /* interface to the OS */
+ struct ifnet *ifp;
+
+ struct ifmedia media;
+ uint16_t max_frame_size;
+ uint16_t rsrvd0;
+ int if_flags;
+
+ /* hardware access lock */
+ struct mtx hw_lock;
+ volatile uint32_t hw_lock_held;
+
+ /* transmit and receive buffers */
+ qla_tx_buf_t tx_buf[NUM_TX_DESCRIPTORS];
+ bus_dma_tag_t tx_tag;
+ struct mtx tx_lock;
+ struct task tx_task;
+ struct taskqueue *tx_tq;
+ struct callout tx_callout;
+
+ qla_rx_buf_t rx_buf[NUM_RX_DESCRIPTORS];
+ qla_rx_buf_t rx_jbuf[NUM_RX_JUMBO_DESCRIPTORS];
+ bus_dma_tag_t rx_tag;
+
+ struct mtx rx_lock;
+ struct mtx rxj_lock;
+
+ /* stats */
+ uint32_t err_m_getcl;
+ uint32_t err_m_getjcl;
+ uint32_t err_tx_dmamap_create;
+ uint32_t err_tx_dmamap_load;
+ uint32_t err_tx_defrag;
+
+ uint64_t rx_frames;
+ uint64_t rx_bytes;
+
+ uint64_t tx_frames;
+ uint64_t tx_bytes;
+
+ uint32_t fw_ver_major;
+ uint32_t fw_ver_minor;
+ uint32_t fw_ver_sub;
+ uint32_t fw_ver_build;
+
+ /* hardware specific */
+ qla_hw_t hw;
+
+ /* debug stuff */
+ volatile const char *qla_lock;
+ volatile const char *qla_unlock;
+};
+typedef struct qla_host qla_host_t;
+
+/* note that align has to be a power of 2 */
+#define QL_ALIGN(size, align) (((size) + ((align) - 1)) & ~((align) - 1))
+#define QL_MIN(x, y) (((x) < (y)) ? (x) : (y))
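+/*
+ * For example (illustrative): with align = 8, QL_ALIGN(100, 8) yields 104
+ * and QL_ALIGN(104, 8) stays 104; the ~(align - 1) mask is only valid when
+ * align is a power of 2.
+ */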
+
+#define QL_RUNNING(ifp) \
+ ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == \
+ IFF_DRV_RUNNING)
+
+#endif /* #ifndef _QLA_DEF_H_ */
diff --git a/sys/dev/qlxgb/qla_glbl.h b/sys/dev/qlxgb/qla_glbl.h
new file mode 100644
index 000000000000..21ee99c3cc2d
--- /dev/null
+++ b/sys/dev/qlxgb/qla_glbl.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * File: qla_glbl.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ * Content: Contains prototypes of the exported functions from each file.
+ */
+#ifndef _QLA_GLBL_H_
+#define _QLA_GLBL_H_
+
+/*
+ * from qla_isr.c
+ */
+extern void qla_isr(void *arg);
+extern void qla_rcv(void *context, int pending);
+
+/*
+ * from qla_os.c
+ */
+extern uint32_t std_replenish;
+extern uint32_t jumbo_replenish;
+extern uint32_t rcv_pkt_thres;
+extern uint32_t rcv_pkt_thres_d;
+extern uint32_t snd_pkt_thres;
+extern uint32_t free_pkt_thres;
+
+extern int qla_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf);
+extern void qla_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf);
+extern void qla_start(struct ifnet *ifp);
+extern int qla_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp,
+ uint32_t jumbo);
+
+
+/*
+ * from qla_flash.c
+ */
+extern int qla_flash_rd32_words(qla_host_t *ha, uint32_t addr,
+ uint32_t *val, uint32_t num);
+extern int qla_flash_rd32(qla_host_t *ha, uint32_t addr, uint32_t *val);
+
+/*
+ * from qla_hw.c
+ */
+extern int qla_get_msix_count(qla_host_t *ha);
+extern int qla_alloc_dma(qla_host_t *ha);
+extern void qla_free_dma(qla_host_t *ha);
+extern void qla_hw_add_sysctls(qla_host_t *ha);
+extern int qla_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
+ uint32_t *tx_idx, struct mbuf *mp);
+extern int qla_init_hw_if(qla_host_t *ha);
+extern void qla_get_hw_caps(qla_host_t *ha);
+extern void qla_hw_set_multi(qla_host_t *ha, uint8_t *mta, uint32_t mcnt,
+ uint32_t add_multi);
+extern void qla_del_hw_if(qla_host_t *ha);
+extern void qla_set_promisc(qla_host_t *ha);
+extern void qla_set_allmulti(qla_host_t *ha);
+extern void qla_reset_promisc_allmulti(qla_host_t *ha);
+extern void qla_config_ipv4_addr(qla_host_t *ha, uint32_t ipv4_addr);
+extern int qla_hw_tx_compl(qla_host_t *ha);
+extern void qla_update_link_state(qla_host_t *ha);
+extern void qla_hw_tx_done(qla_host_t *ha);
+extern int qla_config_lro(qla_host_t *ha);
+extern void qla_free_lro(qla_host_t *ha);
+extern int qla_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id);
+extern void qla_hw_stop_rcv(qla_host_t *ha);
+
+/*
+ * from qla_misc.c
+ */
+extern int qla_init_hw(qla_host_t *ha);
+extern int qla_rdwr_indreg32(qla_host_t *ha, uint32_t addr, uint32_t *val,
+ uint32_t rd);
+extern int qla_rd_flash32(qla_host_t *ha, uint32_t addr, uint32_t *data);
+
+/*
+ * from qla_ioctl.c
+ */
+extern int qla_make_cdev(qla_host_t *ha);
+extern void qla_del_cdev(qla_host_t *ha);
+extern int qla_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
+ struct thread *td);
+
+#endif /* #ifndef _QLA_GLBL_H_ */
diff --git a/sys/dev/qlxgb/qla_hw.c b/sys/dev/qlxgb/qla_hw.c
new file mode 100644
index 000000000000..477eb5720eac
--- /dev/null
+++ b/sys/dev/qlxgb/qla_hw.c
@@ -0,0 +1,1776 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * File: qla_hw.c
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ * Content: Contains hardware-dependent functions
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "qla_os.h"
+#include "qla_reg.h"
+#include "qla_hw.h"
+#include "qla_def.h"
+#include "qla_inline.h"
+#include "qla_ver.h"
+#include "qla_glbl.h"
+#include "qla_dbg.h"
+
+static uint32_t sysctl_num_rds_rings = 2;
+static uint32_t sysctl_num_sds_rings = 4;
+
+/*
+ * Static Functions
+ */
+
+static void qla_init_cntxt_regions(qla_host_t *ha);
+static int qla_issue_cmd(qla_host_t *ha, qla_cdrp_t *cdrp);
+static int qla_fw_cmd(qla_host_t *ha, void *fw_cmd, uint32_t size);
+static int qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr,
+ uint16_t cntxt_id, uint32_t add_multi);
+static void qla_del_rcv_cntxt(qla_host_t *ha);
+static int qla_init_rcv_cntxt(qla_host_t *ha);
+static void qla_del_xmt_cntxt(qla_host_t *ha);
+static int qla_init_xmt_cntxt(qla_host_t *ha);
+static int qla_get_max_rds(qla_host_t *ha);
+static int qla_get_max_sds(qla_host_t *ha);
+static int qla_get_max_rules(qla_host_t *ha);
+static int qla_get_max_rcv_cntxts(qla_host_t *ha);
+static int qla_get_max_tx_cntxts(qla_host_t *ha);
+static int qla_get_max_mtu(qla_host_t *ha);
+static int qla_get_max_lro(qla_host_t *ha);
+static int qla_get_flow_control(qla_host_t *ha);
+static void qla_hw_tx_done_locked(qla_host_t *ha);
+
+int
+qla_get_msix_count(qla_host_t *ha)
+{
+ return (sysctl_num_sds_rings);
+}
+
+/*
+ * Name: qla_hw_add_sysctls
+ * Function: Add P3Plus specific sysctls
+ */
+void
+qla_hw_add_sysctls(qla_host_t *ha)
+{
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "num_rds_rings", CTLFLAG_RD, &sysctl_num_rds_rings,
+ sysctl_num_rds_rings, "Number of Rcv Descriptor Rings");
+
+ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "num_sds_rings", CTLFLAG_RD, &sysctl_num_sds_rings,
+ sysctl_num_sds_rings, "Number of Status Descriptor Rings");
+}
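+
+/*
+ * Note (illustrative): the OIDs above are attached to the device's sysctl
+ * tree and are read-only (CTLFLAG_RD); on a typical system they would
+ * appear as, e.g., dev.qlxgb.0.num_rds_rings and dev.qlxgb.0.num_sds_rings.
+ */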
+
+/*
+ * Name: qla_free_dma
+ * Function: Frees the DMA'able memory allocated in qla_alloc_dma()
+ */
+void
+qla_free_dma(qla_host_t *ha)
+{
+ uint32_t i;
+
+ if (ha->hw.dma_buf.flags.context) {
+ qla_free_dmabuf(ha, &ha->hw.dma_buf.context);
+ ha->hw.dma_buf.flags.context = 0;
+ }
+
+ if (ha->hw.dma_buf.flags.sds_ring) {
+ for (i = 0; i < ha->hw.num_sds_rings; i++)
+ qla_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
+ ha->hw.dma_buf.flags.sds_ring = 0;
+ }
+
+ if (ha->hw.dma_buf.flags.rds_ring) {
+ for (i = 0; i < ha->hw.num_rds_rings; i++)
+ qla_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
+ ha->hw.dma_buf.flags.rds_ring = 0;
+ }
+
+ if (ha->hw.dma_buf.flags.tx_ring) {
+ qla_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
+ ha->hw.dma_buf.flags.tx_ring = 0;
+ }
+}
+
+/*
+ * Name: qla_alloc_dma
+ * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
+ */
+int
+qla_alloc_dma(qla_host_t *ha)
+{
+ device_t dev;
+ uint32_t i, j, size;
+
+ dev = ha->pci_dev;
+
+ QL_DPRINT2((dev, "%s: enter\n", __func__));
+
+ ha->hw.num_rds_rings = (uint16_t)sysctl_num_rds_rings;
+ ha->hw.num_sds_rings = (uint16_t)sysctl_num_sds_rings;
+
+ /*
+ * Allocate Transmit Ring
+ */
+
+ ha->hw.dma_buf.tx_ring.alignment = 8;
+ ha->hw.dma_buf.tx_ring.size =
+ (sizeof(q80_tx_cmd_t)) * NUM_TX_DESCRIPTORS;
+
+ if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.tx_ring)) {
+ device_printf(dev, "%s: tx ring alloc failed\n", __func__);
+ goto qla_alloc_dma_exit;
+ }
+ ha->hw.dma_buf.flags.tx_ring = 1;
+
+ QL_DPRINT2((dev, "%s: tx_ring phys %p virt %p\n",
+ __func__, (void *)(ha->hw.dma_buf.tx_ring.dma_addr),
+ ha->hw.dma_buf.tx_ring.dma_b));
+ /*
+ * Allocate Receive Descriptor Rings
+ */
+
+ for (i = 0; i < ha->hw.num_rds_rings; i++) {
+ ha->hw.dma_buf.rds_ring[i].alignment = 8;
+
+ if (i == RDS_RING_INDEX_NORMAL) {
+ ha->hw.dma_buf.rds_ring[i].size =
+ (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;
+ } else if (i == RDS_RING_INDEX_JUMBO) {
+ ha->hw.dma_buf.rds_ring[i].size =
+ (sizeof(q80_recv_desc_t)) *
+ NUM_RX_JUMBO_DESCRIPTORS;
+ } else
+ break;
+
+ if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i])) {
+ QL_DPRINT4((dev, "%s: rds ring alloc failed\n",
+ __func__));
+
+ for (j = 0; j < i; j++)
+ qla_free_dmabuf(ha,
+ &ha->hw.dma_buf.rds_ring[j]);
+
+ goto qla_alloc_dma_exit;
+ }
+ QL_DPRINT4((dev, "%s: rx_ring[%d] phys %p virt %p\n",
+ __func__, i,
+ (void *)(ha->hw.dma_buf.rds_ring[i].dma_addr),
+ ha->hw.dma_buf.rds_ring[i].dma_b));
+ }
+ ha->hw.dma_buf.flags.rds_ring = 1;
+
+ /*
+ * Allocate Status Descriptor Rings
+ */
+
+ for (i = 0; i < ha->hw.num_sds_rings; i++) {
+ ha->hw.dma_buf.sds_ring[i].alignment = 8;
+ ha->hw.dma_buf.sds_ring[i].size =
+ (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;
+
+ if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i])) {
+ device_printf(dev, "%s: sds ring alloc failed\n",
+ __func__);
+
+ for (j = 0; j < i; j++)
+ qla_free_dmabuf(ha,
+ &ha->hw.dma_buf.sds_ring[j]);
+
+ goto qla_alloc_dma_exit;
+ }
+ QL_DPRINT4((dev, "%s: sds_ring[%d] phys %p virt %p\n",
+ __func__, i,
+ (void *)(ha->hw.dma_buf.sds_ring[i].dma_addr),
+ ha->hw.dma_buf.sds_ring[i].dma_b));
+ }
+ ha->hw.dma_buf.flags.sds_ring = 1;
+
+ /*
+ * Allocate Context Area
+ */
+ size = QL_ALIGN((sizeof (q80_tx_cntxt_req_t)), QL_BUFFER_ALIGN);
+
+ size += QL_ALIGN((sizeof (q80_tx_cntxt_rsp_t)), QL_BUFFER_ALIGN);
+
+ size += QL_ALIGN((sizeof (q80_rcv_cntxt_req_t)), QL_BUFFER_ALIGN);
+
+ size += QL_ALIGN((sizeof (q80_rcv_cntxt_rsp_t)), QL_BUFFER_ALIGN);
+
+ size += sizeof (uint32_t); /* for tx consumer index */
+
+ size = QL_ALIGN(size, PAGE_SIZE);
+
+ ha->hw.dma_buf.context.alignment = 8;
+ ha->hw.dma_buf.context.size = size;
+
+ if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.context)) {
+ device_printf(dev, "%s: context alloc failed\n", __func__);
+ goto qla_alloc_dma_exit;
+ }
+ ha->hw.dma_buf.flags.context = 1;
+ QL_DPRINT2((dev, "%s: context phys %p virt %p\n",
+ __func__, (void *)(ha->hw.dma_buf.context.dma_addr),
+ ha->hw.dma_buf.context.dma_b));
+
+ qla_init_cntxt_regions(ha);
+
+ return 0;
+
+qla_alloc_dma_exit:
+ qla_free_dma(ha);
+ return -1;
+}
+
+/*
+ * Name: qla_init_cntxt_regions
+ * Function: Initializes Tx/Rx Contexts.
+ */
+static void
+qla_init_cntxt_regions(qla_host_t *ha)
+{
+ qla_hw_t *hw;
+ q80_tx_cntxt_req_t *tx_cntxt_req;
+ q80_rcv_cntxt_req_t *rx_cntxt_req;
+ bus_addr_t phys_addr;
+ uint32_t i;
+ device_t dev;
+ uint32_t size;
+
+ dev = ha->pci_dev;
+
+ hw = &ha->hw;
+
+ hw->tx_ring_base = hw->dma_buf.tx_ring.dma_b;
+
+ for (i = 0; i < ha->hw.num_sds_rings; i++)
+ hw->sds[i].sds_ring_base =
+ (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
+
+
+ phys_addr = hw->dma_buf.context.dma_addr;
+
+ memset((void *)hw->dma_buf.context.dma_b, 0,
+ ha->hw.dma_buf.context.size);
+
+ hw->tx_cntxt_req =
+ (q80_tx_cntxt_req_t *)hw->dma_buf.context.dma_b;
+ hw->tx_cntxt_req_paddr = phys_addr;
+
+ size = QL_ALIGN((sizeof (q80_tx_cntxt_req_t)), QL_BUFFER_ALIGN);
+
+ hw->tx_cntxt_rsp =
+ (q80_tx_cntxt_rsp_t *)((uint8_t *)hw->tx_cntxt_req + size);
+ hw->tx_cntxt_rsp_paddr = hw->tx_cntxt_req_paddr + size;
+
+ size = QL_ALIGN((sizeof (q80_tx_cntxt_rsp_t)), QL_BUFFER_ALIGN);
+
+ hw->rx_cntxt_req =
+ (q80_rcv_cntxt_req_t *)((uint8_t *)hw->tx_cntxt_rsp + size);
+ hw->rx_cntxt_req_paddr = hw->tx_cntxt_rsp_paddr + size;
+
+ size = QL_ALIGN((sizeof (q80_rcv_cntxt_req_t)), QL_BUFFER_ALIGN);
+
+ hw->rx_cntxt_rsp =
+ (q80_rcv_cntxt_rsp_t *)((uint8_t *)hw->rx_cntxt_req + size);
+ hw->rx_cntxt_rsp_paddr = hw->rx_cntxt_req_paddr + size;
+
+ size = QL_ALIGN((sizeof (q80_rcv_cntxt_rsp_t)), QL_BUFFER_ALIGN);
+
+ hw->tx_cons = (uint32_t *)((uint8_t *)hw->rx_cntxt_rsp + size);
+ hw->tx_cons_paddr = hw->rx_cntxt_rsp_paddr + size;
+
+ /*
+ * Initialize the Transmit Context Request so that we don't need to
+ * do it everytime we need to create a context
+ */
+ tx_cntxt_req = hw->tx_cntxt_req;
+
+ tx_cntxt_req->rsp_dma_addr = qla_host_to_le64(hw->tx_cntxt_rsp_paddr);
+
+ tx_cntxt_req->cmd_cons_dma_addr = qla_host_to_le64(hw->tx_cons_paddr);
+
+ tx_cntxt_req->caps[0] = qla_host_to_le32((CNTXT_CAP0_BASEFW |
+ CNTXT_CAP0_LEGACY_MN | CNTXT_CAP0_LSO));
+
+ tx_cntxt_req->intr_mode = qla_host_to_le32(CNTXT_INTR_MODE_SHARED);
+
+ tx_cntxt_req->phys_addr =
+ qla_host_to_le64(hw->dma_buf.tx_ring.dma_addr);
+
+ tx_cntxt_req->num_entries = qla_host_to_le32(NUM_TX_DESCRIPTORS);
+
+ /*
+ * Initialize the Receive Context Request
+ */
+
+ rx_cntxt_req = hw->rx_cntxt_req;
+
+ rx_cntxt_req->rx_req.rsp_dma_addr =
+ qla_host_to_le64(hw->rx_cntxt_rsp_paddr);
+
+ rx_cntxt_req->rx_req.caps[0] = qla_host_to_le32(CNTXT_CAP0_BASEFW |
+ CNTXT_CAP0_LEGACY_MN |
+ CNTXT_CAP0_JUMBO |
+ CNTXT_CAP0_LRO|
+ CNTXT_CAP0_HW_LRO);
+
+ rx_cntxt_req->rx_req.intr_mode =
+ qla_host_to_le32(CNTXT_INTR_MODE_SHARED);
+
+ rx_cntxt_req->rx_req.rds_intr_mode =
+ qla_host_to_le32(CNTXT_INTR_MODE_UNIQUE);
+
+ rx_cntxt_req->rx_req.rds_ring_offset = 0;
+ rx_cntxt_req->rx_req.sds_ring_offset = qla_host_to_le32(
+ (hw->num_rds_rings * sizeof(q80_rq_rds_ring_t)));
+ rx_cntxt_req->rx_req.num_rds_rings =
+ qla_host_to_le16(hw->num_rds_rings);
+ rx_cntxt_req->rx_req.num_sds_rings =
+ qla_host_to_le16(hw->num_sds_rings);
+
+ for (i = 0; i < hw->num_rds_rings; i++) {
+ rx_cntxt_req->rds_req[i].phys_addr =
+ qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
+
+ if (i == RDS_RING_INDEX_NORMAL) {
+ rx_cntxt_req->rds_req[i].buf_size =
+ qla_host_to_le64(MCLBYTES);
+ rx_cntxt_req->rds_req[i].size =
+ qla_host_to_le32(NUM_RX_DESCRIPTORS);
+ } else {
+ rx_cntxt_req->rds_req[i].buf_size =
+ qla_host_to_le64(MJUM9BYTES);
+ rx_cntxt_req->rds_req[i].size =
+ qla_host_to_le32(NUM_RX_JUMBO_DESCRIPTORS);
+ }
+ }
+
+ for (i = 0; i < hw->num_sds_rings; i++) {
+ rx_cntxt_req->sds_req[i].phys_addr =
+ qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
+ rx_cntxt_req->sds_req[i].size =
+ qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
+ rx_cntxt_req->sds_req[i].msi_index = qla_host_to_le16(i);
+ }
+
+ QL_DPRINT2((ha->pci_dev, "%s: tx_cntxt_req = %p paddr %p\n",
+ __func__, hw->tx_cntxt_req, (void *)hw->tx_cntxt_req_paddr));
+ QL_DPRINT2((ha->pci_dev, "%s: tx_cntxt_rsp = %p paddr %p\n",
+ __func__, hw->tx_cntxt_rsp, (void *)hw->tx_cntxt_rsp_paddr));
+ QL_DPRINT2((ha->pci_dev, "%s: rx_cntxt_req = %p paddr %p\n",
+ __func__, hw->rx_cntxt_req, (void *)hw->rx_cntxt_req_paddr));
+ QL_DPRINT2((ha->pci_dev, "%s: rx_cntxt_rsp = %p paddr %p\n",
+ __func__, hw->rx_cntxt_rsp, (void *)hw->rx_cntxt_rsp_paddr));
+ QL_DPRINT2((ha->pci_dev, "%s: tx_cons = %p paddr %p\n",
+ __func__, hw->tx_cons, (void *)hw->tx_cons_paddr));
+}
+
+/*
+ * Name: qla_issue_cmd
+ * Function: Issues commands on the CDRP interface and returns responses.
+ */
+static int
+qla_issue_cmd(qla_host_t *ha, qla_cdrp_t *cdrp)
+{
+ int ret = 0;
+ uint32_t signature;
+ uint32_t count = 400; /* 4 seconds or 400 10ms intervals */
+ uint32_t data;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ signature = 0xcafe0000 | 0x0100 | ha->pci_func;
+
+ ret = qla_sem_lock(ha, Q8_SEM5_LOCK, 0, (uint32_t)ha->pci_func);
+
+ if (ret) {
+ device_printf(dev, "%s: SEM5_LOCK lock failed\n", __func__);
+ return (ret);
+ }
+
+ WRITE_OFFSET32(ha, Q8_NX_CDRP_SIGNATURE, signature);
+
+ WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG1, (cdrp->cmd_arg1));
+ WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG2, (cdrp->cmd_arg2));
+ WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG3, (cdrp->cmd_arg3));
+
+ WRITE_OFFSET32(ha, Q8_NX_CDRP_CMD_RSP, cdrp->cmd);
+
+ while (count) {
+ qla_mdelay(__func__, 10);
+
+ data = READ_REG32(ha, Q8_NX_CDRP_CMD_RSP);
+
+ if ((!(data & 0x80000000)))
+ break;
+ count--;
+ }
+ if ((!count) || (data != 1))
+ ret = -1;
+
+ cdrp->rsp = READ_REG32(ha, Q8_NX_CDRP_CMD_RSP);
+ cdrp->rsp_arg1 = READ_REG32(ha, Q8_NX_CDRP_ARG1);
+ cdrp->rsp_arg2 = READ_REG32(ha, Q8_NX_CDRP_ARG2);
+ cdrp->rsp_arg3 = READ_REG32(ha, Q8_NX_CDRP_ARG3);
+
+ qla_sem_unlock(ha, Q8_SEM5_UNLOCK);
+
+ if (ret) {
+ device_printf(dev, "%s: "
+ "cmd[0x%08x] = 0x%08x\n"
+ "\tsig[0x%08x] = 0x%08x\n"
+ "\targ1[0x%08x] = 0x%08x\n"
+ "\targ2[0x%08x] = 0x%08x\n"
+ "\targ3[0x%08x] = 0x%08x\n",
+ __func__, Q8_NX_CDRP_CMD_RSP, cdrp->cmd,
+ Q8_NX_CDRP_SIGNATURE, signature,
+ Q8_NX_CDRP_ARG1, cdrp->cmd_arg1,
+ Q8_NX_CDRP_ARG2, cdrp->cmd_arg2,
+ Q8_NX_CDRP_ARG3, cdrp->cmd_arg3);
+
+ device_printf(dev, "%s: exit (ret = 0x%x)\n"
+ "\t\t rsp = 0x%08x\n"
+ "\t\t arg1 = 0x%08x\n"
+ "\t\t arg2 = 0x%08x\n"
+ "\t\t arg3 = 0x%08x\n",
+ __func__, ret, cdrp->rsp,
+ cdrp->rsp_arg1, cdrp->rsp_arg2, cdrp->rsp_arg3);
+ }
+
+ return (ret);
+}
+
+#define QLA_TX_MIN_FREE 2
+
+/*
+ * Name: qla_fw_cmd
+ * Function: Issues firmware control commands on the Tx Ring.
+ */
+static int
+qla_fw_cmd(qla_host_t *ha, void *fw_cmd, uint32_t size)
+{
+ device_t dev;
+ q80_tx_cmd_t *tx_cmd;
+ qla_hw_t *hw = &ha->hw;
+ int count = 100;
+
+ dev = ha->pci_dev;
+
+ QLA_TX_LOCK(ha);
+
+ if (hw->txr_free <= QLA_TX_MIN_FREE) {
+ while (count--) {
+ qla_hw_tx_done_locked(ha);
+ if (hw->txr_free > QLA_TX_MIN_FREE)
+ break;
+
+ QLA_TX_UNLOCK(ha);
+ qla_mdelay(__func__, 10);
+ QLA_TX_LOCK(ha);
+ }
+ if (hw->txr_free <= QLA_TX_MIN_FREE) {
+ QLA_TX_UNLOCK(ha);
+ device_printf(dev, "%s: xmit queue full\n", __func__);
+ return (-1);
+ }
+ }
+ tx_cmd = &hw->tx_ring_base[hw->txr_next];
+
+ bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
+
+ bcopy(fw_cmd, tx_cmd, size);
+
+ hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
+ hw->txr_free--;
+
+ QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->txr_next);
+
+ QLA_TX_UNLOCK(ha);
+
+ return (0);
+}
+
+/*
+ * Name: qla_config_rss
+ * Function: Configure RSS for the context/interface.
+ */
+const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL };
+
+static int
+qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
+{
+ qla_fw_cds_config_rss_t rss_config;
+ int ret, i;
+
+ bzero(&rss_config, sizeof(qla_fw_cds_config_rss_t));
+
+ rss_config.hdr.cmd = Q8_FWCD_CNTRL_REQ;
+ rss_config.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_RSS;
+ rss_config.hdr.cntxt_id = cntxt_id;
+
+ rss_config.hash_type = (Q8_FWCD_RSS_HASH_TYPE_IPV4_TCP_IP |
+ Q8_FWCD_RSS_HASH_TYPE_IPV6_TCP_IP);
+ rss_config.flags = Q8_FWCD_RSS_FLAGS_ENABLE_RSS;
+
+ rss_config.ind_tbl_mask = 0x7;
+
+ for (i = 0; i < 5; i++)
+ rss_config.rss_key[i] = rss_key[i];
+
+ ret = qla_fw_cmd(ha, &rss_config, sizeof(qla_fw_cds_config_rss_t));
+
+ return ret;
+}
+
+/*
+ * Name: qla_config_intr_coalesce
+ * Function: Configure Interrupt Coalescing.
+ */
+static int
+qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable)
+{
+ qla_fw_cds_config_intr_coalesc_t intr_coalesce;
+ int ret;
+
+ bzero(&intr_coalesce, sizeof(qla_fw_cds_config_intr_coalesc_t));
+
+ intr_coalesce.hdr.cmd = Q8_FWCD_CNTRL_REQ;
+ intr_coalesce.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_INTR_COALESCING;
+ intr_coalesce.hdr.cntxt_id = cntxt_id;
+
+ intr_coalesce.flags = 0x04;
+ intr_coalesce.max_rcv_pkts = 256;
+ intr_coalesce.max_rcv_usecs = 3;
+ intr_coalesce.max_snd_pkts = 64;
+ intr_coalesce.max_snd_usecs = 4;
+
+ if (tenable) {
+ intr_coalesce.usecs_to = 1000; /* 1 millisecond */
+ intr_coalesce.timer_type = Q8_FWCMD_INTR_COALESC_TIMER_PERIODIC;
+ intr_coalesce.sds_ring_bitmask =
+ Q8_FWCMD_INTR_COALESC_SDS_RING_0;
+ }
+
+ ret = qla_fw_cmd(ha, &intr_coalesce,
+ sizeof(qla_fw_cds_config_intr_coalesc_t));
+
+ return ret;
+}
+
+
+/*
+ * Name: qla_config_mac_addr
+ * Function: binds a MAC address to the context/interface.
+ * Can be unicast, multicast or broadcast.
+ */
+static int
+qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint16_t cntxt_id,
+ uint32_t add_multi)
+{
+ qla_fw_cds_config_mac_addr_t mac_config;
+ int ret;
+
+// device_printf(ha->pci_dev,
+// "%s: mac_addr %02x:%02x:%02x:%02x:%02x:%02x\n", __func__,
+// mac_addr[0], mac_addr[1], mac_addr[2],
+// mac_addr[3], mac_addr[4], mac_addr[5]);
+
+ bzero(&mac_config, sizeof(qla_fw_cds_config_mac_addr_t));
+
+ mac_config.hdr.cmd = Q8_FWCD_CNTRL_REQ;
+ mac_config.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_MAC_ADDR;
+ mac_config.hdr.cntxt_id = cntxt_id;
+
+ if (add_multi)
+ mac_config.cmd = Q8_FWCD_ADD_MAC_ADDR;
+ else
+ mac_config.cmd = Q8_FWCD_DEL_MAC_ADDR;
+ bcopy(mac_addr, mac_config.mac_addr,6);
+
+ ret = qla_fw_cmd(ha, &mac_config, sizeof(qla_fw_cds_config_mac_addr_t));
+
+ return ret;
+}
+
+
+/*
+ * Name: qla_set_mac_rcv_mode
+ * Function: Enable/Disable AllMulticast and Promiscuous Modes.
+ */
+static int
+qla_set_mac_rcv_mode(qla_host_t *ha, uint16_t cntxt_id, uint32_t mode)
+{
+ qla_set_mac_rcv_mode_t rcv_mode;
+ int ret;
+
+ bzero(&rcv_mode, sizeof(qla_set_mac_rcv_mode_t));
+
+ rcv_mode.hdr.cmd = Q8_FWCD_CNTRL_REQ;
+ rcv_mode.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_MAC_RCV_MODE;
+ rcv_mode.hdr.cntxt_id = cntxt_id;
+
+ rcv_mode.mode = mode;
+
+ ret = qla_fw_cmd(ha, &rcv_mode, sizeof(qla_set_mac_rcv_mode_t));
+
+ return ret;
+}
+
+void
+qla_set_promisc(qla_host_t *ha)
+{
+ (void)qla_set_mac_rcv_mode(ha,
+ (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id,
+ Q8_MAC_RCV_ENABLE_PROMISCUOUS);
+}
+
+void
+qla_set_allmulti(qla_host_t *ha)
+{
+ (void)qla_set_mac_rcv_mode(ha,
+ (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id,
+ Q8_MAC_RCV_ENABLE_ALLMULTI);
+}
+
+void
+qla_reset_promisc_allmulti(qla_host_t *ha)
+{
+ (void)qla_set_mac_rcv_mode(ha,
+ (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id,
+ Q8_MAC_RCV_RESET_PROMISC_ALLMULTI);
+}
+
+/*
+ * Name: qla_config_ipv4_addr
+ * Function: Configures the Destination IP Addr for LRO.
+ */
+void
+qla_config_ipv4_addr(qla_host_t *ha, uint32_t ipv4_addr)
+{
+ qla_config_ipv4_t ip_conf;
+
+ bzero(&ip_conf, sizeof(qla_config_ipv4_t));
+
+ ip_conf.hdr.cmd = Q8_FWCD_CNTRL_REQ;
+ ip_conf.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_IPADDR;
+ ip_conf.hdr.cntxt_id = (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id;
+
+ ip_conf.cmd = (uint64_t)Q8_CONFIG_CMD_IP_ENABLE;
+ ip_conf.ipv4_addr = (uint64_t)ipv4_addr;
+
+ (void)qla_fw_cmd(ha, &ip_conf, sizeof(qla_config_ipv4_t));
+
+ return;
+}
+
+/*
+ * Name: qla_tx_tso
+ * Function: Checks if the packet to be transmitted is a candidate for
+ * Large TCP Segment Offload. If yes, the appropriate fields in the Tx
+ * Ring Structure are plugged in.
+ */
+static int
+qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd)
+{
+ struct ether_vlan_header *eh;
+ struct ip *ip = NULL;
+ struct tcphdr *th = NULL;
+ uint32_t ehdrlen, hdrlen, ip_hlen, tcp_hlen;
+ uint16_t etype, opcode, offload = 1;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ if (mp->m_pkthdr.len <= ha->max_frame_size)
+ return (-1);
+
+ eh = mtod(mp, struct ether_vlan_header *);
+
+ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+ ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+ etype = ntohs(eh->evl_proto);
+ } else {
+ ehdrlen = ETHER_HDR_LEN;
+ etype = ntohs(eh->evl_encap_proto);
+ }
+
+ switch (etype) {
+ case ETHERTYPE_IP:
+ ip = (struct ip *)(mp->m_data + ehdrlen);
+ ip_hlen = ip->ip_hl << 2;
+ opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
+
+ if (ip->ip_p != IPPROTO_TCP) {
+ offload = 0;
+ } else
+ th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
+ break;
+
+ default:
+ QL_DPRINT8((dev, "%s: type!=ip\n", __func__));
+ offload = 0;
+ break;
+ }
+
+ if (!offload)
+ return (-1);
+
+ tcp_hlen = th->th_off << 2;
+
+ hdrlen = ehdrlen + ip_hlen + tcp_hlen;
+
+ if (mp->m_len < hdrlen) {
+ device_printf(dev, "%s: (mp->m_len < hdrlen)\n", __func__);
+ return (-1);
+ }
+
+ tx_cmd->flags_opcode = opcode;
+ tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
+ tx_cmd->ip_hdr_off = ehdrlen;
+ tx_cmd->mss = mp->m_pkthdr.tso_segsz;
+ tx_cmd->total_hdr_len = hdrlen;
+
+ /* Multicast/broadcast destinations have the LSB of the first MAC byte set */
+ if (eh->evl_dhost[0] & 0x01) {
+ tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
+ }
+
+ return (0);
+}
+
+/*
+ * Name: qla_tx_chksum
+ * Function: Checks if the packet to be transmitted is a candidate for
+ * TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
+ * Ring Structure are plugged in.
+ */
+static int
+qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd)
+{
+ struct ether_vlan_header *eh;
+ struct ip *ip;
+ struct ip6_hdr *ip6;
+ uint32_t ehdrlen, ip_hlen;
+ uint16_t etype, opcode, offload = 1;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0)
+ return (-1);
+
+ eh = mtod(mp, struct ether_vlan_header *);
+
+ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+ ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+ etype = ntohs(eh->evl_proto);
+ } else {
+ ehdrlen = ETHER_HDR_LEN;
+ etype = ntohs(eh->evl_encap_proto);
+ }
+
+
+ switch (etype) {
+ case ETHERTYPE_IP:
+ ip = (struct ip *)(mp->m_data + ehdrlen);
+
+ ip_hlen = ip->ip_hl << 2;
+
+ if (mp->m_len < (ehdrlen + ip_hlen)) {
+ device_printf(dev, "%s: ipv4 mlen\n", __func__);
+ offload = 0;
+ break;
+ }
+
+ if (ip->ip_p == IPPROTO_TCP)
+ opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
+ else if (ip->ip_p == IPPROTO_UDP)
+ opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
+ else {
+ device_printf(dev, "%s: ipv4\n", __func__);
+ offload = 0;
+ }
+ break;
+
+ case ETHERTYPE_IPV6:
+ ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
+
+ ip_hlen = sizeof(struct ip6_hdr);
+
+ if (mp->m_len < (ehdrlen + ip_hlen)) {
+ device_printf(dev, "%s: ipv6 mlen\n", __func__);
+ offload = 0;
+ break;
+ }
+
+ if (ip6->ip6_nxt == IPPROTO_TCP)
+ opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
+ else if (ip6->ip6_nxt == IPPROTO_UDP)
+ opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
+ else {
+ device_printf(dev, "%s: ipv6\n", __func__);
+ offload = 0;
+ }
+ break;
+
+ default:
+ offload = 0;
+ break;
+ }
+ if (!offload)
+ return (-1);
+
+ tx_cmd->flags_opcode = opcode;
+
+ tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
+
+ return (0);
+}
+
+/*
+ * Name: qla_hw_send
+ * Function: Transmits a packet. It first checks if the packet is a
+ * candidate for Large TCP Segment Offload and then for UDP/TCP checksum
+ * offload. If neither criterion is met, it is transmitted
+ * as a regular Ethernet frame.
+ */
+int
+qla_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
+ uint32_t *tx_idx, struct mbuf *mp)
+{
+ struct ether_vlan_header *eh;
+ qla_hw_t *hw = &ha->hw;
+ q80_tx_cmd_t *tx_cmd, tso_cmd;
+ bus_dma_segment_t *c_seg;
+ uint32_t num_tx_cmds, hdr_len = 0;
+ uint32_t total_length = 0, bytes, tx_cmd_count = 0;
+ device_t dev;
+ int i;
+
+ dev = ha->pci_dev;
+
+ /*
+ * Always make sure there is at least one empty slot in the tx_ring;
+ * the tx_ring is considered full when only one entry is available
+ */
+ num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
+
+ total_length = mp->m_pkthdr.len;
+ if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
+ device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
+ __func__, total_length);
+ return (-1);
+ }
+
+ bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
+
+ if (qla_tx_tso(ha, mp, &tso_cmd) == 0) {
+ /* find the additional tx_cmd descriptors required */
+
+ hdr_len = tso_cmd.total_hdr_len;
+
+ bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
+ bytes = QL_MIN(bytes, hdr_len);
+
+ num_tx_cmds++;
+ hdr_len -= bytes;
+
+ while (hdr_len) {
+ bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
+ hdr_len -= bytes;
+ num_tx_cmds++;
+ }
+ hdr_len = tso_cmd.total_hdr_len;
+ }
+
+ if (hw->txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
+ qla_hw_tx_done_locked(ha);
+ if (hw->txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
+ QL_DPRINT8((dev, "%s: (hw->txr_free <= "
+ "(num_tx_cmds + QLA_TX_MIN_FREE))\n",
+ __func__));
+ return (-1);
+ }
+ }
+
+ *tx_idx = hw->txr_next;
+
+ tx_cmd = &hw->tx_ring_base[hw->txr_next];
+
+ if (hdr_len == 0) {
+ if ((nsegs > Q8_TX_MAX_SEGMENTS) ||
+ (mp->m_pkthdr.len > ha->max_frame_size)){
+ /* TBD: copy into private buffer and send it */
+ device_printf(dev,
+ "%s: (nsegs[%d, %d, 0x%x] > Q8_TX_MAX_SEGMENTS)\n",
+ __func__, nsegs, mp->m_pkthdr.len,
+ mp->m_pkthdr.csum_flags);
+ qla_dump_buf8(ha, "qla_hw_send: wrong pkt",
+ mtod(mp, char *), mp->m_len);
+ return (EINVAL);
+ }
+ bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
+ if (qla_tx_chksum(ha, mp, tx_cmd) != 0)
+ tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
+ } else {
+ bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
+ }
+
+ eh = mtod(mp, struct ether_vlan_header *);
+ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
+ tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
+ else if (mp->m_flags & M_VLANTAG) {
+ tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
+ Q8_TX_CMD_FLAGS_HW_VLAN_ID);
+ tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
+ }
+
+
+ tx_cmd->n_bufs = (uint8_t)nsegs;
+ tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
+ tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
+ tx_cmd->port_cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
+
+ c_seg = segs;
+
+ while (1) {
+ for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
+
+ switch (i) {
+ case 0:
+ tx_cmd->buf1_addr = c_seg->ds_addr;
+ tx_cmd->buf1_len = c_seg->ds_len;
+ break;
+
+ case 1:
+ tx_cmd->buf2_addr = c_seg->ds_addr;
+ tx_cmd->buf2_len = c_seg->ds_len;
+ break;
+
+ case 2:
+ tx_cmd->buf3_addr = c_seg->ds_addr;
+ tx_cmd->buf3_len = c_seg->ds_len;
+ break;
+
+ case 3:
+ tx_cmd->buf4_addr = c_seg->ds_addr;
+ tx_cmd->buf4_len = c_seg->ds_len;
+ break;
+ }
+
+ c_seg++;
+ nsegs--;
+ }
+
+ hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
+ tx_cmd_count++;
+
+ if (!nsegs)
+ break;
+
+ tx_cmd = &hw->tx_ring_base[hw->txr_next];
+ bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
+ }
+
+ if (hdr_len) {
+ /* TSO : Copy the header in the following tx cmd descriptors */
+ uint8_t *src, *dst;
+
+ src = (uint8_t *)eh;
+
+ tx_cmd = &hw->tx_ring_base[hw->txr_next];
+ bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
+
+ bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
+ bytes = QL_MIN(bytes, hdr_len);
+
+ dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
+
+ if (mp->m_flags & M_VLANTAG) {
+ /* first copy the src/dst MAC addresses */
+ bcopy(src, dst, (ETHER_ADDR_LEN * 2));
+ dst += (ETHER_ADDR_LEN * 2);
+ src += (ETHER_ADDR_LEN * 2);
+
+ hdr_len -= (ETHER_ADDR_LEN * 2);
+
+ *((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
+ dst += 2;
+ *((uint16_t *)dst) = mp->m_pkthdr.ether_vtag;
+ dst += 2;
+
+ bytes -= ((ETHER_ADDR_LEN * 2) + 4);
+
+ bcopy(src, dst, bytes);
+ src += bytes;
+ hdr_len -= bytes;
+ } else {
+ bcopy(src, dst, bytes);
+ src += bytes;
+ hdr_len -= bytes;
+ }
+
+ hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
+ tx_cmd_count++;
+
+ while (hdr_len) {
+ tx_cmd = &hw->tx_ring_base[hw->txr_next];
+ bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
+
+ bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
+
+ bcopy(src, tx_cmd, bytes);
+ src += bytes;
+ hdr_len -= bytes;
+ hw->txr_next =
+ (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
+ tx_cmd_count++;
+ }
+ }
+
+ hw->txr_free = hw->txr_free - tx_cmd_count;
+
+ QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->txr_next);
+ QL_DPRINT8((dev, "%s: return\n", __func__));
+ return (0);
+}
+
+/*
+ * Name: qla_del_hw_if
+ * Function: Destroys the hardware specific entities corresponding to an
+ * Ethernet Interface
+ */
+void
+qla_del_hw_if(qla_host_t *ha)
+{
+ int i;
+
+ for (i = 0; i < ha->hw.num_sds_rings; i++)
+ QL_DISABLE_INTERRUPTS(ha, i);
+
+ qla_del_rcv_cntxt(ha);
+ qla_del_xmt_cntxt(ha);
+
+ ha->hw.flags.lro = 0;
+}
+
+/*
+ * Name: qla_init_hw_if
+ * Function: Creates the hardware specific entities corresponding to an
+ * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
+ * corresponding to the interface. Enables LRO if allowed.
+ */
+int
+qla_init_hw_if(qla_host_t *ha)
+{
+ device_t dev;
+ int i;
+ uint8_t bcast_mac[6];
+
+ qla_get_hw_caps(ha);
+
+ dev = ha->pci_dev;
+
+ for (i = 0; i < ha->hw.num_sds_rings; i++) {
+ bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
+ ha->hw.dma_buf.sds_ring[i].size);
+ }
+ /*
+ * Create Receive Context
+ */
+ if (qla_init_rcv_cntxt(ha)) {
+ return (-1);
+ }
+
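+	/*
+	 * Prime the receive producer indices; all but the last two entries
+	 * of each receive ring are handed to the firmware up front.
+	 */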
+ ha->hw.rx_next = NUM_RX_DESCRIPTORS - 2;
+ ha->hw.rxj_next = NUM_RX_JUMBO_DESCRIPTORS - 2;
+ ha->hw.rx_in = ha->hw.rxj_in = 0;
+
+ /* Update the RDS Producer Indices */
+ QL_UPDATE_RDS_PRODUCER_INDEX(ha, 0, ha->hw.rx_next);
+ QL_UPDATE_RDS_PRODUCER_INDEX(ha, 1, ha->hw.rxj_next);
+
+ /*
+ * Create Transmit Context
+ */
+ if (qla_init_xmt_cntxt(ha)) {
+ qla_del_rcv_cntxt(ha);
+ return (-1);
+ }
+
+ qla_config_mac_addr(ha, ha->hw.mac_addr,
+ (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 1);
+
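+	/* also program the broadcast MAC address for this context */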
+ bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
+ bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
+ qla_config_mac_addr(ha, bcast_mac,
+ (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 1);
+
+ qla_config_rss(ha, (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);
+
+ qla_config_intr_coalesce(ha, (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 0);
+
+ for (i = 0; i < ha->hw.num_sds_rings; i++)
+ QL_ENABLE_INTERRUPTS(ha, i);
+
+ return (0);
+}
+
+/*
+ * Name: qla_init_rcv_cntxt
+ * Function: Creates the Receive Context.
+ */
+static int
+qla_init_rcv_cntxt(qla_host_t *ha)
+{
+ device_t dev;
+ qla_cdrp_t cdrp;
+ q80_rcv_cntxt_rsp_t *rsp;
+ q80_stat_desc_t *sdesc;
+ bus_addr_t phys_addr;
+ int i, j;
+ qla_hw_t *hw = &ha->hw;
+
+ dev = ha->pci_dev;
+
+ /*
+ * Create Receive Context
+ */
+
+ for (i = 0; i < hw->num_sds_rings; i++) {
+ sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];
+		for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
+			sdesc->data[0] =
+				Q8_STAT_DESC_SET_OWNER(Q8_STAT_DESC_OWNER_FW);
+			sdesc++;
+		}
+ }
+
+ phys_addr = ha->hw.rx_cntxt_req_paddr;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_CREATE_RX_CNTXT;
+ cdrp.cmd_arg1 = (uint32_t)(phys_addr >> 32);
+ cdrp.cmd_arg2 = (uint32_t)(phys_addr);
+ cdrp.cmd_arg3 = (uint32_t)(sizeof (q80_rcv_cntxt_req_t));
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+ device_printf(dev, "%s: Q8_CMD_CREATE_RX_CNTXT failed\n",
+ __func__);
+ return (-1);
+ } else {
+ rsp = ha->hw.rx_cntxt_rsp;
+
+ QL_DPRINT2((dev, "%s: rcv cntxt successful"
+ " rds_ring_offset = 0x%08x"
+ " sds_ring_offset = 0x%08x"
+ " cntxt_state = 0x%08x"
+ " funcs_per_port = 0x%08x"
+ " num_rds_rings = 0x%04x"
+ " num_sds_rings = 0x%04x"
+ " cntxt_id = 0x%04x"
+ " phys_port = 0x%02x"
+ " virt_port = 0x%02x\n",
+ __func__,
+ rsp->rx_rsp.rds_ring_offset,
+ rsp->rx_rsp.sds_ring_offset,
+ rsp->rx_rsp.cntxt_state,
+ rsp->rx_rsp.funcs_per_port,
+ rsp->rx_rsp.num_rds_rings,
+ rsp->rx_rsp.num_sds_rings,
+ rsp->rx_rsp.cntxt_id,
+ rsp->rx_rsp.phys_port,
+ rsp->rx_rsp.virt_port));
+
+ for (i = 0; i < ha->hw.num_rds_rings; i++) {
+ QL_DPRINT2((dev,
+ "%s: rcv cntxt rds[%i].producer_reg = 0x%08x\n",
+ __func__, i, rsp->rds_rsp[i].producer_reg));
+ }
+ for (i = 0; i < ha->hw.num_sds_rings; i++) {
+ QL_DPRINT2((dev,
+ "%s: rcv cntxt sds[%i].consumer_reg = 0x%08x"
+ " sds[%i].intr_mask_reg = 0x%08x\n",
+ __func__, i, rsp->sds_rsp[i].consumer_reg,
+ i, rsp->sds_rsp[i].intr_mask_reg));
+ }
+ }
+ ha->hw.flags.init_rx_cnxt = 1;
+ return (0);
+}
+
+/*
+ * Name: qla_del_rcv_cntxt
+ * Function: Destroys the Receive Context.
+ */
+void
+qla_del_rcv_cntxt(qla_host_t *ha)
+{
+ qla_cdrp_t cdrp;
+ device_t dev = ha->pci_dev;
+
+ if (!ha->hw.flags.init_rx_cnxt)
+ return;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_DESTROY_RX_CNTXT;
+ cdrp.cmd_arg1 = (uint32_t) (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id;
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+ device_printf(dev, "%s: Q8_CMD_DESTROY_RX_CNTXT failed\n",
+ __func__);
+ }
+ ha->hw.flags.init_rx_cnxt = 0;
+}
+
+/*
+ * Name: qla_init_xmt_cntxt
+ * Function: Creates the Transmit Context.
+ */
+static int
+qla_init_xmt_cntxt(qla_host_t *ha)
+{
+ bus_addr_t phys_addr;
+ device_t dev;
+ q80_tx_cntxt_rsp_t *tx_rsp;
+ qla_cdrp_t cdrp;
+ qla_hw_t *hw = &ha->hw;
+
+ dev = ha->pci_dev;
+
+ /*
+ * Create Transmit Context
+ */
+ phys_addr = ha->hw.tx_cntxt_req_paddr;
+ tx_rsp = ha->hw.tx_cntxt_rsp;
+
+ hw->txr_comp = hw->txr_next = 0;
+ *(hw->tx_cons) = 0;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_CREATE_TX_CNTXT;
+ cdrp.cmd_arg1 = (uint32_t)(phys_addr >> 32);
+ cdrp.cmd_arg2 = (uint32_t)(phys_addr);
+ cdrp.cmd_arg3 = (uint32_t)(sizeof (q80_tx_cntxt_req_t));
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+ device_printf(dev, "%s: Q8_CMD_CREATE_TX_CNTXT failed\n",
+ __func__);
+ return (-1);
+ } else {
+ ha->hw.tx_prod_reg = tx_rsp->producer_reg;
+
+ QL_DPRINT2((dev, "%s: tx cntxt successful"
+ " cntxt_state = 0x%08x "
+ " cntxt_id = 0x%04x "
+ " phys_port_id = 0x%02x "
+ " virt_port_id = 0x%02x "
+ " producer_reg = 0x%08x "
+ " intr_mask_reg = 0x%08x\n",
+ __func__, tx_rsp->cntxt_state, tx_rsp->cntxt_id,
+ tx_rsp->phys_port_id, tx_rsp->virt_port_id,
+ tx_rsp->producer_reg, tx_rsp->intr_mask_reg));
+ }
+ ha->hw.txr_free = NUM_TX_DESCRIPTORS;
+
+ ha->hw.flags.init_tx_cnxt = 1;
+ return (0);
+}
+
+/*
+ * Name: qla_del_xmt_cntxt
+ * Function: Destroys the Transmit Context.
+ */
+static void
+qla_del_xmt_cntxt(qla_host_t *ha)
+{
+ qla_cdrp_t cdrp;
+ device_t dev = ha->pci_dev;
+
+ if (!ha->hw.flags.init_tx_cnxt)
+ return;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_DESTROY_TX_CNTXT;
+ cdrp.cmd_arg1 = (uint32_t) (ha->hw.tx_cntxt_rsp)->cntxt_id;
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+ device_printf(dev, "%s: Q8_CMD_DESTROY_TX_CNTXT failed\n",
+ __func__);
+ }
+ ha->hw.flags.init_tx_cnxt = 0;
+}
+
+/*
+ * Name: qla_get_max_rds
+ * Function: Returns the maximum number of Receive Descriptor Rings per context.
+ */
+static int
+qla_get_max_rds(qla_host_t *ha)
+{
+ qla_cdrp_t cdrp;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_RD_MAX_RDS_PER_CNTXT;
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+ device_printf(dev, "%s: Q8_CMD_RD_MAX_RDS_PER_CNTXT failed\n",
+ __func__);
+ return (-1);
+ } else {
+ ha->hw.max_rds_per_cntxt = cdrp.rsp_arg1;
+ QL_DPRINT2((dev, "%s: max_rds_per_context 0x%08x\n",
+ __func__, ha->hw.max_rds_per_cntxt));
+ }
+ return 0;
+}
+
+/*
+ * Name: qla_get_max_sds
+ * Function: Returns the maximum number of Status Descriptor Rings per context.
+ */
+static int
+qla_get_max_sds(qla_host_t *ha)
+{
+ qla_cdrp_t cdrp;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_RD_MAX_SDS_PER_CNTXT;
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+		device_printf(dev, "%s: Q8_CMD_RD_MAX_SDS_PER_CNTXT failed\n",
+ __func__);
+ return (-1);
+ } else {
+ ha->hw.max_sds_per_cntxt = cdrp.rsp_arg1;
+ QL_DPRINT2((dev, "%s: max_sds_per_context 0x%08x\n",
+ __func__, ha->hw.max_sds_per_cntxt));
+ }
+ return 0;
+}
+
+/*
+ * Name: qla_get_max_rules
+ * Function: Returns the maximum number of Rules per context.
+ */
+static int
+qla_get_max_rules(qla_host_t *ha)
+{
+ qla_cdrp_t cdrp;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_RD_MAX_RULES_PER_CNTXT;
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+ device_printf(dev, "%s: Q8_CMD_RD_MAX_RULES_PER_CNTXT failed\n",
+ __func__);
+ return (-1);
+ } else {
+ ha->hw.max_rules_per_cntxt = cdrp.rsp_arg1;
+ QL_DPRINT2((dev, "%s: max_rules_per_cntxt 0x%08x\n",
+ __func__, ha->hw.max_rules_per_cntxt));
+ }
+ return 0;
+}
+
+/*
+ * Name: qla_get_max_rcv_cntxts
+ * Function: Returns the maximum number of Receive Contexts supported.
+ */
+static int
+qla_get_max_rcv_cntxts(qla_host_t *ha)
+{
+ qla_cdrp_t cdrp;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_RD_MAX_RX_CNTXT;
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+ device_printf(dev, "%s: Q8_CMD_RD_MAX_RX_CNTXT failed\n",
+ __func__);
+ return (-1);
+ } else {
+ ha->hw.max_rcv_cntxts = cdrp.rsp_arg1;
+ QL_DPRINT2((dev, "%s: max_rcv_cntxts 0x%08x\n",
+ __func__, ha->hw.max_rcv_cntxts));
+ }
+ return 0;
+}
+
+/*
+ * Name: qla_get_max_tx_cntxts
+ * Function: Returns the maximum number of Transmit Contexts supported.
+ */
+static int
+qla_get_max_tx_cntxts(qla_host_t *ha)
+{
+ qla_cdrp_t cdrp;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_RD_MAX_TX_CNTXT;
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+ device_printf(dev, "%s: Q8_CMD_RD_MAX_TX_CNTXT failed\n",
+ __func__);
+ return (-1);
+ } else {
+ ha->hw.max_xmt_cntxts = cdrp.rsp_arg1;
+ QL_DPRINT2((dev, "%s: max_xmt_cntxts 0x%08x\n",
+ __func__, ha->hw.max_xmt_cntxts));
+ }
+ return 0;
+}
+
+/*
+ * Name: qla_get_max_mtu
+ * Function: Returns the maximum MTU supported for a context.
+ */
+static int
+qla_get_max_mtu(qla_host_t *ha)
+{
+ qla_cdrp_t cdrp;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_RD_MAX_MTU;
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+ device_printf(dev, "%s: Q8_CMD_RD_MAX_MTU failed\n", __func__);
+ return (-1);
+ } else {
+ ha->hw.max_mtu = cdrp.rsp_arg1;
+ QL_DPRINT2((dev, "%s: max_mtu 0x%08x\n", __func__,
+ ha->hw.max_mtu));
+ }
+ return 0;
+}
+
+/*
+ * Name: qla_set_max_mtu
+ * Function:
+ * Sets the maximum transfer unit size for the specified rcv context.
+ */
+int
+qla_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
+{
+ qla_cdrp_t cdrp;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_SET_MTU;
+ cdrp.cmd_arg1 = (uint32_t)cntxt_id;
+ cdrp.cmd_arg2 = mtu;
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+		device_printf(dev, "%s: Q8_CMD_SET_MTU failed\n", __func__);
+ return (-1);
+ } else {
+ ha->hw.max_mtu = cdrp.rsp_arg1;
+ }
+ return 0;
+}
+
+/*
+ * Name: qla_get_max_lro
+ * Function: Returns the maximum number of TCP connections that can be supported
+ * with LRO.
+ */
+static int
+qla_get_max_lro(qla_host_t *ha)
+{
+ qla_cdrp_t cdrp;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_RD_MAX_LRO;
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+ device_printf(dev, "%s: Q8_CMD_RD_MAX_LRO failed\n", __func__);
+ return (-1);
+ } else {
+ ha->hw.max_lro = cdrp.rsp_arg1;
+ QL_DPRINT2((dev, "%s: max_lro 0x%08x\n", __func__,
+ ha->hw.max_lro));
+ }
+ return 0;
+}
+
+/*
+ * Name: qla_get_flow_control
+ * Function: Returns the Receive/Transmit Flow Control (PAUSE) settings for
+ *	the PCI function.
+ */
+static int
+qla_get_flow_control(qla_host_t *ha)
+{
+ qla_cdrp_t cdrp;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_GET_FLOW_CNTRL;
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+ device_printf(dev, "%s: Q8_CMD_GET_FLOW_CNTRL failed\n",
+ __func__);
+ return (-1);
+ } else {
+ QL_DPRINT2((dev, "%s: flow control 0x%08x\n", __func__,
+ cdrp.rsp_arg1));
+ }
+ return 0;
+}
+
+/*
+ * Name: qla_get_hw_caps
+ * Function: Retrieves hardware capabilities
+ */
+void
+qla_get_hw_caps(qla_host_t *ha)
+{
+ //qla_read_mac_addr(ha);
+ qla_get_max_rds(ha);
+ qla_get_max_sds(ha);
+ qla_get_max_rules(ha);
+ qla_get_max_rcv_cntxts(ha);
+ qla_get_max_tx_cntxts(ha);
+ qla_get_max_mtu(ha);
+ qla_get_max_lro(ha);
+ qla_get_flow_control(ha);
+ return;
+}
+
+/*
+ * Name: qla_hw_set_multi
+ * Function: Sets the Multicast Addresses provided by the host O.S. into the
+ * hardware (for the given interface)
+ */
+void
+qla_hw_set_multi(qla_host_t *ha, uint8_t *mta, uint32_t mcnt,
+ uint32_t add_multi)
+{
+ q80_rcv_cntxt_rsp_t *rsp;
+ int i;
+
+ rsp = ha->hw.rx_cntxt_rsp;
+ for (i = 0; i < mcnt; i++) {
+ qla_config_mac_addr(ha, mta, rsp->rx_rsp.cntxt_id, add_multi);
+ mta += Q8_MAC_ADDR_LEN;
+ }
+ return;
+}
+
+/*
+ * Name: qla_hw_tx_done_locked
+ * Function: Handle Transmit Completions
+ */
+static void
+qla_hw_tx_done_locked(qla_host_t *ha)
+{
+ qla_tx_buf_t *txb;
+ qla_hw_t *hw = &ha->hw;
+ uint32_t comp_idx, comp_count = 0;
+
+ /* retrieve index of last entry in tx ring completed */
+ comp_idx = qla_le32_to_host(*(hw->tx_cons));
+
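+	/*
+	 * Walk from the last processed entry up to the firmware's consumer
+	 * index, unloading DMA maps and freeing the completed mbufs.
+	 */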
+ while (comp_idx != hw->txr_comp) {
+
+ txb = &ha->tx_buf[hw->txr_comp];
+
+ hw->txr_comp++;
+ if (hw->txr_comp == NUM_TX_DESCRIPTORS)
+ hw->txr_comp = 0;
+
+ comp_count++;
+
+ if (txb->m_head) {
+ bus_dmamap_sync(ha->tx_tag, txb->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(ha->tx_tag, txb->map);
+ bus_dmamap_destroy(ha->tx_tag, txb->map);
+ m_freem(txb->m_head);
+
+ txb->map = (bus_dmamap_t)0;
+ txb->m_head = NULL;
+ }
+ }
+
+ hw->txr_free += comp_count;
+
+	QL_DPRINT8((ha->pci_dev, "%s: return [c,f, p, pn][%d, %d, %d, %d]\n",
+		__func__, hw->txr_comp, hw->txr_free, hw->txr_next,
+		READ_REG32(ha, (ha->hw.tx_prod_reg + 0x1b2000))));
+
+ return;
+}
+
+/*
+ * Name: qla_hw_tx_done
+ * Function: Handle Transmit Completions
+ */
+void
+qla_hw_tx_done(qla_host_t *ha)
+{
+ if (!mtx_trylock(&ha->tx_lock)) {
+ QL_DPRINT8((ha->pci_dev,
+ "%s: !mtx_trylock(&ha->tx_lock)\n", __func__));
+ return;
+ }
+ qla_hw_tx_done_locked(ha);
+
+ if (ha->hw.txr_free > free_pkt_thres)
+ ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+
+ mtx_unlock(&ha->tx_lock);
+ return;
+}
+
+void
+qla_update_link_state(qla_host_t *ha)
+{
+ uint32_t link_state;
+
+ if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ ha->hw.flags.link_up = 0;
+ return;
+ }
+ link_state = READ_REG32(ha, Q8_LINK_STATE);
+
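+	/* one 4-bit link state per PCI function; a value of 1 means link up */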
+ if (ha->pci_func == 0)
+ ha->hw.flags.link_up = (((link_state & 0xF) == 1)? 1 : 0);
+ else
+ ha->hw.flags.link_up = ((((link_state >> 4)& 0xF) == 1)? 1 : 0);
+}
+
+int
+qla_config_lro(qla_host_t *ha)
+{
+ int i;
+ qla_hw_t *hw = &ha->hw;
+ struct lro_ctrl *lro;
+
+ for (i = 0; i < hw->num_sds_rings; i++) {
+ lro = &hw->sds[i].lro;
+ if (tcp_lro_init(lro)) {
+ device_printf(ha->pci_dev, "%s: tcp_lro_init failed\n",
+ __func__);
+ return (-1);
+ }
+ lro->ifp = ha->ifp;
+ }
+ ha->flags.lro_init = 1;
+
+ QL_DPRINT2((ha->pci_dev, "%s: LRO initialized\n", __func__));
+ return (0);
+}
+
+void
+qla_free_lro(qla_host_t *ha)
+{
+ int i;
+ qla_hw_t *hw = &ha->hw;
+ struct lro_ctrl *lro;
+
+ if (!ha->flags.lro_init)
+ return;
+
+ for (i = 0; i < hw->num_sds_rings; i++) {
+ lro = &hw->sds[i].lro;
+ tcp_lro_free(lro);
+ }
+ ha->flags.lro_init = 0;
+}
+
+void
+qla_hw_stop_rcv(qla_host_t *ha)
+{
+ int i, done, count = 100;
+
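+	/* wait up to 100 * 10ms for all status rings to go idle */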
+ while (count--) {
+ done = 1;
+ for (i = 0; i < ha->hw.num_sds_rings; i++) {
+ if (ha->hw.sds[i].rcv_active)
+ done = 0;
+ }
+ if (done)
+ break;
+ else
+ qla_mdelay(__func__, 10);
+ }
+}
+
diff --git a/sys/dev/qlxgb/qla_hw.h b/sys/dev/qlxgb/qla_hw.h
new file mode 100644
index 000000000000..46780be9b2e9
--- /dev/null
+++ b/sys/dev/qlxgb/qla_hw.h
@@ -0,0 +1,831 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * File: qla_hw.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+#ifndef _QLA_HW_H_
+#define _QLA_HW_H_
+
+#define Q8_MAX_NUM_MULTICAST_ADDRS 128
+#define Q8_MAC_ADDR_LEN 6
+
+/*
+ * Firmware Interface
+ */
+
+/*
+ * Command Response Interface - Commands
+ */
+typedef struct qla_cdrp {
+ uint32_t cmd;
+ uint32_t cmd_arg1;
+ uint32_t cmd_arg2;
+ uint32_t cmd_arg3;
+ uint32_t rsp;
+ uint32_t rsp_arg1;
+ uint32_t rsp_arg2;
+ uint32_t rsp_arg3;
+} qla_cdrp_t;
+
+#define Q8_CMD_RD_MAX_RDS_PER_CNTXT 0x80000002
+#define Q8_CMD_RD_MAX_SDS_PER_CNTXT 0x80000003
+#define Q8_CMD_RD_MAX_RULES_PER_CNTXT 0x80000004
+#define Q8_CMD_RD_MAX_RX_CNTXT 0x80000005
+#define Q8_CMD_RD_MAX_TX_CNTXT 0x80000006
+#define Q8_CMD_CREATE_RX_CNTXT 0x80000007
+#define Q8_CMD_DESTROY_RX_CNTXT 0x80000008
+#define Q8_CMD_CREATE_TX_CNTXT 0x80000009
+#define Q8_CMD_DESTROY_TX_CNTXT 0x8000000A
+#define Q8_CMD_SETUP_STATS 0x8000000E
+#define Q8_CMD_GET_STATS 0x8000000F
+#define Q8_CMD_DELETE_STATS 0x80000010
+#define Q8_CMD_GEN_INT 0x80000011
+#define Q8_CMD_SET_MTU 0x80000012
+#define Q8_CMD_GET_FLOW_CNTRL 0x80000016
+#define Q8_CMD_SET_FLOW_CNTRL 0x80000017
+#define Q8_CMD_RD_MAX_MTU 0x80000018
+#define Q8_CMD_RD_MAX_LRO 0x80000019
+
+/*
+ * Command Response Interface - Response
+ */
+#define Q8_RSP_SUCCESS 0x00000000
+#define Q8_RSP_NO_HOST_MEM 0x00000001
+#define Q8_RSP_NO_HOST_RSRC 0x00000002
+#define Q8_RSP_NO_CARD_CRB 0x00000003
+#define Q8_RSP_NO_CARD_MEM 0x00000004
+#define Q8_RSP_NO_CARD_RSRC 0x00000005
+#define Q8_RSP_INVALID_ARGS 0x00000006
+#define Q8_RSP_INVALID_ACTION 0x00000007
+#define Q8_RSP_INVALID_STATE 0x00000008
+#define Q8_RSP_NOT_SUPPORTED 0x00000009
+#define Q8_RSP_NOT_PERMITTED 0x0000000A
+#define Q8_RSP_NOT_READY 0x0000000B
+#define Q8_RSP_DOES_NOT_EXIST 0x0000000C
+#define Q8_RSP_ALREADY_EXISTS 0x0000000D
+#define Q8_RSP_BAD_SIGNATURE 0x0000000E
+#define Q8_RSP_CMD_NOT_IMPLEMENTED 0x0000000F
+#define Q8_RSP_CMD_INVALID 0x00000010
+#define Q8_RSP_TIMEOUT 0x00000011
+
+
+/*
+ * Transmit Related Definitions
+ */
+
+/*
+ * Transmit Context - Q8_CMD_CREATE_TX_CNTXT Command Configuration Data
+ */
+
+typedef struct _q80_tx_cntxt_req {
+ uint64_t rsp_dma_addr; /* rsp from firmware is DMA'ed here */
+ uint64_t cmd_cons_dma_addr;
+ uint64_t rsrvd0;
+
+ uint32_t caps[4]; /* capabilities - bit vector*/
+#define CNTXT_CAP0_BASEFW 0x0001
+#define CNTXT_CAP0_LEGACY_MN 0x0004
+#define CNTXT_CAP0_LSO 0x0040
+
+ uint32_t intr_mode; /* Interrupt Mode */
+#define CNTXT_INTR_MODE_UNIQUE 0x0000
+#define CNTXT_INTR_MODE_SHARED 0x0001
+
+ uint64_t rsrvd1;
+ uint16_t msi_index;
+ uint16_t rsrvd2;
+ uint64_t phys_addr; /* physical address of transmit ring
+ * in system memory */
+ uint32_t num_entries; /* number of entries in transmit ring */
+ uint8_t rsrvd3[128];
+} __packed q80_tx_cntxt_req_t; /* 188 bytes total */
+
+
+/*
+ * Transmit Context - Response from Firmware to Q8_CMD_CREATE_TX_CNTXT
+ */
+
+typedef struct _q80_tx_cntxt_rsp {
+ uint32_t cntxt_state; /* starting state */
+#define CNTXT_STATE_ALLOCATED_NOT_ACTIVE 0x0001
+#define CNTXT_STATE_ACTIVE 0x0002
+#define CNTXT_STATE_QUIESCED 0x0004
+
+ uint16_t cntxt_id; /* handle for context */
+ uint8_t phys_port_id; /* physical id of port */
+ uint8_t virt_port_id; /* virtual or logical id of port */
+ uint32_t producer_reg; /* producer register for transmit ring */
+ uint32_t intr_mask_reg; /* interrupt mask register */
+ uint8_t rsrvd[128];
+} __packed q80_tx_cntxt_rsp_t; /* 144 bytes */
+
+/*
+ * Transmit Command Descriptor
+ * These commands are issued on the Transmit Ring associated with a Transmit
+ * context
+ */
+typedef struct _q80_tx_cmd {
+ uint8_t tcp_hdr_off; /* TCP Header Offset */
+ uint8_t ip_hdr_off; /* IP Header Offset */
+ uint16_t flags_opcode; /* Bits 0-6: flags; 7-12: opcode */
+
+ /* flags field */
+#define Q8_TX_CMD_FLAGS_MULTICAST 0x01
+#define Q8_TX_CMD_FLAGS_LSO_TSO 0x02
+#define Q8_TX_CMD_FLAGS_VLAN_TAGGED 0x10
+#define Q8_TX_CMD_FLAGS_HW_VLAN_ID 0x40
+
+ /* opcode field */
+#define Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6 (0xC << 7)
+#define Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6 (0xB << 7)
+#define Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6 (0x6 << 7)
+#define Q8_TX_CMD_OP_XMT_TCP_LSO (0x5 << 7)
+#define Q8_TX_CMD_OP_XMT_UDP_CHKSUM (0x3 << 7)
+#define Q8_TX_CMD_OP_XMT_TCP_CHKSUM (0x2 << 7)
+#define Q8_TX_CMD_OP_XMT_ETHER (0x1 << 7)
+
+ uint8_t n_bufs; /* # of data segs in data buffer */
+ uint8_t data_len_lo; /* data length lower 8 bits */
+ uint16_t data_len_hi; /* data length upper 16 bits */
+
+ uint64_t buf2_addr; /* buffer 2 address */
+
+ uint16_t rsrvd0;
+ uint16_t mss; /* MSS for this packet */
+ uint8_t port_cntxtid; /* Bits 7-4: ContextId; 3-0: reserved */
+
+#define Q8_TX_CMD_PORT_CNXTID(c_id) ((c_id & 0xF) << 4)
+
+ uint8_t total_hdr_len; /* MAC+IP+TCP Header Length for LSO */
+ uint16_t rsrvd1;
+
+ uint64_t buf3_addr; /* buffer 3 address */
+ uint64_t buf1_addr; /* buffer 1 address */
+
+ uint16_t buf1_len; /* length of buffer 1 */
+ uint16_t buf2_len; /* length of buffer 2 */
+ uint16_t buf3_len; /* length of buffer 3 */
+ uint16_t buf4_len; /* length of buffer 4 */
+
+ uint64_t buf4_addr; /* buffer 4 address */
+
+ uint32_t rsrvd2;
+ uint16_t rsrvd3;
+ uint16_t vlan_tci; /* VLAN TCI when hw tagging is enabled*/
+
+} __packed q80_tx_cmd_t; /* 64 bytes */
+
+#define Q8_TX_CMD_MAX_SEGMENTS 4
+#define Q8_TX_CMD_TSO_ALIGN 2
+#define Q8_TX_MAX_SEGMENTS 14
+
+
+/*
+ * Receive Related Definitions
+ */
+/*
+ * Receive Context - Q8_CMD_CREATE_RX_CNTXT Command Configuration Data
+ */
+
+typedef struct _q80_rq_sds_ring {
+ uint64_t phys_addr; /* physical addr of status ring in system memory */
+ uint32_t size; /* number of entries in status ring */
+ uint16_t msi_index;
+ uint16_t rsrvd;
+} __packed q80_rq_sds_ring_t; /* 16 bytes */
+
+typedef struct _q80_rq_rds_ring {
+ uint64_t phys_addr; /* physical addr of rcv ring in system memory */
+ uint64_t buf_size; /* packet buffer size */
+ uint32_t size; /* number of entries in ring */
+ uint32_t rsrvd;
+} __packed q80_rq_rds_ring_t; /* 24 bytes */
+
+typedef struct _q80_rq_rcv_cntxt {
+ uint64_t rsp_dma_addr; /* rsp from firmware is DMA'ed here */
+ uint32_t caps[4]; /* bit vector */
+#define CNTXT_CAP0_JUMBO 0x0080 /* Contiguous Jumbo buffers*/
+#define CNTXT_CAP0_LRO 0x0100
+#define CNTXT_CAP0_HW_LRO 0x0800 /* HW LRO */
+
+ uint32_t intr_mode; /* same as q80_tx_cntxt_req_t */
+ uint32_t rds_intr_mode; /* same as q80_tx_cntxt_req_t */
+
+ uint32_t rds_ring_offset; /* rds configuration relative to data[0] */
+ uint32_t sds_ring_offset; /* sds configuration relative to data[0] */
+
+ uint16_t num_rds_rings;
+ uint16_t num_sds_rings;
+
+ uint8_t rsrvd1[132];
+} __packed q80_rq_rcv_cntxt_t; /* 176 bytes header + rds + sds ring rqsts */
+
+/*
+ * Receive Context - Response from Firmware to Q8_CMD_CREATE_RX_CNTXT
+ */
+
+typedef struct _q80_rsp_rds_ring {
+ uint32_t producer_reg;
+ uint32_t rsrvd;
+} __packed q80_rsp_rds_ring_t; /* 8 bytes */
+
+typedef struct _q80_rsp_sds_ring {
+ uint32_t consumer_reg;
+ uint32_t intr_mask_reg;
+} __packed q80_rsp_sds_ring_t; /* 8 bytes */
+
+typedef struct _q80_rsp_rcv_cntxt {
+ uint32_t rds_ring_offset; /* rds configuration relative to data[0] */
+ uint32_t sds_ring_offset; /* sds configuration relative to data[0] */
+
+ uint32_t cntxt_state; /* starting state */
+ uint32_t funcs_per_port; /* number of PCI functions sharing each port */
+
+ uint16_t num_rds_rings;
+ uint16_t num_sds_rings;
+
+ uint16_t cntxt_id; /* handle for context */
+
+ uint8_t phys_port; /* physical id of port */
+ uint8_t virt_port; /* virtual or logical id of port */
+
+ uint8_t rsrvd[128];
+ uint8_t data[0];
+} __packed q80_rsp_rcv_cntxt_t; /* 152 bytes header + rds + sds ring rspncs */
+
+
+/*
+ * Note:
+ * Transmit Context
+ * 188 (rq) + 144 (rsp) = 332 bytes are required
+ *
+ * Receive Context
+ * 1 RDS and 1 SDS rings: (16+24+176)+(8+8+152) = 384 bytes
+ *
+ * 3 RDS and 4 SDS rings: (((16+24)*3)+176) + (((8+8)*4)+152) =
+ * = 296 + 216 = 512 bytes
+ * Clearly this is within the minimum PAGE size of most O.S. platforms
+ * (typically 4 Kbytes). Hence it is simpler to allocate one PAGE
+ * and then carve out space for each context. It is also a good idea
+ * to throw in the shadow register for the consumer index of the transmit
+ * ring in this PAGE.
+ */
+
+/*
+ * Receive Descriptor corresponding to each entry in the receive ring
+ */
+typedef struct _q80_rcv_desc {
+ uint16_t handle;
+ uint16_t rsrvd;
+ uint32_t buf_size; /* buffer size in bytes */
+ uint64_t buf_addr; /* physical address of buffer */
+} __packed q80_recv_desc_t;
+
+/*
+ * Status Descriptor corresponding to each entry in the Status ring
+ */
+typedef struct _q80_stat_desc {
+ uint64_t data[2];
+} __packed q80_stat_desc_t;
+
+/*
+ * definitions for data[0] field of Status Descriptor
+ */
+#define Q8_STAT_DESC_OWNER(data) ((data >> 56) & 0x3)
+#define Q8_STAT_DESC_OWNER_HOST 0x1
+#define Q8_STAT_DESC_OWNER_FW 0x2
+
+#define Q8_STAT_DESC_OWNER_MASK (((uint64_t)0x3) << 56)
+#define Q8_STAT_DESC_SET_OWNER(owner) (uint64_t)(((uint64_t)owner) << 56)
+
+#define Q8_STAT_DESC_OPCODE(data) ((data >> 58) & 0x003F)
+#define Q8_STAT_DESC_OPCODE_SYN_OFFLOAD 0x03
+#define Q8_STAT_DESC_OPCODE_RCV_PKT 0x04
+#define Q8_STAT_DESC_OPCODE_CTRL_MSG 0x05
+#define Q8_STAT_DESC_OPCODE_LRO_PKT 0x12
+
+/*
+ * definitions for data[0] field of Status Descriptor for standard frames
+ * status descriptor opcode equals 0x04
+ */
+#define Q8_STAT_DESC_PORT(data) ((data) & 0x000F)
+#define Q8_STAT_DESC_STATUS(data) ((data >> 4) & 0x000F)
+#define Q8_STAT_DESC_STATUS_NO_CHKSUM 0x01
+#define Q8_STAT_DESC_STATUS_CHKSUM_OK 0x02
+#define Q8_STAT_DESC_STATUS_CHKSUM_ERR 0x03
+
+#define Q8_STAT_DESC_TYPE(data) ((data >> 8) & 0x000F)
+#define Q8_STAT_DESC_TOTAL_LENGTH(data) ((data >> 12) & 0xFFFF)
+#define Q8_STAT_DESC_HANDLE(data) ((data >> 28) & 0xFFFF)
+#define Q8_STAT_DESC_PROTOCOL(data) ((data >> 44) & 0x000F)
+#define Q8_STAT_DESC_L2_OFFSET(data) ((data >> 48) & 0x001F)
+#define Q8_STAT_DESC_COUNT(data) ((data >> 53) & 0x0007)
+
+/*
+ * definitions for data[0-1] fields of Status Descriptor for LRO
+ * status descriptor opcode equals 0x05
+ */
+/* definitions for data[0] field */
+#define Q8_LRO_STAT_DESC_HANDLE(data) ((data) & 0xFFFF)
+#define Q8_LRO_STAT_DESC_PAYLOAD_LENGTH(data) ((data >> 16) & 0xFFFF)
+#define Q8_LRO_STAT_DESC_L2_OFFSET(data) ((data >> 32) & 0xFF)
+#define Q8_LRO_STAT_DESC_L4_OFFSET(data) ((data >> 40) & 0xFF)
+#define Q8_LRO_STAT_DESC_TS_PRESENT(data) ((data >> 48) & 0x1)
+#define Q8_LRO_STAT_DESC_TYPE(data) ((data >> 49) & 0x7)
+#define Q8_LRO_STAT_DESC_PUSH_BIT(data) ((data >> 52) & 0x1)
+
+/* definitions for data[1] field */
+#define Q8_LRO_STAT_DESC_SEQ_NUM(data) (uint32_t)(data)
+
+/** Driver Related Definitions Begin **/
+
+#define MAX_RDS_RINGS 2 /* Max# of Receive Descriptor Rings */
+#define MAX_SDS_RINGS 4 /* Max# of Status Descriptor Rings */
+#define TX_SMALL_PKT_SIZE 128 /* size in bytes of small packets */
+
+/* The number of descriptors should be a power of 2 */
+#define NUM_TX_DESCRIPTORS 2048
+#define NUM_RX_DESCRIPTORS 8192
+//#define NUM_RX_JUMBO_DESCRIPTORS 1024
+#define NUM_RX_JUMBO_DESCRIPTORS 2048
+//#define NUM_STATUS_DESCRIPTORS 8192
+#define NUM_STATUS_DESCRIPTORS 2048
+
+typedef struct _q80_rcv_cntxt_req {
+ q80_rq_rcv_cntxt_t rx_req;
+ q80_rq_rds_ring_t rds_req[MAX_RDS_RINGS];
+ q80_rq_sds_ring_t sds_req[MAX_SDS_RINGS];
+} __packed q80_rcv_cntxt_req_t;
+
+typedef struct _q80_rcv_cntxt_rsp {
+ q80_rsp_rcv_cntxt_t rx_rsp;
+ q80_rsp_rds_ring_t rds_rsp[MAX_RDS_RINGS];
+ q80_rsp_sds_ring_t sds_rsp[MAX_SDS_RINGS];
+} __packed q80_rcv_cntxt_rsp_t;
+
+/*
+ * structure describing various dma buffers
+ */
+#define RDS_RING_INDEX_NORMAL 0
+#define RDS_RING_INDEX_JUMBO 1
+
+typedef struct qla_dmabuf {
+ volatile struct {
+ uint32_t tx_ring :1,
+ rds_ring :1,
+ sds_ring :1,
+ context :1;
+ } flags;
+
+ qla_dma_t tx_ring;
+ qla_dma_t rds_ring[MAX_RDS_RINGS];
+ qla_dma_t sds_ring[MAX_SDS_RINGS];
+ qla_dma_t context;
+} qla_dmabuf_t;
+
+/** Driver Related Definitions End **/
+
+/*
+ * Firmware Control Descriptor
+ */
+typedef struct _qla_fw_cds_hdr {
+ uint64_t cmd;
+#define Q8_FWCD_CNTRL_REQ (0x13 << 23)
+ uint8_t opcode;
+ uint8_t cookie;
+ uint16_t cntxt_id;
+ uint8_t response;
+#define Q8_FW_CDS_HDR_COMPLETION 0x1
+ uint16_t rsrvd;
+ uint8_t sub_opcode;
+} __packed qla_fw_cds_hdr_t;
+
+/*
+ * definitions for opcode in qla_fw_cds_hdr_t
+ */
+#define Q8_FWCD_OPCODE_CONFIG_RSS 0x01
+#define Q8_FWCD_OPCODE_CONFIG_RSS_TABLE 0x02
+#define Q8_FWCD_OPCODE_CONFIG_INTR_COALESCING 0x03
+#define Q8_FWCD_OPCODE_CONFIG_LED 0x04
+#define Q8_FWCD_OPCODE_CONFIG_MAC_ADDR 0x06
+#define Q8_FWCD_OPCODE_LRO_FLOW 0x07
+#define Q8_FWCD_OPCODE_GET_SNMP_STATS 0x08
+#define Q8_FWCD_OPCODE_CONFIG_MAC_RCV_MODE 0x0C
+#define Q8_FWCD_OPCODE_STATISTICS 0x10
+#define Q8_FWCD_OPCODE_CONFIG_IPADDR 0x12
+#define Q8_FWCD_OPCODE_CONFIG_LOOPBACK 0x13
+#define Q8_FWCD_OPCODE_LINK_EVENT_REQ 0x15
+#define Q8_FWCD_OPCODE_CONFIG_BRIDGING 0x17
+#define Q8_FWCD_OPCODE_CONFIG_LRO 0x18
+
+/*
+ * Configure RSS
+ */
+typedef struct _qla_fw_cds_config_rss {
+ qla_fw_cds_hdr_t hdr;
+ uint8_t hash_type;
+#define Q8_FWCD_RSS_HASH_TYPE_IPV4_TCP (0x2 << 4)
+#define Q8_FWCD_RSS_HASH_TYPE_IPV4_IP (0x1 << 4)
+#define Q8_FWCD_RSS_HASH_TYPE_IPV4_TCP_IP (0x3 << 4)
+#define Q8_FWCD_RSS_HASH_TYPE_IPV6_TCP (0x2 << 6)
+#define Q8_FWCD_RSS_HASH_TYPE_IPV6_IP (0x1 << 6)
+#define Q8_FWCD_RSS_HASH_TYPE_IPV6_TCP_IP (0x3 << 6)
+
+ uint8_t flags;
+#define Q8_FWCD_RSS_FLAGS_ENABLE_RSS 0x1
+#define Q8_FWCD_RSS_FLAGS_USE_IND_TABLE 0x2
+ uint8_t rsrvd[4];
+ uint16_t ind_tbl_mask;
+ uint64_t rss_key[5];
+} __packed qla_fw_cds_config_rss_t;
+
+/*
+ * Configure RSS Table
+ */
+typedef struct _qla_fw_cds_config_rss_table {
+ qla_fw_cds_hdr_t hdr;
+ uint64_t index;
+ uint8_t table[40];
+} __packed qla_fw_cds_config_rss_table_t;
+
+/*
+ * Configure Interrupt Coalescing
+ */
+typedef struct _qla_fw_cds_config_intr_coalesc {
+ qla_fw_cds_hdr_t hdr;
+ uint16_t rsrvd0;
+ uint16_t rsrvd1;
+ uint16_t flags;
+ uint16_t rsrvd2;
+ uint64_t rsrvd3;
+ uint16_t max_rcv_pkts;
+ uint16_t max_rcv_usecs;
+ uint16_t max_snd_pkts;
+ uint16_t max_snd_usecs;
+ uint64_t rsrvd4;
+ uint64_t rsrvd5;
+ uint32_t usecs_to;
+ uint8_t timer_type;
+#define Q8_FWCMD_INTR_COALESC_TIMER_NONE 0x00
+#define Q8_FWCMD_INTR_COALESC_TIMER_ONCE 0x01
+#define Q8_FWCMD_INTR_COALESC_TIMER_PERIODIC 0x02
+
+ uint8_t sds_ring_bitmask;
+#define Q8_FWCMD_INTR_COALESC_SDS_RING_0 0x01
+#define Q8_FWCMD_INTR_COALESC_SDS_RING_1 0x02
+#define Q8_FWCMD_INTR_COALESC_SDS_RING_2 0x04
+#define Q8_FWCMD_INTR_COALESC_SDS_RING_3 0x08
+
+ uint16_t rsrvd6;
+} __packed qla_fw_cds_config_intr_coalesc_t;
+
+/*
+ * Configure LED Parameters
+ */
+typedef struct _qla_fw_cds_config_led {
+ qla_fw_cds_hdr_t hdr;
+ uint32_t cntxt_id;
+ uint32_t blink_rate;
+ uint32_t blink_state;
+ uint32_t rsrvd;
+} __packed qla_fw_cds_config_led_t;
+
+/*
+ * Configure MAC Address
+ */
+typedef struct _qla_fw_cds_config_mac_addr {
+ qla_fw_cds_hdr_t hdr;
+ uint8_t cmd;
+#define Q8_FWCD_ADD_MAC_ADDR 0x1
+#define Q8_FWCD_DEL_MAC_ADDR 0x2
+ uint8_t rsrvd;
+ uint8_t mac_addr[6];
+} __packed qla_fw_cds_config_mac_addr_t;
+
+/*
+ * Configure Add/Delete LRO
+ */
+typedef struct _qla_fw_cds_config_lro {
+ qla_fw_cds_hdr_t hdr;
+ uint32_t dst_ip_addr;
+ uint32_t src_ip_addr;
+ uint16_t dst_tcp_port;
+ uint16_t src_tcp_port;
+ uint8_t ipv6;
+ uint8_t time_stamp;
+ uint16_t rsrvd;
+ uint32_t rss_hash;
+ uint32_t host_handle;
+} __packed qla_fw_cds_config_lro_t;
+
+/*
+ * Get SNMP Statistics
+ */
+typedef struct _qla_fw_cds_get_snmp {
+ qla_fw_cds_hdr_t hdr;
+ uint64_t phys_addr;
+ uint16_t size;
+ uint16_t cntxt_id;
+ uint32_t rsrvd;
+} __packed qla_fw_cds_get_snmp_t;
+
+typedef struct _qla_snmp_stats {
+ uint64_t jabber_state;
+ uint64_t false_carrier;
+ uint64_t rsrvd;
+ uint64_t mac_cntrl;
+ uint64_t align_errors;
+ uint64_t chksum_errors;
+ uint64_t oversize_frames;
+ uint64_t tx_errors;
+ uint64_t mac_rcv_errors;
+ uint64_t phy_rcv_errors;
+ uint64_t rcv_pause;
+ uint64_t tx_pause;
+} __packed qla_snmp_stats_t;
+
+/*
+ * Enable Link Event Requests
+ */
+typedef struct _qla_link_event_req {
+ qla_fw_cds_hdr_t hdr;
+ uint8_t enable;
+ uint8_t get_clnk_params;
+ uint8_t pad[6];
+} __packed qla_link_event_req_t;
+
+
+/*
+ * Set MAC Receive Mode
+ */
+typedef struct _qla_set_mac_rcv_mode {
+ qla_fw_cds_hdr_t hdr;
+
+ uint32_t mode;
+#define Q8_MAC_RCV_RESET_PROMISC_ALLMULTI 0x00
+#define Q8_MAC_RCV_ENABLE_PROMISCUOUS 0x01
+#define Q8_MAC_RCV_ENABLE_ALLMULTI 0x02
+
+ uint8_t pad[4];
+} __packed qla_set_mac_rcv_mode_t;
+
+/*
+ * Configure IP Address
+ */
+typedef struct _qla_config_ipv4 {
+ qla_fw_cds_hdr_t hdr;
+
+ uint64_t cmd;
+#define Q8_CONFIG_CMD_IP_ENABLE 0x02
+#define Q8_CONFIG_CMD_IP_DISABLE 0x03
+
+ uint64_t ipv4_addr;
+} __packed qla_config_ipv4_t;
+
+/*
+ * Configure LRO
+ */
+typedef struct _qla_config_lro {
+ qla_fw_cds_hdr_t hdr;
+
+ uint64_t cmd;
+#define Q8_CONFIG_LRO_ENABLE 0x08
+} __packed qla_config_lro_t;
+
+
+/*
+ * Control Messages Received on SDS Ring
+ */
+/* Header */
+typedef struct _qla_cntrl_msg_hdr {
+ uint16_t rsrvd0;
+ uint16_t err_code;
+ uint8_t rsp_type;
+ uint8_t comp_id;
+ uint16_t tag;
+#define Q8_CTRL_MSG_TAG_DESC_COUNT_MASK (0x7 << 5)
+#define Q8_CTRL_MSG_TAG_OWNER_MASK (0x3 << 8)
+#define Q8_CTRL_MSG_TAG_OPCODE_MASK (0x3F << 10)
+} __packed qla_cntrl_msg_hdr_t;
+
+/*
+ * definitions for rsp_type in qla_cntrl_msg_hdr_t
+ */
+#define Q8_CTRL_CONFIG_MAC_RSP 0x85
+#define Q8_CTRL_LRO_FLOW_DELETE_RSP 0x86
+#define Q8_CTRL_LRO_FLOW_ADD_FAILURE_RSP 0x87
+#define Q8_CTRL_GET_SNMP_STATS_RSP 0x88
+#define Q8_CTRL_GET_NETWORK_STATS_RSP 0x8C
+#define Q8_CTRL_LINK_EVENT_NOTIFICATION 0x8D
+
+/*
+ * Configure MAC Response
+ */
+typedef struct _qla_config_mac_rsp {
+ uint32_t rval;
+ uint32_t rsrvd;
+} __packed qla_config_mac_rsp_t;
+
+/*
+ * LRO Flow Response (can be LRO Flow Delete and LRO Flow Add Failure)
+ */
+typedef struct _qla_lro_flow_rsp {
+ uint32_t handle;
+ uint32_t rss_hash;
+ uint32_t dst_ip;
+ uint32_t src_ip;
+ uint16_t dst_tcp_port;
+ uint16_t src_tcp_port;
+ uint8_t ipv6;
+ uint8_t rsrvd0;
+ uint16_t rsrvd1;
+} __packed qla_lro_flow_rsp_t;
+
+/*
+ * Get SNMP Statistics Response
+ */
+typedef struct _qla_get_snmp_stats_rsp {
+ uint64_t rsrvd;
+} __packed qla_get_snmp_stats_rsp_t;
+
+/*
+ * Get Network Statistics Response
+ */
+typedef struct _qla_get_net_stats_rsp {
+ uint64_t rsrvd;
+} __packed qla_get_net_stats_rsp_t;
+
+/*
+ * Link Event Notification
+ */
+typedef struct _qla_link_event {
+ uint32_t cable_oui;
+ uint16_t cable_length;
+
+ uint16_t link_speed;
+#define Q8_LE_SPEED_MASK 0xFFF
+#define Q8_LE_SPEED_10GBPS 0x710
+#define Q8_LE_SPEED_1GBPS 0x3E8
+#define Q8_LE_SPEED_100MBPS 0x064
+#define Q8_LE_SPEED_10MBPS 0x00A
+
+ uint8_t link_up;/* 0 = down; else up */
+
+ uint8_t mod_info;
+#define Q8_LE_MI_MODULE_NOT_PRESENT 0x01
+#define Q8_LE_MI_UNKNOWN_OPTICAL_MODULE 0x02
+#define Q8_LE_MI_SR_LR_OPTICAL_MODULE 0x03
+#define Q8_LE_MI_LRM_OPTICAL_MODULE 0x04
+#define Q8_LE_MI_SFP_1G_MODULE 0x05
+#define Q8_LE_MI_UNSUPPORTED_TWINAX 0x06
+#define Q8_LE_MI_UNSUPPORTED_TWINAX_LENGTH 0x07
+#define Q8_LE_MI_SUPPORTED_TWINAX 0x08
+
+ uint8_t fduplex; /* 1 = full duplex; 0 = half duplex */
+ uint8_t autoneg; /* 1 = autoneg enable; 0 = disabled */
+ uint32_t rsrvd;
+} __packed qla_link_event_t;
+
+typedef struct _qla_sds {
+ q80_stat_desc_t *sds_ring_base; /* start of sds ring */
+ uint32_t sdsr_next; /* next entry in SDS ring to process */
+ struct lro_ctrl lro;
+ void *rxb_free;
+ uint32_t rx_free;
+ void *rxjb_free;
+ uint32_t rxj_free;
+ volatile uint32_t rcv_active;
+} qla_sds_t;
+
+/*
+ * struct for storing hardware specific information for a given interface
+ */
+typedef struct _qla_hw {
+ struct {
+ uint32_t
+ lro :1,
+ init_tx_cnxt :1,
+ init_rx_cnxt :1,
+ fduplex :1,
+ autoneg :1,
+ link_up :1;
+ } flags;
+
+ uint16_t link_speed;
+ uint16_t cable_length;
+ uint16_t cable_oui;
+ uint8_t mod_info;
+ uint8_t rsrvd;
+
+ uint32_t max_rds_per_cntxt;
+ uint32_t max_sds_per_cntxt;
+ uint32_t max_rules_per_cntxt;
+ uint32_t max_rcv_cntxts;
+ uint32_t max_xmt_cntxts;
+ uint32_t max_mtu;
+ uint32_t max_lro;
+
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+
+ uint16_t num_rds_rings;
+ uint16_t num_sds_rings;
+
+ qla_dmabuf_t dma_buf;
+
+ /* Transmit Side */
+
+ q80_tx_cmd_t *tx_ring_base;
+
+ q80_tx_cntxt_req_t *tx_cntxt_req; /* TX Context Request */
+ bus_addr_t tx_cntxt_req_paddr;
+
+ q80_tx_cntxt_rsp_t *tx_cntxt_rsp; /* TX Context Response */
+ bus_addr_t tx_cntxt_rsp_paddr;
+
+ uint32_t *tx_cons; /* tx consumer shadow reg */
+ bus_addr_t tx_cons_paddr;
+
+ volatile uint32_t txr_free; /* # of free entries in tx ring */
+ volatile uint32_t txr_next; /* # next available tx ring entry */
+ volatile uint32_t txr_comp; /* index of last tx entry completed */
+
+ uint32_t tx_prod_reg;
+
+ /* Receive Side */
+ volatile uint32_t rx_next; /* next standard rcv ring to arm fw */
+ volatile int32_t rxj_next; /* next jumbo rcv ring to arm fw */
+
+ volatile int32_t rx_in; /* next standard rcv ring to add mbufs */
+ volatile int32_t rxj_in; /* next jumbo rcv ring to add mbufs */
+
+ q80_rcv_cntxt_req_t *rx_cntxt_req; /* Rcv Context Request */
+ bus_addr_t rx_cntxt_req_paddr;
+ q80_rcv_cntxt_rsp_t *rx_cntxt_rsp; /* Rcv Context Response */
+ bus_addr_t rx_cntxt_rsp_paddr;
+
+ qla_sds_t sds[MAX_SDS_RINGS];
+} qla_hw_t;
+
+#define QL_UPDATE_RDS_PRODUCER_INDEX(ha, i, val) \
+ WRITE_REG32(ha, ((ha->hw.rx_cntxt_rsp)->rds_rsp[i].producer_reg +\
+ 0x1b2000), val)
+
+#define QL_UPDATE_TX_PRODUCER_INDEX(ha, val) \
+ WRITE_REG32(ha, (ha->hw.tx_prod_reg + 0x1b2000), val)
+
+#define QL_UPDATE_SDS_CONSUMER_INDEX(ha, i, val) \
+ WRITE_REG32(ha, ((ha->hw.rx_cntxt_rsp)->sds_rsp[i].consumer_reg +\
+ 0x1b2000), val)
+
+#define QL_CLEAR_INTERRUPTS(ha) \
+ if (ha->pci_func == 0) {\
+ WRITE_REG32(ha, Q8_INT_TARGET_STATUS_F0, 0xFFFFFFFF);\
+ } else {\
+ WRITE_REG32(ha, Q8_INT_TARGET_STATUS_F1, 0xFFFFFFFF);\
+	}
+
+#define QL_ENABLE_INTERRUPTS(ha, sds_index) \
+ {\
+ q80_rsp_sds_ring_t *rsp_sds;\
+ rsp_sds = &((ha->hw.rx_cntxt_rsp)->sds_rsp[sds_index]);\
+ WRITE_REG32(ha, (rsp_sds->intr_mask_reg + 0x1b2000), 0x1);\
+ }
+
+#define QL_DISABLE_INTERRUPTS(ha, sds_index) \
+ {\
+ q80_rsp_sds_ring_t *rsp_sds;\
+ rsp_sds = &((ha->hw.rx_cntxt_rsp)->sds_rsp[sds_index]);\
+ WRITE_REG32(ha, (rsp_sds->intr_mask_reg + 0x1b2000), 0x0);\
+ }
+
+
+#define QL_BUFFER_ALIGN 16
+
+#endif /* #ifndef _QLA_HW_H_ */
diff --git a/sys/dev/qlxgb/qla_inline.h b/sys/dev/qlxgb/qla_inline.h
new file mode 100644
index 000000000000..6a6be5f98a47
--- /dev/null
+++ b/sys/dev/qlxgb/qla_inline.h
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * File: qla_inline.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+#ifndef _QLA_INLINE_H_
+#define _QLA_INLINE_H_
+
+/*
+ * Function: qla_hw_reset
+ */
+static __inline void qla_hw_reset(qla_host_t *ha)
+{
+ WRITE_OFFSET32(ha, Q8_ASIC_RESET, 0xFFFFFFFF);
+}
+
+#define QL8_SEMLOCK_TIMEOUT	1000	/* QLA8020 Semaphore Lock: 1000 tries, 10ms apart */
+
+
+/*
+ * Inline functions for hardware semaphores
+ */
+
+/*
+ * Name: qla_sem_lock
+ * Function: Locks one of the semaphore registers (semaphore 2,3,5 & 7)
+ *	If the id_reg is valid, then id_val is written into it.
+ *	This is for debugging purposes.
+ * Returns: 0 on success; -1 on failure.
+ */
+static __inline int
+qla_sem_lock(qla_host_t *ha, uint32_t sem_reg, uint32_t id_reg, uint32_t id_val)
+{
+ int count = QL8_SEMLOCK_TIMEOUT;
+
+ while (count) {
+ if ((READ_REG32(ha, sem_reg) & SEM_LOCK_BIT))
+ break;
+ count--;
+
+ if (!count)
+ return(-1);
+ qla_mdelay(__func__, 10);
+ }
+ if (id_reg)
+ WRITE_OFFSET32(ha, id_reg, id_val);
+
+ return(0);
+}
+
+/*
+ * Name: qla_sem_unlock
+ * Function: Unlocks the semaphore registers (semaphore 2,3,5 & 7)
+ * previously locked by qla_sem_lock()
+ */
+static __inline void
+qla_sem_unlock(qla_host_t *ha, uint32_t sem_reg)
+{
+ READ_REG32(ha, sem_reg);
+}
+
+static __inline int
+qla_get_ifq_snd_maxlen(qla_host_t *ha)
+{
+ return((NUM_TX_DESCRIPTORS - 1));
+}
+
+static __inline uint32_t
+qla_get_optics(qla_host_t *ha)
+{
+ uint32_t link_speed;
+
+ link_speed = READ_REG32(ha, Q8_LINK_SPEED_0);
+ if (ha->pci_func == 0)
+ link_speed = link_speed & 0xFF;
+ else
+ link_speed = (link_speed >> 8) & 0xFF;
+
+ switch (link_speed) {
+ case 0x1:
+ link_speed = IFM_100_FX;
+ break;
+
+ case 0x10:
+ link_speed = IFM_1000_SX;
+ break;
+
+ default:
+ link_speed = (IFM_10G_LR | IFM_10G_SR);
+ break;
+ }
+
+ return(link_speed);
+}
+
+static __inline uint8_t *
+qla_get_mac_addr(qla_host_t *ha)
+{
+ return (ha->hw.mac_addr);
+}
+
+static __inline void
+qla_read_mac_addr(qla_host_t *ha)
+{
+ uint32_t mac_crb_addr;
+ uint32_t mac_lo;
+ uint32_t mac_hi;
+ uint8_t *macp;
+
+ mac_crb_addr = Q8_CRB_MAC_BLOCK_START +
+ (((ha->pci_func >> 1) * 3) << 2) + ((ha->pci_func & 0x01) << 2);
+
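+	/*
+	 * The MAC address is stored as two 32-bit CRB words in reverse byte
+	 * order; odd-numbered PCI functions start 16 bits into mac_lo.
+	 */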
+ mac_lo = READ_REG32(ha, mac_crb_addr);
+ mac_hi = READ_REG32(ha, (mac_crb_addr + 0x4));
+
+ if (ha->pci_func & 0x01) {
+ mac_lo = mac_lo >> 16;
+
+ macp = (uint8_t *)&mac_lo;
+
+ ha->hw.mac_addr[5] = macp[0];
+ ha->hw.mac_addr[4] = macp[1];
+
+ macp = (uint8_t *)&mac_hi;
+
+ ha->hw.mac_addr[3] = macp[0];
+ ha->hw.mac_addr[2] = macp[1];
+ ha->hw.mac_addr[1] = macp[2];
+ ha->hw.mac_addr[0] = macp[3];
+ } else {
+ macp = (uint8_t *)&mac_lo;
+
+ ha->hw.mac_addr[5] = macp[0];
+ ha->hw.mac_addr[4] = macp[1];
+ ha->hw.mac_addr[3] = macp[2];
+ ha->hw.mac_addr[2] = macp[3];
+
+ macp = (uint8_t *)&mac_hi;
+
+ ha->hw.mac_addr[1] = macp[0];
+ ha->hw.mac_addr[0] = macp[1];
+ }
+ return;
+}
+
+static __inline void
+qla_set_hw_rcv_desc(qla_host_t *ha, uint32_t ridx, uint32_t index,
+ uint32_t handle, bus_addr_t paddr, uint32_t buf_size)
+{
+ q80_recv_desc_t *rcv_desc;
+
+ rcv_desc = (q80_recv_desc_t *)ha->hw.dma_buf.rds_ring[ridx].dma_b;
+
+ rcv_desc += index;
+
+ rcv_desc->handle = (uint16_t)handle;
+ rcv_desc->buf_size = buf_size;
+ rcv_desc->buf_addr = paddr;
+
+ return;
+}
+
+static __inline void
+qla_init_hw_rcv_descriptors(qla_host_t *ha, uint32_t ridx)
+{
+ if (ridx == RDS_RING_INDEX_NORMAL)
+ bzero((void *)ha->hw.dma_buf.rds_ring[ridx].dma_b,
+ (sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS));
+ else if (ridx == RDS_RING_INDEX_JUMBO)
+ bzero((void *)ha->hw.dma_buf.rds_ring[ridx].dma_b,
+ (sizeof(q80_recv_desc_t) * NUM_RX_JUMBO_DESCRIPTORS));
+ else
+ QL_ASSERT(0, ("%s: invalid rds index [%d]\n", __func__, ridx));
+}
+
+static __inline void
+qla_lock(qla_host_t *ha, const char *str)
+{
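+	/*
+	 * Spin (1ms at a time) until the hw_lock_held flag can be claimed
+	 * under hw_lock; the caller's name is saved for debugging.
+	 */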
+ while (1) {
+ mtx_lock(&ha->hw_lock);
+ if (!ha->hw_lock_held) {
+ ha->hw_lock_held = 1;
+ ha->qla_lock = str;
+ mtx_unlock(&ha->hw_lock);
+ break;
+ }
+ mtx_unlock(&ha->hw_lock);
+ qla_mdelay(__func__, 1);
+ }
+ return;
+}
+
+static __inline void
+qla_unlock(qla_host_t *ha, const char *str)
+{
+ mtx_lock(&ha->hw_lock);
+ ha->hw_lock_held = 0;
+ ha->qla_unlock = str;
+ mtx_unlock(&ha->hw_lock);
+}
+
+#endif /* #ifndef _QLA_INLINE_H_ */
diff --git a/sys/dev/qlxgb/qla_ioctl.c b/sys/dev/qlxgb/qla_ioctl.c
new file mode 100644
index 000000000000..1e9557ab4e94
--- /dev/null
+++ b/sys/dev/qlxgb/qla_ioctl.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * File: qla_ioctl.c
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "qla_os.h"
+#include "qla_reg.h"
+#include "qla_hw.h"
+#include "qla_def.h"
+#include "qla_reg.h"
+#include "qla_inline.h"
+#include "qla_glbl.h"
+#include "qla_ioctl.h"
+
+static struct cdevsw qla_cdevsw = {
+ .d_version = D_VERSION,
+ .d_ioctl = qla_eioctl,
+ .d_name = "qlcnic",
+};
+
+int
+qla_make_cdev(qla_host_t *ha)
+{
+ ha->ioctl_dev = make_dev(&qla_cdevsw,
+ ha->ifp->if_dunit,
+ UID_ROOT,
+ GID_WHEEL,
+ 0600,
+ "%s",
+ if_name(ha->ifp));
+
+ if (ha->ioctl_dev == NULL)
+ return (-1);
+
+ ha->ioctl_dev->si_drv1 = ha;
+
+ return (0);
+}
+
+void
+qla_del_cdev(qla_host_t *ha)
+{
+ if (ha->ioctl_dev != NULL)
+ destroy_dev(ha->ioctl_dev);
+ return;
+}
+
+int
+qla_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
+ struct thread *td)
+{
+ qla_host_t *ha;
+ int rval = 0;
+ qla_reg_val_t *rv;
+ qla_rd_flash_t *rdf;
+
+ if ((ha = (qla_host_t *)dev->si_drv1) == NULL)
+ return ENXIO;
+
+ switch(cmd) {
+
+ case QLA_RDWR_REG:
+
+ rv = (qla_reg_val_t *)data;
+
+ if (rv->direct) {
+ if (rv->rd) {
+ rv->val = READ_OFFSET32(ha, rv->reg);
+ } else {
+ WRITE_OFFSET32(ha, rv->reg, rv->val);
+ }
+ } else {
+ if ((rval = qla_rdwr_indreg32(ha, rv->reg, &rv->val,
+ rv->rd)))
+ rval = ENXIO;
+ }
+ break;
+
+ case QLA_RD_FLASH:
+ rdf = (qla_rd_flash_t *)data;
+ if ((rval = qla_rd_flash32(ha, rdf->off, &rdf->data)))
+ rval = ENXIO;
+ break;
+ default:
+ break;
+ }
+
+ return rval;
+}
+
diff --git a/sys/dev/qlxgb/qla_ioctl.h b/sys/dev/qlxgb/qla_ioctl.h
new file mode 100644
index 000000000000..160c46c50db0
--- /dev/null
+++ b/sys/dev/qlxgb/qla_ioctl.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * File: qla_ioctl.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#ifndef _QLA_IOCTL_H_
+#define _QLA_IOCTL_H_
+
+#include <sys/ioccom.h>
+
+struct qla_reg_val {
+ uint16_t rd;
+ uint16_t direct;
+ uint32_t reg;
+ uint32_t val;
+};
+typedef struct qla_reg_val qla_reg_val_t;
+
+struct qla_rd_flash {
+ uint32_t off;
+ uint32_t data;
+};
+typedef struct qla_rd_flash qla_rd_flash_t;
+
+
+/*
+ * Read/Write Register
+ */
+#define QLA_RDWR_REG _IOWR('q', 1, qla_reg_val_t)
+
+/*
+ * Read Flash
+ */
+#define QLA_RD_FLASH _IOWR('q', 2, qla_rd_flash_t)
+
+#endif /* #ifndef _QLA_IOCTL_H_ */
diff --git a/sys/dev/qlxgb/qla_isr.c b/sys/dev/qlxgb/qla_isr.c
new file mode 100644
index 000000000000..382d565acedd
--- /dev/null
+++ b/sys/dev/qlxgb/qla_isr.c
@@ -0,0 +1,416 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * File: qla_isr.c
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "qla_os.h"
+#include "qla_reg.h"
+#include "qla_hw.h"
+#include "qla_def.h"
+#include "qla_inline.h"
+#include "qla_ver.h"
+#include "qla_glbl.h"
+#include "qla_dbg.h"
+
+static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp);
+static void qla_replenish_jumbo_rx(qla_host_t *ha, qla_sds_t *sdsp);
+
+/*
+ * Name: qla_rx_intr
+ * Function: Handles normal ethernet frames received
+ */
+static void
+qla_rx_intr(qla_host_t *ha, uint64_t data, uint32_t sds_idx,
+ struct lro_ctrl *lro)
+{
+ uint32_t idx, length, status, ring;
+ qla_rx_buf_t *rxb;
+ struct mbuf *mp;
+ struct ifnet *ifp = ha->ifp;
+ qla_sds_t *sdsp;
+ struct ether_vlan_header *eh;
+
+ sdsp = &ha->hw.sds[sds_idx];
+
+ ring = (uint32_t)Q8_STAT_DESC_TYPE(data);
+ idx = (uint32_t)Q8_STAT_DESC_HANDLE(data);
+ length = (uint32_t)Q8_STAT_DESC_TOTAL_LENGTH(data);
+ status = (uint32_t)Q8_STAT_DESC_STATUS(data);
+
+ if (ring == 0) {
+ if ((idx >= NUM_RX_DESCRIPTORS) || (length > MCLBYTES)) {
+ device_printf(ha->pci_dev, "%s: ring[%d] index[0x%08x]"
+ " len[0x%08x] invalid\n",
+ __func__, ring, idx, length);
+ return;
+ }
+ } else {
+ if ((idx >= NUM_RX_JUMBO_DESCRIPTORS)||(length > MJUM9BYTES)) {
+ device_printf(ha->pci_dev, "%s: ring[%d] index[0x%08x]"
+ " len[0x%08x] invalid\n",
+ __func__, ring, idx, length);
+ return;
+ }
+ }
+
+ if (ring == 0)
+ rxb = &ha->rx_buf[idx];
+ else
+ rxb = &ha->rx_jbuf[idx];
+
+ QL_ASSERT((rxb != NULL),\
+ ("%s: [r, i, sds_idx]=[%d, 0x%x, %d] rxb != NULL\n",\
+ __func__, ring, idx, sds_idx));
+
+ mp = rxb->m_head;
+
+ QL_ASSERT((mp != NULL),\
+ ("%s: [r,i,rxb, sds_idx]=[%d, 0x%x, %p, %d] mp != NULL\n",\
+ __func__, ring, idx, rxb, sds_idx));
+
+ bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);
+
+ if (ring == 0) {
+ rxb->m_head = NULL;
+ rxb->next = sdsp->rxb_free;
+ sdsp->rxb_free = rxb;
+ sdsp->rx_free++;
+ } else {
+ rxb->m_head = NULL;
+ rxb->next = sdsp->rxjb_free;
+ sdsp->rxjb_free = rxb;
+ sdsp->rxj_free++;
+ }
+
+ mp->m_len = length;
+ mp->m_pkthdr.len = length;
+ mp->m_pkthdr.rcvif = ifp;
+
+ eh = mtod(mp, struct ether_vlan_header *);
+
+ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+ uint32_t *data = (uint32_t *)eh;
+
+ mp->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
+ mp->m_flags |= M_VLANTAG;
+
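+		/*
+		 * Slide the MAC addresses forward over the 802.1Q header and
+		 * trim the leading 4 bytes to untag the frame.
+		 */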
+ *(data + 3) = *(data + 2);
+ *(data + 2) = *(data + 1);
+ *(data + 1) = *data;
+
+ m_adj(mp, ETHER_VLAN_ENCAP_LEN);
+ }
+
+ if (status == Q8_STAT_DESC_STATUS_CHKSUM_OK) {
+ mp->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);
+ } else {
+ mp->m_pkthdr.csum_flags = 0;
+ }
+
+ if (lro->lro_cnt && (tcp_lro_rx(lro, mp, 0) == 0)) {
+		/* LRO packet has been successfully queued */
+ } else {
+ (*ifp->if_input)(ifp, mp);
+ }
+
+ if (sdsp->rx_free > std_replenish)
+ qla_replenish_normal_rx(ha, sdsp);
+
+ if (sdsp->rxj_free > jumbo_replenish)
+ qla_replenish_jumbo_rx(ha, sdsp);
+
+ return;
+}
+
+static void
+qla_replenish_jumbo_rx(qla_host_t *ha, qla_sds_t *sdsp)
+{
+ qla_rx_buf_t *rxb;
+ int count = jumbo_replenish;
+ uint32_t rxj_next;
+
+ if (!mtx_trylock(&ha->rxj_lock))
+ return;
+
+ rxj_next = ha->hw.rxj_next;
+
+ while (count--) {
+ rxb = sdsp->rxjb_free;
+
+ if (rxb == NULL)
+ break;
+
+ sdsp->rxjb_free = rxb->next;
+ sdsp->rxj_free--;
+
+
+ if (qla_get_mbuf(ha, rxb, NULL, RDS_RING_INDEX_JUMBO) == 0) {
+ qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_JUMBO,
+ ha->hw.rxj_in, rxb->handle, rxb->paddr,
+ (rxb->m_head)->m_pkthdr.len);
+ ha->hw.rxj_in++;
+ if (ha->hw.rxj_in == NUM_RX_JUMBO_DESCRIPTORS)
+ ha->hw.rxj_in = 0;
+ ha->hw.rxj_next++;
+ if (ha->hw.rxj_next == NUM_RX_JUMBO_DESCRIPTORS)
+ ha->hw.rxj_next = 0;
+ } else {
+ device_printf(ha->pci_dev,
+ "%s: qla_get_mbuf [1,(%d),(%d)] failed\n",
+ __func__, ha->hw.rxj_in, rxb->handle);
+
+ rxb->m_head = NULL;
+ rxb->next = sdsp->rxjb_free;
+ sdsp->rxjb_free = rxb;
+ sdsp->rxj_free++;
+
+ break;
+ }
+ }
+
+ if (rxj_next != ha->hw.rxj_next) {
+ QL_UPDATE_RDS_PRODUCER_INDEX(ha, 1, ha->hw.rxj_next);
+ }
+ mtx_unlock(&ha->rxj_lock);
+}
+
+static void
+qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp)
+{
+ qla_rx_buf_t *rxb;
+ int count = std_replenish;
+ uint32_t rx_next;
+
+ if (!mtx_trylock(&ha->rx_lock))
+ return;
+
+ rx_next = ha->hw.rx_next;
+
+ while (count--) {
+ rxb = sdsp->rxb_free;
+
+ if (rxb == NULL)
+ break;
+
+ sdsp->rxb_free = rxb->next;
+ sdsp->rx_free--;
+
+ if (qla_get_mbuf(ha, rxb, NULL, RDS_RING_INDEX_NORMAL) == 0) {
+ qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_NORMAL,
+ ha->hw.rx_in, rxb->handle, rxb->paddr,
+ (rxb->m_head)->m_pkthdr.len);
+ ha->hw.rx_in++;
+ if (ha->hw.rx_in == NUM_RX_DESCRIPTORS)
+ ha->hw.rx_in = 0;
+ ha->hw.rx_next++;
+ if (ha->hw.rx_next == NUM_RX_DESCRIPTORS)
+ ha->hw.rx_next = 0;
+ } else {
+ device_printf(ha->pci_dev,
+ "%s: qla_get_mbuf [0,(%d),(%d)] failed\n",
+ __func__, ha->hw.rx_in, rxb->handle);
+
+ rxb->m_head = NULL;
+ rxb->next = sdsp->rxb_free;
+ sdsp->rxb_free = rxb;
+ sdsp->rx_free++;
+
+ break;
+ }
+ }
+
+ if (rx_next != ha->hw.rx_next) {
+ QL_UPDATE_RDS_PRODUCER_INDEX(ha, 0, ha->hw.rx_next);
+ }
+ mtx_unlock(&ha->rx_lock);
+}
+
+/*
+ * Name: qla_rcv_isr
+ * Function: Main Receive Interrupt Processing Routine
+ */
+static uint32_t
+qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
+{
+ device_t dev;
+ qla_hw_t *hw;
+ uint32_t comp_idx, desc_count;
+ q80_stat_desc_t *sdesc;
+ struct lro_ctrl *lro;
+ struct lro_entry *queued;
+ uint32_t ret = 0;
+
+ dev = ha->pci_dev;
+ hw = &ha->hw;
+
+ hw->sds[sds_idx].rcv_active = 1;
+ if (ha->flags.stop_rcv) {
+ hw->sds[sds_idx].rcv_active = 0;
+ return 0;
+ }
+
+ QL_DPRINT2((dev, "%s: [%d]enter\n", __func__, sds_idx));
+
+ /*
+ * receive interrupts
+ */
+ comp_idx = hw->sds[sds_idx].sdsr_next;
+ lro = &hw->sds[sds_idx].lro;
+
+ while (count--) {
+
+ sdesc = (q80_stat_desc_t *)
+ &hw->sds[sds_idx].sds_ring_base[comp_idx];
+
+ if (Q8_STAT_DESC_OWNER((sdesc->data[0])) !=
+ Q8_STAT_DESC_OWNER_HOST) {
+ QL_DPRINT2((dev, "%s: data %p sdsr_next 0x%08x\n",
+ __func__, (void *)sdesc->data[0], comp_idx));
+ break;
+ }
+
+ desc_count = Q8_STAT_DESC_COUNT((sdesc->data[0]));
+
+ switch (Q8_STAT_DESC_OPCODE((sdesc->data[0]))) {
+
+ case Q8_STAT_DESC_OPCODE_RCV_PKT:
+ case Q8_STAT_DESC_OPCODE_SYN_OFFLOAD:
+ qla_rx_intr(ha, (sdesc->data[0]), sds_idx, lro);
+
+ break;
+
+ default:
+ device_printf(dev, "%s: default 0x%llx!\n", __func__,
+ (long long unsigned int)sdesc->data[0]);
+ break;
+ }
+
+ while (desc_count--) {
+ sdesc->data[0] =
+ Q8_STAT_DESC_SET_OWNER(Q8_STAT_DESC_OWNER_FW);
+ comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
+ sdesc = (q80_stat_desc_t *)
+ &hw->sds[sds_idx].sds_ring_base[comp_idx];
+ }
+ }
+
+ while((!SLIST_EMPTY(&lro->lro_active))) {
+ queued = SLIST_FIRST(&lro->lro_active);
+ SLIST_REMOVE_HEAD(&lro->lro_active, next);
+ tcp_lro_flush(lro, queued);
+ }
+
+ if (hw->sds[sds_idx].sdsr_next != comp_idx) {
+ QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
+ }
+ hw->sds[sds_idx].sdsr_next = comp_idx;
+
+ sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx];
+ if ((sds_idx == 0) && (Q8_STAT_DESC_OWNER((sdesc->data[0])) ==
+ Q8_STAT_DESC_OWNER_HOST)) {
+ ret = -1;
+ }
+
+ hw->sds[sds_idx].rcv_active = 0;
+ return (ret);
+}
+
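+/*
+ * Name: qla_isr
+ * Function: MSI-X Interrupt Handler; processes up to rcv_pkt_thres receive
+ *	completions; any remaining work is deferred to the qla_rcv taskqueue,
+ *	otherwise interrupts are re-enabled.
+ */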
+void
+qla_isr(void *arg)
+{
+ qla_ivec_t *ivec = arg;
+ qla_host_t *ha;
+ uint32_t sds_idx;
+ uint32_t ret;
+
+ ha = ivec->ha;
+ sds_idx = ivec->irq_rid - 1;
+
+ if (sds_idx >= ha->hw.num_sds_rings) {
+ device_printf(ha->pci_dev, "%s: bogus sds_idx 0x%x\n", __func__,
+ sds_idx);
+
+ return;
+ }
+
+ if (sds_idx == 0)
+ taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
+
+ ret = qla_rcv_isr(ha, sds_idx, rcv_pkt_thres);
+
+ if (sds_idx == 0)
+ taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
+
+ if (ret) {
+ taskqueue_enqueue(ha->irq_vec[sds_idx].rcv_tq,
+ &ha->irq_vec[sds_idx].rcv_task);
+ } else {
+ QL_ENABLE_INTERRUPTS(ha, sds_idx);
+ }
+}
+
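+/*
+ * Name: qla_rcv
+ * Function: Deferred receive processing task; drains the status ring in
+ *	chunks of rcv_pkt_thres_d descriptors and re-enables interrupts when
+ *	done.
+ */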
+void
+qla_rcv(void *context, int pending)
+{
+ qla_ivec_t *ivec = context;
+ qla_host_t *ha;
+ device_t dev;
+ qla_hw_t *hw;
+ uint32_t sds_idx;
+ uint32_t ret;
+ struct ifnet *ifp;
+
+ ha = ivec->ha;
+ dev = ha->pci_dev;
+ hw = &ha->hw;
+ sds_idx = ivec->irq_rid - 1;
+ ifp = ha->ifp;
+
+ do {
+ if (sds_idx == 0) {
+ if (qla_le32_to_host(*(hw->tx_cons)) != hw->txr_comp) {
+ taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
+ } else if ((ifp->if_snd.ifq_head != NULL) &&
+ QL_RUNNING(ifp)) {
+ taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
+ }
+ }
+ ret = qla_rcv_isr(ha, sds_idx, rcv_pkt_thres_d);
+ } while (ret);
+
+ if (sds_idx == 0)
+ taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
+
+ QL_ENABLE_INTERRUPTS(ha, sds_idx);
+}
+
diff --git a/sys/dev/qlxgb/qla_misc.c b/sys/dev/qlxgb/qla_misc.c
new file mode 100644
index 000000000000..c616d4f9052b
--- /dev/null
+++ b/sys/dev/qlxgb/qla_misc.c
@@ -0,0 +1,624 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * File : qla_misc.c
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "qla_os.h"
+#include "qla_reg.h"
+#include "qla_hw.h"
+#include "qla_def.h"
+#include "qla_reg.h"
+#include "qla_inline.h"
+#include "qla_glbl.h"
+#include "qla_dbg.h"
+
+/*
+ * structure encapsulating the value to read/write to offchip memory
+ */
+typedef struct _offchip_mem_val {
+ uint32_t data_lo;
+ uint32_t data_hi;
+ uint32_t data_ulo;
+ uint32_t data_uhi;
+} offchip_mem_val_t;
+
+#define Q8_ADDR_UNDEFINED 0xFFFFFFFF
+
+/*
+ * The index to this table is Bits 20-27 of the indirect register address
+ */
+static uint32_t indirect_to_base_map[] =
+ {
+ Q8_ADDR_UNDEFINED, /* 0x00 */
+ 0x77300000, /* 0x01 */
+ 0x29500000, /* 0x02 */
+ 0x2A500000, /* 0x03 */
+ Q8_ADDR_UNDEFINED, /* 0x04 */
+ 0x0D000000, /* 0x05 */
+ 0x1B100000, /* 0x06 */
+ 0x0E600000, /* 0x07 */
+ 0x0E000000, /* 0x08 */
+ 0x0E100000, /* 0x09 */
+ 0x0E200000, /* 0x0A */
+ 0x0E300000, /* 0x0B */
+ 0x42000000, /* 0x0C */
+ 0x41700000, /* 0x0D */
+ 0x42100000, /* 0x0E */
+ 0x34B00000, /* 0x0F */
+ 0x40500000, /* 0x10 */
+ 0x34000000, /* 0x11 */
+ 0x34100000, /* 0x12 */
+ 0x34200000, /* 0x13 */
+ 0x34300000, /* 0x14 */
+ 0x34500000, /* 0x15 */
+ 0x34400000, /* 0x16 */
+ 0x3C000000, /* 0x17 */
+ 0x3C100000, /* 0x18 */
+ 0x3C200000, /* 0x19 */
+ 0x3C300000, /* 0x1A */
+ Q8_ADDR_UNDEFINED, /* 0x1B */
+ 0x3C400000, /* 0x1C */
+ 0x41000000, /* 0x1D */
+ Q8_ADDR_UNDEFINED, /* 0x1E */
+ 0x0D100000, /* 0x1F */
+ Q8_ADDR_UNDEFINED, /* 0x20 */
+ 0x77300000, /* 0x21 */
+ 0x41600000, /* 0x22 */
+ Q8_ADDR_UNDEFINED, /* 0x23 */
+ Q8_ADDR_UNDEFINED, /* 0x24 */
+ Q8_ADDR_UNDEFINED, /* 0x25 */
+ Q8_ADDR_UNDEFINED, /* 0x26 */
+ Q8_ADDR_UNDEFINED, /* 0x27 */
+ 0x41700000, /* 0x28 */
+ Q8_ADDR_UNDEFINED, /* 0x29 */
+ 0x08900000, /* 0x2A */
+ 0x70A00000, /* 0x2B */
+ 0x70B00000, /* 0x2C */
+ 0x70C00000, /* 0x2D */
+ 0x08D00000, /* 0x2E */
+ 0x08E00000, /* 0x2F */
+ 0x70F00000, /* 0x30 */
+ 0x40500000, /* 0x31 */
+ 0x42000000, /* 0x32 */
+ 0x42100000, /* 0x33 */
+ Q8_ADDR_UNDEFINED, /* 0x34 */
+ 0x08800000, /* 0x35 */
+ 0x09100000, /* 0x36 */
+ 0x71200000, /* 0x37 */
+ 0x40600000, /* 0x38 */
+ Q8_ADDR_UNDEFINED, /* 0x39 */
+ 0x71800000, /* 0x3A */
+ 0x19900000, /* 0x3B */
+ 0x1A900000, /* 0x3C */
+ Q8_ADDR_UNDEFINED, /* 0x3D */
+ 0x34600000, /* 0x3E */
+ Q8_ADDR_UNDEFINED, /* 0x3F */
+ };
+
+/*
+ * Address Translation Table for CRB to offsets from PCI BAR0
+ */
+typedef struct _crb_to_pci {
+ uint32_t crb_addr;
+ uint32_t pci_addr;
+} crb_to_pci_t;
+
+static crb_to_pci_t crbinit_to_pciaddr[] = {
+ {(0x088 << 20), (0x035 << 20)},
+ {(0x089 << 20), (0x02A << 20)},
+ {(0x08D << 20), (0x02E << 20)},
+ {(0x08E << 20), (0x02F << 20)},
+ {(0x0C6 << 20), (0x023 << 20)},
+ {(0x0C7 << 20), (0x024 << 20)},
+ {(0x0C8 << 20), (0x025 << 20)},
+ {(0x0D0 << 20), (0x005 << 20)},
+ {(0x0D1 << 20), (0x01F << 20)},
+ {(0x0E0 << 20), (0x008 << 20)},
+ {(0x0E1 << 20), (0x009 << 20)},
+ {(0x0E2 << 20), (0x00A << 20)},
+ {(0x0E3 << 20), (0x00B << 20)},
+ {(0x0E6 << 20), (0x007 << 20)},
+ {(0x199 << 20), (0x03B << 20)},
+ {(0x1B1 << 20), (0x006 << 20)},
+ {(0x295 << 20), (0x002 << 20)},
+ {(0x29A << 20), (0x000 << 20)},
+ {(0x2A5 << 20), (0x003 << 20)},
+ {(0x340 << 20), (0x011 << 20)},
+ {(0x341 << 20), (0x012 << 20)},
+ {(0x342 << 20), (0x013 << 20)},
+ {(0x343 << 20), (0x014 << 20)},
+ {(0x344 << 20), (0x016 << 20)},
+ {(0x345 << 20), (0x015 << 20)},
+ {(0x3C0 << 20), (0x017 << 20)},
+ {(0x3C1 << 20), (0x018 << 20)},
+ {(0x3C2 << 20), (0x019 << 20)},
+ {(0x3C3 << 20), (0x01A << 20)},
+ {(0x3C4 << 20), (0x01C << 20)},
+ {(0x3C5 << 20), (0x01B << 20)},
+ {(0x405 << 20), (0x031 << 20)},
+ {(0x406 << 20), (0x038 << 20)},
+ {(0x410 << 20), (0x01D << 20)},
+ {(0x416 << 20), (0x022 << 20)},
+ {(0x417 << 20), (0x028 << 20)},
+ {(0x420 << 20), (0x032 << 20)},
+ {(0x421 << 20), (0x033 << 20)},
+ {(0x700 << 20), (0x00C << 20)},
+ {(0x701 << 20), (0x00D << 20)},
+ {(0x702 << 20), (0x00E << 20)},
+ {(0x703 << 20), (0x00F << 20)},
+ {(0x704 << 20), (0x010 << 20)},
+ {(0x70A << 20), (0x02B << 20)},
+ {(0x70B << 20), (0x02C << 20)},
+ {(0x70C << 20), (0x02D << 20)},
+ {(0x70F << 20), (0x030 << 20)},
+ {(0x718 << 20), (0x03A << 20)},
+ {(0x758 << 20), (0x026 << 20)},
+ {(0x759 << 20), (0x027 << 20)},
+ {(0x773 << 20), (0x001 << 20)}
+};
+
+#define Q8_INVALID_ADDRESS (-1)
+#define Q8_ADDR_MASK (0xFFF << 20)
+
+typedef struct _addr_val {
+ uint32_t addr;
+ uint32_t value;
+ uint32_t pci_addr;
+ uint32_t ind_addr;
+} addr_val_t;
+
+/*
+ * Name: qla_rdwr_indreg32
+ * Function: Read/Write an Indirect Register
+ */
+int
+qla_rdwr_indreg32(qla_host_t *ha, uint32_t addr, uint32_t *val, uint32_t rd)
+{
+ uint32_t offset;
+ int count = 100;
+
+ offset = (addr & 0xFFF00000) >> 20;
+
+ if (offset > 0x3F) {
+ device_printf(ha->pci_dev, "%s: invalid addr 0x%08x\n",
+ __func__, addr);
+ return -1;
+ }
+
+ offset = indirect_to_base_map[offset];
+ if (offset == Q8_ADDR_UNDEFINED) {
+ device_printf(ha->pci_dev, "%s: undefined map 0x%08x\n",
+ __func__, addr);
+ return -1;
+ }
+
+ offset = offset | (addr & 0x000F0000);
+
+ if (qla_sem_lock(ha, Q8_SEM7_LOCK, 0, 0)) {
+ device_printf(ha->pci_dev, "%s: SEM7_LOCK failed\n", __func__);
+ return (-1);
+ }
+
+ WRITE_OFFSET32(ha, Q8_CRB_WINDOW_2M, offset);
+
+ while (offset != (READ_OFFSET32(ha, Q8_CRB_WINDOW_2M))) {
+ count--;
+ if (!count) {
+ qla_sem_unlock(ha, Q8_SEM7_UNLOCK);
+ return -1;
+ }
+
+ qla_mdelay(__func__, 1);
+ }
+
+ if (rd) {
+ *val = READ_OFFSET32(ha, ((addr & 0xFFFF) | 0x1E0000));
+ } else {
+ WRITE_OFFSET32(ha, ((addr & 0xFFFF) | 0x1E0000), *val);
+ }
+
+ qla_sem_unlock(ha, Q8_SEM7_UNLOCK);
+ return 0;
+}
+
+/*
+ * Name: qla_rdwr_offchip_mem
+ * Function: Read/Write OffChip Memory
+ */
+static int
+qla_rdwr_offchip_mem(qla_host_t *ha, uint64_t addr, offchip_mem_val_t *val,
+ uint32_t rd)
+{
+ uint32_t count = 100;
+ uint32_t data;
+
+ WRITE_OFFSET32(ha, Q8_MIU_TEST_AGT_ADDR_LO, (uint32_t)addr);
+ WRITE_OFFSET32(ha, Q8_MIU_TEST_AGT_ADDR_HI, (uint32_t)(addr >> 32));
+
+ if (!rd) {
+ WRITE_OFFSET32(ha, Q8_MIU_TEST_AGT_WRDATA_LO, val->data_lo);
+ WRITE_OFFSET32(ha, Q8_MIU_TEST_AGT_WRDATA_HI, val->data_hi);
+ WRITE_OFFSET32(ha, Q8_MIU_TEST_AGT_WRDATA_ULO, val->data_ulo);
+ WRITE_OFFSET32(ha, Q8_MIU_TEST_AGT_WRDATA_UHI, val->data_uhi);
+ WRITE_OFFSET32(ha, Q8_MIU_TEST_AGT_CTRL, 0x07); /* Write */
+ } else {
+ WRITE_OFFSET32(ha, Q8_MIU_TEST_AGT_CTRL, 0x03); /* Read */
+ }
+
+ while (count--) {
+ data = READ_OFFSET32(ha, Q8_MIU_TEST_AGT_CTRL);
+ if (!(data & BIT_3)) {
+ if (rd) {
+ val->data_lo = READ_OFFSET32(ha, \
+ Q8_MIU_TEST_AGT_RDDATA_LO);
+ val->data_hi = READ_OFFSET32(ha, \
+ Q8_MIU_TEST_AGT_RDDATA_HI);
+ val->data_ulo = READ_OFFSET32(ha, \
+ Q8_MIU_TEST_AGT_RDDATA_ULO);
+ val->data_uhi = READ_OFFSET32(ha, \
+ Q8_MIU_TEST_AGT_RDDATA_UHI);
+ }
+ return 0;
+ } else
+ qla_mdelay(__func__, 1);
+ }
+
+ device_printf(ha->pci_dev, "%s: failed[0x%08x]\n", __func__, data);
+ return (-1);
+}
+
+/*
+ * Name: qla_rd_flash32
+ * Function: Read Flash Memory
+ */
+int
+qla_rd_flash32(qla_host_t *ha, uint32_t addr, uint32_t *data)
+{
+ uint32_t val;
+ uint32_t count = 100;
+
+ if (qla_sem_lock(ha, Q8_SEM2_LOCK, 0, 0)) {
+ device_printf(ha->pci_dev, "%s: SEM2_LOCK failed\n", __func__);
+ return (-1);
+ }
+ WRITE_OFFSET32(ha, Q8_ROM_LOCKID, 0xa5a5a5a5);
+
+ val = addr;
+ qla_rdwr_indreg32(ha, Q8_ROM_ADDRESS, &val, 0);
+ val = 0;
+ qla_rdwr_indreg32(ha, Q8_ROM_DUMMY_BYTE_COUNT, &val, 0);
+ val = 3;
+ qla_rdwr_indreg32(ha, Q8_ROM_ADDR_BYTE_COUNT, &val, 0);
+
+ QLA_USEC_DELAY(100);
+
+ val = ROM_OPCODE_FAST_RD;
+ qla_rdwr_indreg32(ha, Q8_ROM_INSTR_OPCODE, &val, 0);
+
+ while (!((val = READ_OFFSET32(ha, Q8_ROM_STATUS)) & BIT_1)) {
+ count--;
+ if (!count) {
+			qla_sem_unlock(ha, Q8_SEM2_UNLOCK);
+ return -1;
+ }
+ }
+
+ val = 0;
+ qla_rdwr_indreg32(ha, Q8_ROM_DUMMY_BYTE_COUNT, &val, 0);
+ qla_rdwr_indreg32(ha, Q8_ROM_ADDR_BYTE_COUNT, &val, 0);
+
+ QLA_USEC_DELAY(100);
+
+ qla_rdwr_indreg32(ha, Q8_ROM_RD_DATA, data, 1);
+
+ qla_sem_unlock(ha, Q8_SEM2_UNLOCK);
+ return 0;
+}
+
+/*
+ * Name: qla_int_to_pci_addr_map
+ * Function: Converts an Internal (CRB) Address to an Indirect Address
+ */
+static uint32_t
+qla_int_to_pci_addr_map(qla_host_t *ha, uint32_t int_addr)
+{
+ uint32_t crb_to_pci_table_size, i;
+ uint32_t addr;
+
+ crb_to_pci_table_size = sizeof(crbinit_to_pciaddr)/sizeof(crb_to_pci_t);
+ addr = int_addr & Q8_ADDR_MASK;
+
+ for (i = 0; i < crb_to_pci_table_size; i++) {
+ if (crbinit_to_pciaddr[i].crb_addr == addr) {
+ addr = (int_addr & ~Q8_ADDR_MASK) |
+ crbinit_to_pciaddr[i].pci_addr;
+ return (addr);
+ }
+ }
+ return (Q8_INVALID_ADDRESS);
+}
+
+/*
+ * Name: qla_filter_pci_addr
+ * Function: Filters out Indirect Addresses that are not writable
+ */
+static uint32_t
+qla_filter_pci_addr(qla_host_t *ha, uint32_t addr)
+{
+ if ((addr == Q8_INVALID_ADDRESS) ||
+ (addr == 0x00112040) ||
+ (addr == 0x00112048) ||
+ ((addr & 0xFFFF0FFF) == 0x001100C4) ||
+ ((addr & 0xFFFF0FFF) == 0x001100C8) ||
+ ((addr & 0x0FF00000) == 0x00200000) ||
+ (addr == 0x022021FC) ||
+ (addr == 0x0330001C) ||
+ (addr == 0x03300024) ||
+ (addr == 0x033000A8) ||
+ (addr == 0x033000C8) ||
+ (addr == 0x033000BC) ||
+ ((addr & 0x0FF00000) == 0x03A00000) ||
+ (addr == 0x03B0001C))
+ return (Q8_INVALID_ADDRESS);
+ else
+ return (addr);
+}
+
+/*
+ * Name: qla_crb_init
+ * Function: CRB Initialization - first step in the initialization after reset
+ *	Essentially reads the address/value pairs from the flash image and
+ *	writes each value to the address given in its addr/value pair.
+ */
+static int
+qla_crb_init(qla_host_t *ha)
+{
+ uint32_t val, sig;
+ uint32_t offset, count, i;
+ addr_val_t *addr_val_map, *avmap;
+
+ qla_rd_flash32(ha, 0, &sig);
+	QL_DPRINT2((ha->pci_dev, "%s: sig = 0x%08x\n", __func__, sig));
+
+ qla_rd_flash32(ha, 4, &val);
+ QL_DPRINT2((ha->pci_dev, "%s: val[4] = 0x%08x\n", __func__, val));
+
+ count = val >> 16;
+ offset = val & 0xFFFF;
+ offset = offset << 2;
+
+ QL_DPRINT2((ha->pci_dev, "%s: [sig,val]=[0x%08x, 0x%08x] %d pairs\n",
+ __func__, sig, val, count));
+
+ addr_val_map = avmap = malloc((sizeof(addr_val_t) * count),
+ M_QLA8XXXBUF, M_NOWAIT);
+
+ if (addr_val_map == NULL) {
+ device_printf(ha->pci_dev, "%s: malloc failed\n", __func__);
+ return (-1);
+ }
+ memset(avmap, 0, (sizeof(addr_val_t) * count));
+
+ count = count << 1;
+ for (i = 0; i < count; ) {
+ qla_rd_flash32(ha, (offset + (i * 4)), &avmap->value);
+ i++;
+ qla_rd_flash32(ha, (offset + (i * 4)), &avmap->addr);
+ i++;
+
+ avmap->pci_addr = qla_int_to_pci_addr_map(ha, avmap->addr);
+ avmap->ind_addr = qla_filter_pci_addr(ha, avmap->pci_addr);
+
+ QL_DPRINT2((ha->pci_dev,
+ "%s: [0x%02x][0x%08x:0x%08x:0x%08x] 0x%08x\n",
+ __func__, (i >> 1), avmap->addr, avmap->pci_addr,
+ avmap->ind_addr, avmap->value));
+
+ if (avmap->ind_addr != Q8_INVALID_ADDRESS) {
+ qla_rdwr_indreg32(ha, avmap->ind_addr, &avmap->value,0);
+ qla_mdelay(__func__, 1);
+ }
+ avmap++;
+ }
+
+ free (addr_val_map, M_QLA8XXXBUF);
+ return (0);
+}
+
+/*
+ * Name: qla_init_peg_regs
+ * Function: Protocol Engine Register Initialization
+ */
+static void
+qla_init_peg_regs(qla_host_t *ha)
+{
+ WRITE_OFFSET32(ha, Q8_PEG_D_RESET1, 0x001E);
+ WRITE_OFFSET32(ha, Q8_PEG_D_RESET2, 0x0008);
+ WRITE_OFFSET32(ha, Q8_PEG_I_RESET, 0x0008);
+ WRITE_OFFSET32(ha, Q8_PEG_0_CLR1, 0x0000);
+ WRITE_OFFSET32(ha, Q8_PEG_0_CLR2, 0x0000);
+ WRITE_OFFSET32(ha, Q8_PEG_1_CLR1, 0x0000);
+ WRITE_OFFSET32(ha, Q8_PEG_1_CLR2, 0x0000);
+ WRITE_OFFSET32(ha, Q8_PEG_2_CLR1, 0x0000);
+ WRITE_OFFSET32(ha, Q8_PEG_2_CLR2, 0x0000);
+ WRITE_OFFSET32(ha, Q8_PEG_3_CLR1, 0x0000);
+ WRITE_OFFSET32(ha, Q8_PEG_3_CLR2, 0x0000);
+ WRITE_OFFSET32(ha, Q8_PEG_4_CLR1, 0x0000);
+ WRITE_OFFSET32(ha, Q8_PEG_4_CLR2, 0x0000);
+}
+
+/*
+ * Name: qla_load_fw_from_flash
+ * Function: Reads the Bootloader from Flash and Loads it into Offchip Memory
+ */
+static void
+qla_load_fw_from_flash(qla_host_t *ha)
+{
+ uint64_t mem_off = 0x10000;
+ uint32_t flash_off = 0x10000;
+ uint32_t count;
+ offchip_mem_val_t val;
+
+
+ /* only bootloader needs to be loaded into memory */
+ for (count = 0; count < 0x20000 ; ) {
+ qla_rd_flash32(ha, flash_off, &val.data_lo);
+ count = count + 4;
+ flash_off = flash_off + 4;
+
+ qla_rd_flash32(ha, flash_off, &val.data_hi);
+ count = count + 4;
+ flash_off = flash_off + 4;
+
+ qla_rd_flash32(ha, flash_off, &val.data_ulo);
+ count = count + 4;
+ flash_off = flash_off + 4;
+
+ qla_rd_flash32(ha, flash_off, &val.data_uhi);
+ count = count + 4;
+ flash_off = flash_off + 4;
+
+ qla_rdwr_offchip_mem(ha, mem_off, &val, 0);
+
+ mem_off = mem_off + 16;
+ }
+ return;
+}
+
+/*
+ * Name: qla_init_from_flash
+ * Function: Performs Initialization which consists of the following sequence
+ * - reset
+ * - CRB Init
+ * - Peg Init
+ *	- Read the Bootloader from Flash and Load it into Offchip Memory
+ * - Kick start the bootloader which loads the rest of the firmware
+ * and performs the remaining steps in the initialization process.
+ */
+static int
+qla_init_from_flash(qla_host_t *ha)
+{
+ uint32_t delay = 300;
+ uint32_t data;
+
+ qla_hw_reset(ha);
+ qla_mdelay(__func__, 100);
+
+ qla_crb_init(ha);
+ qla_mdelay(__func__, 10);
+
+ qla_init_peg_regs(ha);
+ qla_mdelay(__func__, 10);
+
+ qla_load_fw_from_flash(ha);
+
+ WRITE_OFFSET32(ha, Q8_CMDPEG_STATE, 0x00000000);
+ WRITE_OFFSET32(ha, Q8_PEG_0_RESET, 0x00001020);
+ WRITE_OFFSET32(ha, Q8_ASIC_RESET, 0x0080001E);
+ qla_mdelay(__func__, 100);
+
+ do {
+ data = READ_OFFSET32(ha, Q8_CMDPEG_STATE);
+
+ QL_DPRINT2((ha->pci_dev, "%s: func[%d] cmdpegstate 0x%08x\n",
+ __func__, ha->pci_func, data));
+ if (data == CMDPEG_PHAN_INIT_COMPLETE) {
+ QL_DPRINT2((ha->pci_dev,
+ "%s: func[%d] init complete\n",
+ __func__, ha->pci_func));
+ return(0);
+ }
+ qla_mdelay(__func__, 100);
+ } while (delay--);
+
+ device_printf(ha->pci_dev,
+ "%s: func[%d] Q8_PEG_HALT_STATUS1[0x%08x] STATUS2[0x%08x]"
+ " HEARTBEAT[0x%08x] RCVPEG_STATE[0x%08x]"
+ " CMDPEG_STATE[0x%08x]\n",
+ __func__, ha->pci_func,
+ (READ_OFFSET32(ha, Q8_PEG_HALT_STATUS1)),
+ (READ_OFFSET32(ha, Q8_PEG_HALT_STATUS2)),
+ (READ_OFFSET32(ha, Q8_FIRMWARE_HEARTBEAT)),
+ (READ_OFFSET32(ha, Q8_RCVPEG_STATE)), data);
+
+ return (-1);
+}
+
+/*
+ * Name: qla_init_hw
+ * Function: Initializes P3+ hardware.
+ */
+int
+qla_init_hw(qla_host_t *ha)
+{
+ device_t dev;
+ int ret = 0;
+ uint32_t val, delay = 300;
+
+ dev = ha->pci_dev;
+
+ QL_DPRINT1((dev, "%s: enter\n", __func__));
+
+ qla_mdelay(__func__, 100);
+
+ if (ha->pci_func & 0x1) {
+ while ((ha->pci_func & 0x1) && delay--) {
+ val = READ_OFFSET32(ha, Q8_CMDPEG_STATE);
+
+ if (val == CMDPEG_PHAN_INIT_COMPLETE) {
+ QL_DPRINT2((dev,
+ "%s: func = %d init complete\n",
+ __func__, ha->pci_func));
+ qla_mdelay(__func__, 100);
+ goto qla_init_exit;
+ }
+ qla_mdelay(__func__, 100);
+ }
+ return (-1);
+ }
+
+ val = READ_OFFSET32(ha, Q8_CMDPEG_STATE);
+
+ if (val != CMDPEG_PHAN_INIT_COMPLETE) {
+ ret = qla_init_from_flash(ha);
+ qla_mdelay(__func__, 100);
+ }
+
+qla_init_exit:
+ ha->fw_ver_major = READ_OFFSET32(ha, Q8_FW_VER_MAJOR);
+ ha->fw_ver_minor = READ_OFFSET32(ha, Q8_FW_VER_MINOR);
+ ha->fw_ver_sub = READ_OFFSET32(ha, Q8_FW_VER_SUB);
+ ha->fw_ver_build = READ_OFFSET32(ha, Q8_FW_VER_BUILD);
+
+ return (ret);
+}
+
diff --git a/sys/dev/qlxgb/qla_os.c b/sys/dev/qlxgb/qla_os.c
new file mode 100644
index 000000000000..1fc30f593a1e
--- /dev/null
+++ b/sys/dev/qlxgb/qla_os.c
@@ -0,0 +1,1481 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * File: qla_os.c
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "qla_os.h"
+#include "qla_reg.h"
+#include "qla_hw.h"
+#include "qla_def.h"
+#include "qla_inline.h"
+#include "qla_ver.h"
+#include "qla_glbl.h"
+#include "qla_dbg.h"
+
+/*
+ * Some PCI Configuration Space Related Defines
+ */
+
+#ifndef PCI_VENDOR_QLOGIC
+#define PCI_VENDOR_QLOGIC 0x1077
+#endif
+
+#ifndef PCI_PRODUCT_QLOGIC_ISP8020
+#define PCI_PRODUCT_QLOGIC_ISP8020 0x8020
+#endif
+
+#define PCI_QLOGIC_ISP8020 \
+ ((PCI_PRODUCT_QLOGIC_ISP8020 << 16) | PCI_VENDOR_QLOGIC)
+
+/*
+ * static functions
+ */
+static int qla_alloc_parent_dma_tag(qla_host_t *ha);
+static void qla_free_parent_dma_tag(qla_host_t *ha);
+static int qla_alloc_xmt_bufs(qla_host_t *ha);
+static void qla_free_xmt_bufs(qla_host_t *ha);
+static int qla_alloc_rcv_bufs(qla_host_t *ha);
+static void qla_free_rcv_bufs(qla_host_t *ha);
+
+static void qla_init_ifnet(device_t dev, qla_host_t *ha);
+static int qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS);
+static void qla_release(qla_host_t *ha);
+static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
+ int error);
+static void qla_stop(qla_host_t *ha);
+static int qla_send(qla_host_t *ha, struct mbuf **m_headp);
+static void qla_tx_done(void *context, int pending);
+
+/*
+ * Hooks to the Operating Systems
+ */
+static int qla_pci_probe (device_t);
+static int qla_pci_attach (device_t);
+static int qla_pci_detach (device_t);
+
+static void qla_init(void *arg);
+static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
+static int qla_media_change(struct ifnet *ifp);
+static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
+
+static device_method_t qla_pci_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, qla_pci_probe),
+ DEVMETHOD(device_attach, qla_pci_attach),
+ DEVMETHOD(device_detach, qla_pci_detach),
+ { 0, 0 }
+};
+
+static driver_t qla_pci_driver = {
+ "ql", qla_pci_methods, sizeof (qla_host_t),
+};
+
+static devclass_t qla80xx_devclass;
+
+DRIVER_MODULE(qla80xx, pci, qla_pci_driver, qla80xx_devclass, 0, 0);
+
+MODULE_DEPEND(qla80xx, pci, 1, 1, 1);
+MODULE_DEPEND(qla80xx, ether, 1, 1, 1);
+
+MALLOC_DEFINE(M_QLA8XXXBUF, "qla80xxbuf", "Buffers for qla80xx driver");
+
+uint32_t std_replenish = 8;
+uint32_t jumbo_replenish = 2;
+uint32_t rcv_pkt_thres = 128;
+uint32_t rcv_pkt_thres_d = 32;
+uint32_t snd_pkt_thres = 16;
+uint32_t free_pkt_thres = (NUM_TX_DESCRIPTORS / 2);
+
+static char dev_str[64];
+
+/*
+ * Name: qla_pci_probe
+ * Function: Validate the PCI device to be a QLA80XX device
+ */
+static int
+qla_pci_probe(device_t dev)
+{
+ switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
+ case PCI_QLOGIC_ISP8020:
+ snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
+ "Qlogic ISP 80xx PCI CNA Adapter-Ethernet Function",
+ QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
+ QLA_VERSION_BUILD);
+ device_set_desc(dev, dev_str);
+ break;
+ default:
+ return (ENXIO);
+ }
+
+ if (bootverbose)
+ printf("%s: %s\n ", __func__, dev_str);
+
+ return (BUS_PROBE_DEFAULT);
+}
+
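+/*
+ * Name: qla_add_sysctls
+ * Function: Registers the driver sysctl nodes (statistics, debug level and
+ *	the replenish/threshold tunables).
+ */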
+static void
+qla_add_sysctls(qla_host_t *ha)
+{
+ device_t dev = ha->pci_dev;
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RD,
+ (void *)ha, 0,
+ qla_sysctl_get_stats, "I", "Statistics");
+
+ dbg_level = 0;
+ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "debug", CTLFLAG_RW,
+ &dbg_level, dbg_level, "Debug Level");
+
+ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "std_replenish", CTLFLAG_RW,
+ &std_replenish, std_replenish,
+ "Threshold for Replenishing Standard Frames");
+
+ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "jumbo_replenish", CTLFLAG_RW,
+ &jumbo_replenish, jumbo_replenish,
+ "Threshold for Replenishing Jumbo Frames");
+
+ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "rcv_pkt_thres", CTLFLAG_RW,
+ &rcv_pkt_thres, rcv_pkt_thres,
+ "Threshold for # of rcv pkts to trigger indication isr");
+
+ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "rcv_pkt_thres_d", CTLFLAG_RW,
+ &rcv_pkt_thres_d, rcv_pkt_thres_d,
+		"Threshold for # of rcv pkts to trigger deferred indication");
+
+ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "snd_pkt_thres", CTLFLAG_RW,
+ &snd_pkt_thres, snd_pkt_thres,
+ "Threshold for # of snd packets");
+
+ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "free_pkt_thres", CTLFLAG_RW,
+ &free_pkt_thres, free_pkt_thres,
+ "Threshold for # of packets to free at a time");
+
+ return;
+}
+
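+/*
+ * Name: qla_watchdog
+ * Function: Periodic callout; schedules the transmit completion task when
+ *	transmit completions or queued frames are pending, then rearms itself.
+ */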
+static void
+qla_watchdog(void *arg)
+{
+ qla_host_t *ha = arg;
+ qla_hw_t *hw;
+ struct ifnet *ifp;
+
+ hw = &ha->hw;
+ ifp = ha->ifp;
+
+ if (ha->flags.qla_watchdog_exit)
+ return;
+
+ if (!ha->flags.qla_watchdog_pause) {
+ if (qla_le32_to_host(*(hw->tx_cons)) != hw->txr_comp) {
+ taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
+ } else if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) {
+ taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
+ }
+ }
+	ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000;
+ callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
+ qla_watchdog, ha);
+}
+
+/*
+ * Name: qla_pci_attach
+ * Function: attaches the device to the operating system
+ */
+static int
+qla_pci_attach(device_t dev)
+{
+ qla_host_t *ha = NULL;
+ uint32_t rsrc_len, i;
+
+ QL_DPRINT2((dev, "%s: enter\n", __func__));
+
+ if ((ha = device_get_softc(dev)) == NULL) {
+ device_printf(dev, "cannot get softc\n");
+ return (ENOMEM);
+ }
+
+ memset(ha, 0, sizeof (qla_host_t));
+
+ if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8020) {
+ device_printf(dev, "device is not ISP8020\n");
+ return (ENXIO);
+ }
+
+ ha->pci_func = pci_get_function(dev);
+
+ ha->pci_dev = dev;
+
+ pci_enable_busmaster(dev);
+
+ ha->reg_rid = PCIR_BAR(0);
+ ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
+ RF_ACTIVE);
+
+ if (ha->pci_reg == NULL) {
+ device_printf(dev, "unable to map any ports\n");
+ goto qla_pci_attach_err;
+ }
+
+ rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
+ ha->reg_rid);
+
+ mtx_init(&ha->hw_lock, "qla80xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
+ mtx_init(&ha->tx_lock, "qla80xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);
+ mtx_init(&ha->rx_lock, "qla80xx_rx_lock", MTX_NETWORK_LOCK, MTX_DEF);
+ mtx_init(&ha->rxj_lock, "qla80xx_rxj_lock", MTX_NETWORK_LOCK, MTX_DEF);
+ ha->flags.lock_init = 1;
+
+ ha->msix_count = pci_msix_count(dev);
+
+ if (ha->msix_count < qla_get_msix_count(ha)) {
+ device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
+ ha->msix_count);
+ goto qla_pci_attach_err;
+ }
+
+ QL_DPRINT2((dev, "%s: ha %p irq %p pci_func 0x%x rsrc_count 0x%08x"
+ " msix_count 0x%x pci_reg %p\n", __func__, ha,
+ ha->irq, ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg));
+
+ ha->msix_count = qla_get_msix_count(ha);
+
+ if (pci_alloc_msix(dev, &ha->msix_count)) {
+ device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__,
+ ha->msix_count);
+ ha->msix_count = 0;
+ goto qla_pci_attach_err;
+ }
+
+ TASK_INIT(&ha->tx_task, 0, qla_tx_done, ha);
+ ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
+ taskqueue_thread_enqueue, &ha->tx_tq);
+ taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
+ device_get_nameunit(ha->pci_dev));
+
+ for (i = 0; i < ha->msix_count; i++) {
+ ha->irq_vec[i].irq_rid = i+1;
+ ha->irq_vec[i].ha = ha;
+
+ ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
+ &ha->irq_vec[i].irq_rid,
+ (RF_ACTIVE | RF_SHAREABLE));
+
+ if (ha->irq_vec[i].irq == NULL) {
+ device_printf(dev, "could not allocate interrupt\n");
+ goto qla_pci_attach_err;
+ }
+
+ if (bus_setup_intr(dev, ha->irq_vec[i].irq,
+ (INTR_TYPE_NET | INTR_MPSAFE),
+ NULL, qla_isr, &ha->irq_vec[i],
+ &ha->irq_vec[i].handle)) {
+ device_printf(dev, "could not setup interrupt\n");
+ goto qla_pci_attach_err;
+ }
+
+ TASK_INIT(&ha->irq_vec[i].rcv_task, 0, qla_rcv,\
+ &ha->irq_vec[i]);
+
+ ha->irq_vec[i].rcv_tq = taskqueue_create_fast("qla_rcvq",
+ M_NOWAIT, taskqueue_thread_enqueue,
+ &ha->irq_vec[i].rcv_tq);
+
+ taskqueue_start_threads(&ha->irq_vec[i].rcv_tq, 1, PI_NET,
+ "%s rcvq",
+ device_get_nameunit(ha->pci_dev));
+ }
+
+ qla_add_sysctls(ha);
+
+ /* add hardware specific sysctls */
+ qla_hw_add_sysctls(ha);
+
+ /* initialize hardware */
+ if (qla_init_hw(ha)) {
+ device_printf(dev, "%s: qla_init_hw failed\n", __func__);
+ goto qla_pci_attach_err;
+ }
+
+ device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
+ ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
+ ha->fw_ver_build);
+
+ //qla_get_hw_caps(ha);
+ qla_read_mac_addr(ha);
+
+ /* allocate parent dma tag */
+ if (qla_alloc_parent_dma_tag(ha)) {
+ device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
+ __func__);
+ goto qla_pci_attach_err;
+ }
+
+ /* alloc all dma buffers */
+ if (qla_alloc_dma(ha)) {
+ device_printf(dev, "%s: qla_alloc_dma failed\n", __func__);
+ goto qla_pci_attach_err;
+ }
+
+ /* create the o.s ethernet interface */
+ qla_init_ifnet(dev, ha);
+
+ ha->flags.qla_watchdog_active = 1;
+ ha->flags.qla_watchdog_pause = 1;
+
+ callout_init(&ha->tx_callout, TRUE);
+
+ /* create ioctl device interface */
+ if (qla_make_cdev(ha)) {
+ device_printf(dev, "%s: qla_make_cdev failed\n", __func__);
+ goto qla_pci_attach_err;
+ }
+
+ callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
+ qla_watchdog, ha);
+
+ QL_DPRINT2((dev, "%s: exit 0\n", __func__));
+ return (0);
+
+qla_pci_attach_err:
+
+ qla_release(ha);
+
+ QL_DPRINT2((dev, "%s: exit ENXIO\n", __func__));
+ return (ENXIO);
+}
+
+/*
+ * Name: qla_pci_detach
+ * Function: Unhooks the device from the operating system
+ */
+static int
+qla_pci_detach(device_t dev)
+{
+ qla_host_t *ha = NULL;
+ struct ifnet *ifp;
+ int i;
+
+ QL_DPRINT2((dev, "%s: enter\n", __func__));
+
+ if ((ha = device_get_softc(dev)) == NULL) {
+ device_printf(dev, "cannot get softc\n");
+ return (ENOMEM);
+ }
+
+ ifp = ha->ifp;
+
+ QLA_LOCK(ha, __func__);
+ qla_stop(ha);
+ QLA_UNLOCK(ha, __func__);
+
+ if (ha->tx_tq) {
+ taskqueue_drain(ha->tx_tq, &ha->tx_task);
+ taskqueue_free(ha->tx_tq);
+ }
+
+ for (i = 0; i < ha->msix_count; i++) {
+ taskqueue_drain(ha->irq_vec[i].rcv_tq,
+ &ha->irq_vec[i].rcv_task);
+ taskqueue_free(ha->irq_vec[i].rcv_tq);
+ }
+
+ qla_release(ha);
+
+ QL_DPRINT2((dev, "%s: exit\n", __func__));
+
+ return (0);
+}
+
+/*
+ * SYSCTL Related Callbacks
+ */
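+
+/*
+ * Name: qla_sysctl_get_stats
+ * Function: Handler for the "stats" sysctl node.
+ */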
+static int
+qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS)
+{
+ int err, ret = 0;
+ qla_host_t *ha;
+
+ err = sysctl_handle_int(oidp, &ret, 0, req);
+
+ if (err)
+ return (err);
+
+ ha = (qla_host_t *)arg1;
+ //qla_get_stats(ha);
+ QL_DPRINT2((ha->pci_dev, "%s: called ret %d\n", __func__, ret));
+ return (err);
+}
+
+
+/*
+ * Name: qla_release
+ * Function: Releases the resources allocated for the device
+ */
+static void
+qla_release(qla_host_t *ha)
+{
+ device_t dev;
+ int i;
+
+ dev = ha->pci_dev;
+
+ qla_del_cdev(ha);
+
+ if (ha->flags.qla_watchdog_active)
+ ha->flags.qla_watchdog_exit = 1;
+
+ callout_stop(&ha->tx_callout);
+ qla_mdelay(__func__, 100);
+
+ if (ha->ifp != NULL)
+ ether_ifdetach(ha->ifp);
+
+ qla_free_dma(ha);
+ qla_free_parent_dma_tag(ha);
+
+ for (i = 0; i < ha->msix_count; i++) {
+ if (ha->irq_vec[i].handle)
+ (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
+ ha->irq_vec[i].handle);
+ if (ha->irq_vec[i].irq)
+ (void) bus_release_resource(dev, SYS_RES_IRQ,
+ ha->irq_vec[i].irq_rid,
+ ha->irq_vec[i].irq);
+ }
+ if (ha->msix_count)
+ pci_release_msi(dev);
+
+ if (ha->flags.lock_init) {
+ mtx_destroy(&ha->tx_lock);
+ mtx_destroy(&ha->rx_lock);
+ mtx_destroy(&ha->rxj_lock);
+ mtx_destroy(&ha->hw_lock);
+ }
+
+ if (ha->pci_reg)
+ (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
+ ha->pci_reg);
+}
+
+/*
+ * DMA Related Functions
+ */
+
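+/*
+ * Name: qla_dmamap_callback
+ * Function: bus_dmamap_load callback; returns the physical address of the
+ *	single mapped segment through the caller supplied argument.
+ */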
+static void
+qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+ *((bus_addr_t *)arg) = 0;
+
+ if (error) {
+ printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
+ return;
+ }
+
+ QL_ASSERT((nsegs == 1), ("%s: %d segments returned!", __func__, nsegs));
+
+ *((bus_addr_t *)arg) = segs[0].ds_addr;
+
+ return;
+}
+
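+/*
+ * Name: qla_alloc_dmabuf
+ * Function: Creates a DMA tag, allocates a DMA'able buffer and loads it to
+ *	obtain its physical address.
+ */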
+int
+qla_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
+{
+ int ret = 0;
+ device_t dev;
+ bus_addr_t b_addr;
+
+ dev = ha->pci_dev;
+
+ QL_DPRINT2((dev, "%s: enter\n", __func__));
+
+ ret = bus_dma_tag_create(
+ ha->parent_tag,/* parent */
+ dma_buf->alignment,
+ ((bus_size_t)(1ULL << 32)),/* boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ dma_buf->size, /* maxsize */
+ 1, /* nsegments */
+ dma_buf->size, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &dma_buf->dma_tag);
+
+ if (ret) {
+ device_printf(dev, "%s: could not create dma tag\n", __func__);
+ goto qla_alloc_dmabuf_exit;
+ }
+ ret = bus_dmamem_alloc(dma_buf->dma_tag,
+ (void **)&dma_buf->dma_b,
+ (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
+ &dma_buf->dma_map);
+ if (ret) {
+ bus_dma_tag_destroy(dma_buf->dma_tag);
+ device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
+ goto qla_alloc_dmabuf_exit;
+ }
+
+ ret = bus_dmamap_load(dma_buf->dma_tag,
+ dma_buf->dma_map,
+ dma_buf->dma_b,
+ dma_buf->size,
+ qla_dmamap_callback,
+ &b_addr, BUS_DMA_NOWAIT);
+
+ if (ret || !b_addr) {
+ bus_dma_tag_destroy(dma_buf->dma_tag);
+ bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
+ dma_buf->dma_map);
+ ret = -1;
+ goto qla_alloc_dmabuf_exit;
+ }
+
+ dma_buf->dma_addr = b_addr;
+
+qla_alloc_dmabuf_exit:
+ QL_DPRINT2((dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
+ __func__, ret, (void *)dma_buf->dma_tag,
+ (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
+ dma_buf->size));
+
+ return ret;
+}
+
+void
+qla_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
+{
+ bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
+ bus_dma_tag_destroy(dma_buf->dma_tag);
+}
+
+static int
+qla_alloc_parent_dma_tag(qla_host_t *ha)
+{
+ int ret;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ /*
+ * Allocate parent DMA Tag
+ */
+ ret = bus_dma_tag_create(
+ bus_get_dma_tag(dev), /* parent */
+ 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
+ 0, /* nsegments */
+ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &ha->parent_tag);
+
+ if (ret) {
+ device_printf(dev, "%s: could not create parent dma tag\n",
+ __func__);
+ return (-1);
+ }
+
+ ha->flags.parent_tag = 1;
+
+ return (0);
+}
+
+static void
+qla_free_parent_dma_tag(qla_host_t *ha)
+{
+ if (ha->flags.parent_tag) {
+ bus_dma_tag_destroy(ha->parent_tag);
+ ha->flags.parent_tag = 0;
+ }
+}
+
+/*
+ * Name: qla_init_ifnet
+ * Function: Creates the Network Device Interface and Registers it with the O.S.
+ */
+
+static void
+qla_init_ifnet(device_t dev, qla_host_t *ha)
+{
+ struct ifnet *ifp;
+
+ QL_DPRINT2((dev, "%s: enter\n", __func__));
+
+ ifp = ha->ifp = if_alloc(IFT_ETHER);
+
+ if (ifp == NULL)
+ panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
+
+ if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_baudrate = (1 * 1000 * 1000 *1000);
+ ifp->if_init = qla_init;
+ ifp->if_softc = ha;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_ioctl = qla_ioctl;
+ ifp->if_start = qla_start;
+
+ IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
+ ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
+ IFQ_SET_READY(&ifp->if_snd);
+
+ ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+ ether_ifattach(ifp, qla_get_mac_addr(ha));
+
+ ifp->if_capabilities = IFCAP_HWCSUM |
+ IFCAP_TSO4 |
+ IFCAP_TSO6 |
+ IFCAP_JUMBO_MTU;
+
+ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
+
+#if defined(__FreeBSD_version) && (__FreeBSD_version < 900002)
+ ifp->if_timer = 0;
+ ifp->if_watchdog = NULL;
+#endif /* #if defined(__FreeBSD_version) && (__FreeBSD_version < 900002) */
+
+ ifp->if_capenable = ifp->if_capabilities;
+
+ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
+
+ ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);
+
+ ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
+ NULL);
+ ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
+
+ ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
+
+ QL_DPRINT2((dev, "%s: exit\n", __func__));
+
+ return;
+}
+
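+/*
+ * Name: qla_init_locked
+ * Function: Brings up the interface: allocates transmit/receive buffers,
+ *	configures LRO and initializes the hardware interface. Called with
+ *	the adapter lock held.
+ */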
+static void
+qla_init_locked(qla_host_t *ha)
+{
+ struct ifnet *ifp = ha->ifp;
+
+ qla_stop(ha);
+
+ if (qla_alloc_xmt_bufs(ha) != 0)
+ return;
+
+ if (qla_alloc_rcv_bufs(ha) != 0)
+ return;
+
+ if (qla_config_lro(ha))
+ return;
+
+ bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);
+
+ ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;
+
+ ha->flags.stop_rcv = 0;
+ if (qla_init_hw_if(ha) == 0) {
+ ifp = ha->ifp;
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ ha->flags.qla_watchdog_pause = 0;
+ }
+
+ return;
+}
+
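+/*
+ * Name: qla_init
+ * Function: if_init entry point; acquires the adapter lock and calls
+ *	qla_init_locked().
+ */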
+static void
+qla_init(void *arg)
+{
+ qla_host_t *ha;
+
+ ha = (qla_host_t *)arg;
+
+ QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
+
+ QLA_LOCK(ha, __func__);
+ qla_init_locked(ha);
+ QLA_UNLOCK(ha, __func__);
+
+ QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
+}
+
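+/*
+ * Name: qla_set_multi
+ * Function: Builds the multicast address list from the interface and
+ *	programs it into the hardware.
+ */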
+static void
+qla_set_multi(qla_host_t *ha, uint32_t add_multi)
+{
+ uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
+ struct ifmultiaddr *ifma;
+ int mcnt = 0;
+ struct ifnet *ifp = ha->ifp;
+
+ IF_ADDR_LOCK(ifp);
+
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+
+ if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
+ break;
+
+ bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
+ &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);
+
+ mcnt++;
+ }
+
+ IF_ADDR_UNLOCK(ifp);
+
+ qla_hw_set_multi(ha, mta, mcnt, add_multi);
+
+ return;
+}
+
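+/*
+ * Name: qla_ioctl
+ * Function: Interface ioctl entry point; handles address, MTU, flags,
+ *	multicast, media and capability requests.
+ */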
+static int
+qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ int ret = 0;
+ struct ifreq *ifr = (struct ifreq *)data;
+ struct ifaddr *ifa = (struct ifaddr *)data;
+ qla_host_t *ha;
+
+ ha = (qla_host_t *)ifp->if_softc;
+
+ switch (cmd) {
+ case SIOCSIFADDR:
+ QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
+ __func__, cmd));
+
+ if (ifa->ifa_addr->sa_family == AF_INET) {
+ ifp->if_flags |= IFF_UP;
+ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ QLA_LOCK(ha, __func__);
+ qla_init_locked(ha);
+ QLA_UNLOCK(ha, __func__);
+ }
+ QL_DPRINT4((ha->pci_dev,
+ "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
+ __func__, cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr)));
+
+ arp_ifinit(ifp, ifa);
+ if (ntohl(IA_SIN(ifa)->sin_addr.s_addr) != INADDR_ANY) {
+ qla_config_ipv4_addr(ha,
+ (IA_SIN(ifa)->sin_addr.s_addr));
+ }
+ } else {
+ ether_ioctl(ifp, cmd, data);
+ }
+ break;
+
+ case SIOCSIFMTU:
+ QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
+ __func__, cmd));
+
+ if (ifr->ifr_mtu > QLA_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
+ ret = EINVAL;
+ } else {
+ QLA_LOCK(ha, __func__);
+ ifp->if_mtu = ifr->ifr_mtu;
+ ha->max_frame_size =
+ ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ ret = qla_set_max_mtu(ha, ha->max_frame_size,
+ (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);
+ }
+ QLA_UNLOCK(ha, __func__);
+
+ if (ret)
+ ret = EINVAL;
+ }
+
+ break;
+
+ case SIOCSIFFLAGS:
+ QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
+ __func__, cmd));
+
+ if (ifp->if_flags & IFF_UP) {
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ if ((ifp->if_flags ^ ha->if_flags) &
+ IFF_PROMISC) {
+ qla_set_promisc(ha);
+ } else if ((ifp->if_flags ^ ha->if_flags) &
+ IFF_ALLMULTI) {
+ qla_set_allmulti(ha);
+ }
+ } else {
+ QLA_LOCK(ha, __func__);
+ qla_init_locked(ha);
+ ha->max_frame_size = ifp->if_mtu +
+ ETHER_HDR_LEN + ETHER_CRC_LEN;
+ ret = qla_set_max_mtu(ha, ha->max_frame_size,
+ (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);
+ QLA_UNLOCK(ha, __func__);
+ }
+ } else {
+ QLA_LOCK(ha, __func__);
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ qla_stop(ha);
+ ha->if_flags = ifp->if_flags;
+ QLA_UNLOCK(ha, __func__);
+ }
+ break;
+
+ case SIOCADDMULTI:
+ QL_DPRINT4((ha->pci_dev,
+ "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));
+
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ qla_set_multi(ha, 1);
+ }
+ break;
+
+ case SIOCDELMULTI:
+ QL_DPRINT4((ha->pci_dev,
+ "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));
+
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ qla_set_multi(ha, 0);
+ }
+ break;
+
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ QL_DPRINT4((ha->pci_dev,
+ "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
+ __func__, cmd));
+ ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
+ break;
+
+ case SIOCSIFCAP:
+ {
+ int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+
+ QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
+ __func__, cmd));
+
+ if (mask & IFCAP_HWCSUM)
+ ifp->if_capenable ^= IFCAP_HWCSUM;
+ if (mask & IFCAP_TSO4)
+ ifp->if_capenable ^= IFCAP_TSO4;
+ if (mask & IFCAP_TSO6)
+ ifp->if_capenable ^= IFCAP_TSO6;
+ if (mask & IFCAP_VLAN_HWTAGGING)
+ ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+
+ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+ qla_init(ha);
+
+ VLAN_CAPABILITIES(ifp);
+ break;
+ }
+
+ default:
+ QL_DPRINT4((ha->pci_dev, "%s: default (0x%lx)\n",
+ __func__, cmd));
+ ret = ether_ioctl(ifp, cmd, data);
+ break;
+ }
+
+ return (ret);
+}
+
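+/*
+ * Name: qla_media_change
+ * Function: ifmedia change callback; only Ethernet media is accepted.
+ */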
+static int
+qla_media_change(struct ifnet *ifp)
+{
+ qla_host_t *ha;
+ struct ifmedia *ifm;
+ int ret = 0;
+
+ ha = (qla_host_t *)ifp->if_softc;
+
+ QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
+
+ ifm = &ha->media;
+
+ if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+ ret = EINVAL;
+
+ QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
+
+ return (ret);
+}
+
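+/*
+ * Name: qla_media_status
+ * Function: ifmedia status callback; reports the current link state.
+ */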
+static void
+qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+ qla_host_t *ha;
+
+ ha = (qla_host_t *)ifp->if_softc;
+
+ QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
+
+ ifmr->ifm_status = IFM_AVALID;
+ ifmr->ifm_active = IFM_ETHER;
+
+ qla_update_link_state(ha);
+ if (ha->hw.flags.link_up) {
+ ifmr->ifm_status |= IFM_ACTIVE;
+ ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
+ }
+
+ QL_DPRINT2((ha->pci_dev, "%s: exit (%s)\n", __func__,\
+ (ha->hw.flags.link_up ? "link_up" : "link_down")));
+
+ return;
+}
+
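+/*
+ * Name: qla_start
+ * Function: if_start entry point; dequeues frames from the interface send
+ *	queue and hands them to qla_send() while the link is up.
+ */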
+void
+qla_start(struct ifnet *ifp)
+{
+ struct mbuf *m_head;
+ qla_host_t *ha = (qla_host_t *)ifp->if_softc;
+
+ QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));
+
+ if (!mtx_trylock(&ha->tx_lock)) {
+ QL_DPRINT8((ha->pci_dev,
+ "%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
+ return;
+ }
+
+ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+ IFF_DRV_RUNNING) {
+ QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
+ QLA_TX_UNLOCK(ha);
+ return;
+ }
+
+ if (!ha->watchdog_ticks)
+ qla_update_link_state(ha);
+
+ if (!ha->hw.flags.link_up) {
+ QL_DPRINT8((ha->pci_dev, "%s: link down\n", __func__));
+ QLA_TX_UNLOCK(ha);
+ return;
+ }
+
+ while (ifp->if_snd.ifq_head != NULL) {
+ IF_DEQUEUE(&ifp->if_snd, m_head);
+
+ if (m_head == NULL) {
+ QL_DPRINT8((ha->pci_dev, "%s: m_head == NULL\n",
+ __func__));
+ break;
+ }
+
+ if (qla_send(ha, &m_head)) {
+ if (m_head == NULL)
+ break;
+ QL_DPRINT8((ha->pci_dev, "%s: PREPEND\n", __func__));
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ IF_PREPEND(&ifp->if_snd, m_head);
+ break;
+ }
+ /* Send a copy of the frame to the BPF listener */
+ ETHER_BPF_MTAP(ifp, m_head);
+ }
+ QLA_TX_UNLOCK(ha);
+ QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
+ return;
+}
+
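+/*
+ * Name: qla_send
+ * Function: Maps a transmit mbuf chain for DMA, defragmenting it if needed,
+ *	and queues it to the hardware transmit ring.
+ */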
+static int
+qla_send(qla_host_t *ha, struct mbuf **m_headp)
+{
+ bus_dma_segment_t segs[QLA_MAX_SEGMENTS];
+ bus_dmamap_t map;
+ int nsegs;
+ int ret = -1;
+ uint32_t tx_idx;
+ struct mbuf *m_head = *m_headp;
+
+ QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));
+
+ if ((ret = bus_dmamap_create(ha->tx_tag, BUS_DMA_NOWAIT, &map))) {
+ ha->err_tx_dmamap_create++;
+ device_printf(ha->pci_dev,
+ "%s: bus_dmamap_create failed[%d, %d]\n",
+ __func__, ret, m_head->m_pkthdr.len);
+ return (ret);
+ }
+
+ ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
+ BUS_DMA_NOWAIT);
+
+ if ((ret == EFBIG) ||
+ ((nsegs > Q8_TX_MAX_SEGMENTS) &&
+ (((m_head->m_pkthdr.csum_flags & CSUM_TSO) == 0) ||
+ (m_head->m_pkthdr.len <= ha->max_frame_size)))) {
+
+ struct mbuf *m;
+
+ QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
+ m_head->m_pkthdr.len));
+
+ m = m_defrag(m_head, M_DONTWAIT);
+ if (m == NULL) {
+ ha->err_tx_defrag++;
+ m_freem(m_head);
+ *m_headp = NULL;
+ device_printf(ha->pci_dev,
+ "%s: m_defrag() = NULL [%d]\n",
+ __func__, ret);
+ return (ENOBUFS);
+ }
+ m_head = m;
+
+ if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
+ segs, &nsegs, BUS_DMA_NOWAIT))) {
+
+ ha->err_tx_dmamap_load++;
+
+ device_printf(ha->pci_dev,
+ "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
+ __func__, ret, m_head->m_pkthdr.len);
+
+ bus_dmamap_destroy(ha->tx_tag, map);
+ if (ret != ENOMEM) {
+ m_freem(m_head);
+ *m_headp = NULL;
+ }
+ return (ret);
+ }
+ } else if (ret) {
+ ha->err_tx_dmamap_load++;
+
+ device_printf(ha->pci_dev,
+ "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
+ __func__, ret, m_head->m_pkthdr.len);
+
+ bus_dmamap_destroy(ha->tx_tag, map);
+
+ if (ret != ENOMEM) {
+ m_freem(m_head);
+ *m_headp = NULL;
+ }
+ return (ret);
+ }
+
+ QL_ASSERT((nsegs != 0), ("qla_send: empty packet"));
+
+ bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
+
+ if (!(ret = qla_hw_send(ha, segs, nsegs, &tx_idx, m_head))) {
+ ha->tx_buf[tx_idx].m_head = m_head;
+ ha->tx_buf[tx_idx].map = map;
+ } else {
+ if (ret == EINVAL) {
+ m_freem(m_head);
+ *m_headp = NULL;
+ }
+ }
+
+ QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
+ return (ret);
+}
+
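+/*
+ * Name: qla_stop
+ * Function: Stops the receive path, tears down the hardware interface and
+ *	frees the transmit and receive buffers.
+ */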
+static void
+qla_stop(qla_host_t *ha)
+{
+ struct ifnet *ifp = ha->ifp;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ ha->flags.qla_watchdog_pause = 1;
+ qla_mdelay(__func__, 100);
+
+ ha->flags.stop_rcv = 1;
+ qla_hw_stop_rcv(ha);
+
+ qla_del_hw_if(ha);
+
+ qla_free_lro(ha);
+
+ qla_free_xmt_bufs(ha);
+ qla_free_rcv_bufs(ha);
+
+ ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
+
+ return;
+}
+
+/*
+ * Buffer Management Functions for Transmit and Receive Rings
+ */
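+
+/*
+ * Name: qla_alloc_xmt_bufs
+ * Function: Creates the DMA tag used to map transmit buffers.
+ */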
+static int
+qla_alloc_xmt_bufs(qla_host_t *ha)
+{
+ if (bus_dma_tag_create(NULL, /* parent */
+ 1, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ QLA_MAX_TSO_FRAME_SIZE, /* maxsize */
+ QLA_MAX_SEGMENTS, /* nsegments */
+ PAGE_SIZE, /* maxsegsize */
+ BUS_DMA_ALLOCNOW, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &ha->tx_tag)) {
+ device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
+ __func__);
+ return (ENOMEM);
+ }
+ bzero((void *)ha->tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
+
+ return 0;
+}
+
+/*
+ * Release mbuf after it has been sent on the wire
+ */
+static void
+qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
+{
+ QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
+
+ if (txb->m_head) {
+
+ bus_dmamap_unload(ha->tx_tag, txb->map);
+ bus_dmamap_destroy(ha->tx_tag, txb->map);
+
+ m_freem(txb->m_head);
+ txb->m_head = NULL;
+ }
+
+ QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
+}
+
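+/*
+ * Name: qla_free_xmt_bufs
+ * Function: Frees any pending transmit mbufs and destroys the transmit
+ *	DMA tag.
+ */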
+static void
+qla_free_xmt_bufs(qla_host_t *ha)
+{
+ int i;
+
+ for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
+ qla_clear_tx_buf(ha, &ha->tx_buf[i]);
+
+ if (ha->tx_tag != NULL) {
+ bus_dma_tag_destroy(ha->tx_tag);
+ ha->tx_tag = NULL;
+ }
+ bzero((void *)ha->tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
+
+ return;
+}
+
+
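+/*
+ * Name: qla_alloc_rcv_bufs
+ * Function: Allocates the standard and jumbo receive buffers, maps them for
+ *	DMA and posts them to the hardware receive rings.
+ */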
+static int
+qla_alloc_rcv_bufs(qla_host_t *ha)
+{
+ int i, j, ret = 0;
+ qla_rx_buf_t *rxb;
+
+ if (bus_dma_tag_create(NULL, /* parent */
+ 1, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MJUM9BYTES, /* maxsize */
+ 1, /* nsegments */
+ MJUM9BYTES, /* maxsegsize */
+ BUS_DMA_ALLOCNOW, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &ha->rx_tag)) {
+
+ device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
+ __func__);
+
+ return (ENOMEM);
+ }
+
+ bzero((void *)ha->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
+ bzero((void *)ha->rx_jbuf,
+ (sizeof(qla_rx_buf_t) * NUM_RX_JUMBO_DESCRIPTORS));
+
+ for (i = 0; i < MAX_SDS_RINGS; i++) {
+ ha->hw.sds[i].sdsr_next = 0;
+ ha->hw.sds[i].rxb_free = NULL;
+ ha->hw.sds[i].rx_free = 0;
+ ha->hw.sds[i].rxjb_free = NULL;
+ ha->hw.sds[i].rxj_free = 0;
+ }
+
+ for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
+
+ rxb = &ha->rx_buf[i];
+
+ ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);
+
+ if (ret) {
+ device_printf(ha->pci_dev,
+ "%s: dmamap[%d] failed\n", __func__, i);
+
+ for (j = 0; j < i; j++) {
+ bus_dmamap_destroy(ha->rx_tag,
+ ha->rx_buf[j].map);
+ }
+ goto qla_alloc_rcv_bufs_failed;
+ }
+ }
+
+ qla_init_hw_rcv_descriptors(ha, RDS_RING_INDEX_NORMAL);
+
+ for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
+ rxb = &ha->rx_buf[i];
+ rxb->handle = i;
+ if (!(ret = qla_get_mbuf(ha, rxb, NULL, 0))) {
+ /*
+ * set the physical address in the corresponding
+ * descriptor entry in the receive ring/queue for the
+ * hba
+ */
+ qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_NORMAL, i,
+ rxb->handle, rxb->paddr,
+ (rxb->m_head)->m_pkthdr.len);
+ } else {
+ device_printf(ha->pci_dev,
+ "%s: qla_get_mbuf [standard(%d)] failed\n",
+ __func__, i);
+ bus_dmamap_destroy(ha->rx_tag, rxb->map);
+ goto qla_alloc_rcv_bufs_failed;
+ }
+ }
+
+
+ for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
+
+ rxb = &ha->rx_jbuf[i];
+
+ ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);
+
+ if (ret) {
+ device_printf(ha->pci_dev,
+ "%s: dmamap[%d] failed\n", __func__, i);
+
+ for (j = 0; j < i; j++) {
+ bus_dmamap_destroy(ha->rx_tag,
+ ha->rx_jbuf[j].map);
+ }
+ goto qla_alloc_rcv_bufs_failed;
+ }
+ }
+
+ qla_init_hw_rcv_descriptors(ha, RDS_RING_INDEX_JUMBO);
+
+ for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
+ rxb = &ha->rx_jbuf[i];
+ rxb->handle = i;
+ if (!(ret = qla_get_mbuf(ha, rxb, NULL, 1))) {
+ /*
+ * set the physical address in the corresponding
+ * descriptor entry in the receive ring/queue for the
+ * hba
+ */
+ qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_JUMBO, i,
+ rxb->handle, rxb->paddr,
+ (rxb->m_head)->m_pkthdr.len);
+ } else {
+ device_printf(ha->pci_dev,
+ "%s: qla_get_mbuf [jumbo(%d)] failed\n",
+ __func__, i);
+ bus_dmamap_destroy(ha->rx_tag, rxb->map);
+ goto qla_alloc_rcv_bufs_failed;
+ }
+ }
+
+ return (0);
+
+qla_alloc_rcv_bufs_failed:
+ qla_free_rcv_bufs(ha);
+ return (ret);
+}
+
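+/*
+ * Name: qla_free_rcv_bufs
+ * Function: Unmaps and frees all standard and jumbo receive buffers and
+ *	destroys the receive DMA tag.
+ */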
+static void
+qla_free_rcv_bufs(qla_host_t *ha)
+{
+ int i;
+ qla_rx_buf_t *rxb;
+
+ for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
+ rxb = &ha->rx_buf[i];
+ if (rxb->m_head != NULL) {
+ bus_dmamap_unload(ha->rx_tag, rxb->map);
+ bus_dmamap_destroy(ha->rx_tag, rxb->map);
+ m_freem(rxb->m_head);
+ rxb->m_head = NULL;
+ }
+ }
+
+ for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
+ rxb = &ha->rx_jbuf[i];
+ if (rxb->m_head != NULL) {
+ bus_dmamap_unload(ha->rx_tag, rxb->map);
+ bus_dmamap_destroy(ha->rx_tag, rxb->map);
+ m_freem(rxb->m_head);
+ rxb->m_head = NULL;
+ }
+ }
+
+ if (ha->rx_tag != NULL) {
+ bus_dma_tag_destroy(ha->rx_tag);
+ ha->rx_tag = NULL;
+ }
+
+ bzero((void *)ha->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
+ bzero((void *)ha->rx_jbuf,
+ (sizeof(qla_rx_buf_t) * NUM_RX_JUMBO_DESCRIPTORS));
+
+ for (i = 0; i < MAX_SDS_RINGS; i++) {
+ ha->hw.sds[i].sdsr_next = 0;
+ ha->hw.sds[i].rxb_free = NULL;
+ ha->hw.sds[i].rx_free = 0;
+ ha->hw.sds[i].rxjb_free = NULL;
+ ha->hw.sds[i].rxj_free = 0;
+ }
+
+ return;
+}
+
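+/*
+ * Name: qla_get_mbuf
+ * Function: Allocates (or reuses) a standard or jumbo receive mbuf, aligns
+ *	its data on an 8 byte boundary and loads it for DMA.
+ */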
+int
+qla_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp,
+ uint32_t jumbo)
+{
+ register struct mbuf *mp = nmp;
+ struct ifnet *ifp;
+ int ret = 0;
+ uint32_t offset;
+
+ QL_DPRINT2((ha->pci_dev, "%s: jumbo(0x%x) enter\n", __func__, jumbo));
+
+ ifp = ha->ifp;
+
+ if (mp == NULL) {
+
+ if (!jumbo) {
+ mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+
+ if (mp == NULL) {
+ ha->err_m_getcl++;
+ ret = ENOBUFS;
+ device_printf(ha->pci_dev,
+ "%s: m_getcl failed\n", __func__);
+ goto exit_qla_get_mbuf;
+ }
+ mp->m_len = mp->m_pkthdr.len = MCLBYTES;
+ } else {
+ mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
+ MJUM9BYTES);
+ if (mp == NULL) {
+ ha->err_m_getjcl++;
+ ret = ENOBUFS;
+ device_printf(ha->pci_dev,
+ "%s: m_getjcl failed\n", __func__);
+ goto exit_qla_get_mbuf;
+ }
+ mp->m_len = mp->m_pkthdr.len = MJUM9BYTES;
+ }
+ } else {
+ if (!jumbo)
+ mp->m_len = mp->m_pkthdr.len = MCLBYTES;
+ else
+ mp->m_len = mp->m_pkthdr.len = MJUM9BYTES;
+
+ mp->m_data = mp->m_ext.ext_buf;
+ mp->m_next = NULL;
+ }
+
+
+ offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
+ if (offset) {
+ offset = 8 - offset;
+ m_adj(mp, offset);
+ }
+
+ /*
+ * Using memory from the mbuf cluster pool, invoke the bus_dma
+ * machinery to arrange the memory mapping.
+ */
+ ret = bus_dmamap_load(ha->rx_tag, rxb->map,
+ mtod(mp, void *), mp->m_len,
+ qla_dmamap_callback, &rxb->paddr,
+ BUS_DMA_NOWAIT);
+ if (ret || !rxb->paddr) {
+ m_free(mp);
+ rxb->m_head = NULL;
+ device_printf(ha->pci_dev,
+ "%s: bus_dmamap_load failed\n", __func__);
+ ret = -1;
+ goto exit_qla_get_mbuf;
+ }
+ rxb->m_head = mp;
+ bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);
+
+exit_qla_get_mbuf:
+ QL_DPRINT2((ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
+ return (ret);
+}
+
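+/*
+ * Name: qla_tx_done
+ * Function: Transmit completion task; reclaims completed transmit buffers
+ *	and restarts transmission.
+ */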
+static void
+qla_tx_done(void *context, int pending)
+{
+ qla_host_t *ha = context;
+
+ qla_hw_tx_done(ha);
+ qla_start(ha->ifp);
+}
+
diff --git a/sys/dev/qlxgb/qla_os.h b/sys/dev/qlxgb/qla_os.h
new file mode 100644
index 000000000000..955be5d24be1
--- /dev/null
+++ b/sys/dev/qlxgb/qla_os.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * File: qla_os.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#ifndef _QLA_OS_H_
+#define _QLA_OS_H_
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/mbuf.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/kernel.h>
+#include <sys/sockio.h>
+#include <sys/types.h>
+#include <machine/atomic.h>
+#include <sys/conf.h>
+
+#if __FreeBSD_version < 700112
+#error FreeBSD Version not supported - use version >= 700112
+#endif
+
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/ethernet.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/bpf.h>
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp.h>
+#include <netinet/udp.h>
+#include <netinet/in_var.h>
+#include <netinet/tcp_lro.h>
+
+#include <sys/bus.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/proc.h>
+#include <sys/sysctl.h>
+#include <sys/endian.h>
+#include <sys/taskqueue.h>
+#include <sys/pcpu.h>
+
+#include <sys/unistd.h>
+#include <sys/kthread.h>
+
+#define QLA_USEC_DELAY(usec) DELAY(usec)
+
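+/*
+ * Convert a millisecond interval into kernel ticks (e.g. for pause(9)).
+ * A negative result from tvtohz() is clamped to INT_MAX and a zero
+ * result is rounded up to one tick.
+ */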
+static __inline int qla_ms_to_hz(int ms)
+{
+ int qla_hz;
+ struct timeval t;
+
+ t.tv_sec = ms / 1000;
+ t.tv_usec = (ms % 1000) * 1000;
+
+ qla_hz = tvtohz(&t);
+
+ if (qla_hz < 0)
+ qla_hz = 0x7fffffff;
+ if (!qla_hz)
+ qla_hz = 1;
+
+ return (qla_hz);
+}
+
+static __inline int qla_sec_to_hz(int sec)
+{
+ struct timeval t;
+
+ t.tv_sec = sec;
+ t.tv_usec = 0;
+
+ return (tvtohz(&t));
+}
+
+
+#define qla_host_to_le16(x) htole16(x)
+#define qla_host_to_le32(x) htole32(x)
+#define qla_host_to_le64(x) htole64(x)
+#define qla_host_to_be16(x) htobe16(x)
+#define qla_host_to_be32(x) htobe32(x)
+#define qla_host_to_be64(x) htobe64(x)
+
+#define qla_le16_to_host(x) le16toh(x)
+#define qla_le32_to_host(x) le32toh(x)
+#define qla_le64_to_host(x) le64toh(x)
+#define qla_be16_to_host(x) be16toh(x)
+#define qla_be32_to_host(x) be32toh(x)
+#define qla_be64_to_host(x) be64toh(x)
+
+MALLOC_DECLARE(M_QLA8XXXBUF);
+
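+/*
+ * Sleep for 'msecs' milliseconds; 'fn' is the wait message passed to
+ * pause(9).  While the system is still cold (too early in boot to
+ * sleep), busy-wait with DELAY() instead.
+ */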
+#define qla_mdelay(fn, msecs) \
+ {\
+ if (cold) \
+ DELAY((msecs * 1000)); \
+ else \
+ pause(fn, qla_ms_to_hz(msecs)); \
+ }
+
+/*
+ * Locks
+ */
+#define QLA_LOCK(ha, str) qla_lock(ha, str);
+#define QLA_UNLOCK(ha, str) qla_unlock(ha, str)
+
+#define QLA_TX_LOCK(ha) mtx_lock(&ha->tx_lock);
+#define QLA_TX_UNLOCK(ha) mtx_unlock(&ha->tx_lock);
+
+#define QLA_RX_LOCK(ha) mtx_lock(&ha->rx_lock);
+#define QLA_RX_UNLOCK(ha) mtx_unlock(&ha->rx_lock);
+
+#define QLA_RXJ_LOCK(ha) mtx_lock(&ha->rxj_lock);
+#define QLA_RXJ_UNLOCK(ha) mtx_unlock(&ha->rxj_lock);
+
+/*
+ * structure encapsulating a DMA buffer
+ */
+struct qla_dma {
+ bus_size_t alignment; /* required buffer alignment */
+ uint32_t size; /* buffer size in bytes */
+ void *dma_b; /* kernel virtual address of the buffer */
+ bus_addr_t dma_addr; /* bus (DMA) address of the buffer */
+ bus_dmamap_t dma_map; /* busdma map backing the buffer */
+ bus_dma_tag_t dma_tag; /* busdma tag the buffer was created with */
+};
+typedef struct qla_dma qla_dma_t;
+
+#define QL_ASSERT(x, y) if (!(x)) panic y
+
+#endif /* #ifndef _QLA_OS_H_ */
diff --git a/sys/dev/qlxgb/qla_reg.h b/sys/dev/qlxgb/qla_reg.h
new file mode 100644
index 000000000000..2f190f3e26fe
--- /dev/null
+++ b/sys/dev/qlxgb/qla_reg.h
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * File: qla_reg.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#ifndef _QLA_REG_H_
+#define _QLA_REG_H_
+
+/*
+ * Begin Definitions for QLA82xx Registers
+ */
+
+/*
+ * Register offsets for QLA8022
+ */
+
+/******************************
+ * PCIe Registers
+ ******************************/
+#define Q8_CRB_WINDOW_2M 0x130060
+
+#define Q8_INT_VECTOR 0x130100
+#define Q8_INT_MASK 0x130104
+
+#define Q8_INT_TARGET_STATUS_F0 0x130118
+#define Q8_INT_TARGET_MASK_F0 0x130128
+#define Q8_INT_TARGET_STATUS_F1 0x130160
+#define Q8_INT_TARGET_MASK_F1 0x130170
+#define Q8_INT_TARGET_STATUS_F2 0x130164
+#define Q8_INT_TARGET_MASK_F2 0x130174
+#define Q8_INT_TARGET_STATUS_F3 0x130168
+#define Q8_INT_TARGET_MASK_F3 0x130178
+#define Q8_INT_TARGET_STATUS_F4 0x130360
+#define Q8_INT_TARGET_MASK_F4 0x130370
+#define Q8_INT_TARGET_STATUS_F5 0x130364
+#define Q8_INT_TARGET_MASK_F5 0x130374
+#define Q8_INT_TARGET_STATUS_F6 0x130368
+#define Q8_INT_TARGET_MASK_F6 0x130378
+#define Q8_INT_TARGET_STATUS_F7 0x13036C
+#define Q8_INT_TARGET_MASK_F7 0x13037C
+
+#define Q8_SEM2_LOCK 0x13C010
+#define Q8_SEM2_UNLOCK 0x13C014
+#define Q8_SEM3_LOCK 0x13C018
+#define Q8_SEM3_UNLOCK 0x13C01C
+#define Q8_SEM5_LOCK 0x13C028
+#define Q8_SEM5_UNLOCK 0x13C02C
+#define Q8_SEM7_LOCK 0x13C038
+#define Q8_SEM7_UNLOCK 0x13C03C
+
+/* Valid bit for the SEM<N>_LOCK registers */
+#define SEM_LOCK_BIT 0x00000001
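+/*
+ * Presumed usage, based on how these read-to-lock semaphore registers
+ * are commonly driven (the driver's own helpers are defined elsewhere):
+ * acquire by reading SEM<N>_LOCK and checking that SEM_LOCK_BIT is set
+ * in the value returned; release by reading SEM<N>_UNLOCK.
+ */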
+
+
+#define Q8_ROM_LOCKID 0x1B2100
+
+/*******************************
+ * Firmware Interface Registers
+ *******************************/
+#define Q8_FW_VER_MAJOR 0x1B2150
+#define Q8_FW_VER_MINOR 0x1B2154
+#define Q8_FW_VER_SUB 0x1B2158
+#define Q8_FW_VER_BUILD 0x1B2168
+
+#define Q8_CMDPEG_STATE 0x1B2250
+#define Q8_RCVPEG_STATE 0x1B233C
+/*
+ * definitions for Q8_CMDPEG_STATE
+ */
+#define CMDPEG_PHAN_INIT_COMPLETE 0xFF01
+
+#define Q8_ROM_STATUS 0x1A0004
+/*
+ * definitions for Q8_ROM_STATUS
+ * bit definitions for Q8_UNM_ROMUSB_GLB_STATUS
+ * 31:3 Reserved; remaining bits are defined below
+ */
+#define ROM_STATUS_RDY 0x0004
+#define ROM_STATUS_DONE 0x0002
+#define ROM_STATUS_AUTO_ROM_SHDW 0x0001
+
+#define Q8_ASIC_RESET 0x1A0008
+/*
+ * definitions for Q8_ASIC_RESET
+ */
+#define ASIC_RESET_RST_XDMA 0x00800000 /* Reset XDMA */
+#define ASIC_RESET_PEG_ICACHE 0x00000020 /* Reset PEG_ICACHE */
+#define ASIC_RESET_PEG_DCACHE 0x00000010 /* Reset PEG_DCACHE */
+#define ASIC_RESET_PEG_3 0x00000008 /* Reset PEG_3 */
+#define ASIC_RESET_PEG_2 0x00000004 /* Reset PEG_2 */
+#define ASIC_RESET_PEG_1 0x00000002 /* Reset PEG_1 */
+#define ASIC_RESET_PEG_0 0x00000001 /* Reset PEG_0 */
+
+#define Q8_COLD_BOOT 0x1B21FC
+/*
+ * definitions for Q8_COLD_BOOT
+ */
+#define COLD_BOOT_VALUE 0x12345678
+
+
+#define Q8_MIU_TEST_AGT_CTRL 0x180090
+#define Q8_MIU_TEST_AGT_ADDR_LO 0x180094
+#define Q8_MIU_TEST_AGT_ADDR_HI 0x180098
+#define Q8_MIU_TEST_AGT_WRDATA_LO 0x1800A0
+#define Q8_MIU_TEST_AGT_WRDATA_HI 0x1800A4
+#define Q8_MIU_TEST_AGT_RDDATA_LO 0x1800A8
+#define Q8_MIU_TEST_AGT_RDDATA_HI 0x1800AC
+#define Q8_MIU_TEST_AGT_WRDATA_ULO 0x1800B0
+#define Q8_MIU_TEST_AGT_WRDATA_UHI 0x1800B4
+#define Q8_MIU_TEST_AGT_RDDATA_ULO 0x1800B8
+#define Q8_MIU_TEST_AGT_RDDATA_UHI 0x1800BC
+
+#define Q8_PEG_0_RESET 0x160018
+#define Q8_PEG_0_CLR1 0x160008
+#define Q8_PEG_0_CLR2 0x16000C
+#define Q8_PEG_1_CLR1 0x161008
+#define Q8_PEG_1_CLR2 0x16100C
+#define Q8_PEG_2_CLR1 0x162008
+#define Q8_PEG_2_CLR2 0x16200C
+#define Q8_PEG_3_CLR1 0x163008
+#define Q8_PEG_3_CLR2 0x16300C
+#define Q8_PEG_4_CLR1 0x164008
+#define Q8_PEG_4_CLR2 0x16400C
+#define Q8_PEG_D_RESET1 0x1650EC
+#define Q8_PEG_D_RESET2 0x16504C
+#define Q8_PEG_HALT_STATUS1 0x1B20A8
+#define Q8_PEG_HALT_STATUS2 0x1B20AC
+#define Q8_FIRMWARE_HEARTBEAT 0x1B20B0
+#define Q8_PEG_I_RESET 0x16604C
+
+#define Q8_CRB_MAC_BLOCK_START 0x1B21C0
+
+/***************************************************
+ * Flash ROM Access Registers (Indirect Registers)
+ ***************************************************/
+
+#define Q8_ROM_INSTR_OPCODE 0x03310004
+/*
+ * bit definitions for Q8_ROM_INSTR_OPCODE
+ * 31:8 Reserved; remaining bits are defined below
+ */
+#define ROM_OPCODE_WR_STATUS_REG 0x01
+#define ROM_OPCODE_PROG_PAGE 0x02
+#define ROM_OPCODE_RD_BYTE 0x03
+#define ROM_OPCODE_WR_DISABLE 0x04
+#define ROM_OPCODE_RD_STATUS_REG 0x05
+#define ROM_OPCODE_WR_ENABLE 0x06
+#define ROM_OPCODE_FAST_RD 0x0B
+#define ROM_OPCODE_REL_DEEP_PWR_DWN 0xAB
+#define ROM_OPCODE_BULK_ERASE 0xC7
+#define ROM_OPCODE_DEEP_PWR_DWN 0xC9
+#define ROM_OPCODE_SECTOR_ERASE 0xD8
+
+#define Q8_ROM_ADDRESS 0x03310008
+/*
+ * bit definitions for Q8_ROM_ADDRESS
+ * 31:24 Reserved;
+ * 23:0 Physical ROM Address in bytes
+ */
+
+#define Q8_ROM_ADDR_BYTE_COUNT 0x03310010
+/*
+ * bit definitions for Q8_ROM_ADDR_BYTE_COUNT
+ * 31:2 Reserved;
+ * 1:0 maximum number of address bytes used by the ROM interface
+ */
+
+#define Q8_ROM_DUMMY_BYTE_COUNT 0x03310014
+/*
+ * bit definitions for Q8_ROM_DUMMY_BYTE_COUNT
+ * 31:2 Reserved;
+ * 1:0 number of dummy bytes for ROM instructions
+ */
+
+#define Q8_ROM_RD_DATA 0x03310018
+
+#define Q8_NX_CDRP_CMD_RSP 0x1B2218
+#define Q8_NX_CDRP_ARG1 0x1B221C
+#define Q8_NX_CDRP_ARG2 0x1B2220
+#define Q8_NX_CDRP_ARG3 0x1B2224
+#define Q8_NX_CDRP_SIGNATURE 0x1B2228
+
+#define Q8_LINK_STATE 0x1B2298
+#define Q8_LINK_SPEED_0 0x1B22E8
+/*
+ * Macros for reading and writing registers
+ */
+
+#if defined(__i386__) || defined(__amd64__)
+#define Q8_MB() __asm volatile("mfence" ::: "memory")
+#define Q8_WMB() __asm volatile("sfence" ::: "memory")
+#define Q8_RMB() __asm volatile("lfence" ::: "memory")
+#else
+#define Q8_MB()
+#define Q8_WMB()
+#define Q8_RMB()
+#endif
+
+#define READ_REG32(ha, reg) bus_read_4((ha->pci_reg), reg)
+#define READ_OFFSET32(ha, off) READ_REG32(ha, off)
+
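+/*
+ * WRITE_REG32 and WRITE_OFFSET32 flush the posted PCI write by reading
+ * the register back; WRITE_REG32_MB instead issues a store fence
+ * (Q8_WMB) before the write and skips the read-back.
+ */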
+#define WRITE_REG32(ha, reg, val) \
+ {\
+ bus_write_4((ha->pci_reg), reg, val);\
+ bus_read_4((ha->pci_reg), reg);\
+ }
+
+#define WRITE_REG32_MB(ha, reg, val) \
+ {\
+ Q8_WMB();\
+ bus_write_4((ha->pci_reg), reg, val);\
+ }
+
+#define WRITE_OFFSET32(ha, off, val)\
+ {\
+ bus_write_4((ha->pci_reg), off, val);\
+ bus_read_4((ha->pci_reg), off);\
+ }
+
+#endif /* #ifndef _QLA_REG_H_ */
diff --git a/sys/dev/qlxgb/qla_ver.h b/sys/dev/qlxgb/qla_ver.h
new file mode 100644
index 000000000000..8c33ff4535ba
--- /dev/null
+++ b/sys/dev/qlxgb/qla_ver.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * File: qla_ver.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#ifndef _QLA_VER_H_
+#define _QLA_VER_H_
+
+#define QLA_VERSION_MAJOR 1
+#define QLA_VERSION_MINOR 1
+#define QLA_VERSION_BUILD 30
+
+#endif /* #ifndef _QLA_VER_H_ */
diff --git a/sys/modules/Makefile b/sys/modules/Makefile
index 7d691fe24f5a..2c002a10da3d 100644
--- a/sys/modules/Makefile
+++ b/sys/modules/Makefile
@@ -255,6 +255,7 @@ SUBDIR= ${_3dfx} \
${_pst} \
pty \
puc \
+ ${_qlxgb} \
ral \
ralfw \
${_random} \
@@ -617,6 +618,7 @@ _opensolaris= opensolaris
_padlock= padlock
.endif
_pccard= pccard
+_qlxgb= qlxgb
_rdma= rdma
_s3= s3
_safe= safe
diff --git a/sys/modules/qlxgb/Makefile b/sys/modules/qlxgb/Makefile
new file mode 100644
index 000000000000..330cef908517
--- /dev/null
+++ b/sys/modules/qlxgb/Makefile
@@ -0,0 +1,43 @@
+#-
+# Copyright (c) 2010-2011 Qlogic Corporation
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# File : Makefile
+# Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+#
+# $FreeBSD$
+#
+
+.PATH: ${.CURDIR}/../../dev/qlxgb
+
+KMOD= if_qlxgb
+SRCS= qla_os.c qla_dbg.c qla_hw.c qla_misc.c qla_isr.c qla_ioctl.c
+SRCS+= device_if.h bus_if.h pci_if.h
+
+clean:
+ rm -f opt_bdg.h device_if.h bus_if.h pci_if.h export_syms
+ rm -f *.o *.kld *.ko
+ rm -f @ machine
+
+.include <bsd.kmod.mk>