-rw-r--r--  share/man/man4/Makefile                 |    1
-rw-r--r--  share/man/man4/smartpqi.4               |  102
-rw-r--r--  sys/amd64/conf/GENERIC                  |    1
-rw-r--r--  sys/conf/NOTES                          |    7
-rw-r--r--  sys/conf/files.amd64                    |   16
-rw-r--r--  sys/dev/smartpqi/smartpqi_cam.c         | 1205
-rw-r--r--  sys/dev/smartpqi/smartpqi_cmd.c         |   76
-rw-r--r--  sys/dev/smartpqi/smartpqi_defines.h     | 1004
-rw-r--r--  sys/dev/smartpqi/smartpqi_discovery.c   | 1806
-rw-r--r--  sys/dev/smartpqi/smartpqi_event.c       |  439
-rw-r--r--  sys/dev/smartpqi/smartpqi_helper.c      |  291
-rw-r--r--  sys/dev/smartpqi/smartpqi_includes.h    |   90
-rw-r--r--  sys/dev/smartpqi/smartpqi_init.c        |  913
-rw-r--r--  sys/dev/smartpqi/smartpqi_intr.c        |  437
-rw-r--r--  sys/dev/smartpqi/smartpqi_ioctl.c       |  402
-rw-r--r--  sys/dev/smartpqi/smartpqi_ioctl.h       |  144
-rw-r--r--  sys/dev/smartpqi/smartpqi_main.c        |  500
-rw-r--r--  sys/dev/smartpqi/smartpqi_mem.c         |  184
-rw-r--r--  sys/dev/smartpqi/smartpqi_misc.c        |  172
-rw-r--r--  sys/dev/smartpqi/smartpqi_prototypes.h  |  263
-rw-r--r--  sys/dev/smartpqi/smartpqi_queue.c       |  995
-rw-r--r--  sys/dev/smartpqi/smartpqi_request.c     |  791
-rw-r--r--  sys/dev/smartpqi/smartpqi_response.c    |  236
-rw-r--r--  sys/dev/smartpqi/smartpqi_sis.c         |  451
-rw-r--r--  sys/dev/smartpqi/smartpqi_structures.h  | 1010
-rw-r--r--  sys/dev/smartpqi/smartpqi_tag.c         |  265
-rw-r--r--  sys/modules/Makefile                    |    2
-rw-r--r--  sys/modules/smartpqi/Makefile           |   12
28 files changed, 11815 insertions, 0 deletions
diff --git a/share/man/man4/Makefile b/share/man/man4/Makefile
index 55139b8242a0..9d2ffbe8494e 100644
--- a/share/man/man4/Makefile
+++ b/share/man/man4/Makefile
@@ -476,6 +476,7 @@ MAN= aac.4 \
sio.4 \
sis.4 \
sk.4 \
+ smartpqi.4 \
smb.4 \
smbus.4 \
smp.4 \
diff --git a/share/man/man4/smartpqi.4 b/share/man/man4/smartpqi.4
new file mode 100644
index 000000000000..2b180f03db3f
--- /dev/null
+++ b/share/man/man4/smartpqi.4
@@ -0,0 +1,102 @@
+.\" Copyright (c) 2018 Murthy Bhat
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" $FreeBSD$ stable/10/share/man/man4/smartpqi.4 195614 2017-01-11 08:10:18Z jkim $
+.Dd April 06, 2018
+.Dt SMARTPQI 4
+.Os
+.Sh NAME
+.Nm smartpqi
+.Nd Microsemi smartpqi SCSI driver for PQI controllers
+.Sh SYNOPSIS
+To compile this driver into the kernel,
+place the following lines in your
+kernel configuration file:
+.Bd -ragged -offset indent
+.Cd device pci
+.Cd device scbus
+.Cd device smartpqi
+.Ed
+.Pp
+Alternatively, to load the driver as a
+module at boot time, place the following line in
+.Xr loader.conf 5 :
+.Bd -literal -offset indent
+smartpqi_load="YES"
+.Ed
+.Sh DESCRIPTION
+The
+.Nm
+SCSI driver provides support for the new generation of PQI controllers from
+Microsemi.
+The
+.Nm
+driver is the first SCSI driver to implement the PQI queuing model.
+.Pp
+The
+.Nm
+driver will replace the aacraid driver for Adaptec Series 9 controllers.
+.Pp
+The
+.Pa /dev/smartpqi?
+device nodes provide access to the management interface of the controller.
+One node exists per installed card.
+.Sh HARDWARE
+Controllers supported by the
+.Nm
+driver include:
+.Pp
+.Bl -bullet -compact
+.It
+HPE Gen10 Smart Array Controller Family
+.It
+OEM Controllers based on the Microsemi Chipset
+.El
+.Sh FILES
+.Bl -tag -width /dev/smartpqi? -compact
+.It Pa /dev/smartpqi?
+smartpqi management interface
+.El
+.Sh SEE ALSO
+.Xr kld 4 ,
+.Xr linux 4 ,
+.Xr pass 4 ,
+.Xr scsi 4 ,
+.Xr xpt 4 ,
+.Xr loader.conf 5 ,
+.Xr camcontrol 8 ,
+.Xr kldload 8
+.Rs
+.%T "Microsemi Website"
+.%U http://www.microsemi.com/
+.Re
+.Sh HISTORY
+The
+.Nm
+driver first appeared in
+.Fx 11.1 .
+.Sh AUTHORS
+.An Murthy Bhat
+.Aq murthy.bhat@microsemi.com
+.Sh BUGS
+The controller is not actually paused on suspend/resume.
diff --git a/sys/amd64/conf/GENERIC b/sys/amd64/conf/GENERIC
index c57b7783c657..9cdee6ff7687 100644
--- a/sys/amd64/conf/GENERIC
+++ b/sys/amd64/conf/GENERIC
@@ -166,6 +166,7 @@ device iir # Intel Integrated RAID
device ips # IBM (Adaptec) ServeRAID
device mly # Mylex AcceleRAID/eXtremeRAID
device twa # 3ware 9000 series PATA/SATA RAID
+device smartpqi # Microsemi smartpqi driver
device tws # LSI 3ware 9750 SATA+SAS 6Gb/s RAID controller
# RAID controllers
diff --git a/sys/conf/NOTES b/sys/conf/NOTES
index 5ffdf8401a34..ed36a78fc3da 100644
--- a/sys/conf/NOTES
+++ b/sys/conf/NOTES
@@ -1717,6 +1717,13 @@ options MFI_DEBUG
device mrsas # LSI/Avago MegaRAID SAS/SATA, 6Gb/s and 12Gb/s
#
+# Microsemi smartpqi controllers.
+# These controllers have a SCSI-like interface, and require the
+# CAM infrastructure.
+#
+device smartpqi
+
+#
# 3ware ATA RAID
#
device twe # 3ware ATA RAID
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index 4f66e303d08c..2e85a8603790 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -463,6 +463,22 @@ dev/sio/sio_isa.c optional sio isa
dev/sio/sio_pccard.c optional sio pccard
dev/sio/sio_pci.c optional sio pci
dev/sio/sio_puc.c optional sio puc
+dev/smartpqi/smartpqi_cam.c optional smartpqi
+dev/smartpqi/smartpqi_cmd.c optional smartpqi
+dev/smartpqi/smartpqi_discovery.c optional smartpqi
+dev/smartpqi/smartpqi_event.c optional smartpqi
+dev/smartpqi/smartpqi_helper.c optional smartpqi
+dev/smartpqi/smartpqi_init.c optional smartpqi
+dev/smartpqi/smartpqi_intr.c optional smartpqi
+dev/smartpqi/smartpqi_ioctl.c optional smartpqi
+dev/smartpqi/smartpqi_main.c optional smartpqi
+dev/smartpqi/smartpqi_mem.c optional smartpqi
+dev/smartpqi/smartpqi_misc.c optional smartpqi
+dev/smartpqi/smartpqi_queue.c optional smartpqi
+dev/smartpqi/smartpqi_request.c optional smartpqi
+dev/smartpqi/smartpqi_response.c optional smartpqi
+dev/smartpqi/smartpqi_sis.c optional smartpqi
+dev/smartpqi/smartpqi_tag.c optional smartpqi
dev/speaker/spkr.c optional speaker
dev/syscons/apm/apm_saver.c optional apm_saver apm
dev/syscons/scterm-teken.c optional sc
diff --git a/sys/dev/smartpqi/smartpqi_cam.c b/sys/dev/smartpqi/smartpqi_cam.c
new file mode 100644
index 000000000000..ba9ff455790c
--- /dev/null
+++ b/sys/dev/smartpqi/smartpqi_cam.c
@@ -0,0 +1,1205 @@
+/*-
+ * Copyright (c) 2018 Microsemi Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+/*
+ * CAM interface for smartpqi driver
+ */
+
+#include "smartpqi_includes.h"
+
+/*
+ * Set cam sim properties of the smartpqi adapter.
+ */
+static void update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
+{
+
+ pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
+ cam_sim_softc(sim);
+ DBG_FUNC("IN\n");
+
+ cpi->version_num = 1;
+ cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
+ cpi->target_sprt = 0;
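+ /* No initial bus resets from CAM; unmapped (bio) data buffers are accepted. */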
+ cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
+ cpi->hba_eng_cnt = 0;
+ cpi->max_lun = PQI_MAX_MULTILUN;
+ cpi->max_target = 1088;
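+ /* Limit the maximum I/O size by the firmware's scatter/gather element count. */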
+ cpi->maxio = (softs->pqi_cap.max_sg_elem - 1) * PAGE_SIZE;
+ cpi->initiator_id = 255;
+ strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
+ strncpy(cpi->hba_vid, "Microsemi", HBA_IDLEN);
+ strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
+ cpi->unit_number = cam_sim_unit(sim);
+ cpi->bus_id = cam_sim_bus(sim);
+ cpi->base_transfer_speed = 1200000; /* Base bus speed in KB/sec */
+ cpi->protocol = PROTO_SCSI;
+ cpi->protocol_version = SCSI_REV_SPC4;
+ cpi->transport = XPORT_SPI;
+ cpi->transport_version = 2;
+ cpi->ccb_h.status = CAM_REQ_CMP;
+
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Get transport settings of the smartpqi adapter
+ */
+static void get_transport_settings(struct pqisrc_softstate *softs,
+ struct ccb_trans_settings *cts)
+{
+ struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
+ struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas;
+ struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
+
+ DBG_FUNC("IN\n");
+
+ cts->protocol = PROTO_SCSI;
+ cts->protocol_version = SCSI_REV_SPC4;
+ cts->transport = XPORT_SPI;
+ cts->transport_version = 2;
+ spi->valid = CTS_SPI_VALID_DISC;
+ spi->flags = CTS_SPI_FLAGS_DISC_ENB;
+ scsi->valid = CTS_SCSI_VALID_TQ;
+ scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
+ sas->valid = CTS_SAS_VALID_SPEED;
+ cts->ccb_h.status = CAM_REQ_CMP;
+
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Add the target to the CAM layer and rescan when a new device is found.
+ */
+void os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) {
+ union ccb *ccb;
+
+ DBG_FUNC("IN\n");
+
+ if(softs->os_specific.sim_registered) {
+ if ((ccb = xpt_alloc_ccb_nowait()) == NULL) {
+ DBG_ERR("rescan failed (can't allocate CCB)\n");
+ return;
+ }
+
+ if (xpt_create_path(&ccb->ccb_h.path, NULL,
+ cam_sim_path(softs->os_specific.sim),
+ device->target, device->lun) != CAM_REQ_CMP) {
+ DBG_ERR("rescan failed (can't create path)\n");
+ xpt_free_ccb(ccb);
+ return;
+ }
+ xpt_rescan(ccb);
+ }
+
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Remove the device from CAM layer when deleted or hot removed
+ */
+void os_remove_device(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device) {
+ struct cam_path *tmppath;
+
+ DBG_FUNC("IN\n");
+
+ if(softs->os_specific.sim_registered) {
+ if (xpt_create_path(&tmppath, NULL,
+ cam_sim_path(softs->os_specific.sim),
+ device->target, device->lun) != CAM_REQ_CMP) {
+ DBG_ERR("unable to create path for async event\n");
+ return;
+ }
+ xpt_async(AC_LOST_DEVICE, tmppath, NULL);
+ xpt_free_path(tmppath);
+ pqisrc_free_device(softs, device);
+ OS_SLEEP(10000);
+ }
+
+ DBG_FUNC("OUT\n");
+
+}
+
+/*
+ * Function to release the frozen simq
+ */
+static void pqi_release_camq( rcb_t *rcb )
+{
+ pqisrc_softstate_t *softs;
+ struct ccb_scsiio *csio;
+
+ csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
+ softs = rcb->softs;
+
+ DBG_FUNC("IN\n");
+
+ if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
+ softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
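+ /* If the CCB already carries CAM_RELEASE_SIMQ, release the queue
+ now; otherwise set the flag so CAM releases it on completion. */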
+ if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
+ xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
+ else
+ csio->ccb_h.status |= CAM_RELEASE_SIMQ;
+ }
+
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Function to dma-unmap the completed request
+ */
+static void pqi_unmap_request(void *arg)
+{
+ pqisrc_softstate_t *softs;
+ rcb_t *rcb;
+
+ DBG_IO("IN rcb = %p\n", arg);
+
+ rcb = (rcb_t *)arg;
+ softs = rcb->softs;
+
+ if (!(rcb->cm_flags & PQI_CMD_MAPPED))
+ return;
+
+ if (rcb->bcount != 0 ) {
+ if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
+ bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
+ rcb->cm_datamap,
+ BUS_DMASYNC_POSTREAD);
+ if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
+ bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
+ rcb->cm_datamap,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(softs->os_specific.pqi_buffer_dmat,
+ rcb->cm_datamap);
+ }
+ rcb->cm_flags &= ~PQI_CMD_MAPPED;
+
+ if(rcb->sgt && rcb->nseg)
+ os_mem_free(rcb->softs, (void*)rcb->sgt,
+ rcb->nseg*sizeof(sgt_t));
+
+ pqisrc_put_tag(&softs->taglist, rcb->tag);
+
+ DBG_IO("OUT\n");
+}
+
+/*
+ * Construct meaningful LD name for volume here.
+ */
+static void
+smartpqi_fix_ld_inquiry(pqisrc_softstate_t *softs, struct ccb_scsiio *csio)
+{
+ struct scsi_inquiry_data *inq = NULL;
+ uint8_t *cdb = NULL;
+ pqi_scsi_dev_t *device = NULL;
+
+ DBG_FUNC("IN\n");
+
+ cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ?
+ (uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes;
+ if(cdb[0] == INQUIRY &&
+ (cdb[1] & SI_EVPD) == 0 &&
+ (csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN &&
+ csio->dxfer_len >= SHORT_INQUIRY_LENGTH) {
+
+ inq = (struct scsi_inquiry_data *)csio->data_ptr;
+
+ device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun];
+
+ /* Let disks be probed and dealt with via CAM; only for a logical
+ volume do we fall through and tweak the inquiry data. */
+ if( !device || !pqisrc_is_logical_device(device) ||
+ (device->devtype != DISK_DEVICE) ||
+ pqisrc_is_external_raid_device(device)) {
+ return;
+ }
+
+ strncpy(inq->vendor, "MSCC",
+ SID_VENDOR_SIZE);
+ strncpy(inq->product,
+ pqisrc_raidlevel_to_string(device->raid_level),
+ SID_PRODUCT_SIZE);
+ strncpy(inq->revision, device->volume_offline?"OFF":"OK",
+ SID_REVISION_SIZE);
+ }
+
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Handle completion of a command - pass results back through the CCB
+ */
+void
+os_io_response_success(rcb_t *rcb)
+{
+ struct ccb_scsiio *csio;
+
+ DBG_IO("IN rcb = %p\n", rcb);
+
+ if (rcb == NULL)
+ panic("rcb is null");
+
+ csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
+
+ if (csio == NULL)
+ panic("csio is null");
+
+ rcb->status = REQUEST_SUCCESS;
+ csio->ccb_h.status = CAM_REQ_CMP;
+
+ smartpqi_fix_ld_inquiry(rcb->softs, csio);
+ pqi_release_camq(rcb);
+ pqi_unmap_request(rcb);
+ xpt_done((union ccb *)csio);
+
+ DBG_IO("OUT\n");
+}
+
+/*
+ * Error response handling for raid IO
+ */
+void os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
+{
+ struct ccb_scsiio *csio;
+ pqisrc_softstate_t *softs;
+
+ DBG_IO("IN\n");
+
+ csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
+
+ if (csio == NULL)
+ panic("csio is null");
+
+ softs = rcb->softs;
+
+ ASSERT(err_info != NULL);
+ csio->scsi_status = err_info->status;
+ csio->ccb_h.status = CAM_REQ_CMP_ERR;
+
+ if (csio->ccb_h.func_code == XPT_SCSI_IO) {
+ /*
+ * Handle specific SCSI status values.
+ */
+ switch(csio->scsi_status) {
+ case PQI_RAID_STATUS_QUEUE_FULL:
+ csio->ccb_h.status = CAM_REQ_CMP;
+ DBG_ERR("Queue Full error\n");
+ break;
+ /* check condition, sense data included */
+ case PQI_RAID_STATUS_CHECK_CONDITION:
+ {
+ uint16_t sense_data_len =
+ LE_16(err_info->sense_data_len);
+ uint8_t *sense_data = NULL;
+ if (sense_data_len)
+ sense_data = err_info->data;
+ memset(&csio->sense_data, 0, csio->sense_len);
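+ /* Copy only as much sense data as the CCB can hold and
+ report any truncation in sense_resid. */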
+ sense_data_len = (sense_data_len >
+ csio->sense_len) ?
+ csio->sense_len :
+ sense_data_len;
+ if (sense_data)
+ memcpy(&csio->sense_data, sense_data,
+ sense_data_len);
+ if (csio->sense_len > sense_data_len)
+ csio->sense_resid = csio->sense_len
+ - sense_data_len;
+ else
+ csio->sense_resid = 0;
+ csio->ccb_h.status = CAM_SCSI_STATUS_ERROR
+ | CAM_AUTOSNS_VALID
+ | CAM_REQ_CMP_ERR;
+
+ }
+ break;
+
+ case PQI_RAID_DATA_IN_OUT_UNDERFLOW:
+ {
+ uint32_t resid = 0;
+ resid = rcb->bcount-err_info->data_out_transferred;
+ csio->resid = resid;
+ csio->ccb_h.status = CAM_REQ_CMP;
+ break;
+ }
+ default:
+ csio->ccb_h.status = CAM_REQ_CMP;
+ break;
+ }
+ }
+
+ if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
+ softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
+ if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
+ xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
+ else
+ csio->ccb_h.status |= CAM_RELEASE_SIMQ;
+ }
+
+ pqi_unmap_request(rcb);
+ xpt_done((union ccb *)csio);
+
+ DBG_IO("OUT\n");
+}
+
+
+/*
+ * Error response handling for aio.
+ */
+void os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
+{
+ struct ccb_scsiio *csio;
+ pqisrc_softstate_t *softs;
+
+ DBG_IO("IN\n");
+
+ if (rcb == NULL)
+ panic("rcb is null");
+
+ rcb->status = REQUEST_SUCCESS;
+ csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
+ if (csio == NULL)
+ panic("csio is null");
+
+ softs = rcb->softs;
+
+ switch (err_info->service_resp) {
+ case PQI_AIO_SERV_RESPONSE_COMPLETE:
+ csio->ccb_h.status = err_info->status;
+ break;
+ case PQI_AIO_SERV_RESPONSE_FAILURE:
+ switch(err_info->status) {
+ case PQI_AIO_STATUS_IO_ABORTED:
+ csio->ccb_h.status = CAM_REQ_ABORTED;
+ DBG_WARN_BTL(rcb->dvp, "IO aborted\n");
+ break;
+ case PQI_AIO_STATUS_UNDERRUN:
+ csio->ccb_h.status = CAM_REQ_CMP;
+ csio->resid =
+ LE_32(err_info->resd_count);
+ break;
+ case PQI_AIO_STATUS_OVERRUN:
+ csio->ccb_h.status = CAM_REQ_CMP;
+ break;
+ case PQI_AIO_STATUS_AIO_PATH_DISABLED:
+ DBG_WARN_BTL(rcb->dvp,"AIO Path Disabled\n");
+ rcb->dvp->offload_enabled = false;
+ csio->ccb_h.status |= CAM_REQUEUE_REQ;
+ break;
+ case PQI_AIO_STATUS_IO_ERROR:
+ case PQI_AIO_STATUS_IO_NO_DEVICE:
+ case PQI_AIO_STATUS_INVALID_DEVICE:
+ default:
+ DBG_WARN_BTL(rcb->dvp,"IO Error/Invalid/No device\n");
+ csio->ccb_h.status |=
+ CAM_SCSI_STATUS_ERROR;
+ break;
+ }
+ break;
+ case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
+ case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
+ csio->ccb_h.status = CAM_REQ_CMP;
+ break;
+ case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
+ case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
+ DBG_WARN_BTL(rcb->dvp,"TMF rejected/Incorrect Lun\n");
+ csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
+ break;
+ default:
+ DBG_WARN_BTL(rcb->dvp,"Scsi Status Error\n");
+ csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
+ break;
+ }
+ if(err_info->data_pres == DATA_PRESENT_SENSE_DATA ) {
+ csio->scsi_status = PQI_AIO_STATUS_CHECK_CONDITION;
+ uint8_t *sense_data = NULL;
+ unsigned sense_data_len = LE_16(err_info->data_len);
+ if (sense_data_len)
+ sense_data = err_info->data;
+ DBG_ERR_BTL(rcb->dvp, "SCSI_STATUS_CHECK_COND sense size %u\n",
+ sense_data_len);
+ memset(&csio->sense_data, 0, csio->sense_len);
+ if (sense_data)
+ memcpy(&csio->sense_data, sense_data, ((sense_data_len >
+ csio->sense_len) ? csio->sense_len : sense_data_len));
+ if (csio->sense_len > sense_data_len)
+ csio->sense_resid = csio->sense_len - sense_data_len;
+ else
+ csio->sense_resid = 0;
+ csio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
+ }
+
+ smartpqi_fix_ld_inquiry(softs, csio);
+ pqi_release_camq(rcb);
+ pqi_unmap_request(rcb);
+ xpt_done((union ccb *)csio);
+ DBG_IO("OUT\n");
+}
+
+/*
+ * Command-mapping helper function - populate this command's s/g table.
+ */
+static void
+pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ pqisrc_softstate_t *softs;
+ rcb_t *rcb;
+
+ rcb = (rcb_t *)arg;
+ softs = rcb->softs;
+
+ if( error || nseg > softs->pqi_cap.max_sg_elem )
+ {
+ xpt_freeze_simq(softs->os_specific.sim, 1);
+ rcb->cm_ccb->ccb_h.status |= (CAM_REQUEUE_REQ|
+ CAM_RELEASE_SIMQ);
+ DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n",
+ error, nseg, softs->pqi_cap.max_sg_elem);
+ pqi_unmap_request(rcb);
+ xpt_done((union ccb *)rcb->cm_ccb);
+ return;
+ }
+
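+ /* Mirror the busdma segment list into the driver's own s/g table. */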
+ rcb->sgt = os_mem_alloc(softs, nseg * sizeof(sgt_t));
+ rcb->nseg = nseg;
+ if (rcb->sgt != NULL) {
+ for (int i = 0; i < nseg; i++) {
+ rcb->sgt[i].addr = segs[i].ds_addr;
+ rcb->sgt[i].len = segs[i].ds_len;
+ rcb->sgt[i].flags = 0;
+ }
+ }
+
+ if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
+ bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
+ rcb->cm_datamap, BUS_DMASYNC_PREREAD);
+ if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
+ bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
+ rcb->cm_datamap, BUS_DMASYNC_PREWRITE);
+
+ /* Call IO functions depending on pd or ld */
+ rcb->status = REQUEST_PENDING;
+
+ error = pqisrc_build_send_io(softs, rcb);
+
+ if (error) {
+ rcb->req_pending = false;
+ xpt_freeze_simq(softs->os_specific.sim, 1);
+ rcb->cm_ccb->ccb_h.status |= (CAM_REQUEUE_REQ
+ |CAM_RELEASE_SIMQ);
+ DBG_ERR_BTL(rcb->dvp, "Build IO failed, error = %d\n", error);
+ pqi_unmap_request(rcb);
+ xpt_done((union ccb *)rcb->cm_ccb);
+ return;
+ }
+}
+
+/*
+ * Function to dma-map the request buffer
+ */
+static int pqi_map_request( rcb_t *rcb )
+{
+ pqisrc_softstate_t *softs = rcb->softs;
+ int error = PQI_STATUS_SUCCESS;
+ union ccb *ccb = rcb->cm_ccb;
+
+ DBG_FUNC("IN\n");
+
+ /* check that mapping is necessary */
+ if (rcb->cm_flags & PQI_CMD_MAPPED)
+ return(0);
+ rcb->cm_flags |= PQI_CMD_MAPPED;
+
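+ /* With data to transfer, submission continues in the busdma
+ callback (pqi_request_map_helper); the no-data path submits
+ directly below. */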
+ if (rcb->bcount) {
+ error = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat,
+ rcb->cm_datamap, ccb, pqi_request_map_helper, rcb, 0);
+ if (error != 0){
+ DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed = %d count = %d\n",
+ error, rcb->bcount);
+ return error;
+ }
+ } else {
+ /*
+ * Set up the command to go to the controller. If there are no
+ * data buffers associated with the command then it can bypass
+ * busdma.
+ */
+ /* Call IO functions depending on pd or ld */
+ rcb->status = REQUEST_PENDING;
+
+ error = pqisrc_build_send_io(softs, rcb);
+
+ }
+
+ DBG_FUNC("OUT error = %d\n", error);
+
+ return error;
+}
+
+/*
+ * Function to clear the request control block
+ */
+void os_reset_rcb( rcb_t *rcb )
+{
+ rcb->error_info = NULL;
+ rcb->req = NULL;
+ rcb->status = -1;
+ rcb->tag = INVALID_ELEM;
+ rcb->dvp = NULL;
+ rcb->cdbp = NULL;
+ rcb->softs = NULL;
+ rcb->cm_flags = 0;
+ rcb->cm_data = NULL;
+ rcb->bcount = 0;
+ rcb->nseg = 0;
+ rcb->sgt = NULL;
+ rcb->cm_ccb = NULL;
+ rcb->encrypt_enable = false;
+ rcb->ioaccel_handle = 0;
+ rcb->resp_qid = 0;
+ rcb->req_pending = false;
+}
+
+/*
+ * Callback function for the lun rescan
+ */
+static void smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb)
+{
+ xpt_free_path(ccb->ccb_h.path);
+ xpt_free_ccb(ccb);
+}
+
+
+/*
+ * Function to rescan the lun
+ */
+static void smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target,
+ int lun)
+{
+ union ccb *ccb = NULL;
+ cam_status status = 0;
+ struct cam_path *path = NULL;
+
+ DBG_FUNC("IN\n");
+
+ ccb = xpt_alloc_ccb_nowait();
+ if (ccb == NULL) {
+ DBG_ERR("rescan failed (can't allocate CCB)\n");
+ return;
+ }
+ status = xpt_create_path(&path, NULL,
+ cam_sim_path(softs->os_specific.sim), target, lun);
+ if (status != CAM_REQ_CMP) {
+ DBG_ERR("xpt_create_path status(%d) != CAM_REQ_CMP \n",
+ status);
+ xpt_free_ccb(ccb);
+ return;
+ }
+
+ bzero(ccb, sizeof(union ccb));
+ xpt_setup_ccb(&ccb->ccb_h, path, 5);
+ ccb->ccb_h.func_code = XPT_SCAN_LUN;
+ ccb->ccb_h.cbfcnp = smartpqi_lunrescan_cb;
+ ccb->crcn.flags = CAM_FLAG_NONE;
+
+ xpt_action(ccb);
+
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Function to rescan the lun under each target
+ */
+void smartpqi_target_rescan(struct pqisrc_softstate *softs)
+{
+ int target = 0, lun = 0;
+
+ DBG_FUNC("IN\n");
+
+ for(target = 0; target < PQI_MAX_DEVICES; target++){
+ for(lun = 0; lun < PQI_MAX_MULTILUN; lun++){
+ if(softs->device_list[target][lun]){
+ smartpqi_lun_rescan(softs, target, lun);
+ }
+ }
+ }
+
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Set the mode of tagged command queueing for the current task.
+ */
+uint8_t os_get_task_attr(rcb_t *rcb)
+{
+ union ccb *ccb = rcb->cm_ccb;
+ uint8_t tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;
+
+ switch(ccb->csio.tag_action) {
+ case MSG_HEAD_OF_Q_TAG:
+ tag_action = SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE;
+ break;
+ case MSG_ORDERED_Q_TAG:
+ tag_action = SOP_TASK_ATTRIBUTE_ORDERED;
+ break;
+ case MSG_SIMPLE_Q_TAG:
+ default:
+ tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;
+ break;
+ }
+ return tag_action;
+}
+
+/*
+ * Complete all outstanding commands
+ */
+void os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs)
+{
+ int tag = 0;
+
+ DBG_FUNC("IN\n");
+
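+ /* Tags start at 1; fail back any CCB still marked pending. */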
+ for (tag = 1; tag < softs->max_outstanding_io; tag++) {
+ rcb_t *prcb = &softs->rcb[tag];
+ if(prcb->req_pending && prcb->cm_ccb ) {
+ prcb->req_pending = false;
+ prcb->cm_ccb->ccb_h.status = CAM_REQ_ABORTED | CAM_REQ_CMP;
+ xpt_done((union ccb *)prcb->cm_ccb);
+ prcb->cm_ccb = NULL;
+ }
+ }
+
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * IO handling functionality entry point
+ */
+static int pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
+{
+ rcb_t *rcb;
+ uint32_t tag, no_transfer = 0;
+ pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
+ cam_sim_softc(sim);
+ int32_t error = PQI_STATUS_FAILURE;
+ pqi_scsi_dev_t *dvp;
+
+ DBG_FUNC("IN\n");
+
+ if( softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL ) {
+ ccb->ccb_h.status = CAM_DEV_NOT_THERE;
+ DBG_INFO("Device = %d not there\n", ccb->ccb_h.target_id);
+ return PQI_STATUS_FAILURE;
+ }
+
+ dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
+ /* Check controller state */
+ if (IN_PQI_RESET(softs)) {
+ ccb->ccb_h.status = CAM_SCSI_BUS_RESET
+ | CAM_BUSY | CAM_REQ_INPROG;
+ DBG_WARN("Device = %d BUSY/IN_RESET\n", ccb->ccb_h.target_id);
+ return error;
+ }
+ /* Check device state */
+ if (pqisrc_ctrl_offline(softs) || DEV_GONE(dvp)) {
+ ccb->ccb_h.status = CAM_DEV_NOT_THERE | CAM_REQ_CMP;
+ DBG_WARN("Device = %d GONE/OFFLINE\n", ccb->ccb_h.target_id);
+ return error;
+ }
+ /* Check device reset */
+ if (DEV_RESET(dvp)) {
+ ccb->ccb_h.status = CAM_SCSI_BUSY | CAM_REQ_INPROG | CAM_BUSY;
+ DBG_WARN("Device %d reset returned busy\n", ccb->ccb_h.target_id);
+ return error;
+ }
+
+ if (dvp->expose_device == false) {
+ ccb->ccb_h.status = CAM_DEV_NOT_THERE;
+ DBG_INFO("Device = %d not exposed\n", ccb->ccb_h.target_id);
+ return error;
+ }
+
+ tag = pqisrc_get_tag(&softs->taglist);
+ if( tag == INVALID_ELEM ) {
+ DBG_ERR("Get Tag failed\n");
+ xpt_freeze_simq(softs->os_specific.sim, 1);
+ softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
+ ccb->ccb_h.status |= (CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ);
+ return PQI_STATUS_FAILURE;
+ }
+
+ DBG_IO("tag = %d &softs->taglist : %p\n", tag, &softs->taglist);
+
+ rcb = &softs->rcb[tag];
+ os_reset_rcb( rcb );
+ rcb->tag = tag;
+ rcb->softs = softs;
+ rcb->cmdlen = ccb->csio.cdb_len;
+ ccb->ccb_h.sim_priv.entries[0].ptr = rcb;
+
+ switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
+ case CAM_DIR_IN:
+ rcb->data_dir = SOP_DATA_DIR_FROM_DEVICE;
+ break;
+ case CAM_DIR_OUT:
+ rcb->data_dir = SOP_DATA_DIR_TO_DEVICE;
+ break;
+ case CAM_DIR_NONE:
+ no_transfer = 1;
+ break;
+ default:
+ DBG_ERR("Unknown Dir\n");
+ break;
+ }
+ rcb->cm_ccb = ccb;
+ rcb->dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
+
+ if (!no_transfer) {
+ rcb->cm_data = (void *)ccb->csio.data_ptr;
+ rcb->bcount = ccb->csio.dxfer_len;
+ } else {
+ rcb->cm_data = NULL;
+ rcb->bcount = 0;
+ }
+ /*
+ * Submit the request to the adapter.
+ *
+ * Note that this may fail if we're unable to map the request (and,
+ * should we ever support a transport layer other than "simple", it
+ * may also fail if the adapter rejects the command).
+ */
+ if ((error = pqi_map_request(rcb)) != 0) {
+ rcb->req_pending = false;
+ xpt_freeze_simq(softs->os_specific.sim, 1);
+ ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
+ if (error == EINPROGRESS) {
+ DBG_WARN("In Progress on %d\n", ccb->ccb_h.target_id);
+ error = 0;
+ } else {
+ ccb->ccb_h.status |= CAM_REQUEUE_REQ;
+ DBG_WARN("Requeue req error = %d target = %d\n", error,
+ ccb->ccb_h.target_id);
+ pqi_unmap_request(rcb);
+ }
+ }
+
+ DBG_FUNC("OUT error = %d\n", error);
+ return error;
+}
+
+/*
+ * Abort a task, task management functionality
+ */
+static int
+pqisrc_scsi_abort_task(pqisrc_softstate_t *softs, union ccb *ccb)
+{
+ rcb_t *rcb = ccb->ccb_h.sim_priv.entries[0].ptr;
+ uint32_t abort_tag = rcb->tag;
+ uint32_t tag = 0;
+ int rval = PQI_STATUS_SUCCESS;
+ uint16_t qid;
+
+ DBG_FUNC("IN\n");
+
+ qid = (uint16_t)rcb->resp_qid;
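+ /* rcb still refers to the command being aborted; a fresh tag/rcb
+ below carries the TMF request itself. */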
+
+ tag = pqisrc_get_tag(&softs->taglist);
+ rcb = &softs->rcb[tag];
+ rcb->tag = tag;
+ rcb->resp_qid = qid;
+
+ rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, abort_tag,
+ SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK);
+
+ if (PQI_STATUS_SUCCESS == rval) {
+ rval = rcb->status;
+ if (REQUEST_SUCCESS == rval) {
+ ccb->ccb_h.status = CAM_REQ_ABORTED;
+ }
+ }
+ pqisrc_put_tag(&softs->taglist, abort_tag);
+ pqisrc_put_tag(&softs->taglist,rcb->tag);
+
+ DBG_FUNC("OUT rval = %d\n", rval);
+
+ return rval;
+}
+
+/*
+ * Abort a taskset, task management functionality
+ */
+static int
+pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb)
+{
+ rcb_t *rcb = NULL;
+ uint32_t tag = 0;
+ int rval = PQI_STATUS_SUCCESS;
+
+ DBG_FUNC("IN\n");
+
+ tag = pqisrc_get_tag(&softs->taglist);
+ rcb = &softs->rcb[tag];
+ rcb->tag = tag;
+
+ rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, 0,
+ SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET);
+
+ if (rval == PQI_STATUS_SUCCESS) {
+ rval = rcb->status;
+ }
+
+ pqisrc_put_tag(&softs->taglist,rcb->tag);
+
+ DBG_FUNC("OUT rval = %d\n", rval);
+
+ return rval;
+}
+
+/*
+ * Target reset task management functionality
+ */
+static int
+pqisrc_target_reset( pqisrc_softstate_t *softs, union ccb *ccb)
+{
+ pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
+ rcb_t *rcb = NULL;
+ uint32_t tag = 0;
+ int rval = PQI_STATUS_SUCCESS;
+
+ DBG_FUNC("IN\n");
+
+ if (devp == NULL) {
+ DBG_ERR("bad target t%d\n", ccb->ccb_h.target_id);
+ return (-1);
+ }
+
+ tag = pqisrc_get_tag(&softs->taglist);
+ rcb = &softs->rcb[tag];
+ rcb->tag = tag;
+
+ devp->reset_in_progress = true;
+ rval = pqisrc_send_tmf(softs, devp, rcb, 0,
+ SOP_TASK_MANAGEMENT_LUN_RESET);
+ if (PQI_STATUS_SUCCESS == rval) {
+ rval = rcb->status;
+ }
+ devp->reset_in_progress = false;
+ pqisrc_put_tag(&softs->taglist,rcb->tag);
+
+ DBG_FUNC("OUT rval = %d\n", rval);
+
+ return ((rval == REQUEST_SUCCESS) ?
+ PQI_STATUS_SUCCESS : PQI_STATUS_FAILURE);
+}
+
+/*
+ * cam entry point of the smartpqi module.
+ */
+static void smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb)
+{
+ struct pqisrc_softstate *softs = cam_sim_softc(sim);
+ struct ccb_hdr *ccb_h = &ccb->ccb_h;
+
+ DBG_FUNC("IN\n");
+
+ switch (ccb_h->func_code) {
+ case XPT_SCSI_IO:
+ {
+ if(!pqisrc_io_start(sim, ccb)) {
+ return;
+ }
+ break;
+ }
+ case XPT_CALC_GEOMETRY:
+ {
+ struct ccb_calc_geometry *ccg;
+ ccg = &ccb->ccg;
+ if (ccg->block_size == 0) {
+ ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ break;
+ }
+ cam_calc_geometry(ccg, /* extended */ 1);
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ break;
+ }
+ case XPT_PATH_INQ:
+ {
+ update_sim_properties(sim, &ccb->cpi);
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ break;
+ }
+ case XPT_GET_TRAN_SETTINGS:
+ get_transport_settings(softs, &ccb->cts);
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ break;
+ case XPT_ABORT:
+ if(pqisrc_scsi_abort_task(softs, ccb)) {
+ ccb->ccb_h.status = CAM_REQ_CMP_ERR;
+ xpt_done(ccb);
+ DBG_ERR("Abort task failed on %d\n",
+ ccb->ccb_h.target_id);
+ return;
+ }
+ break;
+ case XPT_TERM_IO:
+ if (pqisrc_scsi_abort_task_set(softs, ccb)) {
+ ccb->ccb_h.status = CAM_REQ_CMP_ERR;
+ DBG_ERR("Abort task set failed on %d\n",
+ ccb->ccb_h.target_id);
+ xpt_done(ccb);
+ return;
+ }
+ break;
+ case XPT_RESET_DEV:
+ if(pqisrc_target_reset(softs, ccb)) {
+ ccb->ccb_h.status = CAM_REQ_CMP_ERR;
+ DBG_ERR("Target reset failed on %d\n",
+ ccb->ccb_h.target_id);
+ xpt_done(ccb);
+ return;
+ } else {
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ }
+ break;
+ case XPT_RESET_BUS:
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ break;
+ case XPT_SET_TRAN_SETTINGS:
+ ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
+ return;
+ default:
+ DBG_WARN("UNSUPPORTED FUNC CODE\n");
+ ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
+ break;
+ }
+ xpt_done(ccb);
+
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Function to poll for responses when interrupts are unavailable;
+ * this also supports crash dumps.
+ */
+static void smartpqi_poll(struct cam_sim *sim)
+{
+ struct pqisrc_softstate *softs = cam_sim_softc(sim);
+ int i;
+
+ for (i = 1; i < softs->intr_count; i++ )
+ pqisrc_process_response_queue(softs, i);
+}
+
+/*
+ * Function to adjust the queue depth of a device
+ */
+void smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth)
+{
+ struct ccb_relsim crs;
+
+ DBG_INFO("IN\n");
+
+ xpt_setup_ccb(&crs.ccb_h, path, 5);
+ crs.ccb_h.func_code = XPT_REL_SIMQ;
+ crs.ccb_h.flags = CAM_DEV_QFREEZE;
+ crs.release_flags = RELSIM_ADJUST_OPENINGS;
+ crs.openings = queue_depth;
+ xpt_action((union ccb *)&crs);
+ if(crs.ccb_h.status != CAM_REQ_CMP) {
+ printf("XPT_REL_SIMQ failed stat=%d\n", crs.ccb_h.status);
+ }
+
+ DBG_INFO("OUT\n");
+}
+
+/*
+ * Function to register async callback for setting queue depth
+ */
+static void
+smartpqi_async(void *callback_arg, u_int32_t code,
+ struct cam_path *path, void *arg)
+{
+ struct pqisrc_softstate *softs;
+ softs = (struct pqisrc_softstate*)callback_arg;
+
+ DBG_FUNC("IN\n");
+
+ switch (code) {
+ case AC_FOUND_DEVICE:
+ {
+ struct ccb_getdev *cgd;
+ cgd = (struct ccb_getdev *)arg;
+ if (cgd == NULL) {
+ break;
+ }
+ uint32_t t_id = cgd->ccb_h.target_id;
+
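+ /* Skip the controller's own slot, which occupies the
+ last device_list index. */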
+ if (t_id <= (PQI_CTLR_INDEX - 1)) {
+ if (softs != NULL) {
+ pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun];
+ smartpqi_adjust_queue_depth(path,
+ dvp->queue_depth);
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Function to register sim with CAM layer for smartpqi driver
+ */
+int register_sim(struct pqisrc_softstate *softs, int card_index)
+{
+ int error = 0;
+ int max_transactions;
+ union ccb *ccb = NULL;
+ cam_status status = 0;
+ struct ccb_setasync csa;
+ struct cam_sim *sim;
+
+ DBG_FUNC("IN\n");
+
+ max_transactions = softs->max_io_for_scsi_ml;
+ softs->os_specific.devq = cam_simq_alloc(max_transactions);
+ if (softs->os_specific.devq == NULL) {
+ DBG_ERR("cam_simq_alloc failed txns = %d\n",
+ max_transactions);
+ return PQI_STATUS_FAILURE;
+ }
+
+ sim = cam_sim_alloc(smartpqi_cam_action, \
+ smartpqi_poll, "smartpqi", softs, \
+ card_index, &softs->os_specific.cam_lock, \
+ 1, max_transactions, softs->os_specific.devq);
+ if (sim == NULL) {
+ DBG_ERR("cam_sim_alloc failed txns = %d\n",
+ max_transactions);
+ cam_simq_free(softs->os_specific.devq);
+ return PQI_STATUS_FAILURE;
+ }
+
+ softs->os_specific.sim = sim;
+ mtx_lock(&softs->os_specific.cam_lock);
+ status = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0);
+ if (status != CAM_SUCCESS) {
+ DBG_ERR("xpt_bus_register failed status=%d\n", status);
+ cam_sim_free(softs->os_specific.sim, FALSE);
+ cam_simq_free(softs->os_specific.devq);
+ mtx_unlock(&softs->os_specific.cam_lock);
+ return PQI_STATUS_FAILURE;
+ }
+
+ softs->os_specific.sim_registered = TRUE;
+ ccb = xpt_alloc_ccb_nowait();
+ if (ccb == NULL) {
+ DBG_ERR("xpt_create_path failed\n");
+ return PQI_STATUS_FAILURE;
+ }
+
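+ /* A wildcard path makes the async callback below fire for every
+ target/LUN on this SIM. */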
+ if (xpt_create_path(&ccb->ccb_h.path, NULL,
+ cam_sim_path(softs->os_specific.sim),
+ CAM_TARGET_WILDCARD,
+ CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
+ DBG_ERR("xpt_create_path failed\n");
+ xpt_free_ccb(ccb);
+ xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
+ cam_sim_free(softs->os_specific.sim, TRUE);
+ mtx_unlock(&softs->os_specific.cam_lock);
+ return PQI_STATUS_FAILURE;
+ }
+ /*
+ * Callback to set the queue depth per target which is
+ * derived from the FW.
+ */
+ softs->os_specific.path = ccb->ccb_h.path;
+ xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
+ csa.ccb_h.func_code = XPT_SASYNC_CB;
+ csa.event_enable = AC_FOUND_DEVICE;
+ csa.callback = smartpqi_async;
+ csa.callback_arg = softs;
+ xpt_action((union ccb *)&csa);
+ if (csa.ccb_h.status != CAM_REQ_CMP) {
+ DBG_ERR("Unable to register smartpqi_async handler: %d!\n",
+ csa.ccb_h.status);
+ }
+
+ mtx_unlock(&softs->os_specific.cam_lock);
+ DBG_INFO("OUT\n");
+ return error;
+}
+
+/*
+ * Function to deregister smartpqi sim from cam layer
+ */
+void deregister_sim(struct pqisrc_softstate *softs)
+{
+ struct ccb_setasync csa;
+
+ DBG_FUNC("IN\n");
+
+ if (softs->os_specific.mtx_init) {
+ mtx_lock(&softs->os_specific.cam_lock);
+ }
+
+
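+ /* Disable async notifications before tearing down the path and bus. */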
+ xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
+ csa.ccb_h.func_code = XPT_SASYNC_CB;
+ csa.event_enable = 0;
+ csa.callback = smartpqi_async;
+ csa.callback_arg = softs;
+ xpt_action((union ccb *)&csa);
+ xpt_free_path(softs->os_specific.path);
+
+ xpt_release_simq(softs->os_specific.sim, 0);
+
+ xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
+ softs->os_specific.sim_registered = FALSE;
+
+ if (softs->os_specific.sim) {
+ cam_sim_free(softs->os_specific.sim, FALSE);
+ softs->os_specific.sim = NULL;
+ }
+ if (softs->os_specific.mtx_init) {
+ mtx_unlock(&softs->os_specific.cam_lock);
+ }
+ if (softs->os_specific.devq != NULL) {
+ cam_simq_free(softs->os_specific.devq);
+ }
+ if (softs->os_specific.mtx_init) {
+ mtx_destroy(&softs->os_specific.cam_lock);
+ softs->os_specific.mtx_init = FALSE;
+ }
+
+ mtx_destroy(&softs->os_specific.map_lock);
+
+ DBG_FUNC("OUT\n");
+}
+
+static void smartpqi_cam_action(struct cam_sim *, union ccb *);
+static void smartpqi_poll(struct cam_sim *);
+
diff --git a/sys/dev/smartpqi/smartpqi_cmd.c b/sys/dev/smartpqi/smartpqi_cmd.c
new file mode 100644
index 000000000000..d1aee5a6f2c9
--- /dev/null
+++ b/sys/dev/smartpqi/smartpqi_cmd.c
@@ -0,0 +1,76 @@
+/*-
+ * Copyright (c) 2018 Microsemi Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#include "smartpqi_includes.h"
+
+/*
+ * Function to submit the request to the adapter.
+ */
+
+int pqisrc_submit_cmnd(pqisrc_softstate_t *softs,
+ ib_queue_t *ib_q, void *req)
+{
+ char *slot = NULL;
+ uint32_t offset;
+ iu_header_t *hdr = (iu_header_t *)req;
+ uint32_t iu_len = hdr->iu_length + 4; /* iu_length excludes the 4-byte IU header */
+ int i = 0;
+ DBG_FUNC("IN\n");
+
+ PQI_LOCK(&ib_q->lock);
+
+ /* Check queue full: the next PI slot would collide with the controller's CI */
+ if ((ib_q->pi_local + 1) % ib_q->num_elem == *(ib_q->ci_virt_addr)) {
+ DBG_WARN("OUT Q full\n");
+ PQI_UNLOCK(&ib_q->lock);
+ return PQI_STATUS_QFULL;
+ }
+
+ /* Get the slot */
+ offset = ib_q->pi_local * ib_q->elem_size;
+ slot = ib_q->array_virt_addr + offset;
+
+ /* Copy the IU */
+ memcpy(slot, req, iu_len);
+ DBG_INFO("IU : \n");
+ for(i = 0; i< iu_len; i++)
+ DBG_INFO(" IU [ %d ] : %x\n", i, *((unsigned char *)(slot + i)));
+
+ /* Update the local PI */
+ ib_q->pi_local = (ib_q->pi_local + 1) % ib_q->num_elem;
+ DBG_INFO("ib_q->pi_local : %x IU size : %d\n",
+ ib_q->pi_local, hdr->iu_length);
+ DBG_INFO("*ib_q->ci_virt_addr: %x\n",
+ *(ib_q->ci_virt_addr));
+
+ /* Inform the fw about the new IU */
+ PCI_MEM_PUT32(softs, ib_q->pi_register_abs, ib_q->pi_register_offset, ib_q->pi_local);
+ PQI_UNLOCK(&ib_q->lock);
+ DBG_FUNC("OUT\n");
+ return PQI_STATUS_SUCCESS;
+}
diff --git a/sys/dev/smartpqi/smartpqi_defines.h b/sys/dev/smartpqi/smartpqi_defines.h
new file mode 100644
index 000000000000..38c2bfb4b94d
--- /dev/null
+++ b/sys/dev/smartpqi/smartpqi_defines.h
@@ -0,0 +1,1004 @@
+/*-
+ * Copyright (c) 2018 Microsemi Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#ifndef _PQI_DEFINES_H
+#define _PQI_DEFINES_H
+
+#define PQI_STATUS_FAILURE -1
+#define PQI_STATUS_TIMEOUT -2
+#define PQI_STATUS_QFULL -3
+#define PQI_STATUS_SUCCESS 0
+
+#define PQISRC_CMD_TIMEOUT_CNT 1200000 /* 500usec * 1200000 = 10 min */
+
+/* #define SHARE_EVENT_QUEUE_FOR_IO 1 */
+
+#define INVALID_ELEM 0xffff
+#ifndef MIN
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
+#endif
+
+#ifndef MAX
+#define MAX(a,b) ((a) > (b) ? (a) : (b))
+#endif
+
+#define PQISRC_ROUNDUP(x, y) (((x) + (y) - 1) / (y) * (y))
+#define PQISRC_DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
+
+#define ALIGN_BOUNDARY(a, n) { \
+ if (a % n) \
+ a = a + (n - a % n); \
+ }
+
+/* Busy wait timeout on a condition */
+#define COND_BUSYWAIT(cond, timeout /* in millisecond */) { \
+ if (!(cond)) { \
+ while (timeout) { \
+ OS_BUSYWAIT(1000); \
+ if (cond) \
+ break; \
+ timeout--; \
+ } \
+ } \
+ }
+
+/* Wait timeout on a condition*/
+#define COND_WAIT(cond, timeout /* in millisecond */) { \
+ if (!(cond)) { \
+ while (timeout) { \
+ OS_SLEEP(1000); \
+ if (cond) \
+ break; \
+ timeout--; \
+ } \
+ } \
+ }
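+
+/* Note: both wait macros above re-evaluate cond on every pass and
+ decrement the caller's timeout variable in place. */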
+
+#define FILL_QUEUE_ARRAY_ADDR(q,virt,dma) { \
+ q->array_virt_addr = virt; \
+ q->array_dma_addr = dma; \
+ }
+
+#define true 1
+#define false 0
+
+enum INTR_TYPE {
+ LOCK_INTR,
+ LOCK_SLEEP
+};
+
+#define LOCKNAME_SIZE 32
+
+#define INTR_TYPE_FIXED 0x1
+#define INTR_TYPE_MSI 0x2
+#define INTR_TYPE_MSIX 0x4
+#define SIS_ENABLE_MSIX 0x40
+
+#define DMA_TO_VIRT(mem) ((mem)->virt_addr)
+#define DMA_PHYS_LOW(mem) (((mem)->dma_addr) & 0x00000000ffffffff)
+#define DMA_PHYS_HIGH(mem) ((((mem)->dma_addr) & 0xffffffff00000000) >> 32)
+
+
+typedef enum REQUEST_STATUS {
+ REQUEST_SUCCESS = 0,
+ REQUEST_PENDING = -1,
+ REQUEST_FAILED = -2,
+}REQUEST_STATUS_T;
+
+typedef enum IO_PATH {
+ AIO_PATH,
+ RAID_PATH
+}IO_PATH_T;
+
+typedef enum device_type
+{
+ DISK_DEVICE,
+ TAPE_DEVICE,
+ ROM_DEVICE = 5,
+ MEDIUM_CHANGER_DEVICE = 8,
+ RAID_DEVICE = 0x0c,
+ ENCLOSURE_DEVICE,
+ ZBC_DEVICE = 0x14
+} device_type_t;
+
+typedef enum controller_state {
+ PQI_UP_RUNNING,
+ PQI_BUS_RESET,
+}controller_state_t;
+
+
+#define PQISRC_MAX_MSIX_SUPPORTED 64
+
+/* SIS Specific */
+#define PQISRC_INIT_STRUCT_REVISION 9
+#define PQISRC_SECTOR_SIZE 512
+#define PQISRC_BLK_SIZE PQISRC_SECTOR_SIZE
+#define PQISRC_DEFAULT_DMA_ALIGN 4
+#define PQISRC_DMA_ALIGN_MASK (PQISRC_DEFAULT_DMA_ALIGN - 1)
+#define PQISRC_ERR_BUF_DMA_ALIGN 32
+#define PQISRC_ERR_BUF_ELEM_SIZE MAX(sizeof(raid_path_error_info_elem_t),sizeof(aio_path_error_info_elem_t))
+#define PQISRC_INIT_STRUCT_DMA_ALIGN 16
+
+#define SIS_CMD_GET_ADAPTER_PROPERTIES 0x19
+#define SIS_CMD_GET_COMM_PREFERRED_SETTINGS 0x26
+#define SIS_CMD_GET_PQI_CAPABILITIES 0x3000
+#define SIS_CMD_INIT_BASE_STRUCT_ADDRESS 0x1b
+
+#define SIS_SUPPORT_EXT_OPT 0x00800000
+#define SIS_SUPPORT_PQI 0x00000004
+#define SIS_SUPPORT_PQI_RESET_QUIESCE 0x00000008
+
+#define SIS_PQI_RESET_QUIESCE 0x1000000
+
+#define SIS_STATUS_OK_TIMEOUT 120000 /* in milli sec, 120 sec */
+
+#define SIS_CMD_COMPLETE_TIMEOUT 30000 /* in milli sec, 30 secs */
+#define SIS_POLL_START_WAIT_TIME 20000 /* in micro sec, 20 milli sec */
+#define SIS_DB_BIT_CLEAR_TIMEOUT_CNT 120000 /* 500usec * 120000 = 60 sec */
+
+#define SIS_ENABLE_TIMEOUT 3000
+#define REENABLE_SIS 0x1
+#define TRIGGER_NMI_SIS 0x800000
+/*SIS Register status defines */
+
+#define PQI_CTRL_KERNEL_UP_AND_RUNNING 0x80
+#define PQI_CTRL_KERNEL_PANIC 0x100
+
+#define SIS_CTL_TO_HOST_DB_DISABLE_ALL 0xFFFFFFFF
+#define SIS_CTL_TO_HOST_DB_CLEAR 0x00001000
+#define SIS_CMD_SUBMIT 0x00000200 /* Bit 9 */
+#define SIS_CMD_COMPLETE 0x00001000 /* Bit 12 */
+#define SIS_CMD_STATUS_SUCCESS 0x1
+
+/* PQI specific */
+
+/* defines */
+#define PQISRC_PQI_REG_OFFSET 0x4000
+#define PQISRC_MAX_OUTSTANDING_REQ 4096
+#define PQISRC_MAX_ADMIN_IB_QUEUE_ELEM_NUM 16
+#define PQISRC_MAX_ADMIN_OB_QUEUE_ELEM_NUM 16
+
+
+
+#define PQI_MIN_OP_IB_QUEUE_ID 1
+#define PQI_OP_EVENT_QUEUE_ID 1
+#define PQI_MIN_OP_OB_QUEUE_ID 2
+
+#define PQISRC_MAX_SUPPORTED_OP_IB_Q 128
+#define PQISRC_MAX_SUPPORTED_OP_RAID_IB_Q (PQISRC_MAX_SUPPORTED_OP_IB_Q / 2)
+#define PQISRC_MAX_SUPPORTED_OP_AIO_IB_Q (PQISRC_MAX_SUPPORTED_OP_RAID_IB_Q)
+#define PQISRC_MAX_OP_IB_QUEUE_ELEM_NUM (PQISRC_MAX_OUTSTANDING_REQ / PQISRC_MAX_SUPPORTED_OP_IB_Q)
+#define PQISRC_MAX_OP_OB_QUEUE_ELEM_NUM PQISRC_MAX_OUTSTANDING_REQ
+#define PQISRC_MIN_OP_OB_QUEUE_ELEM_NUM 2
+#define PQISRC_MAX_SUPPORTED_OP_OB_Q 64
+#define PQISRC_OP_MAX_IBQ_ELEM_SIZE 8 /* 8 * 16 = 128 bytes */
+#define PQISRC_OP_MIN_IBQ_ELEM_SIZE 2 /* 2 * 16 = 32 bytes */
+#define PQISRC_OP_OBQ_ELEM_SIZE 1 /* 16 bytes */
+#define PQISRC_ADMIN_IBQ_ELEM_SIZE 2 /* 2 * 16 = 32 bytes */
+#define PQISRC_INTR_COALSC_GRAN 0
+#define PQISRC_PROTO_BIT_MASK 0
+#define PQISRC_SGL_SUPPORTED_BIT_MASK 0
+
+#define PQISRC_NUM_EVENT_Q_ELEM 32
+#define PQISRC_EVENT_Q_ELEM_SIZE 32
+
+/* PQI Registers state status */
+
+#define PQI_RESET_ACTION_RESET 0x1
+#define PQI_RESET_ACTION_COMPLETED 0x2
+#define PQI_RESET_TYPE_NO_RESET 0x0
+#define PQI_RESET_TYPE_SOFT_RESET 0x1
+#define PQI_RESET_TYPE_FIRM_RESET 0x2
+#define PQI_RESET_TYPE_HARD_RESET 0x3
+
+#define PQI_RESET_POLL_INTERVAL 100000 /*100 msec*/
+
+enum pqisrc_ctrl_mode{
+ CTRL_SIS_MODE = 0,
+ CTRL_PQI_MODE
+};
+
+/* PQI device performing internal initialization (e.g., POST). */
+#define PQI_DEV_STATE_POWER_ON_AND_RESET 0x0
+/* Upon entry to this state PQI device initialization begins. */
+#define PQI_DEV_STATE_PQI_STATUS_AVAILABLE 0x1
+/* PQI device Standard registers are available to the driver. */
+#define PQI_DEV_STATE_ALL_REGISTERS_READY 0x2
+/* PQI device is initialized and ready to process any PCI transactions. */
+#define PQI_DEV_STATE_ADMIN_QUEUE_PAIR_READY 0x3
+/* The PQI Device Error register indicates the error. */
+#define PQI_DEV_STATE_ERROR 0x4
+
+#define PQI_DEV_STATE_AT_INIT ( PQI_DEV_STATE_PQI_STATUS_AVAILABLE | \
+ PQI_DEV_STATE_ALL_REGISTERS_READY | \
+ PQI_DEV_STATE_ADMIN_QUEUE_PAIR_READY )
+
+#define PQISRC_PQI_DEVICE_SIGNATURE "PQI DREG"
+#define PQI_ADMINQ_ELEM_ARRAY_ALIGN 64
+#define PQI_ADMINQ_CI_PI_ALIGN 64
+#define PQI_OPQ_ELEM_ARRAY_ALIGN 64
+#define PQI_OPQ_CI_PI_ALIGN 4
+#define PQI_ADDR_ALIGN_MASK_64 0x3F /* lsb 6 bits */
+#define PQI_ADDR_ALIGN_MASK_4 0x3 /* lsb 2 bits */
+
+#define PQISRC_PQIMODE_READY_TIMEOUT (30 * 1000 ) /* 30 secs */
+#define PQISRC_MODE_READY_POLL_INTERVAL 1000 /* 1 msec */
+
+#define PRINT_PQI_SIGNATURE(sign) { int i = 0; \
+ char si[9]; \
+ for(i=0;i<8;i++) \
+ si[i] = *((char *)&(sign)+i); \
+ si[i] = '\0'; \
+ DBG_INFO("Signature is %s",si); \
+ }
+#define PQI_CONF_TABLE_MAX_LEN ((uint16_t)~0)
+#define PQI_CONF_TABLE_SIGNATURE "CFGTABLE"
+
+/* PQI configuration table section IDs */
+#define PQI_CONF_TABLE_SECTION_GENERAL_INFO 0
+#define PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES 1
+#define PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA 2
+#define PQI_CONF_TABLE_SECTION_DEBUG 3
+#define PQI_CONF_TABLE_SECTION_HEARTBEAT 4
+
+#define CTRLR_HEARTBEAT_CNT(softs) LE_64(PCI_MEM_GET64(softs, softs->heartbeat_counter_abs_addr, softs->heartbeat_counter_off))
+#define PQI_NEW_HEARTBEAT_MECHANISM(softs) 1
+
+ /* pqi-2r00a table 36 */
+#define PQI_ADMIN_QUEUE_MSIX_DISABLE (0x80000000)
+#define PQI_ADMIN_QUEUE_MSIX_ENABLE (0 << 31)
+
+#define PQI_ADMIN_QUEUE_CONF_FUNC_CREATE_Q_PAIR 0x01
+#define PQI_ADMIN_QUEUE_CONF_FUNC_DEL_Q_PAIR 0x02
+#define PQI_ADMIN_QUEUE_CONF_FUNC_STATUS_IDLE 0x00
+#define PQISRC_ADMIN_QUEUE_CREATE_TIMEOUT 1000 /* in milli sec, 1 sec; 100 ms is standard */
+#define PQISRC_ADMIN_QUEUE_DELETE_TIMEOUT 100 /* 100 ms is standard */
+#define PQISRC_ADMIN_CMD_RESP_TIMEOUT 3000 /* 3 sec */
+#define PQISRC_RAIDPATH_CMD_TIMEOUT 30000 /* 30 sec */
+
+#define REPORT_PQI_DEV_CAP_DATA_BUF_SIZE sizeof(pqi_dev_cap_t)
+#define REPORT_MANUFACTURER_INFO_DATA_BUF_SIZE 0x80 /* Data buffer size specified in bytes 0-1 of data buffer. 128 bytes. */
+/* PQI IUs */
+/* Admin IU request length not including header. */
+#define PQI_STANDARD_IU_LENGTH 0x003C /* 60 bytes. */
+#define PQI_IU_TYPE_GENERAL_ADMIN_REQUEST 0x60
+#define PQI_IU_TYPE_GENERAL_ADMIN_RESPONSE 0xe0
+
+/* PQI / Vendor specific IU */
+#define PQI_FUNCTION_REPORT_DEV_CAP 0x00
+#define PQI_REQUEST_IU_TASK_MANAGEMENT 0x13
+#define PQI_IU_TYPE_RAID_PATH_IO_REQUEST 0x14
+#define PQI_IU_TYPE_AIO_PATH_IO_REQUEST 0x15
+#define PQI_REQUEST_IU_GENERAL_ADMIN 0x60
+#define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG 0x72
+#define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG 0x73
+#define PQI_RESPONSE_IU_GENERAL_MANAGEMENT 0x81
+#define PQI_RESPONSE_IU_TASK_MANAGEMENT 0x93
+#define PQI_RESPONSE_IU_GENERAL_ADMIN 0xe0
+
+#define PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS 0xf0
+#define PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS 0xf1
+#define PQI_RESPONSE_IU_RAID_PATH_IO_ERROR 0xf2
+#define PQI_RESPONSE_IU_AIO_PATH_IO_ERROR 0xf3
+#define PQI_RESPONSE_IU_AIO_PATH_IS_OFF 0xf4
+#define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT 0xf6
+#define PQI_REQUEST_HEADER_LENGTH 4
+#define PQI_FUNCTION_CREATE_OPERATIONAL_IQ 0x10
+#define PQI_FUNCTION_CREATE_OPERATIONAL_OQ 0x11
+#define PQI_FUNCTION_DELETE_OPERATIONAL_IQ 0x12
+#define PQI_FUNCTION_DELETE_OPERATIONAL_OQ 0x13
+#define PQI_FUNCTION_CHANGE_OPERATIONAL_IQ_PROP 0x14
+#define PQI_CHANGE_OP_IQ_PROP_ASSIGN_AIO 1
+
+#define PQI_DEFAULT_IB_QUEUE 0
+/* Interface macros */
+
+#define GET_FW_STATUS(softs) \
+ (PCI_MEM_GET32(softs, &softs->ioa_reg->scratchpad3_fw_status, LEGACY_SIS_OMR))
+
+#define SIS_IS_KERNEL_PANIC(softs) \
+ (GET_FW_STATUS(softs) & PQI_CTRL_KERNEL_PANIC)
+
+#define SIS_IS_KERNEL_UP(softs) \
+ (GET_FW_STATUS(softs) & PQI_CTRL_KERNEL_UP_AND_RUNNING)
+
+#define PQI_GET_CTRL_MODE(softs) \
+ (PCI_MEM_GET32(softs, &softs->ioa_reg->scratchpad0, LEGACY_SIS_SCR0))
+
+#define PQI_SAVE_CTRL_MODE(softs, mode) \
+ PCI_MEM_PUT32(softs, &softs->ioa_reg->scratchpad0, LEGACY_SIS_SCR0, mode)
+
+#define PQISRC_MAX_TARGETID 1024
+#define PQISRC_MAX_TARGETLUN 64
+
+/* Vendor specific IU Type for Event config Cmds */
+#define PQI_REQUEST_IU_REPORT_EVENT_CONFIG 0x72
+#define PQI_REQUEST_IU_SET_EVENT_CONFIG 0x73
+#define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT 0xf6
+#define PQI_RESPONSE_IU_GENERAL_MANAGEMENT 0x81
+#define PQI_MANAGEMENT_CMD_RESP_TIMEOUT 3000
+#define PQISRC_EVENT_ACK_RESP_TIMEOUT 1000
+
+
+/* Supported Event types by controller */
+#define PQI_NUM_SUPPORTED_EVENTS 7
+
+#define PQI_EVENT_TYPE_HOTPLUG 0x1
+#define PQI_EVENT_TYPE_HARDWARE 0x2
+#define PQI_EVENT_TYPE_PHYSICAL_DEVICE 0x4
+#define PQI_EVENT_TYPE_LOGICAL_DEVICE 0x5
+#define PQI_EVENT_TYPE_AIO_STATE_CHANGE 0xfd
+#define PQI_EVENT_TYPE_AIO_CONFIG_CHANGE 0xfe
+#define PQI_EVENT_TYPE_HEARTBEAT 0xff
+
+/* for indexing into the pending_events[] field of struct pqisrc_softstate */
+#define PQI_EVENT_HEARTBEAT 0
+#define PQI_EVENT_HOTPLUG 1
+#define PQI_EVENT_HARDWARE 2
+#define PQI_EVENT_PHYSICAL_DEVICE 3
+#define PQI_EVENT_LOGICAL_DEVICE 4
+#define PQI_EVENT_AIO_STATE_CHANGE 5
+#define PQI_EVENT_AIO_CONFIG_CHANGE 6
+
+#define PQI_MAX_HEARTBEAT_REQUESTS 5
+
+
+/* Device flags */
+#define PQISRC_DFLAG_VALID (1 << 0)
+#define PQISRC_DFLAG_CONFIGURING (1 << 1)
+
+#define MAX_EMBEDDED_SG_IN_FIRST_IU 4
+#define MAX_EMBEDDED_SG_IN_IU 8
+#define SG_FLAG_LAST 0x40000000
+#define SG_FLAG_CHAIN 0x80000000
+
+#define IN_PQI_RESET(softs) (softs->ctlr_state & PQI_BUS_RESET)
+#define DEV_GONE(dev) (!dev || (dev->invalid == true))
+#define IS_AIO_PATH(dev) (dev->aio_enabled)
+#define IS_RAID_PATH(dev) (!dev->aio_enabled)
+
+#define DEV_RESET(dvp) (dvp->reset_in_progress)
+
+/* SOP data direction flags */
+#define SOP_DATA_DIR_NONE 0x00
+#define SOP_DATA_DIR_FROM_DEVICE 0x01
+#define SOP_DATA_DIR_TO_DEVICE 0x02
+#define SOP_DATA_DIR_BIDIRECTIONAL 0x03
+#define SOP_PARTIAL_DATA_BUFFER 0x04
+
+#define PQISRC_DMA_VALID (1 << 0)
+#define PQISRC_CMD_NO_INTR (1 << 1)
+
+#define SOP_TASK_ATTRIBUTE_SIMPLE 0
+#define SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE 1
+#define SOP_TASK_ATTRIBUTE_ORDERED 2
+#define SOP_TASK_ATTRIBUTE_ACA 4
+
+#define SOP_TASK_MANAGEMENT_FUNCTION_COMPLETE 0x0
+#define SOP_TASK_MANAGEMENT_FUNCTION_REJECTED 0x4
+#define SOP_TASK_MANAGEMENT_FUNCTION_FAILED 0x5
+#define SOP_TASK_MANAGEMENT_FUNCTION_SUCCEEDED 0x8
+#define SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK 0x01
+#define SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET 0x02
+#define SOP_TASK_MANAGEMENT_LUN_RESET 0x8
+
+
+/* Additional CDB bytes */
+#define PQI_ADDITIONAL_CDB_BYTES_0 0 /* 16 byte CDB */
+#define PQI_ADDITIONAL_CDB_BYTES_4 1 /* 20 byte CDB */
+#define PQI_ADDITIONAL_CDB_BYTES_8 2 /* 24 byte CDB */
+#define PQI_ADDITIONAL_CDB_BYTES_12 3 /* 28 byte CDB */
+#define PQI_ADDITIONAL_CDB_BYTES_16 4 /* 32 byte CDB */
+
+#define PQI_PROTOCOL_SOP 0x0
+
+#define PQI_AIO_STATUS_GOOD 0x0
+#define PQI_AIO_STATUS_CHECK_CONDITION 0x2
+#define PQI_AIO_STATUS_CONDITION_MET 0x4
+#define PQI_AIO_STATUS_DEVICE_BUSY 0x8
+#define PQI_AIO_STATUS_INT_GOOD 0x10
+#define PQI_AIO_STATUS_INT_COND_MET 0x14
+#define PQI_AIO_STATUS_RESERV_CONFLICT 0x18
+#define PQI_AIO_STATUS_CMD_TERMINATED 0x22
+#define PQI_AIO_STATUS_QUEUE_FULL 0x28
+#define PQI_AIO_STATUS_TASK_ABORTED 0x40
+#define PQI_AIO_STATUS_UNDERRUN 0x51
+#define PQI_AIO_STATUS_OVERRUN 0x75
+/* Status when Target Failure */
+#define PQI_AIO_STATUS_IO_ERROR 0x1
+#define PQI_AIO_STATUS_IO_ABORTED 0x2
+#define PQI_AIO_STATUS_IO_NO_DEVICE 0x3
+#define PQI_AIO_STATUS_INVALID_DEVICE 0x4
+#define PQI_AIO_STATUS_AIO_PATH_DISABLED 0xe
+
+/* Service Response */
+#define PQI_AIO_SERV_RESPONSE_COMPLETE 0
+#define PQI_AIO_SERV_RESPONSE_FAILURE 1
+#define PQI_AIO_SERV_RESPONSE_TMF_COMPLETE 2
+#define PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED 3
+#define PQI_AIO_SERV_RESPONSE_TMF_REJECTED 4
+#define PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN 5
+
+#define PQI_TMF_WAIT_DELAY 10000000 /* 10 seconds */
+
+#define PQI_RAID_STATUS_GOOD PQI_AIO_STATUS_GOOD
+#define PQI_RAID_STATUS_CHECK_CONDITION PQI_AIO_STATUS_CHECK_CONDITION
+#define PQI_RAID_STATUS_CONDITION_MET PQI_AIO_STATUS_CONDITION_MET
+#define PQI_RAID_STATUS_DEVICE_BUSY PQI_AIO_STATUS_DEVICE_BUSY
+#define PQI_RAID_STATUS_INT_GOOD PQI_AIO_STATUS_INT_GOOD
+#define PQI_RAID_STATUS_INT_COND_MET PQI_AIO_STATUS_INT_COND_MET
+#define PQI_RAID_STATUS_RESERV_CONFLICT PQI_AIO_STATUS_RESERV_CONFLICT
+#define PQI_RAID_STATUS_CMD_TERMINATED PQI_AIO_STATUS_CMD_TERMINATED
+#define PQI_RAID_STATUS_QUEUE_FULL PQI_AIO_STATUS_QUEUE_FULL
+#define PQI_RAID_STATUS_TASK_ABORTED PQI_AIO_STATUS_TASK_ABORTED
+#define PQI_RAID_STATUS_UNDERRUN PQI_AIO_STATUS_UNDERRUN
+#define PQI_RAID_STATUS_OVERRUN PQI_AIO_STATUS_OVERRUN
+
+/* VPD inquiry pages */
+#define SCSI_VPD_SUPPORTED_PAGES 0x0 /* standard page */
+#define SCSI_VPD_DEVICE_ID 0x83 /* standard page */
+#define SA_VPD_PHYS_DEVICE_ID 0xc0 /* vendor-specific page */
+#define SA_VPD_LV_DEVICE_GEOMETRY 0xc1 /* vendor-specific page */
+#define SA_VPD_LV_IOACCEL_STATUS 0xc2 /* vendor-specific page */
+#define SA_VPD_LV_STATUS 0xc3 /* vendor-specific page */
+
+#define VPD_PAGE (1 << 8)
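+
+/*
+ * Illustrative sketch (annotation, not part of the driver): VPD_PAGE is
+ * OR'ed into the vpd_page argument of the internal INQUIRY path so the CDB
+ * builder sets the EVPD bit (cdb[1] = 0x1) and places the page code in
+ * cdb[2]. A caller requesting the logical-volume status page would use:
+ *
+ *	pqisrc_send_scsi_inquiry(softs, scsi3addr,
+ *	    VPD_PAGE | SA_VPD_LV_STATUS, buff, buf_len);
+ *
+ * Without VPD_PAGE set, a standard (non-EVPD) INQUIRY is issued.
+ */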
+
+
+/* logical volume states */
+#define SA_LV_OK 0x0
+#define SA_LV_NOT_AVAILABLE 0xb
+#define SA_LV_UNDERGOING_ERASE 0xf
+#define SA_LV_UNDERGOING_RPI 0x12
+#define SA_LV_PENDING_RPI 0x13
+#define SA_LV_ENCRYPTED_NO_KEY 0x14
+#define SA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER 0x15
+#define SA_LV_UNDERGOING_ENCRYPTION 0x16
+#define SA_LV_UNDERGOING_ENCRYPTION_REKEYING 0x17
+#define SA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER 0x18
+#define SA_LV_PENDING_ENCRYPTION 0x19
+#define SA_LV_PENDING_ENCRYPTION_REKEYING 0x1a
+#define SA_LV_STATUS_VPD_UNSUPPORTED 0xff
+
+/*
+ * assume worst case: SATA queue depth of 31 minus 4 internal firmware commands
+ */
+#define PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH 27
+
+/* 0 = no limit */
+#define PQI_LOGICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH 0
+
+
+
+#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
+
+#define SA_CACHE_FLUSH 0x1
+#define SA_INQUIRY 0x12
+#define SA_REPORT_LOG 0xc2 /* Report Logical LUNs */
+#define SA_REPORT_PHYS 0xc3 /* Report Physical LUNs */
+#define SA_CISS_READ 0xc0
+#define SA_GET_RAID_MAP 0xc8
+
+#define SA_REPORT_LOG_EXTENDED 0x1
+#define SA_REPORT_PHYS_EXTENDED 0x2
+
+#define SA_CACHE_FLUSH_BUF_LEN 4
+
+#define REPORT_LUN_DEV_FLAG_AIO_ENABLED 0x8
+#define PQI_MAX_TRANSFER_SIZE (4 * 1024U * 1024U)
+#define RAID_MAP_MAX_ENTRIES 1024
+#define RAID_MAP_ENCRYPTION_ENABLED 0x1
+
+#define ASC_LUN_NOT_READY 0x4
+#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x4
+#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x2
+
+
+#define OBDR_SIG_OFFSET 43
+#define OBDR_TAPE_SIG "$DR-10"
+#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
+#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
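+
+/*
+ * Example (annotation only): an OBDR (One-Button-Disaster-Recovery) tape is
+ * detected by reading OBDR_TAPE_INQ_SIZE (43 + 6 = 49) bytes of standard
+ * INQUIRY data and comparing the 6 bytes at offset 43 against the "$DR-10"
+ * signature:
+ *
+ *	memcmp(&inq_buff[OBDR_SIG_OFFSET], OBDR_TAPE_SIG, OBDR_SIG_LEN) == 0
+ */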
+
+
+#define IOACCEL_STATUS_BYTE 4
+#define OFFLOAD_CONFIGURED_BIT 0x1
+#define OFFLOAD_ENABLED_BIT 0x2
+
+#define PQI_RAID_DATA_IN_OUT_GOOD 0x0
+#define PQI_RAID_DATA_IN_OUT_UNDERFLOW 0x1
+#define PQI_RAID_DATA_IN_OUT_UNSOLICITED_ABORT 0xf3
+#define PQI_RAID_DATA_IN_OUT_ABORTED 0xf4
+
+#define PQI_PHYSICAL_DEVICE_BUS 0
+#define PQI_RAID_VOLUME_BUS 1
+#define PQI_HBA_BUS 2
+#define PQI_EXTERNAL_RAID_VOLUME_BUS 3
+#define PQI_MAX_BUS PQI_EXTERNAL_RAID_VOLUME_BUS
+
+#define TEST_UNIT_READY 0x00
+#define SCSI_VPD_HEADER_LENGTH 64
+
+
+#define PQI_MAX_MULTILUN 256
+#define PQI_MAX_LOGICALS 64
+#define PQI_MAX_PHYSICALS 1024
+#define PQI_MAX_DEVICES (PQI_MAX_LOGICALS + PQI_MAX_PHYSICALS + 1) /* 1 for controller device entry */
+
+
+#define PQI_CTLR_INDEX (PQI_MAX_DEVICES - 1)
+#define PQI_PD_INDEX(t) (t + PQI_MAX_LOGICALS)
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#define MAX_TARGET_DEVICES 1024
+
+#define PQI_NO_MEM 2
+
+typedef enum pqisrc_device_status {
+ DEVICE_NOT_FOUND,
+ DEVICE_CHANGED,
+ DEVICE_UNCHANGED,
+} device_status_t;
+
+#define SA_RAID_0 0
+#define SA_RAID_4 1
+#define SA_RAID_1 2 /* also used for RAID 10 */
+#define SA_RAID_5 3 /* also used for RAID 50 */
+#define SA_RAID_51 4
+#define SA_RAID_6 5 /* also used for RAID 60 */
+#define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
+#define SA_RAID_MAX SA_RAID_ADM
+#define SA_RAID_UNKNOWN 0xff
+
+/* BMIC commands */
+#define BMIC_IDENTIFY_CONTROLLER 0x11
+#define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15
+#define BMIC_READ 0x26
+#define BMIC_WRITE 0x27
+#define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64
+#define BMIC_SENSE_SUBSYSTEM_INFORMATION 0x66
+#define BMIC_CACHE_FLUSH 0xc2
+#define BMIC_FLASH_FIRMWARE 0xf7
+#define BMIC_WRITE_HOST_WELLNESS 0xa5
+
+
+#define MASKED_DEVICE(lunid) ((lunid)[3] & 0xC0)
+#define BMIC_GET_LEVEL_2_BUS(lunid) ((lunid)[7] & 0x3F)
+#define BMIC_GET_LEVEL_TWO_TARGET(lunid) ((lunid)[6])
+#define BMIC_GET_DRIVE_NUMBER(lunid) \
+ (((BMIC_GET_LEVEL_2_BUS((lunid)) - 1) << 8) + \
+ BMIC_GET_LEVEL_TWO_TARGET((lunid)))
+#define NON_DISK_PHYS_DEV(rle) \
+ (((reportlun_ext_entry_t *)(rle))->device_flags & 0x1)
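+
+/*
+ * Worked example (annotation only): for a masked physical device LUN ID
+ * with lunid[7] = 0x02 (level-2 bus 2) and lunid[6] = 0x05 (level-2 target
+ * 5), BMIC_GET_DRIVE_NUMBER() yields ((2 - 1) << 8) + 5 = 0x105. This
+ * 16-bit drive number is split across cdb[2] (low byte) and cdb[9] (high
+ * byte) in BMIC_IDENTIFY_PHYSICAL_DEVICE requests.
+ */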
+
+#define NO_TIMEOUT ((unsigned long) -1)
+
+#define BMIC_DEVICE_TYPE_SATA 0x1
+
+/* No of IO slots required for internal requests */
+#define PQI_RESERVED_IO_SLOTS_SYNC_REQUESTS 3
+#define PQI_RESERVED_IO_SLOTS_TMF 1
+#define PQI_RESERVED_IO_SLOTS_CNT (PQI_NUM_SUPPORTED_EVENTS + \
+ PQI_RESERVED_IO_SLOTS_TMF + \
+ PQI_RESERVED_IO_SLOTS_SYNC_REQUESTS)
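+
+/*
+ * Annotation (not in the original source): one IO slot is reserved per
+ * supported event (PQI_NUM_SUPPORTED_EVENTS, defined elsewhere), plus one
+ * slot for a task-management function and three for synchronous internal
+ * requests; the remaining slots stay available for regular I/O.
+ */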
+
+static inline uint16_t GET_LE16(const uint8_t *p)
+{
+ return p[0] | p[1] << 8;
+}
+
+static inline uint32_t GET_LE32(const uint8_t *p)
+{
+ return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
+}
+
+static inline uint64_t GET_LE64(const uint8_t *p)
+{
+ return (((uint64_t)GET_LE32(p + 4) << 32) |
+ GET_LE32(p));
+}
+
+static inline uint16_t GET_BE16(const uint8_t *p)
+{
+ return p[0] << 8 | p[1];
+}
+
+static inline uint32_t GET_BE32(const uint8_t *p)
+{
+ return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
+}
+
+static inline uint64_t GET_BE64(const uint8_t *p)
+{
+ return (((uint64_t)GET_BE32(p) << 32) |
+ GET_BE32(p + 4));
+}
+
+static inline void PUT_BE16(uint16_t val, uint8_t *p)
+{
+ *p++ = val >> 8;
+ *p++ = val;
+}
+
+static inline void PUT_BE32(uint32_t val, uint8_t *p)
+{
+ PUT_BE16(val >> 16, p);
+ PUT_BE16(val, p + 2);
+}
+
+static inline void PUT_BE64(uint64_t val, uint8_t *p)
+{
+ PUT_BE32(val >> 32, p);
+ PUT_BE32(val, p + 4);
+}
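+
+/*
+ * Quick sanity example for the byte-order helpers above (annotation only):
+ * given uint8_t p[2] = { 0x34, 0x12 }, GET_LE16(p) returns 0x1234 while
+ * GET_BE16(p) returns 0x3412; conversely, PUT_BE16(0x1234, p) stores
+ * p[0] = 0x12 and p[1] = 0x34. The 32/64-bit variants compose these in the
+ * same way.
+ */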
+
+
+#define OS_FREEBSD
+#define SIS_POLL_WAIT
+
+#define OS_ATTRIBUTE_PACKED __attribute__((__packed__))
+#define OS_ATTRIBUTE_ALIGNED(n) __attribute__((aligned(n)))
+
+
+/* Management Interface */
+#define CCISS_IOC_MAGIC 'C'
+#define SMARTPQI_IOCTL_BASE 'M'
+#define CCISS_GETDRIVVER _IOWR(SMARTPQI_IOCTL_BASE, 0, driver_info)
+#define CCISS_GETPCIINFO _IOWR(SMARTPQI_IOCTL_BASE, 1, pqi_pci_info_t)
+#define SMARTPQI_PASS_THRU _IOWR(SMARTPQI_IOCTL_BASE, 2, IOCTL_Command_struct)
+#define CCISS_PASSTHRU _IOWR('C', 210, IOCTL_Command_struct)
+#define CCISS_REGNEWD _IO(CCISS_IOC_MAGIC, 14)
+
+/*IOCTL pci_info structure */
+typedef struct pqi_pci_info
+{
+ unsigned char bus;
+ unsigned char dev_fn;
+ unsigned short domain;
+ uint32_t board_id;
+ uint32_t chip_id;
+}pqi_pci_info_t;
+
+typedef struct _driver_info
+{
+ unsigned char major_version;
+ unsigned char minor_version;
+ unsigned char release_version;
+ unsigned long build_revision;
+ unsigned long max_targets;
+ unsigned long max_io;
+ unsigned long max_transfer_length;
+}driver_info, *pdriver_info;
+
+typedef uint8_t *passthru_buf_type_t;
+
+
+#define PQISRC_DRIVER_MAJOR 1
+#define PQISRC_DRIVER_MINOR 0
+#define PQISRC_DRIVER_RELEASE 1
+#define PQISRC_DRIVER_REVISION	239
+
+#define STR(s) # s
+#define PQISRC_VERSION(a, b, c, d) STR(a.b.c-d)
+#define PQISRC_DRIVER_VERSION PQISRC_VERSION(PQISRC_DRIVER_MAJOR, \
+ PQISRC_DRIVER_MINOR, \
+ PQISRC_DRIVER_RELEASE, \
+ PQISRC_DRIVER_REVISION)
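+
+/*
+ * Expansion example (annotation only): with the values above, the argument
+ * to STR() is macro-expanded before stringizing, so PQISRC_DRIVER_VERSION
+ * expands to the string literal "1.0.1-239".
+ */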
+
+/* End Management interface */
+
+#ifdef ASSERT
+#undef ASSERT
+#endif
+
+#define ASSERT(cond) {\
+ if (!(cond)) { \
+ printf("Assertion failed at file %s line %d\n",__FILE__,__LINE__); \
+ } \
+ }
+
+
+#define PQI_MAX_MSIX 64 /* vectors */
+#define PQI_MSI_CTX_SIZE	(sizeof(pqi_intr_ctx) + 1)
+#define IS_POLLING_REQUIRED(softs) if (cold) {\
+ pqisrc_process_event_intr_src(softs, 0);\
+ pqisrc_process_response_queue(softs, 1);\
+ }
+
+#define OS_GET_TASK_ATTR(rcb) os_get_task_attr(rcb)
+#define OS_FW_HEARTBEAT_TIMER_INTERVAL (5)
+
+typedef struct PCI_ACC_HANDLE {
+ bus_space_tag_t pqi_btag;
+ bus_space_handle_t pqi_bhandle;
+} PCI_ACC_HANDLE_T;
+
+/*
+ * Legacy SIS Register definitions for the Adaptec PMC SRC/SRCv/smartraid adapters.
+ */
+/* accessible via BAR0 */
+#define LEGACY_SIS_IOAR 0x18 /* IOA->host interrupt register */
+#define LEGACY_SIS_IDBR 0x20 /* inbound doorbell register */
+#define LEGACY_SIS_IISR 0x24 /* inbound interrupt status register */
+#define LEGACY_SIS_OIMR 0x34 /* outbound interrupt mask register */
+#define LEGACY_SIS_ODBR_R 0x9c /* outbound doorbell register read */
+#define LEGACY_SIS_ODBR_C 0xa0 /* outbound doorbell register clear */
+
+#define LEGACY_SIS_SCR0 0xb0 /* scratchpad 0 */
+#define LEGACY_SIS_OMR 0xbc /* outbound message register */
+#define LEGACY_SIS_IQUE64_L 0xc0 /* inbound queue address 64-bit (low) */
+#define LEGACY_SIS_IQUE64_H 0xc4 /* inbound queue address 64-bit (high)*/
+#define LEGACY_SIS_ODBR_MSI 0xc8 /* MSI register for sync./AIF */
+#define LEGACY_SIS_IQN_L 0xd0 /* inbound queue native mode (low) */
+#define LEGACY_SIS_IQN_H 0xd4 /* inbound queue native mode (high)*/
+#define LEGACY_SIS_MAILBOX 0x7fc60 /* mailbox (20 bytes) */
+#define LEGACY_SIS_SRCV_MAILBOX 0x1000 /* mailbox (20 bytes) */
+
+#define LEGACY_SIS_ODR_SHIFT 12 /* outbound doorbell shift */
+#define LEGACY_SIS_IDR_SHIFT 9 /* inbound doorbell shift */
+
+
+/*
+ * PQI Register definitions for the smartraid adapters
+ */
+/* accessible via BAR0 */
+#define PQI_SIGNATURE 0x4000
+#define PQI_ADMINQ_CONFIG 0x4008
+#define PQI_ADMINQ_CAP 0x4010
+#define PQI_LEGACY_INTR_STATUS 0x4018
+#define PQI_LEGACY_INTR_MASK_SET 0x401C
+#define PQI_LEGACY_INTR_MASK_CLR 0x4020
+#define PQI_DEV_STATUS 0x4040
+#define PQI_ADMIN_IBQ_PI_OFFSET 0x4048
+#define PQI_ADMIN_OBQ_CI_OFFSET 0x4050
+#define PQI_ADMIN_IBQ_ELEM_ARRAY_ADDR 0x4058
+#define PQI_ADMIN_OBQ_ELEM_ARRAY_ADDR 0x4060
+#define PQI_ADMIN_IBQ_CI_ADDR 0x4068
+#define PQI_ADMIN_OBQ_PI_ADDR 0x4070
+#define PQI_ADMINQ_PARAM 0x4078
+#define PQI_DEV_ERR 0x4080
+#define PQI_DEV_ERR_DETAILS 0x4088
+#define PQI_DEV_RESET 0x4090
+#define PQI_POWER_ACTION 0x4094
+
+/* Busy-wait for the given number of microseconds */
+#define OS_BUSYWAIT(x) DELAY(x)
+#define OS_SLEEP(timeout) \
+	DELAY(timeout)
+
+#define OS_HOST_WELLNESS_TIMEOUT (24 * 3600)
+
+
+#define LE_16(x) htole16(x)
+#define LE_32(x) htole32(x)
+#define LE_64(x) htole64(x)
+#define BE_16(x) htobe16(x)
+#define BE_32(x) htobe32(x)
+#define BE_64(x) htobe64(x)
+
+#define PQI_HWIF_SRCV 0
+#define PQI_HWIF_UNKNOWN -1
+
+
+#define SMART_STATE_SUSPEND (1<<0)
+#define SMART_STATE_UNUSED0 (1<<1)
+#define SMART_STATE_INTERRUPTS_ON (1<<2)
+#define SMART_STATE_AIF_SLEEPER (1<<3)
+#define SMART_STATE_RESET (1<<4)
+
+#define PQI_FLAG_BUSY (1<<0)
+#define PQI_MSI_ENABLED (1<<1)
+#define PQI_SIM_REGISTERED (1<<2)
+#define PQI_MTX_INIT (1<<3)
+
+
+#define PQI_CMD_MAPPED (1<<2)
+
+/* Interrupt context to get oq_id */
+typedef struct pqi_intr_ctx {
+ int oq_id;
+ device_t pqi_dev;
+}pqi_intr_ctx_t;
+
+typedef uint8_t os_dev_info_t;
+
+typedef struct OS_SPECIFIC {
+ device_t pqi_dev;
+ struct resource *pqi_regs_res0; /* reg. if. window */
+ int pqi_regs_rid0; /* resource ID */
+ bus_dma_tag_t pqi_parent_dmat; /* parent DMA tag */
+ bus_dma_tag_t pqi_buffer_dmat;
+
+ /* controller hardware interface */
+ int pqi_hwif;
+ struct resource *pqi_irq[PQI_MAX_MSIX]; /* interrupt */
+ int pqi_irq_rid[PQI_MAX_MSIX];
+ void *intrcookie[PQI_MAX_MSIX];
+ bool intr_registered[PQI_MAX_MSIX];
+ bool msi_enabled; /* MSI/MSI-X enabled */
+ pqi_intr_ctx_t *msi_ctx;
+ int oq_id;
+ int pqi_state;
+ uint32_t pqi_flags;
+ struct mtx cam_lock;
+ struct mtx map_lock;
+ int mtx_init;
+ int sim_registered;
+ struct cam_devq *devq;
+ struct cam_sim *sim;
+ struct cam_path *path;
+ struct task event_task;
+ struct cdev *cdev;
+ struct callout_handle wellness_periodic; /* periodic event handling */
+ struct callout_handle heartbeat_timeout_id; /* heart beat event handling */
+ eventhandler_tag eh;
+} OS_SPECIFIC_T;
+
+typedef bus_addr_t dma_addr_t;
+
+/* Atomic */
+typedef volatile uint64_t OS_ATOMIC64_T;
+#define OS_ATOMIC64_SET(_softs, target, val) atomic_set_long(&(_softs)->target, val)
+#define OS_ATOMIC64_READ(_softs, target) atomic_load_acq_64(&(_softs)->target)
+#define OS_ATOMIC64_INC(_softs, target) atomic_add_64(&(_softs)->target, 1)
+
+/* Register access macros */
+#define PCI_MEM_GET32( _softs, _absaddr, _offset ) \
+ bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \
+ _softs->pci_mem_handle.pqi_bhandle, _offset)
+
+#define PCI_MEM_GET64( _softs, _absaddr, _offset ) \
+ bus_space_read_8(_softs->pci_mem_handle.pqi_btag, \
+ _softs->pci_mem_handle.pqi_bhandle, _offset)
+
+#define PCI_MEM_PUT32( _softs, _absaddr, _offset, _val ) \
+ bus_space_write_4(_softs->pci_mem_handle.pqi_btag, \
+ _softs->pci_mem_handle.pqi_bhandle, _offset, _val)
+
+#define PCI_MEM_PUT64( _softs, _absaddr, _offset, _val ) \
+ bus_space_write_8(_softs->pci_mem_handle.pqi_btag, \
+ _softs->pci_mem_handle.pqi_bhandle, _offset, _val)
+
+#define PCI_MEM_GET_BUF(_softs, _absaddr, _offset, buf, size) \
+ bus_space_read_region_1(_softs->pci_mem_handle.pqi_btag,\
+ _softs->pci_mem_handle.pqi_bhandle, _offset, buf, size)
+
+/* Lock */
+typedef struct mtx OS_LOCK_T;
+typedef struct sema OS_SEMA_LOCK_T;
+
+#define OS_ACQUIRE_SPINLOCK(_lock) mtx_lock_spin(_lock)
+#define OS_RELEASE_SPINLOCK(_lock) mtx_unlock_spin(_lock)
+
+#define PQI_LOCK(_lock) OS_ACQUIRE_SPINLOCK(_lock)
+#define PQI_UNLOCK(_lock) OS_RELEASE_SPINLOCK(_lock)
+
+#define OS_INIT_PQILOCK(_softs,_lock,_lockname) os_init_spinlock(_softs,_lock,_lockname)
+#define OS_UNINIT_PQILOCK(_lock) os_uninit_spinlock(_lock)
+
+#define OS_GET_CDBP(rcb) ((rcb->cm_ccb->ccb_h.flags & CAM_CDB_POINTER) ? rcb->cm_ccb->csio.cdb_io.cdb_ptr : rcb->cm_ccb->csio.cdb_io.cdb_bytes)
+#define GET_SCSI_BUFFLEN(rcb) (rcb->cm_ccb->csio.dxfer_len)
+
+#define OS_GET_IO_QINDEX(softs,rcb)	(curcpu % softs->num_op_obq)
+#define OS_GET_IO_RESP_QID(softs,rcb) (softs->op_ob_q[(OS_GET_IO_QINDEX(softs,rcb))].q_id)
+#define OS_GET_IO_REQ_QINDEX(softs,rcb) OS_GET_IO_QINDEX(softs,rcb)
+#define OS_GET_TMF_RESP_QID OS_GET_IO_RESP_QID
+#define OS_GET_TMF_REQ_QINDEX OS_GET_IO_REQ_QINDEX
+/* sg elements addr, len, flags */
+#define OS_GET_IO_SG_COUNT(rcb) rcb->nseg
+#define OS_GET_IO_SG_ADDR(rcb,i) rcb->sgt[i].addr
+#define OS_GET_IO_SG_LEN(rcb,i) rcb->sgt[i].len
+
+/* scsi commands used in pqilib for RAID bypass*/
+#define SCMD_READ_6 READ_6
+#define SCMD_WRITE_6 WRITE_6
+#define SCMD_READ_10 READ_10
+#define SCMD_WRITE_10 WRITE_10
+#define SCMD_READ_12 READ_12
+#define SCMD_WRITE_12 WRITE_12
+#define SCMD_READ_16 READ_16
+#define SCMD_WRITE_16 WRITE_16
+
+/* Debug facility */
+
+#define PQISRC_LOG_LEVEL 0x30
+
+static int logging_level = PQISRC_LOG_LEVEL;
+
+#define PQISRC_FLAGS_MASK 0x0000ffff
+#define PQISRC_FLAGS_INIT 0x00000001
+#define PQISRC_FLAGS_INFO 0x00000002
+#define PQISRC_FLAGS_FUNC 0x00000004
+#define PQISRC_FLAGS_TRACEIO 0x00000008
+#define PQISRC_FLAGS_WARN 0x00000010
+#define PQISRC_FLAGS_ERROR 0x00000020
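+
+/*
+ * Example (annotation only): PQISRC_LOG_LEVEL is a bitmask of the flags
+ * above. The default 0x30 enables PQISRC_FLAGS_WARN (0x10) and
+ * PQISRC_FLAGS_ERROR (0x20); rebuilding with, e.g.,
+ *
+ *	#define PQISRC_LOG_LEVEL \
+ *	    (PQISRC_FLAGS_ERROR | PQISRC_FLAGS_WARN | PQISRC_FLAGS_INIT)
+ *
+ * additionally enables the DBG_INIT() messages.
+ */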
+
+
+#define DBG_INIT(fmt,args...) \
+ do { \
+ if (logging_level & PQISRC_FLAGS_INIT) { \
+ printf("[INIT]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \
+ } \
+	} while (0)
+
+#define DBG_INFO(fmt,args...) \
+ do { \
+ if (logging_level & PQISRC_FLAGS_INFO) { \
+ printf("[INFO]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \
+ } \
+	} while (0)
+
+#define DBG_FUNC(fmt,args...) \
+ do { \
+ if (logging_level & PQISRC_FLAGS_FUNC) { \
+ printf("[FUNC]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \
+ } \
+	} while (0)
+
+#define DBG_TRACEIO(fmt,args...) \
+ do { \
+ if (logging_level & PQISRC_FLAGS_TRACEIO) { \
+ printf("[TRACEIO]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \
+ } \
+	} while (0)
+
+#define DBG_WARN(fmt,args...) \
+ do { \
+ if (logging_level & PQISRC_FLAGS_WARN) { \
+ printf("[WARN]:[%u:%u.%u][CPU %d][%s][%d]:"fmt,softs->bus_id,softs->device_id,softs->func_id,curcpu,__func__,__LINE__,##args);\
+ } \
+	} while (0)
+
+#define DBG_ERR(fmt,args...) \
+ do { \
+ if (logging_level & PQISRC_FLAGS_ERROR) { \
+ printf("[ERROR]::[%u:%u.%u][CPU %d][%s][%d]:"fmt,softs->bus_id,softs->device_id,softs->func_id,curcpu,__func__,__LINE__,##args); \
+ } \
+	} while (0)
+#define DBG_IO(fmt,args...) \
+ do { \
+ if (logging_level & PQISRC_FLAGS_TRACEIO) { \
+ printf("[IO]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \
+ } \
+	} while (0)
+
+#define DBG_ERR_BTL(device,fmt,args...) \
+ do { \
+ if (logging_level & PQISRC_FLAGS_ERROR) { \
+ printf("[ERROR]::[%u:%u.%u][%u,%u,%u][CPU %d][%s][%d]:"fmt, softs->bus_id, softs->device_id, softs->func_id, device->bus, device->target, device->lun,curcpu,__func__,__LINE__,##args); \
+ } \
+	} while (0)
+
+#define DBG_WARN_BTL(device,fmt,args...) \
+ do { \
+ if (logging_level & PQISRC_FLAGS_WARN) { \
+ printf("[WARN]:[%u:%u.%u][%u,%u,%u][CPU %d][%s][%d]:"fmt, softs->bus_id, softs->device_id, softs->func_id, device->bus, device->target, device->lun,curcpu,__func__,__LINE__,##args);\
+ } \
+	} while (0)
+
+#endif // _PQI_DEFINES_H
diff --git a/sys/dev/smartpqi/smartpqi_discovery.c b/sys/dev/smartpqi/smartpqi_discovery.c
new file mode 100644
index 000000000000..f216bbcc9505
--- /dev/null
+++ b/sys/dev/smartpqi/smartpqi_discovery.c
@@ -0,0 +1,1806 @@
+/*-
+ * Copyright (c) 2018 Microsemi Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#include "smartpqi_includes.h"
+
+/* Validate the scsi sense response code */
+static inline boolean_t pqisrc_scsi_sense_valid(const struct sense_header_scsi *sshdr)
+{
+ DBG_FUNC("IN\n");
+
+ if (!sshdr)
+ return false;
+
+ DBG_FUNC("OUT\n");
+
+ return (sshdr->response_code & 0x70) == 0x70;
+}
+
+/* Parse the SCSI sense data into a local sense header buffer */
+boolean_t pqisrc_update_scsi_sense(const uint8_t *buff, int len,
+ struct sense_header_scsi *header)
+{
+
+ DBG_FUNC("IN\n");
+
+ if (!buff || !len)
+ return false;
+
+ memset(header, 0, sizeof(struct sense_header_scsi));
+
+ header->response_code = (buff[0] & 0x7f);
+
+ if (!pqisrc_scsi_sense_valid(header))
+ return false;
+
+ if (header->response_code >= 0x72) {
+ /* descriptor format */
+ if (len > 1)
+ header->sense_key = (buff[1] & 0xf);
+ if (len > 2)
+ header->asc = buff[2];
+ if (len > 3)
+ header->ascq = buff[3];
+ if (len > 7)
+ header->additional_length = buff[7];
+ } else {
+ /* fixed format */
+ if (len > 2)
+ header->sense_key = (buff[2] & 0xf);
+ if (len > 7) {
+ len = (len < (buff[7] + 8)) ?
+ len : (buff[7] + 8);
+ if (len > 12)
+ header->asc = buff[12];
+ if (len > 13)
+ header->ascq = buff[13];
+ }
+ }
+
+ DBG_FUNC("OUT\n");
+
+ return true;
+}
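+
+/*
+ * Worked example (annotation only, not part of the original source): a
+ * fixed-format sense buffer of at least 14 bytes beginning 0x70 0x00 0x02
+ * with buff[7] = 0x06, buff[12] = 0x04 and buff[13] = 0x02 parses to
+ * response_code 0x70, sense_key 0x2 (NOT READY), asc 0x04
+ * (ASC_LUN_NOT_READY) and ascq 0x02 (initializing command required) --
+ * the combination pqisrc_get_dev_vol_status() checks for below.
+ */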
+
+/*
+ * Function used to build the internal raid request and analyze the response
+ */
+int pqisrc_build_send_raid_request(pqisrc_softstate_t *softs, pqisrc_raid_req_t *request,
+ void *buff, size_t datasize, uint8_t cmd, uint16_t vpd_page, uint8_t *scsi3addr,
+ raid_path_error_info_elem_t *error_info)
+{
+
+ uint8_t *cdb;
+ int ret = PQI_STATUS_SUCCESS;
+ uint32_t tag = 0;
+ struct dma_mem device_mem;
+ sgt_t *sgd;
+
+ ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
+ ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
+
+ rcb_t *rcb = NULL;
+
+ DBG_FUNC("IN\n");
+
+ memset(&device_mem, 0, sizeof(struct dma_mem));
+
+ /* for TUR datasize: 0 buff: NULL */
+ if (datasize) {
+ device_mem.tag = "device_mem";
+ device_mem.size = datasize;
+ device_mem.align = PQISRC_DEFAULT_DMA_ALIGN;
+
+ ret = os_dma_mem_alloc(softs, &device_mem);
+
+ if (ret) {
+ DBG_ERR("failed to allocate dma memory for device_mem return code %d\n", ret);
+ return ret;
+ }
+
+ sgd = (sgt_t *)&request->sg_descriptors[0];
+
+ sgd->addr = device_mem.dma_addr;
+ sgd->len = datasize;
+ sgd->flags = SG_FLAG_LAST;
+
+ }
+
+ /* Build raid path request */
+ request->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
+
+ request->header.iu_length = LE_16(offsetof(pqisrc_raid_req_t,
+ sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH);
+ request->buffer_length = LE_32(datasize);
+ memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
+ request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+ request->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0;
+
+ cdb = request->cdb;
+
+ switch (cmd) {
+ case SA_INQUIRY:
+ request->data_direction = SOP_DATA_DIR_TO_DEVICE;
+ cdb[0] = SA_INQUIRY;
+ if (vpd_page & VPD_PAGE) {
+ cdb[1] = 0x1;
+ cdb[2] = (uint8_t)vpd_page;
+ }
+ cdb[4] = (uint8_t)datasize;
+ break;
+ case SA_REPORT_LOG:
+ case SA_REPORT_PHYS:
+ request->data_direction = SOP_DATA_DIR_TO_DEVICE;
+ cdb[0] = cmd;
+ if (cmd == SA_REPORT_PHYS)
+ cdb[1] = SA_REPORT_PHYS_EXTENDED;
+ else
+ cdb[1] = SA_REPORT_LOG_EXTENDED;
+ cdb[8] = (uint8_t)((datasize) >> 8);
+ cdb[9] = (uint8_t)datasize;
+ break;
+ case TEST_UNIT_READY:
+ request->data_direction = SOP_DATA_DIR_NONE;
+ break;
+ case SA_GET_RAID_MAP:
+ request->data_direction = SOP_DATA_DIR_TO_DEVICE;
+ cdb[0] = SA_CISS_READ;
+ cdb[1] = cmd;
+ cdb[8] = (uint8_t)((datasize) >> 8);
+ cdb[9] = (uint8_t)datasize;
+ break;
+ case SA_CACHE_FLUSH:
+ request->data_direction = SOP_DATA_DIR_FROM_DEVICE;
+ cdb[0] = BMIC_WRITE;
+ cdb[6] = BMIC_CACHE_FLUSH;
+		/* BMIC transfer length: MSB in cdb[7], LSB in cdb[8] */
+		cdb[7] = (uint8_t)((datasize) >> 8);
+		cdb[8] = (uint8_t)datasize;
+ break;
+ case BMIC_IDENTIFY_CONTROLLER:
+ case BMIC_IDENTIFY_PHYSICAL_DEVICE:
+ request->data_direction = SOP_DATA_DIR_TO_DEVICE;
+ cdb[0] = BMIC_READ;
+ cdb[6] = cmd;
+		cdb[7] = (uint8_t)((datasize) >> 8);
+		cdb[8] = (uint8_t)datasize;
+ break;
+ case BMIC_WRITE_HOST_WELLNESS:
+ request->data_direction = SOP_DATA_DIR_FROM_DEVICE;
+ memcpy(device_mem.virt_addr, buff, datasize);
+ cdb[0] = BMIC_WRITE;
+ cdb[6] = cmd;
+		cdb[7] = (uint8_t)((datasize) >> 8);
+		cdb[8] = (uint8_t)datasize;
+ break;
+ case BMIC_SENSE_SUBSYSTEM_INFORMATION:
+ request->data_direction = SOP_DATA_DIR_TO_DEVICE;
+ cdb[0] = BMIC_READ;
+ cdb[6] = cmd;
+		cdb[7] = (uint8_t)((datasize) >> 8);
+		cdb[8] = (uint8_t)datasize;
+ break;
+ default:
+ DBG_ERR("unknown command 0x%x", cmd);
+ break;
+ }
+
+ tag = pqisrc_get_tag(&softs->taglist);
+ if (INVALID_ELEM == tag) {
+ DBG_ERR("Tag not available\n");
+ ret = PQI_STATUS_FAILURE;
+ goto err_notag;
+ }
+
+	request->request_id = tag;
+	request->error_index = request->request_id;
+	request->response_queue_id = ob_q->q_id;
+ rcb = &softs->rcb[tag];
+ rcb->success_cmp_callback = pqisrc_process_internal_raid_response_success;
+ rcb->error_cmp_callback = pqisrc_process_internal_raid_response_error;
+
+ rcb->req_pending = true;
+ rcb->tag = tag;
+ /* Submit Command */
+ ret = pqisrc_submit_cmnd(softs, ib_q, request);
+
+ if (ret != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Unable to submit command\n");
+ goto err_out;
+ }
+
+ ret = pqisrc_wait_on_condition(softs, rcb);
+ if (ret != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Internal RAID request timed out: cmd : 0x%c\n", cmd);
+ goto err_out;
+ }
+
+ if (datasize) {
+ if (buff) {
+ memcpy(buff, device_mem.virt_addr, datasize);
+ }
+ os_dma_mem_free(softs, &device_mem);
+ }
+
+ ret = rcb->status;
+ if (ret) {
+ if(error_info) {
+ memcpy(error_info,
+ rcb->error_info,
+ sizeof(*error_info));
+
+ if (error_info->data_out_result ==
+ PQI_RAID_DATA_IN_OUT_UNDERFLOW) {
+ ret = PQI_STATUS_SUCCESS;
+ }
+ else{
+ DBG_INFO("Error!! Bus=%u Target=%u, Cmd=0x%x,"
+ "Ret=%d\n", BMIC_GET_LEVEL_2_BUS(scsi3addr),
+ BMIC_GET_LEVEL_TWO_TARGET(scsi3addr),
+ cmd, ret);
+ ret = PQI_STATUS_FAILURE;
+ }
+ }
+ } else {
+ if(error_info) {
+ ret = PQI_STATUS_SUCCESS;
+ memset(error_info, 0, sizeof(*error_info));
+ }
+ }
+
+ os_reset_rcb(rcb);
+	pqisrc_put_tag(&softs->taglist, request->request_id);
+ DBG_FUNC("OUT\n");
+ return ret;
+
+err_out:
+ DBG_ERR("Error!! Bus=%u Target=%u, Cmd=0x%x, Ret=%d\n",
+ BMIC_GET_LEVEL_2_BUS(scsi3addr), BMIC_GET_LEVEL_TWO_TARGET(scsi3addr),
+ cmd, ret);
+ os_reset_rcb(rcb);
+	pqisrc_put_tag(&softs->taglist, request->request_id);
+err_notag:
+ if (datasize)
+ os_dma_mem_free(softs, &device_mem);
+ DBG_FUNC("FAILED \n");
+ return ret;
+}
+
+/* Common function used to send the report physical and logical LUNs commands */
+static int pqisrc_report_luns(pqisrc_softstate_t *softs, uint8_t cmd,
+ void *buff, size_t buf_len)
+{
+ int ret;
+ pqisrc_raid_req_t request;
+
+ DBG_FUNC("IN\n");
+
+ memset(&request, 0, sizeof(request));
+ ret = pqisrc_build_send_raid_request(softs, &request, buff,
+ buf_len, cmd, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
+
+ DBG_FUNC("OUT\n");
+
+ return ret;
+}
+
+/* subroutine used to get physical and logical luns of the device */
+static int pqisrc_get_physical_logical_luns(pqisrc_softstate_t *softs, uint8_t cmd,
+ reportlun_data_ext_t **buff, size_t *data_length)
+{
+ int ret;
+ size_t list_len;
+ size_t data_len;
+ size_t new_lun_list_length;
+ reportlun_data_ext_t *lun_data;
+ reportlun_header_t report_lun_header;
+
+ DBG_FUNC("IN\n");
+
+ ret = pqisrc_report_luns(softs, cmd, &report_lun_header,
+ sizeof(report_lun_header));
+
+ if (ret) {
+ DBG_ERR("failed return code: %d\n", ret);
+ return ret;
+ }
+ list_len = BE_32(report_lun_header.list_length);
+
+retry:
+ data_len = sizeof(reportlun_header_t) + list_len;
+ *data_length = data_len;
+
+ lun_data = os_mem_alloc(softs, data_len);
+
+ if (!lun_data) {
+ DBG_ERR("failed to allocate memory for lun_data\n");
+ return PQI_STATUS_FAILURE;
+ }
+
+ if (list_len == 0) {
+ DBG_INFO("list_len is 0\n");
+ memcpy(lun_data, &report_lun_header, sizeof(report_lun_header));
+ goto out;
+ }
+
+ ret = pqisrc_report_luns(softs, cmd, lun_data, data_len);
+
+ if (ret) {
+ DBG_ERR("error\n");
+ goto error;
+ }
+
+ new_lun_list_length = BE_32(lun_data->header.list_length);
+
+ if (new_lun_list_length > list_len) {
+ list_len = new_lun_list_length;
+ os_mem_free(softs, (void *)lun_data, data_len);
+ goto retry;
+ }
+
+out:
+ *buff = lun_data;
+ DBG_FUNC("OUT\n");
+ return 0;
+
+error:
+ os_mem_free(softs, (void *)lun_data, data_len);
+ DBG_ERR("FAILED\n");
+ return ret;
+}
+
+/*
+ * Function used to get physical and logical device list
+ */
+static int pqisrc_get_phys_log_device_list(pqisrc_softstate_t *softs,
+ reportlun_data_ext_t **physical_dev_list,
+ reportlun_data_ext_t **logical_dev_list,
+ size_t *phys_data_length,
+ size_t *log_data_length)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ size_t logical_list_length;
+ size_t logdev_data_length;
+ size_t data_length;
+ reportlun_data_ext_t *local_logdev_list;
+ reportlun_data_ext_t *logdev_data;
+ reportlun_header_t report_lun_header;
+
+
+ DBG_FUNC("IN\n");
+
+ ret = pqisrc_get_physical_logical_luns(softs, SA_REPORT_PHYS, physical_dev_list, phys_data_length);
+ if (ret) {
+ DBG_ERR("report physical LUNs failed");
+ return ret;
+ }
+
+ ret = pqisrc_get_physical_logical_luns(softs, SA_REPORT_LOG, logical_dev_list, log_data_length);
+ if (ret) {
+ DBG_ERR("report logical LUNs failed");
+ return ret;
+ }
+
+
+ logdev_data = *logical_dev_list;
+
+ if (logdev_data) {
+ logical_list_length =
+ BE_32(logdev_data->header.list_length);
+ } else {
+ memset(&report_lun_header, 0, sizeof(report_lun_header));
+ logdev_data =
+ (reportlun_data_ext_t *)&report_lun_header;
+ logical_list_length = 0;
+ }
+
+ logdev_data_length = sizeof(reportlun_header_t) +
+ logical_list_length;
+
+ /* Adding LOGICAL device entry for controller */
+ local_logdev_list = os_mem_alloc(softs,
+ logdev_data_length + sizeof(reportlun_ext_entry_t));
+ if (!local_logdev_list) {
+ data_length = *log_data_length;
+ os_mem_free(softs, (char *)*logical_dev_list, data_length);
+ *logical_dev_list = NULL;
+ return PQI_STATUS_FAILURE;
+ }
+
+ memcpy(local_logdev_list, logdev_data, logdev_data_length);
+ memset((uint8_t *)local_logdev_list + logdev_data_length, 0,
+ sizeof(reportlun_ext_entry_t));
+ local_logdev_list->header.list_length = BE_32(logical_list_length +
+ sizeof(reportlun_ext_entry_t));
+ data_length = *log_data_length;
+ os_mem_free(softs, (char *)*logical_dev_list, data_length);
+ *log_data_length = logdev_data_length + sizeof(reportlun_ext_entry_t);
+ *logical_dev_list = local_logdev_list;
+
+ DBG_FUNC("OUT\n");
+
+ return ret;
+}
+
+/* Subroutine used to set Bus-Target-Lun for the requested device */
+static inline void pqisrc_set_btl(pqi_scsi_dev_t *device,
+ int bus, int target, int lun)
+{
+ DBG_FUNC("IN\n");
+
+ device->bus = bus;
+ device->target = target;
+ device->lun = lun;
+
+ DBG_FUNC("OUT\n");
+}
+
+inline boolean_t pqisrc_is_external_raid_device(pqi_scsi_dev_t *device)
+{
+ return device->is_external_raid_device;
+}
+
+static inline boolean_t pqisrc_is_external_raid_addr(uint8_t *scsi3addr)
+{
+ return scsi3addr[2] != 0;
+}
+
+/* Function used to assign Bus-Target-Lun for the requested device */
+static void pqisrc_assign_btl(pqi_scsi_dev_t *device)
+{
+ uint8_t *scsi3addr;
+ uint32_t lunid;
+ uint32_t bus;
+ uint32_t target;
+ uint32_t lun;
+ DBG_FUNC("IN\n");
+
+ scsi3addr = device->scsi3addr;
+ lunid = GET_LE32(scsi3addr);
+
+ if (pqisrc_is_hba_lunid(scsi3addr)) {
+ /* The specified device is the controller. */
+ pqisrc_set_btl(device, PQI_HBA_BUS, PQI_CTLR_INDEX, lunid & 0x3fff);
+ device->target_lun_valid = true;
+ return;
+ }
+
+ if (pqisrc_is_logical_device(device)) {
+ if (pqisrc_is_external_raid_device(device)) {
+ DBG_INFO("External Raid Device!!!");
+ bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
+ target = (lunid >> 16) & 0x3fff;
+ lun = lunid & 0xff;
+ } else {
+ bus = PQI_RAID_VOLUME_BUS;
+ lun = 0;
+ target = lunid & 0x3fff;
+ }
+ pqisrc_set_btl(device, bus, target, lun);
+ device->target_lun_valid = true;
+ return;
+ }
+
+ /* physical device */
+ pqisrc_set_btl(device, PQI_PHYSICAL_DEVICE_BUS, PQI_PD_INDEX(scsi3addr[6]), 0);
+
+ DBG_FUNC("OUT\n");
+}
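+
+/*
+ * Worked example (annotation only): for an internal logical volume, the
+ * 32-bit little-endian lunid 0x00000005 maps to bus PQI_RAID_VOLUME_BUS (1),
+ * target 5 (lunid & 0x3fff) and lun 0. If byte 2 of the scsi3addr is
+ * non-zero, the volume is external RAID; lunid 0x00030005 would then map to
+ * bus PQI_EXTERNAL_RAID_VOLUME_BUS (3), target 3 ((lunid >> 16) & 0x3fff)
+ * and lun 5 (lunid & 0xff).
+ */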
+
+/* Build and send the internal INQUIRY command to particular device */
+static int pqisrc_send_scsi_inquiry(pqisrc_softstate_t *softs,
+ uint8_t *scsi3addr, uint16_t vpd_page, uint8_t *buff, int buf_len)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ pqisrc_raid_req_t request;
+ raid_path_error_info_elem_t error_info;
+
+ DBG_FUNC("IN\n");
+
+ memset(&request, 0, sizeof(request));
+ ret = pqisrc_build_send_raid_request(softs, &request, buff, buf_len,
+ SA_INQUIRY, vpd_page, scsi3addr, &error_info);
+
+ DBG_FUNC("OUT\n");
+ return ret;
+}
+
+/* Function used to parse the sense information from response */
+static void pqisrc_fetch_sense_info(const uint8_t *sense_data,
+ unsigned sense_data_length, uint8_t *sense_key, uint8_t *asc, uint8_t *ascq)
+{
+ struct sense_header_scsi header;
+
+ DBG_FUNC("IN\n");
+
+ *sense_key = 0;
+ *ascq = 0;
+ *asc = 0;
+
+ if (pqisrc_update_scsi_sense(sense_data, sense_data_length, &header)) {
+ *sense_key = header.sense_key;
+ *asc = header.asc;
+ *ascq = header.ascq;
+ }
+
+ DBG_INFO("sense_key: %x asc: %x ascq: %x\n", *sense_key, *asc, *ascq);
+
+ DBG_FUNC("OUT\n");
+}
+
+/* Function used to validate volume offline status */
+static uint8_t pqisrc_get_volume_offline_status(pqisrc_softstate_t *softs,
+ uint8_t *scsi3addr)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ uint8_t status = SA_LV_STATUS_VPD_UNSUPPORTED;
+ uint8_t size;
+ uint8_t *buff = NULL;
+
+ DBG_FUNC("IN\n");
+
+ buff = os_mem_alloc(softs, 64);
+ if (!buff)
+ return PQI_STATUS_FAILURE;
+
+ /* Get the size of the VPD return buff. */
+ ret = pqisrc_send_scsi_inquiry(softs, scsi3addr, VPD_PAGE | SA_VPD_LV_STATUS,
+ buff, SCSI_VPD_HEADER_LENGTH);
+
+ if (ret)
+ goto out;
+
+ size = buff[3];
+
+ /* Now get the whole VPD buff. */
+ ret = pqisrc_send_scsi_inquiry(softs, scsi3addr, VPD_PAGE | SA_VPD_LV_STATUS,
+ buff, size + SCSI_VPD_HEADER_LENGTH);
+ if (ret)
+ goto out;
+
+ status = buff[4];
+
+out:
+ os_mem_free(softs, (char *)buff, 64);
+ DBG_FUNC("OUT\n");
+
+ return status;
+}
+
+
+/* Determine offline status of a volume. Returns appropriate SA_LV_* status.*/
+static uint8_t pqisrc_get_dev_vol_status(pqisrc_softstate_t *softs,
+ uint8_t *scsi3addr)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ uint8_t *sense_data;
+ unsigned sense_data_len;
+ uint8_t sense_key;
+ uint8_t asc;
+ uint8_t ascq;
+ uint8_t off_status;
+ uint8_t scsi_status;
+ pqisrc_raid_req_t request;
+ raid_path_error_info_elem_t error_info;
+
+ DBG_FUNC("IN\n");
+
+ memset(&request, 0, sizeof(request));
+ ret = pqisrc_build_send_raid_request(softs, &request, NULL, 0,
+ TEST_UNIT_READY, 0, scsi3addr, &error_info);
+
+ if (ret)
+ goto error;
+ sense_data = error_info.data;
+ sense_data_len = LE_16(error_info.sense_data_len);
+
+ if (sense_data_len > sizeof(error_info.data))
+ sense_data_len = sizeof(error_info.data);
+
+ pqisrc_fetch_sense_info(sense_data, sense_data_len, &sense_key, &asc,
+ &ascq);
+
+ scsi_status = error_info.status;
+
+ /* scsi status: "CHECK CONDN" / SK: "not ready" ? */
+ if (scsi_status != 2 ||
+ sense_key != 2 ||
+ asc != ASC_LUN_NOT_READY) {
+ return SA_LV_OK;
+ }
+
+ /* Determine the reason for not ready state. */
+ off_status = pqisrc_get_volume_offline_status(softs, scsi3addr);
+
+ DBG_INFO("offline_status 0x%x\n", off_status);
+
+ /* Keep volume offline in certain cases. */
+ switch (off_status) {
+ case SA_LV_UNDERGOING_ERASE:
+ case SA_LV_NOT_AVAILABLE:
+ case SA_LV_UNDERGOING_RPI:
+ case SA_LV_PENDING_RPI:
+ case SA_LV_ENCRYPTED_NO_KEY:
+ case SA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
+ case SA_LV_UNDERGOING_ENCRYPTION:
+ case SA_LV_UNDERGOING_ENCRYPTION_REKEYING:
+ case SA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
+ return off_status;
+ case SA_LV_STATUS_VPD_UNSUPPORTED:
+ /*
+ * If the VPD status page isn't available,
+ * use ASC/ASCQ to determine state.
+ */
+ if (ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS ||
+ ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ)
+ return off_status;
+ break;
+ }
+
+ DBG_FUNC("OUT\n");
+
+ return SA_LV_OK;
+
+error:
+ return SA_LV_STATUS_VPD_UNSUPPORTED;
+}
+
+/* Validate the RAID map parameters */
+static int pqisrc_raid_map_validation(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device, pqisrc_raid_map_t *raid_map)
+{
+ char *error_msg;
+ uint32_t raidmap_size;
+ uint32_t r5or6_blocks_per_row;
+ unsigned phys_dev_num;
+ unsigned num_raidmap_entries;
+
+ DBG_FUNC("IN\n");
+
+ raidmap_size = LE_32(raid_map->structure_size);
+ if (raidmap_size < offsetof(pqisrc_raid_map_t, dev_data)) {
+ error_msg = "RAID map too small\n";
+ goto error;
+ }
+
+ if (raidmap_size > sizeof(*raid_map)) {
+ error_msg = "RAID map too large\n";
+ goto error;
+ }
+
+ phys_dev_num = LE_16(raid_map->layout_map_count) *
+ (LE_16(raid_map->data_disks_per_row) +
+ LE_16(raid_map->metadata_disks_per_row));
+ num_raidmap_entries = phys_dev_num *
+ LE_16(raid_map->row_cnt);
+
+ if (num_raidmap_entries > RAID_MAP_MAX_ENTRIES) {
+ error_msg = "invalid number of map entries in RAID map\n";
+ goto error;
+ }
+
+ if (device->raid_level == SA_RAID_1) {
+ if (LE_16(raid_map->layout_map_count) != 2) {
+ error_msg = "invalid RAID-1 map\n";
+ goto error;
+ }
+ } else if (device->raid_level == SA_RAID_ADM) {
+ if (LE_16(raid_map->layout_map_count) != 3) {
+ error_msg = "invalid RAID-1(ADM) map\n";
+ goto error;
+ }
+ } else if ((device->raid_level == SA_RAID_5 ||
+ device->raid_level == SA_RAID_6) &&
+ LE_16(raid_map->layout_map_count) > 1) {
+ /* RAID 50/60 */
+ r5or6_blocks_per_row =
+ LE_16(raid_map->strip_size) *
+ LE_16(raid_map->data_disks_per_row);
+ if (r5or6_blocks_per_row == 0) {
+ error_msg = "invalid RAID-5 or RAID-6 map\n";
+ goto error;
+ }
+ }
+
+ DBG_FUNC("OUT\n");
+
+ return 0;
+
+error:
+ DBG_ERR("%s\n", error_msg);
+ return PQI_STATUS_FAILURE;
+}
+
+/* Get device raidmap for the requested device */
+static int pqisrc_get_device_raidmap(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ pqisrc_raid_req_t request;
+ pqisrc_raid_map_t *raid_map;
+
+ DBG_FUNC("IN\n");
+
+ raid_map = os_mem_alloc(softs, sizeof(*raid_map));
+ if (!raid_map)
+ return PQI_STATUS_FAILURE;
+
+ memset(&request, 0, sizeof(request));
+ ret = pqisrc_build_send_raid_request(softs, &request, raid_map, sizeof(*raid_map),
+ SA_GET_RAID_MAP, 0, device->scsi3addr, NULL);
+
+ if (ret) {
+ DBG_ERR("error in build send raid req ret=%d\n", ret);
+ goto err_out;
+ }
+
+ ret = pqisrc_raid_map_validation(softs, device, raid_map);
+ if (ret) {
+ DBG_ERR("error in raid map validation ret=%d\n", ret);
+ goto err_out;
+ }
+
+ device->raid_map = raid_map;
+ DBG_FUNC("OUT\n");
+ return 0;
+
+err_out:
+ os_mem_free(softs, (char*)raid_map, sizeof(*raid_map));
+ DBG_FUNC("FAILED \n");
+ return ret;
+}
+
+/* Get device ioaccel_status to validate the type of device */
+static void pqisrc_get_dev_ioaccel_status(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ uint8_t *buff;
+ uint8_t ioaccel_status;
+
+ DBG_FUNC("IN\n");
+
+ buff = os_mem_alloc(softs, 64);
+ if (!buff)
+ return;
+
+ ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr,
+ VPD_PAGE | SA_VPD_LV_IOACCEL_STATUS, buff, 64);
+ if (ret) {
+ DBG_ERR("error in send scsi inquiry ret=%d\n", ret);
+ goto err_out;
+ }
+
+ ioaccel_status = buff[IOACCEL_STATUS_BYTE];
+ device->offload_config =
+ !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
+
+ if (device->offload_config) {
+ device->offload_enabled_pending =
+ !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
+ if (pqisrc_get_device_raidmap(softs, device))
+ device->offload_enabled_pending = false;
+ }
+
+ DBG_INFO("offload_config: 0x%x offload_enabled_pending: 0x%x \n",
+ device->offload_config, device->offload_enabled_pending);
+
+err_out:
+ os_mem_free(softs, (char*)buff, 64);
+ DBG_FUNC("OUT\n");
+}
+
+/* Get RAID level of requested device */
+static void pqisrc_get_dev_raid_level(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device)
+{
+ uint8_t raid_level;
+ uint8_t *buff;
+
+ DBG_FUNC("IN\n");
+
+ raid_level = SA_RAID_UNKNOWN;
+
+ buff = os_mem_alloc(softs, 64);
+ if (buff) {
+ int ret;
+ ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr,
+ VPD_PAGE | SA_VPD_LV_DEVICE_GEOMETRY, buff, 64);
+ if (ret == 0) {
+ raid_level = buff[8];
+ if (raid_level > SA_RAID_MAX)
+ raid_level = SA_RAID_UNKNOWN;
+ }
+ os_mem_free(softs, (char*)buff, 64);
+ }
+
+ device->raid_level = raid_level;
+ DBG_INFO("RAID LEVEL: %x \n", raid_level);
+ DBG_FUNC("OUT\n");
+}
+
+/* Parse the inquiry response and determine the type of device */
+static int pqisrc_get_dev_data(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ uint8_t *inq_buff;
+
+ DBG_FUNC("IN\n");
+
+ inq_buff = os_mem_alloc(softs, OBDR_TAPE_INQ_SIZE);
+ if (!inq_buff)
+ return PQI_STATUS_FAILURE;
+
+ /* Send an inquiry to the device to see what it is. */
+ ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr, 0, inq_buff,
+ OBDR_TAPE_INQ_SIZE);
+ if (ret)
+ goto err_out;
+ pqisrc_sanitize_inquiry_string(&inq_buff[8], 8);
+ pqisrc_sanitize_inquiry_string(&inq_buff[16], 16);
+
+ device->devtype = inq_buff[0] & 0x1f;
+ memcpy(device->vendor, &inq_buff[8],
+ sizeof(device->vendor));
+ memcpy(device->model, &inq_buff[16],
+ sizeof(device->model));
+ DBG_INFO("DEV_TYPE: %x VENDOR: %s MODEL: %s\n", device->devtype, device->vendor, device->model);
+
+ if (pqisrc_is_logical_device(device) && device->devtype == DISK_DEVICE) {
+ if (pqisrc_is_external_raid_device(device)) {
+ device->raid_level = SA_RAID_UNKNOWN;
+ device->volume_status = SA_LV_OK;
+ device->volume_offline = false;
+ }
+ else {
+ pqisrc_get_dev_raid_level(softs, device);
+ pqisrc_get_dev_ioaccel_status(softs, device);
+ device->volume_status = pqisrc_get_dev_vol_status(softs,
+ device->scsi3addr);
+ device->volume_offline = device->volume_status != SA_LV_OK;
+ }
+ }
+
+ /*
+ * Check if this is a One-Button-Disaster-Recovery device
+ * by looking for "$DR-10" at offset 43 in the inquiry data.
+ */
+ device->is_obdr_device = (device->devtype == ROM_DEVICE &&
+ memcmp(&inq_buff[OBDR_SIG_OFFSET], OBDR_TAPE_SIG,
+ OBDR_SIG_LEN) == 0);
+err_out:
+ os_mem_free(softs, (char*)inq_buff, OBDR_TAPE_INQ_SIZE);
+
+ DBG_FUNC("OUT\n");
+ return ret;
+}
+
+/*
+ * BMIC (Basic Management And Interface Commands) command
+ * to get the controller identify params
+ */
+static int pqisrc_identify_ctrl(pqisrc_softstate_t *softs,
+ bmic_ident_ctrl_t *buff)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ pqisrc_raid_req_t request;
+
+ DBG_FUNC("IN\n");
+
+ memset(&request, 0, sizeof(request));
+ ret = pqisrc_build_send_raid_request(softs, &request, buff, sizeof(*buff),
+ BMIC_IDENTIFY_CONTROLLER, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
+ DBG_FUNC("OUT\n");
+
+ return ret;
+}
+
+/* Get the adapter FW version using BMIC_IDENTIFY_CONTROLLER */
+int pqisrc_get_ctrl_fw_version(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ bmic_ident_ctrl_t *identify_ctrl;
+
+ DBG_FUNC("IN\n");
+
+ identify_ctrl = os_mem_alloc(softs, sizeof(*identify_ctrl));
+ if (!identify_ctrl) {
+ DBG_ERR("failed to allocate memory for identify_ctrl\n");
+ return PQI_STATUS_FAILURE;
+ }
+
+ memset(identify_ctrl, 0, sizeof(*identify_ctrl));
+
+ ret = pqisrc_identify_ctrl(softs, identify_ctrl);
+ if (ret)
+ goto out;
+
+ softs->fw_build_number = identify_ctrl->fw_build_number;
+ memcpy(softs->fw_version, identify_ctrl->fw_version,
+ sizeof(identify_ctrl->fw_version));
+ softs->fw_version[sizeof(identify_ctrl->fw_version)] = '\0';
+	snprintf(softs->fw_version +
+			strlen(softs->fw_version),
+			sizeof(softs->fw_version) -
+			strlen(softs->fw_version),
+			"-%u", identify_ctrl->fw_build_number);
+out:
+ os_mem_free(softs, (char *)identify_ctrl, sizeof(*identify_ctrl));
+ DBG_INFO("Firmware version: %s Firmware build number: %d\n", softs->fw_version, softs->fw_build_number);
+ DBG_FUNC("OUT\n");
+ return ret;
+}
+
+/* BMIC command to determine scsi device identify params */
+static int pqisrc_identify_physical_disk(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device,
+ bmic_ident_physdev_t *buff,
+ int buf_len)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ uint16_t bmic_device_index;
+ pqisrc_raid_req_t request;
+
+
+ DBG_FUNC("IN\n");
+
+ memset(&request, 0, sizeof(request));
+ bmic_device_index = BMIC_GET_DRIVE_NUMBER(device->scsi3addr);
+ request.cdb[2] = (uint8_t)bmic_device_index;
+ request.cdb[9] = (uint8_t)(bmic_device_index >> 8);
+
+ ret = pqisrc_build_send_raid_request(softs, &request, buff, buf_len,
+ BMIC_IDENTIFY_PHYSICAL_DEVICE, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
+ DBG_FUNC("OUT\n");
+ return ret;
+}
+
+/*
+ * Function used to get the SCSI device information via the BMIC command
+ * BMIC_IDENTIFY_PHYSICAL_DEVICE
+ */
+static void pqisrc_get_physical_device_info(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device,
+ bmic_ident_physdev_t *id_phys)
+{
+ int ret = PQI_STATUS_SUCCESS;
+
+ DBG_FUNC("IN\n");
+ memset(id_phys, 0, sizeof(*id_phys));
+
+ ret= pqisrc_identify_physical_disk(softs, device,
+ id_phys, sizeof(*id_phys));
+ if (ret) {
+ device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
+ return;
+ }
+
+ device->queue_depth =
+ LE_16(id_phys->current_queue_depth_limit);
+ device->device_type = id_phys->device_type;
+ device->active_path_index = id_phys->active_path_number;
+ device->path_map = id_phys->redundant_path_present_map;
+ memcpy(&device->box,
+ &id_phys->alternate_paths_phys_box_on_port,
+ sizeof(device->box));
+ memcpy(&device->phys_connector,
+ &id_phys->alternate_paths_phys_connector,
+ sizeof(device->phys_connector));
+ device->bay = id_phys->phys_bay_in_box;
+
+ DBG_INFO("BMIC DEV_TYPE: %x QUEUE DEPTH: 0x%x \n", device->device_type, device->queue_depth);
+ DBG_FUNC("OUT\n");
+}
+
+
+/* Function used to find the entry of the device in a list */
+static device_status_t pqisrc_scsi_find_entry(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device_to_find,
+ pqi_scsi_dev_t **same_device)
+{
+ pqi_scsi_dev_t *device;
+ int i,j;
+ DBG_FUNC("IN\n");
+ for(i = 0; i < PQI_MAX_DEVICES; i++) {
+ for(j = 0; j < PQI_MAX_MULTILUN; j++) {
+ if(softs->device_list[i][j] == NULL)
+ continue;
+ device = softs->device_list[i][j];
+ if (pqisrc_scsi3addr_equal(device_to_find->scsi3addr,
+ device->scsi3addr)) {
+ *same_device = device;
+ if (pqisrc_device_equal(device_to_find, device)) {
+ if (device_to_find->volume_offline)
+ return DEVICE_CHANGED;
+ return DEVICE_UNCHANGED;
+ }
+ return DEVICE_CHANGED;
+ }
+ }
+ }
+ DBG_FUNC("OUT\n");
+
+ return DEVICE_NOT_FOUND;
+}
+
+
+/* Update an existing device entry with the info from a newly discovered device */
+static void pqisrc_exist_device_update(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device_exist,
+ pqi_scsi_dev_t *new_device)
+{
+ DBG_FUNC("IN\n");
+ device_exist->expose_device = new_device->expose_device;
+ memcpy(device_exist->vendor, new_device->vendor,
+ sizeof(device_exist->vendor));
+ memcpy(device_exist->model, new_device->model,
+ sizeof(device_exist->model));
+ device_exist->is_physical_device = new_device->is_physical_device;
+ device_exist->is_external_raid_device =
+ new_device->is_external_raid_device;
+ device_exist->sas_address = new_device->sas_address;
+ device_exist->raid_level = new_device->raid_level;
+ device_exist->queue_depth = new_device->queue_depth;
+ device_exist->ioaccel_handle = new_device->ioaccel_handle;
+ device_exist->volume_status = new_device->volume_status;
+ device_exist->active_path_index = new_device->active_path_index;
+ device_exist->path_map = new_device->path_map;
+ device_exist->bay = new_device->bay;
+ memcpy(device_exist->box, new_device->box,
+ sizeof(device_exist->box));
+ memcpy(device_exist->phys_connector, new_device->phys_connector,
+ sizeof(device_exist->phys_connector));
+ device_exist->offload_config = new_device->offload_config;
+ device_exist->offload_enabled = false;
+ device_exist->offload_enabled_pending =
+ new_device->offload_enabled_pending;
+ device_exist->offload_to_mirror = 0;
+ if (device_exist->raid_map)
+ os_mem_free(softs,
+ (char *)device_exist->raid_map,
+ sizeof(*device_exist->raid_map));
+ device_exist->raid_map = new_device->raid_map;
+ /* To prevent this from being freed later. */
+ new_device->raid_map = NULL;
+ DBG_FUNC("OUT\n");
+}
+
+/* Find the physical device that matches a given ioaccel_handle */
+static pqi_scsi_dev_t *pqisrc_identify_device_via_ioaccel(
+ pqisrc_softstate_t *softs, uint32_t ioaccel_handle)
+{
+ pqi_scsi_dev_t *device;
+ int i,j;
+ DBG_FUNC("IN\n");
+ for(i = 0; i < PQI_MAX_DEVICES; i++) {
+ for(j = 0; j < PQI_MAX_MULTILUN; j++) {
+ if(softs->device_list[i][j] == NULL)
+ continue;
+ device = softs->device_list[i][j];
+ if (device->devtype != DISK_DEVICE)
+ continue;
+ if (pqisrc_is_logical_device(device))
+ continue;
+ if (device->ioaccel_handle == ioaccel_handle)
+ return device;
+ }
+ }
+ DBG_FUNC("OUT\n");
+
+ return NULL;
+}
+
+/* Compute logical device queue depths from their RAID map member disks */
+static void pqisrc_update_log_dev_qdepth(pqisrc_softstate_t *softs)
+{
+ unsigned i;
+ unsigned phys_dev_num;
+ unsigned num_raidmap_entries;
+ unsigned queue_depth;
+ pqisrc_raid_map_t *raid_map;
+ pqi_scsi_dev_t *device;
+ raidmap_data_t *dev_data;
+ pqi_scsi_dev_t *phys_disk;
+	unsigned j, k;
+
+ DBG_FUNC("IN\n");
+
+ for(i = 0; i < PQI_MAX_DEVICES; i++) {
+ for(j = 0; j < PQI_MAX_MULTILUN; j++) {
+ if(softs->device_list[i][j] == NULL)
+ continue;
+ device = softs->device_list[i][j];
+ if (device->devtype != DISK_DEVICE)
+ continue;
+ if (!pqisrc_is_logical_device(device))
+ continue;
+ if (pqisrc_is_external_raid_device(device))
+ continue;
+ device->queue_depth = PQI_LOGICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
+ raid_map = device->raid_map;
+ if (!raid_map)
+ return;
+ dev_data = raid_map->dev_data;
+ phys_dev_num = LE_16(raid_map->layout_map_count) *
+ (LE_16(raid_map->data_disks_per_row) +
+ LE_16(raid_map->metadata_disks_per_row));
+ num_raidmap_entries = phys_dev_num *
+ LE_16(raid_map->row_cnt);
+
+ queue_depth = 0;
+			/* use a separate index so the outer device scan is not clobbered */
+			for (k = 0; k < num_raidmap_entries; k++) {
+				phys_disk = pqisrc_identify_device_via_ioaccel(softs,
+					dev_data[k].ioaccel_handle);
+
+ if (!phys_disk) {
+ DBG_WARN(
+ "Failed to find physical disk handle for logical drive %016llx\n",
+					(unsigned long long)GET_BE64(device->scsi3addr));
+ device->offload_enabled = false;
+ device->offload_enabled_pending = false;
+ if (raid_map)
+ os_mem_free(softs, (char *)raid_map, sizeof(*raid_map));
+ device->raid_map = NULL;
+ return;
+ }
+
+ queue_depth += phys_disk->queue_depth;
+ }
+
+ device->queue_depth = queue_depth;
+ } /* end inner loop */
+ }/* end outer loop */
+ DBG_FUNC("OUT\n");
+}
+
+/* Function used to add a scsi device to OS scsi subsystem */
+static int pqisrc_add_device(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device)
+{
+ DBG_FUNC("IN\n");
+ DBG_INFO("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
+ device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status);
+
+ device->invalid = false;
+
+ if(device->expose_device) {
+ /* TBD: Call OS upper layer function to add the device entry */
+ os_add_device(softs,device);
+ }
+ DBG_FUNC("OUT\n");
+ return PQI_STATUS_SUCCESS;
+
+}
+
+/* Function used to remove a scsi device from OS scsi subsystem */
+void pqisrc_remove_device(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device)
+{
+ DBG_FUNC("IN\n");
+ DBG_INFO("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
+ device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status);
+
+ /* TBD: Call OS upper layer function to remove the device entry */
+ device->invalid = true;
+ os_remove_device(softs,device);
+ DBG_FUNC("OUT\n");
+}
+
+
+/*
+ * When exposing a new device to the OS fails, remove it from the driver's
+ * device list so it stays consistent with the OS SCSI mid-layer's view
+ */
+static void pqisrc_adjust_list(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device)
+{
+ DBG_FUNC("IN\n");
+
+ if (!device) {
+ DBG_ERR("softs = %p: device is NULL !!!\n", softs);
+ return;
+ }
+
+ OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
+ softs->device_list[device->target][device->lun] = NULL;
+ OS_RELEASE_SPINLOCK(&softs->devlist_lock);
+ pqisrc_device_mem_free(softs, device);
+
+ DBG_FUNC("OUT\n");
+}
+
+/* Debug routine used to display the RAID volume status of the device */
+static void pqisrc_display_volume_status(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device)
+{
+ char *status;
+
+ DBG_FUNC("IN\n");
+ switch (device->volume_status) {
+ case SA_LV_OK:
+ status = "Volume is online.";
+ break;
+ case SA_LV_UNDERGOING_ERASE:
+ status = "Volume is undergoing background erase process.";
+ break;
+ case SA_LV_NOT_AVAILABLE:
+ status = "Volume is waiting for transforming volume.";
+ break;
+ case SA_LV_UNDERGOING_RPI:
+ status = "Volume is undergoing rapid parity initialization process.";
+ break;
+ case SA_LV_PENDING_RPI:
+ status = "Volume is queued for rapid parity initialization process.";
+ break;
+ case SA_LV_ENCRYPTED_NO_KEY:
+ status = "Volume is encrypted and cannot be accessed because key is not present.";
+ break;
+ case SA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
+ status = "Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.";
+ break;
+ case SA_LV_UNDERGOING_ENCRYPTION:
+ status = "Volume is undergoing encryption process.";
+ break;
+ case SA_LV_UNDERGOING_ENCRYPTION_REKEYING:
+ status = "Volume is undergoing encryption re-keying process.";
+ break;
+ case SA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
+ status = "Volume is encrypted and cannot be accessed because controller does not have encryption enabled.";
+ break;
+ case SA_LV_PENDING_ENCRYPTION:
+ status = "Volume is pending migration to encrypted state, but process has not started.";
+ break;
+ case SA_LV_PENDING_ENCRYPTION_REKEYING:
+ status = "Volume is encrypted and is pending encryption rekeying.";
+ break;
+ case SA_LV_STATUS_VPD_UNSUPPORTED:
+ status = "Volume status is not available through vital product data pages.";
+ break;
+ default:
+ status = "Volume is in an unknown state.";
+ break;
+ }
+
+ DBG_INFO("scsi BTL %d:%d:%d %s\n",
+ device->bus, device->target, device->lun, status);
+ DBG_FUNC("OUT\n");
+}
+
+void pqisrc_device_mem_free(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
+ DBG_INFO("IN\n");
+ if (!device)
+ return;
+ if (device->raid_map) {
+ os_mem_free(softs, (char *)device->raid_map, sizeof(pqisrc_raid_map_t));
+ }
+ os_mem_free(softs, (char *)device,sizeof(*device));
+ DBG_INFO("OUT\n");
+
+}
+
+/* OS should call this function to free the scsi device */
+void pqisrc_free_device(pqisrc_softstate_t * softs,pqi_scsi_dev_t *device)
+{
+
+ OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
+ pqisrc_device_mem_free(softs, device);
+ OS_RELEASE_SPINLOCK(&softs->devlist_lock);
+
+}
+
+
+/* Update the newly added devices to the device list */
+static void pqisrc_update_device_list(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *new_device_list[], int num_new_devices)
+{
+ int ret;
+ int i;
+ device_status_t dev_status;
+ pqi_scsi_dev_t *device;
+ pqi_scsi_dev_t *same_device;
+ pqi_scsi_dev_t **added = NULL;
+ pqi_scsi_dev_t **removed = NULL;
+ int nadded = 0, nremoved = 0;
+ int j;
+ DBG_INFO("IN\n");
+
+ added = os_mem_alloc(softs, sizeof(*added) * PQI_MAX_DEVICES);
+ removed = os_mem_alloc(softs, sizeof(*removed) * PQI_MAX_DEVICES);
+
+ if (!added || !removed) {
+ DBG_WARN("Out of memory \n");
+ goto free_and_out;
+ }
+
+ OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
+
+ for(i = 0; i < PQI_MAX_DEVICES; i++) {
+ for(j = 0; j < PQI_MAX_MULTILUN; j++) {
+ if(softs->device_list[i][j] == NULL)
+ continue;
+ device = softs->device_list[i][j];
+ device->device_gone = true;
+ }
+ }
+ DBG_IO("Device list used an array\n");
+ for (i = 0; i < num_new_devices; i++) {
+ device = new_device_list[i];
+
+ dev_status = pqisrc_scsi_find_entry(softs, device,
+ &same_device);
+
+ switch (dev_status) {
+ case DEVICE_UNCHANGED:
+ /* New Device present in existing device list */
+ device->new_device = false;
+ same_device->device_gone = false;
+ pqisrc_exist_device_update(softs, same_device, device);
+ break;
+ case DEVICE_NOT_FOUND:
+ /* Device not found in existing list */
+ device->new_device = true;
+ break;
+ case DEVICE_CHANGED:
+			/* Device has changed; treat it as new so it is re-added to the list */
+ device->new_device = true;
+ break;
+ default:
+ break;
+ }
+ }
+ /* Process all devices that have gone away. */
+ for(i = 0, nremoved = 0; i < PQI_MAX_DEVICES; i++) {
+ for(j = 0; j < PQI_MAX_MULTILUN; j++) {
+ if(softs->device_list[i][j] == NULL)
+ continue;
+ device = softs->device_list[i][j];
+ if (device->device_gone) {
+ softs->device_list[device->target][device->lun] = NULL;
+ removed[nremoved] = device;
+ nremoved++;
+ }
+ }
+ }
+
+ /* Process all new devices. */
+ for (i = 0, nadded = 0; i < num_new_devices; i++) {
+ device = new_device_list[i];
+ if (!device->new_device)
+ continue;
+ if (device->volume_offline)
+ continue;
+
+ softs->device_list[device->target][device->lun] = device;
+ DBG_INFO("Added device %p at B : %d T : %d L : %d\n",device,
+ device->bus,device->target,device->lun);
+ /* To prevent this entry from being freed later. */
+ new_device_list[i] = NULL;
+ added[nadded] = device;
+ nadded++;
+ }
+
+ pqisrc_update_log_dev_qdepth(softs);
+
+ for(i = 0; i < PQI_MAX_DEVICES; i++) {
+ for(j = 0; j < PQI_MAX_MULTILUN; j++) {
+ if(softs->device_list[i][j] == NULL)
+ continue;
+ device = softs->device_list[i][j];
+ device->offload_enabled = device->offload_enabled_pending;
+ }
+ }
+
+ OS_RELEASE_SPINLOCK(&softs->devlist_lock);
+
+ for(i = 0; i < nremoved; i++) {
+ device = removed[i];
+ if (device == NULL)
+ continue;
+ pqisrc_remove_device(softs, device);
+ pqisrc_display_device_info(softs, "removed", device);
+
+ }
+
+ for(i = 0; i < PQI_MAX_DEVICES; i++) {
+ for(j = 0; j < PQI_MAX_MULTILUN; j++) {
+ if(softs->device_list[i][j] == NULL)
+ continue;
+ device = softs->device_list[i][j];
+ /*
+ * Notify the OS upper layer if the queue depth of any existing device has
+ * changed.
+ */
+ if (device->queue_depth !=
+ device->advertised_queue_depth) {
+ device->advertised_queue_depth = device->queue_depth;
+ /* TBD: Call OS upper layer function to change device Q depth */
+ }
+ }
+ }
+ for(i = 0; i < nadded; i++) {
+ device = added[i];
+ if (device->expose_device) {
+ ret = pqisrc_add_device(softs, device);
+ if (ret) {
+ DBG_WARN("scsi %d:%d:%d addition failed, device not added\n",
+ device->bus, device->target,
+ device->lun);
+ pqisrc_adjust_list(softs, device);
+ continue;
+ }
+ }
+
+ pqisrc_display_device_info(softs, "added", device);
+ }
+
+ /* Process all volumes that are offline. */
+ for (i = 0; i < num_new_devices; i++) {
+ device = new_device_list[i];
+ if (!device)
+ continue;
+ if (!device->new_device)
+ continue;
+ if (device->volume_offline) {
+ pqisrc_display_volume_status(softs, device);
+ pqisrc_display_device_info(softs, "offline", device);
+ }
+ }
+
+free_and_out:
+ if (added)
+ os_mem_free(softs, (char *)added,
+ sizeof(*added) * PQI_MAX_DEVICES);
+ if (removed)
+ os_mem_free(softs, (char *)removed,
+ sizeof(*removed) * PQI_MAX_DEVICES);
+
+ DBG_INFO("OUT\n");
+}
+
+/*
+ * Report the driver version to the adapter using the
+ * BMIC_WRITE_HOST_WELLNESS command.
+ */
+int pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs)
+{
+ int rval = PQI_STATUS_SUCCESS;
+ struct bmic_host_wellness_driver_version *host_wellness_driver_ver;
+ size_t data_length;
+ pqisrc_raid_req_t request;
+
+ DBG_FUNC("IN\n");
+
+ memset(&request, 0, sizeof(request));
+ data_length = sizeof(*host_wellness_driver_ver);
+
+ host_wellness_driver_ver = os_mem_alloc(softs, data_length);
+ if (!host_wellness_driver_ver) {
+ DBG_ERR("failed to allocate memory for host wellness driver_version\n");
+ return PQI_STATUS_FAILURE;
+ }
+
+ host_wellness_driver_ver->start_tag[0] = '<';
+ host_wellness_driver_ver->start_tag[1] = 'H';
+ host_wellness_driver_ver->start_tag[2] = 'W';
+ host_wellness_driver_ver->start_tag[3] = '>';
+ host_wellness_driver_ver->driver_version_tag[0] = 'D';
+ host_wellness_driver_ver->driver_version_tag[1] = 'V';
+ host_wellness_driver_ver->driver_version_length = LE_16(sizeof(host_wellness_driver_ver->driver_version));
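+ /* The version buffer carries the OS name followed by the driver
+ * version; the copies below are truncated to fit and the buffer is
+ * NUL-terminated afterwards. */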
+ strncpy(host_wellness_driver_ver->driver_version, softs->os_name,
+ sizeof(host_wellness_driver_ver->driver_version));
+ if (strlen(softs->os_name) < sizeof(host_wellness_driver_ver->driver_version) ) {
+ strncpy(host_wellness_driver_ver->driver_version + strlen(softs->os_name), PQISRC_DRIVER_VERSION,
+ sizeof(host_wellness_driver_ver->driver_version) - strlen(softs->os_name));
+ } else {
+ DBG_INFO("OS name length(%lu) is longer than buffer of driver_version\n",
+ strlen(softs->os_name));
+ }
+ host_wellness_driver_ver->driver_version[sizeof(host_wellness_driver_ver->driver_version) - 1] = '\0';
+ host_wellness_driver_ver->end_tag[0] = 'Z';
+ host_wellness_driver_ver->end_tag[1] = 'Z';
+
+ rval = pqisrc_build_send_raid_request(softs, &request, host_wellness_driver_ver,data_length,
+ BMIC_WRITE_HOST_WELLNESS, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
+
+ os_mem_free(softs, (char *)host_wellness_driver_ver, data_length);
+
+ DBG_FUNC("OUT");
+ return rval;
+}
+
+/*
+ * Write current RTC time from host to the adapter using
+ * BMIC_WRITE_HOST_WELLNESS
+ */
+int pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs)
+{
+ int rval = PQI_STATUS_SUCCESS;
+ struct bmic_host_wellness_time *host_wellness_time;
+ size_t data_length;
+ pqisrc_raid_req_t request;
+
+ DBG_FUNC("IN\n");
+
+ memset(&request, 0, sizeof(request));
+ data_length = sizeof(*host_wellness_time);
+
+ host_wellness_time = os_mem_alloc(softs, data_length);
+ if (!host_wellness_time) {
+ DBG_ERR("failed to allocate memory for host wellness time structure\n");
+ return PQI_STATUS_FAILURE;
+ }
+
+ host_wellness_time->start_tag[0] = '<';
+ host_wellness_time->start_tag[1] = 'H';
+ host_wellness_time->start_tag[2] = 'W';
+ host_wellness_time->start_tag[3] = '>';
+ host_wellness_time->time_tag[0] = 'T';
+ host_wellness_time->time_tag[1] = 'D';
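+ /* time_length spans the time fields from 'century' up to (but not
+ * including) the length field itself. */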
+ host_wellness_time->time_length = LE_16(offsetof(struct bmic_host_wellness_time, time_length) -
+ offsetof(struct bmic_host_wellness_time, century));
+
+ os_get_time(host_wellness_time);
+
+ host_wellness_time->dont_write_tag[0] = 'D';
+ host_wellness_time->dont_write_tag[1] = 'W';
+ host_wellness_time->end_tag[0] = 'Z';
+ host_wellness_time->end_tag[1] = 'Z';
+
+ rval = pqisrc_build_send_raid_request(softs, &request, host_wellness_time,data_length,
+ BMIC_WRITE_HOST_WELLNESS, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
+
+ os_mem_free(softs, (char *)host_wellness_time, data_length);
+
+ DBG_FUNC("OUT");
+ return rval;
+}
+
+/*
+ * Function used to perform a rescan of scsi devices
+ * for any config change events
+ */
+int pqisrc_scan_devices(pqisrc_softstate_t *softs)
+{
+ boolean_t is_physical_device;
+ int ret = PQI_STATUS_FAILURE;
+ int i;
+ int new_dev_cnt;
+ int phy_log_dev_cnt;
+ uint8_t *scsi3addr;
+ uint32_t physical_cnt;
+ uint32_t logical_cnt;
+ uint32_t ndev_allocated = 0;
+ size_t phys_data_length, log_data_length;
+ reportlun_data_ext_t *physical_dev_list = NULL;
+ reportlun_data_ext_t *logical_dev_list = NULL;
+ reportlun_ext_entry_t *lun_ext_entry = NULL;
+ bmic_ident_physdev_t *bmic_phy_info = NULL;
+ pqi_scsi_dev_t **new_device_list = NULL;
+ pqi_scsi_dev_t *device = NULL;
+
+
+ DBG_FUNC("IN\n");
+
+ ret = pqisrc_get_phys_log_device_list(softs, &physical_dev_list, &logical_dev_list,
+ &phys_data_length, &log_data_length);
+
+ if (ret)
+ goto err_out;
+
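+ /* list_length is a big-endian byte count of the LUN entries that
+ * follow; divide by the entry size to get the entry count. */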
+ physical_cnt = BE_32(physical_dev_list->header.list_length)
+ / sizeof(physical_dev_list->lun_entries[0]);
+
+ logical_cnt = BE_32(logical_dev_list->header.list_length)
+ / sizeof(logical_dev_list->lun_entries[0]);
+
+ DBG_INFO("physical_cnt %d logical_cnt %d\n", physical_cnt, logical_cnt);
+
+ if (physical_cnt) {
+ bmic_phy_info = os_mem_alloc(softs, sizeof(*bmic_phy_info));
+ if (bmic_phy_info == NULL) {
+ ret = PQI_STATUS_FAILURE;
+ DBG_ERR("failed to allocate memory for BMIC ID PHYS Device : %d\n", ret);
+ goto err_out;
+ }
+ }
+ phy_log_dev_cnt = physical_cnt + logical_cnt;
+ new_device_list = os_mem_alloc(softs,
+ sizeof(*new_device_list) * phy_log_dev_cnt);
+
+ if (new_device_list == NULL) {
+ ret = PQI_STATUS_FAILURE;
+ DBG_ERR("failed to allocate memory for device list : %d\n", ret);
+ goto err_out;
+ }
+
+ for (i = 0; i < phy_log_dev_cnt; i++) {
+ new_device_list[i] = os_mem_alloc(softs,
+ sizeof(*new_device_list[i]));
+ if (new_device_list[i] == NULL) {
+ ret = PQI_STATUS_FAILURE;
+ DBG_ERR("failed to allocate memory for device list : %d\n", ret);
+ ndev_allocated = i;
+ goto err_out;
+ }
+ }
+
+ ndev_allocated = phy_log_dev_cnt;
+ new_dev_cnt = 0;
+ for (i = 0; i < phy_log_dev_cnt; i++) {
+
+ if (i < physical_cnt) {
+ is_physical_device = true;
+ lun_ext_entry = &physical_dev_list->lun_entries[i];
+ } else {
+ is_physical_device = false;
+ lun_ext_entry =
+ &logical_dev_list->lun_entries[i - physical_cnt];
+ }
+
+ scsi3addr = lun_ext_entry->lunid;
+
+ /* Skip masked physical non-disk devices. */
+ if (MASKED_DEVICE(scsi3addr) && is_physical_device)
+ continue;
+
+ device = new_device_list[new_dev_cnt];
+ memset(device, 0, sizeof(*device));
+ memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
+ device->wwid = lun_ext_entry->wwid;
+ device->is_physical_device = is_physical_device;
+ if (!is_physical_device)
+ device->is_external_raid_device =
+ pqisrc_is_external_raid_addr(scsi3addr);
+
+
+ /* Get device type, vendor, model, device ID. */
+ ret = pqisrc_get_dev_data(softs, device);
+ if (ret) {
+ DBG_WARN("Inquiry failed, skipping device %016llx\n",
+ (unsigned long long)BE_64(device->scsi3addr[0]));
+ DBG_INFO("INQUIRY FAILED \n");
+ continue;
+ }
+ pqisrc_assign_btl(device);
+
+ /*
+ * Expose all devices except for physical devices that
+ * are masked.
+ */
+ if (device->is_physical_device &&
+ MASKED_DEVICE(scsi3addr))
+ device->expose_device = false;
+ else
+ device->expose_device = true;
+
+ if (device->is_physical_device &&
+ (lun_ext_entry->device_flags &
+ REPORT_LUN_DEV_FLAG_AIO_ENABLED) &&
+ lun_ext_entry->ioaccel_handle) {
+ device->aio_enabled = true;
+ }
+ switch (device->devtype) {
+ case ROM_DEVICE:
+ /*
+ * We don't *really* support actual CD-ROM devices,
+ * but we do support the HP "One Button Disaster
+ * Recovery" tape drive which temporarily pretends to
+ * be a CD-ROM drive.
+ */
+ if (device->is_obdr_device)
+ new_dev_cnt++;
+ break;
+ case DISK_DEVICE:
+ case ZBC_DEVICE:
+ if (device->is_physical_device) {
+ device->ioaccel_handle =
+ lun_ext_entry->ioaccel_handle;
+ device->sas_address = BE_64(lun_ext_entry->wwid);
+ pqisrc_get_physical_device_info(softs, device,
+ bmic_phy_info);
+ }
+ /* Logical devices have no SAS address of their own, so for
+ * external RAID (MSA) devices use the LUN ID as the target
+ * SAS address. */
+ if(device->is_external_raid_device)
+ device->sas_address = BE_64((uint64_t)lun_ext_entry->lunid);
+ new_dev_cnt++;
+ break;
+ case ENCLOSURE_DEVICE:
+ if (device->is_physical_device) {
+ device->sas_address = BE_64(lun_ext_entry->wwid);
+ }
+ new_dev_cnt++;
+ break;
+ case TAPE_DEVICE:
+ case MEDIUM_CHANGER_DEVICE:
+ new_dev_cnt++;
+ break;
+ case RAID_DEVICE:
+ /*
+ * Only present the HBA controller itself as a RAID
+ * controller. If it's a RAID controller other than
+ * the HBA itself (an external RAID controller, MSA500
+ * or similar), don't present it.
+ */
+ if (pqisrc_is_hba_lunid(scsi3addr))
+ new_dev_cnt++;
+ break;
+ }
+ }
+ DBG_INFO("new_dev_cnt %d\n", new_dev_cnt);
+
+ pqisrc_update_device_list(softs, new_device_list, new_dev_cnt);
+
+err_out:
+ if (new_device_list) {
+ for (i = 0; i < ndev_allocated; i++) {
+ if (new_device_list[i]) {
+ if(new_device_list[i]->raid_map)
+ os_mem_free(softs, (char *)new_device_list[i]->raid_map,
+ sizeof(pqisrc_raid_map_t));
+ os_mem_free(softs, (char*)new_device_list[i],
+ sizeof(*new_device_list[i]));
+ }
+ }
+ os_mem_free(softs, (char *)new_device_list,
+ sizeof(*new_device_list) * ndev_allocated);
+ }
+ if(physical_dev_list)
+ os_mem_free(softs, (char *)physical_dev_list, phys_data_length);
+ if(logical_dev_list)
+ os_mem_free(softs, (char *)logical_dev_list, log_data_length);
+ if (bmic_phy_info)
+ os_mem_free(softs, (char *)bmic_phy_info, sizeof(*bmic_phy_info));
+
+ DBG_FUNC("OUT \n");
+
+ return ret;
+}
+
+/*
+ * Clean up memory allocated for devices.
+ */
+void pqisrc_cleanup_devices(pqisrc_softstate_t *softs)
+{
+
+ int i = 0,j = 0;
+ pqi_scsi_dev_t *dvp = NULL;
+ DBG_FUNC("IN\n");
+
+ for(i = 0; i < PQI_MAX_DEVICES; i++) {
+ for(j = 0; j < PQI_MAX_MULTILUN; j++) {
+ if (softs->device_list[i][j] == NULL)
+ continue;
+ dvp = softs->device_list[i][j];
+ pqisrc_device_mem_free(softs, dvp);
+ }
+ }
+ DBG_FUNC("OUT\n");
+}
+
diff --git a/sys/dev/smartpqi/smartpqi_event.c b/sys/dev/smartpqi/smartpqi_event.c
new file mode 100644
index 000000000000..3ba81dcc952a
--- /dev/null
+++ b/sys/dev/smartpqi/smartpqi_event.c
@@ -0,0 +1,439 @@
+/*-
+ * Copyright (c) 2018 Microsemi Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#include"smartpqi_includes.h"
+
+/*
+ * Function to rescan the devices connected to adapter.
+ */
+int
+pqisrc_rescan_devices(pqisrc_softstate_t *softs)
+{
+ int ret;
+
+ DBG_FUNC("IN\n");
+
+ os_sema_lock(&softs->scan_lock);
+
+ ret = pqisrc_scan_devices(softs);
+
+ os_sema_unlock(&softs->scan_lock);
+
+ DBG_FUNC("OUT\n");
+
+ return ret;
+}
+
+/*
+ * Subroutine to acknowledge the events processed by the driver to the adapter.
+ */
+static void
+pqisrc_acknowledge_event(pqisrc_softstate_t *softs,
+ struct pqi_event *event)
+{
+
+ pqi_event_acknowledge_request_t request;
+ ib_queue_t *ib_q = &softs->op_raid_ib_q[0];
+ int tmo = PQISRC_EVENT_ACK_RESP_TIMEOUT;
+ memset(&request,0,sizeof(request));
+
+ DBG_FUNC("IN\n");
+
+ request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
+ request.header.iu_length = (sizeof(pqi_event_acknowledge_request_t) -
+ PQI_REQUEST_HEADER_LENGTH);
+ request.event_type = event->event_type;
+ request.event_id = event->event_id;
+ request.additional_event_id = event->additional_event_id;
+
+ /* Submit Event Acknowledge */
+
+ pqisrc_submit_cmnd(softs, ib_q, &request);
+
+ /*
+ * We have to special-case this type of request because the firmware
+ * does not generate an interrupt when this type of request completes.
+ * Therefore, we have to poll until we see that the firmware has
+ * consumed the request before we move on.
+ */
+
+ COND_WAIT(((ib_q->pi_local) == *(ib_q->ci_virt_addr)), tmo);
+ if (tmo <= 0) {
+ DBG_ERR("wait for event acknowledge timed out\n");
+ DBG_ERR("tmo : %d\n",tmo);
+ }
+
+ DBG_FUNC(" OUT\n");
+}
+
+/*
+ * Acknowledge processed events to the adapter.
+ */
+void
+pqisrc_ack_all_events(void *arg1)
+{
+ int i;
+ struct pqi_event *pending_event;
+ pqisrc_softstate_t *softs = (pqisrc_softstate_t*)arg1;
+
+ DBG_FUNC(" IN\n");
+
+
+ pending_event = &softs->pending_events[0];
+ for (i=0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
+ if (pending_event->pending == true) {
+ pending_event->pending = false;
+ pqisrc_acknowledge_event(softs, pending_event);
+ }
+ pending_event++;
+ }
+
+ /* Rescan devices except for heartbeat event */
+ if ((pqisrc_rescan_devices(softs)) != PQI_STATUS_SUCCESS) {
+ DBG_ERR(" Failed to Re-Scan devices\n ");
+ }
+ DBG_FUNC(" OUT\n");
+
+}
+
+/*
+ * Get event index from event type to validate the type of event.
+ */
+static int
+pqisrc_event_type_to_event_index(unsigned event_type)
+{
+ int index;
+
+ switch (event_type) {
+ case PQI_EVENT_TYPE_HOTPLUG:
+ index = PQI_EVENT_HOTPLUG;
+ break;
+ case PQI_EVENT_TYPE_HARDWARE:
+ index = PQI_EVENT_HARDWARE;
+ break;
+ case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
+ index = PQI_EVENT_PHYSICAL_DEVICE;
+ break;
+ case PQI_EVENT_TYPE_LOGICAL_DEVICE:
+ index = PQI_EVENT_LOGICAL_DEVICE;
+ break;
+ case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
+ index = PQI_EVENT_AIO_STATE_CHANGE;
+ break;
+ case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
+ index = PQI_EVENT_AIO_CONFIG_CHANGE;
+ break;
+ default:
+ index = -1;
+ break;
+ }
+
+ return index;
+}
+
+/*
+ * Function used to process the events supported by the adapter.
+ */
+int
+pqisrc_process_event_intr_src(pqisrc_softstate_t *softs,int obq_id)
+{
+ uint32_t obq_pi,obq_ci;
+ pqi_event_response_t response;
+ ob_queue_t *event_q;
+ struct pqi_event *pending_event;
+ boolean_t need_delayed_work = false;
+
+ DBG_FUNC(" IN\n");
+
+ OS_ATOMIC64_INC(softs, num_intrs);
+
+ event_q = &softs->event_q;
+ obq_ci = event_q->ci_local;
+ obq_pi = *(event_q->pi_virt_addr);
+ DBG_INFO("Initial Event_q ci : %d Event_q pi : %d\n", obq_ci, obq_pi);
+
+ while(1) {
+ int event_index;
+ DBG_INFO("queue_id : %d ci : %d pi : %d\n",obq_id, obq_ci, obq_pi);
+ if (obq_pi == obq_ci)
+ break;
+
+ need_delayed_work = true;
+
+ /* Copy the response */
+ memcpy(&response, event_q->array_virt_addr + (obq_ci * event_q->elem_size),
+ sizeof(pqi_event_response_t));
+ DBG_INFO("response.header.iu_type : 0x%x \n", response.header.iu_type);
+ DBG_INFO("response.event_type : 0x%x \n", response.event_type);
+
+ event_index = pqisrc_event_type_to_event_index(response.event_type);
+
+ if (event_index >= 0) {
+ if(response.request_acknowledge) {
+ pending_event = &softs->pending_events[event_index];
+ pending_event->pending = true;
+ pending_event->event_type = response.event_type;
+ pending_event->event_id = response.event_id;
+ pending_event->additional_event_id = response.additional_event_id;
+ }
+ }
+
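+ /* The event OB queue is a ring; advance the consumer index modulo the element count. */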
+ obq_ci = (obq_ci + 1) % event_q->num_elem;
+ }
+ /* Update CI */
+ event_q->ci_local = obq_ci;
+ PCI_MEM_PUT32(softs, event_q->ci_register_abs,
+ event_q->ci_register_offset, event_q->ci_local);
+
+ /* Queue a task to acknowledge the pending events. */
+ if (need_delayed_work == true) {
+ os_eventtaskqueue_enqueue(softs);
+ }
+
+ DBG_FUNC("OUT");
+ return PQI_STATUS_SUCCESS;
+
+
+}
+
+/*
+ * Function used to send a general management request to adapter.
+ */
+int pqisrc_submit_management_req(pqisrc_softstate_t *softs,
+ pqi_event_config_request_t *request)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ ib_queue_t *op_ib_q = &softs->op_raid_ib_q[0];
+ rcb_t *rcb = NULL;
+
+ DBG_FUNC(" IN\n");
+
+ /* Get the tag */
+ request->request_id = pqisrc_get_tag(&softs->taglist);
+ if (INVALID_ELEM == request->request_id) {
+ DBG_ERR("Tag not available\n");
+ ret = PQI_STATUS_FAILURE;
+ goto err_out;
+ }
+
+ rcb = &softs->rcb[request->request_id];
+ rcb->req_pending = true;
+ rcb->tag = request->request_id;
+ /* Submit command on operational raid ib queue */
+ ret = pqisrc_submit_cmnd(softs, op_ib_q, request);
+ if (ret != PQI_STATUS_SUCCESS) {
+ DBG_ERR(" Unable to submit command\n");
+ goto err_cmd;
+ }
+
+ ret = pqisrc_wait_on_condition(softs, rcb);
+ if (ret != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Management request timed out !!\n");
+ goto err_cmd;
+ }
+
+ os_reset_rcb(rcb);
+ pqisrc_put_tag(&softs->taglist,request->request_id);
+ DBG_FUNC("OUT\n");
+ return ret;
+
+err_cmd:
+ os_reset_rcb(rcb);
+ pqisrc_put_tag(&softs->taglist,request->request_id);
+err_out:
+ DBG_FUNC(" failed OUT : %d\n", ret);
+ return ret;
+}
+
+/*
+ * Build and send the general management request.
+ */
+static int
+pqi_event_configure(pqisrc_softstate_t *softs ,
+ pqi_event_config_request_t *request,
+ dma_mem_t *buff)
+{
+ int ret = PQI_STATUS_SUCCESS;
+
+ DBG_FUNC(" IN\n");
+
+ request->header.comp_feature = 0x00;
+ request->header.iu_length = sizeof(pqi_event_config_request_t) -
+ PQI_REQUEST_HEADER_LENGTH; /* excluding IU header length */
+
+ /*Op OQ id where response to be delivered */
+ request->response_queue_id = softs->op_ob_q[0].q_id;
+ request->buffer_length = buff->size;
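+ /* A single SGL descriptor maps the whole event-config buffer. */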
+ request->sg_desc.addr = buff->dma_addr;
+ request->sg_desc.length = buff->size;
+ request->sg_desc.zero = 0;
+ request->sg_desc.type = SGL_DESCRIPTOR_CODE_LAST_ALTERNATIVE_SGL_SEGMENT;
+
+ /* submit management req IU*/
+ ret = pqisrc_submit_management_req(softs,request);
+ if(ret)
+ goto err_out;
+
+
+ DBG_FUNC(" OUT\n");
+ return ret;
+
+err_out:
+ DBG_FUNC("Failed OUT\n");
+ return ret;
+}
+
+/*
+ * Prepare REPORT EVENT CONFIGURATION IU to request that
+ * event configuration information be reported.
+ */
+int pqisrc_report_event_config(pqisrc_softstate_t *softs)
+{
+
+ int ret, i;
+ pqi_event_config_request_t request;
+ pqi_event_config_t *event_config_p;
+ dma_mem_t buf_report_event;
+ /* bytes to be allocated for the report event config data-in buffer */
+ uint32_t alloc_size = sizeof(pqi_event_config_t);
+ memset(&request, 0, sizeof(request));
+
+ DBG_FUNC(" IN\n");
+
+ memset(&buf_report_event, 0, sizeof(struct dma_mem));
+ buf_report_event.tag = "pqi_report_event_buf" ;
+ buf_report_event.size = alloc_size;
+ buf_report_event.align = PQISRC_DEFAULT_DMA_ALIGN;
+
+ /* allocate memory */
+ ret = os_dma_mem_alloc(softs, &buf_report_event);
+ if (ret) {
+ DBG_ERR("Failed to Allocate report event config buffer : %d\n", ret);
+ goto err_out;
+ }
+ DBG_INFO("buf_report_event.dma_addr = %p \n",(void*)buf_report_event.dma_addr);
+ DBG_INFO("buf_report_event.virt_addr = %p \n",(void*)buf_report_event.virt_addr);
+
+ request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
+
+ /* Event configuration */
+ ret=pqi_event_configure(softs,&request,&buf_report_event);
+ if(ret)
+ goto free_mem;
+
+
+ event_config_p = (pqi_event_config_t*)buf_report_event.virt_addr;
+ softs->event_config.num_event_descriptors = MIN(event_config_p->num_event_descriptors,
+ PQI_MAX_EVENT_DESCRIPTORS);
+
+ for (i = 0; i < softs->event_config.num_event_descriptors; i++) {
+ softs->event_config.descriptors[i].event_type =
+ event_config_p->descriptors[i].event_type;
+ }
+ /* free the allocated memory*/
+ os_dma_mem_free(softs, &buf_report_event);
+
+ DBG_FUNC(" OUT\n");
+ return ret;
+
+free_mem:
+ os_dma_mem_free(softs, &buf_report_event);
+err_out:
+ DBG_FUNC("Failed OUT\n");
+ return PQI_STATUS_FAILURE;
+}
+
+/*
+ * Prepare SET EVENT CONFIGURATION IU to request that
+ * event configuration parameters be set.
+ */
+int pqisrc_set_event_config(pqisrc_softstate_t *softs)
+{
+
+ int ret, i;
+ pqi_event_config_request_t request;
+ pqi_event_config_t *event_config_p;
+ dma_mem_t buf_set_event;
+ /* bytes to be allocated for the set event config data-out buffer */
+ uint32_t alloc_size = sizeof(pqi_event_config_t);
+ memset(&request, 0, sizeof(request));
+
+ DBG_FUNC(" IN\n");
+
+ memset(&buf_set_event, 0, sizeof(struct dma_mem));
+ buf_set_event.tag = "pqi_set_event_buf";
+ buf_set_event.size = alloc_size;
+ buf_set_event.align = PQISRC_DEFAULT_DMA_ALIGN;
+
+ /* allocate memory */
+ ret = os_dma_mem_alloc(softs, &buf_set_event);
+ if (ret) {
+ DBG_ERR("Failed to Allocate set event config buffer : %d\n", ret);
+ goto err_out;
+ }
+
+ DBG_INFO("buf_set_event.dma_addr = %p\n",(void*)buf_set_event.dma_addr);
+ DBG_INFO("buf_set_event.virt_addr = %p\n",(void*)buf_set_event.virt_addr);
+
+ request.header.iu_type = PQI_REQUEST_IU_SET_EVENT_CONFIG;
+ request.iu_specific.global_event_oq_id = softs->event_q.q_id;
+
+ /*pointer to data-out buffer*/
+
+ event_config_p = (pqi_event_config_t *)buf_set_event.virt_addr;
+
+ event_config_p->num_event_descriptors = softs->event_config.num_event_descriptors;
+
+
+ for (i = 0; i < softs->event_config.num_event_descriptors; i++) {
+ event_config_p->descriptors[i].event_type =
+ softs->event_config.descriptors[i].event_type;
+ if (pqisrc_event_type_to_event_index(event_config_p->descriptors[i].event_type) != -1)
+ event_config_p->descriptors[i].oq_id = softs->event_q.q_id;
+ else
+ event_config_p->descriptors[i].oq_id = 0; /* Event type not supported. */
+
+
+ }
+ /* Event configuration */
+ ret = pqi_event_configure(softs,&request,&buf_set_event);
+ if(ret)
+ goto free_mem;
+
+ os_dma_mem_free(softs, &buf_set_event);
+
+ DBG_FUNC(" OUT\n");
+ return ret;
+
+free_mem:
+ os_dma_mem_free(softs, &buf_set_event);
+err_out:
+ DBG_FUNC("Failed OUT\n");
+ return PQI_STATUS_FAILURE;
+
+}
diff --git a/sys/dev/smartpqi/smartpqi_helper.c b/sys/dev/smartpqi/smartpqi_helper.c
new file mode 100644
index 000000000000..690a0efd6b79
--- /dev/null
+++ b/sys/dev/smartpqi/smartpqi_helper.c
@@ -0,0 +1,291 @@
+/*-
+ * Copyright (c) 2018 Microsemi Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#include "smartpqi_includes.h"
+
+/*
+ * Function used to validate the adapter health.
+ */
+boolean_t pqisrc_ctrl_offline(pqisrc_softstate_t *softs)
+{
+ DBG_FUNC("IN\n");
+
+ DBG_FUNC("OUT\n");
+
+ return !softs->ctrl_online;
+}
+
+/*
+ * Function used to take exposed devices to OS as offline.
+ */
+void pqisrc_take_devices_offline(pqisrc_softstate_t *softs)
+{
+ pqi_scsi_dev_t *device = NULL;
+ int i,j;
+
+ DBG_FUNC("IN\n");
+ for(i = 0; i < PQI_MAX_DEVICES; i++) {
+ for(j = 0; j < PQI_MAX_MULTILUN; j++) {
+ if(softs->device_list[i][j] == NULL)
+ continue;
+ device = softs->device_list[i][j];
+ pqisrc_remove_device(softs, device);
+ }
+ }
+
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Function used to take adapter offline.
+ */
+void pqisrc_take_ctrl_offline(pqisrc_softstate_t *softs)
+{
+
+ DBG_FUNC("IN\n");
+
+ softs->ctrl_online = false;
+ pqisrc_trigger_nmi_sis(softs);
+ os_complete_outstanding_cmds_nodevice(softs);
+ pqisrc_take_devices_offline(softs);
+
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Timer handler for the adapter heart-beat.
+ */
+void pqisrc_heartbeat_timer_handler(pqisrc_softstate_t *softs)
+{
+ uint64_t num_intrs;
+ uint8_t take_offline = false;
+
+ DBG_FUNC("IN\n");
+
+ num_intrs = OS_ATOMIC64_READ(softs, num_intrs);
+
+ if (PQI_NEW_HEARTBEAT_MECHANISM(softs)) {
+ if (CTRLR_HEARTBEAT_CNT(softs) == softs->prev_heartbeat_count) {
+ take_offline = true;
+ goto take_ctrl_offline;
+ }
+ softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs);
+ DBG_INFO("CTRLR_HEARTBEAT_CNT(softs) = %lx \
+ softs->prev_heartbeat_count = %lx\n",
+ CTRLR_HEARTBEAT_CNT(softs), softs->prev_heartbeat_count);
+ } else {
+ if (num_intrs == softs->prev_num_intrs) {
+ softs->num_heartbeats_requested++;
+ if (softs->num_heartbeats_requested > PQI_MAX_HEARTBEAT_REQUESTS) {
+ take_offline = true;
+ goto take_ctrl_offline;
+ }
+ softs->pending_events[PQI_EVENT_HEARTBEAT].pending = true;
+
+ pqisrc_ack_all_events((void*)softs);
+
+ } else {
+ softs->num_heartbeats_requested = 0;
+ }
+ softs->prev_num_intrs = num_intrs;
+ }
+
+take_ctrl_offline:
+ if (take_offline){
+ DBG_ERR("controller is offline\n");
+ pqisrc_take_ctrl_offline(softs);
+ os_stop_heartbeat_timer(softs);
+ }
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Conditional variable management routine for internal commands.
+ */
+int pqisrc_wait_on_condition(pqisrc_softstate_t *softs, rcb_t *rcb)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ uint32_t loop_cnt = 0;
+
+ DBG_FUNC("IN\n");
+
+ while (rcb->req_pending == true) {
+ OS_SLEEP(500); /* 500 microseconds */
+
+ /* Polling is needed on FreeBSD: the ithread routine is not
+ scheduled during early boot, so we poll for completions until
+ interrupts are enabled ('if (cold)' identifies boot time). */
+ IS_POLLING_REQUIRED(softs);
+
+ if (loop_cnt++ == PQISRC_CMD_TIMEOUT_CNT) {
+ DBG_ERR("ERR: Requested cmd timed out !!!\n");
+ ret = PQI_STATUS_TIMEOUT;
+ break;
+ }
+
+ if (pqisrc_ctrl_offline(softs)) {
+ DBG_ERR("Controller is Offline");
+ ret = PQI_STATUS_FAILURE;
+ break;
+ }
+
+ }
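+ /* Mark the rcb pending again so it is ready for reuse by the next request. */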
+ rcb->req_pending = true;
+
+ DBG_FUNC("OUT\n");
+
+ return ret;
+}
+
+/* Function used to validate the device wwid. */
+boolean_t pqisrc_device_equal(pqi_scsi_dev_t *dev1,
+ pqi_scsi_dev_t *dev2)
+{
+ return dev1->wwid == dev2->wwid;
+}
+
+/* Function used to validate the device scsi3addr. */
+boolean_t pqisrc_scsi3addr_equal(uint8_t *scsi3addr1, uint8_t *scsi3addr2)
+{
+ return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
+}
+
+/* Function used to validate hba_lunid */
+boolean_t pqisrc_is_hba_lunid(uint8_t *scsi3addr)
+{
+ return pqisrc_scsi3addr_equal(scsi3addr, (uint8_t*)RAID_CTLR_LUNID);
+}
+
+/* Function used to validate type of device */
+boolean_t pqisrc_is_logical_device(pqi_scsi_dev_t *device)
+{
+ return !device->is_physical_device;
+}
+
+/* Function used to sanitize inquiry string */
+void pqisrc_sanitize_inquiry_string(unsigned char *s, int len)
+{
+ boolean_t terminated = false;
+
+ DBG_FUNC("IN\n");
+
+ for (; len > 0; (--len, ++s)) {
+ if (*s == 0)
+ terminated = true;
+ if (terminated || *s < 0x20 || *s > 0x7e)
+ *s = ' ';
+ }
+
+ DBG_FUNC("OUT\n");
+}
+
+static char *raid_levels[] = {
+ "RAID 0",
+ "RAID 4",
+ "RAID 1(1+0)",
+ "RAID 5",
+ "RAID 5+1",
+ "RAID ADG",
+ "RAID 1(ADM)",
+ "RAID 6",
+};
+
+/* Get the RAID level from the index */
+char *pqisrc_raidlevel_to_string(uint8_t raid_level)
+{
+ DBG_FUNC("IN\n");
+ if (raid_level < ARRAY_SIZE(raid_levels))
+ return raid_levels[raid_level];
+ DBG_FUNC("OUT\n");
+
+ return " ";
+}
+
+/* Debug routine for displaying device info */
+void pqisrc_display_device_info(pqisrc_softstate_t *softs,
+ char *action, pqi_scsi_dev_t *device)
+{
+ DBG_INFO( "%s scsi BTL %d:%d:%d: %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
+ action,
+ device->bus,
+ device->target,
+ device->lun,
+ device->vendor,
+ device->model,
+ pqisrc_raidlevel_to_string(device->raid_level),
+ device->offload_config ? '+' : '-',
+ device->offload_enabled_pending ? '+' : '-',
+ device->expose_device ? '+' : '-',
+ device->queue_depth);
+ pqisrc_raidlevel_to_string(device->raid_level); /* Redundant call; presumably keeps the function referenced when DBG_INFO is compiled out. */
+}
+
+/* validate the structure sizes */
+void check_struct_sizes(void)
+{
+
+ ASSERT(sizeof(SCSI3Addr_struct) == 2);
+ ASSERT(sizeof(PhysDevAddr_struct) == 8);
+ ASSERT(sizeof(LogDevAddr_struct) == 8);
+ ASSERT(sizeof(LUNAddr_struct) == 8);
+ ASSERT(sizeof(RequestBlock_struct) == 20);
+ ASSERT(sizeof(MoreErrInfo_struct) == 8);
+ ASSERT(sizeof(ErrorInfo_struct) == 48);
+ ASSERT(sizeof(IOCTL_Command_struct) == 86);
+ ASSERT(sizeof(struct bmic_host_wellness_driver_version) == 42);
+ ASSERT(sizeof(struct bmic_host_wellness_time) == 20);
+ ASSERT(sizeof(struct pqi_dev_adminq_cap) == 8);
+ ASSERT(sizeof(struct admin_q_param) == 4);
+ ASSERT(sizeof(struct pqi_registers) == 256);
+ ASSERT(sizeof(struct ioa_registers) == 4128);
+ ASSERT(sizeof(struct pqi_pref_settings) == 4);
+ ASSERT(sizeof(struct pqi_cap) == 20);
+ ASSERT(sizeof(iu_header_t) == 4);
+ ASSERT(sizeof(gen_adm_req_iu_t) == 64);
+ ASSERT(sizeof(gen_adm_resp_iu_t) == 64);
+ ASSERT(sizeof(op_q_params) == 9);
+ ASSERT(sizeof(raid_path_error_info_elem_t) == 276);
+ ASSERT(sizeof(aio_path_error_info_elem_t) == 276);
+ ASSERT(sizeof(struct init_base_struct) == 24);
+ ASSERT(sizeof(pqi_iu_layer_desc_t) == 16);
+ ASSERT(sizeof(pqi_dev_cap_t) == 576);
+ ASSERT(sizeof(pqi_aio_req_t) == 128);
+ ASSERT(sizeof(pqisrc_raid_req_t) == 128);
+ ASSERT(sizeof(pqi_tmf_req_t) == 32);
+ ASSERT(sizeof(struct pqi_io_response) == 16);
+ ASSERT(sizeof(struct sense_header_scsi) == 8);
+ ASSERT(sizeof(reportlun_header_t) == 8);
+ ASSERT(sizeof(reportlun_ext_entry_t) == 24);
+ ASSERT(sizeof(reportlun_data_ext_t) == 32);
+ ASSERT(sizeof(raidmap_data_t) == 8);
+ ASSERT(sizeof(pqisrc_raid_map_t) == 8256);
+ ASSERT(sizeof(bmic_ident_ctrl_t) == 325);
+ ASSERT(sizeof(bmic_ident_physdev_t) == 2048);
+
+}
diff --git a/sys/dev/smartpqi/smartpqi_includes.h b/sys/dev/smartpqi/smartpqi_includes.h
new file mode 100644
index 000000000000..dd8ffbb2daeb
--- /dev/null
+++ b/sys/dev/smartpqi/smartpqi_includes.h
@@ -0,0 +1,90 @@
+/*-
+ * Copyright (c) 2018 Microsemi Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#ifndef _PQI_INCLUDES_H
+#define _PQI_INCLUDES_H
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bio.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/disk.h>
+#include <sys/cdefs.h>
+#include <sys/types.h>
+#include <sys/malloc.h>
+#include <sys/rman.h>
+#include <sys/endian.h>
+#include <sys/condvar.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/sema.h>
+#include <sys/mutex.h>
+#include <sys/queue.h>
+#include <sys/taskqueue.h>
+#include <sys/smp.h>
+
+#include <machine/cpufunc.h>
+#include <sys/cpu.h>
+#include <sys/pcpu.h>
+#include <sys/time.h>
+#include <sys/clock.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_debug.h>
+#include <cam/cam_periph.h>
+#include <cam/cam_sim.h>
+#include <cam/cam_xpt_sim.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_message.h>
+#include <cam/cam_queue.h>
+#include <cam/cam_xpt_periph.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/md_var.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+
+
+#include "smartpqi_defines.h"
+#include "smartpqi_structures.h"
+#include "smartpqi_prototypes.h"
+#include "smartpqi_ioctl.h"
+
+
+#endif /* _PQI_INCLUDES_H */
diff --git a/sys/dev/smartpqi/smartpqi_init.c b/sys/dev/smartpqi/smartpqi_init.c
new file mode 100644
index 000000000000..c4bf1279640c
--- /dev/null
+++ b/sys/dev/smartpqi/smartpqi_init.c
@@ -0,0 +1,913 @@
+/*-
+ * Copyright (c) 2018 Microsemi Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#include "smartpqi_includes.h"
+
+/*
+ * Request the adapter to get PQI capabilities supported.
+ */
+static int pqisrc_report_pqi_capability(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_SUCCESS;
+
+ DBG_FUNC("IN\n");
+
+ gen_adm_req_iu_t admin_req;
+ gen_adm_resp_iu_t admin_resp;
+ dma_mem_t pqi_cap_dma_buf;
+ pqi_dev_cap_t *capability = NULL;
+ pqi_iu_layer_desc_t *iu_layer_desc = NULL;
+
+ /* Allocate Non DMA memory */
+ capability = os_mem_alloc(softs, sizeof(*capability));
+ if (!capability) {
+ DBG_ERR("Failed to allocate memory for capability\n");
+ ret = PQI_STATUS_FAILURE;
+ goto err_out;
+ }
+
+ memset(&admin_req, 0, sizeof(admin_req));
+ memset(&admin_resp, 0, sizeof(admin_resp));
+
+ memset(&pqi_cap_dma_buf, 0, sizeof(struct dma_mem));
+ pqi_cap_dma_buf.tag = "pqi_cap_buf";
+ pqi_cap_dma_buf.size = REPORT_PQI_DEV_CAP_DATA_BUF_SIZE;
+ pqi_cap_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN;
+
+ ret = os_dma_mem_alloc(softs, &pqi_cap_dma_buf);
+ if (ret) {
+ DBG_ERR("Failed to allocate capability DMA buffer : %d\n", ret);
+ goto err_dma_alloc;
+ }
+
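+ /* Build a REPORT PQI DEVICE CAPABILITY admin request; the adapter DMAs the capability data into the buffer above. */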
+ admin_req.fn_code = PQI_FUNCTION_REPORT_DEV_CAP;
+ admin_req.req_type.general_func.buf_size = pqi_cap_dma_buf.size;
+ admin_req.req_type.general_func.sg_desc.length = pqi_cap_dma_buf.size;
+ admin_req.req_type.general_func.sg_desc.addr = pqi_cap_dma_buf.dma_addr;
+ admin_req.req_type.general_func.sg_desc.type = SGL_DESCRIPTOR_CODE_DATA_BLOCK;
+
+ ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);
+ if( PQI_STATUS_SUCCESS == ret) {
+ memcpy(capability,
+ pqi_cap_dma_buf.virt_addr,
+ pqi_cap_dma_buf.size);
+ } else {
+ DBG_ERR("Failed to send admin req report pqi device capability\n");
+ goto err_admin_req;
+
+ }
+
+ softs->pqi_dev_cap.max_iqs = capability->max_iqs;
+ softs->pqi_dev_cap.max_iq_elements = capability->max_iq_elements;
+ softs->pqi_dev_cap.max_iq_elem_len = capability->max_iq_elem_len;
+ softs->pqi_dev_cap.min_iq_elem_len = capability->min_iq_elem_len;
+ softs->pqi_dev_cap.max_oqs = capability->max_oqs;
+ softs->pqi_dev_cap.max_oq_elements = capability->max_oq_elements;
+ softs->pqi_dev_cap.max_oq_elem_len = capability->max_oq_elem_len;
+ softs->pqi_dev_cap.intr_coales_time_granularity = capability->intr_coales_time_granularity;
+
+ iu_layer_desc = &capability->iu_layer_desc[PQI_PROTOCOL_SOP];
+ softs->max_ib_iu_length_per_fw = iu_layer_desc->max_ib_iu_len;
+ softs->ib_spanning_supported = iu_layer_desc->ib_spanning_supported;
+ softs->ob_spanning_supported = iu_layer_desc->ob_spanning_supported;
+
+ DBG_INFO("softs->pqi_dev_cap.max_iqs: %d\n", softs->pqi_dev_cap.max_iqs);
+ DBG_INFO("softs->pqi_dev_cap.max_iq_elements: %d\n", softs->pqi_dev_cap.max_iq_elements);
+ DBG_INFO("softs->pqi_dev_cap.max_iq_elem_len: %d\n", softs->pqi_dev_cap.max_iq_elem_len);
+ DBG_INFO("softs->pqi_dev_cap.min_iq_elem_len: %d\n", softs->pqi_dev_cap.min_iq_elem_len);
+ DBG_INFO("softs->pqi_dev_cap.max_oqs: %d\n", softs->pqi_dev_cap.max_oqs);
+ DBG_INFO("softs->pqi_dev_cap.max_oq_elements: %d\n", softs->pqi_dev_cap.max_oq_elements);
+ DBG_INFO("softs->pqi_dev_cap.max_oq_elem_len: %d\n", softs->pqi_dev_cap.max_oq_elem_len);
+ DBG_INFO("softs->pqi_dev_cap.intr_coales_time_granularity: %d\n", softs->pqi_dev_cap.intr_coales_time_granularity);
+ DBG_INFO("softs->max_ib_iu_length_per_fw: %d\n", softs->max_ib_iu_length_per_fw);
+ DBG_INFO("softs->ib_spanning_supported: %d\n", softs->ib_spanning_supported);
+ DBG_INFO("softs->ob_spanning_supported: %d\n", softs->ob_spanning_supported);
+
+
+ os_mem_free(softs, (void *)capability,
+ sizeof(*capability));
+ os_dma_mem_free(softs, &pqi_cap_dma_buf);
+
+ DBG_FUNC("OUT\n");
+ return ret;
+
+err_admin_req:
+ os_dma_mem_free(softs, &pqi_cap_dma_buf);
+err_dma_alloc:
+ if (capability)
+ os_mem_free(softs, (void *)capability,
+ sizeof(*capability));
+err_out:
+ DBG_FUNC("failed OUT\n");
+ return PQI_STATUS_FAILURE;
+}
+
+/*
+ * Function used to deallocate the used rcb.
+ */
+void pqisrc_free_rcb(pqisrc_softstate_t *softs, int req_count)
+{
+
+ uint32_t num_req;
+ size_t size;
+ int i;
+
+ DBG_FUNC("IN\n");
+ num_req = softs->max_outstanding_io + 1;
+ size = num_req * sizeof(rcb_t);
+ for (i = 1; i < req_count; i++)
+ os_dma_mem_free(softs, &softs->sg_dma_desc[i]);
+ os_mem_free(softs, (void *)softs->rcb, size);
+ softs->rcb = NULL;
+ DBG_FUNC("OUT\n");
+}
+
+
+/*
+ * Allocate memory for rcb and SG descriptors.
+ */
+static int pqisrc_allocate_rcb(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ int i = 0;
+ uint32_t num_req = 0;
+ uint32_t sg_buf_size = 0;
+ uint64_t alloc_size = 0;
+ rcb_t *rcb = NULL;
+ rcb_t *prcb = NULL;
+ DBG_FUNC("IN\n");
+
+ /* Set maximum outstanding requests */
+ /* The valid tag values are from 1, 2, ..., softs->max_outstanding_io
+ * The rcb will be accessed by using the tag as index
+ * As 0 tag index is not used, we need to allocate one extra.
+ */
+ softs->max_outstanding_io = softs->pqi_cap.max_outstanding_io;
+ num_req = softs->max_outstanding_io + 1;
+ DBG_INFO("Max Outstanding IO reset to %d\n", num_req);
+
+ alloc_size = num_req * sizeof(rcb_t);
+
+ /* Allocate Non DMA memory */
+ rcb = os_mem_alloc(softs, alloc_size);
+ if (!rcb) {
+ DBG_ERR("Failed to allocate memory for rcb\n");
+ ret = PQI_STATUS_FAILURE;
+ goto err_out;
+ }
+ softs->rcb = rcb;
+
+ /* Allocate sg dma memory for sg chain */
+ sg_buf_size = softs->pqi_cap.max_sg_elem *
+ sizeof(sgt_t);
+
+ prcb = &softs->rcb[1];
+ /* Initialize rcb */
+ for(i=1; i < num_req; i++) {
+ char tag[15];
+ snprintf(tag, sizeof(tag), "sg_dma_buf%d", i);
+ softs->sg_dma_desc[i].tag = tag;
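+ /* Note: 'tag' is a stack buffer, so this pointer is only valid for the os_dma_mem_alloc() call below. */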
+ softs->sg_dma_desc[i].size = sg_buf_size;
+ softs->sg_dma_desc[i].align = PQISRC_DEFAULT_DMA_ALIGN;
+
+ ret = os_dma_mem_alloc(softs, &softs->sg_dma_desc[i]);
+ if (ret) {
+ DBG_ERR("Failed to Allocate sg desc %d\n", ret);
+ ret = PQI_STATUS_FAILURE;
+ goto error;
+ }
+ prcb->sg_chain_virt = (sgt_t *)(softs->sg_dma_desc[i].virt_addr);
+ prcb->sg_chain_dma = (dma_addr_t)(softs->sg_dma_desc[i].dma_addr);
+ prcb ++;
+ }
+
+ DBG_FUNC("OUT\n");
+ return ret;
+error:
+ pqisrc_free_rcb(softs, i);
+err_out:
+ DBG_FUNC("failed OUT\n");
+ return ret;
+}
+
+/*
+ * Function used to decide the operational queue configuration params
+ * - no of ibq/obq, shared/non-shared interrupt resource, IU spanning support
+ */
+void pqisrc_decide_opq_config(pqisrc_softstate_t *softs)
+{
+ uint16_t total_iq_elements;
+
+ DBG_FUNC("IN\n");
+
+ DBG_INFO("softs->intr_count : %d softs->num_cpus_online : %d",
+ softs->intr_count, softs->num_cpus_online);
+
+ if (softs->intr_count == 1 || softs->num_cpus_online == 1) {
+ /* Share the event and Operational queue. */
+ softs->num_op_obq = 1;
+ softs->share_opq_and_eventq = true;
+ } else {
+ /* Note : One OBQ (OBQ0) reserved for event queue */
+ softs->num_op_obq = MIN(softs->num_cpus_online,
+ softs->intr_count) - 1;
+ softs->num_op_obq = softs->intr_count - 1;
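+ /* Note: this overrides the MIN() result above, sizing one OB queue per remaining interrupt vector. */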
+ softs->share_opq_and_eventq = false;
+ }
+
+#ifdef MULTIPLE_MSIX
+ /*
+ * softs->num_cpus_online is set as number of physical CPUs,
+ * so we can have more queues/interrupts.
+ */
+ if (softs->intr_count > 1)
+ softs->share_opq_and_eventq = false;
+#endif
+
+ DBG_INFO("softs->num_op_obq : %d\n",softs->num_op_obq);
+
+ softs->num_op_raid_ibq = softs->num_op_obq;
+ softs->num_op_aio_ibq = softs->num_op_raid_ibq;
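+ /* The PQI capability reports element lengths in units of 16 bytes. */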
+ softs->ibq_elem_size = softs->pqi_dev_cap.max_iq_elem_len * 16;
+ softs->obq_elem_size = softs->pqi_dev_cap.max_oq_elem_len * 16;
+ if (softs->max_ib_iu_length_per_fw == 256 &&
+ softs->ob_spanning_supported) {
+ /* older f/w that doesn't actually support spanning. */
+ softs->max_ib_iu_length = softs->ibq_elem_size;
+ } else {
+ /* max. inbound IU length is a multiple of our inbound element size. */
+ softs->max_ib_iu_length =
+ (softs->max_ib_iu_length_per_fw / softs->ibq_elem_size) *
+ softs->ibq_elem_size;
+
+ }
+ /* Each request may span (max_ib_iu_length / ibq_elem_size) queue
+ elements, so the total inbound element budget is the maximum
+ outstanding I/O count multiplied by that span. */
+ total_iq_elements = (softs->max_outstanding_io *
+ (softs->max_ib_iu_length / softs->ibq_elem_size));
+
+ softs->num_elem_per_op_ibq = total_iq_elements / softs->num_op_raid_ibq;
+ softs->num_elem_per_op_ibq = MIN(softs->num_elem_per_op_ibq,
+ softs->pqi_dev_cap.max_iq_elements);
+
+ softs->num_elem_per_op_obq = softs->max_outstanding_io / softs->num_op_obq;
+ softs->num_elem_per_op_obq = MIN(softs->num_elem_per_op_obq,
+ softs->pqi_dev_cap.max_oq_elements);
+
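+ /* SG capacity per request: the SGs embedded in the first IU element plus those that fit in the remaining spanned IU space. */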
+ softs->max_sg_per_iu = ((softs->max_ib_iu_length -
+ softs->ibq_elem_size) /
+ sizeof(sgt_t)) +
+ MAX_EMBEDDED_SG_IN_FIRST_IU;
+
+ DBG_INFO("softs->max_ib_iu_length: %d\n", softs->max_ib_iu_length);
+ DBG_INFO("softs->num_elem_per_op_ibq: %d\n", softs->num_elem_per_op_ibq);
+ DBG_INFO("softs->num_elem_per_op_obq: %d\n", softs->num_elem_per_op_obq);
+ DBG_INFO("softs->max_sg_per_iu: %d\n", softs->max_sg_per_iu);
+
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Configure the operational queue parameters.
+ */
+int pqisrc_configure_op_queues(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_SUCCESS;
+
+ /* Get the PQI capabilities via a
+ REPORT PQI DEVICE CAPABILITY request */
+ ret = pqisrc_report_pqi_capability(softs);
+ if (ret) {
+ DBG_ERR("Failed to send report pqi dev capability request : %d\n",
+ ret);
+ goto err_out;
+ }
+
+ /* Reserve the required number of slots for internal requests */
+ softs->max_io_for_scsi_ml = softs->max_outstanding_io - PQI_RESERVED_IO_SLOTS_CNT;
+
+ /* Decide the Op queue configuration */
+ pqisrc_decide_opq_config(softs);
+
+ DBG_FUNC("OUT\n");
+ return ret;
+
+err_out:
+ DBG_FUNC("OUT failed\n");
+ return ret;
+}
+
+/*
+ * Validate the PQI mode of adapter.
+ */
+int pqisrc_check_pqimode(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_FAILURE;
+ int tmo = 0;
+ uint64_t signature = 0;
+
+ DBG_FUNC("IN\n");
+
+ /* Check the PQI device signature */
+ tmo = PQISRC_PQIMODE_READY_TIMEOUT;
+ do {
+ signature = LE_64(PCI_MEM_GET64(softs, &softs->pqi_reg->signature, PQI_SIGNATURE));
+
+ if (memcmp(&signature, PQISRC_PQI_DEVICE_SIGNATURE,
+ sizeof(uint64_t)) == 0) {
+ ret = PQI_STATUS_SUCCESS;
+ break;
+ }
+ OS_SLEEP(PQISRC_MODE_READY_POLL_INTERVAL);
+ } while (tmo--);
+
+ PRINT_PQI_SIGNATURE(signature);
+
+ if (tmo <= 0) {
+ DBG_ERR("PQI Signature is invalid\n");
+ ret = PQI_STATUS_TIMEOUT;
+ goto err_out;
+ }
+
+ tmo = PQISRC_PQIMODE_READY_TIMEOUT;
+ /* Check function and status code for the device */
+ COND_WAIT((PCI_MEM_GET64(softs, &softs->pqi_reg->admin_q_config,
+ PQI_ADMINQ_CONFIG) == PQI_ADMIN_QUEUE_CONF_FUNC_STATUS_IDLE), tmo);
+ if (!tmo) {
+ DBG_ERR("PQI device is not in IDLE state\n");
+ ret = PQI_STATUS_TIMEOUT;
+ goto err_out;
+ }
+
+
+ tmo = PQISRC_PQIMODE_READY_TIMEOUT;
+ /* Check the PQI device status register */
+ COND_WAIT(LE_32(PCI_MEM_GET32(softs, &softs->pqi_reg->pqi_dev_status, PQI_DEV_STATUS)) &
+ PQI_DEV_STATE_AT_INIT, tmo);
+ if (!tmo) {
+ DBG_ERR("PQI Registers are not ready\n");
+ ret = PQI_STATUS_TIMEOUT;
+ goto err_out;
+ }
+
+ DBG_FUNC("OUT\n");
+ return ret;
+err_out:
+ DBG_FUNC("OUT failed\n");
+ return ret;
+}
+
+/*
+ * Get the PQI configuration table parameters.
+ * Currently using for heart-beat counter scratch-pad register.
+ */
+int pqisrc_process_config_table(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_FAILURE;
+ uint32_t config_table_size;
+ uint32_t section_off;
+ uint8_t *config_table_abs_addr;
+ struct pqi_conf_table *conf_table;
+ struct pqi_conf_table_section_header *section_hdr;
+
+ config_table_size = softs->pqi_cap.conf_tab_sz;
+
+ if (config_table_size < sizeof(*conf_table) ||
+ config_table_size > PQI_CONF_TABLE_MAX_LEN) {
+ DBG_ERR("Invalid PQI conf table length of %u\n",
+ config_table_size);
+ return ret;
+ }
+
+ conf_table = os_mem_alloc(softs, config_table_size);
+ if (!conf_table) {
+ DBG_ERR("Failed to allocate memory for PQI conf table\n");
+ return ret;
+ }
+
+ config_table_abs_addr = (uint8_t *)(softs->pci_mem_base_vaddr +
+ softs->pqi_cap.conf_tab_off);
+
+ PCI_MEM_GET_BUF(softs, config_table_abs_addr,
+ softs->pqi_cap.conf_tab_off,
+ (uint8_t*)conf_table, config_table_size);
+
+
+ if (memcmp(conf_table->sign, PQI_CONF_TABLE_SIGNATURE,
+ sizeof(conf_table->sign)) != 0) {
+ DBG_ERR("Invalid PQI config signature\n");
+ goto out;
+ }
+
+ section_off = LE_32(conf_table->first_section_off);
+
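+ /* Walk the linked list of config-table sections; a zero next-section offset terminates the walk. */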
+ while (section_off) {
+
+ if (section_off + sizeof(*section_hdr) >= config_table_size) {
+ DBG_ERR("PQI config table section offset (%u) beyond "
+ "end of config table (config table length: %u)\n",
+ section_off, config_table_size);
+ break;
+ }
+
+ section_hdr = (struct pqi_conf_table_section_header *)((uint8_t *)conf_table + section_off);
+
+ switch (LE_16(section_hdr->section_id)) {
+ case PQI_CONF_TABLE_SECTION_GENERAL_INFO:
+ case PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES:
+ case PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA:
+ case PQI_CONF_TABLE_SECTION_DEBUG:
+ break;
+ case PQI_CONF_TABLE_SECTION_HEARTBEAT:
+ softs->heartbeat_counter_off = softs->pqi_cap.conf_tab_off +
+ section_off +
+ offsetof(struct pqi_conf_table_heartbeat,
+ heartbeat_counter);
+ softs->heartbeat_counter_abs_addr = (uint64_t *)(softs->pci_mem_base_vaddr +
+ softs->heartbeat_counter_off);
+ ret = PQI_STATUS_SUCCESS;
+ break;
+ default:
+ DBG_ERR("unrecognized PQI config table section ID: 0x%x\n",
+ LE_16(section_hdr->section_id));
+ break;
+ }
+ section_off = LE_16(section_hdr->next_section_off);
+ }
+out:
+ os_mem_free(softs, (void *)conf_table,config_table_size);
+ return ret;
+}
+
+/* Wait for PQI reset completion for the adapter*/
+int pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ pqi_reset_reg_t reset_reg;
+ int pqi_reset_timeout = 0;
+ uint64_t val = 0;
+ uint32_t max_timeout = 0;
+
+ val = PCI_MEM_GET64(softs, &softs->pqi_reg->pqi_dev_adminq_cap, PQI_ADMINQ_CAP);
+
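+ /* Bits 47:32 of the admin-queue capability register give the reset timeout, in 100 msec units. */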
+ max_timeout = (val & 0xFFFF00000000) >> 32;
+
+ DBG_INFO("max_timeout for PQI reset completion in 100 msec units = %u\n", max_timeout);
+
+ while(1) {
+ if (pqi_reset_timeout++ == max_timeout) {
+ return PQI_STATUS_TIMEOUT;
+ }
+ OS_SLEEP(PQI_RESET_POLL_INTERVAL); /* 100 msec */
+ reset_reg.all_bits = PCI_MEM_GET32(softs,
+ &softs->pqi_reg->dev_reset, PQI_DEV_RESET);
+ if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Function used to perform PQI hard reset.
+ */
+int pqi_reset(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ uint32_t val = 0;
+ pqi_reset_reg_t pqi_reset_reg;
+
+ DBG_FUNC("IN\n");
+
+ if (true == softs->ctrl_in_pqi_mode) {
+
+ if (softs->pqi_reset_quiesce_allowed) {
+ val = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db,
+ LEGACY_SIS_IDBR);
+ val |= SIS_PQI_RESET_QUIESCE;
+ PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
+ LEGACY_SIS_IDBR, LE_32(val));
+ ret = pqisrc_sis_wait_for_db_bit_to_clear(softs, SIS_PQI_RESET_QUIESCE);
+ if (ret) {
+ DBG_ERR("failed with error %d during quiesce\n", ret);
+ return ret;
+ }
+ }
+
+ pqi_reset_reg.all_bits = 0;
+ pqi_reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
+ pqi_reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
+
+ PCI_MEM_PUT32(softs, &softs->pqi_reg->dev_reset, PQI_DEV_RESET,
+ LE_32(pqi_reset_reg.all_bits));
+
+ ret = pqisrc_wait_for_pqi_reset_completion(softs);
+ if (ret) {
+ DBG_ERR("PQI reset timed out: ret = %d!\n", ret);
+ return ret;
+ }
+ }
+ softs->ctrl_in_pqi_mode = false;
+ DBG_FUNC("OUT\n");
+ return ret;
+}
+
+/*
+ * Initialize the adapter with supported PQI configuration.
+ */
+int pqisrc_pqi_init(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_SUCCESS;
+
+ DBG_FUNC("IN\n");
+
+ /* Check the PQI signature */
+ ret = pqisrc_check_pqimode(softs);
+ if(ret) {
+ DBG_ERR("failed to switch to pqi\n");
+ goto err_out;
+ }
+
+ PQI_SAVE_CTRL_MODE(softs, CTRL_PQI_MODE);
+ softs->ctrl_in_pqi_mode = true;
+
+ /* Get the number of online CPUs and the NUMA/processor configuration from the OS */
+ ret = os_get_processor_config(softs);
+ if (ret) {
+ DBG_ERR("Failed to get processor config from OS %d\n",
+ ret);
+ goto err_out;
+ }
+
+ /* Get the interrupt count, type, priority available from OS */
+ ret = os_get_intr_config(softs);
+ if (ret) {
+ DBG_ERR("Failed to get interrupt config from OS %d\n",
+ ret);
+ goto err_out;
+ }
+
+ /* Create Admin Queue pair*/
+ ret = pqisrc_create_admin_queue(softs);
+ if(ret) {
+ DBG_ERR("Failed to configure admin queue\n");
+ goto err_admin_queue;
+ }
+
+ /* Creating the event and I/O operational queues requires submitting
+ admin IU requests, so allocate the resources for submitting IUs first. */
+
+ /* Allocate the request container block (rcb) */
+ ret = pqisrc_allocate_rcb(softs);
+ if (ret == PQI_STATUS_FAILURE) {
+ DBG_ERR("Failed to allocate rcb \n");
+ goto err_rcb;
+ }
+
+ /* Allocate & initialize request id queue */
+ ret = pqisrc_init_taglist(softs,&softs->taglist,
+ softs->max_outstanding_io);
+ if (ret) {
+ DBG_ERR("Failed to allocate memory for request id q : %d\n",
+ ret);
+ goto err_taglist;
+ }
+
+ ret = pqisrc_configure_op_queues(softs);
+ if (ret) {
+ DBG_ERR("Failed to configure op queue\n");
+ goto err_config_opq;
+ }
+
+ /* Create Operational queues */
+ ret = pqisrc_create_op_queues(softs);
+ if(ret) {
+ DBG_ERR("Failed to create op queue\n");
+ ret = PQI_STATUS_FAILURE;
+ goto err_create_opq;
+ }
+
+ softs->ctrl_online = true;
+
+ DBG_FUNC("OUT\n");
+ return ret;
+
+err_create_opq:
+err_config_opq:
+ pqisrc_destroy_taglist(softs,&softs->taglist);
+err_taglist:
+ pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);
+err_rcb:
+ pqisrc_destroy_admin_queue(softs);
+err_admin_queue:
+ os_free_intr_config(softs);
+err_out:
+ DBG_FUNC("OUT failed\n");
+ return PQI_STATUS_FAILURE;
+}
+
+/* Force the adapter back into SIS mode. */
+int pqisrc_force_sis(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_SUCCESS;
+
+ if (SIS_IS_KERNEL_PANIC(softs)) {
+ DBG_INFO("Controller FW is not runnning");
+ return PQI_STATUS_FAILURE;
+ }
+
+ if (PQI_GET_CTRL_MODE(softs) == CTRL_SIS_MODE) {
+ return ret;
+ }
+
+ if (SIS_IS_KERNEL_UP(softs)) {
+ PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE);
+ return ret;
+ }
+ /* Disable MSI-X interrupts */
+ sis_disable_msix(softs);
+
+ /* reset pqi, this will delete queues */
+ ret = pqi_reset(softs);
+ if (ret) {
+ return ret;
+ }
+ /* Re-enable SIS */
+ ret = pqisrc_reenable_sis(softs);
+ if (ret) {
+ return ret;
+ }
+
+ PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE);
+
+ return ret;
+}
+
+/*
+ * Uninitialize the resources used during PQI initialization.
+ */
+void pqisrc_pqi_uninit(pqisrc_softstate_t *softs)
+{
+ int i;
+ DBG_FUNC("IN\n");
+
+ if(softs->devlist_lockcreated==true){
+ os_uninit_spinlock(&softs->devlist_lock);
+ softs->devlist_lockcreated = false;
+ }
+
+ for (i = 0; i < softs->num_op_raid_ibq; i++) {
+ /* OP RAID IB Q */
+ if(softs->op_raid_ib_q[i].lockcreated==true){
+ OS_UNINIT_PQILOCK(&softs->op_raid_ib_q[i].lock);
+ softs->op_raid_ib_q[i].lockcreated = false;
+ }
+
+ /* OP AIO IB Q */
+ if(softs->op_aio_ib_q[i].lockcreated==true){
+ OS_UNINIT_PQILOCK(&softs->op_aio_ib_q[i].lock);
+ softs->op_aio_ib_q[i].lockcreated = false;
+ }
+ }
+
+ /* Free Op queues */
+ os_dma_mem_free(softs, &softs->op_ibq_dma_mem);
+ os_dma_mem_free(softs, &softs->op_obq_dma_mem);
+ os_dma_mem_free(softs, &softs->event_q_dma_mem);
+
+ /* Complete all pending commands. */
+ os_complete_outstanding_cmds_nodevice(softs);
+
+ /* Free rcb */
+ pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);
+
+ /* Free request id lists */
+ pqisrc_destroy_taglist(softs,&softs->taglist);
+
+ if(softs->admin_ib_queue.lockcreated==true){
+ OS_UNINIT_PQILOCK(&softs->admin_ib_queue.lock);
+ softs->admin_ib_queue.lockcreated = false;
+ }
+
+ /* Free Admin Queue */
+ os_dma_mem_free(softs, &softs->admin_queue_dma_mem);
+
+ /* Switch back to SIS mode */
+ if (pqisrc_force_sis(softs)) {
+ DBG_ERR("Failed to switch back the adapter to SIS mode!\n");
+ }
+
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Function to initialize the adapter settings.
+ */
+int pqisrc_init(pqisrc_softstate_t *softs)
+{
+ int ret = 0;
+ int i = 0, j = 0;
+
+ DBG_FUNC("IN\n");
+
+ check_struct_sizes();
+
+ /* Init the Sync interface */
+ ret = pqisrc_sis_init(softs);
+ if (ret) {
+ DBG_ERR("SIS Init failed with error %d\n", ret);
+ goto err_out;
+ }
+
+ /* Init the PQI interface */
+ ret = pqisrc_pqi_init(softs);
+ if (ret) {
+ DBG_ERR("PQI Init failed with error %d\n", ret);
+ goto err_pqi;
+ }
+
+ /* Setup interrupt */
+ ret = os_setup_intr(softs);
+ if (ret) {
+ DBG_ERR("Interrupt setup failed with error %d\n", ret);
+ goto err_intr;
+ }
+
+ /* Report event configuration */
+ ret = pqisrc_report_event_config(softs);
+ if(ret){
+ DBG_ERR(" Failed to configure Report events\n");
+ goto err_event;
+ }
+
+ /* Set event configuration*/
+ ret = pqisrc_set_event_config(softs);
+ if(ret){
+ DBG_ERR(" Failed to configure Set events\n");
+ goto err_event;
+ }
+
+ /* Check for PQI spanning */
+ ret = pqisrc_get_ctrl_fw_version(softs);
+ if(ret){
+ DBG_ERR(" Failed to get ctrl fw version\n");
+ goto err_fw_version;
+ }
+
+ /* Update the driver version in the FW */
+ ret = pqisrc_write_driver_version_to_host_wellness(softs);
+ if (ret) {
+ DBG_ERR("Failed to update driver version in FW\n");
+ goto err_host_wellness;
+ }
+
+
+ os_strlcpy(softs->devlist_lock_name, "devlist_lock", LOCKNAME_SIZE);
+ ret = os_init_spinlock(softs, &softs->devlist_lock, softs->devlist_lock_name);
+ if(ret){
+ DBG_ERR(" Failed to initialize devlist_lock\n");
+ softs->devlist_lockcreated=false;
+ goto err_lock;
+ }
+ softs->devlist_lockcreated = true;
+
+ ret = os_create_semaphore("scan_lock", 1, &softs->scan_lock);
+ if(ret != PQI_STATUS_SUCCESS){
+ DBG_ERR(" Failed to initialize scan lock\n");
+ goto err_scan_lock;
+ }
+
+ OS_ATOMIC64_SET(softs, num_intrs, 0);
+ softs->prev_num_intrs = softs->num_intrs;
+
+
+	/* Get the PQI configuration table to read the heart-beat counter */
+ if (PQI_NEW_HEARTBEAT_MECHANISM(softs)) {
+ ret = pqisrc_process_config_table(softs);
+ if (ret) {
+ DBG_ERR("Failed to process PQI configuration table %d\n", ret);
+ goto err_config_tab;
+ }
+ }
+
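+	/*
+	 * Prime the saved heartbeat count one interval back so the first
+	 * timer check sees the counter advancing.
+	 */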
+ if (PQI_NEW_HEARTBEAT_MECHANISM(softs))
+ softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs) - OS_FW_HEARTBEAT_TIMER_INTERVAL;
+
+ /* Init device list */
+ for(i = 0; i < PQI_MAX_DEVICES; i++)
+ for(j = 0; j < PQI_MAX_MULTILUN; j++)
+ softs->device_list[i][j] = NULL;
+
+ DBG_FUNC("OUT\n");
+ return ret;
+
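+/* Error unwind: release resources in the reverse order they were set up. */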
+err_config_tab:
+ os_destroy_semaphore(&softs->scan_lock);
+err_scan_lock:
+ if(softs->devlist_lockcreated==true){
+ os_uninit_spinlock(&softs->devlist_lock);
+ softs->devlist_lockcreated = false;
+ }
+err_lock:
+err_fw_version:
+err_event:
+err_host_wellness:
+ os_destroy_intr(softs);
+err_intr:
+ pqisrc_pqi_uninit(softs);
+err_pqi:
+ pqisrc_sis_uninit(softs);
+err_out:
+ DBG_FUNC("OUT failed\n");
+ return ret;
+}
+
+/*
+ * Write all data in the adapter's battery-backed cache to
+ * storage.
+ */
+int pqisrc_flush_cache( pqisrc_softstate_t *softs,
+ enum pqisrc_flush_cache_event_type event_type)
+{
+ int rval = PQI_STATUS_SUCCESS;
+ pqisrc_raid_req_t request;
+ pqisrc_bmic_flush_cache_t *flush_buff = NULL;
+
+ DBG_FUNC("IN\n");
+
+ if (pqisrc_ctrl_offline(softs))
+ return PQI_STATUS_FAILURE;
+
+ flush_buff = os_mem_alloc(softs, sizeof(pqisrc_bmic_flush_cache_t));
+ if (!flush_buff) {
+ DBG_ERR("Failed to allocate memory for flush cache params\n");
+ rval = PQI_STATUS_FAILURE;
+ return rval;
+ }
+
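+	/* Record why the flush was requested (e.g. PQISRC_SHUTDOWN) for the firmware. */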
+ flush_buff->halt_event = event_type;
+
+ memset(&request, 0, sizeof(request));
+
+ rval = pqisrc_build_send_raid_request(softs, &request, flush_buff,
+ sizeof(*flush_buff), SA_CACHE_FLUSH, 0,
+ (uint8_t *)RAID_CTLR_LUNID, NULL);
+ if (rval) {
+ DBG_ERR("error in build send raid req ret=%d\n", rval);
+ }
+
+ if (flush_buff)
+ os_mem_free(softs, (void *)flush_buff,
+ sizeof(pqisrc_bmic_flush_cache_t));
+
+ DBG_FUNC("OUT\n");
+
+ return rval;
+}
+
+/*
+ * Uninitialize the adapter.
+ */
+void pqisrc_uninit(pqisrc_softstate_t *softs)
+{
+ DBG_FUNC("IN\n");
+
+ os_destroy_intr(softs);
+
+ os_destroy_semaphore(&softs->scan_lock);
+
+ pqisrc_pqi_uninit(softs);
+
+ pqisrc_sis_uninit(softs);
+
+ pqisrc_cleanup_devices(softs);
+
+ DBG_FUNC("OUT\n");
+}
diff --git a/sys/dev/smartpqi/smartpqi_intr.c b/sys/dev/smartpqi/smartpqi_intr.c
new file mode 100644
index 000000000000..5183b32904d0
--- /dev/null
+++ b/sys/dev/smartpqi/smartpqi_intr.c
@@ -0,0 +1,437 @@
+/*-
+ * Copyright (c) 2018 Microsemi Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#include "smartpqi_includes.h"
+
+
+/*
+ * Function to get processor count
+ */
+int os_get_processor_config(pqisrc_softstate_t *softs)
+{
+ DBG_FUNC("IN\n");
+ softs->num_cpus_online = mp_ncpus;
+ DBG_FUNC("OUT\n");
+
+ return PQI_STATUS_SUCCESS;
+}
+
+/*
+ * Function to get interrupt count and type supported
+ */
+int os_get_intr_config(pqisrc_softstate_t *softs)
+{
+ device_t dev;
+ int msi_count = 0;
+ int error = 0;
+ int ret = PQI_STATUS_SUCCESS;
+ dev = softs->os_specific.pqi_dev;
+
+ DBG_FUNC("IN\n");
+
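+	/*
+	 * Prefer MSI-X with up to one vector per online CPU (capped at
+	 * PQI_MAX_MSIX); fall back to MSI, then to a legacy INTx vector.
+	 */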
+ msi_count = pci_msix_count(dev);
+
+ if (msi_count > softs->num_cpus_online)
+ msi_count = softs->num_cpus_online;
+ if (msi_count > PQI_MAX_MSIX)
+ msi_count = PQI_MAX_MSIX;
+ if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) {
+ device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; "
+ "will try MSI\n", msi_count, error);
+ pci_release_msi(dev);
+ } else {
+ softs->intr_count = msi_count;
+ softs->intr_type = INTR_TYPE_MSIX;
+ softs->os_specific.msi_enabled = TRUE;
+ device_printf(dev, "using MSI-X interrupts (%d vectors)\n",
+ msi_count);
+ }
+ if (!softs->intr_type) {
+ msi_count = 1;
+ if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
+ device_printf(dev, "alloc msi failed - err=%d; "
+ "will use INTx\n", error);
+ pci_release_msi(dev);
+ } else {
+ softs->os_specific.msi_enabled = TRUE;
+ softs->intr_count = msi_count;
+ softs->intr_type = INTR_TYPE_MSI;
+ device_printf(dev, "using MSI interrupts\n");
+ }
+ }
+
+ if (!softs->intr_type) {
+ device_printf(dev, "using legacy interrupts\n");
+ softs->intr_type = INTR_TYPE_FIXED;
+ softs->intr_count = 1;
+ }
+
+ DBG_FUNC("OUT\n");
+ return ret;
+}
+
+void os_eventtaskqueue_enqueue(pqisrc_softstate_t *sc)
+{
+ taskqueue_enqueue(taskqueue_swi, &sc->os_specific.event_task);
+}
+
+void pqisrc_event_worker(void *arg1, int arg2)
+{
+ pqisrc_ack_all_events(arg1);
+}
+
+/*
+ * ithread routine to handle responses and events on a shared vector
+ */
+static void shared_ithread_routine(void *arg)
+{
+ pqi_intr_ctx_t *intr_ctx = (pqi_intr_ctx_t *)arg;
+ pqisrc_softstate_t *softs = device_get_softc(intr_ctx->pqi_dev);
+ int oq_id = intr_ctx->oq_id;
+
+ DBG_FUNC("IN\n");
+
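+	/* Drain I/O responses first, then handle any pending events on the same vector. */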
+ pqisrc_process_response_queue(softs, oq_id);
+ pqisrc_process_event_intr_src(softs, oq_id - 1);
+
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * ithread routine to process non-event responses
+ */
+static void common_ithread_routine(void *arg)
+{
+ pqi_intr_ctx_t *intr_ctx = (pqi_intr_ctx_t *)arg;
+ pqisrc_softstate_t *softs = device_get_softc(intr_ctx->pqi_dev);
+ int oq_id = intr_ctx->oq_id;
+
+ DBG_FUNC("IN\n");
+
+ pqisrc_process_response_queue(softs, oq_id);
+
+ DBG_FUNC("OUT\n");
+}
+
+static void event_ithread_routine(void *arg)
+{
+ pqi_intr_ctx_t *intr_ctx = (pqi_intr_ctx_t *)arg;
+ pqisrc_softstate_t *softs = device_get_softc(intr_ctx->pqi_dev);
+ int oq_id = intr_ctx->oq_id;
+
+ DBG_FUNC("IN\n");
+
+ pqisrc_process_event_intr_src(softs, oq_id);
+
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Registration of legacy interrupt in case MSI is unsupported
+ */
+int register_legacy_intr(pqisrc_softstate_t *softs)
+{
+ int error = 0;
+ device_t dev;
+
+ DBG_FUNC("IN\n");
+
+ dev = softs->os_specific.pqi_dev;
+
+ softs->os_specific.pqi_irq_rid[0] = 0;
+ softs->os_specific.pqi_irq[0] = bus_alloc_resource_any(dev, \
+ SYS_RES_IRQ, &softs->os_specific.pqi_irq_rid[0],
+ RF_ACTIVE | RF_SHAREABLE);
+ if (NULL == softs->os_specific.pqi_irq[0]) {
+ DBG_ERR("Failed to allocate resource for interrupt\n");
+ return PQI_STATUS_FAILURE;
+ }
+ if ((softs->os_specific.msi_ctx = os_mem_alloc(softs,sizeof(pqi_intr_ctx_t))) == NULL) {
+ DBG_ERR("Failed to allocate memory for msi_ctx\n");
+ return PQI_STATUS_FAILURE;
+ }
+ softs->os_specific.msi_ctx[0].pqi_dev = dev;
+ softs->os_specific.msi_ctx[0].oq_id = 0;
+
+ error = bus_setup_intr(dev, softs->os_specific.pqi_irq[0],
+ INTR_TYPE_CAM | INTR_MPSAFE, \
+ NULL, shared_ithread_routine,
+ &softs->os_specific.msi_ctx[0],
+ &softs->os_specific.intrcookie[0]);
+ if (error) {
+ DBG_ERR("Failed to setup legacy interrupt err = %d\n", error);
+ return error;
+ }
+ softs->os_specific.intr_registered[0] = TRUE;
+
+ DBG_FUNC("OUT error = %d\n", error);
+
+ return error;
+}
+
+/*
+ * Registration of MSIx
+ */
+int register_msix_intr(pqisrc_softstate_t *softs)
+{
+ int error = 0;
+ int i = 0;
+ device_t dev;
+ dev = softs->os_specific.pqi_dev;
+ int msix_count = softs->intr_count;
+
+ DBG_FUNC("IN\n");
+
+	softs->os_specific.msi_ctx = os_mem_alloc(softs, sizeof(pqi_intr_ctx_t) * msix_count);
+	if (softs->os_specific.msi_ctx == NULL) {
+		DBG_ERR("Failed to allocate memory for msi_ctx\n");
+		return PQI_STATUS_FAILURE;
+	}
+
+	/* Add shared handler */
+ if (softs->share_opq_and_eventq) {
+ softs->os_specific.pqi_irq_rid[i] = i+1;
+ softs->os_specific.pqi_irq[i] = bus_alloc_resource_any(dev, \
+ SYS_RES_IRQ,
+ &softs->os_specific.pqi_irq_rid[i],
+ RF_SHAREABLE | RF_ACTIVE);
+ if (NULL == softs->os_specific.pqi_irq[i]) {
+ DBG_ERR("Failed to allocate \
+ event interrupt resource\n");
+ return PQI_STATUS_FAILURE;
+ }
+
+ softs->os_specific.msi_ctx[i].pqi_dev = dev;
+ softs->os_specific.msi_ctx[i].oq_id = i;
+
+ error = bus_setup_intr(dev,softs->os_specific.pqi_irq[i],
+ INTR_TYPE_CAM | INTR_MPSAFE,\
+ NULL,
+ shared_ithread_routine,
+ &softs->os_specific.msi_ctx[i],
+ &softs->os_specific.intrcookie[i]);
+
+ if (error) {
+ DBG_ERR("Failed to setup interrupt for events r=%d\n",
+ error);
+ return error;
+ }
+ softs->os_specific.intr_registered[i] = TRUE;
+ }
+ else {
+ /* Add event handler */
+ softs->os_specific.pqi_irq_rid[i] = i+1;
+ softs->os_specific.pqi_irq[i] = bus_alloc_resource_any(dev, \
+ SYS_RES_IRQ,
+ &softs->os_specific.pqi_irq_rid[i],
+ RF_SHAREABLE | RF_ACTIVE);
+ if (NULL == softs->os_specific.pqi_irq[i]) {
+ DBG_ERR("ERR : Failed to allocate \
+ event interrupt resource\n");
+ return PQI_STATUS_FAILURE;
+ }
+
+
+ softs->os_specific.msi_ctx[i].pqi_dev = dev;
+ softs->os_specific.msi_ctx[i].oq_id = i;
+
+
+ error = bus_setup_intr(dev,softs->os_specific.pqi_irq[i],
+ INTR_TYPE_CAM | INTR_MPSAFE,\
+ NULL,
+ event_ithread_routine,
+ &softs->os_specific.msi_ctx[i],
+ &softs->os_specific.intrcookie[i]);
+ if (error) {
+ DBG_ERR("Failed to setup interrupt for events err=%d\n",
+ error);
+ return error;
+ }
+ softs->os_specific.intr_registered[i] = TRUE;
+		/* Add interrupt handlers */
+ for (i = 1; i < msix_count; ++i) {
+ softs->os_specific.pqi_irq_rid[i] = i+1;
+ softs->os_specific.pqi_irq[i] = \
+ bus_alloc_resource_any(dev,
+ SYS_RES_IRQ,
+ &softs->os_specific.pqi_irq_rid[i],
+ RF_SHAREABLE | RF_ACTIVE);
+ if (NULL == softs->os_specific.pqi_irq[i]) {
+ DBG_ERR("Failed to allocate \
+ msi/x interrupt resource\n");
+ return PQI_STATUS_FAILURE;
+ }
+ softs->os_specific.msi_ctx[i].pqi_dev = dev;
+ softs->os_specific.msi_ctx[i].oq_id = i;
+ error = bus_setup_intr(dev,
+ softs->os_specific.pqi_irq[i],
+ INTR_TYPE_CAM | INTR_MPSAFE,\
+ NULL,
+ common_ithread_routine,
+ &softs->os_specific.msi_ctx[i],
+ &softs->os_specific.intrcookie[i]);
+ if (error) {
+ DBG_ERR("Failed to setup \
+ msi/x interrupt error = %d\n", error);
+ return error;
+ }
+ softs->os_specific.intr_registered[i] = TRUE;
+ }
+ }
+
+ DBG_FUNC("OUT error = %d\n", error);
+
+ return error;
+}
+
+/*
+ * Setup interrupt depending on the configuration
+ */
+int os_setup_intr(pqisrc_softstate_t *softs)
+{
+ int error = 0;
+
+ DBG_FUNC("IN\n");
+
+ if (softs->intr_type == INTR_TYPE_FIXED) {
+ error = register_legacy_intr(softs);
+ }
+ else {
+ error = register_msix_intr(softs);
+ }
+ if (error) {
+ DBG_FUNC("OUT failed error = %d\n", error);
+ return error;
+ }
+
+ DBG_FUNC("OUT error = %d\n", error);
+
+ return error;
+}
+
+/*
+ * Deregistration of legacy interrupt
+ */
+void deregister_pqi_intx(pqisrc_softstate_t *softs)
+{
+ device_t dev;
+
+ DBG_FUNC("IN\n");
+
+ dev = softs->os_specific.pqi_dev;
+ if (softs->os_specific.pqi_irq[0] != NULL) {
+ if (softs->os_specific.intr_registered[0]) {
+ bus_teardown_intr(dev, softs->os_specific.pqi_irq[0],
+ softs->os_specific.intrcookie[0]);
+ softs->os_specific.intr_registered[0] = FALSE;
+ }
+ bus_release_resource(dev, SYS_RES_IRQ,
+ softs->os_specific.pqi_irq_rid[0],
+ softs->os_specific.pqi_irq[0]);
+ softs->os_specific.pqi_irq[0] = NULL;
+ os_mem_free(softs, (char*)softs->os_specific.msi_ctx, sizeof(pqi_intr_ctx_t));
+ }
+
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Deregistration of MSIx interrupt
+ */
+void deregister_pqi_msix(pqisrc_softstate_t *softs)
+{
+ device_t dev;
+ dev = softs->os_specific.pqi_dev;
+ int msix_count = softs->intr_count;
+ int i = 0;
+
+ DBG_FUNC("IN\n");
+
+ os_mem_free(softs, (char*)softs->os_specific.msi_ctx, sizeof(pqi_intr_ctx_t) * msix_count);
+ softs->os_specific.msi_ctx = NULL;
+
+ for (; i < msix_count; ++i) {
+ if (softs->os_specific.pqi_irq[i] != NULL) {
+ if (softs->os_specific.intr_registered[i]) {
+ bus_teardown_intr(dev,
+ softs->os_specific.pqi_irq[i],
+ softs->os_specific.intrcookie[i]);
+ softs->os_specific.intr_registered[i] = FALSE;
+ }
+ bus_release_resource(dev, SYS_RES_IRQ,
+ softs->os_specific.pqi_irq_rid[i],
+ softs->os_specific.pqi_irq[i]);
+ softs->os_specific.pqi_irq[i] = NULL;
+ }
+ }
+
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Function to destroy interrupts registered
+ */
+int os_destroy_intr(pqisrc_softstate_t *softs)
+{
+ device_t dev;
+ dev = softs->os_specific.pqi_dev;
+
+ DBG_FUNC("IN\n");
+
+ if (softs->intr_type == INTR_TYPE_FIXED) {
+ deregister_pqi_intx(softs);
+ } else if (softs->intr_type == INTR_TYPE_MSIX) {
+ deregister_pqi_msix(softs);
+ }
+ if (softs->os_specific.msi_enabled) {
+ pci_release_msi(dev);
+ softs->os_specific.msi_enabled = FALSE;
+ }
+
+ DBG_FUNC("OUT\n");
+
+ return PQI_STATUS_SUCCESS;
+}
+
+/*
+ * Free interrupt related resources for the adapter
+ */
+void os_free_intr_config(pqisrc_softstate_t *softs)
+{
+ device_t dev;
+ dev = softs->os_specific.pqi_dev;
+
+ DBG_FUNC("IN\n");
+
+ if (softs->os_specific.msi_enabled) {
+ pci_release_msi(dev);
+ softs->os_specific.msi_enabled = FALSE;
+ }
+
+ DBG_FUNC("OUT\n");
+}
diff --git a/sys/dev/smartpqi/smartpqi_ioctl.c b/sys/dev/smartpqi/smartpqi_ioctl.c
new file mode 100644
index 000000000000..bb1a03788f7f
--- /dev/null
+++ b/sys/dev/smartpqi/smartpqi_ioctl.c
@@ -0,0 +1,402 @@
+/*-
+ * Copyright (c) 2018 Microsemi Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+/*
+ * Management interface for smartpqi driver
+ */
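+
+/*
+ * Management tools talk to the driver through the /dev/smartpqi<unit>
+ * character device node and the CCISS-compatible ioctls handled below.
+ */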
+
+#include "smartpqi_includes.h"
+
+/*
+ * Wrapper function to copy to user from kernel
+ */
+int os_copy_to_user(struct pqisrc_softstate *softs, void *dest_buf,
+ void *src_buf, int size, int mode)
+{
+ return(copyout(src_buf, dest_buf, size));
+}
+
+/*
+ * Wrapper function to copy from user to kernel
+ */
+int os_copy_from_user(struct pqisrc_softstate *softs, void *dest_buf,
+ void *src_buf, int size, int mode)
+{
+ return(copyin(src_buf, dest_buf, size));
+}
+
+/*
+ * Device open function for ioctl entry
+ */
+static int smartpqi_open(struct cdev *cdev, int flags, int devtype,
+ struct thread *td)
+{
+ int error = PQI_STATUS_SUCCESS;
+
+ return error;
+}
+
+/*
+ * Device close function for ioctl entry
+ */
+static int smartpqi_close(struct cdev *cdev, int flags, int devtype,
+ struct thread *td)
+{
+ int error = PQI_STATUS_SUCCESS;
+
+ return error;
+}
+
+/*
+ * ioctl for getting driver info
+ */
+static void smartpqi_get_driver_info_ioctl(caddr_t udata, struct cdev *cdev)
+{
+ struct pqisrc_softstate *softs = cdev->si_drv1;
+ pdriver_info driver_info = (pdriver_info)udata;
+
+ DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);
+
+ driver_info->major_version = PQISRC_DRIVER_MAJOR;
+ driver_info->minor_version = PQISRC_DRIVER_MINOR;
+ driver_info->release_version = PQISRC_DRIVER_RELEASE;
+ driver_info->build_revision = PQISRC_DRIVER_REVISION;
+ driver_info->max_targets = PQI_MAX_DEVICES - 1;
+ driver_info->max_io = softs->max_io_for_scsi_ml;
+ driver_info->max_transfer_length = softs->pqi_cap.max_transfer_size;
+
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * ioctl for getting controller info
+ */
+static void smartpqi_get_pci_info_ioctl(caddr_t udata, struct cdev *cdev)
+{
+ struct pqisrc_softstate *softs = cdev->si_drv1;
+ device_t dev = softs->os_specific.pqi_dev;
+ pqi_pci_info_t *pci_info = (pqi_pci_info_t *)udata;
+ uint32_t sub_vendor = 0;
+ uint32_t sub_device = 0;
+ uint32_t vendor = 0;
+ uint32_t device = 0;
+
+ DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);
+
+ pci_info->bus = pci_get_bus(dev);
+ pci_info->dev_fn = pci_get_function(dev);
+ pci_info->domain = pci_get_domain(dev);
+ sub_vendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
+ sub_device = pci_read_config(dev, PCIR_SUBDEV_0, 2);
+ pci_info->board_id = ((sub_device << 16) & 0xffff0000) | sub_vendor;
+ vendor = pci_get_vendor(dev);
+ device = pci_get_device(dev);
+ pci_info->chip_id = ((device << 16) & 0xffff0000) | vendor;
+ DBG_FUNC("OUT\n");
+}
+
+
+/*
+ * ioctl entry point for user
+ */
+static int smartpqi_ioctl(struct cdev *cdev, u_long cmd, caddr_t udata,
+ int flags, struct thread *td)
+{
+ int error = PQI_STATUS_SUCCESS;
+ struct pqisrc_softstate *softs = cdev->si_drv1;
+
+ DBG_FUNC("IN cmd = 0x%lx udata = %p cdev = %p\n", cmd, udata, cdev);
+
+ if (!udata) {
+ DBG_ERR("udata is null !!\n");
+ }
+
+ if (pqisrc_ctrl_offline(softs)){
+ DBG_ERR("Controller s offline !!\n");
+ return ENOTTY;
+ }
+
+ switch (cmd) {
+ case CCISS_GETDRIVVER:
+ smartpqi_get_driver_info_ioctl(udata, cdev);
+ break;
+ case CCISS_GETPCIINFO:
+ smartpqi_get_pci_info_ioctl(udata, cdev);
+ break;
+ case SMARTPQI_PASS_THRU:
+ case CCISS_PASSTHRU:
+		/* pqisrc_passthru_ioctl() returns a PQI status; map failure to an errno */
+		error = pqisrc_passthru_ioctl(softs, udata, 0);
+		if (error != PQI_STATUS_SUCCESS)
+			error = EIO;
+ break;
+ case CCISS_REGNEWD:
+ error = pqisrc_scan_devices(softs);
+ break;
+ default:
+ DBG_WARN( "!IOCTL cmd 0x%lx not supported", cmd);
+ error = ENOTTY;
+ break;
+ }
+
+ DBG_FUNC("OUT error = %d\n", error);
+ return error;
+}
+
+static d_open_t smartpqi_open;
+static d_ioctl_t smartpqi_ioctl;
+static d_close_t smartpqi_close;
+
+static struct cdevsw smartpqi_cdevsw =
+{
+ .d_version = D_VERSION,
+ .d_open = smartpqi_open,
+ .d_close = smartpqi_close,
+ .d_ioctl = smartpqi_ioctl,
+ .d_name = "smartpqi",
+};
+
+/*
+ * Function to create device node for ioctl
+ */
+int create_char_dev(struct pqisrc_softstate *softs, int card_index)
+{
+ int error = PQI_STATUS_SUCCESS;
+
+ DBG_FUNC("IN idx = %d\n", card_index);
+
+ softs->os_specific.cdev = make_dev(&smartpqi_cdevsw, card_index,
+ UID_ROOT, GID_OPERATOR, 0640,
+ "smartpqi%u", card_index);
+ if(softs->os_specific.cdev) {
+ softs->os_specific.cdev->si_drv1 = softs;
+ } else {
+ error = PQI_STATUS_FAILURE;
+ }
+
+ DBG_FUNC("OUT error = %d\n", error);
+ return error;
+}
+
+/*
+ * Function to destroy device node for ioctl
+ */
+void destroy_char_dev(struct pqisrc_softstate *softs)
+{
+ DBG_FUNC("IN\n");
+ if (softs->os_specific.cdev) {
+ destroy_dev(softs->os_specific.cdev);
+ softs->os_specific.cdev = NULL;
+ }
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Function used to send passthru commands to the adapter
+ * to support management tools, e.g. ssacli, sscon.
+ */
+int
+pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ char *drv_buf = NULL;
+ uint32_t tag = 0;
+ IOCTL_Command_struct *iocommand = (IOCTL_Command_struct *)arg;
+ dma_mem_t ioctl_dma_buf;
+ pqisrc_raid_req_t request;
+ raid_path_error_info_elem_t error_info;
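+	/*
+	 * Requests are submitted on the default operational RAID IB queue;
+	 * completions are directed to the OB queue at the same index.
+	 */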
+ ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
+ ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
+ rcb_t *rcb = NULL;
+
+ memset(&request, 0, sizeof(request));
+ memset(&error_info, 0, sizeof(error_info));
+
+ DBG_FUNC("IN");
+
+ if (pqisrc_ctrl_offline(softs))
+ return PQI_STATUS_FAILURE;
+
+ if (!arg)
+ return (PQI_STATUS_FAILURE);
+
+ if (iocommand->buf_size < 1 &&
+ iocommand->Request.Type.Direction != PQIIOCTL_NONE)
+ return PQI_STATUS_FAILURE;
+ if (iocommand->Request.CDBLen > sizeof(request.cdb))
+ return PQI_STATUS_FAILURE;
+
+ switch (iocommand->Request.Type.Direction) {
+ case PQIIOCTL_NONE:
+ case PQIIOCTL_WRITE:
+ case PQIIOCTL_READ:
+ case PQIIOCTL_BIDIRECTIONAL:
+ break;
+ default:
+ return PQI_STATUS_FAILURE;
+ }
+
+ if (iocommand->buf_size > 0) {
+ memset(&ioctl_dma_buf, 0, sizeof(struct dma_mem));
+ ioctl_dma_buf.tag = "Ioctl_PassthruCmd_Buffer";
+ ioctl_dma_buf.size = iocommand->buf_size;
+ ioctl_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN;
+ /* allocate memory */
+ ret = os_dma_mem_alloc(softs, &ioctl_dma_buf);
+ if (ret) {
+ DBG_ERR("Failed to Allocate dma mem for Ioctl PassthruCmd Buffer : %d\n", ret);
+ ret = PQI_STATUS_FAILURE;
+ goto out;
+ }
+
+ DBG_INFO("ioctl_dma_buf.dma_addr = %p\n",(void*)ioctl_dma_buf.dma_addr);
+ DBG_INFO("ioctl_dma_buf.virt_addr = %p\n",(void*)ioctl_dma_buf.virt_addr);
+
+ drv_buf = (char *)ioctl_dma_buf.virt_addr;
+ if (iocommand->Request.Type.Direction & PQIIOCTL_WRITE) {
+ if ((ret = os_copy_from_user(softs, (void *)drv_buf, (void *)iocommand->buf,
+ iocommand->buf_size, mode)) != 0) {
+ ret = PQI_STATUS_FAILURE;
+ goto free_mem;
+ }
+ }
+ }
+
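+	/* Build a RAID path request IU around the caller's CDB and data buffer. */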
+ request.header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
+ request.header.iu_length = offsetof(pqisrc_raid_req_t, sg_descriptors[1]) -
+ PQI_REQUEST_HEADER_LENGTH;
+ memcpy(request.lun_number, iocommand->LUN_info.LunAddrBytes,
+ sizeof(request.lun_number));
+ memcpy(request.cdb, iocommand->Request.CDB, iocommand->Request.CDBLen);
+ request.additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0;
+
+ switch (iocommand->Request.Type.Direction) {
+ case PQIIOCTL_NONE:
+ request.data_direction = SOP_DATA_DIR_NONE;
+ break;
+ case PQIIOCTL_WRITE:
+ request.data_direction = SOP_DATA_DIR_FROM_DEVICE;
+ break;
+ case PQIIOCTL_READ:
+ request.data_direction = SOP_DATA_DIR_TO_DEVICE;
+ break;
+ case PQIIOCTL_BIDIRECTIONAL:
+ request.data_direction = SOP_DATA_DIR_BIDIRECTIONAL;
+ break;
+ }
+
+ request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+ if (iocommand->buf_size > 0) {
+ request.buffer_length = iocommand->buf_size;
+ request.sg_descriptors[0].addr = ioctl_dma_buf.dma_addr;
+ request.sg_descriptors[0].len = iocommand->buf_size;
+ request.sg_descriptors[0].flags = SG_FLAG_LAST;
+ }
+ tag = pqisrc_get_tag(&softs->taglist);
+ request.request_id = tag;
+ request.response_queue_id = ob_q->q_id;
+ request.error_index = request.request_id;
+ rcb = &softs->rcb[tag];
+
+ rcb->success_cmp_callback = pqisrc_process_internal_raid_response_success;
+ rcb->error_cmp_callback = pqisrc_process_internal_raid_response_error;
+ rcb->tag = tag;
+ rcb->req_pending = true;
+ /* Submit Command */
+ ret = pqisrc_submit_cmnd(softs, ib_q, &request);
+ if (ret != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Unable to submit command\n");
+ goto err_out;
+ }
+
+ ret = pqisrc_wait_on_condition(softs, rcb);
+ if (ret != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Passthru IOCTL cmd timed out !!\n");
+ goto err_out;
+ }
+
+ memset(&iocommand->error_info, 0, sizeof(iocommand->error_info));
+
+
+ if (rcb->status) {
+ size_t sense_data_length;
+
+ memcpy(&error_info, rcb->error_info, sizeof(error_info));
+ iocommand->error_info.ScsiStatus = error_info.status;
+ sense_data_length = error_info.sense_data_len;
+
+ if (!sense_data_length)
+ sense_data_length = error_info.resp_data_len;
+
+ if (sense_data_length &&
+ (sense_data_length > sizeof(error_info.data)))
+ sense_data_length = sizeof(error_info.data);
+
+ if (sense_data_length) {
+ if (sense_data_length >
+ sizeof(iocommand->error_info.SenseInfo))
+ sense_data_length =
+ sizeof(iocommand->error_info.SenseInfo);
+ memcpy (iocommand->error_info.SenseInfo,
+ error_info.data, sense_data_length);
+ iocommand->error_info.SenseLen = sense_data_length;
+ }
+
+ if (error_info.data_out_result ==
+ PQI_RAID_DATA_IN_OUT_UNDERFLOW){
+ rcb->status = REQUEST_SUCCESS;
+ }
+ }
+
+ if (rcb->status == REQUEST_SUCCESS && iocommand->buf_size > 0 &&
+ (iocommand->Request.Type.Direction & PQIIOCTL_READ)) {
+
+ if ((ret = os_copy_to_user(softs, (void*)iocommand->buf,
+ (void*)drv_buf, iocommand->buf_size, mode)) != 0) {
+ DBG_ERR("Failed to copy the response\n");
+ goto err_out;
+ }
+ }
+
+ os_reset_rcb(rcb);
+ pqisrc_put_tag(&softs->taglist, request.request_id);
+ if (iocommand->buf_size > 0)
+ os_dma_mem_free(softs,&ioctl_dma_buf);
+
+ DBG_FUNC("OUT\n");
+ return ret;
+err_out:
+ os_reset_rcb(rcb);
+ pqisrc_put_tag(&softs->taglist, request.request_id);
+
+free_mem:
+ if (iocommand->buf_size > 0)
+ os_dma_mem_free(softs, &ioctl_dma_buf);
+
+out:
+ DBG_FUNC("Failed OUT\n");
+ return PQI_STATUS_FAILURE;
+}
diff --git a/sys/dev/smartpqi/smartpqi_ioctl.h b/sys/dev/smartpqi/smartpqi_ioctl.h
new file mode 100644
index 000000000000..57f4d6952eb8
--- /dev/null
+++ b/sys/dev/smartpqi/smartpqi_ioctl.h
@@ -0,0 +1,144 @@
+/*-
+ * Copyright (c) 2018 Microsemi Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#ifndef _PQI_IOCTL_H_
+#define _PQI_IOCTL_H_
+
+/* IOCTL passthrough macros and structures */
+
+#define SENSEINFOBYTES 32 /* note that this value may vary
+ between host implementations */
+
+/* transfer direction */
+#define PQIIOCTL_NONE 0x00
+#define PQIIOCTL_WRITE 0x01
+#define PQIIOCTL_READ 0x02
+#define PQIIOCTL_BIDIRECTIONAL (PQIIOCTL_READ | PQIIOCTL_WRITE)
+
+
+/* Type defs used in the following structs */
+#define BYTE uint8_t
+#define WORD uint16_t
+#define HWORD uint16_t
+#define DWORD uint32_t
+
+
+
+/* Command List Structure */
+typedef union _SCSI3Addr_struct {
+ struct {
+ BYTE Dev;
+ BYTE Bus:6;
+ BYTE Mode:2; /* b00 */
+ } PeripDev;
+ struct {
+ BYTE DevLSB;
+ BYTE DevMSB:6;
+ BYTE Mode:2; /* b01 */
+ } LogDev;
+ struct {
+ BYTE Dev:5;
+ BYTE Bus:3;
+ BYTE Targ:6;
+ BYTE Mode:2; /* b10 */
+ } LogUnit;
+
+}OS_ATTRIBUTE_PACKED SCSI3Addr_struct;
+
+typedef struct _PhysDevAddr_struct {
+ DWORD TargetId:24;
+ DWORD Bus:6;
+ DWORD Mode:2;
+ SCSI3Addr_struct Target[2]; /* 2 level target device addr */
+
+}OS_ATTRIBUTE_PACKED PhysDevAddr_struct;
+
+typedef struct _LogDevAddr_struct {
+ DWORD VolId:30;
+ DWORD Mode:2;
+ BYTE reserved[4];
+
+}OS_ATTRIBUTE_PACKED LogDevAddr_struct;
+
+typedef union _LUNAddr_struct {
+ BYTE LunAddrBytes[8];
+ SCSI3Addr_struct SCSI3Lun[4];
+ PhysDevAddr_struct PhysDev;
+ LogDevAddr_struct LogDev;
+
+}OS_ATTRIBUTE_PACKED LUNAddr_struct;
+
+typedef struct _RequestBlock_struct {
+ BYTE CDBLen;
+ struct {
+ BYTE Type:3;
+ BYTE Attribute:3;
+ BYTE Direction:2;
+ } Type;
+ HWORD Timeout;
+ BYTE CDB[16];
+
+}OS_ATTRIBUTE_PACKED RequestBlock_struct;
+
+typedef union _MoreErrInfo_struct{
+ struct {
+ BYTE Reserved[3];
+ BYTE Type;
+ DWORD ErrorInfo;
+ } Common_Info;
+ struct{
+ BYTE Reserved[2];
+ BYTE offense_size; /* size of offending entry */
+ BYTE offense_num; /* byte # of offense 0-base */
+ DWORD offense_value;
+ } Invalid_Cmd;
+
+}OS_ATTRIBUTE_PACKED MoreErrInfo_struct;
+
+typedef struct _ErrorInfo_struct {
+ BYTE ScsiStatus;
+ BYTE SenseLen;
+ HWORD CommandStatus;
+ DWORD ResidualCnt;
+ MoreErrInfo_struct MoreErrInfo;
+ BYTE SenseInfo[SENSEINFOBYTES];
+
+}OS_ATTRIBUTE_PACKED ErrorInfo_struct;
+
+
+typedef struct pqi_ioctl_passthruCmd_struct {
+ LUNAddr_struct LUN_info;
+ RequestBlock_struct Request;
+ ErrorInfo_struct error_info;
+ WORD buf_size; /* size in bytes of the buf */
+ passthru_buf_type_t buf;
+
+}OS_ATTRIBUTE_PACKED IOCTL_Command_struct;
+
+
+#endif /* _PQI_IOCTL_H_ */
diff --git a/sys/dev/smartpqi/smartpqi_main.c b/sys/dev/smartpqi/smartpqi_main.c
new file mode 100644
index 000000000000..6b39234c29fa
--- /dev/null
+++ b/sys/dev/smartpqi/smartpqi_main.c
@@ -0,0 +1,500 @@
+/*-
+ * Copyright (c) 2018 Microsemi Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+/*
+ * Driver for the Microsemi Smart storage controllers
+ */
+
+#include "smartpqi_includes.h"
+#include "smartpqi_prototypes.h"
+
+/*
+ * Supported devices
+ */
+struct pqi_ident
+{
+ u_int16_t vendor;
+ u_int16_t device;
+ u_int16_t subvendor;
+ u_int16_t subdevice;
+ int hwif;
+ char *desc;
+} pqi_identifiers[] = {
+ /* (MSCC PM8205 8x12G based) */
+ {0x9005, 0x028f, 0x103c, 0x600, PQI_HWIF_SRCV, "P408i-p SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x601, PQI_HWIF_SRCV, "P408e-p SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x602, PQI_HWIF_SRCV, "P408i-a SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x603, PQI_HWIF_SRCV, "P408i-c SR Gen10"},
+ {0x9005, 0x028f, 0x1028, 0x1FE0, PQI_HWIF_SRCV, "SmartRAID 3162-8i/eDell"},
+ {0x9005, 0x028f, 0x9005, 0x608, PQI_HWIF_SRCV, "SmartRAID 3162-8i/e"},
+ {0x9005, 0x028f, 0x103c, 0x609, PQI_HWIF_SRCV, "P408i-sb SR G10"},
+
+ /* (MSCC PM8225 8x12G based) */
+ {0x9005, 0x028f, 0x103c, 0x650, PQI_HWIF_SRCV, "E208i-p SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x651, PQI_HWIF_SRCV, "E208e-p SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x652, PQI_HWIF_SRCV, "E208i-c SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x654, PQI_HWIF_SRCV, "E208i-a SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x655, PQI_HWIF_SRCV, "P408e-m SR Gen10"},
+
+ /* (MSCC PM8221 8x12G based) */
+ {0x9005, 0x028f, 0x103c, 0x700, PQI_HWIF_SRCV, "P204i-c SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x701, PQI_HWIF_SRCV, "P204i-b SR Gen10"},
+
+ /* (MSCC PM8204 8x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x800, PQI_HWIF_SRCV, "SmartRAID 3154-8i"},
+ {0x9005, 0x028f, 0x9005, 0x801, PQI_HWIF_SRCV, "SmartRAID 3152-8i"},
+ {0x9005, 0x028f, 0x9005, 0x802, PQI_HWIF_SRCV, "SmartRAID 3151-4i"},
+ {0x9005, 0x028f, 0x9005, 0x803, PQI_HWIF_SRCV, "SmartRAID 3101-4i"},
+ {0x9005, 0x028f, 0x9005, 0x804, PQI_HWIF_SRCV, "SmartRAID 3154-8e"},
+ {0x9005, 0x028f, 0x9005, 0x805, PQI_HWIF_SRCV, "SmartRAID 3102-8i"},
+ {0x9005, 0x028f, 0x9005, 0x806, PQI_HWIF_SRCV, "SmartRAID 3100"},
+ {0x9005, 0x028f, 0x9005, 0x807, PQI_HWIF_SRCV, "SmartRAID 3162-8i"},
+ {0x9005, 0x028f, 0x152d, 0x8a22, PQI_HWIF_SRCV, "QS-8204-8i"},
+
+ /* (MSCC PM8222 8x12G based) */
+ {0x9005, 0x028f, 0x9005, 0x900, PQI_HWIF_SRCV, "SmartHBA 2100-8i"},
+ {0x9005, 0x028f, 0x9005, 0x901, PQI_HWIF_SRCV, "SmartHBA 2100-4i"},
+ {0x9005, 0x028f, 0x9005, 0x902, PQI_HWIF_SRCV, "HBA 1100-8i"},
+ {0x9005, 0x028f, 0x9005, 0x903, PQI_HWIF_SRCV, "HBA 1100-4i"},
+ {0x9005, 0x028f, 0x9005, 0x904, PQI_HWIF_SRCV, "SmartHBA 2100-8e"},
+ {0x9005, 0x028f, 0x9005, 0x905, PQI_HWIF_SRCV, "HBA 1100-8e"},
+ {0x9005, 0x028f, 0x9005, 0x906, PQI_HWIF_SRCV, "SmartHBA 2100-4i4e"},
+ {0x9005, 0x028f, 0x9005, 0x907, PQI_HWIF_SRCV, "HBA 1100"},
+ {0x9005, 0x028f, 0x9005, 0x908, PQI_HWIF_SRCV, "SmartHBA 2100"},
+ {0x9005, 0x028f, 0x9005, 0x90a, PQI_HWIF_SRCV, "SmartHBA 2100A-8i"},
+
+ /* (SRCx MSCC FVB 24x12G based) */
+ {0x9005, 0x028f, 0x103c, 0x1001, PQI_HWIF_SRCV, "MSCC FVB"},
+
+ /* (MSCC PM8241 24x12G based) */
+
+ /* (MSCC PM8242 24x12G based) */
+ {0x9005, 0x028f, 0x152d, 0x8a37, PQI_HWIF_SRCV, "QS-8242-24i"},
+ {0x9005, 0x028f, 0x9005, 0x1300, PQI_HWIF_SRCV, "HBA 1100-8i8e"},
+ {0x9005, 0x028f, 0x9005, 0x1301, PQI_HWIF_SRCV, "HBA 1100-24i"},
+ {0x9005, 0x028f, 0x9005, 0x1302, PQI_HWIF_SRCV, "SmartHBA 2100-8i8e"},
+ {0x9005, 0x028f, 0x9005, 0x1303, PQI_HWIF_SRCV, "SmartHBA 2100-24i"},
+
+ /* (MSCC PM8236 16x12G based) */
+ {0x9005, 0x028f, 0x152d, 0x8a24, PQI_HWIF_SRCV, "QS-8236-16i"},
+ {0x9005, 0x028f, 0x9005, 0x1380, PQI_HWIF_SRCV, "SmartRAID 3154-16i"},
+
+ /* (MSCC PM8237 24x12G based) */
+ {0x9005, 0x028f, 0x103c, 0x1100, PQI_HWIF_SRCV, "P816i-a SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x1101, PQI_HWIF_SRCV, "P416ie-m SR G10"},
+
+ /* (MSCC PM8238 16x12G based) */
+ {0x9005, 0x028f, 0x152d, 0x8a23, PQI_HWIF_SRCV, "QS-8238-16i"},
+ {0x9005, 0x028f, 0x9005, 0x1280, PQI_HWIF_SRCV, "HBA 1100-16i"},
+ {0x9005, 0x028f, 0x9005, 0x1281, PQI_HWIF_SRCV, "HBA 1100-16e"},
+
+ /* (MSCC PM8240 24x12G based) */
+ {0x9005, 0x028f, 0x152d, 0x8a36, PQI_HWIF_SRCV, "QS-8240-24i"},
+ {0x9005, 0x028f, 0x9005, 0x1200, PQI_HWIF_SRCV, "SmartRAID 3154-24i"},
+ {0x9005, 0x028f, 0x9005, 0x1201, PQI_HWIF_SRCV, "SmartRAID 3154-8i16e"},
+ {0x9005, 0x028f, 0x9005, 0x1202, PQI_HWIF_SRCV, "SmartRAID 3154-8i8e"},
+
+ {0, 0, 0, 0, 0, 0}
+};
+
+struct pqi_ident
+pqi_family_identifiers[] = {
+ {0x9005, 0x028f, 0, 0, PQI_HWIF_SRCV, "Smart Array Storage Controller"},
+ {0, 0, 0, 0, 0, 0}
+};
+
+/*
+ * Function to identify the installed adapter.
+ */
+static struct pqi_ident *
+pqi_find_ident(device_t dev)
+{
+ struct pqi_ident *m;
+ u_int16_t vendid, devid, sub_vendid, sub_devid;
+
+ vendid = pci_get_vendor(dev);
+ devid = pci_get_device(dev);
+ sub_vendid = pci_get_subvendor(dev);
+ sub_devid = pci_get_subdevice(dev);
+
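+	/* Try an exact subsystem match first, then fall back to the controller-family table. */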
+ for (m = pqi_identifiers; m->vendor != 0; m++) {
+ if ((m->vendor == vendid) && (m->device == devid) &&
+ (m->subvendor == sub_vendid) &&
+ (m->subdevice == sub_devid)) {
+ return (m);
+ }
+ }
+
+ for (m = pqi_family_identifiers; m->vendor != 0; m++) {
+ if ((m->vendor == vendid) && (m->device == devid)) {
+ return (m);
+ }
+ }
+
+ return (NULL);
+}
+
+/*
+ * Determine whether this is one of our supported adapters.
+ */
+static int
+smartpqi_probe(device_t dev)
+{
+ struct pqi_ident *id;
+
+ if ((id = pqi_find_ident(dev)) != NULL) {
+ device_set_desc(dev, id->desc);
+ return(BUS_PROBE_VENDOR);
+ }
+
+ return(ENXIO);
+}
+
+/*
+ * Store Bus/Device/Function in softs
+ */
+void pqisrc_save_controller_info(struct pqisrc_softstate *softs)
+{
+ device_t dev = softs->os_specific.pqi_dev;
+
+ softs->bus_id = (uint32_t)pci_get_bus(dev);
+ softs->device_id = (uint32_t)pci_get_device(dev);
+ softs->func_id = (uint32_t)pci_get_function(dev);
+}
+
+
+/*
+ * Allocate resources for our device, set up the bus interface.
+ * Initialize the PQI related functionality, scan devices, register sim to
+ * upper layer, create management interface device node etc.
+ */
+static int
+smartpqi_attach(device_t dev)
+{
+ struct pqisrc_softstate *softs = NULL;
+ struct pqi_ident *id = NULL;
+ int error = 0;
+ u_int32_t command = 0, i = 0;
+ int card_index = device_get_unit(dev);
+ rcb_t *rcbp = NULL;
+
+ /*
+ * Initialise softc.
+ */
+ softs = device_get_softc(dev);
+
+ if (!softs) {
+ printf("Could not get softc\n");
+ error = EINVAL;
+ goto out;
+ }
+ memset(softs, 0, sizeof(*softs));
+ softs->os_specific.pqi_dev = dev;
+
+ DBG_FUNC("IN\n");
+
+ /* assume failure is 'not configured' */
+ error = ENXIO;
+
+ /*
+ * Verify that the adapter is correctly set up in PCI space.
+ */
+ pci_enable_busmaster(softs->os_specific.pqi_dev);
+ command = pci_read_config(softs->os_specific.pqi_dev, PCIR_COMMAND, 2);
+ if ((command & PCIM_CMD_MEMEN) == 0) {
+ DBG_ERR("memory window not available command = %d\n", command);
+ error = ENXIO;
+ goto out;
+ }
+
+ /*
+ * Detect the hardware interface version, set up the bus interface
+ * indirection.
+ */
+ id = pqi_find_ident(dev);
+ softs->os_specific.pqi_hwif = id->hwif;
+
+ switch(softs->os_specific.pqi_hwif) {
+ case PQI_HWIF_SRCV:
+ DBG_INFO("set hardware up for PMC SRCv for %p", softs);
+ break;
+ default:
+ softs->os_specific.pqi_hwif = PQI_HWIF_UNKNOWN;
+ DBG_ERR("unknown hardware type\n");
+ error = ENXIO;
+ goto out;
+ }
+
+ pqisrc_save_controller_info(softs);
+
+ /*
+ * Allocate the PCI register window.
+ */
+ softs->os_specific.pqi_regs_rid0 = PCIR_BAR(0);
+ if ((softs->os_specific.pqi_regs_res0 =
+ bus_alloc_resource_any(softs->os_specific.pqi_dev, SYS_RES_MEMORY,
+ &softs->os_specific.pqi_regs_rid0, RF_ACTIVE)) == NULL) {
+ DBG_ERR("couldn't allocate register window 0\n");
+ /* assume failure is 'out of memory' */
+ error = ENOMEM;
+ goto out;
+ }
+
+ bus_get_resource_start(softs->os_specific.pqi_dev, SYS_RES_MEMORY,
+ softs->os_specific.pqi_regs_rid0);
+
+ softs->pci_mem_handle.pqi_btag = rman_get_bustag(softs->os_specific.pqi_regs_res0);
+ softs->pci_mem_handle.pqi_bhandle = rman_get_bushandle(softs->os_specific.pqi_regs_res0);
+ /* softs->pci_mem_base_vaddr = (uintptr_t)rman_get_virtual(softs->os_specific.pqi_regs_res0); */
+ softs->pci_mem_base_vaddr = (char *)rman_get_virtual(softs->os_specific.pqi_regs_res0);
+
+ /*
+ * Allocate the parent bus DMA tag appropriate for our PCI interface.
+ *
+ * Note that some of these controllers are 64-bit capable.
+ */
+ if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
+ PAGE_SIZE, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
+ BUS_SPACE_UNRESTRICTED, /* nsegments */
+ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* No locking needed */
+ &softs->os_specific.pqi_parent_dmat)) {
+ DBG_ERR("can't allocate parent DMA tag\n");
+ /* assume failure is 'out of memory' */
+ error = ENOMEM;
+ goto dma_out;
+ }
+
+ softs->os_specific.sim_registered = FALSE;
+ softs->os_name = "FreeBSD ";
+
+ /* Initialize the PQI library */
+ error = pqisrc_init(softs);
+ if (error) {
+ DBG_ERR("Failed to initialize pqi lib error = %d\n", error);
+ error = PQI_STATUS_FAILURE;
+ goto out;
+ }
+
+ mtx_init(&softs->os_specific.cam_lock, "cam_lock", NULL, MTX_DEF);
+ softs->os_specific.mtx_init = TRUE;
+ mtx_init(&softs->os_specific.map_lock, "map_lock", NULL, MTX_DEF);
+
+ /*
+ * Create DMA tag for mapping buffers into controller-addressable space.
+ */
+ if (bus_dma_tag_create(softs->os_specific.pqi_parent_dmat,/* parent */
+ 1, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ softs->pqi_cap.max_sg_elem*PAGE_SIZE,/*maxsize*/
+ softs->pqi_cap.max_sg_elem, /* nsegments */
+ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
+ BUS_DMA_ALLOCNOW, /* flags */
+ busdma_lock_mutex, /* lockfunc */
+ &softs->os_specific.map_lock, /* lockfuncarg*/
+ &softs->os_specific.pqi_buffer_dmat)) {
+ DBG_ERR("can't allocate buffer DMA tag for pqi_buffer_dmat\n");
+ return (ENOMEM);
+ }
+
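+	/* Create a DMA map for each request control block; tags 1..max_outstanding_io are in use. */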
+ rcbp = &softs->rcb[1];
+ for( i = 1; i <= softs->pqi_cap.max_outstanding_io; i++, rcbp++ ) {
+ if ((error = bus_dmamap_create(softs->os_specific.pqi_buffer_dmat, 0, &rcbp->cm_datamap)) != 0) {
+ DBG_ERR("Cant create datamap for buf @"
+ "rcbp = %p maxio = %d error = %d\n",
+ rcbp, softs->pqi_cap.max_outstanding_io, error);
+ goto dma_out;
+ }
+ }
+
+ os_start_heartbeat_timer((void *)softs); /* Start the heart-beat timer */
+ softs->os_specific.wellness_periodic = timeout( os_wellness_periodic,
+ softs, 120*hz);
+ /* Register our shutdown handler. */
+ softs->os_specific.eh = EVENTHANDLER_REGISTER(shutdown_final,
+ smartpqi_shutdown, softs, SHUTDOWN_PRI_DEFAULT);
+
+ error = pqisrc_scan_devices(softs);
+ if (error) {
+ DBG_ERR("Failed to scan lib error = %d\n", error);
+ error = PQI_STATUS_FAILURE;
+ goto out;
+ }
+
+ error = register_sim(softs, card_index);
+ if (error) {
+ DBG_ERR("Failed to register sim index = %d error = %d\n",
+ card_index, error);
+ goto out;
+ }
+
+ smartpqi_target_rescan(softs);
+
+ TASK_INIT(&softs->os_specific.event_task, 0, pqisrc_event_worker,softs);
+
+ error = create_char_dev(softs, card_index);
+ if (error) {
+ DBG_ERR("Failed to register character device index=%d r=%d\n",
+ card_index, error);
+ goto out;
+ }
+ goto out;
+
+dma_out:
+ if (softs->os_specific.pqi_regs_res0 != NULL)
+ bus_release_resource(softs->os_specific.pqi_dev, SYS_RES_MEMORY,
+ softs->os_specific.pqi_regs_rid0,
+ softs->os_specific.pqi_regs_res0);
+out:
+ DBG_FUNC("OUT error = %d\n", error);
+ return(error);
+}
+
+/*
+ * Deallocate resources for our device.
+ */
+static int
+smartpqi_detach(device_t dev)
+{
+ struct pqisrc_softstate *softs = NULL;
+ softs = device_get_softc(dev);
+ DBG_FUNC("IN\n");
+
+ EVENTHANDLER_DEREGISTER(shutdown_final, softs->os_specific.eh);
+
+ /* kill the periodic event */
+ untimeout(os_wellness_periodic, softs,
+ softs->os_specific.wellness_periodic);
+ /* Kill the heart beat event */
+ untimeout(os_start_heartbeat_timer, softs,
+ softs->os_specific.heartbeat_timeout_id);
+
+ smartpqi_shutdown(softs);
+ destroy_char_dev(softs);
+ pqisrc_uninit(softs);
+ deregister_sim(softs);
+ pci_release_msi(dev);
+
+ DBG_FUNC("OUT\n");
+ return 0;
+}
+
+/*
+ * Bring the controller to a quiescent state, ready for system suspend.
+ */
+static int
+smartpqi_suspend(device_t dev)
+{
+ struct pqisrc_softstate *softs;
+ softs = device_get_softc(dev);
+ DBG_FUNC("IN\n");
+
+ DBG_INFO("Suspending the device %p\n", softs);
+ softs->os_specific.pqi_state |= SMART_STATE_SUSPEND;
+
+ DBG_FUNC("OUT\n");
+ return(0);
+}
+
+/*
+ * Bring the controller back to a state ready for operation.
+ */
+static int
+smartpqi_resume(device_t dev)
+{
+ struct pqisrc_softstate *softs;
+ softs = device_get_softc(dev);
+ DBG_FUNC("IN\n");
+
+ softs->os_specific.pqi_state &= ~SMART_STATE_SUSPEND;
+
+ DBG_FUNC("OUT\n");
+ return(0);
+}
+
+/*
+ * Do whatever is needed during a system shutdown.
+ */
+int
+smartpqi_shutdown(void *arg)
+{
+ struct pqisrc_softstate *softs = NULL;
+ int rval = 0;
+
+ DBG_FUNC("IN\n");
+
+ softs = (struct pqisrc_softstate *)arg;
+
+ rval = pqisrc_flush_cache(softs, PQISRC_SHUTDOWN);
+ if (rval != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Unable to flush adapter cache! rval = %d", rval);
+ }
+
+ DBG_FUNC("OUT\n");
+
+ return rval;
+}
+
+
+static int smartpqi_probe(device_t dev);
+static int smartpqi_attach(device_t dev);
+static int smartpqi_detach(device_t dev);
+static int smartpqi_suspend(device_t dev);
+static int smartpqi_resume(device_t dev);
+
+/*
+ * PCI bus interface.
+ */
+static device_method_t pqi_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, smartpqi_probe),
+ DEVMETHOD(device_attach, smartpqi_attach),
+ DEVMETHOD(device_detach, smartpqi_detach),
+ DEVMETHOD(device_suspend, smartpqi_suspend),
+ DEVMETHOD(device_resume, smartpqi_resume),
+ { 0, 0 }
+};
+
+static devclass_t pqi_devclass;
+static driver_t smartpqi_pci_driver = {
+ "smartpqi",
+ pqi_methods,
+ sizeof(struct pqisrc_softstate)
+};
+
+DRIVER_MODULE(smartpqi, pci, smartpqi_pci_driver, pqi_devclass, 0, 0);
+MODULE_DEPEND(smartpqi, pci, 1, 1, 1);
+
+
diff --git a/sys/dev/smartpqi/smartpqi_mem.c b/sys/dev/smartpqi/smartpqi_mem.c
new file mode 100644
index 000000000000..571e10958d1c
--- /dev/null
+++ b/sys/dev/smartpqi/smartpqi_mem.c
@@ -0,0 +1,184 @@
+/*-
+ * Copyright (c) 2018 Microsemi Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#include "smartpqi_includes.h"
+
+MALLOC_DEFINE(M_SMARTRAID, "smartraidbuf", "Buffers for the smartraid driver");
+
+/*
+ * DMA map load callback function
+ */
+static void
+os_dma_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ bus_addr_t *paddr = (bus_addr_t *)arg;
+ *paddr = segs[0].ds_addr;
+}
+
+int os_dma_setup(pqisrc_softstate_t *softs)
+{
+ DBG_FUNC("IN\n");
+ DBG_FUNC("OUT\n");
+ return PQI_STATUS_SUCCESS;
+}
+
+int os_dma_destroy(pqisrc_softstate_t *softs)
+{
+ DBG_FUNC("IN\n");
+ DBG_FUNC("OUT\n");
+ return PQI_STATUS_SUCCESS;
+}
+
+/*
+ * DMA mem resource allocation wrapper function
+ */
+int os_dma_mem_alloc(pqisrc_softstate_t *softs, struct dma_mem *dma_mem)
+{
+ int ret = 0;
+
+ /* DBG_FUNC("IN\n"); */
+
+ /* DMA memory needed - allocate it */
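+	/* Each allocation gets its own tag, constrained to one physically contiguous segment. */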
+ if ((ret = bus_dma_tag_create(
+ softs->os_specific.pqi_parent_dmat, /* parent */
+ dma_mem->align, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ dma_mem->size, /* maxsize */
+ 1, /* nsegments */
+ dma_mem->size, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* No locking needed */
+ &dma_mem->dma_tag)) != 0 ) {
+ DBG_ERR("can't allocate DMA tag with error = 0x%x\n", ret);
+ goto err_out;
+ }
+ if ((ret = bus_dmamem_alloc(dma_mem->dma_tag, (void **)&dma_mem->virt_addr,
+ BUS_DMA_NOWAIT, &dma_mem->dma_map)) != 0) {
+ DBG_ERR("can't allocate DMA memory for required object \
+ with error = 0x%x\n", ret);
+ goto err_mem;
+ }
+
+ if((ret = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
+ dma_mem->virt_addr, dma_mem->size,
+ os_dma_map, &dma_mem->dma_addr, 0)) != 0) {
+ DBG_ERR("can't load DMA memory for required \
+ object with error = 0x%x\n", ret);
+ goto err_load;
+ }
+
+ memset(dma_mem->virt_addr, 0, dma_mem->size);
+
+ /* DBG_FUNC("OUT\n"); */
+ return ret;
+
+err_load:
+ if(dma_mem->virt_addr)
+ bus_dmamem_free(dma_mem->dma_tag, dma_mem->virt_addr,
+ dma_mem->dma_map);
+err_mem:
+ if(dma_mem->dma_tag)
+ bus_dma_tag_destroy(dma_mem->dma_tag);
+err_out:
+ DBG_FUNC("failed OUT\n");
+ return ret;
+}
+
+/*
+ * DMA mem resource deallocation wrapper function
+ */
+void os_dma_mem_free(pqisrc_softstate_t *softs, struct dma_mem *dma_mem)
+{
+ /* DBG_FUNC("IN\n"); */
+
+ if(dma_mem->dma_addr) {
+ bus_dmamap_unload(dma_mem->dma_tag, dma_mem->dma_map);
+ dma_mem->dma_addr = 0;
+ }
+
+ if(dma_mem->virt_addr) {
+ bus_dmamem_free(dma_mem->dma_tag, dma_mem->virt_addr,
+ dma_mem->dma_map);
+ dma_mem->virt_addr = NULL;
+ }
+
+ if(dma_mem->dma_tag) {
+ bus_dma_tag_destroy(dma_mem->dma_tag);
+ dma_mem->dma_tag = NULL;
+ }
+
+ /* DBG_FUNC("OUT\n"); */
+}
+
+
+/*
+ * Mem resource allocation wrapper function
+ */
+void *os_mem_alloc(pqisrc_softstate_t *softs, size_t size)
+{
+ void *addr = NULL;
+
+ /* DBG_FUNC("IN\n"); */
+
+ addr = malloc((unsigned long)size, M_SMARTRAID,
+ M_NOWAIT | M_ZERO);
+
+/* DBG_FUNC("OUT\n"); */
+
+ return addr;
+}
+
+/*
+ * Mem resource deallocation wrapper function
+ */
+void os_mem_free(pqisrc_softstate_t *softs,
+ char *addr, size_t size)
+{
+ /* DBG_FUNC("IN\n"); */
+
+ free((void*)addr, M_SMARTRAID);
+
+ /* DBG_FUNC("OUT\n"); */
+}
+
+/*
+ * dma/bus resource deallocation wrapper function
+ */
+void os_resource_free(pqisrc_softstate_t *softs)
+{
+ if(softs->os_specific.pqi_parent_dmat)
+ bus_dma_tag_destroy(softs->os_specific.pqi_parent_dmat);
+
+ if (softs->os_specific.pqi_regs_res0 != NULL)
+ bus_release_resource(softs->os_specific.pqi_dev,
+ SYS_RES_MEMORY,
+ softs->os_specific.pqi_regs_rid0,
+ softs->os_specific.pqi_regs_res0);
+}
diff --git a/sys/dev/smartpqi/smartpqi_misc.c b/sys/dev/smartpqi/smartpqi_misc.c
new file mode 100644
index 000000000000..9bf5e8c07db5
--- /dev/null
+++ b/sys/dev/smartpqi/smartpqi_misc.c
@@ -0,0 +1,172 @@
+/*-
+ * Copyright (c) 2018 Microsemi Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#include "smartpqi_includes.h"
+
+/*
+ * Populate the host wellness time fields, in BCD, from the FreeBSD clock
+ */
+void os_get_time(struct bmic_host_wellness_time *host_wellness_time)
+{
+ struct timespec ts;
+ struct clocktime ct;
+
+ getnanotime(&ts);
+ clock_ts_to_ct(&ts, &ct);
+
+
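+	/* bin2bcd() packs two decimal digits per byte, e.g. 2018 becomes century 0x20, year 0x18. */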
+	/* Fill the time in BCD format */
+	host_wellness_time->hour = (uint8_t)bin2bcd(ct.hour);
+	host_wellness_time->min = (uint8_t)bin2bcd(ct.min);
+	host_wellness_time->sec = (uint8_t)bin2bcd(ct.sec);
+	host_wellness_time->reserved = 0;
+	host_wellness_time->month = (uint8_t)bin2bcd(ct.mon);
+	host_wellness_time->day = (uint8_t)bin2bcd(ct.day);
+	host_wellness_time->century = (uint8_t)bin2bcd(ct.year / 100);
+	host_wellness_time->year = (uint8_t)bin2bcd(ct.year % 100);
+
+}
+
+/*
+ * Update host time to f/w every 24 hours in a periodic timer.
+ */
+
+void os_wellness_periodic(void *data)
+{
+ struct pqisrc_softstate *softs = (struct pqisrc_softstate *)data;
+ int ret = 0;
+
+
+ /* update time to FW */
+ if (!pqisrc_ctrl_offline(softs)){
+ if( (ret = pqisrc_write_current_time_to_host_wellness(softs)) != 0 )
+ DBG_ERR("Failed to update time to FW in periodic ret = %d\n", ret);
+ }
+
+ /* reschedule ourselves */
+ softs->os_specific.wellness_periodic = timeout(os_wellness_periodic,
+ softs, OS_HOST_WELLNESS_TIMEOUT * hz);
+}
+
+/*
+ * Routine used to stop the heart-beat timer
+ */
+void os_stop_heartbeat_timer(pqisrc_softstate_t *softs)
+{
+ DBG_FUNC("IN\n");
+
+ /* Kill the heart beat event */
+ untimeout(os_start_heartbeat_timer, softs,
+ softs->os_specific.heartbeat_timeout_id);
+
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Routine used to start the heart-beat timer
+ */
+void os_start_heartbeat_timer(void *data)
+{
+ struct pqisrc_softstate *softs = (struct pqisrc_softstate *)data;
+ DBG_FUNC("IN\n");
+
+ pqisrc_heartbeat_timer_handler(softs);
+ if (!pqisrc_ctrl_offline(softs)) {
+ softs->os_specific.heartbeat_timeout_id =
+ timeout(os_start_heartbeat_timer, softs,
+ OS_FW_HEARTBEAT_TIMER_INTERVAL * hz);
+ }
+
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Mutex initialization function
+ */
+int os_init_spinlock(struct pqisrc_softstate *softs, struct mtx *lock,
+ char *lockname)
+{
+ mtx_init(lock, lockname, NULL, MTX_SPIN);
+ return 0;
+
+}
+
+/*
+ * Mutex uninitialization function
+ */
+void os_uninit_spinlock(struct mtx *lock)
+{
+ mtx_destroy(lock);
+ return;
+
+}
+
+/*
+ * Semaphore initialization function
+ */
+int os_create_semaphore(const char *name, int value, struct sema *sema)
+{
+ sema_init(sema, value, name);
+ return PQI_STATUS_SUCCESS;
+
+}
+
+/*
+ * Semaphore uninitialization function
+ */
+int os_destroy_semaphore(struct sema *sema)
+{
+ sema_destroy(sema);
+ return PQI_STATUS_SUCCESS;
+
+}
+
+/*
+ * Semaphore grab function
+ */
+void inline os_sema_lock(struct sema *sema)
+{
+	sema_wait(sema);
+}
+
+/*
+ * Semaphore release function
+ */
+void inline os_sema_unlock(struct sema *sema)
+{
+	sema_post(sema);
+}
+
+
+/*
+ * string copy wrapper function
+ */
+int os_strlcpy(char *dst, char *src, int size)
+{
+ return strlcpy(dst, src, size);
+}
diff --git a/sys/dev/smartpqi/smartpqi_prototypes.h b/sys/dev/smartpqi/smartpqi_prototypes.h
new file mode 100644
index 000000000000..90cb2141a6bc
--- /dev/null
+++ b/sys/dev/smartpqi/smartpqi_prototypes.h
@@ -0,0 +1,263 @@
+/*-
+ * Copyright (c) 2018 Microsemi Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#ifndef _PQI_PROTOTYPES_H
+#define _PQI_PROTOTYPES_H
+
+/* Function prototypes */
+
+/*pqi_init.c */
+int pqisrc_init(pqisrc_softstate_t *);
+void pqisrc_uninit(pqisrc_softstate_t *);
+void pqisrc_pqi_uninit(pqisrc_softstate_t *);
+int pqisrc_process_config_table(pqisrc_softstate_t *);
+int pqisrc_flush_cache(pqisrc_softstate_t *, enum pqisrc_flush_cache_event_type);
+int pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *);
+
+/* pqi_sis.c */
+int pqisrc_sis_init(pqisrc_softstate_t *);
+void pqisrc_sis_uninit(pqisrc_softstate_t *);
+int pqisrc_reenable_sis(pqisrc_softstate_t *);
+void pqisrc_trigger_nmi_sis(pqisrc_softstate_t *);
+void sis_disable_msix(pqisrc_softstate_t *);
+int pqisrc_force_sis(pqisrc_softstate_t *);
+int pqisrc_sis_wait_for_db_bit_to_clear(pqisrc_softstate_t *, uint32_t);
+
+/* pqi_queue.c */
+int pqisrc_submit_admin_req(pqisrc_softstate_t *,
+ gen_adm_req_iu_t *, gen_adm_resp_iu_t *);
+int pqisrc_create_admin_queue(pqisrc_softstate_t *);
+int pqisrc_destroy_admin_queue(pqisrc_softstate_t *);
+int pqisrc_create_op_queues(pqisrc_softstate_t *);
+
+/* pqi_cmd.c */
+int pqisrc_submit_cmnd(pqisrc_softstate_t *,ib_queue_t *,void *);
+
+/* pqi_tag.c */
+#ifndef LOCKFREE_STACK
+int pqisrc_init_taglist(pqisrc_softstate_t *,pqi_taglist_t *,uint32_t);
+void pqisrc_destroy_taglist(pqisrc_softstate_t *,pqi_taglist_t *);
+void pqisrc_put_tag(pqi_taglist_t *,uint32_t);
+uint32_t pqisrc_get_tag(pqi_taglist_t *);
+#else
+int pqisrc_init_taglist(pqisrc_softstate_t *, lockless_stack_t *, uint32_t);
+void pqisrc_destroy_taglist(pqisrc_softstate_t *, lockless_stack_t *);
+void pqisrc_put_tag(lockless_stack_t *,uint32_t);
+uint32_t pqisrc_get_tag(lockless_stack_t *);
+#endif /* LOCKFREE_STACK */
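
The LOCKFREE_STACK variant implies free tags are recycled through a lock-free LIFO. Below is a generic sketch of the pop path of such a stack in C11 atomics, to illustrate the technique only; the driver's lockless_stack_t layout is not shown in this hunk, so the node layout and the versioned head are assumptions:

    #include <stdatomic.h>
    #include <stdint.h>

    /* Head packs {version, top index} in one word so CAS catches ABA. */
    struct tag_stack {
            _Atomic uint64_t head;  /* high 32: version, low 32: top index */
            uint32_t *next;         /* next[i] = index of element below i */
    };

    static uint32_t
    tag_stack_pop(struct tag_stack *s)
    {
            uint64_t old = atomic_load(&s->head);

            for (;;) {
                    uint32_t top = (uint32_t)old;
                    uint32_t seq = (uint32_t)(old >> 32);
                    uint64_t newhead;

                    if (top == 0)
                            return (0);     /* empty: no tag available */
                    newhead = ((uint64_t)(seq + 1) << 32) | s->next[top];
                    /* Retry if another CPU popped/pushed meanwhile. */
                    if (atomic_compare_exchange_weak(&s->head, &old, newhead))
                            return (top);
            }
    }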
+
+/* pqi_discovery.c */
+void pqisrc_remove_device(pqisrc_softstate_t *, pqi_scsi_dev_t *);
+int pqisrc_get_ctrl_fw_version(pqisrc_softstate_t *);
+int pqisrc_rescan_devices(pqisrc_softstate_t *);
+int pqisrc_scan_devices(pqisrc_softstate_t *);
+void pqisrc_process_raid_path_io_response(pqisrc_softstate_t *, uint16_t, struct pqi_io_response *);
+void pqisrc_process_io_error_response(pqisrc_softstate_t *, int, uint16_t, struct pqi_io_response *);
+void pqisrc_cleanup_devices(pqisrc_softstate_t *);
+void pqisrc_device_mem_free(pqisrc_softstate_t *, pqi_scsi_dev_t *);
+boolean_t pqisrc_is_external_raid_device(pqi_scsi_dev_t *device);
+void pqisrc_free_device(pqisrc_softstate_t * softs,pqi_scsi_dev_t *device);
+
+/* pqi_helper.c */
+boolean_t pqisrc_ctrl_offline(pqisrc_softstate_t *);
+void pqisrc_heartbeat_timer_handler(pqisrc_softstate_t *);
+int pqisrc_wait_on_condition(pqisrc_softstate_t *, rcb_t *);
+boolean_t pqisrc_device_equal(pqi_scsi_dev_t *, pqi_scsi_dev_t *);
+boolean_t pqisrc_is_hba_lunid(uint8_t *);
+boolean_t pqisrc_is_logical_device(pqi_scsi_dev_t *);
+void pqisrc_sanitize_inquiry_string(unsigned char *, int );
+void pqisrc_display_device_info(pqisrc_softstate_t *, char *, pqi_scsi_dev_t *);
+boolean_t pqisrc_scsi3addr_equal(uint8_t *, uint8_t *);
+void check_struct_sizes(void);
+char *pqisrc_raidlevel_to_string(uint8_t);
+
+/* pqi_response.c */
+void pqisrc_signal_event(pqisrc_softstate_t *softs, rcb_t *rcb);
+void pqisrc_process_internal_raid_response_success(pqisrc_softstate_t *,
+ rcb_t *);
+void pqisrc_process_internal_raid_response_error(pqisrc_softstate_t *,
+ rcb_t *, uint16_t);
+void pqisrc_process_io_response_success(pqisrc_softstate_t *,
+ rcb_t *);
+void pqisrc_process_aio_response_error(pqisrc_softstate_t *,
+ rcb_t *, uint16_t);
+void pqisrc_process_raid_response_error(pqisrc_softstate_t *,
+ rcb_t *, uint16_t);
+void pqisrc_process_response_queue(pqisrc_softstate_t *, int);
+
+
+/* pqi_request.c */
+int pqisrc_build_send_io(pqisrc_softstate_t *,rcb_t *);
+
+
+int pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device, rcb_t *rcb, uint8_t*);
+
+
+int pqisrc_send_tmf(pqisrc_softstate_t *, pqi_scsi_dev_t *,
+ rcb_t *, int, int);
+int pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs);
+int pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs);
+
+/* pqi_event.c */
+int pqisrc_report_event_config(pqisrc_softstate_t *);
+int pqisrc_set_event_config(pqisrc_softstate_t *);
+int pqisrc_process_event_intr_src(pqisrc_softstate_t *,int);
+void pqisrc_ack_all_events(void *arg);
+
+
+void pqisrc_event_worker(void *, int);
+int pqisrc_scsi_setup(struct pqisrc_softstate *);
+void pqisrc_scsi_cleanup(struct pqisrc_softstate *);
+boolean_t pqisrc_update_scsi_sense(const uint8_t *, int,
+ struct sense_header_scsi *);
+int pqisrc_build_send_raid_request(pqisrc_softstate_t *, pqisrc_raid_req_t *,
+ void *, size_t, uint8_t, uint16_t, uint8_t *,
+ raid_path_error_info_elem_t *);
+
+int pqisrc_submit_management_req(pqisrc_softstate_t *,
+ pqi_event_config_request_t *);
+void pqisrc_take_devices_offline(pqisrc_softstate_t *);
+void pqisrc_take_ctrl_offline(pqisrc_softstate_t *);
+void pqisrc_free_rcb(pqisrc_softstate_t *, int);
+void pqisrc_decide_opq_config(pqisrc_softstate_t *);
+int pqisrc_configure_op_queues(pqisrc_softstate_t *);
+int pqisrc_pqi_init(pqisrc_softstate_t *);
+int pqi_reset(pqisrc_softstate_t *);
+int pqisrc_check_pqimode(pqisrc_softstate_t *);
+int pqisrc_check_fw_status(pqisrc_softstate_t *);
+int pqisrc_init_struct_base(pqisrc_softstate_t *);
+int pqisrc_get_sis_pqi_cap(pqisrc_softstate_t *);
+int pqisrc_get_preferred_settings(pqisrc_softstate_t *);
+int pqisrc_get_adapter_properties(pqisrc_softstate_t *,
+ uint32_t *, uint32_t *);
+
+void pqisrc_get_admin_queue_config(pqisrc_softstate_t *);
+void pqisrc_decide_admin_queue_config(pqisrc_softstate_t *);
+int pqisrc_allocate_and_init_adminq(pqisrc_softstate_t *);
+int pqisrc_create_delete_adminq(pqisrc_softstate_t *, uint32_t);
+void pqisrc_print_adminq_config(pqisrc_softstate_t *);
+int pqisrc_delete_op_queue(pqisrc_softstate_t *,
+ uint32_t, boolean_t);
+void pqisrc_destroy_event_queue(pqisrc_softstate_t *);
+
+void pqisrc_destroy_op_ib_queues(pqisrc_softstate_t *);
+
+void pqisrc_destroy_op_ob_queues(pqisrc_softstate_t *);
+
+int pqisrc_change_op_ibq_queue_prop(pqisrc_softstate_t *,
+ ib_queue_t *, uint32_t);
+int pqisrc_create_op_obq(pqisrc_softstate_t *,
+ ob_queue_t *);
+int pqisrc_create_op_ibq(pqisrc_softstate_t *,
+ ib_queue_t *);
+int pqisrc_create_op_aio_ibq(pqisrc_softstate_t *, ib_queue_t *);
+int pqisrc_create_op_raid_ibq(pqisrc_softstate_t *, ib_queue_t *);
+int pqisrc_alloc_and_create_event_queue(pqisrc_softstate_t *);
+int pqisrc_alloc_and_create_ib_queues(pqisrc_softstate_t *);
+int pqisrc_alloc_and_create_ob_queues(pqisrc_softstate_t *);
+int pqisrc_process_task_management_response(pqisrc_softstate_t *,
+ pqi_tmf_resp_t *);
+
+
+/* pqi_ioctl.c */
+
+int pqisrc_passthru_ioctl(struct pqisrc_softstate *, void *, int);
+
+
+/* FreeBSD OS layer function prototypes */
+/* FreeBSD_mem.c */
+int os_dma_mem_alloc(pqisrc_softstate_t *,struct dma_mem *);
+void os_dma_mem_free(pqisrc_softstate_t *,struct dma_mem *);
+void *os_mem_alloc(pqisrc_softstate_t *,size_t);
+void os_mem_free(pqisrc_softstate_t *,char *,size_t);
+void os_resource_free(pqisrc_softstate_t *);
+int os_dma_setup(pqisrc_softstate_t *);
+int os_dma_destroy(pqisrc_softstate_t *);
+
+/* FreeBSD intr.c */
+int os_get_intr_config(pqisrc_softstate_t *);
+int os_setup_intr(pqisrc_softstate_t *);
+int os_destroy_intr(pqisrc_softstate_t *);
+int os_get_processor_config(pqisrc_softstate_t *);
+void os_free_intr_config(pqisrc_softstate_t *);
+
+/* FreeBSD_ioctl.c */
+int os_copy_to_user(struct pqisrc_softstate *, void *,
+ void *, int, int);
+int os_copy_from_user(struct pqisrc_softstate *, void *,
+ void *, int, int);
+int create_char_dev(struct pqisrc_softstate *, int);
+void destroy_char_dev(struct pqisrc_softstate *);
+
+/* FreeBSD_misc.c */
+int os_init_spinlock(struct pqisrc_softstate *, struct mtx *, char *);
+void os_uninit_spinlock(struct mtx *);
+int os_create_semaphore(const char *, int,struct sema *);
+int os_destroy_semaphore(struct sema *);
+void os_sema_lock(struct sema *);
+void os_sema_unlock(struct sema *);
+
+int os_strlcpy(char *dst, char *src, int len);
+void os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *);
+void os_stop_heartbeat_timer(pqisrc_softstate_t *);
+void os_start_heartbeat_timer(void *);
+
+/* FreeBSD_cam.c */
+uint8_t os_get_task_attr(rcb_t *);
+void os_wellness_periodic(void *);
+void smartpqi_target_rescan(struct pqisrc_softstate *);
+
+/* FreeBSD_intr.c FreeBSD_main.c */
+void os_add_device(pqisrc_softstate_t *, pqi_scsi_dev_t *);
+void os_remove_device(pqisrc_softstate_t *, pqi_scsi_dev_t *);
+void os_io_response_success(rcb_t *);
+void os_aio_response_error(rcb_t *, aio_path_error_info_elem_t *);
+void smartpqi_adjust_queue_depth(struct cam_path *, uint32_t );
+void os_raid_response_error(rcb_t *, raid_path_error_info_elem_t *);
+void os_reset_rcb( rcb_t *);
+int register_sim(struct pqisrc_softstate *, int);
+void deregister_sim(struct pqisrc_softstate *);
+int check_for_scsi_opcode(uint8_t *, boolean_t *, uint64_t *,
+ uint32_t *);
+int register_legacy_intr(pqisrc_softstate_t *);
+int register_msix_intr(pqisrc_softstate_t *);
+void deregister_pqi_intx(pqisrc_softstate_t *);
+void deregister_pqi_msix(pqisrc_softstate_t *);
+void os_get_time(struct bmic_host_wellness_time *);
+void os_eventtaskqueue_enqueue(pqisrc_softstate_t *);
+void pqisrc_save_controller_info(struct pqisrc_softstate *);
+int smartpqi_shutdown(void *);
+
+#endif /* _PQI_PROTOTYPES_H */
diff --git a/sys/dev/smartpqi/smartpqi_queue.c b/sys/dev/smartpqi/smartpqi_queue.c
new file mode 100644
index 000000000000..72850c9fcebe
--- /dev/null
+++ b/sys/dev/smartpqi/smartpqi_queue.c
@@ -0,0 +1,995 @@
+/*-
+ * Copyright (c) 2018 Microsemi Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#include "smartpqi_includes.h"
+
+/*
+ * Submit an admin IU to the adapter.
+ * Add interrupt support, if required
+ */
+int pqisrc_submit_admin_req(pqisrc_softstate_t *softs,
+ gen_adm_req_iu_t *req, gen_adm_resp_iu_t *resp)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ ob_queue_t *ob_q = &softs->admin_ob_queue;
+ ib_queue_t *ib_q = &softs->admin_ib_queue;
+ int tmo = PQISRC_ADMIN_CMD_RESP_TIMEOUT;
+
+ DBG_FUNC("IN\n");
+
+ req->header.iu_type =
+ PQI_IU_TYPE_GENERAL_ADMIN_REQUEST;
+ req->header.comp_feature = 0x00;
+ req->header.iu_length = PQI_STANDARD_IU_LENGTH;
+ req->res1 = 0;
+ req->work = 0;
+
+ /* Get the tag */
+ req->req_id = pqisrc_get_tag(&softs->taglist);
+ if (INVALID_ELEM == req->req_id) {
+ DBG_ERR("Tag not available0x%x\n",(uint16_t)req->req_id);
+ ret = PQI_STATUS_FAILURE;
+ goto err_out;
+ }
+ softs->rcb[req->req_id].tag = req->req_id;
+
+ /* Submit the command to the admin ib queue */
+ ret = pqisrc_submit_cmnd(softs, ib_q, req);
+ if (ret != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Unable to submit command\n");
+ goto err_cmd;
+ }
+
+ /* Wait for completion */
+ COND_WAIT((*(ob_q->pi_virt_addr) != ob_q->ci_local), tmo);
+ if (tmo <= 0) {
+ DBG_ERR("Admin cmd timeout\n");
+ DBG_ERR("tmo : %d\n",tmo); \
+ ret = PQI_STATUS_TIMEOUT;
+ goto err_cmd;
+ }
+
+ /* Copy the response */
+ memcpy(resp, ob_q->array_virt_addr + (ob_q->ci_local * ob_q->elem_size),
+ sizeof(gen_adm_resp_iu_t));
+
+ /* Update CI */
+ ob_q->ci_local = (ob_q->ci_local + 1 ) % ob_q->num_elem;
+ PCI_MEM_PUT32(softs, ob_q->ci_register_abs,
+ ob_q->ci_register_offset, LE_32(ob_q->ci_local));
+
+ /* Validate the response data */
+ ASSERT(req->fn_code == resp->fn_code);
+ ASSERT(resp->header.iu_type == PQI_IU_TYPE_GENERAL_ADMIN_RESPONSE);
+ ret = resp->status;
+ if (ret)
+ goto err_cmd;
+
+ os_reset_rcb(&softs->rcb[req->req_id]);
+ pqisrc_put_tag(&softs->taglist,req->req_id);
+ DBG_FUNC("OUT\n");
+ return ret;
+err_cmd:
+ os_reset_rcb(&softs->rcb[req->req_id]);
+ pqisrc_put_tag(&softs->taglist,req->req_id);
+err_out:
+ DBG_FUNC("failed OUT : %d\n", ret);
+ return ret;
+}
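
COND_WAIT() above is the driver's polled completion primitive: it waits until the firmware advances the OB queue PI past the local CI, or until the timeout budget drains to zero (which is why the caller tests tmo <= 0 afterwards). A plausible expansion, offered as an assumption since the real macro is defined elsewhere in smartpqi_defines.h:

    /* Sketch only; the real COND_WAIT lives in smartpqi_defines.h. */
    #define COND_WAIT_SKETCH(cond, tmo)                     \
            while (!(cond) && (tmo) > 0) {                  \
                    (tmo)--;                                \
                    DELAY(1000);    /* poll every 1 ms */   \
            }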
+
+/*
+ * Get the administration queue config parameters.
+ */
+void pqisrc_get_admin_queue_config(pqisrc_softstate_t *softs)
+{
+ uint64_t val = 0;
+
+
+ val = LE_64(PCI_MEM_GET64(softs, &softs->pqi_reg->pqi_dev_adminq_cap, PQI_ADMINQ_CAP));
+
+ /* pqi_cap = (struct pqi_dev_adminq_cap *)&val;*/
+ softs->admin_ib_queue.num_elem = val & 0xFF;
+ softs->admin_ob_queue.num_elem = (val & 0xFF00) >> 8;
+ /* Note : sizes are in units of 16 bytes */
+ softs->admin_ib_queue.elem_size = ((val & 0xFF0000) >> 16) * 16;
+ softs->admin_ob_queue.elem_size = ((val & 0xFF000000) >> 24) * 16;
+
+ DBG_FUNC(" softs->admin_ib_queue.num_elem : %d\n",
+ softs->admin_ib_queue.num_elem);
+ DBG_FUNC(" softs->admin_ib_queue.elem_size : %d\n",
+ softs->admin_ib_queue.elem_size);
+}
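
The low 32 bits of the capability register pack four 8-bit fields. A worked example with a hypothetical register value:

    /*
     * val = 0x04041010 (hypothetical):
     *   bits  7:0  = 0x10 -> 16 admin IB queue elements
     *   bits 15:8  = 0x10 -> 16 admin OB queue elements
     *   bits 23:16 = 0x04 -> IB element size = 4 * 16 = 64 bytes
     *   bits 31:24 = 0x04 -> OB element size = 4 * 16 = 64 bytes
     */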
+
+/*
+ * Decide the no of elements in admin ib and ob queues.
+ */
+void pqisrc_decide_admin_queue_config(pqisrc_softstate_t *softs)
+{
+ /* Determine num elements in Admin IBQ */
+ softs->admin_ib_queue.num_elem = MIN(softs->admin_ib_queue.num_elem,
+ PQISRC_MAX_ADMIN_IB_QUEUE_ELEM_NUM);
+
+ /* Determine num elements in Admin OBQ */
+ softs->admin_ob_queue.num_elem = MIN(softs->admin_ob_queue.num_elem,
+ PQISRC_MAX_ADMIN_OB_QUEUE_ELEM_NUM);
+}
+
+/*
+ * Allocate DMA memory for admin queue and initialize.
+ */
+int pqisrc_allocate_and_init_adminq(pqisrc_softstate_t *softs)
+{
+ uint32_t ib_array_size = 0;
+ uint32_t ob_array_size = 0;
+ uint32_t alloc_size = 0;
+ char *virt_addr = NULL;
+ dma_addr_t dma_addr = 0;
+ int ret = PQI_STATUS_SUCCESS;
+
+ ib_array_size = (softs->admin_ib_queue.num_elem *
+ softs->admin_ib_queue.elem_size);
+
+ ob_array_size = (softs->admin_ob_queue.num_elem *
+ softs->admin_ob_queue.elem_size);
+
+ alloc_size = ib_array_size + ob_array_size +
+ 2 * sizeof(uint32_t) + PQI_ADDR_ALIGN_MASK_64 + 1; /* for IB CI and OB PI */
+ /* Allocate memory for Admin Q */
+ softs->admin_queue_dma_mem.tag = "admin_queue";
+ softs->admin_queue_dma_mem.size = alloc_size;
+ softs->admin_queue_dma_mem.align = PQI_ADMINQ_ELEM_ARRAY_ALIGN;
+ ret = os_dma_mem_alloc(softs, &softs->admin_queue_dma_mem);
+ if (ret) {
+ DBG_ERR("Failed to Allocate Admin Q ret : %d\n", ret);
+ goto err_out;
+ }
+
+ /* Setup the address */
+ virt_addr = softs->admin_queue_dma_mem.virt_addr;
+ dma_addr = softs->admin_queue_dma_mem.dma_addr;
+
+ /* IB */
+ softs->admin_ib_queue.q_id = 0;
+ softs->admin_ib_queue.array_virt_addr = virt_addr;
+ softs->admin_ib_queue.array_dma_addr = dma_addr;
+ softs->admin_ib_queue.pi_local = 0;
+ /* OB */
+ softs->admin_ob_queue.q_id = 0;
+ softs->admin_ob_queue.array_virt_addr = virt_addr + ib_array_size;
+ softs->admin_ob_queue.array_dma_addr = dma_addr + ib_array_size;
+ softs->admin_ob_queue.ci_local = 0;
+
+ /* IB CI */
+ softs->admin_ib_queue.ci_virt_addr =
+ (uint32_t*)((uint8_t*)softs->admin_ob_queue.array_virt_addr
+ + ob_array_size);
+ softs->admin_ib_queue.ci_dma_addr =
+ (dma_addr_t)((uint8_t*)softs->admin_ob_queue.array_dma_addr +
+ ob_array_size);
+
+ /* OB PI */
+ softs->admin_ob_queue.pi_virt_addr =
+ (uint32_t*)((uint8_t*)(softs->admin_ib_queue.ci_virt_addr) +
+ PQI_ADDR_ALIGN_MASK_64 + 1);
+ softs->admin_ob_queue.pi_dma_addr =
+ (dma_addr_t)((uint8_t*)(softs->admin_ib_queue.ci_dma_addr) +
+ PQI_ADDR_ALIGN_MASK_64 + 1);
+
+ DBG_INFO("softs->admin_ib_queue.ci_dma_addr : %p,softs->admin_ob_queue.pi_dma_addr :%p\n",
+ (void*)softs->admin_ib_queue.ci_dma_addr, (void*)softs->admin_ob_queue.pi_dma_addr );
+
+ /* Verify alignment */
+ ASSERT(!(softs->admin_ib_queue.array_dma_addr &
+ PQI_ADDR_ALIGN_MASK_64));
+ ASSERT(!(softs->admin_ib_queue.ci_dma_addr &
+ PQI_ADDR_ALIGN_MASK_64));
+ ASSERT(!(softs->admin_ob_queue.array_dma_addr &
+ PQI_ADDR_ALIGN_MASK_64));
+ ASSERT(!(softs->admin_ob_queue.pi_dma_addr &
+ PQI_ADDR_ALIGN_MASK_64));
+
+ DBG_FUNC("OUT\n");
+ return ret;
+
+err_out:
+ DBG_FUNC("failed OUT\n");
+ return PQI_STATUS_FAILURE;
+}
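
The single DMA allocation above is carved into four regions; the layout implied by the offset arithmetic is:

    /*
     *   +----------------------+  <- dma_addr (64-byte aligned)
     *   | admin IB elem array  |  num_elem * elem_size bytes
     *   +----------------------+
     *   | admin OB elem array  |  num_elem * elem_size bytes
     *   +----------------------+
     *   | IB CI (uint32_t)     |  consumer index, written by firmware
     *   +- - - - - - - - - - - +  <- 64 bytes after the CI, keeping both
     *   | OB PI (uint32_t)     |     indexes on 64-byte boundaries
     *   +----------------------+
     */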
+
+/*
+ * Subroutine used to create (or) delete the admin queue requested.
+ */
+int pqisrc_create_delete_adminq(pqisrc_softstate_t *softs,
+ uint32_t cmd)
+{
+ int tmo = 0;
+ int ret = PQI_STATUS_SUCCESS;
+
+ /* Create Admin Q pair writing to Admin Q config function reg */
+
+ PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_q_config, PQI_ADMINQ_CONFIG, LE_64(cmd));
+
+ if (cmd == PQI_ADMIN_QUEUE_CONF_FUNC_CREATE_Q_PAIR)
+ tmo = PQISRC_ADMIN_QUEUE_CREATE_TIMEOUT;
+ else
+ tmo = PQISRC_ADMIN_QUEUE_DELETE_TIMEOUT;
+
+ /* Wait for completion */
+ COND_WAIT((PCI_MEM_GET64(softs, &softs->pqi_reg->admin_q_config, PQI_ADMINQ_CONFIG) ==
+ PQI_ADMIN_QUEUE_CONF_FUNC_STATUS_IDLE), tmo);
+ if (tmo <= 0) {
+ DBG_ERR("Unable to create/delete admin queue pair\n");
+ ret = PQI_STATUS_TIMEOUT;
+ }
+
+ return ret;
+}
+
+/*
+ * Debug admin queue configuration params.
+ */
+void pqisrc_print_adminq_config(pqisrc_softstate_t *softs)
+{
+ DBG_INFO(" softs->admin_ib_queue.array_dma_addr : %p\n",
+ (void*)softs->admin_ib_queue.array_dma_addr);
+ DBG_INFO(" softs->admin_ib_queue.array_virt_addr : %p\n",
+ (void*)softs->admin_ib_queue.array_virt_addr);
+ DBG_INFO(" softs->admin_ib_queue.num_elem : %d\n",
+ softs->admin_ib_queue.num_elem);
+ DBG_INFO(" softs->admin_ib_queue.elem_size : %d\n",
+ softs->admin_ib_queue.elem_size);
+ DBG_INFO(" softs->admin_ob_queue.array_dma_addr : %p\n",
+ (void*)softs->admin_ob_queue.array_dma_addr);
+ DBG_INFO(" softs->admin_ob_queue.array_virt_addr : %p\n",
+ (void*)softs->admin_ob_queue.array_virt_addr);
+ DBG_INFO(" softs->admin_ob_queue.num_elem : %d\n",
+ softs->admin_ob_queue.num_elem);
+ DBG_INFO(" softs->admin_ob_queue.elem_size : %d\n",
+ softs->admin_ob_queue.elem_size);
+ DBG_INFO(" softs->admin_ib_queue.pi_register_abs : %p\n",
+ (void*)softs->admin_ib_queue.pi_register_abs);
+ DBG_INFO(" softs->admin_ob_queue.ci_register_abs : %p\n",
+ (void*)softs->admin_ob_queue.ci_register_abs);
+}
+
+/*
+ * Function used to create an admin queue.
+ */
+int pqisrc_create_admin_queue(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ uint32_t admin_q_param = 0;
+
+ DBG_FUNC("IN\n");
+
+ /* Get admin queue details - pqi2-r00a - table 24 */
+ pqisrc_get_admin_queue_config(softs);
+
+ /* Decide admin Q config */
+ pqisrc_decide_admin_queue_config(softs);
+
+ /* Allocate and init Admin Q pair */
+ ret = pqisrc_allocate_and_init_adminq(softs);
+ if (ret) {
+ DBG_ERR("Failed to Allocate Admin Q ret : %d\n", ret);
+ goto err_out;
+ }
+
+ /* Write IB Q element array address */
+ PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_ibq_elem_array_addr,
+ PQI_ADMIN_IBQ_ELEM_ARRAY_ADDR, LE_64(softs->admin_ib_queue.array_dma_addr));
+
+ /* Write OB Q element array address */
+ PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_obq_elem_array_addr,
+ PQI_ADMIN_OBQ_ELEM_ARRAY_ADDR, LE_64(softs->admin_ob_queue.array_dma_addr));
+
+ /* Write IB Q CI address */
+ PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_ibq_ci_addr,
+ PQI_ADMIN_IBQ_CI_ADDR, LE_64(softs->admin_ib_queue.ci_dma_addr));
+
+ /* Write OB Q PI address */
+ PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_obq_pi_addr,
+ PQI_ADMIN_OBQ_PI_ADDR, LE_64(softs->admin_ob_queue.pi_dma_addr));
+
+
+ /* Write Admin Q params pqi-r200a table 36 */
+
+ admin_q_param = softs->admin_ib_queue.num_elem |
+ (softs->admin_ob_queue.num_elem << 8)|
+ PQI_ADMIN_QUEUE_MSIX_DISABLE;
+
+ PCI_MEM_PUT32(softs, &softs->pqi_reg->admin_q_param,
+ PQI_ADMINQ_PARAM, LE_32(admin_q_param));
+
+ /* Submit cmd to create Admin Q pair */
+ ret = pqisrc_create_delete_adminq(softs,
+ PQI_ADMIN_QUEUE_CONF_FUNC_CREATE_Q_PAIR);
+ if (ret) {
+ DBG_ERR("Failed to Allocate Admin Q ret : %d\n", ret);
+ goto err_q_create;
+ }
+
+ /* Admin queue created, get ci,pi offset */
+ softs->admin_ib_queue.pi_register_offset =(PQISRC_PQI_REG_OFFSET +
+ PCI_MEM_GET64(softs, &softs->pqi_reg->admin_ibq_pi_offset, PQI_ADMIN_IBQ_PI_OFFSET));
+
+ softs->admin_ib_queue.pi_register_abs =(uint32_t *)(softs->pci_mem_base_vaddr +
+ softs->admin_ib_queue.pi_register_offset);
+
+ softs->admin_ob_queue.ci_register_offset = (PQISRC_PQI_REG_OFFSET +
+ PCI_MEM_GET64(softs, &softs->pqi_reg->admin_obq_ci_offset, PQI_ADMIN_OBQ_CI_OFFSET));
+
+ softs->admin_ob_queue.ci_register_abs = (uint32_t *)(softs->pci_mem_base_vaddr +
+ softs->admin_ob_queue.ci_register_offset);
+
+ os_strlcpy(softs->admin_ib_queue.lockname, "admin_ibqlock", LOCKNAME_SIZE);
+
+ ret = OS_INIT_PQILOCK(softs, &softs->admin_ib_queue.lock,
+ softs->admin_ib_queue.lockname);
+ if(ret){
+ DBG_ERR("Admin spinlock initialization failed\n");
+ softs->admin_ib_queue.lockcreated = false;
+ goto err_out;
+ }
+ softs->admin_ib_queue.lockcreated = true;
+
+ /* Print admin q config details */
+ pqisrc_print_adminq_config(softs);
+
+ DBG_FUNC("OUT\n");
+ return ret;
+
+err_q_create:
+ os_dma_mem_free(softs, &softs->admin_queue_dma_mem);
+err_out:
+ DBG_FUNC("failed OUT\n");
+ return ret;
+}
+
+/*
+ * Subroutine used to delete an operational queue.
+ */
+int pqisrc_delete_op_queue(pqisrc_softstate_t *softs,
+ uint32_t q_id, boolean_t ibq)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ /* Firmware doesn't support this now */
+
+#if 0
+ gen_adm_req_iu_t admin_req;
+ gen_adm_resp_iu_t admin_resp;
+
+
+ memset(&admin_req, 0, sizeof(admin_req));
+ memset(&admin_resp, 0, sizeof(admin_resp));
+
+ DBG_FUNC("IN\n");
+
+ admin_req.req_type.create_op_iq.qid = q_id;
+
+ if (ibq)
+ admin_req.fn_code = PQI_FUNCTION_DELETE_OPERATIONAL_IQ;
+ else
+ admin_req.fn_code = PQI_FUNCTION_DELETE_OPERATIONAL_OQ;
+
+
+ ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);
+
+ DBG_FUNC("OUT\n");
+#endif
+ return ret;
+}
+
+/*
+ * Function used to destroy the event queue.
+ */
+void pqisrc_destroy_event_queue(pqisrc_softstate_t *softs)
+{
+ DBG_FUNC("IN\n");
+
+ if (softs->event_q.created == true) {
+ int ret = PQI_STATUS_SUCCESS;
+ ret = pqisrc_delete_op_queue(softs, softs->event_q.q_id, false);
+ if (ret) {
+ DBG_ERR("Failed to Delete Event Q %d\n", softs->event_q.q_id);
+ }
+ softs->event_q.created = false;
+ }
+
+ /* Free the memory */
+ os_dma_mem_free(softs, &softs->event_q_dma_mem);
+
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Function used to destroy operational ib queues.
+ */
+void pqisrc_destroy_op_ib_queues(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ ib_queue_t *op_ib_q = NULL;
+ int i;
+
+ DBG_FUNC("IN\n");
+
+ for (i = 0; i < softs->num_op_raid_ibq; i++) {
+ /* OP RAID IB Q */
+ op_ib_q = &softs->op_raid_ib_q[i];
+ if (op_ib_q->created == true) {
+ ret = pqisrc_delete_op_queue(softs, op_ib_q->q_id, true);
+ if (ret) {
+ DBG_ERR("Failed to Delete Raid IB Q %d\n",op_ib_q->q_id);
+ }
+ op_ib_q->created = false;
+ }
+
+ if(op_ib_q->lockcreated==true){
+ OS_UNINIT_PQILOCK(&op_ib_q->lock);
+ op_ib_q->lockcreated = false;
+ }
+
+ /* OP AIO IB Q */
+ op_ib_q = &softs->op_aio_ib_q[i];
+ if (op_ib_q->created == true) {
+ ret = pqisrc_delete_op_queue(softs, op_ib_q->q_id, true);
+ if (ret) {
+ DBG_ERR("Failed to Delete AIO IB Q %d\n",op_ib_q->q_id);
+ }
+ op_ib_q->created = false;
+ }
+
+ if(op_ib_q->lockcreated==true){
+ OS_UNINIT_PQILOCK(&op_ib_q->lock);
+ op_ib_q->lockcreated = false;
+ }
+ }
+
+ /* Free the memory */
+ os_dma_mem_free(softs, &softs->op_ibq_dma_mem);
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Function used to destroy operational ob queues.
+ */
+void pqisrc_destroy_op_ob_queues(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ int i;
+
+ DBG_FUNC("IN\n");
+
+ for (i = 0; i < softs->num_op_obq; i++) {
+ ob_queue_t *op_ob_q = NULL;
+ op_ob_q = &softs->op_ob_q[i];
+ if (op_ob_q->created == true) {
+ ret = pqisrc_delete_op_queue(softs, op_ob_q->q_id, false);
+ if (ret) {
+ DBG_ERR("Failed to Delete OB Q %d\n",op_ob_q->q_id);
+ }
+ op_ob_q->created = false;
+ }
+ }
+
+ /* Free the memory */
+ os_dma_mem_free(softs, &softs->op_obq_dma_mem);
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Function used to destroy an admin queue.
+ */
+int pqisrc_destroy_admin_queue(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_SUCCESS;
+
+ DBG_FUNC("IN\n");
+#if 0
+ ret = pqisrc_create_delete_adminq(softs,
+ PQI_ADMIN_QUEUE_CONF_FUNC_DEL_Q_PAIR);
+#endif
+ os_dma_mem_free(softs, &softs->admin_queue_dma_mem);
+
+ DBG_FUNC("OUT\n");
+ return ret;
+}
+
+/*
+ * Function used to change operational ib queue properties.
+ */
+int pqisrc_change_op_ibq_queue_prop(pqisrc_softstate_t *softs,
+ ib_queue_t *op_ib_q, uint32_t prop)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ gen_adm_req_iu_t admin_req;
+ gen_adm_resp_iu_t admin_resp;
+
+ memset(&admin_req, 0, sizeof(admin_req));
+ memset(&admin_resp, 0, sizeof(admin_resp));
+
+ DBG_FUNC("IN\n");
+
+ admin_req.fn_code = PQI_FUNCTION_CHANGE_OPERATIONAL_IQ_PROP;
+ admin_req.req_type.change_op_iq_prop.qid = op_ib_q->q_id;
+ admin_req.req_type.change_op_iq_prop.vend_specific = prop;
+
+ ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);
+
+ DBG_FUNC("OUT\n");
+ return ret;
+}
+
+/*
+ * Function used to create an operational ob queue.
+ */
+int pqisrc_create_op_obq(pqisrc_softstate_t *softs,
+ ob_queue_t *op_ob_q)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ gen_adm_req_iu_t admin_req;
+ gen_adm_resp_iu_t admin_resp;
+
+ DBG_FUNC("IN\n");
+
+ memset(&admin_req, 0, sizeof(admin_req));
+ memset(&admin_resp, 0, sizeof(admin_resp));
+
+ admin_req.fn_code = PQI_FUNCTION_CREATE_OPERATIONAL_OQ;
+ admin_req.req_type.create_op_oq.qid = op_ob_q->q_id;
+ admin_req.req_type.create_op_oq.intr_msg_num = op_ob_q->intr_msg_num;
+ admin_req.req_type.create_op_oq.elem_arr_addr = op_ob_q->array_dma_addr;
+ admin_req.req_type.create_op_oq.ob_pi_addr = op_ob_q->pi_dma_addr;
+ admin_req.req_type.create_op_oq.num_elem = op_ob_q->num_elem;
+ admin_req.req_type.create_op_oq.elem_len = op_ob_q->elem_size / 16;
+
+ DBG_INFO("admin_req.req_type.create_op_oq.qid : %x\n",admin_req.req_type.create_op_oq.qid);
+ DBG_INFO("admin_req.req_type.create_op_oq.intr_msg_num : %x\n", admin_req.req_type.create_op_oq.intr_msg_num );
+
+ ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);
+ if( PQI_STATUS_SUCCESS == ret) {
+ op_ob_q->ci_register_offset = (PQISRC_PQI_REG_OFFSET +
+ admin_resp.resp_type.create_op_oq.ci_offset);
+ op_ob_q->ci_register_abs = (uint32_t *)(softs->pci_mem_base_vaddr +
+ op_ob_q->ci_register_offset);
+ } else {
+ int i = 0;
+ DBG_WARN("Error Status Descriptors\n");
+ for(i = 0; i < 4;i++)
+ DBG_WARN(" %x ",admin_resp.resp_type.create_op_oq.status_desc[i]);
+ }
+
+ DBG_FUNC("OUT ret : %d\n", ret);
+
+ return ret;
+}
+
+/*
+ * Function used to create an operational ib queue.
+ */
+int pqisrc_create_op_ibq(pqisrc_softstate_t *softs,
+ ib_queue_t *op_ib_q)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ gen_adm_req_iu_t admin_req;
+ gen_adm_resp_iu_t admin_resp;
+
+ DBG_FUNC("IN\n");
+
+ memset(&admin_req, 0, sizeof(admin_req));
+ memset(&admin_resp, 0, sizeof(admin_resp));
+
+ admin_req.fn_code = PQI_FUNCTION_CREATE_OPERATIONAL_IQ;
+ admin_req.req_type.create_op_iq.qid = op_ib_q->q_id;
+ admin_req.req_type.create_op_iq.elem_arr_addr = op_ib_q->array_dma_addr;
+ admin_req.req_type.create_op_iq.iq_ci_addr = op_ib_q->ci_dma_addr;
+ admin_req.req_type.create_op_iq.num_elem = op_ib_q->num_elem;
+ admin_req.req_type.create_op_iq.elem_len = op_ib_q->elem_size / 16;
+
+ ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);
+
+ if( PQI_STATUS_SUCCESS == ret) {
+ op_ib_q->pi_register_offset =(PQISRC_PQI_REG_OFFSET +
+ admin_resp.resp_type.create_op_iq.pi_offset);
+
+ op_ib_q->pi_register_abs =(uint32_t *)(softs->pci_mem_base_vaddr +
+ op_ib_q->pi_register_offset);
+ } else {
+ int i = 0;
+ DBG_WARN("Error Status Decsriptors\n");
+ for(i = 0; i < 4;i++)
+ DBG_WARN(" %x ",admin_resp.resp_type.create_op_iq.status_desc[i]);
+ }
+
+ DBG_FUNC("OUT ret : %d\n", ret);
+ return ret;
+}
+
+/*
+ * subroutine used to create an operational ib queue for AIO.
+ */
+int pqisrc_create_op_aio_ibq(pqisrc_softstate_t *softs,
+ ib_queue_t *op_aio_ib_q)
+{
+ int ret = PQI_STATUS_SUCCESS;
+
+ DBG_FUNC("IN\n");
+
+ ret = pqisrc_create_op_ibq(softs,op_aio_ib_q);
+ if ( PQI_STATUS_SUCCESS == ret)
+ ret = pqisrc_change_op_ibq_queue_prop(softs,
+ op_aio_ib_q, PQI_CHANGE_OP_IQ_PROP_ASSIGN_AIO);
+
+ DBG_FUNC("OUT ret : %d\n", ret);
+ return ret;
+}
+
+/*
+ * subroutine used to create an operational ib queue for RAID.
+ */
+int pqisrc_create_op_raid_ibq(pqisrc_softstate_t *softs,
+ ib_queue_t *op_raid_ib_q)
+{
+ int ret = PQI_STATUS_SUCCESS;
+
+ DBG_FUNC("IN\n");
+
+ ret = pqisrc_create_op_ibq(softs,op_raid_ib_q);
+
+ DBG_FUNC("OUT\n");
+ return ret;
+}
+
+/*
+ * Allocate and create an event queue to process supported events.
+ */
+int pqisrc_alloc_and_create_event_queue(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ uint32_t alloc_size = 0;
+ uint32_t num_elem;
+ char *virt_addr = NULL;
+ dma_addr_t dma_addr = 0;
+ uint32_t event_q_pi_dma_start_offset = 0;
+ uint32_t event_q_pi_virt_start_offset = 0;
+ char *event_q_pi_virt_start_addr = NULL;
+ ob_queue_t *event_q = NULL;
+
+
+ DBG_FUNC("IN\n");
+
+ /*
+ * Calculate memory requirements.
+ * If the event queue is shared with IO responses, the number of
+ * elements in the event queue also depends on the number of
+ * elements in the OP OB Q. Since the event queue element size (32)
+ * is larger than the IO response size, the event queue element size
+ * need not be considered separately in the queue size calculation.
+ */
+#ifdef SHARE_EVENT_QUEUE_FOR_IO
+ num_elem = MIN(softs->num_elem_per_op_obq, PQISRC_NUM_EVENT_Q_ELEM);
+#else
+ num_elem = PQISRC_NUM_EVENT_Q_ELEM;
+#endif
+
+ alloc_size = num_elem * PQISRC_EVENT_Q_ELEM_SIZE;
+ event_q_pi_dma_start_offset = alloc_size;
+ event_q_pi_virt_start_offset = alloc_size;
+ alloc_size += sizeof(uint32_t); /*For IBQ CI*/
+
+ /* Allocate memory for event queues */
+ softs->event_q_dma_mem.tag = "event_queue";
+ softs->event_q_dma_mem.size = alloc_size;
+ softs->event_q_dma_mem.align = PQI_OPQ_ELEM_ARRAY_ALIGN;
+ ret = os_dma_mem_alloc(softs, &softs->event_q_dma_mem);
+ if (ret) {
+ DBG_ERR("Failed to Allocate Event Q ret : %d\n"
+ , ret);
+ goto err_out;
+ }
+
+ /* Set up the address */
+ virt_addr = softs->event_q_dma_mem.virt_addr;
+ dma_addr = softs->event_q_dma_mem.dma_addr;
+ event_q_pi_dma_start_offset += dma_addr;
+ event_q_pi_virt_start_addr = virt_addr + event_q_pi_virt_start_offset;
+
+ event_q = &softs->event_q;
+ ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64));
+ FILL_QUEUE_ARRAY_ADDR(event_q,virt_addr,dma_addr);
+ event_q->q_id = PQI_OP_EVENT_QUEUE_ID;
+ event_q->num_elem = num_elem;
+ event_q->elem_size = PQISRC_EVENT_Q_ELEM_SIZE;
+ event_q->pi_dma_addr = event_q_pi_dma_start_offset;
+ event_q->pi_virt_addr = (uint32_t *)event_q_pi_virt_start_addr;
+ event_q->intr_msg_num = 0; /* vector zero for event */
+ ASSERT(!(event_q->pi_dma_addr & PQI_ADDR_ALIGN_MASK_4));
+
+ ret = pqisrc_create_op_obq(softs,event_q);
+ if (ret) {
+ DBG_ERR("Failed to Create EventQ %d\n",event_q->q_id);
+ goto err_out_create;
+ }
+ event_q->created = true;
+
+ DBG_FUNC("OUT\n");
+ return ret;
+
+err_out_create:
+ pqisrc_destroy_event_queue(softs);
+err_out:
+ DBG_FUNC("OUT failed %d\n", ret);
+ return PQI_STATUS_FAILURE;
+}
+
+/*
+ * Allocate DMA memory and create operational ib queues.
+ */
+int pqisrc_alloc_and_create_ib_queues(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ uint32_t alloc_size = 0;
+ char *virt_addr = NULL;
+ dma_addr_t dma_addr = 0;
+ uint32_t ibq_size = 0;
+ uint32_t ib_ci_dma_start_offset = 0;
+ char *ib_ci_virt_start_addr = NULL;
+ uint32_t ib_ci_virt_start_offset = 0;
+ uint32_t ibq_id = PQI_MIN_OP_IB_QUEUE_ID;
+ ib_queue_t *op_ib_q = NULL;
+ uint32_t num_op_ibq = softs->num_op_raid_ibq +
+ softs->num_op_aio_ibq;
+ int i = 0;
+
+ DBG_FUNC("IN\n");
+
+ /* Calculate memory requirements */
+ ibq_size = softs->num_elem_per_op_ibq * softs->ibq_elem_size;
+ alloc_size = num_op_ibq * ibq_size;
+ /* CI indexes start after the queue element array */
+ ib_ci_dma_start_offset = alloc_size;
+ ib_ci_virt_start_offset = alloc_size;
+ alloc_size += num_op_ibq * sizeof(uint32_t); /*For IBQ CI*/
+
+ /* Allocate memory for IB queues */
+ softs->op_ibq_dma_mem.tag = "op_ib_queue";
+ softs->op_ibq_dma_mem.size = alloc_size;
+ softs->op_ibq_dma_mem.align = PQI_OPQ_ELEM_ARRAY_ALIGN;
+ ret = os_dma_mem_alloc(softs, &softs->op_ibq_dma_mem);
+ if (ret) {
+ DBG_ERR("Failed to Allocate Operational IBQ memory ret : %d\n",
+ ret);
+ goto err_out;
+ }
+
+ /* Set up the address */
+ virt_addr = softs->op_ibq_dma_mem.virt_addr;
+ dma_addr = softs->op_ibq_dma_mem.dma_addr;
+ ib_ci_dma_start_offset += dma_addr;
+ ib_ci_virt_start_addr = virt_addr + ib_ci_virt_start_offset;
+
+ ASSERT(softs->num_op_raid_ibq == softs->num_op_aio_ibq);
+
+ for (i = 0; i < softs->num_op_raid_ibq; i++) {
+ /* OP RAID IB Q */
+ op_ib_q = &softs->op_raid_ib_q[i];
+ ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64));
+ FILL_QUEUE_ARRAY_ADDR(op_ib_q,virt_addr,dma_addr);
+ op_ib_q->q_id = ibq_id++;
+
+ snprintf(op_ib_q->lockname, LOCKNAME_SIZE, "raid_ibqlock%d", i);
+ ret = OS_INIT_PQILOCK(softs, &op_ib_q->lock, op_ib_q->lockname);
+ if(ret){
+ DBG_ERR("raid_ibqlock %d init failed\n", i);
+ op_ib_q->lockcreated = false;
+ goto err_lock;
+ }
+ op_ib_q->lockcreated = true;
+ op_ib_q->num_elem = softs->num_elem_per_op_ibq;
+ op_ib_q->elem_size = softs->ibq_elem_size;
+ op_ib_q->ci_dma_addr = ib_ci_dma_start_offset +
+ (2 * i * sizeof(uint32_t));
+ op_ib_q->ci_virt_addr = (uint32_t*)(ib_ci_virt_start_addr +
+ (2 * i * sizeof(uint32_t)));
+ ASSERT(!(op_ib_q->ci_dma_addr & PQI_ADDR_ALIGN_MASK_4));
+ ret = pqisrc_create_op_raid_ibq(softs, op_ib_q);
+ if (ret) {
+ DBG_ERR("[ %s ] Failed to Create OP Raid IBQ %d\n",
+ __func__, op_ib_q->q_id);
+ goto err_out_create;
+ }
+ op_ib_q->created = true;
+
+ /* OP AIO IB Q */
+ virt_addr += ibq_size;
+ dma_addr += ibq_size;
+ op_ib_q = &softs->op_aio_ib_q[i];
+ ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64));
+ FILL_QUEUE_ARRAY_ADDR(op_ib_q,virt_addr,dma_addr);
+ op_ib_q->q_id = ibq_id++;
+ snprintf(op_ib_q->lockname, LOCKNAME_SIZE, "aio_ibqlock%d", i);
+ ret = OS_INIT_PQILOCK(softs, &op_ib_q->lock, op_ib_q->lockname);
+ if(ret){
+ DBG_ERR("aio_ibqlock %d init failed\n", i);
+ op_ib_q->lockcreated = false;
+ goto err_lock;
+ }
+ op_ib_q->lockcreated = true;
+ op_ib_q->num_elem = softs->num_elem_per_op_ibq;
+ op_ib_q->elem_size = softs->ibq_elem_size;
+ op_ib_q->ci_dma_addr = ib_ci_dma_start_offset +
+ (((2 * i) + 1) * sizeof(uint32_t));
+ op_ib_q->ci_virt_addr = (uint32_t*)(ib_ci_virt_start_addr +
+ (((2 * i) + 1) * sizeof(uint32_t)));
+ ASSERT(!(op_ib_q->ci_dma_addr & PQI_ADDR_ALIGN_MASK_4));
+ ret = pqisrc_create_op_aio_ibq(softs, op_ib_q);
+ if (ret) {
+ DBG_ERR("Failed to Create OP AIO IBQ %d\n",op_ib_q->q_id);
+ goto err_out_create;
+ }
+ op_ib_q->created = true;
+
+ virt_addr += ibq_size;
+ dma_addr += ibq_size;
+ }
+
+ DBG_FUNC("OUT\n");
+ return ret;
+
+err_lock:
+err_out_create:
+ pqisrc_destroy_op_ib_queues(softs);
+err_out:
+ DBG_FUNC("OUT failed %d\n", ret);
+ return PQI_STATUS_FAILURE;
+}
+
+/*
+ * Allocate DMA memory and create operational ob queues.
+ */
+int pqisrc_alloc_and_create_ob_queues(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ uint32_t alloc_size = 0;
+ char *virt_addr = NULL;
+ dma_addr_t dma_addr = 0;
+ uint32_t obq_size = 0;
+ uint32_t ob_pi_dma_start_offset = 0;
+ uint32_t ob_pi_virt_start_offset = 0;
+ char *ob_pi_virt_start_addr = NULL;
+ uint32_t obq_id = PQI_MIN_OP_OB_QUEUE_ID;
+ ob_queue_t *op_ob_q = NULL;
+ uint32_t num_op_obq = softs->num_op_obq;
+ int i = 0;
+
+ DBG_FUNC("IN\n");
+
+ /*
+ * OB Q element array should be 64 byte aligned.
+ * So the number of elements in OB Q should be multiple
+ * of 4, so that OB Queue element size (16) * num elements
+ * will be multiple of 64.
+ */
+
+ ALIGN_BOUNDARY(softs->num_elem_per_op_obq, 4);
+ obq_size = softs->num_elem_per_op_obq * softs->obq_elem_size;
+ alloc_size += num_op_obq * obq_size;
+ /* PI indexes start after the queue element array */
+ ob_pi_dma_start_offset = alloc_size;
+ ob_pi_virt_start_offset = alloc_size;
+ alloc_size += num_op_obq * sizeof(uint32_t); /*For OBQ PI*/
+
+ /* Allocate memory for OB queues */
+ softs->op_obq_dma_mem.tag = "op_ob_queue";
+ softs->op_obq_dma_mem.size = alloc_size;
+ softs->op_obq_dma_mem.align = PQI_OPQ_ELEM_ARRAY_ALIGN;
+ ret = os_dma_mem_alloc(softs, &softs->op_obq_dma_mem);
+ if (ret) {
+ DBG_ERR("Failed to Allocate Operational OBQ memory ret : %d\n",
+ ret);
+ goto err_out;
+ }
+
+ /* Set up the address */
+ virt_addr = softs->op_obq_dma_mem.virt_addr;
+ dma_addr = softs->op_obq_dma_mem.dma_addr;
+ ob_pi_dma_start_offset += dma_addr;
+ ob_pi_virt_start_addr = virt_addr + ob_pi_virt_start_offset;
+
+ DBG_INFO("softs->num_op_obq %d\n",softs->num_op_obq);
+
+ for (i = 0; i < softs->num_op_obq; i++) {
+ op_ob_q = &softs->op_ob_q[i];
+ ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64));
+ FILL_QUEUE_ARRAY_ADDR(op_ob_q,virt_addr,dma_addr);
+ op_ob_q->q_id = obq_id++;
+ if(softs->share_opq_and_eventq == true)
+ op_ob_q->intr_msg_num = i;
+ else
+ op_ob_q->intr_msg_num = i + 1; /* msg num zero for event */
+ op_ob_q->num_elem = softs->num_elem_per_op_obq;
+ op_ob_q->elem_size = softs->obq_elem_size;
+ op_ob_q->pi_dma_addr = ob_pi_dma_start_offset +
+ (i * sizeof(uint32_t));
+ op_ob_q->pi_virt_addr = (uint32_t*)(ob_pi_virt_start_addr +
+ (i * sizeof(uint32_t)));
+ ASSERT(!(op_ob_q->pi_dma_addr & PQI_ADDR_ALIGN_MASK_4));
+
+ ret = pqisrc_create_op_obq(softs,op_ob_q);
+ if (ret) {
+ DBG_ERR("Failed to Create OP OBQ %d\n",op_ob_q->q_id);
+ goto err_out_create;
+ }
+ op_ob_q->created = true;
+ virt_addr += obq_size;
+ dma_addr += obq_size;
+ }
+
+ DBG_FUNC("OUT\n");
+ return ret;
+
+err_out_create:
+ pqisrc_destroy_op_ob_queues(softs);
+err_out:
+ DBG_FUNC("OUT failed %d\n", ret);
+ return PQI_STATUS_FAILURE;
+}
+
+/*
+ * Function used to create operational queues for the adapter.
+ */
+int pqisrc_create_op_queues(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_SUCCESS;
+
+ DBG_FUNC("IN\n");
+
+ /* Create Operational IB queues */
+ ret = pqisrc_alloc_and_create_ib_queues(softs);
+ if (ret)
+ goto err_out;
+ /* Create Operational OB queues */
+ ret = pqisrc_alloc_and_create_ob_queues(softs);
+ if (ret)
+ goto err_out_obq;
+
+ /* Create Event queue */
+ ret = pqisrc_alloc_and_create_event_queue(softs);
+ if (ret)
+ goto err_out_eventq;
+
+ DBG_FUNC("OUT\n");
+ return ret;
+err_out_eventq:
+ pqisrc_destroy_op_ob_queues(softs);
+err_out_obq:
+ pqisrc_destroy_op_ib_queues(softs);
+err_out:
+ DBG_FUNC("OUT failed %d\n", ret);
+ return PQI_STATUS_FAILURE;
+}
diff --git a/sys/dev/smartpqi/smartpqi_request.c b/sys/dev/smartpqi/smartpqi_request.c
new file mode 100644
index 000000000000..36c3a9c61ee1
--- /dev/null
+++ b/sys/dev/smartpqi/smartpqi_request.c
@@ -0,0 +1,791 @@
+/*-
+ * Copyright (c) 2018 Microsemi Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#include "smartpqi_includes.h"
+
+#define SG_FLAG_LAST 0x40000000
+#define SG_FLAG_CHAIN 0x80000000
+
+/* Subroutine to find out embedded sgl count in IU */
+static inline
+uint32_t pqisrc_embedded_sgl_count(uint32_t elem_alloted)
+{
+ uint32_t embedded_sgl_count = MAX_EMBEDDED_SG_IN_FIRST_IU;
+ DBG_FUNC(" IN ");
+ /*
+ * Calculate the embedded SGL count using num_elem_alloted for the IO;
+ * each element beyond the first holds MAX_EMBEDDED_SG_IN_IU more SGs.
+ */
+ if (elem_alloted > 1)
+ embedded_sgl_count += ((elem_alloted - 1) * MAX_EMBEDDED_SG_IN_IU);
+ DBG_IO("embedded_sgl_count :%d\n",embedded_sgl_count);
+
+ DBG_FUNC(" OUT ");
+
+ return embedded_sgl_count;
+
+}
+
+/* Subroutine to find out contiguous free elem in IU */
+static inline
+uint32_t pqisrc_contiguous_free_elem(uint32_t pi, uint32_t ci, uint32_t elem_in_q)
+{
+ uint32_t contiguous_free_elem = 0;
+
+ DBG_FUNC(" IN ");
+
+ if(pi >= ci) {
+ contiguous_free_elem = (elem_in_q - pi);
+ if(ci == 0)
+ contiguous_free_elem -= 1;
+ } else {
+ contiguous_free_elem = (ci - pi - 1);
+ }
+
+ DBG_FUNC(" OUT ");
+
+ return contiguous_free_elem;
+}
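
A few concrete cases of the arithmetic above, for an 8-element queue (one slot is always kept empty so a full queue can be told apart from an empty one):

    /* elem_in_q = 8:
     *   pi = 2, ci = 5  ->  ci - pi - 1        = 2 free slots
     *   pi = 5, ci = 2  ->  elem_in_q - pi     = 3 free slots (to the wrap)
     *   pi = 5, ci = 0  ->  elem_in_q - pi - 1 = 2 (last slot is withheld,
     *                                               else pi would catch ci)
     */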
+
+/* Subroutine to find out num of elements need for the request */
+static uint32_t
+pqisrc_num_elem_needed(pqisrc_softstate_t *softs, uint32_t SG_Count)
+{
+ uint32_t num_sg;
+ uint32_t num_elem_required = 1;
+ DBG_FUNC(" IN ");
+ DBG_IO("SGL_Count :%d",SG_Count);
+ /*
+ * If SG_Count exceeds the max SGs per IU, i.e. 4 or 68
+ * (4 without spanning, 68 with spanning), a chained SGL is
+ * used and one element is enough. Likewise, if SG_Count fits
+ * in the first IU (<= MAX_EMBEDDED_SG_IN_FIRST_IU), one
+ * element is enough.
+ */
+ if(SG_Count > softs->max_sg_per_iu || SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU)
+ return num_elem_required;
+ /* SGs that do not fit in the first IU */
+ num_sg = SG_Count - MAX_EMBEDDED_SG_IN_FIRST_IU;
+ num_elem_required += PQISRC_DIV_ROUND_UP(num_sg, MAX_EMBEDDED_SG_IN_IU);
+ DBG_FUNC(" OUT ");
+ return num_elem_required;
+}
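
A worked example, assuming MAX_EMBEDDED_SG_IN_FIRST_IU = 4 and MAX_EMBEDDED_SG_IN_IU = 8 (values consistent with the 4-versus-68 comment above, but not confirmed by this hunk):

    /*
     *   SG_Count =  3  -> fits the first IU              -> 1 element
     *   SG_Count = 20  -> 16 SGs spill past the first IU -> 1 + ceil(16/8) = 3
     *   SG_Count = 70  -> > max_sg_per_iu, chained SGL   -> 1 element
     */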
+
+/* Subroutine to build SG list for the IU submission*/
+static
+boolean_t pqisrc_build_sgl(sgt_t *sg_array, rcb_t *rcb, iu_header_t *iu_hdr,
+ uint32_t num_elem_alloted)
+{
+ uint32_t i;
+ uint32_t num_sg = OS_GET_IO_SG_COUNT(rcb);
+ sgt_t *sgt = sg_array;
+ sgt_t *sg_chain = NULL;
+ boolean_t partial = false;
+
+ DBG_FUNC(" IN ");
+
+ DBG_IO("SGL_Count :%d",num_sg);
+ if (0 == num_sg) {
+ goto out;
+ }
+
+ if (num_sg <= pqisrc_embedded_sgl_count(num_elem_alloted)) {
+ for (i = 0; i < num_sg; i++, sgt++) {
+ sgt->addr= OS_GET_IO_SG_ADDR(rcb,i);
+ sgt->len= OS_GET_IO_SG_LEN(rcb,i);
+ sgt->flags= 0;
+ }
+
+ sg_array[num_sg - 1].flags = SG_FLAG_LAST;
+ } else {
+ /**
+ SGL Chaining
+ **/
+ sg_chain = rcb->sg_chain_virt;
+ sgt->addr = rcb->sg_chain_dma;
+ sgt->len = num_sg * sizeof(sgt_t);
+ sgt->flags = SG_FLAG_CHAIN;
+
+ sgt = sg_chain;
+ for (i = 0; i < num_sg; i++, sgt++) {
+ sgt->addr = OS_GET_IO_SG_ADDR(rcb,i);
+ sgt->len = OS_GET_IO_SG_LEN(rcb,i);
+ sgt->flags = 0;
+ }
+
+ sg_chain[num_sg - 1].flags = SG_FLAG_LAST;
+ num_sg = 1;
+ partial = true;
+
+ }
+out:
+ iu_hdr->iu_length = num_sg * sizeof(sgt_t);
+ DBG_FUNC(" OUT ");
+ return partial;
+
+}
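
The two shapes pqisrc_build_sgl() can produce, side by side; in the chained case only the single chain descriptor is embedded in the IU, which is why num_sg is forced to 1 before iu_length is set:

    /*
     * Embedded SGL:                    Chained SGL:
     *
     *  IU: [sg0][sg1]...[sgN-1]         IU: [chain descriptor] ----+
     *      (last entry carries              (SG_FLAG_CHAIN)        |
     *       SG_FLAG_LAST)                                          v
     *                                   rcb->sg_chain_virt:
     *                                       [sg0][sg1]...[sgN-1]
     *                                       (last entry: SG_FLAG_LAST)
     */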
+
+/*Subroutine used to Build the RAID request */
+static void
+pqisrc_build_raid_io(pqisrc_softstate_t *softs, rcb_t *rcb,
+ pqisrc_raid_req_t *raid_req, uint32_t num_elem_alloted)
+{
+ DBG_FUNC(" IN ");
+
+ raid_req->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
+ raid_req->header.comp_feature = 0;
+ raid_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
+ raid_req->work_area[0] = 0;
+ raid_req->work_area[1] = 0;
+ raid_req->request_id = rcb->tag;
+ raid_req->nexus_id = 0;
+ raid_req->buffer_length = GET_SCSI_BUFFLEN(rcb);
+ memcpy(raid_req->lun_number, rcb->dvp->scsi3addr,
+ sizeof(raid_req->lun_number));
+ raid_req->protocol_spec = 0;
+ raid_req->data_direction = rcb->data_dir;
+ raid_req->reserved1 = 0;
+ raid_req->fence = 0;
+ raid_req->error_index = raid_req->request_id;
+ raid_req->reserved2 = 0;
+ raid_req->task_attribute = OS_GET_TASK_ATTR(rcb);
+ raid_req->command_priority = 0;
+ raid_req->reserved3 = 0;
+ raid_req->reserved4 = 0;
+ raid_req->reserved5 = 0;
+
+ /* As cdb and additional_cdb_bytes are contiguous,
+ update them in a single statement */
+ memcpy(raid_req->cdb, rcb->cdbp, rcb->cmdlen);
+#if 0
+ DBG_IO("CDB :");
+ for(i = 0; i < rcb->cmdlen ; i++)
+ DBG_IO(" 0x%x \n ",raid_req->cdb[i]);
+#endif
+
+ switch (rcb->cmdlen) {
+ case 6:
+ case 10:
+ case 12:
+ case 16:
+ raid_req->additional_cdb_bytes_usage =
+ PQI_ADDITIONAL_CDB_BYTES_0;
+ break;
+ case 20:
+ raid_req->additional_cdb_bytes_usage =
+ PQI_ADDITIONAL_CDB_BYTES_4;
+ break;
+ case 24:
+ raid_req->additional_cdb_bytes_usage =
+ PQI_ADDITIONAL_CDB_BYTES_8;
+ break;
+ case 28:
+ raid_req->additional_cdb_bytes_usage =
+ PQI_ADDITIONAL_CDB_BYTES_12;
+ break;
+ case 32:
+ default: /* todo:review again */
+ raid_req->additional_cdb_bytes_usage =
+ PQI_ADDITIONAL_CDB_BYTES_16;
+ break;
+ }
+
+ /* Frame SGL Descriptor */
+ raid_req->partial = pqisrc_build_sgl(&raid_req->sg_descriptors[0], rcb,
+ &raid_req->header, num_elem_alloted);
+
+ raid_req->header.iu_length +=
+ offsetof(pqisrc_raid_req_t, sg_descriptors) - sizeof(iu_header_t);
+
+#if 0
+ DBG_IO("raid_req->header.iu_type : 0x%x", raid_req->header.iu_type);
+ DBG_IO("raid_req->response_queue_id :%d\n"raid_req->response_queue_id);
+ DBG_IO("raid_req->request_id : 0x%x", raid_req->request_id);
+ DBG_IO("raid_req->buffer_length : 0x%x", raid_req->buffer_length);
+ DBG_IO("raid_req->task_attribute : 0x%x", raid_req->task_attribute);
+ DBG_IO("raid_req->lun_number : 0x%x", raid_req->lun_number);
+ DBG_IO("raid_req->error_index : 0x%x", raid_req->error_index);
+ DBG_IO("raid_req->sg_descriptors[0].addr : %p", (void*)raid_req->sg_descriptors[0].addr);
+ DBG_IO("raid_req->sg_descriptors[0].len : 0x%x", raid_req->sg_descriptors[0].len);
+ DBG_IO("raid_req->sg_descriptors[0].flags : 0%x", raid_req->sg_descriptors[0].flags);
+#endif
+ rcb->success_cmp_callback = pqisrc_process_io_response_success;
+ rcb->error_cmp_callback = pqisrc_process_raid_response_error;
+ rcb->resp_qid = raid_req->response_queue_id;
+
+ DBG_FUNC(" OUT ");
+
+}
+
+/*Subroutine used to Build the AIO request */
+static void
+pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb,
+ pqi_aio_req_t *aio_req, uint32_t num_elem_alloted)
+{
+ DBG_FUNC(" IN ");
+
+ aio_req->header.iu_type = PQI_IU_TYPE_AIO_PATH_IO_REQUEST;
+ aio_req->header.comp_feature = 0;
+ aio_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
+ aio_req->work_area[0] = 0;
+ aio_req->work_area[1] = 0;
+ aio_req->req_id = rcb->tag;
+ aio_req->res1[0] = 0;
+ aio_req->res1[1] = 0;
+ aio_req->nexus = rcb->ioaccel_handle;
+ aio_req->buf_len = GET_SCSI_BUFFLEN(rcb);
+ aio_req->data_dir = rcb->data_dir;
+ aio_req->mem_type = 0;
+ aio_req->fence = 0;
+ aio_req->res2 = 0;
+ aio_req->task_attr = OS_GET_TASK_ATTR(rcb);
+ aio_req->cmd_prio = 0;
+ aio_req->res3 = 0;
+ aio_req->err_idx = aio_req->req_id;
+ aio_req->cdb_len = rcb->cmdlen;
+ memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen);
+#if 0
+ DBG_IO("CDB : \n");
+ for(int i = 0; i < rcb->cmdlen ; i++)
+ DBG_IO(" 0x%x \n",aio_req->cdb[i]);
+#endif
+ memset(aio_req->lun,0,sizeof(aio_req->lun));
+ memset(aio_req->res4,0,sizeof(aio_req->res4));
+
+ if(rcb->encrypt_enable == true) {
+ aio_req->encrypt_enable = true;
+ aio_req->encrypt_key_index = LE_16(rcb->enc_info.data_enc_key_index);
+ aio_req->encrypt_twk_low = LE_32(rcb->enc_info.encrypt_tweak_lower);
+ aio_req->encrypt_twk_high = LE_32(rcb->enc_info.encrypt_tweak_upper);
+ } else {
+ aio_req->encrypt_enable = 0;
+ aio_req->encrypt_key_index = 0;
+ aio_req->encrypt_twk_high = 0;
+ aio_req->encrypt_twk_low = 0;
+ }
+
+ /* Frame SGL Descriptor */
+ aio_req->partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb,
+ &aio_req->header, num_elem_alloted);
+
+ aio_req->num_sg = aio_req->header.iu_length / sizeof(sgt_t);
+
+ DBG_INFO("aio_req->num_sg :%d",aio_req->num_sg);
+
+ aio_req->header.iu_length += offsetof(pqi_aio_req_t, sg_desc) -
+ sizeof(iu_header_t);
+#if 0
+ DBG_IO("aio_req->header.iu_type : 0x%x \n",aio_req->header.iu_type);
+ DBG_IO("aio_req->resp_qid :0x%x",aio_req->resp_qid);
+ DBG_IO("aio_req->req_id : 0x%x \n",aio_req->req_id);
+ DBG_IO("aio_req->nexus : 0x%x \n",aio_req->nexus);
+ DBG_IO("aio_req->buf_len : 0x%x \n",aio_req->buf_len);
+ DBG_IO("aio_req->data_dir : 0x%x \n",aio_req->data_dir);
+ DBG_IO("aio_req->task_attr : 0x%x \n",aio_req->task_attr);
+ DBG_IO("aio_req->err_idx : 0x%x \n",aio_req->err_idx);
+ DBG_IO("aio_req->num_sg :%d",aio_req->num_sg);
+ DBG_IO("aio_req->sg_desc[0].addr : %p \n", (void*)aio_req->sg_desc[0].addr);
+ DBG_IO("aio_req->sg_desc[0].len : 0%x \n", aio_req->sg_desc[0].len);
+ DBG_IO("aio_req->sg_desc[0].flags : 0%x \n", aio_req->sg_desc[0].flags);
+#endif
+
+ rcb->success_cmp_callback = pqisrc_process_io_response_success;
+ rcb->error_cmp_callback = pqisrc_process_aio_response_error;
+ rcb->resp_qid = aio_req->response_queue_id;
+
+ DBG_FUNC(" OUT ");
+
+}
+
+/*Function used to build and send RAID/AIO */
+int pqisrc_build_send_io(pqisrc_softstate_t *softs,rcb_t *rcb)
+{
+ ib_queue_t *ib_q_array = softs->op_aio_ib_q;
+ ib_queue_t *ib_q = NULL;
+ char *ib_iu = NULL;
+ IO_PATH_T io_path = AIO_PATH;
+ uint32_t TraverseCount = 0;
+ int first_qindex = OS_GET_IO_REQ_QINDEX(softs, rcb);
+ int qindex = first_qindex;
+ uint32_t num_op_ib_q = softs->num_op_aio_ibq;
+ uint32_t num_elem_needed;
+ uint32_t num_elem_alloted = 0;
+ pqi_scsi_dev_t *devp = rcb->dvp;
+ uint8_t raidbypass_cdb[16];
+
+ DBG_FUNC(" IN ");
+
+
+ rcb->cdbp = OS_GET_CDBP(rcb);
+
+ if(IS_AIO_PATH(devp)) {
+ /** IO for Physical Drive **/
+ /** Send in AIO PATH**/
+ rcb->ioaccel_handle = devp->ioaccel_handle;
+ } else {
+ int ret = PQI_STATUS_FAILURE;
+ /** IO for RAID Volume **/
+ if (devp->offload_enabled) {
+ /** ByPass IO ,Send in AIO PATH **/
+ ret = pqisrc_send_scsi_cmd_raidbypass(softs,
+ devp, rcb, raidbypass_cdb);
+ }
+
+ if (PQI_STATUS_FAILURE == ret) {
+ /** Send in RAID PATH **/
+ io_path = RAID_PATH;
+ num_op_ib_q = softs->num_op_raid_ibq;
+ ib_q_array = softs->op_raid_ib_q;
+ } else {
+ rcb->cdbp = raidbypass_cdb;
+ }
+ }
+
+ num_elem_needed = pqisrc_num_elem_needed(softs, OS_GET_IO_SG_COUNT(rcb));
+ DBG_IO("num_elem_needed :%d",num_elem_needed);
+
+ do {
+ uint32_t num_elem_available;
+ ib_q = (ib_q_array + qindex);
+ PQI_LOCK(&ib_q->lock);
+ num_elem_available = pqisrc_contiguous_free_elem(ib_q->pi_local,
+ *(ib_q->ci_virt_addr), ib_q->num_elem);
+
+ DBG_IO("num_elem_avialable :%d\n",num_elem_available);
+ if(num_elem_available >= num_elem_needed) {
+ num_elem_alloted = num_elem_needed;
+ break;
+ }
+ DBG_IO("Current queue is busy! Hop to next queue\n");
+
+ PQI_UNLOCK(&ib_q->lock);
+ qindex = (qindex + 1) % num_op_ib_q;
+ if(qindex == first_qindex) {
+ if (num_elem_needed == 1)
+ break;
+ TraverseCount += 1;
+ num_elem_needed = 1;
+ }
+ } while (TraverseCount < 2);
+
+ DBG_IO("num_elem_alloted :%d",num_elem_alloted);
+ if (num_elem_alloted == 0) {
+ DBG_WARN("OUT: IB Queues were full\n");
+ return PQI_STATUS_QFULL;
+ }
+
+ /* Get IB Queue Slot address to build IU */
+ ib_iu = ib_q->array_virt_addr + (ib_q->pi_local * ib_q->elem_size);
+
+ if(io_path == AIO_PATH) {
+ /** Build AIO structure **/
+ pqisrc_build_aio_io(softs, rcb, (pqi_aio_req_t*)ib_iu,
+ num_elem_alloted);
+ } else {
+ /** Build RAID structure **/
+ pqisrc_build_raid_io(softs, rcb, (pqisrc_raid_req_t*)ib_iu,
+ num_elem_alloted);
+ }
+
+ rcb->req_pending = true;
+
+ /* Update the local PI */
+ ib_q->pi_local = (ib_q->pi_local + num_elem_alloted) % ib_q->num_elem;
+
+ DBG_INFO("ib_q->pi_local : %x\n", ib_q->pi_local);
+ DBG_INFO("*ib_q->ci_virt_addr: %x\n",*(ib_q->ci_virt_addr));
+
+ /* Inform the fw about the new IU */
+ PCI_MEM_PUT32(softs, ib_q->pi_register_abs, ib_q->pi_register_offset, ib_q->pi_local);
+
+ PQI_UNLOCK(&ib_q->lock);
+ DBG_FUNC(" OUT ");
+ return PQI_STATUS_SUCCESS;
+}
+
+/* Subroutine used to set encryption info as part of RAID bypass IO*/
+static inline void pqisrc_set_enc_info(
+ struct pqi_enc_info *enc_info, struct raid_map *raid_map,
+ uint64_t first_block)
+{
+ uint32_t volume_blk_size;
+
+ /*
+ * Set the encryption tweak values based on logical block address.
+ * If the block size is 512, the tweak value is equal to the LBA.
+ * For other block sizes, tweak value is (LBA * block size) / 512.
+ */
+ volume_blk_size = GET_LE32((uint8_t *)&raid_map->volume_blk_size);
+ if (volume_blk_size != 512)
+ first_block = (first_block * volume_blk_size) / 512;
+
+ enc_info->data_enc_key_index =
+ GET_LE16((uint8_t *)&raid_map->data_encryption_key_index);
+ enc_info->encrypt_tweak_upper = ((uint32_t)(((first_block) >> 16) >> 16));
+ enc_info->encrypt_tweak_lower = ((uint32_t)(first_block));
+}
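
A worked tweak computation for a 4096-byte-block volume (values chosen for illustration):

    /*
     * volume_blk_size = 4096, first_block = 0x123456789:
     *   tweak = 0x123456789 * 4096 / 512 = 0x123456789 * 8 = 0x91A2B3C48
     *   encrypt_tweak_upper = 0x00000009  (bits 63:32)
     *   encrypt_tweak_lower = 0x1A2B3C48  (bits 31:0)
     */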
+
+
+/*
+ * Attempt to perform offload RAID mapping for a logical volume I/O.
+ */
+
+#define HPSA_RAID_0 0
+#define HPSA_RAID_4 1
+#define HPSA_RAID_1 2 /* also used for RAID 10 */
+#define HPSA_RAID_5 3 /* also used for RAID 50 */
+#define HPSA_RAID_51 4
+#define HPSA_RAID_6 5 /* also used for RAID 60 */
+#define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
+#define HPSA_RAID_MAX HPSA_RAID_ADM
+#define HPSA_RAID_UNKNOWN 0xff
+
+/* Subroutine used to parse the scsi opcode and build the CDB for RAID bypass*/
+int check_for_scsi_opcode(uint8_t *cdb, boolean_t *is_write, uint64_t *fst_blk,
+ uint32_t *blk_cnt)
+{
+ switch (cdb[0]) {
+ case SCMD_WRITE_6:
+ *is_write = true;
+ /* FALLTHROUGH */
+ case SCMD_READ_6:
+ *fst_blk = (uint64_t)(((cdb[1] & 0x1F) << 16) |
+ (cdb[2] << 8) | cdb[3]);
+ *blk_cnt = (uint32_t)cdb[4];
+ if (*blk_cnt == 0)
+ *blk_cnt = 256;
+ break;
+ case SCMD_WRITE_10:
+ *is_write = true;
+ /* FALLTHROUGH */
+ case SCMD_READ_10:
+ *fst_blk = (uint64_t)GET_BE32(&cdb[2]);
+ *blk_cnt = (uint32_t)GET_BE16(&cdb[7]);
+ break;
+ case SCMD_WRITE_12:
+ *is_write = true;
+ /* FALLTHROUGH */
+ case SCMD_READ_12:
+ *fst_blk = (uint64_t)GET_BE32(&cdb[2]);
+ *blk_cnt = GET_BE32(&cdb[6]);
+ break;
+ case SCMD_WRITE_16:
+ *is_write = true;
+ /* FALLTHROUGH */
+ case SCMD_READ_16:
+ *fst_blk = GET_BE64(&cdb[2]);
+ *blk_cnt = GET_BE32(&cdb[10]);
+ break;
+ default:
+ /* Process via normal I/O path. */
+ return PQI_STATUS_FAILURE;
+ }
+ return PQI_STATUS_SUCCESS;
+}
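
A usage sketch decoding a READ(10) CDB; the byte layout follows SBC (big-endian LBA in bytes 2-5, big-endian transfer length in bytes 7-8), and 0x28 is the standard SCSI READ(10) opcode:

    /* READ(10): opcode 0x28, LBA 0x00ABCDEF, 16 blocks. */
    uint8_t cdb[16] = { 0x28, 0x00,
            0x00, 0xAB, 0xCD, 0xEF,    /* bytes 2-5: big-endian LBA */
            0x00, 0x00, 0x10, 0x00 };  /* bytes 7-8: big-endian count */
    boolean_t is_write = false;
    uint64_t lba = 0;
    uint32_t nblk = 0;

    if (check_for_scsi_opcode(cdb, &is_write, &lba, &nblk) ==
        PQI_STATUS_SUCCESS) {
            /* is_write == false, lba == 0x00ABCDEF, nblk == 16 */
    }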
+
+/*
+ * Function used to build and send RAID bypass request to the adapter
+ */
+int pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device, rcb_t *rcb, uint8_t *cdb)
+{
+ struct raid_map *raid_map;
+ boolean_t is_write = false;
+ uint32_t map_idx;
+ uint64_t fst_blk, lst_blk;
+ uint32_t blk_cnt, blks_per_row;
+ uint64_t fst_row, lst_row;
+ uint32_t fst_row_offset, lst_row_offset;
+ uint32_t fst_col, lst_col;
+ uint32_t r5or6_blks_per_row;
+ uint64_t r5or6_fst_row, r5or6_lst_row;
+ uint32_t r5or6_fst_row_offset, r5or6_lst_row_offset;
+ uint32_t r5or6_fst_col, r5or6_lst_col;
+ uint16_t data_disks_per_row, total_disks_per_row;
+ uint16_t layout_map_count;
+ uint32_t stripesz;
+ uint16_t strip_sz;
+ uint32_t fst_grp, lst_grp, cur_grp;
+ uint32_t map_row;
+ uint64_t disk_block;
+ uint32_t disk_blk_cnt;
+ uint8_t cdb_length;
+ int offload_to_mirror;
+ int i;
+ DBG_FUNC(" IN \n");
+ DBG_IO("!!!!!\n");
+
+ /* Check for eligible opcode, get LBA and block count. */
+ memcpy(cdb, OS_GET_CDBP(rcb), rcb->cmdlen);
+
+	for (i = 0; i < rcb->cmdlen; i++)
+		DBG_IO("CDB[%d] : %x\n", i, cdb[i]);
+	if (check_for_scsi_opcode(cdb, &is_write,
+		&fst_blk, &blk_cnt) == PQI_STATUS_FAILURE)
+		return PQI_STATUS_FAILURE;
+	/* Check for write to non-RAID-0. */
+	if (is_write && device->raid_level != SA_RAID_0)
+		return PQI_STATUS_FAILURE;
+
+	if (blk_cnt == 0)
+		return PQI_STATUS_FAILURE;
+
+ lst_blk = fst_blk + blk_cnt - 1;
+ raid_map = device->raid_map;
+
+ /* Check for invalid block or wraparound. */
+ if (lst_blk >= GET_LE64((uint8_t *)&raid_map->volume_blk_cnt) ||
+ lst_blk < fst_blk)
+ return PQI_STATUS_FAILURE;
+
+ data_disks_per_row = GET_LE16((uint8_t *)&raid_map->data_disks_per_row);
+ strip_sz = GET_LE16((uint8_t *)(&raid_map->strip_size));
+ layout_map_count = GET_LE16((uint8_t *)(&raid_map->layout_map_count));
+
+ /* Calculate stripe information for the request. */
+ blks_per_row = data_disks_per_row * strip_sz;
+
+	/* the divisions below are 64-bit and may compile to __udivdi3 on 32-bit platforms */
+ fst_row = fst_blk / blks_per_row;
+ lst_row = lst_blk / blks_per_row;
+ fst_row_offset = (uint32_t)(fst_blk - (fst_row * blks_per_row));
+ lst_row_offset = (uint32_t)(lst_blk - (lst_row * blks_per_row));
+ fst_col = fst_row_offset / strip_sz;
+ lst_col = lst_row_offset / strip_sz;
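+	/*
+	 * Example (illustrative): with strip_sz = 128 and
+	 * data_disks_per_row = 4, blks_per_row = 512; an I/O at
+	 * fst_blk = 1000 lands in fst_row = 1 with fst_row_offset = 488
+	 * and fst_col = 3.
+	 */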
+
+ /* If this isn't a single row/column then give to the controller. */
+ if (fst_row != lst_row || fst_col != lst_col)
+ return PQI_STATUS_FAILURE;
+
+ /* Proceeding with driver mapping. */
+ total_disks_per_row = data_disks_per_row +
+ GET_LE16((uint8_t *)(&raid_map->metadata_disks_per_row));
+ map_row = ((uint32_t)(fst_row >> raid_map->parity_rotation_shift)) %
+ GET_LE16((uint8_t *)(&raid_map->row_cnt));
+ map_idx = (map_row * total_disks_per_row) + fst_col;
+
+ /* RAID 1 */
+ if (device->raid_level == SA_RAID_1) {
+ if (device->offload_to_mirror)
+ map_idx += data_disks_per_row;
+ device->offload_to_mirror = !device->offload_to_mirror;
+ } else if (device->raid_level == SA_RAID_ADM) {
+ /* RAID ADM */
+ /*
+ * Handles N-way mirrors (R1-ADM) and R10 with # of drives
+ * divisible by 3.
+ */
+ offload_to_mirror = device->offload_to_mirror;
+ if (offload_to_mirror == 0) {
+ /* use physical disk in the first mirrored group. */
+ map_idx %= data_disks_per_row;
+ } else {
+ do {
+ /*
+ * Determine mirror group that map_idx
+ * indicates.
+ */
+ cur_grp = map_idx / data_disks_per_row;
+
+ if (offload_to_mirror != cur_grp) {
+ if (cur_grp <
+ layout_map_count - 1) {
+ /*
+ * Select raid index from
+ * next group.
+ */
+ map_idx += data_disks_per_row;
+ cur_grp++;
+ } else {
+ /*
+ * Select raid index from first
+ * group.
+ */
+ map_idx %= data_disks_per_row;
+ cur_grp = 0;
+ }
+ }
+ } while (offload_to_mirror != cur_grp);
+ }
+
+		/* Set mirror group to use next time. */
+		offload_to_mirror =
+		    (offload_to_mirror >= layout_map_count - 1) ?
+			0 : offload_to_mirror + 1;
+		if (offload_to_mirror >= layout_map_count)
+			return PQI_STATUS_FAILURE;
+
+		/*
+		 * Work on the local copy rather than directly on
+		 * device->offload_to_mirror, since multiple threads could
+		 * otherwise increment it beyond layout_map_count - 1.
+		 */
+		device->offload_to_mirror = offload_to_mirror;
+ } else if ((device->raid_level == SA_RAID_5 ||
+ device->raid_level == SA_RAID_6) && layout_map_count > 1) {
+ /* RAID 50/60 */
+ /* Verify first and last block are in same RAID group */
+ r5or6_blks_per_row = strip_sz * data_disks_per_row;
+ stripesz = r5or6_blks_per_row * layout_map_count;
+
+ fst_grp = (fst_blk % stripesz) / r5or6_blks_per_row;
+ lst_grp = (lst_blk % stripesz) / r5or6_blks_per_row;
+
+ if (fst_grp != lst_grp)
+ return PQI_STATUS_FAILURE;
+
+ /* Verify request is in a single row of RAID 5/6 */
+ fst_row = r5or6_fst_row =
+ fst_blk / stripesz;
+ r5or6_lst_row = lst_blk / stripesz;
+
+ if (r5or6_fst_row != r5or6_lst_row)
+ return PQI_STATUS_FAILURE;
+
+ /* Verify request is in a single column */
+ fst_row_offset = r5or6_fst_row_offset =
+ (uint32_t)((fst_blk % stripesz) %
+ r5or6_blks_per_row);
+
+ r5or6_lst_row_offset =
+ (uint32_t)((lst_blk % stripesz) %
+ r5or6_blks_per_row);
+
+ fst_col = r5or6_fst_row_offset / strip_sz;
+ r5or6_fst_col = fst_col;
+ r5or6_lst_col = r5or6_lst_row_offset / strip_sz;
+
+ if (r5or6_fst_col != r5or6_lst_col)
+ return PQI_STATUS_FAILURE;
+
+ /* Request is eligible */
+ map_row =
+ ((uint32_t)(fst_row >> raid_map->parity_rotation_shift)) %
+ GET_LE16((uint8_t *)(&raid_map->row_cnt));
+
+ map_idx = (fst_grp *
+ (GET_LE16((uint8_t *)(&raid_map->row_cnt)) *
+ total_disks_per_row)) +
+ (map_row * total_disks_per_row) + fst_col;
+ }
+
+ if (map_idx >= RAID_MAP_MAX_ENTRIES)
+ return PQI_STATUS_FAILURE;
+
+ rcb->ioaccel_handle = raid_map->dev_data[map_idx].ioaccel_handle;
+ disk_block = GET_LE64((uint8_t *)(&raid_map->disk_starting_blk)) +
+ fst_row * strip_sz +
+ (fst_row_offset - fst_col * strip_sz);
+ disk_blk_cnt = blk_cnt;
+
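+	/*
+	 * Example (illustrative): phys_blk_shift = 3 means each 4096-byte
+	 * logical block spans eight 512-byte physical blocks, so a request
+	 * for 8 logical blocks becomes 64 physical blocks below.
+	 */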
+ /* Handle differing logical/physical block sizes. */
+ if (raid_map->phys_blk_shift) {
+ disk_block <<= raid_map->phys_blk_shift;
+ disk_blk_cnt <<= raid_map->phys_blk_shift;
+ }
+
+ if (disk_blk_cnt > 0xffff)
+ return PQI_STATUS_FAILURE;
+
+ /* Build the new CDB for the physical disk I/O. */
+ if (disk_block > 0xffffffff) {
+ cdb[0] = is_write ? SCMD_WRITE_16 : SCMD_READ_16;
+ cdb[1] = 0;
+ PUT_BE64(disk_block, &cdb[2]);
+ PUT_BE32(disk_blk_cnt, &cdb[10]);
+ cdb[14] = 0;
+ cdb[15] = 0;
+ cdb_length = 16;
+ } else {
+ cdb[0] = is_write ? SCMD_WRITE_10 : SCMD_READ_10;
+ cdb[1] = 0;
+ PUT_BE32(disk_block, &cdb[2]);
+ cdb[6] = 0;
+ PUT_BE16(disk_blk_cnt, &cdb[7]);
+ cdb[9] = 0;
+ cdb_length = 10;
+ }
+
+ if (GET_LE16((uint8_t *)(&raid_map->flags)) &
+ RAID_MAP_ENCRYPTION_ENABLED) {
+ pqisrc_set_enc_info(&rcb->enc_info, raid_map,
+ fst_blk);
+ rcb->encrypt_enable = true;
+ } else {
+ rcb->encrypt_enable = false;
+ }
+
+ rcb->cmdlen = cdb_length;
+
+
+ DBG_FUNC("OUT");
+
+ return PQI_STATUS_SUCCESS;
+}
+
+/* Function used to submit a TMF to the adapter */
+int pqisrc_send_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
+ rcb_t *rcb, int req_id, int tmf_type)
+{
+ int rval = PQI_STATUS_SUCCESS;
+ pqi_tmf_req_t tmf_req;
+
+ memset(&tmf_req, 0, sizeof(pqi_tmf_req_t));
+
+ DBG_FUNC("IN");
+
+ tmf_req.header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
+ tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t);
+ tmf_req.req_id = rcb->tag;
+
+ memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun));
+ tmf_req.tmf = tmf_type;
+ tmf_req.req_id_to_manage = req_id;
+ tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb);
+ tmf_req.obq_id_to_manage = rcb->resp_qid;
+
+ rcb->req_pending = true;
+
+ rval = pqisrc_submit_cmnd(softs,
+ &softs->op_raid_ib_q[OS_GET_TMF_REQ_QINDEX(softs, rcb)], &tmf_req);
+ if (rval != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Unable to submit command rval=%d\n", rval);
+ return rval;
+ }
+
+ rval = pqisrc_wait_on_condition(softs, rcb);
+	if (rval != PQI_STATUS_SUCCESS) {
+		DBG_ERR("Task Management tmf_type : %d timed out\n", tmf_type);
+ rcb->status = REQUEST_FAILED;
+ }
+
+ if (rcb->status != REQUEST_SUCCESS) {
+ DBG_ERR_BTL(devp, "Task Management failed tmf_type:%d "
+ "stat:0x%x\n", tmf_type, rcb->status);
+ rval = PQI_STATUS_FAILURE;
+ }
+
+ DBG_FUNC("OUT");
+ return rval;
+}
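+
+#if 0
+/*
+ * Usage sketch (illustrative, not compiled): aborting an outstanding
+ * request.  SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK is assumed to be one
+ * of the TMF type codes defined in smartpqi_defines.h, and victim_rcb is
+ * a hypothetical pointer to the request being aborted.
+ */
+int ret = pqisrc_send_tmf(softs, devp, rcb, victim_rcb->tag,
+	SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK);
+if (ret != PQI_STATUS_SUCCESS)
+	DBG_ERR("abort failed for tag %u\n", victim_rcb->tag);
+#endif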
diff --git a/sys/dev/smartpqi/smartpqi_response.c b/sys/dev/smartpqi/smartpqi_response.c
new file mode 100644
index 000000000000..f6ab919ca32b
--- /dev/null
+++ b/sys/dev/smartpqi/smartpqi_response.c
@@ -0,0 +1,236 @@
+/*-
+ * Copyright (c) 2018 Microsemi Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#include "smartpqi_includes.h"
+
+/*
+ * Process internal RAID response in the case of success.
+ */
+void pqisrc_process_internal_raid_response_success(pqisrc_softstate_t *softs,
+ rcb_t *rcb)
+{
+ DBG_FUNC("IN");
+
+ rcb->status = REQUEST_SUCCESS;
+ rcb->req_pending = false;
+
+ DBG_FUNC("OUT");
+}
+
+/*
+ * Process internal RAID response in the case of failure.
+ */
+void pqisrc_process_internal_raid_response_error(pqisrc_softstate_t *softs,
+ rcb_t *rcb, uint16_t err_idx)
+{
+ raid_path_error_info_elem_t error_info;
+
+ DBG_FUNC("IN");
+
+ rcb->error_info = (char *) (softs->err_buf_dma_mem.virt_addr) +
+ (err_idx * PQI_ERROR_BUFFER_ELEMENT_LENGTH);
+ rcb->status = REQUEST_SUCCESS;
+ memcpy(&error_info, rcb->error_info, sizeof(error_info));
+
+ DBG_INFO("error_status 0x%x data_in_result 0x%x data_out_result 0x%x\n",
+ error_info.status, error_info.data_in_result, error_info.data_out_result);
+
+ if (error_info.status != 0)
+ rcb->status = REQUEST_FAILED;
+ if (error_info.data_in_result != PQI_RAID_DATA_IN_OUT_GOOD)
+ rcb->status = REQUEST_FAILED;
+ if (error_info.data_out_result != PQI_RAID_DATA_IN_OUT_GOOD)
+ rcb->status = REQUEST_FAILED;
+
+ rcb->req_pending = false;
+
+ DBG_FUNC("OUT");
+}
+
+/*
+ * Process the AIO/RAID IO in the case of success.
+ */
+void pqisrc_process_io_response_success(pqisrc_softstate_t *softs,
+ rcb_t *rcb)
+{
+ DBG_FUNC("IN");
+
+ os_io_response_success(rcb);
+
+ DBG_FUNC("OUT");
+}
+
+/*
+ * Process the error info for AIO in the case of failure.
+ */
+void pqisrc_process_aio_response_error(pqisrc_softstate_t *softs,
+ rcb_t *rcb, uint16_t err_idx)
+{
+ aio_path_error_info_elem_t *err_info = NULL;
+
+ DBG_FUNC("IN");
+
+ err_info = (aio_path_error_info_elem_t*)
+ softs->err_buf_dma_mem.virt_addr +
+ err_idx;
+
+	if (err_info == NULL) {
+ DBG_ERR("err_info structure is NULL err_idx :%x", err_idx);
+ return;
+ }
+
+ os_aio_response_error(rcb, err_info);
+
+ DBG_FUNC("OUT");
+}
+
+/*
+ * Process the error info for RAID IO in the case of failure.
+ */
+void pqisrc_process_raid_response_error(pqisrc_softstate_t *softs,
+ rcb_t *rcb, uint16_t err_idx)
+{
+ raid_path_error_info_elem_t *err_info = NULL;
+
+ DBG_FUNC("IN");
+
+ err_info = (raid_path_error_info_elem_t*)
+ softs->err_buf_dma_mem.virt_addr +
+ err_idx;
+
+	if (err_info == NULL) {
+ DBG_ERR("err_info structure is NULL err_idx :%x", err_idx);
+ return;
+ }
+
+ os_raid_response_error(rcb, err_info);
+
+ DBG_FUNC("OUT");
+}
+
+/*
+ * Process the Task Management function response.
+ */
+int pqisrc_process_task_management_response(pqisrc_softstate_t *softs,
+ pqi_tmf_resp_t *tmf_resp)
+{
+ int ret = REQUEST_SUCCESS;
+ uint32_t tag = (uint32_t)tmf_resp->req_id;
+ rcb_t *rcb = &softs->rcb[tag];
+
+ ASSERT(rcb->tag == tag);
+
+ DBG_FUNC("IN\n");
+
+ switch (tmf_resp->resp_code) {
+ case SOP_TASK_MANAGEMENT_FUNCTION_COMPLETE:
+ case SOP_TASK_MANAGEMENT_FUNCTION_SUCCEEDED:
+ ret = REQUEST_SUCCESS;
+ break;
+ default:
+ DBG_ERR("TMF Failed, Response code : 0x%x\n", tmf_resp->resp_code);
+ ret = REQUEST_FAILED;
+ break;
+ }
+
+ rcb->status = ret;
+ rcb->req_pending = false;
+
+ DBG_FUNC("OUT");
+ return ret;
+}
+
+/*
+ * Function used to process the response from the adapter
+ * which is invoked by IRQ handler.
+ */
+void
+pqisrc_process_response_queue(pqisrc_softstate_t *softs, int oq_id)
+{
+ ob_queue_t *ob_q;
+ struct pqi_io_response *response;
+ uint32_t oq_pi, oq_ci;
+
+ DBG_FUNC("IN");
+
+ OS_ATOMIC64_INC(softs, num_intrs);
+
+	ob_q = &softs->op_ob_q[oq_id - 1]; /* oq_id 0 is reserved for the event queue */
+ oq_ci = ob_q->ci_local;
+ oq_pi = *(ob_q->pi_virt_addr);
+
+ DBG_INFO("ci : %d pi : %d qid : %d\n", oq_ci, oq_pi, ob_q->q_id);
+
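+	/*
+	 * Example (illustrative): with num_elem = 8, ci_local = 6 and a
+	 * hardware PI of 2, the loop below consumes elements 6, 7, 0 and 1,
+	 * then writes 2 back to the CI register.
+	 */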
+ while (1) {
+ rcb_t *rcb = NULL;
+ uint32_t tag = 0;
+ uint32_t offset;
+
+ if (oq_pi == oq_ci)
+ break;
+ /* Get the response */
+ offset = oq_ci * ob_q->elem_size;
+ response = (struct pqi_io_response *)(ob_q->array_virt_addr +
+ offset);
+ tag = response->request_id;
+ rcb = &softs->rcb[tag];
+ /* Make sure we are processing a valid response. */
+ ASSERT(rcb->tag == tag && rcb->req_pending);
+ rcb->req_pending = false;
+
+ DBG_INFO("response.header.iu_type : %x \n", response->header.iu_type);
+
+ switch (response->header.iu_type) {
+ case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
+ case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
+ rcb->success_cmp_callback(softs, rcb);
+ break;
+ case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
+ case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
+ rcb->error_cmp_callback(softs, rcb, LE_16(response->error_index));
+ break;
+ case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
+ rcb->req_pending = false;
+ break;
+ case PQI_RESPONSE_IU_TASK_MANAGEMENT:
+ rcb->status = pqisrc_process_task_management_response(softs, (void *)response);
+ break;
+
+ default:
+			DBG_ERR("Invalid Response IU 0x%x\n", response->header.iu_type);
+ break;
+ }
+
+ oq_ci = (oq_ci + 1) % ob_q->num_elem;
+ }
+
+ ob_q->ci_local = oq_ci;
+	PCI_MEM_PUT32(softs, ob_q->ci_register_abs,
+		ob_q->ci_register_offset, ob_q->ci_local);
+ DBG_FUNC("OUT");
+}
diff --git a/sys/dev/smartpqi/smartpqi_sis.c b/sys/dev/smartpqi/smartpqi_sis.c
new file mode 100644
index 000000000000..7661d6634244
--- /dev/null
+++ b/sys/dev/smartpqi/smartpqi_sis.c
@@ -0,0 +1,451 @@
+/*-
+ * Copyright (c) 2018 Microsemi Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#include "smartpqi_includes.h"
+
+/* Clear the MSI-X enable bit in the SIS host-to-IOA doorbell register */
+void sis_disable_msix(pqisrc_softstate_t *softs)
+{
+ uint32_t db_reg;
+
+ DBG_FUNC("IN\n");
+
+ db_reg = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db,
+ LEGACY_SIS_IDBR);
+ db_reg &= ~SIS_ENABLE_MSIX;
+ PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
+ LEGACY_SIS_IDBR, db_reg);
+
+ DBG_FUNC("OUT\n");
+}
+
+/* Trigger an NMI as part of taking the controller offline */
+void pqisrc_trigger_nmi_sis(pqisrc_softstate_t *softs)
+{
+
+ DBG_FUNC("IN\n");
+
+ PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
+ LEGACY_SIS_IDBR, LE_32(TRIGGER_NMI_SIS));
+ DBG_FUNC("OUT\n");
+}
+
+/* Switch the adapter back to SIS mode during uninitialization */
+int pqisrc_reenable_sis(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ uint32_t timeout = SIS_ENABLE_TIMEOUT;
+
+ DBG_FUNC("IN\n");
+
+ PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
+ LEGACY_SIS_IDBR, LE_32(REENABLE_SIS));
+
+	COND_WAIT(((PCI_MEM_GET32(softs, &softs->ioa_reg->ioa_to_host_db, LEGACY_SIS_ODBR_R) &
+		REENABLE_SIS) == 0), timeout);
+	if (!timeout) {
+		DBG_WARN(" [ %s ] failed to re-enable SIS mode\n", __func__);
+ ret = PQI_STATUS_TIMEOUT;
+ }
+
+ DBG_FUNC("OUT\n");
+ return ret;
+}
+
+/* Validate the FW status PQI_CTRL_KERNEL_UP_AND_RUNNING */
+int pqisrc_check_fw_status(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ uint32_t timeout = SIS_STATUS_OK_TIMEOUT;
+
+ DBG_FUNC("IN\n");
+
+ OS_SLEEP(1000000);
+ COND_WAIT((GET_FW_STATUS(softs) &
+ PQI_CTRL_KERNEL_UP_AND_RUNNING), timeout);
+ if (!timeout) {
+		DBG_ERR("FW status check timed out\n");
+ ret = PQI_STATUS_TIMEOUT;
+ }
+
+ DBG_FUNC("OUT\n");
+ return ret;
+}
+
+/* Function used to submit a SIS command to the adapter */
+static int pqisrc_send_sis_cmd(pqisrc_softstate_t *softs,
+ uint32_t *mb)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ int i = 0;
+ uint32_t timeout = SIS_CMD_COMPLETE_TIMEOUT;
+
+ int val;
+
+ DBG_FUNC("IN\n");
+
+
+ /* Copy Command to mailbox */
+ for (i = 0; i < 6; i++)
+ PCI_MEM_PUT32(softs, &softs->ioa_reg->mb[i],
+ LEGACY_SIS_SRCV_MAILBOX+i*4, LE_32(mb[i]));
+
+ PCI_MEM_PUT32(softs, &softs->ioa_reg->ioa_to_host_db_clr,
+ LEGACY_SIS_ODBR_R, LE_32(0x1000));
+
+ /* Submit the command */
+ PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
+ LEGACY_SIS_IDBR, LE_32(SIS_CMD_SUBMIT));
+
+#ifdef SIS_POLL_WAIT
+	/* Wait 20 milliseconds before polling for completion */
+ OS_BUSYWAIT(SIS_POLL_START_WAIT_TIME);
+#endif
+
+ val = PCI_MEM_GET32(softs, &softs->ioa_reg->ioa_to_host_db, LEGACY_SIS_ODBR_R);
+
+	DBG_FUNC("val : %x\n", val);
+ /* Spin waiting for the command to complete */
+ COND_WAIT((PCI_MEM_GET32(softs, &softs->ioa_reg->ioa_to_host_db, LEGACY_SIS_ODBR_R) &
+ SIS_CMD_COMPLETE), timeout);
+ if (!timeout) {
+		DBG_ERR("Sync command 0x%x timed out\n", mb[0]);
+ ret = PQI_STATUS_TIMEOUT;
+ goto err_out;
+ }
+ /* Check command status */
+ mb[0] = LE_32(PCI_MEM_GET32(softs, &softs->ioa_reg->mb[0], LEGACY_SIS_SRCV_MAILBOX));
+
+ if (mb[0] != SIS_CMD_STATUS_SUCCESS) {
+ DBG_ERR("SIS cmd failed with status = 0x%x\n",
+ mb[0]);
+ ret = PQI_STATUS_FAILURE;
+ goto err_out;
+ }
+
+ /* Copy the mailbox back */
+ for (i = 1; i < 6; i++)
+ mb[i] = LE_32(PCI_MEM_GET32(softs, &softs->ioa_reg->mb[i], LEGACY_SIS_SRCV_MAILBOX+i*4));
+
+ DBG_FUNC("OUT\n");
+ return ret;
+
+err_out:
+ DBG_FUNC("OUT failed\n");
+ return ret;
+}
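+
+#if 0
+/*
+ * Usage sketch (illustrative, not compiled): every SIS command places its
+ * opcode in mb[0] and parameters in mb[1..5]; on success the mailbox is
+ * overwritten with the reply, as in the callers below.
+ */
+uint32_t mb[6] = {0};
+mb[0] = SIS_CMD_GET_ADAPTER_PROPERTIES;
+if (pqisrc_send_sis_cmd(softs, mb) == PQI_STATUS_SUCCESS)
+	DBG_INFO("properties = %x, ext = %x\n", mb[1], mb[4]);
+#endif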
+
+/* First SIS command for the adapter to check PQI support */
+int pqisrc_get_adapter_properties(pqisrc_softstate_t *softs,
+ uint32_t *prop, uint32_t *ext_prop)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ uint32_t mb[6] = {0};
+
+ DBG_FUNC("IN\n");
+
+ mb[0] = SIS_CMD_GET_ADAPTER_PROPERTIES;
+ ret = pqisrc_send_sis_cmd(softs, mb);
+ if (!ret) {
+ DBG_INFO("GET_PROPERTIES prop = %x, ext_prop = %x\n",
+ mb[1], mb[4]);
+ *prop = mb[1];
+ *ext_prop = mb[4];
+ }
+
+ DBG_FUNC("OUT\n");
+ return ret;
+}
+
+/* Second SIS command to the adapter GET_COMM_PREFERRED_SETTINGS */
+int pqisrc_get_preferred_settings(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ uint32_t mb[6] = {0};
+
+ DBG_FUNC("IN\n");
+
+ mb[0] = SIS_CMD_GET_COMM_PREFERRED_SETTINGS;
+ ret = pqisrc_send_sis_cmd(softs, mb);
+ if (!ret) {
+ /* 31:16 maximum command size in KB */
+ softs->pref_settings.max_cmd_size = mb[1] >> 16;
+ /* 15:00: Maximum FIB size in bytes */
+ softs->pref_settings.max_fib_size = mb[1] & 0x0000FFFF;
+ DBG_INFO("cmd size = %x, fib size = %x\n",
+ softs->pref_settings.max_cmd_size,
+ softs->pref_settings.max_fib_size);
+ }
+
+ DBG_FUNC("OUT\n");
+ return ret;
+}
+
+/* Get supported PQI capabilities from the adapter */
+int pqisrc_get_sis_pqi_cap(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ uint32_t mb[6] = {0};
+
+ DBG_FUNC("IN\n");
+
+ mb[0] = SIS_CMD_GET_PQI_CAPABILITIES;
+ ret = pqisrc_send_sis_cmd(softs, mb);
+ if (!ret) {
+ softs->pqi_cap.max_sg_elem = mb[1];
+ softs->pqi_cap.max_transfer_size = mb[2];
+ softs->pqi_cap.max_outstanding_io = mb[3];
+#ifdef DMA_ATTR
+ softs->os_specific.buf_dma_attr.dma_attr_sgllen =
+ softs->pqi_cap.max_sg_elem;
+ softs->os_specific.buf_dma_attr.dma_attr_maxxfer =
+ softs->pqi_cap.max_transfer_size;
+ softs->os_specific.buf_dma_attr.dma_attr_count_max =
+ softs->pqi_cap.max_transfer_size - 1;
+#endif
+ softs->pqi_cap.conf_tab_off = mb[4];
+
+ softs->pqi_cap.conf_tab_sz = mb[5];
+
+ DBG_INFO("max_sg_elem = %x\n",
+ softs->pqi_cap.max_sg_elem);
+ DBG_INFO("max_transfer_size = %x\n",
+ softs->pqi_cap.max_transfer_size);
+ DBG_INFO("max_outstanding_io = %x\n",
+ softs->pqi_cap.max_outstanding_io);
+ }
+
+ DBG_FUNC("OUT\n");
+ return ret;
+}
+
+/* Send INIT STRUCT BASE ADDR - one of the SIS commands */
+int pqisrc_init_struct_base(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ uint32_t elem_size = 0;
+ uint32_t num_elem = 0;
+ struct dma_mem init_struct_mem = {0};
+ struct init_base_struct *init_struct = NULL;
+ uint32_t mb[6] = {0};
+
+ DBG_FUNC("IN\n");
+
+ /* Allocate init struct */
+ memset(&init_struct_mem, 0, sizeof(struct dma_mem));
+ init_struct_mem.size = sizeof(struct init_base_struct);
+ init_struct_mem.align = PQISRC_INIT_STRUCT_DMA_ALIGN;
+ init_struct_mem.tag = "init_struct";
+ ret = os_dma_mem_alloc(softs, &init_struct_mem);
+ if (ret) {
+		DBG_ERR("Failed to allocate init struct ret : %d\n",
+			ret);
+ goto err_out;
+ }
+
+	/*
+	 * Calculate the error buffer size.
+	 * Valid tag values run from 1 to softs->max_outstanding_io, and the
+	 * rcb and error buffer are indexed by tag.  Since tag 0 is never
+	 * used, one extra element is allocated.
+	 */
+ num_elem = softs->pqi_cap.max_outstanding_io + 1;
+ elem_size = PQISRC_ERR_BUF_ELEM_SIZE;
+ softs->err_buf_dma_mem.size = num_elem * elem_size;
+
+ /* Allocate error buffer */
+ softs->err_buf_dma_mem.align = PQISRC_ERR_BUF_DMA_ALIGN;
+ softs->err_buf_dma_mem.tag = "error_buffer";
+ ret = os_dma_mem_alloc(softs, &softs->err_buf_dma_mem);
+ if (ret) {
+		DBG_ERR("Failed to allocate error buffer ret : %d\n",
+ ret);
+ goto err_error_buf_alloc;
+ }
+
+ /* Fill init struct */
+ init_struct = (struct init_base_struct *)DMA_TO_VIRT(&init_struct_mem);
+ init_struct->revision = PQISRC_INIT_STRUCT_REVISION;
+ init_struct->flags = 0;
+ init_struct->err_buf_paddr_l = DMA_PHYS_LOW(&softs->err_buf_dma_mem);
+ init_struct->err_buf_paddr_h = DMA_PHYS_HIGH(&softs->err_buf_dma_mem);
+ init_struct->err_buf_elem_len = elem_size;
+ init_struct->err_buf_num_elem = num_elem;
+
+ mb[0] = SIS_CMD_INIT_BASE_STRUCT_ADDRESS;
+ mb[1] = DMA_PHYS_LOW(&init_struct_mem);
+ mb[2] = DMA_PHYS_HIGH(&init_struct_mem);
+ mb[3] = init_struct_mem.size;
+
+ ret = pqisrc_send_sis_cmd(softs, mb);
+ if (ret)
+ goto err_sis_cmd;
+
+ DBG_FUNC("OUT\n");
+ os_dma_mem_free(softs, &init_struct_mem);
+ return ret;
+
+err_sis_cmd:
+ os_dma_mem_free(softs, &softs->err_buf_dma_mem);
+err_error_buf_alloc:
+ os_dma_mem_free(softs, &init_struct_mem);
+err_out:
+ DBG_FUNC("OUT failed %d\n", ret);
+ return PQI_STATUS_FAILURE;
+}
+
+/*
+ * SIS initialization of the adapter in a sequence of
+ * - GET_ADAPTER_PROPERTIES
+ * - GET_COMM_PREFERRED_SETTINGS
+ * - GET_PQI_CAPABILITIES
+ * - INIT_STRUCT_BASE ADDR
+ */
+int pqisrc_sis_init(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ uint32_t prop = 0;
+ uint32_t ext_prop = 0;
+
+ DBG_FUNC("IN\n");
+
+ ret = pqisrc_force_sis(softs);
+ if (ret) {
+ DBG_ERR("Failed to switch back the adapter to SIS mode!\n");
+ goto err_out;
+ }
+
+ /* Check FW status ready */
+ ret = pqisrc_check_fw_status(softs);
+ if (ret) {
+ DBG_ERR("PQI Controller is not ready !!!\n");
+ goto err_out;
+ }
+
+	/* Check for PQI support (function code 19h) */
+ ret = pqisrc_get_adapter_properties(softs, &prop, &ext_prop);
+ if (ret) {
+ DBG_ERR("Failed to get adapter properties\n");
+ goto err_out;
+ }
+ if (!((prop & SIS_SUPPORT_EXT_OPT) &&
+ (ext_prop & SIS_SUPPORT_PQI))) {
+ DBG_ERR("PQI Mode Not Supported\n");
+ ret = PQI_STATUS_FAILURE;
+ goto err_out;
+ }
+
+ softs->pqi_reset_quiesce_allowed = false;
+ if (ext_prop & SIS_SUPPORT_PQI_RESET_QUIESCE)
+ softs->pqi_reset_quiesce_allowed = true;
+
+ /* Send GET_COMM_PREFERRED_SETTINGS (26h) */
+ ret = pqisrc_get_preferred_settings(softs);
+ if (ret) {
+ DBG_ERR("Failed to get adapter pref settings\n");
+ goto err_out;
+ }
+
+	/* Get PQI capabilities (3000h) */
+ ret = pqisrc_get_sis_pqi_cap(softs);
+ if (ret) {
+ DBG_ERR("Failed to get PQI Capabilities\n");
+ goto err_out;
+ }
+
+	/*
+	 * DMA memory is needed from here on; perform any OS-specific
+	 * DMA setup.
+	 */
+ ret = os_dma_setup(softs);
+ if (ret) {
+ DBG_ERR("Failed to Setup DMA\n");
+ goto err_out;
+ }
+
+ /* Init struct base addr */
+ ret = pqisrc_init_struct_base(softs);
+ if (ret) {
+ DBG_ERR("Failed to set init struct base addr\n");
+ goto err_dma;
+ }
+
+
+ DBG_FUNC("OUT\n");
+ return ret;
+
+err_dma:
+ os_dma_destroy(softs);
+err_out:
+ DBG_FUNC("OUT failed\n");
+ return ret;
+}
+
+/* Deallocate the resources used during SIS initialization */
+void pqisrc_sis_uninit(pqisrc_softstate_t *softs)
+{
+ DBG_FUNC("IN\n");
+
+ os_dma_mem_free(softs, &softs->err_buf_dma_mem);
+
+ os_dma_destroy(softs);
+ os_resource_free(softs);
+ pqi_reset(softs);
+
+
+ DBG_FUNC("OUT\n");
+}
+
+int pqisrc_sis_wait_for_db_bit_to_clear(pqisrc_softstate_t *softs, uint32_t bit)
+{
+ int rcode = PQI_STATUS_SUCCESS;
+ uint32_t db_reg;
+ uint32_t loop_cnt = 0;
+
+ DBG_FUNC("IN\n");
+
+ while (1) {
+ db_reg = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db,
+ LEGACY_SIS_IDBR);
+ if ((db_reg & bit) == 0)
+ break;
+ if (GET_FW_STATUS(softs) & PQI_CTRL_KERNEL_PANIC) {
+ DBG_ERR("controller kernel panic\n");
+ rcode = PQI_STATUS_FAILURE;
+ break;
+ }
+ if (loop_cnt++ == SIS_DB_BIT_CLEAR_TIMEOUT_CNT) {
+ DBG_ERR("door-bell reg bit 0x%x not cleared\n", bit);
+ rcode = PQI_STATUS_TIMEOUT;
+ break;
+ }
+ OS_SLEEP(500);
+ }
+
+ DBG_FUNC("OUT\n");
+
+ return rcode;
+}
diff --git a/sys/dev/smartpqi/smartpqi_structures.h b/sys/dev/smartpqi/smartpqi_structures.h
new file mode 100644
index 000000000000..c0b15ca3e0c2
--- /dev/null
+++ b/sys/dev/smartpqi/smartpqi_structures.h
@@ -0,0 +1,1010 @@
+/*-
+ * Copyright (c) 2018 Microsemi Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#ifndef _PQI_STRUCTURES_H
+#define _PQI_STRUCTURES_H
+
+
+
+
+struct bmic_host_wellness_driver_version {
+ uint8_t start_tag[4];
+ uint8_t driver_version_tag[2];
+ uint16_t driver_version_length;
+ char driver_version[32];
+ uint8_t end_tag[2];
+
+}OS_ATTRIBUTE_PACKED;
+
+
+struct bmic_host_wellness_time {
+ uint8_t start_tag[4];
+ uint8_t time_tag[2];
+ uint16_t time_length;
+ uint8_t hour;
+ uint8_t min;
+ uint8_t sec;
+ uint8_t reserved;
+ uint8_t month;
+ uint8_t day;
+ uint8_t century;
+ uint8_t year;
+ uint8_t dont_write_tag[2];
+ uint8_t end_tag[2];
+
+}OS_ATTRIBUTE_PACKED;
+
+
+/* As per PQI Spec pqi-2r00a , 6.2.2. */
+
+/* device capability register , for admin q table 24 */
+struct pqi_dev_adminq_cap {
+ uint8_t max_admin_ibq_elem;
+ uint8_t max_admin_obq_elem;
+ uint8_t admin_ibq_elem_len;
+ uint8_t admin_obq_elem_len;
+ uint16_t max_pqi_dev_reset_tmo;
+ uint8_t res[2];
+}OS_ATTRIBUTE_PACKED;
+
+/* admin q parameter reg , table 36 */
+struct admin_q_param {
+ uint8_t num_iq_elements;
+ uint8_t num_oq_elements;
+ uint8_t intr_msg_num;
+ uint8_t msix_disable;
+}OS_ATTRIBUTE_PACKED;
+
+struct pqi_registers {
+ uint64_t signature;
+ uint64_t admin_q_config;
+ uint64_t pqi_dev_adminq_cap;
+ uint32_t legacy_intr_status;
+ uint32_t legacy_intr_mask_set;
+ uint32_t legacy_intr_mask_clr;
+ uint8_t res1[28];
+ uint32_t pqi_dev_status;
+ uint8_t res2[4];
+ uint64_t admin_ibq_pi_offset;
+ uint64_t admin_obq_ci_offset;
+ uint64_t admin_ibq_elem_array_addr;
+ uint64_t admin_obq_elem_array_addr;
+ uint64_t admin_ibq_ci_addr;
+ uint64_t admin_obq_pi_addr;
+ uint32_t admin_q_param;
+ uint8_t res3[4];
+ uint32_t pqi_dev_err;
+ uint8_t res4[4];
+ uint64_t error_details;
+ uint32_t dev_reset;
+ uint32_t power_action;
+ uint8_t res5[104];
+}OS_ATTRIBUTE_PACKED;
+
+/*
+ * IOA controller registers
+ * Mapped in PCIe BAR 0.
+ */
+
+struct ioa_registers {
+ uint8_t res1[0x18];
+ uint32_t host_to_ioa_db_mask_clr; /* 18h */
+ uint8_t res2[4];
+ uint32_t host_to_ioa_db; /* 20h */
+ uint8_t res3[4];
+ uint32_t host_to_ioa_db_clr; /* 28h */
+ uint8_t res4[8];
+ uint32_t ioa_to_host_glob_int_mask; /* 34h */
+ uint8_t res5[0x64];
+ uint32_t ioa_to_host_db; /* 9Ch */
+ uint32_t ioa_to_host_db_clr; /* A0h */
+ uint8_t res6[4];
+ uint32_t ioa_to_host_db_mask; /* A8h */
+ uint32_t ioa_to_host_db_mask_clr; /* ACh */
+ uint32_t scratchpad0; /* B0h */
+ uint32_t scratchpad1; /* B4h */
+ uint32_t scratchpad2; /* B8h */
+ uint32_t scratchpad3_fw_status; /* BCh */
+ uint8_t res7[8];
+ uint32_t scratchpad4; /* C8h */
+ uint8_t res8[0xf34]; /* 0xC8 + 4 + 0xf34 = 1000h */
+ uint32_t mb[8]; /* 1000h */
+}OS_ATTRIBUTE_PACKED;
+
+
+/* PQI Preferred settings */
+struct pqi_pref_settings {
+ uint16_t max_cmd_size;
+ uint16_t max_fib_size;
+}OS_ATTRIBUTE_PACKED;
+
+/* pqi capability by sis interface */
+struct pqi_cap {
+ uint32_t max_sg_elem;
+ uint32_t max_transfer_size;
+ uint32_t max_outstanding_io;
+ uint32_t conf_tab_off;
+ uint32_t conf_tab_sz;
+}OS_ATTRIBUTE_PACKED;
+
+struct pqi_conf_table {
+ uint8_t sign[8]; /* "CFGTABLE" */
+ uint32_t first_section_off;
+};
+
+struct pqi_conf_table_section_header {
+ uint16_t section_id;
+ uint16_t next_section_off;
+};
+
+struct pqi_conf_table_general_info {
+ struct pqi_conf_table_section_header header;
+ uint32_t section_len;
+ uint32_t max_outstanding_req;
+ uint32_t max_sg_size;
+ uint32_t max_sg_per_req;
+};
+
+struct pqi_conf_table_debug {
+ struct pqi_conf_table_section_header header;
+ uint32_t scratchpad;
+};
+
+struct pqi_conf_table_heartbeat {
+ struct pqi_conf_table_section_header header;
+ uint32_t heartbeat_counter;
+};
+
+typedef union pqi_reset_reg {
+ struct {
+ uint32_t reset_type : 3;
+ uint32_t reserved : 2;
+ uint32_t reset_action : 3;
+ uint32_t hold_in_pd1 : 1;
+ uint32_t reserved2 : 23;
+ } bits;
+ uint32_t all_bits;
+}pqi_reset_reg_t;
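+
+/*
+ * Example (illustrative): the union lets the driver compose the fields
+ * one bit-field at a time and then issue a single 32-bit store, e.g.
+ * set reg.bits.reset_type and reg.bits.reset_action, then write
+ * reg.all_bits to the dev_reset register.
+ */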
+
+/* Memory descriptor for DMA memory allocation */
+typedef struct dma_mem {
+ void *virt_addr;
+ dma_addr_t dma_addr;
+ uint32_t size;
+ uint32_t align;
+ char *tag;
+ bus_dma_tag_t dma_tag;
+ bus_dmamap_t dma_map;
+}dma_mem_t;
+
+/* Lock should be 8 byte aligned */
+
+#ifndef LOCKFREE_STACK
+
+typedef struct pqi_taglist {
+ uint32_t max_elem;
+ uint32_t num_elem;
+ uint32_t head;
+ uint32_t tail;
+ uint32_t *elem_array;
+ boolean_t lockcreated;
+ char lockname[LOCKNAME_SIZE];
+ OS_LOCK_T lock OS_ATTRIBUTE_ALIGNED(8);
+}pqi_taglist_t;
+
+#else /* LOCKFREE_STACK */
+
+union head_list {
+ struct {
+ uint32_t seq_no; /* To avoid aba problem */
+ uint32_t index; /* Index at the top of the stack */
+ }top;
+ uint64_t data;
+};
+/* lock-free stack used to push and pop the tag used for IO request */
+typedef struct lockless_stack {
+ uint32_t *next_index_array;
+ uint32_t num_elements;
+ volatile union head_list head OS_ATTRIBUTE_ALIGNED(8);
+}lockless_stack_t;
+
+#endif /* LOCKFREE_STACK */
+
+/*
+ * PQI SGL descriptor layouts.
+ */
+/*
+ * SGL (Scatter Gather List) descriptor Codes
+ */
+
+#define SGL_DESCRIPTOR_CODE_DATA_BLOCK 0x0
+#define SGL_DESCRIPTOR_CODE_BIT_BUCKET 0x1
+#define SGL_DESCRIPTOR_CODE_STANDARD_SEGMENT 0x2
+#define SGL_DESCRIPTOR_CODE_LAST_STANDARD_SEGMENT 0x3
+#define SGL_DESCRIPTOR_CODE_LAST_ALTERNATIVE_SGL_SEGMENT 0x4
+#define SGL_DESCRIPTOR_CODE_VENDOR_SPECIFIC 0xF
+
+typedef struct sgl_descriptor
+{
+ uint64_t addr; /* !< Bytes 0-7. The starting 64-bit memory byte address of the data block. */
+ uint32_t length; /* !< Bytes 8-11. The length in bytes of the data block. Set to 0x00000000 specifies that no data be transferred. */
+ uint8_t res[3]; /* !< Bytes 12-14. */
+ uint8_t zero : 4; /* !< Byte 15, Bits 0-3. */
+ uint8_t type : 4; /* !< Byte 15, Bits 4-7. sgl descriptor type */
+} sg_desc_t;
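+
+#if 0
+/*
+ * Usage sketch (illustrative, not compiled): describing a single data
+ * block.  buf_dma_addr and buf_len are hypothetical placeholders.
+ */
+sg_desc_t desc = {0};
+desc.addr = buf_dma_addr;	/* 64-bit bus address of the buffer */
+desc.length = buf_len;		/* transfer length in bytes */
+desc.type = SGL_DESCRIPTOR_CODE_DATA_BLOCK;
+#endif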
+
+/* PQI IUs */
+typedef struct iu_header
+{
+ uint8_t iu_type;
+ uint8_t comp_feature;
+ uint16_t iu_length;
+}OS_ATTRIBUTE_PACKED iu_header_t;
+
+
+typedef struct general_admin_request /* REPORT_PQI_DEVICE_CAPABILITY, REPORT_MANUFACTURER_INFO, REPORT_OPERATIONAL_IQ, REPORT_OPERATIONAL_OQ all same layout. */
+{
+ iu_header_t header; /* !< Bytes 0-3. */
+ uint16_t res1;
+ uint16_t work;
+ uint16_t req_id; /* !< Bytes 8-9. request identifier */
+ uint8_t fn_code; /* !< Byte 10. which administrator function */
+ union {
+ struct {
+ uint8_t res2[33]; /* !< Bytes 11-43. function specific */
+ uint32_t buf_size; /* !< Bytes 44-47. size in bytes of the Data-In/Out Buffer */
+ sg_desc_t sg_desc; /* !< Bytes 48-63. SGL */
+ } OS_ATTRIBUTE_PACKED general_func;
+
+ struct {
+ uint8_t res1;
+ uint16_t qid;
+ uint8_t res2[2];
+ uint64_t elem_arr_addr;
+ uint64_t iq_ci_addr;
+ uint16_t num_elem;
+ uint16_t elem_len;
+ uint8_t queue_proto;
+ uint8_t arb_prio;
+ uint8_t res3[22];
+ uint32_t vend_specific;
+ } OS_ATTRIBUTE_PACKED create_op_iq;
+
+ struct {
+ uint8_t res1;
+ uint16_t qid;
+ uint8_t res2[2];
+ uint64_t elem_arr_addr;
+ uint64_t ob_pi_addr;
+ uint16_t num_elem;
+ uint16_t elem_len;
+ uint8_t queue_proto;
+ uint8_t res3[3];
+ uint16_t intr_msg_num;
+ uint16_t coales_count;
+ uint32_t min_coales_time;
+ uint32_t max_coales_time;
+ uint8_t res4[8];
+ uint32_t vend_specific;
+ } OS_ATTRIBUTE_PACKED create_op_oq;
+
+ struct {
+ uint8_t res1;
+ uint16_t qid;
+ uint8_t res2[50];
+ } OS_ATTRIBUTE_PACKED delete_op_queue;
+
+ struct {
+ uint8_t res1;
+ uint16_t qid;
+ uint8_t res2[46];
+ uint32_t vend_specific;
+ } OS_ATTRIBUTE_PACKED change_op_iq_prop;
+
+ } OS_ATTRIBUTE_PACKED req_type;
+
+}OS_ATTRIBUTE_PACKED gen_adm_req_iu_t;
+
+
+typedef struct general_admin_response {
+ iu_header_t header;
+ uint16_t res1;
+ uint16_t work;
+ uint16_t req_id;
+ uint8_t fn_code;
+ uint8_t status;
+ union {
+ struct {
+ uint8_t status_desc[4];
+ uint64_t pi_offset;
+ uint8_t res[40];
+ } OS_ATTRIBUTE_PACKED create_op_iq;
+
+ struct {
+ uint8_t status_desc[4];
+ uint64_t ci_offset;
+ uint8_t res[40];
+ } OS_ATTRIBUTE_PACKED create_op_oq;
+ } OS_ATTRIBUTE_PACKED resp_type;
+} OS_ATTRIBUTE_PACKED gen_adm_resp_iu_t ;
+
+/*report and set Event config IU*/
+
+typedef struct pqi_event_config_request {
+ iu_header_t header;
+ uint16_t response_queue_id; /* specifies the OQ where the response
+ IU is to be delivered */
+ uint8_t work_area[2]; /* reserved for driver use */
+ uint16_t request_id;
+ union {
+ uint16_t reserved; /* Report event config iu */
+ uint16_t global_event_oq_id; /* Set event config iu */
+ }iu_specific;
+ uint32_t buffer_length;
+ sg_desc_t sg_desc;
+}pqi_event_config_request_t;
+#if 0
+typedef struct pqi_set_event_config_request {
+ iu_header_t header;
+ uint16_t response_queue_id; /* specifies the OQ where the response
+ IU is to be delivered */
+ uint8_t work_area[2]; /* reserved for driver use */
+ uint16_t request_id;
+ uint16_t global_event_oq_id;
+ uint32_t buffer_length;
+ sg_desc_t sg_desc;
+}pqi_set_event_config_request_t;
+#endif
+
+ /* Report/Set event config data-in/data-out buffer structure */
+
+#define PQI_MAX_EVENT_DESCRIPTORS 255
+
+struct pqi_event_descriptor {
+ uint8_t event_type;
+ uint8_t reserved;
+ uint16_t oq_id;
+};
+
+typedef struct pqi_event_config {
+ uint8_t reserved[2];
+ uint8_t num_event_descriptors;
+ uint8_t reserved1;
+ struct pqi_event_descriptor descriptors[PQI_MAX_EVENT_DESCRIPTORS];
+}pqi_event_config_t;
+
+/*management response IUs */
+typedef struct pqi_management_response{
+ iu_header_t header;
+ uint16_t reserved1;
+ uint8_t work_area[2];
+ uint16_t req_id;
+ uint8_t result;
+ uint8_t reserved[5];
+ uint64_t result_data;
+}pqi_management_response_t;
+ /*Event response IU*/
+typedef struct pqi_event_response {
+ iu_header_t header;
+ uint16_t reserved1;
+ uint8_t work_area[2];
+ uint8_t event_type;
+ uint8_t reserved2 : 7;
+ uint8_t request_acknowledge : 1;
+ uint16_t event_id;
+ uint32_t additional_event_id;
+ uint8_t data[16];
+}pqi_event_response_t;
+
+ /*event acknowledge IU*/
+typedef struct pqi_event_acknowledge_request {
+ iu_header_t header;
+ uint16_t reserved1;
+ uint8_t work_area[2];
+ uint8_t event_type;
+ uint8_t reserved2;
+ uint16_t event_id;
+ uint32_t additional_event_id;
+}pqi_event_acknowledge_request_t;
+
+struct pqi_event {
+ boolean_t pending;
+ uint8_t event_type;
+ uint16_t event_id;
+ uint32_t additional_event_id;
+};
+
+
+typedef struct op_q_params
+{
+ uint8_t fn_code;
+ uint16_t qid;
+ uint16_t num_elem;
+ uint16_t elem_len;
+ uint16_t int_msg_num;
+
+} OS_ATTRIBUTE_PACKED op_q_params;
+
+
+/* Driver will use this structure to interpret the error
+ info element returned from a failed requests */
+typedef struct raid_path_error_info_elem {
+ uint8_t data_in_result; /* !< Byte 0. See SOP spec Table 77. */
+ uint8_t data_out_result; /* !< Byte 1. See SOP spec Table 78. */
+ uint8_t reserved[3]; /* !< Bytes 2-4. */
+ uint8_t status; /* !< Byte 5. See SAM-5 specification "Status" codes Table 40. Defined in Storport.h */
+ uint16_t status_qual; /* !< Bytes 6-7. See SAM-5 specification Table 43. */
+ uint16_t sense_data_len; /* !< Bytes 8-9. See SOP specification table 79. */
+ uint16_t resp_data_len; /* !< Bytes 10-11. See SOP specification table 79. */
+	uint32_t data_in_transferred;	/* !< Bytes 12-15. If "data_in_result = 0x01 (DATA_IN BUFFER UNDERFLOW)", indicates the number of contiguous bytes starting with offset zero in the Data-In buffer; otherwise ignored. */
+	uint32_t data_out_transferred;	/* !< Bytes 16-19. If "data_out_result = 0x01 (DATA_OUT BUFFER UNDERFLOW)", indicates the number of contiguous bytes starting with offset zero in the Data-Out buffer; otherwise ignored. */
+ uint8_t data[256]; /* !< Bytes 20-275. Response Data buffer or Sense Data buffer but not both. */
+}OS_ATTRIBUTE_PACKED raid_path_error_info_elem_t;
+
+#define PQI_ERROR_BUFFER_ELEMENT_LENGTH sizeof(raid_path_error_info_elem_t)
+
+typedef enum error_data_present
+{
+ DATA_PRESENT_NO_DATA = 0, /* !< No data present in Data buffer. */
+ DATA_PRESENT_RESPONSE_DATA = 1, /* !< Response data is present in Data buffer. */
+ DATA_PRESENT_SENSE_DATA = 2 /* !< Sense data is present in Data buffer. */
+} error_data_present_t;
+
+typedef struct aio_path_error_info_elem
+{
+ uint8_t status; /* !< Byte 0. See SAM-5 specification "SCSI Status" codes Table 40. Defined in Storport.h */
+ uint8_t service_resp; /* !< Byte 1. SCSI Service Response. */
+ uint8_t data_pres; /* !< Byte 2. Bits [7:2] reserved. Bits [1:0] - 0=No data, 1=Response data, 2=Sense data. */
+ uint8_t reserved1; /* !< Byte 3. Reserved. */
+ uint32_t resd_count; /* !< Bytes 4-7. The residual data length in bytes. Need the original transfer size and if Status is OverRun or UnderRun. */
+ uint16_t data_len; /* !< Bytes 8-9. The amount of Sense data or Response data returned in Response/Sense Data buffer. */
+	uint16_t reserved2;	/* !< Bytes 10-11. Reserved. */
+	uint8_t data[256];	/* !< Bytes 12-267. Response data buffer or Sense data buffer but not both. */
+ uint8_t padding[8]; /* !< Bytes 268-275. Padding to make AIO_PATH_ERROR_INFO_ELEMENT = RAID_PATH_ERROR_INFO_ELEMENT */
+}OS_ATTRIBUTE_PACKED aio_path_error_info_elem_t;
+
+struct init_base_struct {
+ uint32_t revision; /* revision of init structure */
+ uint32_t flags; /* reserved */
+ uint32_t err_buf_paddr_l; /* lower 32 bits of physical address of error buffer */
+ uint32_t err_buf_paddr_h; /* upper 32 bits of physical address of error buffer */
+ uint32_t err_buf_elem_len; /* length of each element in error buffer (in bytes) */
+ uint32_t err_buf_num_elem; /* number of elements in error buffer */
+}OS_ATTRIBUTE_PACKED;
+
+/* Queue details */
+typedef struct ib_queue {
+ uint32_t q_id;
+ uint32_t num_elem;
+ uint32_t elem_size;
+ char *array_virt_addr;
+ dma_addr_t array_dma_addr;
+ uint32_t pi_local;
+ uint32_t pi_register_offset;
+ uint32_t *pi_register_abs;
+ uint32_t *ci_virt_addr;
+ dma_addr_t ci_dma_addr;
+ boolean_t created;
+ boolean_t lockcreated;
+ char lockname[LOCKNAME_SIZE];
+ OS_LOCK_T lock OS_ATTRIBUTE_ALIGNED(8);
+}ib_queue_t;
+
+typedef struct ob_queue {
+ uint32_t q_id;
+ uint32_t num_elem;
+ uint32_t elem_size;
+ uint32_t intr_msg_num;
+ char *array_virt_addr;
+ dma_addr_t array_dma_addr;
+ uint32_t ci_local;
+ uint32_t ci_register_offset;
+ uint32_t *ci_register_abs;
+ uint32_t *pi_virt_addr;
+ dma_addr_t pi_dma_addr;
+ boolean_t created;
+}ob_queue_t;
+
+typedef struct pqisrc_sg_desc{
+ uint64_t addr;
+ uint32_t len;
+ uint32_t flags;
+}sgt_t;
+
+
+typedef struct pqi_iu_layer_desc {
+ uint8_t ib_spanning_supported : 1;
+ uint8_t res1 : 7;
+ uint8_t res2[5];
+ uint16_t max_ib_iu_len;
+ uint8_t ob_spanning_supported : 1;
+ uint8_t res3 : 7;
+ uint8_t res4[5];
+ uint16_t max_ob_iu_len;
+}OS_ATTRIBUTE_PACKED pqi_iu_layer_desc_t;
+
+
+/* Response IU data */
+typedef struct pqi_device_capabilities {
+ uint16_t length;
+ uint8_t res1[6];
+ uint8_t ibq_arb_priority_support_bitmask;
+ uint8_t max_aw_a;
+ uint8_t max_aw_b;
+ uint8_t max_aw_c;
+ uint8_t max_arb_burst : 3;
+ uint8_t res2 : 4;
+ uint8_t iqa : 1;
+ uint8_t res3[2];
+ uint8_t iq_freeze : 1;
+ uint8_t res4 : 7;
+ uint16_t max_iqs;
+ uint16_t max_iq_elements;
+ uint8_t res5[4];
+ uint16_t max_iq_elem_len;
+ uint16_t min_iq_elem_len;
+ uint8_t res6[2];
+ uint16_t max_oqs;
+ uint16_t max_oq_elements;
+ uint16_t intr_coales_time_granularity;
+ uint16_t max_oq_elem_len;
+ uint16_t min_oq_elem_len;
+ uint8_t res7[24];
+ pqi_iu_layer_desc_t iu_layer_desc[32];
+}OS_ATTRIBUTE_PACKED pqi_dev_cap_t;
+
+/* IO path */
+
+typedef struct pqi_aio_req {
+ iu_header_t header;
+ uint16_t response_queue_id;
+ uint8_t work_area[2];
+ uint16_t req_id;
+ uint8_t res1[2];
+ uint32_t nexus;
+ uint32_t buf_len;
+ uint8_t data_dir : 2;
+ uint8_t partial : 1;
+ uint8_t mem_type : 1;
+ uint8_t fence : 1;
+ uint8_t encrypt_enable : 1;
+ uint8_t res2 : 2;
+ uint8_t task_attr : 3;
+ uint8_t cmd_prio : 4;
+ uint8_t res3 : 1;
+ uint16_t encrypt_key_index;
+ uint32_t encrypt_twk_low;
+ uint32_t encrypt_twk_high;
+ uint8_t cdb[16];
+ uint16_t err_idx;
+ uint8_t num_sg;
+ uint8_t cdb_len;
+ uint8_t lun[8];
+ uint8_t res4[4];
+ sgt_t sg_desc[4];
+}OS_ATTRIBUTE_PACKED pqi_aio_req_t;
+
+
+typedef struct pqisrc_raid_request {
+ iu_header_t header;
+ uint16_t response_queue_id; /* specifies the OQ where the response
+ IU is to be delivered */
+ uint8_t work_area[2]; /* reserved for driver use */
+ uint16_t request_id;
+ uint16_t nexus_id;
+ uint32_t buffer_length;
+ uint8_t lun_number[8];
+ uint16_t protocol_spec;
+ uint8_t data_direction : 2;
+ uint8_t partial : 1;
+ uint8_t reserved1 : 4;
+ uint8_t fence : 1;
+ uint16_t error_index;
+ uint8_t reserved2;
+ uint8_t task_attribute : 3;
+ uint8_t command_priority : 4;
+ uint8_t reserved3 : 1;
+ uint8_t reserved4 : 2;
+ uint8_t additional_cdb_bytes_usage : 3;
+ uint8_t reserved5 : 3;
+ uint8_t cdb[16];
+ uint8_t additional_cdb_bytes[16];
+ sgt_t sg_descriptors[4];
+}OS_ATTRIBUTE_PACKED pqisrc_raid_req_t;
+
+
+typedef struct pqi_tmf_req {
+ iu_header_t header;
+ uint16_t resp_qid;
+ uint8_t work_area[2];
+ uint16_t req_id;
+ uint16_t nexus;
+ uint8_t res1[4];
+ uint8_t lun[8];
+ uint16_t protocol_spec;
+ uint16_t obq_id_to_manage;
+ uint16_t req_id_to_manage;
+ uint8_t tmf;
+ uint8_t res2 : 7;
+ uint8_t fence : 1;
+}OS_ATTRIBUTE_PACKED pqi_tmf_req_t;
+
+
+typedef struct pqi_tmf_resp {
+ iu_header_t header;
+ uint16_t resp_qid;
+ uint8_t work_area[2];
+ uint16_t req_id;
+ uint16_t nexus;
+ uint8_t add_resp_info[3];
+ uint8_t resp_code;
+}pqi_tmf_resp_t;
+
+
+struct pqi_io_response {
+ iu_header_t header;
+ uint16_t queue_id;
+ uint8_t work_area[2];
+ uint16_t request_id;
+ uint16_t error_index;
+ uint8_t reserved[4];
+}OS_ATTRIBUTE_PACKED;
+
+
+struct pqi_enc_info {
+ uint16_t data_enc_key_index;
+ uint32_t encrypt_tweak_lower;
+ uint32_t encrypt_tweak_upper;
+};
+
+
+typedef struct pqi_scsi_device {
+	device_type_t devtype;		/* as reported by INQUIRY command */
+ uint8_t device_type; /* as reported by
+ BMIC_IDENTIFY_PHYSICAL_DEVICE - only
+ valid for devtype = TYPE_DISK */
+ int bus;
+ int target;
+ int lun;
+ uint8_t flags;
+ uint8_t scsi3addr[8];
+ uint64_t wwid;
+ uint8_t is_physical_device : 1;
+ uint8_t is_external_raid_device : 1;
+ uint8_t target_lun_valid : 1;
+ uint8_t expose_device : 1;
+ uint8_t no_uld_attach : 1;
+ uint8_t is_obdr_device : 1;
+ uint8_t aio_enabled : 1;
+ uint8_t device_gone : 1;
+ uint8_t new_device : 1;
+ uint8_t volume_offline : 1;
+ uint8_t vendor[8]; /* bytes 8-15 of inquiry data */
+ uint8_t model[16]; /* bytes 16-31 of inquiry data */
+ uint64_t sas_address;
+ uint8_t raid_level;
+ uint16_t queue_depth; /* max. queue_depth for this device */
+ uint16_t advertised_queue_depth;
+ uint32_t ioaccel_handle;
+ uint8_t volume_status;
+ uint8_t active_path_index;
+ uint8_t path_map;
+ uint8_t bay;
+ uint8_t box[8];
+ uint16_t phys_connector[8];
+ int offload_config; /* I/O accel RAID offload configured */
+ int offload_enabled; /* I/O accel RAID offload enabled */
+ int offload_enabled_pending;
+ int offload_to_mirror; /* Send next I/O accelerator RAID
+ offload request to mirror drive. */
+ struct raid_map *raid_map; /* I/O accelerator RAID map */
+ int reset_in_progress;
+ os_dev_info_t *dip; /*os specific scsi device information*/
+ boolean_t invalid;
+}pqi_scsi_dev_t;
+
+
+struct sense_header_scsi { /* See SPC-3 section 4.5 */
+ uint8_t response_code; /* permit: 0x0, 0x70, 0x71, 0x72, 0x73 */
+ uint8_t sense_key;
+ uint8_t asc;
+ uint8_t ascq;
+ uint8_t byte4;
+ uint8_t byte5;
+ uint8_t byte6;
+ uint8_t additional_length; /* always 0 for fixed sense format */
+}OS_ATTRIBUTE_PACKED;
+
+
+
+typedef struct report_lun_header {
+ uint32_t list_length;
+ uint8_t extended_response;
+ uint8_t reserved[3];
+}OS_ATTRIBUTE_PACKED reportlun_header_t;
+
+
+typedef struct report_lun_ext_entry {
+ uint8_t lunid[8];
+ uint64_t wwid;
+ uint8_t device_type;
+ uint8_t device_flags;
+ uint8_t lun_count; /* number of LUNs in a multi-LUN device */
+ uint8_t redundant_paths;
+ uint32_t ioaccel_handle;
+}OS_ATTRIBUTE_PACKED reportlun_ext_entry_t;
+
+
+typedef struct report_lun_data_ext {
+ reportlun_header_t header;
+ reportlun_ext_entry_t lun_entries[1];
+}OS_ATTRIBUTE_PACKED reportlun_data_ext_t;
+
+typedef struct raidmap_data {
+ uint32_t ioaccel_handle;
+ uint8_t xor_mult[2];
+ uint8_t reserved[2];
+}OS_ATTRIBUTE_PACKED raidmap_data_t;
+
+typedef struct raid_map {
+ uint32_t structure_size; /* size of entire structure in bytes */
+ uint32_t volume_blk_size; /* bytes / block in the volume */
+ uint64_t volume_blk_cnt; /* logical blocks on the volume */
+ uint8_t phys_blk_shift; /* shift factor to convert between
+ units of logical blocks and physical
+ disk blocks */
+ uint8_t parity_rotation_shift; /* shift factor to convert between units
+ of logical stripes and physical
+ stripes */
+ uint16_t strip_size; /* blocks used on each disk / stripe */
+ uint64_t disk_starting_blk; /* first disk block used in volume */
+ uint64_t disk_blk_cnt; /* disk blocks used by volume / disk */
+ uint16_t data_disks_per_row; /* data disk entries / row in the map */
+ uint16_t metadata_disks_per_row; /* mirror/parity disk entries / row
+ in the map */
+ uint16_t row_cnt; /* rows in each layout map */
+ uint16_t layout_map_count; /* layout maps (1 map per mirror/parity
+ group) */
+ uint16_t flags;
+ uint16_t data_encryption_key_index;
+ uint8_t reserved[16];
+ raidmap_data_t dev_data[RAID_MAP_MAX_ENTRIES];
+}OS_ATTRIBUTE_PACKED pqisrc_raid_map_t;
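+
+/*
+ * Example (illustrative): a RAID 5 volume with strip_size = 128,
+ * data_disks_per_row = 3 and metadata_disks_per_row = 1 has
+ * 128 * 3 = 384 volume blocks per row, and each map row occupies four
+ * dev_data[] entries (three data strips plus one parity strip).
+ */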
+
+
+typedef struct bmic_ident_ctrl {
+ uint8_t conf_ld_count;
+ uint32_t conf_sign;
+ uint8_t fw_version[4];
+ uint8_t rom_fw_rev[4];
+ uint8_t hw_rev;
+ uint8_t reserved[140];
+ uint16_t extended_lun_count;
+ uint8_t reserved1[34];
+ uint16_t fw_build_number;
+ uint8_t reserved2[100];
+ uint8_t ctrl_mode;
+ uint8_t reserved3[32];
+}OS_ATTRIBUTE_PACKED bmic_ident_ctrl_t;
+
+typedef struct bmic_identify_physical_device {
+ uint8_t scsi_bus; /* SCSI Bus number on controller */
+ uint8_t scsi_id; /* SCSI ID on this bus */
+ uint16_t block_size; /* sector size in bytes */
+	uint32_t total_blocks;		/* number of sectors on drive */
+ uint32_t reserved_blocks; /* controller reserved (RIS) */
+ uint8_t model[40]; /* Physical Drive Model */
+ uint8_t serial_number[40]; /* Drive Serial Number */
+ uint8_t firmware_revision[8]; /* drive firmware revision */
+ uint8_t scsi_inquiry_bits; /* inquiry byte 7 bits */
+ uint8_t compaq_drive_stamp; /* 0 means drive not stamped */
+ uint8_t last_failure_reason;
+ uint8_t flags;
+ uint8_t more_flags;
+ uint8_t scsi_lun; /* SCSI LUN for phys drive */
+ uint8_t yet_more_flags;
+ uint8_t even_more_flags;
+ uint32_t spi_speed_rules;
+ uint8_t phys_connector[2]; /* connector number on controller */
+ uint8_t phys_box_on_bus; /* phys enclosure this drive resides */
+ uint8_t phys_bay_in_box; /* phys drv bay this drive resides */
+ uint32_t rpm; /* drive rotational speed in RPM */
+ uint8_t device_type; /* type of drive */
+ uint8_t sata_version; /* only valid when device_type =
+ BMIC_DEVICE_TYPE_SATA */
+ uint64_t big_total_block_count;
+ uint64_t ris_starting_lba;
+ uint32_t ris_size;
+ uint8_t wwid[20];
+ uint8_t controller_phy_map[32];
+ uint16_t phy_count;
+ uint8_t phy_connected_dev_type[256];
+ uint8_t phy_to_drive_bay_num[256];
+ uint16_t phy_to_attached_dev_index[256];
+ uint8_t box_index;
+ uint8_t reserved;
+ uint16_t extra_physical_drive_flags;
+ uint8_t negotiated_link_rate[256];
+ uint8_t phy_to_phy_map[256];
+ uint8_t redundant_path_present_map;
+ uint8_t redundant_path_failure_map;
+ uint8_t active_path_number;
+ uint16_t alternate_paths_phys_connector[8];
+ uint8_t alternate_paths_phys_box_on_port[8];
+ uint8_t multi_lun_device_lun_count;
+ uint8_t minimum_good_fw_revision[8];
+ uint8_t unique_inquiry_bytes[20];
+ uint8_t current_temperature_degreesC;
+ uint8_t temperature_threshold_degreesC;
+ uint8_t max_temperature_degreesC;
+ uint8_t logical_blocks_per_phys_block_exp;
+ uint16_t current_queue_depth_limit;
+ uint8_t switch_name[10];
+ uint16_t switch_port;
+ uint8_t alternate_paths_switch_name[40];
+ uint8_t alternate_paths_switch_port[8];
+ uint16_t power_on_hours;
+ uint16_t percent_endurance_used;
+ uint8_t drive_authentication;
+ uint8_t smart_carrier_authentication;
+ uint8_t smart_carrier_app_fw_version;
+ uint8_t smart_carrier_bootloader_fw_version;
+ uint8_t encryption_key_name[64];
+ uint32_t misc_drive_flags;
+ uint16_t dek_index;
+ uint8_t padding[112];
+}OS_ATTRIBUTE_PACKED bmic_ident_physdev_t;
+
+typedef struct pqisrc_bmic_flush_cache {
+ uint8_t disable_cache;
+ uint8_t power_action;
+ uint8_t ndu_flush_cache;
+ uint8_t halt_event;
+ uint8_t reserved[28];
+} OS_ATTRIBUTE_PACKED pqisrc_bmic_flush_cache_t;
+
+/* for halt_event member of pqisrc_bmic_flush_cache_t */
+enum pqisrc_flush_cache_event_type {
+ PQISRC_NONE_CACHE_FLUSH_ONLY = 0,
+ PQISRC_SHUTDOWN = 1,
+ PQISRC_HIBERNATE = 2,
+ PQISRC_SUSPEND = 3,
+ PQISRC_RESTART = 4
+};
+
+struct pqisrc_softstate;
+struct request_container_block;
+typedef void (*success_callback)(struct pqisrc_softstate *, struct request_container_block *);
+typedef void (*error_callback)(struct pqisrc_softstate *, struct request_container_block *, uint16_t);
+
+/* Request container block */
+typedef struct request_container_block {
+ void *req;
+ void *error_info;
+ REQUEST_STATUS_T status;
+ uint32_t tag;
+ sgt_t *sg_chain_virt;
+ dma_addr_t sg_chain_dma;
+ uint32_t data_dir;
+ pqi_scsi_dev_t *dvp;
+ struct pqisrc_softstate *softs;
+ success_callback success_cmp_callback;
+ error_callback error_cmp_callback;
+ uint8_t *cdbp;
+ int cmdlen;
+ uint32_t bcount; /* buffer size in byte */
+ uint32_t ioaccel_handle;
+ boolean_t encrypt_enable;
+ struct pqi_enc_info enc_info;
+ int cm_flags;
+ void *cm_data; /* pointer to data in kernel space */
+ bus_dmamap_t cm_datamap;
+ uint32_t nseg;
+ union ccb *cm_ccb;
+ sgt_t *sgt; /* sg table */
+ int resp_qid;
+ boolean_t req_pending;
+}rcb_t;
+
+typedef struct pqisrc_softstate {
+ OS_SPECIFIC_T os_specific;
+ struct ioa_registers *ioa_reg;
+ struct pqi_registers *pqi_reg;
+ char *pci_mem_base_vaddr;
+ PCI_ACC_HANDLE_T pci_mem_handle;
+ struct pqi_cap pqi_cap;
+ struct pqi_pref_settings pref_settings;
+ char fw_version[11];
+ uint16_t fw_build_number;
+ uint32_t card; /* index to aac_cards */
+ uint16_t vendid; /* vendor id */
+ uint16_t subvendid; /* sub vendor id */
+ uint16_t devid; /* device id */
+ uint16_t subsysid; /* sub system id */
+ controller_state_t ctlr_state;
+ struct dma_mem err_buf_dma_mem;
+ struct dma_mem admin_queue_dma_mem;
+ struct dma_mem op_ibq_dma_mem;
+ struct dma_mem op_obq_dma_mem;
+ struct dma_mem event_q_dma_mem;
+ struct dma_mem sg_dma_desc[PQISRC_MAX_OUTSTANDING_REQ];
+ ib_queue_t admin_ib_queue;
+ ob_queue_t admin_ob_queue;
+ ob_queue_t event_q;
+ ob_queue_t op_ob_q[PQISRC_MAX_SUPPORTED_OP_OB_Q - 1];/* 1 event queue */
+ ib_queue_t op_raid_ib_q[PQISRC_MAX_SUPPORTED_OP_RAID_IB_Q];
+ ib_queue_t op_aio_ib_q[PQISRC_MAX_SUPPORTED_OP_AIO_IB_Q];
+ uint32_t max_outstanding_io;
+ uint32_t max_io_for_scsi_ml;
+ uint32_t num_op_raid_ibq;
+ uint32_t num_op_aio_ibq;
+ uint32_t num_op_obq;
+ uint32_t num_elem_per_op_ibq;
+ uint32_t num_elem_per_op_obq;
+ uint32_t ibq_elem_size;
+ uint32_t obq_elem_size;
+ pqi_dev_cap_t pqi_dev_cap;
+ uint16_t max_ib_iu_length_per_fw;
+ uint16_t max_ib_iu_length;
+ unsigned max_sg_per_iu;
+ uint8_t ib_spanning_supported : 1;
+ uint8_t ob_spanning_supported : 1;
+ pqi_event_config_t event_config;
+ struct pqi_event pending_events[PQI_NUM_SUPPORTED_EVENTS];
+ int intr_type;
+ int intr_count;
+ int num_cpus_online;
+ boolean_t share_opq_and_eventq;
+ rcb_t *rcb;
+#ifndef LOCKFREE_STACK
+ pqi_taglist_t taglist;
+#else
+ lockless_stack_t taglist;
+#endif /* LOCKFREE_STACK */
+ boolean_t devlist_lockcreated;
+ OS_LOCK_T devlist_lock OS_ATTRIBUTE_ALIGNED(8);
+ char devlist_lock_name[LOCKNAME_SIZE];
+ pqi_scsi_dev_t *device_list[PQI_MAX_DEVICES][PQI_MAX_MULTILUN];
+ OS_SEMA_LOCK_T scan_lock;
+ uint8_t lun_count[PQI_MAX_DEVICES];
+ OS_ATOMIC64_T num_intrs;
+ uint64_t prev_num_intrs;
+ uint64_t prev_heartbeat_count;
+ uint64_t *heartbeat_counter_abs_addr;
+ uint64_t heartbeat_counter_off;
+ uint64_t num_heartbeats_requested;
+ uint32_t bus_id;
+ uint32_t device_id;
+ uint32_t func_id;
+ char *os_name;
+ boolean_t ctrl_online;
+ uint8_t pqi_reset_quiesce_allowed : 1;
+ boolean_t ctrl_in_pqi_mode;
+}pqisrc_softstate_t;
+
+#endif
diff --git a/sys/dev/smartpqi/smartpqi_tag.c b/sys/dev/smartpqi/smartpqi_tag.c
new file mode 100644
index 000000000000..9f1abe632c16
--- /dev/null
+++ b/sys/dev/smartpqi/smartpqi_tag.c
@@ -0,0 +1,265 @@
+/*-
+ * Copyright (c) 2018 Microsemi Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#include "smartpqi_includes.h"
+
+#ifndef LOCKFREE_STACK
+
+/*
+ * Function used to release the tag from taglist.
+ */
+void pqisrc_put_tag(pqi_taglist_t *taglist, uint32_t elem)
+{
+ /*DBG_FUNC("IN\n");*/
+
+ OS_ACQUIRE_SPINLOCK(&taglist->lock);
+
+ ASSERT(taglist->num_elem < taglist->max_elem);
+
+ if (taglist->num_elem < taglist->max_elem) {
+ taglist->elem_array[taglist->tail] = elem;
+ taglist->num_elem++;
+ taglist->tail = (taglist->tail + 1) % taglist->max_elem;
+ }
+
+ OS_RELEASE_SPINLOCK(&taglist->lock);
+
+ /*DBG_FUNC("OUT\n");*/
+}
+
+/*
+ * Function used to get an unoccupied tag from the tag list.
+ */
+uint32_t pqisrc_get_tag(pqi_taglist_t *taglist)
+{
+ uint32_t elem = INVALID_ELEM;
+
+ /*DBG_FUNC("IN\n");*/
+
+ OS_ACQUIRE_SPINLOCK(&taglist->lock);
+
+ ASSERT(taglist->num_elem > 0);
+
+ if (taglist->num_elem > 0) {
+ elem = taglist->elem_array[taglist->head];
+ taglist->num_elem--;
+ taglist->head = (taglist->head + 1) % taglist->max_elem;
+ }
+
+ OS_RELEASE_SPINLOCK(&taglist->lock);
+
+ /*DBG_FUNC("OUT got %d\n", elem);*/
+ return elem;
+}
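+/*
+ * Illustrative walk-through (not driver code): with max_elem = 4, four
+ * consecutive puts advance tail 0 -> 1 -> 2 -> 3 -> 0 and num_elem to 4;
+ * the next get returns elem_array[0] and advances head to 1.  Because
+ * head == tail both when the queue is full and when it is empty, it is
+ * num_elem that tells the two states apart.
+ */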
+
+/*
+ * Initialize circular queue implementation of tag list.
+ */
+int pqisrc_init_taglist(pqisrc_softstate_t *softs, pqi_taglist_t *taglist,
+ uint32_t max_elem)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ uint32_t i = 0;
+
+ DBG_FUNC("IN\n");
+
+ taglist->max_elem = max_elem;
+ taglist->num_elem = 0;
+ taglist->head = 0;
+ taglist->tail = 0;
+ taglist->elem_array = os_mem_alloc(softs,
+ (max_elem * sizeof(uint32_t)));
+ if (!(taglist->elem_array)) {
+ DBG_FUNC("Unable to allocate memory for taglist\n");
+ ret = PQI_STATUS_FAILURE;
+ goto err_out;
+ }
+
+ os_strlcpy(taglist->lockname, "tag_lock", LOCKNAME_SIZE);
+ ret = os_init_spinlock(softs, &taglist->lock, taglist->lockname);
+ if (ret) {
+ DBG_ERR("tag lock initialization failed\n");
+ taglist->lockcreated = false;
+ goto err_lock;
+ }
+ taglist->lockcreated = true;
+
+ /* indices 1 to max_elem are considered as valid tags */
+ for (i = 1; i <= max_elem; i++) {
+ softs->rcb[i].tag = INVALID_ELEM;
+ pqisrc_put_tag(taglist, i);
+ }
+
+ DBG_FUNC("OUT\n");
+ return ret;
+
+err_lock:
+ os_mem_free(softs, (char *)taglist->elem_array,
+ (taglist->max_elem * sizeof(uint32_t)));
+ taglist->elem_array = NULL;
+err_out:
+ DBG_FUNC("OUT failed\n");
+ return ret;
+}
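+/*
+ * Note that tag 0 is never handed out: the loop above seeds tags
+ * 1..max_elem and touches softs->rcb[i] for each of them, so the caller
+ * must have sized the rcb array with at least max_elem + 1 entries.
+ */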
+
+/*
+ * Destroy circular queue implementation of tag list.
+ */
+void pqisrc_destroy_taglist(pqisrc_softstate_t *softs, pqi_taglist_t *taglist)
+{
+ DBG_FUNC("IN\n");
+ os_mem_free(softs, (char *)taglist->elem_array,
+ (taglist->max_elem * sizeof(uint32_t)));
+ taglist->elem_array = NULL;
+
+ if (taglist->lockcreated) {
+ os_uninit_spinlock(&taglist->lock);
+ taglist->lockcreated = false;
+ }
+
+ DBG_FUNC("OUT\n");
+}
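+/*
+ * Typical caller pattern, sketched for illustration only (everything
+ * here other than the taglist API itself is a placeholder):
+ *
+ *   tag = pqisrc_get_tag(&softs->taglist);
+ *   if (tag != INVALID_ELEM) {
+ *           rcb = &softs->rcb[tag];   (the tag doubles as the rcb index)
+ *           ... submit the request; once it completes ...
+ *           pqisrc_put_tag(&softs->taglist, tag);
+ *   }
+ */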
+
+#else /* LOCKFREE_STACK */
+
+/*
+ * Initialize the lock-free stack implementation of the tag list.
+ */
+int pqisrc_init_taglist(pqisrc_softstate_t *softs, lockless_stack_t *stack,
+ uint32_t max_elem)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ uint32_t index = 0;
+
+ DBG_FUNC("IN\n");
+
+ /* indices 1 to max_elem are valid tags; index 0 is the empty sentinel */
+ stack->num_elements = max_elem + 1;
+ stack->head.data = 0;
+ DBG_INFO("Stack head address: %p\n", &stack->head);
+
+ /* Allocate memory for the stack */
+ stack->next_index_array = (uint32_t*)os_mem_alloc(softs,
+ (stack->num_elements * sizeof(uint32_t)));
+ if (!(stack->next_index_array)) {
+ DBG_ERR("Unable to allocate memory for stack\n");
+ ret = PQI_STATUS_FAILURE;
+ goto err_out;
+ }
+
+ /* push all the entries to the stack */
+ for (index = 1; index < stack->num_elements; index++) {
+ softs->rcb[index].tag = INVALID_ELEM;
+ pqisrc_put_tag(stack, index);
+ }
+
+ DBG_FUNC("OUT\n");
+ return ret;
+err_out:
+ DBG_FUNC("Failed OUT\n");
+ return ret;
+}
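+/*
+ * Layout note: the stack is a linked list threaded through
+ * next_index_array[]; next_index_array[i] holds the tag sitting below
+ * i on the stack, and 0 terminates the chain (tag 0 is never
+ * allocated).  head packs the top-of-stack index and a sequence number
+ * into one 64-bit word so both can be exchanged by a single atomic CAS.
+ */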
+
+/*
+ * Destroy the lock-free stack implementation of the tag list.
+ */
+void pqisrc_destroy_taglist(pqisrc_softstate_t *softs, lockless_stack_t *stack)
+{
+ DBG_FUNC("IN\n");
+
+ /* de-allocate stack memory */
+ if (stack->next_index_array) {
+ os_mem_free(softs, (char *)stack->next_index_array,
+ (stack->num_elements * sizeof(uint32_t)));
+ stack->next_index_array = NULL;
+ }
+
+ DBG_FUNC("OUT\n");
+}
+
+/*
+ * Function used to release (push) a tag back onto the lock-free stack.
+ */
+void pqisrc_put_tag(lockless_stack_t *stack, uint32_t index)
+{
+ union head_list cur_head, new_head;
+
+ DBG_FUNC("IN\n");
+ DBG_INFO("push tag :%d\n",index);
+
+ if (index >= stack->num_elements) {
+ ASSERT(false);
+ DBG_ERR("Pushed invalid index\n"); /* index out of range */
+ return;
+ }
+
+ if (stack->next_index_array[index] != 0) {
+ ASSERT(false);
+ DBG_ERR("Index already present as tag in the stack\n");
+ return;
+ }
+
+ do {
+ cur_head = stack->head;
+ /* increment seq_no */
+ new_head.top.seq_no = cur_head.top.seq_no + 1;
+ /* update the index at the top of the stack with the new index */
+ new_head.top.index = index;
+ /* Create a link to the previous index */
+ stack->next_index_array[index] = cur_head.top.index;
+ } while (OS_ATOMIC64_CAS(&stack->head.data, cur_head.data, new_head.data)
+ != cur_head.data);
+ DBG_FUNC("OUT\n");
+ return;
+}
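+/*
+ * The unconditional seq_no increment above is what defeats the classic
+ * ABA hazard of lock-free stacks: if another thread pops and re-pushes
+ * the same top index between our read of head and the CAS, the sequence
+ * numbers still differ, so the stale head.data fails the compare and
+ * the loop retries.
+ */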
+
+/*
+ * Function used to pop an unoccupied tag off the lock-free stack.
+ */
+uint32_t pqisrc_get_tag(lockless_stack_t *stack)
+{
+ union head_list cur_head, new_head;
+
+ DBG_FUNC("IN\n");
+ do {
+ cur_head = stack->head;
+ if (cur_head.top.index == 0) /* stack empty */
+ return INVALID_ELEM;
+ /* increment seq_no field */
+ new_head.top.seq_no = cur_head.top.seq_no + 1;
+ /* update the index at the top of the stack with the next index */
+ new_head.top.index = stack->next_index_array[cur_head.top.index];
+ } while (OS_ATOMIC64_CAS(&stack->head.data, cur_head.data, new_head.data)
+ != cur_head.data);
+ stack->next_index_array[cur_head.top.index] = 0;
+
+ DBG_INFO("pop tag: %d\n",cur_head.top.index);
+ DBG_FUNC("OUT\n");
+ return cur_head.top.index; /*tag*/
+}
+#endif /* LOCKFREE_STACK */
diff --git a/sys/modules/Makefile b/sys/modules/Makefile
index cd2ac3fae744..ec0292ba0dad 100644
--- a/sys/modules/Makefile
+++ b/sys/modules/Makefile
@@ -355,6 +355,7 @@ SUBDIR= \
siis \
sis \
sk \
+ ${_smartpqi} \
smbfs \
sn \
snp \
@@ -729,6 +730,7 @@ _qlnx= qlnx
_sfxge= sfxge
_sgx= sgx
_sgx_linux= sgx_linux
+_smartpqi= smartpqi
.if ${MK_BHYVE} != "no" || defined(ALL_MODULES)
_vmm= vmm
diff --git a/sys/modules/smartpqi/Makefile b/sys/modules/smartpqi/Makefile
new file mode 100644
index 000000000000..52c5ac2e0094
--- /dev/null
+++ b/sys/modules/smartpqi/Makefile
@@ -0,0 +1,12 @@
+# 5/10/2017
+# $FreeBSD$
+
+KMOD = smartpqi
+
+.PATH: ${.CURDIR}/../../dev/${KMOD}
+
+SRCS= smartpqi_mem.c smartpqi_intr.c smartpqi_main.c smartpqi_cam.c \
+ smartpqi_ioctl.c smartpqi_misc.c smartpqi_sis.c smartpqi_init.c \
+ smartpqi_queue.c smartpqi_tag.c smartpqi_cmd.c smartpqi_request.c \
+ smartpqi_response.c smartpqi_event.c smartpqi_helper.c smartpqi_discovery.c
+
+SRCS+= device_if.h bus_if.h pci_if.h opt_scsi.h opt_cam.h
+
+.include <bsd.kmod.mk>
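+
+# A note on typical use (standard FreeBSD kmod workflow, nothing specific
+# to this driver): running "make" here yields smartpqi.ko, which can be
+# loaded with kldload(8) or at boot via smartpqi_load="YES" in loader.conf(5).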