path: root/sys/dev/cxgbe/cudbg
Diffstat (limited to 'sys/dev/cxgbe/cudbg')
-rw-r--r--	sys/dev/cxgbe/cudbg/cudbg.h	474
-rw-r--r--	sys/dev/cxgbe/cudbg/cudbg_common.c	96
-rw-r--r--	sys/dev/cxgbe/cudbg/cudbg_entity.h	909
-rw-r--r--	sys/dev/cxgbe/cudbg/cudbg_flash_utils.c	492
-rw-r--r--	sys/dev/cxgbe/cudbg/cudbg_lib.c	4433
-rw-r--r--	sys/dev/cxgbe/cudbg/cudbg_lib.h	255
-rw-r--r--	sys/dev/cxgbe/cudbg/cudbg_lib_common.h	174
-rw-r--r--	sys/dev/cxgbe/cudbg/cudbg_wtp.c	1310
-rw-r--r--	sys/dev/cxgbe/cudbg/fastlz.c	555
-rw-r--r--	sys/dev/cxgbe/cudbg/fastlz.h	62
-rw-r--r--	sys/dev/cxgbe/cudbg/fastlz_api.c	531
11 files changed, 9291 insertions, 0 deletions
diff --git a/sys/dev/cxgbe/cudbg/cudbg.h b/sys/dev/cxgbe/cudbg/cudbg.h
new file mode 100644
index 000000000000..5d64b6f4b8a7
--- /dev/null
+++ b/sys/dev/cxgbe/cudbg/cudbg.h
@@ -0,0 +1,474 @@
+/*-
+ * Copyright (c) 2017 Chelsio Communications, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+/*
+ * Chelsio Unified Debug Interface header file.
+ * Version 1.1
+ */
+#ifndef _CUDBG_IF_H_
+#define _CUDBG_IF_H_
+
+#ifdef __GNUC__
+#define ATTRIBUTE_UNUSED __attribute__ ((unused))
+#else
+#define ATTRIBUTE_UNUSED
+#endif
+
+#if defined(CONFIG_CUDBG_DEBUG)
+#define cudbg_debug(pdbg_init, format, ...) do {\
+ pdbg_init->print(format, ##__VA_ARGS__); \
+} while (0)
+#else
+#define cudbg_debug(pdbg_init, format, ...) do { } while (0)
+#endif
+
+#define OUT
+#define IN
+#define INOUT
+
+/* Error codes */
+
+#define CUDBG_STATUS_SUCCESS 0
+#define CUDBG_STATUS_NOSPACE -2
+#define CUDBG_STATUS_FLASH_WRITE_FAIL -3
+#define CUDBG_STATUS_FLASH_READ_FAIL -4
+#define CUDBG_STATUS_UNDEFINED_OUT_BUF -5
+#define CUDBG_STATUS_UNDEFINED_CBFN -6
+#define CUDBG_STATUS_UNDEFINED_PRINTF_CBFN -7
+#define CUDBG_STATUS_ADAP_INVALID -8
+#define CUDBG_STATUS_FLASH_EMPTY -9
+#define CUDBG_STATUS_NO_ADAPTER -10
+#define CUDBG_STATUS_NO_SIGNATURE -11
+#define CUDBG_STATUS_MULTIPLE_REG -12
+#define CUDBG_STATUS_UNREGISTERED -13
+#define CUDBG_STATUS_UNDEFINED_ENTITY -14
+#define CUDBG_STATUS_REG_FAIlED -15
+#define CUDBG_STATUS_DEVLOG_FAILED -16
+#define CUDBG_STATUS_SMALL_BUFF -17
+#define CUDBG_STATUS_CHKSUM_MISSMATCH -18
+#define CUDBG_STATUS_NO_SCRATCH_MEM -19
+#define CUDBG_STATUS_OUTBUFF_OVERFLOW -20
+#define CUDBG_STATUS_INVALID_BUFF -21 /* Invalid magic */
+#define CUDBG_STATUS_FILE_OPEN_FAIL -22
+#define CUDBG_STATUS_DEVLOG_INT_FAIL -23
+#define CUDBG_STATUS_ENTITY_NOT_FOUND -24
+#define CUDBG_STATUS_DECOMPRESS_FAIL -25
+#define CUDBG_STATUS_BUFFER_SHORT -26
+#define CUDBG_METADATA_VERSION_MISMATCH -27
+#define CUDBG_STATUS_NOT_IMPLEMENTED -28
+#define CUDBG_SYSTEM_ERROR -29
+#define CUDBG_STATUS_MMAP_FAILED -30
+#define CUDBG_STATUS_FILE_WRITE_FAILED -31
+#define CUDBG_STATUS_CCLK_NOT_DEFINED -32
+#define CUDBG_STATUS_FLASH_FULL -33
+#define CUDBG_STATUS_SECTOR_EMPTY -34
+#define CUDBG_STATUS_ENTITY_NOT_REQUESTED -35
+#define CUDBG_STATUS_NOT_SUPPORTED -36
+#define CUDBG_STATUS_FILE_READ_FAILED -37
+#define CUDBG_STATUS_CORRUPTED -38
+#define CUDBG_STATUS_INVALID_INDEX -39
+
+#define CUDBG_MAJOR_VERSION 1
+#define CUDBG_MINOR_VERSION 14
+#define CUDBG_BUILD_VERSION 0
+
+#define CUDBG_FILE_NAME_LEN 256
+#define CUDBG_DIR_NAME_LEN 256
+#define CUDBG_MAX_BITMAP_LEN 16
+
+static char ATTRIBUTE_UNUSED * err_msg[] = {
+ "Success",
+ "Unknown",
+ "No space",
+ "Flash write fail",
+ "Flash read fail",
+ "Undefined out buf",
+ "Callback function undefined",
+ "Print callback function undefined",
+ "ADAP invalid",
+ "Flash empty",
+ "No adapter",
+ "No signature",
+ "Multiple registration",
+ "Unregistered",
+ "Undefined entity",
+ "Reg failed",
+ "Devlog failed",
+ "Small buff",
+ "Checksum mismatch",
+ "No scratch memory",
+ "Outbuff overflow",
+ "Invalid buffer",
+ "File open fail",
+ "Devlog int fail",
+ "Entity not found",
+ "Decompress fail",
+ "Buffer short",
+ "Version mismatch",
+ "Not implemented",
+ "System error",
+ "Mmap failed",
+ "File write failed",
+ "cclk not defined",
+ "Flash full",
+ "Sector empty",
+ "Entity not requested",
+ "Not supported",
+ "File read fail",
+ "Corrupted",
+ "Invalid Index"
+};
+
+enum CUDBG_DBG_ENTITY_TYPE {
+ CUDBG_ALL = 0,
+ CUDBG_REG_DUMP = 1,
+ CUDBG_DEV_LOG = 2,
+ CUDBG_CIM_LA = 3,
+ CUDBG_CIM_MA_LA = 4,
+ CUDBG_CIM_QCFG = 5,
+ CUDBG_CIM_IBQ_TP0 = 6,
+ CUDBG_CIM_IBQ_TP1 = 7,
+ CUDBG_CIM_IBQ_ULP = 8,
+ CUDBG_CIM_IBQ_SGE0 = 9,
+ CUDBG_CIM_IBQ_SGE1 = 10,
+ CUDBG_CIM_IBQ_NCSI = 11,
+ CUDBG_CIM_OBQ_ULP0 = 12,
+ CUDBG_CIM_OBQ_ULP1 = 13,
+ CUDBG_CIM_OBQ_ULP2 = 14,
+ CUDBG_CIM_OBQ_ULP3 = 15,
+ CUDBG_CIM_OBQ_SGE = 16,
+ CUDBG_CIM_OBQ_NCSI = 17,
+ CUDBG_EDC0 = 18,
+ CUDBG_EDC1 = 19,
+ CUDBG_MC0 = 20,
+ CUDBG_MC1 = 21,
+ CUDBG_RSS = 22,
+ CUDBG_RSS_PF_CONF = 23,
+ CUDBG_RSS_KEY = 24,
+ CUDBG_RSS_VF_CONF = 25,
+ CUDBG_RSS_CONF = 26,
+ CUDBG_PATH_MTU = 27,
+ CUDBG_SW_STATE = 28,
+ CUDBG_WTP = 29,
+ CUDBG_PM_STATS = 30,
+ CUDBG_HW_SCHED = 31,
+ CUDBG_TCP_STATS = 32,
+ CUDBG_TP_ERR_STATS = 33,
+ CUDBG_FCOE_STATS = 34,
+ CUDBG_RDMA_STATS = 35,
+ CUDBG_TP_INDIRECT = 36,
+ CUDBG_SGE_INDIRECT = 37,
+ CUDBG_CPL_STATS = 38,
+ CUDBG_DDP_STATS = 39,
+ CUDBG_WC_STATS = 40,
+ CUDBG_ULPRX_LA = 41,
+ CUDBG_LB_STATS = 42,
+ CUDBG_TP_LA = 43,
+ CUDBG_MEMINFO = 44,
+ CUDBG_CIM_PIF_LA = 45,
+ CUDBG_CLK = 46,
+ CUDBG_CIM_OBQ_RXQ0 = 47,
+ CUDBG_CIM_OBQ_RXQ1 = 48,
+ CUDBG_MAC_STATS = 49,
+ CUDBG_PCIE_INDIRECT = 50,
+ CUDBG_PM_INDIRECT = 51,
+ CUDBG_FULL = 52,
+ CUDBG_TX_RATE = 53,
+ CUDBG_TID_INFO = 54,
+ CUDBG_PCIE_CONFIG = 55,
+ CUDBG_DUMP_CONTEXT = 56,
+ CUDBG_MPS_TCAM = 57,
+ CUDBG_VPD_DATA = 58,
+ CUDBG_LE_TCAM = 59,
+ CUDBG_CCTRL = 60,
+ CUDBG_MA_INDIRECT = 61,
+ CUDBG_ULPTX_LA = 62,
+ CUDBG_EXT_ENTITY = 63,
+ CUDBG_UP_CIM_INDIRECT = 64,
+ CUDBG_PBT_TABLE = 65,
+ CUDBG_MBOX_LOG = 66,
+ CUDBG_HMA_INDIRECT = 67,
+ CUDBG_MAX_ENTITY = 68,
+};
+
+#define ENTITY_FLAG_NULL 0
+#define ENTITY_FLAG_REGISTER 1
+#define ENTITY_FLAG_BINARY 2
+#define ENTITY_FLAG_FW_NO_ATTACH 3
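+/* Except for ENTITY_FLAG_NULL, these are bit positions; entity_list below
+ * stores them as (1 << flag) masks. */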
+
+/* file_name matches Linux cxgb4 debugfs entry names. */
+struct el {char *name; char *file_name; int bit; u32 flag; };
+static struct el ATTRIBUTE_UNUSED entity_list[] = {
+ {"all", "all", CUDBG_ALL, ENTITY_FLAG_NULL},
+ {"regdump", "regdump", CUDBG_REG_DUMP, 1 << ENTITY_FLAG_REGISTER},
+ /* {"reg", CUDBG_REG_DUMP},*/
+ {"devlog", "devlog", CUDBG_DEV_LOG, ENTITY_FLAG_NULL},
+ {"cimla", "cim_la", CUDBG_CIM_LA, ENTITY_FLAG_NULL},
+ {"cimmala", "cim_ma_la", CUDBG_CIM_MA_LA, ENTITY_FLAG_NULL},
+ {"cimqcfg", "cim_qcfg", CUDBG_CIM_QCFG, ENTITY_FLAG_NULL},
+ {"ibqtp0", "ibq_tp0", CUDBG_CIM_IBQ_TP0, ENTITY_FLAG_NULL},
+ {"ibqtp1", "ibq_tp1", CUDBG_CIM_IBQ_TP1, ENTITY_FLAG_NULL},
+ {"ibqulp", "ibq_ulp", CUDBG_CIM_IBQ_ULP, ENTITY_FLAG_NULL},
+ {"ibqsge0", "ibq_sge0", CUDBG_CIM_IBQ_SGE0, ENTITY_FLAG_NULL},
+ {"ibqsge1", "ibq_sge1", CUDBG_CIM_IBQ_SGE1, ENTITY_FLAG_NULL},
+ {"ibqncsi", "ibq_ncsi", CUDBG_CIM_IBQ_NCSI, ENTITY_FLAG_NULL},
+ {"obqulp0", "obq_ulp0", CUDBG_CIM_OBQ_ULP0, ENTITY_FLAG_NULL},
+ /* {"cimobqulp1", CUDBG_CIM_OBQ_ULP1},*/
+ {"obqulp1", "obq_ulp1", CUDBG_CIM_OBQ_ULP1, ENTITY_FLAG_NULL},
+ {"obqulp2", "obq_ulp2", CUDBG_CIM_OBQ_ULP2, ENTITY_FLAG_NULL},
+ {"obqulp3", "obq_ulp3", CUDBG_CIM_OBQ_ULP3, ENTITY_FLAG_NULL},
+ {"obqsge", "obq_sge", CUDBG_CIM_OBQ_SGE, ENTITY_FLAG_NULL},
+ {"obqncsi", "obq_ncsi", CUDBG_CIM_OBQ_NCSI, ENTITY_FLAG_NULL},
+ {"edc0", "edc0", CUDBG_EDC0, (1 << ENTITY_FLAG_BINARY)},
+ {"edc1", "edc1", CUDBG_EDC1, (1 << ENTITY_FLAG_BINARY)},
+ {"mc0", "mc0", CUDBG_MC0, (1 << ENTITY_FLAG_BINARY)},
+ {"mc1", "mc1", CUDBG_MC1, (1 << ENTITY_FLAG_BINARY)},
+ {"rss", "rss", CUDBG_RSS, ENTITY_FLAG_NULL},
+ {"rss_pf_config", "rss_pf_config", CUDBG_RSS_PF_CONF, ENTITY_FLAG_NULL},
+ {"rss_key", "rss_key", CUDBG_RSS_KEY, ENTITY_FLAG_NULL},
+ {"rss_vf_config", "rss_vf_config", CUDBG_RSS_VF_CONF, ENTITY_FLAG_NULL},
+ {"rss_config", "rss_config", CUDBG_RSS_CONF, ENTITY_FLAG_NULL},
+ {"pathmtu", "path_mtus", CUDBG_PATH_MTU, ENTITY_FLAG_NULL},
+ {"swstate", "sw_state", CUDBG_SW_STATE, ENTITY_FLAG_NULL},
+ {"wtp", "wtp", CUDBG_WTP, ENTITY_FLAG_NULL},
+ {"pmstats", "pm_stats", CUDBG_PM_STATS, ENTITY_FLAG_NULL},
+ {"hwsched", "hw_sched", CUDBG_HW_SCHED, ENTITY_FLAG_NULL},
+ {"tcpstats", "tcp_stats", CUDBG_TCP_STATS, ENTITY_FLAG_NULL},
+ {"tperrstats", "tp_err_stats", CUDBG_TP_ERR_STATS, ENTITY_FLAG_NULL},
+ {"fcoestats", "fcoe_stats", CUDBG_FCOE_STATS, ENTITY_FLAG_NULL},
+ {"rdmastats", "rdma_stats", CUDBG_RDMA_STATS, ENTITY_FLAG_NULL},
+ {"tpindirect", "tp_indirect", CUDBG_TP_INDIRECT,
+ 1 << ENTITY_FLAG_REGISTER},
+ {"sgeindirect", "sge_indirect", CUDBG_SGE_INDIRECT,
+ 1 << ENTITY_FLAG_REGISTER},
+ {"cplstats", "cpl_stats", CUDBG_CPL_STATS, ENTITY_FLAG_NULL},
+ {"ddpstats", "ddp_stats", CUDBG_DDP_STATS, ENTITY_FLAG_NULL},
+ {"wcstats", "wc_stats", CUDBG_WC_STATS, ENTITY_FLAG_NULL},
+ {"ulprxla", "ulprx_la", CUDBG_ULPRX_LA, ENTITY_FLAG_NULL},
+ {"lbstats", "lb_stats", CUDBG_LB_STATS, ENTITY_FLAG_NULL},
+ {"tpla", "tp_la", CUDBG_TP_LA, ENTITY_FLAG_NULL},
+ {"meminfo", "meminfo", CUDBG_MEMINFO, ENTITY_FLAG_NULL},
+ {"cimpifla", "cim_pif_la", CUDBG_CIM_PIF_LA, ENTITY_FLAG_NULL},
+ {"clk", "clk", CUDBG_CLK, ENTITY_FLAG_NULL},
+ {"obq_sge_rx_q0", "obq_sge_rx_q0", CUDBG_CIM_OBQ_RXQ0,
+ ENTITY_FLAG_NULL},
+ {"obq_sge_rx_q1", "obq_sge_rx_q1", CUDBG_CIM_OBQ_RXQ1,
+ ENTITY_FLAG_NULL},
+ {"macstats", "mac_stats", CUDBG_MAC_STATS, ENTITY_FLAG_NULL},
+ {"pcieindirect", "pcie_indirect", CUDBG_PCIE_INDIRECT,
+ 1 << ENTITY_FLAG_REGISTER},
+ {"pmindirect", "pm_indirect", CUDBG_PM_INDIRECT,
+ 1 << ENTITY_FLAG_REGISTER},
+ {"full", "full", CUDBG_FULL, ENTITY_FLAG_NULL},
+ {"txrate", "tx_rate", CUDBG_TX_RATE, ENTITY_FLAG_NULL},
+ {"tidinfo", "tids", CUDBG_TID_INFO, ENTITY_FLAG_NULL |
+ (1 << ENTITY_FLAG_FW_NO_ATTACH)},
+ {"pcieconfig", "pcie_config", CUDBG_PCIE_CONFIG, ENTITY_FLAG_NULL},
+ {"dumpcontext", "dump_context", CUDBG_DUMP_CONTEXT, ENTITY_FLAG_NULL},
+ {"mpstcam", "mps_tcam", CUDBG_MPS_TCAM, ENTITY_FLAG_NULL},
+ {"vpddata", "vpd_data", CUDBG_VPD_DATA, ENTITY_FLAG_NULL},
+ {"letcam", "le_tcam", CUDBG_LE_TCAM, ENTITY_FLAG_NULL},
+ {"cctrl", "cctrl", CUDBG_CCTRL, ENTITY_FLAG_NULL},
+ {"maindirect", "ma_indirect", CUDBG_MA_INDIRECT,
+ 1 << ENTITY_FLAG_REGISTER},
+ {"ulptxla", "ulptx_la", CUDBG_ULPTX_LA, ENTITY_FLAG_NULL},
+ {"extentity", "ext_entity", CUDBG_EXT_ENTITY, ENTITY_FLAG_NULL},
+ {"upcimindirect", "up_cim_indirect", CUDBG_UP_CIM_INDIRECT,
+ 1 << ENTITY_FLAG_REGISTER},
+ {"pbttables", "pbt_tables", CUDBG_PBT_TABLE, ENTITY_FLAG_NULL},
+ {"mboxlog", "mboxlog", CUDBG_MBOX_LOG, ENTITY_FLAG_NULL},
+ {"hmaindirect", "hma_indirect", CUDBG_HMA_INDIRECT,
+ 1 << ENTITY_FLAG_REGISTER},
+};
+
+typedef int (*cudbg_print_cb) (char *str, ...);
+
+struct cudbg_init_hdr {
+ u8 major_ver;
+ u8 minor_ver;
+ u8 build_ver;
+ u8 res;
+ u16 init_struct_size;
+};
+
+struct cudbg_flash_hdr {
+ u32 signature;
+ u8 major_ver;
+ u8 minor_ver;
+ u8 build_ver;
+ u8 res;
+ u64 timestamp;
+ u64 time_res;
+ u32 hdr_len;
+ u32 data_len;
+ u32 hdr_flags;
+ u32 sec_seq_no;
+ u32 reserved[22];
+};
+
+struct cudbg_param {
+ u16 param_type;
+ u16 reserved;
+ union {
+ struct {
+ u32 memtype; /* which memory (EDC0, EDC1, MC) */
+ u32 start; /* start of log in firmware memory */
+ u32 size; /* size of log */
+ } devlog_param;
+ struct {
+ struct mbox_cmd_log *log;
+ u16 mbox_cmds;
+ } mboxlog_param;
+ struct {
+ u8 caller_string[100];
+ u8 os_type;
+ } sw_state_param;
+ u64 time;
+ u8 tcb_bit_param;
+ void *adap;
+ void *access_lock;
+ } u;
+};
+
+/* params for tcb_bit_param */
+#define CUDBG_TCB_BRIEF_PARAM 0x1
+#define CUDBG_TCB_FROM_CARD_PARAM 0x2
+#define CUDBG_TCB_AS_SCB_PARAM 0x4
+
+/*
+ * What is OFFLINE_VIEW_ONLY mode?
+ *
+ * The cudbg framework will be used only to interpret previously collected
+ * data stored in a file (i.e. NOT hw flash).
+ */
+
+struct cudbg_init {
+ struct cudbg_init_hdr header;
+ struct adapter *adap; /* Pointer to adapter structure
+ with filled fields */
+ cudbg_print_cb print; /* Platform dependent print
+ function */
+ u32 verbose:1; /* Turn on verbose print */
+ u32 use_flash:1; /* Use flash to collect or view
+ debug */
+ u32 full_mode:1; /* If set, cudbg will pull in
+ common code */
+	u32 no_compress:1;	/* Don't compress while storing
+				   the collected debug */
+	u32 info:1;		/* Show just the info, don't
+				   interpret */
+ u32 reserved:27;
+ u8 dbg_bitmap[CUDBG_MAX_BITMAP_LEN];
+ /* Bit map to select the dbg
+ data type to be collected
+ or viewed */
+};
+
+
+/********************************* Helper functions *************************/
+static inline void set_dbg_bitmap(u8 *bitmap, enum CUDBG_DBG_ENTITY_TYPE type)
+{
+ int index = type / 8;
+ int bit = type % 8;
+
+ bitmap[index] |= (1 << bit);
+}
+
+static inline void reset_dbg_bitmap(u8 *bitmap, enum CUDBG_DBG_ENTITY_TYPE type)
+{
+ int index = type / 8;
+ int bit = type % 8;
+
+ bitmap[index] &= ~(1 << bit);
+}
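+
+/*
+ * Worked example: CUDBG_MAC_STATS is entity 49, so set_dbg_bitmap(bitmap, 49)
+ * sets bit 1 (49 % 8) of bitmap[6] (49 / 8).  All CUDBG_MAX_ENTITY (68)
+ * entities fit in 9 bytes, well within CUDBG_MAX_BITMAP_LEN (16).
+ */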
+
+/************************** End of Helper functions **************************/
+
+/* API Prototypes */
+
+/**
+ * cudbg_alloc_handle - Allocates and initializes a handle that represents
+ * cudbg state.  Needs to be called first, before calling any other function.
+ *
+ * Returns a pointer to memory that has a cudbg_init structure at the
+ * beginning and enough space after that for internal bookkeeping.
+ */
+
+void *cudbg_alloc_handle(void);
+static inline struct cudbg_init *cudbg_get_init(void *handle)
+{
+ return (handle);
+}
+
+/**
+ * cudbg_collect - Collect and store debug information.
+ * ## Parameters ##
+ * @handle : A pointer returned by cudbg_alloc_handle.
+ * @outbuf : pointer to output buffer, to store the collected information
+ * or to use it as a scratch buffer in case HW flash is used to
+ * store the debug information.
+ * @outbuf_size : Size of output buffer.
+ * ## Return ##
+ * If the function succeeds, the return value is the size of the debug
+ * information collected and stored.  A negative value indicates an error.
+ */
+int cudbg_collect(void *handle, void *outbuf, u32 *outbuf_size);
+
+/**
+ * cudbg_free_handle - Release cudbg resources.
+ * ## Parameters ##
+ * @handle : A pointer returned by cudbg_alloc_handle.
+ */
+
+void cudbg_free_handle(IN void *handle);
+
+/**
+ * cudbg_read_flash_details - Read the cudbg flash header from adapter flash.
+ *                            This is used by the consumer mainly to learn
+ *                            the size of the data in flash.
+ * ## Parameters ##
+ * @handle : A pointer returned by cudbg_alloc_handle.
+ * @data : A pointer to the data/header buffer.
+ */
+
+int cudbg_read_flash_details(void *handle, struct cudbg_flash_hdr *data);
+
+/**
+ * cudbg_read_flash_data - Read cudbg dump contents stored in flash.
+ * ## Parameters ##
+ * @handle : A pointer returned by cudbg_alloc_handle.
+ * @data_buf : A pointer to data buffer.
+ * @data_buf_size : Data buffer size.
+ */
+
+int cudbg_read_flash_data(void *handle, void *data_buf, u32 data_buf_size);
+
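+/*
+ * A minimal usage sketch tying the API above together (illustrative only,
+ * hence not compiled; "my_print" stands for any caller-supplied callback
+ * matching cudbg_print_cb, and the entities selected are arbitrary):
+ */
+#if 0
+static int
+cudbg_example(struct adapter *adap, void *outbuf, u32 outbuf_size)
+{
+	void *handle = cudbg_alloc_handle();
+	struct cudbg_init *dbg_init = cudbg_get_init(handle);
+	int rc;
+
+	dbg_init->adap = adap;
+	dbg_init->print = my_print;
+	set_dbg_bitmap(dbg_init->dbg_bitmap, CUDBG_DEV_LOG);
+	set_dbg_bitmap(dbg_init->dbg_bitmap, CUDBG_CIM_LA);
+
+	/* On success, rc is the number of bytes collected into outbuf. */
+	rc = cudbg_collect(handle, outbuf, &outbuf_size);
+	cudbg_free_handle(handle);
+	return (rc);
+}
+#endif
+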
+#endif /* _CUDBG_IF_H_ */
diff --git a/sys/dev/cxgbe/cudbg/cudbg_common.c b/sys/dev/cxgbe/cudbg/cudbg_common.c
new file mode 100644
index 000000000000..f780e626da0c
--- /dev/null
+++ b/sys/dev/cxgbe/cudbg/cudbg_common.c
@@ -0,0 +1,96 @@
+/*-
+ * Copyright (c) 2017 Chelsio Communications, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/param.h>
+
+#include "common/common.h"
+#include "cudbg.h"
+#include "cudbg_lib_common.h"
+
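+/*
+ * Carve a scratch region of 'size' bytes from the tail of the debug buffer.
+ * The space is handed back via release_scratch_buff(), so scratch
+ * allocations never collide with data being appended at pdbg_buff->offset.
+ */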
+int get_scratch_buff(struct cudbg_buffer *pdbg_buff, u32 size,
+ struct cudbg_buffer *pscratch_buff)
+{
+ u32 scratch_offset;
+ int rc = 0;
+
+ scratch_offset = pdbg_buff->size - size;
+
+ if (pdbg_buff->offset > (int)scratch_offset || pdbg_buff->size < size) {
+ rc = CUDBG_STATUS_NO_SCRATCH_MEM;
+ goto err;
+ } else {
+ pscratch_buff->data = (char *)pdbg_buff->data + scratch_offset;
+ pscratch_buff->offset = 0;
+ pscratch_buff->size = size;
+ pdbg_buff->size -= size;
+ }
+
+err:
+ return rc;
+}
+
+void release_scratch_buff(struct cudbg_buffer *pscratch_buff,
+ struct cudbg_buffer *pdbg_buff)
+{
+ pdbg_buff->size += pscratch_buff->size;
+ /* Reset the used buffer to zero.
+	 * If we don't do this, it will affect the ext entity logic.
+ */
+ memset(pscratch_buff->data, 0, pscratch_buff->size);
+ pscratch_buff->data = NULL;
+ pscratch_buff->offset = 0;
+ pscratch_buff->size = 0;
+}
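+
+/*
+ * Typical pairing of the two helpers above (an illustrative fragment, not
+ * taken from a caller in this commit):
+ *
+ *	struct cudbg_buffer scratch;
+ *
+ *	if (get_scratch_buff(&dbg_buff, len, &scratch) == 0) {
+ *		... use scratch.data / scratch.size ...
+ *		release_scratch_buff(&scratch, &dbg_buff);
+ *	}
+ */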
+
+static inline void init_cudbg_hdr(struct cudbg_init_hdr *hdr)
+{
+ hdr->major_ver = CUDBG_MAJOR_VERSION;
+ hdr->minor_ver = CUDBG_MINOR_VERSION;
+ hdr->build_ver = CUDBG_BUILD_VERSION;
+ hdr->init_struct_size = sizeof(struct cudbg_init);
+}
+
+void *
+cudbg_alloc_handle(void)
+{
+ struct cudbg_private *handle;
+
+ handle = malloc(sizeof(*handle), M_CXGBE, M_ZERO | M_WAITOK);
+ init_cudbg_hdr(&handle->dbg_init.header);
+
+ return (handle);
+}
+
+void
+cudbg_free_handle(void *handle)
+{
+
+ free(handle, M_CXGBE);
+}
diff --git a/sys/dev/cxgbe/cudbg/cudbg_entity.h b/sys/dev/cxgbe/cudbg/cudbg_entity.h
new file mode 100644
index 000000000000..2bbe0db0e5c2
--- /dev/null
+++ b/sys/dev/cxgbe/cudbg/cudbg_entity.h
@@ -0,0 +1,909 @@
+/*-
+ * Copyright (c) 2017 Chelsio Communications, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __CUDBG_ENTITY_H__
+#define __CUDBG_ENTITY_H__
+
+#ifdef __GNUC__
+#define ATTRIBUTE_UNUSED __attribute__ ((unused))
+#else
+#define ATTRIBUTE_UNUSED
+#endif
+
+#define MC0_FLAG 1
+#define MC1_FLAG 2
+#define EDC0_FLAG 3
+#define EDC1_FLAG 4
+
+#define NUM_PCIE_CONFIG_REGS 0x61
+#define CUDBG_CTXT_SIZE_BYTES 24
+#define CUDBG_MAX_INGRESS_QIDS 65536
+#define CUDBG_MAX_FL_QIDS 2048
+#define CUDBG_MAX_CNM_QIDS 1024
+#define CUDBG_LOWMEM_MAX_CTXT_QIDS 256
+#define ETH_ALEN 6
+#define CUDBG_MAX_RPLC_SIZE 128
+#define CUDBG_NUM_REQ_REGS 17
+#define CUDBG_MAX_TCAM_TID 0x800
+#define CUDBG_NUM_ULPTX 11
+#define CUDBG_NUM_ULPTX_READ 512
+
+#define SN_REG_ADDR 0x183f
+#define BN_REG_ADDR 0x1819
+#define NA_REG_ADDR 0x185a
+#define MN_REG_ADDR 0x1803
+
+#define A_MPS_VF_RPLCT_MAP0 0x1111c
+#define A_MPS_VF_RPLCT_MAP1 0x11120
+#define A_MPS_VF_RPLCT_MAP2 0x11124
+#define A_MPS_VF_RPLCT_MAP3 0x11128
+#define A_MPS_VF_RPLCT_MAP4 0x11300
+#define A_MPS_VF_RPLCT_MAP5 0x11304
+#define A_MPS_VF_RPLCT_MAP6 0x11308
+#define A_MPS_VF_RPLCT_MAP7 0x1130c
+
+#define PORT_TYPE_ADDR 0x1869
+#define PORT_TYPE_LEN 8
+
+/* For T6 */
+#define SN_T6_ADDR 0x83f
+#define BN_T6_ADDR 0x819
+#define NA_T6_ADDR 0x85a
+#define MN_T6_ADDR 0x803
+
+#define SN_MAX_LEN 24
+#define BN_MAX_LEN 16
+#define NA_MAX_LEN 12
+#define MN_MAX_LEN 16
+#define MAX_VPD_DATA_LEN 32
+
+#define VPD_VER_ADDR 0x18c7
+#define VPD_VER_LEN 2
+#define SCFG_VER_ADDR 0x06
+#define SCFG_VER_LEN 4
+
+#define CUDBG_CIM_BUSY_BIT (1 << 17)
+
+#define CUDBG_CHAC_PBT_ADDR 0x2800
+#define CUDBG_CHAC_PBT_LRF 0x3000
+#define CUDBG_CHAC_PBT_DATA 0x3800
+#define CUDBG_PBT_DYNAMIC_ENTRIES 8
+#define CUDBG_PBT_STATIC_ENTRIES 16
+#define CUDBG_LRF_ENTRIES 8
+#define CUDBG_PBT_DATA_ENTRIES 512
+
+#define CUDBG_ENTITY_SIGNATURE 0xCCEDB001
+#define CUDBG_TID_INFO_REV 1
+#define CUDBG_MAC_STATS_REV 1
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(_a) (sizeof((_a)) / sizeof((_a)[0]))
+#endif
+
+struct cudbg_pbt_tables {
+ u32 pbt_dynamic[CUDBG_PBT_DYNAMIC_ENTRIES];
+ u32 pbt_static[CUDBG_PBT_STATIC_ENTRIES];
+ u32 lrf_table[CUDBG_LRF_ENTRIES];
+ u32 pbt_data[CUDBG_PBT_DATA_ENTRIES];
+};
+
+struct card_mem {
+ u16 size_mc0;
+ u16 size_mc1;
+ u16 size_edc0;
+ u16 size_edc1;
+ u16 mem_flag;
+ u16 res;
+};
+
+struct rss_pf_conf {
+ u32 rss_pf_map;
+ u32 rss_pf_mask;
+ u32 rss_pf_config;
+};
+
+struct cudbg_ch_cntxt {
+ uint32_t cntxt_type;
+ uint32_t cntxt_id;
+ uint32_t data[SGE_CTXT_SIZE / 4];
+};
+
+struct cudbg_tcam {
+ u32 filter_start;
+ u32 server_start;
+ u32 clip_start;
+ u32 routing_start;
+ u32 tid_hash_base;
+ u32 max_tid;
+};
+
+#if 0
+struct cudbg_mbox_log {
+ struct mbox_cmd entry;
+ u32 hi[MBOX_LEN / 8];
+ u32 lo[MBOX_LEN / 8];
+};
+#endif
+
+struct cudbg_tid_data {
+ u32 tid;
+ u32 dbig_cmd;
+ u32 dbig_conf;
+ u32 dbig_rsp_stat;
+ u32 data[CUDBG_NUM_REQ_REGS];
+};
+
+struct cudbg_cntxt_field {
+ char *name;
+ u32 start_bit;
+ u32 end_bit;
+ u32 shift;
+ u32 islog2;
+};
+
+struct cudbg_mps_tcam {
+ u64 mask;
+ u32 rplc[8];
+ u32 idx;
+ u32 cls_lo;
+ u32 cls_hi;
+ u32 rplc_size;
+ u32 vniy;
+ u32 vnix;
+ u32 dip_hit;
+ u32 vlan_vld;
+ u32 repli;
+ u16 ivlan;
+ u8 addr[ETH_ALEN];
+ u8 lookup_type;
+ u8 port_num;
+ u8 reserved[2];
+};
+
+struct rss_vf_conf {
+ u32 rss_vf_vfl;
+ u32 rss_vf_vfh;
+};
+
+struct rss_config {
+ u32 tp_rssconf; /* A_TP_RSS_CONFIG */
+ u32 tp_rssconf_tnl; /* A_TP_RSS_CONFIG_TNL */
+ u32 tp_rssconf_ofd; /* A_TP_RSS_CONFIG_OFD */
+ u32 tp_rssconf_syn; /* A_TP_RSS_CONFIG_SYN */
+ u32 tp_rssconf_vrt; /* A_TP_RSS_CONFIG_VRT */
+ u32 tp_rssconf_cng; /* A_TP_RSS_CONFIG_CNG */
+ u32 chip;
+};
+
+struct struct_pm_stats {
+ u32 tx_cnt[T6_PM_NSTATS];
+ u32 rx_cnt[T6_PM_NSTATS];
+ u64 tx_cyc[T6_PM_NSTATS];
+ u64 rx_cyc[T6_PM_NSTATS];
+};
+
+struct struct_hw_sched {
+ u32 kbps[NTX_SCHED];
+ u32 ipg[NTX_SCHED];
+ u32 pace_tab[NTX_SCHED];
+ u32 mode;
+ u32 map;
+};
+
+struct struct_tcp_stats {
+ struct tp_tcp_stats v4, v6;
+};
+
+struct struct_tp_err_stats {
+ struct tp_err_stats stats;
+ u32 nchan;
+};
+
+struct struct_tp_fcoe_stats {
+ struct tp_fcoe_stats stats[4];
+ u32 nchan;
+};
+
+struct struct_mac_stats {
+ u32 port_count;
+ struct port_stats stats[4];
+};
+
+struct struct_mac_stats_rev1 {
+ struct cudbg_ver_hdr ver_hdr;
+ u32 port_count;
+ u32 reserved;
+ struct port_stats stats[4];
+};
+
+struct struct_tp_cpl_stats {
+ struct tp_cpl_stats stats;
+ u32 nchan;
+};
+
+struct struct_wc_stats {
+ u32 wr_cl_success;
+ u32 wr_cl_fail;
+};
+
+struct struct_ulptx_la {
+ u32 rdptr[CUDBG_NUM_ULPTX];
+ u32 wrptr[CUDBG_NUM_ULPTX];
+ u32 rddata[CUDBG_NUM_ULPTX];
+ u32 rd_data[CUDBG_NUM_ULPTX][CUDBG_NUM_ULPTX_READ];
+};
+
+struct struct_ulprx_la {
+ u32 data[ULPRX_LA_SIZE * 8];
+ u32 size;
+};
+
+struct struct_cim_qcfg {
+ u8 chip;
+ u16 base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
+ u16 size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
+ u16 thres[CIM_NUM_IBQ];
+ u32 obq_wr[2 * CIM_NUM_OBQ_T5];
+ u32 stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)];
+};
+
+enum region_index {
+ REGN_DBQ_CONTEXS_IDX,
+ REGN_IMSG_CONTEXTS_IDX,
+ REGN_FLM_CACHE_IDX,
+ REGN_TCBS_IDX,
+ REGN_PSTRUCT_IDX,
+ REGN_TIMERS_IDX,
+ REGN_RX_FL_IDX,
+ REGN_TX_FL_IDX,
+ REGN_PSTRUCT_FL_IDX,
+ REGN_TX_PAYLOAD_IDX,
+ REGN_RX_PAYLOAD_IDX,
+ REGN_LE_HASH_IDX,
+ REGN_ISCSI_IDX,
+ REGN_TDDP_IDX,
+ REGN_TPT_IDX,
+ REGN_STAG_IDX,
+ REGN_RQ_IDX,
+ REGN_RQUDP_IDX,
+ REGN_PBL_IDX,
+ REGN_TXPBL_IDX,
+ REGN_DBVFIFO_IDX,
+ REGN_ULPRX_STATE_IDX,
+ REGN_ULPTX_STATE_IDX,
+#ifndef __NO_DRIVER_OCQ_SUPPORT__
+ REGN_ON_CHIP_Q_IDX,
+#endif
+};
+
+static const char * const region[] = {
+ "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
+ "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
+ "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
+ "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
+ "RQUDP region:", "PBL region:", "TXPBL region:",
+ "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
+#ifndef __NO_DRIVER_OCQ_SUPPORT__
+ "On-chip queues:"
+#endif
+};
+
+/* Info relative to memory region (i.e. wrt 0). */
+struct struct_region_info {
+	bool exist; /* Does the region exist in the current memory? */
+ u32 start; /* Start wrt 0 */
+ u32 end; /* End wrt 0 */
+};
+
+struct struct_port_usage {
+ u32 id;
+ u32 used;
+ u32 alloc;
+};
+
+struct struct_lpbk_usage {
+ u32 id;
+ u32 used;
+ u32 alloc;
+};
+
+struct struct_mem_desc {
+ u32 base;
+ u32 limit;
+ u32 idx;
+};
+
+enum string_size_units {
+ STRING_UNITS_10, /* use powers of 10^3 (standard SI) */
+ STRING_UNITS_2, /* use binary powers of 2^10 */
+};
+
+struct struct_meminfo {
+ struct struct_mem_desc avail[4];
+ struct struct_mem_desc mem[ARRAY_SIZE(region) + 3];
+ u32 avail_c;
+ u32 mem_c;
+ u32 up_ram_lo;
+ u32 up_ram_hi;
+ u32 up_extmem2_lo;
+ u32 up_extmem2_hi;
+ u32 rx_pages_data[3];
+ u32 tx_pages_data[4];
+ u32 p_structs;
+ struct struct_port_usage port_data[4];
+ u32 port_used[4];
+ u32 port_alloc[4];
+ u32 loopback_used[NCHAN];
+ u32 loopback_alloc[NCHAN];
+};
+
+#ifndef __GNUC__
+#pragma warning(disable : 4200)
+#endif
+
+struct struct_lb_stats {
+ int nchan;
+ struct lb_port_stats s[0];
+};
+
+struct struct_clk_info {
+ u64 retransmit_min;
+ u64 retransmit_max;
+ u64 persist_timer_min;
+ u64 persist_timer_max;
+ u64 keepalive_idle_timer;
+ u64 keepalive_interval;
+ u64 initial_srtt;
+ u64 finwait2_timer;
+ u32 dack_timer;
+ u32 res;
+ u32 cclk_ps;
+ u32 tre;
+ u32 dack_re;
+ char core_clk_period[32];
+ char tp_timer_tick[32];
+ char tcp_tstamp_tick[32];
+ char dack_tick[32];
+};
+
+struct cim_pif_la {
+ int size;
+ u8 data[0];
+};
+
+struct struct_tp_la {
+ u32 size;
+ u32 mode;
+ u8 data[0];
+};
+
+struct field_desc {
+ const char *name;
+ u32 start;
+ u32 width;
+};
+
+struct tp_mib_type {
+ char *key;
+ u32 addr;
+ u32 value;
+};
+
+struct wtp_type_0 {
+ u32 sop;
+ u32 eop;
+};
+
+struct wtp_type_1 {
+ u32 sop[2];
+ u32 eop[2];
+};
+
+struct wtp_type_2 {
+ u32 sop[4];
+ u32 eop[4];
+};
+
+struct wtp_type_3 {
+ u32 sop[4];
+ u32 eop[4];
+ u32 drops;
+};
+
+struct wtp_data {
+ /*TX path, Request Work request sub-path:*/
+
+ struct wtp_type_1 sge_pcie_cmd_req; /*SGE_DEBUG PC_Req_xOPn*/
+ struct wtp_type_1 pcie_core_cmd_req; /*PCIE_CMDR_REQ_CNT*/
+
+
+ /*TX path, Work request to uP sub-path*/
+ struct wtp_type_1 core_pcie_cmd_rsp; /*PCIE_CMDR_RSP_CNT*/
+ struct wtp_type_1 pcie_sge_cmd_rsp; /*SGE_DEBUG PC_Rsp_xOPn*/
+ struct wtp_type_1 sge_cim; /*SGE_DEBUG CIM_xOPn*/
+
+ /*TX path, Data request path from ULP_TX to core*/
+ struct wtp_type_2 utx_sge_dma_req; /*SGE UD_Rx_xOPn*/
+ struct wtp_type_2 sge_pcie_dma_req; /*SGE PD_Req_Rdn (no eops)*/
+ struct wtp_type_2 pcie_core_dma_req; /*PCIE_DMAR_REQ_CNT (no eops)*/
+
+ /*Main TX path, from core to wire*/
+ struct wtp_type_2 core_pcie_dma_rsp; /*PCIE_DMAR_RSP_SOP_CNT/
+ PCIE_DMAR_EOP_CNT*/
+ struct wtp_type_2 pcie_sge_dma_rsp; /*SGE_DEBUG PD_Rsp_xOPn*/
+ struct wtp_type_2 sge_utx; /*SGE_DEBUG U_Tx_xOPn*/
+ struct wtp_type_2 utx_tp; /*ULP_TX_SE_CNT_CHn[xOP_CNT_ULP2TP]*/
+ struct wtp_type_2 utx_tpcside; /*TP_DBG_CSIDE_RXn[RxXoPCnt]*/
+
+ struct wtp_type_2 tpcside_rxpld;
+ struct wtp_type_2 tpcside_rxarb; /*TP_DBG_CSIDE_RXn[RxArbXopCnt]*/
+ struct wtp_type_2 tpcside_rxcpl;
+
+	struct wtp_type_2 tpeside_mps; /*TP_DBG_ESIDE_PKT0[TxXoPCnt]*/
+ struct wtp_type_2 tpeside_pm;
+ struct wtp_type_2 tpeside_pld;
+
+ /*Tx path, PCIE t5 DMA stat*/
+ struct wtp_type_2 pcie_t5_dma_stat3;
+
+ /*Tx path, SGE debug data high index 6*/
+ struct wtp_type_2 sge_debug_data_high_index_6;
+
+ /*Tx path, SGE debug data high index 3*/
+ struct wtp_type_2 sge_debug_data_high_index_3;
+
+ /*Tx path, ULP SE CNT CHx*/
+ struct wtp_type_2 ulp_se_cnt_chx;
+
+ /*pcie cmd stat 2*/
+ struct wtp_type_2 pcie_cmd_stat2;
+
+ /*pcie cmd stat 3*/
+ struct wtp_type_2 pcie_cmd_stat3;
+
+ struct wtp_type_2 pcie_dma1_stat2_core;
+
+ struct wtp_type_1 sge_work_req_pkt;
+
+ struct wtp_type_2 sge_debug_data_high_indx5;
+
+ /*Tx path, mac portx pkt count*/
+ struct wtp_type_2 mac_portx_pkt_count;
+
+ /*Rx path, mac porrx pkt count*/
+ struct wtp_type_2 mac_porrx_pkt_count;
+
+ /*Rx path, PCIE T5 dma1 stat 2*/
+ struct wtp_type_2 pcie_dma1_stat2;
+
+ /*Rx path, sge debug data high index 7*/
+ struct wtp_type_2 sge_debug_data_high_indx7;
+
+ /*Rx path, sge debug data high index 1*/
+ struct wtp_type_1 sge_debug_data_high_indx1;
+
+ /*Rx path, TP debug CSIDE Tx register*/
+ struct wtp_type_1 utx_tpcside_tx;
+
+ /*Rx path, LE DB response count*/
+ struct wtp_type_0 le_db_rsp_cnt;
+
+ /*Rx path, TP debug Eside PKTx*/
+ struct wtp_type_2 tp_dbg_eside_pktx;
+
+ /*Rx path, sge debug data high index 9*/
+ struct wtp_type_1 sge_debug_data_high_indx9;
+
+	/*Tx path, mac portx aFramesTransmittedOK*/
+ struct wtp_type_2 mac_portx_aframestra_ok;
+
+	/*Rx path, mac porrx aFramesTransmittedOK*/
+ struct wtp_type_2 mac_porrx_aframestra_ok;
+
+ /*Tx path, MAC_PORT_MTIP_1G10G_RX_etherStatsPkts*/
+ struct wtp_type_1 mac_portx_etherstatspkts;
+
+ /*Rx path, MAC_PORT_MTIP_1G10G_RX_etherStatsPkts*/
+ struct wtp_type_1 mac_porrx_etherstatspkts;
+
+ struct wtp_type_3 tp_mps; /*MPS_TX_SE_CNT_TP01 and
+ MPS_TX_SE_CNT_TP34*/
+ struct wtp_type_3 mps_xgm; /*MPS_TX_SE_CNT_MAC01 and
+ MPS_TX_SE_CNT_MAC34*/
+ struct wtp_type_2 tx_xgm_xgm; /*XGMAC_PORT_PKT_CNT_PORT_n*/
+ struct wtp_type_2 xgm_wire; /*XGMAC_PORT_XGM_STAT_TX_FRAME_LOW_PORT_N
+ (clear on read)*/
+
+ /*RX path, from wire to core.*/
+ struct wtp_type_2 wire_xgm; /*XGMAC_PORT_XGM_STAT_RX_FRAMES_LOW_PORT_N
+ (clear on read)*/
+ struct wtp_type_2 rx_xgm_xgm; /*XGMAC_PORT_PKT_CNT_PORT_n*/
+ struct _xgm_mps { /*MPS_RX_SE_CNT_INn*/
+ u32 sop[8]; /* => undef,*/
+ u32 eop[8]; /* => undef,*/
+ u32 drop; /* => undef,*/
+ u32 cls_drop; /* => undef,*/
+ u32 err; /* => undef,*/
+ u32 bp; /* => undef,*/
+ } xgm_mps;
+
+ struct wtp_type_3 mps_tp; /*MPS_RX_SE_CNT_OUT01 and
+ MPS_RX_SE_CNT_OUT23*/
+ struct wtp_type_2 mps_tpeside; /*TP_DBG_ESIDE_PKTn*/
+ struct wtp_type_1 tpeside_pmrx; /*???*/
+ struct wtp_type_2 pmrx_ulprx; /*ULP_RX_SE_CNT_CHn[xOP_CNT_INn]*/
+ struct wtp_type_2 ulprx_tpcside; /*ULP_RX_SE_CNT_CHn[xOP_CNT_OUTn]*/
+ struct wtp_type_2 tpcside_csw; /*TP_DBG_CSIDE_TXn[TxSopCnt]*/
+ struct wtp_type_2 tpcside_pm;
+ struct wtp_type_2 tpcside_uturn;
+ struct wtp_type_2 tpcside_txcpl;
+ struct wtp_type_1 tp_csw; /*SGE_DEBUG CPLSW_TP_Rx_xOPn*/
+ struct wtp_type_1 csw_sge; /*SGE_DEBUG T_Rx_xOPn*/
+ struct wtp_type_2 sge_pcie; /*SGE_DEBUG PD_Req_SopN -
+ PD_Req_RdN - PD_ReqIntN*/
+ struct wtp_type_2 sge_pcie_ints; /*SGE_DEBUG PD_Req_IntN*/
+ struct wtp_type_2 pcie_core_dmaw; /*PCIE_DMAW_SOP_CNT and
+ PCIE_DMAW_EOP_CNT*/
+ struct wtp_type_2 pcie_core_dmai; /*PCIE_DMAI_CNT*/
+
+};
+
+struct tp_mib_data {
+ struct tp_mib_type TP_MIB_MAC_IN_ERR_0;
+ struct tp_mib_type TP_MIB_MAC_IN_ERR_1;
+ struct tp_mib_type TP_MIB_MAC_IN_ERR_2;
+ struct tp_mib_type TP_MIB_MAC_IN_ERR_3;
+ struct tp_mib_type TP_MIB_HDR_IN_ERR_0;
+ struct tp_mib_type TP_MIB_HDR_IN_ERR_1;
+ struct tp_mib_type TP_MIB_HDR_IN_ERR_2;
+ struct tp_mib_type TP_MIB_HDR_IN_ERR_3;
+ struct tp_mib_type TP_MIB_TCP_IN_ERR_0;
+ struct tp_mib_type TP_MIB_TCP_IN_ERR_1;
+ struct tp_mib_type TP_MIB_TCP_IN_ERR_2;
+ struct tp_mib_type TP_MIB_TCP_IN_ERR_3;
+ struct tp_mib_type TP_MIB_TCP_OUT_RST;
+ struct tp_mib_type TP_MIB_TCP_IN_SEG_HI;
+ struct tp_mib_type TP_MIB_TCP_IN_SEG_LO;
+ struct tp_mib_type TP_MIB_TCP_OUT_SEG_HI;
+ struct tp_mib_type TP_MIB_TCP_OUT_SEG_LO;
+ struct tp_mib_type TP_MIB_TCP_RXT_SEG_HI;
+ struct tp_mib_type TP_MIB_TCP_RXT_SEG_LO;
+ struct tp_mib_type TP_MIB_TNL_CNG_DROP_0;
+ struct tp_mib_type TP_MIB_TNL_CNG_DROP_1;
+ struct tp_mib_type TP_MIB_TNL_CNG_DROP_2;
+ struct tp_mib_type TP_MIB_TNL_CNG_DROP_3;
+ struct tp_mib_type TP_MIB_OFD_CHN_DROP_0;
+ struct tp_mib_type TP_MIB_OFD_CHN_DROP_1;
+ struct tp_mib_type TP_MIB_OFD_CHN_DROP_2;
+ struct tp_mib_type TP_MIB_OFD_CHN_DROP_3;
+ struct tp_mib_type TP_MIB_TNL_OUT_PKT_0;
+ struct tp_mib_type TP_MIB_TNL_OUT_PKT_1;
+ struct tp_mib_type TP_MIB_TNL_OUT_PKT_2;
+ struct tp_mib_type TP_MIB_TNL_OUT_PKT_3;
+ struct tp_mib_type TP_MIB_TNL_IN_PKT_0;
+ struct tp_mib_type TP_MIB_TNL_IN_PKT_1;
+ struct tp_mib_type TP_MIB_TNL_IN_PKT_2;
+ struct tp_mib_type TP_MIB_TNL_IN_PKT_3;
+ struct tp_mib_type TP_MIB_TCP_V6IN_ERR_0;
+ struct tp_mib_type TP_MIB_TCP_V6IN_ERR_1;
+ struct tp_mib_type TP_MIB_TCP_V6IN_ERR_2;
+ struct tp_mib_type TP_MIB_TCP_V6IN_ERR_3;
+ struct tp_mib_type TP_MIB_TCP_V6OUT_RST;
+ struct tp_mib_type TP_MIB_TCP_V6IN_SEG_HI;
+ struct tp_mib_type TP_MIB_TCP_V6IN_SEG_LO;
+ struct tp_mib_type TP_MIB_TCP_V6OUT_SEG_HI;
+ struct tp_mib_type TP_MIB_TCP_V6OUT_SEG_LO;
+ struct tp_mib_type TP_MIB_TCP_V6RXT_SEG_HI;
+ struct tp_mib_type TP_MIB_TCP_V6RXT_SEG_LO;
+ struct tp_mib_type TP_MIB_OFD_ARP_DROP;
+ struct tp_mib_type TP_MIB_OFD_DFR_DROP;
+ struct tp_mib_type TP_MIB_CPL_IN_REQ_0;
+ struct tp_mib_type TP_MIB_CPL_IN_REQ_1;
+ struct tp_mib_type TP_MIB_CPL_IN_REQ_2;
+ struct tp_mib_type TP_MIB_CPL_IN_REQ_3;
+ struct tp_mib_type TP_MIB_CPL_OUT_RSP_0;
+ struct tp_mib_type TP_MIB_CPL_OUT_RSP_1;
+ struct tp_mib_type TP_MIB_CPL_OUT_RSP_2;
+ struct tp_mib_type TP_MIB_CPL_OUT_RSP_3;
+ struct tp_mib_type TP_MIB_TNL_LPBK_0;
+ struct tp_mib_type TP_MIB_TNL_LPBK_1;
+ struct tp_mib_type TP_MIB_TNL_LPBK_2;
+ struct tp_mib_type TP_MIB_TNL_LPBK_3;
+ struct tp_mib_type TP_MIB_TNL_DROP_0;
+ struct tp_mib_type TP_MIB_TNL_DROP_1;
+ struct tp_mib_type TP_MIB_TNL_DROP_2;
+ struct tp_mib_type TP_MIB_TNL_DROP_3;
+ struct tp_mib_type TP_MIB_FCOE_DDP_0;
+ struct tp_mib_type TP_MIB_FCOE_DDP_1;
+ struct tp_mib_type TP_MIB_FCOE_DDP_2;
+ struct tp_mib_type TP_MIB_FCOE_DDP_3;
+ struct tp_mib_type TP_MIB_FCOE_DROP_0;
+ struct tp_mib_type TP_MIB_FCOE_DROP_1;
+ struct tp_mib_type TP_MIB_FCOE_DROP_2;
+ struct tp_mib_type TP_MIB_FCOE_DROP_3;
+ struct tp_mib_type TP_MIB_FCOE_BYTE_0_HI;
+ struct tp_mib_type TP_MIB_FCOE_BYTE_0_LO;
+ struct tp_mib_type TP_MIB_FCOE_BYTE_1_HI;
+ struct tp_mib_type TP_MIB_FCOE_BYTE_1_LO;
+ struct tp_mib_type TP_MIB_FCOE_BYTE_2_HI;
+ struct tp_mib_type TP_MIB_FCOE_BYTE_2_LO;
+ struct tp_mib_type TP_MIB_FCOE_BYTE_3_HI;
+ struct tp_mib_type TP_MIB_FCOE_BYTE_3_LO;
+ struct tp_mib_type TP_MIB_OFD_VLN_DROP_0;
+ struct tp_mib_type TP_MIB_OFD_VLN_DROP_1;
+ struct tp_mib_type TP_MIB_OFD_VLN_DROP_2;
+ struct tp_mib_type TP_MIB_OFD_VLN_DROP_3;
+ struct tp_mib_type TP_MIB_USM_PKTS;
+ struct tp_mib_type TP_MIB_USM_DROP;
+ struct tp_mib_type TP_MIB_USM_BYTES_HI;
+ struct tp_mib_type TP_MIB_USM_BYTES_LO;
+ struct tp_mib_type TP_MIB_TID_DEL;
+ struct tp_mib_type TP_MIB_TID_INV;
+ struct tp_mib_type TP_MIB_TID_ACT;
+ struct tp_mib_type TP_MIB_TID_PAS;
+ struct tp_mib_type TP_MIB_RQE_DFR_MOD;
+ struct tp_mib_type TP_MIB_RQE_DFR_PKT;
+};
+
+struct cudbg_reg_info {
+ const char *name;
+ unsigned int addr;
+ unsigned int len;
+};
+
+struct tp1_reg_info {
+ char addr[10];
+ char name[40];
+};
+
+struct ireg_field {
+ u32 ireg_addr;
+ u32 ireg_data;
+ u32 ireg_local_offset;
+ u32 ireg_offset_range;
+};
+
+struct ireg_buf {
+ struct ireg_field tp_pio;
+ u32 outbuf[32];
+};
+
+struct tx_rate {
+ u64 nrate[NCHAN];
+ u64 orate[NCHAN];
+ u32 nchan;
+};
+
+struct tid_info_region {
+ u32 ntids;
+ u32 nstids;
+ u32 stid_base;
+ u32 hash_base;
+
+ u32 natids;
+ u32 nftids;
+ u32 ftid_base;
+ u32 aftid_base;
+ u32 aftid_end;
+
+ /* Server filter region */
+ u32 sftid_base;
+ u32 nsftids;
+
+ /* UO context range */
+ u32 uotid_base;
+ u32 nuotids;
+
+ u32 sb;
+ u32 flags;
+ u32 le_db_conf;
+ u32 IP_users;
+ u32 IPv6_users;
+
+ u32 hpftid_base;
+ u32 nhpftids;
+};
+
+struct tid_info_region_rev1 {
+ struct cudbg_ver_hdr ver_hdr;
+ struct tid_info_region tid;
+ u32 tid_start;
+ u32 reserved[16];
+};
+
+struct struct_vpd_data {
+ u8 sn[SN_MAX_LEN + 1];
+ u8 bn[BN_MAX_LEN + 1];
+ u8 na[NA_MAX_LEN + 1];
+ u8 mn[MN_MAX_LEN + 1];
+ u16 fw_major;
+ u16 fw_minor;
+ u16 fw_micro;
+ u16 fw_build;
+ u32 scfg_vers;
+ u32 vpd_vers;
+};
+
+struct sw_state {
+ u32 fw_state;
+ u8 caller_string[100];
+ u8 os_type;
+ u8 reserved[3];
+ u32 reserved1[16];
+};
+
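+/*
+ * Each row in the indirect-register arrays below follows struct ireg_field:
+ * { indirect address register, indirect data register, starting local
+ * offset, number of registers }.  For example, the first t6_tp_pio_array
+ * row reads 28 registers at local offsets 0x020-0x03b through the TP PIO
+ * window at 0x7e40/0x7e44.
+ */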
+static u32 ATTRIBUTE_UNUSED t6_tp_pio_array[][4] = {
+ {0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */
+ {0x7e40, 0x7e44, 0x040, 10}, /* t6_tp_pio_regs_40_to_49 */
+ {0x7e40, 0x7e44, 0x050, 10}, /* t6_tp_pio_regs_50_to_59 */
+ {0x7e40, 0x7e44, 0x060, 14}, /* t6_tp_pio_regs_60_to_6d */
+ {0x7e40, 0x7e44, 0x06F, 1}, /* t6_tp_pio_regs_6f */
+ {0x7e40, 0x7e44, 0x070, 6}, /* t6_tp_pio_regs_70_to_75 */
+ {0x7e40, 0x7e44, 0x130, 18}, /* t6_tp_pio_regs_130_to_141 */
+ {0x7e40, 0x7e44, 0x145, 19}, /* t6_tp_pio_regs_145_to_157 */
+ {0x7e40, 0x7e44, 0x160, 1}, /* t6_tp_pio_regs_160 */
+ {0x7e40, 0x7e44, 0x230, 25}, /* t6_tp_pio_regs_230_to_248 */
+	{0x7e40, 0x7e44, 0x24a, 3}, /* t6_tp_pio_regs_24a_to_24c */
+ {0x7e40, 0x7e44, 0x8C0, 1} /* t6_tp_pio_regs_8c0 */
+};
+
+static u32 ATTRIBUTE_UNUSED t5_tp_pio_array[][4] = {
+ {0x7e40, 0x7e44, 0x020, 28}, /* t5_tp_pio_regs_20_to_3b */
+ {0x7e40, 0x7e44, 0x040, 19}, /* t5_tp_pio_regs_40_to_52 */
+ {0x7e40, 0x7e44, 0x054, 2}, /* t5_tp_pio_regs_54_to_55 */
+ {0x7e40, 0x7e44, 0x060, 13}, /* t5_tp_pio_regs_60_to_6c */
+ {0x7e40, 0x7e44, 0x06F, 1}, /* t5_tp_pio_regs_6f */
+ {0x7e40, 0x7e44, 0x120, 4}, /* t5_tp_pio_regs_120_to_123 */
+ {0x7e40, 0x7e44, 0x12b, 2}, /* t5_tp_pio_regs_12b_to_12c */
+ {0x7e40, 0x7e44, 0x12f, 21}, /* t5_tp_pio_regs_12f_to_143 */
+ {0x7e40, 0x7e44, 0x145, 19}, /* t5_tp_pio_regs_145_to_157 */
+ {0x7e40, 0x7e44, 0x230, 25}, /* t5_tp_pio_regs_230_to_248 */
+ {0x7e40, 0x7e44, 0x8C0, 1} /* t5_tp_pio_regs_8c0 */
+};
+
+static u32 ATTRIBUTE_UNUSED t6_ma_ireg_array[][4] = {
+ {0x78f8, 0x78fc, 0xa000, 23}, /* t6_ma_regs_a000_to_a016 */
+ {0x78f8, 0x78fc, 0xa400, 30}, /* t6_ma_regs_a400_to_a41e */
+ {0x78f8, 0x78fc, 0xa800, 20} /* t6_ma_regs_a800_to_a813 */
+};
+
+static u32 ATTRIBUTE_UNUSED t6_ma_ireg_array2[][4] = {
+ {0x78f8, 0x78fc, 0xe400, 17}, /* t6_ma_regs_e400_to_e600 */
+ {0x78f8, 0x78fc, 0xe640, 13} /* t6_ma_regs_e640_to_e7c0 */
+};
+
+static u32 ATTRIBUTE_UNUSED t6_hma_ireg_array[][4] = {
+ {0x51320, 0x51324, 0xa000, 32} /* t6_hma_regs_a000_to_a01f */
+};
+static u32 ATTRIBUTE_UNUSED t5_pcie_pdbg_array[][4] = {
+ {0x5a04, 0x5a0c, 0x00, 0x20}, /* t5_pcie_pdbg_regs_00_to_20 */
+ {0x5a04, 0x5a0c, 0x21, 0x20}, /* t5_pcie_pdbg_regs_21_to_40 */
+ {0x5a04, 0x5a0c, 0x41, 0x10}, /* t5_pcie_pdbg_regs_41_to_50 */
+};
+
+static u32 ATTRIBUTE_UNUSED t5_pcie_config_array[][2] = {
+ {0x0, 0x34},
+ {0x3c, 0x40},
+ {0x50, 0x64},
+ {0x70, 0x80},
+ {0x94, 0xa0},
+ {0xb0, 0xb8},
+ {0xd0, 0xd4},
+ {0x100, 0x128},
+ {0x140, 0x148},
+ {0x150, 0x164},
+ {0x170, 0x178},
+ {0x180, 0x194},
+ {0x1a0, 0x1b8},
+ {0x1c0, 0x208},
+};
+
+static u32 ATTRIBUTE_UNUSED t5_pcie_cdbg_array[][4] = {
+ {0x5a10, 0x5a18, 0x00, 0x20}, /* t5_pcie_cdbg_regs_00_to_20 */
+ {0x5a10, 0x5a18, 0x21, 0x18}, /* t5_pcie_cdbg_regs_21_to_37 */
+};
+
+static u32 ATTRIBUTE_UNUSED t6_tp_tm_pio_array[1][4] = {
+ {0x7e18, 0x7e1c, 0x0, 12}
+};
+
+static u32 ATTRIBUTE_UNUSED t5_tp_tm_pio_array[1][4] = {
+ {0x7e18, 0x7e1c, 0x0, 12}
+};
+
+static u32 ATTRIBUTE_UNUSED t5_pm_rx_array[][4] = {
+ {0x8FD0, 0x8FD4, 0x10000, 0x20}, /* t5_pm_rx_regs_10000_to_10020 */
+ {0x8FD0, 0x8FD4, 0x10021, 0x0D}, /* t5_pm_rx_regs_10021_to_1002c */
+};
+
+static u32 ATTRIBUTE_UNUSED t5_pm_tx_array[][4] = {
+ {0x8FF0, 0x8FF4, 0x10000, 0x20}, /* t5_pm_tx_regs_10000_to_10020 */
+ {0x8FF0, 0x8FF4, 0x10021, 0x1D}, /* t5_pm_tx_regs_10021_to_1003c */
+};
+
+static u32 ATTRIBUTE_UNUSED t6_tp_mib_index_array[6][4] = {
+ {0x7e50, 0x7e54, 0x0, 13},
+ {0x7e50, 0x7e54, 0x10, 6},
+ {0x7e50, 0x7e54, 0x18, 21},
+ {0x7e50, 0x7e54, 0x30, 32},
+ {0x7e50, 0x7e54, 0x50, 22},
+ {0x7e50, 0x7e54, 0x68, 12}
+};
+
+static u32 ATTRIBUTE_UNUSED t5_tp_mib_index_array[9][4] = {
+ {0x7e50, 0x7e54, 0x0, 13},
+ {0x7e50, 0x7e54, 0x10, 6},
+ {0x7e50, 0x7e54, 0x18, 8},
+ {0x7e50, 0x7e54, 0x20, 13},
+ {0x7e50, 0x7e54, 0x30, 16},
+ {0x7e50, 0x7e54, 0x40, 16},
+ {0x7e50, 0x7e54, 0x50, 16},
+ {0x7e50, 0x7e54, 0x60, 6},
+ {0x7e50, 0x7e54, 0x68, 4}
+};
+
+static u32 ATTRIBUTE_UNUSED t5_sge_dbg_index_array[2][4] = {
+ {0x10cc, 0x10d0, 0x0, 16},
+ {0x10cc, 0x10d4, 0x0, 16},
+};
+
+static u32 ATTRIBUTE_UNUSED t6_up_cim_reg_array[][4] = {
+ {0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */
+ {0x7b50, 0x7b54, 0x2080, 0x1d}, /* up_cim_2080_to_20fc */
+ {0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */
+ {0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */
+ {0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */
+ {0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */
+ {0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */
+ {0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */
+ {0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */
+ {0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */
+ {0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */
+ {0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */
+ {0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */
+};
+
+static u32 ATTRIBUTE_UNUSED t5_up_cim_reg_array[][4] = {
+ {0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */
+ {0x7b50, 0x7b54, 0x2080, 0x19}, /* up_cim_2080_to_20ec */
+ {0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */
+ {0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */
+ {0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */
+ {0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */
+ {0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */
+ {0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */
+ {0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */
+ {0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */
+ {0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */
+ {0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */
+ {0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */
+};
+
+#endif /* __CUDBG_ENTITY_H__ */
diff --git a/sys/dev/cxgbe/cudbg/cudbg_flash_utils.c b/sys/dev/cxgbe/cudbg/cudbg_flash_utils.c
new file mode 100644
index 000000000000..6a39373d3e2d
--- /dev/null
+++ b/sys/dev/cxgbe/cudbg/cudbg_flash_utils.c
@@ -0,0 +1,492 @@
+/*-
+ * Copyright (c) 2017 Chelsio Communications, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/param.h>
+
+#include "common/common.h"
+#include "common/t4_regs.h"
+#include "cudbg.h"
+#include "cudbg_lib_common.h"
+
+enum {
+ SF_ATTEMPTS = 10, /* max retries for SF operations */
+
+ /* flash command opcodes */
+ SF_PROG_PAGE = 2, /* program page */
+ SF_WR_DISABLE = 4, /* disable writes */
+ SF_RD_STATUS = 5, /* read status register */
+ SF_WR_ENABLE = 6, /* enable writes */
+ SF_RD_DATA_FAST = 0xb, /* read flash */
+ SF_RD_ID = 0x9f, /* read ID */
+ SF_ERASE_SECTOR = 0xd8, /* erase sector */
+};
+
+int write_flash(struct adapter *adap, u32 start_sec, void *data, u32 size);
+int read_flash(struct adapter *adap, u32 start_sec, void *data, u32 size,
+ u32 start_address);
+
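+/*
+ * skip_size accumulates bytes that were collected but deliberately not
+ * written to flash (e.g. large entities such as MC dumps); the flash
+ * headers subtract it so on-flash offsets and lengths stay consistent.
+ */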
+void
+update_skip_size(struct cudbg_flash_sec_info *sec_info, u32 size)
+{
+ sec_info->skip_size += size;
+}
+
+static void
+set_sector_availability(struct cudbg_flash_sec_info *sec_info, int sector_nu,
+			int avail)
+{
+ sector_nu -= CUDBG_START_SEC;
+ if (avail)
+ set_dbg_bitmap(sec_info->sec_bitmap, sector_nu);
+ else
+ reset_dbg_bitmap(sec_info->sec_bitmap, sector_nu);
+}
+
+/* Return an empty sector available for filling. */
+static int
+find_empty_sec(struct cudbg_flash_sec_info *sec_info)
+{
+ int i, index, bit;
+
+ for (i = CUDBG_START_SEC; i < CUDBG_SF_MAX_SECTOR; i++) {
+ index = (i - CUDBG_START_SEC) / 8;
+ bit = (i - CUDBG_START_SEC) % 8;
+ if (!(sec_info->sec_bitmap[index] & (1 << bit)))
+ return i;
+ }
+
+ return CUDBG_STATUS_FLASH_FULL;
+}
+
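+/*
+ * On-flash layout: each cudbg sector holds debug data from offset 0 up to
+ * sec_hdr_start_addr, plus a copy of all headers (flash header, cudbg
+ * header, entity headers) in the last total_hdr_size bytes.  The sector
+ * with the highest sequence number carries the up-to-date headers.
+ */
+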
+/* This function initializes the header on the first call; if the header is
+ * already present, it updates that header. */
+static void update_headers(void *handle, struct cudbg_buffer *dbg_buff,
+ u64 timestamp, u32 cur_entity_hdr_offset,
+ u32 start_offset, u32 ext_size)
+{
+ struct cudbg_private *priv = handle;
+ struct cudbg_flash_sec_info *sec_info = &priv->sec_info;
+ void *sec_hdr;
+ struct cudbg_hdr *cudbg_hdr;
+ struct cudbg_flash_hdr *flash_hdr;
+ struct cudbg_entity_hdr *entity_hdr;
+ u32 hdr_offset;
+ u32 data_hdr_size;
+ u32 total_hdr_size;
+ u32 sec_hdr_start_addr;
+
+ data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
+ sizeof(struct cudbg_hdr);
+ total_hdr_size = data_hdr_size + sizeof(struct cudbg_flash_hdr);
+ sec_hdr_start_addr = CUDBG_SF_SECTOR_SIZE - total_hdr_size;
+ sec_hdr = sec_info->sec_data + sec_hdr_start_addr;
+
+ flash_hdr = (struct cudbg_flash_hdr *)(sec_hdr);
+ cudbg_hdr = (struct cudbg_hdr *)dbg_buff->data;
+
+	/* On the first call, initialize the flash header and copy all data
+	 * headers; on subsequent calls (the else branch), copy only the
+	 * current entity header.
+	 */
+ if ((start_offset - sec_info->skip_size) == data_hdr_size) {
+ flash_hdr->signature = CUDBG_FL_SIGNATURE;
+ flash_hdr->major_ver = CUDBG_FL_MAJOR_VERSION;
+ flash_hdr->minor_ver = CUDBG_FL_MINOR_VERSION;
+ flash_hdr->build_ver = CUDBG_FL_BUILD_VERSION;
+ flash_hdr->hdr_len = sizeof(struct cudbg_flash_hdr);
+ hdr_offset = sizeof(struct cudbg_flash_hdr);
+
+ memcpy((void *)((char *)sec_hdr + hdr_offset),
+ (void *)((char *)dbg_buff->data), data_hdr_size);
+ } else
+ memcpy((void *)((char *)sec_hdr +
+ sizeof(struct cudbg_flash_hdr) +
+ cur_entity_hdr_offset),
+ (void *)((char *)dbg_buff->data +
+ cur_entity_hdr_offset),
+ sizeof(struct cudbg_entity_hdr));
+
+ hdr_offset = data_hdr_size + sizeof(struct cudbg_flash_hdr);
+ flash_hdr->data_len = cudbg_hdr->data_len - sec_info->skip_size;
+ flash_hdr->timestamp = timestamp;
+
+ entity_hdr = (struct cudbg_entity_hdr *)((char *)sec_hdr +
+ sizeof(struct cudbg_flash_hdr) +
+ cur_entity_hdr_offset);
+	/* Big entities like MC need to be skipped. */
+ entity_hdr->start_offset -= sec_info->skip_size;
+
+ cudbg_hdr = (struct cudbg_hdr *)((char *)sec_hdr +
+ sizeof(struct cudbg_flash_hdr));
+ cudbg_hdr->data_len = flash_hdr->data_len;
+ flash_hdr->data_len += ext_size;
+}
+
+/* Write CUDBG data into serial flash */
+int cudbg_write_flash(void *handle, u64 timestamp, void *data,
+ u32 start_offset, u32 cur_entity_hdr_offset,
+ u32 cur_entity_size,
+ u32 ext_size)
+{
+ struct cudbg_private *priv = handle;
+ struct cudbg_init *cudbg_init = &priv->dbg_init;
+ struct cudbg_flash_sec_info *sec_info = &priv->sec_info;
+ struct adapter *adap = cudbg_init->adap;
+ struct cudbg_flash_hdr *flash_hdr = NULL;
+ struct cudbg_buffer *dbg_buff = (struct cudbg_buffer *)data;
+ u32 data_hdr_size;
+ u32 total_hdr_size;
+ u32 tmp_size;
+ u32 sec_data_offset;
+ u32 sec_hdr_start_addr;
+ u32 sec_data_size;
+ u32 space_left;
+ int rc = 0;
+ int sec;
+
+ data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
+ sizeof(struct cudbg_hdr);
+ total_hdr_size = data_hdr_size + sizeof(struct cudbg_flash_hdr);
+ sec_hdr_start_addr = CUDBG_SF_SECTOR_SIZE - total_hdr_size;
+ sec_data_size = sec_hdr_start_addr;
+
+ cudbg_init->print("\tWriting %u bytes to flash\n", cur_entity_size);
+
+	/* update_headers() creates the header if sec_info->sec_data does
+	 * not have one yet, and updates the existing header otherwise.
+	 */
+ update_headers(handle, dbg_buff, timestamp,
+ cur_entity_hdr_offset,
+ start_offset, ext_size);
+
+ if (ext_size) {
+ cur_entity_size += sizeof(struct cudbg_entity_hdr);
+ start_offset = dbg_buff->offset - cur_entity_size;
+ }
+
+ flash_hdr = (struct cudbg_flash_hdr *)(sec_info->sec_data +
+ sec_hdr_start_addr);
+
+ if (flash_hdr->data_len > CUDBG_FLASH_SIZE) {
+ rc = CUDBG_STATUS_FLASH_FULL;
+ goto out;
+ }
+
+ space_left = CUDBG_FLASH_SIZE - flash_hdr->data_len;
+
+ if (cur_entity_size > space_left) {
+ rc = CUDBG_STATUS_FLASH_FULL;
+ goto out;
+ }
+
+ while (cur_entity_size > 0) {
+ sec = find_empty_sec(sec_info);
+ if (sec_info->par_sec) {
+ sec_data_offset = sec_info->par_sec_offset;
+ set_sector_availability(sec_info, sec_info->par_sec, 0);
+ sec_info->par_sec = 0;
+ sec_info->par_sec_offset = 0;
+
+ } else {
+ sec_info->cur_seq_no++;
+ flash_hdr->sec_seq_no = sec_info->cur_seq_no;
+ sec_data_offset = 0;
+ }
+
+ if (cur_entity_size + sec_data_offset > sec_data_size) {
+ tmp_size = sec_data_size - sec_data_offset;
+ } else {
+ tmp_size = cur_entity_size;
+ sec_info->par_sec = sec;
+ sec_info->par_sec_offset = cur_entity_size +
+ sec_data_offset;
+ }
+
+ memcpy((void *)((char *)sec_info->sec_data + sec_data_offset),
+ (void *)((char *)dbg_buff->data + start_offset),
+ tmp_size);
+
+ rc = write_flash(adap, sec, sec_info->sec_data,
+ CUDBG_SF_SECTOR_SIZE);
+ if (rc)
+ goto out;
+
+ cur_entity_size -= tmp_size;
+ set_sector_availability(sec_info, sec, 1);
+ start_offset += tmp_size;
+ }
+out:
+ return rc;
+}
+
+int write_flash(struct adapter *adap, u32 start_sec, void *data, u32 size)
+{
+ unsigned int addr;
+ unsigned int i, n;
+ unsigned int sf_sec_size;
+ int rc = 0;
+
+ u8 *ptr = (u8 *)data;
+
+ sf_sec_size = adap->params.sf_size/adap->params.sf_nsec;
+
+ addr = start_sec * CUDBG_SF_SECTOR_SIZE;
+	/* # of sectors spanned */
+	i = DIV_ROUND_UP(size, sf_sec_size);
+
+ rc = t4_flash_erase_sectors(adap, start_sec,
+ start_sec + i - 1);
+ /*
+	 * If size == 0 then we're simply erasing the requested FLASH
+	 * sectors, with nothing to write back.
+ */
+
+ if (rc || size == 0)
+ goto out;
+
+	/* Write to the flash up to SF_PAGE_SIZE bytes at a time. */
+ for (i = 0; i < size; i += SF_PAGE_SIZE) {
+ if ((size - i) < SF_PAGE_SIZE)
+ n = size - i;
+ else
+ n = SF_PAGE_SIZE;
+ rc = t4_write_flash(adap, addr, n, ptr, 0);
+ if (rc)
+ goto out;
+
+ addr += n;
+ ptr += n;
+ }
+
+ return 0;
+out:
+ return rc;
+}
+
+int cudbg_read_flash_details(void *handle, struct cudbg_flash_hdr *data)
+{
+ int rc;
+ rc = cudbg_read_flash(handle, (void *)data,
+ sizeof(struct cudbg_flash_hdr), 0);
+
+ return rc;
+}
+
+int cudbg_read_flash_data(void *handle, void *buf, u32 buf_size)
+{
+ int rc;
+ u32 total_hdr_size, data_header_size;
+ void *payload = NULL;
+ u32 payload_size = 0;
+
+ data_header_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
+ sizeof(struct cudbg_hdr);
+ total_hdr_size = data_header_size + sizeof(struct cudbg_flash_hdr);
+
+ /* Copy flash header to buffer */
+ rc = cudbg_read_flash(handle, buf, total_hdr_size, 0);
+ if (rc != 0)
+ goto out;
+ payload = (char *)buf + total_hdr_size;
+ payload_size = buf_size - total_hdr_size;
+
+ /* Reading flash data to buf */
+ rc = cudbg_read_flash(handle, payload, payload_size, 1);
+ if (rc != 0)
+ goto out;
+
+out:
+ return rc;
+}
+
+int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
+{
+ struct cudbg_private *priv = handle;
+ struct cudbg_init *cudbg_init = &priv->dbg_init;
+ struct cudbg_flash_sec_info *sec_info = &priv->sec_info;
+ struct adapter *adap = cudbg_init->adap;
+ struct cudbg_flash_hdr flash_hdr;
+ u32 total_hdr_size;
+ u32 data_hdr_size;
+ u32 sec_hdr_start_addr;
+ u32 tmp_size;
+ u32 data_offset = 0;
+ u32 i, j;
+ int rc;
+
+ rc = t4_get_flash_params(adap);
+ if (rc) {
+		cudbg_init->print("\nGet flash params failed. "
+				  "Try again...readflash\n\n");
+ return rc;
+ }
+
+ data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
+ sizeof(struct cudbg_hdr);
+ total_hdr_size = data_hdr_size + sizeof(struct cudbg_flash_hdr);
+ sec_hdr_start_addr = CUDBG_SF_SECTOR_SIZE - total_hdr_size;
+
+ if (!data_flag) {
+ /* fill header */
+ if (!sec_info->max_timestamp) {
+			/* Find the max timestamp, since older filled
+			 * sectors may also be present.
+ */
+ memset(&flash_hdr, 0, sizeof(struct cudbg_flash_hdr));
+ rc = read_flash(adap, CUDBG_START_SEC, &flash_hdr,
+ sizeof(struct cudbg_flash_hdr),
+ sec_hdr_start_addr);
+
+ if (flash_hdr.signature == CUDBG_FL_SIGNATURE) {
+ sec_info->max_timestamp = flash_hdr.timestamp;
+ } else {
+ rc = read_flash(adap, CUDBG_START_SEC + 1,
+ &flash_hdr,
+ sizeof(struct cudbg_flash_hdr),
+ sec_hdr_start_addr);
+
+ if (flash_hdr.signature == CUDBG_FL_SIGNATURE)
+ sec_info->max_timestamp =
+ flash_hdr.timestamp;
+ else {
+					cudbg_init->print("\n\tNo cudbg dump "
+							  "found in flash\n\n");
+ return CUDBG_STATUS_NO_SIGNATURE;
+ }
+
+ }
+
+			/* Find the max sequence number, since the sector
+			 * with the highest sequence number has the updated
+			 * header.
+			 */
+			for (i = CUDBG_START_SEC; i < CUDBG_SF_MAX_SECTOR;
+			     i++) {
+ memset(&flash_hdr, 0,
+ sizeof(struct cudbg_flash_hdr));
+ rc = read_flash(adap, i, &flash_hdr,
+ sizeof(struct cudbg_flash_hdr),
+ sec_hdr_start_addr);
+
+ if (flash_hdr.signature == CUDBG_FL_SIGNATURE &&
+ sec_info->max_timestamp ==
+ flash_hdr.timestamp &&
+ sec_info->max_seq_no <=
+ flash_hdr.sec_seq_no) {
+ if (sec_info->max_seq_no ==
+ flash_hdr.sec_seq_no) {
+ if (sec_info->hdr_data_len <
+ flash_hdr.data_len)
+ sec_info->max_seq_sec = i;
+ } else {
+ sec_info->max_seq_sec = i;
+ sec_info->hdr_data_len =
+ flash_hdr.data_len;
+ }
+ sec_info->max_seq_no = flash_hdr.sec_seq_no;
+ }
+ }
+ }
+ rc = read_flash(adap, sec_info->max_seq_sec,
+ (struct cudbg_flash_hdr *)data,
+ size, sec_hdr_start_addr);
+
+ if (rc)
+ cudbg_init->print("Read flash header failed, rc %d\n",
+ rc);
+
+ return rc;
+ }
+
+	/* Read the sectors in sequence-number order. */
+ for (i = 1; i <= sec_info->max_seq_no; i++) {
+ for (j = CUDBG_START_SEC; j < CUDBG_SF_MAX_SECTOR; j++) {
+ memset(&flash_hdr, 0, sizeof(struct cudbg_flash_hdr));
+ rc = read_flash(adap, j, &flash_hdr,
+ sizeof(struct cudbg_flash_hdr),
+ sec_hdr_start_addr);
+
+			if (flash_hdr.signature == CUDBG_FL_SIGNATURE &&
+			    sec_info->max_timestamp == flash_hdr.timestamp &&
+			    flash_hdr.sec_seq_no == i) {
+ if (size + total_hdr_size >
+ CUDBG_SF_SECTOR_SIZE)
+ tmp_size = CUDBG_SF_SECTOR_SIZE -
+ total_hdr_size;
+ else
+ tmp_size = size;
+
+				if (i != sec_info->max_seq_no ||
+				    j == sec_info->max_seq_sec) {
+					/* Fill the data buffer with sector
+					 * data, excluding the sector header.
+					 */
+ rc = read_flash(adap, j,
+ (void *)((char *)data +
+ data_offset),
+ tmp_size, 0);
+					data_offset += tmp_size;
+					size -= tmp_size;
+ break;
+ }
+ }
+ }
+ }
+
+ return rc;
+}
+
+int read_flash(struct adapter *adap, u32 start_sec, void *data, u32 size,
+ u32 start_address)
+{
+ unsigned int addr, i, n;
+ int rc;
+ u32 *ptr = (u32 *)data;
+ addr = start_sec * CUDBG_SF_SECTOR_SIZE + start_address;
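+	/* t4_read_flash() transfers 32-bit words: convert the byte count to
+	 * words, while addr remains a byte address and advances by n * 4.
+	 */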
+ size = size / 4;
+ for (i = 0; i < size; i += SF_PAGE_SIZE) {
+ if ((size - i) < SF_PAGE_SIZE)
+ n = size - i;
+ else
+ n = SF_PAGE_SIZE;
+ rc = t4_read_flash(adap, addr, n, ptr, 0);
+ if (rc)
+ goto out;
+
+		addr += n * 4;
+ ptr += n;
+ }
+
+ return 0;
+out:
+ return rc;
+}
diff --git a/sys/dev/cxgbe/cudbg/cudbg_lib.c b/sys/dev/cxgbe/cudbg/cudbg_lib.c
new file mode 100644
index 000000000000..ee5e1b086c44
--- /dev/null
+++ b/sys/dev/cxgbe/cudbg/cudbg_lib.c
@@ -0,0 +1,4433 @@
+/*-
+ * Copyright (c) 2017 Chelsio Communications, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/param.h>
+
+#include "common/common.h"
+#include "common/t4_regs.h"
+#include "cudbg.h"
+#include "cudbg_lib_common.h"
+#include "cudbg_lib.h"
+#include "cudbg_entity.h"
+#define BUFFER_WARN_LIMIT 10000000
+
+struct large_entity large_entity_list[] = {
+ {CUDBG_EDC0, 0, 0},
+	{CUDBG_EDC1, 0, 0},
+ {CUDBG_MC0, 0, 0},
+ {CUDBG_MC1, 0, 0}
+};
+
+static int is_fw_attached(struct cudbg_init *pdbg_init)
+{
+
+ return (pdbg_init->adap->flags & FW_OK);
+}
+
+/* This function adds padding bytes to debug_buffer to make it
+ * 4-byte aligned.
+ */
+static void align_debug_buffer(struct cudbg_buffer *dbg_buff,
+ struct cudbg_entity_hdr *entity_hdr)
+{
+ u8 zero_buf[4] = {0};
+ u8 padding, remain;
+
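+	/* Pad with zero bytes up to the next 4-byte boundary; e.g., an
+	 * entity ending 6 bytes past start_offset leaves remain = 2 and
+	 * needs padding = 2.
+	 */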
+ remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
+ padding = 4 - remain;
+ if (remain) {
+ memcpy(((u8 *) dbg_buff->data) + dbg_buff->offset, &zero_buf,
+ padding);
+ dbg_buff->offset += padding;
+ entity_hdr->num_pad = padding;
+ }
+
+ entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
+}
+
+static void read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
+ enum ctxt_type ctype, u32 *data)
+{
+ struct adapter *padap = pdbg_init->adap;
+ int rc = -1;
+
+ if (is_fw_attached(pdbg_init))
+ rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype,
+ data);
+
+ if (rc)
+ t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
+}
+
+static int get_next_ext_entity_hdr(void *outbuf, u32 *ext_size,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_entity_hdr **entity_hdr)
+{
+ struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
+ int rc = 0;
+ u32 ext_offset = cudbg_hdr->data_len;
+ *ext_size = 0;
+
+ if (dbg_buff->size - dbg_buff->offset <=
+ sizeof(struct cudbg_entity_hdr)) {
+ rc = CUDBG_STATUS_BUFFER_SHORT;
+ goto err;
+ }
+
+ *entity_hdr = (struct cudbg_entity_hdr *)
+ ((char *)outbuf + cudbg_hdr->data_len);
+
+ /* Find the last extended entity header */
+ while ((*entity_hdr)->size) {
+
+ ext_offset += sizeof(struct cudbg_entity_hdr) +
+ (*entity_hdr)->size;
+
+ *ext_size += (*entity_hdr)->size +
+ sizeof(struct cudbg_entity_hdr);
+
+ if (dbg_buff->size - dbg_buff->offset + *ext_size <=
+ sizeof(struct cudbg_entity_hdr)) {
+ rc = CUDBG_STATUS_BUFFER_SHORT;
+ goto err;
+ }
+
+ if (ext_offset != (*entity_hdr)->next_ext_offset) {
+ ext_offset -= sizeof(struct cudbg_entity_hdr) +
+ (*entity_hdr)->size;
+ break;
+ }
+
+ (*entity_hdr)->next_ext_offset = *ext_size;
+
+		*entity_hdr = (struct cudbg_entity_hdr *)
+		    ((char *)outbuf + ext_offset);
+ }
+
+ /* update the data offset */
+ dbg_buff->offset = ext_offset;
+err:
+ return rc;
+}
+
+static int wr_entity_to_flash(void *handle, struct cudbg_buffer *dbg_buff,
+ u32 cur_entity_data_offset,
+ u32 cur_entity_size,
+ int entity_nu, u32 ext_size)
+{
+ struct cudbg_private *priv = handle;
+ struct cudbg_init *cudbg_init = &priv->dbg_init;
+ struct cudbg_flash_sec_info *sec_info = &priv->sec_info;
+ u64 timestamp;
+ u32 cur_entity_hdr_offset = sizeof(struct cudbg_hdr);
+ u32 remain_flash_size;
+ u32 flash_data_offset;
+ u32 data_hdr_size;
+ int rc = -1;
+
+ data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
+ sizeof(struct cudbg_hdr);
+
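+	/* The first FLASH_CUDBG_NSECS * (flash header + data header) bytes
+	 * of the flash region are reserved for headers; the entity payload
+	 * lands after them, at its data offset minus the in-buffer data
+	 * header size.
+	 */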
+ flash_data_offset = (FLASH_CUDBG_NSECS *
+ (sizeof(struct cudbg_flash_hdr) +
+ data_hdr_size)) +
+ (cur_entity_data_offset - data_hdr_size);
+
+ if (flash_data_offset > CUDBG_FLASH_SIZE) {
+ update_skip_size(sec_info, cur_entity_size);
+ if (cudbg_init->verbose)
+			cudbg_init->print("Skipping large entity...\n");
+ return rc;
+ }
+
+ remain_flash_size = CUDBG_FLASH_SIZE - flash_data_offset;
+
+ if (cur_entity_size > remain_flash_size) {
+ update_skip_size(sec_info, cur_entity_size);
+ if (cudbg_init->verbose)
+			cudbg_init->print("Skipping large entity...\n");
+ } else {
+ timestamp = 0;
+
+ cur_entity_hdr_offset +=
+ (sizeof(struct cudbg_entity_hdr) *
+ (entity_nu - 1));
+
+ rc = cudbg_write_flash(handle, timestamp, dbg_buff,
+ cur_entity_data_offset,
+ cur_entity_hdr_offset,
+ cur_entity_size,
+ ext_size);
+ if (rc == CUDBG_STATUS_FLASH_FULL && cudbg_init->verbose)
+			cudbg_init->print("\n\tFLASH is full... "
+					  "cannot write any more to flash\n\n");
+ }
+
+ return rc;
+}
+
+int cudbg_collect(void *handle, void *outbuf, u32 *outbuf_size)
+{
+ struct cudbg_entity_hdr *entity_hdr = NULL;
+ struct cudbg_entity_hdr *ext_entity_hdr = NULL;
+ struct cudbg_hdr *cudbg_hdr;
+ struct cudbg_buffer dbg_buff;
+ struct cudbg_error cudbg_err = {0};
+ int large_entity_code;
+
+ u8 *dbg_bitmap = ((struct cudbg_private *)handle)->dbg_init.dbg_bitmap;
+ struct cudbg_init *cudbg_init =
+ &(((struct cudbg_private *)handle)->dbg_init);
+ struct adapter *padap = cudbg_init->adap;
+ u32 total_size, remaining_buf_size;
+ u32 ext_size = 0;
+ int index, bit, i, rc = -1;
+ int all;
+ bool flag_ext = 0;
+
+ reset_skip_entity();
+
+ dbg_buff.data = outbuf;
+ dbg_buff.size = *outbuf_size;
+ dbg_buff.offset = 0;
+
+ cudbg_hdr = (struct cudbg_hdr *)dbg_buff.data;
+ cudbg_hdr->signature = CUDBG_SIGNATURE;
+ cudbg_hdr->hdr_len = sizeof(struct cudbg_hdr);
+ cudbg_hdr->major_ver = CUDBG_MAJOR_VERSION;
+ cudbg_hdr->minor_ver = CUDBG_MINOR_VERSION;
+ cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
+ cudbg_hdr->chip_ver = padap->params.chipid;
+
+ if (cudbg_hdr->data_len)
+ flag_ext = 1;
+
+ if (cudbg_init->use_flash) {
+#ifndef notyet
+ rc = t4_get_flash_params(padap);
+ if (rc) {
+ if (cudbg_init->verbose)
+ cudbg_init->print("\nGet flash params failed.\n\n");
+ cudbg_init->use_flash = 0;
+ }
+#endif
+
+#ifdef notyet
+		/* Timestamp is mandatory. If it is not passed, disable
+		 * flash support.
+		 */
+ if (!cudbg_init->dbg_params[CUDBG_TIMESTAMP_PARAM].u.time) {
+ if (cudbg_init->verbose)
+				cudbg_init->print("\nTimestamp param missing, "
+					"so ignoring flash write request\n\n");
+ cudbg_init->use_flash = 0;
+ }
+#endif
+ }
+
+ if (sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY >
+ dbg_buff.size) {
+ rc = CUDBG_STATUS_SMALL_BUFF;
+ total_size = cudbg_hdr->hdr_len;
+ goto err;
+ }
+
+	/* If the ext flag is set, move the offset to the end of the buffer
+	 * so that ext entities can be added.
+	 */
+ if (flag_ext) {
+ ext_entity_hdr = (struct cudbg_entity_hdr *)
+ ((char *)outbuf + cudbg_hdr->hdr_len +
+ (sizeof(struct cudbg_entity_hdr) *
+ (CUDBG_EXT_ENTITY - 1)));
+ ext_entity_hdr->start_offset = cudbg_hdr->data_len;
+ ext_entity_hdr->entity_type = CUDBG_EXT_ENTITY;
+ ext_entity_hdr->size = 0;
+ dbg_buff.offset = cudbg_hdr->data_len;
+ } else {
+		dbg_buff.offset += cudbg_hdr->hdr_len; /* skip global header */
+ dbg_buff.offset += CUDBG_MAX_ENTITY *
+ sizeof(struct cudbg_entity_hdr);
+ }
+
+ total_size = dbg_buff.offset;
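+	/* Bit CUDBG_ALL in the first bitmap byte requests every entity. */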
+ all = dbg_bitmap[0] & (1 << CUDBG_ALL);
+
+ /*sort(large_entity_list);*/
+
+ for (i = 1; i < CUDBG_MAX_ENTITY; i++) {
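+		/* Each bit of dbg_bitmap selects one entity; map entity i
+		 * to its byte index and bit position.
+		 */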
+ index = i / 8;
+ bit = i % 8;
+
+ if (entity_list[i].bit == CUDBG_EXT_ENTITY)
+ continue;
+
+ if (all || (dbg_bitmap[index] & (1 << bit))) {
+
+ if (!flag_ext) {
+ rc = get_entity_hdr(outbuf, i, dbg_buff.size,
+ &entity_hdr);
+ if (rc)
+ cudbg_hdr->hdr_flags = rc;
+ } else {
+ rc = get_next_ext_entity_hdr(outbuf, &ext_size,
+ &dbg_buff,
+ &entity_hdr);
+ if (rc)
+ goto err;
+
+ /* move the offset after the ext header */
+ dbg_buff.offset +=
+ sizeof(struct cudbg_entity_hdr);
+ }
+
+ entity_hdr->entity_type = i;
+ entity_hdr->start_offset = dbg_buff.offset;
+ /* process each entity by calling process_entity fp */
+ remaining_buf_size = dbg_buff.size - dbg_buff.offset;
+
+ if ((remaining_buf_size <= BUFFER_WARN_LIMIT) &&
+ is_large_entity(i)) {
+ if (cudbg_init->verbose)
+ cudbg_init->print("Skipping %s\n",
+ entity_list[i].name);
+ skip_entity(i);
+ continue;
+ } else {
+
+				/* If fw_attach is 0, skip entities that
+				 * communicate with the firmware.
+				 */
+
+ if (!is_fw_attached(cudbg_init) &&
+ (entity_list[i].flag &
+ (1 << ENTITY_FLAG_FW_NO_ATTACH))) {
+ if (cudbg_init->verbose)
+					cudbg_init->print("Skipping %s entity, "
+							  "because fw_attach "
+							  "is 0\n",
+							  entity_list[i].name);
+ continue;
+ }
+
+ if (cudbg_init->verbose)
+				cudbg_init->print("collecting debug entity: "
+						  "%s\n", entity_list[i].name);
+ memset(&cudbg_err, 0,
+ sizeof(struct cudbg_error));
+ rc = process_entity[i-1](cudbg_init, &dbg_buff,
+ &cudbg_err);
+ }
+
+ if (rc) {
+ entity_hdr->size = 0;
+ dbg_buff.offset = entity_hdr->start_offset;
+ } else
+ align_debug_buffer(&dbg_buff, entity_hdr);
+
+ if (cudbg_err.sys_err)
+ rc = CUDBG_SYSTEM_ERROR;
+
+ entity_hdr->hdr_flags = rc;
+ entity_hdr->sys_err = cudbg_err.sys_err;
+ entity_hdr->sys_warn = cudbg_err.sys_warn;
+
+ /* We don't want to include ext entity size in global
+ * header
+ */
+ if (!flag_ext)
+ total_size += entity_hdr->size;
+
+ cudbg_hdr->data_len = total_size;
+ *outbuf_size = total_size;
+
+ /* consider the size of the ext entity header and data
+ * also
+ */
+ if (flag_ext) {
+ ext_size += (sizeof(struct cudbg_entity_hdr) +
+ entity_hdr->size);
+ entity_hdr->start_offset -= cudbg_hdr->data_len;
+ ext_entity_hdr->size = ext_size;
+ entity_hdr->next_ext_offset = ext_size;
+ entity_hdr->flag |= CUDBG_EXT_DATA_VALID;
+ }
+
+			if (cudbg_init->use_flash) {
+				if (flag_ext)
+					wr_entity_to_flash(handle, &dbg_buff,
+					    ext_entity_hdr->start_offset,
+					    entity_hdr->size,
+					    CUDBG_EXT_ENTITY, ext_size);
+				else
+					wr_entity_to_flash(handle, &dbg_buff,
+					    entity_hdr->start_offset,
+					    entity_hdr->size, i, ext_size);
+			}
+ }
+ }
+
+	for (i = 0; i < ARRAY_SIZE(large_entity_list); i++) {
+ large_entity_code = large_entity_list[i].entity_code;
+ if (large_entity_list[i].skip_flag) {
+ if (!flag_ext) {
+ rc = get_entity_hdr(outbuf, large_entity_code,
+ dbg_buff.size, &entity_hdr);
+ if (rc)
+ cudbg_hdr->hdr_flags = rc;
+ } else {
+ rc = get_next_ext_entity_hdr(outbuf, &ext_size,
+ &dbg_buff,
+ &entity_hdr);
+ if (rc)
+ goto err;
+
+ dbg_buff.offset +=
+ sizeof(struct cudbg_entity_hdr);
+ }
+
+			/* If fw_attach is 0, skip entities that
+			 * communicate with the firmware.
+			 */
+ if (!is_fw_attached(cudbg_init) &&
+ (entity_list[large_entity_code].flag &
+ (1 << ENTITY_FLAG_FW_NO_ATTACH))) {
+ if (cudbg_init->verbose)
+				cudbg_init->print("Skipping %s entity, "
+				    "because fw_attach is 0\n",
+				    entity_list[large_entity_code].name);
+ continue;
+ }
+
+ entity_hdr->entity_type = large_entity_code;
+ entity_hdr->start_offset = dbg_buff.offset;
+ if (cudbg_init->verbose)
+ cudbg_init->print("Re-trying debug entity: %s\n",
+ entity_list[large_entity_code].name);
+
+ memset(&cudbg_err, 0, sizeof(struct cudbg_error));
+ rc = process_entity[large_entity_code - 1](cudbg_init,
+ &dbg_buff,
+ &cudbg_err);
+ if (rc) {
+ entity_hdr->size = 0;
+ dbg_buff.offset = entity_hdr->start_offset;
+ } else
+ align_debug_buffer(&dbg_buff, entity_hdr);
+
+ if (cudbg_err.sys_err)
+ rc = CUDBG_SYSTEM_ERROR;
+
+ entity_hdr->hdr_flags = rc;
+ entity_hdr->sys_err = cudbg_err.sys_err;
+ entity_hdr->sys_warn = cudbg_err.sys_warn;
+
+ /* We don't want to include ext entity size in global
+ * header
+ */
+ if (!flag_ext)
+ total_size += entity_hdr->size;
+
+ cudbg_hdr->data_len = total_size;
+ *outbuf_size = total_size;
+
+ /* consider the size of the ext entity header and
+ * data also
+ */
+ if (flag_ext) {
+ ext_size += (sizeof(struct cudbg_entity_hdr) +
+ entity_hdr->size);
+ entity_hdr->start_offset -=
+ cudbg_hdr->data_len;
+ ext_entity_hdr->size = ext_size;
+ entity_hdr->flag |= CUDBG_EXT_DATA_VALID;
+ }
+
+			if (cudbg_init->use_flash) {
+				if (flag_ext)
+					wr_entity_to_flash(handle, &dbg_buff,
+					    ext_entity_hdr->start_offset,
+					    entity_hdr->size,
+					    CUDBG_EXT_ENTITY, ext_size);
+				else
+					wr_entity_to_flash(handle, &dbg_buff,
+					    entity_hdr->start_offset,
+					    entity_hdr->size,
+					    large_entity_list[i].entity_code,
+					    ext_size);
+			}
+ }
+ }
+
+ cudbg_hdr->data_len = total_size;
+ *outbuf_size = total_size;
+
+ if (flag_ext)
+ *outbuf_size += ext_size;
+
+ return 0;
+err:
+ return rc;
+}
+
+void reset_skip_entity(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(large_entity_list); i++)
+ large_entity_list[i].skip_flag = 0;
+}
+
+void skip_entity(int entity_code)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(large_entity_list); i++) {
+ if (large_entity_list[i].entity_code == entity_code)
+ large_entity_list[i].skip_flag = 1;
+ }
+}
+
+int is_large_entity(int entity_code)
+{
+ int i;
+
+	for (i = 0; i < ARRAY_SIZE(large_entity_list); i++) {
+ if (large_entity_list[i].entity_code == entity_code)
+ return 1;
+ }
+ return 0;
+}
+
+int get_entity_hdr(void *outbuf, int i, u32 size,
+ struct cudbg_entity_hdr **entity_hdr)
+{
+ int rc = 0;
+ struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
+
+	if (cudbg_hdr->hdr_len + (sizeof(struct cudbg_entity_hdr) * i) > size)
+		return CUDBG_STATUS_SMALL_BUFF;
+
+	*entity_hdr = (struct cudbg_entity_hdr *)
+	    ((char *)outbuf + cudbg_hdr->hdr_len +
+	    (sizeof(struct cudbg_entity_hdr) * (i - 1)));
+ return rc;
+}
+
+static int collect_rss(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer scratch_buff;
+ u32 size;
+ int rc = 0;
+
+ size = RSS_NENTRIES * sizeof(u16);
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ rc = t4_read_rss(padap, (u16 *)scratch_buff.data);
+ if (rc) {
+ if (pdbg_init->verbose)
+			pdbg_init->print("%s(), t4_read_rss failed, rc: %d\n",
+ __func__, rc);
+ cudbg_err->sys_err = rc;
+ goto err1;
+ }
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_sw_state(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer scratch_buff;
+ struct sw_state *swstate;
+ u32 size;
+ int rc = 0;
+
+ size = sizeof(struct sw_state);
+
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ swstate = (struct sw_state *) scratch_buff.data;
+
+ swstate->fw_state = t4_read_reg(padap, A_PCIE_FW);
+ snprintf(swstate->caller_string, sizeof(swstate->caller_string), "%s",
+ "FreeBSD");
+ swstate->os_type = 0;
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_ddp_stats(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer scratch_buff;
+ struct tp_usm_stats *tp_usm_stats_buff;
+ u32 size;
+ int rc = 0;
+
+ size = sizeof(struct tp_usm_stats);
+
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ tp_usm_stats_buff = (struct tp_usm_stats *) scratch_buff.data;
+
+ /* spin_lock(&padap->stats_lock); TODO*/
+ t4_get_usm_stats(padap, tp_usm_stats_buff, 1);
+ /* spin_unlock(&padap->stats_lock); TODO*/
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_ulptx_la(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer scratch_buff;
+ struct struct_ulptx_la *ulptx_la_buff;
+ u32 size, i, j;
+ int rc = 0;
+
+ size = sizeof(struct struct_ulptx_la);
+
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ ulptx_la_buff = (struct struct_ulptx_la *) scratch_buff.data;
+
+ for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
+ ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
+ A_ULP_TX_LA_RDPTR_0 +
+ 0x10 * i);
+ ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
+ A_ULP_TX_LA_WRPTR_0 +
+ 0x10 * i);
+ ulptx_la_buff->rddata[i] = t4_read_reg(padap,
+ A_ULP_TX_LA_RDDATA_0 +
+ 0x10 * i);
+ for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++) {
+ ulptx_la_buff->rd_data[i][j] =
+ t4_read_reg(padap,
+ A_ULP_TX_LA_RDDATA_0 + 0x10 * i);
+ }
+ }
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_ulprx_la(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer scratch_buff;
+ struct struct_ulprx_la *ulprx_la_buff;
+ u32 size;
+ int rc = 0;
+
+ size = sizeof(struct struct_ulprx_la);
+
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ ulprx_la_buff = (struct struct_ulprx_la *) scratch_buff.data;
+ t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
+ ulprx_la_buff->size = ULPRX_LA_SIZE;
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_cpl_stats(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer scratch_buff;
+ struct struct_tp_cpl_stats *tp_cpl_stats_buff;
+ u32 size;
+ int rc = 0;
+
+ size = sizeof(struct struct_tp_cpl_stats);
+
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ tp_cpl_stats_buff = (struct struct_tp_cpl_stats *) scratch_buff.data;
+ tp_cpl_stats_buff->nchan = padap->chip_params->nchan;
+
+ /* spin_lock(&padap->stats_lock); TODO*/
+ t4_tp_get_cpl_stats(padap, &tp_cpl_stats_buff->stats, 1);
+ /* spin_unlock(&padap->stats_lock); TODO*/
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_wc_stats(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer scratch_buff;
+ struct struct_wc_stats *wc_stats_buff;
+ u32 val1;
+ u32 val2;
+ u32 size;
+
+ int rc = 0;
+
+ size = sizeof(struct struct_wc_stats);
+
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ wc_stats_buff = (struct struct_wc_stats *) scratch_buff.data;
+
+ if (!is_t4(padap)) {
+ val1 = t4_read_reg(padap, A_SGE_STAT_TOTAL);
+ val2 = t4_read_reg(padap, A_SGE_STAT_MATCH);
+ wc_stats_buff->wr_cl_success = val1 - val2;
+ wc_stats_buff->wr_cl_fail = val2;
+ } else {
+ wc_stats_buff->wr_cl_success = 0;
+ wc_stats_buff->wr_cl_fail = 0;
+ }
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int mem_desc_cmp(const void *a, const void *b)
+{
+ return ((const struct struct_mem_desc *)a)->base -
+ ((const struct struct_mem_desc *)b)->base;
+}
+
+static int fill_meminfo(struct adapter *padap,
+ struct struct_meminfo *meminfo_buff)
+{
+ struct struct_mem_desc *md;
+ u32 size, lo, hi;
+ u32 used, alloc;
+ int n, i, rc = 0;
+
+ size = sizeof(struct struct_meminfo);
+
+ memset(meminfo_buff->avail, 0,
+ ARRAY_SIZE(meminfo_buff->avail) *
+ sizeof(struct struct_mem_desc));
+ memset(meminfo_buff->mem, 0,
+ (ARRAY_SIZE(region) + 3) * sizeof(struct struct_mem_desc));
+ md = meminfo_buff->mem;
+
+ for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
+ meminfo_buff->mem[i].limit = 0;
+ meminfo_buff->mem[i].idx = i;
+ }
+
+ i = 0;
+
+ lo = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
+
+ if (lo & F_EDRAM0_ENABLE) {
+ hi = t4_read_reg(padap, A_MA_EDRAM0_BAR);
+ meminfo_buff->avail[i].base = G_EDRAM0_BASE(hi) << 20;
+ meminfo_buff->avail[i].limit = meminfo_buff->avail[i].base +
+ (G_EDRAM0_SIZE(hi) << 20);
+ meminfo_buff->avail[i].idx = 0;
+ i++;
+ }
+
+ if (lo & F_EDRAM1_ENABLE) {
+ hi = t4_read_reg(padap, A_MA_EDRAM1_BAR);
+ meminfo_buff->avail[i].base = G_EDRAM1_BASE(hi) << 20;
+ meminfo_buff->avail[i].limit = meminfo_buff->avail[i].base +
+ (G_EDRAM1_SIZE(hi) << 20);
+ meminfo_buff->avail[i].idx = 1;
+ i++;
+ }
+
+ if (is_t5(padap)) {
+ if (lo & F_EXT_MEM0_ENABLE) {
+ hi = t4_read_reg(padap, A_MA_EXT_MEMORY0_BAR);
+ meminfo_buff->avail[i].base = G_EXT_MEM_BASE(hi) << 20;
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ (G_EXT_MEM_SIZE(hi) << 20);
+ meminfo_buff->avail[i].idx = 3;
+ i++;
+ }
+
+ if (lo & F_EXT_MEM1_ENABLE) {
+ hi = t4_read_reg(padap, A_MA_EXT_MEMORY1_BAR);
+ meminfo_buff->avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ (G_EXT_MEM1_SIZE(hi) << 20);
+ meminfo_buff->avail[i].idx = 4;
+ i++;
+ }
+ } else if (is_t6(padap)) {
+ if (lo & F_EXT_MEM_ENABLE) {
+ hi = t4_read_reg(padap, A_MA_EXT_MEMORY_BAR);
+ meminfo_buff->avail[i].base = G_EXT_MEM_BASE(hi) << 20;
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ (G_EXT_MEM_SIZE(hi) << 20);
+ meminfo_buff->avail[i].idx = 2;
+ i++;
+ }
+ }
+
+ if (!i) { /* no memory available */
+ rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
+ goto err;
+ }
+
+ meminfo_buff->avail_c = i;
+ qsort(meminfo_buff->avail, i, sizeof(struct struct_mem_desc),
+ mem_desc_cmp);
+ (md++)->base = t4_read_reg(padap, A_SGE_DBQ_CTXT_BADDR);
+ (md++)->base = t4_read_reg(padap, A_SGE_IMSG_CTXT_BADDR);
+ (md++)->base = t4_read_reg(padap, A_SGE_FLM_CACHE_BADDR);
+ (md++)->base = t4_read_reg(padap, A_TP_CMM_TCB_BASE);
+ (md++)->base = t4_read_reg(padap, A_TP_CMM_MM_BASE);
+ (md++)->base = t4_read_reg(padap, A_TP_CMM_TIMER_BASE);
+ (md++)->base = t4_read_reg(padap, A_TP_CMM_MM_RX_FLST_BASE);
+ (md++)->base = t4_read_reg(padap, A_TP_CMM_MM_TX_FLST_BASE);
+ (md++)->base = t4_read_reg(padap, A_TP_CMM_MM_PS_FLST_BASE);
+
+ /* the next few have explicit upper bounds */
+	md->base = t4_read_reg(padap, A_TP_PMM_TX_BASE);
+	md->limit = md->base - 1 +
+	    t4_read_reg(padap, A_TP_PMM_TX_PAGE_SIZE) *
+	    G_PMTXMAXPAGE(t4_read_reg(padap, A_TP_PMM_TX_MAX_PAGE));
+ md++;
+
+	md->base = t4_read_reg(padap, A_TP_PMM_RX_BASE);
+	md->limit = md->base - 1 +
+	    t4_read_reg(padap, A_TP_PMM_RX_PAGE_SIZE) *
+	    G_PMRXMAXPAGE(t4_read_reg(padap, A_TP_PMM_RX_MAX_PAGE));
+ md++;
+ if (t4_read_reg(padap, A_LE_DB_CONFIG) & F_HASHEN) {
+ if (chip_id(padap) <= CHELSIO_T5) {
+ hi = t4_read_reg(padap, A_LE_DB_TID_HASHBASE) / 4;
+ md->base = t4_read_reg(padap, A_LE_DB_HASH_TID_BASE);
+ } else {
+ hi = t4_read_reg(padap, A_LE_DB_HASH_TID_BASE);
+ md->base = t4_read_reg(padap,
+ A_LE_DB_HASH_TBL_BASE_ADDR);
+ }
+ md->limit = 0;
+ } else {
+ md->base = 0;
+ md->idx = ARRAY_SIZE(region); /* hide it */
+ }
+ md++;
+#define ulp_region(reg) \
+ {\
+ md->base = t4_read_reg(padap, A_ULP_ ## reg ## _LLIMIT);\
+ (md++)->limit = t4_read_reg(padap, A_ULP_ ## reg ## _ULIMIT);\
+ }
+
+ ulp_region(RX_ISCSI);
+ ulp_region(RX_TDDP);
+ ulp_region(TX_TPT);
+ ulp_region(RX_STAG);
+ ulp_region(RX_RQ);
+ ulp_region(RX_RQUDP);
+ ulp_region(RX_PBL);
+ ulp_region(TX_PBL);
+#undef ulp_region
+ md->base = 0;
+ md->idx = ARRAY_SIZE(region);
+ if (!is_t4(padap)) {
+ u32 sge_ctrl = t4_read_reg(padap, A_SGE_CONTROL2);
+ u32 fifo_size = t4_read_reg(padap, A_SGE_DBVFIFO_SIZE);
+ if (is_t5(padap)) {
+ if (sge_ctrl & F_VFIFO_ENABLE)
+ size = G_DBVFIFO_SIZE(fifo_size);
+ } else
+ size = G_T6_DBVFIFO_SIZE(fifo_size);
+
+ if (size) {
+ md->base = G_BASEADDR(t4_read_reg(padap,
+ A_SGE_DBVFIFO_BADDR));
+ md->limit = md->base + (size << 2) - 1;
+ }
+ }
+
+ md++;
+
+ md->base = t4_read_reg(padap, A_ULP_RX_CTX_BASE);
+ md->limit = 0;
+ md++;
+ md->base = t4_read_reg(padap, A_ULP_TX_ERR_TABLE_BASE);
+ md->limit = 0;
+ md++;
+#ifndef __NO_DRIVER_OCQ_SUPPORT__
+ /*md->base = padap->vres.ocq.start;*/
+ /*if (adap->vres.ocq.size)*/
+ /* md->limit = md->base + adap->vres.ocq.size - 1;*/
+ /*else*/
+ md->idx = ARRAY_SIZE(region); /* hide it */
+ md++;
+#endif
+
+ /* add any address-space holes, there can be up to 3 */
+ for (n = 0; n < i - 1; n++)
+ if (meminfo_buff->avail[n].limit <
+ meminfo_buff->avail[n + 1].base)
+ (md++)->base = meminfo_buff->avail[n].limit;
+
+ if (meminfo_buff->avail[n].limit)
+ (md++)->base = meminfo_buff->avail[n].limit;
+
+ n = (int) (md - meminfo_buff->mem);
+ meminfo_buff->mem_c = n;
+
+ qsort(meminfo_buff->mem, n, sizeof(struct struct_mem_desc),
+ mem_desc_cmp);
+
+ lo = t4_read_reg(padap, A_CIM_SDRAM_BASE_ADDR);
+ hi = t4_read_reg(padap, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
+ meminfo_buff->up_ram_lo = lo;
+ meminfo_buff->up_ram_hi = hi;
+
+ lo = t4_read_reg(padap, A_CIM_EXTMEM2_BASE_ADDR);
+ hi = t4_read_reg(padap, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
+ meminfo_buff->up_extmem2_lo = lo;
+ meminfo_buff->up_extmem2_hi = hi;
+
+ lo = t4_read_reg(padap, A_TP_PMM_RX_MAX_PAGE);
+ meminfo_buff->rx_pages_data[0] = G_PMRXMAXPAGE(lo);
+ meminfo_buff->rx_pages_data[1] =
+ t4_read_reg(padap, A_TP_PMM_RX_PAGE_SIZE) >> 10;
+	meminfo_buff->rx_pages_data[2] = (lo & F_PMRXNUMCHN) ? 2 : 1;
+
+ lo = t4_read_reg(padap, A_TP_PMM_TX_MAX_PAGE);
+ hi = t4_read_reg(padap, A_TP_PMM_TX_PAGE_SIZE);
+ meminfo_buff->tx_pages_data[0] = G_PMTXMAXPAGE(lo);
+ meminfo_buff->tx_pages_data[1] =
+ hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
+ meminfo_buff->tx_pages_data[2] =
+ hi >= (1 << 20) ? 'M' : 'K';
+ meminfo_buff->tx_pages_data[3] = 1 << G_PMTXNUMCHN(lo);
+
+ for (i = 0; i < 4; i++) {
+ if (chip_id(padap) > CHELSIO_T5)
+ lo = t4_read_reg(padap,
+ A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
+ else
+ lo = t4_read_reg(padap, A_MPS_RX_PG_RSV0 + i * 4);
+ if (is_t5(padap)) {
+ used = G_T5_USED(lo);
+ alloc = G_T5_ALLOC(lo);
+ } else {
+ used = G_USED(lo);
+ alloc = G_ALLOC(lo);
+ }
+ meminfo_buff->port_used[i] = used;
+ meminfo_buff->port_alloc[i] = alloc;
+ }
+
+ for (i = 0; i < padap->chip_params->nchan; i++) {
+ if (chip_id(padap) > CHELSIO_T5)
+ lo = t4_read_reg(padap,
+ A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
+ else
+ lo = t4_read_reg(padap, A_MPS_RX_PG_RSV4 + i * 4);
+ if (is_t5(padap)) {
+ used = G_T5_USED(lo);
+ alloc = G_T5_ALLOC(lo);
+ } else {
+ used = G_USED(lo);
+ alloc = G_ALLOC(lo);
+ }
+ meminfo_buff->loopback_used[i] = used;
+ meminfo_buff->loopback_alloc[i] = alloc;
+ }
+err:
+ return rc;
+}
+
+static int collect_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer scratch_buff;
+ struct struct_meminfo *meminfo_buff;
+ int rc = 0;
+ u32 size;
+
+ size = sizeof(struct struct_meminfo);
+
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ meminfo_buff = (struct struct_meminfo *)scratch_buff.data;
+
+ rc = fill_meminfo(padap, meminfo_buff);
+ if (rc)
+ goto err;
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_lb_stats(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer scratch_buff;
+ struct lb_port_stats *tmp_stats;
+ struct struct_lb_stats *lb_stats_buff;
+ u32 i, n, size;
+ int rc = 0;
+
+ rc = padap->params.nports;
+ if (rc < 0)
+ goto err;
+
+ n = rc;
+ size = sizeof(struct struct_lb_stats) +
+ n * sizeof(struct lb_port_stats);
+
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ lb_stats_buff = (struct struct_lb_stats *) scratch_buff.data;
+
+ lb_stats_buff->nchan = n;
+ tmp_stats = lb_stats_buff->s;
+
+ for (i = 0; i < n; i += 2, tmp_stats += 2) {
+ t4_get_lb_stats(padap, i, tmp_stats);
+ t4_get_lb_stats(padap, i + 1, tmp_stats+1);
+ }
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_rdma_stats(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+			      struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer scratch_buff;
+ struct tp_rdma_stats *rdma_stats_buff;
+ u32 size;
+ int rc = 0;
+
+ size = sizeof(struct tp_rdma_stats);
+
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ rdma_stats_buff = (struct tp_rdma_stats *) scratch_buff.data;
+
+ /* spin_lock(&padap->stats_lock); TODO*/
+ t4_tp_get_rdma_stats(padap, rdma_stats_buff, 1);
+ /* spin_unlock(&padap->stats_lock); TODO*/
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_clk_info(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct cudbg_buffer scratch_buff;
+ struct adapter *padap = pdbg_init->adap;
+ struct struct_clk_info *clk_info_buff;
+ u64 tp_tick_us;
+ int size;
+ int rc = 0;
+
+ if (!padap->params.vpd.cclk) {
+ rc = CUDBG_STATUS_CCLK_NOT_DEFINED;
+ goto err;
+ }
+
+ size = sizeof(struct struct_clk_info);
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ clk_info_buff = (struct struct_clk_info *) scratch_buff.data;
+
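+	/* vpd.cclk is the core clock in kHz, so 10^9 / cclk gives the
+	 * clock period in picoseconds.
+	 */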
+	clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk;
+ clk_info_buff->res = t4_read_reg(padap, A_TP_TIMER_RESOLUTION);
+ clk_info_buff->tre = G_TIMERRESOLUTION(clk_info_buff->res);
+ clk_info_buff->dack_re = G_DELAYEDACKRESOLUTION(clk_info_buff->res);
+	/* TP timer tick in us. */
+	tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;
+ clk_info_buff->dack_timer = ((clk_info_buff->cclk_ps <<
+ clk_info_buff->dack_re) / 1000000) *
+ t4_read_reg(padap, A_TP_DACK_TIMER);
+
+ clk_info_buff->retransmit_min =
+ tp_tick_us * t4_read_reg(padap, A_TP_RXT_MIN);
+ clk_info_buff->retransmit_max =
+ tp_tick_us * t4_read_reg(padap, A_TP_RXT_MAX);
+
+ clk_info_buff->persist_timer_min =
+ tp_tick_us * t4_read_reg(padap, A_TP_PERS_MIN);
+ clk_info_buff->persist_timer_max =
+ tp_tick_us * t4_read_reg(padap, A_TP_PERS_MAX);
+
+ clk_info_buff->keepalive_idle_timer =
+ tp_tick_us * t4_read_reg(padap, A_TP_KEEP_IDLE);
+ clk_info_buff->keepalive_interval =
+ tp_tick_us * t4_read_reg(padap, A_TP_KEEP_INTVL);
+
+ clk_info_buff->initial_srtt =
+ tp_tick_us * G_INITSRTT(t4_read_reg(padap, A_TP_INIT_SRTT));
+ clk_info_buff->finwait2_timer =
+ tp_tick_us * t4_read_reg(padap, A_TP_FINWAIT2_TIMER);
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_macstats(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer scratch_buff;
+ struct struct_mac_stats_rev1 *mac_stats_buff;
+ u32 i, n, size;
+ int rc = 0;
+
+ rc = padap->params.nports;
+ if (rc < 0)
+ goto err;
+
+ n = rc;
+ size = sizeof(struct struct_mac_stats_rev1);
+
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ mac_stats_buff = (struct struct_mac_stats_rev1 *) scratch_buff.data;
+
+ mac_stats_buff->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
+ mac_stats_buff->ver_hdr.revision = CUDBG_MAC_STATS_REV;
+ mac_stats_buff->ver_hdr.size = sizeof(struct struct_mac_stats_rev1) -
+ sizeof(struct cudbg_ver_hdr);
+
+ mac_stats_buff->port_count = n;
+ for (i = 0; i < mac_stats_buff->port_count; i++)
+ t4_get_port_stats(padap, i, &mac_stats_buff->stats[i]);
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_cim_pif_la(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer scratch_buff;
+ struct cim_pif_la *cim_pif_la_buff;
+ u32 size;
+ int rc = 0;
+
+ size = sizeof(struct cim_pif_la) +
+ 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
+
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ cim_pif_la_buff = (struct cim_pif_la *) scratch_buff.data;
+ cim_pif_la_buff->size = CIM_PIFLA_SIZE;
+
+ t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
+ (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
+ NULL, NULL);
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_tp_la(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer scratch_buff;
+ struct struct_tp_la *tp_la_buff;
+ u32 size;
+ int rc = 0;
+
+ size = sizeof(struct struct_tp_la) + TPLA_SIZE * sizeof(u64);
+
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ tp_la_buff = (struct struct_tp_la *) scratch_buff.data;
+
+ tp_la_buff->mode = G_DBGLAMODE(t4_read_reg(padap, A_TP_DBG_LA_CONFIG));
+ t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_fcoe_stats(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer scratch_buff;
+ struct struct_tp_fcoe_stats *tp_fcoe_stats_buff;
+ u32 size;
+ int rc = 0;
+
+ size = sizeof(struct struct_tp_fcoe_stats);
+
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ tp_fcoe_stats_buff = (struct struct_tp_fcoe_stats *) scratch_buff.data;
+
+ /* spin_lock(&padap->stats_lock); TODO*/
+ t4_get_fcoe_stats(padap, 0, &tp_fcoe_stats_buff->stats[0], 1);
+ t4_get_fcoe_stats(padap, 1, &tp_fcoe_stats_buff->stats[1], 1);
+ if (padap->chip_params->nchan == NCHAN) {
+ t4_get_fcoe_stats(padap, 2, &tp_fcoe_stats_buff->stats[2], 1);
+ t4_get_fcoe_stats(padap, 3, &tp_fcoe_stats_buff->stats[3], 1);
+ }
+ /* spin_unlock(&padap->stats_lock); TODO*/
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_tp_err_stats(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer scratch_buff;
+ struct struct_tp_err_stats *tp_err_stats_buff;
+ u32 size;
+ int rc = 0;
+
+ size = sizeof(struct struct_tp_err_stats);
+
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ tp_err_stats_buff = (struct struct_tp_err_stats *) scratch_buff.data;
+
+ /* spin_lock(&padap->stats_lock); TODO*/
+ t4_tp_get_err_stats(padap, &tp_err_stats_buff->stats, 1);
+ /* spin_unlock(&padap->stats_lock); TODO*/
+ tp_err_stats_buff->nchan = padap->chip_params->nchan;
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_tcp_stats(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer scratch_buff;
+ struct struct_tcp_stats *tcp_stats_buff;
+ u32 size;
+ int rc = 0;
+
+ size = sizeof(struct struct_tcp_stats);
+
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ tcp_stats_buff = (struct struct_tcp_stats *) scratch_buff.data;
+
+ /* spin_lock(&padap->stats_lock); TODO*/
+ t4_tp_get_tcp_stats(padap, &tcp_stats_buff->v4, &tcp_stats_buff->v6, 1);
+ /* spin_unlock(&padap->stats_lock); TODO*/
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_hw_sched(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer scratch_buff;
+ struct struct_hw_sched *hw_sched_buff;
+ u32 size;
+ int i, rc = 0;
+
+ if (!padap->params.vpd.cclk) {
+ rc = CUDBG_STATUS_CCLK_NOT_DEFINED;
+ goto err;
+ }
+
+ size = sizeof(struct struct_hw_sched);
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ hw_sched_buff = (struct struct_hw_sched *) scratch_buff.data;
+
+ hw_sched_buff->map = t4_read_reg(padap, A_TP_TX_MOD_QUEUE_REQ_MAP);
+ hw_sched_buff->mode = G_TIMERMODE(t4_read_reg(padap, A_TP_MOD_CONFIG));
+ t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
+
+ for (i = 0; i < NTX_SCHED; ++i) {
+ t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
+ &hw_sched_buff->ipg[i], 1);
+ }
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_pm_stats(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer scratch_buff;
+ struct struct_pm_stats *pm_stats_buff;
+ u32 size;
+ int rc = 0;
+
+ size = sizeof(struct struct_pm_stats);
+
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ pm_stats_buff = (struct struct_pm_stats *) scratch_buff.data;
+
+ t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
+ t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_path_mtu(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer scratch_buff;
+ u32 size;
+ int rc = 0;
+
+ size = NMTUS * sizeof(u16);
+
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ t4_read_mtu_tbl(padap, (u16 *)scratch_buff.data, NULL);
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_rss_key(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer scratch_buff;
+ u32 size;
+
+ int rc = 0;
+
+ size = 10 * sizeof(u32);
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ t4_read_rss_key(padap, (u32 *)scratch_buff.data, 1);
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_rss_config(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer scratch_buff;
+ struct rss_config *rss_conf;
+ int rc;
+ u32 size;
+
+ size = sizeof(struct rss_config);
+
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ rss_conf = (struct rss_config *)scratch_buff.data;
+
+ rss_conf->tp_rssconf = t4_read_reg(padap, A_TP_RSS_CONFIG);
+ rss_conf->tp_rssconf_tnl = t4_read_reg(padap, A_TP_RSS_CONFIG_TNL);
+ rss_conf->tp_rssconf_ofd = t4_read_reg(padap, A_TP_RSS_CONFIG_OFD);
+ rss_conf->tp_rssconf_syn = t4_read_reg(padap, A_TP_RSS_CONFIG_SYN);
+ rss_conf->tp_rssconf_vrt = t4_read_reg(padap, A_TP_RSS_CONFIG_VRT);
+ rss_conf->tp_rssconf_cng = t4_read_reg(padap, A_TP_RSS_CONFIG_CNG);
+ rss_conf->chip = padap->params.chipid;
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_rss_vf_config(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer scratch_buff;
+ struct rss_vf_conf *vfconf;
+ int vf, rc, vf_count;
+ u32 size;
+
+ vf_count = padap->chip_params->vfcount;
+ size = vf_count * sizeof(*vfconf);
+
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ vfconf = (struct rss_vf_conf *)scratch_buff.data;
+
+ for (vf = 0; vf < vf_count; vf++) {
+ t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
+ &vfconf[vf].rss_vf_vfh, 1);
+ }
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_rss_pf_config(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct cudbg_buffer scratch_buff;
+ struct rss_pf_conf *pfconf;
+ struct adapter *padap = pdbg_init->adap;
+ u32 rss_pf_map, rss_pf_mask, size;
+ int pf, rc;
+
+ size = 8 * sizeof(*pfconf);
+
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ pfconf = (struct rss_pf_conf *)scratch_buff.data;
+
+ rss_pf_map = t4_read_rss_pf_map(padap, 1);
+ rss_pf_mask = t4_read_rss_pf_mask(padap, 1);
+
+ for (pf = 0; pf < 8; pf++) {
+ pfconf[pf].rss_pf_map = rss_pf_map;
+ pfconf[pf].rss_pf_mask = rss_pf_mask;
+ /* no return val */
+ t4_read_rss_pf_config(padap, pf, &pfconf[pf].rss_pf_config, 1);
+ }
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int check_valid(u32 *buf, int type)
+{
+ int index;
+ int bit;
+ int bit_pos = 0;
+
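+	/* Each raw context image carries a "valid" flag at a bit position
+	 * that depends on the context type.
+	 */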
+ switch (type) {
+ case CTXT_EGRESS:
+ bit_pos = 176;
+ break;
+ case CTXT_INGRESS:
+ bit_pos = 141;
+ break;
+ case CTXT_FLM:
+ bit_pos = 89;
+ break;
+ }
+ index = bit_pos / 32;
+ bit = bit_pos % 32;
+
+ return buf[index] & (1U << bit);
+}
+
+/**
+ * Get EGRESS, INGRESS, FLM, and CNM max qid.
+ *
+ * For EGRESS and INGRESS, do the following calculation.
+ * max_qid = (DBQ/IMSG context region size in bytes) /
+ * (size of context in bytes).
+ *
+ * For FLM, do the following calculation.
+ * max_qid = (FLM cache region size in bytes) /
+ * ((number of pointers cached in EDRAM) * 8 (bytes per pointer)).
+ *
+ * There's a 1-to-1 mapping between FLM and CNM if there's no header splitting
+ * enabled; i.e., max CNM qid is equal to max FLM qid. However, if header
+ * splitting is enabled, then max CNM qid is half of max FLM qid.
+ */
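+/*
+ * Illustrative example (hypothetical sizes): if G_EDRAMPTRCNT(value) is 2,
+ * then edram_ptr_count = 32 * (1 << 2) = 128 pointers per qid, and a 1 MB
+ * FLM cache region yields 1 MB / (128 * 8 bytes) = 1024 FLM qids. With
+ * header splitting enabled, max CNM qid would then be 512.
+ */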
+static int get_max_ctxt_qid(struct adapter *padap,
+ struct struct_meminfo *meminfo,
+ u32 *max_ctx_qid, u8 nelem)
+{
+ u32 i, idx, found = 0;
+
+ if (nelem != (CTXT_CNM + 1))
+ return -EINVAL;
+
+ for (i = 0; i < meminfo->mem_c; i++) {
+ if (meminfo->mem[i].idx >= ARRAY_SIZE(region))
+ continue; /* skip holes */
+
+ idx = meminfo->mem[i].idx;
+ /* Get DBQ, IMSG, and FLM context region size */
+ if (idx <= CTXT_FLM) {
+ if (!(meminfo->mem[i].limit))
+ meminfo->mem[i].limit =
+ i < meminfo->mem_c - 1 ?
+ meminfo->mem[i + 1].base - 1 : ~0;
+
+ if (idx < CTXT_FLM) {
+ /* Get EGRESS and INGRESS max qid. */
+ max_ctx_qid[idx] = (meminfo->mem[i].limit -
+ meminfo->mem[i].base + 1) /
+ CUDBG_CTXT_SIZE_BYTES;
+ found++;
+ } else {
+ /* Get FLM and CNM max qid. */
+ u32 value, edram_ptr_count;
+ u8 bytes_per_ptr = 8;
+ u8 nohdr;
+
+ value = t4_read_reg(padap, A_SGE_FLM_CFG);
+
+ /* Check if header splitting is enabled. */
+ nohdr = (value >> S_NOHDR) & 1U;
+
+ /* Get the number of pointers in EDRAM per
+ * qid in units of 32.
+ */
+ edram_ptr_count = 32 *
+ (1U << G_EDRAMPTRCNT(value));
+
+ /* EDRAMPTRCNT value of 3 is reserved.
+ * So don't exceed 128.
+ */
+ if (edram_ptr_count > 128)
+ edram_ptr_count = 128;
+
+ max_ctx_qid[idx] = (meminfo->mem[i].limit -
+ meminfo->mem[i].base + 1) /
+ (edram_ptr_count *
+ bytes_per_ptr);
+ found++;
+
+ /* CNM has 1-to-1 mapping with FLM.
+ * However, if header splitting is enabled,
+ * then max CNM qid is half of max FLM qid.
+ */
+ max_ctx_qid[CTXT_CNM] = nohdr ?
+ max_ctx_qid[idx] :
+ max_ctx_qid[idx] >> 1;
+
+ /* One more increment for CNM */
+ found++;
+ }
+ }
+ if (found == nelem)
+ break;
+ }
+
+ /* Sanity check. Ensure the values are within known max. */
+ max_ctx_qid[CTXT_EGRESS] = min_t(u32, max_ctx_qid[CTXT_EGRESS],
+ M_CTXTQID);
+ max_ctx_qid[CTXT_INGRESS] = min_t(u32, max_ctx_qid[CTXT_INGRESS],
+ CUDBG_MAX_INGRESS_QIDS);
+ max_ctx_qid[CTXT_FLM] = min_t(u32, max_ctx_qid[CTXT_FLM],
+ CUDBG_MAX_FL_QIDS);
+ max_ctx_qid[CTXT_CNM] = min_t(u32, max_ctx_qid[CTXT_CNM],
+ CUDBG_MAX_CNM_QIDS);
+ return 0;
+}
+
+static int collect_dump_context(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct cudbg_buffer scratch_buff;
+ struct cudbg_buffer temp_buff;
+ struct adapter *padap = pdbg_init->adap;
+ u32 size = 0, next_offset = 0, total_size = 0;
+ struct cudbg_ch_cntxt *buff = NULL;
+ struct struct_meminfo meminfo;
+ int bytes = 0;
+ int rc = 0;
+ u32 i, j;
+ u32 max_ctx_qid[CTXT_CNM + 1];
+ bool limit_qid = false;
+ u32 qid_count = 0;
+
+ rc = fill_meminfo(padap, &meminfo);
+ if (rc)
+ goto err;
+
+ /* Get max valid qid for each type of queue */
+ rc = get_max_ctxt_qid(padap, &meminfo, max_ctx_qid, CTXT_CNM + 1);
+ if (rc)
+ goto err;
+
+	/* There are four types of queues. Collect context up to the max
+	 * qid of each queue type.
+	 */
+ for (i = CTXT_EGRESS; i <= CTXT_CNM; i++)
+ size += sizeof(struct cudbg_ch_cntxt) * max_ctx_qid[i];
+
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc == CUDBG_STATUS_NO_SCRATCH_MEM) {
+		/* Not enough scratch memory available. Collect context
+		 * of at least CUDBG_LOWMEM_MAX_CTXT_QIDS for each queue
+		 * type.
+		 */
+ size = 0;
+ for (i = CTXT_EGRESS; i <= CTXT_CNM; i++)
+ size += sizeof(struct cudbg_ch_cntxt) *
+ CUDBG_LOWMEM_MAX_CTXT_QIDS;
+
+ limit_qid = true;
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+ }
+
+ buff = (struct cudbg_ch_cntxt *)scratch_buff.data;
+
+ /* Collect context data */
+ for (i = CTXT_EGRESS; i <= CTXT_FLM; i++) {
+ qid_count = 0;
+ for (j = 0; j < max_ctx_qid[i]; j++) {
+ read_sge_ctxt(pdbg_init, j, i, buff->data);
+
+ rc = check_valid(buff->data, i);
+ if (rc) {
+ buff->cntxt_type = i;
+ buff->cntxt_id = j;
+ buff++;
+ total_size += sizeof(struct cudbg_ch_cntxt);
+
+ if (i == CTXT_FLM) {
+ read_sge_ctxt(pdbg_init, j, CTXT_CNM,
+ buff->data);
+ buff->cntxt_type = CTXT_CNM;
+ buff->cntxt_id = j;
+ buff++;
+ total_size +=
+ sizeof(struct cudbg_ch_cntxt);
+ }
+ qid_count++;
+ }
+
+			/* If there is not enough space to collect more
+			 * qids, bail and move on to the next queue type.
+			 */
+ if (limit_qid &&
+ qid_count >= CUDBG_LOWMEM_MAX_CTXT_QIDS)
+ break;
+ }
+ }
+
+ scratch_buff.size = total_size;
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ /* Splitting buffer and writing in terms of CUDBG_CHUNK_SIZE */
+ while (total_size > 0) {
+ bytes = min_t(unsigned long, (unsigned long)total_size,
+ (unsigned long)CUDBG_CHUNK_SIZE);
+ temp_buff.size = bytes;
+ temp_buff.data = (void *)((char *)scratch_buff.data +
+ next_offset);
+
+ rc = compress_buff(&temp_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ total_size -= bytes;
+ next_offset += bytes;
+ }
+
+err1:
+ scratch_buff.size = size;
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_fw_devlog(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+#ifdef notyet
+ struct adapter *padap = pdbg_init->adap;
+ struct devlog_params *dparams = &padap->params.devlog;
+ struct cudbg_param *params = NULL;
+ struct cudbg_buffer scratch_buff;
+ u32 offset;
+ int rc = 0, i;
+
+ rc = t4_init_devlog_params(padap, 1);
+
+ if (rc < 0) {
+		pdbg_init->print("%s(), t4_init_devlog_params failed, rc: "
+		    "%d\n", __func__, rc);
+ for (i = 0; i < pdbg_init->dbg_params_cnt; i++) {
+ if (pdbg_init->dbg_params[i].param_type ==
+ CUDBG_DEVLOG_PARAM) {
+ params = &pdbg_init->dbg_params[i];
+ break;
+ }
+ }
+
+ if (params) {
+ dparams->memtype = params->u.devlog_param.memtype;
+ dparams->start = params->u.devlog_param.start;
+ dparams->size = params->u.devlog_param.size;
+ } else {
+ cudbg_err->sys_err = rc;
+ goto err;
+ }
+ }
+
+ rc = get_scratch_buff(dbg_buff, dparams->size, &scratch_buff);
+
+ if (rc)
+ goto err;
+
+ /* Collect FW devlog */
+ if (dparams->start != 0) {
+ offset = scratch_buff.offset;
+ rc = t4_memory_rw(padap, padap->params.drv_memwin,
+ dparams->memtype, dparams->start,
+ dparams->size,
+ (__be32 *)((char *)scratch_buff.data +
+ offset), 1);
+
+ if (rc) {
+			pdbg_init->print("%s(), t4_memory_rw failed, rc: "
+			    "%d\n", __func__, rc);
+ cudbg_err->sys_err = rc;
+ goto err1;
+ }
+ }
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+#endif
+ return (EDOOFUS);
+}
+
+/* CIM OBQ */
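+/*
+ * Each collector below reads one CIM outbound queue; the qid selects the
+ * queue: 0-3 = ULP0-3, 4 = SGE, 5 = NCSI, 6-7 = SGE RX queues.
+ */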
+
+static int collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ int rc = 0, qid = 0;
+
+ rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
+
+ return rc;
+}
+
+static int collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ int rc = 0, qid = 1;
+
+ rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
+
+ return rc;
+}
+
+static int collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ int rc = 0, qid = 2;
+
+ rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
+
+ return rc;
+}
+
+static int collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ int rc = 0, qid = 3;
+
+ rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
+
+ return rc;
+}
+
+static int collect_cim_obq_sge(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ int rc = 0, qid = 4;
+
+ rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
+
+ return rc;
+}
+
+static int collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ int rc = 0, qid = 5;
+
+ rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
+
+ return rc;
+}
+
+static int collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ int rc = 0, qid = 6;
+
+ rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
+
+ return rc;
+}
+
+static int collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ int rc = 0, qid = 7;
+
+ rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
+
+ return rc;
+}
+
+static int read_cim_obq(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err, int qid)
+{
+ struct cudbg_buffer scratch_buff;
+ struct adapter *padap = pdbg_init->adap;
+ u32 qsize;
+ int rc;
+ int no_of_read_words;
+
+ /* collect CIM OBQ */
+ qsize = 6 * CIM_OBQ_SIZE * 4 * sizeof(u32);
+ rc = get_scratch_buff(dbg_buff, qsize, &scratch_buff);
+ if (rc)
+ goto err;
+
+ /* t4_read_cim_obq will return no. of read words or error */
+ no_of_read_words = t4_read_cim_obq(padap, qid,
+ (u32 *)((u32 *)scratch_buff.data +
+ scratch_buff.offset), qsize);
+
+	/* A result less than or equal to 0 indicates an error. */
+ if (no_of_read_words <= 0) {
+ if (no_of_read_words == 0)
+ rc = CUDBG_SYSTEM_ERROR;
+ else
+ rc = no_of_read_words;
+ if (pdbg_init->verbose)
+ pdbg_init->print("%s: t4_read_cim_obq failed (%d)\n",
+ __func__, rc);
+ cudbg_err->sys_err = rc;
+ goto err1;
+ }
+
+ scratch_buff.size = no_of_read_words * 4;
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+ if (rc)
+ goto err1;
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+/* CIM IBQ */
+
+static int collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ int rc = 0, qid = 0;
+
+ rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
+ return rc;
+}
+
+static int collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ int rc = 0, qid = 1;
+
+ rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
+ return rc;
+}
+
+static int collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ int rc = 0, qid = 2;
+
+ rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
+ return rc;
+}
+
+static int collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ int rc = 0, qid = 3;
+
+ rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
+ return rc;
+}
+
+static int collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ int rc = 0, qid = 4;
+
+ rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
+ return rc;
+}
+
+static int collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+	int rc = 0, qid = 5;
+
+ rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
+ return rc;
+}
+
+static int read_cim_ibq(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err, int qid)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer scratch_buff;
+ u32 qsize;
+ int rc;
+ int no_of_read_words;
+
+ /* collect CIM IBQ */
+ qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
+ rc = get_scratch_buff(dbg_buff, qsize, &scratch_buff);
+
+ if (rc)
+ goto err;
+
+	/* t4_read_cim_ibq() returns the number of words read, or an error */
+ no_of_read_words = t4_read_cim_ibq(padap, qid,
+ (u32 *)((u32 *)scratch_buff.data +
+ scratch_buff.offset), qsize);
+	/* a count of zero or less indicates an error */
+ if (no_of_read_words <= 0) {
+ if (no_of_read_words == 0)
+ rc = CUDBG_SYSTEM_ERROR;
+ else
+ rc = no_of_read_words;
+ if (pdbg_init->verbose)
+ pdbg_init->print("%s: t4_read_cim_ibq failed (%d)\n",
+ __func__, rc);
+ cudbg_err->sys_err = rc;
+ goto err1;
+ }
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+
+err:
+ return rc;
+}
+
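+/*
+ * Most collectors below follow the same pipeline:
+ *
+ *	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ *	<read the entity into scratch_buff via t4_* helpers>
+ *	rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ *	rc = compress_buff(&scratch_buff, dbg_buff);
+ *	release_scratch_buff(&scratch_buff, dbg_buff);
+ *
+ * i.e. a scratch buffer is carved out of the output buffer, filled
+ * from the hardware, and then compressed into the final dump.
+ */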
+static int collect_cim_ma_la(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct cudbg_buffer scratch_buff;
+ struct adapter *padap = pdbg_init->adap;
+	int rc = 0;
+
+ /* collect CIM MA LA */
+ scratch_buff.size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
+ rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ /* no return */
+ t4_cim_read_ma_la(padap,
+ (u32 *) ((char *)scratch_buff.data +
+ scratch_buff.offset),
+ (u32 *) ((char *)scratch_buff.data +
+ scratch_buff.offset + 5 * CIM_MALA_SIZE));
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_cim_la(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct cudbg_buffer scratch_buff;
+ struct adapter *padap = pdbg_init->adap;
+
+ int rc;
+ u32 cfg = 0;
+ int size;
+
+ /* collect CIM LA */
+ if (is_t6(padap)) {
+ size = padap->params.cim_la_size / 10 + 1;
+ size *= 11 * sizeof(u32);
+ } else {
+ size = padap->params.cim_la_size / 8;
+ size *= 8 * sizeof(u32);
+ }
+
+ size += sizeof(cfg);
+
+ rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ rc = t4_cim_read(padap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
+
+ if (rc) {
+ if (pdbg_init->verbose)
+ pdbg_init->print("%s: t4_cim_read failed (%d)\n",
+ __func__, rc);
+ cudbg_err->sys_err = rc;
+ goto err1;
+ }
+
+ memcpy((char *)scratch_buff.data + scratch_buff.offset, &cfg,
+ sizeof(cfg));
+
+ rc = t4_cim_read_la(padap,
+ (u32 *) ((char *)scratch_buff.data +
+ scratch_buff.offset + sizeof(cfg)), NULL);
+ if (rc < 0) {
+ if (pdbg_init->verbose)
+ pdbg_init->print("%s: t4_cim_read_la failed (%d)\n",
+ __func__, rc);
+ cudbg_err->sys_err = rc;
+ goto err1;
+ }
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_cim_qcfg(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct cudbg_buffer scratch_buff;
+ struct adapter *padap = pdbg_init->adap;
+ u32 offset;
+ int cim_num_obq, rc = 0;
+
+ struct struct_cim_qcfg *cim_qcfg_data = NULL;
+
+ rc = get_scratch_buff(dbg_buff, sizeof(struct struct_cim_qcfg),
+ &scratch_buff);
+
+ if (rc)
+ goto err;
+
+ offset = scratch_buff.offset;
+
+ cim_num_obq = is_t4(padap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
+
+	cim_qcfg_data =
+	    (struct struct_cim_qcfg *)((char *)scratch_buff.data + offset);
+
+ rc = t4_cim_read(padap, A_UP_IBQ_0_RDADDR,
+ ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
+
+ if (rc) {
+ if (pdbg_init->verbose)
+ pdbg_init->print("%s: t4_cim_read IBQ_0_RDADDR failed (%d)\n",
+ __func__, rc);
+ cudbg_err->sys_err = rc;
+ goto err1;
+ }
+
+ rc = t4_cim_read(padap, A_UP_OBQ_0_REALADDR,
+ ARRAY_SIZE(cim_qcfg_data->obq_wr),
+ cim_qcfg_data->obq_wr);
+
+ if (rc) {
+ if (pdbg_init->verbose)
+ pdbg_init->print("%s: t4_cim_read OBQ_0_REALADDR failed (%d)\n",
+ __func__, rc);
+ cudbg_err->sys_err = rc;
+ goto err1;
+ }
+
+ /* no return val */
+ t4_read_cimq_cfg(padap,
+ cim_qcfg_data->base,
+ cim_qcfg_data->size,
+ cim_qcfg_data->thres);
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+/**
+ * Fetch the start and end of the TX/RX payload region.
+ *
+ * @padap (IN): adapter handle.
+ * @mem_type (IN): EDC0, EDC1, MC/MC0/MC1.
+ * @mem_tot_len (IN): total length of @mem_type memory region to read.
+ * @payload_type (IN): TX or RX Payload.
+ * @reg_info (OUT): store the payload region info.
+ *
+ * Fetch the TX/RX payload region information from meminfo.
+ * However, reading from the @mem_type region starts at 0 and not
+ * from whatever base is stored in meminfo. Hence, if the payload
+ * region exists, calculate its start and end relative to 0 and
+ * @mem_tot_len, respectively, and set @reg_info->exist to true.
+ * Otherwise, set @reg_info->exist to false.
+ */
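+/*
+ * Worked example (hypothetical numbers): if @mem_type spans
+ * [0x1000000, 0x1ffffff] and the RX payload region spans
+ * [0x1400000, 0x17fffff], then @reg_info->start = 0x400000 and
+ * @reg_info->end = 0x7fffff, both relative to the start of @mem_type.
+ */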
+#ifdef notyet
+static int get_payload_range(struct adapter *padap, u8 mem_type,
+ unsigned long mem_tot_len, u8 payload_type,
+ struct struct_region_info *reg_info)
+{
+ struct struct_meminfo meminfo;
+ struct struct_mem_desc mem_region;
+ struct struct_mem_desc payload;
+ u32 i, idx, found = 0;
+ u8 mc_type;
+ int rc;
+
+ /* Get meminfo of all regions */
+ rc = fill_meminfo(padap, &meminfo);
+ if (rc)
+ return rc;
+
+ /* Extract the specified TX or RX Payload region range */
+ memset(&payload, 0, sizeof(struct struct_mem_desc));
+ for (i = 0; i < meminfo.mem_c; i++) {
+ if (meminfo.mem[i].idx >= ARRAY_SIZE(region))
+ continue; /* skip holes */
+
+ idx = meminfo.mem[i].idx;
+ /* Get TX or RX Payload region start and end */
+ if (idx == payload_type) {
+ if (!(meminfo.mem[i].limit))
+ meminfo.mem[i].limit =
+ i < meminfo.mem_c - 1 ?
+ meminfo.mem[i + 1].base - 1 : ~0;
+
+ memcpy(&payload, &meminfo.mem[i], sizeof(payload));
+ found = 1;
+ break;
+ }
+ }
+
+ /* If TX or RX Payload region is not found return error. */
+ if (!found)
+ return -EINVAL;
+
+ if (mem_type < MEM_MC) {
+ memcpy(&mem_region, &meminfo.avail[mem_type],
+ sizeof(mem_region));
+ } else {
+ /* Check if both MC0 and MC1 exist by checking if a
+ * base address for the specified @mem_type exists.
+ * If a base address exists, then there is MC1 and
+ * hence use the base address stored at index 3.
+ * Otherwise, use the base address stored at index 2.
+ */
+ mc_type = meminfo.avail[mem_type].base ?
+ mem_type : mem_type - 1;
+ memcpy(&mem_region, &meminfo.avail[mc_type],
+ sizeof(mem_region));
+ }
+
+ /* Check if payload region exists in current memory */
+ if (payload.base < mem_region.base && payload.limit < mem_region.base) {
+ reg_info->exist = false;
+ return 0;
+ }
+
+ /* Get Payload region start and end with respect to 0 and
+ * mem_tot_len, respectively. This is because reading from the
+ * memory region starts at 0 and not at base info stored in meminfo.
+ */
+ if (payload.base < mem_region.limit) {
+ reg_info->exist = true;
+ if (payload.base >= mem_region.base)
+ reg_info->start = payload.base - mem_region.base;
+ else
+ reg_info->start = 0;
+
+ if (payload.limit < mem_region.limit)
+ reg_info->end = payload.limit - mem_region.base;
+ else
+ reg_info->end = mem_tot_len;
+ }
+
+ return 0;
+}
+#endif
+
+static int read_fw_mem(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff, u8 mem_type,
+ unsigned long tot_len, struct cudbg_error *cudbg_err)
+{
+#ifdef notyet
+ struct cudbg_buffer scratch_buff;
+ struct adapter *padap = pdbg_init->adap;
+ unsigned long bytes_read = 0;
+ unsigned long bytes_left;
+ unsigned long bytes;
+ int rc;
+ struct struct_region_info payload[2]; /* TX and RX Payload Region */
+ u16 get_payload_flag;
+ u8 i;
+
+ get_payload_flag =
+ pdbg_init->dbg_params[CUDBG_GET_PAYLOAD_PARAM].param_type;
+
+ /* If explicitly asked to get TX/RX Payload data,
+ * then don't zero out the payload data. Otherwise,
+ * zero out the payload data.
+ */
+ if (!get_payload_flag) {
+ u8 region_index[2];
+ u8 j = 0;
+
+ /* Find the index of TX and RX Payload regions in meminfo */
+ for (i = 0; i < ARRAY_SIZE(region); i++) {
+ if (!strcmp(region[i], "Tx payload:") ||
+ !strcmp(region[i], "Rx payload:")) {
+ region_index[j] = i;
+ j++;
+ if (j == 2)
+ break;
+ }
+ }
+
+ /* Get TX/RX Payload region range if they exist */
+ memset(payload, 0, ARRAY_SIZE(payload) * sizeof(payload[0]));
+ for (i = 0; i < ARRAY_SIZE(payload); i++) {
+ rc = get_payload_range(padap, mem_type, tot_len,
+ region_index[i],
+ &payload[i]);
+ if (rc)
+ goto err;
+
+ if (payload[i].exist) {
+ /* Align start and end to avoid wrap around */
+ payload[i].start =
+ roundup(payload[i].start,
+ CUDBG_CHUNK_SIZE);
+ payload[i].end =
+ rounddown(payload[i].end,
+ CUDBG_CHUNK_SIZE);
+ }
+ }
+ }
+
+ bytes_left = tot_len;
+ scratch_buff.size = tot_len;
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err;
+
+ while (bytes_left > 0) {
+ bytes = min_t(unsigned long, bytes_left, (unsigned long)CUDBG_CHUNK_SIZE);
+ rc = get_scratch_buff(dbg_buff, bytes, &scratch_buff);
+
+ if (rc) {
+ rc = CUDBG_STATUS_NO_SCRATCH_MEM;
+ goto err;
+ }
+
+ if (!get_payload_flag) {
+ for (i = 0; i < ARRAY_SIZE(payload); i++) {
+ if (payload[i].exist &&
+ bytes_read >= payload[i].start &&
+ (bytes_read + bytes) <= payload[i].end) {
+ memset(scratch_buff.data, 0, bytes);
+ /* TX and RX Payload regions
+ * can't overlap.
+ */
+ goto skip_read;
+ }
+ }
+ }
+
+		/* Read this chunk from adapter memory */
+ rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type, bytes_read,
+ bytes, (__be32 *)(scratch_buff.data), 1);
+
+ if (rc) {
+ if (pdbg_init->verbose)
+ pdbg_init->print("%s: t4_memory_rw failed (%d)",
+ __func__, rc);
+ cudbg_err->sys_err = rc;
+ goto err1;
+ }
+
+skip_read:
+ rc = compress_buff(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ bytes_left -= bytes;
+ bytes_read += bytes;
+ release_scratch_buff(&scratch_buff, dbg_buff);
+ }
+
+err1:
+ if (rc)
+ release_scratch_buff(&scratch_buff, dbg_buff);
+
+err:
+ return rc;
+#endif
+ return (EDOOFUS);
+}
+
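+/*
+ * Read the MA BAR registers to determine which memories (EDC0/1,
+ * MC0/1) are present and how large they are. The G_*_SIZE() fields
+ * report sizes in MB; the per-memory collectors below convert them
+ * to bytes before dumping.
+ */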
+static void collect_mem_info(struct cudbg_init *pdbg_init,
+ struct card_mem *mem_info)
+{
+ struct adapter *padap = pdbg_init->adap;
+ u32 value;
+ int t4 = 0;
+
+ if (is_t4(padap))
+ t4 = 1;
+
+ if (t4) {
+ value = t4_read_reg(padap, A_MA_EXT_MEMORY_BAR);
+ value = G_EXT_MEM_SIZE(value);
+ mem_info->size_mc0 = (u16)value; /* size in MB */
+
+ value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
+ if (value & F_EXT_MEM_ENABLE)
+			/* set mc0 flag bit */
+			mem_info->mem_flag |= (1 << MC0_FLAG);
+ } else {
+ value = t4_read_reg(padap, A_MA_EXT_MEMORY0_BAR);
+ value = G_EXT_MEM0_SIZE(value);
+ mem_info->size_mc0 = (u16)value;
+
+ value = t4_read_reg(padap, A_MA_EXT_MEMORY1_BAR);
+ value = G_EXT_MEM1_SIZE(value);
+ mem_info->size_mc1 = (u16)value;
+
+ value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
+ if (value & F_EXT_MEM0_ENABLE)
+ mem_info->mem_flag |= (1 << MC0_FLAG);
+ if (value & F_EXT_MEM1_ENABLE)
+ mem_info->mem_flag |= (1 << MC1_FLAG);
+ }
+
+ value = t4_read_reg(padap, A_MA_EDRAM0_BAR);
+ value = G_EDRAM0_SIZE(value);
+ mem_info->size_edc0 = (u16)value;
+
+ value = t4_read_reg(padap, A_MA_EDRAM1_BAR);
+ value = G_EDRAM1_SIZE(value);
+ mem_info->size_edc1 = (u16)value;
+
+ value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
+ if (value & F_EDRAM0_ENABLE)
+ mem_info->mem_flag |= (1 << EDC0_FLAG);
+ if (value & F_EDRAM1_ENABLE)
+ mem_info->mem_flag |= (1 << EDC1_FLAG);
+}
+
+static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ int rc;
+
+ if (is_fw_attached(pdbg_init)) {
+
+ /* Flush uP dcache before reading edcX/mcX */
+ rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
+
+ if (rc) {
+ if (pdbg_init->verbose)
+ pdbg_init->print("%s: t4_fwcache failed (%d)\n",
+ __func__, rc);
+ cudbg_err->sys_warn = rc;
+ }
+ }
+}
+
+static int collect_edc0_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct card_mem mem_info = {0};
+ unsigned long edc0_size;
+ int rc;
+
+ cudbg_t4_fwcache(pdbg_init, cudbg_err);
+
+ collect_mem_info(pdbg_init, &mem_info);
+
+ if (mem_info.mem_flag & (1 << EDC0_FLAG)) {
+ edc0_size = (((unsigned long)mem_info.size_edc0) * 1024 * 1024);
+ rc = read_fw_mem(pdbg_init, dbg_buff, MEM_EDC0,
+ edc0_size, cudbg_err);
+ if (rc)
+ goto err;
+	} else {
+		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
+		if (pdbg_init->verbose)
+			pdbg_init->print("%s: collect_mem_info failed, %s\n",
+				 __func__, err_msg[-rc]);
+		goto err;
+	}
+err:
+ return rc;
+}
+
+static int collect_edc1_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct card_mem mem_info = {0};
+ unsigned long edc1_size;
+ int rc;
+
+ cudbg_t4_fwcache(pdbg_init, cudbg_err);
+
+ collect_mem_info(pdbg_init, &mem_info);
+
+ if (mem_info.mem_flag & (1 << EDC1_FLAG)) {
+ edc1_size = (((unsigned long)mem_info.size_edc1) * 1024 * 1024);
+ rc = read_fw_mem(pdbg_init, dbg_buff, MEM_EDC1,
+ edc1_size, cudbg_err);
+ if (rc)
+ goto err;
+ } else {
+ rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
+ if (pdbg_init->verbose)
+			pdbg_init->print("%s: collect_mem_info failed, %s\n",
+ __func__, err_msg[-rc]);
+ goto err;
+ }
+
+err:
+
+ return rc;
+}
+
+static int collect_mc0_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct card_mem mem_info = {0};
+ unsigned long mc0_size;
+ int rc;
+
+ cudbg_t4_fwcache(pdbg_init, cudbg_err);
+
+ collect_mem_info(pdbg_init, &mem_info);
+
+ if (mem_info.mem_flag & (1 << MC0_FLAG)) {
+ mc0_size = (((unsigned long)mem_info.size_mc0) * 1024 * 1024);
+ rc = read_fw_mem(pdbg_init, dbg_buff, MEM_MC0,
+ mc0_size, cudbg_err);
+ if (rc)
+ goto err;
+ } else {
+ rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
+ if (pdbg_init->verbose)
+			pdbg_init->print("%s: collect_mem_info failed, %s\n",
+ __func__, err_msg[-rc]);
+ goto err;
+ }
+
+err:
+ return rc;
+}
+
+static int collect_mc1_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct card_mem mem_info = {0};
+ unsigned long mc1_size;
+ int rc;
+
+ cudbg_t4_fwcache(pdbg_init, cudbg_err);
+
+ collect_mem_info(pdbg_init, &mem_info);
+
+ if (mem_info.mem_flag & (1 << MC1_FLAG)) {
+ mc1_size = (((unsigned long)mem_info.size_mc1) * 1024 * 1024);
+ rc = read_fw_mem(pdbg_init, dbg_buff, MEM_MC1,
+ mc1_size, cudbg_err);
+ if (rc)
+ goto err;
+ } else {
+		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
+		if (pdbg_init->verbose)
+			pdbg_init->print("%s: collect_mem_info failed, %s\n",
+				 __func__, err_msg[-rc]);
+ goto err;
+ }
+err:
+ return rc;
+}
+
+static int collect_reg_dump(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct cudbg_buffer scratch_buff;
+ struct cudbg_buffer tmp_scratch_buff;
+ struct adapter *padap = pdbg_init->adap;
+ unsigned long bytes_read = 0;
+ unsigned long bytes_left;
+ u32 buf_size = 0, bytes = 0;
+ int rc = 0;
+
+ if (is_t4(padap))
+		buf_size = T4_REGMAP_SIZE;
+ else if (is_t5(padap) || is_t6(padap))
+ buf_size = T5_REGMAP_SIZE;
+
+ scratch_buff.size = buf_size;
+
+ tmp_scratch_buff = scratch_buff;
+
+ rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ /* no return */
+ t4_get_regs(padap, (void *)scratch_buff.data, scratch_buff.size);
+ bytes_left = scratch_buff.size;
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ while (bytes_left > 0) {
+ tmp_scratch_buff.data =
+ ((char *)scratch_buff.data) + bytes_read;
+ bytes = min_t(unsigned long, bytes_left, (unsigned long)CUDBG_CHUNK_SIZE);
+ tmp_scratch_buff.size = bytes;
+		rc = compress_buff(&tmp_scratch_buff, dbg_buff);
+		if (rc)
+			goto err1;
+ bytes_left -= bytes;
+ bytes_read += bytes;
+ }
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_cctrl(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct cudbg_buffer scratch_buff;
+ struct adapter *padap = pdbg_init->adap;
+ u32 size;
+ int rc;
+
+ size = sizeof(u16) * NMTUS * NCCTRL_WIN;
+ scratch_buff.size = size;
+
+ rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ t4_read_cong_tbl(padap, (void *)scratch_buff.data);
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
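+/*
+ * CIM host-access reads (used by cim_ha_rreg() below) follow a simple
+ * handshake: write the target address to A_CIM_HOST_ACC_CTRL, poll
+ * until the busy bit clears, then read the result from
+ * A_CIM_HOST_ACC_DATA. check_busy_bit() implements the bounded poll.
+ */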
+static int check_busy_bit(struct adapter *padap)
+{
+ u32 val;
+ u32 busy = 1;
+ int i = 0;
+ int retry = 10;
+ int status = 0;
+
+	while (busy && i < retry) {
+ val = t4_read_reg(padap, A_CIM_HOST_ACC_CTRL);
+ busy = (0 != (val & CUDBG_CIM_BUSY_BIT));
+ i++;
+ }
+
+ if (busy)
+ status = -1;
+
+ return status;
+}
+
+static int cim_ha_rreg(struct adapter *padap, u32 addr, u32 *val)
+{
+ int rc = 0;
+
+ /* write register address into the A_CIM_HOST_ACC_CTRL */
+ t4_write_reg(padap, A_CIM_HOST_ACC_CTRL, addr);
+
+ /* Poll HOSTBUSY */
+ rc = check_busy_bit(padap);
+ if (rc)
+ goto err;
+
+ /* Read value from A_CIM_HOST_ACC_DATA */
+ *val = t4_read_reg(padap, A_CIM_HOST_ACC_DATA);
+
+err:
+ return rc;
+}
+
+static int dump_up_cim(struct adapter *padap, struct cudbg_init *pdbg_init,
+ struct ireg_field *up_cim_reg, u32 *buff)
+{
+ u32 i;
+ int rc = 0;
+
+ for (i = 0; i < up_cim_reg->ireg_offset_range; i++) {
+ rc = cim_ha_rreg(padap,
+ up_cim_reg->ireg_local_offset + (i * 4),
+ buff);
+ if (rc) {
+ if (pdbg_init->verbose)
+			pdbg_init->print("BUSY timeout reading "
+					 "CIM_HOST_ACC_CTRL\n");
+ goto err;
+ }
+
+ buff++;
+ }
+
+err:
+ return rc;
+}
+
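+/*
+ * Each row of the t5/t6_up_cim_reg_array tables (and the other
+ * *_array tables used by the indirect collectors below) is a 4-tuple:
+ * { indirect address register, indirect data register, local offset,
+ * number of registers to read }, unpacked into a struct ireg_field.
+ */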
+static int collect_up_cim_indirect(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct cudbg_buffer scratch_buff;
+ struct adapter *padap = pdbg_init->adap;
+ struct ireg_buf *up_cim;
+ u32 size;
+ int i, rc, n;
+
+ n = sizeof(t5_up_cim_reg_array) / (4 * sizeof(u32));
+ size = sizeof(struct ireg_buf) * n;
+ scratch_buff.size = size;
+
+ rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ up_cim = (struct ireg_buf *)scratch_buff.data;
+
+ for (i = 0; i < n; i++) {
+ struct ireg_field *up_cim_reg = &up_cim->tp_pio;
+ u32 *buff = up_cim->outbuf;
+
+ if (is_t5(padap)) {
+ up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
+ up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
+ up_cim_reg->ireg_local_offset =
+ t5_up_cim_reg_array[i][2];
+ up_cim_reg->ireg_offset_range =
+ t5_up_cim_reg_array[i][3];
+ } else if (is_t6(padap)) {
+ up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
+ up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
+ up_cim_reg->ireg_local_offset =
+ t6_up_cim_reg_array[i][2];
+ up_cim_reg->ireg_offset_range =
+ t6_up_cim_reg_array[i][3];
+ }
+
+		rc = dump_up_cim(padap, pdbg_init, up_cim_reg, buff);
+		if (rc)
+			goto err1;
+
+ up_cim++;
+ }
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_mbox_log(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+#ifdef notyet
+ struct cudbg_buffer scratch_buff;
+ struct cudbg_mbox_log *mboxlog = NULL;
+ struct mbox_cmd_log *log = NULL;
+ struct mbox_cmd *entry;
+ u64 flit;
+ u32 size;
+ unsigned int entry_idx;
+ int i, k, rc;
+ u16 mbox_cmds;
+
+ if (pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.mboxlog_param.log) {
+ log = pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.
+ mboxlog_param.log;
+ mbox_cmds = pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.
+ mboxlog_param.mbox_cmds;
+ } else {
+ if (pdbg_init->verbose)
+ pdbg_init->print("Mbox log is not requested\n");
+ return CUDBG_STATUS_ENTITY_NOT_REQUESTED;
+ }
+
+ size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
+ scratch_buff.size = size;
+ rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ mboxlog = (struct cudbg_mbox_log *)scratch_buff.data;
+
+ for (k = 0; k < mbox_cmds; k++) {
+ entry_idx = log->cursor + k;
+ if (entry_idx >= log->size)
+ entry_idx -= log->size;
+ entry = mbox_cmd_log_entry(log, entry_idx);
+
+ /* skip over unused entries */
+ if (entry->timestamp == 0)
+ continue;
+
+ memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
+
+ for (i = 0; i < MBOX_LEN / 8; i++) {
+ flit = entry->cmd[i];
+ mboxlog->hi[i] = (u32)(flit >> 32);
+ mboxlog->lo[i] = (u32)flit;
+ }
+
+ mboxlog++;
+ }
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+#endif
+ return (EDOOFUS);
+}
+
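+/*
+ * The PBT is read through the CIM host-access window at fixed offsets:
+ * dynamic entries at CUDBG_CHAC_PBT_ADDR, static entries at the same
+ * base with bit 6 set, LRF entries at CUDBG_CHAC_PBT_LRF, and data
+ * entries at CUDBG_CHAC_PBT_DATA, each read one 32-bit word at a time.
+ */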
+static int collect_pbt_tables(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct cudbg_buffer scratch_buff;
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_pbt_tables *pbt = NULL;
+ u32 size;
+ u32 addr;
+ int i, rc;
+
+ size = sizeof(struct cudbg_pbt_tables);
+ scratch_buff.size = size;
+
+ rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ pbt = (struct cudbg_pbt_tables *)scratch_buff.data;
+
+ /* PBT dynamic entries */
+ addr = CUDBG_CHAC_PBT_ADDR;
+ for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
+ rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_dynamic[i]);
+ if (rc) {
+ if (pdbg_init->verbose)
+				pdbg_init->print("BUSY timeout reading "
+						 "CIM_HOST_ACC_CTRL\n");
+ goto err1;
+ }
+ }
+
+ /* PBT static entries */
+
+ /* static entries start when bit 6 is set */
+ addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
+ for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
+ rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_static[i]);
+ if (rc) {
+ if (pdbg_init->verbose)
+				pdbg_init->print("BUSY timeout reading "
+						 "CIM_HOST_ACC_CTRL\n");
+ goto err1;
+ }
+ }
+
+ /* LRF entries */
+ addr = CUDBG_CHAC_PBT_LRF;
+ for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
+ rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->lrf_table[i]);
+ if (rc) {
+ if (pdbg_init->verbose)
+				pdbg_init->print("BUSY timeout reading "
+						 "CIM_HOST_ACC_CTRL\n");
+ goto err1;
+ }
+ }
+
+ /* PBT data entries */
+ addr = CUDBG_CHAC_PBT_DATA;
+ for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
+ rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_data[i]);
+ if (rc) {
+ if (pdbg_init->verbose)
+				pdbg_init->print("BUSY timeout reading "
+						 "CIM_HOST_ACC_CTRL\n");
+ goto err1;
+ }
+ }
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_pm_indirect(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct cudbg_buffer scratch_buff;
+ struct adapter *padap = pdbg_init->adap;
+ struct ireg_buf *ch_pm;
+ u32 size;
+ int i, rc, n;
+
+ n = sizeof(t5_pm_rx_array) / (4 * sizeof(u32));
+ size = sizeof(struct ireg_buf) * n * 2;
+ scratch_buff.size = size;
+
+ rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ ch_pm = (struct ireg_buf *)scratch_buff.data;
+
+ /*PM_RX*/
+ for (i = 0; i < n; i++) {
+ struct ireg_field *pm_pio = &ch_pm->tp_pio;
+ u32 *buff = ch_pm->outbuf;
+
+ pm_pio->ireg_addr = t5_pm_rx_array[i][0];
+ pm_pio->ireg_data = t5_pm_rx_array[i][1];
+ pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
+ pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
+
+ t4_read_indirect(padap,
+ pm_pio->ireg_addr,
+ pm_pio->ireg_data,
+ buff,
+ pm_pio->ireg_offset_range,
+ pm_pio->ireg_local_offset);
+
+ ch_pm++;
+ }
+
+ /*PM_Tx*/
+ n = sizeof(t5_pm_tx_array) / (4 * sizeof(u32));
+ for (i = 0; i < n; i++) {
+ struct ireg_field *pm_pio = &ch_pm->tp_pio;
+ u32 *buff = ch_pm->outbuf;
+
+ pm_pio->ireg_addr = t5_pm_tx_array[i][0];
+ pm_pio->ireg_data = t5_pm_tx_array[i][1];
+ pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
+ pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
+
+ t4_read_indirect(padap,
+ pm_pio->ireg_addr,
+ pm_pio->ireg_data,
+ buff,
+ pm_pio->ireg_offset_range,
+ pm_pio->ireg_local_offset);
+
+ ch_pm++;
+ }
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_tid(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+
+ struct cudbg_buffer scratch_buff;
+ struct adapter *padap = pdbg_init->adap;
+ struct tid_info_region *tid;
+ struct tid_info_region_rev1 *tid1;
+ u32 para[7], val[7];
+ u32 mbox, pf;
+ int rc;
+
+ scratch_buff.size = sizeof(struct tid_info_region_rev1);
+
+ rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
+ if (rc)
+ goto err;
+
+#define FW_PARAM_DEV_A(param) \
+ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+#define FW_PARAM_PFVF_A(param) \
+ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
+ V_FW_PARAMS_PARAM_Y(0) | \
+ V_FW_PARAMS_PARAM_Z(0))
+#define MAX_ATIDS_A 8192U
+
+ tid1 = (struct tid_info_region_rev1 *)scratch_buff.data;
+ tid = &(tid1->tid);
+ tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
+ tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
+ tid1->ver_hdr.size = sizeof(struct tid_info_region_rev1) -
+ sizeof(struct cudbg_ver_hdr);
+
+ if (is_t5(padap)) {
+ tid->hash_base = t4_read_reg(padap, A_LE_DB_TID_HASHBASE);
+ tid1->tid_start = 0;
+ } else if (is_t6(padap)) {
+ tid->hash_base = t4_read_reg(padap, A_T6_LE_DB_HASH_TID_BASE);
+ tid1->tid_start = t4_read_reg(padap, A_LE_DB_ACTIVE_TABLE_START_INDEX);
+ }
+
+ tid->le_db_conf = t4_read_reg(padap, A_LE_DB_CONFIG);
+
+ para[0] = FW_PARAM_PFVF_A(FILTER_START);
+ para[1] = FW_PARAM_PFVF_A(FILTER_END);
+ para[2] = FW_PARAM_PFVF_A(ACTIVE_FILTER_START);
+ para[3] = FW_PARAM_PFVF_A(ACTIVE_FILTER_END);
+ para[4] = FW_PARAM_DEV_A(NTID);
+ para[5] = FW_PARAM_PFVF_A(SERVER_START);
+ para[6] = FW_PARAM_PFVF_A(SERVER_END);
+
+ mbox = padap->mbox;
+ pf = padap->pf;
+ rc = t4_query_params(padap, mbox, pf, 0, 7, para, val);
+ if (rc < 0) {
+ if (rc == -FW_EPERM) {
+ /* It looks like we don't have permission to use
+ * padap->mbox.
+ *
+ * Try mbox 4. If it works, we'll continue to
+ * collect the rest of tid info from mbox 4.
+ * Else, quit trying to collect tid info.
+ */
+ mbox = 4;
+ pf = 4;
+ rc = t4_query_params(padap, mbox, pf, 0, 7, para, val);
+ if (rc < 0) {
+ cudbg_err->sys_err = rc;
+ goto err1;
+ }
+ } else {
+ cudbg_err->sys_err = rc;
+ goto err1;
+ }
+ }
+
+ tid->ftid_base = val[0];
+ tid->nftids = val[1] - val[0] + 1;
+ /*active filter region*/
+ if (val[2] != val[3]) {
+#ifdef notyet
+ tid->flags |= FW_OFLD_CONN;
+#endif
+ tid->aftid_base = val[2];
+ tid->aftid_end = val[3];
+ }
+ tid->ntids = val[4];
+ tid->natids = min_t(u32, tid->ntids / 2, MAX_ATIDS_A);
+ tid->stid_base = val[5];
+ tid->nstids = val[6] - val[5] + 1;
+
+ if (chip_id(padap) >= CHELSIO_T6) {
+ para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
+ para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
+ rc = t4_query_params(padap, mbox, pf, 0, 2, para, val);
+ if (rc < 0) {
+ cudbg_err->sys_err = rc;
+ goto err1;
+ }
+
+ tid->hpftid_base = val[0];
+ tid->nhpftids = val[1] - val[0] + 1;
+ }
+
+ if (chip_id(padap) <= CHELSIO_T5) {
+ tid->sb = t4_read_reg(padap, A_LE_DB_SERVER_INDEX) / 4;
+ tid->hash_base /= 4;
+ } else
+ tid->sb = t4_read_reg(padap, A_LE_DB_SRVR_START_INDEX);
+
+ /*UO context range*/
+ para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
+ para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
+
+ rc = t4_query_params(padap, mbox, pf, 0, 2, para, val);
+ if (rc < 0) {
+ cudbg_err->sys_err = rc;
+ goto err1;
+ }
+
+ if (val[0] != val[1]) {
+ tid->uotid_base = val[0];
+ tid->nuotids = val[1] - val[0] + 1;
+ }
+ tid->IP_users = t4_read_reg(padap, A_LE_DB_ACT_CNT_IPV4);
+ tid->IPv6_users = t4_read_reg(padap, A_LE_DB_ACT_CNT_IPV6);
+
+#undef FW_PARAM_PFVF_A
+#undef FW_PARAM_DEV_A
+#undef MAX_ATIDS_A
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_tx_rate(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct cudbg_buffer scratch_buff;
+ struct adapter *padap = pdbg_init->adap;
+ struct tx_rate *tx_rate;
+ u32 size;
+ int rc;
+
+ size = sizeof(struct tx_rate);
+ scratch_buff.size = size;
+
+ rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ tx_rate = (struct tx_rate *)scratch_buff.data;
+ t4_get_chan_txrate(padap, tx_rate->nrate, tx_rate->orate);
+ tx_rate->nchan = padap->chip_params->nchan;
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
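+/*
+ * The MPS TCAM stores each entry as an (x, y) bit-pair; a bit that is
+ * set in both tcamx and tcamy marks an unused entry, which is why the
+ * caller skips entries with (tcamx & tcamy) != 0. The mask written
+ * here is x | y, and the 48-bit MAC address is taken from the low six
+ * bytes of the big-endian representation of y.
+ */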
+static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
+{
+ *mask = x | y;
+ y = (__force u64)cpu_to_be64(y);
+ memcpy(addr, (char *)&y + 2, ETH_ALEN);
+}
+
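+/*
+ * Fallback used when the FW_LDST mailbox command for the MPS
+ * replication map fails: read the A_MPS_VF_RPLCT_MAP* registers
+ * directly ("backdoor") instead of going through the firmware.
+ */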
+static void mps_rpl_backdoor(struct adapter *padap, struct fw_ldst_mps_rplc *mps_rplc)
+{
+ if (is_t5(padap)) {
+ mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
+ A_MPS_VF_RPLCT_MAP3));
+ mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
+ A_MPS_VF_RPLCT_MAP2));
+ mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
+ A_MPS_VF_RPLCT_MAP1));
+ mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
+ A_MPS_VF_RPLCT_MAP0));
+ } else {
+ mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
+ A_MPS_VF_RPLCT_MAP7));
+ mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
+ A_MPS_VF_RPLCT_MAP6));
+ mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
+ A_MPS_VF_RPLCT_MAP5));
+ mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
+ A_MPS_VF_RPLCT_MAP4));
+ }
+ mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP3));
+ mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP2));
+ mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP1));
+ mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP0));
+}
+
+static int collect_mps_tcam(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct cudbg_buffer scratch_buff;
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_mps_tcam *tcam = NULL;
+ u32 size = 0, i, n, total_size = 0;
+ u32 ctl, data2;
+ u64 tcamy, tcamx, val;
+ int rc;
+
+ n = padap->chip_params->mps_tcam_size;
+ size = sizeof(struct cudbg_mps_tcam) * n;
+ scratch_buff.size = size;
+
+ rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
+ if (rc)
+ goto err;
+ memset(scratch_buff.data, 0, size);
+
+ tcam = (struct cudbg_mps_tcam *)scratch_buff.data;
+ for (i = 0; i < n; i++) {
+ if (chip_id(padap) >= CHELSIO_T6) {
+ /* CtlReqID - 1: use Host Driver Requester ID
+ * CtlCmdType - 0: Read, 1: Write
+ * CtlTcamSel - 0: TCAM0, 1: TCAM1
+ * CtlXYBitSel- 0: Y bit, 1: X bit
+ */
+
+ /* Read tcamy */
+ ctl = (V_CTLREQID(1) |
+ V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0));
+ if (i < 256)
+ ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
+ else
+ ctl |= V_CTLTCAMINDEX(i - 256) |
+ V_CTLTCAMSEL(1);
+
+ t4_write_reg(padap, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
+ val = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
+ tcamy = G_DMACH(val) << 32;
+ tcamy |= t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
+ data2 = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
+ tcam->lookup_type = G_DATALKPTYPE(data2);
+
+ /* 0 - Outer header, 1 - Inner header
+ * [71:48] bit locations are overloaded for
+ * outer vs. inner lookup types.
+ */
+
+ if (tcam->lookup_type &&
+ (tcam->lookup_type != M_DATALKPTYPE)) {
+ /* Inner header VNI */
+ tcam->vniy = ((data2 & F_DATAVIDH2) << 23) |
+ (G_DATAVIDH1(data2) << 16) |
+ G_VIDL(val);
+ tcam->dip_hit = data2 & F_DATADIPHIT;
+ } else {
+ tcam->vlan_vld = data2 & F_DATAVIDH2;
+ tcam->ivlan = G_VIDL(val);
+ }
+
+ tcam->port_num = G_DATAPORTNUM(data2);
+
+ /* Read tcamx. Change the control param */
+ ctl |= V_CTLXYBITSEL(1);
+ t4_write_reg(padap, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
+ val = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
+ tcamx = G_DMACH(val) << 32;
+ tcamx |= t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
+ data2 = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
+ if (tcam->lookup_type &&
+ (tcam->lookup_type != M_DATALKPTYPE)) {
+ /* Inner header VNI mask */
+ tcam->vnix = ((data2 & F_DATAVIDH2) << 23) |
+ (G_DATAVIDH1(data2) << 16) |
+ G_VIDL(val);
+ }
+ } else {
+ tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(i));
+ tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(i));
+ }
+
+ if (tcamx & tcamy)
+ continue;
+
+ tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(i));
+ tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(i));
+
+ if (is_t5(padap))
+ tcam->repli = (tcam->cls_lo & F_REPLICATE);
+ else if (is_t6(padap))
+ tcam->repli = (tcam->cls_lo & F_T6_REPLICATE);
+
+ if (tcam->repli) {
+ struct fw_ldst_cmd ldst_cmd;
+ struct fw_ldst_mps_rplc mps_rplc;
+
+ memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+ ldst_cmd.op_to_addrspace =
+ htonl(V_FW_CMD_OP(FW_LDST_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ |
+ V_FW_LDST_CMD_ADDRSPACE(
+ FW_LDST_ADDRSPC_MPS));
+
+ ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
+
+ ldst_cmd.u.mps.rplc.fid_idx =
+ htons(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
+ V_FW_LDST_CMD_IDX(i));
+
+ rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
+ sizeof(ldst_cmd), &ldst_cmd);
+
+ if (rc)
+ mps_rpl_backdoor(padap, &mps_rplc);
+ else
+ mps_rplc = ldst_cmd.u.mps.rplc;
+
+ tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
+ tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
+ tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
+ tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
+ if (padap->chip_params->mps_rplc_size >
+ CUDBG_MAX_RPLC_SIZE) {
+ tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
+ tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
+ tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
+ tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
+ }
+ }
+ cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
+
+ tcam->idx = i;
+ tcam->rplc_size = padap->chip_params->mps_rplc_size;
+
+ total_size += sizeof(struct cudbg_mps_tcam);
+
+ tcam++;
+ }
+
+ if (total_size == 0) {
+ rc = CUDBG_SYSTEM_ERROR;
+ goto err1;
+ }
+
+ scratch_buff.size = total_size;
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ scratch_buff.size = size;
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_pcie_config(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct cudbg_buffer scratch_buff;
+ struct adapter *padap = pdbg_init->adap;
+ u32 size, *value, j;
+ int i, rc, n;
+
+ size = sizeof(u32) * NUM_PCIE_CONFIG_REGS;
+ n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
+ scratch_buff.size = size;
+
+ rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ value = (u32 *)scratch_buff.data;
+ for (i = 0; i < n; i++) {
+ for (j = t5_pcie_config_array[i][0];
+ j <= t5_pcie_config_array[i][1]; j += 4) {
+ *value++ = t4_hw_pci_read_cfg4(padap, j);
+ }
+ }
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
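+/*
+ * Read one TID's TCAM data through the LE debug interface (DBGI):
+ * clear the request-data registers, issue DBGI command 0x4 with the
+ * TID, poll until DBGICMDBUSY clears, verify bit 0 of the response
+ * status, then copy out the response-data registers.
+ */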
+static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
+ struct cudbg_tid_data *tid_data)
+{
+ int i, cmd_retry = 8;
+ struct adapter *padap = pdbg_init->adap;
+ u32 val;
+
+ /* Fill REQ_DATA regs with 0's */
+ for (i = 0; i < CUDBG_NUM_REQ_REGS; i++)
+ t4_write_reg(padap, A_LE_DB_DBGI_REQ_DATA + (i << 2), 0);
+
+ /* Write DBIG command */
+ val = (0x4 << S_DBGICMD) | tid;
+ t4_write_reg(padap, A_LE_DB_DBGI_REQ_TCAM_CMD, val);
+ tid_data->dbig_cmd = val;
+
+ val = 0;
+ val |= 1 << S_DBGICMDSTRT;
+ val |= 1; /* LE mode */
+ t4_write_reg(padap, A_LE_DB_DBGI_CONFIG, val);
+ tid_data->dbig_conf = val;
+
+ /* Poll the DBGICMDBUSY bit */
+ val = 1;
+ while (val) {
+ val = t4_read_reg(padap, A_LE_DB_DBGI_CONFIG);
+ val = (val >> S_DBGICMDBUSY) & 1;
+ cmd_retry--;
+ if (!cmd_retry) {
+ if (pdbg_init->verbose)
+ pdbg_init->print("%s(): Timeout waiting for non-busy\n",
+ __func__);
+ return CUDBG_SYSTEM_ERROR;
+ }
+ }
+
+ /* Check RESP status */
+ val = 0;
+ val = t4_read_reg(padap, A_LE_DB_DBGI_RSP_STATUS);
+ tid_data->dbig_rsp_stat = val;
+ if (!(val & 1)) {
+ if (pdbg_init->verbose)
+ pdbg_init->print("%s(): DBGI command failed\n", __func__);
+ return CUDBG_SYSTEM_ERROR;
+ }
+
+ /* Read RESP data */
+ for (i = 0; i < CUDBG_NUM_REQ_REGS; i++)
+ tid_data->data[i] = t4_read_reg(padap,
+ A_LE_DB_DBGI_RSP_DATA +
+ (i << 2));
+
+ tid_data->tid = tid;
+
+ return 0;
+}
+
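+/*
+ * The LE TCAM dump can be much larger than a single scratch buffer,
+ * so it is collected in CUDBG_CHUNK_SIZE pieces: one compression
+ * header is written up front, and each filled chunk is compressed
+ * and released before the next one is allocated.
+ */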
+static int collect_le_tcam(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct cudbg_buffer scratch_buff;
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_tcam tcam_region = {0};
+ struct cudbg_tid_data *tid_data = NULL;
+ u32 value, bytes = 0, bytes_left = 0;
+ u32 i;
+ int rc, size;
+
+ /* Get the LE regions */
+	/* Get hash base index */
+	value = t4_read_reg(padap, A_LE_DB_TID_HASHBASE);
+ tcam_region.tid_hash_base = value;
+
+ /* Get routing table index */
+ value = t4_read_reg(padap, A_LE_DB_ROUTING_TABLE_INDEX);
+ tcam_region.routing_start = value;
+
+	/* Get clip table index */
+ value = t4_read_reg(padap, A_LE_DB_CLIP_TABLE_INDEX);
+ tcam_region.clip_start = value;
+
+ /* Get filter table index */
+ value = t4_read_reg(padap, A_LE_DB_FILTER_TABLE_INDEX);
+ tcam_region.filter_start = value;
+
+ /* Get server table index */
+ value = t4_read_reg(padap, A_LE_DB_SERVER_INDEX);
+ tcam_region.server_start = value;
+
+ /* Check whether hash is enabled and calculate the max tids */
+ value = t4_read_reg(padap, A_LE_DB_CONFIG);
+ if ((value >> S_HASHEN) & 1) {
+ value = t4_read_reg(padap, A_LE_DB_HASH_CONFIG);
+ if (chip_id(padap) > CHELSIO_T5)
+ tcam_region.max_tid = (value & 0xFFFFF) +
+ tcam_region.tid_hash_base;
+ else { /* for T5 */
+ value = G_HASHTIDSIZE(value);
+ value = 1 << value;
+ tcam_region.max_tid = value +
+ tcam_region.tid_hash_base;
+ }
+ } else /* hash not enabled */
+ tcam_region.max_tid = CUDBG_MAX_TCAM_TID;
+
+ size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
+ size += sizeof(struct cudbg_tcam);
+ scratch_buff.size = size;
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err;
+
+ rc = get_scratch_buff(dbg_buff, CUDBG_CHUNK_SIZE, &scratch_buff);
+ if (rc)
+ goto err;
+
+ memcpy(scratch_buff.data, &tcam_region, sizeof(struct cudbg_tcam));
+
+ tid_data = (struct cudbg_tid_data *)(((struct cudbg_tcam *)
+ scratch_buff.data) + 1);
+ bytes_left = CUDBG_CHUNK_SIZE - sizeof(struct cudbg_tcam);
+ bytes = sizeof(struct cudbg_tcam);
+
+ /* read all tid */
+ for (i = 0; i < tcam_region.max_tid; i++) {
+ if (bytes_left < sizeof(struct cudbg_tid_data)) {
+ scratch_buff.size = bytes;
+ rc = compress_buff(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+ scratch_buff.size = CUDBG_CHUNK_SIZE;
+ release_scratch_buff(&scratch_buff, dbg_buff);
+
+ /* new alloc */
+ rc = get_scratch_buff(dbg_buff, CUDBG_CHUNK_SIZE,
+ &scratch_buff);
+ if (rc)
+ goto err;
+
+ tid_data = (struct cudbg_tid_data *)(scratch_buff.data);
+ bytes_left = CUDBG_CHUNK_SIZE;
+ bytes = 0;
+ }
+
+ rc = cudbg_read_tid(pdbg_init, i, tid_data);
+
+ if (rc) {
+ cudbg_err->sys_err = rc;
+ goto err1;
+ }
+
+ tid_data++;
+ bytes_left -= sizeof(struct cudbg_tid_data);
+ bytes += sizeof(struct cudbg_tid_data);
+ }
+
+ if (bytes) {
+ scratch_buff.size = bytes;
+ rc = compress_buff(&scratch_buff, dbg_buff);
+ }
+
+err1:
+ scratch_buff.size = CUDBG_CHUNK_SIZE;
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_ma_indirect(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct cudbg_buffer scratch_buff;
+ struct adapter *padap = pdbg_init->adap;
+ struct ireg_buf *ma_indr = NULL;
+ u32 size, j;
+ int i, rc, n;
+
+ if (chip_id(padap) < CHELSIO_T6) {
+ if (pdbg_init->verbose)
+		pdbg_init->print("MA indirect is available only on T6\n");
+ rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
+ goto err;
+ }
+
+ n = sizeof(t6_ma_ireg_array) / (4 * sizeof(u32));
+ size = sizeof(struct ireg_buf) * n * 2;
+ scratch_buff.size = size;
+
+ rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ ma_indr = (struct ireg_buf *)scratch_buff.data;
+
+ for (i = 0; i < n; i++) {
+ struct ireg_field *ma_fli = &ma_indr->tp_pio;
+ u32 *buff = ma_indr->outbuf;
+
+ ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
+ ma_fli->ireg_data = t6_ma_ireg_array[i][1];
+ ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
+ ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
+
+ t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
+ buff, ma_fli->ireg_offset_range,
+ ma_fli->ireg_local_offset);
+
+ ma_indr++;
+	}
+
+ n = sizeof(t6_ma_ireg_array2) / (4 * sizeof(u32));
+
+ for (i = 0; i < n; i++) {
+ struct ireg_field *ma_fli = &ma_indr->tp_pio;
+ u32 *buff = ma_indr->outbuf;
+
+ ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
+ ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
+ ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
+
+ for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
+ t4_read_indirect(padap, ma_fli->ireg_addr,
+ ma_fli->ireg_data, buff, 1,
+ ma_fli->ireg_local_offset);
+ buff++;
+ ma_fli->ireg_local_offset += 0x20;
+ }
+ ma_indr++;
+ }
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_hma_indirect(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct cudbg_buffer scratch_buff;
+ struct adapter *padap = pdbg_init->adap;
+ struct ireg_buf *hma_indr = NULL;
+ u32 size;
+ int i, rc, n;
+
+ if (chip_id(padap) < CHELSIO_T6) {
+ if (pdbg_init->verbose)
+		pdbg_init->print("HMA indirect is available only on T6\n");
+ rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
+ goto err;
+ }
+
+ n = sizeof(t6_hma_ireg_array) / (4 * sizeof(u32));
+ size = sizeof(struct ireg_buf) * n;
+ scratch_buff.size = size;
+
+ rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ hma_indr = (struct ireg_buf *)scratch_buff.data;
+
+ for (i = 0; i < n; i++) {
+ struct ireg_field *hma_fli = &hma_indr->tp_pio;
+ u32 *buff = hma_indr->outbuf;
+
+ hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
+ hma_fli->ireg_data = t6_hma_ireg_array[i][1];
+ hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
+ hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
+
+ t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
+ buff, hma_fli->ireg_offset_range,
+ hma_fli->ireg_local_offset);
+
+ hma_indr++;
+	}
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_pcie_indirect(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct cudbg_buffer scratch_buff;
+ struct adapter *padap = pdbg_init->adap;
+ struct ireg_buf *ch_pcie;
+ u32 size;
+ int i, rc, n;
+
+ n = sizeof(t5_pcie_pdbg_array) / (4 * sizeof(u32));
+ size = sizeof(struct ireg_buf) * n * 2;
+ scratch_buff.size = size;
+
+ rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ ch_pcie = (struct ireg_buf *)scratch_buff.data;
+
+ /*PCIE_PDBG*/
+ for (i = 0; i < n; i++) {
+ struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
+ u32 *buff = ch_pcie->outbuf;
+
+ pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
+ pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
+ pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
+ pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
+
+ t4_read_indirect(padap,
+ pcie_pio->ireg_addr,
+ pcie_pio->ireg_data,
+ buff,
+ pcie_pio->ireg_offset_range,
+ pcie_pio->ireg_local_offset);
+
+ ch_pcie++;
+ }
+
+ /*PCIE_CDBG*/
+ n = sizeof(t5_pcie_cdbg_array) / (4 * sizeof(u32));
+ for (i = 0; i < n; i++) {
+ struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
+ u32 *buff = ch_pcie->outbuf;
+
+ pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
+ pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
+ pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
+ pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
+
+ t4_read_indirect(padap,
+ pcie_pio->ireg_addr,
+ pcie_pio->ireg_data,
+ buff,
+ pcie_pio->ireg_offset_range,
+ pcie_pio->ireg_local_offset);
+
+ ch_pcie++;
+ }
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_tp_indirect(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct cudbg_buffer scratch_buff;
+ struct adapter *padap = pdbg_init->adap;
+ struct ireg_buf *ch_tp_pio;
+ u32 size;
+ int i, rc, n = 0;
+
+ if (is_t5(padap))
+ n = sizeof(t5_tp_pio_array) / (4 * sizeof(u32));
+ else if (is_t6(padap))
+ n = sizeof(t6_tp_pio_array) / (4 * sizeof(u32));
+
+ size = sizeof(struct ireg_buf) * n * 3;
+ scratch_buff.size = size;
+
+ rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ ch_tp_pio = (struct ireg_buf *)scratch_buff.data;
+
+ /* TP_PIO*/
+ for (i = 0; i < n; i++) {
+ struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
+ u32 *buff = ch_tp_pio->outbuf;
+
+ if (is_t5(padap)) {
+ tp_pio->ireg_addr = t5_tp_pio_array[i][0];
+ tp_pio->ireg_data = t5_tp_pio_array[i][1];
+ tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
+ tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
+ } else if (is_t6(padap)) {
+ tp_pio->ireg_addr = t6_tp_pio_array[i][0];
+ tp_pio->ireg_data = t6_tp_pio_array[i][1];
+ tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
+ tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
+ }
+
+ t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
+ tp_pio->ireg_local_offset, true);
+
+ ch_tp_pio++;
+ }
+
+ /* TP_TM_PIO*/
+ if (is_t5(padap))
+ n = sizeof(t5_tp_tm_pio_array) / (4 * sizeof(u32));
+ else if (is_t6(padap))
+ n = sizeof(t6_tp_tm_pio_array) / (4 * sizeof(u32));
+
+ for (i = 0; i < n; i++) {
+ struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
+ u32 *buff = ch_tp_pio->outbuf;
+
+ if (is_t5(padap)) {
+ tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
+ tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
+ tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
+ tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
+ } else if (is_t6(padap)) {
+ tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
+ tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
+ tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
+ tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
+ }
+
+ t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
+ tp_pio->ireg_local_offset, true);
+
+ ch_tp_pio++;
+ }
+
+ /* TP_MIB_INDEX*/
+ if (is_t5(padap))
+ n = sizeof(t5_tp_mib_index_array) / (4 * sizeof(u32));
+ else if (is_t6(padap))
+ n = sizeof(t6_tp_mib_index_array) / (4 * sizeof(u32));
+
+ for (i = 0; i < n ; i++) {
+ struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
+ u32 *buff = ch_tp_pio->outbuf;
+
+ if (is_t5(padap)) {
+ tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
+ tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
+ tp_pio->ireg_local_offset =
+ t5_tp_mib_index_array[i][2];
+ tp_pio->ireg_offset_range =
+ t5_tp_mib_index_array[i][3];
+ } else if (is_t6(padap)) {
+ tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
+ tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
+ tp_pio->ireg_local_offset =
+ t6_tp_mib_index_array[i][2];
+ tp_pio->ireg_offset_range =
+ t6_tp_mib_index_array[i][3];
+ }
+
+ t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
+ tp_pio->ireg_local_offset, true);
+
+ ch_tp_pio++;
+ }
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_sge_indirect(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct cudbg_buffer scratch_buff;
+ struct adapter *padap = pdbg_init->adap;
+ struct ireg_buf *ch_sge_dbg;
+ u32 size;
+ int i, rc;
+
+ size = sizeof(struct ireg_buf) * 2;
+ scratch_buff.size = size;
+
+ rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ ch_sge_dbg = (struct ireg_buf *)scratch_buff.data;
+
+ for (i = 0; i < 2; i++) {
+ struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
+ u32 *buff = ch_sge_dbg->outbuf;
+
+ sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
+ sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
+ sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
+ sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
+
+ t4_read_indirect(padap,
+ sge_pio->ireg_addr,
+ sge_pio->ireg_data,
+ buff,
+ sge_pio->ireg_offset_range,
+ sge_pio->ireg_local_offset);
+
+ ch_sge_dbg++;
+ }
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_full(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct cudbg_buffer scratch_buff;
+ struct adapter *padap = pdbg_init->adap;
+ u32 reg_addr, reg_data, reg_local_offset, reg_offset_range;
+ u32 *sp;
+ int rc;
+ int nreg = 0;
+
+ /* Collect Registers:
+ * TP_DBG_SCHED_TX (0x7e40 + 0x6a),
+ * TP_DBG_SCHED_RX (0x7e40 + 0x6b),
+ * TP_DBG_CSIDE_INT (0x7e40 + 0x23f),
+ * TP_DBG_ESIDE_INT (0x7e40 + 0x148),
+ * PCIE_CDEBUG_INDEX[AppData0] (0x5a10 + 2),
+	 * PCIE_CDEBUG_INDEX[AppData1] (0x5a10 + 3) (T6 only),
+	 * SGE_DEBUG_DATA_HIGH_INDEX_10 (0x12a8)
+	 */
+
+ if (is_t5(padap))
+ nreg = 6;
+ else if (is_t6(padap))
+ nreg = 7;
+
+ scratch_buff.size = nreg * sizeof(u32);
+
+ rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ sp = (u32 *)scratch_buff.data;
+
+ /* TP_DBG_SCHED_TX */
+ reg_local_offset = t5_tp_pio_array[3][2] + 0xa;
+ reg_offset_range = 1;
+
+ t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
+
+ sp++;
+
+ /* TP_DBG_SCHED_RX */
+ reg_local_offset = t5_tp_pio_array[3][2] + 0xb;
+ reg_offset_range = 1;
+
+ t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
+
+ sp++;
+
+ /* TP_DBG_CSIDE_INT */
+ reg_local_offset = t5_tp_pio_array[9][2] + 0xf;
+ reg_offset_range = 1;
+
+ t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
+
+ sp++;
+
+ /* TP_DBG_ESIDE_INT */
+ reg_local_offset = t5_tp_pio_array[8][2] + 3;
+ reg_offset_range = 1;
+
+ t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
+
+ sp++;
+
+ /* PCIE_CDEBUG_INDEX[AppData0] */
+ reg_addr = t5_pcie_cdbg_array[0][0];
+ reg_data = t5_pcie_cdbg_array[0][1];
+ reg_local_offset = t5_pcie_cdbg_array[0][2] + 2;
+ reg_offset_range = 1;
+
+ t4_read_indirect(padap, reg_addr, reg_data, sp, reg_offset_range,
+ reg_local_offset);
+
+ sp++;
+
+ if (is_t6(padap)) {
+ /* PCIE_CDEBUG_INDEX[AppData1] */
+ reg_addr = t5_pcie_cdbg_array[0][0];
+ reg_data = t5_pcie_cdbg_array[0][1];
+ reg_local_offset = t5_pcie_cdbg_array[0][2] + 3;
+ reg_offset_range = 1;
+
+ t4_read_indirect(padap, reg_addr, reg_data, sp,
+ reg_offset_range, reg_local_offset);
+
+ sp++;
+ }
+
+ /* SGE_DEBUG_DATA_HIGH_INDEX_10 */
+ *sp = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_10);
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int collect_vpd_data(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+#ifdef notyet
+ struct cudbg_buffer scratch_buff;
+ struct adapter *padap = pdbg_init->adap;
+ struct struct_vpd_data *vpd_data;
+ char vpd_ver[4];
+ u32 fw_vers;
+ u32 size;
+ int rc;
+
+ size = sizeof(struct struct_vpd_data);
+ scratch_buff.size = size;
+
+ rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
+ if (rc)
+ goto err;
+
+ vpd_data = (struct struct_vpd_data *)scratch_buff.data;
+
+ if (is_t5(padap)) {
+ read_vpd_reg(padap, SN_REG_ADDR, SN_MAX_LEN, vpd_data->sn);
+ read_vpd_reg(padap, BN_REG_ADDR, BN_MAX_LEN, vpd_data->bn);
+ read_vpd_reg(padap, NA_REG_ADDR, NA_MAX_LEN, vpd_data->na);
+ read_vpd_reg(padap, MN_REG_ADDR, MN_MAX_LEN, vpd_data->mn);
+ } else if (is_t6(padap)) {
+ read_vpd_reg(padap, SN_T6_ADDR, SN_MAX_LEN, vpd_data->sn);
+ read_vpd_reg(padap, BN_T6_ADDR, BN_MAX_LEN, vpd_data->bn);
+ read_vpd_reg(padap, NA_T6_ADDR, NA_MAX_LEN, vpd_data->na);
+ read_vpd_reg(padap, MN_T6_ADDR, MN_MAX_LEN, vpd_data->mn);
+ }
+
+ if (is_fw_attached(pdbg_init)) {
+ rc = t4_get_scfg_version(padap, &vpd_data->scfg_vers);
+ } else {
+ rc = 1;
+ }
+
+ if (rc) {
+ /* Now trying with backdoor mechanism */
+ rc = read_vpd_reg(padap, SCFG_VER_ADDR, SCFG_VER_LEN,
+ (u8 *)&vpd_data->scfg_vers);
+ if (rc)
+ goto err1;
+ }
+
+ if (is_fw_attached(pdbg_init)) {
+ rc = t4_get_vpd_version(padap, &vpd_data->vpd_vers);
+ } else {
+ rc = 1;
+ }
+
+ if (rc) {
+ /* Now trying with backdoor mechanism */
+ rc = read_vpd_reg(padap, VPD_VER_ADDR, VPD_VER_LEN,
+ (u8 *)vpd_ver);
+ if (rc)
+ goto err1;
+		/* read_vpd_reg() returns a string of stored hex;
+		 * convert the hex string to a char string.
+		 * The VPD version is only 2 bytes. */
+ sprintf(vpd_ver, "%c%c\n", vpd_ver[0], vpd_ver[1]);
+ vpd_data->vpd_vers = simple_strtoul(vpd_ver, NULL, 16);
+ }
+
+ /* Get FW version if it's not already filled in */
+ fw_vers = padap->params.fw_vers;
+ if (!fw_vers) {
+ rc = t4_get_fw_version(padap, &fw_vers);
+ if (rc)
+ goto err1;
+ }
+
+ vpd_data->fw_major = G_FW_HDR_FW_VER_MAJOR(fw_vers);
+ vpd_data->fw_minor = G_FW_HDR_FW_VER_MINOR(fw_vers);
+ vpd_data->fw_micro = G_FW_HDR_FW_VER_MICRO(fw_vers);
+ vpd_data->fw_build = G_FW_HDR_FW_VER_BUILD(fw_vers);
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+#endif
+ return (EDOOFUS);
+}
diff --git a/sys/dev/cxgbe/cudbg/cudbg_lib.h b/sys/dev/cxgbe/cudbg/cudbg_lib.h
new file mode 100644
index 000000000000..6a67b9c3924a
--- /dev/null
+++ b/sys/dev/cxgbe/cudbg/cudbg_lib.h
@@ -0,0 +1,255 @@
+/*-
+ * Copyright (c) 2017 Chelsio Communications, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __CUDBG_LIB_H__
+#define __CUDBG_LIB_H__
+
+#ifndef min_t
+#define min_t(type, _a, _b) (((type)(_a) < (type)(_b)) ? (type)(_a) : (type)(_b))
+#endif
+
+static int collect_reg_dump(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_fw_devlog(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_cim_qcfg(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_cim_la(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_cim_ma_la(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_cim_obq_ulp0(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_cim_obq_ulp1(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_cim_obq_ulp2(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_cim_obq_ulp3(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_cim_obq_sge(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_cim_obq_ncsi(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_cim_ibq_tp0(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_cim_ibq_tp1(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_cim_ibq_ulp(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_cim_ibq_sge0(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_cim_ibq_sge1(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_cim_ibq_ncsi(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_edc0_meminfo(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_edc1_meminfo(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_mc0_meminfo(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_mc1_meminfo(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_rss(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_rss_key(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_rss_pf_config(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_rss_vf_config(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_rss_config(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_path_mtu(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_sw_state(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+int collect_wtp_data(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_pm_stats(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_hw_sched(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_tcp_stats(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_tp_err_stats(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_fcoe_stats(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_rdma_stats(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_tp_indirect(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_sge_indirect(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_cpl_stats(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_ddp_stats(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_wc_stats(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_ulprx_la(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_lb_stats(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_tp_la(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_meminfo(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_cim_pif_la(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_clk_info(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_obq_sge_rx_q0(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_obq_sge_rx_q1(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_macstats(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_pcie_indirect(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_pm_indirect(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_full(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_tx_rate(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_tid(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_pcie_config(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_dump_context(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_mps_tcam(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_vpd_data(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_le_tcam(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_cctrl(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_ma_indirect(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_ulptx_la(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_up_cim_indirect(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_pbt_tables(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_mbox_log(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+static int collect_hma_indirect(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *);
+
+static int (*process_entity[])
+ (struct cudbg_init *, struct cudbg_buffer *, struct cudbg_error *) = {
+ collect_reg_dump,
+ collect_fw_devlog,
+ collect_cim_la, /*3*/
+ collect_cim_ma_la,
+ collect_cim_qcfg,
+ collect_cim_ibq_tp0,
+ collect_cim_ibq_tp1,
+ collect_cim_ibq_ulp,
+ collect_cim_ibq_sge0,
+ collect_cim_ibq_sge1,
+ collect_cim_ibq_ncsi,
+ collect_cim_obq_ulp0,
+ collect_cim_obq_ulp1, /*13*/
+ collect_cim_obq_ulp2,
+ collect_cim_obq_ulp3,
+ collect_cim_obq_sge,
+ collect_cim_obq_ncsi,
+ collect_edc0_meminfo,
+ collect_edc1_meminfo,
+ collect_mc0_meminfo,
+ collect_mc1_meminfo,
+ collect_rss, /*22*/
+ collect_rss_pf_config,
+ collect_rss_key,
+ collect_rss_vf_config,
+ collect_rss_config, /*26*/
+ collect_path_mtu, /*27*/
+ collect_sw_state,
+ collect_wtp_data,
+ collect_pm_stats,
+ collect_hw_sched,
+ collect_tcp_stats,
+ collect_tp_err_stats,
+ collect_fcoe_stats,
+ collect_rdma_stats,
+ collect_tp_indirect,
+ collect_sge_indirect,
+ collect_cpl_stats,
+ collect_ddp_stats,
+ collect_wc_stats,
+ collect_ulprx_la,
+ collect_lb_stats,
+ collect_tp_la,
+ collect_meminfo,
+ collect_cim_pif_la,
+ collect_clk_info,
+ collect_obq_sge_rx_q0,
+ collect_obq_sge_rx_q1,
+ collect_macstats,
+ collect_pcie_indirect,
+ collect_pm_indirect,
+ collect_full,
+ collect_tx_rate,
+ collect_tid,
+ collect_pcie_config,
+ collect_dump_context,
+ collect_mps_tcam,
+ collect_vpd_data,
+ collect_le_tcam,
+ collect_cctrl,
+ collect_ma_indirect,
+ collect_ulptx_la,
+ NULL, /* ext entity */
+ collect_up_cim_indirect,
+ collect_pbt_tables,
+ collect_mbox_log,
+ collect_hma_indirect,
+ };
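+
+/*
+ * Illustrative sketch, not part of the library: the numeric markers in the
+ * table above show that entity codes are 1-based, so a dispatcher indexes
+ * process_entity[] at (entity_code - 1) and must skip the NULL slot
+ * reserved for the extended entity.  The helper name below is hypothetical.
+ */
+static inline int
+cudbg_dispatch_entity_example(struct cudbg_init *pdbg_init,
+    struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err,
+    int entity_code)
+{
+	int n = sizeof(process_entity) / sizeof(process_entity[0]);
+
+	if (entity_code < 1 || entity_code > n ||
+	    process_entity[entity_code - 1] == NULL)
+		return CUDBG_STATUS_UNDEFINED_ENTITY;
+	return process_entity[entity_code - 1](pdbg_init, dbg_buff,
+	    cudbg_err);
+}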
+
+struct large_entity {
+ int entity_code;
+ int skip_flag;
+ int priority; /* 1 is high priority */
+};
+
+static int read_cim_ibq(struct cudbg_init *, struct cudbg_buffer *,
+			struct cudbg_error *, int);
+static int read_cim_obq(struct cudbg_init *, struct cudbg_buffer *,
+ struct cudbg_error *, int);
+int get_entity_hdr(void *outbuf, int i, u32 size, struct cudbg_entity_hdr **);
+void skip_entity(int entity_code);
+void reset_skip_entity(void);
+int is_large_entity(int entity_code);
+#endif
diff --git a/sys/dev/cxgbe/cudbg/cudbg_lib_common.h b/sys/dev/cxgbe/cudbg/cudbg_lib_common.h
new file mode 100644
index 000000000000..16fbe291e37e
--- /dev/null
+++ b/sys/dev/cxgbe/cudbg/cudbg_lib_common.h
@@ -0,0 +1,174 @@
+/*-
+ * Copyright (c) 2017 Chelsio Communications, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __CUDBG_LIB_COMMON_H__
+#define __CUDBG_LIB_COMMON_H__
+
+/* Extended entity
+ *
+ * Layout of the cudbg dump file when extended entity is present.
+ *
+ *
+ * ----------------
+ * | Global header |
+ * |---------------|
+ * |entity headers |
+ * |---------------|
+ * | Entity data |
+ * | * |
+ * | * |
+ * | * |
+ * |---------------|
+ * |extended entity|
+ * | header |
+ * |---------------|
+ * |extended entity|
+ * | data |
+ * -----------------
+ *
+ *
+ * Extended entity: This comes into the picture only when cudbg_collect() is
+ * called multiple times.
+ */
+
+#ifndef CUDBG_LITE
+#include "common/t4_hw.h"
+#endif
+
+#define CUDBG_SF_MAX_SECTOR (FLASH_CUDBG_START_SEC + FLASH_CUDBG_NSECS)
+#define CUDBG_SF_SECTOR_SIZE SF_SEC_SIZE
+#define CUDBG_START_SEC FLASH_CUDBG_START_SEC
+#define CUDBG_FLASH_SIZE FLASH_CUDBG_MAX_SIZE
+
+#define CUDBG_EXT_DATA_BIT 0
+#define CUDBG_EXT_DATA_VALID (1 << CUDBG_EXT_DATA_BIT)
+
+struct cudbg_hdr {
+ u32 signature;
+ u32 hdr_len;
+ u16 major_ver;
+ u16 minor_ver;
+ u32 data_len;
+ u32 hdr_flags;
+ u16 max_entities;
+ u8 chip_ver;
+ u8 reserved1;
+ u32 reserved[8];
+};
+
+struct cudbg_entity_hdr {
+ u32 entity_type;
+ u32 start_offset;
+ u32 size;
+ int hdr_flags;
+ u32 sys_warn;
+ u32 sys_err;
+ u8 num_pad;
+ u8 flag; /* bit 0 is used to indicate ext data */
+ u8 reserved1[2];
+ u32 next_ext_offset; /* pointer to next extended entity meta data */
+ u32 reserved[5];
+};
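+
+/*
+ * Illustrative sketch, not part of the library: walking the extended-entity
+ * chain pictured in the layout comment above.  Each header's
+ * next_ext_offset gives the position of the next extended-entity header;
+ * the sketch assumes offsets are relative to the start of the dump and
+ * that a zero next_ext_offset terminates the chain.
+ */
+static inline u32
+cudbg_count_ext_entities_example(const char *dump, u32 first_ext_offset,
+    u32 dump_len)
+{
+	const struct cudbg_entity_hdr *ehdr;
+	u32 n = 0, off = first_ext_offset;
+
+	while (off != 0 && off + sizeof(*ehdr) <= dump_len) {
+		ehdr = (const struct cudbg_entity_hdr *)(dump + off);
+		n++;
+		off = ehdr->next_ext_offset;
+	}
+	return (n);
+}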
+
+struct cudbg_ver_hdr {
+ u32 signature;
+ u16 revision;
+ u16 size;
+};
+
+struct cudbg_buffer {
+ u32 size;
+ u32 offset;
+ char *data;
+};
+
+struct cudbg_error {
+ int sys_err;
+ int sys_warn;
+ int app_err;
+};
+
+struct cudbg_flash_sec_info {
+	int par_sec;			/* Partially filled sector number */
+ int par_sec_offset; /* Offset in partially filled sector */
+ int cur_seq_no;
+ u32 max_seq_no;
+ u32 max_seq_sec;
+ u32 hdr_data_len; /* Total data */
+ u32 skip_size; /* Total size of large entities. */
+ u64 max_timestamp;
+ char sec_data[CUDBG_SF_SECTOR_SIZE];
+ u8 sec_bitmap[8];
+};
+
+struct cudbg_private {
+ struct cudbg_init dbg_init;
+ struct cudbg_flash_sec_info sec_info;
+};
+
+#define HTONL_NIBBLE(data) ( \
+ (((uint32_t)(data) >> 28) & 0x0000000F) | \
+ (((uint32_t)(data) >> 20) & 0x000000F0) | \
+ (((uint32_t)(data) >> 12) & 0x00000F00) | \
+ (((uint32_t)(data) >> 4) & 0x0000F000) | \
+ (((uint32_t)(data) << 4) & 0x000F0000) | \
+ (((uint32_t)(data) << 12) & 0x00F00000) | \
+ (((uint32_t)(data) << 20) & 0x0F000000) | \
+ (((uint32_t)(data) << 28) & 0xF0000000))
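+
+/*
+ * HTONL_NIBBLE reverses the order of the eight 4-bit nibbles in a 32-bit
+ * word, e.g. HTONL_NIBBLE(0x12345678) == 0x87654321.  cudbg_wtp.c applies
+ * it to SGE debug words before unpacking their nibble-wide SOP/EOP
+ * counters.
+ */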
+
+#define CDUMP_MAX_COMP_BUF_SIZE ((64 * 1024) - 1)
+#define CUDBG_CHUNK_SIZE ((CDUMP_MAX_COMP_BUF_SIZE/1024) * 1024)
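+/* CUDBG_CHUNK_SIZE works out to 64512 bytes (63 KB): the largest 1 KB
+ * multiple that fits under the 64 KB - 1 compression-buffer limit. */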
+
+#define CUDBG_LEGACY_SIGNATURE 123
+#define CUDBG_SIGNATURE 67856866 /* CUDB in ascii */
+#define CUDBG_FL_SIGNATURE 0x4355464c /* CUFL in ascii */
+
+#define CUDBG_FL_MAJOR_VERSION 1
+#define CUDBG_FL_MINOR_VERSION 1
+#define CUDBG_FL_BUILD_VERSION 0
+
+void update_skip_size(struct cudbg_flash_sec_info *, u32);
+int write_compression_hdr(struct cudbg_buffer *, struct cudbg_buffer *);
+int compress_buff(struct cudbg_buffer *, struct cudbg_buffer *);
+int get_scratch_buff(struct cudbg_buffer *, u32, struct cudbg_buffer *);
+void release_scratch_buff(struct cudbg_buffer *, struct cudbg_buffer *);
+int decompress_buffer(struct cudbg_buffer *, struct cudbg_buffer *);
+int validate_buffer(struct cudbg_buffer *compressed_buffer);
+int decompress_buffer_wrapper(struct cudbg_buffer *pc_buff,
+ struct cudbg_buffer *pdc_buff);
+int get_entity_rev(struct cudbg_ver_hdr *ver_hdr);
+void sort_t(void *base, int num, int size,
+ int (*cmp_func)(const void *, const void *),
+ void (*swap_func)(void *, void *, int size));
+int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag);
+int cudbg_write_flash(void *handle, u64 timestamp, void *data,
+ u32 start_offset, u32 start_hdr_offset,
+ u32 cur_entity_size,
+ u32 ext_size);
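+
+/*
+ * Illustrative sketch, not part of the library: every collect_*() routine
+ * built on the helpers above follows the same protocol -- carve a scratch
+ * region out of the output buffer, fill it, emit a compression header,
+ * compress, and release the scratch region on every exit path.  The
+ * payload type below is hypothetical.
+ */
+#if 0	/* example only */
+static int
+collect_example(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff,
+    struct cudbg_error *cudbg_err)
+{
+	struct cudbg_buffer scratch_buff;
+	struct example_payload *p;	/* hypothetical payload type */
+	int rc;
+
+	rc = get_scratch_buff(dbg_buff, sizeof(*p), &scratch_buff);
+	if (rc)
+		goto err;
+	p = (struct example_payload *)scratch_buff.data;
+	/* ... fill *p from the hardware ... */
+	rc = write_compression_hdr(&scratch_buff, dbg_buff);
+	if (rc)
+		goto err1;
+	rc = compress_buff(&scratch_buff, dbg_buff);
+err1:
+	release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+	return (rc);
+}
+#endif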
+#endif
diff --git a/sys/dev/cxgbe/cudbg/cudbg_wtp.c b/sys/dev/cxgbe/cudbg/cudbg_wtp.c
new file mode 100644
index 000000000000..a72534d987e6
--- /dev/null
+++ b/sys/dev/cxgbe/cudbg/cudbg_wtp.c
@@ -0,0 +1,1310 @@
+/*-
+ * Copyright (c) 2017 Chelsio Communications, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/param.h>
+
+#include "common/common.h"
+#include "common/t4_regs.h"
+#include "cudbg.h"
+#include "cudbg_lib_common.h"
+#include "cudbg_entity.h"
+
+int collect_wtp_data(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+/* SGE_DEBUG registers. */
+#define TP_MIB_SIZE 0x5e
+
+struct sge_debug_reg_data {
+ /*indx0*/
+ u32 reserved1:4;
+ u32 reserved2:4;
+ u32 debug_uP_SOP_cnt:4;
+ u32 debug_uP_EOP_cnt:4;
+ u32 debug_CIM_SOP1_cnt:4;
+ u32 debug_CIM_EOP1_cnt:4;
+ u32 debug_CIM_SOP0_cnt:4;
+ u32 debug_CIM_EOP0_cnt:4;
+
+ /*indx1*/
+ u32 reserved3:32;
+
+ /*indx2*/
+ u32 debug_T_Rx_SOP1_cnt:4;
+ u32 debug_T_Rx_EOP1_cnt:4;
+ u32 debug_T_Rx_SOP0_cnt:4;
+ u32 debug_T_Rx_EOP0_cnt:4;
+ u32 debug_U_Rx_SOP1_cnt:4;
+ u32 debug_U_Rx_EOP1_cnt:4;
+ u32 debug_U_Rx_SOP0_cnt:4;
+ u32 debug_U_Rx_EOP0_cnt:4;
+
+ /*indx3*/
+ u32 reserved4:32;
+
+ /*indx4*/
+ u32 debug_UD_Rx_SOP3_cnt:4;
+ u32 debug_UD_Rx_EOP3_cnt:4;
+ u32 debug_UD_Rx_SOP2_cnt:4;
+ u32 debug_UD_Rx_EOP2_cnt:4;
+ u32 debug_UD_Rx_SOP1_cnt:4;
+ u32 debug_UD_Rx_EOP1_cnt:4;
+ u32 debug_UD_Rx_SOP0_cnt:4;
+ u32 debug_UD_Rx_EOP0_cnt:4;
+
+ /*indx5*/
+ u32 reserved5:32;
+
+ /*indx6*/
+ u32 debug_U_Tx_SOP3_cnt:4;
+ u32 debug_U_Tx_EOP3_cnt:4;
+ u32 debug_U_Tx_SOP2_cnt:4;
+ u32 debug_U_Tx_EOP2_cnt:4;
+ u32 debug_U_Tx_SOP1_cnt:4;
+ u32 debug_U_Tx_EOP1_cnt:4;
+ u32 debug_U_Tx_SOP0_cnt:4;
+ u32 debug_U_Tx_EOP0_cnt:4;
+
+ /*indx7*/
+ u32 reserved6:32;
+
+ /*indx8*/
+ u32 debug_PC_Rsp_SOP1_cnt:4;
+ u32 debug_PC_Rsp_EOP1_cnt:4;
+ u32 debug_PC_Rsp_SOP0_cnt:4;
+ u32 debug_PC_Rsp_EOP0_cnt:4;
+ u32 debug_PC_Req_SOP1_cnt:4;
+ u32 debug_PC_Req_EOP1_cnt:4;
+ u32 debug_PC_Req_SOP0_cnt:4;
+ u32 debug_PC_Req_EOP0_cnt:4;
+
+ /*indx9*/
+ u32 reserved7:32;
+
+ /*indx10*/
+ u32 debug_PD_Req_SOP3_cnt:4;
+ u32 debug_PD_Req_EOP3_cnt:4;
+ u32 debug_PD_Req_SOP2_cnt:4;
+ u32 debug_PD_Req_EOP2_cnt:4;
+ u32 debug_PD_Req_SOP1_cnt:4;
+ u32 debug_PD_Req_EOP1_cnt:4;
+ u32 debug_PD_Req_SOP0_cnt:4;
+ u32 debug_PD_Req_EOP0_cnt:4;
+
+ /*indx11*/
+ u32 reserved8:32;
+
+ /*indx12*/
+ u32 debug_PD_Rsp_SOP3_cnt:4;
+ u32 debug_PD_Rsp_EOP3_cnt:4;
+ u32 debug_PD_Rsp_SOP2_cnt:4;
+ u32 debug_PD_Rsp_EOP2_cnt:4;
+ u32 debug_PD_Rsp_SOP1_cnt:4;
+ u32 debug_PD_Rsp_EOP1_cnt:4;
+ u32 debug_PD_Rsp_SOP0_cnt:4;
+ u32 debug_PD_Rsp_EOP0_cnt:4;
+
+ /*indx13*/
+ u32 reserved9:32;
+
+ /*indx14*/
+ u32 debug_CPLSW_TP_Rx_SOP1_cnt:4;
+ u32 debug_CPLSW_TP_Rx_EOP1_cnt:4;
+ u32 debug_CPLSW_TP_Rx_SOP0_cnt:4;
+ u32 debug_CPLSW_TP_Rx_EOP0_cnt:4;
+ u32 debug_CPLSW_CIM_SOP1_cnt:4;
+ u32 debug_CPLSW_CIM_EOP1_cnt:4;
+ u32 debug_CPLSW_CIM_SOP0_cnt:4;
+ u32 debug_CPLSW_CIM_EOP0_cnt:4;
+
+ /*indx15*/
+ u32 reserved10:32;
+
+ /*indx16*/
+ u32 debug_PD_Req_Rd3_cnt:4;
+ u32 debug_PD_Req_Rd2_cnt:4;
+ u32 debug_PD_Req_Rd1_cnt:4;
+ u32 debug_PD_Req_Rd0_cnt:4;
+ u32 debug_PD_Req_Int3_cnt:4;
+ u32 debug_PD_Req_Int2_cnt:4;
+ u32 debug_PD_Req_Int1_cnt:4;
+ u32 debug_PD_Req_Int0_cnt:4;
+
+};
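+
+/*
+ * Note: the layout above relies on the compiler allocating bitfields
+ * LSB-first within each 32-bit word, which holds on the little-endian
+ * targets this code runs on; each named field is a single 4-bit SOP/EOP
+ * counter taken from one nibble-reversed SGE debug word.
+ */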
+
+struct tp_mib_type tp_mib[] = {
+ {"tp_mib_mac_in_err_0", 0x0},
+ {"tp_mib_mac_in_err_1", 0x1},
+ {"tp_mib_mac_in_err_2", 0x2},
+ {"tp_mib_mac_in_err_3", 0x3},
+ {"tp_mib_hdr_in_err_0", 0x4},
+ {"tp_mib_hdr_in_err_1", 0x5},
+ {"tp_mib_hdr_in_err_2", 0x6},
+ {"tp_mib_hdr_in_err_3", 0x7},
+ {"tp_mib_tcp_in_err_0", 0x8},
+ {"tp_mib_tcp_in_err_1", 0x9},
+ {"tp_mib_tcp_in_err_2", 0xa},
+ {"tp_mib_tcp_in_err_3", 0xb},
+ {"tp_mib_tcp_out_rst", 0xc},
+ {"tp_mib_tcp_in_seg_hi", 0x10},
+ {"tp_mib_tcp_in_seg_lo", 0x11},
+ {"tp_mib_tcp_out_seg_hi", 0x12},
+ {"tp_mib_tcp_out_seg_lo", 0x13},
+ {"tp_mib_tcp_rxt_seg_hi", 0x14},
+ {"tp_mib_tcp_rxt_seg_lo", 0x15},
+ {"tp_mib_tnl_cng_drop_0", 0x18},
+ {"tp_mib_tnl_cng_drop_1", 0x19},
+ {"tp_mib_tnl_cng_drop_2", 0x1a},
+ {"tp_mib_tnl_cng_drop_3", 0x1b},
+ {"tp_mib_ofd_chn_drop_0", 0x1c},
+ {"tp_mib_ofd_chn_drop_1", 0x1d},
+ {"tp_mib_ofd_chn_drop_2", 0x1e},
+ {"tp_mib_ofd_chn_drop_3", 0x1f},
+ {"tp_mib_tnl_out_pkt_0", 0x20},
+ {"tp_mib_tnl_out_pkt_1", 0x21},
+ {"tp_mib_tnl_out_pkt_2", 0x22},
+ {"tp_mib_tnl_out_pkt_3", 0x23},
+ {"tp_mib_tnl_in_pkt_0", 0x24},
+ {"tp_mib_tnl_in_pkt_1", 0x25},
+ {"tp_mib_tnl_in_pkt_2", 0x26},
+ {"tp_mib_tnl_in_pkt_3", 0x27},
+ {"tp_mib_tcp_v6in_err_0", 0x28},
+ {"tp_mib_tcp_v6in_err_1", 0x29},
+ {"tp_mib_tcp_v6in_err_2", 0x2a},
+ {"tp_mib_tcp_v6in_err_3", 0x2b},
+ {"tp_mib_tcp_v6out_rst", 0x2c},
+ {"tp_mib_tcp_v6in_seg_hi", 0x30},
+ {"tp_mib_tcp_v6in_seg_lo", 0x31},
+ {"tp_mib_tcp_v6out_seg_hi", 0x32},
+ {"tp_mib_tcp_v6out_seg_lo", 0x33},
+ {"tp_mib_tcp_v6rxt_seg_hi", 0x34},
+ {"tp_mib_tcp_v6rxt_seg_lo", 0x35},
+ {"tp_mib_ofd_arp_drop", 0x36},
+ {"tp_mib_ofd_dfr_drop", 0x37},
+ {"tp_mib_cpl_in_req_0", 0x38},
+ {"tp_mib_cpl_in_req_1", 0x39},
+ {"tp_mib_cpl_in_req_2", 0x3a},
+ {"tp_mib_cpl_in_req_3", 0x3b},
+ {"tp_mib_cpl_out_rsp_0", 0x3c},
+ {"tp_mib_cpl_out_rsp_1", 0x3d},
+ {"tp_mib_cpl_out_rsp_2", 0x3e},
+ {"tp_mib_cpl_out_rsp_3", 0x3f},
+ {"tp_mib_tnl_lpbk_0", 0x40},
+ {"tp_mib_tnl_lpbk_1", 0x41},
+ {"tp_mib_tnl_lpbk_2", 0x42},
+ {"tp_mib_tnl_lpbk_3", 0x43},
+ {"tp_mib_tnl_drop_0", 0x44},
+ {"tp_mib_tnl_drop_1", 0x45},
+ {"tp_mib_tnl_drop_2", 0x46},
+ {"tp_mib_tnl_drop_3", 0x47},
+ {"tp_mib_fcoe_ddp_0", 0x48},
+ {"tp_mib_fcoe_ddp_1", 0x49},
+ {"tp_mib_fcoe_ddp_2", 0x4a},
+ {"tp_mib_fcoe_ddp_3", 0x4b},
+ {"tp_mib_fcoe_drop_0", 0x4c},
+ {"tp_mib_fcoe_drop_1", 0x4d},
+ {"tp_mib_fcoe_drop_2", 0x4e},
+ {"tp_mib_fcoe_drop_3", 0x4f},
+ {"tp_mib_fcoe_byte_0_hi", 0x50},
+ {"tp_mib_fcoe_byte_0_lo", 0x51},
+ {"tp_mib_fcoe_byte_1_hi", 0x52},
+ {"tp_mib_fcoe_byte_1_lo", 0x53},
+ {"tp_mib_fcoe_byte_2_hi", 0x54},
+ {"tp_mib_fcoe_byte_2_lo", 0x55},
+ {"tp_mib_fcoe_byte_3_hi", 0x56},
+ {"tp_mib_fcoe_byte_3_lo", 0x57},
+ {"tp_mib_ofd_vln_drop_0", 0x58},
+ {"tp_mib_ofd_vln_drop_1", 0x59},
+ {"tp_mib_ofd_vln_drop_2", 0x5a},
+ {"tp_mib_ofd_vln_drop_3", 0x5b},
+ {"tp_mib_usm_pkts", 0x5c},
+ {"tp_mib_usm_drop", 0x5d},
+ {"tp_mib_usm_bytes_hi", 0x5e},
+ {"tp_mib_usm_bytes_lo", 0x5f},
+ {"tp_mib_tid_del", 0x60},
+ {"tp_mib_tid_inv", 0x61},
+ {"tp_mib_tid_act", 0x62},
+ {"tp_mib_tid_pas", 0x63},
+ {"tp_mib_rqe_dfr_mod", 0x64},
+ {"tp_mib_rqe_dfr_pkt", 0x65}
+};
+
+static u32 read_sge_debug_data(struct cudbg_init *pdbg_init, u32 *sge_dbg_reg)
+{
+ struct adapter *padap = pdbg_init->adap;
+ u32 value;
+ int i = 0;
+
+ for (i = 0; i <= 15; i++) {
+ t4_write_reg(padap, A_SGE_DEBUG_INDEX, (u32)i);
+ value = t4_read_reg(padap, A_SGE_DEBUG_DATA_LOW);
+ /*printf("LOW 0x%08x\n", value);*/
+ sge_dbg_reg[(i << 1) | 1] = HTONL_NIBBLE(value);
+ value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH);
+ /*printf("HIGH 0x%08x\n", value);*/
+ sge_dbg_reg[(i << 1)] = HTONL_NIBBLE(value);
+ }
+ return 0;
+}
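+
+/*
+ * Usage note: the 32-word buffer filled above holds, for each SGE debug
+ * index 0-15, the (nibble-reversed) HIGH word in the even slot and the LOW
+ * word in the odd slot.  Casting it to struct sge_debug_reg_data, as the
+ * t5/t6 collectors below do, exposes each 4-bit SOP/EOP counter by name:
+ *
+ *	u32 buf[32] = {0};
+ *	struct sge_debug_reg_data *d;
+ *
+ *	read_sge_debug_data(pdbg_init, buf);
+ *	d = (struct sge_debug_reg_data *)buf;
+ *	... d->debug_CIM_SOP0_cnt ...
+ */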
+
+static u32 read_tp_mib_data(struct cudbg_init *pdbg_init,
+ struct tp_mib_data **ppTp_Mib)
+{
+ struct adapter *padap = pdbg_init->adap;
+ u32 i = 0;
+
+ for (i = 0; i < TP_MIB_SIZE; i++) {
+ t4_tp_mib_read(padap, &tp_mib[i].value, 1,
+ (u32)tp_mib[i].addr, true);
+ }
+ *ppTp_Mib = (struct tp_mib_data *)&tp_mib[0];
+
+ return 0;
+}
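+
+/*
+ * Usage note: read_tp_mib_data() refreshes the global tp_mib[] table in
+ * place and returns a struct tp_mib_data view of it, so callers read
+ * individual counters by field name, e.g.:
+ *
+ *	struct tp_mib_data *mib;
+ *
+ *	read_tp_mib_data(pdbg_init, &mib);
+ *	drops = mib->TP_MIB_TNL_DROP_0.value;
+ */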
+
+static int t5_wtp_data(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct sge_debug_reg_data *sge_dbg_reg = NULL;
+ struct cudbg_buffer scratch_buff;
+ struct tp_mib_data *ptp_mib = NULL;
+ struct wtp_data *wtp;
+ u32 Sge_Dbg[32] = {0};
+ u32 value = 0;
+ u32 i = 0;
+ u32 drop = 0;
+ u32 err = 0;
+ u32 offset;
+ int rc = 0;
+
+ rc = get_scratch_buff(dbg_buff, sizeof(struct wtp_data), &scratch_buff);
+
+ if (rc)
+ goto err;
+
+ offset = scratch_buff.offset;
+ wtp = (struct wtp_data *)((char *)scratch_buff.data + offset);
+
+ read_sge_debug_data(pdbg_init, Sge_Dbg);
+ read_tp_mib_data(pdbg_init, &ptp_mib);
+
+ sge_dbg_reg = (struct sge_debug_reg_data *) &Sge_Dbg[0];
+
+ /*#######################################################################*/
+ /*# TX PATH, starting from pcie*/
+ /*#######################################################################*/
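+
+	/*
+	 * The hops below expose nibble- or byte-wide SOP (start of packet)
+	 * and EOP (end of packet) event counters.  At an idle hop the two
+	 * counts agree, so a persistent SOP/EOP mismatch localizes where a
+	 * packet is stuck; hops with no EOP event simply mirror the SOP
+	 * count, as the inline comments note.
+	 */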
+
+	/* Get requests of commands from SGE to PCIE */
+
+ wtp->sge_pcie_cmd_req.sop[0] = sge_dbg_reg->debug_PC_Req_SOP0_cnt;
+ wtp->sge_pcie_cmd_req.sop[1] = sge_dbg_reg->debug_PC_Req_SOP1_cnt;
+
+ wtp->sge_pcie_cmd_req.eop[0] = sge_dbg_reg->debug_PC_Req_EOP0_cnt;
+ wtp->sge_pcie_cmd_req.eop[1] = sge_dbg_reg->debug_PC_Req_EOP1_cnt;
+
+	/* Get requests of commands from PCIE to core */
+ value = t4_read_reg(padap, A_PCIE_CMDR_REQ_CNT);
+
+ wtp->pcie_core_cmd_req.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
+ wtp->pcie_core_cmd_req.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
+ /* there is no EOP for this, so we fake it.*/
+ wtp->pcie_core_cmd_req.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
+ wtp->pcie_core_cmd_req.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
+
+ /* Get DMA stats*/
+ for (i = 0; i < 4; i++) {
+ value = t4_read_reg(padap, A_PCIE_T5_DMA_STAT3 + (i * 0x10));
+ wtp->pcie_t5_dma_stat3.sop[i] = value & 0xFF;
+ wtp->pcie_t5_dma_stat3.eop[i] = ((value >> 16) & 0xFF);
+ }
+
+ /* Get SGE debug data high index 6*/
+ value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_6);
+ wtp->sge_debug_data_high_index_6.sop[0] = ((value >> 4) & 0x0F);
+ wtp->sge_debug_data_high_index_6.eop[0] = ((value >> 0) & 0x0F);
+ wtp->sge_debug_data_high_index_6.sop[1] = ((value >> 12) & 0x0F);
+ wtp->sge_debug_data_high_index_6.eop[1] = ((value >> 8) & 0x0F);
+ wtp->sge_debug_data_high_index_6.sop[2] = ((value >> 20) & 0x0F);
+ wtp->sge_debug_data_high_index_6.eop[2] = ((value >> 16) & 0x0F);
+ wtp->sge_debug_data_high_index_6.sop[3] = ((value >> 28) & 0x0F);
+ wtp->sge_debug_data_high_index_6.eop[3] = ((value >> 24) & 0x0F);
+
+ /* Get SGE debug data high index 3*/
+ value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_3);
+ wtp->sge_debug_data_high_index_3.sop[0] = ((value >> 4) & 0x0F);
+ wtp->sge_debug_data_high_index_3.eop[0] = ((value >> 0) & 0x0F);
+ wtp->sge_debug_data_high_index_3.sop[1] = ((value >> 12) & 0x0F);
+ wtp->sge_debug_data_high_index_3.eop[1] = ((value >> 8) & 0x0F);
+ wtp->sge_debug_data_high_index_3.sop[2] = ((value >> 20) & 0x0F);
+ wtp->sge_debug_data_high_index_3.eop[2] = ((value >> 16) & 0x0F);
+ wtp->sge_debug_data_high_index_3.sop[3] = ((value >> 28) & 0x0F);
+ wtp->sge_debug_data_high_index_3.eop[3] = ((value >> 24) & 0x0F);
+
+ /* Get ULP SE CNT CHx*/
+ for (i = 0; i < 4; i++) {
+ value = t4_read_reg(padap, A_ULP_TX_SE_CNT_CH0 + (i * 4));
+ wtp->ulp_se_cnt_chx.sop[i] = ((value >> 28) & 0x0F);
+ wtp->ulp_se_cnt_chx.eop[i] = ((value >> 24) & 0x0F);
+ }
+
+ /* Get MAC PORTx PKT COUNT*/
+ for (i = 0; i < 4; i++) {
+ value = t4_read_reg(padap, 0x3081c + ((i * 4) << 12));
+ wtp->mac_portx_pkt_count.sop[i] = ((value >> 24) & 0xFF);
+ wtp->mac_portx_pkt_count.eop[i] = ((value >> 16) & 0xFF);
+ wtp->mac_porrx_pkt_count.sop[i] = ((value >> 8) & 0xFF);
+ wtp->mac_porrx_pkt_count.eop[i] = ((value >> 0) & 0xFF);
+ }
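+
+	/*
+	 * Note: ((i * 4) << 12) == i * 0x4000, the per-port spacing of the
+	 * raw MAC register addresses (0x3081c, 0x30a80, ...) used in this
+	 * function.
+	 */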
+
+	/* Get mac portx aFramesTransmittedOK */
+ for (i = 0; i < 4; i++) {
+ value = t4_read_reg(padap, 0x30a80 + ((i * 4) << 12));
+ wtp->mac_portx_aframestra_ok.sop[i] = (value & 0xFF);
+ wtp->mac_portx_aframestra_ok.eop[i] = (value & 0xFF);
+ }
+
+	/* Get command responses from core to PCIE */
+ value = t4_read_reg(padap, A_PCIE_CMDR_RSP_CNT);
+
+ wtp->core_pcie_cmd_rsp.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
+ wtp->core_pcie_cmd_rsp.sop[1] = ((value >> 16) & 0xFF); /*bit 16:23*/
+
+ wtp->core_pcie_cmd_rsp.eop[0] = ((value >> 8) & 0xFF); /*bit 8:15*/
+ wtp->core_pcie_cmd_rsp.eop[1] = ((value >> 24) & 0xFF); /*bit 24:31*/
+
+	/* Get command responses from PCIE to SGE */
+ wtp->pcie_sge_cmd_rsp.sop[0] = sge_dbg_reg->debug_PC_Rsp_SOP0_cnt;
+ wtp->pcie_sge_cmd_rsp.sop[1] = sge_dbg_reg->debug_PC_Rsp_SOP1_cnt;
+
+ wtp->pcie_sge_cmd_rsp.eop[0] = sge_dbg_reg->debug_PC_Rsp_EOP0_cnt;
+ wtp->pcie_sge_cmd_rsp.eop[1] = sge_dbg_reg->debug_PC_Rsp_EOP1_cnt;
+
+ /* Get commands sent from SGE to CIM/uP*/
+ wtp->sge_cim.sop[0] = sge_dbg_reg->debug_CIM_SOP0_cnt;
+ wtp->sge_cim.sop[1] = sge_dbg_reg->debug_CIM_SOP1_cnt;
+
+ wtp->sge_cim.eop[0] = sge_dbg_reg->debug_CIM_EOP0_cnt;
+ wtp->sge_cim.eop[1] = sge_dbg_reg->debug_CIM_EOP1_cnt;
+
+	/* Get requests of data from PCIE by SGE */
+ wtp->utx_sge_dma_req.sop[0] = sge_dbg_reg->debug_UD_Rx_SOP0_cnt;
+ wtp->utx_sge_dma_req.sop[1] = sge_dbg_reg->debug_UD_Rx_SOP1_cnt;
+ wtp->utx_sge_dma_req.sop[2] = sge_dbg_reg->debug_UD_Rx_SOP2_cnt;
+ wtp->utx_sge_dma_req.sop[3] = sge_dbg_reg->debug_UD_Rx_SOP3_cnt;
+
+ wtp->utx_sge_dma_req.eop[0] = sge_dbg_reg->debug_UD_Rx_EOP0_cnt;
+ wtp->utx_sge_dma_req.eop[1] = sge_dbg_reg->debug_UD_Rx_EOP1_cnt;
+ wtp->utx_sge_dma_req.eop[2] = sge_dbg_reg->debug_UD_Rx_EOP2_cnt;
+ wtp->utx_sge_dma_req.eop[3] = sge_dbg_reg->debug_UD_Rx_EOP3_cnt;
+
+	/* Get requests of data from PCIE by SGE (DMA read requests) */
+ wtp->sge_pcie_dma_req.sop[0] = sge_dbg_reg->debug_PD_Req_Rd0_cnt;
+ wtp->sge_pcie_dma_req.sop[1] = sge_dbg_reg->debug_PD_Req_Rd1_cnt;
+ wtp->sge_pcie_dma_req.sop[2] = sge_dbg_reg->debug_PD_Req_Rd2_cnt;
+ wtp->sge_pcie_dma_req.sop[3] = sge_dbg_reg->debug_PD_Req_Rd3_cnt;
+	/* no EOPs, so fake it. */
+ wtp->sge_pcie_dma_req.eop[0] = sge_dbg_reg->debug_PD_Req_Rd0_cnt;
+ wtp->sge_pcie_dma_req.eop[1] = sge_dbg_reg->debug_PD_Req_Rd1_cnt;
+ wtp->sge_pcie_dma_req.eop[2] = sge_dbg_reg->debug_PD_Req_Rd2_cnt;
+ wtp->sge_pcie_dma_req.eop[3] = sge_dbg_reg->debug_PD_Req_Rd3_cnt;
+
+	/* Get requests of data from PCIE to core */
+ value = t4_read_reg(padap, A_PCIE_DMAR_REQ_CNT);
+
+ wtp->pcie_core_dma_req.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
+ wtp->pcie_core_dma_req.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
+ wtp->pcie_core_dma_req.sop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
+ wtp->pcie_core_dma_req.sop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/
+ /* There is no eop so fake it.*/
+ wtp->pcie_core_dma_req.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
+ wtp->pcie_core_dma_req.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
+ wtp->pcie_core_dma_req.eop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
+ wtp->pcie_core_dma_req.eop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/
+
+ /* Get data responses from core to PCIE*/
+ value = t4_read_reg(padap, A_PCIE_DMAR_RSP_SOP_CNT);
+
+ wtp->core_pcie_dma_rsp.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
+ wtp->core_pcie_dma_rsp.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
+ wtp->core_pcie_dma_rsp.sop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
+ wtp->core_pcie_dma_rsp.sop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/
+
+ value = t4_read_reg(padap, A_PCIE_DMAR_RSP_EOP_CNT);
+
+ wtp->core_pcie_dma_rsp.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
+ wtp->core_pcie_dma_rsp.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
+ wtp->core_pcie_dma_rsp.eop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
+ wtp->core_pcie_dma_rsp.eop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/
+
+ /* Get PCIE_DATA to SGE*/
+ wtp->pcie_sge_dma_rsp.sop[0] = sge_dbg_reg->debug_PD_Rsp_SOP0_cnt;
+ wtp->pcie_sge_dma_rsp.sop[1] = sge_dbg_reg->debug_PD_Rsp_SOP1_cnt;
+ wtp->pcie_sge_dma_rsp.sop[2] = sge_dbg_reg->debug_PD_Rsp_SOP2_cnt;
+ wtp->pcie_sge_dma_rsp.sop[3] = sge_dbg_reg->debug_PD_Rsp_SOP3_cnt;
+
+ wtp->pcie_sge_dma_rsp.eop[0] = sge_dbg_reg->debug_PD_Rsp_EOP0_cnt;
+ wtp->pcie_sge_dma_rsp.eop[1] = sge_dbg_reg->debug_PD_Rsp_EOP1_cnt;
+ wtp->pcie_sge_dma_rsp.eop[2] = sge_dbg_reg->debug_PD_Rsp_EOP2_cnt;
+ wtp->pcie_sge_dma_rsp.eop[3] = sge_dbg_reg->debug_PD_Rsp_EOP3_cnt;
+
+ /*Get SGE to ULP_TX*/
+ wtp->sge_utx.sop[0] = sge_dbg_reg->debug_U_Tx_SOP0_cnt;
+ wtp->sge_utx.sop[1] = sge_dbg_reg->debug_U_Tx_SOP1_cnt;
+ wtp->sge_utx.sop[2] = sge_dbg_reg->debug_U_Tx_SOP2_cnt;
+ wtp->sge_utx.sop[3] = sge_dbg_reg->debug_U_Tx_SOP3_cnt;
+
+ wtp->sge_utx.eop[0] = sge_dbg_reg->debug_U_Tx_EOP0_cnt;
+ wtp->sge_utx.eop[1] = sge_dbg_reg->debug_U_Tx_EOP1_cnt;
+ wtp->sge_utx.eop[2] = sge_dbg_reg->debug_U_Tx_EOP2_cnt;
+ wtp->sge_utx.eop[3] = sge_dbg_reg->debug_U_Tx_EOP3_cnt;
+
+ /* Get ULP_TX to TP*/
+ for (i = 0; i < 4; i++) {
+ value = t4_read_reg(padap, (A_ULP_TX_SE_CNT_CH0 + (i*4)));
+
+ wtp->utx_tp.sop[i] = ((value >> 28) & 0xF); /*bits 28:31*/
+ wtp->utx_tp.eop[i] = ((value >> 24) & 0xF); /*bits 24:27*/
+ }
+
+ /* Get TP_DBG_CSIDE registers*/
+ for (i = 0; i < 4; i++) {
+ t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_RX0 + i),
+ true);
+
+ wtp->utx_tpcside.sop[i] = ((value >> 28) & 0xF);/*bits 28:31*/
+ wtp->utx_tpcside.eop[i] = ((value >> 24) & 0xF);/*bits 24:27*/
+ wtp->tpcside_rxpld.sop[i] = ((value >> 20) & 0xF);/*bits 20:23*/
+ wtp->tpcside_rxpld.eop[i] = ((value >> 16) & 0xF);/*bits 16:19*/
+ wtp->tpcside_rxarb.sop[i] = ((value >> 12) & 0xF);/*bits 12:15*/
+ wtp->tpcside_rxarb.eop[i] = ((value >> 8) & 0xF); /*bits 8:11*/
+ wtp->tpcside_rxcpl.sop[i] = ((value >> 4) & 0xF); /*bits 4:7*/
+ wtp->tpcside_rxcpl.eop[i] = ((value >> 0) & 0xF); /*bits 0:3*/
+ }
+
+ /* TP_DBG_ESIDE*/
+ for (i = 0; i < 4; i++) {
+ t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_ESIDE_PKT0 + i),
+ true);
+
+ wtp->tpeside_mps.sop[i] = ((value >> 28) & 0xF); /*bits 28:31*/
+ wtp->tpeside_mps.eop[i] = ((value >> 24) & 0xF); /*bits 24:27*/
+ wtp->tpeside_pm.sop[i] = ((value >> 20) & 0xF); /*bits 20:23*/
+ wtp->tpeside_pm.eop[i] = ((value >> 16) & 0xF); /*bits 16:19*/
+ wtp->mps_tpeside.sop[i] = ((value >> 12) & 0xF); /*bits 12:15*/
+ wtp->mps_tpeside.eop[i] = ((value >> 8) & 0xF); /*bits 8:11*/
+ wtp->tpeside_pld.sop[i] = ((value >> 4) & 0xF); /*bits 4:7*/
+ wtp->tpeside_pld.eop[i] = ((value >> 0) & 0xF); /*bits 0:3*/
+
+ }
+
+ /*PCIE CMD STAT2*/
+ for (i = 0; i < 3; i++) {
+ value = t4_read_reg(padap, 0x5988 + (i * 0x10));
+ wtp->pcie_cmd_stat2.sop[i] = value & 0xFF;
+ wtp->pcie_cmd_stat2.eop[i] = value & 0xFF;
+ }
+
+ /*PCIE cmd stat3*/
+ for (i = 0; i < 3; i++) {
+ value = t4_read_reg(padap, 0x598c + (i * 0x10));
+ wtp->pcie_cmd_stat3.sop[i] = value & 0xFF;
+ wtp->pcie_cmd_stat3.eop[i] = value & 0xFF;
+ }
+
+ /* ULP_RX input/output*/
+ for (i = 0; i < 2; i++) {
+ value = t4_read_reg(padap, (A_ULP_RX_SE_CNT_CH0 + (i*4)));
+
+ wtp->pmrx_ulprx.sop[i] = ((value >> 4) & 0xF); /*bits 4:7*/
+ wtp->pmrx_ulprx.eop[i] = ((value >> 0) & 0xF); /*bits 0:3*/
+ wtp->ulprx_tpcside.sop[i] = ((value >> 28) & 0xF);/*bits 28:31*/
+ wtp->ulprx_tpcside.eop[i] = ((value >> 24) & 0xF);/*bits 24:27*/
+ }
+
+ /* Get the MPS input from TP*/
+ drop = 0;
+ for (i = 0; i < 2; i++) {
+ value = t4_read_reg(padap, (A_MPS_TX_SE_CNT_TP01 + (i << 2)));
+ wtp->tp_mps.sop[(i*2)] = ((value >> 8) & 0xFF); /*bit 8:15*/
+ wtp->tp_mps.eop[(i*2)] = ((value >> 0) & 0xFF); /*bit 0:7*/
+		wtp->tp_mps.sop[(i*2) + 1] = ((value >> 24) & 0xFF); /*bit 24:31*/
+		wtp->tp_mps.eop[(i*2) + 1] = ((value >> 16) & 0xFF); /*bit 16:23*/
+ }
+ drop = ptp_mib->TP_MIB_OFD_ARP_DROP.value;
+ drop += ptp_mib->TP_MIB_OFD_DFR_DROP.value;
+
+ drop += ptp_mib->TP_MIB_TNL_DROP_0.value;
+ drop += ptp_mib->TP_MIB_TNL_DROP_1.value;
+ drop += ptp_mib->TP_MIB_TNL_DROP_2.value;
+ drop += ptp_mib->TP_MIB_TNL_DROP_3.value;
+
+ wtp->tp_mps.drops = drop;
+
+	/* Get the MPS output to the MACs */
+ drop = 0;
+ for (i = 0; i < 2; i++) {
+ value = t4_read_reg(padap, (A_MPS_TX_SE_CNT_MAC01 + (i << 2)));
+ wtp->mps_xgm.sop[(i*2)] = ((value >> 8) & 0xFF);/*bit 8:15*/
+ wtp->mps_xgm.eop[(i*2)] = ((value >> 0) & 0xFF);/*bit 0:7*/
+		wtp->mps_xgm.sop[(i*2) + 1] = ((value >> 24) & 0xFF); /*bit 24:31*/
+		wtp->mps_xgm.eop[(i*2) + 1] = ((value >> 16) & 0xFF); /*bit 16:23*/
+ }
+ for (i = 0; i < 4; i++) {
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_TX_PORT_DROP_L) +
+ (i * T5_PORT_STRIDE)));
+ drop += value;
+ }
+ wtp->mps_xgm.drops = (drop & 0xFF);
+
+	/* Get the SOP/EOP counters into and out of the MAC.  [JHANEL] I
+	 * think this is clear-on-read, so the TX and RX paths have to be
+	 * read at the same time. */
+ drop = 0;
+ for (i = 0; i < 4; i++) {
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MAC_PORT_PKT_COUNT) +
+ (i * T5_PORT_STRIDE)));
+
+ wtp->tx_xgm_xgm.sop[i] = ((value >> 24) & 0xFF); /*bit 24:31*/
+ wtp->tx_xgm_xgm.eop[i] = ((value >> 16) & 0xFF); /*bit 16:23*/
+ wtp->rx_xgm_xgm.sop[i] = ((value >> 8) & 0xFF); /*bit 8:15*/
+ wtp->rx_xgm_xgm.eop[i] = ((value >> 0) & 0xFF); /*bit 0:7*/
+ }
+
+	/* Get the MACs' output to the wire */
+ drop = 0;
+ for (i = 0; i < 4; i++) {
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MAC_PORT_AFRAMESTRANSMITTEDOK) +
+ (i * T5_PORT_STRIDE)));
+ wtp->xgm_wire.sop[i] = (value);
+		wtp->xgm_wire.eop[i] = (value); /* No EOP for XGMAC, so fake it. */
+ }
+
+ /*########################################################################*/
+ /*# RX PATH, starting from wire*/
+ /*########################################################################*/
+
+ /* Add up the wire input to the MAC*/
+ drop = 0;
+ for (i = 0; i < 4; i++) {
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MAC_PORT_AFRAMESRECEIVEDOK) +
+ (i * T5_PORT_STRIDE)));
+
+ wtp->wire_xgm.sop[i] = (value);
+		wtp->wire_xgm.eop[i] = (value); /* No EOP for XGMAC, so fake it. */
+ }
+
+ /* Already read the rx_xgm_xgm when reading TX path.*/
+
+ /* Add up SOP/EOP's on all 8 MPS buffer channels*/
+ drop = 0;
+ for (i = 0; i < 8; i++) {
+ value = t4_read_reg(padap, (A_MPS_RX_SE_CNT_IN0 + (i << 2)));
+
+ wtp->xgm_mps.sop[i] = ((value >> 8) & 0xFF); /*bits 8:15*/
+ wtp->xgm_mps.eop[i] = ((value >> 0) & 0xFF); /*bits 0:7*/
+ }
+ for (i = 0; i < 4; i++) {
+ value = t4_read_reg(padap, (A_MPS_RX_CLS_DROP_CNT0 + (i << 2)));
+ /* typo in JHANEL's code.*/
+ drop += (value & 0xFFFF) + ((value >> 16) & 0xFFFF);
+ }
+ wtp->xgm_mps.cls_drop = drop & 0xFF;
+
+ /* Add up the overflow drops on all 4 ports.*/
+ drop = 0;
+ for (i = 0; i < 4; i++) {
+ value = t4_read_reg(padap,
+ (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
+ (i << 3)));
+ drop += value;
+ value = t4_read_reg(padap,
+ (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
+ (i << 2)));
+ value = t4_read_reg(padap,
+ (A_MPS_STAT_RX_BG_0_LB_DROP_FRAME_L +
+ (i << 3)));
+ drop += value;
+ value = t4_read_reg(padap,
+ (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
+ (i << 2)));
+
+ value = t4_read_reg(padap,
+ (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
+ (i << 3)));
+ drop += value;
+ value = t4_read_reg(padap,
+ (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
+ (i << 3)));
+ value = t4_read_reg(padap,
+ (A_MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L +
+ (i << 3)));
+ drop += value;
+ value = t4_read_reg(padap,
+ (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
+ (i << 3)));
+
+ value = t4_read_reg(padap,
+ T5_PORT0_REG(A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES) +
+ (i * T5_PORT_STRIDE));
+ drop += value;
+ }
+ wtp->xgm_mps.drop = (drop & 0xFF);
+
+ /* Add up the MPS errors that should result in dropped packets*/
+ err = 0;
+ for (i = 0; i < 4; i++) {
+
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L) +
+ (i * T5_PORT_STRIDE)));
+ err += value;
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L) +
+ (i * T5_PORT_STRIDE) + 4));
+
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L) +
+ (i * T5_PORT_STRIDE)));
+ err += value;
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L) +
+ (i * T5_PORT_STRIDE) + 4));
+
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L) +
+ (i * T5_PORT_STRIDE)));
+ err += value;
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L) +
+ (i * T5_PORT_STRIDE) + 4));
+
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L) +
+ (i * T5_PORT_STRIDE)));
+ err += value;
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L) +
+ (i * T5_PORT_STRIDE) + 4));
+
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L) +
+ (i * T5_PORT_STRIDE)));
+ err += value;
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L) +
+ (i * T5_PORT_STRIDE) + 4));
+
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LESS_64B_L) +
+ (i * T5_PORT_STRIDE)));
+ err += value;
+ value = t4_read_reg(padap,
+		    (T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LESS_64B_L) +
+		    (i * T5_PORT_STRIDE) + 4));
+ }
+ wtp->xgm_mps.err = (err & 0xFF);
+
+ drop = 0;
+ for (i = 0; i < 2; i++) {
+ value = t4_read_reg(padap, (A_MPS_RX_SE_CNT_OUT01 + (i << 2)));
+
+ wtp->mps_tp.sop[(i*2)] = ((value >> 8) & 0xFF); /*bit 8:15*/
+ wtp->mps_tp.eop[(i*2)] = ((value >> 0) & 0xFF); /*bit 0:7*/
+		wtp->mps_tp.sop[(i*2) + 1] = ((value >> 24) & 0xFF); /*bit 24:31*/
+		wtp->mps_tp.eop[(i*2) + 1] = ((value >> 16) & 0xFF); /*bit 16:23*/
+ }
+ drop = ptp_mib->TP_MIB_TNL_CNG_DROP_0.value;
+ drop += ptp_mib->TP_MIB_TNL_CNG_DROP_1.value;
+ drop += ptp_mib->TP_MIB_TNL_CNG_DROP_2.value;
+ drop += ptp_mib->TP_MIB_TNL_CNG_DROP_3.value;
+ drop += ptp_mib->TP_MIB_OFD_CHN_DROP_0.value;
+ drop += ptp_mib->TP_MIB_OFD_CHN_DROP_1.value;
+ drop += ptp_mib->TP_MIB_OFD_CHN_DROP_2.value;
+ drop += ptp_mib->TP_MIB_OFD_CHN_DROP_3.value;
+ drop += ptp_mib->TP_MIB_FCOE_DROP_0.value;
+ drop += ptp_mib->TP_MIB_FCOE_DROP_1.value;
+ drop += ptp_mib->TP_MIB_FCOE_DROP_2.value;
+ drop += ptp_mib->TP_MIB_FCOE_DROP_3.value;
+ drop += ptp_mib->TP_MIB_OFD_VLN_DROP_0.value;
+ drop += ptp_mib->TP_MIB_OFD_VLN_DROP_1.value;
+ drop += ptp_mib->TP_MIB_OFD_VLN_DROP_2.value;
+ drop += ptp_mib->TP_MIB_OFD_VLN_DROP_3.value;
+ drop += ptp_mib->TP_MIB_USM_DROP.value;
+
+ wtp->mps_tp.drops = drop;
+
+ /* Get TP_DBG_CSIDE_TX registers*/
+ for (i = 0; i < 4; i++) {
+ t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_RX0 + i),
+ true);
+
+ wtp->tpcside_csw.sop[i] = ((value >> 28) & 0xF);/*bits 28:31*/
+ wtp->tpcside_csw.eop[i] = ((value >> 24) & 0xF);/*bits 24:27*/
+ wtp->tpcside_pm.sop[i] = ((value >> 20) & 0xF);/*bits 20:23*/
+ wtp->tpcside_pm.eop[i] = ((value >> 16) & 0xF);/*bits 16:19*/
+ wtp->tpcside_uturn.sop[i] = ((value >> 12) & 0xF);/*bits 12:15*/
+ wtp->tpcside_uturn.eop[i] = ((value >> 8) & 0xF); /*bits 8:11*/
+ wtp->tpcside_txcpl.sop[i] = ((value >> 4) & 0xF); /*bits 4:7*/
+ wtp->tpcside_txcpl.eop[i] = ((value >> 0) & 0xF); /*bits 0:3*/
+ }
+
+ /* TP to CPL_SWITCH*/
+ wtp->tp_csw.sop[0] = sge_dbg_reg->debug_CPLSW_TP_Rx_SOP0_cnt;
+ wtp->tp_csw.sop[1] = sge_dbg_reg->debug_CPLSW_TP_Rx_SOP1_cnt;
+
+ wtp->tp_csw.eop[0] = sge_dbg_reg->debug_CPLSW_TP_Rx_EOP0_cnt;
+ wtp->tp_csw.eop[1] = sge_dbg_reg->debug_CPLSW_TP_Rx_EOP1_cnt;
+
+ /* TP/CPL_SWITCH to SGE*/
+ wtp->csw_sge.sop[0] = sge_dbg_reg->debug_T_Rx_SOP0_cnt;
+ wtp->csw_sge.sop[1] = sge_dbg_reg->debug_T_Rx_SOP1_cnt;
+
+ wtp->csw_sge.eop[0] = sge_dbg_reg->debug_T_Rx_EOP0_cnt;
+ wtp->csw_sge.eop[1] = sge_dbg_reg->debug_T_Rx_EOP1_cnt;
+
+ wtp->sge_pcie.sop[0] = sge_dbg_reg->debug_PD_Req_SOP0_cnt;
+ wtp->sge_pcie.sop[1] = sge_dbg_reg->debug_PD_Req_SOP1_cnt;
+ wtp->sge_pcie.sop[2] = sge_dbg_reg->debug_PD_Req_SOP2_cnt;
+ wtp->sge_pcie.sop[3] = sge_dbg_reg->debug_PD_Req_SOP3_cnt;
+
+ wtp->sge_pcie.eop[0] = sge_dbg_reg->debug_PD_Req_EOP0_cnt;
+ wtp->sge_pcie.eop[1] = sge_dbg_reg->debug_PD_Req_EOP1_cnt;
+ wtp->sge_pcie.eop[2] = sge_dbg_reg->debug_PD_Req_EOP2_cnt;
+ wtp->sge_pcie.eop[3] = sge_dbg_reg->debug_PD_Req_EOP3_cnt;
+
+ wtp->sge_pcie_ints.sop[0] = sge_dbg_reg->debug_PD_Req_Int0_cnt;
+ wtp->sge_pcie_ints.sop[1] = sge_dbg_reg->debug_PD_Req_Int1_cnt;
+ wtp->sge_pcie_ints.sop[2] = sge_dbg_reg->debug_PD_Req_Int2_cnt;
+ wtp->sge_pcie_ints.sop[3] = sge_dbg_reg->debug_PD_Req_Int3_cnt;
+ /* NO EOP, so fake it.*/
+ wtp->sge_pcie_ints.eop[0] = sge_dbg_reg->debug_PD_Req_Int0_cnt;
+ wtp->sge_pcie_ints.eop[1] = sge_dbg_reg->debug_PD_Req_Int1_cnt;
+ wtp->sge_pcie_ints.eop[2] = sge_dbg_reg->debug_PD_Req_Int2_cnt;
+ wtp->sge_pcie_ints.eop[3] = sge_dbg_reg->debug_PD_Req_Int3_cnt;
+
+ /*Get PCIE DMA1 STAT2*/
+ for (i = 0; i < 4; i++) {
+ value = t4_read_reg(padap, A_PCIE_T5_DMA_STAT2 + (i * 0x10));
+ wtp->pcie_dma1_stat2.sop[i] = ((value >> 8) & 0x0F);
+ wtp->pcie_dma1_stat2.eop[i] = ((value >> 8) & 0x0F);
+		wtp->pcie_dma1_stat2_core.sop[i] = value & 0x0F;
+		wtp->pcie_dma1_stat2_core.eop[i] = value & 0x0F;
+ }
+
+	/* Get mac porrx aFramesReceivedOK */
+ for (i = 0; i < 4; i++) {
+ value = t4_read_reg(padap, 0x30a88 + ((i * 4) << 12));
+ wtp->mac_porrx_aframestra_ok.sop[i] = (value & 0xFF);
+ wtp->mac_porrx_aframestra_ok.eop[i] = (value & 0xFF);
+ }
+
+ /*Get SGE debug data high index 7*/
+ value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_7);
+ wtp->sge_debug_data_high_indx7.sop[0] = ((value >> 4) & 0x0F);
+ wtp->sge_debug_data_high_indx7.eop[0] = ((value >> 0) & 0x0F);
+ wtp->sge_debug_data_high_indx7.sop[1] = ((value >> 12) & 0x0F);
+ wtp->sge_debug_data_high_indx7.eop[1] = ((value >> 8) & 0x0F);
+ wtp->sge_debug_data_high_indx7.sop[2] = ((value >> 20) & 0x0F);
+ wtp->sge_debug_data_high_indx7.eop[2] = ((value >> 16) & 0x0F);
+ wtp->sge_debug_data_high_indx7.sop[3] = ((value >> 28) & 0x0F);
+ wtp->sge_debug_data_high_indx7.eop[3] = ((value >> 24) & 0x0F);
+
+ /*Get SGE debug data high index 1*/
+ value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_1);
+ wtp->sge_debug_data_high_indx1.sop[0] = ((value >> 20) & 0x0F);
+ wtp->sge_debug_data_high_indx1.eop[0] = ((value >> 16) & 0x0F);
+ wtp->sge_debug_data_high_indx1.sop[1] = ((value >> 28) & 0x0F);
+ wtp->sge_debug_data_high_indx1.eop[1] = ((value >> 24) & 0x0F);
+
+ /*Get TP debug CSIDE Tx registers*/
+ for (i = 0; i < 2; i++) {
+ t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_TX0 + i),
+ true);
+
+		wtp->utx_tpcside_tx.sop[i] = ((value >> 28) & 0xF); /*bits 28:31*/
+ wtp->utx_tpcside_tx.eop[i] = ((value >> 24) & 0xF);
+ }
+
+ /*Get SGE debug data high index 9*/
+ value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_9);
+ wtp->sge_debug_data_high_indx9.sop[0] = ((value >> 20) & 0x0F);
+ wtp->sge_debug_data_high_indx9.sop[1] = ((value >> 28) & 0x0F);
+ wtp->sge_debug_data_high_indx9.eop[0] = ((value >> 16) & 0x0F);
+ wtp->sge_debug_data_high_indx9.eop[1] = ((value >> 24) & 0x0F);
+ wtp->sge_work_req_pkt.sop[0] = ((value >> 4) & 0x0F);
+ wtp->sge_work_req_pkt.sop[1] = ((value >> 12) & 0x0F);
+
+ /*Get LE DB response count*/
+ value = t4_read_reg(padap, A_LE_DB_REQ_RSP_CNT);
+ wtp->le_db_rsp_cnt.sop = value & 0xF;
+ wtp->le_db_rsp_cnt.eop = (value >> 16) & 0xF;
+
+ /*Get TP debug Eside PKTx*/
+ for (i = 0; i < 4; i++) {
+ t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_ESIDE_PKT0 + i),
+ true);
+
+ wtp->tp_dbg_eside_pktx.sop[i] = ((value >> 12) & 0xF);
+ wtp->tp_dbg_eside_pktx.eop[i] = ((value >> 8) & 0xF);
+ }
+
+ /* Get data responses from core to PCIE*/
+ value = t4_read_reg(padap, A_PCIE_DMAW_SOP_CNT);
+
+ wtp->pcie_core_dmaw.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
+ wtp->pcie_core_dmaw.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
+ wtp->pcie_core_dmaw.sop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
+ wtp->pcie_core_dmaw.sop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/
+
+ value = t4_read_reg(padap, A_PCIE_DMAW_EOP_CNT);
+
+ wtp->pcie_core_dmaw.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
+ wtp->pcie_core_dmaw.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
+ wtp->pcie_core_dmaw.eop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
+ wtp->pcie_core_dmaw.eop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/
+
+ value = t4_read_reg(padap, A_PCIE_DMAI_CNT);
+
+ wtp->pcie_core_dmai.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
+ wtp->pcie_core_dmai.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
+ wtp->pcie_core_dmai.sop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
+ wtp->pcie_core_dmai.sop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/
+	/* no EOP for interrupts, just fake it. */
+ wtp->pcie_core_dmai.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
+ wtp->pcie_core_dmai.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
+ wtp->pcie_core_dmai.eop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
+ wtp->pcie_core_dmai.eop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+static int t6_wtp_data(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct sge_debug_reg_data *sge_dbg_reg = NULL;
+ struct cudbg_buffer scratch_buff;
+ struct tp_mib_data *ptp_mib = NULL;
+ struct wtp_data *wtp;
+ u32 Sge_Dbg[32] = {0};
+ u32 value = 0;
+ u32 i = 0;
+ u32 drop = 0;
+ u32 err = 0;
+ u32 offset;
+ int rc = 0;
+
+ rc = get_scratch_buff(dbg_buff, sizeof(struct wtp_data), &scratch_buff);
+
+ if (rc)
+ goto err;
+
+ offset = scratch_buff.offset;
+ wtp = (struct wtp_data *)((char *)scratch_buff.data + offset);
+
+ read_sge_debug_data(pdbg_init, Sge_Dbg);
+ read_tp_mib_data(pdbg_init, &ptp_mib);
+
+ sge_dbg_reg = (struct sge_debug_reg_data *) &Sge_Dbg[0];
+
+ /*# TX PATH*/
+
+ /*PCIE CMD STAT2*/
+ value = t4_read_reg(padap, A_PCIE_T5_CMD_STAT2);
+ wtp->pcie_cmd_stat2.sop[0] = value & 0xFF;
+ wtp->pcie_cmd_stat2.eop[0] = value & 0xFF;
+
+ value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_7);
+ wtp->sge_pcie_cmd_req.sop[0] = ((value >> 20) & 0x0F);
+ wtp->sge_pcie_cmd_req.eop[0] = ((value >> 16) & 0x0F);
+ wtp->sge_pcie_cmd_req.sop[1] = ((value >> 28) & 0x0F);
+ wtp->sge_pcie_cmd_req.eop[1] = ((value >> 24) & 0x0F);
+
+ value = t4_read_reg(padap, A_PCIE_T5_CMD_STAT3);
+ wtp->pcie_cmd_stat3.sop[0] = value & 0xFF;
+ wtp->pcie_cmd_stat3.eop[0] = value & 0xFF;
+
+	/* Get command responses from PCIE to SGE */
+ wtp->pcie_sge_cmd_rsp.sop[0] = sge_dbg_reg->debug_PC_Rsp_SOP0_cnt;
+ wtp->pcie_sge_cmd_rsp.eop[0] = sge_dbg_reg->debug_PC_Rsp_EOP0_cnt;
+ wtp->pcie_sge_cmd_rsp.sop[1] = sge_dbg_reg->debug_PC_Rsp_SOP1_cnt;
+	wtp->pcie_sge_cmd_rsp.eop[1] = sge_dbg_reg->debug_PC_Rsp_EOP1_cnt;
+
+ /* Get commands sent from SGE to CIM/uP*/
+ wtp->sge_cim.sop[0] = sge_dbg_reg->debug_CIM_SOP0_cnt;
+ wtp->sge_cim.sop[1] = sge_dbg_reg->debug_CIM_SOP1_cnt;
+
+ wtp->sge_cim.eop[0] = sge_dbg_reg->debug_CIM_EOP0_cnt;
+ wtp->sge_cim.eop[1] = sge_dbg_reg->debug_CIM_EOP1_cnt;
+
+ /*Get SGE debug data high index 9*/
+ value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_9);
+ wtp->sge_work_req_pkt.sop[0] = ((value >> 4) & 0x0F);
+ wtp->sge_work_req_pkt.eop[0] = ((value >> 0) & 0x0F);
+
+ for (i = 0; i < 2; i++) {
+ value = t4_read_reg(padap, A_PCIE_T5_DMA_STAT2 + (i * 0x10));
+ wtp->pcie_dma1_stat2.sop[i] = ((value >> 8) & 0x0F);
+ wtp->pcie_dma1_stat2.eop[i] = ((value >> 8) & 0x0F);
+ wtp->pcie_dma1_stat2_core.sop[i] = value & 0x0F;
+ wtp->pcie_dma1_stat2_core.eop[i] = value & 0x0F;
+ }
+
+ /* Get DMA0 stats3*/
+ for (i = 0; i < 2; i++) {
+ value = t4_read_reg(padap, A_PCIE_T5_DMA_STAT3 + (i * 0x10));
+ wtp->pcie_t5_dma_stat3.sop[i] = value & 0xFF;
+ wtp->pcie_t5_dma_stat3.eop[i] = ((value >> 16) & 0xFF);
+ }
+
+ /* Get ULP SE CNT CHx*/
+ for (i = 0; i < 4; i++) {
+ value = t4_read_reg(padap, A_ULP_TX_SE_CNT_CH0 + (i * 4));
+ wtp->ulp_se_cnt_chx.sop[i] = ((value >> 28) & 0x0F);
+ wtp->ulp_se_cnt_chx.eop[i] = ((value >> 24) & 0x0F);
+ }
+
+ /* Get TP_DBG_CSIDE registers*/
+ for (i = 0; i < 4; i++) {
+ t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_RX0 + i),
+ true);
+
+ wtp->utx_tpcside.sop[i] = ((value >> 28) & 0xF);/*bits 28:31*/
+ wtp->utx_tpcside.eop[i] = ((value >> 24) & 0xF);/*bits 24:27*/
+ wtp->tpcside_rxarb.sop[i] = ((value >> 12) & 0xF);/*bits 12:15*/
+ wtp->tpcside_rxarb.eop[i] = ((value >> 8) & 0xF); /*bits 8:11*/
+ }
+
+ for (i = 0; i < 4; i++) {
+ t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_ESIDE_PKT0 + i),
+ true);
+
+ wtp->tpeside_mps.sop[i] = ((value >> 28) & 0xF); /*bits 28:31*/
+ wtp->tpeside_mps.eop[i] = ((value >> 24) & 0xF); /*bits 24:27*/
+ }
+
+ for (i = 0; i < 2; i++) {
+ value = t4_read_reg(padap, (A_MPS_TX_SE_CNT_TP01 + (i << 2)));
+ wtp->tp_mps.sop[(i*2)] = ((value >> 8) & 0xFF); /*bit 8:15*/
+ wtp->tp_mps.eop[(i*2)] = ((value >> 0) & 0xFF); /*bit 0:7*/
+		wtp->tp_mps.sop[(i*2) + 1] = ((value >> 24) & 0xFF); /*bit 24:31*/
+		wtp->tp_mps.eop[(i*2) + 1] = ((value >> 16) & 0xFF); /*bit 16:23*/
+ }
+
+ for (i = 0; i < 2; i++) {
+ value = t4_read_reg(padap, (A_MPS_TX_SE_CNT_MAC01 + (i << 2)));
+ wtp->mps_xgm.sop[(i*2)] = ((value >> 8) & 0xFF);/*bit 8:15*/
+ wtp->mps_xgm.eop[(i*2)] = ((value >> 0) & 0xFF); /*bit 0:7*/
+		wtp->mps_xgm.sop[(i*2) + 1] = ((value >> 24) & 0xFF); /*bit 24:31*/
+		wtp->mps_xgm.eop[(i*2) + 1] = ((value >> 16) & 0xFF); /*bit 16:23*/
+ }
+
+ /* Get MAC PORTx PKT COUNT*/
+ for (i = 0; i < 2; i++) {
+ value = t4_read_reg(padap, 0x3081c + ((i * 4) << 12));
+ wtp->mac_portx_pkt_count.sop[i] = ((value >> 24) & 0xFF);
+ wtp->mac_portx_pkt_count.eop[i] = ((value >> 16) & 0xFF);
+ wtp->mac_porrx_pkt_count.sop[i] = ((value >> 8) & 0xFF);
+ wtp->mac_porrx_pkt_count.eop[i] = ((value >> 0) & 0xFF);
+ }
+
+ for (i = 0; i < 2; i++) {
+ value = t4_read_reg(padap, 0x30f20 + ((i * 4) << 12));
+ wtp->mac_portx_aframestra_ok.sop[i] = value & 0xff;
+ wtp->mac_portx_aframestra_ok.eop[i] = value & 0xff;
+ }
+
+ /*MAC_PORT_MTIP_1G10G_TX_etherStatsPkts*/
+
+ for (i = 0; i < 2; i++) {
+ value = t4_read_reg(padap, 0x30f60 + ((i * 4) << 12));
+ wtp->mac_portx_etherstatspkts.sop[i] = value & 0xff;
+ wtp->mac_portx_etherstatspkts.eop[i] = value & 0xff;
+ }
+
+ /*RX path*/
+
+ value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_7);
+ wtp->sge_debug_data_high_indx7.sop[0] = ((value >> 4) & 0x0F);
+ wtp->sge_debug_data_high_indx7.eop[0] = ((value >> 0) & 0x0F);
+ wtp->sge_debug_data_high_indx7.sop[1] = ((value >> 12) & 0x0F);
+ wtp->sge_debug_data_high_indx7.eop[1] = ((value >> 8) & 0x0F);
+
+ /*Get SGE debug data high index 1*/
+ value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_1);
+ wtp->sge_debug_data_high_indx1.sop[0] = ((value >> 20) & 0x0F);
+ wtp->sge_debug_data_high_indx1.eop[0] = ((value >> 16) & 0x0F);
+ wtp->sge_debug_data_high_indx1.sop[1] = ((value >> 28) & 0x0F);
+ wtp->sge_debug_data_high_indx1.eop[1] = ((value >> 24) & 0x0F);
+
+ value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_9);
+ wtp->sge_debug_data_high_indx9.sop[0] = ((value >> 20) & 0x0F);
+ wtp->sge_debug_data_high_indx9.sop[1] = ((value >> 28) & 0x0F);
+
+ wtp->sge_debug_data_high_indx9.eop[0] = ((value >> 16) & 0x0F);
+ wtp->sge_debug_data_high_indx9.eop[1] = ((value >> 24) & 0x0F);
+
+ for (i = 0; i < 2; i++) {
+ t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_TX0 + i),
+ true);
+
+		wtp->utx_tpcside_tx.sop[i] = ((value >> 28) & 0xF); /*bits 28:31*/
+ wtp->utx_tpcside_tx.eop[i] = ((value >> 24) & 0xF);
+ }
+
+ /*ULP_RX input/output*/
+ for (i = 0; i < 2; i++) {
+ value = t4_read_reg(padap, (A_ULP_RX_SE_CNT_CH0 + (i*4)));
+
+ wtp->pmrx_ulprx.sop[i] = ((value >> 4) & 0xF); /*bits 4:7*/
+ wtp->pmrx_ulprx.eop[i] = ((value >> 0) & 0xF); /*bits 0:3*/
+ wtp->ulprx_tpcside.sop[i] = ((value >> 28) & 0xF);/*bits 28:31*/
+ wtp->ulprx_tpcside.eop[i] = ((value >> 24) & 0xF);/*bits 24:27*/
+ }
+
+ /*Get LE DB response count*/
+ value = t4_read_reg(padap, A_LE_DB_REQ_RSP_CNT);
+ wtp->le_db_rsp_cnt.sop = value & 0xF;
+ wtp->le_db_rsp_cnt.eop = (value >> 16) & 0xF;
+
+ /*Get TP debug Eside PKTx*/
+ for (i = 0; i < 4; i++) {
+ t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_ESIDE_PKT0 + i),
+ true);
+
+ wtp->tp_dbg_eside_pktx.sop[i] = ((value >> 12) & 0xF);
+ wtp->tp_dbg_eside_pktx.eop[i] = ((value >> 8) & 0xF);
+ }
+
+ drop = 0;
+	/* MPS_RX_SE_CNT_OUT01 */
+	value = t4_read_reg(padap, A_MPS_RX_SE_CNT_OUT01);
+ wtp->mps_tp.sop[0] = ((value >> 8) & 0xFF); /*bit 8:15*/
+ wtp->mps_tp.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
+ wtp->mps_tp.sop[1] = ((value >> 24) & 0xFF); /*bit 24:31*/
+ wtp->mps_tp.eop[1] = ((value >> 16) & 0xFF); /*bit 16:23*/
+
+ drop = ptp_mib->TP_MIB_TNL_CNG_DROP_0.value;
+ drop += ptp_mib->TP_MIB_TNL_CNG_DROP_1.value;
+ drop += ptp_mib->TP_MIB_OFD_CHN_DROP_0.value;
+ drop += ptp_mib->TP_MIB_OFD_CHN_DROP_1.value;
+ drop += ptp_mib->TP_MIB_FCOE_DROP_0.value;
+ drop += ptp_mib->TP_MIB_FCOE_DROP_1.value;
+ drop += ptp_mib->TP_MIB_OFD_VLN_DROP_0.value;
+ drop += ptp_mib->TP_MIB_OFD_VLN_DROP_1.value;
+ drop += ptp_mib->TP_MIB_USM_DROP.value;
+
+ wtp->mps_tp.drops = drop;
+
+ drop = 0;
+ for (i = 0; i < 8; i++) {
+ value = t4_read_reg(padap, (A_MPS_RX_SE_CNT_IN0 + (i << 2)));
+
+ wtp->xgm_mps.sop[i] = ((value >> 8) & 0xFF); /*bits 8:15*/
+ wtp->xgm_mps.eop[i] = ((value >> 0) & 0xFF); /*bits 0:7*/
+ }
+ for (i = 0; i < 2; i++) {
+ value = t4_read_reg(padap, (A_MPS_RX_CLS_DROP_CNT0 + (i << 2)));
+ drop += (value & 0xFFFF) + ((value >> 16) & 0xFFFF);
+ }
+ wtp->xgm_mps.cls_drop = drop & 0xFF;
+
+ for (i = 0; i < 2; i++) {
+ value = t4_read_reg(padap, 0x30e20 + ((i * 4) << 12));
+ wtp->mac_porrx_aframestra_ok.sop[i] = value & 0xff;
+ wtp->mac_porrx_aframestra_ok.eop[i] = value & 0xff;
+ }
+
+ /*MAC_PORT_MTIP_1G10G_RX_etherStatsPkts*/
+ for (i = 0; i < 2; i++) {
+ value = t4_read_reg(padap, 0x30e60 + ((i * 4) << 12));
+ wtp->mac_porrx_etherstatspkts.sop[i] = value & 0xff;
+ wtp->mac_porrx_etherstatspkts.eop[i] = value & 0xff;
+ }
+
+ wtp->sge_pcie_ints.sop[0] = sge_dbg_reg->debug_PD_Req_Int0_cnt;
+ wtp->sge_pcie_ints.sop[1] = sge_dbg_reg->debug_PD_Req_Int1_cnt;
+ wtp->sge_pcie_ints.sop[2] = sge_dbg_reg->debug_PD_Req_Int2_cnt;
+ wtp->sge_pcie_ints.sop[3] = sge_dbg_reg->debug_PD_Req_Int3_cnt;
+
+	/* Add up the overflow drops on both ports. */
+ drop = 0;
+ for (i = 0; i < 2; i++) {
+ value = t4_read_reg(padap,
+ (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
+ (i << 3)));
+ drop += value;
+ value = t4_read_reg(padap,
+ (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
+ (i << 2)));
+ value = t4_read_reg(padap,
+ (A_MPS_STAT_RX_BG_0_LB_DROP_FRAME_L +
+ (i << 3)));
+ drop += value;
+ value = t4_read_reg(padap,
+ (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
+ (i << 2)));
+
+ value = t4_read_reg(padap,
+ (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
+ (i << 3)));
+ drop += value;
+ value = t4_read_reg(padap,
+ (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
+ (i << 3)));
+ value = t4_read_reg(padap,
+ (A_MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L +
+ (i << 3)));
+ drop += value;
+ value = t4_read_reg(padap,
+ (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
+ (i << 3)));
+
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES) +
+ (i * T5_PORT_STRIDE)));
+ drop += value;
+ }
+ wtp->xgm_mps.drop = (drop & 0xFF);
+
+ /* Add up the MPS errors that should result in dropped packets*/
+ err = 0;
+ for (i = 0; i < 2; i++) {
+
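+		/*
+		 * Each of these stats is a 64-bit counter split across an
+		 * _L/_H register pair; only the low word is accumulated
+		 * below, and the high word is read back but discarded.
+		 */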
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L) +
+ (i * T5_PORT_STRIDE)));
+ err += value;
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L) +
+ (i * T5_PORT_STRIDE) + 4));
+
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L) +
+ (i * T5_PORT_STRIDE)));
+ err += value;
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L) +
+ (i * T5_PORT_STRIDE) + 4));
+
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L) +
+ (i * T5_PORT_STRIDE)));
+ err += value;
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L) +
+ (i * T5_PORT_STRIDE) + 4));
+
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L) +
+ (i * T5_PORT_STRIDE)));
+ err += value;
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L) +
+ (i * T5_PORT_STRIDE) + 4));
+
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L) +
+ (i * T5_PORT_STRIDE)));
+ err += value;
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L) +
+ (i * T5_PORT_STRIDE) + 4));
+
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LESS_64B_L) +
+ (i * T5_PORT_STRIDE)));
+ err += value;
+ value = t4_read_reg(padap,
+ (T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LESS_64B_L) +
+ (i * T5_PORT_STRIDE) + 4));
+ }
+ wtp->xgm_mps.err = (err & 0xFF);
+
+ rc = write_compression_hdr(&scratch_buff, dbg_buff);
+
+ if (rc)
+ goto err1;
+
+ rc = compress_buff(&scratch_buff, dbg_buff);
+
+err1:
+ release_scratch_buff(&scratch_buff, dbg_buff);
+err:
+ return rc;
+}
+
+int collect_wtp_data(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ int rc = -1;
+
+ if (is_t5(padap))
+ rc = t5_wtp_data(pdbg_init, dbg_buff, cudbg_err);
+ else if (is_t6(padap))
+ rc = t6_wtp_data(pdbg_init, dbg_buff, cudbg_err);
+
+ return rc;
+}
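+
+/*
+ * Consumer-side sketch (illustrative, not part of the driver): the sop/eop
+ * counts collected above are snapshots taken at successive stages of the
+ * packet pipeline, so a post-processing tool can flag the hop where the
+ * counts diverge.  The helper below and its modulo-256 wrap assumption are
+ * ours.
+ */
+#if 0
+static u8
+wtp_stage_delta(u8 upstream, u8 downstream)
+{
+	/* the snapshots are 8-bit, so compare modulo 256 */
+	return ((u8)(upstream - downstream));
+}
+
+/*
+ * Usage: a persistent non-zero delta across ULP_RX, e.g.
+ *
+ *	wtp_stage_delta(wtp->ulprx_tpcside.sop[0], wtp->pmrx_ulprx.sop[0])
+ *
+ * suggests packets are being lost inside ULP_RX on channel 0.
+ */
+#endif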
diff --git a/sys/dev/cxgbe/cudbg/fastlz.c b/sys/dev/cxgbe/cudbg/fastlz.c
new file mode 100644
index 000000000000..41e5c99b2877
--- /dev/null
+++ b/sys/dev/cxgbe/cudbg/fastlz.c
@@ -0,0 +1,555 @@
+/*
+ FastLZ - lightning-fast lossless compression library
+
+ Copyright (C) 2007 Ariya Hidayat (ariya@kde.org)
+ Copyright (C) 2006 Ariya Hidayat (ariya@kde.org)
+ Copyright (C) 2005 Ariya Hidayat (ariya@kde.org)
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "osdep.h"
+#include "fastlz.h"
+
+#if !defined(FASTLZ_COMPRESSOR) && !defined(FASTLZ_DECOMPRESSOR)
+
+/*
+ * Always check for bound when decompressing.
+ * Generally it is best to leave it defined.
+ */
+#define FASTLZ_SAFE
+
+#if defined(WIN32) || defined(__NT__) || defined(_WIN32) || defined(__WIN32__)
+#if defined(_MSC_VER) || defined(__GNUC__)
+/* #include <windows.h> */
+#pragma warning(disable : 4242)
+#pragma warning(disable : 4244)
+#endif
+#endif
+
+/*
+ * Give hints to the compiler for branch prediction optimization.
+ */
+#if defined(__GNUC__) && (__GNUC__ > 2)
+#define FASTLZ_EXPECT_CONDITIONAL(c) (__builtin_expect((c), 1))
+#define FASTLZ_UNEXPECT_CONDITIONAL(c) (__builtin_expect((c), 0))
+#else
+#define FASTLZ_EXPECT_CONDITIONAL(c) (c)
+#define FASTLZ_UNEXPECT_CONDITIONAL(c) (c)
+#endif
+
+/*
+ * Use inlined functions for supported systems.
+ */
+#if defined(__GNUC__) || defined(__DMC__) || defined(__POCC__) ||\
+ defined(__WATCOMC__) || defined(__SUNPRO_C)
+#define FASTLZ_INLINE inline
+#elif defined(__BORLANDC__) || defined(_MSC_VER) || defined(__LCC__)
+#define FASTLZ_INLINE __inline
+#else
+#define FASTLZ_INLINE
+#endif
+
+/*
+ * Prevent accessing more than 8-bit at once, except on x86 architectures.
+ */
+#if !defined(FASTLZ_STRICT_ALIGN)
+#define FASTLZ_STRICT_ALIGN
+#if defined(__i386__) || defined(__386) /* GNU C, Sun Studio */
+#undef FASTLZ_STRICT_ALIGN
+#elif defined(__i486__) || defined(__i586__) || defined(__i686__) /* GNU C */
+#undef FASTLZ_STRICT_ALIGN
+#elif defined(_M_IX86) /* Intel, MSVC */
+#undef FASTLZ_STRICT_ALIGN
+#elif defined(__386)
+#undef FASTLZ_STRICT_ALIGN
+#elif defined(_X86_) /* MinGW */
+#undef FASTLZ_STRICT_ALIGN
+#elif defined(__I86__) /* Digital Mars */
+#undef FASTLZ_STRICT_ALIGN
+#endif
+#endif
+
+/*
+ * FIXME: use preprocessor magic to set this on different platforms!
+ */
+
+#define MAX_COPY 32
+#define MAX_LEN 264 /* 256 + 8 */
+#define MAX_DISTANCE 8192
+
+#if !defined(FASTLZ_STRICT_ALIGN)
+#define FASTLZ_READU16(p) (*((const unsigned short *)(p)))
+#else
+#define FASTLZ_READU16(p) ((p)[0] | (p)[1]<<8)
+#endif
+
+#define HASH_LOG 13
+#define HASH_SIZE (1 << HASH_LOG)
+#define HASH_MASK (HASH_SIZE - 1)
+#define HASH_FUNCTION(v, p) {\
+ v = FASTLZ_READU16(p);\
+ v ^= FASTLZ_READU16(p + 1)^\
+ (v>>(16 - HASH_LOG));\
+ v &= HASH_MASK;\
+ }
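+
+/*
+ * Worked example (ours, byte-access path): hashing p = "aab":
+ *
+ *	v  = FASTLZ_READU16(p)     = 0x6161
+ *	v ^= FASTLZ_READU16(p + 1) ^ (v >> 3)
+ *	   = 0x6161 ^ 0x6261 ^ 0x0c2c = 0x0f2c
+ *	v &= HASH_MASK;  v stays 0x0f2c, which fits in HASH_LOG = 13 bits
+ */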
+
+#undef FASTLZ_LEVEL
+#define FASTLZ_LEVEL 1
+
+#undef FASTLZ_COMPRESSOR
+#undef FASTLZ_DECOMPRESSOR
+#define FASTLZ_COMPRESSOR fastlz1_compress
+#define FASTLZ_DECOMPRESSOR fastlz1_decompress
+static FASTLZ_INLINE int FASTLZ_COMPRESSOR(const void *input, int length,
+ void *output);
+static FASTLZ_INLINE int FASTLZ_DECOMPRESSOR(const void *input, int length,
+ void *output, int maxout);
+#include "fastlz.c"
+
+#undef FASTLZ_LEVEL
+#define FASTLZ_LEVEL 2
+
+#undef MAX_DISTANCE
+#define MAX_DISTANCE 8191
+#define MAX_FARDISTANCE (65535 + MAX_DISTANCE - 1)
+
+#undef FASTLZ_COMPRESSOR
+#undef FASTLZ_DECOMPRESSOR
+#define FASTLZ_COMPRESSOR fastlz2_compress
+#define FASTLZ_DECOMPRESSOR fastlz2_decompress
+static FASTLZ_INLINE int FASTLZ_COMPRESSOR(const void *input, int length,
+ void *output);
+static FASTLZ_INLINE int FASTLZ_DECOMPRESSOR(const void *input, int length,
+ void *output, int maxout);
+#include "fastlz.c"
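+
+/*
+ * The two self-#includes above re-enter this file with FASTLZ_LEVEL,
+ * FASTLZ_COMPRESSOR and FASTLZ_DECOMPRESSOR defined, so each re-entry takes
+ * the #else branch below and stamps out one level-specific
+ * compressor/decompressor pair.  A minimal standalone sketch of the same
+ * trick (file and symbol names hypothetical):
+ */
+#if 0
+/* template.c */
+#ifndef IMPL
+#define IMPL 1
+#define FN impl1
+#include "template.c"	/* emits impl1() */
+#undef IMPL
+#undef FN
+#define IMPL 2
+#define FN impl2
+#include "template.c"	/* emits impl2() */
+#else
+static int FN(void) { return (IMPL); }
+#endif
+#endif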
+
+int fastlz_compress(const void *input, int length, void *output)
+{
+ /* for short block, choose fastlz1 */
+ if (length < 65536)
+ return fastlz1_compress(input, length, output);
+
+ /* else... */
+ return fastlz2_compress(input, length, output);
+}
+
+int fastlz_decompress(const void *input, int length, void *output, int maxout)
+{
+ /* magic identifier for compression level */
+ int level = ((*(const unsigned char *)input) >> 5) + 1;
+
+ if (level == 1)
+ return fastlz1_decompress(input, length, output, maxout);
+ if (level == 2)
+ return fastlz2_decompress(input, length, output, maxout);
+
+ /* unknown level, trigger error */
+ return 0;
+}
+
+int fastlz_compress_level(int level, const void *input, int length,
+ void *output)
+{
+ if (level == 1)
+ return fastlz1_compress(input, length, output);
+ if (level == 2)
+ return fastlz2_compress(input, length, output);
+
+ return 0;
+}
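+
+/*
+ * Usage sketch (illustrative): a round trip through the entry points above.
+ * FastLZ requires the compression output buffer to be at least 5% larger
+ * than the input and no smaller than 66 bytes; the sizes and variables here
+ * are our own.
+ */
+#if 0
+	char in[4096], comp[4096 + 4096 / 20 + 66], back[4096];
+	int clen, dlen;
+
+	clen = fastlz_compress(in, sizeof(in), comp);
+	dlen = fastlz_decompress(comp, clen, back, sizeof(back));
+	/* on success dlen == sizeof(in); a corrupt stream yields 0 */
+#endif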
+
+#else /* !defined(FASTLZ_COMPRESSOR) && !defined(FASTLZ_DECOMPRESSOR) */
+
+
+static FASTLZ_INLINE int FASTLZ_COMPRESSOR(const void *input, int length,
+ void *output)
+{
+ const unsigned char *ip = (const unsigned char *) input;
+ const unsigned char *ip_bound = ip + length - 2;
+ const unsigned char *ip_limit = ip + length - 12;
+ unsigned char *op = (unsigned char *) output;
+ static const unsigned char *g_htab[HASH_SIZE];
+
+ const unsigned char **htab = g_htab;
+ const unsigned char **hslot;
+ unsigned int hval;
+
+ unsigned int copy;
+
+ /* sanity check */
+ if (FASTLZ_UNEXPECT_CONDITIONAL(length < 4)) {
+ if (length) {
+ /* create literal copy only */
+ *op++ = length - 1;
+ ip_bound++;
+ while (ip <= ip_bound)
+ *op++ = *ip++;
+ return length + 1;
+ } else
+ return 0;
+ }
+
+ /* initializes hash table */
+ for (hslot = htab; hslot < htab + HASH_SIZE; hslot++)
+ *hslot = ip;
+
+ /* we start with literal copy */
+ copy = 2;
+ *op++ = MAX_COPY - 1;
+ *op++ = *ip++;
+ *op++ = *ip++;
+
+ /* main loop */
+ while (FASTLZ_EXPECT_CONDITIONAL(ip < ip_limit)) {
+ const unsigned char *ref;
+ unsigned int distance;
+
+ /* minimum match length */
+ unsigned int len = 3;
+
+ /* comparison starting-point */
+ const unsigned char *anchor = ip;
+
+ /* check for a run */
+#if FASTLZ_LEVEL == 2
+ if (ip[0] == ip[-1] &&
+ FASTLZ_READU16(ip - 1) == FASTLZ_READU16(ip + 1)) {
+ distance = 1;
+ ip += 3;
+ ref = anchor - 1 + 3;
+ goto match;
+ }
+#endif
+
+ /* find potential match */
+ HASH_FUNCTION(hval, ip);
+ hslot = htab + hval;
+ ref = htab[hval];
+
+ /* calculate distance to the match */
+ distance = anchor - ref;
+
+ /* update hash table */
+ *hslot = anchor;
+
+ if (!ref)
+ goto literal;
+ /* is this a match? check the first 3 bytes */
+ if (distance == 0 ||
+#if FASTLZ_LEVEL == 1
+ (distance >= MAX_DISTANCE) ||
+#else
+ (distance >= MAX_FARDISTANCE) ||
+#endif
+ *ref++ != *ip++ || *ref++ != *ip++ ||
+ *ref++ != *ip++)
+ goto literal;
+
+#if FASTLZ_LEVEL == 2
+ /* far, needs at least 5-byte match */
+ if (distance >= MAX_DISTANCE) {
+ if (*ip++ != *ref++ || *ip++ != *ref++)
+ goto literal;
+ len += 2;
+ }
+
+match:
+#endif
+
+ /* last matched byte */
+ ip = anchor + len;
+
+ /* distance is biased */
+ distance--;
+
+ if (!distance) {
+ /* zero distance means a run */
+ unsigned char x = ip[-1];
+ while (ip < ip_bound)
+ if (*ref++ != x)
+ break;
+ else
+ ip++;
+ } else
+ for (;;) {
+				/* safe because of the outer check
+				 * against the ip limit */
+ if (*ref++ != *ip++)
+ break;
+ if (*ref++ != *ip++)
+ break;
+ if (*ref++ != *ip++)
+ break;
+ if (*ref++ != *ip++)
+ break;
+ if (*ref++ != *ip++)
+ break;
+ if (*ref++ != *ip++)
+ break;
+ if (*ref++ != *ip++)
+ break;
+ if (*ref++ != *ip++)
+ break;
+ while (ip < ip_bound)
+ if (*ref++ != *ip++)
+ break;
+ break;
+ }
+
+ /* if we have copied something, adjust the copy count */
+ if (copy)
+ /* copy is biased, '0' means 1 byte copy */
+ *(op - copy - 1) = copy - 1;
+ else
+ /* back, to overwrite the copy count */
+ op--;
+
+ /* reset literal counter */
+ copy = 0;
+
+ /* length is biased, '1' means a match of 3 bytes */
+ ip -= 3;
+ len = ip - anchor;
+
+ /* encode the match */
+#if FASTLZ_LEVEL == 2
+ if (distance < MAX_DISTANCE) {
+ if (len < 7) {
+ *op++ = (len << 5) + (distance >> 8);
+ *op++ = (distance & 255);
+ } else {
+ *op++ = (7 << 5) + (distance >> 8);
+ for (len -= 7; len >= 255; len -= 255)
+ *op++ = 255;
+ *op++ = len;
+ *op++ = (distance & 255);
+ }
+ } else {
+			/* far away, but not yet in another galaxy... */
+ if (len < 7) {
+ distance -= MAX_DISTANCE;
+ *op++ = (len << 5) + 31;
+ *op++ = 255;
+ *op++ = distance >> 8;
+ *op++ = distance & 255;
+ } else {
+ distance -= MAX_DISTANCE;
+ *op++ = (7 << 5) + 31;
+ for (len -= 7; len >= 255; len -= 255)
+ *op++ = 255;
+ *op++ = len;
+ *op++ = 255;
+ *op++ = distance >> 8;
+ *op++ = distance & 255;
+ }
+ }
+#else
+
+ if (FASTLZ_UNEXPECT_CONDITIONAL(len > MAX_LEN - 2))
+ while (len > MAX_LEN - 2) {
+ *op++ = (7 << 5) + (distance >> 8);
+ *op++ = MAX_LEN - 2 - 7 - 2;
+ *op++ = (distance & 255);
+ len -= MAX_LEN - 2;
+ }
+
+ if (len < 7) {
+ *op++ = (len << 5) + (distance >> 8);
+ *op++ = (distance & 255);
+ } else {
+ *op++ = (7 << 5) + (distance >> 8);
+ *op++ = len - 7;
+ *op++ = (distance & 255);
+ }
+#endif
+
+ /* update the hash at match boundary */
+ HASH_FUNCTION(hval, ip);
+ htab[hval] = ip++;
+ HASH_FUNCTION(hval, ip);
+ htab[hval] = ip++;
+
+ /* assuming literal copy */
+ *op++ = MAX_COPY - 1;
+
+ continue;
+
+literal:
+ *op++ = *anchor++;
+ ip = anchor;
+ copy++;
+ if (FASTLZ_UNEXPECT_CONDITIONAL(copy == MAX_COPY)) {
+ copy = 0;
+ *op++ = MAX_COPY - 1;
+ }
+ }
+
+ /* left-over as literal copy */
+ ip_bound++;
+ while (ip <= ip_bound) {
+ *op++ = *ip++;
+ copy++;
+ if (copy == MAX_COPY) {
+ copy = 0;
+ *op++ = MAX_COPY - 1;
+ }
+ }
+
+ /* if we have copied something, adjust the copy length */
+ if (copy)
+ *(op - copy - 1) = copy - 1;
+ else
+ op--;
+
+#if FASTLZ_LEVEL == 2
+ /* marker for fastlz2 */
+ *(unsigned char *)output |= (1 << 5);
+#endif
+
+ return op - (unsigned char *)output;
+}
+
+static FASTLZ_INLINE int FASTLZ_DECOMPRESSOR(const void *input, int length,
+ void *output, int maxout)
+{
+ const unsigned char *ip = (const unsigned char *) input;
+ const unsigned char *ip_limit = ip + length;
+ unsigned char *op = (unsigned char *) output;
+ unsigned char *op_limit = op + maxout;
+ unsigned int ctrl = (*ip++) & 31;
+ int loop = 1;
+
+ do {
+ const unsigned char *ref = op;
+ unsigned int len = ctrl >> 5;
+ unsigned int ofs = (ctrl & 31) << 8;
+
+ if (ctrl >= 32) {
+#if FASTLZ_LEVEL == 2
+ unsigned char code;
+#endif
+ len--;
+ ref -= ofs;
+ if (len == 7 - 1)
+#if FASTLZ_LEVEL == 1
+ len += *ip++;
+ ref -= *ip++;
+#else
+ do {
+ code = *ip++;
+ len += code;
+ } while (code == 255);
+ code = *ip++;
+ ref -= code;
+
+ /* match from 16-bit distance */
+ if (FASTLZ_UNEXPECT_CONDITIONAL(code == 255))
+ if (FASTLZ_EXPECT_CONDITIONAL(ofs ==
+ (31 << 8))) {
+ ofs = (*ip++) << 8;
+ ofs += *ip++;
+ ref = op - ofs - MAX_DISTANCE;
+ }
+#endif
+
+#ifdef FASTLZ_SAFE
+ if (FASTLZ_UNEXPECT_CONDITIONAL(op + len + 3 >
+ op_limit))
+ return 0;
+
+ if (FASTLZ_UNEXPECT_CONDITIONAL(ref - 1 <
+ (unsigned char *)output)
+ )
+ return 0;
+#endif
+
+ if (FASTLZ_EXPECT_CONDITIONAL(ip < ip_limit))
+ ctrl = *ip++;
+ else
+ loop = 0;
+
+ if (ref == op) {
+ /* optimize copy for a run */
+ unsigned char b = ref[-1];
+ *op++ = b;
+ *op++ = b;
+ *op++ = b;
+ for (; len; --len)
+ *op++ = b;
+ } else {
+#if !defined(FASTLZ_STRICT_ALIGN)
+ const unsigned short *p;
+ unsigned short *q;
+#endif
+ /* copy from reference */
+ ref--;
+ *op++ = *ref++;
+ *op++ = *ref++;
+ *op++ = *ref++;
+
+#if !defined(FASTLZ_STRICT_ALIGN)
+ /* copy a byte, so that now it's word aligned */
+ if (len & 1) {
+ *op++ = *ref++;
+ len--;
+ }
+
+ /* copy 16-bit at once */
+ q = (unsigned short *) op;
+ op += len;
+ p = (const unsigned short *) ref;
+ for (len >>= 1; len > 4; len -= 4) {
+ *q++ = *p++;
+ *q++ = *p++;
+ *q++ = *p++;
+ *q++ = *p++;
+ }
+ for (; len; --len)
+ *q++ = *p++;
+#else
+ for (; len; --len)
+ *op++ = *ref++;
+#endif
+ }
+ } else {
+ ctrl++;
+#ifdef FASTLZ_SAFE
+ if (FASTLZ_UNEXPECT_CONDITIONAL(op + ctrl > op_limit))
+ return 0;
+ if (FASTLZ_UNEXPECT_CONDITIONAL(ip + ctrl > ip_limit))
+ return 0;
+#endif
+
+ *op++ = *ip++;
+ for (--ctrl; ctrl; ctrl--)
+ *op++ = *ip++;
+
+ loop = FASTLZ_EXPECT_CONDITIONAL(ip < ip_limit);
+ if (loop)
+ ctrl = *ip++;
+ }
+ } while (FASTLZ_EXPECT_CONDITIONAL(loop));
+
+ return op - (unsigned char *)output;
+}
+
+#endif /* !defined(FASTLZ_COMPRESSOR) && !defined(FASTLZ_DECOMPRESSOR) */
diff --git a/sys/dev/cxgbe/cudbg/fastlz.h b/sys/dev/cxgbe/cudbg/fastlz.h
new file mode 100644
index 000000000000..5aa474fa5a87
--- /dev/null
+++ b/sys/dev/cxgbe/cudbg/fastlz.h
@@ -0,0 +1,62 @@
+/*
+ FastLZ - lightning-fast lossless compression library
+
+ Copyright (C) 2007 Ariya Hidayat (ariya@kde.org)
+ Copyright (C) 2006 Ariya Hidayat (ariya@kde.org)
+ Copyright (C) 2005 Ariya Hidayat (ariya@kde.org)
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+
+ $FreeBSD$
+ */
+#ifndef FASTLZ_H
+#define FASTLZ_H
+
+#define FASTLZ_VERSION 0x000100
+
+#define FASTLZ_VERSION_MAJOR 0
+#define FASTLZ_VERSION_MINOR 0
+#define FASTLZ_VERSION_REVISION 0
+
+#define FASTLZ_VERSION_STRING "0.1.0"
+
+struct cudbg_buffer;
+
+int fastlz_compress(const void *input, int length, void *output);
+int fastlz_compress_level(int level, const void *input, int length,
+ void *output);
+int fastlz_decompress(const void *input, int length, void *output, int maxout);
+
+/* prototypes */
+
+int write_magic(struct cudbg_buffer *);
+int detect_magic(struct cudbg_buffer *);
+
+int write_to_buf(void *, u32, u32 *, void *, u32);
+int read_from_buf(void *, u32, u32 *, void *, u32);
+
+int write_chunk_header(struct cudbg_buffer *, int, int, unsigned long,
+ unsigned long, unsigned long);
+
+int read_chunk_header(struct cudbg_buffer *, int *, int *, unsigned long *,
+		      unsigned long *, unsigned long *);
+
+unsigned long block_compress(const unsigned char *, unsigned long length,
+ unsigned char *);
+#endif /* FASTLZ_H */
diff --git a/sys/dev/cxgbe/cudbg/fastlz_api.c b/sys/dev/cxgbe/cudbg/fastlz_api.c
new file mode 100644
index 000000000000..a513557ad352
--- /dev/null
+++ b/sys/dev/cxgbe/cudbg/fastlz_api.c
@@ -0,0 +1,531 @@
+/*
+ FastLZ - lightning-fast lossless compression library
+
+ Copyright (C) 2007 Ariya Hidayat (ariya@kde.org)
+ Copyright (C) 2006 Ariya Hidayat (ariya@kde.org)
+ Copyright (C) 2005 Ariya Hidayat (ariya@kde.org)
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "osdep.h"
+#include "cudbg.h"
+#include "cudbg_lib_common.h"
+#include "fastlz.h"
+
+static unsigned char sixpack_magic[8] = {137, '6', 'P', 'K', 13, 10, 26, 10};
+
+#define CUDBG_BLOCK_SIZE (63*1024)
+#define CUDBG_CHUNK_BUF_LEN 16
+#define CUDBG_MIN_COMPR_LEN 32 /*min data length for applying compression*/
+
+/* for Adler-32 checksum algorithm, see RFC 1950 Section 8.2 */
+
+#define ADLER32_BASE 65521
+
+static inline unsigned long update_adler32(unsigned long checksum,
+ const void *buf, int len)
+{
+ const unsigned char *ptr = (const unsigned char *)buf;
+ unsigned long s1 = checksum & 0xffff;
+ unsigned long s2 = (checksum >> 16) & 0xffff;
+
+ while (len > 0) {
+ unsigned k = len < 5552 ? len : 5552;
+ len -= k;
+
+ while (k >= 8) {
+ s1 += *ptr++; s2 += s1;
+ s1 += *ptr++; s2 += s1;
+ s1 += *ptr++; s2 += s1;
+ s1 += *ptr++; s2 += s1;
+ s1 += *ptr++; s2 += s1;
+ s1 += *ptr++; s2 += s1;
+ s1 += *ptr++; s2 += s1;
+ s1 += *ptr++; s2 += s1;
+ k -= 8;
+ }
+
+ while (k-- > 0) {
+ s1 += *ptr++; s2 += s1;
+ }
+ s1 = s1 % ADLER32_BASE;
+ s2 = s2 % ADLER32_BASE;
+ }
+ return (s2 << 16) + s1;
+}
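+
+/*
+ * Sanity-check sketch (illustrative): the checksum is seeded with 1 per
+ * RFC 1950, and the well-known Adler-32 test vector for the 9-byte ASCII
+ * string "Wikipedia" is 0x11e60398:
+ *
+ *	update_adler32(1L, "Wikipedia", 9) == 0x11e60398UL
+ */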
+
+int write_magic(struct cudbg_buffer *_out_buff)
+{
+ int rc;
+
+ rc = write_to_buf(_out_buff->data, _out_buff->size, &_out_buff->offset,
+ sixpack_magic, 8);
+
+ return rc;
+}
+
+int write_to_buf(void *out_buf, u32 out_buf_size, u32 *offset, void *in_buf,
+ u32 in_buf_size)
+{
+ int rc = 0;
+
+ if (*offset >= out_buf_size) {
+ rc = CUDBG_STATUS_OUTBUFF_OVERFLOW;
+ goto err;
+ }
+
+ memcpy((char *)out_buf + *offset, in_buf, in_buf_size);
+ *offset = *offset + in_buf_size;
+
+err:
+ return rc;
+}
+
+int read_from_buf(void *in_buf, u32 in_buf_size, u32 *offset, void *out_buf,
+ u32 out_buf_size)
+{
+ if (in_buf_size - *offset < out_buf_size)
+ return 0;
+
+ memcpy((char *)out_buf, (char *)in_buf + *offset, out_buf_size);
+ *offset = *offset + out_buf_size;
+ return out_buf_size;
+}
+
+int write_chunk_header(struct cudbg_buffer *_outbuf, int id, int options,
+ unsigned long size, unsigned long checksum,
+ unsigned long extra)
+{
+ unsigned char buffer[CUDBG_CHUNK_BUF_LEN];
+ int rc = 0;
+
+ buffer[0] = id & 255;
+ buffer[1] = (unsigned char)(id >> 8);
+ buffer[2] = options & 255;
+ buffer[3] = (unsigned char)(options >> 8);
+ buffer[4] = size & 255;
+ buffer[5] = (size >> 8) & 255;
+ buffer[6] = (size >> 16) & 255;
+ buffer[7] = (size >> 24) & 255;
+ buffer[8] = checksum & 255;
+ buffer[9] = (checksum >> 8) & 255;
+ buffer[10] = (checksum >> 16) & 255;
+ buffer[11] = (checksum >> 24) & 255;
+ buffer[12] = extra & 255;
+ buffer[13] = (extra >> 8) & 255;
+ buffer[14] = (extra >> 16) & 255;
+ buffer[15] = (extra >> 24) & 255;
+
+ rc = write_to_buf(_outbuf->data, _outbuf->size, &_outbuf->offset,
+ buffer, 16);
+
+ return rc;
+}
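+
+/*
+ * Resulting chunk header layout, 16 bytes, all fields little-endian:
+ *
+ *	bytes  0-1	id
+ *	bytes  2-3	options
+ *	bytes  4-7	size
+ *	bytes  8-11	checksum (Adler-32 of the chunk payload)
+ *	bytes 12-15	extra (holds the uncompressed length of data chunks)
+ */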
+
+int write_compression_hdr(struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *pout_buff)
+{
+ struct cudbg_buffer tmp_buffer;
+ unsigned long fsize = pin_buff->size;
+ unsigned char *buffer;
+ unsigned long checksum;
+ int rc;
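+	/* fixed placeholder name stored in the file-entry chunk */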
+ char *shown_name = "abc";
+
+ /* Always release inner scratch buffer, before releasing outer. */
+ rc = get_scratch_buff(pout_buff, 10, &tmp_buffer);
+
+ if (rc)
+ goto err;
+
+ buffer = (unsigned char *)tmp_buffer.data;
+
+ rc = write_magic(pout_buff);
+
+ if (rc)
+ goto err1;
+
+ /* chunk for File Entry */
+ buffer[0] = fsize & 255;
+ buffer[1] = (fsize >> 8) & 255;
+ buffer[2] = (fsize >> 16) & 255;
+ buffer[3] = (fsize >> 24) & 255;
+ buffer[4] = 0;
+ buffer[5] = 0;
+ buffer[6] = 0;
+ buffer[7] = 0;
+ buffer[8] = (strlen(shown_name)+1) & 255;
+ buffer[9] = (unsigned char)((strlen(shown_name)+1) >> 8);
+ checksum = 1L;
+ checksum = update_adler32(checksum, buffer, 10);
+ checksum = update_adler32(checksum, shown_name,
+ (int)strlen(shown_name)+1);
+
+ rc = write_chunk_header(pout_buff, 1, 0,
+ 10+(unsigned long)strlen(shown_name)+1,
+ checksum, 0);
+
+ if (rc)
+ goto err1;
+
+ rc = write_to_buf(pout_buff->data, pout_buff->size,
+ &(pout_buff->offset), buffer, 10);
+
+ if (rc)
+ goto err1;
+
+ rc = write_to_buf(pout_buff->data, pout_buff->size,
+ &(pout_buff->offset), shown_name,
+ (u32)strlen(shown_name)+1);
+
+ if (rc)
+ goto err1;
+
+err1:
+ release_scratch_buff(&tmp_buffer, pout_buff);
+err:
+ return rc;
+}
+
+int compress_buff(struct cudbg_buffer *pin_buff, struct cudbg_buffer *pout_buff)
+{
+ struct cudbg_buffer tmp_buffer;
+ struct cudbg_hdr *cudbg_hdr;
+ unsigned long checksum;
+ unsigned char *result;
+ unsigned int bytes_read;
+ int chunk_size, level = 2, rc = 0;
+ int compress_method = 1;
+
+ bytes_read = pin_buff->size;
+ rc = get_scratch_buff(pout_buff, CUDBG_BLOCK_SIZE, &tmp_buffer);
+
+ if (rc)
+ goto err;
+
+ result = (unsigned char *)tmp_buffer.data;
+
+ if (bytes_read < 32)
+ compress_method = 0;
+
+ cudbg_hdr = (struct cudbg_hdr *) pout_buff->data;
+
+ switch (compress_method) {
+ case 1:
+ chunk_size = fastlz_compress_level(level, pin_buff->data,
+ bytes_read, result);
+
+ checksum = update_adler32(1L, result, chunk_size);
+
+ if ((chunk_size > 62000) && (cudbg_hdr->reserved[7] < (u32)
+ chunk_size)) /* 64512 */
+ cudbg_hdr->reserved[7] = (u32) chunk_size;
+
+ rc = write_chunk_header(pout_buff, 17, 1, chunk_size, checksum,
+ bytes_read);
+
+ if (rc)
+ goto err_put_buff;
+
+ rc = write_to_buf(pout_buff->data, pout_buff->size,
+ &pout_buff->offset, result, chunk_size);
+
+ if (rc)
+ goto err_put_buff;
+
+ break;
+
+ /* uncompressed, also fallback method */
+ case 0:
+ default:
+ checksum = update_adler32(1L, pin_buff->data, bytes_read);
+
+ rc = write_chunk_header(pout_buff, 17, 0, bytes_read, checksum,
+ bytes_read);
+
+ if (rc)
+ goto err_put_buff;
+
+ rc = write_to_buf(pout_buff->data, pout_buff->size,
+ &pout_buff->offset, pin_buff->data,
+ bytes_read);
+ if (rc)
+ goto err_put_buff;
+
+ break;
+ }
+
+err_put_buff:
+ release_scratch_buff(&tmp_buffer, pout_buff);
+err:
+ return rc;
+}
+
+/* return non-zero if magic sequence is detected */
+/* note: advances the buffer's read offset past the magic bytes */
+int detect_magic(struct cudbg_buffer *_c_buff)
+{
+ unsigned char buffer[8];
+ size_t bytes_read;
+ int c;
+
+ bytes_read = read_from_buf(_c_buff->data, _c_buff->size,
+ &_c_buff->offset, buffer, 8);
+
+ if (bytes_read < 8)
+ return 0;
+
+ for (c = 0; c < 8; c++)
+ if (buffer[c] != sixpack_magic[c])
+ return 0;
+
+ return -1;
+}
+
+static inline unsigned long readU16(const unsigned char *ptr)
+{
+ return ptr[0]+(ptr[1]<<8);
+}
+
+static inline unsigned long readU32(const unsigned char *ptr)
+{
+ return ptr[0]+(ptr[1]<<8)+(ptr[2]<<16)+(ptr[3]<<24);
+}
+
+int read_chunk_header(struct cudbg_buffer *pc_buff, int *pid, int *poptions,
+ unsigned long *psize, unsigned long *pchecksum,
+ unsigned long *pextra)
+{
+ unsigned char buffer[CUDBG_CHUNK_BUF_LEN];
+ int byte_r = read_from_buf(pc_buff->data, pc_buff->size,
+ &pc_buff->offset, buffer, 16);
+ if (byte_r == 0)
+ return 0;
+
+ *pid = readU16(buffer) & 0xffff;
+ *poptions = readU16(buffer+2) & 0xffff;
+ *psize = readU32(buffer+4) & 0xffffffff;
+ *pchecksum = readU32(buffer+8) & 0xffffffff;
+ *pextra = readU32(buffer+12) & 0xffffffff;
+ return 0;
+}
+
+int validate_buffer(struct cudbg_buffer *compressed_buffer)
+{
+ if (!detect_magic(compressed_buffer))
+ return CUDBG_STATUS_INVALID_BUFF;
+
+ return 0;
+}
+
+int decompress_buffer(struct cudbg_buffer *pc_buff,
+ struct cudbg_buffer *pd_buff)
+{
+ struct cudbg_buffer tmp_compressed_buffer;
+ struct cudbg_buffer tmp_decompressed_buffer;
+ unsigned char *compressed_buffer;
+ unsigned char *decompressed_buffer;
+ unsigned char buffer[CUDBG_MIN_COMPR_LEN];
+ unsigned long chunk_size;
+ unsigned long chunk_checksum;
+ unsigned long chunk_extra;
+ unsigned long checksum;
+ unsigned long total_extracted = 0;
+ unsigned long r;
+ unsigned long remaining;
+ unsigned long bytes_read;
+ u32 decompressed_size = 0;
+ int chunk_id, chunk_options, rc;
+
+ if (pd_buff->size < 2 * CUDBG_BLOCK_SIZE)
+ return CUDBG_STATUS_SMALL_BUFF;
+
+ rc = get_scratch_buff(pd_buff, CUDBG_BLOCK_SIZE,
+ &tmp_compressed_buffer);
+
+ if (rc)
+ goto err_cbuff;
+
+ rc = get_scratch_buff(pd_buff, CUDBG_BLOCK_SIZE,
+ &tmp_decompressed_buffer);
+ if (rc)
+ goto err_dcbuff;
+
+ compressed_buffer = (unsigned char *)tmp_compressed_buffer.data;
+ decompressed_buffer = (unsigned char *)tmp_decompressed_buffer.data;
+
+ /* main loop */
+
+ for (;;) {
+ if (pc_buff->offset > pc_buff->size)
+ break;
+
+ rc = read_chunk_header(pc_buff, &chunk_id, &chunk_options,
+ &chunk_size, &chunk_checksum,
+ &chunk_extra);
+ if (rc != 0)
+ break;
+
+ /* skip 8+16 */
+ if ((chunk_id == 1) && (chunk_size > 10) &&
+ (chunk_size < CUDBG_BLOCK_SIZE)) {
+
+ bytes_read = read_from_buf(pc_buff->data, pc_buff->size,
+ &pc_buff->offset, buffer,
+ chunk_size);
+
+ if (bytes_read == 0)
+ return 0;
+
+ checksum = update_adler32(1L, buffer, chunk_size);
+ if (checksum != chunk_checksum)
+ return CUDBG_STATUS_CHKSUM_MISSMATCH;
+
+ decompressed_size = (u32)readU32(buffer);
+
+ if (pd_buff->size < decompressed_size) {
+
+ pd_buff->size = 2 * CUDBG_BLOCK_SIZE +
+ decompressed_size;
+ pc_buff->offset -= chunk_size + 16;
+ return CUDBG_STATUS_SMALL_BUFF;
+ }
+ total_extracted = 0;
+
+ }
+
+ if (chunk_size > CUDBG_BLOCK_SIZE) {
+ /* Release old allocated memory */
+ release_scratch_buff(&tmp_decompressed_buffer, pd_buff);
+ release_scratch_buff(&tmp_compressed_buffer, pd_buff);
+
+ /* allocate new memory with chunk_size size */
+ rc = get_scratch_buff(pd_buff, chunk_size,
+ &tmp_compressed_buffer);
+ if (rc)
+ goto err_cbuff;
+
+ rc = get_scratch_buff(pd_buff, chunk_size,
+ &tmp_decompressed_buffer);
+ if (rc)
+ goto err_dcbuff;
+
+ compressed_buffer = (unsigned char *)tmp_compressed_buffer.data;
+ decompressed_buffer = (unsigned char *)tmp_decompressed_buffer.data;
+ }
+
+ if ((chunk_id == 17) && decompressed_size) {
+ /* uncompressed */
+ switch (chunk_options) {
+ /* stored, simply copy to output */
+ case 0:
+ total_extracted += chunk_size;
+ remaining = chunk_size;
+ checksum = 1L;
+ for (;;) {
+				/* Write a function for this */
+ r = (CUDBG_BLOCK_SIZE < remaining) ?
+ CUDBG_BLOCK_SIZE : remaining;
+ bytes_read =
+ read_from_buf(pc_buff->data,
+ pc_buff->size,
+ &pc_buff->offset, buffer,
+ r);
+
+ if (bytes_read == 0)
+ return 0;
+
+ write_to_buf(pd_buff->data,
+ pd_buff->size,
+ &pd_buff->offset, buffer,
+ bytes_read);
+ checksum = update_adler32(checksum,
+ buffer,
+ bytes_read);
+ remaining -= bytes_read;
+
+ /* verify everything is written
+ * correctly */
+ if (checksum != chunk_checksum)
+ return
+ CUDBG_STATUS_CHKSUM_MISSMATCH;
+ }
+
+ break;
+
+ /* compressed using FastLZ */
+ case 1:
+ bytes_read = read_from_buf(pc_buff->data,
+ pc_buff->size,
+ &pc_buff->offset,
+ compressed_buffer,
+ chunk_size);
+
+ if (bytes_read == 0)
+ return 0;
+
+ checksum = update_adler32(1L, compressed_buffer,
+ chunk_size);
+ total_extracted += chunk_extra;
+
+ /* verify that the chunk data is correct */
+ if (checksum != chunk_checksum) {
+ return CUDBG_STATUS_CHKSUM_MISSMATCH;
+ } else {
+ /* decompress and verify */
+ remaining =
+ fastlz_decompress(compressed_buffer,
+ chunk_size,
+ decompressed_buffer,
+ chunk_extra);
+
+ if (remaining != chunk_extra) {
+ rc =
+ CUDBG_STATUS_DECOMPRESS_FAIL;
+ goto err;
+ } else {
+ write_to_buf(pd_buff->data,
+ pd_buff->size,
+ &pd_buff->offset,
+ decompressed_buffer,
+ chunk_extra);
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ }
+
+ }
+
+err:
+ release_scratch_buff(&tmp_decompressed_buffer, pd_buff);
+err_dcbuff:
+ release_scratch_buff(&tmp_compressed_buffer, pd_buff);
+
+err_cbuff:
+ return rc;
+}
+
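+
+/*
+ * Caller-side sketch (illustrative): when the destination is too small,
+ * decompress_buffer() stores the required size in pd_buff->size, rewinds
+ * the compressed buffer's read offset, and returns CUDBG_STATUS_SMALL_BUFF,
+ * so the consumer is expected to reallocate and retry.  my_realloc() is
+ * hypothetical.
+ */
+#if 0
+	rc = validate_buffer(&c_buff);	/* checks the sixpack magic bytes */
+	if (rc)
+		return (rc);
+	do {
+		rc = decompress_buffer(&c_buff, &d_buff);
+		if (rc == CUDBG_STATUS_SMALL_BUFF)
+			d_buff.data = my_realloc(d_buff.data, d_buff.size);
+	} while (rc == CUDBG_STATUS_SMALL_BUFF);
+#endif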