aboutsummaryrefslogtreecommitdiff
path: root/sys/dev/cxgbe/common
diff options
context:
space:
mode:
Diffstat (limited to 'sys/dev/cxgbe/common')
-rw-r--r--sys/dev/cxgbe/common/common.h3
-rw-r--r--sys/dev/cxgbe/common/t4_hw.c1592
-rw-r--r--sys/dev/cxgbe/common/t4_msg.h566
-rw-r--r--sys/dev/cxgbe/common/t4_regs.h100
4 files changed, 1856 insertions, 405 deletions
diff --git a/sys/dev/cxgbe/common/common.h b/sys/dev/cxgbe/common/common.h
index 6b36832a7464..2033967ffb94 100644
--- a/sys/dev/cxgbe/common/common.h
+++ b/sys/dev/cxgbe/common/common.h
@@ -684,9 +684,10 @@ u32 t4_hw_pci_read_cfg4(adapter_t *adapter, int reg);
struct fw_filter_wr;
+void t4_intr_clear(struct adapter *adapter);
void t4_intr_enable(struct adapter *adapter);
void t4_intr_disable(struct adapter *adapter);
-bool t4_slow_intr_handler(struct adapter *adapter, bool verbose);
+bool t4_slow_intr_handler(struct adapter *adapter, int flags);
int t4_hash_mac_addr(const u8 *addr);
int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
diff --git a/sys/dev/cxgbe/common/t4_hw.c b/sys/dev/cxgbe/common/t4_hw.c
index eb7ea9acc108..65292486cbc8 100644
--- a/sys/dev/cxgbe/common/t4_hw.c
+++ b/sys/dev/cxgbe/common/t4_hw.c
@@ -84,6 +84,41 @@ static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
delay, NULL);
}
+ /**
+ * t7_wait_sram_done - wait until an operation is completed
+ * @adap: the adapter performing the operation
+ * @reg: the register to check for completion
+ * @result_reg: register that holds the result value
+ * @attempts: number of check iterations
+ * @delay: delay in usecs between iterations
+ * @valp: where to store the value of the result register at completion time
+ *
+ * Waits until a specific bit in @reg is cleared, checking up to
+ * @attempts times. Once the bit is cleared, reads from @result_reg
+ * and stores the value in @valp if it is not NULL. Returns 0 if the
+ * operation completes successfully and -EAGAIN if it times out.
+ */
+static int t7_wait_sram_done(struct adapter *adap, int reg, int result_reg,
+ int attempts, int delay, u32 *valp)
+{
+ while (1) {
+ u32 val = t4_read_reg(adap, reg);
+
+ /* Check if SramStart (bit 19) is cleared */
+ if (!(val & (1 << 19))) {
+ if (valp)
+ *valp = t4_read_reg(adap, result_reg);
+ return 0;
+ }
+
+ if (--attempts == 0)
+ return -EAGAIN;
+
+ if (delay)
+ udelay(delay);
+ }
+}
+
/**
* t4_set_reg_field - set a register field to a value
* @adapter: the adapter to program
@@ -535,11 +570,11 @@ static int t4_edc_err_read(struct adapter *adap, int idx)
edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);
CH_WARN(adap,
- "edc%d err addr 0x%x: 0x%x.\n",
+ " edc%d err addr 0x%x: 0x%x.\n",
idx, edc_ecc_err_addr_reg,
t4_read_reg(adap, edc_ecc_err_addr_reg));
CH_WARN(adap,
- "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
+ " bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
edc_bist_status_rdata_reg,
(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
@@ -578,14 +613,15 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
- } else {
+ } else if (chip_id(adap) < CHELSIO_T7) {
mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
- mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
- idx);
- mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
- idx);
+ mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA, idx);
+ mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN, idx);
+ } else {
+ /* Need to figure out split mode and the rest. */
+ return (-ENOTSUP);
}
if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
@@ -636,21 +672,13 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
idx);
} else {
-/*
- * These macro are missing in t4_regs.h file.
- * Added temporarily for testing.
- */
-#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
-#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
- edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
- edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
- edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
- edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
+ edc_bist_cmd_reg = EDC_T5_REG(A_EDC_H_BIST_CMD, idx);
+ edc_bist_cmd_addr_reg = EDC_T5_REG(A_EDC_H_BIST_CMD_ADDR, idx);
+ edc_bist_cmd_len_reg = EDC_T5_REG(A_EDC_H_BIST_CMD_LEN, idx);
+ edc_bist_cmd_data_pattern = EDC_T5_REG(A_EDC_H_BIST_DATA_PATTERN,
idx);
- edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
+ edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA,
idx);
-#undef EDC_REG_T5
-#undef EDC_STRIDE_T5
}
if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
@@ -2662,10 +2690,9 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
0x173c, 0x1760,
0x1800, 0x18fc,
0x3000, 0x3044,
- 0x3060, 0x3064,
0x30a4, 0x30b0,
0x30b8, 0x30d8,
- 0x30e0, 0x30fc,
+ 0x30e0, 0x30e8,
0x3140, 0x357c,
0x35a8, 0x35cc,
0x35e0, 0x35ec,
@@ -2680,7 +2707,7 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
0x480c, 0x4814,
0x4890, 0x489c,
0x48a4, 0x48ac,
- 0x48b8, 0x48c4,
+ 0x48b8, 0x48bc,
0x4900, 0x4924,
0x4ffc, 0x4ffc,
0x5500, 0x5624,
@@ -2698,8 +2725,10 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
0x5a60, 0x5a6c,
0x5a80, 0x5a8c,
0x5a94, 0x5a9c,
- 0x5b94, 0x5bfc,
- 0x5c10, 0x5e48,
+ 0x5b94, 0x5bec,
+ 0x5bf8, 0x5bfc,
+ 0x5c10, 0x5c40,
+ 0x5c4c, 0x5e48,
0x5e50, 0x5e94,
0x5ea0, 0x5eb0,
0x5ec0, 0x5ec0,
@@ -2708,7 +2737,8 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
0x5ef0, 0x5ef0,
0x5f00, 0x5f04,
0x5f0c, 0x5f10,
- 0x5f20, 0x5f88,
+ 0x5f20, 0x5f78,
+ 0x5f84, 0x5f88,
0x5f90, 0x5fd8,
0x6000, 0x6020,
0x6028, 0x6030,
@@ -3084,7 +3114,7 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
0x38140, 0x38140,
0x38150, 0x38154,
0x38160, 0x381c4,
- 0x381f0, 0x38204,
+ 0x381d0, 0x38204,
0x3820c, 0x38214,
0x3821c, 0x3822c,
0x38244, 0x38244,
@@ -3156,6 +3186,10 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
0x3a000, 0x3a004,
0x3a050, 0x3a084,
0x3a090, 0x3a09c,
+ 0x3a93c, 0x3a93c,
+ 0x3b93c, 0x3b93c,
+ 0x3c93c, 0x3c93c,
+ 0x3d93c, 0x3d93c,
0x3e000, 0x3e020,
0x3e03c, 0x3e05c,
0x3e100, 0x3e120,
@@ -4743,10 +4777,9 @@ struct intr_details {
struct intr_action {
u32 mask;
int arg;
- bool (*action)(struct adapter *, int, bool);
+ bool (*action)(struct adapter *, int, int);
};
-#define NONFATAL_IF_DISABLED 1
struct intr_info {
const char *name; /* name of the INT_CAUSE register */
int cause_reg; /* INT_CAUSE register */
@@ -4769,73 +4802,78 @@ intr_alert_char(u32 cause, u32 enable, u32 fatal)
}
static void
-t4_show_intr_info(struct adapter *adap, const struct intr_info *ii, u32 cause)
+show_intr_info(struct adapter *sc, const struct intr_info *ii, uint32_t cause,
+ uint32_t ucause, uint32_t enabled, uint32_t fatal, int flags)
{
- u32 enable, fatal, leftover;
+ uint32_t leftover, msgbits;
const struct intr_details *details;
char alert;
+ const bool verbose = flags & IHF_VERBOSE;
- enable = t4_read_reg(adap, ii->enable_reg);
- if (ii->flags & NONFATAL_IF_DISABLED)
- fatal = ii->fatal & t4_read_reg(adap, ii->enable_reg);
- else
- fatal = ii->fatal;
- alert = intr_alert_char(cause, enable, fatal);
- CH_ALERT(adap, "%c %s 0x%x = 0x%08x, E 0x%08x, F 0x%08x\n",
- alert, ii->name, ii->cause_reg, cause, enable, fatal);
+ if (verbose || ucause != 0 || flags & IHF_RUN_ALL_ACTIONS) {
+ alert = intr_alert_char(cause, enabled, fatal);
+ CH_ALERT(sc, "%c %s 0x%x = 0x%08x, E 0x%08x, F 0x%08x\n", alert,
+ ii->name, ii->cause_reg, cause, enabled, fatal);
+ }
- leftover = cause;
+ leftover = verbose ? cause : ucause;
for (details = ii->details; details && details->mask != 0; details++) {
- u32 msgbits = details->mask & cause;
+ msgbits = details->mask & leftover;
if (msgbits == 0)
continue;
- alert = intr_alert_char(msgbits, enable, ii->fatal);
- CH_ALERT(adap, " %c [0x%08x] %s\n", alert, msgbits,
- details->msg);
+ alert = intr_alert_char(msgbits, enabled, fatal);
+ CH_ALERT(sc, " %c [0x%08x] %s\n", alert, msgbits, details->msg);
leftover &= ~msgbits;
}
- if (leftover != 0 && leftover != cause)
- CH_ALERT(adap, " ? [0x%08x]\n", leftover);
+ if (leftover != 0 && leftover != (verbose ? cause : ucause))
+ CH_ALERT(sc, " ? [0x%08x]\n", leftover);
}
/*
* Returns true for fatal error.
*/
static bool
-t4_handle_intr(struct adapter *adap, const struct intr_info *ii,
- u32 additional_cause, bool verbose)
+t4_handle_intr(struct adapter *sc, const struct intr_info *ii, uint32_t acause,
+ int flags)
{
- u32 cause, fatal;
+ uint32_t cause, ucause, enabled, fatal;
bool rc;
const struct intr_action *action;
- /*
- * Read and display cause. Note that the top level PL_INT_CAUSE is a
- * bit special and we need to completely ignore the bits that are not in
- * PL_INT_ENABLE.
- */
- cause = t4_read_reg(adap, ii->cause_reg);
- if (ii->cause_reg == A_PL_INT_CAUSE)
- cause &= t4_read_reg(adap, ii->enable_reg);
- if (verbose || cause != 0)
- t4_show_intr_info(adap, ii, cause);
- fatal = cause & ii->fatal;
- if (fatal != 0 && ii->flags & NONFATAL_IF_DISABLED)
- fatal &= t4_read_reg(adap, ii->enable_reg);
- cause |= additional_cause;
- if (cause == 0)
- return (false);
+ cause = t4_read_reg(sc, ii->cause_reg);
+ enabled = t4_read_reg(sc, ii->enable_reg);
+ flags |= ii->flags;
+ fatal = ii->fatal & cause;
+ if (flags & IHF_FATAL_IFF_ENABLED)
+ fatal &= enabled;
+ ucause = cause;
+ if (flags & IHF_IGNORE_IF_DISABLED)
+ ucause &= enabled;
+ if (!(flags & IHF_NO_SHOW))
+ show_intr_info(sc, ii, cause, ucause, enabled, fatal, flags);
rc = fatal != 0;
for (action = ii->actions; action && action->mask != 0; action++) {
- if (!(action->mask & cause))
+ if (action->action == NULL)
continue;
- rc |= (action->action)(adap, action->arg, verbose);
+ if (action->mask & (ucause | acause) ||
+ flags & IHF_RUN_ALL_ACTIONS) {
+ bool rc1 = (action->action)(sc, action->arg, flags);
+ if (action->mask & ucause)
+ rc |= rc1;
+ }
}
/* clear */
- t4_write_reg(adap, ii->cause_reg, cause);
- (void)t4_read_reg(adap, ii->cause_reg);
+ if (cause != 0) {
+ if (flags & IHF_CLR_ALL_SET) {
+ t4_write_reg(sc, ii->cause_reg, cause);
+ (void)t4_read_reg(sc, ii->cause_reg);
+ } else if (ucause != 0 && flags & IHF_CLR_ALL_UNIGNORED) {
+ t4_write_reg(sc, ii->cause_reg, ucause);
+ (void)t4_read_reg(sc, ii->cause_reg);
+ }
+ }
return (rc);
}
@@ -4843,7 +4881,7 @@ t4_handle_intr(struct adapter *adap, const struct intr_info *ii,
/*
* Interrupt handler for the PCIE module.
*/
-static bool pcie_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool pcie_intr_handler(struct adapter *adap, int arg, int flags)
{
static const struct intr_details sysbus_intr_details[] = {
{ F_RNPP, "RXNP array parity error" },
@@ -4956,21 +4994,43 @@ static bool pcie_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_PCIE_INT_CAUSE,
.enable_reg = A_PCIE_INT_ENABLE,
.fatal = 0xffffffff,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ struct intr_info pcie_int_cause_ext = {
+ .name = "PCIE_INT_CAUSE_EXT",
+ .cause_reg = A_PCIE_INT_CAUSE_EXT,
+ .enable_reg = A_PCIE_INT_ENABLE_EXT,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
+ .actions = NULL,
+ };
+ struct intr_info pcie_int_cause_x8 = {
+ .name = "PCIE_INT_CAUSE_X8",
+ .cause_reg = A_PCIE_INT_CAUSE_X8,
+ .enable_reg = A_PCIE_INT_ENABLE_X8,
+ .fatal = 0,
+ .flags = 0,
.details = NULL,
.actions = NULL,
};
bool fatal = false;
if (is_t4(adap)) {
- fatal |= t4_handle_intr(adap, &sysbus_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &pcie_port_intr_info, 0, verbose);
+ fatal |= t4_handle_intr(adap, &sysbus_intr_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &pcie_port_intr_info, 0, flags);
pcie_intr_info.details = pcie_intr_details;
} else {
pcie_intr_info.details = t5_pcie_intr_details;
}
- fatal |= t4_handle_intr(adap, &pcie_intr_info, 0, verbose);
+ fatal |= t4_handle_intr(adap, &pcie_intr_info, 0, flags);
+ if (chip_id(adap) > CHELSIO_T6) {
+ fatal |= t4_handle_intr(adap, &pcie_int_cause_ext, 0, flags);
+ fatal |= t4_handle_intr(adap, &pcie_int_cause_x8, 0, flags);
+ }
return (fatal);
}
@@ -4978,7 +5038,7 @@ static bool pcie_intr_handler(struct adapter *adap, int arg, bool verbose)
/*
* TP interrupt handler.
*/
-static bool tp_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool tp_intr_handler(struct adapter *adap, int arg, int flags)
{
static const struct intr_details tp_intr_details[] = {
{ 0x3fffffff, "TP parity error" },
@@ -4990,25 +5050,90 @@ static bool tp_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_TP_INT_CAUSE,
.enable_reg = A_TP_INT_ENABLE,
.fatal = 0x7fffffff,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = tp_intr_details,
.actions = NULL,
};
+ static const struct intr_info tp_inic_perr_cause = {
+ .name = "TP_INIC_PERR_CAUSE",
+ .cause_reg = A_TP_INIC_PERR_CAUSE,
+ .enable_reg = A_TP_INIC_PERR_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info tp_c_perr_cause = {
+ .name = "TP_C_PERR_CAUSE",
+ .cause_reg = A_TP_C_PERR_CAUSE,
+ .enable_reg = A_TP_C_PERR_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info tp_e_eg_perr_cause = {
+ .name = "TP_E_EG_PERR_CAUSE",
+ .cause_reg = A_TP_E_EG_PERR_CAUSE,
+ .enable_reg = A_TP_E_EG_PERR_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info tp_e_in0_perr_cause = {
+ .name = "TP_E_IN0_PERR_CAUSE",
+ .cause_reg = A_TP_E_IN0_PERR_CAUSE,
+ .enable_reg = A_TP_E_IN0_PERR_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info tp_e_in1_perr_cause = {
+ .name = "TP_E_IN1_PERR_CAUSE",
+ .cause_reg = A_TP_E_IN1_PERR_CAUSE,
+ .enable_reg = A_TP_E_IN1_PERR_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info tp_o_perr_cause = {
+ .name = "TP_O_PERR_CAUSE",
+ .cause_reg = A_TP_O_PERR_CAUSE,
+ .enable_reg = A_TP_O_PERR_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ bool fatal;
+
+ fatal = t4_handle_intr(adap, &tp_intr_info, 0, flags);
+ if (chip_id(adap) > CHELSIO_T6) {
+ fatal |= t4_handle_intr(adap, &tp_inic_perr_cause, 0, flags);
+ fatal |= t4_handle_intr(adap, &tp_c_perr_cause, 0, flags);
+ fatal |= t4_handle_intr(adap, &tp_e_eg_perr_cause, 0, flags);
+ fatal |= t4_handle_intr(adap, &tp_e_in0_perr_cause, 0, flags);
+ fatal |= t4_handle_intr(adap, &tp_e_in1_perr_cause, 0, flags);
+ fatal |= t4_handle_intr(adap, &tp_o_perr_cause, 0, flags);
+ }
- return (t4_handle_intr(adap, &tp_intr_info, 0, verbose));
+ return (fatal);
}
/*
* SGE interrupt handler.
*/
-static bool sge_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool sge_intr_handler(struct adapter *adap, int arg, int flags)
{
static const struct intr_info sge_int1_info = {
.name = "SGE_INT_CAUSE1",
.cause_reg = A_SGE_INT_CAUSE1,
.enable_reg = A_SGE_INT_ENABLE1,
.fatal = 0xffffffff,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = NULL,
.actions = NULL,
};
@@ -5017,7 +5142,7 @@ static bool sge_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_SGE_INT_CAUSE2,
.enable_reg = A_SGE_INT_ENABLE2,
.fatal = 0xffffffff,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = NULL,
.actions = NULL,
};
@@ -5115,7 +5240,7 @@ static bool sge_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_SGE_INT_CAUSE5,
.enable_reg = A_SGE_INT_ENABLE5,
.fatal = 0xffffffff,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = NULL,
.actions = NULL,
};
@@ -5128,7 +5253,24 @@ static bool sge_intr_handler(struct adapter *adap, int arg, bool verbose)
.details = NULL,
.actions = NULL,
};
-
+ static const struct intr_info sge_int7_info = {
+ .name = "SGE_INT_CAUSE7",
+ .cause_reg = A_SGE_INT_CAUSE7,
+ .enable_reg = A_SGE_INT_ENABLE7,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info sge_int8_info = {
+ .name = "SGE_INT_CAUSE8",
+ .cause_reg = A_SGE_INT_CAUSE8,
+ .enable_reg = A_SGE_INT_ENABLE8,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
+ .actions = NULL,
+ };
bool fatal;
u32 v;
@@ -5139,14 +5281,18 @@ static bool sge_intr_handler(struct adapter *adap, int arg, bool verbose)
}
fatal = false;
- fatal |= t4_handle_intr(adap, &sge_int1_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &sge_int2_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &sge_int3_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &sge_int4_info, 0, verbose);
+ fatal |= t4_handle_intr(adap, &sge_int1_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &sge_int2_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &sge_int3_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &sge_int4_info, 0, flags);
if (chip_id(adap) >= CHELSIO_T5)
- fatal |= t4_handle_intr(adap, &sge_int5_info, 0, verbose);
+ fatal |= t4_handle_intr(adap, &sge_int5_info, 0, flags);
if (chip_id(adap) >= CHELSIO_T6)
- fatal |= t4_handle_intr(adap, &sge_int6_info, 0, verbose);
+ fatal |= t4_handle_intr(adap, &sge_int6_info, 0, flags);
+ if (chip_id(adap) >= CHELSIO_T7) {
+ fatal |= t4_handle_intr(adap, &sge_int7_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &sge_int8_info, 0, flags);
+ }
v = t4_read_reg(adap, A_SGE_ERROR_STATS);
if (v & F_ERROR_QID_VALID) {
@@ -5163,7 +5309,7 @@ static bool sge_intr_handler(struct adapter *adap, int arg, bool verbose)
/*
* CIM interrupt handler.
*/
-static bool cim_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool cim_intr_handler(struct adapter *adap, int arg, int flags)
{
static const struct intr_details cim_host_intr_details[] = {
/* T6+ */
@@ -5208,7 +5354,7 @@ static bool cim_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_CIM_HOST_INT_CAUSE,
.enable_reg = A_CIM_HOST_INT_ENABLE,
.fatal = 0x007fffe6,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = cim_host_intr_details,
.actions = NULL,
};
@@ -5259,7 +5405,7 @@ static bool cim_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_CIM_HOST_UPACC_INT_CAUSE,
.enable_reg = A_CIM_HOST_UPACC_INT_ENABLE,
.fatal = 0x3fffeeff,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = cim_host_upacc_intr_details,
.actions = NULL,
};
@@ -5272,6 +5418,15 @@ static bool cim_intr_handler(struct adapter *adap, int arg, bool verbose)
.details = NULL,
.actions = NULL,
};
+ static const struct intr_info cim_perr_cause = {
+ .name = "CIM_PERR_CAUSE",
+ .cause_reg = A_CIM_PERR_CAUSE,
+ .enable_reg = A_CIM_PERR_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
u32 val, fw_err;
bool fatal;
@@ -5290,9 +5445,11 @@ static bool cim_intr_handler(struct adapter *adap, int arg, bool verbose)
}
fatal = (fw_err & F_PCIE_FW_ERR) != 0;
- fatal |= t4_handle_intr(adap, &cim_host_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &cim_host_upacc_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &cim_pf_host_intr_info, 0, verbose);
+ fatal |= t4_handle_intr(adap, &cim_host_intr_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &cim_host_upacc_intr_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &cim_pf_host_intr_info, 0, flags);
+ if (chip_id(adap) > CHELSIO_T6)
+ fatal |= t4_handle_intr(adap, &cim_perr_cause, 0, flags);
if (fatal)
t4_os_cim_err(adap);
@@ -5302,7 +5459,7 @@ static bool cim_intr_handler(struct adapter *adap, int arg, bool verbose)
/*
* ULP RX interrupt handler.
*/
-static bool ulprx_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool ulprx_intr_handler(struct adapter *adap, int arg, int flags)
{
static const struct intr_details ulprx_intr_details[] = {
/* T5+ */
@@ -5320,7 +5477,7 @@ static bool ulprx_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_ULP_RX_INT_CAUSE,
.enable_reg = A_ULP_RX_INT_ENABLE,
.fatal = 0x07ffffff,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = ulprx_intr_details,
.actions = NULL,
};
@@ -5333,10 +5490,53 @@ static bool ulprx_intr_handler(struct adapter *adap, int arg, bool verbose)
.details = NULL,
.actions = NULL,
};
+ static const struct intr_info ulprx_int_cause_pcmd = {
+ .name = "ULP_RX_INT_CAUSE_PCMD",
+ .cause_reg = A_ULP_RX_INT_CAUSE_PCMD,
+ .enable_reg = A_ULP_RX_INT_ENABLE_PCMD,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info ulprx_int_cause_data = {
+ .name = "ULP_RX_INT_CAUSE_DATA",
+ .cause_reg = A_ULP_RX_INT_CAUSE_DATA,
+ .enable_reg = A_ULP_RX_INT_ENABLE_DATA,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info ulprx_int_cause_arb = {
+ .name = "ULP_RX_INT_CAUSE_ARB",
+ .cause_reg = A_ULP_RX_INT_CAUSE_ARB,
+ .enable_reg = A_ULP_RX_INT_ENABLE_ARB,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info ulprx_int_cause_intf = {
+ .name = "ULP_RX_INT_CAUSE_INTERFACE",
+ .cause_reg = A_ULP_RX_INT_CAUSE_INTERFACE,
+ .enable_reg = A_ULP_RX_INT_ENABLE_INTERFACE,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
+ .actions = NULL,
+ };
bool fatal = false;
- fatal |= t4_handle_intr(adap, &ulprx_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &ulprx_intr2_info, 0, verbose);
+ fatal |= t4_handle_intr(adap, &ulprx_intr_info, 0, flags);
+ if (chip_id(adap) < CHELSIO_T7)
+ fatal |= t4_handle_intr(adap, &ulprx_intr2_info, 0, flags);
+ else {
+ fatal |= t4_handle_intr(adap, &ulprx_int_cause_pcmd, 0, flags);
+ fatal |= t4_handle_intr(adap, &ulprx_int_cause_data, 0, flags);
+ fatal |= t4_handle_intr(adap, &ulprx_int_cause_arb, 0, flags);
+ fatal |= t4_handle_intr(adap, &ulprx_int_cause_intf, 0, flags);
+ }
return (fatal);
}
@@ -5344,7 +5544,7 @@ static bool ulprx_intr_handler(struct adapter *adap, int arg, bool verbose)
/*
* ULP TX interrupt handler.
*/
-static bool ulptx_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool ulptx_intr_handler(struct adapter *adap, int arg, int flags)
{
static const struct intr_details ulptx_intr_details[] = {
{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds" },
@@ -5359,32 +5559,98 @@ static bool ulptx_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_ULP_TX_INT_CAUSE,
.enable_reg = A_ULP_TX_INT_ENABLE,
.fatal = 0x0fffffff,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = ulptx_intr_details,
.actions = NULL,
};
- static const struct intr_info ulptx_intr2_info = {
+ static const struct intr_info ulptx_intr_info2 = {
.name = "ULP_TX_INT_CAUSE_2",
.cause_reg = A_ULP_TX_INT_CAUSE_2,
.enable_reg = A_ULP_TX_INT_ENABLE_2,
- .fatal = 0xf0,
- .flags = NONFATAL_IF_DISABLED,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info ulptx_intr_info3 = {
+ .name = "ULP_TX_INT_CAUSE_3",
+ .cause_reg = A_ULP_TX_INT_CAUSE_3,
+ .enable_reg = A_ULP_TX_INT_ENABLE_3,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info ulptx_intr_info4 = {
+ .name = "ULP_TX_INT_CAUSE_4",
+ .cause_reg = A_ULP_TX_INT_CAUSE_4,
+ .enable_reg = A_ULP_TX_INT_ENABLE_4,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info ulptx_intr_info5 = {
+ .name = "ULP_TX_INT_CAUSE_5",
+ .cause_reg = A_ULP_TX_INT_CAUSE_5,
+ .enable_reg = A_ULP_TX_INT_ENABLE_5,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info ulptx_intr_info6 = {
+ .name = "ULP_TX_INT_CAUSE_6",
+ .cause_reg = A_ULP_TX_INT_CAUSE_6,
+ .enable_reg = A_ULP_TX_INT_ENABLE_6,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info ulptx_intr_info7 = {
+ .name = "ULP_TX_INT_CAUSE_7",
+ .cause_reg = A_ULP_TX_INT_CAUSE_7,
+ .enable_reg = A_ULP_TX_INT_ENABLE_7,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info ulptx_intr_info8 = {
+ .name = "ULP_TX_INT_CAUSE_8",
+ .cause_reg = A_ULP_TX_INT_CAUSE_8,
+ .enable_reg = A_ULP_TX_INT_ENABLE_8,
+ .fatal = 0,
+ .flags = 0,
.details = NULL,
.actions = NULL,
};
bool fatal = false;
- fatal |= t4_handle_intr(adap, &ulptx_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &ulptx_intr2_info, 0, verbose);
+ fatal |= t4_handle_intr(adap, &ulptx_intr_info, 0, flags);
+ if (chip_id(adap) > CHELSIO_T4)
+ fatal |= t4_handle_intr(adap, &ulptx_intr_info2, 0, flags);
+ if (chip_id(adap) > CHELSIO_T6) {
+ fatal |= t4_handle_intr(adap, &ulptx_intr_info3, 0, flags);
+ fatal |= t4_handle_intr(adap, &ulptx_intr_info4, 0, flags);
+ fatal |= t4_handle_intr(adap, &ulptx_intr_info5, 0, flags);
+ fatal |= t4_handle_intr(adap, &ulptx_intr_info6, 0, flags);
+ fatal |= t4_handle_intr(adap, &ulptx_intr_info7, 0, flags);
+ fatal |= t4_handle_intr(adap, &ulptx_intr_info8, 0, flags);
+ }
return (fatal);
}
-static bool pmtx_dump_dbg_stats(struct adapter *adap, int arg, bool verbose)
+static bool pmtx_dump_dbg_stats(struct adapter *adap, int arg, int flags)
{
int i;
u32 data[17];
+ if (flags & IHF_NO_SHOW)
+ return (false);
+
t4_read_indirect(adap, A_PM_TX_DBG_CTRL, A_PM_TX_DBG_DATA, &data[0],
ARRAY_SIZE(data), A_PM_TX_DBG_STAT0);
for (i = 0; i < ARRAY_SIZE(data); i++) {
@@ -5398,13 +5664,9 @@ static bool pmtx_dump_dbg_stats(struct adapter *adap, int arg, bool verbose)
/*
* PM TX interrupt handler.
*/
-static bool pmtx_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool pmtx_intr_handler(struct adapter *adap, int arg, int flags)
{
- static const struct intr_action pmtx_intr_actions[] = {
- { 0xffffffff, 0, pmtx_dump_dbg_stats },
- { 0 },
- };
- static const struct intr_details pmtx_intr_details[] = {
+ static const struct intr_details pmtx_int_cause_fields[] = {
{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large" },
{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large" },
{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large" },
@@ -5421,25 +5683,29 @@ static bool pmtx_intr_handler(struct adapter *adap, int arg, bool verbose)
{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error" },
{ 0 }
};
- static const struct intr_info pmtx_intr_info = {
+ static const struct intr_action pmtx_int_cause_actions[] = {
+ { 0xffffffff, -1, pmtx_dump_dbg_stats },
+ { 0 },
+ };
+ static const struct intr_info pmtx_int_cause = {
.name = "PM_TX_INT_CAUSE",
.cause_reg = A_PM_TX_INT_CAUSE,
.enable_reg = A_PM_TX_INT_ENABLE,
.fatal = 0xffffffff,
.flags = 0,
- .details = pmtx_intr_details,
- .actions = pmtx_intr_actions,
+ .details = pmtx_int_cause_fields,
+ .actions = pmtx_int_cause_actions,
};
- return (t4_handle_intr(adap, &pmtx_intr_info, 0, verbose));
+ return (t4_handle_intr(adap, &pmtx_int_cause, 0, flags));
}
/*
* PM RX interrupt handler.
*/
-static bool pmrx_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool pmrx_intr_handler(struct adapter *adap, int arg, int flags)
{
- static const struct intr_details pmrx_intr_details[] = {
+ static const struct intr_details pmrx_int_cause_fields[] = {
/* T6+ */
{ 0x18000000, "PMRX ospi overflow" },
{ F_MA_INTF_SDC_ERR, "PMRX MA interface SDC parity error" },
@@ -5461,25 +5727,25 @@ static bool pmrx_intr_handler(struct adapter *adap, int arg, bool verbose)
{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error"},
{ 0 }
};
- static const struct intr_info pmrx_intr_info = {
+ static const struct intr_info pmrx_int_cause = {
.name = "PM_RX_INT_CAUSE",
.cause_reg = A_PM_RX_INT_CAUSE,
.enable_reg = A_PM_RX_INT_ENABLE,
.fatal = 0x1fffffff,
- .flags = NONFATAL_IF_DISABLED,
- .details = pmrx_intr_details,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = pmrx_int_cause_fields,
.actions = NULL,
};
- return (t4_handle_intr(adap, &pmrx_intr_info, 0, verbose));
+ return (t4_handle_intr(adap, &pmrx_int_cause, 0, flags));
}
/*
* CPL switch interrupt handler.
*/
-static bool cplsw_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool cplsw_intr_handler(struct adapter *adap, int arg, int flags)
{
- static const struct intr_details cplsw_intr_details[] = {
+ static const struct intr_details cplsw_int_cause_fields[] = {
/* T5+ */
{ F_PERR_CPL_128TO128_1, "CPLSW 128TO128 FIFO1 parity error" },
{ F_PERR_CPL_128TO128_0, "CPLSW 128TO128 FIFO0 parity error" },
@@ -5493,17 +5759,17 @@ static bool cplsw_intr_handler(struct adapter *adap, int arg, bool verbose)
{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error" },
{ 0 }
};
- static const struct intr_info cplsw_intr_info = {
+ static const struct intr_info cplsw_int_cause = {
.name = "CPL_INTR_CAUSE",
.cause_reg = A_CPL_INTR_CAUSE,
.enable_reg = A_CPL_INTR_ENABLE,
- .fatal = 0xff,
- .flags = NONFATAL_IF_DISABLED,
- .details = cplsw_intr_details,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = cplsw_int_cause_fields,
.actions = NULL,
};
- return (t4_handle_intr(adap, &cplsw_intr_info, 0, verbose));
+ return (t4_handle_intr(adap, &cplsw_int_cause, 0, flags));
}
#define T4_LE_FATAL_MASK (F_PARITYERR | F_UNKNOWNCMD | F_REQQPARERR)
@@ -5515,11 +5781,12 @@ static bool cplsw_intr_handler(struct adapter *adap, int arg, bool verbose)
#define T6_LE_FATAL_MASK (T6_LE_PERRCRC_MASK | F_T6_UNKNOWNCMD | \
F_TCAMACCFAIL | F_HASHTBLACCFAIL | F_CMDTIDERR | F_CMDPRSRINTERR | \
F_TOTCNTERR | F_CLCAMFIFOERR | F_CLIPSUBERR)
+#define T7_LE_FATAL_MASK (T6_LE_FATAL_MASK | F_CACHESRAMPERR | F_CACHEINTPERR)
/*
* LE interrupt handler.
*/
-static bool le_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool le_intr_handler(struct adapter *adap, int arg, int flags)
{
static const struct intr_details le_intr_details[] = {
{ F_REQQPARERR, "LE request queue parity error" },
@@ -5556,7 +5823,7 @@ static bool le_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_LE_DB_INT_CAUSE,
.enable_reg = A_LE_DB_INT_ENABLE,
.fatal = 0,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = NULL,
.actions = NULL,
};
@@ -5566,16 +5833,19 @@ static bool le_intr_handler(struct adapter *adap, int arg, bool verbose)
le_intr_info.fatal = T5_LE_FATAL_MASK;
} else {
le_intr_info.details = t6_le_intr_details;
- le_intr_info.fatal = T6_LE_FATAL_MASK;
+ if (chip_id(adap) < CHELSIO_T7)
+ le_intr_info.fatal = T6_LE_FATAL_MASK;
+ else
+ le_intr_info.fatal = T7_LE_FATAL_MASK;
}
- return (t4_handle_intr(adap, &le_intr_info, 0, verbose));
+ return (t4_handle_intr(adap, &le_intr_info, 0, flags));
}
/*
* MPS interrupt handler.
*/
-static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool mps_intr_handler(struct adapter *adap, int arg, int flags)
{
static const struct intr_details mps_rx_perr_intr_details[] = {
{ 0xffffffff, "MPS Rx parity error" },
@@ -5586,10 +5856,55 @@ static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_MPS_RX_PERR_INT_CAUSE,
.enable_reg = A_MPS_RX_PERR_INT_ENABLE,
.fatal = 0xffffffff,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = mps_rx_perr_intr_details,
.actions = NULL,
};
+ static const struct intr_info mps_rx_perr_intr_info2 = {
+ .name = "MPS_RX_PERR_INT_CAUSE2",
+ .cause_reg = A_MPS_RX_PERR_INT_CAUSE2,
+ .enable_reg = A_MPS_RX_PERR_INT_ENABLE2,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info mps_rx_perr_intr_info3 = {
+ .name = "MPS_RX_PERR_INT_CAUSE3",
+ .cause_reg = A_MPS_RX_PERR_INT_CAUSE3,
+ .enable_reg = A_MPS_RX_PERR_INT_ENABLE3,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info mps_rx_perr_intr_info4 = {
+ .name = "MPS_RX_PERR_INT_CAUSE4",
+ .cause_reg = A_MPS_RX_PERR_INT_CAUSE4,
+ .enable_reg = A_MPS_RX_PERR_INT_ENABLE4,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info mps_rx_perr_intr_info5 = {
+ .name = "MPS_RX_PERR_INT_CAUSE5",
+ .cause_reg = A_MPS_RX_PERR_INT_CAUSE5,
+ .enable_reg = A_MPS_RX_PERR_INT_ENABLE5,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info mps_rx_perr_intr_info6 = {
+ .name = "MPS_RX_PERR_INT_CAUSE6",
+ .cause_reg = A_MPS_RX_PERR_INT_CAUSE6,
+ .enable_reg = A_MPS_RX_PERR_INT_ENABLE6,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
static const struct intr_details mps_tx_intr_details[] = {
{ F_PORTERR, "MPS Tx destination port is disabled" },
{ F_FRMERR, "MPS Tx framing error" },
@@ -5606,10 +5921,37 @@ static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_MPS_TX_INT_CAUSE,
.enable_reg = A_MPS_TX_INT_ENABLE,
.fatal = 0x1ffff,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = mps_tx_intr_details,
.actions = NULL,
};
+ static const struct intr_info mps_tx_intr_info2 = {
+ .name = "MPS_TX_INT2_CAUSE",
+ .cause_reg = A_MPS_TX_INT2_CAUSE,
+ .enable_reg = A_MPS_TX_INT2_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info mps_tx_intr_info3 = {
+ .name = "MPS_TX_INT3_CAUSE",
+ .cause_reg = A_MPS_TX_INT3_CAUSE,
+ .enable_reg = A_MPS_TX_INT3_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info mps_tx_intr_info4 = {
+ .name = "MPS_TX_INT4_CAUSE",
+ .cause_reg = A_MPS_TX_INT4_CAUSE,
+ .enable_reg = A_MPS_TX_INT4_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
static const struct intr_details mps_trc_intr_details[] = {
{ F_MISCPERR, "MPS TRC misc parity error" },
{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error" },
@@ -5626,14 +5968,23 @@ static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
.actions = NULL,
};
static const struct intr_info t7_mps_trc_intr_info = {
- .name = "T7_MPS_TRC_INT_CAUSE",
+ .name = "MPS_TRC_INT_CAUSE",
.cause_reg = A_T7_MPS_TRC_INT_CAUSE,
.enable_reg = A_T7_MPS_TRC_INT_ENABLE,
- .fatal = F_MISCPERR | V_PKTFIFO(M_PKTFIFO) | V_FILTMEM(M_FILTMEM),
- .flags = 0,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = mps_trc_intr_details,
.actions = NULL,
};
+ static const struct intr_info t7_mps_trc_intr_info2 = {
+ .name = "MPS_TRC_INT_CAUSE2",
+ .cause_reg = A_MPS_TRC_INT_CAUSE2,
+ .enable_reg = A_MPS_TRC_INT_ENABLE2,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
static const struct intr_details mps_stat_sram_intr_details[] = {
{ 0xffffffff, "MPS statistics SRAM parity error" },
{ 0 }
@@ -5643,7 +5994,7 @@ static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_SRAM,
.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_SRAM,
.fatal = 0x1fffffff,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = mps_stat_sram_intr_details,
.actions = NULL,
};
@@ -5656,7 +6007,7 @@ static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_TX_FIFO,
.fatal = 0xffffff,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = mps_stat_tx_intr_details,
.actions = NULL,
};
@@ -5701,24 +6052,31 @@ static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
.details = mps_stat_sram1_intr_details,
.actions = NULL,
};
+ bool fatal = false;
- bool fatal;
-
- fatal = false;
- fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &mps_tx_intr_info, 0, verbose);
- if (chip_id(adap) > CHELSIO_T6)
- fatal |= t4_handle_intr(adap, &t7_mps_trc_intr_info, 0, verbose);
- else
- fatal |= t4_handle_intr(adap, &mps_trc_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &mps_stat_sram_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &mps_stat_tx_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &mps_stat_rx_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &mps_cls_intr_info, 0, verbose);
- if (chip_id(adap) > CHELSIO_T4) {
- fatal |= t4_handle_intr(adap, &mps_stat_sram1_intr_info, 0,
- verbose);
+ fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info, 0, flags);
+ if (chip_id(adap) > CHELSIO_T6) {
+ fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info2, 0, flags);
+ fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info3, 0, flags);
+ fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info4, 0, flags);
+ fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info5, 0, flags);
+ fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info6, 0, flags);
}
+ fatal |= t4_handle_intr(adap, &mps_tx_intr_info, 0, flags);
+ if (chip_id(adap) > CHELSIO_T6) {
+ fatal |= t4_handle_intr(adap, &mps_tx_intr_info2, 0, flags);
+ fatal |= t4_handle_intr(adap, &mps_tx_intr_info3, 0, flags);
+ fatal |= t4_handle_intr(adap, &mps_tx_intr_info4, 0, flags);
+ fatal |= t4_handle_intr(adap, &t7_mps_trc_intr_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &t7_mps_trc_intr_info2, 0, flags);
+ } else
+ fatal |= t4_handle_intr(adap, &mps_trc_intr_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &mps_stat_sram_intr_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &mps_stat_tx_intr_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &mps_stat_rx_intr_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &mps_cls_intr_info, 0, flags);
+ if (chip_id(adap) > CHELSIO_T4)
+ fatal |= t4_handle_intr(adap, &mps_stat_sram1_intr_info, 0, flags);
t4_write_reg(adap, A_MPS_INT_CAUSE, is_t4(adap) ? 0 : 0xffffffff);
t4_read_reg(adap, A_MPS_INT_CAUSE); /* flush */
@@ -5730,7 +6088,7 @@ static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
/*
* EDC/MC interrupt handler.
*/
-static bool mem_intr_handler(struct adapter *adap, int idx, bool verbose)
+static bool mem_intr_handler(struct adapter *adap, int idx, int flags)
{
static const char name[4][5] = { "EDC0", "EDC1", "MC0", "MC1" };
unsigned int count_reg, v;
@@ -5740,61 +6098,106 @@ static bool mem_intr_handler(struct adapter *adap, int idx, bool verbose)
{ F_PERR_INT_CAUSE, "FIFO parity error" },
{ 0 }
};
+ char rname[32];
struct intr_info ii = {
+ .name = &rname[0],
.fatal = F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE,
.details = mem_intr_details,
.flags = 0,
.actions = NULL,
};
- bool fatal;
+ bool fatal = false;
+ int i = 0;
switch (idx) {
+ case MEM_EDC1: i = 1;
+ /* fall through */
case MEM_EDC0:
- ii.name = "EDC0_INT_CAUSE";
- ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, 0);
- ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, 0);
- count_reg = EDC_REG(A_EDC_ECC_STATUS, 0);
- break;
- case MEM_EDC1:
- ii.name = "EDC1_INT_CAUSE";
- ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, 1);
- ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, 1);
- count_reg = EDC_REG(A_EDC_ECC_STATUS, 1);
+ snprintf(rname, sizeof(rname), "EDC%u_INT_CAUSE", i);
+ if (is_t4(adap)) {
+ ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, i);
+ ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, i);
+ count_reg = EDC_REG(A_EDC_ECC_STATUS, i);
+ } else {
+ ii.cause_reg = EDC_T5_REG(A_EDC_H_INT_CAUSE, i);
+ ii.enable_reg = EDC_T5_REG(A_EDC_H_INT_ENABLE, i);
+ count_reg = EDC_T5_REG(A_EDC_H_ECC_STATUS, i);
+ }
+ fatal |= t4_handle_intr(adap, &ii, 0, flags);
+ if (chip_id(adap) > CHELSIO_T6) {
+ snprintf(rname, sizeof(rname), "EDC%u_PAR_CAUSE", i);
+ ii.cause_reg = EDC_T5_REG(A_EDC_H_PAR_CAUSE, i);
+ ii.enable_reg = EDC_T5_REG(A_EDC_H_PAR_ENABLE, i);
+ ii.fatal = 0xffffffff;
+ ii.details = NULL;
+ ii.flags = IHF_FATAL_IFF_ENABLED;
+ fatal |= t4_handle_intr(adap, &ii, 0, flags);
+ }
break;
+ case MEM_MC1:
+ if (is_t4(adap) || is_t6(adap))
+ return (false);
+ i = 1;
+ /* fall through */
case MEM_MC0:
- ii.name = "MC0_INT_CAUSE";
+ snprintf(rname, sizeof(rname), "MC%u_INT_CAUSE", i);
if (is_t4(adap)) {
ii.cause_reg = A_MC_INT_CAUSE;
ii.enable_reg = A_MC_INT_ENABLE;
count_reg = A_MC_ECC_STATUS;
+ } else if (chip_id(adap) < CHELSIO_T7) {
+ ii.cause_reg = MC_REG(A_MC_P_INT_CAUSE, i);
+ ii.enable_reg = MC_REG(A_MC_P_INT_ENABLE, i);
+ count_reg = MC_REG(A_MC_P_ECC_STATUS, i);
} else {
- ii.cause_reg = A_MC_P_INT_CAUSE;
- ii.enable_reg = A_MC_P_INT_ENABLE;
- count_reg = A_MC_P_ECC_STATUS;
+ ii.cause_reg = MC_T7_REG(A_T7_MC_P_INT_CAUSE, i);
+ ii.enable_reg = MC_T7_REG(A_T7_MC_P_INT_ENABLE, i);
+ count_reg = MC_T7_REG(A_T7_MC_P_ECC_STATUS, i);
+ }
+ fatal |= t4_handle_intr(adap, &ii, 0, flags);
+
+ snprintf(rname, sizeof(rname), "MC%u_PAR_CAUSE", i);
+ if (is_t4(adap)) {
+ ii.cause_reg = A_MC_PAR_CAUSE;
+ ii.enable_reg = A_MC_PAR_ENABLE;
+ } else if (chip_id(adap) < CHELSIO_T7) {
+ ii.cause_reg = MC_REG(A_MC_P_PAR_CAUSE, i);
+ ii.enable_reg = MC_REG(A_MC_P_PAR_ENABLE, i);
+ } else {
+ ii.cause_reg = MC_T7_REG(A_T7_MC_P_PAR_CAUSE, i);
+ ii.enable_reg = MC_T7_REG(A_T7_MC_P_PAR_ENABLE, i);
+ }
+ ii.fatal = 0xffffffff;
+ ii.details = NULL;
+ ii.flags = IHF_FATAL_IFF_ENABLED;
+ fatal |= t4_handle_intr(adap, &ii, 0, flags);
+
+ if (chip_id(adap) > CHELSIO_T6) {
+ snprintf(rname, sizeof(rname), "MC%u_DDRCTL_INT_CAUSE", i);
+ ii.cause_reg = MC_T7_REG(A_MC_P_DDRCTL_INT_CAUSE, i);
+ ii.enable_reg = MC_T7_REG(A_MC_P_DDRCTL_INT_ENABLE, i);
+ fatal |= t4_handle_intr(adap, &ii, 0, flags);
+
+ snprintf(rname, sizeof(rname), "MC%u_ECC_UE_INT_CAUSE", i);
+ ii.cause_reg = MC_T7_REG(A_MC_P_ECC_UE_INT_CAUSE, i);
+ ii.enable_reg = MC_T7_REG(A_MC_P_ECC_UE_INT_ENABLE, i);
+ fatal |= t4_handle_intr(adap, &ii, 0, flags);
}
- break;
- case MEM_MC1:
- ii.name = "MC1_INT_CAUSE";
- ii.cause_reg = MC_REG(A_MC_P_INT_CAUSE, 1);
- ii.enable_reg = MC_REG(A_MC_P_INT_ENABLE, 1);
- count_reg = MC_REG(A_MC_P_ECC_STATUS, 1);
break;
}
- fatal = t4_handle_intr(adap, &ii, 0, verbose);
-
v = t4_read_reg(adap, count_reg);
if (v != 0) {
- if (G_ECC_UECNT(v) != 0) {
+ if (G_ECC_UECNT(v) != 0 && !(flags & IHF_NO_SHOW)) {
CH_ALERT(adap,
- "%s: %u uncorrectable ECC data error(s)\n",
+ " %s: %u uncorrectable ECC data error(s)\n",
name[idx], G_ECC_UECNT(v));
}
- if (G_ECC_CECNT(v) != 0) {
+ if (G_ECC_CECNT(v) != 0 && !(flags & IHF_NO_SHOW)) {
if (idx <= MEM_EDC1)
t4_edc_err_read(adap, idx);
CH_WARN_RATELIMIT(adap,
- "%s: %u correctable ECC data error(s)\n",
+ " %s: %u correctable ECC data error(s)\n",
name[idx], G_ECC_CECNT(v));
}
t4_write_reg(adap, count_reg, 0xffffffff);
@@ -5803,14 +6206,16 @@ static bool mem_intr_handler(struct adapter *adap, int idx, bool verbose)
return (fatal);
}
-static bool ma_wrap_status(struct adapter *adap, int arg, bool verbose)
+static bool ma_wrap_status(struct adapter *adap, int arg, int flags)
{
u32 v;
v = t4_read_reg(adap, A_MA_INT_WRAP_STATUS);
- CH_ALERT(adap,
- "MA address wrap-around error by client %u to address %#x\n",
- G_MEM_WRAP_CLIENT_NUM(v), G_MEM_WRAP_ADDRESS(v) << 4);
+ if (!(flags & IHF_NO_SHOW)) {
+ CH_ALERT(adap,
+ " MA address wrap-around by client %u to address %#x\n",
+ G_MEM_WRAP_CLIENT_NUM(v), G_MEM_WRAP_ADDRESS(v) << 4);
+ }
t4_write_reg(adap, A_MA_INT_WRAP_STATUS, v);
return (false);
@@ -5820,7 +6225,7 @@ static bool ma_wrap_status(struct adapter *adap, int arg, bool verbose)
/*
* MA interrupt handler.
*/
-static bool ma_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool ma_intr_handler(struct adapter *adap, int arg, int flags)
{
static const struct intr_action ma_intr_actions[] = {
{ F_MEM_WRAP_INT_CAUSE, 0, ma_wrap_status },
@@ -5831,7 +6236,7 @@ static bool ma_intr_handler(struct adapter *adap, int arg, bool verbose)
.cause_reg = A_MA_INT_CAUSE,
.enable_reg = A_MA_INT_ENABLE,
.fatal = F_MEM_PERR_INT_CAUSE | F_MEM_TO_INT_CAUSE,
- .flags = NONFATAL_IF_DISABLED,
+ .flags = IHF_FATAL_IFF_ENABLED,
.details = NULL,
.actions = ma_intr_actions,
};
@@ -5856,10 +6261,10 @@ static bool ma_intr_handler(struct adapter *adap, int arg, bool verbose)
bool fatal;
fatal = false;
- fatal |= t4_handle_intr(adap, &ma_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &ma_perr_status1, 0, verbose);
+ fatal |= t4_handle_intr(adap, &ma_intr_info, 0, flags);
+ fatal |= t4_handle_intr(adap, &ma_perr_status1, 0, flags);
if (chip_id(adap) > CHELSIO_T4)
- fatal |= t4_handle_intr(adap, &ma_perr_status2, 0, verbose);
+ fatal |= t4_handle_intr(adap, &ma_perr_status2, 0, flags);
return (fatal);
}
@@ -5867,58 +6272,115 @@ static bool ma_intr_handler(struct adapter *adap, int arg, bool verbose)
/*
* SMB interrupt handler.
*/
-static bool smb_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool smb_intr_handler(struct adapter *adap, int arg, int flags)
{
- static const struct intr_details smb_intr_details[] = {
+ static const struct intr_details smb_int_cause_fields[] = {
{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error" },
{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error" },
{ F_SLVFIFOPARINT, "SMB slave FIFO parity error" },
{ 0 }
};
- static const struct intr_info smb_intr_info = {
+ static const struct intr_info smb_int_cause = {
.name = "SMB_INT_CAUSE",
.cause_reg = A_SMB_INT_CAUSE,
.enable_reg = A_SMB_INT_ENABLE,
.fatal = F_SLVFIFOPARINT | F_MSTRXFIFOPARINT | F_MSTTXFIFOPARINT,
.flags = 0,
- .details = smb_intr_details,
+ .details = smb_int_cause_fields,
.actions = NULL,
};
-
- return (t4_handle_intr(adap, &smb_intr_info, 0, verbose));
+ return (t4_handle_intr(adap, &smb_int_cause, 0, flags));
}
/*
* NC-SI interrupt handler.
*/
-static bool ncsi_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool ncsi_intr_handler(struct adapter *adap, int arg, int flags)
{
- static const struct intr_details ncsi_intr_details[] = {
+ static const struct intr_details ncsi_int_cause_fields[] = {
{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error" },
{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error" },
{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error" },
{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error" },
{ 0 }
};
- static const struct intr_info ncsi_intr_info = {
+ static const struct intr_info ncsi_int_cause = {
.name = "NCSI_INT_CAUSE",
.cause_reg = A_NCSI_INT_CAUSE,
.enable_reg = A_NCSI_INT_ENABLE,
.fatal = F_RXFIFO_PRTY_ERR | F_TXFIFO_PRTY_ERR |
F_MPS_DM_PRTY_ERR | F_CIM_DM_PRTY_ERR,
.flags = 0,
- .details = ncsi_intr_details,
+ .details = ncsi_int_cause_fields,
+ .actions = NULL,
+ };
+ static const struct intr_info ncsi_xgmac0_int_cause = {
+ .name = "NCSI_XGMAC0_INT_CAUSE",
+ .cause_reg = A_NCSI_XGMAC0_INT_CAUSE,
+ .enable_reg = A_NCSI_XGMAC0_INT_ENABLE,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
.actions = NULL,
};
+ bool fatal = false;
- return (t4_handle_intr(adap, &ncsi_intr_info, 0, verbose));
+ fatal |= t4_handle_intr(adap, &ncsi_int_cause, 0, flags);
+ if (chip_id(adap) > CHELSIO_T6)
+ fatal |= t4_handle_intr(adap, &ncsi_xgmac0_int_cause, 0, flags);
+ return (fatal);
}
/*
* MAC interrupt handler.
*/
-static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
+static bool mac_intr_handler(struct adapter *adap, int port, int flags)
{
+ static const struct intr_info mac_int_cause_cmn = {
+ .name = "MAC_INT_CAUSE_CMN",
+ .cause_reg = A_MAC_INT_CAUSE_CMN,
+ .enable_reg = A_MAC_INT_EN_CMN,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info mac_perr_cause_mtip = {
+ .name = "MAC_PERR_INT_CAUSE_MTIP",
+ .cause_reg = A_MAC_PERR_INT_CAUSE_MTIP,
+ .enable_reg = A_MAC_PERR_INT_EN_MTIP,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED | IHF_IGNORE_IF_DISABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info mac_cerr_cause_mtip = {
+ .name = "MAC_CERR_INT_CAUSE_MTIP",
+ .cause_reg = A_MAC_CERR_INT_CAUSE_MTIP,
+ .enable_reg = A_MAC_CERR_INT_EN_MTIP,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info mac_ios_int_cause_quad0 = {
+ .name = "MAC_IOS_INTR_CAUSE_QUAD0",
+ .cause_reg = A_MAC_IOS_INTR_CAUSE_QUAD0,
+ .enable_reg = A_MAC_IOS_INTR_EN_QUAD0,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info mac_ios_int_cause_quad1 = {
+ .name = "MAC_IOS_INTR_CAUSE_QUAD1",
+ .cause_reg = A_MAC_IOS_INTR_CAUSE_QUAD1,
+ .enable_reg = A_MAC_IOS_INTR_EN_QUAD1,
+ .fatal = 0,
+ .flags = 0,
+ .details = NULL,
+ .actions = NULL,
+ };
static const struct intr_details mac_intr_details[] = {
{ F_TXFIFO_PRTY_ERR, "MAC Tx FIFO parity error" },
{ F_RXFIFO_PRTY_ERR, "MAC Rx FIFO parity error" },
@@ -5928,6 +6390,9 @@ static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
struct intr_info ii;
bool fatal = false;
+ if (port > 1 && is_t6(adap))
+ return (false);
+
if (is_t4(adap)) {
snprintf(name, sizeof(name), "XGMAC_PORT%u_INT_CAUSE", port);
ii.name = &name[0];
@@ -5947,66 +6412,79 @@ static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
ii.details = mac_intr_details;
ii.actions = NULL;
} else {
- snprintf(name, sizeof(name), "T7_MAC_PORT%u_INT_CAUSE", port);
+ snprintf(name, sizeof(name), "MAC_PORT%u_INT_CAUSE", port);
ii.name = &name[0];
ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_INT_CAUSE);
ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_INT_EN);
- ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
- ii.flags = 0;
- ii.details = mac_intr_details;
+ ii.fatal = 0xffffffff;
+ ii.flags = IHF_FATAL_IFF_ENABLED;
+ ii.details = NULL;
ii.actions = NULL;
}
- fatal |= t4_handle_intr(adap, &ii, 0, verbose);
+ fatal |= t4_handle_intr(adap, &ii, 0, flags);
+ if (is_t4(adap))
+ return (fatal);
+ MPASS(chip_id(adap) >= CHELSIO_T5);
+ snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE", port);
if (chip_id(adap) > CHELSIO_T6) {
- snprintf(name, sizeof(name), "T7_MAC_PORT%u_PERR_INT_CAUSE", port);
ii.name = &name[0];
ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_CAUSE);
ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_EN);
- ii.fatal = 0;
- ii.flags = 0;
+ ii.fatal = 0xffffffff;
+ ii.flags = IHF_FATAL_IFF_ENABLED;
ii.details = NULL;
ii.actions = NULL;
- fatal |= t4_handle_intr(adap, &ii, 0, verbose);
- } else if (chip_id(adap) >= CHELSIO_T5) {
- snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE", port);
+ } else {
ii.name = &name[0];
ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE);
ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_EN);
- ii.fatal = 0;
- ii.flags = 0;
+ ii.fatal = 0xffffffff;
+ ii.flags = IHF_FATAL_IFF_ENABLED;
ii.details = NULL;
ii.actions = NULL;
- fatal |= t4_handle_intr(adap, &ii, 0, verbose);
}
+ fatal |= t4_handle_intr(adap, &ii, 0, flags);
+ if (is_t5(adap))
+ return (fatal);
+ MPASS(chip_id(adap) >= CHELSIO_T6);
+ snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE_100G", port);
if (chip_id(adap) > CHELSIO_T6) {
- snprintf(name, sizeof(name), "T7_MAC_PORT%u_PERR_INT_CAUSE_100G", port);
ii.name = &name[0];
ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_CAUSE_100G);
ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_EN_100G);
- ii.fatal = 0;
- ii.flags = 0;
+ ii.fatal = 0xffffffff;
+ ii.flags = IHF_FATAL_IFF_ENABLED;
ii.details = NULL;
ii.actions = NULL;
- fatal |= t4_handle_intr(adap, &ii, 0, verbose);
- } else if (is_t6(adap)) {
- snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE_100G", port);
+ } else {
ii.name = &name[0];
ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE_100G);
ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_EN_100G);
- ii.fatal = 0;
- ii.flags = 0;
+ ii.fatal = 0xffffffff;
+ ii.flags = IHF_FATAL_IFF_ENABLED;
ii.details = NULL;
ii.actions = NULL;
- fatal |= t4_handle_intr(adap, &ii, 0, verbose);
}
+ fatal |= t4_handle_intr(adap, &ii, 0, flags);
+ if (is_t6(adap))
+ return (fatal);
+
+ MPASS(chip_id(adap) >= CHELSIO_T7);
+ fatal |= t4_handle_intr(adap, &mac_int_cause_cmn, 0, flags);
+ fatal |= t4_handle_intr(adap, &mac_perr_cause_mtip, 0, flags);
+ fatal |= t4_handle_intr(adap, &mac_cerr_cause_mtip, 0, flags);
+ fatal |= t4_handle_intr(adap, &mac_ios_int_cause_quad0, 0, flags);
+ fatal |= t4_handle_intr(adap, &mac_ios_int_cause_quad1, 0, flags);
return (fatal);
}
-static bool pl_timeout_status(struct adapter *adap, int arg, bool verbose)
+static bool pl_timeout_status(struct adapter *adap, int arg, int flags)
{
+ if (flags & IHF_NO_SHOW)
+ return (false);
CH_ALERT(adap, " PL_TIMEOUT_STATUS 0x%08x 0x%08x\n",
t4_read_reg(adap, A_PL_TIMEOUT_STATUS0),
@@ -6015,13 +6493,9 @@ static bool pl_timeout_status(struct adapter *adap, int arg, bool verbose)
return (false);
}
-static bool plpl_intr_handler(struct adapter *adap, int arg, bool verbose)
+static bool plpl_intr_handler(struct adapter *adap, int arg, int flags)
{
- static const struct intr_action plpl_intr_actions[] = {
- { F_TIMEOUT, 0, pl_timeout_status },
- { 0 },
- };
- static const struct intr_details plpl_intr_details[] = {
+ static const struct intr_details plpl_int_cause_fields[] = {
{ F_PL_BUSPERR, "Bus parity error" },
{ F_FATALPERR, "Fatal parity error" },
{ F_INVALIDACCESS, "Global reserved memory access" },
@@ -6030,31 +6504,397 @@ static bool plpl_intr_handler(struct adapter *adap, int arg, bool verbose)
{ F_PERRVFID, "VFID_MAP parity error" },
{ 0 }
};
- static const struct intr_info plpl_intr_info = {
+ static const struct intr_action plpl_int_cause_actions[] = {
+ { F_TIMEOUT, -1, pl_timeout_status },
+ { 0 },
+ };
+ static const struct intr_info plpl_int_cause = {
.name = "PL_PL_INT_CAUSE",
.cause_reg = A_PL_PL_INT_CAUSE,
.enable_reg = A_PL_PL_INT_ENABLE,
.fatal = F_FATALPERR | F_PERRVFID,
- .flags = NONFATAL_IF_DISABLED,
- .details = plpl_intr_details,
- .actions = plpl_intr_actions,
+ .flags = IHF_FATAL_IFF_ENABLED | IHF_IGNORE_IF_DISABLED,
+ .details = plpl_int_cause_fields,
+ .actions = plpl_int_cause_actions,
+ };
+
+ return (t4_handle_intr(adap, &plpl_int_cause, 0, flags));
+}
+
+/* similar to t4_port_reg */
+static inline u32
+t7_tlstx_reg(u8 instance, u8 channel, u32 reg)
+{
+ MPASS(instance <= 1);
+ MPASS(channel < NUM_TLS_TX_CH_INSTANCES);
+ return (instance * (CRYPTO_1_BASE_ADDR - CRYPTO_0_BASE_ADDR) +
+ TLS_TX_CH_REG(reg, channel));
+}
+
+/*
+ * CRYPTO (aka TLS_TX) interrupt handler.
+ */
+static bool tlstx_intr_handler(struct adapter *adap, int idx, int flags)
+{
+ static const struct intr_details tlstx_int_cause_fields[] = {
+ { F_KEX_CERR, "KEX SRAM Correctable error" },
+ { F_KEYLENERR, "IPsec Key length error" },
+ { F_INTF1_PERR, "Input Interface1 parity error" },
+ { F_INTF0_PERR, "Input Interface0 parity error" },
+ { F_KEX_PERR, "KEX SRAM Parity error" },
+ { 0 }
+ };
+ struct intr_info ii = {
+ .fatal = F_KEX_PERR | F_INTF0_PERR | F_INTF1_PERR,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = tlstx_int_cause_fields,
+ .actions = NULL,
+ };
+ char name[32];
+ int ch;
+ bool fatal = false;
+
+ for (ch = 0; ch < NUM_TLS_TX_CH_INSTANCES; ch++) {
+ snprintf(name, sizeof(name), "TLSTX%u_CH%u_INT_CAUSE", idx, ch);
+ ii.name = &name[0];
+ ii.cause_reg = t7_tlstx_reg(idx, ch, A_TLS_TX_CH_INT_CAUSE);
+ ii.enable_reg = t7_tlstx_reg(idx, ch, A_TLS_TX_CH_INT_ENABLE);
+ fatal |= t4_handle_intr(adap, &ii, 0, flags);
+ }
+
+ return (fatal);
+}
+
+/*
+ * HMA interrupt handler.
+ */
+static bool hma_intr_handler(struct adapter *adap, int idx, int flags)
+{
+ static const struct intr_details hma_int_cause_fields[] = {
+ { F_GK_UF_INT_CAUSE, "Gatekeeper underflow" },
+ { F_IDTF_INT_CAUSE, "Invalid descriptor fault" },
+ { F_OTF_INT_CAUSE, "Offset translation fault" },
+ { F_RTF_INT_CAUSE, "Region translation fault" },
+ { F_PCIEMST_INT_CAUSE, "PCIe master access error" },
+ { F_MAMST_INT_CAUSE, "MA master access error" },
+ { 1, "FIFO parity error" },
+ { 0 }
+ };
+ static const struct intr_info hma_int_cause = {
+ .name = "HMA_INT_CAUSE",
+ .cause_reg = A_HMA_INT_CAUSE,
+ .enable_reg = A_HMA_INT_ENABLE,
+ .fatal = 7,
+ .flags = 0,
+ .details = hma_int_cause_fields,
+ .actions = NULL,
+ };
+
+ return (t4_handle_intr(adap, &hma_int_cause, 0, flags));
+}
+
+/*
+ * CRYPTO_KEY interrupt handler.
+ */
+static bool cryptokey_intr_handler(struct adapter *adap, int idx, int flags)
+{
+ static const struct intr_details cryptokey_int_cause_fields[] = {
+ { F_MA_FIFO_PERR, "MA arbiter FIFO parity error" },
+ { F_MA_RSP_PERR, "MA response IF parity error" },
+ { F_ING_CACHE_DATA_PERR, "Ingress key cache data parity error" },
+ { F_ING_CACHE_TAG_PERR, "Ingress key cache tag parity error" },
+ { F_LKP_KEY_REQ_PERR, "Ingress key req parity error" },
+ { F_LKP_CLIP_TCAM_PERR, "Ingress LKP CLIP TCAM parity error" },
+ { F_LKP_MAIN_TCAM_PERR, "Ingress LKP main TCAM parity error" },
+ { F_EGR_KEY_REQ_PERR, "Egress key req or FIFO3 parity error" },
+ { F_EGR_CACHE_DATA_PERR, "Egress key cache data parity error" },
+ { F_EGR_CACHE_TAG_PERR, "Egress key cache tag parity error" },
+ { F_CIM_PERR, "CIM interface parity error" },
+ { F_MA_INV_RSP_TAG, "MA invalid response tag" },
+ { F_ING_KEY_RANGE_ERR, "Ingress key range error" },
+ { F_ING_MFIFO_OVFL, "Ingress MFIFO overflow" },
+ { F_LKP_REQ_OVFL, "Ingress lookup FIFO overflow" },
+ { F_EOK_WAIT_ERR, "EOK wait error" },
+ { F_EGR_KEY_RANGE_ERR, "Egress key range error" },
+ { F_EGR_MFIFO_OVFL, "Egress MFIFO overflow" },
+ { F_SEQ_WRAP_HP_OVFL, "Sequence wrap (hi-pri)" },
+ { F_SEQ_WRAP_LP_OVFL, "Sequence wrap (lo-pri)" },
+ { F_EGR_SEQ_WRAP_HP, "Egress sequence wrap (hi-pri)" },
+ { F_EGR_SEQ_WRAP_LP, "Egress sequence wrap (lo-pri)" },
+ { 0 }
+ };
+ static const struct intr_info cryptokey_int_cause = {
+ .name = "CRYPTO_KEY_INT_CAUSE",
+ .cause_reg = A_CRYPTO_KEY_INT_CAUSE,
+ .enable_reg = A_CRYPTO_KEY_INT_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = cryptokey_int_cause_fields,
+ .actions = NULL,
+ };
+
+ return (t4_handle_intr(adap, &cryptokey_int_cause, 0, flags));
+}
+
+/*
+ * GCACHE interrupt handler.
+ */
+static bool gcache_intr_handler(struct adapter *adap, int idx, int flags)
+{
+ static const struct intr_details gcache_int_cause_fields[] = {
+ { F_GC1_SRAM_RSP_DATAQ_PERR_INT_CAUSE, "GC1 SRAM rsp dataq perr" },
+ { F_GC0_SRAM_RSP_DATAQ_PERR_INT_CAUSE, "GC0 SRAM rsp dataq perr" },
+ { F_GC1_WQDATA_FIFO_PERR_INT_CAUSE, "GC1 wqdata FIFO perr" },
+ { F_GC0_WQDATA_FIFO_PERR_INT_CAUSE, "GC0 wqdata FIFO perr" },
+ { F_GC1_RDTAG_QUEUE_PERR_INT_CAUSE, "GC1 rdtag queue perr" },
+ { F_GC0_RDTAG_QUEUE_PERR_INT_CAUSE, "GC0 rdtag queue perr" },
+ { F_GC1_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE, "GC1 SRAM rdtag queue perr" },
+ { F_GC0_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE, "GC0 SRAM rdtag queue perr" },
+ { F_GC1_RSP_PERR_INT_CAUSE, "GC1 rsp perr" },
+ { F_GC0_RSP_PERR_INT_CAUSE, "GC0 rsp perr" },
+ { F_GC1_LRU_UERR_INT_CAUSE, "GC1 lru uerr" },
+ { F_GC0_LRU_UERR_INT_CAUSE, "GC0 lru uerr" },
+ { F_GC1_TAG_UERR_INT_CAUSE, "GC1 tag uerr" },
+ { F_GC0_TAG_UERR_INT_CAUSE, "GC0 tag uerr" },
+ { F_GC1_LRU_CERR_INT_CAUSE, "GC1 lru cerr" },
+ { F_GC0_LRU_CERR_INT_CAUSE, "GC0 lru cerr" },
+ { F_GC1_TAG_CERR_INT_CAUSE, "GC1 tag cerr" },
+ { F_GC0_TAG_CERR_INT_CAUSE, "GC0 tag cerr" },
+ { F_GC1_CE_INT_CAUSE, "GC1 correctable error" },
+ { F_GC0_CE_INT_CAUSE, "GC0 correctable error" },
+ { F_GC1_UE_INT_CAUSE, "GC1 uncorrectable error" },
+ { F_GC0_UE_INT_CAUSE, "GC0 uncorrectable error" },
+ { F_GC1_CMD_PAR_INT_CAUSE, "GC1 cmd perr" },
+ { F_GC1_DATA_PAR_INT_CAUSE, "GC1 data perr" },
+ { F_GC0_CMD_PAR_INT_CAUSE, "GC0 cmd perr" },
+ { F_GC0_DATA_PAR_INT_CAUSE, "GC0 data perr" },
+ { F_ILLADDRACCESS1_INT_CAUSE, "GC1 illegal address access" },
+ { F_ILLADDRACCESS0_INT_CAUSE, "GC0 illegal address access" },
+ { 0 }
+ };
+ static const struct intr_info gcache_perr_cause = {
+ .name = "GCACHE_PAR_CAUSE",
+ .cause_reg = A_GCACHE_PAR_CAUSE,
+ .enable_reg = A_GCACHE_PAR_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info gcache_int_cause = {
+ .name = "GCACHE_INT_CAUSE",
+ .cause_reg = A_GCACHE_INT_CAUSE,
+ .enable_reg = A_GCACHE_INT_ENABLE,
+ .fatal = 0,
+ .flags = 0,
+ .details = gcache_int_cause_fields,
+ .actions = NULL,
+ };
+ bool fatal = false;
+
+ fatal |= t4_handle_intr(adap, &gcache_int_cause, 0, flags);
+ fatal |= t4_handle_intr(adap, &gcache_perr_cause, 0, flags);
+
+ return (fatal);
+}
+
+/*
+ * ARM interrupt handler.
+ */
+static bool arm_intr_handler(struct adapter *adap, int idx, int flags)
+{
+ static const struct intr_info arm_perr_cause0 = {
+ .name = "ARM_PERR_INT_CAUSE0",
+ .cause_reg = A_ARM_PERR_INT_CAUSE0,
+ .enable_reg = A_ARM_PERR_INT_ENB0,
+ .fatal = 0xffffffff,
+ .flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info arm_perr_cause1 = {
+ .name = "ARM_PERR_INT_CAUSE1",
+ .cause_reg = A_ARM_PERR_INT_CAUSE1,
+ .enable_reg = A_ARM_PERR_INT_ENB1,
+ .fatal = 0xffffffff,
+ .flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info arm_perr_cause2 = {
+ .name = "ARM_PERR_INT_CAUSE2",
+ .cause_reg = A_ARM_PERR_INT_CAUSE2,
+ .enable_reg = A_ARM_PERR_INT_ENB2,
+ .fatal = 0xffffffff,
+ .flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info arm_cerr_cause0 = {
+ .name = "ARM_CERR_INT_CAUSE",
+ .cause_reg = A_ARM_CERR_INT_CAUSE0,
+ .enable_reg = A_ARM_CERR_INT_ENB0,
+ .fatal = 0,
+ .flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
};
+ static const struct intr_info arm_err_cause0 = {
+ .name = "ARM_ERR_INT_CAUSE",
+ .cause_reg = A_ARM_ERR_INT_CAUSE0,
+ .enable_reg = A_ARM_ERR_INT_ENB0,
+ .fatal = 0,
+ .flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info arm_periph_cause = {
+ .name = "ARM_PERIPHERAL_INT_CAUSE",
+ .cause_reg = A_ARM_PERIPHERAL_INT_CAUSE,
+ .enable_reg = A_ARM_PERIPHERAL_INT_ENB,
+ .fatal = 0,
+ .flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ static const struct intr_info arm_nvme_db_emu_cause = {
+ .name = "ARM_NVME_DB_EMU_INT_CAUSE",
+ .cause_reg = A_ARM_NVME_DB_EMU_INT_CAUSE,
+ .enable_reg = A_ARM_NVME_DB_EMU_INT_ENABLE,
+ .fatal = 0,
+ .flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
+ .details = NULL,
+ .actions = NULL,
+ };
+ bool fatal = false;
+
+ fatal |= t4_handle_intr(adap, &arm_perr_cause0, 0, flags);
+ fatal |= t4_handle_intr(adap, &arm_perr_cause1, 0, flags);
+ fatal |= t4_handle_intr(adap, &arm_perr_cause2, 0, flags);
+ fatal |= t4_handle_intr(adap, &arm_cerr_cause0, 0, flags);
+ fatal |= t4_handle_intr(adap, &arm_err_cause0, 0, flags);
+ fatal |= t4_handle_intr(adap, &arm_periph_cause, 0, flags);
+ fatal |= t4_handle_intr(adap, &arm_nvme_db_emu_cause, 0, flags);
+
+ return (fatal);
+}
+
+static inline uint32_t
+get_perr_ucause(struct adapter *sc, const struct intr_info *ii)
+{
+ uint32_t cause;
- return (t4_handle_intr(adap, &plpl_intr_info, 0, verbose));
+ cause = t4_read_reg(sc, ii->cause_reg);
+ if (ii->flags & IHF_IGNORE_IF_DISABLED)
+ cause &= t4_read_reg(sc, ii->enable_reg);
+ return (cause);
+}
+
+static uint32_t
+t4_perr_to_ic(struct adapter *adap, uint32_t perr)
+{
+ uint32_t mask;
+
+ if (adap->chip_params->nchan > 2)
+ mask = F_MAC0 | F_MAC1 | F_MAC2 | F_MAC3;
+ else
+ mask = F_MAC0 | F_MAC1;
+ return (perr & mask ? perr | mask : perr);
+}
+
+static uint32_t
+t7_perr_to_ic1(uint32_t perr)
+{
+ uint32_t cause = 0;
+
+ if (perr & F_T7_PL_PERR_ULP_TX)
+ cause |= F_T7_ULP_TX;
+ if (perr & F_T7_PL_PERR_SGE)
+ cause |= F_T7_SGE;
+ if (perr & F_T7_PL_PERR_HMA)
+ cause |= F_T7_HMA;
+ if (perr & F_T7_PL_PERR_CPL_SWITCH)
+ cause |= F_T7_CPL_SWITCH;
+ if (perr & F_T7_PL_PERR_ULP_RX)
+ cause |= F_T7_ULP_RX;
+ if (perr & F_T7_PL_PERR_PM_RX)
+ cause |= F_T7_PM_RX;
+ if (perr & F_T7_PL_PERR_PM_TX)
+ cause |= F_T7_PM_TX;
+ if (perr & F_T7_PL_PERR_MA)
+ cause |= F_T7_MA;
+ if (perr & F_T7_PL_PERR_TP)
+ cause |= F_T7_TP;
+ if (perr & F_T7_PL_PERR_LE)
+ cause |= F_T7_LE;
+ if (perr & F_T7_PL_PERR_EDC1)
+ cause |= F_T7_EDC1;
+ if (perr & F_T7_PL_PERR_EDC0)
+ cause |= F_T7_EDC0;
+ if (perr & F_T7_PL_PERR_MC1)
+ cause |= F_T7_MC1;
+ if (perr & F_T7_PL_PERR_MC0)
+ cause |= F_T7_MC0;
+ if (perr & F_T7_PL_PERR_PCIE)
+ cause |= F_T7_PCIE;
+ if (perr & F_T7_PL_PERR_UART)
+ cause |= F_T7_UART;
+ if (perr & F_T7_PL_PERR_PMU)
+ cause |= F_PMU;
+ if (perr & F_T7_PL_PERR_MAC)
+ cause |= F_MAC0 | F_MAC1 | F_MAC2 | F_MAC3;
+ if (perr & F_T7_PL_PERR_SMB)
+ cause |= F_SMB;
+ if (perr & F_T7_PL_PERR_SF)
+ cause |= F_SF;
+ if (perr & F_T7_PL_PERR_PL)
+ cause |= F_PL;
+ if (perr & F_T7_PL_PERR_NCSI)
+ cause |= F_NCSI;
+ if (perr & F_T7_PL_PERR_MPS)
+ cause |= F_MPS;
+ if (perr & F_T7_PL_PERR_MI)
+ cause |= F_MI;
+ if (perr & F_T7_PL_PERR_DBG)
+ cause |= F_DBG;
+ if (perr & F_T7_PL_PERR_I2CM)
+ cause |= F_I2CM;
+ if (perr & F_T7_PL_PERR_CIM)
+ cause |= F_CIM;
+
+ return (cause);
+}
+
+static uint32_t
+t7_perr_to_ic2(uint32_t perr)
+{
+ uint32_t cause = 0;
+
+ if (perr & F_T7_PL_PERR_CRYPTO_KEY)
+ cause |= F_CRYPTO_KEY;
+ if (perr & F_T7_PL_PERR_CRYPTO1)
+ cause |= F_CRYPTO1;
+ if (perr & F_T7_PL_PERR_CRYPTO0)
+ cause |= F_CRYPTO0;
+ if (perr & F_T7_PL_PERR_GCACHE)
+ cause |= F_GCACHE;
+ if (perr & F_T7_PL_PERR_ARM)
+ cause |= F_ARM;
+
+ return (cause);
}
/**
* t4_slow_intr_handler - control path interrupt handler
* @adap: the adapter
- * @verbose: increased verbosity, for debug
*
* T4 interrupt handler for non-data global interrupt events, e.g., errors.
* The designation 'slow' is because it involves register reads, while
* data interrupts typically don't involve any MMIOs.
*/
-bool t4_slow_intr_handler(struct adapter *adap, bool verbose)
+bool t4_slow_intr_handler(struct adapter *adap, int flags)
{
- static const struct intr_details pl_intr_details[] = {
+ static const struct intr_details pl_int_cause_fields[] = {
{ F_MC1, "MC1" },
{ F_UART, "UART" },
{ F_ULP_TX, "ULP TX" },
@@ -6087,10 +6927,56 @@ bool t4_slow_intr_handler(struct adapter *adap, bool verbose)
{ F_CIM, "CIM" },
{ 0 }
};
- static const struct intr_details t7_pl_intr_details[] = {
- { F_T7_MC1, "MC1" },
+ static const struct intr_action pl_int_cause_actions[] = {
+ { F_ULP_TX, -1, ulptx_intr_handler },
+ { F_SGE, -1, sge_intr_handler },
+ { F_CPL_SWITCH, -1, cplsw_intr_handler },
+ { F_ULP_RX, -1, ulprx_intr_handler },
+		{ F_PM_RX, -1, pmrx_intr_handler },
+ { F_PM_TX, -1, pmtx_intr_handler },
+ { F_MA, -1, ma_intr_handler },
+ { F_TP, -1, tp_intr_handler },
+ { F_LE, -1, le_intr_handler },
+ { F_EDC0, MEM_EDC0, mem_intr_handler },
+ { F_EDC1, MEM_EDC1, mem_intr_handler },
+ { F_MC0, MEM_MC0, mem_intr_handler },
+ { F_MC1, MEM_MC1, mem_intr_handler },
+ { F_PCIE, -1, pcie_intr_handler },
+ { F_MAC0, 0, mac_intr_handler },
+ { F_MAC1, 1, mac_intr_handler },
+ { F_MAC2, 2, mac_intr_handler },
+ { F_MAC3, 3, mac_intr_handler },
+ { F_SMB, -1, smb_intr_handler },
+ { F_PL, -1, plpl_intr_handler },
+ { F_NCSI, -1, ncsi_intr_handler },
+ { F_MPS, -1, mps_intr_handler },
+ { F_CIM, -1, cim_intr_handler },
+ { 0 }
+ };
+ static const struct intr_info pl_int_cause = {
+ .name = "PL_INT_CAUSE",
+ .cause_reg = A_PL_INT_CAUSE,
+ .enable_reg = A_PL_INT_ENABLE,
+ .fatal = 0,
+ .flags = IHF_IGNORE_IF_DISABLED,
+ .details = pl_int_cause_fields,
+ .actions = pl_int_cause_actions,
+ };
+ static const struct intr_info pl_perr_cause = {
+ .name = "PL_PERR_CAUSE",
+ .cause_reg = A_PL_PERR_CAUSE,
+ .enable_reg = A_PL_PERR_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
+ .details = pl_int_cause_fields,
+ .actions = NULL,
+ };
+ static const struct intr_details t7_pl_int_cause_fields[] = {
+ { F_T7_FLR, "FLR" },
+ { F_T7_SW_CIM, "SW CIM" },
{ F_T7_ULP_TX, "ULP TX" },
{ F_T7_SGE, "SGE" },
+ { F_T7_HMA, "HMA" },
{ F_T7_CPL_SWITCH, "CPL Switch" },
{ F_T7_ULP_RX, "ULP RX" },
{ F_T7_PM_RX, "PM RX" },
@@ -6100,117 +6986,165 @@ bool t4_slow_intr_handler(struct adapter *adap, bool verbose)
{ F_T7_LE, "LE" },
{ F_T7_EDC1, "EDC1" },
{ F_T7_EDC0, "EDC0" },
+ { F_T7_MC1, "MC1" },
{ F_T7_MC0, "MC0" },
{ F_T7_PCIE, "PCIE" },
+ { F_T7_UART, "UART" },
+ { F_PMU, "PMU" },
{ F_MAC3, "MAC3" },
{ F_MAC2, "MAC2" },
{ F_MAC1, "MAC1" },
{ F_MAC0, "MAC0" },
{ F_SMB, "SMB" },
+ { F_SF, "SF" },
{ F_PL, "PL" },
{ F_NCSI, "NC-SI" },
{ F_MPS, "MPS" },
+ { F_MI, "MI" },
{ F_DBG, "DBG" },
{ F_I2CM, "I2CM" },
- { F_MI, "MI" },
{ F_CIM, "CIM" },
{ 0 }
};
- struct intr_info pl_perr_cause = {
- .name = "PL_PERR_CAUSE",
- .cause_reg = A_PL_PERR_CAUSE,
- .enable_reg = A_PL_PERR_ENABLE,
- .fatal = 0xffffffff,
- .flags = NONFATAL_IF_DISABLED,
- .details = NULL,
- .actions = NULL,
- };
- static const struct intr_action pl_intr_action[] = {
- { F_MC1, MEM_MC1, mem_intr_handler },
- { F_ULP_TX, -1, ulptx_intr_handler },
- { F_SGE, -1, sge_intr_handler },
- { F_CPL_SWITCH, -1, cplsw_intr_handler },
- { F_ULP_RX, -1, ulprx_intr_handler },
- { F_PM_RX, -1, pmrx_intr_handler},
- { F_PM_TX, -1, pmtx_intr_handler},
- { F_MA, -1, ma_intr_handler },
- { F_TP, -1, tp_intr_handler },
- { F_LE, -1, le_intr_handler },
- { F_EDC1, MEM_EDC1, mem_intr_handler },
- { F_EDC0, MEM_EDC0, mem_intr_handler },
- { F_MC0, MEM_MC0, mem_intr_handler },
- { F_PCIE, -1, pcie_intr_handler },
- { F_MAC3, 3, mac_intr_handler},
- { F_MAC2, 2, mac_intr_handler},
- { F_MAC1, 1, mac_intr_handler},
- { F_MAC0, 0, mac_intr_handler},
- { F_SMB, -1, smb_intr_handler},
- { F_PL, -1, plpl_intr_handler },
- { F_NCSI, -1, ncsi_intr_handler},
- { F_MPS, -1, mps_intr_handler },
- { F_CIM, -1, cim_intr_handler },
- { 0 }
- };
- static const struct intr_action t7_pl_intr_action[] = {
+ static const struct intr_action t7_pl_int_cause_actions[] = {
{ F_T7_ULP_TX, -1, ulptx_intr_handler },
{ F_T7_SGE, -1, sge_intr_handler },
+ { F_T7_HMA, -1, hma_intr_handler },
{ F_T7_CPL_SWITCH, -1, cplsw_intr_handler },
{ F_T7_ULP_RX, -1, ulprx_intr_handler },
- { F_T7_PM_RX, -1, pmrx_intr_handler},
- { F_T7_PM_TX, -1, pmtx_intr_handler},
+ { F_T7_PM_RX, -1, pmrx_intr_handler },
+ { F_T7_PM_TX, -1, pmtx_intr_handler },
{ F_T7_MA, -1, ma_intr_handler },
{ F_T7_TP, -1, tp_intr_handler },
{ F_T7_LE, -1, le_intr_handler },
- { F_T7_EDC1, MEM_EDC1, mem_intr_handler },
{ F_T7_EDC0, MEM_EDC0, mem_intr_handler },
- { F_T7_MC1, MEM_MC1, mem_intr_handler },
+ { F_T7_EDC1, MEM_EDC1, mem_intr_handler },
{ F_T7_MC0, MEM_MC0, mem_intr_handler },
+ { F_T7_MC1, MEM_MC1, mem_intr_handler },
{ F_T7_PCIE, -1, pcie_intr_handler },
- { F_MAC3, 3, mac_intr_handler},
- { F_MAC2, 2, mac_intr_handler},
- { F_MAC1, 1, mac_intr_handler},
- { F_MAC0, 0, mac_intr_handler},
- { F_SMB, -1, smb_intr_handler},
+ { F_MAC0, 0, mac_intr_handler },
+ { F_MAC1, 1, mac_intr_handler },
+ { F_MAC2, 2, mac_intr_handler },
+ { F_MAC3, 3, mac_intr_handler },
+ { F_SMB, -1, smb_intr_handler },
{ F_PL, -1, plpl_intr_handler },
- { F_NCSI, -1, ncsi_intr_handler},
+ { F_NCSI, -1, ncsi_intr_handler },
{ F_MPS, -1, mps_intr_handler },
{ F_CIM, -1, cim_intr_handler },
{ 0 }
};
- struct intr_info pl_intr_info = {
+ static const struct intr_info t7_pl_int_cause = {
.name = "PL_INT_CAUSE",
.cause_reg = A_PL_INT_CAUSE,
.enable_reg = A_PL_INT_ENABLE,
.fatal = 0,
- .flags = 0,
- .details = NULL,
+ .flags = IHF_IGNORE_IF_DISABLED,
+ .details = t7_pl_int_cause_fields,
+ .actions = t7_pl_int_cause_actions,
+ };
+ static const struct intr_details t7_pl_int_cause2_fields[] = {
+ { F_CRYPTO_KEY, "CRYPTO KEY" },
+ { F_CRYPTO1, "CRYPTO1" },
+ { F_CRYPTO0, "CRYPTO0" },
+ { F_GCACHE, "GCACHE" },
+ { F_ARM, "ARM" },
+ { 0 }
+ };
+ static const struct intr_action t7_pl_int_cause2_actions[] = {
+ { F_CRYPTO_KEY, -1, cryptokey_intr_handler },
+ { F_CRYPTO1, 1, tlstx_intr_handler },
+ { F_CRYPTO0, 0, tlstx_intr_handler },
+ { F_GCACHE, -1, gcache_intr_handler },
+ { F_ARM, -1, arm_intr_handler },
+ { 0 }
+ };
+ static const struct intr_info t7_pl_int_cause2 = {
+ .name = "PL_INT_CAUSE2",
+ .cause_reg = A_PL_INT_CAUSE2,
+ .enable_reg = A_PL_INT_ENABLE2,
+ .fatal = 0,
+ .flags = IHF_IGNORE_IF_DISABLED,
+ .details = t7_pl_int_cause2_fields,
+ .actions = t7_pl_int_cause2_actions,
+ };
+ static const struct intr_details t7_pl_perr_cause_fields[] = {
+ { F_T7_PL_PERR_CRYPTO_KEY, "CRYPTO KEY" },
+ { F_T7_PL_PERR_CRYPTO1, "CRYPTO1" },
+ { F_T7_PL_PERR_CRYPTO0, "CRYPTO0" },
+ { F_T7_PL_PERR_GCACHE, "GCACHE" },
+ { F_T7_PL_PERR_ARM, "ARM" },
+ { F_T7_PL_PERR_ULP_TX, "ULP TX" },
+ { F_T7_PL_PERR_SGE, "SGE" },
+ { F_T7_PL_PERR_HMA, "HMA" },
+ { F_T7_PL_PERR_CPL_SWITCH, "CPL Switch" },
+ { F_T7_PL_PERR_ULP_RX, "ULP RX" },
+ { F_T7_PL_PERR_PM_RX, "PM RX" },
+ { F_T7_PL_PERR_PM_TX, "PM TX" },
+ { F_T7_PL_PERR_MA, "MA" },
+ { F_T7_PL_PERR_TP, "TP" },
+ { F_T7_PL_PERR_LE, "LE" },
+ { F_T7_PL_PERR_EDC1, "EDC1" },
+ { F_T7_PL_PERR_EDC0, "EDC0" },
+ { F_T7_PL_PERR_MC1, "MC1" },
+ { F_T7_PL_PERR_MC0, "MC0" },
+ { F_T7_PL_PERR_PCIE, "PCIE" },
+ { F_T7_PL_PERR_UART, "UART" },
+ { F_T7_PL_PERR_PMU, "PMU" },
+ { F_T7_PL_PERR_MAC, "MAC" },
+ { F_T7_PL_PERR_SMB, "SMB" },
+ { F_T7_PL_PERR_SF, "SF" },
+ { F_T7_PL_PERR_PL, "PL" },
+ { F_T7_PL_PERR_NCSI, "NC-SI" },
+ { F_T7_PL_PERR_MPS, "MPS" },
+ { F_T7_PL_PERR_MI, "MI" },
+ { F_T7_PL_PERR_DBG, "DBG" },
+ { F_T7_PL_PERR_I2CM, "I2CM" },
+ { F_T7_PL_PERR_CIM, "CIM" },
+ { 0 }
+ };
+ static const struct intr_info t7_pl_perr_cause = {
+ .name = "PL_PERR_CAUSE",
+ .cause_reg = A_PL_PERR_CAUSE,
+ .enable_reg = A_PL_PERR_ENABLE,
+ .fatal = 0xffffffff,
+ .flags = IHF_IGNORE_IF_DISABLED | IHF_FATAL_IFF_ENABLED,
+ .details = t7_pl_perr_cause_fields,
.actions = NULL,
};
- u32 perr;
-
- if (chip_id(adap) >= CHELSIO_T7) {
- pl_perr_cause.details = t7_pl_intr_details;
- pl_intr_info.details = t7_pl_intr_details;
- pl_intr_info.actions = t7_pl_intr_action;
+ bool fatal = false;
+ uint32_t perr;
+
+ if (chip_id(adap) < CHELSIO_T7) {
+ perr = get_perr_ucause(adap, &pl_perr_cause);
+ fatal |= t4_handle_intr(adap, &pl_perr_cause, 0,
+ flags & ~(IHF_CLR_ALL_SET | IHF_CLR_ALL_UNIGNORED));
+ fatal |= t4_handle_intr(adap, &pl_int_cause,
+ t4_perr_to_ic(adap, perr), flags);
+ t4_write_reg(adap, pl_perr_cause.cause_reg, perr);
+ (void)t4_read_reg(adap, pl_perr_cause.cause_reg);
} else {
- pl_perr_cause.details = pl_intr_details;
- pl_intr_info.details = pl_intr_details;
- pl_intr_info.actions = pl_intr_action;
+ perr = get_perr_ucause(adap, &t7_pl_perr_cause);
+ fatal |= t4_handle_intr(adap, &t7_pl_perr_cause, 0,
+ flags & ~(IHF_CLR_ALL_SET | IHF_CLR_ALL_UNIGNORED));
+ fatal |= t4_handle_intr(adap, &t7_pl_int_cause,
+ t7_perr_to_ic1(perr), flags);
+ fatal |= t4_handle_intr(adap, &t7_pl_int_cause2,
+ t7_perr_to_ic2(perr), flags);
+ t4_write_reg(adap, t7_pl_perr_cause.cause_reg, perr);
+ (void)t4_read_reg(adap, t7_pl_perr_cause.cause_reg);
}
-
- perr = t4_read_reg(adap, pl_perr_cause.cause_reg);
- if (verbose || perr != 0) {
- t4_show_intr_info(adap, &pl_perr_cause, perr);
- if (perr != 0)
- t4_write_reg(adap, pl_perr_cause.cause_reg, perr);
- if (verbose)
- perr |= t4_read_reg(adap, pl_intr_info.enable_reg);
- }
-
- return (t4_handle_intr(adap, &pl_intr_info, perr, verbose));
+ return (fatal);
}
-#define PF_INTR_MASK (F_PFSW | F_PFCIM)
+void t4_intr_clear(struct adapter *adap)
+{
+#if 1
+ if (chip_id(adap) >= CHELSIO_T7)
+ t4_write_reg(adap, A_SGE_INT_CAUSE8, 0xffffffff);
+#endif
+ (void)t4_slow_intr_handler(adap,
+ IHF_NO_SHOW | IHF_RUN_ALL_ACTIONS | IHF_CLR_ALL_SET);
+}
/**
* t4_intr_enable - enable interrupts
@@ -6229,6 +7163,8 @@ void t4_intr_enable(struct adapter *adap)
{
u32 mask, val;
+ if (adap->intr_flags & IHF_INTR_CLEAR_ON_INIT)
+ t4_intr_clear(adap);
if (chip_id(adap) <= CHELSIO_T5)
val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT |
F_DBFIFO_LP_INT;
@@ -6241,8 +7177,14 @@ void t4_intr_enable(struct adapter *adap)
F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | F_EGRESS_SIZE_ERR;
mask = val;
t4_set_reg_field(adap, A_SGE_INT_ENABLE3, mask, val);
- t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
+ if (chip_id(adap) >= CHELSIO_T7)
+ t4_write_reg(adap, A_SGE_INT_ENABLE4, 0xffffffff);
+ t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), F_PFSW | F_PFCIM);
t4_set_reg_field(adap, A_PL_INT_ENABLE, F_SF | F_I2CM, 0);
+#if 1
+ if (chip_id(adap) >= CHELSIO_T7)
+ t4_set_reg_field(adap, A_PL_INT_ENABLE, F_MAC0 | F_MAC1 | F_MAC2 | F_MAC3, 0);
+#endif
t4_set_reg_field(adap, A_PL_INT_MAP0, 0, 1 << adap->pf);
}
@@ -6439,9 +7381,15 @@ int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
/* Read an RSS table row */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
- t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
- return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
- 5, 0, val);
+ if (chip_id(adap) < CHELSIO_T7) {
+ t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
+ return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE,
+ F_LKPTBLROWVLD, 1, 5, 0, val);
+ } else {
+ t4_write_reg(adap, A_TP_RSS_CONFIG_SRAM, 0xB0000 | row);
+ return t7_wait_sram_done(adap, A_TP_RSS_CONFIG_SRAM,
+ A_TP_RSS_LKP_TABLE, 5, 0, val);
+ }
}
/**
@@ -10178,7 +11126,7 @@ const struct chip_params *t4_get_chip_params(int chipid)
.vfcount = 256,
.sge_fl_db = 0,
.sge_ctxt_size = SGE_CTXT_SIZE_T7,
- .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
+ .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES * 3,
.rss_nentries = T7_RSS_NENTRIES,
.cim_la_size = CIMLA_SIZE_T6,
},
diff --git a/sys/dev/cxgbe/common/t4_msg.h b/sys/dev/cxgbe/common/t4_msg.h
index 0d12ccf2e910..214080964fbb 100644
--- a/sys/dev/cxgbe/common/t4_msg.h
+++ b/sys/dev/cxgbe/common/t4_msg.h
@@ -30,6 +30,7 @@
#define T4_MSG_H
enum cpl_opcodes {
+ CPL_TLS_TX_SCMD_FMT = 0x0,
CPL_PASS_OPEN_REQ = 0x1,
CPL_PASS_ACCEPT_RPL = 0x2,
CPL_ACT_OPEN_REQ = 0x3,
@@ -48,6 +49,8 @@ enum cpl_opcodes {
CPL_RTE_READ_REQ = 0x11,
CPL_L2T_WRITE_REQ = 0x12,
CPL_L2T_READ_REQ = 0x13,
+ CPL_GRE_TABLE_REQ = 0x1b,
+ CPL_GRE_TABLE_RPL = 0xbb,
CPL_SMT_WRITE_REQ = 0x14,
CPL_SMT_READ_REQ = 0x15,
CPL_TAG_WRITE_REQ = 0x16,
@@ -130,6 +133,7 @@ enum cpl_opcodes {
CPL_TX_TLS_SFO = 0x89,
CPL_TX_SEC_PDU = 0x8A,
CPL_TX_TLS_ACK = 0x8B,
+ CPL_TX_QUIC_ENC = 0x8d,
CPL_RCB_UPD = 0x8C,
CPL_SGE_FLR_FLUSH = 0xA0,
@@ -258,6 +262,7 @@ enum {
ULP_MODE_TCPDDP = 5,
ULP_MODE_FCOE = 6,
ULP_MODE_TLS = 8,
+ ULP_MODE_DTLS = 9,
ULP_MODE_RDMA_V2 = 10,
ULP_MODE_NVMET = 11,
};
@@ -1149,23 +1154,36 @@ struct cpl_get_tcb {
#define V_QUEUENO(x) ((x) << S_QUEUENO)
#define G_QUEUENO(x) (((x) >> S_QUEUENO) & M_QUEUENO)
-#define S_T7_QUEUENO 0
-#define M_T7_QUEUENO 0xFFF
-#define V_T7_QUEUENO(x) ((x) << S_T7_QUEUENO)
-#define G_T7_QUEUENO(x) (((x) >> S_T7_QUEUENO) & M_T7_QUEUENO)
-
#define S_REPLY_CHAN 14
#define V_REPLY_CHAN(x) ((x) << S_REPLY_CHAN)
#define F_REPLY_CHAN V_REPLY_CHAN(1U)
+#define S_NO_REPLY 15
+#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
+#define F_NO_REPLY V_NO_REPLY(1U)
+
+struct cpl_t7_get_tcb {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 rxchan_queue;
+ __be16 cookie_pkd;
+};
+
#define S_T7_REPLY_CHAN 12
#define M_T7_REPLY_CHAN 0x7
#define V_T7_REPLY_CHAN(x) ((x) << S_T7_REPLY_CHAN)
#define G_T7_REPLY_CHAN(x) (((x) >> S_T7_REPLY_CHAN) & M_T7_REPLY_CHAN)
-#define S_NO_REPLY 15
-#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
-#define F_NO_REPLY V_NO_REPLY(1U)
+#define S_T7_QUEUENO 0
+#define M_T7_QUEUENO 0xFFF
+#define V_T7_QUEUENO(x) ((x) << S_T7_QUEUENO)
+#define G_T7_QUEUENO(x) (((x) >> S_T7_QUEUENO) & M_T7_QUEUENO)
+
+#define S_CPL_GET_TCB_COOKIE 0
+#define M_CPL_GET_TCB_COOKIE 0xff
+#define V_CPL_GET_TCB_COOKIE(x) ((x) << S_CPL_GET_TCB_COOKIE)
+#define G_CPL_GET_TCB_COOKIE(x) \
+ (((x) >> S_CPL_GET_TCB_COOKIE) & M_CPL_GET_TCB_COOKIE)
struct cpl_get_tcb_rpl {
RSS_HDR
@@ -1234,6 +1252,16 @@ struct cpl_close_con_rpl {
__be32 rcv_nxt;
};
+struct cpl_t7_close_con_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 rto;
+ __u8 rsvd;
+ __u8 status;
+ __be32 snd_nxt;
+ __be32 rcv_nxt;
+};
+
struct cpl_close_listsvr_req {
WR_HDR;
union opcode_tid ot;
@@ -1340,6 +1368,24 @@ struct cpl_abort_rpl_rss {
__u8 status;
};
+struct cpl_t7_abort_rpl_rss {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 idx_status;
+};
+
+#define S_CPL_ABORT_RPL_RSS_IDX 8
+#define M_CPL_ABORT_RPL_RSS_IDX 0xffffff
+#define V_CPL_ABORT_RPL_RSS_IDX(x) ((x) << S_CPL_ABORT_RPL_RSS_IDX)
+#define G_CPL_ABORT_RPL_RSS_IDX(x) \
+ (((x) >> S_CPL_ABORT_RPL_RSS_IDX) & M_CPL_ABORT_RPL_RSS_IDX)
+
+#define S_CPL_ABORT_RPL_RSS_STATUS 0
+#define M_CPL_ABORT_RPL_RSS_STATUS 0xff
+#define V_CPL_ABORT_RPL_RSS_STATUS(x) ((x) << S_CPL_ABORT_RPL_RSS_STATUS)
+#define G_CPL_ABORT_RPL_RSS_STATUS(x) \
+ (((x) >> S_CPL_ABORT_RPL_RSS_STATUS) & M_CPL_ABORT_RPL_RSS_STATUS)
+
struct cpl_abort_rpl_rss6 {
RSS_HDR
union opcode_tid ot;
@@ -1444,6 +1490,11 @@ struct cpl_tx_data {
#define V_TX_ULP_MODE(x) ((x) << S_TX_ULP_MODE)
#define G_TX_ULP_MODE(x) (((x) >> S_TX_ULP_MODE) & M_TX_ULP_MODE)
+#define S_T7_TX_ULP_MODE 10
+#define M_T7_TX_ULP_MODE 0xf
+#define V_T7_TX_ULP_MODE(x) ((x) << S_T7_TX_ULP_MODE)
+#define G_T7_TX_ULP_MODE(x) (((x) >> S_T7_TX_ULP_MODE) & M_T7_TX_ULP_MODE)
+
#define S_TX_FORCE 13
#define V_TX_FORCE(x) ((x) << S_TX_FORCE)
#define F_TX_FORCE V_TX_FORCE(1U)
@@ -1881,14 +1932,6 @@ struct cpl_tx_pkt_xt {
(((x) >> S_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI) & \
M_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI)
-#define S_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO 30
-#define M_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO 0x3
-#define V_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO(x) \
- ((x) << S_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO)
-#define G_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO(x) \
- (((x) >> S_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO) & \
- M_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO)
-
/* cpl_tx_pkt_xt.core.ctrl2 fields */
#define S_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO 30
#define M_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO 0x3
@@ -1898,6 +1941,14 @@ struct cpl_tx_pkt_xt {
(((x) >> S_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO) & \
M_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO)
+#define S_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO 30
+#define M_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO 0x3
+#define V_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO(x) \
+ ((x) << S_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO)
+#define G_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO(x) \
+ (((x) >> S_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO) & \
+ M_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO)
+
#define S_CPL_TX_PKT_XT_CHKSTARTOFFSET 20
#define M_CPL_TX_PKT_XT_CHKSTARTOFFSET 0x3ff
#define V_CPL_TX_PKT_XT_CHKSTARTOFFSET(x) \
@@ -2190,7 +2241,8 @@ struct cpl_t7_tx_data_iso {
__be32 num_pi_bytes_seglen_offset;
__be32 datasn_offset;
__be32 buffer_offset;
- __be32 reserved3;
+ __be32 pdo_pkd;
+ /* encapsulated CPL_TX_DATA follows here */
};
#define S_CPL_T7_TX_DATA_ISO_OPCODE 24
@@ -2274,6 +2326,12 @@ struct cpl_t7_tx_data_iso {
(((x) >> S_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET) & \
M_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET)
+#define S_CPL_TX_DATA_ISO_PDO 0
+#define M_CPL_TX_DATA_ISO_PDO 0xff
+#define V_CPL_TX_DATA_ISO_PDO(x) ((x) << S_CPL_TX_DATA_ISO_PDO)
+#define G_CPL_TX_DATA_ISO_PDO(x) \
+ (((x) >> S_CPL_TX_DATA_ISO_PDO) & M_CPL_TX_DATA_ISO_PDO)
+
struct cpl_iscsi_hdr {
RSS_HDR
union opcode_tid ot;
@@ -2419,6 +2477,74 @@ struct cpl_rx_data_ack_core {
#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
+struct cpl_rx_phys_addr {
+ __be32 RSS[2];
+ __be32 op_to_tid;
+ __be32 pci_rlx_order_to_len;
+ __be64 phys_addr;
+};
+
+#define S_CPL_RX_PHYS_ADDR_OPCODE 24
+#define M_CPL_RX_PHYS_ADDR_OPCODE 0xff
+#define V_CPL_RX_PHYS_ADDR_OPCODE(x) ((x) << S_CPL_RX_PHYS_ADDR_OPCODE)
+#define G_CPL_RX_PHYS_ADDR_OPCODE(x) \
+ (((x) >> S_CPL_RX_PHYS_ADDR_OPCODE) & M_CPL_RX_PHYS_ADDR_OPCODE)
+
+#define S_CPL_RX_PHYS_ADDR_ISRDMA 23
+#define M_CPL_RX_PHYS_ADDR_ISRDMA 0x1
+#define V_CPL_RX_PHYS_ADDR_ISRDMA(x) ((x) << S_CPL_RX_PHYS_ADDR_ISRDMA)
+#define G_CPL_RX_PHYS_ADDR_ISRDMA(x) \
+ (((x) >> S_CPL_RX_PHYS_ADDR_ISRDMA) & M_CPL_RX_PHYS_ADDR_ISRDMA)
+#define F_CPL_RX_PHYS_ADDR_ISRDMA V_CPL_RX_PHYS_ADDR_ISRDMA(1U)
+
+#define S_CPL_RX_PHYS_ADDR_TID 0
+#define M_CPL_RX_PHYS_ADDR_TID 0xfffff
+#define V_CPL_RX_PHYS_ADDR_TID(x) ((x) << S_CPL_RX_PHYS_ADDR_TID)
+#define G_CPL_RX_PHYS_ADDR_TID(x) \
+ (((x) >> S_CPL_RX_PHYS_ADDR_TID) & M_CPL_RX_PHYS_ADDR_TID)
+
+#define S_CPL_RX_PHYS_ADDR_PCIRLXORDER 31
+#define M_CPL_RX_PHYS_ADDR_PCIRLXORDER 0x1
+#define V_CPL_RX_PHYS_ADDR_PCIRLXORDER(x) \
+ ((x) << S_CPL_RX_PHYS_ADDR_PCIRLXORDER)
+#define G_CPL_RX_PHYS_ADDR_PCIRLXORDER(x) \
+ (((x) >> S_CPL_RX_PHYS_ADDR_PCIRLXORDER) & M_CPL_RX_PHYS_ADDR_PCIRLXORDER)
+#define F_CPL_RX_PHYS_ADDR_PCIRLXORDER V_CPL_RX_PHYS_ADDR_PCIRLXORDER(1U)
+
+#define S_CPL_RX_PHYS_ADDR_PCINOSNOOP 30
+#define M_CPL_RX_PHYS_ADDR_PCINOSNOOP 0x1
+#define V_CPL_RX_PHYS_ADDR_PCINOSNOOP(x) \
+ ((x) << S_CPL_RX_PHYS_ADDR_PCINOSNOOP)
+#define G_CPL_RX_PHYS_ADDR_PCINOSNOOP(x) \
+ (((x) >> S_CPL_RX_PHYS_ADDR_PCINOSNOOP) & M_CPL_RX_PHYS_ADDR_PCINOSNOOP)
+#define F_CPL_RX_PHYS_ADDR_PCINOSNOOP V_CPL_RX_PHYS_ADDR_PCINOSNOOP(1U)
+
+#define S_CPL_RX_PHYS_ADDR_PCITPHINTEN 29
+#define M_CPL_RX_PHYS_ADDR_PCITPHINTEN 0x1
+#define V_CPL_RX_PHYS_ADDR_PCITPHINTEN(x) \
+ ((x) << S_CPL_RX_PHYS_ADDR_PCITPHINTEN)
+#define G_CPL_RX_PHYS_ADDR_PCITPHINTEN(x) \
+ (((x) >> S_CPL_RX_PHYS_ADDR_PCITPHINTEN) & M_CPL_RX_PHYS_ADDR_PCITPHINTEN)
+#define F_CPL_RX_PHYS_ADDR_PCITPHINTEN V_CPL_RX_PHYS_ADDR_PCITPHINTEN(1U)
+
+#define S_CPL_RX_PHYS_ADDR_PCITPHINT 27
+#define M_CPL_RX_PHYS_ADDR_PCITPHINT 0x3
+#define V_CPL_RX_PHYS_ADDR_PCITPHINT(x) ((x) << S_CPL_RX_PHYS_ADDR_PCITPHINT)
+#define G_CPL_RX_PHYS_ADDR_PCITPHINT(x) \
+ (((x) >> S_CPL_RX_PHYS_ADDR_PCITPHINT) & M_CPL_RX_PHYS_ADDR_PCITPHINT)
+
+#define S_CPL_RX_PHYS_ADDR_DCAID 16
+#define M_CPL_RX_PHYS_ADDR_DCAID 0x7ff
+#define V_CPL_RX_PHYS_ADDR_DCAID(x) ((x) << S_CPL_RX_PHYS_ADDR_DCAID)
+#define G_CPL_RX_PHYS_ADDR_DCAID(x) \
+ (((x) >> S_CPL_RX_PHYS_ADDR_DCAID) & M_CPL_RX_PHYS_ADDR_DCAID)
+
+#define S_CPL_RX_PHYS_ADDR_LEN 0
+#define M_CPL_RX_PHYS_ADDR_LEN 0xffff
+#define V_CPL_RX_PHYS_ADDR_LEN(x) ((x) << S_CPL_RX_PHYS_ADDR_LEN)
+#define G_CPL_RX_PHYS_ADDR_LEN(x) \
+ (((x) >> S_CPL_RX_PHYS_ADDR_LEN) & M_CPL_RX_PHYS_ADDR_LEN)
+
struct cpl_rx_ddp_complete {
RSS_HDR
union opcode_tid ot;
@@ -4059,13 +4185,6 @@ struct cpl_rdma_cqe_ext {
#define G_CPL_RDMA_CQE_EXT_QPID(x) \
(((x) >> S_CPL_RDMA_CQE_EXT_QPID) & M_CPL_RDMA_CQE_EXT_QPID)
-#define S_CPL_RDMA_CQE_EXT_EXTMODE 11
-#define M_CPL_RDMA_CQE_EXT_EXTMODE 0x1
-#define V_CPL_RDMA_CQE_EXT_EXTMODE(x) ((x) << S_CPL_RDMA_CQE_EXT_EXTMODE)
-#define G_CPL_RDMA_CQE_EXT_EXTMODE(x) \
- (((x) >> S_CPL_RDMA_CQE_EXT_EXTMODE) & M_CPL_RDMA_CQE_EXT_EXTMODE)
-#define F_CPL_RDMA_CQE_EXT_EXTMODE V_CPL_RDMA_CQE_EXT_EXTMODE(1U)
-
#define S_CPL_RDMA_CQE_EXT_GENERATION_BIT 10
#define M_CPL_RDMA_CQE_EXT_GENERATION_BIT 0x1
#define V_CPL_RDMA_CQE_EXT_GENERATION_BIT(x) \
@@ -4109,6 +4228,13 @@ struct cpl_rdma_cqe_ext {
#define G_CPL_RDMA_CQE_EXT_WR_TYPE_EXT(x) \
(((x) >> S_CPL_RDMA_CQE_EXT_WR_TYPE_EXT) & M_CPL_RDMA_CQE_EXT_WR_TYPE_EXT)
+#define S_CPL_RDMA_CQE_EXT_EXTMODE 23
+#define M_CPL_RDMA_CQE_EXT_EXTMODE 0x1
+#define V_CPL_RDMA_CQE_EXT_EXTMODE(x) ((x) << S_CPL_RDMA_CQE_EXT_EXTMODE)
+#define G_CPL_RDMA_CQE_EXT_EXTMODE(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_EXTMODE) & M_CPL_RDMA_CQE_EXT_EXTMODE)
+#define F_CPL_RDMA_CQE_EXT_EXTMODE V_CPL_RDMA_CQE_EXT_EXTMODE(1U)
+
#define S_CPL_RDMA_CQE_EXT_SRQ 0
#define M_CPL_RDMA_CQE_EXT_SRQ 0xfff
#define V_CPL_RDMA_CQE_EXT_SRQ(x) ((x) << S_CPL_RDMA_CQE_EXT_SRQ)
@@ -4161,14 +4287,6 @@ struct cpl_rdma_cqe_fw_ext {
#define G_CPL_RDMA_CQE_FW_EXT_QPID(x) \
(((x) >> S_CPL_RDMA_CQE_FW_EXT_QPID) & M_CPL_RDMA_CQE_FW_EXT_QPID)
-#define S_CPL_RDMA_CQE_FW_EXT_EXTMODE 11
-#define M_CPL_RDMA_CQE_FW_EXT_EXTMODE 0x1
-#define V_CPL_RDMA_CQE_FW_EXT_EXTMODE(x) \
- ((x) << S_CPL_RDMA_CQE_FW_EXT_EXTMODE)
-#define G_CPL_RDMA_CQE_FW_EXT_EXTMODE(x) \
- (((x) >> S_CPL_RDMA_CQE_FW_EXT_EXTMODE) & M_CPL_RDMA_CQE_FW_EXT_EXTMODE)
-#define F_CPL_RDMA_CQE_FW_EXT_EXTMODE V_CPL_RDMA_CQE_FW_EXT_EXTMODE(1U)
-
#define S_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT 10
#define M_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT 0x1
#define V_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT(x) \
@@ -4215,6 +4333,14 @@ struct cpl_rdma_cqe_fw_ext {
(((x) >> S_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT) & \
M_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT)
+#define S_CPL_RDMA_CQE_FW_EXT_EXTMODE 23
+#define M_CPL_RDMA_CQE_FW_EXT_EXTMODE 0x1
+#define V_CPL_RDMA_CQE_FW_EXT_EXTMODE(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_EXTMODE)
+#define G_CPL_RDMA_CQE_FW_EXT_EXTMODE(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_EXTMODE) & M_CPL_RDMA_CQE_FW_EXT_EXTMODE)
+#define F_CPL_RDMA_CQE_FW_EXT_EXTMODE V_CPL_RDMA_CQE_FW_EXT_EXTMODE(1U)
+
#define S_CPL_RDMA_CQE_FW_EXT_SRQ 0
#define M_CPL_RDMA_CQE_FW_EXT_SRQ 0xfff
#define V_CPL_RDMA_CQE_FW_EXT_SRQ(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_SRQ)
@@ -4267,14 +4393,6 @@ struct cpl_rdma_cqe_err_ext {
#define G_CPL_RDMA_CQE_ERR_EXT_QPID(x) \
(((x) >> S_CPL_RDMA_CQE_ERR_EXT_QPID) & M_CPL_RDMA_CQE_ERR_EXT_QPID)
-#define S_CPL_RDMA_CQE_ERR_EXT_EXTMODE 11
-#define M_CPL_RDMA_CQE_ERR_EXT_EXTMODE 0x1
-#define V_CPL_RDMA_CQE_ERR_EXT_EXTMODE(x) \
- ((x) << S_CPL_RDMA_CQE_ERR_EXT_EXTMODE)
-#define G_CPL_RDMA_CQE_ERR_EXT_EXTMODE(x) \
- (((x) >> S_CPL_RDMA_CQE_ERR_EXT_EXTMODE) & M_CPL_RDMA_CQE_ERR_EXT_EXTMODE)
-#define F_CPL_RDMA_CQE_ERR_EXT_EXTMODE V_CPL_RDMA_CQE_ERR_EXT_EXTMODE(1U)
-
#define S_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT 10
#define M_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT 0x1
#define V_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT(x) \
@@ -4323,6 +4441,14 @@ struct cpl_rdma_cqe_err_ext {
(((x) >> S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT) & \
M_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT)
+#define S_CPL_RDMA_CQE_ERR_EXT_EXTMODE 23
+#define M_CPL_RDMA_CQE_ERR_EXT_EXTMODE 0x1
+#define V_CPL_RDMA_CQE_ERR_EXT_EXTMODE(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_EXTMODE)
+#define G_CPL_RDMA_CQE_ERR_EXT_EXTMODE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_EXTMODE) & M_CPL_RDMA_CQE_ERR_EXT_EXTMODE)
+#define F_CPL_RDMA_CQE_ERR_EXT_EXTMODE V_CPL_RDMA_CQE_ERR_EXT_EXTMODE(1U)
+
#define S_CPL_RDMA_CQE_ERR_EXT_SRQ 0
#define M_CPL_RDMA_CQE_ERR_EXT_SRQ 0xfff
#define V_CPL_RDMA_CQE_ERR_EXT_SRQ(x) ((x) << S_CPL_RDMA_CQE_ERR_EXT_SRQ)
@@ -5040,6 +5166,58 @@ struct cpl_tx_tnl_lso {
#define G_CPL_TX_TNL_LSO_SIZE(x) \
(((x) >> S_CPL_TX_TNL_LSO_SIZE) & M_CPL_TX_TNL_LSO_SIZE)
+#define S_CPL_TX_TNL_LSO_BTH_OPCODE 24
+#define M_CPL_TX_TNL_LSO_BTH_OPCODE 0xff
+#define V_CPL_TX_TNL_LSO_BTH_OPCODE(x) ((x) << S_CPL_TX_TNL_LSO_BTH_OPCODE)
+#define G_CPL_TX_TNL_LSO_BTH_OPCODE(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_BTH_OPCODE) & \
+ M_CPL_TX_TNL_LSO_BTH_OPCODE)
+
+#define S_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN 0
+#define M_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN 0xffffff
+#define V_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN(x) \
+ ((x) << S_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN)
+#define G_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN) & \
+ M_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN)
+
+#define S_CPL_TX_TNL_LSO_MSS_TVER 8
+#define M_CPL_TX_TNL_LSO_MSS_TVER 0xf
+#define V_CPL_TX_TNL_LSO_MSS_TVER(x) ((x) << S_CPL_TX_TNL_LSO_MSS_TVER)
+#define G_CPL_TX_TNL_LSO_MSS_TVER(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_TVER) & M_CPL_TX_TNL_LSO_MSS_TVER)
+
+#define S_CPL_TX_TNL_LSO_MSS_M 7
+#define M_CPL_TX_TNL_LSO_MSS_M 0x1
+#define V_CPL_TX_TNL_LSO_MSS_M(x) ((x) << S_CPL_TX_TNL_LSO_MSS_M)
+#define G_CPL_TX_TNL_LSO_MSS_M(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_M) & M_CPL_TX_TNL_LSO_MSS_M)
+
+#define S_CPL_TX_TNL_LSO_MSS_PMTU 4
+#define M_CPL_TX_TNL_LSO_MSS_PMTU 0x7
+#define V_CPL_TX_TNL_LSO_MSS_PMTU(x) ((x) << S_CPL_TX_TNL_LSO_MSS_PMTU)
+#define G_CPL_TX_TNL_LSO_MSS_PMTU(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_PMTU) & M_CPL_TX_TNL_LSO_MSS_PMTU)
+
+#define S_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR 3
+#define M_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR 0x1
+#define V_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR(x) \
+ ((x) << S_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR)
+#define G_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR) & M_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR)
+
+#define S_CPL_TX_TNL_LSO_MSS_ACKREQ 1
+#define M_CPL_TX_TNL_LSO_MSS_ACKREQ 0x3
+#define V_CPL_TX_TNL_LSO_MSS_ACKREQ(x) ((x) << S_CPL_TX_TNL_LSO_MSS_ACKREQ)
+#define G_CPL_TX_TNL_LSO_MSS_ACKREQ(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_ACKREQ) & M_CPL_TX_TNL_LSO_MSS_ACKREQ)
+
+#define S_CPL_TX_TNL_LSO_MSS_SE 0
+#define M_CPL_TX_TNL_LSO_MSS_SE 0x1
+#define V_CPL_TX_TNL_LSO_MSS_SE(x) ((x) << S_CPL_TX_TNL_LSO_MSS_SE)
+#define G_CPL_TX_TNL_LSO_MSS_SE(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_SE) & M_CPL_TX_TNL_LSO_MSS_SE)
+
struct cpl_rx_mps_pkt {
__be32 op_to_r1_hi;
__be32 r1_lo_length;
@@ -5839,10 +6017,10 @@ struct cpl_tx_tls_ack {
#define G_CPL_TX_TLS_ACK_OPCODE(x) \
(((x) >> S_CPL_TX_TLS_ACK_OPCODE) & M_CPL_TX_TLS_ACK_OPCODE)
-#define S_T7_CPL_TX_TLS_ACK_RXCHID 22
-#define M_T7_CPL_TX_TLS_ACK_RXCHID 0x3
-#define V_T7_CPL_TX_TLS_ACK_RXCHID(x) ((x) << S_T7_CPL_TX_TLS_ACK_RXCHID)
-#define G_T7_CPL_TX_TLS_ACK_RXCHID(x) \
+#define S_T7_CPL_TX_TLS_ACK_RXCHID 22
+#define M_T7_CPL_TX_TLS_ACK_RXCHID 0x3
+#define V_T7_CPL_TX_TLS_ACK_RXCHID(x) ((x) << S_T7_CPL_TX_TLS_ACK_RXCHID)
+#define G_T7_CPL_TX_TLS_ACK_RXCHID(x) \
(((x) >> S_T7_CPL_TX_TLS_ACK_RXCHID) & M_T7_CPL_TX_TLS_ACK_RXCHID)
#define S_CPL_TX_TLS_ACK_RXCHID 22
@@ -5905,11 +6083,245 @@ struct cpl_tx_tls_ack {
#define G_CPL_TX_TLS_ACK_PLDLEN(x) \
(((x) >> S_CPL_TX_TLS_ACK_PLDLEN) & M_CPL_TX_TLS_ACK_PLDLEN)
+struct cpl_tx_quic_enc {
+ __be32 op_to_hdrlen;
+ __be32 hdrlen_to_pktlen;
+ __be32 r4[2];
+};
+
+#define S_CPL_TX_QUIC_ENC_OPCODE 24
+#define M_CPL_TX_QUIC_ENC_OPCODE 0xff
+#define V_CPL_TX_QUIC_ENC_OPCODE(x) ((x) << S_CPL_TX_QUIC_ENC_OPCODE)
+#define G_CPL_TX_QUIC_ENC_OPCODE(x) \
+ (((x) >> S_CPL_TX_QUIC_ENC_OPCODE) & M_CPL_TX_QUIC_ENC_OPCODE)
+
+#define S_CPL_TX_QUIC_ENC_KEYSIZE 22
+#define M_CPL_TX_QUIC_ENC_KEYSIZE 0x3
+#define V_CPL_TX_QUIC_ENC_KEYSIZE(x) ((x) << S_CPL_TX_QUIC_ENC_KEYSIZE)
+#define G_CPL_TX_QUIC_ENC_KEYSIZE(x) \
+ (((x) >> S_CPL_TX_QUIC_ENC_KEYSIZE) & M_CPL_TX_QUIC_ENC_KEYSIZE)
+
+#define S_CPL_TX_QUIC_ENC_PKTNUMSIZE 20
+#define M_CPL_TX_QUIC_ENC_PKTNUMSIZE 0x3
+#define V_CPL_TX_QUIC_ENC_PKTNUMSIZE(x) ((x) << S_CPL_TX_QUIC_ENC_PKTNUMSIZE)
+#define G_CPL_TX_QUIC_ENC_PKTNUMSIZE(x) \
+ (((x) >> S_CPL_TX_QUIC_ENC_PKTNUMSIZE) & M_CPL_TX_QUIC_ENC_PKTNUMSIZE)
+
+#define S_CPL_TX_QUIC_ENC_HDRTYPE 19
+#define M_CPL_TX_QUIC_ENC_HDRTYPE 0x1
+#define V_CPL_TX_QUIC_ENC_HDRTYPE(x) ((x) << S_CPL_TX_QUIC_ENC_HDRTYPE)
+#define G_CPL_TX_QUIC_ENC_HDRTYPE(x) \
+ (((x) >> S_CPL_TX_QUIC_ENC_HDRTYPE) & M_CPL_TX_QUIC_ENC_HDRTYPE)
+#define F_CPL_TX_QUIC_ENC_HDRTYPE V_CPL_TX_QUIC_ENC_HDRTYPE(1U)
+
+#define S_CPL_TX_QUIC_ENC_HDRSTARTOFFSET 4
+#define M_CPL_TX_QUIC_ENC_HDRSTARTOFFSET 0xfff
+#define V_CPL_TX_QUIC_ENC_HDRSTARTOFFSET(x) \
+ ((x) << S_CPL_TX_QUIC_ENC_HDRSTARTOFFSET)
+#define G_CPL_TX_QUIC_ENC_HDRSTARTOFFSET(x) \
+ (((x) >> S_CPL_TX_QUIC_ENC_HDRSTARTOFFSET) & \
+ M_CPL_TX_QUIC_ENC_HDRSTARTOFFSET)
+
+#define S_CPL_TX_QUIC_ENC_HDRLENGTH_HI 0
+#define M_CPL_TX_QUIC_ENC_HDRLENGTH_HI 0x3
+#define V_CPL_TX_QUIC_ENC_HDRLENGTH_HI(x) \
+ ((x) << S_CPL_TX_QUIC_ENC_HDRLENGTH_HI)
+#define G_CPL_TX_QUIC_ENC_HDRLENGTH_HI(x) \
+ (((x) >> S_CPL_TX_QUIC_ENC_HDRLENGTH_HI) & M_CPL_TX_QUIC_ENC_HDRLENGTH_HI)
+
+#define S_CPL_TX_QUIC_ENC_HDRLENGTH_LO 24
+#define M_CPL_TX_QUIC_ENC_HDRLENGTH_LO 0xff
+#define V_CPL_TX_QUIC_ENC_HDRLENGTH_LO(x) \
+ ((x) << S_CPL_TX_QUIC_ENC_HDRLENGTH_LO)
+#define G_CPL_TX_QUIC_ENC_HDRLENGTH_LO(x) \
+ (((x) >> S_CPL_TX_QUIC_ENC_HDRLENGTH_LO) & M_CPL_TX_QUIC_ENC_HDRLENGTH_LO)
+
+#define S_CPL_TX_QUIC_ENC_NUMPKT 16
+#define M_CPL_TX_QUIC_ENC_NUMPKT 0xff
+#define V_CPL_TX_QUIC_ENC_NUMPKT(x) ((x) << S_CPL_TX_QUIC_ENC_NUMPKT)
+#define G_CPL_TX_QUIC_ENC_NUMPKT(x) \
+ (((x) >> S_CPL_TX_QUIC_ENC_NUMPKT) & M_CPL_TX_QUIC_ENC_NUMPKT)
+
+#define S_CPL_TX_QUIC_ENC_PKTLEN 0
+#define M_CPL_TX_QUIC_ENC_PKTLEN 0xffff
+#define V_CPL_TX_QUIC_ENC_PKTLEN(x) ((x) << S_CPL_TX_QUIC_ENC_PKTLEN)
+#define G_CPL_TX_QUIC_ENC_PKTLEN(x) \
+ (((x) >> S_CPL_TX_QUIC_ENC_PKTLEN) & M_CPL_TX_QUIC_ENC_PKTLEN)
+
+struct cpl_tls_tx_scmd_fmt {
+ __be32 op_to_num_ivs;
+ __be32 enb_dbgId_to_hdrlen;
+ __be32 seq_num[2];
+};
+
+#define S_CPL_TLS_TX_SCMD_FMT_OPCODE 31
+#define M_CPL_TLS_TX_SCMD_FMT_OPCODE 0x1
+#define V_CPL_TLS_TX_SCMD_FMT_OPCODE(x) ((x) << S_CPL_TLS_TX_SCMD_FMT_OPCODE)
+#define G_CPL_TLS_TX_SCMD_FMT_OPCODE(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_OPCODE) & M_CPL_TLS_TX_SCMD_FMT_OPCODE)
+#define F_CPL_TLS_TX_SCMD_FMT_OPCODE V_CPL_TLS_TX_SCMD_FMT_OPCODE(1U)
+
+#define S_CPL_TLS_TX_SCMD_FMT_SEQNUMBERCTRL 29
+#define M_CPL_TLS_TX_SCMD_FMT_SEQNUMBERCTRL 0x3
+#define V_CPL_TLS_TX_SCMD_FMT_SEQNUMBERCTRL(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_SEQNUMBERCTRL)
+#define G_CPL_TLS_TX_SCMD_FMT_SEQNUMBERCTRL(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_SEQNUMBERCTRL) & \
+ M_CPL_TLS_TX_SCMD_FMT_SEQNUMBERCTRL)
+
+#define S_CPL_TLS_TX_SCMD_FMT_PROTOVERSION 24
+#define M_CPL_TLS_TX_SCMD_FMT_PROTOVERSION 0xf
+#define V_CPL_TLS_TX_SCMD_FMT_PROTOVERSION(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_PROTOVERSION)
+#define G_CPL_TLS_TX_SCMD_FMT_PROTOVERSION(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_PROTOVERSION) & \
+ M_CPL_TLS_TX_SCMD_FMT_PROTOVERSION)
+
+#define S_CPL_TLS_TX_SCMD_FMT_ENCDECCTRL 23
+#define M_CPL_TLS_TX_SCMD_FMT_ENCDECCTRL 0x1
+#define V_CPL_TLS_TX_SCMD_FMT_ENCDECCTRL(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_ENCDECCTRL)
+#define G_CPL_TLS_TX_SCMD_FMT_ENCDECCTRL(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_ENCDECCTRL) & \
+ M_CPL_TLS_TX_SCMD_FMT_ENCDECCTRL)
+#define F_CPL_TLS_TX_SCMD_FMT_ENCDECCTRL V_CPL_TLS_TX_SCMD_FMT_ENCDECCTRL(1U)
+
+#define S_CPL_TLS_TX_SCMD_FMT_CIPHAUTHSEQCTRL 22
+#define M_CPL_TLS_TX_SCMD_FMT_CIPHAUTHSEQCTRL 0x1
+#define V_CPL_TLS_TX_SCMD_FMT_CIPHAUTHSEQCTRL(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_CIPHAUTHSEQCTRL)
+#define G_CPL_TLS_TX_SCMD_FMT_CIPHAUTHSEQCTRL(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_CIPHAUTHSEQCTRL) & \
+ M_CPL_TLS_TX_SCMD_FMT_CIPHAUTHSEQCTRL)
+#define F_CPL_TLS_TX_SCMD_FMT_CIPHAUTHSEQCTRL \
+ V_CPL_TLS_TX_SCMD_FMT_CIPHAUTHSEQCTRL(1U)
+
+#define S_CPL_TLS_TX_SCMD_FMT_CIPHMODE 18
+#define M_CPL_TLS_TX_SCMD_FMT_CIPHMODE 0xf
+#define V_CPL_TLS_TX_SCMD_FMT_CIPHMODE(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_CIPHMODE)
+#define G_CPL_TLS_TX_SCMD_FMT_CIPHMODE(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_CIPHMODE) & M_CPL_TLS_TX_SCMD_FMT_CIPHMODE)
+
+#define S_CPL_TLS_TX_SCMD_FMT_AUTHMODE 14
+#define M_CPL_TLS_TX_SCMD_FMT_AUTHMODE 0xf
+#define V_CPL_TLS_TX_SCMD_FMT_AUTHMODE(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_AUTHMODE)
+#define G_CPL_TLS_TX_SCMD_FMT_AUTHMODE(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_AUTHMODE) & M_CPL_TLS_TX_SCMD_FMT_AUTHMODE)
+
+#define S_CPL_TLS_TX_SCMD_FMT_HMACCTRL 11
+#define M_CPL_TLS_TX_SCMD_FMT_HMACCTRL 0x7
+#define V_CPL_TLS_TX_SCMD_FMT_HMACCTRL(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_HMACCTRL)
+#define G_CPL_TLS_TX_SCMD_FMT_HMACCTRL(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_HMACCTRL) & M_CPL_TLS_TX_SCMD_FMT_HMACCTRL)
+
+#define S_CPL_TLS_TX_SCMD_FMT_IVSIZE 7
+#define M_CPL_TLS_TX_SCMD_FMT_IVSIZE 0xf
+#define V_CPL_TLS_TX_SCMD_FMT_IVSIZE(x) ((x) << S_CPL_TLS_TX_SCMD_FMT_IVSIZE)
+#define G_CPL_TLS_TX_SCMD_FMT_IVSIZE(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_IVSIZE) & M_CPL_TLS_TX_SCMD_FMT_IVSIZE)
+
+#define S_CPL_TLS_TX_SCMD_FMT_NUMIVS 0
+#define M_CPL_TLS_TX_SCMD_FMT_NUMIVS 0x7f
+#define V_CPL_TLS_TX_SCMD_FMT_NUMIVS(x) ((x) << S_CPL_TLS_TX_SCMD_FMT_NUMIVS)
+#define G_CPL_TLS_TX_SCMD_FMT_NUMIVS(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_NUMIVS) & M_CPL_TLS_TX_SCMD_FMT_NUMIVS)
+
+#define S_CPL_TLS_TX_SCMD_FMT_ENBDBGID 31
+#define M_CPL_TLS_TX_SCMD_FMT_ENBDBGID 0x1
+#define V_CPL_TLS_TX_SCMD_FMT_ENBDBGID(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_ENBDBGID)
+#define G_CPL_TLS_TX_SCMD_FMT_ENBDBGID(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_ENBDBGID) & M_CPL_TLS_TX_SCMD_FMT_ENBDBGID)
+#define F_CPL_TLS_TX_SCMD_FMT_ENBDBGID V_CPL_TLS_TX_SCMD_FMT_ENBDBGID(1U)
+
+#define S_CPL_TLS_TX_SCMD_FMT_IVGENCTRL 30
+#define M_CPL_TLS_TX_SCMD_FMT_IVGENCTRL 0x1
+#define V_CPL_TLS_TX_SCMD_FMT_IVGENCTRL(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_IVGENCTRL)
+#define G_CPL_TLS_TX_SCMD_FMT_IVGENCTRL(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_IVGENCTRL) & \
+ M_CPL_TLS_TX_SCMD_FMT_IVGENCTRL)
+
+#define S_CPL_TLS_TX_SCMD_FMT_MOREFRAGS 20
+#define M_CPL_TLS_TX_SCMD_FMT_MOREFRAGS 0x1
+#define V_CPL_TLS_TX_SCMD_FMT_MOREFRAGS(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_MOREFRAGS)
+#define G_CPL_TLS_TX_SCMD_FMT_MOREFRAGS(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_MOREFRAGS) & \
+ M_CPL_TLS_TX_SCMD_FMT_MOREFRAGS)
+#define F_CPL_TLS_TX_SCMD_FMT_MOREFRAGS V_CPL_TLS_TX_SCMD_FMT_MOREFRAGS(1U)
+
+#define S_CPL_TLS_TX_SCMD_FMT_LASTFRAGS 19
+#define M_CPL_TLS_TX_SCMD_FMT_LASTFRAGS 0x1
+#define V_CPL_TLS_TX_SCMD_FMT_LASTFRAGS(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_LASTFRAGS)
+#define G_CPL_TLS_TX_SCMD_FMT_LASTFRAGS(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_LASTFRAGS) & \
+ M_CPL_TLS_TX_SCMD_FMT_LASTFRAGS)
+#define F_CPL_TLS_TX_SCMD_FMT_LASTFRAGS V_CPL_TLS_TX_SCMD_FMT_LASTFRAGS(1U)
+
+#define S_CPL_TLS_TX_SCMD_FMT_TLSCOMPPDU 18
+#define M_CPL_TLS_TX_SCMD_FMT_TLSCOMPPDU 0x1
+#define V_CPL_TLS_TX_SCMD_FMT_TLSCOMPPDU(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_TLSCOMPPDU)
+#define G_CPL_TLS_TX_SCMD_FMT_TLSCOMPPDU(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_TLSCOMPPDU) & \
+ M_CPL_TLS_TX_SCMD_FMT_TLSCOMPPDU)
+#define F_CPL_TLS_TX_SCMD_FMT_TLSCOMPPDU V_CPL_TLS_TX_SCMD_FMT_TLSCOMPPDU(1U)
+
+#define S_CPL_TLS_TX_SCMD_FMT_PAYLOADONLY 17
+#define M_CPL_TLS_TX_SCMD_FMT_PAYLOADONLY 0x1
+#define V_CPL_TLS_TX_SCMD_FMT_PAYLOADONLY(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_PAYLOADONLY)
+#define G_CPL_TLS_TX_SCMD_FMT_PAYLOADONLY(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_PAYLOADONLY) & \
+ M_CPL_TLS_TX_SCMD_FMT_PAYLOADONLY)
+#define F_CPL_TLS_TX_SCMD_FMT_PAYLOADONLY \
+ V_CPL_TLS_TX_SCMD_FMT_PAYLOADONLY(1U)
+
+#define S_CPL_TLS_TX_SCMD_FMT_TLSFRAGENABLE 16
+#define M_CPL_TLS_TX_SCMD_FMT_TLSFRAGENABLE 0x1
+#define V_CPL_TLS_TX_SCMD_FMT_TLSFRAGENABLE(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_TLSFRAGENABLE)
+#define G_CPL_TLS_TX_SCMD_FMT_TLSFRAGENABLE(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_TLSFRAGENABLE) & \
+ M_CPL_TLS_TX_SCMD_FMT_TLSFRAGENABLE)
+#define F_CPL_TLS_TX_SCMD_FMT_TLSFRAGENABLE \
+ V_CPL_TLS_TX_SCMD_FMT_TLSFRAGENABLE(1U)
+
+#define S_CPL_TLS_TX_SCMD_FMT_MACONLY 15
+#define M_CPL_TLS_TX_SCMD_FMT_MACONLY 0x1
+#define V_CPL_TLS_TX_SCMD_FMT_MACONLY(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_MACONLY)
+#define G_CPL_TLS_TX_SCMD_FMT_MACONLY(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_MACONLY) & M_CPL_TLS_TX_SCMD_FMT_MACONLY)
+#define F_CPL_TLS_TX_SCMD_FMT_MACONLY V_CPL_TLS_TX_SCMD_FMT_MACONLY(1U)
+
+#define S_CPL_TLS_TX_SCMD_FMT_AADIVDROP 14
+#define M_CPL_TLS_TX_SCMD_FMT_AADIVDROP 0x1
+#define V_CPL_TLS_TX_SCMD_FMT_AADIVDROP(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_AADIVDROP)
+#define G_CPL_TLS_TX_SCMD_FMT_AADIVDROP(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_AADIVDROP) & \
+ M_CPL_TLS_TX_SCMD_FMT_AADIVDROP)
+#define F_CPL_TLS_TX_SCMD_FMT_AADIVDROP V_CPL_TLS_TX_SCMD_FMT_AADIVDROP(1U)
+
+#define S_CPL_TLS_TX_SCMD_FMT_HDRLENGTH 0
+#define M_CPL_TLS_TX_SCMD_FMT_HDRLENGTH 0x3fff
+#define V_CPL_TLS_TX_SCMD_FMT_HDRLENGTH(x) \
+ ((x) << S_CPL_TLS_TX_SCMD_FMT_HDRLENGTH)
+#define G_CPL_TLS_TX_SCMD_FMT_HDRLENGTH(x) \
+ (((x) >> S_CPL_TLS_TX_SCMD_FMT_HDRLENGTH) & \
+ M_CPL_TLS_TX_SCMD_FMT_HDRLENGTH)
+
struct cpl_rcb_upd {
__be32 op_to_tid;
__be32 opcode_psn;
__u8 nodata_to_cnprepclr;
- __u8 r0;
+ __u8 rsp_nak_seqclr_pkd;
__be16 wrptr;
__be32 length;
};
@@ -6202,13 +6614,6 @@ struct cpl_roce_cqe {
#define G_CPL_ROCE_CQE_QPID(x) \
(((x) >> S_CPL_ROCE_CQE_QPID) & M_CPL_ROCE_CQE_QPID)
-#define S_CPL_ROCE_CQE_EXTMODE 11
-#define M_CPL_ROCE_CQE_EXTMODE 0x1
-#define V_CPL_ROCE_CQE_EXTMODE(x) ((x) << S_CPL_ROCE_CQE_EXTMODE)
-#define G_CPL_ROCE_CQE_EXTMODE(x) \
- (((x) >> S_CPL_ROCE_CQE_EXTMODE) & M_CPL_ROCE_CQE_EXTMODE)
-#define F_CPL_ROCE_CQE_EXTMODE V_CPL_ROCE_CQE_EXTMODE(1U)
-
#define S_CPL_ROCE_CQE_GENERATION_BIT 10
#define M_CPL_ROCE_CQE_GENERATION_BIT 0x1
#define V_CPL_ROCE_CQE_GENERATION_BIT(x) \
@@ -6249,6 +6654,13 @@ struct cpl_roce_cqe {
#define G_CPL_ROCE_CQE_WR_TYPE_EXT(x) \
(((x) >> S_CPL_ROCE_CQE_WR_TYPE_EXT) & M_CPL_ROCE_CQE_WR_TYPE_EXT)
+#define S_CPL_ROCE_CQE_EXTMODE 23
+#define M_CPL_ROCE_CQE_EXTMODE 0x1
+#define V_CPL_ROCE_CQE_EXTMODE(x) ((x) << S_CPL_ROCE_CQE_EXTMODE)
+#define G_CPL_ROCE_CQE_EXTMODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_EXTMODE) & M_CPL_ROCE_CQE_EXTMODE)
+#define F_CPL_ROCE_CQE_EXTMODE V_CPL_ROCE_CQE_EXTMODE(1U)
+
#define S_CPL_ROCE_CQE_SRQ 0
#define M_CPL_ROCE_CQE_SRQ 0xfff
#define V_CPL_ROCE_CQE_SRQ(x) ((x) << S_CPL_ROCE_CQE_SRQ)
@@ -6304,13 +6716,6 @@ struct cpl_roce_cqe_fw {
#define G_CPL_ROCE_CQE_FW_QPID(x) \
(((x) >> S_CPL_ROCE_CQE_FW_QPID) & M_CPL_ROCE_CQE_FW_QPID)
-#define S_CPL_ROCE_CQE_FW_EXTMODE 11
-#define M_CPL_ROCE_CQE_FW_EXTMODE 0x1
-#define V_CPL_ROCE_CQE_FW_EXTMODE(x) ((x) << S_CPL_ROCE_CQE_FW_EXTMODE)
-#define G_CPL_ROCE_CQE_FW_EXTMODE(x) \
- (((x) >> S_CPL_ROCE_CQE_FW_EXTMODE) & M_CPL_ROCE_CQE_FW_EXTMODE)
-#define F_CPL_ROCE_CQE_FW_EXTMODE V_CPL_ROCE_CQE_FW_EXTMODE(1U)
-
#define S_CPL_ROCE_CQE_FW_GENERATION_BIT 10
#define M_CPL_ROCE_CQE_FW_GENERATION_BIT 0x1
#define V_CPL_ROCE_CQE_FW_GENERATION_BIT(x) \
@@ -6353,6 +6758,14 @@ struct cpl_roce_cqe_fw {
#define G_CPL_ROCE_CQE_FW_WR_TYPE_EXT(x) \
(((x) >> S_CPL_ROCE_CQE_FW_WR_TYPE_EXT) & M_CPL_ROCE_CQE_FW_WR_TYPE_EXT)
+#define S_CPL_ROCE_CQE_FW_EXTMODE 23
+#define M_CPL_ROCE_CQE_FW_EXTMODE 0x1
+#define V_CPL_ROCE_CQE_FW_EXTMODE(x) ((x) << S_CPL_ROCE_CQE_FW_EXTMODE)
+#define G_CPL_ROCE_CQE_FW_EXTMODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_EXTMODE) & M_CPL_ROCE_CQE_FW_EXTMODE)
+#define F_CPL_ROCE_CQE_FW_EXTMODE V_CPL_ROCE_CQE_FW_EXTMODE(1U)
+
+
#define S_CPL_ROCE_CQE_FW_SRQ 0
#define M_CPL_ROCE_CQE_FW_SRQ 0xfff
#define V_CPL_ROCE_CQE_FW_SRQ(x) ((x) << S_CPL_ROCE_CQE_FW_SRQ)
@@ -6360,16 +6773,16 @@ struct cpl_roce_cqe_fw {
(((x) >> S_CPL_ROCE_CQE_FW_SRQ) & M_CPL_ROCE_CQE_FW_SRQ)
struct cpl_roce_cqe_err {
- __be32 op_to_CQID;
- __be32 Tid_FlitCnt;
- __be32 QPID_to_WR_type;
- __be32 Length;
- __be32 TAG;
- __be32 MSN;
- __be32 SE_to_SRQ;
- __be32 RQE;
- __be32 ExtInfoMS[2];
- __be32 ExtInfoLS[2];
+ __be32 op_to_cqid;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+ __be32 se_to_srq;
+ __be32 rqe;
+ __be32 extinfoms[2];
+ __be32 extinfols[2];
};
#define S_CPL_ROCE_CQE_ERR_OPCODE 24
@@ -6408,13 +6821,6 @@ struct cpl_roce_cqe_err {
#define G_CPL_ROCE_CQE_ERR_QPID(x) \
(((x) >> S_CPL_ROCE_CQE_ERR_QPID) & M_CPL_ROCE_CQE_ERR_QPID)
-#define S_CPL_ROCE_CQE_ERR_EXTMODE 11
-#define M_CPL_ROCE_CQE_ERR_EXTMODE 0x1
-#define V_CPL_ROCE_CQE_ERR_EXTMODE(x) ((x) << S_CPL_ROCE_CQE_ERR_EXTMODE)
-#define G_CPL_ROCE_CQE_ERR_EXTMODE(x) \
- (((x) >> S_CPL_ROCE_CQE_ERR_EXTMODE) & M_CPL_ROCE_CQE_ERR_EXTMODE)
-#define F_CPL_ROCE_CQE_ERR_EXTMODE V_CPL_ROCE_CQE_ERR_EXTMODE(1U)
-
#define S_CPL_ROCE_CQE_ERR_GENERATION_BIT 10
#define M_CPL_ROCE_CQE_ERR_GENERATION_BIT 0x1
#define V_CPL_ROCE_CQE_ERR_GENERATION_BIT(x) \
@@ -6458,6 +6864,14 @@ struct cpl_roce_cqe_err {
#define G_CPL_ROCE_CQE_ERR_WR_TYPE_EXT(x) \
(((x) >> S_CPL_ROCE_CQE_ERR_WR_TYPE_EXT) & M_CPL_ROCE_CQE_ERR_WR_TYPE_EXT)
+#define S_CPL_ROCE_CQE_ERR_EXTMODE 23
+#define M_CPL_ROCE_CQE_ERR_EXTMODE 0x1
+#define V_CPL_ROCE_CQE_ERR_EXTMODE(x) ((x) << S_CPL_ROCE_CQE_ERR_EXTMODE)
+#define G_CPL_ROCE_CQE_ERR_EXTMODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_EXTMODE) & M_CPL_ROCE_CQE_ERR_EXTMODE)
+#define F_CPL_ROCE_CQE_ERR_EXTMODE V_CPL_ROCE_CQE_ERR_EXTMODE(1U)
+
+
#define S_CPL_ROCE_CQE_ERR_SRQ 0
#define M_CPL_ROCE_CQE_ERR_SRQ 0xfff
#define V_CPL_ROCE_CQE_ERR_SRQ(x) ((x) << S_CPL_ROCE_CQE_ERR_SRQ)
diff --git a/sys/dev/cxgbe/common/t4_regs.h b/sys/dev/cxgbe/common/t4_regs.h
index 8f500ec0fbdd..51f150443261 100644
--- a/sys/dev/cxgbe/common/t4_regs.h
+++ b/sys/dev/cxgbe/common/t4_regs.h
@@ -27,11 +27,11 @@
*/
/* This file is automatically generated --- changes will be lost */
-/* Generation Date : Thu Sep 11 05:25:56 PM IST 2025 */
+/* Generation Date : Tue Oct 28 05:23:45 PM IST 2025 */
/* Directory name: t4_reg.txt, Date: Not specified */
/* Directory name: t5_reg.txt, Changeset: 6945:54ba4ba7ee8b */
/* Directory name: t6_reg.txt, Changeset: 4277:9c165d0f4899 */
-/* Directory name: t7_reg.txt, Changeset: 5945:1487219ecb20 */
+/* Directory name: t7_sw_reg.txt, Changeset: 5946:0b60ff298e7d */
#define MYPF_BASE 0x1b000
#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr))
@@ -44006,10 +44006,57 @@
#define V_MPS2CRYPTO_RX_INTF_FIFO(x) ((x) << S_MPS2CRYPTO_RX_INTF_FIFO)
#define G_MPS2CRYPTO_RX_INTF_FIFO(x) (((x) >> S_MPS2CRYPTO_RX_INTF_FIFO) & M_MPS2CRYPTO_RX_INTF_FIFO)
-#define S_RX_PRE_PROC_PERR 9
-#define M_RX_PRE_PROC_PERR 0x7ffU
-#define V_RX_PRE_PROC_PERR(x) ((x) << S_RX_PRE_PROC_PERR)
-#define G_RX_PRE_PROC_PERR(x) (((x) >> S_RX_PRE_PROC_PERR) & M_RX_PRE_PROC_PERR)
+#define S_MAC_RX_PPROC_MPS2TP_TF 19
+#define V_MAC_RX_PPROC_MPS2TP_TF(x) ((x) << S_MAC_RX_PPROC_MPS2TP_TF)
+#define F_MAC_RX_PPROC_MPS2TP_TF V_MAC_RX_PPROC_MPS2TP_TF(1U)
+
+#define S_MAC_RX_PPROC_LB_CH3 18
+#define V_MAC_RX_PPROC_LB_CH3(x) ((x) << S_MAC_RX_PPROC_LB_CH3)
+#define F_MAC_RX_PPROC_LB_CH3 V_MAC_RX_PPROC_LB_CH3(1U)
+
+#define S_MAC_RX_PPROC_LB_CH2 17
+#define V_MAC_RX_PPROC_LB_CH2(x) ((x) << S_MAC_RX_PPROC_LB_CH2)
+#define F_MAC_RX_PPROC_LB_CH2 V_MAC_RX_PPROC_LB_CH2(1U)
+
+#define S_MAC_RX_PPROC_LB_CH1 16
+#define V_MAC_RX_PPROC_LB_CH1(x) ((x) << S_MAC_RX_PPROC_LB_CH1)
+#define F_MAC_RX_PPROC_LB_CH1 V_MAC_RX_PPROC_LB_CH1(1U)
+
+#define S_MAC_RX_PPROC_LB_CH0 15
+#define V_MAC_RX_PPROC_LB_CH0(x) ((x) << S_MAC_RX_PPROC_LB_CH0)
+#define F_MAC_RX_PPROC_LB_CH0 V_MAC_RX_PPROC_LB_CH0(1U)
+
+#define S_MAC_RX_PPROC_DWRR_CH0_3 14
+#define V_MAC_RX_PPROC_DWRR_CH0_3(x) ((x) << S_MAC_RX_PPROC_DWRR_CH0_3)
+#define F_MAC_RX_PPROC_DWRR_CH0_3 V_MAC_RX_PPROC_DWRR_CH0_3(1U)
+
+#define S_MAC_RX_FIFO_PERR 13
+#define V_MAC_RX_FIFO_PERR(x) ((x) << S_MAC_RX_FIFO_PERR)
+#define F_MAC_RX_FIFO_PERR V_MAC_RX_FIFO_PERR(1U)
+
+#define S_MAC2MPS_PT3_PERR 12
+#define V_MAC2MPS_PT3_PERR(x) ((x) << S_MAC2MPS_PT3_PERR)
+#define F_MAC2MPS_PT3_PERR V_MAC2MPS_PT3_PERR(1U)
+
+#define S_MAC2MPS_PT2_PERR 11
+#define V_MAC2MPS_PT2_PERR(x) ((x) << S_MAC2MPS_PT2_PERR)
+#define F_MAC2MPS_PT2_PERR V_MAC2MPS_PT2_PERR(1U)
+
+#define S_MAC2MPS_PT1_PERR 10
+#define V_MAC2MPS_PT1_PERR(x) ((x) << S_MAC2MPS_PT1_PERR)
+#define F_MAC2MPS_PT1_PERR V_MAC2MPS_PT1_PERR(1U)
+
+#define S_MAC2MPS_PT0_PERR 9
+#define V_MAC2MPS_PT0_PERR(x) ((x) << S_MAC2MPS_PT0_PERR)
+#define F_MAC2MPS_PT0_PERR V_MAC2MPS_PT0_PERR(1U)
+
+#define S_LPBK_FIFO_PERR 8
+#define V_LPBK_FIFO_PERR(x) ((x) << S_LPBK_FIFO_PERR)
+#define F_LPBK_FIFO_PERR V_LPBK_FIFO_PERR(1U)
+
+#define S_TP2MPS_TF_FIFO_PERR 7
+#define V_TP2MPS_TF_FIFO_PERR(x) ((x) << S_TP2MPS_TF_FIFO_PERR)
+#define F_TP2MPS_TF_FIFO_PERR V_TP2MPS_TF_FIFO_PERR(1U)
#define A_MPS_RX_PAUSE_GEN_TH_1 0x11090
#define A_MPS_RX_PERR_INT_ENABLE2 0x11090
@@ -78258,6 +78305,26 @@
#define G_RX_CDR_LANE_SEL(x) (((x) >> S_RX_CDR_LANE_SEL) & M_RX_CDR_LANE_SEL)
#define A_MAC_DEBUG_PL_IF_1 0x381c4
+#define A_MAC_HSS0_ANALOG_TEST_CTRL 0x381d0
+
+#define S_WP_PMT_IN_I 0
+#define M_WP_PMT_IN_I 0xfU
+#define V_WP_PMT_IN_I(x) ((x) << S_WP_PMT_IN_I)
+#define G_WP_PMT_IN_I(x) (((x) >> S_WP_PMT_IN_I) & M_WP_PMT_IN_I)
+
+#define A_MAC_HSS1_ANALOG_TEST_CTRL 0x381d4
+#define A_MAC_HSS2_ANALOG_TEST_CTRL 0x381d8
+#define A_MAC_HSS3_ANALOG_TEST_CTRL 0x381dc
+#define A_MAC_HSS0_ANALOG_TEST_STATUS 0x381e0
+
+#define S_WP_PMT_OUT_O 0
+#define M_WP_PMT_OUT_O 0xfU
+#define V_WP_PMT_OUT_O(x) ((x) << S_WP_PMT_OUT_O)
+#define G_WP_PMT_OUT_O(x) (((x) >> S_WP_PMT_OUT_O) & M_WP_PMT_OUT_O)
+
+#define A_MAC_HSS1_ANALOG_TEST_STATUS 0x381e4
+#define A_MAC_HSS2_ANALOG_TEST_STATUS 0x381e8
+#define A_MAC_HSS3_ANALOG_TEST_STATUS 0x381ec
#define A_MAC_SIGNAL_DETECT_CTRL 0x381f0
#define S_SIGNAL_DET_LN7 15
@@ -80933,6 +81000,27 @@
#define F_Q1_LOS_0_ASSERT V_Q1_LOS_0_ASSERT(1U)
#define A_MAC_IOS_INTR_CAUSE_QUAD1 0x3a09c
+#define A_MAC_HSS0_PMD_RECEIVE_SIGNAL_DETECT 0x3a93c
+
+#define S_PMD_RECEIVE_SIGNAL_DETECT_1N3 4
+#define V_PMD_RECEIVE_SIGNAL_DETECT_1N3(x) ((x) << S_PMD_RECEIVE_SIGNAL_DETECT_1N3)
+#define F_PMD_RECEIVE_SIGNAL_DETECT_1N3 V_PMD_RECEIVE_SIGNAL_DETECT_1N3(1U)
+
+#define S_PMD_RECEIVE_SIGNAL_DETECT_1N2 3
+#define V_PMD_RECEIVE_SIGNAL_DETECT_1N2(x) ((x) << S_PMD_RECEIVE_SIGNAL_DETECT_1N2)
+#define F_PMD_RECEIVE_SIGNAL_DETECT_1N2 V_PMD_RECEIVE_SIGNAL_DETECT_1N2(1U)
+
+#define S_PMD_RECEIVE_SIGNAL_DETECT_LN1 2
+#define V_PMD_RECEIVE_SIGNAL_DETECT_LN1(x) ((x) << S_PMD_RECEIVE_SIGNAL_DETECT_LN1)
+#define F_PMD_RECEIVE_SIGNAL_DETECT_LN1 V_PMD_RECEIVE_SIGNAL_DETECT_LN1(1U)
+
+#define S_PMD_RECEIVE_SIGNAL_DETECT_1N0 1
+#define V_PMD_RECEIVE_SIGNAL_DETECT_1N0(x) ((x) << S_PMD_RECEIVE_SIGNAL_DETECT_1N0)
+#define F_PMD_RECEIVE_SIGNAL_DETECT_1N0 V_PMD_RECEIVE_SIGNAL_DETECT_1N0(1U)
+
+#define A_MAC_HSS1_PMD_RECEIVE_SIGNAL_DETECT 0x3b93c
+#define A_MAC_HSS2_PMD_RECEIVE_SIGNAL_DETECT 0x3c93c
+#define A_MAC_HSS3_PMD_RECEIVE_SIGNAL_DETECT 0x3d93c
#define A_MAC_MTIP_PCS_1G_0_CONTROL 0x3e000
#define S_SPEED_SEL_1 13