aboutsummaryrefslogtreecommitdiff
path: root/sys/dev
diff options
context:
space:
mode:
Diffstat (limited to 'sys/dev')
-rw-r--r--sys/dev/acpica/acpi.c423
-rw-r--r--sys/dev/acpica/acpi_lid.c4
-rw-r--r--sys/dev/acpica/acpi_powerres.c274
-rw-r--r--sys/dev/acpica/acpivar.h16
-rw-r--r--sys/dev/ath/if_ath_tx.c14
-rw-r--r--sys/dev/bnxt/bnxt_en/if_bnxt.c1
-rw-r--r--sys/dev/cpuctl/cpuctl.c2
-rw-r--r--sys/dev/cxgbe/adapter.h91
-rw-r--r--sys/dev/cxgbe/common/common.h177
-rw-r--r--sys/dev/cxgbe/common/t4_hw.c1967
-rw-r--r--sys/dev/cxgbe/common/t4_hw.h135
-rw-r--r--sys/dev/cxgbe/common/t4_msg.h3011
-rw-r--r--sys/dev/cxgbe/common/t4_regs.h27273
-rw-r--r--sys/dev/cxgbe/common/t4_regs_values.h24
-rw-r--r--sys/dev/cxgbe/common/t4_tcb.h182
-rw-r--r--sys/dev/cxgbe/crypto/t4_crypto.c54
-rw-r--r--sys/dev/cxgbe/crypto/t4_crypto.h1
-rw-r--r--sys/dev/cxgbe/crypto/t4_keyctx.c30
-rw-r--r--sys/dev/cxgbe/crypto/t6_kern_tls.c2
-rw-r--r--sys/dev/cxgbe/crypto/t7_kern_tls.c2196
-rw-r--r--sys/dev/cxgbe/cudbg/cudbg_flash_utils.c90
-rw-r--r--sys/dev/cxgbe/cudbg/cudbg_lib.c11
-rw-r--r--sys/dev/cxgbe/cudbg/cudbg_lib_common.h7
-rw-r--r--sys/dev/cxgbe/cxgbei/icl_cxgbei.c98
-rw-r--r--sys/dev/cxgbe/firmware/t4fw_interface.h1320
-rw-r--r--sys/dev/cxgbe/firmware/t7fw_cfg.txt644
-rw-r--r--sys/dev/cxgbe/firmware/t7fw_cfg_fpga.txt530
-rw-r--r--sys/dev/cxgbe/firmware/t7fw_cfg_uwire.txt644
-rw-r--r--sys/dev/cxgbe/iw_cxgbe/device.c20
-rw-r--r--sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h5
-rw-r--r--sys/dev/cxgbe/iw_cxgbe/mem.c103
-rw-r--r--sys/dev/cxgbe/iw_cxgbe/resource.c38
-rw-r--r--sys/dev/cxgbe/iw_cxgbe/t4.h1
-rw-r--r--sys/dev/cxgbe/offload.h12
-rw-r--r--sys/dev/cxgbe/t4_filter.c476
-rw-r--r--sys/dev/cxgbe/t4_ioctl.h17
-rw-r--r--sys/dev/cxgbe/t4_iov.c67
-rw-r--r--sys/dev/cxgbe/t4_l2t.c14
-rw-r--r--sys/dev/cxgbe/t4_l2t.h2
-rw-r--r--sys/dev/cxgbe/t4_main.c1575
-rw-r--r--sys/dev/cxgbe/t4_mp_ring.c81
-rw-r--r--sys/dev/cxgbe/t4_mp_ring.h1
-rw-r--r--sys/dev/cxgbe/t4_netmap.c23
-rw-r--r--sys/dev/cxgbe/t4_sched.c6
-rw-r--r--sys/dev/cxgbe/t4_sge.c209
-rw-r--r--sys/dev/cxgbe/t4_tpt.c193
-rw-r--r--sys/dev/cxgbe/t4_tracer.c5
-rw-r--r--sys/dev/cxgbe/t4_vf.c65
-rw-r--r--sys/dev/cxgbe/tom/t4_connect.c43
-rw-r--r--sys/dev/cxgbe/tom/t4_cpl_io.c20
-rw-r--r--sys/dev/cxgbe/tom/t4_ddp.c20
-rw-r--r--sys/dev/cxgbe/tom/t4_listen.c7
-rw-r--r--sys/dev/cxgbe/tom/t4_tls.c288
-rw-r--r--sys/dev/cxgbe/tom/t4_tls.h1
-rw-r--r--sys/dev/cxgbe/tom/t4_tom.c141
-rw-r--r--sys/dev/cxgbe/tom/t4_tom.h10
-rw-r--r--sys/dev/cxgbe/tom/t4_tom_l2t.c2
-rw-r--r--sys/dev/gpio/gpioc.c31
-rw-r--r--sys/dev/gpio/pl061.c3
-rw-r--r--sys/dev/hid/ietp.c55
-rw-r--r--sys/dev/hid/u2f.c23
-rw-r--r--sys/dev/iicbus/iichid.c20
-rw-r--r--sys/dev/mlx5/mlx5_accel/mlx5_ipsec_fs.c19
-rw-r--r--sys/dev/sound/pci/hda/hdaa.c53
-rw-r--r--sys/dev/sound/pcm/dsp.c119
-rw-r--r--sys/dev/thunderbolt/hcm.c223
-rw-r--r--sys/dev/thunderbolt/hcm_var.h47
-rw-r--r--sys/dev/thunderbolt/nhi.c1170
-rw-r--r--sys/dev/thunderbolt/nhi_pci.c529
-rw-r--r--sys/dev/thunderbolt/nhi_reg.h332
-rw-r--r--sys/dev/thunderbolt/nhi_var.h277
-rw-r--r--sys/dev/thunderbolt/nhi_wmi.c198
-rw-r--r--sys/dev/thunderbolt/router.c939
-rw-r--r--sys/dev/thunderbolt/router_var.h242
-rw-r--r--sys/dev/thunderbolt/tb_acpi_pcib.c181
-rw-r--r--sys/dev/thunderbolt/tb_debug.c334
-rw-r--r--sys/dev/thunderbolt/tb_debug.h93
-rw-r--r--sys/dev/thunderbolt/tb_dev.c331
-rw-r--r--sys/dev/thunderbolt/tb_dev.h41
-rw-r--r--sys/dev/thunderbolt/tb_if.m121
-rw-r--r--sys/dev/thunderbolt/tb_ioctl.h52
-rw-r--r--sys/dev/thunderbolt/tb_pcib.c614
-rw-r--r--sys/dev/thunderbolt/tb_pcib.h93
-rw-r--r--sys/dev/thunderbolt/tb_reg.h52
-rw-r--r--sys/dev/thunderbolt/tb_var.h54
-rw-r--r--sys/dev/thunderbolt/tbcfg_reg.h363
-rw-r--r--sys/dev/virtio/network/if_vtnet.c49
-rw-r--r--sys/dev/watchdog/watchdog.c2
88 files changed, 45735 insertions, 3591 deletions
diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c
index 574d3aacbcde..7f9ca6e39df8 100644
--- a/sys/dev/acpica/acpi.c
+++ b/sys/dev/acpica/acpi.c
@@ -102,6 +102,11 @@ struct acpi_interface {
int num;
};
+struct acpi_wake_prep_context {
+ struct acpi_softc *sc;
+ enum power_stype stype;
+};
+
static char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL };
/* Global mutex for locking access to the ACPI subsystem. */
@@ -111,8 +116,9 @@ struct callout acpi_sleep_timer;
/* Bitmap of device quirks. */
int acpi_quirks;
-/* Supported sleep states. */
-static BOOLEAN acpi_sleep_states[ACPI_S_STATE_COUNT];
+/* Supported sleep states and types. */
+static bool acpi_supported_stypes[POWER_STYPE_COUNT];
+static bool acpi_supported_sstates[ACPI_S_STATE_COUNT];
static void acpi_lookup(void *arg, const char *name, device_t *dev);
static int acpi_modevent(struct module *mod, int event, void *junk);
@@ -169,21 +175,29 @@ static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level,
void *context, void **status);
static void acpi_sleep_enable(void *arg);
static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc);
-static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state);
+static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc,
+ enum power_stype stype);
static void acpi_shutdown_final(void *arg, int howto);
static void acpi_enable_fixed_events(struct acpi_softc *sc);
static void acpi_resync_clock(struct acpi_softc *sc);
-static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate);
-static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate);
-static int acpi_wake_prep_walk(int sstate);
+static int acpi_wake_sleep_prep(struct acpi_softc *sc, ACPI_HANDLE handle,
+ enum power_stype stype);
+static int acpi_wake_run_prep(struct acpi_softc *sc, ACPI_HANDLE handle,
+ enum power_stype stype);
+static int acpi_wake_prep_walk(struct acpi_softc *sc, enum power_stype stype);
static int acpi_wake_sysctl_walk(device_t dev);
static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS);
-static void acpi_system_eventhandler_sleep(void *arg, int state);
-static void acpi_system_eventhandler_wakeup(void *arg, int state);
-static int acpi_sname2sstate(const char *sname);
-static const char *acpi_sstate2sname(int sstate);
static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
+static void acpi_system_eventhandler_sleep(void *arg,
+ enum power_stype stype);
+static void acpi_system_eventhandler_wakeup(void *arg,
+ enum power_stype stype);
+static enum power_stype acpi_sstate_to_stype(int sstate);
+static int acpi_sname_to_sstate(const char *sname);
+static const char *acpi_sstate_to_sname(int sstate);
+static int acpi_suspend_state_sysctl(SYSCTL_HANDLER_ARGS);
static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
+static int acpi_stype_sysctl(SYSCTL_HANDLER_ARGS);
static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS);
static int acpi_stype_to_sstate(struct acpi_softc *sc, enum power_stype stype);
static int acpi_pm_func(u_long cmd, void *arg, enum power_stype stype);
@@ -477,6 +491,7 @@ acpi_attach(device_t dev)
UINT32 flags;
UINT8 TypeA, TypeB;
char *env;
+ enum power_stype stype;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
@@ -589,31 +604,35 @@ acpi_attach(device_t dev)
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
OID_AUTO, "power_button_state",
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
- &sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A",
+ &sc->acpi_power_button_stype, 0, acpi_stype_sysctl, "A",
"Power button ACPI sleep state.");
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
OID_AUTO, "sleep_button_state",
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
- &sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A",
+ &sc->acpi_sleep_button_stype, 0, acpi_stype_sysctl, "A",
"Sleep button ACPI sleep state.");
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
OID_AUTO, "lid_switch_state",
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
- &sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A",
- "Lid ACPI sleep state. Set to S3 if you want to suspend your laptop when close the Lid.");
+ &sc->acpi_lid_switch_stype, 0, acpi_stype_sysctl, "A",
+ "Lid ACPI sleep state. Set to s2idle or s2mem if you want to suspend "
+ "your laptop when you close the lid."
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
- OID_AUTO, "standby_state",
- CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
- &sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", "");
+ OID_AUTO, "suspend_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ NULL, 0, acpi_suspend_state_sysctl, "A",
+ "Current ACPI suspend state. This sysctl is deprecated; you probably "
+ "want to use kern.power.suspend instead.");
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
- OID_AUTO, "suspend_state",
+ OID_AUTO, "standby_state",
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
- &sc->acpi_suspend_sx, 0, acpi_sleep_state_sysctl, "A", "");
+ &sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A",
+ "ACPI Sx state to use when going standby (S1 or S2).");
SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
OID_AUTO, "sleep_delay", CTLFLAG_RW, &sc->acpi_sleep_delay, 0,
"sleep delay in seconds");
SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
- OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0, "S4BIOS mode");
+ OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0,
+ "Use S4BIOS when hibernating.");
SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
OID_AUTO, "verbose", CTLFLAG_RW, &sc->acpi_verbose, 0, "verbose mode");
SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
@@ -659,31 +678,38 @@ acpi_attach(device_t dev)
sc->acpi_s4bios = 1;
#endif
- /* Probe all supported sleep states. */
- acpi_sleep_states[ACPI_STATE_S0] = TRUE;
- for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
+ /*
+ * Probe all supported ACPI sleep states. Awake (S0) is always supported.
+ */
+ acpi_supported_sstates[ACPI_STATE_S0] = TRUE;
+ acpi_supported_stypes[POWER_STYPE_AWAKE] = true;
+ for (state = ACPI_STATE_S1; state <= ACPI_STATE_S5; state++)
if (ACPI_SUCCESS(AcpiEvaluateObject(ACPI_ROOT_OBJECT,
__DECONST(char *, AcpiGbl_SleepStateNames[state]), NULL, NULL)) &&
- ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB)))
- acpi_sleep_states[state] = TRUE;
+ ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB))) {
+ acpi_supported_sstates[state] = TRUE;
+ acpi_supported_stypes[acpi_sstate_to_stype(state)] = true;
+ }
/*
- * Dispatch the default sleep state to devices. The lid switch is set
+ * Dispatch the default sleep type to devices. The lid switch is set
* to UNKNOWN by default to avoid surprising users.
*/
- sc->acpi_power_button_sx = acpi_sleep_states[ACPI_STATE_S5] ?
- ACPI_STATE_S5 : ACPI_STATE_UNKNOWN;
- sc->acpi_lid_switch_sx = ACPI_STATE_UNKNOWN;
- sc->acpi_standby_sx = acpi_sleep_states[ACPI_STATE_S1] ?
- ACPI_STATE_S1 : ACPI_STATE_UNKNOWN;
- sc->acpi_suspend_sx = acpi_sleep_states[ACPI_STATE_S3] ?
- ACPI_STATE_S3 : ACPI_STATE_UNKNOWN;
-
- /* Pick the first valid sleep state for the sleep button default. */
- sc->acpi_sleep_button_sx = ACPI_STATE_UNKNOWN;
- for (state = ACPI_STATE_S1; state <= ACPI_STATE_S4; state++)
- if (acpi_sleep_states[state]) {
- sc->acpi_sleep_button_sx = state;
+ sc->acpi_power_button_stype = acpi_supported_stypes[POWER_STYPE_POWEROFF] ?
+ POWER_STYPE_POWEROFF : POWER_STYPE_UNKNOWN;
+ sc->acpi_lid_switch_stype = POWER_STYPE_UNKNOWN;
+
+ sc->acpi_standby_sx = ACPI_STATE_UNKNOWN;
+ if (acpi_supported_sstates[ACPI_STATE_S1])
+ sc->acpi_standby_sx = ACPI_STATE_S1;
+ else if (acpi_supported_sstates[ACPI_STATE_S2])
+ sc->acpi_standby_sx = ACPI_STATE_S2;
+
+ /* Pick the first valid sleep type for the sleep button default. */
+ sc->acpi_sleep_button_stype = POWER_STYPE_UNKNOWN;
+ for (stype = POWER_STYPE_STANDBY; stype <= POWER_STYPE_HIBERNATE; stype++)
+ if (acpi_supported_stypes[stype]) {
+ sc->acpi_sleep_button_stype = stype;
break;
}
@@ -708,7 +734,7 @@ acpi_attach(device_t dev)
/* Flag our initial states. */
sc->acpi_enabled = TRUE;
- sc->acpi_sstate = ACPI_STATE_S0;
+ sc->acpi_stype = POWER_STYPE_AWAKE;
sc->acpi_sleep_disabled = TRUE;
/* Create the control device */
@@ -720,7 +746,8 @@ acpi_attach(device_t dev)
goto out;
/* Register ACPI again to pass the correct argument of pm_func. */
- power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc);
+ power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc,
+ acpi_supported_stypes);
acpi_platform_osc(dev);
@@ -768,6 +795,36 @@ acpi_stype_to_sstate(struct acpi_softc *sc, enum power_stype stype)
return (ACPI_STATE_UNKNOWN);
}
+/*
+ * XXX It would be nice if we didn't need this function, but we'd need
+ * acpi_EnterSleepState and acpi_ReqSleepState to take in actual ACPI S-states,
+ * which won't be possible at the moment because suspend-to-idle (which is not
+ * an ACPI S-state nor maps to one) will be implemented here.
+ *
+ * In the future, we should make generic a lot of the logic in these functions
+ * to enable suspend-to-idle on non-ACPI builds, and then make
+ * acpi_EnterSleepState and acpi_ReqSleepState truly take in ACPI S-states
+ * again.
+ */
+static enum power_stype
+acpi_sstate_to_stype(int sstate)
+{
+ switch (sstate) {
+ case ACPI_STATE_S0:
+ return (POWER_STYPE_AWAKE);
+ case ACPI_STATE_S1:
+ case ACPI_STATE_S2:
+ return (POWER_STYPE_STANDBY);
+ case ACPI_STATE_S3:
+ return (POWER_STYPE_SUSPEND_TO_MEM);
+ case ACPI_STATE_S4:
+ return (POWER_STYPE_HIBERNATE);
+ case ACPI_STATE_S5:
+ return (POWER_STYPE_POWEROFF);
+ }
+ return (POWER_STYPE_UNKNOWN);
+}
+
static void
acpi_set_power_children(device_t dev, int state)
{
@@ -820,6 +877,7 @@ acpi_resume(device_t dev)
static int
acpi_shutdown(device_t dev)
{
+ struct acpi_softc *sc = device_get_softc(dev);
bus_topo_assert();
@@ -830,7 +888,7 @@ acpi_shutdown(device_t dev)
* Enable any GPEs that are able to power-on the system (i.e., RTC).
* Also, disable any that are not valid for this state (most).
*/
- acpi_wake_prep_walk(ACPI_STATE_S5);
+ acpi_wake_prep_walk(sc, POWER_STYPE_POWEROFF);
return (0);
}
@@ -2063,7 +2121,7 @@ acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate)
* Note illegal _S0D is evaluated because some systems expect this.
*/
sc = device_get_softc(bus);
- snprintf(sxd, sizeof(sxd), "_S%dD", sc->acpi_sstate);
+ snprintf(sxd, sizeof(sxd), "_S%dD", acpi_stype_to_sstate(sc, sc->acpi_stype));
status = acpi_GetInteger(handle, sxd, dstate);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
device_printf(dev, "failed to get %s on %s: %s\n", sxd,
@@ -3161,9 +3219,9 @@ acpi_sleep_force_task(void *context)
{
struct acpi_softc *sc = (struct acpi_softc *)context;
- if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
- device_printf(sc->acpi_dev, "force sleep state S%d failed\n",
- sc->acpi_next_sstate);
+ if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_stype)))
+ device_printf(sc->acpi_dev, "force sleep state %s failed\n",
+ power_stype_to_name(sc->acpi_next_stype));
}
static void
@@ -3190,24 +3248,24 @@ acpi_sleep_force(void *arg)
* acks are in.
*/
int
-acpi_ReqSleepState(struct acpi_softc *sc, int state)
+acpi_ReqSleepState(struct acpi_softc *sc, enum power_stype stype)
{
#if defined(__amd64__) || defined(__i386__)
struct apm_clone_data *clone;
ACPI_STATUS status;
- if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
+ if (stype < POWER_STYPE_AWAKE || stype >= POWER_STYPE_COUNT)
return (EINVAL);
- if (!acpi_sleep_states[state])
+ if (!acpi_supported_stypes[stype])
return (EOPNOTSUPP);
/*
* If a reboot/shutdown/suspend request is already in progress or
* suspend is blocked due to an upcoming shutdown, just return.
*/
- if (rebooting || sc->acpi_next_sstate != 0 || suspend_blocked) {
+ if (rebooting || sc->acpi_next_stype != POWER_STYPE_AWAKE ||
+ suspend_blocked)
return (0);
- }
/* Wait until sleep is enabled. */
while (sc->acpi_sleep_disabled) {
@@ -3216,12 +3274,12 @@ acpi_ReqSleepState(struct acpi_softc *sc, int state)
ACPI_LOCK(acpi);
- sc->acpi_next_sstate = state;
+ sc->acpi_next_stype = stype;
/* S5 (soft-off) should be entered directly with no waiting. */
- if (state == ACPI_STATE_S5) {
+ if (stype == POWER_STYPE_POWEROFF) {
ACPI_UNLOCK(acpi);
- status = acpi_EnterSleepState(sc, state);
+ status = acpi_EnterSleepState(sc, stype);
return (ACPI_SUCCESS(status) ? 0 : ENXIO);
}
@@ -3237,7 +3295,7 @@ acpi_ReqSleepState(struct acpi_softc *sc, int state)
/* If devd(8) is not running, immediately enter the sleep state. */
if (!devctl_process_running()) {
ACPI_UNLOCK(acpi);
- status = acpi_EnterSleepState(sc, state);
+ status = acpi_EnterSleepState(sc, stype);
return (ACPI_SUCCESS(status) ? 0 : ENXIO);
}
@@ -3252,7 +3310,7 @@ acpi_ReqSleepState(struct acpi_softc *sc, int state)
ACPI_UNLOCK(acpi);
/* Now notify devd(8) also. */
- acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, state);
+ acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, stype);
return (0);
#else
@@ -3275,17 +3333,17 @@ acpi_AckSleepState(struct apm_clone_data *clone, int error)
struct acpi_softc *sc;
int ret, sleeping;
- /* If no pending sleep state, return an error. */
+ /* If no pending sleep type, return an error. */
ACPI_LOCK(acpi);
sc = clone->acpi_sc;
- if (sc->acpi_next_sstate == 0) {
+ if (sc->acpi_next_stype == POWER_STYPE_AWAKE) {
ACPI_UNLOCK(acpi);
return (ENXIO);
}
/* Caller wants to abort suspend process. */
if (error) {
- sc->acpi_next_sstate = 0;
+ sc->acpi_next_stype = POWER_STYPE_AWAKE;
callout_stop(&sc->susp_force_to);
device_printf(sc->acpi_dev,
"listener on %s cancelled the pending suspend\n",
@@ -3315,7 +3373,7 @@ acpi_AckSleepState(struct apm_clone_data *clone, int error)
ACPI_UNLOCK(acpi);
ret = 0;
if (sleeping) {
- if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
+ if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_stype)))
ret = ENODEV;
}
return (ret);
@@ -3372,24 +3430,27 @@ enum acpi_sleep_state {
* Currently we support S1-S5 but S4 is only S4BIOS
*/
static ACPI_STATUS
-acpi_EnterSleepState(struct acpi_softc *sc, int state)
+acpi_EnterSleepState(struct acpi_softc *sc, enum power_stype stype)
{
register_t intr;
ACPI_STATUS status;
ACPI_EVENT_STATUS power_button_status;
enum acpi_sleep_state slp_state;
+ int acpi_sstate;
int sleep_result;
- ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
+ ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, stype);
- if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
+ if (stype <= POWER_STYPE_AWAKE || stype >= POWER_STYPE_COUNT)
return_ACPI_STATUS (AE_BAD_PARAMETER);
- if (!acpi_sleep_states[state]) {
- device_printf(sc->acpi_dev, "Sleep state S%d not supported by BIOS\n",
- state);
+ if (!acpi_supported_stypes[stype]) {
+ device_printf(sc->acpi_dev, "Sleep type %s not supported on this "
+ "platform\n", power_stype_to_name(stype));
return (AE_SUPPORT);
}
+ acpi_sstate = acpi_stype_to_sstate(sc, stype);
+
/* Re-entry once we're suspending is not allowed. */
status = acpi_sleep_disable(sc);
if (ACPI_FAILURE(status)) {
@@ -3398,7 +3459,7 @@ acpi_EnterSleepState(struct acpi_softc *sc, int state)
return (status);
}
- if (state == ACPI_STATE_S5) {
+ if (stype == POWER_STYPE_POWEROFF) {
/*
* Shut down cleanly and power off. This will call us back through the
* shutdown handlers.
@@ -3426,16 +3487,16 @@ acpi_EnterSleepState(struct acpi_softc *sc, int state)
#endif
/*
- * Be sure to hold Giant across DEVICE_SUSPEND/RESUME
+ * Be sure to hold bus topology lock across DEVICE_SUSPEND/RESUME.
*/
bus_topo_lock();
slp_state = ACPI_SS_NONE;
- sc->acpi_sstate = state;
+ sc->acpi_stype = stype;
/* Enable any GPEs as appropriate and requested by the user. */
- acpi_wake_prep_walk(state);
+ acpi_wake_prep_walk(sc, stype);
slp_state = ACPI_SS_GPE_SET;
/*
@@ -3452,7 +3513,7 @@ acpi_EnterSleepState(struct acpi_softc *sc, int state)
}
slp_state = ACPI_SS_DEV_SUSPEND;
- status = AcpiEnterSleepStatePrep(state);
+ status = AcpiEnterSleepStatePrep(acpi_sstate);
if (ACPI_FAILURE(status)) {
device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n",
AcpiFormatException(status));
@@ -3465,9 +3526,9 @@ acpi_EnterSleepState(struct acpi_softc *sc, int state)
suspendclock();
intr = intr_disable();
- if (state != ACPI_STATE_S1) {
- sleep_result = acpi_sleep_machdep(sc, state);
- acpi_wakeup_machdep(sc, state, sleep_result, 0);
+ if (stype != POWER_STYPE_STANDBY) {
+ sleep_result = acpi_sleep_machdep(sc, acpi_sstate);
+ acpi_wakeup_machdep(sc, acpi_sstate, sleep_result, 0);
/*
* XXX According to ACPI specification SCI_EN bit should be restored
@@ -3478,10 +3539,10 @@ acpi_EnterSleepState(struct acpi_softc *sc, int state)
* This hack is picked up from Linux, which claims that it follows
* Windows behavior.
*/
- if (sleep_result == 1 && state != ACPI_STATE_S4)
+ if (sleep_result == 1 && stype != POWER_STYPE_HIBERNATE)
AcpiWriteBitRegister(ACPI_BITREG_SCI_ENABLE, ACPI_ENABLE_EVENT);
- if (sleep_result == 1 && state == ACPI_STATE_S3) {
+ if (sleep_result == 1 && stype == POWER_STYPE_SUSPEND_TO_MEM) {
/*
* Prevent mis-interpretation of the wakeup by power button
* as a request for power off.
@@ -3507,20 +3568,20 @@ acpi_EnterSleepState(struct acpi_softc *sc, int state)
intr_restore(intr);
/* call acpi_wakeup_machdep() again with interrupt enabled */
- acpi_wakeup_machdep(sc, state, sleep_result, 1);
+ acpi_wakeup_machdep(sc, acpi_sstate, sleep_result, 1);
- AcpiLeaveSleepStatePrep(state);
+ AcpiLeaveSleepStatePrep(acpi_sstate);
if (sleep_result == -1)
goto backout;
- /* Re-enable ACPI hardware on wakeup from sleep state 4. */
- if (state == ACPI_STATE_S4)
+ /* Re-enable ACPI hardware on wakeup from hibernate. */
+ if (stype == POWER_STYPE_HIBERNATE)
AcpiEnable();
} else {
- status = AcpiEnterSleepState(state);
+ status = AcpiEnterSleepState(acpi_sstate);
intr_restore(intr);
- AcpiLeaveSleepStatePrep(state);
+ AcpiLeaveSleepStatePrep(acpi_sstate);
if (ACPI_FAILURE(status)) {
device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n",
AcpiFormatException(status));
@@ -3537,13 +3598,13 @@ backout:
if (slp_state >= ACPI_SS_SLP_PREP)
resumeclock();
if (slp_state >= ACPI_SS_GPE_SET) {
- acpi_wake_prep_walk(state);
- sc->acpi_sstate = ACPI_STATE_S0;
+ acpi_wake_prep_walk(sc, stype);
+ sc->acpi_stype = POWER_STYPE_AWAKE;
}
if (slp_state >= ACPI_SS_DEV_SUSPEND)
DEVICE_RESUME(root_bus);
if (slp_state >= ACPI_SS_SLP_PREP)
- AcpiLeaveSleepState(state);
+ AcpiLeaveSleepState(acpi_sstate);
if (slp_state >= ACPI_SS_SLEPT) {
#if defined(__i386__) || defined(__amd64__)
/* NB: we are still using ACPI timecounter at this point. */
@@ -3552,7 +3613,7 @@ backout:
acpi_resync_clock(sc);
acpi_enable_fixed_events(sc);
}
- sc->acpi_next_sstate = 0;
+ sc->acpi_next_stype = POWER_STYPE_AWAKE;
bus_topo_unlock();
@@ -3578,7 +3639,7 @@ backout:
/* Run /etc/rc.resume after we are back. */
if (devctl_process_running())
- acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, state);
+ acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, stype);
return_ACPI_STATUS (status);
}
@@ -3629,8 +3690,10 @@ acpi_wake_set_enable(device_t dev, int enable)
}
static int
-acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate)
+acpi_wake_sleep_prep(struct acpi_softc *sc, ACPI_HANDLE handle,
+ enum power_stype stype)
{
+ int sstate;
struct acpi_prw_data prw;
device_t dev;
@@ -3639,6 +3702,8 @@ acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate)
return (ENXIO);
dev = acpi_get_device(handle);
+ sstate = acpi_stype_to_sstate(sc, stype);
+
/*
* The destination sleep state must be less than (i.e., higher power)
* or equal to the value specified by _PRW. If this GPE cannot be
@@ -3649,22 +3714,24 @@ acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate)
if (sstate > prw.lowest_wake) {
AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE);
if (bootverbose)
- device_printf(dev, "wake_prep disabled wake for %s (S%d)\n",
- acpi_name(handle), sstate);
+ device_printf(dev, "wake_prep disabled wake for %s (%s)\n",
+ acpi_name(handle), power_stype_to_name(stype));
} else if (dev && (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) != 0) {
acpi_pwr_wake_enable(handle, 1);
acpi_SetInteger(handle, "_PSW", 1);
if (bootverbose)
- device_printf(dev, "wake_prep enabled for %s (S%d)\n",
- acpi_name(handle), sstate);
+ device_printf(dev, "wake_prep enabled for %s (%s)\n",
+ acpi_name(handle), power_stype_to_name(stype));
}
return (0);
}
static int
-acpi_wake_run_prep(ACPI_HANDLE handle, int sstate)
+acpi_wake_run_prep(struct acpi_softc *sc, ACPI_HANDLE handle,
+ enum power_stype stype)
{
+ int sstate;
struct acpi_prw_data prw;
device_t dev;
@@ -3678,6 +3745,8 @@ acpi_wake_run_prep(ACPI_HANDLE handle, int sstate)
if (dev == NULL || (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) == 0)
return (0);
+ sstate = acpi_stype_to_sstate(sc, stype);
+
/*
* If this GPE couldn't be enabled for the previous sleep state, it was
* disabled before going to sleep so re-enable it. If it was enabled,
@@ -3701,26 +3770,29 @@ acpi_wake_run_prep(ACPI_HANDLE handle, int sstate)
static ACPI_STATUS
acpi_wake_prep(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
{
- int sstate;
+ struct acpi_wake_prep_context *ctx = context;
/* If suspending, run the sleep prep function, otherwise wake. */
- sstate = *(int *)context;
if (AcpiGbl_SystemAwakeAndRunning)
- acpi_wake_sleep_prep(handle, sstate);
+ acpi_wake_sleep_prep(ctx->sc, handle, ctx->stype);
else
- acpi_wake_run_prep(handle, sstate);
+ acpi_wake_run_prep(ctx->sc, handle, ctx->stype);
return (AE_OK);
}
/* Walk the tree rooted at acpi0 to prep devices for suspend/resume. */
static int
-acpi_wake_prep_walk(int sstate)
+acpi_wake_prep_walk(struct acpi_softc *sc, enum power_stype stype)
{
ACPI_HANDLE sb_handle;
+ struct acpi_wake_prep_context ctx = {
+ .sc = sc,
+ .stype = stype,
+ };
if (ACPI_SUCCESS(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle)))
AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100,
- acpi_wake_prep, NULL, &sstate, NULL);
+ acpi_wake_prep, NULL, &ctx, NULL);
return (0);
}
@@ -3879,31 +3951,35 @@ out:
/* System Event Handlers (registered by EVENTHANDLER_REGISTER) */
static void
-acpi_system_eventhandler_sleep(void *arg, int state)
+acpi_system_eventhandler_sleep(void *arg, enum power_stype stype)
{
struct acpi_softc *sc = (struct acpi_softc *)arg;
int ret;
- ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
+ ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, stype);
/* Check if button action is disabled or unknown. */
- if (state == ACPI_STATE_UNKNOWN)
+ if (stype == ACPI_STATE_UNKNOWN)
return;
- /* Request that the system prepare to enter the given suspend state. */
- ret = acpi_ReqSleepState(sc, state);
+ /*
+ * Request that the system prepare to enter the given sleep type. Note
+ * that stype here is an enum power_stype, not an ACPI S-state.
+ */
+ ret = acpi_ReqSleepState(sc, stype);
if (ret != 0)
device_printf(sc->acpi_dev,
- "request to enter state S%d failed (err %d)\n", state, ret);
+ "request to enter state %s failed (err %d)\n",
+ power_stype_to_name(stype), ret);
return_VOID;
}
static void
-acpi_system_eventhandler_wakeup(void *arg, int state)
+acpi_system_eventhandler_wakeup(void *arg, enum power_stype stype)
{
- ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
+ ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, stype);
/* Currently, nothing to do for wakeup. */
@@ -3917,14 +3993,14 @@ static void
acpi_invoke_sleep_eventhandler(void *context)
{
- EVENTHANDLER_INVOKE(acpi_sleep_event, *(int *)context);
+ EVENTHANDLER_INVOKE(acpi_sleep_event, *(enum power_stype *)context);
}
static void
acpi_invoke_wake_eventhandler(void *context)
{
- EVENTHANDLER_INVOKE(acpi_wakeup_event, *(int *)context);
+ EVENTHANDLER_INVOKE(acpi_wakeup_event, *(enum power_stype *)context);
}
UINT32
@@ -3940,7 +4016,7 @@ acpi_event_power_button_sleep(void *context)
#if defined(__amd64__) || defined(__i386__)
if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
- acpi_invoke_sleep_eventhandler, &sc->acpi_power_button_sx)))
+ acpi_invoke_sleep_eventhandler, &sc->acpi_power_button_stype)))
return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
#else
shutdown_nice(RB_POWEROFF);
@@ -3957,7 +4033,7 @@ acpi_event_power_button_wake(void *context)
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
- acpi_invoke_wake_eventhandler, &sc->acpi_power_button_sx)))
+ acpi_invoke_wake_eventhandler, &sc->acpi_power_button_stype)))
return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
return_VALUE (ACPI_INTERRUPT_HANDLED);
}
@@ -3970,7 +4046,7 @@ acpi_event_sleep_button_sleep(void *context)
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
- acpi_invoke_sleep_eventhandler, &sc->acpi_sleep_button_sx)))
+ acpi_invoke_sleep_eventhandler, &sc->acpi_sleep_button_stype)))
return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
return_VALUE (ACPI_INTERRUPT_HANDLED);
}
@@ -3983,7 +4059,7 @@ acpi_event_sleep_button_wake(void *context)
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
- acpi_invoke_wake_eventhandler, &sc->acpi_sleep_button_sx)))
+ acpi_invoke_wake_eventhandler, &sc->acpi_sleep_button_stype)))
return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
return_VALUE (ACPI_INTERRUPT_HANDLED);
}
@@ -4179,7 +4255,8 @@ acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *t
{
struct acpi_softc *sc;
struct acpi_ioctl_hook *hp;
- int error, state;
+ int error;
+ int sstate;
error = 0;
hp = NULL;
@@ -4209,9 +4286,9 @@ acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *t
/* Core system ioctls. */
switch (cmd) {
case ACPIIO_REQSLPSTATE:
- state = *(int *)addr;
- if (state != ACPI_STATE_S5)
- return (acpi_ReqSleepState(sc, state));
+ sstate = *(int *)addr;
+ if (sstate != ACPI_STATE_S5)
+ return (acpi_ReqSleepState(sc, acpi_sstate_to_stype(sstate)));
device_printf(sc->acpi_dev, "power off via acpi ioctl not supported\n");
error = EOPNOTSUPP;
break;
@@ -4220,12 +4297,12 @@ acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *t
error = acpi_AckSleepState(sc->acpi_clone, error);
break;
case ACPIIO_SETSLPSTATE: /* DEPRECATED */
- state = *(int *)addr;
- if (state < ACPI_STATE_S0 || state > ACPI_S_STATES_MAX)
+ sstate = *(int *)addr;
+ if (sstate < ACPI_STATE_S0 || sstate > ACPI_STATE_S5)
return (EINVAL);
- if (!acpi_sleep_states[state])
+ if (!acpi_supported_sstates[sstate])
return (EOPNOTSUPP);
- if (ACPI_FAILURE(acpi_SetSleepState(sc, state)))
+ if (ACPI_FAILURE(acpi_SetSleepState(sc, acpi_sstate_to_stype(sstate))))
error = ENXIO;
break;
default:
@@ -4237,7 +4314,7 @@ acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *t
}
static int
-acpi_sname2sstate(const char *sname)
+acpi_sname_to_sstate(const char *sname)
{
int sstate;
@@ -4252,14 +4329,15 @@ acpi_sname2sstate(const char *sname)
}
static const char *
-acpi_sstate2sname(int sstate)
+acpi_sstate_to_sname(int state)
{
- static const char *snames[] = { "S0", "S1", "S2", "S3", "S4", "S5" };
+ static const char *snames[ACPI_S_STATE_COUNT] = {"S0", "S1", "S2", "S3",
+ "S4", "S5"};
- if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5)
- return (snames[sstate]);
- else if (sstate == ACPI_STATE_UNKNOWN)
+ if (state == ACPI_STATE_UNKNOWN)
return ("NONE");
+ if (state >= ACPI_STATE_S0 && state < ACPI_S_STATE_COUNT)
+ return (snames[state]);
return (NULL);
}
@@ -4272,8 +4350,8 @@ acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
- if (acpi_sleep_states[state])
- sbuf_printf(&sb, "%s ", acpi_sstate2sname(state));
+ if (acpi_supported_sstates[state])
+ sbuf_printf(&sb, "%s ", acpi_sstate_to_sname(state));
sbuf_trim(&sb);
sbuf_finish(&sb);
error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
@@ -4282,26 +4360,89 @@ acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
}
static int
+acpi_suspend_state_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ char name[10];
+ int err;
+ struct acpi_softc *sc = oidp->oid_arg1;
+ enum power_stype new_stype;
+ enum power_stype old_stype = power_suspend_stype;
+ int old_sstate = acpi_stype_to_sstate(sc, old_stype);
+ int new_sstate;
+
+ strlcpy(name, acpi_sstate_to_sname(old_sstate), sizeof(name));
+ err = sysctl_handle_string(oidp, name, sizeof(name), req);
+ if (err != 0 || req->newptr == NULL)
+ return (err);
+
+ new_sstate = acpi_sname_to_sstate(name);
+ if (new_sstate < 0)
+ return (EINVAL);
+ new_stype = acpi_sstate_to_stype(new_sstate);
+ if (acpi_supported_stypes[new_stype] == false)
+ return (EOPNOTSUPP);
+ if (new_stype != old_stype)
+ power_suspend_stype = new_stype;
+ return (err);
+}
+
+static int
acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
{
char sleep_state[10];
- int error, new_state, old_state;
+ int error;
+ int new_sstate, old_sstate;
- old_state = *(int *)oidp->oid_arg1;
- strlcpy(sleep_state, acpi_sstate2sname(old_state), sizeof(sleep_state));
+ old_sstate = *(int *)oidp->oid_arg1;
+ strlcpy(sleep_state, acpi_sstate_to_sname(old_sstate), sizeof(sleep_state));
error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req);
if (error == 0 && req->newptr != NULL) {
- new_state = acpi_sname2sstate(sleep_state);
- if (new_state < ACPI_STATE_S1)
+ new_sstate = acpi_sname_to_sstate(sleep_state);
+ if (new_sstate < 0)
return (EINVAL);
- if (new_state < ACPI_S_STATE_COUNT && !acpi_sleep_states[new_state])
+ if (new_sstate < ACPI_S_STATE_COUNT &&
+ !acpi_supported_sstates[new_sstate])
return (EOPNOTSUPP);
- if (new_state != old_state)
- *(int *)oidp->oid_arg1 = new_state;
+ if (new_sstate != old_sstate)
+ *(int *)oidp->oid_arg1 = new_sstate;
}
return (error);
}
+static int
+acpi_stype_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ char name[10];
+ int err;
+ int sstate;
+ enum power_stype new_stype, old_stype;
+
+ old_stype = *(enum power_stype *)oidp->oid_arg1;
+ strlcpy(name, power_stype_to_name(old_stype), sizeof(name));
+ err = sysctl_handle_string(oidp, name, sizeof(name), req);
+ if (err != 0 || req->newptr == NULL)
+ return (err);
+
+ new_stype = power_name_to_stype(name);
+ if (new_stype == POWER_STYPE_UNKNOWN) {
+ sstate = acpi_sname_to_sstate(name);
+ if (sstate < 0)
+ return (EINVAL);
+ printf("warning: this sysctl expects a sleep type, but an ACPI S-state has "
+ "been passed to it. This functionality is deprecated; see acpi(4).\n");
+ MPASS(sstate < ACPI_S_STATE_COUNT);
+ if (acpi_supported_sstates[sstate] == false)
+ return (EOPNOTSUPP);
+ new_stype = acpi_sstate_to_stype(sstate);
+ }
+
+ if (acpi_supported_stypes[new_stype] == false)
+ return (EOPNOTSUPP);
+ if (new_stype != old_stype)
+ *(enum power_stype *)oidp->oid_arg1 = new_stype;
+ return (0);
+}
+
/* Inform devctl(4) when we receive a Notify. */
void
acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify)
@@ -4650,7 +4791,7 @@ acpi_reset_interfaces(device_t dev)
static int
acpi_pm_func(u_long cmd, void *arg, enum power_stype stype)
{
- int error, sstate;
+ int error;
struct acpi_softc *sc;
error = 0;
@@ -4661,8 +4802,7 @@ acpi_pm_func(u_long cmd, void *arg, enum power_stype stype)
error = EINVAL;
goto out;
}
- sstate = acpi_stype_to_sstate(sc, stype);
- if (ACPI_FAILURE(acpi_EnterSleepState(sc, sstate)))
+ if (ACPI_FAILURE(acpi_EnterSleepState(sc, stype)))
error = ENXIO;
break;
default:
@@ -4680,7 +4820,8 @@ acpi_pm_register(void *arg)
if (!cold || resource_disabled("acpi", 0))
return;
- power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL);
+ power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL,
+ acpi_supported_stypes);
}
SYSINIT(power, SI_SUB_KLD, SI_ORDER_ANY, acpi_pm_register, NULL);
diff --git a/sys/dev/acpica/acpi_lid.c b/sys/dev/acpica/acpi_lid.c
index 142791f7282a..fb8755d9f0fe 100644
--- a/sys/dev/acpica/acpi_lid.c
+++ b/sys/dev/acpica/acpi_lid.c
@@ -235,9 +235,9 @@ acpi_lid_notify_status_changed(void *arg)
sc->lid_status ? "opened" : "closed");
if (sc->lid_status == 0)
- EVENTHANDLER_INVOKE(acpi_sleep_event, acpi_sc->acpi_lid_switch_sx);
+ EVENTHANDLER_INVOKE(acpi_sleep_event, acpi_sc->acpi_lid_switch_stype);
else
- EVENTHANDLER_INVOKE(acpi_wakeup_event, acpi_sc->acpi_lid_switch_sx);
+ EVENTHANDLER_INVOKE(acpi_wakeup_event, acpi_sc->acpi_lid_switch_stype);
out:
ACPI_SERIAL_END(lid);
diff --git a/sys/dev/acpica/acpi_powerres.c b/sys/dev/acpica/acpi_powerres.c
index 0a8b67a5fa84..29d1690f1bdd 100644
--- a/sys/dev/acpica/acpi_powerres.c
+++ b/sys/dev/acpica/acpi_powerres.c
@@ -76,13 +76,6 @@ struct acpi_powerconsumer {
/* Device which is powered */
ACPI_HANDLE ac_consumer;
int ac_state;
-
- struct {
- bool prx_has;
- size_t prx_count;
- ACPI_HANDLE *prx_deps;
- } ac_prx[ACPI_D_STATE_COUNT];
-
TAILQ_ENTRY(acpi_powerconsumer) ac_link;
TAILQ_HEAD(,acpi_powerreference) ac_references;
};
@@ -103,7 +96,9 @@ static TAILQ_HEAD(acpi_powerconsumer_list, acpi_powerconsumer)
ACPI_SERIAL_DECL(powerres, "ACPI power resources");
static ACPI_STATUS acpi_pwr_register_consumer(ACPI_HANDLE consumer);
+#ifdef notyet
static ACPI_STATUS acpi_pwr_deregister_consumer(ACPI_HANDLE consumer);
+#endif /* notyet */
static ACPI_STATUS acpi_pwr_register_resource(ACPI_HANDLE res);
#ifdef notyet
static ACPI_STATUS acpi_pwr_deregister_resource(ACPI_HANDLE res);
@@ -117,8 +112,6 @@ static struct acpi_powerresource
*acpi_pwr_find_resource(ACPI_HANDLE res);
static struct acpi_powerconsumer
*acpi_pwr_find_consumer(ACPI_HANDLE consumer);
-static ACPI_STATUS acpi_pwr_infer_state(struct acpi_powerconsumer *pc);
-static ACPI_STATUS acpi_pwr_get_state_locked(ACPI_HANDLE consumer, int *state);
/*
* Register a power resource.
@@ -229,84 +222,6 @@ acpi_pwr_deregister_resource(ACPI_HANDLE res)
#endif /* notyet */
/*
- * Evaluate the _PRx (power resources each D-state depends on). This also
- * populates the acpi_powerresources queue with the power resources discovered
- * during this step.
- *
- * ACPI 7.3.8 - 7.3.11 guarantee that _PRx will return the same data each
- * time they are evaluated.
- *
- * If this function fails, acpi_pwr_deregister_consumer() must be called on the
- * power consumer to free already allocated memory.
- */
-static ACPI_STATUS
-acpi_pwr_get_power_resources(ACPI_HANDLE consumer, struct acpi_powerconsumer *pc)
-{
- ACPI_INTEGER status;
- ACPI_STRING reslist_name;
- ACPI_HANDLE reslist_handle;
- ACPI_STRING reslist_names[] = {"_PR0", "_PR1", "_PR2", "_PR3"};
- ACPI_BUFFER reslist;
- ACPI_OBJECT *reslist_object;
- ACPI_OBJECT *dep;
- ACPI_HANDLE *res;
-
- ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
- ACPI_SERIAL_ASSERT(powerres);
-
- MPASS(consumer != NULL);
-
- for (int state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++) {
- pc->ac_prx[state].prx_has = false;
- pc->ac_prx[state].prx_count = 0;
- pc->ac_prx[state].prx_deps = NULL;
-
- reslist_name = reslist_names[state - ACPI_STATE_D0];
- if (ACPI_FAILURE(AcpiGetHandle(consumer, reslist_name, &reslist_handle)))
- continue;
-
- reslist.Pointer = NULL;
- reslist.Length = ACPI_ALLOCATE_BUFFER;
- status = AcpiEvaluateObjectTyped(reslist_handle, NULL, NULL, &reslist,
- ACPI_TYPE_PACKAGE);
- if (ACPI_FAILURE(status) || reslist.Pointer == NULL)
- /*
- * ACPI_ALLOCATE_BUFFER entails everything will be freed on error
- * by AcpiEvaluateObjectTyped.
- */
- continue;
-
- reslist_object = (ACPI_OBJECT *)reslist.Pointer;
- pc->ac_prx[state].prx_has = true;
- pc->ac_prx[state].prx_count = reslist_object->Package.Count;
-
- if (reslist_object->Package.Count == 0) {
- AcpiOsFree(reslist_object);
- continue;
- }
-
- pc->ac_prx[state].prx_deps = mallocarray(pc->ac_prx[state].prx_count,
- sizeof(*pc->ac_prx[state].prx_deps), M_ACPIPWR, M_NOWAIT);
- if (pc->ac_prx[state].prx_deps == NULL) {
- AcpiOsFree(reslist_object);
- return_ACPI_STATUS (AE_NO_MEMORY);
- }
-
- for (size_t i = 0; i < reslist_object->Package.Count; i++) {
- dep = &reslist_object->Package.Elements[i];
- res = dep->Reference.Handle;
- pc->ac_prx[state].prx_deps[i] = res;
-
- /* It's fine to attempt to register the same resource twice. */
- acpi_pwr_register_resource(res);
- }
- AcpiOsFree(reslist_object);
- }
-
- return_ACPI_STATUS (AE_OK);
-}
-
-/*
* Register a power consumer.
*
* It's OK to call this if we already know about the consumer.
@@ -314,7 +229,6 @@ acpi_pwr_get_power_resources(ACPI_HANDLE consumer, struct acpi_powerconsumer *pc
static ACPI_STATUS
acpi_pwr_register_consumer(ACPI_HANDLE consumer)
{
- ACPI_INTEGER status;
struct acpi_powerconsumer *pc;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
@@ -325,30 +239,14 @@ acpi_pwr_register_consumer(ACPI_HANDLE consumer)
return_ACPI_STATUS (AE_OK);
/* Allocate a new power consumer */
- if ((pc = malloc(sizeof(*pc), M_ACPIPWR, M_NOWAIT | M_ZERO)) == NULL)
+ if ((pc = malloc(sizeof(*pc), M_ACPIPWR, M_NOWAIT)) == NULL)
return_ACPI_STATUS (AE_NO_MEMORY);
TAILQ_INSERT_HEAD(&acpi_powerconsumers, pc, ac_link);
TAILQ_INIT(&pc->ac_references);
pc->ac_consumer = consumer;
- /*
- * Get all its power resource dependencies, if it has _PRx. We do this now
- * as an opportunity to populate the acpi_powerresources queue.
- *
- * If this fails, immediately deregister it.
- */
- status = acpi_pwr_get_power_resources(consumer, pc);
- if (ACPI_FAILURE(status)) {
- ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
- "failed to get power resources for %s\n",
- acpi_name(consumer)));
- acpi_pwr_deregister_consumer(consumer);
- return_ACPI_STATUS (status);
- }
-
- /* Find its initial state. */
- if (ACPI_FAILURE(acpi_pwr_get_state_locked(consumer, &pc->ac_state)))
- pc->ac_state = ACPI_STATE_UNKNOWN;
+ /* XXX we should try to find its current state */
+ pc->ac_state = ACPI_STATE_UNKNOWN;
ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "registered power consumer %s\n",
acpi_name(consumer)));
@@ -356,6 +254,7 @@ acpi_pwr_register_consumer(ACPI_HANDLE consumer)
return_ACPI_STATUS (AE_OK);
}
+#ifdef notyet
/*
* Deregister a power consumer.
*
@@ -380,9 +279,6 @@ acpi_pwr_deregister_consumer(ACPI_HANDLE consumer)
/* Pull the consumer off the list and free it */
TAILQ_REMOVE(&acpi_powerconsumers, pc, ac_link);
- for (size_t i = 0; i < sizeof(pc->ac_prx) / sizeof(*pc->ac_prx); i++)
- if (pc->ac_prx[i].prx_deps != NULL)
- free(pc->ac_prx[i].prx_deps, M_ACPIPWR);
free(pc, M_ACPIPWR);
ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "deregistered power consumer %s\n",
@@ -390,139 +286,10 @@ acpi_pwr_deregister_consumer(ACPI_HANDLE consumer)
return_ACPI_STATUS (AE_OK);
}
+#endif /* notyet */
/*
- * The _PSC control method isn't required if it's possible to infer the D-state
- * from the _PRx control methods. (See 7.3.6.)
- * We can infer that a given D-state has been achieved when all the dependencies
- * are in the ON state.
- */
-static ACPI_STATUS
-acpi_pwr_infer_state(struct acpi_powerconsumer *pc)
-{
- ACPI_HANDLE *res;
- uint32_t on;
- bool all_on = false;
-
- ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
- ACPI_SERIAL_ASSERT(powerres);
-
- /* It is important we go from the hottest to the coldest state. */
- for (
- pc->ac_state = ACPI_STATE_D0;
- pc->ac_state <= ACPI_STATE_D3_HOT && !all_on;
- pc->ac_state++
- ) {
- MPASS(pc->ac_state <= sizeof(pc->ac_prx) / sizeof(*pc->ac_prx));
-
- if (!pc->ac_prx[pc->ac_state].prx_has)
- continue;
-
- all_on = true;
-
- for (size_t i = 0; i < pc->ac_prx[pc->ac_state].prx_count; i++) {
- res = pc->ac_prx[pc->ac_state].prx_deps[i];
- /* If failure, better to assume D-state is hotter than colder. */
- if (ACPI_FAILURE(acpi_GetInteger(res, "_STA", &on)))
- continue;
- if (on == 0) {
- all_on = false;
- break;
- }
- }
- }
-
- MPASS(pc->ac_state != ACPI_STATE_D0);
-
- /*
- * If none of the power resources required for the shallower D-states are
- * on, then we can assume it is unpowered (i.e. D3cold). A device is not
- * required to support D3cold however; in that case, _PR3 is not explicitly
- * provided. Those devices should default to D3hot instead.
- *
- * See comments of first row of table 7.1 in ACPI spec.
- */
- if (!all_on)
- pc->ac_state = pc->ac_prx[ACPI_STATE_D3_HOT].prx_has ?
- ACPI_STATE_D3_COLD : ACPI_STATE_D3_HOT;
- else
- pc->ac_state--;
-
- return_ACPI_STATUS (AE_OK);
-}
-
-static ACPI_STATUS
-acpi_pwr_get_state_locked(ACPI_HANDLE consumer, int *state)
-{
- struct acpi_powerconsumer *pc;
- ACPI_HANDLE method_handle;
- ACPI_STATUS status;
- ACPI_BUFFER result;
- ACPI_OBJECT *object = NULL;
-
- ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
- ACPI_SERIAL_ASSERT(powerres);
-
- if (consumer == NULL)
- return_ACPI_STATUS (AE_NOT_FOUND);
-
- if ((pc = acpi_pwr_find_consumer(consumer)) == NULL) {
- if (ACPI_FAILURE(status = acpi_pwr_register_consumer(consumer)))
- goto out;
- if ((pc = acpi_pwr_find_consumer(consumer)) == NULL)
- panic("acpi added power consumer but can't find it");
- }
-
- status = AcpiGetHandle(consumer, "_PSC", &method_handle);
- if (ACPI_FAILURE(status)) {
- ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "no _PSC object - %s\n",
- AcpiFormatException(status)));
- status = acpi_pwr_infer_state(pc);
- if (ACPI_FAILURE(status)) {
- ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "couldn't infer D-state - %s\n",
- AcpiFormatException(status)));
- pc->ac_state = ACPI_STATE_UNKNOWN;
- }
- goto out;
- }
-
- result.Pointer = NULL;
- result.Length = ACPI_ALLOCATE_BUFFER;
- status = AcpiEvaluateObjectTyped(method_handle, NULL, NULL, &result, ACPI_TYPE_INTEGER);
- if (ACPI_FAILURE(status) || result.Pointer == NULL) {
- ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "failed to get state with _PSC - %s\n",
- AcpiFormatException(status)));
- pc->ac_state = ACPI_STATE_UNKNOWN;
- goto out;
- }
-
- object = (ACPI_OBJECT *)result.Pointer;
- pc->ac_state = ACPI_STATE_D0 + object->Integer.Value;
- status = AE_OK;
-
-out:
- if (object != NULL)
- AcpiOsFree(object);
- *state = pc->ac_state;
- return_ACPI_STATUS (status);
-}
-
-/*
- * Get a power consumer's D-state.
- */
-ACPI_STATUS
-acpi_pwr_get_state(ACPI_HANDLE consumer, int *state)
-{
- ACPI_STATUS res;
-
- ACPI_SERIAL_BEGIN(powerres);
- res = acpi_pwr_get_state_locked(consumer, state);
- ACPI_SERIAL_END(powerres);
- return (res);
-}
-
-/*
- * Set a power consumer to a particular D-state.
+ * Set a power consumer to a particular power state.
*/
ACPI_STATUS
acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state)
@@ -533,7 +300,6 @@ acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state)
ACPI_OBJECT *reslist_object;
ACPI_STATUS status;
char *method_name, *reslist_name = NULL;
- int new_state;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
@@ -735,28 +501,8 @@ acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state)
}
}
- /*
- * Make sure the transition succeeded. If getting new state failed,
- * just assume the new state is what we wanted. This was the behaviour
- * before we were checking D-states.
- */
- if (ACPI_FAILURE(acpi_pwr_get_state_locked(consumer, &new_state))) {
- printf("%s: failed to get new D-state\n", __func__);
- pc->ac_state = state;
- } else {
- if (new_state != state)
- printf("%s: new power state %s is not the one requested %s\n",
- __func__, acpi_d_state_to_str(new_state),
- acpi_d_state_to_str(state));
- pc->ac_state = new_state;
- }
-
- /*
- * We consider the transition successful even if the state we got doesn't
- * reflect what we set it to. This is because we weren't previously
- * checking the new state at all, so there might exist buggy platforms on
- * which suspend would otherwise succeed if we failed here.
- */
+ /* Transition was successful */
+ pc->ac_state = state;
status = AE_OK;
out:
diff --git a/sys/dev/acpica/acpivar.h b/sys/dev/acpica/acpivar.h
index fac32d832598..71d8e46ab310 100644
--- a/sys/dev/acpica/acpivar.h
+++ b/sys/dev/acpica/acpivar.h
@@ -54,20 +54,19 @@ struct acpi_softc {
struct cdev *acpi_dev_t;
int acpi_enabled;
- int acpi_sstate;
+ enum power_stype acpi_stype;
int acpi_sleep_disabled;
struct sysctl_ctx_list acpi_sysctl_ctx;
struct sysctl_oid *acpi_sysctl_tree;
- int acpi_power_button_sx;
- int acpi_sleep_button_sx;
- int acpi_lid_switch_sx;
+ enum power_stype acpi_power_button_stype;
+ enum power_stype acpi_sleep_button_stype;
+ enum power_stype acpi_lid_switch_stype;
int acpi_standby_sx;
- int acpi_suspend_sx;
+ int acpi_s4bios;
int acpi_sleep_delay;
- int acpi_s4bios;
int acpi_do_disable;
int acpi_verbose;
int acpi_handle_reboot;
@@ -75,7 +74,7 @@ struct acpi_softc {
vm_offset_t acpi_wakeaddr;
vm_paddr_t acpi_wakephys;
- int acpi_next_sstate; /* Next suspend Sx state. */
+ enum power_stype acpi_next_stype; /* Next suspend sleep type. */
struct apm_clone_data *acpi_clone; /* Pseudo-dev for devd(8). */
STAILQ_HEAD(,apm_clone_data) apm_cdevs; /* All apm/apmctl/acpi cdevs. */
struct callout susp_force_to; /* Force suspend if no acks. */
@@ -412,7 +411,7 @@ ACPI_STATUS acpi_EvaluateOSC(ACPI_HANDLE handle, uint8_t *uuid,
uint32_t *caps_out, bool query);
ACPI_STATUS acpi_OverrideInterruptLevel(UINT32 InterruptNumber);
ACPI_STATUS acpi_SetIntrModel(int model);
-int acpi_ReqSleepState(struct acpi_softc *sc, int state);
+int acpi_ReqSleepState(struct acpi_softc *sc, enum power_stype stype);
int acpi_AckSleepState(struct apm_clone_data *clone, int error);
ACPI_STATUS acpi_SetSleepState(struct acpi_softc *sc, int state);
int acpi_wake_set_enable(device_t dev, int enable);
@@ -491,7 +490,6 @@ EVENTHANDLER_DECLARE(acpi_video_event, acpi_event_handler_t);
/* Device power control. */
ACPI_STATUS acpi_pwr_wake_enable(ACPI_HANDLE consumer, int enable);
-ACPI_STATUS acpi_pwr_get_state(ACPI_HANDLE consumer, int *state);
ACPI_STATUS acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state);
acpi_pwr_for_sleep_t acpi_device_pwr_for_sleep;
int acpi_set_powerstate(device_t child, int state);
diff --git a/sys/dev/ath/if_ath_tx.c b/sys/dev/ath/if_ath_tx.c
index deadd63c3d18..9ac591c14943 100644
--- a/sys/dev/ath/if_ath_tx.c
+++ b/sys/dev/ath/if_ath_tx.c
@@ -971,6 +971,12 @@ ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_handoff_hw(sc, txq, bf);
}
+/*
+ * Setup a frame for encryption.
+ *
+ * If this fails, then a non-zero error is returned. The mbuf
+ * must be freed by the caller.
+ */
static int
ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen,
@@ -1547,6 +1553,10 @@ ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
*
* Note that this may cause the mbuf to be reallocated, so
* m0 may not be valid.
+ *
+ * If there's a problem then the mbuf is freed and an error
+ * is returned. The ath_buf then needs to be freed by the
+ * caller.
*/
static int
ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
@@ -2073,9 +2083,8 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
/* This also sets up the DMA map; crypto; frame parameters, etc */
r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
-
if (r != 0)
- goto done;
+ return (r);
/* At this point m0 could have changed! */
m0 = bf->bf_m;
@@ -2132,7 +2141,6 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
ath_tx_leak_count_update(sc, tid, bf);
ath_tx_xmit_normal(sc, txq, bf);
#endif
-done:
return 0;
}
diff --git a/sys/dev/bnxt/bnxt_en/if_bnxt.c b/sys/dev/bnxt/bnxt_en/if_bnxt.c
index feac3ce54a29..471e26a4b252 100644
--- a/sys/dev/bnxt/bnxt_en/if_bnxt.c
+++ b/sys/dev/bnxt/bnxt_en/if_bnxt.c
@@ -48,6 +48,7 @@
#include <net/ethernet.h>
#include <net/iflib.h>
+#define WANT_NATIVE_PCI_GET_SLOT
#include <linux/pci.h>
#include <linux/kmod.h>
#include <linux/module.h>
diff --git a/sys/dev/cpuctl/cpuctl.c b/sys/dev/cpuctl/cpuctl.c
index b0ab3467df69..deaabaaaa1fc 100644
--- a/sys/dev/cpuctl/cpuctl.c
+++ b/sys/dev/cpuctl/cpuctl.c
@@ -344,7 +344,7 @@ ucode_intel_load_rv(void *arg)
d = arg;
if (PCPU_GET(cpuid) == d->cpu)
- d->ret = ucode_intel_load(d->ptr, true, NULL, NULL);
+ d->ret = ucode_intel_load(d->ptr, SAFE, NULL, NULL);
}
static int
diff --git a/sys/dev/cxgbe/adapter.h b/sys/dev/cxgbe/adapter.h
index d3820245837a..55f09fefb7e3 100644
--- a/sys/dev/cxgbe/adapter.h
+++ b/sys/dev/cxgbe/adapter.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2025 Chelsio Communications.
* Written by: Navdeep Parhar <np@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
@@ -319,7 +318,7 @@ struct port_info {
char lockname[16];
unsigned long flags;
- uint8_t lport; /* associated offload logical port */
+ uint8_t hw_port; /* associated hardware port idx */
int8_t mdio_addr;
uint8_t port_type;
uint8_t mod_type;
@@ -413,6 +412,24 @@ enum {
NUM_CPL_COOKIES = 8 /* Limited by M_COOKIE. Do not increase. */
};
+/*
+ * Crypto replies use the low bit in the 64-bit cookie of CPL_FW6_PLD as a
+ * CPL cookie to identify the sender/receiver.
+ */
+enum {
+ CPL_FW6_COOKIE_CCR = 0,
+ CPL_FW6_COOKIE_KTLS,
+
+ NUM_CPL_FW6_COOKIES = 2 /* Low bits of cookie value. */
+};
+
+_Static_assert(powerof2(NUM_CPL_FW6_COOKIES),
+ "NUM_CPL_FW6_COOKIES must be a power of 2");
+
+#define CPL_FW6_COOKIE_MASK (NUM_CPL_FW6_COOKIES - 1)
+
+#define CPL_FW6_PLD_COOKIE(cpl) (be64toh((cpl)->data[1]) & ~CPL_FW6_COOKIE_MASK)
+
struct sge_iq;
struct rss_header;
typedef int (*cpl_handler_t)(struct sge_iq *, const struct rss_header *,
@@ -477,6 +494,7 @@ struct sge_eq {
uint8_t doorbells;
uint8_t port_id; /* port_id of the port associated with the eq */
uint8_t tx_chan; /* tx channel used by the eq */
+ uint8_t hw_port; /* hw port used by the eq */
struct mtx eq_lock;
struct tx_desc *desc; /* KVA of descriptor ring */
@@ -640,12 +658,26 @@ struct sge_txq {
uint64_t kern_tls_full;
uint64_t kern_tls_octets;
uint64_t kern_tls_waste;
- uint64_t kern_tls_options;
uint64_t kern_tls_header;
- uint64_t kern_tls_fin;
uint64_t kern_tls_fin_short;
uint64_t kern_tls_cbc;
uint64_t kern_tls_gcm;
+ union {
+ struct {
+ /* T6 only. */
+ uint64_t kern_tls_options;
+ uint64_t kern_tls_fin;
+ };
+ struct {
+ /* T7 only. */
+ uint64_t kern_tls_ghash_received;
+ uint64_t kern_tls_ghash_requested;
+ uint64_t kern_tls_lso;
+ uint64_t kern_tls_partial_ghash;
+ uint64_t kern_tls_splitmode;
+ uint64_t kern_tls_trailer;
+ };
+ };
/* stats for not-that-common events */
@@ -769,6 +801,16 @@ struct sge_ofld_txq {
counter_u64_t tx_toe_tls_octets;
} __aligned(CACHE_LINE_SIZE);
+static inline int
+ofld_txq_group(int val, int mask)
+{
+ const uint32_t ngroup = 1 << bitcount32(mask);
+ const int mshift = ffs(mask) - 1;
+ const uint32_t gmask = ngroup - 1;
+
+ return (val >> mshift & gmask);
+}
+
#define INVALID_NM_RXQ_CNTXT_ID ((uint16_t)(-1))
struct sge_nm_rxq {
/* Items used by the driver rx ithread are in this cacheline. */
@@ -836,6 +878,7 @@ struct sge_nm_txq {
} __aligned(CACHE_LINE_SIZE);
struct sge {
+ int nctrlq; /* total # of control queues */
int nrxq; /* total # of Ethernet rx queues */
int ntxq; /* total # of Ethernet tx queues */
int nofldrxq; /* total # of TOE rx queues */
@@ -937,7 +980,8 @@ struct adapter {
struct taskqueue *tq[MAX_NPORTS]; /* General purpose taskqueues */
struct port_info *port[MAX_NPORTS];
- uint8_t chan_map[MAX_NCHAN]; /* channel -> port */
+ uint8_t chan_map[MAX_NCHAN]; /* tx_chan -> port_id */
+ uint8_t port_map[MAX_NPORTS]; /* hw_port -> port_id */
CXGBE_LIST_HEAD(, clip_entry) *clip_table;
TAILQ_HEAD(, clip_entry) clip_pending; /* these need hw update. */
@@ -959,9 +1003,12 @@ struct adapter {
vmem_t *key_map;
struct tls_tunables tlst;
+ vmem_t *pbl_arena;
+ vmem_t *stag_arena;
+
uint8_t doorbells;
int offload_map; /* port_id's with IFCAP_TOE enabled */
- int bt_map; /* tx_chan's with BASE-T */
+ int bt_map; /* hw_port's that are BASE-T */
int active_ulds; /* ULDs activated on this adapter */
int flags;
int debug_flags;
@@ -988,6 +1035,7 @@ struct adapter {
uint16_t nbmcaps;
uint16_t linkcaps;
uint16_t switchcaps;
+ uint16_t nvmecaps;
uint16_t niccaps;
uint16_t toecaps;
uint16_t rdmacaps;
@@ -1409,6 +1457,14 @@ void t6_ktls_modunload(void);
int t6_ktls_try(if_t, struct socket *, struct ktls_session *);
int t6_ktls_parse_pkt(struct mbuf *);
int t6_ktls_write_wr(struct sge_txq *, void *, struct mbuf *, u_int);
+
+/* t7_kern_tls.c */
+int t7_tls_tag_alloc(struct ifnet *, union if_snd_tag_alloc_params *,
+ struct m_snd_tag **);
+void t7_ktls_modload(void);
+void t7_ktls_modunload(void);
+int t7_ktls_parse_pkt(struct mbuf *);
+int t7_ktls_write_wr(struct sge_txq *, void *, struct mbuf *, u_int);
#endif
/* t4_keyctx.c */
@@ -1536,6 +1592,27 @@ int t4_hashfilter_tcb_rpl(struct sge_iq *, const struct rss_header *, struct mbu
int t4_del_hashfilter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
void free_hftid_hash(struct tid_info *);
+/* t4_tpt.c */
+#define T4_STAG_UNSET 0xffffffff
+#define T4_WRITE_MEM_DMA_LEN \
+ roundup2(sizeof(struct ulp_mem_io) + sizeof(struct ulptx_sgl), 16)
+#define T4_ULPTX_MIN_IO 32
+#define T4_MAX_INLINE_SIZE 96
+#define T4_WRITE_MEM_INLINE_LEN(len) \
+ roundup2(sizeof(struct ulp_mem_io) + sizeof(struct ulptx_idata) + \
+ roundup((len), T4_ULPTX_MIN_IO), 16)
+
+uint32_t t4_pblpool_alloc(struct adapter *, int);
+void t4_pblpool_free(struct adapter *, uint32_t, int);
+uint32_t t4_stag_alloc(struct adapter *, int);
+void t4_stag_free(struct adapter *, uint32_t, int);
+void t4_init_tpt(struct adapter *);
+void t4_free_tpt(struct adapter *);
+void t4_write_mem_dma_wr(struct adapter *, void *, int, int, uint32_t,
+ uint32_t, vm_paddr_t, uint64_t);
+void t4_write_mem_inline_wr(struct adapter *, void *, int, int, uint32_t,
+ uint32_t, void *, uint64_t);
+
static inline struct wrqe *
alloc_wrqe(int wr_len, struct sge_wrq *wrq)
{
diff --git a/sys/dev/cxgbe/common/common.h b/sys/dev/cxgbe/common/common.h
index 6e80ce40648b..6b36832a7464 100644
--- a/sys/dev/cxgbe/common/common.h
+++ b/sys/dev/cxgbe/common/common.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,6 +31,15 @@
#include "t4_hw.h"
+#define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC0 | F_EDC0 | \
+ F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
+ F_CPL_SWITCH | F_SGE | F_ULP_TX | F_SF)
+
+#define GLBL_T7_INTR_MASK (F_CIM | F_MPS | F_PL | F_T7_PCIE | F_T7_MC0 | \
+ F_T7_EDC0 | F_T7_EDC1 | F_T7_LE | F_T7_TP | \
+ F_T7_MA | F_T7_PM_TX | F_T7_PM_RX | F_T7_ULP_RX | \
+ F_T7_CPL_SWITCH | F_T7_SGE | F_T7_ULP_TX | F_SF)
+
enum {
MAX_NPORTS = 4, /* max # of ports */
SERNUM_LEN = 24, /* Serial # length */
@@ -77,6 +85,18 @@ enum {
FEC_MODULE = 1 << 6, /* FEC suggested by the cable/transceiver. */
};
+enum {
+ ULP_T10DIF_ISCSI = 1 << 0,
+ ULP_T10DIF_FCOE = 1 << 1
+};
+
+enum {
+ ULP_CRYPTO_LOOKASIDE = 1 << 0,
+ ULP_CRYPTO_INLINE_TLS = 1 << 1,
+ ULP_CRYPTO_INLINE_IPSEC = 1 << 2,
+ ULP_CRYPTO_OFLD_OVER_IPSEC_INLINE = 1 << 4
+};
+
enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
struct port_stats {
@@ -230,6 +250,15 @@ struct tp_cpl_stats {
struct tp_rdma_stats {
u32 rqe_dfr_pkt;
u32 rqe_dfr_mod;
+ u32 pkts_in[MAX_NCHAN];
+ u64 bytes_in[MAX_NCHAN];
+ /*
+ * When reading rdma stats, the address difference b/w RDMA_IN and
+ * RDMA_OUT is 4*u32, to read both at once, added padding
+ */
+ u32 padding[4];
+ u32 pkts_out[MAX_NCHAN];
+ u64 bytes_out[MAX_NCHAN];
};
struct sge_params {
@@ -259,7 +288,10 @@ struct tp_params {
uint32_t max_rx_pdu;
uint32_t max_tx_pdu;
bool rx_pkt_encap;
+ uint8_t lb_mode;
+ uint8_t lb_nchan;
+ int8_t ipsecidx_shift;
int8_t fcoe_shift;
int8_t port_shift;
int8_t vnic_shift;
@@ -270,6 +302,9 @@ struct tp_params {
int8_t macmatch_shift;
int8_t matchtype_shift;
int8_t frag_shift;
+ int8_t roce_shift;
+ int8_t synonly_shift;
+ int8_t tcpflags_shift;
};
/* Use same modulation queue as the tx channel. */
@@ -285,6 +320,22 @@ struct vpd_params {
u8 md[MD_LEN + 1];
};
+/*
+ * Maximum resources provisioned for a PCI PF.
+ */
+struct pf_resources {
+ unsigned int nvi; /* N virtual interfaces */
+ unsigned int neq; /* N egress Qs */
+ unsigned int nethctrl; /* N egress ETH or CTRL Qs */
+ unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */
+ unsigned int niq; /* N ingress Qs */
+ unsigned int tc; /* PCI-E traffic class */
+ unsigned int pmask; /* port access rights mask */
+ unsigned int nexactf; /* N exact MPS filters */
+ unsigned int r_caps; /* read capabilities */
+ unsigned int wx_caps; /* write/execute capabilities */
+};
+
struct pci_params {
unsigned int vpd_cap_addr;
unsigned int mps;
@@ -308,8 +359,11 @@ struct chip_params {
u8 pm_stats_cnt;
u8 cng_ch_bits_log; /* congestion channel map bits width */
u8 nsched_cls;
+ u8 cim_num_ibq;
u8 cim_num_obq;
- u8 filter_opt_len;
+ u8 filter_opt_len; /* number of bits for optional fields */
+ u8 filter_num_opt; /* number of optional fields */
+ u8 sge_ctxt_size;
u16 mps_rplc_size;
u16 vfcount;
u32 sge_fl_db;
@@ -360,6 +414,7 @@ struct adapter_params {
struct sge_params sge;
struct tp_params tp; /* PF-only */
struct vpd_params vpd;
+ struct pf_resources pfres; /* PF-only */
struct pci_params pci;
struct devlog_params devlog; /* PF-only */
struct rss_params rss; /* VF-only */
@@ -399,12 +454,13 @@ struct adapter_params {
unsigned int ofldq_wr_cred;
unsigned int eo_wr_cred;
- unsigned int max_ordird_qp;
- unsigned int max_ird_adapter;
+ unsigned int max_ordird_qp; /* Max read depth per RDMA QP */
+ unsigned int max_ird_adapter; /* Max read depth per adapter */
/* These values are for all ports (8b/port, upto 4 ports) */
uint32_t mps_bg_map; /* MPS rx buffer group map */
uint32_t tp_ch_map; /* TPCHMAP from firmware */
+ uint32_t tx_tp_ch_map; /* TX_TPCHMAP from firmware */
bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
bool fr_nsmr_tpte_wr_support; /* FW support for FR_NSMR_TPTE_WR */
@@ -412,11 +468,15 @@ struct adapter_params {
bool viid_smt_extn_support; /* FW returns vin, vfvld & smt index? */
unsigned int max_pkts_per_eth_tx_pkts_wr;
uint8_t nsched_cls; /* # of usable sched classes per port */
+
+ uint8_t ncores;
+ uint32_t tid_qid_sel_mask; /* TID based QID selection mask */
};
#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5
#define CHELSIO_T6 0x6
+#define CHELSIO_T7 0x7
/*
* State needed to monitor the forward progress of SGE Ingress DMA activities
@@ -509,10 +569,11 @@ static inline int is_hashfilter(const struct adapter *adap)
static inline int is_ktls(const struct adapter *adap)
{
- return adap->cryptocaps & FW_CAPS_CONFIG_TLS_HW;
+ return adap->cryptocaps & FW_CAPS_CONFIG_TLS_HW ||
+ adap->params.chipid == CHELSIO_T7;
}
-static inline int chip_id(struct adapter *adap)
+static inline int chip_id(const struct adapter *adap)
{
return adap->params.chipid;
}
@@ -537,6 +598,11 @@ static inline int is_t6(struct adapter *adap)
return adap->params.chipid == CHELSIO_T6;
}
+static inline int is_t7(struct adapter *adap)
+{
+ return adap->params.chipid == CHELSIO_T7;
+}
+
static inline int is_fpga(struct adapter *adap)
{
return adap->params.fpga;
@@ -641,7 +707,7 @@ int t4_load_bootcfg(struct adapter *adapter, const u8 *cfg_data, unsigned int si
int t4_load_boot(struct adapter *adap, u8 *boot_data,
unsigned int boot_addr, unsigned int size);
int t4_flash_erase_sectors(struct adapter *adapter, int start, int end);
-int t4_flash_cfg_addr(struct adapter *adapter);
+int t4_flash_cfg_addr(struct adapter *adapter, unsigned int *lenp);
int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
int t4_get_fw_version(struct adapter *adapter, u32 *vers);
int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr);
@@ -655,9 +721,10 @@ int t4_init_hw(struct adapter *adapter, u32 fw_params);
const struct chip_params *t4_get_chip_params(int chipid);
int t4_prep_adapter(struct adapter *adapter, u32 *buf);
int t4_shutdown_adapter(struct adapter *adapter);
-int t4_init_devlog_params(struct adapter *adapter, int fw_attach);
+int t4_init_devlog_ncores_params(struct adapter *adapter, int fw_attach);
int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
+int t4_filter_field_width(const struct adapter *adap, int filter_field);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id);
void t4_fatal_err(struct adapter *adapter, bool fw_error);
@@ -665,6 +732,7 @@ int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
int filter_index, int enable);
void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
int filter_index, int *enabled);
+void t4_set_trace_rss_control(struct adapter *adap, u8 chan, u16 qid);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
@@ -691,19 +759,60 @@ void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask, bool sleep_ok);
int t4_mps_set_active_ports(struct adapter *adap, unsigned int port_mask);
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
-void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
-int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n);
-int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n);
-int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
- unsigned int *valp);
-int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
- const unsigned int *valp);
-int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
- unsigned int *valp);
-int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
+void t4_pmrx_cache_get_stats(struct adapter *adap, u32 stats[]);
+void t4_read_cimq_cfg_core(struct adapter *adap, u8 coreid, u16 *base,
+ u16 *size, u16 *thres);
+int t4_read_cim_ibq_core(struct adapter *adap, u8 coreid, u32 qid, u32 *data,
+ size_t n);
+int t4_read_cim_obq_core(struct adapter *adap, u8 coreid, u32 qid, u32 *data,
+ size_t n);
+int t4_cim_read_core(struct adapter *adap, u8 group, u8 coreid,
+ unsigned int addr, unsigned int n,
+ unsigned int *valp);
+int t4_cim_write_core(struct adapter *adap, u8 group, u8 coreid,
+ unsigned int addr, unsigned int n,
+ const unsigned int *valp);
+int t4_cim_read_la_core(struct adapter *adap, u8 coreid, u32 *la_buf,
+ u32 *wrptr);
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
unsigned int *pif_req_wrptr, unsigned int *pif_rsp_wrptr);
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp);
+
+static inline void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size,
+ u16 *thres)
+{
+ t4_read_cimq_cfg_core(adap, 0, base, size, thres);
+}
+
+static inline int t4_read_cim_ibq(struct adapter *adap, u32 qid, u32 *data,
+ size_t n)
+{
+ return t4_read_cim_ibq_core(adap, 0, qid, data, n);
+}
+
+static inline int t4_read_cim_obq(struct adapter *adap, u32 qid, u32 *data,
+ size_t n)
+{
+ return t4_read_cim_obq_core(adap, 0, qid, data, n);
+}
+
+static inline int t4_cim_read(struct adapter *adap, unsigned int addr,
+ unsigned int n, unsigned int *valp)
+{
+ return t4_cim_read_core(adap, 0, 0, addr, n, valp);
+}
+
+static inline int t4_cim_write(struct adapter *adap, unsigned int addr,
+ unsigned int n, unsigned int *valp)
+{
+ return t4_cim_write_core(adap, 0, 0, addr, n, valp);
+}
+
+static inline int t4_cim_read_la(struct adapter *adap, u32 *la_buf, u32 *wrptr)
+{
+ return t4_cim_read_la_core(adap, 0, la_buf, wrptr);
+}
+
int t4_get_flash_params(struct adapter *adapter);
u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach);
@@ -919,6 +1028,8 @@ int t4_configure_ringbb(struct adapter *adap);
int t4_configure_add_smac(struct adapter *adap);
int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
u16 vlan);
+int t4_flash_loc_start(struct adapter *adap, enum t4_flash_loc loc,
+ unsigned int *lenp);
static inline int t4vf_query_params(struct adapter *adapter,
unsigned int nparams, const u32 *params,
@@ -969,8 +1080,8 @@ port_top_speed(const struct port_info *pi)
sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))
static inline void *
-mk_set_tcb_field_ulp(struct adapter *sc, void *cur, int tid, uint16_t word,
- uint64_t mask, uint64_t val)
+mk_set_tcb_field_ulp_with_rpl(struct adapter *sc, void *cur, int tid,
+ uint16_t word, uint64_t mask, uint64_t val, const int qid)
{
struct ulp_txpkt *ulpmc;
struct ulptx_idata *ulpsc;
@@ -989,8 +1100,21 @@ mk_set_tcb_field_ulp(struct adapter *sc, void *cur, int tid, uint16_t word,
req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
- req->reply_ctrl = htobe16(F_NO_REPLY);
- req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
+
+ if (qid == -1) {
+ req->reply_ctrl = htobe16(F_NO_REPLY);
+ req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
+ } else {
+ if (chip_id(sc) >= CHELSIO_T7) {
+ req->reply_ctrl = htobe16(V_T7_QUEUENO(qid) |
+ V_T7_REPLY_CHAN(0) | V_NO_REPLY(0));
+ } else {
+ req->reply_ctrl = htobe16(V_QUEUENO(qid) |
+ V_REPLY_CHAN(0) | V_NO_REPLY(0));
+ }
+ req->word_cookie = htobe16(V_WORD(word) |
+ V_COOKIE(CPL_COOKIE_TOM));
+ }
req->mask = htobe64(mask);
req->val = htobe64(val);
@@ -1006,4 +1130,11 @@ mk_set_tcb_field_ulp(struct adapter *sc, void *cur, int tid, uint16_t word,
return (ulpsc + 1);
}
+
+static inline void *
+mk_set_tcb_field_ulp(struct adapter *sc, void *cur, int tid, uint16_t word,
+ uint64_t mask, uint64_t val)
+{
+ return (mk_set_tcb_field_ulp_with_rpl(sc, cur, tid, word, mask, val, -1));
+}
#endif /* __CHELSIO_COMMON_H */
diff --git a/sys/dev/cxgbe/common/t4_hw.c b/sys/dev/cxgbe/common/t4_hw.c
index 07940a44f66e..eb7ea9acc108 100644
--- a/sys/dev/cxgbe/common/t4_hw.c
+++ b/sys/dev/cxgbe/common/t4_hw.c
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2012, 2016 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2012, 2016, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -246,6 +245,8 @@ struct port_tx_state {
u32
t4_port_reg(struct adapter *adap, u8 port, u32 reg)
{
+ if (chip_id(adap) > CHELSIO_T6)
+ return T7_PORT_REG(port, reg);
if (chip_id(adap) > CHELSIO_T4)
return T5_PORT_REG(port, reg);
return PORT_REG(port, reg);
@@ -268,8 +269,10 @@ read_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
{
int i;
- for_each_port(sc, i)
- read_tx_state_one(sc, i, &tx_state[i]);
+ for (i = 0; i < MAX_NCHAN; i++) {
+ if (sc->chan_map[i] != 0xff)
+ read_tx_state_one(sc, i, &tx_state[i]);
+ }
}
static void
@@ -279,7 +282,9 @@ check_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
uint64_t tx_frames, rx_pause;
int i;
- for_each_port(sc, i) {
+ for (i = 0; i < MAX_NCHAN; i++) {
+ if (sc->chan_map[i] == 0xff)
+ continue;
rx_pause = tx_state[i].rx_pause;
tx_frames = tx_state[i].tx_frames;
read_tx_state_one(sc, i, &tx_state[i]); /* update */
@@ -351,7 +356,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
return -EINVAL;
if (adap->flags & IS_VF) {
- if (is_t6(adap))
+ if (chip_id(adap) >= CHELSIO_T6)
data_reg = FW_T6VF_MBDATA_BASE_ADDR;
else
data_reg = FW_T4VF_MBDATA_BASE_ADDR;
@@ -508,9 +513,8 @@ failed:
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
void *rpl, bool sleep_ok)
{
- return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
- sleep_ok, FW_CMD_MAX_TIMEOUT);
-
+ return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
+ sleep_ok, FW_CMD_MAX_TIMEOUT);
}
static int t4_edc_err_read(struct adapter *adap, int idx)
@@ -799,6 +803,7 @@ unsigned int t4_get_regs_len(struct adapter *adapter)
case CHELSIO_T5:
case CHELSIO_T6:
+ case CHELSIO_T7:
if (adapter->flags & IS_VF)
return FW_T4VF_REGMAP_SIZE;
return T5_REGMAP_SIZE;
@@ -2639,6 +2644,638 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
};
+ static const unsigned int t7_reg_ranges[] = {
+ 0x1008, 0x101c,
+ 0x1024, 0x10a8,
+ 0x10b4, 0x10f8,
+ 0x1100, 0x1114,
+ 0x111c, 0x112c,
+ 0x1138, 0x113c,
+ 0x1144, 0x115c,
+ 0x1180, 0x1184,
+ 0x1190, 0x1194,
+ 0x11a0, 0x11a4,
+ 0x11b0, 0x11d0,
+ 0x11fc, 0x1278,
+ 0x1280, 0x1368,
+ 0x1700, 0x172c,
+ 0x173c, 0x1760,
+ 0x1800, 0x18fc,
+ 0x3000, 0x3044,
+ 0x3060, 0x3064,
+ 0x30a4, 0x30b0,
+ 0x30b8, 0x30d8,
+ 0x30e0, 0x30fc,
+ 0x3140, 0x357c,
+ 0x35a8, 0x35cc,
+ 0x35e0, 0x35ec,
+ 0x3600, 0x37fc,
+ 0x3804, 0x3818,
+ 0x3880, 0x388c,
+ 0x3900, 0x3904,
+ 0x3910, 0x3978,
+ 0x3980, 0x399c,
+ 0x4700, 0x4720,
+ 0x4728, 0x475c,
+ 0x480c, 0x4814,
+ 0x4890, 0x489c,
+ 0x48a4, 0x48ac,
+ 0x48b8, 0x48c4,
+ 0x4900, 0x4924,
+ 0x4ffc, 0x4ffc,
+ 0x5500, 0x5624,
+ 0x56c4, 0x56ec,
+ 0x56f4, 0x5720,
+ 0x5728, 0x575c,
+ 0x580c, 0x5814,
+ 0x5890, 0x589c,
+ 0x58a4, 0x58ac,
+ 0x58b8, 0x58bc,
+ 0x5940, 0x598c,
+ 0x59b0, 0x59c8,
+ 0x59d0, 0x59dc,
+ 0x59fc, 0x5a18,
+ 0x5a60, 0x5a6c,
+ 0x5a80, 0x5a8c,
+ 0x5a94, 0x5a9c,
+ 0x5b94, 0x5bfc,
+ 0x5c10, 0x5e48,
+ 0x5e50, 0x5e94,
+ 0x5ea0, 0x5eb0,
+ 0x5ec0, 0x5ec0,
+ 0x5ec8, 0x5ed0,
+ 0x5ee0, 0x5ee0,
+ 0x5ef0, 0x5ef0,
+ 0x5f00, 0x5f04,
+ 0x5f0c, 0x5f10,
+ 0x5f20, 0x5f88,
+ 0x5f90, 0x5fd8,
+ 0x6000, 0x6020,
+ 0x6028, 0x6030,
+ 0x6044, 0x609c,
+ 0x60a8, 0x60ac,
+ 0x60b8, 0x60ec,
+ 0x6100, 0x6104,
+ 0x6118, 0x611c,
+ 0x6150, 0x6150,
+ 0x6180, 0x61b8,
+ 0x7700, 0x77a8,
+ 0x77b0, 0x7888,
+ 0x78cc, 0x7970,
+ 0x7b00, 0x7b00,
+ 0x7b08, 0x7b0c,
+ 0x7b24, 0x7b84,
+ 0x7b8c, 0x7c2c,
+ 0x7c34, 0x7c40,
+ 0x7c48, 0x7c68,
+ 0x7c70, 0x7c7c,
+ 0x7d00, 0x7ddc,
+ 0x7de4, 0x7e38,
+ 0x7e40, 0x7e44,
+ 0x7e4c, 0x7e74,
+ 0x7e80, 0x7ee0,
+ 0x7ee8, 0x7f0c,
+ 0x7f20, 0x7f5c,
+ 0x8dc0, 0x8de8,
+ 0x8df8, 0x8e04,
+ 0x8e10, 0x8e30,
+ 0x8e7c, 0x8ee8,
+ 0x8f88, 0x8f88,
+ 0x8f90, 0x8fb0,
+ 0x8fb8, 0x9058,
+ 0x9074, 0x90f8,
+ 0x9100, 0x912c,
+ 0x9138, 0x9188,
+ 0x9400, 0x9414,
+ 0x9430, 0x9440,
+ 0x9454, 0x9454,
+ 0x945c, 0x947c,
+ 0x9498, 0x94b8,
+ 0x9600, 0x9600,
+ 0x9608, 0x9638,
+ 0x9640, 0x9704,
+ 0x9710, 0x971c,
+ 0x9800, 0x9804,
+ 0x9854, 0x9854,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0x9fec,
+ 0xa000, 0xa06c,
+ 0xa080, 0xa0ec,
+ 0xa100, 0xa16c,
+ 0xa180, 0xa1ec,
+ 0xa200, 0xa26c,
+ 0xa280, 0xa2ec,
+ 0xa300, 0xa36c,
+ 0xa380, 0xa458,
+ 0xa460, 0xa4f8,
+ 0xd000, 0xd03c,
+ 0xd100, 0xd134,
+ 0xd200, 0xd214,
+ 0xd220, 0xd234,
+ 0xd240, 0xd254,
+ 0xd260, 0xd274,
+ 0xd280, 0xd294,
+ 0xd2a0, 0xd2b4,
+ 0xd2c0, 0xd2d4,
+ 0xd2e0, 0xd2f4,
+ 0xd300, 0xd31c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0xe00c,
+ 0xf000, 0xf008,
+ 0xf010, 0xf06c,
+ 0x11000, 0x11014,
+ 0x11048, 0x11120,
+ 0x11130, 0x11144,
+ 0x11174, 0x11178,
+ 0x11190, 0x111a0,
+ 0x111e4, 0x112f0,
+ 0x11300, 0x1133c,
+ 0x11408, 0x1146c,
+ 0x12000, 0x12004,
+ 0x12060, 0x122c4,
+ 0x19040, 0x1906c,
+ 0x19078, 0x19080,
+ 0x1908c, 0x190e8,
+ 0x190f0, 0x190f8,
+ 0x19100, 0x19110,
+ 0x19120, 0x19124,
+ 0x19150, 0x19194,
+ 0x1919c, 0x191a0,
+ 0x191ac, 0x191c8,
+ 0x191d0, 0x191e4,
+ 0x19250, 0x19250,
+ 0x19258, 0x19268,
+ 0x19278, 0x19278,
+ 0x19280, 0x192b0,
+ 0x192bc, 0x192f0,
+ 0x19300, 0x19308,
+ 0x19310, 0x19318,
+ 0x19320, 0x19328,
+ 0x19330, 0x19330,
+ 0x19348, 0x1934c,
+ 0x193f8, 0x19428,
+ 0x19430, 0x19444,
+ 0x1944c, 0x1946c,
+ 0x19474, 0x1947c,
+ 0x19488, 0x194cc,
+ 0x194f0, 0x194f8,
+ 0x19c00, 0x19c48,
+ 0x19c50, 0x19c80,
+ 0x19c94, 0x19c98,
+ 0x19ca0, 0x19cdc,
+ 0x19ce4, 0x19cf8,
+ 0x19d00, 0x19d30,
+ 0x19d50, 0x19d80,
+ 0x19d94, 0x19d98,
+ 0x19da0, 0x19de0,
+ 0x19df0, 0x19e10,
+ 0x19e50, 0x19e6c,
+ 0x19ea0, 0x19ebc,
+ 0x19ec4, 0x19ef4,
+ 0x19f04, 0x19f2c,
+ 0x19f34, 0x19f34,
+ 0x19f40, 0x19f50,
+ 0x19f90, 0x19fb4,
+ 0x19fbc, 0x19fbc,
+ 0x19fc4, 0x19fc8,
+ 0x19fd0, 0x19fe4,
+ 0x1a000, 0x1a004,
+ 0x1a010, 0x1a06c,
+ 0x1a0b0, 0x1a0e4,
+ 0x1a0ec, 0x1a108,
+ 0x1a114, 0x1a130,
+ 0x1a138, 0x1a1c4,
+ 0x1a1fc, 0x1a29c,
+ 0x1a2a8, 0x1a2b8,
+ 0x1a2c0, 0x1a388,
+ 0x1a398, 0x1a3ac,
+ 0x1e008, 0x1e00c,
+ 0x1e040, 0x1e044,
+ 0x1e04c, 0x1e04c,
+ 0x1e284, 0x1e290,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e4,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e408, 0x1e40c,
+ 0x1e440, 0x1e444,
+ 0x1e44c, 0x1e44c,
+ 0x1e684, 0x1e690,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e4,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e808, 0x1e80c,
+ 0x1e840, 0x1e844,
+ 0x1e84c, 0x1e84c,
+ 0x1ea84, 0x1ea90,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae4,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec08, 0x1ec0c,
+ 0x1ec40, 0x1ec44,
+ 0x1ec4c, 0x1ec4c,
+ 0x1ee84, 0x1ee90,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee4,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f008, 0x1f00c,
+ 0x1f040, 0x1f044,
+ 0x1f04c, 0x1f04c,
+ 0x1f284, 0x1f290,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e4,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f408, 0x1f40c,
+ 0x1f440, 0x1f444,
+ 0x1f44c, 0x1f44c,
+ 0x1f684, 0x1f690,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e4,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f808, 0x1f80c,
+ 0x1f840, 0x1f844,
+ 0x1f84c, 0x1f84c,
+ 0x1fa84, 0x1fa90,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae4,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc08, 0x1fc0c,
+ 0x1fc40, 0x1fc44,
+ 0x1fc4c, 0x1fc4c,
+ 0x1fe84, 0x1fe90,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee4,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x30000, 0x30038,
+ 0x30100, 0x3017c,
+ 0x30190, 0x301a0,
+ 0x301a8, 0x301b8,
+ 0x301c4, 0x301c8,
+ 0x301d0, 0x301e0,
+ 0x30200, 0x30344,
+ 0x30400, 0x304b4,
+ 0x304c0, 0x3052c,
+ 0x30540, 0x3065c,
+ 0x30800, 0x30848,
+ 0x30850, 0x308a8,
+ 0x308b8, 0x308c0,
+ 0x308cc, 0x308dc,
+ 0x30900, 0x30904,
+ 0x3090c, 0x30914,
+ 0x3091c, 0x30928,
+ 0x30930, 0x3093c,
+ 0x30944, 0x30948,
+ 0x30954, 0x30974,
+ 0x3097c, 0x30980,
+ 0x30a00, 0x30a20,
+ 0x30a38, 0x30a3c,
+ 0x30a50, 0x30a50,
+ 0x30a80, 0x30a80,
+ 0x30a88, 0x30aa8,
+ 0x30ab0, 0x30ab4,
+ 0x30ac8, 0x30ad4,
+ 0x30b28, 0x30b84,
+ 0x30b98, 0x30bb8,
+ 0x30c98, 0x30d14,
+ 0x31000, 0x31020,
+ 0x31038, 0x3103c,
+ 0x31050, 0x31050,
+ 0x31080, 0x31080,
+ 0x31088, 0x310a8,
+ 0x310b0, 0x310b4,
+ 0x310c8, 0x310d4,
+ 0x31128, 0x31184,
+ 0x31198, 0x311b8,
+ 0x32000, 0x32038,
+ 0x32100, 0x3217c,
+ 0x32190, 0x321a0,
+ 0x321a8, 0x321b8,
+ 0x321c4, 0x321c8,
+ 0x321d0, 0x321e0,
+ 0x32200, 0x32344,
+ 0x32400, 0x324b4,
+ 0x324c0, 0x3252c,
+ 0x32540, 0x3265c,
+ 0x32800, 0x32848,
+ 0x32850, 0x328a8,
+ 0x328b8, 0x328c0,
+ 0x328cc, 0x328dc,
+ 0x32900, 0x32904,
+ 0x3290c, 0x32914,
+ 0x3291c, 0x32928,
+ 0x32930, 0x3293c,
+ 0x32944, 0x32948,
+ 0x32954, 0x32974,
+ 0x3297c, 0x32980,
+ 0x32a00, 0x32a20,
+ 0x32a38, 0x32a3c,
+ 0x32a50, 0x32a50,
+ 0x32a80, 0x32a80,
+ 0x32a88, 0x32aa8,
+ 0x32ab0, 0x32ab4,
+ 0x32ac8, 0x32ad4,
+ 0x32b28, 0x32b84,
+ 0x32b98, 0x32bb8,
+ 0x32c98, 0x32d14,
+ 0x33000, 0x33020,
+ 0x33038, 0x3303c,
+ 0x33050, 0x33050,
+ 0x33080, 0x33080,
+ 0x33088, 0x330a8,
+ 0x330b0, 0x330b4,
+ 0x330c8, 0x330d4,
+ 0x33128, 0x33184,
+ 0x33198, 0x331b8,
+ 0x34000, 0x34038,
+ 0x34100, 0x3417c,
+ 0x34190, 0x341a0,
+ 0x341a8, 0x341b8,
+ 0x341c4, 0x341c8,
+ 0x341d0, 0x341e0,
+ 0x34200, 0x34344,
+ 0x34400, 0x344b4,
+ 0x344c0, 0x3452c,
+ 0x34540, 0x3465c,
+ 0x34800, 0x34848,
+ 0x34850, 0x348a8,
+ 0x348b8, 0x348c0,
+ 0x348cc, 0x348dc,
+ 0x34900, 0x34904,
+ 0x3490c, 0x34914,
+ 0x3491c, 0x34928,
+ 0x34930, 0x3493c,
+ 0x34944, 0x34948,
+ 0x34954, 0x34974,
+ 0x3497c, 0x34980,
+ 0x34a00, 0x34a20,
+ 0x34a38, 0x34a3c,
+ 0x34a50, 0x34a50,
+ 0x34a80, 0x34a80,
+ 0x34a88, 0x34aa8,
+ 0x34ab0, 0x34ab4,
+ 0x34ac8, 0x34ad4,
+ 0x34b28, 0x34b84,
+ 0x34b98, 0x34bb8,
+ 0x34c98, 0x34d14,
+ 0x35000, 0x35020,
+ 0x35038, 0x3503c,
+ 0x35050, 0x35050,
+ 0x35080, 0x35080,
+ 0x35088, 0x350a8,
+ 0x350b0, 0x350b4,
+ 0x350c8, 0x350d4,
+ 0x35128, 0x35184,
+ 0x35198, 0x351b8,
+ 0x36000, 0x36038,
+ 0x36100, 0x3617c,
+ 0x36190, 0x361a0,
+ 0x361a8, 0x361b8,
+ 0x361c4, 0x361c8,
+ 0x361d0, 0x361e0,
+ 0x36200, 0x36344,
+ 0x36400, 0x364b4,
+ 0x364c0, 0x3652c,
+ 0x36540, 0x3665c,
+ 0x36800, 0x36848,
+ 0x36850, 0x368a8,
+ 0x368b8, 0x368c0,
+ 0x368cc, 0x368dc,
+ 0x36900, 0x36904,
+ 0x3690c, 0x36914,
+ 0x3691c, 0x36928,
+ 0x36930, 0x3693c,
+ 0x36944, 0x36948,
+ 0x36954, 0x36974,
+ 0x3697c, 0x36980,
+ 0x36a00, 0x36a20,
+ 0x36a38, 0x36a3c,
+ 0x36a50, 0x36a50,
+ 0x36a80, 0x36a80,
+ 0x36a88, 0x36aa8,
+ 0x36ab0, 0x36ab4,
+ 0x36ac8, 0x36ad4,
+ 0x36b28, 0x36b84,
+ 0x36b98, 0x36bb8,
+ 0x36c98, 0x36d14,
+ 0x37000, 0x37020,
+ 0x37038, 0x3703c,
+ 0x37050, 0x37050,
+ 0x37080, 0x37080,
+ 0x37088, 0x370a8,
+ 0x370b0, 0x370b4,
+ 0x370c8, 0x370d4,
+ 0x37128, 0x37184,
+ 0x37198, 0x371b8,
+ 0x38000, 0x380b0,
+ 0x380b8, 0x38130,
+ 0x38140, 0x38140,
+ 0x38150, 0x38154,
+ 0x38160, 0x381c4,
+ 0x381f0, 0x38204,
+ 0x3820c, 0x38214,
+ 0x3821c, 0x3822c,
+ 0x38244, 0x38244,
+ 0x38254, 0x38274,
+ 0x3827c, 0x38280,
+ 0x38300, 0x38304,
+ 0x3830c, 0x38314,
+ 0x3831c, 0x3832c,
+ 0x38344, 0x38344,
+ 0x38354, 0x38374,
+ 0x3837c, 0x38380,
+ 0x38400, 0x38424,
+ 0x38438, 0x3843c,
+ 0x38480, 0x38480,
+ 0x384a8, 0x384a8,
+ 0x384b0, 0x384b4,
+ 0x384c8, 0x38514,
+ 0x38600, 0x3860c,
+ 0x3861c, 0x38624,
+ 0x38900, 0x38924,
+ 0x38938, 0x3893c,
+ 0x38980, 0x38980,
+ 0x389a8, 0x389a8,
+ 0x389b0, 0x389b4,
+ 0x389c8, 0x38a14,
+ 0x38b00, 0x38b0c,
+ 0x38b1c, 0x38b24,
+ 0x38e00, 0x38e00,
+ 0x38e18, 0x38e20,
+ 0x38e38, 0x38e40,
+ 0x38e58, 0x38e60,
+ 0x38e78, 0x38e80,
+ 0x38e98, 0x38ea0,
+ 0x38eb8, 0x38ec0,
+ 0x38ed8, 0x38ee0,
+ 0x38ef8, 0x38f08,
+ 0x38f10, 0x38f2c,
+ 0x38f80, 0x38ffc,
+ 0x39080, 0x39080,
+ 0x39088, 0x39090,
+ 0x39100, 0x39108,
+ 0x39120, 0x39128,
+ 0x39140, 0x39148,
+ 0x39160, 0x39168,
+ 0x39180, 0x39188,
+ 0x391a0, 0x391a8,
+ 0x391c0, 0x391c8,
+ 0x391e0, 0x391e8,
+ 0x39200, 0x39200,
+ 0x39208, 0x39240,
+ 0x39300, 0x39300,
+ 0x39308, 0x39340,
+ 0x39400, 0x39400,
+ 0x39408, 0x39440,
+ 0x39500, 0x39500,
+ 0x39508, 0x39540,
+ 0x39600, 0x39600,
+ 0x39608, 0x39640,
+ 0x39700, 0x39700,
+ 0x39708, 0x39740,
+ 0x39800, 0x39800,
+ 0x39808, 0x39840,
+ 0x39900, 0x39900,
+ 0x39908, 0x39940,
+ 0x39a00, 0x39a04,
+ 0x39a10, 0x39a14,
+ 0x39a1c, 0x39aa8,
+ 0x39b00, 0x39ecc,
+ 0x3a000, 0x3a004,
+ 0x3a050, 0x3a084,
+ 0x3a090, 0x3a09c,
+ 0x3e000, 0x3e020,
+ 0x3e03c, 0x3e05c,
+ 0x3e100, 0x3e120,
+ 0x3e13c, 0x3e15c,
+ 0x3e200, 0x3e220,
+ 0x3e23c, 0x3e25c,
+ 0x3e300, 0x3e320,
+ 0x3e33c, 0x3e35c,
+ 0x3f000, 0x3f034,
+ 0x3f100, 0x3f130,
+ 0x3f200, 0x3f218,
+ 0x44000, 0x44014,
+ 0x44020, 0x44028,
+ 0x44030, 0x44030,
+ 0x44100, 0x44114,
+ 0x44120, 0x44128,
+ 0x44130, 0x44130,
+ 0x44200, 0x44214,
+ 0x44220, 0x44228,
+ 0x44230, 0x44230,
+ 0x44300, 0x44314,
+ 0x44320, 0x44328,
+ 0x44330, 0x44330,
+ 0x44400, 0x44414,
+ 0x44420, 0x44428,
+ 0x44430, 0x44430,
+ 0x44500, 0x44514,
+ 0x44520, 0x44528,
+ 0x44530, 0x44530,
+ 0x44714, 0x44718,
+ 0x44730, 0x44730,
+ 0x447c0, 0x447c0,
+ 0x447f0, 0x447f0,
+ 0x447f8, 0x447fc,
+ 0x45000, 0x45014,
+ 0x45020, 0x45028,
+ 0x45030, 0x45030,
+ 0x45100, 0x45114,
+ 0x45120, 0x45128,
+ 0x45130, 0x45130,
+ 0x45200, 0x45214,
+ 0x45220, 0x45228,
+ 0x45230, 0x45230,
+ 0x45300, 0x45314,
+ 0x45320, 0x45328,
+ 0x45330, 0x45330,
+ 0x45400, 0x45414,
+ 0x45420, 0x45428,
+ 0x45430, 0x45430,
+ 0x45500, 0x45514,
+ 0x45520, 0x45528,
+ 0x45530, 0x45530,
+ 0x45714, 0x45718,
+ 0x45730, 0x45730,
+ 0x457c0, 0x457c0,
+ 0x457f0, 0x457f0,
+ 0x457f8, 0x457fc,
+ 0x46000, 0x46010,
+ 0x46020, 0x46034,
+ 0x46040, 0x46050,
+ 0x46060, 0x46088,
+ 0x47000, 0x4709c,
+ 0x470c0, 0x470d4,
+ 0x47100, 0x471a8,
+ 0x471b0, 0x471e8,
+ 0x47200, 0x47210,
+ 0x4721c, 0x47230,
+ 0x47238, 0x47238,
+ 0x47240, 0x472ac,
+ 0x472d0, 0x472f4,
+ 0x47300, 0x47310,
+ 0x47318, 0x47348,
+ 0x47350, 0x47354,
+ 0x47380, 0x47388,
+ 0x47390, 0x47394,
+ 0x47400, 0x47448,
+ 0x47450, 0x47458,
+ 0x47500, 0x4751c,
+ 0x47530, 0x4754c,
+ 0x47560, 0x4757c,
+ 0x47590, 0x475ac,
+ 0x47600, 0x47630,
+ 0x47640, 0x47644,
+ 0x47660, 0x4769c,
+ 0x47700, 0x47710,
+ 0x47740, 0x47750,
+ 0x4775c, 0x4779c,
+ 0x477b0, 0x477bc,
+ 0x477c4, 0x477c8,
+ 0x477d4, 0x477fc,
+ 0x48000, 0x48004,
+ 0x48018, 0x4801c,
+ 0x49304, 0x493f0,
+ 0x49400, 0x49410,
+ 0x49460, 0x494f4,
+ 0x50000, 0x50084,
+ 0x50090, 0x500cc,
+ 0x50300, 0x50384,
+ 0x50400, 0x50404,
+ 0x50800, 0x50884,
+ 0x50890, 0x508cc,
+ 0x50b00, 0x50b84,
+ 0x50c00, 0x50c04,
+ 0x51000, 0x51020,
+ 0x51028, 0x510c4,
+ 0x51104, 0x51108,
+ 0x51200, 0x51274,
+ 0x51300, 0x51324,
+ 0x51400, 0x51548,
+ 0x51550, 0x51554,
+ 0x5155c, 0x51584,
+ 0x5158c, 0x515c8,
+ 0x515f0, 0x515f4,
+ 0x58000, 0x58004,
+ 0x58018, 0x5801c,
+ 0x59304, 0x593f0,
+ 0x59400, 0x59410,
+ 0x59460, 0x594f4,
+ };
+
u32 *buf_end = (u32 *)(buf + buf_size);
const unsigned int *reg_ranges;
int reg_ranges_size, range;
@@ -2679,6 +3316,16 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
}
break;
+ case CHELSIO_T7:
+ if (adap->flags & IS_VF) {
+ reg_ranges = t6vf_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges);
+ } else {
+ reg_ranges = t7_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t7_reg_ranges);
+ }
+ break;
+
default:
CH_ERR(adap,
"Unsupported chip version %d\n", chip_version);
@@ -3086,6 +3733,56 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
return 0;
}
+/* Flash Layout {start sector, # of sectors} for T4/T5/T6 adapters */
+static const struct t4_flash_loc_entry t4_flash_loc_arr[] = {
+ [FLASH_LOC_EXP_ROM] = { 0, 6 },
+ [FLASH_LOC_IBFT] = { 6, 1 },
+ [FLASH_LOC_BOOTCFG] = { 7, 1 },
+ [FLASH_LOC_FW] = { 8, 16 },
+ [FLASH_LOC_FWBOOTSTRAP] = { 27, 1 },
+ [FLASH_LOC_ISCSI_CRASH] = { 29, 1 },
+ [FLASH_LOC_FCOE_CRASH] = { 30, 1 },
+ [FLASH_LOC_CFG] = { 31, 1 },
+ [FLASH_LOC_CUDBG] = { 32, 32 },
+ [FLASH_LOC_BOOT_AREA] = { 0, 8 }, /* Spans complete Boot Area */
+ [FLASH_LOC_END] = { 64, 0 },
+};
+
+/* Flash Layout {start sector, # of sectors} for T7 adapters */
+static const struct t4_flash_loc_entry t7_flash_loc_arr[] = {
+ [FLASH_LOC_VPD] = { 0, 1 },
+ [FLASH_LOC_FWBOOTSTRAP] = { 1, 1 },
+ [FLASH_LOC_FW] = { 2, 29 },
+ [FLASH_LOC_CFG] = { 31, 1 },
+ [FLASH_LOC_EXP_ROM] = { 32, 15 },
+ [FLASH_LOC_IBFT] = { 47, 1 },
+ [FLASH_LOC_BOOTCFG] = { 48, 1 },
+ [FLASH_LOC_DPU_BOOT] = { 49, 13 },
+ [FLASH_LOC_ISCSI_CRASH] = { 62, 1 },
+ [FLASH_LOC_FCOE_CRASH] = { 63, 1 },
+ [FLASH_LOC_VPD_BACKUP] = { 64, 1 },
+ [FLASH_LOC_FWBOOTSTRAP_BACKUP] = { 65, 1 },
+ [FLASH_LOC_FW_BACKUP] = { 66, 29 },
+ [FLASH_LOC_CFG_BACK] = { 95, 1 },
+ [FLASH_LOC_CUDBG] = { 96, 48 },
+ [FLASH_LOC_CHIP_DUMP] = { 144, 48 },
+ [FLASH_LOC_DPU_AREA] = { 192, 64 },
+ [FLASH_LOC_BOOT_AREA] = { 32, 17 }, /* Spans complete UEFI/PXE Boot Area */
+ [FLASH_LOC_END] = { 256, 0 },
+};
+
+int
+t4_flash_loc_start(struct adapter *adap, enum t4_flash_loc loc,
+ unsigned int *lenp)
+{
+ const struct t4_flash_loc_entry *l = chip_id(adap) >= CHELSIO_T7 ?
+ &t7_flash_loc_arr[loc] : &t4_flash_loc_arr[loc];
+
+ if (lenp != NULL)
+ *lenp = FLASH_MAX_SIZE(l->nsecs);
+ return (FLASH_START(l->start_sec));
+}
+
/* serial flash and firmware constants and flash config file constants */
enum {
SF_ATTEMPTS = 10, /* max retries for SF operations */
@@ -3116,13 +3813,16 @@ static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
int lock, u32 *valp)
{
int ret;
+ uint32_t op;
if (!byte_cnt || byte_cnt > 4)
return -EINVAL;
if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
return -EBUSY;
- t4_write_reg(adapter, A_SF_OP,
- V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
+ op = V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1);
+ if (chip_id(adapter) >= CHELSIO_T7)
+ op |= F_QUADREADDISABLE;
+ t4_write_reg(adapter, A_SF_OP, op);
ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
if (!ret)
*valp = t4_read_reg(adapter, A_SF_DATA);
@@ -3294,9 +3994,10 @@ unlock:
*/
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
- return t4_read_flash(adapter, FLASH_FW_START +
- offsetof(struct fw_hdr, fw_ver), 1,
- vers, 0);
+ const int start = t4_flash_loc_start(adapter, FLASH_LOC_FW, NULL);
+
+ return t4_read_flash(adapter, start + offsetof(struct fw_hdr, fw_ver),
+ 1, vers, 0);
}
/**
@@ -3308,8 +4009,10 @@ int t4_get_fw_version(struct adapter *adapter, u32 *vers)
*/
int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr)
{
- return t4_read_flash(adapter, FLASH_FW_START,
- sizeof (*hdr) / sizeof (uint32_t), (uint32_t *)hdr, 1);
+ const int start = t4_flash_loc_start(adapter, FLASH_LOC_FW, NULL);
+
+ return t4_read_flash(adapter, start, sizeof (*hdr) / sizeof (uint32_t),
+ (uint32_t *)hdr, 1);
}
/**
@@ -3321,9 +4024,11 @@ int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr)
*/
int t4_get_bs_version(struct adapter *adapter, u32 *vers)
{
- return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
- offsetof(struct fw_hdr, fw_ver), 1,
- vers, 0);
+ const int start = t4_flash_loc_start(adapter, FLASH_LOC_FWBOOTSTRAP,
+ NULL);
+
+ return t4_read_flash(adapter, start + offsetof(struct fw_hdr, fw_ver),
+ 1, vers, 0);
}
/**
@@ -3335,9 +4040,10 @@ int t4_get_bs_version(struct adapter *adapter, u32 *vers)
*/
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
- return t4_read_flash(adapter, FLASH_FW_START +
- offsetof(struct fw_hdr, tp_microcode_ver),
- 1, vers, 0);
+ const int start = t4_flash_loc_start(adapter, FLASH_LOC_FW, NULL);
+
+ return t4_read_flash(adapter, start +
+ offsetof(struct fw_hdr, tp_microcode_ver), 1, vers, 0);
}
/**
@@ -3359,10 +4065,10 @@ int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
sizeof(u32))];
int ret;
+ const int start = t4_flash_loc_start(adapter, FLASH_LOC_EXP_ROM, NULL);
- ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
- ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
- 0);
+ ret = t4_read_flash(adapter, start, ARRAY_SIZE(exprom_header_buf),
+ exprom_header_buf, 0);
if (ret)
return ret;
@@ -3520,16 +4226,20 @@ int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
* File is stored, or an error if the device FLASH is too small to contain
* a Firmware Configuration File.
*/
-int t4_flash_cfg_addr(struct adapter *adapter)
+int t4_flash_cfg_addr(struct adapter *adapter, unsigned int *lenp)
{
+ unsigned int len = 0;
+ const int cfg_start = t4_flash_loc_start(adapter, FLASH_LOC_CFG, &len);
+
/*
* If the device FLASH isn't large enough to hold a Firmware
* Configuration File, return an error.
*/
- if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
+ if (adapter->params.sf_size < cfg_start + len)
return -ENOSPC;
-
- return FLASH_CFG_START;
+ if (lenp != NULL)
+ *lenp = len;
+ return (cfg_start);
}
/*
@@ -3547,7 +4257,8 @@ static int t4_fw_matches_chip(struct adapter *adap,
*/
if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
(is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
- (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
+ (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6) ||
+ (is_t7(adap) && hdr->chip == FW_HDR_CHIP_T7))
return 1;
CH_ERR(adap,
@@ -3572,20 +4283,15 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
u8 first_page[SF_PAGE_SIZE];
const u32 *p = (const u32 *)fw_data;
const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
- unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
unsigned int fw_start_sec;
unsigned int fw_start;
unsigned int fw_size;
+ enum t4_flash_loc loc;
- if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
- fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
- fw_start = FLASH_FWBOOTSTRAP_START;
- fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
- } else {
- fw_start_sec = FLASH_FW_START_SEC;
- fw_start = FLASH_FW_START;
- fw_size = FLASH_FW_MAX_SIZE;
- }
+ loc = ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP ?
+ FLASH_LOC_FWBOOTSTRAP : FLASH_LOC_FW;
+ fw_start = t4_flash_loc_start(adap, loc, &fw_size);
+ fw_start_sec = fw_start / SF_SEC_SIZE;
if (!size) {
CH_ERR(adap, "FW image has no data\n");
@@ -3618,7 +4324,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
return -EINVAL;
}
- i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
+ i = DIV_ROUND_UP(size, SF_SEC_SIZE); /* # of sectors spanned */
ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
if (ret)
goto out;
@@ -3672,7 +4378,7 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
c.param[0].mnem =
cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
- c.param[0].val = (__force __be32)op;
+ c.param[0].val = cpu_to_be32(op);
return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
}
@@ -3922,15 +4628,12 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
* speed and let the firmware pick one.
*/
fec |= FW_PORT_CAP32_FORCE_FEC;
- if (speed & FW_PORT_CAP32_SPEED_100G) {
+ if (speed & FW_PORT_CAP32_SPEED_25G) {
fec |= FW_PORT_CAP32_FEC_RS;
- fec |= FW_PORT_CAP32_FEC_NO_FEC;
- } else if (speed & FW_PORT_CAP32_SPEED_50G) {
fec |= FW_PORT_CAP32_FEC_BASER_RS;
fec |= FW_PORT_CAP32_FEC_NO_FEC;
} else {
fec |= FW_PORT_CAP32_FEC_RS;
- fec |= FW_PORT_CAP32_FEC_BASER_RS;
fec |= FW_PORT_CAP32_FEC_NO_FEC;
}
} else {
@@ -3948,12 +4651,9 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
* the potential top speed. Request the best
* FEC at that speed instead.
*/
- if (speed & FW_PORT_CAP32_SPEED_100G) {
- if (fec == FW_PORT_CAP32_FEC_BASER_RS)
- fec = FW_PORT_CAP32_FEC_RS;
- } else if (speed & FW_PORT_CAP32_SPEED_50G) {
- if (fec == FW_PORT_CAP32_FEC_RS)
- fec = FW_PORT_CAP32_FEC_BASER_RS;
+ if ((speed & FW_PORT_CAP32_SPEED_25G) == 0 &&
+ fec == FW_PORT_CAP32_FEC_BASER_RS) {
+ fec = FW_PORT_CAP32_FEC_RS;
}
}
} else {
@@ -4925,6 +5625,15 @@ static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
.details = mps_trc_intr_details,
.actions = NULL,
};
+ static const struct intr_info t7_mps_trc_intr_info = {
+ .name = "T7_MPS_TRC_INT_CAUSE",
+ .cause_reg = A_T7_MPS_TRC_INT_CAUSE,
+ .enable_reg = A_T7_MPS_TRC_INT_ENABLE,
+ .fatal = F_MISCPERR | V_PKTFIFO(M_PKTFIFO) | V_FILTMEM(M_FILTMEM),
+ .flags = 0,
+ .details = mps_trc_intr_details,
+ .actions = NULL,
+ };
static const struct intr_details mps_stat_sram_intr_details[] = {
{ 0xffffffff, "MPS statistics SRAM parity error" },
{ 0 }
@@ -4998,7 +5707,10 @@ static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
fatal = false;
fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info, 0, verbose);
fatal |= t4_handle_intr(adap, &mps_tx_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &mps_trc_intr_info, 0, verbose);
+ if (chip_id(adap) > CHELSIO_T6)
+ fatal |= t4_handle_intr(adap, &t7_mps_trc_intr_info, 0, verbose);
+ else
+ fatal |= t4_handle_intr(adap, &mps_trc_intr_info, 0, verbose);
fatal |= t4_handle_intr(adap, &mps_stat_sram_intr_info, 0, verbose);
fatal |= t4_handle_intr(adap, &mps_stat_tx_intr_info, 0, verbose);
fatal |= t4_handle_intr(adap, &mps_stat_rx_intr_info, 0, verbose);
@@ -5225,7 +5937,7 @@ static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
ii.flags = 0;
ii.details = mac_intr_details;
ii.actions = NULL;
- } else {
+ } else if (chip_id(adap) < CHELSIO_T7) {
snprintf(name, sizeof(name), "MAC_PORT%u_INT_CAUSE", port);
ii.name = &name[0];
ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
@@ -5234,10 +5946,29 @@ static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
ii.flags = 0;
ii.details = mac_intr_details;
ii.actions = NULL;
+ } else {
+ snprintf(name, sizeof(name), "T7_MAC_PORT%u_INT_CAUSE", port);
+ ii.name = &name[0];
+ ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_INT_CAUSE);
+ ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_INT_EN);
+ ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
+ ii.flags = 0;
+ ii.details = mac_intr_details;
+ ii.actions = NULL;
}
fatal |= t4_handle_intr(adap, &ii, 0, verbose);
- if (chip_id(adap) >= CHELSIO_T5) {
+ if (chip_id(adap) > CHELSIO_T6) {
+ snprintf(name, sizeof(name), "T7_MAC_PORT%u_PERR_INT_CAUSE", port);
+ ii.name = &name[0];
+ ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_CAUSE);
+ ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_EN);
+ ii.fatal = 0;
+ ii.flags = 0;
+ ii.details = NULL;
+ ii.actions = NULL;
+ fatal |= t4_handle_intr(adap, &ii, 0, verbose);
+ } else if (chip_id(adap) >= CHELSIO_T5) {
snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE", port);
ii.name = &name[0];
ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE);
@@ -5249,7 +5980,17 @@ static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
fatal |= t4_handle_intr(adap, &ii, 0, verbose);
}
- if (chip_id(adap) >= CHELSIO_T6) {
+ if (chip_id(adap) > CHELSIO_T6) {
+ snprintf(name, sizeof(name), "T7_MAC_PORT%u_PERR_INT_CAUSE_100G", port);
+ ii.name = &name[0];
+ ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_CAUSE_100G);
+ ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_EN_100G);
+ ii.fatal = 0;
+ ii.flags = 0;
+ ii.details = NULL;
+ ii.actions = NULL;
+ fatal |= t4_handle_intr(adap, &ii, 0, verbose);
+ } else if (is_t6(adap)) {
snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE_100G", port);
ii.name = &name[0];
ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE_100G);
@@ -5346,13 +6087,42 @@ bool t4_slow_intr_handler(struct adapter *adap, bool verbose)
{ F_CIM, "CIM" },
{ 0 }
};
- static const struct intr_info pl_perr_cause = {
+ static const struct intr_details t7_pl_intr_details[] = {
+ { F_T7_MC1, "MC1" },
+ { F_T7_ULP_TX, "ULP TX" },
+ { F_T7_SGE, "SGE" },
+ { F_T7_CPL_SWITCH, "CPL Switch" },
+ { F_T7_ULP_RX, "ULP RX" },
+ { F_T7_PM_RX, "PM RX" },
+ { F_T7_PM_TX, "PM TX" },
+ { F_T7_MA, "MA" },
+ { F_T7_TP, "TP" },
+ { F_T7_LE, "LE" },
+ { F_T7_EDC1, "EDC1" },
+ { F_T7_EDC0, "EDC0" },
+ { F_T7_MC0, "MC0" },
+ { F_T7_PCIE, "PCIE" },
+ { F_MAC3, "MAC3" },
+ { F_MAC2, "MAC2" },
+ { F_MAC1, "MAC1" },
+ { F_MAC0, "MAC0" },
+ { F_SMB, "SMB" },
+ { F_PL, "PL" },
+ { F_NCSI, "NC-SI" },
+ { F_MPS, "MPS" },
+ { F_DBG, "DBG" },
+ { F_I2CM, "I2CM" },
+ { F_MI, "MI" },
+ { F_CIM, "CIM" },
+ { 0 }
+ };
+ struct intr_info pl_perr_cause = {
.name = "PL_PERR_CAUSE",
.cause_reg = A_PL_PERR_CAUSE,
.enable_reg = A_PL_PERR_ENABLE,
.fatal = 0xffffffff,
- .flags = 0,
- .details = pl_intr_details,
+ .flags = NONFATAL_IF_DISABLED,
+ .details = NULL,
.actions = NULL,
};
static const struct intr_action pl_intr_action[] = {
@@ -5381,17 +6151,53 @@ bool t4_slow_intr_handler(struct adapter *adap, bool verbose)
{ F_CIM, -1, cim_intr_handler },
{ 0 }
};
- static const struct intr_info pl_intr_info = {
+ static const struct intr_action t7_pl_intr_action[] = {
+ { F_T7_ULP_TX, -1, ulptx_intr_handler },
+ { F_T7_SGE, -1, sge_intr_handler },
+ { F_T7_CPL_SWITCH, -1, cplsw_intr_handler },
+ { F_T7_ULP_RX, -1, ulprx_intr_handler },
+ { F_T7_PM_RX, -1, pmrx_intr_handler},
+ { F_T7_PM_TX, -1, pmtx_intr_handler},
+ { F_T7_MA, -1, ma_intr_handler },
+ { F_T7_TP, -1, tp_intr_handler },
+ { F_T7_LE, -1, le_intr_handler },
+ { F_T7_EDC1, MEM_EDC1, mem_intr_handler },
+ { F_T7_EDC0, MEM_EDC0, mem_intr_handler },
+ { F_T7_MC1, MEM_MC1, mem_intr_handler },
+ { F_T7_MC0, MEM_MC0, mem_intr_handler },
+ { F_T7_PCIE, -1, pcie_intr_handler },
+ { F_MAC3, 3, mac_intr_handler},
+ { F_MAC2, 2, mac_intr_handler},
+ { F_MAC1, 1, mac_intr_handler},
+ { F_MAC0, 0, mac_intr_handler},
+ { F_SMB, -1, smb_intr_handler},
+ { F_PL, -1, plpl_intr_handler },
+ { F_NCSI, -1, ncsi_intr_handler},
+ { F_MPS, -1, mps_intr_handler },
+ { F_CIM, -1, cim_intr_handler },
+ { 0 }
+ };
+ struct intr_info pl_intr_info = {
.name = "PL_INT_CAUSE",
.cause_reg = A_PL_INT_CAUSE,
.enable_reg = A_PL_INT_ENABLE,
.fatal = 0,
.flags = 0,
- .details = pl_intr_details,
- .actions = pl_intr_action,
+ .details = NULL,
+ .actions = NULL,
};
u32 perr;
+ if (chip_id(adap) >= CHELSIO_T7) {
+ pl_perr_cause.details = t7_pl_intr_details;
+ pl_intr_info.details = t7_pl_intr_details;
+ pl_intr_info.actions = t7_pl_intr_action;
+ } else {
+ pl_perr_cause.details = pl_intr_details;
+ pl_intr_info.details = pl_intr_details;
+ pl_intr_info.actions = pl_intr_action;
+ }
+
perr = t4_read_reg(adap, pl_perr_cause.cause_reg);
if (verbose || perr != 0) {
t4_show_intr_info(adap, &pl_perr_cause, perr);
@@ -5421,19 +6227,20 @@ bool t4_slow_intr_handler(struct adapter *adap, bool verbose)
*/
void t4_intr_enable(struct adapter *adap)
{
- u32 val = 0;
+ u32 mask, val;
if (chip_id(adap) <= CHELSIO_T5)
- val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
+ val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT |
+ F_DBFIFO_LP_INT;
else
val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
val |= F_ERR_CPL_EXCEED_IQE_SIZE | F_ERR_INVALID_CIDX_INC |
F_ERR_CPL_OPCODE_0 | F_ERR_DATA_CPL_ON_HIGH_QID1 |
F_INGRESS_SIZE_ERR | F_ERR_DATA_CPL_ON_HIGH_QID0 |
F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
- F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | F_DBFIFO_LP_INT |
- F_EGRESS_SIZE_ERR;
- t4_set_reg_field(adap, A_SGE_INT_ENABLE3, val, val);
+ F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | F_EGRESS_SIZE_ERR;
+ mask = val;
+ t4_set_reg_field(adap, A_SGE_INT_ENABLE3, mask, val);
t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
t4_set_reg_field(adap, A_PL_INT_ENABLE, F_SF | F_I2CM, 0);
t4_set_reg_field(adap, A_PL_INT_MAP0, 0, 1 << adap->pf);
@@ -6184,6 +6991,11 @@ void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
{
t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
sleep_ok);
+
+ if (chip_id(adap) >= CHELSIO_T7)
+ /* read RDMA stats IN and OUT for all ports at once */
+ t4_tp_mib_read(adap, &st->pkts_in[0], 28, A_TP_MIB_RDMA_IN_PKT_0,
+ sleep_ok);
}
/**
@@ -6564,16 +7376,24 @@ void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
int idx, int enable)
{
- int i, ofst = idx * 4;
+ int i, ofst;
+ u32 match_ctl_a, match_ctl_b;
u32 data_reg, mask_reg, cfg;
u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
if (idx < 0 || idx >= NTRACE)
return -EINVAL;
+ if (chip_id(adap) >= CHELSIO_T7) {
+ match_ctl_a = T7_MPS_TRC_FILTER_MATCH_CTL_A(idx);
+ match_ctl_b = T7_MPS_TRC_FILTER_MATCH_CTL_B(idx);
+ } else {
+ match_ctl_a = MPS_TRC_FILTER_MATCH_CTL_A(idx);
+ match_ctl_b = MPS_TRC_FILTER_MATCH_CTL_B(idx);
+ }
+
if (tp == NULL || !enable) {
- t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
- enable ? en : 0);
+ t4_set_reg_field(adap, match_ctl_a, en, enable ? en : 0);
return 0;
}
@@ -6610,22 +7430,20 @@ int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
return -EINVAL;
/* stop the tracer we'll be changing */
- t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
+ t4_set_reg_field(adap, match_ctl_a, en, 0);
- idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
- data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
- mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
+ ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
+ data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
+ mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
t4_write_reg(adap, data_reg, tp->data[i]);
t4_write_reg(adap, mask_reg, ~tp->mask[i]);
}
- t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
- V_TFCAPTUREMAX(tp->snap_len) |
+ t4_write_reg(adap, match_ctl_b, V_TFCAPTUREMAX(tp->snap_len) |
V_TFMINPKTSIZE(tp->min_len));
- t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
- V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
- (is_t4(adap) ?
+ t4_write_reg(adap, match_ctl_a, V_TFOFFSET(tp->skip_ofst) |
+ V_TFLENGTH(tp->skip_len) | en | (is_t4(adap) ?
V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
@@ -6645,11 +7463,16 @@ void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
int *enabled)
{
u32 ctla, ctlb;
- int i, ofst = idx * 4;
+ int i, ofst;
u32 data_reg, mask_reg;
- ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
- ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
+ if (chip_id(adap) >= CHELSIO_T7) {
+ ctla = t4_read_reg(adap, T7_MPS_TRC_FILTER_MATCH_CTL_A(idx));
+ ctlb = t4_read_reg(adap, T7_MPS_TRC_FILTER_MATCH_CTL_B(idx));
+ } else {
+ ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A(idx));
+ ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B(idx));
+ }
if (is_t4(adap)) {
*enabled = !!(ctla & F_TFEN);
@@ -6676,6 +7499,37 @@ void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
}
/**
+ * t4_set_trace_rss_control - configure the trace rss control register
+ * @adap: the adapter
+ * @chan: the channel number for RSS control
+ * @qid: queue number
+ *
+ * Configures the MPS tracing RSS control parameter for the specified
+ * @chan channel and @qid queue number.
+ */
+void t4_set_trace_rss_control(struct adapter *adap, u8 chan, u16 qid)
+{
+ u32 mps_trc_rss_control;
+
+ switch (chip_id(adap)) {
+ case CHELSIO_T4:
+ mps_trc_rss_control = A_MPS_TRC_RSS_CONTROL;
+ break;
+ case CHELSIO_T5:
+ case CHELSIO_T6:
+ mps_trc_rss_control = A_MPS_T5_TRC_RSS_CONTROL;
+ break;
+ case CHELSIO_T7:
+ default:
+ mps_trc_rss_control = A_T7_MPS_T5_TRC_RSS_CONTROL;
+ break;
+ }
+
+ t4_write_reg(adap, mps_trc_rss_control,
+ V_RSSCONTROL(chan) | V_QUEUENUMBER(qid));
+}
+
+/**
* t4_pmtx_get_stats - returns the HW stats from PMTX
* @adap: the adapter
* @cnt: where to store the count statistics
@@ -6696,6 +7550,8 @@ void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
else {
t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
A_PM_TX_DBG_DATA, data, 2,
+ chip_id(adap) >= CHELSIO_T7 ?
+ A_T7_PM_TX_DBG_STAT_MSB :
A_PM_TX_DBG_STAT_MSB);
cycles[i] = (((u64)data[0] << 32) | data[1]);
}
@@ -6730,6 +7586,25 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
}
/**
+ * t4_pmrx_cache_get_stats - returns the HW PMRX cache stats
+ * @adap: the adapter
+ * @stats: where to store the statistics
+ *
+ * Returns performance statistics of PMRX cache.
+ */
+void t4_pmrx_cache_get_stats(struct adapter *adap, u32 stats[])
+{
+ u8 i, j;
+
+ for (i = 0, j = 0; i < T7_PM_RX_CACHE_NSTATS / 3; i++, j += 3) {
+ t4_write_reg(adap, A_PM_RX_STAT_CONFIG, 0x100 + i);
+ stats[j] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
+ t4_read_indirect(adap, A_PM_RX_DBG_CTRL, A_PM_RX_DBG_DATA,
+ &stats[j + 1], 2, A_PM_RX_DBG_STAT_MSB);
+ }
+}
+
+/**
* t4_get_mps_bg_map - return the buffer groups associated with a port
* @adap: the adapter
* @idx: the port index
@@ -6762,11 +7637,24 @@ static unsigned int t4_get_rx_e_chan_map(struct adapter *adap, int idx)
const u32 n = adap->params.nports;
const u32 all_chan = (1 << adap->chip_params->nchan) - 1;
- if (n == 1)
- return idx == 0 ? all_chan : 0;
- if (n == 2 && chip_id(adap) <= CHELSIO_T5)
- return idx < 2 ? (3 << (2 * idx)) : 0;
- return 1 << idx;
+ switch (adap->params.tp.lb_mode) {
+ case 0:
+ if (n == 1)
+ return (all_chan);
+ if (n == 2 && chip_id(adap) <= CHELSIO_T5)
+ return (3 << (2 * idx));
+ return (1 << idx);
+ case 1:
+ MPASS(n == 1);
+ return (all_chan);
+ case 2:
+ MPASS(n <= 2);
+ return (3 << (2 * idx));
+ default:
+ CH_ERR(adap, "Unsupported LB mode %d\n",
+ adap->params.tp.lb_mode);
+ return (0);
+ }
}
/*
@@ -6784,6 +7672,8 @@ static unsigned int t4_get_rx_c_chan(struct adapter *adap, int idx)
*/
static unsigned int t4_get_tx_c_chan(struct adapter *adap, int idx)
{
+ if (adap->params.tx_tp_ch_map != UINT32_MAX)
+ return (adap->params.tx_tp_ch_map >> (8 * idx)) & 0xff;
return idx;
}
@@ -6856,79 +7746,89 @@ void t4_get_port_stats_offset(struct adapter *adap, int idx,
*/
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
- struct port_info *pi = adap->port[idx];
- u32 bgmap = pi->mps_bg_map;
- u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
+ struct port_info *pi;
+ int port_id, tx_chan;
+ u32 bgmap, stat_ctl;
+
+ port_id = adap->port_map[idx];
+ MPASS(port_id >= 0 && port_id <= adap->params.nports);
+ pi = adap->port[port_id];
#define GET_STAT(name) \
t4_read_reg64(adap, \
- t4_port_reg(adap, pi->tx_chan, A_MPS_PORT_STAT_##name##_L));
-#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
+ t4_port_reg(adap, tx_chan, A_MPS_PORT_STAT_##name##_L));
+ memset(p, 0, sizeof(*p));
+ for (tx_chan = pi->tx_chan;
+ tx_chan < pi->tx_chan + adap->params.tp.lb_nchan; tx_chan++) {
+ p->tx_pause += GET_STAT(TX_PORT_PAUSE);
+ p->tx_octets += GET_STAT(TX_PORT_BYTES);
+ p->tx_frames += GET_STAT(TX_PORT_FRAMES);
+ p->tx_bcast_frames += GET_STAT(TX_PORT_BCAST);
+ p->tx_mcast_frames += GET_STAT(TX_PORT_MCAST);
+ p->tx_ucast_frames += GET_STAT(TX_PORT_UCAST);
+ p->tx_error_frames += GET_STAT(TX_PORT_ERROR);
+ p->tx_frames_64 += GET_STAT(TX_PORT_64B);
+ p->tx_frames_65_127 += GET_STAT(TX_PORT_65B_127B);
+ p->tx_frames_128_255 += GET_STAT(TX_PORT_128B_255B);
+ p->tx_frames_256_511 += GET_STAT(TX_PORT_256B_511B);
+ p->tx_frames_512_1023 += GET_STAT(TX_PORT_512B_1023B);
+ p->tx_frames_1024_1518 += GET_STAT(TX_PORT_1024B_1518B);
+ p->tx_frames_1519_max += GET_STAT(TX_PORT_1519B_MAX);
+ p->tx_drop += GET_STAT(TX_PORT_DROP);
+ p->tx_ppp0 += GET_STAT(TX_PORT_PPP0);
+ p->tx_ppp1 += GET_STAT(TX_PORT_PPP1);
+ p->tx_ppp2 += GET_STAT(TX_PORT_PPP2);
+ p->tx_ppp3 += GET_STAT(TX_PORT_PPP3);
+ p->tx_ppp4 += GET_STAT(TX_PORT_PPP4);
+ p->tx_ppp5 += GET_STAT(TX_PORT_PPP5);
+ p->tx_ppp6 += GET_STAT(TX_PORT_PPP6);
+ p->tx_ppp7 += GET_STAT(TX_PORT_PPP7);
+
+ p->rx_pause += GET_STAT(RX_PORT_PAUSE);
+ p->rx_octets += GET_STAT(RX_PORT_BYTES);
+ p->rx_frames += GET_STAT(RX_PORT_FRAMES);
+ p->rx_bcast_frames += GET_STAT(RX_PORT_BCAST);
+ p->rx_mcast_frames += GET_STAT(RX_PORT_MCAST);
+ p->rx_ucast_frames += GET_STAT(RX_PORT_UCAST);
+ p->rx_too_long += GET_STAT(RX_PORT_MTU_ERROR);
+ p->rx_jabber += GET_STAT(RX_PORT_MTU_CRC_ERROR);
+ p->rx_len_err += GET_STAT(RX_PORT_LEN_ERROR);
+ p->rx_symbol_err += GET_STAT(RX_PORT_SYM_ERROR);
+ p->rx_runt += GET_STAT(RX_PORT_LESS_64B);
+ p->rx_frames_64 += GET_STAT(RX_PORT_64B);
+ p->rx_frames_65_127 += GET_STAT(RX_PORT_65B_127B);
+ p->rx_frames_128_255 += GET_STAT(RX_PORT_128B_255B);
+ p->rx_frames_256_511 += GET_STAT(RX_PORT_256B_511B);
+ p->rx_frames_512_1023 += GET_STAT(RX_PORT_512B_1023B);
+ p->rx_frames_1024_1518 += GET_STAT(RX_PORT_1024B_1518B);
+ p->rx_frames_1519_max += GET_STAT(RX_PORT_1519B_MAX);
+ p->rx_ppp0 += GET_STAT(RX_PORT_PPP0);
+ p->rx_ppp1 += GET_STAT(RX_PORT_PPP1);
+ p->rx_ppp2 += GET_STAT(RX_PORT_PPP2);
+ p->rx_ppp3 += GET_STAT(RX_PORT_PPP3);
+ p->rx_ppp4 += GET_STAT(RX_PORT_PPP4);
+ p->rx_ppp5 += GET_STAT(RX_PORT_PPP5);
+ p->rx_ppp6 += GET_STAT(RX_PORT_PPP6);
+ p->rx_ppp7 += GET_STAT(RX_PORT_PPP7);
+ if (!is_t6(adap)) {
+ MPASS(pi->fcs_reg == A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L);
+ p->rx_fcs_err += GET_STAT(RX_PORT_CRC_ERROR);
+ }
+ }
+#undef GET_STAT
- p->tx_pause = GET_STAT(TX_PORT_PAUSE);
- p->tx_octets = GET_STAT(TX_PORT_BYTES);
- p->tx_frames = GET_STAT(TX_PORT_FRAMES);
- p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
- p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
- p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
- p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
- p->tx_frames_64 = GET_STAT(TX_PORT_64B);
- p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
- p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
- p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
- p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
- p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
- p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
- p->tx_drop = GET_STAT(TX_PORT_DROP);
- p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
- p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
- p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
- p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
- p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
- p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
- p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
- p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
+ if (is_t6(adap) && pi->fcs_reg != -1)
+ p->rx_fcs_err = t4_read_reg64(adap,
+ t4_port_reg(adap, pi->tx_chan, pi->fcs_reg)) - pi->fcs_base;
if (chip_id(adap) >= CHELSIO_T5) {
+ stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
if (stat_ctl & F_COUNTPAUSESTATTX) {
p->tx_frames -= p->tx_pause;
p->tx_octets -= p->tx_pause * 64;
}
if (stat_ctl & F_COUNTPAUSEMCTX)
p->tx_mcast_frames -= p->tx_pause;
- }
-
- p->rx_pause = GET_STAT(RX_PORT_PAUSE);
- p->rx_octets = GET_STAT(RX_PORT_BYTES);
- p->rx_frames = GET_STAT(RX_PORT_FRAMES);
- p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
- p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
- p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
- p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
- p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
- p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
- p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
- p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
- p->rx_frames_64 = GET_STAT(RX_PORT_64B);
- p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
- p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
- p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
- p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
- p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
- p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
- p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
- p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
- p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
- p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
- p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
- p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
- p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
- p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
-
- if (pi->fcs_reg != -1)
- p->rx_fcs_err = t4_read_reg64(adap, pi->fcs_reg) - pi->fcs_base;
-
- if (chip_id(adap) >= CHELSIO_T5) {
if (stat_ctl & F_COUNTPAUSESTATRX) {
p->rx_frames -= p->rx_pause;
p->rx_octets -= p->rx_pause * 64;
@@ -6937,6 +7837,8 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
p->rx_mcast_frames -= p->rx_pause;
}
+#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
+ bgmap = pi->mps_bg_map;
p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
@@ -6945,8 +7847,6 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
-
-#undef GET_STAT
#undef GET_STAT_COM
}
@@ -7016,10 +7916,14 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
- } else {
+ } else if (chip_id(adap) < CHELSIO_T7) {
mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
+ } else {
+ mag_id_reg_l = T7_PORT_REG(port, A_T7_MAC_PORT_MAGIC_MACID_LO);
+ mag_id_reg_h = T7_PORT_REG(port, A_T7_MAC_PORT_MAGIC_MACID_HI);
+ port_cfg_reg = T7_PORT_REG(port, A_MAC_PORT_CFG2);
}
if (addr) {
@@ -7056,8 +7960,10 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
if (is_t4(adap))
port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
- else
+ else if (chip_id(adap) < CHELSIO_T7)
port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
+ else
+ port_cfg_reg = T7_PORT_REG(port, A_MAC_PORT_CFG2);
if (!enable) {
t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
@@ -7348,6 +8254,7 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
break;
case CHELSIO_T6:
+ case CHELSIO_T7:
sge_idma_decode = (const char * const *)t6_decode;
sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
break;
@@ -8964,7 +9871,7 @@ static void handle_port_info(struct port_info *pi, const struct fw_port_cmd *p,
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
F_FW_CMD_REQUEST | F_FW_CMD_READ |
- V_FW_PORT_CMD_PORTID(pi->tx_chan));
+ V_FW_PORT_CMD_PORTID(pi->hw_port));
action = sc->params.port_caps32 ? FW_PORT_ACTION_GET_PORT_INFO32 :
FW_PORT_ACTION_GET_PORT_INFO;
cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) |
@@ -8996,16 +9903,12 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
(action == FW_PORT_ACTION_GET_PORT_INFO ||
action == FW_PORT_ACTION_GET_PORT_INFO32)) {
/* link/module state change message */
- int i;
- int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
- struct port_info *pi = NULL;
-
- for_each_port(adap, i) {
- pi = adap2pinfo(adap, i);
- if (pi->tx_chan == chan)
- break;
- }
+ int hw_port = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
+ int port_id = adap->port_map[hw_port];
+ struct port_info *pi;
+ MPASS(port_id >= 0 && port_id < adap->params.nports);
+ pi = adap->port[port_id];
PORT_LOCK(pi);
handle_port_info(pi, p, action, &mod_changed, &link_changed);
PORT_UNLOCK(pi);
@@ -9159,14 +10062,15 @@ int t4_get_flash_params(struct adapter *adapter)
}
/* If we didn't recognize the FLASH part, that's no real issue: the
- * Hardware/Software contract says that Hardware will _*ALWAYS*_
- * use a FLASH part which is at least 4MB in size and has 64KB
- * sectors. The unrecognized FLASH part is likely to be much larger
- * than 4MB, but that's all we really need.
+ * Hardware/Software contract says that Hardware will _*ALWAYS*_ use a
+ * FLASH part which has 64KB sectors and is at least 4MB or 16MB in
+ * size, depending on the board.
*/
if (size == 0) {
- CH_WARN(adapter, "Unknown Flash Part, ID = %#x, assuming 4MB\n", flashid);
- size = 1 << 22;
+ size = chip_id(adapter) >= CHELSIO_T7 ? 16 : 4;
+ CH_WARN(adapter, "Unknown Flash Part %#x, assuming %uMB\n",
+ flashid, size);
+ size <<= 20;
}
/*
@@ -9212,11 +10116,14 @@ const struct chip_params *t4_get_chip_params(int chipid)
.pm_stats_cnt = PM_NSTATS,
.cng_ch_bits_log = 2,
.nsched_cls = 15,
+ .cim_num_ibq = CIM_NUM_IBQ,
.cim_num_obq = CIM_NUM_OBQ,
.filter_opt_len = FILTER_OPT_LEN,
+ .filter_num_opt = S_FT_LAST + 1,
.mps_rplc_size = 128,
.vfcount = 128,
.sge_fl_db = F_DBPRIO,
+ .sge_ctxt_size = SGE_CTXT_SIZE,
.mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
.rss_nentries = RSS_NENTRIES,
.cim_la_size = CIMLA_SIZE,
@@ -9227,11 +10134,14 @@ const struct chip_params *t4_get_chip_params(int chipid)
.pm_stats_cnt = PM_NSTATS,
.cng_ch_bits_log = 2,
.nsched_cls = 16,
+ .cim_num_ibq = CIM_NUM_IBQ,
.cim_num_obq = CIM_NUM_OBQ_T5,
.filter_opt_len = T5_FILTER_OPT_LEN,
+ .filter_num_opt = S_FT_LAST + 1,
.mps_rplc_size = 128,
.vfcount = 128,
.sge_fl_db = F_DBPRIO | F_DBTYPE,
+ .sge_ctxt_size = SGE_CTXT_SIZE,
.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
.rss_nentries = RSS_NENTRIES,
.cim_la_size = CIMLA_SIZE,
@@ -9242,15 +10152,36 @@ const struct chip_params *t4_get_chip_params(int chipid)
.pm_stats_cnt = T6_PM_NSTATS,
.cng_ch_bits_log = 3,
.nsched_cls = 16,
+ .cim_num_ibq = CIM_NUM_IBQ,
.cim_num_obq = CIM_NUM_OBQ_T5,
.filter_opt_len = T5_FILTER_OPT_LEN,
+ .filter_num_opt = S_FT_LAST + 1,
.mps_rplc_size = 256,
.vfcount = 256,
.sge_fl_db = 0,
+ .sge_ctxt_size = SGE_CTXT_SIZE,
.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
.rss_nentries = T6_RSS_NENTRIES,
.cim_la_size = CIMLA_SIZE_T6,
},
+ {
+ /* T7 */
+ .nchan = NCHAN,
+ .pm_stats_cnt = T6_PM_NSTATS,
+ .cng_ch_bits_log = 2,
+ .nsched_cls = 16,
+ .cim_num_ibq = CIM_NUM_IBQ_T7,
+ .cim_num_obq = CIM_NUM_OBQ_T7,
+ .filter_opt_len = T7_FILTER_OPT_LEN,
+ .filter_num_opt = S_T7_FT_LAST + 1,
+ .mps_rplc_size = 256,
+ .vfcount = 256,
+ .sge_fl_db = 0,
+ .sge_ctxt_size = SGE_CTXT_SIZE_T7,
+ .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
+ .rss_nentries = T7_RSS_NENTRIES,
+ .cim_la_size = CIMLA_SIZE_T6,
+ },
};
chipid -= CHELSIO_T4;
@@ -9466,14 +10397,11 @@ int t4_bar2_sge_qregs(struct adapter *adapter,
}
/**
- * t4_init_devlog_params - initialize adapter->params.devlog
+ * t4_init_devlog_ncores_params - initialize adap->params.devlog and ncores
* @adap: the adapter
* @fw_attach: whether we can talk to the firmware
- *
- * Initialize various fields of the adapter's Firmware Device Log
- * Parameters structure.
*/
-int t4_init_devlog_params(struct adapter *adap, int fw_attach)
+int t4_init_devlog_ncores_params(struct adapter *adap, int fw_attach)
{
struct devlog_params *dparams = &adap->params.devlog;
u32 pf_dparams;
@@ -9487,12 +10415,15 @@ int t4_init_devlog_params(struct adapter *adap, int fw_attach)
*/
pf_dparams =
t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
- if (pf_dparams) {
- unsigned int nentries, nentries128;
+ if (pf_dparams && pf_dparams != UINT32_MAX) {
+ unsigned int nentries, nentries128, ncore_shift;
+
+ ncore_shift = (G_PCIE_FW_PF_DEVLOG_COUNT_MSB(pf_dparams) << 1) |
+ G_PCIE_FW_PF_DEVLOG_COUNT_LSB(pf_dparams);
+ adap->params.ncores = 1 << ncore_shift;
dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;
-
nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
nentries = (nentries128 + 1) * 128;
dparams->size = nentries * sizeof(struct fw_devlog_e);
@@ -9503,6 +10434,7 @@ int t4_init_devlog_params(struct adapter *adap, int fw_attach)
/*
* For any failing returns ...
*/
+ adap->params.ncores = 1;
memset(dparams, 0, sizeof *dparams);
/*
@@ -9624,21 +10556,28 @@ int t4_init_sge_params(struct adapter *adapter)
/* Convert the LE's hardware hash mask to a shorter filter mask. */
static inline uint16_t
-hashmask_to_filtermask(uint64_t hashmask, uint16_t filter_mode)
+hashmask_to_filtermask(struct adapter *adap, uint64_t hashmask, uint16_t filter_mode)
{
- static const uint8_t width[] = {1, 3, 17, 17, 8, 8, 16, 9, 3, 1};
- int i;
+ int first, last, i;
uint16_t filter_mask;
- uint64_t mask; /* field mask */
+ uint64_t mask; /* field mask */
+
+
+ if (chip_id(adap) >= CHELSIO_T7) {
+ first = S_T7_FT_FIRST;
+ last = S_T7_FT_LAST;
+ } else {
+ first = S_FT_FIRST;
+ last = S_FT_LAST;
+ }
- filter_mask = 0;
- for (i = S_FCOE; i <= S_FRAGMENTATION; i++) {
+ for (filter_mask = 0, i = first; i <= last; i++) {
if ((filter_mode & (1 << i)) == 0)
continue;
- mask = (1 << width[i]) - 1;
+ mask = (1 << t4_filter_field_width(adap, i)) - 1;
if ((hashmask & mask) == mask)
filter_mask |= 1 << i;
- hashmask >>= width[i];
+ hashmask >>= t4_filter_field_width(adap, i);
}
return (filter_mask);
@@ -9681,7 +10620,15 @@ read_filter_mode_and_ingress_config(struct adapter *adap)
v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(4));
hash_mask |= (u64)v << 32;
}
- tpp->filter_mask = hashmask_to_filtermask(hash_mask,
+ if (chip_id(adap) >= CHELSIO_T7) {
+ /*
+ * This param came before T7 so T7+ firmwares should
+ * always support this query.
+ */
+ CH_WARN(adap, "query for filter mode/mask failed: %d\n",
+ rc);
+ }
+ tpp->filter_mask = hashmask_to_filtermask(adap, hash_mask,
tpp->filter_mode);
t4_tp_pio_read(adap, &v, 1, A_TP_INGRESS_CONFIG, true);
@@ -9696,16 +10643,37 @@ read_filter_mode_and_ingress_config(struct adapter *adap)
* shift positions of several elements of the Compressed Filter Tuple
* for this adapter which we need frequently ...
*/
- tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
- tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
- tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
- tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
- tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
- tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
- tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
- tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
- tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
- tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);
+ if (chip_id(adap) >= CHELSIO_T7) {
+ tpp->ipsecidx_shift = t4_filter_field_shift(adap, F_IPSECIDX);
+ tpp->fcoe_shift = t4_filter_field_shift(adap, F_T7_FCOE);
+ tpp->port_shift = t4_filter_field_shift(adap, F_T7_PORT);
+ tpp->vnic_shift = t4_filter_field_shift(adap, F_T7_VNIC_ID);
+ tpp->vlan_shift = t4_filter_field_shift(adap, F_T7_VLAN);
+ tpp->tos_shift = t4_filter_field_shift(adap, F_T7_TOS);
+ tpp->protocol_shift = t4_filter_field_shift(adap, F_T7_PROTOCOL);
+ tpp->ethertype_shift = t4_filter_field_shift(adap, F_T7_ETHERTYPE);
+ tpp->macmatch_shift = t4_filter_field_shift(adap, F_T7_MACMATCH);
+ tpp->matchtype_shift = t4_filter_field_shift(adap, F_T7_MPSHITTYPE);
+ tpp->frag_shift = t4_filter_field_shift(adap, F_T7_FRAGMENTATION);
+ tpp->roce_shift = t4_filter_field_shift(adap, F_ROCE);
+ tpp->synonly_shift = t4_filter_field_shift(adap, F_SYNONLY);
+ tpp->tcpflags_shift = t4_filter_field_shift(adap, F_TCPFLAGS);
+ } else {
+ tpp->ipsecidx_shift = -1;
+ tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
+ tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
+ tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
+ tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
+ tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
+ tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
+ tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
+ tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
+ tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
+ tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);
+ tpp->roce_shift = -1;
+ tpp->synonly_shift = -1;
+ tpp->tcpflags_shift = -1;
+ }
}
/**
@@ -9725,11 +10693,21 @@ int t4_init_tp_params(struct adapter *adap)
read_filter_mode_and_ingress_config(adap);
+ tpp->rx_pkt_encap = false;
+ tpp->lb_mode = 0;
+ tpp->lb_nchan = 1;
if (chip_id(adap) > CHELSIO_T5) {
v = t4_read_reg(adap, A_TP_OUT_CONFIG);
tpp->rx_pkt_encap = v & F_CRXPKTENC;
- } else
- tpp->rx_pkt_encap = false;
+ if (chip_id(adap) >= CHELSIO_T7) {
+ t4_tp_pio_read(adap, &v, 1, A_TP_CHANNEL_MAP, true);
+ tpp->lb_mode = G_T7_LB_MODE(v);
+ if (tpp->lb_mode == 1)
+ tpp->lb_nchan = 4;
+ else if (tpp->lb_mode == 2)
+ tpp->lb_nchan = 2;
+ }
+ }
rx_len = t4_read_reg(adap, A_TP_PMM_RX_PAGE_SIZE);
tx_len = t4_read_reg(adap, A_TP_PMM_TX_PAGE_SIZE);
@@ -9750,6 +10728,53 @@ int t4_init_tp_params(struct adapter *adap)
}
/**
+ * t4_filter_field_width - returns the width of a filter field
+ * @adap: the adapter
+ * @filter_field: the filter field whose width is being requested
+ *
+ * Return the width of a filter field within the Compressed Filter
+ * Tuple.  The filter field is specified via its bit position within
+ * TP_VLAN_PRI_MAP (filter mode), e.g. S_FT_VLAN.
+ */
+int t4_filter_field_width(const struct adapter *adap, int filter_field)
+{
+ const int nopt = adap->chip_params->filter_num_opt;
+ static const uint8_t width_t7[] = {
+ W_FT_IPSECIDX,
+ W_FT_FCOE,
+ W_FT_PORT,
+ W_FT_VNIC_ID,
+ W_FT_VLAN,
+ W_FT_TOS,
+ W_FT_PROTOCOL,
+ W_FT_ETHERTYPE,
+ W_FT_MACMATCH,
+ W_FT_MPSHITTYPE,
+ W_FT_FRAGMENTATION,
+ W_FT_ROCE,
+ W_FT_SYNONLY,
+ W_FT_TCPFLAGS
+ };
+ static const uint8_t width_t4[] = {
+ W_FT_FCOE,
+ W_FT_PORT,
+ W_FT_VNIC_ID,
+ W_FT_VLAN,
+ W_FT_TOS,
+ W_FT_PROTOCOL,
+ W_FT_ETHERTYPE,
+ W_FT_MACMATCH,
+ W_FT_MPSHITTYPE,
+ W_FT_FRAGMENTATION
+ };
+ const uint8_t *width = chip_id(adap) >= CHELSIO_T7 ? width_t7 : width_t4;
+
+ if (filter_field < 0 || filter_field >= nopt)
+ return (0);
+ return (width[filter_field]);
+}
+
+/**
* t4_filter_field_shift - calculate filter field shift
* @adap: the adapter
* @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
@@ -9767,6 +10792,56 @@ int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
if ((filter_mode & filter_sel) == 0)
return -1;
+ if (chip_id(adap) >= CHELSIO_T7) {
+ for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
+ switch (filter_mode & sel) {
+ case F_IPSECIDX:
+ field_shift += W_FT_IPSECIDX;
+ break;
+ case F_T7_FCOE:
+ field_shift += W_FT_FCOE;
+ break;
+ case F_T7_PORT:
+ field_shift += W_FT_PORT;
+ break;
+ case F_T7_VNIC_ID:
+ field_shift += W_FT_VNIC_ID;
+ break;
+ case F_T7_VLAN:
+ field_shift += W_FT_VLAN;
+ break;
+ case F_T7_TOS:
+ field_shift += W_FT_TOS;
+ break;
+ case F_T7_PROTOCOL:
+ field_shift += W_FT_PROTOCOL;
+ break;
+ case F_T7_ETHERTYPE:
+ field_shift += W_FT_ETHERTYPE;
+ break;
+ case F_T7_MACMATCH:
+ field_shift += W_FT_MACMATCH;
+ break;
+ case F_T7_MPSHITTYPE:
+ field_shift += W_FT_MPSHITTYPE;
+ break;
+ case F_T7_FRAGMENTATION:
+ field_shift += W_FT_FRAGMENTATION;
+ break;
+ case F_ROCE:
+ field_shift += W_FT_ROCE;
+ break;
+ case F_SYNONLY:
+ field_shift += W_FT_SYNONLY;
+ break;
+ case F_TCPFLAGS:
+ field_shift += W_FT_TCPFLAGS;
+ break;
+ }
+ }
+ return field_shift;
+ }
+
for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
switch (filter_mode & sel) {
case F_FCOE:
@@ -9818,11 +10893,11 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
} while ((adap->params.portvec & (1 << j)) == 0);
}
+ p->hw_port = j;
p->tx_chan = t4_get_tx_c_chan(adap, j);
p->rx_chan = t4_get_rx_c_chan(adap, j);
p->mps_bg_map = t4_get_mps_bg_map(adap, j);
p->rx_e_chan_map = t4_get_rx_e_chan_map(adap, j);
- p->lport = j;
if (!(adap->flags & IS_VF) ||
adap->params.vfres.r_caps & FW_CMD_CAP_PORT) {
@@ -9851,232 +10926,321 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
return 0;
}
+static void t4_read_cimq_cfg_ibq_core(struct adapter *adap, u8 coreid, u32 qid,
+ u16 *base, u16 *size, u16 *thres)
+{
+ unsigned int v, m;
+
+ if (chip_id(adap) > CHELSIO_T6) {
+ v = F_T7_IBQSELECT | V_T7_QUENUMSELECT(qid) |
+ V_CORESELECT(coreid);
+ /* value is in 512-byte units */
+ m = 512;
+ } else {
+ v = F_IBQSELECT | V_QUENUMSELECT(qid);
+ /* value is in 256-byte units */
+ m = 256;
+ }
+
+ t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, v);
+ v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
+ if (base)
+ *base = G_CIMQBASE(v) * m;
+ if (size)
+ *size = G_CIMQSIZE(v) * m;
+ if (thres)
+ *thres = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
+}
+
+static void t4_read_cimq_cfg_obq_core(struct adapter *adap, u8 coreid, u32 qid,
+ u16 *base, u16 *size)
+{
+ unsigned int v, m;
+
+ if (chip_id(adap) > CHELSIO_T6) {
+ v = F_T7_OBQSELECT | V_T7_QUENUMSELECT(qid) |
+ V_CORESELECT(coreid);
+ /* value is in 512-byte units */
+ m = 512;
+ } else {
+ v = F_OBQSELECT | V_QUENUMSELECT(qid);
+ /* value is in 256-byte units */
+ m = 256;
+ }
+
+ t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, v);
+ v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
+ if (base)
+ *base = G_CIMQBASE(v) * m;
+ if (size)
+ *size = G_CIMQSIZE(v) * m;
+}
+
/**
- * t4_read_cimq_cfg - read CIM queue configuration
+ * t4_read_cimq_cfg_core - read CIM queue configuration on specific core
* @adap: the adapter
+ * @coreid: the uP coreid
* @base: holds the queue base addresses in bytes
* @size: holds the queue sizes in bytes
* @thres: holds the queue full thresholds in bytes
*
* Returns the current configuration of the CIM queues, starting with
- * the IBQs, then the OBQs.
+ * the IBQs, then the OBQs, on a specific @coreid.
*/
-void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
+void t4_read_cimq_cfg_core(struct adapter *adap, u8 coreid, u16 *base,
+ u16 *size, u16 *thres)
{
- unsigned int i, v;
- int cim_num_obq = adap->chip_params->cim_num_obq;
+ unsigned int cim_num_ibq = adap->chip_params->cim_num_ibq;
+ unsigned int cim_num_obq = adap->chip_params->cim_num_obq;
+ unsigned int i;
- for (i = 0; i < CIM_NUM_IBQ; i++) {
- t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
- V_QUENUMSELECT(i));
- v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
- /* value is in 256-byte units */
- *base++ = G_CIMQBASE(v) * 256;
- *size++ = G_CIMQSIZE(v) * 256;
- *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
- }
- for (i = 0; i < cim_num_obq; i++) {
- t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
- V_QUENUMSELECT(i));
- v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
- /* value is in 256-byte units */
- *base++ = G_CIMQBASE(v) * 256;
- *size++ = G_CIMQSIZE(v) * 256;
- }
+ for (i = 0; i < cim_num_ibq; i++, base++, size++, thres++)
+ t4_read_cimq_cfg_ibq_core(adap, coreid, i, base, size, thres);
+
+ for (i = 0; i < cim_num_obq; i++, base++, size++)
+ t4_read_cimq_cfg_obq_core(adap, coreid, i, base, size);
+}
+
+static int t4_read_cim_ibq_data_core(struct adapter *adap, u8 coreid, u32 addr,
+ u32 *data)
+{
+ int ret, attempts;
+ unsigned int v;
+
+ /* It might take 3-10ms before the IBQ debug read access is allowed.
+ * Wait for 1 Sec with a delay of 1 usec.
+ */
+ attempts = 1000000;
+
+ if (chip_id(adap) > CHELSIO_T6)
+ v = V_T7_IBQDBGADDR(addr) | V_IBQDBGCORE(coreid);
+ else
+ v = V_IBQDBGADDR(addr);
+
+ t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, v | F_IBQDBGEN);
+ ret = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
+ attempts, 1);
+ if (ret)
+ return ret;
+
+ *data = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
+ return 0;
}
/**
- * t4_read_cim_ibq - read the contents of a CIM inbound queue
+ * t4_read_cim_ibq_core - read the contents of a CIM inbound queue on
+ * specific core
* @adap: the adapter
+ * @coreid: the uP coreid
* @qid: the queue index
* @data: where to store the queue contents
* @n: capacity of @data in 32-bit words
*
* Reads the contents of the selected CIM queue starting at address 0 up
- * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
- * error and the number of 32-bit words actually read on success.
+ * to the capacity of @data on a specific @coreid. @n must be a multiple
+ * of 4. Returns < 0 on error and the number of 32-bit words actually
+ * read on success.
*/
-int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
+int t4_read_cim_ibq_core(struct adapter *adap, u8 coreid, u32 qid, u32 *data,
+ size_t n)
{
- int i, err, attempts;
- unsigned int addr;
- const unsigned int nwords = CIM_IBQ_SIZE * 4;
+ unsigned int cim_num_ibq = adap->chip_params->cim_num_ibq;
+ u16 i, addr, nwords;
+ int ret;
- if (qid > 5 || (n & 3))
+ if (qid > (cim_num_ibq - 1) || (n & 3))
return -EINVAL;
- addr = qid * nwords;
+ t4_read_cimq_cfg_ibq_core(adap, coreid, qid, &addr, &nwords, NULL);
+ addr >>= sizeof(u16);
+ nwords >>= sizeof(u16);
if (n > nwords)
n = nwords;
- /* It might take 3-10ms before the IBQ debug read access is allowed.
- * Wait for 1 Sec with a delay of 1 usec.
- */
- attempts = 1000000;
-
- for (i = 0; i < n; i++, addr++) {
- t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
- F_IBQDBGEN);
- err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
- attempts, 1);
- if (err)
- return err;
- *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
+ for (i = 0; i < n; i++, addr++, data++) {
+ ret = t4_read_cim_ibq_data_core(adap, coreid, addr, data);
+ if (ret < 0)
+ return ret;
}
+
t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
return i;
}
+static int t4_read_cim_obq_data_core(struct adapter *adap, u8 coreid, u32 addr,
+ u32 *data)
+{
+ unsigned int v;
+ int ret;
+
+ if (chip_id(adap) > CHELSIO_T6)
+ v = V_T7_OBQDBGADDR(addr) | V_OBQDBGCORE(coreid);
+ else
+ v = V_OBQDBGADDR(addr);
+
+ t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, v | F_OBQDBGEN);
+ ret = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0, 2, 1);
+ if (ret)
+ return ret;
+
+ *data = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
+ return 0;
+}
+
/**
- * t4_read_cim_obq - read the contents of a CIM outbound queue
+ * t4_read_cim_obq_core - read the contents of a CIM outbound queue on
+ * specific core
* @adap: the adapter
+ * @coreid: the uP coreid
* @qid: the queue index
* @data: where to store the queue contents
* @n: capacity of @data in 32-bit words
*
* Reads the contents of the selected CIM queue starting at address 0 up
- * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
- * error and the number of 32-bit words actually read on success.
+ * to the capacity of @data on specific @coreid. @n must be a multiple
+ * of 4. Returns < 0 on error and the number of 32-bit words actually
+ * read on success.
*/
-int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
+int t4_read_cim_obq_core(struct adapter *adap, u8 coreid, u32 qid, u32 *data,
+ size_t n)
{
- int i, err;
- unsigned int addr, v, nwords;
- int cim_num_obq = adap->chip_params->cim_num_obq;
+ unsigned int cim_num_obq = adap->chip_params->cim_num_obq;
+ u16 i, addr, nwords;
+ int ret;
if ((qid > (cim_num_obq - 1)) || (n & 3))
return -EINVAL;
- t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
- V_QUENUMSELECT(qid));
- v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
-
- addr = G_CIMQBASE(v) * 64; /* muliple of 256 -> muliple of 4 */
- nwords = G_CIMQSIZE(v) * 64; /* same */
+ t4_read_cimq_cfg_obq_core(adap, coreid, qid, &addr, &nwords);
+ addr >>= sizeof(u16);
+ nwords >>= sizeof(u16);
if (n > nwords)
n = nwords;
- for (i = 0; i < n; i++, addr++) {
- t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
- F_OBQDBGEN);
- err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
- 2, 1);
- if (err)
- return err;
- *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
+ for (i = 0; i < n; i++, addr++, data++) {
+ ret = t4_read_cim_obq_data_core(adap, coreid, addr, data);
+ if (ret < 0)
+ return ret;
}
+
t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
- return i;
+ return i;
}
-enum {
- CIM_QCTL_BASE = 0,
- CIM_CTL_BASE = 0x2000,
- CIM_PBT_ADDR_BASE = 0x2800,
- CIM_PBT_LRF_BASE = 0x3000,
- CIM_PBT_DATA_BASE = 0x3800
-};
-
/**
- * t4_cim_read - read a block from CIM internal address space
+ * t4_cim_read_core - read a block from CIM internal address space
+ * of a control register group on specific core.
* @adap: the adapter
+ * @group: the control register group to select for read
+ * @coreid: the uP coreid
* @addr: the start address within the CIM address space
* @n: number of words to read
* @valp: where to store the result
*
- * Reads a block of 4-byte words from the CIM intenal address space.
+ * Reads a block of 4-byte words from the CIM internal address space
+ * of a control register @group on a specific @coreid.
*/
-int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
- unsigned int *valp)
+int t4_cim_read_core(struct adapter *adap, u8 group, u8 coreid,
+ unsigned int addr, unsigned int n,
+ unsigned int *valp)
{
+ unsigned int hostbusy, v = 0;
int ret = 0;
- if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
+ if (chip_id(adap) > CHELSIO_T6) {
+ hostbusy = F_T7_HOSTBUSY;
+ v = V_HOSTGRPSEL(group) | V_HOSTCORESEL(coreid);
+ } else {
+ hostbusy = F_HOSTBUSY;
+ }
+
+ if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & hostbusy)
return -EBUSY;
for ( ; !ret && n--; addr += 4) {
- t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
- ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
+ t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | v);
+ ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, hostbusy,
0, 5, 2);
if (!ret)
*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
}
+
return ret;
}
/**
- * t4_cim_write - write a block into CIM internal address space
+ * t4_cim_write_core - write a block into CIM internal address space
+ * of a control register group on specific core.
* @adap: the adapter
+ * @group: the control register group to select for write
+ * @coreid: the uP coreid
* @addr: the start address within the CIM address space
* @n: number of words to write
* @valp: set of values to write
*
- * Writes a block of 4-byte words into the CIM intenal address space.
+ * Writes a block of 4-byte words into the CIM internal address space
+ * of a control register @group on a specific @coreid.
*/
-int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
- const unsigned int *valp)
+int t4_cim_write_core(struct adapter *adap, u8 group, u8 coreid,
+ unsigned int addr, unsigned int n,
+ const unsigned int *valp)
{
+ unsigned int hostbusy, v;
int ret = 0;
- if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
+ if (chip_id(adap) > CHELSIO_T6) {
+ hostbusy = F_T7_HOSTBUSY;
+ v = F_T7_HOSTWRITE | V_HOSTGRPSEL(group) |
+ V_HOSTCORESEL(coreid);
+ } else {
+ hostbusy = F_HOSTBUSY;
+ v = F_HOSTWRITE;
+ }
+
+ if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & hostbusy)
return -EBUSY;
for ( ; !ret && n--; addr += 4) {
t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
- t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
- ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
+ t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | v);
+ ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, hostbusy,
0, 5, 2);
}
- return ret;
-}
-static int t4_cim_write1(struct adapter *adap, unsigned int addr,
- unsigned int val)
-{
- return t4_cim_write(adap, addr, 1, &val);
-}
-
-/**
- * t4_cim_ctl_read - read a block from CIM control region
- * @adap: the adapter
- * @addr: the start address within the CIM control region
- * @n: number of words to read
- * @valp: where to store the result
- *
- * Reads a block of 4-byte words from the CIM control region.
- */
-int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
- unsigned int *valp)
-{
- return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
+ return ret;
}
/**
- * t4_cim_read_la - read CIM LA capture buffer
+ * t4_cim_read_la_core - read CIM LA capture buffer on specific core
* @adap: the adapter
+ * @coreid: uP coreid
* @la_buf: where to store the LA data
* @wrptr: the HW write pointer within the capture buffer
*
- * Reads the contents of the CIM LA buffer with the most recent entry at
- * the end of the returned data and with the entry at @wrptr first.
- * We try to leave the LA in the running state we find it in.
+ * Reads the contents of the CIM LA buffer on a specific @coreid
+ * with the most recent entry at the end of the returned data
+ * and with the entry at @wrptr first. We try to leave the LA
+ * in the running state we find it in.
*/
-int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
+int t4_cim_read_la_core(struct adapter *adap, u8 coreid, u32 *la_buf,
+ u32 *wrptr)
{
- int i, ret;
unsigned int cfg, val, idx;
+ int i, ret;
- ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
+ ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1, &cfg);
if (ret)
return ret;
if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */
- ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
+ val = 0;
+ ret = t4_cim_write_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
+ &val);
if (ret)
return ret;
}
- ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
+ ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1, &val);
if (ret)
goto restart;
@@ -10085,25 +11249,28 @@ int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
*wrptr = idx;
for (i = 0; i < adap->params.cim_la_size; i++) {
- ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
- V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
+ val = V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN;
+ ret = t4_cim_write_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
+ &val);
if (ret)
break;
- ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
+ ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
+ &val);
if (ret)
break;
if (val & F_UPDBGLARDEN) {
ret = -ETIMEDOUT;
break;
}
- ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
+ ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_DATA, 1,
+ &la_buf[i]);
if (ret)
break;
/* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
* identify the 32-bit portion of the full 312-bit data
*/
- if (is_t6(adap) && (idx & 0xf) >= 9)
+ if ((chip_id(adap) > CHELSIO_T5) && (idx & 0xf) >= 9)
idx = (idx & 0xff0) + 0x10;
else
idx++;
@@ -10112,11 +11279,15 @@ int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
}
restart:
if (cfg & F_UPDBGLAEN) {
- int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
- cfg & ~F_UPDBGLARDEN);
+ int r;
+
+ val = cfg & ~F_UPDBGLARDEN;
+ r = t4_cim_write_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
+ &val);
if (!ret)
ret = r;
}
+
return ret;
}
@@ -10403,25 +11574,20 @@ void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbp
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
int ret, i, n, cfg_addr;
- unsigned int addr;
+ unsigned int addr, len;
unsigned int flash_cfg_start_sec;
- unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
- cfg_addr = t4_flash_cfg_addr(adap);
+ cfg_addr = t4_flash_cfg_addr(adap, &len);
if (cfg_addr < 0)
return cfg_addr;
- addr = cfg_addr;
- flash_cfg_start_sec = addr / SF_SEC_SIZE;
-
- if (size > FLASH_CFG_MAX_SIZE) {
- CH_ERR(adap, "cfg file too large, max is %u bytes\n",
- FLASH_CFG_MAX_SIZE);
+ if (size > len) {
+ CH_ERR(adap, "cfg file too large, max is %u bytes\n", len);
return -EFBIG;
}
- i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
- sf_sec_size);
+ flash_cfg_start_sec = cfg_addr / SF_SEC_SIZE;
+ i = DIV_ROUND_UP(len, SF_SEC_SIZE);
ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
flash_cfg_start_sec + i - 1);
/*
@@ -10432,15 +11598,12 @@ int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
goto out;
/* this will write to the flash up to SF_PAGE_SIZE at a time */
- for (i = 0; i< size; i+= SF_PAGE_SIZE) {
- if ( (size - i) < SF_PAGE_SIZE)
- n = size - i;
- else
- n = SF_PAGE_SIZE;
+ addr = cfg_addr;
+ for (i = 0; i < size; i += SF_PAGE_SIZE) {
+ n = min(size - i, SF_PAGE_SIZE);
ret = t4_write_flash(adap, addr, n, cfg_data, 1);
if (ret)
goto out;
-
addr += SF_PAGE_SIZE;
cfg_data += SF_PAGE_SIZE;
}
@@ -10644,25 +11807,25 @@ int t4_load_boot(struct adapter *adap, u8 *boot_data,
pcir_data_t *pcir_header;
int ret, addr;
uint16_t device_id;
- unsigned int i;
- unsigned int boot_sector = (boot_addr * 1024 );
- unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
+ unsigned int i, start, len;
+ unsigned int boot_sector = boot_addr * 1024;
/*
- * Make sure the boot image does not encroach on the firmware region
+ * Make sure the boot image does not exceed its available space.
*/
- if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
- CH_ERR(adap, "boot image encroaching on firmware region\n");
+ len = 0;
+ start = t4_flash_loc_start(adap, FLASH_LOC_BOOT_AREA, &len);
+ if (boot_sector + size > start + len) {
+ CH_ERR(adap, "boot data is larger than available BOOT area\n");
return -EFBIG;
}
/*
* The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
* and Boot configuration data sections. These 3 boot sections span
- * sectors 0 to 7 in flash and live right before the FW image location.
+ * the entire FLASH_LOC_BOOT_AREA.
*/
- i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
- sf_sec_size);
+ i = DIV_ROUND_UP(size ? size : len, SF_SEC_SIZE);
ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
(boot_sector >> 16) + i - 1);
@@ -10765,40 +11928,39 @@ out:
* is stored, or an error if the device FLASH is too small to contain
* a OptionROM Configuration.
*/
-static int t4_flash_bootcfg_addr(struct adapter *adapter)
+static int t4_flash_bootcfg_addr(struct adapter *adapter, unsigned int *lenp)
{
+ unsigned int len = 0;
+ const int start = t4_flash_loc_start(adapter, FLASH_LOC_BOOTCFG, &len);
+
/*
* If the device FLASH isn't large enough to hold a Firmware
* Configuration File, return an error.
*/
- if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
+ if (adapter->params.sf_size < start + len)
return -ENOSPC;
-
- return FLASH_BOOTCFG_START;
+ if (lenp != NULL)
+ *lenp = len;
+ return (start);
}
int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
{
int ret, i, n, cfg_addr;
- unsigned int addr;
+ unsigned int addr, len;
unsigned int flash_cfg_start_sec;
- unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
- cfg_addr = t4_flash_bootcfg_addr(adap);
+ cfg_addr = t4_flash_bootcfg_addr(adap, &len);
if (cfg_addr < 0)
return cfg_addr;
- addr = cfg_addr;
- flash_cfg_start_sec = addr / SF_SEC_SIZE;
-
- if (size > FLASH_BOOTCFG_MAX_SIZE) {
- CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
- FLASH_BOOTCFG_MAX_SIZE);
+ if (size > len) {
+ CH_ERR(adap, "bootcfg file too large, max is %u bytes\n", len);
return -EFBIG;
}
- i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
- sf_sec_size);
+ flash_cfg_start_sec = cfg_addr / SF_SEC_SIZE;
+ i = DIV_ROUND_UP(len, SF_SEC_SIZE);
ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
flash_cfg_start_sec + i - 1);
@@ -10810,15 +11972,12 @@ int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
goto out;
/* this will write to the flash up to SF_PAGE_SIZE at a time */
- for (i = 0; i< size; i+= SF_PAGE_SIZE) {
- if ( (size - i) < SF_PAGE_SIZE)
- n = size - i;
- else
- n = SF_PAGE_SIZE;
+ addr = cfg_addr;
+ for (i = 0; i < size; i += SF_PAGE_SIZE) {
+ n = min(size - i, SF_PAGE_SIZE);
ret = t4_write_flash(adap, addr, n, cfg_data, 0);
if (ret)
goto out;
-
addr += SF_PAGE_SIZE;
cfg_data += SF_PAGE_SIZE;
}
@@ -10844,19 +12003,20 @@ out:
*/
int t4_set_filter_cfg(struct adapter *adap, int mode, int mask, int vnic_mode)
{
- static const uint8_t width[] = {1, 3, 17, 17, 8, 8, 16, 9, 3, 1};
int i, nbits, rc;
uint32_t param, val;
uint16_t fmode, fmask;
const int maxbits = adap->chip_params->filter_opt_len;
+ const int nopt = adap->chip_params->filter_num_opt;
+ int width;
if (mode != -1 || mask != -1) {
if (mode != -1) {
fmode = mode;
nbits = 0;
- for (i = S_FCOE; i <= S_FRAGMENTATION; i++) {
+ for (i = 0; i < nopt; i++) {
if (fmode & (1 << i))
- nbits += width[i];
+ nbits += t4_filter_field_width(adap, i);
}
if (nbits > maxbits) {
CH_ERR(adap, "optional fields in the filter "
@@ -10867,17 +12027,20 @@ int t4_set_filter_cfg(struct adapter *adap, int mode, int mask, int vnic_mode)
}
/*
- * Hardware wants the bits to be maxed out. Keep
+ * Hardware < T7 wants the bits to be maxed out. Keep
* setting them until there's no room for more.
*/
- for (i = S_FCOE; i <= S_FRAGMENTATION; i++) {
- if (fmode & (1 << i))
- continue;
- if (nbits + width[i] <= maxbits) {
- fmode |= 1 << i;
- nbits += width[i];
- if (nbits == maxbits)
- break;
+ if (chip_id(adap) < CHELSIO_T7) {
+ for (i = 0; i < nopt; i++) {
+ if (fmode & (1 << i))
+ continue;
+ width = t4_filter_field_width(adap, i);
+ if (nbits + width <= maxbits) {
+ fmode |= 1 << i;
+ nbits += width;
+ if (nbits == maxbits)
+ break;
+ }
}
}
@@ -10936,21 +12099,26 @@ int t4_set_filter_cfg(struct adapter *adap, int mode, int mask, int vnic_mode)
*/
void t4_clr_port_stats(struct adapter *adap, int idx)
{
- unsigned int i;
- u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
- u32 port_base_addr;
+ struct port_info *pi;
+ int i, port_id, tx_chan;
+ u32 bgmap, port_base_addr;
- if (is_t4(adap))
- port_base_addr = PORT_BASE(idx);
- else
- port_base_addr = T5_PORT_BASE(idx);
-
- for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
- i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
- t4_write_reg(adap, port_base_addr + i, 0);
- for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
- i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
- t4_write_reg(adap, port_base_addr + i, 0);
+ port_id = adap->port_map[idx];
+ MPASS(port_id >= 0 && port_id <= adap->params.nports);
+ pi = adap->port[port_id];
+
+ for (tx_chan = pi->tx_chan;
+ tx_chan < pi->tx_chan + adap->params.tp.lb_nchan; tx_chan++) {
+ port_base_addr = t4_port_reg(adap, tx_chan, 0);
+
+ for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
+ i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
+ t4_write_reg(adap, port_base_addr + i, 0);
+ for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
+ i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
+ t4_write_reg(adap, port_base_addr + i, 0);
+ }
+ bgmap = pi->mps_bg_map;
for (i = 0; i < 4; i++)
if (bgmap & (1 << i)) {
t4_write_reg(adap,
@@ -11078,6 +12246,8 @@ int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
+ if (chip_id(adap) > CHELSIO_T6)
+ data[6] = be32_to_cpu(c.u.idctxt.ctxt_data6);
}
return ret;
}
@@ -11099,9 +12269,12 @@ int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type cty
t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
- if (!ret)
+ if (!ret) {
for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
*data++ = t4_read_reg(adap, i);
+ if (chip_id(adap) > CHELSIO_T6)
+ *data++ = t4_read_reg(adap, i);
+ }
return ret;
}
diff --git a/sys/dev/cxgbe/common/t4_hw.h b/sys/dev/cxgbe/common/t4_hw.h
index 79ec690cd5e6..09bd9ac9e637 100644
--- a/sys/dev/cxgbe/common/t4_hw.h
+++ b/sys/dev/cxgbe/common/t4_hw.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011, 2016 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2016, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -42,30 +41,36 @@ enum {
EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
T6_RSS_NENTRIES = 4096,
+ T7_RSS_NENTRIES = 16384,
TCB_SIZE = 128, /* TCB size */
NMTUS = 16, /* size of MTU table */
NCCTRL_WIN = 32, /* # of congestion control windows */
NTX_SCHED = 8, /* # of HW Tx scheduling queues */
PM_NSTATS = 5, /* # of PM stats */
- T6_PM_NSTATS = 7,
+ T6_PM_NSTATS = 7, /* # of PM stats in T6 */
MAX_PM_NSTATS = 7,
+ T7_PM_RX_CACHE_NSTATS = 27, /* # of PM Rx Cache stats in T7 */
MBOX_LEN = 64, /* mailbox size in bytes */
NTRACE = 4, /* # of tracing filters */
TRACE_LEN = 112, /* length of trace data and mask */
FILTER_OPT_LEN = 36, /* filter tuple width of optional components */
T5_FILTER_OPT_LEN = 40,
+ T7_FILTER_OPT_LEN = 63,
NWOL_PAT = 8, /* # of WoL patterns */
WOL_PAT_LEN = 128, /* length of WoL patterns */
UDBS_SEG_SIZE = 128, /* Segment size of BAR2 doorbells */
UDBS_SEG_SHIFT = 7, /* log2(UDBS_SEG_SIZE) */
UDBS_DB_OFFSET = 8, /* offset of the 4B doorbell in a segment */
UDBS_WR_OFFSET = 64, /* offset of the work request in a segment */
+ MAX_UP_CORES = 8, /* Max # of uP cores that can be enabled */
};
enum {
CIM_NUM_IBQ = 6, /* # of CIM IBQs */
+ CIM_NUM_IBQ_T7 = 16, /* # of CIM IBQs for T7 */
CIM_NUM_OBQ = 6, /* # of CIM OBQs */
CIM_NUM_OBQ_T5 = 8, /* # of CIM OBQs for T5 adapter */
+ CIM_NUM_OBQ_T7 = 16, /* # of CIM OBQs for T7 adapter */
CIMLA_SIZE = 256 * 8, /* 256 rows * ceil(235/32) 32-bit words */
CIMLA_SIZE_T6 = 256 * 10, /* 256 rows * ceil(311/32) 32-bit words */
CIM_PIFLA_SIZE = 64, /* # of 192-bit words in CIM PIF LA */
@@ -91,6 +96,7 @@ enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV }; /* mailbox owners */
enum {
SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
SGE_CTXT_SIZE = 24, /* size of SGE context */
+ SGE_CTXT_SIZE_T7 = 28, /* size of SGE context for T7 */
SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
SGE_NDBQTIMERS = 8, /* # of Doorbell Queue Timer values */
@@ -161,6 +167,18 @@ struct rsp_ctrl {
#define V_QINTR_TIMER_IDX(x) ((x) << S_QINTR_TIMER_IDX)
#define G_QINTR_TIMER_IDX(x) (((x) >> S_QINTR_TIMER_IDX) & M_QINTR_TIMER_IDX)
+#define S_ARM_QTYPE 11
+#define M_ARM_QTYPE 1
+#define V_ARM_QTYPE(x) ((x) << S_ARM_QTYPE)
+
+#define S_ARM_PIDX 0
+#define M_ARM_PIDX 0x7ffU
+#define V_ARM_PIDX(x) ((x) << S_ARM_PIDX)
+
+#define S_ARM_CIDXINC 0
+#define M_ARM_CIDXINC 0x7ffU
+#define V_ARM_CIDXINC(x) ((x) << S_ARM_CIDXINC)
+
/* # of pages a pagepod can hold without needing another pagepod */
#define PPOD_PAGES 4U
@@ -206,95 +224,116 @@ struct pagepod {
*/
#define FLASH_START(start) ((start) * SF_SEC_SIZE)
#define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE)
+#define FLASH_MIN_SIZE FLASH_START(32)
-enum {
+enum t4_flash_loc {
/*
* Various Expansion-ROM boot images, etc.
*/
- FLASH_EXP_ROM_START_SEC = 0,
- FLASH_EXP_ROM_NSECS = 6,
- FLASH_EXP_ROM_START = FLASH_START(FLASH_EXP_ROM_START_SEC),
- FLASH_EXP_ROM_MAX_SIZE = FLASH_MAX_SIZE(FLASH_EXP_ROM_NSECS),
+ FLASH_LOC_EXP_ROM = 0,
/*
* iSCSI Boot Firmware Table (iBFT) and other driver-related
* parameters ...
*/
- FLASH_IBFT_START_SEC = 6,
- FLASH_IBFT_NSECS = 1,
- FLASH_IBFT_START = FLASH_START(FLASH_IBFT_START_SEC),
- FLASH_IBFT_MAX_SIZE = FLASH_MAX_SIZE(FLASH_IBFT_NSECS),
+ FLASH_LOC_IBFT,
/*
* Boot configuration data.
*/
- FLASH_BOOTCFG_START_SEC = 7,
- FLASH_BOOTCFG_NSECS = 1,
- FLASH_BOOTCFG_START = FLASH_START(FLASH_BOOTCFG_START_SEC),
- FLASH_BOOTCFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_BOOTCFG_NSECS),
+ FLASH_LOC_BOOTCFG,
/*
* Location of firmware image in FLASH.
*/
- FLASH_FW_START_SEC = 8,
- FLASH_FW_NSECS = 16,
- FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
- FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
+ FLASH_LOC_FW,
/*
* Location of bootstrap firmware image in FLASH.
*/
- FLASH_FWBOOTSTRAP_START_SEC = 27,
- FLASH_FWBOOTSTRAP_NSECS = 1,
- FLASH_FWBOOTSTRAP_START = FLASH_START(FLASH_FWBOOTSTRAP_START_SEC),
- FLASH_FWBOOTSTRAP_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FWBOOTSTRAP_NSECS),
+ FLASH_LOC_FWBOOTSTRAP,
/*
* iSCSI persistent/crash information.
*/
- FLASH_ISCSI_CRASH_START_SEC = 29,
- FLASH_ISCSI_CRASH_NSECS = 1,
- FLASH_ISCSI_CRASH_START = FLASH_START(FLASH_ISCSI_CRASH_START_SEC),
- FLASH_ISCSI_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_ISCSI_CRASH_NSECS),
+ FLASH_LOC_ISCSI_CRASH,
/*
* FCoE persistent/crash information.
*/
- FLASH_FCOE_CRASH_START_SEC = 30,
- FLASH_FCOE_CRASH_NSECS = 1,
- FLASH_FCOE_CRASH_START = FLASH_START(FLASH_FCOE_CRASH_START_SEC),
- FLASH_FCOE_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FCOE_CRASH_NSECS),
+ FLASH_LOC_FCOE_CRASH,
/*
* Location of Firmware Configuration File in FLASH.
*/
- FLASH_CFG_START_SEC = 31,
- FLASH_CFG_NSECS = 1,
- FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC),
- FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS),
+ FLASH_LOC_CFG,
+
+ /*
+ * CUDBG chip dump.
+ */
+ FLASH_LOC_CUDBG,
+
+ /*
+ * FW chip dump.
+ */
+ FLASH_LOC_CHIP_DUMP,
+
+ /*
+ * DPU boot information store.
+ */
+ FLASH_LOC_DPU_BOOT,
+
+ /*
+ * DPU persistent information store.
+ */
+ FLASH_LOC_DPU_AREA,
/*
- * We don't support FLASH devices which can't support the full
- * standard set of sections which we need for normal operations.
+ * VPD location.
*/
- FLASH_MIN_SIZE = FLASH_CFG_START + FLASH_CFG_MAX_SIZE,
+ FLASH_LOC_VPD,
/*
- * Sectors 32-63 for CUDBG.
+ * Backup init/vpd.
*/
- FLASH_CUDBG_START_SEC = 32,
- FLASH_CUDBG_NSECS = 32,
- FLASH_CUDBG_START = FLASH_START(FLASH_CUDBG_START_SEC),
- FLASH_CUDBG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CUDBG_NSECS),
+ FLASH_LOC_VPD_BACKUP,
/*
- * Size of defined FLASH regions.
+ * Backup firmware image.
*/
- FLASH_END_SEC = 64,
+ FLASH_LOC_FW_BACKUP,
+
+ /*
+ * Backup bootstrap firmware image.
+ */
+ FLASH_LOC_FWBOOTSTRAP_BACKUP,
+
+ /*
+ * Backup Location of Firmware Configuration File in FLASH.
+ */
+ FLASH_LOC_CFG_BACK,
+
+ /*
+ * Helper to retrieve info that spans the entire Boot related area.
+ */
+ FLASH_LOC_BOOT_AREA,
+
+ /*
+ * Helper to determine minimum standard set of sections needed for
+ * normal operations.
+ */
+ FLASH_LOC_MIN_SIZE,
+
+ /*
+ * End of FLASH regions.
+ */
+ FLASH_LOC_END
};
-#undef FLASH_START
-#undef FLASH_MAX_SIZE
+struct t4_flash_loc_entry {
+ u16 start_sec;
+ u16 nsecs;
+};
#define S_SGE_TIMESTAMP 0
#define M_SGE_TIMESTAMP 0xfffffffffffffffULL
diff --git a/sys/dev/cxgbe/common/t4_msg.h b/sys/dev/cxgbe/common/t4_msg.h
index d356d0d99f36..0d12ccf2e910 100644
--- a/sys/dev/cxgbe/common/t4_msg.h
+++ b/sys/dev/cxgbe/common/t4_msg.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011, 2016 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2016, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,7 +29,7 @@
#ifndef T4_MSG_H
#define T4_MSG_H
-enum {
+enum cpl_opcodes {
CPL_PASS_OPEN_REQ = 0x1,
CPL_PASS_ACCEPT_RPL = 0x2,
CPL_ACT_OPEN_REQ = 0x3,
@@ -68,13 +67,16 @@ enum {
CPL_PEER_CLOSE = 0x26,
CPL_RTE_DELETE_RPL = 0x27,
CPL_RTE_WRITE_RPL = 0x28,
+ CPL_ROCE_FW_NOTIFY = 0x28,
CPL_RX_URG_PKT = 0x29,
CPL_TAG_WRITE_RPL = 0x2A,
+ CPL_RDMA_ASYNC_EVENT = 0x2A,
CPL_ABORT_REQ_RSS = 0x2B,
CPL_RX_URG_NOTIFY = 0x2C,
CPL_ABORT_RPL_RSS = 0x2D,
CPL_SMT_WRITE_RPL = 0x2E,
CPL_TX_DATA_ACK = 0x2F,
+ CPL_RDMA_INV_REQ = 0x2F,
CPL_RX_PHYS_ADDR = 0x30,
CPL_PCMD_READ_RPL = 0x31,
@@ -107,19 +109,30 @@ enum {
CPL_RX_DATA_DIF = 0x4B,
CPL_ERR_NOTIFY = 0x4D,
CPL_RX_TLS_CMP = 0x4E,
+ CPL_T6_TX_DATA_ACK = 0x4F,
CPL_RDMA_READ_REQ = 0x60,
CPL_RX_ISCSI_DIF = 0x60,
+ CPL_RDMA_CQE_EXT = 0x61,
+ CPL_RDMA_CQE_FW_EXT = 0x62,
+ CPL_RDMA_CQE_ERR_EXT = 0x63,
+ CPL_TX_DATA_ACK_XT = 0x64,
+ CPL_ROCE_CQE = 0x68,
+ CPL_ROCE_CQE_FW = 0x69,
+ CPL_ROCE_CQE_ERR = 0x6A,
+
+ CPL_SACK_REQ = 0x70,
CPL_SET_LE_REQ = 0x80,
CPL_PASS_OPEN_REQ6 = 0x81,
CPL_ACT_OPEN_REQ6 = 0x83,
CPL_TX_TLS_PDU = 0x88,
CPL_TX_TLS_SFO = 0x89,
-
CPL_TX_SEC_PDU = 0x8A,
CPL_TX_TLS_ACK = 0x8B,
+ CPL_RCB_UPD = 0x8C,
+ CPL_SGE_FLR_FLUSH = 0xA0,
CPL_RDMA_TERMINATE = 0xA2,
CPL_RDMA_WRITE = 0xA4,
CPL_SGE_EGR_UPDATE = 0xA5,
@@ -138,15 +151,27 @@ enum {
CPL_TLS_DATA = 0xB1,
CPL_ISCSI_DATA = 0xB2,
CPL_FCOE_DATA = 0xB3,
+ CPL_NVMT_DATA = 0xB4,
+ CPL_NVMT_CMP = 0xB5,
+ CPL_NVMT_CMP_IMM = 0xB6,
+ CPL_NVMT_CMP_SRQ = 0xB7,
+ CPL_ROCE_ACK_NAK_REQ = 0xBC,
+ CPL_ROCE_ACK_NAK = 0xBD,
CPL_FW4_MSG = 0xC0,
CPL_FW4_PLD = 0xC1,
+ CPL_RDMA_CQE_SRQ = 0xC2,
+ CPL_ACCELERATOR_ACK = 0xC4,
CPL_FW4_ACK = 0xC3,
+ CPL_RX_PKT_IPSEC = 0xC6,
CPL_SRQ_TABLE_RPL = 0xCC,
+ CPL_TX_DATA_REQ = 0xCF,
+
CPL_RX_PHYS_DSGL = 0xD0,
CPL_FW6_MSG = 0xE0,
CPL_FW6_PLD = 0xE1,
+ CPL_ACCELERATOR_HDR = 0xE8,
CPL_TX_TNL_LSO = 0xEC,
CPL_TX_PKT_LSO = 0xED,
CPL_TX_PKT_XT = 0xEE,
@@ -233,6 +258,8 @@ enum {
ULP_MODE_TCPDDP = 5,
ULP_MODE_FCOE = 6,
ULP_MODE_TLS = 8,
+ ULP_MODE_RDMA_V2 = 10,
+ ULP_MODE_NVMET = 11,
};
enum {
@@ -325,9 +352,14 @@ union opcode_tid {
#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
/* extract the TID from a CPL command */
-#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
+#define GET_TID(cmd) (G_TID(be32toh(OPCODE_TID(cmd))))
#define GET_OPCODE(cmd) ((cmd)->ot.opcode)
+
+/*
+ * Note that this driver splits the 14b opaque atid into an 11b atid and a 3b
+ * cookie that is used to demux replies for shared CPLs.
+ */
/* partitioning of TID fields that also carry a queue id */
#define S_TID_TID 0
#define M_TID_TID 0x7ff
@@ -717,7 +749,7 @@ struct cpl_pass_establish {
struct cpl_pass_accept_req {
RSS_HDR
union opcode_tid ot;
- __be16 rsvd;
+ __be16 ipsecen_outiphdrlen;
__be16 len;
__be32 hdr_len;
__be16 vlan;
@@ -775,6 +807,155 @@ struct cpl_pass_accept_req {
#define V_SYN_INTF(x) ((x) << S_SYN_INTF)
#define G_SYN_INTF(x) (((x) >> S_SYN_INTF) & M_SYN_INTF)
+struct cpl_t7_pass_accept_req {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 ipsecen_to_outiphdrlen;
+ __be16 length;
+ __be32 ethhdrlen_to_rxchannel;
+ __be16 vlantag;
+ __be16 interface_to_mac_ix;
+ __be32 tos_ptid;
+ __be16 tcpmss;
+ __u8 tcpwsc;
+ __u8 tcptmstp_to_tcpunkn;
+};
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_IPSECEN 12
+#define M_CPL_T7_PASS_ACCEPT_REQ_IPSECEN 0x1
+#define V_CPL_T7_PASS_ACCEPT_REQ_IPSECEN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_IPSECEN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_IPSECEN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_IPSECEN) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_IPSECEN)
+#define F_CPL_T7_PASS_ACCEPT_REQ_IPSECEN \
+    V_CPL_T7_PASS_ACCEPT_REQ_IPSECEN(1U)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE 10
+#define M_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE 0x3
+#define V_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE)
+#define G_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN 0
+#define M_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN 0x3ff
+#define V_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN 24
+#define M_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN 0xff
+#define V_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN 14
+#define M_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN 0x3ff
+#define V_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN 8
+#define M_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN 0x3f
+#define V_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL 0
+#define M_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL 0xf
+#define V_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL)
+#define G_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_INTERFACE 12
+#define M_CPL_T7_PASS_ACCEPT_REQ_INTERFACE 0xf
+#define V_CPL_T7_PASS_ACCEPT_REQ_INTERFACE(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_INTERFACE)
+#define G_CPL_T7_PASS_ACCEPT_REQ_INTERFACE(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_INTERFACE) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_INTERFACE)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH 9
+#define M_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH 0x1
+#define V_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH)
+#define G_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH)
+#define F_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH \
+ V_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH(1U)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_MAC_IX 0
+#define M_CPL_T7_PASS_ACCEPT_REQ_MAC_IX 0x1ff
+#define V_CPL_T7_PASS_ACCEPT_REQ_MAC_IX(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_MAC_IX)
+#define G_CPL_T7_PASS_ACCEPT_REQ_MAC_IX(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_MAC_IX) & M_CPL_T7_PASS_ACCEPT_REQ_MAC_IX)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_TOS 24
+#define M_CPL_T7_PASS_ACCEPT_REQ_TOS 0xff
+#define V_CPL_T7_PASS_ACCEPT_REQ_TOS(x) ((x) << S_CPL_T7_PASS_ACCEPT_REQ_TOS)
+#define G_CPL_T7_PASS_ACCEPT_REQ_TOS(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_TOS) & M_CPL_T7_PASS_ACCEPT_REQ_TOS)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_PTID 0
+#define M_CPL_T7_PASS_ACCEPT_REQ_PTID 0xffffff
+#define V_CPL_T7_PASS_ACCEPT_REQ_PTID(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_PTID)
+#define G_CPL_T7_PASS_ACCEPT_REQ_PTID(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_PTID) & M_CPL_T7_PASS_ACCEPT_REQ_PTID)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP 7
+#define M_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP 0x1
+#define V_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP)
+#define G_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP)
+#define F_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP \
+ V_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP(1U)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_TCPSACK 6
+#define M_CPL_T7_PASS_ACCEPT_REQ_TCPSACK 0x1
+#define V_CPL_T7_PASS_ACCEPT_REQ_TCPSACK(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_TCPSACK)
+#define G_CPL_T7_PASS_ACCEPT_REQ_TCPSACK(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_TCPSACK) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_TCPSACK)
+#define F_CPL_T7_PASS_ACCEPT_REQ_TCPSACK \
+ V_CPL_T7_PASS_ACCEPT_REQ_TCPSACK(1U)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_TCPECN 5
+#define M_CPL_T7_PASS_ACCEPT_REQ_TCPECN 0x1
+#define V_CPL_T7_PASS_ACCEPT_REQ_TCPECN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_TCPECN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_TCPECN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_TCPECN) & M_CPL_T7_PASS_ACCEPT_REQ_TCPECN)
+#define F_CPL_T7_PASS_ACCEPT_REQ_TCPECN \
+ V_CPL_T7_PASS_ACCEPT_REQ_TCPECN(1U)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN 4
+#define M_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN 0x1
+#define V_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN)
+#define F_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN \
+ V_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN(1U)
+
struct cpl_pass_accept_rpl {
WR_HDR;
union opcode_tid ot;
@@ -810,6 +991,7 @@ struct cpl_act_open_req {
#define M_FILTER_TUPLE 0xFFFFFFFFFF
#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE)
#define G_FILTER_TUPLE(x) (((x) >> S_FILTER_TUPLE) & M_FILTER_TUPLE)
+
struct cpl_t5_act_open_req {
WR_HDR;
union opcode_tid ot;
@@ -843,6 +1025,26 @@ struct cpl_t6_act_open_req {
#define V_AOPEN_FCOEMASK(x) ((x) << S_AOPEN_FCOEMASK)
#define F_AOPEN_FCOEMASK V_AOPEN_FCOEMASK(1U)
+struct cpl_t7_act_open_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be64 opt0;
+ __be32 iss;
+ __be32 opt2;
+ __be64 params;
+ __be32 rsvd2;
+ __be32 opt3;
+};
+
+#define S_T7_FILTER_TUPLE 1
+#define M_T7_FILTER_TUPLE 0x7FFFFFFFFFFFFFFFULL
+#define V_T7_FILTER_TUPLE(x) ((x) << S_T7_FILTER_TUPLE)
+#define G_T7_FILTER_TUPLE(x) (((x) >> S_T7_FILTER_TUPLE) & M_T7_FILTER_TUPLE)
+
struct cpl_act_open_req6 {
WR_HDR;
union opcode_tid ot;
@@ -889,6 +1091,23 @@ struct cpl_t6_act_open_req6 {
__be32 opt3;
};
+struct cpl_t7_act_open_req6 {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be64 local_ip_hi;
+ __be64 local_ip_lo;
+ __be64 peer_ip_hi;
+ __be64 peer_ip_lo;
+ __be64 opt0;
+ __be32 iss;
+ __be32 opt2;
+ __be64 params;
+ __be32 rsvd2;
+ __be32 opt3;
+};
+
struct cpl_act_open_rpl {
RSS_HDR
union opcode_tid ot;
@@ -921,8 +1140,7 @@ struct cpl_get_tcb {
WR_HDR;
union opcode_tid ot;
__be16 reply_ctrl;
- __u8 rsvd;
- __u8 cookie;
+ __be16 cookie;
};
/* cpl_get_tcb.reply_ctrl fields */
@@ -931,10 +1149,20 @@ struct cpl_get_tcb {
#define V_QUEUENO(x) ((x) << S_QUEUENO)
#define G_QUEUENO(x) (((x) >> S_QUEUENO) & M_QUEUENO)
+#define S_T7_QUEUENO 0
+#define M_T7_QUEUENO 0xFFF
+#define V_T7_QUEUENO(x) ((x) << S_T7_QUEUENO)
+#define G_T7_QUEUENO(x) (((x) >> S_T7_QUEUENO) & M_T7_QUEUENO)
+
#define S_REPLY_CHAN 14
#define V_REPLY_CHAN(x) ((x) << S_REPLY_CHAN)
#define F_REPLY_CHAN V_REPLY_CHAN(1U)
+#define S_T7_REPLY_CHAN 12
+#define M_T7_REPLY_CHAN 0x7
+#define V_T7_REPLY_CHAN(x) ((x) << S_T7_REPLY_CHAN)
+#define G_T7_REPLY_CHAN(x) (((x) >> S_T7_REPLY_CHAN) & M_T7_REPLY_CHAN)
+
#define S_NO_REPLY 15
#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
#define F_NO_REPLY V_NO_REPLY(1U)
@@ -1018,6 +1246,40 @@ struct cpl_close_listsvr_req {
#define V_LISTSVR_IPV6(x) ((x) << S_LISTSVR_IPV6)
#define F_LISTSVR_IPV6 V_LISTSVR_IPV6(1U)
+struct cpl_t7_close_listsvr_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 noreply_to_queue;
+ __be16 r2;
+};
+
+#define S_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY 15
+#define M_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY 0x1
+#define V_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY(x) \
+ ((x) << S_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY)
+#define G_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY(x) \
+ (((x) >> S_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY) & \
+ M_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY)
+#define F_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY \
+ V_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY(1U)
+
+#define S_CPL_T7_CLOSE_LISTSVR_REQ_IPV6 14
+#define M_CPL_T7_CLOSE_LISTSVR_REQ_IPV6 0x1
+#define V_CPL_T7_CLOSE_LISTSVR_REQ_IPV6(x) \
+ ((x) << S_CPL_T7_CLOSE_LISTSVR_REQ_IPV6)
+#define G_CPL_T7_CLOSE_LISTSVR_REQ_IPV6(x) \
+ (((x) >> S_CPL_T7_CLOSE_LISTSVR_REQ_IPV6) & M_CPL_T7_CLOSE_LISTSVR_REQ_IPV6)
+#define F_CPL_T7_CLOSE_LISTSVR_REQ_IPV6 \
+ V_CPL_T7_CLOSE_LISTSVR_REQ_IPV6(1U)
+
+#define S_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE 0
+#define M_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE 0xfff
+#define V_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE(x) \
+ ((x) << S_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE)
+#define G_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE(x) \
+ (((x) >> S_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE) & \
+ M_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE)
+
struct cpl_close_listsvr_rpl {
RSS_HDR
union opcode_tid ot;
@@ -1250,6 +1512,71 @@ struct cpl_tx_data_ack {
__be32 snd_una;
};
+struct cpl_tx_data_ack_xt {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 snd_una;
+ __be32 snd_end;
+ __be32 snd_nxt;
+ __be32 snd_adv;
+ __be16 rttvar;
+ __be16 srtt;
+ __be32 extinfoh[2];
+ __be32 extinfol[2];
+};
+
+struct cpl_tx_data_req {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 snd_una;
+ __be32 snd_end;
+ __be32 snd_nxt;
+ __be32 snd_adv;
+ __be16 rttvar;
+ __be16 srtt;
+};
+
+#define S_CPL_TX_DATA_REQ_TID 0
+#define M_CPL_TX_DATA_REQ_TID 0xffffff
+#define V_CPL_TX_DATA_REQ_TID(x) ((x) << S_CPL_TX_DATA_REQ_TID)
+#define G_CPL_TX_DATA_REQ_TID(x) \
+ (((x) >> S_CPL_TX_DATA_REQ_TID) & M_CPL_TX_DATA_REQ_TID)
+
+struct cpl_sack_req {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 snd_una;
+ __be32 snd_end;
+ __be32 snd_nxt;
+ __be32 snd_adv;
+ __be16 rttvar;
+ __be16 srtt;
+ __be32 block1[2];
+ __be32 block2[2];
+ __be32 block3[2];
+};
+
+struct cpl_sge_flr_flush {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 cookievalue_cookiesel;
+};
+
+#define S_CPL_SGE_FLR_FLUSH_COOKIEVALUE 4
+#define M_CPL_SGE_FLR_FLUSH_COOKIEVALUE 0x3ff
+#define V_CPL_SGE_FLR_FLUSH_COOKIEVALUE(x) \
+ ((x) << S_CPL_SGE_FLR_FLUSH_COOKIEVALUE)
+#define G_CPL_SGE_FLR_FLUSH_COOKIEVALUE(x) \
+ (((x) >> S_CPL_SGE_FLR_FLUSH_COOKIEVALUE) & \
+ M_CPL_SGE_FLR_FLUSH_COOKIEVALUE)
+
+#define S_CPL_SGE_FLR_FLUSH_COOKIESEL 0
+#define M_CPL_SGE_FLR_FLUSH_COOKIESEL 0xf
+#define V_CPL_SGE_FLR_FLUSH_COOKIESEL(x) \
+ ((x) << S_CPL_SGE_FLR_FLUSH_COOKIESEL)
+#define G_CPL_SGE_FLR_FLUSH_COOKIESEL(x) \
+ (((x) >> S_CPL_SGE_FLR_FLUSH_COOKIESEL) & M_CPL_SGE_FLR_FLUSH_COOKIESEL)
+
struct cpl_wr_ack { /* XXX */
RSS_HDR
union opcode_tid ot;
@@ -1271,8 +1598,6 @@ struct cpl_tx_pkt {
struct cpl_tx_pkt_core c;
};
-#define cpl_tx_pkt_xt cpl_tx_pkt
-
/* cpl_tx_pkt_core.ctrl0 fields */
#define S_TXPKT_VF 0
#define M_TXPKT_VF 0xFF
@@ -1404,6 +1729,261 @@ struct cpl_tx_pkt {
#define V_TXPKT_L4CSUM_DIS(x) ((__u64)(x) << S_TXPKT_L4CSUM_DIS)
#define F_TXPKT_L4CSUM_DIS V_TXPKT_L4CSUM_DIS(1ULL)
+struct cpl_tx_pkt_xt {
+ WR_HDR;
+ __be32 ctrl0;
+ __be16 pack;
+ __be16 len;
+ __be32 ctrl1;
+ __be32 ctrl2;
+};
+
+/* cpl_tx_pkt_xt.ctrl0 fields */
+#define S_CPL_TX_PKT_XT_OPCODE 24
+#define M_CPL_TX_PKT_XT_OPCODE 0xff
+#define V_CPL_TX_PKT_XT_OPCODE(x) ((x) << S_CPL_TX_PKT_XT_OPCODE)
+#define G_CPL_TX_PKT_XT_OPCODE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_OPCODE) & M_CPL_TX_PKT_XT_OPCODE)
+
+#define S_CPL_TX_PKT_XT_TIMESTAMP 23
+#define M_CPL_TX_PKT_XT_TIMESTAMP 0x1
+#define V_CPL_TX_PKT_XT_TIMESTAMP(x) ((x) << S_CPL_TX_PKT_XT_TIMESTAMP)
+#define G_CPL_TX_PKT_XT_TIMESTAMP(x) \
+ (((x) >> S_CPL_TX_PKT_XT_TIMESTAMP) & M_CPL_TX_PKT_XT_TIMESTAMP)
+#define F_CPL_TX_PKT_XT_TIMESTAMP V_CPL_TX_PKT_XT_TIMESTAMP(1U)
+
+#define S_CPL_TX_PKT_XT_STATDISABLE 22
+#define M_CPL_TX_PKT_XT_STATDISABLE 0x1
+#define V_CPL_TX_PKT_XT_STATDISABLE(x) ((x) << S_CPL_TX_PKT_XT_STATDISABLE)
+#define G_CPL_TX_PKT_XT_STATDISABLE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_STATDISABLE) & M_CPL_TX_PKT_XT_STATDISABLE)
+#define F_CPL_TX_PKT_XT_STATDISABLE V_CPL_TX_PKT_XT_STATDISABLE(1U)
+
+#define S_CPL_TX_PKT_XT_FCSDIS 21
+#define M_CPL_TX_PKT_XT_FCSDIS 0x1
+#define V_CPL_TX_PKT_XT_FCSDIS(x) ((x) << S_CPL_TX_PKT_XT_FCSDIS)
+#define G_CPL_TX_PKT_XT_FCSDIS(x) \
+ (((x) >> S_CPL_TX_PKT_XT_FCSDIS) & M_CPL_TX_PKT_XT_FCSDIS)
+#define F_CPL_TX_PKT_XT_FCSDIS V_CPL_TX_PKT_XT_FCSDIS(1U)
+
+#define S_CPL_TX_PKT_XT_STATSPECIAL 20
+#define M_CPL_TX_PKT_XT_STATSPECIAL 0x1
+#define V_CPL_TX_PKT_XT_STATSPECIAL(x) ((x) << S_CPL_TX_PKT_XT_STATSPECIAL)
+#define G_CPL_TX_PKT_XT_STATSPECIAL(x) \
+ (((x) >> S_CPL_TX_PKT_XT_STATSPECIAL) & M_CPL_TX_PKT_XT_STATSPECIAL)
+#define F_CPL_TX_PKT_XT_STATSPECIAL V_CPL_TX_PKT_XT_STATSPECIAL(1U)
+
+#define S_CPL_TX_PKT_XT_INTERFACE 16
+#define M_CPL_TX_PKT_XT_INTERFACE 0xf
+#define V_CPL_TX_PKT_XT_INTERFACE(x) ((x) << S_CPL_TX_PKT_XT_INTERFACE)
+#define G_CPL_TX_PKT_XT_INTERFACE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_INTERFACE) & M_CPL_TX_PKT_XT_INTERFACE)
+
+#define S_CPL_TX_PKT_XT_OVLAN 15
+#define M_CPL_TX_PKT_XT_OVLAN 0x1
+#define V_CPL_TX_PKT_XT_OVLAN(x) ((x) << S_CPL_TX_PKT_XT_OVLAN)
+#define G_CPL_TX_PKT_XT_OVLAN(x) \
+ (((x) >> S_CPL_TX_PKT_XT_OVLAN) & M_CPL_TX_PKT_XT_OVLAN)
+#define F_CPL_TX_PKT_XT_OVLAN V_CPL_TX_PKT_XT_OVLAN(1U)
+
+#define S_CPL_TX_PKT_XT_OVLANIDX 12
+#define M_CPL_TX_PKT_XT_OVLANIDX 0x7
+#define V_CPL_TX_PKT_XT_OVLANIDX(x) ((x) << S_CPL_TX_PKT_XT_OVLANIDX)
+#define G_CPL_TX_PKT_XT_OVLANIDX(x) \
+ (((x) >> S_CPL_TX_PKT_XT_OVLANIDX) & M_CPL_TX_PKT_XT_OVLANIDX)
+
+#define S_CPL_TX_PKT_XT_VFVALID 11
+#define M_CPL_TX_PKT_XT_VFVALID 0x1
+#define V_CPL_TX_PKT_XT_VFVALID(x) ((x) << S_CPL_TX_PKT_XT_VFVALID)
+#define G_CPL_TX_PKT_XT_VFVALID(x) \
+ (((x) >> S_CPL_TX_PKT_XT_VFVALID) & M_CPL_TX_PKT_XT_VFVALID)
+#define F_CPL_TX_PKT_XT_VFVALID V_CPL_TX_PKT_XT_VFVALID(1U)
+
+#define S_CPL_TX_PKT_XT_PF 8
+#define M_CPL_TX_PKT_XT_PF 0x7
+#define V_CPL_TX_PKT_XT_PF(x) ((x) << S_CPL_TX_PKT_XT_PF)
+#define G_CPL_TX_PKT_XT_PF(x) \
+ (((x) >> S_CPL_TX_PKT_XT_PF) & M_CPL_TX_PKT_XT_PF)
+
+#define S_CPL_TX_PKT_XT_VF 0
+#define M_CPL_TX_PKT_XT_VF 0xff
+#define V_CPL_TX_PKT_XT_VF(x) ((x) << S_CPL_TX_PKT_XT_VF)
+#define G_CPL_TX_PKT_XT_VF(x) \
+ (((x) >> S_CPL_TX_PKT_XT_VF) & M_CPL_TX_PKT_XT_VF)
+
+/* cpl_tx_pkt_xt.ctrl1 fields */
+#define S_CPL_TX_PKT_XT_L4CHKDISABLE 31
+#define M_CPL_TX_PKT_XT_L4CHKDISABLE 0x1
+#define V_CPL_TX_PKT_XT_L4CHKDISABLE(x) ((x) << S_CPL_TX_PKT_XT_L4CHKDISABLE)
+#define G_CPL_TX_PKT_XT_L4CHKDISABLE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_L4CHKDISABLE) & M_CPL_TX_PKT_XT_L4CHKDISABLE)
+#define F_CPL_TX_PKT_XT_L4CHKDISABLE V_CPL_TX_PKT_XT_L4CHKDISABLE(1U)
+
+#define S_CPL_TX_PKT_XT_L3CHKDISABLE 30
+#define M_CPL_TX_PKT_XT_L3CHKDISABLE 0x1
+#define V_CPL_TX_PKT_XT_L3CHKDISABLE(x) ((x) << S_CPL_TX_PKT_XT_L3CHKDISABLE)
+#define G_CPL_TX_PKT_XT_L3CHKDISABLE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_L3CHKDISABLE) & M_CPL_TX_PKT_XT_L3CHKDISABLE)
+#define F_CPL_TX_PKT_XT_L3CHKDISABLE V_CPL_TX_PKT_XT_L3CHKDISABLE(1U)
+
+#define S_CPL_TX_PKT_XT_OUTL4CHKEN 29
+#define M_CPL_TX_PKT_XT_OUTL4CHKEN 0x1
+#define V_CPL_TX_PKT_XT_OUTL4CHKEN(x) ((x) << S_CPL_TX_PKT_XT_OUTL4CHKEN)
+#define G_CPL_TX_PKT_XT_OUTL4CHKEN(x) \
+ (((x) >> S_CPL_TX_PKT_XT_OUTL4CHKEN) & M_CPL_TX_PKT_XT_OUTL4CHKEN)
+#define F_CPL_TX_PKT_XT_OUTL4CHKEN V_CPL_TX_PKT_XT_OUTL4CHKEN(1U)
+
+#define S_CPL_TX_PKT_XT_IVLAN 28
+#define M_CPL_TX_PKT_XT_IVLAN 0x1
+#define V_CPL_TX_PKT_XT_IVLAN(x) ((x) << S_CPL_TX_PKT_XT_IVLAN)
+#define G_CPL_TX_PKT_XT_IVLAN(x) \
+ (((x) >> S_CPL_TX_PKT_XT_IVLAN) & M_CPL_TX_PKT_XT_IVLAN)
+#define F_CPL_TX_PKT_XT_IVLAN V_CPL_TX_PKT_XT_IVLAN(1U)
+
+#define S_CPL_TX_PKT_XT_IVLANTAG 12
+#define M_CPL_TX_PKT_XT_IVLANTAG 0xffff
+#define V_CPL_TX_PKT_XT_IVLANTAG(x) ((x) << S_CPL_TX_PKT_XT_IVLANTAG)
+#define G_CPL_TX_PKT_XT_IVLANTAG(x) \
+ (((x) >> S_CPL_TX_PKT_XT_IVLANTAG) & M_CPL_TX_PKT_XT_IVLANTAG)
+
+#define S_CPL_TX_PKT_XT_CHKTYPE 8
+#define M_CPL_TX_PKT_XT_CHKTYPE 0xf
+#define V_CPL_TX_PKT_XT_CHKTYPE(x) ((x) << S_CPL_TX_PKT_XT_CHKTYPE)
+#define G_CPL_TX_PKT_XT_CHKTYPE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_CHKTYPE) & M_CPL_TX_PKT_XT_CHKTYPE)
+
+#define S_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI 0
+#define M_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI 0xff
+#define V_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI(x) \
+ ((x) << S_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI)
+#define G_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI(x) \
+ (((x) >> S_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI) & \
+ M_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI)
+
+#define S_CPL_TX_PKT_XT_ETHHDRLEN 0
+#define M_CPL_TX_PKT_XT_ETHHDRLEN 0xff
+#define V_CPL_TX_PKT_XT_ETHHDRLEN(x) ((x) << S_CPL_TX_PKT_XT_ETHHDRLEN)
+#define G_CPL_TX_PKT_XT_ETHHDRLEN(x) \
+ (((x) >> S_CPL_TX_PKT_XT_ETHHDRLEN) & M_CPL_TX_PKT_XT_ETHHDRLEN)
+
+#define S_CPL_TX_PKT_XT_ROCECHKINSMODE 6
+#define M_CPL_TX_PKT_XT_ROCECHKINSMODE 0x3
+#define V_CPL_TX_PKT_XT_ROCECHKINSMODE(x) \
+ ((x) << S_CPL_TX_PKT_XT_ROCECHKINSMODE)
+#define G_CPL_TX_PKT_XT_ROCECHKINSMODE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_ROCECHKINSMODE) & M_CPL_TX_PKT_XT_ROCECHKINSMODE)
+
+#define S_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI 0
+#define M_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI 0x3f
+#define V_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI(x) \
+ ((x) << S_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI)
+#define G_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI(x) \
+ (((x) >> S_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI) & \
+ M_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI)
+
+#define S_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO 30
+#define M_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO 0x3
+#define V_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO(x) \
+ ((x) << S_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO)
+#define G_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO(x) \
+ (((x) >> S_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO) & \
+ M_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO)
+
+/* cpl_tx_pkt_xt.ctrl2 fields */
+#define S_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO 30
+#define M_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO 0x3
+#define V_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO(x) \
+ ((x) << S_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO)
+#define G_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO(x) \
+ (((x) >> S_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO) & \
+ M_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO)
+
+#define S_CPL_TX_PKT_XT_CHKSTARTOFFSET 20
+#define M_CPL_TX_PKT_XT_CHKSTARTOFFSET 0x3ff
+#define V_CPL_TX_PKT_XT_CHKSTARTOFFSET(x) \
+ ((x) << S_CPL_TX_PKT_XT_CHKSTARTOFFSET)
+#define G_CPL_TX_PKT_XT_CHKSTARTOFFSET(x) \
+ (((x) >> S_CPL_TX_PKT_XT_CHKSTARTOFFSET) & M_CPL_TX_PKT_XT_CHKSTARTOFFSET)
+
+#define S_CPL_TX_PKT_XT_IPHDRLEN 20
+#define M_CPL_TX_PKT_XT_IPHDRLEN 0xfff
+#define V_CPL_TX_PKT_XT_IPHDRLEN(x) ((x) << S_CPL_TX_PKT_XT_IPHDRLEN)
+#define G_CPL_TX_PKT_XT_IPHDRLEN(x) \
+ (((x) >> S_CPL_TX_PKT_XT_IPHDRLEN) & M_CPL_TX_PKT_XT_IPHDRLEN)
+
+#define S_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET 20
+#define M_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET 0x3ff
+#define V_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET(x) \
+ ((x) << S_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET)
+#define G_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET(x) \
+ (((x) >> S_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET) & \
+ M_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET)
+
+#define S_CPL_TX_PKT_XT_CHKSTOPOFFSET 12
+#define M_CPL_TX_PKT_XT_CHKSTOPOFFSET 0xff
+#define V_CPL_TX_PKT_XT_CHKSTOPOFFSET(x) \
+ ((x) << S_CPL_TX_PKT_XT_CHKSTOPOFFSET)
+#define G_CPL_TX_PKT_XT_CHKSTOPOFFSET(x) \
+ (((x) >> S_CPL_TX_PKT_XT_CHKSTOPOFFSET) & M_CPL_TX_PKT_XT_CHKSTOPOFFSET)
+
+#define S_CPL_TX_PKT_XT_IPSECIDX 0
+#define M_CPL_TX_PKT_XT_IPSECIDX 0xfff
+#define V_CPL_TX_PKT_XT_IPSECIDX(x) ((x) << S_CPL_TX_PKT_XT_IPSECIDX)
+#define G_CPL_TX_PKT_XT_IPSECIDX(x) \
+ (((x) >> S_CPL_TX_PKT_XT_IPSECIDX) & M_CPL_TX_PKT_XT_IPSECIDX)
+
+#define S_CPL_TX_TNL_LSO_BTH_OPCODE 24
+#define M_CPL_TX_TNL_LSO_BTH_OPCODE 0xff
+#define V_CPL_TX_TNL_LSO_BTH_OPCODE(x) ((x) << S_CPL_TX_TNL_LSO_BTH_OPCODE)
+#define G_CPL_TX_TNL_LSO_BTH_OPCODE(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_BTH_OPCODE) & \
+ M_CPL_TX_TNL_LSO_BTH_OPCODE)
+
+#define S_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN 0
+#define M_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN 0xffffff
+#define V_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN(x) \
+ ((x) << S_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN)
+#define G_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN) & \
+ M_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN)
+
+#define S_CPL_TX_TNL_LSO_MSS_TVER 8
+#define M_CPL_TX_TNL_LSO_MSS_TVER 0xf
+#define V_CPL_TX_TNL_LSO_MSS_TVER(x) ((x) << S_CPL_TX_TNL_LSO_MSS_TVER)
+#define G_CPL_TX_TNL_LSO_MSS_TVER(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_TVER) & M_CPL_TX_TNL_LSO_MSS_TVER)
+
+#define S_CPL_TX_TNL_LSO_MSS_M 7
+#define M_CPL_TX_TNL_LSO_MSS_M 0x1
+#define V_CPL_TX_TNL_LSO_MSS_M(x) ((x) << S_CPL_TX_TNL_LSO_MSS_M)
+#define G_CPL_TX_TNL_LSO_MSS_M(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_M) & M_CPL_TX_TNL_LSO_MSS_M)
+
+#define S_CPL_TX_TNL_LSO_MSS_PMTU 4
+#define M_CPL_TX_TNL_LSO_MSS_PMTU 0x7
+#define V_CPL_TX_TNL_LSO_MSS_PMTU(x) ((x) << S_CPL_TX_TNL_LSO_MSS_PMTU)
+#define G_CPL_TX_TNL_LSO_MSS_PMTU(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_PMTU) & M_CPL_TX_TNL_LSO_MSS_PMTU)
+
+#define S_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR 3
+#define M_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR 0x1
+#define V_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR(x) \
+ ((x) << S_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR)
+#define G_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR) & M_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR)
+
+#define S_CPL_TX_TNL_LSO_MSS_ACKREQ 1
+#define M_CPL_TX_TNL_LSO_MSS_ACKREQ 0x3
+#define V_CPL_TX_TNL_LSO_MSS_ACKREQ(x) ((x) << S_CPL_TX_TNL_LSO_MSS_ACKREQ)
+#define G_CPL_TX_TNL_LSO_MSS_ACKREQ(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_ACKREQ) & M_CPL_TX_TNL_LSO_MSS_ACKREQ)
+
+#define S_CPL_TX_TNL_LSO_MSS_SE 0
+#define M_CPL_TX_TNL_LSO_MSS_SE 0x1
+#define V_CPL_TX_TNL_LSO_MSS_SE(x) ((x) << S_CPL_TX_TNL_LSO_MSS_SE)
+#define G_CPL_TX_TNL_LSO_MSS_SE(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_SE) & M_CPL_TX_TNL_LSO_MSS_SE)
+
struct cpl_tx_pkt_lso_core {
__be32 lso_ctrl;
__be16 ipid_ofst;
@@ -1600,6 +2180,100 @@ struct cpl_tx_data_iso {
(((x) >> S_CPL_TX_DATA_ISO_SEGLEN_OFFSET) & \
M_CPL_TX_DATA_ISO_SEGLEN_OFFSET)
+struct cpl_t7_tx_data_iso {
+ __be32 op_to_scsi;
+ __u8 nvme_tcp_pkd;
+ __u8 ahs;
+ __be16 mpdu;
+ __be32 burst;
+ __be32 size;
+ __be32 num_pi_bytes_seglen_offset;
+ __be32 datasn_offset;
+ __be32 buffer_offset;
+ __be32 reserved3;
+};
+
+#define S_CPL_T7_TX_DATA_ISO_OPCODE 24
+#define M_CPL_T7_TX_DATA_ISO_OPCODE 0xff
+#define V_CPL_T7_TX_DATA_ISO_OPCODE(x) ((x) << S_CPL_T7_TX_DATA_ISO_OPCODE)
+#define G_CPL_T7_TX_DATA_ISO_OPCODE(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_OPCODE) & M_CPL_T7_TX_DATA_ISO_OPCODE)
+
+#define S_CPL_T7_TX_DATA_ISO_FIRST 23
+#define M_CPL_T7_TX_DATA_ISO_FIRST 0x1
+#define V_CPL_T7_TX_DATA_ISO_FIRST(x) ((x) << S_CPL_T7_TX_DATA_ISO_FIRST)
+#define G_CPL_T7_TX_DATA_ISO_FIRST(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_FIRST) & M_CPL_T7_TX_DATA_ISO_FIRST)
+#define F_CPL_T7_TX_DATA_ISO_FIRST V_CPL_T7_TX_DATA_ISO_FIRST(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_LAST 22
+#define M_CPL_T7_TX_DATA_ISO_LAST 0x1
+#define V_CPL_T7_TX_DATA_ISO_LAST(x) ((x) << S_CPL_T7_TX_DATA_ISO_LAST)
+#define G_CPL_T7_TX_DATA_ISO_LAST(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_LAST) & M_CPL_T7_TX_DATA_ISO_LAST)
+#define F_CPL_T7_TX_DATA_ISO_LAST V_CPL_T7_TX_DATA_ISO_LAST(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_CPLHDRLEN 21
+#define M_CPL_T7_TX_DATA_ISO_CPLHDRLEN 0x1
+#define V_CPL_T7_TX_DATA_ISO_CPLHDRLEN(x) \
+ ((x) << S_CPL_T7_TX_DATA_ISO_CPLHDRLEN)
+#define G_CPL_T7_TX_DATA_ISO_CPLHDRLEN(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_CPLHDRLEN) & M_CPL_T7_TX_DATA_ISO_CPLHDRLEN)
+#define F_CPL_T7_TX_DATA_ISO_CPLHDRLEN V_CPL_T7_TX_DATA_ISO_CPLHDRLEN(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_HDRCRC 20
+#define M_CPL_T7_TX_DATA_ISO_HDRCRC 0x1
+#define V_CPL_T7_TX_DATA_ISO_HDRCRC(x) ((x) << S_CPL_T7_TX_DATA_ISO_HDRCRC)
+#define G_CPL_T7_TX_DATA_ISO_HDRCRC(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_HDRCRC) & M_CPL_T7_TX_DATA_ISO_HDRCRC)
+#define F_CPL_T7_TX_DATA_ISO_HDRCRC V_CPL_T7_TX_DATA_ISO_HDRCRC(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_PLDCRC 19
+#define M_CPL_T7_TX_DATA_ISO_PLDCRC 0x1
+#define V_CPL_T7_TX_DATA_ISO_PLDCRC(x) ((x) << S_CPL_T7_TX_DATA_ISO_PLDCRC)
+#define G_CPL_T7_TX_DATA_ISO_PLDCRC(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_PLDCRC) & M_CPL_T7_TX_DATA_ISO_PLDCRC)
+#define F_CPL_T7_TX_DATA_ISO_PLDCRC V_CPL_T7_TX_DATA_ISO_PLDCRC(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_IMMEDIATE 18
+#define M_CPL_T7_TX_DATA_ISO_IMMEDIATE 0x1
+#define V_CPL_T7_TX_DATA_ISO_IMMEDIATE(x) \
+ ((x) << S_CPL_T7_TX_DATA_ISO_IMMEDIATE)
+#define G_CPL_T7_TX_DATA_ISO_IMMEDIATE(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_IMMEDIATE) & M_CPL_T7_TX_DATA_ISO_IMMEDIATE)
+#define F_CPL_T7_TX_DATA_ISO_IMMEDIATE \
+ V_CPL_T7_TX_DATA_ISO_IMMEDIATE(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_SCSI 16
+#define M_CPL_T7_TX_DATA_ISO_SCSI 0x3
+#define V_CPL_T7_TX_DATA_ISO_SCSI(x) ((x) << S_CPL_T7_TX_DATA_ISO_SCSI)
+#define G_CPL_T7_TX_DATA_ISO_SCSI(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_SCSI) & M_CPL_T7_TX_DATA_ISO_SCSI)
+
+#define S_CPL_T7_TX_DATA_ISO_NVME_TCP 0
+#define M_CPL_T7_TX_DATA_ISO_NVME_TCP 0x1
+#define V_CPL_T7_TX_DATA_ISO_NVME_TCP(x) \
+ ((x) << S_CPL_T7_TX_DATA_ISO_NVME_TCP)
+#define G_CPL_T7_TX_DATA_ISO_NVME_TCP(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_NVME_TCP) & M_CPL_T7_TX_DATA_ISO_NVME_TCP)
+#define F_CPL_T7_TX_DATA_ISO_NVME_TCP \
+ V_CPL_T7_TX_DATA_ISO_NVME_TCP(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_NUMPIBYTES 24
+#define M_CPL_T7_TX_DATA_ISO_NUMPIBYTES 0xff
+#define V_CPL_T7_TX_DATA_ISO_NUMPIBYTES(x) \
+ ((x) << S_CPL_T7_TX_DATA_ISO_NUMPIBYTES)
+#define G_CPL_T7_TX_DATA_ISO_NUMPIBYTES(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_NUMPIBYTES) & M_CPL_T7_TX_DATA_ISO_NUMPIBYTES)
+
+#define S_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET 0
+#define M_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET 0xffffff
+#define V_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET(x) \
+ ((x) << S_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET)
+#define G_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET) & \
+ M_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET)
+
struct cpl_iscsi_hdr {
RSS_HDR
union opcode_tid ot;
@@ -2324,6 +2998,18 @@ struct cpl_l2t_write_req {
#define V_L2T_W_NOREPLY(x) ((x) << S_L2T_W_NOREPLY)
#define F_L2T_W_NOREPLY V_L2T_W_NOREPLY(1U)
+
+/* cpl_l2t_write_req.vlan fields */
+#define S_L2T_VLANTAG 0
+#define M_L2T_VLANTAG 0xFFF
+#define V_L2T_VLANTAG(x) ((x) << S_L2T_VLANTAG)
+#define G_L2T_VLANTAG(x) (((x) >> S_L2T_VLANTAG) & M_L2T_VLANTAG)
+
+#define S_L2T_VLANPRIO 13
+#define M_L2T_VLANPRIO 0x7
+#define V_L2T_VLANPRIO(x) ((x) << S_L2T_VLANPRIO)
+#define G_L2T_VLANPRIO(x) (((x) >> S_L2T_VLANPRIO) & M_L2T_VLANPRIO)
+
#define CPL_L2T_VLAN_NONE 0xfff
struct cpl_l2t_write_rpl {
@@ -2400,6 +3086,175 @@ struct cpl_srq_table_rpl {
#define V_SRQT_IDX(x) ((x) << S_SRQT_IDX)
#define G_SRQT_IDX(x) (((x) >> S_SRQT_IDX) & M_SRQT_IDX)
+/*
+ * T7 shared receive queue (SRQ) table request.  Packed-field layouts are
+ * defined by the S_/M_/V_/G_ accessor macros that follow.
+ */
+struct cpl_t7_srq_table_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 noreply_to_index;	/* NOREPLY(31) WRITE(30) INCR(29:28) OVER(27:24) LIMITUPD(23) INDEX(9:0) */
+	__be16 srqlimit_pkd;		/* SRQLIMIT in bits 5:0 */
+	__be16 cqid;
+	__be16 xdid;
+	__be16 pdid;
+	__be32 quelen_quebase;		/* QUELEN(31:28) | QUEBASE(25:0) */
+	__be32 curmsn_maxmsn;		/* CURMSN(31:16) | MAXMSN(15:0) */
+};
+
+#define S_CPL_T7_SRQ_TABLE_REQ_NOREPLY 31
+#define M_CPL_T7_SRQ_TABLE_REQ_NOREPLY 0x1
+#define V_CPL_T7_SRQ_TABLE_REQ_NOREPLY(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_NOREPLY)
+#define G_CPL_T7_SRQ_TABLE_REQ_NOREPLY(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_NOREPLY) & M_CPL_T7_SRQ_TABLE_REQ_NOREPLY)
+#define F_CPL_T7_SRQ_TABLE_REQ_NOREPLY \
+ V_CPL_T7_SRQ_TABLE_REQ_NOREPLY(1U)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_WRITE 30
+#define M_CPL_T7_SRQ_TABLE_REQ_WRITE 0x1
+#define V_CPL_T7_SRQ_TABLE_REQ_WRITE(x) ((x) << S_CPL_T7_SRQ_TABLE_REQ_WRITE)
+#define G_CPL_T7_SRQ_TABLE_REQ_WRITE(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_WRITE) & M_CPL_T7_SRQ_TABLE_REQ_WRITE)
+#define F_CPL_T7_SRQ_TABLE_REQ_WRITE V_CPL_T7_SRQ_TABLE_REQ_WRITE(1U)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_INCR 28
+#define M_CPL_T7_SRQ_TABLE_REQ_INCR 0x3
+#define V_CPL_T7_SRQ_TABLE_REQ_INCR(x) ((x) << S_CPL_T7_SRQ_TABLE_REQ_INCR)
+#define G_CPL_T7_SRQ_TABLE_REQ_INCR(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_INCR) & M_CPL_T7_SRQ_TABLE_REQ_INCR)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_OVER 24
+#define M_CPL_T7_SRQ_TABLE_REQ_OVER 0xf
+#define V_CPL_T7_SRQ_TABLE_REQ_OVER(x) ((x) << S_CPL_T7_SRQ_TABLE_REQ_OVER)
+#define G_CPL_T7_SRQ_TABLE_REQ_OVER(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_OVER) & M_CPL_T7_SRQ_TABLE_REQ_OVER)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_LIMITUPD 23
+#define M_CPL_T7_SRQ_TABLE_REQ_LIMITUPD 0x1
+#define V_CPL_T7_SRQ_TABLE_REQ_LIMITUPD(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_LIMITUPD)
+#define G_CPL_T7_SRQ_TABLE_REQ_LIMITUPD(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_LIMITUPD) & M_CPL_T7_SRQ_TABLE_REQ_LIMITUPD)
+#define F_CPL_T7_SRQ_TABLE_REQ_LIMITUPD V_CPL_T7_SRQ_TABLE_REQ_LIMITUPD(1U)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_INDEX 0
+#define M_CPL_T7_SRQ_TABLE_REQ_INDEX 0x3ff
+#define V_CPL_T7_SRQ_TABLE_REQ_INDEX(x) ((x) << S_CPL_T7_SRQ_TABLE_REQ_INDEX)
+#define G_CPL_T7_SRQ_TABLE_REQ_INDEX(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_INDEX) & M_CPL_T7_SRQ_TABLE_REQ_INDEX)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT 0
+#define M_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT 0x3f
+#define V_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT)
+#define G_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT) & M_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_QUELEN 28
+#define M_CPL_T7_SRQ_TABLE_REQ_QUELEN 0xf
+#define V_CPL_T7_SRQ_TABLE_REQ_QUELEN(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_QUELEN)
+#define G_CPL_T7_SRQ_TABLE_REQ_QUELEN(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_QUELEN) & M_CPL_T7_SRQ_TABLE_REQ_QUELEN)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_QUEBASE 0
+#define M_CPL_T7_SRQ_TABLE_REQ_QUEBASE 0x3ffffff
+#define V_CPL_T7_SRQ_TABLE_REQ_QUEBASE(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_QUEBASE)
+#define G_CPL_T7_SRQ_TABLE_REQ_QUEBASE(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_QUEBASE) & M_CPL_T7_SRQ_TABLE_REQ_QUEBASE)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_CURMSN 16
+#define M_CPL_T7_SRQ_TABLE_REQ_CURMSN 0xffff
+#define V_CPL_T7_SRQ_TABLE_REQ_CURMSN(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_CURMSN)
+#define G_CPL_T7_SRQ_TABLE_REQ_CURMSN(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_CURMSN) & M_CPL_T7_SRQ_TABLE_REQ_CURMSN)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_MAXMSN 0
+#define M_CPL_T7_SRQ_TABLE_REQ_MAXMSN 0xffff
+#define V_CPL_T7_SRQ_TABLE_REQ_MAXMSN(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_MAXMSN)
+#define G_CPL_T7_SRQ_TABLE_REQ_MAXMSN(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_MAXMSN) & M_CPL_T7_SRQ_TABLE_REQ_MAXMSN)
+
+/*
+ * T7 SRQ table reply; mirrors cpl_t7_srq_table_req with a STATUS field.
+ * Packed-field layouts are defined by the accessor macros that follow.
+ */
+struct cpl_t7_srq_table_rpl {
+	RSS_HDR
+	union opcode_tid ot;
+	__be32 status_index;		/* STATUS(31:24) | INDEX(9:0) */
+	__be16 srqlimit_pkd;		/* SRQLIMIT in bits 5:0 */
+	__be16 cqid;
+	__be16 xdid;
+	__be16 pdid;
+	__be32 quelen_quebase;		/* QUELEN(31:28) | QUEBASE(25:0) */
+	__be32 curmsn_maxmsn;		/* CURMSN(31:16) | MAXMSN(15:0) */
+};
+
+#define S_CPL_T7_SRQ_TABLE_RPL_STATUS 24
+#define M_CPL_T7_SRQ_TABLE_RPL_STATUS 0xff
+#define V_CPL_T7_SRQ_TABLE_RPL_STATUS(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_RPL_STATUS)
+#define G_CPL_T7_SRQ_TABLE_RPL_STATUS(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_STATUS) & M_CPL_T7_SRQ_TABLE_RPL_STATUS)
+
+#define S_CPL_T7_SRQ_TABLE_RPL_INDEX 0
+#define M_CPL_T7_SRQ_TABLE_RPL_INDEX 0x3ff
+#define V_CPL_T7_SRQ_TABLE_RPL_INDEX(x) ((x) << S_CPL_T7_SRQ_TABLE_RPL_INDEX)
+#define G_CPL_T7_SRQ_TABLE_RPL_INDEX(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_INDEX) & M_CPL_T7_SRQ_TABLE_RPL_INDEX)
+
+#define S_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT 0
+#define M_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT 0x3f
+#define V_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT)
+#define G_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT) & M_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT)
+
+#define S_CPL_T7_SRQ_TABLE_RPL_QUELEN 28
+#define M_CPL_T7_SRQ_TABLE_RPL_QUELEN 0xf
+#define V_CPL_T7_SRQ_TABLE_RPL_QUELEN(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_RPL_QUELEN)
+#define G_CPL_T7_SRQ_TABLE_RPL_QUELEN(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_QUELEN) & M_CPL_T7_SRQ_TABLE_RPL_QUELEN)
+
+#define S_CPL_T7_SRQ_TABLE_RPL_QUEBASE 0
+#define M_CPL_T7_SRQ_TABLE_RPL_QUEBASE 0x3ffffff
+#define V_CPL_T7_SRQ_TABLE_RPL_QUEBASE(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_RPL_QUEBASE)
+#define G_CPL_T7_SRQ_TABLE_RPL_QUEBASE(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_QUEBASE) & M_CPL_T7_SRQ_TABLE_RPL_QUEBASE)
+
+#define S_CPL_T7_SRQ_TABLE_RPL_CURMSN 16
+#define M_CPL_T7_SRQ_TABLE_RPL_CURMSN 0xffff
+#define V_CPL_T7_SRQ_TABLE_RPL_CURMSN(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_RPL_CURMSN)
+#define G_CPL_T7_SRQ_TABLE_RPL_CURMSN(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_CURMSN) & M_CPL_T7_SRQ_TABLE_RPL_CURMSN)
+
+#define S_CPL_T7_SRQ_TABLE_RPL_MAXMSN 0
+#define M_CPL_T7_SRQ_TABLE_RPL_MAXMSN 0xffff
+#define V_CPL_T7_SRQ_TABLE_RPL_MAXMSN(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_RPL_MAXMSN)
+#define G_CPL_T7_SRQ_TABLE_RPL_MAXMSN(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_MAXMSN) & M_CPL_T7_SRQ_TABLE_RPL_MAXMSN)
+
+/* RDMA asynchronous event notification. */
+struct cpl_rdma_async_event {
+	RSS_HDR
+	union opcode_tid ot;
+	__be32 EventInfo;		/* EVENTTYPE(19:16) | INDEX(15:0), see macros below */
+};
+
+#define S_CPL_RDMA_ASYNC_EVENT_EVENTTYPE 16
+#define M_CPL_RDMA_ASYNC_EVENT_EVENTTYPE 0xf
+#define V_CPL_RDMA_ASYNC_EVENT_EVENTTYPE(x) \
+ ((x) << S_CPL_RDMA_ASYNC_EVENT_EVENTTYPE)
+#define G_CPL_RDMA_ASYNC_EVENT_EVENTTYPE(x) \
+ (((x) >> S_CPL_RDMA_ASYNC_EVENT_EVENTTYPE) & \
+ M_CPL_RDMA_ASYNC_EVENT_EVENTTYPE)
+
+#define S_CPL_RDMA_ASYNC_EVENT_INDEX 0
+#define M_CPL_RDMA_ASYNC_EVENT_INDEX 0xffff
+#define V_CPL_RDMA_ASYNC_EVENT_INDEX(x) ((x) << S_CPL_RDMA_ASYNC_EVENT_INDEX)
+#define G_CPL_RDMA_ASYNC_EVENT_INDEX(x) \
+ (((x) >> S_CPL_RDMA_ASYNC_EVENT_INDEX) & M_CPL_RDMA_ASYNC_EVENT_INDEX)
+
struct cpl_smt_write_req {
WR_HDR;
union opcode_tid ot;
@@ -2479,6 +3334,118 @@ struct cpl_smt_read_rpl {
#define V_SMTW_VF_VLD(x) ((x) << S_SMTW_VF_VLD)
#define F_SMTW_VF_VLD V_SMTW_VF_VLD(1U)
+/*
+ * T7 source MAC table (SMT) write request.  noreply_to_mtu packs
+ * NOREPLY(31) TAGINSERT(30) TAGTYPE(29:28) INDEX(27:20) OVLAN(19:16)
+ * IPSEC(14) MTU(13:0); see the accessor macros below.  The union payload
+ * selects between PF/VF+SMAC, IPv4 and IPv6 entry contents.
+ */
+struct cpl_t7_smt_write_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 noreply_to_mtu;
+	union smt_write_req {
+		struct smt_write_req_pfvf {
+			__be64 tagvalue;
+			__be32 pfvf_smac_hi;	/* PFVF(27:16) | SMAC_HI(15:0) */
+			__be32 smac_lo;
+			__be64 tagext;
+		} pfvf;
+		struct smt_write_req_ipv4 {
+			__be32 srcipv4;
+			__be32 destipv4;
+		} ipv4;
+		struct smt_write_req_ipv6 {
+			__be64 ipv6ms;	/* upper 64 bits of IPv6 address */
+			__be64 ipv6ls;	/* lower 64 bits of IPv6 address */
+		} ipv6;
+	} u;
+};
+
+#define S_CPL_T7_SMT_WRITE_REQ_NOREPLY 31
+#define M_CPL_T7_SMT_WRITE_REQ_NOREPLY 0x1
+#define V_CPL_T7_SMT_WRITE_REQ_NOREPLY(x) \
+ ((x) << S_CPL_T7_SMT_WRITE_REQ_NOREPLY)
+#define G_CPL_T7_SMT_WRITE_REQ_NOREPLY(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_NOREPLY) & M_CPL_T7_SMT_WRITE_REQ_NOREPLY)
+#define F_CPL_T7_SMT_WRITE_REQ_NOREPLY \
+ V_CPL_T7_SMT_WRITE_REQ_NOREPLY(1U)
+
+#define S_CPL_T7_SMT_WRITE_REQ_TAGINSERT 30
+#define M_CPL_T7_SMT_WRITE_REQ_TAGINSERT 0x1
+#define V_CPL_T7_SMT_WRITE_REQ_TAGINSERT(x) \
+ ((x) << S_CPL_T7_SMT_WRITE_REQ_TAGINSERT)
+#define G_CPL_T7_SMT_WRITE_REQ_TAGINSERT(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_TAGINSERT) & \
+ M_CPL_T7_SMT_WRITE_REQ_TAGINSERT)
+#define F_CPL_T7_SMT_WRITE_REQ_TAGINSERT \
+ V_CPL_T7_SMT_WRITE_REQ_TAGINSERT(1U)
+
+#define S_CPL_T7_SMT_WRITE_REQ_TAGTYPE 28
+#define M_CPL_T7_SMT_WRITE_REQ_TAGTYPE 0x3
+#define V_CPL_T7_SMT_WRITE_REQ_TAGTYPE(x) \
+ ((x) << S_CPL_T7_SMT_WRITE_REQ_TAGTYPE)
+#define G_CPL_T7_SMT_WRITE_REQ_TAGTYPE(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_TAGTYPE) & M_CPL_T7_SMT_WRITE_REQ_TAGTYPE)
+
+#define S_CPL_T7_SMT_WRITE_REQ_INDEX 20
+#define M_CPL_T7_SMT_WRITE_REQ_INDEX 0xff
+#define V_CPL_T7_SMT_WRITE_REQ_INDEX(x) ((x) << S_CPL_T7_SMT_WRITE_REQ_INDEX)
+#define G_CPL_T7_SMT_WRITE_REQ_INDEX(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_INDEX) & M_CPL_T7_SMT_WRITE_REQ_INDEX)
+
+#define S_CPL_T7_SMT_WRITE_REQ_OVLAN 16
+#define M_CPL_T7_SMT_WRITE_REQ_OVLAN 0xf
+#define V_CPL_T7_SMT_WRITE_REQ_OVLAN(x) ((x) << S_CPL_T7_SMT_WRITE_REQ_OVLAN)
+#define G_CPL_T7_SMT_WRITE_REQ_OVLAN(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_OVLAN) & M_CPL_T7_SMT_WRITE_REQ_OVLAN)
+
+#define S_CPL_T7_SMT_WRITE_REQ_IPSEC 14
+#define M_CPL_T7_SMT_WRITE_REQ_IPSEC 0x1
+#define V_CPL_T7_SMT_WRITE_REQ_IPSEC(x) ((x) << S_CPL_T7_SMT_WRITE_REQ_IPSEC)
+#define G_CPL_T7_SMT_WRITE_REQ_IPSEC(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_IPSEC) & M_CPL_T7_SMT_WRITE_REQ_IPSEC)
+#define F_CPL_T7_SMT_WRITE_REQ_IPSEC V_CPL_T7_SMT_WRITE_REQ_IPSEC(1U)
+
+#define S_CPL_T7_SMT_WRITE_REQ_MTU 0
+#define M_CPL_T7_SMT_WRITE_REQ_MTU 0x3fff
+#define V_CPL_T7_SMT_WRITE_REQ_MTU(x) ((x) << S_CPL_T7_SMT_WRITE_REQ_MTU)
+#define G_CPL_T7_SMT_WRITE_REQ_MTU(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_MTU) & M_CPL_T7_SMT_WRITE_REQ_MTU)
+
+#define S_CPL_T7_SMT_WRITE_REQ_PFVF 16
+#define M_CPL_T7_SMT_WRITE_REQ_PFVF 0xfff
+#define V_CPL_T7_SMT_WRITE_REQ_PFVF(x) ((x) << S_CPL_T7_SMT_WRITE_REQ_PFVF)
+#define G_CPL_T7_SMT_WRITE_REQ_PFVF(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_PFVF) & M_CPL_T7_SMT_WRITE_REQ_PFVF)
+
+#define S_CPL_T7_SMT_WRITE_REQ_SMAC_HI 0
+#define M_CPL_T7_SMT_WRITE_REQ_SMAC_HI 0xffff
+#define V_CPL_T7_SMT_WRITE_REQ_SMAC_HI(x) \
+ ((x) << S_CPL_T7_SMT_WRITE_REQ_SMAC_HI)
+#define G_CPL_T7_SMT_WRITE_REQ_SMAC_HI(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_SMAC_HI) & M_CPL_T7_SMT_WRITE_REQ_SMAC_HI)
+
+/* T7 SMT read request. */
+struct cpl_t7_smt_read_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 index_to_ipsecidx;	/* INDEX(27:20) | IPSEC(14) | IPSECIDX(12:0) */
+};
+
+/* cpl_t7_smt_read_req.index_to_ipsecidx: SMT entry index, bits 27:20. */
+#define S_CPL_T7_SMT_READ_REQ_INDEX 20
+#define M_CPL_T7_SMT_READ_REQ_INDEX 0xff
+#define V_CPL_T7_SMT_READ_REQ_INDEX(x) ((x) << S_CPL_T7_SMT_READ_REQ_INDEX)
+#define G_CPL_T7_SMT_READ_REQ_INDEX(x) \
+	(((x) >> S_CPL_T7_SMT_READ_REQ_INDEX) & M_CPL_T7_SMT_READ_REQ_INDEX)
+
+#define S_CPL_T7_SMT_READ_REQ_IPSEC 14
+#define M_CPL_T7_SMT_READ_REQ_IPSEC 0x1
+#define V_CPL_T7_SMT_READ_REQ_IPSEC(x) ((x) << S_CPL_T7_SMT_READ_REQ_IPSEC)
+#define G_CPL_T7_SMT_READ_REQ_IPSEC(x) \
+ (((x) >> S_CPL_T7_SMT_READ_REQ_IPSEC) & M_CPL_T7_SMT_READ_REQ_IPSEC)
+#define F_CPL_T7_SMT_READ_REQ_IPSEC V_CPL_T7_SMT_READ_REQ_IPSEC(1U)
+
+#define S_CPL_T7_SMT_READ_REQ_IPSECIDX 0
+#define M_CPL_T7_SMT_READ_REQ_IPSECIDX 0x1fff
+#define V_CPL_T7_SMT_READ_REQ_IPSECIDX(x) \
+ ((x) << S_CPL_T7_SMT_READ_REQ_IPSECIDX)
+#define G_CPL_T7_SMT_READ_REQ_IPSECIDX(x) \
+ (((x) >> S_CPL_T7_SMT_READ_REQ_IPSECIDX) & M_CPL_T7_SMT_READ_REQ_IPSECIDX)
+
struct cpl_tag_write_req {
WR_HDR;
union opcode_tid ot;
@@ -2611,6 +3578,352 @@ struct cpl_pkt_notify {
#define V_NTFY_T5_ETHHDR_LEN(x) ((x) << S_NTFY_T5_ETHHDR_LEN)
#define G_NTFY_T5_ETHHDR_LEN(x) (((x) >> S_NTFY_T5_ETHHDR_LEN) & M_NTFY_T5_ETHHDR_LEN)
+/*
+ * T7 packet notify.  ethhdrlen_to_macindex packs ETHHDRLEN(31:24)
+ * IPHDRLEN(23:18) TCPHDRLEN(17:14) INTERFACE(13:10) MACINDEX(8:0);
+ * see the accessor macros below.
+ */
+struct cpl_t7_pkt_notify {
+	RSS_HDR
+	union opcode_tid ot;
+	__be16 r1;			/* reserved */
+	__be16 length;
+	__be32 ethhdrlen_to_macindex;
+	__be32 lineinfo;
+};
+
+#define S_CPL_T7_PKT_NOTIFY_ETHHDRLEN 24
+#define M_CPL_T7_PKT_NOTIFY_ETHHDRLEN 0xff
+#define V_CPL_T7_PKT_NOTIFY_ETHHDRLEN(x) \
+ ((x) << S_CPL_T7_PKT_NOTIFY_ETHHDRLEN)
+#define G_CPL_T7_PKT_NOTIFY_ETHHDRLEN(x) \
+ (((x) >> S_CPL_T7_PKT_NOTIFY_ETHHDRLEN) & M_CPL_T7_PKT_NOTIFY_ETHHDRLEN)
+
+#define S_CPL_T7_PKT_NOTIFY_IPHDRLEN 18
+#define M_CPL_T7_PKT_NOTIFY_IPHDRLEN 0x3f
+#define V_CPL_T7_PKT_NOTIFY_IPHDRLEN(x) ((x) << S_CPL_T7_PKT_NOTIFY_IPHDRLEN)
+#define G_CPL_T7_PKT_NOTIFY_IPHDRLEN(x) \
+ (((x) >> S_CPL_T7_PKT_NOTIFY_IPHDRLEN) & M_CPL_T7_PKT_NOTIFY_IPHDRLEN)
+
+#define S_CPL_T7_PKT_NOTIFY_TCPHDRLEN 14
+#define M_CPL_T7_PKT_NOTIFY_TCPHDRLEN 0xf
+#define V_CPL_T7_PKT_NOTIFY_TCPHDRLEN(x) \
+ ((x) << S_CPL_T7_PKT_NOTIFY_TCPHDRLEN)
+#define G_CPL_T7_PKT_NOTIFY_TCPHDRLEN(x) \
+ (((x) >> S_CPL_T7_PKT_NOTIFY_TCPHDRLEN) & M_CPL_T7_PKT_NOTIFY_TCPHDRLEN)
+
+#define S_CPL_T7_PKT_NOTIFY_INTERFACE 10
+#define M_CPL_T7_PKT_NOTIFY_INTERFACE 0xf
+#define V_CPL_T7_PKT_NOTIFY_INTERFACE(x) \
+ ((x) << S_CPL_T7_PKT_NOTIFY_INTERFACE)
+#define G_CPL_T7_PKT_NOTIFY_INTERFACE(x) \
+ (((x) >> S_CPL_T7_PKT_NOTIFY_INTERFACE) & M_CPL_T7_PKT_NOTIFY_INTERFACE)
+
+#define S_CPL_T7_PKT_NOTIFY_MACINDEX 0
+#define M_CPL_T7_PKT_NOTIFY_MACINDEX 0x1ff
+#define V_CPL_T7_PKT_NOTIFY_MACINDEX(x) ((x) << S_CPL_T7_PKT_NOTIFY_MACINDEX)
+#define G_CPL_T7_PKT_NOTIFY_MACINDEX(x) \
+ (((x) >> S_CPL_T7_PKT_NOTIFY_MACINDEX) & M_CPL_T7_PKT_NOTIFY_MACINDEX)
+
+/*
+ * RDMA completion queue entry.  tid_flitcnt packs TID(27:8)|FLITCNT(7:0);
+ * qpid_to_wr_type packs QPID(31:12) GENERATION(10) STATUS(9:5) CQE_TYPE(4)
+ * WR_TYPE(3:0); see the accessor macros below.
+ */
+struct cpl_rdma_cqe {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 tid_flitcnt;
+	__be32 qpid_to_wr_type;
+	__be32 length;
+	__be32 tag;
+	__be32 msn;
+};
+
+#define S_CPL_RDMA_CQE_RSSCTRL 16
+#define M_CPL_RDMA_CQE_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_RSSCTRL(x) ((x) << S_CPL_RDMA_CQE_RSSCTRL)
+#define G_CPL_RDMA_CQE_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_RSSCTRL) & M_CPL_RDMA_CQE_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_CQID 0
+#define M_CPL_RDMA_CQE_CQID 0xffff
+#define V_CPL_RDMA_CQE_CQID(x) ((x) << S_CPL_RDMA_CQE_CQID)
+#define G_CPL_RDMA_CQE_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_CQID) & M_CPL_RDMA_CQE_CQID)
+
+#define S_CPL_RDMA_CQE_TID 8
+#define M_CPL_RDMA_CQE_TID 0xfffff
+#define V_CPL_RDMA_CQE_TID(x) ((x) << S_CPL_RDMA_CQE_TID)
+#define G_CPL_RDMA_CQE_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_TID) & M_CPL_RDMA_CQE_TID)
+
+#define S_CPL_RDMA_CQE_FLITCNT 0
+#define M_CPL_RDMA_CQE_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_FLITCNT(x) ((x) << S_CPL_RDMA_CQE_FLITCNT)
+#define G_CPL_RDMA_CQE_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_FLITCNT) & M_CPL_RDMA_CQE_FLITCNT)
+
+#define S_CPL_RDMA_CQE_QPID 12
+#define M_CPL_RDMA_CQE_QPID 0xfffff
+#define V_CPL_RDMA_CQE_QPID(x) ((x) << S_CPL_RDMA_CQE_QPID)
+#define G_CPL_RDMA_CQE_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_QPID) & M_CPL_RDMA_CQE_QPID)
+
+#define S_CPL_RDMA_CQE_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_GENERATION_BIT) & M_CPL_RDMA_CQE_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_GENERATION_BIT V_CPL_RDMA_CQE_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_STATUS 5
+#define M_CPL_RDMA_CQE_STATUS 0x1f
+#define V_CPL_RDMA_CQE_STATUS(x) ((x) << S_CPL_RDMA_CQE_STATUS)
+#define G_CPL_RDMA_CQE_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_STATUS) & M_CPL_RDMA_CQE_STATUS)
+
+#define S_CPL_RDMA_CQE_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_CQE_TYPE(x) ((x) << S_CPL_RDMA_CQE_CQE_TYPE)
+#define G_CPL_RDMA_CQE_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_CQE_TYPE) & M_CPL_RDMA_CQE_CQE_TYPE)
+#define F_CPL_RDMA_CQE_CQE_TYPE V_CPL_RDMA_CQE_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_WR_TYPE 0
+#define M_CPL_RDMA_CQE_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_WR_TYPE(x) ((x) << S_CPL_RDMA_CQE_WR_TYPE)
+#define G_CPL_RDMA_CQE_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_WR_TYPE) & M_CPL_RDMA_CQE_WR_TYPE)
+
+/*
+ * RDMA CQE, SRQ variant: same leading layout as cpl_rdma_cqe plus the
+ * consumed receive-queue entry (rqe).
+ */
+struct cpl_rdma_cqe_srq {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 tid_flitcnt;		/* TID(27:8) | FLITCNT(7:0) */
+	__be32 qpid_to_wr_type;		/* QPID(31:12) GENERATION(10) STATUS(9:5) CQE_TYPE(4) WR_TYPE(3:0) */
+	__be32 length;
+	__be32 tag;
+	__be32 msn;
+	__be32 r3;			/* reserved */
+	__be32 rqe;
+};
+
+#define S_CPL_RDMA_CQE_SRQ_OPCODE 24
+#define M_CPL_RDMA_CQE_SRQ_OPCODE 0xff
+#define V_CPL_RDMA_CQE_SRQ_OPCODE(x) ((x) << S_CPL_RDMA_CQE_SRQ_OPCODE)
+#define G_CPL_RDMA_CQE_SRQ_OPCODE(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_OPCODE) & M_CPL_RDMA_CQE_SRQ_OPCODE)
+
+#define S_CPL_RDMA_CQE_SRQ_RSSCTRL 16
+#define M_CPL_RDMA_CQE_SRQ_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_SRQ_RSSCTRL(x) ((x) << S_CPL_RDMA_CQE_SRQ_RSSCTRL)
+#define G_CPL_RDMA_CQE_SRQ_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_RSSCTRL) & M_CPL_RDMA_CQE_SRQ_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_SRQ_CQID 0
+#define M_CPL_RDMA_CQE_SRQ_CQID 0xffff
+#define V_CPL_RDMA_CQE_SRQ_CQID(x) ((x) << S_CPL_RDMA_CQE_SRQ_CQID)
+#define G_CPL_RDMA_CQE_SRQ_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_CQID) & M_CPL_RDMA_CQE_SRQ_CQID)
+
+#define S_CPL_RDMA_CQE_SRQ_TID 8
+#define M_CPL_RDMA_CQE_SRQ_TID 0xfffff
+#define V_CPL_RDMA_CQE_SRQ_TID(x) ((x) << S_CPL_RDMA_CQE_SRQ_TID)
+#define G_CPL_RDMA_CQE_SRQ_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_TID) & M_CPL_RDMA_CQE_SRQ_TID)
+
+#define S_CPL_RDMA_CQE_SRQ_FLITCNT 0
+#define M_CPL_RDMA_CQE_SRQ_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_SRQ_FLITCNT(x) ((x) << S_CPL_RDMA_CQE_SRQ_FLITCNT)
+#define G_CPL_RDMA_CQE_SRQ_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_FLITCNT) & M_CPL_RDMA_CQE_SRQ_FLITCNT)
+
+#define S_CPL_RDMA_CQE_SRQ_QPID 12
+#define M_CPL_RDMA_CQE_SRQ_QPID 0xfffff
+#define V_CPL_RDMA_CQE_SRQ_QPID(x) ((x) << S_CPL_RDMA_CQE_SRQ_QPID)
+#define G_CPL_RDMA_CQE_SRQ_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_QPID) & M_CPL_RDMA_CQE_SRQ_QPID)
+
+#define S_CPL_RDMA_CQE_SRQ_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_SRQ_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_SRQ_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_SRQ_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_SRQ_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_GENERATION_BIT) & \
+ M_CPL_RDMA_CQE_SRQ_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_SRQ_GENERATION_BIT \
+ V_CPL_RDMA_CQE_SRQ_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_SRQ_STATUS 5
+#define M_CPL_RDMA_CQE_SRQ_STATUS 0x1f
+#define V_CPL_RDMA_CQE_SRQ_STATUS(x) ((x) << S_CPL_RDMA_CQE_SRQ_STATUS)
+#define G_CPL_RDMA_CQE_SRQ_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_STATUS) & M_CPL_RDMA_CQE_SRQ_STATUS)
+
+#define S_CPL_RDMA_CQE_SRQ_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_SRQ_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_SRQ_CQE_TYPE(x) ((x) << S_CPL_RDMA_CQE_SRQ_CQE_TYPE)
+#define G_CPL_RDMA_CQE_SRQ_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_CQE_TYPE) & M_CPL_RDMA_CQE_SRQ_CQE_TYPE)
+#define F_CPL_RDMA_CQE_SRQ_CQE_TYPE V_CPL_RDMA_CQE_SRQ_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_SRQ_WR_TYPE 0
+#define M_CPL_RDMA_CQE_SRQ_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_SRQ_WR_TYPE(x) ((x) << S_CPL_RDMA_CQE_SRQ_WR_TYPE)
+#define G_CPL_RDMA_CQE_SRQ_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_WR_TYPE) & M_CPL_RDMA_CQE_SRQ_WR_TYPE)
+
+/* RDMA CQE, read-response variant; layout identical to cpl_rdma_cqe. */
+struct cpl_rdma_cqe_read_rsp {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 tid_flitcnt;		/* TID(27:8) | FLITCNT(7:0) */
+	__be32 qpid_to_wr_type;		/* QPID(31:12) GENERATION(10) STATUS(9:5) CQE_TYPE(4) WR_TYPE(3:0) */
+	__be32 length;
+	__be32 tag;
+	__be32 msn;
+};
+
+#define S_CPL_RDMA_CQE_READ_RSP_RSSCTRL 16
+#define M_CPL_RDMA_CQE_READ_RSP_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_READ_RSP_RSSCTRL(x) \
+ ((x) << S_CPL_RDMA_CQE_READ_RSP_RSSCTRL)
+#define G_CPL_RDMA_CQE_READ_RSP_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_RSSCTRL) & \
+ M_CPL_RDMA_CQE_READ_RSP_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_READ_RSP_CQID 0
+#define M_CPL_RDMA_CQE_READ_RSP_CQID 0xffff
+#define V_CPL_RDMA_CQE_READ_RSP_CQID(x) ((x) << S_CPL_RDMA_CQE_READ_RSP_CQID)
+#define G_CPL_RDMA_CQE_READ_RSP_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_CQID) & M_CPL_RDMA_CQE_READ_RSP_CQID)
+
+#define S_CPL_RDMA_CQE_READ_RSP_TID 8
+#define M_CPL_RDMA_CQE_READ_RSP_TID 0xfffff
+#define V_CPL_RDMA_CQE_READ_RSP_TID(x) ((x) << S_CPL_RDMA_CQE_READ_RSP_TID)
+#define G_CPL_RDMA_CQE_READ_RSP_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_TID) & M_CPL_RDMA_CQE_READ_RSP_TID)
+
+#define S_CPL_RDMA_CQE_READ_RSP_FLITCNT 0
+#define M_CPL_RDMA_CQE_READ_RSP_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_READ_RSP_FLITCNT(x) \
+ ((x) << S_CPL_RDMA_CQE_READ_RSP_FLITCNT)
+#define G_CPL_RDMA_CQE_READ_RSP_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_FLITCNT) & \
+ M_CPL_RDMA_CQE_READ_RSP_FLITCNT)
+
+#define S_CPL_RDMA_CQE_READ_RSP_QPID 12
+#define M_CPL_RDMA_CQE_READ_RSP_QPID 0xfffff
+#define V_CPL_RDMA_CQE_READ_RSP_QPID(x) ((x) << S_CPL_RDMA_CQE_READ_RSP_QPID)
+#define G_CPL_RDMA_CQE_READ_RSP_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_QPID) & M_CPL_RDMA_CQE_READ_RSP_QPID)
+
+#define S_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT) & \
+ M_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT \
+ V_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_READ_RSP_STATUS 5
+#define M_CPL_RDMA_CQE_READ_RSP_STATUS 0x1f
+#define V_CPL_RDMA_CQE_READ_RSP_STATUS(x) \
+ ((x) << S_CPL_RDMA_CQE_READ_RSP_STATUS)
+#define G_CPL_RDMA_CQE_READ_RSP_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_STATUS) & M_CPL_RDMA_CQE_READ_RSP_STATUS)
+
+#define S_CPL_RDMA_CQE_READ_RSP_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_READ_RSP_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_READ_RSP_CQE_TYPE(x) \
+ ((x) << S_CPL_RDMA_CQE_READ_RSP_CQE_TYPE)
+#define G_CPL_RDMA_CQE_READ_RSP_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_CQE_TYPE) & \
+ M_CPL_RDMA_CQE_READ_RSP_CQE_TYPE)
+#define F_CPL_RDMA_CQE_READ_RSP_CQE_TYPE V_CPL_RDMA_CQE_READ_RSP_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_READ_RSP_WR_TYPE 0
+#define M_CPL_RDMA_CQE_READ_RSP_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_READ_RSP_WR_TYPE(x) \
+ ((x) << S_CPL_RDMA_CQE_READ_RSP_WR_TYPE)
+#define G_CPL_RDMA_CQE_READ_RSP_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_WR_TYPE) & \
+ M_CPL_RDMA_CQE_READ_RSP_WR_TYPE)
+
+/* RDMA CQE, error variant; layout identical to cpl_rdma_cqe. */
+struct cpl_rdma_cqe_err {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 tid_flitcnt;		/* TID(27:8) | FLITCNT(7:0) */
+	__be32 qpid_to_wr_type;		/* QPID(31:12) GENERATION(10) STATUS(9:5) CQE_TYPE(4) WR_TYPE(3:0) */
+	__be32 length;
+	__be32 tag;
+	__be32 msn;
+};
+
+#define S_CPL_RDMA_CQE_ERR_RSSCTRL 16
+#define M_CPL_RDMA_CQE_ERR_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_ERR_RSSCTRL(x) ((x) << S_CPL_RDMA_CQE_ERR_RSSCTRL)
+#define G_CPL_RDMA_CQE_ERR_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_RSSCTRL) & M_CPL_RDMA_CQE_ERR_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_ERR_CQID 0
+#define M_CPL_RDMA_CQE_ERR_CQID 0xffff
+#define V_CPL_RDMA_CQE_ERR_CQID(x) ((x) << S_CPL_RDMA_CQE_ERR_CQID)
+#define G_CPL_RDMA_CQE_ERR_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_CQID) & M_CPL_RDMA_CQE_ERR_CQID)
+
+#define S_CPL_RDMA_CQE_ERR_TID 8
+#define M_CPL_RDMA_CQE_ERR_TID 0xfffff
+#define V_CPL_RDMA_CQE_ERR_TID(x) ((x) << S_CPL_RDMA_CQE_ERR_TID)
+#define G_CPL_RDMA_CQE_ERR_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_TID) & M_CPL_RDMA_CQE_ERR_TID)
+
+#define S_CPL_RDMA_CQE_ERR_FLITCNT 0
+#define M_CPL_RDMA_CQE_ERR_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_ERR_FLITCNT(x) ((x) << S_CPL_RDMA_CQE_ERR_FLITCNT)
+#define G_CPL_RDMA_CQE_ERR_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_FLITCNT) & M_CPL_RDMA_CQE_ERR_FLITCNT)
+
+#define S_CPL_RDMA_CQE_ERR_QPID 12
+#define M_CPL_RDMA_CQE_ERR_QPID 0xfffff
+#define V_CPL_RDMA_CQE_ERR_QPID(x) ((x) << S_CPL_RDMA_CQE_ERR_QPID)
+#define G_CPL_RDMA_CQE_ERR_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_QPID) & M_CPL_RDMA_CQE_ERR_QPID)
+
+#define S_CPL_RDMA_CQE_ERR_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_ERR_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_ERR_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_ERR_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_GENERATION_BIT) & \
+ M_CPL_RDMA_CQE_ERR_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_ERR_GENERATION_BIT \
+ V_CPL_RDMA_CQE_ERR_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_ERR_STATUS 5
+#define M_CPL_RDMA_CQE_ERR_STATUS 0x1f
+#define V_CPL_RDMA_CQE_ERR_STATUS(x) ((x) << S_CPL_RDMA_CQE_ERR_STATUS)
+#define G_CPL_RDMA_CQE_ERR_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_STATUS) & M_CPL_RDMA_CQE_ERR_STATUS)
+
+#define S_CPL_RDMA_CQE_ERR_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_ERR_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_ERR_CQE_TYPE(x) ((x) << S_CPL_RDMA_CQE_ERR_CQE_TYPE)
+#define G_CPL_RDMA_CQE_ERR_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_CQE_TYPE) & M_CPL_RDMA_CQE_ERR_CQE_TYPE)
+#define F_CPL_RDMA_CQE_ERR_CQE_TYPE V_CPL_RDMA_CQE_ERR_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_ERR_WR_TYPE 0
+#define M_CPL_RDMA_CQE_ERR_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_ERR_WR_TYPE(x) ((x) << S_CPL_RDMA_CQE_ERR_WR_TYPE)
+#define G_CPL_RDMA_CQE_ERR_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_WR_TYPE) & M_CPL_RDMA_CQE_ERR_WR_TYPE)
+
+/* RDMA read request notification; SRQ id in bits 11:0 of srq_pkd. */
+struct cpl_rdma_read_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 srq_pkd;
+	__be16 length;
+};
+
+#define S_CPL_RDMA_READ_REQ_SRQ 0
+#define M_CPL_RDMA_READ_REQ_SRQ 0xfff
+#define V_CPL_RDMA_READ_REQ_SRQ(x) ((x) << S_CPL_RDMA_READ_REQ_SRQ)
+#define G_CPL_RDMA_READ_REQ_SRQ(x) \
+ (((x) >> S_CPL_RDMA_READ_REQ_SRQ) & M_CPL_RDMA_READ_REQ_SRQ)
+
struct cpl_rdma_terminate {
RSS_HDR
union opcode_tid ot;
@@ -2618,6 +3931,404 @@ struct cpl_rdma_terminate {
__be16 len;
};
+/* RDMA atomic request: opcode_srq packs OPCODE(15:12) | SRQ(11:0). */
+struct cpl_rdma_atomic_req {
+	RSS_HDR
+	union opcode_tid ot;
+	__be16 opcode_srq;
+	__be16 length;
+};
+
+#define S_CPL_RDMA_ATOMIC_REQ_OPCODE 12
+#define M_CPL_RDMA_ATOMIC_REQ_OPCODE 0xf
+#define V_CPL_RDMA_ATOMIC_REQ_OPCODE(x) ((x) << S_CPL_RDMA_ATOMIC_REQ_OPCODE)
+#define G_CPL_RDMA_ATOMIC_REQ_OPCODE(x) \
+ (((x) >> S_CPL_RDMA_ATOMIC_REQ_OPCODE) & M_CPL_RDMA_ATOMIC_REQ_OPCODE)
+
+#define S_CPL_RDMA_ATOMIC_REQ_SRQ 0
+#define M_CPL_RDMA_ATOMIC_REQ_SRQ 0xfff
+#define V_CPL_RDMA_ATOMIC_REQ_SRQ(x) ((x) << S_CPL_RDMA_ATOMIC_REQ_SRQ)
+#define G_CPL_RDMA_ATOMIC_REQ_SRQ(x) \
+ (((x) >> S_CPL_RDMA_ATOMIC_REQ_SRQ) & M_CPL_RDMA_ATOMIC_REQ_SRQ)
+
+/* RDMA atomic reply; same layout as cpl_rdma_atomic_req. */
+struct cpl_rdma_atomic_rpl {
+	RSS_HDR
+	union opcode_tid ot;
+	__be16 opcode_srq;		/* OPCODE(15:12) | SRQ(11:0) */
+	__be16 length;
+};
+
+#define S_CPL_RDMA_ATOMIC_RPL_OPCODE 12
+#define M_CPL_RDMA_ATOMIC_RPL_OPCODE 0xf
+#define V_CPL_RDMA_ATOMIC_RPL_OPCODE(x) ((x) << S_CPL_RDMA_ATOMIC_RPL_OPCODE)
+#define G_CPL_RDMA_ATOMIC_RPL_OPCODE(x) \
+ (((x) >> S_CPL_RDMA_ATOMIC_RPL_OPCODE) & M_CPL_RDMA_ATOMIC_RPL_OPCODE)
+
+#define S_CPL_RDMA_ATOMIC_RPL_SRQ 0
+#define M_CPL_RDMA_ATOMIC_RPL_SRQ 0xfff
+#define V_CPL_RDMA_ATOMIC_RPL_SRQ(x) ((x) << S_CPL_RDMA_ATOMIC_RPL_SRQ)
+#define G_CPL_RDMA_ATOMIC_RPL_SRQ(x) \
+ (((x) >> S_CPL_RDMA_ATOMIC_RPL_SRQ) & M_CPL_RDMA_ATOMIC_RPL_SRQ)
+
+/* RDMA immediate-data notification. */
+struct cpl_rdma_imm_data {
+	RSS_HDR
+	union opcode_tid ot;
+	__be16 r;			/* reserved */
+	__be16 Length;
+};
+
+/*
+ * Same layout as cpl_rdma_imm_data; separate opcode (presumably the
+ * solicited-event variant — name-based, confirm against the CPL spec).
+ */
+struct cpl_rdma_imm_data_se {
+	RSS_HDR
+	union opcode_tid ot;
+	__be16 r;			/* reserved */
+	__be16 Length;
+};
+
+/*
+ * RDMA invalidate request.  The 20-bit PDID is split across two words:
+ * cqid_pdid_hi = CQID(27:8) | PDID_HI(7:0), pdid_lo_qpid =
+ * PDID_LO(31:20) | QPID(19:0); see the accessor macros below.
+ */
+struct cpl_rdma_inv_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 stag;
+	__be32 cqid_pdid_hi;
+	__be32 pdid_lo_qpid;
+};
+
+#define S_CPL_RDMA_INV_REQ_CQID 8
+#define M_CPL_RDMA_INV_REQ_CQID 0xfffff
+#define V_CPL_RDMA_INV_REQ_CQID(x) ((x) << S_CPL_RDMA_INV_REQ_CQID)
+#define G_CPL_RDMA_INV_REQ_CQID(x) \
+ (((x) >> S_CPL_RDMA_INV_REQ_CQID) & M_CPL_RDMA_INV_REQ_CQID)
+
+#define S_CPL_RDMA_INV_REQ_PDID_HI 0
+#define M_CPL_RDMA_INV_REQ_PDID_HI 0xff
+#define V_CPL_RDMA_INV_REQ_PDID_HI(x) ((x) << S_CPL_RDMA_INV_REQ_PDID_HI)
+#define G_CPL_RDMA_INV_REQ_PDID_HI(x) \
+ (((x) >> S_CPL_RDMA_INV_REQ_PDID_HI) & M_CPL_RDMA_INV_REQ_PDID_HI)
+
+#define S_CPL_RDMA_INV_REQ_PDID_LO 20
+#define M_CPL_RDMA_INV_REQ_PDID_LO 0xfff
+#define V_CPL_RDMA_INV_REQ_PDID_LO(x) ((x) << S_CPL_RDMA_INV_REQ_PDID_LO)
+#define G_CPL_RDMA_INV_REQ_PDID_LO(x) \
+ (((x) >> S_CPL_RDMA_INV_REQ_PDID_LO) & M_CPL_RDMA_INV_REQ_PDID_LO)
+
+#define S_CPL_RDMA_INV_REQ_QPID 0
+#define M_CPL_RDMA_INV_REQ_QPID 0xfffff
+#define V_CPL_RDMA_INV_REQ_QPID(x) ((x) << S_CPL_RDMA_INV_REQ_QPID)
+#define G_CPL_RDMA_INV_REQ_QPID(x) \
+ (((x) >> S_CPL_RDMA_INV_REQ_QPID) & M_CPL_RDMA_INV_REQ_QPID)
+
+/*
+ * Extended RDMA CQE: cpl_rdma_cqe layout (EXTMODE at bit 11 of
+ * qpid_to_wr_type) plus se_to_srq = SE(31) | WR_TYPE_EXT(30:24) |
+ * SRQ(11:0), the consumed RQE and 128 bits of extended info.
+ */
+struct cpl_rdma_cqe_ext {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 tid_flitcnt;		/* TID(27:8) | FLITCNT(7:0) */
+	__be32 qpid_to_wr_type;		/* QPID(31:12) EXTMODE(11) GENERATION(10) STATUS(9:5) CQE_TYPE(4) WR_TYPE(3:0) */
+	__be32 length;
+	__be32 tag;
+	__be32 msn;
+	__be32 se_to_srq;
+	__be32 rqe;
+	__be32 extinfoms[2];		/* extended info, upper 64 bits */
+	__be32 extinfols[2];		/* extended info, lower 64 bits */
+};
+
+#define S_CPL_RDMA_CQE_EXT_RSSCTRL 16
+#define M_CPL_RDMA_CQE_EXT_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_EXT_RSSCTRL(x) ((x) << S_CPL_RDMA_CQE_EXT_RSSCTRL)
+#define G_CPL_RDMA_CQE_EXT_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_RSSCTRL) & M_CPL_RDMA_CQE_EXT_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_EXT_CQID 0
+#define M_CPL_RDMA_CQE_EXT_CQID 0xffff
+#define V_CPL_RDMA_CQE_EXT_CQID(x) ((x) << S_CPL_RDMA_CQE_EXT_CQID)
+#define G_CPL_RDMA_CQE_EXT_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_CQID) & M_CPL_RDMA_CQE_EXT_CQID)
+
+#define S_CPL_RDMA_CQE_EXT_TID 8
+#define M_CPL_RDMA_CQE_EXT_TID 0xfffff
+#define V_CPL_RDMA_CQE_EXT_TID(x) ((x) << S_CPL_RDMA_CQE_EXT_TID)
+#define G_CPL_RDMA_CQE_EXT_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_TID) & M_CPL_RDMA_CQE_EXT_TID)
+
+#define S_CPL_RDMA_CQE_EXT_FLITCNT 0
+#define M_CPL_RDMA_CQE_EXT_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_EXT_FLITCNT(x) ((x) << S_CPL_RDMA_CQE_EXT_FLITCNT)
+#define G_CPL_RDMA_CQE_EXT_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_FLITCNT) & M_CPL_RDMA_CQE_EXT_FLITCNT)
+
+#define S_CPL_RDMA_CQE_EXT_QPID 12
+#define M_CPL_RDMA_CQE_EXT_QPID 0xfffff
+#define V_CPL_RDMA_CQE_EXT_QPID(x) ((x) << S_CPL_RDMA_CQE_EXT_QPID)
+#define G_CPL_RDMA_CQE_EXT_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_QPID) & M_CPL_RDMA_CQE_EXT_QPID)
+
+#define S_CPL_RDMA_CQE_EXT_EXTMODE 11
+#define M_CPL_RDMA_CQE_EXT_EXTMODE 0x1
+#define V_CPL_RDMA_CQE_EXT_EXTMODE(x) ((x) << S_CPL_RDMA_CQE_EXT_EXTMODE)
+#define G_CPL_RDMA_CQE_EXT_EXTMODE(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_EXTMODE) & M_CPL_RDMA_CQE_EXT_EXTMODE)
+#define F_CPL_RDMA_CQE_EXT_EXTMODE V_CPL_RDMA_CQE_EXT_EXTMODE(1U)
+
+#define S_CPL_RDMA_CQE_EXT_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_EXT_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_EXT_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_EXT_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_EXT_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_GENERATION_BIT) & \
+ M_CPL_RDMA_CQE_EXT_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_EXT_GENERATION_BIT \
+ V_CPL_RDMA_CQE_EXT_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_EXT_STATUS 5
+#define M_CPL_RDMA_CQE_EXT_STATUS 0x1f
+#define V_CPL_RDMA_CQE_EXT_STATUS(x) ((x) << S_CPL_RDMA_CQE_EXT_STATUS)
+#define G_CPL_RDMA_CQE_EXT_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_STATUS) & M_CPL_RDMA_CQE_EXT_STATUS)
+
+#define S_CPL_RDMA_CQE_EXT_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_EXT_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_EXT_CQE_TYPE(x) ((x) << S_CPL_RDMA_CQE_EXT_CQE_TYPE)
+#define G_CPL_RDMA_CQE_EXT_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_CQE_TYPE) & M_CPL_RDMA_CQE_EXT_CQE_TYPE)
+#define F_CPL_RDMA_CQE_EXT_CQE_TYPE V_CPL_RDMA_CQE_EXT_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_EXT_WR_TYPE 0
+#define M_CPL_RDMA_CQE_EXT_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_EXT_WR_TYPE(x) ((x) << S_CPL_RDMA_CQE_EXT_WR_TYPE)
+#define G_CPL_RDMA_CQE_EXT_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_WR_TYPE) & M_CPL_RDMA_CQE_EXT_WR_TYPE)
+
+#define S_CPL_RDMA_CQE_EXT_SE 31
+#define M_CPL_RDMA_CQE_EXT_SE 0x1
+#define V_CPL_RDMA_CQE_EXT_SE(x) ((x) << S_CPL_RDMA_CQE_EXT_SE)
+#define G_CPL_RDMA_CQE_EXT_SE(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_SE) & M_CPL_RDMA_CQE_EXT_SE)
+#define F_CPL_RDMA_CQE_EXT_SE V_CPL_RDMA_CQE_EXT_SE(1U)
+
+#define S_CPL_RDMA_CQE_EXT_WR_TYPE_EXT 24
+#define M_CPL_RDMA_CQE_EXT_WR_TYPE_EXT 0x7f
+#define V_CPL_RDMA_CQE_EXT_WR_TYPE_EXT(x) \
+ ((x) << S_CPL_RDMA_CQE_EXT_WR_TYPE_EXT)
+#define G_CPL_RDMA_CQE_EXT_WR_TYPE_EXT(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_WR_TYPE_EXT) & M_CPL_RDMA_CQE_EXT_WR_TYPE_EXT)
+
+#define S_CPL_RDMA_CQE_EXT_SRQ 0
+#define M_CPL_RDMA_CQE_EXT_SRQ 0xfff
+#define V_CPL_RDMA_CQE_EXT_SRQ(x) ((x) << S_CPL_RDMA_CQE_EXT_SRQ)
+#define G_CPL_RDMA_CQE_EXT_SRQ(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_SRQ) & M_CPL_RDMA_CQE_EXT_SRQ)
+
+/* Firmware-generated extended RDMA CQE; layout identical to cpl_rdma_cqe_ext. */
+struct cpl_rdma_cqe_fw_ext {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 tid_flitcnt;		/* TID(27:8) | FLITCNT(7:0) */
+	__be32 qpid_to_wr_type;		/* QPID(31:12) EXTMODE(11) GENERATION(10) STATUS(9:5) CQE_TYPE(4) WR_TYPE(3:0) */
+	__be32 length;
+	__be32 tag;
+	__be32 msn;
+	__be32 se_to_srq;		/* SE(31) | WR_TYPE_EXT(30:24) | SRQ(11:0) */
+	__be32 rqe;
+	__be32 extinfoms[2];		/* extended info, upper 64 bits */
+	__be32 extinfols[2];		/* extended info, lower 64 bits */
+};
+
+#define S_CPL_RDMA_CQE_FW_EXT_RSSCTRL 16
+#define M_CPL_RDMA_CQE_FW_EXT_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_FW_EXT_RSSCTRL(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_RSSCTRL)
+#define G_CPL_RDMA_CQE_FW_EXT_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_RSSCTRL) & M_CPL_RDMA_CQE_FW_EXT_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_FW_EXT_CQID 0
+#define M_CPL_RDMA_CQE_FW_EXT_CQID 0xffff
+#define V_CPL_RDMA_CQE_FW_EXT_CQID(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_CQID)
+#define G_CPL_RDMA_CQE_FW_EXT_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_CQID) & M_CPL_RDMA_CQE_FW_EXT_CQID)
+
+#define S_CPL_RDMA_CQE_FW_EXT_TID 8
+#define M_CPL_RDMA_CQE_FW_EXT_TID 0xfffff
+#define V_CPL_RDMA_CQE_FW_EXT_TID(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_TID)
+#define G_CPL_RDMA_CQE_FW_EXT_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_TID) & M_CPL_RDMA_CQE_FW_EXT_TID)
+
+#define S_CPL_RDMA_CQE_FW_EXT_FLITCNT 0
+#define M_CPL_RDMA_CQE_FW_EXT_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_FW_EXT_FLITCNT(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_FLITCNT)
+#define G_CPL_RDMA_CQE_FW_EXT_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_FLITCNT) & M_CPL_RDMA_CQE_FW_EXT_FLITCNT)
+
+#define S_CPL_RDMA_CQE_FW_EXT_QPID 12
+#define M_CPL_RDMA_CQE_FW_EXT_QPID 0xfffff
+#define V_CPL_RDMA_CQE_FW_EXT_QPID(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_QPID)
+#define G_CPL_RDMA_CQE_FW_EXT_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_QPID) & M_CPL_RDMA_CQE_FW_EXT_QPID)
+
+#define S_CPL_RDMA_CQE_FW_EXT_EXTMODE 11
+#define M_CPL_RDMA_CQE_FW_EXT_EXTMODE 0x1
+#define V_CPL_RDMA_CQE_FW_EXT_EXTMODE(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_EXTMODE)
+#define G_CPL_RDMA_CQE_FW_EXT_EXTMODE(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_EXTMODE) & M_CPL_RDMA_CQE_FW_EXT_EXTMODE)
+#define F_CPL_RDMA_CQE_FW_EXT_EXTMODE V_CPL_RDMA_CQE_FW_EXT_EXTMODE(1U)
+
+#define S_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT) & \
+ M_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT \
+ V_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_FW_EXT_STATUS 5
+#define M_CPL_RDMA_CQE_FW_EXT_STATUS 0x1f
+#define V_CPL_RDMA_CQE_FW_EXT_STATUS(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_STATUS)
+#define G_CPL_RDMA_CQE_FW_EXT_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_STATUS) & M_CPL_RDMA_CQE_FW_EXT_STATUS)
+
+#define S_CPL_RDMA_CQE_FW_EXT_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_FW_EXT_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_FW_EXT_CQE_TYPE(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_CQE_TYPE)
+#define G_CPL_RDMA_CQE_FW_EXT_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_CQE_TYPE) & M_CPL_RDMA_CQE_FW_EXT_CQE_TYPE)
+#define F_CPL_RDMA_CQE_FW_EXT_CQE_TYPE V_CPL_RDMA_CQE_FW_EXT_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_FW_EXT_WR_TYPE 0
+#define M_CPL_RDMA_CQE_FW_EXT_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_FW_EXT_WR_TYPE(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_WR_TYPE)
+#define G_CPL_RDMA_CQE_FW_EXT_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_WR_TYPE) & M_CPL_RDMA_CQE_FW_EXT_WR_TYPE)
+
+#define S_CPL_RDMA_CQE_FW_EXT_SE 31
+#define M_CPL_RDMA_CQE_FW_EXT_SE 0x1
+#define V_CPL_RDMA_CQE_FW_EXT_SE(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_SE)
+#define G_CPL_RDMA_CQE_FW_EXT_SE(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_SE) & M_CPL_RDMA_CQE_FW_EXT_SE)
+#define F_CPL_RDMA_CQE_FW_EXT_SE V_CPL_RDMA_CQE_FW_EXT_SE(1U)
+
+#define S_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT 24
+#define M_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT 0x7f
+#define V_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT)
+#define G_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT) & \
+ M_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT)
+
+#define S_CPL_RDMA_CQE_FW_EXT_SRQ 0
+#define M_CPL_RDMA_CQE_FW_EXT_SRQ 0xfff
+#define V_CPL_RDMA_CQE_FW_EXT_SRQ(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_SRQ)
+#define G_CPL_RDMA_CQE_FW_EXT_SRQ(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_SRQ) & M_CPL_RDMA_CQE_FW_EXT_SRQ)
+
+/*
+ * CPL_RDMA_CQE_ERR_EXT: extended-format RDMA error completion-queue
+ * entry.  Member-for-member the same layout as struct
+ * cpl_rdma_cqe_fw_ext; packed words are decoded with the
+ * S_/M_/V_/G_CPL_RDMA_CQE_ERR_EXT_* accessor macros that follow.
+ */
+struct cpl_rdma_cqe_err_ext {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+ __be32 se_to_srq;
+ __be32 rqe;
+ __be32 extinfoms[2];
+ __be32 extinfols[2];
+};
+
+#define S_CPL_RDMA_CQE_ERR_EXT_RSSCTRL 16
+#define M_CPL_RDMA_CQE_ERR_EXT_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_ERR_EXT_RSSCTRL(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_RSSCTRL)
+#define G_CPL_RDMA_CQE_ERR_EXT_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_RSSCTRL) & M_CPL_RDMA_CQE_ERR_EXT_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_CQID 0
+#define M_CPL_RDMA_CQE_ERR_EXT_CQID 0xffff
+#define V_CPL_RDMA_CQE_ERR_EXT_CQID(x) ((x) << S_CPL_RDMA_CQE_ERR_EXT_CQID)
+#define G_CPL_RDMA_CQE_ERR_EXT_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_CQID) & M_CPL_RDMA_CQE_ERR_EXT_CQID)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_TID 8
+#define M_CPL_RDMA_CQE_ERR_EXT_TID 0xfffff
+#define V_CPL_RDMA_CQE_ERR_EXT_TID(x) ((x) << S_CPL_RDMA_CQE_ERR_EXT_TID)
+#define G_CPL_RDMA_CQE_ERR_EXT_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_TID) & M_CPL_RDMA_CQE_ERR_EXT_TID)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_FLITCNT 0
+#define M_CPL_RDMA_CQE_ERR_EXT_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_ERR_EXT_FLITCNT(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_FLITCNT)
+#define G_CPL_RDMA_CQE_ERR_EXT_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_FLITCNT) & M_CPL_RDMA_CQE_ERR_EXT_FLITCNT)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_QPID 12
+#define M_CPL_RDMA_CQE_ERR_EXT_QPID 0xfffff
+#define V_CPL_RDMA_CQE_ERR_EXT_QPID(x) ((x) << S_CPL_RDMA_CQE_ERR_EXT_QPID)
+#define G_CPL_RDMA_CQE_ERR_EXT_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_QPID) & M_CPL_RDMA_CQE_ERR_EXT_QPID)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_EXTMODE 11
+#define M_CPL_RDMA_CQE_ERR_EXT_EXTMODE 0x1
+#define V_CPL_RDMA_CQE_ERR_EXT_EXTMODE(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_EXTMODE)
+#define G_CPL_RDMA_CQE_ERR_EXT_EXTMODE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_EXTMODE) & M_CPL_RDMA_CQE_ERR_EXT_EXTMODE)
+#define F_CPL_RDMA_CQE_ERR_EXT_EXTMODE V_CPL_RDMA_CQE_ERR_EXT_EXTMODE(1U)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT) & \
+ M_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT \
+ V_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_STATUS 5
+#define M_CPL_RDMA_CQE_ERR_EXT_STATUS 0x1f
+#define V_CPL_RDMA_CQE_ERR_EXT_STATUS(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_STATUS)
+#define G_CPL_RDMA_CQE_ERR_EXT_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_STATUS) & M_CPL_RDMA_CQE_ERR_EXT_STATUS)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE)
+#define G_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE) & \
+ M_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE)
+#define F_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE V_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE 0
+#define M_CPL_RDMA_CQE_ERR_EXT_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_ERR_EXT_WR_TYPE(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE)
+#define G_CPL_RDMA_CQE_ERR_EXT_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE) & M_CPL_RDMA_CQE_ERR_EXT_WR_TYPE)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_SE 31
+#define M_CPL_RDMA_CQE_ERR_EXT_SE 0x1
+#define V_CPL_RDMA_CQE_ERR_EXT_SE(x) ((x) << S_CPL_RDMA_CQE_ERR_EXT_SE)
+#define G_CPL_RDMA_CQE_ERR_EXT_SE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_SE) & M_CPL_RDMA_CQE_ERR_EXT_SE)
+#define F_CPL_RDMA_CQE_ERR_EXT_SE V_CPL_RDMA_CQE_ERR_EXT_SE(1U)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT 24
+#define M_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT 0x7f
+#define V_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT)
+#define G_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT) & \
+ M_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_SRQ 0
+#define M_CPL_RDMA_CQE_ERR_EXT_SRQ 0xfff
+#define V_CPL_RDMA_CQE_ERR_EXT_SRQ(x) ((x) << S_CPL_RDMA_CQE_ERR_EXT_SRQ)
+#define G_CPL_RDMA_CQE_ERR_EXT_SRQ(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_SRQ) & M_CPL_RDMA_CQE_ERR_EXT_SRQ)
+
struct cpl_set_le_req {
WR_HDR;
union opcode_tid ot;
@@ -2630,6 +4341,13 @@ struct cpl_set_le_req {
};
/* cpl_set_le_req.reply_ctrl additional fields */
+#define S_LE_REQ_RXCHANNEL 14
+#define M_LE_REQ_RXCHANNEL 0x1
+#define V_LE_REQ_RXCHANNEL(x) ((x) << S_LE_REQ_RXCHANNEL)
+#define G_LE_REQ_RXCHANNEL(x) \
+ (((x) >> S_LE_REQ_RXCHANNEL) & M_LE_REQ_RXCHANNEL)
+#define F_LE_REQ_RXCHANNEL V_LE_REQ_RXCHANNEL(1U)
+
#define S_LE_REQ_IP6 13
#define V_LE_REQ_IP6(x) ((x) << S_LE_REQ_IP6)
#define F_LE_REQ_IP6 V_LE_REQ_IP6(1U)
@@ -2659,6 +4377,80 @@ struct cpl_set_le_req {
#define V_LE_REQCMD(x) ((x) << S_LE_REQCMD)
#define G_LE_REQCMD(x) (((x) >> S_LE_REQCMD) & M_LE_REQCMD)
+/*
+ * CPL_T7_SET_LE_REQ: T7-chip variant of CPL_SET_LE_REQ (presumably a
+ * lookup-engine entry write request — confirm against firmware docs).
+ * noreply_to_channel packs the control bits decoded by the
+ * CPL_T7_SET_LE_REQ_* macros below; mask/value pairs are two-word
+ * (64-bit) quantities.
+ */
+struct cpl_t7_set_le_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 noreply_to_channel;
+ __be32 mask1[2];
+ __be32 mask0[2];
+ __be32 value1[2];
+ __be32 value0[2];
+};
+
+#define S_CPL_T7_SET_LE_REQ_INDEX 0
+#define M_CPL_T7_SET_LE_REQ_INDEX 0xffffff
+#define V_CPL_T7_SET_LE_REQ_INDEX(x) ((x) << S_CPL_T7_SET_LE_REQ_INDEX)
+#define G_CPL_T7_SET_LE_REQ_INDEX(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_INDEX) & M_CPL_T7_SET_LE_REQ_INDEX)
+
+#define S_CPL_T7_SET_LE_REQ_NOREPLY 31
+#define M_CPL_T7_SET_LE_REQ_NOREPLY 0x1
+#define V_CPL_T7_SET_LE_REQ_NOREPLY(x) ((x) << S_CPL_T7_SET_LE_REQ_NOREPLY)
+#define G_CPL_T7_SET_LE_REQ_NOREPLY(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_NOREPLY) & M_CPL_T7_SET_LE_REQ_NOREPLY)
+#define F_CPL_T7_SET_LE_REQ_NOREPLY V_CPL_T7_SET_LE_REQ_NOREPLY(1U)
+
+#define S_CPL_T7_SET_LE_REQ_RXCHANNEL 28
+#define M_CPL_T7_SET_LE_REQ_RXCHANNEL 0x7
+#define V_CPL_T7_SET_LE_REQ_RXCHANNEL(x) \
+ ((x) << S_CPL_T7_SET_LE_REQ_RXCHANNEL)
+#define G_CPL_T7_SET_LE_REQ_RXCHANNEL(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_RXCHANNEL) & M_CPL_T7_SET_LE_REQ_RXCHANNEL)
+
+#define S_CPL_T7_SET_LE_REQ_QUEUE 16
+#define M_CPL_T7_SET_LE_REQ_QUEUE 0xfff
+#define V_CPL_T7_SET_LE_REQ_QUEUE(x) ((x) << S_CPL_T7_SET_LE_REQ_QUEUE)
+#define G_CPL_T7_SET_LE_REQ_QUEUE(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_QUEUE) & M_CPL_T7_SET_LE_REQ_QUEUE)
+
+#define S_CPL_T7_SET_LE_REQ_REQCMD 12
+#define M_CPL_T7_SET_LE_REQ_REQCMD 0xf
+#define V_CPL_T7_SET_LE_REQ_REQCMD(x) ((x) << S_CPL_T7_SET_LE_REQ_REQCMD)
+#define G_CPL_T7_SET_LE_REQ_REQCMD(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_REQCMD) & M_CPL_T7_SET_LE_REQ_REQCMD)
+
+#define S_CPL_T7_SET_LE_REQ_REQSIZE 9
+#define M_CPL_T7_SET_LE_REQ_REQSIZE 0x7
+#define V_CPL_T7_SET_LE_REQ_REQSIZE(x) ((x) << S_CPL_T7_SET_LE_REQ_REQSIZE)
+#define G_CPL_T7_SET_LE_REQ_REQSIZE(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_REQSIZE) & M_CPL_T7_SET_LE_REQ_REQSIZE)
+
+#define S_CPL_T7_SET_LE_REQ_MORE 8
+#define M_CPL_T7_SET_LE_REQ_MORE 0x1
+#define V_CPL_T7_SET_LE_REQ_MORE(x) ((x) << S_CPL_T7_SET_LE_REQ_MORE)
+#define G_CPL_T7_SET_LE_REQ_MORE(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_MORE) & M_CPL_T7_SET_LE_REQ_MORE)
+#define F_CPL_T7_SET_LE_REQ_MORE V_CPL_T7_SET_LE_REQ_MORE(1U)
+
+#define S_CPL_T7_SET_LE_REQ_OFFSET 5
+#define M_CPL_T7_SET_LE_REQ_OFFSET 0x7
+#define V_CPL_T7_SET_LE_REQ_OFFSET(x) ((x) << S_CPL_T7_SET_LE_REQ_OFFSET)
+#define G_CPL_T7_SET_LE_REQ_OFFSET(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_OFFSET) & M_CPL_T7_SET_LE_REQ_OFFSET)
+
+#define S_CPL_T7_SET_LE_REQ_REQTYPE 4
+#define M_CPL_T7_SET_LE_REQ_REQTYPE 0x1
+#define V_CPL_T7_SET_LE_REQ_REQTYPE(x) ((x) << S_CPL_T7_SET_LE_REQ_REQTYPE)
+#define G_CPL_T7_SET_LE_REQ_REQTYPE(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_REQTYPE) & M_CPL_T7_SET_LE_REQ_REQTYPE)
+#define F_CPL_T7_SET_LE_REQ_REQTYPE V_CPL_T7_SET_LE_REQ_REQTYPE(1U)
+
+#define S_CPL_T7_SET_LE_REQ_CHANNEL 0
+#define M_CPL_T7_SET_LE_REQ_CHANNEL 0x3
+#define V_CPL_T7_SET_LE_REQ_CHANNEL(x) ((x) << S_CPL_T7_SET_LE_REQ_CHANNEL)
+#define G_CPL_T7_SET_LE_REQ_CHANNEL(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_CHANNEL) & M_CPL_T7_SET_LE_REQ_CHANNEL)
+
struct cpl_set_le_rpl {
RSS_HDR
union opcode_tid ot;
@@ -2710,6 +4502,7 @@ enum {
FW_TYPE_WRERR_RPL = 5,
FW_TYPE_PI_ERR = 6,
FW_TYPE_TLS_KEY = 7,
+ FW_TYPE_IPSEC_SA = 8,
};
struct cpl_fw2_pld {
@@ -2811,6 +4604,8 @@ enum {
FW6_TYPE_RSSCPL = FW_TYPE_RSSCPL,
FW6_TYPE_WRERR_RPL = FW_TYPE_WRERR_RPL,
FW6_TYPE_PI_ERR = FW_TYPE_PI_ERR,
+ FW6_TYPE_TLS_KEY = FW_TYPE_TLS_KEY,
+ FW6_TYPE_IPSEC_SA = FW_TYPE_IPSEC_SA,
NUM_FW6_TYPES
};
@@ -2932,6 +4727,10 @@ struct ulp_mem_io {
#define M_ULP_MEMIO_DATA_LEN 0x1F
#define V_ULP_MEMIO_DATA_LEN(x) ((x) << S_ULP_MEMIO_DATA_LEN)
+#define S_T7_ULP_MEMIO_DATA_LEN 0
+#define M_T7_ULP_MEMIO_DATA_LEN 0x7FF
+#define V_T7_ULP_MEMIO_DATA_LEN(x) ((x) << S_T7_ULP_MEMIO_DATA_LEN)
+
/* ULP_TXPKT field values */
enum {
ULP_TXPKT_DEST_TP = 0,
@@ -2960,11 +4759,25 @@ struct ulp_txpkt {
(((x) >> S_ULP_TXPKT_CHANNELID) & M_ULP_TXPKT_CHANNELID)
#define F_ULP_TXPKT_CHANNELID V_ULP_TXPKT_CHANNELID(1U)
+#define S_T7_ULP_TXPKT_CHANNELID 22
+#define M_T7_ULP_TXPKT_CHANNELID 0x3
+#define V_T7_ULP_TXPKT_CHANNELID(x) ((x) << S_T7_ULP_TXPKT_CHANNELID)
+#define G_T7_ULP_TXPKT_CHANNELID(x) \
+ (((x) >> S_T7_ULP_TXPKT_CHANNELID) & M_T7_ULP_TXPKT_CHANNELID)
+#define F_T7_ULP_TXPKT_CHANNELID V_T7_ULP_TXPKT_CHANNELID(1U)
+
/* ulp_txpkt.cmd_dest fields */
#define S_ULP_TXPKT_DEST 16
#define M_ULP_TXPKT_DEST 0x3
#define V_ULP_TXPKT_DEST(x) ((x) << S_ULP_TXPKT_DEST)
+#define S_ULP_TXPKT_CMDMORE 15
+#define M_ULP_TXPKT_CMDMORE 0x1
+#define V_ULP_TXPKT_CMDMORE(x) ((x) << S_ULP_TXPKT_CMDMORE)
+#define G_ULP_TXPKT_CMDMORE(x) \
+ (((x) >> S_ULP_TXPKT_CMDMORE) & M_ULP_TXPKT_CMDMORE)
+#define F_ULP_TXPKT_CMDMORE V_ULP_TXPKT_CMDMORE(1U)
+
#define S_ULP_TXPKT_FID 4
#define M_ULP_TXPKT_FID 0x7ff
#define V_ULP_TXPKT_FID(x) ((x) << S_ULP_TXPKT_FID)
@@ -2978,13 +4791,15 @@ enum cpl_tx_tnl_lso_type {
TX_TNL_TYPE_NVGRE,
TX_TNL_TYPE_VXLAN,
TX_TNL_TYPE_GENEVE,
+ TX_TNL_TYPE_IPSEC,
};
struct cpl_tx_tnl_lso {
__be32 op_to_IpIdSplitOut;
__be16 IpIdOffsetOut;
__be16 UdpLenSetOut_to_TnlHdrLen;
- __be64 r1;
+ __be32 ipsecen_to_rocev2;
+ __be32 roce_eth;
__be32 Flow_to_TcpHdrLen;
__be16 IpIdOffset;
__be16 IpIdSplit_to_Mss;
@@ -3098,6 +4913,68 @@ struct cpl_tx_tnl_lso {
#define G_CPL_TX_TNL_LSO_TNLHDRLEN(x) \
(((x) >> S_CPL_TX_TNL_LSO_TNLHDRLEN) & M_CPL_TX_TNL_LSO_TNLHDRLEN)
+#define S_CPL_TX_TNL_LSO_IPSECEN 31
+#define M_CPL_TX_TNL_LSO_IPSECEN 0x1
+#define V_CPL_TX_TNL_LSO_IPSECEN(x) ((x) << S_CPL_TX_TNL_LSO_IPSECEN)
+#define G_CPL_TX_TNL_LSO_IPSECEN(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_IPSECEN) & M_CPL_TX_TNL_LSO_IPSECEN)
+#define F_CPL_TX_TNL_LSO_IPSECEN V_CPL_TX_TNL_LSO_IPSECEN(1U)
+
+#define S_CPL_TX_TNL_LSO_ENCAPDIS 30
+#define M_CPL_TX_TNL_LSO_ENCAPDIS 0x1
+#define V_CPL_TX_TNL_LSO_ENCAPDIS(x) ((x) << S_CPL_TX_TNL_LSO_ENCAPDIS)
+#define G_CPL_TX_TNL_LSO_ENCAPDIS(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_ENCAPDIS) & M_CPL_TX_TNL_LSO_ENCAPDIS)
+#define F_CPL_TX_TNL_LSO_ENCAPDIS V_CPL_TX_TNL_LSO_ENCAPDIS(1U)
+
+#define S_CPL_TX_TNL_LSO_IPSECMODE 29
+#define M_CPL_TX_TNL_LSO_IPSECMODE 0x1
+#define V_CPL_TX_TNL_LSO_IPSECMODE(x) ((x) << S_CPL_TX_TNL_LSO_IPSECMODE)
+#define G_CPL_TX_TNL_LSO_IPSECMODE(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_IPSECMODE) & M_CPL_TX_TNL_LSO_IPSECMODE)
+#define F_CPL_TX_TNL_LSO_IPSECMODE V_CPL_TX_TNL_LSO_IPSECMODE(1U)
+
+#define S_CPL_TX_TNL_LSO_IPSECTNLIPV6 28
+#define M_CPL_TX_TNL_LSO_IPSECTNLIPV6 0x1
+#define V_CPL_TX_TNL_LSO_IPSECTNLIPV6(x) \
+ ((x) << S_CPL_TX_TNL_LSO_IPSECTNLIPV6)
+#define G_CPL_TX_TNL_LSO_IPSECTNLIPV6(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_IPSECTNLIPV6) & M_CPL_TX_TNL_LSO_IPSECTNLIPV6)
+#define F_CPL_TX_TNL_LSO_IPSECTNLIPV6 V_CPL_TX_TNL_LSO_IPSECTNLIPV6(1U)
+
+#define S_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN 20
+#define M_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN 0xff
+#define V_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN(x) \
+ ((x) << S_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN)
+#define G_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN) & \
+ M_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN)
+
+#define S_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT 19
+#define M_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT 0x1
+#define V_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT(x) \
+ ((x) << S_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT)
+#define G_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT) & \
+ M_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT)
+#define F_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT \
+ V_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT(1U)
+
+#define S_CPL_TX_TNL_LSO_ROCEV2 18
+#define M_CPL_TX_TNL_LSO_ROCEV2 0x1
+#define V_CPL_TX_TNL_LSO_ROCEV2(x) ((x) << S_CPL_TX_TNL_LSO_ROCEV2)
+#define G_CPL_TX_TNL_LSO_ROCEV2(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_ROCEV2) & M_CPL_TX_TNL_LSO_ROCEV2)
+#define F_CPL_TX_TNL_LSO_ROCEV2 V_CPL_TX_TNL_LSO_ROCEV2(1U)
+
+#define S_CPL_TX_TNL_LSO_UDPCHKUPDOUT 17
+#define M_CPL_TX_TNL_LSO_UDPCHKUPDOUT 0x1
+#define V_CPL_TX_TNL_LSO_UDPCHKUPDOUT(x) \
+ ((x) << S_CPL_TX_TNL_LSO_UDPCHKUPDOUT)
+#define G_CPL_TX_TNL_LSO_UDPCHKUPDOUT(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_UDPCHKUPDOUT) & M_CPL_TX_TNL_LSO_UDPCHKUPDOUT)
+#define F_CPL_TX_TNL_LSO_UDPCHKUPDOUT V_CPL_TX_TNL_LSO_UDPCHKUPDOUT(1U)
+
#define S_CPL_TX_TNL_LSO_FLOW 21
#define M_CPL_TX_TNL_LSO_FLOW 0x1
#define V_CPL_TX_TNL_LSO_FLOW(x) ((x) << S_CPL_TX_TNL_LSO_FLOW)
@@ -3180,6 +5057,12 @@ struct cpl_rx_mps_pkt {
#define G_CPL_RX_MPS_PKT_TYPE(x) \
(((x) >> S_CPL_RX_MPS_PKT_TYPE) & M_CPL_RX_MPS_PKT_TYPE)
+#define S_CPL_RX_MPS_PKT_LENGTH 0
+#define M_CPL_RX_MPS_PKT_LENGTH 0xffff
+#define V_CPL_RX_MPS_PKT_LENGTH(x) ((x) << S_CPL_RX_MPS_PKT_LENGTH)
+#define G_CPL_RX_MPS_PKT_LENGTH(x) \
+ (((x) >> S_CPL_RX_MPS_PKT_LENGTH) & M_CPL_RX_MPS_PKT_LENGTH)
+
/*
* Values for CPL_RX_MPS_PKT_TYPE, a bit-wise orthogonal field.
*/
@@ -3188,6 +5071,88 @@ struct cpl_rx_mps_pkt {
#define X_CPL_RX_MPS_PKT_TYPE_QFC (1 << 2)
#define X_CPL_RX_MPS_PKT_TYPE_PTP (1 << 3)
+/*
+ * CPL_T7_RX_MPS_PKT: T7-chip variant of CPL_RX_MPS_PKT (RSS-delivered,
+ * note RSS_HDR rather than WR_HDR).  Packed fields are decoded with
+ * the CPL_T7_RX_MPS_PKT_* macros below; which word (ot vs. length_pkd)
+ * carries each field is per the hardware spec — not evident here.
+ */
+struct cpl_t7_rx_mps_pkt {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 length_pkd;
+};
+
+#define S_CPL_T7_RX_MPS_PKT_TYPE 20
+#define M_CPL_T7_RX_MPS_PKT_TYPE 0xf
+#define V_CPL_T7_RX_MPS_PKT_TYPE(x) ((x) << S_CPL_T7_RX_MPS_PKT_TYPE)
+#define G_CPL_T7_RX_MPS_PKT_TYPE(x) \
+ (((x) >> S_CPL_T7_RX_MPS_PKT_TYPE) & M_CPL_T7_RX_MPS_PKT_TYPE)
+
+#define S_CPL_T7_RX_MPS_PKT_INTERFACE 16
+#define M_CPL_T7_RX_MPS_PKT_INTERFACE 0xf
+#define V_CPL_T7_RX_MPS_PKT_INTERFACE(x) \
+ ((x) << S_CPL_T7_RX_MPS_PKT_INTERFACE)
+#define G_CPL_T7_RX_MPS_PKT_INTERFACE(x) \
+ (((x) >> S_CPL_T7_RX_MPS_PKT_INTERFACE) & M_CPL_T7_RX_MPS_PKT_INTERFACE)
+
+#define S_CPL_T7_RX_MPS_PKT_TRUNCATED 7
+#define M_CPL_T7_RX_MPS_PKT_TRUNCATED 0x1
+#define V_CPL_T7_RX_MPS_PKT_TRUNCATED(x) \
+ ((x) << S_CPL_T7_RX_MPS_PKT_TRUNCATED)
+#define G_CPL_T7_RX_MPS_PKT_TRUNCATED(x) \
+ (((x) >> S_CPL_T7_RX_MPS_PKT_TRUNCATED) & M_CPL_T7_RX_MPS_PKT_TRUNCATED)
+#define F_CPL_T7_RX_MPS_PKT_TRUNCATED V_CPL_T7_RX_MPS_PKT_TRUNCATED(1U)
+
+#define S_CPL_T7_RX_MPS_PKT_PKTERR 6
+#define M_CPL_T7_RX_MPS_PKT_PKTERR 0x1
+#define V_CPL_T7_RX_MPS_PKT_PKTERR(x) ((x) << S_CPL_T7_RX_MPS_PKT_PKTERR)
+#define G_CPL_T7_RX_MPS_PKT_PKTERR(x) \
+ (((x) >> S_CPL_T7_RX_MPS_PKT_PKTERR) & M_CPL_T7_RX_MPS_PKT_PKTERR)
+#define F_CPL_T7_RX_MPS_PKT_PKTERR V_CPL_T7_RX_MPS_PKT_PKTERR(1U)
+
+#define S_CPL_T7_RX_MPS_PKT_LENGTH 0
+#define M_CPL_T7_RX_MPS_PKT_LENGTH 0xffff
+#define V_CPL_T7_RX_MPS_PKT_LENGTH(x) ((x) << S_CPL_T7_RX_MPS_PKT_LENGTH)
+#define G_CPL_T7_RX_MPS_PKT_LENGTH(x) \
+ (((x) >> S_CPL_T7_RX_MPS_PKT_LENGTH) & M_CPL_T7_RX_MPS_PKT_LENGTH)
+
+/*
+ * CPL_TX_TLS_PDU: TLS transmit PDU descriptor.  pldlen_pkd and
+ * customtype_customprotover are decoded with the CPL_TX_TLS_PDU_*
+ * macros below; scmd0/scmd1 presumably carry the crypto SCMD words
+ * (see the SCMD_* macros elsewhere in this header) — confirm.
+ */
+struct cpl_tx_tls_pdu {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 pldlen_pkd;
+ __be32 customtype_customprotover;
+ __be32 r2_lo;
+ __be32 scmd0[2];
+ __be32 scmd1[2];
+};
+
+#define S_CPL_TX_TLS_PDU_DATATYPE 20
+#define M_CPL_TX_TLS_PDU_DATATYPE 0xf
+#define V_CPL_TX_TLS_PDU_DATATYPE(x) ((x) << S_CPL_TX_TLS_PDU_DATATYPE)
+#define G_CPL_TX_TLS_PDU_DATATYPE(x) \
+ (((x) >> S_CPL_TX_TLS_PDU_DATATYPE) & M_CPL_TX_TLS_PDU_DATATYPE)
+
+#define S_CPL_TX_TLS_PDU_CPLLEN 16
+#define M_CPL_TX_TLS_PDU_CPLLEN 0xf
+#define V_CPL_TX_TLS_PDU_CPLLEN(x) ((x) << S_CPL_TX_TLS_PDU_CPLLEN)
+#define G_CPL_TX_TLS_PDU_CPLLEN(x) \
+ (((x) >> S_CPL_TX_TLS_PDU_CPLLEN) & M_CPL_TX_TLS_PDU_CPLLEN)
+
+#define S_CPL_TX_TLS_PDU_PLDLEN 0
+#define M_CPL_TX_TLS_PDU_PLDLEN 0xfffff
+#define V_CPL_TX_TLS_PDU_PLDLEN(x) ((x) << S_CPL_TX_TLS_PDU_PLDLEN)
+#define G_CPL_TX_TLS_PDU_PLDLEN(x) \
+ (((x) >> S_CPL_TX_TLS_PDU_PLDLEN) & M_CPL_TX_TLS_PDU_PLDLEN)
+
+#define S_CPL_TX_TLS_PDU_CUSTOMTYPE 24
+#define M_CPL_TX_TLS_PDU_CUSTOMTYPE 0xff
+#define V_CPL_TX_TLS_PDU_CUSTOMTYPE(x) ((x) << S_CPL_TX_TLS_PDU_CUSTOMTYPE)
+#define G_CPL_TX_TLS_PDU_CUSTOMTYPE(x) \
+ (((x) >> S_CPL_TX_TLS_PDU_CUSTOMTYPE) & M_CPL_TX_TLS_PDU_CUSTOMTYPE)
+
+#define S_CPL_TX_TLS_PDU_CUSTOMPROTOVER 8
+#define M_CPL_TX_TLS_PDU_CUSTOMPROTOVER 0xffff
+#define V_CPL_TX_TLS_PDU_CUSTOMPROTOVER(x) \
+ ((x) << S_CPL_TX_TLS_PDU_CUSTOMPROTOVER)
+#define G_CPL_TX_TLS_PDU_CUSTOMPROTOVER(x) \
+ (((x) >> S_CPL_TX_TLS_PDU_CUSTOMPROTOVER) & \
+ M_CPL_TX_TLS_PDU_CUSTOMPROTOVER)
+
struct cpl_tx_tls_sfo {
__be32 op_to_seg_len;
__be32 pld_len;
@@ -3223,6 +5188,12 @@ struct cpl_tx_tls_sfo {
#define G_CPL_TX_TLS_SFO_SEG_LEN(x) \
(((x) >> S_CPL_TX_TLS_SFO_SEG_LEN) & M_CPL_TX_TLS_SFO_SEG_LEN)
+#define S_CPL_TX_TLS_SFO_PLDLEN 0
+#define M_CPL_TX_TLS_SFO_PLDLEN 0xfffff
+#define V_CPL_TX_TLS_SFO_PLDLEN(x) ((x) << S_CPL_TX_TLS_SFO_PLDLEN)
+#define G_CPL_TX_TLS_SFO_PLDLEN(x) \
+ (((x) >> S_CPL_TX_TLS_SFO_PLDLEN) & M_CPL_TX_TLS_SFO_PLDLEN)
+
#define S_CPL_TX_TLS_SFO_TYPE 24
#define M_CPL_TX_TLS_SFO_TYPE 0xff
#define V_CPL_TX_TLS_SFO_TYPE(x) ((x) << S_CPL_TX_TLS_SFO_TYPE)
@@ -3454,6 +5425,119 @@ struct cpl_rx_tls_cmp {
#define G_SCMD_HDR_LEN(x) \
(((x) >> S_SCMD_HDR_LEN) & M_SCMD_HDR_LEN)
+/*
+ * CPL_RX_PKT_IPSEC: receive descriptor for an IPsec-processed packet.
+ * rxchannel_to_ethhdrlen and iphdrlen_to_rxerror pack the per-packet
+ * parse results decoded by the CPL_RX_PKT_IPSEC_* macros below
+ * (channel, MAC match, header lengths, IPsec type/error, RX error).
+ */
+struct cpl_rx_pkt_ipsec {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 vlan;
+ __be16 length;
+ __be32 rxchannel_to_ethhdrlen;
+ __be32 iphdrlen_to_rxerror;
+ __be64 timestamp;
+};
+
+#define S_CPL_RX_PKT_IPSEC_OPCODE 24
+#define M_CPL_RX_PKT_IPSEC_OPCODE 0xff
+#define V_CPL_RX_PKT_IPSEC_OPCODE(x) ((x) << S_CPL_RX_PKT_IPSEC_OPCODE)
+#define G_CPL_RX_PKT_IPSEC_OPCODE(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_OPCODE) & M_CPL_RX_PKT_IPSEC_OPCODE)
+
+#define S_CPL_RX_PKT_IPSEC_IPFRAG 23
+#define M_CPL_RX_PKT_IPSEC_IPFRAG 0x1
+#define V_CPL_RX_PKT_IPSEC_IPFRAG(x) ((x) << S_CPL_RX_PKT_IPSEC_IPFRAG)
+#define G_CPL_RX_PKT_IPSEC_IPFRAG(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_IPFRAG) & M_CPL_RX_PKT_IPSEC_IPFRAG)
+#define F_CPL_RX_PKT_IPSEC_IPFRAG V_CPL_RX_PKT_IPSEC_IPFRAG(1U)
+
+#define S_CPL_RX_PKT_IPSEC_VLAN_EX 22
+#define M_CPL_RX_PKT_IPSEC_VLAN_EX 0x1
+#define V_CPL_RX_PKT_IPSEC_VLAN_EX(x) ((x) << S_CPL_RX_PKT_IPSEC_VLAN_EX)
+#define G_CPL_RX_PKT_IPSEC_VLAN_EX(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_VLAN_EX) & M_CPL_RX_PKT_IPSEC_VLAN_EX)
+#define F_CPL_RX_PKT_IPSEC_VLAN_EX V_CPL_RX_PKT_IPSEC_VLAN_EX(1U)
+
+#define S_CPL_RX_PKT_IPSEC_IPMI 21
+#define M_CPL_RX_PKT_IPSEC_IPMI 0x1
+#define V_CPL_RX_PKT_IPSEC_IPMI(x) ((x) << S_CPL_RX_PKT_IPSEC_IPMI)
+#define G_CPL_RX_PKT_IPSEC_IPMI(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_IPMI) & M_CPL_RX_PKT_IPSEC_IPMI)
+#define F_CPL_RX_PKT_IPSEC_IPMI V_CPL_RX_PKT_IPSEC_IPMI(1U)
+
+#define S_CPL_RX_PKT_IPSEC_INTERFACE 16
+#define M_CPL_RX_PKT_IPSEC_INTERFACE 0xf
+#define V_CPL_RX_PKT_IPSEC_INTERFACE(x) ((x) << S_CPL_RX_PKT_IPSEC_INTERFACE)
+#define G_CPL_RX_PKT_IPSEC_INTERFACE(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_INTERFACE) & M_CPL_RX_PKT_IPSEC_INTERFACE)
+
+#define S_CPL_RX_PKT_IPSEC_IPSECEXTERR 12
+#define M_CPL_RX_PKT_IPSEC_IPSECEXTERR 0xf
+#define V_CPL_RX_PKT_IPSEC_IPSECEXTERR(x) \
+ ((x) << S_CPL_RX_PKT_IPSEC_IPSECEXTERR)
+#define G_CPL_RX_PKT_IPSEC_IPSECEXTERR(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_IPSECEXTERR) & M_CPL_RX_PKT_IPSEC_IPSECEXTERR)
+
+#define S_CPL_RX_PKT_IPSEC_IPSECTYPE 10
+#define M_CPL_RX_PKT_IPSEC_IPSECTYPE 0x3
+#define V_CPL_RX_PKT_IPSEC_IPSECTYPE(x) ((x) << S_CPL_RX_PKT_IPSEC_IPSECTYPE)
+#define G_CPL_RX_PKT_IPSEC_IPSECTYPE(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_IPSECTYPE) & M_CPL_RX_PKT_IPSEC_IPSECTYPE)
+
+#define S_CPL_RX_PKT_IPSEC_OUTIPHDRLEN 0
+#define M_CPL_RX_PKT_IPSEC_OUTIPHDRLEN 0x3ff
+#define V_CPL_RX_PKT_IPSEC_OUTIPHDRLEN(x) \
+ ((x) << S_CPL_RX_PKT_IPSEC_OUTIPHDRLEN)
+#define G_CPL_RX_PKT_IPSEC_OUTIPHDRLEN(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_OUTIPHDRLEN) & M_CPL_RX_PKT_IPSEC_OUTIPHDRLEN)
+
+#define S_CPL_RX_PKT_IPSEC_RXCHANNEL 28
+#define M_CPL_RX_PKT_IPSEC_RXCHANNEL 0xf
+#define V_CPL_RX_PKT_IPSEC_RXCHANNEL(x) ((x) << S_CPL_RX_PKT_IPSEC_RXCHANNEL)
+#define G_CPL_RX_PKT_IPSEC_RXCHANNEL(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_RXCHANNEL) & M_CPL_RX_PKT_IPSEC_RXCHANNEL)
+
+#define S_CPL_RX_PKT_IPSEC_FLAGS 20
+#define M_CPL_RX_PKT_IPSEC_FLAGS 0xff
+#define V_CPL_RX_PKT_IPSEC_FLAGS(x) ((x) << S_CPL_RX_PKT_IPSEC_FLAGS)
+#define G_CPL_RX_PKT_IPSEC_FLAGS(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_FLAGS) & M_CPL_RX_PKT_IPSEC_FLAGS)
+
+#define S_CPL_RX_PKT_IPSEC_MACMATCHTYPE 17
+#define M_CPL_RX_PKT_IPSEC_MACMATCHTYPE 0x7
+#define V_CPL_RX_PKT_IPSEC_MACMATCHTYPE(x) \
+ ((x) << S_CPL_RX_PKT_IPSEC_MACMATCHTYPE)
+#define G_CPL_RX_PKT_IPSEC_MACMATCHTYPE(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_MACMATCHTYPE) & \
+ M_CPL_RX_PKT_IPSEC_MACMATCHTYPE)
+
+#define S_CPL_RX_PKT_IPSEC_MACINDEX 8
+#define M_CPL_RX_PKT_IPSEC_MACINDEX 0x1ff
+#define V_CPL_RX_PKT_IPSEC_MACINDEX(x) ((x) << S_CPL_RX_PKT_IPSEC_MACINDEX)
+#define G_CPL_RX_PKT_IPSEC_MACINDEX(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_MACINDEX) & M_CPL_RX_PKT_IPSEC_MACINDEX)
+
+#define S_CPL_RX_PKT_IPSEC_ETHHDRLEN 0
+#define M_CPL_RX_PKT_IPSEC_ETHHDRLEN 0xff
+#define V_CPL_RX_PKT_IPSEC_ETHHDRLEN(x) ((x) << S_CPL_RX_PKT_IPSEC_ETHHDRLEN)
+#define G_CPL_RX_PKT_IPSEC_ETHHDRLEN(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_ETHHDRLEN) & M_CPL_RX_PKT_IPSEC_ETHHDRLEN)
+
+#define S_CPL_RX_PKT_IPSEC_IPHDRLEN 22
+#define M_CPL_RX_PKT_IPSEC_IPHDRLEN 0x3ff
+#define V_CPL_RX_PKT_IPSEC_IPHDRLEN(x) ((x) << S_CPL_RX_PKT_IPSEC_IPHDRLEN)
+#define G_CPL_RX_PKT_IPSEC_IPHDRLEN(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_IPHDRLEN) & M_CPL_RX_PKT_IPSEC_IPHDRLEN)
+
+#define S_CPL_RX_PKT_IPSEC_TCPHDRLEN 16
+#define M_CPL_RX_PKT_IPSEC_TCPHDRLEN 0x3f
+#define V_CPL_RX_PKT_IPSEC_TCPHDRLEN(x) ((x) << S_CPL_RX_PKT_IPSEC_TCPHDRLEN)
+#define G_CPL_RX_PKT_IPSEC_TCPHDRLEN(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_TCPHDRLEN) & M_CPL_RX_PKT_IPSEC_TCPHDRLEN)
+
+#define S_CPL_RX_PKT_IPSEC_RXERROR 0
+#define M_CPL_RX_PKT_IPSEC_RXERROR 0xffff
+#define V_CPL_RX_PKT_IPSEC_RXERROR(x) ((x) << S_CPL_RX_PKT_IPSEC_RXERROR)
+#define G_CPL_RX_PKT_IPSEC_RXERROR(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_RXERROR) & M_CPL_RX_PKT_IPSEC_RXERROR)
+
struct cpl_tx_sec_pdu {
__be32 op_ivinsrtofst;
__be32 pldlen;
@@ -3478,6 +5562,13 @@ struct cpl_tx_sec_pdu {
(((x) >> S_CPL_TX_SEC_PDU_RXCHID) & M_CPL_TX_SEC_PDU_RXCHID)
#define F_CPL_TX_SEC_PDU_RXCHID V_CPL_TX_SEC_PDU_RXCHID(1U)
+#define S_T7_CPL_TX_SEC_PDU_RXCHID 22
+#define M_T7_CPL_TX_SEC_PDU_RXCHID 0x3
+#define V_T7_CPL_TX_SEC_PDU_RXCHID(x) ((x) << S_T7_CPL_TX_SEC_PDU_RXCHID)
+#define G_T7_CPL_TX_SEC_PDU_RXCHID(x) \
+(((x) >> S_T7_CPL_TX_SEC_PDU_RXCHID) & M_T7_CPL_TX_SEC_PDU_RXCHID)
+#define F_T7_CPL_TX_SEC_PDU_RXCHID V_T7_CPL_TX_SEC_PDU_RXCHID(1U)
+
/* Ack Follows */
#define S_CPL_TX_SEC_PDU_ACKFOLLOWS 21
#define M_CPL_TX_SEC_PDU_ACKFOLLOWS 0x1
@@ -3501,6 +5592,13 @@ struct cpl_tx_sec_pdu {
#define G_CPL_TX_SEC_PDU_CPLLEN(x) \
(((x) >> S_CPL_TX_SEC_PDU_CPLLEN) & M_CPL_TX_SEC_PDU_CPLLEN)
+#define S_CPL_TX_SEC_PDU_ACKNEXT 15
+#define M_CPL_TX_SEC_PDU_ACKNEXT 0x1
+#define V_CPL_TX_SEC_PDU_ACKNEXT(x) ((x) << S_CPL_TX_SEC_PDU_ACKNEXT)
+#define G_CPL_TX_SEC_PDU_ACKNEXT(x) \
+ (((x) >> S_CPL_TX_SEC_PDU_ACKNEXT) & M_CPL_TX_SEC_PDU_ACKNEXT)
+#define F_CPL_TX_SEC_PDU_ACKNEXT V_CPL_TX_SEC_PDU_ACKNEXT(1U)
+
/* PlaceHolder */
#define S_CPL_TX_SEC_PDU_PLACEHOLDER 10
#define M_CPL_TX_SEC_PDU_PLACEHOLDER 0x1
@@ -3517,6 +5615,12 @@ struct cpl_tx_sec_pdu {
(((x) >> S_CPL_TX_SEC_PDU_IVINSRTOFST) & \
M_CPL_TX_SEC_PDU_IVINSRTOFST)
+#define S_CPL_TX_SEC_PDU_PLDLEN 0
+#define M_CPL_TX_SEC_PDU_PLDLEN 0xfffff
+#define V_CPL_TX_SEC_PDU_PLDLEN(x) ((x) << S_CPL_TX_SEC_PDU_PLDLEN)
+#define G_CPL_TX_SEC_PDU_PLDLEN(x) \
+ (((x) >> S_CPL_TX_SEC_PDU_PLDLEN) & M_CPL_TX_SEC_PDU_PLDLEN)
+
/* AadStartOffset: Offset in bytes for AAD start from
* the first byte following
* the pkt headers (0-255
@@ -3666,6 +5770,62 @@ struct cpl_rx_phys_dsgl {
(((x) >> S_CPL_RX_PHYS_DSGL_NOOFSGENTR) & \
M_CPL_RX_PHYS_DSGL_NOOFSGENTR)
+/*
+ * CPL_T7_RX_PHYS_DSGL: T7-chip variant of CPL_RX_PHYS_DSGL (physical
+ * scatter/gather descriptor header).  PhysAddrFields_lo_to_NumSGE is
+ * decoded with the CPL_T7_RX_PHYS_DSGL_* macros below; RSSCopy is a
+ * verbatim copy of RSS words — TODO confirm against hardware spec.
+ */
+struct cpl_t7_rx_phys_dsgl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 PhysAddrFields_lo_to_NumSGE;
+ __be32 RSSCopy[2];
+};
+
+#define S_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI 0
+#define M_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI 0xffffff
+#define V_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI(x) \
+ ((x) << S_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI)
+#define G_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI(x) \
+ (((x) >> S_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI) & \
+ M_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI)
+
+#define S_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO 16
+#define M_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO 0xffff
+#define V_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO(x) \
+ ((x) << S_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO)
+#define G_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO(x) \
+ (((x) >> S_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO) & \
+ M_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO)
+
+#define S_CPL_T7_RX_PHYS_DSGL_NUMSGEERR 11
+#define M_CPL_T7_RX_PHYS_DSGL_NUMSGEERR 0x1
+#define V_CPL_T7_RX_PHYS_DSGL_NUMSGEERR(x) \
+ ((x) << S_CPL_T7_RX_PHYS_DSGL_NUMSGEERR)
+#define G_CPL_T7_RX_PHYS_DSGL_NUMSGEERR(x) \
+ (((x) >> S_CPL_T7_RX_PHYS_DSGL_NUMSGEERR) & M_CPL_T7_RX_PHYS_DSGL_NUMSGEERR)
+#define F_CPL_T7_RX_PHYS_DSGL_NUMSGEERR V_CPL_T7_RX_PHYS_DSGL_NUMSGEERR(1U)
+
+#define S_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE 10
+#define M_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE 0x1
+#define V_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE(x) \
+ ((x) << S_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE)
+#define G_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE(x) \
+ (((x) >> S_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE) & \
+ M_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE)
+#define F_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE \
+ V_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE(1U)
+
+#define S_CPL_T7_RX_PHYS_DSGL_SPLITMODE 9
+#define M_CPL_T7_RX_PHYS_DSGL_SPLITMODE 0x1
+#define V_CPL_T7_RX_PHYS_DSGL_SPLITMODE(x) \
+ ((x) << S_CPL_T7_RX_PHYS_DSGL_SPLITMODE)
+#define G_CPL_T7_RX_PHYS_DSGL_SPLITMODE(x) \
+ (((x) >> S_CPL_T7_RX_PHYS_DSGL_SPLITMODE) & M_CPL_T7_RX_PHYS_DSGL_SPLITMODE)
+#define F_CPL_T7_RX_PHYS_DSGL_SPLITMODE \
+ V_CPL_T7_RX_PHYS_DSGL_SPLITMODE(1U)
+
+#define S_CPL_T7_RX_PHYS_DSGL_NUMSGE 0
+#define M_CPL_T7_RX_PHYS_DSGL_NUMSGE 0x1ff
+#define V_CPL_T7_RX_PHYS_DSGL_NUMSGE(x) ((x) << S_CPL_T7_RX_PHYS_DSGL_NUMSGE)
+#define G_CPL_T7_RX_PHYS_DSGL_NUMSGE(x) \
+ (((x) >> S_CPL_T7_RX_PHYS_DSGL_NUMSGE) & M_CPL_T7_RX_PHYS_DSGL_NUMSGE)
+
/* CPL_TX_TLS_ACK */
struct cpl_tx_tls_ack {
__be32 op_to_Rsvd2;
@@ -3679,12 +5839,11 @@ struct cpl_tx_tls_ack {
#define G_CPL_TX_TLS_ACK_OPCODE(x) \
(((x) >> S_CPL_TX_TLS_ACK_OPCODE) & M_CPL_TX_TLS_ACK_OPCODE)
-#define S_CPL_TX_TLS_ACK_RSVD1 23
-#define M_CPL_TX_TLS_ACK_RSVD1 0x1
-#define V_CPL_TX_TLS_ACK_RSVD1(x) ((x) << S_CPL_TX_TLS_ACK_RSVD1)
-#define G_CPL_TX_TLS_ACK_RSVD1(x) \
- (((x) >> S_CPL_TX_TLS_ACK_RSVD1) & M_CPL_TX_TLS_ACK_RSVD1)
-#define F_CPL_TX_TLS_ACK_RSVD1 V_CPL_TX_TLS_ACK_RSVD1(1U)
+#define S_T7_CPL_TX_TLS_ACK_RXCHID 22
+#define M_T7_CPL_TX_TLS_ACK_RXCHID 0x3
+#define V_T7_CPL_TX_TLS_ACK_RXCHID(x) ((x) << S_T7_CPL_TX_TLS_ACK_RXCHID)
+#define G_T7_CPL_TX_TLS_ACK_RXCHID(x) \
+ (((x) >> S_T7_CPL_TX_TLS_ACK_RXCHID) & M_T7_CPL_TX_TLS_ACK_RXCHID)
#define S_CPL_TX_TLS_ACK_RXCHID 22
#define M_CPL_TX_TLS_ACK_RXCHID 0x1
@@ -3740,4 +5899,822 @@ struct cpl_tx_tls_ack {
#define G_CPL_TX_TLS_ACK_RSVD2(x) \
(((x) >> S_CPL_TX_TLS_ACK_RSVD2) & M_CPL_TX_TLS_ACK_RSVD2)
+#define S_CPL_TX_TLS_ACK_PLDLEN 0
+#define M_CPL_TX_TLS_ACK_PLDLEN 0xfffff
+#define V_CPL_TX_TLS_ACK_PLDLEN(x) ((x) << S_CPL_TX_TLS_ACK_PLDLEN)
+#define G_CPL_TX_TLS_ACK_PLDLEN(x) \
+ (((x) >> S_CPL_TX_TLS_ACK_PLDLEN) & M_CPL_TX_TLS_ACK_PLDLEN)
+
+struct cpl_rcb_upd {
+ __be32 op_to_tid;
+ __be32 opcode_psn;
+ __u8 nodata_to_cnprepclr;
+ __u8 r0;
+ __be16 wrptr;
+ __be32 length;
+};
+
+#define S_CPL_RCB_UPD_OPCODE 24
+#define M_CPL_RCB_UPD_OPCODE 0xff
+#define V_CPL_RCB_UPD_OPCODE(x) ((x) << S_CPL_RCB_UPD_OPCODE)
+#define G_CPL_RCB_UPD_OPCODE(x) \
+ (((x) >> S_CPL_RCB_UPD_OPCODE) & M_CPL_RCB_UPD_OPCODE)
+
+#define S_CPL_RCB_UPD_TID 0
+#define M_CPL_RCB_UPD_TID 0xffffff
+#define V_CPL_RCB_UPD_TID(x) ((x) << S_CPL_RCB_UPD_TID)
+#define G_CPL_RCB_UPD_TID(x) \
+ (((x) >> S_CPL_RCB_UPD_TID) & M_CPL_RCB_UPD_TID)
+
+#define S_CPL_RCB_UPD_OPCODE 24
+#define M_CPL_RCB_UPD_OPCODE 0xff
+#define V_CPL_RCB_UPD_OPCODE(x) ((x) << S_CPL_RCB_UPD_OPCODE)
+#define G_CPL_RCB_UPD_OPCODE(x) \
+ (((x) >> S_CPL_RCB_UPD_OPCODE) & M_CPL_RCB_UPD_OPCODE)
+
+#define S_CPL_RCB_UPD_PSN 0
+#define M_CPL_RCB_UPD_PSN 0xffffff
+#define V_CPL_RCB_UPD_PSN(x) ((x) << S_CPL_RCB_UPD_PSN)
+#define G_CPL_RCB_UPD_PSN(x) \
+ (((x) >> S_CPL_RCB_UPD_PSN) & M_CPL_RCB_UPD_PSN)
+
+#define S_CPL_RCB_UPD_NODATA 7
+#define M_CPL_RCB_UPD_NODATA 0x1
+#define V_CPL_RCB_UPD_NODATA(x) ((x) << S_CPL_RCB_UPD_NODATA)
+#define G_CPL_RCB_UPD_NODATA(x) \
+ (((x) >> S_CPL_RCB_UPD_NODATA) & M_CPL_RCB_UPD_NODATA)
+#define F_CPL_RCB_UPD_NODATA V_CPL_RCB_UPD_NODATA(1U)
+
+#define S_CPL_RCB_UPD_RTTSTAMP 6
+#define M_CPL_RCB_UPD_RTTSTAMP 0x1
+#define V_CPL_RCB_UPD_RTTSTAMP(x) ((x) << S_CPL_RCB_UPD_RTTSTAMP)
+#define G_CPL_RCB_UPD_RTTSTAMP(x) \
+ (((x) >> S_CPL_RCB_UPD_RTTSTAMP) & M_CPL_RCB_UPD_RTTSTAMP)
+#define F_CPL_RCB_UPD_RTTSTAMP V_CPL_RCB_UPD_RTTSTAMP(1U)
+
+#define S_CPL_RCB_UPD_ECNREPCLR 5
+#define M_CPL_RCB_UPD_ECNREPCLR 0x1
+#define V_CPL_RCB_UPD_ECNREPCLR(x) ((x) << S_CPL_RCB_UPD_ECNREPCLR)
+#define G_CPL_RCB_UPD_ECNREPCLR(x) \
+ (((x) >> S_CPL_RCB_UPD_ECNREPCLR) & M_CPL_RCB_UPD_ECNREPCLR)
+#define F_CPL_RCB_UPD_ECNREPCLR V_CPL_RCB_UPD_ECNREPCLR(1U)
+
+#define S_CPL_RCB_UPD_NAKSEQCLR 4
+#define M_CPL_RCB_UPD_NAKSEQCLR 0x1
+#define V_CPL_RCB_UPD_NAKSEQCLR(x) ((x) << S_CPL_RCB_UPD_NAKSEQCLR)
+#define G_CPL_RCB_UPD_NAKSEQCLR(x) \
+ (((x) >> S_CPL_RCB_UPD_NAKSEQCLR) & M_CPL_RCB_UPD_NAKSEQCLR)
+#define F_CPL_RCB_UPD_NAKSEQCLR V_CPL_RCB_UPD_NAKSEQCLR(1U)
+
+#define S_CPL_RCB_UPD_QPERRSET 3
+#define M_CPL_RCB_UPD_QPERRSET 0x1
+#define V_CPL_RCB_UPD_QPERRSET(x) ((x) << S_CPL_RCB_UPD_QPERRSET)
+#define G_CPL_RCB_UPD_QPERRSET(x) \
+ (((x) >> S_CPL_RCB_UPD_QPERRSET) & M_CPL_RCB_UPD_QPERRSET)
+#define F_CPL_RCB_UPD_QPERRSET V_CPL_RCB_UPD_QPERRSET(1U)
+
+#define S_CPL_RCB_UPD_RRQUPDEN 2
+#define M_CPL_RCB_UPD_RRQUPDEN 0x1
+#define V_CPL_RCB_UPD_RRQUPDEN(x) ((x) << S_CPL_RCB_UPD_RRQUPDEN)
+#define G_CPL_RCB_UPD_RRQUPDEN(x) \
+ (((x) >> S_CPL_RCB_UPD_RRQUPDEN) & M_CPL_RCB_UPD_RRQUPDEN)
+#define F_CPL_RCB_UPD_RRQUPDEN V_CPL_RCB_UPD_RRQUPDEN(1U)
+
+#define S_CPL_RCB_UPD_RQUPDEN 1
+#define M_CPL_RCB_UPD_RQUPDEN 0x1
+#define V_CPL_RCB_UPD_RQUPDEN(x) ((x) << S_CPL_RCB_UPD_RQUPDEN)
+#define G_CPL_RCB_UPD_RQUPDEN(x) \
+ (((x) >> S_CPL_RCB_UPD_RQUPDEN) & M_CPL_RCB_UPD_RQUPDEN)
+#define F_CPL_RCB_UPD_RQUPDEN V_CPL_RCB_UPD_RQUPDEN(1U)
+
+#define S_CPL_RCB_UPD_CNPREPCLR 0
+#define M_CPL_RCB_UPD_CNPREPCLR 0x1
+#define V_CPL_RCB_UPD_CNPREPCLR(x) ((x) << S_CPL_RCB_UPD_CNPREPCLR)
+#define G_CPL_RCB_UPD_CNPREPCLR(x) \
+ (((x) >> S_CPL_RCB_UPD_CNPREPCLR) & M_CPL_RCB_UPD_CNPREPCLR)
+#define F_CPL_RCB_UPD_CNPREPCLR V_CPL_RCB_UPD_CNPREPCLR(1U)
+
+#define S_CPL_RCB_UPD_RSPNAKSEQCLR 7
+#define M_CPL_RCB_UPD_RSPNAKSEQCLR 0x1
+#define V_CPL_RCB_UPD_RSPNAKSEQCLR(x) ((x) << S_CPL_RCB_UPD_RSPNAKSEQCLR)
+#define G_CPL_RCB_UPD_RSPNAKSEQCLR(x) \
+ (((x) >> S_CPL_RCB_UPD_RSPNAKSEQCLR) & M_CPL_RCB_UPD_RSPNAKSEQCLR)
+#define F_CPL_RCB_UPD_RSPNAKSEQCLR V_CPL_RCB_UPD_RSPNAKSEQCLR(1U)
+
+struct cpl_roce_fw_notify {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 type_pkd;
+};
+
+#define S_CPL_ROCE_FW_NOTIFY_OPCODE 24
+#define M_CPL_ROCE_FW_NOTIFY_OPCODE 0xff
+#define V_CPL_ROCE_FW_NOTIFY_OPCODE(x) ((x) << S_CPL_ROCE_FW_NOTIFY_OPCODE)
+#define G_CPL_ROCE_FW_NOTIFY_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_FW_NOTIFY_OPCODE) & M_CPL_ROCE_FW_NOTIFY_OPCODE)
+
+#define S_CPL_ROCE_FW_NOTIFY_TID 0
+#define M_CPL_ROCE_FW_NOTIFY_TID 0xffffff
+#define V_CPL_ROCE_FW_NOTIFY_TID(x) ((x) << S_CPL_ROCE_FW_NOTIFY_TID)
+#define G_CPL_ROCE_FW_NOTIFY_TID(x) \
+ (((x) >> S_CPL_ROCE_FW_NOTIFY_TID) & M_CPL_ROCE_FW_NOTIFY_TID)
+
+#define S_CPL_ROCE_FW_NOTIFY_TYPE 28
+#define M_CPL_ROCE_FW_NOTIFY_TYPE 0xf
+#define V_CPL_ROCE_FW_NOTIFY_TYPE(x) ((x) << S_CPL_ROCE_FW_NOTIFY_TYPE)
+#define G_CPL_ROCE_FW_NOTIFY_TYPE(x) \
+ (((x) >> S_CPL_ROCE_FW_NOTIFY_TYPE) & M_CPL_ROCE_FW_NOTIFY_TYPE)
+
+struct cpl_roce_ack_nak_req {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 type_to_opcode;
+ __be16 length;
+ __be32 psn_msn_hi;
+ __be32 msn_lo_pkd;
+};
+
+#define S_CPL_ROCE_ACK_NAK_REQ_OPCODE 24
+#define M_CPL_ROCE_ACK_NAK_REQ_OPCODE 0xff
+#define V_CPL_ROCE_ACK_NAK_REQ_OPCODE(x) \
+ ((x) << S_CPL_ROCE_ACK_NAK_REQ_OPCODE)
+#define G_CPL_ROCE_ACK_NAK_REQ_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_OPCODE) & M_CPL_ROCE_ACK_NAK_REQ_OPCODE)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_TID 0
+#define M_CPL_ROCE_ACK_NAK_REQ_TID 0xffffff
+#define V_CPL_ROCE_ACK_NAK_REQ_TID(x) ((x) << S_CPL_ROCE_ACK_NAK_REQ_TID)
+#define G_CPL_ROCE_ACK_NAK_REQ_TID(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_TID) & M_CPL_ROCE_ACK_NAK_REQ_TID)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_TYPE 12
+#define M_CPL_ROCE_ACK_NAK_REQ_TYPE 0xf
+#define V_CPL_ROCE_ACK_NAK_REQ_TYPE(x) ((x) << S_CPL_ROCE_ACK_NAK_REQ_TYPE)
+#define G_CPL_ROCE_ACK_NAK_REQ_TYPE(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_TYPE) & M_CPL_ROCE_ACK_NAK_REQ_TYPE)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_STATUS 8
+#define M_CPL_ROCE_ACK_NAK_REQ_STATUS 0xf
+#define V_CPL_ROCE_ACK_NAK_REQ_STATUS(x) \
+ ((x) << S_CPL_ROCE_ACK_NAK_REQ_STATUS)
+#define G_CPL_ROCE_ACK_NAK_REQ_STATUS(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_STATUS) & M_CPL_ROCE_ACK_NAK_REQ_STATUS)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE 0
+#define M_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE 0xff
+#define V_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE(x) \
+ ((x) << S_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE)
+#define G_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE) & M_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_PSN 8
+#define M_CPL_ROCE_ACK_NAK_REQ_PSN 0xffffff
+#define V_CPL_ROCE_ACK_NAK_REQ_PSN(x) ((x) << S_CPL_ROCE_ACK_NAK_REQ_PSN)
+#define G_CPL_ROCE_ACK_NAK_REQ_PSN(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_PSN) & M_CPL_ROCE_ACK_NAK_REQ_PSN)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_MSN_HI 0
+#define M_CPL_ROCE_ACK_NAK_REQ_MSN_HI 0xff
+#define V_CPL_ROCE_ACK_NAK_REQ_MSN_HI(x) \
+ ((x) << S_CPL_ROCE_ACK_NAK_REQ_MSN_HI)
+#define G_CPL_ROCE_ACK_NAK_REQ_MSN_HI(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_MSN_HI) & M_CPL_ROCE_ACK_NAK_REQ_MSN_HI)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_MSN_LO 16
+#define M_CPL_ROCE_ACK_NAK_REQ_MSN_LO 0xffff
+#define V_CPL_ROCE_ACK_NAK_REQ_MSN_LO(x) \
+ ((x) << S_CPL_ROCE_ACK_NAK_REQ_MSN_LO)
+#define G_CPL_ROCE_ACK_NAK_REQ_MSN_LO(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_MSN_LO) & M_CPL_ROCE_ACK_NAK_REQ_MSN_LO)
+
+struct cpl_roce_ack_nak {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 type_to_opcode;
+ __be16 length;
+ __be32 psn_rtt_hi;
+ __be32 rtt_lo_to_rttbad;
+};
+
+#define S_CPL_ROCE_ACK_NAK_OPCODE 24
+#define M_CPL_ROCE_ACK_NAK_OPCODE 0xff
+#define V_CPL_ROCE_ACK_NAK_OPCODE(x) ((x) << S_CPL_ROCE_ACK_NAK_OPCODE)
+#define G_CPL_ROCE_ACK_NAK_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_OPCODE) & M_CPL_ROCE_ACK_NAK_OPCODE)
+
+#define S_CPL_ROCE_ACK_NAK_TID 0
+#define M_CPL_ROCE_ACK_NAK_TID 0xffffff
+#define V_CPL_ROCE_ACK_NAK_TID(x) ((x) << S_CPL_ROCE_ACK_NAK_TID)
+#define G_CPL_ROCE_ACK_NAK_TID(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_TID) & M_CPL_ROCE_ACK_NAK_TID)
+
+#define S_CPL_ROCE_ACK_NAK_TYPE 12
+#define M_CPL_ROCE_ACK_NAK_TYPE 0xf
+#define V_CPL_ROCE_ACK_NAK_TYPE(x) ((x) << S_CPL_ROCE_ACK_NAK_TYPE)
+#define G_CPL_ROCE_ACK_NAK_TYPE(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_TYPE) & M_CPL_ROCE_ACK_NAK_TYPE)
+
+#define S_CPL_ROCE_ACK_NAK_STATUS 8
+#define M_CPL_ROCE_ACK_NAK_STATUS 0xf
+#define V_CPL_ROCE_ACK_NAK_STATUS(x) ((x) << S_CPL_ROCE_ACK_NAK_STATUS)
+#define G_CPL_ROCE_ACK_NAK_STATUS(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_STATUS) & M_CPL_ROCE_ACK_NAK_STATUS)
+
+#define S_CPL_ROCE_ACK_NAK_WIRE_OPCODE 0
+#define M_CPL_ROCE_ACK_NAK_WIRE_OPCODE 0xff
+#define V_CPL_ROCE_ACK_NAK_WIRE_OPCODE(x) ((x) << S_CPL_ROCE_ACK_NAK_WIRE_OPCODE)
+#define G_CPL_ROCE_ACK_NAK_WIRE_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_WIRE_OPCODE) & M_CPL_ROCE_ACK_NAK_WIRE_OPCODE)
+
+#define S_CPL_ROCE_ACK_NAK_PSN 8
+#define M_CPL_ROCE_ACK_NAK_PSN 0xffffff
+#define V_CPL_ROCE_ACK_NAK_PSN(x) ((x) << S_CPL_ROCE_ACK_NAK_PSN)
+#define G_CPL_ROCE_ACK_NAK_PSN(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_PSN) & M_CPL_ROCE_ACK_NAK_PSN)
+
+#define S_CPL_ROCE_ACK_NAK_RTT_HI 0
+#define M_CPL_ROCE_ACK_NAK_RTT_HI 0xff
+#define V_CPL_ROCE_ACK_NAK_RTT_HI(x) ((x) << S_CPL_ROCE_ACK_NAK_RTT_HI)
+#define G_CPL_ROCE_ACK_NAK_RTT_HI(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_RTT_HI) & M_CPL_ROCE_ACK_NAK_RTT_HI)
+
+#define S_CPL_ROCE_ACK_NAK_RTT_LO 24
+#define M_CPL_ROCE_ACK_NAK_RTT_LO 0xff
+#define V_CPL_ROCE_ACK_NAK_RTT_LO(x) ((x) << S_CPL_ROCE_ACK_NAK_RTT_LO)
+#define G_CPL_ROCE_ACK_NAK_RTT_LO(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_RTT_LO) & M_CPL_ROCE_ACK_NAK_RTT_LO)
+
+#define S_CPL_ROCE_ACK_NAK_RTTVALID 23
+#define M_CPL_ROCE_ACK_NAK_RTTVALID 0x1
+#define V_CPL_ROCE_ACK_NAK_RTTVALID(x) ((x) << S_CPL_ROCE_ACK_NAK_RTTVALID)
+#define G_CPL_ROCE_ACK_NAK_RTTVALID(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_RTTVALID) & M_CPL_ROCE_ACK_NAK_RTTVALID)
+#define F_CPL_ROCE_ACK_NAK_RTTVALID V_CPL_ROCE_ACK_NAK_RTTVALID(1U)
+
+#define S_CPL_ROCE_ACK_NAK_RTTBAD 22
+#define M_CPL_ROCE_ACK_NAK_RTTBAD 0x1
+#define V_CPL_ROCE_ACK_NAK_RTTBAD(x) ((x) << S_CPL_ROCE_ACK_NAK_RTTBAD)
+#define G_CPL_ROCE_ACK_NAK_RTTBAD(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_RTTBAD) & M_CPL_ROCE_ACK_NAK_RTTBAD)
+#define F_CPL_ROCE_ACK_NAK_RTTBAD V_CPL_ROCE_ACK_NAK_RTTBAD(1U)
+
+struct cpl_roce_cqe {
+ __be16 op_rssctrl;
+ __be16 cqid;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+ __be32 se_to_srq;
+ __be32 rqe;
+ __be32 extinfoms[2];
+ __be32 extinfols[2];
+};
+
+#define S_CPL_ROCE_CQE_OPCODE 8
+#define M_CPL_ROCE_CQE_OPCODE 0xff
+#define V_CPL_ROCE_CQE_OPCODE(x) ((x) << S_CPL_ROCE_CQE_OPCODE)
+#define G_CPL_ROCE_CQE_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_OPCODE) & M_CPL_ROCE_CQE_OPCODE)
+
+#define S_CPL_ROCE_CQE_RSSCTRL 0
+#define M_CPL_ROCE_CQE_RSSCTRL 0xff
+#define V_CPL_ROCE_CQE_RSSCTRL(x) ((x) << S_CPL_ROCE_CQE_RSSCTRL)
+#define G_CPL_ROCE_CQE_RSSCTRL(x) \
+ (((x) >> S_CPL_ROCE_CQE_RSSCTRL) & M_CPL_ROCE_CQE_RSSCTRL)
+
+#define S_CPL_ROCE_CQE_TID 8
+#define M_CPL_ROCE_CQE_TID 0xfffff
+#define V_CPL_ROCE_CQE_TID(x) ((x) << S_CPL_ROCE_CQE_TID)
+#define G_CPL_ROCE_CQE_TID(x) \
+ (((x) >> S_CPL_ROCE_CQE_TID) & M_CPL_ROCE_CQE_TID)
+
+#define S_CPL_ROCE_CQE_FLITCNT 0
+#define M_CPL_ROCE_CQE_FLITCNT 0xff
+#define V_CPL_ROCE_CQE_FLITCNT(x) ((x) << S_CPL_ROCE_CQE_FLITCNT)
+#define G_CPL_ROCE_CQE_FLITCNT(x) \
+ (((x) >> S_CPL_ROCE_CQE_FLITCNT) & M_CPL_ROCE_CQE_FLITCNT)
+
+#define S_CPL_ROCE_CQE_QPID 12
+#define M_CPL_ROCE_CQE_QPID 0xfffff
+#define V_CPL_ROCE_CQE_QPID(x) ((x) << S_CPL_ROCE_CQE_QPID)
+#define G_CPL_ROCE_CQE_QPID(x) \
+ (((x) >> S_CPL_ROCE_CQE_QPID) & M_CPL_ROCE_CQE_QPID)
+
+#define S_CPL_ROCE_CQE_EXTMODE 11
+#define M_CPL_ROCE_CQE_EXTMODE 0x1
+#define V_CPL_ROCE_CQE_EXTMODE(x) ((x) << S_CPL_ROCE_CQE_EXTMODE)
+#define G_CPL_ROCE_CQE_EXTMODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_EXTMODE) & M_CPL_ROCE_CQE_EXTMODE)
+#define F_CPL_ROCE_CQE_EXTMODE V_CPL_ROCE_CQE_EXTMODE(1U)
+
+#define S_CPL_ROCE_CQE_GENERATION_BIT 10
+#define M_CPL_ROCE_CQE_GENERATION_BIT 0x1
+#define V_CPL_ROCE_CQE_GENERATION_BIT(x) \
+ ((x) << S_CPL_ROCE_CQE_GENERATION_BIT)
+#define G_CPL_ROCE_CQE_GENERATION_BIT(x) \
+ (((x) >> S_CPL_ROCE_CQE_GENERATION_BIT) & M_CPL_ROCE_CQE_GENERATION_BIT)
+#define F_CPL_ROCE_CQE_GENERATION_BIT V_CPL_ROCE_CQE_GENERATION_BIT(1U)
+
+#define S_CPL_ROCE_CQE_STATUS 5
+#define M_CPL_ROCE_CQE_STATUS 0x1f
+#define V_CPL_ROCE_CQE_STATUS(x) ((x) << S_CPL_ROCE_CQE_STATUS)
+#define G_CPL_ROCE_CQE_STATUS(x) \
+ (((x) >> S_CPL_ROCE_CQE_STATUS) & M_CPL_ROCE_CQE_STATUS)
+
+#define S_CPL_ROCE_CQE_CQE_TYPE 4
+#define M_CPL_ROCE_CQE_CQE_TYPE 0x1
+#define V_CPL_ROCE_CQE_CQE_TYPE(x) ((x) << S_CPL_ROCE_CQE_CQE_TYPE)
+#define G_CPL_ROCE_CQE_CQE_TYPE(x) \
+ (((x) >> S_CPL_ROCE_CQE_CQE_TYPE) & M_CPL_ROCE_CQE_CQE_TYPE)
+#define F_CPL_ROCE_CQE_CQE_TYPE V_CPL_ROCE_CQE_CQE_TYPE(1U)
+
+#define S_CPL_ROCE_CQE_WR_TYPE 0
+#define M_CPL_ROCE_CQE_WR_TYPE 0xf
+#define V_CPL_ROCE_CQE_WR_TYPE(x) ((x) << S_CPL_ROCE_CQE_WR_TYPE)
+#define G_CPL_ROCE_CQE_WR_TYPE(x) \
+ (((x) >> S_CPL_ROCE_CQE_WR_TYPE) & M_CPL_ROCE_CQE_WR_TYPE)
+
+#define S_CPL_ROCE_CQE_SE 31
+#define M_CPL_ROCE_CQE_SE 0x1
+#define V_CPL_ROCE_CQE_SE(x) ((x) << S_CPL_ROCE_CQE_SE)
+#define G_CPL_ROCE_CQE_SE(x) \
+ (((x) >> S_CPL_ROCE_CQE_SE) & M_CPL_ROCE_CQE_SE)
+#define F_CPL_ROCE_CQE_SE V_CPL_ROCE_CQE_SE(1U)
+
+#define S_CPL_ROCE_CQE_WR_TYPE_EXT 24
+#define M_CPL_ROCE_CQE_WR_TYPE_EXT 0x7f
+#define V_CPL_ROCE_CQE_WR_TYPE_EXT(x) ((x) << S_CPL_ROCE_CQE_WR_TYPE_EXT)
+#define G_CPL_ROCE_CQE_WR_TYPE_EXT(x) \
+ (((x) >> S_CPL_ROCE_CQE_WR_TYPE_EXT) & M_CPL_ROCE_CQE_WR_TYPE_EXT)
+
+#define S_CPL_ROCE_CQE_SRQ 0
+#define M_CPL_ROCE_CQE_SRQ 0xfff
+#define V_CPL_ROCE_CQE_SRQ(x) ((x) << S_CPL_ROCE_CQE_SRQ)
+#define G_CPL_ROCE_CQE_SRQ(x) \
+ (((x) >> S_CPL_ROCE_CQE_SRQ) & M_CPL_ROCE_CQE_SRQ)
+
+struct cpl_roce_cqe_fw {
+ __be32 op_to_cqid;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+ __be32 se_to_srq;
+ __be32 rqe;
+ __be32 extinfoms[2];
+ __be32 extinfols[2];
+};
+
+#define S_CPL_ROCE_CQE_FW_OPCODE 24
+#define M_CPL_ROCE_CQE_FW_OPCODE 0xff
+#define V_CPL_ROCE_CQE_FW_OPCODE(x) ((x) << S_CPL_ROCE_CQE_FW_OPCODE)
+#define G_CPL_ROCE_CQE_FW_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_OPCODE) & M_CPL_ROCE_CQE_FW_OPCODE)
+
+#define S_CPL_ROCE_CQE_FW_RSSCTRL 16
+#define M_CPL_ROCE_CQE_FW_RSSCTRL 0xff
+#define V_CPL_ROCE_CQE_FW_RSSCTRL(x) ((x) << S_CPL_ROCE_CQE_FW_RSSCTRL)
+#define G_CPL_ROCE_CQE_FW_RSSCTRL(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_RSSCTRL) & M_CPL_ROCE_CQE_FW_RSSCTRL)
+
+#define S_CPL_ROCE_CQE_FW_CQID 0
+#define M_CPL_ROCE_CQE_FW_CQID 0xffff
+#define V_CPL_ROCE_CQE_FW_CQID(x) ((x) << S_CPL_ROCE_CQE_FW_CQID)
+#define G_CPL_ROCE_CQE_FW_CQID(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_CQID) & M_CPL_ROCE_CQE_FW_CQID)
+
+#define S_CPL_ROCE_CQE_FW_TID 8
+#define M_CPL_ROCE_CQE_FW_TID 0xfffff
+#define V_CPL_ROCE_CQE_FW_TID(x) ((x) << S_CPL_ROCE_CQE_FW_TID)
+#define G_CPL_ROCE_CQE_FW_TID(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_TID) & M_CPL_ROCE_CQE_FW_TID)
+
+#define S_CPL_ROCE_CQE_FW_FLITCNT 0
+#define M_CPL_ROCE_CQE_FW_FLITCNT 0xff
+#define V_CPL_ROCE_CQE_FW_FLITCNT(x) ((x) << S_CPL_ROCE_CQE_FW_FLITCNT)
+#define G_CPL_ROCE_CQE_FW_FLITCNT(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_FLITCNT) & M_CPL_ROCE_CQE_FW_FLITCNT)
+
+#define S_CPL_ROCE_CQE_FW_QPID 12
+#define M_CPL_ROCE_CQE_FW_QPID 0xfffff
+#define V_CPL_ROCE_CQE_FW_QPID(x) ((x) << S_CPL_ROCE_CQE_FW_QPID)
+#define G_CPL_ROCE_CQE_FW_QPID(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_QPID) & M_CPL_ROCE_CQE_FW_QPID)
+
+#define S_CPL_ROCE_CQE_FW_EXTMODE 11
+#define M_CPL_ROCE_CQE_FW_EXTMODE 0x1
+#define V_CPL_ROCE_CQE_FW_EXTMODE(x) ((x) << S_CPL_ROCE_CQE_FW_EXTMODE)
+#define G_CPL_ROCE_CQE_FW_EXTMODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_EXTMODE) & M_CPL_ROCE_CQE_FW_EXTMODE)
+#define F_CPL_ROCE_CQE_FW_EXTMODE V_CPL_ROCE_CQE_FW_EXTMODE(1U)
+
+#define S_CPL_ROCE_CQE_FW_GENERATION_BIT 10
+#define M_CPL_ROCE_CQE_FW_GENERATION_BIT 0x1
+#define V_CPL_ROCE_CQE_FW_GENERATION_BIT(x) \
+ ((x) << S_CPL_ROCE_CQE_FW_GENERATION_BIT)
+#define G_CPL_ROCE_CQE_FW_GENERATION_BIT(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_GENERATION_BIT) & \
+ M_CPL_ROCE_CQE_FW_GENERATION_BIT)
+#define F_CPL_ROCE_CQE_FW_GENERATION_BIT V_CPL_ROCE_CQE_FW_GENERATION_BIT(1U)
+
+#define S_CPL_ROCE_CQE_FW_STATUS 5
+#define M_CPL_ROCE_CQE_FW_STATUS 0x1f
+#define V_CPL_ROCE_CQE_FW_STATUS(x) ((x) << S_CPL_ROCE_CQE_FW_STATUS)
+#define G_CPL_ROCE_CQE_FW_STATUS(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_STATUS) & M_CPL_ROCE_CQE_FW_STATUS)
+
+#define S_CPL_ROCE_CQE_FW_CQE_TYPE 4
+#define M_CPL_ROCE_CQE_FW_CQE_TYPE 0x1
+#define V_CPL_ROCE_CQE_FW_CQE_TYPE(x) ((x) << S_CPL_ROCE_CQE_FW_CQE_TYPE)
+#define G_CPL_ROCE_CQE_FW_CQE_TYPE(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_CQE_TYPE) & M_CPL_ROCE_CQE_FW_CQE_TYPE)
+#define F_CPL_ROCE_CQE_FW_CQE_TYPE V_CPL_ROCE_CQE_FW_CQE_TYPE(1U)
+
+#define S_CPL_ROCE_CQE_FW_WR_TYPE 0
+#define M_CPL_ROCE_CQE_FW_WR_TYPE 0xf
+#define V_CPL_ROCE_CQE_FW_WR_TYPE(x) ((x) << S_CPL_ROCE_CQE_FW_WR_TYPE)
+#define G_CPL_ROCE_CQE_FW_WR_TYPE(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_WR_TYPE) & M_CPL_ROCE_CQE_FW_WR_TYPE)
+
+#define S_CPL_ROCE_CQE_FW_SE 31
+#define M_CPL_ROCE_CQE_FW_SE 0x1
+#define V_CPL_ROCE_CQE_FW_SE(x) ((x) << S_CPL_ROCE_CQE_FW_SE)
+#define G_CPL_ROCE_CQE_FW_SE(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_SE) & M_CPL_ROCE_CQE_FW_SE)
+#define F_CPL_ROCE_CQE_FW_SE V_CPL_ROCE_CQE_FW_SE(1U)
+
+#define S_CPL_ROCE_CQE_FW_WR_TYPE_EXT 24
+#define M_CPL_ROCE_CQE_FW_WR_TYPE_EXT 0x7f
+#define V_CPL_ROCE_CQE_FW_WR_TYPE_EXT(x) \
+ ((x) << S_CPL_ROCE_CQE_FW_WR_TYPE_EXT)
+#define G_CPL_ROCE_CQE_FW_WR_TYPE_EXT(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_WR_TYPE_EXT) & M_CPL_ROCE_CQE_FW_WR_TYPE_EXT)
+
+#define S_CPL_ROCE_CQE_FW_SRQ 0
+#define M_CPL_ROCE_CQE_FW_SRQ 0xfff
+#define V_CPL_ROCE_CQE_FW_SRQ(x) ((x) << S_CPL_ROCE_CQE_FW_SRQ)
+#define G_CPL_ROCE_CQE_FW_SRQ(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_SRQ) & M_CPL_ROCE_CQE_FW_SRQ)
+
+struct cpl_roce_cqe_err {
+ __be32 op_to_CQID;
+ __be32 Tid_FlitCnt;
+ __be32 QPID_to_WR_type;
+ __be32 Length;
+ __be32 TAG;
+ __be32 MSN;
+ __be32 SE_to_SRQ;
+ __be32 RQE;
+ __be32 ExtInfoMS[2];
+ __be32 ExtInfoLS[2];
+};
+
+#define S_CPL_ROCE_CQE_ERR_OPCODE 24
+#define M_CPL_ROCE_CQE_ERR_OPCODE 0xff
+#define V_CPL_ROCE_CQE_ERR_OPCODE(x) ((x) << S_CPL_ROCE_CQE_ERR_OPCODE)
+#define G_CPL_ROCE_CQE_ERR_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_OPCODE) & M_CPL_ROCE_CQE_ERR_OPCODE)
+
+#define S_CPL_ROCE_CQE_ERR_RSSCTRL 16
+#define M_CPL_ROCE_CQE_ERR_RSSCTRL 0xff
+#define V_CPL_ROCE_CQE_ERR_RSSCTRL(x) ((x) << S_CPL_ROCE_CQE_ERR_RSSCTRL)
+#define G_CPL_ROCE_CQE_ERR_RSSCTRL(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_RSSCTRL) & M_CPL_ROCE_CQE_ERR_RSSCTRL)
+
+#define S_CPL_ROCE_CQE_ERR_CQID 0
+#define M_CPL_ROCE_CQE_ERR_CQID 0xffff
+#define V_CPL_ROCE_CQE_ERR_CQID(x) ((x) << S_CPL_ROCE_CQE_ERR_CQID)
+#define G_CPL_ROCE_CQE_ERR_CQID(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_CQID) & M_CPL_ROCE_CQE_ERR_CQID)
+
+#define S_CPL_ROCE_CQE_ERR_TID 8
+#define M_CPL_ROCE_CQE_ERR_TID 0xfffff
+#define V_CPL_ROCE_CQE_ERR_TID(x) ((x) << S_CPL_ROCE_CQE_ERR_TID)
+#define G_CPL_ROCE_CQE_ERR_TID(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_TID) & M_CPL_ROCE_CQE_ERR_TID)
+
+#define S_CPL_ROCE_CQE_ERR_FLITCNT 0
+#define M_CPL_ROCE_CQE_ERR_FLITCNT 0xff
+#define V_CPL_ROCE_CQE_ERR_FLITCNT(x) ((x) << S_CPL_ROCE_CQE_ERR_FLITCNT)
+#define G_CPL_ROCE_CQE_ERR_FLITCNT(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_FLITCNT) & M_CPL_ROCE_CQE_ERR_FLITCNT)
+
+#define S_CPL_ROCE_CQE_ERR_QPID 12
+#define M_CPL_ROCE_CQE_ERR_QPID 0xfffff
+#define V_CPL_ROCE_CQE_ERR_QPID(x) ((x) << S_CPL_ROCE_CQE_ERR_QPID)
+#define G_CPL_ROCE_CQE_ERR_QPID(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_QPID) & M_CPL_ROCE_CQE_ERR_QPID)
+
+#define S_CPL_ROCE_CQE_ERR_EXTMODE 11
+#define M_CPL_ROCE_CQE_ERR_EXTMODE 0x1
+#define V_CPL_ROCE_CQE_ERR_EXTMODE(x) ((x) << S_CPL_ROCE_CQE_ERR_EXTMODE)
+#define G_CPL_ROCE_CQE_ERR_EXTMODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_EXTMODE) & M_CPL_ROCE_CQE_ERR_EXTMODE)
+#define F_CPL_ROCE_CQE_ERR_EXTMODE V_CPL_ROCE_CQE_ERR_EXTMODE(1U)
+
+#define S_CPL_ROCE_CQE_ERR_GENERATION_BIT 10
+#define M_CPL_ROCE_CQE_ERR_GENERATION_BIT 0x1
+#define V_CPL_ROCE_CQE_ERR_GENERATION_BIT(x) \
+ ((x) << S_CPL_ROCE_CQE_ERR_GENERATION_BIT)
+#define G_CPL_ROCE_CQE_ERR_GENERATION_BIT(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_GENERATION_BIT) & \
+ M_CPL_ROCE_CQE_ERR_GENERATION_BIT)
+#define F_CPL_ROCE_CQE_ERR_GENERATION_BIT \
+ V_CPL_ROCE_CQE_ERR_GENERATION_BIT(1U)
+
+#define S_CPL_ROCE_CQE_ERR_STATUS 5
+#define M_CPL_ROCE_CQE_ERR_STATUS 0x1f
+#define V_CPL_ROCE_CQE_ERR_STATUS(x) ((x) << S_CPL_ROCE_CQE_ERR_STATUS)
+#define G_CPL_ROCE_CQE_ERR_STATUS(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_STATUS) & M_CPL_ROCE_CQE_ERR_STATUS)
+
+#define S_CPL_ROCE_CQE_ERR_CQE_TYPE 4
+#define M_CPL_ROCE_CQE_ERR_CQE_TYPE 0x1
+#define V_CPL_ROCE_CQE_ERR_CQE_TYPE(x) ((x) << S_CPL_ROCE_CQE_ERR_CQE_TYPE)
+#define G_CPL_ROCE_CQE_ERR_CQE_TYPE(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_CQE_TYPE) & M_CPL_ROCE_CQE_ERR_CQE_TYPE)
+#define F_CPL_ROCE_CQE_ERR_CQE_TYPE V_CPL_ROCE_CQE_ERR_CQE_TYPE(1U)
+
+#define S_CPL_ROCE_CQE_ERR_WR_TYPE 0
+#define M_CPL_ROCE_CQE_ERR_WR_TYPE 0xf
+#define V_CPL_ROCE_CQE_ERR_WR_TYPE(x) ((x) << S_CPL_ROCE_CQE_ERR_WR_TYPE)
+#define G_CPL_ROCE_CQE_ERR_WR_TYPE(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_WR_TYPE) & M_CPL_ROCE_CQE_ERR_WR_TYPE)
+
+#define S_CPL_ROCE_CQE_ERR_SE 31
+#define M_CPL_ROCE_CQE_ERR_SE 0x1
+#define V_CPL_ROCE_CQE_ERR_SE(x) ((x) << S_CPL_ROCE_CQE_ERR_SE)
+#define G_CPL_ROCE_CQE_ERR_SE(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_SE) & M_CPL_ROCE_CQE_ERR_SE)
+#define F_CPL_ROCE_CQE_ERR_SE V_CPL_ROCE_CQE_ERR_SE(1U)
+
+#define S_CPL_ROCE_CQE_ERR_WR_TYPE_EXT 24
+#define M_CPL_ROCE_CQE_ERR_WR_TYPE_EXT 0x7f
+#define V_CPL_ROCE_CQE_ERR_WR_TYPE_EXT(x) \
+ ((x) << S_CPL_ROCE_CQE_ERR_WR_TYPE_EXT)
+#define G_CPL_ROCE_CQE_ERR_WR_TYPE_EXT(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_WR_TYPE_EXT) & M_CPL_ROCE_CQE_ERR_WR_TYPE_EXT)
+
+#define S_CPL_ROCE_CQE_ERR_SRQ 0
+#define M_CPL_ROCE_CQE_ERR_SRQ 0xfff
+#define V_CPL_ROCE_CQE_ERR_SRQ(x) ((x) << S_CPL_ROCE_CQE_ERR_SRQ)
+#define G_CPL_ROCE_CQE_ERR_SRQ(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_SRQ) & M_CPL_ROCE_CQE_ERR_SRQ)
+
+struct cpl_accelerator_hdr {
+ __be16 op_accelerator_id;
+ __be16 rxchid_payload_to_inner_cpl_length_ack;
+ __be32 inner_cpl_length_payload_status_loc;
+};
+
+#define S_CPL_ACCELERATOR_HDR_OPCODE 8
+#define M_CPL_ACCELERATOR_HDR_OPCODE 0xff
+#define V_CPL_ACCELERATOR_HDR_OPCODE(x) ((x) << S_CPL_ACCELERATOR_HDR_OPCODE)
+#define G_CPL_ACCELERATOR_HDR_OPCODE(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_OPCODE) & M_CPL_ACCELERATOR_HDR_OPCODE)
+
+#define S_CPL_ACCELERATOR_HDR_ACCELERATOR_ID 0
+#define M_CPL_ACCELERATOR_HDR_ACCELERATOR_ID 0xff
+#define V_CPL_ACCELERATOR_HDR_ACCELERATOR_ID(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_ACCELERATOR_ID)
+#define G_CPL_ACCELERATOR_HDR_ACCELERATOR_ID(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_ACCELERATOR_ID) & \
+ M_CPL_ACCELERATOR_HDR_ACCELERATOR_ID)
+
+#define S_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD 14
+#define M_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD 0x3
+#define V_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD)
+#define G_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD) & \
+ M_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD)
+
+#define S_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD 12
+#define M_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD 0x3
+#define V_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD)
+#define G_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD) & \
+ M_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD)
+
+#define S_CPL_ACCELERATOR_HDR_RXCHID_ACK 10
+#define M_CPL_ACCELERATOR_HDR_RXCHID_ACK 0x3
+#define V_CPL_ACCELERATOR_HDR_RXCHID_ACK(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_RXCHID_ACK)
+#define G_CPL_ACCELERATOR_HDR_RXCHID_ACK(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_RXCHID_ACK) & \
+ M_CPL_ACCELERATOR_HDR_RXCHID_ACK)
+
+#define S_CPL_ACCELERATOR_HDR_DESTID_ACK 8
+#define M_CPL_ACCELERATOR_HDR_DESTID_ACK 0x3
+#define V_CPL_ACCELERATOR_HDR_DESTID_ACK(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_DESTID_ACK)
+#define G_CPL_ACCELERATOR_HDR_DESTID_ACK(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_DESTID_ACK) & \
+ M_CPL_ACCELERATOR_HDR_DESTID_ACK)
+
+#define S_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK 0
+#define M_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK 0xff
+#define V_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK)
+#define G_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK) & \
+ M_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK)
+
+#define S_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD 24
+#define M_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD 0xff
+#define V_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD)
+#define G_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD) & \
+ M_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD)
+
+#define S_CPL_ACCELERATOR_HDR_STATUS_LOC 22
+#define M_CPL_ACCELERATOR_HDR_STATUS_LOC 0x3
+#define V_CPL_ACCELERATOR_HDR_STATUS_LOC(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_STATUS_LOC)
+#define G_CPL_ACCELERATOR_HDR_STATUS_LOC(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_STATUS_LOC) & \
+ M_CPL_ACCELERATOR_HDR_STATUS_LOC)
+
+struct cpl_accelerator_ack {
+ RSS_HDR
+ __be16 op_accelerator_id;
+ __be16 r0;
+ __be32 status;
+ __be64 r1;
+ __be64 r2;
+};
+
+#define S_CPL_ACCELERATOR_ACK_OPCODE 8
+#define M_CPL_ACCELERATOR_ACK_OPCODE 0xff
+#define V_CPL_ACCELERATOR_ACK_OPCODE(x) ((x) << S_CPL_ACCELERATOR_ACK_OPCODE)
+#define G_CPL_ACCELERATOR_ACK_OPCODE(x) \
+ (((x) >> S_CPL_ACCELERATOR_ACK_OPCODE) & M_CPL_ACCELERATOR_ACK_OPCODE)
+
+#define S_CPL_ACCELERATOR_ACK_ACCELERATOR_ID 0
+#define M_CPL_ACCELERATOR_ACK_ACCELERATOR_ID 0xff
+#define V_CPL_ACCELERATOR_ACK_ACCELERATOR_ID(x) \
+ ((x) << S_CPL_ACCELERATOR_ACK_ACCELERATOR_ID)
+#define G_CPL_ACCELERATOR_ACK_ACCELERATOR_ID(x) \
+ (((x) >> S_CPL_ACCELERATOR_ACK_ACCELERATOR_ID) & \
+ M_CPL_ACCELERATOR_ACK_ACCELERATOR_ID)
+
+struct cpl_nvmt_data {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 r0;
+ __be16 length;
+ __be32 seq;
+ __be32 status_pkd;
+};
+
+#define S_CPL_NVMT_DATA_OPCODE 24
+#define M_CPL_NVMT_DATA_OPCODE 0xff
+#define V_CPL_NVMT_DATA_OPCODE(x) ((x) << S_CPL_NVMT_DATA_OPCODE)
+#define G_CPL_NVMT_DATA_OPCODE(x) \
+ (((x) >> S_CPL_NVMT_DATA_OPCODE) & M_CPL_NVMT_DATA_OPCODE)
+
+#define S_CPL_NVMT_DATA_TID 0
+#define M_CPL_NVMT_DATA_TID 0xffffff
+#define V_CPL_NVMT_DATA_TID(x) ((x) << S_CPL_NVMT_DATA_TID)
+#define G_CPL_NVMT_DATA_TID(x) \
+ (((x) >> S_CPL_NVMT_DATA_TID) & M_CPL_NVMT_DATA_TID)
+
+#define S_CPL_NVMT_DATA_STATUS 0
+#define M_CPL_NVMT_DATA_STATUS 0xff
+#define V_CPL_NVMT_DATA_STATUS(x) ((x) << S_CPL_NVMT_DATA_STATUS)
+#define G_CPL_NVMT_DATA_STATUS(x) \
+ (((x) >> S_CPL_NVMT_DATA_STATUS) & M_CPL_NVMT_DATA_STATUS)
+
+struct cpl_nvmt_cmp {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 crch;
+ __be16 length;
+ __be32 seq;
+ __u8 t10status;
+ __u8 status;
+ __be16 crcl;
+};
+
+#define S_CPL_NVMT_CMP_OPCODE 24
+#define M_CPL_NVMT_CMP_OPCODE 0xff
+#define V_CPL_NVMT_CMP_OPCODE(x) ((x) << S_CPL_NVMT_CMP_OPCODE)
+#define G_CPL_NVMT_CMP_OPCODE(x) \
+ (((x) >> S_CPL_NVMT_CMP_OPCODE) & M_CPL_NVMT_CMP_OPCODE)
+
+#define S_CPL_NVMT_CMP_TID 0
+#define M_CPL_NVMT_CMP_TID 0xffffff
+#define V_CPL_NVMT_CMP_TID(x) ((x) << S_CPL_NVMT_CMP_TID)
+#define G_CPL_NVMT_CMP_TID(x) \
+ (((x) >> S_CPL_NVMT_CMP_TID) & M_CPL_NVMT_CMP_TID)
+
+struct cpl_nvmt_cmp_imm {
+ __be32 op_to_cqid;
+ __be32 generation_bit_to_oprqinc;
+ __be32 seq;
+ __be16 crch;
+ __be16 length;
+ __be16 crcl;
+ __u8 t10status;
+ __u8 status;
+ __be32 r1;
+};
+
+#define S_CPL_NVMT_CMP_IMM_OPCODE 24
+#define M_CPL_NVMT_CMP_IMM_OPCODE 0xff
+#define V_CPL_NVMT_CMP_IMM_OPCODE(x) ((x) << S_CPL_NVMT_CMP_IMM_OPCODE)
+#define G_CPL_NVMT_CMP_IMM_OPCODE(x) \
+ (((x) >> S_CPL_NVMT_CMP_IMM_OPCODE) & M_CPL_NVMT_CMP_IMM_OPCODE)
+
+#define S_CPL_NVMT_CMP_IMM_RSSCTRL 16
+#define M_CPL_NVMT_CMP_IMM_RSSCTRL 0xff
+#define V_CPL_NVMT_CMP_IMM_RSSCTRL(x) ((x) << S_CPL_NVMT_CMP_IMM_RSSCTRL)
+#define G_CPL_NVMT_CMP_IMM_RSSCTRL(x) \
+ (((x) >> S_CPL_NVMT_CMP_IMM_RSSCTRL) & M_CPL_NVMT_CMP_IMM_RSSCTRL)
+
+#define S_CPL_NVMT_CMP_IMM_CQID 0
+#define M_CPL_NVMT_CMP_IMM_CQID 0xffff
+#define V_CPL_NVMT_CMP_IMM_CQID(x) ((x) << S_CPL_NVMT_CMP_IMM_CQID)
+#define G_CPL_NVMT_CMP_IMM_CQID(x) \
+ (((x) >> S_CPL_NVMT_CMP_IMM_CQID) & M_CPL_NVMT_CMP_IMM_CQID)
+
+#define S_CPL_NVMT_CMP_IMM_GENERATION_BIT 31
+#define M_CPL_NVMT_CMP_IMM_GENERATION_BIT 0x1
+#define V_CPL_NVMT_CMP_IMM_GENERATION_BIT(x) \
+ ((x) << S_CPL_NVMT_CMP_IMM_GENERATION_BIT)
+#define G_CPL_NVMT_CMP_IMM_GENERATION_BIT(x) \
+ (((x) >> S_CPL_NVMT_CMP_IMM_GENERATION_BIT) & \
+ M_CPL_NVMT_CMP_IMM_GENERATION_BIT)
+#define F_CPL_NVMT_CMP_IMM_GENERATION_BIT \
+ V_CPL_NVMT_CMP_IMM_GENERATION_BIT(1U)
+
+#define S_CPL_NVMT_CMP_IMM_TID 8
+#define M_CPL_NVMT_CMP_IMM_TID 0xfffff
+#define V_CPL_NVMT_CMP_IMM_TID(x) ((x) << S_CPL_NVMT_CMP_IMM_TID)
+#define G_CPL_NVMT_CMP_IMM_TID(x) \
+ (((x) >> S_CPL_NVMT_CMP_IMM_TID) & M_CPL_NVMT_CMP_IMM_TID)
+
+#define S_CPL_NVMT_CMP_IMM_OPRQINC 0
+#define M_CPL_NVMT_CMP_IMM_OPRQINC 0xff
+#define V_CPL_NVMT_CMP_IMM_OPRQINC(x) ((x) << S_CPL_NVMT_CMP_IMM_OPRQINC)
+#define G_CPL_NVMT_CMP_IMM_OPRQINC(x) \
+ (((x) >> S_CPL_NVMT_CMP_IMM_OPRQINC) & M_CPL_NVMT_CMP_IMM_OPRQINC)
+
+struct cpl_nvmt_cmp_srq {
+ __be32 op_to_cqid;
+ __be32 generation_bit_to_oprqinc;
+ __be32 seq;
+ __be16 crch;
+ __be16 length;
+ __be16 crcl;
+ __u8 t10status;
+ __u8 status;
+ __be32 rqe;
+};
+
+#define S_CPL_NVMT_CMP_SRQ_OPCODE 24
+#define M_CPL_NVMT_CMP_SRQ_OPCODE 0xff
+#define V_CPL_NVMT_CMP_SRQ_OPCODE(x) ((x) << S_CPL_NVMT_CMP_SRQ_OPCODE)
+#define G_CPL_NVMT_CMP_SRQ_OPCODE(x) \
+ (((x) >> S_CPL_NVMT_CMP_SRQ_OPCODE) & M_CPL_NVMT_CMP_SRQ_OPCODE)
+
+#define S_CPL_NVMT_CMP_SRQ_RSSCTRL 16
+#define M_CPL_NVMT_CMP_SRQ_RSSCTRL 0xff
+#define V_CPL_NVMT_CMP_SRQ_RSSCTRL(x) ((x) << S_CPL_NVMT_CMP_SRQ_RSSCTRL)
+#define G_CPL_NVMT_CMP_SRQ_RSSCTRL(x) \
+ (((x) >> S_CPL_NVMT_CMP_SRQ_RSSCTRL) & M_CPL_NVMT_CMP_SRQ_RSSCTRL)
+
+#define S_CPL_NVMT_CMP_SRQ_CQID 0
+#define M_CPL_NVMT_CMP_SRQ_CQID 0xffff
+#define V_CPL_NVMT_CMP_SRQ_CQID(x) ((x) << S_CPL_NVMT_CMP_SRQ_CQID)
+#define G_CPL_NVMT_CMP_SRQ_CQID(x) \
+ (((x) >> S_CPL_NVMT_CMP_SRQ_CQID) & M_CPL_NVMT_CMP_SRQ_CQID)
+
+#define S_CPL_NVMT_CMP_SRQ_GENERATION_BIT 31
+#define M_CPL_NVMT_CMP_SRQ_GENERATION_BIT 0x1
+#define V_CPL_NVMT_CMP_SRQ_GENERATION_BIT(x) \
+ ((x) << S_CPL_NVMT_CMP_SRQ_GENERATION_BIT)
+#define G_CPL_NVMT_CMP_SRQ_GENERATION_BIT(x) \
+ (((x) >> S_CPL_NVMT_CMP_SRQ_GENERATION_BIT) & \
+ M_CPL_NVMT_CMP_SRQ_GENERATION_BIT)
+#define F_CPL_NVMT_CMP_SRQ_GENERATION_BIT \
+ V_CPL_NVMT_CMP_SRQ_GENERATION_BIT(1U)
+
+#define S_CPL_NVMT_CMP_SRQ_TID 8
+#define M_CPL_NVMT_CMP_SRQ_TID 0xfffff
+#define V_CPL_NVMT_CMP_SRQ_TID(x) ((x) << S_CPL_NVMT_CMP_SRQ_TID)
+#define G_CPL_NVMT_CMP_SRQ_TID(x) \
+ (((x) >> S_CPL_NVMT_CMP_SRQ_TID) & M_CPL_NVMT_CMP_SRQ_TID)
+
+#define S_CPL_NVMT_CMP_SRQ_OPRQINC 0
+#define M_CPL_NVMT_CMP_SRQ_OPRQINC 0xff
+#define V_CPL_NVMT_CMP_SRQ_OPRQINC(x) ((x) << S_CPL_NVMT_CMP_SRQ_OPRQINC)
+#define G_CPL_NVMT_CMP_SRQ_OPRQINC(x) \
+ (((x) >> S_CPL_NVMT_CMP_SRQ_OPRQINC) & M_CPL_NVMT_CMP_SRQ_OPRQINC)
+
#endif /* T4_MSG_H */
diff --git a/sys/dev/cxgbe/common/t4_regs.h b/sys/dev/cxgbe/common/t4_regs.h
index e3b2a29b2ea9..8f500ec0fbdd 100644
--- a/sys/dev/cxgbe/common/t4_regs.h
+++ b/sys/dev/cxgbe/common/t4_regs.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2013, 2016 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2013, 2016, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,10 +27,11 @@
*/
/* This file is automatically generated --- changes will be lost */
-/* Generation Date : Wed Jan 27 10:57:51 IST 2016 */
-/* Directory name: t4_reg.txt, Changeset: */
-/* Directory name: t5_reg.txt, Changeset: 6936:7f6342b03d61 */
-/* Directory name: t6_reg.txt, Changeset: 4191:ce3ccd95c109 */
+/* Generation Date : Thu Sep 11 05:25:56 PM IST 2025 */
+/* Directory name: t4_reg.txt, Date: Not specified */
+/* Directory name: t5_reg.txt, Changeset: 6945:54ba4ba7ee8b */
+/* Directory name: t6_reg.txt, Changeset: 4277:9c165d0f4899 */
+/* Directory name: t7_reg.txt, Changeset: 5945:1487219ecb20 */
#define MYPF_BASE 0x1b000
#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr))
@@ -285,9 +285,6 @@
#define T5_PORT_BASE(idx) (T5_PORT0_BASE + (idx) * T5_PORT_STRIDE)
#define T5_PORT_REG(idx, reg) (T5_PORT_BASE(idx) + (reg))
-#define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR)
-#define MC_REG(reg, idx) (reg + MC_STRIDE * idx)
-
#define PCIE_PF_INT_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
#define NUM_PCIE_PF_INT_INSTANCES 8
@@ -459,9 +456,6 @@
#define LE_DB_DBGI_REQ_MASK_T6(idx) (A_LE_DB_DBGI_REQ_MASK + (idx) * 4)
#define NUM_LE_DB_DBGI_REQ_MASK_T6_INSTANCES 11
-#define LE_DB_DBGI_RSP_DATA_T6(idx) (A_LE_DB_DBGI_RSP_DATA + (idx) * 4)
-#define NUM_LE_DB_DBGI_RSP_DATA_T6_INSTANCES 11
-
#define LE_DB_ACTIVE_MASK_IPV6_T6(idx) (A_LE_DB_ACTIVE_MASK_IPV6 + (idx) * 4)
#define NUM_LE_DB_ACTIVE_MASK_IPV6_T6_INSTANCES 8
@@ -501,12 +495,175 @@
#define CIM_CTL_MAILBOX_VFN_CTL_T6(idx) (A_CIM_CTL_MAILBOX_VFN_CTL + (idx) * 4)
#define NUM_CIM_CTL_MAILBOX_VFN_CTL_T6_INSTANCES 256
+#define T7_MYPORT_BASE 0x2e000
+#define T7_MYPORT_REG(reg_addr) (T7_MYPORT_BASE + (reg_addr))
+
+#define T7_PORT0_BASE 0x30000
+#define T7_PORT0_REG(reg_addr) (T7_PORT0_BASE + (reg_addr))
+
+#define T7_PORT1_BASE 0x32000
+#define T7_PORT1_REG(reg_addr) (T7_PORT1_BASE + (reg_addr))
+
+#define T7_PORT2_BASE 0x34000
+#define T7_PORT2_REG(reg_addr) (T7_PORT2_BASE + (reg_addr))
+
+#define T7_PORT3_BASE 0x36000
+#define T7_PORT3_REG(reg_addr) (T7_PORT3_BASE + (reg_addr))
+
+#define T7_PORT_STRIDE 0x2000
+#define T7_PORT_BASE(idx) (T7_PORT0_BASE + (idx) * T7_PORT_STRIDE)
+#define T7_PORT_REG(idx, reg) (T7_PORT_BASE(idx) + (reg))
+
+#define PCIE_MEM_ACCESS_T7_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_PCIE_MEM_ACCESS_T7_INSTANCES 16
+
+#define PCIE_T7_CMD_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_PCIE_T7_CMD_INSTANCES 1
+
+#define PCIE_T5_ARM_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_PCIE_T5_ARM_INSTANCES 1
+
+#define PCIE_JBOF_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_PCIE_JBOF_INSTANCES 16
+
+#define PCIE_EMUADRRMAP_REG(reg_addr, idx) ((reg_addr) + (idx) * 32)
+#define NUM_PCIE_EMUADRRMAP_INSTANCES 3
+
+#define CIM_GFT_MASK(idx) (A_CIM_GFT_MASK + (idx) * 4)
+#define NUM_CIM_GFT_MASK_INSTANCES 4
+
+#define T7_MPS_TRC_FILTER_MATCH_CTL_A(idx) (A_T7_MPS_TRC_FILTER_MATCH_CTL_A + (idx) * 4)
+#define NUM_T7_MPS_TRC_FILTER_MATCH_CTL_A_INSTANCES 8
+
+#define T7_MPS_TRC_FILTER_MATCH_CTL_B(idx) (A_T7_MPS_TRC_FILTER_MATCH_CTL_B + (idx) * 4)
+#define NUM_T7_MPS_TRC_FILTER_MATCH_CTL_B_INSTANCES 8
+
+#define T7_MPS_TRC_FILTER_RUNT_CTL(idx) (A_T7_MPS_TRC_FILTER_RUNT_CTL + (idx) * 4)
+#define NUM_T7_MPS_TRC_FILTER_RUNT_CTL_INSTANCES 8
+
+#define T7_MPS_TRC_FILTER_DROP(idx) (A_T7_MPS_TRC_FILTER_DROP + (idx) * 4)
+#define NUM_T7_MPS_TRC_FILTER_DROP_INSTANCES 8
+
+#define MPS_TRC_FILTER4_MATCH(idx) (A_MPS_TRC_FILTER4_MATCH + (idx) * 4)
+#define NUM_MPS_TRC_FILTER4_MATCH_INSTANCES 28
+
+#define MPS_TRC_FILTER4_DONT_CARE(idx) (A_MPS_TRC_FILTER4_DONT_CARE + (idx) * 4)
+#define NUM_MPS_TRC_FILTER4_DONT_CARE_INSTANCES 28
+
+#define MPS_TRC_FILTER5_MATCH(idx) (A_MPS_TRC_FILTER5_MATCH + (idx) * 4)
+#define NUM_MPS_TRC_FILTER5_MATCH_INSTANCES 28
+
+#define MPS_TRC_FILTER5_DONT_CARE(idx) (A_MPS_TRC_FILTER5_DONT_CARE + (idx) * 4)
+#define NUM_MPS_TRC_FILTER5_DONT_CARE_INSTANCES 28
+
+#define MPS_TRC_FILTER6_MATCH(idx) (A_MPS_TRC_FILTER6_MATCH + (idx) * 4)
+#define NUM_MPS_TRC_FILTER6_MATCH_INSTANCES 28
+
+#define MPS_TRC_FILTER6_DONT_CARE(idx) (A_MPS_TRC_FILTER6_DONT_CARE + (idx) * 4)
+#define NUM_MPS_TRC_FILTER6_DONT_CARE_INSTANCES 28
+
+#define MPS_TRC_FILTER7_MATCH(idx) (A_MPS_TRC_FILTER7_MATCH + (idx) * 4)
+#define NUM_MPS_TRC_FILTER7_MATCH_INSTANCES 28
+
+#define MPS_TRC_FILTER7_DONT_CARE(idx) (A_MPS_TRC_FILTER7_DONT_CARE + (idx) * 4)
+#define NUM_MPS_TRC_FILTER7_DONT_CARE_INSTANCES 28
+
+#define LE_DB_DBGI_REQ_DATA_T7(idx) (A_LE_DB_DBGI_REQ_DATA + (idx) * 4)
+#define NUM_LE_DB_DBGI_REQ_DATA_T7_INSTANCES 13
+
+#define LE_DB_DBGI_REQ_MASK_T7(idx) (A_LE_DB_DBGI_REQ_MASK + (idx) * 4)
+#define NUM_LE_DB_DBGI_REQ_MASK_T7_INSTANCES 13
+
+#define LE_DB_ACTIVE_MASK_IPV6_T7(idx) (A_LE_DB_ACTIVE_MASK_IPV6 + (idx) * 4)
+#define NUM_LE_DB_ACTIVE_MASK_IPV6_T7_INSTANCES 8
+
+#define LE_HASH_MASK_GEN_IPV4T7(idx) (A_LE_HASH_MASK_GEN_IPV4T5 + (idx) * 4)
+#define NUM_LE_HASH_MASK_GEN_IPV4T7_INSTANCES 8
+
+#define T7_LE_HASH_MASK_GEN_IPV6T5(idx) (A_T7_LE_HASH_MASK_GEN_IPV6T5 + (idx) * 4)
+#define NUM_T7_LE_HASH_MASK_GEN_IPV6T5_INSTANCES 8
+
+#define LE_DB_SECOND_GEN_HASH_MASK_IPV4_T7(idx) (A_LE_DB_SECOND_GEN_HASH_MASK_IPV4 + (idx) * 4)
+#define NUM_LE_DB_SECOND_GEN_HASH_MASK_IPV4_T7_INSTANCES 8
+
+#define TLS_TX_CH_REG(reg_addr, idx) ((reg_addr) + (idx) * 256)
+#define NUM_TLS_TX_CH_INSTANCES 6
+
+#define TLS_TX_CH_IND_REG(reg_addr, idx) ((reg_addr) + (idx) * 256)
+#define NUM_TLS_TX_CH_IND_INSTANCES 6
+
+#define ARM_CPU_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_ARM_CPU_INSTANCES 4
+
+#define ARM_CCIM_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define NUM_ARM_CCIM_INSTANCES 4
+
+#define ARM_CCIS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define NUM_ARM_CCIS_INSTANCES 5
+
+#define ARM_CCI_EVNTBUS(idx) (A_ARM_CCI_EVNTBUS + (idx) * 4)
+#define NUM_ARM_CCI_EVNTBUS_INSTANCES 5
+
+#define ARM_ARM_CFG1(idx) (A_ARM_ARM_CFG1 + (idx) * 4)
+#define NUM_ARM_ARM_CFG1_INSTANCES 2
+
+#define ARM_ARM_CFG2(idx) (A_ARM_ARM_CFG2 + (idx) * 4)
+#define NUM_ARM_ARM_CFG2_INSTANCES 2
+
+#define ARM_MSG_REG(reg_addr, idx) ((reg_addr) + (idx) * 48)
+#define NUM_ARM_MSG_INSTANCES 4
+
+#define ARM_MSG_PCIE_MESSAGE2AXI_CFG4(idx) (A_ARM_MSG_PCIE_MESSAGE2AXI_CFG4 + (idx) * 4)
+#define NUM_ARM_MSG_PCIE_MESSAGE2AXI_CFG4_INSTANCES 2
+
+#define MC_CE_ERR_DATA_T7_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_MC_CE_ERR_DATA_T7_INSTANCES 16
+
+#define MC_UE_ERR_DATA_T7_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_MC_UE_ERR_DATA_T7_INSTANCES 16
+
+#define MC_P_BIST_USER_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_MC_P_BIST_USER_INSTANCES 36
+
+#define HMA_H_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_HMA_H_BIST_STATUS_INSTANCES 18
+
+#define GCACHE_P_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_GCACHE_P_BIST_STATUS_INSTANCES 18
+
+#define CIM_CTL_MAILBOX_VF_STATUS_T7(idx) (A_CIM_CTL_MAILBOX_VF_STATUS + (idx) * 4)
+#define NUM_CIM_CTL_MAILBOX_VF_STATUS_T7_INSTANCES 8
+
+#define CIM_CTL_MAILBOX_VFN_CTL_T7(idx) (A_CIM_CTL_MAILBOX_VFN_CTL + (idx) * 4)
+#define NUM_CIM_CTL_MAILBOX_VFN_CTL_T7_INSTANCES 256
+
+#define CIM_CTL_TID_MAP_EN(idx) (A_CIM_CTL_TID_MAP_EN + (idx) * 4)
+#define NUM_CIM_CTL_TID_MAP_EN_INSTANCES 8
+
+#define CIM_CTL_TID_MAP_CORE(idx) (A_CIM_CTL_TID_MAP_CORE + (idx) * 4)
+#define NUM_CIM_CTL_TID_MAP_CORE_INSTANCES 8
+
+#define CIM_CTL_CRYPTO_KEY_DATA(idx) (A_CIM_CTL_CRYPTO_KEY_DATA + (idx) * 4)
+#define NUM_CIM_CTL_CRYPTO_KEY_DATA_INSTANCES 17
+
+#define CIM_CTL_FLOWID_OP_VALID(idx) (A_CIM_CTL_FLOWID_OP_VALID + (idx) * 4)
+#define NUM_CIM_CTL_FLOWID_OP_VALID_INSTANCES 8
+
+#define CIM_CTL_SLV_REG(reg_addr, idx) ((reg_addr) + (idx) * 1024)
+#define NUM_CIM_CTL_SLV_INSTANCES 7
+
#define EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR)
#define EDC_REG(reg, idx) (reg + EDC_STRIDE * idx)
#define EDC_T5_STRIDE (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_T5_REG(reg, idx) (reg + EDC_T5_STRIDE * idx)
+#define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR)
+#define MC_REG(reg, idx) (reg + MC_STRIDE * idx)
+
+#define MC_T7_STRIDE (MC_T71_BASE_ADDR - MC_T70_BASE_ADDR)
+#define MC_T7_REG(reg, idx) (reg + MC_T7_STRIDE * idx)
+
/* registers for module SGE */
#define SGE_BASE_ADDR 0x1000
@@ -637,6 +794,24 @@
#define V_GLOBALENABLE(x) ((x) << S_GLOBALENABLE)
#define F_GLOBALENABLE V_GLOBALENABLE(1U)
+#define S_NUMOFFID 19
+#define M_NUMOFFID 0x7U
+#define V_NUMOFFID(x) ((x) << S_NUMOFFID)
+#define G_NUMOFFID(x) (((x) >> S_NUMOFFID) & M_NUMOFFID)
+
+#define S_INGHINTENABLE2 16
+#define V_INGHINTENABLE2(x) ((x) << S_INGHINTENABLE2)
+#define F_INGHINTENABLE2 V_INGHINTENABLE2(1U)
+
+#define S_INGHINTENABLE3 3
+#define V_INGHINTENABLE3(x) ((x) << S_INGHINTENABLE3)
+#define F_INGHINTENABLE3 V_INGHINTENABLE3(1U)
+
+#define S_TF_MODE 1
+#define M_TF_MODE 0x3U
+#define V_TF_MODE(x) ((x) << S_TF_MODE)
+#define G_TF_MODE(x) (((x) >> S_TF_MODE) & M_TF_MODE)
+
#define A_SGE_HOST_PAGE_SIZE 0x100c
#define S_HOSTPAGESIZEPF7 28
@@ -792,6 +967,16 @@
#define V_WR_ERROR_OPCODE(x) ((x) << S_WR_ERROR_OPCODE)
#define G_WR_ERROR_OPCODE(x) (((x) >> S_WR_ERROR_OPCODE) & M_WR_ERROR_OPCODE)
+#define S_WR_SENDPATH_ERROR_OPCODE 16
+#define M_WR_SENDPATH_ERROR_OPCODE 0xffU
+#define V_WR_SENDPATH_ERROR_OPCODE(x) ((x) << S_WR_SENDPATH_ERROR_OPCODE)
+#define G_WR_SENDPATH_ERROR_OPCODE(x) (((x) >> S_WR_SENDPATH_ERROR_OPCODE) & M_WR_SENDPATH_ERROR_OPCODE)
+
+#define S_WR_SENDPATH_OPCODE 8
+#define M_WR_SENDPATH_OPCODE 0xffU
+#define V_WR_SENDPATH_OPCODE(x) ((x) << S_WR_SENDPATH_OPCODE)
+#define G_WR_SENDPATH_OPCODE(x) (((x) >> S_WR_SENDPATH_OPCODE) & M_WR_SENDPATH_OPCODE)
+
#define A_SGE_PERR_INJECT 0x1020
#define S_MEMSEL 1
@@ -941,6 +1126,22 @@
#define V_PERR_PC_REQ(x) ((x) << S_PERR_PC_REQ)
#define F_PERR_PC_REQ V_PERR_PC_REQ(1U)
+#define S_PERR_HEADERSPLIT_FIFO3 28
+#define V_PERR_HEADERSPLIT_FIFO3(x) ((x) << S_PERR_HEADERSPLIT_FIFO3)
+#define F_PERR_HEADERSPLIT_FIFO3 V_PERR_HEADERSPLIT_FIFO3(1U)
+
+#define S_PERR_HEADERSPLIT_FIFO2 27
+#define V_PERR_HEADERSPLIT_FIFO2(x) ((x) << S_PERR_HEADERSPLIT_FIFO2)
+#define F_PERR_HEADERSPLIT_FIFO2 V_PERR_HEADERSPLIT_FIFO2(1U)
+
+#define S_PERR_PAYLOAD_FIFO3 26
+#define V_PERR_PAYLOAD_FIFO3(x) ((x) << S_PERR_PAYLOAD_FIFO3)
+#define F_PERR_PAYLOAD_FIFO3 V_PERR_PAYLOAD_FIFO3(1U)
+
+#define S_PERR_PAYLOAD_FIFO2 25
+#define V_PERR_PAYLOAD_FIFO2(x) ((x) << S_PERR_PAYLOAD_FIFO2)
+#define F_PERR_PAYLOAD_FIFO2 V_PERR_PAYLOAD_FIFO2(1U)
+
#define A_SGE_INT_ENABLE1 0x1028
#define A_SGE_PERR_ENABLE1 0x102c
#define A_SGE_INT_CAUSE2 0x1030
@@ -1105,6 +1306,22 @@
#define V_PERR_DB_FIFO(x) ((x) << S_PERR_DB_FIFO)
#define F_PERR_DB_FIFO V_PERR_DB_FIFO(1U)
+#define S_TF_FIFO_PERR 24
+#define V_TF_FIFO_PERR(x) ((x) << S_TF_FIFO_PERR)
+#define F_TF_FIFO_PERR V_TF_FIFO_PERR(1U)
+
+#define S_PERR_ISW_IDMA3_FIFO 15
+#define V_PERR_ISW_IDMA3_FIFO(x) ((x) << S_PERR_ISW_IDMA3_FIFO)
+#define F_PERR_ISW_IDMA3_FIFO V_PERR_ISW_IDMA3_FIFO(1U)
+
+#define S_PERR_ISW_IDMA2_FIFO 13
+#define V_PERR_ISW_IDMA2_FIFO(x) ((x) << S_PERR_ISW_IDMA2_FIFO)
+#define F_PERR_ISW_IDMA2_FIFO V_PERR_ISW_IDMA2_FIFO(1U)
+
+#define S_SGE_IPP_FIFO_PERR 5
+#define V_SGE_IPP_FIFO_PERR(x) ((x) << S_SGE_IPP_FIFO_PERR)
+#define F_SGE_IPP_FIFO_PERR V_SGE_IPP_FIFO_PERR(1U)
+
#define A_SGE_INT_ENABLE2 0x1034
#define A_SGE_PERR_ENABLE2 0x1038
#define A_SGE_INT_CAUSE3 0x103c
@@ -1259,110 +1476,20 @@
#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
#define A_SGE_FL_BUFFER_SIZE1 0x1048
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE2 0x104c
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE3 0x1050
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE4 0x1054
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE5 0x1058
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE6 0x105c
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE7 0x1060
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE8 0x1064
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE9 0x1068
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE10 0x106c
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE11 0x1070
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE12 0x1074
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE13 0x1078
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE14 0x107c
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE15 0x1080
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_DBQ_CTXT_BADDR 0x1084
#define S_BASEADDR 3
@@ -1426,6 +1553,10 @@
#define V_NULLPTREN(x) ((x) << S_NULLPTREN)
#define F_NULLPTREN V_NULLPTREN(1U)
+#define S_HDRSTARTFLQ4K 1
+#define V_HDRSTARTFLQ4K(x) ((x) << S_HDRSTARTFLQ4K)
+#define F_HDRSTARTFLQ4K V_HDRSTARTFLQ4K(1U)
+
#define A_SGE_CONM_CTRL 0x1094
#define S_EGRTHRESHOLD 8
@@ -2243,6 +2374,34 @@
#define V_PERR_IDMA_SWITCH_OUTPUT_FIFO0(x) ((x) << S_PERR_IDMA_SWITCH_OUTPUT_FIFO0)
#define F_PERR_IDMA_SWITCH_OUTPUT_FIFO0 V_PERR_IDMA_SWITCH_OUTPUT_FIFO0(1U)
+#define S_PERR_POINTER_HDR_FIFO3 10
+#define V_PERR_POINTER_HDR_FIFO3(x) ((x) << S_PERR_POINTER_HDR_FIFO3)
+#define F_PERR_POINTER_HDR_FIFO3 V_PERR_POINTER_HDR_FIFO3(1U)
+
+#define S_PERR_POINTER_HDR_FIFO2 9
+#define V_PERR_POINTER_HDR_FIFO2(x) ((x) << S_PERR_POINTER_HDR_FIFO2)
+#define F_PERR_POINTER_HDR_FIFO2 V_PERR_POINTER_HDR_FIFO2(1U)
+
+#define S_PERR_POINTER_DATA_FIFO3 8
+#define V_PERR_POINTER_DATA_FIFO3(x) ((x) << S_PERR_POINTER_DATA_FIFO3)
+#define F_PERR_POINTER_DATA_FIFO3 V_PERR_POINTER_DATA_FIFO3(1U)
+
+#define S_PERR_POINTER_DATA_FIFO2 7
+#define V_PERR_POINTER_DATA_FIFO2(x) ((x) << S_PERR_POINTER_DATA_FIFO2)
+#define F_PERR_POINTER_DATA_FIFO2 V_PERR_POINTER_DATA_FIFO2(1U)
+
+#define S_PERR_IDMA2IMSG_FIFO3 3
+#define V_PERR_IDMA2IMSG_FIFO3(x) ((x) << S_PERR_IDMA2IMSG_FIFO3)
+#define F_PERR_IDMA2IMSG_FIFO3 V_PERR_IDMA2IMSG_FIFO3(1U)
+
+#define S_PERR_IDMA2IMSG_FIFO2 2
+#define V_PERR_IDMA2IMSG_FIFO2(x) ((x) << S_PERR_IDMA2IMSG_FIFO2)
+#define F_PERR_IDMA2IMSG_FIFO2 V_PERR_IDMA2IMSG_FIFO2(1U)
+
+#define S_PERR_HINT_DELAY_FIFO 0
+#define V_PERR_HINT_DELAY_FIFO(x) ((x) << S_PERR_HINT_DELAY_FIFO)
+#define F_PERR_HINT_DELAY_FIFO V_PERR_HINT_DELAY_FIFO(1U)
+
#define A_SGE_INT_ENABLE5 0x1110
#define A_SGE_PERR_ENABLE5 0x1114
#define A_SGE_DBFIFO_STATUS2 0x1118
@@ -2359,6 +2518,46 @@
#define V_TX_COALESCE_PRI(x) ((x) << S_TX_COALESCE_PRI)
#define F_TX_COALESCE_PRI V_TX_COALESCE_PRI(1U)
+#define S_HINT_SGE_SEL 31
+#define V_HINT_SGE_SEL(x) ((x) << S_HINT_SGE_SEL)
+#define F_HINT_SGE_SEL V_HINT_SGE_SEL(1U)
+
+#define S_HINT_SEL 30
+#define V_HINT_SEL(x) ((x) << S_HINT_SEL)
+#define F_HINT_SEL V_HINT_SEL(1U)
+
+#define S_HINT_DISABLE 29
+#define V_HINT_DISABLE(x) ((x) << S_HINT_DISABLE)
+#define F_HINT_DISABLE V_HINT_DISABLE(1U)
+
+#define S_RXCPLMODE_ISCSI 28
+#define V_RXCPLMODE_ISCSI(x) ((x) << S_RXCPLMODE_ISCSI)
+#define F_RXCPLMODE_ISCSI V_RXCPLMODE_ISCSI(1U)
+
+#define S_RXCPLMODE_NVMT 27
+#define V_RXCPLMODE_NVMT(x) ((x) << S_RXCPLMODE_NVMT)
+#define F_RXCPLMODE_NVMT V_RXCPLMODE_NVMT(1U)
+
+#define S_WRE_REPLAY_INORDER 26
+#define V_WRE_REPLAY_INORDER(x) ((x) << S_WRE_REPLAY_INORDER)
+#define F_WRE_REPLAY_INORDER V_WRE_REPLAY_INORDER(1U)
+
+#define S_ETH2XEN 25
+#define V_ETH2XEN(x) ((x) << S_ETH2XEN)
+#define F_ETH2XEN V_ETH2XEN(1U)
+
+#define S_ARMDBENDDIS 24
+#define V_ARMDBENDDIS(x) ((x) << S_ARMDBENDDIS)
+#define F_ARMDBENDDIS V_ARMDBENDDIS(1U)
+
+#define S_PACKPADT7 23
+#define V_PACKPADT7(x) ((x) << S_PACKPADT7)
+#define F_PACKPADT7 V_PACKPADT7(1U)
+
+#define S_WRE_UPFLCREDIT 22
+#define V_WRE_UPFLCREDIT(x) ((x) << S_WRE_UPFLCREDIT)
+#define F_WRE_UPFLCREDIT V_WRE_UPFLCREDIT(1U)
+
#define A_SGE_DEEP_SLEEP 0x1128
#define S_IDMA1_SLEEP_STATUS 11
@@ -2493,6 +2692,42 @@
#define V_FATAL_DEQ(x) ((x) << S_FATAL_DEQ)
#define F_FATAL_DEQ V_FATAL_DEQ(1U)
+#define S_FATAL_DEQ0_DRDY 29
+#define M_FATAL_DEQ0_DRDY 0x7U
+#define V_FATAL_DEQ0_DRDY(x) ((x) << S_FATAL_DEQ0_DRDY)
+#define G_FATAL_DEQ0_DRDY(x) (((x) >> S_FATAL_DEQ0_DRDY) & M_FATAL_DEQ0_DRDY)
+
+#define S_FATAL_OUT0_DRDY 26
+#define M_FATAL_OUT0_DRDY 0x7U
+#define V_FATAL_OUT0_DRDY(x) ((x) << S_FATAL_OUT0_DRDY)
+#define G_FATAL_OUT0_DRDY(x) (((x) >> S_FATAL_OUT0_DRDY) & M_FATAL_OUT0_DRDY)
+
+#define S_IMSG_DBG3_STUCK 25
+#define V_IMSG_DBG3_STUCK(x) ((x) << S_IMSG_DBG3_STUCK)
+#define F_IMSG_DBG3_STUCK V_IMSG_DBG3_STUCK(1U)
+
+#define S_IMSG_DBG2_STUCK 24
+#define V_IMSG_DBG2_STUCK(x) ((x) << S_IMSG_DBG2_STUCK)
+#define F_IMSG_DBG2_STUCK V_IMSG_DBG2_STUCK(1U)
+
+#define S_IMSG_DBG1_STUCK 23
+#define V_IMSG_DBG1_STUCK(x) ((x) << S_IMSG_DBG1_STUCK)
+#define F_IMSG_DBG1_STUCK V_IMSG_DBG1_STUCK(1U)
+
+#define S_IMSG_DBG0_STUCK 22
+#define V_IMSG_DBG0_STUCK(x) ((x) << S_IMSG_DBG0_STUCK)
+#define F_IMSG_DBG0_STUCK V_IMSG_DBG0_STUCK(1U)
+
+#define S_FATAL_DEQ1_DRDY 3
+#define M_FATAL_DEQ1_DRDY 0x3U
+#define V_FATAL_DEQ1_DRDY(x) ((x) << S_FATAL_DEQ1_DRDY)
+#define G_FATAL_DEQ1_DRDY(x) (((x) >> S_FATAL_DEQ1_DRDY) & M_FATAL_DEQ1_DRDY)
+
+#define S_FATAL_OUT1_DRDY 1
+#define M_FATAL_OUT1_DRDY 0x3U
+#define V_FATAL_OUT1_DRDY(x) ((x) << S_FATAL_OUT1_DRDY)
+#define G_FATAL_OUT1_DRDY(x) (((x) >> S_FATAL_OUT1_DRDY) & M_FATAL_OUT1_DRDY)
+
#define A_SGE_DOORBELL_THROTTLE_THRESHOLD 0x112c
#define S_THROTTLE_THRESHOLD_FL 16
@@ -2612,6 +2847,55 @@
#define V_DBPTBUFRSV0(x) ((x) << S_DBPTBUFRSV0)
#define G_DBPTBUFRSV0(x) (((x) >> S_DBPTBUFRSV0) & M_DBPTBUFRSV0)
+#define A_SGE_TBUF_CONTROL0 0x114c
+#define A_SGE_TBUF_CONTROL1 0x1150
+
+#define S_DBPTBUFRSV3 9
+#define M_DBPTBUFRSV3 0x1ffU
+#define V_DBPTBUFRSV3(x) ((x) << S_DBPTBUFRSV3)
+#define G_DBPTBUFRSV3(x) (((x) >> S_DBPTBUFRSV3) & M_DBPTBUFRSV3)
+
+#define S_DBPTBUFRSV2 0
+#define M_DBPTBUFRSV2 0x1ffU
+#define V_DBPTBUFRSV2(x) ((x) << S_DBPTBUFRSV2)
+#define G_DBPTBUFRSV2(x) (((x) >> S_DBPTBUFRSV2) & M_DBPTBUFRSV2)
+
+#define A_SGE_TBUF_CONTROL2 0x1154
+
+#define S_DBPTBUFRSV5 9
+#define M_DBPTBUFRSV5 0x1ffU
+#define V_DBPTBUFRSV5(x) ((x) << S_DBPTBUFRSV5)
+#define G_DBPTBUFRSV5(x) (((x) >> S_DBPTBUFRSV5) & M_DBPTBUFRSV5)
+
+#define S_DBPTBUFRSV4 0
+#define M_DBPTBUFRSV4 0x1ffU
+#define V_DBPTBUFRSV4(x) ((x) << S_DBPTBUFRSV4)
+#define G_DBPTBUFRSV4(x) (((x) >> S_DBPTBUFRSV4) & M_DBPTBUFRSV4)
+
+#define A_SGE_TBUF_CONTROL3 0x1158
+
+#define S_DBPTBUFRSV7 9
+#define M_DBPTBUFRSV7 0x1ffU
+#define V_DBPTBUFRSV7(x) ((x) << S_DBPTBUFRSV7)
+#define G_DBPTBUFRSV7(x) (((x) >> S_DBPTBUFRSV7) & M_DBPTBUFRSV7)
+
+#define S_DBPTBUFRSV6 0
+#define M_DBPTBUFRSV6 0x1ffU
+#define V_DBPTBUFRSV6(x) ((x) << S_DBPTBUFRSV6)
+#define G_DBPTBUFRSV6(x) (((x) >> S_DBPTBUFRSV6) & M_DBPTBUFRSV6)
+
+#define A_SGE_TBUF_CONTROL4 0x115c
+
+#define S_DBPTBUFRSV9 9
+#define M_DBPTBUFRSV9 0x1ffU
+#define V_DBPTBUFRSV9(x) ((x) << S_DBPTBUFRSV9)
+#define G_DBPTBUFRSV9(x) (((x) >> S_DBPTBUFRSV9) & M_DBPTBUFRSV9)
+
+#define S_DBPTBUFRSV8 0
+#define M_DBPTBUFRSV8 0x1ffU
+#define V_DBPTBUFRSV8(x) ((x) << S_DBPTBUFRSV8)
+#define G_DBPTBUFRSV8(x) (((x) >> S_DBPTBUFRSV8) & M_DBPTBUFRSV8)
+
#define A_SGE_PC0_REQ_BIST_CMD 0x1180
#define A_SGE_PC0_REQ_BIST_ERROR_CNT 0x1184
#define A_SGE_PC1_REQ_BIST_CMD 0x1190
@@ -2620,6 +2904,113 @@
#define A_SGE_PC0_RSP_BIST_ERROR_CNT 0x11a4
#define A_SGE_PC1_RSP_BIST_CMD 0x11b0
#define A_SGE_PC1_RSP_BIST_ERROR_CNT 0x11b4
+#define A_SGE_DBQ_TIMER_THRESH0 0x11b8
+
+#define S_TXTIMETH3 24
+#define M_TXTIMETH3 0x3fU
+#define V_TXTIMETH3(x) ((x) << S_TXTIMETH3)
+#define G_TXTIMETH3(x) (((x) >> S_TXTIMETH3) & M_TXTIMETH3)
+
+#define S_TXTIMETH2 16
+#define M_TXTIMETH2 0x3fU
+#define V_TXTIMETH2(x) ((x) << S_TXTIMETH2)
+#define G_TXTIMETH2(x) (((x) >> S_TXTIMETH2) & M_TXTIMETH2)
+
+#define S_TXTIMETH1 8
+#define M_TXTIMETH1 0x3fU
+#define V_TXTIMETH1(x) ((x) << S_TXTIMETH1)
+#define G_TXTIMETH1(x) (((x) >> S_TXTIMETH1) & M_TXTIMETH1)
+
+#define S_TXTIMETH0 0
+#define M_TXTIMETH0 0x3fU
+#define V_TXTIMETH0(x) ((x) << S_TXTIMETH0)
+#define G_TXTIMETH0(x) (((x) >> S_TXTIMETH0) & M_TXTIMETH0)
+
+#define A_SGE_DBQ_TIMER_THRESH1 0x11bc
+
+#define S_TXTIMETH7 24
+#define M_TXTIMETH7 0x3fU
+#define V_TXTIMETH7(x) ((x) << S_TXTIMETH7)
+#define G_TXTIMETH7(x) (((x) >> S_TXTIMETH7) & M_TXTIMETH7)
+
+#define S_TXTIMETH6 16
+#define M_TXTIMETH6 0x3fU
+#define V_TXTIMETH6(x) ((x) << S_TXTIMETH6)
+#define G_TXTIMETH6(x) (((x) >> S_TXTIMETH6) & M_TXTIMETH6)
+
+#define S_TXTIMETH5 8
+#define M_TXTIMETH5 0x3fU
+#define V_TXTIMETH5(x) ((x) << S_TXTIMETH5)
+#define G_TXTIMETH5(x) (((x) >> S_TXTIMETH5) & M_TXTIMETH5)
+
+#define S_TXTIMETH4 0
+#define M_TXTIMETH4 0x3fU
+#define V_TXTIMETH4(x) ((x) << S_TXTIMETH4)
+#define G_TXTIMETH4(x) (((x) >> S_TXTIMETH4) & M_TXTIMETH4)
+
+#define A_SGE_DBQ_TIMER_CONFIG 0x11c0
+
+#define S_DBQ_TIMER_OP 0
+#define M_DBQ_TIMER_OP 0xffU
+#define V_DBQ_TIMER_OP(x) ((x) << S_DBQ_TIMER_OP)
+#define G_DBQ_TIMER_OP(x) (((x) >> S_DBQ_TIMER_OP) & M_DBQ_TIMER_OP)
+
+#define A_SGE_DBQ_TIMER_DBG 0x11c4
+
+#define S_DBQ_TIMER_CMD 31
+#define V_DBQ_TIMER_CMD(x) ((x) << S_DBQ_TIMER_CMD)
+#define F_DBQ_TIMER_CMD V_DBQ_TIMER_CMD(1U)
+
+#define S_DBQ_TIMER_INDEX 24
+#define M_DBQ_TIMER_INDEX 0x3fU
+#define V_DBQ_TIMER_INDEX(x) ((x) << S_DBQ_TIMER_INDEX)
+#define G_DBQ_TIMER_INDEX(x) (((x) >> S_DBQ_TIMER_INDEX) & M_DBQ_TIMER_INDEX)
+
+#define S_DBQ_TIMER_QCNT 0
+#define M_DBQ_TIMER_QCNT 0x1ffffU
+#define V_DBQ_TIMER_QCNT(x) ((x) << S_DBQ_TIMER_QCNT)
+#define G_DBQ_TIMER_QCNT(x) (((x) >> S_DBQ_TIMER_QCNT) & M_DBQ_TIMER_QCNT)
+
+#define A_SGE_INT_CAUSE8 0x11c8
+
+#define S_TRACE_RXPERR 8
+#define V_TRACE_RXPERR(x) ((x) << S_TRACE_RXPERR)
+#define F_TRACE_RXPERR V_TRACE_RXPERR(1U)
+
+#define S_U3_RXPERR 7
+#define V_U3_RXPERR(x) ((x) << S_U3_RXPERR)
+#define F_U3_RXPERR V_U3_RXPERR(1U)
+
+#define S_U2_RXPERR 6
+#define V_U2_RXPERR(x) ((x) << S_U2_RXPERR)
+#define F_U2_RXPERR V_U2_RXPERR(1U)
+
+#define S_U1_RXPERR 5
+#define V_U1_RXPERR(x) ((x) << S_U1_RXPERR)
+#define F_U1_RXPERR V_U1_RXPERR(1U)
+
+#define S_U0_RXPERR 4
+#define V_U0_RXPERR(x) ((x) << S_U0_RXPERR)
+#define F_U0_RXPERR V_U0_RXPERR(1U)
+
+#define S_T3_RXPERR 3
+#define V_T3_RXPERR(x) ((x) << S_T3_RXPERR)
+#define F_T3_RXPERR V_T3_RXPERR(1U)
+
+#define S_T2_RXPERR 2
+#define V_T2_RXPERR(x) ((x) << S_T2_RXPERR)
+#define F_T2_RXPERR V_T2_RXPERR(1U)
+
+#define S_T1_RXPERR 1
+#define V_T1_RXPERR(x) ((x) << S_T1_RXPERR)
+#define F_T1_RXPERR V_T1_RXPERR(1U)
+
+#define S_T0_RXPERR 0
+#define V_T0_RXPERR(x) ((x) << S_T0_RXPERR)
+#define F_T0_RXPERR V_T0_RXPERR(1U)
+
+#define A_SGE_INT_ENABLE8 0x11cc
+#define A_SGE_PERR_ENABLE8 0x11d0
#define A_SGE_CTXT_CMD 0x11fc
#define S_BUSY 31
@@ -2648,6 +3039,17 @@
#define A_SGE_CTXT_DATA4 0x1210
#define A_SGE_CTXT_DATA5 0x1214
#define A_SGE_CTXT_DATA6 0x1218
+
+#define S_DATA_UNUSED 7
+#define M_DATA_UNUSED 0x1ffffffU
+#define V_DATA_UNUSED(x) ((x) << S_DATA_UNUSED)
+#define G_DATA_UNUSED(x) (((x) >> S_DATA_UNUSED) & M_DATA_UNUSED)
+
+#define S_DATA6 0
+#define M_DATA6 0x7fU
+#define V_DATA6(x) ((x) << S_DATA6)
+#define G_DATA6(x) (((x) >> S_DATA6) & M_DATA6)
+
#define A_SGE_CTXT_DATA7 0x121c
#define A_SGE_CTXT_MASK0 0x1220
#define A_SGE_CTXT_MASK1 0x1224
@@ -2656,6 +3058,17 @@
#define A_SGE_CTXT_MASK4 0x1230
#define A_SGE_CTXT_MASK5 0x1234
#define A_SGE_CTXT_MASK6 0x1238
+
+#define S_MASK_UNUSED 7
+#define M_MASK_UNUSED 0x1ffffffU
+#define V_MASK_UNUSED(x) ((x) << S_MASK_UNUSED)
+#define G_MASK_UNUSED(x) (((x) >> S_MASK_UNUSED) & M_MASK_UNUSED)
+
+#define S_MASK 0
+#define M_MASK 0x7fU
+#define V_MASK(x) ((x) << S_MASK)
+#define G_MASK(x) (((x) >> S_MASK) & M_MASK)
+
#define A_SGE_CTXT_MASK7 0x123c
#define A_SGE_QBASE_MAP0 0x1240
@@ -2674,6 +3087,10 @@
#define V_INGRESS0_SIZE(x) ((x) << S_INGRESS0_SIZE)
#define G_INGRESS0_SIZE(x) (((x) >> S_INGRESS0_SIZE) & M_INGRESS0_SIZE)
+#define S_DESTINATION 31
+#define V_DESTINATION(x) ((x) << S_DESTINATION)
+#define F_DESTINATION V_DESTINATION(1U)
+
#define A_SGE_QBASE_MAP1 0x1244
#define S_EGRESS0_BASE 0
@@ -2719,6 +3136,10 @@
#define V_FLMTHRESH(x) ((x) << S_FLMTHRESH)
#define G_FLMTHRESH(x) (((x) >> S_FLMTHRESH) & M_FLMTHRESH)
+#define S_CONENMIDDLE 7
+#define V_CONENMIDDLE(x) ((x) << S_CONENMIDDLE)
+#define F_CONENMIDDLE V_CONENMIDDLE(1U)
+
#define A_SGE_DEBUG_CONM 0x1258
#define S_MPS_CH_CNG 16
@@ -2745,6 +3166,16 @@
#define V_LAST_QID(x) ((x) << S_LAST_QID)
#define G_LAST_QID(x) (((x) >> S_LAST_QID) & M_LAST_QID)
+#define S_CH_CNG 16
+#define M_CH_CNG 0xffffU
+#define V_CH_CNG(x) ((x) << S_CH_CNG)
+#define G_CH_CNG(x) (((x) >> S_CH_CNG) & M_CH_CNG)
+
+#define S_CH_SEL 14
+#define M_CH_SEL 0x3U
+#define V_CH_SEL(x) ((x) << S_CH_SEL)
+#define G_CH_SEL(x) (((x) >> S_CH_SEL) & M_CH_SEL)
+
#define A_SGE_DBG_QUEUE_STAT0_CTRL 0x125c
#define S_IMSG_GTS_SEL 18
@@ -2766,6 +3197,7 @@
#define A_SGE_DBG_BAR2_PKT_CNT 0x126c
#define A_SGE_DBG_DB_PKT_CNT 0x1270
#define A_SGE_DBG_GTS_PKT_CNT 0x1274
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_16 0x1278
#define A_SGE_DEBUG_DATA_HIGH_INDEX_0 0x1280
#define S_CIM_WM 24
@@ -3965,6 +4397,352 @@
#define V_VFWCOFFSET(x) ((x) << S_VFWCOFFSET)
#define G_VFWCOFFSET(x) (((x) >> S_VFWCOFFSET) & M_VFWCOFFSET)
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_17 0x1340
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_18 0x1344
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_19 0x1348
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_20 0x134c
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_21 0x1350
+#define A_SGE_DEBUG_DATA_LOW_INDEX_16 0x1354
+#define A_SGE_DEBUG_DATA_LOW_INDEX_17 0x1358
+#define A_SGE_DEBUG_DATA_LOW_INDEX_18 0x135c
+#define A_SGE_INT_CAUSE7 0x1360
+
+#define S_HINT_FIFO_FULL 25
+#define V_HINT_FIFO_FULL(x) ((x) << S_HINT_FIFO_FULL)
+#define F_HINT_FIFO_FULL V_HINT_FIFO_FULL(1U)
+
+#define S_CERR_HINT_DELAY_FIFO 24
+#define V_CERR_HINT_DELAY_FIFO(x) ((x) << S_CERR_HINT_DELAY_FIFO)
+#define F_CERR_HINT_DELAY_FIFO V_CERR_HINT_DELAY_FIFO(1U)
+
+#define S_COAL_TIMER_FIFO_PERR 23
+#define V_COAL_TIMER_FIFO_PERR(x) ((x) << S_COAL_TIMER_FIFO_PERR)
+#define F_COAL_TIMER_FIFO_PERR V_COAL_TIMER_FIFO_PERR(1U)
+
+#define S_CMP_FIFO_PERR 22
+#define V_CMP_FIFO_PERR(x) ((x) << S_CMP_FIFO_PERR)
+#define F_CMP_FIFO_PERR V_CMP_FIFO_PERR(1U)
+
+#define S_SGE_IPP_FIFO_CERR 21
+#define V_SGE_IPP_FIFO_CERR(x) ((x) << S_SGE_IPP_FIFO_CERR)
+#define F_SGE_IPP_FIFO_CERR V_SGE_IPP_FIFO_CERR(1U)
+
+#define S_CERR_ING_CTXT_CACHE 20
+#define V_CERR_ING_CTXT_CACHE(x) ((x) << S_CERR_ING_CTXT_CACHE)
+#define F_CERR_ING_CTXT_CACHE V_CERR_ING_CTXT_CACHE(1U)
+
+#define S_IMSG_CNTX_PERR 19
+#define V_IMSG_CNTX_PERR(x) ((x) << S_IMSG_CNTX_PERR)
+#define F_IMSG_CNTX_PERR V_IMSG_CNTX_PERR(1U)
+
+#define S_PD_FIFO_PERR 18
+#define V_PD_FIFO_PERR(x) ((x) << S_PD_FIFO_PERR)
+#define F_PD_FIFO_PERR V_PD_FIFO_PERR(1U)
+
+#define S_IMSG_512_FIFO_PERR 17
+#define V_IMSG_512_FIFO_PERR(x) ((x) << S_IMSG_512_FIFO_PERR)
+#define F_IMSG_512_FIFO_PERR V_IMSG_512_FIFO_PERR(1U)
+
+#define S_CPLSW_FIFO_PERR 16
+#define V_CPLSW_FIFO_PERR(x) ((x) << S_CPLSW_FIFO_PERR)
+#define F_CPLSW_FIFO_PERR V_CPLSW_FIFO_PERR(1U)
+
+#define S_IMSG_FIFO_PERR 15
+#define V_IMSG_FIFO_PERR(x) ((x) << S_IMSG_FIFO_PERR)
+#define F_IMSG_FIFO_PERR V_IMSG_FIFO_PERR(1U)
+
+#define S_CERR_ITP_EVR 14
+#define V_CERR_ITP_EVR(x) ((x) << S_CERR_ITP_EVR)
+#define F_CERR_ITP_EVR V_CERR_ITP_EVR(1U)
+
+#define S_CERR_CONM_SRAM 13
+#define V_CERR_CONM_SRAM(x) ((x) << S_CERR_CONM_SRAM)
+#define F_CERR_CONM_SRAM V_CERR_CONM_SRAM(1U)
+
+#define S_CERR_EGR_CTXT_CACHE 12
+#define V_CERR_EGR_CTXT_CACHE(x) ((x) << S_CERR_EGR_CTXT_CACHE)
+#define F_CERR_EGR_CTXT_CACHE V_CERR_EGR_CTXT_CACHE(1U)
+
+#define S_CERR_FLM_CNTXMEM 11
+#define V_CERR_FLM_CNTXMEM(x) ((x) << S_CERR_FLM_CNTXMEM)
+#define F_CERR_FLM_CNTXMEM V_CERR_FLM_CNTXMEM(1U)
+
+#define S_CERR_FUNC_QBASE 10
+#define V_CERR_FUNC_QBASE(x) ((x) << S_CERR_FUNC_QBASE)
+#define F_CERR_FUNC_QBASE V_CERR_FUNC_QBASE(1U)
+
+#define S_IMSG_CNTX_CERR 9
+#define V_IMSG_CNTX_CERR(x) ((x) << S_IMSG_CNTX_CERR)
+#define F_IMSG_CNTX_CERR V_IMSG_CNTX_CERR(1U)
+
+#define S_PD_FIFO_CERR 8
+#define V_PD_FIFO_CERR(x) ((x) << S_PD_FIFO_CERR)
+#define F_PD_FIFO_CERR V_PD_FIFO_CERR(1U)
+
+#define S_IMSG_512_FIFO_CERR 7
+#define V_IMSG_512_FIFO_CERR(x) ((x) << S_IMSG_512_FIFO_CERR)
+#define F_IMSG_512_FIFO_CERR V_IMSG_512_FIFO_CERR(1U)
+
+#define S_CPLSW_FIFO_CERR 6
+#define V_CPLSW_FIFO_CERR(x) ((x) << S_CPLSW_FIFO_CERR)
+#define F_CPLSW_FIFO_CERR V_CPLSW_FIFO_CERR(1U)
+
+#define S_IMSG_FIFO_CERR 5
+#define V_IMSG_FIFO_CERR(x) ((x) << S_IMSG_FIFO_CERR)
+#define F_IMSG_FIFO_CERR V_IMSG_FIFO_CERR(1U)
+
+#define S_CERR_HEADERSPLIT_FIFO3 4
+#define V_CERR_HEADERSPLIT_FIFO3(x) ((x) << S_CERR_HEADERSPLIT_FIFO3)
+#define F_CERR_HEADERSPLIT_FIFO3 V_CERR_HEADERSPLIT_FIFO3(1U)
+
+#define S_CERR_HEADERSPLIT_FIFO2 3
+#define V_CERR_HEADERSPLIT_FIFO2(x) ((x) << S_CERR_HEADERSPLIT_FIFO2)
+#define F_CERR_HEADERSPLIT_FIFO2 V_CERR_HEADERSPLIT_FIFO2(1U)
+
+#define S_CERR_HEADERSPLIT_FIFO1 2
+#define V_CERR_HEADERSPLIT_FIFO1(x) ((x) << S_CERR_HEADERSPLIT_FIFO1)
+#define F_CERR_HEADERSPLIT_FIFO1 V_CERR_HEADERSPLIT_FIFO1(1U)
+
+#define S_CERR_HEADERSPLIT_FIFO0 1
+#define V_CERR_HEADERSPLIT_FIFO0(x) ((x) << S_CERR_HEADERSPLIT_FIFO0)
+#define F_CERR_HEADERSPLIT_FIFO0 V_CERR_HEADERSPLIT_FIFO0(1U)
+
+#define S_CERR_FLM_L1CACHE 0
+#define V_CERR_FLM_L1CACHE(x) ((x) << S_CERR_FLM_L1CACHE)
+#define F_CERR_FLM_L1CACHE V_CERR_FLM_L1CACHE(1U)
+
+#define A_SGE_INT_ENABLE7 0x1364
+#define A_SGE_PERR_ENABLE7 0x1368
+#define A_SGE_ING_COMP_COAL_CFG 0x1700
+
+#define S_USE_PTP_TIMER 27
+#define V_USE_PTP_TIMER(x) ((x) << S_USE_PTP_TIMER)
+#define F_USE_PTP_TIMER V_USE_PTP_TIMER(1U)
+
+#define S_IMSG_SET_OFLOW_ALL_ENTRIES_43060 26
+#define V_IMSG_SET_OFLOW_ALL_ENTRIES_43060(x) ((x) << S_IMSG_SET_OFLOW_ALL_ENTRIES_43060)
+#define F_IMSG_SET_OFLOW_ALL_ENTRIES_43060 V_IMSG_SET_OFLOW_ALL_ENTRIES_43060(1U)
+
+#define S_IMSG_STUCK_INDIRECT_QUEUE_42907 25
+#define V_IMSG_STUCK_INDIRECT_QUEUE_42907(x) ((x) << S_IMSG_STUCK_INDIRECT_QUEUE_42907)
+#define F_IMSG_STUCK_INDIRECT_QUEUE_42907 V_IMSG_STUCK_INDIRECT_QUEUE_42907(1U)
+
+#define S_COMP_COAL_PIDX_INCR 24
+#define V_COMP_COAL_PIDX_INCR(x) ((x) << S_COMP_COAL_PIDX_INCR)
+#define F_COMP_COAL_PIDX_INCR V_COMP_COAL_PIDX_INCR(1U)
+
+#define S_COMP_COAL_TIMER_CNT 16
+#define M_COMP_COAL_TIMER_CNT 0xffU
+#define V_COMP_COAL_TIMER_CNT(x) ((x) << S_COMP_COAL_TIMER_CNT)
+#define G_COMP_COAL_TIMER_CNT(x) (((x) >> S_COMP_COAL_TIMER_CNT) & M_COMP_COAL_TIMER_CNT)
+
+#define S_COMP_COAL_CNTR_TH 8
+#define M_COMP_COAL_CNTR_TH 0xffU
+#define V_COMP_COAL_CNTR_TH(x) ((x) << S_COMP_COAL_CNTR_TH)
+#define G_COMP_COAL_CNTR_TH(x) (((x) >> S_COMP_COAL_CNTR_TH) & M_COMP_COAL_CNTR_TH)
+
+#define S_COMP_COAL_OPCODE 0
+#define M_COMP_COAL_OPCODE 0xffU
+#define V_COMP_COAL_OPCODE(x) ((x) << S_COMP_COAL_OPCODE)
+#define G_COMP_COAL_OPCODE(x) (((x) >> S_COMP_COAL_OPCODE) & M_COMP_COAL_OPCODE)
+
+#define A_SGE_ING_IMSG_DBG 0x1704
+
+#define S_STUCK_CTR_TH 1
+#define M_STUCK_CTR_TH 0xffU
+#define V_STUCK_CTR_TH(x) ((x) << S_STUCK_CTR_TH)
+#define G_STUCK_CTR_TH(x) (((x) >> S_STUCK_CTR_TH) & M_STUCK_CTR_TH)
+
+#define S_STUCK_INT_EN 0
+#define V_STUCK_INT_EN(x) ((x) << S_STUCK_INT_EN)
+#define F_STUCK_INT_EN V_STUCK_INT_EN(1U)
+
+#define A_SGE_ING_IMSG_RSP0_DBG 0x1708
+
+#define S_IDMA1_QID 16
+#define M_IDMA1_QID 0xffffU
+#define V_IDMA1_QID(x) ((x) << S_IDMA1_QID)
+#define G_IDMA1_QID(x) (((x) >> S_IDMA1_QID) & M_IDMA1_QID)
+
+#define S_IDMA0_QID 0
+#define M_IDMA0_QID 0xffffU
+#define V_IDMA0_QID(x) ((x) << S_IDMA0_QID)
+#define G_IDMA0_QID(x) (((x) >> S_IDMA0_QID) & M_IDMA0_QID)
+
+#define A_SGE_ING_IMSG_RSP1_DBG 0x170c
+
+#define S_IDMA3_QID 16
+#define M_IDMA3_QID 0xffffU
+#define V_IDMA3_QID(x) ((x) << S_IDMA3_QID)
+#define G_IDMA3_QID(x) (((x) >> S_IDMA3_QID) & M_IDMA3_QID)
+
+#define S_IDMA2_QID 0
+#define M_IDMA2_QID 0xffffU
+#define V_IDMA2_QID(x) ((x) << S_IDMA2_QID)
+#define G_IDMA2_QID(x) (((x) >> S_IDMA2_QID) & M_IDMA2_QID)
+
+#define A_SGE_LB_MODE 0x1710
+
+#define S_LB_MODE 0
+#define M_LB_MODE 0x3U
+#define V_LB_MODE(x) ((x) << S_LB_MODE)
+#define G_LB_MODE(x) (((x) >> S_LB_MODE) & M_LB_MODE)
+
+#define A_SGE_IMSG_QUESCENT 0x1714
+
+#define S_IMSG_QUESCENT 0
+#define V_IMSG_QUESCENT(x) ((x) << S_IMSG_QUESCENT)
+#define F_IMSG_QUESCENT V_IMSG_QUESCENT(1U)
+
+#define A_SGE_LA_CTRL 0x1718
+
+#define S_LA_GLOBAL_EN 8
+#define V_LA_GLOBAL_EN(x) ((x) << S_LA_GLOBAL_EN)
+#define F_LA_GLOBAL_EN V_LA_GLOBAL_EN(1U)
+
+#define S_PTP_TIMESTAMP_SEL 7
+#define V_PTP_TIMESTAMP_SEL(x) ((x) << S_PTP_TIMESTAMP_SEL)
+#define F_PTP_TIMESTAMP_SEL V_PTP_TIMESTAMP_SEL(1U)
+
+#define S_CIM2SGE_ID_CHK_VLD 6
+#define V_CIM2SGE_ID_CHK_VLD(x) ((x) << S_CIM2SGE_ID_CHK_VLD)
+#define F_CIM2SGE_ID_CHK_VLD V_CIM2SGE_ID_CHK_VLD(1U)
+
+#define S_CPLSW_ID_CHK_VLD 5
+#define V_CPLSW_ID_CHK_VLD(x) ((x) << S_CPLSW_ID_CHK_VLD)
+#define F_CPLSW_ID_CHK_VLD V_CPLSW_ID_CHK_VLD(1U)
+
+#define S_FLM_ID_CHK_VLD 4
+#define V_FLM_ID_CHK_VLD(x) ((x) << S_FLM_ID_CHK_VLD)
+#define F_FLM_ID_CHK_VLD V_FLM_ID_CHK_VLD(1U)
+
+#define S_IQ_DBP_ID_CHK_VLD 3
+#define V_IQ_DBP_ID_CHK_VLD(x) ((x) << S_IQ_DBP_ID_CHK_VLD)
+#define F_IQ_DBP_ID_CHK_VLD V_IQ_DBP_ID_CHK_VLD(1U)
+
+#define S_UP_OBQ_ID_CHK_VLD 2
+#define V_UP_OBQ_ID_CHK_VLD(x) ((x) << S_UP_OBQ_ID_CHK_VLD)
+#define F_UP_OBQ_ID_CHK_VLD V_UP_OBQ_ID_CHK_VLD(1U)
+
+#define S_CIM_ID_CHK_VLD 1
+#define V_CIM_ID_CHK_VLD(x) ((x) << S_CIM_ID_CHK_VLD)
+#define F_CIM_ID_CHK_VLD V_CIM_ID_CHK_VLD(1U)
+
+#define S_DBP_ID_CHK_VLD 0
+#define V_DBP_ID_CHK_VLD(x) ((x) << S_DBP_ID_CHK_VLD)
+#define F_DBP_ID_CHK_VLD V_DBP_ID_CHK_VLD(1U)
+
+#define A_SGE_LA_CTRL_EQID_LOW 0x171c
+
+#define S_EQ_ID_CHK_LOW 0
+#define M_EQ_ID_CHK_LOW 0x1ffffU
+#define V_EQ_ID_CHK_LOW(x) ((x) << S_EQ_ID_CHK_LOW)
+#define G_EQ_ID_CHK_LOW(x) (((x) >> S_EQ_ID_CHK_LOW) & M_EQ_ID_CHK_LOW)
+
+#define A_SGE_LA_CTRL_EQID_HIGH 0x1720
+
+#define S_EQ_ID_CHK_HIGH 0
+#define M_EQ_ID_CHK_HIGH 0x1ffffU
+#define V_EQ_ID_CHK_HIGH(x) ((x) << S_EQ_ID_CHK_HIGH)
+#define G_EQ_ID_CHK_HIGH(x) (((x) >> S_EQ_ID_CHK_HIGH) & M_EQ_ID_CHK_HIGH)
+
+#define A_SGE_LA_CTRL_IQID 0x1724
+
+#define S_IQ_ID_CHK_HIGH 16
+#define M_IQ_ID_CHK_HIGH 0xffffU
+#define V_IQ_ID_CHK_HIGH(x) ((x) << S_IQ_ID_CHK_HIGH)
+#define G_IQ_ID_CHK_HIGH(x) (((x) >> S_IQ_ID_CHK_HIGH) & M_IQ_ID_CHK_HIGH)
+
+#define S_IQ_ID_CHK_LOW 0
+#define M_IQ_ID_CHK_LOW 0xffffU
+#define V_IQ_ID_CHK_LOW(x) ((x) << S_IQ_ID_CHK_LOW)
+#define G_IQ_ID_CHK_LOW(x) (((x) >> S_IQ_ID_CHK_LOW) & M_IQ_ID_CHK_LOW)
+
+#define A_SGE_LA_CTRL_TID_LOW 0x1728
+
+#define S_TID_CHK_LOW 0
+#define M_TID_CHK_LOW 0xffffffU
+#define V_TID_CHK_LOW(x) ((x) << S_TID_CHK_LOW)
+#define G_TID_CHK_LOW(x) (((x) >> S_TID_CHK_LOW) & M_TID_CHK_LOW)
+
+#define A_SGE_LA_CTRL_TID_HIGH 0x172c
+
+#define S_TID_CHK_HIGH 0
+#define M_TID_CHK_HIGH 0xffffffU
+#define V_TID_CHK_HIGH(x) ((x) << S_TID_CHK_HIGH)
+#define G_TID_CHK_HIGH(x) (((x) >> S_TID_CHK_HIGH) & M_TID_CHK_HIGH)
+
+#define A_SGE_CFG_TP_ERR 0x173c
+
+#define S_TP_ERR_STATUS_CH3 30
+#define M_TP_ERR_STATUS_CH3 0x3U
+#define V_TP_ERR_STATUS_CH3(x) ((x) << S_TP_ERR_STATUS_CH3)
+#define G_TP_ERR_STATUS_CH3(x) (((x) >> S_TP_ERR_STATUS_CH3) & M_TP_ERR_STATUS_CH3)
+
+#define S_TP_ERR_STATUS_CH2 28
+#define M_TP_ERR_STATUS_CH2 0x3U
+#define V_TP_ERR_STATUS_CH2(x) ((x) << S_TP_ERR_STATUS_CH2)
+#define G_TP_ERR_STATUS_CH2(x) (((x) >> S_TP_ERR_STATUS_CH2) & M_TP_ERR_STATUS_CH2)
+
+#define S_TP_ERR_STATUS_CH1 26
+#define M_TP_ERR_STATUS_CH1 0x3U
+#define V_TP_ERR_STATUS_CH1(x) ((x) << S_TP_ERR_STATUS_CH1)
+#define G_TP_ERR_STATUS_CH1(x) (((x) >> S_TP_ERR_STATUS_CH1) & M_TP_ERR_STATUS_CH1)
+
+#define S_TP_ERR_STATUS_CH0 24
+#define M_TP_ERR_STATUS_CH0 0x3U
+#define V_TP_ERR_STATUS_CH0(x) ((x) << S_TP_ERR_STATUS_CH0)
+#define G_TP_ERR_STATUS_CH0(x) (((x) >> S_TP_ERR_STATUS_CH0) & M_TP_ERR_STATUS_CH0)
+
+#define S_CPL0_SIZE 16
+#define M_CPL0_SIZE 0xffU
+#define V_CPL0_SIZE(x) ((x) << S_CPL0_SIZE)
+#define G_CPL0_SIZE(x) (((x) >> S_CPL0_SIZE) & M_CPL0_SIZE)
+
+#define S_CPL1_SIZE 8
+#define M_CPL1_SIZE 0xffU
+#define V_CPL1_SIZE(x) ((x) << S_CPL1_SIZE)
+#define G_CPL1_SIZE(x) (((x) >> S_CPL1_SIZE) & M_CPL1_SIZE)
+
+#define S_SIZE_LATCH_CLR 3
+#define V_SIZE_LATCH_CLR(x) ((x) << S_SIZE_LATCH_CLR)
+#define F_SIZE_LATCH_CLR V_SIZE_LATCH_CLR(1U)
+
+#define S_EXT_LATCH_CLR 2
+#define V_EXT_LATCH_CLR(x) ((x) << S_EXT_LATCH_CLR)
+#define F_EXT_LATCH_CLR V_EXT_LATCH_CLR(1U)
+
+#define S_EXT_CHANGE_42875 1
+#define V_EXT_CHANGE_42875(x) ((x) << S_EXT_CHANGE_42875)
+#define F_EXT_CHANGE_42875 V_EXT_CHANGE_42875(1U)
+
+#define S_SIZE_CHANGE_42913 0
+#define V_SIZE_CHANGE_42913(x) ((x) << S_SIZE_CHANGE_42913)
+#define F_SIZE_CHANGE_42913 V_SIZE_CHANGE_42913(1U)
+
+#define A_SGE_CHNL0_CTX_ERROR_COUNT_PER_TID 0x1740
+#define A_SGE_CHNL1_CTX_ERROR_COUNT_PER_TID 0x1744
+#define A_SGE_CHNL2_CTX_ERROR_COUNT_PER_TID 0x1748
+#define A_SGE_CHNL3_CTX_ERROR_COUNT_PER_TID 0x174c
+#define A_SGE_CTX_ACC_CH0 0x1750
+
+#define S_RDMA_INV_HANDLING 24
+#define M_RDMA_INV_HANDLING 0x3U
+#define V_RDMA_INV_HANDLING(x) ((x) << S_RDMA_INV_HANDLING)
+#define G_RDMA_INV_HANDLING(x) (((x) >> S_RDMA_INV_HANDLING) & M_RDMA_INV_HANDLING)
+
+#define S_T7_TERMINATE_STATUS_EN 23
+#define V_T7_TERMINATE_STATUS_EN(x) ((x) << S_T7_TERMINATE_STATUS_EN)
+#define F_T7_TERMINATE_STATUS_EN V_T7_TERMINATE_STATUS_EN(1U)
+
+#define S_T7_DISABLE 22
+#define V_T7_DISABLE(x) ((x) << S_T7_DISABLE)
+#define F_T7_DISABLE V_T7_DISABLE(1U)
+
+#define A_SGE_CTX_ACC_CH1 0x1754
+#define A_SGE_CTX_ACC_CH2 0x1758
+#define A_SGE_CTX_ACC_CH3 0x175c
+#define A_SGE_CTX_BASE 0x1760
#define A_SGE_LA_RDPTR_0 0x1800
#define A_SGE_LA_RDDATA_0 0x1804
#define A_SGE_LA_WRPTR_0 0x1808
@@ -4296,6 +5074,11 @@
#define A_PCIE_INT_CAUSE 0x3004
#define A_PCIE_PERR_ENABLE 0x3008
+
+#define S_TGTTAGQCLIENT1PERR 29
+#define V_TGTTAGQCLIENT1PERR(x) ((x) << S_TGTTAGQCLIENT1PERR)
+#define F_TGTTAGQCLIENT1PERR V_TGTTAGQCLIENT1PERR(1U)
+
#define A_PCIE_PERR_INJECT 0x300c
#define S_IDE 0
@@ -4582,10 +5365,6 @@
#define V_LINKREQRSTPCIECRSTMODE(x) ((x) << S_LINKREQRSTPCIECRSTMODE)
#define F_LINKREQRSTPCIECRSTMODE V_LINKREQRSTPCIECRSTMODE(1U)
-#define S_T6_PIOSTOPEN 31
-#define V_T6_PIOSTOPEN(x) ((x) << S_T6_PIOSTOPEN)
-#define F_T6_PIOSTOPEN V_T6_PIOSTOPEN(1U)
-
#define A_PCIE_DMA_CTRL 0x3018
#define S_LITTLEENDIAN 7
@@ -4618,6 +5397,14 @@
#define V_T6_TOTMAXTAG(x) ((x) << S_T6_TOTMAXTAG)
#define G_T6_TOTMAXTAG(x) (((x) >> S_T6_TOTMAXTAG) & M_T6_TOTMAXTAG)
+#define S_REG_VDM_ONLY 17
+#define V_REG_VDM_ONLY(x) ((x) << S_REG_VDM_ONLY)
+#define F_REG_VDM_ONLY V_REG_VDM_ONLY(1U)
+
+#define S_MULT_REQID_SUP 16
+#define V_MULT_REQID_SUP(x) ((x) << S_MULT_REQID_SUP)
+#define F_MULT_REQID_SUP V_MULT_REQID_SUP(1U)
+
#define A_PCIE_DMA_CFG 0x301c
#define S_MAXPYLDSIZE 28
@@ -4668,6 +5455,10 @@
#define V_DMADCASTFIRSTONLY(x) ((x) << S_DMADCASTFIRSTONLY)
#define F_DMADCASTFIRSTONLY V_DMADCASTFIRSTONLY(1U)
+#define S_ARMDCASTFIRSTONLY 7
+#define V_ARMDCASTFIRSTONLY(x) ((x) << S_ARMDCASTFIRSTONLY)
+#define F_ARMDCASTFIRSTONLY V_ARMDCASTFIRSTONLY(1U)
+
#define A_PCIE_DMA_STAT 0x3020
#define S_STATEREQ 28
@@ -4748,7 +5539,157 @@
#define G_PERSTTIMER(x) (((x) >> S_PERSTTIMER) & M_PERSTTIMER)
#define A_PCIE_CFG7 0x302c
+#define A_PCIE_INT_ENABLE_EXT 0x3030
+
+#define S_TCAMRSPERR 31
+#define V_TCAMRSPERR(x) ((x) << S_TCAMRSPERR)
+#define F_TCAMRSPERR V_TCAMRSPERR(1U)
+
+#define S_IPFORMQPERR 30
+#define V_IPFORMQPERR(x) ((x) << S_IPFORMQPERR)
+#define F_IPFORMQPERR V_IPFORMQPERR(1U)
+
+#define S_IPFORMQCERR 29
+#define V_IPFORMQCERR(x) ((x) << S_IPFORMQCERR)
+#define F_IPFORMQCERR V_IPFORMQCERR(1U)
+
+#define S_TRGT1GRPCERR 28
+#define V_TRGT1GRPCERR(x) ((x) << S_TRGT1GRPCERR)
+#define F_TRGT1GRPCERR V_TRGT1GRPCERR(1U)
+
+#define S_IPSOTCERR 27
+#define V_IPSOTCERR(x) ((x) << S_IPSOTCERR)
+#define F_IPSOTCERR V_IPSOTCERR(1U)
+
+#define S_IPRETRYCERR 26
+#define V_IPRETRYCERR(x) ((x) << S_IPRETRYCERR)
+#define F_IPRETRYCERR V_IPRETRYCERR(1U)
+
+#define S_IPRXDATAGRPCERR 25
+#define V_IPRXDATAGRPCERR(x) ((x) << S_IPRXDATAGRPCERR)
+#define F_IPRXDATAGRPCERR V_IPRXDATAGRPCERR(1U)
+
+#define S_IPRXHDRGRPCERR 24
+#define V_IPRXHDRGRPCERR(x) ((x) << S_IPRXHDRGRPCERR)
+#define F_IPRXHDRGRPCERR V_IPRXHDRGRPCERR(1U)
+
+#define S_A0ARBRSPORDFIFOPERR 19
+#define V_A0ARBRSPORDFIFOPERR(x) ((x) << S_A0ARBRSPORDFIFOPERR)
+#define F_A0ARBRSPORDFIFOPERR V_A0ARBRSPORDFIFOPERR(1U)
+
+#define S_HRSPCERR 18
+#define V_HRSPCERR(x) ((x) << S_HRSPCERR)
+#define F_HRSPCERR V_HRSPCERR(1U)
+
+#define S_HREQRDCERR 17
+#define V_HREQRDCERR(x) ((x) << S_HREQRDCERR)
+#define F_HREQRDCERR V_HREQRDCERR(1U)
+
+#define S_HREQWRCERR 16
+#define V_HREQWRCERR(x) ((x) << S_HREQWRCERR)
+#define F_HREQWRCERR V_HREQWRCERR(1U)
+
+#define S_DRSPCERR 15
+#define V_DRSPCERR(x) ((x) << S_DRSPCERR)
+#define F_DRSPCERR V_DRSPCERR(1U)
+
+#define S_DREQRDCERR 14
+#define V_DREQRDCERR(x) ((x) << S_DREQRDCERR)
+#define F_DREQRDCERR V_DREQRDCERR(1U)
+
+#define S_DREQWRCERR 13
+#define V_DREQWRCERR(x) ((x) << S_DREQWRCERR)
+#define F_DREQWRCERR V_DREQWRCERR(1U)
+
+#define S_CRSPCERR 12
+#define V_CRSPCERR(x) ((x) << S_CRSPCERR)
+#define F_CRSPCERR V_CRSPCERR(1U)
+
+#define S_ARSPPERR 11
+#define V_ARSPPERR(x) ((x) << S_ARSPPERR)
+#define F_ARSPPERR V_ARSPPERR(1U)
+
+#define S_AREQRDPERR 10
+#define V_AREQRDPERR(x) ((x) << S_AREQRDPERR)
+#define F_AREQRDPERR V_AREQRDPERR(1U)
+
+#define S_AREQWRPERR 9
+#define V_AREQWRPERR(x) ((x) << S_AREQWRPERR)
+#define F_AREQWRPERR V_AREQWRPERR(1U)
+
+#define S_PIOREQGRPCERR 8
+#define V_PIOREQGRPCERR(x) ((x) << S_PIOREQGRPCERR)
+#define F_PIOREQGRPCERR V_PIOREQGRPCERR(1U)
+
+#define S_ARSPCERR 7
+#define V_ARSPCERR(x) ((x) << S_ARSPCERR)
+#define F_ARSPCERR V_ARSPCERR(1U)
+
+#define S_AREQRDCERR 6
+#define V_AREQRDCERR(x) ((x) << S_AREQRDCERR)
+#define F_AREQRDCERR V_AREQRDCERR(1U)
+
+#define S_AREQWRCERR 5
+#define V_AREQWRCERR(x) ((x) << S_AREQWRCERR)
+#define F_AREQWRCERR V_AREQWRCERR(1U)
+
+#define S_MARSPPERR 4
+#define V_MARSPPERR(x) ((x) << S_MARSPPERR)
+#define F_MARSPPERR V_MARSPPERR(1U)
+
+#define S_INICMAWDATAORDPERR 3
+#define V_INICMAWDATAORDPERR(x) ((x) << S_INICMAWDATAORDPERR)
+#define F_INICMAWDATAORDPERR V_INICMAWDATAORDPERR(1U)
+
+#define S_EMUPERR 2
+#define V_EMUPERR(x) ((x) << S_EMUPERR)
+#define F_EMUPERR V_EMUPERR(1U)
+
+#define S_ERRSPPERR 1
+#define V_ERRSPPERR(x) ((x) << S_ERRSPPERR)
+#define F_ERRSPPERR V_ERRSPPERR(1U)
+
+#define S_MSTGRPCERR 0
+#define V_MSTGRPCERR(x) ((x) << S_MSTGRPCERR)
+#define F_MSTGRPCERR V_MSTGRPCERR(1U)
+
+#define A_PCIE_INT_ENABLE_X8 0x3034
+
+#define S_X8TGTGRPPERR 23
+#define V_X8TGTGRPPERR(x) ((x) << S_X8TGTGRPPERR)
+#define F_X8TGTGRPPERR V_X8TGTGRPPERR(1U)
+
+#define S_X8IPSOTPERR 22
+#define V_X8IPSOTPERR(x) ((x) << S_X8IPSOTPERR)
+#define F_X8IPSOTPERR V_X8IPSOTPERR(1U)
+
+#define S_X8IPRETRYPERR 21
+#define V_X8IPRETRYPERR(x) ((x) << S_X8IPRETRYPERR)
+#define F_X8IPRETRYPERR V_X8IPRETRYPERR(1U)
+
+#define S_X8IPRXDATAGRPPERR 20
+#define V_X8IPRXDATAGRPPERR(x) ((x) << S_X8IPRXDATAGRPPERR)
+#define F_X8IPRXDATAGRPPERR V_X8IPRXDATAGRPPERR(1U)
+
+#define S_X8IPRXHDRGRPPERR 19
+#define V_X8IPRXHDRGRPPERR(x) ((x) << S_X8IPRXHDRGRPPERR)
+#define F_X8IPRXHDRGRPPERR V_X8IPRXHDRGRPPERR(1U)
+
+#define S_X8IPCORECERR 3
+#define V_X8IPCORECERR(x) ((x) << S_X8IPCORECERR)
+#define F_X8IPCORECERR V_X8IPCORECERR(1U)
+
+#define S_X8MSTGRPPERR 2
+#define V_X8MSTGRPPERR(x) ((x) << S_X8MSTGRPPERR)
+#define F_X8MSTGRPPERR V_X8MSTGRPPERR(1U)
+
+#define S_X8MSTGRPCERR 1
+#define V_X8MSTGRPCERR(x) ((x) << S_X8MSTGRPCERR)
+#define F_X8MSTGRPCERR V_X8MSTGRPCERR(1U)
+
+#define A_PCIE_INT_CAUSE_EXT 0x3038
#define A_PCIE_CMD_CTRL 0x303c
+#define A_PCIE_INT_CAUSE_X8 0x303c
#define A_PCIE_CMD_CFG 0x3040
#define S_MAXRSPCNT 16
@@ -4761,6 +5702,40 @@
#define V_MAXREQCNT(x) ((x) << S_MAXREQCNT)
#define G_MAXREQCNT(x) (((x) >> S_MAXREQCNT) & M_MAXREQCNT)
+#define A_PCIE_PERR_ENABLE_EXT 0x3040
+
+#define S_T7_ARSPPERR 18
+#define V_T7_ARSPPERR(x) ((x) << S_T7_ARSPPERR)
+#define F_T7_ARSPPERR V_T7_ARSPPERR(1U)
+
+#define S_T7_AREQRDPERR 17
+#define V_T7_AREQRDPERR(x) ((x) << S_T7_AREQRDPERR)
+#define F_T7_AREQRDPERR V_T7_AREQRDPERR(1U)
+
+#define S_T7_AREQWRPERR 16
+#define V_T7_AREQWRPERR(x) ((x) << S_T7_AREQWRPERR)
+#define F_T7_AREQWRPERR V_T7_AREQWRPERR(1U)
+
+#define S_T7_A0ARBRSPORDFIFOPERR 15
+#define V_T7_A0ARBRSPORDFIFOPERR(x) ((x) << S_T7_A0ARBRSPORDFIFOPERR)
+#define F_T7_A0ARBRSPORDFIFOPERR V_T7_A0ARBRSPORDFIFOPERR(1U)
+
+#define S_T7_MARSPPERR 14
+#define V_T7_MARSPPERR(x) ((x) << S_T7_MARSPPERR)
+#define F_T7_MARSPPERR V_T7_MARSPPERR(1U)
+
+#define S_T7_INICMAWDATAORDPERR 13
+#define V_T7_INICMAWDATAORDPERR(x) ((x) << S_T7_INICMAWDATAORDPERR)
+#define F_T7_INICMAWDATAORDPERR V_T7_INICMAWDATAORDPERR(1U)
+
+#define S_T7_EMUPERR 12
+#define V_T7_EMUPERR(x) ((x) << S_T7_EMUPERR)
+#define F_T7_EMUPERR V_T7_EMUPERR(1U)
+
+#define S_T7_ERRSPPERR 11
+#define V_T7_ERRSPPERR(x) ((x) << S_T7_ERRSPPERR)
+#define F_T7_ERRSPPERR V_T7_ERRSPPERR(1U)
+
#define A_PCIE_CMD_STAT 0x3044
#define S_RSPCNT 16
@@ -4773,6 +5748,32 @@
#define V_REQCNT(x) ((x) << S_REQCNT)
#define G_REQCNT(x) (((x) >> S_REQCNT) & M_REQCNT)
+#define A_PCIE_PERR_ENABLE_X8 0x3044
+
+#define S_T7_X8TGTGRPPERR 28
+#define V_T7_X8TGTGRPPERR(x) ((x) << S_T7_X8TGTGRPPERR)
+#define F_T7_X8TGTGRPPERR V_T7_X8TGTGRPPERR(1U)
+
+#define S_T7_X8IPSOTPERR 27
+#define V_T7_X8IPSOTPERR(x) ((x) << S_T7_X8IPSOTPERR)
+#define F_T7_X8IPSOTPERR V_T7_X8IPSOTPERR(1U)
+
+#define S_T7_X8IPRETRYPERR 26
+#define V_T7_X8IPRETRYPERR(x) ((x) << S_T7_X8IPRETRYPERR)
+#define F_T7_X8IPRETRYPERR V_T7_X8IPRETRYPERR(1U)
+
+#define S_T7_X8IPRXDATAGRPPERR 25
+#define V_T7_X8IPRXDATAGRPPERR(x) ((x) << S_T7_X8IPRXDATAGRPPERR)
+#define F_T7_X8IPRXDATAGRPPERR V_T7_X8IPRXDATAGRPPERR(1U)
+
+#define S_T7_X8IPRXHDRGRPPERR 24
+#define V_T7_X8IPRXHDRGRPPERR(x) ((x) << S_T7_X8IPRXHDRGRPPERR)
+#define F_T7_X8IPRXHDRGRPPERR V_T7_X8IPRXHDRGRPPERR(1U)
+
+#define S_T7_X8MSTGRPPERR 0
+#define V_T7_X8MSTGRPPERR(x) ((x) << S_T7_X8MSTGRPPERR)
+#define F_T7_X8MSTGRPPERR V_T7_X8MSTGRPPERR(1U)
+
#define A_PCIE_HMA_CTRL 0x3050
#define S_IPLTSSM 12
@@ -4889,9 +5890,9 @@
#define V_T6_ENABLE(x) ((x) << S_T6_ENABLE)
#define F_T6_ENABLE V_T6_ENABLE(1U)
-#define S_T6_AI 30
-#define V_T6_AI(x) ((x) << S_T6_AI)
-#define F_T6_AI V_T6_AI(1U)
+#define S_T6_1_AI 30
+#define V_T6_1_AI(x) ((x) << S_T6_1_AI)
+#define F_T6_1_AI V_T6_1_AI(1U)
#define S_T6_CS2 29
#define V_T6_CS2(x) ((x) << S_T6_CS2)
@@ -4936,6 +5937,7 @@
#define V_MEMOFST(x) ((x) << S_MEMOFST)
#define G_MEMOFST(x) (((x) >> S_MEMOFST) & M_MEMOFST)
+#define A_T7_PCIE_MAILBOX_BASE_WIN 0x30a4
#define A_PCIE_MAILBOX_BASE_WIN 0x30a8
#define S_MBOXPCIEOFST 6
@@ -4953,7 +5955,21 @@
#define V_MBOXWIN(x) ((x) << S_MBOXWIN)
#define G_MBOXWIN(x) (((x) >> S_MBOXWIN) & M_MBOXWIN)
+#define A_PCIE_MAILBOX_OFFSET0 0x30a8
+
+#define S_MEMOFST0 3
+#define M_MEMOFST0 0x1fffffffU
+#define V_MEMOFST0(x) ((x) << S_MEMOFST0)
+#define G_MEMOFST0(x) (((x) >> S_MEMOFST0) & M_MEMOFST0)
+
#define A_PCIE_MAILBOX_OFFSET 0x30ac
+#define A_PCIE_MAILBOX_OFFSET1 0x30ac
+
+#define S_MEMOFST1 0
+#define M_MEMOFST1 0xfU
+#define V_MEMOFST1(x) ((x) << S_MEMOFST1)
+#define G_MEMOFST1(x) (((x) >> S_MEMOFST1) & M_MEMOFST1)
+
#define A_PCIE_MA_CTRL 0x30b0
#define S_MA_TAGFREE 29
@@ -5098,6 +6114,11 @@
#define V_STATIC_SPARE3(x) ((x) << S_STATIC_SPARE3)
#define G_STATIC_SPARE3(x) (((x) >> S_STATIC_SPARE3) & M_STATIC_SPARE3)
+#define S_T7_STATIC_SPARE3 0
+#define M_T7_STATIC_SPARE3 0x7fffU
+#define V_T7_STATIC_SPARE3(x) ((x) << S_T7_STATIC_SPARE3)
+#define G_T7_STATIC_SPARE3(x) (((x) >> S_T7_STATIC_SPARE3) & M_T7_STATIC_SPARE3)
+
#define A_PCIE_DBG_INDIR_REQ 0x30ec
#define S_DBGENABLE 31
@@ -5173,6 +6194,17 @@
#define G_PFNUM(x) (((x) >> S_PFNUM) & M_PFNUM)
#define A_PCIE_PF_INT_CFG 0x3140
+
+#define S_T7_VECNUM 12
+#define M_T7_VECNUM 0x7ffU
+#define V_T7_VECNUM(x) ((x) << S_T7_VECNUM)
+#define G_T7_VECNUM(x) (((x) >> S_T7_VECNUM) & M_T7_VECNUM)
+
+#define S_T7_VECBASE 0
+#define M_T7_VECBASE 0xfffU
+#define V_T7_VECBASE(x) ((x) << S_T7_VECBASE)
+#define G_T7_VECBASE(x) (((x) >> S_T7_VECBASE) & M_T7_VECBASE)
+
#define A_PCIE_PF_INT_CFG2 0x3144
#define A_PCIE_VF_INT_CFG 0x3180
#define A_PCIE_VF_INT_CFG2 0x3184
@@ -5198,6 +6230,20 @@
#define A_PCIE_VF_MSIX_EN_1 0x35c4
#define A_PCIE_VF_MSIX_EN_2 0x35c8
#define A_PCIE_VF_MSIX_EN_3 0x35cc
+#define A_PCIE_FID_PASID 0x35e0
+#define A_PCIE_FID_VFID_CTL 0x35e4
+
+#define S_T7_WRITE 0
+#define V_T7_WRITE(x) ((x) << S_T7_WRITE)
+#define F_T7_WRITE V_T7_WRITE(1U)
+
+#define A_T7_PCIE_FID_VFID_SEL 0x35e8
+
+#define S_T7_ADDR 2
+#define M_T7_ADDR 0x1fffU
+#define V_T7_ADDR(x) ((x) << S_T7_ADDR)
+#define G_T7_ADDR(x) (((x) >> S_T7_ADDR) & M_T7_ADDR)
+
#define A_PCIE_FID_VFID_SEL 0x35ec
#define S_FID_VFID_SEL_SELECT 0
@@ -5205,6 +6251,17 @@
#define V_FID_VFID_SEL_SELECT(x) ((x) << S_FID_VFID_SEL_SELECT)
#define G_FID_VFID_SEL_SELECT(x) (((x) >> S_FID_VFID_SEL_SELECT) & M_FID_VFID_SEL_SELECT)
+#define A_T7_PCIE_FID_VFID 0x35ec
+
+#define S_FID_VFID_NVMEGROUPEN 29
+#define V_FID_VFID_NVMEGROUPEN(x) ((x) << S_FID_VFID_NVMEGROUPEN)
+#define F_FID_VFID_NVMEGROUPEN V_FID_VFID_NVMEGROUPEN(1U)
+
+#define S_FID_VFID_GROUPSEL 25
+#define M_FID_VFID_GROUPSEL 0xfU
+#define V_FID_VFID_GROUPSEL(x) ((x) << S_FID_VFID_GROUPSEL)
+#define G_FID_VFID_GROUPSEL(x) (((x) >> S_FID_VFID_GROUPSEL) & M_FID_VFID_GROUPSEL)
+
#define A_PCIE_FID_VFID 0x3600
#define S_FID_VFID_SELECT 30
@@ -5264,6 +6321,227 @@
#define V_T6_FID_VFID_RVF(x) ((x) << S_T6_FID_VFID_RVF)
#define G_T6_FID_VFID_RVF(x) (((x) >> S_T6_FID_VFID_RVF) & M_T6_FID_VFID_RVF)
+#define A_PCIE_JBOF_NVME_HIGH_DW_START_ADDR 0x3600
+#define A_PCIE_JBOF_NVME_LOW_DW_START_ADDR 0x3604
+#define A_PCIE_JBOF_NVME_LENGTH 0x3608
+
+#define S_NVMEDISABLE 31
+#define V_NVMEDISABLE(x) ((x) << S_NVMEDISABLE)
+#define F_NVMEDISABLE V_NVMEDISABLE(1U)
+
+#define S_NVMELENGTH 0
+#define M_NVMELENGTH 0x3fffffffU
+#define V_NVMELENGTH(x) ((x) << S_NVMELENGTH)
+#define G_NVMELENGTH(x) (((x) >> S_NVMELENGTH) & M_NVMELENGTH)
+
+#define A_PCIE_JBOF_NVME_GROUP 0x360c
+
+#define S_NVMEGROUPSEL 0
+#define M_NVMEGROUPSEL 0xfU
+#define V_NVMEGROUPSEL(x) ((x) << S_NVMEGROUPSEL)
+#define G_NVMEGROUPSEL(x) (((x) >> S_NVMEGROUPSEL) & M_NVMEGROUPSEL)
+
+#define A_T7_PCIE_MEM_ACCESS_BASE_WIN 0x3700
+#define A_PCIE_MEM_ACCESS_BASE_WIN1 0x3704
+
+#define S_PCIEOFST1 0
+#define M_PCIEOFST1 0xffU
+#define V_PCIEOFST1(x) ((x) << S_PCIEOFST1)
+#define G_PCIEOFST1(x) (((x) >> S_PCIEOFST1) & M_PCIEOFST1)
+
+#define A_PCIE_MEM_ACCESS_OFFSET0 0x3708
+#define A_PCIE_MEM_ACCESS_OFFSET1 0x370c
+#define A_PCIE_PTM_EP_EXT_STROBE 0x3804
+
+#define S_PTM_AUTO_UPDATE 1
+#define V_PTM_AUTO_UPDATE(x) ((x) << S_PTM_AUTO_UPDATE)
+#define F_PTM_AUTO_UPDATE V_PTM_AUTO_UPDATE(1U)
+
+#define S_PTM_EXT_STROBE 0
+#define V_PTM_EXT_STROBE(x) ((x) << S_PTM_EXT_STROBE)
+#define F_PTM_EXT_STROBE V_PTM_EXT_STROBE(1U)
+
+#define A_PCIE_PTM_EP_EXT_TIME0 0x3808
+#define A_PCIE_PTM_EP_EXT_TIME1 0x380c
+#define A_PCIE_PTM_MAN_UPD_PULSE 0x3810
+
+#define S_PTM_MAN_UPD_PULSE 0
+#define V_PTM_MAN_UPD_PULSE(x) ((x) << S_PTM_MAN_UPD_PULSE)
+#define F_PTM_MAN_UPD_PULSE V_PTM_MAN_UPD_PULSE(1U)
+
+#define A_PCIE_SWAP_DATA_B2L_X16 0x3814
+#define A_PCIE_PCIE_RC_RST 0x3818
+
+#define S_PERST 0
+#define V_PERST(x) ((x) << S_PERST)
+#define F_PERST V_PERST(1U)
+
+#define A_PCIE_PCIE_LN_CLKSEL 0x3880
+
+#define S_DS8_SEL 30
+#define M_DS8_SEL 0x3U
+#define V_DS8_SEL(x) ((x) << S_DS8_SEL)
+#define G_DS8_SEL(x) (((x) >> S_DS8_SEL) & M_DS8_SEL)
+
+#define S_DS7_SEL 28
+#define M_DS7_SEL 0x3U
+#define V_DS7_SEL(x) ((x) << S_DS7_SEL)
+#define G_DS7_SEL(x) (((x) >> S_DS7_SEL) & M_DS7_SEL)
+
+#define S_DS6_SEL 26
+#define M_DS6_SEL 0x3U
+#define V_DS6_SEL(x) ((x) << S_DS6_SEL)
+#define G_DS6_SEL(x) (((x) >> S_DS6_SEL) & M_DS6_SEL)
+
+#define S_DS5_SEL 24
+#define M_DS5_SEL 0x3U
+#define V_DS5_SEL(x) ((x) << S_DS5_SEL)
+#define G_DS5_SEL(x) (((x) >> S_DS5_SEL) & M_DS5_SEL)
+
+#define S_DS4_SEL 22
+#define M_DS4_SEL 0x3U
+#define V_DS4_SEL(x) ((x) << S_DS4_SEL)
+#define G_DS4_SEL(x) (((x) >> S_DS4_SEL) & M_DS4_SEL)
+
+#define S_DS3_SEL 20
+#define M_DS3_SEL 0x3U
+#define V_DS3_SEL(x) ((x) << S_DS3_SEL)
+#define G_DS3_SEL(x) (((x) >> S_DS3_SEL) & M_DS3_SEL)
+
+#define S_DS2_SEL 18
+#define M_DS2_SEL 0x3U
+#define V_DS2_SEL(x) ((x) << S_DS2_SEL)
+#define G_DS2_SEL(x) (((x) >> S_DS2_SEL) & M_DS2_SEL)
+
+#define S_DS1_SEL 16
+#define M_DS1_SEL 0x3U
+#define V_DS1_SEL(x) ((x) << S_DS1_SEL)
+#define G_DS1_SEL(x) (((x) >> S_DS1_SEL) & M_DS1_SEL)
+
+#define S_LN14_SEL 14
+#define M_LN14_SEL 0x3U
+#define V_LN14_SEL(x) ((x) << S_LN14_SEL)
+#define G_LN14_SEL(x) (((x) >> S_LN14_SEL) & M_LN14_SEL)
+
+#define S_LN12_SEL 12
+#define M_LN12_SEL 0x3U
+#define V_LN12_SEL(x) ((x) << S_LN12_SEL)
+#define G_LN12_SEL(x) (((x) >> S_LN12_SEL) & M_LN12_SEL)
+
+#define S_LN10_SEL 10
+#define M_LN10_SEL 0x3U
+#define V_LN10_SEL(x) ((x) << S_LN10_SEL)
+#define G_LN10_SEL(x) (((x) >> S_LN10_SEL) & M_LN10_SEL)
+
+#define S_LN8_SEL 8
+#define M_LN8_SEL 0x3U
+#define V_LN8_SEL(x) ((x) << S_LN8_SEL)
+#define G_LN8_SEL(x) (((x) >> S_LN8_SEL) & M_LN8_SEL)
+
+#define S_LN6_SEL 6
+#define M_LN6_SEL 0x3U
+#define V_LN6_SEL(x) ((x) << S_LN6_SEL)
+#define G_LN6_SEL(x) (((x) >> S_LN6_SEL) & M_LN6_SEL)
+
+#define S_LN4_SEL 4
+#define M_LN4_SEL 0x3U
+#define V_LN4_SEL(x) ((x) << S_LN4_SEL)
+#define G_LN4_SEL(x) (((x) >> S_LN4_SEL) & M_LN4_SEL)
+
+#define S_LN2_SEL 2
+#define M_LN2_SEL 0x3U
+#define V_LN2_SEL(x) ((x) << S_LN2_SEL)
+#define G_LN2_SEL(x) (((x) >> S_LN2_SEL) & M_LN2_SEL)
+
+#define S_LN0_SEL 0
+#define M_LN0_SEL 0x3U
+#define V_LN0_SEL(x) ((x) << S_LN0_SEL)
+#define G_LN0_SEL(x) (((x) >> S_LN0_SEL) & M_LN0_SEL)
+
+#define A_PCIE_PCIE_MSIX_EN 0x3884
+
+#define S_MSIX_ENABLE 0
+#define M_MSIX_ENABLE 0xffU
+#define V_MSIX_ENABLE(x) ((x) << S_MSIX_ENABLE)
+#define G_MSIX_ENABLE(x) (((x) >> S_MSIX_ENABLE) & M_MSIX_ENABLE)
+
+#define A_PCIE_LFSR_WRCTRL 0x3888
+
+#define S_WR_LFSR_CMP_DATA 16
+#define M_WR_LFSR_CMP_DATA 0xffffU
+#define V_WR_LFSR_CMP_DATA(x) ((x) << S_WR_LFSR_CMP_DATA)
+#define G_WR_LFSR_CMP_DATA(x) (((x) >> S_WR_LFSR_CMP_DATA) & M_WR_LFSR_CMP_DATA)
+
+#define S_WR_LFSR_RSVD 2
+#define M_WR_LFSR_RSVD 0x3fffU
+#define V_WR_LFSR_RSVD(x) ((x) << S_WR_LFSR_RSVD)
+#define G_WR_LFSR_RSVD(x) (((x) >> S_WR_LFSR_RSVD) & M_WR_LFSR_RSVD)
+
+#define S_WR_LFSR_EN 1
+#define V_WR_LFSR_EN(x) ((x) << S_WR_LFSR_EN)
+#define F_WR_LFSR_EN V_WR_LFSR_EN(1U)
+
+#define S_WR_LFSR_START 0
+#define V_WR_LFSR_START(x) ((x) << S_WR_LFSR_START)
+#define F_WR_LFSR_START V_WR_LFSR_START(1U)
+
+#define A_PCIE_LFSR_RDCTRL 0x388c
+
+#define S_CMD_LFSR_CMP_DATA 24
+#define M_CMD_LFSR_CMP_DATA 0xffU
+#define V_CMD_LFSR_CMP_DATA(x) ((x) << S_CMD_LFSR_CMP_DATA)
+#define G_CMD_LFSR_CMP_DATA(x) (((x) >> S_CMD_LFSR_CMP_DATA) & M_CMD_LFSR_CMP_DATA)
+
+#define S_RD_LFSR_CMD_DATA 16
+#define M_RD_LFSR_CMD_DATA 0xffU
+#define V_RD_LFSR_CMD_DATA(x) ((x) << S_RD_LFSR_CMD_DATA)
+#define G_RD_LFSR_CMD_DATA(x) (((x) >> S_RD_LFSR_CMD_DATA) & M_RD_LFSR_CMD_DATA)
+
+#define S_RD_LFSR_RSVD 10
+#define M_RD_LFSR_RSVD 0x3fU
+#define V_RD_LFSR_RSVD(x) ((x) << S_RD_LFSR_RSVD)
+#define G_RD_LFSR_RSVD(x) (((x) >> S_RD_LFSR_RSVD) & M_RD_LFSR_RSVD)
+
+#define S_RD3_LFSR_EN 9
+#define V_RD3_LFSR_EN(x) ((x) << S_RD3_LFSR_EN)
+#define F_RD3_LFSR_EN V_RD3_LFSR_EN(1U)
+
+#define S_RD3_LFSR_START 8
+#define V_RD3_LFSR_START(x) ((x) << S_RD3_LFSR_START)
+#define F_RD3_LFSR_START V_RD3_LFSR_START(1U)
+
+#define S_RD2_LFSR_EN 7
+#define V_RD2_LFSR_EN(x) ((x) << S_RD2_LFSR_EN)
+#define F_RD2_LFSR_EN V_RD2_LFSR_EN(1U)
+
+#define S_RD2_LFSR_START 6
+#define V_RD2_LFSR_START(x) ((x) << S_RD2_LFSR_START)
+#define F_RD2_LFSR_START V_RD2_LFSR_START(1U)
+
+#define S_RD1_LFSR_EN 5
+#define V_RD1_LFSR_EN(x) ((x) << S_RD1_LFSR_EN)
+#define F_RD1_LFSR_EN V_RD1_LFSR_EN(1U)
+
+#define S_RD1_LFSR_START 4
+#define V_RD1_LFSR_START(x) ((x) << S_RD1_LFSR_START)
+#define F_RD1_LFSR_START V_RD1_LFSR_START(1U)
+
+#define S_RD0_LFSR_EN 3
+#define V_RD0_LFSR_EN(x) ((x) << S_RD0_LFSR_EN)
+#define F_RD0_LFSR_EN V_RD0_LFSR_EN(1U)
+
+#define S_RD0_LFSR_START 2
+#define V_RD0_LFSR_START(x) ((x) << S_RD0_LFSR_START)
+#define F_RD0_LFSR_START V_RD0_LFSR_START(1U)
+
+#define S_CMD_LFSR_EN 1
+#define V_CMD_LFSR_EN(x) ((x) << S_CMD_LFSR_EN)
+#define F_CMD_LFSR_EN V_CMD_LFSR_EN(1U)
+
+#define S_CMD_LFSR_START 0
+#define V_CMD_LFSR_START(x) ((x) << S_CMD_LFSR_START)
+#define F_CMD_LFSR_START V_CMD_LFSR_START(1U)
+
#define A_PCIE_FID 0x3900
#define S_PAD 11
@@ -5280,6 +6558,309 @@
#define V_FUNC(x) ((x) << S_FUNC)
#define G_FUNC(x) (((x) >> S_FUNC) & M_FUNC)
+#define A_PCIE_EMU_ADDR 0x3900
+
+#define S_EMU_ADDR 0
+#define M_EMU_ADDR 0x1ffU
+#define V_EMU_ADDR(x) ((x) << S_EMU_ADDR)
+#define G_EMU_ADDR(x) (((x) >> S_EMU_ADDR) & M_EMU_ADDR)
+
+#define A_PCIE_EMU_CFG 0x3904
+
+#define S_EMUENABLE 16
+#define V_EMUENABLE(x) ((x) << S_EMUENABLE)
+#define F_EMUENABLE V_EMUENABLE(1U)
+
+#define S_EMUTYPE 14
+#define M_EMUTYPE 0x3U
+#define V_EMUTYPE(x) ((x) << S_EMUTYPE)
+#define G_EMUTYPE(x) (((x) >> S_EMUTYPE) & M_EMUTYPE)
+
+#define S_BAR0TARGET 12
+#define M_BAR0TARGET 0x3U
+#define V_BAR0TARGET(x) ((x) << S_BAR0TARGET)
+#define G_BAR0TARGET(x) (((x) >> S_BAR0TARGET) & M_BAR0TARGET)
+
+#define S_BAR2TARGET 10
+#define M_BAR2TARGET 0x3U
+#define V_BAR2TARGET(x) ((x) << S_BAR2TARGET)
+#define G_BAR2TARGET(x) (((x) >> S_BAR2TARGET) & M_BAR2TARGET)
+
+#define S_BAR4TARGET 8
+#define M_BAR4TARGET 0x3U
+#define V_BAR4TARGET(x) ((x) << S_BAR4TARGET)
+#define G_BAR4TARGET(x) (((x) >> S_BAR4TARGET) & M_BAR4TARGET)
+
+#define S_RELEATIVEEMUID 0
+#define M_RELEATIVEEMUID 0xffU
+#define V_RELEATIVEEMUID(x) ((x) << S_RELEATIVEEMUID)
+#define G_RELEATIVEEMUID(x) (((x) >> S_RELEATIVEEMUID) & M_RELEATIVEEMUID)
+
+#define A_PCIE_EMUADRRMAP_MEM_OFFSET0_BAR0 0x3910
+
+#define S_T7_MEMOFST0 0
+#define M_T7_MEMOFST0 0xfffffffU
+#define V_T7_MEMOFST0(x) ((x) << S_T7_MEMOFST0)
+#define G_T7_MEMOFST0(x) (((x) >> S_T7_MEMOFST0) & M_T7_MEMOFST0)
+
+#define A_PCIE_EMUADRRMAP_MEM_CFG0_BAR0 0x3914
+
+#define S_SIZE0 0
+#define M_SIZE0 0x1fU
+#define V_SIZE0(x) ((x) << S_SIZE0)
+#define G_SIZE0(x) (((x) >> S_SIZE0) & M_SIZE0)
+
+#define A_PCIE_EMUADRRMAP_MEM_OFFSET1_BAR0 0x3918
+
+#define S_T7_MEMOFST1 0
+#define M_T7_MEMOFST1 0xfffffffU
+#define V_T7_MEMOFST1(x) ((x) << S_T7_MEMOFST1)
+#define G_T7_MEMOFST1(x) (((x) >> S_T7_MEMOFST1) & M_T7_MEMOFST1)
+
+#define A_PCIE_EMUADRRMAP_MEM_CFG1_BAR0 0x391c
+
+#define S_SIZE1 0
+#define M_SIZE1 0x1fU
+#define V_SIZE1(x) ((x) << S_SIZE1)
+#define G_SIZE1(x) (((x) >> S_SIZE1) & M_SIZE1)
+
+#define A_PCIE_EMUADRRMAP_MEM_OFFSET2_BAR0 0x3920
+
+#define S_MEMOFST2 0
+#define M_MEMOFST2 0xfffffffU
+#define V_MEMOFST2(x) ((x) << S_MEMOFST2)
+#define G_MEMOFST2(x) (((x) >> S_MEMOFST2) & M_MEMOFST2)
+
+#define A_PCIE_EMUADRRMAP_MEM_CFG2_BAR0 0x3924
+
+#define S_SIZE2 0
+#define M_SIZE2 0x1fU
+#define V_SIZE2(x) ((x) << S_SIZE2)
+#define G_SIZE2(x) (((x) >> S_SIZE2) & M_SIZE2)
+
+#define A_PCIE_EMUADRRMAP_MEM_OFFSET3_BAR0 0x3928
+
+#define S_MEMOFST3 0
+#define M_MEMOFST3 0xfffffffU
+#define V_MEMOFST3(x) ((x) << S_MEMOFST3)
+#define G_MEMOFST3(x) (((x) >> S_MEMOFST3) & M_MEMOFST3)
+
+#define A_PCIE_EMUADRRMAP_MEM_CFG3_BAR0 0x392c
+
+#define S_SIZE3 0
+#define M_SIZE3 0x1fU
+#define V_SIZE3(x) ((x) << S_SIZE3)
+#define G_SIZE3(x) (((x) >> S_SIZE3) & M_SIZE3)
+
+#define A_PCIE_TCAM_DATA 0x3970
+#define A_PCIE_TCAM_CTL 0x3974
+
+#define S_TCAMADDR 8
+#define M_TCAMADDR 0x3ffU
+#define V_TCAMADDR(x) ((x) << S_TCAMADDR)
+#define G_TCAMADDR(x) (((x) >> S_TCAMADDR) & M_TCAMADDR)
+
+#define S_CAMEN 0
+#define V_CAMEN(x) ((x) << S_CAMEN)
+#define F_CAMEN V_CAMEN(1U)
+
+#define A_PCIE_TCAM_DBG 0x3978
+
+#define S_CBPASS 24
+#define V_CBPASS(x) ((x) << S_CBPASS)
+#define F_CBPASS V_CBPASS(1U)
+
+#define S_CBBUSY 20
+#define V_CBBUSY(x) ((x) << S_CBBUSY)
+#define F_CBBUSY V_CBBUSY(1U)
+
+#define S_CBSTART 17
+#define V_CBSTART(x) ((x) << S_CBSTART)
+#define F_CBSTART V_CBSTART(1U)
+
+#define S_RSTCB 16
+#define V_RSTCB(x) ((x) << S_RSTCB)
+#define F_RSTCB V_RSTCB(1U)
+
+#define S_TCAM_DBG_DATA 0
+#define M_TCAM_DBG_DATA 0xffffU
+#define V_TCAM_DBG_DATA(x) ((x) << S_TCAM_DBG_DATA)
+#define G_TCAM_DBG_DATA(x) (((x) >> S_TCAM_DBG_DATA) & M_TCAM_DBG_DATA)
+
+#define A_PCIE_TEST_CTRL0 0x3980
+#define A_PCIE_TEST_CTRL1 0x3984
+#define A_PCIE_TEST_CTRL2 0x3988
+#define A_PCIE_TEST_CTRL3 0x398c
+#define A_PCIE_TEST_STS0 0x3990
+#define A_PCIE_TEST_STS1 0x3994
+#define A_PCIE_TEST_STS2 0x3998
+#define A_PCIE_TEST_STS3 0x399c
+#define A_PCIE_X8_CORE_ACK_LATENCY_TIMER_REPLAY_TIMER 0x4700
+#define A_PCIE_X8_CORE_VENDOR_SPECIFIC_DLLP 0x4704
+#define A_PCIE_X8_CORE_PORT_FORCE_LINK 0x4708
+#define A_PCIE_X8_CORE_ACK_FREQUENCY_L0L1_ASPM_CONTROL 0x470c
+#define A_PCIE_X8_CORE_PORT_LINK_CONTROL 0x4710
+#define A_PCIE_X8_CORE_LANE_SKEW 0x4714
+#define A_PCIE_X8_CORE_SYMBOL_NUMBER 0x4718
+#define A_PCIE_X8_CORE_SYMBOL_TIMER_FILTER_MASK1 0x471c
+#define A_PCIE_X8_CORE_FILTER_MASK2 0x4720
+#define A_PCIE_X8_CORE_DEBUG_0 0x4728
+#define A_PCIE_X8_CORE_DEBUG_1 0x472c
+#define A_PCIE_X8_CORE_TRANSMIT_POSTED_FC_CREDIT_STATUS 0x4730
+#define A_PCIE_X8_CORE_TRANSMIT_NONPOSTED_FC_CREDIT_STATUS 0x4734
+#define A_PCIE_X8_CORE_TRANSMIT_COMPLETION_FC_CREDIT_STATUS 0x4738
+#define A_PCIE_X8_CORE_QUEUE_STATUS 0x473c
+#define A_PCIE_X8_CORE_VC_TRANSMIT_ARBITRATION_1 0x4740
+#define A_PCIE_X8_CORE_VC_TRANSMIT_ARBITRATION_2 0x4744
+#define A_PCIE_X8_CORE_VC0_POSTED_RECEIVE_QUEUE_CONTROL 0x4748
+#define A_PCIE_X8_CORE_VC0_NONPOSTED_RECEIVE_QUEUE_CONTROL 0x474c
+#define A_PCIE_X8_CORE_VC0_COMPLETION_RECEIVE_QUEUE_CONTROL 0x4750
+#define A_PCIE_X8_CORE_VC1_POSTED_RECEIVE_QUEUE_CONTROL 0x4754
+#define A_PCIE_X8_CORE_VC1_NONPOSTED_RECEIVE_QUEUE_CONTROL 0x4758
+#define A_PCIE_X8_CORE_VC1_COMPLETION_RECEIVE_QUEUE_CONTROL 0x475c
+#define A_PCIE_X8_CORE_LINK_WIDTH_SPEED_CHANGE 0x480c
+#define A_PCIE_X8_CORE_PHY_STATUS 0x4810
+#define A_PCIE_X8_CORE_PHY_CONTROL 0x4814
+#define A_PCIE_X8_CORE_GEN3_CONTROL 0x4890
+#define A_PCIE_X8_CORE_GEN3_EQ_FS_LF 0x4894
+#define A_PCIE_X8_CORE_GEN3_EQ_PRESET_COEFF 0x4898
+#define A_PCIE_X8_CORE_GEN3_EQ_PRESET_INDEX 0x489c
+#define A_PCIE_X8_CORE_GEN3_EQ_STATUS 0x48a4
+#define A_PCIE_X8_CORE_GEN3_EQ_CONTROL 0x48a8
+#define A_PCIE_X8_CORE_GEN3_EQ_DIRCHANGE_FEEDBACK 0x48ac
+#define A_PCIE_X8_CORE_PIPE_CONTROL 0x48b8
+#define A_PCIE_X8_CORE_DBI_RO_WE 0x48bc
+#define A_PCIE_X8_CFG_SPACE_REQ 0x48c0
+#define A_PCIE_X8_CFG_SPACE_DATA 0x48c4
+#define A_PCIE_X8_CFG_MPS_MRS 0x4900
+
+#define S_MRS 3
+#define M_MRS 0x7U
+#define V_MRS(x) ((x) << S_MRS)
+#define G_MRS(x) (((x) >> S_MRS) & M_MRS)
+
+#define S_T7_MPS 0
+#define M_T7_MPS 0x7U
+#define V_T7_MPS(x) ((x) << S_T7_MPS)
+#define G_T7_MPS(x) (((x) >> S_T7_MPS) & M_T7_MPS)
+
+#define A_PCIE_X8_CFG_ATTRIBUTES 0x4904
+
+#define S_T7_DCAEN 2
+#define V_T7_DCAEN(x) ((x) << S_T7_DCAEN)
+#define F_T7_DCAEN V_T7_DCAEN(1U)
+
+#define S_DCASTFITTRAONLEN 1
+#define V_DCASTFITTRAONLEN(x) ((x) << S_DCASTFITTRAONLEN)
+#define F_DCASTFITTRAONLEN V_DCASTFITTRAONLEN(1U)
+
+#define S_REQCTLDYNSTCLKEN 0
+#define V_REQCTLDYNSTCLKEN(x) ((x) << S_REQCTLDYNSTCLKEN)
+#define F_REQCTLDYNSTCLKEN V_REQCTLDYNSTCLKEN(1U)
+
+#define A_PCIE_X8_CFG_LTSSM 0x4908
+
+#define S_APP_LTSSM_ENABLE 0
+#define V_APP_LTSSM_ENABLE(x) ((x) << S_APP_LTSSM_ENABLE)
+#define F_APP_LTSSM_ENABLE V_APP_LTSSM_ENABLE(1U)
+
+#define A_PCIE_ARM_REQUESTER_ID_X8 0x490c
+
+#define S_A1_RSVD1 24
+#define M_A1_RSVD1 0xffU
+#define V_A1_RSVD1(x) ((x) << S_A1_RSVD1)
+#define G_A1_RSVD1(x) (((x) >> S_A1_RSVD1) & M_A1_RSVD1)
+
+#define S_A1_PRIMBUSNUMBER 16
+#define M_A1_PRIMBUSNUMBER 0xffU
+#define V_A1_PRIMBUSNUMBER(x) ((x) << S_A1_PRIMBUSNUMBER)
+#define G_A1_PRIMBUSNUMBER(x) (((x) >> S_A1_PRIMBUSNUMBER) & M_A1_PRIMBUSNUMBER)
+
+#define S_A1_REQUESTERID 0
+#define M_A1_REQUESTERID 0xffffU
+#define V_A1_REQUESTERID(x) ((x) << S_A1_REQUESTERID)
+#define G_A1_REQUESTERID(x) (((x) >> S_A1_REQUESTERID) & M_A1_REQUESTERID)
+
+#define A_PCIE_SWAP_DATA_B2L_X8 0x4910
+
+#define S_CFGRD_SWAP_EN 1
+#define V_CFGRD_SWAP_EN(x) ((x) << S_CFGRD_SWAP_EN)
+#define F_CFGRD_SWAP_EN V_CFGRD_SWAP_EN(1U)
+
+#define S_CFGWR_SWAP_EN 0
+#define V_CFGWR_SWAP_EN(x) ((x) << S_CFGWR_SWAP_EN)
+#define F_CFGWR_SWAP_EN V_CFGWR_SWAP_EN(1U)
+
+#define A_PCIE_PDEBUG_DATA0_X8 0x4914
+#define A_PCIE_PDEBUG_DATA1_X8 0x4918
+#define A_PCIE_PDEBUG_DATA2_X8 0x491c
+#define A_PCIE_PDEBUG_CTRL_X8 0x4920
+#define A_PCIE_PDEBUG_DATA_X8 0x4924
+#define A_PCIE_SPARE_REGISTER_SPACES_X8 0x4ffc
+#define A_PCIE_PIPE_LANE0_REG0 0x5500
+#define A_PCIE_PIPE_LANE0_REG1 0x5504
+#define A_PCIE_PIPE_LANE0_REG2 0x5508
+#define A_PCIE_PIPE_LANE0_REG3 0x550c
+#define A_PCIE_PIPE_LANE1_REG0 0x5510
+#define A_PCIE_PIPE_LANE1_REG1 0x5514
+#define A_PCIE_PIPE_LANE1_REG2 0x5518
+#define A_PCIE_PIPE_LANE1_REG3 0x551c
+#define A_PCIE_PIPE_LANE2_REG0 0x5520
+#define A_PCIE_PIPE_LANE2_REG1 0x5524
+#define A_PCIE_PIPE_LANE2_REG2 0x5528
+#define A_PCIE_PIPE_LANE2_REG3 0x552c
+#define A_PCIE_PIPE_LANE3_REG0 0x5530
+#define A_PCIE_PIPE_LANE3_REG1 0x5534
+#define A_PCIE_PIPE_LANE3_REG2 0x5538
+#define A_PCIE_PIPE_LANE3_REG3 0x553c
+#define A_PCIE_PIPE_LANE4_REG0 0x5540
+#define A_PCIE_PIPE_LANE4_REG1 0x5544
+#define A_PCIE_PIPE_LANE4_REG2 0x5548
+#define A_PCIE_PIPE_LANE4_REG3 0x554c
+#define A_PCIE_PIPE_LANE5_REG0 0x5550
+#define A_PCIE_PIPE_LANE5_REG1 0x5554
+#define A_PCIE_PIPE_LANE5_REG2 0x5558
+#define A_PCIE_PIPE_LANE5_REG3 0x555c
+#define A_PCIE_PIPE_LANE6_REG0 0x5560
+#define A_PCIE_PIPE_LANE6_REG1 0x5564
+#define A_PCIE_PIPE_LANE6_REG2 0x5568
+#define A_PCIE_PIPE_LANE6_REG3 0x556c
+#define A_PCIE_PIPE_LANE7_REG0 0x5570
+#define A_PCIE_PIPE_LANE7_REG1 0x5574
+#define A_PCIE_PIPE_LANE7_REG2 0x5578
+#define A_PCIE_PIPE_LANE7_REG3 0x557c
+#define A_PCIE_PIPE_LANE8_REG0 0x5580
+#define A_PCIE_PIPE_LANE8_REG1 0x5584
+#define A_PCIE_PIPE_LANE8_REG2 0x5588
+#define A_PCIE_PIPE_LANE8_REG3 0x558c
+#define A_PCIE_PIPE_LANE9_REG0 0x5590
+#define A_PCIE_PIPE_LANE9_REG1 0x5594
+#define A_PCIE_PIPE_LANE9_REG2 0x5598
+#define A_PCIE_PIPE_LANE9_REG3 0x559c
+#define A_PCIE_PIPE_LANE10_REG0 0x55a0
+#define A_PCIE_PIPE_LANE10_REG1 0x55a4
+#define A_PCIE_PIPE_LANE10_REG2 0x55a8
+#define A_PCIE_PIPE_LANE10_REG3 0x55ac
+#define A_PCIE_PIPE_LANE11_REG0 0x55b0
+#define A_PCIE_PIPE_LANE11_REG1 0x55b4
+#define A_PCIE_PIPE_LANE11_REG2 0x55b8
+#define A_PCIE_PIPE_LANE11_REG3 0x55bc
+#define A_PCIE_PIPE_LANE12_REG0 0x55c0
+#define A_PCIE_PIPE_LANE12_REG1 0x55c4
+#define A_PCIE_PIPE_LANE12_REG2 0x55c8
+#define A_PCIE_PIPE_LANE12_REG3 0x55cc
+#define A_PCIE_PIPE_LANE13_REG0 0x55d0
+#define A_PCIE_PIPE_LANE13_REG1 0x55d4
+#define A_PCIE_PIPE_LANE13_REG2 0x55d8
+#define A_PCIE_PIPE_LANE13_REG3 0x55dc
+#define A_PCIE_PIPE_LANE14_REG0 0x55e0
+#define A_PCIE_PIPE_LANE14_REG1 0x55e4
+#define A_PCIE_PIPE_LANE14_REG2 0x55e8
+#define A_PCIE_PIPE_LANE14_REG3 0x55ec
+#define A_PCIE_PIPE_LANE15_REG0 0x55f0
+#define A_PCIE_PIPE_LANE15_REG1 0x55f4
+#define A_PCIE_PIPE_LANE15_REG2 0x55f8
+#define A_PCIE_PIPE_LANE15_REG3 0x55fc
#define A_PCIE_COOKIE_STAT 0x5600
#define S_COOKIEB 16
@@ -5346,6 +6927,30 @@
#define V_T6_RCVDPIOREQCOOKIE(x) ((x) << S_T6_RCVDPIOREQCOOKIE)
#define G_T6_RCVDPIOREQCOOKIE(x) (((x) >> S_T6_RCVDPIOREQCOOKIE) & M_T6_RCVDPIOREQCOOKIE)
+#define A_T7_PCIE_VC0_CDTS0 0x56c4
+
+#define S_T7_CPLD0 16
+#define M_T7_CPLD0 0xffffU
+#define V_T7_CPLD0(x) ((x) << S_T7_CPLD0)
+#define G_T7_CPLD0(x) (((x) >> S_T7_CPLD0) & M_T7_CPLD0)
+
+#define S_T7_CPLH0 0
+#define M_T7_CPLH0 0xfffU
+#define V_T7_CPLH0(x) ((x) << S_T7_CPLH0)
+#define G_T7_CPLH0(x) (((x) >> S_T7_CPLH0) & M_T7_CPLH0)
+
+#define A_T7_PCIE_VC0_CDTS1 0x56c8
+
+#define S_T7_PD0 16
+#define M_T7_PD0 0xffffU
+#define V_T7_PD0(x) ((x) << S_T7_PD0)
+#define G_T7_PD0(x) (((x) >> S_T7_PD0) & M_T7_PD0)
+
+#define S_T7_PH0 0
+#define M_T7_PH0 0xfffU
+#define V_T7_PH0(x) ((x) << S_T7_PH0)
+#define G_T7_PH0(x) (((x) >> S_T7_PH0) & M_T7_PH0)
+
#define A_PCIE_VC0_CDTS0 0x56cc
#define S_CPLD0 20
@@ -5363,6 +6968,18 @@
#define V_PD0(x) ((x) << S_PD0)
#define G_PD0(x) (((x) >> S_PD0) & M_PD0)
+#define A_PCIE_VC0_CDTS2 0x56cc
+
+#define S_T7_NPD0 16
+#define M_T7_NPD0 0xffffU
+#define V_T7_NPD0(x) ((x) << S_T7_NPD0)
+#define G_T7_NPD0(x) (((x) >> S_T7_NPD0) & M_T7_NPD0)
+
+#define S_T7_NPH0 0
+#define M_T7_NPH0 0xfffU
+#define V_T7_NPH0(x) ((x) << S_T7_NPH0)
+#define G_T7_NPH0(x) (((x) >> S_T7_NPH0) & M_T7_NPH0)
+
#define A_PCIE_VC0_CDTS1 0x56d0
#define S_CPLH0 20
@@ -5380,6 +6997,7 @@
#define V_NPD0(x) ((x) << S_NPD0)
#define G_NPD0(x) (((x) >> S_NPD0) & M_NPD0)
+#define A_T7_PCIE_VC1_CDTS0 0x56d0
#define A_PCIE_VC1_CDTS0 0x56d4
#define S_CPLD1 20
@@ -5397,6 +7015,7 @@
#define V_PD1(x) ((x) << S_PD1)
#define G_PD1(x) (((x) >> S_PD1) & M_PD1)
+#define A_T7_PCIE_VC1_CDTS1 0x56d4
#define A_PCIE_VC1_CDTS1 0x56d8
#define S_CPLH1 20
@@ -5414,6 +7033,7 @@
#define V_NPD1(x) ((x) << S_NPD1)
#define G_NPD1(x) (((x) >> S_NPD1) & M_NPD1)
+#define A_PCIE_VC1_CDTS2 0x56d8
#define A_PCIE_FLR_PF_STATUS 0x56dc
#define A_PCIE_FLR_VF0_STATUS 0x56e0
#define A_PCIE_FLR_VF1_STATUS 0x56e4
@@ -5916,6 +7536,11 @@
#define V_DISABLE_SCRAMBLER(x) ((x) << S_DISABLE_SCRAMBLER)
#define F_DISABLE_SCRAMBLER V_DISABLE_SCRAMBLER(1U)
+#define S_RATE_SHADOW_SEL 24
+#define M_RATE_SHADOW_SEL 0x3U
+#define V_RATE_SHADOW_SEL(x) ((x) << S_RATE_SHADOW_SEL)
+#define G_RATE_SHADOW_SEL(x) (((x) >> S_RATE_SHADOW_SEL) & M_RATE_SHADOW_SEL)
+
#define A_PCIE_CORE_GEN3_EQ_FS_LF 0x5894
#define S_FULL_SWING 6
@@ -6347,6 +7972,35 @@
#define V_RDSOPCNT(x) ((x) << S_RDSOPCNT)
#define G_RDSOPCNT(x) (((x) >> S_RDSOPCNT) & M_RDSOPCNT)
+#define S_DMA_COOKIECNT 24
+#define M_DMA_COOKIECNT 0xfU
+#define V_DMA_COOKIECNT(x) ((x) << S_DMA_COOKIECNT)
+#define G_DMA_COOKIECNT(x) (((x) >> S_DMA_COOKIECNT) & M_DMA_COOKIECNT)
+
+#define S_DMA_RDSEQNUMUPDCNT 20
+#define M_DMA_RDSEQNUMUPDCNT 0xfU
+#define V_DMA_RDSEQNUMUPDCNT(x) ((x) << S_DMA_RDSEQNUMUPDCNT)
+#define G_DMA_RDSEQNUMUPDCNT(x) (((x) >> S_DMA_RDSEQNUMUPDCNT) & M_DMA_RDSEQNUMUPDCNT)
+
+#define S_DMA_SIREQCNT 16
+#define M_DMA_SIREQCNT 0xfU
+#define V_DMA_SIREQCNT(x) ((x) << S_DMA_SIREQCNT)
+#define G_DMA_SIREQCNT(x) (((x) >> S_DMA_SIREQCNT) & M_DMA_SIREQCNT)
+
+#define S_DMA_WREOPMATCHSOP 12
+#define V_DMA_WREOPMATCHSOP(x) ((x) << S_DMA_WREOPMATCHSOP)
+#define F_DMA_WREOPMATCHSOP V_DMA_WREOPMATCHSOP(1U)
+
+#define S_DMA_WRSOPCNT 8
+#define M_DMA_WRSOPCNT 0xfU
+#define V_DMA_WRSOPCNT(x) ((x) << S_DMA_WRSOPCNT)
+#define G_DMA_WRSOPCNT(x) (((x) >> S_DMA_WRSOPCNT) & M_DMA_WRSOPCNT)
+
+#define S_DMA_RDSOPCNT 0
+#define M_DMA_RDSOPCNT 0xffU
+#define V_DMA_RDSOPCNT(x) ((x) << S_DMA_RDSOPCNT)
+#define G_DMA_RDSOPCNT(x) (((x) >> S_DMA_RDSOPCNT) & M_DMA_RDSOPCNT)
+
#define A_PCIE_T5_DMA_STAT3 0x594c
#define S_ATMREQSOPCNT 24
@@ -6372,6 +8026,29 @@
#define V_RSPSOPCNT(x) ((x) << S_RSPSOPCNT)
#define G_RSPSOPCNT(x) (((x) >> S_RSPSOPCNT) & M_RSPSOPCNT)
+#define S_DMA_ATMREQSOPCNT 24
+#define M_DMA_ATMREQSOPCNT 0xffU
+#define V_DMA_ATMREQSOPCNT(x) ((x) << S_DMA_ATMREQSOPCNT)
+#define G_DMA_ATMREQSOPCNT(x) (((x) >> S_DMA_ATMREQSOPCNT) & M_DMA_ATMREQSOPCNT)
+
+#define S_DMA_ATMEOPMATCHSOP 17
+#define V_DMA_ATMEOPMATCHSOP(x) ((x) << S_DMA_ATMEOPMATCHSOP)
+#define F_DMA_ATMEOPMATCHSOP V_DMA_ATMEOPMATCHSOP(1U)
+
+#define S_DMA_RSPEOPMATCHSOP 16
+#define V_DMA_RSPEOPMATCHSOP(x) ((x) << S_DMA_RSPEOPMATCHSOP)
+#define F_DMA_RSPEOPMATCHSOP V_DMA_RSPEOPMATCHSOP(1U)
+
+#define S_DMA_RSPERRCNT 8
+#define M_DMA_RSPERRCNT 0xffU
+#define V_DMA_RSPERRCNT(x) ((x) << S_DMA_RSPERRCNT)
+#define G_DMA_RSPERRCNT(x) (((x) >> S_DMA_RSPERRCNT) & M_DMA_RSPERRCNT)
+
+#define S_DMA_RSPSOPCNT 0
+#define M_DMA_RSPSOPCNT 0xffU
+#define V_DMA_RSPSOPCNT(x) ((x) << S_DMA_RSPSOPCNT)
+#define G_DMA_RSPSOPCNT(x) (((x) >> S_DMA_RSPSOPCNT) & M_DMA_RSPSOPCNT)
+
#define A_PCIE_CORE_OUTBOUND_POSTED_HEADER_BUFFER_ALLOCATION 0x5960
#define S_OP0H 24
@@ -6507,11 +8184,6 @@
#define V_T6_USECMDPOOL(x) ((x) << S_T6_USECMDPOOL)
#define F_T6_USECMDPOOL V_T6_USECMDPOOL(1U)
-#define S_T6_MINTAG 0
-#define M_T6_MINTAG 0xffU
-#define V_T6_MINTAG(x) ((x) << S_T6_MINTAG)
-#define G_T6_MINTAG(x) (((x) >> S_T6_MINTAG) & M_T6_MINTAG)
-
#define A_PCIE_T5_CMD_STAT 0x5984
#define S_T5_STAT_RSPCNT 20
@@ -6558,6 +8230,21 @@
#define A_PCIE_T5_CMD_STAT2 0x5988
#define A_PCIE_T5_CMD_STAT3 0x598c
+
+#define S_CMD_RSPEOPMATCHSOP 16
+#define V_CMD_RSPEOPMATCHSOP(x) ((x) << S_CMD_RSPEOPMATCHSOP)
+#define F_CMD_RSPEOPMATCHSOP V_CMD_RSPEOPMATCHSOP(1U)
+
+#define S_CMD_RSPERRCNT 8
+#define M_CMD_RSPERRCNT 0xffU
+#define V_CMD_RSPERRCNT(x) ((x) << S_CMD_RSPERRCNT)
+#define G_CMD_RSPERRCNT(x) (((x) >> S_CMD_RSPERRCNT) & M_CMD_RSPERRCNT)
+
+#define S_CMD_RSPSOPCNT 0
+#define M_CMD_RSPSOPCNT 0xffU
+#define V_CMD_RSPSOPCNT(x) ((x) << S_CMD_RSPSOPCNT)
+#define G_CMD_RSPSOPCNT(x) (((x) >> S_CMD_RSPSOPCNT) & M_CMD_RSPSOPCNT)
+
#define A_PCIE_CORE_PCI_EXPRESS_TAGS_ALLOCATION 0x5990
#define S_OC0T 24
@@ -6868,14 +8555,14 @@
#define V_T6_T5_HMA_MAXRSPCNT(x) ((x) << S_T6_T5_HMA_MAXRSPCNT)
#define G_T6_T5_HMA_MAXRSPCNT(x) (((x) >> S_T6_T5_HMA_MAXRSPCNT) & M_T6_T5_HMA_MAXRSPCNT)
-#define S_T6_SEQCHKDIS 8
-#define V_T6_SEQCHKDIS(x) ((x) << S_T6_SEQCHKDIS)
-#define F_T6_SEQCHKDIS V_T6_SEQCHKDIS(1U)
+#define S_T5_HMA_SEQCHKDIS 8
+#define V_T5_HMA_SEQCHKDIS(x) ((x) << S_T5_HMA_SEQCHKDIS)
+#define F_T5_HMA_SEQCHKDIS V_T5_HMA_SEQCHKDIS(1U)
-#define S_T6_MINTAG 0
-#define M_T6_MINTAG 0xffU
-#define V_T6_MINTAG(x) ((x) << S_T6_MINTAG)
-#define G_T6_MINTAG(x) (((x) >> S_T6_MINTAG) & M_T6_MINTAG)
+#define S_T5_MINTAG 0
+#define M_T5_MINTAG 0xffU
+#define V_T5_MINTAG(x) ((x) << S_T5_MINTAG)
+#define G_T5_MINTAG(x) (((x) >> S_T5_MINTAG) & M_T5_MINTAG)
#define A_PCIE_CORE_ROOT_COMPLEX_ERROR_SEVERITY 0x59b4
@@ -6992,6 +8679,31 @@
#define F_CRSI V_CRSI(1U)
#define A_PCIE_T5_HMA_STAT2 0x59b8
+
+#define S_HMA_COOKIECNT 24
+#define M_HMA_COOKIECNT 0xfU
+#define V_HMA_COOKIECNT(x) ((x) << S_HMA_COOKIECNT)
+#define G_HMA_COOKIECNT(x) (((x) >> S_HMA_COOKIECNT) & M_HMA_COOKIECNT)
+
+#define S_HMA_RDSEQNUMUPDCNT 20
+#define M_HMA_RDSEQNUMUPDCNT 0xfU
+#define V_HMA_RDSEQNUMUPDCNT(x) ((x) << S_HMA_RDSEQNUMUPDCNT)
+#define G_HMA_RDSEQNUMUPDCNT(x) (((x) >> S_HMA_RDSEQNUMUPDCNT) & M_HMA_RDSEQNUMUPDCNT)
+
+#define S_HMA_WREOPMATCHSOP 12
+#define V_HMA_WREOPMATCHSOP(x) ((x) << S_HMA_WREOPMATCHSOP)
+#define F_HMA_WREOPMATCHSOP V_HMA_WREOPMATCHSOP(1U)
+
+#define S_HMA_WRSOPCNT 8
+#define M_HMA_WRSOPCNT 0xfU
+#define V_HMA_WRSOPCNT(x) ((x) << S_HMA_WRSOPCNT)
+#define G_HMA_WRSOPCNT(x) (((x) >> S_HMA_WRSOPCNT) & M_HMA_WRSOPCNT)
+
+#define S_HMA_RDSOPCNT 0
+#define M_HMA_RDSOPCNT 0xffU
+#define V_HMA_RDSOPCNT(x) ((x) << S_HMA_RDSOPCNT)
+#define G_HMA_RDSOPCNT(x) (((x) >> S_HMA_RDSOPCNT) & M_HMA_RDSOPCNT)
+
#define A_PCIE_CORE_ENDPOINT_STATUS 0x59bc
#define S_PTOM 31
@@ -7035,6 +8747,21 @@
#define F_PMC7 V_PMC7(1U)
#define A_PCIE_T5_HMA_STAT3 0x59bc
+
+#define S_HMA_RSPEOPMATCHSOP 16
+#define V_HMA_RSPEOPMATCHSOP(x) ((x) << S_HMA_RSPEOPMATCHSOP)
+#define F_HMA_RSPEOPMATCHSOP V_HMA_RSPEOPMATCHSOP(1U)
+
+#define S_HMA_RSPERRCNT 8
+#define M_HMA_RSPERRCNT 0xffU
+#define V_HMA_RSPERRCNT(x) ((x) << S_HMA_RSPERRCNT)
+#define G_HMA_RSPERRCNT(x) (((x) >> S_HMA_RSPERRCNT) & M_HMA_RSPERRCNT)
+
+#define S_HMA_RSPSOPCNT 0
+#define M_HMA_RSPSOPCNT 0xffU
+#define V_HMA_RSPSOPCNT(x) ((x) << S_HMA_RSPSOPCNT)
+#define G_HMA_RSPSOPCNT(x) (((x) >> S_HMA_RSPSOPCNT) & M_HMA_RSPSOPCNT)
+
#define A_PCIE_CORE_ENDPOINT_ERROR_SEVERITY 0x59c0
#define S_PTOS 31
@@ -7187,6 +8914,14 @@
#define V_STI_SLEEPREQ(x) ((x) << S_STI_SLEEPREQ)
#define F_STI_SLEEPREQ V_STI_SLEEPREQ(1U)
+#define S_ARM_STATIC_CGEN 28
+#define V_ARM_STATIC_CGEN(x) ((x) << S_ARM_STATIC_CGEN)
+#define F_ARM_STATIC_CGEN V_ARM_STATIC_CGEN(1U)
+
+#define S_ARM_DYNAMIC_CGEN 27
+#define V_ARM_DYNAMIC_CGEN(x) ((x) << S_ARM_DYNAMIC_CGEN)
+#define F_ARM_DYNAMIC_CGEN V_ARM_DYNAMIC_CGEN(1U)
+
#define A_PCIE_CORE_ENDPOINT_INTERRUPT_ENABLE 0x59c4
#define S_PTOI 31
@@ -7521,6 +9256,14 @@
#define V_PIOCPL_VDMTXDATAPERR(x) ((x) << S_PIOCPL_VDMTXDATAPERR)
#define F_PIOCPL_VDMTXDATAPERR V_PIOCPL_VDMTXDATAPERR(1U)
+#define S_TGT1_MEM_PERR 28
+#define V_TGT1_MEM_PERR(x) ((x) << S_TGT1_MEM_PERR)
+#define F_TGT1_MEM_PERR V_TGT1_MEM_PERR(1U)
+
+#define S_TGT2_MEM_PERR 27
+#define V_TGT2_MEM_PERR(x) ((x) << S_TGT2_MEM_PERR)
+#define F_TGT2_MEM_PERR V_TGT2_MEM_PERR(1U)
+
#define A_PCIE_CORE_GENERAL_PURPOSE_CONTROL_2 0x59d4
#define A_PCIE_RSP_ERR_INT_LOG_EN 0x59d4
@@ -7622,6 +9365,16 @@
#define V_T6_REQVFID(x) ((x) << S_T6_REQVFID)
#define G_T6_REQVFID(x) (((x) >> S_T6_REQVFID) & M_T6_REQVFID)
+#define S_LOGADDR10B 9
+#define M_LOGADDR10B 0x3ffU
+#define V_LOGADDR10B(x) ((x) << S_LOGADDR10B)
+#define G_LOGADDR10B(x) (((x) >> S_LOGADDR10B) & M_LOGADDR10B)
+
+#define S_LOGREQVFID 0
+#define M_LOGREQVFID 0x1ffU
+#define V_LOGREQVFID(x) ((x) << S_LOGREQVFID)
+#define G_LOGREQVFID(x) (((x) >> S_LOGREQVFID) & M_LOGREQVFID)
+
#define A_PCIE_CHANGESET 0x59fc
#define A_PCIE_REVISION 0x5a00
#define A_PCIE_PDEBUG_INDEX 0x5a04
@@ -7646,6 +9399,16 @@
#define V_T6_PDEBUGSELL(x) ((x) << S_T6_PDEBUGSELL)
#define G_T6_PDEBUGSELL(x) (((x) >> S_T6_PDEBUGSELL) & M_T6_PDEBUGSELL)
+#define S_T7_1_PDEBUGSELH 16
+#define M_T7_1_PDEBUGSELH 0xffU
+#define V_T7_1_PDEBUGSELH(x) ((x) << S_T7_1_PDEBUGSELH)
+#define G_T7_1_PDEBUGSELH(x) (((x) >> S_T7_1_PDEBUGSELH) & M_T7_1_PDEBUGSELH)
+
+#define S_T7_1_PDEBUGSELL 0
+#define M_T7_1_PDEBUGSELL 0xffU
+#define V_T7_1_PDEBUGSELL(x) ((x) << S_T7_1_PDEBUGSELL)
+#define G_T7_1_PDEBUGSELL(x) (((x) >> S_T7_1_PDEBUGSELL) & M_T7_1_PDEBUGSELL)
+
#define A_PCIE_PDEBUG_DATA_HIGH 0x5a08
#define A_PCIE_PDEBUG_DATA_LOW 0x5a0c
#define A_PCIE_CDEBUG_INDEX 0x5a10
@@ -8468,6 +10231,21 @@
#define A_PCIE_PHY_INDIR_DATA 0x5bf4
#define A_PCIE_STATIC_SPARE1 0x5bf8
#define A_PCIE_STATIC_SPARE2 0x5bfc
+
+#define S_X8_SW_EN 30
+#define V_X8_SW_EN(x) ((x) << S_X8_SW_EN)
+#define F_X8_SW_EN V_X8_SW_EN(1U)
+
+#define S_SWITCHCFG 28
+#define M_SWITCHCFG 0x3U
+#define V_SWITCHCFG(x) ((x) << S_SWITCHCFG)
+#define G_SWITCHCFG(x) (((x) >> S_SWITCHCFG) & M_SWITCHCFG)
+
+#define S_STATIC_SPARE2 0
+#define M_STATIC_SPARE2 0xfffffffU
+#define V_STATIC_SPARE2(x) ((x) << S_STATIC_SPARE2)
+#define G_STATIC_SPARE2(x) (((x) >> S_STATIC_SPARE2) & M_STATIC_SPARE2)
+
#define A_PCIE_KDOORBELL_GTS_PF_BASE_LEN 0x5c10
#define S_KDB_PF_LEN 24
@@ -8872,9 +10650,13 @@
#define A_PCIE_FLR_VF6_STATUS 0x5e78
#define A_PCIE_FLR_VF7_STATUS 0x5e7c
#define A_T6_PCIE_BUS_MST_STAT_4 0x5e80
+#define A_T7_PCIE_BUS_MST_STAT_4 0x5e80
#define A_T6_PCIE_BUS_MST_STAT_5 0x5e84
+#define A_T7_PCIE_BUS_MST_STAT_5 0x5e84
#define A_T6_PCIE_BUS_MST_STAT_6 0x5e88
+#define A_T7_PCIE_BUS_MST_STAT_6 0x5e88
#define A_T6_PCIE_BUS_MST_STAT_7 0x5e8c
+#define A_T7_PCIE_BUS_MST_STAT_7 0x5e8c
#define A_PCIE_BUS_MST_STAT_8 0x5e90
#define S_BUSMST_263_256 0
@@ -8895,9 +10677,13 @@
#define G_DATAFREECNT(x) (((x) >> S_DATAFREECNT) & M_DATAFREECNT)
#define A_T6_PCIE_RSP_ERR_STAT_4 0x5ea0
+#define A_T7_PCIE_RSP_ERR_STAT_4 0x5ea0
#define A_T6_PCIE_RSP_ERR_STAT_5 0x5ea4
+#define A_T7_PCIE_RSP_ERR_STAT_5 0x5ea4
#define A_T6_PCIE_RSP_ERR_STAT_6 0x5ea8
+#define A_T7_PCIE_RSP_ERR_STAT_6 0x5ea8
#define A_T6_PCIE_RSP_ERR_STAT_7 0x5eac
+#define A_T7_PCIE_RSP_ERR_STAT_7 0x5eac
#define A_PCIE_RSP_ERR_STAT_8 0x5eb0
#define S_RSPERR_263_256 0
@@ -9025,6 +10811,1028 @@
#define A_PCIE_DEBUG_ADDR_RANGE1 0x5ee0
#define A_PCIE_DEBUG_ADDR_RANGE2 0x5ef0
#define A_PCIE_DEBUG_ADDR_RANGE_CNT 0x5f00
+#define A_PCIE_PHY_PGM_LOAD_CTRL 0x5f04
+
+#define S_HSS_PMLD_ACC_EN 31
+#define V_HSS_PMLD_ACC_EN(x) ((x) << S_HSS_PMLD_ACC_EN)
+#define F_HSS_PMLD_ACC_EN V_HSS_PMLD_ACC_EN(1U)
+
+#define S_HSS_PMRDWR_ADDR 0
+#define M_HSS_PMRDWR_ADDR 0x3ffffU
+#define V_HSS_PMRDWR_ADDR(x) ((x) << S_HSS_PMRDWR_ADDR)
+#define G_HSS_PMRDWR_ADDR(x) (((x) >> S_HSS_PMRDWR_ADDR) & M_HSS_PMRDWR_ADDR)
+
+#define A_PCIE_PHY_PGM_LOAD_DATA 0x5f08
+#define A_PCIE_HSS_CFG 0x5f0c
+
+#define S_HSS_PCS_AGGREGATION_MODE 30
+#define M_HSS_PCS_AGGREGATION_MODE 0x3U
+#define V_HSS_PCS_AGGREGATION_MODE(x) ((x) << S_HSS_PCS_AGGREGATION_MODE)
+#define G_HSS_PCS_AGGREGATION_MODE(x) (((x) >> S_HSS_PCS_AGGREGATION_MODE) & M_HSS_PCS_AGGREGATION_MODE)
+
+#define S_HSS_PCS_FURCATE_MODE 28
+#define M_HSS_PCS_FURCATE_MODE 0x3U
+#define V_HSS_PCS_FURCATE_MODE(x) ((x) << S_HSS_PCS_FURCATE_MODE)
+#define G_HSS_PCS_FURCATE_MODE(x) (((x) >> S_HSS_PCS_FURCATE_MODE) & M_HSS_PCS_FURCATE_MODE)
+
+#define S_HSS_PCS_PCLK_ON_IN_P2 27
+#define V_HSS_PCS_PCLK_ON_IN_P2(x) ((x) << S_HSS_PCS_PCLK_ON_IN_P2)
+#define F_HSS_PCS_PCLK_ON_IN_P2 V_HSS_PCS_PCLK_ON_IN_P2(1U)
+
+#define S_HSS0_PHY_CTRL_REFCLK 17
+#define M_HSS0_PHY_CTRL_REFCLK 0x1fU
+#define V_HSS0_PHY_CTRL_REFCLK(x) ((x) << S_HSS0_PHY_CTRL_REFCLK)
+#define G_HSS0_PHY_CTRL_REFCLK(x) (((x) >> S_HSS0_PHY_CTRL_REFCLK) & M_HSS0_PHY_CTRL_REFCLK)
+
+#define S_HSS1_PHY_CTRL_REFCLK 12
+#define M_HSS1_PHY_CTRL_REFCLK 0x1fU
+#define V_HSS1_PHY_CTRL_REFCLK(x) ((x) << S_HSS1_PHY_CTRL_REFCLK)
+#define G_HSS1_PHY_CTRL_REFCLK(x) (((x) >> S_HSS1_PHY_CTRL_REFCLK) & M_HSS1_PHY_CTRL_REFCLK)
+
+#define S_HSS0_PHY_REXT_MASTER 11
+#define V_HSS0_PHY_REXT_MASTER(x) ((x) << S_HSS0_PHY_REXT_MASTER)
+#define F_HSS0_PHY_REXT_MASTER V_HSS0_PHY_REXT_MASTER(1U)
+
+#define S_HSS1_PHY_REXT_MASTER 10
+#define V_HSS1_PHY_REXT_MASTER(x) ((x) << S_HSS1_PHY_REXT_MASTER)
+#define F_HSS1_PHY_REXT_MASTER V_HSS1_PHY_REXT_MASTER(1U)
+
+#define S_HSS0_PHY_CTRL_VDDA_SEL 9
+#define V_HSS0_PHY_CTRL_VDDA_SEL(x) ((x) << S_HSS0_PHY_CTRL_VDDA_SEL)
+#define F_HSS0_PHY_CTRL_VDDA_SEL V_HSS0_PHY_CTRL_VDDA_SEL(1U)
+
+#define S_HSS0_PHY_CTRL_VDDHA_SEL 8
+#define V_HSS0_PHY_CTRL_VDDHA_SEL(x) ((x) << S_HSS0_PHY_CTRL_VDDHA_SEL)
+#define F_HSS0_PHY_CTRL_VDDHA_SEL V_HSS0_PHY_CTRL_VDDHA_SEL(1U)
+
+#define S_HSS1_PHY_CTRL_VDDA_SEL 7
+#define V_HSS1_PHY_CTRL_VDDA_SEL(x) ((x) << S_HSS1_PHY_CTRL_VDDA_SEL)
+#define F_HSS1_PHY_CTRL_VDDA_SEL V_HSS1_PHY_CTRL_VDDA_SEL(1U)
+
+#define S_HSS1_PHY_CTRL_VDDHA_SEL 6
+#define V_HSS1_PHY_CTRL_VDDHA_SEL(x) ((x) << S_HSS1_PHY_CTRL_VDDHA_SEL)
+#define F_HSS1_PHY_CTRL_VDDHA_SEL V_HSS1_PHY_CTRL_VDDHA_SEL(1U)
+
+#define S_HSS1_CPU_MEMPSACK 5
+#define V_HSS1_CPU_MEMPSACK(x) ((x) << S_HSS1_CPU_MEMPSACK)
+#define F_HSS1_CPU_MEMPSACK V_HSS1_CPU_MEMPSACK(1U)
+
+#define S_HSS0_CPU_MEMPSACK 3
+#define V_HSS0_CPU_MEMPSACK(x) ((x) << S_HSS0_CPU_MEMPSACK)
+#define F_HSS0_CPU_MEMPSACK V_HSS0_CPU_MEMPSACK(1U)
+
+#define S_HSS1_CPU_MEMACK 4
+#define V_HSS1_CPU_MEMACK(x) ((x) << S_HSS1_CPU_MEMACK)
+#define F_HSS1_CPU_MEMACK V_HSS1_CPU_MEMACK(1U)
+
+#define S_HSS0_CPU_MEMACK 2
+#define V_HSS0_CPU_MEMACK(x) ((x) << S_HSS0_CPU_MEMACK)
+#define F_HSS0_CPU_MEMACK V_HSS0_CPU_MEMACK(1U)
+
+#define S_HSS_PM_IS_ROM 1
+#define V_HSS_PM_IS_ROM(x) ((x) << S_HSS_PM_IS_ROM)
+#define F_HSS_PM_IS_ROM V_HSS_PM_IS_ROM(1U)
+
+#define A_PCIE_HSS_RST 0x5f10
+
+#define S_HSS_RST_CTRL_BY_FW 31
+#define V_HSS_RST_CTRL_BY_FW(x) ((x) << S_HSS_RST_CTRL_BY_FW)
+#define F_HSS_RST_CTRL_BY_FW V_HSS_RST_CTRL_BY_FW(1U)
+
+#define S_HSS_PIPE0_RESET_N 30
+#define V_HSS_PIPE0_RESET_N(x) ((x) << S_HSS_PIPE0_RESET_N)
+#define F_HSS_PIPE0_RESET_N V_HSS_PIPE0_RESET_N(1U)
+
+#define S_HSS0_POR_N 29
+#define V_HSS0_POR_N(x) ((x) << S_HSS0_POR_N)
+#define F_HSS0_POR_N V_HSS0_POR_N(1U)
+
+#define S_HSS1_POR_N 28
+#define V_HSS1_POR_N(x) ((x) << S_HSS1_POR_N)
+#define F_HSS1_POR_N V_HSS1_POR_N(1U)
+
+#define S_HSS0_CPU_RESET 27
+#define V_HSS0_CPU_RESET(x) ((x) << S_HSS0_CPU_RESET)
+#define F_HSS0_CPU_RESET V_HSS0_CPU_RESET(1U)
+
+#define S_HSS1_CPU_RESET 26
+#define V_HSS1_CPU_RESET(x) ((x) << S_HSS1_CPU_RESET)
+#define F_HSS1_CPU_RESET V_HSS1_CPU_RESET(1U)
+
+#define S_HSS_PCS_POR_N 25
+#define V_HSS_PCS_POR_N(x) ((x) << S_HSS_PCS_POR_N)
+#define F_HSS_PCS_POR_N V_HSS_PCS_POR_N(1U)
+
+#define S_SW_CRST_ 24
+#define V_SW_CRST_(x) ((x) << S_SW_CRST_)
+#define F_SW_CRST_ V_SW_CRST_(1U)
+
+#define S_SW_PCIECRST_ 23
+#define V_SW_PCIECRST_(x) ((x) << S_SW_PCIECRST_)
+#define F_SW_PCIECRST_ V_SW_PCIECRST_(1U)
+
+#define S_SW_PCIEPIPERST_ 22
+#define V_SW_PCIEPIPERST_(x) ((x) << S_SW_PCIEPIPERST_)
+#define F_SW_PCIEPIPERST_ V_SW_PCIEPIPERST_(1U)
+
+#define S_SW_PCIEPHYRST_ 21
+#define V_SW_PCIEPHYRST_(x) ((x) << S_SW_PCIEPHYRST_)
+#define F_SW_PCIEPHYRST_ V_SW_PCIEPHYRST_(1U)
+
+#define S_HSS1_ERR_O 3
+#define V_HSS1_ERR_O(x) ((x) << S_HSS1_ERR_O)
+#define F_HSS1_ERR_O V_HSS1_ERR_O(1U)
+
+#define S_HSS0_ERR_O 2
+#define V_HSS0_ERR_O(x) ((x) << S_HSS0_ERR_O)
+#define F_HSS0_ERR_O V_HSS0_ERR_O(1U)
+
+#define S_HSS1_PLL_LOCK 1
+#define V_HSS1_PLL_LOCK(x) ((x) << S_HSS1_PLL_LOCK)
+#define F_HSS1_PLL_LOCK V_HSS1_PLL_LOCK(1U)
+
+#define S_HSS0_PLL_LOCK 0
+#define V_HSS0_PLL_LOCK(x) ((x) << S_HSS0_PLL_LOCK)
+#define F_HSS0_PLL_LOCK V_HSS0_PLL_LOCK(1U)
+
+#define A_PCIE_T5_ARM_CFG 0x5f20
+
+#define S_T5_ARM_MAXREQCNT 20
+#define M_T5_ARM_MAXREQCNT 0x7fU
+#define V_T5_ARM_MAXREQCNT(x) ((x) << S_T5_ARM_MAXREQCNT)
+#define G_T5_ARM_MAXREQCNT(x) (((x) >> S_T5_ARM_MAXREQCNT) & M_T5_ARM_MAXREQCNT)
+
+#define S_T5_ARM_MAXRDREQSIZE 17
+#define M_T5_ARM_MAXRDREQSIZE 0x7U
+#define V_T5_ARM_MAXRDREQSIZE(x) ((x) << S_T5_ARM_MAXRDREQSIZE)
+#define G_T5_ARM_MAXRDREQSIZE(x) (((x) >> S_T5_ARM_MAXRDREQSIZE) & M_T5_ARM_MAXRDREQSIZE)
+
+#define S_T5_ARM_MAXRSPCNT 9
+#define M_T5_ARM_MAXRSPCNT 0xffU
+#define V_T5_ARM_MAXRSPCNT(x) ((x) << S_T5_ARM_MAXRSPCNT)
+#define G_T5_ARM_MAXRSPCNT(x) (((x) >> S_T5_ARM_MAXRSPCNT) & M_T5_ARM_MAXRSPCNT)
+
+#define A_PCIE_T5_ARM_STAT 0x5f24
+
+#define S_ARM_RESPCNT 20
+#define M_ARM_RESPCNT 0x1ffU
+#define V_ARM_RESPCNT(x) ((x) << S_ARM_RESPCNT)
+#define G_ARM_RESPCNT(x) (((x) >> S_ARM_RESPCNT) & M_ARM_RESPCNT)
+
+#define S_ARM_RDREQCNT 12
+#define M_ARM_RDREQCNT 0x3fU
+#define V_ARM_RDREQCNT(x) ((x) << S_ARM_RDREQCNT)
+#define G_ARM_RDREQCNT(x) (((x) >> S_ARM_RDREQCNT) & M_ARM_RDREQCNT)
+
+#define S_ARM_WRREQCNT 0
+#define M_ARM_WRREQCNT 0x1ffU
+#define V_ARM_WRREQCNT(x) ((x) << S_ARM_WRREQCNT)
+#define G_ARM_WRREQCNT(x) (((x) >> S_ARM_WRREQCNT) & M_ARM_WRREQCNT)
+
+#define A_PCIE_T5_ARM_STAT2 0x5f28
+
+#define S_ARM_COOKIECNT 24
+#define M_ARM_COOKIECNT 0xfU
+#define V_ARM_COOKIECNT(x) ((x) << S_ARM_COOKIECNT)
+#define G_ARM_COOKIECNT(x) (((x) >> S_ARM_COOKIECNT) & M_ARM_COOKIECNT)
+
+#define S_ARM_RDSEQNUMUPDCNT 20
+#define M_ARM_RDSEQNUMUPDCNT 0xfU
+#define V_ARM_RDSEQNUMUPDCNT(x) ((x) << S_ARM_RDSEQNUMUPDCNT)
+#define G_ARM_RDSEQNUMUPDCNT(x) (((x) >> S_ARM_RDSEQNUMUPDCNT) & M_ARM_RDSEQNUMUPDCNT)
+
+#define S_ARM_SIREQCNT 16
+#define M_ARM_SIREQCNT 0xfU
+#define V_ARM_SIREQCNT(x) ((x) << S_ARM_SIREQCNT)
+#define G_ARM_SIREQCNT(x) (((x) >> S_ARM_SIREQCNT) & M_ARM_SIREQCNT)
+
+#define S_ARM_WREOPMATCHSOP 12
+#define V_ARM_WREOPMATCHSOP(x) ((x) << S_ARM_WREOPMATCHSOP)
+#define F_ARM_WREOPMATCHSOP V_ARM_WREOPMATCHSOP(1U)
+
+#define S_ARM_WRSOPCNT 8
+#define M_ARM_WRSOPCNT 0xfU
+#define V_ARM_WRSOPCNT(x) ((x) << S_ARM_WRSOPCNT)
+#define G_ARM_WRSOPCNT(x) (((x) >> S_ARM_WRSOPCNT) & M_ARM_WRSOPCNT)
+
+#define S_ARM_RDSOPCNT 0
+#define M_ARM_RDSOPCNT 0xffU
+#define V_ARM_RDSOPCNT(x) ((x) << S_ARM_RDSOPCNT)
+#define G_ARM_RDSOPCNT(x) (((x) >> S_ARM_RDSOPCNT) & M_ARM_RDSOPCNT)
+
+#define A_PCIE_T5_ARM_STAT3 0x5f2c
+
+#define S_ARM_ATMREQSOPCNT 24
+#define M_ARM_ATMREQSOPCNT 0xffU
+#define V_ARM_ATMREQSOPCNT(x) ((x) << S_ARM_ATMREQSOPCNT)
+#define G_ARM_ATMREQSOPCNT(x) (((x) >> S_ARM_ATMREQSOPCNT) & M_ARM_ATMREQSOPCNT)
+
+#define S_ARM_ATMEOPMATCHSOP 17
+#define V_ARM_ATMEOPMATCHSOP(x) ((x) << S_ARM_ATMEOPMATCHSOP)
+#define F_ARM_ATMEOPMATCHSOP V_ARM_ATMEOPMATCHSOP(1U)
+
+#define S_ARM_RSPEOPMATCHSOP 16
+#define V_ARM_RSPEOPMATCHSOP(x) ((x) << S_ARM_RSPEOPMATCHSOP)
+#define F_ARM_RSPEOPMATCHSOP V_ARM_RSPEOPMATCHSOP(1U)
+
+#define S_ARM_RSPERRCNT 8
+#define M_ARM_RSPERRCNT 0xffU
+#define V_ARM_RSPERRCNT(x) ((x) << S_ARM_RSPERRCNT)
+#define G_ARM_RSPERRCNT(x) (((x) >> S_ARM_RSPERRCNT) & M_ARM_RSPERRCNT)
+
+#define S_ARM_RSPSOPCNT 0
+#define M_ARM_RSPSOPCNT 0xffU
+#define V_ARM_RSPSOPCNT(x) ((x) << S_ARM_RSPSOPCNT)
+#define G_ARM_RSPSOPCNT(x) (((x) >> S_ARM_RSPSOPCNT) & M_ARM_RSPSOPCNT)
+
+#define A_PCIE_ARM_REQUESTER_ID 0x5f30
+
+#define S_A0_RSVD1 24
+#define M_A0_RSVD1 0xffU
+#define V_A0_RSVD1(x) ((x) << S_A0_RSVD1)
+#define G_A0_RSVD1(x) (((x) >> S_A0_RSVD1) & M_A0_RSVD1)
+
+#define S_A0_PRIMBUSNUMBER 16
+#define M_A0_PRIMBUSNUMBER 0xffU
+#define V_A0_PRIMBUSNUMBER(x) ((x) << S_A0_PRIMBUSNUMBER)
+#define G_A0_PRIMBUSNUMBER(x) (((x) >> S_A0_PRIMBUSNUMBER) & M_A0_PRIMBUSNUMBER)
+
+#define S_A0_REQUESTERID 0
+#define M_A0_REQUESTERID 0xffffU
+#define V_A0_REQUESTERID(x) ((x) << S_A0_REQUESTERID)
+#define G_A0_REQUESTERID(x) (((x) >> S_A0_REQUESTERID) & M_A0_REQUESTERID)
+
+#define A_PCIE_SWITCH_CFG_SPACE_REQ0 0x5f34
+
+#define S_REQ0ENABLE 31
+#define V_REQ0ENABLE(x) ((x) << S_REQ0ENABLE)
+#define F_REQ0ENABLE V_REQ0ENABLE(1U)
+
+#define S_RDREQ0TYPE 19
+#define V_RDREQ0TYPE(x) ((x) << S_RDREQ0TYPE)
+#define F_RDREQ0TYPE V_RDREQ0TYPE(1U)
+
+#define S_BYTEENABLE0 15
+#define M_BYTEENABLE0 0xfU
+#define V_BYTEENABLE0(x) ((x) << S_BYTEENABLE0)
+#define G_BYTEENABLE0(x) (((x) >> S_BYTEENABLE0) & M_BYTEENABLE0)
+
+#define S_REGADDR0 0
+#define M_REGADDR0 0x7fffU
+#define V_REGADDR0(x) ((x) << S_REGADDR0)
+#define G_REGADDR0(x) (((x) >> S_REGADDR0) & M_REGADDR0)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA0 0x5f38
+#define A_PCIE_SWITCH_CFG_SPACE_REQ1 0x5f3c
+
+#define S_REQ1ENABLE 31
+#define V_REQ1ENABLE(x) ((x) << S_REQ1ENABLE)
+#define F_REQ1ENABLE V_REQ1ENABLE(1U)
+
+#define S_RDREQ1TYPE 26
+#define M_RDREQ1TYPE 0xfU
+#define V_RDREQ1TYPE(x) ((x) << S_RDREQ1TYPE)
+#define G_RDREQ1TYPE(x) (((x) >> S_RDREQ1TYPE) & M_RDREQ1TYPE)
+
+#define S_BYTEENABLE1 15
+#define M_BYTEENABLE1 0x7ffU
+#define V_BYTEENABLE1(x) ((x) << S_BYTEENABLE1)
+#define G_BYTEENABLE1(x) (((x) >> S_BYTEENABLE1) & M_BYTEENABLE1)
+
+#define S_REGADDR1 0
+#define M_REGADDR1 0x7fffU
+#define V_REGADDR1(x) ((x) << S_REGADDR1)
+#define G_REGADDR1(x) (((x) >> S_REGADDR1) & M_REGADDR1)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA1 0x5f40
+#define A_PCIE_SWITCH_CFG_SPACE_REQ2 0x5f44
+
+#define S_REQ2ENABLE 31
+#define V_REQ2ENABLE(x) ((x) << S_REQ2ENABLE)
+#define F_REQ2ENABLE V_REQ2ENABLE(1U)
+
+#define S_RDREQ2TYPE 26
+#define M_RDREQ2TYPE 0xfU
+#define V_RDREQ2TYPE(x) ((x) << S_RDREQ2TYPE)
+#define G_RDREQ2TYPE(x) (((x) >> S_RDREQ2TYPE) & M_RDREQ2TYPE)
+
+#define S_BYTEENABLE2 15
+#define M_BYTEENABLE2 0x7ffU
+#define V_BYTEENABLE2(x) ((x) << S_BYTEENABLE2)
+#define G_BYTEENABLE2(x) (((x) >> S_BYTEENABLE2) & M_BYTEENABLE2)
+
+#define S_REGADDR2 0
+#define M_REGADDR2 0x7fffU
+#define V_REGADDR2(x) ((x) << S_REGADDR2)
+#define G_REGADDR2(x) (((x) >> S_REGADDR2) & M_REGADDR2)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA2 0x5f48
+#define A_PCIE_SWITCH_CFG_SPACE_REQ3 0x5f4c
+
+#define S_REQ3ENABLE 31
+#define V_REQ3ENABLE(x) ((x) << S_REQ3ENABLE)
+#define F_REQ3ENABLE V_REQ3ENABLE(1U)
+
+#define S_RDREQ3TYPE 26
+#define M_RDREQ3TYPE 0xfU
+#define V_RDREQ3TYPE(x) ((x) << S_RDREQ3TYPE)
+#define G_RDREQ3TYPE(x) (((x) >> S_RDREQ3TYPE) & M_RDREQ3TYPE)
+
+#define S_BYTEENABLE3 15
+#define M_BYTEENABLE3 0x7ffU
+#define V_BYTEENABLE3(x) ((x) << S_BYTEENABLE3)
+#define G_BYTEENABLE3(x) (((x) >> S_BYTEENABLE3) & M_BYTEENABLE3)
+
+#define S_REGADDR3 0
+#define M_REGADDR3 0x7fffU
+#define V_REGADDR3(x) ((x) << S_REGADDR3)
+#define G_REGADDR3(x) (((x) >> S_REGADDR3) & M_REGADDR3)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA3 0x5f50
+#define A_PCIE_SWITCH_CFG_SPACE_REQ4 0x5f54
+
+#define S_REQ4ENABLE 31
+#define V_REQ4ENABLE(x) ((x) << S_REQ4ENABLE)
+#define F_REQ4ENABLE V_REQ4ENABLE(1U)
+
+#define S_RDREQ4TYPE 26
+#define M_RDREQ4TYPE 0xfU
+#define V_RDREQ4TYPE(x) ((x) << S_RDREQ4TYPE)
+#define G_RDREQ4TYPE(x) (((x) >> S_RDREQ4TYPE) & M_RDREQ4TYPE)
+
+#define S_BYTEENABLE4 15
+#define M_BYTEENABLE4 0x7ffU
+#define V_BYTEENABLE4(x) ((x) << S_BYTEENABLE4)
+#define G_BYTEENABLE4(x) (((x) >> S_BYTEENABLE4) & M_BYTEENABLE4)
+
+#define S_REGADDR4 0
+#define M_REGADDR4 0x7fffU
+#define V_REGADDR4(x) ((x) << S_REGADDR4)
+#define G_REGADDR4(x) (((x) >> S_REGADDR4) & M_REGADDR4)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA4 0x5f58
+#define A_PCIE_SWITCH_CFG_SPACE_REQ5 0x5f5c
+
+#define S_REQ5ENABLE 31
+#define V_REQ5ENABLE(x) ((x) << S_REQ5ENABLE)
+#define F_REQ5ENABLE V_REQ5ENABLE(1U)
+
+#define S_RDREQ5TYPE 26
+#define M_RDREQ5TYPE 0xfU
+#define V_RDREQ5TYPE(x) ((x) << S_RDREQ5TYPE)
+#define G_RDREQ5TYPE(x) (((x) >> S_RDREQ5TYPE) & M_RDREQ5TYPE)
+
+#define S_BYTEENABLE5 15
+#define M_BYTEENABLE5 0x7ffU
+#define V_BYTEENABLE5(x) ((x) << S_BYTEENABLE5)
+#define G_BYTEENABLE5(x) (((x) >> S_BYTEENABLE5) & M_BYTEENABLE5)
+
+#define S_REGADDR5 0
+#define M_REGADDR5 0x7fffU
+#define V_REGADDR5(x) ((x) << S_REGADDR5)
+#define G_REGADDR5(x) (((x) >> S_REGADDR5) & M_REGADDR5)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA5 0x5f60
+#define A_PCIE_SWITCH_CFG_SPACE_REQ6 0x5f64
+
+#define S_REQ6ENABLE 31
+#define V_REQ6ENABLE(x) ((x) << S_REQ6ENABLE)
+#define F_REQ6ENABLE V_REQ6ENABLE(1U)
+
+#define S_RDREQ6TYPE 26
+#define M_RDREQ6TYPE 0xfU
+#define V_RDREQ6TYPE(x) ((x) << S_RDREQ6TYPE)
+#define G_RDREQ6TYPE(x) (((x) >> S_RDREQ6TYPE) & M_RDREQ6TYPE)
+
+#define S_BYTEENABLE6 15
+#define M_BYTEENABLE6 0x7ffU
+#define V_BYTEENABLE6(x) ((x) << S_BYTEENABLE6)
+#define G_BYTEENABLE6(x) (((x) >> S_BYTEENABLE6) & M_BYTEENABLE6)
+
+#define S_REGADDR6 0
+#define M_REGADDR6 0x7fffU
+#define V_REGADDR6(x) ((x) << S_REGADDR6)
+#define G_REGADDR6(x) (((x) >> S_REGADDR6) & M_REGADDR6)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA6 0x5f68
+#define A_PCIE_SWITCH_CFG_SPACE_REQ7 0x5f6c
+
+#define S_REQ7ENABLE 31
+#define V_REQ7ENABLE(x) ((x) << S_REQ7ENABLE)
+#define F_REQ7ENABLE V_REQ7ENABLE(1U)
+
+#define S_RDREQ7TYPE 26
+#define M_RDREQ7TYPE 0xfU
+#define V_RDREQ7TYPE(x) ((x) << S_RDREQ7TYPE)
+#define G_RDREQ7TYPE(x) (((x) >> S_RDREQ7TYPE) & M_RDREQ7TYPE)
+
+#define S_BYTEENABLE7 15
+#define M_BYTEENABLE7 0x7ffU
+#define V_BYTEENABLE7(x) ((x) << S_BYTEENABLE7)
+#define G_BYTEENABLE7(x) (((x) >> S_BYTEENABLE7) & M_BYTEENABLE7)
+
+#define S_REGADDR7 0
+#define M_REGADDR7 0x7fffU
+#define V_REGADDR7(x) ((x) << S_REGADDR7)
+#define G_REGADDR7(x) (((x) >> S_REGADDR7) & M_REGADDR7)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA7 0x5f70
+#define A_PCIE_SWITCH_CFG_SPACE_REQ8 0x5f74
+
+#define S_REQ8ENABLE 31
+#define V_REQ8ENABLE(x) ((x) << S_REQ8ENABLE)
+#define F_REQ8ENABLE V_REQ8ENABLE(1U)
+
+#define S_RDREQ8TYPE 26
+#define M_RDREQ8TYPE 0xfU
+#define V_RDREQ8TYPE(x) ((x) << S_RDREQ8TYPE)
+#define G_RDREQ8TYPE(x) (((x) >> S_RDREQ8TYPE) & M_RDREQ8TYPE)
+
+#define S_BYTEENABLE8 15
+#define M_BYTEENABLE8 0x7ffU
+#define V_BYTEENABLE8(x) ((x) << S_BYTEENABLE8)
+#define G_BYTEENABLE8(x) (((x) >> S_BYTEENABLE8) & M_BYTEENABLE8)
+
+#define S_REGADDR8 0
+#define M_REGADDR8 0x7fffU
+#define V_REGADDR8(x) ((x) << S_REGADDR8)
+#define G_REGADDR8(x) (((x) >> S_REGADDR8) & M_REGADDR8)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA8 0x5f78
+#define A_PCIE_SNPS_G5_PHY_CR_REQ 0x5f7c
+
+#define S_REGSEL 31
+#define V_REGSEL(x) ((x) << S_REGSEL)
+#define F_REGSEL V_REGSEL(1U)
+
+#define S_RDENABLE 30
+#define V_RDENABLE(x) ((x) << S_RDENABLE)
+#define F_RDENABLE V_RDENABLE(1U)
+
+#define S_WRENABLE 29
+#define V_WRENABLE(x) ((x) << S_WRENABLE)
+#define F_WRENABLE V_WRENABLE(1U)
+
+#define S_AUTOINCRVAL 21
+#define M_AUTOINCRVAL 0x3U
+#define V_AUTOINCRVAL(x) ((x) << S_AUTOINCRVAL)
+#define G_AUTOINCRVAL(x) (((x) >> S_AUTOINCRVAL) & M_AUTOINCRVAL)
+
+#define S_AUTOINCR 20
+#define V_AUTOINCR(x) ((x) << S_AUTOINCR)
+#define F_AUTOINCR V_AUTOINCR(1U)
+
+#define S_PHYSEL 16
+#define M_PHYSEL 0xfU
+#define V_PHYSEL(x) ((x) << S_PHYSEL)
+#define G_PHYSEL(x) (((x) >> S_PHYSEL) & M_PHYSEL)
+
+#define S_T7_REGADDR 0
+#define M_T7_REGADDR 0xffffU
+#define V_T7_REGADDR(x) ((x) << S_T7_REGADDR)
+#define G_T7_REGADDR(x) (((x) >> S_T7_REGADDR) & M_T7_REGADDR)
+
+#define A_PCIE_SNPS_G5_PHY_CR_DATA 0x5f80
+#define A_PCIE_SNPS_G5_PHY_SRAM_CFG 0x5f84
+
+#define S_PHY3_SRAM_BOOTLOAD_BYPASS 27
+#define V_PHY3_SRAM_BOOTLOAD_BYPASS(x) ((x) << S_PHY3_SRAM_BOOTLOAD_BYPASS)
+#define F_PHY3_SRAM_BOOTLOAD_BYPASS V_PHY3_SRAM_BOOTLOAD_BYPASS(1U)
+
+#define S_PHY3_SRAM_BYPASS 26
+#define V_PHY3_SRAM_BYPASS(x) ((x) << S_PHY3_SRAM_BYPASS)
+#define F_PHY3_SRAM_BYPASS V_PHY3_SRAM_BYPASS(1U)
+
+#define S_PHY3_SRAM_ECC_EN 25
+#define V_PHY3_SRAM_ECC_EN(x) ((x) << S_PHY3_SRAM_ECC_EN)
+#define F_PHY3_SRAM_ECC_EN V_PHY3_SRAM_ECC_EN(1U)
+
+#define S_PHY3_SRAM_EXT_LD_DONE 24
+#define V_PHY3_SRAM_EXT_LD_DONE(x) ((x) << S_PHY3_SRAM_EXT_LD_DONE)
+#define F_PHY3_SRAM_EXT_LD_DONE V_PHY3_SRAM_EXT_LD_DONE(1U)
+
+#define S_PHY2_SRAM_BOOTLOAD_BYPASS 19
+#define V_PHY2_SRAM_BOOTLOAD_BYPASS(x) ((x) << S_PHY2_SRAM_BOOTLOAD_BYPASS)
+#define F_PHY2_SRAM_BOOTLOAD_BYPASS V_PHY2_SRAM_BOOTLOAD_BYPASS(1U)
+
+#define S_PHY2_SRAM_BYPASS 18
+#define V_PHY2_SRAM_BYPASS(x) ((x) << S_PHY2_SRAM_BYPASS)
+#define F_PHY2_SRAM_BYPASS V_PHY2_SRAM_BYPASS(1U)
+
+#define S_PHY2_SRAM_ECC_EN 17
+#define V_PHY2_SRAM_ECC_EN(x) ((x) << S_PHY2_SRAM_ECC_EN)
+#define F_PHY2_SRAM_ECC_EN V_PHY2_SRAM_ECC_EN(1U)
+
+#define S_PHY2_SRAM_EXT_LD_DONE 16
+#define V_PHY2_SRAM_EXT_LD_DONE(x) ((x) << S_PHY2_SRAM_EXT_LD_DONE)
+#define F_PHY2_SRAM_EXT_LD_DONE V_PHY2_SRAM_EXT_LD_DONE(1U)
+
+#define S_PHY1_SRAM_BOOTLOAD_BYPASS 11
+#define V_PHY1_SRAM_BOOTLOAD_BYPASS(x) ((x) << S_PHY1_SRAM_BOOTLOAD_BYPASS)
+#define F_PHY1_SRAM_BOOTLOAD_BYPASS V_PHY1_SRAM_BOOTLOAD_BYPASS(1U)
+
+#define S_PHY1_SRAM_BYPASS 10
+#define V_PHY1_SRAM_BYPASS(x) ((x) << S_PHY1_SRAM_BYPASS)
+#define F_PHY1_SRAM_BYPASS V_PHY1_SRAM_BYPASS(1U)
+
+#define S_PHY1_SRAM_ECC_EN 9
+#define V_PHY1_SRAM_ECC_EN(x) ((x) << S_PHY1_SRAM_ECC_EN)
+#define F_PHY1_SRAM_ECC_EN V_PHY1_SRAM_ECC_EN(1U)
+
+#define S_PHY1_SRAM_EXT_LD_DONE 8
+#define V_PHY1_SRAM_EXT_LD_DONE(x) ((x) << S_PHY1_SRAM_EXT_LD_DONE)
+#define F_PHY1_SRAM_EXT_LD_DONE V_PHY1_SRAM_EXT_LD_DONE(1U)
+
+#define S_PHY_CR_PARA_SEL 4
+#define M_PHY_CR_PARA_SEL 0xfU
+#define V_PHY_CR_PARA_SEL(x) ((x) << S_PHY_CR_PARA_SEL)
+#define G_PHY_CR_PARA_SEL(x) (((x) >> S_PHY_CR_PARA_SEL) & M_PHY_CR_PARA_SEL)
+
+#define S_PHY0_SRAM_BOOTLOAD_BYPASS 3
+#define V_PHY0_SRAM_BOOTLOAD_BYPASS(x) ((x) << S_PHY0_SRAM_BOOTLOAD_BYPASS)
+#define F_PHY0_SRAM_BOOTLOAD_BYPASS V_PHY0_SRAM_BOOTLOAD_BYPASS(1U)
+
+#define S_PHY0_SRAM_BYPASS 2
+#define V_PHY0_SRAM_BYPASS(x) ((x) << S_PHY0_SRAM_BYPASS)
+#define F_PHY0_SRAM_BYPASS V_PHY0_SRAM_BYPASS(1U)
+
+#define S_PHY0_SRAM_ECC_EN 1
+#define V_PHY0_SRAM_ECC_EN(x) ((x) << S_PHY0_SRAM_ECC_EN)
+#define F_PHY0_SRAM_ECC_EN V_PHY0_SRAM_ECC_EN(1U)
+
+#define S_PHY0_SRAM_EXT_LD_DONE 0
+#define V_PHY0_SRAM_EXT_LD_DONE(x) ((x) << S_PHY0_SRAM_EXT_LD_DONE)
+#define F_PHY0_SRAM_EXT_LD_DONE V_PHY0_SRAM_EXT_LD_DONE(1U)
+
+#define A_PCIE_SNPS_G5_PHY_SRAM_STS 0x5f88
+
+#define S_PHY3_SRAM_INIT_DONE 3
+#define V_PHY3_SRAM_INIT_DONE(x) ((x) << S_PHY3_SRAM_INIT_DONE)
+#define F_PHY3_SRAM_INIT_DONE V_PHY3_SRAM_INIT_DONE(1U)
+
+#define S_PHY2_SRAM_INIT_DONE 2
+#define V_PHY2_SRAM_INIT_DONE(x) ((x) << S_PHY2_SRAM_INIT_DONE)
+#define F_PHY2_SRAM_INIT_DONE V_PHY2_SRAM_INIT_DONE(1U)
+
+#define S_PHY1_SRAM_INIT_DONE 1
+#define V_PHY1_SRAM_INIT_DONE(x) ((x) << S_PHY1_SRAM_INIT_DONE)
+#define F_PHY1_SRAM_INIT_DONE V_PHY1_SRAM_INIT_DONE(1U)
+
+#define S_PHY0_SRAM_INIT_DONE 0
+#define V_PHY0_SRAM_INIT_DONE(x) ((x) << S_PHY0_SRAM_INIT_DONE)
+#define F_PHY0_SRAM_INIT_DONE V_PHY0_SRAM_INIT_DONE(1U)
+
+#define A_PCIE_SNPS_G5_PHY_CTRL_PHY_0_TO_3 0x5f90
+#define A_PCIE_SNPS_G5_PHY_CTRL_PHY_0_DATA 0x5f94
+#define A_PCIE_SNPS_G5_PHY_CTRL_PHY_1_DATA 0x5f98
+#define A_PCIE_SNPS_G5_PHY_CTRL_PHY_2_DATA 0x5f9c
+#define A_PCIE_SNPS_G5_PHY_CTRL_PHY_3_DATA 0x5fa0
+#define A_PCIE_SNPS_G5_PHY_DEFAULTS 0x5fa4
+#define A_PCIE_SNPS_G5_PHY_0_VALUES 0x5fa8
+
+#define S_RX_TERM_OFFSET 28
+#define V_RX_TERM_OFFSET(x) ((x) << S_RX_TERM_OFFSET)
+#define F_RX_TERM_OFFSET V_RX_TERM_OFFSET(1U)
+
+#define S_REFB_RAW_CLK_DIV2_EN 27
+#define V_REFB_RAW_CLK_DIV2_EN(x) ((x) << S_REFB_RAW_CLK_DIV2_EN)
+#define F_REFB_RAW_CLK_DIV2_EN V_REFB_RAW_CLK_DIV2_EN(1U)
+
+#define S_REFB_RANGE 23
+#define M_REFB_RANGE 0xfU
+#define V_REFB_RANGE(x) ((x) << S_REFB_RANGE)
+#define G_REFB_RANGE(x) (((x) >> S_REFB_RANGE) & M_REFB_RANGE)
+
+#define S_REFB_LANE_CLK_EN 22
+#define V_REFB_LANE_CLK_EN(x) ((x) << S_REFB_LANE_CLK_EN)
+#define F_REFB_LANE_CLK_EN V_REFB_LANE_CLK_EN(1U)
+
+#define S_REFB_CLK_DIV2_EN 21
+#define V_REFB_CLK_DIV2_EN(x) ((x) << S_REFB_CLK_DIV2_EN)
+#define F_REFB_CLK_DIV2_EN V_REFB_CLK_DIV2_EN(1U)
+
+#define S_REFA_RAW_CLK_DIV2_EN 20
+#define V_REFA_RAW_CLK_DIV2_EN(x) ((x) << S_REFA_RAW_CLK_DIV2_EN)
+#define F_REFA_RAW_CLK_DIV2_EN V_REFA_RAW_CLK_DIV2_EN(1U)
+
+#define S_REFA_RANGE 16
+#define M_REFA_RANGE 0xfU
+#define V_REFA_RANGE(x) ((x) << S_REFA_RANGE)
+#define G_REFA_RANGE(x) (((x) >> S_REFA_RANGE) & M_REFA_RANGE)
+
+#define S_REFA_LANE_CLK_EN 15
+#define V_REFA_LANE_CLK_EN(x) ((x) << S_REFA_LANE_CLK_EN)
+#define F_REFA_LANE_CLK_EN V_REFA_LANE_CLK_EN(1U)
+
+#define S_REFA_CLK_DIV2_EN 14
+#define V_REFA_CLK_DIV2_EN(x) ((x) << S_REFA_CLK_DIV2_EN)
+#define F_REFA_CLK_DIV2_EN V_REFA_CLK_DIV2_EN(1U)
+
+#define S_NOMINAL_VPH_SEL 10
+#define M_NOMINAL_VPH_SEL 0x3U
+#define V_NOMINAL_VPH_SEL(x) ((x) << S_NOMINAL_VPH_SEL)
+#define G_NOMINAL_VPH_SEL(x) (((x) >> S_NOMINAL_VPH_SEL) & M_NOMINAL_VPH_SEL)
+
+#define S_NOMINAL_VP_SEL 8
+#define M_NOMINAL_VP_SEL 0x3U
+#define V_NOMINAL_VP_SEL(x) ((x) << S_NOMINAL_VP_SEL)
+#define G_NOMINAL_VP_SEL(x) (((x) >> S_NOMINAL_VP_SEL) & M_NOMINAL_VP_SEL)
+
+#define S_MPLLB_WORD_CLK_EN 7
+#define V_MPLLB_WORD_CLK_EN(x) ((x) << S_MPLLB_WORD_CLK_EN)
+#define F_MPLLB_WORD_CLK_EN V_MPLLB_WORD_CLK_EN(1U)
+
+#define S_MPLLB_SSC_EN 6
+#define V_MPLLB_SSC_EN(x) ((x) << S_MPLLB_SSC_EN)
+#define F_MPLLB_SSC_EN V_MPLLB_SSC_EN(1U)
+
+#define S_MPLLB_SHORT_LOCK_EN 5
+#define V_MPLLB_SHORT_LOCK_EN(x) ((x) << S_MPLLB_SHORT_LOCK_EN)
+#define F_MPLLB_SHORT_LOCK_EN V_MPLLB_SHORT_LOCK_EN(1U)
+
+#define S_MPLLB_FORCE_EN 4
+#define V_MPLLB_FORCE_EN(x) ((x) << S_MPLLB_FORCE_EN)
+#define F_MPLLB_FORCE_EN V_MPLLB_FORCE_EN(1U)
+
+#define S_MPLLA_WORD_CLK_EN 3
+#define V_MPLLA_WORD_CLK_EN(x) ((x) << S_MPLLA_WORD_CLK_EN)
+#define F_MPLLA_WORD_CLK_EN V_MPLLA_WORD_CLK_EN(1U)
+
+#define S_MPLLA_SSC_EN 2
+#define V_MPLLA_SSC_EN(x) ((x) << S_MPLLA_SSC_EN)
+#define F_MPLLA_SSC_EN V_MPLLA_SSC_EN(1U)
+
+#define S_MPLLA_SHORT_LOCK_EN 1
+#define V_MPLLA_SHORT_LOCK_EN(x) ((x) << S_MPLLA_SHORT_LOCK_EN)
+#define F_MPLLA_SHORT_LOCK_EN V_MPLLA_SHORT_LOCK_EN(1U)
+
+#define S_MPLLA_FORCE_EN 0
+#define V_MPLLA_FORCE_EN(x) ((x) << S_MPLLA_FORCE_EN)
+#define F_MPLLA_FORCE_EN V_MPLLA_FORCE_EN(1U)
+
+#define A_PCIE_SNPS_G5_PHY_1_VALUES 0x5fac
+
+#define S_REF_ALT1_CLK_M 13
+#define V_REF_ALT1_CLK_M(x) ((x) << S_REF_ALT1_CLK_M)
+#define F_REF_ALT1_CLK_M V_REF_ALT1_CLK_M(1U)
+
+#define S_REF_ALT1_CLK_P 12
+#define V_REF_ALT1_CLK_P(x) ((x) << S_REF_ALT1_CLK_P)
+#define F_REF_ALT1_CLK_P V_REF_ALT1_CLK_P(1U)
+
+#define A_PCIE_SNPS_G5_PHY_2_VALUES 0x5fb0
+#define A_PCIE_SNPS_G5_PHY_3_VALUES 0x5fb4
+#define A_PCIE_SNPS_G5_PHY_0_RX_LANEPLL_BYPASS_MODE 0x5fb8
+
+#define S_T7_LANE3 15
+#define M_T7_LANE3 0x1fU
+#define V_T7_LANE3(x) ((x) << S_T7_LANE3)
+#define G_T7_LANE3(x) (((x) >> S_T7_LANE3) & M_T7_LANE3)
+
+#define S_T7_LANE2 10
+#define M_T7_LANE2 0x1fU
+#define V_T7_LANE2(x) ((x) << S_T7_LANE2)
+#define G_T7_LANE2(x) (((x) >> S_T7_LANE2) & M_T7_LANE2)
+
+#define S_T7_LANE1 5
+#define M_T7_LANE1 0x1fU
+#define V_T7_LANE1(x) ((x) << S_T7_LANE1)
+#define G_T7_LANE1(x) (((x) >> S_T7_LANE1) & M_T7_LANE1)
+
+#define S_T7_LANE0 0
+#define M_T7_LANE0 0x1fU
+#define V_T7_LANE0(x) ((x) << S_T7_LANE0)
+#define G_T7_LANE0(x) (((x) >> S_T7_LANE0) & M_T7_LANE0)
+
+#define A_PCIE_SNPS_G5_PHY_1_RX_LANEPLL_BYPASS_MODE 0x5fbc
+#define A_PCIE_SNPS_G5_PHY_2_RX_LANEPLL_BYPASS_MODE 0x5fc0
+#define A_PCIE_SNPS_G5_PHY_3_RX_LANEPLL_BYPASS_MODE 0x5fc4
+#define A_PCIE_SNPS_G5_PHY_0_1_RX_LANEPLL_SRC_SEL 0x5fc8
+
+#define S_LANE7_LANEPLL_SRC_SEL 28
+#define M_LANE7_LANEPLL_SRC_SEL 0xfU
+#define V_LANE7_LANEPLL_SRC_SEL(x) ((x) << S_LANE7_LANEPLL_SRC_SEL)
+#define G_LANE7_LANEPLL_SRC_SEL(x) (((x) >> S_LANE7_LANEPLL_SRC_SEL) & M_LANE7_LANEPLL_SRC_SEL)
+
+#define S_LANE6_LANEPLL_SRC_SEL 24
+#define M_LANE6_LANEPLL_SRC_SEL 0xfU
+#define V_LANE6_LANEPLL_SRC_SEL(x) ((x) << S_LANE6_LANEPLL_SRC_SEL)
+#define G_LANE6_LANEPLL_SRC_SEL(x) (((x) >> S_LANE6_LANEPLL_SRC_SEL) & M_LANE6_LANEPLL_SRC_SEL)
+
+#define S_LANE5_LANEPLL_SRC_SEL 20
+#define M_LANE5_LANEPLL_SRC_SEL 0xfU
+#define V_LANE5_LANEPLL_SRC_SEL(x) ((x) << S_LANE5_LANEPLL_SRC_SEL)
+#define G_LANE5_LANEPLL_SRC_SEL(x) (((x) >> S_LANE5_LANEPLL_SRC_SEL) & M_LANE5_LANEPLL_SRC_SEL)
+
+#define S_LANE4_LANEPLL_SRC_SEL 16
+#define M_LANE4_LANEPLL_SRC_SEL 0xfU
+#define V_LANE4_LANEPLL_SRC_SEL(x) ((x) << S_LANE4_LANEPLL_SRC_SEL)
+#define G_LANE4_LANEPLL_SRC_SEL(x) (((x) >> S_LANE4_LANEPLL_SRC_SEL) & M_LANE4_LANEPLL_SRC_SEL)
+
+#define S_LANE3_LANEPLL_SRC_SEL 12
+#define M_LANE3_LANEPLL_SRC_SEL 0xfU
+#define V_LANE3_LANEPLL_SRC_SEL(x) ((x) << S_LANE3_LANEPLL_SRC_SEL)
+#define G_LANE3_LANEPLL_SRC_SEL(x) (((x) >> S_LANE3_LANEPLL_SRC_SEL) & M_LANE3_LANEPLL_SRC_SEL)
+
+#define S_LANE2_LANEPLL_SRC_SEL 8
+#define M_LANE2_LANEPLL_SRC_SEL 0xfU
+#define V_LANE2_LANEPLL_SRC_SEL(x) ((x) << S_LANE2_LANEPLL_SRC_SEL)
+#define G_LANE2_LANEPLL_SRC_SEL(x) (((x) >> S_LANE2_LANEPLL_SRC_SEL) & M_LANE2_LANEPLL_SRC_SEL)
+
+#define S_LANE1_LANEPLL_SRC_SEL 4
+#define M_LANE1_LANEPLL_SRC_SEL 0xfU
+#define V_LANE1_LANEPLL_SRC_SEL(x) ((x) << S_LANE1_LANEPLL_SRC_SEL)
+#define G_LANE1_LANEPLL_SRC_SEL(x) (((x) >> S_LANE1_LANEPLL_SRC_SEL) & M_LANE1_LANEPLL_SRC_SEL)
+
+#define S_LANE0_LANEPLL_SRC_SEL 0
+#define M_LANE0_LANEPLL_SRC_SEL 0xfU
+#define V_LANE0_LANEPLL_SRC_SEL(x) ((x) << S_LANE0_LANEPLL_SRC_SEL)
+#define G_LANE0_LANEPLL_SRC_SEL(x) (((x) >> S_LANE0_LANEPLL_SRC_SEL) & M_LANE0_LANEPLL_SRC_SEL)
+
+#define A_PCIE_SNPS_G5_PHY_2_3_RX_LANEPLL_SRC_SEL 0x5fcc
+#define A_PCIE_SNPS_G5_PHY_RX_DECERR 0x5fd0
+
+#define S_LANE15_REC_OVRD_8B10B_DECERR 30
+#define M_LANE15_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE15_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE15_REC_OVRD_8B10B_DECERR)
+#define G_LANE15_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE15_REC_OVRD_8B10B_DECERR) & M_LANE15_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE14_REC_OVRD_8B10B_DECERR 28
+#define M_LANE14_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE14_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE14_REC_OVRD_8B10B_DECERR)
+#define G_LANE14_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE14_REC_OVRD_8B10B_DECERR) & M_LANE14_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE13_REC_OVRD_8B10B_DECERR 26
+#define M_LANE13_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE13_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE13_REC_OVRD_8B10B_DECERR)
+#define G_LANE13_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE13_REC_OVRD_8B10B_DECERR) & M_LANE13_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE12_REC_OVRD_8B10B_DECERR 24
+#define M_LANE12_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE12_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE12_REC_OVRD_8B10B_DECERR)
+#define G_LANE12_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE12_REC_OVRD_8B10B_DECERR) & M_LANE12_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE11_REC_OVRD_8B10B_DECERR 22
+#define M_LANE11_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE11_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE11_REC_OVRD_8B10B_DECERR)
+#define G_LANE11_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE11_REC_OVRD_8B10B_DECERR) & M_LANE11_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE10_REC_OVRD_8B10B_DECERR 20
+#define M_LANE10_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE10_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE10_REC_OVRD_8B10B_DECERR)
+#define G_LANE10_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE10_REC_OVRD_8B10B_DECERR) & M_LANE10_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE9_REC_OVRD_8B10B_DECERR 18
+#define M_LANE9_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE9_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE9_REC_OVRD_8B10B_DECERR)
+#define G_LANE9_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE9_REC_OVRD_8B10B_DECERR) & M_LANE9_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE8_REC_OVRD_8B10B_DECERR 16
+#define M_LANE8_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE8_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE8_REC_OVRD_8B10B_DECERR)
+#define G_LANE8_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE8_REC_OVRD_8B10B_DECERR) & M_LANE8_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE7_REC_OVRD_8B10B_DECERR 14
+#define M_LANE7_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE7_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE7_REC_OVRD_8B10B_DECERR)
+#define G_LANE7_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE7_REC_OVRD_8B10B_DECERR) & M_LANE7_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE6_REC_OVRD_8B10B_DECERR 12
+#define M_LANE6_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE6_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE6_REC_OVRD_8B10B_DECERR)
+#define G_LANE6_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE6_REC_OVRD_8B10B_DECERR) & M_LANE6_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE5_REC_OVRD_8B10B_DECERR 10
+#define M_LANE5_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE5_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE5_REC_OVRD_8B10B_DECERR)
+#define G_LANE5_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE5_REC_OVRD_8B10B_DECERR) & M_LANE5_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE4_REC_OVRD_8B10B_DECERR 8
+#define M_LANE4_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE4_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE4_REC_OVRD_8B10B_DECERR)
+#define G_LANE4_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE4_REC_OVRD_8B10B_DECERR) & M_LANE4_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE3_REC_OVRD_8B10B_DECERR 6
+#define M_LANE3_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE3_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE3_REC_OVRD_8B10B_DECERR)
+#define G_LANE3_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE3_REC_OVRD_8B10B_DECERR) & M_LANE3_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE2_REC_OVRD_8B10B_DECERR 4
+#define M_LANE2_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE2_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE2_REC_OVRD_8B10B_DECERR)
+#define G_LANE2_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE2_REC_OVRD_8B10B_DECERR) & M_LANE2_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE1_REC_OVRD_8B10B_DECERR 2
+#define M_LANE1_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE1_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE1_REC_OVRD_8B10B_DECERR)
+#define G_LANE1_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE1_REC_OVRD_8B10B_DECERR) & M_LANE1_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE0_REC_OVRD_8B10B_DECERR 0
+#define M_LANE0_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE0_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE0_REC_OVRD_8B10B_DECERR)
+#define G_LANE0_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE0_REC_OVRD_8B10B_DECERR) & M_LANE0_REC_OVRD_8B10B_DECERR)
+
+#define A_PCIE_SNPS_G5_PHY_TX2RX_LOOPBK_REC_OVRD_EN 0x5fd4
+
+#define S_LANE15_REC_OVRD_EN 31
+#define V_LANE15_REC_OVRD_EN(x) ((x) << S_LANE15_REC_OVRD_EN)
+#define F_LANE15_REC_OVRD_EN V_LANE15_REC_OVRD_EN(1U)
+
+#define S_LANE14_REC_OVRD_EN 30
+#define V_LANE14_REC_OVRD_EN(x) ((x) << S_LANE14_REC_OVRD_EN)
+#define F_LANE14_REC_OVRD_EN V_LANE14_REC_OVRD_EN(1U)
+
+#define S_LANE13_REC_OVRD_EN 29
+#define V_LANE13_REC_OVRD_EN(x) ((x) << S_LANE13_REC_OVRD_EN)
+#define F_LANE13_REC_OVRD_EN V_LANE13_REC_OVRD_EN(1U)
+
+#define S_LANE11_REC_OVRD_EN 27
+#define V_LANE11_REC_OVRD_EN(x) ((x) << S_LANE11_REC_OVRD_EN)
+#define F_LANE11_REC_OVRD_EN V_LANE11_REC_OVRD_EN(1U)
+
+#define S_LANE12_REC_OVRD_EN 28
+#define V_LANE12_REC_OVRD_EN(x) ((x) << S_LANE12_REC_OVRD_EN)
+#define F_LANE12_REC_OVRD_EN V_LANE12_REC_OVRD_EN(1U)
+
+#define S_LANE10_REC_OVRD_EN 26
+#define V_LANE10_REC_OVRD_EN(x) ((x) << S_LANE10_REC_OVRD_EN)
+#define F_LANE10_REC_OVRD_EN V_LANE10_REC_OVRD_EN(1U)
+
+#define S_LANE9_REC_OVRD_EN 25
+#define V_LANE9_REC_OVRD_EN(x) ((x) << S_LANE9_REC_OVRD_EN)
+#define F_LANE9_REC_OVRD_EN V_LANE9_REC_OVRD_EN(1U)
+
+#define S_LANE8_REC_OVRD_EN 24
+#define V_LANE8_REC_OVRD_EN(x) ((x) << S_LANE8_REC_OVRD_EN)
+#define F_LANE8_REC_OVRD_EN V_LANE8_REC_OVRD_EN(1U)
+
+#define S_LANE7_REC_OVRD_EN 23
+#define V_LANE7_REC_OVRD_EN(x) ((x) << S_LANE7_REC_OVRD_EN)
+#define F_LANE7_REC_OVRD_EN V_LANE7_REC_OVRD_EN(1U)
+
+#define S_LANE6_REC_OVRD_EN 22
+#define V_LANE6_REC_OVRD_EN(x) ((x) << S_LANE6_REC_OVRD_EN)
+#define F_LANE6_REC_OVRD_EN V_LANE6_REC_OVRD_EN(1U)
+
+#define S_LANE5_REC_OVRD_EN 21
+#define V_LANE5_REC_OVRD_EN(x) ((x) << S_LANE5_REC_OVRD_EN)
+#define F_LANE5_REC_OVRD_EN V_LANE5_REC_OVRD_EN(1U)
+
+#define S_LANE4_REC_OVRD_EN 20
+#define V_LANE4_REC_OVRD_EN(x) ((x) << S_LANE4_REC_OVRD_EN)
+#define F_LANE4_REC_OVRD_EN V_LANE4_REC_OVRD_EN(1U)
+
+#define S_LANE3_REC_OVRD_EN 19
+#define V_LANE3_REC_OVRD_EN(x) ((x) << S_LANE3_REC_OVRD_EN)
+#define F_LANE3_REC_OVRD_EN V_LANE3_REC_OVRD_EN(1U)
+
+#define S_LANE2_REC_OVRD_EN 18
+#define V_LANE2_REC_OVRD_EN(x) ((x) << S_LANE2_REC_OVRD_EN)
+#define F_LANE2_REC_OVRD_EN V_LANE2_REC_OVRD_EN(1U)
+
+#define S_LANE1_REC_OVRD_EN 17
+#define V_LANE1_REC_OVRD_EN(x) ((x) << S_LANE1_REC_OVRD_EN)
+#define F_LANE1_REC_OVRD_EN V_LANE1_REC_OVRD_EN(1U)
+
+#define S_LANE0_REC_OVRD_EN 16
+#define V_LANE0_REC_OVRD_EN(x) ((x) << S_LANE0_REC_OVRD_EN)
+#define F_LANE0_REC_OVRD_EN V_LANE0_REC_OVRD_EN(1U)
+
+#define S_LANE15_TX2RX_LOOPBK 15
+#define V_LANE15_TX2RX_LOOPBK(x) ((x) << S_LANE15_TX2RX_LOOPBK)
+#define F_LANE15_TX2RX_LOOPBK V_LANE15_TX2RX_LOOPBK(1U)
+
+#define S_LANE14_TX2RX_LOOPBK 14
+#define V_LANE14_TX2RX_LOOPBK(x) ((x) << S_LANE14_TX2RX_LOOPBK)
+#define F_LANE14_TX2RX_LOOPBK V_LANE14_TX2RX_LOOPBK(1U)
+
+#define S_LANE13_TX2RX_LOOPBK 13
+#define V_LANE13_TX2RX_LOOPBK(x) ((x) << S_LANE13_TX2RX_LOOPBK)
+#define F_LANE13_TX2RX_LOOPBK V_LANE13_TX2RX_LOOPBK(1U)
+
+#define S_LANE12_TX2RX_LOOPBK 12
+#define V_LANE12_TX2RX_LOOPBK(x) ((x) << S_LANE12_TX2RX_LOOPBK)
+#define F_LANE12_TX2RX_LOOPBK V_LANE12_TX2RX_LOOPBK(1U)
+
+#define S_LANE11_TX2RX_LOOPBK 11
+#define V_LANE11_TX2RX_LOOPBK(x) ((x) << S_LANE11_TX2RX_LOOPBK)
+#define F_LANE11_TX2RX_LOOPBK V_LANE11_TX2RX_LOOPBK(1U)
+
+#define S_LANE10_TX2RX_LOOPBK 10
+#define V_LANE10_TX2RX_LOOPBK(x) ((x) << S_LANE10_TX2RX_LOOPBK)
+#define F_LANE10_TX2RX_LOOPBK V_LANE10_TX2RX_LOOPBK(1U)
+
+#define S_LANE9_TX2RX_LOOPBK 9
+#define V_LANE9_TX2RX_LOOPBK(x) ((x) << S_LANE9_TX2RX_LOOPBK)
+#define F_LANE9_TX2RX_LOOPBK V_LANE9_TX2RX_LOOPBK(1U)
+
+#define S_LANE8_TX2RX_LOOPBK 8
+#define V_LANE8_TX2RX_LOOPBK(x) ((x) << S_LANE8_TX2RX_LOOPBK)
+#define F_LANE8_TX2RX_LOOPBK V_LANE8_TX2RX_LOOPBK(1U)
+
+#define S_LANE7_TX2RX_LOOPBK 7
+#define V_LANE7_TX2RX_LOOPBK(x) ((x) << S_LANE7_TX2RX_LOOPBK)
+#define F_LANE7_TX2RX_LOOPBK V_LANE7_TX2RX_LOOPBK(1U)
+
+#define S_LANE6_TX2RX_LOOPBK 6
+#define V_LANE6_TX2RX_LOOPBK(x) ((x) << S_LANE6_TX2RX_LOOPBK)
+#define F_LANE6_TX2RX_LOOPBK V_LANE6_TX2RX_LOOPBK(1U)
+
+#define S_LANE5_TX2RX_LOOPBK 5
+#define V_LANE5_TX2RX_LOOPBK(x) ((x) << S_LANE5_TX2RX_LOOPBK)
+#define F_LANE5_TX2RX_LOOPBK V_LANE5_TX2RX_LOOPBK(1U)
+
+#define S_LANE4_TX2RX_LOOPBK 4
+#define V_LANE4_TX2RX_LOOPBK(x) ((x) << S_LANE4_TX2RX_LOOPBK)
+#define F_LANE4_TX2RX_LOOPBK V_LANE4_TX2RX_LOOPBK(1U)
+
+#define S_LANE3_TX2RX_LOOPBK 3
+#define V_LANE3_TX2RX_LOOPBK(x) ((x) << S_LANE3_TX2RX_LOOPBK)
+#define F_LANE3_TX2RX_LOOPBK V_LANE3_TX2RX_LOOPBK(1U)
+
+#define S_LANE2_TX2RX_LOOPBK 2
+#define V_LANE2_TX2RX_LOOPBK(x) ((x) << S_LANE2_TX2RX_LOOPBK)
+#define F_LANE2_TX2RX_LOOPBK V_LANE2_TX2RX_LOOPBK(1U)
+
+#define S_LANE1_TX2RX_LOOPBK 1
+#define V_LANE1_TX2RX_LOOPBK(x) ((x) << S_LANE1_TX2RX_LOOPBK)
+#define F_LANE1_TX2RX_LOOPBK V_LANE1_TX2RX_LOOPBK(1U)
+
+#define S_LANE0_TX2RX_LOOPBK 0
+#define V_LANE0_TX2RX_LOOPBK(x) ((x) << S_LANE0_TX2RX_LOOPBK)
+#define F_LANE0_TX2RX_LOOPBK V_LANE0_TX2RX_LOOPBK(1U)
+
+#define A_PCIE_PHY_TX_DISABLE_UPCS_PIPE_CONFIG 0x5fd8
+
+#define S_UPCS_PIPE_CONFIG 16
+#define M_UPCS_PIPE_CONFIG 0xffffU
+#define V_UPCS_PIPE_CONFIG(x) ((x) << S_UPCS_PIPE_CONFIG)
+#define G_UPCS_PIPE_CONFIG(x) (((x) >> S_UPCS_PIPE_CONFIG) & M_UPCS_PIPE_CONFIG)
+
+#define S_TX15_DISABLE 15
+#define V_TX15_DISABLE(x) ((x) << S_TX15_DISABLE)
+#define F_TX15_DISABLE V_TX15_DISABLE(1U)
+
+#define S_TX14_DISABLE 14
+#define V_TX14_DISABLE(x) ((x) << S_TX14_DISABLE)
+#define F_TX14_DISABLE V_TX14_DISABLE(1U)
+
+#define S_TX13_DISABLE 13
+#define V_TX13_DISABLE(x) ((x) << S_TX13_DISABLE)
+#define F_TX13_DISABLE V_TX13_DISABLE(1U)
+
+#define S_TX12_DISABLE 12
+#define V_TX12_DISABLE(x) ((x) << S_TX12_DISABLE)
+#define F_TX12_DISABLE V_TX12_DISABLE(1U)
+
+#define S_TX11_DISABLE 11
+#define V_TX11_DISABLE(x) ((x) << S_TX11_DISABLE)
+#define F_TX11_DISABLE V_TX11_DISABLE(1U)
+
+#define S_TX10_DISABLE 10
+#define V_TX10_DISABLE(x) ((x) << S_TX10_DISABLE)
+#define F_TX10_DISABLE V_TX10_DISABLE(1U)
+
+#define S_TX9_DISABLE 9
+#define V_TX9_DISABLE(x) ((x) << S_TX9_DISABLE)
+#define F_TX9_DISABLE V_TX9_DISABLE(1U)
+
+#define S_TX8_DISABLE 8
+#define V_TX8_DISABLE(x) ((x) << S_TX8_DISABLE)
+#define F_TX8_DISABLE V_TX8_DISABLE(1U)
+
+#define S_TX7_DISABLE 7
+#define V_TX7_DISABLE(x) ((x) << S_TX7_DISABLE)
+#define F_TX7_DISABLE V_TX7_DISABLE(1U)
+
+#define S_TX6_DISABLE 6
+#define V_TX6_DISABLE(x) ((x) << S_TX6_DISABLE)
+#define F_TX6_DISABLE V_TX6_DISABLE(1U)
+
+#define S_TX5_DISABLE 5
+#define V_TX5_DISABLE(x) ((x) << S_TX5_DISABLE)
+#define F_TX5_DISABLE V_TX5_DISABLE(1U)
+
+#define S_TX4_DISABLE 4
+#define V_TX4_DISABLE(x) ((x) << S_TX4_DISABLE)
+#define F_TX4_DISABLE V_TX4_DISABLE(1U)
+
+#define S_TX3_DISABLE 3
+#define V_TX3_DISABLE(x) ((x) << S_TX3_DISABLE)
+#define F_TX3_DISABLE V_TX3_DISABLE(1U)
+
+#define S_TX2_DISABLE 2
+#define V_TX2_DISABLE(x) ((x) << S_TX2_DISABLE)
+#define F_TX2_DISABLE V_TX2_DISABLE(1U)
+
+#define S_TX1_DISABLE 1
+#define V_TX1_DISABLE(x) ((x) << S_TX1_DISABLE)
+#define F_TX1_DISABLE V_TX1_DISABLE(1U)
+
+#define S_TX0_DISABLE 0
+#define V_TX0_DISABLE(x) ((x) << S_TX0_DISABLE)
+#define F_TX0_DISABLE V_TX0_DISABLE(1U)
+
#define A_PCIE_PDEBUG_REG_0X0 0x0
#define A_PCIE_PDEBUG_REG_0X1 0x1
#define A_PCIE_PDEBUG_REG_0X2 0x2
@@ -11668,6 +14476,40 @@
#define V_GPIO0_OUT_VAL(x) ((x) << S_GPIO0_OUT_VAL)
#define F_GPIO0_OUT_VAL V_GPIO0_OUT_VAL(1U)
+#define A_DBG_GPIO_OUT 0x6010
+
+#define S_GPIO23_OUT_VAL 23
+#define V_GPIO23_OUT_VAL(x) ((x) << S_GPIO23_OUT_VAL)
+#define F_GPIO23_OUT_VAL V_GPIO23_OUT_VAL(1U)
+
+#define S_GPIO22_OUT_VAL 22
+#define V_GPIO22_OUT_VAL(x) ((x) << S_GPIO22_OUT_VAL)
+#define F_GPIO22_OUT_VAL V_GPIO22_OUT_VAL(1U)
+
+#define S_GPIO21_OUT_VAL 21
+#define V_GPIO21_OUT_VAL(x) ((x) << S_GPIO21_OUT_VAL)
+#define F_GPIO21_OUT_VAL V_GPIO21_OUT_VAL(1U)
+
+#define S_GPIO20_OUT_VAL 20
+#define V_GPIO20_OUT_VAL(x) ((x) << S_GPIO20_OUT_VAL)
+#define F_GPIO20_OUT_VAL V_GPIO20_OUT_VAL(1U)
+
+#define S_T7_GPIO19_OUT_VAL 19
+#define V_T7_GPIO19_OUT_VAL(x) ((x) << S_T7_GPIO19_OUT_VAL)
+#define F_T7_GPIO19_OUT_VAL V_T7_GPIO19_OUT_VAL(1U)
+
+#define S_T7_GPIO18_OUT_VAL 18
+#define V_T7_GPIO18_OUT_VAL(x) ((x) << S_T7_GPIO18_OUT_VAL)
+#define F_T7_GPIO18_OUT_VAL V_T7_GPIO18_OUT_VAL(1U)
+
+#define S_T7_GPIO17_OUT_VAL 17
+#define V_T7_GPIO17_OUT_VAL(x) ((x) << S_T7_GPIO17_OUT_VAL)
+#define F_T7_GPIO17_OUT_VAL V_T7_GPIO17_OUT_VAL(1U)
+
+#define S_T7_GPIO16_OUT_VAL 16
+#define V_T7_GPIO16_OUT_VAL(x) ((x) << S_T7_GPIO16_OUT_VAL)
+#define F_T7_GPIO16_OUT_VAL V_T7_GPIO16_OUT_VAL(1U)
+
#define A_DBG_GPIO_IN 0x6014
#define S_GPIO15_CHG_DET 31
@@ -11798,6 +14640,38 @@
#define V_GPIO0_IN(x) ((x) << S_GPIO0_IN)
#define F_GPIO0_IN V_GPIO0_IN(1U)
+#define S_GPIO23_IN 23
+#define V_GPIO23_IN(x) ((x) << S_GPIO23_IN)
+#define F_GPIO23_IN V_GPIO23_IN(1U)
+
+#define S_GPIO22_IN 22
+#define V_GPIO22_IN(x) ((x) << S_GPIO22_IN)
+#define F_GPIO22_IN V_GPIO22_IN(1U)
+
+#define S_GPIO21_IN 21
+#define V_GPIO21_IN(x) ((x) << S_GPIO21_IN)
+#define F_GPIO21_IN V_GPIO21_IN(1U)
+
+#define S_GPIO20_IN 20
+#define V_GPIO20_IN(x) ((x) << S_GPIO20_IN)
+#define F_GPIO20_IN V_GPIO20_IN(1U)
+
+#define S_T7_GPIO19_IN 19
+#define V_T7_GPIO19_IN(x) ((x) << S_T7_GPIO19_IN)
+#define F_T7_GPIO19_IN V_T7_GPIO19_IN(1U)
+
+#define S_T7_GPIO18_IN 18
+#define V_T7_GPIO18_IN(x) ((x) << S_T7_GPIO18_IN)
+#define F_T7_GPIO18_IN V_T7_GPIO18_IN(1U)
+
+#define S_T7_GPIO17_IN 17
+#define V_T7_GPIO17_IN(x) ((x) << S_T7_GPIO17_IN)
+#define F_T7_GPIO17_IN V_T7_GPIO17_IN(1U)
+
+#define S_T7_GPIO16_IN 16
+#define V_T7_GPIO16_IN(x) ((x) << S_T7_GPIO16_IN)
+#define F_T7_GPIO16_IN V_T7_GPIO16_IN(1U)
+
#define A_DBG_INT_ENABLE 0x6018
#define S_IBM_FDL_FAIL_INT_ENBL 25
@@ -11920,6 +14794,58 @@
#define V_GPIO16(x) ((x) << S_GPIO16)
#define F_GPIO16 V_GPIO16(1U)
+#define S_USBFIFOPARERR 12
+#define V_USBFIFOPARERR(x) ((x) << S_USBFIFOPARERR)
+#define F_USBFIFOPARERR V_USBFIFOPARERR(1U)
+
+#define S_T7_IBM_FDL_FAIL_INT_ENBL 11
+#define V_T7_IBM_FDL_FAIL_INT_ENBL(x) ((x) << S_T7_IBM_FDL_FAIL_INT_ENBL)
+#define F_T7_IBM_FDL_FAIL_INT_ENBL V_T7_IBM_FDL_FAIL_INT_ENBL(1U)
+
+#define S_T7_PLL_LOCK_LOST_INT_ENBL 10
+#define V_T7_PLL_LOCK_LOST_INT_ENBL(x) ((x) << S_T7_PLL_LOCK_LOST_INT_ENBL)
+#define F_T7_PLL_LOCK_LOST_INT_ENBL V_T7_PLL_LOCK_LOST_INT_ENBL(1U)
+
+#define S_M1_LOCK 9
+#define V_M1_LOCK(x) ((x) << S_M1_LOCK)
+#define F_M1_LOCK V_M1_LOCK(1U)
+
+#define S_T7_PCIE_LOCK 8
+#define V_T7_PCIE_LOCK(x) ((x) << S_T7_PCIE_LOCK)
+#define F_T7_PCIE_LOCK V_T7_PCIE_LOCK(1U)
+
+#define S_T7_U_LOCK 7
+#define V_T7_U_LOCK(x) ((x) << S_T7_U_LOCK)
+#define F_T7_U_LOCK V_T7_U_LOCK(1U)
+
+#define S_MAC_LOCK 6
+#define V_MAC_LOCK(x) ((x) << S_MAC_LOCK)
+#define F_MAC_LOCK V_MAC_LOCK(1U)
+
+#define S_ARM_LOCK 5
+#define V_ARM_LOCK(x) ((x) << S_ARM_LOCK)
+#define F_ARM_LOCK V_ARM_LOCK(1U)
+
+#define S_M0_LOCK 4
+#define V_M0_LOCK(x) ((x) << S_M0_LOCK)
+#define F_M0_LOCK V_M0_LOCK(1U)
+
+#define S_XGPBUS_LOCK 3
+#define V_XGPBUS_LOCK(x) ((x) << S_XGPBUS_LOCK)
+#define F_XGPBUS_LOCK V_XGPBUS_LOCK(1U)
+
+#define S_XGPHY_LOCK 2
+#define V_XGPHY_LOCK(x) ((x) << S_XGPHY_LOCK)
+#define F_XGPHY_LOCK V_XGPHY_LOCK(1U)
+
+#define S_USB_LOCK 1
+#define V_USB_LOCK(x) ((x) << S_USB_LOCK)
+#define F_USB_LOCK V_USB_LOCK(1U)
+
+#define S_T7_C_LOCK 0
+#define V_T7_C_LOCK(x) ((x) << S_T7_C_LOCK)
+#define F_T7_C_LOCK V_T7_C_LOCK(1U)
+
#define A_DBG_INT_CAUSE 0x601c
#define S_IBM_FDL_FAIL_INT_CAUSE 25
@@ -11938,6 +14864,14 @@
#define V_PLL_LOCK_LOST_INT_CAUSE(x) ((x) << S_PLL_LOCK_LOST_INT_CAUSE)
#define F_PLL_LOCK_LOST_INT_CAUSE V_PLL_LOCK_LOST_INT_CAUSE(1U)
+#define S_T7_IBM_FDL_FAIL_INT_CAUSE 11
+#define V_T7_IBM_FDL_FAIL_INT_CAUSE(x) ((x) << S_T7_IBM_FDL_FAIL_INT_CAUSE)
+#define F_T7_IBM_FDL_FAIL_INT_CAUSE V_T7_IBM_FDL_FAIL_INT_CAUSE(1U)
+
+#define S_T7_PLL_LOCK_LOST_INT_CAUSE 10
+#define V_T7_PLL_LOCK_LOST_INT_CAUSE(x) ((x) << S_T7_PLL_LOCK_LOST_INT_CAUSE)
+#define F_T7_PLL_LOCK_LOST_INT_CAUSE V_T7_PLL_LOCK_LOST_INT_CAUSE(1U)
+
#define A_DBG_DBG0_RST_VALUE 0x6020
#define S_DEBUGDATA 0
@@ -11977,6 +14911,10 @@
#define V_C_OCLK_EN(x) ((x) << S_C_OCLK_EN)
#define F_C_OCLK_EN V_C_OCLK_EN(1U)
+#define S_INIC_MODE_EN 0
+#define V_INIC_MODE_EN(x) ((x) << S_INIC_MODE_EN)
+#define F_INIC_MODE_EN V_INIC_MODE_EN(1U)
+
#define A_DBG_PLL_LOCK 0x602c
#define S_PLL_P_LOCK 20
@@ -12003,6 +14941,38 @@
#define V_PLL_C_LOCK(x) ((x) << S_PLL_C_LOCK)
#define F_PLL_C_LOCK V_PLL_C_LOCK(1U)
+#define S_T7_PLL_M_LOCK 9
+#define V_T7_PLL_M_LOCK(x) ((x) << S_T7_PLL_M_LOCK)
+#define F_T7_PLL_M_LOCK V_T7_PLL_M_LOCK(1U)
+
+#define S_PLL_PCIE_LOCK 8
+#define V_PLL_PCIE_LOCK(x) ((x) << S_PLL_PCIE_LOCK)
+#define F_PLL_PCIE_LOCK V_PLL_PCIE_LOCK(1U)
+
+#define S_T7_PLL_U_LOCK 7
+#define V_T7_PLL_U_LOCK(x) ((x) << S_T7_PLL_U_LOCK)
+#define F_T7_PLL_U_LOCK V_T7_PLL_U_LOCK(1U)
+
+#define S_PLL_MAC_LOCK 6
+#define V_PLL_MAC_LOCK(x) ((x) << S_PLL_MAC_LOCK)
+#define F_PLL_MAC_LOCK V_PLL_MAC_LOCK(1U)
+
+#define S_PLL_ARM_LOCK 5
+#define V_PLL_ARM_LOCK(x) ((x) << S_PLL_ARM_LOCK)
+#define F_PLL_ARM_LOCK V_PLL_ARM_LOCK(1U)
+
+#define S_PLL_XGPBUS_LOCK 3
+#define V_PLL_XGPBUS_LOCK(x) ((x) << S_PLL_XGPBUS_LOCK)
+#define F_PLL_XGPBUS_LOCK V_PLL_XGPBUS_LOCK(1U)
+
+#define S_PLL_XGPHY_LOCK 2
+#define V_PLL_XGPHY_LOCK(x) ((x) << S_PLL_XGPHY_LOCK)
+#define F_PLL_XGPHY_LOCK V_PLL_XGPHY_LOCK(1U)
+
+#define S_PLL_USB_LOCK 1
+#define V_PLL_USB_LOCK(x) ((x) << S_PLL_USB_LOCK)
+#define F_PLL_USB_LOCK V_PLL_USB_LOCK(1U)
+
#define A_DBG_GPIO_ACT_LOW 0x6030
#define S_P_LOCK_ACT_LOW 21
@@ -12109,6 +15079,48 @@
#define V_GPIO16_ACT_LOW(x) ((x) << S_GPIO16_ACT_LOW)
#define F_GPIO16_ACT_LOW V_GPIO16_ACT_LOW(1U)
+#define A_DBG_PLL_LOCK_ACT_LOW 0x6030
+
+#define S_M1_LOCK_ACT_LOW 9
+#define V_M1_LOCK_ACT_LOW(x) ((x) << S_M1_LOCK_ACT_LOW)
+#define F_M1_LOCK_ACT_LOW V_M1_LOCK_ACT_LOW(1U)
+
+#define S_PCIE_LOCK_ACT_LOW 8
+#define V_PCIE_LOCK_ACT_LOW(x) ((x) << S_PCIE_LOCK_ACT_LOW)
+#define F_PCIE_LOCK_ACT_LOW V_PCIE_LOCK_ACT_LOW(1U)
+
+#define S_T7_U_LOCK_ACT_LOW 7
+#define V_T7_U_LOCK_ACT_LOW(x) ((x) << S_T7_U_LOCK_ACT_LOW)
+#define F_T7_U_LOCK_ACT_LOW V_T7_U_LOCK_ACT_LOW(1U)
+
+#define S_MAC_LOCK_ACT_LOW 6
+#define V_MAC_LOCK_ACT_LOW(x) ((x) << S_MAC_LOCK_ACT_LOW)
+#define F_MAC_LOCK_ACT_LOW V_MAC_LOCK_ACT_LOW(1U)
+
+#define S_ARM_LOCK_ACT_LOW 5
+#define V_ARM_LOCK_ACT_LOW(x) ((x) << S_ARM_LOCK_ACT_LOW)
+#define F_ARM_LOCK_ACT_LOW V_ARM_LOCK_ACT_LOW(1U)
+
+#define S_M0_LOCK_ACT_LOW 4
+#define V_M0_LOCK_ACT_LOW(x) ((x) << S_M0_LOCK_ACT_LOW)
+#define F_M0_LOCK_ACT_LOW V_M0_LOCK_ACT_LOW(1U)
+
+#define S_XGPBUS_LOCK_ACT_LOW 3
+#define V_XGPBUS_LOCK_ACT_LOW(x) ((x) << S_XGPBUS_LOCK_ACT_LOW)
+#define F_XGPBUS_LOCK_ACT_LOW V_XGPBUS_LOCK_ACT_LOW(1U)
+
+#define S_XGPHY_LOCK_ACT_LOW 2
+#define V_XGPHY_LOCK_ACT_LOW(x) ((x) << S_XGPHY_LOCK_ACT_LOW)
+#define F_XGPHY_LOCK_ACT_LOW V_XGPHY_LOCK_ACT_LOW(1U)
+
+#define S_USB_LOCK_ACT_LOW 1
+#define V_USB_LOCK_ACT_LOW(x) ((x) << S_USB_LOCK_ACT_LOW)
+#define F_USB_LOCK_ACT_LOW V_USB_LOCK_ACT_LOW(1U)
+
+#define S_T7_C_LOCK_ACT_LOW 0
+#define V_T7_C_LOCK_ACT_LOW(x) ((x) << S_T7_C_LOCK_ACT_LOW)
+#define F_T7_C_LOCK_ACT_LOW V_T7_C_LOCK_ACT_LOW(1U)
+
#define A_DBG_EFUSE_BYTE0_3 0x6034
#define A_DBG_EFUSE_BYTE4_7 0x6038
#define A_DBG_EFUSE_BYTE8_11 0x603c
@@ -12140,6 +15152,32 @@
#define V_STATIC_U_PLL_TUNE(x) ((x) << S_STATIC_U_PLL_TUNE)
#define G_STATIC_U_PLL_TUNE(x) (((x) >> S_STATIC_U_PLL_TUNE) & M_STATIC_U_PLL_TUNE)
+#define A_T7_DBG_STATIC_U_PLL_CONF1 0x6044
+
+#define S_STATIC_U_PLL_RANGE 22
+#define M_STATIC_U_PLL_RANGE 0x7U
+#define V_STATIC_U_PLL_RANGE(x) ((x) << S_STATIC_U_PLL_RANGE)
+#define G_STATIC_U_PLL_RANGE(x) (((x) >> S_STATIC_U_PLL_RANGE) & M_STATIC_U_PLL_RANGE)
+
+#define S_STATIC_U_PLL_DIVQ 17
+#define M_STATIC_U_PLL_DIVQ 0x1fU
+#define V_STATIC_U_PLL_DIVQ(x) ((x) << S_STATIC_U_PLL_DIVQ)
+#define G_STATIC_U_PLL_DIVQ(x) (((x) >> S_STATIC_U_PLL_DIVQ) & M_STATIC_U_PLL_DIVQ)
+
+#define S_STATIC_U_PLL_DIVFI 8
+#define M_STATIC_U_PLL_DIVFI 0x1ffU
+#define V_STATIC_U_PLL_DIVFI(x) ((x) << S_STATIC_U_PLL_DIVFI)
+#define G_STATIC_U_PLL_DIVFI(x) (((x) >> S_STATIC_U_PLL_DIVFI) & M_STATIC_U_PLL_DIVFI)
+
+#define S_STATIC_U_PLL_DIVR 2
+#define M_STATIC_U_PLL_DIVR 0x3fU
+#define V_STATIC_U_PLL_DIVR(x) ((x) << S_STATIC_U_PLL_DIVR)
+#define G_STATIC_U_PLL_DIVR(x) (((x) >> S_STATIC_U_PLL_DIVR) & M_STATIC_U_PLL_DIVR)
+
+#define S_T7_1_STATIC_U_PLL_BYPASS 1
+#define V_T7_1_STATIC_U_PLL_BYPASS(x) ((x) << S_T7_1_STATIC_U_PLL_BYPASS)
+#define F_T7_1_STATIC_U_PLL_BYPASS V_T7_1_STATIC_U_PLL_BYPASS(1U)
+
#define A_DBG_STATIC_C_PLL_CONF 0x6048
#define S_STATIC_C_PLL_MULT 23
@@ -12167,6 +15205,26 @@
#define V_STATIC_C_PLL_TUNE(x) ((x) << S_STATIC_C_PLL_TUNE)
#define G_STATIC_C_PLL_TUNE(x) (((x) >> S_STATIC_C_PLL_TUNE) & M_STATIC_C_PLL_TUNE)
+#define A_T7_DBG_STATIC_U_PLL_CONF2 0x6048
+
+#define S_STATIC_U_PLL_SSMF 5
+#define M_STATIC_U_PLL_SSMF 0xfU
+#define V_STATIC_U_PLL_SSMF(x) ((x) << S_STATIC_U_PLL_SSMF)
+#define G_STATIC_U_PLL_SSMF(x) (((x) >> S_STATIC_U_PLL_SSMF) & M_STATIC_U_PLL_SSMF)
+
+#define S_STATIC_U_PLL_SSMD 2
+#define M_STATIC_U_PLL_SSMD 0x7U
+#define V_STATIC_U_PLL_SSMD(x) ((x) << S_STATIC_U_PLL_SSMD)
+#define G_STATIC_U_PLL_SSMD(x) (((x) >> S_STATIC_U_PLL_SSMD) & M_STATIC_U_PLL_SSMD)
+
+#define S_STATIC_U_PLL_SSDS 1
+#define V_STATIC_U_PLL_SSDS(x) ((x) << S_STATIC_U_PLL_SSDS)
+#define F_STATIC_U_PLL_SSDS V_STATIC_U_PLL_SSDS(1U)
+
+#define S_STATIC_U_PLL_SSE 0
+#define V_STATIC_U_PLL_SSE(x) ((x) << S_STATIC_U_PLL_SSE)
+#define F_STATIC_U_PLL_SSE V_STATIC_U_PLL_SSE(1U)
+
#define A_DBG_STATIC_M_PLL_CONF 0x604c
#define S_STATIC_M_PLL_MULT 23
@@ -12194,6 +15252,32 @@
#define V_STATIC_M_PLL_TUNE(x) ((x) << S_STATIC_M_PLL_TUNE)
#define G_STATIC_M_PLL_TUNE(x) (((x) >> S_STATIC_M_PLL_TUNE) & M_STATIC_M_PLL_TUNE)
+#define A_T7_DBG_STATIC_C_PLL_CONF1 0x604c
+
+#define S_STATIC_C_PLL_RANGE 22
+#define M_STATIC_C_PLL_RANGE 0x7U
+#define V_STATIC_C_PLL_RANGE(x) ((x) << S_STATIC_C_PLL_RANGE)
+#define G_STATIC_C_PLL_RANGE(x) (((x) >> S_STATIC_C_PLL_RANGE) & M_STATIC_C_PLL_RANGE)
+
+#define S_STATIC_C_PLL_DIVQ 17
+#define M_STATIC_C_PLL_DIVQ 0x1fU
+#define V_STATIC_C_PLL_DIVQ(x) ((x) << S_STATIC_C_PLL_DIVQ)
+#define G_STATIC_C_PLL_DIVQ(x) (((x) >> S_STATIC_C_PLL_DIVQ) & M_STATIC_C_PLL_DIVQ)
+
+#define S_STATIC_C_PLL_DIVFI 8
+#define M_STATIC_C_PLL_DIVFI 0x1ffU
+#define V_STATIC_C_PLL_DIVFI(x) ((x) << S_STATIC_C_PLL_DIVFI)
+#define G_STATIC_C_PLL_DIVFI(x) (((x) >> S_STATIC_C_PLL_DIVFI) & M_STATIC_C_PLL_DIVFI)
+
+#define S_STATIC_C_PLL_DIVR 2
+#define M_STATIC_C_PLL_DIVR 0x3fU
+#define V_STATIC_C_PLL_DIVR(x) ((x) << S_STATIC_C_PLL_DIVR)
+#define G_STATIC_C_PLL_DIVR(x) (((x) >> S_STATIC_C_PLL_DIVR) & M_STATIC_C_PLL_DIVR)
+
+#define S_T7_1_STATIC_C_PLL_BYPASS 1
+#define V_T7_1_STATIC_C_PLL_BYPASS(x) ((x) << S_T7_1_STATIC_C_PLL_BYPASS)
+#define F_T7_1_STATIC_C_PLL_BYPASS V_T7_1_STATIC_C_PLL_BYPASS(1U)
+
#define A_DBG_STATIC_KX_PLL_CONF 0x6050
#define S_STATIC_KX_PLL_C 21
@@ -12226,6 +15310,26 @@
#define V_STATIC_KX_PLL_P(x) ((x) << S_STATIC_KX_PLL_P)
#define G_STATIC_KX_PLL_P(x) (((x) >> S_STATIC_KX_PLL_P) & M_STATIC_KX_PLL_P)
+#define A_T7_DBG_STATIC_C_PLL_CONF2 0x6050
+
+#define S_STATIC_C_PLL_SSMF 5
+#define M_STATIC_C_PLL_SSMF 0xfU
+#define V_STATIC_C_PLL_SSMF(x) ((x) << S_STATIC_C_PLL_SSMF)
+#define G_STATIC_C_PLL_SSMF(x) (((x) >> S_STATIC_C_PLL_SSMF) & M_STATIC_C_PLL_SSMF)
+
+#define S_STATIC_C_PLL_SSMD 2
+#define M_STATIC_C_PLL_SSMD 0x7U
+#define V_STATIC_C_PLL_SSMD(x) ((x) << S_STATIC_C_PLL_SSMD)
+#define G_STATIC_C_PLL_SSMD(x) (((x) >> S_STATIC_C_PLL_SSMD) & M_STATIC_C_PLL_SSMD)
+
+#define S_STATIC_C_PLL_SSDS 1
+#define V_STATIC_C_PLL_SSDS(x) ((x) << S_STATIC_C_PLL_SSDS)
+#define F_STATIC_C_PLL_SSDS V_STATIC_C_PLL_SSDS(1U)
+
+#define S_STATIC_C_PLL_SSE 0
+#define V_STATIC_C_PLL_SSE(x) ((x) << S_STATIC_C_PLL_SSE)
+#define F_STATIC_C_PLL_SSE V_STATIC_C_PLL_SSE(1U)
+
#define A_DBG_STATIC_KR_PLL_CONF 0x6054
#define S_STATIC_KR_PLL_C 21
@@ -12258,6 +15362,38 @@
#define V_STATIC_KR_PLL_P(x) ((x) << S_STATIC_KR_PLL_P)
#define G_STATIC_KR_PLL_P(x) (((x) >> S_STATIC_KR_PLL_P) & M_STATIC_KR_PLL_P)
+#define A_DBG_STATIC_PLL_DFS_CONF 0x6054
+
+#define S_STATIC_U_DFS_ACK 23
+#define V_STATIC_U_DFS_ACK(x) ((x) << S_STATIC_U_DFS_ACK)
+#define F_STATIC_U_DFS_ACK V_STATIC_U_DFS_ACK(1U)
+
+#define S_STATIC_C_DFS_ACK 22
+#define V_STATIC_C_DFS_ACK(x) ((x) << S_STATIC_C_DFS_ACK)
+#define F_STATIC_C_DFS_ACK V_STATIC_C_DFS_ACK(1U)
+
+#define S_STATIC_U_DFS_DIVFI 13
+#define M_STATIC_U_DFS_DIVFI 0x1ffU
+#define V_STATIC_U_DFS_DIVFI(x) ((x) << S_STATIC_U_DFS_DIVFI)
+#define G_STATIC_U_DFS_DIVFI(x) (((x) >> S_STATIC_U_DFS_DIVFI) & M_STATIC_U_DFS_DIVFI)
+
+#define S_STATIC_U_DFS_NEWDIV 12
+#define V_STATIC_U_DFS_NEWDIV(x) ((x) << S_STATIC_U_DFS_NEWDIV)
+#define F_STATIC_U_DFS_NEWDIV V_STATIC_U_DFS_NEWDIV(1U)
+
+#define S_T7_STATIC_U_DFS_ENABLE 11
+#define V_T7_STATIC_U_DFS_ENABLE(x) ((x) << S_T7_STATIC_U_DFS_ENABLE)
+#define F_T7_STATIC_U_DFS_ENABLE V_T7_STATIC_U_DFS_ENABLE(1U)
+
+#define S_STATIC_C_DFS_DIVFI 2
+#define M_STATIC_C_DFS_DIVFI 0x1ffU
+#define V_STATIC_C_DFS_DIVFI(x) ((x) << S_STATIC_C_DFS_DIVFI)
+#define G_STATIC_C_DFS_DIVFI(x) (((x) >> S_STATIC_C_DFS_DIVFI) & M_STATIC_C_DFS_DIVFI)
+
+#define S_STATIC_C_DFS_NEWDIV 1
+#define V_STATIC_C_DFS_NEWDIV(x) ((x) << S_STATIC_C_DFS_NEWDIV)
+#define F_STATIC_C_DFS_NEWDIV V_STATIC_C_DFS_NEWDIV(1U)
+
#define A_DBG_EXTRA_STATIC_BITS_CONF 0x6058
#define S_STATIC_M_PLL_RESET 30
@@ -12343,6 +15479,14 @@
#define V_PSRO_SEL(x) ((x) << S_PSRO_SEL)
#define G_PSRO_SEL(x) (((x) >> S_PSRO_SEL) & M_PSRO_SEL)
+#define S_T7_STATIC_LVDS_CLKOUT_EN 21
+#define V_T7_STATIC_LVDS_CLKOUT_EN(x) ((x) << S_T7_STATIC_LVDS_CLKOUT_EN)
+#define F_T7_STATIC_LVDS_CLKOUT_EN V_T7_STATIC_LVDS_CLKOUT_EN(1U)
+
+#define S_T7_EXPHYCLK_SEL_EN 16
+#define V_T7_EXPHYCLK_SEL_EN(x) ((x) << S_T7_EXPHYCLK_SEL_EN)
+#define F_T7_EXPHYCLK_SEL_EN V_T7_EXPHYCLK_SEL_EN(1U)
+
#define A_DBG_STATIC_OCLK_MUXSEL_CONF 0x605c
#define S_M_OCLK_MUXSEL 12
@@ -12467,16 +15611,6 @@
#define V_T5_RD_ADDR0(x) ((x) << S_T5_RD_ADDR0)
#define G_T5_RD_ADDR0(x) (((x) >> S_T5_RD_ADDR0) & M_T5_RD_ADDR0)
-#define S_T6_RD_ADDR1 11
-#define M_T6_RD_ADDR1 0x1ffU
-#define V_T6_RD_ADDR1(x) ((x) << S_T6_RD_ADDR1)
-#define G_T6_RD_ADDR1(x) (((x) >> S_T6_RD_ADDR1) & M_T6_RD_ADDR1)
-
-#define S_T6_RD_ADDR0 2
-#define M_T6_RD_ADDR0 0x1ffU
-#define V_T6_RD_ADDR0(x) ((x) << S_T6_RD_ADDR0)
-#define G_T6_RD_ADDR0(x) (((x) >> S_T6_RD_ADDR0) & M_T6_RD_ADDR0)
-
#define A_DBG_TRACE_WRADDR 0x6090
#define S_WR_POINTER_ADDR1 16
@@ -12499,16 +15633,6 @@
#define V_T5_WR_POINTER_ADDR0(x) ((x) << S_T5_WR_POINTER_ADDR0)
#define G_T5_WR_POINTER_ADDR0(x) (((x) >> S_T5_WR_POINTER_ADDR0) & M_T5_WR_POINTER_ADDR0)
-#define S_T6_WR_POINTER_ADDR1 16
-#define M_T6_WR_POINTER_ADDR1 0x1ffU
-#define V_T6_WR_POINTER_ADDR1(x) ((x) << S_T6_WR_POINTER_ADDR1)
-#define G_T6_WR_POINTER_ADDR1(x) (((x) >> S_T6_WR_POINTER_ADDR1) & M_T6_WR_POINTER_ADDR1)
-
-#define S_T6_WR_POINTER_ADDR0 0
-#define M_T6_WR_POINTER_ADDR0 0x1ffU
-#define V_T6_WR_POINTER_ADDR0(x) ((x) << S_T6_WR_POINTER_ADDR0)
-#define G_T6_WR_POINTER_ADDR0(x) (((x) >> S_T6_WR_POINTER_ADDR0) & M_T6_WR_POINTER_ADDR0)
-
#define A_DBG_TRACE0_DATA_OUT 0x6094
#define A_DBG_TRACE1_DATA_OUT 0x6098
#define A_DBG_FUSE_SENSE_DONE 0x609c
@@ -12575,7 +15699,52 @@
#define V_T6_TVSENSE_RST(x) ((x) << S_T6_TVSENSE_RST)
#define F_T6_TVSENSE_RST V_T6_TVSENSE_RST(1U)
+#define A_DBG_PVT_EN1 0x60a8
+
+#define S_PVT_TRIMO 18
+#define M_PVT_TRIMO 0x3fU
+#define V_PVT_TRIMO(x) ((x) << S_PVT_TRIMO)
+#define G_PVT_TRIMO(x) (((x) >> S_PVT_TRIMO) & M_PVT_TRIMO)
+
+#define S_PVT_TRIMG 13
+#define M_PVT_TRIMG 0x1fU
+#define V_PVT_TRIMG(x) ((x) << S_PVT_TRIMG)
+#define G_PVT_TRIMG(x) (((x) >> S_PVT_TRIMG) & M_PVT_TRIMG)
+
+#define S_PVT_VSAMPLE 12
+#define V_PVT_VSAMPLE(x) ((x) << S_PVT_VSAMPLE)
+#define F_PVT_VSAMPLE V_PVT_VSAMPLE(1U)
+
+#define S_PVT_PSAMPLE 10
+#define M_PVT_PSAMPLE 0x3U
+#define V_PVT_PSAMPLE(x) ((x) << S_PVT_PSAMPLE)
+#define G_PVT_PSAMPLE(x) (((x) >> S_PVT_PSAMPLE) & M_PVT_PSAMPLE)
+
+#define S_PVT_ENA 9
+#define V_PVT_ENA(x) ((x) << S_PVT_ENA)
+#define F_PVT_ENA V_PVT_ENA(1U)
+
+#define S_PVT_RESET 8
+#define V_PVT_RESET(x) ((x) << S_PVT_RESET)
+#define F_PVT_RESET V_PVT_RESET(1U)
+
+#define S_PVT_DIV 0
+#define M_PVT_DIV 0xffU
+#define V_PVT_DIV(x) ((x) << S_PVT_DIV)
+#define G_PVT_DIV(x) (((x) >> S_PVT_DIV) & M_PVT_DIV)
+
#define A_DBG_CUST_EFUSE_OUT_EN 0x60ac
+#define A_DBG_PVT_EN2 0x60ac
+
+#define S_PVT_DATA_OUT 1
+#define M_PVT_DATA_OUT 0x3ffU
+#define V_PVT_DATA_OUT(x) ((x) << S_PVT_DATA_OUT)
+#define G_PVT_DATA_OUT(x) (((x) >> S_PVT_DATA_OUT) & M_PVT_DATA_OUT)
+
+#define S_PVT_DATA_VALID 0
+#define V_PVT_DATA_VALID(x) ((x) << S_PVT_DATA_VALID)
+#define F_PVT_DATA_VALID V_PVT_DATA_VALID(1U)
+
#define A_DBG_CUST_EFUSE_SEL1_EN 0x60b0
#define A_DBG_CUST_EFUSE_SEL2_EN 0x60b4
@@ -12638,6 +15807,36 @@
#define V_STATIC_M_PLL_FFSLEWRATE(x) ((x) << S_STATIC_M_PLL_FFSLEWRATE)
#define G_STATIC_M_PLL_FFSLEWRATE(x) (((x) >> S_STATIC_M_PLL_FFSLEWRATE) & M_STATIC_M_PLL_FFSLEWRATE)
+#define A_DBG_STATIC_M0_PLL_CONF1 0x60b8
+
+#define S_STATIC_M0_PLL_RANGE 22
+#define M_STATIC_M0_PLL_RANGE 0x7U
+#define V_STATIC_M0_PLL_RANGE(x) ((x) << S_STATIC_M0_PLL_RANGE)
+#define G_STATIC_M0_PLL_RANGE(x) (((x) >> S_STATIC_M0_PLL_RANGE) & M_STATIC_M0_PLL_RANGE)
+
+#define S_STATIC_M0_PLL_DIVQ 17
+#define M_STATIC_M0_PLL_DIVQ 0x1fU
+#define V_STATIC_M0_PLL_DIVQ(x) ((x) << S_STATIC_M0_PLL_DIVQ)
+#define G_STATIC_M0_PLL_DIVQ(x) (((x) >> S_STATIC_M0_PLL_DIVQ) & M_STATIC_M0_PLL_DIVQ)
+
+#define S_STATIC_M0_PLL_DIVFI 8
+#define M_STATIC_M0_PLL_DIVFI 0x1ffU
+#define V_STATIC_M0_PLL_DIVFI(x) ((x) << S_STATIC_M0_PLL_DIVFI)
+#define G_STATIC_M0_PLL_DIVFI(x) (((x) >> S_STATIC_M0_PLL_DIVFI) & M_STATIC_M0_PLL_DIVFI)
+
+#define S_STATIC_M0_PLL_DIVR 2
+#define M_STATIC_M0_PLL_DIVR 0x3fU
+#define V_STATIC_M0_PLL_DIVR(x) ((x) << S_STATIC_M0_PLL_DIVR)
+#define G_STATIC_M0_PLL_DIVR(x) (((x) >> S_STATIC_M0_PLL_DIVR) & M_STATIC_M0_PLL_DIVR)
+
+#define S_STATIC_M0_PLL_BYPASS 1
+#define V_STATIC_M0_PLL_BYPASS(x) ((x) << S_STATIC_M0_PLL_BYPASS)
+#define F_STATIC_M0_PLL_BYPASS V_STATIC_M0_PLL_BYPASS(1U)
+
+#define S_STATIC_M0_PLL_RESET 0
+#define V_STATIC_M0_PLL_RESET(x) ((x) << S_STATIC_M0_PLL_RESET)
+#define F_STATIC_M0_PLL_RESET V_STATIC_M0_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_M_PLL_CONF2 0x60bc
#define S_T5_STATIC_M_PLL_DCO_BYPASS 23
@@ -12715,6 +15914,50 @@
#define V_STATIC_M_PLL_LOCKTUNE(x) ((x) << S_STATIC_M_PLL_LOCKTUNE)
#define G_STATIC_M_PLL_LOCKTUNE(x) (((x) >> S_STATIC_M_PLL_LOCKTUNE) & M_STATIC_M_PLL_LOCKTUNE)
+#define A_DBG_STATIC_M0_PLL_CONF2 0x60bc
+
+#define S_T7_STATIC_SWMC1RST_ 14
+#define V_T7_STATIC_SWMC1RST_(x) ((x) << S_T7_STATIC_SWMC1RST_)
+#define F_T7_STATIC_SWMC1RST_ V_T7_STATIC_SWMC1RST_(1U)
+
+#define S_T7_STATIC_SWMC1CFGRST_ 13
+#define V_T7_STATIC_SWMC1CFGRST_(x) ((x) << S_T7_STATIC_SWMC1CFGRST_)
+#define F_T7_STATIC_SWMC1CFGRST_ V_T7_STATIC_SWMC1CFGRST_(1U)
+
+#define S_T7_STATIC_PHY0RECRST_ 12
+#define V_T7_STATIC_PHY0RECRST_(x) ((x) << S_T7_STATIC_PHY0RECRST_)
+#define F_T7_STATIC_PHY0RECRST_ V_T7_STATIC_PHY0RECRST_(1U)
+
+#define S_T7_STATIC_PHY1RECRST_ 11
+#define V_T7_STATIC_PHY1RECRST_(x) ((x) << S_T7_STATIC_PHY1RECRST_)
+#define F_T7_STATIC_PHY1RECRST_ V_T7_STATIC_PHY1RECRST_(1U)
+
+#define S_T7_STATIC_SWMC0RST_ 10
+#define V_T7_STATIC_SWMC0RST_(x) ((x) << S_T7_STATIC_SWMC0RST_)
+#define F_T7_STATIC_SWMC0RST_ V_T7_STATIC_SWMC0RST_(1U)
+
+#define S_T7_STATIC_SWMC0CFGRST_ 9
+#define V_T7_STATIC_SWMC0CFGRST_(x) ((x) << S_T7_STATIC_SWMC0CFGRST_)
+#define F_T7_STATIC_SWMC0CFGRST_ V_T7_STATIC_SWMC0CFGRST_(1U)
+
+#define S_STATIC_M0_PLL_SSMF 5
+#define M_STATIC_M0_PLL_SSMF 0xfU
+#define V_STATIC_M0_PLL_SSMF(x) ((x) << S_STATIC_M0_PLL_SSMF)
+#define G_STATIC_M0_PLL_SSMF(x) (((x) >> S_STATIC_M0_PLL_SSMF) & M_STATIC_M0_PLL_SSMF)
+
+#define S_STATIC_M0_PLL_SSMD 2
+#define M_STATIC_M0_PLL_SSMD 0x7U
+#define V_STATIC_M0_PLL_SSMD(x) ((x) << S_STATIC_M0_PLL_SSMD)
+#define G_STATIC_M0_PLL_SSMD(x) (((x) >> S_STATIC_M0_PLL_SSMD) & M_STATIC_M0_PLL_SSMD)
+
+#define S_STATIC_M0_PLL_SSDS 1
+#define V_STATIC_M0_PLL_SSDS(x) ((x) << S_STATIC_M0_PLL_SSDS)
+#define F_STATIC_M0_PLL_SSDS V_STATIC_M0_PLL_SSDS(1U)
+
+#define S_STATIC_M0_PLL_SSE 0
+#define V_STATIC_M0_PLL_SSE(x) ((x) << S_STATIC_M0_PLL_SSE)
+#define F_STATIC_M0_PLL_SSE V_STATIC_M0_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_M_PLL_CONF3 0x60c0
#define S_T5_STATIC_M_PLL_MULTPRE 30
@@ -12778,8 +16021,58 @@
#define V_T6_STATIC_M_PLL_RANGEA(x) ((x) << S_T6_STATIC_M_PLL_RANGEA)
#define G_T6_STATIC_M_PLL_RANGEA(x) (((x) >> S_T6_STATIC_M_PLL_RANGEA) & M_T6_STATIC_M_PLL_RANGEA)
+#define A_DBG_STATIC_MAC_PLL_CONF1 0x60c0
+
+#define S_STATIC_MAC_PLL_RANGE 22
+#define M_STATIC_MAC_PLL_RANGE 0x7U
+#define V_STATIC_MAC_PLL_RANGE(x) ((x) << S_STATIC_MAC_PLL_RANGE)
+#define G_STATIC_MAC_PLL_RANGE(x) (((x) >> S_STATIC_MAC_PLL_RANGE) & M_STATIC_MAC_PLL_RANGE)
+
+#define S_STATIC_MAC_PLL_DIVQ 17
+#define M_STATIC_MAC_PLL_DIVQ 0x1fU
+#define V_STATIC_MAC_PLL_DIVQ(x) ((x) << S_STATIC_MAC_PLL_DIVQ)
+#define G_STATIC_MAC_PLL_DIVQ(x) (((x) >> S_STATIC_MAC_PLL_DIVQ) & M_STATIC_MAC_PLL_DIVQ)
+
+#define S_STATIC_MAC_PLL_DIVFI 8
+#define M_STATIC_MAC_PLL_DIVFI 0x1ffU
+#define V_STATIC_MAC_PLL_DIVFI(x) ((x) << S_STATIC_MAC_PLL_DIVFI)
+#define G_STATIC_MAC_PLL_DIVFI(x) (((x) >> S_STATIC_MAC_PLL_DIVFI) & M_STATIC_MAC_PLL_DIVFI)
+
+#define S_STATIC_MAC_PLL_DIVR 2
+#define M_STATIC_MAC_PLL_DIVR 0x3fU
+#define V_STATIC_MAC_PLL_DIVR(x) ((x) << S_STATIC_MAC_PLL_DIVR)
+#define G_STATIC_MAC_PLL_DIVR(x) (((x) >> S_STATIC_MAC_PLL_DIVR) & M_STATIC_MAC_PLL_DIVR)
+
+#define S_STATIC_MAC_PLL_BYPASS 1
+#define V_STATIC_MAC_PLL_BYPASS(x) ((x) << S_STATIC_MAC_PLL_BYPASS)
+#define F_STATIC_MAC_PLL_BYPASS V_STATIC_MAC_PLL_BYPASS(1U)
+
+#define S_STATIC_MAC_PLL_RESET 0
+#define V_STATIC_MAC_PLL_RESET(x) ((x) << S_STATIC_MAC_PLL_RESET)
+#define F_STATIC_MAC_PLL_RESET V_STATIC_MAC_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_M_PLL_CONF4 0x60c4
#define A_DBG_STATIC_M_PLL_CONF4 0x60c4
+#define A_DBG_STATIC_MAC_PLL_CONF2 0x60c4
+
+#define S_STATIC_MAC_PLL_SSMF 5
+#define M_STATIC_MAC_PLL_SSMF 0xfU
+#define V_STATIC_MAC_PLL_SSMF(x) ((x) << S_STATIC_MAC_PLL_SSMF)
+#define G_STATIC_MAC_PLL_SSMF(x) (((x) >> S_STATIC_MAC_PLL_SSMF) & M_STATIC_MAC_PLL_SSMF)
+
+#define S_STATIC_MAC_PLL_SSMD 2
+#define M_STATIC_MAC_PLL_SSMD 0x7U
+#define V_STATIC_MAC_PLL_SSMD(x) ((x) << S_STATIC_MAC_PLL_SSMD)
+#define G_STATIC_MAC_PLL_SSMD(x) (((x) >> S_STATIC_MAC_PLL_SSMD) & M_STATIC_MAC_PLL_SSMD)
+
+#define S_STATIC_MAC_PLL_SSDS 1
+#define V_STATIC_MAC_PLL_SSDS(x) ((x) << S_STATIC_MAC_PLL_SSDS)
+#define F_STATIC_MAC_PLL_SSDS V_STATIC_MAC_PLL_SSDS(1U)
+
+#define S_STATIC_MAC_PLL_SSE 0
+#define V_STATIC_MAC_PLL_SSE(x) ((x) << S_STATIC_MAC_PLL_SSE)
+#define F_STATIC_MAC_PLL_SSE V_STATIC_MAC_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_M_PLL_CONF5 0x60c8
#define S_T5_STATIC_M_PLL_VCVTUNE 24
@@ -12835,6 +16128,36 @@
#define V_T6_STATIC_M_PLL_MULT(x) ((x) << S_T6_STATIC_M_PLL_MULT)
#define G_T6_STATIC_M_PLL_MULT(x) (((x) >> S_T6_STATIC_M_PLL_MULT) & M_T6_STATIC_M_PLL_MULT)
+#define A_DBG_STATIC_ARM_PLL_CONF1 0x60c8
+
+#define S_STATIC_ARM_PLL_RANGE 22
+#define M_STATIC_ARM_PLL_RANGE 0x7U
+#define V_STATIC_ARM_PLL_RANGE(x) ((x) << S_STATIC_ARM_PLL_RANGE)
+#define G_STATIC_ARM_PLL_RANGE(x) (((x) >> S_STATIC_ARM_PLL_RANGE) & M_STATIC_ARM_PLL_RANGE)
+
+#define S_STATIC_ARM_PLL_DIVQ 17
+#define M_STATIC_ARM_PLL_DIVQ 0x1fU
+#define V_STATIC_ARM_PLL_DIVQ(x) ((x) << S_STATIC_ARM_PLL_DIVQ)
+#define G_STATIC_ARM_PLL_DIVQ(x) (((x) >> S_STATIC_ARM_PLL_DIVQ) & M_STATIC_ARM_PLL_DIVQ)
+
+#define S_STATIC_ARM_PLL_DIVFI 8
+#define M_STATIC_ARM_PLL_DIVFI 0x1ffU
+#define V_STATIC_ARM_PLL_DIVFI(x) ((x) << S_STATIC_ARM_PLL_DIVFI)
+#define G_STATIC_ARM_PLL_DIVFI(x) (((x) >> S_STATIC_ARM_PLL_DIVFI) & M_STATIC_ARM_PLL_DIVFI)
+
+#define S_STATIC_ARM_PLL_DIVR 2
+#define M_STATIC_ARM_PLL_DIVR 0x3fU
+#define V_STATIC_ARM_PLL_DIVR(x) ((x) << S_STATIC_ARM_PLL_DIVR)
+#define G_STATIC_ARM_PLL_DIVR(x) (((x) >> S_STATIC_ARM_PLL_DIVR) & M_STATIC_ARM_PLL_DIVR)
+
+#define S_STATIC_ARM_PLL_BYPASS 1
+#define V_STATIC_ARM_PLL_BYPASS(x) ((x) << S_STATIC_ARM_PLL_BYPASS)
+#define F_STATIC_ARM_PLL_BYPASS V_STATIC_ARM_PLL_BYPASS(1U)
+
+#define S_STATIC_ARM_PLL_RESET 0
+#define V_STATIC_ARM_PLL_RESET(x) ((x) << S_STATIC_ARM_PLL_RESET)
+#define F_STATIC_ARM_PLL_RESET V_STATIC_ARM_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_M_PLL_CONF6 0x60cc
#define S_T5_STATIC_PHY0RECRST_ 5
@@ -12913,6 +16236,26 @@
#define V_STATIC_SWMC1CFGRST_(x) ((x) << S_STATIC_SWMC1CFGRST_)
#define F_STATIC_SWMC1CFGRST_ V_STATIC_SWMC1CFGRST_(1U)
+#define A_DBG_STATIC_ARM_PLL_CONF2 0x60cc
+
+#define S_STATIC_ARM_PLL_SSMF 5
+#define M_STATIC_ARM_PLL_SSMF 0xfU
+#define V_STATIC_ARM_PLL_SSMF(x) ((x) << S_STATIC_ARM_PLL_SSMF)
+#define G_STATIC_ARM_PLL_SSMF(x) (((x) >> S_STATIC_ARM_PLL_SSMF) & M_STATIC_ARM_PLL_SSMF)
+
+#define S_STATIC_ARM_PLL_SSMD 2
+#define M_STATIC_ARM_PLL_SSMD 0x7U
+#define V_STATIC_ARM_PLL_SSMD(x) ((x) << S_STATIC_ARM_PLL_SSMD)
+#define G_STATIC_ARM_PLL_SSMD(x) (((x) >> S_STATIC_ARM_PLL_SSMD) & M_STATIC_ARM_PLL_SSMD)
+
+#define S_STATIC_ARM_PLL_SSDS 1
+#define V_STATIC_ARM_PLL_SSDS(x) ((x) << S_STATIC_ARM_PLL_SSDS)
+#define F_STATIC_ARM_PLL_SSDS V_STATIC_ARM_PLL_SSDS(1U)
+
+#define S_STATIC_ARM_PLL_SSE 0
+#define V_STATIC_ARM_PLL_SSE(x) ((x) << S_STATIC_ARM_PLL_SSE)
+#define F_STATIC_ARM_PLL_SSE V_STATIC_ARM_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_C_PLL_CONF1 0x60d0
#define S_T5_STATIC_C_PLL_MULTFRAC 8
@@ -12937,6 +16280,36 @@
#define V_STATIC_C_PLL_FFSLEWRATE(x) ((x) << S_STATIC_C_PLL_FFSLEWRATE)
#define G_STATIC_C_PLL_FFSLEWRATE(x) (((x) >> S_STATIC_C_PLL_FFSLEWRATE) & M_STATIC_C_PLL_FFSLEWRATE)
+#define A_DBG_STATIC_USB_PLL_CONF1 0x60d0
+
+#define S_STATIC_USB_PLL_RANGE 22
+#define M_STATIC_USB_PLL_RANGE 0x7U
+#define V_STATIC_USB_PLL_RANGE(x) ((x) << S_STATIC_USB_PLL_RANGE)
+#define G_STATIC_USB_PLL_RANGE(x) (((x) >> S_STATIC_USB_PLL_RANGE) & M_STATIC_USB_PLL_RANGE)
+
+#define S_STATIC_USB_PLL_DIVQ 17
+#define M_STATIC_USB_PLL_DIVQ 0x1fU
+#define V_STATIC_USB_PLL_DIVQ(x) ((x) << S_STATIC_USB_PLL_DIVQ)
+#define G_STATIC_USB_PLL_DIVQ(x) (((x) >> S_STATIC_USB_PLL_DIVQ) & M_STATIC_USB_PLL_DIVQ)
+
+#define S_STATIC_USB_PLL_DIVFI 8
+#define M_STATIC_USB_PLL_DIVFI 0x1ffU
+#define V_STATIC_USB_PLL_DIVFI(x) ((x) << S_STATIC_USB_PLL_DIVFI)
+#define G_STATIC_USB_PLL_DIVFI(x) (((x) >> S_STATIC_USB_PLL_DIVFI) & M_STATIC_USB_PLL_DIVFI)
+
+#define S_STATIC_USB_PLL_DIVR 2
+#define M_STATIC_USB_PLL_DIVR 0x3fU
+#define V_STATIC_USB_PLL_DIVR(x) ((x) << S_STATIC_USB_PLL_DIVR)
+#define G_STATIC_USB_PLL_DIVR(x) (((x) >> S_STATIC_USB_PLL_DIVR) & M_STATIC_USB_PLL_DIVR)
+
+#define S_STATIC_USB_PLL_BYPASS 1
+#define V_STATIC_USB_PLL_BYPASS(x) ((x) << S_STATIC_USB_PLL_BYPASS)
+#define F_STATIC_USB_PLL_BYPASS V_STATIC_USB_PLL_BYPASS(1U)
+
+#define S_STATIC_USB_PLL_RESET 0
+#define V_STATIC_USB_PLL_RESET(x) ((x) << S_STATIC_USB_PLL_RESET)
+#define F_STATIC_USB_PLL_RESET V_STATIC_USB_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_C_PLL_CONF2 0x60d4
#define S_T5_STATIC_C_PLL_DCO_BYPASS 23
@@ -13019,6 +16392,26 @@
#define V_STATIC_C_PLL_LOCKTUNE(x) ((x) << S_STATIC_C_PLL_LOCKTUNE)
#define G_STATIC_C_PLL_LOCKTUNE(x) (((x) >> S_STATIC_C_PLL_LOCKTUNE) & M_STATIC_C_PLL_LOCKTUNE)
+#define A_DBG_STATIC_USB_PLL_CONF2 0x60d4
+
+#define S_STATIC_USB_PLL_SSMF 5
+#define M_STATIC_USB_PLL_SSMF 0xfU
+#define V_STATIC_USB_PLL_SSMF(x) ((x) << S_STATIC_USB_PLL_SSMF)
+#define G_STATIC_USB_PLL_SSMF(x) (((x) >> S_STATIC_USB_PLL_SSMF) & M_STATIC_USB_PLL_SSMF)
+
+#define S_STATIC_USB_PLL_SSMD 2
+#define M_STATIC_USB_PLL_SSMD 0x7U
+#define V_STATIC_USB_PLL_SSMD(x) ((x) << S_STATIC_USB_PLL_SSMD)
+#define G_STATIC_USB_PLL_SSMD(x) (((x) >> S_STATIC_USB_PLL_SSMD) & M_STATIC_USB_PLL_SSMD)
+
+#define S_STATIC_USB_PLL_SSDS 1
+#define V_STATIC_USB_PLL_SSDS(x) ((x) << S_STATIC_USB_PLL_SSDS)
+#define F_STATIC_USB_PLL_SSDS V_STATIC_USB_PLL_SSDS(1U)
+
+#define S_STATIC_USB_PLL_SSE 0
+#define V_STATIC_USB_PLL_SSE(x) ((x) << S_STATIC_USB_PLL_SSE)
+#define F_STATIC_USB_PLL_SSE V_STATIC_USB_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_C_PLL_CONF3 0x60d8
#define S_T5_STATIC_C_PLL_MULTPRE 30
@@ -13082,8 +16475,58 @@
#define V_T6_STATIC_C_PLL_RANGEA(x) ((x) << S_T6_STATIC_C_PLL_RANGEA)
#define G_T6_STATIC_C_PLL_RANGEA(x) (((x) >> S_T6_STATIC_C_PLL_RANGEA) & M_T6_STATIC_C_PLL_RANGEA)
+#define A_DBG_STATIC_XGPHY_PLL_CONF1 0x60d8
+
+#define S_STATIC_XGPHY_PLL_RANGE 22
+#define M_STATIC_XGPHY_PLL_RANGE 0x7U
+#define V_STATIC_XGPHY_PLL_RANGE(x) ((x) << S_STATIC_XGPHY_PLL_RANGE)
+#define G_STATIC_XGPHY_PLL_RANGE(x) (((x) >> S_STATIC_XGPHY_PLL_RANGE) & M_STATIC_XGPHY_PLL_RANGE)
+
+#define S_STATIC_XGPHY_PLL_DIVQ 17
+#define M_STATIC_XGPHY_PLL_DIVQ 0x1fU
+#define V_STATIC_XGPHY_PLL_DIVQ(x) ((x) << S_STATIC_XGPHY_PLL_DIVQ)
+#define G_STATIC_XGPHY_PLL_DIVQ(x) (((x) >> S_STATIC_XGPHY_PLL_DIVQ) & M_STATIC_XGPHY_PLL_DIVQ)
+
+#define S_STATIC_XGPHY_PLL_DIVFI 8
+#define M_STATIC_XGPHY_PLL_DIVFI 0x1ffU
+#define V_STATIC_XGPHY_PLL_DIVFI(x) ((x) << S_STATIC_XGPHY_PLL_DIVFI)
+#define G_STATIC_XGPHY_PLL_DIVFI(x) (((x) >> S_STATIC_XGPHY_PLL_DIVFI) & M_STATIC_XGPHY_PLL_DIVFI)
+
+#define S_STATIC_XGPHY_PLL_DIVR 2
+#define M_STATIC_XGPHY_PLL_DIVR 0x3fU
+#define V_STATIC_XGPHY_PLL_DIVR(x) ((x) << S_STATIC_XGPHY_PLL_DIVR)
+#define G_STATIC_XGPHY_PLL_DIVR(x) (((x) >> S_STATIC_XGPHY_PLL_DIVR) & M_STATIC_XGPHY_PLL_DIVR)
+
+#define S_STATIC_XGPHY_PLL_BYPASS 1
+#define V_STATIC_XGPHY_PLL_BYPASS(x) ((x) << S_STATIC_XGPHY_PLL_BYPASS)
+#define F_STATIC_XGPHY_PLL_BYPASS V_STATIC_XGPHY_PLL_BYPASS(1U)
+
+#define S_STATIC_XGPHY_PLL_RESET 0
+#define V_STATIC_XGPHY_PLL_RESET(x) ((x) << S_STATIC_XGPHY_PLL_RESET)
+#define F_STATIC_XGPHY_PLL_RESET V_STATIC_XGPHY_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_C_PLL_CONF4 0x60dc
#define A_DBG_STATIC_C_PLL_CONF4 0x60dc
+#define A_DBG_STATIC_XGPHY_PLL_CONF2 0x60dc
+
+#define S_STATIC_XGPHY_PLL_SSMF 5
+#define M_STATIC_XGPHY_PLL_SSMF 0xfU
+#define V_STATIC_XGPHY_PLL_SSMF(x) ((x) << S_STATIC_XGPHY_PLL_SSMF)
+#define G_STATIC_XGPHY_PLL_SSMF(x) (((x) >> S_STATIC_XGPHY_PLL_SSMF) & M_STATIC_XGPHY_PLL_SSMF)
+
+#define S_STATIC_XGPHY_PLL_SSMD 2
+#define M_STATIC_XGPHY_PLL_SSMD 0x7U
+#define V_STATIC_XGPHY_PLL_SSMD(x) ((x) << S_STATIC_XGPHY_PLL_SSMD)
+#define G_STATIC_XGPHY_PLL_SSMD(x) (((x) >> S_STATIC_XGPHY_PLL_SSMD) & M_STATIC_XGPHY_PLL_SSMD)
+
+#define S_STATIC_XGPHY_PLL_SSDS 1
+#define V_STATIC_XGPHY_PLL_SSDS(x) ((x) << S_STATIC_XGPHY_PLL_SSDS)
+#define F_STATIC_XGPHY_PLL_SSDS V_STATIC_XGPHY_PLL_SSDS(1U)
+
+#define S_STATIC_XGPHY_PLL_SSE 0
+#define V_STATIC_XGPHY_PLL_SSE(x) ((x) << S_STATIC_XGPHY_PLL_SSE)
+#define F_STATIC_XGPHY_PLL_SSE V_STATIC_XGPHY_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_C_PLL_CONF5 0x60e0
#define S_T5_STATIC_C_PLL_VCVTUNE 22
@@ -13140,6 +16583,40 @@
#define V_T6_STATIC_C_PLL_MULT(x) ((x) << S_T6_STATIC_C_PLL_MULT)
#define G_T6_STATIC_C_PLL_MULT(x) (((x) >> S_T6_STATIC_C_PLL_MULT) & M_T6_STATIC_C_PLL_MULT)
+#define A_DBG_STATIC_XGPBUS_PLL_CONF1 0x60e0
+
+#define S_STATIC_XGPBUS_SWRST_ 25
+#define V_STATIC_XGPBUS_SWRST_(x) ((x) << S_STATIC_XGPBUS_SWRST_)
+#define F_STATIC_XGPBUS_SWRST_ V_STATIC_XGPBUS_SWRST_(1U)
+
+#define S_STATIC_XGPBUS_PLL_RANGE 22
+#define M_STATIC_XGPBUS_PLL_RANGE 0x7U
+#define V_STATIC_XGPBUS_PLL_RANGE(x) ((x) << S_STATIC_XGPBUS_PLL_RANGE)
+#define G_STATIC_XGPBUS_PLL_RANGE(x) (((x) >> S_STATIC_XGPBUS_PLL_RANGE) & M_STATIC_XGPBUS_PLL_RANGE)
+
+#define S_STATIC_XGPBUS_PLL_DIVQ 17
+#define M_STATIC_XGPBUS_PLL_DIVQ 0x1fU
+#define V_STATIC_XGPBUS_PLL_DIVQ(x) ((x) << S_STATIC_XGPBUS_PLL_DIVQ)
+#define G_STATIC_XGPBUS_PLL_DIVQ(x) (((x) >> S_STATIC_XGPBUS_PLL_DIVQ) & M_STATIC_XGPBUS_PLL_DIVQ)
+
+#define S_STATIC_XGPBUS_PLL_DIVFI 8
+#define M_STATIC_XGPBUS_PLL_DIVFI 0x1ffU
+#define V_STATIC_XGPBUS_PLL_DIVFI(x) ((x) << S_STATIC_XGPBUS_PLL_DIVFI)
+#define G_STATIC_XGPBUS_PLL_DIVFI(x) (((x) >> S_STATIC_XGPBUS_PLL_DIVFI) & M_STATIC_XGPBUS_PLL_DIVFI)
+
+#define S_STATIC_XGPBUS_PLL_DIVR 2
+#define M_STATIC_XGPBUS_PLL_DIVR 0x3fU
+#define V_STATIC_XGPBUS_PLL_DIVR(x) ((x) << S_STATIC_XGPBUS_PLL_DIVR)
+#define G_STATIC_XGPBUS_PLL_DIVR(x) (((x) >> S_STATIC_XGPBUS_PLL_DIVR) & M_STATIC_XGPBUS_PLL_DIVR)
+
+#define S_STATIC_XGPBUS_PLL_BYPASS 1
+#define V_STATIC_XGPBUS_PLL_BYPASS(x) ((x) << S_STATIC_XGPBUS_PLL_BYPASS)
+#define F_STATIC_XGPBUS_PLL_BYPASS V_STATIC_XGPBUS_PLL_BYPASS(1U)
+
+#define S_STATIC_XGPBUS_PLL_RESET 0
+#define V_STATIC_XGPBUS_PLL_RESET(x) ((x) << S_STATIC_XGPBUS_PLL_RESET)
+#define F_STATIC_XGPBUS_PLL_RESET V_STATIC_XGPBUS_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_U_PLL_CONF1 0x60e4
#define S_T5_STATIC_U_PLL_MULTFRAC 8
@@ -13164,6 +16641,26 @@
#define V_STATIC_U_PLL_FFSLEWRATE(x) ((x) << S_STATIC_U_PLL_FFSLEWRATE)
#define G_STATIC_U_PLL_FFSLEWRATE(x) (((x) >> S_STATIC_U_PLL_FFSLEWRATE) & M_STATIC_U_PLL_FFSLEWRATE)
+#define A_DBG_STATIC_XGPBUS_PLL_CONF2 0x60e4
+
+#define S_STATIC_XGPBUS_PLL_SSMF 5
+#define M_STATIC_XGPBUS_PLL_SSMF 0xfU
+#define V_STATIC_XGPBUS_PLL_SSMF(x) ((x) << S_STATIC_XGPBUS_PLL_SSMF)
+#define G_STATIC_XGPBUS_PLL_SSMF(x) (((x) >> S_STATIC_XGPBUS_PLL_SSMF) & M_STATIC_XGPBUS_PLL_SSMF)
+
+#define S_STATIC_XGPBUS_PLL_SSMD 2
+#define M_STATIC_XGPBUS_PLL_SSMD 0x7U
+#define V_STATIC_XGPBUS_PLL_SSMD(x) ((x) << S_STATIC_XGPBUS_PLL_SSMD)
+#define G_STATIC_XGPBUS_PLL_SSMD(x) (((x) >> S_STATIC_XGPBUS_PLL_SSMD) & M_STATIC_XGPBUS_PLL_SSMD)
+
+#define S_STATIC_XGPBUS_PLL_SSDS 1
+#define V_STATIC_XGPBUS_PLL_SSDS(x) ((x) << S_STATIC_XGPBUS_PLL_SSDS)
+#define F_STATIC_XGPBUS_PLL_SSDS V_STATIC_XGPBUS_PLL_SSDS(1U)
+
+#define S_STATIC_XGPBUS_PLL_SSE 0
+#define V_STATIC_XGPBUS_PLL_SSE(x) ((x) << S_STATIC_XGPBUS_PLL_SSE)
+#define F_STATIC_XGPBUS_PLL_SSE V_STATIC_XGPBUS_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_U_PLL_CONF2 0x60e8
#define S_T5_STATIC_U_PLL_DCO_BYPASS 23
@@ -13246,6 +16743,36 @@
#define V_STATIC_U_PLL_LOCKTUNE(x) ((x) << S_STATIC_U_PLL_LOCKTUNE)
#define G_STATIC_U_PLL_LOCKTUNE(x) (((x) >> S_STATIC_U_PLL_LOCKTUNE) & M_STATIC_U_PLL_LOCKTUNE)
+#define A_DBG_STATIC_M1_PLL_CONF1 0x60e8
+
+#define S_STATIC_M1_PLL_RANGE 22
+#define M_STATIC_M1_PLL_RANGE 0x7U
+#define V_STATIC_M1_PLL_RANGE(x) ((x) << S_STATIC_M1_PLL_RANGE)
+#define G_STATIC_M1_PLL_RANGE(x) (((x) >> S_STATIC_M1_PLL_RANGE) & M_STATIC_M1_PLL_RANGE)
+
+#define S_STATIC_M1_PLL_DIVQ 17
+#define M_STATIC_M1_PLL_DIVQ 0x1fU
+#define V_STATIC_M1_PLL_DIVQ(x) ((x) << S_STATIC_M1_PLL_DIVQ)
+#define G_STATIC_M1_PLL_DIVQ(x) (((x) >> S_STATIC_M1_PLL_DIVQ) & M_STATIC_M1_PLL_DIVQ)
+
+#define S_STATIC_M1_PLL_DIVFI 8
+#define M_STATIC_M1_PLL_DIVFI 0x1ffU
+#define V_STATIC_M1_PLL_DIVFI(x) ((x) << S_STATIC_M1_PLL_DIVFI)
+#define G_STATIC_M1_PLL_DIVFI(x) (((x) >> S_STATIC_M1_PLL_DIVFI) & M_STATIC_M1_PLL_DIVFI)
+
+#define S_STATIC_M1_PLL_DIVR 2
+#define M_STATIC_M1_PLL_DIVR 0x3fU
+#define V_STATIC_M1_PLL_DIVR(x) ((x) << S_STATIC_M1_PLL_DIVR)
+#define G_STATIC_M1_PLL_DIVR(x) (((x) >> S_STATIC_M1_PLL_DIVR) & M_STATIC_M1_PLL_DIVR)
+
+#define S_STATIC_M1_PLL_BYPASS 1
+#define V_STATIC_M1_PLL_BYPASS(x) ((x) << S_STATIC_M1_PLL_BYPASS)
+#define F_STATIC_M1_PLL_BYPASS V_STATIC_M1_PLL_BYPASS(1U)
+
+#define S_STATIC_M1_PLL_RESET 0
+#define V_STATIC_M1_PLL_RESET(x) ((x) << S_STATIC_M1_PLL_RESET)
+#define F_STATIC_M1_PLL_RESET V_STATIC_M1_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_U_PLL_CONF3 0x60ec
#define S_T5_STATIC_U_PLL_MULTPRE 30
@@ -13309,6 +16836,26 @@
#define V_T6_STATIC_U_PLL_RANGEA(x) ((x) << S_T6_STATIC_U_PLL_RANGEA)
#define G_T6_STATIC_U_PLL_RANGEA(x) (((x) >> S_T6_STATIC_U_PLL_RANGEA) & M_T6_STATIC_U_PLL_RANGEA)
+#define A_DBG_STATIC_M1_PLL_CONF2 0x60ec
+
+#define S_STATIC_M1_PLL_SSMF 5
+#define M_STATIC_M1_PLL_SSMF 0xfU
+#define V_STATIC_M1_PLL_SSMF(x) ((x) << S_STATIC_M1_PLL_SSMF)
+#define G_STATIC_M1_PLL_SSMF(x) (((x) >> S_STATIC_M1_PLL_SSMF) & M_STATIC_M1_PLL_SSMF)
+
+#define S_STATIC_M1_PLL_SSMD 2
+#define M_STATIC_M1_PLL_SSMD 0x7U
+#define V_STATIC_M1_PLL_SSMD(x) ((x) << S_STATIC_M1_PLL_SSMD)
+#define G_STATIC_M1_PLL_SSMD(x) (((x) >> S_STATIC_M1_PLL_SSMD) & M_STATIC_M1_PLL_SSMD)
+
+#define S_STATIC_M1_PLL_SSDS 1
+#define V_STATIC_M1_PLL_SSDS(x) ((x) << S_STATIC_M1_PLL_SSDS)
+#define F_STATIC_M1_PLL_SSDS V_STATIC_M1_PLL_SSDS(1U)
+
+#define S_STATIC_M1_PLL_SSE 0
+#define V_STATIC_M1_PLL_SSE(x) ((x) << S_STATIC_M1_PLL_SSE)
+#define F_STATIC_M1_PLL_SSE V_STATIC_M1_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_U_PLL_CONF4 0x60f0
#define A_DBG_STATIC_U_PLL_CONF4 0x60f0
#define A_DBG_T5_STATIC_U_PLL_CONF5 0x60f4
@@ -13557,6 +17104,104 @@
#define V_GPIO19_OUT_VAL(x) ((x) << S_GPIO19_OUT_VAL)
#define F_GPIO19_OUT_VAL V_GPIO19_OUT_VAL(1U)
+#define A_DBG_GPIO_OEN 0x6100
+
+#define S_GPIO23_OEN 23
+#define V_GPIO23_OEN(x) ((x) << S_GPIO23_OEN)
+#define F_GPIO23_OEN V_GPIO23_OEN(1U)
+
+#define S_GPIO22_OEN 22
+#define V_GPIO22_OEN(x) ((x) << S_GPIO22_OEN)
+#define F_GPIO22_OEN V_GPIO22_OEN(1U)
+
+#define S_GPIO21_OEN 21
+#define V_GPIO21_OEN(x) ((x) << S_GPIO21_OEN)
+#define F_GPIO21_OEN V_GPIO21_OEN(1U)
+
+#define S_GPIO20_OEN 20
+#define V_GPIO20_OEN(x) ((x) << S_GPIO20_OEN)
+#define F_GPIO20_OEN V_GPIO20_OEN(1U)
+
+#define S_T7_GPIO19_OEN 19
+#define V_T7_GPIO19_OEN(x) ((x) << S_T7_GPIO19_OEN)
+#define F_T7_GPIO19_OEN V_T7_GPIO19_OEN(1U)
+
+#define S_T7_GPIO18_OEN 18
+#define V_T7_GPIO18_OEN(x) ((x) << S_T7_GPIO18_OEN)
+#define F_T7_GPIO18_OEN V_T7_GPIO18_OEN(1U)
+
+#define S_T7_GPIO17_OEN 17
+#define V_T7_GPIO17_OEN(x) ((x) << S_T7_GPIO17_OEN)
+#define F_T7_GPIO17_OEN V_T7_GPIO17_OEN(1U)
+
+#define S_T7_GPIO16_OEN 16
+#define V_T7_GPIO16_OEN(x) ((x) << S_T7_GPIO16_OEN)
+#define F_T7_GPIO16_OEN V_T7_GPIO16_OEN(1U)
+
+#define S_T7_GPIO15_OEN 15
+#define V_T7_GPIO15_OEN(x) ((x) << S_T7_GPIO15_OEN)
+#define F_T7_GPIO15_OEN V_T7_GPIO15_OEN(1U)
+
+#define S_T7_GPIO14_OEN 14
+#define V_T7_GPIO14_OEN(x) ((x) << S_T7_GPIO14_OEN)
+#define F_T7_GPIO14_OEN V_T7_GPIO14_OEN(1U)
+
+#define S_T7_GPIO13_OEN 13
+#define V_T7_GPIO13_OEN(x) ((x) << S_T7_GPIO13_OEN)
+#define F_T7_GPIO13_OEN V_T7_GPIO13_OEN(1U)
+
+#define S_T7_GPIO12_OEN 12
+#define V_T7_GPIO12_OEN(x) ((x) << S_T7_GPIO12_OEN)
+#define F_T7_GPIO12_OEN V_T7_GPIO12_OEN(1U)
+
+#define S_T7_GPIO11_OEN 11
+#define V_T7_GPIO11_OEN(x) ((x) << S_T7_GPIO11_OEN)
+#define F_T7_GPIO11_OEN V_T7_GPIO11_OEN(1U)
+
+#define S_T7_GPIO10_OEN 10
+#define V_T7_GPIO10_OEN(x) ((x) << S_T7_GPIO10_OEN)
+#define F_T7_GPIO10_OEN V_T7_GPIO10_OEN(1U)
+
+#define S_T7_GPIO9_OEN 9
+#define V_T7_GPIO9_OEN(x) ((x) << S_T7_GPIO9_OEN)
+#define F_T7_GPIO9_OEN V_T7_GPIO9_OEN(1U)
+
+#define S_T7_GPIO8_OEN 8
+#define V_T7_GPIO8_OEN(x) ((x) << S_T7_GPIO8_OEN)
+#define F_T7_GPIO8_OEN V_T7_GPIO8_OEN(1U)
+
+#define S_T7_GPIO7_OEN 7
+#define V_T7_GPIO7_OEN(x) ((x) << S_T7_GPIO7_OEN)
+#define F_T7_GPIO7_OEN V_T7_GPIO7_OEN(1U)
+
+#define S_T7_GPIO6_OEN 6
+#define V_T7_GPIO6_OEN(x) ((x) << S_T7_GPIO6_OEN)
+#define F_T7_GPIO6_OEN V_T7_GPIO6_OEN(1U)
+
+#define S_T7_GPIO5_OEN 5
+#define V_T7_GPIO5_OEN(x) ((x) << S_T7_GPIO5_OEN)
+#define F_T7_GPIO5_OEN V_T7_GPIO5_OEN(1U)
+
+#define S_T7_GPIO4_OEN 4
+#define V_T7_GPIO4_OEN(x) ((x) << S_T7_GPIO4_OEN)
+#define F_T7_GPIO4_OEN V_T7_GPIO4_OEN(1U)
+
+#define S_T7_GPIO3_OEN 3
+#define V_T7_GPIO3_OEN(x) ((x) << S_T7_GPIO3_OEN)
+#define F_T7_GPIO3_OEN V_T7_GPIO3_OEN(1U)
+
+#define S_T7_GPIO2_OEN 2
+#define V_T7_GPIO2_OEN(x) ((x) << S_T7_GPIO2_OEN)
+#define F_T7_GPIO2_OEN V_T7_GPIO2_OEN(1U)
+
+#define S_T7_GPIO1_OEN 1
+#define V_T7_GPIO1_OEN(x) ((x) << S_T7_GPIO1_OEN)
+#define F_T7_GPIO1_OEN V_T7_GPIO1_OEN(1U)
+
+#define S_T7_GPIO0_OEN 0
+#define V_T7_GPIO0_OEN(x) ((x) << S_T7_GPIO0_OEN)
+#define F_T7_GPIO0_OEN V_T7_GPIO0_OEN(1U)
+
#define A_DBG_PVT_REG_UPDATE_CTL 0x6104
#define S_FAST_UPDATE 8
@@ -13605,6 +17250,104 @@
#define V_GPIO16_IN(x) ((x) << S_GPIO16_IN)
#define F_GPIO16_IN V_GPIO16_IN(1U)
+#define A_DBG_GPIO_CHG_DET 0x6104
+
+#define S_GPIO23_CHG_DET 23
+#define V_GPIO23_CHG_DET(x) ((x) << S_GPIO23_CHG_DET)
+#define F_GPIO23_CHG_DET V_GPIO23_CHG_DET(1U)
+
+#define S_GPIO22_CHG_DET 22
+#define V_GPIO22_CHG_DET(x) ((x) << S_GPIO22_CHG_DET)
+#define F_GPIO22_CHG_DET V_GPIO22_CHG_DET(1U)
+
+#define S_GPIO21_CHG_DET 21
+#define V_GPIO21_CHG_DET(x) ((x) << S_GPIO21_CHG_DET)
+#define F_GPIO21_CHG_DET V_GPIO21_CHG_DET(1U)
+
+#define S_GPIO20_CHG_DET 20
+#define V_GPIO20_CHG_DET(x) ((x) << S_GPIO20_CHG_DET)
+#define F_GPIO20_CHG_DET V_GPIO20_CHG_DET(1U)
+
+#define S_T7_GPIO19_CHG_DET 19
+#define V_T7_GPIO19_CHG_DET(x) ((x) << S_T7_GPIO19_CHG_DET)
+#define F_T7_GPIO19_CHG_DET V_T7_GPIO19_CHG_DET(1U)
+
+#define S_T7_GPIO18_CHG_DET 18
+#define V_T7_GPIO18_CHG_DET(x) ((x) << S_T7_GPIO18_CHG_DET)
+#define F_T7_GPIO18_CHG_DET V_T7_GPIO18_CHG_DET(1U)
+
+#define S_T7_GPIO17_CHG_DET 17
+#define V_T7_GPIO17_CHG_DET(x) ((x) << S_T7_GPIO17_CHG_DET)
+#define F_T7_GPIO17_CHG_DET V_T7_GPIO17_CHG_DET(1U)
+
+#define S_T7_GPIO16_CHG_DET 16
+#define V_T7_GPIO16_CHG_DET(x) ((x) << S_T7_GPIO16_CHG_DET)
+#define F_T7_GPIO16_CHG_DET V_T7_GPIO16_CHG_DET(1U)
+
+#define S_T7_GPIO15_CHG_DET 15
+#define V_T7_GPIO15_CHG_DET(x) ((x) << S_T7_GPIO15_CHG_DET)
+#define F_T7_GPIO15_CHG_DET V_T7_GPIO15_CHG_DET(1U)
+
+#define S_T7_GPIO14_CHG_DET 14
+#define V_T7_GPIO14_CHG_DET(x) ((x) << S_T7_GPIO14_CHG_DET)
+#define F_T7_GPIO14_CHG_DET V_T7_GPIO14_CHG_DET(1U)
+
+#define S_T7_GPIO13_CHG_DET 13
+#define V_T7_GPIO13_CHG_DET(x) ((x) << S_T7_GPIO13_CHG_DET)
+#define F_T7_GPIO13_CHG_DET V_T7_GPIO13_CHG_DET(1U)
+
+#define S_T7_GPIO12_CHG_DET 12
+#define V_T7_GPIO12_CHG_DET(x) ((x) << S_T7_GPIO12_CHG_DET)
+#define F_T7_GPIO12_CHG_DET V_T7_GPIO12_CHG_DET(1U)
+
+#define S_T7_GPIO11_CHG_DET 11
+#define V_T7_GPIO11_CHG_DET(x) ((x) << S_T7_GPIO11_CHG_DET)
+#define F_T7_GPIO11_CHG_DET V_T7_GPIO11_CHG_DET(1U)
+
+#define S_T7_GPIO10_CHG_DET 10
+#define V_T7_GPIO10_CHG_DET(x) ((x) << S_T7_GPIO10_CHG_DET)
+#define F_T7_GPIO10_CHG_DET V_T7_GPIO10_CHG_DET(1U)
+
+#define S_T7_GPIO9_CHG_DET 9
+#define V_T7_GPIO9_CHG_DET(x) ((x) << S_T7_GPIO9_CHG_DET)
+#define F_T7_GPIO9_CHG_DET V_T7_GPIO9_CHG_DET(1U)
+
+#define S_T7_GPIO8_CHG_DET 8
+#define V_T7_GPIO8_CHG_DET(x) ((x) << S_T7_GPIO8_CHG_DET)
+#define F_T7_GPIO8_CHG_DET V_T7_GPIO8_CHG_DET(1U)
+
+#define S_T7_GPIO7_CHG_DET 7
+#define V_T7_GPIO7_CHG_DET(x) ((x) << S_T7_GPIO7_CHG_DET)
+#define F_T7_GPIO7_CHG_DET V_T7_GPIO7_CHG_DET(1U)
+
+#define S_T7_GPIO6_CHG_DET 6
+#define V_T7_GPIO6_CHG_DET(x) ((x) << S_T7_GPIO6_CHG_DET)
+#define F_T7_GPIO6_CHG_DET V_T7_GPIO6_CHG_DET(1U)
+
+#define S_T7_GPIO5_CHG_DET 5
+#define V_T7_GPIO5_CHG_DET(x) ((x) << S_T7_GPIO5_CHG_DET)
+#define F_T7_GPIO5_CHG_DET V_T7_GPIO5_CHG_DET(1U)
+
+#define S_T7_GPIO4_CHG_DET 4
+#define V_T7_GPIO4_CHG_DET(x) ((x) << S_T7_GPIO4_CHG_DET)
+#define F_T7_GPIO4_CHG_DET V_T7_GPIO4_CHG_DET(1U)
+
+#define S_T7_GPIO3_CHG_DET 3
+#define V_T7_GPIO3_CHG_DET(x) ((x) << S_T7_GPIO3_CHG_DET)
+#define F_T7_GPIO3_CHG_DET V_T7_GPIO3_CHG_DET(1U)
+
+#define S_T7_GPIO2_CHG_DET 2
+#define V_T7_GPIO2_CHG_DET(x) ((x) << S_T7_GPIO2_CHG_DET)
+#define F_T7_GPIO2_CHG_DET V_T7_GPIO2_CHG_DET(1U)
+
+#define S_T7_GPIO1_CHG_DET 1
+#define V_T7_GPIO1_CHG_DET(x) ((x) << S_T7_GPIO1_CHG_DET)
+#define F_T7_GPIO1_CHG_DET V_T7_GPIO1_CHG_DET(1U)
+
+#define S_T7_GPIO0_CHG_DET 0
+#define V_T7_GPIO0_CHG_DET(x) ((x) << S_T7_GPIO0_CHG_DET)
+#define F_T7_GPIO0_CHG_DET V_T7_GPIO0_CHG_DET(1U)
+
#define A_DBG_PVT_REG_LAST_MEASUREMENT 0x6108
#define S_LAST_MEASUREMENT_SELECT 8
@@ -13964,6 +17707,22 @@
#define V_GPIO0_PE_EN(x) ((x) << S_GPIO0_PE_EN)
#define F_GPIO0_PE_EN V_GPIO0_PE_EN(1U)
+#define S_GPIO23_PE_EN 23
+#define V_GPIO23_PE_EN(x) ((x) << S_GPIO23_PE_EN)
+#define F_GPIO23_PE_EN V_GPIO23_PE_EN(1U)
+
+#define S_GPIO22_PE_EN 22
+#define V_GPIO22_PE_EN(x) ((x) << S_GPIO22_PE_EN)
+#define F_GPIO22_PE_EN V_GPIO22_PE_EN(1U)
+
+#define S_GPIO21_PE_EN 21
+#define V_GPIO21_PE_EN(x) ((x) << S_GPIO21_PE_EN)
+#define F_GPIO21_PE_EN V_GPIO21_PE_EN(1U)
+
+#define S_GPIO20_PE_EN 20
+#define V_GPIO20_PE_EN(x) ((x) << S_GPIO20_PE_EN)
+#define F_GPIO20_PE_EN V_GPIO20_PE_EN(1U)
+
#define A_DBG_PVT_REG_THRESHOLD 0x611c
#define S_PVT_CALIBRATION_DONE 8
@@ -14084,6 +17843,22 @@
#define V_GPIO0_PS_EN(x) ((x) << S_GPIO0_PS_EN)
#define F_GPIO0_PS_EN V_GPIO0_PS_EN(1U)
+#define S_GPIO23_PS_EN 23
+#define V_GPIO23_PS_EN(x) ((x) << S_GPIO23_PS_EN)
+#define F_GPIO23_PS_EN V_GPIO23_PS_EN(1U)
+
+#define S_GPIO22_PS_EN 22
+#define V_GPIO22_PS_EN(x) ((x) << S_GPIO22_PS_EN)
+#define F_GPIO22_PS_EN V_GPIO22_PS_EN(1U)
+
+#define S_GPIO21_PS_EN 21
+#define V_GPIO21_PS_EN(x) ((x) << S_GPIO21_PS_EN)
+#define F_GPIO21_PS_EN V_GPIO21_PS_EN(1U)
+
+#define S_GPIO20_PS_EN 20
+#define V_GPIO20_PS_EN(x) ((x) << S_GPIO20_PS_EN)
+#define F_GPIO20_PS_EN V_GPIO20_PS_EN(1U)
+
#define A_DBG_PVT_REG_IN_TERMP 0x6120
#define S_REG_IN_TERMP_B 4
@@ -14254,6 +18029,17 @@
#define V_STATIC_U_PLL_VREGTUNE(x) ((x) << S_STATIC_U_PLL_VREGTUNE)
#define G_STATIC_U_PLL_VREGTUNE(x) (((x) >> S_STATIC_U_PLL_VREGTUNE) & M_STATIC_U_PLL_VREGTUNE)
+#define A_DBG_STATIC_PLL_LOCK_WAIT_CONF 0x6150
+
+#define S_STATIC_WAIT_LOCK 24
+#define V_STATIC_WAIT_LOCK(x) ((x) << S_STATIC_WAIT_LOCK)
+#define F_STATIC_WAIT_LOCK V_STATIC_WAIT_LOCK(1U)
+
+#define S_STATIC_LOCK_WAIT_TIME 0
+#define M_STATIC_LOCK_WAIT_TIME 0xffffffU
+#define V_STATIC_LOCK_WAIT_TIME(x) ((x) << S_STATIC_LOCK_WAIT_TIME)
+#define G_STATIC_LOCK_WAIT_TIME(x) (((x) >> S_STATIC_LOCK_WAIT_TIME) & M_STATIC_LOCK_WAIT_TIME)
+
#define A_DBG_STATIC_C_PLL_CONF6 0x6154
#define S_STATIC_C_PLL_VREGTUNE 0
@@ -14303,13 +18089,274 @@
#define A_DBG_CUST_EFUSE_BYTE24_27 0x6178
#define A_DBG_CUST_EFUSE_BYTE28_31 0x617c
#define A_DBG_CUST_EFUSE_BYTE32_35 0x6180
+#define A_DBG_GPIO_INT_ENABLE 0x6180
+
+#define S_GPIO23 23
+#define V_GPIO23(x) ((x) << S_GPIO23)
+#define F_GPIO23 V_GPIO23(1U)
+
+#define S_GPIO22 22
+#define V_GPIO22(x) ((x) << S_GPIO22)
+#define F_GPIO22 V_GPIO22(1U)
+
+#define S_GPIO21 21
+#define V_GPIO21(x) ((x) << S_GPIO21)
+#define F_GPIO21 V_GPIO21(1U)
+
+#define S_GPIO20 20
+#define V_GPIO20(x) ((x) << S_GPIO20)
+#define F_GPIO20 V_GPIO20(1U)
+
+#define S_T7_GPIO19 19
+#define V_T7_GPIO19(x) ((x) << S_T7_GPIO19)
+#define F_T7_GPIO19 V_T7_GPIO19(1U)
+
+#define S_T7_GPIO18 18
+#define V_T7_GPIO18(x) ((x) << S_T7_GPIO18)
+#define F_T7_GPIO18 V_T7_GPIO18(1U)
+
+#define S_T7_GPIO17 17
+#define V_T7_GPIO17(x) ((x) << S_T7_GPIO17)
+#define F_T7_GPIO17 V_T7_GPIO17(1U)
+
+#define S_T7_GPIO16 16
+#define V_T7_GPIO16(x) ((x) << S_T7_GPIO16)
+#define F_T7_GPIO16 V_T7_GPIO16(1U)
+
#define A_DBG_CUST_EFUSE_BYTE36_39 0x6184
+#define A_DBG_GPIO_INT_CAUSE 0x6184
#define A_DBG_CUST_EFUSE_BYTE40_43 0x6188
+#define A_T7_DBG_GPIO_ACT_LOW 0x6188
+
+#define S_GPIO23_ACT_LOW 23
+#define V_GPIO23_ACT_LOW(x) ((x) << S_GPIO23_ACT_LOW)
+#define F_GPIO23_ACT_LOW V_GPIO23_ACT_LOW(1U)
+
+#define S_GPIO22_ACT_LOW 22
+#define V_GPIO22_ACT_LOW(x) ((x) << S_GPIO22_ACT_LOW)
+#define F_GPIO22_ACT_LOW V_GPIO22_ACT_LOW(1U)
+
+#define S_GPIO21_ACT_LOW 21
+#define V_GPIO21_ACT_LOW(x) ((x) << S_GPIO21_ACT_LOW)
+#define F_GPIO21_ACT_LOW V_GPIO21_ACT_LOW(1U)
+
+#define S_GPIO20_ACT_LOW 20
+#define V_GPIO20_ACT_LOW(x) ((x) << S_GPIO20_ACT_LOW)
+#define F_GPIO20_ACT_LOW V_GPIO20_ACT_LOW(1U)
+
+#define S_T7_GPIO19_ACT_LOW 19
+#define V_T7_GPIO19_ACT_LOW(x) ((x) << S_T7_GPIO19_ACT_LOW)
+#define F_T7_GPIO19_ACT_LOW V_T7_GPIO19_ACT_LOW(1U)
+
+#define S_T7_GPIO18_ACT_LOW 18
+#define V_T7_GPIO18_ACT_LOW(x) ((x) << S_T7_GPIO18_ACT_LOW)
+#define F_T7_GPIO18_ACT_LOW V_T7_GPIO18_ACT_LOW(1U)
+
+#define S_T7_GPIO17_ACT_LOW 17
+#define V_T7_GPIO17_ACT_LOW(x) ((x) << S_T7_GPIO17_ACT_LOW)
+#define F_T7_GPIO17_ACT_LOW V_T7_GPIO17_ACT_LOW(1U)
+
+#define S_T7_GPIO16_ACT_LOW 16
+#define V_T7_GPIO16_ACT_LOW(x) ((x) << S_T7_GPIO16_ACT_LOW)
+#define F_T7_GPIO16_ACT_LOW V_T7_GPIO16_ACT_LOW(1U)
+
#define A_DBG_CUST_EFUSE_BYTE44_47 0x618c
+#define A_DBG_DDR_CAL 0x618c
+
+#define S_CAL_ENDC 9
+#define V_CAL_ENDC(x) ((x) << S_CAL_ENDC)
+#define F_CAL_ENDC V_CAL_ENDC(1U)
+
+#define S_CAL_MODE 8
+#define V_CAL_MODE(x) ((x) << S_CAL_MODE)
+#define F_CAL_MODE V_CAL_MODE(1U)
+
+#define S_CAL_REFSEL 7
+#define V_CAL_REFSEL(x) ((x) << S_CAL_REFSEL)
+#define F_CAL_REFSEL V_CAL_REFSEL(1U)
+
+#define S_PD 6
+#define V_PD(x) ((x) << S_PD)
+#define F_PD V_PD(1U)
+
+#define S_CAL_RST 5
+#define V_CAL_RST(x) ((x) << S_CAL_RST)
+#define F_CAL_RST V_CAL_RST(1U)
+
+#define S_CAL_READ 4
+#define V_CAL_READ(x) ((x) << S_CAL_READ)
+#define F_CAL_READ V_CAL_READ(1U)
+
+#define S_CAL_SC 3
+#define V_CAL_SC(x) ((x) << S_CAL_SC)
+#define F_CAL_SC V_CAL_SC(1U)
+
+#define S_CAL_LC 2
+#define V_CAL_LC(x) ((x) << S_CAL_LC)
+#define F_CAL_LC V_CAL_LC(1U)
+
+#define S_CAL_CCAL 1
+#define V_CAL_CCAL(x) ((x) << S_CAL_CCAL)
+#define F_CAL_CCAL V_CAL_CCAL(1U)
+
+#define S_CAL_RES 0
+#define V_CAL_RES(x) ((x) << S_CAL_RES)
+#define F_CAL_RES V_CAL_RES(1U)
+
#define A_DBG_CUST_EFUSE_BYTE48_51 0x6190
+#define A_DBG_EFUSE_CTL_0 0x6190
+
+#define S_EFUSE_CSB 31
+#define V_EFUSE_CSB(x) ((x) << S_EFUSE_CSB)
+#define F_EFUSE_CSB V_EFUSE_CSB(1U)
+
+#define S_EFUSE_STROBE 30
+#define V_EFUSE_STROBE(x) ((x) << S_EFUSE_STROBE)
+#define F_EFUSE_STROBE V_EFUSE_STROBE(1U)
+
+#define S_EFUSE_LOAD 29
+#define V_EFUSE_LOAD(x) ((x) << S_EFUSE_LOAD)
+#define F_EFUSE_LOAD V_EFUSE_LOAD(1U)
+
+#define S_EFUSE_PGENB 28
+#define V_EFUSE_PGENB(x) ((x) << S_EFUSE_PGENB)
+#define F_EFUSE_PGENB V_EFUSE_PGENB(1U)
+
+#define S_EFUSE_PS 27
+#define V_EFUSE_PS(x) ((x) << S_EFUSE_PS)
+#define F_EFUSE_PS V_EFUSE_PS(1U)
+
+#define S_EFUSE_MR 26
+#define V_EFUSE_MR(x) ((x) << S_EFUSE_MR)
+#define F_EFUSE_MR V_EFUSE_MR(1U)
+
+#define S_EFUSE_PD 25
+#define V_EFUSE_PD(x) ((x) << S_EFUSE_PD)
+#define F_EFUSE_PD V_EFUSE_PD(1U)
+
+#define S_EFUSE_RWL 24
+#define V_EFUSE_RWL(x) ((x) << S_EFUSE_RWL)
+#define F_EFUSE_RWL V_EFUSE_RWL(1U)
+
+#define S_EFUSE_RSB 23
+#define V_EFUSE_RSB(x) ((x) << S_EFUSE_RSB)
+#define F_EFUSE_RSB V_EFUSE_RSB(1U)
+
+#define S_EFUSE_TRCS 22
+#define V_EFUSE_TRCS(x) ((x) << S_EFUSE_TRCS)
+#define F_EFUSE_TRCS V_EFUSE_TRCS(1U)
+
+#define S_EFUSE_AT 20
+#define M_EFUSE_AT 0x3U
+#define V_EFUSE_AT(x) ((x) << S_EFUSE_AT)
+#define G_EFUSE_AT(x) (((x) >> S_EFUSE_AT) & M_EFUSE_AT)
+
+#define S_EFUSE_RD_STATE 16
+#define M_EFUSE_RD_STATE 0xfU
+#define V_EFUSE_RD_STATE(x) ((x) << S_EFUSE_RD_STATE)
+#define G_EFUSE_RD_STATE(x) (((x) >> S_EFUSE_RD_STATE) & M_EFUSE_RD_STATE)
+
+#define S_EFUSE_BUSY 15
+#define V_EFUSE_BUSY(x) ((x) << S_EFUSE_BUSY)
+#define F_EFUSE_BUSY V_EFUSE_BUSY(1U)
+
+#define S_EFUSE_WR_RD 13
+#define M_EFUSE_WR_RD 0x3U
+#define V_EFUSE_WR_RD(x) ((x) << S_EFUSE_WR_RD)
+#define G_EFUSE_WR_RD(x) (((x) >> S_EFUSE_WR_RD) & M_EFUSE_WR_RD)
+
+#define S_EFUSE_A 0
+#define M_EFUSE_A 0x7ffU
+#define V_EFUSE_A(x) ((x) << S_EFUSE_A)
+#define G_EFUSE_A(x) (((x) >> S_EFUSE_A) & M_EFUSE_A)
+
#define A_DBG_CUST_EFUSE_BYTE52_55 0x6194
+#define A_DBG_EFUSE_CTL_1 0x6194
#define A_DBG_CUST_EFUSE_BYTE56_59 0x6198
+#define A_DBG_EFUSE_RD_CTL 0x6198
+
+#define S_EFUSE_RD_ID 6
+#define M_EFUSE_RD_ID 0x3U
+#define V_EFUSE_RD_ID(x) ((x) << S_EFUSE_RD_ID)
+#define G_EFUSE_RD_ID(x) (((x) >> S_EFUSE_RD_ID) & M_EFUSE_RD_ID)
+
+#define S_EFUSE_RD_ADDR 0
+#define M_EFUSE_RD_ADDR 0x3fU
+#define V_EFUSE_RD_ADDR(x) ((x) << S_EFUSE_RD_ADDR)
+#define G_EFUSE_RD_ADDR(x) (((x) >> S_EFUSE_RD_ADDR) & M_EFUSE_RD_ADDR)
+
#define A_DBG_CUST_EFUSE_BYTE60_63 0x619c
+#define A_DBG_EFUSE_RD_DATA 0x619c
+#define A_DBG_EFUSE_TIME_0 0x61a0
+
+#define S_EFUSE_TIME_1 16
+#define M_EFUSE_TIME_1 0xffffU
+#define V_EFUSE_TIME_1(x) ((x) << S_EFUSE_TIME_1)
+#define G_EFUSE_TIME_1(x) (((x) >> S_EFUSE_TIME_1) & M_EFUSE_TIME_1)
+
+#define S_EFUSE_TIME_0 0
+#define M_EFUSE_TIME_0 0xffffU
+#define V_EFUSE_TIME_0(x) ((x) << S_EFUSE_TIME_0)
+#define G_EFUSE_TIME_0(x) (((x) >> S_EFUSE_TIME_0) & M_EFUSE_TIME_0)
+
+#define A_DBG_EFUSE_TIME_1 0x61a4
+
+#define S_EFUSE_TIME_3 16
+#define M_EFUSE_TIME_3 0xffffU
+#define V_EFUSE_TIME_3(x) ((x) << S_EFUSE_TIME_3)
+#define G_EFUSE_TIME_3(x) (((x) >> S_EFUSE_TIME_3) & M_EFUSE_TIME_3)
+
+#define S_EFUSE_TIME_2 0
+#define M_EFUSE_TIME_2 0xffffU
+#define V_EFUSE_TIME_2(x) ((x) << S_EFUSE_TIME_2)
+#define G_EFUSE_TIME_2(x) (((x) >> S_EFUSE_TIME_2) & M_EFUSE_TIME_2)
+
+#define A_DBG_EFUSE_TIME_2 0x61a8
+
+#define S_EFUSE_TIME_5 16
+#define M_EFUSE_TIME_5 0xffffU
+#define V_EFUSE_TIME_5(x) ((x) << S_EFUSE_TIME_5)
+#define G_EFUSE_TIME_5(x) (((x) >> S_EFUSE_TIME_5) & M_EFUSE_TIME_5)
+
+#define S_EFUSE_TIME_4 0
+#define M_EFUSE_TIME_4 0xffffU
+#define V_EFUSE_TIME_4(x) ((x) << S_EFUSE_TIME_4)
+#define G_EFUSE_TIME_4(x) (((x) >> S_EFUSE_TIME_4) & M_EFUSE_TIME_4)
+
+#define A_DBG_EFUSE_TIME_3 0x61ac
+
+#define S_EFUSE_TIME_7 16
+#define M_EFUSE_TIME_7 0xffffU
+#define V_EFUSE_TIME_7(x) ((x) << S_EFUSE_TIME_7)
+#define G_EFUSE_TIME_7(x) (((x) >> S_EFUSE_TIME_7) & M_EFUSE_TIME_7)
+
+#define S_EFUSE_TIME_6 0
+#define M_EFUSE_TIME_6 0xffffU
+#define V_EFUSE_TIME_6(x) ((x) << S_EFUSE_TIME_6)
+#define G_EFUSE_TIME_6(x) (((x) >> S_EFUSE_TIME_6) & M_EFUSE_TIME_6)
+
+#define A_DBG_VREF_CTL 0x61b0
+
+#define S_VREF_SEL_1 15
+#define V_VREF_SEL_1(x) ((x) << S_VREF_SEL_1)
+#define F_VREF_SEL_1 V_VREF_SEL_1(1U)
+
+#define S_VREF_R_1 8
+#define M_VREF_R_1 0x7fU
+#define V_VREF_R_1(x) ((x) << S_VREF_R_1)
+#define G_VREF_R_1(x) (((x) >> S_VREF_R_1) & M_VREF_R_1)
+
+#define S_VREF_SEL_0 7
+#define V_VREF_SEL_0(x) ((x) << S_VREF_SEL_0)
+#define F_VREF_SEL_0 V_VREF_SEL_0(1U)
+
+#define S_VREF_R_0 0
+#define M_VREF_R_0 0x7fU
+#define V_VREF_R_0(x) ((x) << S_VREF_R_0)
+#define G_VREF_R_0(x) (((x) >> S_VREF_R_0) & M_VREF_R_0)
+
+#define A_DBG_FPGA_EFUSE_CTL 0x61b4
+#define A_DBG_FPGA_EFUSE_DATA 0x61b8
/* registers for module MC */
#define MC_BASE_ADDR 0x6200
@@ -16048,31 +20095,91 @@
#define V_THRESHOLD0_EN(x) ((x) << S_THRESHOLD0_EN)
#define F_THRESHOLD0_EN V_THRESHOLD0_EN(1U)
+#define A_MA_CLIENT0_PR_THRESHOLD 0x7700
+
+#define S_T7_THRESHOLD1_EN 31
+#define V_T7_THRESHOLD1_EN(x) ((x) << S_T7_THRESHOLD1_EN)
+#define F_T7_THRESHOLD1_EN V_T7_THRESHOLD1_EN(1U)
+
+#define S_T7_THRESHOLD1 16
+#define M_T7_THRESHOLD1 0x7fffU
+#define V_T7_THRESHOLD1(x) ((x) << S_T7_THRESHOLD1)
+#define G_T7_THRESHOLD1(x) (((x) >> S_T7_THRESHOLD1) & M_T7_THRESHOLD1)
+
+#define S_T7_THRESHOLD0_EN 15
+#define V_T7_THRESHOLD0_EN(x) ((x) << S_T7_THRESHOLD0_EN)
+#define F_T7_THRESHOLD0_EN V_T7_THRESHOLD0_EN(1U)
+
+#define S_T7_THRESHOLD0 0
+#define M_T7_THRESHOLD0 0x7fffU
+#define V_T7_THRESHOLD0(x) ((x) << S_T7_THRESHOLD0)
+#define G_T7_THRESHOLD0(x) (((x) >> S_T7_THRESHOLD0) & M_T7_THRESHOLD0)
+
#define A_MA_CLIENT0_WR_LATENCY_THRESHOLD 0x7704
+#define A_MA_CLIENT0_CR_THRESHOLD 0x7704
+
+#define S_CREDITSHAPER_EN 31
+#define V_CREDITSHAPER_EN(x) ((x) << S_CREDITSHAPER_EN)
+#define F_CREDITSHAPER_EN V_CREDITSHAPER_EN(1U)
+
+#define S_CREDIT_MAX 16
+#define M_CREDIT_MAX 0xfffU
+#define V_CREDIT_MAX(x) ((x) << S_CREDIT_MAX)
+#define G_CREDIT_MAX(x) (((x) >> S_CREDIT_MAX) & M_CREDIT_MAX)
+
+#define S_CREDIT_VAL 0
+#define M_CREDIT_VAL 0xfffU
+#define V_CREDIT_VAL(x) ((x) << S_CREDIT_VAL)
+#define G_CREDIT_VAL(x) (((x) >> S_CREDIT_VAL) & M_CREDIT_VAL)
+
#define A_MA_CLIENT1_RD_LATENCY_THRESHOLD 0x7708
+#define A_MA_CLIENT1_PR_THRESHOLD 0x7708
#define A_MA_CLIENT1_WR_LATENCY_THRESHOLD 0x770c
+#define A_MA_CLIENT1_CR_THRESHOLD 0x770c
#define A_MA_CLIENT2_RD_LATENCY_THRESHOLD 0x7710
+#define A_MA_CLIENT2_PR_THRESHOLD 0x7710
#define A_MA_CLIENT2_WR_LATENCY_THRESHOLD 0x7714
+#define A_MA_CLIENT2_CR_THRESHOLD 0x7714
#define A_MA_CLIENT3_RD_LATENCY_THRESHOLD 0x7718
+#define A_MA_CLIENT3_PR_THRESHOLD 0x7718
#define A_MA_CLIENT3_WR_LATENCY_THRESHOLD 0x771c
+#define A_MA_CLIENT3_CR_THRESHOLD 0x771c
#define A_MA_CLIENT4_RD_LATENCY_THRESHOLD 0x7720
+#define A_MA_CLIENT4_PR_THRESHOLD 0x7720
#define A_MA_CLIENT4_WR_LATENCY_THRESHOLD 0x7724
+#define A_MA_CLIENT4_CR_THRESHOLD 0x7724
#define A_MA_CLIENT5_RD_LATENCY_THRESHOLD 0x7728
+#define A_MA_CLIENT5_PR_THRESHOLD 0x7728
#define A_MA_CLIENT5_WR_LATENCY_THRESHOLD 0x772c
+#define A_MA_CLIENT5_CR_THRESHOLD 0x772c
#define A_MA_CLIENT6_RD_LATENCY_THRESHOLD 0x7730
+#define A_MA_CLIENT6_PR_THRESHOLD 0x7730
#define A_MA_CLIENT6_WR_LATENCY_THRESHOLD 0x7734
+#define A_MA_CLIENT6_CR_THRESHOLD 0x7734
#define A_MA_CLIENT7_RD_LATENCY_THRESHOLD 0x7738
+#define A_MA_CLIENT7_PR_THRESHOLD 0x7738
#define A_MA_CLIENT7_WR_LATENCY_THRESHOLD 0x773c
+#define A_MA_CLIENT7_CR_THRESHOLD 0x773c
#define A_MA_CLIENT8_RD_LATENCY_THRESHOLD 0x7740
+#define A_MA_CLIENT8_PR_THRESHOLD 0x7740
#define A_MA_CLIENT8_WR_LATENCY_THRESHOLD 0x7744
+#define A_MA_CLIENT8_CR_THRESHOLD 0x7744
#define A_MA_CLIENT9_RD_LATENCY_THRESHOLD 0x7748
+#define A_MA_CLIENT9_PR_THRESHOLD 0x7748
#define A_MA_CLIENT9_WR_LATENCY_THRESHOLD 0x774c
+#define A_MA_CLIENT9_CR_THRESHOLD 0x774c
#define A_MA_CLIENT10_RD_LATENCY_THRESHOLD 0x7750
+#define A_MA_CLIENT10_PR_THRESHOLD 0x7750
#define A_MA_CLIENT10_WR_LATENCY_THRESHOLD 0x7754
+#define A_MA_CLIENT10_CR_THRESHOLD 0x7754
#define A_MA_CLIENT11_RD_LATENCY_THRESHOLD 0x7758
+#define A_MA_CLIENT11_PR_THRESHOLD 0x7758
#define A_MA_CLIENT11_WR_LATENCY_THRESHOLD 0x775c
+#define A_MA_CLIENT11_CR_THRESHOLD 0x775c
#define A_MA_CLIENT12_RD_LATENCY_THRESHOLD 0x7760
+#define A_MA_CLIENT12_PR_THRESHOLD 0x7760
#define A_MA_CLIENT12_WR_LATENCY_THRESHOLD 0x7764
+#define A_MA_CLIENT12_CR_THRESHOLD 0x7764
#define A_MA_SGE_TH0_DEBUG_CNT 0x7768
#define S_DBG_READ_DATA_CNT 24
@@ -16103,10 +20210,359 @@
#define A_MA_TP_TH1_DEBUG_CNT 0x7780
#define A_MA_LE_DEBUG_CNT 0x7784
#define A_MA_CIM_DEBUG_CNT 0x7788
+#define A_MA_CIM_TH0_DEBUG_CNT 0x7788
#define A_MA_PCIE_DEBUG_CNT 0x778c
#define A_MA_PMTX_DEBUG_CNT 0x7790
#define A_MA_PMRX_DEBUG_CNT 0x7794
#define A_MA_HMA_DEBUG_CNT 0x7798
+#define A_MA_COR_ERROR_ENABLE1 0x779c
+
+#define S_ARB4_COR_WRQUEUE_ERROR_EN 9
+#define V_ARB4_COR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB4_COR_WRQUEUE_ERROR_EN)
+#define F_ARB4_COR_WRQUEUE_ERROR_EN V_ARB4_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB3_COR_WRQUEUE_ERROR_EN 8
+#define V_ARB3_COR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB3_COR_WRQUEUE_ERROR_EN)
+#define F_ARB3_COR_WRQUEUE_ERROR_EN V_ARB3_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB2_COR_WRQUEUE_ERROR_EN 7
+#define V_ARB2_COR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB2_COR_WRQUEUE_ERROR_EN)
+#define F_ARB2_COR_WRQUEUE_ERROR_EN V_ARB2_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB1_COR_WRQUEUE_ERROR_EN 6
+#define V_ARB1_COR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB1_COR_WRQUEUE_ERROR_EN)
+#define F_ARB1_COR_WRQUEUE_ERROR_EN V_ARB1_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB0_COR_WRQUEUE_ERROR_EN 5
+#define V_ARB0_COR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB0_COR_WRQUEUE_ERROR_EN)
+#define F_ARB0_COR_WRQUEUE_ERROR_EN V_ARB0_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB4_COR_RDQUEUE_ERROR_EN 4
+#define V_ARB4_COR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB4_COR_RDQUEUE_ERROR_EN)
+#define F_ARB4_COR_RDQUEUE_ERROR_EN V_ARB4_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_ARB3_COR_RDQUEUE_ERROR_EN 3
+#define V_ARB3_COR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB3_COR_RDQUEUE_ERROR_EN)
+#define F_ARB3_COR_RDQUEUE_ERROR_EN V_ARB3_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_ARB2_COR_RDQUEUE_ERROR_EN 2
+#define V_ARB2_COR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB2_COR_RDQUEUE_ERROR_EN)
+#define F_ARB2_COR_RDQUEUE_ERROR_EN V_ARB2_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_ARB1_COR_RDQUEUE_ERROR_EN 1
+#define V_ARB1_COR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB1_COR_RDQUEUE_ERROR_EN)
+#define F_ARB1_COR_RDQUEUE_ERROR_EN V_ARB1_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_ARB0_COR_RDQUEUE_ERROR_EN 0
+#define V_ARB0_COR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB0_COR_RDQUEUE_ERROR_EN)
+#define F_ARB0_COR_RDQUEUE_ERROR_EN V_ARB0_COR_RDQUEUE_ERROR_EN(1U)
+
+#define A_MA_COR_ERROR_STATUS1 0x77a0
+
+#define S_ARB4_COR_WRQUEUE_ERROR 9
+#define V_ARB4_COR_WRQUEUE_ERROR(x) ((x) << S_ARB4_COR_WRQUEUE_ERROR)
+#define F_ARB4_COR_WRQUEUE_ERROR V_ARB4_COR_WRQUEUE_ERROR(1U)
+
+#define S_ARB3_COR_WRQUEUE_ERROR 8
+#define V_ARB3_COR_WRQUEUE_ERROR(x) ((x) << S_ARB3_COR_WRQUEUE_ERROR)
+#define F_ARB3_COR_WRQUEUE_ERROR V_ARB3_COR_WRQUEUE_ERROR(1U)
+
+#define S_ARB2_COR_WRQUEUE_ERROR 7
+#define V_ARB2_COR_WRQUEUE_ERROR(x) ((x) << S_ARB2_COR_WRQUEUE_ERROR)
+#define F_ARB2_COR_WRQUEUE_ERROR V_ARB2_COR_WRQUEUE_ERROR(1U)
+
+#define S_ARB1_COR_WRQUEUE_ERROR 6
+#define V_ARB1_COR_WRQUEUE_ERROR(x) ((x) << S_ARB1_COR_WRQUEUE_ERROR)
+#define F_ARB1_COR_WRQUEUE_ERROR V_ARB1_COR_WRQUEUE_ERROR(1U)
+
+#define S_ARB0_COR_WRQUEUE_ERROR 5
+#define V_ARB0_COR_WRQUEUE_ERROR(x) ((x) << S_ARB0_COR_WRQUEUE_ERROR)
+#define F_ARB0_COR_WRQUEUE_ERROR V_ARB0_COR_WRQUEUE_ERROR(1U)
+
+#define S_ARB4_COR_RDQUEUE_ERROR 4
+#define V_ARB4_COR_RDQUEUE_ERROR(x) ((x) << S_ARB4_COR_RDQUEUE_ERROR)
+#define F_ARB4_COR_RDQUEUE_ERROR V_ARB4_COR_RDQUEUE_ERROR(1U)
+
+#define S_ARB3_COR_RDQUEUE_ERROR 3
+#define V_ARB3_COR_RDQUEUE_ERROR(x) ((x) << S_ARB3_COR_RDQUEUE_ERROR)
+#define F_ARB3_COR_RDQUEUE_ERROR V_ARB3_COR_RDQUEUE_ERROR(1U)
+
+#define S_ARB2_COR_RDQUEUE_ERROR 2
+#define V_ARB2_COR_RDQUEUE_ERROR(x) ((x) << S_ARB2_COR_RDQUEUE_ERROR)
+#define F_ARB2_COR_RDQUEUE_ERROR V_ARB2_COR_RDQUEUE_ERROR(1U)
+
+#define S_ARB1_COR_RDQUEUE_ERROR 1
+#define V_ARB1_COR_RDQUEUE_ERROR(x) ((x) << S_ARB1_COR_RDQUEUE_ERROR)
+#define F_ARB1_COR_RDQUEUE_ERROR V_ARB1_COR_RDQUEUE_ERROR(1U)
+
+#define S_ARB0_COR_RDQUEUE_ERROR 0
+#define V_ARB0_COR_RDQUEUE_ERROR(x) ((x) << S_ARB0_COR_RDQUEUE_ERROR)
+#define F_ARB0_COR_RDQUEUE_ERROR V_ARB0_COR_RDQUEUE_ERROR(1U)
+
+#define A_MA_DBG_CTL 0x77a4
+
+#define S_DATAH_SEL 20
+#define V_DATAH_SEL(x) ((x) << S_DATAH_SEL)
+#define F_DATAH_SEL V_DATAH_SEL(1U)
+
+#define S_EN_DBG 16
+#define V_EN_DBG(x) ((x) << S_EN_DBG)
+#define F_EN_DBG V_EN_DBG(1U)
+
+#define S_T7_SEL 0
+#define M_T7_SEL 0xffU
+#define V_T7_SEL(x) ((x) << S_T7_SEL)
+#define G_T7_SEL(x) (((x) >> S_T7_SEL) & M_T7_SEL)
+
+#define A_MA_DBG_DATA 0x77a8
+#define A_MA_COR_ERROR_ENABLE2 0x77b0
+
+#define S_CL14_COR_WRQUEUE_ERROR_EN 14
+#define V_CL14_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL14_COR_WRQUEUE_ERROR_EN)
+#define F_CL14_COR_WRQUEUE_ERROR_EN V_CL14_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL13_COR_WRQUEUE_ERROR_EN 13
+#define V_CL13_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL13_COR_WRQUEUE_ERROR_EN)
+#define F_CL13_COR_WRQUEUE_ERROR_EN V_CL13_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL12_COR_WRQUEUE_ERROR_EN 12
+#define V_CL12_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL12_COR_WRQUEUE_ERROR_EN)
+#define F_CL12_COR_WRQUEUE_ERROR_EN V_CL12_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL11_COR_WRQUEUE_ERROR_EN 11
+#define V_CL11_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL11_COR_WRQUEUE_ERROR_EN)
+#define F_CL11_COR_WRQUEUE_ERROR_EN V_CL11_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL10_COR_WRQUEUE_ERROR_EN 10
+#define V_CL10_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL10_COR_WRQUEUE_ERROR_EN)
+#define F_CL10_COR_WRQUEUE_ERROR_EN V_CL10_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL9_COR_WRQUEUE_ERROR_EN 9
+#define V_CL9_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL9_COR_WRQUEUE_ERROR_EN)
+#define F_CL9_COR_WRQUEUE_ERROR_EN V_CL9_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL8_COR_WRQUEUE_ERROR_EN 8
+#define V_CL8_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL8_COR_WRQUEUE_ERROR_EN)
+#define F_CL8_COR_WRQUEUE_ERROR_EN V_CL8_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL7_COR_WRQUEUE_ERROR_EN 7
+#define V_CL7_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL7_COR_WRQUEUE_ERROR_EN)
+#define F_CL7_COR_WRQUEUE_ERROR_EN V_CL7_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL6_COR_WRQUEUE_ERROR_EN 6
+#define V_CL6_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL6_COR_WRQUEUE_ERROR_EN)
+#define F_CL6_COR_WRQUEUE_ERROR_EN V_CL6_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL5_COR_WRQUEUE_ERROR_EN 5
+#define V_CL5_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL5_COR_WRQUEUE_ERROR_EN)
+#define F_CL5_COR_WRQUEUE_ERROR_EN V_CL5_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL4_COR_WRQUEUE_ERROR_EN 4
+#define V_CL4_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL4_COR_WRQUEUE_ERROR_EN)
+#define F_CL4_COR_WRQUEUE_ERROR_EN V_CL4_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL3_COR_WRQUEUE_ERROR_EN 3
+#define V_CL3_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL3_COR_WRQUEUE_ERROR_EN)
+#define F_CL3_COR_WRQUEUE_ERROR_EN V_CL3_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL2_COR_WRQUEUE_ERROR_EN 2
+#define V_CL2_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL2_COR_WRQUEUE_ERROR_EN)
+#define F_CL2_COR_WRQUEUE_ERROR_EN V_CL2_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL1_COR_WRQUEUE_ERROR_EN 1
+#define V_CL1_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL1_COR_WRQUEUE_ERROR_EN)
+#define F_CL1_COR_WRQUEUE_ERROR_EN V_CL1_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL0_COR_WRQUEUE_ERROR_EN 0
+#define V_CL0_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL0_COR_WRQUEUE_ERROR_EN)
+#define F_CL0_COR_WRQUEUE_ERROR_EN V_CL0_COR_WRQUEUE_ERROR_EN(1U)
+
+#define A_MA_COR_ERROR_STATUS2 0x77b4
+
+#define S_CL14_COR_WRQUEUE_ERROR 14
+#define V_CL14_COR_WRQUEUE_ERROR(x) ((x) << S_CL14_COR_WRQUEUE_ERROR)
+#define F_CL14_COR_WRQUEUE_ERROR V_CL14_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL13_COR_WRQUEUE_ERROR 13
+#define V_CL13_COR_WRQUEUE_ERROR(x) ((x) << S_CL13_COR_WRQUEUE_ERROR)
+#define F_CL13_COR_WRQUEUE_ERROR V_CL13_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL12_COR_WRQUEUE_ERROR 12
+#define V_CL12_COR_WRQUEUE_ERROR(x) ((x) << S_CL12_COR_WRQUEUE_ERROR)
+#define F_CL12_COR_WRQUEUE_ERROR V_CL12_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL11_COR_WRQUEUE_ERROR 11
+#define V_CL11_COR_WRQUEUE_ERROR(x) ((x) << S_CL11_COR_WRQUEUE_ERROR)
+#define F_CL11_COR_WRQUEUE_ERROR V_CL11_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL10_COR_WRQUEUE_ERROR 10
+#define V_CL10_COR_WRQUEUE_ERROR(x) ((x) << S_CL10_COR_WRQUEUE_ERROR)
+#define F_CL10_COR_WRQUEUE_ERROR V_CL10_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL9_COR_WRQUEUE_ERROR 9
+#define V_CL9_COR_WRQUEUE_ERROR(x) ((x) << S_CL9_COR_WRQUEUE_ERROR)
+#define F_CL9_COR_WRQUEUE_ERROR V_CL9_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL8_COR_WRQUEUE_ERROR 8
+#define V_CL8_COR_WRQUEUE_ERROR(x) ((x) << S_CL8_COR_WRQUEUE_ERROR)
+#define F_CL8_COR_WRQUEUE_ERROR V_CL8_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL7_COR_WRQUEUE_ERROR 7
+#define V_CL7_COR_WRQUEUE_ERROR(x) ((x) << S_CL7_COR_WRQUEUE_ERROR)
+#define F_CL7_COR_WRQUEUE_ERROR V_CL7_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL6_COR_WRQUEUE_ERROR 6
+#define V_CL6_COR_WRQUEUE_ERROR(x) ((x) << S_CL6_COR_WRQUEUE_ERROR)
+#define F_CL6_COR_WRQUEUE_ERROR V_CL6_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL5_COR_WRQUEUE_ERROR 5
+#define V_CL5_COR_WRQUEUE_ERROR(x) ((x) << S_CL5_COR_WRQUEUE_ERROR)
+#define F_CL5_COR_WRQUEUE_ERROR V_CL5_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL4_COR_WRQUEUE_ERROR 4
+#define V_CL4_COR_WRQUEUE_ERROR(x) ((x) << S_CL4_COR_WRQUEUE_ERROR)
+#define F_CL4_COR_WRQUEUE_ERROR V_CL4_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL3_COR_WRQUEUE_ERROR 3
+#define V_CL3_COR_WRQUEUE_ERROR(x) ((x) << S_CL3_COR_WRQUEUE_ERROR)
+#define F_CL3_COR_WRQUEUE_ERROR V_CL3_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL2_COR_WRQUEUE_ERROR 2
+#define V_CL2_COR_WRQUEUE_ERROR(x) ((x) << S_CL2_COR_WRQUEUE_ERROR)
+#define F_CL2_COR_WRQUEUE_ERROR V_CL2_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL1_COR_WRQUEUE_ERROR 1
+#define V_CL1_COR_WRQUEUE_ERROR(x) ((x) << S_CL1_COR_WRQUEUE_ERROR)
+#define F_CL1_COR_WRQUEUE_ERROR V_CL1_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL0_COR_WRQUEUE_ERROR 0
+#define V_CL0_COR_WRQUEUE_ERROR(x) ((x) << S_CL0_COR_WRQUEUE_ERROR)
+#define F_CL0_COR_WRQUEUE_ERROR V_CL0_COR_WRQUEUE_ERROR(1U)
+
+#define A_MA_COR_ERROR_ENABLE3 0x77b8
+
+#define S_CL14_COR_RDQUEUE_ERROR_EN 14
+#define V_CL14_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL14_COR_RDQUEUE_ERROR_EN)
+#define F_CL14_COR_RDQUEUE_ERROR_EN V_CL14_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL13_COR_RDQUEUE_ERROR_EN 13
+#define V_CL13_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL13_COR_RDQUEUE_ERROR_EN)
+#define F_CL13_COR_RDQUEUE_ERROR_EN V_CL13_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL12_COR_RDQUEUE_ERROR_EN 12
+#define V_CL12_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL12_COR_RDQUEUE_ERROR_EN)
+#define F_CL12_COR_RDQUEUE_ERROR_EN V_CL12_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL11_COR_RDQUEUE_ERROR_EN 11
+#define V_CL11_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL11_COR_RDQUEUE_ERROR_EN)
+#define F_CL11_COR_RDQUEUE_ERROR_EN V_CL11_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL10_COR_RDQUEUE_ERROR_EN 10
+#define V_CL10_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL10_COR_RDQUEUE_ERROR_EN)
+#define F_CL10_COR_RDQUEUE_ERROR_EN V_CL10_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL9_COR_RDQUEUE_ERROR_EN 9
+#define V_CL9_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL9_COR_RDQUEUE_ERROR_EN)
+#define F_CL9_COR_RDQUEUE_ERROR_EN V_CL9_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL8_COR_RDQUEUE_ERROR_EN 8
+#define V_CL8_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL8_COR_RDQUEUE_ERROR_EN)
+#define F_CL8_COR_RDQUEUE_ERROR_EN V_CL8_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL7_COR_RDQUEUE_ERROR_EN 7
+#define V_CL7_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL7_COR_RDQUEUE_ERROR_EN)
+#define F_CL7_COR_RDQUEUE_ERROR_EN V_CL7_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL6_COR_RDQUEUE_ERROR_EN 6
+#define V_CL6_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL6_COR_RDQUEUE_ERROR_EN)
+#define F_CL6_COR_RDQUEUE_ERROR_EN V_CL6_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL5_COR_RDQUEUE_ERROR_EN 5
+#define V_CL5_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL5_COR_RDQUEUE_ERROR_EN)
+#define F_CL5_COR_RDQUEUE_ERROR_EN V_CL5_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL4_COR_RDQUEUE_ERROR_EN 4
+#define V_CL4_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL4_COR_RDQUEUE_ERROR_EN)
+#define F_CL4_COR_RDQUEUE_ERROR_EN V_CL4_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL3_COR_RDQUEUE_ERROR_EN 3
+#define V_CL3_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL3_COR_RDQUEUE_ERROR_EN)
+#define F_CL3_COR_RDQUEUE_ERROR_EN V_CL3_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL2_COR_RDQUEUE_ERROR_EN 2
+#define V_CL2_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL2_COR_RDQUEUE_ERROR_EN)
+#define F_CL2_COR_RDQUEUE_ERROR_EN V_CL2_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL1_COR_RDQUEUE_ERROR_EN 1
+#define V_CL1_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL1_COR_RDQUEUE_ERROR_EN)
+#define F_CL1_COR_RDQUEUE_ERROR_EN V_CL1_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL0_COR_RDQUEUE_ERROR_EN 0
+#define V_CL0_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL0_COR_RDQUEUE_ERROR_EN)
+#define F_CL0_COR_RDQUEUE_ERROR_EN V_CL0_COR_RDQUEUE_ERROR_EN(1U)
+
+#define A_MA_COR_ERROR_STATUS3 0x77bc
+
+#define S_CL14_COR_RDQUEUE_ERROR 14
+#define V_CL14_COR_RDQUEUE_ERROR(x) ((x) << S_CL14_COR_RDQUEUE_ERROR)
+#define F_CL14_COR_RDQUEUE_ERROR V_CL14_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL13_COR_RDQUEUE_ERROR 13
+#define V_CL13_COR_RDQUEUE_ERROR(x) ((x) << S_CL13_COR_RDQUEUE_ERROR)
+#define F_CL13_COR_RDQUEUE_ERROR V_CL13_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL12_COR_RDQUEUE_ERROR 12
+#define V_CL12_COR_RDQUEUE_ERROR(x) ((x) << S_CL12_COR_RDQUEUE_ERROR)
+#define F_CL12_COR_RDQUEUE_ERROR V_CL12_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL11_COR_RDQUEUE_ERROR 11
+#define V_CL11_COR_RDQUEUE_ERROR(x) ((x) << S_CL11_COR_RDQUEUE_ERROR)
+#define F_CL11_COR_RDQUEUE_ERROR V_CL11_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL10_COR_RDQUEUE_ERROR 10
+#define V_CL10_COR_RDQUEUE_ERROR(x) ((x) << S_CL10_COR_RDQUEUE_ERROR)
+#define F_CL10_COR_RDQUEUE_ERROR V_CL10_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL9_COR_RDQUEUE_ERROR 9
+#define V_CL9_COR_RDQUEUE_ERROR(x) ((x) << S_CL9_COR_RDQUEUE_ERROR)
+#define F_CL9_COR_RDQUEUE_ERROR V_CL9_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL8_COR_RDQUEUE_ERROR 8
+#define V_CL8_COR_RDQUEUE_ERROR(x) ((x) << S_CL8_COR_RDQUEUE_ERROR)
+#define F_CL8_COR_RDQUEUE_ERROR V_CL8_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL7_COR_RDQUEUE_ERROR 7
+#define V_CL7_COR_RDQUEUE_ERROR(x) ((x) << S_CL7_COR_RDQUEUE_ERROR)
+#define F_CL7_COR_RDQUEUE_ERROR V_CL7_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL6_COR_RDQUEUE_ERROR 6
+#define V_CL6_COR_RDQUEUE_ERROR(x) ((x) << S_CL6_COR_RDQUEUE_ERROR)
+#define F_CL6_COR_RDQUEUE_ERROR V_CL6_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL5_COR_RDQUEUE_ERROR 5
+#define V_CL5_COR_RDQUEUE_ERROR(x) ((x) << S_CL5_COR_RDQUEUE_ERROR)
+#define F_CL5_COR_RDQUEUE_ERROR V_CL5_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL4_COR_RDQUEUE_ERROR 4
+#define V_CL4_COR_RDQUEUE_ERROR(x) ((x) << S_CL4_COR_RDQUEUE_ERROR)
+#define F_CL4_COR_RDQUEUE_ERROR V_CL4_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL3_COR_RDQUEUE_ERROR 3
+#define V_CL3_COR_RDQUEUE_ERROR(x) ((x) << S_CL3_COR_RDQUEUE_ERROR)
+#define F_CL3_COR_RDQUEUE_ERROR V_CL3_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL2_COR_RDQUEUE_ERROR 2
+#define V_CL2_COR_RDQUEUE_ERROR(x) ((x) << S_CL2_COR_RDQUEUE_ERROR)
+#define F_CL2_COR_RDQUEUE_ERROR V_CL2_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL1_COR_RDQUEUE_ERROR 1
+#define V_CL1_COR_RDQUEUE_ERROR(x) ((x) << S_CL1_COR_RDQUEUE_ERROR)
+#define F_CL1_COR_RDQUEUE_ERROR V_CL1_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL0_COR_RDQUEUE_ERROR 0
+#define V_CL0_COR_RDQUEUE_ERROR(x) ((x) << S_CL0_COR_RDQUEUE_ERROR)
+#define F_CL0_COR_RDQUEUE_ERROR V_CL0_COR_RDQUEUE_ERROR(1U)
+
#define A_MA_EDRAM0_BAR 0x77c0
#define S_EDRAM0_BASE 16
@@ -16119,6 +20575,16 @@
#define V_EDRAM0_SIZE(x) ((x) << S_EDRAM0_SIZE)
#define G_EDRAM0_SIZE(x) (((x) >> S_EDRAM0_SIZE) & M_EDRAM0_SIZE)
+#define S_T7_EDRAM0_BASE 16
+#define M_T7_EDRAM0_BASE 0xffffU
+#define V_T7_EDRAM0_BASE(x) ((x) << S_T7_EDRAM0_BASE)
+#define G_T7_EDRAM0_BASE(x) (((x) >> S_T7_EDRAM0_BASE) & M_T7_EDRAM0_BASE)
+
+#define S_T7_EDRAM0_SIZE 0
+#define M_T7_EDRAM0_SIZE 0xffffU
+#define V_T7_EDRAM0_SIZE(x) ((x) << S_T7_EDRAM0_SIZE)
+#define G_T7_EDRAM0_SIZE(x) (((x) >> S_T7_EDRAM0_SIZE) & M_T7_EDRAM0_SIZE)
+
#define A_MA_EDRAM1_BAR 0x77c4
#define S_EDRAM1_BASE 16
@@ -16131,6 +20597,16 @@
#define V_EDRAM1_SIZE(x) ((x) << S_EDRAM1_SIZE)
#define G_EDRAM1_SIZE(x) (((x) >> S_EDRAM1_SIZE) & M_EDRAM1_SIZE)
+#define S_T7_EDRAM1_BASE 16
+#define M_T7_EDRAM1_BASE 0xffffU
+#define V_T7_EDRAM1_BASE(x) ((x) << S_T7_EDRAM1_BASE)
+#define G_T7_EDRAM1_BASE(x) (((x) >> S_T7_EDRAM1_BASE) & M_T7_EDRAM1_BASE)
+
+#define S_T7_EDRAM1_SIZE 0
+#define M_T7_EDRAM1_SIZE 0xffffU
+#define V_T7_EDRAM1_SIZE(x) ((x) << S_T7_EDRAM1_SIZE)
+#define G_T7_EDRAM1_SIZE(x) (((x) >> S_T7_EDRAM1_SIZE) & M_T7_EDRAM1_SIZE)
+
#define A_MA_EXT_MEMORY_BAR 0x77c8
#define S_EXT_MEM_BASE 16
@@ -16155,6 +20631,16 @@
#define V_EXT_MEM0_SIZE(x) ((x) << S_EXT_MEM0_SIZE)
#define G_EXT_MEM0_SIZE(x) (((x) >> S_EXT_MEM0_SIZE) & M_EXT_MEM0_SIZE)
+#define S_T7_EXT_MEM0_BASE 16
+#define M_T7_EXT_MEM0_BASE 0xffffU
+#define V_T7_EXT_MEM0_BASE(x) ((x) << S_T7_EXT_MEM0_BASE)
+#define G_T7_EXT_MEM0_BASE(x) (((x) >> S_T7_EXT_MEM0_BASE) & M_T7_EXT_MEM0_BASE)
+
+#define S_T7_EXT_MEM0_SIZE 0
+#define M_T7_EXT_MEM0_SIZE 0xffffU
+#define V_T7_EXT_MEM0_SIZE(x) ((x) << S_T7_EXT_MEM0_SIZE)
+#define G_T7_EXT_MEM0_SIZE(x) (((x) >> S_T7_EXT_MEM0_SIZE) & M_T7_EXT_MEM0_SIZE)
+
#define A_MA_HOST_MEMORY_BAR 0x77cc
#define S_HMA_BASE 16
@@ -16167,6 +20653,16 @@
#define V_HMA_SIZE(x) ((x) << S_HMA_SIZE)
#define G_HMA_SIZE(x) (((x) >> S_HMA_SIZE) & M_HMA_SIZE)
+#define S_HMATARGETBASE 16
+#define M_HMATARGETBASE 0xffffU
+#define V_HMATARGETBASE(x) ((x) << S_HMATARGETBASE)
+#define G_HMATARGETBASE(x) (((x) >> S_HMATARGETBASE) & M_HMATARGETBASE)
+
+#define S_T7_HMA_SIZE 0
+#define M_T7_HMA_SIZE 0xffffU
+#define V_T7_HMA_SIZE(x) ((x) << S_T7_HMA_SIZE)
+#define G_T7_HMA_SIZE(x) (((x) >> S_T7_HMA_SIZE) & M_T7_HMA_SIZE)
+
#define A_MA_EXT_MEM_PAGE_SIZE 0x77d0
#define S_BRC_MODE 2
@@ -16290,6 +20786,14 @@
#define V_MC_SPLIT(x) ((x) << S_MC_SPLIT)
#define F_MC_SPLIT V_MC_SPLIT(1U)
+#define S_EDC512 8
+#define V_EDC512(x) ((x) << S_EDC512)
+#define F_EDC512 V_EDC512(1U)
+
+#define S_MC_SPLIT_BOUNDARY 7
+#define V_MC_SPLIT_BOUNDARY(x) ((x) << S_MC_SPLIT_BOUNDARY)
+#define F_MC_SPLIT_BOUNDARY V_MC_SPLIT_BOUNDARY(1U)
+
#define A_MA_INT_ENABLE 0x77dc
#define S_MEM_PERR_INT_ENABLE 1
@@ -16475,6 +20979,55 @@
#define F_CL0_PAR_RDQUEUE_ERROR_EN V_CL0_PAR_RDQUEUE_ERROR_EN(1U)
#define A_MA_PARITY_ERROR_ENABLE1 0x77f0
+
+#define S_T7_ARB4_PAR_WRQUEUE_ERROR_EN 11
+#define V_T7_ARB4_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_ARB4_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_ARB4_PAR_WRQUEUE_ERROR_EN V_T7_ARB4_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB3_PAR_WRQUEUE_ERROR_EN 10
+#define V_T7_ARB3_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_ARB3_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_ARB3_PAR_WRQUEUE_ERROR_EN V_T7_ARB3_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB2_PAR_WRQUEUE_ERROR_EN 9
+#define V_T7_ARB2_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_ARB2_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_ARB2_PAR_WRQUEUE_ERROR_EN V_T7_ARB2_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB1_PAR_WRQUEUE_ERROR_EN 8
+#define V_T7_ARB1_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_ARB1_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_ARB1_PAR_WRQUEUE_ERROR_EN V_T7_ARB1_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB0_PAR_WRQUEUE_ERROR_EN 7
+#define V_T7_ARB0_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_ARB0_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_ARB0_PAR_WRQUEUE_ERROR_EN V_T7_ARB0_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB4_PAR_RDQUEUE_ERROR_EN 6
+#define V_T7_ARB4_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_T7_ARB4_PAR_RDQUEUE_ERROR_EN)
+#define F_T7_ARB4_PAR_RDQUEUE_ERROR_EN V_T7_ARB4_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB3_PAR_RDQUEUE_ERROR_EN 5
+#define V_T7_ARB3_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_T7_ARB3_PAR_RDQUEUE_ERROR_EN)
+#define F_T7_ARB3_PAR_RDQUEUE_ERROR_EN V_T7_ARB3_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB2_PAR_RDQUEUE_ERROR_EN 4
+#define V_T7_ARB2_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_T7_ARB2_PAR_RDQUEUE_ERROR_EN)
+#define F_T7_ARB2_PAR_RDQUEUE_ERROR_EN V_T7_ARB2_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB1_PAR_RDQUEUE_ERROR_EN 3
+#define V_T7_ARB1_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_T7_ARB1_PAR_RDQUEUE_ERROR_EN)
+#define F_T7_ARB1_PAR_RDQUEUE_ERROR_EN V_T7_ARB1_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB0_PAR_RDQUEUE_ERROR_EN 2
+#define V_T7_ARB0_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_T7_ARB0_PAR_RDQUEUE_ERROR_EN)
+#define F_T7_ARB0_PAR_RDQUEUE_ERROR_EN V_T7_ARB0_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_T7_TP_DMARBT_PAR_ERROR_EN 1
+#define V_T7_TP_DMARBT_PAR_ERROR_EN(x) ((x) << S_T7_TP_DMARBT_PAR_ERROR_EN)
+#define F_T7_TP_DMARBT_PAR_ERROR_EN V_T7_TP_DMARBT_PAR_ERROR_EN(1U)
+
+#define S_T7_LOGIC_FIFO_PAR_ERROR_EN 0
+#define V_T7_LOGIC_FIFO_PAR_ERROR_EN(x) ((x) << S_T7_LOGIC_FIFO_PAR_ERROR_EN)
+#define F_T7_LOGIC_FIFO_PAR_ERROR_EN V_T7_LOGIC_FIFO_PAR_ERROR_EN(1U)
+
#define A_MA_PARITY_ERROR_STATUS 0x77f4
#define S_TP_DMARBT_PAR_ERROR 31
@@ -16606,6 +21159,55 @@
#define F_CL0_PAR_RDQUEUE_ERROR V_CL0_PAR_RDQUEUE_ERROR(1U)
#define A_MA_PARITY_ERROR_STATUS1 0x77f4
+
+#define S_T7_ARB4_PAR_WRQUEUE_ERROR 11
+#define V_T7_ARB4_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_ARB4_PAR_WRQUEUE_ERROR)
+#define F_T7_ARB4_PAR_WRQUEUE_ERROR V_T7_ARB4_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_ARB3_PAR_WRQUEUE_ERROR 10
+#define V_T7_ARB3_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_ARB3_PAR_WRQUEUE_ERROR)
+#define F_T7_ARB3_PAR_WRQUEUE_ERROR V_T7_ARB3_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_ARB2_PAR_WRQUEUE_ERROR 9
+#define V_T7_ARB2_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_ARB2_PAR_WRQUEUE_ERROR)
+#define F_T7_ARB2_PAR_WRQUEUE_ERROR V_T7_ARB2_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_ARB1_PAR_WRQUEUE_ERROR 8
+#define V_T7_ARB1_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_ARB1_PAR_WRQUEUE_ERROR)
+#define F_T7_ARB1_PAR_WRQUEUE_ERROR V_T7_ARB1_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_ARB0_PAR_WRQUEUE_ERROR 7
+#define V_T7_ARB0_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_ARB0_PAR_WRQUEUE_ERROR)
+#define F_T7_ARB0_PAR_WRQUEUE_ERROR V_T7_ARB0_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_ARB4_PAR_RDQUEUE_ERROR 6
+#define V_T7_ARB4_PAR_RDQUEUE_ERROR(x) ((x) << S_T7_ARB4_PAR_RDQUEUE_ERROR)
+#define F_T7_ARB4_PAR_RDQUEUE_ERROR V_T7_ARB4_PAR_RDQUEUE_ERROR(1U)
+
+#define S_T7_ARB3_PAR_RDQUEUE_ERROR 5
+#define V_T7_ARB3_PAR_RDQUEUE_ERROR(x) ((x) << S_T7_ARB3_PAR_RDQUEUE_ERROR)
+#define F_T7_ARB3_PAR_RDQUEUE_ERROR V_T7_ARB3_PAR_RDQUEUE_ERROR(1U)
+
+#define S_T7_ARB2_PAR_RDQUEUE_ERROR 4
+#define V_T7_ARB2_PAR_RDQUEUE_ERROR(x) ((x) << S_T7_ARB2_PAR_RDQUEUE_ERROR)
+#define F_T7_ARB2_PAR_RDQUEUE_ERROR V_T7_ARB2_PAR_RDQUEUE_ERROR(1U)
+
+#define S_T7_ARB1_PAR_RDQUEUE_ERROR 3
+#define V_T7_ARB1_PAR_RDQUEUE_ERROR(x) ((x) << S_T7_ARB1_PAR_RDQUEUE_ERROR)
+#define F_T7_ARB1_PAR_RDQUEUE_ERROR V_T7_ARB1_PAR_RDQUEUE_ERROR(1U)
+
+#define S_T7_ARB0_PAR_RDQUEUE_ERROR 2
+#define V_T7_ARB0_PAR_RDQUEUE_ERROR(x) ((x) << S_T7_ARB0_PAR_RDQUEUE_ERROR)
+#define F_T7_ARB0_PAR_RDQUEUE_ERROR V_T7_ARB0_PAR_RDQUEUE_ERROR(1U)
+
+#define S_T7_TP_DMARBT_PAR_ERROR 1
+#define V_T7_TP_DMARBT_PAR_ERROR(x) ((x) << S_T7_TP_DMARBT_PAR_ERROR)
+#define F_T7_TP_DMARBT_PAR_ERROR V_T7_TP_DMARBT_PAR_ERROR(1U)
+
+#define S_T7_LOGIC_FIFO_PAR_ERROR 0
+#define V_T7_LOGIC_FIFO_PAR_ERROR(x) ((x) << S_T7_LOGIC_FIFO_PAR_ERROR)
+#define F_T7_LOGIC_FIFO_PAR_ERROR V_T7_LOGIC_FIFO_PAR_ERROR(1U)
+
#define A_MA_SGE_PCIE_COHERANCY_CTRL 0x77f8
#define S_BONUS_REG 6
@@ -16653,6 +21255,66 @@
#define V_ARB4_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB4_PAR_RDQUEUE_ERROR_EN)
#define F_ARB4_PAR_RDQUEUE_ERROR_EN V_ARB4_PAR_RDQUEUE_ERROR_EN(1U)
+#define S_CL14_PAR_WRQUEUE_ERROR_EN 14
+#define V_CL14_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL14_PAR_WRQUEUE_ERROR_EN)
+#define F_CL14_PAR_WRQUEUE_ERROR_EN V_CL14_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL13_PAR_WRQUEUE_ERROR_EN 13
+#define V_CL13_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL13_PAR_WRQUEUE_ERROR_EN)
+#define F_CL13_PAR_WRQUEUE_ERROR_EN V_CL13_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL12_PAR_WRQUEUE_ERROR_EN 12
+#define V_CL12_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL12_PAR_WRQUEUE_ERROR_EN)
+#define F_CL12_PAR_WRQUEUE_ERROR_EN V_CL12_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL11_PAR_WRQUEUE_ERROR_EN 11
+#define V_CL11_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL11_PAR_WRQUEUE_ERROR_EN)
+#define F_CL11_PAR_WRQUEUE_ERROR_EN V_CL11_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL10_PAR_WRQUEUE_ERROR_EN 10
+#define V_T7_CL10_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL10_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL10_PAR_WRQUEUE_ERROR_EN V_T7_CL10_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL9_PAR_WRQUEUE_ERROR_EN 9
+#define V_T7_CL9_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL9_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL9_PAR_WRQUEUE_ERROR_EN V_T7_CL9_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL8_PAR_WRQUEUE_ERROR_EN 8
+#define V_T7_CL8_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL8_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL8_PAR_WRQUEUE_ERROR_EN V_T7_CL8_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL7_PAR_WRQUEUE_ERROR_EN 7
+#define V_T7_CL7_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL7_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL7_PAR_WRQUEUE_ERROR_EN V_T7_CL7_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL6_PAR_WRQUEUE_ERROR_EN 6
+#define V_T7_CL6_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL6_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL6_PAR_WRQUEUE_ERROR_EN V_T7_CL6_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL5_PAR_WRQUEUE_ERROR_EN 5
+#define V_T7_CL5_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL5_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL5_PAR_WRQUEUE_ERROR_EN V_T7_CL5_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL4_PAR_WRQUEUE_ERROR_EN 4
+#define V_T7_CL4_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL4_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL4_PAR_WRQUEUE_ERROR_EN V_T7_CL4_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL3_PAR_WRQUEUE_ERROR_EN 3
+#define V_T7_CL3_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL3_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL3_PAR_WRQUEUE_ERROR_EN V_T7_CL3_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL2_PAR_WRQUEUE_ERROR_EN 2
+#define V_T7_CL2_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL2_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL2_PAR_WRQUEUE_ERROR_EN V_T7_CL2_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL1_PAR_WRQUEUE_ERROR_EN 1
+#define V_T7_CL1_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL1_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL1_PAR_WRQUEUE_ERROR_EN V_T7_CL1_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL0_PAR_WRQUEUE_ERROR_EN 0
+#define V_T7_CL0_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL0_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL0_PAR_WRQUEUE_ERROR_EN V_T7_CL0_PAR_WRQUEUE_ERROR_EN(1U)
+
#define A_MA_PARITY_ERROR_STATUS2 0x7804
#define S_ARB4_PAR_WRQUEUE_ERROR 1
@@ -16663,6 +21325,66 @@
#define V_ARB4_PAR_RDQUEUE_ERROR(x) ((x) << S_ARB4_PAR_RDQUEUE_ERROR)
#define F_ARB4_PAR_RDQUEUE_ERROR V_ARB4_PAR_RDQUEUE_ERROR(1U)
+#define S_CL14_PAR_WRQUEUE_ERROR 14
+#define V_CL14_PAR_WRQUEUE_ERROR(x) ((x) << S_CL14_PAR_WRQUEUE_ERROR)
+#define F_CL14_PAR_WRQUEUE_ERROR V_CL14_PAR_WRQUEUE_ERROR(1U)
+
+#define S_CL13_PAR_WRQUEUE_ERROR 13
+#define V_CL13_PAR_WRQUEUE_ERROR(x) ((x) << S_CL13_PAR_WRQUEUE_ERROR)
+#define F_CL13_PAR_WRQUEUE_ERROR V_CL13_PAR_WRQUEUE_ERROR(1U)
+
+#define S_CL12_PAR_WRQUEUE_ERROR 12
+#define V_CL12_PAR_WRQUEUE_ERROR(x) ((x) << S_CL12_PAR_WRQUEUE_ERROR)
+#define F_CL12_PAR_WRQUEUE_ERROR V_CL12_PAR_WRQUEUE_ERROR(1U)
+
+#define S_CL11_PAR_WRQUEUE_ERROR 11
+#define V_CL11_PAR_WRQUEUE_ERROR(x) ((x) << S_CL11_PAR_WRQUEUE_ERROR)
+#define F_CL11_PAR_WRQUEUE_ERROR V_CL11_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL10_PAR_WRQUEUE_ERROR 10
+#define V_T7_CL10_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL10_PAR_WRQUEUE_ERROR)
+#define F_T7_CL10_PAR_WRQUEUE_ERROR V_T7_CL10_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL9_PAR_WRQUEUE_ERROR 9
+#define V_T7_CL9_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL9_PAR_WRQUEUE_ERROR)
+#define F_T7_CL9_PAR_WRQUEUE_ERROR V_T7_CL9_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL8_PAR_WRQUEUE_ERROR 8
+#define V_T7_CL8_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL8_PAR_WRQUEUE_ERROR)
+#define F_T7_CL8_PAR_WRQUEUE_ERROR V_T7_CL8_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL7_PAR_WRQUEUE_ERROR 7
+#define V_T7_CL7_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL7_PAR_WRQUEUE_ERROR)
+#define F_T7_CL7_PAR_WRQUEUE_ERROR V_T7_CL7_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL6_PAR_WRQUEUE_ERROR 6
+#define V_T7_CL6_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL6_PAR_WRQUEUE_ERROR)
+#define F_T7_CL6_PAR_WRQUEUE_ERROR V_T7_CL6_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL5_PAR_WRQUEUE_ERROR 5
+#define V_T7_CL5_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL5_PAR_WRQUEUE_ERROR)
+#define F_T7_CL5_PAR_WRQUEUE_ERROR V_T7_CL5_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL4_PAR_WRQUEUE_ERROR 4
+#define V_T7_CL4_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL4_PAR_WRQUEUE_ERROR)
+#define F_T7_CL4_PAR_WRQUEUE_ERROR V_T7_CL4_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL3_PAR_WRQUEUE_ERROR 3
+#define V_T7_CL3_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL3_PAR_WRQUEUE_ERROR)
+#define F_T7_CL3_PAR_WRQUEUE_ERROR V_T7_CL3_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL2_PAR_WRQUEUE_ERROR 2
+#define V_T7_CL2_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL2_PAR_WRQUEUE_ERROR)
+#define F_T7_CL2_PAR_WRQUEUE_ERROR V_T7_CL2_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL1_PAR_WRQUEUE_ERROR 1
+#define V_T7_CL1_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL1_PAR_WRQUEUE_ERROR)
+#define F_T7_CL1_PAR_WRQUEUE_ERROR V_T7_CL1_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL0_PAR_WRQUEUE_ERROR 0
+#define V_T7_CL0_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL0_PAR_WRQUEUE_ERROR)
+#define F_T7_CL0_PAR_WRQUEUE_ERROR V_T7_CL0_PAR_WRQUEUE_ERROR(1U)
+
#define A_MA_EXT_MEMORY1_BAR 0x7808
#define S_EXT_MEM1_BASE 16
@@ -16675,6 +21397,16 @@
#define V_EXT_MEM1_SIZE(x) ((x) << S_EXT_MEM1_SIZE)
#define G_EXT_MEM1_SIZE(x) (((x) >> S_EXT_MEM1_SIZE) & M_EXT_MEM1_SIZE)
+#define S_T7_EXT_MEM1_BASE 16
+#define M_T7_EXT_MEM1_BASE 0xffffU
+#define V_T7_EXT_MEM1_BASE(x) ((x) << S_T7_EXT_MEM1_BASE)
+#define G_T7_EXT_MEM1_BASE(x) (((x) >> S_T7_EXT_MEM1_BASE) & M_T7_EXT_MEM1_BASE)
+
+#define S_T7_EXT_MEM1_SIZE 0
+#define M_T7_EXT_MEM1_SIZE 0xffffU
+#define V_T7_EXT_MEM1_SIZE(x) ((x) << S_T7_EXT_MEM1_SIZE)
+#define G_T7_EXT_MEM1_SIZE(x) (((x) >> S_T7_EXT_MEM1_SIZE) & M_T7_EXT_MEM1_SIZE)
+
#define A_MA_PMTX_THROTTLE 0x780c
#define S_FL_ENABLE 31
@@ -16696,6 +21428,7 @@
#define A_MA_TP_TH1_WRDATA_CNT 0x782c
#define A_MA_LE_WRDATA_CNT 0x7830
#define A_MA_CIM_WRDATA_CNT 0x7834
+#define A_MA_CIM_TH0_WRDATA_CNT 0x7834
#define A_MA_PCIE_WRDATA_CNT 0x7838
#define A_MA_PMTX_WRDATA_CNT 0x783c
#define A_MA_PMRX_WRDATA_CNT 0x7840
@@ -16709,6 +21442,7 @@
#define A_MA_TP_TH1_RDDATA_CNT 0x7860
#define A_MA_LE_RDDATA_CNT 0x7864
#define A_MA_CIM_RDDATA_CNT 0x7868
+#define A_MA_CIM_TH0_RDDATA_CNT 0x7868
#define A_MA_PCIE_RDDATA_CNT 0x786c
#define A_MA_PMTX_RDDATA_CNT 0x7870
#define A_MA_PMRX_RDDATA_CNT 0x7874
@@ -16733,7 +21467,43 @@
#define F_DDR_MODE V_DDR_MODE(1U)
#define A_MA_EDRAM1_WRDATA_CNT1 0x7884
+#define A_MA_PARITY_ERROR_ENABLE3 0x7884
+
+#define S_CL14_PAR_RDQUEUE_ERROR_EN 14
+#define V_CL14_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL14_PAR_RDQUEUE_ERROR_EN)
+#define F_CL14_PAR_RDQUEUE_ERROR_EN V_CL14_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL13_PAR_RDQUEUE_ERROR_EN 13
+#define V_CL13_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL13_PAR_RDQUEUE_ERROR_EN)
+#define F_CL13_PAR_RDQUEUE_ERROR_EN V_CL13_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL12_PAR_RDQUEUE_ERROR_EN 12
+#define V_CL12_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL12_PAR_RDQUEUE_ERROR_EN)
+#define F_CL12_PAR_RDQUEUE_ERROR_EN V_CL12_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL11_PAR_RDQUEUE_ERROR_EN 11
+#define V_CL11_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL11_PAR_RDQUEUE_ERROR_EN)
+#define F_CL11_PAR_RDQUEUE_ERROR_EN V_CL11_PAR_RDQUEUE_ERROR_EN(1U)
+
#define A_MA_EDRAM1_WRDATA_CNT0 0x7888
+#define A_MA_PARITY_ERROR_STATUS3 0x7888
+
+#define S_CL14_PAR_RDQUEUE_ERROR 14
+#define V_CL14_PAR_RDQUEUE_ERROR(x) ((x) << S_CL14_PAR_RDQUEUE_ERROR)
+#define F_CL14_PAR_RDQUEUE_ERROR V_CL14_PAR_RDQUEUE_ERROR(1U)
+
+#define S_CL13_PAR_RDQUEUE_ERROR 13
+#define V_CL13_PAR_RDQUEUE_ERROR(x) ((x) << S_CL13_PAR_RDQUEUE_ERROR)
+#define F_CL13_PAR_RDQUEUE_ERROR V_CL13_PAR_RDQUEUE_ERROR(1U)
+
+#define S_CL12_PAR_RDQUEUE_ERROR 12
+#define V_CL12_PAR_RDQUEUE_ERROR(x) ((x) << S_CL12_PAR_RDQUEUE_ERROR)
+#define F_CL12_PAR_RDQUEUE_ERROR V_CL12_PAR_RDQUEUE_ERROR(1U)
+
+#define S_CL11_PAR_RDQUEUE_ERROR 11
+#define V_CL11_PAR_RDQUEUE_ERROR(x) ((x) << S_CL11_PAR_RDQUEUE_ERROR)
+#define F_CL11_PAR_RDQUEUE_ERROR V_CL11_PAR_RDQUEUE_ERROR(1U)
+
#define A_MA_EXT_MEMORY0_WRDATA_CNT1 0x788c
#define A_MA_EXT_MEMORY0_WRDATA_CNT0 0x7890
#define A_MA_HOST_MEMORY_WRDATA_CNT1 0x7894
@@ -16915,6 +21685,30 @@
#define V_FUTURE_DEXPANSION_WTE(x) ((x) << S_FUTURE_DEXPANSION_WTE)
#define G_FUTURE_DEXPANSION_WTE(x) (((x) >> S_FUTURE_DEXPANSION_WTE) & M_FUTURE_DEXPANSION_WTE)
+#define S_T7_FUTURE_CEXPANSION_WTE 31
+#define V_T7_FUTURE_CEXPANSION_WTE(x) ((x) << S_T7_FUTURE_CEXPANSION_WTE)
+#define F_T7_FUTURE_CEXPANSION_WTE V_T7_FUTURE_CEXPANSION_WTE(1U)
+
+#define S_CL14_WR_CMD_TO_EN 30
+#define V_CL14_WR_CMD_TO_EN(x) ((x) << S_CL14_WR_CMD_TO_EN)
+#define F_CL14_WR_CMD_TO_EN V_CL14_WR_CMD_TO_EN(1U)
+
+#define S_CL13_WR_CMD_TO_EN 29
+#define V_CL13_WR_CMD_TO_EN(x) ((x) << S_CL13_WR_CMD_TO_EN)
+#define F_CL13_WR_CMD_TO_EN V_CL13_WR_CMD_TO_EN(1U)
+
+#define S_T7_FUTURE_DEXPANSION_WTE 15
+#define V_T7_FUTURE_DEXPANSION_WTE(x) ((x) << S_T7_FUTURE_DEXPANSION_WTE)
+#define F_T7_FUTURE_DEXPANSION_WTE V_T7_FUTURE_DEXPANSION_WTE(1U)
+
+#define S_CL14_WR_DATA_TO_EN 14
+#define V_CL14_WR_DATA_TO_EN(x) ((x) << S_CL14_WR_DATA_TO_EN)
+#define F_CL14_WR_DATA_TO_EN V_CL14_WR_DATA_TO_EN(1U)
+
+#define S_CL13_WR_DATA_TO_EN 13
+#define V_CL13_WR_DATA_TO_EN(x) ((x) << S_CL13_WR_DATA_TO_EN)
+#define F_CL13_WR_DATA_TO_EN V_CL13_WR_DATA_TO_EN(1U)
+
#define A_MA_WRITE_TIMEOUT_ERROR_STATUS 0x78d8
#define S_CL12_WR_CMD_TO_ERROR 28
@@ -17031,6 +21825,30 @@
#define V_FUTURE_DEXPANSION_WTS(x) ((x) << S_FUTURE_DEXPANSION_WTS)
#define G_FUTURE_DEXPANSION_WTS(x) (((x) >> S_FUTURE_DEXPANSION_WTS) & M_FUTURE_DEXPANSION_WTS)
+#define S_T7_FUTURE_CEXPANSION_WTS 31
+#define V_T7_FUTURE_CEXPANSION_WTS(x) ((x) << S_T7_FUTURE_CEXPANSION_WTS)
+#define F_T7_FUTURE_CEXPANSION_WTS V_T7_FUTURE_CEXPANSION_WTS(1U)
+
+#define S_CL14_WR_CMD_TO_ERROR 30
+#define V_CL14_WR_CMD_TO_ERROR(x) ((x) << S_CL14_WR_CMD_TO_ERROR)
+#define F_CL14_WR_CMD_TO_ERROR V_CL14_WR_CMD_TO_ERROR(1U)
+
+#define S_CL13_WR_CMD_TO_ERROR 29
+#define V_CL13_WR_CMD_TO_ERROR(x) ((x) << S_CL13_WR_CMD_TO_ERROR)
+#define F_CL13_WR_CMD_TO_ERROR V_CL13_WR_CMD_TO_ERROR(1U)
+
+#define S_T7_FUTURE_DEXPANSION_WTS 15
+#define V_T7_FUTURE_DEXPANSION_WTS(x) ((x) << S_T7_FUTURE_DEXPANSION_WTS)
+#define F_T7_FUTURE_DEXPANSION_WTS V_T7_FUTURE_DEXPANSION_WTS(1U)
+
+#define S_CL14_WR_DATA_TO_ERROR 14
+#define V_CL14_WR_DATA_TO_ERROR(x) ((x) << S_CL14_WR_DATA_TO_ERROR)
+#define F_CL14_WR_DATA_TO_ERROR V_CL14_WR_DATA_TO_ERROR(1U)
+
+#define S_CL13_WR_DATA_TO_ERROR 13
+#define V_CL13_WR_DATA_TO_ERROR(x) ((x) << S_CL13_WR_DATA_TO_ERROR)
+#define F_CL13_WR_DATA_TO_ERROR V_CL13_WR_DATA_TO_ERROR(1U)
+
#define A_MA_READ_TIMEOUT_ERROR_ENABLE 0x78dc
#define S_CL12_RD_CMD_TO_EN 28
@@ -17147,6 +21965,30 @@
#define V_FUTURE_DEXPANSION_RTE(x) ((x) << S_FUTURE_DEXPANSION_RTE)
#define G_FUTURE_DEXPANSION_RTE(x) (((x) >> S_FUTURE_DEXPANSION_RTE) & M_FUTURE_DEXPANSION_RTE)
+#define S_T7_FUTURE_CEXPANSION_RTE 31
+#define V_T7_FUTURE_CEXPANSION_RTE(x) ((x) << S_T7_FUTURE_CEXPANSION_RTE)
+#define F_T7_FUTURE_CEXPANSION_RTE V_T7_FUTURE_CEXPANSION_RTE(1U)
+
+#define S_CL14_RD_CMD_TO_EN 30
+#define V_CL14_RD_CMD_TO_EN(x) ((x) << S_CL14_RD_CMD_TO_EN)
+#define F_CL14_RD_CMD_TO_EN V_CL14_RD_CMD_TO_EN(1U)
+
+#define S_CL13_RD_CMD_TO_EN 29
+#define V_CL13_RD_CMD_TO_EN(x) ((x) << S_CL13_RD_CMD_TO_EN)
+#define F_CL13_RD_CMD_TO_EN V_CL13_RD_CMD_TO_EN(1U)
+
+#define S_T7_FUTURE_DEXPANSION_RTE 15
+#define V_T7_FUTURE_DEXPANSION_RTE(x) ((x) << S_T7_FUTURE_DEXPANSION_RTE)
+#define F_T7_FUTURE_DEXPANSION_RTE V_T7_FUTURE_DEXPANSION_RTE(1U)
+
+#define S_CL14_RD_DATA_TO_EN 14
+#define V_CL14_RD_DATA_TO_EN(x) ((x) << S_CL14_RD_DATA_TO_EN)
+#define F_CL14_RD_DATA_TO_EN V_CL14_RD_DATA_TO_EN(1U)
+
+#define S_CL13_RD_DATA_TO_EN 13
+#define V_CL13_RD_DATA_TO_EN(x) ((x) << S_CL13_RD_DATA_TO_EN)
+#define F_CL13_RD_DATA_TO_EN V_CL13_RD_DATA_TO_EN(1U)
+
#define A_MA_READ_TIMEOUT_ERROR_STATUS 0x78e0
#define S_CL12_RD_CMD_TO_ERROR 28
@@ -17263,6 +22105,27 @@
#define V_FUTURE_DEXPANSION_RTS(x) ((x) << S_FUTURE_DEXPANSION_RTS)
#define G_FUTURE_DEXPANSION_RTS(x) (((x) >> S_FUTURE_DEXPANSION_RTS) & M_FUTURE_DEXPANSION_RTS)
+#define S_T7_FUTURE_CEXPANSION_RTS 31
+#define V_T7_FUTURE_CEXPANSION_RTS(x) ((x) << S_T7_FUTURE_CEXPANSION_RTS)
+#define F_T7_FUTURE_CEXPANSION_RTS V_T7_FUTURE_CEXPANSION_RTS(1U)
+
+#define S_CL14_RD_CMD_TO_ERROR 30
+#define V_CL14_RD_CMD_TO_ERROR(x) ((x) << S_CL14_RD_CMD_TO_ERROR)
+#define F_CL14_RD_CMD_TO_ERROR V_CL14_RD_CMD_TO_ERROR(1U)
+
+#define S_CL13_RD_CMD_TO_ERROR 29
+#define V_CL13_RD_CMD_TO_ERROR(x) ((x) << S_CL13_RD_CMD_TO_ERROR)
+#define F_CL13_RD_CMD_TO_ERROR V_CL13_RD_CMD_TO_ERROR(1U)
+
+#define S_T7_FUTURE_DEXPANSION_RTS 14
+#define M_T7_FUTURE_DEXPANSION_RTS 0x3U
+#define V_T7_FUTURE_DEXPANSION_RTS(x) ((x) << S_T7_FUTURE_DEXPANSION_RTS)
+#define G_T7_FUTURE_DEXPANSION_RTS(x) (((x) >> S_T7_FUTURE_DEXPANSION_RTS) & M_T7_FUTURE_DEXPANSION_RTS)
+
+#define S_CL13_RD_DATA_TO_ERROR 13
+#define V_CL13_RD_DATA_TO_ERROR(x) ((x) << S_CL13_RD_DATA_TO_ERROR)
+#define F_CL13_RD_DATA_TO_ERROR V_CL13_RD_DATA_TO_ERROR(1U)
+
#define A_MA_BKP_CNT_SEL 0x78e4
#define S_BKP_CNT_TYPE 30
@@ -17361,12 +22224,16 @@
#define V_FUTURE_DEXPANSION_IPE(x) ((x) << S_FUTURE_DEXPANSION_IPE)
#define G_FUTURE_DEXPANSION_IPE(x) (((x) >> S_FUTURE_DEXPANSION_IPE) & M_FUTURE_DEXPANSION_IPE)
-#define A_MA_IF_PARITY_ERROR_STATUS 0x78f4
+#define S_T7_FUTURE_DEXPANSION_IPE 14
+#define M_T7_FUTURE_DEXPANSION_IPE 0x3ffffU
+#define V_T7_FUTURE_DEXPANSION_IPE(x) ((x) << S_T7_FUTURE_DEXPANSION_IPE)
+#define G_T7_FUTURE_DEXPANSION_IPE(x) (((x) >> S_T7_FUTURE_DEXPANSION_IPE) & M_T7_FUTURE_DEXPANSION_IPE)
-#define S_T5_FUTURE_DEXPANSION 13
-#define M_T5_FUTURE_DEXPANSION 0x7ffffU
-#define V_T5_FUTURE_DEXPANSION(x) ((x) << S_T5_FUTURE_DEXPANSION)
-#define G_T5_FUTURE_DEXPANSION(x) (((x) >> S_T5_FUTURE_DEXPANSION) & M_T5_FUTURE_DEXPANSION)
+#define S_CL13_IF_PAR_EN 13
+#define V_CL13_IF_PAR_EN(x) ((x) << S_CL13_IF_PAR_EN)
+#define F_CL13_IF_PAR_EN V_CL13_IF_PAR_EN(1U)
+
+#define A_MA_IF_PARITY_ERROR_STATUS 0x78f4
#define S_CL12_IF_PAR_ERROR 12
#define V_CL12_IF_PAR_ERROR(x) ((x) << S_CL12_IF_PAR_ERROR)
@@ -17425,6 +22292,15 @@
#define V_FUTURE_DEXPANSION_IPS(x) ((x) << S_FUTURE_DEXPANSION_IPS)
#define G_FUTURE_DEXPANSION_IPS(x) (((x) >> S_FUTURE_DEXPANSION_IPS) & M_FUTURE_DEXPANSION_IPS)
+#define S_T7_FUTURE_DEXPANSION_IPS 14
+#define M_T7_FUTURE_DEXPANSION_IPS 0x3ffffU
+#define V_T7_FUTURE_DEXPANSION_IPS(x) ((x) << S_T7_FUTURE_DEXPANSION_IPS)
+#define G_T7_FUTURE_DEXPANSION_IPS(x) (((x) >> S_T7_FUTURE_DEXPANSION_IPS) & M_T7_FUTURE_DEXPANSION_IPS)
+
+#define S_CL13_IF_PAR_ERROR 13
+#define V_CL13_IF_PAR_ERROR(x) ((x) << S_CL13_IF_PAR_ERROR)
+#define F_CL13_IF_PAR_ERROR V_CL13_IF_PAR_ERROR(1U)
+
#define A_MA_LOCAL_DEBUG_CFG 0x78f8
#define S_DEBUG_OR 15
@@ -17445,6 +22321,131 @@
#define G_DEBUGPAGE(x) (((x) >> S_DEBUGPAGE) & M_DEBUGPAGE)
#define A_MA_LOCAL_DEBUG_RPT 0x78fc
+#define A_MA_CLIENT13_PR_THRESHOLD 0x7900
+#define A_MA_CLIENT13_CR_THRESHOLD 0x7904
+#define A_MA_CRYPTO_DEBUG_CNT 0x7908
+#define A_MA_CRYPTO_WRDATA_CNT 0x790c
+#define A_MA_CRYPTO_RDDATA_CNT 0x7910
+#define A_MA_LOCAL_DEBUG_PERF_CFG 0x7914
+#define A_MA_LOCAL_DEBUG_PERF_RPT 0x7918
+#define A_MA_PCIE_THROTTLE 0x791c
+#define A_MA_CLIENT14_PR_THRESHOLD 0x7920
+#define A_MA_CLIENT14_CR_THRESHOLD 0x7924
+#define A_MA_CIM_TH1_DEBUG_CNT 0x7928
+#define A_MA_CIM_TH1_WRDATA_CNT 0x792c
+#define A_MA_CIM_TH1_RDDATA_CNT 0x7930
+#define A_MA_CIM_THREAD1_MAPPER 0x7934
+
+#define S_CIM_THREAD1_EN 0
+#define M_CIM_THREAD1_EN 0xffU
+#define V_CIM_THREAD1_EN(x) ((x) << S_CIM_THREAD1_EN)
+#define G_CIM_THREAD1_EN(x) (((x) >> S_CIM_THREAD1_EN) & M_CIM_THREAD1_EN)
+
+#define A_MA_PIO_CI_SGE_TH0_BASE 0x7938
+
+#define S_SGE_TH0_BASE 0
+#define M_SGE_TH0_BASE 0xffffU
+#define V_SGE_TH0_BASE(x) ((x) << S_SGE_TH0_BASE)
+#define G_SGE_TH0_BASE(x) (((x) >> S_SGE_TH0_BASE) & M_SGE_TH0_BASE)
+
+#define A_MA_PIO_CI_SGE_TH1_BASE 0x793c
+
+#define S_SGE_TH1_BASE 0
+#define M_SGE_TH1_BASE 0xffffU
+#define V_SGE_TH1_BASE(x) ((x) << S_SGE_TH1_BASE)
+#define G_SGE_TH1_BASE(x) (((x) >> S_SGE_TH1_BASE) & M_SGE_TH1_BASE)
+
+#define A_MA_PIO_CI_ULPTX_BASE 0x7940
+
+#define S_ULPTX_BASE 0
+#define M_ULPTX_BASE 0xffffU
+#define V_ULPTX_BASE(x) ((x) << S_ULPTX_BASE)
+#define G_ULPTX_BASE(x) (((x) >> S_ULPTX_BASE) & M_ULPTX_BASE)
+
+#define A_MA_PIO_CI_ULPRX_BASE 0x7944
+
+#define S_ULPRX_BASE 0
+#define M_ULPRX_BASE 0xffffU
+#define V_ULPRX_BASE(x) ((x) << S_ULPRX_BASE)
+#define G_ULPRX_BASE(x) (((x) >> S_ULPRX_BASE) & M_ULPRX_BASE)
+
+#define A_MA_PIO_CI_ULPTXRX_BASE 0x7948
+
+#define S_ULPTXRX_BASE 0
+#define M_ULPTXRX_BASE 0xffffU
+#define V_ULPTXRX_BASE(x) ((x) << S_ULPTXRX_BASE)
+#define G_ULPTXRX_BASE(x) (((x) >> S_ULPTXRX_BASE) & M_ULPTXRX_BASE)
+
+#define A_MA_PIO_CI_TP_TH0_BASE 0x794c
+
+#define S_TP_TH0_BASE 0
+#define M_TP_TH0_BASE 0xffffU
+#define V_TP_TH0_BASE(x) ((x) << S_TP_TH0_BASE)
+#define G_TP_TH0_BASE(x) (((x) >> S_TP_TH0_BASE) & M_TP_TH0_BASE)
+
+#define A_MA_PIO_CI_TP_TH1_BASE 0x7950
+
+#define S_TP_TH1_BASE 0
+#define M_TP_TH1_BASE 0xffffU
+#define V_TP_TH1_BASE(x) ((x) << S_TP_TH1_BASE)
+#define G_TP_TH1_BASE(x) (((x) >> S_TP_TH1_BASE) & M_TP_TH1_BASE)
+
+#define A_MA_PIO_CI_LE_BASE 0x7954
+
+#define S_LE_BASE 0
+#define M_LE_BASE 0xffffU
+#define V_LE_BASE(x) ((x) << S_LE_BASE)
+#define G_LE_BASE(x) (((x) >> S_LE_BASE) & M_LE_BASE)
+
+#define A_MA_PIO_CI_CIM_TH0_BASE 0x7958
+
+#define S_CIM_TH0_BASE 0
+#define M_CIM_TH0_BASE 0xffffU
+#define V_CIM_TH0_BASE(x) ((x) << S_CIM_TH0_BASE)
+#define G_CIM_TH0_BASE(x) (((x) >> S_CIM_TH0_BASE) & M_CIM_TH0_BASE)
+
+#define A_MA_PIO_CI_PCIE_BASE 0x795c
+
+#define S_PCIE_BASE 0
+#define M_PCIE_BASE 0xffffU
+#define V_PCIE_BASE(x) ((x) << S_PCIE_BASE)
+#define G_PCIE_BASE(x) (((x) >> S_PCIE_BASE) & M_PCIE_BASE)
+
+#define A_MA_PIO_CI_PMTX_BASE 0x7960
+
+#define S_PMTX_BASE 0
+#define M_PMTX_BASE 0xffffU
+#define V_PMTX_BASE(x) ((x) << S_PMTX_BASE)
+#define G_PMTX_BASE(x) (((x) >> S_PMTX_BASE) & M_PMTX_BASE)
+
+#define A_MA_PIO_CI_PMRX_BASE 0x7964
+
+#define S_PMRX_BASE 0
+#define M_PMRX_BASE 0xffffU
+#define V_PMRX_BASE(x) ((x) << S_PMRX_BASE)
+#define G_PMRX_BASE(x) (((x) >> S_PMRX_BASE) & M_PMRX_BASE)
+
+#define A_MA_PIO_CI_HMA_BASE 0x7968
+
+#define S_HMACLIENTBASE 0
+#define M_HMACLIENTBASE 0xffffU
+#define V_HMACLIENTBASE(x) ((x) << S_HMACLIENTBASE)
+#define G_HMACLIENTBASE(x) (((x) >> S_HMACLIENTBASE) & M_HMACLIENTBASE)
+
+#define A_MA_PIO_CI_CRYPTO_BASE 0x796c
+
+#define S_CRYPTO_BASE 0
+#define M_CRYPTO_BASE 0xffffU
+#define V_CRYPTO_BASE(x) ((x) << S_CRYPTO_BASE)
+#define G_CRYPTO_BASE(x) (((x) >> S_CRYPTO_BASE) & M_CRYPTO_BASE)
+
+#define A_MA_PIO_CI_CIM_TH1_BASE 0x7970
+
+#define S_CIM_TH1_BASE 0
+#define M_CIM_TH1_BASE 0xffffU
+#define V_CIM_TH1_BASE(x) ((x) << S_CIM_TH1_BASE)
+#define G_CIM_TH1_BASE(x) (((x) >> S_CIM_TH1_BASE) & M_CIM_TH1_BASE)
+
#define A_MA_SGE_THREAD_0_CLIENT_INTERFACE_EXTERNAL 0xa000
#define S_CMDVLD0 31
@@ -20418,6 +25419,124 @@
#define V_FLASHADDRSIZE(x) ((x) << S_FLASHADDRSIZE)
#define G_FLASHADDRSIZE(x) (((x) >> S_FLASHADDRSIZE) & M_FLASHADDRSIZE)
+#define A_T7_CIM_PERR_ENABLE 0x7b08
+
+#define S_T7_MA_CIM_INTFPERR 31
+#define V_T7_MA_CIM_INTFPERR(x) ((x) << S_T7_MA_CIM_INTFPERR)
+#define F_T7_MA_CIM_INTFPERR V_T7_MA_CIM_INTFPERR(1U)
+
+#define S_T7_MBHOSTPARERR 30
+#define V_T7_MBHOSTPARERR(x) ((x) << S_T7_MBHOSTPARERR)
+#define F_T7_MBHOSTPARERR V_T7_MBHOSTPARERR(1U)
+
+#define S_MAARBINVRSPTAG 29
+#define V_MAARBINVRSPTAG(x) ((x) << S_MAARBINVRSPTAG)
+#define F_MAARBINVRSPTAG V_MAARBINVRSPTAG(1U)
+
+#define S_MAARBFIFOPARERR 28
+#define V_MAARBFIFOPARERR(x) ((x) << S_MAARBFIFOPARERR)
+#define F_MAARBFIFOPARERR V_MAARBFIFOPARERR(1U)
+
+#define S_SEMSRAMPARERR 27
+#define V_SEMSRAMPARERR(x) ((x) << S_SEMSRAMPARERR)
+#define F_SEMSRAMPARERR V_SEMSRAMPARERR(1U)
+
+#define S_RSACPARERR 26
+#define V_RSACPARERR(x) ((x) << S_RSACPARERR)
+#define F_RSACPARERR V_RSACPARERR(1U)
+
+#define S_RSADPARERR 25
+#define V_RSADPARERR(x) ((x) << S_RSADPARERR)
+#define F_RSADPARERR V_RSADPARERR(1U)
+
+#define S_T7_PLCIM_MSTRSPDATAPARERR 24
+#define V_T7_PLCIM_MSTRSPDATAPARERR(x) ((x) << S_T7_PLCIM_MSTRSPDATAPARERR)
+#define F_T7_PLCIM_MSTRSPDATAPARERR V_T7_PLCIM_MSTRSPDATAPARERR(1U)
+
+#define S_T7_PCIE2CIMINTFPARERR 23
+#define V_T7_PCIE2CIMINTFPARERR(x) ((x) << S_T7_PCIE2CIMINTFPARERR)
+#define F_T7_PCIE2CIMINTFPARERR V_T7_PCIE2CIMINTFPARERR(1U)
+
+#define S_T7_NCSI2CIMINTFPARERR 22
+#define V_T7_NCSI2CIMINTFPARERR(x) ((x) << S_T7_NCSI2CIMINTFPARERR)
+#define F_T7_NCSI2CIMINTFPARERR V_T7_NCSI2CIMINTFPARERR(1U)
+
+#define S_T7_SGE2CIMINTFPARERR 21
+#define V_T7_SGE2CIMINTFPARERR(x) ((x) << S_T7_SGE2CIMINTFPARERR)
+#define F_T7_SGE2CIMINTFPARERR V_T7_SGE2CIMINTFPARERR(1U)
+
+#define S_T7_ULP2CIMINTFPARERR 20
+#define V_T7_ULP2CIMINTFPARERR(x) ((x) << S_T7_ULP2CIMINTFPARERR)
+#define F_T7_ULP2CIMINTFPARERR V_T7_ULP2CIMINTFPARERR(1U)
+
+#define S_T7_TP2CIMINTFPARERR 19
+#define V_T7_TP2CIMINTFPARERR(x) ((x) << S_T7_TP2CIMINTFPARERR)
+#define F_T7_TP2CIMINTFPARERR V_T7_TP2CIMINTFPARERR(1U)
+
+#define S_CORE7PARERR 18
+#define V_CORE7PARERR(x) ((x) << S_CORE7PARERR)
+#define F_CORE7PARERR V_CORE7PARERR(1U)
+
+#define S_CORE6PARERR 17
+#define V_CORE6PARERR(x) ((x) << S_CORE6PARERR)
+#define F_CORE6PARERR V_CORE6PARERR(1U)
+
+#define S_CORE5PARERR 16
+#define V_CORE5PARERR(x) ((x) << S_CORE5PARERR)
+#define F_CORE5PARERR V_CORE5PARERR(1U)
+
+#define S_CORE4PARERR 15
+#define V_CORE4PARERR(x) ((x) << S_CORE4PARERR)
+#define F_CORE4PARERR V_CORE4PARERR(1U)
+
+#define S_CORE3PARERR 14
+#define V_CORE3PARERR(x) ((x) << S_CORE3PARERR)
+#define F_CORE3PARERR V_CORE3PARERR(1U)
+
+#define S_CORE2PARERR 13
+#define V_CORE2PARERR(x) ((x) << S_CORE2PARERR)
+#define F_CORE2PARERR V_CORE2PARERR(1U)
+
+#define S_CORE1PARERR 12
+#define V_CORE1PARERR(x) ((x) << S_CORE1PARERR)
+#define F_CORE1PARERR V_CORE1PARERR(1U)
+
+#define S_GFTPARERR 10
+#define V_GFTPARERR(x) ((x) << S_GFTPARERR)
+#define F_GFTPARERR V_GFTPARERR(1U)
+
+#define S_MPSRSPDATAPARERR 9
+#define V_MPSRSPDATAPARERR(x) ((x) << S_MPSRSPDATAPARERR)
+#define F_MPSRSPDATAPARERR V_MPSRSPDATAPARERR(1U)
+
+#define S_ER_RSPDATAPARERR 8
+#define V_ER_RSPDATAPARERR(x) ((x) << S_ER_RSPDATAPARERR)
+#define F_ER_RSPDATAPARERR V_ER_RSPDATAPARERR(1U)
+
+#define S_FLOWFIFOPARERR 7
+#define V_FLOWFIFOPARERR(x) ((x) << S_FLOWFIFOPARERR)
+#define F_FLOWFIFOPARERR V_FLOWFIFOPARERR(1U)
+
+#define S_OBQSRAMPARERR 6
+#define V_OBQSRAMPARERR(x) ((x) << S_OBQSRAMPARERR)
+#define F_OBQSRAMPARERR V_OBQSRAMPARERR(1U)
+
+#define S_TIEQOUTPARERR 3
+#define V_TIEQOUTPARERR(x) ((x) << S_TIEQOUTPARERR)
+#define F_TIEQOUTPARERR V_TIEQOUTPARERR(1U)
+
+#define S_TIEQINPARERR 2
+#define V_TIEQINPARERR(x) ((x) << S_TIEQINPARERR)
+#define F_TIEQINPARERR V_TIEQINPARERR(1U)
+
+#define S_PIFRSPPARERR 1
+#define V_PIFRSPPARERR(x) ((x) << S_PIFRSPPARERR)
+#define F_PIFRSPPARERR V_PIFRSPPARERR(1U)
+
+#define S_PIFREQPARERR 0
+#define V_PIFREQPARERR(x) ((x) << S_PIFREQPARERR)
+#define F_PIFREQPARERR V_PIFREQPARERR(1U)
+
#define A_CIM_EEPROM_BASE_ADDR 0x7b0c
#define S_EEPROMBASEADDR 6
@@ -20425,6 +25544,7 @@
#define V_EEPROMBASEADDR(x) ((x) << S_EEPROMBASEADDR)
#define G_EEPROMBASEADDR(x) (((x) >> S_EEPROMBASEADDR) & M_EEPROMBASEADDR)
+#define A_CIM_PERR_CAUSE 0x7b0c
#define A_CIM_EEPROM_ADDR_SIZE 0x7b10
#define S_EEPROMADDRSIZE 4
@@ -20593,6 +25713,38 @@
#define V_IBQPCIEPARERR(x) ((x) << S_IBQPCIEPARERR)
#define F_IBQPCIEPARERR V_IBQPCIEPARERR(1U)
+#define S_CORE7ACCINT 22
+#define V_CORE7ACCINT(x) ((x) << S_CORE7ACCINT)
+#define F_CORE7ACCINT V_CORE7ACCINT(1U)
+
+#define S_CORE6ACCINT 21
+#define V_CORE6ACCINT(x) ((x) << S_CORE6ACCINT)
+#define F_CORE6ACCINT V_CORE6ACCINT(1U)
+
+#define S_CORE5ACCINT 20
+#define V_CORE5ACCINT(x) ((x) << S_CORE5ACCINT)
+#define F_CORE5ACCINT V_CORE5ACCINT(1U)
+
+#define S_CORE4ACCINT 19
+#define V_CORE4ACCINT(x) ((x) << S_CORE4ACCINT)
+#define F_CORE4ACCINT V_CORE4ACCINT(1U)
+
+#define S_CORE3ACCINT 18
+#define V_CORE3ACCINT(x) ((x) << S_CORE3ACCINT)
+#define F_CORE3ACCINT V_CORE3ACCINT(1U)
+
+#define S_CORE2ACCINT 17
+#define V_CORE2ACCINT(x) ((x) << S_CORE2ACCINT)
+#define F_CORE2ACCINT V_CORE2ACCINT(1U)
+
+#define S_CORE1ACCINT 16
+#define V_CORE1ACCINT(x) ((x) << S_CORE1ACCINT)
+#define F_CORE1ACCINT V_CORE1ACCINT(1U)
+
+#define S_PERRNONZERO 1
+#define V_PERRNONZERO(x) ((x) << S_PERRNONZERO)
+#define F_PERRNONZERO V_PERRNONZERO(1U)
+
#define A_CIM_HOST_INT_CAUSE 0x7b2c
#define S_TIEQOUTPARERRINT 20
@@ -20745,6 +25897,10 @@
#define V_RSVDSPACEINTEN(x) ((x) << S_RSVDSPACEINTEN)
#define F_RSVDSPACEINTEN V_RSVDSPACEINTEN(1U)
+#define S_CONWRERRINTEN 31
+#define V_CONWRERRINTEN(x) ((x) << S_CONWRERRINTEN)
+#define F_CONWRERRINTEN V_CONWRERRINTEN(1U)
+
#define A_CIM_HOST_UPACC_INT_CAUSE 0x7b34
#define S_EEPROMWRINT 30
@@ -20871,12 +26027,32 @@
#define V_RSVDSPACEINT(x) ((x) << S_RSVDSPACEINT)
#define F_RSVDSPACEINT V_RSVDSPACEINT(1U)
+#define S_CONWRERRINT 31
+#define V_CONWRERRINT(x) ((x) << S_CONWRERRINT)
+#define F_CONWRERRINT V_CONWRERRINT(1U)
+
#define A_CIM_UP_INT_ENABLE 0x7b38
#define S_MSTPLINTEN 4
#define V_MSTPLINTEN(x) ((x) << S_MSTPLINTEN)
#define F_MSTPLINTEN V_MSTPLINTEN(1U)
+#define S_SEMINT 8
+#define V_SEMINT(x) ((x) << S_SEMINT)
+#define F_SEMINT V_SEMINT(1U)
+
+#define S_RSAINT 7
+#define V_RSAINT(x) ((x) << S_RSAINT)
+#define F_RSAINT V_RSAINT(1U)
+
+#define S_TRNGINT 6
+#define V_TRNGINT(x) ((x) << S_TRNGINT)
+#define F_TRNGINT V_TRNGINT(1U)
+
+#define S_PEERHALTINT 5
+#define V_PEERHALTINT(x) ((x) << S_PEERHALTINT)
+#define F_PEERHALTINT V_PEERHALTINT(1U)
+
#define A_CIM_UP_INT_CAUSE 0x7b3c
#define S_MSTPLINT 4
@@ -20900,6 +26076,33 @@
#define V_QUENUMSELECT(x) ((x) << S_QUENUMSELECT)
#define G_QUENUMSELECT(x) (((x) >> S_QUENUMSELECT) & M_QUENUMSELECT)
+#define S_MAPOFFSET 11
+#define M_MAPOFFSET 0x1fU
+#define V_MAPOFFSET(x) ((x) << S_MAPOFFSET)
+#define G_MAPOFFSET(x) (((x) >> S_MAPOFFSET) & M_MAPOFFSET)
+
+#define S_MAPSELECT 10
+#define V_MAPSELECT(x) ((x) << S_MAPSELECT)
+#define F_MAPSELECT V_MAPSELECT(1U)
+
+#define S_CORESELECT 6
+#define M_CORESELECT 0xfU
+#define V_CORESELECT(x) ((x) << S_CORESELECT)
+#define G_CORESELECT(x) (((x) >> S_CORESELECT) & M_CORESELECT)
+
+#define S_T7_OBQSELECT 5
+#define V_T7_OBQSELECT(x) ((x) << S_T7_OBQSELECT)
+#define F_T7_OBQSELECT V_T7_OBQSELECT(1U)
+
+#define S_T7_IBQSELECT 4
+#define V_T7_IBQSELECT(x) ((x) << S_T7_IBQSELECT)
+#define F_T7_IBQSELECT V_T7_IBQSELECT(1U)
+
+#define S_T7_QUENUMSELECT 0
+#define M_T7_QUENUMSELECT 0xfU
+#define V_T7_QUENUMSELECT(x) ((x) << S_T7_QUENUMSELECT)
+#define G_T7_QUENUMSELECT(x) (((x) >> S_T7_QUENUMSELECT) & M_T7_QUENUMSELECT)
+
#define A_CIM_QUEUE_CONFIG_CTRL 0x7b4c
#define S_CIMQSIZE 24
@@ -20940,6 +26143,29 @@
#define V_HOSTADDR(x) ((x) << S_HOSTADDR)
#define G_HOSTADDR(x) (((x) >> S_HOSTADDR) & M_HOSTADDR)
+#define S_T7_HOSTBUSY 31
+#define V_T7_HOSTBUSY(x) ((x) << S_T7_HOSTBUSY)
+#define F_T7_HOSTBUSY V_T7_HOSTBUSY(1U)
+
+#define S_T7_HOSTWRITE 30
+#define V_T7_HOSTWRITE(x) ((x) << S_T7_HOSTWRITE)
+#define F_T7_HOSTWRITE V_T7_HOSTWRITE(1U)
+
+#define S_HOSTGRPSEL 28
+#define M_HOSTGRPSEL 0x3U
+#define V_HOSTGRPSEL(x) ((x) << S_HOSTGRPSEL)
+#define G_HOSTGRPSEL(x) (((x) >> S_HOSTGRPSEL) & M_HOSTGRPSEL)
+
+#define S_HOSTCORESEL 24
+#define M_HOSTCORESEL 0xfU
+#define V_HOSTCORESEL(x) ((x) << S_HOSTCORESEL)
+#define G_HOSTCORESEL(x) (((x) >> S_HOSTCORESEL) & M_HOSTCORESEL)
+
+#define S_T7_HOSTADDR 0
+#define M_T7_HOSTADDR 0xffffffU
+#define V_T7_HOSTADDR(x) ((x) << S_T7_HOSTADDR)
+#define G_T7_HOSTADDR(x) (((x) >> S_T7_HOSTADDR) & M_T7_HOSTADDR)
+
#define A_CIM_HOST_ACC_DATA 0x7b54
#define A_CIM_CDEBUGDATA 0x7b58
@@ -20953,6 +26179,31 @@
#define V_CDEBUGDATAL(x) ((x) << S_CDEBUGDATAL)
#define G_CDEBUGDATAL(x) (((x) >> S_CDEBUGDATAL) & M_CDEBUGDATAL)
+#define A_CIM_DEBUG_CFG 0x7b58
+
+#define S_OR_EN 20
+#define V_OR_EN(x) ((x) << S_OR_EN)
+#define F_OR_EN V_OR_EN(1U)
+
+#define S_USEL 19
+#define V_USEL(x) ((x) << S_USEL)
+#define F_USEL V_USEL(1U)
+
+#define S_HI 18
+#define V_HI(x) ((x) << S_HI)
+#define F_HI V_HI(1U)
+
+#define S_SELH 9
+#define M_SELH 0x1ffU
+#define V_SELH(x) ((x) << S_SELH)
+#define G_SELH(x) (((x) >> S_SELH) & M_SELH)
+
+#define S_SELL 0
+#define M_SELL 0x1ffU
+#define V_SELL(x) ((x) << S_SELL)
+#define G_SELL(x) (((x) >> S_SELL) & M_SELL)
+
+#define A_CIM_DEBUG_DATA 0x7b5c
#define A_CIM_IBQ_DBG_CFG 0x7b60
#define S_IBQDBGADDR 16
@@ -20972,6 +26223,25 @@
#define V_IBQDBGEN(x) ((x) << S_IBQDBGEN)
#define F_IBQDBGEN V_IBQDBGEN(1U)
+#define S_IBQDBGCORE 28
+#define M_IBQDBGCORE 0xfU
+#define V_IBQDBGCORE(x) ((x) << S_IBQDBGCORE)
+#define G_IBQDBGCORE(x) (((x) >> S_IBQDBGCORE) & M_IBQDBGCORE)
+
+#define S_T7_IBQDBGADDR 12
+#define M_T7_IBQDBGADDR 0x1fffU
+#define V_T7_IBQDBGADDR(x) ((x) << S_T7_IBQDBGADDR)
+#define G_T7_IBQDBGADDR(x) (((x) >> S_T7_IBQDBGADDR) & M_T7_IBQDBGADDR)
+
+#define S_IBQDBGSTATE 4
+#define M_IBQDBGSTATE 0x3U
+#define V_IBQDBGSTATE(x) ((x) << S_IBQDBGSTATE)
+#define G_IBQDBGSTATE(x) (((x) >> S_IBQDBGSTATE) & M_IBQDBGSTATE)
+
+#define S_PERRADDRCLR 3
+#define V_PERRADDRCLR(x) ((x) << S_PERRADDRCLR)
+#define F_PERRADDRCLR V_PERRADDRCLR(1U)
+
#define A_CIM_OBQ_DBG_CFG 0x7b64
#define S_OBQDBGADDR 16
@@ -20991,6 +26261,21 @@
#define V_OBQDBGEN(x) ((x) << S_OBQDBGEN)
#define F_OBQDBGEN V_OBQDBGEN(1U)
+#define S_OBQDBGCORE 28
+#define M_OBQDBGCORE 0xfU
+#define V_OBQDBGCORE(x) ((x) << S_OBQDBGCORE)
+#define G_OBQDBGCORE(x) (((x) >> S_OBQDBGCORE) & M_OBQDBGCORE)
+
+#define S_T7_OBQDBGADDR 12
+#define M_T7_OBQDBGADDR 0x1fffU
+#define V_T7_OBQDBGADDR(x) ((x) << S_T7_OBQDBGADDR)
+#define G_T7_OBQDBGADDR(x) (((x) >> S_T7_OBQDBGADDR) & M_T7_OBQDBGADDR)
+
+#define S_OBQDBGSTATE 4
+#define M_OBQDBGSTATE 0x3U
+#define V_OBQDBGSTATE(x) ((x) << S_OBQDBGSTATE)
+#define G_OBQDBGSTATE(x) (((x) >> S_OBQDBGSTATE) & M_OBQDBGSTATE)
+
#define A_CIM_IBQ_DBG_DATA 0x7b68
#define A_CIM_OBQ_DBG_DATA 0x7b6c
#define A_CIM_DEBUGCFG 0x7b70
@@ -21075,6 +26360,11 @@
#define V_ZONE_DST(x) ((x) << S_ZONE_DST)
#define G_ZONE_DST(x) (((x) >> S_ZONE_DST) & M_ZONE_DST)
+#define S_THREAD_ID 2
+#define M_THREAD_ID 0x7U
+#define V_THREAD_ID(x) ((x) << S_THREAD_ID)
+#define G_THREAD_ID(x) (((x) >> S_THREAD_ID) & M_THREAD_ID)
+
#define A_CIM_MEM_ZONE0_LEN 0x7b98
#define S_MEM_ZONE_LEN 4
@@ -21207,6 +26497,7 @@
#define G_DUPUACCMASK(x) (((x) >> S_DUPUACCMASK) & M_DUPUACCMASK)
#define A_CIM_PERR_INJECT 0x7c20
+#define A_CIM_FPGA_ROM_EFUSE_CMD 0x7c20
#define A_CIM_PERR_ENABLE 0x7c24
#define S_PERREN 0
@@ -21224,6 +26515,7 @@
#define V_T6_T5_PERREN(x) ((x) << S_T6_T5_PERREN)
#define G_T6_T5_PERREN(x) (((x) >> S_T6_T5_PERREN) & M_T6_T5_PERREN)
+#define A_CIM_FPGA_ROM_EFUSE_DATA 0x7c24
#define A_CIM_EEPROM_BUSY_BIT 0x7c28
#define S_EEPROMBUSY 0
@@ -21240,6 +26532,22 @@
#define V_SLOW_TIMER_ENABLE(x) ((x) << S_SLOW_TIMER_ENABLE)
#define F_SLOW_TIMER_ENABLE V_SLOW_TIMER_ENABLE(1U)
+#define S_FLASHWRPAGEMORE 5
+#define V_FLASHWRPAGEMORE(x) ((x) << S_FLASHWRPAGEMORE)
+#define F_FLASHWRPAGEMORE V_FLASHWRPAGEMORE(1U)
+
+#define S_FLASHWRENABLE 4
+#define V_FLASHWRENABLE(x) ((x) << S_FLASHWRENABLE)
+#define F_FLASHWRENABLE V_FLASHWRENABLE(1U)
+
+#define S_FLASHMOREENABLE 3
+#define V_FLASHMOREENABLE(x) ((x) << S_FLASHMOREENABLE)
+#define F_FLASHMOREENABLE V_FLASHMOREENABLE(1U)
+
+#define S_WR_RESP_ENABLE 2
+#define V_WR_RESP_ENABLE(x) ((x) << S_WR_RESP_ENABLE)
+#define F_WR_RESP_ENABLE V_WR_RESP_ENABLE(1U)
+
#define A_CIM_UP_PO_SINGLE_OUTSTANDING 0x7c30
#define S_UP_PO_SINGLE_OUTSTANDING 0
@@ -21271,6 +26579,18 @@
#define G_CIM_PCIE_PKT_ERR_CODE(x) (((x) >> S_CIM_PCIE_PKT_ERR_CODE) & M_CIM_PCIE_PKT_ERR_CODE)
#define A_CIM_IBQ_DBG_WAIT_COUNTER 0x7c40
+#define A_CIM_QUE_PERR_ADDR 0x7c40
+
+#define S_IBQPERRADDR 16
+#define M_IBQPERRADDR 0xfffU
+#define V_IBQPERRADDR(x) ((x) << S_IBQPERRADDR)
+#define G_IBQPERRADDR(x) (((x) >> S_IBQPERRADDR) & M_IBQPERRADDR)
+
+#define S_OBQPERRADDR 0
+#define M_OBQPERRADDR 0xfffU
+#define V_OBQPERRADDR(x) ((x) << S_OBQPERRADDR)
+#define G_OBQPERRADDR(x) (((x) >> S_OBQPERRADDR) & M_OBQPERRADDR)
+
#define A_CIM_PIO_UP_MST_CFG_SEL 0x7c44
#define S_PIO_UP_MST_CFG_SEL 0
@@ -21309,6 +26629,20 @@
#define V_PCIE_OBQ_IF_DISABLE(x) ((x) << S_PCIE_OBQ_IF_DISABLE)
#define F_PCIE_OBQ_IF_DISABLE V_PCIE_OBQ_IF_DISABLE(1U)
+#define S_ULP_OBQ_SIZE 8
+#define M_ULP_OBQ_SIZE 0x3U
+#define V_ULP_OBQ_SIZE(x) ((x) << S_ULP_OBQ_SIZE)
+#define G_ULP_OBQ_SIZE(x) (((x) >> S_ULP_OBQ_SIZE) & M_ULP_OBQ_SIZE)
+
+#define S_TP_IBQ_SIZE 6
+#define M_TP_IBQ_SIZE 0x3U
+#define V_TP_IBQ_SIZE(x) ((x) << S_TP_IBQ_SIZE)
+#define G_TP_IBQ_SIZE(x) (((x) >> S_TP_IBQ_SIZE) & M_TP_IBQ_SIZE)
+
+#define S_OBQ_EOM_ENABLE 5
+#define V_OBQ_EOM_ENABLE(x) ((x) << S_OBQ_EOM_ENABLE)
+#define F_OBQ_EOM_ENABLE V_OBQ_EOM_ENABLE(1U)
+
#define A_CIM_CGEN_GLOBAL 0x7c50
#define S_CGEN_GLOBAL 0
@@ -21321,6 +26655,77 @@
#define V_PIFDBGLA_DPSLP_EN(x) ((x) << S_PIFDBGLA_DPSLP_EN)
#define F_PIFDBGLA_DPSLP_EN V_PIFDBGLA_DPSLP_EN(1U)
+#define A_CIM_GFT_CMM_CONFIG 0x7c58
+
+#define S_GLFL 31
+#define V_GLFL(x) ((x) << S_GLFL)
+#define F_GLFL V_GLFL(1U)
+
+#define S_T7_WRCNTIDLE 16
+#define M_T7_WRCNTIDLE 0x7fffU
+#define V_T7_WRCNTIDLE(x) ((x) << S_T7_WRCNTIDLE)
+#define G_T7_WRCNTIDLE(x) (((x) >> S_T7_WRCNTIDLE) & M_T7_WRCNTIDLE)
+
+#define A_CIM_GFT_CONFIG 0x7c5c
+
+#define S_GFTMABASE 16
+#define M_GFTMABASE 0xffffU
+#define V_GFTMABASE(x) ((x) << S_GFTMABASE)
+#define G_GFTMABASE(x) (((x) >> S_GFTMABASE) & M_GFTMABASE)
+
+#define S_GFTHASHTBLSIZE 12
+#define M_GFTHASHTBLSIZE 0xfU
+#define V_GFTHASHTBLSIZE(x) ((x) << S_GFTHASHTBLSIZE)
+#define G_GFTHASHTBLSIZE(x) (((x) >> S_GFTHASHTBLSIZE) & M_GFTHASHTBLSIZE)
+
+#define S_GFTTCAMPRIORITY 11
+#define V_GFTTCAMPRIORITY(x) ((x) << S_GFTTCAMPRIORITY)
+#define F_GFTTCAMPRIORITY V_GFTTCAMPRIORITY(1U)
+
+#define S_GFTMATHREADID 8
+#define M_GFTMATHREADID 0x7U
+#define V_GFTMATHREADID(x) ((x) << S_GFTMATHREADID)
+#define G_GFTMATHREADID(x) (((x) >> S_GFTMATHREADID) & M_GFTMATHREADID)
+
+#define S_GFTTCAMINIT 7
+#define V_GFTTCAMINIT(x) ((x) << S_GFTTCAMINIT)
+#define F_GFTTCAMINIT V_GFTTCAMINIT(1U)
+
+#define S_GFTTCAMINITDONE 6
+#define V_GFTTCAMINITDONE(x) ((x) << S_GFTTCAMINITDONE)
+#define F_GFTTCAMINITDONE V_GFTTCAMINITDONE(1U)
+
+#define S_GFTTBLMODEEN 0
+#define V_GFTTBLMODEEN(x) ((x) << S_GFTTBLMODEEN)
+#define F_GFTTBLMODEEN V_GFTTBLMODEEN(1U)
+
+#define A_CIM_TCAM_BIST_CTRL 0x7c60
+
+#define S_RST_CB 31
+#define V_RST_CB(x) ((x) << S_RST_CB)
+#define F_RST_CB V_RST_CB(1U)
+
+#define S_CB_START 0
+#define M_CB_START 0xfffffffU
+#define V_CB_START(x) ((x) << S_CB_START)
+#define G_CB_START(x) (((x) >> S_CB_START) & M_CB_START)
+
+#define A_CIM_TCAM_BIST_CB_PASS 0x7c64
+
+#define S_CB_PASS 0
+#define M_CB_PASS 0xfffffffU
+#define V_CB_PASS(x) ((x) << S_CB_PASS)
+#define G_CB_PASS(x) (((x) >> S_CB_PASS) & M_CB_PASS)
+
+#define A_CIM_TCAM_BIST_CB_BUSY 0x7c68
+
+#define S_CB_BUSY 0
+#define M_CB_BUSY 0xfffffffU
+#define V_CB_BUSY(x) ((x) << S_CB_BUSY)
+#define G_CB_BUSY(x) (((x) >> S_CB_BUSY) & M_CB_BUSY)
+
+#define A_CIM_GFT_MASK 0x7c70
+
/* registers for module TP */
#define TP_BASE_ADDR 0x7d00
@@ -21613,6 +27018,14 @@
#define V_CRXPKTXT(x) ((x) << S_CRXPKTXT)
#define F_CRXPKTXT V_CRXPKTXT(1U)
+#define S_ETOEBYPCSUMNOWAIT 15
+#define V_ETOEBYPCSUMNOWAIT(x) ((x) << S_ETOEBYPCSUMNOWAIT)
+#define F_ETOEBYPCSUMNOWAIT V_ETOEBYPCSUMNOWAIT(1U)
+
+#define S_ENICCSUMNOWAIT 14
+#define V_ENICCSUMNOWAIT(x) ((x) << S_ENICCSUMNOWAIT)
+#define F_ENICCSUMNOWAIT V_ENICCSUMNOWAIT(1U)
+
#define A_TP_GLOBAL_CONFIG 0x7d08
#define S_SYNCOOKIEPARAMS 26
@@ -21703,6 +27116,31 @@
#define V_ACTIVEFILTERCOUNTS(x) ((x) << S_ACTIVEFILTERCOUNTS)
#define F_ACTIVEFILTERCOUNTS V_ACTIVEFILTERCOUNTS(1U)
+#define S_RXSACKPARSE 31
+#define V_RXSACKPARSE(x) ((x) << S_RXSACKPARSE)
+#define F_RXSACKPARSE V_RXSACKPARSE(1U)
+
+#define S_RXSACKFWDMODE 29
+#define M_RXSACKFWDMODE 0x3U
+#define V_RXSACKFWDMODE(x) ((x) << S_RXSACKFWDMODE)
+#define G_RXSACKFWDMODE(x) (((x) >> S_RXSACKFWDMODE) & M_RXSACKFWDMODE)
+
+#define S_SRVRCHRSSEN 26
+#define V_SRVRCHRSSEN(x) ((x) << S_SRVRCHRSSEN)
+#define F_SRVRCHRSSEN V_SRVRCHRSSEN(1U)
+
+#define S_LBCHNDISTEN 23
+#define V_LBCHNDISTEN(x) ((x) << S_LBCHNDISTEN)
+#define F_LBCHNDISTEN V_LBCHNDISTEN(1U)
+
+#define S_ETHTNLLEN2X 20
+#define V_ETHTNLLEN2X(x) ((x) << S_ETHTNLLEN2X)
+#define F_ETHTNLLEN2X V_ETHTNLLEN2X(1U)
+
+#define S_EGLBCHNDISTEN 19
+#define V_EGLBCHNDISTEN(x) ((x) << S_EGLBCHNDISTEN)
+#define F_EGLBCHNDISTEN V_EGLBCHNDISTEN(1U)
+
#define A_TP_DB_CONFIG 0x7d0c
#define S_DBMAXOPCNT 24
@@ -21767,6 +27205,11 @@
#define V_PMRXMAXPAGE(x) ((x) << S_PMRXMAXPAGE)
#define G_PMRXMAXPAGE(x) (((x) >> S_PMRXMAXPAGE) & M_PMRXMAXPAGE)
+#define S_T7_PMRXNUMCHN 29
+#define M_T7_PMRXNUMCHN 0x7U
+#define V_T7_PMRXNUMCHN(x) ((x) << S_T7_PMRXNUMCHN)
+#define G_T7_PMRXNUMCHN(x) (((x) >> S_T7_PMRXNUMCHN) & M_T7_PMRXNUMCHN)
+
#define A_TP_PMM_TX_PAGE_SIZE 0x7d34
#define A_TP_PMM_TX_MAX_PAGE 0x7d38
@@ -21780,6 +27223,83 @@
#define V_PMTXMAXPAGE(x) ((x) << S_PMTXMAXPAGE)
#define G_PMTXMAXPAGE(x) (((x) >> S_PMTXMAXPAGE) & M_PMTXMAXPAGE)
+#define S_T7_PMTXNUMCHN 29
+#define M_T7_PMTXNUMCHN 0x7U
+#define V_T7_PMTXNUMCHN(x) ((x) << S_T7_PMTXNUMCHN)
+#define G_T7_PMTXNUMCHN(x) (((x) >> S_T7_PMTXNUMCHN) & M_T7_PMTXNUMCHN)
+
+#define A_TP_EXT_CONFIG 0x7d3c
+
+#define S_TNLERRORIPSECARW 29
+#define V_TNLERRORIPSECARW(x) ((x) << S_TNLERRORIPSECARW)
+#define F_TNLERRORIPSECARW V_TNLERRORIPSECARW(1U)
+
+#define S_TNLERRORIPSECICV 28
+#define V_TNLERRORIPSECICV(x) ((x) << S_TNLERRORIPSECICV)
+#define F_TNLERRORIPSECICV V_TNLERRORIPSECICV(1U)
+
+#define S_DROPERRORIPSECARW 25
+#define V_DROPERRORIPSECARW(x) ((x) << S_DROPERRORIPSECARW)
+#define F_DROPERRORIPSECARW V_DROPERRORIPSECARW(1U)
+
+#define S_DROPERRORIPSECICV 24
+#define V_DROPERRORIPSECICV(x) ((x) << S_DROPERRORIPSECICV)
+#define F_DROPERRORIPSECICV V_DROPERRORIPSECICV(1U)
+
+#define S_MIBRDMAROCEEN 19
+#define V_MIBRDMAROCEEN(x) ((x) << S_MIBRDMAROCEEN)
+#define F_MIBRDMAROCEEN V_MIBRDMAROCEEN(1U)
+
+#define S_MIBRDMAIWARPEN 18
+#define V_MIBRDMAIWARPEN(x) ((x) << S_MIBRDMAIWARPEN)
+#define F_MIBRDMAIWARPEN V_MIBRDMAIWARPEN(1U)
+
+#define S_BYPTXDATAACKALLEN 17
+#define V_BYPTXDATAACKALLEN(x) ((x) << S_BYPTXDATAACKALLEN)
+#define F_BYPTXDATAACKALLEN V_BYPTXDATAACKALLEN(1U)
+
+#define S_DATAACKEXTEN 16
+#define V_DATAACKEXTEN(x) ((x) << S_DATAACKEXTEN)
+#define F_DATAACKEXTEN V_DATAACKEXTEN(1U)
+
+#define S_MACMATCH11FWD 11
+#define V_MACMATCH11FWD(x) ((x) << S_MACMATCH11FWD)
+#define F_MACMATCH11FWD V_MACMATCH11FWD(1U)
+
+#define S_USERTMSTPEN 10
+#define V_USERTMSTPEN(x) ((x) << S_USERTMSTPEN)
+#define F_USERTMSTPEN V_USERTMSTPEN(1U)
+
+#define S_MMGRCACHEDIS 9
+#define V_MMGRCACHEDIS(x) ((x) << S_MMGRCACHEDIS)
+#define F_MMGRCACHEDIS V_MMGRCACHEDIS(1U)
+
+#define S_TXPKTPACKOUTUDPEN 8
+#define V_TXPKTPACKOUTUDPEN(x) ((x) << S_TXPKTPACKOUTUDPEN)
+#define F_TXPKTPACKOUTUDPEN V_TXPKTPACKOUTUDPEN(1U)
+
+#define S_IPSECROCECRCMODE 6
+#define M_IPSECROCECRCMODE 0x3U
+#define V_IPSECROCECRCMODE(x) ((x) << S_IPSECROCECRCMODE)
+#define G_IPSECROCECRCMODE(x) (((x) >> S_IPSECROCECRCMODE) & M_IPSECROCECRCMODE)
+
+#define S_IPSECIDXLOC 5
+#define V_IPSECIDXLOC(x) ((x) << S_IPSECIDXLOC)
+#define F_IPSECIDXLOC V_IPSECIDXLOC(1U)
+
+#define S_IPSECIDXCAPEN 4
+#define V_IPSECIDXCAPEN(x) ((x) << S_IPSECIDXCAPEN)
+#define F_IPSECIDXCAPEN V_IPSECIDXCAPEN(1U)
+
+#define S_IPSECOFEN 3
+#define V_IPSECOFEN(x) ((x) << S_IPSECOFEN)
+#define F_IPSECOFEN V_IPSECOFEN(1U)
+
+#define S_IPSECCFG 0
+#define M_IPSECCFG 0x7U
+#define V_IPSECCFG(x) ((x) << S_IPSECCFG)
+#define G_IPSECCFG(x) (((x) >> S_IPSECCFG) & M_IPSECCFG)
+
#define A_TP_TCP_OPTIONS 0x7d40
#define S_MTUDEFAULT 16
@@ -22615,10 +28135,6 @@
#define V_TXPDUSIZEADJ(x) ((x) << S_TXPDUSIZEADJ)
#define G_TXPDUSIZEADJ(x) (((x) >> S_TXPDUSIZEADJ) & M_TXPDUSIZEADJ)
-#define S_ENABLECBYP 21
-#define V_ENABLECBYP(x) ((x) << S_ENABLECBYP)
-#define F_ENABLECBYP V_ENABLECBYP(1U)
-
#define S_LIMITEDTRANSMIT 20
#define M_LIMITEDTRANSMIT 0xfU
#define V_LIMITEDTRANSMIT(x) ((x) << S_LIMITEDTRANSMIT)
@@ -22779,6 +28295,18 @@
#define V_ECNSYNECT(x) ((x) << S_ECNSYNECT)
#define F_ECNSYNECT V_ECNSYNECT(1U)
+#define A_TP_PARA_REG9 0x7d88
+
+#define S_PMMAXXFERLEN3 16
+#define M_PMMAXXFERLEN3 0xffffU
+#define V_PMMAXXFERLEN3(x) ((x) << S_PMMAXXFERLEN3)
+#define G_PMMAXXFERLEN3(x) (((x) >> S_PMMAXXFERLEN3) & M_PMMAXXFERLEN3)
+
+#define S_PMMAXXFERLEN2 0
+#define M_PMMAXXFERLEN2 0xffffU
+#define V_PMMAXXFERLEN2(x) ((x) << S_PMMAXXFERLEN2)
+#define G_PMMAXXFERLEN2(x) (((x) >> S_PMMAXXFERLEN2) & M_PMMAXXFERLEN2)
+
#define A_TP_ERR_CONFIG 0x7d8c
#define S_TNLERRORPING 30
@@ -22926,6 +28454,11 @@
#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION)
#define G_DELAYEDACKRESOLUTION(x) (((x) >> S_DELAYEDACKRESOLUTION) & M_DELAYEDACKRESOLUTION)
+#define S_ROCETIMERRESOLUTION 24
+#define M_ROCETIMERRESOLUTION 0xffU
+#define V_ROCETIMERRESOLUTION(x) ((x) << S_ROCETIMERRESOLUTION)
+#define G_ROCETIMERRESOLUTION(x) (((x) >> S_ROCETIMERRESOLUTION) & M_ROCETIMERRESOLUTION)
+
#define A_TP_MSL 0x7d94
#define S_MSL 0
@@ -23423,6 +28956,14 @@
#define V_FRMWRQUEMASK(x) ((x) << S_FRMWRQUEMASK)
#define G_FRMWRQUEMASK(x) (((x) >> S_FRMWRQUEMASK) & M_FRMWRQUEMASK)
+#define S_RRCPLOPT1SMSELEN 11
+#define V_RRCPLOPT1SMSELEN(x) ((x) << S_RRCPLOPT1SMSELEN)
+#define F_RRCPLOPT1SMSELEN V_RRCPLOPT1SMSELEN(1U)
+
+#define S_RRCPLOPT1BQEN 10
+#define V_RRCPLOPT1BQEN(x) ((x) << S_RRCPLOPT1BQEN)
+#define F_RRCPLOPT1BQEN V_RRCPLOPT1BQEN(1U)
+
#define A_TP_RSS_CONFIG_SYN 0x7dfc
#define A_TP_RSS_CONFIG_VRT 0x7e00
@@ -23595,6 +29136,69 @@
#define V_QUEUE(x) ((x) << S_QUEUE)
#define G_QUEUE(x) (((x) >> S_QUEUE) & M_QUEUE)
+#define S_T7_UPDVLD 19
+#define V_T7_UPDVLD(x) ((x) << S_T7_UPDVLD)
+#define F_T7_UPDVLD V_T7_UPDVLD(1U)
+
+#define S_T7_XOFF 18
+#define V_T7_XOFF(x) ((x) << S_T7_XOFF)
+#define F_T7_XOFF V_T7_XOFF(1U)
+
+#define S_T7_UPDCHN3 17
+#define V_T7_UPDCHN3(x) ((x) << S_T7_UPDCHN3)
+#define F_T7_UPDCHN3 V_T7_UPDCHN3(1U)
+
+#define S_T7_UPDCHN2 16
+#define V_T7_UPDCHN2(x) ((x) << S_T7_UPDCHN2)
+#define F_T7_UPDCHN2 V_T7_UPDCHN2(1U)
+
+#define S_T7_UPDCHN1 15
+#define V_T7_UPDCHN1(x) ((x) << S_T7_UPDCHN1)
+#define F_T7_UPDCHN1 V_T7_UPDCHN1(1U)
+
+#define S_T7_UPDCHN0 14
+#define V_T7_UPDCHN0(x) ((x) << S_T7_UPDCHN0)
+#define F_T7_UPDCHN0 V_T7_UPDCHN0(1U)
+
+#define S_T7_QUEUE 0
+#define M_T7_QUEUE 0x3fffU
+#define V_T7_QUEUE(x) ((x) << S_T7_QUEUE)
+#define G_T7_QUEUE(x) (((x) >> S_T7_QUEUE) & M_T7_QUEUE)
+
+#define A_TP_RSS_CONFIG_4CH 0x7e08
+
+#define S_BASEQIDEN 1
+#define V_BASEQIDEN(x) ((x) << S_BASEQIDEN)
+#define F_BASEQIDEN V_BASEQIDEN(1U)
+
+#define S_200GMODE 0
+#define V_200GMODE(x) ((x) << S_200GMODE)
+#define F_200GMODE V_200GMODE(1U)
+
+#define A_TP_RSS_CONFIG_SRAM 0x7e0c
+
+#define S_SRAMRDDIS 20
+#define V_SRAMRDDIS(x) ((x) << S_SRAMRDDIS)
+#define F_SRAMRDDIS V_SRAMRDDIS(1U)
+
+#define S_SRAMSTART 19
+#define V_SRAMSTART(x) ((x) << S_SRAMSTART)
+#define F_SRAMSTART V_SRAMSTART(1U)
+
+#define S_SRAMWRITE 18
+#define V_SRAMWRITE(x) ((x) << S_SRAMWRITE)
+#define F_SRAMWRITE V_SRAMWRITE(1U)
+
+#define S_SRAMSEL 16
+#define M_SRAMSEL 0x3U
+#define V_SRAMSEL(x) ((x) << S_SRAMSEL)
+#define G_SRAMSEL(x) (((x) >> S_SRAMSEL) & M_SRAMSEL)
+
+#define S_SRAMADDR 0
+#define M_SRAMADDR 0x3fffU
+#define V_SRAMADDR(x) ((x) << S_SRAMADDR)
+#define G_SRAMADDR(x) (((x) >> S_SRAMADDR) & M_SRAMADDR)
+
#define A_TP_LA_TABLE_0 0x7e10
#define S_VIRTPORT1TABLE 16
@@ -23621,6 +29225,18 @@
#define A_TP_TM_PIO_ADDR 0x7e18
#define A_TP_TM_PIO_DATA 0x7e1c
+#define A_TP_RX_MOD_CONFIG_CH3_CH2 0x7e20
+
+#define S_RXCHANNELWEIGHT3 8
+#define M_RXCHANNELWEIGHT3 0xffU
+#define V_RXCHANNELWEIGHT3(x) ((x) << S_RXCHANNELWEIGHT3)
+#define G_RXCHANNELWEIGHT3(x) (((x) >> S_RXCHANNELWEIGHT3) & M_RXCHANNELWEIGHT3)
+
+#define S_RXCHANNELWEIGHT2 0
+#define M_RXCHANNELWEIGHT2 0xffU
+#define V_RXCHANNELWEIGHT2(x) ((x) << S_RXCHANNELWEIGHT2)
+#define G_RXCHANNELWEIGHT2(x) (((x) >> S_RXCHANNELWEIGHT2) & M_RXCHANNELWEIGHT2)
+
#define A_TP_MOD_CONFIG 0x7e24
#define S_RXCHANNELWEIGHT1 24
@@ -23887,6 +29503,30 @@
#define V_SRQTABLEPERR(x) ((x) << S_SRQTABLEPERR)
#define F_SRQTABLEPERR V_SRQTABLEPERR(1U)
+#define S_TPCERR 5
+#define V_TPCERR(x) ((x) << S_TPCERR)
+#define F_TPCERR V_TPCERR(1U)
+
+#define S_OTHERPERR 4
+#define V_OTHERPERR(x) ((x) << S_OTHERPERR)
+#define F_OTHERPERR V_OTHERPERR(1U)
+
+#define S_TPEING1PERR 3
+#define V_TPEING1PERR(x) ((x) << S_TPEING1PERR)
+#define F_TPEING1PERR V_TPEING1PERR(1U)
+
+#define S_TPEING0PERR 2
+#define V_TPEING0PERR(x) ((x) << S_TPEING0PERR)
+#define F_TPEING0PERR V_TPEING0PERR(1U)
+
+#define S_TPEEGPERR 1
+#define V_TPEEGPERR(x) ((x) << S_TPEEGPERR)
+#define F_TPEEGPERR V_TPEEGPERR(1U)
+
+#define S_TPCPERR 0
+#define V_TPCPERR(x) ((x) << S_TPCPERR)
+#define F_TPCPERR V_TPCPERR(1U)
+
#define A_TP_INT_CAUSE 0x7e74
#define A_TP_PER_ENABLE 0x7e78
#define A_TP_FLM_FREE_PS_CNT 0x7e80
@@ -23907,6 +29547,11 @@
#define V_FREERXPAGECOUNT(x) ((x) << S_FREERXPAGECOUNT)
#define G_FREERXPAGECOUNT(x) (((x) >> S_FREERXPAGECOUNT) & M_FREERXPAGECOUNT)
+#define S_T7_FREERXPAGECHN 28
+#define M_T7_FREERXPAGECHN 0x7U
+#define V_T7_FREERXPAGECHN(x) ((x) << S_T7_FREERXPAGECHN)
+#define G_T7_FREERXPAGECHN(x) (((x) >> S_T7_FREERXPAGECHN) & M_T7_FREERXPAGECHN)
+
#define A_TP_FLM_FREE_TX_CNT 0x7e88
#define S_FREETXPAGECHN 28
@@ -23919,6 +29564,11 @@
#define V_FREETXPAGECOUNT(x) ((x) << S_FREETXPAGECOUNT)
#define G_FREETXPAGECOUNT(x) (((x) >> S_FREETXPAGECOUNT) & M_FREETXPAGECOUNT)
+#define S_T7_FREETXPAGECHN 28
+#define M_T7_FREETXPAGECHN 0x7U
+#define V_T7_FREETXPAGECHN(x) ((x) << S_T7_FREETXPAGECHN)
+#define G_T7_FREETXPAGECHN(x) (((x) >> S_T7_FREETXPAGECHN) & M_T7_FREETXPAGECHN)
+
#define A_TP_TM_HEAP_PUSH_CNT 0x7e8c
#define A_TP_TM_HEAP_POP_CNT 0x7e90
#define A_TP_TM_DACK_PUSH_CNT 0x7e94
@@ -24111,6 +29761,38 @@
#define V_COMMITLIMIT0(x) ((x) << S_COMMITLIMIT0)
#define G_COMMITLIMIT0(x) (((x) >> S_COMMITLIMIT0) & M_COMMITLIMIT0)
+#define S_RXCOMMITRESET3 7
+#define V_RXCOMMITRESET3(x) ((x) << S_RXCOMMITRESET3)
+#define F_RXCOMMITRESET3 V_RXCOMMITRESET3(1U)
+
+#define S_RXCOMMITRESET2 6
+#define V_RXCOMMITRESET2(x) ((x) << S_RXCOMMITRESET2)
+#define F_RXCOMMITRESET2 V_RXCOMMITRESET2(1U)
+
+#define S_T7_RXCOMMITRESET1 5
+#define V_T7_RXCOMMITRESET1(x) ((x) << S_T7_RXCOMMITRESET1)
+#define F_T7_RXCOMMITRESET1 V_T7_RXCOMMITRESET1(1U)
+
+#define S_T7_RXCOMMITRESET0 4
+#define V_T7_RXCOMMITRESET0(x) ((x) << S_T7_RXCOMMITRESET0)
+#define F_T7_RXCOMMITRESET0 V_T7_RXCOMMITRESET0(1U)
+
+#define S_RXFORCECONG3 3
+#define V_RXFORCECONG3(x) ((x) << S_RXFORCECONG3)
+#define F_RXFORCECONG3 V_RXFORCECONG3(1U)
+
+#define S_RXFORCECONG2 2
+#define V_RXFORCECONG2(x) ((x) << S_RXFORCECONG2)
+#define F_RXFORCECONG2 V_RXFORCECONG2(1U)
+
+#define S_T7_RXFORCECONG1 1
+#define V_T7_RXFORCECONG1(x) ((x) << S_T7_RXFORCECONG1)
+#define F_T7_RXFORCECONG1 V_T7_RXFORCECONG1(1U)
+
+#define S_T7_RXFORCECONG0 0
+#define V_T7_RXFORCECONG0(x) ((x) << S_T7_RXFORCECONG0)
+#define F_T7_RXFORCECONG0 V_T7_RXFORCECONG0(1U)
+
#define A_TP_TX_SCHED 0x7eb4
#define S_COMMITRESET3 31
@@ -24229,6 +29911,14 @@
#define V_RXMODXOFF0(x) ((x) << S_RXMODXOFF0)
#define F_RXMODXOFF0 V_RXMODXOFF0(1U)
+#define S_RXMODXOFF3 3
+#define V_RXMODXOFF3(x) ((x) << S_RXMODXOFF3)
+#define F_RXMODXOFF3 V_RXMODXOFF3(1U)
+
+#define S_RXMODXOFF2 2
+#define V_RXMODXOFF2(x) ((x) << S_RXMODXOFF2)
+#define F_RXMODXOFF2 V_RXMODXOFF2(1U)
+
#define A_TP_TX_ORATE 0x7ebc
#define S_OFDRATE3 24
@@ -24313,6 +30003,37 @@
#define A_TP_DBG_LA_DATAL 0x7ed8
#define A_TP_DBG_LA_DATAH 0x7edc
+#define A_TP_DBG_LA_FILTER 0x7ee0
+
+#define S_FILTERTID 12
+#define M_FILTERTID 0xfffffU
+#define V_FILTERTID(x) ((x) << S_FILTERTID)
+#define G_FILTERTID(x) (((x) >> S_FILTERTID) & M_FILTERTID)
+
+#define S_ENTIDFILTER 5
+#define V_ENTIDFILTER(x) ((x) << S_ENTIDFILTER)
+#define F_ENTIDFILTER V_ENTIDFILTER(1U)
+
+#define S_ENOFFLOAD 4
+#define V_ENOFFLOAD(x) ((x) << S_ENOFFLOAD)
+#define F_ENOFFLOAD V_ENOFFLOAD(1U)
+
+#define S_ENTUNNEL 3
+#define V_ENTUNNEL(x) ((x) << S_ENTUNNEL)
+#define F_ENTUNNEL V_ENTUNNEL(1U)
+
+#define S_ENI 2
+#define V_ENI(x) ((x) << S_ENI)
+#define F_ENI V_ENI(1U)
+
+#define S_ENC 1
+#define V_ENC(x) ((x) << S_ENC)
+#define F_ENC V_ENC(1U)
+
+#define S_ENE 0
+#define V_ENE(x) ((x) << S_ENE)
+#define F_ENE V_ENE(1U)
+
#define A_TP_PROTOCOL_CNTRL 0x7ee8
#define S_WRITEENABLE 31
@@ -24348,6 +30069,546 @@
#define V_PROTOCOLDATAFIELD(x) ((x) << S_PROTOCOLDATAFIELD)
#define G_PROTOCOLDATAFIELD(x) (((x) >> S_PROTOCOLDATAFIELD) & M_PROTOCOLDATAFIELD)
+#define A_TP_INIC_CTRL0 0x7f00
+#define A_TP_INIC_DBG 0x7f04
+#define A_TP_INIC_PERR_ENABLE 0x7f08
+
+#define S_INICMAC1_ERR 16
+#define M_INICMAC1_ERR 0x3fU
+#define V_INICMAC1_ERR(x) ((x) << S_INICMAC1_ERR)
+#define G_INICMAC1_ERR(x) (((x) >> S_INICMAC1_ERR) & M_INICMAC1_ERR)
+
+#define S_INICMAC0_ERR 0
+#define M_INICMAC0_ERR 0x3fU
+#define V_INICMAC0_ERR(x) ((x) << S_INICMAC0_ERR)
+#define G_INICMAC0_ERR(x) (((x) >> S_INICMAC0_ERR) & M_INICMAC0_ERR)
+
+#define A_TP_INIC_PERR_CAUSE 0x7f0c
+#define A_TP_PARA_REG10 0x7f20
+
+#define S_DIS39320FIX 20
+#define V_DIS39320FIX(x) ((x) << S_DIS39320FIX)
+#define F_DIS39320FIX V_DIS39320FIX(1U)
+
+#define S_IWARPMAXPDULEN 16
+#define M_IWARPMAXPDULEN 0xfU
+#define V_IWARPMAXPDULEN(x) ((x) << S_IWARPMAXPDULEN)
+#define G_IWARPMAXPDULEN(x) (((x) >> S_IWARPMAXPDULEN) & M_IWARPMAXPDULEN)
+
+#define S_TLSMAXRXDATA 0
+#define M_TLSMAXRXDATA 0xffffU
+#define V_TLSMAXRXDATA(x) ((x) << S_TLSMAXRXDATA)
+#define G_TLSMAXRXDATA(x) (((x) >> S_TLSMAXRXDATA) & M_TLSMAXRXDATA)
+
+#define A_TP_TCAM_BIST_CTRL 0x7f24
+#define A_TP_TCAM_BIST_CB_PASS 0x7f28
+#define A_TP_TCAM_BIST_CB_BUSY 0x7f2c
+#define A_TP_C_PERR_ENABLE 0x7f30
+
+#define S_DMXFIFOOVFL 26
+#define V_DMXFIFOOVFL(x) ((x) << S_DMXFIFOOVFL)
+#define F_DMXFIFOOVFL V_DMXFIFOOVFL(1U)
+
+#define S_URX2TPCDDPINTF 25
+#define V_URX2TPCDDPINTF(x) ((x) << S_URX2TPCDDPINTF)
+#define F_URX2TPCDDPINTF V_URX2TPCDDPINTF(1U)
+
+#define S_TPCDISPTOKENFIFO 24
+#define V_TPCDISPTOKENFIFO(x) ((x) << S_TPCDISPTOKENFIFO)
+#define F_TPCDISPTOKENFIFO V_TPCDISPTOKENFIFO(1U)
+
+#define S_TPCDISPCPLFIFO3 23
+#define V_TPCDISPCPLFIFO3(x) ((x) << S_TPCDISPCPLFIFO3)
+#define F_TPCDISPCPLFIFO3 V_TPCDISPCPLFIFO3(1U)
+
+#define S_TPCDISPCPLFIFO2 22
+#define V_TPCDISPCPLFIFO2(x) ((x) << S_TPCDISPCPLFIFO2)
+#define F_TPCDISPCPLFIFO2 V_TPCDISPCPLFIFO2(1U)
+
+#define S_TPCDISPCPLFIFO1 21
+#define V_TPCDISPCPLFIFO1(x) ((x) << S_TPCDISPCPLFIFO1)
+#define F_TPCDISPCPLFIFO1 V_TPCDISPCPLFIFO1(1U)
+
+#define S_TPCDISPCPLFIFO0 20
+#define V_TPCDISPCPLFIFO0(x) ((x) << S_TPCDISPCPLFIFO0)
+#define F_TPCDISPCPLFIFO0 V_TPCDISPCPLFIFO0(1U)
+
+#define S_URXPLDINTFCRC3 19
+#define V_URXPLDINTFCRC3(x) ((x) << S_URXPLDINTFCRC3)
+#define F_URXPLDINTFCRC3 V_URXPLDINTFCRC3(1U)
+
+#define S_URXPLDINTFCRC2 18
+#define V_URXPLDINTFCRC2(x) ((x) << S_URXPLDINTFCRC2)
+#define F_URXPLDINTFCRC2 V_URXPLDINTFCRC2(1U)
+
+#define S_URXPLDINTFCRC1 17
+#define V_URXPLDINTFCRC1(x) ((x) << S_URXPLDINTFCRC1)
+#define F_URXPLDINTFCRC1 V_URXPLDINTFCRC1(1U)
+
+#define S_URXPLDINTFCRC0 16
+#define V_URXPLDINTFCRC0(x) ((x) << S_URXPLDINTFCRC0)
+#define F_URXPLDINTFCRC0 V_URXPLDINTFCRC0(1U)
+
+#define S_DMXDBFIFO 15
+#define V_DMXDBFIFO(x) ((x) << S_DMXDBFIFO)
+#define F_DMXDBFIFO V_DMXDBFIFO(1U)
+
+#define S_DMXDBSRAM 14
+#define V_DMXDBSRAM(x) ((x) << S_DMXDBSRAM)
+#define F_DMXDBSRAM V_DMXDBSRAM(1U)
+
+#define S_DMXCPLFIFO 13
+#define V_DMXCPLFIFO(x) ((x) << S_DMXCPLFIFO)
+#define F_DMXCPLFIFO V_DMXCPLFIFO(1U)
+
+#define S_DMXCPLSRAM 12
+#define V_DMXCPLSRAM(x) ((x) << S_DMXCPLSRAM)
+#define F_DMXCPLSRAM V_DMXCPLSRAM(1U)
+
+#define S_DMXCSUMFIFO 11
+#define V_DMXCSUMFIFO(x) ((x) << S_DMXCSUMFIFO)
+#define F_DMXCSUMFIFO V_DMXCSUMFIFO(1U)
+
+#define S_DMXLENFIFO 10
+#define V_DMXLENFIFO(x) ((x) << S_DMXLENFIFO)
+#define F_DMXLENFIFO V_DMXLENFIFO(1U)
+
+#define S_DMXCHECKFIFO 9
+#define V_DMXCHECKFIFO(x) ((x) << S_DMXCHECKFIFO)
+#define F_DMXCHECKFIFO V_DMXCHECKFIFO(1U)
+
+#define S_DMXWINFIFO 8
+#define V_DMXWINFIFO(x) ((x) << S_DMXWINFIFO)
+#define F_DMXWINFIFO V_DMXWINFIFO(1U)
+
+#define S_EGTOKENFIFO 7
+#define V_EGTOKENFIFO(x) ((x) << S_EGTOKENFIFO)
+#define F_EGTOKENFIFO V_EGTOKENFIFO(1U)
+
+#define S_EGDATAFIFO 6
+#define V_EGDATAFIFO(x) ((x) << S_EGDATAFIFO)
+#define F_EGDATAFIFO V_EGDATAFIFO(1U)
+
+#define S_UTX2TPCINTF3 5
+#define V_UTX2TPCINTF3(x) ((x) << S_UTX2TPCINTF3)
+#define F_UTX2TPCINTF3 V_UTX2TPCINTF3(1U)
+
+#define S_UTX2TPCINTF2 4
+#define V_UTX2TPCINTF2(x) ((x) << S_UTX2TPCINTF2)
+#define F_UTX2TPCINTF2 V_UTX2TPCINTF2(1U)
+
+#define S_UTX2TPCINTF1 3
+#define V_UTX2TPCINTF1(x) ((x) << S_UTX2TPCINTF1)
+#define F_UTX2TPCINTF1 V_UTX2TPCINTF1(1U)
+
+#define S_UTX2TPCINTF0 2
+#define V_UTX2TPCINTF0(x) ((x) << S_UTX2TPCINTF0)
+#define F_UTX2TPCINTF0 V_UTX2TPCINTF0(1U)
+
+#define S_LBKTOKENFIFO 1
+#define V_LBKTOKENFIFO(x) ((x) << S_LBKTOKENFIFO)
+#define F_LBKTOKENFIFO V_LBKTOKENFIFO(1U)
+
+#define S_LBKDATAFIFO 0
+#define V_LBKDATAFIFO(x) ((x) << S_LBKDATAFIFO)
+#define F_LBKDATAFIFO V_LBKDATAFIFO(1U)
+
+#define A_TP_C_PERR_CAUSE 0x7f34
+#define A_TP_E_EG_PERR_ENABLE 0x7f38
+
+#define S_MPSLPBKTOKENFIFO 25
+#define V_MPSLPBKTOKENFIFO(x) ((x) << S_MPSLPBKTOKENFIFO)
+#define F_MPSLPBKTOKENFIFO V_MPSLPBKTOKENFIFO(1U)
+
+#define S_MPSMACTOKENFIFO 24
+#define V_MPSMACTOKENFIFO(x) ((x) << S_MPSMACTOKENFIFO)
+#define F_MPSMACTOKENFIFO V_MPSMACTOKENFIFO(1U)
+
+#define S_DISPIPSECFIFO3 23
+#define V_DISPIPSECFIFO3(x) ((x) << S_DISPIPSECFIFO3)
+#define F_DISPIPSECFIFO3 V_DISPIPSECFIFO3(1U)
+
+#define S_DISPTCPFIFO3 22
+#define V_DISPTCPFIFO3(x) ((x) << S_DISPTCPFIFO3)
+#define F_DISPTCPFIFO3 V_DISPTCPFIFO3(1U)
+
+#define S_DISPIPFIFO3 21
+#define V_DISPIPFIFO3(x) ((x) << S_DISPIPFIFO3)
+#define F_DISPIPFIFO3 V_DISPIPFIFO3(1U)
+
+#define S_DISPETHFIFO3 20
+#define V_DISPETHFIFO3(x) ((x) << S_DISPETHFIFO3)
+#define F_DISPETHFIFO3 V_DISPETHFIFO3(1U)
+
+#define S_DISPGREFIFO3 19
+#define V_DISPGREFIFO3(x) ((x) << S_DISPGREFIFO3)
+#define F_DISPGREFIFO3 V_DISPGREFIFO3(1U)
+
+#define S_DISPCPL5FIFO3 18
+#define V_DISPCPL5FIFO3(x) ((x) << S_DISPCPL5FIFO3)
+#define F_DISPCPL5FIFO3 V_DISPCPL5FIFO3(1U)
+
+#define S_DISPIPSECFIFO2 17
+#define V_DISPIPSECFIFO2(x) ((x) << S_DISPIPSECFIFO2)
+#define F_DISPIPSECFIFO2 V_DISPIPSECFIFO2(1U)
+
+#define S_DISPTCPFIFO2 16
+#define V_DISPTCPFIFO2(x) ((x) << S_DISPTCPFIFO2)
+#define F_DISPTCPFIFO2 V_DISPTCPFIFO2(1U)
+
+#define S_DISPIPFIFO2 15
+#define V_DISPIPFIFO2(x) ((x) << S_DISPIPFIFO2)
+#define F_DISPIPFIFO2 V_DISPIPFIFO2(1U)
+
+#define S_DISPETHFIFO2 14
+#define V_DISPETHFIFO2(x) ((x) << S_DISPETHFIFO2)
+#define F_DISPETHFIFO2 V_DISPETHFIFO2(1U)
+
+#define S_DISPGREFIFO2 13
+#define V_DISPGREFIFO2(x) ((x) << S_DISPGREFIFO2)
+#define F_DISPGREFIFO2 V_DISPGREFIFO2(1U)
+
+#define S_DISPCPL5FIFO2 12
+#define V_DISPCPL5FIFO2(x) ((x) << S_DISPCPL5FIFO2)
+#define F_DISPCPL5FIFO2 V_DISPCPL5FIFO2(1U)
+
+#define S_DISPIPSECFIFO1 11
+#define V_DISPIPSECFIFO1(x) ((x) << S_DISPIPSECFIFO1)
+#define F_DISPIPSECFIFO1 V_DISPIPSECFIFO1(1U)
+
+#define S_DISPTCPFIFO1 10
+#define V_DISPTCPFIFO1(x) ((x) << S_DISPTCPFIFO1)
+#define F_DISPTCPFIFO1 V_DISPTCPFIFO1(1U)
+
+#define S_DISPIPFIFO1 9
+#define V_DISPIPFIFO1(x) ((x) << S_DISPIPFIFO1)
+#define F_DISPIPFIFO1 V_DISPIPFIFO1(1U)
+
+#define S_DISPETHFIFO1 8
+#define V_DISPETHFIFO1(x) ((x) << S_DISPETHFIFO1)
+#define F_DISPETHFIFO1 V_DISPETHFIFO1(1U)
+
+#define S_DISPGREFIFO1 7
+#define V_DISPGREFIFO1(x) ((x) << S_DISPGREFIFO1)
+#define F_DISPGREFIFO1 V_DISPGREFIFO1(1U)
+
+#define S_DISPCPL5FIFO1 6
+#define V_DISPCPL5FIFO1(x) ((x) << S_DISPCPL5FIFO1)
+#define F_DISPCPL5FIFO1 V_DISPCPL5FIFO1(1U)
+
+#define S_DISPIPSECFIFO0 5
+#define V_DISPIPSECFIFO0(x) ((x) << S_DISPIPSECFIFO0)
+#define F_DISPIPSECFIFO0 V_DISPIPSECFIFO0(1U)
+
+#define S_DISPTCPFIFO0 4
+#define V_DISPTCPFIFO0(x) ((x) << S_DISPTCPFIFO0)
+#define F_DISPTCPFIFO0 V_DISPTCPFIFO0(1U)
+
+#define S_DISPIPFIFO0 3
+#define V_DISPIPFIFO0(x) ((x) << S_DISPIPFIFO0)
+#define F_DISPIPFIFO0 V_DISPIPFIFO0(1U)
+
+#define S_DISPETHFIFO0 2
+#define V_DISPETHFIFO0(x) ((x) << S_DISPETHFIFO0)
+#define F_DISPETHFIFO0 V_DISPETHFIFO0(1U)
+
+#define S_DISPGREFIFO0 1
+#define V_DISPGREFIFO0(x) ((x) << S_DISPGREFIFO0)
+#define F_DISPGREFIFO0 V_DISPGREFIFO0(1U)
+
+#define S_DISPCPL5FIFO0 0
+#define V_DISPCPL5FIFO0(x) ((x) << S_DISPCPL5FIFO0)
+#define F_DISPCPL5FIFO0 V_DISPCPL5FIFO0(1U)
+
+#define A_TP_E_EG_PERR_CAUSE 0x7f3c
+#define A_TP_E_IN0_PERR_ENABLE 0x7f40
+
+#define S_DMXISSFIFO 30
+#define V_DMXISSFIFO(x) ((x) << S_DMXISSFIFO)
+#define F_DMXISSFIFO V_DMXISSFIFO(1U)
+
+#define S_DMXERRFIFO 29
+#define V_DMXERRFIFO(x) ((x) << S_DMXERRFIFO)
+#define F_DMXERRFIFO V_DMXERRFIFO(1U)
+
+#define S_DMXATTFIFO 28
+#define V_DMXATTFIFO(x) ((x) << S_DMXATTFIFO)
+#define F_DMXATTFIFO V_DMXATTFIFO(1U)
+
+#define S_DMXTCPFIFO 27
+#define V_DMXTCPFIFO(x) ((x) << S_DMXTCPFIFO)
+#define F_DMXTCPFIFO V_DMXTCPFIFO(1U)
+
+#define S_DMXMPAFIFO 26
+#define V_DMXMPAFIFO(x) ((x) << S_DMXMPAFIFO)
+#define F_DMXMPAFIFO V_DMXMPAFIFO(1U)
+
+#define S_DMXOPTFIFO 25
+#define V_DMXOPTFIFO(x) ((x) << S_DMXOPTFIFO)
+#define F_DMXOPTFIFO V_DMXOPTFIFO(1U)
+
+#define S_INGTOKENFIFO 24
+#define V_INGTOKENFIFO(x) ((x) << S_INGTOKENFIFO)
+#define F_INGTOKENFIFO V_INGTOKENFIFO(1U)
+
+#define S_DMXPLDCHKOVFL1 21
+#define V_DMXPLDCHKOVFL1(x) ((x) << S_DMXPLDCHKOVFL1)
+#define F_DMXPLDCHKOVFL1 V_DMXPLDCHKOVFL1(1U)
+
+#define S_DMXPLDCHKFIFO1 20
+#define V_DMXPLDCHKFIFO1(x) ((x) << S_DMXPLDCHKFIFO1)
+#define F_DMXPLDCHKFIFO1 V_DMXPLDCHKFIFO1(1U)
+
+#define S_DMXOPTFIFO1 19
+#define V_DMXOPTFIFO1(x) ((x) << S_DMXOPTFIFO1)
+#define F_DMXOPTFIFO1 V_DMXOPTFIFO1(1U)
+
+#define S_DMXMPAFIFO1 18
+#define V_DMXMPAFIFO1(x) ((x) << S_DMXMPAFIFO1)
+#define F_DMXMPAFIFO1 V_DMXMPAFIFO1(1U)
+
+#define S_DMXDBFIFO1 17
+#define V_DMXDBFIFO1(x) ((x) << S_DMXDBFIFO1)
+#define F_DMXDBFIFO1 V_DMXDBFIFO1(1U)
+
+#define S_DMXATTFIFO1 16
+#define V_DMXATTFIFO1(x) ((x) << S_DMXATTFIFO1)
+#define F_DMXATTFIFO1 V_DMXATTFIFO1(1U)
+
+#define S_DMXISSFIFO1 15
+#define V_DMXISSFIFO1(x) ((x) << S_DMXISSFIFO1)
+#define F_DMXISSFIFO1 V_DMXISSFIFO1(1U)
+
+#define S_DMXTCPFIFO1 14
+#define V_DMXTCPFIFO1(x) ((x) << S_DMXTCPFIFO1)
+#define F_DMXTCPFIFO1 V_DMXTCPFIFO1(1U)
+
+#define S_DMXERRFIFO1 13
+#define V_DMXERRFIFO1(x) ((x) << S_DMXERRFIFO1)
+#define F_DMXERRFIFO1 V_DMXERRFIFO1(1U)
+
+#define S_MPS2TPINTF1 12
+#define V_MPS2TPINTF1(x) ((x) << S_MPS2TPINTF1)
+#define F_MPS2TPINTF1 V_MPS2TPINTF1(1U)
+
+#define S_DMXPLDCHKOVFL0 9
+#define V_DMXPLDCHKOVFL0(x) ((x) << S_DMXPLDCHKOVFL0)
+#define F_DMXPLDCHKOVFL0 V_DMXPLDCHKOVFL0(1U)
+
+#define S_DMXPLDCHKFIFO0 8
+#define V_DMXPLDCHKFIFO0(x) ((x) << S_DMXPLDCHKFIFO0)
+#define F_DMXPLDCHKFIFO0 V_DMXPLDCHKFIFO0(1U)
+
+#define S_DMXOPTFIFO0 7
+#define V_DMXOPTFIFO0(x) ((x) << S_DMXOPTFIFO0)
+#define F_DMXOPTFIFO0 V_DMXOPTFIFO0(1U)
+
+#define S_DMXMPAFIFO0 6
+#define V_DMXMPAFIFO0(x) ((x) << S_DMXMPAFIFO0)
+#define F_DMXMPAFIFO0 V_DMXMPAFIFO0(1U)
+
+#define S_DMXDBFIFO0 5
+#define V_DMXDBFIFO0(x) ((x) << S_DMXDBFIFO0)
+#define F_DMXDBFIFO0 V_DMXDBFIFO0(1U)
+
+#define S_DMXATTFIFO0 4
+#define V_DMXATTFIFO0(x) ((x) << S_DMXATTFIFO0)
+#define F_DMXATTFIFO0 V_DMXATTFIFO0(1U)
+
+#define S_DMXISSFIFO0 3
+#define V_DMXISSFIFO0(x) ((x) << S_DMXISSFIFO0)
+#define F_DMXISSFIFO0 V_DMXISSFIFO0(1U)
+
+#define S_DMXTCPFIFO0 2
+#define V_DMXTCPFIFO0(x) ((x) << S_DMXTCPFIFO0)
+#define F_DMXTCPFIFO0 V_DMXTCPFIFO0(1U)
+
+#define S_DMXERRFIFO0 1
+#define V_DMXERRFIFO0(x) ((x) << S_DMXERRFIFO0)
+#define F_DMXERRFIFO0 V_DMXERRFIFO0(1U)
+
+#define S_MPS2TPINTF0 0
+#define V_MPS2TPINTF0(x) ((x) << S_MPS2TPINTF0)
+#define F_MPS2TPINTF0 V_MPS2TPINTF0(1U)
+
+#define A_TP_E_IN0_PERR_CAUSE 0x7f44
+#define A_TP_E_IN1_PERR_ENABLE 0x7f48
+
+#define S_DMXPLDCHKOVFL3 21
+#define V_DMXPLDCHKOVFL3(x) ((x) << S_DMXPLDCHKOVFL3)
+#define F_DMXPLDCHKOVFL3 V_DMXPLDCHKOVFL3(1U)
+
+#define S_DMXPLDCHKFIFO3 20
+#define V_DMXPLDCHKFIFO3(x) ((x) << S_DMXPLDCHKFIFO3)
+#define F_DMXPLDCHKFIFO3 V_DMXPLDCHKFIFO3(1U)
+
+#define S_DMXOPTFIFO3 19
+#define V_DMXOPTFIFO3(x) ((x) << S_DMXOPTFIFO3)
+#define F_DMXOPTFIFO3 V_DMXOPTFIFO3(1U)
+
+#define S_DMXMPAFIFO3 18
+#define V_DMXMPAFIFO3(x) ((x) << S_DMXMPAFIFO3)
+#define F_DMXMPAFIFO3 V_DMXMPAFIFO3(1U)
+
+#define S_DMXDBFIFO3 17
+#define V_DMXDBFIFO3(x) ((x) << S_DMXDBFIFO3)
+#define F_DMXDBFIFO3 V_DMXDBFIFO3(1U)
+
+#define S_DMXATTFIFO3 16
+#define V_DMXATTFIFO3(x) ((x) << S_DMXATTFIFO3)
+#define F_DMXATTFIFO3 V_DMXATTFIFO3(1U)
+
+#define S_DMXISSFIFO3 15
+#define V_DMXISSFIFO3(x) ((x) << S_DMXISSFIFO3)
+#define F_DMXISSFIFO3 V_DMXISSFIFO3(1U)
+
+#define S_DMXTCPFIFO3 14
+#define V_DMXTCPFIFO3(x) ((x) << S_DMXTCPFIFO3)
+#define F_DMXTCPFIFO3 V_DMXTCPFIFO3(1U)
+
+#define S_DMXERRFIFO3 13
+#define V_DMXERRFIFO3(x) ((x) << S_DMXERRFIFO3)
+#define F_DMXERRFIFO3 V_DMXERRFIFO3(1U)
+
+#define S_MPS2TPINTF3 12
+#define V_MPS2TPINTF3(x) ((x) << S_MPS2TPINTF3)
+#define F_MPS2TPINTF3 V_MPS2TPINTF3(1U)
+
+#define S_DMXPLDCHKOVFL2 9
+#define V_DMXPLDCHKOVFL2(x) ((x) << S_DMXPLDCHKOVFL2)
+#define F_DMXPLDCHKOVFL2 V_DMXPLDCHKOVFL2(1U)
+
+#define S_DMXPLDCHKFIFO2 8
+#define V_DMXPLDCHKFIFO2(x) ((x) << S_DMXPLDCHKFIFO2)
+#define F_DMXPLDCHKFIFO2 V_DMXPLDCHKFIFO2(1U)
+
+#define S_DMXOPTFIFO2 7
+#define V_DMXOPTFIFO2(x) ((x) << S_DMXOPTFIFO2)
+#define F_DMXOPTFIFO2 V_DMXOPTFIFO2(1U)
+
+#define S_DMXMPAFIFO2 6
+#define V_DMXMPAFIFO2(x) ((x) << S_DMXMPAFIFO2)
+#define F_DMXMPAFIFO2 V_DMXMPAFIFO2(1U)
+
+#define S_DMXDBFIFO2 5
+#define V_DMXDBFIFO2(x) ((x) << S_DMXDBFIFO2)
+#define F_DMXDBFIFO2 V_DMXDBFIFO2(1U)
+
+#define S_DMXATTFIFO2 4
+#define V_DMXATTFIFO2(x) ((x) << S_DMXATTFIFO2)
+#define F_DMXATTFIFO2 V_DMXATTFIFO2(1U)
+
+#define S_DMXISSFIFO2 3
+#define V_DMXISSFIFO2(x) ((x) << S_DMXISSFIFO2)
+#define F_DMXISSFIFO2 V_DMXISSFIFO2(1U)
+
+#define S_DMXTCPFIFO2 2
+#define V_DMXTCPFIFO2(x) ((x) << S_DMXTCPFIFO2)
+#define F_DMXTCPFIFO2 V_DMXTCPFIFO2(1U)
+
+#define S_DMXERRFIFO2 1
+#define V_DMXERRFIFO2(x) ((x) << S_DMXERRFIFO2)
+#define F_DMXERRFIFO2 V_DMXERRFIFO2(1U)
+
+#define S_MPS2TPINTF2 0
+#define V_MPS2TPINTF2(x) ((x) << S_MPS2TPINTF2)
+#define F_MPS2TPINTF2 V_MPS2TPINTF2(1U)
+
+#define A_TP_E_IN1_PERR_CAUSE 0x7f4c
+#define A_TP_O_PERR_ENABLE 0x7f50
+
+#define S_DMARBTPERR 31
+#define V_DMARBTPERR(x) ((x) << S_DMARBTPERR)
+#define F_DMARBTPERR V_DMARBTPERR(1U)
+
+#define S_MMGRCACHEDATASRAM 24
+#define V_MMGRCACHEDATASRAM(x) ((x) << S_MMGRCACHEDATASRAM)
+#define F_MMGRCACHEDATASRAM V_MMGRCACHEDATASRAM(1U)
+
+#define S_MMGRCACHETAGFIFO 23
+#define V_MMGRCACHETAGFIFO(x) ((x) << S_MMGRCACHETAGFIFO)
+#define F_MMGRCACHETAGFIFO V_MMGRCACHETAGFIFO(1U)
+
+#define S_TPPROTOSRAM 16
+#define V_TPPROTOSRAM(x) ((x) << S_TPPROTOSRAM)
+#define F_TPPROTOSRAM V_TPPROTOSRAM(1U)
+
+#define S_HSPSRAM 15
+#define V_HSPSRAM(x) ((x) << S_HSPSRAM)
+#define F_HSPSRAM V_HSPSRAM(1U)
+
+#define S_RATEGRPSRAM 14
+#define V_RATEGRPSRAM(x) ((x) << S_RATEGRPSRAM)
+#define F_RATEGRPSRAM V_RATEGRPSRAM(1U)
+
+#define S_TXFBSEQFIFO 13
+#define V_TXFBSEQFIFO(x) ((x) << S_TXFBSEQFIFO)
+#define F_TXFBSEQFIFO V_TXFBSEQFIFO(1U)
+
+#define S_CMDATASRAM 12
+#define V_CMDATASRAM(x) ((x) << S_CMDATASRAM)
+#define F_CMDATASRAM V_CMDATASRAM(1U)
+
+#define S_CMTAGFIFO 11
+#define V_CMTAGFIFO(x) ((x) << S_CMTAGFIFO)
+#define F_CMTAGFIFO V_CMTAGFIFO(1U)
+
+#define S_RFCOPFIFO 10
+#define V_RFCOPFIFO(x) ((x) << S_RFCOPFIFO)
+#define F_RFCOPFIFO V_RFCOPFIFO(1U)
+
+#define S_DELINVFIFO 9
+#define V_DELINVFIFO(x) ((x) << S_DELINVFIFO)
+#define F_DELINVFIFO V_DELINVFIFO(1U)
+
+#define S_RSSCFGSRAM 8
+#define V_RSSCFGSRAM(x) ((x) << S_RSSCFGSRAM)
+#define F_RSSCFGSRAM V_RSSCFGSRAM(1U)
+
+#define S_RSSKEYSRAM 7
+#define V_RSSKEYSRAM(x) ((x) << S_RSSKEYSRAM)
+#define F_RSSKEYSRAM V_RSSKEYSRAM(1U)
+
+#define S_RSSLKPSRAM 6
+#define V_RSSLKPSRAM(x) ((x) << S_RSSLKPSRAM)
+#define F_RSSLKPSRAM V_RSSLKPSRAM(1U)
+
+#define S_SRQSRAM 5
+#define V_SRQSRAM(x) ((x) << S_SRQSRAM)
+#define F_SRQSRAM V_SRQSRAM(1U)
+
+#define S_ARPDASRAM 4
+#define V_ARPDASRAM(x) ((x) << S_ARPDASRAM)
+#define F_ARPDASRAM V_ARPDASRAM(1U)
+
+#define S_ARPSASRAM 3
+#define V_ARPSASRAM(x) ((x) << S_ARPSASRAM)
+#define F_ARPSASRAM V_ARPSASRAM(1U)
+
+#define S_ARPGRESRAM 2
+#define V_ARPGRESRAM(x) ((x) << S_ARPGRESRAM)
+#define F_ARPGRESRAM V_ARPGRESRAM(1U)
+
+#define S_ARPIPSECSRAM1 1
+#define V_ARPIPSECSRAM1(x) ((x) << S_ARPIPSECSRAM1)
+#define F_ARPIPSECSRAM1 V_ARPIPSECSRAM1(1U)
+
+#define S_ARPIPSECSRAM0 0
+#define V_ARPIPSECSRAM0(x) ((x) << S_ARPIPSECSRAM0)
+#define F_ARPIPSECSRAM0 V_ARPIPSECSRAM0(1U)
+
+#define A_TP_O_PERR_CAUSE 0x7f54
+#define A_TP_CERR_ENABLE 0x7f58
+
+#define S_TPCEGDATAFIFO 8
+#define V_TPCEGDATAFIFO(x) ((x) << S_TPCEGDATAFIFO)
+#define F_TPCEGDATAFIFO V_TPCEGDATAFIFO(1U)
+
+#define S_TPCLBKDATAFIFO 7
+#define V_TPCLBKDATAFIFO(x) ((x) << S_TPCLBKDATAFIFO)
+#define F_TPCLBKDATAFIFO V_TPCLBKDATAFIFO(1U)
+
+#define A_TP_CERR_CAUSE 0x7f5c
#define A_TP_TX_MOD_Q7_Q6_TIMER_SEPARATOR 0x0
#define S_TXTIMERSEPQ7 16
@@ -24520,6 +30781,137 @@
#define A_TP_TX_MOD_C3_C2_RATE_LIMIT 0xa
#define A_TP_TX_MOD_C1_C0_RATE_LIMIT 0xb
+#define A_TP_RX_MOD_Q3_Q2_TIMER_SEPARATOR 0xc
+
+#define S_RXTIMERSEPQ3 16
+#define M_RXTIMERSEPQ3 0xffffU
+#define V_RXTIMERSEPQ3(x) ((x) << S_RXTIMERSEPQ3)
+#define G_RXTIMERSEPQ3(x) (((x) >> S_RXTIMERSEPQ3) & M_RXTIMERSEPQ3)
+
+#define S_RXTIMERSEPQ2 0
+#define M_RXTIMERSEPQ2 0xffffU
+#define V_RXTIMERSEPQ2(x) ((x) << S_RXTIMERSEPQ2)
+#define G_RXTIMERSEPQ2(x) (((x) >> S_RXTIMERSEPQ2) & M_RXTIMERSEPQ2)
+
+#define A_TP_RX_MOD_Q3_Q2_RATE_LIMIT 0xd
+
+#define S_RXRATEINCQ3 24
+#define M_RXRATEINCQ3 0xffU
+#define V_RXRATEINCQ3(x) ((x) << S_RXRATEINCQ3)
+#define G_RXRATEINCQ3(x) (((x) >> S_RXRATEINCQ3) & M_RXRATEINCQ3)
+
+#define S_RXRATETCKQ3 16
+#define M_RXRATETCKQ3 0xffU
+#define V_RXRATETCKQ3(x) ((x) << S_RXRATETCKQ3)
+#define G_RXRATETCKQ3(x) (((x) >> S_RXRATETCKQ3) & M_RXRATETCKQ3)
+
+#define S_RXRATEINCQ2 8
+#define M_RXRATEINCQ2 0xffU
+#define V_RXRATEINCQ2(x) ((x) << S_RXRATEINCQ2)
+#define G_RXRATEINCQ2(x) (((x) >> S_RXRATEINCQ2) & M_RXRATEINCQ2)
+
+#define S_RXRATETCKQ2 0
+#define M_RXRATETCKQ2 0xffU
+#define V_RXRATETCKQ2(x) ((x) << S_RXRATETCKQ2)
+#define G_RXRATETCKQ2(x) (((x) >> S_RXRATETCKQ2) & M_RXRATETCKQ2)
+
+#define A_TP_RX_LPBK_CONG 0x1c
+#define A_TP_RX_SCHED_MOD 0x1d
+
+#define S_T7_ENABLELPBKFULL1 28
+#define M_T7_ENABLELPBKFULL1 0xfU
+#define V_T7_ENABLELPBKFULL1(x) ((x) << S_T7_ENABLELPBKFULL1)
+#define G_T7_ENABLELPBKFULL1(x) (((x) >> S_T7_ENABLELPBKFULL1) & M_T7_ENABLELPBKFULL1)
+
+#define S_T7_ENABLEFIFOFULL1 24
+#define M_T7_ENABLEFIFOFULL1 0xfU
+#define V_T7_ENABLEFIFOFULL1(x) ((x) << S_T7_ENABLEFIFOFULL1)
+#define G_T7_ENABLEFIFOFULL1(x) (((x) >> S_T7_ENABLEFIFOFULL1) & M_T7_ENABLEFIFOFULL1)
+
+#define S_T7_ENABLEPCMDFULL1 20
+#define M_T7_ENABLEPCMDFULL1 0xfU
+#define V_T7_ENABLEPCMDFULL1(x) ((x) << S_T7_ENABLEPCMDFULL1)
+#define G_T7_ENABLEPCMDFULL1(x) (((x) >> S_T7_ENABLEPCMDFULL1) & M_T7_ENABLEPCMDFULL1)
+
+#define S_T7_ENABLEHDRFULL1 16
+#define M_T7_ENABLEHDRFULL1 0xfU
+#define V_T7_ENABLEHDRFULL1(x) ((x) << S_T7_ENABLEHDRFULL1)
+#define G_T7_ENABLEHDRFULL1(x) (((x) >> S_T7_ENABLEHDRFULL1) & M_T7_ENABLEHDRFULL1)
+
+#define S_T7_ENABLELPBKFULL0 12
+#define M_T7_ENABLELPBKFULL0 0xfU
+#define V_T7_ENABLELPBKFULL0(x) ((x) << S_T7_ENABLELPBKFULL0)
+#define G_T7_ENABLELPBKFULL0(x) (((x) >> S_T7_ENABLELPBKFULL0) & M_T7_ENABLELPBKFULL0)
+
+#define S_T7_ENABLEFIFOFULL0 8
+#define M_T7_ENABLEFIFOFULL0 0xfU
+#define V_T7_ENABLEFIFOFULL0(x) ((x) << S_T7_ENABLEFIFOFULL0)
+#define G_T7_ENABLEFIFOFULL0(x) (((x) >> S_T7_ENABLEFIFOFULL0) & M_T7_ENABLEFIFOFULL0)
+
+#define S_T7_ENABLEPCMDFULL0 4
+#define M_T7_ENABLEPCMDFULL0 0xfU
+#define V_T7_ENABLEPCMDFULL0(x) ((x) << S_T7_ENABLEPCMDFULL0)
+#define G_T7_ENABLEPCMDFULL0(x) (((x) >> S_T7_ENABLEPCMDFULL0) & M_T7_ENABLEPCMDFULL0)
+
+#define S_T7_ENABLEHDRFULL0 0
+#define M_T7_ENABLEHDRFULL0 0xfU
+#define V_T7_ENABLEHDRFULL0(x) ((x) << S_T7_ENABLEHDRFULL0)
+#define G_T7_ENABLEHDRFULL0(x) (((x) >> S_T7_ENABLEHDRFULL0) & M_T7_ENABLEHDRFULL0)
+
+#define A_TP_RX_SCHED_MOD_CH3_CH2 0x1e
+
+#define S_ENABLELPBKFULL3 28
+#define M_ENABLELPBKFULL3 0xfU
+#define V_ENABLELPBKFULL3(x) ((x) << S_ENABLELPBKFULL3)
+#define G_ENABLELPBKFULL3(x) (((x) >> S_ENABLELPBKFULL3) & M_ENABLELPBKFULL3)
+
+#define S_ENABLEFIFOFULL3 24
+#define M_ENABLEFIFOFULL3 0xfU
+#define V_ENABLEFIFOFULL3(x) ((x) << S_ENABLEFIFOFULL3)
+#define G_ENABLEFIFOFULL3(x) (((x) >> S_ENABLEFIFOFULL3) & M_ENABLEFIFOFULL3)
+
+#define S_ENABLEPCMDFULL3 20
+#define M_ENABLEPCMDFULL3 0xfU
+#define V_ENABLEPCMDFULL3(x) ((x) << S_ENABLEPCMDFULL3)
+#define G_ENABLEPCMDFULL3(x) (((x) >> S_ENABLEPCMDFULL3) & M_ENABLEPCMDFULL3)
+
+#define S_ENABLEHDRFULL3 16
+#define M_ENABLEHDRFULL3 0xfU
+#define V_ENABLEHDRFULL3(x) ((x) << S_ENABLEHDRFULL3)
+#define G_ENABLEHDRFULL3(x) (((x) >> S_ENABLEHDRFULL3) & M_ENABLEHDRFULL3)
+
+#define S_ENABLELPBKFULL2 12
+#define M_ENABLELPBKFULL2 0xfU
+#define V_ENABLELPBKFULL2(x) ((x) << S_ENABLELPBKFULL2)
+#define G_ENABLELPBKFULL2(x) (((x) >> S_ENABLELPBKFULL2) & M_ENABLELPBKFULL2)
+
+#define S_ENABLEFIFOFULL2 8
+#define M_ENABLEFIFOFULL2 0xfU
+#define V_ENABLEFIFOFULL2(x) ((x) << S_ENABLEFIFOFULL2)
+#define G_ENABLEFIFOFULL2(x) (((x) >> S_ENABLEFIFOFULL2) & M_ENABLEFIFOFULL2)
+
+#define S_ENABLEPCMDFULL2 4
+#define M_ENABLEPCMDFULL2 0xfU
+#define V_ENABLEPCMDFULL2(x) ((x) << S_ENABLEPCMDFULL2)
+#define G_ENABLEPCMDFULL2(x) (((x) >> S_ENABLEPCMDFULL2) & M_ENABLEPCMDFULL2)
+
+#define S_ENABLEHDRFULL2 0
+#define M_ENABLEHDRFULL2 0xfU
+#define V_ENABLEHDRFULL2(x) ((x) << S_ENABLEHDRFULL2)
+#define G_ENABLEHDRFULL2(x) (((x) >> S_ENABLEHDRFULL2) & M_ENABLEHDRFULL2)
+
+#define A_TP_RX_SCHED_MAP_CH3_CH2 0x1f
+
+#define S_T7_RXMAPCHANNEL3 16
+#define M_T7_RXMAPCHANNEL3 0xffffU
+#define V_T7_RXMAPCHANNEL3(x) ((x) << S_T7_RXMAPCHANNEL3)
+#define G_T7_RXMAPCHANNEL3(x) (((x) >> S_T7_RXMAPCHANNEL3) & M_T7_RXMAPCHANNEL3)
+
+#define S_T7_RXMAPCHANNEL2 0
+#define M_T7_RXMAPCHANNEL2 0xffffU
+#define V_T7_RXMAPCHANNEL2(x) ((x) << S_T7_RXMAPCHANNEL2)
+#define G_T7_RXMAPCHANNEL2(x) (((x) >> S_T7_RXMAPCHANNEL2) & M_T7_RXMAPCHANNEL2)
+
#define A_TP_RX_SCHED_MAP 0x20
#define S_RXMAPCHANNEL3 24
@@ -24542,6 +30934,16 @@
#define V_RXMAPCHANNEL0(x) ((x) << S_RXMAPCHANNEL0)
#define G_RXMAPCHANNEL0(x) (((x) >> S_RXMAPCHANNEL0) & M_RXMAPCHANNEL0)
+#define S_T7_RXMAPCHANNEL1 16
+#define M_T7_RXMAPCHANNEL1 0xffffU
+#define V_T7_RXMAPCHANNEL1(x) ((x) << S_T7_RXMAPCHANNEL1)
+#define G_T7_RXMAPCHANNEL1(x) (((x) >> S_T7_RXMAPCHANNEL1) & M_T7_RXMAPCHANNEL1)
+
+#define S_T7_RXMAPCHANNEL0 0
+#define M_T7_RXMAPCHANNEL0 0xffffU
+#define V_T7_RXMAPCHANNEL0(x) ((x) << S_T7_RXMAPCHANNEL0)
+#define G_T7_RXMAPCHANNEL0(x) (((x) >> S_T7_RXMAPCHANNEL0) & M_T7_RXMAPCHANNEL0)
+
#define A_TP_RX_SCHED_SGE 0x21
#define S_RXSGEMOD1 12
@@ -24570,6 +30972,16 @@
#define V_RXSGECHANNEL0(x) ((x) << S_RXSGECHANNEL0)
#define F_RXSGECHANNEL0 V_RXSGECHANNEL0(1U)
+#define S_RXSGEMOD3 20
+#define M_RXSGEMOD3 0xfU
+#define V_RXSGEMOD3(x) ((x) << S_RXSGEMOD3)
+#define G_RXSGEMOD3(x) (((x) >> S_RXSGEMOD3) & M_RXSGEMOD3)
+
+#define S_RXSGEMOD2 16
+#define M_RXSGEMOD2 0xfU
+#define V_RXSGEMOD2(x) ((x) << S_RXSGEMOD2)
+#define G_RXSGEMOD2(x) (((x) >> S_RXSGEMOD2) & M_RXSGEMOD2)
+
#define A_TP_TX_SCHED_MAP 0x22
#define S_TXMAPCHANNEL3 12
@@ -24600,6 +31012,14 @@
#define V_TXLPKCHANNEL0(x) ((x) << S_TXLPKCHANNEL0)
#define F_TXLPKCHANNEL0 V_TXLPKCHANNEL0(1U)
+#define S_TXLPKCHANNEL3 19
+#define V_TXLPKCHANNEL3(x) ((x) << S_TXLPKCHANNEL3)
+#define F_TXLPKCHANNEL3 V_TXLPKCHANNEL3(1U)
+
+#define S_TXLPKCHANNEL2 18
+#define V_TXLPKCHANNEL2(x) ((x) << S_TXLPKCHANNEL2)
+#define F_TXLPKCHANNEL2 V_TXLPKCHANNEL2(1U)
+
#define A_TP_TX_SCHED_HDR 0x23
#define S_TXMAPHDRCHANNEL7 28
@@ -24827,6 +31247,69 @@
#define V_RXMAPE2CCHANNEL0(x) ((x) << S_RXMAPE2CCHANNEL0)
#define F_RXMAPE2CCHANNEL0 V_RXMAPE2CCHANNEL0(1U)
+#define S_T7_LB_MODE 30
+#define M_T7_LB_MODE 0x3U
+#define V_T7_LB_MODE(x) ((x) << S_T7_LB_MODE)
+#define G_T7_LB_MODE(x) (((x) >> S_T7_LB_MODE) & M_T7_LB_MODE)
+
+#define S_ING_LB_MODE 28
+#define M_ING_LB_MODE 0x3U
+#define V_ING_LB_MODE(x) ((x) << S_ING_LB_MODE)
+#define G_ING_LB_MODE(x) (((x) >> S_ING_LB_MODE) & M_ING_LB_MODE)
+
+#define S_RXC_LB_MODE 26
+#define M_RXC_LB_MODE 0x3U
+#define V_RXC_LB_MODE(x) ((x) << S_RXC_LB_MODE)
+#define G_RXC_LB_MODE(x) (((x) >> S_RXC_LB_MODE) & M_RXC_LB_MODE)
+
+#define S_SINGLERXCHANNEL 25
+#define V_SINGLERXCHANNEL(x) ((x) << S_SINGLERXCHANNEL)
+#define F_SINGLERXCHANNEL V_SINGLERXCHANNEL(1U)
+
+#define S_RXCHANNELCHECK 24
+#define V_RXCHANNELCHECK(x) ((x) << S_RXCHANNELCHECK)
+#define F_RXCHANNELCHECK V_RXCHANNELCHECK(1U)
+
+#define S_T7_RXMAPC2CCHANNEL3 21
+#define M_T7_RXMAPC2CCHANNEL3 0x7U
+#define V_T7_RXMAPC2CCHANNEL3(x) ((x) << S_T7_RXMAPC2CCHANNEL3)
+#define G_T7_RXMAPC2CCHANNEL3(x) (((x) >> S_T7_RXMAPC2CCHANNEL3) & M_T7_RXMAPC2CCHANNEL3)
+
+#define S_T7_RXMAPC2CCHANNEL2 18
+#define M_T7_RXMAPC2CCHANNEL2 0x7U
+#define V_T7_RXMAPC2CCHANNEL2(x) ((x) << S_T7_RXMAPC2CCHANNEL2)
+#define G_T7_RXMAPC2CCHANNEL2(x) (((x) >> S_T7_RXMAPC2CCHANNEL2) & M_T7_RXMAPC2CCHANNEL2)
+
+#define S_T7_RXMAPC2CCHANNEL1 15
+#define M_T7_RXMAPC2CCHANNEL1 0x7U
+#define V_T7_RXMAPC2CCHANNEL1(x) ((x) << S_T7_RXMAPC2CCHANNEL1)
+#define G_T7_RXMAPC2CCHANNEL1(x) (((x) >> S_T7_RXMAPC2CCHANNEL1) & M_T7_RXMAPC2CCHANNEL1)
+
+#define S_T7_RXMAPC2CCHANNEL0 12
+#define M_T7_RXMAPC2CCHANNEL0 0x7U
+#define V_T7_RXMAPC2CCHANNEL0(x) ((x) << S_T7_RXMAPC2CCHANNEL0)
+#define G_T7_RXMAPC2CCHANNEL0(x) (((x) >> S_T7_RXMAPC2CCHANNEL0) & M_T7_RXMAPC2CCHANNEL0)
+
+#define S_T7_RXMAPE2CCHANNEL3 9
+#define M_T7_RXMAPE2CCHANNEL3 0x7U
+#define V_T7_RXMAPE2CCHANNEL3(x) ((x) << S_T7_RXMAPE2CCHANNEL3)
+#define G_T7_RXMAPE2CCHANNEL3(x) (((x) >> S_T7_RXMAPE2CCHANNEL3) & M_T7_RXMAPE2CCHANNEL3)
+
+#define S_T7_RXMAPE2CCHANNEL2 6
+#define M_T7_RXMAPE2CCHANNEL2 0x7U
+#define V_T7_RXMAPE2CCHANNEL2(x) ((x) << S_T7_RXMAPE2CCHANNEL2)
+#define G_T7_RXMAPE2CCHANNEL2(x) (((x) >> S_T7_RXMAPE2CCHANNEL2) & M_T7_RXMAPE2CCHANNEL2)
+
+#define S_T7_RXMAPE2CCHANNEL1 3
+#define M_T7_RXMAPE2CCHANNEL1 0x7U
+#define V_T7_RXMAPE2CCHANNEL1(x) ((x) << S_T7_RXMAPE2CCHANNEL1)
+#define G_T7_RXMAPE2CCHANNEL1(x) (((x) >> S_T7_RXMAPE2CCHANNEL1) & M_T7_RXMAPE2CCHANNEL1)
+
+#define S_T7_RXMAPE2CCHANNEL0 0
+#define M_T7_RXMAPE2CCHANNEL0 0x7U
+#define V_T7_RXMAPE2CCHANNEL0(x) ((x) << S_T7_RXMAPE2CCHANNEL0)
+#define G_T7_RXMAPE2CCHANNEL0(x) (((x) >> S_T7_RXMAPE2CCHANNEL0) & M_T7_RXMAPE2CCHANNEL0)
+
#define A_TP_RX_LPBK 0x28
#define A_TP_TX_LPBK 0x29
#define A_TP_TX_SCHED_PPP 0x2a
@@ -24873,6 +31356,55 @@
#define V_COMMITLIMIT0L(x) ((x) << S_COMMITLIMIT0L)
#define G_COMMITLIMIT0L(x) (((x) >> S_COMMITLIMIT0L) & M_COMMITLIMIT0L)
+#define A_TP_RX_SCHED_FIFO_CH3_CH2 0x2c
+
+#define S_COMMITLIMIT3H 24
+#define M_COMMITLIMIT3H 0xffU
+#define V_COMMITLIMIT3H(x) ((x) << S_COMMITLIMIT3H)
+#define G_COMMITLIMIT3H(x) (((x) >> S_COMMITLIMIT3H) & M_COMMITLIMIT3H)
+
+#define S_COMMITLIMIT3L 16
+#define M_COMMITLIMIT3L 0xffU
+#define V_COMMITLIMIT3L(x) ((x) << S_COMMITLIMIT3L)
+#define G_COMMITLIMIT3L(x) (((x) >> S_COMMITLIMIT3L) & M_COMMITLIMIT3L)
+
+#define S_COMMITLIMIT2H 8
+#define M_COMMITLIMIT2H 0xffU
+#define V_COMMITLIMIT2H(x) ((x) << S_COMMITLIMIT2H)
+#define G_COMMITLIMIT2H(x) (((x) >> S_COMMITLIMIT2H) & M_COMMITLIMIT2H)
+
+#define S_COMMITLIMIT2L 0
+#define M_COMMITLIMIT2L 0xffU
+#define V_COMMITLIMIT2L(x) ((x) << S_COMMITLIMIT2L)
+#define G_COMMITLIMIT2L(x) (((x) >> S_COMMITLIMIT2L) & M_COMMITLIMIT2L)
+
+#define A_TP_CHANNEL_MAP_LPBK 0x2d
+
+#define S_T7_RXMAPCHANNELELN 12
+#define M_T7_RXMAPCHANNELELN 0xfU
+#define V_T7_RXMAPCHANNELELN(x) ((x) << S_T7_RXMAPCHANNELELN)
+#define G_T7_RXMAPCHANNELELN(x) (((x) >> S_T7_RXMAPCHANNELELN) & M_T7_RXMAPCHANNELELN)
+
+#define S_T7_RXMAPE2LCHANNEL3 9
+#define M_T7_RXMAPE2LCHANNEL3 0x7U
+#define V_T7_RXMAPE2LCHANNEL3(x) ((x) << S_T7_RXMAPE2LCHANNEL3)
+#define G_T7_RXMAPE2LCHANNEL3(x) (((x) >> S_T7_RXMAPE2LCHANNEL3) & M_T7_RXMAPE2LCHANNEL3)
+
+#define S_T7_RXMAPE2LCHANNEL2 6
+#define M_T7_RXMAPE2LCHANNEL2 0x7U
+#define V_T7_RXMAPE2LCHANNEL2(x) ((x) << S_T7_RXMAPE2LCHANNEL2)
+#define G_T7_RXMAPE2LCHANNEL2(x) (((x) >> S_T7_RXMAPE2LCHANNEL2) & M_T7_RXMAPE2LCHANNEL2)
+
+#define S_T7_RXMAPE2LCHANNEL1 3
+#define M_T7_RXMAPE2LCHANNEL1 0x7U
+#define V_T7_RXMAPE2LCHANNEL1(x) ((x) << S_T7_RXMAPE2LCHANNEL1)
+#define G_T7_RXMAPE2LCHANNEL1(x) (((x) >> S_T7_RXMAPE2LCHANNEL1) & M_T7_RXMAPE2LCHANNEL1)
+
+#define S_T7_RXMAPE2LCHANNEL0 0
+#define M_T7_RXMAPE2LCHANNEL0 0x7U
+#define V_T7_RXMAPE2LCHANNEL0(x) ((x) << S_T7_RXMAPE2LCHANNEL0)
+#define G_T7_RXMAPE2LCHANNEL0(x) (((x) >> S_T7_RXMAPE2LCHANNEL0) & M_T7_RXMAPE2LCHANNEL0)
+
#define A_TP_IPMI_CFG1 0x2e
#define S_VLANENABLE 31
@@ -24966,47 +31498,12 @@
#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
#define A_TP_RSS_PF1_CONFIG 0x31
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF2_CONFIG 0x32
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF3_CONFIG 0x33
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF4_CONFIG 0x34
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF5_CONFIG 0x35
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF6_CONFIG 0x36
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF7_CONFIG 0x37
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF_MAP 0x38
#define S_LKPIDXSIZE 24
@@ -25097,6 +31594,22 @@
#define G_PF0MSKSIZE(x) (((x) >> S_PF0MSKSIZE) & M_PF0MSKSIZE)
#define A_TP_RSS_VFL_CONFIG 0x3a
+
+#define S_BASEQID 16
+#define M_BASEQID 0xfffU
+#define V_BASEQID(x) ((x) << S_BASEQID)
+#define G_BASEQID(x) (((x) >> S_BASEQID) & M_BASEQID)
+
+#define S_MAXRRQID 8
+#define M_MAXRRQID 0xffU
+#define V_MAXRRQID(x) ((x) << S_MAXRRQID)
+#define G_MAXRRQID(x) (((x) >> S_MAXRRQID) & M_MAXRRQID)
+
+#define S_RRCOUNTER 0
+#define M_RRCOUNTER 0xffU
+#define V_RRCOUNTER(x) ((x) << S_RRCOUNTER)
+#define G_RRCOUNTER(x) (((x) >> S_RRCOUNTER) & M_RRCOUNTER)
+
#define A_TP_RSS_VFH_CONFIG 0x3b
#define S_ENABLEUDPHASH 31
@@ -25150,6 +31663,10 @@
#define V_KEYINDEX(x) ((x) << S_KEYINDEX)
#define G_KEYINDEX(x) (((x) >> S_KEYINDEX) & M_KEYINDEX)
+#define S_ROUNDROBINEN 3
+#define V_ROUNDROBINEN(x) ((x) << S_ROUNDROBINEN)
+#define F_ROUNDROBINEN V_ROUNDROBINEN(1U)
+
#define A_TP_RSS_SECRET_KEY0 0x40
#define A_TP_RSS_SECRET_KEY1 0x41
#define A_TP_RSS_SECRET_KEY2 0x42
@@ -25283,6 +31800,36 @@
#define V_SHAREDXRC(x) ((x) << S_SHAREDXRC)
#define F_SHAREDXRC V_SHAREDXRC(1U)
+#define S_VERIFYRSPOP 25
+#define M_VERIFYRSPOP 0x1fU
+#define V_VERIFYRSPOP(x) ((x) << S_VERIFYRSPOP)
+#define G_VERIFYRSPOP(x) (((x) >> S_VERIFYRSPOP) & M_VERIFYRSPOP)
+
+#define S_VERIFYREQOP 20
+#define M_VERIFYREQOP 0x1fU
+#define V_VERIFYREQOP(x) ((x) << S_VERIFYREQOP)
+#define G_VERIFYREQOP(x) (((x) >> S_VERIFYREQOP) & M_VERIFYREQOP)
+
+#define S_AWRITERSPOP 15
+#define M_AWRITERSPOP 0x1fU
+#define V_AWRITERSPOP(x) ((x) << S_AWRITERSPOP)
+#define G_AWRITERSPOP(x) (((x) >> S_AWRITERSPOP) & M_AWRITERSPOP)
+
+#define S_AWRITEREQOP 10
+#define M_AWRITEREQOP 0x1fU
+#define V_AWRITEREQOP(x) ((x) << S_AWRITEREQOP)
+#define G_AWRITEREQOP(x) (((x) >> S_AWRITEREQOP) & M_AWRITEREQOP)
+
+#define S_FLUSHRSPOP 5
+#define M_FLUSHRSPOP 0x1fU
+#define V_FLUSHRSPOP(x) ((x) << S_FLUSHRSPOP)
+#define G_FLUSHRSPOP(x) (((x) >> S_FLUSHRSPOP) & M_FLUSHRSPOP)
+
+#define S_FLUSHREQOP 0
+#define M_FLUSHREQOP 0x1fU
+#define V_FLUSHREQOP(x) ((x) << S_FLUSHREQOP)
+#define G_FLUSHREQOP(x) (((x) >> S_FLUSHREQOP) & M_FLUSHREQOP)
+
#define A_TP_FRAG_CONFIG 0x56
#define S_TLSMODE 16
@@ -25330,6 +31877,21 @@
#define V_PASSMODE(x) ((x) << S_PASSMODE)
#define G_PASSMODE(x) (((x) >> S_PASSMODE) & M_PASSMODE)
+#define S_NVMTMODE 22
+#define M_NVMTMODE 0x3U
+#define V_NVMTMODE(x) ((x) << S_NVMTMODE)
+#define G_NVMTMODE(x) (((x) >> S_NVMTMODE) & M_NVMTMODE)
+
+#define S_ROCEMODE 20
+#define M_ROCEMODE 0x3U
+#define V_ROCEMODE(x) ((x) << S_ROCEMODE)
+#define G_ROCEMODE(x) (((x) >> S_ROCEMODE) & M_ROCEMODE)
+
+#define S_DTLSMODE 18
+#define M_DTLSMODE 0x3U
+#define V_DTLSMODE(x) ((x) << S_DTLSMODE)
+#define G_DTLSMODE(x) (((x) >> S_DTLSMODE) & M_DTLSMODE)
+
#define A_TP_CMM_CONFIG 0x57
#define S_WRCNTIDLE 16
@@ -25383,6 +31945,7 @@
#define V_GRETYPE(x) ((x) << S_GRETYPE)
#define G_GRETYPE(x) (((x) >> S_GRETYPE) & M_GRETYPE)
+#define A_TP_MMGR_CMM_CONFIG 0x5a
#define A_TP_DBG_CLEAR 0x60
#define A_TP_DBG_CORE_HDR0 0x61
@@ -25843,14 +32406,6 @@
#define V_T5_EPCMDBUSY(x) ((x) << S_T5_EPCMDBUSY)
#define F_T5_EPCMDBUSY V_T5_EPCMDBUSY(1U)
-#define S_T6_ETXBUSY 1
-#define V_T6_ETXBUSY(x) ((x) << S_T6_ETXBUSY)
-#define F_T6_ETXBUSY V_T6_ETXBUSY(1U)
-
-#define S_T6_EPCMDBUSY 0
-#define V_T6_EPCMDBUSY(x) ((x) << S_T6_EPCMDBUSY)
-#define F_T6_EPCMDBUSY V_T6_EPCMDBUSY(1U)
-
#define A_TP_DBG_ENG_RES1 0x67
#define S_RXCPLSRDY 31
@@ -26114,16 +32669,6 @@
#define V_T5_RXPCMDCNG(x) ((x) << S_T5_RXPCMDCNG)
#define G_T5_RXPCMDCNG(x) (((x) >> S_T5_RXPCMDCNG) & M_T5_RXPCMDCNG)
-#define S_T6_RXFIFOCNG 20
-#define M_T6_RXFIFOCNG 0xfU
-#define V_T6_RXFIFOCNG(x) ((x) << S_T6_RXFIFOCNG)
-#define G_T6_RXFIFOCNG(x) (((x) >> S_T6_RXFIFOCNG) & M_T6_RXFIFOCNG)
-
-#define S_T6_RXPCMDCNG 14
-#define M_T6_RXPCMDCNG 0x3U
-#define V_T6_RXPCMDCNG(x) ((x) << S_T6_RXPCMDCNG)
-#define G_T6_RXPCMDCNG(x) (((x) >> S_T6_RXPCMDCNG) & M_T6_RXPCMDCNG)
-
#define A_TP_DBG_ERROR_CNT 0x6c
#define A_TP_DBG_CORE_CPL 0x6d
@@ -26191,6 +32736,244 @@
#define A_TP_DBG_CACHE_RD_HIT 0x73
#define A_TP_DBG_CACHE_MC_REQ 0x74
#define A_TP_DBG_CACHE_MC_RSP 0x75
+#define A_TP_RSS_PF0_CONFIG_CH3_CH2 0x80
+
+#define S_PFMAPALWAYS 22
+#define V_PFMAPALWAYS(x) ((x) << S_PFMAPALWAYS)
+#define F_PFMAPALWAYS V_PFMAPALWAYS(1U)
+
+#define S_PFROUNDROBINEN 21
+#define V_PFROUNDROBINEN(x) ((x) << S_PFROUNDROBINEN)
+#define F_PFROUNDROBINEN V_PFROUNDROBINEN(1U)
+
+#define S_FOURCHNEN 20
+#define V_FOURCHNEN(x) ((x) << S_FOURCHNEN)
+#define F_FOURCHNEN V_FOURCHNEN(1U)
+
+#define S_CH3DEFAULTQUEUE 10
+#define M_CH3DEFAULTQUEUE 0x3ffU
+#define V_CH3DEFAULTQUEUE(x) ((x) << S_CH3DEFAULTQUEUE)
+#define G_CH3DEFAULTQUEUE(x) (((x) >> S_CH3DEFAULTQUEUE) & M_CH3DEFAULTQUEUE)
+
+#define S_CH2DEFAULTQUEUE 0
+#define M_CH2DEFAULTQUEUE 0x3ffU
+#define V_CH2DEFAULTQUEUE(x) ((x) << S_CH2DEFAULTQUEUE)
+#define G_CH2DEFAULTQUEUE(x) (((x) >> S_CH2DEFAULTQUEUE) & M_CH2DEFAULTQUEUE)
+
+#define A_TP_RSS_PF1_CONFIG_CH3_CH2 0x81
+#define A_TP_RSS_PF2_CONFIG_CH3_CH2 0x82
+#define A_TP_RSS_PF3_CONFIG_CH3_CH2 0x83
+#define A_TP_RSS_PF4_CONFIG_CH3_CH2 0x84
+#define A_TP_RSS_PF5_CONFIG_CH3_CH2 0x85
+#define A_TP_RSS_PF6_CONFIG_CH3_CH2 0x86
+#define A_TP_RSS_PF7_CONFIG_CH3_CH2 0x87
+#define A_TP_RSS_PF0_EXT_CONFIG 0x88
+#define A_TP_RSS_PF1_EXT_CONFIG 0x89
+#define A_TP_RSS_PF2_EXT_CONFIG 0x8a
+#define A_TP_RSS_PF3_EXT_CONFIG 0x8b
+#define A_TP_RSS_PF4_EXT_CONFIG 0x8c
+#define A_TP_RSS_PF5_EXT_CONFIG 0x8d
+#define A_TP_RSS_PF6_EXT_CONFIG 0x8e
+#define A_TP_RSS_PF7_EXT_CONFIG 0x8f
+#define A_TP_ROCE_CONFIG 0x90
+
+#define S_IGNAETHMSB 24
+#define V_IGNAETHMSB(x) ((x) << S_IGNAETHMSB)
+#define F_IGNAETHMSB V_IGNAETHMSB(1U)
+
+#define S_XDIDMMCTL 23
+#define V_XDIDMMCTL(x) ((x) << S_XDIDMMCTL)
+#define F_XDIDMMCTL V_XDIDMMCTL(1U)
+
+#define S_WRRETHDBGFWDEN 22
+#define V_WRRETHDBGFWDEN(x) ((x) << S_WRRETHDBGFWDEN)
+#define F_WRRETHDBGFWDEN V_WRRETHDBGFWDEN(1U)
+
+#define S_ACKINTGENCTRL 20
+#define M_ACKINTGENCTRL 0x3U
+#define V_ACKINTGENCTRL(x) ((x) << S_ACKINTGENCTRL)
+#define G_ACKINTGENCTRL(x) (((x) >> S_ACKINTGENCTRL) & M_ACKINTGENCTRL)
+
+#define S_ATOMICALIGNCHKEN 19
+#define V_ATOMICALIGNCHKEN(x) ((x) << S_ATOMICALIGNCHKEN)
+#define F_ATOMICALIGNCHKEN V_ATOMICALIGNCHKEN(1U)
+
+#define S_RDRETHLENCHKEN 18
+#define V_RDRETHLENCHKEN(x) ((x) << S_RDRETHLENCHKEN)
+#define F_RDRETHLENCHKEN V_RDRETHLENCHKEN(1U)
+
+#define S_WRTOTALLENCHKEN 17
+#define V_WRTOTALLENCHKEN(x) ((x) << S_WRTOTALLENCHKEN)
+#define F_WRTOTALLENCHKEN V_WRTOTALLENCHKEN(1U)
+
+#define S_WRRETHLENCHKEN 16
+#define V_WRRETHLENCHKEN(x) ((x) << S_WRRETHLENCHKEN)
+#define F_WRRETHLENCHKEN V_WRRETHLENCHKEN(1U)
+
+#define S_TNLERRORUDPLEN 11
+#define V_TNLERRORUDPLEN(x) ((x) << S_TNLERRORUDPLEN)
+#define F_TNLERRORUDPLEN V_TNLERRORUDPLEN(1U)
+
+#define S_TNLERRORPKEY 10
+#define V_TNLERRORPKEY(x) ((x) << S_TNLERRORPKEY)
+#define F_TNLERRORPKEY V_TNLERRORPKEY(1U)
+
+#define S_TNLERROROPCODE 9
+#define V_TNLERROROPCODE(x) ((x) << S_TNLERROROPCODE)
+#define F_TNLERROROPCODE V_TNLERROROPCODE(1U)
+
+#define S_TNLERRORTVER 8
+#define V_TNLERRORTVER(x) ((x) << S_TNLERRORTVER)
+#define F_TNLERRORTVER V_TNLERRORTVER(1U)
+
+#define S_DROPERRORUDPLEN 3
+#define V_DROPERRORUDPLEN(x) ((x) << S_DROPERRORUDPLEN)
+#define F_DROPERRORUDPLEN V_DROPERRORUDPLEN(1U)
+
+#define S_DROPERRORPKEY 2
+#define V_DROPERRORPKEY(x) ((x) << S_DROPERRORPKEY)
+#define F_DROPERRORPKEY V_DROPERRORPKEY(1U)
+
+#define S_DROPERROROPCODE 1
+#define V_DROPERROROPCODE(x) ((x) << S_DROPERROROPCODE)
+#define F_DROPERROROPCODE V_DROPERROROPCODE(1U)
+
+#define S_DROPERRORTVER 0
+#define V_DROPERRORTVER(x) ((x) << S_DROPERRORTVER)
+#define F_DROPERRORTVER V_DROPERRORTVER(1U)
+
+#define A_TP_NVMT_CONFIG 0x91
+
+#define S_PDACHKEN 2
+#define V_PDACHKEN(x) ((x) << S_PDACHKEN)
+#define F_PDACHKEN V_PDACHKEN(1U)
+
+#define S_FORCERQNONDDP 1
+#define V_FORCERQNONDDP(x) ((x) << S_FORCERQNONDDP)
+#define F_FORCERQNONDDP V_FORCERQNONDDP(1U)
+
+#define S_STRIPHCRC 0
+#define V_STRIPHCRC(x) ((x) << S_STRIPHCRC)
+#define F_STRIPHCRC V_STRIPHCRC(1U)
+
+#define A_TP_NVMT_MAXHDR 0x92
+
+#define S_MAXHDR3 24
+#define M_MAXHDR3 0xffU
+#define V_MAXHDR3(x) ((x) << S_MAXHDR3)
+#define G_MAXHDR3(x) (((x) >> S_MAXHDR3) & M_MAXHDR3)
+
+#define S_MAXHDR2 16
+#define M_MAXHDR2 0xffU
+#define V_MAXHDR2(x) ((x) << S_MAXHDR2)
+#define G_MAXHDR2(x) (((x) >> S_MAXHDR2) & M_MAXHDR2)
+
+#define S_MAXHDR1 8
+#define M_MAXHDR1 0xffU
+#define V_MAXHDR1(x) ((x) << S_MAXHDR1)
+#define G_MAXHDR1(x) (((x) >> S_MAXHDR1) & M_MAXHDR1)
+
+#define S_MAXHDR0 0
+#define M_MAXHDR0 0xffU
+#define V_MAXHDR0(x) ((x) << S_MAXHDR0)
+#define G_MAXHDR0(x) (((x) >> S_MAXHDR0) & M_MAXHDR0)
+
+#define A_TP_NVMT_PDORSVD 0x93
+
+#define S_PDORSVD3 24
+#define M_PDORSVD3 0xffU
+#define V_PDORSVD3(x) ((x) << S_PDORSVD3)
+#define G_PDORSVD3(x) (((x) >> S_PDORSVD3) & M_PDORSVD3)
+
+#define S_PDORSVD2 16
+#define M_PDORSVD2 0xffU
+#define V_PDORSVD2(x) ((x) << S_PDORSVD2)
+#define G_PDORSVD2(x) (((x) >> S_PDORSVD2) & M_PDORSVD2)
+
+#define S_PDORSVD1 8
+#define M_PDORSVD1 0xffU
+#define V_PDORSVD1(x) ((x) << S_PDORSVD1)
+#define G_PDORSVD1(x) (((x) >> S_PDORSVD1) & M_PDORSVD1)
+
+#define S_PDORSVD0 0
+#define M_PDORSVD0 0xffU
+#define V_PDORSVD0(x) ((x) << S_PDORSVD0)
+#define G_PDORSVD0(x) (((x) >> S_PDORSVD0) & M_PDORSVD0)
+
+#define A_TP_RDMA_CONFIG 0x94
+
+#define S_SRQLIMITEN 20
+#define V_SRQLIMITEN(x) ((x) << S_SRQLIMITEN)
+#define F_SRQLIMITEN V_SRQLIMITEN(1U)
+
+#define S_SNDIMMSEOP 15
+#define M_SNDIMMSEOP 0x1fU
+#define V_SNDIMMSEOP(x) ((x) << S_SNDIMMSEOP)
+#define G_SNDIMMSEOP(x) (((x) >> S_SNDIMMSEOP) & M_SNDIMMSEOP)
+
+#define S_SNDIMMOP 10
+#define M_SNDIMMOP 0x1fU
+#define V_SNDIMMOP(x) ((x) << S_SNDIMMOP)
+#define G_SNDIMMOP(x) (((x) >> S_SNDIMMOP) & M_SNDIMMOP)
+
+#define S_IWARPXRCIDCHKEN 4
+#define V_IWARPXRCIDCHKEN(x) ((x) << S_IWARPXRCIDCHKEN)
+#define F_IWARPXRCIDCHKEN V_IWARPXRCIDCHKEN(1U)
+
+#define S_IWARPEXTOPEN 3
+#define V_IWARPEXTOPEN(x) ((x) << S_IWARPEXTOPEN)
+#define F_IWARPEXTOPEN V_IWARPEXTOPEN(1U)
+
+#define S_XRCIMPLTYPE 1
+#define V_XRCIMPLTYPE(x) ((x) << S_XRCIMPLTYPE)
+#define F_XRCIMPLTYPE V_XRCIMPLTYPE(1U)
+
+#define S_XRCEN 0
+#define V_XRCEN(x) ((x) << S_XRCEN)
+#define F_XRCEN V_XRCEN(1U)
+
+#define A_TP_ROCE_RRQ_BASE 0x95
+#define A_TP_FILTER_RATE_CFG 0x96
+
+#define S_GRP_CFG_RD 30
+#define V_GRP_CFG_RD(x) ((x) << S_GRP_CFG_RD)
+#define F_GRP_CFG_RD V_GRP_CFG_RD(1U)
+
+#define S_GRP_CFG_INIT 29
+#define V_GRP_CFG_INIT(x) ((x) << S_GRP_CFG_INIT)
+#define F_GRP_CFG_INIT V_GRP_CFG_INIT(1U)
+
+#define S_GRP_CFG_RST 28
+#define V_GRP_CFG_RST(x) ((x) << S_GRP_CFG_RST)
+#define F_GRP_CFG_RST V_GRP_CFG_RST(1U)
+
+#define S_GRP_CFG_SEL 16
+#define M_GRP_CFG_SEL 0xfffU
+#define V_GRP_CFG_SEL(x) ((x) << S_GRP_CFG_SEL)
+#define G_GRP_CFG_SEL(x) (((x) >> S_GRP_CFG_SEL) & M_GRP_CFG_SEL)
+
+#define S_US_TIMER_TICK 0
+#define M_US_TIMER_TICK 0xffffU
+#define V_US_TIMER_TICK(x) ((x) << S_US_TIMER_TICK)
+#define G_US_TIMER_TICK(x) (((x) >> S_US_TIMER_TICK) & M_US_TIMER_TICK)
+
+#define A_TP_TLS_CONFIG 0x99
+
+#define S_QUIESCETYPE1 24
+#define M_QUIESCETYPE1 0xffU
+#define V_QUIESCETYPE1(x) ((x) << S_QUIESCETYPE1)
+#define G_QUIESCETYPE1(x) (((x) >> S_QUIESCETYPE1) & M_QUIESCETYPE1)
+
+#define S_QUIESCETYPE2 16
+#define M_QUIESCETYPE2 0xffU
+#define V_QUIESCETYPE2(x) ((x) << S_QUIESCETYPE2)
+#define G_QUIESCETYPE2(x) (((x) >> S_QUIESCETYPE2) & M_QUIESCETYPE2)
+
+#define S_QUIESCETYPE3 8
+#define M_QUIESCETYPE3 0xffU
+#define V_QUIESCETYPE3(x) ((x) << S_QUIESCETYPE3)
+#define G_QUIESCETYPE3(x) (((x) >> S_QUIESCETYPE3) & M_QUIESCETYPE3)
+
#define A_TP_T5_TX_DROP_CNT_CH0 0x120
#define A_TP_T5_TX_DROP_CNT_CH1 0x121
#define A_TP_TX_DROP_CNT_CH2 0x122
@@ -26682,10 +33465,6 @@
#define A_TP_DBG_ESIDE_DISP1 0x137
-#define S_T6_ESTATIC4 12
-#define V_T6_ESTATIC4(x) ((x) << S_T6_ESTATIC4)
-#define F_T6_ESTATIC4 V_T6_ESTATIC4(1U)
-
#define S_TXFULL_ESIDE1 0
#define V_TXFULL_ESIDE1(x) ((x) << S_TXFULL_ESIDE1)
#define F_TXFULL_ESIDE1 V_TXFULL_ESIDE1(1U)
@@ -26719,20 +33498,12 @@
#define A_TP_DBG_ESIDE_DISP2 0x13a
-#define S_T6_ESTATIC4 12
-#define V_T6_ESTATIC4(x) ((x) << S_T6_ESTATIC4)
-#define F_T6_ESTATIC4 V_T6_ESTATIC4(1U)
-
#define S_TXFULL_ESIDE2 0
#define V_TXFULL_ESIDE2(x) ((x) << S_TXFULL_ESIDE2)
#define F_TXFULL_ESIDE2 V_TXFULL_ESIDE2(1U)
#define A_TP_DBG_ESIDE_DISP3 0x13b
-#define S_T6_ESTATIC4 12
-#define V_T6_ESTATIC4(x) ((x) << S_T6_ESTATIC4)
-#define F_T6_ESTATIC4 V_T6_ESTATIC4(1U)
-
#define S_TXFULL_ESIDE3 0
#define V_TXFULL_ESIDE3(x) ((x) << S_TXFULL_ESIDE3)
#define F_TXFULL_ESIDE3 V_TXFULL_ESIDE3(1U)
@@ -26836,6 +33607,94 @@
#define V_SRVRSRAM(x) ((x) << S_SRVRSRAM)
#define F_SRVRSRAM V_SRVRSRAM(1U)
+#define S_T7_FILTERMODE 31
+#define V_T7_FILTERMODE(x) ((x) << S_T7_FILTERMODE)
+#define F_T7_FILTERMODE V_T7_FILTERMODE(1U)
+
+#define S_T7_FCOEMASK 30
+#define V_T7_FCOEMASK(x) ((x) << S_T7_FCOEMASK)
+#define F_T7_FCOEMASK V_T7_FCOEMASK(1U)
+
+#define S_T7_SRVRSRAM 29
+#define V_T7_SRVRSRAM(x) ((x) << S_T7_SRVRSRAM)
+#define F_T7_SRVRSRAM V_T7_SRVRSRAM(1U)
+
+#define S_ROCEUDFORCEIPV6 28
+#define V_ROCEUDFORCEIPV6(x) ((x) << S_ROCEUDFORCEIPV6)
+#define F_ROCEUDFORCEIPV6 V_ROCEUDFORCEIPV6(1U)
+
+#define S_TCPFLAGS8 27
+#define V_TCPFLAGS8(x) ((x) << S_TCPFLAGS8)
+#define F_TCPFLAGS8 V_TCPFLAGS8(1U)
+
+#define S_MACMATCH11 26
+#define V_MACMATCH11(x) ((x) << S_MACMATCH11)
+#define F_MACMATCH11 V_MACMATCH11(1U)
+
+#define S_SMACMATCH10 25
+#define V_SMACMATCH10(x) ((x) << S_SMACMATCH10)
+#define F_SMACMATCH10 V_SMACMATCH10(1U)
+
+#define S_SMACMATCH 14
+#define V_SMACMATCH(x) ((x) << S_SMACMATCH)
+#define F_SMACMATCH V_SMACMATCH(1U)
+
+#define S_TCPFLAGS 13
+#define V_TCPFLAGS(x) ((x) << S_TCPFLAGS)
+#define F_TCPFLAGS V_TCPFLAGS(1U)
+
+#define S_SYNONLY 12
+#define V_SYNONLY(x) ((x) << S_SYNONLY)
+#define F_SYNONLY V_SYNONLY(1U)
+
+#define S_ROCE 11
+#define V_ROCE(x) ((x) << S_ROCE)
+#define F_ROCE V_ROCE(1U)
+
+#define S_T7_FRAGMENTATION 10
+#define V_T7_FRAGMENTATION(x) ((x) << S_T7_FRAGMENTATION)
+#define F_T7_FRAGMENTATION V_T7_FRAGMENTATION(1U)
+
+#define S_T7_MPSHITTYPE 9
+#define V_T7_MPSHITTYPE(x) ((x) << S_T7_MPSHITTYPE)
+#define F_T7_MPSHITTYPE V_T7_MPSHITTYPE(1U)
+
+#define S_T7_MACMATCH 8
+#define V_T7_MACMATCH(x) ((x) << S_T7_MACMATCH)
+#define F_T7_MACMATCH V_T7_MACMATCH(1U)
+
+#define S_T7_ETHERTYPE 7
+#define V_T7_ETHERTYPE(x) ((x) << S_T7_ETHERTYPE)
+#define F_T7_ETHERTYPE V_T7_ETHERTYPE(1U)
+
+#define S_T7_PROTOCOL 6
+#define V_T7_PROTOCOL(x) ((x) << S_T7_PROTOCOL)
+#define F_T7_PROTOCOL V_T7_PROTOCOL(1U)
+
+#define S_T7_TOS 5
+#define V_T7_TOS(x) ((x) << S_T7_TOS)
+#define F_T7_TOS V_T7_TOS(1U)
+
+#define S_T7_VLAN 4
+#define V_T7_VLAN(x) ((x) << S_T7_VLAN)
+#define F_T7_VLAN V_T7_VLAN(1U)
+
+#define S_T7_VNIC_ID 3
+#define V_T7_VNIC_ID(x) ((x) << S_T7_VNIC_ID)
+#define F_T7_VNIC_ID V_T7_VNIC_ID(1U)
+
+#define S_T7_PORT 2
+#define V_T7_PORT(x) ((x) << S_T7_PORT)
+#define F_T7_PORT V_T7_PORT(1U)
+
+#define S_T7_FCOE 1
+#define V_T7_FCOE(x) ((x) << S_T7_FCOE)
+#define F_T7_FCOE V_T7_FCOE(1U)
+
+#define S_IPSECIDX 0
+#define V_IPSECIDX(x) ((x) << S_IPSECIDX)
+#define F_IPSECIDX V_IPSECIDX(1U)
+
#define A_TP_INGRESS_CONFIG 0x141
#define S_OPAQUE_TYPE 16
@@ -26888,6 +33747,14 @@
#define V_USE_ENC_IDX(x) ((x) << S_USE_ENC_IDX)
#define F_USE_ENC_IDX V_USE_ENC_IDX(1U)
+#define S_USE_MPS_ECN 15
+#define V_USE_MPS_ECN(x) ((x) << S_USE_MPS_ECN)
+#define F_USE_MPS_ECN V_USE_MPS_ECN(1U)
+
+#define S_USE_MPS_CONG 14
+#define V_USE_MPS_CONG(x) ((x) << S_USE_MPS_CONG)
+#define F_USE_MPS_CONG V_USE_MPS_CONG(1U)
+
#define A_TP_TX_DROP_CFG_CH2 0x142
#define A_TP_TX_DROP_CFG_CH3 0x143
#define A_TP_EGRESS_CONFIG 0x145
@@ -27490,6 +34357,51 @@
#define V_ROCEV2UDPPORT(x) ((x) << S_ROCEV2UDPPORT)
#define G_ROCEV2UDPPORT(x) (((x) >> S_ROCEV2UDPPORT) & M_ROCEV2UDPPORT)
+#define S_IPSECTUNETHTRANSEN 29
+#define V_IPSECTUNETHTRANSEN(x) ((x) << S_IPSECTUNETHTRANSEN)
+#define F_IPSECTUNETHTRANSEN V_IPSECTUNETHTRANSEN(1U)
+
+#define S_ROCEV2ZEROUDP6CSUM 28
+#define V_ROCEV2ZEROUDP6CSUM(x) ((x) << S_ROCEV2ZEROUDP6CSUM)
+#define F_ROCEV2ZEROUDP6CSUM V_ROCEV2ZEROUDP6CSUM(1U)
+
+#define S_ROCEV2PROCEN 27
+#define V_ROCEV2PROCEN(x) ((x) << S_ROCEV2PROCEN)
+#define F_ROCEV2PROCEN V_ROCEV2PROCEN(1U)
+
+#define A_TP_ESIDE_ROCE_PORT12 0x161
+
+#define S_ROCEV2UDPPORT2 16
+#define M_ROCEV2UDPPORT2 0xffffU
+#define V_ROCEV2UDPPORT2(x) ((x) << S_ROCEV2UDPPORT2)
+#define G_ROCEV2UDPPORT2(x) (((x) >> S_ROCEV2UDPPORT2) & M_ROCEV2UDPPORT2)
+
+#define S_ROCEV2UDPPORT1 0
+#define M_ROCEV2UDPPORT1 0xffffU
+#define V_ROCEV2UDPPORT1(x) ((x) << S_ROCEV2UDPPORT1)
+#define G_ROCEV2UDPPORT1(x) (((x) >> S_ROCEV2UDPPORT1) & M_ROCEV2UDPPORT1)
+
+#define A_TP_ESIDE_ROCE_PORT34 0x162
+
+#define S_ROCEV2UDPPORT4 16
+#define M_ROCEV2UDPPORT4 0xffffU
+#define V_ROCEV2UDPPORT4(x) ((x) << S_ROCEV2UDPPORT4)
+#define G_ROCEV2UDPPORT4(x) (((x) >> S_ROCEV2UDPPORT4) & M_ROCEV2UDPPORT4)
+
+#define S_ROCEV2UDPPORT3 0
+#define M_ROCEV2UDPPORT3 0xffffU
+#define V_ROCEV2UDPPORT3(x) ((x) << S_ROCEV2UDPPORT3)
+#define G_ROCEV2UDPPORT3(x) (((x) >> S_ROCEV2UDPPORT3) & M_ROCEV2UDPPORT3)
+
+#define A_TP_ESIDE_CONFIG1 0x163
+
+#define S_ROCEV2CRCIGN 0
+#define M_ROCEV2CRCIGN 0xfU
+#define V_ROCEV2CRCIGN(x) ((x) << S_ROCEV2CRCIGN)
+#define G_ROCEV2CRCIGN(x) (((x) >> S_ROCEV2CRCIGN) & M_ROCEV2CRCIGN)
+
+#define A_TP_ESIDE_DEBUG_CFG 0x16c
+#define A_TP_ESIDE_DEBUG_DATA 0x16d
#define A_TP_DBG_CSIDE_RX0 0x230
#define S_CRXSOPCNT 28
@@ -27962,56 +34874,7 @@
#define V_TXFULL2X(x) ((x) << S_TXFULL2X)
#define F_TXFULL2X V_TXFULL2X(1U)
-#define S_T6_TXFULL 31
-#define V_T6_TXFULL(x) ((x) << S_T6_TXFULL)
-#define F_T6_TXFULL V_T6_TXFULL(1U)
-
-#define S_T6_PLD_RXZEROP_SRDY 25
-#define V_T6_PLD_RXZEROP_SRDY(x) ((x) << S_T6_PLD_RXZEROP_SRDY)
-#define F_T6_PLD_RXZEROP_SRDY V_T6_PLD_RXZEROP_SRDY(1U)
-
-#define S_T6_DDP_SRDY 22
-#define V_T6_DDP_SRDY(x) ((x) << S_T6_DDP_SRDY)
-#define F_T6_DDP_SRDY V_T6_DDP_SRDY(1U)
-
-#define S_T6_DDP_DRDY 21
-#define V_T6_DDP_DRDY(x) ((x) << S_T6_DDP_DRDY)
-#define F_T6_DDP_DRDY V_T6_DDP_DRDY(1U)
-
#define A_TP_DBG_CSIDE_DISP1 0x23b
-
-#define S_T5_TXFULL 31
-#define V_T5_TXFULL(x) ((x) << S_T5_TXFULL)
-#define F_T5_TXFULL V_T5_TXFULL(1U)
-
-#define S_T5_PLD_RXZEROP_SRDY 25
-#define V_T5_PLD_RXZEROP_SRDY(x) ((x) << S_T5_PLD_RXZEROP_SRDY)
-#define F_T5_PLD_RXZEROP_SRDY V_T5_PLD_RXZEROP_SRDY(1U)
-
-#define S_T5_DDP_SRDY 22
-#define V_T5_DDP_SRDY(x) ((x) << S_T5_DDP_SRDY)
-#define F_T5_DDP_SRDY V_T5_DDP_SRDY(1U)
-
-#define S_T5_DDP_DRDY 21
-#define V_T5_DDP_DRDY(x) ((x) << S_T5_DDP_DRDY)
-#define F_T5_DDP_DRDY V_T5_DDP_DRDY(1U)
-
-#define S_T6_TXFULL 31
-#define V_T6_TXFULL(x) ((x) << S_T6_TXFULL)
-#define F_T6_TXFULL V_T6_TXFULL(1U)
-
-#define S_T6_PLD_RXZEROP_SRDY 25
-#define V_T6_PLD_RXZEROP_SRDY(x) ((x) << S_T6_PLD_RXZEROP_SRDY)
-#define F_T6_PLD_RXZEROP_SRDY V_T6_PLD_RXZEROP_SRDY(1U)
-
-#define S_T6_DDP_SRDY 22
-#define V_T6_DDP_SRDY(x) ((x) << S_T6_DDP_SRDY)
-#define F_T6_DDP_SRDY V_T6_DDP_SRDY(1U)
-
-#define S_T6_DDP_DRDY 21
-#define V_T6_DDP_DRDY(x) ((x) << S_T6_DDP_DRDY)
-#define F_T6_DDP_DRDY V_T6_DDP_DRDY(1U)
-
#define A_TP_DBG_CSIDE_DDP0 0x23c
#define S_DDPMSGLATEST7 28
@@ -28222,6 +35085,59 @@
#define V_ISCSICMDMODE(x) ((x) << S_ISCSICMDMODE)
#define F_ISCSICMDMODE V_ISCSICMDMODE(1U)
+#define S_NVMTOPUPDEN 30
+#define V_NVMTOPUPDEN(x) ((x) << S_NVMTOPUPDEN)
+#define F_NVMTOPUPDEN V_NVMTOPUPDEN(1U)
+
+#define S_NOPDIS 29
+#define V_NOPDIS(x) ((x) << S_NOPDIS)
+#define F_NOPDIS V_NOPDIS(1U)
+
+#define S_IWARPINVREQEN 27
+#define V_IWARPINVREQEN(x) ((x) << S_IWARPINVREQEN)
+#define F_IWARPINVREQEN V_IWARPINVREQEN(1U)
+
+#define S_ROCEINVREQEN 26
+#define V_ROCEINVREQEN(x) ((x) << S_ROCEINVREQEN)
+#define F_ROCEINVREQEN V_ROCEINVREQEN(1U)
+
+#define S_ROCESRQFWEN 25
+#define V_ROCESRQFWEN(x) ((x) << S_ROCESRQFWEN)
+#define F_ROCESRQFWEN V_ROCESRQFWEN(1U)
+
+#define S_T7_WRITEZEROOP 20
+#define M_T7_WRITEZEROOP 0x1fU
+#define V_T7_WRITEZEROOP(x) ((x) << S_T7_WRITEZEROOP)
+#define G_T7_WRITEZEROOP(x) (((x) >> S_T7_WRITEZEROOP) & M_T7_WRITEZEROOP)
+
+#define S_IWARPEXTMODE 9
+#define V_IWARPEXTMODE(x) ((x) << S_IWARPEXTMODE)
+#define F_IWARPEXTMODE V_IWARPEXTMODE(1U)
+
+#define S_IWARPINVFWEN 8
+#define V_IWARPINVFWEN(x) ((x) << S_IWARPINVFWEN)
+#define F_IWARPINVFWEN V_IWARPINVFWEN(1U)
+
+#define S_IWARPSRQFWEN 7
+#define V_IWARPSRQFWEN(x) ((x) << S_IWARPSRQFWEN)
+#define F_IWARPSRQFWEN V_IWARPSRQFWEN(1U)
+
+#define S_T7_STARTSKIPPLD 3
+#define V_T7_STARTSKIPPLD(x) ((x) << S_T7_STARTSKIPPLD)
+#define F_T7_STARTSKIPPLD V_T7_STARTSKIPPLD(1U)
+
+#define S_NVMTFLIMMEN 2
+#define V_NVMTFLIMMEN(x) ((x) << S_NVMTFLIMMEN)
+#define F_NVMTFLIMMEN V_NVMTFLIMMEN(1U)
+
+#define S_NVMTOPCTRLEN 1
+#define V_NVMTOPCTRLEN(x) ((x) << S_NVMTOPCTRLEN)
+#define F_NVMTOPCTRLEN V_NVMTOPCTRLEN(1U)
+
+#define S_T7_WRITEZEROEN 0
+#define V_T7_WRITEZEROEN(x) ((x) << S_T7_WRITEZEROEN)
+#define F_T7_WRITEZEROEN V_T7_WRITEZEROEN(1U)
+
#define A_TP_CSPI_POWER 0x243
#define S_GATECHNTX3 11
@@ -28256,6 +35172,26 @@
#define V_SLEEPREQUTRN(x) ((x) << S_SLEEPREQUTRN)
#define F_SLEEPREQUTRN V_SLEEPREQUTRN(1U)
+#define S_GATECHNRX3 7
+#define V_GATECHNRX3(x) ((x) << S_GATECHNRX3)
+#define F_GATECHNRX3 V_GATECHNRX3(1U)
+
+#define S_GATECHNRX2 6
+#define V_GATECHNRX2(x) ((x) << S_GATECHNRX2)
+#define F_GATECHNRX2 V_GATECHNRX2(1U)
+
+#define S_T7_GATECHNRX1 5
+#define V_T7_GATECHNRX1(x) ((x) << S_T7_GATECHNRX1)
+#define F_T7_GATECHNRX1 V_T7_GATECHNRX1(1U)
+
+#define S_T7_GATECHNRX0 4
+#define V_T7_GATECHNRX0(x) ((x) << S_T7_GATECHNRX0)
+#define F_T7_GATECHNRX0 V_T7_GATECHNRX0(1U)
+
+#define S_T7_SLEEPRDYUTRN 3
+#define V_T7_SLEEPRDYUTRN(x) ((x) << S_T7_SLEEPRDYUTRN)
+#define F_T7_SLEEPRDYUTRN V_T7_SLEEPRDYUTRN(1U)
+
#define A_TP_TRC_CONFIG 0x244
#define S_TRCRR 1
@@ -28266,6 +35202,19 @@
#define V_TRCCH(x) ((x) << S_TRCCH)
#define F_TRCCH V_TRCCH(1U)
+#define S_DEBUGPG 3
+#define V_DEBUGPG(x) ((x) << S_DEBUGPG)
+#define F_DEBUGPG V_DEBUGPG(1U)
+
+#define S_T7_TRCRR 2
+#define V_T7_TRCRR(x) ((x) << S_T7_TRCRR)
+#define F_T7_TRCRR V_T7_TRCRR(1U)
+
+#define S_T7_TRCCH 0
+#define M_T7_TRCCH 0x3U
+#define V_T7_TRCCH(x) ((x) << S_T7_TRCCH)
+#define G_T7_TRCCH(x) (((x) >> S_T7_TRCCH) & M_T7_TRCCH)
+
#define A_TP_TAG_CONFIG 0x245
#define S_ETAGTYPE 16
@@ -28379,26 +35328,6 @@
#define V_T5_CPRSSTATE0(x) ((x) << S_T5_CPRSSTATE0)
#define G_T5_CPRSSTATE0(x) (((x) >> S_T5_CPRSSTATE0) & M_T5_CPRSSTATE0)
-#define S_T6_CPRSSTATE3 24
-#define M_T6_CPRSSTATE3 0xfU
-#define V_T6_CPRSSTATE3(x) ((x) << S_T6_CPRSSTATE3)
-#define G_T6_CPRSSTATE3(x) (((x) >> S_T6_CPRSSTATE3) & M_T6_CPRSSTATE3)
-
-#define S_T6_CPRSSTATE2 16
-#define M_T6_CPRSSTATE2 0xfU
-#define V_T6_CPRSSTATE2(x) ((x) << S_T6_CPRSSTATE2)
-#define G_T6_CPRSSTATE2(x) (((x) >> S_T6_CPRSSTATE2) & M_T6_CPRSSTATE2)
-
-#define S_T6_CPRSSTATE1 8
-#define M_T6_CPRSSTATE1 0xfU
-#define V_T6_CPRSSTATE1(x) ((x) << S_T6_CPRSSTATE1)
-#define G_T6_CPRSSTATE1(x) (((x) >> S_T6_CPRSSTATE1) & M_T6_CPRSSTATE1)
-
-#define S_T6_CPRSSTATE0 0
-#define M_T6_CPRSSTATE0 0xfU
-#define V_T6_CPRSSTATE0(x) ((x) << S_T6_CPRSSTATE0)
-#define G_T6_CPRSSTATE0(x) (((x) >> S_T6_CPRSSTATE0) & M_T6_CPRSSTATE0)
-
#define A_TP_DBG_CSIDE_DEMUX 0x247
#define S_CALLDONE 28
@@ -28630,6 +35559,62 @@
#define A_TP_DBG_CSIDE_ARBIT_WAIT1 0x24e
#define A_TP_DBG_CSIDE_ARBIT_CNT0 0x24f
#define A_TP_DBG_CSIDE_ARBIT_CNT1 0x250
+#define A_TP_CHDR_CONFIG1 0x259
+
+#define S_CH3HIGH 24
+#define M_CH3HIGH 0xffU
+#define V_CH3HIGH(x) ((x) << S_CH3HIGH)
+#define G_CH3HIGH(x) (((x) >> S_CH3HIGH) & M_CH3HIGH)
+
+#define S_CH3LOW 16
+#define M_CH3LOW 0xffU
+#define V_CH3LOW(x) ((x) << S_CH3LOW)
+#define G_CH3LOW(x) (((x) >> S_CH3LOW) & M_CH3LOW)
+
+#define S_CH2HIGH 8
+#define M_CH2HIGH 0xffU
+#define V_CH2HIGH(x) ((x) << S_CH2HIGH)
+#define G_CH2HIGH(x) (((x) >> S_CH2HIGH) & M_CH2HIGH)
+
+#define S_CH2LOW 0
+#define M_CH2LOW 0xffU
+#define V_CH2LOW(x) ((x) << S_CH2LOW)
+#define G_CH2LOW(x) (((x) >> S_CH2LOW) & M_CH2LOW)
+
+#define A_TP_CDSP_RDMA_CONFIG 0x260
+#define A_TP_NVMT_OP_CTRL 0x268
+
+#define S_DEFOPCTRL 30
+#define M_DEFOPCTRL 0x3U
+#define V_DEFOPCTRL(x) ((x) << S_DEFOPCTRL)
+#define G_DEFOPCTRL(x) (((x) >> S_DEFOPCTRL) & M_DEFOPCTRL)
+
+#define S_NVMTOPCTRL 0
+#define M_NVMTOPCTRL 0x3fffffffU
+#define V_NVMTOPCTRL(x) ((x) << S_NVMTOPCTRL)
+#define G_NVMTOPCTRL(x) (((x) >> S_NVMTOPCTRL) & M_NVMTOPCTRL)
+
+#define A_TP_CSIDE_DEBUG_CFG 0x26c
+
+#define S_T7_OR_EN 13
+#define V_T7_OR_EN(x) ((x) << S_T7_OR_EN)
+#define F_T7_OR_EN V_T7_OR_EN(1U)
+
+#define S_T7_HI 12
+#define V_T7_HI(x) ((x) << S_T7_HI)
+#define F_T7_HI V_T7_HI(1U)
+
+#define S_T7_SELH 6
+#define M_T7_SELH 0x3fU
+#define V_T7_SELH(x) ((x) << S_T7_SELH)
+#define G_T7_SELH(x) (((x) >> S_T7_SELH) & M_T7_SELH)
+
+#define S_T7_SELL 0
+#define M_T7_SELL 0x3fU
+#define V_T7_SELL(x) ((x) << S_T7_SELL)
+#define G_T7_SELL(x) (((x) >> S_T7_SELL) & M_T7_SELL)
+
+#define A_TP_CSIDE_DEBUG_DATA 0x26d
#define A_TP_FIFO_CONFIG 0x8c0
#define S_CH1_OUTPUT 27
@@ -28771,6 +35756,174 @@
#define A_TP_MIB_TNL_ERR_1 0x71
#define A_TP_MIB_TNL_ERR_2 0x72
#define A_TP_MIB_TNL_ERR_3 0x73
+#define A_TP_MIB_RDMA_IN_PKT_0 0x80
+#define A_TP_MIB_RDMA_IN_PKT_1 0x81
+#define A_TP_MIB_RDMA_IN_PKT_2 0x82
+#define A_TP_MIB_RDMA_IN_PKT_3 0x83
+#define A_TP_MIB_RDMA_IN_BYTE_HI_0 0x84
+#define A_TP_MIB_RDMA_IN_BYTE_LO_0 0x85
+#define A_TP_MIB_RDMA_IN_BYTE_HI_1 0x86
+#define A_TP_MIB_RDMA_IN_BYTE_LO_1 0x87
+#define A_TP_MIB_RDMA_IN_BYTE_HI_2 0x88
+#define A_TP_MIB_RDMA_IN_BYTE_LO_2 0x89
+#define A_TP_MIB_RDMA_IN_BYTE_HI_3 0x8a
+#define A_TP_MIB_RDMA_IN_BYTE_LO_3 0x8b
+#define A_TP_MIB_RDMA_OUT_PKT_0 0x90
+#define A_TP_MIB_RDMA_OUT_PKT_1 0x91
+#define A_TP_MIB_RDMA_OUT_PKT_2 0x92
+#define A_TP_MIB_RDMA_OUT_PKT_3 0x93
+#define A_TP_MIB_RDMA_OUT_BYTE_HI_0 0x94
+#define A_TP_MIB_RDMA_OUT_BYTE_LO_0 0x95
+#define A_TP_MIB_RDMA_OUT_BYTE_HI_1 0x96
+#define A_TP_MIB_RDMA_OUT_BYTE_LO_1 0x97
+#define A_TP_MIB_RDMA_OUT_BYTE_HI_2 0x98
+#define A_TP_MIB_RDMA_OUT_BYTE_LO_2 0x99
+#define A_TP_MIB_RDMA_OUT_BYTE_HI_3 0x9a
+#define A_TP_MIB_RDMA_OUT_BYTE_LO_3 0x9b
+#define A_TP_MIB_ISCSI_IN_PKT_0 0xa0
+#define A_TP_MIB_ISCSI_IN_PKT_1 0xa1
+#define A_TP_MIB_ISCSI_IN_PKT_2 0xa2
+#define A_TP_MIB_ISCSI_IN_PKT_3 0xa3
+#define A_TP_MIB_ISCSI_IN_BYTE_HI_0 0xa4
+#define A_TP_MIB_ISCSI_IN_BYTE_LO_0 0xa5
+#define A_TP_MIB_ISCSI_IN_BYTE_HI_1 0xa6
+#define A_TP_MIB_ISCSI_IN_BYTE_LO_1 0xa7
+#define A_TP_MIB_ISCSI_IN_BYTE_HI_2 0xa8
+#define A_TP_MIB_ISCSI_IN_BYTE_LO_2 0xa9
+#define A_TP_MIB_ISCSI_IN_BYTE_HI_3 0xaa
+#define A_TP_MIB_ISCSI_IN_BYTE_LO_3 0xab
+#define A_TP_MIB_ISCSI_OUT_PKT_0 0xb0
+#define A_TP_MIB_ISCSI_OUT_PKT_1 0xb1
+#define A_TP_MIB_ISCSI_OUT_PKT_2 0xb2
+#define A_TP_MIB_ISCSI_OUT_PKT_3 0xb3
+#define A_TP_MIB_ISCSI_OUT_BYTE_HI_0 0xb4
+#define A_TP_MIB_ISCSI_OUT_BYTE_LO_0 0xb5
+#define A_TP_MIB_ISCSI_OUT_BYTE_HI_1 0xb6
+#define A_TP_MIB_ISCSI_OUT_BYTE_LO_1 0xb7
+#define A_TP_MIB_ISCSI_OUT_BYTE_HI_2 0xb8
+#define A_TP_MIB_ISCSI_OUT_BYTE_LO_2 0xb9
+#define A_TP_MIB_ISCSI_OUT_BYTE_HI_3 0xba
+#define A_TP_MIB_ISCSI_OUT_BYTE_LO_3 0xbb
+#define A_TP_MIB_NVMT_IN_PKT_0 0xc0
+#define A_TP_MIB_NVMT_IN_PKT_1 0xc1
+#define A_TP_MIB_NVMT_IN_PKT_2 0xc2
+#define A_TP_MIB_NVMT_IN_PKT_3 0xc3
+#define A_TP_MIB_NVMT_IN_BYTE_HI_0 0xc4
+#define A_TP_MIB_NVMT_IN_BYTE_LO_0 0xc5
+#define A_TP_MIB_NVMT_IN_BYTE_HI_1 0xc6
+#define A_TP_MIB_NVMT_IN_BYTE_LO_1 0xc7
+#define A_TP_MIB_NVMT_IN_BYTE_HI_2 0xc8
+#define A_TP_MIB_NVMT_IN_BYTE_LO_2 0xc9
+#define A_TP_MIB_NVMT_IN_BYTE_HI_3 0xca
+#define A_TP_MIB_NVMT_IN_BYTE_LO_3 0xcb
+#define A_TP_MIB_NVMT_OUT_PKT_0 0xd0
+#define A_TP_MIB_NVMT_OUT_PKT_1 0xd1
+#define A_TP_MIB_NVMT_OUT_PKT_2 0xd2
+#define A_TP_MIB_NVMT_OUT_PKT_3 0xd3
+#define A_TP_MIB_NVMT_OUT_BYTE_HI_0 0xd4
+#define A_TP_MIB_NVMT_OUT_BYTE_LO_0 0xd5
+#define A_TP_MIB_NVMT_OUT_BYTE_HI_1 0xd6
+#define A_TP_MIB_NVMT_OUT_BYTE_LO_1 0xd7
+#define A_TP_MIB_NVMT_OUT_BYTE_HI_2 0xd8
+#define A_TP_MIB_NVMT_OUT_BYTE_LO_2 0xd9
+#define A_TP_MIB_NVMT_OUT_BYTE_HI_3 0xda
+#define A_TP_MIB_NVMT_OUT_BYTE_LO_3 0xdb
+#define A_TP_MIB_TLS_IN_PKT_0 0xe0
+#define A_TP_MIB_TLS_IN_PKT_1 0xe1
+#define A_TP_MIB_TLS_IN_PKT_2 0xe2
+#define A_TP_MIB_TLS_IN_PKT_3 0xe3
+#define A_TP_MIB_TLS_IN_BYTE_HI_0 0xe4
+#define A_TP_MIB_TLS_IN_BYTE_LO_0 0xe5
+#define A_TP_MIB_TLS_IN_BYTE_HI_1 0xe6
+#define A_TP_MIB_TLS_IN_BYTE_LO_1 0xe7
+#define A_TP_MIB_TLS_IN_BYTE_HI_2 0xe8
+#define A_TP_MIB_TLS_IN_BYTE_LO_2 0xe9
+#define A_TP_MIB_TLS_IN_BYTE_HI_3 0xea
+#define A_TP_MIB_TLS_IN_BYTE_LO_3 0xeb
+#define A_TP_MIB_TLS_OUT_PKT_0 0xf0
+#define A_TP_MIB_TLS_OUT_PKT_1 0xf1
+#define A_TP_MIB_TLS_OUT_PKT_2 0xf2
+#define A_TP_MIB_TLS_OUT_PKT_3 0xf3
+#define A_TP_MIB_TLS_OUT_BYTE_HI_0 0xf4
+#define A_TP_MIB_TLS_OUT_BYTE_LO_0 0xf5
+#define A_TP_MIB_TLS_OUT_BYTE_HI_1 0xf6
+#define A_TP_MIB_TLS_OUT_BYTE_LO_1 0xf7
+#define A_TP_MIB_TLS_OUT_BYTE_HI_2 0xf8
+#define A_TP_MIB_TLS_OUT_BYTE_LO_2 0xf9
+#define A_TP_MIB_TLS_OUT_BYTE_HI_3 0xfa
+#define A_TP_MIB_TLS_OUT_BYTE_LO_3 0xfb
+#define A_TP_MIB_ROCE_IN_PKT_0 0x100
+#define A_TP_MIB_ROCE_IN_PKT_1 0x101
+#define A_TP_MIB_ROCE_IN_PKT_2 0x102
+#define A_TP_MIB_ROCE_IN_PKT_3 0x103
+#define A_TP_MIB_ROCE_IN_BYTE_HI_0 0x104
+#define A_TP_MIB_ROCE_IN_BYTE_LO_0 0x105
+#define A_TP_MIB_ROCE_IN_BYTE_HI_1 0x106
+#define A_TP_MIB_ROCE_IN_BYTE_LO_1 0x107
+#define A_TP_MIB_ROCE_IN_BYTE_HI_2 0x108
+#define A_TP_MIB_ROCE_IN_BYTE_LO_2 0x109
+#define A_TP_MIB_ROCE_IN_BYTE_HI_3 0x10a
+#define A_TP_MIB_ROCE_IN_BYTE_LO_3 0x10b
+#define A_TP_MIB_ROCE_OUT_PKT_0 0x110
+#define A_TP_MIB_ROCE_OUT_PKT_1 0x111
+#define A_TP_MIB_ROCE_OUT_PKT_2 0x112
+#define A_TP_MIB_ROCE_OUT_PKT_3 0x113
+#define A_TP_MIB_ROCE_OUT_BYTE_HI_0 0x114
+#define A_TP_MIB_ROCE_OUT_BYTE_LO_0 0x115
+#define A_TP_MIB_ROCE_OUT_BYTE_HI_1 0x116
+#define A_TP_MIB_ROCE_OUT_BYTE_LO_1 0x117
+#define A_TP_MIB_ROCE_OUT_BYTE_HI_2 0x118
+#define A_TP_MIB_ROCE_OUT_BYTE_LO_2 0x119
+#define A_TP_MIB_ROCE_OUT_BYTE_HI_3 0x11a
+#define A_TP_MIB_ROCE_OUT_BYTE_LO_3 0x11b
+#define A_TP_MIB_IPSEC_TNL_IN_PKT_0 0x120
+#define A_TP_MIB_IPSEC_TNL_IN_PKT_1 0x121
+#define A_TP_MIB_IPSEC_TNL_IN_PKT_2 0x122
+#define A_TP_MIB_IPSEC_TNL_IN_PKT_3 0x123
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_HI_0 0x124
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_LO_0 0x125
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_HI_1 0x126
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_LO_1 0x127
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_HI_2 0x128
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_LO_2 0x129
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_HI_3 0x12a
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_LO_3 0x12b
+#define A_TP_MIB_IPSEC_TNL_OUT_PKT_0 0x130
+#define A_TP_MIB_IPSEC_TNL_OUT_PKT_1 0x131
+#define A_TP_MIB_IPSEC_TNL_OUT_PKT_2 0x132
+#define A_TP_MIB_IPSEC_TNL_OUT_PKT_3 0x133
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_HI_0 0x134
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_LO_0 0x135
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_HI_1 0x136
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_LO_1 0x137
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_HI_2 0x138
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_LO_2 0x139
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_HI_3 0x13a
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_LO_3 0x13b
+#define A_TP_MIB_IPSEC_OFD_IN_PKT_0 0x140
+#define A_TP_MIB_IPSEC_OFD_IN_PKT_1 0x141
+#define A_TP_MIB_IPSEC_OFD_IN_PKT_2 0x142
+#define A_TP_MIB_IPSEC_OFD_IN_PKT_3 0x143
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_HI_0 0x144
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_LO_0 0x145
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_HI_1 0x146
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_LO_1 0x147
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_HI_2 0x148
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_LO_2 0x149
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_HI_3 0x14a
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_LO_3 0x14b
+#define A_TP_MIB_IPSEC_OFD_OUT_PKT_0 0x150
+#define A_TP_MIB_IPSEC_OFD_OUT_PKT_1 0x151
+#define A_TP_MIB_IPSEC_OFD_OUT_PKT_2 0x152
+#define A_TP_MIB_IPSEC_OFD_OUT_PKT_3 0x153
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_HI_0 0x154
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_LO_0 0x155
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_HI_1 0x156
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_LO_1 0x157
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_HI_2 0x158
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_LO_2 0x159
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_HI_3 0x15a
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_LO_3 0x15b
/* registers for module ULP_TX */
#define ULP_TX_BASE_ADDR 0x8dc0
@@ -28853,7 +36006,58 @@
#define V_ATOMIC_FIX_DIS(x) ((x) << S_ATOMIC_FIX_DIS)
#define F_ATOMIC_FIX_DIS V_ATOMIC_FIX_DIS(1U)
+#define S_LB_LEN_SEL 28
+#define V_LB_LEN_SEL(x) ((x) << S_LB_LEN_SEL)
+#define F_LB_LEN_SEL V_LB_LEN_SEL(1U)
+
+#define S_DISABLE_TPT_CREDIT_CHK 27
+#define V_DISABLE_TPT_CREDIT_CHK(x) ((x) << S_DISABLE_TPT_CREDIT_CHK)
+#define F_DISABLE_TPT_CREDIT_CHK V_DISABLE_TPT_CREDIT_CHK(1U)
+
+#define S_REQSRC 26
+#define V_REQSRC(x) ((x) << S_REQSRC)
+#define F_REQSRC V_REQSRC(1U)
+
+#define S_ERR2UP 25
+#define V_ERR2UP(x) ((x) << S_ERR2UP)
+#define F_ERR2UP V_ERR2UP(1U)
+
+#define S_SGE_INVALIDATE_DIS 24
+#define V_SGE_INVALIDATE_DIS(x) ((x) << S_SGE_INVALIDATE_DIS)
+#define F_SGE_INVALIDATE_DIS V_SGE_INVALIDATE_DIS(1U)
+
+#define S_ROCE_ACKREQ_CTRL 23
+#define V_ROCE_ACKREQ_CTRL(x) ((x) << S_ROCE_ACKREQ_CTRL)
+#define F_ROCE_ACKREQ_CTRL V_ROCE_ACKREQ_CTRL(1U)
+
+#define S_MEM_ADDR_CTRL 21
+#define M_MEM_ADDR_CTRL 0x3U
+#define V_MEM_ADDR_CTRL(x) ((x) << S_MEM_ADDR_CTRL)
+#define G_MEM_ADDR_CTRL(x) (((x) >> S_MEM_ADDR_CTRL) & M_MEM_ADDR_CTRL)
+
+#define S_TPT_EXTENSION_MODE 20
+#define V_TPT_EXTENSION_MODE(x) ((x) << S_TPT_EXTENSION_MODE)
+#define F_TPT_EXTENSION_MODE V_TPT_EXTENSION_MODE(1U)
+
+#define S_XRC_INDICATION 19
+#define V_XRC_INDICATION(x) ((x) << S_XRC_INDICATION)
+#define F_XRC_INDICATION V_XRC_INDICATION(1U)
+
+#define S_LSO_1SEG_LEN_UPD_EN 18
+#define V_LSO_1SEG_LEN_UPD_EN(x) ((x) << S_LSO_1SEG_LEN_UPD_EN)
+#define F_LSO_1SEG_LEN_UPD_EN V_LSO_1SEG_LEN_UPD_EN(1U)
+
+#define S_PKT_ISGL_ERR_ST_EN 17
+#define V_PKT_ISGL_ERR_ST_EN(x) ((x) << S_PKT_ISGL_ERR_ST_EN)
+#define F_PKT_ISGL_ERR_ST_EN V_PKT_ISGL_ERR_ST_EN(1U)
+
#define A_ULP_TX_PERR_INJECT 0x8dc4
+
+#define S_T7_1_MEMSEL 1
+#define M_T7_1_MEMSEL 0x7fU
+#define V_T7_1_MEMSEL(x) ((x) << S_T7_1_MEMSEL)
+#define G_T7_1_MEMSEL(x) (((x) >> S_T7_1_MEMSEL) & M_T7_1_MEMSEL)
+
#define A_ULP_TX_INT_ENABLE 0x8dc8
#define S_PBL_BOUND_ERR_CH3 31
@@ -28984,8 +36188,28 @@
#define V_IMM_DATA_PERR_SET_CH0(x) ((x) << S_IMM_DATA_PERR_SET_CH0)
#define F_IMM_DATA_PERR_SET_CH0 V_IMM_DATA_PERR_SET_CH0(1U)
+#define A_ULP_TX_INT_ENABLE_1 0x8dc8
+
+#define S_TLS_DSGL_PARERR3 3
+#define V_TLS_DSGL_PARERR3(x) ((x) << S_TLS_DSGL_PARERR3)
+#define F_TLS_DSGL_PARERR3 V_TLS_DSGL_PARERR3(1U)
+
+#define S_TLS_DSGL_PARERR2 2
+#define V_TLS_DSGL_PARERR2(x) ((x) << S_TLS_DSGL_PARERR2)
+#define F_TLS_DSGL_PARERR2 V_TLS_DSGL_PARERR2(1U)
+
+#define S_TLS_DSGL_PARERR1 1
+#define V_TLS_DSGL_PARERR1(x) ((x) << S_TLS_DSGL_PARERR1)
+#define F_TLS_DSGL_PARERR1 V_TLS_DSGL_PARERR1(1U)
+
+#define S_TLS_DSGL_PARERR0 0
+#define V_TLS_DSGL_PARERR0(x) ((x) << S_TLS_DSGL_PARERR0)
+#define F_TLS_DSGL_PARERR0 V_TLS_DSGL_PARERR0(1U)
+
#define A_ULP_TX_INT_CAUSE 0x8dcc
+#define A_ULP_TX_INT_CAUSE_1 0x8dcc
#define A_ULP_TX_PERR_ENABLE 0x8dd0
+#define A_ULP_TX_PERR_ENABLE_1 0x8dd0
#define A_ULP_TX_TPT_LLIMIT 0x8dd4
#define A_ULP_TX_TPT_ULIMIT 0x8dd8
#define A_ULP_TX_PBL_LLIMIT 0x8ddc
@@ -29014,6 +36238,13 @@
#define F_TLSDISABLE V_TLSDISABLE(1U)
#define A_ULP_TX_CPL_ERR_MASK_L 0x8de8
+#define A_ULP_TX_FID_1 0x8de8
+
+#define S_FID_1 0
+#define M_FID_1 0x7ffU
+#define V_FID_1(x) ((x) << S_FID_1)
+#define G_FID_1(x) (((x) >> S_FID_1) & M_FID_1)
+
#define A_ULP_TX_CPL_ERR_MASK_H 0x8dec
#define A_ULP_TX_CPL_ERR_VALUE_L 0x8df0
#define A_ULP_TX_CPL_ERR_VALUE_H 0x8df4
@@ -29166,6 +36397,15 @@
#define V_WRREQ_SZ(x) ((x) << S_WRREQ_SZ)
#define G_WRREQ_SZ(x) (((x) >> S_WRREQ_SZ) & M_WRREQ_SZ)
+#define S_T7_GLOBALENABLE 31
+#define V_T7_GLOBALENABLE(x) ((x) << S_T7_GLOBALENABLE)
+#define F_T7_GLOBALENABLE V_T7_GLOBALENABLE(1U)
+
+#define S_RDREQ_SZ 3
+#define M_RDREQ_SZ 0x7U
+#define V_RDREQ_SZ(x) ((x) << S_RDREQ_SZ)
+#define G_RDREQ_SZ(x) (((x) >> S_RDREQ_SZ) & M_RDREQ_SZ)
+
#define A_ULP_TX_ULP2TP_BIST_ERROR_CNT 0x8e34
#define A_ULP_TX_PERR_INJECT_2 0x8e34
@@ -29385,6 +36625,200 @@
#define A_ULP_TX_INT_CAUSE_2 0x8e80
#define A_ULP_TX_PERR_ENABLE_2 0x8e84
+#define A_ULP_TX_INT_ENABLE_3 0x8e88
+
+#define S_GF_SGE_FIFO_PARERR3 31
+#define V_GF_SGE_FIFO_PARERR3(x) ((x) << S_GF_SGE_FIFO_PARERR3)
+#define F_GF_SGE_FIFO_PARERR3 V_GF_SGE_FIFO_PARERR3(1U)
+
+#define S_GF_SGE_FIFO_PARERR2 30
+#define V_GF_SGE_FIFO_PARERR2(x) ((x) << S_GF_SGE_FIFO_PARERR2)
+#define F_GF_SGE_FIFO_PARERR2 V_GF_SGE_FIFO_PARERR2(1U)
+
+#define S_GF_SGE_FIFO_PARERR1 29
+#define V_GF_SGE_FIFO_PARERR1(x) ((x) << S_GF_SGE_FIFO_PARERR1)
+#define F_GF_SGE_FIFO_PARERR1 V_GF_SGE_FIFO_PARERR1(1U)
+
+#define S_GF_SGE_FIFO_PARERR0 28
+#define V_GF_SGE_FIFO_PARERR0(x) ((x) << S_GF_SGE_FIFO_PARERR0)
+#define F_GF_SGE_FIFO_PARERR0 V_GF_SGE_FIFO_PARERR0(1U)
+
+#define S_DEDUPE_SGE_FIFO_PARERR3 27
+#define V_DEDUPE_SGE_FIFO_PARERR3(x) ((x) << S_DEDUPE_SGE_FIFO_PARERR3)
+#define F_DEDUPE_SGE_FIFO_PARERR3 V_DEDUPE_SGE_FIFO_PARERR3(1U)
+
+#define S_DEDUPE_SGE_FIFO_PARERR2 26
+#define V_DEDUPE_SGE_FIFO_PARERR2(x) ((x) << S_DEDUPE_SGE_FIFO_PARERR2)
+#define F_DEDUPE_SGE_FIFO_PARERR2 V_DEDUPE_SGE_FIFO_PARERR2(1U)
+
+#define S_DEDUPE_SGE_FIFO_PARERR1 25
+#define V_DEDUPE_SGE_FIFO_PARERR1(x) ((x) << S_DEDUPE_SGE_FIFO_PARERR1)
+#define F_DEDUPE_SGE_FIFO_PARERR1 V_DEDUPE_SGE_FIFO_PARERR1(1U)
+
+#define S_DEDUPE_SGE_FIFO_PARERR0 24
+#define V_DEDUPE_SGE_FIFO_PARERR0(x) ((x) << S_DEDUPE_SGE_FIFO_PARERR0)
+#define F_DEDUPE_SGE_FIFO_PARERR0 V_DEDUPE_SGE_FIFO_PARERR0(1U)
+
+#define S_GF3_DSGL_FIFO_PARERR 23
+#define V_GF3_DSGL_FIFO_PARERR(x) ((x) << S_GF3_DSGL_FIFO_PARERR)
+#define F_GF3_DSGL_FIFO_PARERR V_GF3_DSGL_FIFO_PARERR(1U)
+
+#define S_GF2_DSGL_FIFO_PARERR 22
+#define V_GF2_DSGL_FIFO_PARERR(x) ((x) << S_GF2_DSGL_FIFO_PARERR)
+#define F_GF2_DSGL_FIFO_PARERR V_GF2_DSGL_FIFO_PARERR(1U)
+
+#define S_GF1_DSGL_FIFO_PARERR 21
+#define V_GF1_DSGL_FIFO_PARERR(x) ((x) << S_GF1_DSGL_FIFO_PARERR)
+#define F_GF1_DSGL_FIFO_PARERR V_GF1_DSGL_FIFO_PARERR(1U)
+
+#define S_GF0_DSGL_FIFO_PARERR 20
+#define V_GF0_DSGL_FIFO_PARERR(x) ((x) << S_GF0_DSGL_FIFO_PARERR)
+#define F_GF0_DSGL_FIFO_PARERR V_GF0_DSGL_FIFO_PARERR(1U)
+
+#define S_DEDUPE3_DSGL_FIFO_PARERR 19
+#define V_DEDUPE3_DSGL_FIFO_PARERR(x) ((x) << S_DEDUPE3_DSGL_FIFO_PARERR)
+#define F_DEDUPE3_DSGL_FIFO_PARERR V_DEDUPE3_DSGL_FIFO_PARERR(1U)
+
+#define S_DEDUPE2_DSGL_FIFO_PARERR 18
+#define V_DEDUPE2_DSGL_FIFO_PARERR(x) ((x) << S_DEDUPE2_DSGL_FIFO_PARERR)
+#define F_DEDUPE2_DSGL_FIFO_PARERR V_DEDUPE2_DSGL_FIFO_PARERR(1U)
+
+#define S_DEDUPE1_DSGL_FIFO_PARERR 17
+#define V_DEDUPE1_DSGL_FIFO_PARERR(x) ((x) << S_DEDUPE1_DSGL_FIFO_PARERR)
+#define F_DEDUPE1_DSGL_FIFO_PARERR V_DEDUPE1_DSGL_FIFO_PARERR(1U)
+
+#define S_DEDUPE0_DSGL_FIFO_PARERR 16
+#define V_DEDUPE0_DSGL_FIFO_PARERR(x) ((x) << S_DEDUPE0_DSGL_FIFO_PARERR)
+#define F_DEDUPE0_DSGL_FIFO_PARERR V_DEDUPE0_DSGL_FIFO_PARERR(1U)
+
+#define S_XP10_SGE_FIFO_PARERR 15
+#define V_XP10_SGE_FIFO_PARERR(x) ((x) << S_XP10_SGE_FIFO_PARERR)
+#define F_XP10_SGE_FIFO_PARERR V_XP10_SGE_FIFO_PARERR(1U)
+
+#define S_DSGL_PAR_ERR 14
+#define V_DSGL_PAR_ERR(x) ((x) << S_DSGL_PAR_ERR)
+#define F_DSGL_PAR_ERR V_DSGL_PAR_ERR(1U)
+
+#define S_CDDIP_INT 13
+#define V_CDDIP_INT(x) ((x) << S_CDDIP_INT)
+#define F_CDDIP_INT V_CDDIP_INT(1U)
+
+#define S_CCEIP_INT 12
+#define V_CCEIP_INT(x) ((x) << S_CCEIP_INT)
+#define F_CCEIP_INT V_CCEIP_INT(1U)
+
+#define S_TLS_SGE_FIFO_PARERR3 11
+#define V_TLS_SGE_FIFO_PARERR3(x) ((x) << S_TLS_SGE_FIFO_PARERR3)
+#define F_TLS_SGE_FIFO_PARERR3 V_TLS_SGE_FIFO_PARERR3(1U)
+
+#define S_TLS_SGE_FIFO_PARERR2 10
+#define V_TLS_SGE_FIFO_PARERR2(x) ((x) << S_TLS_SGE_FIFO_PARERR2)
+#define F_TLS_SGE_FIFO_PARERR2 V_TLS_SGE_FIFO_PARERR2(1U)
+
+#define S_TLS_SGE_FIFO_PARERR1 9
+#define V_TLS_SGE_FIFO_PARERR1(x) ((x) << S_TLS_SGE_FIFO_PARERR1)
+#define F_TLS_SGE_FIFO_PARERR1 V_TLS_SGE_FIFO_PARERR1(1U)
+
+#define S_TLS_SGE_FIFO_PARERR0 8
+#define V_TLS_SGE_FIFO_PARERR0(x) ((x) << S_TLS_SGE_FIFO_PARERR0)
+#define F_TLS_SGE_FIFO_PARERR0 V_TLS_SGE_FIFO_PARERR0(1U)
+
+#define S_ULP2SMARBT_RSP_PERR 6
+#define V_ULP2SMARBT_RSP_PERR(x) ((x) << S_ULP2SMARBT_RSP_PERR)
+#define F_ULP2SMARBT_RSP_PERR V_ULP2SMARBT_RSP_PERR(1U)
+
+#define S_ULPTX2MA_RSP_PERR 5
+#define V_ULPTX2MA_RSP_PERR(x) ((x) << S_ULPTX2MA_RSP_PERR)
+#define F_ULPTX2MA_RSP_PERR V_ULPTX2MA_RSP_PERR(1U)
+
+#define S_PCIE2ULP_PERR3 4
+#define V_PCIE2ULP_PERR3(x) ((x) << S_PCIE2ULP_PERR3)
+#define F_PCIE2ULP_PERR3 V_PCIE2ULP_PERR3(1U)
+
+#define S_PCIE2ULP_PERR2 3
+#define V_PCIE2ULP_PERR2(x) ((x) << S_PCIE2ULP_PERR2)
+#define F_PCIE2ULP_PERR2 V_PCIE2ULP_PERR2(1U)
+
+#define S_PCIE2ULP_PERR1 2
+#define V_PCIE2ULP_PERR1(x) ((x) << S_PCIE2ULP_PERR1)
+#define F_PCIE2ULP_PERR1 V_PCIE2ULP_PERR1(1U)
+
+#define S_PCIE2ULP_PERR0 1
+#define V_PCIE2ULP_PERR0(x) ((x) << S_PCIE2ULP_PERR0)
+#define F_PCIE2ULP_PERR0 V_PCIE2ULP_PERR0(1U)
+
+#define S_CIM2ULP_PERR 0
+#define V_CIM2ULP_PERR(x) ((x) << S_CIM2ULP_PERR)
+#define F_CIM2ULP_PERR V_CIM2ULP_PERR(1U)
+
+#define A_ULP_TX_INT_CAUSE_3 0x8e8c
+#define A_ULP_TX_PERR_ENABLE_3 0x8e90
+#define A_ULP_TX_INT_ENABLE_4 0x8e94
+
+#define S_DMA_PAR_ERR3 28
+#define M_DMA_PAR_ERR3 0xfU
+#define V_DMA_PAR_ERR3(x) ((x) << S_DMA_PAR_ERR3)
+#define G_DMA_PAR_ERR3(x) (((x) >> S_DMA_PAR_ERR3) & M_DMA_PAR_ERR3)
+
+#define S_DMA_PAR_ERR2 24
+#define M_DMA_PAR_ERR2 0xfU
+#define V_DMA_PAR_ERR2(x) ((x) << S_DMA_PAR_ERR2)
+#define G_DMA_PAR_ERR2(x) (((x) >> S_DMA_PAR_ERR2) & M_DMA_PAR_ERR2)
+
+#define S_DMA_PAR_ERR1 20
+#define M_DMA_PAR_ERR1 0xfU
+#define V_DMA_PAR_ERR1(x) ((x) << S_DMA_PAR_ERR1)
+#define G_DMA_PAR_ERR1(x) (((x) >> S_DMA_PAR_ERR1) & M_DMA_PAR_ERR1)
+
+#define S_DMA_PAR_ERR0 16
+#define M_DMA_PAR_ERR0 0xfU
+#define V_DMA_PAR_ERR0(x) ((x) << S_DMA_PAR_ERR0)
+#define G_DMA_PAR_ERR0(x) (((x) >> S_DMA_PAR_ERR0) & M_DMA_PAR_ERR0)
+
+#define S_CORE_CMD_FIFO_LB1 12
+#define M_CORE_CMD_FIFO_LB1 0xfU
+#define V_CORE_CMD_FIFO_LB1(x) ((x) << S_CORE_CMD_FIFO_LB1)
+#define G_CORE_CMD_FIFO_LB1(x) (((x) >> S_CORE_CMD_FIFO_LB1) & M_CORE_CMD_FIFO_LB1)
+
+#define S_CORE_CMD_FIFO_LB0 8
+#define M_CORE_CMD_FIFO_LB0 0xfU
+#define V_CORE_CMD_FIFO_LB0(x) ((x) << S_CORE_CMD_FIFO_LB0)
+#define G_CORE_CMD_FIFO_LB0(x) (((x) >> S_CORE_CMD_FIFO_LB0) & M_CORE_CMD_FIFO_LB0)
+
+#define S_XP10_2_ULP_PERR 7
+#define V_XP10_2_ULP_PERR(x) ((x) << S_XP10_2_ULP_PERR)
+#define F_XP10_2_ULP_PERR V_XP10_2_ULP_PERR(1U)
+
+#define S_ULP_2_XP10_PERR 6
+#define V_ULP_2_XP10_PERR(x) ((x) << S_ULP_2_XP10_PERR)
+#define F_ULP_2_XP10_PERR V_ULP_2_XP10_PERR(1U)
+
+#define S_CMD_FIFO_LB1 5
+#define V_CMD_FIFO_LB1(x) ((x) << S_CMD_FIFO_LB1)
+#define F_CMD_FIFO_LB1 V_CMD_FIFO_LB1(1U)
+
+#define S_CMD_FIFO_LB0 4
+#define V_CMD_FIFO_LB0(x) ((x) << S_CMD_FIFO_LB0)
+#define F_CMD_FIFO_LB0 V_CMD_FIFO_LB0(1U)
+
+#define S_TF_TP_PERR 3
+#define V_TF_TP_PERR(x) ((x) << S_TF_TP_PERR)
+#define F_TF_TP_PERR V_TF_TP_PERR(1U)
+
+#define S_TF_SGE_PERR 2
+#define V_TF_SGE_PERR(x) ((x) << S_TF_SGE_PERR)
+#define F_TF_SGE_PERR V_TF_SGE_PERR(1U)
+
+#define S_TF_MEM_PERR 1
+#define V_TF_MEM_PERR(x) ((x) << S_TF_MEM_PERR)
+#define F_TF_MEM_PERR V_TF_MEM_PERR(1U)
+
+#define S_TF_MP_PERR 0
+#define V_TF_MP_PERR(x) ((x) << S_TF_MP_PERR)
+#define F_TF_MP_PERR V_TF_MP_PERR(1U)
+
+#define A_ULP_TX_INT_CAUSE_4 0x8e98
+#define A_ULP_TX_PERR_ENABLE_4 0x8e9c
#define A_ULP_TX_SE_CNT_ERR 0x8ea0
#define S_ERR_CH3 12
@@ -29531,16 +36965,381 @@
#define A_ULP_TX_CSU_REVISION 0x8ebc
#define A_ULP_TX_LA_RDPTR_0 0x8ec0
+#define A_ULP_TX_PL2APB_INFO 0x8ec0
+
+#define S_PL2APB_BRIDGE_HUNG 27
+#define V_PL2APB_BRIDGE_HUNG(x) ((x) << S_PL2APB_BRIDGE_HUNG)
+#define F_PL2APB_BRIDGE_HUNG V_PL2APB_BRIDGE_HUNG(1U)
+
+#define S_PL2APB_BRIDGE_STATE 26
+#define V_PL2APB_BRIDGE_STATE(x) ((x) << S_PL2APB_BRIDGE_STATE)
+#define F_PL2APB_BRIDGE_STATE V_PL2APB_BRIDGE_STATE(1U)
+
+#define S_PL2APB_BRIDGE_HUNG_TYPE 25
+#define V_PL2APB_BRIDGE_HUNG_TYPE(x) ((x) << S_PL2APB_BRIDGE_HUNG_TYPE)
+#define F_PL2APB_BRIDGE_HUNG_TYPE V_PL2APB_BRIDGE_HUNG_TYPE(1U)
+
+#define S_PL2APB_BRIDGE_HUNG_ID 24
+#define V_PL2APB_BRIDGE_HUNG_ID(x) ((x) << S_PL2APB_BRIDGE_HUNG_ID)
+#define F_PL2APB_BRIDGE_HUNG_ID V_PL2APB_BRIDGE_HUNG_ID(1U)
+
+#define S_PL2APB_BRIDGE_HUNG_ADDR 0
+#define M_PL2APB_BRIDGE_HUNG_ADDR 0xfffffU
+#define V_PL2APB_BRIDGE_HUNG_ADDR(x) ((x) << S_PL2APB_BRIDGE_HUNG_ADDR)
+#define G_PL2APB_BRIDGE_HUNG_ADDR(x) (((x) >> S_PL2APB_BRIDGE_HUNG_ADDR) & M_PL2APB_BRIDGE_HUNG_ADDR)
+
#define A_ULP_TX_LA_RDDATA_0 0x8ec4
+#define A_ULP_TX_INT_ENABLE_5 0x8ec4
+
+#define S_DEDUPE_PERR3 23
+#define V_DEDUPE_PERR3(x) ((x) << S_DEDUPE_PERR3)
+#define F_DEDUPE_PERR3 V_DEDUPE_PERR3(1U)
+
+#define S_DEDUPE_PERR2 22
+#define V_DEDUPE_PERR2(x) ((x) << S_DEDUPE_PERR2)
+#define F_DEDUPE_PERR2 V_DEDUPE_PERR2(1U)
+
+#define S_DEDUPE_PERR1 21
+#define V_DEDUPE_PERR1(x) ((x) << S_DEDUPE_PERR1)
+#define F_DEDUPE_PERR1 V_DEDUPE_PERR1(1U)
+
+#define S_DEDUPE_PERR0 20
+#define V_DEDUPE_PERR0(x) ((x) << S_DEDUPE_PERR0)
+#define F_DEDUPE_PERR0 V_DEDUPE_PERR0(1U)
+
+#define S_GF_PERR3 19
+#define V_GF_PERR3(x) ((x) << S_GF_PERR3)
+#define F_GF_PERR3 V_GF_PERR3(1U)
+
+#define S_GF_PERR2 18
+#define V_GF_PERR2(x) ((x) << S_GF_PERR2)
+#define F_GF_PERR2 V_GF_PERR2(1U)
+
+#define S_GF_PERR1 17
+#define V_GF_PERR1(x) ((x) << S_GF_PERR1)
+#define F_GF_PERR1 V_GF_PERR1(1U)
+
+#define S_GF_PERR0 16
+#define V_GF_PERR0(x) ((x) << S_GF_PERR0)
+#define F_GF_PERR0 V_GF_PERR0(1U)
+
+#define S_SGE2ULP_INV_PERR 13
+#define V_SGE2ULP_INV_PERR(x) ((x) << S_SGE2ULP_INV_PERR)
+#define F_SGE2ULP_INV_PERR V_SGE2ULP_INV_PERR(1U)
+
+#define S_T7_PL_BUSPERR 12
+#define V_T7_PL_BUSPERR(x) ((x) << S_T7_PL_BUSPERR)
+#define F_T7_PL_BUSPERR V_T7_PL_BUSPERR(1U)
+
+#define S_TLSTX2ULPTX_PERR3 11
+#define V_TLSTX2ULPTX_PERR3(x) ((x) << S_TLSTX2ULPTX_PERR3)
+#define F_TLSTX2ULPTX_PERR3 V_TLSTX2ULPTX_PERR3(1U)
+
+#define S_TLSTX2ULPTX_PERR2 10
+#define V_TLSTX2ULPTX_PERR2(x) ((x) << S_TLSTX2ULPTX_PERR2)
+#define F_TLSTX2ULPTX_PERR2 V_TLSTX2ULPTX_PERR2(1U)
+
+#define S_TLSTX2ULPTX_PERR1 9
+#define V_TLSTX2ULPTX_PERR1(x) ((x) << S_TLSTX2ULPTX_PERR1)
+#define F_TLSTX2ULPTX_PERR1 V_TLSTX2ULPTX_PERR1(1U)
+
+#define S_TLSTX2ULPTX_PERR0 8
+#define V_TLSTX2ULPTX_PERR0(x) ((x) << S_TLSTX2ULPTX_PERR0)
+#define F_TLSTX2ULPTX_PERR0 V_TLSTX2ULPTX_PERR0(1U)
+
+#define S_XP10_2_ULP_PL_PERR 1
+#define V_XP10_2_ULP_PL_PERR(x) ((x) << S_XP10_2_ULP_PL_PERR)
+#define F_XP10_2_ULP_PL_PERR V_XP10_2_ULP_PL_PERR(1U)
+
+#define S_ULP_2_XP10_PL_PERR 0
+#define V_ULP_2_XP10_PL_PERR(x) ((x) << S_ULP_2_XP10_PL_PERR)
+#define F_ULP_2_XP10_PL_PERR V_ULP_2_XP10_PL_PERR(1U)
+
#define A_ULP_TX_LA_WRPTR_0 0x8ec8
+#define A_ULP_TX_INT_CAUSE_5 0x8ec8
#define A_ULP_TX_LA_RESERVED_0 0x8ecc
+#define A_ULP_TX_PERR_ENABLE_5 0x8ecc
#define A_ULP_TX_LA_RDPTR_1 0x8ed0
+#define A_ULP_TX_INT_CAUSE_6 0x8ed0
+
+#define S_DDR_HDR_FIFO_PERR_SET3 12
+#define V_DDR_HDR_FIFO_PERR_SET3(x) ((x) << S_DDR_HDR_FIFO_PERR_SET3)
+#define F_DDR_HDR_FIFO_PERR_SET3 V_DDR_HDR_FIFO_PERR_SET3(1U)
+
+#define S_DDR_HDR_FIFO_PERR_SET2 11
+#define V_DDR_HDR_FIFO_PERR_SET2(x) ((x) << S_DDR_HDR_FIFO_PERR_SET2)
+#define F_DDR_HDR_FIFO_PERR_SET2 V_DDR_HDR_FIFO_PERR_SET2(1U)
+
+#define S_DDR_HDR_FIFO_PERR_SET1 10
+#define V_DDR_HDR_FIFO_PERR_SET1(x) ((x) << S_DDR_HDR_FIFO_PERR_SET1)
+#define F_DDR_HDR_FIFO_PERR_SET1 V_DDR_HDR_FIFO_PERR_SET1(1U)
+
+#define S_DDR_HDR_FIFO_PERR_SET0 9
+#define V_DDR_HDR_FIFO_PERR_SET0(x) ((x) << S_DDR_HDR_FIFO_PERR_SET0)
+#define F_DDR_HDR_FIFO_PERR_SET0 V_DDR_HDR_FIFO_PERR_SET0(1U)
+
+#define S_PRE_MP_RSP_PERR_SET3 8
+#define V_PRE_MP_RSP_PERR_SET3(x) ((x) << S_PRE_MP_RSP_PERR_SET3)
+#define F_PRE_MP_RSP_PERR_SET3 V_PRE_MP_RSP_PERR_SET3(1U)
+
+#define S_PRE_MP_RSP_PERR_SET2 7
+#define V_PRE_MP_RSP_PERR_SET2(x) ((x) << S_PRE_MP_RSP_PERR_SET2)
+#define F_PRE_MP_RSP_PERR_SET2 V_PRE_MP_RSP_PERR_SET2(1U)
+
+#define S_PRE_MP_RSP_PERR_SET1 6
+#define V_PRE_MP_RSP_PERR_SET1(x) ((x) << S_PRE_MP_RSP_PERR_SET1)
+#define F_PRE_MP_RSP_PERR_SET1 V_PRE_MP_RSP_PERR_SET1(1U)
+
+#define S_PRE_MP_RSP_PERR_SET0 5
+#define V_PRE_MP_RSP_PERR_SET0(x) ((x) << S_PRE_MP_RSP_PERR_SET0)
+#define F_PRE_MP_RSP_PERR_SET0 V_PRE_MP_RSP_PERR_SET0(1U)
+
+#define S_PRE_CQE_FIFO_PERR_SET3 4
+#define V_PRE_CQE_FIFO_PERR_SET3(x) ((x) << S_PRE_CQE_FIFO_PERR_SET3)
+#define F_PRE_CQE_FIFO_PERR_SET3 V_PRE_CQE_FIFO_PERR_SET3(1U)
+
+#define S_PRE_CQE_FIFO_PERR_SET2 3
+#define V_PRE_CQE_FIFO_PERR_SET2(x) ((x) << S_PRE_CQE_FIFO_PERR_SET2)
+#define F_PRE_CQE_FIFO_PERR_SET2 V_PRE_CQE_FIFO_PERR_SET2(1U)
+
+#define S_PRE_CQE_FIFO_PERR_SET1 2
+#define V_PRE_CQE_FIFO_PERR_SET1(x) ((x) << S_PRE_CQE_FIFO_PERR_SET1)
+#define F_PRE_CQE_FIFO_PERR_SET1 V_PRE_CQE_FIFO_PERR_SET1(1U)
+
+#define S_PRE_CQE_FIFO_PERR_SET0 1
+#define V_PRE_CQE_FIFO_PERR_SET0(x) ((x) << S_PRE_CQE_FIFO_PERR_SET0)
+#define F_PRE_CQE_FIFO_PERR_SET0 V_PRE_CQE_FIFO_PERR_SET0(1U)
+
+#define S_RSP_FIFO_PERR_SET 0
+#define V_RSP_FIFO_PERR_SET(x) ((x) << S_RSP_FIFO_PERR_SET)
+#define F_RSP_FIFO_PERR_SET V_RSP_FIFO_PERR_SET(1U)
+
#define A_ULP_TX_LA_RDDATA_1 0x8ed4
+#define A_ULP_TX_INT_ENABLE_6 0x8ed4
#define A_ULP_TX_LA_WRPTR_1 0x8ed8
+#define A_ULP_TX_PERR_ENABLE_6 0x8ed8
#define A_ULP_TX_LA_RESERVED_1 0x8edc
+#define A_ULP_TX_INT_CAUSE_7 0x8edc
+
+#define S_TLS_SGE_FIFO_CORERR3 23
+#define V_TLS_SGE_FIFO_CORERR3(x) ((x) << S_TLS_SGE_FIFO_CORERR3)
+#define F_TLS_SGE_FIFO_CORERR3 V_TLS_SGE_FIFO_CORERR3(1U)
+
+#define S_TLS_SGE_FIFO_CORERR2 22
+#define V_TLS_SGE_FIFO_CORERR2(x) ((x) << S_TLS_SGE_FIFO_CORERR2)
+#define F_TLS_SGE_FIFO_CORERR2 V_TLS_SGE_FIFO_CORERR2(1U)
+
+#define S_TLS_SGE_FIFO_CORERR1 21
+#define V_TLS_SGE_FIFO_CORERR1(x) ((x) << S_TLS_SGE_FIFO_CORERR1)
+#define F_TLS_SGE_FIFO_CORERR1 V_TLS_SGE_FIFO_CORERR1(1U)
+
+#define S_TLS_SGE_FIFO_CORERR0 20
+#define V_TLS_SGE_FIFO_CORERR0(x) ((x) << S_TLS_SGE_FIFO_CORERR0)
+#define F_TLS_SGE_FIFO_CORERR0 V_TLS_SGE_FIFO_CORERR0(1U)
+
+#define S_LSO_HDR_SRAM_CERR_SET3 19
+#define V_LSO_HDR_SRAM_CERR_SET3(x) ((x) << S_LSO_HDR_SRAM_CERR_SET3)
+#define F_LSO_HDR_SRAM_CERR_SET3 V_LSO_HDR_SRAM_CERR_SET3(1U)
+
+#define S_LSO_HDR_SRAM_CERR_SET2 18
+#define V_LSO_HDR_SRAM_CERR_SET2(x) ((x) << S_LSO_HDR_SRAM_CERR_SET2)
+#define F_LSO_HDR_SRAM_CERR_SET2 V_LSO_HDR_SRAM_CERR_SET2(1U)
+
+#define S_LSO_HDR_SRAM_CERR_SET1 17
+#define V_LSO_HDR_SRAM_CERR_SET1(x) ((x) << S_LSO_HDR_SRAM_CERR_SET1)
+#define F_LSO_HDR_SRAM_CERR_SET1 V_LSO_HDR_SRAM_CERR_SET1(1U)
+
+#define S_LSO_HDR_SRAM_CERR_SET0 16
+#define V_LSO_HDR_SRAM_CERR_SET0(x) ((x) << S_LSO_HDR_SRAM_CERR_SET0)
+#define F_LSO_HDR_SRAM_CERR_SET0 V_LSO_HDR_SRAM_CERR_SET0(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH3_LB1 15
+#define V_CORE_CMD_FIFO_CERR_SET_CH3_LB1(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH3_LB1)
+#define F_CORE_CMD_FIFO_CERR_SET_CH3_LB1 V_CORE_CMD_FIFO_CERR_SET_CH3_LB1(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH2_LB1 14
+#define V_CORE_CMD_FIFO_CERR_SET_CH2_LB1(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH2_LB1)
+#define F_CORE_CMD_FIFO_CERR_SET_CH2_LB1 V_CORE_CMD_FIFO_CERR_SET_CH2_LB1(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH1_LB1 13
+#define V_CORE_CMD_FIFO_CERR_SET_CH1_LB1(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH1_LB1)
+#define F_CORE_CMD_FIFO_CERR_SET_CH1_LB1 V_CORE_CMD_FIFO_CERR_SET_CH1_LB1(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH0_LB1 12
+#define V_CORE_CMD_FIFO_CERR_SET_CH0_LB1(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH0_LB1)
+#define F_CORE_CMD_FIFO_CERR_SET_CH0_LB1 V_CORE_CMD_FIFO_CERR_SET_CH0_LB1(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH3_LB0 11
+#define V_CORE_CMD_FIFO_CERR_SET_CH3_LB0(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH3_LB0)
+#define F_CORE_CMD_FIFO_CERR_SET_CH3_LB0 V_CORE_CMD_FIFO_CERR_SET_CH3_LB0(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH2_LB0 10
+#define V_CORE_CMD_FIFO_CERR_SET_CH2_LB0(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH2_LB0)
+#define F_CORE_CMD_FIFO_CERR_SET_CH2_LB0 V_CORE_CMD_FIFO_CERR_SET_CH2_LB0(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH1_LB0 9
+#define V_CORE_CMD_FIFO_CERR_SET_CH1_LB0(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH1_LB0)
+#define F_CORE_CMD_FIFO_CERR_SET_CH1_LB0 V_CORE_CMD_FIFO_CERR_SET_CH1_LB0(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH0_LB0 8
+#define V_CORE_CMD_FIFO_CERR_SET_CH0_LB0(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH0_LB0)
+#define F_CORE_CMD_FIFO_CERR_SET_CH0_LB0 V_CORE_CMD_FIFO_CERR_SET_CH0_LB0(1U)
+
+#define S_CQE_FIFO_CERR_SET3 7
+#define V_CQE_FIFO_CERR_SET3(x) ((x) << S_CQE_FIFO_CERR_SET3)
+#define F_CQE_FIFO_CERR_SET3 V_CQE_FIFO_CERR_SET3(1U)
+
+#define S_CQE_FIFO_CERR_SET2 6
+#define V_CQE_FIFO_CERR_SET2(x) ((x) << S_CQE_FIFO_CERR_SET2)
+#define F_CQE_FIFO_CERR_SET2 V_CQE_FIFO_CERR_SET2(1U)
+
+#define S_CQE_FIFO_CERR_SET1 5
+#define V_CQE_FIFO_CERR_SET1(x) ((x) << S_CQE_FIFO_CERR_SET1)
+#define F_CQE_FIFO_CERR_SET1 V_CQE_FIFO_CERR_SET1(1U)
+
+#define S_CQE_FIFO_CERR_SET0 4
+#define V_CQE_FIFO_CERR_SET0(x) ((x) << S_CQE_FIFO_CERR_SET0)
+#define F_CQE_FIFO_CERR_SET0 V_CQE_FIFO_CERR_SET0(1U)
+
+#define S_PRE_CQE_FIFO_CERR_SET3 3
+#define V_PRE_CQE_FIFO_CERR_SET3(x) ((x) << S_PRE_CQE_FIFO_CERR_SET3)
+#define F_PRE_CQE_FIFO_CERR_SET3 V_PRE_CQE_FIFO_CERR_SET3(1U)
+
+#define S_PRE_CQE_FIFO_CERR_SET2 2
+#define V_PRE_CQE_FIFO_CERR_SET2(x) ((x) << S_PRE_CQE_FIFO_CERR_SET2)
+#define F_PRE_CQE_FIFO_CERR_SET2 V_PRE_CQE_FIFO_CERR_SET2(1U)
+
+#define S_PRE_CQE_FIFO_CERR_SET1 1
+#define V_PRE_CQE_FIFO_CERR_SET1(x) ((x) << S_PRE_CQE_FIFO_CERR_SET1)
+#define F_PRE_CQE_FIFO_CERR_SET1 V_PRE_CQE_FIFO_CERR_SET1(1U)
+
+#define S_PRE_CQE_FIFO_CERR_SET0 0
+#define V_PRE_CQE_FIFO_CERR_SET0(x) ((x) << S_PRE_CQE_FIFO_CERR_SET0)
+#define F_PRE_CQE_FIFO_CERR_SET0 V_PRE_CQE_FIFO_CERR_SET0(1U)
+
#define A_ULP_TX_LA_RDPTR_2 0x8ee0
+#define A_ULP_TX_INT_ENABLE_7 0x8ee0
#define A_ULP_TX_LA_RDDATA_2 0x8ee4
+#define A_ULP_TX_INT_CAUSE_8 0x8ee4
+
+#define S_MEM_RSP_FIFO_CERR_SET3 28
+#define V_MEM_RSP_FIFO_CERR_SET3(x) ((x) << S_MEM_RSP_FIFO_CERR_SET3)
+#define F_MEM_RSP_FIFO_CERR_SET3 V_MEM_RSP_FIFO_CERR_SET3(1U)
+
+#define S_MEM_RSP_FIFO_CERR_SET2 27
+#define V_MEM_RSP_FIFO_CERR_SET2(x) ((x) << S_MEM_RSP_FIFO_CERR_SET2)
+#define F_MEM_RSP_FIFO_CERR_SET2 V_MEM_RSP_FIFO_CERR_SET2(1U)
+
+#define S_MEM_RSP_FIFO_CERR_SET1 26
+#define V_MEM_RSP_FIFO_CERR_SET1(x) ((x) << S_MEM_RSP_FIFO_CERR_SET1)
+#define F_MEM_RSP_FIFO_CERR_SET1 V_MEM_RSP_FIFO_CERR_SET1(1U)
+
+#define S_MEM_RSP_FIFO_CERR_SET0 25
+#define V_MEM_RSP_FIFO_CERR_SET0(x) ((x) << S_MEM_RSP_FIFO_CERR_SET0)
+#define F_MEM_RSP_FIFO_CERR_SET0 V_MEM_RSP_FIFO_CERR_SET0(1U)
+
+#define S_PI_SRAM_CERR_SET3 24
+#define V_PI_SRAM_CERR_SET3(x) ((x) << S_PI_SRAM_CERR_SET3)
+#define F_PI_SRAM_CERR_SET3 V_PI_SRAM_CERR_SET3(1U)
+
+#define S_PI_SRAM_CERR_SET2 23
+#define V_PI_SRAM_CERR_SET2(x) ((x) << S_PI_SRAM_CERR_SET2)
+#define F_PI_SRAM_CERR_SET2 V_PI_SRAM_CERR_SET2(1U)
+
+#define S_PI_SRAM_CERR_SET1 22
+#define V_PI_SRAM_CERR_SET1(x) ((x) << S_PI_SRAM_CERR_SET1)
+#define F_PI_SRAM_CERR_SET1 V_PI_SRAM_CERR_SET1(1U)
+
+#define S_PI_SRAM_CERR_SET0 21
+#define V_PI_SRAM_CERR_SET0(x) ((x) << S_PI_SRAM_CERR_SET0)
+#define F_PI_SRAM_CERR_SET0 V_PI_SRAM_CERR_SET0(1U)
+
+#define S_PRE_MP_RSP_CERR_SET3 20
+#define V_PRE_MP_RSP_CERR_SET3(x) ((x) << S_PRE_MP_RSP_CERR_SET3)
+#define F_PRE_MP_RSP_CERR_SET3 V_PRE_MP_RSP_CERR_SET3(1U)
+
+#define S_PRE_MP_RSP_CERR_SET2 19
+#define V_PRE_MP_RSP_CERR_SET2(x) ((x) << S_PRE_MP_RSP_CERR_SET2)
+#define F_PRE_MP_RSP_CERR_SET2 V_PRE_MP_RSP_CERR_SET2(1U)
+
+#define S_PRE_MP_RSP_CERR_SET1 18
+#define V_PRE_MP_RSP_CERR_SET1(x) ((x) << S_PRE_MP_RSP_CERR_SET1)
+#define F_PRE_MP_RSP_CERR_SET1 V_PRE_MP_RSP_CERR_SET1(1U)
+
+#define S_PRE_MP_RSP_CERR_SET0 17
+#define V_PRE_MP_RSP_CERR_SET0(x) ((x) << S_PRE_MP_RSP_CERR_SET0)
+#define F_PRE_MP_RSP_CERR_SET0 V_PRE_MP_RSP_CERR_SET0(1U)
+
+#define S_DDR_HDR_FIFO_CERR_SET3 16
+#define V_DDR_HDR_FIFO_CERR_SET3(x) ((x) << S_DDR_HDR_FIFO_CERR_SET3)
+#define F_DDR_HDR_FIFO_CERR_SET3 V_DDR_HDR_FIFO_CERR_SET3(1U)
+
+#define S_DDR_HDR_FIFO_CERR_SET2 15
+#define V_DDR_HDR_FIFO_CERR_SET2(x) ((x) << S_DDR_HDR_FIFO_CERR_SET2)
+#define F_DDR_HDR_FIFO_CERR_SET2 V_DDR_HDR_FIFO_CERR_SET2(1U)
+
+#define S_DDR_HDR_FIFO_CERR_SET1 14
+#define V_DDR_HDR_FIFO_CERR_SET1(x) ((x) << S_DDR_HDR_FIFO_CERR_SET1)
+#define F_DDR_HDR_FIFO_CERR_SET1 V_DDR_HDR_FIFO_CERR_SET1(1U)
+
+#define S_DDR_HDR_FIFO_CERR_SET0 13
+#define V_DDR_HDR_FIFO_CERR_SET0(x) ((x) << S_DDR_HDR_FIFO_CERR_SET0)
+#define F_DDR_HDR_FIFO_CERR_SET0 V_DDR_HDR_FIFO_CERR_SET0(1U)
+
+#define S_CMD_FIFO_CERR_SET3 12
+#define V_CMD_FIFO_CERR_SET3(x) ((x) << S_CMD_FIFO_CERR_SET3)
+#define F_CMD_FIFO_CERR_SET3 V_CMD_FIFO_CERR_SET3(1U)
+
+#define S_CMD_FIFO_CERR_SET2 11
+#define V_CMD_FIFO_CERR_SET2(x) ((x) << S_CMD_FIFO_CERR_SET2)
+#define F_CMD_FIFO_CERR_SET2 V_CMD_FIFO_CERR_SET2(1U)
+
+#define S_CMD_FIFO_CERR_SET1 10
+#define V_CMD_FIFO_CERR_SET1(x) ((x) << S_CMD_FIFO_CERR_SET1)
+#define F_CMD_FIFO_CERR_SET1 V_CMD_FIFO_CERR_SET1(1U)
+
+#define S_CMD_FIFO_CERR_SET0 9
+#define V_CMD_FIFO_CERR_SET0(x) ((x) << S_CMD_FIFO_CERR_SET0)
+#define F_CMD_FIFO_CERR_SET0 V_CMD_FIFO_CERR_SET0(1U)
+
+#define S_GF_SGE_FIFO_CORERR3 8
+#define V_GF_SGE_FIFO_CORERR3(x) ((x) << S_GF_SGE_FIFO_CORERR3)
+#define F_GF_SGE_FIFO_CORERR3 V_GF_SGE_FIFO_CORERR3(1U)
+
+#define S_GF_SGE_FIFO_CORERR2 7
+#define V_GF_SGE_FIFO_CORERR2(x) ((x) << S_GF_SGE_FIFO_CORERR2)
+#define F_GF_SGE_FIFO_CORERR2 V_GF_SGE_FIFO_CORERR2(1U)
+
+#define S_GF_SGE_FIFO_CORERR1 6
+#define V_GF_SGE_FIFO_CORERR1(x) ((x) << S_GF_SGE_FIFO_CORERR1)
+#define F_GF_SGE_FIFO_CORERR1 V_GF_SGE_FIFO_CORERR1(1U)
+
+#define S_GF_SGE_FIFO_CORERR0 5
+#define V_GF_SGE_FIFO_CORERR0(x) ((x) << S_GF_SGE_FIFO_CORERR0)
+#define F_GF_SGE_FIFO_CORERR0 V_GF_SGE_FIFO_CORERR0(1U)
+
+#define S_DEDUPE_SGE_FIFO_CORERR3 4
+#define V_DEDUPE_SGE_FIFO_CORERR3(x) ((x) << S_DEDUPE_SGE_FIFO_CORERR3)
+#define F_DEDUPE_SGE_FIFO_CORERR3 V_DEDUPE_SGE_FIFO_CORERR3(1U)
+
+#define S_DEDUPE_SGE_FIFO_CORERR2 3
+#define V_DEDUPE_SGE_FIFO_CORERR2(x) ((x) << S_DEDUPE_SGE_FIFO_CORERR2)
+#define F_DEDUPE_SGE_FIFO_CORERR2 V_DEDUPE_SGE_FIFO_CORERR2(1U)
+
+#define S_DEDUPE_SGE_FIFO_CORERR1 2
+#define V_DEDUPE_SGE_FIFO_CORERR1(x) ((x) << S_DEDUPE_SGE_FIFO_CORERR1)
+#define F_DEDUPE_SGE_FIFO_CORERR1 V_DEDUPE_SGE_FIFO_CORERR1(1U)
+
+#define S_DEDUPE_SGE_FIFO_CORERR0 1
+#define V_DEDUPE_SGE_FIFO_CORERR0(x) ((x) << S_DEDUPE_SGE_FIFO_CORERR0)
+#define F_DEDUPE_SGE_FIFO_CORERR0 V_DEDUPE_SGE_FIFO_CORERR0(1U)
+
+#define S_RSP_FIFO_CERR_SET 0
+#define V_RSP_FIFO_CERR_SET(x) ((x) << S_RSP_FIFO_CERR_SET)
+#define F_RSP_FIFO_CERR_SET V_RSP_FIFO_CERR_SET(1U)
+
#define A_ULP_TX_LA_WRPTR_2 0x8ee8
+#define A_ULP_TX_INT_ENABLE_8 0x8ee8
#define A_ULP_TX_LA_RESERVED_2 0x8eec
#define A_ULP_TX_LA_RDPTR_3 0x8ef0
#define A_ULP_TX_LA_RDDATA_3 0x8ef4
@@ -29671,6 +37470,97 @@
#define V_SHOVE_LAST(x) ((x) << S_SHOVE_LAST)
#define F_SHOVE_LAST V_SHOVE_LAST(1U)
+#define A_ULP_TX_ACCELERATOR_CTL 0x8f90
+
+#define S_FIFO_THRESHOLD 8
+#define M_FIFO_THRESHOLD 0x1fU
+#define V_FIFO_THRESHOLD(x) ((x) << S_FIFO_THRESHOLD)
+#define G_FIFO_THRESHOLD(x) (((x) >> S_FIFO_THRESHOLD) & M_FIFO_THRESHOLD)
+
+#define S_COMPRESSION_XP10DISABLECFUSE 5
+#define V_COMPRESSION_XP10DISABLECFUSE(x) ((x) << S_COMPRESSION_XP10DISABLECFUSE)
+#define F_COMPRESSION_XP10DISABLECFUSE V_COMPRESSION_XP10DISABLECFUSE(1U)
+
+#define S_COMPRESSION_XP10DISABLE 4
+#define V_COMPRESSION_XP10DISABLE(x) ((x) << S_COMPRESSION_XP10DISABLE)
+#define F_COMPRESSION_XP10DISABLE V_COMPRESSION_XP10DISABLE(1U)
+
+#define S_DEDUPEDISABLECFUSE 3
+#define V_DEDUPEDISABLECFUSE(x) ((x) << S_DEDUPEDISABLECFUSE)
+#define F_DEDUPEDISABLECFUSE V_DEDUPEDISABLECFUSE(1U)
+
+#define S_DEDUPEDISABLE 2
+#define V_DEDUPEDISABLE(x) ((x) << S_DEDUPEDISABLE)
+#define F_DEDUPEDISABLE V_DEDUPEDISABLE(1U)
+
+#define S_GFDISABLECFUSE 1
+#define V_GFDISABLECFUSE(x) ((x) << S_GFDISABLECFUSE)
+#define F_GFDISABLECFUSE V_GFDISABLECFUSE(1U)
+
+#define S_GFDISABLE 0
+#define V_GFDISABLE(x) ((x) << S_GFDISABLE)
+#define F_GFDISABLE V_GFDISABLE(1U)
+
+#define A_ULP_TX_XP10_IND_ADDR 0x8f94
+
+#define S_XP10_CONTROL 31
+#define V_XP10_CONTROL(x) ((x) << S_XP10_CONTROL)
+#define F_XP10_CONTROL V_XP10_CONTROL(1U)
+
+#define S_XP10_ADDR 0
+#define M_XP10_ADDR 0xfffffU
+#define V_XP10_ADDR(x) ((x) << S_XP10_ADDR)
+#define G_XP10_ADDR(x) (((x) >> S_XP10_ADDR) & M_XP10_ADDR)
+
+#define A_ULP_TX_XP10_IND_DATA 0x8f98
+#define A_ULP_TX_IWARP_PMOF_OPCODES_1 0x8f9c
+
+#define S_RDMA_VERIFY_RESPONSE 24
+#define M_RDMA_VERIFY_RESPONSE 0x1fU
+#define V_RDMA_VERIFY_RESPONSE(x) ((x) << S_RDMA_VERIFY_RESPONSE)
+#define G_RDMA_VERIFY_RESPONSE(x) (((x) >> S_RDMA_VERIFY_RESPONSE) & M_RDMA_VERIFY_RESPONSE)
+
+#define S_RDMA_VERIFY_REQUEST 16
+#define M_RDMA_VERIFY_REQUEST 0x1fU
+#define V_RDMA_VERIFY_REQUEST(x) ((x) << S_RDMA_VERIFY_REQUEST)
+#define G_RDMA_VERIFY_REQUEST(x) (((x) >> S_RDMA_VERIFY_REQUEST) & M_RDMA_VERIFY_REQUEST)
+
+#define S_RDMA_FLUSH_RESPONSE 8
+#define M_RDMA_FLUSH_RESPONSE 0x1fU
+#define V_RDMA_FLUSH_RESPONSE(x) ((x) << S_RDMA_FLUSH_RESPONSE)
+#define G_RDMA_FLUSH_RESPONSE(x) (((x) >> S_RDMA_FLUSH_RESPONSE) & M_RDMA_FLUSH_RESPONSE)
+
+#define S_RDMA_FLUSH_REQUEST 0
+#define M_RDMA_FLUSH_REQUEST 0x1fU
+#define V_RDMA_FLUSH_REQUEST(x) ((x) << S_RDMA_FLUSH_REQUEST)
+#define G_RDMA_FLUSH_REQUEST(x) (((x) >> S_RDMA_FLUSH_REQUEST) & M_RDMA_FLUSH_REQUEST)
+
+#define A_ULP_TX_IWARP_PMOF_OPCODES_2 0x8fa0
+
+#define S_RDMA_SEND_WITH_SE_IMMEDIATE 24
+#define M_RDMA_SEND_WITH_SE_IMMEDIATE 0x1fU
+#define V_RDMA_SEND_WITH_SE_IMMEDIATE(x) ((x) << S_RDMA_SEND_WITH_SE_IMMEDIATE)
+#define G_RDMA_SEND_WITH_SE_IMMEDIATE(x) (((x) >> S_RDMA_SEND_WITH_SE_IMMEDIATE) & M_RDMA_SEND_WITH_SE_IMMEDIATE)
+
+#define S_RDMA_SEND_WITH_IMMEDIATE 16
+#define M_RDMA_SEND_WITH_IMMEDIATE 0x1fU
+#define V_RDMA_SEND_WITH_IMMEDIATE(x) ((x) << S_RDMA_SEND_WITH_IMMEDIATE)
+#define G_RDMA_SEND_WITH_IMMEDIATE(x) (((x) >> S_RDMA_SEND_WITH_IMMEDIATE) & M_RDMA_SEND_WITH_IMMEDIATE)
+
+#define S_RDMA_ATOMIC_WRITE_RESPONSE 8
+#define M_RDMA_ATOMIC_WRITE_RESPONSE 0x1fU
+#define V_RDMA_ATOMIC_WRITE_RESPONSE(x) ((x) << S_RDMA_ATOMIC_WRITE_RESPONSE)
+#define G_RDMA_ATOMIC_WRITE_RESPONSE(x) (((x) >> S_RDMA_ATOMIC_WRITE_RESPONSE) & M_RDMA_ATOMIC_WRITE_RESPONSE)
+
+#define S_RDMA_ATOMIC_WRITE_REQUEST 0
+#define M_RDMA_ATOMIC_WRITE_REQUEST 0x1fU
+#define V_RDMA_ATOMIC_WRITE_REQUEST(x) ((x) << S_RDMA_ATOMIC_WRITE_REQUEST)
+#define G_RDMA_ATOMIC_WRITE_REQUEST(x) (((x) >> S_RDMA_ATOMIC_WRITE_REQUEST) & M_RDMA_ATOMIC_WRITE_REQUEST)
+
+#define A_ULP_TX_NVME_TCP_TPT_LLIMIT 0x8fa4
+#define A_ULP_TX_NVME_TCP_TPT_ULIMIT 0x8fa8
+#define A_ULP_TX_NVME_TCP_PBL_LLIMIT 0x8fac
+#define A_ULP_TX_NVME_TCP_PBL_ULIMIT 0x8fb0
#define A_ULP_TX_TLS_IND_CMD 0x8fb8
#define S_TLS_TX_REG_OFF_ADDR 0
@@ -29678,7 +37568,48 @@
#define V_TLS_TX_REG_OFF_ADDR(x) ((x) << S_TLS_TX_REG_OFF_ADDR)
#define G_TLS_TX_REG_OFF_ADDR(x) (((x) >> S_TLS_TX_REG_OFF_ADDR) & M_TLS_TX_REG_OFF_ADDR)
+#define A_ULP_TX_DBG_CTL 0x8fb8
#define A_ULP_TX_TLS_IND_DATA 0x8fbc
+#define A_ULP_TX_DBG_DATA 0x8fbc
+#define A_ULP_TX_TLS_CH0_PERR_CAUSE 0xc
+
+#define S_GLUE_PERR 3
+#define V_GLUE_PERR(x) ((x) << S_GLUE_PERR)
+#define F_GLUE_PERR V_GLUE_PERR(1U)
+
+#define S_DSGL_PERR 2
+#define V_DSGL_PERR(x) ((x) << S_DSGL_PERR)
+#define F_DSGL_PERR V_DSGL_PERR(1U)
+
+#define S_SGE_PERR 1
+#define V_SGE_PERR(x) ((x) << S_SGE_PERR)
+#define F_SGE_PERR V_SGE_PERR(1U)
+
+#define S_KEX_PERR 0
+#define V_KEX_PERR(x) ((x) << S_KEX_PERR)
+#define F_KEX_PERR V_KEX_PERR(1U)
+
+#define A_ULP_TX_TLS_CH0_PERR_ENABLE 0x10
+#define A_ULP_TX_TLS_CH0_HMACCTRL_CFG 0x20
+
+#define S_HMAC_CFG6 12
+#define M_HMAC_CFG6 0x3fU
+#define V_HMAC_CFG6(x) ((x) << S_HMAC_CFG6)
+#define G_HMAC_CFG6(x) (((x) >> S_HMAC_CFG6) & M_HMAC_CFG6)
+
+#define S_HMAC_CFG5 6
+#define M_HMAC_CFG5 0x3fU
+#define V_HMAC_CFG5(x) ((x) << S_HMAC_CFG5)
+#define G_HMAC_CFG5(x) (((x) >> S_HMAC_CFG5) & M_HMAC_CFG5)
+
+#define S_HMAC_CFG4 0
+#define M_HMAC_CFG4 0x3fU
+#define V_HMAC_CFG4(x) ((x) << S_HMAC_CFG4)
+#define G_HMAC_CFG4(x) (((x) >> S_HMAC_CFG4) & M_HMAC_CFG4)
+
+#define A_ULP_TX_TLS_CH1_PERR_CAUSE 0x4c
+#define A_ULP_TX_TLS_CH1_PERR_ENABLE 0x50
+#define A_ULP_TX_TLS_CH1_HMACCTRL_CFG 0x60
/* registers for module PM_RX */
#define PM_RX_BASE_ADDR 0x8fc0
@@ -29703,6 +37634,31 @@
#define V_PREFETCH_ENABLE(x) ((x) << S_PREFETCH_ENABLE)
#define F_PREFETCH_ENABLE V_PREFETCH_ENABLE(1U)
+#define S_CACHE_HOLD 13
+#define V_CACHE_HOLD(x) ((x) << S_CACHE_HOLD)
+#define F_CACHE_HOLD V_CACHE_HOLD(1U)
+
+#define S_CACHE_INIT_DONE 12
+#define V_CACHE_INIT_DONE(x) ((x) << S_CACHE_INIT_DONE)
+#define F_CACHE_INIT_DONE V_CACHE_INIT_DONE(1U)
+
+#define S_CACHE_DEPTH 8
+#define M_CACHE_DEPTH 0xfU
+#define V_CACHE_DEPTH(x) ((x) << S_CACHE_DEPTH)
+#define G_CACHE_DEPTH(x) (((x) >> S_CACHE_DEPTH) & M_CACHE_DEPTH)
+
+#define S_CACHE_INIT 7
+#define V_CACHE_INIT(x) ((x) << S_CACHE_INIT)
+#define F_CACHE_INIT V_CACHE_INIT(1U)
+
+#define S_CACHE_SLEEP 6
+#define V_CACHE_SLEEP(x) ((x) << S_CACHE_SLEEP)
+#define F_CACHE_SLEEP V_CACHE_SLEEP(1U)
+
+#define S_CACHE_BYPASS 5
+#define V_CACHE_BYPASS(x) ((x) << S_CACHE_BYPASS)
+#define F_CACHE_BYPASS V_CACHE_BYPASS(1U)
+
#define A_PM_RX_STAT_CONFIG 0x8fc8
#define A_PM_RX_STAT_COUNT 0x8fcc
#define A_PM_RX_STAT_LSB 0x8fd0
@@ -29723,6 +37679,11 @@
#define V_PMDBGADDR(x) ((x) << S_PMDBGADDR)
#define G_PMDBGADDR(x) (((x) >> S_PMDBGADDR) & M_PMDBGADDR)
+#define S_T7_OSPIWRBUSY_T5 21
+#define M_T7_OSPIWRBUSY_T5 0xfU
+#define V_T7_OSPIWRBUSY_T5(x) ((x) << S_T7_OSPIWRBUSY_T5)
+#define G_T7_OSPIWRBUSY_T5(x) (((x) >> S_T7_OSPIWRBUSY_T5) & M_T7_OSPIWRBUSY_T5)
+
#define A_PM_RX_STAT_MSB 0x8fd4
#define A_PM_RX_DBG_DATA 0x8fd4
#define A_PM_RX_INT_ENABLE 0x8fd8
@@ -29843,7 +37804,36 @@
#define V_SDC_ERR(x) ((x) << S_SDC_ERR)
#define F_SDC_ERR V_SDC_ERR(1U)
+#define S_MASTER_PERR 31
+#define V_MASTER_PERR(x) ((x) << S_MASTER_PERR)
+#define F_MASTER_PERR V_MASTER_PERR(1U)
+
+#define S_T7_OSPI_OVERFLOW3 30
+#define V_T7_OSPI_OVERFLOW3(x) ((x) << S_T7_OSPI_OVERFLOW3)
+#define F_T7_OSPI_OVERFLOW3 V_T7_OSPI_OVERFLOW3(1U)
+
+#define S_T7_OSPI_OVERFLOW2 29
+#define V_T7_OSPI_OVERFLOW2(x) ((x) << S_T7_OSPI_OVERFLOW2)
+#define F_T7_OSPI_OVERFLOW2 V_T7_OSPI_OVERFLOW2(1U)
+
#define A_PM_RX_INT_CAUSE 0x8fdc
+
+#define S_CACHE_SRAM_ERROR 3
+#define V_CACHE_SRAM_ERROR(x) ((x) << S_CACHE_SRAM_ERROR)
+#define F_CACHE_SRAM_ERROR V_CACHE_SRAM_ERROR(1U)
+
+#define S_CACHE_LRU_ERROR 2
+#define V_CACHE_LRU_ERROR(x) ((x) << S_CACHE_LRU_ERROR)
+#define F_CACHE_LRU_ERROR V_CACHE_LRU_ERROR(1U)
+
+#define S_CACHE_ISLAND_ERROR 1
+#define V_CACHE_ISLAND_ERROR(x) ((x) << S_CACHE_ISLAND_ERROR)
+#define F_CACHE_ISLAND_ERROR V_CACHE_ISLAND_ERROR(1U)
+
+#define S_CACHE_CTRL_ERROR 0
+#define V_CACHE_CTRL_ERROR(x) ((x) << S_CACHE_CTRL_ERROR)
+#define F_CACHE_CTRL_ERROR V_CACHE_CTRL_ERROR(1U)
+
#define A_PM_RX_ISPI_DBG_4B_DATA0 0x10000
#define A_PM_RX_ISPI_DBG_4B_DATA1 0x10001
#define A_PM_RX_ISPI_DBG_4B_DATA2 0x10002
@@ -29959,12 +37949,25 @@
#define V_CHNL0_MAX_DEFICIT_CNT(x) ((x) << S_CHNL0_MAX_DEFICIT_CNT)
#define G_CHNL0_MAX_DEFICIT_CNT(x) (((x) >> S_CHNL0_MAX_DEFICIT_CNT) & M_CHNL0_MAX_DEFICIT_CNT)
+#define A_PM_RX_PRFTCH_WRR_MAX_DEFICIT_CNT0 0x1001c
#define A_PM_RX_FEATURE_EN 0x1001d
#define S_PIO_CH_DEFICIT_CTL_EN_RX 0
#define V_PIO_CH_DEFICIT_CTL_EN_RX(x) ((x) << S_PIO_CH_DEFICIT_CTL_EN_RX)
#define F_PIO_CH_DEFICIT_CTL_EN_RX V_PIO_CH_DEFICIT_CTL_EN_RX(1U)
+#define A_PM_RX_PRFTCH_WRR_MAX_DEFICIT_CNT1 0x1001d
+
+#define S_CHNL3_MAX_DEFICIT_CNT 16
+#define M_CHNL3_MAX_DEFICIT_CNT 0xffffU
+#define V_CHNL3_MAX_DEFICIT_CNT(x) ((x) << S_CHNL3_MAX_DEFICIT_CNT)
+#define G_CHNL3_MAX_DEFICIT_CNT(x) (((x) >> S_CHNL3_MAX_DEFICIT_CNT) & M_CHNL3_MAX_DEFICIT_CNT)
+
+#define S_CHNL2_MAX_DEFICIT_CNT 0
+#define M_CHNL2_MAX_DEFICIT_CNT 0xffffU
+#define V_CHNL2_MAX_DEFICIT_CNT(x) ((x) << S_CHNL2_MAX_DEFICIT_CNT)
+#define G_CHNL2_MAX_DEFICIT_CNT(x) (((x) >> S_CHNL2_MAX_DEFICIT_CNT) & M_CHNL2_MAX_DEFICIT_CNT)
+
#define A_PM_RX_CH0_OSPI_DEFICIT_THRSHLD 0x1001e
#define S_CH0_OSPI_DEFICIT_THRSHLD 0
@@ -30245,16 +38248,6 @@
#define V_RX_C_TXAFULL(x) ((x) << S_RX_C_TXAFULL)
#define G_RX_C_TXAFULL(x) (((x) >> S_RX_C_TXAFULL) & M_RX_C_TXAFULL)
-#define S_T6_RX_PCMD_DRDY 26
-#define M_T6_RX_PCMD_DRDY 0x3U
-#define V_T6_RX_PCMD_DRDY(x) ((x) << S_T6_RX_PCMD_DRDY)
-#define G_T6_RX_PCMD_DRDY(x) (((x) >> S_T6_RX_PCMD_DRDY) & M_T6_RX_PCMD_DRDY)
-
-#define S_T6_RX_PCMD_SRDY 24
-#define M_T6_RX_PCMD_SRDY 0x3U
-#define V_T6_RX_PCMD_SRDY(x) ((x) << S_T6_RX_PCMD_SRDY)
-#define G_T6_RX_PCMD_SRDY(x) (((x) >> S_T6_RX_PCMD_SRDY) & M_T6_RX_PCMD_SRDY)
-
#define A_PM_RX_DBG_STAT6 0x10027
#define S_RX_M_INTRNL_FIFO_CNT 4
@@ -30434,6 +38427,179 @@
#define V_RX_BUNDLE_LEN0(x) ((x) << S_RX_BUNDLE_LEN0)
#define G_RX_BUNDLE_LEN0(x) (((x) >> S_RX_BUNDLE_LEN0) & M_RX_BUNDLE_LEN0)
+#define A_PM_RX_INT_CAUSE_MASK_HALT_2 0x10049
+#define A_PM_RX_INT_ENABLE_2 0x10060
+
+#define S_CACHE_SRAM_ODD_CERR 12
+#define V_CACHE_SRAM_ODD_CERR(x) ((x) << S_CACHE_SRAM_ODD_CERR)
+#define F_CACHE_SRAM_ODD_CERR V_CACHE_SRAM_ODD_CERR(1U)
+
+#define S_CACHE_SRAM_EVEN_CERR 11
+#define V_CACHE_SRAM_EVEN_CERR(x) ((x) << S_CACHE_SRAM_EVEN_CERR)
+#define F_CACHE_SRAM_EVEN_CERR V_CACHE_SRAM_EVEN_CERR(1U)
+
+#define S_CACHE_LRU_LEFT_CERR 10
+#define V_CACHE_LRU_LEFT_CERR(x) ((x) << S_CACHE_LRU_LEFT_CERR)
+#define F_CACHE_LRU_LEFT_CERR V_CACHE_LRU_LEFT_CERR(1U)
+
+#define S_CACHE_LRU_RIGHT_CERR 9
+#define V_CACHE_LRU_RIGHT_CERR(x) ((x) << S_CACHE_LRU_RIGHT_CERR)
+#define F_CACHE_LRU_RIGHT_CERR V_CACHE_LRU_RIGHT_CERR(1U)
+
+#define S_CACHE_ISLAND_CERR 8
+#define V_CACHE_ISLAND_CERR(x) ((x) << S_CACHE_ISLAND_CERR)
+#define F_CACHE_ISLAND_CERR V_CACHE_ISLAND_CERR(1U)
+
+#define S_OCSPI_CERR 7
+#define V_OCSPI_CERR(x) ((x) << S_OCSPI_CERR)
+#define F_OCSPI_CERR V_OCSPI_CERR(1U)
+
+#define S_IESPI_CERR 6
+#define V_IESPI_CERR(x) ((x) << S_IESPI_CERR)
+#define F_IESPI_CERR V_IESPI_CERR(1U)
+
+#define S_OCSPI2_RX_FRAMING_ERROR 5
+#define V_OCSPI2_RX_FRAMING_ERROR(x) ((x) << S_OCSPI2_RX_FRAMING_ERROR)
+#define F_OCSPI2_RX_FRAMING_ERROR V_OCSPI2_RX_FRAMING_ERROR(1U)
+
+#define S_OCSPI3_RX_FRAMING_ERROR 4
+#define V_OCSPI3_RX_FRAMING_ERROR(x) ((x) << S_OCSPI3_RX_FRAMING_ERROR)
+#define F_OCSPI3_RX_FRAMING_ERROR V_OCSPI3_RX_FRAMING_ERROR(1U)
+
+#define S_OCSPI2_TX_FRAMING_ERROR 3
+#define V_OCSPI2_TX_FRAMING_ERROR(x) ((x) << S_OCSPI2_TX_FRAMING_ERROR)
+#define F_OCSPI2_TX_FRAMING_ERROR V_OCSPI2_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI3_TX_FRAMING_ERROR 2
+#define V_OCSPI3_TX_FRAMING_ERROR(x) ((x) << S_OCSPI3_TX_FRAMING_ERROR)
+#define F_OCSPI3_TX_FRAMING_ERROR V_OCSPI3_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI2_OFIFO2X_TX_FRAMING_ERROR 1
+#define V_OCSPI2_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI2_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OCSPI2_OFIFO2X_TX_FRAMING_ERROR V_OCSPI2_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI3_OFIFO2X_TX_FRAMING_ERROR 0
+#define V_OCSPI3_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI3_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OCSPI3_OFIFO2X_TX_FRAMING_ERROR V_OCSPI3_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define A_PM_RX_INT_CAUSE_2 0x10061
+#define A_PM_RX_PERR_ENABLE 0x10062
+
+#define S_T7_SDC_ERR 31
+#define V_T7_SDC_ERR(x) ((x) << S_T7_SDC_ERR)
+#define F_T7_SDC_ERR V_T7_SDC_ERR(1U)
+
+#define S_T7_MA_INTF_SDC_ERR 30
+#define V_T7_MA_INTF_SDC_ERR(x) ((x) << S_T7_MA_INTF_SDC_ERR)
+#define F_T7_MA_INTF_SDC_ERR V_T7_MA_INTF_SDC_ERR(1U)
+
+#define S_E_PCMD_PERR 21
+#define V_E_PCMD_PERR(x) ((x) << S_E_PCMD_PERR)
+#define F_E_PCMD_PERR V_E_PCMD_PERR(1U)
+
+#define S_CACHE_RSP_DFIFO_PERR 20
+#define V_CACHE_RSP_DFIFO_PERR(x) ((x) << S_CACHE_RSP_DFIFO_PERR)
+#define F_CACHE_RSP_DFIFO_PERR V_CACHE_RSP_DFIFO_PERR(1U)
+
+#define S_CACHE_SRAM_ODD_PERR 19
+#define V_CACHE_SRAM_ODD_PERR(x) ((x) << S_CACHE_SRAM_ODD_PERR)
+#define F_CACHE_SRAM_ODD_PERR V_CACHE_SRAM_ODD_PERR(1U)
+
+#define S_CACHE_SRAM_EVEN_PERR 18
+#define V_CACHE_SRAM_EVEN_PERR(x) ((x) << S_CACHE_SRAM_EVEN_PERR)
+#define F_CACHE_SRAM_EVEN_PERR V_CACHE_SRAM_EVEN_PERR(1U)
+
+#define S_CACHE_RSVD_PERR 17
+#define V_CACHE_RSVD_PERR(x) ((x) << S_CACHE_RSVD_PERR)
+#define F_CACHE_RSVD_PERR V_CACHE_RSVD_PERR(1U)
+
+#define S_CACHE_LRU_LEFT_PERR 16
+#define V_CACHE_LRU_LEFT_PERR(x) ((x) << S_CACHE_LRU_LEFT_PERR)
+#define F_CACHE_LRU_LEFT_PERR V_CACHE_LRU_LEFT_PERR(1U)
+
+#define S_CACHE_LRU_RIGHT_PERR 15
+#define V_CACHE_LRU_RIGHT_PERR(x) ((x) << S_CACHE_LRU_RIGHT_PERR)
+#define F_CACHE_LRU_RIGHT_PERR V_CACHE_LRU_RIGHT_PERR(1U)
+
+#define S_CACHE_RSP_CMD_PERR 14
+#define V_CACHE_RSP_CMD_PERR(x) ((x) << S_CACHE_RSP_CMD_PERR)
+#define F_CACHE_RSP_CMD_PERR V_CACHE_RSP_CMD_PERR(1U)
+
+#define S_CACHE_SRAM_CMD_PERR 13
+#define V_CACHE_SRAM_CMD_PERR(x) ((x) << S_CACHE_SRAM_CMD_PERR)
+#define F_CACHE_SRAM_CMD_PERR V_CACHE_SRAM_CMD_PERR(1U)
+
+#define S_CACHE_MA_CMD_PERR 12
+#define V_CACHE_MA_CMD_PERR(x) ((x) << S_CACHE_MA_CMD_PERR)
+#define F_CACHE_MA_CMD_PERR V_CACHE_MA_CMD_PERR(1U)
+
+#define S_CACHE_TCAM_PERR 11
+#define V_CACHE_TCAM_PERR(x) ((x) << S_CACHE_TCAM_PERR)
+#define F_CACHE_TCAM_PERR V_CACHE_TCAM_PERR(1U)
+
+#define S_CACHE_ISLAND_PERR 10
+#define V_CACHE_ISLAND_PERR(x) ((x) << S_CACHE_ISLAND_PERR)
+#define F_CACHE_ISLAND_PERR V_CACHE_ISLAND_PERR(1U)
+
+#define S_MC_WCNT_FIFO_PERR 9
+#define V_MC_WCNT_FIFO_PERR(x) ((x) << S_MC_WCNT_FIFO_PERR)
+#define F_MC_WCNT_FIFO_PERR V_MC_WCNT_FIFO_PERR(1U)
+
+#define S_MC_WDATA_FIFO_PERR 8
+#define V_MC_WDATA_FIFO_PERR(x) ((x) << S_MC_WDATA_FIFO_PERR)
+#define F_MC_WDATA_FIFO_PERR V_MC_WDATA_FIFO_PERR(1U)
+
+#define S_MC_RCNT_FIFO_PERR 7
+#define V_MC_RCNT_FIFO_PERR(x) ((x) << S_MC_RCNT_FIFO_PERR)
+#define F_MC_RCNT_FIFO_PERR V_MC_RCNT_FIFO_PERR(1U)
+
+#define S_MC_RDATA_FIFO_PERR 6
+#define V_MC_RDATA_FIFO_PERR(x) ((x) << S_MC_RDATA_FIFO_PERR)
+#define F_MC_RDATA_FIFO_PERR V_MC_RDATA_FIFO_PERR(1U)
+
+#define S_TOKEN_FIFO_PERR 5
+#define V_TOKEN_FIFO_PERR(x) ((x) << S_TOKEN_FIFO_PERR)
+#define F_TOKEN_FIFO_PERR V_TOKEN_FIFO_PERR(1U)
+
+#define S_T7_BUNDLE_LEN_PARERR 4
+#define V_T7_BUNDLE_LEN_PARERR(x) ((x) << S_T7_BUNDLE_LEN_PARERR)
+#define F_T7_BUNDLE_LEN_PARERR V_T7_BUNDLE_LEN_PARERR(1U)
+
+#define A_PM_RX_PERR_CAUSE 0x10063
+#define A_PM_RX_EXT_CFIFO_CONFIG0 0x10070
+
+#define S_CH1_PTR_MAX 17
+#define M_CH1_PTR_MAX 0x7fffU
+#define V_CH1_PTR_MAX(x) ((x) << S_CH1_PTR_MAX)
+#define G_CH1_PTR_MAX(x) (((x) >> S_CH1_PTR_MAX) & M_CH1_PTR_MAX)
+
+#define S_CH0_PTR_MAX 1
+#define M_CH0_PTR_MAX 0x7fffU
+#define V_CH0_PTR_MAX(x) ((x) << S_CH0_PTR_MAX)
+#define G_CH0_PTR_MAX(x) (((x) >> S_CH0_PTR_MAX) & M_CH0_PTR_MAX)
+
+#define S_STROBE 0
+#define V_STROBE(x) ((x) << S_STROBE)
+#define F_STROBE V_STROBE(1U)
+
+#define A_PM_RX_EXT_CFIFO_CONFIG1 0x10071
+
+#define S_CH2_PTR_MAX 1
+#define M_CH2_PTR_MAX 0x7fffU
+#define V_CH2_PTR_MAX(x) ((x) << S_CH2_PTR_MAX)
+#define G_CH2_PTR_MAX(x) (((x) >> S_CH2_PTR_MAX) & M_CH2_PTR_MAX)
+
+#define A_PM_RX_EXT_EFIFO_CONFIG0 0x10072
+#define A_PM_RX_EXT_EFIFO_CONFIG1 0x10073
+#define A_T7_PM_RX_CH0_OSPI_DEFICIT_THRSHLD 0x10074
+#define A_T7_PM_RX_CH1_OSPI_DEFICIT_THRSHLD 0x10075
+#define A_PM_RX_CH2_OSPI_DEFICIT_THRSHLD 0x10076
+#define A_PM_RX_CH3_OSPI_DEFICIT_THRSHLD 0x10077
+#define A_T7_PM_RX_FEATURE_EN 0x10078
+#define A_PM_RX_TCAM_BIST_CTRL 0x10080
+#define A_PM_RX_TCAM_BIST_CB_PASS 0x10081
+#define A_PM_RX_TCAM_BIST_CB_BUSY 0x10082
+
/* registers for module PM_TX */
#define PM_TX_BASE_ADDR 0x8fe0
@@ -30613,6 +38779,118 @@
#define V_C_PCMD_PAR_ERROR(x) ((x) << S_C_PCMD_PAR_ERROR)
#define F_C_PCMD_PAR_ERROR V_C_PCMD_PAR_ERROR(1U)
+#define S_T7_ZERO_C_CMD_ERROR 30
+#define V_T7_ZERO_C_CMD_ERROR(x) ((x) << S_T7_ZERO_C_CMD_ERROR)
+#define F_T7_ZERO_C_CMD_ERROR V_T7_ZERO_C_CMD_ERROR(1U)
+
+#define S_OESPI_COR_ERR 29
+#define V_OESPI_COR_ERR(x) ((x) << S_OESPI_COR_ERR)
+#define F_OESPI_COR_ERR V_OESPI_COR_ERR(1U)
+
+#define S_ICSPI_COR_ERR 28
+#define V_ICSPI_COR_ERR(x) ((x) << S_ICSPI_COR_ERR)
+#define F_ICSPI_COR_ERR V_ICSPI_COR_ERR(1U)
+
+#define S_ICSPI_OVFL 24
+#define V_ICSPI_OVFL(x) ((x) << S_ICSPI_OVFL)
+#define F_ICSPI_OVFL V_ICSPI_OVFL(1U)
+
+#define S_PCMD_LEN_OVFL3 23
+#define V_PCMD_LEN_OVFL3(x) ((x) << S_PCMD_LEN_OVFL3)
+#define F_PCMD_LEN_OVFL3 V_PCMD_LEN_OVFL3(1U)
+
+#define S_T7_PCMD_LEN_OVFL2 22
+#define V_T7_PCMD_LEN_OVFL2(x) ((x) << S_T7_PCMD_LEN_OVFL2)
+#define F_T7_PCMD_LEN_OVFL2 V_T7_PCMD_LEN_OVFL2(1U)
+
+#define S_T7_PCMD_LEN_OVFL1 21
+#define V_T7_PCMD_LEN_OVFL1(x) ((x) << S_T7_PCMD_LEN_OVFL1)
+#define F_T7_PCMD_LEN_OVFL1 V_T7_PCMD_LEN_OVFL1(1U)
+
+#define S_T7_PCMD_LEN_OVFL0 20
+#define V_T7_PCMD_LEN_OVFL0(x) ((x) << S_T7_PCMD_LEN_OVFL0)
+#define F_T7_PCMD_LEN_OVFL0 V_T7_PCMD_LEN_OVFL0(1U)
+
+#define S_T7_ICSPI0_FIFO2X_RX_FRAMING_ERROR 19
+#define V_T7_ICSPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI0_FIFO2X_RX_FRAMING_ERROR)
+#define F_T7_ICSPI0_FIFO2X_RX_FRAMING_ERROR V_T7_ICSPI0_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI1_FIFO2X_RX_FRAMING_ERROR 18
+#define V_T7_ICSPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI1_FIFO2X_RX_FRAMING_ERROR)
+#define F_T7_ICSPI1_FIFO2X_RX_FRAMING_ERROR V_T7_ICSPI1_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI2_FIFO2X_RX_FRAMING_ERROR 17
+#define V_T7_ICSPI2_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI2_FIFO2X_RX_FRAMING_ERROR)
+#define F_T7_ICSPI2_FIFO2X_RX_FRAMING_ERROR V_T7_ICSPI2_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI3_FIFO2X_RX_FRAMING_ERROR 16
+#define V_T7_ICSPI3_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI3_FIFO2X_RX_FRAMING_ERROR)
+#define F_T7_ICSPI3_FIFO2X_RX_FRAMING_ERROR V_T7_ICSPI3_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI0_TX_FRAMING_ERROR 15
+#define V_T7_ICSPI0_TX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI0_TX_FRAMING_ERROR)
+#define F_T7_ICSPI0_TX_FRAMING_ERROR V_T7_ICSPI0_TX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI1_TX_FRAMING_ERROR 14
+#define V_T7_ICSPI1_TX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI1_TX_FRAMING_ERROR)
+#define F_T7_ICSPI1_TX_FRAMING_ERROR V_T7_ICSPI1_TX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI2_TX_FRAMING_ERROR 13
+#define V_T7_ICSPI2_TX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI2_TX_FRAMING_ERROR)
+#define F_T7_ICSPI2_TX_FRAMING_ERROR V_T7_ICSPI2_TX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI3_TX_FRAMING_ERROR 12
+#define V_T7_ICSPI3_TX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI3_TX_FRAMING_ERROR)
+#define F_T7_ICSPI3_TX_FRAMING_ERROR V_T7_ICSPI3_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI0_RX_FRAMING_ERROR 11
+#define V_T7_OESPI0_RX_FRAMING_ERROR(x) ((x) << S_T7_OESPI0_RX_FRAMING_ERROR)
+#define F_T7_OESPI0_RX_FRAMING_ERROR V_T7_OESPI0_RX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI1_RX_FRAMING_ERROR 10
+#define V_T7_OESPI1_RX_FRAMING_ERROR(x) ((x) << S_T7_OESPI1_RX_FRAMING_ERROR)
+#define F_T7_OESPI1_RX_FRAMING_ERROR V_T7_OESPI1_RX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI2_RX_FRAMING_ERROR 9
+#define V_T7_OESPI2_RX_FRAMING_ERROR(x) ((x) << S_T7_OESPI2_RX_FRAMING_ERROR)
+#define F_T7_OESPI2_RX_FRAMING_ERROR V_T7_OESPI2_RX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI3_RX_FRAMING_ERROR 8
+#define V_T7_OESPI3_RX_FRAMING_ERROR(x) ((x) << S_T7_OESPI3_RX_FRAMING_ERROR)
+#define F_T7_OESPI3_RX_FRAMING_ERROR V_T7_OESPI3_RX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI0_TX_FRAMING_ERROR 7
+#define V_T7_OESPI0_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI0_TX_FRAMING_ERROR)
+#define F_T7_OESPI0_TX_FRAMING_ERROR V_T7_OESPI0_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI1_TX_FRAMING_ERROR 6
+#define V_T7_OESPI1_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI1_TX_FRAMING_ERROR)
+#define F_T7_OESPI1_TX_FRAMING_ERROR V_T7_OESPI1_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI2_TX_FRAMING_ERROR 5
+#define V_T7_OESPI2_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI2_TX_FRAMING_ERROR)
+#define F_T7_OESPI2_TX_FRAMING_ERROR V_T7_OESPI2_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI3_TX_FRAMING_ERROR 4
+#define V_T7_OESPI3_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI3_TX_FRAMING_ERROR)
+#define F_T7_OESPI3_TX_FRAMING_ERROR V_T7_OESPI3_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI0_OFIFO2X_TX_FRAMING_ERROR 3
+#define V_T7_OESPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI0_OFIFO2X_TX_FRAMING_ERROR)
+#define F_T7_OESPI0_OFIFO2X_TX_FRAMING_ERROR V_T7_OESPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI1_OFIFO2X_TX_FRAMING_ERROR 2
+#define V_T7_OESPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
+#define F_T7_OESPI1_OFIFO2X_TX_FRAMING_ERROR V_T7_OESPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI2_OFIFO2X_TX_FRAMING_ERROR 1
+#define V_T7_OESPI2_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI2_OFIFO2X_TX_FRAMING_ERROR)
+#define F_T7_OESPI2_OFIFO2X_TX_FRAMING_ERROR V_T7_OESPI2_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI3_OFIFO2X_TX_FRAMING_ERROR 0
+#define V_T7_OESPI3_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI3_OFIFO2X_TX_FRAMING_ERROR)
+#define F_T7_OESPI3_OFIFO2X_TX_FRAMING_ERROR V_T7_OESPI3_OFIFO2X_TX_FRAMING_ERROR(1U)
+
#define A_PM_TX_INT_CAUSE 0x8ffc
#define S_ZERO_C_CMD_ERROR 28
@@ -30624,23 +38902,51 @@
#define F_OSPI_OR_BUNDLE_LEN_PAR_ERR V_OSPI_OR_BUNDLE_LEN_PAR_ERR(1U)
#define A_PM_TX_ISPI_DBG_4B_DATA0 0x10000
+#define A_T7_PM_TX_DBG_STAT_MSB 0x10000
#define A_PM_TX_ISPI_DBG_4B_DATA1 0x10001
+#define A_T7_PM_TX_DBG_STAT_LSB 0x10001
#define A_PM_TX_ISPI_DBG_4B_DATA2 0x10002
+#define A_T7_PM_TX_DBG_RSVD_FLIT_CNT 0x10002
#define A_PM_TX_ISPI_DBG_4B_DATA3 0x10003
+#define A_T7_PM_TX_SDC_EN 0x10003
#define A_PM_TX_ISPI_DBG_4B_DATA4 0x10004
+#define A_T7_PM_TX_INOUT_FIFO_DBG_CHNL_SEL 0x10004
#define A_PM_TX_ISPI_DBG_4B_DATA5 0x10005
+#define A_T7_PM_TX_INOUT_FIFO_DBG_WR 0x10005
#define A_PM_TX_ISPI_DBG_4B_DATA6 0x10006
+#define A_T7_PM_TX_INPUT_FIFO_STR_FWD_EN 0x10006
#define A_PM_TX_ISPI_DBG_4B_DATA7 0x10007
+#define A_T7_PM_TX_FEATURE_EN 0x10007
+
+#define S_IN_AFULL_TH 5
+#define M_IN_AFULL_TH 0x3U
+#define V_IN_AFULL_TH(x) ((x) << S_IN_AFULL_TH)
+#define G_IN_AFULL_TH(x) (((x) >> S_IN_AFULL_TH) & M_IN_AFULL_TH)
+
+#define S_PIO_FROM_CH_EN 4
+#define V_PIO_FROM_CH_EN(x) ((x) << S_PIO_FROM_CH_EN)
+#define F_PIO_FROM_CH_EN V_PIO_FROM_CH_EN(1U)
+
#define A_PM_TX_ISPI_DBG_4B_DATA8 0x10008
+#define A_T7_PM_TX_T5_PM_TX_INT_ENABLE 0x10008
#define A_PM_TX_OSPI_DBG_4B_DATA0 0x10009
+#define A_T7_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD0 0x10009
#define A_PM_TX_OSPI_DBG_4B_DATA1 0x1000a
+#define A_T7_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD1 0x1000a
#define A_PM_TX_OSPI_DBG_4B_DATA2 0x1000b
+#define A_T7_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD2 0x1000b
#define A_PM_TX_OSPI_DBG_4B_DATA3 0x1000c
+#define A_T7_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD3 0x1000c
#define A_PM_TX_OSPI_DBG_4B_DATA4 0x1000d
+#define A_T7_PM_TX_CH0_OSPI_DEFICIT_THRSHLD 0x1000d
#define A_PM_TX_OSPI_DBG_4B_DATA5 0x1000e
+#define A_T7_PM_TX_CH1_OSPI_DEFICIT_THRSHLD 0x1000e
#define A_PM_TX_OSPI_DBG_4B_DATA6 0x1000f
+#define A_T7_PM_TX_CH2_OSPI_DEFICIT_THRSHLD 0x1000f
#define A_PM_TX_OSPI_DBG_4B_DATA7 0x10010
+#define A_T7_PM_TX_CH3_OSPI_DEFICIT_THRSHLD 0x10010
#define A_PM_TX_OSPI_DBG_4B_DATA8 0x10011
+#define A_T7_PM_TX_INT_CAUSE_MASK_HALT 0x10011
#define A_PM_TX_OSPI_DBG_4B_DATA9 0x10012
#define A_PM_TX_OSPI_DBG_4B_DATA10 0x10013
#define A_PM_TX_OSPI_DBG_4B_DATA11 0x10014
@@ -30722,6 +39028,48 @@
#define A_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD3 0x10026
#define A_PM_TX_CH0_OSPI_DEFICIT_THRSHLD 0x10027
#define A_PM_TX_CH1_OSPI_DEFICIT_THRSHLD 0x10028
+#define A_PM_TX_PERR_ENABLE 0x10028
+
+#define S_T7_1_OSPI_OVERFLOW3 23
+#define V_T7_1_OSPI_OVERFLOW3(x) ((x) << S_T7_1_OSPI_OVERFLOW3)
+#define F_T7_1_OSPI_OVERFLOW3 V_T7_1_OSPI_OVERFLOW3(1U)
+
+#define S_T7_1_OSPI_OVERFLOW2 22
+#define V_T7_1_OSPI_OVERFLOW2(x) ((x) << S_T7_1_OSPI_OVERFLOW2)
+#define F_T7_1_OSPI_OVERFLOW2 V_T7_1_OSPI_OVERFLOW2(1U)
+
+#define S_T7_1_OSPI_OVERFLOW1 21
+#define V_T7_1_OSPI_OVERFLOW1(x) ((x) << S_T7_1_OSPI_OVERFLOW1)
+#define F_T7_1_OSPI_OVERFLOW1 V_T7_1_OSPI_OVERFLOW1(1U)
+
+#define S_T7_1_OSPI_OVERFLOW0 20
+#define V_T7_1_OSPI_OVERFLOW0(x) ((x) << S_T7_1_OSPI_OVERFLOW0)
+#define F_T7_1_OSPI_OVERFLOW0 V_T7_1_OSPI_OVERFLOW0(1U)
+
+#define S_T7_BUNDLE_LEN_OVFL_EN 18
+#define V_T7_BUNDLE_LEN_OVFL_EN(x) ((x) << S_T7_BUNDLE_LEN_OVFL_EN)
+#define F_T7_BUNDLE_LEN_OVFL_EN V_T7_BUNDLE_LEN_OVFL_EN(1U)
+
+#define S_T7_M_INTFPERREN 17
+#define V_T7_M_INTFPERREN(x) ((x) << S_T7_M_INTFPERREN)
+#define F_T7_M_INTFPERREN V_T7_M_INTFPERREN(1U)
+
+#define S_T7_1_SDC_ERR 16
+#define V_T7_1_SDC_ERR(x) ((x) << S_T7_1_SDC_ERR)
+#define F_T7_1_SDC_ERR V_T7_1_SDC_ERR(1U)
+
+#define S_TOKEN_PAR_ERROR 5
+#define V_TOKEN_PAR_ERROR(x) ((x) << S_TOKEN_PAR_ERROR)
+#define F_TOKEN_PAR_ERROR V_TOKEN_PAR_ERROR(1U)
+
+#define S_BUNDLE_LEN_PAR_ERROR 4
+#define V_BUNDLE_LEN_PAR_ERROR(x) ((x) << S_BUNDLE_LEN_PAR_ERROR)
+#define F_BUNDLE_LEN_PAR_ERROR V_BUNDLE_LEN_PAR_ERROR(1U)
+
+#define S_C_PCMD_TOKEN_PAR_ERROR 0
+#define V_C_PCMD_TOKEN_PAR_ERROR(x) ((x) << S_C_PCMD_TOKEN_PAR_ERROR)
+#define F_C_PCMD_TOKEN_PAR_ERROR V_C_PCMD_TOKEN_PAR_ERROR(1U)
+
#define A_PM_TX_CH2_OSPI_DEFICIT_THRSHLD 0x10029
#define S_CH2_OSPI_DEFICIT_THRSHLD 0
@@ -30729,6 +39077,7 @@
#define V_CH2_OSPI_DEFICIT_THRSHLD(x) ((x) << S_CH2_OSPI_DEFICIT_THRSHLD)
#define G_CH2_OSPI_DEFICIT_THRSHLD(x) (((x) >> S_CH2_OSPI_DEFICIT_THRSHLD) & M_CH2_OSPI_DEFICIT_THRSHLD)
+#define A_PM_TX_PERR_CAUSE 0x10029
#define A_PM_TX_CH3_OSPI_DEFICIT_THRSHLD 0x1002a
#define S_CH3_OSPI_DEFICIT_THRSHLD 0
@@ -31462,6 +39811,7 @@
#define G_ADDR(x) (((x) >> S_ADDR) & M_ADDR)
#define A_MPS_PORT_TX_PAUSE_SOURCE_L 0x24
+#define A_MPS_VF_TX_MAC_DROP_PP 0x24
#define A_MPS_PORT_TX_PAUSE_SOURCE_H 0x28
#define A_MPS_PORT_PRTY_BUFFER_GROUP_MAP 0x2c
@@ -31547,6 +39897,24 @@
#define V_TXPRTY0(x) ((x) << S_TXPRTY0)
#define G_TXPRTY0(x) (((x) >> S_TXPRTY0) & M_TXPRTY0)
+#define A_MPS_PORT_PRTY_GROUP_MAP 0x34
+#define A_MPS_PORT_TRACE_MAX_CAPTURE_SIZE 0x38
+
+#define S_TX2RX 6
+#define M_TX2RX 0x7U
+#define V_TX2RX(x) ((x) << S_TX2RX)
+#define G_TX2RX(x) (((x) >> S_TX2RX) & M_TX2RX)
+
+#define S_MAC2MPS 3
+#define M_MAC2MPS 0x7U
+#define V_MAC2MPS(x) ((x) << S_MAC2MPS)
+#define G_MAC2MPS(x) (((x) >> S_MAC2MPS) & M_MAC2MPS)
+
+#define S_MPS2MAC 0
+#define M_MPS2MAC 0x7U
+#define V_MPS2MAC(x) ((x) << S_MPS2MAC)
+#define G_MPS2MAC(x) (((x) >> S_MPS2MAC) & M_MPS2MAC)
+
#define A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L 0x80
#define A_MPS_VF_STAT_TX_VF_BCAST_BYTES_H 0x84
#define A_MPS_VF_STAT_TX_VF_BCAST_FRAMES_L 0x88
@@ -31578,7 +39946,9 @@
#define A_MPS_VF_STAT_RX_VF_UCAST_FRAMES_L 0xf0
#define A_MPS_VF_STAT_RX_VF_UCAST_FRAMES_H 0xf4
#define A_MPS_VF_STAT_RX_VF_ERR_FRAMES_L 0xf8
+#define A_MPS_VF_STAT_RX_VF_ERR_DROP_FRAMES_L 0xf8
#define A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H 0xfc
+#define A_MPS_VF_STAT_RX_VF_ERR_DROP_FRAMES_H 0xfc
#define A_MPS_PORT_RX_CTL 0x100
#define S_NO_RPLCT_M 20
@@ -31682,6 +40052,26 @@
#define V_HASH_EN_MAC(x) ((x) << S_HASH_EN_MAC)
#define F_HASH_EN_MAC V_HASH_EN_MAC(1U)
+#define S_TRANS_ENCAP_EN 30
+#define V_TRANS_ENCAP_EN(x) ((x) << S_TRANS_ENCAP_EN)
+#define F_TRANS_ENCAP_EN V_TRANS_ENCAP_EN(1U)
+
+#define S_CRYPTO_DUMMY_PKT_CHK_EN 29
+#define V_CRYPTO_DUMMY_PKT_CHK_EN(x) ((x) << S_CRYPTO_DUMMY_PKT_CHK_EN)
+#define F_CRYPTO_DUMMY_PKT_CHK_EN V_CRYPTO_DUMMY_PKT_CHK_EN(1U)
+
+#define S_PASS_HPROM 28
+#define V_PASS_HPROM(x) ((x) << S_PASS_HPROM)
+#define F_PASS_HPROM V_PASS_HPROM(1U)
+
+#define S_PASS_PROM 27
+#define V_PASS_PROM(x) ((x) << S_PASS_PROM)
+#define F_PASS_PROM V_PASS_PROM(1U)
+
+#define S_ENCAP_ONLY_IF_OUTER_HIT 26
+#define V_ENCAP_ONLY_IF_OUTER_HIT(x) ((x) << S_ENCAP_ONLY_IF_OUTER_HIT)
+#define F_ENCAP_ONLY_IF_OUTER_HIT V_ENCAP_ONLY_IF_OUTER_HIT(1U)
+
#define A_MPS_PORT_RX_MTU 0x104
#define A_MPS_PORT_RX_PF_MAP 0x108
#define A_MPS_PORT_RX_VF_MAP0 0x10c
@@ -31924,6 +40314,23 @@
#define V_REPL_VECT_SEL(x) ((x) << S_REPL_VECT_SEL)
#define G_REPL_VECT_SEL(x) (((x) >> S_REPL_VECT_SEL) & M_REPL_VECT_SEL)
+#define A_MPS_PORT_MAC_RX_DROP_EN_PP 0x16c
+
+#define S_PRIO 0
+#define M_PRIO 0xffU
+#define V_PRIO(x) ((x) << S_PRIO)
+#define G_PRIO(x) (((x) >> S_PRIO) & M_PRIO)
+
+#define A_MPS_PORT_RX_INT_RSS_HASH 0x170
+#define A_MPS_PORT_RX_INT_RSS_CONTROL 0x174
+#define A_MPS_PORT_RX_CNT_DBG_CTL 0x178
+
+#define S_DBG_TYPE 0
+#define M_DBG_TYPE 0x1fU
+#define V_DBG_TYPE(x) ((x) << S_DBG_TYPE)
+#define G_DBG_TYPE(x) (((x) >> S_DBG_TYPE) & M_DBG_TYPE)
+
+#define A_MPS_PORT_RX_CNT_DBG 0x17c
#define A_MPS_PORT_TX_MAC_RELOAD_CH0 0x190
#define S_CREDIT 0
@@ -31984,6 +40391,10 @@
#define V_ON_PENDING(x) ((x) << S_ON_PENDING)
#define G_ON_PENDING(x) (((x) >> S_ON_PENDING) & M_ON_PENDING)
+#define A_MPS_PORT_TX_MAC_DROP_PP 0x1d4
+#define A_MPS_PORT_TX_LPBK_DROP_PP 0x1d8
+#define A_MPS_PORT_TX_MAC_DROP_CNT 0x1dc
+#define A_MPS_PORT_TX_LPBK_DROP_CNT 0x1e0
#define A_MPS_PORT_CLS_HASH_SRAM 0x200
#define S_VALID 20
@@ -32097,6 +40508,13 @@
#define V_TAG(x) ((x) << S_TAG)
#define G_TAG(x) (((x) >> S_TAG) & M_TAG)
+#define A_MPS_PF_TX_MAC_DROP_PP 0x2e4
+
+#define S_T7_DROPEN 0
+#define M_T7_DROPEN 0xffU
+#define V_T7_DROPEN(x) ((x) << S_T7_DROPEN)
+#define G_T7_DROPEN(x) (((x) >> S_T7_DROPEN) & M_T7_DROPEN)
+
#define A_MPS_PF_STAT_TX_PF_BCAST_BYTES_L 0x300
#define A_MPS_PF_STAT_TX_PF_BCAST_BYTES_H 0x304
#define A_MPS_PORT_CLS_HASH_CTL 0x304
@@ -32112,35 +40530,9 @@
#define V_PROMISCEN(x) ((x) << S_PROMISCEN)
#define F_PROMISCEN V_PROMISCEN(1U)
-#define S_T6_MULTILISTEN 16
-#define V_T6_MULTILISTEN(x) ((x) << S_T6_MULTILISTEN)
-#define F_T6_MULTILISTEN V_T6_MULTILISTEN(1U)
-
-#define S_T6_PRIORITY 13
-#define M_T6_PRIORITY 0x7U
-#define V_T6_PRIORITY(x) ((x) << S_T6_PRIORITY)
-#define G_T6_PRIORITY(x) (((x) >> S_T6_PRIORITY) & M_T6_PRIORITY)
-
-#define S_T6_REPLICATE 12
-#define V_T6_REPLICATE(x) ((x) << S_T6_REPLICATE)
-#define F_T6_REPLICATE V_T6_REPLICATE(1U)
-
-#define S_T6_PF 9
-#define M_T6_PF 0x7U
-#define V_T6_PF(x) ((x) << S_T6_PF)
-#define G_T6_PF(x) (((x) >> S_T6_PF) & M_T6_PF)
-
-#define S_T6_VF_VALID 8
-#define V_T6_VF_VALID(x) ((x) << S_T6_VF_VALID)
-#define F_T6_VF_VALID V_T6_VF_VALID(1U)
-
-#define S_T6_VF 0
-#define M_T6_VF 0xffU
-#define V_T6_VF(x) ((x) << S_T6_VF)
-#define G_T6_VF(x) (((x) >> S_T6_VF) & M_T6_VF)
-
#define A_MPS_PF_STAT_TX_PF_BCAST_FRAMES_H 0x30c
#define A_MPS_PORT_CLS_BMC_MAC_ADDR_L 0x30c
+#define A_MPS_PORT_CLS_BMC_MAC0_ADDR_L 0x30c
#define A_MPS_PF_STAT_TX_PF_MCAST_BYTES_L 0x310
#define A_MPS_PORT_CLS_BMC_MAC_ADDR_H 0x310
@@ -32156,6 +40548,7 @@
#define V_MATCHALL(x) ((x) << S_MATCHALL)
#define F_MATCHALL V_MATCHALL(1U)
+#define A_MPS_PORT_CLS_BMC_MAC0_ADDR_H 0x310
#define A_MPS_PF_STAT_TX_PF_MCAST_BYTES_H 0x314
#define A_MPS_PORT_CLS_BMC_VLAN 0x314
@@ -32167,6 +40560,7 @@
#define V_VLAN_VLD(x) ((x) << S_VLAN_VLD)
#define F_VLAN_VLD V_VLAN_VLD(1U)
+#define A_MPS_PORT_CLS_BMC_VLAN0 0x314
#define A_MPS_PF_STAT_TX_PF_MCAST_FRAMES_L 0x318
#define A_MPS_PORT_CLS_CTL 0x318
@@ -32218,6 +40612,18 @@
#define V_DMAC_TCAM_SEL(x) ((x) << S_DMAC_TCAM_SEL)
#define G_DMAC_TCAM_SEL(x) (((x) >> S_DMAC_TCAM_SEL) & M_DMAC_TCAM_SEL)
+#define S_SMAC_INDEX_EN 17
+#define V_SMAC_INDEX_EN(x) ((x) << S_SMAC_INDEX_EN)
+#define F_SMAC_INDEX_EN V_SMAC_INDEX_EN(1U)
+
+#define S_LPBK_TCAM2_HIT_PRIORITY 16
+#define V_LPBK_TCAM2_HIT_PRIORITY(x) ((x) << S_LPBK_TCAM2_HIT_PRIORITY)
+#define F_LPBK_TCAM2_HIT_PRIORITY V_LPBK_TCAM2_HIT_PRIORITY(1U)
+
+#define S_TCAM2_HIT_PRIORITY 15
+#define V_TCAM2_HIT_PRIORITY(x) ((x) << S_TCAM2_HIT_PRIORITY)
+#define F_TCAM2_HIT_PRIORITY V_TCAM2_HIT_PRIORITY(1U)
+
#define A_MPS_PF_STAT_TX_PF_MCAST_FRAMES_H 0x31c
#define A_MPS_PORT_CLS_NCSI_ETH_TYPE 0x31c
@@ -32238,14 +40644,23 @@
#define F_EN2 V_EN2(1U)
#define A_MPS_PF_STAT_TX_PF_UCAST_BYTES_H 0x324
+#define A_MPS_PORT_CLS_BMC_MAC1_ADDR_L 0x324
#define A_MPS_PF_STAT_TX_PF_UCAST_FRAMES_L 0x328
+#define A_MPS_PORT_CLS_BMC_MAC1_ADDR_H 0x328
#define A_MPS_PF_STAT_TX_PF_UCAST_FRAMES_H 0x32c
+#define A_MPS_PORT_CLS_BMC_MAC2_ADDR_L 0x32c
#define A_MPS_PF_STAT_TX_PF_OFFLOAD_BYTES_L 0x330
+#define A_MPS_PORT_CLS_BMC_MAC2_ADDR_H 0x330
#define A_MPS_PF_STAT_TX_PF_OFFLOAD_BYTES_H 0x334
+#define A_MPS_PORT_CLS_BMC_MAC3_ADDR_L 0x334
#define A_MPS_PF_STAT_TX_PF_OFFLOAD_FRAMES_L 0x338
+#define A_MPS_PORT_CLS_BMC_MAC3_ADDR_H 0x338
#define A_MPS_PF_STAT_TX_PF_OFFLOAD_FRAMES_H 0x33c
+#define A_MPS_PORT_CLS_BMC_VLAN1 0x33c
#define A_MPS_PF_STAT_RX_PF_BYTES_L 0x340
+#define A_MPS_PORT_CLS_BMC_VLAN2 0x340
#define A_MPS_PF_STAT_RX_PF_BYTES_H 0x344
+#define A_MPS_PORT_CLS_BMC_VLAN3 0x344
#define A_MPS_PF_STAT_RX_PF_FRAMES_L 0x348
#define A_MPS_PF_STAT_RX_PF_FRAMES_H 0x34c
#define A_MPS_PF_STAT_RX_PF_BCAST_BYTES_L 0x350
@@ -32261,7 +40676,9 @@
#define A_MPS_PF_STAT_RX_PF_UCAST_FRAMES_L 0x378
#define A_MPS_PF_STAT_RX_PF_UCAST_FRAMES_H 0x37c
#define A_MPS_PF_STAT_RX_PF_ERR_FRAMES_L 0x380
+#define A_MPS_PF_STAT_RX_PF_ERR_DROP_FRAMES_L 0x380
#define A_MPS_PF_STAT_RX_PF_ERR_FRAMES_H 0x384
+#define A_MPS_PF_STAT_RX_PF_ERR_DROP_FRAMES_H 0x384
#define A_MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
#define A_MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
#define A_MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408
@@ -32393,6 +40810,22 @@
#define A_MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
#define A_MPS_PORT_STAT_RX_PORT_MAC_ERROR_L 0x618
#define A_MPS_PORT_STAT_RX_PORT_MAC_ERROR_H 0x61c
+#define A_MPS_PORT_STAT_RX_PRIO_0_DROP_FRAME_L 0x620
+#define A_MPS_PORT_STAT_RX_PRIO_0_DROP_FRAME_H 0x624
+#define A_MPS_PORT_STAT_RX_PRIO_1_DROP_FRAME_L 0x628
+#define A_MPS_PORT_STAT_RX_PRIO_1_DROP_FRAME_H 0x62c
+#define A_MPS_PORT_STAT_RX_PRIO_2_DROP_FRAME_L 0x630
+#define A_MPS_PORT_STAT_RX_PRIO_2_DROP_FRAME_H 0x634
+#define A_MPS_PORT_STAT_RX_PRIO_3_DROP_FRAME_L 0x638
+#define A_MPS_PORT_STAT_RX_PRIO_3_DROP_FRAME_H 0x63c
+#define A_MPS_PORT_STAT_RX_PRIO_4_DROP_FRAME_L 0x640
+#define A_MPS_PORT_STAT_RX_PRIO_4_DROP_FRAME_H 0x644
+#define A_MPS_PORT_STAT_RX_PRIO_5_DROP_FRAME_L 0x648
+#define A_MPS_PORT_STAT_RX_PRIO_5_DROP_FRAME_H 0x64c
+#define A_MPS_PORT_STAT_RX_PRIO_6_DROP_FRAME_L 0x650
+#define A_MPS_PORT_STAT_RX_PRIO_6_DROP_FRAME_H 0x654
+#define A_MPS_PORT_STAT_RX_PRIO_7_DROP_FRAME_L 0x658
+#define A_MPS_PORT_STAT_RX_PRIO_7_DROP_FRAME_H 0x65c
#define A_MPS_CMN_CTL 0x9000
#define S_DETECT8023 3
@@ -32425,6 +40858,46 @@
#define V_SPEEDMODE(x) ((x) << S_SPEEDMODE)
#define G_SPEEDMODE(x) (((x) >> S_SPEEDMODE) & M_SPEEDMODE)
+#define S_PT1_SEL_CFG 21
+#define V_PT1_SEL_CFG(x) ((x) << S_PT1_SEL_CFG)
+#define F_PT1_SEL_CFG V_PT1_SEL_CFG(1U)
+
+#define S_BUG_42938_EN 20
+#define V_BUG_42938_EN(x) ((x) << S_BUG_42938_EN)
+#define F_BUG_42938_EN V_BUG_42938_EN(1U)
+
+#define S_NO_BYPASS_PAUSE 19
+#define V_NO_BYPASS_PAUSE(x) ((x) << S_NO_BYPASS_PAUSE)
+#define F_NO_BYPASS_PAUSE V_NO_BYPASS_PAUSE(1U)
+
+#define S_BYPASS_PAUSE 18
+#define V_BYPASS_PAUSE(x) ((x) << S_BYPASS_PAUSE)
+#define F_BYPASS_PAUSE V_BYPASS_PAUSE(1U)
+
+#define S_PBUS_EN 16
+#define M_PBUS_EN 0x3U
+#define V_PBUS_EN(x) ((x) << S_PBUS_EN)
+#define G_PBUS_EN(x) (((x) >> S_PBUS_EN) & M_PBUS_EN)
+
+#define S_INIC_EN 14
+#define M_INIC_EN 0x3U
+#define V_INIC_EN(x) ((x) << S_INIC_EN)
+#define G_INIC_EN(x) (((x) >> S_INIC_EN) & M_INIC_EN)
+
+#define S_SBA_EN 12
+#define M_SBA_EN 0x3U
+#define V_SBA_EN(x) ((x) << S_SBA_EN)
+#define G_SBA_EN(x) (((x) >> S_SBA_EN) & M_SBA_EN)
+
+#define S_BG2TP_MAP_MODE 11
+#define V_BG2TP_MAP_MODE(x) ((x) << S_BG2TP_MAP_MODE)
+#define F_BG2TP_MAP_MODE V_BG2TP_MAP_MODE(1U)
+
+#define S_MPS_LB_MODE 9
+#define M_MPS_LB_MODE 0x3U
+#define V_MPS_LB_MODE(x) ((x) << S_MPS_LB_MODE)
+#define G_MPS_LB_MODE(x) (((x) >> S_MPS_LB_MODE) & M_MPS_LB_MODE)
+
#define A_MPS_INT_ENABLE 0x9004
#define S_STATINTENB 5
@@ -32618,6 +41091,17 @@
#define A_MPS_T5_BUILD_REVISION 0x9078
#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH0 0x907c
+
+#define S_VALUE_1 16
+#define M_VALUE_1 0xffffU
+#define V_VALUE_1(x) ((x) << S_VALUE_1)
+#define G_VALUE_1(x) (((x) >> S_VALUE_1) & M_VALUE_1)
+
+#define S_VALUE_0 0
+#define M_VALUE_0 0xffffU
+#define V_VALUE_0(x) ((x) << S_VALUE_0)
+#define G_VALUE_0(x) (((x) >> S_VALUE_0) & M_VALUE_0)
+
#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH1 0x9080
#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH2 0x9084
#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH3 0x9088
@@ -32671,11 +41155,130 @@
#define G_T6_BASEADDR(x) (((x) >> S_T6_BASEADDR) & M_T6_BASEADDR)
#define A_MPS_FPGA_BIST_CFG_P1 0x9124
-
-#define S_T6_BASEADDR 0
-#define M_T6_BASEADDR 0xffffU
-#define V_T6_BASEADDR(x) ((x) << S_T6_BASEADDR)
-#define G_T6_BASEADDR(x) (((x) >> S_T6_BASEADDR) & M_T6_BASEADDR)
+#define A_MPS_FPGA_BIST_CFG_P2 0x9128
+#define A_MPS_FPGA_BIST_CFG_P3 0x912c
+#define A_MPS_INIC_CTL 0x9130
+
+#define S_T7_RD_WRN 16
+#define V_T7_RD_WRN(x) ((x) << S_T7_RD_WRN)
+#define F_T7_RD_WRN V_T7_RD_WRN(1U)
+
+#define A_MPS_INIC_DATA 0x9134
+#define A_MPS_TP_CSIDE_MUX_CTL_P2 0x9138
+#define A_MPS_TP_CSIDE_MUX_CTL_P3 0x913c
+#define A_MPS_RED_CTL 0x9140
+
+#define S_LPBK_SHIFT_0 28
+#define M_LPBK_SHIFT_0 0xfU
+#define V_LPBK_SHIFT_0(x) ((x) << S_LPBK_SHIFT_0)
+#define G_LPBK_SHIFT_0(x) (((x) >> S_LPBK_SHIFT_0) & M_LPBK_SHIFT_0)
+
+#define S_LPBK_SHIFT_1 24
+#define M_LPBK_SHIFT_1 0xfU
+#define V_LPBK_SHIFT_1(x) ((x) << S_LPBK_SHIFT_1)
+#define G_LPBK_SHIFT_1(x) (((x) >> S_LPBK_SHIFT_1) & M_LPBK_SHIFT_1)
+
+#define S_LPBK_SHIFT_2 20
+#define M_LPBK_SHIFT_2 0xfU
+#define V_LPBK_SHIFT_2(x) ((x) << S_LPBK_SHIFT_2)
+#define G_LPBK_SHIFT_2(x) (((x) >> S_LPBK_SHIFT_2) & M_LPBK_SHIFT_2)
+
+#define S_LPBK_SHIFT_3 16
+#define M_LPBK_SHIFT_3 0xfU
+#define V_LPBK_SHIFT_3(x) ((x) << S_LPBK_SHIFT_3)
+#define G_LPBK_SHIFT_3(x) (((x) >> S_LPBK_SHIFT_3) & M_LPBK_SHIFT_3)
+
+#define S_MAC_SHIFT_0 12
+#define M_MAC_SHIFT_0 0xfU
+#define V_MAC_SHIFT_0(x) ((x) << S_MAC_SHIFT_0)
+#define G_MAC_SHIFT_0(x) (((x) >> S_MAC_SHIFT_0) & M_MAC_SHIFT_0)
+
+#define S_MAC_SHIFT_1 8
+#define M_MAC_SHIFT_1 0xfU
+#define V_MAC_SHIFT_1(x) ((x) << S_MAC_SHIFT_1)
+#define G_MAC_SHIFT_1(x) (((x) >> S_MAC_SHIFT_1) & M_MAC_SHIFT_1)
+
+#define S_MAC_SHIFT_2 4
+#define M_MAC_SHIFT_2 0xfU
+#define V_MAC_SHIFT_2(x) ((x) << S_MAC_SHIFT_2)
+#define G_MAC_SHIFT_2(x) (((x) >> S_MAC_SHIFT_2) & M_MAC_SHIFT_2)
+
+#define S_MAC_SHIFT_3 0
+#define M_MAC_SHIFT_3 0xfU
+#define V_MAC_SHIFT_3(x) ((x) << S_MAC_SHIFT_3)
+#define G_MAC_SHIFT_3(x) (((x) >> S_MAC_SHIFT_3) & M_MAC_SHIFT_3)
+
+#define A_MPS_RED_EN 0x9144
+
+#define S_LPBK_EN3 7
+#define V_LPBK_EN3(x) ((x) << S_LPBK_EN3)
+#define F_LPBK_EN3 V_LPBK_EN3(1U)
+
+#define S_LPBK_EN2 6
+#define V_LPBK_EN2(x) ((x) << S_LPBK_EN2)
+#define F_LPBK_EN2 V_LPBK_EN2(1U)
+
+#define S_LPBK_EN1 5
+#define V_LPBK_EN1(x) ((x) << S_LPBK_EN1)
+#define F_LPBK_EN1 V_LPBK_EN1(1U)
+
+#define S_LPBK_EN0 4
+#define V_LPBK_EN0(x) ((x) << S_LPBK_EN0)
+#define F_LPBK_EN0 V_LPBK_EN0(1U)
+
+#define S_MAC_EN3 3
+#define V_MAC_EN3(x) ((x) << S_MAC_EN3)
+#define F_MAC_EN3 V_MAC_EN3(1U)
+
+#define S_MAC_EN2 2
+#define V_MAC_EN2(x) ((x) << S_MAC_EN2)
+#define F_MAC_EN2 V_MAC_EN2(1U)
+
+#define S_MAC_EN1 1
+#define V_MAC_EN1(x) ((x) << S_MAC_EN1)
+#define F_MAC_EN1 V_MAC_EN1(1U)
+
+#define S_MAC_EN0 0
+#define V_MAC_EN0(x) ((x) << S_MAC_EN0)
+#define F_MAC_EN0 V_MAC_EN0(1U)
+
+#define A_MPS_MAC0_RED_DROP_CNT_H 0x9148
+#define A_MPS_MAC0_RED_DROP_CNT_L 0x914c
+#define A_MPS_MAC1_RED_DROP_CNT_H 0x9150
+#define A_MPS_MAC1_RED_DROP_CNT_L 0x9154
+#define A_MPS_MAC2_RED_DROP_CNT_H 0x9158
+#define A_MPS_MAC2_RED_DROP_CNT_L 0x915c
+#define A_MPS_MAC3_RED_DROP_CNT_H 0x9160
+#define A_MPS_MAC3_RED_DROP_CNT_L 0x9164
+#define A_MPS_LPBK0_RED_DROP_CNT_H 0x9168
+#define A_MPS_LPBK0_RED_DROP_CNT_L 0x916c
+#define A_MPS_LPBK1_RED_DROP_CNT_H 0x9170
+#define A_MPS_LPBK1_RED_DROP_CNT_L 0x9174
+#define A_MPS_LPBK2_RED_DROP_CNT_H 0x9178
+#define A_MPS_LPBK2_RED_DROP_CNT_L 0x917c
+#define A_MPS_LPBK3_RED_DROP_CNT_H 0x9180
+#define A_MPS_LPBK3_RED_DROP_CNT_L 0x9184
+#define A_MPS_MAC_RED_PP_DROP_EN 0x9188
+
+#define S_T7_MAC3 24
+#define M_T7_MAC3 0xffU
+#define V_T7_MAC3(x) ((x) << S_T7_MAC3)
+#define G_T7_MAC3(x) (((x) >> S_T7_MAC3) & M_T7_MAC3)
+
+#define S_T7_MAC2 16
+#define M_T7_MAC2 0xffU
+#define V_T7_MAC2(x) ((x) << S_T7_MAC2)
+#define G_T7_MAC2(x) (((x) >> S_T7_MAC2) & M_T7_MAC2)
+
+#define S_T7_MAC1 8
+#define M_T7_MAC1 0xffU
+#define V_T7_MAC1(x) ((x) << S_T7_MAC1)
+#define G_T7_MAC1(x) (((x) >> S_T7_MAC1) & M_T7_MAC1)
+
+#define S_T7_MAC0 0
+#define M_T7_MAC0 0xffU
+#define V_T7_MAC0(x) ((x) << S_T7_MAC0)
+#define G_T7_MAC0(x) (((x) >> S_T7_MAC0) & M_T7_MAC0)
#define A_MPS_TX_PRTY_SEL 0x9400
@@ -32714,6 +41317,26 @@
#define V_NCSI_SOURCE(x) ((x) << S_NCSI_SOURCE)
#define G_NCSI_SOURCE(x) (((x) >> S_NCSI_SOURCE) & M_NCSI_SOURCE)
+#define S_T7_CH4_PRTY 16
+#define M_T7_CH4_PRTY 0x7U
+#define V_T7_CH4_PRTY(x) ((x) << S_T7_CH4_PRTY)
+#define G_T7_CH4_PRTY(x) (((x) >> S_T7_CH4_PRTY) & M_T7_CH4_PRTY)
+
+#define S_T7_CH3_PRTY 13
+#define M_T7_CH3_PRTY 0x7U
+#define V_T7_CH3_PRTY(x) ((x) << S_T7_CH3_PRTY)
+#define G_T7_CH3_PRTY(x) (((x) >> S_T7_CH3_PRTY) & M_T7_CH3_PRTY)
+
+#define S_T7_CH2_PRTY 10
+#define M_T7_CH2_PRTY 0x7U
+#define V_T7_CH2_PRTY(x) ((x) << S_T7_CH2_PRTY)
+#define G_T7_CH2_PRTY(x) (((x) >> S_T7_CH2_PRTY) & M_T7_CH2_PRTY)
+
+#define S_T7_CH1_PRTY 7
+#define M_T7_CH1_PRTY 0x7U
+#define V_T7_CH1_PRTY(x) ((x) << S_T7_CH1_PRTY)
+#define G_T7_CH1_PRTY(x) (((x) >> S_T7_CH1_PRTY) & M_T7_CH1_PRTY)
+
#define A_MPS_TX_INT_ENABLE 0x9404
#define S_PORTERR 16
@@ -32751,9 +41374,52 @@
#define V_TPFIFO(x) ((x) << S_TPFIFO)
#define G_TPFIFO(x) (((x) >> S_TPFIFO) & M_TPFIFO)
+#define S_T7_PORTERR 28
+#define V_T7_PORTERR(x) ((x) << S_T7_PORTERR)
+#define F_T7_PORTERR V_T7_PORTERR(1U)
+
+#define S_T7_FRMERR 27
+#define V_T7_FRMERR(x) ((x) << S_T7_FRMERR)
+#define F_T7_FRMERR V_T7_FRMERR(1U)
+
+#define S_T7_SECNTERR 26
+#define V_T7_SECNTERR(x) ((x) << S_T7_SECNTERR)
+#define F_T7_SECNTERR V_T7_SECNTERR(1U)
+
+#define S_T7_BUBBLE 25
+#define V_T7_BUBBLE(x) ((x) << S_T7_BUBBLE)
+#define F_T7_BUBBLE V_T7_BUBBLE(1U)
+
+#define S_TXTOKENFIFO 15
+#define M_TXTOKENFIFO 0x3ffU
+#define V_TXTOKENFIFO(x) ((x) << S_TXTOKENFIFO)
+#define G_TXTOKENFIFO(x) (((x) >> S_TXTOKENFIFO) & M_TXTOKENFIFO)
+
+#define S_PERR_TP2MPS_TFIFO 13
+#define M_PERR_TP2MPS_TFIFO 0x3U
+#define V_PERR_TP2MPS_TFIFO(x) ((x) << S_PERR_TP2MPS_TFIFO)
+#define G_PERR_TP2MPS_TFIFO(x) (((x) >> S_PERR_TP2MPS_TFIFO) & M_PERR_TP2MPS_TFIFO)
+
#define A_MPS_TX_INT_CAUSE 0x9408
#define A_MPS_TX_NCSI2MPS_CNT 0x940c
#define A_MPS_TX_PERR_ENABLE 0x9410
+
+#define S_PORTERRINT 28
+#define V_PORTERRINT(x) ((x) << S_PORTERRINT)
+#define F_PORTERRINT V_PORTERRINT(1U)
+
+#define S_FRAMINGERRINT 27
+#define V_FRAMINGERRINT(x) ((x) << S_FRAMINGERRINT)
+#define F_FRAMINGERRINT V_FRAMINGERRINT(1U)
+
+#define S_SECNTERRINT 26
+#define V_SECNTERRINT(x) ((x) << S_SECNTERRINT)
+#define F_SECNTERRINT V_SECNTERRINT(1U)
+
+#define S_BUBBLEERRINT 25
+#define V_BUBBLEERRINT(x) ((x) << S_BUBBLEERRINT)
+#define F_BUBBLEERRINT V_BUBBLEERRINT(1U)
+
#define A_MPS_TX_PERR_INJECT 0x9414
#define S_MPSTXMEMSEL 1
@@ -33481,6 +42147,41 @@
#define F_TXINCH0_CGEN V_TXINCH0_CGEN(1U)
#define A_MPS_TX_CGEN_DYNAMIC 0x9470
+#define A_MPS_TX2RX_CH_MAP 0x9474
+
+#define S_ENABLELBK_CH3 3
+#define V_ENABLELBK_CH3(x) ((x) << S_ENABLELBK_CH3)
+#define F_ENABLELBK_CH3 V_ENABLELBK_CH3(1U)
+
+#define S_ENABLELBK_CH2 2
+#define V_ENABLELBK_CH2(x) ((x) << S_ENABLELBK_CH2)
+#define F_ENABLELBK_CH2 V_ENABLELBK_CH2(1U)
+
+#define S_ENABLELBK_CH1 1
+#define V_ENABLELBK_CH1(x) ((x) << S_ENABLELBK_CH1)
+#define F_ENABLELBK_CH1 V_ENABLELBK_CH1(1U)
+
+#define S_ENABLELBK_CH0 0
+#define V_ENABLELBK_CH0(x) ((x) << S_ENABLELBK_CH0)
+#define F_ENABLELBK_CH0 V_ENABLELBK_CH0(1U)
+
+#define A_MPS_TX_DBG_CNT_CTL 0x9478
+
+#define S_DBG_CNT_CTL 0
+#define M_DBG_CNT_CTL 0xffU
+#define V_DBG_CNT_CTL(x) ((x) << S_DBG_CNT_CTL)
+#define G_DBG_CNT_CTL(x) (((x) >> S_DBG_CNT_CTL) & M_DBG_CNT_CTL)
+
+#define A_MPS_TX_DBG_CNT 0x947c
+#define A_MPS_TX_INT2_ENABLE 0x9498
+#define A_MPS_TX_INT2_CAUSE 0x949c
+#define A_MPS_TX_PERR2_ENABLE 0x94a0
+#define A_MPS_TX_INT3_ENABLE 0x94a4
+#define A_MPS_TX_INT3_CAUSE 0x94a8
+#define A_MPS_TX_PERR3_ENABLE 0x94ac
+#define A_MPS_TX_INT4_ENABLE 0x94b0
+#define A_MPS_TX_INT4_CAUSE 0x94b4
+#define A_MPS_TX_PERR4_ENABLE 0x94b8
#define A_MPS_STAT_CTL 0x9600
#define S_COUNTVFINPF 1
@@ -33810,6 +42511,7 @@
#define A_MPS_TRC_RSS_HASH 0x9804
#define A_MPS_TRC_FILTER0_RSS_HASH 0x9804
+#define A_T7_MPS_TRC_PERR_INJECT 0x9804
#define A_MPS_TRC_RSS_CONTROL 0x9808
#define S_RSSCONTROL 16
@@ -33939,6 +42641,20 @@
#define V_FILTMEM(x) ((x) << S_FILTMEM)
#define G_FILTMEM(x) (((x) >> S_FILTMEM) & M_FILTMEM)
+#define S_T7_MISCPERR 16
+#define V_T7_MISCPERR(x) ((x) << S_T7_MISCPERR)
+#define F_T7_MISCPERR V_T7_MISCPERR(1U)
+
+#define S_T7_PKTFIFO 8
+#define M_T7_PKTFIFO 0xffU
+#define V_T7_PKTFIFO(x) ((x) << S_T7_PKTFIFO)
+#define G_T7_PKTFIFO(x) (((x) >> S_T7_PKTFIFO) & M_T7_PKTFIFO)
+
+#define S_T7_FILTMEM 0
+#define M_T7_FILTMEM 0xffU
+#define V_T7_FILTMEM(x) ((x) << S_T7_FILTMEM)
+#define G_T7_FILTMEM(x) (((x) >> S_T7_FILTMEM) & M_T7_FILTMEM)
+
#define A_MPS_TRC_INT_ENABLE 0x9858
#define S_TRCPLERRENB 9
@@ -33961,6 +42677,7 @@
#define A_MPS_TRC_FILTER2_RSS_HASH 0x9ff8
#define A_MPS_TRC_FILTER2_RSS_CONTROL 0x9ffc
#define A_MPS_TRC_FILTER3_RSS_HASH 0xa000
+#define A_MPS_TRC_FILTER4_MATCH 0xa000
#define A_MPS_TRC_FILTER3_RSS_CONTROL 0xa004
#define A_MPS_T5_TRC_RSS_HASH 0xa008
#define A_MPS_T5_TRC_RSS_CONTROL 0xa00c
@@ -34043,125 +42760,8 @@
#define G_T6_VFFILTDATA(x) (((x) >> S_T6_VFFILTDATA) & M_T6_VFFILTDATA)
#define A_MPS_TRC_VF_OFF_FILTER_1 0xa014
-
-#define S_T6_TRCMPS2TP_MACONLY 22
-#define V_T6_TRCMPS2TP_MACONLY(x) ((x) << S_T6_TRCMPS2TP_MACONLY)
-#define F_T6_TRCMPS2TP_MACONLY V_T6_TRCMPS2TP_MACONLY(1U)
-
-#define S_T6_TRCALLMPS2TP 21
-#define V_T6_TRCALLMPS2TP(x) ((x) << S_T6_TRCALLMPS2TP)
-#define F_T6_TRCALLMPS2TP V_T6_TRCALLMPS2TP(1U)
-
-#define S_T6_TRCALLTP2MPS 20
-#define V_T6_TRCALLTP2MPS(x) ((x) << S_T6_TRCALLTP2MPS)
-#define F_T6_TRCALLTP2MPS V_T6_TRCALLTP2MPS(1U)
-
-#define S_T6_TRCALLVF 19
-#define V_T6_TRCALLVF(x) ((x) << S_T6_TRCALLVF)
-#define F_T6_TRCALLVF V_T6_TRCALLVF(1U)
-
-#define S_T6_TRC_OFLD_EN 18
-#define V_T6_TRC_OFLD_EN(x) ((x) << S_T6_TRC_OFLD_EN)
-#define F_T6_TRC_OFLD_EN V_T6_TRC_OFLD_EN(1U)
-
-#define S_T6_VFFILTEN 17
-#define V_T6_VFFILTEN(x) ((x) << S_T6_VFFILTEN)
-#define F_T6_VFFILTEN V_T6_VFFILTEN(1U)
-
-#define S_T6_VFFILTMASK 9
-#define M_T6_VFFILTMASK 0xffU
-#define V_T6_VFFILTMASK(x) ((x) << S_T6_VFFILTMASK)
-#define G_T6_VFFILTMASK(x) (((x) >> S_T6_VFFILTMASK) & M_T6_VFFILTMASK)
-
-#define S_T6_VFFILTVALID 8
-#define V_T6_VFFILTVALID(x) ((x) << S_T6_VFFILTVALID)
-#define F_T6_VFFILTVALID V_T6_VFFILTVALID(1U)
-
-#define S_T6_VFFILTDATA 0
-#define M_T6_VFFILTDATA 0xffU
-#define V_T6_VFFILTDATA(x) ((x) << S_T6_VFFILTDATA)
-#define G_T6_VFFILTDATA(x) (((x) >> S_T6_VFFILTDATA) & M_T6_VFFILTDATA)
-
#define A_MPS_TRC_VF_OFF_FILTER_2 0xa018
-
-#define S_T6_TRCMPS2TP_MACONLY 22
-#define V_T6_TRCMPS2TP_MACONLY(x) ((x) << S_T6_TRCMPS2TP_MACONLY)
-#define F_T6_TRCMPS2TP_MACONLY V_T6_TRCMPS2TP_MACONLY(1U)
-
-#define S_T6_TRCALLMPS2TP 21
-#define V_T6_TRCALLMPS2TP(x) ((x) << S_T6_TRCALLMPS2TP)
-#define F_T6_TRCALLMPS2TP V_T6_TRCALLMPS2TP(1U)
-
-#define S_T6_TRCALLTP2MPS 20
-#define V_T6_TRCALLTP2MPS(x) ((x) << S_T6_TRCALLTP2MPS)
-#define F_T6_TRCALLTP2MPS V_T6_TRCALLTP2MPS(1U)
-
-#define S_T6_TRCALLVF 19
-#define V_T6_TRCALLVF(x) ((x) << S_T6_TRCALLVF)
-#define F_T6_TRCALLVF V_T6_TRCALLVF(1U)
-
-#define S_T6_TRC_OFLD_EN 18
-#define V_T6_TRC_OFLD_EN(x) ((x) << S_T6_TRC_OFLD_EN)
-#define F_T6_TRC_OFLD_EN V_T6_TRC_OFLD_EN(1U)
-
-#define S_T6_VFFILTEN 17
-#define V_T6_VFFILTEN(x) ((x) << S_T6_VFFILTEN)
-#define F_T6_VFFILTEN V_T6_VFFILTEN(1U)
-
-#define S_T6_VFFILTMASK 9
-#define M_T6_VFFILTMASK 0xffU
-#define V_T6_VFFILTMASK(x) ((x) << S_T6_VFFILTMASK)
-#define G_T6_VFFILTMASK(x) (((x) >> S_T6_VFFILTMASK) & M_T6_VFFILTMASK)
-
-#define S_T6_VFFILTVALID 8
-#define V_T6_VFFILTVALID(x) ((x) << S_T6_VFFILTVALID)
-#define F_T6_VFFILTVALID V_T6_VFFILTVALID(1U)
-
-#define S_T6_VFFILTDATA 0
-#define M_T6_VFFILTDATA 0xffU
-#define V_T6_VFFILTDATA(x) ((x) << S_T6_VFFILTDATA)
-#define G_T6_VFFILTDATA(x) (((x) >> S_T6_VFFILTDATA) & M_T6_VFFILTDATA)
-
#define A_MPS_TRC_VF_OFF_FILTER_3 0xa01c
-
-#define S_T6_TRCMPS2TP_MACONLY 22
-#define V_T6_TRCMPS2TP_MACONLY(x) ((x) << S_T6_TRCMPS2TP_MACONLY)
-#define F_T6_TRCMPS2TP_MACONLY V_T6_TRCMPS2TP_MACONLY(1U)
-
-#define S_T6_TRCALLMPS2TP 21
-#define V_T6_TRCALLMPS2TP(x) ((x) << S_T6_TRCALLMPS2TP)
-#define F_T6_TRCALLMPS2TP V_T6_TRCALLMPS2TP(1U)
-
-#define S_T6_TRCALLTP2MPS 20
-#define V_T6_TRCALLTP2MPS(x) ((x) << S_T6_TRCALLTP2MPS)
-#define F_T6_TRCALLTP2MPS V_T6_TRCALLTP2MPS(1U)
-
-#define S_T6_TRCALLVF 19
-#define V_T6_TRCALLVF(x) ((x) << S_T6_TRCALLVF)
-#define F_T6_TRCALLVF V_T6_TRCALLVF(1U)
-
-#define S_T6_TRC_OFLD_EN 18
-#define V_T6_TRC_OFLD_EN(x) ((x) << S_T6_TRC_OFLD_EN)
-#define F_T6_TRC_OFLD_EN V_T6_TRC_OFLD_EN(1U)
-
-#define S_T6_VFFILTEN 17
-#define V_T6_VFFILTEN(x) ((x) << S_T6_VFFILTEN)
-#define F_T6_VFFILTEN V_T6_VFFILTEN(1U)
-
-#define S_T6_VFFILTMASK 9
-#define M_T6_VFFILTMASK 0xffU
-#define V_T6_VFFILTMASK(x) ((x) << S_T6_VFFILTMASK)
-#define G_T6_VFFILTMASK(x) (((x) >> S_T6_VFFILTMASK) & M_T6_VFFILTMASK)
-
-#define S_T6_VFFILTVALID 8
-#define V_T6_VFFILTVALID(x) ((x) << S_T6_VFFILTVALID)
-#define F_T6_VFFILTVALID V_T6_VFFILTVALID(1U)
-
-#define S_T6_VFFILTDATA 0
-#define M_T6_VFFILTDATA 0xffU
-#define V_T6_VFFILTDATA(x) ((x) << S_T6_VFFILTDATA)
-#define G_T6_VFFILTDATA(x) (((x) >> S_T6_VFFILTDATA) & M_T6_VFFILTDATA)
-
#define A_MPS_TRC_CGEN 0xa020
#define S_MPSTRCCGEN 0
@@ -34169,6 +42769,129 @@
#define V_MPSTRCCGEN(x) ((x) << S_MPSTRCCGEN)
#define G_MPSTRCCGEN(x) (((x) >> S_MPSTRCCGEN) & M_MPSTRCCGEN)
+#define A_MPS_TRC_FILTER4_DONT_CARE 0xa080
+#define A_MPS_TRC_FILTER5_MATCH 0xa100
+#define A_MPS_TRC_FILTER5_DONT_CARE 0xa180
+#define A_MPS_TRC_FILTER6_MATCH 0xa200
+#define A_MPS_TRC_FILTER6_DONT_CARE 0xa280
+#define A_MPS_TRC_FILTER7_MATCH 0xa300
+#define A_MPS_TRC_FILTER7_DONT_CARE 0xa380
+#define A_T7_MPS_TRC_FILTER0_RSS_HASH 0xa3f0
+#define A_T7_MPS_TRC_FILTER0_RSS_CONTROL 0xa3f4
+#define A_T7_MPS_TRC_FILTER1_RSS_HASH 0xa3f8
+#define A_T7_MPS_TRC_FILTER1_RSS_CONTROL 0xa3fc
+#define A_T7_MPS_TRC_FILTER2_RSS_HASH 0xa400
+#define A_T7_MPS_TRC_FILTER2_RSS_CONTROL 0xa404
+#define A_T7_MPS_TRC_FILTER3_RSS_HASH 0xa408
+#define A_T7_MPS_TRC_FILTER3_RSS_CONTROL 0xa40c
+#define A_MPS_TRC_FILTER4_RSS_HASH 0xa410
+#define A_MPS_TRC_FILTER4_RSS_CONTROL 0xa414
+#define A_MPS_TRC_FILTER5_RSS_HASH 0xa418
+#define A_MPS_TRC_FILTER5_RSS_CONTROL 0xa41c
+#define A_MPS_TRC_FILTER6_RSS_HASH 0xa420
+#define A_MPS_TRC_FILTER6_RSS_CONTROL 0xa424
+#define A_MPS_TRC_FILTER7_RSS_HASH 0xa428
+#define A_MPS_TRC_FILTER7_RSS_CONTROL 0xa42c
+#define A_T7_MPS_T5_TRC_RSS_HASH 0xa430
+#define A_T7_MPS_T5_TRC_RSS_CONTROL 0xa434
+#define A_T7_MPS_TRC_VF_OFF_FILTER_0 0xa438
+#define A_T7_MPS_TRC_VF_OFF_FILTER_1 0xa43c
+#define A_T7_MPS_TRC_VF_OFF_FILTER_2 0xa440
+#define A_T7_MPS_TRC_VF_OFF_FILTER_3 0xa444
+#define A_MPS_TRC_VF_OFF_FILTER_4 0xa448
+#define A_MPS_TRC_VF_OFF_FILTER_5 0xa44c
+#define A_MPS_TRC_VF_OFF_FILTER_6 0xa450
+#define A_MPS_TRC_VF_OFF_FILTER_7 0xa454
+#define A_T7_MPS_TRC_CGEN 0xa458
+
+#define S_T7_MPSTRCCGEN 0
+#define M_T7_MPSTRCCGEN 0xffU
+#define V_T7_MPSTRCCGEN(x) ((x) << S_T7_MPSTRCCGEN)
+#define G_T7_MPSTRCCGEN(x) (((x) >> S_T7_MPSTRCCGEN) & M_T7_MPSTRCCGEN)
+
+#define A_T7_MPS_TRC_FILTER_MATCH_CTL_A 0xa460
+#define A_T7_MPS_TRC_FILTER_MATCH_CTL_B 0xa480
+#define A_T7_MPS_TRC_FILTER_RUNT_CTL 0xa4a0
+#define A_T7_MPS_TRC_FILTER_DROP 0xa4c0
+#define A_T7_MPS_TRC_INT_ENABLE 0xa4e0
+
+#define S_T7_TRCPLERRENB 17
+#define V_T7_TRCPLERRENB(x) ((x) << S_T7_TRCPLERRENB)
+#define F_T7_TRCPLERRENB V_T7_TRCPLERRENB(1U)
+
+#define A_T7_MPS_TRC_INT_CAUSE 0xa4e4
+#define A_T7_MPS_TRC_TIMESTAMP_L 0xa4e8
+#define A_T7_MPS_TRC_TIMESTAMP_H 0xa4ec
+#define A_MPS_TRC_PERR_ENABLE2 0xa4f0
+
+#define S_TRC_TF_ECC 24
+#define M_TRC_TF_ECC 0xffU
+#define V_TRC_TF_ECC(x) ((x) << S_TRC_TF_ECC)
+#define G_TRC_TF_ECC(x) (((x) >> S_TRC_TF_ECC) & M_TRC_TF_ECC)
+
+#define S_MPS2MAC_CONV_TRC_CERR 22
+#define M_MPS2MAC_CONV_TRC_CERR 0x3U
+#define V_MPS2MAC_CONV_TRC_CERR(x) ((x) << S_MPS2MAC_CONV_TRC_CERR)
+#define G_MPS2MAC_CONV_TRC_CERR(x) (((x) >> S_MPS2MAC_CONV_TRC_CERR) & M_MPS2MAC_CONV_TRC_CERR)
+
+#define S_MPS2MAC_CONV_TRC 18
+#define M_MPS2MAC_CONV_TRC 0xfU
+#define V_MPS2MAC_CONV_TRC(x) ((x) << S_MPS2MAC_CONV_TRC)
+#define G_MPS2MAC_CONV_TRC(x) (((x) >> S_MPS2MAC_CONV_TRC) & M_MPS2MAC_CONV_TRC)
+
+#define S_TF0_PERR_1 17
+#define V_TF0_PERR_1(x) ((x) << S_TF0_PERR_1)
+#define F_TF0_PERR_1 V_TF0_PERR_1(1U)
+
+#define S_TF1_PERR_1 16
+#define V_TF1_PERR_1(x) ((x) << S_TF1_PERR_1)
+#define F_TF1_PERR_1 V_TF1_PERR_1(1U)
+
+#define S_TF2_PERR_1 15
+#define V_TF2_PERR_1(x) ((x) << S_TF2_PERR_1)
+#define F_TF2_PERR_1 V_TF2_PERR_1(1U)
+
+#define S_TF3_PERR_1 14
+#define V_TF3_PERR_1(x) ((x) << S_TF3_PERR_1)
+#define F_TF3_PERR_1 V_TF3_PERR_1(1U)
+
+#define S_TF4_PERR_1 13
+#define V_TF4_PERR_1(x) ((x) << S_TF4_PERR_1)
+#define F_TF4_PERR_1 V_TF4_PERR_1(1U)
+
+#define S_TF0_PERR_0 12
+#define V_TF0_PERR_0(x) ((x) << S_TF0_PERR_0)
+#define F_TF0_PERR_0 V_TF0_PERR_0(1U)
+
+#define S_TF1_PERR_0 11
+#define V_TF1_PERR_0(x) ((x) << S_TF1_PERR_0)
+#define F_TF1_PERR_0 V_TF1_PERR_0(1U)
+
+#define S_TF2_PERR_0 10
+#define V_TF2_PERR_0(x) ((x) << S_TF2_PERR_0)
+#define F_TF2_PERR_0 V_TF2_PERR_0(1U)
+
+#define S_TF3_PERR_0 9
+#define V_TF3_PERR_0(x) ((x) << S_TF3_PERR_0)
+#define F_TF3_PERR_0 V_TF3_PERR_0(1U)
+
+#define S_TF4_PERR_0 8
+#define V_TF4_PERR_0(x) ((x) << S_TF4_PERR_0)
+#define F_TF4_PERR_0 V_TF4_PERR_0(1U)
+
+#define S_PERR_TF_IN_CTL 0
+#define M_PERR_TF_IN_CTL 0xffU
+#define V_PERR_TF_IN_CTL(x) ((x) << S_PERR_TF_IN_CTL)
+#define G_PERR_TF_IN_CTL(x) (((x) >> S_PERR_TF_IN_CTL) & M_PERR_TF_IN_CTL)
+
+#define A_MPS_TRC_INT_ENABLE2 0xa4f4
+#define A_MPS_TRC_INT_CAUSE2 0xa4f8
+
+#define S_T7_TRC_TF_ECC 22
+#define M_T7_TRC_TF_ECC 0xffU
+#define V_T7_TRC_TF_ECC(x) ((x) << S_T7_TRC_TF_ECC)
+#define G_T7_TRC_TF_ECC(x) (((x) >> S_T7_TRC_TF_ECC) & M_T7_TRC_TF_ECC)
+
#define A_MPS_CLS_CTL 0xd000
#define S_MEMWRITEFAULT 4
@@ -34246,12 +42969,24 @@
#define V_MATCHSRAM(x) ((x) << S_MATCHSRAM)
#define F_MATCHSRAM V_MATCHSRAM(1U)
+#define S_CIM2MPS_INTF_PAR 4
+#define V_CIM2MPS_INTF_PAR(x) ((x) << S_CIM2MPS_INTF_PAR)
+#define F_CIM2MPS_INTF_PAR V_CIM2MPS_INTF_PAR(1U)
+
+#define S_TCAM_CRC_SRAM 3
+#define V_TCAM_CRC_SRAM(x) ((x) << S_TCAM_CRC_SRAM)
+#define F_TCAM_CRC_SRAM V_TCAM_CRC_SRAM(1U)
+
#define A_MPS_CLS_INT_ENABLE 0xd024
#define S_PLERRENB 3
#define V_PLERRENB(x) ((x) << S_PLERRENB)
#define F_PLERRENB V_PLERRENB(1U)
+#define S_T7_PLERRENB 5
+#define V_T7_PLERRENB(x) ((x) << S_T7_PLERRENB)
+#define F_T7_PLERRENB V_T7_PLERRENB(1U)
+
#define A_MPS_CLS_INT_CAUSE 0xd028
#define A_MPS_CLS_PL_TEST_DATA_L 0xd02c
#define A_MPS_CLS_PL_TEST_DATA_H 0xd030
@@ -34314,6 +43049,25 @@
#define V_T6_CLS_VF(x) ((x) << S_T6_CLS_VF)
#define G_T6_CLS_VF(x) (((x) >> S_T6_CLS_VF) & M_T6_CLS_VF)
+#define S_T7_CLS_SPARE 30
+#define M_T7_CLS_SPARE 0x3U
+#define V_T7_CLS_SPARE(x) ((x) << S_T7_CLS_SPARE)
+#define G_T7_CLS_SPARE(x) (((x) >> S_T7_CLS_SPARE) & M_T7_CLS_SPARE)
+
+#define S_T7_1_CLS_PRIORITY 27
+#define M_T7_1_CLS_PRIORITY 0x7U
+#define V_T7_1_CLS_PRIORITY(x) ((x) << S_T7_1_CLS_PRIORITY)
+#define G_T7_1_CLS_PRIORITY(x) (((x) >> S_T7_1_CLS_PRIORITY) & M_T7_1_CLS_PRIORITY)
+
+#define S_T7_1_CLS_REPLICATE 26
+#define V_T7_1_CLS_REPLICATE(x) ((x) << S_T7_1_CLS_REPLICATE)
+#define F_T7_1_CLS_REPLICATE V_T7_1_CLS_REPLICATE(1U)
+
+#define S_T7_1_CLS_INDEX 15
+#define M_T7_1_CLS_INDEX 0x7ffU
+#define V_T7_1_CLS_INDEX(x) ((x) << S_T7_1_CLS_INDEX)
+#define G_T7_1_CLS_INDEX(x) (((x) >> S_T7_1_CLS_INDEX) & M_T7_1_CLS_INDEX)
+
#define A_MPS_CLS_PL_TEST_CTL 0xd038
#define S_PLTESTCTL 0
@@ -34327,12 +43081,26 @@
#define F_PRTBMCCTL V_PRTBMCCTL(1U)
#define A_MPS_CLS_MATCH_CNT_TCAM 0xd100
+#define A_MPS_CLS0_MATCH_CNT_TCAM 0xd100
#define A_MPS_CLS_MATCH_CNT_HASH 0xd104
+#define A_MPS_CLS0_MATCH_CNT_HASH 0xd104
#define A_MPS_CLS_MATCH_CNT_BCAST 0xd108
+#define A_MPS_CLS0_MATCH_CNT_BCAST 0xd108
#define A_MPS_CLS_MATCH_CNT_BMC 0xd10c
+#define A_MPS_CLS0_MATCH_CNT_BMC 0xd10c
#define A_MPS_CLS_MATCH_CNT_PROM 0xd110
+#define A_MPS_CLS0_MATCH_CNT_PROM 0xd110
#define A_MPS_CLS_MATCH_CNT_HPROM 0xd114
+#define A_MPS_CLS0_MATCH_CNT_HPROM 0xd114
#define A_MPS_CLS_MISS_CNT 0xd118
+#define A_MPS_CLS0_MISS_CNT 0xd118
+#define A_MPS_CLS1_MATCH_CNT_TCAM 0xd11c
+#define A_MPS_CLS1_MATCH_CNT_HASH 0xd120
+#define A_MPS_CLS1_MATCH_CNT_BCAST 0xd124
+#define A_MPS_CLS1_MATCH_CNT_BMC 0xd128
+#define A_MPS_CLS1_MATCH_CNT_PROM 0xd12c
+#define A_MPS_CLS1_MATCH_CNT_HPROM 0xd130
+#define A_MPS_CLS1_MISS_CNT 0xd134
#define A_MPS_CLS_REQUEST_TRACE_MAC_DA_L 0xd200
#define A_MPS_CLS_REQUEST_TRACE_MAC_DA_H 0xd204
@@ -34428,6 +43196,15 @@
#define V_CLSTRCVF(x) ((x) << S_CLSTRCVF)
#define G_CLSTRCVF(x) (((x) >> S_CLSTRCVF) & M_CLSTRCVF)
+#define S_T7_CLSTRCMATCH 23
+#define V_T7_CLSTRCMATCH(x) ((x) << S_T7_CLSTRCMATCH)
+#define F_T7_CLSTRCMATCH V_T7_CLSTRCMATCH(1U)
+
+#define S_T7_CLSTRCINDEX 12
+#define M_T7_CLSTRCINDEX 0x7ffU
+#define V_T7_CLSTRCINDEX(x) ((x) << S_T7_CLSTRCINDEX)
+#define G_T7_CLSTRCINDEX(x) (((x) >> S_T7_CLSTRCINDEX) & M_T7_CLSTRCINDEX)
+
#define A_MPS_CLS_VLAN_TABLE 0xdfc0
#define S_VLAN_MASK 16
@@ -34536,24 +43313,6 @@
#define V_T6_SRAM_VLD(x) ((x) << S_T6_SRAM_VLD)
#define F_T6_SRAM_VLD V_T6_SRAM_VLD(1U)
-#define S_T6_REPLICATE 12
-#define V_T6_REPLICATE(x) ((x) << S_T6_REPLICATE)
-#define F_T6_REPLICATE V_T6_REPLICATE(1U)
-
-#define S_T6_PF 9
-#define M_T6_PF 0x7U
-#define V_T6_PF(x) ((x) << S_T6_PF)
-#define G_T6_PF(x) (((x) >> S_T6_PF) & M_T6_PF)
-
-#define S_T6_VF_VALID 8
-#define V_T6_VF_VALID(x) ((x) << S_T6_VF_VALID)
-#define F_T6_VF_VALID V_T6_VF_VALID(1U)
-
-#define S_T6_VF 0
-#define M_T6_VF 0xffU
-#define V_T6_VF(x) ((x) << S_T6_VF)
-#define G_T6_VF(x) (((x) >> S_T6_VF) & M_T6_VF)
-
#define A_MPS_CLS_SRAM_H 0xe004
#define S_MACPARITY1 9
@@ -34580,6 +43339,41 @@
#define V_MACPARITY2(x) ((x) << S_MACPARITY2)
#define F_MACPARITY2 V_MACPARITY2(1U)
+#define S_SRAMWRN 31
+#define V_SRAMWRN(x) ((x) << S_SRAMWRN)
+#define F_SRAMWRN V_SRAMWRN(1U)
+
+#define S_SRAMSPARE 27
+#define M_SRAMSPARE 0xfU
+#define V_SRAMSPARE(x) ((x) << S_SRAMSPARE)
+#define G_SRAMSPARE(x) (((x) >> S_SRAMSPARE) & M_SRAMSPARE)
+
+#define S_SRAMINDEX 16
+#define M_SRAMINDEX 0x7ffU
+#define V_SRAMINDEX(x) ((x) << S_SRAMINDEX)
+#define G_SRAMINDEX(x) (((x) >> S_SRAMINDEX) & M_SRAMINDEX)
+
+#define A_MPS_CLS_HASH_TCAM_CTL 0xe008
+
+#define S_T7_CTLCMDTYPE 15
+#define V_T7_CTLCMDTYPE(x) ((x) << S_T7_CTLCMDTYPE)
+#define F_T7_CTLCMDTYPE V_T7_CTLCMDTYPE(1U)
+
+#define S_T7_CTLXYBITSEL 12
+#define V_T7_CTLXYBITSEL(x) ((x) << S_T7_CTLXYBITSEL)
+#define F_T7_CTLXYBITSEL V_T7_CTLXYBITSEL(1U)
+
+#define S_T7_CTLTCAMINDEX 0
+#define M_T7_CTLTCAMINDEX 0x1ffU
+#define V_T7_CTLTCAMINDEX(x) ((x) << S_T7_CTLTCAMINDEX)
+#define G_T7_CTLTCAMINDEX(x) (((x) >> S_T7_CTLTCAMINDEX) & M_T7_CTLTCAMINDEX)
+
+#define A_MPS_CLS_HASH_TCAM_DATA 0xe00c
+
+#define S_LKPTYPE 24
+#define V_LKPTYPE(x) ((x) << S_LKPTYPE)
+#define F_LKPTYPE V_LKPTYPE(1U)
+
#define A_MPS_CLS_TCAM_Y_L 0xf000
#define A_MPS_CLS_TCAM_DATA0 0xf000
#define A_MPS_CLS_TCAM_Y_H 0xf004
@@ -34648,6 +43442,16 @@
#define V_DATAVIDH1(x) ((x) << S_DATAVIDH1)
#define G_DATAVIDH1(x) (((x) >> S_DATAVIDH1) & M_DATAVIDH1)
+#define S_T7_CTLTCAMSEL 26
+#define M_T7_CTLTCAMSEL 0x3U
+#define V_T7_CTLTCAMSEL(x) ((x) << S_T7_CTLTCAMSEL)
+#define G_T7_CTLTCAMSEL(x) (((x) >> S_T7_CTLTCAMSEL) & M_T7_CTLTCAMSEL)
+
+#define S_T7_1_CTLTCAMINDEX 17
+#define M_T7_1_CTLTCAMINDEX 0x1ffU
+#define V_T7_1_CTLTCAMINDEX(x) ((x) << S_T7_1_CTLTCAMINDEX)
+#define G_T7_1_CTLTCAMINDEX(x) (((x) >> S_T7_1_CTLTCAMINDEX) & M_T7_1_CTLTCAMINDEX)
+
#define A_MPS_CLS_TCAM_X_H 0xf00c
#define S_TCAMXH 0
@@ -34656,11 +43460,47 @@
#define G_TCAMXH(x) (((x) >> S_TCAMXH) & M_TCAMXH)
#define A_MPS_CLS_TCAM_RDATA0_REQ_ID0 0xf010
+#define A_MPS_CLS_TCAM0_RDATA0_REQ_ID0 0xf010
#define A_MPS_CLS_TCAM_RDATA1_REQ_ID0 0xf014
+#define A_MPS_CLS_TCAM0_RDATA1_REQ_ID0 0xf014
#define A_MPS_CLS_TCAM_RDATA2_REQ_ID0 0xf018
+#define A_MPS_CLS_TCAM0_RDATA2_REQ_ID0 0xf018
+#define A_MPS_CLS_TCAM0_RDATA0_REQ_ID1 0xf01c
#define A_MPS_CLS_TCAM_RDATA0_REQ_ID1 0xf020
+#define A_MPS_CLS_TCAM0_RDATA1_REQ_ID1 0xf020
#define A_MPS_CLS_TCAM_RDATA1_REQ_ID1 0xf024
+#define A_MPS_CLS_TCAM0_RDATA2_REQ_ID1 0xf024
#define A_MPS_CLS_TCAM_RDATA2_REQ_ID1 0xf028
+#define A_MPS_CLS_TCAM1_RDATA0_REQ_ID0 0xf028
+#define A_MPS_CLS_TCAM1_RDATA1_REQ_ID0 0xf02c
+#define A_MPS_CLS_TCAM1_RDATA2_REQ_ID0 0xf030
+#define A_MPS_CLS_TCAM1_RDATA0_REQ_ID1 0xf034
+#define A_MPS_CLS_TCAM1_RDATA1_REQ_ID1 0xf038
+#define A_MPS_CLS_TCAM1_RDATA2_REQ_ID1 0xf03c
+#define A_MPS_CLS_TCAM0_MASK_REG0 0xf040
+#define A_MPS_CLS_TCAM0_MASK_REG1 0xf044
+#define A_MPS_CLS_TCAM0_MASK_REG2 0xf048
+
+#define S_MASK_0_2 0
+#define M_MASK_0_2 0xffffU
+#define V_MASK_0_2(x) ((x) << S_MASK_0_2)
+#define G_MASK_0_2(x) (((x) >> S_MASK_0_2) & M_MASK_0_2)
+
+#define A_MPS_CLS_TCAM1_MASK_REG0 0xf04c
+#define A_MPS_CLS_TCAM1_MASK_REG1 0xf050
+#define A_MPS_CLS_TCAM1_MASK_REG2 0xf054
+
+#define S_MASK_1_2 0
+#define M_MASK_1_2 0xffffU
+#define V_MASK_1_2(x) ((x) << S_MASK_1_2)
+#define G_MASK_1_2(x) (((x) >> S_MASK_1_2) & M_MASK_1_2)
+
+#define A_MPS_CLS_TCAM_BIST_CTRL 0xf058
+#define A_MPS_CLS_TCAM_BIST_CB_PASS 0xf05c
+#define A_MPS_CLS_TCAM_BIST_CB_BUSY 0xf060
+#define A_MPS_CLS_TCAM2_MASK_REG0 0xf064
+#define A_MPS_CLS_TCAM2_MASK_REG1 0xf068
+#define A_MPS_CLS_TCAM2_MASK_REG2 0xf06c
#define A_MPS_RX_CTL 0x11000
#define S_FILT_VLAN_SEL 17
@@ -34686,6 +43526,14 @@
#define V_SNF(x) ((x) << S_SNF)
#define G_SNF(x) (((x) >> S_SNF) & M_SNF)
+#define S_HASH_TCAM_EN 19
+#define V_HASH_TCAM_EN(x) ((x) << S_HASH_TCAM_EN)
+#define F_HASH_TCAM_EN V_HASH_TCAM_EN(1U)
+
+#define S_SND_ORG_PFVF 18
+#define V_SND_ORG_PFVF(x) ((x) << S_SND_ORG_PFVF)
+#define F_SND_ORG_PFVF V_SND_ORG_PFVF(1U)
+
#define A_MPS_RX_PORT_MUX_CTL 0x11004
#define S_CTL_P3 12
@@ -34877,6 +43725,11 @@
#define V_THRESH(x) ((x) << S_THRESH)
#define G_THRESH(x) (((x) >> S_THRESH) & M_THRESH)
+#define S_T7_THRESH 0
+#define M_T7_THRESH 0xfffU
+#define V_T7_THRESH(x) ((x) << S_T7_THRESH)
+#define G_T7_THRESH(x) (((x) >> S_T7_THRESH) & M_T7_THRESH)
+
#define A_MPS_RX_LPBK_BP1 0x11060
#define A_MPS_RX_LPBK_BP2 0x11064
#define A_MPS_RX_LPBK_BP3 0x11068
@@ -34888,6 +43741,12 @@
#define G_GAP(x) (((x) >> S_GAP) & M_GAP)
#define A_MPS_RX_CHMN_CNT 0x11070
+#define A_MPS_CTL_STAT 0x11070
+
+#define S_T7_CTL 0
+#define V_T7_CTL(x) ((x) << S_T7_CTL)
+#define F_T7_CTL V_T7_CTL(1U)
+
#define A_MPS_RX_PERR_INT_CAUSE 0x11074
#define S_FF 23
@@ -34990,18 +43849,54 @@
#define V_T6_INT_ERR_INT(x) ((x) << S_T6_INT_ERR_INT)
#define F_T6_INT_ERR_INT V_T6_INT_ERR_INT(1U)
-#define A_MPS_RX_PERR_INT_ENABLE 0x11078
+#define S_MAC_IN_FIFO_768B 30
+#define V_MAC_IN_FIFO_768B(x) ((x) << S_MAC_IN_FIFO_768B)
+#define F_MAC_IN_FIFO_768B V_MAC_IN_FIFO_768B(1U)
-#define S_T6_INT_ERR_INT 24
-#define V_T6_INT_ERR_INT(x) ((x) << S_T6_INT_ERR_INT)
-#define F_T6_INT_ERR_INT V_T6_INT_ERR_INT(1U)
+#define S_T7_1_INT_ERR_INT 29
+#define V_T7_1_INT_ERR_INT(x) ((x) << S_T7_1_INT_ERR_INT)
+#define F_T7_1_INT_ERR_INT V_T7_1_INT_ERR_INT(1U)
-#define A_MPS_RX_PERR_ENABLE 0x1107c
+#define S_FLOP_PERR 28
+#define V_FLOP_PERR(x) ((x) << S_FLOP_PERR)
+#define F_FLOP_PERR V_FLOP_PERR(1U)
-#define S_T6_INT_ERR_INT 24
-#define V_T6_INT_ERR_INT(x) ((x) << S_T6_INT_ERR_INT)
-#define F_T6_INT_ERR_INT V_T6_INT_ERR_INT(1U)
+#define S_RPLC_MAP 13
+#define M_RPLC_MAP 0x1fU
+#define V_RPLC_MAP(x) ((x) << S_RPLC_MAP)
+#define G_RPLC_MAP(x) (((x) >> S_RPLC_MAP) & M_RPLC_MAP)
+
+#define S_TKN_RUNT_DROP_FIFO 12
+#define V_TKN_RUNT_DROP_FIFO(x) ((x) << S_TKN_RUNT_DROP_FIFO)
+#define F_TKN_RUNT_DROP_FIFO V_TKN_RUNT_DROP_FIFO(1U)
+
+#define S_T7_PPM3 9
+#define M_T7_PPM3 0x7U
+#define V_T7_PPM3(x) ((x) << S_T7_PPM3)
+#define G_T7_PPM3(x) (((x) >> S_T7_PPM3) & M_T7_PPM3)
+#define S_T7_PPM2 6
+#define M_T7_PPM2 0x7U
+#define V_T7_PPM2(x) ((x) << S_T7_PPM2)
+#define G_T7_PPM2(x) (((x) >> S_T7_PPM2) & M_T7_PPM2)
+
+#define S_T7_PPM1 3
+#define M_T7_PPM1 0x7U
+#define V_T7_PPM1(x) ((x) << S_T7_PPM1)
+#define G_T7_PPM1(x) (((x) >> S_T7_PPM1) & M_T7_PPM1)
+
+#define S_T7_PPM0 0
+#define M_T7_PPM0 0x7U
+#define V_T7_PPM0(x) ((x) << S_T7_PPM0)
+#define G_T7_PPM0(x) (((x) >> S_T7_PPM0) & M_T7_PPM0)
+
+#define A_MPS_RX_PERR_INT_ENABLE 0x11078
+
+#define S_T7_2_INT_ERR_INT 30
+#define V_T7_2_INT_ERR_INT(x) ((x) << S_T7_2_INT_ERR_INT)
+#define F_T7_2_INT_ERR_INT V_T7_2_INT_ERR_INT(1U)
+
+#define A_MPS_RX_PERR_ENABLE 0x1107c
#define A_MPS_RX_PERR_INJECT 0x11080
#define A_MPS_RX_FUNC_INT_CAUSE 0x11084
@@ -35083,8 +43978,43 @@
#define V_TH_LOW(x) ((x) << S_TH_LOW)
#define G_TH_LOW(x) (((x) >> S_TH_LOW) & M_TH_LOW)
+#define A_MPS_RX_PERR_INT_CAUSE2 0x1108c
+
+#define S_CRYPT2MPS_RX_INTF_FIFO 28
+#define M_CRYPT2MPS_RX_INTF_FIFO 0xfU
+#define V_CRYPT2MPS_RX_INTF_FIFO(x) ((x) << S_CRYPT2MPS_RX_INTF_FIFO)
+#define G_CRYPT2MPS_RX_INTF_FIFO(x) (((x) >> S_CRYPT2MPS_RX_INTF_FIFO) & M_CRYPT2MPS_RX_INTF_FIFO)
+
+#define S_INIC2MPS_TX0_PERR 27
+#define V_INIC2MPS_TX0_PERR(x) ((x) << S_INIC2MPS_TX0_PERR)
+#define F_INIC2MPS_TX0_PERR V_INIC2MPS_TX0_PERR(1U)
+
+#define S_INIC2MPS_TX1_PERR 26
+#define V_INIC2MPS_TX1_PERR(x) ((x) << S_INIC2MPS_TX1_PERR)
+#define F_INIC2MPS_TX1_PERR V_INIC2MPS_TX1_PERR(1U)
+
+#define S_XGMAC2MPS_RX0_PERR 25
+#define V_XGMAC2MPS_RX0_PERR(x) ((x) << S_XGMAC2MPS_RX0_PERR)
+#define F_XGMAC2MPS_RX0_PERR V_XGMAC2MPS_RX0_PERR(1U)
+
+#define S_XGMAC2MPS_RX1_PERR 24
+#define V_XGMAC2MPS_RX1_PERR(x) ((x) << S_XGMAC2MPS_RX1_PERR)
+#define F_XGMAC2MPS_RX1_PERR V_XGMAC2MPS_RX1_PERR(1U)
+
+#define S_MPS2CRYPTO_RX_INTF_FIFO 20
+#define M_MPS2CRYPTO_RX_INTF_FIFO 0xfU
+#define V_MPS2CRYPTO_RX_INTF_FIFO(x) ((x) << S_MPS2CRYPTO_RX_INTF_FIFO)
+#define G_MPS2CRYPTO_RX_INTF_FIFO(x) (((x) >> S_MPS2CRYPTO_RX_INTF_FIFO) & M_MPS2CRYPTO_RX_INTF_FIFO)
+
+#define S_RX_PRE_PROC_PERR 9
+#define M_RX_PRE_PROC_PERR 0x7ffU
+#define V_RX_PRE_PROC_PERR(x) ((x) << S_RX_PRE_PROC_PERR)
+#define G_RX_PRE_PROC_PERR(x) (((x) >> S_RX_PRE_PROC_PERR) & M_RX_PRE_PROC_PERR)
+
#define A_MPS_RX_PAUSE_GEN_TH_1 0x11090
+#define A_MPS_RX_PERR_INT_ENABLE2 0x11090
#define A_MPS_RX_PAUSE_GEN_TH_2 0x11094
+#define A_MPS_RX_PERR_ENABLE2 0x11094
#define A_MPS_RX_PAUSE_GEN_TH_3 0x11098
#define A_MPS_RX_REPL_CTL 0x11098
@@ -35126,10 +44056,13 @@
#define A_MPS_RX_PT_ARB1 0x110ac
#define A_MPS_RX_PT_ARB2 0x110b0
+#define A_T7_MPS_RX_PT_ARB4 0x110b0
#define A_MPS_RX_PT_ARB3 0x110b4
#define A_T6_MPS_PF_OUT_EN 0x110b4
+#define A_T7_MPS_PF_OUT_EN 0x110b4
#define A_MPS_RX_PT_ARB4 0x110b8
#define A_T6_MPS_BMC_MTU 0x110b8
+#define A_T7_MPS_BMC_MTU 0x110b8
#define A_MPS_PF_OUT_EN 0x110bc
#define S_OUTEN 0
@@ -35138,6 +44071,7 @@
#define G_OUTEN(x) (((x) >> S_OUTEN) & M_OUTEN)
#define A_T6_MPS_BMC_PKT_CNT 0x110bc
+#define A_T7_MPS_BMC_PKT_CNT 0x110bc
#define A_MPS_BMC_MTU 0x110c0
#define S_MTU 0
@@ -35146,6 +44080,7 @@
#define G_MTU(x) (((x) >> S_MTU) & M_MTU)
#define A_T6_MPS_BMC_BYTE_CNT 0x110c0
+#define A_T7_MPS_BMC_BYTE_CNT 0x110c0
#define A_MPS_BMC_PKT_CNT 0x110c4
#define A_T6_MPS_PFVF_ATRB_CTL 0x110c4
@@ -35154,6 +44089,7 @@
#define V_T6_PFVF(x) ((x) << S_T6_PFVF)
#define G_T6_PFVF(x) (((x) >> S_T6_PFVF) & M_T6_PFVF)
+#define A_T7_MPS_PFVF_ATRB_CTL 0x110c4
#define A_MPS_BMC_BYTE_CNT 0x110c8
#define A_T6_MPS_PFVF_ATRB 0x110c8
@@ -35161,6 +44097,12 @@
#define V_FULL_FRAME_MODE(x) ((x) << S_FULL_FRAME_MODE)
#define F_FULL_FRAME_MODE V_FULL_FRAME_MODE(1U)
+#define A_T7_MPS_PFVF_ATRB 0x110c8
+
+#define S_EXTRACT_DEL_VLAN 31
+#define V_EXTRACT_DEL_VLAN(x) ((x) << S_EXTRACT_DEL_VLAN)
+#define F_EXTRACT_DEL_VLAN V_EXTRACT_DEL_VLAN(1U)
+
#define A_MPS_PFVF_ATRB_CTL 0x110cc
#define S_RD_WRN 31
@@ -35173,6 +44115,7 @@
#define G_PFVF(x) (((x) >> S_PFVF) & M_PFVF)
#define A_T6_MPS_PFVF_ATRB_FLTR0 0x110cc
+#define A_T7_MPS_PFVF_ATRB_FLTR0 0x110cc
#define A_MPS_PFVF_ATRB 0x110d0
#define S_ATTR_PF 28
@@ -35193,6 +44136,7 @@
#define F_ATTR_MODE V_ATTR_MODE(1U)
#define A_T6_MPS_PFVF_ATRB_FLTR1 0x110d0
+#define A_T7_MPS_PFVF_ATRB_FLTR1 0x110d0
#define A_MPS_PFVF_ATRB_FLTR0 0x110d4
#define S_VLAN_EN 16
@@ -35205,36 +44149,58 @@
#define G_VLAN_ID(x) (((x) >> S_VLAN_ID) & M_VLAN_ID)
#define A_T6_MPS_PFVF_ATRB_FLTR2 0x110d4
+#define A_T7_MPS_PFVF_ATRB_FLTR2 0x110d4
#define A_MPS_PFVF_ATRB_FLTR1 0x110d8
#define A_T6_MPS_PFVF_ATRB_FLTR3 0x110d8
+#define A_T7_MPS_PFVF_ATRB_FLTR3 0x110d8
#define A_MPS_PFVF_ATRB_FLTR2 0x110dc
#define A_T6_MPS_PFVF_ATRB_FLTR4 0x110dc
+#define A_T7_MPS_PFVF_ATRB_FLTR4 0x110dc
#define A_MPS_PFVF_ATRB_FLTR3 0x110e0
#define A_T6_MPS_PFVF_ATRB_FLTR5 0x110e0
+#define A_T7_MPS_PFVF_ATRB_FLTR5 0x110e0
#define A_MPS_PFVF_ATRB_FLTR4 0x110e4
#define A_T6_MPS_PFVF_ATRB_FLTR6 0x110e4
+#define A_T7_MPS_PFVF_ATRB_FLTR6 0x110e4
#define A_MPS_PFVF_ATRB_FLTR5 0x110e8
#define A_T6_MPS_PFVF_ATRB_FLTR7 0x110e8
+#define A_T7_MPS_PFVF_ATRB_FLTR7 0x110e8
#define A_MPS_PFVF_ATRB_FLTR6 0x110ec
#define A_T6_MPS_PFVF_ATRB_FLTR8 0x110ec
+#define A_T7_MPS_PFVF_ATRB_FLTR8 0x110ec
#define A_MPS_PFVF_ATRB_FLTR7 0x110f0
#define A_T6_MPS_PFVF_ATRB_FLTR9 0x110f0
+#define A_T7_MPS_PFVF_ATRB_FLTR9 0x110f0
#define A_MPS_PFVF_ATRB_FLTR8 0x110f4
#define A_T6_MPS_PFVF_ATRB_FLTR10 0x110f4
+#define A_T7_MPS_PFVF_ATRB_FLTR10 0x110f4
#define A_MPS_PFVF_ATRB_FLTR9 0x110f8
#define A_T6_MPS_PFVF_ATRB_FLTR11 0x110f8
+#define A_T7_MPS_PFVF_ATRB_FLTR11 0x110f8
#define A_MPS_PFVF_ATRB_FLTR10 0x110fc
#define A_T6_MPS_PFVF_ATRB_FLTR12 0x110fc
+#define A_T7_MPS_PFVF_ATRB_FLTR12 0x110fc
#define A_MPS_PFVF_ATRB_FLTR11 0x11100
#define A_T6_MPS_PFVF_ATRB_FLTR13 0x11100
+#define A_T7_MPS_PFVF_ATRB_FLTR13 0x11100
#define A_MPS_PFVF_ATRB_FLTR12 0x11104
#define A_T6_MPS_PFVF_ATRB_FLTR14 0x11104
+#define A_T7_MPS_PFVF_ATRB_FLTR14 0x11104
#define A_MPS_PFVF_ATRB_FLTR13 0x11108
#define A_T6_MPS_PFVF_ATRB_FLTR15 0x11108
+#define A_T7_MPS_PFVF_ATRB_FLTR15 0x11108
#define A_MPS_PFVF_ATRB_FLTR14 0x1110c
#define A_T6_MPS_RPLC_MAP_CTL 0x1110c
+#define A_T7_MPS_RPLC_MAP_CTL 0x1110c
+
+#define S_T7_RPLC_MAP_ADDR 0
+#define M_T7_RPLC_MAP_ADDR 0xfffU
+#define V_T7_RPLC_MAP_ADDR(x) ((x) << S_T7_RPLC_MAP_ADDR)
+#define G_T7_RPLC_MAP_ADDR(x) (((x) >> S_T7_RPLC_MAP_ADDR) & M_T7_RPLC_MAP_ADDR)
+
#define A_MPS_PFVF_ATRB_FLTR15 0x11110
#define A_T6_MPS_PF_RPLCT_MAP 0x11110
+#define A_T7_MPS_PF_RPLCT_MAP 0x11110
#define A_MPS_RPLC_MAP_CTL 0x11114
#define S_RPLC_MAP_ADDR 0
@@ -35243,6 +44209,7 @@
#define G_RPLC_MAP_ADDR(x) (((x) >> S_RPLC_MAP_ADDR) & M_RPLC_MAP_ADDR)
#define A_T6_MPS_VF_RPLCT_MAP0 0x11114
+#define A_T7_MPS_VF_RPLCT_MAP0 0x11114
#define A_MPS_PF_RPLCT_MAP 0x11118
#define S_PF_EN 0
@@ -35251,10 +44218,13 @@
#define G_PF_EN(x) (((x) >> S_PF_EN) & M_PF_EN)
#define A_T6_MPS_VF_RPLCT_MAP1 0x11118
+#define A_T7_MPS_VF_RPLCT_MAP1 0x11118
#define A_MPS_VF_RPLCT_MAP0 0x1111c
#define A_T6_MPS_VF_RPLCT_MAP2 0x1111c
+#define A_T7_MPS_VF_RPLCT_MAP2 0x1111c
#define A_MPS_VF_RPLCT_MAP1 0x11120
#define A_T6_MPS_VF_RPLCT_MAP3 0x11120
+#define A_T7_MPS_VF_RPLCT_MAP3 0x11120
#define A_MPS_VF_RPLCT_MAP2 0x11124
#define A_MPS_VF_RPLCT_MAP3 0x11128
#define A_MPS_MEM_DBG_CTL 0x1112c
@@ -35629,9 +44599,13 @@
#define V_CONG_TH(x) ((x) << S_CONG_TH)
#define G_CONG_TH(x) (((x) >> S_CONG_TH) & M_CONG_TH)
+#define A_MPS_RX_LPBK_BG_PG_CNT2 0x11220
#define A_MPS_RX_CONGESTION_THRESHOLD_BG1 0x11224
+#define A_MPS_RX_LPBK_BG_PG_CNT3 0x11224
#define A_MPS_RX_CONGESTION_THRESHOLD_BG2 0x11228
+#define A_T7_MPS_RX_CONGESTION_THRESHOLD_BG0 0x11228
#define A_MPS_RX_CONGESTION_THRESHOLD_BG3 0x1122c
+#define A_T7_MPS_RX_CONGESTION_THRESHOLD_BG1 0x1122c
#define A_MPS_RX_GRE_PROT_TYPE 0x11230
#define S_NVGRE_EN 9
@@ -35647,6 +44621,7 @@
#define V_GRE(x) ((x) << S_GRE)
#define G_GRE(x) (((x) >> S_GRE) & M_GRE)
+#define A_T7_MPS_RX_CONGESTION_THRESHOLD_BG2 0x11230
#define A_MPS_RX_VXLAN_TYPE 0x11234
#define S_VXLAN_EN 16
@@ -35658,6 +44633,7 @@
#define V_VXLAN(x) ((x) << S_VXLAN)
#define G_VXLAN(x) (((x) >> S_VXLAN) & M_VXLAN)
+#define A_T7_MPS_RX_CONGESTION_THRESHOLD_BG3 0x11234
#define A_MPS_RX_GENEVE_TYPE 0x11238
#define S_GENEVE_EN 16
@@ -35669,12 +44645,14 @@
#define V_GENEVE(x) ((x) << S_GENEVE)
#define G_GENEVE(x) (((x) >> S_GENEVE) & M_GENEVE)
+#define A_T7_MPS_RX_GRE_PROT_TYPE 0x11238
#define A_MPS_RX_INNER_HDR_IVLAN 0x1123c
#define S_T6_IVLAN_EN 16
#define V_T6_IVLAN_EN(x) ((x) << S_T6_IVLAN_EN)
#define F_T6_IVLAN_EN V_T6_IVLAN_EN(1U)
+#define A_T7_MPS_RX_VXLAN_TYPE 0x1123c
#define A_MPS_RX_ENCAP_NVGRE 0x11240
#define S_ETYPE_EN 16
@@ -35686,13 +44664,9 @@
#define V_T6_ETYPE(x) ((x) << S_T6_ETYPE)
#define G_T6_ETYPE(x) (((x) >> S_T6_ETYPE) & M_T6_ETYPE)
+#define A_T7_MPS_RX_GENEVE_TYPE 0x11240
#define A_MPS_RX_ENCAP_GENEVE 0x11244
-
-#define S_T6_ETYPE 0
-#define M_T6_ETYPE 0xffffU
-#define V_T6_ETYPE(x) ((x) << S_T6_ETYPE)
-#define G_T6_ETYPE(x) (((x) >> S_T6_ETYPE) & M_T6_ETYPE)
-
+#define A_T7_MPS_RX_INNER_HDR_IVLAN 0x11244
#define A_MPS_RX_TCP 0x11248
#define S_PROT_TYPE_EN 8
@@ -35704,8 +44678,11 @@
#define V_PROT_TYPE(x) ((x) << S_PROT_TYPE)
#define G_PROT_TYPE(x) (((x) >> S_PROT_TYPE) & M_PROT_TYPE)
+#define A_T7_MPS_RX_ENCAP_NVGRE 0x11248
#define A_MPS_RX_UDP 0x1124c
+#define A_T7_MPS_RX_ENCAP_GENEVE 0x1124c
#define A_MPS_RX_PAUSE 0x11250
+#define A_T7_MPS_RX_TCP 0x11250
#define A_MPS_RX_LENGTH 0x11254
#define S_SAP_VALUE 16
@@ -35718,6 +44695,7 @@
#define V_LENGTH_ETYPE(x) ((x) << S_LENGTH_ETYPE)
#define G_LENGTH_ETYPE(x) (((x) >> S_LENGTH_ETYPE) & M_LENGTH_ETYPE)
+#define A_T7_MPS_RX_UDP 0x11254
#define A_MPS_RX_CTL_ORG 0x11258
#define S_CTL_VALUE 24
@@ -35730,6 +44708,7 @@
#define V_ORG_VALUE(x) ((x) << S_ORG_VALUE)
#define G_ORG_VALUE(x) (((x) >> S_ORG_VALUE) & M_ORG_VALUE)
+#define A_T7_MPS_RX_PAUSE 0x11258
#define A_MPS_RX_IPV4 0x1125c
#define S_ETYPE_IPV4 0
@@ -35737,6 +44716,7 @@
#define V_ETYPE_IPV4(x) ((x) << S_ETYPE_IPV4)
#define G_ETYPE_IPV4(x) (((x) >> S_ETYPE_IPV4) & M_ETYPE_IPV4)
+#define A_T7_MPS_RX_LENGTH 0x1125c
#define A_MPS_RX_IPV6 0x11260
#define S_ETYPE_IPV6 0
@@ -35744,6 +44724,7 @@
#define V_ETYPE_IPV6(x) ((x) << S_ETYPE_IPV6)
#define G_ETYPE_IPV6(x) (((x) >> S_ETYPE_IPV6) & M_ETYPE_IPV6)
+#define A_T7_MPS_RX_CTL_ORG 0x11260
#define A_MPS_RX_TTL 0x11264
#define S_TTL_IPV4 10
@@ -35764,6 +44745,7 @@
#define V_TTL_CHK_EN_IPV6(x) ((x) << S_TTL_CHK_EN_IPV6)
#define F_TTL_CHK_EN_IPV6 V_TTL_CHK_EN_IPV6(1U)
+#define A_T7_MPS_RX_IPV4 0x11264
#define A_MPS_RX_DEFAULT_VNI 0x11268
#define S_VNI 0
@@ -35771,6 +44753,7 @@
#define V_VNI(x) ((x) << S_VNI)
#define G_VNI(x) (((x) >> S_VNI) & M_VNI)
+#define A_T7_MPS_RX_IPV6 0x11268
#define A_MPS_RX_PRS_CTL 0x1126c
#define S_CTL_CHK_EN 28
@@ -35821,6 +44804,7 @@
#define V_DIP_EN(x) ((x) << S_DIP_EN)
#define F_DIP_EN V_DIP_EN(1U)
+#define A_T7_MPS_RX_TTL 0x1126c
#define A_MPS_RX_PRS_CTL_2 0x11270
#define S_EN_UDP_CSUM_CHK 4
@@ -35843,7 +44827,9 @@
#define V_T6_IPV6_UDP_CSUM_COMPAT(x) ((x) << S_T6_IPV6_UDP_CSUM_COMPAT)
#define F_T6_IPV6_UDP_CSUM_COMPAT V_T6_IPV6_UDP_CSUM_COMPAT(1U)
+#define A_T7_MPS_RX_DEFAULT_VNI 0x11270
#define A_MPS_RX_MPS2NCSI_CNT 0x11274
+#define A_T7_MPS_RX_PRS_CTL 0x11274
#define A_MPS_RX_MAX_TNL_HDR_LEN 0x11278
#define S_T6_LEN 0
@@ -35851,38 +44837,222 @@
#define V_T6_LEN(x) ((x) << S_T6_LEN)
#define G_T6_LEN(x) (((x) >> S_T6_LEN) & M_T6_LEN)
+#define A_T7_MPS_RX_PRS_CTL_2 0x11278
+
+#define S_IP_EXT_HDR_EN 5
+#define V_IP_EXT_HDR_EN(x) ((x) << S_IP_EXT_HDR_EN)
+#define F_IP_EXT_HDR_EN V_IP_EXT_HDR_EN(1U)
+
#define A_MPS_RX_PAUSE_DA_H 0x1127c
+#define A_T7_MPS_RX_MPS2NCSI_CNT 0x1127c
#define A_MPS_RX_PAUSE_DA_L 0x11280
+#define A_T7_MPS_RX_MAX_TNL_HDR_LEN 0x11280
+
+#define S_MPS_TNL_HDR_LEN_MODE 9
+#define V_MPS_TNL_HDR_LEN_MODE(x) ((x) << S_MPS_TNL_HDR_LEN_MODE)
+#define F_MPS_TNL_HDR_LEN_MODE V_MPS_TNL_HDR_LEN_MODE(1U)
+
+#define S_MPS_MAX_TNL_HDR_LEN 0
+#define M_MPS_MAX_TNL_HDR_LEN 0x1ffU
+#define V_MPS_MAX_TNL_HDR_LEN(x) ((x) << S_MPS_MAX_TNL_HDR_LEN)
+#define G_MPS_MAX_TNL_HDR_LEN(x) (((x) >> S_MPS_MAX_TNL_HDR_LEN) & M_MPS_MAX_TNL_HDR_LEN)
+
#define A_MPS_RX_CNT_NVGRE_PKT_MAC0 0x11284
+#define A_T7_MPS_RX_PAUSE_DA_H 0x11284
#define A_MPS_RX_CNT_VXLAN_PKT_MAC0 0x11288
+#define A_T7_MPS_RX_PAUSE_DA_L 0x11288
#define A_MPS_RX_CNT_GENEVE_PKT_MAC0 0x1128c
+#define A_T7_MPS_RX_CNT_NVGRE_PKT_MAC0 0x1128c
#define A_MPS_RX_CNT_TNL_ERR_PKT_MAC0 0x11290
+#define A_T7_MPS_RX_CNT_VXLAN_PKT_MAC0 0x11290
#define A_MPS_RX_CNT_NVGRE_PKT_MAC1 0x11294
+#define A_T7_MPS_RX_CNT_GENEVE_PKT_MAC0 0x11294
#define A_MPS_RX_CNT_VXLAN_PKT_MAC1 0x11298
+#define A_T7_MPS_RX_CNT_TNL_ERR_PKT_MAC0 0x11298
#define A_MPS_RX_CNT_GENEVE_PKT_MAC1 0x1129c
+#define A_T7_MPS_RX_CNT_NVGRE_PKT_MAC1 0x1129c
#define A_MPS_RX_CNT_TNL_ERR_PKT_MAC1 0x112a0
+#define A_T7_MPS_RX_CNT_VXLAN_PKT_MAC1 0x112a0
#define A_MPS_RX_CNT_NVGRE_PKT_LPBK0 0x112a4
+#define A_T7_MPS_RX_CNT_GENEVE_PKT_MAC1 0x112a4
#define A_MPS_RX_CNT_VXLAN_PKT_LPBK0 0x112a8
+#define A_T7_MPS_RX_CNT_TNL_ERR_PKT_MAC1 0x112a8
#define A_MPS_RX_CNT_GENEVE_PKT_LPBK0 0x112ac
+#define A_T7_MPS_RX_CNT_NVGRE_PKT_LPBK0 0x112ac
#define A_MPS_RX_CNT_TNL_ERR_PKT_LPBK0 0x112b0
+#define A_T7_MPS_RX_CNT_VXLAN_PKT_LPBK0 0x112b0
#define A_MPS_RX_CNT_NVGRE_PKT_LPBK1 0x112b4
+#define A_T7_MPS_RX_CNT_GENEVE_PKT_LPBK0 0x112b4
#define A_MPS_RX_CNT_VXLAN_PKT_LPBK1 0x112b8
+#define A_T7_MPS_RX_CNT_TNL_ERR_PKT_LPBK0 0x112b8
#define A_MPS_RX_CNT_GENEVE_PKT_LPBK1 0x112bc
+#define A_T7_MPS_RX_CNT_NVGRE_PKT_LPBK1 0x112bc
#define A_MPS_RX_CNT_TNL_ERR_PKT_LPBK1 0x112c0
+#define A_T7_MPS_RX_CNT_VXLAN_PKT_LPBK1 0x112c0
#define A_MPS_RX_CNT_NVGRE_PKT_TO_TP0 0x112c4
+#define A_T7_MPS_RX_CNT_GENEVE_PKT_LPBK1 0x112c4
#define A_MPS_RX_CNT_VXLAN_PKT_TO_TP0 0x112c8
+#define A_T7_MPS_RX_CNT_TNL_ERR_PKT_LPBK1 0x112c8
#define A_MPS_RX_CNT_GENEVE_PKT_TO_TP0 0x112cc
+#define A_T7_MPS_RX_CNT_NVGRE_PKT_TO_TP0 0x112cc
#define A_MPS_RX_CNT_TNL_ERR_PKT_TO_TP0 0x112d0
+#define A_T7_MPS_RX_CNT_VXLAN_PKT_TO_TP0 0x112d0
#define A_MPS_RX_CNT_NVGRE_PKT_TO_TP1 0x112d4
+#define A_T7_MPS_RX_CNT_GENEVE_PKT_TO_TP0 0x112d4
#define A_MPS_RX_CNT_VXLAN_PKT_TO_TP1 0x112d8
+#define A_T7_MPS_RX_CNT_TNL_ERR_PKT_TO_TP0 0x112d8
#define A_MPS_RX_CNT_GENEVE_PKT_TO_TP1 0x112dc
+#define A_T7_MPS_RX_CNT_NVGRE_PKT_TO_TP1 0x112dc
#define A_MPS_RX_CNT_TNL_ERR_PKT_TO_TP1 0x112e0
+#define A_T7_MPS_RX_CNT_VXLAN_PKT_TO_TP1 0x112e0
+#define A_T7_MPS_RX_CNT_GENEVE_PKT_TO_TP1 0x112e4
+#define A_T7_MPS_RX_CNT_TNL_ERR_PKT_TO_TP1 0x112e8
+#define A_MPS_RX_ESP 0x112ec
+#define A_MPS_EN_LPBK_BLK_SNDR 0x112f0
+
+#define S_EN_CH3 3
+#define V_EN_CH3(x) ((x) << S_EN_CH3)
+#define F_EN_CH3 V_EN_CH3(1U)
+
+#define S_EN_CH2 2
+#define V_EN_CH2(x) ((x) << S_EN_CH2)
+#define F_EN_CH2 V_EN_CH2(1U)
+
+#define S_EN_CH1 1
+#define V_EN_CH1(x) ((x) << S_EN_CH1)
+#define F_EN_CH1 V_EN_CH1(1U)
+
+#define S_EN_CH0 0
+#define V_EN_CH0(x) ((x) << S_EN_CH0)
+#define F_EN_CH0 V_EN_CH0(1U)
+
#define A_MPS_VF_RPLCT_MAP4 0x11300
#define A_MPS_VF_RPLCT_MAP5 0x11304
#define A_MPS_VF_RPLCT_MAP6 0x11308
#define A_MPS_VF_RPLCT_MAP7 0x1130c
+#define A_MPS_RX_PERR_INT_CAUSE3 0x11310
+#define A_MPS_RX_PERR_INT_ENABLE3 0x11314
+#define A_MPS_RX_PERR_ENABLE3 0x11318
+#define A_MPS_RX_PERR_INT_CAUSE4 0x1131c
+
+#define S_CLS 20
+#define M_CLS 0x3fU
+#define V_CLS(x) ((x) << S_CLS)
+#define G_CLS(x) (((x) >> S_CLS) & M_CLS)
+
+#define S_RX_PRE_PROC 16
+#define M_RX_PRE_PROC 0xfU
+#define V_RX_PRE_PROC(x) ((x) << S_RX_PRE_PROC)
+#define G_RX_PRE_PROC(x) (((x) >> S_RX_PRE_PROC) & M_RX_PRE_PROC)
+
+#define S_PPROC3 12
+#define M_PPROC3 0xfU
+#define V_PPROC3(x) ((x) << S_PPROC3)
+#define G_PPROC3(x) (((x) >> S_PPROC3) & M_PPROC3)
+
+#define S_PPROC2 8
+#define M_PPROC2 0xfU
+#define V_PPROC2(x) ((x) << S_PPROC2)
+#define G_PPROC2(x) (((x) >> S_PPROC2) & M_PPROC2)
+
+#define S_PPROC1 4
+#define M_PPROC1 0xfU
+#define V_PPROC1(x) ((x) << S_PPROC1)
+#define G_PPROC1(x) (((x) >> S_PPROC1) & M_PPROC1)
+
+#define S_PPROC0 0
+#define M_PPROC0 0xfU
+#define V_PPROC0(x) ((x) << S_PPROC0)
+#define G_PPROC0(x) (((x) >> S_PPROC0) & M_PPROC0)
+
+#define A_MPS_RX_PERR_INT_ENABLE4 0x11320
+#define A_MPS_RX_PERR_ENABLE4 0x11324
+#define A_MPS_RX_PERR_INT_CAUSE5 0x11328
+
+#define S_MPS2CRYP_RX_FIFO 26
+#define M_MPS2CRYP_RX_FIFO 0xfU
+#define V_MPS2CRYP_RX_FIFO(x) ((x) << S_MPS2CRYP_RX_FIFO)
+#define G_MPS2CRYP_RX_FIFO(x) (((x) >> S_MPS2CRYP_RX_FIFO) & M_MPS2CRYP_RX_FIFO)
+
+#define S_RX_OUT 20
+#define M_RX_OUT 0x3fU
+#define V_RX_OUT(x) ((x) << S_RX_OUT)
+#define G_RX_OUT(x) (((x) >> S_RX_OUT) & M_RX_OUT)
+
+#define S_MEM_WRAP 0
+#define M_MEM_WRAP 0xfffffU
+#define V_MEM_WRAP(x) ((x) << S_MEM_WRAP)
+#define G_MEM_WRAP(x) (((x) >> S_MEM_WRAP) & M_MEM_WRAP)
+
+#define A_MPS_RX_PERR_INT_ENABLE5 0x1132c
+#define A_MPS_RX_PERR_ENABLE5 0x11330
+#define A_MPS_RX_PERR_INT_CAUSE6 0x11334
+
+#define S_MPS_RX_MEM_WRAP 0
+#define M_MPS_RX_MEM_WRAP 0x1ffffffU
+#define V_MPS_RX_MEM_WRAP(x) ((x) << S_MPS_RX_MEM_WRAP)
+#define G_MPS_RX_MEM_WRAP(x) (((x) >> S_MPS_RX_MEM_WRAP) & M_MPS_RX_MEM_WRAP)
+
+#define A_MPS_RX_PERR_INT_ENABLE6 0x11338
+#define A_MPS_RX_PERR_ENABLE6 0x1133c
+#define A_MPS_RX_CNT_NVGRE_PKT_MAC2 0x11408
+#define A_MPS_RX_CNT_VXLAN_PKT_MAC2 0x1140c
+#define A_MPS_RX_CNT_GENEVE_PKT_MAC2 0x11410
+#define A_MPS_RX_CNT_TNL_ERR_PKT_MAC2 0x11414
+#define A_MPS_RX_CNT_NVGRE_PKT_MAC3 0x11418
+#define A_MPS_RX_CNT_VXLAN_PKT_MAC3 0x1141c
+#define A_MPS_RX_CNT_GENEVE_PKT_MAC3 0x11420
+#define A_MPS_RX_CNT_TNL_ERR_PKT_MAC3 0x11424
+#define A_MPS_RX_CNT_NVGRE_PKT_LPBK2 0x11428
+#define A_MPS_RX_CNT_VXLAN_PKT_LPBK2 0x1142c
+#define A_MPS_RX_CNT_GENEVE_PKT_LPBK2 0x11430
+#define A_MPS_RX_CNT_TNL_ERR_PKT_LPBK2 0x11434
+#define A_MPS_RX_CNT_NVGRE_PKT_LPBK3 0x11438
+#define A_MPS_RX_CNT_VXLAN_PKT_LPBK3 0x1143c
+#define A_MPS_RX_CNT_GENEVE_PKT_LPBK3 0x11440
+#define A_MPS_RX_CNT_TNL_ERR_PKT_LPBK3 0x11444
+#define A_MPS_RX_CNT_NVGRE_PKT_TO_TP2 0x11448
+#define A_MPS_RX_CNT_VXLAN_PKT_TO_TP2 0x1144c
+#define A_MPS_RX_CNT_GENEVE_PKT_TO_TP2 0x11450
+#define A_MPS_RX_CNT_TNL_ERR_PKT_TO_TP2 0x11454
+#define A_MPS_RX_CNT_NVGRE_PKT_TO_TP3 0x11458
+#define A_MPS_RX_CNT_VXLAN_PKT_TO_TP3 0x1145c
+#define A_MPS_RX_CNT_GENEVE_PKT_TO_TP3 0x11460
+#define A_MPS_RX_CNT_TNL_ERR_PKT_TO_TP3 0x11464
+#define A_T7_MPS_RX_PT_ARB2 0x11468
+#define A_T7_MPS_RX_PT_ARB3 0x1146c
#define A_MPS_CLS_DIPIPV4_ID_TABLE 0x12000
+#define A_MPS_CLS_DIP_ID_TABLE_CTL 0x12000
+
+#define S_DIP_VLD 12
+#define V_DIP_VLD(x) ((x) << S_DIP_VLD)
+#define F_DIP_VLD V_DIP_VLD(1U)
+
+#define S_DIP_TYPE 11
+#define V_DIP_TYPE(x) ((x) << S_DIP_TYPE)
+#define F_DIP_TYPE V_DIP_TYPE(1U)
+
+#define S_DIP_WRN 10
+#define V_DIP_WRN(x) ((x) << S_DIP_WRN)
+#define F_DIP_WRN V_DIP_WRN(1U)
+
+#define S_DIP_SEG 8
+#define M_DIP_SEG 0x3U
+#define V_DIP_SEG(x) ((x) << S_DIP_SEG)
+#define G_DIP_SEG(x) (((x) >> S_DIP_SEG) & M_DIP_SEG)
+
+#define S_DIP_TBL_RSVD1 5
+#define M_DIP_TBL_RSVD1 0x7U
+#define V_DIP_TBL_RSVD1(x) ((x) << S_DIP_TBL_RSVD1)
+#define G_DIP_TBL_RSVD1(x) (((x) >> S_DIP_TBL_RSVD1) & M_DIP_TBL_RSVD1)
+
+#define S_DIP_TBL_ADDR 0
+#define M_DIP_TBL_ADDR 0x1fU
+#define V_DIP_TBL_ADDR(x) ((x) << S_DIP_TBL_ADDR)
+#define G_DIP_TBL_ADDR(x) (((x) >> S_DIP_TBL_ADDR) & M_DIP_TBL_ADDR)
+
#define A_MPS_CLS_DIPIPV4_MASK_TABLE 0x12004
+#define A_MPS_CLS_DIP_ID_TABLE_DATA 0x12004
#define A_MPS_CLS_DIPIPV6ID_0_TABLE 0x12020
#define A_MPS_CLS_DIPIPV6ID_1_TABLE 0x12024
#define A_MPS_CLS_DIPIPV6ID_2_TABLE 0x12028
@@ -35892,6 +45062,226 @@
#define A_MPS_CLS_DIPIPV6MASK_2_TABLE 0x12038
#define A_MPS_CLS_DIPIPV6MASK_3_TABLE 0x1203c
#define A_MPS_RX_HASH_LKP_TABLE 0x12060
+#define A_MPS_CLS_DROP_DMAC0_L 0x12070
+#define A_MPS_CLS_DROP_DMAC0_H 0x12074
+
+#define S_DMAC 0
+#define M_DMAC 0xffffU
+#define V_DMAC(x) ((x) << S_DMAC)
+#define G_DMAC(x) (((x) >> S_DMAC) & M_DMAC)
+
+#define A_MPS_CLS_DROP_DMAC1_L 0x12078
+#define A_MPS_CLS_DROP_DMAC1_H 0x1207c
+#define A_MPS_CLS_DROP_DMAC2_L 0x12080
+#define A_MPS_CLS_DROP_DMAC2_H 0x12084
+#define A_MPS_CLS_DROP_DMAC3_L 0x12088
+#define A_MPS_CLS_DROP_DMAC3_H 0x1208c
+#define A_MPS_CLS_DROP_DMAC4_L 0x12090
+#define A_MPS_CLS_DROP_DMAC4_H 0x12094
+#define A_MPS_CLS_DROP_DMAC5_L 0x12098
+#define A_MPS_CLS_DROP_DMAC5_H 0x1209c
+#define A_MPS_CLS_DROP_DMAC6_L 0x120a0
+#define A_MPS_CLS_DROP_DMAC6_H 0x120a4
+#define A_MPS_CLS_DROP_DMAC7_L 0x120a8
+#define A_MPS_CLS_DROP_DMAC7_H 0x120ac
+#define A_MPS_CLS_DROP_DMAC8_L 0x120b0
+#define A_MPS_CLS_DROP_DMAC8_H 0x120b4
+#define A_MPS_CLS_DROP_DMAC9_L 0x120b8
+#define A_MPS_CLS_DROP_DMAC9_H 0x120bc
+#define A_MPS_CLS_DROP_DMAC10_L 0x120c0
+#define A_MPS_CLS_DROP_DMAC10_H 0x120c4
+#define A_MPS_CLS_DROP_DMAC11_L 0x120c8
+#define A_MPS_CLS_DROP_DMAC11_H 0x120cc
+#define A_MPS_CLS_DROP_DMAC12_L 0x120d0
+#define A_MPS_CLS_DROP_DMAC12_H 0x120d4
+#define A_MPS_CLS_DROP_DMAC13_L 0x120d8
+#define A_MPS_CLS_DROP_DMAC13_H 0x120dc
+#define A_MPS_CLS_DROP_DMAC14_L 0x120e0
+#define A_MPS_CLS_DROP_DMAC14_H 0x120e4
+#define A_MPS_CLS_DROP_DMAC15_L 0x120e8
+#define A_MPS_CLS_DROP_DMAC15_H 0x120ec
+#define A_MPS_RX_ENCAP_VXLAN 0x120f0
+#define A_MPS_RX_INT_VXLAN 0x120f4
+
+#define S_INT_TYPE_EN 16
+#define V_INT_TYPE_EN(x) ((x) << S_INT_TYPE_EN)
+#define F_INT_TYPE_EN V_INT_TYPE_EN(1U)
+
+#define S_INT_TYPE 0
+#define M_INT_TYPE 0xffffU
+#define V_INT_TYPE(x) ((x) << S_INT_TYPE)
+#define G_INT_TYPE(x) (((x) >> S_INT_TYPE) & M_INT_TYPE)
+
+#define A_MPS_RX_INT_GENEVE 0x120f8
+#define A_MPS_PFVF_ATRB2 0x120fc
+
+#define S_EXTRACT_DEL_ENCAP 31
+#define V_EXTRACT_DEL_ENCAP(x) ((x) << S_EXTRACT_DEL_ENCAP)
+#define F_EXTRACT_DEL_ENCAP V_EXTRACT_DEL_ENCAP(1U)
+
+#define A_MPS_RX_TRANS_ENCAP_FLTR_CTL 0x12100
+
+#define S_TIMEOUT_FLT_CLR_EN 8
+#define V_TIMEOUT_FLT_CLR_EN(x) ((x) << S_TIMEOUT_FLT_CLR_EN)
+#define F_TIMEOUT_FLT_CLR_EN V_TIMEOUT_FLT_CLR_EN(1U)
+
+#define S_FLTR_TIMOUT_VAL 0
+#define M_FLTR_TIMOUT_VAL 0xffU
+#define V_FLTR_TIMOUT_VAL(x) ((x) << S_FLTR_TIMOUT_VAL)
+#define G_FLTR_TIMOUT_VAL(x) (((x) >> S_FLTR_TIMOUT_VAL) & M_FLTR_TIMOUT_VAL)
+
+#define A_T7_MPS_RX_PAUSE_GEN_TH_0_0 0x12104
+#define A_T7_MPS_RX_PAUSE_GEN_TH_0_1 0x12108
+#define A_T7_MPS_RX_PAUSE_GEN_TH_0_2 0x1210c
+#define A_T7_MPS_RX_PAUSE_GEN_TH_0_3 0x12110
+#define A_MPS_RX_PAUSE_GEN_TH_0_4 0x12114
+#define A_MPS_RX_PAUSE_GEN_TH_0_5 0x12118
+#define A_MPS_RX_PAUSE_GEN_TH_0_6 0x1211c
+#define A_MPS_RX_PAUSE_GEN_TH_0_7 0x12120
+#define A_T7_MPS_RX_PAUSE_GEN_TH_1_0 0x12124
+#define A_T7_MPS_RX_PAUSE_GEN_TH_1_1 0x12128
+#define A_T7_MPS_RX_PAUSE_GEN_TH_1_2 0x1212c
+#define A_T7_MPS_RX_PAUSE_GEN_TH_1_3 0x12130
+#define A_MPS_RX_PAUSE_GEN_TH_1_4 0x12134
+#define A_MPS_RX_PAUSE_GEN_TH_1_5 0x12138
+#define A_MPS_RX_PAUSE_GEN_TH_1_6 0x1213c
+#define A_MPS_RX_PAUSE_GEN_TH_1_7 0x12140
+#define A_T7_MPS_RX_PAUSE_GEN_TH_2_0 0x12144
+#define A_T7_MPS_RX_PAUSE_GEN_TH_2_1 0x12148
+#define A_T7_MPS_RX_PAUSE_GEN_TH_2_2 0x1214c
+#define A_T7_MPS_RX_PAUSE_GEN_TH_2_3 0x12150
+#define A_MPS_RX_PAUSE_GEN_TH_2_4 0x12154
+#define A_MPS_RX_PAUSE_GEN_TH_2_5 0x12158
+#define A_MPS_RX_PAUSE_GEN_TH_2_6 0x1215c
+#define A_MPS_RX_PAUSE_GEN_TH_2_7 0x12160
+#define A_T7_MPS_RX_PAUSE_GEN_TH_3_0 0x12164
+#define A_T7_MPS_RX_PAUSE_GEN_TH_3_1 0x12168
+#define A_T7_MPS_RX_PAUSE_GEN_TH_3_2 0x1216c
+#define A_T7_MPS_RX_PAUSE_GEN_TH_3_3 0x12170
+#define A_MPS_RX_PAUSE_GEN_TH_3_4 0x12174
+#define A_MPS_RX_PAUSE_GEN_TH_3_5 0x12178
+#define A_MPS_RX_PAUSE_GEN_TH_3_6 0x1217c
+#define A_MPS_RX_PAUSE_GEN_TH_3_7 0x12180
+#define A_MPS_RX_DROP_0_0 0x12184
+
+#define S_DROP_TH 0
+#define M_DROP_TH 0xffffU
+#define V_DROP_TH(x) ((x) << S_DROP_TH)
+#define G_DROP_TH(x) (((x) >> S_DROP_TH) & M_DROP_TH)
+
+#define A_MPS_RX_DROP_0_1 0x12188
+#define A_MPS_RX_DROP_0_2 0x1218c
+#define A_MPS_RX_DROP_0_3 0x12190
+#define A_MPS_RX_DROP_0_4 0x12194
+#define A_MPS_RX_DROP_0_5 0x12198
+#define A_MPS_RX_DROP_0_6 0x1219c
+#define A_MPS_RX_DROP_0_7 0x121a0
+#define A_MPS_RX_DROP_1_0 0x121a4
+#define A_MPS_RX_DROP_1_1 0x121a8
+#define A_MPS_RX_DROP_1_2 0x121ac
+#define A_MPS_RX_DROP_1_3 0x121b0
+#define A_MPS_RX_DROP_1_4 0x121b4
+#define A_MPS_RX_DROP_1_5 0x121b8
+#define A_MPS_RX_DROP_1_6 0x121bc
+#define A_MPS_RX_DROP_1_7 0x121c0
+#define A_MPS_RX_DROP_2_0 0x121c4
+#define A_MPS_RX_DROP_2_1 0x121c8
+#define A_MPS_RX_DROP_2_2 0x121cc
+#define A_MPS_RX_DROP_2_3 0x121d0
+#define A_MPS_RX_DROP_2_4 0x121d4
+#define A_MPS_RX_DROP_2_5 0x121d8
+#define A_MPS_RX_DROP_2_6 0x121dc
+#define A_MPS_RX_DROP_2_7 0x121e0
+#define A_MPS_RX_DROP_3_0 0x121e4
+#define A_MPS_RX_DROP_3_1 0x121e8
+#define A_MPS_RX_DROP_3_2 0x121ec
+#define A_MPS_RX_DROP_3_3 0x121f0
+#define A_MPS_RX_DROP_3_4 0x121f4
+#define A_MPS_RX_DROP_3_5 0x121f8
+#define A_MPS_RX_DROP_3_6 0x121fc
+#define A_MPS_RX_DROP_3_7 0x12200
+#define A_MPS_RX_MAC_BG_PG_CNT0_0 0x12204
+#define A_MPS_RX_MAC_BG_PG_CNT0_1 0x12208
+#define A_MPS_RX_MAC_BG_PG_CNT0_2 0x1220c
+#define A_MPS_RX_MAC_BG_PG_CNT0_3 0x12210
+#define A_MPS_RX_MAC_BG_PG_CNT0_4 0x12214
+#define A_MPS_RX_MAC_BG_PG_CNT0_5 0x12218
+#define A_MPS_RX_MAC_BG_PG_CNT0_6 0x1221c
+#define A_MPS_RX_MAC_BG_PG_CNT0_7 0x12220
+#define A_MPS_RX_MAC_BG_PG_CNT1_0 0x12224
+#define A_MPS_RX_MAC_BG_PG_CNT1_1 0x12228
+#define A_MPS_RX_MAC_BG_PG_CNT1_2 0x1222c
+#define A_MPS_RX_MAC_BG_PG_CNT1_3 0x12230
+#define A_MPS_RX_MAC_BG_PG_CNT1_4 0x12234
+#define A_MPS_RX_MAC_BG_PG_CNT1_5 0x12238
+#define A_MPS_RX_MAC_BG_PG_CNT1_6 0x1223c
+#define A_MPS_RX_MAC_BG_PG_CNT1_7 0x12240
+#define A_MPS_RX_MAC_BG_PG_CNT2_0 0x12244
+#define A_MPS_RX_MAC_BG_PG_CNT2_1 0x12248
+#define A_MPS_RX_MAC_BG_PG_CNT2_2 0x1224c
+#define A_MPS_RX_MAC_BG_PG_CNT2_3 0x12250
+#define A_MPS_RX_MAC_BG_PG_CNT2_4 0x12254
+#define A_MPS_RX_MAC_BG_PG_CNT2_5 0x12258
+#define A_MPS_RX_MAC_BG_PG_CNT2_6 0x1225c
+#define A_MPS_RX_MAC_BG_PG_CNT2_7 0x12260
+#define A_MPS_RX_MAC_BG_PG_CNT3_0 0x12264
+#define A_MPS_RX_MAC_BG_PG_CNT3_1 0x12268
+#define A_MPS_RX_MAC_BG_PG_CNT3_2 0x1226c
+#define A_MPS_RX_MAC_BG_PG_CNT3_3 0x12270
+#define A_MPS_RX_MAC_BG_PG_CNT3_4 0x12274
+#define A_MPS_RX_MAC_BG_PG_CNT3_5 0x12278
+#define A_MPS_RX_MAC_BG_PG_CNT3_6 0x1227c
+#define A_MPS_RX_MAC_BG_PG_CNT3_7 0x12280
+#define A_T7_MPS_RX_PAUSE_GEN_TH_0 0x12284
+#define A_T7_MPS_RX_PAUSE_GEN_TH_1 0x12288
+#define A_T7_MPS_RX_PAUSE_GEN_TH_2 0x1228c
+#define A_T7_MPS_RX_PAUSE_GEN_TH_3 0x12290
+#define A_MPS_RX_BG0_IPSEC_CNT 0x12294
+#define A_MPS_RX_BG1_IPSEC_CNT 0x12298
+#define A_MPS_RX_BG2_IPSEC_CNT 0x1229c
+#define A_MPS_RX_BG3_IPSEC_CNT 0x122a0
+#define A_MPS_RX_MEM_FIFO_CONFIG0 0x122a4
+
+#define S_FIFO_CONFIG2 16
+#define M_FIFO_CONFIG2 0xffffU
+#define V_FIFO_CONFIG2(x) ((x) << S_FIFO_CONFIG2)
+#define G_FIFO_CONFIG2(x) (((x) >> S_FIFO_CONFIG2) & M_FIFO_CONFIG2)
+
+#define S_FIFO_CONFIG1 0
+#define M_FIFO_CONFIG1 0xffffU
+#define V_FIFO_CONFIG1(x) ((x) << S_FIFO_CONFIG1)
+#define G_FIFO_CONFIG1(x) (((x) >> S_FIFO_CONFIG1) & M_FIFO_CONFIG1)
+
+#define A_MPS_RX_MEM_FIFO_CONFIG1 0x122a8
+
+#define S_FIFO_CONFIG3 0
+#define M_FIFO_CONFIG3 0xffffU
+#define V_FIFO_CONFIG3(x) ((x) << S_FIFO_CONFIG3)
+#define G_FIFO_CONFIG3(x) (((x) >> S_FIFO_CONFIG3) & M_FIFO_CONFIG3)
+
+#define A_MPS_LPBK_MEM_FIFO_CONFIG0 0x122ac
+#define A_MPS_LPBK_MEM_FIFO_CONFIG1 0x122b0
+#define A_MPS_RX_LPBK_CONGESTION_THRESHOLD_BG0 0x122b4
+#define A_MPS_RX_LPBK_CONGESTION_THRESHOLD_BG1 0x122b8
+#define A_MPS_RX_LPBK_CONGESTION_THRESHOLD_BG2 0x122bc
+#define A_MPS_RX_LPBK_CONGESTION_THRESHOLD_BG3 0x122c0
+#define A_MPS_BG_PAUSE_CTL 0x122c4
+
+#define S_BG0_PAUSE_EN 3
+#define V_BG0_PAUSE_EN(x) ((x) << S_BG0_PAUSE_EN)
+#define F_BG0_PAUSE_EN V_BG0_PAUSE_EN(1U)
+
+#define S_BG1_PAUSE_EN 2
+#define V_BG1_PAUSE_EN(x) ((x) << S_BG1_PAUSE_EN)
+#define F_BG1_PAUSE_EN V_BG1_PAUSE_EN(1U)
+
+#define S_BG2_PAUSE_EN 1
+#define V_BG2_PAUSE_EN(x) ((x) << S_BG2_PAUSE_EN)
+#define F_BG2_PAUSE_EN V_BG2_PAUSE_EN(1U)
+
+#define S_BG3_PAUSE_EN 0
+#define V_BG3_PAUSE_EN(x) ((x) << S_BG3_PAUSE_EN)
+#define F_BG3_PAUSE_EN V_BG3_PAUSE_EN(1U)
/* registers for module CPL_SWITCH */
#define CPL_SWITCH_BASE_ADDR 0x19040
@@ -35931,6 +45321,7 @@
#define V_CIM_SPLIT_ENABLE(x) ((x) << S_CIM_SPLIT_ENABLE)
#define F_CIM_SPLIT_ENABLE V_CIM_SPLIT_ENABLE(1U)
+#define A_CNTRL 0x19040
#define A_CPL_SWITCH_TBL_IDX 0x19044
#define S_SWITCH_TBL_IDX 0
@@ -35938,7 +45329,9 @@
#define V_SWITCH_TBL_IDX(x) ((x) << S_SWITCH_TBL_IDX)
#define G_SWITCH_TBL_IDX(x) (((x) >> S_SWITCH_TBL_IDX) & M_SWITCH_TBL_IDX)
+#define A_TBL_IDX 0x19044
#define A_CPL_SWITCH_TBL_DATA 0x19048
+#define A_TBL_DATA 0x19048
#define A_CPL_SWITCH_ZERO_ERROR 0x1904c
#define S_ZERO_CMD_CH1 8
@@ -35951,6 +45344,18 @@
#define V_ZERO_CMD_CH0(x) ((x) << S_ZERO_CMD_CH0)
#define G_ZERO_CMD_CH0(x) (((x) >> S_ZERO_CMD_CH0) & M_ZERO_CMD_CH0)
+#define A_ZERO_ERROR 0x1904c
+
+#define S_ZERO_CMD_CH3 24
+#define M_ZERO_CMD_CH3 0xffU
+#define V_ZERO_CMD_CH3(x) ((x) << S_ZERO_CMD_CH3)
+#define G_ZERO_CMD_CH3(x) (((x) >> S_ZERO_CMD_CH3) & M_ZERO_CMD_CH3)
+
+#define S_ZERO_CMD_CH2 16
+#define M_ZERO_CMD_CH2 0xffU
+#define V_ZERO_CMD_CH2(x) ((x) << S_ZERO_CMD_CH2)
+#define G_ZERO_CMD_CH2(x) (((x) >> S_ZERO_CMD_CH2) & M_ZERO_CMD_CH2)
+
#define A_CPL_INTR_ENABLE 0x19050
#define S_CIM_OP_MAP_PERR 5
@@ -35985,7 +45390,18 @@
#define V_PERR_CPL_128TO128_0(x) ((x) << S_PERR_CPL_128TO128_0)
#define F_PERR_CPL_128TO128_0 V_PERR_CPL_128TO128_0(1U)
+#define A_INTR_ENABLE 0x19050
+
+#define S_PERR_CPL_128TO128_3 9
+#define V_PERR_CPL_128TO128_3(x) ((x) << S_PERR_CPL_128TO128_3)
+#define F_PERR_CPL_128TO128_3 V_PERR_CPL_128TO128_3(1U)
+
+#define S_PERR_CPL_128TO128_2 8
+#define V_PERR_CPL_128TO128_2(x) ((x) << S_PERR_CPL_128TO128_2)
+#define F_PERR_CPL_128TO128_2 V_PERR_CPL_128TO128_2(1U)
+
#define A_CPL_INTR_CAUSE 0x19054
+#define A_INTR_CAUSE 0x19054
#define A_CPL_MAP_TBL_IDX 0x19058
#define S_MAP_TBL_IDX 0
@@ -35997,6 +45413,13 @@
#define V_CIM_SPLIT_OPCODE_PROGRAM(x) ((x) << S_CIM_SPLIT_OPCODE_PROGRAM)
#define F_CIM_SPLIT_OPCODE_PROGRAM V_CIM_SPLIT_OPCODE_PROGRAM(1U)
+#define A_MAP_TBL_IDX 0x19058
+
+#define S_CPL_MAP_TBL_SEL 9
+#define M_CPL_MAP_TBL_SEL 0x3U
+#define V_CPL_MAP_TBL_SEL(x) ((x) << S_CPL_MAP_TBL_SEL)
+#define G_CPL_MAP_TBL_SEL(x) (((x) >> S_CPL_MAP_TBL_SEL) & M_CPL_MAP_TBL_SEL)
+
#define A_CPL_MAP_TBL_DATA 0x1905c
#define S_MAP_TBL_DATA 0
@@ -36004,6 +45427,8 @@
#define V_MAP_TBL_DATA(x) ((x) << S_MAP_TBL_DATA)
#define G_MAP_TBL_DATA(x) (((x) >> S_MAP_TBL_DATA) & M_MAP_TBL_DATA)
+#define A_MAP_TBL_DATA 0x1905c
+
/* registers for module SMB */
#define SMB_BASE_ADDR 0x19060
@@ -36019,6 +45444,16 @@
#define V_MICROCNTCFG(x) ((x) << S_MICROCNTCFG)
#define G_MICROCNTCFG(x) (((x) >> S_MICROCNTCFG) & M_MICROCNTCFG)
+#define S_T7_MACROCNTCFG 12
+#define M_T7_MACROCNTCFG 0x1fU
+#define V_T7_MACROCNTCFG(x) ((x) << S_T7_MACROCNTCFG)
+#define G_T7_MACROCNTCFG(x) (((x) >> S_T7_MACROCNTCFG) & M_T7_MACROCNTCFG)
+
+#define S_T7_MICROCNTCFG 0
+#define M_T7_MICROCNTCFG 0xfffU
+#define V_T7_MICROCNTCFG(x) ((x) << S_T7_MICROCNTCFG)
+#define G_T7_MICROCNTCFG(x) (((x) >> S_T7_MICROCNTCFG) & M_T7_MICROCNTCFG)
+
#define A_SMB_MST_TIMEOUT_CFG 0x19064
#define S_MSTTIMEOUTCFG 0
@@ -36685,6 +46120,26 @@
#define V_UART_CLKDIV(x) ((x) << S_UART_CLKDIV)
#define G_UART_CLKDIV(x) (((x) >> S_UART_CLKDIV) & M_UART_CLKDIV)
+#define S_T7_STOPBITS 25
+#define M_T7_STOPBITS 0x3U
+#define V_T7_STOPBITS(x) ((x) << S_T7_STOPBITS)
+#define G_T7_STOPBITS(x) (((x) >> S_T7_STOPBITS) & M_T7_STOPBITS)
+
+#define S_T7_PARITY 23
+#define M_T7_PARITY 0x3U
+#define V_T7_PARITY(x) ((x) << S_T7_PARITY)
+#define G_T7_PARITY(x) (((x) >> S_T7_PARITY) & M_T7_PARITY)
+
+#define S_T7_DATABITS 19
+#define M_T7_DATABITS 0xfU
+#define V_T7_DATABITS(x) ((x) << S_T7_DATABITS)
+#define G_T7_DATABITS(x) (((x) >> S_T7_DATABITS) & M_T7_DATABITS)
+
+#define S_T7_UART_CLKDIV 0
+#define M_T7_UART_CLKDIV 0x3ffffU
+#define V_T7_UART_CLKDIV(x) ((x) << S_T7_UART_CLKDIV)
+#define G_T7_UART_CLKDIV(x) (((x) >> S_T7_UART_CLKDIV) & M_T7_UART_CLKDIV)
+
/* registers for module PMU */
#define PMU_BASE_ADDR 0x19120
@@ -36767,6 +46222,26 @@
#define V_PL_DIS_PRTY_CHK(x) ((x) << S_PL_DIS_PRTY_CHK)
#define F_PL_DIS_PRTY_CHK V_PL_DIS_PRTY_CHK(1U)
+#define S_ARM_PART_CGEN 19
+#define V_ARM_PART_CGEN(x) ((x) << S_ARM_PART_CGEN)
+#define F_ARM_PART_CGEN V_ARM_PART_CGEN(1U)
+
+#define S_CRYPTO_PART_CGEN 14
+#define V_CRYPTO_PART_CGEN(x) ((x) << S_CRYPTO_PART_CGEN)
+#define F_CRYPTO_PART_CGEN V_CRYPTO_PART_CGEN(1U)
+
+#define S_NVME_PART_CGEN 9
+#define V_NVME_PART_CGEN(x) ((x) << S_NVME_PART_CGEN)
+#define F_NVME_PART_CGEN V_NVME_PART_CGEN(1U)
+
+#define S_XP10_PART_CGEN 8
+#define V_XP10_PART_CGEN(x) ((x) << S_XP10_PART_CGEN)
+#define F_XP10_PART_CGEN V_XP10_PART_CGEN(1U)
+
+#define S_GPEX_PART_CGEN 7
+#define V_GPEX_PART_CGEN(x) ((x) << S_GPEX_PART_CGEN)
+#define F_GPEX_PART_CGEN V_GPEX_PART_CGEN(1U)
+
#define A_PMU_SLEEPMODE_WAKEUP 0x19124
#define S_HWWAKEUPEN 5
@@ -36861,6 +46336,72 @@
#define V_TDDPTAGTCB(x) ((x) << S_TDDPTAGTCB)
#define F_TDDPTAGTCB V_TDDPTAGTCB(1U)
+#define S_ISCSI_PAGE_SIZE_CHK_ENB 31
+#define V_ISCSI_PAGE_SIZE_CHK_ENB(x) ((x) << S_ISCSI_PAGE_SIZE_CHK_ENB)
+#define F_ISCSI_PAGE_SIZE_CHK_ENB V_ISCSI_PAGE_SIZE_CHK_ENB(1U)
+
+#define S_RDMA_0B_WR_OPCODE_HI 29
+#define V_RDMA_0B_WR_OPCODE_HI(x) ((x) << S_RDMA_0B_WR_OPCODE_HI)
+#define F_RDMA_0B_WR_OPCODE_HI V_RDMA_0B_WR_OPCODE_HI(1U)
+
+#define S_RDMA_IMMEDIATE_CQE 28
+#define V_RDMA_IMMEDIATE_CQE(x) ((x) << S_RDMA_IMMEDIATE_CQE)
+#define F_RDMA_IMMEDIATE_CQE V_RDMA_IMMEDIATE_CQE(1U)
+
+#define S_RDMA_ATOMIC_WR_RSP_CQE 27
+#define V_RDMA_ATOMIC_WR_RSP_CQE(x) ((x) << S_RDMA_ATOMIC_WR_RSP_CQE)
+#define F_RDMA_ATOMIC_WR_RSP_CQE V_RDMA_ATOMIC_WR_RSP_CQE(1U)
+
+#define S_RDMA_VERIFY_RSP_FLUSH 26
+#define V_RDMA_VERIFY_RSP_FLUSH(x) ((x) << S_RDMA_VERIFY_RSP_FLUSH)
+#define F_RDMA_VERIFY_RSP_FLUSH V_RDMA_VERIFY_RSP_FLUSH(1U)
+
+#define S_RDMA_VERIFY_RSP_CQE 25
+#define V_RDMA_VERIFY_RSP_CQE(x) ((x) << S_RDMA_VERIFY_RSP_CQE)
+#define F_RDMA_VERIFY_RSP_CQE V_RDMA_VERIFY_RSP_CQE(1U)
+
+#define S_RDMA_FLUSH_RSP_CQE 24
+#define V_RDMA_FLUSH_RSP_CQE(x) ((x) << S_RDMA_FLUSH_RSP_CQE)
+#define F_RDMA_FLUSH_RSP_CQE V_RDMA_FLUSH_RSP_CQE(1U)
+
+#define S_RDMA_ATOMIC_RSP_CQE 23
+#define V_RDMA_ATOMIC_RSP_CQE(x) ((x) << S_RDMA_ATOMIC_RSP_CQE)
+#define F_RDMA_ATOMIC_RSP_CQE V_RDMA_ATOMIC_RSP_CQE(1U)
+
+#define S_T7_TPT_EXTENSION_MODE 22
+#define V_T7_TPT_EXTENSION_MODE(x) ((x) << S_T7_TPT_EXTENSION_MODE)
+#define F_T7_TPT_EXTENSION_MODE V_T7_TPT_EXTENSION_MODE(1U)
+
+#define S_NVME_TCP_DDP_VAL_EN 21
+#define V_NVME_TCP_DDP_VAL_EN(x) ((x) << S_NVME_TCP_DDP_VAL_EN)
+#define F_NVME_TCP_DDP_VAL_EN V_NVME_TCP_DDP_VAL_EN(1U)
+
+#define S_NVME_TCP_REMOVE_HDR_CRC 20
+#define V_NVME_TCP_REMOVE_HDR_CRC(x) ((x) << S_NVME_TCP_REMOVE_HDR_CRC)
+#define F_NVME_TCP_REMOVE_HDR_CRC V_NVME_TCP_REMOVE_HDR_CRC(1U)
+
+#define S_NVME_TCP_LAST_PDU_CHECK_ENB 19
+#define V_NVME_TCP_LAST_PDU_CHECK_ENB(x) ((x) << S_NVME_TCP_LAST_PDU_CHECK_ENB)
+#define F_NVME_TCP_LAST_PDU_CHECK_ENB V_NVME_TCP_LAST_PDU_CHECK_ENB(1U)
+
+#define S_NVME_TCP_OFFSET_SUBMODE 17
+#define M_NVME_TCP_OFFSET_SUBMODE 0x3U
+#define V_NVME_TCP_OFFSET_SUBMODE(x) ((x) << S_NVME_TCP_OFFSET_SUBMODE)
+#define G_NVME_TCP_OFFSET_SUBMODE(x) (((x) >> S_NVME_TCP_OFFSET_SUBMODE) & M_NVME_TCP_OFFSET_SUBMODE)
+
+#define S_NVME_TCP_OFFSET_MODE 16
+#define V_NVME_TCP_OFFSET_MODE(x) ((x) << S_NVME_TCP_OFFSET_MODE)
+#define F_NVME_TCP_OFFSET_MODE V_NVME_TCP_OFFSET_MODE(1U)
+
+#define S_QPID_CHECK_DISABLE_FOR_SEND 15
+#define V_QPID_CHECK_DISABLE_FOR_SEND(x) ((x) << S_QPID_CHECK_DISABLE_FOR_SEND)
+#define F_QPID_CHECK_DISABLE_FOR_SEND V_QPID_CHECK_DISABLE_FOR_SEND(1U)
+
+#define S_RDMA_0B_WR_OPCODE_LO 10
+#define M_RDMA_0B_WR_OPCODE_LO 0xfU
+#define V_RDMA_0B_WR_OPCODE_LO(x) ((x) << S_RDMA_0B_WR_OPCODE_LO)
+#define G_RDMA_0B_WR_OPCODE_LO(x) (((x) >> S_RDMA_0B_WR_OPCODE_LO) & M_RDMA_0B_WR_OPCODE_LO)
+
#define A_ULP_RX_INT_ENABLE 0x19154
#define S_ENABLE_CTX_1 24
@@ -36971,6 +46512,86 @@
#define V_SE_CNT_MISMATCH_0(x) ((x) << S_SE_CNT_MISMATCH_0)
#define F_SE_CNT_MISMATCH_0 V_SE_CNT_MISMATCH_0(1U)
+#define S_CERR_PCMD_FIFO_3 19
+#define V_CERR_PCMD_FIFO_3(x) ((x) << S_CERR_PCMD_FIFO_3)
+#define F_CERR_PCMD_FIFO_3 V_CERR_PCMD_FIFO_3(1U)
+
+#define S_CERR_PCMD_FIFO_2 18
+#define V_CERR_PCMD_FIFO_2(x) ((x) << S_CERR_PCMD_FIFO_2)
+#define F_CERR_PCMD_FIFO_2 V_CERR_PCMD_FIFO_2(1U)
+
+#define S_CERR_PCMD_FIFO_1 17
+#define V_CERR_PCMD_FIFO_1(x) ((x) << S_CERR_PCMD_FIFO_1)
+#define F_CERR_PCMD_FIFO_1 V_CERR_PCMD_FIFO_1(1U)
+
+#define S_CERR_PCMD_FIFO_0 16
+#define V_CERR_PCMD_FIFO_0(x) ((x) << S_CERR_PCMD_FIFO_0)
+#define F_CERR_PCMD_FIFO_0 V_CERR_PCMD_FIFO_0(1U)
+
+#define S_CERR_DATA_FIFO_3 15
+#define V_CERR_DATA_FIFO_3(x) ((x) << S_CERR_DATA_FIFO_3)
+#define F_CERR_DATA_FIFO_3 V_CERR_DATA_FIFO_3(1U)
+
+#define S_CERR_DATA_FIFO_2 14
+#define V_CERR_DATA_FIFO_2(x) ((x) << S_CERR_DATA_FIFO_2)
+#define F_CERR_DATA_FIFO_2 V_CERR_DATA_FIFO_2(1U)
+
+#define S_CERR_DATA_FIFO_1 13
+#define V_CERR_DATA_FIFO_1(x) ((x) << S_CERR_DATA_FIFO_1)
+#define F_CERR_DATA_FIFO_1 V_CERR_DATA_FIFO_1(1U)
+
+#define S_CERR_DATA_FIFO_0 12
+#define V_CERR_DATA_FIFO_0(x) ((x) << S_CERR_DATA_FIFO_0)
+#define F_CERR_DATA_FIFO_0 V_CERR_DATA_FIFO_0(1U)
+
+#define S_SE_CNT_MISMATCH_3 11
+#define V_SE_CNT_MISMATCH_3(x) ((x) << S_SE_CNT_MISMATCH_3)
+#define F_SE_CNT_MISMATCH_3 V_SE_CNT_MISMATCH_3(1U)
+
+#define S_SE_CNT_MISMATCH_2 10
+#define V_SE_CNT_MISMATCH_2(x) ((x) << S_SE_CNT_MISMATCH_2)
+#define F_SE_CNT_MISMATCH_2 V_SE_CNT_MISMATCH_2(1U)
+
+#define S_T7_SE_CNT_MISMATCH_1 9
+#define V_T7_SE_CNT_MISMATCH_1(x) ((x) << S_T7_SE_CNT_MISMATCH_1)
+#define F_T7_SE_CNT_MISMATCH_1 V_T7_SE_CNT_MISMATCH_1(1U)
+
+#define S_T7_SE_CNT_MISMATCH_0 8
+#define V_T7_SE_CNT_MISMATCH_0(x) ((x) << S_T7_SE_CNT_MISMATCH_0)
+#define F_T7_SE_CNT_MISMATCH_0 V_T7_SE_CNT_MISMATCH_0(1U)
+
+#define S_ENABLE_CTX_3 7
+#define V_ENABLE_CTX_3(x) ((x) << S_ENABLE_CTX_3)
+#define F_ENABLE_CTX_3 V_ENABLE_CTX_3(1U)
+
+#define S_ENABLE_CTX_2 6
+#define V_ENABLE_CTX_2(x) ((x) << S_ENABLE_CTX_2)
+#define F_ENABLE_CTX_2 V_ENABLE_CTX_2(1U)
+
+#define S_T7_ENABLE_CTX_1 5
+#define V_T7_ENABLE_CTX_1(x) ((x) << S_T7_ENABLE_CTX_1)
+#define F_T7_ENABLE_CTX_1 V_T7_ENABLE_CTX_1(1U)
+
+#define S_T7_ENABLE_CTX_0 4
+#define V_T7_ENABLE_CTX_0(x) ((x) << S_T7_ENABLE_CTX_0)
+#define F_T7_ENABLE_CTX_0 V_T7_ENABLE_CTX_0(1U)
+
+#define S_ENABLE_ALN_SDC_ERR_3 3
+#define V_ENABLE_ALN_SDC_ERR_3(x) ((x) << S_ENABLE_ALN_SDC_ERR_3)
+#define F_ENABLE_ALN_SDC_ERR_3 V_ENABLE_ALN_SDC_ERR_3(1U)
+
+#define S_ENABLE_ALN_SDC_ERR_2 2
+#define V_ENABLE_ALN_SDC_ERR_2(x) ((x) << S_ENABLE_ALN_SDC_ERR_2)
+#define F_ENABLE_ALN_SDC_ERR_2 V_ENABLE_ALN_SDC_ERR_2(1U)
+
+#define S_T7_ENABLE_ALN_SDC_ERR_1 1
+#define V_T7_ENABLE_ALN_SDC_ERR_1(x) ((x) << S_T7_ENABLE_ALN_SDC_ERR_1)
+#define F_T7_ENABLE_ALN_SDC_ERR_1 V_T7_ENABLE_ALN_SDC_ERR_1(1U)
+
+#define S_T7_ENABLE_ALN_SDC_ERR_0 0
+#define V_T7_ENABLE_ALN_SDC_ERR_0(x) ((x) << S_T7_ENABLE_ALN_SDC_ERR_0)
+#define F_T7_ENABLE_ALN_SDC_ERR_0 V_T7_ENABLE_ALN_SDC_ERR_0(1U)
+
#define A_ULP_RX_INT_CAUSE 0x19158
#define S_CAUSE_CTX_1 24
@@ -37282,6 +46903,312 @@
#define G_ULPRX_TID(x) (((x) >> S_ULPRX_TID) & M_ULPRX_TID)
#define A_ULP_RX_CTX_ACC_CH1 0x191b0
+#define A_ULP_RX_CTX_ACC_CH2 0x191b4
+#define A_ULP_RX_CTX_ACC_CH3 0x191b8
+#define A_ULP_RX_CTL2 0x191bc
+
+#define S_PCMD3THRESHOLD 24
+#define M_PCMD3THRESHOLD 0xffU
+#define V_PCMD3THRESHOLD(x) ((x) << S_PCMD3THRESHOLD)
+#define G_PCMD3THRESHOLD(x) (((x) >> S_PCMD3THRESHOLD) & M_PCMD3THRESHOLD)
+
+#define S_PCMD2THRESHOLD 16
+#define M_PCMD2THRESHOLD 0xffU
+#define V_PCMD2THRESHOLD(x) ((x) << S_PCMD2THRESHOLD)
+#define G_PCMD2THRESHOLD(x) (((x) >> S_PCMD2THRESHOLD) & M_PCMD2THRESHOLD)
+
+#define S_T7_PCMD1THRESHOLD 8
+#define M_T7_PCMD1THRESHOLD 0xffU
+#define V_T7_PCMD1THRESHOLD(x) ((x) << S_T7_PCMD1THRESHOLD)
+#define G_T7_PCMD1THRESHOLD(x) (((x) >> S_T7_PCMD1THRESHOLD) & M_T7_PCMD1THRESHOLD)
+
+#define S_T7_PCMD0THRESHOLD 0
+#define M_T7_PCMD0THRESHOLD 0xffU
+#define V_T7_PCMD0THRESHOLD(x) ((x) << S_T7_PCMD0THRESHOLD)
+#define G_T7_PCMD0THRESHOLD(x) (((x) >> S_T7_PCMD0THRESHOLD) & M_T7_PCMD0THRESHOLD)
+
+#define A_ULP_RX_INT_ENABLE_INTERFACE 0x191c0
+
+#define S_ENABLE_ULPRX2SBT_RSPPERR 31
+#define V_ENABLE_ULPRX2SBT_RSPPERR(x) ((x) << S_ENABLE_ULPRX2SBT_RSPPERR)
+#define F_ENABLE_ULPRX2SBT_RSPPERR V_ENABLE_ULPRX2SBT_RSPPERR(1U)
+
+#define S_ENABLE_ULPRX2MA_RSPPERR 30
+#define V_ENABLE_ULPRX2MA_RSPPERR(x) ((x) << S_ENABLE_ULPRX2MA_RSPPERR)
+#define F_ENABLE_ULPRX2MA_RSPPERR V_ENABLE_ULPRX2MA_RSPPERR(1U)
+
+#define S_ENABME_PIO_BUS_PERR 29
+#define V_ENABME_PIO_BUS_PERR(x) ((x) << S_ENABME_PIO_BUS_PERR)
+#define F_ENABME_PIO_BUS_PERR V_ENABME_PIO_BUS_PERR(1U)
+
+#define S_ENABLE_PM2ULP_SNOOPDATA_3 19
+#define V_ENABLE_PM2ULP_SNOOPDATA_3(x) ((x) << S_ENABLE_PM2ULP_SNOOPDATA_3)
+#define F_ENABLE_PM2ULP_SNOOPDATA_3 V_ENABLE_PM2ULP_SNOOPDATA_3(1U)
+
+#define S_ENABLE_PM2ULP_SNOOPDATA_2 18
+#define V_ENABLE_PM2ULP_SNOOPDATA_2(x) ((x) << S_ENABLE_PM2ULP_SNOOPDATA_2)
+#define F_ENABLE_PM2ULP_SNOOPDATA_2 V_ENABLE_PM2ULP_SNOOPDATA_2(1U)
+
+#define S_ENABLE_PM2ULP_SNOOPDATA_1 17
+#define V_ENABLE_PM2ULP_SNOOPDATA_1(x) ((x) << S_ENABLE_PM2ULP_SNOOPDATA_1)
+#define F_ENABLE_PM2ULP_SNOOPDATA_1 V_ENABLE_PM2ULP_SNOOPDATA_1(1U)
+
+#define S_ENABLE_PM2ULP_SNOOPDATA_0 16
+#define V_ENABLE_PM2ULP_SNOOPDATA_0(x) ((x) << S_ENABLE_PM2ULP_SNOOPDATA_0)
+#define F_ENABLE_PM2ULP_SNOOPDATA_0 V_ENABLE_PM2ULP_SNOOPDATA_0(1U)
+
+#define S_ENABLE_TLS2ULP_DATA_3 15
+#define V_ENABLE_TLS2ULP_DATA_3(x) ((x) << S_ENABLE_TLS2ULP_DATA_3)
+#define F_ENABLE_TLS2ULP_DATA_3 V_ENABLE_TLS2ULP_DATA_3(1U)
+
+#define S_ENABLE_TLS2ULP_DATA_2 14
+#define V_ENABLE_TLS2ULP_DATA_2(x) ((x) << S_ENABLE_TLS2ULP_DATA_2)
+#define F_ENABLE_TLS2ULP_DATA_2 V_ENABLE_TLS2ULP_DATA_2(1U)
+
+#define S_ENABLE_TLS2ULP_DATA_1 13
+#define V_ENABLE_TLS2ULP_DATA_1(x) ((x) << S_ENABLE_TLS2ULP_DATA_1)
+#define F_ENABLE_TLS2ULP_DATA_1 V_ENABLE_TLS2ULP_DATA_1(1U)
+
+#define S_ENABLE_TLS2ULP_DATA_0 12
+#define V_ENABLE_TLS2ULP_DATA_0(x) ((x) << S_ENABLE_TLS2ULP_DATA_0)
+#define F_ENABLE_TLS2ULP_DATA_0 V_ENABLE_TLS2ULP_DATA_0(1U)
+
+#define S_ENABLE_TLS2ULP_PLENDATA_3 11
+#define V_ENABLE_TLS2ULP_PLENDATA_3(x) ((x) << S_ENABLE_TLS2ULP_PLENDATA_3)
+#define F_ENABLE_TLS2ULP_PLENDATA_3 V_ENABLE_TLS2ULP_PLENDATA_3(1U)
+
+#define S_ENABLE_TLS2ULP_PLENDATA_2 10
+#define V_ENABLE_TLS2ULP_PLENDATA_2(x) ((x) << S_ENABLE_TLS2ULP_PLENDATA_2)
+#define F_ENABLE_TLS2ULP_PLENDATA_2 V_ENABLE_TLS2ULP_PLENDATA_2(1U)
+
+#define S_ENABLE_TLS2ULP_PLENDATA_1 9
+#define V_ENABLE_TLS2ULP_PLENDATA_1(x) ((x) << S_ENABLE_TLS2ULP_PLENDATA_1)
+#define F_ENABLE_TLS2ULP_PLENDATA_1 V_ENABLE_TLS2ULP_PLENDATA_1(1U)
+
+#define S_ENABLE_TLS2ULP_PLENDATA_0 8
+#define V_ENABLE_TLS2ULP_PLENDATA_0(x) ((x) << S_ENABLE_TLS2ULP_PLENDATA_0)
+#define F_ENABLE_TLS2ULP_PLENDATA_0 V_ENABLE_TLS2ULP_PLENDATA_0(1U)
+
+#define S_ENABLE_PM2ULP_DATA_3 7
+#define V_ENABLE_PM2ULP_DATA_3(x) ((x) << S_ENABLE_PM2ULP_DATA_3)
+#define F_ENABLE_PM2ULP_DATA_3 V_ENABLE_PM2ULP_DATA_3(1U)
+
+#define S_ENABLE_PM2ULP_DATA_2 6
+#define V_ENABLE_PM2ULP_DATA_2(x) ((x) << S_ENABLE_PM2ULP_DATA_2)
+#define F_ENABLE_PM2ULP_DATA_2 V_ENABLE_PM2ULP_DATA_2(1U)
+
+#define S_ENABLE_PM2ULP_DATA_1 5
+#define V_ENABLE_PM2ULP_DATA_1(x) ((x) << S_ENABLE_PM2ULP_DATA_1)
+#define F_ENABLE_PM2ULP_DATA_1 V_ENABLE_PM2ULP_DATA_1(1U)
+
+#define S_ENABLE_PM2ULP_DATA_0 4
+#define V_ENABLE_PM2ULP_DATA_0(x) ((x) << S_ENABLE_PM2ULP_DATA_0)
+#define F_ENABLE_PM2ULP_DATA_0 V_ENABLE_PM2ULP_DATA_0(1U)
+
+#define S_ENABLE_TP2ULP_PCMD_3 3
+#define V_ENABLE_TP2ULP_PCMD_3(x) ((x) << S_ENABLE_TP2ULP_PCMD_3)
+#define F_ENABLE_TP2ULP_PCMD_3 V_ENABLE_TP2ULP_PCMD_3(1U)
+
+#define S_ENABLE_TP2ULP_PCMD_2 2
+#define V_ENABLE_TP2ULP_PCMD_2(x) ((x) << S_ENABLE_TP2ULP_PCMD_2)
+#define F_ENABLE_TP2ULP_PCMD_2 V_ENABLE_TP2ULP_PCMD_2(1U)
+
+#define S_ENABLE_TP2ULP_PCMD_1 1
+#define V_ENABLE_TP2ULP_PCMD_1(x) ((x) << S_ENABLE_TP2ULP_PCMD_1)
+#define F_ENABLE_TP2ULP_PCMD_1 V_ENABLE_TP2ULP_PCMD_1(1U)
+
+#define S_ENABLE_TP2ULP_PCMD_0 0
+#define V_ENABLE_TP2ULP_PCMD_0(x) ((x) << S_ENABLE_TP2ULP_PCMD_0)
+#define F_ENABLE_TP2ULP_PCMD_0 V_ENABLE_TP2ULP_PCMD_0(1U)
+
+#define A_ULP_RX_INT_CAUSE_INTERFACE 0x191c4
+
+#define S_CAUSE_ULPRX2SBT_RSPPERR 31
+#define V_CAUSE_ULPRX2SBT_RSPPERR(x) ((x) << S_CAUSE_ULPRX2SBT_RSPPERR)
+#define F_CAUSE_ULPRX2SBT_RSPPERR V_CAUSE_ULPRX2SBT_RSPPERR(1U)
+
+#define S_CAUSE_ULPRX2MA_RSPPERR 30
+#define V_CAUSE_ULPRX2MA_RSPPERR(x) ((x) << S_CAUSE_ULPRX2MA_RSPPERR)
+#define F_CAUSE_ULPRX2MA_RSPPERR V_CAUSE_ULPRX2MA_RSPPERR(1U)
+
+#define S_CAUSE_PIO_BUS_PERR 29
+#define V_CAUSE_PIO_BUS_PERR(x) ((x) << S_CAUSE_PIO_BUS_PERR)
+#define F_CAUSE_PIO_BUS_PERR V_CAUSE_PIO_BUS_PERR(1U)
+
+#define S_CAUSE_PM2ULP_SNOOPDATA_3 19
+#define V_CAUSE_PM2ULP_SNOOPDATA_3(x) ((x) << S_CAUSE_PM2ULP_SNOOPDATA_3)
+#define F_CAUSE_PM2ULP_SNOOPDATA_3 V_CAUSE_PM2ULP_SNOOPDATA_3(1U)
+
+#define S_CAUSE_PM2ULP_SNOOPDATA_2 18
+#define V_CAUSE_PM2ULP_SNOOPDATA_2(x) ((x) << S_CAUSE_PM2ULP_SNOOPDATA_2)
+#define F_CAUSE_PM2ULP_SNOOPDATA_2 V_CAUSE_PM2ULP_SNOOPDATA_2(1U)
+
+#define S_CAUSE_PM2ULP_SNOOPDATA_1 17
+#define V_CAUSE_PM2ULP_SNOOPDATA_1(x) ((x) << S_CAUSE_PM2ULP_SNOOPDATA_1)
+#define F_CAUSE_PM2ULP_SNOOPDATA_1 V_CAUSE_PM2ULP_SNOOPDATA_1(1U)
+
+#define S_CAUSE_PM2ULP_SNOOPDATA_0 16
+#define V_CAUSE_PM2ULP_SNOOPDATA_0(x) ((x) << S_CAUSE_PM2ULP_SNOOPDATA_0)
+#define F_CAUSE_PM2ULP_SNOOPDATA_0 V_CAUSE_PM2ULP_SNOOPDATA_0(1U)
+
+#define S_CAUSE_TLS2ULP_DATA_3 15
+#define V_CAUSE_TLS2ULP_DATA_3(x) ((x) << S_CAUSE_TLS2ULP_DATA_3)
+#define F_CAUSE_TLS2ULP_DATA_3 V_CAUSE_TLS2ULP_DATA_3(1U)
+
+#define S_CAUSE_TLS2ULP_DATA_2 14
+#define V_CAUSE_TLS2ULP_DATA_2(x) ((x) << S_CAUSE_TLS2ULP_DATA_2)
+#define F_CAUSE_TLS2ULP_DATA_2 V_CAUSE_TLS2ULP_DATA_2(1U)
+
+#define S_CAUSE_TLS2ULP_DATA_1 13
+#define V_CAUSE_TLS2ULP_DATA_1(x) ((x) << S_CAUSE_TLS2ULP_DATA_1)
+#define F_CAUSE_TLS2ULP_DATA_1 V_CAUSE_TLS2ULP_DATA_1(1U)
+
+#define S_CAUSE_TLS2ULP_DATA_0 12
+#define V_CAUSE_TLS2ULP_DATA_0(x) ((x) << S_CAUSE_TLS2ULP_DATA_0)
+#define F_CAUSE_TLS2ULP_DATA_0 V_CAUSE_TLS2ULP_DATA_0(1U)
+
+#define S_CAUSE_TLS2ULP_PLENDATA_3 11
+#define V_CAUSE_TLS2ULP_PLENDATA_3(x) ((x) << S_CAUSE_TLS2ULP_PLENDATA_3)
+#define F_CAUSE_TLS2ULP_PLENDATA_3 V_CAUSE_TLS2ULP_PLENDATA_3(1U)
+
+#define S_CAUSE_TLS2ULP_PLENDATA_2 10
+#define V_CAUSE_TLS2ULP_PLENDATA_2(x) ((x) << S_CAUSE_TLS2ULP_PLENDATA_2)
+#define F_CAUSE_TLS2ULP_PLENDATA_2 V_CAUSE_TLS2ULP_PLENDATA_2(1U)
+
+#define S_CAUSE_TLS2ULP_PLENDATA_1 9
+#define V_CAUSE_TLS2ULP_PLENDATA_1(x) ((x) << S_CAUSE_TLS2ULP_PLENDATA_1)
+#define F_CAUSE_TLS2ULP_PLENDATA_1 V_CAUSE_TLS2ULP_PLENDATA_1(1U)
+
+#define S_CAUSE_TLS2ULP_PLENDATA_0 8
+#define V_CAUSE_TLS2ULP_PLENDATA_0(x) ((x) << S_CAUSE_TLS2ULP_PLENDATA_0)
+#define F_CAUSE_TLS2ULP_PLENDATA_0 V_CAUSE_TLS2ULP_PLENDATA_0(1U)
+
+#define S_CAUSE_PM2ULP_DATA_3 7
+#define V_CAUSE_PM2ULP_DATA_3(x) ((x) << S_CAUSE_PM2ULP_DATA_3)
+#define F_CAUSE_PM2ULP_DATA_3 V_CAUSE_PM2ULP_DATA_3(1U)
+
+#define S_CAUSE_PM2ULP_DATA_2 6
+#define V_CAUSE_PM2ULP_DATA_2(x) ((x) << S_CAUSE_PM2ULP_DATA_2)
+#define F_CAUSE_PM2ULP_DATA_2 V_CAUSE_PM2ULP_DATA_2(1U)
+
+#define S_CAUSE_PM2ULP_DATA_1 5
+#define V_CAUSE_PM2ULP_DATA_1(x) ((x) << S_CAUSE_PM2ULP_DATA_1)
+#define F_CAUSE_PM2ULP_DATA_1 V_CAUSE_PM2ULP_DATA_1(1U)
+
+#define S_CAUSE_PM2ULP_DATA_0 4
+#define V_CAUSE_PM2ULP_DATA_0(x) ((x) << S_CAUSE_PM2ULP_DATA_0)
+#define F_CAUSE_PM2ULP_DATA_0 V_CAUSE_PM2ULP_DATA_0(1U)
+
+#define S_CAUSE_TP2ULP_PCMD_3 3
+#define V_CAUSE_TP2ULP_PCMD_3(x) ((x) << S_CAUSE_TP2ULP_PCMD_3)
+#define F_CAUSE_TP2ULP_PCMD_3 V_CAUSE_TP2ULP_PCMD_3(1U)
+
+#define S_CAUSE_TP2ULP_PCMD_2 2
+#define V_CAUSE_TP2ULP_PCMD_2(x) ((x) << S_CAUSE_TP2ULP_PCMD_2)
+#define F_CAUSE_TP2ULP_PCMD_2 V_CAUSE_TP2ULP_PCMD_2(1U)
+
+#define S_CAUSE_TP2ULP_PCMD_1 1
+#define V_CAUSE_TP2ULP_PCMD_1(x) ((x) << S_CAUSE_TP2ULP_PCMD_1)
+#define F_CAUSE_TP2ULP_PCMD_1 V_CAUSE_TP2ULP_PCMD_1(1U)
+
+#define S_CAUSE_TP2ULP_PCMD_0 0
+#define V_CAUSE_TP2ULP_PCMD_0(x) ((x) << S_CAUSE_TP2ULP_PCMD_0)
+#define F_CAUSE_TP2ULP_PCMD_0 V_CAUSE_TP2ULP_PCMD_0(1U)
+
+#define A_ULP_RX_PERR_ENABLE_INTERFACE 0x191c8
+
+#define S_PERR_ULPRX2SBT_RSPPERR 31
+#define V_PERR_ULPRX2SBT_RSPPERR(x) ((x) << S_PERR_ULPRX2SBT_RSPPERR)
+#define F_PERR_ULPRX2SBT_RSPPERR V_PERR_ULPRX2SBT_RSPPERR(1U)
+
+#define S_PERR_ULPRX2MA_RSPPERR 30
+#define V_PERR_ULPRX2MA_RSPPERR(x) ((x) << S_PERR_ULPRX2MA_RSPPERR)
+#define F_PERR_ULPRX2MA_RSPPERR V_PERR_ULPRX2MA_RSPPERR(1U)
+
+#define S_PERR_PIO_BUS_PERR 29
+#define V_PERR_PIO_BUS_PERR(x) ((x) << S_PERR_PIO_BUS_PERR)
+#define F_PERR_PIO_BUS_PERR V_PERR_PIO_BUS_PERR(1U)
+
+#define S_PERR_PM2ULP_SNOOPDATA_3 19
+#define V_PERR_PM2ULP_SNOOPDATA_3(x) ((x) << S_PERR_PM2ULP_SNOOPDATA_3)
+#define F_PERR_PM2ULP_SNOOPDATA_3 V_PERR_PM2ULP_SNOOPDATA_3(1U)
+
+#define S_PERR_PM2ULP_SNOOPDATA_2 18
+#define V_PERR_PM2ULP_SNOOPDATA_2(x) ((x) << S_PERR_PM2ULP_SNOOPDATA_2)
+#define F_PERR_PM2ULP_SNOOPDATA_2 V_PERR_PM2ULP_SNOOPDATA_2(1U)
+
+#define S_PERR_PM2ULP_SNOOPDATA_1 17
+#define V_PERR_PM2ULP_SNOOPDATA_1(x) ((x) << S_PERR_PM2ULP_SNOOPDATA_1)
+#define F_PERR_PM2ULP_SNOOPDATA_1 V_PERR_PM2ULP_SNOOPDATA_1(1U)
+
+#define S_PERR_PM2ULP_SNOOPDATA_0 16
+#define V_PERR_PM2ULP_SNOOPDATA_0(x) ((x) << S_PERR_PM2ULP_SNOOPDATA_0)
+#define F_PERR_PM2ULP_SNOOPDATA_0 V_PERR_PM2ULP_SNOOPDATA_0(1U)
+
+#define S_PERR_TLS2ULP_DATA_3 15
+#define V_PERR_TLS2ULP_DATA_3(x) ((x) << S_PERR_TLS2ULP_DATA_3)
+#define F_PERR_TLS2ULP_DATA_3 V_PERR_TLS2ULP_DATA_3(1U)
+
+#define S_PERR_TLS2ULP_DATA_2 14
+#define V_PERR_TLS2ULP_DATA_2(x) ((x) << S_PERR_TLS2ULP_DATA_2)
+#define F_PERR_TLS2ULP_DATA_2 V_PERR_TLS2ULP_DATA_2(1U)
+
+#define S_PERR_TLS2ULP_DATA_1 13
+#define V_PERR_TLS2ULP_DATA_1(x) ((x) << S_PERR_TLS2ULP_DATA_1)
+#define F_PERR_TLS2ULP_DATA_1 V_PERR_TLS2ULP_DATA_1(1U)
+
+#define S_PERR_TLS2ULP_DATA_0 12
+#define V_PERR_TLS2ULP_DATA_0(x) ((x) << S_PERR_TLS2ULP_DATA_0)
+#define F_PERR_TLS2ULP_DATA_0 V_PERR_TLS2ULP_DATA_0(1U)
+
+#define S_PERR_TLS2ULP_PLENDATA_3 11
+#define V_PERR_TLS2ULP_PLENDATA_3(x) ((x) << S_PERR_TLS2ULP_PLENDATA_3)
+#define F_PERR_TLS2ULP_PLENDATA_3 V_PERR_TLS2ULP_PLENDATA_3(1U)
+
+#define S_PERR_TLS2ULP_PLENDATA_2 10
+#define V_PERR_TLS2ULP_PLENDATA_2(x) ((x) << S_PERR_TLS2ULP_PLENDATA_2)
+#define F_PERR_TLS2ULP_PLENDATA_2 V_PERR_TLS2ULP_PLENDATA_2(1U)
+
+#define S_PERR_TLS2ULP_PLENDATA_1 9
+#define V_PERR_TLS2ULP_PLENDATA_1(x) ((x) << S_PERR_TLS2ULP_PLENDATA_1)
+#define F_PERR_TLS2ULP_PLENDATA_1 V_PERR_TLS2ULP_PLENDATA_1(1U)
+
+#define S_PERR_TLS2ULP_PLENDATA_0 8
+#define V_PERR_TLS2ULP_PLENDATA_0(x) ((x) << S_PERR_TLS2ULP_PLENDATA_0)
+#define F_PERR_TLS2ULP_PLENDATA_0 V_PERR_TLS2ULP_PLENDATA_0(1U)
+
+#define S_PERR_PM2ULP_DATA_3 7
+#define V_PERR_PM2ULP_DATA_3(x) ((x) << S_PERR_PM2ULP_DATA_3)
+#define F_PERR_PM2ULP_DATA_3 V_PERR_PM2ULP_DATA_3(1U)
+
+#define S_PERR_PM2ULP_DATA_2 6
+#define V_PERR_PM2ULP_DATA_2(x) ((x) << S_PERR_PM2ULP_DATA_2)
+#define F_PERR_PM2ULP_DATA_2 V_PERR_PM2ULP_DATA_2(1U)
+
+#define S_PERR_PM2ULP_DATA_1 5
+#define V_PERR_PM2ULP_DATA_1(x) ((x) << S_PERR_PM2ULP_DATA_1)
+#define F_PERR_PM2ULP_DATA_1 V_PERR_PM2ULP_DATA_1(1U)
+
+#define S_PERR_PM2ULP_DATA_0 4
+#define V_PERR_PM2ULP_DATA_0(x) ((x) << S_PERR_PM2ULP_DATA_0)
+#define F_PERR_PM2ULP_DATA_0 V_PERR_PM2ULP_DATA_0(1U)
+
+#define S_PERR_TP2ULP_PCMD_3 3
+#define V_PERR_TP2ULP_PCMD_3(x) ((x) << S_PERR_TP2ULP_PCMD_3)
+#define F_PERR_TP2ULP_PCMD_3 V_PERR_TP2ULP_PCMD_3(1U)
+
+#define S_PERR_TP2ULP_PCMD_2 2
+#define V_PERR_TP2ULP_PCMD_2(x) ((x) << S_PERR_TP2ULP_PCMD_2)
+#define F_PERR_TP2ULP_PCMD_2 V_PERR_TP2ULP_PCMD_2(1U)
+
+#define S_PERR_TP2ULP_PCMD_1 1
+#define V_PERR_TP2ULP_PCMD_1(x) ((x) << S_PERR_TP2ULP_PCMD_1)
+#define F_PERR_TP2ULP_PCMD_1 V_PERR_TP2ULP_PCMD_1(1U)
+
+#define S_PERR_TP2ULP_PCMD_0 0
+#define V_PERR_TP2ULP_PCMD_0(x) ((x) << S_PERR_TP2ULP_PCMD_0)
+#define F_PERR_TP2ULP_PCMD_0 V_PERR_TP2ULP_PCMD_0(1U)
+
#define A_ULP_RX_SE_CNT_ERR 0x191d0
#define A_ULP_RX_SE_CNT_CLR 0x191d4
@@ -37295,6 +47222,26 @@
#define V_CLRCHAN1(x) ((x) << S_CLRCHAN1)
#define G_CLRCHAN1(x) (((x) >> S_CLRCHAN1) & M_CLRCHAN1)
+#define S_CLRCHAN3 12
+#define M_CLRCHAN3 0xfU
+#define V_CLRCHAN3(x) ((x) << S_CLRCHAN3)
+#define G_CLRCHAN3(x) (((x) >> S_CLRCHAN3) & M_CLRCHAN3)
+
+#define S_CLRCHAN2 8
+#define M_CLRCHAN2 0xfU
+#define V_CLRCHAN2(x) ((x) << S_CLRCHAN2)
+#define G_CLRCHAN2(x) (((x) >> S_CLRCHAN2) & M_CLRCHAN2)
+
+#define S_T7_CLRCHAN1 4
+#define M_T7_CLRCHAN1 0xfU
+#define V_T7_CLRCHAN1(x) ((x) << S_T7_CLRCHAN1)
+#define G_T7_CLRCHAN1(x) (((x) >> S_T7_CLRCHAN1) & M_T7_CLRCHAN1)
+
+#define S_T7_CLRCHAN0 0
+#define M_T7_CLRCHAN0 0xfU
+#define V_T7_CLRCHAN0(x) ((x) << S_T7_CLRCHAN0)
+#define G_T7_CLRCHAN0(x) (((x) >> S_T7_CLRCHAN0) & M_T7_CLRCHAN0)
+
#define A_ULP_RX_SE_CNT_CH0 0x191d8
#define S_SOP_CNT_OUT0 28
@@ -37400,6 +47347,7 @@
#define G_SEL_L(x) (((x) >> S_SEL_L) & M_SEL_L)
#define A_ULP_RX_DBG_DATAH 0x191e4
+#define A_ULP_RX_DBG_DATA 0x191e4
#define A_ULP_RX_DBG_DATAL 0x191e8
#define A_ULP_RX_LA_CHNL 0x19238
@@ -37581,6 +47529,11 @@
#define V_PIO_RDMA_SEND_RQE(x) ((x) << S_PIO_RDMA_SEND_RQE)
#define F_PIO_RDMA_SEND_RQE V_PIO_RDMA_SEND_RQE(1U)
+#define S_TLS_KEYSIZECONF 26
+#define M_TLS_KEYSIZECONF 0x3U
+#define V_TLS_KEYSIZECONF(x) ((x) << S_TLS_KEYSIZECONF)
+#define G_TLS_KEYSIZECONF(x) (((x) >> S_TLS_KEYSIZECONF) & M_TLS_KEYSIZECONF)
+
#define A_ULP_RX_CH0_CGEN 0x19260
#define S_BYPASS_CGEN 7
@@ -37615,7 +47568,61 @@
#define V_RDMA_DATAPATH_CGEN(x) ((x) << S_RDMA_DATAPATH_CGEN)
#define F_RDMA_DATAPATH_CGEN V_RDMA_DATAPATH_CGEN(1U)
+#define A_ULP_RX_CH_CGEN 0x19260
+
+#define S_T7_BYPASS_CGEN 28
+#define M_T7_BYPASS_CGEN 0xfU
+#define V_T7_BYPASS_CGEN(x) ((x) << S_T7_BYPASS_CGEN)
+#define G_T7_BYPASS_CGEN(x) (((x) >> S_T7_BYPASS_CGEN) & M_T7_BYPASS_CGEN)
+
+#define S_T7_TDDP_CGEN 24
+#define M_T7_TDDP_CGEN 0xfU
+#define V_T7_TDDP_CGEN(x) ((x) << S_T7_TDDP_CGEN)
+#define G_T7_TDDP_CGEN(x) (((x) >> S_T7_TDDP_CGEN) & M_T7_TDDP_CGEN)
+
+#define S_T7_ISCSI_CGEN 20
+#define M_T7_ISCSI_CGEN 0xfU
+#define V_T7_ISCSI_CGEN(x) ((x) << S_T7_ISCSI_CGEN)
+#define G_T7_ISCSI_CGEN(x) (((x) >> S_T7_ISCSI_CGEN) & M_T7_ISCSI_CGEN)
+
+#define S_T7_RDMA_CGEN 16
+#define M_T7_RDMA_CGEN 0xfU
+#define V_T7_RDMA_CGEN(x) ((x) << S_T7_RDMA_CGEN)
+#define G_T7_RDMA_CGEN(x) (((x) >> S_T7_RDMA_CGEN) & M_T7_RDMA_CGEN)
+
+#define S_T7_CHANNEL_CGEN 12
+#define M_T7_CHANNEL_CGEN 0xfU
+#define V_T7_CHANNEL_CGEN(x) ((x) << S_T7_CHANNEL_CGEN)
+#define G_T7_CHANNEL_CGEN(x) (((x) >> S_T7_CHANNEL_CGEN) & M_T7_CHANNEL_CGEN)
+
+#define S_T7_ALL_DATAPATH_CGEN 8
+#define M_T7_ALL_DATAPATH_CGEN 0xfU
+#define V_T7_ALL_DATAPATH_CGEN(x) ((x) << S_T7_ALL_DATAPATH_CGEN)
+#define G_T7_ALL_DATAPATH_CGEN(x) (((x) >> S_T7_ALL_DATAPATH_CGEN) & M_T7_ALL_DATAPATH_CGEN)
+
+#define S_T7_T10DIFF_DATAPATH_CGEN 4
+#define M_T7_T10DIFF_DATAPATH_CGEN 0xfU
+#define V_T7_T10DIFF_DATAPATH_CGEN(x) ((x) << S_T7_T10DIFF_DATAPATH_CGEN)
+#define G_T7_T10DIFF_DATAPATH_CGEN(x) (((x) >> S_T7_T10DIFF_DATAPATH_CGEN) & M_T7_T10DIFF_DATAPATH_CGEN)
+
+#define S_T7_RDMA_DATAPATH_CGEN 0
+#define M_T7_RDMA_DATAPATH_CGEN 0xfU
+#define V_T7_RDMA_DATAPATH_CGEN(x) ((x) << S_T7_RDMA_DATAPATH_CGEN)
+#define G_T7_RDMA_DATAPATH_CGEN(x) (((x) >> S_T7_RDMA_DATAPATH_CGEN) & M_T7_RDMA_DATAPATH_CGEN)
+
#define A_ULP_RX_CH1_CGEN 0x19264
+#define A_ULP_RX_CH_CGEN_1 0x19264
+
+#define S_NVME_TCP_CGEN 4
+#define M_NVME_TCP_CGEN 0xfU
+#define V_NVME_TCP_CGEN(x) ((x) << S_NVME_TCP_CGEN)
+#define G_NVME_TCP_CGEN(x) (((x) >> S_NVME_TCP_CGEN) & M_NVME_TCP_CGEN)
+
+#define S_ROCE_CGEN 0
+#define M_ROCE_CGEN 0xfU
+#define V_ROCE_CGEN(x) ((x) << S_ROCE_CGEN)
+#define G_ROCE_CGEN(x) (((x) >> S_ROCE_CGEN) & M_ROCE_CGEN)
+
#define A_ULP_RX_RFE_DISABLE 0x19268
#define S_RQE_LIM_CHECK_RFE_DISABLE 0
@@ -37742,6 +47749,30 @@
#define V_SKIP_MA_REQ_EN0(x) ((x) << S_SKIP_MA_REQ_EN0)
#define F_SKIP_MA_REQ_EN0 V_SKIP_MA_REQ_EN0(1U)
+#define S_CLEAR_CTX_ERR_CNT3 7
+#define V_CLEAR_CTX_ERR_CNT3(x) ((x) << S_CLEAR_CTX_ERR_CNT3)
+#define F_CLEAR_CTX_ERR_CNT3 V_CLEAR_CTX_ERR_CNT3(1U)
+
+#define S_CLEAR_CTX_ERR_CNT2 6
+#define V_CLEAR_CTX_ERR_CNT2(x) ((x) << S_CLEAR_CTX_ERR_CNT2)
+#define F_CLEAR_CTX_ERR_CNT2 V_CLEAR_CTX_ERR_CNT2(1U)
+
+#define S_T7_CLEAR_CTX_ERR_CNT1 5
+#define V_T7_CLEAR_CTX_ERR_CNT1(x) ((x) << S_T7_CLEAR_CTX_ERR_CNT1)
+#define F_T7_CLEAR_CTX_ERR_CNT1 V_T7_CLEAR_CTX_ERR_CNT1(1U)
+
+#define S_T7_CLEAR_CTX_ERR_CNT0 4
+#define V_T7_CLEAR_CTX_ERR_CNT0(x) ((x) << S_T7_CLEAR_CTX_ERR_CNT0)
+#define F_T7_CLEAR_CTX_ERR_CNT0 V_T7_CLEAR_CTX_ERR_CNT0(1U)
+
+#define S_SKIP_MA_REQ_EN3 3
+#define V_SKIP_MA_REQ_EN3(x) ((x) << S_SKIP_MA_REQ_EN3)
+#define F_SKIP_MA_REQ_EN3 V_SKIP_MA_REQ_EN3(1U)
+
+#define S_SKIP_MA_REQ_EN2 2
+#define V_SKIP_MA_REQ_EN2(x) ((x) << S_SKIP_MA_REQ_EN2)
+#define F_SKIP_MA_REQ_EN2 V_SKIP_MA_REQ_EN2(1U)
+
#define A_ULP_RX_CHNL0_CTX_ERROR_COUNT_PER_TID 0x19288
#define A_ULP_RX_CHNL1_CTX_ERROR_COUNT_PER_TID 0x1928c
#define A_ULP_RX_MSN_CHECK_ENABLE 0x19290
@@ -37758,6 +47789,92 @@
#define V_SEND_MSN_CHECK_ENABLE(x) ((x) << S_SEND_MSN_CHECK_ENABLE)
#define F_SEND_MSN_CHECK_ENABLE V_SEND_MSN_CHECK_ENABLE(1U)
+#define A_ULP_RX_SE_CNT_CH2 0x19294
+
+#define S_SOP_CNT_OUT2 28
+#define M_SOP_CNT_OUT2 0xfU
+#define V_SOP_CNT_OUT2(x) ((x) << S_SOP_CNT_OUT2)
+#define G_SOP_CNT_OUT2(x) (((x) >> S_SOP_CNT_OUT2) & M_SOP_CNT_OUT2)
+
+#define S_EOP_CNT_OUT2 24
+#define M_EOP_CNT_OUT2 0xfU
+#define V_EOP_CNT_OUT2(x) ((x) << S_EOP_CNT_OUT2)
+#define G_EOP_CNT_OUT2(x) (((x) >> S_EOP_CNT_OUT2) & M_EOP_CNT_OUT2)
+
+#define S_SOP_CNT_AL2 20
+#define M_SOP_CNT_AL2 0xfU
+#define V_SOP_CNT_AL2(x) ((x) << S_SOP_CNT_AL2)
+#define G_SOP_CNT_AL2(x) (((x) >> S_SOP_CNT_AL2) & M_SOP_CNT_AL2)
+
+#define S_EOP_CNT_AL2 16
+#define M_EOP_CNT_AL2 0xfU
+#define V_EOP_CNT_AL2(x) ((x) << S_EOP_CNT_AL2)
+#define G_EOP_CNT_AL2(x) (((x) >> S_EOP_CNT_AL2) & M_EOP_CNT_AL2)
+
+#define S_SOP_CNT_MR2 12
+#define M_SOP_CNT_MR2 0xfU
+#define V_SOP_CNT_MR2(x) ((x) << S_SOP_CNT_MR2)
+#define G_SOP_CNT_MR2(x) (((x) >> S_SOP_CNT_MR2) & M_SOP_CNT_MR2)
+
+#define S_EOP_CNT_MR2 8
+#define M_EOP_CNT_MR2 0xfU
+#define V_EOP_CNT_MR2(x) ((x) << S_EOP_CNT_MR2)
+#define G_EOP_CNT_MR2(x) (((x) >> S_EOP_CNT_MR2) & M_EOP_CNT_MR2)
+
+#define S_SOP_CNT_IN2 4
+#define M_SOP_CNT_IN2 0xfU
+#define V_SOP_CNT_IN2(x) ((x) << S_SOP_CNT_IN2)
+#define G_SOP_CNT_IN2(x) (((x) >> S_SOP_CNT_IN2) & M_SOP_CNT_IN2)
+
+#define S_EOP_CNT_IN2 0
+#define M_EOP_CNT_IN2 0xfU
+#define V_EOP_CNT_IN2(x) ((x) << S_EOP_CNT_IN2)
+#define G_EOP_CNT_IN2(x) (((x) >> S_EOP_CNT_IN2) & M_EOP_CNT_IN2)
+
+#define A_ULP_RX_SE_CNT_CH3 0x19298
+
+#define S_SOP_CNT_OUT3 28
+#define M_SOP_CNT_OUT3 0xfU
+#define V_SOP_CNT_OUT3(x) ((x) << S_SOP_CNT_OUT3)
+#define G_SOP_CNT_OUT3(x) (((x) >> S_SOP_CNT_OUT3) & M_SOP_CNT_OUT3)
+
+#define S_EOP_CNT_OUT3 24
+#define M_EOP_CNT_OUT3 0xfU
+#define V_EOP_CNT_OUT3(x) ((x) << S_EOP_CNT_OUT3)
+#define G_EOP_CNT_OUT3(x) (((x) >> S_EOP_CNT_OUT3) & M_EOP_CNT_OUT3)
+
+#define S_SOP_CNT_AL3 20
+#define M_SOP_CNT_AL3 0xfU
+#define V_SOP_CNT_AL3(x) ((x) << S_SOP_CNT_AL3)
+#define G_SOP_CNT_AL3(x) (((x) >> S_SOP_CNT_AL3) & M_SOP_CNT_AL3)
+
+#define S_EOP_CNT_AL3 16
+#define M_EOP_CNT_AL3 0xfU
+#define V_EOP_CNT_AL3(x) ((x) << S_EOP_CNT_AL3)
+#define G_EOP_CNT_AL3(x) (((x) >> S_EOP_CNT_AL3) & M_EOP_CNT_AL3)
+
+#define S_SOP_CNT_MR3 12
+#define M_SOP_CNT_MR3 0xfU
+#define V_SOP_CNT_MR3(x) ((x) << S_SOP_CNT_MR3)
+#define G_SOP_CNT_MR3(x) (((x) >> S_SOP_CNT_MR3) & M_SOP_CNT_MR3)
+
+#define S_EOP_CNT_MR3 8
+#define M_EOP_CNT_MR3 0xfU
+#define V_EOP_CNT_MR3(x) ((x) << S_EOP_CNT_MR3)
+#define G_EOP_CNT_MR3(x) (((x) >> S_EOP_CNT_MR3) & M_EOP_CNT_MR3)
+
+#define S_SOP_CNT_IN3 4
+#define M_SOP_CNT_IN3 0xfU
+#define V_SOP_CNT_IN3(x) ((x) << S_SOP_CNT_IN3)
+#define G_SOP_CNT_IN3(x) (((x) >> S_SOP_CNT_IN3) & M_SOP_CNT_IN3)
+
+#define S_EOP_CNT_IN3 0
+#define M_EOP_CNT_IN3 0xfU
+#define V_EOP_CNT_IN3(x) ((x) << S_EOP_CNT_IN3)
+#define G_EOP_CNT_IN3(x) (((x) >> S_EOP_CNT_IN3) & M_EOP_CNT_IN3)
+
+#define A_ULP_RX_CHNL2_CTX_ERROR_COUNT_PER_TID 0x1929c
+#define A_ULP_RX_CHNL3_CTX_ERROR_COUNT_PER_TID 0x192a0
#define A_ULP_RX_TLS_PP_LLIMIT 0x192a4
#define S_TLSPPLLIMIT 6
@@ -37787,6 +47904,933 @@
#define G_TLSKEYULIMIT(x) (((x) >> S_TLSKEYULIMIT) & M_TLSKEYULIMIT)
#define A_ULP_RX_TLS_CTL 0x192bc
+#define A_ULP_RX_RRQ_LLIMIT 0x192c0
+#define A_ULP_RX_RRQ_ULIMIT 0x192c4
+#define A_ULP_RX_NVME_TCP_STAG_LLIMIT 0x192c8
+#define A_ULP_RX_NVME_TCP_STAG_ULIMIT 0x192cc
+#define A_ULP_RX_NVME_TCP_RQ_LLIMIT 0x192d0
+#define A_ULP_RX_NVME_TCP_RQ_ULIMIT 0x192d4
+#define A_ULP_RX_NVME_TCP_PBL_LLIMIT 0x192d8
+#define A_ULP_RX_NVME_TCP_PBL_ULIMIT 0x192dc
+#define A_ULP_RX_NVME_TCP_MAX_LENGTH 0x192e0
+
+#define S_NVME_TCP_MAX_PLEN01 24
+#define M_NVME_TCP_MAX_PLEN01 0xffU
+#define V_NVME_TCP_MAX_PLEN01(x) ((x) << S_NVME_TCP_MAX_PLEN01)
+#define G_NVME_TCP_MAX_PLEN01(x) (((x) >> S_NVME_TCP_MAX_PLEN01) & M_NVME_TCP_MAX_PLEN01)
+
+#define S_NVME_TCP_MAX_PLEN23 16
+#define M_NVME_TCP_MAX_PLEN23 0xffU
+#define V_NVME_TCP_MAX_PLEN23(x) ((x) << S_NVME_TCP_MAX_PLEN23)
+#define G_NVME_TCP_MAX_PLEN23(x) (((x) >> S_NVME_TCP_MAX_PLEN23) & M_NVME_TCP_MAX_PLEN23)
+
+#define S_NVME_TCP_MAX_CMD_PDU_LENGTH 0
+#define M_NVME_TCP_MAX_CMD_PDU_LENGTH 0xffffU
+#define V_NVME_TCP_MAX_CMD_PDU_LENGTH(x) ((x) << S_NVME_TCP_MAX_CMD_PDU_LENGTH)
+#define G_NVME_TCP_MAX_CMD_PDU_LENGTH(x) (((x) >> S_NVME_TCP_MAX_CMD_PDU_LENGTH) & M_NVME_TCP_MAX_CMD_PDU_LENGTH)
+
+#define A_ULP_RX_NVME_TCP_IQE_SIZE 0x192e4
+#define A_ULP_RX_NVME_TCP_NEW_PDU_TYPES 0x192e8
+#define A_ULP_RX_IWARP_PMOF_OPCODES_1 0x192ec
+#define A_ULP_RX_IWARP_PMOF_OPCODES_2 0x192f0
+#define A_ULP_RX_INT_ENABLE_PCMD 0x19300
+
+#define S_ENABLE_PCMD_SFIFO_3 30
+#define V_ENABLE_PCMD_SFIFO_3(x) ((x) << S_ENABLE_PCMD_SFIFO_3)
+#define F_ENABLE_PCMD_SFIFO_3 V_ENABLE_PCMD_SFIFO_3(1U)
+
+#define S_ENABLE_PCMD_FIFO_3 29
+#define V_ENABLE_PCMD_FIFO_3(x) ((x) << S_ENABLE_PCMD_FIFO_3)
+#define F_ENABLE_PCMD_FIFO_3 V_ENABLE_PCMD_FIFO_3(1U)
+
+#define S_ENABLE_PCMD_DDP_HINT_3 28
+#define V_ENABLE_PCMD_DDP_HINT_3(x) ((x) << S_ENABLE_PCMD_DDP_HINT_3)
+#define F_ENABLE_PCMD_DDP_HINT_3 V_ENABLE_PCMD_DDP_HINT_3(1U)
+
+#define S_ENABLE_PCMD_TPT_3 27
+#define V_ENABLE_PCMD_TPT_3(x) ((x) << S_ENABLE_PCMD_TPT_3)
+#define F_ENABLE_PCMD_TPT_3 V_ENABLE_PCMD_TPT_3(1U)
+
+#define S_ENABLE_PCMD_DDP_3 26
+#define V_ENABLE_PCMD_DDP_3(x) ((x) << S_ENABLE_PCMD_DDP_3)
+#define F_ENABLE_PCMD_DDP_3 V_ENABLE_PCMD_DDP_3(1U)
+
+#define S_ENABLE_PCMD_MPAR_3 25
+#define V_ENABLE_PCMD_MPAR_3(x) ((x) << S_ENABLE_PCMD_MPAR_3)
+#define F_ENABLE_PCMD_MPAR_3 V_ENABLE_PCMD_MPAR_3(1U)
+
+#define S_ENABLE_PCMD_MPAC_3 24
+#define V_ENABLE_PCMD_MPAC_3(x) ((x) << S_ENABLE_PCMD_MPAC_3)
+#define F_ENABLE_PCMD_MPAC_3 V_ENABLE_PCMD_MPAC_3(1U)
+
+#define S_ENABLE_PCMD_SFIFO_2 22
+#define V_ENABLE_PCMD_SFIFO_2(x) ((x) << S_ENABLE_PCMD_SFIFO_2)
+#define F_ENABLE_PCMD_SFIFO_2 V_ENABLE_PCMD_SFIFO_2(1U)
+
+#define S_ENABLE_PCMD_FIFO_2 21
+#define V_ENABLE_PCMD_FIFO_2(x) ((x) << S_ENABLE_PCMD_FIFO_2)
+#define F_ENABLE_PCMD_FIFO_2 V_ENABLE_PCMD_FIFO_2(1U)
+
+#define S_ENABLE_PCMD_DDP_HINT_2 20
+#define V_ENABLE_PCMD_DDP_HINT_2(x) ((x) << S_ENABLE_PCMD_DDP_HINT_2)
+#define F_ENABLE_PCMD_DDP_HINT_2 V_ENABLE_PCMD_DDP_HINT_2(1U)
+
+#define S_ENABLE_PCMD_TPT_2 19
+#define V_ENABLE_PCMD_TPT_2(x) ((x) << S_ENABLE_PCMD_TPT_2)
+#define F_ENABLE_PCMD_TPT_2 V_ENABLE_PCMD_TPT_2(1U)
+
+#define S_ENABLE_PCMD_DDP_2 18
+#define V_ENABLE_PCMD_DDP_2(x) ((x) << S_ENABLE_PCMD_DDP_2)
+#define F_ENABLE_PCMD_DDP_2 V_ENABLE_PCMD_DDP_2(1U)
+
+#define S_ENABLE_PCMD_MPAR_2 17
+#define V_ENABLE_PCMD_MPAR_2(x) ((x) << S_ENABLE_PCMD_MPAR_2)
+#define F_ENABLE_PCMD_MPAR_2 V_ENABLE_PCMD_MPAR_2(1U)
+
+#define S_ENABLE_PCMD_MPAC_2 16
+#define V_ENABLE_PCMD_MPAC_2(x) ((x) << S_ENABLE_PCMD_MPAC_2)
+#define F_ENABLE_PCMD_MPAC_2 V_ENABLE_PCMD_MPAC_2(1U)
+
+#define S_ENABLE_PCMD_SFIFO_1 14
+#define V_ENABLE_PCMD_SFIFO_1(x) ((x) << S_ENABLE_PCMD_SFIFO_1)
+#define F_ENABLE_PCMD_SFIFO_1 V_ENABLE_PCMD_SFIFO_1(1U)
+
+#define S_ENABLE_PCMD_FIFO_1 13
+#define V_ENABLE_PCMD_FIFO_1(x) ((x) << S_ENABLE_PCMD_FIFO_1)
+#define F_ENABLE_PCMD_FIFO_1 V_ENABLE_PCMD_FIFO_1(1U)
+
+#define S_ENABLE_PCMD_DDP_HINT_1 12
+#define V_ENABLE_PCMD_DDP_HINT_1(x) ((x) << S_ENABLE_PCMD_DDP_HINT_1)
+#define F_ENABLE_PCMD_DDP_HINT_1 V_ENABLE_PCMD_DDP_HINT_1(1U)
+
+#define S_ENABLE_PCMD_TPT_1 11
+#define V_ENABLE_PCMD_TPT_1(x) ((x) << S_ENABLE_PCMD_TPT_1)
+#define F_ENABLE_PCMD_TPT_1 V_ENABLE_PCMD_TPT_1(1U)
+
+#define S_ENABLE_PCMD_DDP_1 10
+#define V_ENABLE_PCMD_DDP_1(x) ((x) << S_ENABLE_PCMD_DDP_1)
+#define F_ENABLE_PCMD_DDP_1 V_ENABLE_PCMD_DDP_1(1U)
+
+#define S_ENABLE_PCMD_MPAR_1 9
+#define V_ENABLE_PCMD_MPAR_1(x) ((x) << S_ENABLE_PCMD_MPAR_1)
+#define F_ENABLE_PCMD_MPAR_1 V_ENABLE_PCMD_MPAR_1(1U)
+
+#define S_ENABLE_PCMD_MPAC_1 8
+#define V_ENABLE_PCMD_MPAC_1(x) ((x) << S_ENABLE_PCMD_MPAC_1)
+#define F_ENABLE_PCMD_MPAC_1 V_ENABLE_PCMD_MPAC_1(1U)
+
+#define S_ENABLE_PCMD_SFIFO_0 6
+#define V_ENABLE_PCMD_SFIFO_0(x) ((x) << S_ENABLE_PCMD_SFIFO_0)
+#define F_ENABLE_PCMD_SFIFO_0 V_ENABLE_PCMD_SFIFO_0(1U)
+
+#define S_ENABLE_PCMD_FIFO_0 5
+#define V_ENABLE_PCMD_FIFO_0(x) ((x) << S_ENABLE_PCMD_FIFO_0)
+#define F_ENABLE_PCMD_FIFO_0 V_ENABLE_PCMD_FIFO_0(1U)
+
+#define S_ENABLE_PCMD_DDP_HINT_0 4
+#define V_ENABLE_PCMD_DDP_HINT_0(x) ((x) << S_ENABLE_PCMD_DDP_HINT_0)
+#define F_ENABLE_PCMD_DDP_HINT_0 V_ENABLE_PCMD_DDP_HINT_0(1U)
+
+#define S_ENABLE_PCMD_TPT_0 3
+#define V_ENABLE_PCMD_TPT_0(x) ((x) << S_ENABLE_PCMD_TPT_0)
+#define F_ENABLE_PCMD_TPT_0 V_ENABLE_PCMD_TPT_0(1U)
+
+#define S_ENABLE_PCMD_DDP_0 2
+#define V_ENABLE_PCMD_DDP_0(x) ((x) << S_ENABLE_PCMD_DDP_0)
+#define F_ENABLE_PCMD_DDP_0 V_ENABLE_PCMD_DDP_0(1U)
+
+#define S_ENABLE_PCMD_MPAR_0 1
+#define V_ENABLE_PCMD_MPAR_0(x) ((x) << S_ENABLE_PCMD_MPAR_0)
+#define F_ENABLE_PCMD_MPAR_0 V_ENABLE_PCMD_MPAR_0(1U)
+
+#define S_ENABLE_PCMD_MPAC_0 0
+#define V_ENABLE_PCMD_MPAC_0(x) ((x) << S_ENABLE_PCMD_MPAC_0)
+#define F_ENABLE_PCMD_MPAC_0 V_ENABLE_PCMD_MPAC_0(1U)
+
+#define A_ULP_RX_INT_CAUSE_PCMD 0x19304
+
+#define S_CAUSE_PCMD_SFIFO_3 30
+#define V_CAUSE_PCMD_SFIFO_3(x) ((x) << S_CAUSE_PCMD_SFIFO_3)
+#define F_CAUSE_PCMD_SFIFO_3 V_CAUSE_PCMD_SFIFO_3(1U)
+
+#define S_CAUSE_PCMD_FIFO_3 29
+#define V_CAUSE_PCMD_FIFO_3(x) ((x) << S_CAUSE_PCMD_FIFO_3)
+#define F_CAUSE_PCMD_FIFO_3 V_CAUSE_PCMD_FIFO_3(1U)
+
+#define S_CAUSE_PCMD_DDP_HINT_3 28
+#define V_CAUSE_PCMD_DDP_HINT_3(x) ((x) << S_CAUSE_PCMD_DDP_HINT_3)
+#define F_CAUSE_PCMD_DDP_HINT_3 V_CAUSE_PCMD_DDP_HINT_3(1U)
+
+#define S_CAUSE_PCMD_TPT_3 27
+#define V_CAUSE_PCMD_TPT_3(x) ((x) << S_CAUSE_PCMD_TPT_3)
+#define F_CAUSE_PCMD_TPT_3 V_CAUSE_PCMD_TPT_3(1U)
+
+#define S_CAUSE_PCMD_DDP_3 26
+#define V_CAUSE_PCMD_DDP_3(x) ((x) << S_CAUSE_PCMD_DDP_3)
+#define F_CAUSE_PCMD_DDP_3 V_CAUSE_PCMD_DDP_3(1U)
+
+#define S_CAUSE_PCMD_MPAR_3 25
+#define V_CAUSE_PCMD_MPAR_3(x) ((x) << S_CAUSE_PCMD_MPAR_3)
+#define F_CAUSE_PCMD_MPAR_3 V_CAUSE_PCMD_MPAR_3(1U)
+
+#define S_CAUSE_PCMD_MPAC_3 24
+#define V_CAUSE_PCMD_MPAC_3(x) ((x) << S_CAUSE_PCMD_MPAC_3)
+#define F_CAUSE_PCMD_MPAC_3 V_CAUSE_PCMD_MPAC_3(1U)
+
+#define S_CAUSE_PCMD_SFIFO_2 22
+#define V_CAUSE_PCMD_SFIFO_2(x) ((x) << S_CAUSE_PCMD_SFIFO_2)
+#define F_CAUSE_PCMD_SFIFO_2 V_CAUSE_PCMD_SFIFO_2(1U)
+
+#define S_CAUSE_PCMD_FIFO_2 21
+#define V_CAUSE_PCMD_FIFO_2(x) ((x) << S_CAUSE_PCMD_FIFO_2)
+#define F_CAUSE_PCMD_FIFO_2 V_CAUSE_PCMD_FIFO_2(1U)
+
+#define S_CAUSE_PCMD_DDP_HINT_2 20
+#define V_CAUSE_PCMD_DDP_HINT_2(x) ((x) << S_CAUSE_PCMD_DDP_HINT_2)
+#define F_CAUSE_PCMD_DDP_HINT_2 V_CAUSE_PCMD_DDP_HINT_2(1U)
+
+#define S_CAUSE_PCMD_TPT_2 19
+#define V_CAUSE_PCMD_TPT_2(x) ((x) << S_CAUSE_PCMD_TPT_2)
+#define F_CAUSE_PCMD_TPT_2 V_CAUSE_PCMD_TPT_2(1U)
+
+#define S_CAUSE_PCMD_DDP_2 18
+#define V_CAUSE_PCMD_DDP_2(x) ((x) << S_CAUSE_PCMD_DDP_2)
+#define F_CAUSE_PCMD_DDP_2 V_CAUSE_PCMD_DDP_2(1U)
+
+#define S_CAUSE_PCMD_MPAR_2 17
+#define V_CAUSE_PCMD_MPAR_2(x) ((x) << S_CAUSE_PCMD_MPAR_2)
+#define F_CAUSE_PCMD_MPAR_2 V_CAUSE_PCMD_MPAR_2(1U)
+
+#define S_CAUSE_PCMD_MPAC_2 16
+#define V_CAUSE_PCMD_MPAC_2(x) ((x) << S_CAUSE_PCMD_MPAC_2)
+#define F_CAUSE_PCMD_MPAC_2 V_CAUSE_PCMD_MPAC_2(1U)
+
+#define S_CAUSE_PCMD_SFIFO_1 14
+#define V_CAUSE_PCMD_SFIFO_1(x) ((x) << S_CAUSE_PCMD_SFIFO_1)
+#define F_CAUSE_PCMD_SFIFO_1 V_CAUSE_PCMD_SFIFO_1(1U)
+
+#define S_CAUSE_PCMD_FIFO_1 13
+#define V_CAUSE_PCMD_FIFO_1(x) ((x) << S_CAUSE_PCMD_FIFO_1)
+#define F_CAUSE_PCMD_FIFO_1 V_CAUSE_PCMD_FIFO_1(1U)
+
+#define S_CAUSE_PCMD_DDP_HINT_1 12
+#define V_CAUSE_PCMD_DDP_HINT_1(x) ((x) << S_CAUSE_PCMD_DDP_HINT_1)
+#define F_CAUSE_PCMD_DDP_HINT_1 V_CAUSE_PCMD_DDP_HINT_1(1U)
+
+#define S_CAUSE_PCMD_TPT_1 11
+#define V_CAUSE_PCMD_TPT_1(x) ((x) << S_CAUSE_PCMD_TPT_1)
+#define F_CAUSE_PCMD_TPT_1 V_CAUSE_PCMD_TPT_1(1U)
+
+#define S_CAUSE_PCMD_DDP_1 10
+#define V_CAUSE_PCMD_DDP_1(x) ((x) << S_CAUSE_PCMD_DDP_1)
+#define F_CAUSE_PCMD_DDP_1 V_CAUSE_PCMD_DDP_1(1U)
+
+#define S_CAUSE_PCMD_MPAR_1 9
+#define V_CAUSE_PCMD_MPAR_1(x) ((x) << S_CAUSE_PCMD_MPAR_1)
+#define F_CAUSE_PCMD_MPAR_1 V_CAUSE_PCMD_MPAR_1(1U)
+
+#define S_CAUSE_PCMD_MPAC_1 8
+#define V_CAUSE_PCMD_MPAC_1(x) ((x) << S_CAUSE_PCMD_MPAC_1)
+#define F_CAUSE_PCMD_MPAC_1 V_CAUSE_PCMD_MPAC_1(1U)
+
+#define S_CAUSE_PCMD_SFIFO_0 6
+#define V_CAUSE_PCMD_SFIFO_0(x) ((x) << S_CAUSE_PCMD_SFIFO_0)
+#define F_CAUSE_PCMD_SFIFO_0 V_CAUSE_PCMD_SFIFO_0(1U)
+
+#define S_CAUSE_PCMD_FIFO_0 5
+#define V_CAUSE_PCMD_FIFO_0(x) ((x) << S_CAUSE_PCMD_FIFO_0)
+#define F_CAUSE_PCMD_FIFO_0 V_CAUSE_PCMD_FIFO_0(1U)
+
+#define S_CAUSE_PCMD_DDP_HINT_0 4
+#define V_CAUSE_PCMD_DDP_HINT_0(x) ((x) << S_CAUSE_PCMD_DDP_HINT_0)
+#define F_CAUSE_PCMD_DDP_HINT_0 V_CAUSE_PCMD_DDP_HINT_0(1U)
+
+#define S_CAUSE_PCMD_TPT_0 3
+#define V_CAUSE_PCMD_TPT_0(x) ((x) << S_CAUSE_PCMD_TPT_0)
+#define F_CAUSE_PCMD_TPT_0 V_CAUSE_PCMD_TPT_0(1U)
+
+#define S_CAUSE_PCMD_DDP_0 2
+#define V_CAUSE_PCMD_DDP_0(x) ((x) << S_CAUSE_PCMD_DDP_0)
+#define F_CAUSE_PCMD_DDP_0 V_CAUSE_PCMD_DDP_0(1U)
+
+#define S_CAUSE_PCMD_MPAR_0 1
+#define V_CAUSE_PCMD_MPAR_0(x) ((x) << S_CAUSE_PCMD_MPAR_0)
+#define F_CAUSE_PCMD_MPAR_0 V_CAUSE_PCMD_MPAR_0(1U)
+
+#define S_CAUSE_PCMD_MPAC_0 0
+#define V_CAUSE_PCMD_MPAC_0(x) ((x) << S_CAUSE_PCMD_MPAC_0)
+#define F_CAUSE_PCMD_MPAC_0 V_CAUSE_PCMD_MPAC_0(1U)
+
+#define A_ULP_RX_PERR_ENABLE_PCMD 0x19308
+
+#define S_PERR_ENABLE_PCMD_SFIFO_3 30
+#define V_PERR_ENABLE_PCMD_SFIFO_3(x) ((x) << S_PERR_ENABLE_PCMD_SFIFO_3)
+#define F_PERR_ENABLE_PCMD_SFIFO_3 V_PERR_ENABLE_PCMD_SFIFO_3(1U)
+
+#define S_PERR_ENABLE_PCMD_FIFO_3 29
+#define V_PERR_ENABLE_PCMD_FIFO_3(x) ((x) << S_PERR_ENABLE_PCMD_FIFO_3)
+#define F_PERR_ENABLE_PCMD_FIFO_3 V_PERR_ENABLE_PCMD_FIFO_3(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_HINT_3 28
+#define V_PERR_ENABLE_PCMD_DDP_HINT_3(x) ((x) << S_PERR_ENABLE_PCMD_DDP_HINT_3)
+#define F_PERR_ENABLE_PCMD_DDP_HINT_3 V_PERR_ENABLE_PCMD_DDP_HINT_3(1U)
+
+#define S_PERR_ENABLE_PCMD_TPT_3 27
+#define V_PERR_ENABLE_PCMD_TPT_3(x) ((x) << S_PERR_ENABLE_PCMD_TPT_3)
+#define F_PERR_ENABLE_PCMD_TPT_3 V_PERR_ENABLE_PCMD_TPT_3(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_3 26
+#define V_PERR_ENABLE_PCMD_DDP_3(x) ((x) << S_PERR_ENABLE_PCMD_DDP_3)
+#define F_PERR_ENABLE_PCMD_DDP_3 V_PERR_ENABLE_PCMD_DDP_3(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAR_3 25
+#define V_PERR_ENABLE_PCMD_MPAR_3(x) ((x) << S_PERR_ENABLE_PCMD_MPAR_3)
+#define F_PERR_ENABLE_PCMD_MPAR_3 V_PERR_ENABLE_PCMD_MPAR_3(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAC_3 24
+#define V_PERR_ENABLE_PCMD_MPAC_3(x) ((x) << S_PERR_ENABLE_PCMD_MPAC_3)
+#define F_PERR_ENABLE_PCMD_MPAC_3 V_PERR_ENABLE_PCMD_MPAC_3(1U)
+
+#define S_PERR_ENABLE_PCMD_SFIFO_2 22
+#define V_PERR_ENABLE_PCMD_SFIFO_2(x) ((x) << S_PERR_ENABLE_PCMD_SFIFO_2)
+#define F_PERR_ENABLE_PCMD_SFIFO_2 V_PERR_ENABLE_PCMD_SFIFO_2(1U)
+
+#define S_PERR_ENABLE_PCMD_FIFO_2 21
+#define V_PERR_ENABLE_PCMD_FIFO_2(x) ((x) << S_PERR_ENABLE_PCMD_FIFO_2)
+#define F_PERR_ENABLE_PCMD_FIFO_2 V_PERR_ENABLE_PCMD_FIFO_2(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_HINT_2 20
+#define V_PERR_ENABLE_PCMD_DDP_HINT_2(x) ((x) << S_PERR_ENABLE_PCMD_DDP_HINT_2)
+#define F_PERR_ENABLE_PCMD_DDP_HINT_2 V_PERR_ENABLE_PCMD_DDP_HINT_2(1U)
+
+#define S_PERR_ENABLE_PCMD_TPT_2 19
+#define V_PERR_ENABLE_PCMD_TPT_2(x) ((x) << S_PERR_ENABLE_PCMD_TPT_2)
+#define F_PERR_ENABLE_PCMD_TPT_2 V_PERR_ENABLE_PCMD_TPT_2(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_2 18
+#define V_PERR_ENABLE_PCMD_DDP_2(x) ((x) << S_PERR_ENABLE_PCMD_DDP_2)
+#define F_PERR_ENABLE_PCMD_DDP_2 V_PERR_ENABLE_PCMD_DDP_2(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAR_2 17
+#define V_PERR_ENABLE_PCMD_MPAR_2(x) ((x) << S_PERR_ENABLE_PCMD_MPAR_2)
+#define F_PERR_ENABLE_PCMD_MPAR_2 V_PERR_ENABLE_PCMD_MPAR_2(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAC_2 16
+#define V_PERR_ENABLE_PCMD_MPAC_2(x) ((x) << S_PERR_ENABLE_PCMD_MPAC_2)
+#define F_PERR_ENABLE_PCMD_MPAC_2 V_PERR_ENABLE_PCMD_MPAC_2(1U)
+
+#define S_PERR_ENABLE_PCMD_SFIFO_1 14
+#define V_PERR_ENABLE_PCMD_SFIFO_1(x) ((x) << S_PERR_ENABLE_PCMD_SFIFO_1)
+#define F_PERR_ENABLE_PCMD_SFIFO_1 V_PERR_ENABLE_PCMD_SFIFO_1(1U)
+
+#define S_PERR_ENABLE_PCMD_FIFO_1 13
+#define V_PERR_ENABLE_PCMD_FIFO_1(x) ((x) << S_PERR_ENABLE_PCMD_FIFO_1)
+#define F_PERR_ENABLE_PCMD_FIFO_1 V_PERR_ENABLE_PCMD_FIFO_1(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_HINT_1 12
+#define V_PERR_ENABLE_PCMD_DDP_HINT_1(x) ((x) << S_PERR_ENABLE_PCMD_DDP_HINT_1)
+#define F_PERR_ENABLE_PCMD_DDP_HINT_1 V_PERR_ENABLE_PCMD_DDP_HINT_1(1U)
+
+#define S_PERR_ENABLE_PCMD_TPT_1 11
+#define V_PERR_ENABLE_PCMD_TPT_1(x) ((x) << S_PERR_ENABLE_PCMD_TPT_1)
+#define F_PERR_ENABLE_PCMD_TPT_1 V_PERR_ENABLE_PCMD_TPT_1(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_1 10
+#define V_PERR_ENABLE_PCMD_DDP_1(x) ((x) << S_PERR_ENABLE_PCMD_DDP_1)
+#define F_PERR_ENABLE_PCMD_DDP_1 V_PERR_ENABLE_PCMD_DDP_1(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAR_1 9
+#define V_PERR_ENABLE_PCMD_MPAR_1(x) ((x) << S_PERR_ENABLE_PCMD_MPAR_1)
+#define F_PERR_ENABLE_PCMD_MPAR_1 V_PERR_ENABLE_PCMD_MPAR_1(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAC_1 8
+#define V_PERR_ENABLE_PCMD_MPAC_1(x) ((x) << S_PERR_ENABLE_PCMD_MPAC_1)
+#define F_PERR_ENABLE_PCMD_MPAC_1 V_PERR_ENABLE_PCMD_MPAC_1(1U)
+
+#define S_PERR_ENABLE_PCMD_SFIFO_0 6
+#define V_PERR_ENABLE_PCMD_SFIFO_0(x) ((x) << S_PERR_ENABLE_PCMD_SFIFO_0)
+#define F_PERR_ENABLE_PCMD_SFIFO_0 V_PERR_ENABLE_PCMD_SFIFO_0(1U)
+
+#define S_PERR_ENABLE_PCMD_FIFO_0 5
+#define V_PERR_ENABLE_PCMD_FIFO_0(x) ((x) << S_PERR_ENABLE_PCMD_FIFO_0)
+#define F_PERR_ENABLE_PCMD_FIFO_0 V_PERR_ENABLE_PCMD_FIFO_0(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_HINT_0 4
+#define V_PERR_ENABLE_PCMD_DDP_HINT_0(x) ((x) << S_PERR_ENABLE_PCMD_DDP_HINT_0)
+#define F_PERR_ENABLE_PCMD_DDP_HINT_0 V_PERR_ENABLE_PCMD_DDP_HINT_0(1U)
+
+#define S_PERR_ENABLE_PCMD_TPT_0 3
+#define V_PERR_ENABLE_PCMD_TPT_0(x) ((x) << S_PERR_ENABLE_PCMD_TPT_0)
+#define F_PERR_ENABLE_PCMD_TPT_0 V_PERR_ENABLE_PCMD_TPT_0(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_0 2
+#define V_PERR_ENABLE_PCMD_DDP_0(x) ((x) << S_PERR_ENABLE_PCMD_DDP_0)
+#define F_PERR_ENABLE_PCMD_DDP_0 V_PERR_ENABLE_PCMD_DDP_0(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAR_0 1
+#define V_PERR_ENABLE_PCMD_MPAR_0(x) ((x) << S_PERR_ENABLE_PCMD_MPAR_0)
+#define F_PERR_ENABLE_PCMD_MPAR_0 V_PERR_ENABLE_PCMD_MPAR_0(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAC_0 0
+#define V_PERR_ENABLE_PCMD_MPAC_0(x) ((x) << S_PERR_ENABLE_PCMD_MPAC_0)
+#define F_PERR_ENABLE_PCMD_MPAC_0 V_PERR_ENABLE_PCMD_MPAC_0(1U)
+
+#define A_ULP_RX_INT_ENABLE_DATA 0x19310
+
+#define S_ENABLE_DATA_SNOOP_3 29
+#define V_ENABLE_DATA_SNOOP_3(x) ((x) << S_ENABLE_DATA_SNOOP_3)
+#define F_ENABLE_DATA_SNOOP_3 V_ENABLE_DATA_SNOOP_3(1U)
+
+#define S_ENABLE_DATA_SFIFO_3 28
+#define V_ENABLE_DATA_SFIFO_3(x) ((x) << S_ENABLE_DATA_SFIFO_3)
+#define F_ENABLE_DATA_SFIFO_3 V_ENABLE_DATA_SFIFO_3(1U)
+
+#define S_ENABLE_DATA_FIFO_3 27
+#define V_ENABLE_DATA_FIFO_3(x) ((x) << S_ENABLE_DATA_FIFO_3)
+#define F_ENABLE_DATA_FIFO_3 V_ENABLE_DATA_FIFO_3(1U)
+
+#define S_ENABLE_DATA_DDP_3 26
+#define V_ENABLE_DATA_DDP_3(x) ((x) << S_ENABLE_DATA_DDP_3)
+#define F_ENABLE_DATA_DDP_3 V_ENABLE_DATA_DDP_3(1U)
+
+#define S_ENABLE_DATA_CTX_3 25
+#define V_ENABLE_DATA_CTX_3(x) ((x) << S_ENABLE_DATA_CTX_3)
+#define F_ENABLE_DATA_CTX_3 V_ENABLE_DATA_CTX_3(1U)
+
+#define S_ENABLE_DATA_PARSER_3 24
+#define V_ENABLE_DATA_PARSER_3(x) ((x) << S_ENABLE_DATA_PARSER_3)
+#define F_ENABLE_DATA_PARSER_3 V_ENABLE_DATA_PARSER_3(1U)
+
+#define S_ENABLE_DATA_SNOOP_2 21
+#define V_ENABLE_DATA_SNOOP_2(x) ((x) << S_ENABLE_DATA_SNOOP_2)
+#define F_ENABLE_DATA_SNOOP_2 V_ENABLE_DATA_SNOOP_2(1U)
+
+#define S_ENABLE_DATA_SFIFO_2 20
+#define V_ENABLE_DATA_SFIFO_2(x) ((x) << S_ENABLE_DATA_SFIFO_2)
+#define F_ENABLE_DATA_SFIFO_2 V_ENABLE_DATA_SFIFO_2(1U)
+
+#define S_ENABLE_DATA_FIFO_2 19
+#define V_ENABLE_DATA_FIFO_2(x) ((x) << S_ENABLE_DATA_FIFO_2)
+#define F_ENABLE_DATA_FIFO_2 V_ENABLE_DATA_FIFO_2(1U)
+
+#define S_ENABLE_DATA_DDP_2 18
+#define V_ENABLE_DATA_DDP_2(x) ((x) << S_ENABLE_DATA_DDP_2)
+#define F_ENABLE_DATA_DDP_2 V_ENABLE_DATA_DDP_2(1U)
+
+#define S_ENABLE_DATA_CTX_2 17
+#define V_ENABLE_DATA_CTX_2(x) ((x) << S_ENABLE_DATA_CTX_2)
+#define F_ENABLE_DATA_CTX_2 V_ENABLE_DATA_CTX_2(1U)
+
+#define S_ENABLE_DATA_PARSER_2 16
+#define V_ENABLE_DATA_PARSER_2(x) ((x) << S_ENABLE_DATA_PARSER_2)
+#define F_ENABLE_DATA_PARSER_2 V_ENABLE_DATA_PARSER_2(1U)
+
+#define S_ENABLE_DATA_SNOOP_1 13
+#define V_ENABLE_DATA_SNOOP_1(x) ((x) << S_ENABLE_DATA_SNOOP_1)
+#define F_ENABLE_DATA_SNOOP_1 V_ENABLE_DATA_SNOOP_1(1U)
+
+#define S_ENABLE_DATA_SFIFO_1 12
+#define V_ENABLE_DATA_SFIFO_1(x) ((x) << S_ENABLE_DATA_SFIFO_1)
+#define F_ENABLE_DATA_SFIFO_1 V_ENABLE_DATA_SFIFO_1(1U)
+
+#define S_ENABLE_DATA_FIFO_1 11
+#define V_ENABLE_DATA_FIFO_1(x) ((x) << S_ENABLE_DATA_FIFO_1)
+#define F_ENABLE_DATA_FIFO_1 V_ENABLE_DATA_FIFO_1(1U)
+
+#define S_ENABLE_DATA_DDP_1 10
+#define V_ENABLE_DATA_DDP_1(x) ((x) << S_ENABLE_DATA_DDP_1)
+#define F_ENABLE_DATA_DDP_1 V_ENABLE_DATA_DDP_1(1U)
+
+#define S_ENABLE_DATA_CTX_1 9
+#define V_ENABLE_DATA_CTX_1(x) ((x) << S_ENABLE_DATA_CTX_1)
+#define F_ENABLE_DATA_CTX_1 V_ENABLE_DATA_CTX_1(1U)
+
+#define S_ENABLE_DATA_PARSER_1 8
+#define V_ENABLE_DATA_PARSER_1(x) ((x) << S_ENABLE_DATA_PARSER_1)
+#define F_ENABLE_DATA_PARSER_1 V_ENABLE_DATA_PARSER_1(1U)
+
+#define S_ENABLE_DATA_SNOOP_0 5
+#define V_ENABLE_DATA_SNOOP_0(x) ((x) << S_ENABLE_DATA_SNOOP_0)
+#define F_ENABLE_DATA_SNOOP_0 V_ENABLE_DATA_SNOOP_0(1U)
+
+#define S_ENABLE_DATA_SFIFO_0 4
+#define V_ENABLE_DATA_SFIFO_0(x) ((x) << S_ENABLE_DATA_SFIFO_0)
+#define F_ENABLE_DATA_SFIFO_0 V_ENABLE_DATA_SFIFO_0(1U)
+
+#define S_ENABLE_DATA_FIFO_0 3
+#define V_ENABLE_DATA_FIFO_0(x) ((x) << S_ENABLE_DATA_FIFO_0)
+#define F_ENABLE_DATA_FIFO_0 V_ENABLE_DATA_FIFO_0(1U)
+
+#define S_ENABLE_DATA_DDP_0 2
+#define V_ENABLE_DATA_DDP_0(x) ((x) << S_ENABLE_DATA_DDP_0)
+#define F_ENABLE_DATA_DDP_0 V_ENABLE_DATA_DDP_0(1U)
+
+#define S_ENABLE_DATA_CTX_0 1
+#define V_ENABLE_DATA_CTX_0(x) ((x) << S_ENABLE_DATA_CTX_0)
+#define F_ENABLE_DATA_CTX_0 V_ENABLE_DATA_CTX_0(1U)
+
+#define S_ENABLE_DATA_PARSER_0 0
+#define V_ENABLE_DATA_PARSER_0(x) ((x) << S_ENABLE_DATA_PARSER_0)
+#define F_ENABLE_DATA_PARSER_0 V_ENABLE_DATA_PARSER_0(1U)
+
+#define A_ULP_RX_INT_CAUSE_DATA 0x19314
+
+#define S_CAUSE_DATA_SNOOP_3 29
+#define V_CAUSE_DATA_SNOOP_3(x) ((x) << S_CAUSE_DATA_SNOOP_3)
+#define F_CAUSE_DATA_SNOOP_3 V_CAUSE_DATA_SNOOP_3(1U)
+
+#define S_CAUSE_DATA_SFIFO_3 28
+#define V_CAUSE_DATA_SFIFO_3(x) ((x) << S_CAUSE_DATA_SFIFO_3)
+#define F_CAUSE_DATA_SFIFO_3 V_CAUSE_DATA_SFIFO_3(1U)
+
+#define S_CAUSE_DATA_FIFO_3 27
+#define V_CAUSE_DATA_FIFO_3(x) ((x) << S_CAUSE_DATA_FIFO_3)
+#define F_CAUSE_DATA_FIFO_3 V_CAUSE_DATA_FIFO_3(1U)
+
+#define S_CAUSE_DATA_DDP_3 26
+#define V_CAUSE_DATA_DDP_3(x) ((x) << S_CAUSE_DATA_DDP_3)
+#define F_CAUSE_DATA_DDP_3 V_CAUSE_DATA_DDP_3(1U)
+
+#define S_CAUSE_DATA_CTX_3 25
+#define V_CAUSE_DATA_CTX_3(x) ((x) << S_CAUSE_DATA_CTX_3)
+#define F_CAUSE_DATA_CTX_3 V_CAUSE_DATA_CTX_3(1U)
+
+#define S_CAUSE_DATA_PARSER_3 24
+#define V_CAUSE_DATA_PARSER_3(x) ((x) << S_CAUSE_DATA_PARSER_3)
+#define F_CAUSE_DATA_PARSER_3 V_CAUSE_DATA_PARSER_3(1U)
+
+#define S_CAUSE_DATA_SNOOP_2 21
+#define V_CAUSE_DATA_SNOOP_2(x) ((x) << S_CAUSE_DATA_SNOOP_2)
+#define F_CAUSE_DATA_SNOOP_2 V_CAUSE_DATA_SNOOP_2(1U)
+
+#define S_CAUSE_DATA_SFIFO_2 20
+#define V_CAUSE_DATA_SFIFO_2(x) ((x) << S_CAUSE_DATA_SFIFO_2)
+#define F_CAUSE_DATA_SFIFO_2 V_CAUSE_DATA_SFIFO_2(1U)
+
+#define S_CAUSE_DATA_FIFO_2 19
+#define V_CAUSE_DATA_FIFO_2(x) ((x) << S_CAUSE_DATA_FIFO_2)
+#define F_CAUSE_DATA_FIFO_2 V_CAUSE_DATA_FIFO_2(1U)
+
+#define S_CAUSE_DATA_DDP_2 18
+#define V_CAUSE_DATA_DDP_2(x) ((x) << S_CAUSE_DATA_DDP_2)
+#define F_CAUSE_DATA_DDP_2 V_CAUSE_DATA_DDP_2(1U)
+
+#define S_CAUSE_DATA_CTX_2 17
+#define V_CAUSE_DATA_CTX_2(x) ((x) << S_CAUSE_DATA_CTX_2)
+#define F_CAUSE_DATA_CTX_2 V_CAUSE_DATA_CTX_2(1U)
+
+#define S_CAUSE_DATA_PARSER_2 16
+#define V_CAUSE_DATA_PARSER_2(x) ((x) << S_CAUSE_DATA_PARSER_2)
+#define F_CAUSE_DATA_PARSER_2 V_CAUSE_DATA_PARSER_2(1U)
+
+#define S_CAUSE_DATA_SNOOP_1 13
+#define V_CAUSE_DATA_SNOOP_1(x) ((x) << S_CAUSE_DATA_SNOOP_1)
+#define F_CAUSE_DATA_SNOOP_1 V_CAUSE_DATA_SNOOP_1(1U)
+
+#define S_CAUSE_DATA_SFIFO_1 12
+#define V_CAUSE_DATA_SFIFO_1(x) ((x) << S_CAUSE_DATA_SFIFO_1)
+#define F_CAUSE_DATA_SFIFO_1 V_CAUSE_DATA_SFIFO_1(1U)
+
+#define S_CAUSE_DATA_FIFO_1 11
+#define V_CAUSE_DATA_FIFO_1(x) ((x) << S_CAUSE_DATA_FIFO_1)
+#define F_CAUSE_DATA_FIFO_1 V_CAUSE_DATA_FIFO_1(1U)
+
+#define S_CAUSE_DATA_DDP_1 10
+#define V_CAUSE_DATA_DDP_1(x) ((x) << S_CAUSE_DATA_DDP_1)
+#define F_CAUSE_DATA_DDP_1 V_CAUSE_DATA_DDP_1(1U)
+
+#define S_CAUSE_DATA_CTX_1 9
+#define V_CAUSE_DATA_CTX_1(x) ((x) << S_CAUSE_DATA_CTX_1)
+#define F_CAUSE_DATA_CTX_1 V_CAUSE_DATA_CTX_1(1U)
+
+#define S_CAUSE_DATA_PARSER_1 8
+#define V_CAUSE_DATA_PARSER_1(x) ((x) << S_CAUSE_DATA_PARSER_1)
+#define F_CAUSE_DATA_PARSER_1 V_CAUSE_DATA_PARSER_1(1U)
+
+#define S_CAUSE_DATA_SNOOP_0 5
+#define V_CAUSE_DATA_SNOOP_0(x) ((x) << S_CAUSE_DATA_SNOOP_0)
+#define F_CAUSE_DATA_SNOOP_0 V_CAUSE_DATA_SNOOP_0(1U)
+
+#define S_CAUSE_DATA_SFIFO_0 4
+#define V_CAUSE_DATA_SFIFO_0(x) ((x) << S_CAUSE_DATA_SFIFO_0)
+#define F_CAUSE_DATA_SFIFO_0 V_CAUSE_DATA_SFIFO_0(1U)
+
+#define S_CAUSE_DATA_FIFO_0 3
+#define V_CAUSE_DATA_FIFO_0(x) ((x) << S_CAUSE_DATA_FIFO_0)
+#define F_CAUSE_DATA_FIFO_0 V_CAUSE_DATA_FIFO_0(1U)
+
+#define S_CAUSE_DATA_DDP_0 2
+#define V_CAUSE_DATA_DDP_0(x) ((x) << S_CAUSE_DATA_DDP_0)
+#define F_CAUSE_DATA_DDP_0 V_CAUSE_DATA_DDP_0(1U)
+
+#define S_CAUSE_DATA_CTX_0 1
+#define V_CAUSE_DATA_CTX_0(x) ((x) << S_CAUSE_DATA_CTX_0)
+#define F_CAUSE_DATA_CTX_0 V_CAUSE_DATA_CTX_0(1U)
+
+#define S_CAUSE_DATA_PARSER_0 0
+#define V_CAUSE_DATA_PARSER_0(x) ((x) << S_CAUSE_DATA_PARSER_0)
+#define F_CAUSE_DATA_PARSER_0 V_CAUSE_DATA_PARSER_0(1U)
+
+#define A_ULP_RX_PERR_ENABLE_DATA 0x19318
+
+#define S_PERR_ENABLE_DATA_SNOOP_3 29
+#define V_PERR_ENABLE_DATA_SNOOP_3(x) ((x) << S_PERR_ENABLE_DATA_SNOOP_3)
+#define F_PERR_ENABLE_DATA_SNOOP_3 V_PERR_ENABLE_DATA_SNOOP_3(1U)
+
+#define S_PERR_ENABLE_DATA_SFIFO_3 28
+#define V_PERR_ENABLE_DATA_SFIFO_3(x) ((x) << S_PERR_ENABLE_DATA_SFIFO_3)
+#define F_PERR_ENABLE_DATA_SFIFO_3 V_PERR_ENABLE_DATA_SFIFO_3(1U)
+
+#define S_PERR_ENABLE_DATA_FIFO_3 27
+#define V_PERR_ENABLE_DATA_FIFO_3(x) ((x) << S_PERR_ENABLE_DATA_FIFO_3)
+#define F_PERR_ENABLE_DATA_FIFO_3 V_PERR_ENABLE_DATA_FIFO_3(1U)
+
+#define S_PERR_ENABLE_DATA_DDP_3 26
+#define V_PERR_ENABLE_DATA_DDP_3(x) ((x) << S_PERR_ENABLE_DATA_DDP_3)
+#define F_PERR_ENABLE_DATA_DDP_3 V_PERR_ENABLE_DATA_DDP_3(1U)
+
+#define S_PERR_ENABLE_DATA_CTX_3 25
+#define V_PERR_ENABLE_DATA_CTX_3(x) ((x) << S_PERR_ENABLE_DATA_CTX_3)
+#define F_PERR_ENABLE_DATA_CTX_3 V_PERR_ENABLE_DATA_CTX_3(1U)
+
+#define S_PERR_ENABLE_DATA_PARSER_3 24
+#define V_PERR_ENABLE_DATA_PARSER_3(x) ((x) << S_PERR_ENABLE_DATA_PARSER_3)
+#define F_PERR_ENABLE_DATA_PARSER_3 V_PERR_ENABLE_DATA_PARSER_3(1U)
+
+#define S_PERR_ENABLE_DATA_SNOOP_2 21
+#define V_PERR_ENABLE_DATA_SNOOP_2(x) ((x) << S_PERR_ENABLE_DATA_SNOOP_2)
+#define F_PERR_ENABLE_DATA_SNOOP_2 V_PERR_ENABLE_DATA_SNOOP_2(1U)
+
+#define S_PERR_ENABLE_DATA_SFIFO_2 20
+#define V_PERR_ENABLE_DATA_SFIFO_2(x) ((x) << S_PERR_ENABLE_DATA_SFIFO_2)
+#define F_PERR_ENABLE_DATA_SFIFO_2 V_PERR_ENABLE_DATA_SFIFO_2(1U)
+
+#define S_PERR_ENABLE_DATA_FIFO_2 19
+#define V_PERR_ENABLE_DATA_FIFO_2(x) ((x) << S_PERR_ENABLE_DATA_FIFO_2)
+#define F_PERR_ENABLE_DATA_FIFO_2 V_PERR_ENABLE_DATA_FIFO_2(1U)
+
+#define S_PERR_ENABLE_DATA_DDP_2 18
+#define V_PERR_ENABLE_DATA_DDP_2(x) ((x) << S_PERR_ENABLE_DATA_DDP_2)
+#define F_PERR_ENABLE_DATA_DDP_2 V_PERR_ENABLE_DATA_DDP_2(1U)
+
+#define S_PERR_ENABLE_DATA_CTX_2 17
+#define V_PERR_ENABLE_DATA_CTX_2(x) ((x) << S_PERR_ENABLE_DATA_CTX_2)
+#define F_PERR_ENABLE_DATA_CTX_2 V_PERR_ENABLE_DATA_CTX_2(1U)
+
+#define S_PERR_ENABLE_DATA_PARSER_2 16
+#define V_PERR_ENABLE_DATA_PARSER_2(x) ((x) << S_PERR_ENABLE_DATA_PARSER_2)
+#define F_PERR_ENABLE_DATA_PARSER_2 V_PERR_ENABLE_DATA_PARSER_2(1U)
+
+#define S_PERR_ENABLE_DATA_SNOOP_1 13
+#define V_PERR_ENABLE_DATA_SNOOP_1(x) ((x) << S_PERR_ENABLE_DATA_SNOOP_1)
+#define F_PERR_ENABLE_DATA_SNOOP_1 V_PERR_ENABLE_DATA_SNOOP_1(1U)
+
+#define S_PERR_ENABLE_DATA_SFIFO_1 12
+#define V_PERR_ENABLE_DATA_SFIFO_1(x) ((x) << S_PERR_ENABLE_DATA_SFIFO_1)
+#define F_PERR_ENABLE_DATA_SFIFO_1 V_PERR_ENABLE_DATA_SFIFO_1(1U)
+
+#define S_PERR_ENABLE_DATA_FIFO_1 11
+#define V_PERR_ENABLE_DATA_FIFO_1(x) ((x) << S_PERR_ENABLE_DATA_FIFO_1)
+#define F_PERR_ENABLE_DATA_FIFO_1 V_PERR_ENABLE_DATA_FIFO_1(1U)
+
+#define S_PERR_ENABLE_DATA_DDP_1 10
+#define V_PERR_ENABLE_DATA_DDP_1(x) ((x) << S_PERR_ENABLE_DATA_DDP_1)
+#define F_PERR_ENABLE_DATA_DDP_1 V_PERR_ENABLE_DATA_DDP_1(1U)
+
+#define S_PERR_ENABLE_DATA_CTX_1 9
+#define V_PERR_ENABLE_DATA_CTX_1(x) ((x) << S_PERR_ENABLE_DATA_CTX_1)
+#define F_PERR_ENABLE_DATA_CTX_1 V_PERR_ENABLE_DATA_CTX_1(1U)
+
+#define S_PERR_ENABLE_DATA_PARSER_1 8
+#define V_PERR_ENABLE_DATA_PARSER_1(x) ((x) << S_PERR_ENABLE_DATA_PARSER_1)
+#define F_PERR_ENABLE_DATA_PARSER_1 V_PERR_ENABLE_DATA_PARSER_1(1U)
+
+#define S_PERR_ENABLE_DATA_SNOOP_0 5
+#define V_PERR_ENABLE_DATA_SNOOP_0(x) ((x) << S_PERR_ENABLE_DATA_SNOOP_0)
+#define F_PERR_ENABLE_DATA_SNOOP_0 V_PERR_ENABLE_DATA_SNOOP_0(1U)
+
+#define S_PERR_ENABLE_DATA_SFIFO_0 4
+#define V_PERR_ENABLE_DATA_SFIFO_0(x) ((x) << S_PERR_ENABLE_DATA_SFIFO_0)
+#define F_PERR_ENABLE_DATA_SFIFO_0 V_PERR_ENABLE_DATA_SFIFO_0(1U)
+
+#define S_PERR_ENABLE_DATA_FIFO_0 3
+#define V_PERR_ENABLE_DATA_FIFO_0(x) ((x) << S_PERR_ENABLE_DATA_FIFO_0)
+#define F_PERR_ENABLE_DATA_FIFO_0 V_PERR_ENABLE_DATA_FIFO_0(1U)
+
+#define S_PERR_ENABLE_DATA_DDP_0 2
+#define V_PERR_ENABLE_DATA_DDP_0(x) ((x) << S_PERR_ENABLE_DATA_DDP_0)
+#define F_PERR_ENABLE_DATA_DDP_0 V_PERR_ENABLE_DATA_DDP_0(1U)
+
+#define S_PERR_ENABLE_DATA_CTX_0 1
+#define V_PERR_ENABLE_DATA_CTX_0(x) ((x) << S_PERR_ENABLE_DATA_CTX_0)
+#define F_PERR_ENABLE_DATA_CTX_0 V_PERR_ENABLE_DATA_CTX_0(1U)
+
+#define S_PERR_ENABLE_DATA_PARSER_0 0
+#define V_PERR_ENABLE_DATA_PARSER_0(x) ((x) << S_PERR_ENABLE_DATA_PARSER_0)
+#define F_PERR_ENABLE_DATA_PARSER_0 V_PERR_ENABLE_DATA_PARSER_0(1U)
+
+#define A_ULP_RX_INT_ENABLE_ARB 0x19320
+
+#define S_ENABLE_ARB_PBL_PF_3 27
+#define V_ENABLE_ARB_PBL_PF_3(x) ((x) << S_ENABLE_ARB_PBL_PF_3)
+#define F_ENABLE_ARB_PBL_PF_3 V_ENABLE_ARB_PBL_PF_3(1U)
+
+#define S_ENABLE_ARB_PF_3 26
+#define V_ENABLE_ARB_PF_3(x) ((x) << S_ENABLE_ARB_PF_3)
+#define F_ENABLE_ARB_PF_3 V_ENABLE_ARB_PF_3(1U)
+
+#define S_ENABLE_ARB_TPT_PF_3 25
+#define V_ENABLE_ARB_TPT_PF_3(x) ((x) << S_ENABLE_ARB_TPT_PF_3)
+#define F_ENABLE_ARB_TPT_PF_3 V_ENABLE_ARB_TPT_PF_3(1U)
+
+#define S_ENABLE_ARB_F_3 24
+#define V_ENABLE_ARB_F_3(x) ((x) << S_ENABLE_ARB_F_3)
+#define F_ENABLE_ARB_F_3 V_ENABLE_ARB_F_3(1U)
+
+#define S_ENABLE_ARB_PBL_PF_2 19
+#define V_ENABLE_ARB_PBL_PF_2(x) ((x) << S_ENABLE_ARB_PBL_PF_2)
+#define F_ENABLE_ARB_PBL_PF_2 V_ENABLE_ARB_PBL_PF_2(1U)
+
+#define S_ENABLE_ARB_PF_2 18
+#define V_ENABLE_ARB_PF_2(x) ((x) << S_ENABLE_ARB_PF_2)
+#define F_ENABLE_ARB_PF_2 V_ENABLE_ARB_PF_2(1U)
+
+#define S_ENABLE_ARB_TPT_PF_2 17
+#define V_ENABLE_ARB_TPT_PF_2(x) ((x) << S_ENABLE_ARB_TPT_PF_2)
+#define F_ENABLE_ARB_TPT_PF_2 V_ENABLE_ARB_TPT_PF_2(1U)
+
+#define S_ENABLE_ARB_F_2 16
+#define V_ENABLE_ARB_F_2(x) ((x) << S_ENABLE_ARB_F_2)
+#define F_ENABLE_ARB_F_2 V_ENABLE_ARB_F_2(1U)
+
+#define S_ENABLE_ARB_PBL_PF_1 11
+#define V_ENABLE_ARB_PBL_PF_1(x) ((x) << S_ENABLE_ARB_PBL_PF_1)
+#define F_ENABLE_ARB_PBL_PF_1 V_ENABLE_ARB_PBL_PF_1(1U)
+
+#define S_ENABLE_ARB_PF_1 10
+#define V_ENABLE_ARB_PF_1(x) ((x) << S_ENABLE_ARB_PF_1)
+#define F_ENABLE_ARB_PF_1 V_ENABLE_ARB_PF_1(1U)
+
+#define S_ENABLE_ARB_TPT_PF_1 9
+#define V_ENABLE_ARB_TPT_PF_1(x) ((x) << S_ENABLE_ARB_TPT_PF_1)
+#define F_ENABLE_ARB_TPT_PF_1 V_ENABLE_ARB_TPT_PF_1(1U)
+
+#define S_ENABLE_ARB_F_1 8
+#define V_ENABLE_ARB_F_1(x) ((x) << S_ENABLE_ARB_F_1)
+#define F_ENABLE_ARB_F_1 V_ENABLE_ARB_F_1(1U)
+
+#define S_ENABLE_ARB_PBL_PF_0 3
+#define V_ENABLE_ARB_PBL_PF_0(x) ((x) << S_ENABLE_ARB_PBL_PF_0)
+#define F_ENABLE_ARB_PBL_PF_0 V_ENABLE_ARB_PBL_PF_0(1U)
+
+#define S_ENABLE_ARB_PF_0 2
+#define V_ENABLE_ARB_PF_0(x) ((x) << S_ENABLE_ARB_PF_0)
+#define F_ENABLE_ARB_PF_0 V_ENABLE_ARB_PF_0(1U)
+
+#define S_ENABLE_ARB_TPT_PF_0 1
+#define V_ENABLE_ARB_TPT_PF_0(x) ((x) << S_ENABLE_ARB_TPT_PF_0)
+#define F_ENABLE_ARB_TPT_PF_0 V_ENABLE_ARB_TPT_PF_0(1U)
+
+#define S_ENABLE_ARB_F_0 0
+#define V_ENABLE_ARB_F_0(x) ((x) << S_ENABLE_ARB_F_0)
+#define F_ENABLE_ARB_F_0 V_ENABLE_ARB_F_0(1U)
+
+#define A_ULP_RX_INT_CAUSE_ARB 0x19324
+
+#define S_CAUSE_ARB_PBL_PF_3 27
+#define V_CAUSE_ARB_PBL_PF_3(x) ((x) << S_CAUSE_ARB_PBL_PF_3)
+#define F_CAUSE_ARB_PBL_PF_3 V_CAUSE_ARB_PBL_PF_3(1U)
+
+#define S_CAUSE_ARB_PF_3 26
+#define V_CAUSE_ARB_PF_3(x) ((x) << S_CAUSE_ARB_PF_3)
+#define F_CAUSE_ARB_PF_3 V_CAUSE_ARB_PF_3(1U)
+
+#define S_CAUSE_ARB_TPT_PF_3 25
+#define V_CAUSE_ARB_TPT_PF_3(x) ((x) << S_CAUSE_ARB_TPT_PF_3)
+#define F_CAUSE_ARB_TPT_PF_3 V_CAUSE_ARB_TPT_PF_3(1U)
+
+#define S_CAUSE_ARB_F_3 24
+#define V_CAUSE_ARB_F_3(x) ((x) << S_CAUSE_ARB_F_3)
+#define F_CAUSE_ARB_F_3 V_CAUSE_ARB_F_3(1U)
+
+#define S_CAUSE_ARB_PBL_PF_2 19
+#define V_CAUSE_ARB_PBL_PF_2(x) ((x) << S_CAUSE_ARB_PBL_PF_2)
+#define F_CAUSE_ARB_PBL_PF_2 V_CAUSE_ARB_PBL_PF_2(1U)
+
+#define S_CAUSE_ARB_PF_2 18
+#define V_CAUSE_ARB_PF_2(x) ((x) << S_CAUSE_ARB_PF_2)
+#define F_CAUSE_ARB_PF_2 V_CAUSE_ARB_PF_2(1U)
+
+#define S_CAUSE_ARB_TPT_PF_2 17
+#define V_CAUSE_ARB_TPT_PF_2(x) ((x) << S_CAUSE_ARB_TPT_PF_2)
+#define F_CAUSE_ARB_TPT_PF_2 V_CAUSE_ARB_TPT_PF_2(1U)
+
+#define S_CAUSE_ARB_F_2 16
+#define V_CAUSE_ARB_F_2(x) ((x) << S_CAUSE_ARB_F_2)
+#define F_CAUSE_ARB_F_2 V_CAUSE_ARB_F_2(1U)
+
+#define S_CAUSE_ARB_PBL_PF_1 11
+#define V_CAUSE_ARB_PBL_PF_1(x) ((x) << S_CAUSE_ARB_PBL_PF_1)
+#define F_CAUSE_ARB_PBL_PF_1 V_CAUSE_ARB_PBL_PF_1(1U)
+
+#define S_CAUSE_ARB_PF_1 10
+#define V_CAUSE_ARB_PF_1(x) ((x) << S_CAUSE_ARB_PF_1)
+#define F_CAUSE_ARB_PF_1 V_CAUSE_ARB_PF_1(1U)
+
+#define S_CAUSE_ARB_TPT_PF_1 9
+#define V_CAUSE_ARB_TPT_PF_1(x) ((x) << S_CAUSE_ARB_TPT_PF_1)
+#define F_CAUSE_ARB_TPT_PF_1 V_CAUSE_ARB_TPT_PF_1(1U)
+
+#define S_CAUSE_ARB_F_1 8
+#define V_CAUSE_ARB_F_1(x) ((x) << S_CAUSE_ARB_F_1)
+#define F_CAUSE_ARB_F_1 V_CAUSE_ARB_F_1(1U)
+
+#define S_CAUSE_ARB_PBL_PF_0 3
+#define V_CAUSE_ARB_PBL_PF_0(x) ((x) << S_CAUSE_ARB_PBL_PF_0)
+#define F_CAUSE_ARB_PBL_PF_0 V_CAUSE_ARB_PBL_PF_0(1U)
+
+#define S_CAUSE_ARB_PF_0 2
+#define V_CAUSE_ARB_PF_0(x) ((x) << S_CAUSE_ARB_PF_0)
+#define F_CAUSE_ARB_PF_0 V_CAUSE_ARB_PF_0(1U)
+
+#define S_CAUSE_ARB_TPT_PF_0 1
+#define V_CAUSE_ARB_TPT_PF_0(x) ((x) << S_CAUSE_ARB_TPT_PF_0)
+#define F_CAUSE_ARB_TPT_PF_0 V_CAUSE_ARB_TPT_PF_0(1U)
+
+#define S_CAUSE_ARB_F_0 0
+#define V_CAUSE_ARB_F_0(x) ((x) << S_CAUSE_ARB_F_0)
+#define F_CAUSE_ARB_F_0 V_CAUSE_ARB_F_0(1U)
+
+#define A_ULP_RX_PERR_ENABLE_ARB 0x19328
+
+#define S_PERR_ENABLE_ARB_PBL_PF_3 27
+#define V_PERR_ENABLE_ARB_PBL_PF_3(x) ((x) << S_PERR_ENABLE_ARB_PBL_PF_3)
+#define F_PERR_ENABLE_ARB_PBL_PF_3 V_PERR_ENABLE_ARB_PBL_PF_3(1U)
+
+#define S_PERR_ENABLE_ARB_PF_3 26
+#define V_PERR_ENABLE_ARB_PF_3(x) ((x) << S_PERR_ENABLE_ARB_PF_3)
+#define F_PERR_ENABLE_ARB_PF_3 V_PERR_ENABLE_ARB_PF_3(1U)
+
+#define S_PERR_ENABLE_ARB_TPT_PF_3 25
+#define V_PERR_ENABLE_ARB_TPT_PF_3(x) ((x) << S_PERR_ENABLE_ARB_TPT_PF_3)
+#define F_PERR_ENABLE_ARB_TPT_PF_3 V_PERR_ENABLE_ARB_TPT_PF_3(1U)
+
+#define S_PERR_ENABLE_ARB_F_3 24
+#define V_PERR_ENABLE_ARB_F_3(x) ((x) << S_PERR_ENABLE_ARB_F_3)
+#define F_PERR_ENABLE_ARB_F_3 V_PERR_ENABLE_ARB_F_3(1U)
+
+#define S_PERR_ENABLE_ARB_PBL_PF_2 19
+#define V_PERR_ENABLE_ARB_PBL_PF_2(x) ((x) << S_PERR_ENABLE_ARB_PBL_PF_2)
+#define F_PERR_ENABLE_ARB_PBL_PF_2 V_PERR_ENABLE_ARB_PBL_PF_2(1U)
+
+#define S_PERR_ENABLE_ARB_PF_2 18
+#define V_PERR_ENABLE_ARB_PF_2(x) ((x) << S_PERR_ENABLE_ARB_PF_2)
+#define F_PERR_ENABLE_ARB_PF_2 V_PERR_ENABLE_ARB_PF_2(1U)
+
+#define S_PERR_ENABLE_ARB_TPT_PF_2 17
+#define V_PERR_ENABLE_ARB_TPT_PF_2(x) ((x) << S_PERR_ENABLE_ARB_TPT_PF_2)
+#define F_PERR_ENABLE_ARB_TPT_PF_2 V_PERR_ENABLE_ARB_TPT_PF_2(1U)
+
+#define S_PERR_ENABLE_ARB_F_2 16
+#define V_PERR_ENABLE_ARB_F_2(x) ((x) << S_PERR_ENABLE_ARB_F_2)
+#define F_PERR_ENABLE_ARB_F_2 V_PERR_ENABLE_ARB_F_2(1U)
+
+#define S_PERR_ENABLE_ARB_PBL_PF_1 11
+#define V_PERR_ENABLE_ARB_PBL_PF_1(x) ((x) << S_PERR_ENABLE_ARB_PBL_PF_1)
+#define F_PERR_ENABLE_ARB_PBL_PF_1 V_PERR_ENABLE_ARB_PBL_PF_1(1U)
+
+#define S_PERR_ENABLE_ARB_PF_1 10
+#define V_PERR_ENABLE_ARB_PF_1(x) ((x) << S_PERR_ENABLE_ARB_PF_1)
+#define F_PERR_ENABLE_ARB_PF_1 V_PERR_ENABLE_ARB_PF_1(1U)
+
+#define S_PERR_ENABLE_ARB_TPT_PF_1 9
+#define V_PERR_ENABLE_ARB_TPT_PF_1(x) ((x) << S_PERR_ENABLE_ARB_TPT_PF_1)
+#define F_PERR_ENABLE_ARB_TPT_PF_1 V_PERR_ENABLE_ARB_TPT_PF_1(1U)
+
+#define S_PERR_ENABLE_ARB_F_1 8
+#define V_PERR_ENABLE_ARB_F_1(x) ((x) << S_PERR_ENABLE_ARB_F_1)
+#define F_PERR_ENABLE_ARB_F_1 V_PERR_ENABLE_ARB_F_1(1U)
+
+#define S_PERR_ENABLE_ARB_PBL_PF_0 3
+#define V_PERR_ENABLE_ARB_PBL_PF_0(x) ((x) << S_PERR_ENABLE_ARB_PBL_PF_0)
+#define F_PERR_ENABLE_ARB_PBL_PF_0 V_PERR_ENABLE_ARB_PBL_PF_0(1U)
+
+#define S_PERR_ENABLE_ARB_PF_0 2
+#define V_PERR_ENABLE_ARB_PF_0(x) ((x) << S_PERR_ENABLE_ARB_PF_0)
+#define F_PERR_ENABLE_ARB_PF_0 V_PERR_ENABLE_ARB_PF_0(1U)
+
+#define S_PERR_ENABLE_ARB_TPT_PF_0 1
+#define V_PERR_ENABLE_ARB_TPT_PF_0(x) ((x) << S_PERR_ENABLE_ARB_TPT_PF_0)
+#define F_PERR_ENABLE_ARB_TPT_PF_0 V_PERR_ENABLE_ARB_TPT_PF_0(1U)
+
+#define S_PERR_ENABLE_ARB_F_0 0
+#define V_PERR_ENABLE_ARB_F_0(x) ((x) << S_PERR_ENABLE_ARB_F_0)
+#define F_PERR_ENABLE_ARB_F_0 V_PERR_ENABLE_ARB_F_0(1U)
+
+#define A_ULP_RX_CTL1 0x19330
+
+#define S_ISCSI_CTL2 27
+#define V_ISCSI_CTL2(x) ((x) << S_ISCSI_CTL2)
+#define F_ISCSI_CTL2 V_ISCSI_CTL2(1U)
+
+#define S_ISCSI_CTL1 26
+#define V_ISCSI_CTL1(x) ((x) << S_ISCSI_CTL1)
+#define F_ISCSI_CTL1 V_ISCSI_CTL1(1U)
+
+#define S_ISCSI_CTL0 25
+#define V_ISCSI_CTL0(x) ((x) << S_ISCSI_CTL0)
+#define F_ISCSI_CTL0 V_ISCSI_CTL0(1U)
+
+#define S_NVME_TCP_DATA_ALIGNMENT 16
+#define M_NVME_TCP_DATA_ALIGNMENT 0x1ffU
+#define V_NVME_TCP_DATA_ALIGNMENT(x) ((x) << S_NVME_TCP_DATA_ALIGNMENT)
+#define G_NVME_TCP_DATA_ALIGNMENT(x) (((x) >> S_NVME_TCP_DATA_ALIGNMENT) & M_NVME_TCP_DATA_ALIGNMENT)
+
+#define S_NVME_TCP_INVLD_MSG_DIS 14
+#define M_NVME_TCP_INVLD_MSG_DIS 0x3U
+#define V_NVME_TCP_INVLD_MSG_DIS(x) ((x) << S_NVME_TCP_INVLD_MSG_DIS)
+#define G_NVME_TCP_INVLD_MSG_DIS(x) (((x) >> S_NVME_TCP_INVLD_MSG_DIS) & M_NVME_TCP_INVLD_MSG_DIS)
+
+#define S_NVME_TCP_DDP_PDU_CHK_TYPE 13
+#define V_NVME_TCP_DDP_PDU_CHK_TYPE(x) ((x) << S_NVME_TCP_DDP_PDU_CHK_TYPE)
+#define F_NVME_TCP_DDP_PDU_CHK_TYPE V_NVME_TCP_DDP_PDU_CHK_TYPE(1U)
+
+#define S_T10_CONFIG_ENB 12
+#define V_T10_CONFIG_ENB(x) ((x) << S_T10_CONFIG_ENB)
+#define F_T10_CONFIG_ENB V_T10_CONFIG_ENB(1U)
+
+#define S_NVME_TCP_COLOUR_ENB 10
+#define M_NVME_TCP_COLOUR_ENB 0x3U
+#define V_NVME_TCP_COLOUR_ENB(x) ((x) << S_NVME_TCP_COLOUR_ENB)
+#define G_NVME_TCP_COLOUR_ENB(x) (((x) >> S_NVME_TCP_COLOUR_ENB) & M_NVME_TCP_COLOUR_ENB)
+
+#define S_ROCE_SEND_RQE 8
+#define V_ROCE_SEND_RQE(x) ((x) << S_ROCE_SEND_RQE)
+#define F_ROCE_SEND_RQE V_ROCE_SEND_RQE(1U)
+
+#define S_RDMA_INVLD_MSG_DIS 6
+#define M_RDMA_INVLD_MSG_DIS 0x3U
+#define V_RDMA_INVLD_MSG_DIS(x) ((x) << S_RDMA_INVLD_MSG_DIS)
+#define G_RDMA_INVLD_MSG_DIS(x) (((x) >> S_RDMA_INVLD_MSG_DIS) & M_RDMA_INVLD_MSG_DIS)
+
+#define S_ROCE_INVLD_MSG_DIS 4
+#define M_ROCE_INVLD_MSG_DIS 0x3U
+#define V_ROCE_INVLD_MSG_DIS(x) ((x) << S_ROCE_INVLD_MSG_DIS)
+#define G_ROCE_INVLD_MSG_DIS(x) (((x) >> S_ROCE_INVLD_MSG_DIS) & M_ROCE_INVLD_MSG_DIS)
+
+#define S_T7_MEM_ADDR_CTRL 2
+#define M_T7_MEM_ADDR_CTRL 0x3U
+#define V_T7_MEM_ADDR_CTRL(x) ((x) << S_T7_MEM_ADDR_CTRL)
+#define G_T7_MEM_ADDR_CTRL(x) (((x) >> S_T7_MEM_ADDR_CTRL) & M_T7_MEM_ADDR_CTRL)
+
+#define S_ENB_32K_PDU 1
+#define V_ENB_32K_PDU(x) ((x) << S_ENB_32K_PDU)
+#define F_ENB_32K_PDU V_ENB_32K_PDU(1U)
+
+#define S_C2H_SUCCESS_WO_LAST_PDU_CHK_DIS 0
+#define V_C2H_SUCCESS_WO_LAST_PDU_CHK_DIS(x) ((x) << S_C2H_SUCCESS_WO_LAST_PDU_CHK_DIS)
+#define F_C2H_SUCCESS_WO_LAST_PDU_CHK_DIS V_C2H_SUCCESS_WO_LAST_PDU_CHK_DIS(1U)
+
#define A_ULP_RX_TLS_IND_CMD 0x19348
#define S_TLS_RX_REG_OFF_ADDR 0
@@ -37795,6 +48839,8 @@
#define G_TLS_RX_REG_OFF_ADDR(x) (((x) >> S_TLS_RX_REG_OFF_ADDR) & M_TLS_RX_REG_OFF_ADDR)
#define A_ULP_RX_TLS_IND_DATA 0x1934c
+#define A_ULP_RX_TLS_CH0_HMACCTRL_CFG 0x20
+#define A_ULP_RX_TLS_CH1_HMACCTRL_CFG 0x60
/* registers for module SF */
#define SF_BASE_ADDR 0x193f8
@@ -37815,6 +48861,39 @@
#define V_BYTECNT(x) ((x) << S_BYTECNT)
#define G_BYTECNT(x) (((x) >> S_BYTECNT) & M_BYTECNT)
+#define S_EN32BADDR 30
+#define V_EN32BADDR(x) ((x) << S_EN32BADDR)
+#define F_EN32BADDR V_EN32BADDR(1U)
+
+#define S_NUM_OF_BYTES 1
+#define M_NUM_OF_BYTES 0x3U
+#define V_NUM_OF_BYTES(x) ((x) << S_NUM_OF_BYTES)
+#define G_NUM_OF_BYTES(x) (((x) >> S_NUM_OF_BYTES) & M_NUM_OF_BYTES)
+
+#define S_QUADREADDISABLE 5
+#define V_QUADREADDISABLE(x) ((x) << S_QUADREADDISABLE)
+#define F_QUADREADDISABLE V_QUADREADDISABLE(1U)
+
+#define S_EXIT4B 6
+#define V_EXIT4B(x) ((x) << S_EXIT4B)
+#define F_EXIT4B V_EXIT4B(1U)
+
+#define S_ENTER4B 7
+#define V_ENTER4B(x) ((x) << S_ENTER4B)
+#define F_ENTER4B V_ENTER4B(1U)
+
+#define S_QUADWRENABLE 8
+#define V_QUADWRENABLE(x) ((x) << S_QUADWRENABLE)
+#define F_QUADWRENABLE V_QUADWRENABLE(1U)
+
+#define S_REGDBG_SEL 9
+#define V_REGDBG_SEL(x) ((x) << S_REGDBG_SEL)
+#define F_REGDBG_SEL V_REGDBG_SEL(1U)
+
+#define S_REGDBG_MODE 10
+#define V_REGDBG_MODE(x) ((x) << S_REGDBG_MODE)
+#define F_REGDBG_MODE V_REGDBG_MODE(1U)
+
/* registers for module PL */
#define PL_BASE_ADDR 0x19400
@@ -37892,21 +48971,6 @@
#define F_SWINT V_SWINT(1U)
#define A_PL_WHOAMI 0x19400
-
-#define S_T6_SOURCEPF 9
-#define M_T6_SOURCEPF 0x7U
-#define V_T6_SOURCEPF(x) ((x) << S_T6_SOURCEPF)
-#define G_T6_SOURCEPF(x) (((x) >> S_T6_SOURCEPF) & M_T6_SOURCEPF)
-
-#define S_T6_ISVF 8
-#define V_T6_ISVF(x) ((x) << S_T6_ISVF)
-#define F_T6_ISVF V_T6_ISVF(1U)
-
-#define S_T6_VFID 0
-#define M_T6_VFID 0xffU
-#define V_T6_VFID(x) ((x) << S_T6_VFID)
-#define G_T6_VFID(x) (((x) >> S_T6_VFID) & M_T6_VFID)
-
#define A_PL_PERR_CAUSE 0x19404
#define S_UART 28
@@ -38037,6 +49101,134 @@
#define V_ANYMAC(x) ((x) << S_ANYMAC)
#define F_ANYMAC V_ANYMAC(1U)
+#define S_T7_PL_PERR_CRYPTO_KEY 31
+#define V_T7_PL_PERR_CRYPTO_KEY(x) ((x) << S_T7_PL_PERR_CRYPTO_KEY)
+#define F_T7_PL_PERR_CRYPTO_KEY V_T7_PL_PERR_CRYPTO_KEY(1U)
+
+#define S_T7_PL_PERR_CRYPTO1 30
+#define V_T7_PL_PERR_CRYPTO1(x) ((x) << S_T7_PL_PERR_CRYPTO1)
+#define F_T7_PL_PERR_CRYPTO1 V_T7_PL_PERR_CRYPTO1(1U)
+
+#define S_T7_PL_PERR_CRYPTO0 29
+#define V_T7_PL_PERR_CRYPTO0(x) ((x) << S_T7_PL_PERR_CRYPTO0)
+#define F_T7_PL_PERR_CRYPTO0 V_T7_PL_PERR_CRYPTO0(1U)
+
+#define S_T7_PL_PERR_GCACHE 28
+#define V_T7_PL_PERR_GCACHE(x) ((x) << S_T7_PL_PERR_GCACHE)
+#define F_T7_PL_PERR_GCACHE V_T7_PL_PERR_GCACHE(1U)
+
+#define S_T7_PL_PERR_ARM 27
+#define V_T7_PL_PERR_ARM(x) ((x) << S_T7_PL_PERR_ARM)
+#define F_T7_PL_PERR_ARM V_T7_PL_PERR_ARM(1U)
+
+#define S_T7_PL_PERR_ULP_TX 26
+#define V_T7_PL_PERR_ULP_TX(x) ((x) << S_T7_PL_PERR_ULP_TX)
+#define F_T7_PL_PERR_ULP_TX V_T7_PL_PERR_ULP_TX(1U)
+
+#define S_T7_PL_PERR_SGE 25
+#define V_T7_PL_PERR_SGE(x) ((x) << S_T7_PL_PERR_SGE)
+#define F_T7_PL_PERR_SGE V_T7_PL_PERR_SGE(1U)
+
+#define S_T7_PL_PERR_HMA 24
+#define V_T7_PL_PERR_HMA(x) ((x) << S_T7_PL_PERR_HMA)
+#define F_T7_PL_PERR_HMA V_T7_PL_PERR_HMA(1U)
+
+#define S_T7_PL_PERR_CPL_SWITCH 23
+#define V_T7_PL_PERR_CPL_SWITCH(x) ((x) << S_T7_PL_PERR_CPL_SWITCH)
+#define F_T7_PL_PERR_CPL_SWITCH V_T7_PL_PERR_CPL_SWITCH(1U)
+
+#define S_T7_PL_PERR_ULP_RX 22
+#define V_T7_PL_PERR_ULP_RX(x) ((x) << S_T7_PL_PERR_ULP_RX)
+#define F_T7_PL_PERR_ULP_RX V_T7_PL_PERR_ULP_RX(1U)
+
+#define S_T7_PL_PERR_PM_RX 21
+#define V_T7_PL_PERR_PM_RX(x) ((x) << S_T7_PL_PERR_PM_RX)
+#define F_T7_PL_PERR_PM_RX V_T7_PL_PERR_PM_RX(1U)
+
+#define S_T7_PL_PERR_PM_TX 20
+#define V_T7_PL_PERR_PM_TX(x) ((x) << S_T7_PL_PERR_PM_TX)
+#define F_T7_PL_PERR_PM_TX V_T7_PL_PERR_PM_TX(1U)
+
+#define S_T7_PL_PERR_MA 19
+#define V_T7_PL_PERR_MA(x) ((x) << S_T7_PL_PERR_MA)
+#define F_T7_PL_PERR_MA V_T7_PL_PERR_MA(1U)
+
+#define S_T7_PL_PERR_TP 18
+#define V_T7_PL_PERR_TP(x) ((x) << S_T7_PL_PERR_TP)
+#define F_T7_PL_PERR_TP V_T7_PL_PERR_TP(1U)
+
+#define S_T7_PL_PERR_LE 17
+#define V_T7_PL_PERR_LE(x) ((x) << S_T7_PL_PERR_LE)
+#define F_T7_PL_PERR_LE V_T7_PL_PERR_LE(1U)
+
+#define S_T7_PL_PERR_EDC1 16
+#define V_T7_PL_PERR_EDC1(x) ((x) << S_T7_PL_PERR_EDC1)
+#define F_T7_PL_PERR_EDC1 V_T7_PL_PERR_EDC1(1U)
+
+#define S_T7_PL_PERR_EDC0 15
+#define V_T7_PL_PERR_EDC0(x) ((x) << S_T7_PL_PERR_EDC0)
+#define F_T7_PL_PERR_EDC0 V_T7_PL_PERR_EDC0(1U)
+
+#define S_T7_PL_PERR_MC1 14
+#define V_T7_PL_PERR_MC1(x) ((x) << S_T7_PL_PERR_MC1)
+#define F_T7_PL_PERR_MC1 V_T7_PL_PERR_MC1(1U)
+
+#define S_T7_PL_PERR_MC0 13
+#define V_T7_PL_PERR_MC0(x) ((x) << S_T7_PL_PERR_MC0)
+#define F_T7_PL_PERR_MC0 V_T7_PL_PERR_MC0(1U)
+
+#define S_T7_PL_PERR_PCIE 12
+#define V_T7_PL_PERR_PCIE(x) ((x) << S_T7_PL_PERR_PCIE)
+#define F_T7_PL_PERR_PCIE V_T7_PL_PERR_PCIE(1U)
+
+#define S_T7_PL_PERR_UART 11
+#define V_T7_PL_PERR_UART(x) ((x) << S_T7_PL_PERR_UART)
+#define F_T7_PL_PERR_UART V_T7_PL_PERR_UART(1U)
+
+#define S_T7_PL_PERR_PMU 10
+#define V_T7_PL_PERR_PMU(x) ((x) << S_T7_PL_PERR_PMU)
+#define F_T7_PL_PERR_PMU V_T7_PL_PERR_PMU(1U)
+
+#define S_T7_PL_PERR_MAC 9
+#define V_T7_PL_PERR_MAC(x) ((x) << S_T7_PL_PERR_MAC)
+#define F_T7_PL_PERR_MAC V_T7_PL_PERR_MAC(1U)
+
+#define S_T7_PL_PERR_SMB 8
+#define V_T7_PL_PERR_SMB(x) ((x) << S_T7_PL_PERR_SMB)
+#define F_T7_PL_PERR_SMB V_T7_PL_PERR_SMB(1U)
+
+#define S_T7_PL_PERR_SF 7
+#define V_T7_PL_PERR_SF(x) ((x) << S_T7_PL_PERR_SF)
+#define F_T7_PL_PERR_SF V_T7_PL_PERR_SF(1U)
+
+#define S_T7_PL_PERR_PL 6
+#define V_T7_PL_PERR_PL(x) ((x) << S_T7_PL_PERR_PL)
+#define F_T7_PL_PERR_PL V_T7_PL_PERR_PL(1U)
+
+#define S_T7_PL_PERR_NCSI 5
+#define V_T7_PL_PERR_NCSI(x) ((x) << S_T7_PL_PERR_NCSI)
+#define F_T7_PL_PERR_NCSI V_T7_PL_PERR_NCSI(1U)
+
+#define S_T7_PL_PERR_MPS 4
+#define V_T7_PL_PERR_MPS(x) ((x) << S_T7_PL_PERR_MPS)
+#define F_T7_PL_PERR_MPS V_T7_PL_PERR_MPS(1U)
+
+#define S_T7_PL_PERR_MI 3
+#define V_T7_PL_PERR_MI(x) ((x) << S_T7_PL_PERR_MI)
+#define F_T7_PL_PERR_MI V_T7_PL_PERR_MI(1U)
+
+#define S_T7_PL_PERR_DBG 2
+#define V_T7_PL_PERR_DBG(x) ((x) << S_T7_PL_PERR_DBG)
+#define F_T7_PL_PERR_DBG V_T7_PL_PERR_DBG(1U)
+
+#define S_T7_PL_PERR_I2CM 1
+#define V_T7_PL_PERR_I2CM(x) ((x) << S_T7_PL_PERR_I2CM)
+#define F_T7_PL_PERR_I2CM V_T7_PL_PERR_I2CM(1U)
+
+#define S_T7_PL_PERR_CIM 0
+#define V_T7_PL_PERR_CIM(x) ((x) << S_T7_PL_PERR_CIM)
+#define F_T7_PL_PERR_CIM V_T7_PL_PERR_CIM(1U)
+
#define A_PL_PERR_ENABLE 0x19408
#define A_PL_INT_CAUSE 0x1940c
@@ -38064,6 +49256,78 @@
#define V_MAC0(x) ((x) << S_MAC0)
#define F_MAC0 V_MAC0(1U)
+#define S_T7_FLR 31
+#define V_T7_FLR(x) ((x) << S_T7_FLR)
+#define F_T7_FLR V_T7_FLR(1U)
+
+#define S_T7_SW_CIM 30
+#define V_T7_SW_CIM(x) ((x) << S_T7_SW_CIM)
+#define F_T7_SW_CIM V_T7_SW_CIM(1U)
+
+#define S_T7_ULP_TX 29
+#define V_T7_ULP_TX(x) ((x) << S_T7_ULP_TX)
+#define F_T7_ULP_TX V_T7_ULP_TX(1U)
+
+#define S_T7_SGE 28
+#define V_T7_SGE(x) ((x) << S_T7_SGE)
+#define F_T7_SGE V_T7_SGE(1U)
+
+#define S_T7_HMA 27
+#define V_T7_HMA(x) ((x) << S_T7_HMA)
+#define F_T7_HMA V_T7_HMA(1U)
+
+#define S_T7_CPL_SWITCH 26
+#define V_T7_CPL_SWITCH(x) ((x) << S_T7_CPL_SWITCH)
+#define F_T7_CPL_SWITCH V_T7_CPL_SWITCH(1U)
+
+#define S_T7_ULP_RX 25
+#define V_T7_ULP_RX(x) ((x) << S_T7_ULP_RX)
+#define F_T7_ULP_RX V_T7_ULP_RX(1U)
+
+#define S_T7_PM_RX 24
+#define V_T7_PM_RX(x) ((x) << S_T7_PM_RX)
+#define F_T7_PM_RX V_T7_PM_RX(1U)
+
+#define S_T7_PM_TX 23
+#define V_T7_PM_TX(x) ((x) << S_T7_PM_TX)
+#define F_T7_PM_TX V_T7_PM_TX(1U)
+
+#define S_T7_MA 22
+#define V_T7_MA(x) ((x) << S_T7_MA)
+#define F_T7_MA V_T7_MA(1U)
+
+#define S_T7_TP 21
+#define V_T7_TP(x) ((x) << S_T7_TP)
+#define F_T7_TP V_T7_TP(1U)
+
+#define S_T7_LE 20
+#define V_T7_LE(x) ((x) << S_T7_LE)
+#define F_T7_LE V_T7_LE(1U)
+
+#define S_T7_EDC1 19
+#define V_T7_EDC1(x) ((x) << S_T7_EDC1)
+#define F_T7_EDC1 V_T7_EDC1(1U)
+
+#define S_T7_EDC0 18
+#define V_T7_EDC0(x) ((x) << S_T7_EDC0)
+#define F_T7_EDC0 V_T7_EDC0(1U)
+
+#define S_T7_MC1 17
+#define V_T7_MC1(x) ((x) << S_T7_MC1)
+#define F_T7_MC1 V_T7_MC1(1U)
+
+#define S_T7_MC0 16
+#define V_T7_MC0(x) ((x) << S_T7_MC0)
+#define F_T7_MC0 V_T7_MC0(1U)
+
+#define S_T7_PCIE 15
+#define V_T7_PCIE(x) ((x) << S_T7_PCIE)
+#define F_T7_PCIE V_T7_PCIE(1U)
+
+#define S_T7_UART 14
+#define V_T7_UART(x) ((x) << S_T7_UART)
+#define F_T7_UART V_T7_UART(1U)
+
#define A_PL_INT_ENABLE 0x19410
#define A_PL_INT_MAP0 0x19414
@@ -38262,15 +49526,10 @@
#define V_T6_LN0_AECMD(x) ((x) << S_T6_LN0_AECMD)
#define G_T6_LN0_AECMD(x) (((x) >> S_T6_LN0_AECMD) & M_T6_LN0_AECMD)
-#define S_T6_STATECFGINITF 16
-#define M_T6_STATECFGINITF 0xffU
-#define V_T6_STATECFGINITF(x) ((x) << S_T6_STATECFGINITF)
-#define G_T6_STATECFGINITF(x) (((x) >> S_T6_STATECFGINITF) & M_T6_STATECFGINITF)
-
-#define S_T6_STATECFGINIT 12
-#define M_T6_STATECFGINIT 0xfU
-#define V_T6_STATECFGINIT(x) ((x) << S_T6_STATECFGINIT)
-#define G_T6_STATECFGINIT(x) (((x) >> S_T6_STATECFGINIT) & M_T6_STATECFGINIT)
+#define S_T6_1_STATECFGINITF 16
+#define M_T6_1_STATECFGINITF 0xffU
+#define V_T6_1_STATECFGINITF(x) ((x) << S_T6_1_STATECFGINITF)
+#define G_T6_1_STATECFGINITF(x) (((x) >> S_T6_1_STATECFGINITF) & M_T6_1_STATECFGINITF)
#define S_PHY_STATUS 10
#define V_PHY_STATUS(x) ((x) << S_PHY_STATUS)
@@ -38285,9 +49544,9 @@
#define V_PERSTTIMEOUT_PL(x) ((x) << S_PERSTTIMEOUT_PL)
#define F_PERSTTIMEOUT_PL V_PERSTTIMEOUT_PL(1U)
-#define S_T6_LTSSMENABLE 6
-#define V_T6_LTSSMENABLE(x) ((x) << S_T6_LTSSMENABLE)
-#define F_T6_LTSSMENABLE V_T6_LTSSMENABLE(1U)
+#define S_SPEEDMS 30
+#define V_SPEEDMS(x) ((x) << S_SPEEDMS)
+#define F_SPEEDMS V_SPEEDMS(1U)
#define A_PL_PCIE_CTL_STAT 0x19444
@@ -38382,6 +49641,37 @@
#define V_MAP0(x) ((x) << S_MAP0)
#define G_MAP0(x) (((x) >> S_MAP0) & M_MAP0)
+#define A_PL_INT_CAUSE2 0x19478
+
+#define S_CRYPTO_KEY 4
+#define V_CRYPTO_KEY(x) ((x) << S_CRYPTO_KEY)
+#define F_CRYPTO_KEY V_CRYPTO_KEY(1U)
+
+#define S_CRYPTO1 3
+#define V_CRYPTO1(x) ((x) << S_CRYPTO1)
+#define F_CRYPTO1 V_CRYPTO1(1U)
+
+#define S_CRYPTO0 2
+#define V_CRYPTO0(x) ((x) << S_CRYPTO0)
+#define F_CRYPTO0 V_CRYPTO0(1U)
+
+#define S_GCACHE 1
+#define V_GCACHE(x) ((x) << S_GCACHE)
+#define F_GCACHE V_GCACHE(1U)
+
+#define S_ARM 0
+#define V_ARM(x) ((x) << S_ARM)
+#define F_ARM V_ARM(1U)
+
+#define A_PL_INT_ENABLE2 0x1947c
+#define A_PL_ER_CMD 0x19488
+
+#define S_ER_ADDR 2
+#define M_ER_ADDR 0x3fffffffU
+#define V_ER_ADDR(x) ((x) << S_ER_ADDR)
+#define G_ER_ADDR(x) (((x) >> S_ER_ADDR) & M_ER_ADDR)
+
+#define A_PL_ER_DATA 0x1948c
#define A_PL_VF_SLICE_L 0x19490
#define S_LIMITADDR 16
@@ -38638,6 +49928,10 @@
#define V_REGION_EN(x) ((x) << S_REGION_EN)
#define G_REGION_EN(x) (((x) >> S_REGION_EN) & M_REGION_EN)
+#define S_CACHEBYPASS 28
+#define V_CACHEBYPASS(x) ((x) << S_CACHEBYPASS)
+#define F_CACHEBYPASS V_CACHEBYPASS(1U)
+
#define A_LE_MISC 0x19c08
#define S_CMPUNVAIL 0
@@ -38830,6 +50124,10 @@
#define V_TCAM_SIZE(x) ((x) << S_TCAM_SIZE)
#define G_TCAM_SIZE(x) (((x) >> S_TCAM_SIZE) & M_TCAM_SIZE)
+#define S_MLL_MASK 2
+#define V_MLL_MASK(x) ((x) << S_MLL_MASK)
+#define F_MLL_MASK V_MLL_MASK(1U)
+
#define A_LE_DB_INT_ENABLE 0x19c38
#define S_MSGSEL 27
@@ -39045,40 +50343,15 @@
#define V_PIPELINEERR(x) ((x) << S_PIPELINEERR)
#define F_PIPELINEERR V_PIPELINEERR(1U)
-#define A_LE_DB_INT_CAUSE 0x19c3c
-
-#define S_T6_ACTRGNFULL 21
-#define V_T6_ACTRGNFULL(x) ((x) << S_T6_ACTRGNFULL)
-#define F_T6_ACTRGNFULL V_T6_ACTRGNFULL(1U)
+#define S_CACHEINTPERR 31
+#define V_CACHEINTPERR(x) ((x) << S_CACHEINTPERR)
+#define F_CACHEINTPERR V_CACHEINTPERR(1U)
-#define S_T6_ACTCNTIPV6TZERO 20
-#define V_T6_ACTCNTIPV6TZERO(x) ((x) << S_T6_ACTCNTIPV6TZERO)
-#define F_T6_ACTCNTIPV6TZERO V_T6_ACTCNTIPV6TZERO(1U)
-
-#define S_T6_ACTCNTIPV4TZERO 19
-#define V_T6_ACTCNTIPV4TZERO(x) ((x) << S_T6_ACTCNTIPV4TZERO)
-#define F_T6_ACTCNTIPV4TZERO V_T6_ACTCNTIPV4TZERO(1U)
-
-#define S_T6_ACTCNTIPV6ZERO 18
-#define V_T6_ACTCNTIPV6ZERO(x) ((x) << S_T6_ACTCNTIPV6ZERO)
-#define F_T6_ACTCNTIPV6ZERO V_T6_ACTCNTIPV6ZERO(1U)
-
-#define S_T6_ACTCNTIPV4ZERO 17
-#define V_T6_ACTCNTIPV4ZERO(x) ((x) << S_T6_ACTCNTIPV4ZERO)
-#define F_T6_ACTCNTIPV4ZERO V_T6_ACTCNTIPV4ZERO(1U)
-
-#define S_T6_UNKNOWNCMD 3
-#define V_T6_UNKNOWNCMD(x) ((x) << S_T6_UNKNOWNCMD)
-#define F_T6_UNKNOWNCMD V_T6_UNKNOWNCMD(1U)
-
-#define S_T6_LIP0 2
-#define V_T6_LIP0(x) ((x) << S_T6_LIP0)
-#define F_T6_LIP0 V_T6_LIP0(1U)
-
-#define S_T6_LIPMISS 1
-#define V_T6_LIPMISS(x) ((x) << S_T6_LIPMISS)
-#define F_T6_LIPMISS V_T6_LIPMISS(1U)
+#define S_CACHESRAMPERR 30
+#define V_CACHESRAMPERR(x) ((x) << S_CACHESRAMPERR)
+#define F_CACHESRAMPERR V_CACHESRAMPERR(1U)
+#define A_LE_DB_INT_CAUSE 0x19c3c
#define A_LE_DB_INT_TID 0x19c40
#define S_INTTID 0
@@ -39287,6 +50560,14 @@
#define A_LE_DB_MASK_IPV6 0x19ca0
#define A_LE_DB_DBG_MATCH_DATA 0x19ca0
+#define A_LE_CMM_CONFIG 0x19cc0
+#define A_LE_CACHE_DBG 0x19cc4
+#define A_LE_CACHE_WR_ALL_CNT 0x19cc8
+#define A_LE_CACHE_WR_HIT_CNT 0x19ccc
+#define A_LE_CACHE_RD_ALL_CNT 0x19cd0
+#define A_LE_CACHE_RD_HIT_CNT 0x19cd4
+#define A_LE_CACHE_MC_WR_CNT 0x19cd8
+#define A_LE_CACHE_MC_RD_CNT 0x19cdc
#define A_LE_DB_REQ_RSP_CNT 0x19ce4
#define S_T4_RSPCNT 16
@@ -39309,6 +50590,14 @@
#define V_REQCNTLE(x) ((x) << S_REQCNTLE)
#define G_REQCNTLE(x) (((x) >> S_REQCNTLE) & M_REQCNTLE)
+#define A_LE_IND_ADDR 0x19ce8
+
+#define S_T7_1_ADDR 0
+#define M_T7_1_ADDR 0xffU
+#define V_T7_1_ADDR(x) ((x) << S_T7_1_ADDR)
+#define G_T7_1_ADDR(x) (((x) >> S_T7_1_ADDR) & M_T7_1_ADDR)
+
+#define A_LE_IND_DATA 0x19cec
#define A_LE_DB_DBGI_CONFIG 0x19cf0
#define S_DBGICMDPERR 31
@@ -39436,6 +50725,11 @@
#define V_T6_HASHTBLMEMCRCERR(x) ((x) << S_T6_HASHTBLMEMCRCERR)
#define F_T6_HASHTBLMEMCRCERR V_T6_HASHTBLMEMCRCERR(1U)
+#define S_T7_BKCHKPERIOD 22
+#define M_T7_BKCHKPERIOD 0xffU
+#define V_T7_BKCHKPERIOD(x) ((x) << S_T7_BKCHKPERIOD)
+#define G_T7_BKCHKPERIOD(x) (((x) >> S_T7_BKCHKPERIOD) & M_T7_BKCHKPERIOD)
+
#define A_LE_SPARE 0x19cfc
#define A_LE_DB_DBGI_REQ_DATA 0x19d00
#define A_LE_DB_DBGI_REQ_MASK 0x19d50
@@ -39551,6 +50845,7 @@
#define V_HASH_TID_BASE(x) ((x) << S_HASH_TID_BASE)
#define G_HASH_TID_BASE(x) (((x) >> S_HASH_TID_BASE) & M_HASH_TID_BASE)
+#define A_T7_LE_DB_HASH_TID_BASE 0x19df8
#define A_LE_PERR_INJECT 0x19dfc
#define S_LEMEMSEL 1
@@ -39573,6 +50868,7 @@
#define A_LE_HASH_MASK_GEN_IPV6 0x19eb0
#define A_LE_HASH_MASK_GEN_IPV6T5 0x19eb4
#define A_T6_LE_HASH_MASK_GEN_IPV6T5 0x19ec4
+#define A_T7_LE_HASH_MASK_GEN_IPV6T5 0x19ec4
#define A_LE_HASH_MASK_CMP_IPV4 0x19ee0
#define A_LE_HASH_MASK_CMP_IPV4T5 0x19ee4
#define A_LE_DB_PSV_FILTER_MASK_TUP_IPV4 0x19ee4
@@ -39677,6 +50973,9 @@
#define A_LE_TCAM_DEBUG_LA_DATA 0x19f4c
#define A_LE_DB_SECOND_GEN_HASH_MASK_IPV4 0x19f90
#define A_LE_DB_SECOND_CMP_HASH_MASK_IPV4 0x19fa4
+#define A_LE_TCAM_BIST_CTRL 0x19fb0
+#define A_LE_TCAM_BIST_CB_PASS 0x19fb4
+#define A_LE_TCAM_BIST_CB_BUSY 0x19fbc
#define A_LE_HASH_COLLISION 0x19fc4
#define A_LE_GLOBAL_COLLISION 0x19fc8
#define A_LE_FULL_CNT_COLLISION 0x19fcc
@@ -39686,6 +50985,38 @@
#define A_LE_RSP_DEBUG_LA_DATAT5 0x19fdc
#define A_LE_RSP_DEBUG_LA_WRPTRT5 0x19fe0
#define A_LE_DEBUG_LA_SEL_DATA 0x19fe4
+#define A_LE_TCAM_NEG_CTRL0 0x0
+#define A_LE_TCAM_NEG_CTRL1 0x1
+#define A_LE_TCAM_NEG_CTRL2 0x2
+#define A_LE_TCAM_NEG_CTRL3 0x3
+#define A_LE_TCAM_NEG_CTRL4 0x4
+#define A_LE_TCAM_NEG_CTRL5 0x5
+#define A_LE_TCAM_NEG_CTRL6 0x6
+#define A_LE_TCAM_NEG_CTRL7 0x7
+#define A_LE_TCAM_NEG_CTRL8 0x8
+#define A_LE_TCAM_NEG_CTRL9 0x9
+#define A_LE_TCAM_NEG_CTRL10 0xa
+#define A_LE_TCAM_NEG_CTRL11 0xb
+#define A_LE_TCAM_NEG_CTRL12 0xc
+#define A_LE_TCAM_NEG_CTRL13 0xd
+#define A_LE_TCAM_NEG_CTRL14 0xe
+#define A_LE_TCAM_NEG_CTRL15 0xf
+#define A_LE_TCAM_NEG_CTRL16 0x10
+#define A_LE_TCAM_NEG_CTRL17 0x11
+#define A_LE_TCAM_NEG_CTRL18 0x12
+#define A_LE_TCAM_NEG_CTRL19 0x13
+#define A_LE_TCAM_NEG_CTRL20 0x14
+#define A_LE_TCAM_NEG_CTRL21 0x15
+#define A_LE_TCAM_NEG_CTRL22 0x16
+#define A_LE_TCAM_NEG_CTRL23 0x17
+#define A_LE_TCAM_NEG_CTRL24 0x18
+#define A_LE_TCAM_NEG_CTRL25 0x19
+#define A_LE_TCAM_NEG_CTRL26 0x1a
+#define A_LE_TCAM_NEG_CTRL27 0x1b
+#define A_LE_TCAM_NEG_CTRL28 0x1c
+#define A_LE_TCAM_NEG_CTRL29 0x1d
+#define A_LE_TCAM_NEG_CTRL30 0x1e
+#define A_LE_TCAM_NEG_CTRL31 0x1f
/* registers for module NCSI */
#define NCSI_BASE_ADDR 0x1a000
@@ -39735,6 +51066,10 @@
#define V_TX_BYTE_SWAP(x) ((x) << S_TX_BYTE_SWAP)
#define F_TX_BYTE_SWAP V_TX_BYTE_SWAP(1U)
+#define S_XGMAC0_EN 0
+#define V_XGMAC0_EN(x) ((x) << S_XGMAC0_EN)
+#define F_XGMAC0_EN V_XGMAC0_EN(1U)
+
#define A_NCSI_RST_CTRL 0x1a004
#define S_MAC_REF_RST 2
@@ -39991,6 +51326,10 @@
#define V_RXFIFO_PRTY_ERR(x) ((x) << S_RXFIFO_PRTY_ERR)
#define F_RXFIFO_PRTY_ERR V_RXFIFO_PRTY_ERR(1U)
+#define S_CIM2NC_PERR 9
+#define V_CIM2NC_PERR(x) ((x) << S_CIM2NC_PERR)
+#define F_CIM2NC_PERR V_CIM2NC_PERR(1U)
+
#define A_NCSI_INT_CAUSE 0x1a0d8
#define A_NCSI_STATUS 0x1a0dc
@@ -40048,6 +51387,12 @@
#define F_MCSIMELSEL V_MCSIMELSEL(1U)
#define A_NCSI_PERR_ENABLE 0x1a0f8
+#define A_NCSI_MODE_SEL 0x1a0fc
+
+#define S_XGMAC_MODE 0
+#define V_XGMAC_MODE(x) ((x) << S_XGMAC_MODE)
+#define F_XGMAC_MODE V_XGMAC_MODE(1U)
+
#define A_NCSI_MACB_NETWORK_CTRL 0x1a100
#define S_TXSNDZEROPAUSE 12
@@ -40550,6 +51895,832 @@
#define V_DESREV(x) ((x) << S_DESREV)
#define G_DESREV(x) (((x) >> S_DESREV) & M_DESREV)
+#define A_NCSI_TX_CTRL 0x1a200
+
+#define S_T7_TXEN 0
+#define V_T7_TXEN(x) ((x) << S_T7_TXEN)
+#define F_T7_TXEN V_T7_TXEN(1U)
+
+#define A_NCSI_TX_CFG 0x1a204
+#define A_NCSI_TX_PAUSE_QUANTA 0x1a208
+#define A_NCSI_RX_CTRL 0x1a20c
+#define A_NCSI_RX_CFG 0x1a210
+#define A_NCSI_RX_HASH_LOW 0x1a214
+#define A_NCSI_RX_HASH_HIGH 0x1a218
+#define A_NCSI_RX_EXACT_MATCH_LOW_1 0x1a21c
+#define A_NCSI_RX_EXACT_MATCH_HIGH_1 0x1a220
+#define A_NCSI_RX_EXACT_MATCH_LOW_2 0x1a224
+#define A_NCSI_RX_EXACT_MATCH_HIGH_2 0x1a228
+#define A_NCSI_RX_EXACT_MATCH_LOW_3 0x1a22c
+#define A_NCSI_RX_EXACT_MATCH_HIGH_3 0x1a230
+#define A_NCSI_RX_EXACT_MATCH_LOW_4 0x1a234
+#define A_NCSI_RX_EXACT_MATCH_HIGH_4 0x1a238
+#define A_NCSI_RX_EXACT_MATCH_LOW_5 0x1a23c
+#define A_NCSI_RX_EXACT_MATCH_HIGH_5 0x1a240
+#define A_NCSI_RX_EXACT_MATCH_LOW_6 0x1a244
+#define A_NCSI_RX_EXACT_MATCH_HIGH_6 0x1a248
+#define A_NCSI_RX_EXACT_MATCH_LOW_7 0x1a24c
+#define A_NCSI_RX_EXACT_MATCH_HIGH_7 0x1a250
+#define A_NCSI_RX_EXACT_MATCH_LOW_8 0x1a254
+#define A_NCSI_RX_EXACT_MATCH_HIGH_8 0x1a258
+#define A_NCSI_RX_TYPE_MATCH_1 0x1a25c
+#define A_NCSI_RX_TYPE_MATCH_2 0x1a260
+#define A_NCSI_RX_TYPE_MATCH_3 0x1a264
+#define A_NCSI_RX_TYPE_MATCH_4 0x1a268
+#define A_NCSI_INT_STATUS 0x1a26c
+#define A_NCSI_XGM_INT_MASK 0x1a270
+#define A_NCSI_XGM_INT_ENABLE 0x1a274
+#define A_NCSI_XGM_INT_DISABLE 0x1a278
+#define A_NCSI_TX_PAUSE_TIMER 0x1a27c
+#define A_NCSI_STAT_CTRL 0x1a280
+#define A_NCSI_RXFIFO_CFG 0x1a284
+
+#define S_RXFIFO_EMPTY 31
+#define V_RXFIFO_EMPTY(x) ((x) << S_RXFIFO_EMPTY)
+#define F_RXFIFO_EMPTY V_RXFIFO_EMPTY(1U)
+
+#define S_RXFIFO_FULL 30
+#define V_RXFIFO_FULL(x) ((x) << S_RXFIFO_FULL)
+#define F_RXFIFO_FULL V_RXFIFO_FULL(1U)
+
+#define S_RXFIFOPAUSEHWM 17
+#define M_RXFIFOPAUSEHWM 0xfffU
+#define V_RXFIFOPAUSEHWM(x) ((x) << S_RXFIFOPAUSEHWM)
+#define G_RXFIFOPAUSEHWM(x) (((x) >> S_RXFIFOPAUSEHWM) & M_RXFIFOPAUSEHWM)
+
+#define S_RXFIFOPAUSELWM 5
+#define M_RXFIFOPAUSELWM 0xfffU
+#define V_RXFIFOPAUSELWM(x) ((x) << S_RXFIFOPAUSELWM)
+#define G_RXFIFOPAUSELWM(x) (((x) >> S_RXFIFOPAUSELWM) & M_RXFIFOPAUSELWM)
+
+#define S_FORCEDPAUSE 4
+#define V_FORCEDPAUSE(x) ((x) << S_FORCEDPAUSE)
+#define F_FORCEDPAUSE V_FORCEDPAUSE(1U)
+
+#define S_EXTERNLOOPBACK 3
+#define V_EXTERNLOOPBACK(x) ((x) << S_EXTERNLOOPBACK)
+#define F_EXTERNLOOPBACK V_EXTERNLOOPBACK(1U)
+
+#define S_RXBYTESWAP 2
+#define V_RXBYTESWAP(x) ((x) << S_RXBYTESWAP)
+#define F_RXBYTESWAP V_RXBYTESWAP(1U)
+
+#define S_RXSTRFRWRD 1
+#define V_RXSTRFRWRD(x) ((x) << S_RXSTRFRWRD)
+#define F_RXSTRFRWRD V_RXSTRFRWRD(1U)
+
+#define S_DISERRFRAMES 0
+#define V_DISERRFRAMES(x) ((x) << S_DISERRFRAMES)
+#define F_DISERRFRAMES V_DISERRFRAMES(1U)
+
+#define A_NCSI_TXFIFO_CFG 0x1a288
+
+#define S_T7_TXFIFO_EMPTY 31
+#define V_T7_TXFIFO_EMPTY(x) ((x) << S_T7_TXFIFO_EMPTY)
+#define F_T7_TXFIFO_EMPTY V_T7_TXFIFO_EMPTY(1U)
+
+#define S_T7_TXFIFO_FULL 30
+#define V_T7_TXFIFO_FULL(x) ((x) << S_T7_TXFIFO_FULL)
+#define F_T7_TXFIFO_FULL V_T7_TXFIFO_FULL(1U)
+
+#define S_UNDERUNFIX 22
+#define V_UNDERUNFIX(x) ((x) << S_UNDERUNFIX)
+#define F_UNDERUNFIX V_UNDERUNFIX(1U)
+
+#define S_ENDROPPKT 21
+#define V_ENDROPPKT(x) ((x) << S_ENDROPPKT)
+#define F_ENDROPPKT V_ENDROPPKT(1U)
+
+#define S_TXIPG 13
+#define M_TXIPG 0xffU
+#define V_TXIPG(x) ((x) << S_TXIPG)
+#define G_TXIPG(x) (((x) >> S_TXIPG) & M_TXIPG)
+
+#define S_TXFIFOTHRESH 4
+#define M_TXFIFOTHRESH 0x1ffU
+#define V_TXFIFOTHRESH(x) ((x) << S_TXFIFOTHRESH)
+#define G_TXFIFOTHRESH(x) (((x) >> S_TXFIFOTHRESH) & M_TXFIFOTHRESH)
+
+#define S_INTERNLOOPBACK 3
+#define V_INTERNLOOPBACK(x) ((x) << S_INTERNLOOPBACK)
+#define F_INTERNLOOPBACK V_INTERNLOOPBACK(1U)
+
+#define S_TXBYTESWAP 2
+#define V_TXBYTESWAP(x) ((x) << S_TXBYTESWAP)
+#define F_TXBYTESWAP V_TXBYTESWAP(1U)
+
+#define S_DISCRC 1
+#define V_DISCRC(x) ((x) << S_DISCRC)
+#define F_DISCRC V_DISCRC(1U)
+
+#define S_DISPREAMBLE 0
+#define V_DISPREAMBLE(x) ((x) << S_DISPREAMBLE)
+#define F_DISPREAMBLE V_DISPREAMBLE(1U)
+
+#define A_NCSI_SLOW_TIMER 0x1a28c
+
+#define S_PAUSESLOWTIMEREN 31
+#define V_PAUSESLOWTIMEREN(x) ((x) << S_PAUSESLOWTIMEREN)
+#define F_PAUSESLOWTIMEREN V_PAUSESLOWTIMEREN(1U)
+
+#define S_PAUSESLOWTIMER 0
+#define M_PAUSESLOWTIMER 0xfffffU
+#define V_PAUSESLOWTIMER(x) ((x) << S_PAUSESLOWTIMER)
+#define G_PAUSESLOWTIMER(x) (((x) >> S_PAUSESLOWTIMER) & M_PAUSESLOWTIMER)
+
+#define A_NCSI_PAUSE_TIMER 0x1a290
+
+#define S_PAUSETIMER 0
+#define M_PAUSETIMER 0xfffffU
+#define V_PAUSETIMER(x) ((x) << S_PAUSETIMER)
+#define G_PAUSETIMER(x) (((x) >> S_PAUSETIMER) & M_PAUSETIMER)
+
+#define A_NCSI_XAUI_PCS_TEST 0x1a294
+
+#define S_TESTPATTERN 1
+#define M_TESTPATTERN 0x3U
+#define V_TESTPATTERN(x) ((x) << S_TESTPATTERN)
+#define G_TESTPATTERN(x) (((x) >> S_TESTPATTERN) & M_TESTPATTERN)
+
+#define S_ENTEST 0
+#define V_ENTEST(x) ((x) << S_ENTEST)
+#define F_ENTEST V_ENTEST(1U)
+
+#define A_NCSI_RGMII_CTRL 0x1a298
+
+#define S_PHALIGNFIFOTHRESH 1
+#define M_PHALIGNFIFOTHRESH 0x3U
+#define V_PHALIGNFIFOTHRESH(x) ((x) << S_PHALIGNFIFOTHRESH)
+#define G_PHALIGNFIFOTHRESH(x) (((x) >> S_PHALIGNFIFOTHRESH) & M_PHALIGNFIFOTHRESH)
+
+#define S_TXCLK90SHIFT 0
+#define V_TXCLK90SHIFT(x) ((x) << S_TXCLK90SHIFT)
+#define F_TXCLK90SHIFT V_TXCLK90SHIFT(1U)
+
+#define A_NCSI_RGMII_IMP 0x1a29c
+
+#define S_CALRESET 8
+#define V_CALRESET(x) ((x) << S_CALRESET)
+#define F_CALRESET V_CALRESET(1U)
+
+#define S_CALUPDATE 7
+#define V_CALUPDATE(x) ((x) << S_CALUPDATE)
+#define F_CALUPDATE V_CALUPDATE(1U)
+
+#define S_IMPSETUPDATE 6
+#define V_IMPSETUPDATE(x) ((x) << S_IMPSETUPDATE)
+#define F_IMPSETUPDATE V_IMPSETUPDATE(1U)
+
+#define S_RGMIIIMPPD 3
+#define M_RGMIIIMPPD 0x7U
+#define V_RGMIIIMPPD(x) ((x) << S_RGMIIIMPPD)
+#define G_RGMIIIMPPD(x) (((x) >> S_RGMIIIMPPD) & M_RGMIIIMPPD)
+
+#define S_RGMIIIMPPU 0
+#define M_RGMIIIMPPU 0x7U
+#define V_RGMIIIMPPU(x) ((x) << S_RGMIIIMPPU)
+#define G_RGMIIIMPPU(x) (((x) >> S_RGMIIIMPPU) & M_RGMIIIMPPU)
+
+#define A_NCSI_RX_MAX_PKT_SIZE 0x1a2a8
+
+#define S_RXMAXFRAMERSIZE 17
+#define M_RXMAXFRAMERSIZE 0x3fffU
+#define V_RXMAXFRAMERSIZE(x) ((x) << S_RXMAXFRAMERSIZE)
+#define G_RXMAXFRAMERSIZE(x) (((x) >> S_RXMAXFRAMERSIZE) & M_RXMAXFRAMERSIZE)
+
+#define S_RXENERRORGATHER 16
+#define V_RXENERRORGATHER(x) ((x) << S_RXENERRORGATHER)
+#define F_RXENERRORGATHER V_RXENERRORGATHER(1U)
+
+#define S_RXENSINGLEFLIT 15
+#define V_RXENSINGLEFLIT(x) ((x) << S_RXENSINGLEFLIT)
+#define F_RXENSINGLEFLIT V_RXENSINGLEFLIT(1U)
+
+#define S_RXENFRAMER 14
+#define V_RXENFRAMER(x) ((x) << S_RXENFRAMER)
+#define F_RXENFRAMER V_RXENFRAMER(1U)
+
+#define S_RXMAXPKTSIZE 0
+#define M_RXMAXPKTSIZE 0x3fffU
+#define V_RXMAXPKTSIZE(x) ((x) << S_RXMAXPKTSIZE)
+#define G_RXMAXPKTSIZE(x) (((x) >> S_RXMAXPKTSIZE) & M_RXMAXPKTSIZE)
+
+#define A_NCSI_RESET_CTRL 0x1a2ac
+
+#define S_XGMAC_STOP_EN 4
+#define V_XGMAC_STOP_EN(x) ((x) << S_XGMAC_STOP_EN)
+#define F_XGMAC_STOP_EN V_XGMAC_STOP_EN(1U)
+
+#define S_XG2G_RESET_ 3
+#define V_XG2G_RESET_(x) ((x) << S_XG2G_RESET_)
+#define F_XG2G_RESET_ V_XG2G_RESET_(1U)
+
+#define S_RGMII_RESET_ 2
+#define V_RGMII_RESET_(x) ((x) << S_RGMII_RESET_)
+#define F_RGMII_RESET_ V_RGMII_RESET_(1U)
+
+#define S_PCS_RESET_ 1
+#define V_PCS_RESET_(x) ((x) << S_PCS_RESET_)
+#define F_PCS_RESET_ V_PCS_RESET_(1U)
+
+#define S_MAC_RESET_ 0
+#define V_MAC_RESET_(x) ((x) << S_MAC_RESET_)
+#define F_MAC_RESET_ V_MAC_RESET_(1U)
+
+#define A_NCSI_XAUI1G_CTRL 0x1a2b0
+
+#define S_XAUI1GLINKID 0
+#define M_XAUI1GLINKID 0x3U
+#define V_XAUI1GLINKID(x) ((x) << S_XAUI1GLINKID)
+#define G_XAUI1GLINKID(x) (((x) >> S_XAUI1GLINKID) & M_XAUI1GLINKID)
+
+#define A_NCSI_SERDES_LANE_CTRL 0x1a2b4
+
+#define S_LANEREVERSAL 8
+#define V_LANEREVERSAL(x) ((x) << S_LANEREVERSAL)
+#define F_LANEREVERSAL V_LANEREVERSAL(1U)
+
+#define S_TXPOLARITY 4
+#define M_TXPOLARITY 0xfU
+#define V_TXPOLARITY(x) ((x) << S_TXPOLARITY)
+#define G_TXPOLARITY(x) (((x) >> S_TXPOLARITY) & M_TXPOLARITY)
+
+#define S_RXPOLARITY 0
+#define M_RXPOLARITY 0xfU
+#define V_RXPOLARITY(x) ((x) << S_RXPOLARITY)
+#define G_RXPOLARITY(x) (((x) >> S_RXPOLARITY) & M_RXPOLARITY)
+
+#define A_NCSI_PORT_CFG 0x1a2b8
+
+#define S_NCSI_SAFESPEEDCHANGE 4
+#define V_NCSI_SAFESPEEDCHANGE(x) ((x) << S_NCSI_SAFESPEEDCHANGE)
+#define F_NCSI_SAFESPEEDCHANGE V_NCSI_SAFESPEEDCHANGE(1U)
+
+#define S_NCSI_CLKDIVRESET_ 3
+#define V_NCSI_CLKDIVRESET_(x) ((x) << S_NCSI_CLKDIVRESET_)
+#define F_NCSI_CLKDIVRESET_ V_NCSI_CLKDIVRESET_(1U)
+
+#define S_NCSI_PORTSPEED 1
+#define M_NCSI_PORTSPEED 0x3U
+#define V_NCSI_PORTSPEED(x) ((x) << S_NCSI_PORTSPEED)
+#define G_NCSI_PORTSPEED(x) (((x) >> S_NCSI_PORTSPEED) & M_NCSI_PORTSPEED)
+
+#define S_NCSI_ENRGMII 0
+#define V_NCSI_ENRGMII(x) ((x) << S_NCSI_ENRGMII)
+#define F_NCSI_ENRGMII V_NCSI_ENRGMII(1U)
+
+#define A_NCSI_EPIO_DATA0 0x1a2c0
+#define A_NCSI_EPIO_DATA1 0x1a2c4
+#define A_NCSI_EPIO_DATA2 0x1a2c8
+#define A_NCSI_EPIO_DATA3 0x1a2cc
+#define A_NCSI_EPIO_OP 0x1a2d0
+
+#define S_PIO_READY 31
+#define V_PIO_READY(x) ((x) << S_PIO_READY)
+#define F_PIO_READY V_PIO_READY(1U)
+
+#define S_PIO_WRRD 24
+#define V_PIO_WRRD(x) ((x) << S_PIO_WRRD)
+#define F_PIO_WRRD V_PIO_WRRD(1U)
+
+#define S_PIO_ADDRESS 0
+#define M_PIO_ADDRESS 0xffU
+#define V_PIO_ADDRESS(x) ((x) << S_PIO_ADDRESS)
+#define G_PIO_ADDRESS(x) (((x) >> S_PIO_ADDRESS) & M_PIO_ADDRESS)
+
+#define A_NCSI_XGMAC0_INT_ENABLE 0x1a2d4
+
+#define S_XAUIPCSDECERR 24
+#define V_XAUIPCSDECERR(x) ((x) << S_XAUIPCSDECERR)
+#define F_XAUIPCSDECERR V_XAUIPCSDECERR(1U)
+
+#define S_RGMIIRXFIFOOVERFLOW 23
+#define V_RGMIIRXFIFOOVERFLOW(x) ((x) << S_RGMIIRXFIFOOVERFLOW)
+#define F_RGMIIRXFIFOOVERFLOW V_RGMIIRXFIFOOVERFLOW(1U)
+
+#define S_RGMIIRXFIFOUNDERFLOW 22
+#define V_RGMIIRXFIFOUNDERFLOW(x) ((x) << S_RGMIIRXFIFOUNDERFLOW)
+#define F_RGMIIRXFIFOUNDERFLOW V_RGMIIRXFIFOUNDERFLOW(1U)
+
+#define S_RXPKTSIZEERROR 21
+#define V_RXPKTSIZEERROR(x) ((x) << S_RXPKTSIZEERROR)
+#define F_RXPKTSIZEERROR V_RXPKTSIZEERROR(1U)
+
+#define S_WOLPATDETECTED 20
+#define V_WOLPATDETECTED(x) ((x) << S_WOLPATDETECTED)
+#define F_WOLPATDETECTED V_WOLPATDETECTED(1U)
+
+#define S_T7_TXFIFO_PRTY_ERR 17
+#define M_T7_TXFIFO_PRTY_ERR 0x7U
+#define V_T7_TXFIFO_PRTY_ERR(x) ((x) << S_T7_TXFIFO_PRTY_ERR)
+#define G_T7_TXFIFO_PRTY_ERR(x) (((x) >> S_T7_TXFIFO_PRTY_ERR) & M_T7_TXFIFO_PRTY_ERR)
+
+#define S_T7_RXFIFO_PRTY_ERR 14
+#define M_T7_RXFIFO_PRTY_ERR 0x7U
+#define V_T7_RXFIFO_PRTY_ERR(x) ((x) << S_T7_RXFIFO_PRTY_ERR)
+#define G_T7_RXFIFO_PRTY_ERR(x) (((x) >> S_T7_RXFIFO_PRTY_ERR) & M_T7_RXFIFO_PRTY_ERR)
+
+#define S_TXFIFO_UNDERRUN 13
+#define V_TXFIFO_UNDERRUN(x) ((x) << S_TXFIFO_UNDERRUN)
+#define F_TXFIFO_UNDERRUN V_TXFIFO_UNDERRUN(1U)
+
+#define S_RXFIFO_OVERFLOW 12
+#define V_RXFIFO_OVERFLOW(x) ((x) << S_RXFIFO_OVERFLOW)
+#define F_RXFIFO_OVERFLOW V_RXFIFO_OVERFLOW(1U)
+
+#define S_SERDESBISTERR 8
+#define M_SERDESBISTERR 0xfU
+#define V_SERDESBISTERR(x) ((x) << S_SERDESBISTERR)
+#define G_SERDESBISTERR(x) (((x) >> S_SERDESBISTERR) & M_SERDESBISTERR)
+
+#define S_SERDESLOWSIGCHANGE 4
+#define M_SERDESLOWSIGCHANGE 0xfU
+#define V_SERDESLOWSIGCHANGE(x) ((x) << S_SERDESLOWSIGCHANGE)
+#define G_SERDESLOWSIGCHANGE(x) (((x) >> S_SERDESLOWSIGCHANGE) & M_SERDESLOWSIGCHANGE)
+
+#define S_XAUIPCSCTCERR 3
+#define V_XAUIPCSCTCERR(x) ((x) << S_XAUIPCSCTCERR)
+#define F_XAUIPCSCTCERR V_XAUIPCSCTCERR(1U)
+
+#define S_XAUIPCSALIGNCHANGE 2
+#define V_XAUIPCSALIGNCHANGE(x) ((x) << S_XAUIPCSALIGNCHANGE)
+#define F_XAUIPCSALIGNCHANGE V_XAUIPCSALIGNCHANGE(1U)
+
+#define S_RGMIILINKSTSCHANGE 1
+#define V_RGMIILINKSTSCHANGE(x) ((x) << S_RGMIILINKSTSCHANGE)
+#define F_RGMIILINKSTSCHANGE V_RGMIILINKSTSCHANGE(1U)
+
+#define S_T7_XGM_INT 0
+#define V_T7_XGM_INT(x) ((x) << S_T7_XGM_INT)
+#define F_T7_XGM_INT V_T7_XGM_INT(1U)
+
+#define A_NCSI_XGMAC0_INT_CAUSE 0x1a2d8
+#define A_NCSI_XAUI_ACT_CTRL 0x1a2dc
+#define A_NCSI_SERDES_CTRL0 0x1a2e0
+
+#define S_INTSERLPBK3 27
+#define V_INTSERLPBK3(x) ((x) << S_INTSERLPBK3)
+#define F_INTSERLPBK3 V_INTSERLPBK3(1U)
+
+#define S_INTSERLPBK2 26
+#define V_INTSERLPBK2(x) ((x) << S_INTSERLPBK2)
+#define F_INTSERLPBK2 V_INTSERLPBK2(1U)
+
+#define S_INTSERLPBK1 25
+#define V_INTSERLPBK1(x) ((x) << S_INTSERLPBK1)
+#define F_INTSERLPBK1 V_INTSERLPBK1(1U)
+
+#define S_INTSERLPBK0 24
+#define V_INTSERLPBK0(x) ((x) << S_INTSERLPBK0)
+#define F_INTSERLPBK0 V_INTSERLPBK0(1U)
+
+#define S_RESET3 23
+#define V_RESET3(x) ((x) << S_RESET3)
+#define F_RESET3 V_RESET3(1U)
+
+#define S_RESET2 22
+#define V_RESET2(x) ((x) << S_RESET2)
+#define F_RESET2 V_RESET2(1U)
+
+#define S_RESET1 21
+#define V_RESET1(x) ((x) << S_RESET1)
+#define F_RESET1 V_RESET1(1U)
+
+#define S_RESET0 20
+#define V_RESET0(x) ((x) << S_RESET0)
+#define F_RESET0 V_RESET0(1U)
+
+#define S_PWRDN3 19
+#define V_PWRDN3(x) ((x) << S_PWRDN3)
+#define F_PWRDN3 V_PWRDN3(1U)
+
+#define S_PWRDN2 18
+#define V_PWRDN2(x) ((x) << S_PWRDN2)
+#define F_PWRDN2 V_PWRDN2(1U)
+
+#define S_PWRDN1 17
+#define V_PWRDN1(x) ((x) << S_PWRDN1)
+#define F_PWRDN1 V_PWRDN1(1U)
+
+#define S_PWRDN0 16
+#define V_PWRDN0(x) ((x) << S_PWRDN0)
+#define F_PWRDN0 V_PWRDN0(1U)
+
+#define S_RESETPLL23 15
+#define V_RESETPLL23(x) ((x) << S_RESETPLL23)
+#define F_RESETPLL23 V_RESETPLL23(1U)
+
+#define S_RESETPLL01 14
+#define V_RESETPLL01(x) ((x) << S_RESETPLL01)
+#define F_RESETPLL01 V_RESETPLL01(1U)
+
+#define S_PW23 12
+#define M_PW23 0x3U
+#define V_PW23(x) ((x) << S_PW23)
+#define G_PW23(x) (((x) >> S_PW23) & M_PW23)
+
+#define S_PW01 10
+#define M_PW01 0x3U
+#define V_PW01(x) ((x) << S_PW01)
+#define G_PW01(x) (((x) >> S_PW01) & M_PW01)
+
+#define S_DEQ 6
+#define M_DEQ 0xfU
+#define V_DEQ(x) ((x) << S_DEQ)
+#define G_DEQ(x) (((x) >> S_DEQ) & M_DEQ)
+
+#define S_DTX 2
+#define M_DTX 0xfU
+#define V_DTX(x) ((x) << S_DTX)
+#define G_DTX(x) (((x) >> S_DTX) & M_DTX)
+
+#define S_LODRV 1
+#define V_LODRV(x) ((x) << S_LODRV)
+#define F_LODRV V_LODRV(1U)
+
+#define S_HIDRV 0
+#define V_HIDRV(x) ((x) << S_HIDRV)
+#define F_HIDRV V_HIDRV(1U)
+
+#define A_NCSI_SERDES_CTRL1 0x1a2e4
+
+#define S_FMOFFSET3 19
+#define M_FMOFFSET3 0x1fU
+#define V_FMOFFSET3(x) ((x) << S_FMOFFSET3)
+#define G_FMOFFSET3(x) (((x) >> S_FMOFFSET3) & M_FMOFFSET3)
+
+#define S_FMOFFSETEN3 18
+#define V_FMOFFSETEN3(x) ((x) << S_FMOFFSETEN3)
+#define F_FMOFFSETEN3 V_FMOFFSETEN3(1U)
+
+#define S_FMOFFSET2 13
+#define M_FMOFFSET2 0x1fU
+#define V_FMOFFSET2(x) ((x) << S_FMOFFSET2)
+#define G_FMOFFSET2(x) (((x) >> S_FMOFFSET2) & M_FMOFFSET2)
+
+#define S_FMOFFSETEN2 12
+#define V_FMOFFSETEN2(x) ((x) << S_FMOFFSETEN2)
+#define F_FMOFFSETEN2 V_FMOFFSETEN2(1U)
+
+#define S_FMOFFSET1 7
+#define M_FMOFFSET1 0x1fU
+#define V_FMOFFSET1(x) ((x) << S_FMOFFSET1)
+#define G_FMOFFSET1(x) (((x) >> S_FMOFFSET1) & M_FMOFFSET1)
+
+#define S_FMOFFSETEN1 6
+#define V_FMOFFSETEN1(x) ((x) << S_FMOFFSETEN1)
+#define F_FMOFFSETEN1 V_FMOFFSETEN1(1U)
+
+#define S_FMOFFSET0 1
+#define M_FMOFFSET0 0x1fU
+#define V_FMOFFSET0(x) ((x) << S_FMOFFSET0)
+#define G_FMOFFSET0(x) (((x) >> S_FMOFFSET0) & M_FMOFFSET0)
+
+#define S_FMOFFSETEN0 0
+#define V_FMOFFSETEN0(x) ((x) << S_FMOFFSETEN0)
+#define F_FMOFFSETEN0 V_FMOFFSETEN0(1U)
+
+#define A_NCSI_SERDES_CTRL2 0x1a2e8
+
+#define S_DNIN3 11
+#define V_DNIN3(x) ((x) << S_DNIN3)
+#define F_DNIN3 V_DNIN3(1U)
+
+#define S_UPIN3 10
+#define V_UPIN3(x) ((x) << S_UPIN3)
+#define F_UPIN3 V_UPIN3(1U)
+
+#define S_RXSLAVE3 9
+#define V_RXSLAVE3(x) ((x) << S_RXSLAVE3)
+#define F_RXSLAVE3 V_RXSLAVE3(1U)
+
+#define S_DNIN2 8
+#define V_DNIN2(x) ((x) << S_DNIN2)
+#define F_DNIN2 V_DNIN2(1U)
+
+#define S_UPIN2 7
+#define V_UPIN2(x) ((x) << S_UPIN2)
+#define F_UPIN2 V_UPIN2(1U)
+
+#define S_RXSLAVE2 6
+#define V_RXSLAVE2(x) ((x) << S_RXSLAVE2)
+#define F_RXSLAVE2 V_RXSLAVE2(1U)
+
+#define S_DNIN1 5
+#define V_DNIN1(x) ((x) << S_DNIN1)
+#define F_DNIN1 V_DNIN1(1U)
+
+#define S_UPIN1 4
+#define V_UPIN1(x) ((x) << S_UPIN1)
+#define F_UPIN1 V_UPIN1(1U)
+
+#define S_RXSLAVE1 3
+#define V_RXSLAVE1(x) ((x) << S_RXSLAVE1)
+#define F_RXSLAVE1 V_RXSLAVE1(1U)
+
+#define S_DNIN0 2
+#define V_DNIN0(x) ((x) << S_DNIN0)
+#define F_DNIN0 V_DNIN0(1U)
+
+#define S_UPIN0 1
+#define V_UPIN0(x) ((x) << S_UPIN0)
+#define F_UPIN0 V_UPIN0(1U)
+
+#define S_RXSLAVE0 0
+#define V_RXSLAVE0(x) ((x) << S_RXSLAVE0)
+#define F_RXSLAVE0 V_RXSLAVE0(1U)
+
+#define A_NCSI_SERDES_CTRL3 0x1a2ec
+
+#define S_EXTBISTCHKERRCLR3 31
+#define V_EXTBISTCHKERRCLR3(x) ((x) << S_EXTBISTCHKERRCLR3)
+#define F_EXTBISTCHKERRCLR3 V_EXTBISTCHKERRCLR3(1U)
+
+#define S_EXTBISTCHKEN3 30
+#define V_EXTBISTCHKEN3(x) ((x) << S_EXTBISTCHKEN3)
+#define F_EXTBISTCHKEN3 V_EXTBISTCHKEN3(1U)
+
+#define S_EXTBISTGENEN3 29
+#define V_EXTBISTGENEN3(x) ((x) << S_EXTBISTGENEN3)
+#define F_EXTBISTGENEN3 V_EXTBISTGENEN3(1U)
+
+#define S_EXTBISTPAT3 26
+#define M_EXTBISTPAT3 0x7U
+#define V_EXTBISTPAT3(x) ((x) << S_EXTBISTPAT3)
+#define G_EXTBISTPAT3(x) (((x) >> S_EXTBISTPAT3) & M_EXTBISTPAT3)
+
+#define S_EXTPARRESET3 25
+#define V_EXTPARRESET3(x) ((x) << S_EXTPARRESET3)
+#define F_EXTPARRESET3 V_EXTPARRESET3(1U)
+
+#define S_EXTPARLPBK3 24
+#define V_EXTPARLPBK3(x) ((x) << S_EXTPARLPBK3)
+#define F_EXTPARLPBK3 V_EXTPARLPBK3(1U)
+
+#define S_EXTBISTCHKERRCLR2 23
+#define V_EXTBISTCHKERRCLR2(x) ((x) << S_EXTBISTCHKERRCLR2)
+#define F_EXTBISTCHKERRCLR2 V_EXTBISTCHKERRCLR2(1U)
+
+#define S_EXTBISTCHKEN2 22
+#define V_EXTBISTCHKEN2(x) ((x) << S_EXTBISTCHKEN2)
+#define F_EXTBISTCHKEN2 V_EXTBISTCHKEN2(1U)
+
+#define S_EXTBISTGENEN2 21
+#define V_EXTBISTGENEN2(x) ((x) << S_EXTBISTGENEN2)
+#define F_EXTBISTGENEN2 V_EXTBISTGENEN2(1U)
+
+#define S_EXTBISTPAT2 18
+#define M_EXTBISTPAT2 0x7U
+#define V_EXTBISTPAT2(x) ((x) << S_EXTBISTPAT2)
+#define G_EXTBISTPAT2(x) (((x) >> S_EXTBISTPAT2) & M_EXTBISTPAT2)
+
+#define S_EXTPARRESET2 17
+#define V_EXTPARRESET2(x) ((x) << S_EXTPARRESET2)
+#define F_EXTPARRESET2 V_EXTPARRESET2(1U)
+
+#define S_EXTPARLPBK2 16
+#define V_EXTPARLPBK2(x) ((x) << S_EXTPARLPBK2)
+#define F_EXTPARLPBK2 V_EXTPARLPBK2(1U)
+
+#define S_EXTBISTCHKERRCLR1 15
+#define V_EXTBISTCHKERRCLR1(x) ((x) << S_EXTBISTCHKERRCLR1)
+#define F_EXTBISTCHKERRCLR1 V_EXTBISTCHKERRCLR1(1U)
+
+#define S_EXTBISTCHKEN1 14
+#define V_EXTBISTCHKEN1(x) ((x) << S_EXTBISTCHKEN1)
+#define F_EXTBISTCHKEN1 V_EXTBISTCHKEN1(1U)
+
+#define S_EXTBISTGENEN1 13
+#define V_EXTBISTGENEN1(x) ((x) << S_EXTBISTGENEN1)
+#define F_EXTBISTGENEN1 V_EXTBISTGENEN1(1U)
+
+#define S_EXTBISTPAT1 10
+#define M_EXTBISTPAT1 0x7U
+#define V_EXTBISTPAT1(x) ((x) << S_EXTBISTPAT1)
+#define G_EXTBISTPAT1(x) (((x) >> S_EXTBISTPAT1) & M_EXTBISTPAT1)
+
+#define S_EXTPARRESET1 9
+#define V_EXTPARRESET1(x) ((x) << S_EXTPARRESET1)
+#define F_EXTPARRESET1 V_EXTPARRESET1(1U)
+
+#define S_EXTPARLPBK1 8
+#define V_EXTPARLPBK1(x) ((x) << S_EXTPARLPBK1)
+#define F_EXTPARLPBK1 V_EXTPARLPBK1(1U)
+
+#define S_EXTBISTCHKERRCLR0 7
+#define V_EXTBISTCHKERRCLR0(x) ((x) << S_EXTBISTCHKERRCLR0)
+#define F_EXTBISTCHKERRCLR0 V_EXTBISTCHKERRCLR0(1U)
+
+#define S_EXTBISTCHKEN0 6
+#define V_EXTBISTCHKEN0(x) ((x) << S_EXTBISTCHKEN0)
+#define F_EXTBISTCHKEN0 V_EXTBISTCHKEN0(1U)
+
+#define S_EXTBISTGENEN0 5
+#define V_EXTBISTGENEN0(x) ((x) << S_EXTBISTGENEN0)
+#define F_EXTBISTGENEN0 V_EXTBISTGENEN0(1U)
+
+#define S_EXTBISTPAT0 2
+#define M_EXTBISTPAT0 0x7U
+#define V_EXTBISTPAT0(x) ((x) << S_EXTBISTPAT0)
+#define G_EXTBISTPAT0(x) (((x) >> S_EXTBISTPAT0) & M_EXTBISTPAT0)
+
+#define S_EXTPARRESET0 1
+#define V_EXTPARRESET0(x) ((x) << S_EXTPARRESET0)
+#define F_EXTPARRESET0 V_EXTPARRESET0(1U)
+
+#define S_EXTPARLPBK0 0
+#define V_EXTPARLPBK0(x) ((x) << S_EXTPARLPBK0)
+#define F_EXTPARLPBK0 V_EXTPARLPBK0(1U)
+
+#define A_NCSI_SERDES_STAT0 0x1a2f0
+
+#define S_EXTBISTCHKERRCNT0 4
+#define M_EXTBISTCHKERRCNT0 0xffffffU
+#define V_EXTBISTCHKERRCNT0(x) ((x) << S_EXTBISTCHKERRCNT0)
+#define G_EXTBISTCHKERRCNT0(x) (((x) >> S_EXTBISTCHKERRCNT0) & M_EXTBISTCHKERRCNT0)
+
+#define S_EXTBISTCHKFMD0 3
+#define V_EXTBISTCHKFMD0(x) ((x) << S_EXTBISTCHKFMD0)
+#define F_EXTBISTCHKFMD0 V_EXTBISTCHKFMD0(1U)
+
+#define S_LOWSIGFORCEEN0 2
+#define V_LOWSIGFORCEEN0(x) ((x) << S_LOWSIGFORCEEN0)
+#define F_LOWSIGFORCEEN0 V_LOWSIGFORCEEN0(1U)
+
+#define S_LOWSIGFORCEVALUE0 1
+#define V_LOWSIGFORCEVALUE0(x) ((x) << S_LOWSIGFORCEVALUE0)
+#define F_LOWSIGFORCEVALUE0 V_LOWSIGFORCEVALUE0(1U)
+
+#define S_LOWSIG0 0
+#define V_LOWSIG0(x) ((x) << S_LOWSIG0)
+#define F_LOWSIG0 V_LOWSIG0(1U)
+
+#define A_NCSI_SERDES_STAT1 0x1a2f4
+
+#define S_EXTBISTCHKERRCNT1 4
+#define M_EXTBISTCHKERRCNT1 0xffffffU
+#define V_EXTBISTCHKERRCNT1(x) ((x) << S_EXTBISTCHKERRCNT1)
+#define G_EXTBISTCHKERRCNT1(x) (((x) >> S_EXTBISTCHKERRCNT1) & M_EXTBISTCHKERRCNT1)
+
+#define S_EXTBISTCHKFMD1 3
+#define V_EXTBISTCHKFMD1(x) ((x) << S_EXTBISTCHKFMD1)
+#define F_EXTBISTCHKFMD1 V_EXTBISTCHKFMD1(1U)
+
+#define S_LOWSIGFORCEEN1 2
+#define V_LOWSIGFORCEEN1(x) ((x) << S_LOWSIGFORCEEN1)
+#define F_LOWSIGFORCEEN1 V_LOWSIGFORCEEN1(1U)
+
+#define S_LOWSIGFORCEVALUE1 1
+#define V_LOWSIGFORCEVALUE1(x) ((x) << S_LOWSIGFORCEVALUE1)
+#define F_LOWSIGFORCEVALUE1 V_LOWSIGFORCEVALUE1(1U)
+
+#define S_LOWSIG1 0
+#define V_LOWSIG1(x) ((x) << S_LOWSIG1)
+#define F_LOWSIG1 V_LOWSIG1(1U)
+
+#define A_NCSI_SERDES_STAT2 0x1a2f8
+
+#define S_EXTBISTCHKERRCNT2 4
+#define M_EXTBISTCHKERRCNT2 0xffffffU
+#define V_EXTBISTCHKERRCNT2(x) ((x) << S_EXTBISTCHKERRCNT2)
+#define G_EXTBISTCHKERRCNT2(x) (((x) >> S_EXTBISTCHKERRCNT2) & M_EXTBISTCHKERRCNT2)
+
+#define S_EXTBISTCHKFMD2 3
+#define V_EXTBISTCHKFMD2(x) ((x) << S_EXTBISTCHKFMD2)
+#define F_EXTBISTCHKFMD2 V_EXTBISTCHKFMD2(1U)
+
+#define S_LOWSIGFORCEEN2 2
+#define V_LOWSIGFORCEEN2(x) ((x) << S_LOWSIGFORCEEN2)
+#define F_LOWSIGFORCEEN2 V_LOWSIGFORCEEN2(1U)
+
+#define S_LOWSIGFORCEVALUE2 1
+#define V_LOWSIGFORCEVALUE2(x) ((x) << S_LOWSIGFORCEVALUE2)
+#define F_LOWSIGFORCEVALUE2 V_LOWSIGFORCEVALUE2(1U)
+
+#define S_LOWSIG2 0
+#define V_LOWSIG2(x) ((x) << S_LOWSIG2)
+#define F_LOWSIG2 V_LOWSIG2(1U)
+
+#define A_NCSI_SERDES_STAT3 0x1a2fc
+
+#define S_EXTBISTCHKERRCNT3 4
+#define M_EXTBISTCHKERRCNT3 0xffffffU
+#define V_EXTBISTCHKERRCNT3(x) ((x) << S_EXTBISTCHKERRCNT3)
+#define G_EXTBISTCHKERRCNT3(x) (((x) >> S_EXTBISTCHKERRCNT3) & M_EXTBISTCHKERRCNT3)
+
+#define S_EXTBISTCHKFMD3 3
+#define V_EXTBISTCHKFMD3(x) ((x) << S_EXTBISTCHKFMD3)
+#define F_EXTBISTCHKFMD3 V_EXTBISTCHKFMD3(1U)
+
+#define S_LOWSIGFORCEEN3 2
+#define V_LOWSIGFORCEEN3(x) ((x) << S_LOWSIGFORCEEN3)
+#define F_LOWSIGFORCEEN3 V_LOWSIGFORCEEN3(1U)
+
+#define S_LOWSIGFORCEVALUE3 1
+#define V_LOWSIGFORCEVALUE3(x) ((x) << S_LOWSIGFORCEVALUE3)
+#define F_LOWSIGFORCEVALUE3 V_LOWSIGFORCEVALUE3(1U)
+
+#define S_LOWSIG3 0
+#define V_LOWSIG3(x) ((x) << S_LOWSIG3)
+#define F_LOWSIG3 V_LOWSIG3(1U)
+
+#define A_NCSI_STAT_TX_BYTE_LOW 0x1a300
+#define A_NCSI_STAT_TX_BYTE_HIGH 0x1a304
+#define A_NCSI_STAT_TX_FRAME_LOW 0x1a308
+#define A_NCSI_STAT_TX_FRAME_HIGH 0x1a30c
+#define A_NCSI_STAT_TX_BCAST 0x1a310
+#define A_NCSI_STAT_TX_MCAST 0x1a314
+#define A_NCSI_STAT_TX_PAUSE 0x1a318
+#define A_NCSI_STAT_TX_64B_FRAMES 0x1a31c
+#define A_NCSI_STAT_TX_65_127B_FRAMES 0x1a320
+#define A_NCSI_STAT_TX_128_255B_FRAMES 0x1a324
+#define A_NCSI_STAT_TX_256_511B_FRAMES 0x1a328
+#define A_NCSI_STAT_TX_512_1023B_FRAMES 0x1a32c
+#define A_NCSI_STAT_TX_1024_1518B_FRAMES 0x1a330
+#define A_NCSI_STAT_TX_1519_MAXB_FRAMES 0x1a334
+#define A_NCSI_STAT_TX_ERR_FRAMES 0x1a338
+#define A_NCSI_STAT_RX_BYTES_LOW 0x1a33c
+#define A_NCSI_STAT_RX_BYTES_HIGH 0x1a340
+#define A_NCSI_STAT_RX_FRAMES_LOW 0x1a344
+#define A_NCSI_STAT_RX_FRAMES_HIGH 0x1a348
+#define A_NCSI_STAT_RX_BCAST_FRAMES 0x1a34c
+#define A_NCSI_STAT_RX_MCAST_FRAMES 0x1a350
+#define A_NCSI_STAT_RX_PAUSE_FRAMES 0x1a354
+#define A_NCSI_STAT_RX_64B_FRAMES 0x1a358
+#define A_NCSI_STAT_RX_65_127B_FRAMES 0x1a35c
+#define A_NCSI_STAT_RX_128_255B_FRAMES 0x1a360
+#define A_NCSI_STAT_RX_256_511B_FRAMES 0x1a364
+#define A_NCSI_STAT_RX_512_1023B_FRAMES 0x1a368
+#define A_NCSI_STAT_RX_1024_1518B_FRAMES 0x1a36c
+#define A_NCSI_STAT_RX_1519_MAXB_FRAMES 0x1a370
+#define A_NCSI_STAT_RX_SHORT_FRAMES 0x1a374
+#define A_NCSI_STAT_RX_OVERSIZE_FRAMES 0x1a378
+#define A_NCSI_STAT_RX_JABBER_FRAMES 0x1a37c
+#define A_NCSI_STAT_RX_CRC_ERR_FRAMES 0x1a380
+#define A_NCSI_STAT_RX_LENGTH_ERR_FRAMES 0x1a384
+#define A_NCSI_STAT_RX_SYM_CODE_ERR_FRAMES 0x1a388
+#define A_NCSI_XAUI_PCS_ERR 0x1a398
+
+#define S_PCS_SYNCSTATUS 5
+#define M_PCS_SYNCSTATUS 0xfU
+#define V_PCS_SYNCSTATUS(x) ((x) << S_PCS_SYNCSTATUS)
+#define G_PCS_SYNCSTATUS(x) (((x) >> S_PCS_SYNCSTATUS) & M_PCS_SYNCSTATUS)
+
+#define S_PCS_CTCFIFOERR 1
+#define M_PCS_CTCFIFOERR 0xfU
+#define V_PCS_CTCFIFOERR(x) ((x) << S_PCS_CTCFIFOERR)
+#define G_PCS_CTCFIFOERR(x) (((x) >> S_PCS_CTCFIFOERR) & M_PCS_CTCFIFOERR)
+
+#define S_PCS_NOTALIGNED 0
+#define V_PCS_NOTALIGNED(x) ((x) << S_PCS_NOTALIGNED)
+#define F_PCS_NOTALIGNED V_PCS_NOTALIGNED(1U)
+
+#define A_NCSI_RGMII_STATUS 0x1a39c
+
+#define S_GMIIDUPLEX 3
+#define V_GMIIDUPLEX(x) ((x) << S_GMIIDUPLEX)
+#define F_GMIIDUPLEX V_GMIIDUPLEX(1U)
+
+#define S_GMIISPEED 1
+#define M_GMIISPEED 0x3U
+#define V_GMIISPEED(x) ((x) << S_GMIISPEED)
+#define G_GMIISPEED(x) (((x) >> S_GMIISPEED) & M_GMIISPEED)
+
+#define S_GMIILINKSTATUS 0
+#define V_GMIILINKSTATUS(x) ((x) << S_GMIILINKSTATUS)
+#define F_GMIILINKSTATUS V_GMIILINKSTATUS(1U)
+
+#define A_NCSI_WOL_STATUS 0x1a3a0
+
+#define S_T7_PATDETECTED 31
+#define V_T7_PATDETECTED(x) ((x) << S_T7_PATDETECTED)
+#define F_T7_PATDETECTED V_T7_PATDETECTED(1U)
+
+#define A_NCSI_RX_MAX_PKT_SIZE_ERR_CNT 0x1a3a4
+#define A_NCSI_TX_SPI4_SOP_EOP_CNT 0x1a3a8
+
+#define S_TXSPI4SOPCNT 16
+#define M_TXSPI4SOPCNT 0xffffU
+#define V_TXSPI4SOPCNT(x) ((x) << S_TXSPI4SOPCNT)
+#define G_TXSPI4SOPCNT(x) (((x) >> S_TXSPI4SOPCNT) & M_TXSPI4SOPCNT)
+
+#define S_TXSPI4EOPCNT 0
+#define M_TXSPI4EOPCNT 0xffffU
+#define V_TXSPI4EOPCNT(x) ((x) << S_TXSPI4EOPCNT)
+#define G_TXSPI4EOPCNT(x) (((x) >> S_TXSPI4EOPCNT) & M_TXSPI4EOPCNT)
+
+#define A_NCSI_RX_SPI4_SOP_EOP_CNT 0x1a3ac
+
+#define S_RXSPI4SOPCNT 16
+#define M_RXSPI4SOPCNT 0xffffU
+#define V_RXSPI4SOPCNT(x) ((x) << S_RXSPI4SOPCNT)
+#define G_RXSPI4SOPCNT(x) (((x) >> S_RXSPI4SOPCNT) & M_RXSPI4SOPCNT)
+
+#define S_RXSPI4EOPCNT 0
+#define M_RXSPI4EOPCNT 0xffffU
+#define V_RXSPI4EOPCNT(x) ((x) << S_RXSPI4EOPCNT)
+#define G_RXSPI4EOPCNT(x) (((x) >> S_RXSPI4EOPCNT) & M_RXSPI4EOPCNT)
+
/* registers for module XGMAC */
#define XGMAC_BASE_ADDR 0x0
@@ -44054,6 +56225,16 @@
#define V_IBQEMPTY(x) ((x) << S_IBQEMPTY)
#define G_IBQEMPTY(x) (((x) >> S_IBQEMPTY) & M_IBQEMPTY)
+#define S_T7_IBQGEN1 10
+#define M_T7_IBQGEN1 0x3fU
+#define V_T7_IBQGEN1(x) ((x) << S_T7_IBQGEN1)
+#define G_T7_IBQGEN1(x) (((x) >> S_T7_IBQGEN1) & M_T7_IBQGEN1)
+
+#define S_T7_IBQEMPTY 0
+#define M_T7_IBQEMPTY 0x3ffU
+#define V_T7_IBQEMPTY(x) ((x) << S_T7_IBQEMPTY)
+#define G_T7_IBQEMPTY(x) (((x) >> S_T7_IBQEMPTY) & M_T7_IBQEMPTY)
+
#define A_UP_OBQ_GEN 0xc
#define S_OBQGEN 6
@@ -44076,6 +56257,16 @@
#define V_T5_OBQFULL(x) ((x) << S_T5_OBQFULL)
#define G_T5_OBQFULL(x) (((x) >> S_T5_OBQFULL) & M_T5_OBQFULL)
+#define S_T7_T5_OBQGEN 16
+#define M_T7_T5_OBQGEN 0xffffU
+#define V_T7_T5_OBQGEN(x) ((x) << S_T7_T5_OBQGEN)
+#define G_T7_T5_OBQGEN(x) (((x) >> S_T7_T5_OBQGEN) & M_T7_T5_OBQGEN)
+
+#define S_T7_T5_OBQFULL 0
+#define M_T7_T5_OBQFULL 0xffffU
+#define V_T7_T5_OBQFULL(x) ((x) << S_T7_T5_OBQFULL)
+#define G_T7_T5_OBQFULL(x) (((x) >> S_T7_T5_OBQFULL) & M_T7_T5_OBQFULL)
+
#define A_UP_IBQ_0_RDADDR 0x10
#define S_QUEID 13
@@ -44088,6 +56279,13 @@
#define V_IBQRDADDR(x) ((x) << S_IBQRDADDR)
#define G_IBQRDADDR(x) (((x) >> S_IBQRDADDR) & M_IBQRDADDR)
+#define A_UP_IBQ_GEN_IPC 0x10
+
+#define S_IPCEMPTY 0
+#define M_IPCEMPTY 0x7fU
+#define V_IPCEMPTY(x) ((x) << S_IPCEMPTY)
+#define G_IPCEMPTY(x) (((x) >> S_IPCEMPTY) & M_IPCEMPTY)
+
#define A_UP_IBQ_0_WRADDR 0x14
#define S_IBQWRADDR 0
@@ -44160,10 +56358,15 @@
#define A_UP_OBQ_0_STATUS 0x78
#define A_UP_OBQ_0_PKTCNT 0x7c
#define A_UP_OBQ_1_RDADDR 0x80
+#define A_UP_NXT_FLOWADDR0 0x80
#define A_UP_OBQ_1_WRADDR 0x84
+#define A_UP_NXT_FLOWADDR1 0x84
#define A_UP_OBQ_1_STATUS 0x88
+#define A_UP_NXT_FLOWADDR2 0x88
#define A_UP_OBQ_1_PKTCNT 0x8c
+#define A_UP_NXT_FLOWADDR3 0x8c
#define A_UP_OBQ_2_RDADDR 0x90
+#define A_UP_DFT_FLOWADDR 0x90
#define A_UP_OBQ_2_WRADDR 0x94
#define A_UP_OBQ_2_STATUS 0x98
#define A_UP_OBQ_2_PKTCNT 0x9c
@@ -44176,9 +56379,33 @@
#define A_UP_OBQ_4_STATUS 0xb8
#define A_UP_OBQ_4_PKTCNT 0xbc
#define A_UP_OBQ_5_RDADDR 0xc0
+#define A_UP_MAX_SEQ_NUM 0xc0
#define A_UP_OBQ_5_WRADDR 0xc4
+#define A_UP_UNACK_SEQ_NUM 0xc4
#define A_UP_OBQ_5_STATUS 0xc8
+#define A_UP_SEARCH_SEQ_NUM 0xc8
#define A_UP_OBQ_5_PKTCNT 0xcc
+#define A_UP_SEQ_SEARCH_CTRL 0xcc
+
+#define S_FIFO_SIZE 29
+#define M_FIFO_SIZE 0x7U
+#define V_FIFO_SIZE(x) ((x) << S_FIFO_SIZE)
+#define G_FIFO_SIZE(x) (((x) >> S_FIFO_SIZE) & M_FIFO_SIZE)
+
+#define S_ROCE_MODE 28
+#define V_ROCE_MODE(x) ((x) << S_ROCE_MODE)
+#define F_ROCE_MODE V_ROCE_MODE(1U)
+
+#define S_SEQ_WR_PTR 16
+#define M_SEQ_WR_PTR 0xfffU
+#define V_SEQ_WR_PTR(x) ((x) << S_SEQ_WR_PTR)
+#define G_SEQ_WR_PTR(x) (((x) >> S_SEQ_WR_PTR) & M_SEQ_WR_PTR)
+
+#define S_SEQ_RD_PTR 0
+#define M_SEQ_RD_PTR 0xfffU
+#define V_SEQ_RD_PTR(x) ((x) << S_SEQ_RD_PTR)
+#define G_SEQ_RD_PTR(x) (((x) >> S_SEQ_RD_PTR) & M_SEQ_RD_PTR)
+
#define A_UP_IBQ_0_CONFIG 0xd0
#define S_QUESIZE 26
@@ -44203,6 +56430,25 @@
#define V_QUE1KEN(x) ((x) << S_QUE1KEN)
#define F_QUE1KEN V_QUE1KEN(1U)
+#define A_UP_SEQ_SEARCH_RES0 0xd0
+
+#define S_INV_SEQ 18
+#define V_INV_SEQ(x) ((x) << S_INV_SEQ)
+#define F_INV_SEQ V_INV_SEQ(1U)
+
+#define S_DUP_SEQ 17
+#define V_DUP_SEQ(x) ((x) << S_DUP_SEQ)
+#define F_DUP_SEQ V_DUP_SEQ(1U)
+
+#define S_MATCH_VLD 16
+#define V_MATCH_VLD(x) ((x) << S_MATCH_VLD)
+#define F_MATCH_VLD V_MATCH_VLD(1U)
+
+#define S_MATCH_INDEX 0
+#define M_MATCH_INDEX 0xffffU
+#define V_MATCH_INDEX(x) ((x) << S_MATCH_INDEX)
+#define G_MATCH_INDEX(x) (((x) >> S_MATCH_INDEX) & M_MATCH_INDEX)
+
#define A_UP_IBQ_0_REALADDR 0xd4
#define S_QUERDADDRWRAP 31
@@ -44218,6 +56464,7 @@
#define V_QUEMEMADDR(x) ((x) << S_QUEMEMADDR)
#define G_QUEMEMADDR(x) (((x) >> S_QUEMEMADDR) & M_QUEMEMADDR)
+#define A_UP_SEQ_SEARCH_RES1 0xd4
#define A_UP_IBQ_1_CONFIG 0xd8
#define A_UP_IBQ_1_REALADDR 0xdc
#define A_UP_IBQ_2_CONFIG 0xe0
@@ -44229,14 +56476,34 @@
#define A_UP_IBQ_5_CONFIG 0xf8
#define A_UP_IBQ_5_REALADDR 0xfc
#define A_UP_OBQ_0_CONFIG 0x100
+#define A_UP_PEER_HALT_STAT0 0x100
+
+#define S_HALTINFO 1
+#define M_HALTINFO 0x7fffffffU
+#define V_HALTINFO(x) ((x) << S_HALTINFO)
+#define G_HALTINFO(x) (((x) >> S_HALTINFO) & M_HALTINFO)
+
#define A_UP_OBQ_0_REALADDR 0x104
+#define A_UP_PEER_HALT_STAT1 0x104
#define A_UP_OBQ_1_CONFIG 0x108
+#define A_UP_PEER_HALT_STAT2 0x108
#define A_UP_OBQ_1_REALADDR 0x10c
+#define A_UP_PEER_HALT_STAT3 0x10c
#define A_UP_OBQ_2_CONFIG 0x110
+#define A_UP_PEER_HALT_STAT4 0x110
#define A_UP_OBQ_2_REALADDR 0x114
+#define A_UP_PEER_HALT_STAT5 0x114
#define A_UP_OBQ_3_CONFIG 0x118
+#define A_UP_PEER_HALT_STAT6 0x118
#define A_UP_OBQ_3_REALADDR 0x11c
+#define A_UP_PEER_HALT_STAT7 0x11c
#define A_UP_OBQ_4_CONFIG 0x120
+#define A_UP_PEER_HALT_CTL 0x120
+
+#define S_HALTREQ 0
+#define V_HALTREQ(x) ((x) << S_HALTREQ)
+#define F_HALTREQ V_HALTREQ(1U)
+
#define A_UP_OBQ_4_REALADDR 0x124
#define A_UP_OBQ_5_CONFIG 0x128
#define A_UP_OBQ_5_REALADDR 0x12c
@@ -44516,6 +56783,204 @@
#define A_UP_OBQ_6_SHADOW_REALADDR 0x3c4
#define A_UP_OBQ_7_SHADOW_CONFIG 0x3c8
#define A_UP_OBQ_7_SHADOW_REALADDR 0x3cc
+#define A_T7_UP_IBQ_0_SHADOW_RDADDR 0x400
+#define A_T7_UP_IBQ_0_SHADOW_WRADDR 0x404
+#define A_T7_UP_IBQ_0_SHADOW_STATUS 0x408
+
+#define S_T7_QUEREMFLITS 0
+#define M_T7_QUEREMFLITS 0xfffU
+#define V_T7_QUEREMFLITS(x) ((x) << S_T7_QUEREMFLITS)
+#define G_T7_QUEREMFLITS(x) (((x) >> S_T7_QUEREMFLITS) & M_T7_QUEREMFLITS)
+
+#define A_T7_UP_IBQ_0_SHADOW_PKTCNT 0x40c
+#define A_T7_UP_IBQ_1_SHADOW_RDADDR 0x410
+#define A_T7_UP_IBQ_1_SHADOW_WRADDR 0x414
+#define A_T7_UP_IBQ_1_SHADOW_STATUS 0x418
+#define A_T7_UP_IBQ_1_SHADOW_PKTCNT 0x41c
+#define A_T7_UP_IBQ_2_SHADOW_RDADDR 0x420
+#define A_T7_UP_IBQ_2_SHADOW_WRADDR 0x424
+#define A_T7_UP_IBQ_2_SHADOW_STATUS 0x428
+#define A_T7_UP_IBQ_2_SHADOW_PKTCNT 0x42c
+#define A_T7_UP_IBQ_3_SHADOW_RDADDR 0x430
+#define A_T7_UP_IBQ_3_SHADOW_WRADDR 0x434
+#define A_T7_UP_IBQ_3_SHADOW_STATUS 0x438
+#define A_T7_UP_IBQ_3_SHADOW_PKTCNT 0x43c
+#define A_T7_UP_IBQ_4_SHADOW_RDADDR 0x440
+#define A_T7_UP_IBQ_4_SHADOW_WRADDR 0x444
+#define A_T7_UP_IBQ_4_SHADOW_STATUS 0x448
+#define A_T7_UP_IBQ_4_SHADOW_PKTCNT 0x44c
+#define A_T7_UP_IBQ_5_SHADOW_RDADDR 0x450
+#define A_T7_UP_IBQ_5_SHADOW_WRADDR 0x454
+#define A_T7_UP_IBQ_5_SHADOW_STATUS 0x458
+#define A_T7_UP_IBQ_5_SHADOW_PKTCNT 0x45c
+#define A_UP_IBQ_6_SHADOW_RDADDR 0x460
+#define A_UP_IBQ_6_SHADOW_WRADDR 0x464
+#define A_UP_IBQ_6_SHADOW_STATUS 0x468
+#define A_UP_IBQ_6_SHADOW_PKTCNT 0x46c
+#define A_UP_IBQ_7_SHADOW_RDADDR 0x470
+#define A_UP_IBQ_7_SHADOW_WRADDR 0x474
+#define A_UP_IBQ_7_SHADOW_STATUS 0x478
+#define A_UP_IBQ_7_SHADOW_PKTCNT 0x47c
+#define A_UP_IBQ_8_SHADOW_RDADDR 0x480
+#define A_UP_IBQ_8_SHADOW_WRADDR 0x484
+#define A_UP_IBQ_8_SHADOW_STATUS 0x488
+#define A_UP_IBQ_8_SHADOW_PKTCNT 0x48c
+#define A_UP_IBQ_9_SHADOW_RDADDR 0x490
+#define A_UP_IBQ_9_SHADOW_WRADDR 0x494
+#define A_UP_IBQ_9_SHADOW_STATUS 0x498
+#define A_UP_IBQ_9_SHADOW_PKTCNT 0x49c
+#define A_UP_IBQ_10_SHADOW_RDADDR 0x4a0
+#define A_UP_IBQ_10_SHADOW_WRADDR 0x4a4
+#define A_UP_IBQ_10_SHADOW_STATUS 0x4a8
+#define A_UP_IBQ_10_SHADOW_PKTCNT 0x4ac
+#define A_UP_IBQ_11_SHADOW_RDADDR 0x4b0
+#define A_UP_IBQ_11_SHADOW_WRADDR 0x4b4
+#define A_UP_IBQ_11_SHADOW_STATUS 0x4b8
+#define A_UP_IBQ_11_SHADOW_PKTCNT 0x4bc
+#define A_UP_IBQ_12_SHADOW_RDADDR 0x4c0
+#define A_UP_IBQ_12_SHADOW_WRADDR 0x4c4
+#define A_UP_IBQ_12_SHADOW_STATUS 0x4c8
+#define A_UP_IBQ_12_SHADOW_PKTCNT 0x4cc
+#define A_UP_IBQ_13_SHADOW_RDADDR 0x4d0
+#define A_UP_IBQ_13_SHADOW_WRADDR 0x4d4
+#define A_UP_IBQ_13_SHADOW_STATUS 0x4d8
+#define A_UP_IBQ_13_SHADOW_PKTCNT 0x4dc
+#define A_UP_IBQ_14_SHADOW_RDADDR 0x4e0
+#define A_UP_IBQ_14_SHADOW_WRADDR 0x4e4
+#define A_UP_IBQ_14_SHADOW_STATUS 0x4e8
+#define A_UP_IBQ_14_SHADOW_PKTCNT 0x4ec
+#define A_UP_IBQ_15_SHADOW_RDADDR 0x4f0
+#define A_UP_IBQ_15_SHADOW_WRADDR 0x4f4
+#define A_UP_IBQ_15_SHADOW_STATUS 0x4f8
+#define A_UP_IBQ_15_SHADOW_PKTCNT 0x4fc
+#define A_T7_UP_IBQ_0_SHADOW_CONFIG 0x500
+#define A_T7_UP_IBQ_0_SHADOW_REALADDR 0x504
+#define A_T7_UP_IBQ_1_SHADOW_CONFIG 0x510
+#define A_T7_UP_IBQ_1_SHADOW_REALADDR 0x514
+#define A_T7_UP_IBQ_2_SHADOW_CONFIG 0x520
+#define A_T7_UP_IBQ_2_SHADOW_REALADDR 0x524
+#define A_T7_UP_IBQ_3_SHADOW_CONFIG 0x530
+#define A_T7_UP_IBQ_3_SHADOW_REALADDR 0x534
+#define A_T7_UP_IBQ_4_SHADOW_CONFIG 0x540
+#define A_T7_UP_IBQ_4_SHADOW_REALADDR 0x544
+#define A_T7_UP_IBQ_5_SHADOW_CONFIG 0x550
+#define A_T7_UP_IBQ_5_SHADOW_REALADDR 0x554
+#define A_UP_IBQ_6_SHADOW_CONFIG 0x560
+#define A_UP_IBQ_6_SHADOW_REALADDR 0x564
+#define A_UP_IBQ_7_SHADOW_CONFIG 0x570
+#define A_UP_IBQ_7_SHADOW_REALADDR 0x574
+#define A_UP_IBQ_8_SHADOW_CONFIG 0x580
+#define A_UP_IBQ_8_SHADOW_REALADDR 0x584
+#define A_UP_IBQ_9_SHADOW_CONFIG 0x590
+#define A_UP_IBQ_9_SHADOW_REALADDR 0x594
+#define A_UP_IBQ_10_SHADOW_CONFIG 0x5a0
+#define A_UP_IBQ_10_SHADOW_REALADDR 0x5a4
+#define A_UP_IBQ_11_SHADOW_CONFIG 0x5b0
+#define A_UP_IBQ_11_SHADOW_REALADDR 0x5b4
+#define A_UP_IBQ_12_SHADOW_CONFIG 0x5c0
+#define A_UP_IBQ_12_SHADOW_REALADDR 0x5c4
+#define A_UP_IBQ_13_SHADOW_CONFIG 0x5d0
+#define A_UP_IBQ_13_SHADOW_REALADDR 0x5d4
+#define A_UP_IBQ_14_SHADOW_CONFIG 0x5e0
+#define A_UP_IBQ_14_SHADOW_REALADDR 0x5e4
+#define A_UP_IBQ_15_SHADOW_CONFIG 0x5f0
+#define A_UP_IBQ_15_SHADOW_REALADDR 0x5f4
+#define A_T7_UP_OBQ_0_SHADOW_RDADDR 0x600
+#define A_T7_UP_OBQ_0_SHADOW_WRADDR 0x604
+#define A_T7_UP_OBQ_0_SHADOW_STATUS 0x608
+#define A_T7_UP_OBQ_0_SHADOW_PKTCNT 0x60c
+#define A_T7_UP_OBQ_1_SHADOW_RDADDR 0x610
+#define A_T7_UP_OBQ_1_SHADOW_WRADDR 0x614
+#define A_T7_UP_OBQ_1_SHADOW_STATUS 0x618
+#define A_T7_UP_OBQ_1_SHADOW_PKTCNT 0x61c
+#define A_T7_UP_OBQ_2_SHADOW_RDADDR 0x620
+#define A_T7_UP_OBQ_2_SHADOW_WRADDR 0x624
+#define A_T7_UP_OBQ_2_SHADOW_STATUS 0x628
+#define A_T7_UP_OBQ_2_SHADOW_PKTCNT 0x62c
+#define A_T7_UP_OBQ_3_SHADOW_RDADDR 0x630
+#define A_T7_UP_OBQ_3_SHADOW_WRADDR 0x634
+#define A_T7_UP_OBQ_3_SHADOW_STATUS 0x638
+#define A_T7_UP_OBQ_3_SHADOW_PKTCNT 0x63c
+#define A_T7_UP_OBQ_4_SHADOW_RDADDR 0x640
+#define A_T7_UP_OBQ_4_SHADOW_WRADDR 0x644
+#define A_T7_UP_OBQ_4_SHADOW_STATUS 0x648
+#define A_T7_UP_OBQ_4_SHADOW_PKTCNT 0x64c
+#define A_T7_UP_OBQ_5_SHADOW_RDADDR 0x650
+#define A_T7_UP_OBQ_5_SHADOW_WRADDR 0x654
+#define A_T7_UP_OBQ_5_SHADOW_STATUS 0x658
+#define A_T7_UP_OBQ_5_SHADOW_PKTCNT 0x65c
+#define A_T7_UP_OBQ_6_SHADOW_RDADDR 0x660
+#define A_T7_UP_OBQ_6_SHADOW_WRADDR 0x664
+#define A_T7_UP_OBQ_6_SHADOW_STATUS 0x668
+#define A_T7_UP_OBQ_6_SHADOW_PKTCNT 0x66c
+#define A_T7_UP_OBQ_7_SHADOW_RDADDR 0x670
+#define A_T7_UP_OBQ_7_SHADOW_WRADDR 0x674
+#define A_T7_UP_OBQ_7_SHADOW_STATUS 0x678
+#define A_T7_UP_OBQ_7_SHADOW_PKTCNT 0x67c
+#define A_UP_OBQ_8_SHADOW_RDADDR 0x680
+#define A_UP_OBQ_8_SHADOW_WRADDR 0x684
+#define A_UP_OBQ_8_SHADOW_STATUS 0x688
+#define A_UP_OBQ_8_SHADOW_PKTCNT 0x68c
+#define A_UP_OBQ_9_SHADOW_RDADDR 0x690
+#define A_UP_OBQ_9_SHADOW_WRADDR 0x694
+#define A_UP_OBQ_9_SHADOW_STATUS 0x698
+#define A_UP_OBQ_9_SHADOW_PKTCNT 0x69c
+#define A_UP_OBQ_10_SHADOW_RDADDR 0x6a0
+#define A_UP_OBQ_10_SHADOW_WRADDR 0x6a4
+#define A_UP_OBQ_10_SHADOW_STATUS 0x6a8
+#define A_UP_OBQ_10_SHADOW_PKTCNT 0x6ac
+#define A_UP_OBQ_11_SHADOW_RDADDR 0x6b0
+#define A_UP_OBQ_11_SHADOW_WRADDR 0x6b4
+#define A_UP_OBQ_11_SHADOW_STATUS 0x6b8
+#define A_UP_OBQ_11_SHADOW_PKTCNT 0x6bc
+#define A_UP_OBQ_12_SHADOW_RDADDR 0x6c0
+#define A_UP_OBQ_12_SHADOW_WRADDR 0x6c4
+#define A_UP_OBQ_12_SHADOW_STATUS 0x6c8
+#define A_UP_OBQ_12_SHADOW_PKTCNT 0x6cc
+#define A_UP_OBQ_13_SHADOW_RDADDR 0x6d0
+#define A_UP_OBQ_13_SHADOW_WRADDR 0x6d4
+#define A_UP_OBQ_13_SHADOW_STATUS 0x6d8
+#define A_UP_OBQ_13_SHADOW_PKTCNT 0x6dc
+#define A_UP_OBQ_14_SHADOW_RDADDR 0x6e0
+#define A_UP_OBQ_14_SHADOW_WRADDR 0x6e4
+#define A_UP_OBQ_14_SHADOW_STATUS 0x6e8
+#define A_UP_OBQ_14_SHADOW_PKTCNT 0x6ec
+#define A_UP_OBQ_15_SHADOW_RDADDR 0x6f0
+#define A_UP_OBQ_15_SHADOW_WRADDR 0x6f4
+#define A_UP_OBQ_15_SHADOW_STATUS 0x6f8
+#define A_UP_OBQ_15_SHADOW_PKTCNT 0x6fc
+#define A_T7_UP_OBQ_0_SHADOW_CONFIG 0x700
+#define A_T7_UP_OBQ_0_SHADOW_REALADDR 0x704
+#define A_T7_UP_OBQ_1_SHADOW_CONFIG 0x710
+#define A_T7_UP_OBQ_1_SHADOW_REALADDR 0x714
+#define A_T7_UP_OBQ_2_SHADOW_CONFIG 0x720
+#define A_T7_UP_OBQ_2_SHADOW_REALADDR 0x724
+#define A_T7_UP_OBQ_3_SHADOW_CONFIG 0x730
+#define A_T7_UP_OBQ_3_SHADOW_REALADDR 0x734
+#define A_T7_UP_OBQ_4_SHADOW_CONFIG 0x740
+#define A_T7_UP_OBQ_4_SHADOW_REALADDR 0x744
+#define A_T7_UP_OBQ_5_SHADOW_CONFIG 0x750
+#define A_T7_UP_OBQ_5_SHADOW_REALADDR 0x754
+#define A_T7_UP_OBQ_6_SHADOW_CONFIG 0x760
+#define A_T7_UP_OBQ_6_SHADOW_REALADDR 0x764
+#define A_T7_UP_OBQ_7_SHADOW_CONFIG 0x770
+#define A_T7_UP_OBQ_7_SHADOW_REALADDR 0x774
+#define A_UP_OBQ_8_SHADOW_CONFIG 0x780
+#define A_UP_OBQ_8_SHADOW_REALADDR 0x784
+#define A_UP_OBQ_9_SHADOW_CONFIG 0x790
+#define A_UP_OBQ_9_SHADOW_REALADDR 0x794
+#define A_UP_OBQ_10_SHADOW_CONFIG 0x7a0
+#define A_UP_OBQ_10_SHADOW_REALADDR 0x7a4
+#define A_UP_OBQ_11_SHADOW_CONFIG 0x7b0
+#define A_UP_OBQ_11_SHADOW_REALADDR 0x7b4
+#define A_UP_OBQ_12_SHADOW_CONFIG 0x7c0
+#define A_UP_OBQ_12_SHADOW_REALADDR 0x7c4
+#define A_UP_OBQ_13_SHADOW_CONFIG 0x7d0
+#define A_UP_OBQ_13_SHADOW_REALADDR 0x7d4
+#define A_UP_OBQ_14_SHADOW_CONFIG 0x7e0
+#define A_UP_OBQ_14_SHADOW_REALADDR 0x7e4
+#define A_UP_OBQ_15_SHADOW_CONFIG 0x7f0
+#define A_UP_OBQ_15_SHADOW_REALADDR 0x7f4
/* registers for module CIM_CTL */
#define CIM_CTL_BASE_ADDR 0x0
@@ -44579,17 +57044,63 @@
#define A_CIM_CTL_STATIC_PREFADDR10 0x38
#define A_CIM_CTL_STATIC_PREFADDR11 0x3c
#define A_CIM_CTL_STATIC_PREFADDR12 0x40
+#define A_CIM_CTL_SEM_CFG 0x40
+
+#define S_SEMINIT 31
+#define V_SEMINIT(x) ((x) << S_SEMINIT)
+#define F_SEMINIT V_SEMINIT(1U)
+
+#define S_NUMSEM 0
+#define M_NUMSEM 0x3ffffU
+#define V_NUMSEM(x) ((x) << S_NUMSEM)
+#define G_NUMSEM(x) (((x) >> S_NUMSEM) & M_NUMSEM)
+
#define A_CIM_CTL_STATIC_PREFADDR13 0x44
+#define A_CIM_CTL_SEM_MA_CFG 0x44
+
+#define S_SEMMABASE 4
+#define M_SEMMABASE 0xfffffffU
+#define V_SEMMABASE(x) ((x) << S_SEMMABASE)
+#define G_SEMMABASE(x) (((x) >> S_SEMMABASE) & M_SEMMABASE)
+
+#define S_SEMMATHREADID 0
+#define M_SEMMATHREADID 0x7U
+#define V_SEMMATHREADID(x) ((x) << S_SEMMATHREADID)
+#define G_SEMMATHREADID(x) (((x) >> S_SEMMATHREADID) & M_SEMMATHREADID)
+
#define A_CIM_CTL_STATIC_PREFADDR14 0x48
#define A_CIM_CTL_STATIC_PREFADDR15 0x4c
#define A_CIM_CTL_STATIC_ALLOCADDR0 0x50
+#define A_CIM_CTL_LOCK_CFG 0x50
+
+#define S_NUMLOCK 0
+#define M_NUMLOCK 0x3ffffU
+#define V_NUMLOCK(x) ((x) << S_NUMLOCK)
+#define G_NUMLOCK(x) (((x) >> S_NUMLOCK) & M_NUMLOCK)
+
#define A_CIM_CTL_STATIC_ALLOCADDR1 0x54
+#define A_CIM_CTL_LOCK_MA_CFG 0x54
+
+#define S_LOCKMABASE 4
+#define M_LOCKMABASE 0xfffffffU
+#define V_LOCKMABASE(x) ((x) << S_LOCKMABASE)
+#define G_LOCKMABASE(x) (((x) >> S_LOCKMABASE) & M_LOCKMABASE)
+
+#define S_LOCKMATHREADID 0
+#define M_LOCKMATHREADID 0x7U
+#define V_LOCKMATHREADID(x) ((x) << S_LOCKMATHREADID)
+#define G_LOCKMATHREADID(x) (((x) >> S_LOCKMATHREADID) & M_LOCKMATHREADID)
+
#define A_CIM_CTL_STATIC_ALLOCADDR2 0x58
#define A_CIM_CTL_STATIC_ALLOCADDR3 0x5c
#define A_CIM_CTL_STATIC_ALLOCADDR4 0x60
+#define A_CIM_CTL_RSA_INT 0x60
#define A_CIM_CTL_STATIC_ALLOCADDR5 0x64
+#define A_CIM_CTL_RSA_BUSY 0x64
#define A_CIM_CTL_STATIC_ALLOCADDR6 0x68
+#define A_CIM_CTL_RSA_CPERR 0x68
#define A_CIM_CTL_STATIC_ALLOCADDR7 0x6c
+#define A_CIM_CTL_RSA_DPERR 0x6c
#define A_CIM_CTL_STATIC_ALLOCADDR8 0x70
#define A_CIM_CTL_STATIC_ALLOCADDR9 0x74
#define A_CIM_CTL_STATIC_ALLOCADDR10 0x78
@@ -44650,6 +57161,66 @@
#define A_CIM_CTL_GEN_TIMER3 0xd0
#define A_CIM_CTL_MAILBOX_VF_STATUS 0xe0
#define A_CIM_CTL_MAILBOX_VFN_CTL 0x100
+#define A_CIM_CTL_TID_MAP_EN 0x500
+#define A_CIM_CTL_TID_MAP_CORE 0x520
+#define A_CIM_CTL_TID_MAP_CONFIG 0x540
+
+#define S_TIDDEFCORE 4
+#define M_TIDDEFCORE 0xfU
+#define V_TIDDEFCORE(x) ((x) << S_TIDDEFCORE)
+#define G_TIDDEFCORE(x) (((x) >> S_TIDDEFCORE) & M_TIDDEFCORE)
+
+#define S_TIDVECBASE 0
+#define M_TIDVECBASE 0x7U
+#define V_TIDVECBASE(x) ((x) << S_TIDVECBASE)
+#define G_TIDVECBASE(x) (((x) >> S_TIDVECBASE) & M_TIDVECBASE)
+
+#define A_CIM_CTL_CRYPTO_KEY_DATA 0x600
+#define A_CIM_CTL_SECURE_CONFIG 0x6f8
+#define A_CIM_CTL_CRYPTO_KEY_CTRL 0x6fc
+
+#define S_CRYPTOKEYDATAREGNUM 8
+#define M_CRYPTOKEYDATAREGNUM 0xffU
+#define V_CRYPTOKEYDATAREGNUM(x) ((x) << S_CRYPTOKEYDATAREGNUM)
+#define G_CRYPTOKEYDATAREGNUM(x) (((x) >> S_CRYPTOKEYDATAREGNUM) & M_CRYPTOKEYDATAREGNUM)
+
+#define S_CRYPTOKEYSTARTBUSY 0
+#define V_CRYPTOKEYSTARTBUSY(x) ((x) << S_CRYPTOKEYSTARTBUSY)
+#define F_CRYPTOKEYSTARTBUSY V_CRYPTOKEYSTARTBUSY(1U)
+
+#define A_CIM_CTL_FLOWID_OP_VALID 0x700
+#define A_CIM_CTL_FLOWID_CTL 0x720
+
+#define S_FLOWBASEADDR 8
+#define M_FLOWBASEADDR 0xffffffU
+#define V_FLOWBASEADDR(x) ((x) << S_FLOWBASEADDR)
+#define G_FLOWBASEADDR(x) (((x) >> S_FLOWBASEADDR) & M_FLOWBASEADDR)
+
+#define S_SEQSRCHALIGNCFG 4
+#define M_SEQSRCHALIGNCFG 0x3U
+#define V_SEQSRCHALIGNCFG(x) ((x) << S_SEQSRCHALIGNCFG)
+#define G_SEQSRCHALIGNCFG(x) (((x) >> S_SEQSRCHALIGNCFG) & M_SEQSRCHALIGNCFG)
+
+#define S_FLOWADDRSIZE 1
+#define M_FLOWADDRSIZE 0x3U
+#define V_FLOWADDRSIZE(x) ((x) << S_FLOWADDRSIZE)
+#define G_FLOWADDRSIZE(x) (((x) >> S_FLOWADDRSIZE) & M_FLOWADDRSIZE)
+
+#define S_FLOWIDEN 0
+#define V_FLOWIDEN(x) ((x) << S_FLOWIDEN)
+#define F_FLOWIDEN V_FLOWIDEN(1U)
+
+#define A_CIM_CTL_FLOWID_MAX 0x724
+
+#define S_MAXFLOWID 0
+#define M_MAXFLOWID 0xffffffU
+#define V_MAXFLOWID(x) ((x) << S_MAXFLOWID)
+#define G_MAXFLOWID(x) (((x) >> S_MAXFLOWID) & M_MAXFLOWID)
+
+#define A_CIM_CTL_FLOWID_HINT0 0x728
+#define A_CIM_CTL_EFUSE_CTRL 0x780
+#define A_CIM_CTL_EFUSE_QOUT 0x784
+#define A_CIM_CTL_EFUSE_RFOUT 0x788
#define A_CIM_CTL_TSCH_CHNLN_CTL 0x900
#define S_TSCHNLEN 31
@@ -45001,14 +57572,19 @@
#define A_CIM_CTL_TSCH_TICK3 0xd8c
#define A_CIM_CTL_MAILBOX_PF3_CTL 0xd90
#define A_T6_CIM_CTL_MAILBOX_PF0_CTL 0xd90
+#define A_T7_CIM_CTL_MAILBOX_PF0_CTL 0xd90
#define A_CIM_CTL_MAILBOX_PF4_CTL 0xd94
#define A_T6_CIM_CTL_MAILBOX_PF1_CTL 0xd94
+#define A_T7_CIM_CTL_MAILBOX_PF1_CTL 0xd94
#define A_CIM_CTL_MAILBOX_PF5_CTL 0xd98
#define A_T6_CIM_CTL_MAILBOX_PF2_CTL 0xd98
+#define A_T7_CIM_CTL_MAILBOX_PF2_CTL 0xd98
#define A_CIM_CTL_MAILBOX_PF6_CTL 0xd9c
#define A_T6_CIM_CTL_MAILBOX_PF3_CTL 0xd9c
+#define A_T7_CIM_CTL_MAILBOX_PF3_CTL 0xd9c
#define A_CIM_CTL_MAILBOX_PF7_CTL 0xda0
#define A_T6_CIM_CTL_MAILBOX_PF4_CTL 0xda0
+#define A_T7_CIM_CTL_MAILBOX_PF4_CTL 0xda0
#define A_CIM_CTL_MAILBOX_CTL_OWNER_COPY 0xda4
#define S_PF7_OWNER_PL 15
@@ -45076,6 +57652,7 @@
#define F_PF0_OWNER_UP V_PF0_OWNER_UP(1U)
#define A_T6_CIM_CTL_MAILBOX_PF5_CTL 0xda4
+#define A_T7_CIM_CTL_MAILBOX_PF5_CTL 0xda4
#define A_CIM_CTL_PIO_MST_CONFIG 0xda8
#define S_T5_CTLRID 0
@@ -45084,15 +57661,13 @@
#define G_T5_CTLRID(x) (((x) >> S_T5_CTLRID) & M_T5_CTLRID)
#define A_T6_CIM_CTL_MAILBOX_PF6_CTL 0xda8
+#define A_T7_CIM_CTL_MAILBOX_PF6_CTL 0xda8
#define A_T6_CIM_CTL_MAILBOX_PF7_CTL 0xdac
+#define A_T7_CIM_CTL_MAILBOX_PF7_CTL 0xdac
#define A_T6_CIM_CTL_MAILBOX_CTL_OWNER_COPY 0xdb0
+#define A_T7_CIM_CTL_MAILBOX_CTL_OWNER_COPY 0xdb0
#define A_T6_CIM_CTL_PIO_MST_CONFIG 0xdb4
-
-#define S_T6_UPRID 0
-#define M_T6_UPRID 0x1ffU
-#define V_T6_UPRID(x) ((x) << S_T6_UPRID)
-#define G_T6_UPRID(x) (((x) >> S_T6_UPRID) & M_T6_UPRID)
-
+#define A_T7_CIM_CTL_PIO_MST_CONFIG 0xdb4
#define A_CIM_CTL_ULP_OBQ0_PAUSE_MASK 0xe00
#define A_CIM_CTL_ULP_OBQ1_PAUSE_MASK 0xe04
#define A_CIM_CTL_ULP_OBQ2_PAUSE_MASK 0xe08
@@ -45119,6 +57694,64 @@
#define V_MA_TIMEOUT(x) ((x) << S_MA_TIMEOUT)
#define G_MA_TIMEOUT(x) (((x) >> S_MA_TIMEOUT) & M_MA_TIMEOUT)
+#define A_CIM_CTL_BREAK 0xf00
+
+#define S_XOCDMODE 8
+#define M_XOCDMODE 0xffU
+#define V_XOCDMODE(x) ((x) << S_XOCDMODE)
+#define G_XOCDMODE(x) (((x) >> S_XOCDMODE) & M_XOCDMODE)
+
+#define S_BREAKIN_CONTROL 0
+#define M_BREAKIN_CONTROL 0xffU
+#define V_BREAKIN_CONTROL(x) ((x) << S_BREAKIN_CONTROL)
+#define G_BREAKIN_CONTROL(x) (((x) >> S_BREAKIN_CONTROL) & M_BREAKIN_CONTROL)
+
+#define A_CIM_CTL_SLV_BOOT_CFG 0x4000
+
+#define S_T7_UPGEN 3
+#define M_T7_UPGEN 0x1fU
+#define V_T7_UPGEN(x) ((x) << S_T7_UPGEN)
+#define G_T7_UPGEN(x) (((x) >> S_T7_UPGEN) & M_T7_UPGEN)
+
+#define S_UPCLKEN 2
+#define V_UPCLKEN(x) ((x) << S_UPCLKEN)
+#define F_UPCLKEN V_UPCLKEN(1U)
+
+#define A_CIM_CTL_SLV_BOOT_LEN 0x4004
+#define A_CIM_CTL_SLV_ACC_INT_ENABLE 0x4008
+#define A_CIM_CTL_SLV_ACC_INT_CAUSE 0x400c
+#define A_CIM_CTL_SLV_INT_ENABLE 0x4010
+#define A_CIM_CTL_SLV_INT_CAUSE 0x4014
+#define A_CIM_CTL_SLV_PERR_ENABLE 0x4018
+#define A_CIM_CTL_SLV_PERR_CAUSE 0x401c
+#define A_CIM_CTL_SLV_ADDR_TIMEOUT 0x4028
+#define A_CIM_CTL_SLV_ADDR_ILLEGAL 0x402c
+#define A_CIM_CTL_SLV_PIO_MST_CONFIG 0x4030
+#define A_CIM_CTL_SLV_MEM_ZONE0_VA 0x4040
+#define A_CIM_CTL_SLV_MEM_ZONE0_BA 0x4044
+#define A_CIM_CTL_SLV_MEM_ZONE0_LEN 0x4048
+#define A_CIM_CTL_SLV_MEM_ZONE1_VA 0x404c
+#define A_CIM_CTL_SLV_MEM_ZONE1_BA 0x4050
+#define A_CIM_CTL_SLV_MEM_ZONE1_LEN 0x4054
+#define A_CIM_CTL_SLV_MEM_ZONE2_VA 0x4058
+#define A_CIM_CTL_SLV_MEM_ZONE2_BA 0x405c
+#define A_CIM_CTL_SLV_MEM_ZONE2_LEN 0x4060
+#define A_CIM_CTL_SLV_MEM_ZONE3_VA 0x4064
+#define A_CIM_CTL_SLV_MEM_ZONE3_BA 0x4068
+#define A_CIM_CTL_SLV_MEM_ZONE3_LEN 0x406c
+#define A_CIM_CTL_SLV_MEM_ZONE4_VA 0x4070
+#define A_CIM_CTL_SLV_MEM_ZONE4_BA 0x4074
+#define A_CIM_CTL_SLV_MEM_ZONE4_LEN 0x4078
+#define A_CIM_CTL_SLV_MEM_ZONE5_VA 0x407c
+#define A_CIM_CTL_SLV_MEM_ZONE5_BA 0x4080
+#define A_CIM_CTL_SLV_MEM_ZONE5_LEN 0x4084
+#define A_CIM_CTL_SLV_MEM_ZONE6_VA 0x4088
+#define A_CIM_CTL_SLV_MEM_ZONE6_BA 0x408c
+#define A_CIM_CTL_SLV_MEM_ZONE6_LEN 0x4090
+#define A_CIM_CTL_SLV_MEM_ZONE7_VA 0x4094
+#define A_CIM_CTL_SLV_MEM_ZONE7_BA 0x4098
+#define A_CIM_CTL_SLV_MEM_ZONE7_LEN 0x409c
+
/* registers for module MAC */
#define MAC_BASE_ADDR 0x0
@@ -46613,33 +59246,7 @@
#define F_PERR_TX_PCS1G V_PERR_TX_PCS1G(1U)
#define A_MAC_PORT_PERR_INT_CAUSE 0x8e4
-
-#define S_T6_PERR_PKT_RAM 31
-#define V_T6_PERR_PKT_RAM(x) ((x) << S_T6_PERR_PKT_RAM)
-#define F_T6_PERR_PKT_RAM V_T6_PERR_PKT_RAM(1U)
-
-#define S_T6_PERR_MASK_RAM 30
-#define V_T6_PERR_MASK_RAM(x) ((x) << S_T6_PERR_MASK_RAM)
-#define F_T6_PERR_MASK_RAM V_T6_PERR_MASK_RAM(1U)
-
-#define S_T6_PERR_CRC_RAM 29
-#define V_T6_PERR_CRC_RAM(x) ((x) << S_T6_PERR_CRC_RAM)
-#define F_T6_PERR_CRC_RAM V_T6_PERR_CRC_RAM(1U)
-
#define A_MAC_PORT_PERR_ENABLE 0x8e8
-
-#define S_T6_PERR_PKT_RAM 31
-#define V_T6_PERR_PKT_RAM(x) ((x) << S_T6_PERR_PKT_RAM)
-#define F_T6_PERR_PKT_RAM V_T6_PERR_PKT_RAM(1U)
-
-#define S_T6_PERR_MASK_RAM 30
-#define V_T6_PERR_MASK_RAM(x) ((x) << S_T6_PERR_MASK_RAM)
-#define F_T6_PERR_MASK_RAM V_T6_PERR_MASK_RAM(1U)
-
-#define S_T6_PERR_CRC_RAM 29
-#define V_T6_PERR_CRC_RAM(x) ((x) << S_T6_PERR_CRC_RAM)
-#define F_T6_PERR_CRC_RAM V_T6_PERR_CRC_RAM(1U)
-
#define A_MAC_PORT_PERR_INJECT 0x8ec
#define S_MEMSEL_PERR 1
@@ -47304,10 +59911,12 @@
#define A_MAC_PORT_PTP_DRIFT_ADJUST_COUNT 0x9a0
#define A_MAC_PORT_PTP_OFFSET_ADJUST_FINE 0x9a4
+#if 0
#define S_B 16
-#define CXGBE_M_B 0xffffU
+#define M_B 0xffffU
#define V_B(x) ((x) << S_B)
-#define G_B(x) (((x) >> S_B) & CXGBE_M_B)
+#define G_B(x) (((x) >> S_B) & M_B)
+#endif
#define S_A 0
#define M_A 0xffffU
@@ -48454,10 +61063,6 @@
#define V_LOW_POWER(x) ((x) << S_LOW_POWER)
#define F_LOW_POWER V_LOW_POWER(1U)
-#define S_T6_SPEED_SEL1 6
-#define V_T6_SPEED_SEL1(x) ((x) << S_T6_SPEED_SEL1)
-#define F_T6_SPEED_SEL1 V_T6_SPEED_SEL1(1U)
-
#define S_SPEED_SEL2 2
#define M_SPEED_SEL2 0xfU
#define V_SPEED_SEL2(x) ((x) << S_SPEED_SEL2)
@@ -49016,7 +61621,7 @@
#define S_VLANTAG 0
#define CXGBE_M_VLANTAG 0xffffU
#define V_VLANTAG(x) ((x) << S_VLANTAG)
-#define G_VLANTAG(x) (((x) >> S_VLANTAG) & CXGBE_M_VLANTAG)
+#define G_VLANTAG(x) (((x) >> S_VLANTAG) & M_VLANTAG)
#define A_MAC_PORT_MTIP_VLAN_TPID_1 0x1a04
#define A_MAC_PORT_MTIP_VLAN_TPID_2 0x1a08
@@ -51279,75 +63884,24 @@
#define G_DPC_TIME_LIM(x) (((x) >> S_DPC_TIME_LIM) & M_DPC_TIME_LIM)
#define A_MAC_PORT_AET_STAGE_CONFIGURATION_1 0x2b20
-
-#define S_T6_INIT_METH 12
-#define M_T6_INIT_METH 0xfU
-#define V_T6_INIT_METH(x) ((x) << S_T6_INIT_METH)
-#define G_T6_INIT_METH(x) (((x) >> S_T6_INIT_METH) & M_T6_INIT_METH)
-
#define A_MAC_PORT_AET_SIGNAL_LOSS_DETECTION_1 0x2b24
#define A_MAC_PORT_AET_ZFE_LIMITS_1 0x2b28
#define A_MAC_PORT_AET_BOOTSTRAP_LOOKUP_TABLE_1 0x2b2c
#define A_MAC_PORT_AET_STATUS_1 0x2b30
-
-#define S_T6_NEU_STATE 4
-#define M_T6_NEU_STATE 0xfU
-#define V_T6_NEU_STATE(x) ((x) << S_T6_NEU_STATE)
-#define G_T6_NEU_STATE(x) (((x) >> S_T6_NEU_STATE) & M_T6_NEU_STATE)
-
-#define S_T6_CTRL_STATE 0
-#define M_T6_CTRL_STATE 0xfU
-#define V_T6_CTRL_STATE(x) ((x) << S_T6_CTRL_STATE)
-#define G_T6_CTRL_STATE(x) (((x) >> S_T6_CTRL_STATE) & M_T6_CTRL_STATE)
-
#define A_MAC_PORT_AET_STATUS_21 0x2b34
#define A_MAC_PORT_AET_LIMITS1 0x2b38
#define A_MAC_PORT_AET_STAGE_CONFIGURATION_2 0x2b40
-
-#define S_T6_INIT_METH 12
-#define M_T6_INIT_METH 0xfU
-#define V_T6_INIT_METH(x) ((x) << S_T6_INIT_METH)
-#define G_T6_INIT_METH(x) (((x) >> S_T6_INIT_METH) & M_T6_INIT_METH)
-
#define A_MAC_PORT_AET_SIGNAL_LOSS_DETECTION_2 0x2b44
#define A_MAC_PORT_AET_ZFE_LIMITS_2 0x2b48
#define A_MAC_PORT_AET_BOOTSTRAP_LOOKUP_TABLE_2 0x2b4c
#define A_MAC_PORT_AET_STATUS_2 0x2b50
-
-#define S_T6_NEU_STATE 4
-#define M_T6_NEU_STATE 0xfU
-#define V_T6_NEU_STATE(x) ((x) << S_T6_NEU_STATE)
-#define G_T6_NEU_STATE(x) (((x) >> S_T6_NEU_STATE) & M_T6_NEU_STATE)
-
-#define S_T6_CTRL_STATE 0
-#define M_T6_CTRL_STATE 0xfU
-#define V_T6_CTRL_STATE(x) ((x) << S_T6_CTRL_STATE)
-#define G_T6_CTRL_STATE(x) (((x) >> S_T6_CTRL_STATE) & M_T6_CTRL_STATE)
-
#define A_MAC_PORT_AET_STATUS_22 0x2b54
#define A_MAC_PORT_AET_LIMITS2 0x2b58
#define A_MAC_PORT_AET_STAGE_CONFIGURATION_3 0x2b60
-
-#define S_T6_INIT_METH 12
-#define M_T6_INIT_METH 0xfU
-#define V_T6_INIT_METH(x) ((x) << S_T6_INIT_METH)
-#define G_T6_INIT_METH(x) (((x) >> S_T6_INIT_METH) & M_T6_INIT_METH)
-
#define A_MAC_PORT_AET_SIGNAL_LOSS_DETECTION_3 0x2b64
#define A_MAC_PORT_AET_ZFE_LIMITS_3 0x2b68
#define A_MAC_PORT_AET_BOOTSTRAP_LOOKUP_TABLE_3 0x2b6c
#define A_MAC_PORT_AET_STATUS_3 0x2b70
-
-#define S_T6_NEU_STATE 4
-#define M_T6_NEU_STATE 0xfU
-#define V_T6_NEU_STATE(x) ((x) << S_T6_NEU_STATE)
-#define G_T6_NEU_STATE(x) (((x) >> S_T6_NEU_STATE) & M_T6_NEU_STATE)
-
-#define S_T6_CTRL_STATE 0
-#define M_T6_CTRL_STATE 0xfU
-#define V_T6_CTRL_STATE(x) ((x) << S_T6_CTRL_STATE)
-#define G_T6_CTRL_STATE(x) (((x) >> S_T6_CTRL_STATE) & M_T6_CTRL_STATE)
-
#define A_MAC_PORT_AET_STATUS_23 0x2b74
#define A_MAC_PORT_AET_LIMITS3 0x2b78
#define A_T6_MAC_PORT_BEAN_CTL 0x2c00
@@ -52384,103 +64938,21 @@
#define F_BSOUTP V_BSOUTP(1U)
#define A_MAC_PORT_TX_LINKB_TRANSMIT_CONFIGURATION_MODE 0x3100
-
-#define S_T6_T5_TX_RXLOOP 5
-#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP)
-#define F_T6_T5_TX_RXLOOP V_T6_T5_TX_RXLOOP(1U)
-
-#define S_T6_T5_TX_BWSEL 2
-#define M_T6_T5_TX_BWSEL 0x3U
-#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL)
-#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TEST_CONTROL 0x3104
-
-#define S_T6_ERROR 9
-#define V_T6_ERROR(x) ((x) << S_T6_ERROR)
-#define F_T6_ERROR V_T6_ERROR(1U)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_COEFFICIENT_CONTROL 0x3108
#define A_MAC_PORT_TX_LINKB_TRANSMIT_DRIVER_MODE_CONTROL 0x310c
#define A_MAC_PORT_TX_LINKB_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3110
#define A_MAC_PORT_TX_LINKB_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3114
#define A_MAC_PORT_TX_LINKB_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3118
-
-#define S_T6_CALSSTN 8
-#define M_T6_CALSSTN 0x3fU
-#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN)
-#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN)
-
-#define S_T6_CALSSTP 0
-#define M_T6_CALSSTP 0x3fU
-#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP)
-#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x311c
-
-#define S_T6_DRTOL 2
-#define M_T6_DRTOL 0x7U
-#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL)
-#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_0_COEFFICIENT 0x3120
-
-#define S_T6_NXTT0 0
-#define M_T6_NXTT0 0x3fU
-#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0)
-#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_1_COEFFICIENT 0x3124
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_2_COEFFICIENT 0x3128
-
-#define S_T6_NXTT2 0
-#define M_T6_NXTT2 0x3fU
-#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2)
-#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_3_COEFFICIENT 0x312c
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AMPLITUDE 0x3130
#define A_MAC_PORT_TX_LINKB_TRANSMIT_POLARITY 0x3134
-
-#define S_T6_NXTPOL 0
-#define M_T6_NXTPOL 0xfU
-#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL)
-#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3138
-
-#define S_T6_C0UPDT 6
-#define M_T6_C0UPDT 0x3U
-#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT)
-#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT)
-
-#define S_T6_C2UPDT 2
-#define M_T6_C2UPDT 0x3U
-#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT)
-#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT)
-
-#define S_T6_C1UPDT 0
-#define M_T6_C1UPDT 0x3U
-#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT)
-#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x313c
-
-#define S_T6_C0STAT 6
-#define M_T6_C0STAT 0x3U
-#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT)
-#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT)
-
-#define S_T6_C2STAT 2
-#define M_T6_C2STAT 0x3U
-#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT)
-#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT)
-
-#define S_T6_C1STAT 0
-#define M_T6_C1STAT 0x3U
-#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT)
-#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3140
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3140
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3144
@@ -52503,12 +64975,6 @@
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3174
#define A_MAC_PORT_TX_LINKB_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3178
#define A_MAC_PORT_TX_LINKB_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x317c
-
-#define S_T6_XADDR 1
-#define M_T6_XADDR 0x1fU
-#define V_T6_XADDR(x) ((x) << S_T6_XADDR)
-#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3180
#define A_MAC_PORT_TX_LINKB_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3184
#define A_MAC_PORT_TX_LINKB_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3188
@@ -52521,21 +64987,6 @@
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AZ_CONTROL 0x319c
#define A_T6_MAC_PORT_TX_LINKB_TRANSMIT_DCC_CONTROL 0x31a0
-#define S_T6_DCCTIMEEN 13
-#define M_T6_DCCTIMEEN 0x3U
-#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN)
-#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN)
-
-#define S_T6_DCCLOCK 11
-#define M_T6_DCCLOCK 0x3U
-#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK)
-#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK)
-
-#define S_T6_DCCOFFSET 8
-#define M_T6_DCCOFFSET 0x7U
-#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET)
-#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET)
-
#define S_TX_LINKB_DCCSTEP_CTL 6
#define M_TX_LINKB_DCCSTEP_CTL 0x3U
#define V_TX_LINKB_DCCSTEP_CTL(x) ((x) << S_TX_LINKB_DCCSTEP_CTL)
@@ -52553,20 +65004,9 @@
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x31e0
#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_5 0x31ec
#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_4 0x31f0
-
-#define S_T6_SDOVRD 0
-#define M_T6_SDOVRD 0xffffU
-#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD)
-#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_3 0x31f4
#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_2 0x31f8
#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_1 0x31fc
-
-#define S_T6_SDOVRDEN 15
-#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN)
-#define F_T6_SDOVRDEN V_T6_SDOVRDEN(1U)
-
#define A_MAC_PORT_RX_LINKA_RECEIVER_CONFIGURATION_MODE 0x3200
#define S_T5_RX_LINKEN 15
@@ -54442,56 +66882,15 @@
#define A_MAC_PORT_RX_LINKB_RECEIVER_TEST_CONTROL 0x3304
#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_CONTROL 0x3308
#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_OFFSET_CONTROL 0x330c
-
-#define S_T6_TMSCAL 8
-#define M_T6_TMSCAL 0x3U
-#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL)
-#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL)
-
-#define S_T6_APADJ 7
-#define V_T6_APADJ(x) ((x) << S_T6_APADJ)
-#define F_T6_APADJ V_T6_APADJ(1U)
-
-#define S_T6_RSEL 6
-#define V_T6_RSEL(x) ((x) << S_T6_RSEL)
-#define F_T6_RSEL V_T6_RSEL(1U)
-
-#define S_T6_PHOFFS 0
-#define M_T6_PHOFFS 0x3fU
-#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS)
-#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS)
-
#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_POSITION_1 0x3310
#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_POSITION_2 0x3314
#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3318
#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x331c
#define A_MAC_PORT_RX_LINKB_DFE_CONTROL 0x3320
-
-#define S_T6_SPIFMT 8
-#define M_T6_SPIFMT 0xfU
-#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT)
-#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT)
-
#define A_MAC_PORT_RX_LINKB_DFE_SAMPLE_SNAPSHOT_1 0x3324
#define A_MAC_PORT_RX_LINKB_DFE_SAMPLE_SNAPSHOT_2 0x3328
#define A_MAC_PORT_RX_LINKB_RECEIVER_VGA_CONTROL_1 0x332c
-
-#define S_T6_WRAPSEL 15
-#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL)
-#define F_T6_WRAPSEL V_T6_WRAPSEL(1U)
-
-#define S_T6_PEAK 9
-#define M_T6_PEAK 0x1fU
-#define V_T6_PEAK(x) ((x) << S_T6_PEAK)
-#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK)
-
#define A_MAC_PORT_RX_LINKB_RECEIVER_VGA_CONTROL_2 0x3330
-
-#define S_T6_T5VGAIN 0
-#define M_T6_T5VGAIN 0x7fU
-#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN)
-#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN)
-
#define A_MAC_PORT_RX_LINKB_RECEIVER_VGA_CONTROL_3 0x3334
#define A_MAC_PORT_RX_LINKB_RECEIVER_DQCC_CONTROL_1 0x3338
#define A_MAC_PORT_RX_LINKB_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3338
@@ -54515,12 +66914,6 @@
#define A_MAC_PORT_RX_LINKB_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x336c
#define A_MAC_PORT_RX_LINKB_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3370
#define A_MAC_PORT_RX_LINKB_DYNAMIC_DATA_CENTERING_DDC 0x3374
-
-#define S_T6_ODEC 0
-#define M_T6_ODEC 0xfU
-#define V_T6_ODEC(x) ((x) << S_T6_ODEC)
-#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC)
-
#define A_MAC_PORT_RX_LINKB_RECEIVER_INTERNAL_STATUS 0x3378
#define S_RX_LINKB_ACCCMP_RIS 11
@@ -54550,20 +66943,6 @@
#define A_MAC_PORT_RX_LINKB_INTEGRATOR_DAC_OFFSET 0x33a4
#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_CONTROL 0x33a8
#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_METRICS 0x33ac
-
-#define S_T6_EMMD 3
-#define M_T6_EMMD 0x3U
-#define V_T6_EMMD(x) ((x) << S_T6_EMMD)
-#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD)
-
-#define S_T6_EMBRDY 2
-#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY)
-#define F_T6_EMBRDY V_T6_EMBRDY(1U)
-
-#define S_T6_EMBUMP 1
-#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP)
-#define F_T6_EMBUMP V_T6_EMBUMP(1U)
-
#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_METRICS_ERROR_COUNT 0x33b0
#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x33b4
#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x33b8
@@ -54611,103 +66990,21 @@
#define A_MAC_PORT_RX_LINKB_RECEIVER_MACRO_TEST_CONTROL_REGISTER_2 0x33f8
#define A_MAC_PORT_RX_LINKB_RECEIVER_MACRO_TEST_CONTROL_1 0x33fc
#define A_MAC_PORT_TX_LINKC_TRANSMIT_CONFIGURATION_MODE 0x3400
-
-#define S_T6_T5_TX_RXLOOP 5
-#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP)
-#define F_T6_T5_TX_RXLOOP V_T6_T5_TX_RXLOOP(1U)
-
-#define S_T6_T5_TX_BWSEL 2
-#define M_T6_T5_TX_BWSEL 0x3U
-#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL)
-#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TEST_CONTROL 0x3404
-
-#define S_T6_ERROR 9
-#define V_T6_ERROR(x) ((x) << S_T6_ERROR)
-#define F_T6_ERROR V_T6_ERROR(1U)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_COEFFICIENT_CONTROL 0x3408
#define A_MAC_PORT_TX_LINKC_TRANSMIT_DRIVER_MODE_CONTROL 0x340c
#define A_MAC_PORT_TX_LINKC_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3410
#define A_MAC_PORT_TX_LINKC_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3414
#define A_MAC_PORT_TX_LINKC_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3418
-
-#define S_T6_CALSSTN 8
-#define M_T6_CALSSTN 0x3fU
-#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN)
-#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN)
-
-#define S_T6_CALSSTP 0
-#define M_T6_CALSSTP 0x3fU
-#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP)
-#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x341c
-
-#define S_T6_DRTOL 2
-#define M_T6_DRTOL 0x7U
-#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL)
-#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_0_COEFFICIENT 0x3420
-
-#define S_T6_NXTT0 0
-#define M_T6_NXTT0 0x3fU
-#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0)
-#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_1_COEFFICIENT 0x3424
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_2_COEFFICIENT 0x3428
-
-#define S_T6_NXTT2 0
-#define M_T6_NXTT2 0x3fU
-#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2)
-#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_3_COEFFICIENT 0x342c
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AMPLITUDE 0x3430
#define A_MAC_PORT_TX_LINKC_TRANSMIT_POLARITY 0x3434
-
-#define S_T6_NXTPOL 0
-#define M_T6_NXTPOL 0xfU
-#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL)
-#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3438
-
-#define S_T6_C0UPDT 6
-#define M_T6_C0UPDT 0x3U
-#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT)
-#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT)
-
-#define S_T6_C2UPDT 2
-#define M_T6_C2UPDT 0x3U
-#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT)
-#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT)
-
-#define S_T6_C1UPDT 0
-#define M_T6_C1UPDT 0x3U
-#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT)
-#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x343c
-
-#define S_T6_C0STAT 6
-#define M_T6_C0STAT 0x3U
-#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT)
-#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT)
-
-#define S_T6_C2STAT 2
-#define M_T6_C2STAT 0x3U
-#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT)
-#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT)
-
-#define S_T6_C1STAT 0
-#define M_T6_C1STAT 0x3U
-#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT)
-#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3440
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3440
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3444
@@ -54730,12 +67027,6 @@
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3474
#define A_MAC_PORT_TX_LINKC_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3478
#define A_MAC_PORT_TX_LINKC_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x347c
-
-#define S_T6_XADDR 1
-#define M_T6_XADDR 0x1fU
-#define V_T6_XADDR(x) ((x) << S_T6_XADDR)
-#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3480
#define A_MAC_PORT_TX_LINKC_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3484
#define A_MAC_PORT_TX_LINKC_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3488
@@ -54748,21 +67039,6 @@
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AZ_CONTROL 0x349c
#define A_T6_MAC_PORT_TX_LINKC_TRANSMIT_DCC_CONTROL 0x34a0
-#define S_T6_DCCTIMEEN 13
-#define M_T6_DCCTIMEEN 0x3U
-#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN)
-#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN)
-
-#define S_T6_DCCLOCK 11
-#define M_T6_DCCLOCK 0x3U
-#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK)
-#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK)
-
-#define S_T6_DCCOFFSET 8
-#define M_T6_DCCOFFSET 0x7U
-#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET)
-#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET)
-
#define S_TX_LINKC_DCCSTEP_CTL 6
#define M_TX_LINKC_DCCSTEP_CTL 0x3U
#define V_TX_LINKC_DCCSTEP_CTL(x) ((x) << S_TX_LINKC_DCCSTEP_CTL)
@@ -54780,118 +67056,25 @@
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x34e0
#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_5 0x34ec
#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_4 0x34f0
-
-#define S_T6_SDOVRD 0
-#define M_T6_SDOVRD 0xffffU
-#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD)
-#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_3 0x34f4
#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_2 0x34f8
#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_1 0x34fc
-
-#define S_T6_SDOVRDEN 15
-#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN)
-#define F_T6_SDOVRDEN V_T6_SDOVRDEN(1U)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_CONFIGURATION_MODE 0x3500
-
-#define S_T6_T5_TX_RXLOOP 5
-#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP)
-#define F_T6_T5_TX_RXLOOP V_T6_T5_TX_RXLOOP(1U)
-
-#define S_T6_T5_TX_BWSEL 2
-#define M_T6_T5_TX_BWSEL 0x3U
-#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL)
-#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TEST_CONTROL 0x3504
-
-#define S_T6_ERROR 9
-#define V_T6_ERROR(x) ((x) << S_T6_ERROR)
-#define F_T6_ERROR V_T6_ERROR(1U)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_COEFFICIENT_CONTROL 0x3508
#define A_MAC_PORT_TX_LINKD_TRANSMIT_DRIVER_MODE_CONTROL 0x350c
#define A_MAC_PORT_TX_LINKD_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3510
#define A_MAC_PORT_TX_LINKD_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3514
#define A_MAC_PORT_TX_LINKD_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3518
-
-#define S_T6_CALSSTN 8
-#define M_T6_CALSSTN 0x3fU
-#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN)
-#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN)
-
-#define S_T6_CALSSTP 0
-#define M_T6_CALSSTP 0x3fU
-#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP)
-#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x351c
-
-#define S_T6_DRTOL 2
-#define M_T6_DRTOL 0x7U
-#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL)
-#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_0_COEFFICIENT 0x3520
-
-#define S_T6_NXTT0 0
-#define M_T6_NXTT0 0x3fU
-#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0)
-#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_1_COEFFICIENT 0x3524
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_2_COEFFICIENT 0x3528
-
-#define S_T6_NXTT2 0
-#define M_T6_NXTT2 0x3fU
-#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2)
-#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_3_COEFFICIENT 0x352c
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AMPLITUDE 0x3530
#define A_MAC_PORT_TX_LINKD_TRANSMIT_POLARITY 0x3534
-
-#define S_T6_NXTPOL 0
-#define M_T6_NXTPOL 0xfU
-#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL)
-#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3538
-
-#define S_T6_C0UPDT 6
-#define M_T6_C0UPDT 0x3U
-#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT)
-#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT)
-
-#define S_T6_C2UPDT 2
-#define M_T6_C2UPDT 0x3U
-#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT)
-#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT)
-
-#define S_T6_C1UPDT 0
-#define M_T6_C1UPDT 0x3U
-#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT)
-#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x353c
-
-#define S_T6_C0STAT 6
-#define M_T6_C0STAT 0x3U
-#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT)
-#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT)
-
-#define S_T6_C2STAT 2
-#define M_T6_C2STAT 0x3U
-#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT)
-#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT)
-
-#define S_T6_C1STAT 0
-#define M_T6_C1STAT 0x3U
-#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT)
-#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3540
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3540
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3544
@@ -54914,12 +67097,6 @@
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3574
#define A_MAC_PORT_TX_LINKD_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3578
#define A_MAC_PORT_TX_LINKD_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x357c
-
-#define S_T6_XADDR 1
-#define M_T6_XADDR 0x1fU
-#define V_T6_XADDR(x) ((x) << S_T6_XADDR)
-#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3580
#define A_MAC_PORT_TX_LINKD_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3584
#define A_MAC_PORT_TX_LINKD_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3588
@@ -54932,21 +67109,6 @@
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AZ_CONTROL 0x359c
#define A_T6_MAC_PORT_TX_LINKD_TRANSMIT_DCC_CONTROL 0x35a0
-#define S_T6_DCCTIMEEN 13
-#define M_T6_DCCTIMEEN 0x3U
-#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN)
-#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN)
-
-#define S_T6_DCCLOCK 11
-#define M_T6_DCCLOCK 0x3U
-#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK)
-#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK)
-
-#define S_T6_DCCOFFSET 8
-#define M_T6_DCCOFFSET 0x7U
-#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET)
-#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET)
-
#define S_TX_LINKD_DCCSTEP_CTL 6
#define M_TX_LINKD_DCCSTEP_CTL 0x3U
#define V_TX_LINKD_DCCSTEP_CTL(x) ((x) << S_TX_LINKD_DCCSTEP_CTL)
@@ -54964,74 +67126,22 @@
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x35e0
#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_5 0x35ec
#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_4 0x35f0
-
-#define S_T6_SDOVRD 0
-#define M_T6_SDOVRD 0xffffU
-#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD)
-#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_3 0x35f4
#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_2 0x35f8
#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_1 0x35fc
-
-#define S_T6_SDOVRDEN 15
-#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN)
-#define F_T6_SDOVRDEN V_T6_SDOVRDEN(1U)
-
#define A_MAC_PORT_RX_LINKC_RECEIVER_CONFIGURATION_MODE 0x3600
#define A_MAC_PORT_RX_LINKC_RECEIVER_TEST_CONTROL 0x3604
#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_CONTROL 0x3608
#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_OFFSET_CONTROL 0x360c
-
-#define S_T6_TMSCAL 8
-#define M_T6_TMSCAL 0x3U
-#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL)
-#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL)
-
-#define S_T6_APADJ 7
-#define V_T6_APADJ(x) ((x) << S_T6_APADJ)
-#define F_T6_APADJ V_T6_APADJ(1U)
-
-#define S_T6_RSEL 6
-#define V_T6_RSEL(x) ((x) << S_T6_RSEL)
-#define F_T6_RSEL V_T6_RSEL(1U)
-
-#define S_T6_PHOFFS 0
-#define M_T6_PHOFFS 0x3fU
-#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS)
-#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS)
-
#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_POSITION_1 0x3610
#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_POSITION_2 0x3614
#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3618
#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x361c
#define A_MAC_PORT_RX_LINKC_DFE_CONTROL 0x3620
-
-#define S_T6_SPIFMT 8
-#define M_T6_SPIFMT 0xfU
-#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT)
-#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT)
-
#define A_MAC_PORT_RX_LINKC_DFE_SAMPLE_SNAPSHOT_1 0x3624
#define A_MAC_PORT_RX_LINKC_DFE_SAMPLE_SNAPSHOT_2 0x3628
#define A_MAC_PORT_RX_LINKC_RECEIVER_VGA_CONTROL_1 0x362c
-
-#define S_T6_WRAPSEL 15
-#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL)
-#define F_T6_WRAPSEL V_T6_WRAPSEL(1U)
-
-#define S_T6_PEAK 9
-#define M_T6_PEAK 0x1fU
-#define V_T6_PEAK(x) ((x) << S_T6_PEAK)
-#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK)
-
#define A_MAC_PORT_RX_LINKC_RECEIVER_VGA_CONTROL_2 0x3630
-
-#define S_T6_T5VGAIN 0
-#define M_T6_T5VGAIN 0x7fU
-#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN)
-#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN)
-
#define A_MAC_PORT_RX_LINKC_RECEIVER_VGA_CONTROL_3 0x3634
#define A_MAC_PORT_RX_LINKC_RECEIVER_DQCC_CONTROL_1 0x3638
#define A_MAC_PORT_RX_LINKC_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3638
@@ -55055,12 +67165,6 @@
#define A_MAC_PORT_RX_LINKC_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x366c
#define A_MAC_PORT_RX_LINKC_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3670
#define A_MAC_PORT_RX_LINKC_DYNAMIC_DATA_CENTERING_DDC 0x3674
-
-#define S_T6_ODEC 0
-#define M_T6_ODEC 0xfU
-#define V_T6_ODEC(x) ((x) << S_T6_ODEC)
-#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC)
-
#define A_MAC_PORT_RX_LINKC_RECEIVER_INTERNAL_STATUS 0x3678
#define S_RX_LINKC_ACCCMP_RIS 11
@@ -55090,20 +67194,6 @@
#define A_MAC_PORT_RX_LINKC_INTEGRATOR_DAC_OFFSET 0x36a4
#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_CONTROL 0x36a8
#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_METRICS 0x36ac
-
-#define S_T6_EMMD 3
-#define M_T6_EMMD 0x3U
-#define V_T6_EMMD(x) ((x) << S_T6_EMMD)
-#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD)
-
-#define S_T6_EMBRDY 2
-#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY)
-#define F_T6_EMBRDY V_T6_EMBRDY(1U)
-
-#define S_T6_EMBUMP 1
-#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP)
-#define F_T6_EMBUMP V_T6_EMBUMP(1U)
-
#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_METRICS_ERROR_COUNT 0x36b0
#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x36b4
#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x36b8
@@ -55154,56 +67244,15 @@
#define A_MAC_PORT_RX_LINKD_RECEIVER_TEST_CONTROL 0x3704
#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_CONTROL 0x3708
#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_OFFSET_CONTROL 0x370c
-
-#define S_T6_TMSCAL 8
-#define M_T6_TMSCAL 0x3U
-#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL)
-#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL)
-
-#define S_T6_APADJ 7
-#define V_T6_APADJ(x) ((x) << S_T6_APADJ)
-#define F_T6_APADJ V_T6_APADJ(1U)
-
-#define S_T6_RSEL 6
-#define V_T6_RSEL(x) ((x) << S_T6_RSEL)
-#define F_T6_RSEL V_T6_RSEL(1U)
-
-#define S_T6_PHOFFS 0
-#define M_T6_PHOFFS 0x3fU
-#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS)
-#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS)
-
#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_POSITION_1 0x3710
#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_POSITION_2 0x3714
#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3718
#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x371c
#define A_MAC_PORT_RX_LINKD_DFE_CONTROL 0x3720
-
-#define S_T6_SPIFMT 8
-#define M_T6_SPIFMT 0xfU
-#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT)
-#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT)
-
#define A_MAC_PORT_RX_LINKD_DFE_SAMPLE_SNAPSHOT_1 0x3724
#define A_MAC_PORT_RX_LINKD_DFE_SAMPLE_SNAPSHOT_2 0x3728
#define A_MAC_PORT_RX_LINKD_RECEIVER_VGA_CONTROL_1 0x372c
-
-#define S_T6_WRAPSEL 15
-#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL)
-#define F_T6_WRAPSEL V_T6_WRAPSEL(1U)
-
-#define S_T6_PEAK 9
-#define M_T6_PEAK 0x1fU
-#define V_T6_PEAK(x) ((x) << S_T6_PEAK)
-#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK)
-
#define A_MAC_PORT_RX_LINKD_RECEIVER_VGA_CONTROL_2 0x3730
-
-#define S_T6_T5VGAIN 0
-#define M_T6_T5VGAIN 0x7fU
-#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN)
-#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN)
-
#define A_MAC_PORT_RX_LINKD_RECEIVER_VGA_CONTROL_3 0x3734
#define A_MAC_PORT_RX_LINKD_RECEIVER_DQCC_CONTROL_1 0x3738
#define A_MAC_PORT_RX_LINKD_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3738
@@ -55227,12 +67276,6 @@
#define A_MAC_PORT_RX_LINKD_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x376c
#define A_MAC_PORT_RX_LINKD_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3770
#define A_MAC_PORT_RX_LINKD_DYNAMIC_DATA_CENTERING_DDC 0x3774
-
-#define S_T6_ODEC 0
-#define M_T6_ODEC 0xfU
-#define V_T6_ODEC(x) ((x) << S_T6_ODEC)
-#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC)
-
#define A_MAC_PORT_RX_LINKD_RECEIVER_INTERNAL_STATUS 0x3778
#define S_RX_LINKD_ACCCMP_RIS 11
@@ -55262,20 +67305,6 @@
#define A_MAC_PORT_RX_LINKD_INTEGRATOR_DAC_OFFSET 0x37a4
#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_CONTROL 0x37a8
#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_METRICS 0x37ac
-
-#define S_T6_EMMD 3
-#define M_T6_EMMD 0x3U
-#define V_T6_EMMD(x) ((x) << S_T6_EMMD)
-#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD)
-
-#define S_T6_EMBRDY 2
-#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY)
-#define F_T6_EMBRDY V_T6_EMBRDY(1U)
-
-#define S_T6_EMBUMP 1
-#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP)
-#define F_T6_EMBUMP V_T6_EMBUMP(1U)
-
#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_METRICS_ERROR_COUNT 0x37b0
#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x37b4
#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x37b8
@@ -55597,103 +67626,21 @@
#define F_MACROTEST V_MACROTEST(1U)
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_CONFIGURATION_MODE 0x3900
-
-#define S_T6_T5_TX_RXLOOP 5
-#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP)
-#define F_T6_T5_TX_RXLOOP V_T6_T5_TX_RXLOOP(1U)
-
-#define S_T6_T5_TX_BWSEL 2
-#define M_T6_T5_TX_BWSEL 0x3U
-#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL)
-#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TEST_CONTROL 0x3904
-
-#define S_T6_ERROR 9
-#define V_T6_ERROR(x) ((x) << S_T6_ERROR)
-#define F_T6_ERROR V_T6_ERROR(1U)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_COEFFICIENT_CONTROL 0x3908
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DRIVER_MODE_CONTROL 0x390c
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3910
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3914
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3918
-
-#define S_T6_CALSSTN 8
-#define M_T6_CALSSTN 0x3fU
-#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN)
-#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN)
-
-#define S_T6_CALSSTP 0
-#define M_T6_CALSSTP 0x3fU
-#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP)
-#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x391c
-
-#define S_T6_DRTOL 2
-#define M_T6_DRTOL 0x7U
-#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL)
-#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_0_COEFFICIENT 0x3920
-
-#define S_T6_NXTT0 0
-#define M_T6_NXTT0 0x3fU
-#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0)
-#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_1_COEFFICIENT 0x3924
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_2_COEFFICIENT 0x3928
-
-#define S_T6_NXTT2 0
-#define M_T6_NXTT2 0x3fU
-#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2)
-#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_3_COEFFICIENT 0x392c
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AMPLITUDE 0x3930
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_POLARITY 0x3934
-
-#define S_T6_NXTPOL 0
-#define M_T6_NXTPOL 0xfU
-#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL)
-#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3938
-
-#define S_T6_C0UPDT 6
-#define M_T6_C0UPDT 0x3U
-#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT)
-#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT)
-
-#define S_T6_C2UPDT 2
-#define M_T6_C2UPDT 0x3U
-#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT)
-#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT)
-
-#define S_T6_C1UPDT 0
-#define M_T6_C1UPDT 0x3U
-#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT)
-#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x393c
-
-#define S_T6_C0STAT 6
-#define M_T6_C0STAT 0x3U
-#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT)
-#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT)
-
-#define S_T6_C2STAT 2
-#define M_T6_C2STAT 0x3U
-#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT)
-#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT)
-
-#define S_T6_C1STAT 0
-#define M_T6_C1STAT 0x3U
-#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT)
-#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3940
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3940
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3944
@@ -55716,12 +67663,6 @@
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3974
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3978
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x397c
-
-#define S_T6_XADDR 1
-#define M_T6_XADDR 0x1fU
-#define V_T6_XADDR(x) ((x) << S_T6_XADDR)
-#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3980
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3984
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3988
@@ -55734,21 +67675,6 @@
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AZ_CONTROL 0x399c
#define A_T6_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCC_CONTROL 0x39a0
-#define S_T6_DCCTIMEEN 13
-#define M_T6_DCCTIMEEN 0x3U
-#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN)
-#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN)
-
-#define S_T6_DCCLOCK 11
-#define M_T6_DCCLOCK 0x3U
-#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK)
-#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK)
-
-#define S_T6_DCCOFFSET 8
-#define M_T6_DCCOFFSET 0x7U
-#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET)
-#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET)
-
#define S_TX_LINK_BCST_DCCSTEP_CTL 6
#define M_TX_LINK_BCST_DCCSTEP_CTL 0x3U
#define V_TX_LINK_BCST_DCCSTEP_CTL(x) ((x) << S_TX_LINK_BCST_DCCSTEP_CTL)
@@ -55766,74 +67692,22 @@
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x39e0
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_5 0x39ec
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_4 0x39f0
-
-#define S_T6_SDOVRD 0
-#define M_T6_SDOVRD 0xffffU
-#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD)
-#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_3 0x39f4
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_2 0x39f8
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_1 0x39fc
-
-#define S_T6_SDOVRDEN 15
-#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN)
-#define F_T6_SDOVRDEN V_T6_SDOVRDEN(1U)
-
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_CONFIGURATION_MODE 0x3a00
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_TEST_CONTROL 0x3a04
#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_CONTROL 0x3a08
#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_OFFSET_CONTROL 0x3a0c
-
-#define S_T6_TMSCAL 8
-#define M_T6_TMSCAL 0x3U
-#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL)
-#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL)
-
-#define S_T6_APADJ 7
-#define V_T6_APADJ(x) ((x) << S_T6_APADJ)
-#define F_T6_APADJ V_T6_APADJ(1U)
-
-#define S_T6_RSEL 6
-#define V_T6_RSEL(x) ((x) << S_T6_RSEL)
-#define F_T6_RSEL V_T6_RSEL(1U)
-
-#define S_T6_PHOFFS 0
-#define M_T6_PHOFFS 0x3fU
-#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS)
-#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS)
-
#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_POSITION_1 0x3a10
#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_POSITION_2 0x3a14
#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3a18
#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x3a1c
#define A_MAC_PORT_RX_LINK_BCST_DFE_CONTROL 0x3a20
-
-#define S_T6_SPIFMT 8
-#define M_T6_SPIFMT 0xfU
-#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT)
-#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT)
-
#define A_MAC_PORT_RX_LINK_BCST_DFE_SAMPLE_SNAPSHOT_1 0x3a24
#define A_MAC_PORT_RX_LINK_BCST_DFE_SAMPLE_SNAPSHOT_2 0x3a28
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_VGA_CONTROL_1 0x3a2c
-
-#define S_T6_WRAPSEL 15
-#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL)
-#define F_T6_WRAPSEL V_T6_WRAPSEL(1U)
-
-#define S_T6_PEAK 9
-#define M_T6_PEAK 0x1fU
-#define V_T6_PEAK(x) ((x) << S_T6_PEAK)
-#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK)
-
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_VGA_CONTROL_2 0x3a30
-
-#define S_T6_T5VGAIN 0
-#define M_T6_T5VGAIN 0x7fU
-#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN)
-#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN)
-
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_VGA_CONTROL_3 0x3a34
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_DQCC_CONTROL_1 0x3a38
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3a38
@@ -55857,12 +67731,6 @@
#define A_MAC_PORT_RX_LINK_BCST_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x3a6c
#define A_MAC_PORT_RX_LINK_BCST_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3a70
#define A_MAC_PORT_RX_LINK_BCST_DYNAMIC_DATA_CENTERING_DDC 0x3a74
-
-#define S_T6_ODEC 0
-#define M_T6_ODEC 0xfU
-#define V_T6_ODEC(x) ((x) << S_T6_ODEC)
-#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC)
-
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_INTERNAL_STATUS 0x3a78
#define S_RX_LINK_BCST_ACCCMP_RIS 11
@@ -55892,20 +67760,6 @@
#define A_MAC_PORT_RX_LINK_BCST_INTEGRATOR_DAC_OFFSET 0x3aa4
#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_CONTROL 0x3aa8
#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_METRICS 0x3aac
-
-#define S_T6_EMMD 3
-#define M_T6_EMMD 0x3U
-#define V_T6_EMMD(x) ((x) << S_T6_EMMD)
-#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD)
-
-#define S_T6_EMBRDY 2
-#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY)
-#define F_T6_EMBRDY V_T6_EMBRDY(1U)
-
-#define S_T6_EMBUMP 1
-#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP)
-#define F_T6_EMBUMP V_T6_EMBUMP(1U)
-
#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_METRICS_ERROR_COUNT 0x3ab0
#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x3ab4
#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x3ab8
@@ -56304,17 +68158,6 @@
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10
-
-#define S_T6_C0MAX 8
-#define M_T6_C0MAX 0x7fU
-#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX)
-#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX)
-
-#define S_T6_C0MIN 0
-#define M_T6_C0MIN 0x7fU
-#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN)
-#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20
@@ -56323,17 +68166,6 @@
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C2_INIT_EXTENDED 0x28
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30
-
-#define S_T6_C2MAX 8
-#define M_T6_C2MAX 0x7fU
-#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX)
-#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX)
-
-#define S_T6_C2MIN 0
-#define M_T6_C2MIN 0x7fU
-#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN)
-#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40
@@ -56349,17 +68181,6 @@
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10
-
-#define S_T6_C0MAX 8
-#define M_T6_C0MAX 0x7fU
-#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX)
-#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX)
-
-#define S_T6_C0MIN 0
-#define M_T6_C0MIN 0x7fU
-#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN)
-#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20
@@ -56368,17 +68189,6 @@
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C2_INIT_EXTENDED 0x28
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30
-
-#define S_T6_C2MAX 8
-#define M_T6_C2MAX 0x7fU
-#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX)
-#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX)
-
-#define S_T6_C2MIN 0
-#define M_T6_C2MIN 0x7fU
-#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN)
-#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40
@@ -56394,17 +68204,6 @@
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10
-
-#define S_T6_C0MAX 8
-#define M_T6_C0MAX 0x7fU
-#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX)
-#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX)
-
-#define S_T6_C0MIN 0
-#define M_T6_C0MIN 0x7fU
-#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN)
-#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20
@@ -56413,17 +68212,6 @@
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C2_INIT_EXTENDED 0x28
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30
-
-#define S_T6_C2MAX 8
-#define M_T6_C2MAX 0x7fU
-#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX)
-#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX)
-
-#define S_T6_C2MIN 0
-#define M_T6_C2MIN 0x7fU
-#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN)
-#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40
@@ -56439,17 +68227,6 @@
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10
-
-#define S_T6_C0MAX 8
-#define M_T6_C0MAX 0x7fU
-#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX)
-#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX)
-
-#define S_T6_C0MIN 0
-#define M_T6_C0MIN 0x7fU
-#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN)
-#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20
@@ -56458,17 +68235,6 @@
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C2_INIT_EXTENDED 0x28
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30
-
-#define S_T6_C2MAX 8
-#define M_T6_C2MAX 0x7fU
-#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX)
-#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX)
-
-#define S_T6_C2MIN 0
-#define M_T6_C2MIN 0x7fU
-#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN)
-#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40
@@ -56639,17 +68405,6 @@
#define G_RX_LINKB_INDEX_DFE_EN(x) (((x) >> S_RX_LINKB_INDEX_DFE_EN) & M_RX_LINKB_INDEX_DFE_EN)
#define A_T6_MAC_PORT_RX_LINKB_DFE_H1 0x2b04
-
-#define S_T6_H1OSN 13
-#define M_T6_H1OSN 0x7U
-#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN)
-#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN)
-
-#define S_T6_H1OMAG 8
-#define M_T6_H1OMAG 0x1fU
-#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG)
-#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG)
-
#define A_T6_MAC_PORT_RX_LINKB_DFE_H2 0x2b08
#define A_T6_MAC_PORT_RX_LINKB_DFE_H3 0x2b0c
#define A_T6_MAC_PORT_RX_LINKB_DFE_H4 0x2b10
@@ -56668,17 +68423,6 @@
#define G_RX_LINKC_INDEX_DFE_EN(x) (((x) >> S_RX_LINKC_INDEX_DFE_EN) & M_RX_LINKC_INDEX_DFE_EN)
#define A_T6_MAC_PORT_RX_LINKC_DFE_H1 0x2e04
-
-#define S_T6_H1OSN 13
-#define M_T6_H1OSN 0x7U
-#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN)
-#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN)
-
-#define S_T6_H1OMAG 8
-#define M_T6_H1OMAG 0x1fU
-#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG)
-#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG)
-
#define A_T6_MAC_PORT_RX_LINKC_DFE_H2 0x2e08
#define A_T6_MAC_PORT_RX_LINKC_DFE_H3 0x2e0c
#define A_T6_MAC_PORT_RX_LINKC_DFE_H4 0x2e10
@@ -56697,17 +68441,6 @@
#define G_RX_LINKD_INDEX_DFE_EN(x) (((x) >> S_RX_LINKD_INDEX_DFE_EN) & M_RX_LINKD_INDEX_DFE_EN)
#define A_T6_MAC_PORT_RX_LINKD_DFE_H1 0x2f04
-
-#define S_T6_H1OSN 13
-#define M_T6_H1OSN 0x7U
-#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN)
-#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN)
-
-#define S_T6_H1OMAG 8
-#define M_T6_H1OMAG 0x1fU
-#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG)
-#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG)
-
#define A_T6_MAC_PORT_RX_LINKD_DFE_H2 0x2f08
#define A_T6_MAC_PORT_RX_LINKD_DFE_H3 0x2f0c
#define A_T6_MAC_PORT_RX_LINKD_DFE_H4 0x2f10
@@ -56726,17 +68459,6 @@
#define G_RX_LINK_BCST_INDEX_DFE_EN(x) (((x) >> S_RX_LINK_BCST_INDEX_DFE_EN) & M_RX_LINK_BCST_INDEX_DFE_EN)
#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H1 0x3204
-
-#define S_T6_H1OSN 13
-#define M_T6_H1OSN 0x7U
-#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN)
-#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN)
-
-#define S_T6_H1OMAG 8
-#define M_T6_H1OMAG 0x1fU
-#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG)
-#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG)
-
#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H2 0x3208
#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H3 0x320c
#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H4 0x3210
@@ -57294,69 +69016,21 @@
#define G_BANK(x) (((x) >> S_BANK) & M_BANK)
#define A_MC_LMC_INITSEQ1 0x40148
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD1 0x4014c
#define A_MC_LMC_INITSEQ2 0x40150
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD2 0x40154
#define A_MC_LMC_INITSEQ3 0x40158
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD3 0x4015c
#define A_MC_LMC_INITSEQ4 0x40160
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD4 0x40164
#define A_MC_LMC_INITSEQ5 0x40168
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD5 0x4016c
#define A_MC_LMC_INITSEQ6 0x40170
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD6 0x40174
#define A_MC_LMC_INITSEQ7 0x40178
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD7 0x4017c
#define A_MC_UPCTL_ECCCFG 0x40180
#define A_MC_LMC_INITSEQ8 0x40180
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_UPCTL_ECCTST 0x40184
#define S_ECC_TEST_MASK0 0
@@ -57367,61 +69041,19 @@
#define A_MC_LMC_CMD8 0x40184
#define A_MC_UPCTL_ECCCLR 0x40188
#define A_MC_LMC_INITSEQ9 0x40188
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_UPCTL_ECCLOG 0x4018c
#define A_MC_LMC_CMD9 0x4018c
#define A_MC_LMC_INITSEQ10 0x40190
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD10 0x40194
#define A_MC_LMC_INITSEQ11 0x40198
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD11 0x4019c
#define A_MC_LMC_INITSEQ12 0x401a0
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD12 0x401a4
#define A_MC_LMC_INITSEQ13 0x401a8
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD13 0x401ac
#define A_MC_LMC_INITSEQ14 0x401b0
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD14 0x401b4
#define A_MC_LMC_INITSEQ15 0x401b8
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD15 0x401bc
#define A_MC_UPCTL_DTUWACTL 0x40200
@@ -61990,6 +73622,11 @@
#define V_NUMPIPESTAGES(x) ((x) << S_NUMPIPESTAGES)
#define G_NUMPIPESTAGES(x) (((x) >> S_NUMPIPESTAGES) & M_NUMPIPESTAGES)
+#define S_DRAMREFENABLE 27
+#define M_DRAMREFENABLE 0x3U
+#define V_DRAMREFENABLE(x) ((x) << S_DRAMREFENABLE)
+#define G_DRAMREFENABLE(x) (((x) >> S_DRAMREFENABLE) & M_DRAMREFENABLE)
+
#define A_EDC_H_DBG_MA_CMD_INTF 0x50300
#define S_MCMDADDR 12
@@ -62372,12 +74009,51 @@
#define V_REFCNT(x) ((x) << S_REFCNT)
#define G_REFCNT(x) (((x) >> S_REFCNT) & M_REFCNT)
+#define A_EDC_H_PAR_CAUSE 0x50404
+
+#define S_STG_CMDQ_PARERR_CAUSE 7
+#define V_STG_CMDQ_PARERR_CAUSE(x) ((x) << S_STG_CMDQ_PARERR_CAUSE)
+#define F_STG_CMDQ_PARERR_CAUSE V_STG_CMDQ_PARERR_CAUSE(1U)
+
+#define S_STG_WRDQ_PARERR_CAUSE 6
+#define V_STG_WRDQ_PARERR_CAUSE(x) ((x) << S_STG_WRDQ_PARERR_CAUSE)
+#define F_STG_WRDQ_PARERR_CAUSE V_STG_WRDQ_PARERR_CAUSE(1U)
+
+#define S_INP_CMDQ_PARERR_CAUSE 5
+#define V_INP_CMDQ_PARERR_CAUSE(x) ((x) << S_INP_CMDQ_PARERR_CAUSE)
+#define F_INP_CMDQ_PARERR_CAUSE V_INP_CMDQ_PARERR_CAUSE(1U)
+
+#define S_INP_WRDQ_PARERR_CAUSE 4
+#define V_INP_WRDQ_PARERR_CAUSE(x) ((x) << S_INP_WRDQ_PARERR_CAUSE)
+#define F_INP_WRDQ_PARERR_CAUSE V_INP_WRDQ_PARERR_CAUSE(1U)
+
+#define S_INP_BEQ_PARERR_CAUSE 3
+#define V_INP_BEQ_PARERR_CAUSE(x) ((x) << S_INP_BEQ_PARERR_CAUSE)
+#define F_INP_BEQ_PARERR_CAUSE V_INP_BEQ_PARERR_CAUSE(1U)
+
+#define S_ECC_CE_PAR_ENABLE_CAUSE 2
+#define V_ECC_CE_PAR_ENABLE_CAUSE(x) ((x) << S_ECC_CE_PAR_ENABLE_CAUSE)
+#define F_ECC_CE_PAR_ENABLE_CAUSE V_ECC_CE_PAR_ENABLE_CAUSE(1U)
+
+#define S_ECC_UE_PAR_ENABLE_CAUSE 1
+#define V_ECC_UE_PAR_ENABLE_CAUSE(x) ((x) << S_ECC_UE_PAR_ENABLE_CAUSE)
+#define F_ECC_UE_PAR_ENABLE_CAUSE V_ECC_UE_PAR_ENABLE_CAUSE(1U)
+
+#define S_RDDQ_PARERR_CAUSE 0
+#define V_RDDQ_PARERR_CAUSE(x) ((x) << S_RDDQ_PARERR_CAUSE)
+#define F_RDDQ_PARERR_CAUSE V_RDDQ_PARERR_CAUSE(1U)
+
/* registers for module EDC_T61 */
#define EDC_T61_BASE_ADDR 0x50800
/* registers for module HMA_T6 */
#define HMA_T6_BASE_ADDR 0x51000
+#define S_T7_CLIENT_EN 0
+#define M_T7_CLIENT_EN 0x7fffU
+#define V_T7_CLIENT_EN(x) ((x) << S_T7_CLIENT_EN)
+#define G_T7_CLIENT_EN(x) (((x) >> S_T7_CLIENT_EN) & M_T7_CLIENT_EN)
+
#define S_TPH 12
#define M_TPH 0x3U
#define V_TPH(x) ((x) << S_TPH)
@@ -62398,6 +74074,14 @@
#define V_OP_MODE(x) ((x) << S_OP_MODE)
#define F_OP_MODE V_OP_MODE(1U)
+#define S_GK_ENABLE 30
+#define V_GK_ENABLE(x) ((x) << S_GK_ENABLE)
+#define F_GK_ENABLE V_GK_ENABLE(1U)
+
+#define S_DBGCNTRST 29
+#define V_DBGCNTRST(x) ((x) << S_DBGCNTRST)
+#define F_DBGCNTRST V_DBGCNTRST(1U)
+
#define A_HMA_TLB_ACCESS 0x51028
#define S_INV_ALL 29
@@ -62437,6 +74121,11 @@
#define V_REGION(x) ((x) << S_REGION)
#define G_REGION(x) (((x) >> S_REGION) & M_REGION)
+#define S_T7_VA 8
+#define M_T7_VA 0xffffffU
+#define V_T7_VA(x) ((x) << S_T7_VA)
+#define G_T7_VA(x) (((x) >> S_T7_VA) & M_T7_VA)
+
#define A_HMA_TLB_DESC_0_H 0x51030
#define A_HMA_TLB_DESC_0_L 0x51034
#define A_HMA_TLB_DESC_1_H 0x51038
@@ -62460,6 +74149,11 @@
#define V_ADDR0_MIN(x) ((x) << S_ADDR0_MIN)
#define G_ADDR0_MIN(x) (((x) >> S_ADDR0_MIN) & M_ADDR0_MIN)
+#define S_REG0MINADDR0MIN 8
+#define M_REG0MINADDR0MIN 0xffffffU
+#define V_REG0MINADDR0MIN(x) ((x) << S_REG0MINADDR0MIN)
+#define G_REG0MINADDR0MIN(x) (((x) >> S_REG0MINADDR0MIN) & M_REG0MINADDR0MIN)
+
#define A_HMA_REG0_MAX 0x51074
#define S_ADDR0_MAX 12
@@ -62467,6 +74161,11 @@
#define V_ADDR0_MAX(x) ((x) << S_ADDR0_MAX)
#define G_ADDR0_MAX(x) (((x) >> S_ADDR0_MAX) & M_ADDR0_MAX)
+#define S_REG0MAXADDR0MAX 8
+#define M_REG0MAXADDR0MAX 0xffffffU
+#define V_REG0MAXADDR0MAX(x) ((x) << S_REG0MAXADDR0MAX)
+#define G_REG0MAXADDR0MAX(x) (((x) >> S_REG0MAXADDR0MAX) & M_REG0MAXADDR0MAX)
+
#define A_HMA_REG0_MASK 0x51078
#define S_PAGE_SIZE0 12
@@ -62475,6 +74174,7 @@
#define G_PAGE_SIZE0(x) (((x) >> S_PAGE_SIZE0) & M_PAGE_SIZE0)
#define A_HMA_REG0_BASE 0x5107c
+#define A_HMA_REG0_BASE_LSB 0x5107c
#define A_HMA_REG1_MIN 0x51080
#define S_ADDR1_MIN 12
@@ -62482,6 +74182,11 @@
#define V_ADDR1_MIN(x) ((x) << S_ADDR1_MIN)
#define G_ADDR1_MIN(x) (((x) >> S_ADDR1_MIN) & M_ADDR1_MIN)
+#define S_REG1MINADDR1MIN 8
+#define M_REG1MINADDR1MIN 0xffffffU
+#define V_REG1MINADDR1MIN(x) ((x) << S_REG1MINADDR1MIN)
+#define G_REG1MINADDR1MIN(x) (((x) >> S_REG1MINADDR1MIN) & M_REG1MINADDR1MIN)
+
#define A_HMA_REG1_MAX 0x51084
#define S_ADDR1_MAX 12
@@ -62489,6 +74194,11 @@
#define V_ADDR1_MAX(x) ((x) << S_ADDR1_MAX)
#define G_ADDR1_MAX(x) (((x) >> S_ADDR1_MAX) & M_ADDR1_MAX)
+#define S_REG1MAXADDR1MAX 8
+#define M_REG1MAXADDR1MAX 0xffffffU
+#define V_REG1MAXADDR1MAX(x) ((x) << S_REG1MAXADDR1MAX)
+#define G_REG1MAXADDR1MAX(x) (((x) >> S_REG1MAXADDR1MAX) & M_REG1MAXADDR1MAX)
+
#define A_HMA_REG1_MASK 0x51088
#define S_PAGE_SIZE1 12
@@ -62497,6 +74207,7 @@
#define G_PAGE_SIZE1(x) (((x) >> S_PAGE_SIZE1) & M_PAGE_SIZE1)
#define A_HMA_REG1_BASE 0x5108c
+#define A_HMA_REG1_BASE_LSB 0x5108c
#define A_HMA_REG2_MIN 0x51090
#define S_ADDR2_MIN 12
@@ -62504,6 +74215,11 @@
#define V_ADDR2_MIN(x) ((x) << S_ADDR2_MIN)
#define G_ADDR2_MIN(x) (((x) >> S_ADDR2_MIN) & M_ADDR2_MIN)
+#define S_REG2MINADDR2MIN 8
+#define M_REG2MINADDR2MIN 0xffffffU
+#define V_REG2MINADDR2MIN(x) ((x) << S_REG2MINADDR2MIN)
+#define G_REG2MINADDR2MIN(x) (((x) >> S_REG2MINADDR2MIN) & M_REG2MINADDR2MIN)
+
#define A_HMA_REG2_MAX 0x51094
#define S_ADDR2_MAX 12
@@ -62511,6 +74227,11 @@
#define V_ADDR2_MAX(x) ((x) << S_ADDR2_MAX)
#define G_ADDR2_MAX(x) (((x) >> S_ADDR2_MAX) & M_ADDR2_MAX)
+#define S_REG2MAXADDR2MAX 8
+#define M_REG2MAXADDR2MAX 0xffffffU
+#define V_REG2MAXADDR2MAX(x) ((x) << S_REG2MAXADDR2MAX)
+#define G_REG2MAXADDR2MAX(x) (((x) >> S_REG2MAXADDR2MAX) & M_REG2MAXADDR2MAX)
+
#define A_HMA_REG2_MASK 0x51098
#define S_PAGE_SIZE2 12
@@ -62519,6 +74240,7 @@
#define G_PAGE_SIZE2(x) (((x) >> S_PAGE_SIZE2) & M_PAGE_SIZE2)
#define A_HMA_REG2_BASE 0x5109c
+#define A_HMA_REG2_BASE_LSB 0x5109c
#define A_HMA_REG3_MIN 0x510a0
#define S_ADDR3_MIN 12
@@ -62526,6 +74248,11 @@
#define V_ADDR3_MIN(x) ((x) << S_ADDR3_MIN)
#define G_ADDR3_MIN(x) (((x) >> S_ADDR3_MIN) & M_ADDR3_MIN)
+#define S_REG3MINADDR3MIN 8
+#define M_REG3MINADDR3MIN 0xffffffU
+#define V_REG3MINADDR3MIN(x) ((x) << S_REG3MINADDR3MIN)
+#define G_REG3MINADDR3MIN(x) (((x) >> S_REG3MINADDR3MIN) & M_REG3MINADDR3MIN)
+
#define A_HMA_REG3_MAX 0x510a4
#define S_ADDR3_MAX 12
@@ -62533,6 +74260,11 @@
#define V_ADDR3_MAX(x) ((x) << S_ADDR3_MAX)
#define G_ADDR3_MAX(x) (((x) >> S_ADDR3_MAX) & M_ADDR3_MAX)
+#define S_REG3MAXADDR3MAX 8
+#define M_REG3MAXADDR3MAX 0xffffffU
+#define V_REG3MAXADDR3MAX(x) ((x) << S_REG3MAXADDR3MAX)
+#define G_REG3MAXADDR3MAX(x) (((x) >> S_REG3MAXADDR3MAX) & M_REG3MAXADDR3MAX)
+
#define A_HMA_REG3_MASK 0x510a8
#define S_PAGE_SIZE3 12
@@ -62541,6 +74273,7 @@
#define G_PAGE_SIZE3(x) (((x) >> S_PAGE_SIZE3) & M_PAGE_SIZE3)
#define A_HMA_REG3_BASE 0x510ac
+#define A_HMA_REG3_BASE_LSB 0x510ac
#define A_HMA_SW_SYNC 0x510b0
#define S_ENTER_SYNC 31
@@ -62551,6 +74284,84 @@
#define V_EXIT_SYNC(x) ((x) << S_EXIT_SYNC)
#define F_EXIT_SYNC V_EXIT_SYNC(1U)
+#define A_HMA_GC_MODE_SEL 0x510b4
+
+#define S_MODE_SEL 8
+#define M_MODE_SEL 0x3U
+#define V_MODE_SEL(x) ((x) << S_MODE_SEL)
+#define G_MODE_SEL(x) (((x) >> S_MODE_SEL) & M_MODE_SEL)
+
+#define S_FLUSH_REQ 4
+#define V_FLUSH_REQ(x) ((x) << S_FLUSH_REQ)
+#define F_FLUSH_REQ V_FLUSH_REQ(1U)
+
+#define S_CLEAR_REQ 0
+#define V_CLEAR_REQ(x) ((x) << S_CLEAR_REQ)
+#define F_CLEAR_REQ V_CLEAR_REQ(1U)
+
+#define A_HMA_REG0_BASE_MSB 0x510b8
+
+#define S_BASE0_MSB 0
+#define M_BASE0_MSB 0xfU
+#define V_BASE0_MSB(x) ((x) << S_BASE0_MSB)
+#define G_BASE0_MSB(x) (((x) >> S_BASE0_MSB) & M_BASE0_MSB)
+
+#define A_HMA_REG1_BASE_MSB 0x510bc
+
+#define S_BASE1_MSB 0
+#define M_BASE1_MSB 0xfU
+#define V_BASE1_MSB(x) ((x) << S_BASE1_MSB)
+#define G_BASE1_MSB(x) (((x) >> S_BASE1_MSB) & M_BASE1_MSB)
+
+#define A_HMA_REG2_BASE_MSB 0x510c0
+
+#define S_BASE2_MSB 0
+#define M_BASE2_MSB 0xfU
+#define V_BASE2_MSB(x) ((x) << S_BASE2_MSB)
+#define G_BASE2_MSB(x) (((x) >> S_BASE2_MSB) & M_BASE2_MSB)
+
+#define A_HMA_REG3_BASE_MSB 0x510c4
+
+#define S_BASE3_MSB 0
+#define M_BASE3_MSB 0xfU
+#define V_BASE3_MSB(x) ((x) << S_BASE3_MSB)
+#define G_BASE3_MSB(x) (((x) >> S_BASE3_MSB) & M_BASE3_MSB)
+
+#define A_HMA_DBG_CTL 0x51104
+#define A_HMA_DBG_DATA 0x51108
+#define A_HMA_H_BIST_CMD 0x51200
+#define A_HMA_H_BIST_CMD_ADDR 0x51204
+#define A_HMA_H_BIST_CMD_LEN 0x51208
+#define A_HMA_H_BIST_DATA_PATTERN 0x5120c
+#define A_HMA_H_BIST_USER_WDATA0 0x51210
+#define A_HMA_H_BIST_USER_WDATA1 0x51214
+#define A_HMA_H_BIST_USER_WDATA2 0x51218
+#define A_HMA_H_BIST_NUM_ERR 0x5121c
+#define A_HMA_H_BIST_ERR_FIRST_ADDR 0x51220
+#define A_HMA_H_BIST_STATUS_RDATA 0x51224
+#define A_HMA_H_BIST_CRC_SEED 0x5126c
+#define A_HMA_TABLE_LINE1_MSB 0x51270
+
+#define S_STARTA 0
+#define M_STARTA 0xfU
+#define V_STARTA(x) ((x) << S_STARTA)
+#define G_STARTA(x) (((x) >> S_STARTA) & M_STARTA)
+
+#define A_HMA_TABLE_LINE2_MSB 0x51274
+
+#define S_ENDA 0
+#define M_ENDA 0xfU
+#define V_ENDA(x) ((x) << S_ENDA)
+#define G_ENDA(x) (((x) >> S_ENDA) & M_ENDA)
+
+#define S_GK_UF_PAR_ENABLE 6
+#define V_GK_UF_PAR_ENABLE(x) ((x) << S_GK_UF_PAR_ENABLE)
+#define F_GK_UF_PAR_ENABLE V_GK_UF_PAR_ENABLE(1U)
+
+#define S_PCIEMST_PAR_ENABLE 2
+#define V_PCIEMST_PAR_ENABLE(x) ((x) << S_PCIEMST_PAR_ENABLE)
+#define F_PCIEMST_PAR_ENABLE V_PCIEMST_PAR_ENABLE(1U)
+
#define S_IDTF_INT_ENABLE 5
#define V_IDTF_INT_ENABLE(x) ((x) << S_IDTF_INT_ENABLE)
#define F_IDTF_INT_ENABLE V_IDTF_INT_ENABLE(1U)
@@ -62571,6 +74382,10 @@
#define V_MAMST_INT_ENABLE(x) ((x) << S_MAMST_INT_ENABLE)
#define F_MAMST_INT_ENABLE V_MAMST_INT_ENABLE(1U)
+#define S_GK_UF_INT_ENABLE 6
+#define V_GK_UF_INT_ENABLE(x) ((x) << S_GK_UF_INT_ENABLE)
+#define F_GK_UF_INT_ENABLE V_GK_UF_INT_ENABLE(1U)
+
#define S_IDTF_INT_CAUSE 5
#define V_IDTF_INT_CAUSE(x) ((x) << S_IDTF_INT_CAUSE)
#define F_IDTF_INT_CAUSE V_IDTF_INT_CAUSE(1U)
@@ -62591,6 +74406,10 @@
#define V_MAMST_INT_CAUSE(x) ((x) << S_MAMST_INT_CAUSE)
#define F_MAMST_INT_CAUSE V_MAMST_INT_CAUSE(1U)
+#define S_GK_UF_INT_CAUSE 6
+#define V_GK_UF_INT_CAUSE(x) ((x) << S_GK_UF_INT_CAUSE)
+#define F_GK_UF_INT_CAUSE V_GK_UF_INT_CAUSE(1U)
+
#define A_HMA_MA_MST_ERR 0x5130c
#define A_HMA_RTF_ERR 0x51310
#define A_HMA_OTF_ERR 0x51314
@@ -62904,3 +74723,12365 @@
#define M_RD_EOP_CNT 0xffU
#define V_RD_EOP_CNT(x) ((x) << S_RD_EOP_CNT)
#define G_RD_EOP_CNT(x) (((x) >> S_RD_EOP_CNT) & M_RD_EOP_CNT)
+
+#define S_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT 16
+#define M_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT 0xffU
+#define V_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT(x) ((x) << S_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT)
+#define G_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT(x) (((x) >> S_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT) & M_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT)
+
+#define S_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT 8
+#define M_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT 0xffU
+#define V_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT(x) ((x) << S_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT)
+#define G_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT(x) (((x) >> S_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT) & M_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT)
+
+#define S_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT 0
+#define M_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT 0xffU
+#define V_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT(x) ((x) << S_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT)
+#define G_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT(x) (((x) >> S_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT) & M_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT)
+
+/* registers for module MAC_T7 */
+#define MAC_T7_BASE_ADDR 0x38000
+
+#define S_T7_PORT_MAP 21
+#define M_T7_PORT_MAP 0x7U
+#define V_T7_PORT_MAP(x) ((x) << S_T7_PORT_MAP)
+#define G_T7_PORT_MAP(x) (((x) >> S_T7_PORT_MAP) & M_T7_PORT_MAP)
+
+#define S_T7_SMUX_RX_LOOP 17
+#define M_T7_SMUX_RX_LOOP 0xfU
+#define V_T7_SMUX_RX_LOOP(x) ((x) << S_T7_SMUX_RX_LOOP)
+#define G_T7_SMUX_RX_LOOP(x) (((x) >> S_T7_SMUX_RX_LOOP) & M_T7_SMUX_RX_LOOP)
+
+#define S_T7_SIGNAL_DET 15
+#define V_T7_SIGNAL_DET(x) ((x) << S_T7_SIGNAL_DET)
+#define F_T7_SIGNAL_DET V_T7_SIGNAL_DET(1U)
+
+#define S_CFG_MAC_2_MPS_FULL 13
+#define V_CFG_MAC_2_MPS_FULL(x) ((x) << S_CFG_MAC_2_MPS_FULL)
+#define F_CFG_MAC_2_MPS_FULL V_CFG_MAC_2_MPS_FULL(1U)
+
+#define S_MPS_FULL_SEL 12
+#define V_MPS_FULL_SEL(x) ((x) << S_MPS_FULL_SEL)
+#define F_MPS_FULL_SEL V_MPS_FULL_SEL(1U)
+
+#define S_T7_SMUXTXSEL 8
+#define M_T7_SMUXTXSEL 0xfU
+#define V_T7_SMUXTXSEL(x) ((x) << S_T7_SMUXTXSEL)
+#define G_T7_SMUXTXSEL(x) (((x) >> S_T7_SMUXTXSEL) & M_T7_SMUXTXSEL)
+
+#define S_T7_PORTSPEED 4
+#define M_T7_PORTSPEED 0xfU
+#define V_T7_PORTSPEED(x) ((x) << S_T7_PORTSPEED)
+#define G_T7_PORTSPEED(x) (((x) >> S_T7_PORTSPEED) & M_T7_PORTSPEED)
+
+#define S_MTIP_REG_RESET 25
+#define V_MTIP_REG_RESET(x) ((x) << S_MTIP_REG_RESET)
+#define F_MTIP_REG_RESET V_MTIP_REG_RESET(1U)
+
+#define S_RESET_REG_CLK_I 24
+#define V_RESET_REG_CLK_I(x) ((x) << S_RESET_REG_CLK_I)
+#define F_RESET_REG_CLK_I V_RESET_REG_CLK_I(1U)
+
+#define S_T7_LED1_CFG1 15
+#define M_T7_LED1_CFG1 0x7U
+#define V_T7_LED1_CFG1(x) ((x) << S_T7_LED1_CFG1)
+#define G_T7_LED1_CFG1(x) (((x) >> S_T7_LED1_CFG1) & M_T7_LED1_CFG1)
+
+#define S_T7_LED0_CFG1 12
+#define M_T7_LED0_CFG1 0x7U
+#define V_T7_LED0_CFG1(x) ((x) << S_T7_LED0_CFG1)
+#define G_T7_LED0_CFG1(x) (((x) >> S_T7_LED0_CFG1) & M_T7_LED0_CFG1)
+
+#define A_T7_MAC_PORT_MAGIC_MACID_LO 0x820
+#define A_T7_MAC_PORT_MAGIC_MACID_HI 0x824
+#define A_T7_MAC_PORT_LINK_STATUS 0x828
+
+#define S_EGR_SE_CNT_ERR 9
+#define V_EGR_SE_CNT_ERR(x) ((x) << S_EGR_SE_CNT_ERR)
+#define F_EGR_SE_CNT_ERR V_EGR_SE_CNT_ERR(1U)
+
+#define S_INGR_SE_CNT_ERR 8
+#define V_INGR_SE_CNT_ERR(x) ((x) << S_INGR_SE_CNT_ERR)
+#define F_INGR_SE_CNT_ERR V_INGR_SE_CNT_ERR(1U)
+
+#define A_T7_MAC_PORT_PERR_INT_EN_100G 0x82c
+
+#define S_PERR_PCSR_FDM_3 21
+#define V_PERR_PCSR_FDM_3(x) ((x) << S_PERR_PCSR_FDM_3)
+#define F_PERR_PCSR_FDM_3 V_PERR_PCSR_FDM_3(1U)
+
+#define S_PERR_PCSR_FDM_2 20
+#define V_PERR_PCSR_FDM_2(x) ((x) << S_PERR_PCSR_FDM_2)
+#define F_PERR_PCSR_FDM_2 V_PERR_PCSR_FDM_2(1U)
+
+#define S_PERR_PCSR_FDM_1 19
+#define V_PERR_PCSR_FDM_1(x) ((x) << S_PERR_PCSR_FDM_1)
+#define F_PERR_PCSR_FDM_1 V_PERR_PCSR_FDM_1(1U)
+
+#define S_PERR_PCSR_FDM_0 18
+#define V_PERR_PCSR_FDM_0(x) ((x) << S_PERR_PCSR_FDM_0)
+#define F_PERR_PCSR_FDM_0 V_PERR_PCSR_FDM_0(1U)
+
+#define S_PERR_PCSR_FM_3 17
+#define V_PERR_PCSR_FM_3(x) ((x) << S_PERR_PCSR_FM_3)
+#define F_PERR_PCSR_FM_3 V_PERR_PCSR_FM_3(1U)
+
+#define S_PERR_PCSR_FM_2 16
+#define V_PERR_PCSR_FM_2(x) ((x) << S_PERR_PCSR_FM_2)
+#define F_PERR_PCSR_FM_2 V_PERR_PCSR_FM_2(1U)
+
+#define S_PERR_PCSR_FM_1 15
+#define V_PERR_PCSR_FM_1(x) ((x) << S_PERR_PCSR_FM_1)
+#define F_PERR_PCSR_FM_1 V_PERR_PCSR_FM_1(1U)
+
+#define S_PERR_PCSR_FM_0 14
+#define V_PERR_PCSR_FM_0(x) ((x) << S_PERR_PCSR_FM_0)
+#define F_PERR_PCSR_FM_0 V_PERR_PCSR_FM_0(1U)
+
+#define S_PERR_PCSR_DM_1 13
+#define V_PERR_PCSR_DM_1(x) ((x) << S_PERR_PCSR_DM_1)
+#define F_PERR_PCSR_DM_1 V_PERR_PCSR_DM_1(1U)
+
+#define S_PERR_PCSR_DM_0 12
+#define V_PERR_PCSR_DM_0(x) ((x) << S_PERR_PCSR_DM_0)
+#define F_PERR_PCSR_DM_0 V_PERR_PCSR_DM_0(1U)
+
+#define S_PERR_PCSR_DK_3 11
+#define V_PERR_PCSR_DK_3(x) ((x) << S_PERR_PCSR_DK_3)
+#define F_PERR_PCSR_DK_3 V_PERR_PCSR_DK_3(1U)
+
+#define S_PERR_PCSR_DK_2 10
+#define V_PERR_PCSR_DK_2(x) ((x) << S_PERR_PCSR_DK_2)
+#define F_PERR_PCSR_DK_2 V_PERR_PCSR_DK_2(1U)
+
+#define S_PERR_PCSR_DK_1 9
+#define V_PERR_PCSR_DK_1(x) ((x) << S_PERR_PCSR_DK_1)
+#define F_PERR_PCSR_DK_1 V_PERR_PCSR_DK_1(1U)
+
+#define S_PERR_PCSR_DK_0 8
+#define V_PERR_PCSR_DK_0(x) ((x) << S_PERR_PCSR_DK_0)
+#define F_PERR_PCSR_DK_0 V_PERR_PCSR_DK_0(1U)
+
+#define S_PERR_F91RO_1 7
+#define V_PERR_F91RO_1(x) ((x) << S_PERR_F91RO_1)
+#define F_PERR_F91RO_1 V_PERR_F91RO_1(1U)
+
+#define S_PERR_F91RO_0 6
+#define V_PERR_F91RO_0(x) ((x) << S_PERR_F91RO_0)
+#define F_PERR_F91RO_0 V_PERR_F91RO_0(1U)
+
+#define S_PERR_PCSR_F91DM 5
+#define V_PERR_PCSR_F91DM(x) ((x) << S_PERR_PCSR_F91DM)
+#define F_PERR_PCSR_F91DM V_PERR_PCSR_F91DM(1U)
+
+#define S_PERR_PCSR_F91TI 4
+#define V_PERR_PCSR_F91TI(x) ((x) << S_PERR_PCSR_F91TI)
+#define F_PERR_PCSR_F91TI V_PERR_PCSR_F91TI(1U)
+
+#define S_PERR_PCSR_F91TO 3
+#define V_PERR_PCSR_F91TO(x) ((x) << S_PERR_PCSR_F91TO)
+#define F_PERR_PCSR_F91TO V_PERR_PCSR_F91TO(1U)
+
+#define S_PERR_PCSR_F91M 2
+#define V_PERR_PCSR_F91M(x) ((x) << S_PERR_PCSR_F91M)
+#define F_PERR_PCSR_F91M V_PERR_PCSR_F91M(1U)
+
+#define S_PERR_PCSR_80_16_1 1
+#define V_PERR_PCSR_80_16_1(x) ((x) << S_PERR_PCSR_80_16_1)
+#define F_PERR_PCSR_80_16_1 V_PERR_PCSR_80_16_1(1U)
+
+#define S_PERR_PCSR_80_16_0 0
+#define V_PERR_PCSR_80_16_0(x) ((x) << S_PERR_PCSR_80_16_0)
+#define F_PERR_PCSR_80_16_0 V_PERR_PCSR_80_16_0(1U)
+
+#define A_T7_MAC_PORT_PERR_INT_CAUSE_100G 0x830
+#define A_T7_MAC_PORT_PERR_ENABLE_100G 0x834
+#define A_MAC_PORT_MAC10G100G_CONFIG_0 0x838
+
+#define S_PEER_DELAY_VAL 31
+#define V_PEER_DELAY_VAL(x) ((x) << S_PEER_DELAY_VAL)
+#define F_PEER_DELAY_VAL V_PEER_DELAY_VAL(1U)
+
+#define S_PEER_DELAY 1
+#define M_PEER_DELAY 0x3fffffffU
+#define V_PEER_DELAY(x) ((x) << S_PEER_DELAY)
+#define G_PEER_DELAY(x) (((x) >> S_PEER_DELAY) & M_PEER_DELAY)
+
+#define S_MODE1S_ENA 0
+#define V_MODE1S_ENA(x) ((x) << S_MODE1S_ENA)
+#define F_MODE1S_ENA V_MODE1S_ENA(1U)
+
+#define A_MAC_PORT_MAC10G100G_CONFIG_1 0x83c
+
+#define S_TX_STOP 25
+#define V_TX_STOP(x) ((x) << S_TX_STOP)
+#define F_TX_STOP V_TX_STOP(1U)
+
+#define S_T7_MODE1S_ENA 24
+#define V_T7_MODE1S_ENA(x) ((x) << S_T7_MODE1S_ENA)
+#define F_T7_MODE1S_ENA V_T7_MODE1S_ENA(1U)
+
+#define S_TX_TS_ID 12
+#define M_TX_TS_ID 0xfffU
+#define V_TX_TS_ID(x) ((x) << S_TX_TS_ID)
+#define G_TX_TS_ID(x) (((x) >> S_TX_TS_ID) & M_TX_TS_ID)
+
+#define S_T7_TX_LI_FAULT 11
+#define V_T7_TX_LI_FAULT(x) ((x) << S_T7_TX_LI_FAULT)
+#define F_T7_TX_LI_FAULT V_T7_TX_LI_FAULT(1U)
+
+#define S_XOFF_GEN 3
+#define M_XOFF_GEN 0xffU
+#define V_XOFF_GEN(x) ((x) << S_XOFF_GEN)
+#define G_XOFF_GEN(x) (((x) >> S_XOFF_GEN) & M_XOFF_GEN)
+
+#define S_TX_REM_FAULT 1
+#define V_TX_REM_FAULT(x) ((x) << S_TX_REM_FAULT)
+#define F_TX_REM_FAULT V_TX_REM_FAULT(1U)
+
+#define S_TX_LOC_FAULT 0
+#define V_TX_LOC_FAULT(x) ((x) << S_TX_LOC_FAULT)
+#define F_TX_LOC_FAULT V_TX_LOC_FAULT(1U)
+
+#define A_MAC_PORT_MAC10G100G_CONFIG_2 0x840
+
+#define S_FF_TX_RX_TS_NS 0
+#define M_FF_TX_RX_TS_NS 0x3fffffffU
+#define V_FF_TX_RX_TS_NS(x) ((x) << S_FF_TX_RX_TS_NS)
+#define G_FF_TX_RX_TS_NS(x) (((x) >> S_FF_TX_RX_TS_NS) & M_FF_TX_RX_TS_NS)
+
+#define A_MAC_PORT_MAC10G100G_STATUS 0x844
+
+#define S_REG_LOWP 21
+#define V_REG_LOWP(x) ((x) << S_REG_LOWP)
+#define F_REG_LOWP V_REG_LOWP(1U)
+
+#define S_LI_FAULT 20
+#define V_LI_FAULT(x) ((x) << S_LI_FAULT)
+#define F_LI_FAULT V_LI_FAULT(1U)
+
+#define S_TX_ISIDLE 19
+#define V_TX_ISIDLE(x) ((x) << S_TX_ISIDLE)
+#define F_TX_ISIDLE V_TX_ISIDLE(1U)
+
+#define S_TX_UNDERFLOW 18
+#define V_TX_UNDERFLOW(x) ((x) << S_TX_UNDERFLOW)
+#define F_TX_UNDERFLOW V_TX_UNDERFLOW(1U)
+
+#define S_T7_TX_EMPTY 17
+#define V_T7_TX_EMPTY(x) ((x) << S_T7_TX_EMPTY)
+#define F_T7_TX_EMPTY V_T7_TX_EMPTY(1U)
+
+#define S_T7_1_REM_FAULT 16
+#define V_T7_1_REM_FAULT(x) ((x) << S_T7_1_REM_FAULT)
+#define F_T7_1_REM_FAULT V_T7_1_REM_FAULT(1U)
+
+#define S_REG_TS_AVAIL 15
+#define V_REG_TS_AVAIL(x) ((x) << S_REG_TS_AVAIL)
+#define F_REG_TS_AVAIL V_REG_TS_AVAIL(1U)
+
+#define S_T7_PHY_TXENA 14
+#define V_T7_PHY_TXENA(x) ((x) << S_T7_PHY_TXENA)
+#define F_T7_PHY_TXENA V_T7_PHY_TXENA(1U)
+
+#define S_T7_PFC_MODE 13
+#define V_T7_PFC_MODE(x) ((x) << S_T7_PFC_MODE)
+#define F_T7_PFC_MODE V_T7_PFC_MODE(1U)
+
+#define S_PAUSE_ON 5
+#define M_PAUSE_ON 0xffU
+#define V_PAUSE_ON(x) ((x) << S_PAUSE_ON)
+#define G_PAUSE_ON(x) (((x) >> S_PAUSE_ON) & M_PAUSE_ON)
+
+#define S_MAC_PAUSE_EN 4
+#define V_MAC_PAUSE_EN(x) ((x) << S_MAC_PAUSE_EN)
+#define F_MAC_PAUSE_EN V_MAC_PAUSE_EN(1U)
+
+#define S_MAC_ENABLE 3
+#define V_MAC_ENABLE(x) ((x) << S_MAC_ENABLE)
+#define F_MAC_ENABLE V_MAC_ENABLE(1U)
+
+#define S_LOOP_ENA 2
+#define V_LOOP_ENA(x) ((x) << S_LOOP_ENA)
+#define F_LOOP_ENA V_LOOP_ENA(1U)
+
+#define S_LOC_FAULT 1
+#define V_LOC_FAULT(x) ((x) << S_LOC_FAULT)
+#define F_LOC_FAULT V_LOC_FAULT(1U)
+
+#define S_FF_RX_EMPTY 0
+#define V_FF_RX_EMPTY(x) ((x) << S_FF_RX_EMPTY)
+#define F_FF_RX_EMPTY V_FF_RX_EMPTY(1U)
+
+#define A_MAC_PORT_MAC_AN_STATE_STATUS0 0x848
+
+#define S_AN_VAL_AN 15
+#define V_AN_VAL_AN(x) ((x) << S_AN_VAL_AN)
+#define F_AN_VAL_AN V_AN_VAL_AN(1U)
+
+#define S_AN_TR_DIS_STATUS_AN 14
+#define V_AN_TR_DIS_STATUS_AN(x) ((x) << S_AN_TR_DIS_STATUS_AN)
+#define F_AN_TR_DIS_STATUS_AN V_AN_TR_DIS_STATUS_AN(1U)
+
+#define S_AN_STATUS_AN 13
+#define V_AN_STATUS_AN(x) ((x) << S_AN_STATUS_AN)
+#define F_AN_STATUS_AN V_AN_STATUS_AN(1U)
+
+#define S_AN_SELECT_AN 8
+#define M_AN_SELECT_AN 0x1fU
+#define V_AN_SELECT_AN(x) ((x) << S_AN_SELECT_AN)
+#define G_AN_SELECT_AN(x) (((x) >> S_AN_SELECT_AN) & M_AN_SELECT_AN)
+
+#define S_AN_RS_FEC_ENA_AN 7
+#define V_AN_RS_FEC_ENA_AN(x) ((x) << S_AN_RS_FEC_ENA_AN)
+#define F_AN_RS_FEC_ENA_AN V_AN_RS_FEC_ENA_AN(1U)
+
+#define S_AN_INT_AN 6
+#define V_AN_INT_AN(x) ((x) << S_AN_INT_AN)
+#define F_AN_INT_AN V_AN_INT_AN(1U)
+
+#define S_AN_FEC_ENA_AN 5
+#define V_AN_FEC_ENA_AN(x) ((x) << S_AN_FEC_ENA_AN)
+#define F_AN_FEC_ENA_AN V_AN_FEC_ENA_AN(1U)
+
+#define S_AN_DONE_AN 4
+#define V_AN_DONE_AN(x) ((x) << S_AN_DONE_AN)
+#define F_AN_DONE_AN V_AN_DONE_AN(1U)
+
+#define S_AN_STATE 0
+#define M_AN_STATE 0xfU
+#define V_AN_STATE(x) ((x) << S_AN_STATE)
+#define G_AN_STATE(x) (((x) >> S_AN_STATE) & M_AN_STATE)
+
+#define A_MAC_PORT_MAC_AN_STATE_STATUS1 0x84c
+#define A_T7_MAC_PORT_EPIO_DATA0 0x850
+#define A_T7_MAC_PORT_EPIO_DATA1 0x854
+#define A_T7_MAC_PORT_EPIO_DATA2 0x858
+#define A_T7_MAC_PORT_EPIO_DATA3 0x85c
+#define A_T7_MAC_PORT_EPIO_OP 0x860
+#define A_T7_MAC_PORT_WOL_STATUS 0x864
+#define A_T7_MAC_PORT_INT_EN 0x868
+
+#define S_MAC2MPS_PERR 31
+#define V_MAC2MPS_PERR(x) ((x) << S_MAC2MPS_PERR)
+#define F_MAC2MPS_PERR V_MAC2MPS_PERR(1U)
+
+#define S_MAC_PPS_INT_EN 30
+#define V_MAC_PPS_INT_EN(x) ((x) << S_MAC_PPS_INT_EN)
+#define F_MAC_PPS_INT_EN V_MAC_PPS_INT_EN(1U)
+
+#define S_MAC_TX_TS_AVAIL_INT_EN 29
+#define V_MAC_TX_TS_AVAIL_INT_EN(x) ((x) << S_MAC_TX_TS_AVAIL_INT_EN)
+#define F_MAC_TX_TS_AVAIL_INT_EN V_MAC_TX_TS_AVAIL_INT_EN(1U)
+
+#define S_MAC_SINGLE_ALARM_INT_EN 28
+#define V_MAC_SINGLE_ALARM_INT_EN(x) ((x) << S_MAC_SINGLE_ALARM_INT_EN)
+#define F_MAC_SINGLE_ALARM_INT_EN V_MAC_SINGLE_ALARM_INT_EN(1U)
+
+#define S_MAC_PERIODIC_ALARM_INT_EN 27
+#define V_MAC_PERIODIC_ALARM_INT_EN(x) ((x) << S_MAC_PERIODIC_ALARM_INT_EN)
+#define F_MAC_PERIODIC_ALARM_INT_EN V_MAC_PERIODIC_ALARM_INT_EN(1U)
+
+#define S_MAC_PATDETWAKE_INT_EN 26
+#define V_MAC_PATDETWAKE_INT_EN(x) ((x) << S_MAC_PATDETWAKE_INT_EN)
+#define F_MAC_PATDETWAKE_INT_EN V_MAC_PATDETWAKE_INT_EN(1U)
+
+#define S_MAC_MAGIC_WAKE_INT_EN 25
+#define V_MAC_MAGIC_WAKE_INT_EN(x) ((x) << S_MAC_MAGIC_WAKE_INT_EN)
+#define F_MAC_MAGIC_WAKE_INT_EN V_MAC_MAGIC_WAKE_INT_EN(1U)
+
+#define S_MAC_SIGDETCHG_INT_EN 24
+#define V_MAC_SIGDETCHG_INT_EN(x) ((x) << S_MAC_SIGDETCHG_INT_EN)
+#define F_MAC_SIGDETCHG_INT_EN V_MAC_SIGDETCHG_INT_EN(1U)
+
+#define S_MAC_PCS_LINK_GOOD_EN 12
+#define V_MAC_PCS_LINK_GOOD_EN(x) ((x) << S_MAC_PCS_LINK_GOOD_EN)
+#define F_MAC_PCS_LINK_GOOD_EN V_MAC_PCS_LINK_GOOD_EN(1U)
+
+#define S_MAC_PCS_LINK_FAIL_EN 11
+#define V_MAC_PCS_LINK_FAIL_EN(x) ((x) << S_MAC_PCS_LINK_FAIL_EN)
+#define F_MAC_PCS_LINK_FAIL_EN V_MAC_PCS_LINK_FAIL_EN(1U)
+
+#define S_MAC_OVRFLOW_INT_EN 10
+#define V_MAC_OVRFLOW_INT_EN(x) ((x) << S_MAC_OVRFLOW_INT_EN)
+#define F_MAC_OVRFLOW_INT_EN V_MAC_OVRFLOW_INT_EN(1U)
+
+#define S_MAC_REM_FAULT_INT_EN 7
+#define V_MAC_REM_FAULT_INT_EN(x) ((x) << S_MAC_REM_FAULT_INT_EN)
+#define F_MAC_REM_FAULT_INT_EN V_MAC_REM_FAULT_INT_EN(1U)
+
+#define S_MAC_LOC_FAULT_INT_EN 6
+#define V_MAC_LOC_FAULT_INT_EN(x) ((x) << S_MAC_LOC_FAULT_INT_EN)
+#define F_MAC_LOC_FAULT_INT_EN V_MAC_LOC_FAULT_INT_EN(1U)
+
+#define S_MAC_LINK_DOWN_INT_EN 5
+#define V_MAC_LINK_DOWN_INT_EN(x) ((x) << S_MAC_LINK_DOWN_INT_EN)
+#define F_MAC_LINK_DOWN_INT_EN V_MAC_LINK_DOWN_INT_EN(1U)
+
+#define S_MAC_LINK_UP_INT_EN 4
+#define V_MAC_LINK_UP_INT_EN(x) ((x) << S_MAC_LINK_UP_INT_EN)
+#define F_MAC_LINK_UP_INT_EN V_MAC_LINK_UP_INT_EN(1U)
+
+#define S_MAC_AN_DONE_INT_EN 3
+#define V_MAC_AN_DONE_INT_EN(x) ((x) << S_MAC_AN_DONE_INT_EN)
+#define F_MAC_AN_DONE_INT_EN V_MAC_AN_DONE_INT_EN(1U)
+
+#define S_MAC_AN_PGRD_INT_EN 2
+#define V_MAC_AN_PGRD_INT_EN(x) ((x) << S_MAC_AN_PGRD_INT_EN)
+#define F_MAC_AN_PGRD_INT_EN V_MAC_AN_PGRD_INT_EN(1U)
+
+#define S_MAC_TXFIFO_ERR_INT_EN 1
+#define V_MAC_TXFIFO_ERR_INT_EN(x) ((x) << S_MAC_TXFIFO_ERR_INT_EN)
+#define F_MAC_TXFIFO_ERR_INT_EN V_MAC_TXFIFO_ERR_INT_EN(1U)
+
+#define S_MAC_RXFIFO_ERR_INT_EN 0
+#define V_MAC_RXFIFO_ERR_INT_EN(x) ((x) << S_MAC_RXFIFO_ERR_INT_EN)
+#define F_MAC_RXFIFO_ERR_INT_EN V_MAC_RXFIFO_ERR_INT_EN(1U)
+
+#define A_T7_MAC_PORT_INT_CAUSE 0x86c
+
+#define S_MAC2MPS_PERR_CAUSE 31
+#define V_MAC2MPS_PERR_CAUSE(x) ((x) << S_MAC2MPS_PERR_CAUSE)
+#define F_MAC2MPS_PERR_CAUSE V_MAC2MPS_PERR_CAUSE(1U)
+
+#define S_MAC_PPS_INT_CAUSE 30
+#define V_MAC_PPS_INT_CAUSE(x) ((x) << S_MAC_PPS_INT_CAUSE)
+#define F_MAC_PPS_INT_CAUSE V_MAC_PPS_INT_CAUSE(1U)
+
+#define S_MAC_TX_TS_AVAIL_INT_CAUSE 29
+#define V_MAC_TX_TS_AVAIL_INT_CAUSE(x) ((x) << S_MAC_TX_TS_AVAIL_INT_CAUSE)
+#define F_MAC_TX_TS_AVAIL_INT_CAUSE V_MAC_TX_TS_AVAIL_INT_CAUSE(1U)
+
+#define S_MAC_SINGLE_ALARM_INT_CAUSE 28
+#define V_MAC_SINGLE_ALARM_INT_CAUSE(x) ((x) << S_MAC_SINGLE_ALARM_INT_CAUSE)
+#define F_MAC_SINGLE_ALARM_INT_CAUSE V_MAC_SINGLE_ALARM_INT_CAUSE(1U)
+
+#define S_MAC_PERIODIC_ALARM_INT_CAUSE 27
+#define V_MAC_PERIODIC_ALARM_INT_CAUSE(x) ((x) << S_MAC_PERIODIC_ALARM_INT_CAUSE)
+#define F_MAC_PERIODIC_ALARM_INT_CAUSE V_MAC_PERIODIC_ALARM_INT_CAUSE(1U)
+
+#define S_MAC_PATDETWAKE_INT_CAUSE 26
+#define V_MAC_PATDETWAKE_INT_CAUSE(x) ((x) << S_MAC_PATDETWAKE_INT_CAUSE)
+#define F_MAC_PATDETWAKE_INT_CAUSE V_MAC_PATDETWAKE_INT_CAUSE(1U)
+
+#define S_MAC_MAGIC_WAKE_INT_CAUSE 25
+#define V_MAC_MAGIC_WAKE_INT_CAUSE(x) ((x) << S_MAC_MAGIC_WAKE_INT_CAUSE)
+#define F_MAC_MAGIC_WAKE_INT_CAUSE V_MAC_MAGIC_WAKE_INT_CAUSE(1U)
+
+#define S_MAC_SIGDETCHG_INT_CAUSE 24
+#define V_MAC_SIGDETCHG_INT_CAUSE(x) ((x) << S_MAC_SIGDETCHG_INT_CAUSE)
+#define F_MAC_SIGDETCHG_INT_CAUSE V_MAC_SIGDETCHG_INT_CAUSE(1U)
+
+#define S_MAC_PCS_LINK_GOOD_CAUSE 12
+#define V_MAC_PCS_LINK_GOOD_CAUSE(x) ((x) << S_MAC_PCS_LINK_GOOD_CAUSE)
+#define F_MAC_PCS_LINK_GOOD_CAUSE V_MAC_PCS_LINK_GOOD_CAUSE(1U)
+
+#define S_MAC_PCS_LINK_FAIL_CAUSE 11
+#define V_MAC_PCS_LINK_FAIL_CAUSE(x) ((x) << S_MAC_PCS_LINK_FAIL_CAUSE)
+#define F_MAC_PCS_LINK_FAIL_CAUSE V_MAC_PCS_LINK_FAIL_CAUSE(1U)
+
+#define S_MAC_OVRFLOW_INT_CAUSE 10
+#define V_MAC_OVRFLOW_INT_CAUSE(x) ((x) << S_MAC_OVRFLOW_INT_CAUSE)
+#define F_MAC_OVRFLOW_INT_CAUSE V_MAC_OVRFLOW_INT_CAUSE(1U)
+
+#define S_MAC_REM_FAULT_INT_CAUSE 7
+#define V_MAC_REM_FAULT_INT_CAUSE(x) ((x) << S_MAC_REM_FAULT_INT_CAUSE)
+#define F_MAC_REM_FAULT_INT_CAUSE V_MAC_REM_FAULT_INT_CAUSE(1U)
+
+#define S_MAC_LOC_FAULT_INT_CAUSE 6
+#define V_MAC_LOC_FAULT_INT_CAUSE(x) ((x) << S_MAC_LOC_FAULT_INT_CAUSE)
+#define F_MAC_LOC_FAULT_INT_CAUSE V_MAC_LOC_FAULT_INT_CAUSE(1U)
+
+#define S_MAC_LINK_DOWN_INT_CAUSE 5
+#define V_MAC_LINK_DOWN_INT_CAUSE(x) ((x) << S_MAC_LINK_DOWN_INT_CAUSE)
+#define F_MAC_LINK_DOWN_INT_CAUSE V_MAC_LINK_DOWN_INT_CAUSE(1U)
+
+#define S_MAC_LINK_UP_INT_CAUSE 4
+#define V_MAC_LINK_UP_INT_CAUSE(x) ((x) << S_MAC_LINK_UP_INT_CAUSE)
+#define F_MAC_LINK_UP_INT_CAUSE V_MAC_LINK_UP_INT_CAUSE(1U)
+
+#define S_MAC_AN_DONE_INT_CAUSE 3
+#define V_MAC_AN_DONE_INT_CAUSE(x) ((x) << S_MAC_AN_DONE_INT_CAUSE)
+#define F_MAC_AN_DONE_INT_CAUSE V_MAC_AN_DONE_INT_CAUSE(1U)
+
+#define S_MAC_AN_PGRD_INT_CAUSE 2
+#define V_MAC_AN_PGRD_INT_CAUSE(x) ((x) << S_MAC_AN_PGRD_INT_CAUSE)
+#define F_MAC_AN_PGRD_INT_CAUSE V_MAC_AN_PGRD_INT_CAUSE(1U)
+
+#define S_MAC_TXFIFO_ERR_INT_CAUSE 1
+#define V_MAC_TXFIFO_ERR_INT_CAUSE(x) ((x) << S_MAC_TXFIFO_ERR_INT_CAUSE)
+#define F_MAC_TXFIFO_ERR_INT_CAUSE V_MAC_TXFIFO_ERR_INT_CAUSE(1U)
+
+#define S_MAC_RXFIFO_ERR_INT_CAUSE 0
+#define V_MAC_RXFIFO_ERR_INT_CAUSE(x) ((x) << S_MAC_RXFIFO_ERR_INT_CAUSE)
+#define F_MAC_RXFIFO_ERR_INT_CAUSE V_MAC_RXFIFO_ERR_INT_CAUSE(1U)
+
+#define A_T7_MAC_PORT_PERR_INT_EN 0x870
+#define A_T7_MAC_PORT_PERR_INT_CAUSE 0x874
+#define A_T7_MAC_PORT_PERR_ENABLE 0x878
+#define A_T7_MAC_PORT_PERR_INJECT 0x87c
+
+#define S_T7_MEMSEL_PERR 1
+#define M_T7_MEMSEL_PERR 0xffU
+#define V_T7_MEMSEL_PERR(x) ((x) << S_T7_MEMSEL_PERR)
+#define G_T7_MEMSEL_PERR(x) (((x) >> S_T7_MEMSEL_PERR) & M_T7_MEMSEL_PERR)
+
+#define A_T7_MAC_PORT_RUNT_FRAME 0x880
+#define A_T7_MAC_PORT_EEE_STATUS 0x884
+#define A_T7_MAC_PORT_TX_TS_ID 0x888
+
+#define S_TS_ID_MSB 3
+#define V_TS_ID_MSB(x) ((x) << S_TS_ID_MSB)
+#define F_TS_ID_MSB V_TS_ID_MSB(1U)
+
+#define A_T7_MAC_PORT_TX_TS_VAL_LO 0x88c
+#define A_T7_MAC_PORT_TX_TS_VAL_HI 0x890
+#define A_T7_MAC_PORT_EEE_CTL 0x894
+#define A_T7_MAC_PORT_EEE_TX_CTL 0x898
+#define A_T7_MAC_PORT_EEE_RX_CTL 0x89c
+#define A_T7_MAC_PORT_EEE_TX_10G_SLEEP_TIMER 0x8a0
+#define A_T7_MAC_PORT_EEE_TX_10G_QUIET_TIMER 0x8a4
+#define A_T7_MAC_PORT_EEE_TX_10G_WAKE_TIMER 0x8a8
+#define A_T7_MAC_PORT_EEE_RX_10G_QUIET_TIMER 0x8b8
+#define A_T7_MAC_PORT_EEE_RX_10G_WAKE_TIMER 0x8bc
+#define A_T7_MAC_PORT_EEE_RX_10G_WF_TIMER 0x8c0
+#define A_T7_MAC_PORT_EEE_WF_COUNT 0x8cc
+#define A_MAC_PORT_WOL_EN 0x8d0
+
+#define S_WOL_ENABLE 1
+#define V_WOL_ENABLE(x) ((x) << S_WOL_ENABLE)
+#define F_WOL_ENABLE V_WOL_ENABLE(1U)
+
+#define S_WOL_INDICATOR 0
+#define V_WOL_INDICATOR(x) ((x) << S_WOL_INDICATOR)
+#define F_WOL_INDICATOR V_WOL_INDICATOR(1U)
+
+#define A_MAC_PORT_INT_TRACE 0x8d4
+
+#define S_INTERRUPT 0
+#define M_INTERRUPT 0x7fffffffU
+#define V_INTERRUPT(x) ((x) << S_INTERRUPT)
+#define G_INTERRUPT(x) (((x) >> S_INTERRUPT) & M_INTERRUPT)
+
+#define A_MAC_PORT_TRACE_TS_LO 0x8d8
+#define A_MAC_PORT_TRACE_TS_HI 0x8dc
+#define A_MAC_PORT_MTIP_10G100G_REVISION 0x900
+
+#define S_VER_10G100G 8
+#define M_VER_10G100G 0xffU
+#define V_VER_10G100G(x) ((x) << S_VER_10G100G)
+#define G_VER_10G100G(x) (((x) >> S_VER_10G100G) & M_VER_10G100G)
+
+#define S_REV_10G100G 0
+#define M_REV_10G100G 0xffU
+#define V_REV_10G100G(x) ((x) << S_REV_10G100G)
+#define G_REV_10G100G(x) (((x) >> S_REV_10G100G) & M_REV_10G100G)
+
+#define A_MAC_PORT_MTIP_10G100G_SCRATCH 0x904
+#define A_MAC_PORT_MTIP_10G100G_COMMAND_CONFIG 0x908
+
+#define S_NO_PREAM 31
+#define V_NO_PREAM(x) ((x) << S_NO_PREAM)
+#define F_NO_PREAM V_NO_PREAM(1U)
+
+#define S_SHORT_PREAM 30
+#define V_SHORT_PREAM(x) ((x) << S_SHORT_PREAM)
+#define F_SHORT_PREAM V_SHORT_PREAM(1U)
+
+#define S_FLT_HDL_DIS 27
+#define V_FLT_HDL_DIS(x) ((x) << S_FLT_HDL_DIS)
+#define F_FLT_HDL_DIS V_FLT_HDL_DIS(1U)
+
+#define S_TX_FIFO_RESET 26
+#define V_TX_FIFO_RESET(x) ((x) << S_TX_FIFO_RESET)
+#define F_TX_FIFO_RESET V_TX_FIFO_RESET(1U)
+
+#define A_MAC_PORT_MTIP_10G100G_MAC_ADDR_0 0x90c
+#define A_MAC_PORT_MTIP_10G100G_MAC_ADDR_1 0x910
+#define A_MAC_PORT_MTIP_10G100G_FRM_LENGTH_TX_MTU 0x914
+#define A_MAC_PORT_MTIP_10G100G_RX_FIFO_SECTIONS 0x91c
+
+#define S_RX10G100G_EMPTY 16
+#define M_RX10G100G_EMPTY 0xffffU
+#define V_RX10G100G_EMPTY(x) ((x) << S_RX10G100G_EMPTY)
+#define G_RX10G100G_EMPTY(x) (((x) >> S_RX10G100G_EMPTY) & M_RX10G100G_EMPTY)
+
+#define S_RX10G100G_AVAIL 0
+#define M_RX10G100G_AVAIL 0xffffU
+#define V_RX10G100G_AVAIL(x) ((x) << S_RX10G100G_AVAIL)
+#define G_RX10G100G_AVAIL(x) (((x) >> S_RX10G100G_AVAIL) & M_RX10G100G_AVAIL)
+
+#define A_MAC_PORT_MTIP_10G100G_TX_FIFO_SECTIONS 0x920
+
+#define S_TX10G100G_EMPTY 16
+#define M_TX10G100G_EMPTY 0xffffU
+#define V_TX10G100G_EMPTY(x) ((x) << S_TX10G100G_EMPTY)
+#define G_TX10G100G_EMPTY(x) (((x) >> S_TX10G100G_EMPTY) & M_TX10G100G_EMPTY)
+
+#define S_TX10G100G_AVAIL 0
+#define M_TX10G100G_AVAIL 0xffffU
+#define V_TX10G100G_AVAIL(x) ((x) << S_TX10G100G_AVAIL)
+#define G_TX10G100G_AVAIL(x) (((x) >> S_TX10G100G_AVAIL) & M_TX10G100G_AVAIL)
+
+#define A_MAC_PORT_MTIP_10G100G_RX_FIFO_ALMOST_F_E 0x924
+#define A_MAC_PORT_MTIP_10G100G_TX_FIFO_ALMOST_F_E 0x928
+#define A_MAC_PORT_MTIP_10G100G_MDIO_CFG_STATUS 0x930
+#define A_MAC_PORT_MTIP_10G100G_MDIO_COMMAND 0x934
+#define A_MAC_PORT_MTIP_10G100G_MDIO_DATA 0x938
+#define A_MAC_PORT_MTIP_10G100G_MDIO_REGADDR 0x93c
+#define A_MAC_PORT_MTIP_10G100G_STATUS 0x940
+
+#define S_T7_TX_ISIDLE 8
+#define V_T7_TX_ISIDLE(x) ((x) << S_T7_TX_ISIDLE)
+#define F_T7_TX_ISIDLE V_T7_TX_ISIDLE(1U)
+
+#define A_MAC_PORT_MTIP_10G100G_TX_IPG_LENGTH 0x944
+
+#define S_IPG_COMP_CNT 16
+#define M_IPG_COMP_CNT 0xffffU
+#define V_IPG_COMP_CNT(x) ((x) << S_IPG_COMP_CNT)
+#define G_IPG_COMP_CNT(x) (((x) >> S_IPG_COMP_CNT) & M_IPG_COMP_CNT)
+
+#define S_AVG_IPG_LEN 2
+#define M_AVG_IPG_LEN 0xfU
+#define V_AVG_IPG_LEN(x) ((x) << S_AVG_IPG_LEN)
+#define G_AVG_IPG_LEN(x) (((x) >> S_AVG_IPG_LEN) & M_AVG_IPG_LEN)
+
+#define S_DSBL_DIC 0
+#define V_DSBL_DIC(x) ((x) << S_DSBL_DIC)
+#define F_DSBL_DIC V_DSBL_DIC(1U)
+
+#define A_MAC_PORT_MTIP_10G100G_CRC_MODE 0x948
+#define A_MAC_PORT_MTIP_10G100G_CL01_PAUSE_QUANTA 0x954
+#define A_MAC_PORT_MTIP_10G100G_CL23_PAUSE_QUANTA 0x958
+#define A_MAC_PORT_MTIP_10G100G_CL45_PAUSE_QUANTA 0x95c
+#define A_MAC_PORT_MTIP_10G100G_CL67_PAUSE_QUANTA 0x960
+#define A_MAC_PORT_MTIP_10G100G_CL01_QUANTA_THRESH 0x964
+#define A_MAC_PORT_MTIP_10G100G_CL23_QUANTA_THRESH 0x968
+#define A_MAC_PORT_MTIP_10G100G_CL45_QUANTA_THRESH 0x96c
+#define A_MAC_PORT_MTIP_10G100G_CL67_QUANTA_THRESH 0x970
+#define A_MAC_PORT_MTIP_10G100G_RX_PAUSE_STATUS 0x974
+#define A_MAC_PORT_MTIP_10G100G_TS_TIMESTAMP 0x97c
+#define A_MAC_PORT_MTIP_10G100G_XIF_MODE 0x980
+
+#define S_RX_CNT_MODE 16
+#define V_RX_CNT_MODE(x) ((x) << S_RX_CNT_MODE)
+#define F_RX_CNT_MODE V_RX_CNT_MODE(1U)
+
+#define S_TS_UPD64_MODE 12
+#define V_TS_UPD64_MODE(x) ((x) << S_TS_UPD64_MODE)
+#define F_TS_UPD64_MODE V_TS_UPD64_MODE(1U)
+
+#define S_TS_BINARY_MODE 11
+#define V_TS_BINARY_MODE(x) ((x) << S_TS_BINARY_MODE)
+#define F_TS_BINARY_MODE V_TS_BINARY_MODE(1U)
+
+#define S_TS_DELAY_MODE 10
+#define V_TS_DELAY_MODE(x) ((x) << S_TS_DELAY_MODE)
+#define F_TS_DELAY_MODE V_TS_DELAY_MODE(1U)
+
+#define S_TS_DELTA_MODE 9
+#define V_TS_DELTA_MODE(x) ((x) << S_TS_DELTA_MODE)
+#define F_TS_DELTA_MODE V_TS_DELTA_MODE(1U)
+
+#define S_TX_MAC_RS_ERR 8
+#define V_TX_MAC_RS_ERR(x) ((x) << S_TX_MAC_RS_ERR)
+#define F_TX_MAC_RS_ERR V_TX_MAC_RS_ERR(1U)
+
+#define S_RX_PAUSE_BYPASS 6
+#define V_RX_PAUSE_BYPASS(x) ((x) << S_RX_PAUSE_BYPASS)
+#define F_RX_PAUSE_BYPASS V_RX_PAUSE_BYPASS(1U)
+
+#define S_ONE_STEP_ENA 5
+#define V_ONE_STEP_ENA(x) ((x) << S_ONE_STEP_ENA)
+#define F_ONE_STEP_ENA V_ONE_STEP_ENA(1U)
+
+#define S_PAUSETIMERX8 4
+#define V_PAUSETIMERX8(x) ((x) << S_PAUSETIMERX8)
+#define F_PAUSETIMERX8 V_PAUSETIMERX8(1U)
+
+#define S_XGMII_ENA 0
+#define V_XGMII_ENA(x) ((x) << S_XGMII_ENA)
+#define F_XGMII_ENA V_XGMII_ENA(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_CONTROL_1 0xa00
+#define A_MAC_PORT_MTIP_CR4_0_STATUS_1 0xa04
+
+#define S_CR4_0_RX_LINK_STATUS 2
+#define V_CR4_0_RX_LINK_STATUS(x) ((x) << S_CR4_0_RX_LINK_STATUS)
+#define F_CR4_0_RX_LINK_STATUS V_CR4_0_RX_LINK_STATUS(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_DEVICE_ID0 0xa08
+
+#define S_CR4_0_DEVICE_ID0 0
+#define M_CR4_0_DEVICE_ID0 0xffffU
+#define V_CR4_0_DEVICE_ID0(x) ((x) << S_CR4_0_DEVICE_ID0)
+#define G_CR4_0_DEVICE_ID0(x) (((x) >> S_CR4_0_DEVICE_ID0) & M_CR4_0_DEVICE_ID0)
+
+#define A_MAC_PORT_MTIP_CR4_0_DEVICE_ID1 0xa0c
+
+#define S_CR4_0_DEVICE_ID1 0
+#define M_CR4_0_DEVICE_ID1 0xffffU
+#define V_CR4_0_DEVICE_ID1(x) ((x) << S_CR4_0_DEVICE_ID1)
+#define G_CR4_0_DEVICE_ID1(x) (((x) >> S_CR4_0_DEVICE_ID1) & M_CR4_0_DEVICE_ID1)
+
+#define A_MAC_PORT_MTIP_CR4_0_SPEED_ABILITY 0xa10
+
+#define S_50G_CAPABLE 5
+#define V_50G_CAPABLE(x) ((x) << S_50G_CAPABLE)
+#define F_50G_CAPABLE V_50G_CAPABLE(1U)
+
+#define S_25G_CAPABLE 4
+#define V_25G_CAPABLE(x) ((x) << S_25G_CAPABLE)
+#define F_25G_CAPABLE V_25G_CAPABLE(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_DEVICES_IN_PKG1 0xa14
+#define A_MAC_PORT_MTIP_CR4_0_DEVICES_IN_PKG2 0xa18
+#define A_MAC_PORT_MTIP_CR4_0_CONTROL_2 0xa1c
+
+#define S_T7_PCS_TYPE_SELECTION 0
+#define M_T7_PCS_TYPE_SELECTION 0xfU
+#define V_T7_PCS_TYPE_SELECTION(x) ((x) << S_T7_PCS_TYPE_SELECTION)
+#define G_T7_PCS_TYPE_SELECTION(x) (((x) >> S_T7_PCS_TYPE_SELECTION) & M_T7_PCS_TYPE_SELECTION)
+
+#define A_MAC_PORT_MTIP_CR4_0_STATUS_2 0xa20
+
+#define S_50GBASE_R_CAPABLE 8
+#define V_50GBASE_R_CAPABLE(x) ((x) << S_50GBASE_R_CAPABLE)
+#define F_50GBASE_R_CAPABLE V_50GBASE_R_CAPABLE(1U)
+
+#define S_25GBASE_R_CAPABLE 7
+#define V_25GBASE_R_CAPABLE(x) ((x) << S_25GBASE_R_CAPABLE)
+#define F_25GBASE_R_CAPABLE V_25GBASE_R_CAPABLE(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_PKG_ID0 0xa38
+#define A_MAC_PORT_MTIP_CR4_0_PKG_ID1 0xa3c
+#define A_MAC_PORT_MTIP_CR4_0_EEE_CTRL 0xa50
+
+#define S_50GBASE_R_FW 14
+#define V_50GBASE_R_FW(x) ((x) << S_50GBASE_R_FW)
+#define F_50GBASE_R_FW V_50GBASE_R_FW(1U)
+
+#define S_100GBASE_R_DS 13
+#define V_100GBASE_R_DS(x) ((x) << S_100GBASE_R_DS)
+#define F_100GBASE_R_DS V_100GBASE_R_DS(1U)
+
+#define S_100GBASE_R_FW 12
+#define V_100GBASE_R_FW(x) ((x) << S_100GBASE_R_FW)
+#define F_100GBASE_R_FW V_100GBASE_R_FW(1U)
+
+#define S_25GBASE_R_DS 11
+#define V_25GBASE_R_DS(x) ((x) << S_25GBASE_R_DS)
+#define F_25GBASE_R_DS V_25GBASE_R_DS(1U)
+
+#define S_25GBASE_R_FW 10
+#define V_25GBASE_R_FW(x) ((x) << S_25GBASE_R_FW)
+#define F_25GBASE_R_FW V_25GBASE_R_FW(1U)
+
+#define S_40GBASE_R_DS 9
+#define V_40GBASE_R_DS(x) ((x) << S_40GBASE_R_DS)
+#define F_40GBASE_R_DS V_40GBASE_R_DS(1U)
+
+#define S_40GBASE_R_FW 8
+#define V_40GBASE_R_FW(x) ((x) << S_40GBASE_R_FW)
+#define F_40GBASE_R_FW V_40GBASE_R_FW(1U)
+
+#define S_10GBASE_KE_EEE 6
+#define V_10GBASE_KE_EEE(x) ((x) << S_10GBASE_KE_EEE)
+#define F_10GBASE_KE_EEE V_10GBASE_KE_EEE(1U)
+
+#define S_FAST_WAKE 1
+#define M_FAST_WAKE 0x1fU
+#define V_FAST_WAKE(x) ((x) << S_FAST_WAKE)
+#define G_FAST_WAKE(x) (((x) >> S_FAST_WAKE) & M_FAST_WAKE)
+
+#define S_DEEP_SLEEP 0
+#define V_DEEP_SLEEP(x) ((x) << S_DEEP_SLEEP)
+#define F_DEEP_SLEEP V_DEEP_SLEEP(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_WAKE_ERROR_COUNTER 0xa58
+
+#define S_WAKE_ERROR_COUNTER 0
+#define M_WAKE_ERROR_COUNTER 0x1ffffU
+#define V_WAKE_ERROR_COUNTER(x) ((x) << S_WAKE_ERROR_COUNTER)
+#define G_WAKE_ERROR_COUNTER(x) (((x) >> S_WAKE_ERROR_COUNTER) & M_WAKE_ERROR_COUNTER)
+
+#define A_MAC_PORT_MTIP_CR4_0_BASE_R_STATUS_1 0xa80
+
+#define S_CR4_0_BR_BLOCK_LOCK 0
+#define V_CR4_0_BR_BLOCK_LOCK(x) ((x) << S_CR4_0_BR_BLOCK_LOCK)
+#define F_CR4_0_BR_BLOCK_LOCK V_CR4_0_BR_BLOCK_LOCK(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_BASE_R_STATUS_2 0xa84
+#define A_MAC_PORT_MTIP_CR4_0_SEED_A_0 0xa88
+
+#define S_SEED_A_0 0
+#define M_SEED_A_0 0xffffU
+#define V_SEED_A_0(x) ((x) << S_SEED_A_0)
+#define G_SEED_A_0(x) (((x) >> S_SEED_A_0) & M_SEED_A_0)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_A_1 0xa8c
+
+#define S_SEED_A_1 0
+#define M_SEED_A_1 0xffffU
+#define V_SEED_A_1(x) ((x) << S_SEED_A_1)
+#define G_SEED_A_1(x) (((x) >> S_SEED_A_1) & M_SEED_A_1)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_A_2 0xa90
+
+#define S_SEED_A_2 0
+#define M_SEED_A_2 0xffffU
+#define V_SEED_A_2(x) ((x) << S_SEED_A_2)
+#define G_SEED_A_2(x) (((x) >> S_SEED_A_2) & M_SEED_A_2)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_A_3 0xa94
+
+#define S_SEED_A_3 0
+#define M_SEED_A_3 0xffffU
+#define V_SEED_A_3(x) ((x) << S_SEED_A_3)
+#define G_SEED_A_3(x) (((x) >> S_SEED_A_3) & M_SEED_A_3)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_B_0 0xa98
+
+#define S_SEED_B_0 0
+#define M_SEED_B_0 0xffffU
+#define V_SEED_B_0(x) ((x) << S_SEED_B_0)
+#define G_SEED_B_0(x) (((x) >> S_SEED_B_0) & M_SEED_B_0)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_B_1 0xa9c
+
+#define S_SEED_B_1 0
+#define M_SEED_B_1 0xffffU
+#define V_SEED_B_1(x) ((x) << S_SEED_B_1)
+#define G_SEED_B_1(x) (((x) >> S_SEED_B_1) & M_SEED_B_1)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_B_2 0xaa0
+
+#define S_SEED_B_2 0
+#define M_SEED_B_2 0xffffU
+#define V_SEED_B_2(x) ((x) << S_SEED_B_2)
+#define G_SEED_B_2(x) (((x) >> S_SEED_B_2) & M_SEED_B_2)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_B_3 0xaa4
+
+#define S_SEED_B_3 0
+#define M_SEED_B_3 0xffffU
+#define V_SEED_B_3(x) ((x) << S_SEED_B_3)
+#define G_SEED_B_3(x) (((x) >> S_SEED_B_3) & M_SEED_B_3)
+
+#define A_MAC_PORT_MTIP_CR4_0_BASE_R_TEST_PATTERN_CONTROL 0xaa8
+
+#define S_TEST_PATTERN_40G 7
+#define V_TEST_PATTERN_40G(x) ((x) << S_TEST_PATTERN_40G)
+#define F_TEST_PATTERN_40G V_TEST_PATTERN_40G(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_BASE_R_TEST_ERR_CNT 0xaac
+#define A_MAC_PORT_MTIP_CR4_0_BER_HIGH_ORDER_CNT 0xab0
+
+#define S_BASE_R_BER_HIGH_ORDER_CNT 0
+#define M_BASE_R_BER_HIGH_ORDER_CNT 0xffffU
+#define V_BASE_R_BER_HIGH_ORDER_CNT(x) ((x) << S_BASE_R_BER_HIGH_ORDER_CNT)
+#define G_BASE_R_BER_HIGH_ORDER_CNT(x) (((x) >> S_BASE_R_BER_HIGH_ORDER_CNT) & M_BASE_R_BER_HIGH_ORDER_CNT)
+
+#define A_MAC_PORT_MTIP_CR4_0_ERR_BLK_HIGH_ORDER_CNT 0xab4
+#define A_MAC_PORT_MTIP_CR4_0_MULTI_LANE_ALIGN_STATUS_1 0xac8
+#define A_MAC_PORT_MTIP_CR4_0_MULTI_LANE_ALIGN_STATUS_2 0xacc
+#define A_MAC_PORT_MTIP_CR4_0_MULTI_LANE_ALIGN_STATUS_3 0xad0
+#define A_MAC_PORT_MTIP_CR4_0_MULTI_LANE_ALIGN_STATUS_4 0xad4
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_0 0xad8
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_1 0xadc
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_2 0xae0
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_3 0xae4
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_4 0xae8
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_5 0xaec
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_6 0xaf0
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_7 0xaf4
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_8 0xaf8
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_9 0xafc
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_10 0xb00
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_11 0xb04
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_12 0xb08
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_13 0xb0c
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_14 0xb10
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_15 0xb14
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_16 0xb18
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_17 0xb1c
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_18 0xb20
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_19 0xb24
+#define A_MAC_PORT_MTIP_CR4_0_LANE_0_MAPPING 0xb28
+#define A_MAC_PORT_MTIP_CR4_0_LANE_1_MAPPING 0xb2c
+#define A_MAC_PORT_MTIP_CR4_0_LANE_2_MAPPING 0xb30
+#define A_MAC_PORT_MTIP_CR4_0_LANE_3_MAPPING 0xb34
+#define A_MAC_PORT_MTIP_CR4_0_LANE_4_MAPPING 0xb38
+#define A_MAC_PORT_MTIP_CR4_0_LANE_5_MAPPING 0xb3c
+#define A_MAC_PORT_MTIP_CR4_0_LANE_6_MAPPING 0xb40
+#define A_MAC_PORT_MTIP_CR4_0_LANE_7_MAPPING 0xb44
+#define A_MAC_PORT_MTIP_CR4_0_LANE_8_MAPPING 0xb48
+#define A_MAC_PORT_MTIP_CR4_0_LANE_9_MAPPING 0xb4c
+#define A_MAC_PORT_MTIP_CR4_0_LANE_10_MAPPING 0xb50
+#define A_MAC_PORT_MTIP_CR4_0_LANE_11_MAPPING 0xb54
+#define A_MAC_PORT_MTIP_CR4_0_LANE_12_MAPPING 0xb58
+#define A_MAC_PORT_MTIP_CR4_0_LANE_13_MAPPING 0xb5c
+#define A_MAC_PORT_MTIP_CR4_0_LANE_14_MAPPING 0xb60
+#define A_MAC_PORT_MTIP_CR4_0_LANE_15_MAPPING 0xb64
+#define A_MAC_PORT_MTIP_CR4_0_LANE_16_MAPPING 0xb68
+#define A_MAC_PORT_MTIP_CR4_0_LANE_17_MAPPING 0xb6c
+#define A_MAC_PORT_MTIP_CR4_0_LANE_18_MAPPING 0xb70
+#define A_MAC_PORT_MTIP_CR4_0_LANE_19_MAPPING 0xb74
+#define A_MAC_PORT_MTIP_CR4_0_SCRATCH 0xb78
+#define A_MAC_PORT_MTIP_CR4_0_CORE_REVISION 0xb7c
+#define A_MAC_PORT_MTIP_CR4_0_VL_INTVL 0xb80
+
+#define S_VL_INTCL 0
+#define M_VL_INTCL 0xffffU
+#define V_VL_INTCL(x) ((x) << S_VL_INTCL)
+#define G_VL_INTCL(x) (((x) >> S_VL_INTCL) & M_VL_INTCL)
+
+#define A_MAC_PORT_MTIP_CR4_0_TX_LANE_THRESH 0xb84
+
+#define S_LANE6_LANE7 12
+#define M_LANE6_LANE7 0xfU
+#define V_LANE6_LANE7(x) ((x) << S_LANE6_LANE7)
+#define G_LANE6_LANE7(x) (((x) >> S_LANE6_LANE7) & M_LANE6_LANE7)
+
+#define S_LANE4_LANE5 8
+#define M_LANE4_LANE5 0xfU
+#define V_LANE4_LANE5(x) ((x) << S_LANE4_LANE5)
+#define G_LANE4_LANE5(x) (((x) >> S_LANE4_LANE5) & M_LANE4_LANE5)
+
+#define S_LANE2_LANE3 4
+#define M_LANE2_LANE3 0xfU
+#define V_LANE2_LANE3(x) ((x) << S_LANE2_LANE3)
+#define G_LANE2_LANE3(x) (((x) >> S_LANE2_LANE3) & M_LANE2_LANE3)
+
+#define S_LANE0_LANE1 0
+#define M_LANE0_LANE1 0xfU
+#define V_LANE0_LANE1(x) ((x) << S_LANE0_LANE1)
+#define G_LANE0_LANE1(x) (((x) >> S_LANE0_LANE1) & M_LANE0_LANE1)
+
+#define A_MAC_PORT_MTIP_CR4_0_VL0_0 0xb98
+
+#define S_M1 8
+#define M_M1 0xffU
+#define V_M1(x) ((x) << S_M1)
+#define G_M1(x) (((x) >> S_M1) & M_M1)
+
+#define S_M0 0
+#define M_M0 0xffU
+#define V_M0(x) ((x) << S_M0)
+#define G_M0(x) (((x) >> S_M0) & M_M0)
+
+#define A_MAC_PORT_MTIP_CR4_0_VL0_1 0xb9c
+
+#define S_M2 0
+#define M_M2 0xffU
+#define V_M2(x) ((x) << S_M2)
+#define G_M2(x) (((x) >> S_M2) & M_M2)
+
+#define A_MAC_PORT_MTIP_CR4_0_VL1_0 0xba0
+#define A_MAC_PORT_MTIP_CR4_0_VL1_1 0xba4
+#define A_MAC_PORT_MTIP_CR4_0_VL2_0 0xba8
+#define A_MAC_PORT_MTIP_CR4_0_VL2_1 0xbac
+#define A_MAC_PORT_MTIP_CR4_0_VL3_0 0xbb0
+#define A_MAC_PORT_MTIP_CR4_0_VL3_1 0xbb4
+#define A_MAC_PORT_MTIP_CR4_0_PCS_MODE 0xbb8
+
+#define S_ST_DISABLE_MLD 9
+#define V_ST_DISABLE_MLD(x) ((x) << S_ST_DISABLE_MLD)
+#define F_ST_DISABLE_MLD V_ST_DISABLE_MLD(1U)
+
+#define S_ST_EN_CLAUSE49 8
+#define V_ST_EN_CLAUSE49(x) ((x) << S_ST_EN_CLAUSE49)
+#define F_ST_EN_CLAUSE49 V_ST_EN_CLAUSE49(1U)
+
+#define S_HI_BER25 2
+#define V_HI_BER25(x) ((x) << S_HI_BER25)
+#define F_HI_BER25 V_HI_BER25(1U)
+
+#define S_DISABLE_MLD 1
+#define V_DISABLE_MLD(x) ((x) << S_DISABLE_MLD)
+#define F_DISABLE_MLD V_DISABLE_MLD(1U)
+
+#define S_ENA_CLAUSE49 0
+#define V_ENA_CLAUSE49(x) ((x) << S_ENA_CLAUSE49)
+#define F_ENA_CLAUSE49 V_ENA_CLAUSE49(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_VL4_0 0xc98
+#define A_MAC_PORT_MTIP_CR4_0_VL4_1 0xc9c
+#define A_MAC_PORT_MTIP_CR4_0_VL5_0 0xca0
+#define A_MAC_PORT_MTIP_CR4_0_VL5_1 0xca4
+#define A_MAC_PORT_MTIP_CR4_0_VL6_0 0xca8
+#define A_MAC_PORT_MTIP_CR4_0_VL6_1 0xcac
+#define A_MAC_PORT_MTIP_CR4_0_VL7_0 0xcb0
+#define A_MAC_PORT_MTIP_CR4_0_VL7_1 0xcb4
+#define A_MAC_PORT_MTIP_CR4_0_VL8_0 0xcb8
+#define A_MAC_PORT_MTIP_CR4_0_VL8_1 0xcbc
+#define A_MAC_PORT_MTIP_CR4_0_VL9_0 0xcc0
+#define A_MAC_PORT_MTIP_CR4_0_VL9_1 0xcc4
+#define A_MAC_PORT_MTIP_CR4_0_VL10_0 0xcc8
+#define A_MAC_PORT_MTIP_CR4_0_VL10_1 0xccc
+#define A_MAC_PORT_MTIP_CR4_0_VL11_0 0xcd0
+#define A_MAC_PORT_MTIP_CR4_0_VL11_1 0xcd4
+#define A_MAC_PORT_MTIP_CR4_0_VL12_0 0xcd8
+#define A_MAC_PORT_MTIP_CR4_0_VL12_1 0xcdc
+#define A_MAC_PORT_MTIP_CR4_0_VL13_0 0xce0
+#define A_MAC_PORT_MTIP_CR4_0_VL13_1 0xce4
+#define A_MAC_PORT_MTIP_CR4_0_VL14_0 0xce8
+#define A_MAC_PORT_MTIP_CR4_0_VL14_1 0xcec
+#define A_MAC_PORT_MTIP_CR4_0_VL15_0 0xcf0
+#define A_MAC_PORT_MTIP_CR4_0_VL15_1 0xcf4
+#define A_MAC_PORT_MTIP_CR4_0_VL16_0 0xcf8
+#define A_MAC_PORT_MTIP_CR4_0_VL16_1 0xcfc
+#define A_MAC_PORT_MTIP_CR4_0_VL17_0 0xd00
+#define A_MAC_PORT_MTIP_CR4_0_VL17_1 0xd04
+#define A_MAC_PORT_MTIP_CR4_0_VL18_0 0xd08
+#define A_MAC_PORT_MTIP_CR4_0_VL18_1 0xd0c
+#define A_MAC_PORT_MTIP_CR4_0_VL19_0 0xd10
+#define A_MAC_PORT_MTIP_CR4_0_VL19_1 0xd14
+#define A_MAC_PORT_MTIP_CR4_1_CONTROL_1 0x1000
+#define A_MAC_PORT_MTIP_CR4_1_STATUS_1 0x1004
+
+#define S_CR4_RX_LINK_STATUS_1 2
+#define V_CR4_RX_LINK_STATUS_1(x) ((x) << S_CR4_RX_LINK_STATUS_1)
+#define F_CR4_RX_LINK_STATUS_1 V_CR4_RX_LINK_STATUS_1(1U)
+
+#define A_MAC_PORT_MTIP_CR4_1_DEVICE_ID0 0x1008
+
+#define S_CR4_1_DEVICE_ID0 0
+#define M_CR4_1_DEVICE_ID0 0xffffU
+#define V_CR4_1_DEVICE_ID0(x) ((x) << S_CR4_1_DEVICE_ID0)
+#define G_CR4_1_DEVICE_ID0(x) (((x) >> S_CR4_1_DEVICE_ID0) & M_CR4_1_DEVICE_ID0)
+
+#define A_MAC_PORT_MTIP_CR4_1_DEVICE_ID1 0x100c
+
+#define S_CR4_1_DEVICE_ID1 0
+#define M_CR4_1_DEVICE_ID1 0xffffU
+#define V_CR4_1_DEVICE_ID1(x) ((x) << S_CR4_1_DEVICE_ID1)
+#define G_CR4_1_DEVICE_ID1(x) (((x) >> S_CR4_1_DEVICE_ID1) & M_CR4_1_DEVICE_ID1)
+
+#define A_MAC_PORT_MTIP_CR4_1_SPEED_ABILITY 0x1010
+#define A_MAC_PORT_MTIP_CR4_1_DEVICES_IN_PKG1 0x1014
+#define A_MAC_PORT_MTIP_CR4_1_DEVICES_IN_PKG2 0x1018
+#define A_MAC_PORT_MTIP_CR4_1_CONTROL_2 0x101c
+#define A_MAC_PORT_MTIP_CR4_1_STATUS_2 0x1020
+#define A_MAC_PORT_MTIP_CR4_1_PKG_ID0 0x1038
+#define A_MAC_PORT_MTIP_CR4_1_PKG_ID1 0x103c
+#define A_MAC_PORT_MTIP_CR4_1_EEE_CTRL 0x1050
+#define A_MAC_PORT_MTIP_CR4_1_WAKE_ERROR_COUNTER 0x1058
+#define A_MAC_PORT_MTIP_CR4_1_BASE_R_STATUS_1 0x1080
+
+#define S_CR4_1_BR_BLOCK_LOCK 0
+#define V_CR4_1_BR_BLOCK_LOCK(x) ((x) << S_CR4_1_BR_BLOCK_LOCK)
+#define F_CR4_1_BR_BLOCK_LOCK V_CR4_1_BR_BLOCK_LOCK(1U)
+
+#define A_MAC_PORT_MTIP_CR4_1_BASE_R_STATUS_2 0x1084
+#define A_MAC_PORT_MTIP_CR4_1_SEED_A_0 0x1088
+#define A_MAC_PORT_MTIP_CR4_1_SEED_A_1 0x108c
+#define A_MAC_PORT_MTIP_CR4_1_SEED_A_2 0x1090
+#define A_MAC_PORT_MTIP_CR4_1_SEED_A_3 0x1094
+#define A_MAC_PORT_MTIP_CR4_1_SEED_B_0 0x1098
+#define A_MAC_PORT_MTIP_CR4_1_SEED_B_1 0x109c
+#define A_MAC_PORT_MTIP_CR4_1_SEED_B_2 0x10a0
+#define A_MAC_PORT_MTIP_CR4_1_SEED_B_3 0x10a4
+#define A_MAC_PORT_MTIP_CR4_1_BASE_R_TEST_PATTERN_CONTROL 0x10a8
+#define A_MAC_PORT_MTIP_CR4_1_BASE_R_TEST_ERR_CNT 0x10ac
+#define A_MAC_PORT_MTIP_CR4_1_BER_HIGH_ORDER_CNT 0x10b0
+#define A_MAC_PORT_MTIP_CR4_1_ERR_BLK_HIGH_ORDER_CNT 0x10b4
+#define A_MAC_PORT_MTIP_CR4_1_MULTI_LANE_ALIGN_STATUS_1 0x10c8
+#define A_MAC_PORT_MTIP_CR4_1_MULTI_LANE_ALIGN_STATUS_2 0x10cc
+#define A_MAC_PORT_MTIP_CR4_1_MULTI_LANE_ALIGN_STATUS_3 0x10d0
+#define A_MAC_PORT_MTIP_CR4_1_MULTI_LANE_ALIGN_STATUS_4 0x10d4
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_0 0x10d8
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_1 0x10dc
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_2 0x10e0
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_3 0x10e4
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_4 0x10e8
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_5 0x10ec
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_6 0x10f0
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_7 0x10f4
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_8 0x10f8
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_9 0x10fc
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_10 0x1100
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_11 0x1104
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_12 0x1108
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_13 0x110c
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_14 0x1110
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_15 0x1114
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_16 0x1118
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_17 0x111c
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_18 0x1120
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_19 0x1124
+#define A_MAC_PORT_MTIP_CR4_1_LANE_0_MAPPING 0x1128
+#define A_MAC_PORT_MTIP_CR4_1_LANE_1_MAPPING 0x112c
+#define A_MAC_PORT_MTIP_CR4_1_LANE_2_MAPPING 0x1130
+#define A_MAC_PORT_MTIP_CR4_1_LANE_3_MAPPING 0x1134
+#define A_MAC_PORT_MTIP_CR4_1_LANE_4_MAPPING 0x1138
+#define A_MAC_PORT_MTIP_CR4_1_LANE_5_MAPPING 0x113c
+#define A_MAC_PORT_MTIP_CR4_1_LANE_6_MAPPING 0x1140
+#define A_MAC_PORT_MTIP_CR4_1_LANE_7_MAPPING 0x1144
+#define A_MAC_PORT_MTIP_CR4_1_LANE_8_MAPPING 0x1148
+#define A_MAC_PORT_MTIP_CR4_1_LANE_9_MAPPING 0x114c
+#define A_MAC_PORT_MTIP_CR4_1_LANE_10_MAPPING 0x1150
+#define A_MAC_PORT_MTIP_CR4_1_LANE_11_MAPPING 0x1154
+#define A_MAC_PORT_MTIP_CR4_1_LANE_12_MAPPING 0x1158
+#define A_MAC_PORT_MTIP_CR4_1_LANE_13_MAPPING 0x115c
+#define A_MAC_PORT_MTIP_CR4_1_LANE_14_MAPPING 0x1160
+#define A_MAC_PORT_MTIP_CR4_1_LANE_15_MAPPING 0x1164
+#define A_MAC_PORT_MTIP_CR4_1_LANE_16_MAPPING 0x1168
+#define A_MAC_PORT_MTIP_CR4_1_LANE_17_MAPPING 0x116c
+#define A_MAC_PORT_MTIP_CR4_1_LANE_18_MAPPING 0x1170
+#define A_MAC_PORT_MTIP_CR4_1_LANE_19_MAPPING 0x1174
+#define A_MAC_PORT_MTIP_CR4_1_SCRATCH 0x1178
+#define A_MAC_PORT_MTIP_CR4_1_CORE_REVISION 0x117c
+#define A_MAC_PORT_MTIP_CR4_1_VL_INTVL 0x1180
+#define A_MAC_PORT_MTIP_CR4_1_TX_LANE_THRESH 0x1184
+#define A_MAC_PORT_MTIP_CR4_1_VL0_0 0x1198
+#define A_MAC_PORT_MTIP_CR4_1_VL0_1 0x119c
+#define A_MAC_PORT_MTIP_CR4_1_VL1_0 0x11a0
+#define A_MAC_PORT_MTIP_CR4_1_VL1_1 0x11a4
+#define A_MAC_PORT_MTIP_CR4_1_VL2_0 0x11a8
+#define A_MAC_PORT_MTIP_CR4_1_VL2_1 0x11ac
+#define A_MAC_PORT_MTIP_CR4_1_VL3_0 0x11b0
+#define A_MAC_PORT_MTIP_CR4_1_VL3_1 0x11b4
+#define A_MAC_PORT_MTIP_CR4_1_PCS_MODE 0x11b8
+#define A_MAC_COMMON_CFG_0 0x38000
+
+#define S_T7_RX_POLARITY_INV 24
+#define M_T7_RX_POLARITY_INV 0xffU
+#define V_T7_RX_POLARITY_INV(x) ((x) << S_T7_RX_POLARITY_INV)
+#define G_T7_RX_POLARITY_INV(x) (((x) >> S_T7_RX_POLARITY_INV) & M_T7_RX_POLARITY_INV)
+
+#define S_T7_TX_POLARITY_INV 16
+#define M_T7_TX_POLARITY_INV 0xffU
+#define V_T7_TX_POLARITY_INV(x) ((x) << S_T7_TX_POLARITY_INV)
+#define G_T7_TX_POLARITY_INV(x) (((x) >> S_T7_TX_POLARITY_INV) & M_T7_TX_POLARITY_INV)
+
+#define S_T7_DEBUG_PORT_SEL 14
+#define M_T7_DEBUG_PORT_SEL 0x3U
+#define V_T7_DEBUG_PORT_SEL(x) ((x) << S_T7_DEBUG_PORT_SEL)
+#define G_T7_DEBUG_PORT_SEL(x) (((x) >> S_T7_DEBUG_PORT_SEL) & M_T7_DEBUG_PORT_SEL)
+
+#define S_MAC_SEPTY_CTL 8
+#define M_MAC_SEPTY_CTL 0x3fU
+#define V_MAC_SEPTY_CTL(x) ((x) << S_MAC_SEPTY_CTL)
+#define G_MAC_SEPTY_CTL(x) (((x) >> S_MAC_SEPTY_CTL) & M_MAC_SEPTY_CTL)
+
+#define S_T7_DEBUG_TX_RX_SEL 7
+#define V_T7_DEBUG_TX_RX_SEL(x) ((x) << S_T7_DEBUG_TX_RX_SEL)
+#define F_T7_DEBUG_TX_RX_SEL V_T7_DEBUG_TX_RX_SEL(1U)
+
+#define S_MAC_RDY_CTL 0
+#define M_MAC_RDY_CTL 0x3fU
+#define V_MAC_RDY_CTL(x) ((x) << S_MAC_RDY_CTL)
+#define G_MAC_RDY_CTL(x) (((x) >> S_MAC_RDY_CTL) & M_MAC_RDY_CTL)
+
+#define A_MAC_MTIP_RESET_CTRL_0 0x38004
+
+#define S_RESET_F91_REF_CLK_I 31
+#define V_RESET_F91_REF_CLK_I(x) ((x) << S_RESET_F91_REF_CLK_I)
+#define F_RESET_F91_REF_CLK_I V_RESET_F91_REF_CLK_I(1U)
+
+#define S_RESET_PCS000_REF_CLK_I 30
+#define V_RESET_PCS000_REF_CLK_I(x) ((x) << S_RESET_PCS000_REF_CLK_I)
+#define F_RESET_PCS000_REF_CLK_I V_RESET_PCS000_REF_CLK_I(1U)
+
+#define S_RESET_REF_CLK_I 29
+#define V_RESET_REF_CLK_I(x) ((x) << S_RESET_REF_CLK_I)
+#define F_RESET_REF_CLK_I V_RESET_REF_CLK_I(1U)
+
+#define S_RESET_SD_RX_CLK_I_0 28
+#define V_RESET_SD_RX_CLK_I_0(x) ((x) << S_RESET_SD_RX_CLK_I_0)
+#define F_RESET_SD_RX_CLK_I_0 V_RESET_SD_RX_CLK_I_0(1U)
+
+#define S_RESET_SD_RX_CLK_I_1 27
+#define V_RESET_SD_RX_CLK_I_1(x) ((x) << S_RESET_SD_RX_CLK_I_1)
+#define F_RESET_SD_RX_CLK_I_1 V_RESET_SD_RX_CLK_I_1(1U)
+
+#define S_RESET_SD_RX_CLK_I_2 26
+#define V_RESET_SD_RX_CLK_I_2(x) ((x) << S_RESET_SD_RX_CLK_I_2)
+#define F_RESET_SD_RX_CLK_I_2 V_RESET_SD_RX_CLK_I_2(1U)
+
+#define S_RESET_SD_RX_CLK_I_3 25
+#define V_RESET_SD_RX_CLK_I_3(x) ((x) << S_RESET_SD_RX_CLK_I_3)
+#define F_RESET_SD_RX_CLK_I_3 V_RESET_SD_RX_CLK_I_3(1U)
+
+#define S_RESET_SD_RX_CLK_I_4 24
+#define V_RESET_SD_RX_CLK_I_4(x) ((x) << S_RESET_SD_RX_CLK_I_4)
+#define F_RESET_SD_RX_CLK_I_4 V_RESET_SD_RX_CLK_I_4(1U)
+
+#define S_RESET_SD_RX_CLK_I_5 23
+#define V_RESET_SD_RX_CLK_I_5(x) ((x) << S_RESET_SD_RX_CLK_I_5)
+#define F_RESET_SD_RX_CLK_I_5 V_RESET_SD_RX_CLK_I_5(1U)
+
+#define S_RESET_SD_RX_CLK_I_6 22
+#define V_RESET_SD_RX_CLK_I_6(x) ((x) << S_RESET_SD_RX_CLK_I_6)
+#define F_RESET_SD_RX_CLK_I_6 V_RESET_SD_RX_CLK_I_6(1U)
+
+#define S_RESET_SD_RX_CLK_I_7 21
+#define V_RESET_SD_RX_CLK_I_7(x) ((x) << S_RESET_SD_RX_CLK_I_7)
+#define F_RESET_SD_RX_CLK_I_7 V_RESET_SD_RX_CLK_I_7(1U)
+
+#define S_RESET_SD_TX_CLK_I_0 20
+#define V_RESET_SD_TX_CLK_I_0(x) ((x) << S_RESET_SD_TX_CLK_I_0)
+#define F_RESET_SD_TX_CLK_I_0 V_RESET_SD_TX_CLK_I_0(1U)
+
+#define S_RESET_SD_TX_CLK_I_1 19
+#define V_RESET_SD_TX_CLK_I_1(x) ((x) << S_RESET_SD_TX_CLK_I_1)
+#define F_RESET_SD_TX_CLK_I_1 V_RESET_SD_TX_CLK_I_1(1U)
+
+#define S_RESET_SD_TX_CLK_I_2 18
+#define V_RESET_SD_TX_CLK_I_2(x) ((x) << S_RESET_SD_TX_CLK_I_2)
+#define F_RESET_SD_TX_CLK_I_2 V_RESET_SD_TX_CLK_I_2(1U)
+
+#define S_RESET_SD_TX_CLK_I_3 17
+#define V_RESET_SD_TX_CLK_I_3(x) ((x) << S_RESET_SD_TX_CLK_I_3)
+#define F_RESET_SD_TX_CLK_I_3 V_RESET_SD_TX_CLK_I_3(1U)
+
+#define S_RESET_SD_TX_CLK_I_4 16
+#define V_RESET_SD_TX_CLK_I_4(x) ((x) << S_RESET_SD_TX_CLK_I_4)
+#define F_RESET_SD_TX_CLK_I_4 V_RESET_SD_TX_CLK_I_4(1U)
+
+#define S_RESET_SD_TX_CLK_I_5 15
+#define V_RESET_SD_TX_CLK_I_5(x) ((x) << S_RESET_SD_TX_CLK_I_5)
+#define F_RESET_SD_TX_CLK_I_5 V_RESET_SD_TX_CLK_I_5(1U)
+
+#define S_RESET_SD_TX_CLK_I_6 14
+#define V_RESET_SD_TX_CLK_I_6(x) ((x) << S_RESET_SD_TX_CLK_I_6)
+#define F_RESET_SD_TX_CLK_I_6 V_RESET_SD_TX_CLK_I_6(1U)
+
+#define S_RESET_SD_TX_CLK_I_7 13
+#define V_RESET_SD_TX_CLK_I_7(x) ((x) << S_RESET_SD_TX_CLK_I_7)
+#define F_RESET_SD_TX_CLK_I_7 V_RESET_SD_TX_CLK_I_7(1U)
+
+#define S_RESET_XPCS_REF_CLK_I_0 12
+#define V_RESET_XPCS_REF_CLK_I_0(x) ((x) << S_RESET_XPCS_REF_CLK_I_0)
+#define F_RESET_XPCS_REF_CLK_I_0 V_RESET_XPCS_REF_CLK_I_0(1U)
+
+#define S_RESET_XPCS_REF_CLK_I_1 11
+#define V_RESET_XPCS_REF_CLK_I_1(x) ((x) << S_RESET_XPCS_REF_CLK_I_1)
+#define F_RESET_XPCS_REF_CLK_I_1 V_RESET_XPCS_REF_CLK_I_1(1U)
+
+#define S_RESET_FF_RX_CLK_0_I 9
+#define V_RESET_FF_RX_CLK_0_I(x) ((x) << S_RESET_FF_RX_CLK_0_I)
+#define F_RESET_FF_RX_CLK_0_I V_RESET_FF_RX_CLK_0_I(1U)
+
+#define S_RESET_FF_TX_CLK_0_I 8
+#define V_RESET_FF_TX_CLK_0_I(x) ((x) << S_RESET_FF_TX_CLK_0_I)
+#define F_RESET_FF_TX_CLK_0_I V_RESET_FF_TX_CLK_0_I(1U)
+
+#define S_RESET_RXCLK_0_I 7
+#define V_RESET_RXCLK_0_I(x) ((x) << S_RESET_RXCLK_0_I)
+#define F_RESET_RXCLK_0_I V_RESET_RXCLK_0_I(1U)
+
+#define S_RESET_TXCLK_0_I 6
+#define V_RESET_TXCLK_0_I(x) ((x) << S_RESET_TXCLK_0_I)
+#define F_RESET_TXCLK_0_I V_RESET_TXCLK_0_I(1U)
+
+#define S_RESET_FF_RX_CLK_1_I 5
+#define V_RESET_FF_RX_CLK_1_I(x) ((x) << S_RESET_FF_RX_CLK_1_I)
+#define F_RESET_FF_RX_CLK_1_I V_RESET_FF_RX_CLK_1_I(1U)
+
+#define S_RESET_FF_TX_CLK_1_I 4
+#define V_RESET_FF_TX_CLK_1_I(x) ((x) << S_RESET_FF_TX_CLK_1_I)
+#define F_RESET_FF_TX_CLK_1_I V_RESET_FF_TX_CLK_1_I(1U)
+
+#define S_RESET_RXCLK_1_I 3
+#define V_RESET_RXCLK_1_I(x) ((x) << S_RESET_RXCLK_1_I)
+#define F_RESET_RXCLK_1_I V_RESET_RXCLK_1_I(1U)
+
+#define S_RESET_TXCLK_1_I 2
+#define V_RESET_TXCLK_1_I(x) ((x) << S_RESET_TXCLK_1_I)
+#define F_RESET_TXCLK_1_I V_RESET_TXCLK_1_I(1U)
+
+#define S_XGMII_CLK_RESET_0 0
+#define V_XGMII_CLK_RESET_0(x) ((x) << S_XGMII_CLK_RESET_0)
+#define F_XGMII_CLK_RESET_0 V_XGMII_CLK_RESET_0(1U)
+
+#define A_MAC_MTIP_RESET_CTRL_1 0x38008
+
+#define S_RESET_FF_RX_CLK_2_I 31
+#define V_RESET_FF_RX_CLK_2_I(x) ((x) << S_RESET_FF_RX_CLK_2_I)
+#define F_RESET_FF_RX_CLK_2_I V_RESET_FF_RX_CLK_2_I(1U)
+
+#define S_RESET_FF_TX_CLK_2_I 30
+#define V_RESET_FF_TX_CLK_2_I(x) ((x) << S_RESET_FF_TX_CLK_2_I)
+#define F_RESET_FF_TX_CLK_2_I V_RESET_FF_TX_CLK_2_I(1U)
+
+#define S_RESET_RXCLK_2_I 29
+#define V_RESET_RXCLK_2_I(x) ((x) << S_RESET_RXCLK_2_I)
+#define F_RESET_RXCLK_2_I V_RESET_RXCLK_2_I(1U)
+
+#define S_RESET_TXCLK_2_I 28
+#define V_RESET_TXCLK_2_I(x) ((x) << S_RESET_TXCLK_2_I)
+#define F_RESET_TXCLK_2_I V_RESET_TXCLK_2_I(1U)
+
+#define S_RESET_FF_RX_CLK_3_I 27
+#define V_RESET_FF_RX_CLK_3_I(x) ((x) << S_RESET_FF_RX_CLK_3_I)
+#define F_RESET_FF_RX_CLK_3_I V_RESET_FF_RX_CLK_3_I(1U)
+
+#define S_RESET_FF_TX_CLK_3_I 26
+#define V_RESET_FF_TX_CLK_3_I(x) ((x) << S_RESET_FF_TX_CLK_3_I)
+#define F_RESET_FF_TX_CLK_3_I V_RESET_FF_TX_CLK_3_I(1U)
+
+#define S_RESET_RXCLK_3_I 25
+#define V_RESET_RXCLK_3_I(x) ((x) << S_RESET_RXCLK_3_I)
+#define F_RESET_RXCLK_3_I V_RESET_RXCLK_3_I(1U)
+
+#define S_RESET_TXCLK_3_I 24
+#define V_RESET_TXCLK_3_I(x) ((x) << S_RESET_TXCLK_3_I)
+#define F_RESET_TXCLK_3_I V_RESET_TXCLK_3_I(1U)
+
+#define S_RESET_FF_RX_CLK_4_I 23
+#define V_RESET_FF_RX_CLK_4_I(x) ((x) << S_RESET_FF_RX_CLK_4_I)
+#define F_RESET_FF_RX_CLK_4_I V_RESET_FF_RX_CLK_4_I(1U)
+
+#define S_RESET_FF_TX_CLK_4_I 22
+#define V_RESET_FF_TX_CLK_4_I(x) ((x) << S_RESET_FF_TX_CLK_4_I)
+#define F_RESET_FF_TX_CLK_4_I V_RESET_FF_TX_CLK_4_I(1U)
+
+#define S_RESET_RXCLK_4_I 21
+#define V_RESET_RXCLK_4_I(x) ((x) << S_RESET_RXCLK_4_I)
+#define F_RESET_RXCLK_4_I V_RESET_RXCLK_4_I(1U)
+
+#define S_RESET_TXCLK_4_I 20
+#define V_RESET_TXCLK_4_I(x) ((x) << S_RESET_TXCLK_4_I)
+#define F_RESET_TXCLK_4_I V_RESET_TXCLK_4_I(1U)
+
+#define S_RESET_FF_RX_CLK_5_I 19
+#define V_RESET_FF_RX_CLK_5_I(x) ((x) << S_RESET_FF_RX_CLK_5_I)
+#define F_RESET_FF_RX_CLK_5_I V_RESET_FF_RX_CLK_5_I(1U)
+
+#define S_RESET_FF_TX_CLK_5_I 18
+#define V_RESET_FF_TX_CLK_5_I(x) ((x) << S_RESET_FF_TX_CLK_5_I)
+#define F_RESET_FF_TX_CLK_5_I V_RESET_FF_TX_CLK_5_I(1U)
+
+#define S_RESET_RXCLK_5_I 17
+#define V_RESET_RXCLK_5_I(x) ((x) << S_RESET_RXCLK_5_I)
+#define F_RESET_RXCLK_5_I V_RESET_RXCLK_5_I(1U)
+
+#define S_RESET_TXCLK_5_I 16
+#define V_RESET_TXCLK_5_I(x) ((x) << S_RESET_TXCLK_5_I)
+#define F_RESET_TXCLK_5_I V_RESET_TXCLK_5_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_0_I 15
+#define V_RESET_SD_RX_CLK_AN_0_I(x) ((x) << S_RESET_SD_RX_CLK_AN_0_I)
+#define F_RESET_SD_RX_CLK_AN_0_I V_RESET_SD_RX_CLK_AN_0_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_0_I 14
+#define V_RESET_SD_TX_CLK_AN_0_I(x) ((x) << S_RESET_SD_TX_CLK_AN_0_I)
+#define F_RESET_SD_TX_CLK_AN_0_I V_RESET_SD_TX_CLK_AN_0_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_1_I 13
+#define V_RESET_SD_RX_CLK_AN_1_I(x) ((x) << S_RESET_SD_RX_CLK_AN_1_I)
+#define F_RESET_SD_RX_CLK_AN_1_I V_RESET_SD_RX_CLK_AN_1_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_1_I 12
+#define V_RESET_SD_TX_CLK_AN_1_I(x) ((x) << S_RESET_SD_TX_CLK_AN_1_I)
+#define F_RESET_SD_TX_CLK_AN_1_I V_RESET_SD_TX_CLK_AN_1_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_2_I 11
+#define V_RESET_SD_RX_CLK_AN_2_I(x) ((x) << S_RESET_SD_RX_CLK_AN_2_I)
+#define F_RESET_SD_RX_CLK_AN_2_I V_RESET_SD_RX_CLK_AN_2_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_2_I 10
+#define V_RESET_SD_TX_CLK_AN_2_I(x) ((x) << S_RESET_SD_TX_CLK_AN_2_I)
+#define F_RESET_SD_TX_CLK_AN_2_I V_RESET_SD_TX_CLK_AN_2_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_3_I 9
+#define V_RESET_SD_RX_CLK_AN_3_I(x) ((x) << S_RESET_SD_RX_CLK_AN_3_I)
+#define F_RESET_SD_RX_CLK_AN_3_I V_RESET_SD_RX_CLK_AN_3_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_3_I 8
+#define V_RESET_SD_TX_CLK_AN_3_I(x) ((x) << S_RESET_SD_TX_CLK_AN_3_I)
+#define F_RESET_SD_TX_CLK_AN_3_I V_RESET_SD_TX_CLK_AN_3_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_4_I 7
+#define V_RESET_SD_RX_CLK_AN_4_I(x) ((x) << S_RESET_SD_RX_CLK_AN_4_I)
+#define F_RESET_SD_RX_CLK_AN_4_I V_RESET_SD_RX_CLK_AN_4_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_4_I 6
+#define V_RESET_SD_TX_CLK_AN_4_I(x) ((x) << S_RESET_SD_TX_CLK_AN_4_I)
+#define F_RESET_SD_TX_CLK_AN_4_I V_RESET_SD_TX_CLK_AN_4_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_5_I 5
+#define V_RESET_SD_RX_CLK_AN_5_I(x) ((x) << S_RESET_SD_RX_CLK_AN_5_I)
+#define F_RESET_SD_RX_CLK_AN_5_I V_RESET_SD_RX_CLK_AN_5_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_5_I 4
+#define V_RESET_SD_TX_CLK_AN_5_I(x) ((x) << S_RESET_SD_TX_CLK_AN_5_I)
+#define F_RESET_SD_TX_CLK_AN_5_I V_RESET_SD_TX_CLK_AN_5_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_6_I 3
+#define V_RESET_SD_RX_CLK_AN_6_I(x) ((x) << S_RESET_SD_RX_CLK_AN_6_I)
+#define F_RESET_SD_RX_CLK_AN_6_I V_RESET_SD_RX_CLK_AN_6_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_6_I 2
+#define V_RESET_SD_TX_CLK_AN_6_I(x) ((x) << S_RESET_SD_TX_CLK_AN_6_I)
+#define F_RESET_SD_TX_CLK_AN_6_I V_RESET_SD_TX_CLK_AN_6_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_7_I 1
+#define V_RESET_SD_RX_CLK_AN_7_I(x) ((x) << S_RESET_SD_RX_CLK_AN_7_I)
+#define F_RESET_SD_RX_CLK_AN_7_I V_RESET_SD_RX_CLK_AN_7_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_7_I 0
+#define V_RESET_SD_TX_CLK_AN_7_I(x) ((x) << S_RESET_SD_TX_CLK_AN_7_I)
+#define F_RESET_SD_TX_CLK_AN_7_I V_RESET_SD_TX_CLK_AN_7_I(1U)
+
+#define A_MAC_MTIP_RESET_CTRL_2 0x3800c
+
+#define S_RESET_SGMII_TXCLK_I_3 31
+#define V_RESET_SGMII_TXCLK_I_3(x) ((x) << S_RESET_SGMII_TXCLK_I_3)
+#define F_RESET_SGMII_TXCLK_I_3 V_RESET_SGMII_TXCLK_I_3(1U)
+
+#define S_RESET_SGMII_RXCLK_I_3 30
+#define V_RESET_SGMII_RXCLK_I_3(x) ((x) << S_RESET_SGMII_RXCLK_I_3)
+#define F_RESET_SGMII_RXCLK_I_3 V_RESET_SGMII_RXCLK_I_3(1U)
+
+#define S_RESET_SGMII_TXCLK_I_2 29
+#define V_RESET_SGMII_TXCLK_I_2(x) ((x) << S_RESET_SGMII_TXCLK_I_2)
+#define F_RESET_SGMII_TXCLK_I_2 V_RESET_SGMII_TXCLK_I_2(1U)
+
+#define S_RESET_SGMII_RXCLK_I_2 28
+#define V_RESET_SGMII_RXCLK_I_2(x) ((x) << S_RESET_SGMII_RXCLK_I_2)
+#define F_RESET_SGMII_RXCLK_I_2 V_RESET_SGMII_RXCLK_I_2(1U)
+
+#define S_RESET_SGMII_TXCLK_I_1 27
+#define V_RESET_SGMII_TXCLK_I_1(x) ((x) << S_RESET_SGMII_TXCLK_I_1)
+#define F_RESET_SGMII_TXCLK_I_1 V_RESET_SGMII_TXCLK_I_1(1U)
+
+#define S_RESET_SGMII_RXCLK_I_1 26
+#define V_RESET_SGMII_RXCLK_I_1(x) ((x) << S_RESET_SGMII_RXCLK_I_1)
+#define F_RESET_SGMII_RXCLK_I_1 V_RESET_SGMII_RXCLK_I_1(1U)
+
+#define S_RESET_SGMII_TXCLK_I_0 25
+#define V_RESET_SGMII_TXCLK_I_0(x) ((x) << S_RESET_SGMII_TXCLK_I_0)
+#define F_RESET_SGMII_TXCLK_I_0 V_RESET_SGMII_TXCLK_I_0(1U)
+
+#define S_RESET_SGMII_RXCLK_I_0 24
+#define V_RESET_SGMII_RXCLK_I_0(x) ((x) << S_RESET_SGMII_RXCLK_I_0)
+#define F_RESET_SGMII_RXCLK_I_0 V_RESET_SGMII_RXCLK_I_0(1U)
+
+#define S_MTIPSD7TXRST 23
+#define V_MTIPSD7TXRST(x) ((x) << S_MTIPSD7TXRST)
+#define F_MTIPSD7TXRST V_MTIPSD7TXRST(1U)
+
+#define S_MTIPSD6TXRST 22
+#define V_MTIPSD6TXRST(x) ((x) << S_MTIPSD6TXRST)
+#define F_MTIPSD6TXRST V_MTIPSD6TXRST(1U)
+
+#define S_MTIPSD5TXRST 21
+#define V_MTIPSD5TXRST(x) ((x) << S_MTIPSD5TXRST)
+#define F_MTIPSD5TXRST V_MTIPSD5TXRST(1U)
+
+#define S_MTIPSD4TXRST 20
+#define V_MTIPSD4TXRST(x) ((x) << S_MTIPSD4TXRST)
+#define F_MTIPSD4TXRST V_MTIPSD4TXRST(1U)
+
+#define S_T7_MTIPSD3TXRST 19
+#define V_T7_MTIPSD3TXRST(x) ((x) << S_T7_MTIPSD3TXRST)
+#define F_T7_MTIPSD3TXRST V_T7_MTIPSD3TXRST(1U)
+
+#define S_T7_MTIPSD2TXRST 18
+#define V_T7_MTIPSD2TXRST(x) ((x) << S_T7_MTIPSD2TXRST)
+#define F_T7_MTIPSD2TXRST V_T7_MTIPSD2TXRST(1U)
+
+#define S_T7_MTIPSD1TXRST 17
+#define V_T7_MTIPSD1TXRST(x) ((x) << S_T7_MTIPSD1TXRST)
+#define F_T7_MTIPSD1TXRST V_T7_MTIPSD1TXRST(1U)
+
+#define S_T7_MTIPSD0TXRST 16
+#define V_T7_MTIPSD0TXRST(x) ((x) << S_T7_MTIPSD0TXRST)
+#define F_T7_MTIPSD0TXRST V_T7_MTIPSD0TXRST(1U)
+
+#define S_MTIPSD7RXRST 15
+#define V_MTIPSD7RXRST(x) ((x) << S_MTIPSD7RXRST)
+#define F_MTIPSD7RXRST V_MTIPSD7RXRST(1U)
+
+#define S_MTIPSD6RXRST 14
+#define V_MTIPSD6RXRST(x) ((x) << S_MTIPSD6RXRST)
+#define F_MTIPSD6RXRST V_MTIPSD6RXRST(1U)
+
+#define S_MTIPSD5RXRST 13
+#define V_MTIPSD5RXRST(x) ((x) << S_MTIPSD5RXRST)
+#define F_MTIPSD5RXRST V_MTIPSD5RXRST(1U)
+
+#define S_MTIPSD4RXRST 12
+#define V_MTIPSD4RXRST(x) ((x) << S_MTIPSD4RXRST)
+#define F_MTIPSD4RXRST V_MTIPSD4RXRST(1U)
+
+#define S_T7_MTIPSD3RXRST 11
+#define V_T7_MTIPSD3RXRST(x) ((x) << S_T7_MTIPSD3RXRST)
+#define F_T7_MTIPSD3RXRST V_T7_MTIPSD3RXRST(1U)
+
+#define S_T7_MTIPSD2RXRST 10
+#define V_T7_MTIPSD2RXRST(x) ((x) << S_T7_MTIPSD2RXRST)
+#define F_T7_MTIPSD2RXRST V_T7_MTIPSD2RXRST(1U)
+
+#define S_T7_MTIPSD1RXRST 9
+#define V_T7_MTIPSD1RXRST(x) ((x) << S_T7_MTIPSD1RXRST)
+#define F_T7_MTIPSD1RXRST V_T7_MTIPSD1RXRST(1U)
+
+#define S_T7_MTIPSD0RXRST 8
+#define V_T7_MTIPSD0RXRST(x) ((x) << S_T7_MTIPSD0RXRST)
+#define F_T7_MTIPSD0RXRST V_T7_MTIPSD0RXRST(1U)
+
+#define S_RESET_REG_CLK_AN_0_I 7
+#define V_RESET_REG_CLK_AN_0_I(x) ((x) << S_RESET_REG_CLK_AN_0_I)
+#define F_RESET_REG_CLK_AN_0_I V_RESET_REG_CLK_AN_0_I(1U)
+
+#define S_RESET_REG_CLK_AN_1_I 6
+#define V_RESET_REG_CLK_AN_1_I(x) ((x) << S_RESET_REG_CLK_AN_1_I)
+#define F_RESET_REG_CLK_AN_1_I V_RESET_REG_CLK_AN_1_I(1U)
+
+#define S_RESET_REG_CLK_AN_2_I 5
+#define V_RESET_REG_CLK_AN_2_I(x) ((x) << S_RESET_REG_CLK_AN_2_I)
+#define F_RESET_REG_CLK_AN_2_I V_RESET_REG_CLK_AN_2_I(1U)
+
+#define S_RESET_REG_CLK_AN_3_I 4
+#define V_RESET_REG_CLK_AN_3_I(x) ((x) << S_RESET_REG_CLK_AN_3_I)
+#define F_RESET_REG_CLK_AN_3_I V_RESET_REG_CLK_AN_3_I(1U)
+
+#define S_RESET_REG_CLK_AN_4_I 3
+#define V_RESET_REG_CLK_AN_4_I(x) ((x) << S_RESET_REG_CLK_AN_4_I)
+#define F_RESET_REG_CLK_AN_4_I V_RESET_REG_CLK_AN_4_I(1U)
+
+#define S_RESET_REG_CLK_AN_5_I 2
+#define V_RESET_REG_CLK_AN_5_I(x) ((x) << S_RESET_REG_CLK_AN_5_I)
+#define F_RESET_REG_CLK_AN_5_I V_RESET_REG_CLK_AN_5_I(1U)
+
+#define S_RESET_REG_CLK_AN_6_I 1
+#define V_RESET_REG_CLK_AN_6_I(x) ((x) << S_RESET_REG_CLK_AN_6_I)
+#define F_RESET_REG_CLK_AN_6_I V_RESET_REG_CLK_AN_6_I(1U)
+
+#define S_RESET_REG_CLK_AN_7_I 0
+#define V_RESET_REG_CLK_AN_7_I(x) ((x) << S_RESET_REG_CLK_AN_7_I)
+#define F_RESET_REG_CLK_AN_7_I V_RESET_REG_CLK_AN_7_I(1U)
+
+#define A_MAC_MTIP_CLK_CTRL_0 0x38010
+
+#define S_F91_REF_CLK_I_G 31
+#define V_F91_REF_CLK_I_G(x) ((x) << S_F91_REF_CLK_I_G)
+#define F_F91_REF_CLK_I_G V_F91_REF_CLK_I_G(1U)
+
+#define S_PCS000_REF_CLK_I_G 30
+#define V_PCS000_REF_CLK_I_G(x) ((x) << S_PCS000_REF_CLK_I_G)
+#define F_PCS000_REF_CLK_I_G V_PCS000_REF_CLK_I_G(1U)
+
+#define S_REF_CLK_I_G 29
+#define V_REF_CLK_I_G(x) ((x) << S_REF_CLK_I_G)
+#define F_REF_CLK_I_G V_REF_CLK_I_G(1U)
+
+#define S_SD_RX_CLK_I_0_G 28
+#define V_SD_RX_CLK_I_0_G(x) ((x) << S_SD_RX_CLK_I_0_G)
+#define F_SD_RX_CLK_I_0_G V_SD_RX_CLK_I_0_G(1U)
+
+#define S_SD_RX_CLK_I_1_G 27
+#define V_SD_RX_CLK_I_1_G(x) ((x) << S_SD_RX_CLK_I_1_G)
+#define F_SD_RX_CLK_I_1_G V_SD_RX_CLK_I_1_G(1U)
+
+#define S_SD_RX_CLK_I_2_G 26
+#define V_SD_RX_CLK_I_2_G(x) ((x) << S_SD_RX_CLK_I_2_G)
+#define F_SD_RX_CLK_I_2_G V_SD_RX_CLK_I_2_G(1U)
+
+#define S_SD_RX_CLK_I_3_G 25
+#define V_SD_RX_CLK_I_3_G(x) ((x) << S_SD_RX_CLK_I_3_G)
+#define F_SD_RX_CLK_I_3_G V_SD_RX_CLK_I_3_G(1U)
+
+#define S_SD_RX_CLK_I_4_G 24
+#define V_SD_RX_CLK_I_4_G(x) ((x) << S_SD_RX_CLK_I_4_G)
+#define F_SD_RX_CLK_I_4_G V_SD_RX_CLK_I_4_G(1U)
+
+#define S_SD_RX_CLK_I_5_G 23
+#define V_SD_RX_CLK_I_5_G(x) ((x) << S_SD_RX_CLK_I_5_G)
+#define F_SD_RX_CLK_I_5_G V_SD_RX_CLK_I_5_G(1U)
+
+#define S_SD_RX_CLK_I_6_G 22
+#define V_SD_RX_CLK_I_6_G(x) ((x) << S_SD_RX_CLK_I_6_G)
+#define F_SD_RX_CLK_I_6_G V_SD_RX_CLK_I_6_G(1U)
+
+#define S_SD_RX_CLK_I_7_G 21
+#define V_SD_RX_CLK_I_7_G(x) ((x) << S_SD_RX_CLK_I_7_G)
+#define F_SD_RX_CLK_I_7_G V_SD_RX_CLK_I_7_G(1U)
+
+#define S_SD_TX_CLK_I_0_G 20
+#define V_SD_TX_CLK_I_0_G(x) ((x) << S_SD_TX_CLK_I_0_G)
+#define F_SD_TX_CLK_I_0_G V_SD_TX_CLK_I_0_G(1U)
+
+#define S_SD_TX_CLK_I_1_G 19
+#define V_SD_TX_CLK_I_1_G(x) ((x) << S_SD_TX_CLK_I_1_G)
+#define F_SD_TX_CLK_I_1_G V_SD_TX_CLK_I_1_G(1U)
+
+#define S_SD_TX_CLK_I_2_G 18
+#define V_SD_TX_CLK_I_2_G(x) ((x) << S_SD_TX_CLK_I_2_G)
+#define F_SD_TX_CLK_I_2_G V_SD_TX_CLK_I_2_G(1U)
+
+#define S_SD_TX_CLK_I_3_G 17
+#define V_SD_TX_CLK_I_3_G(x) ((x) << S_SD_TX_CLK_I_3_G)
+#define F_SD_TX_CLK_I_3_G V_SD_TX_CLK_I_3_G(1U)
+
+#define S_SD_TX_CLK_I_4_G 16
+#define V_SD_TX_CLK_I_4_G(x) ((x) << S_SD_TX_CLK_I_4_G)
+#define F_SD_TX_CLK_I_4_G V_SD_TX_CLK_I_4_G(1U)
+
+#define S_SD_TX_CLK_I_5_G 15
+#define V_SD_TX_CLK_I_5_G(x) ((x) << S_SD_TX_CLK_I_5_G)
+#define F_SD_TX_CLK_I_5_G V_SD_TX_CLK_I_5_G(1U)
+
+#define S_SD_TX_CLK_I_6_G 14
+#define V_SD_TX_CLK_I_6_G(x) ((x) << S_SD_TX_CLK_I_6_G)
+#define F_SD_TX_CLK_I_6_G V_SD_TX_CLK_I_6_G(1U)
+
+#define S_SD_TX_CLK_I_7_G 13
+#define V_SD_TX_CLK_I_7_G(x) ((x) << S_SD_TX_CLK_I_7_G)
+#define F_SD_TX_CLK_I_7_G V_SD_TX_CLK_I_7_G(1U)
+
+#define S_XPCS_REF_CLK_I_0_G 12
+#define V_XPCS_REF_CLK_I_0_G(x) ((x) << S_XPCS_REF_CLK_I_0_G)
+#define F_XPCS_REF_CLK_I_0_G V_XPCS_REF_CLK_I_0_G(1U)
+
+#define S_XPCS_REF_CLK_I_1_G 11
+#define V_XPCS_REF_CLK_I_1_G(x) ((x) << S_XPCS_REF_CLK_I_1_G)
+#define F_XPCS_REF_CLK_I_1_G V_XPCS_REF_CLK_I_1_G(1U)
+
+#define S_REG_CLK_I_G 10
+#define V_REG_CLK_I_G(x) ((x) << S_REG_CLK_I_G)
+#define F_REG_CLK_I_G V_REG_CLK_I_G(1U)
+
+#define S_FF_RX_CLK_0_I_G 9
+#define V_FF_RX_CLK_0_I_G(x) ((x) << S_FF_RX_CLK_0_I_G)
+#define F_FF_RX_CLK_0_I_G V_FF_RX_CLK_0_I_G(1U)
+
+#define S_FF_TX_CLK_0_I_G 8
+#define V_FF_TX_CLK_0_I_G(x) ((x) << S_FF_TX_CLK_0_I_G)
+#define F_FF_TX_CLK_0_I_G V_FF_TX_CLK_0_I_G(1U)
+
+#define S_RXCLK_0_I_G 7
+#define V_RXCLK_0_I_G(x) ((x) << S_RXCLK_0_I_G)
+#define F_RXCLK_0_I_G V_RXCLK_0_I_G(1U)
+
+#define S_TXCLK_0_I_G 6
+#define V_TXCLK_0_I_G(x) ((x) << S_TXCLK_0_I_G)
+#define F_TXCLK_0_I_G V_TXCLK_0_I_G(1U)
+
+#define S_FF_RX_CLK_1_I_G 5
+#define V_FF_RX_CLK_1_I_G(x) ((x) << S_FF_RX_CLK_1_I_G)
+#define F_FF_RX_CLK_1_I_G V_FF_RX_CLK_1_I_G(1U)
+
+#define S_FF_TX_CLK_1_I_G 4
+#define V_FF_TX_CLK_1_I_G(x) ((x) << S_FF_TX_CLK_1_I_G)
+#define F_FF_TX_CLK_1_I_G V_FF_TX_CLK_1_I_G(1U)
+
+#define S_RXCLK_1_I_G 3
+#define V_RXCLK_1_I_G(x) ((x) << S_RXCLK_1_I_G)
+#define F_RXCLK_1_I_G V_RXCLK_1_I_G(1U)
+
+#define S_TXCLK_1_I_G 2
+#define V_TXCLK_1_I_G(x) ((x) << S_TXCLK_1_I_G)
+#define F_TXCLK_1_I_G V_TXCLK_1_I_G(1U)
+
+#define A_MAC_MTIP_CLK_CTRL_1 0x38014
+
+#define S_FF_RX_CLK_2_I_G 31
+#define V_FF_RX_CLK_2_I_G(x) ((x) << S_FF_RX_CLK_2_I_G)
+#define F_FF_RX_CLK_2_I_G V_FF_RX_CLK_2_I_G(1U)
+
+#define S_FF_TX_CLK_2_I_G 30
+#define V_FF_TX_CLK_2_I_G(x) ((x) << S_FF_TX_CLK_2_I_G)
+#define F_FF_TX_CLK_2_I_G V_FF_TX_CLK_2_I_G(1U)
+
+#define S_RXCLK_2_I_G 29
+#define V_RXCLK_2_I_G(x) ((x) << S_RXCLK_2_I_G)
+#define F_RXCLK_2_I_G V_RXCLK_2_I_G(1U)
+
+#define S_TXCLK_2_I_G 28
+#define V_TXCLK_2_I_G(x) ((x) << S_TXCLK_2_I_G)
+#define F_TXCLK_2_I_G V_TXCLK_2_I_G(1U)
+
+#define S_FF_RX_CLK_3_I_G 27
+#define V_FF_RX_CLK_3_I_G(x) ((x) << S_FF_RX_CLK_3_I_G)
+#define F_FF_RX_CLK_3_I_G V_FF_RX_CLK_3_I_G(1U)
+
+#define S_FF_TX_CLK_3_I_G 26
+#define V_FF_TX_CLK_3_I_G(x) ((x) << S_FF_TX_CLK_3_I_G)
+#define F_FF_TX_CLK_3_I_G V_FF_TX_CLK_3_I_G(1U)
+
+#define S_RXCLK_3_I_G 25
+#define V_RXCLK_3_I_G(x) ((x) << S_RXCLK_3_I_G)
+#define F_RXCLK_3_I_G V_RXCLK_3_I_G(1U)
+
+#define S_TXCLK_3_I_G 24
+#define V_TXCLK_3_I_G(x) ((x) << S_TXCLK_3_I_G)
+#define F_TXCLK_3_I_G V_TXCLK_3_I_G(1U)
+
+#define S_FF_RX_CLK_4_I_G 23
+#define V_FF_RX_CLK_4_I_G(x) ((x) << S_FF_RX_CLK_4_I_G)
+#define F_FF_RX_CLK_4_I_G V_FF_RX_CLK_4_I_G(1U)
+
+#define S_FF_TX_CLK_4_I_G 22
+#define V_FF_TX_CLK_4_I_G(x) ((x) << S_FF_TX_CLK_4_I_G)
+#define F_FF_TX_CLK_4_I_G V_FF_TX_CLK_4_I_G(1U)
+
+#define S_RXCLK_4_I_G 21
+#define V_RXCLK_4_I_G(x) ((x) << S_RXCLK_4_I_G)
+#define F_RXCLK_4_I_G V_RXCLK_4_I_G(1U)
+
+#define S_TXCLK_4_I_G 20
+#define V_TXCLK_4_I_G(x) ((x) << S_TXCLK_4_I_G)
+#define F_TXCLK_4_I_G V_TXCLK_4_I_G(1U)
+
+#define S_FF_RX_CLK_5_I_G 19
+#define V_FF_RX_CLK_5_I_G(x) ((x) << S_FF_RX_CLK_5_I_G)
+#define F_FF_RX_CLK_5_I_G V_FF_RX_CLK_5_I_G(1U)
+
+#define S_FF_TX_CLK_5_I_G 18
+#define V_FF_TX_CLK_5_I_G(x) ((x) << S_FF_TX_CLK_5_I_G)
+#define F_FF_TX_CLK_5_I_G V_FF_TX_CLK_5_I_G(1U)
+
+#define S_RXCLK_5_I_G 17
+#define V_RXCLK_5_I_G(x) ((x) << S_RXCLK_5_I_G)
+#define F_RXCLK_5_I_G V_RXCLK_5_I_G(1U)
+
+#define S_TXCLK_5_I_G 16
+#define V_TXCLK_5_I_G(x) ((x) << S_TXCLK_5_I_G)
+#define F_TXCLK_5_I_G V_TXCLK_5_I_G(1U)
+
+#define S_SD_RX_CLK_AN_0_I_G 15
+#define V_SD_RX_CLK_AN_0_I_G(x) ((x) << S_SD_RX_CLK_AN_0_I_G)
+#define F_SD_RX_CLK_AN_0_I_G V_SD_RX_CLK_AN_0_I_G(1U)
+
+#define S_SD_TX_CLK_AN_0_I_G 14
+#define V_SD_TX_CLK_AN_0_I_G(x) ((x) << S_SD_TX_CLK_AN_0_I_G)
+#define F_SD_TX_CLK_AN_0_I_G V_SD_TX_CLK_AN_0_I_G(1U)
+
+#define S_SD_RX_CLK_AN_1_I_G 13
+#define V_SD_RX_CLK_AN_1_I_G(x) ((x) << S_SD_RX_CLK_AN_1_I_G)
+#define F_SD_RX_CLK_AN_1_I_G V_SD_RX_CLK_AN_1_I_G(1U)
+
+#define S_SD_TX_CLK_AN_1_I_G 12
+#define V_SD_TX_CLK_AN_1_I_G(x) ((x) << S_SD_TX_CLK_AN_1_I_G)
+#define F_SD_TX_CLK_AN_1_I_G V_SD_TX_CLK_AN_1_I_G(1U)
+
+#define S_SD_RX_CLK_AN_2_I_G 11
+#define V_SD_RX_CLK_AN_2_I_G(x) ((x) << S_SD_RX_CLK_AN_2_I_G)
+#define F_SD_RX_CLK_AN_2_I_G V_SD_RX_CLK_AN_2_I_G(1U)
+
+#define S_SD_TX_CLK_AN_2_I_G 10
+#define V_SD_TX_CLK_AN_2_I_G(x) ((x) << S_SD_TX_CLK_AN_2_I_G)
+#define F_SD_TX_CLK_AN_2_I_G V_SD_TX_CLK_AN_2_I_G(1U)
+
+#define S_SD_RX_CLK_AN_3_I_G 9
+#define V_SD_RX_CLK_AN_3_I_G(x) ((x) << S_SD_RX_CLK_AN_3_I_G)
+#define F_SD_RX_CLK_AN_3_I_G V_SD_RX_CLK_AN_3_I_G(1U)
+
+#define S_SD_TX_CLK_AN_3_I_G 8
+#define V_SD_TX_CLK_AN_3_I_G(x) ((x) << S_SD_TX_CLK_AN_3_I_G)
+#define F_SD_TX_CLK_AN_3_I_G V_SD_TX_CLK_AN_3_I_G(1U)
+
+#define S_SD_RX_CLK_AN_4_I_G 7
+#define V_SD_RX_CLK_AN_4_I_G(x) ((x) << S_SD_RX_CLK_AN_4_I_G)
+#define F_SD_RX_CLK_AN_4_I_G V_SD_RX_CLK_AN_4_I_G(1U)
+
+#define S_SD_TX_CLK_AN_4_I_G 6
+#define V_SD_TX_CLK_AN_4_I_G(x) ((x) << S_SD_TX_CLK_AN_4_I_G)
+#define F_SD_TX_CLK_AN_4_I_G V_SD_TX_CLK_AN_4_I_G(1U)
+
+#define S_SD_RX_CLK_AN_5_I_G 5
+#define V_SD_RX_CLK_AN_5_I_G(x) ((x) << S_SD_RX_CLK_AN_5_I_G)
+#define F_SD_RX_CLK_AN_5_I_G V_SD_RX_CLK_AN_5_I_G(1U)
+
+#define S_SD_TX_CLK_AN_5_I_G 4
+#define V_SD_TX_CLK_AN_5_I_G(x) ((x) << S_SD_TX_CLK_AN_5_I_G)
+#define F_SD_TX_CLK_AN_5_I_G V_SD_TX_CLK_AN_5_I_G(1U)
+
+#define S_SD_RX_CLK_AN_6_I_G 3
+#define V_SD_RX_CLK_AN_6_I_G(x) ((x) << S_SD_RX_CLK_AN_6_I_G)
+#define F_SD_RX_CLK_AN_6_I_G V_SD_RX_CLK_AN_6_I_G(1U)
+
+#define S_SD_TX_CLK_AN_6_I_G 2
+#define V_SD_TX_CLK_AN_6_I_G(x) ((x) << S_SD_TX_CLK_AN_6_I_G)
+#define F_SD_TX_CLK_AN_6_I_G V_SD_TX_CLK_AN_6_I_G(1U)
+
+#define S_SD_RX_CLK_AN_7_I_G 1
+#define V_SD_RX_CLK_AN_7_I_G(x) ((x) << S_SD_RX_CLK_AN_7_I_G)
+#define F_SD_RX_CLK_AN_7_I_G V_SD_RX_CLK_AN_7_I_G(1U)
+
+#define S_SD_TX_CLK_AN_7_I_G 0
+#define V_SD_TX_CLK_AN_7_I_G(x) ((x) << S_SD_TX_CLK_AN_7_I_G)
+#define F_SD_TX_CLK_AN_7_I_G V_SD_TX_CLK_AN_7_I_G(1U)
+
+#define A_MAC_MTIP_CLK_CTRL_2 0x38018
+
+#define S_SD_RX_CLK_0_G 31
+#define V_SD_RX_CLK_0_G(x) ((x) << S_SD_RX_CLK_0_G)
+#define F_SD_RX_CLK_0_G V_SD_RX_CLK_0_G(1U)
+
+#define S_SD_RX_CLK_1_G 30
+#define V_SD_RX_CLK_1_G(x) ((x) << S_SD_RX_CLK_1_G)
+#define F_SD_RX_CLK_1_G V_SD_RX_CLK_1_G(1U)
+
+#define S_SD_RX_CLK_2_G 29
+#define V_SD_RX_CLK_2_G(x) ((x) << S_SD_RX_CLK_2_G)
+#define F_SD_RX_CLK_2_G V_SD_RX_CLK_2_G(1U)
+
+#define S_SD_RX_CLK_3_G 28
+#define V_SD_RX_CLK_3_G(x) ((x) << S_SD_RX_CLK_3_G)
+#define F_SD_RX_CLK_3_G V_SD_RX_CLK_3_G(1U)
+
+#define S_SD_RX_CLK_4_G 27
+#define V_SD_RX_CLK_4_G(x) ((x) << S_SD_RX_CLK_4_G)
+#define F_SD_RX_CLK_4_G V_SD_RX_CLK_4_G(1U)
+
+#define S_SD_RX_CLK_5_G 26
+#define V_SD_RX_CLK_5_G(x) ((x) << S_SD_RX_CLK_5_G)
+#define F_SD_RX_CLK_5_G V_SD_RX_CLK_5_G(1U)
+
+#define S_SD_RX_CLK_6_G 25
+#define V_SD_RX_CLK_6_G(x) ((x) << S_SD_RX_CLK_6_G)
+#define F_SD_RX_CLK_6_G V_SD_RX_CLK_6_G(1U)
+
+#define S_SD_RX_CLK_7_G 24
+#define V_SD_RX_CLK_7_G(x) ((x) << S_SD_RX_CLK_7_G)
+#define F_SD_RX_CLK_7_G V_SD_RX_CLK_7_G(1U)
+
+#define S_SD_TX_CLK_0_G 23
+#define V_SD_TX_CLK_0_G(x) ((x) << S_SD_TX_CLK_0_G)
+#define F_SD_TX_CLK_0_G V_SD_TX_CLK_0_G(1U)
+
+#define S_SD_TX_CLK_1_G 22
+#define V_SD_TX_CLK_1_G(x) ((x) << S_SD_TX_CLK_1_G)
+#define F_SD_TX_CLK_1_G V_SD_TX_CLK_1_G(1U)
+
+#define S_SD_TX_CLK_2_G 21
+#define V_SD_TX_CLK_2_G(x) ((x) << S_SD_TX_CLK_2_G)
+#define F_SD_TX_CLK_2_G V_SD_TX_CLK_2_G(1U)
+
+#define S_SD_TX_CLK_3_G 20
+#define V_SD_TX_CLK_3_G(x) ((x) << S_SD_TX_CLK_3_G)
+#define F_SD_TX_CLK_3_G V_SD_TX_CLK_3_G(1U)
+
+#define S_SD_TX_CLK_4_G 19
+#define V_SD_TX_CLK_4_G(x) ((x) << S_SD_TX_CLK_4_G)
+#define F_SD_TX_CLK_4_G V_SD_TX_CLK_4_G(1U)
+
+#define S_SD_TX_CLK_5_G 18
+#define V_SD_TX_CLK_5_G(x) ((x) << S_SD_TX_CLK_5_G)
+#define F_SD_TX_CLK_5_G V_SD_TX_CLK_5_G(1U)
+
+#define S_SD_TX_CLK_6_G 17
+#define V_SD_TX_CLK_6_G(x) ((x) << S_SD_TX_CLK_6_G)
+#define F_SD_TX_CLK_6_G V_SD_TX_CLK_6_G(1U)
+
+#define S_SD_TX_CLK_7_G 16
+#define V_SD_TX_CLK_7_G(x) ((x) << S_SD_TX_CLK_7_G)
+#define F_SD_TX_CLK_7_G V_SD_TX_CLK_7_G(1U)
+
+#define S_SD_RX_CLK_AEC_0_G 15
+#define V_SD_RX_CLK_AEC_0_G(x) ((x) << S_SD_RX_CLK_AEC_0_G)
+#define F_SD_RX_CLK_AEC_0_G V_SD_RX_CLK_AEC_0_G(1U)
+
+#define S_SD_RX_CLK_AEC_1_G 14
+#define V_SD_RX_CLK_AEC_1_G(x) ((x) << S_SD_RX_CLK_AEC_1_G)
+#define F_SD_RX_CLK_AEC_1_G V_SD_RX_CLK_AEC_1_G(1U)
+
+#define S_SD_RX_CLK_AEC_2_G 13
+#define V_SD_RX_CLK_AEC_2_G(x) ((x) << S_SD_RX_CLK_AEC_2_G)
+#define F_SD_RX_CLK_AEC_2_G V_SD_RX_CLK_AEC_2_G(1U)
+
+#define S_SD_RX_CLK_AEC_3_G 12
+#define V_SD_RX_CLK_AEC_3_G(x) ((x) << S_SD_RX_CLK_AEC_3_G)
+#define F_SD_RX_CLK_AEC_3_G V_SD_RX_CLK_AEC_3_G(1U)
+
+#define S_SD_RX_CLK_AEC_4_G 11
+#define V_SD_RX_CLK_AEC_4_G(x) ((x) << S_SD_RX_CLK_AEC_4_G)
+#define F_SD_RX_CLK_AEC_4_G V_SD_RX_CLK_AEC_4_G(1U)
+
+#define S_SD_RX_CLK_AEC_5_G 10
+#define V_SD_RX_CLK_AEC_5_G(x) ((x) << S_SD_RX_CLK_AEC_5_G)
+#define F_SD_RX_CLK_AEC_5_G V_SD_RX_CLK_AEC_5_G(1U)
+
+#define S_SD_RX_CLK_AEC_6_G 9
+#define V_SD_RX_CLK_AEC_6_G(x) ((x) << S_SD_RX_CLK_AEC_6_G)
+#define F_SD_RX_CLK_AEC_6_G V_SD_RX_CLK_AEC_6_G(1U)
+
+#define S_SD_RX_CLK_AEC_7_G 8
+#define V_SD_RX_CLK_AEC_7_G(x) ((x) << S_SD_RX_CLK_AEC_7_G)
+#define F_SD_RX_CLK_AEC_7_G V_SD_RX_CLK_AEC_7_G(1U)
+
+#define S_SD_TX_CLK_AEC_0_G 7
+#define V_SD_TX_CLK_AEC_0_G(x) ((x) << S_SD_TX_CLK_AEC_0_G)
+#define F_SD_TX_CLK_AEC_0_G V_SD_TX_CLK_AEC_0_G(1U)
+
+#define S_SD_TX_CLK_AEC_1_G 6
+#define V_SD_TX_CLK_AEC_1_G(x) ((x) << S_SD_TX_CLK_AEC_1_G)
+#define F_SD_TX_CLK_AEC_1_G V_SD_TX_CLK_AEC_1_G(1U)
+
+#define S_SD_TX_CLK_AEC_2_G 5
+#define V_SD_TX_CLK_AEC_2_G(x) ((x) << S_SD_TX_CLK_AEC_2_G)
+#define F_SD_TX_CLK_AEC_2_G V_SD_TX_CLK_AEC_2_G(1U)
+
+#define S_SD_TX_CLK_AEC_3_G 4
+#define V_SD_TX_CLK_AEC_3_G(x) ((x) << S_SD_TX_CLK_AEC_3_G)
+#define F_SD_TX_CLK_AEC_3_G V_SD_TX_CLK_AEC_3_G(1U)
+
+#define S_SD_TX_CLK_AEC_4_G 3
+#define V_SD_TX_CLK_AEC_4_G(x) ((x) << S_SD_TX_CLK_AEC_4_G)
+#define F_SD_TX_CLK_AEC_4_G V_SD_TX_CLK_AEC_4_G(1U)
+
+#define S_SD_TX_CLK_AEC_5_G 2
+#define V_SD_TX_CLK_AEC_5_G(x) ((x) << S_SD_TX_CLK_AEC_5_G)
+#define F_SD_TX_CLK_AEC_5_G V_SD_TX_CLK_AEC_5_G(1U)
+
+#define S_SD_TX_CLK_AEC_6_G 1
+#define V_SD_TX_CLK_AEC_6_G(x) ((x) << S_SD_TX_CLK_AEC_6_G)
+#define F_SD_TX_CLK_AEC_6_G V_SD_TX_CLK_AEC_6_G(1U)
+
+#define S_SD_TX_CLK_AEC_7_G 0
+#define V_SD_TX_CLK_AEC_7_G(x) ((x) << S_SD_TX_CLK_AEC_7_G)
+#define F_SD_TX_CLK_AEC_7_G V_SD_TX_CLK_AEC_7_G(1U)
+
+#define A_MAC_MTIP_CLK_CTRL_3 0x3801c
+
+#define S_PCS_RX_CLK_0_G 31
+#define V_PCS_RX_CLK_0_G(x) ((x) << S_PCS_RX_CLK_0_G)
+#define F_PCS_RX_CLK_0_G V_PCS_RX_CLK_0_G(1U)
+
+#define S_PCS_RX_CLK_1_G 30
+#define V_PCS_RX_CLK_1_G(x) ((x) << S_PCS_RX_CLK_1_G)
+#define F_PCS_RX_CLK_1_G V_PCS_RX_CLK_1_G(1U)
+
+#define S_PCS_RX_CLK_2_G 29
+#define V_PCS_RX_CLK_2_G(x) ((x) << S_PCS_RX_CLK_2_G)
+#define F_PCS_RX_CLK_2_G V_PCS_RX_CLK_2_G(1U)
+
+#define S_PCS_RX_CLK_3_G 28
+#define V_PCS_RX_CLK_3_G(x) ((x) << S_PCS_RX_CLK_3_G)
+#define F_PCS_RX_CLK_3_G V_PCS_RX_CLK_3_G(1U)
+
+#define S_PCS_RX_CLK_4_G 27
+#define V_PCS_RX_CLK_4_G(x) ((x) << S_PCS_RX_CLK_4_G)
+#define F_PCS_RX_CLK_4_G V_PCS_RX_CLK_4_G(1U)
+
+#define S_PCS_RX_CLK_5_G 26
+#define V_PCS_RX_CLK_5_G(x) ((x) << S_PCS_RX_CLK_5_G)
+#define F_PCS_RX_CLK_5_G V_PCS_RX_CLK_5_G(1U)
+
+#define S_PCS_RX_CLK_6_G 25
+#define V_PCS_RX_CLK_6_G(x) ((x) << S_PCS_RX_CLK_6_G)
+#define F_PCS_RX_CLK_6_G V_PCS_RX_CLK_6_G(1U)
+
+#define S_PCS_RX_CLK_7_G 24
+#define V_PCS_RX_CLK_7_G(x) ((x) << S_PCS_RX_CLK_7_G)
+#define F_PCS_RX_CLK_7_G V_PCS_RX_CLK_7_G(1U)
+
+#define S_PCS_TX_CLK_0_G 23
+#define V_PCS_TX_CLK_0_G(x) ((x) << S_PCS_TX_CLK_0_G)
+#define F_PCS_TX_CLK_0_G V_PCS_TX_CLK_0_G(1U)
+
+#define S_PCS_TX_CLK_1_G 22
+#define V_PCS_TX_CLK_1_G(x) ((x) << S_PCS_TX_CLK_1_G)
+#define F_PCS_TX_CLK_1_G V_PCS_TX_CLK_1_G(1U)
+
+#define S_PCS_TX_CLK_2_G 21
+#define V_PCS_TX_CLK_2_G(x) ((x) << S_PCS_TX_CLK_2_G)
+#define F_PCS_TX_CLK_2_G V_PCS_TX_CLK_2_G(1U)
+
+#define S_PCS_TX_CLK_3_G 20
+#define V_PCS_TX_CLK_3_G(x) ((x) << S_PCS_TX_CLK_3_G)
+#define F_PCS_TX_CLK_3_G V_PCS_TX_CLK_3_G(1U)
+
+#define S_PCS_TX_CLK_4_G 19
+#define V_PCS_TX_CLK_4_G(x) ((x) << S_PCS_TX_CLK_4_G)
+#define F_PCS_TX_CLK_4_G V_PCS_TX_CLK_4_G(1U)
+
+#define S_PCS_TX_CLK_5_G 18
+#define V_PCS_TX_CLK_5_G(x) ((x) << S_PCS_TX_CLK_5_G)
+#define F_PCS_TX_CLK_5_G V_PCS_TX_CLK_5_G(1U)
+
+#define S_PCS_TX_CLK_6_G 17
+#define V_PCS_TX_CLK_6_G(x) ((x) << S_PCS_TX_CLK_6_G)
+#define F_PCS_TX_CLK_6_G V_PCS_TX_CLK_6_G(1U)
+
+#define S_PCS_TX_CLK_7_G 16
+#define V_PCS_TX_CLK_7_G(x) ((x) << S_PCS_TX_CLK_7_G)
+#define F_PCS_TX_CLK_7_G V_PCS_TX_CLK_7_G(1U)
+
+#define S_SD_RX_CLK_EN_0 15
+#define V_SD_RX_CLK_EN_0(x) ((x) << S_SD_RX_CLK_EN_0)
+#define F_SD_RX_CLK_EN_0 V_SD_RX_CLK_EN_0(1U)
+
+#define S_SD_RX_CLK_EN_1 14
+#define V_SD_RX_CLK_EN_1(x) ((x) << S_SD_RX_CLK_EN_1)
+#define F_SD_RX_CLK_EN_1 V_SD_RX_CLK_EN_1(1U)
+
+#define S_SD_RX_CLK_EN_2 13
+#define V_SD_RX_CLK_EN_2(x) ((x) << S_SD_RX_CLK_EN_2)
+#define F_SD_RX_CLK_EN_2 V_SD_RX_CLK_EN_2(1U)
+
+#define S_SD_RX_CLK_EN_3 12
+#define V_SD_RX_CLK_EN_3(x) ((x) << S_SD_RX_CLK_EN_3)
+#define F_SD_RX_CLK_EN_3 V_SD_RX_CLK_EN_3(1U)
+
+#define S_SD_RX_CLK_EN_4 11
+#define V_SD_RX_CLK_EN_4(x) ((x) << S_SD_RX_CLK_EN_4)
+#define F_SD_RX_CLK_EN_4 V_SD_RX_CLK_EN_4(1U)
+
+#define S_SD_RX_CLK_EN_5 10
+#define V_SD_RX_CLK_EN_5(x) ((x) << S_SD_RX_CLK_EN_5)
+#define F_SD_RX_CLK_EN_5 V_SD_RX_CLK_EN_5(1U)
+
+#define S_SD_RX_CLK_EN_6 9
+#define V_SD_RX_CLK_EN_6(x) ((x) << S_SD_RX_CLK_EN_6)
+#define F_SD_RX_CLK_EN_6 V_SD_RX_CLK_EN_6(1U)
+
+#define S_SD_RX_CLK_EN_7 8
+#define V_SD_RX_CLK_EN_7(x) ((x) << S_SD_RX_CLK_EN_7)
+#define F_SD_RX_CLK_EN_7 V_SD_RX_CLK_EN_7(1U)
+
+#define S_SD_TX_CLK_EN_0 7
+#define V_SD_TX_CLK_EN_0(x) ((x) << S_SD_TX_CLK_EN_0)
+#define F_SD_TX_CLK_EN_0 V_SD_TX_CLK_EN_0(1U)
+
+#define S_SD_TX_CLK_EN_1 6
+#define V_SD_TX_CLK_EN_1(x) ((x) << S_SD_TX_CLK_EN_1)
+#define F_SD_TX_CLK_EN_1 V_SD_TX_CLK_EN_1(1U)
+
+#define S_SD_TX_CLK_EN_2 5
+#define V_SD_TX_CLK_EN_2(x) ((x) << S_SD_TX_CLK_EN_2)
+#define F_SD_TX_CLK_EN_2 V_SD_TX_CLK_EN_2(1U)
+
+#define S_SD_TX_CLK_EN_3 4
+#define V_SD_TX_CLK_EN_3(x) ((x) << S_SD_TX_CLK_EN_3)
+#define F_SD_TX_CLK_EN_3 V_SD_TX_CLK_EN_3(1U)
+
+#define S_SD_TX_CLK_EN_4 3
+#define V_SD_TX_CLK_EN_4(x) ((x) << S_SD_TX_CLK_EN_4)
+#define F_SD_TX_CLK_EN_4 V_SD_TX_CLK_EN_4(1U)
+
+#define S_SD_TX_CLK_EN_5 2
+#define V_SD_TX_CLK_EN_5(x) ((x) << S_SD_TX_CLK_EN_5)
+#define F_SD_TX_CLK_EN_5 V_SD_TX_CLK_EN_5(1U)
+
+#define S_SD_TX_CLK_EN_6 1
+#define V_SD_TX_CLK_EN_6(x) ((x) << S_SD_TX_CLK_EN_6)
+#define F_SD_TX_CLK_EN_6 V_SD_TX_CLK_EN_6(1U)
+
+#define S_SD_TX_CLK_EN_7 0
+#define V_SD_TX_CLK_EN_7(x) ((x) << S_SD_TX_CLK_EN_7)
+#define F_SD_TX_CLK_EN_7 V_SD_TX_CLK_EN_7(1U)
+
+#define A_MAC_MTIP_CLK_CTRL_4 0x38020
+
+#define S_SGMII_TX_CLK_0_G 7
+#define V_SGMII_TX_CLK_0_G(x) ((x) << S_SGMII_TX_CLK_0_G)
+#define F_SGMII_TX_CLK_0_G V_SGMII_TX_CLK_0_G(1U)
+
+#define S_SGMII_TX_CLK_1_G 6
+#define V_SGMII_TX_CLK_1_G(x) ((x) << S_SGMII_TX_CLK_1_G)
+#define F_SGMII_TX_CLK_1_G V_SGMII_TX_CLK_1_G(1U)
+
+#define S_SGMII_TX_CLK_2_G 5
+#define V_SGMII_TX_CLK_2_G(x) ((x) << S_SGMII_TX_CLK_2_G)
+#define F_SGMII_TX_CLK_2_G V_SGMII_TX_CLK_2_G(1U)
+
+#define S_SGMII_TX_CLK_3_G 4
+#define V_SGMII_TX_CLK_3_G(x) ((x) << S_SGMII_TX_CLK_3_G)
+#define F_SGMII_TX_CLK_3_G V_SGMII_TX_CLK_3_G(1U)
+
+#define S_SGMII_RX_CLK_0_G 3
+#define V_SGMII_RX_CLK_0_G(x) ((x) << S_SGMII_RX_CLK_0_G)
+#define F_SGMII_RX_CLK_0_G V_SGMII_RX_CLK_0_G(1U)
+
+#define S_SGMII_RX_CLK_1_G 2
+#define V_SGMII_RX_CLK_1_G(x) ((x) << S_SGMII_RX_CLK_1_G)
+#define F_SGMII_RX_CLK_1_G V_SGMII_RX_CLK_1_G(1U)
+
+#define S_SGMII_RX_CLK_2_G 1
+#define V_SGMII_RX_CLK_2_G(x) ((x) << S_SGMII_RX_CLK_2_G)
+#define F_SGMII_RX_CLK_2_G V_SGMII_RX_CLK_2_G(1U)
+
+#define S_SGMII_RX_CLK_3_G 0
+#define V_SGMII_RX_CLK_3_G(x) ((x) << S_SGMII_RX_CLK_3_G)
+#define F_SGMII_RX_CLK_3_G V_SGMII_RX_CLK_3_G(1U)
+
+#define A_MAC_PCS_CONFIG_0 0x38024
+
+#define S_KP_MODE_IN 24
+#define M_KP_MODE_IN 0xffU
+#define V_KP_MODE_IN(x) ((x) << S_KP_MODE_IN)
+#define G_KP_MODE_IN(x) (((x) >> S_KP_MODE_IN) & M_KP_MODE_IN)
+
+#define S_FEC91_ENA_IN 16
+#define M_FEC91_ENA_IN 0xffU
+#define V_FEC91_ENA_IN(x) ((x) << S_FEC91_ENA_IN)
+#define G_FEC91_ENA_IN(x) (((x) >> S_FEC91_ENA_IN) & M_FEC91_ENA_IN)
+
+#define S_SD_8X 8
+#define M_SD_8X 0xffU
+#define V_SD_8X(x) ((x) << S_SD_8X)
+#define G_SD_8X(x) (((x) >> S_SD_8X) & M_SD_8X)
+
+#define S_SD_N2 0
+#define M_SD_N2 0xffU
+#define V_SD_N2(x) ((x) << S_SD_N2)
+#define G_SD_N2(x) (((x) >> S_SD_N2) & M_SD_N2)
+
+#define A_MAC_PCS_CONFIG_1 0x38028
+
+#define S_FAST_1LANE_MODE 24
+#define M_FAST_1LANE_MODE 0xffU
+#define V_FAST_1LANE_MODE(x) ((x) << S_FAST_1LANE_MODE)
+#define G_FAST_1LANE_MODE(x) (((x) >> S_FAST_1LANE_MODE) & M_FAST_1LANE_MODE)
+
+#define S_PACER_10G 16
+#define M_PACER_10G 0xffU
+#define V_PACER_10G(x) ((x) << S_PACER_10G)
+#define G_PACER_10G(x) (((x) >> S_PACER_10G) & M_PACER_10G)
+
+#define S_PCS400_ENA_IN 14
+#define M_PCS400_ENA_IN 0x3U
+#define V_PCS400_ENA_IN(x) ((x) << S_PCS400_ENA_IN)
+#define G_PCS400_ENA_IN(x) (((x) >> S_PCS400_ENA_IN) & M_PCS400_ENA_IN)
+
+#define S_MODE40_ENA_IN4 13
+#define V_MODE40_ENA_IN4(x) ((x) << S_MODE40_ENA_IN4)
+#define F_MODE40_ENA_IN4 V_MODE40_ENA_IN4(1U)
+
+#define S_MODE40_ENA_IN0 12
+#define V_MODE40_ENA_IN0(x) ((x) << S_MODE40_ENA_IN0)
+#define F_MODE40_ENA_IN0 V_MODE40_ENA_IN0(1U)
+
+#define S_PCS100_ENA_IN6 11
+#define V_PCS100_ENA_IN6(x) ((x) << S_PCS100_ENA_IN6)
+#define F_PCS100_ENA_IN6 V_PCS100_ENA_IN6(1U)
+
+#define S_PCS100_ENA_IN4 10
+#define V_PCS100_ENA_IN4(x) ((x) << S_PCS100_ENA_IN4)
+#define F_PCS100_ENA_IN4 V_PCS100_ENA_IN4(1U)
+
+#define S_PCS100_ENA_IN2 9
+#define V_PCS100_ENA_IN2(x) ((x) << S_PCS100_ENA_IN2)
+#define F_PCS100_ENA_IN2 V_PCS100_ENA_IN2(1U)
+
+#define S_PCS100_ENA_IN0 8
+#define V_PCS100_ENA_IN0(x) ((x) << S_PCS100_ENA_IN0)
+#define F_PCS100_ENA_IN0 V_PCS100_ENA_IN0(1U)
+
+#define S_RXLAUI_ENA_IN6 7
+#define V_RXLAUI_ENA_IN6(x) ((x) << S_RXLAUI_ENA_IN6)
+#define F_RXLAUI_ENA_IN6 V_RXLAUI_ENA_IN6(1U)
+
+#define S_RXLAUI_ENA_IN4 6
+#define V_RXLAUI_ENA_IN4(x) ((x) << S_RXLAUI_ENA_IN4)
+#define F_RXLAUI_ENA_IN4 V_RXLAUI_ENA_IN4(1U)
+
+#define S_RXLAUI_ENA_IN2 5
+#define V_RXLAUI_ENA_IN2(x) ((x) << S_RXLAUI_ENA_IN2)
+#define F_RXLAUI_ENA_IN2 V_RXLAUI_ENA_IN2(1U)
+
+#define S_RXLAUI_ENA_IN0 4
+#define V_RXLAUI_ENA_IN0(x) ((x) << S_RXLAUI_ENA_IN0)
+#define F_RXLAUI_ENA_IN0 V_RXLAUI_ENA_IN0(1U)
+
+#define S_FEC91_LANE_IN6 3
+#define V_FEC91_LANE_IN6(x) ((x) << S_FEC91_LANE_IN6)
+#define F_FEC91_LANE_IN6 V_FEC91_LANE_IN6(1U)
+
+#define S_FEC91_LANE_IN4 2
+#define V_FEC91_LANE_IN4(x) ((x) << S_FEC91_LANE_IN4)
+#define F_FEC91_LANE_IN4 V_FEC91_LANE_IN4(1U)
+
+#define S_FEC91_LANE_IN2 1
+#define V_FEC91_LANE_IN2(x) ((x) << S_FEC91_LANE_IN2)
+#define F_FEC91_LANE_IN2 V_FEC91_LANE_IN2(1U)
+
+#define S_FEC91_LANE_IN0 0
+#define V_FEC91_LANE_IN0(x) ((x) << S_FEC91_LANE_IN0)
+#define F_FEC91_LANE_IN0 V_FEC91_LANE_IN0(1U)
+
+#define A_MAC_PCS_CONFIG_2 0x3802c
+
+#define S_SGPCS_EN_3 29
+#define V_SGPCS_EN_3(x) ((x) << S_SGPCS_EN_3)
+#define F_SGPCS_EN_3 V_SGPCS_EN_3(1U)
+
+#define S_SGPCS_EN_2 28
+#define V_SGPCS_EN_2(x) ((x) << S_SGPCS_EN_2)
+#define F_SGPCS_EN_2 V_SGPCS_EN_2(1U)
+
+#define S_SGPCS_EN_1 27
+#define V_SGPCS_EN_1(x) ((x) << S_SGPCS_EN_1)
+#define F_SGPCS_EN_1 V_SGPCS_EN_1(1U)
+
+#define S_SGPCS_EN_0 26
+#define V_SGPCS_EN_0(x) ((x) << S_SGPCS_EN_0)
+#define F_SGPCS_EN_0 V_SGPCS_EN_0(1U)
+
+#define S_CFG_CLOCK_RATE 22
+#define M_CFG_CLOCK_RATE 0xfU
+#define V_CFG_CLOCK_RATE(x) ((x) << S_CFG_CLOCK_RATE)
+#define G_CFG_CLOCK_RATE(x) (((x) >> S_CFG_CLOCK_RATE) & M_CFG_CLOCK_RATE)
+
+#define S_FEC_ERR_ENA 14
+#define M_FEC_ERR_ENA 0xffU
+#define V_FEC_ERR_ENA(x) ((x) << S_FEC_ERR_ENA)
+#define G_FEC_ERR_ENA(x) (((x) >> S_FEC_ERR_ENA) & M_FEC_ERR_ENA)
+
+#define S_FEC_ENA 6
+#define M_FEC_ENA 0xffU
+#define V_FEC_ENA(x) ((x) << S_FEC_ENA)
+#define G_FEC_ENA(x) (((x) >> S_FEC_ENA) & M_FEC_ENA)
+
+#define S_PCS001_TX_AM_SF 3
+#define M_PCS001_TX_AM_SF 0x7U
+#define V_PCS001_TX_AM_SF(x) ((x) << S_PCS001_TX_AM_SF)
+#define G_PCS001_TX_AM_SF(x) (((x) >> S_PCS001_TX_AM_SF) & M_PCS001_TX_AM_SF)
+
+#define S_PCS000_TX_AM_SF 0
+#define M_PCS000_TX_AM_SF 0x7U
+#define V_PCS000_TX_AM_SF(x) ((x) << S_PCS000_TX_AM_SF)
+#define G_PCS000_TX_AM_SF(x) (((x) >> S_PCS000_TX_AM_SF) & M_PCS000_TX_AM_SF)
+
+#define A_MAC_PCS_STATUS_0 0x38030
+
+#define S_PCS000_ALIGN_LOCK 30
+#define M_PCS000_ALIGN_LOCK 0x3U
+#define V_PCS000_ALIGN_LOCK(x) ((x) << S_PCS000_ALIGN_LOCK)
+#define G_PCS000_ALIGN_LOCK(x) (((x) >> S_PCS000_ALIGN_LOCK) & M_PCS000_ALIGN_LOCK)
+
+#define S_PCS000_HI_SER 28
+#define M_PCS000_HI_SER 0x3U
+#define V_PCS000_HI_SER(x) ((x) << S_PCS000_HI_SER)
+#define G_PCS000_HI_SER(x) (((x) >> S_PCS000_HI_SER) & M_PCS000_HI_SER)
+
+#define S_BER_TIMER_DONE 20
+#define M_BER_TIMER_DONE 0xffU
+#define V_BER_TIMER_DONE(x) ((x) << S_BER_TIMER_DONE)
+#define G_BER_TIMER_DONE(x) (((x) >> S_BER_TIMER_DONE) & M_BER_TIMER_DONE)
+
+#define S_T7_AMPS_LOCK 4
+#define M_T7_AMPS_LOCK 0xffffU
+#define V_T7_AMPS_LOCK(x) ((x) << S_T7_AMPS_LOCK)
+#define G_T7_AMPS_LOCK(x) (((x) >> S_T7_AMPS_LOCK) & M_T7_AMPS_LOCK)
+
+#define S_T7_ALIGN_DONE 0
+#define M_T7_ALIGN_DONE 0xfU
+#define V_T7_ALIGN_DONE(x) ((x) << S_T7_ALIGN_DONE)
+#define G_T7_ALIGN_DONE(x) (((x) >> S_T7_ALIGN_DONE) & M_T7_ALIGN_DONE)
+
+#define A_MAC_PCS_STATUS_1 0x38034
+#define A_MAC_PCS_STATUS_2 0x38038
+
+#define S_RSFEC_ALIGNED 24
+#define M_RSFEC_ALIGNED 0xffU
+#define V_RSFEC_ALIGNED(x) ((x) << S_RSFEC_ALIGNED)
+#define G_RSFEC_ALIGNED(x) (((x) >> S_RSFEC_ALIGNED) & M_RSFEC_ALIGNED)
+
+#define S_T7_FEC_LOCKED 8
+#define M_T7_FEC_LOCKED 0xffffU
+#define V_T7_FEC_LOCKED(x) ((x) << S_T7_FEC_LOCKED)
+#define G_T7_FEC_LOCKED(x) (((x) >> S_T7_FEC_LOCKED) & M_T7_FEC_LOCKED)
+
+#define S_T7_BLOCK_LOCK 0
+#define M_T7_BLOCK_LOCK 0xffU
+#define V_T7_BLOCK_LOCK(x) ((x) << S_T7_BLOCK_LOCK)
+#define G_T7_BLOCK_LOCK(x) (((x) >> S_T7_BLOCK_LOCK) & M_T7_BLOCK_LOCK)
+
+#define A_MAC_PCS_STATUS_3 0x3803c
+
+#define S_FEC_NCERR 16
+#define M_FEC_NCERR 0xffffU
+#define V_FEC_NCERR(x) ((x) << S_FEC_NCERR)
+#define G_FEC_NCERR(x) (((x) >> S_FEC_NCERR) & M_FEC_NCERR)
+
+#define S_FEC_CERR 0
+#define M_FEC_CERR 0xffffU
+#define V_FEC_CERR(x) ((x) << S_FEC_CERR)
+#define G_FEC_CERR(x) (((x) >> S_FEC_CERR) & M_FEC_CERR)
+
+#define A_MAC_PCS_STATUS_4 0x38040
+
+#define S_MAC1_RES_SPEED 23
+#define M_MAC1_RES_SPEED 0xffU
+#define V_MAC1_RES_SPEED(x) ((x) << S_MAC1_RES_SPEED)
+#define G_MAC1_RES_SPEED(x) (((x) >> S_MAC1_RES_SPEED) & M_MAC1_RES_SPEED)
+
+#define S_MAC0_RES_SPEED 14
+#define M_MAC0_RES_SPEED 0xffU
+#define V_MAC0_RES_SPEED(x) ((x) << S_MAC0_RES_SPEED)
+#define G_MAC0_RES_SPEED(x) (((x) >> S_MAC0_RES_SPEED) & M_MAC0_RES_SPEED)
+
+#define S_PCS400_ENA_IN_REF 12
+#define M_PCS400_ENA_IN_REF 0x3U
+#define V_PCS400_ENA_IN_REF(x) ((x) << S_PCS400_ENA_IN_REF)
+#define G_PCS400_ENA_IN_REF(x) (((x) >> S_PCS400_ENA_IN_REF) & M_PCS400_ENA_IN_REF)
+
+#define S_PCS000_DEGRADE_SER 10
+#define M_PCS000_DEGRADE_SER 0x3U
+#define V_PCS000_DEGRADE_SER(x) ((x) << S_PCS000_DEGRADE_SER)
+#define G_PCS000_DEGRADE_SER(x) (((x) >> S_PCS000_DEGRADE_SER) & M_PCS000_DEGRADE_SER)
+
+#define S_P4X_SIGNAL_OK 8
+#define M_P4X_SIGNAL_OK 0x3U
+#define V_P4X_SIGNAL_OK(x) ((x) << S_P4X_SIGNAL_OK)
+#define G_P4X_SIGNAL_OK(x) (((x) >> S_P4X_SIGNAL_OK) & M_P4X_SIGNAL_OK)
+
+#define S_MODE200_IND_REF 7
+#define V_MODE200_IND_REF(x) ((x) << S_MODE200_IND_REF)
+#define F_MODE200_IND_REF V_MODE200_IND_REF(1U)
+
+#define S_MODE200_8X26_IND_REF 6
+#define V_MODE200_8X26_IND_REF(x) ((x) << S_MODE200_8X26_IND_REF)
+#define F_MODE200_8X26_IND_REF V_MODE200_8X26_IND_REF(1U)
+
+#define S_PCS001_RX_AM_SF 3
+#define M_PCS001_RX_AM_SF 0x7U
+#define V_PCS001_RX_AM_SF(x) ((x) << S_PCS001_RX_AM_SF)
+#define G_PCS001_RX_AM_SF(x) (((x) >> S_PCS001_RX_AM_SF) & M_PCS001_RX_AM_SF)
+
+#define S_PCS000_RX_AM_SF 0
+#define M_PCS000_RX_AM_SF 0x7U
+#define V_PCS000_RX_AM_SF(x) ((x) << S_PCS000_RX_AM_SF)
+#define G_PCS000_RX_AM_SF(x) (((x) >> S_PCS000_RX_AM_SF) & M_PCS000_RX_AM_SF)
+
+#define A_MAC_PCS_STATUS_5 0x38044
+
+#define S_MAC5_RES_SPEED 24
+#define M_MAC5_RES_SPEED 0xffU
+#define V_MAC5_RES_SPEED(x) ((x) << S_MAC5_RES_SPEED)
+#define G_MAC5_RES_SPEED(x) (((x) >> S_MAC5_RES_SPEED) & M_MAC5_RES_SPEED)
+
+#define S_MAC4_RES_SPEED 16
+#define M_MAC4_RES_SPEED 0xffU
+#define V_MAC4_RES_SPEED(x) ((x) << S_MAC4_RES_SPEED)
+#define G_MAC4_RES_SPEED(x) (((x) >> S_MAC4_RES_SPEED) & M_MAC4_RES_SPEED)
+
+#define S_MAC3_RES_SPEED 8
+#define M_MAC3_RES_SPEED 0xffU
+#define V_MAC3_RES_SPEED(x) ((x) << S_MAC3_RES_SPEED)
+#define G_MAC3_RES_SPEED(x) (((x) >> S_MAC3_RES_SPEED) & M_MAC3_RES_SPEED)
+
+#define S_MAC2_RES_SPEED 0
+#define M_MAC2_RES_SPEED 0xffU
+#define V_MAC2_RES_SPEED(x) ((x) << S_MAC2_RES_SPEED)
+#define G_MAC2_RES_SPEED(x) (((x) >> S_MAC2_RES_SPEED) & M_MAC2_RES_SPEED)
+
+#define A_MAC_PCS_STATUS_6 0x38048
+
+#define S_MARKER_INS_CNT_100_00 16
+#define M_MARKER_INS_CNT_100_00 0x7fffU
+#define V_MARKER_INS_CNT_100_00(x) ((x) << S_MARKER_INS_CNT_100_00)
+#define G_MARKER_INS_CNT_100_00(x) (((x) >> S_MARKER_INS_CNT_100_00) & M_MARKER_INS_CNT_100_00)
+
+#define S_MAC7_RES_SPEED 8
+#define M_MAC7_RES_SPEED 0xffU
+#define V_MAC7_RES_SPEED(x) ((x) << S_MAC7_RES_SPEED)
+#define G_MAC7_RES_SPEED(x) (((x) >> S_MAC7_RES_SPEED) & M_MAC7_RES_SPEED)
+
+#define S_MAC6_RES_SPEED 0
+#define M_MAC6_RES_SPEED 0xffU
+#define V_MAC6_RES_SPEED(x) ((x) << S_MAC6_RES_SPEED)
+#define G_MAC6_RES_SPEED(x) (((x) >> S_MAC6_RES_SPEED) & M_MAC6_RES_SPEED)
+
+#define A_MAC_PCS_STATUS_7 0x3804c
+
+#define S_PCS000_LINK_STATUS 30
+#define M_PCS000_LINK_STATUS 0x3U
+#define V_PCS000_LINK_STATUS(x) ((x) << S_PCS000_LINK_STATUS)
+#define G_PCS000_LINK_STATUS(x) (((x) >> S_PCS000_LINK_STATUS) & M_PCS000_LINK_STATUS)
+
+#define S_MARKER_INS_CNT_100_02 15
+#define M_MARKER_INS_CNT_100_02 0x7fffU
+#define V_MARKER_INS_CNT_100_02(x) ((x) << S_MARKER_INS_CNT_100_02)
+#define G_MARKER_INS_CNT_100_02(x) (((x) >> S_MARKER_INS_CNT_100_02) & M_MARKER_INS_CNT_100_02)
+
+#define S_MARKER_INS_CNT_100_01 0
+#define M_MARKER_INS_CNT_100_01 0x7fffU
+#define V_MARKER_INS_CNT_100_01(x) ((x) << S_MARKER_INS_CNT_100_01)
+#define G_MARKER_INS_CNT_100_01(x) (((x) >> S_MARKER_INS_CNT_100_01) & M_MARKER_INS_CNT_100_01)
+
+#define A_MAC_PCS_STATUS_8 0x38050
+
+#define S_MARKER_INS_CNT_25_1 15
+#define M_MARKER_INS_CNT_25_1 0xffffU
+#define V_MARKER_INS_CNT_25_1(x) ((x) << S_MARKER_INS_CNT_25_1)
+#define G_MARKER_INS_CNT_25_1(x) (((x) >> S_MARKER_INS_CNT_25_1) & M_MARKER_INS_CNT_25_1)
+
+#define S_MARKER_INS_CNT_100_03 0
+#define M_MARKER_INS_CNT_100_03 0x7fffU
+#define V_MARKER_INS_CNT_100_03(x) ((x) << S_MARKER_INS_CNT_100_03)
+#define G_MARKER_INS_CNT_100_03(x) (((x) >> S_MARKER_INS_CNT_100_03) & M_MARKER_INS_CNT_100_03)
+
+#define A_MAC_PCS_STATUS_9 0x38054
+
+#define S_MARKER_INS_CNT_25_5 16
+#define M_MARKER_INS_CNT_25_5 0xffffU
+#define V_MARKER_INS_CNT_25_5(x) ((x) << S_MARKER_INS_CNT_25_5)
+#define G_MARKER_INS_CNT_25_5(x) (((x) >> S_MARKER_INS_CNT_25_5) & M_MARKER_INS_CNT_25_5)
+
+#define S_MARKER_INS_CNT_25_3 0
+#define M_MARKER_INS_CNT_25_3 0xffffU
+#define V_MARKER_INS_CNT_25_3(x) ((x) << S_MARKER_INS_CNT_25_3)
+#define G_MARKER_INS_CNT_25_3(x) (((x) >> S_MARKER_INS_CNT_25_3) & M_MARKER_INS_CNT_25_3)
+
+#define A_MAC_PCS_STATUS_10 0x38058
+
+#define S_MARKER_INS_CNT_25_50_2 16
+#define M_MARKER_INS_CNT_25_50_2 0xffffU
+#define V_MARKER_INS_CNT_25_50_2(x) ((x) << S_MARKER_INS_CNT_25_50_2)
+#define G_MARKER_INS_CNT_25_50_2(x) (((x) >> S_MARKER_INS_CNT_25_50_2) & M_MARKER_INS_CNT_25_50_2)
+
+#define S_MARKER_INS_CNT_25_50_0 0
+#define M_MARKER_INS_CNT_25_50_0 0xffffU
+#define V_MARKER_INS_CNT_25_50_0(x) ((x) << S_MARKER_INS_CNT_25_50_0)
+#define G_MARKER_INS_CNT_25_50_0(x) (((x) >> S_MARKER_INS_CNT_25_50_0) & M_MARKER_INS_CNT_25_50_0)
+
+#define A_MAC_PCS_STATUS_11 0x3805c
+
+#define S_MARKER_INS_CNT_25_50_6 16
+#define M_MARKER_INS_CNT_25_50_6 0xffffU
+#define V_MARKER_INS_CNT_25_50_6(x) ((x) << S_MARKER_INS_CNT_25_50_6)
+#define G_MARKER_INS_CNT_25_50_6(x) (((x) >> S_MARKER_INS_CNT_25_50_6) & M_MARKER_INS_CNT_25_50_6)
+
+#define S_MARKER_INS_CNT_25_50_4 0
+#define M_MARKER_INS_CNT_25_50_4 0xffffU
+#define V_MARKER_INS_CNT_25_50_4(x) ((x) << S_MARKER_INS_CNT_25_50_4)
+#define G_MARKER_INS_CNT_25_50_4(x) (((x) >> S_MARKER_INS_CNT_25_50_4) & M_MARKER_INS_CNT_25_50_4)
+
+#define A_MAC_PCS_STATUS_12 0x38060
+
+#define S_T7_LINK_STATUS 24
+#define M_T7_LINK_STATUS 0xffU
+#define V_T7_LINK_STATUS(x) ((x) << S_T7_LINK_STATUS)
+#define G_T7_LINK_STATUS(x) (((x) >> S_T7_LINK_STATUS) & M_T7_LINK_STATUS)
+
+#define S_T7_HI_BER 16
+#define M_T7_HI_BER 0xffU
+#define V_T7_HI_BER(x) ((x) << S_T7_HI_BER)
+#define G_T7_HI_BER(x) (((x) >> S_T7_HI_BER) & M_T7_HI_BER)
+
+#define S_MARKER_INS_CNT_25_7 0
+#define M_MARKER_INS_CNT_25_7 0xffffU
+#define V_MARKER_INS_CNT_25_7(x) ((x) << S_MARKER_INS_CNT_25_7)
+#define G_MARKER_INS_CNT_25_7(x) (((x) >> S_MARKER_INS_CNT_25_7) & M_MARKER_INS_CNT_25_7)
+
+#define A_MAC_MAC200G400G_0_CONFIG_0 0x38064
+#define A_MAC_MAC200G400G_0_CONFIG_1 0x38068
+
+#define S_FF_TX_CRC_OVR 11
+#define V_FF_TX_CRC_OVR(x) ((x) << S_FF_TX_CRC_OVR)
+#define F_FF_TX_CRC_OVR V_FF_TX_CRC_OVR(1U)
+
+#define S_TX_SMHOLD 2
+#define V_TX_SMHOLD(x) ((x) << S_TX_SMHOLD)
+#define F_TX_SMHOLD V_TX_SMHOLD(1U)
+
+#define A_MAC_MAC200G400G_0_CONFIG_2 0x3806c
+#define A_MAC_MAC200G400G_0_CONFIG_3 0x38070
+#define A_MAC_MAC200G400G_0_CONFIG_4 0x38074
+
+#define S_FRC_DELTA 0
+#define M_FRC_DELTA 0xffffU
+#define V_FRC_DELTA(x) ((x) << S_FRC_DELTA)
+#define G_FRC_DELTA(x) (((x) >> S_FRC_DELTA) & M_FRC_DELTA)
+
+#define A_MAC_MAC200G400G_0_STATUS 0x38078
+
+#define S_T7_LOOP_ENA 4
+#define V_T7_LOOP_ENA(x) ((x) << S_T7_LOOP_ENA)
+#define F_T7_LOOP_ENA V_T7_LOOP_ENA(1U)
+
+#define S_T7_LOC_FAULT 3
+#define V_T7_LOC_FAULT(x) ((x) << S_T7_LOC_FAULT)
+#define F_T7_LOC_FAULT V_T7_LOC_FAULT(1U)
+
+#define S_FRM_DROP 2
+#define V_FRM_DROP(x) ((x) << S_FRM_DROP)
+#define F_FRM_DROP V_FRM_DROP(1U)
+
+#define S_FF_TX_CREDIT 1
+#define V_FF_TX_CREDIT(x) ((x) << S_FF_TX_CREDIT)
+#define F_FF_TX_CREDIT V_FF_TX_CREDIT(1U)
+
+#define A_MAC_MAC200G400G_1_CONFIG_0 0x3807c
+#define A_MAC_MAC200G400G_1_CONFIG_1 0x38080
+#define A_MAC_MAC200G400G_1_CONFIG_2 0x38084
+#define A_MAC_MAC200G400G_1_CONFIG_3 0x38088
+#define A_MAC_MAC200G400G_1_CONFIG_4 0x3808c
+#define A_MAC_MAC200G400G_1_STATUS 0x38090
+#define A_MAC_AN_CFG_0 0x38094
+
+#define S_T7_AN_DATA_CTL 24
+#define M_T7_AN_DATA_CTL 0xffU
+#define V_T7_AN_DATA_CTL(x) ((x) << S_T7_AN_DATA_CTL)
+#define G_T7_AN_DATA_CTL(x) (((x) >> S_T7_AN_DATA_CTL) & M_T7_AN_DATA_CTL)
+
+#define S_T7_AN_ENA 16
+#define M_T7_AN_ENA 0xffU
+#define V_T7_AN_ENA(x) ((x) << S_T7_AN_ENA)
+#define G_T7_AN_ENA(x) (((x) >> S_T7_AN_ENA) & M_T7_AN_ENA)
+
+#define A_MAC_AN_CFG_1 0x38098
+
+#define S_AN_DIS_TIMER_AN_7 7
+#define V_AN_DIS_TIMER_AN_7(x) ((x) << S_AN_DIS_TIMER_AN_7)
+#define F_AN_DIS_TIMER_AN_7 V_AN_DIS_TIMER_AN_7(1U)
+
+#define S_AN_DIS_TIMER_AN_6 6
+#define V_AN_DIS_TIMER_AN_6(x) ((x) << S_AN_DIS_TIMER_AN_6)
+#define F_AN_DIS_TIMER_AN_6 V_AN_DIS_TIMER_AN_6(1U)
+
+#define S_AN_DIS_TIMER_AN_5 5
+#define V_AN_DIS_TIMER_AN_5(x) ((x) << S_AN_DIS_TIMER_AN_5)
+#define F_AN_DIS_TIMER_AN_5 V_AN_DIS_TIMER_AN_5(1U)
+
+#define S_AN_DIS_TIMER_AN_4 4
+#define V_AN_DIS_TIMER_AN_4(x) ((x) << S_AN_DIS_TIMER_AN_4)
+#define F_AN_DIS_TIMER_AN_4 V_AN_DIS_TIMER_AN_4(1U)
+
+#define S_AN_DIS_TIMER_AN_3 3
+#define V_AN_DIS_TIMER_AN_3(x) ((x) << S_AN_DIS_TIMER_AN_3)
+#define F_AN_DIS_TIMER_AN_3 V_AN_DIS_TIMER_AN_3(1U)
+
+#define S_AN_DIS_TIMER_AN_2 2
+#define V_AN_DIS_TIMER_AN_2(x) ((x) << S_AN_DIS_TIMER_AN_2)
+#define F_AN_DIS_TIMER_AN_2 V_AN_DIS_TIMER_AN_2(1U)
+
+#define S_AN_DIS_TIMER_AN_1 1
+#define V_AN_DIS_TIMER_AN_1(x) ((x) << S_AN_DIS_TIMER_AN_1)
+#define F_AN_DIS_TIMER_AN_1 V_AN_DIS_TIMER_AN_1(1U)
+
+#define S_AN_DIS_TIMER_AN_0 0
+#define V_AN_DIS_TIMER_AN_0(x) ((x) << S_AN_DIS_TIMER_AN_0)
+#define F_AN_DIS_TIMER_AN_0 V_AN_DIS_TIMER_AN_0(1U)
+
+#define A_MAC_AN_SERDES25G_ENA 0x3809c
+
+#define S_AN_SD25_TX_ENA_7 15
+#define V_AN_SD25_TX_ENA_7(x) ((x) << S_AN_SD25_TX_ENA_7)
+#define F_AN_SD25_TX_ENA_7 V_AN_SD25_TX_ENA_7(1U)
+
+#define S_AN_SD25_TX_ENA_6 14
+#define V_AN_SD25_TX_ENA_6(x) ((x) << S_AN_SD25_TX_ENA_6)
+#define F_AN_SD25_TX_ENA_6 V_AN_SD25_TX_ENA_6(1U)
+
+#define S_AN_SD25_TX_ENA_5 13
+#define V_AN_SD25_TX_ENA_5(x) ((x) << S_AN_SD25_TX_ENA_5)
+#define F_AN_SD25_TX_ENA_5 V_AN_SD25_TX_ENA_5(1U)
+
+#define S_AN_SD25_TX_ENA_4 12
+#define V_AN_SD25_TX_ENA_4(x) ((x) << S_AN_SD25_TX_ENA_4)
+#define F_AN_SD25_TX_ENA_4 V_AN_SD25_TX_ENA_4(1U)
+
+#define S_AN_SD25_TX_ENA_3 11
+#define V_AN_SD25_TX_ENA_3(x) ((x) << S_AN_SD25_TX_ENA_3)
+#define F_AN_SD25_TX_ENA_3 V_AN_SD25_TX_ENA_3(1U)
+
+#define S_AN_SD25_TX_ENA_2 10
+#define V_AN_SD25_TX_ENA_2(x) ((x) << S_AN_SD25_TX_ENA_2)
+#define F_AN_SD25_TX_ENA_2 V_AN_SD25_TX_ENA_2(1U)
+
+#define S_AN_SD25_TX_ENA_1 9
+#define V_AN_SD25_TX_ENA_1(x) ((x) << S_AN_SD25_TX_ENA_1)
+#define F_AN_SD25_TX_ENA_1 V_AN_SD25_TX_ENA_1(1U)
+
+#define S_AN_SD25_TX_ENA_0 8
+#define V_AN_SD25_TX_ENA_0(x) ((x) << S_AN_SD25_TX_ENA_0)
+#define F_AN_SD25_TX_ENA_0 V_AN_SD25_TX_ENA_0(1U)
+
+#define S_AN_SD25_RX_ENA_7 7
+#define V_AN_SD25_RX_ENA_7(x) ((x) << S_AN_SD25_RX_ENA_7)
+#define F_AN_SD25_RX_ENA_7 V_AN_SD25_RX_ENA_7(1U)
+
+#define S_AN_SD25_RX_ENA_6 6
+#define V_AN_SD25_RX_ENA_6(x) ((x) << S_AN_SD25_RX_ENA_6)
+#define F_AN_SD25_RX_ENA_6 V_AN_SD25_RX_ENA_6(1U)
+
+#define S_AN_SD25_RX_ENA_5 5
+#define V_AN_SD25_RX_ENA_5(x) ((x) << S_AN_SD25_RX_ENA_5)
+#define F_AN_SD25_RX_ENA_5 V_AN_SD25_RX_ENA_5(1U)
+
+#define S_AN_SD25_RX_ENA_4 4
+#define V_AN_SD25_RX_ENA_4(x) ((x) << S_AN_SD25_RX_ENA_4)
+#define F_AN_SD25_RX_ENA_4 V_AN_SD25_RX_ENA_4(1U)
+
+#define S_AN_SD25_RX_ENA_3 3
+#define V_AN_SD25_RX_ENA_3(x) ((x) << S_AN_SD25_RX_ENA_3)
+#define F_AN_SD25_RX_ENA_3 V_AN_SD25_RX_ENA_3(1U)
+
+#define S_AN_SD25_RX_ENA_2 2
+#define V_AN_SD25_RX_ENA_2(x) ((x) << S_AN_SD25_RX_ENA_2)
+#define F_AN_SD25_RX_ENA_2 V_AN_SD25_RX_ENA_2(1U)
+
+#define S_AN_SD25_RX_ENA_1 1
+#define V_AN_SD25_RX_ENA_1(x) ((x) << S_AN_SD25_RX_ENA_1)
+#define F_AN_SD25_RX_ENA_1 V_AN_SD25_RX_ENA_1(1U)
+
+#define S_AN_SD25_RX_ENA_0 0
+#define V_AN_SD25_RX_ENA_0(x) ((x) << S_AN_SD25_RX_ENA_0)
+#define F_AN_SD25_RX_ENA_0 V_AN_SD25_RX_ENA_0(1U)
+
+#define A_MAC_PLL_CFG_0 0x380a0
+
+#define S_USE_RX_CDR_CLK_FOR_TX 7
+#define V_USE_RX_CDR_CLK_FOR_TX(x) ((x) << S_USE_RX_CDR_CLK_FOR_TX)
+#define F_USE_RX_CDR_CLK_FOR_TX V_USE_RX_CDR_CLK_FOR_TX(1U)
+
+#define S_HSSPLLSEL0 5
+#define M_HSSPLLSEL0 0x3U
+#define V_HSSPLLSEL0(x) ((x) << S_HSSPLLSEL0)
+#define G_HSSPLLSEL0(x) (((x) >> S_HSSPLLSEL0) & M_HSSPLLSEL0)
+
+#define S_HSSTXDIV2CLK_SEL0 3
+#define M_HSSTXDIV2CLK_SEL0 0x3U
+#define V_HSSTXDIV2CLK_SEL0(x) ((x) << S_HSSTXDIV2CLK_SEL0)
+#define G_HSSTXDIV2CLK_SEL0(x) (((x) >> S_HSSTXDIV2CLK_SEL0) & M_HSSTXDIV2CLK_SEL0)
+
+#define S_HSS_RESET0 2
+#define V_HSS_RESET0(x) ((x) << S_HSS_RESET0)
+#define F_HSS_RESET0 V_HSS_RESET0(1U)
+
+#define S_APB_RESET0 1
+#define V_APB_RESET0(x) ((x) << S_APB_RESET0)
+#define F_APB_RESET0 V_APB_RESET0(1U)
+
+#define S_HSSCLK32DIV2_RESET0 0
+#define V_HSSCLK32DIV2_RESET0(x) ((x) << S_HSSCLK32DIV2_RESET0)
+#define F_HSSCLK32DIV2_RESET0 V_HSSCLK32DIV2_RESET0(1U)
+
+#define A_MAC_PLL_CFG_1 0x380a4
+
+#define S_HSSPLLSEL1 5
+#define M_HSSPLLSEL1 0x3U
+#define V_HSSPLLSEL1(x) ((x) << S_HSSPLLSEL1)
+#define G_HSSPLLSEL1(x) (((x) >> S_HSSPLLSEL1) & M_HSSPLLSEL1)
+
+#define S_HSSTXDIV2CLK_SEL1 3
+#define M_HSSTXDIV2CLK_SEL1 0x3U
+#define V_HSSTXDIV2CLK_SEL1(x) ((x) << S_HSSTXDIV2CLK_SEL1)
+#define G_HSSTXDIV2CLK_SEL1(x) (((x) >> S_HSSTXDIV2CLK_SEL1) & M_HSSTXDIV2CLK_SEL1)
+
+#define S_HSS_RESET1 2
+#define V_HSS_RESET1(x) ((x) << S_HSS_RESET1)
+#define F_HSS_RESET1 V_HSS_RESET1(1U)
+
+#define S_APB_RESET1 1
+#define V_APB_RESET1(x) ((x) << S_APB_RESET1)
+#define F_APB_RESET1 V_APB_RESET1(1U)
+
+#define S_HSSCLK32DIV2_RESET1 0
+#define V_HSSCLK32DIV2_RESET1(x) ((x) << S_HSSCLK32DIV2_RESET1)
+#define F_HSSCLK32DIV2_RESET1 V_HSSCLK32DIV2_RESET1(1U)
+
+#define A_MAC_PLL_CFG_2 0x380a8
+
+#define S_HSSPLLSEL2 5
+#define M_HSSPLLSEL2 0x3U
+#define V_HSSPLLSEL2(x) ((x) << S_HSSPLLSEL2)
+#define G_HSSPLLSEL2(x) (((x) >> S_HSSPLLSEL2) & M_HSSPLLSEL2)
+
+#define S_HSSTXDIV2CLK_SEL2 3
+#define M_HSSTXDIV2CLK_SEL2 0x3U
+#define V_HSSTXDIV2CLK_SEL2(x) ((x) << S_HSSTXDIV2CLK_SEL2)
+#define G_HSSTXDIV2CLK_SEL2(x) (((x) >> S_HSSTXDIV2CLK_SEL2) & M_HSSTXDIV2CLK_SEL2)
+
+#define S_HSS_RESET2 2
+#define V_HSS_RESET2(x) ((x) << S_HSS_RESET2)
+#define F_HSS_RESET2 V_HSS_RESET2(1U)
+
+#define S_APB_RESET2 1
+#define V_APB_RESET2(x) ((x) << S_APB_RESET2)
+#define F_APB_RESET2 V_APB_RESET2(1U)
+
+#define S_HSSCLK32DIV2_RESET2 0
+#define V_HSSCLK32DIV2_RESET2(x) ((x) << S_HSSCLK32DIV2_RESET2)
+#define F_HSSCLK32DIV2_RESET2 V_HSSCLK32DIV2_RESET2(1U)
+
+#define A_MAC_PLL_CFG_3 0x380ac
+
+#define S_HSSPLLSEL3 5
+#define M_HSSPLLSEL3 0x3U
+#define V_HSSPLLSEL3(x) ((x) << S_HSSPLLSEL3)
+#define G_HSSPLLSEL3(x) (((x) >> S_HSSPLLSEL3) & M_HSSPLLSEL3)
+
+#define S_HSSTXDIV2CLK_SEL3 3
+#define M_HSSTXDIV2CLK_SEL3 0x3U
+#define V_HSSTXDIV2CLK_SEL3(x) ((x) << S_HSSTXDIV2CLK_SEL3)
+#define G_HSSTXDIV2CLK_SEL3(x) (((x) >> S_HSSTXDIV2CLK_SEL3) & M_HSSTXDIV2CLK_SEL3)
+
+#define S_HSS_RESET3 2
+#define V_HSS_RESET3(x) ((x) << S_HSS_RESET3)
+#define F_HSS_RESET3 V_HSS_RESET3(1U)
+
+#define S_APB_RESET3 1
+#define V_APB_RESET3(x) ((x) << S_APB_RESET3)
+#define F_APB_RESET3 V_APB_RESET3(1U)
+
+#define S_HSSCLK32DIV2_RESET3 0
+#define V_HSSCLK32DIV2_RESET3(x) ((x) << S_HSSCLK32DIV2_RESET3)
+#define F_HSSCLK32DIV2_RESET3 V_HSSCLK32DIV2_RESET3(1U)
+
+#define A_MAC_HSS_STATUS 0x380b0
+
+#define S_TX_LANE_PLL_SEL_3 30
+#define M_TX_LANE_PLL_SEL_3 0x3U
+#define V_TX_LANE_PLL_SEL_3(x) ((x) << S_TX_LANE_PLL_SEL_3)
+#define G_TX_LANE_PLL_SEL_3(x) (((x) >> S_TX_LANE_PLL_SEL_3) & M_TX_LANE_PLL_SEL_3)
+
+#define S_TX_LANE_PLL_SEL_2 28
+#define M_TX_LANE_PLL_SEL_2 0x3U
+#define V_TX_LANE_PLL_SEL_2(x) ((x) << S_TX_LANE_PLL_SEL_2)
+#define G_TX_LANE_PLL_SEL_2(x) (((x) >> S_TX_LANE_PLL_SEL_2) & M_TX_LANE_PLL_SEL_2)
+
+#define S_TX_LANE_PLL_SEL_1 26
+#define M_TX_LANE_PLL_SEL_1 0x3U
+#define V_TX_LANE_PLL_SEL_1(x) ((x) << S_TX_LANE_PLL_SEL_1)
+#define G_TX_LANE_PLL_SEL_1(x) (((x) >> S_TX_LANE_PLL_SEL_1) & M_TX_LANE_PLL_SEL_1)
+
+#define S_TX_LANE_PLL_SEL_0 24
+#define M_TX_LANE_PLL_SEL_0 0x3U
+#define V_TX_LANE_PLL_SEL_0(x) ((x) << S_TX_LANE_PLL_SEL_0)
+#define G_TX_LANE_PLL_SEL_0(x) (((x) >> S_TX_LANE_PLL_SEL_0) & M_TX_LANE_PLL_SEL_0)
+
+#define S_HSSPLLLOCKB_HSS3 7
+#define V_HSSPLLLOCKB_HSS3(x) ((x) << S_HSSPLLLOCKB_HSS3)
+#define F_HSSPLLLOCKB_HSS3 V_HSSPLLLOCKB_HSS3(1U)
+
+#define S_HSSPLLLOCKA_HSS3 6
+#define V_HSSPLLLOCKA_HSS3(x) ((x) << S_HSSPLLLOCKA_HSS3)
+#define F_HSSPLLLOCKA_HSS3 V_HSSPLLLOCKA_HSS3(1U)
+
+#define S_HSSPLLLOCKB_HSS2 5
+#define V_HSSPLLLOCKB_HSS2(x) ((x) << S_HSSPLLLOCKB_HSS2)
+#define F_HSSPLLLOCKB_HSS2 V_HSSPLLLOCKB_HSS2(1U)
+
+#define S_HSSPLLLOCKA_HSS2 4
+#define V_HSSPLLLOCKA_HSS2(x) ((x) << S_HSSPLLLOCKA_HSS2)
+#define F_HSSPLLLOCKA_HSS2 V_HSSPLLLOCKA_HSS2(1U)
+
+#define S_HSSPLLLOCKB_HSS1 3
+#define V_HSSPLLLOCKB_HSS1(x) ((x) << S_HSSPLLLOCKB_HSS1)
+#define F_HSSPLLLOCKB_HSS1 V_HSSPLLLOCKB_HSS1(1U)
+
+#define S_HSSPLLLOCKA_HSS1 2
+#define V_HSSPLLLOCKA_HSS1(x) ((x) << S_HSSPLLLOCKA_HSS1)
+#define F_HSSPLLLOCKA_HSS1 V_HSSPLLLOCKA_HSS1(1U)
+
+#define S_HSSPLLLOCKB_HSS0 1
+#define V_HSSPLLLOCKB_HSS0(x) ((x) << S_HSSPLLLOCKB_HSS0)
+#define F_HSSPLLLOCKB_HSS0 V_HSSPLLLOCKB_HSS0(1U)
+
+#define S_HSSPLLLOCKA_HSS0 0
+#define V_HSSPLLLOCKA_HSS0(x) ((x) << S_HSSPLLLOCKA_HSS0)
+#define F_HSSPLLLOCKA_HSS0 V_HSSPLLLOCKA_HSS0(1U)
+
+#define A_MAC_HSS_SIGDET_STATUS 0x380b4
+
+#define S_HSS3_SIGDET 6
+#define M_HSS3_SIGDET 0x3U
+#define V_HSS3_SIGDET(x) ((x) << S_HSS3_SIGDET)
+#define G_HSS3_SIGDET(x) (((x) >> S_HSS3_SIGDET) & M_HSS3_SIGDET)
+
+#define S_HSS2_SIGDET 4
+#define M_HSS2_SIGDET 0x3U
+#define V_HSS2_SIGDET(x) ((x) << S_HSS2_SIGDET)
+#define G_HSS2_SIGDET(x) (((x) >> S_HSS2_SIGDET) & M_HSS2_SIGDET)
+
+#define S_HSS1_SIGDET 2
+#define M_HSS1_SIGDET 0x3U
+#define V_HSS1_SIGDET(x) ((x) << S_HSS1_SIGDET)
+#define G_HSS1_SIGDET(x) (((x) >> S_HSS1_SIGDET) & M_HSS1_SIGDET)
+
+#define S_HSS0_SIGDET 0
+#define M_HSS0_SIGDET 0x3U
+#define V_HSS0_SIGDET(x) ((x) << S_HSS0_SIGDET)
+#define G_HSS0_SIGDET(x) (((x) >> S_HSS0_SIGDET) & M_HSS0_SIGDET)
+
+#define A_MAC_FPGA_CFG_0 0x380b8
+#define A_MAC_PMD_STATUS 0x380bc
+
+#define S_SIGNAL_DETECT 0
+#define M_SIGNAL_DETECT 0xffU
+#define V_SIGNAL_DETECT(x) ((x) << S_SIGNAL_DETECT)
+#define G_SIGNAL_DETECT(x) (((x) >> S_SIGNAL_DETECT) & M_SIGNAL_DETECT)
+
+#define A_MAC_PMD_AN_CONFIG0 0x380c0
+
+#define S_AN3_RATE_SELECT 25
+#define M_AN3_RATE_SELECT 0x1fU
+#define V_AN3_RATE_SELECT(x) ((x) << S_AN3_RATE_SELECT)
+#define G_AN3_RATE_SELECT(x) (((x) >> S_AN3_RATE_SELECT) & M_AN3_RATE_SELECT)
+
+#define S_AN3_STATUS 24
+#define V_AN3_STATUS(x) ((x) << S_AN3_STATUS)
+#define F_AN3_STATUS V_AN3_STATUS(1U)
+
+#define S_AN2_RATE_SELECT 17
+#define M_AN2_RATE_SELECT 0x1fU
+#define V_AN2_RATE_SELECT(x) ((x) << S_AN2_RATE_SELECT)
+#define G_AN2_RATE_SELECT(x) (((x) >> S_AN2_RATE_SELECT) & M_AN2_RATE_SELECT)
+
+#define S_AN2_STATUS 16
+#define V_AN2_STATUS(x) ((x) << S_AN2_STATUS)
+#define F_AN2_STATUS V_AN2_STATUS(1U)
+
+#define S_AN1_RATE_SELECT 9
+#define M_AN1_RATE_SELECT 0x1fU
+#define V_AN1_RATE_SELECT(x) ((x) << S_AN1_RATE_SELECT)
+#define G_AN1_RATE_SELECT(x) (((x) >> S_AN1_RATE_SELECT) & M_AN1_RATE_SELECT)
+
+#define S_AN1_STATUS 8
+#define V_AN1_STATUS(x) ((x) << S_AN1_STATUS)
+#define F_AN1_STATUS V_AN1_STATUS(1U)
+
+#define S_AN0_RATE_SELECT 1
+#define M_AN0_RATE_SELECT 0x1fU
+#define V_AN0_RATE_SELECT(x) ((x) << S_AN0_RATE_SELECT)
+#define G_AN0_RATE_SELECT(x) (((x) >> S_AN0_RATE_SELECT) & M_AN0_RATE_SELECT)
+
+#define S_AN0_STATUS 0
+#define V_AN0_STATUS(x) ((x) << S_AN0_STATUS)
+#define F_AN0_STATUS V_AN0_STATUS(1U)
+
+#define A_MAC_PMD_AN_CONFIG1 0x380c4
+
+#define S_AN7_RATE_SELECT 25
+#define M_AN7_RATE_SELECT 0x1fU
+#define V_AN7_RATE_SELECT(x) ((x) << S_AN7_RATE_SELECT)
+#define G_AN7_RATE_SELECT(x) (((x) >> S_AN7_RATE_SELECT) & M_AN7_RATE_SELECT)
+
+#define S_AN7_STATUS 24
+#define V_AN7_STATUS(x) ((x) << S_AN7_STATUS)
+#define F_AN7_STATUS V_AN7_STATUS(1U)
+
+#define S_AN6_RATE_SELECT 17
+#define M_AN6_RATE_SELECT 0x1fU
+#define V_AN6_RATE_SELECT(x) ((x) << S_AN6_RATE_SELECT)
+#define G_AN6_RATE_SELECT(x) (((x) >> S_AN6_RATE_SELECT) & M_AN6_RATE_SELECT)
+
+#define S_AN6_STATUS 16
+#define V_AN6_STATUS(x) ((x) << S_AN6_STATUS)
+#define F_AN6_STATUS V_AN6_STATUS(1U)
+
+#define S_AN5_RATE_SELECT 9
+#define M_AN5_RATE_SELECT 0x1fU
+#define V_AN5_RATE_SELECT(x) ((x) << S_AN5_RATE_SELECT)
+#define G_AN5_RATE_SELECT(x) (((x) >> S_AN5_RATE_SELECT) & M_AN5_RATE_SELECT)
+
+#define S_AN5_STATUS 8
+#define V_AN5_STATUS(x) ((x) << S_AN5_STATUS)
+#define F_AN5_STATUS V_AN5_STATUS(1U)
+
+#define S_AN4_RATE_SELECT 1
+#define M_AN4_RATE_SELECT 0x1fU
+#define V_AN4_RATE_SELECT(x) ((x) << S_AN4_RATE_SELECT)
+#define G_AN4_RATE_SELECT(x) (((x) >> S_AN4_RATE_SELECT) & M_AN4_RATE_SELECT)
+
+#define S_AN4_STATUS 0
+#define V_AN4_STATUS(x) ((x) << S_AN4_STATUS)
+#define F_AN4_STATUS V_AN4_STATUS(1U)
+
+#define A_MAC_INT_EN_CMN 0x380c8
+
+#define S_HSS3PLL1_LOCK_LOST_INT_EN 21
+#define V_HSS3PLL1_LOCK_LOST_INT_EN(x) ((x) << S_HSS3PLL1_LOCK_LOST_INT_EN)
+#define F_HSS3PLL1_LOCK_LOST_INT_EN V_HSS3PLL1_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS3PLL1_LOCK_INT_EN 20
+#define V_HSS3PLL1_LOCK_INT_EN(x) ((x) << S_HSS3PLL1_LOCK_INT_EN)
+#define F_HSS3PLL1_LOCK_INT_EN V_HSS3PLL1_LOCK_INT_EN(1U)
+
+#define S_HSS3PLL0_LOCK_LOST_INT_EN 19
+#define V_HSS3PLL0_LOCK_LOST_INT_EN(x) ((x) << S_HSS3PLL0_LOCK_LOST_INT_EN)
+#define F_HSS3PLL0_LOCK_LOST_INT_EN V_HSS3PLL0_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS3PLL0_LOCK_INT_EN 18
+#define V_HSS3PLL0_LOCK_INT_EN(x) ((x) << S_HSS3PLL0_LOCK_INT_EN)
+#define F_HSS3PLL0_LOCK_INT_EN V_HSS3PLL0_LOCK_INT_EN(1U)
+
+#define S_HSS2PLL1_LOCK_LOST_INT_EN 17
+#define V_HSS2PLL1_LOCK_LOST_INT_EN(x) ((x) << S_HSS2PLL1_LOCK_LOST_INT_EN)
+#define F_HSS2PLL1_LOCK_LOST_INT_EN V_HSS2PLL1_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS2PLL1_LOCK_INT_EN 16
+#define V_HSS2PLL1_LOCK_INT_EN(x) ((x) << S_HSS2PLL1_LOCK_INT_EN)
+#define F_HSS2PLL1_LOCK_INT_EN V_HSS2PLL1_LOCK_INT_EN(1U)
+
+#define S_HSS2PLL0_LOCK_LOST_INT_EN 15
+#define V_HSS2PLL0_LOCK_LOST_INT_EN(x) ((x) << S_HSS2PLL0_LOCK_LOST_INT_EN)
+#define F_HSS2PLL0_LOCK_LOST_INT_EN V_HSS2PLL0_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS2PLL0_LOCK_INT_EN 14
+#define V_HSS2PLL0_LOCK_INT_EN(x) ((x) << S_HSS2PLL0_LOCK_INT_EN)
+#define F_HSS2PLL0_LOCK_INT_EN V_HSS2PLL0_LOCK_INT_EN(1U)
+
+#define S_HSS1PLL1_LOCK_LOST_INT_EN 13
+#define V_HSS1PLL1_LOCK_LOST_INT_EN(x) ((x) << S_HSS1PLL1_LOCK_LOST_INT_EN)
+#define F_HSS1PLL1_LOCK_LOST_INT_EN V_HSS1PLL1_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS1PLL1_LOCK_INT_EN 12
+#define V_HSS1PLL1_LOCK_INT_EN(x) ((x) << S_HSS1PLL1_LOCK_INT_EN)
+#define F_HSS1PLL1_LOCK_INT_EN V_HSS1PLL1_LOCK_INT_EN(1U)
+
+#define S_HSS1PLL0_LOCK_LOST_INT_EN 11
+#define V_HSS1PLL0_LOCK_LOST_INT_EN(x) ((x) << S_HSS1PLL0_LOCK_LOST_INT_EN)
+#define F_HSS1PLL0_LOCK_LOST_INT_EN V_HSS1PLL0_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS1PLL0_LOCK_INT_EN 10
+#define V_HSS1PLL0_LOCK_INT_EN(x) ((x) << S_HSS1PLL0_LOCK_INT_EN)
+#define F_HSS1PLL0_LOCK_INT_EN V_HSS1PLL0_LOCK_INT_EN(1U)
+
+#define S_HSS0PLL1_LOCK_LOST_INT_EN 9
+#define V_HSS0PLL1_LOCK_LOST_INT_EN(x) ((x) << S_HSS0PLL1_LOCK_LOST_INT_EN)
+#define F_HSS0PLL1_LOCK_LOST_INT_EN V_HSS0PLL1_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS0PLL1_LOCK_INT_EN 8
+#define V_HSS0PLL1_LOCK_INT_EN(x) ((x) << S_HSS0PLL1_LOCK_INT_EN)
+#define F_HSS0PLL1_LOCK_INT_EN V_HSS0PLL1_LOCK_INT_EN(1U)
+
+#define S_HSS0PLL0_LOCK_LOST_INT_EN 7
+#define V_HSS0PLL0_LOCK_LOST_INT_EN(x) ((x) << S_HSS0PLL0_LOCK_LOST_INT_EN)
+#define F_HSS0PLL0_LOCK_LOST_INT_EN V_HSS0PLL0_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS0PLL0_LOCK_INT_EN 6
+#define V_HSS0PLL0_LOCK_INT_EN(x) ((x) << S_HSS0PLL0_LOCK_INT_EN)
+#define F_HSS0PLL0_LOCK_INT_EN V_HSS0PLL0_LOCK_INT_EN(1U)
+
+#define S_FLOCK_ASSERTED 5
+#define V_FLOCK_ASSERTED(x) ((x) << S_FLOCK_ASSERTED)
+#define F_FLOCK_ASSERTED V_FLOCK_ASSERTED(1U)
+
+#define S_FLOCK_LOST 4
+#define V_FLOCK_LOST(x) ((x) << S_FLOCK_LOST)
+#define F_FLOCK_LOST V_FLOCK_LOST(1U)
+
+#define S_PHASE_LOCK_ASSERTED 3
+#define V_PHASE_LOCK_ASSERTED(x) ((x) << S_PHASE_LOCK_ASSERTED)
+#define F_PHASE_LOCK_ASSERTED V_PHASE_LOCK_ASSERTED(1U)
+
+#define S_PHASE_LOCK_LOST 2
+#define V_PHASE_LOCK_LOST(x) ((x) << S_PHASE_LOCK_LOST)
+#define F_PHASE_LOCK_LOST V_PHASE_LOCK_LOST(1U)
+
+#define S_LOCK_ASSERTED 1
+#define V_LOCK_ASSERTED(x) ((x) << S_LOCK_ASSERTED)
+#define F_LOCK_ASSERTED V_LOCK_ASSERTED(1U)
+
+#define S_LOCK_LOST 0
+#define V_LOCK_LOST(x) ((x) << S_LOCK_LOST)
+#define F_LOCK_LOST V_LOCK_LOST(1U)
+
+#define A_MAC_INT_CAUSE_CMN 0x380cc
+
+#define S_HSS3PLL1_LOCK_LOST_INT_CAUSE 21
+#define V_HSS3PLL1_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS3PLL1_LOCK_LOST_INT_CAUSE)
+#define F_HSS3PLL1_LOCK_LOST_INT_CAUSE V_HSS3PLL1_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS3PLL1_LOCK_INT_CAUSE 20
+#define V_HSS3PLL1_LOCK_INT_CAUSE(x) ((x) << S_HSS3PLL1_LOCK_INT_CAUSE)
+#define F_HSS3PLL1_LOCK_INT_CAUSE V_HSS3PLL1_LOCK_INT_CAUSE(1U)
+
+#define S_HSS3PLL0_LOCK_LOST_INT_CAUSE 19
+#define V_HSS3PLL0_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS3PLL0_LOCK_LOST_INT_CAUSE)
+#define F_HSS3PLL0_LOCK_LOST_INT_CAUSE V_HSS3PLL0_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS3PLL0_LOCK_INT_CAUSE 18
+#define V_HSS3PLL0_LOCK_INT_CAUSE(x) ((x) << S_HSS3PLL0_LOCK_INT_CAUSE)
+#define F_HSS3PLL0_LOCK_INT_CAUSE V_HSS3PLL0_LOCK_INT_CAUSE(1U)
+
+#define S_HSS2PLL1_LOCK_LOST_CAUSE 17
+#define V_HSS2PLL1_LOCK_LOST_CAUSE(x) ((x) << S_HSS2PLL1_LOCK_LOST_CAUSE)
+#define F_HSS2PLL1_LOCK_LOST_CAUSE V_HSS2PLL1_LOCK_LOST_CAUSE(1U)
+
+/*
+ * Aliases using the *_INT_CAUSE naming followed by every other field of
+ * A_MAC_INT_CAUSE_CMN and by the matching bit-17 enable in A_MAC_INT_EN_CMN
+ * (F_HSS2PLL1_LOCK_LOST_INT_EN).  The short-named macros above are kept for
+ * backward compatibility with existing users.
+ */
+#define S_HSS2PLL1_LOCK_LOST_INT_CAUSE S_HSS2PLL1_LOCK_LOST_CAUSE
+#define V_HSS2PLL1_LOCK_LOST_INT_CAUSE(x) V_HSS2PLL1_LOCK_LOST_CAUSE(x)
+#define F_HSS2PLL1_LOCK_LOST_INT_CAUSE F_HSS2PLL1_LOCK_LOST_CAUSE
+
+#define S_HSS2PLL1_LOCK_INT_CAUSE 16
+#define V_HSS2PLL1_LOCK_INT_CAUSE(x) ((x) << S_HSS2PLL1_LOCK_INT_CAUSE)
+#define F_HSS2PLL1_LOCK_INT_CAUSE V_HSS2PLL1_LOCK_INT_CAUSE(1U)
+
+#define S_HSS2PLL0_LOCK_LOST_INT_CAUSE 15
+#define V_HSS2PLL0_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS2PLL0_LOCK_LOST_INT_CAUSE)
+#define F_HSS2PLL0_LOCK_LOST_INT_CAUSE V_HSS2PLL0_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS2PLL0_LOCK_INT_CAUSE 14
+#define V_HSS2PLL0_LOCK_INT_CAUSE(x) ((x) << S_HSS2PLL0_LOCK_INT_CAUSE)
+#define F_HSS2PLL0_LOCK_INT_CAUSE V_HSS2PLL0_LOCK_INT_CAUSE(1U)
+
+#define S_HSS1PLL1_LOCK_LOST_INT_CAUSE 13
+#define V_HSS1PLL1_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS1PLL1_LOCK_LOST_INT_CAUSE)
+#define F_HSS1PLL1_LOCK_LOST_INT_CAUSE V_HSS1PLL1_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS1PLL1_LOCK_INT_CAUSE 12
+#define V_HSS1PLL1_LOCK_INT_CAUSE(x) ((x) << S_HSS1PLL1_LOCK_INT_CAUSE)
+#define F_HSS1PLL1_LOCK_INT_CAUSE V_HSS1PLL1_LOCK_INT_CAUSE(1U)
+
+#define S_HSS1PLL0_LOCK_LOST_INT_CAUSE 11
+#define V_HSS1PLL0_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS1PLL0_LOCK_LOST_INT_CAUSE)
+#define F_HSS1PLL0_LOCK_LOST_INT_CAUSE V_HSS1PLL0_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS1PLL0_LOCK_INT_CAUSE 10
+#define V_HSS1PLL0_LOCK_INT_CAUSE(x) ((x) << S_HSS1PLL0_LOCK_INT_CAUSE)
+#define F_HSS1PLL0_LOCK_INT_CAUSE V_HSS1PLL0_LOCK_INT_CAUSE(1U)
+
+#define S_HSS0PLL1_LOCK_LOST_INT_CAUSE 9
+#define V_HSS0PLL1_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS0PLL1_LOCK_LOST_INT_CAUSE)
+#define F_HSS0PLL1_LOCK_LOST_INT_CAUSE V_HSS0PLL1_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS0PLL1_LOCK_INT_CAUSE 8
+#define V_HSS0PLL1_LOCK_INT_CAUSE(x) ((x) << S_HSS0PLL1_LOCK_INT_CAUSE)
+#define F_HSS0PLL1_LOCK_INT_CAUSE V_HSS0PLL1_LOCK_INT_CAUSE(1U)
+
+#define S_HSS0PLL0_LOCK_LOST_INT_CAUSE 7
+#define V_HSS0PLL0_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS0PLL0_LOCK_LOST_INT_CAUSE)
+#define F_HSS0PLL0_LOCK_LOST_INT_CAUSE V_HSS0PLL0_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS0PLL0_LOCK_INT_CAUSE 6
+#define V_HSS0PLL0_LOCK_INT_CAUSE(x) ((x) << S_HSS0PLL0_LOCK_INT_CAUSE)
+#define F_HSS0PLL0_LOCK_INT_CAUSE V_HSS0PLL0_LOCK_INT_CAUSE(1U)
+
+#define A_MAC_PERR_INT_EN_MTIP 0x380d0
+
+#define S_PERR_MAC0_TX 19
+#define V_PERR_MAC0_TX(x) ((x) << S_PERR_MAC0_TX)
+#define F_PERR_MAC0_TX V_PERR_MAC0_TX(1U)
+
+#define S_PERR_MAC1_TX 18
+#define V_PERR_MAC1_TX(x) ((x) << S_PERR_MAC1_TX)
+#define F_PERR_MAC1_TX V_PERR_MAC1_TX(1U)
+
+#define S_PERR_MAC2_TX 17
+#define V_PERR_MAC2_TX(x) ((x) << S_PERR_MAC2_TX)
+#define F_PERR_MAC2_TX V_PERR_MAC2_TX(1U)
+
+#define S_PERR_MAC3_TX 16
+#define V_PERR_MAC3_TX(x) ((x) << S_PERR_MAC3_TX)
+#define F_PERR_MAC3_TX V_PERR_MAC3_TX(1U)
+
+#define S_PERR_MAC4_TX 15
+#define V_PERR_MAC4_TX(x) ((x) << S_PERR_MAC4_TX)
+#define F_PERR_MAC4_TX V_PERR_MAC4_TX(1U)
+
+#define S_PERR_MAC5_TX 14
+#define V_PERR_MAC5_TX(x) ((x) << S_PERR_MAC5_TX)
+#define F_PERR_MAC5_TX V_PERR_MAC5_TX(1U)
+
+#define S_PERR_MAC0_RX 13
+#define V_PERR_MAC0_RX(x) ((x) << S_PERR_MAC0_RX)
+#define F_PERR_MAC0_RX V_PERR_MAC0_RX(1U)
+
+#define S_PERR_MAC1_RX 12
+#define V_PERR_MAC1_RX(x) ((x) << S_PERR_MAC1_RX)
+#define F_PERR_MAC1_RX V_PERR_MAC1_RX(1U)
+
+#define S_PERR_MAC2_RX 11
+#define V_PERR_MAC2_RX(x) ((x) << S_PERR_MAC2_RX)
+#define F_PERR_MAC2_RX V_PERR_MAC2_RX(1U)
+
+#define S_PERR_MAC3_RX 10
+#define V_PERR_MAC3_RX(x) ((x) << S_PERR_MAC3_RX)
+#define F_PERR_MAC3_RX V_PERR_MAC3_RX(1U)
+
+#define S_PERR_MAC4_RX 9
+#define V_PERR_MAC4_RX(x) ((x) << S_PERR_MAC4_RX)
+#define F_PERR_MAC4_RX V_PERR_MAC4_RX(1U)
+
+#define S_PERR_MAC5_RX 8
+#define V_PERR_MAC5_RX(x) ((x) << S_PERR_MAC5_RX)
+#define F_PERR_MAC5_RX V_PERR_MAC5_RX(1U)
+
+#define S_PERR_MAC_STAT2_RX 7
+#define V_PERR_MAC_STAT2_RX(x) ((x) << S_PERR_MAC_STAT2_RX)
+#define F_PERR_MAC_STAT2_RX V_PERR_MAC_STAT2_RX(1U)
+
+#define S_PERR_MAC_STAT3_RX 6
+#define V_PERR_MAC_STAT3_RX(x) ((x) << S_PERR_MAC_STAT3_RX)
+#define F_PERR_MAC_STAT3_RX V_PERR_MAC_STAT3_RX(1U)
+
+#define S_PERR_MAC_STAT4_RX 5
+#define V_PERR_MAC_STAT4_RX(x) ((x) << S_PERR_MAC_STAT4_RX)
+#define F_PERR_MAC_STAT4_RX V_PERR_MAC_STAT4_RX(1U)
+
+#define S_PERR_MAC_STAT5_RX 4
+#define V_PERR_MAC_STAT5_RX(x) ((x) << S_PERR_MAC_STAT5_RX)
+#define F_PERR_MAC_STAT5_RX V_PERR_MAC_STAT5_RX(1U)
+
+#define S_PERR_MAC_STAT2_TX 3
+#define V_PERR_MAC_STAT2_TX(x) ((x) << S_PERR_MAC_STAT2_TX)
+#define F_PERR_MAC_STAT2_TX V_PERR_MAC_STAT2_TX(1U)
+
+#define S_PERR_MAC_STAT3_TX 2
+#define V_PERR_MAC_STAT3_TX(x) ((x) << S_PERR_MAC_STAT3_TX)
+#define F_PERR_MAC_STAT3_TX V_PERR_MAC_STAT3_TX(1U)
+
+#define S_PERR_MAC_STAT4_TX 1
+#define V_PERR_MAC_STAT4_TX(x) ((x) << S_PERR_MAC_STAT4_TX)
+#define F_PERR_MAC_STAT4_TX V_PERR_MAC_STAT4_TX(1U)
+
+#define S_PERR_MAC_STAT5_TX 0
+#define V_PERR_MAC_STAT5_TX(x) ((x) << S_PERR_MAC_STAT5_TX)
+#define F_PERR_MAC_STAT5_TX V_PERR_MAC_STAT5_TX(1U)
+
+#define A_MAC_PERR_INT_CAUSE_MTIP 0x380d4
+
+#define S_PERR_MAC_STAT_RX 7
+#define V_PERR_MAC_STAT_RX(x) ((x) << S_PERR_MAC_STAT_RX)
+#define F_PERR_MAC_STAT_RX V_PERR_MAC_STAT_RX(1U)
+
+#define S_PERR_MAC_STAT_TX 3
+#define V_PERR_MAC_STAT_TX(x) ((x) << S_PERR_MAC_STAT_TX)
+#define F_PERR_MAC_STAT_TX V_PERR_MAC_STAT_TX(1U)
+
+#define S_PERR_MAC_STAT_CAP 2
+#define V_PERR_MAC_STAT_CAP(x) ((x) << S_PERR_MAC_STAT_CAP)
+#define F_PERR_MAC_STAT_CAP V_PERR_MAC_STAT_CAP(1U)
+
+#define A_MAC_PERR_ENABLE_MTIP 0x380d8
+#define A_MAC_PCS_1G_CONFIG_0 0x380dc
+
+#define S_SEQ_ENA_3 19
+#define V_SEQ_ENA_3(x) ((x) << S_SEQ_ENA_3)
+#define F_SEQ_ENA_3 V_SEQ_ENA_3(1U)
+
+#define S_SEQ_ENA_2 18
+#define V_SEQ_ENA_2(x) ((x) << S_SEQ_ENA_2)
+#define F_SEQ_ENA_2 V_SEQ_ENA_2(1U)
+
+#define S_SEQ_ENA_1 17
+#define V_SEQ_ENA_1(x) ((x) << S_SEQ_ENA_1)
+#define F_SEQ_ENA_1 V_SEQ_ENA_1(1U)
+
+#define S_SEQ_ENA_0 16
+#define V_SEQ_ENA_0(x) ((x) << S_SEQ_ENA_0)
+#define F_SEQ_ENA_0 V_SEQ_ENA_0(1U)
+
+#define S_TX_LANE_THRESH_3 12
+#define M_TX_LANE_THRESH_3 0xfU
+#define V_TX_LANE_THRESH_3(x) ((x) << S_TX_LANE_THRESH_3)
+#define G_TX_LANE_THRESH_3(x) (((x) >> S_TX_LANE_THRESH_3) & M_TX_LANE_THRESH_3)
+
+#define S_TX_LANE_THRESH_2 8
+#define M_TX_LANE_THRESH_2 0xfU
+#define V_TX_LANE_THRESH_2(x) ((x) << S_TX_LANE_THRESH_2)
+#define G_TX_LANE_THRESH_2(x) (((x) >> S_TX_LANE_THRESH_2) & M_TX_LANE_THRESH_2)
+
+#define S_TX_LANE_THRESH_1 4
+#define M_TX_LANE_THRESH_1 0xfU
+#define V_TX_LANE_THRESH_1(x) ((x) << S_TX_LANE_THRESH_1)
+#define G_TX_LANE_THRESH_1(x) (((x) >> S_TX_LANE_THRESH_1) & M_TX_LANE_THRESH_1)
+
+#define S_TX_LANE_THRESH_0 0
+#define M_TX_LANE_THRESH_0 0xfU
+#define V_TX_LANE_THRESH_0(x) ((x) << S_TX_LANE_THRESH_0)
+#define G_TX_LANE_THRESH_0(x) (((x) >> S_TX_LANE_THRESH_0) & M_TX_LANE_THRESH_0)
+
+#define A_MAC_PCS_1G_CONFIG_1 0x380e0
+
+#define S_TX_LANE_CKMULT_3 9
+#define M_TX_LANE_CKMULT_3 0x7U
+#define V_TX_LANE_CKMULT_3(x) ((x) << S_TX_LANE_CKMULT_3)
+#define G_TX_LANE_CKMULT_3(x) (((x) >> S_TX_LANE_CKMULT_3) & M_TX_LANE_CKMULT_3)
+
+#define S_TX_LANE_CKMULT_2 6
+#define M_TX_LANE_CKMULT_2 0x7U
+#define V_TX_LANE_CKMULT_2(x) ((x) << S_TX_LANE_CKMULT_2)
+#define G_TX_LANE_CKMULT_2(x) (((x) >> S_TX_LANE_CKMULT_2) & M_TX_LANE_CKMULT_2)
+
+#define S_TX_LANE_CKMULT_1 3
+#define M_TX_LANE_CKMULT_1 0x7U
+#define V_TX_LANE_CKMULT_1(x) ((x) << S_TX_LANE_CKMULT_1)
+#define G_TX_LANE_CKMULT_1(x) (((x) >> S_TX_LANE_CKMULT_1) & M_TX_LANE_CKMULT_1)
+
+#define S_TX_LANE_CKMULT_0 0
+#define M_TX_LANE_CKMULT_0 0x7U
+#define V_TX_LANE_CKMULT_0(x) ((x) << S_TX_LANE_CKMULT_0)
+#define G_TX_LANE_CKMULT_0(x) (((x) >> S_TX_LANE_CKMULT_0) & M_TX_LANE_CKMULT_0)
+
+#define A_MAC_PTP_TIMER_RD0_LO 0x380e4
+#define A_MAC_PTP_TIMER_RD0_HI 0x380e8
+#define A_MAC_PTP_TIMER_RD1_LO 0x380ec
+#define A_MAC_PTP_TIMER_RD1_HI 0x380f0
+#define A_MAC_PTP_TIMER_WR_LO 0x380f4
+#define A_MAC_PTP_TIMER_WR_HI 0x380f8
+#define A_MAC_PTP_TIMER_OFFSET_0 0x380fc
+#define A_MAC_PTP_TIMER_OFFSET_1 0x38100
+#define A_MAC_PTP_TIMER_OFFSET_2 0x38104
+#define A_MAC_PTP_SUM_LO 0x38108
+#define A_MAC_PTP_SUM_HI 0x3810c
+#define A_MAC_PTP_TIMER_INCR0 0x38110
+#define A_MAC_PTP_TIMER_INCR1 0x38114
+#define A_MAC_PTP_DRIFT_ADJUST_COUNT 0x38118
+#define A_MAC_PTP_OFFSET_ADJUST_FINE 0x3811c
+#define A_MAC_PTP_OFFSET_ADJUST_TOTAL 0x38120
+#define A_MAC_PTP_CFG 0x38124
+#define A_MAC_PTP_PPS 0x38128
+#define A_MAC_PTP_SINGLE_ALARM 0x3812c
+#define A_MAC_PTP_PERIODIC_ALARM 0x38130
+#define A_MAC_PTP_STATUS 0x38134
+#define A_MAC_STS_GPIO_SEL 0x38140
+
+#define S_STSOUTSEL 1
+#define V_STSOUTSEL(x) ((x) << S_STSOUTSEL)
+#define F_STSOUTSEL V_STSOUTSEL(1U)
+
+#define S_STSINSEL 0
+#define V_STSINSEL(x) ((x) << S_STSINSEL)
+#define F_STSINSEL V_STSINSEL(1U)
+
+#define A_MAC_CERR_INT_EN_MTIP 0x38150
+
+#define S_CERR_MAC0_TX 11
+#define V_CERR_MAC0_TX(x) ((x) << S_CERR_MAC0_TX)
+#define F_CERR_MAC0_TX V_CERR_MAC0_TX(1U)
+
+#define S_CERR_MAC1_TX 10
+#define V_CERR_MAC1_TX(x) ((x) << S_CERR_MAC1_TX)
+#define F_CERR_MAC1_TX V_CERR_MAC1_TX(1U)
+
+#define S_CERR_MAC2_TX 9
+#define V_CERR_MAC2_TX(x) ((x) << S_CERR_MAC2_TX)
+#define F_CERR_MAC2_TX V_CERR_MAC2_TX(1U)
+
+#define S_CERR_MAC3_TX 8
+#define V_CERR_MAC3_TX(x) ((x) << S_CERR_MAC3_TX)
+#define F_CERR_MAC3_TX V_CERR_MAC3_TX(1U)
+
+#define S_CERR_MAC4_TX 7
+#define V_CERR_MAC4_TX(x) ((x) << S_CERR_MAC4_TX)
+#define F_CERR_MAC4_TX V_CERR_MAC4_TX(1U)
+
+#define S_CERR_MAC5_TX 6
+#define V_CERR_MAC5_TX(x) ((x) << S_CERR_MAC5_TX)
+#define F_CERR_MAC5_TX V_CERR_MAC5_TX(1U)
+
+#define S_CERR_MAC0_RX 5
+#define V_CERR_MAC0_RX(x) ((x) << S_CERR_MAC0_RX)
+#define F_CERR_MAC0_RX V_CERR_MAC0_RX(1U)
+
+#define S_CERR_MAC1_RX 4
+#define V_CERR_MAC1_RX(x) ((x) << S_CERR_MAC1_RX)
+#define F_CERR_MAC1_RX V_CERR_MAC1_RX(1U)
+
+#define S_CERR_MAC2_RX 3
+#define V_CERR_MAC2_RX(x) ((x) << S_CERR_MAC2_RX)
+#define F_CERR_MAC2_RX V_CERR_MAC2_RX(1U)
+
+#define S_CERR_MAC3_RX 2
+#define V_CERR_MAC3_RX(x) ((x) << S_CERR_MAC3_RX)
+#define F_CERR_MAC3_RX V_CERR_MAC3_RX(1U)
+
+#define S_CERR_MAC4_RX 1
+#define V_CERR_MAC4_RX(x) ((x) << S_CERR_MAC4_RX)
+#define F_CERR_MAC4_RX V_CERR_MAC4_RX(1U)
+
+#define S_CERR_MAC5_RX 0
+#define V_CERR_MAC5_RX(x) ((x) << S_CERR_MAC5_RX)
+#define F_CERR_MAC5_RX V_CERR_MAC5_RX(1U)
+
+#define A_MAC_CERR_INT_CAUSE_MTIP 0x38154
+#define A_MAC_1G_PCS0_STATUS 0x38160
+
+#define S_1G_PCS0_LOOPBACK 12
+#define V_1G_PCS0_LOOPBACK(x) ((x) << S_1G_PCS0_LOOPBACK)
+#define F_1G_PCS0_LOOPBACK V_1G_PCS0_LOOPBACK(1U)
+
+#define S_1G_PCS0_LINK_STATUS 11
+#define V_1G_PCS0_LINK_STATUS(x) ((x) << S_1G_PCS0_LINK_STATUS)
+#define F_1G_PCS0_LINK_STATUS V_1G_PCS0_LINK_STATUS(1U)
+
+#define S_1G_PCS0_RX_SYNC 10
+#define V_1G_PCS0_RX_SYNC(x) ((x) << S_1G_PCS0_RX_SYNC)
+#define F_1G_PCS0_RX_SYNC V_1G_PCS0_RX_SYNC(1U)
+
+#define S_1G_PCS0_AN_DONE 9
+#define V_1G_PCS0_AN_DONE(x) ((x) << S_1G_PCS0_AN_DONE)
+#define F_1G_PCS0_AN_DONE V_1G_PCS0_AN_DONE(1U)
+
+#define S_1G_PCS0_PGRCVD 8
+#define V_1G_PCS0_PGRCVD(x) ((x) << S_1G_PCS0_PGRCVD)
+#define F_1G_PCS0_PGRCVD V_1G_PCS0_PGRCVD(1U)
+
+#define S_1G_PCS0_SPEED_SEL 6
+#define M_1G_PCS0_SPEED_SEL 0x3U
+#define V_1G_PCS0_SPEED_SEL(x) ((x) << S_1G_PCS0_SPEED_SEL)
+#define G_1G_PCS0_SPEED_SEL(x) (((x) >> S_1G_PCS0_SPEED_SEL) & M_1G_PCS0_SPEED_SEL)
+
+#define S_1G_PCS0_HALF_DUPLEX 5
+#define V_1G_PCS0_HALF_DUPLEX(x) ((x) << S_1G_PCS0_HALF_DUPLEX)
+#define F_1G_PCS0_HALF_DUPLEX V_1G_PCS0_HALF_DUPLEX(1U)
+
+#define S_1G_PCS0_TX_MODE_QUIET 4
+#define V_1G_PCS0_TX_MODE_QUIET(x) ((x) << S_1G_PCS0_TX_MODE_QUIET)
+#define F_1G_PCS0_TX_MODE_QUIET V_1G_PCS0_TX_MODE_QUIET(1U)
+
+#define S_1G_PCS0_TX_LPI_ACTIVE 3
+#define V_1G_PCS0_TX_LPI_ACTIVE(x) ((x) << S_1G_PCS0_TX_LPI_ACTIVE)
+#define F_1G_PCS0_TX_LPI_ACTIVE V_1G_PCS0_TX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS0_RX_MODE_QUIET 2
+#define V_1G_PCS0_RX_MODE_QUIET(x) ((x) << S_1G_PCS0_RX_MODE_QUIET)
+#define F_1G_PCS0_RX_MODE_QUIET V_1G_PCS0_RX_MODE_QUIET(1U)
+
+#define S_1G_PCS0_RX_LPI_ACTIVE 1
+#define V_1G_PCS0_RX_LPI_ACTIVE(x) ((x) << S_1G_PCS0_RX_LPI_ACTIVE)
+#define F_1G_PCS0_RX_LPI_ACTIVE V_1G_PCS0_RX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS0_RX_WAKE_ERR 0
+#define V_1G_PCS0_RX_WAKE_ERR(x) ((x) << S_1G_PCS0_RX_WAKE_ERR)
+#define F_1G_PCS0_RX_WAKE_ERR V_1G_PCS0_RX_WAKE_ERR(1U)
+
+#define A_MAC_1G_PCS1_STATUS 0x38164
+
+#define S_1G_PCS1_LOOPBACK 12
+#define V_1G_PCS1_LOOPBACK(x) ((x) << S_1G_PCS1_LOOPBACK)
+#define F_1G_PCS1_LOOPBACK V_1G_PCS1_LOOPBACK(1U)
+
+#define S_1G_PCS1_LINK_STATUS 11
+#define V_1G_PCS1_LINK_STATUS(x) ((x) << S_1G_PCS1_LINK_STATUS)
+#define F_1G_PCS1_LINK_STATUS V_1G_PCS1_LINK_STATUS(1U)
+
+#define S_1G_PCS1_RX_SYNC 10
+#define V_1G_PCS1_RX_SYNC(x) ((x) << S_1G_PCS1_RX_SYNC)
+#define F_1G_PCS1_RX_SYNC V_1G_PCS1_RX_SYNC(1U)
+
+#define S_1G_PCS1_AN_DONE 9
+#define V_1G_PCS1_AN_DONE(x) ((x) << S_1G_PCS1_AN_DONE)
+#define F_1G_PCS1_AN_DONE V_1G_PCS1_AN_DONE(1U)
+
+#define S_1G_PCS1_PGRCVD 8
+#define V_1G_PCS1_PGRCVD(x) ((x) << S_1G_PCS1_PGRCVD)
+#define F_1G_PCS1_PGRCVD V_1G_PCS1_PGRCVD(1U)
+
+#define S_1G_PCS1_SPEED_SEL 6
+#define M_1G_PCS1_SPEED_SEL 0x3U
+#define V_1G_PCS1_SPEED_SEL(x) ((x) << S_1G_PCS1_SPEED_SEL)
+#define G_1G_PCS1_SPEED_SEL(x) (((x) >> S_1G_PCS1_SPEED_SEL) & M_1G_PCS1_SPEED_SEL)
+
+#define S_1G_PCS1_HALF_DUPLEX 5
+#define V_1G_PCS1_HALF_DUPLEX(x) ((x) << S_1G_PCS1_HALF_DUPLEX)
+#define F_1G_PCS1_HALF_DUPLEX V_1G_PCS1_HALF_DUPLEX(1U)
+
+#define S_1G_PCS1_TX_MODE_QUIET 4
+#define V_1G_PCS1_TX_MODE_QUIET(x) ((x) << S_1G_PCS1_TX_MODE_QUIET)
+#define F_1G_PCS1_TX_MODE_QUIET V_1G_PCS1_TX_MODE_QUIET(1U)
+
+#define S_1G_PCS1_TX_LPI_ACTIVE 3
+#define V_1G_PCS1_TX_LPI_ACTIVE(x) ((x) << S_1G_PCS1_TX_LPI_ACTIVE)
+#define F_1G_PCS1_TX_LPI_ACTIVE V_1G_PCS1_TX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS1_RX_MODE_QUIET 2
+#define V_1G_PCS1_RX_MODE_QUIET(x) ((x) << S_1G_PCS1_RX_MODE_QUIET)
+#define F_1G_PCS1_RX_MODE_QUIET V_1G_PCS1_RX_MODE_QUIET(1U)
+
+#define S_1G_PCS1_RX_LPI_ACTIVE 1
+#define V_1G_PCS1_RX_LPI_ACTIVE(x) ((x) << S_1G_PCS1_RX_LPI_ACTIVE)
+#define F_1G_PCS1_RX_LPI_ACTIVE V_1G_PCS1_RX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS1_RX_WAKE_ERR 0
+#define V_1G_PCS1_RX_WAKE_ERR(x) ((x) << S_1G_PCS1_RX_WAKE_ERR)
+#define F_1G_PCS1_RX_WAKE_ERR V_1G_PCS1_RX_WAKE_ERR(1U)
+
+#define A_MAC_1G_PCS2_STATUS 0x38168
+
+#define S_1G_PCS2_LOOPBACK 12
+#define V_1G_PCS2_LOOPBACK(x) ((x) << S_1G_PCS2_LOOPBACK)
+#define F_1G_PCS2_LOOPBACK V_1G_PCS2_LOOPBACK(1U)
+
+#define S_1G_PCS2_LINK_STATUS 11
+#define V_1G_PCS2_LINK_STATUS(x) ((x) << S_1G_PCS2_LINK_STATUS)
+#define F_1G_PCS2_LINK_STATUS V_1G_PCS2_LINK_STATUS(1U)
+
+#define S_1G_PCS2_RX_SYNC 10
+#define V_1G_PCS2_RX_SYNC(x) ((x) << S_1G_PCS2_RX_SYNC)
+#define F_1G_PCS2_RX_SYNC V_1G_PCS2_RX_SYNC(1U)
+
+#define S_1G_PCS2_AN_DONE 9
+#define V_1G_PCS2_AN_DONE(x) ((x) << S_1G_PCS2_AN_DONE)
+#define F_1G_PCS2_AN_DONE V_1G_PCS2_AN_DONE(1U)
+
+#define S_1G_PCS2_PGRCVD 8
+#define V_1G_PCS2_PGRCVD(x) ((x) << S_1G_PCS2_PGRCVD)
+#define F_1G_PCS2_PGRCVD V_1G_PCS2_PGRCVD(1U)
+
+#define S_1G_PCS2_SPEED_SEL 6
+#define M_1G_PCS2_SPEED_SEL 0x3U
+#define V_1G_PCS2_SPEED_SEL(x) ((x) << S_1G_PCS2_SPEED_SEL)
+#define G_1G_PCS2_SPEED_SEL(x) (((x) >> S_1G_PCS2_SPEED_SEL) & M_1G_PCS2_SPEED_SEL)
+
+#define S_1G_PCS2_HALF_DUPLEX 5
+#define V_1G_PCS2_HALF_DUPLEX(x) ((x) << S_1G_PCS2_HALF_DUPLEX)
+#define F_1G_PCS2_HALF_DUPLEX V_1G_PCS2_HALF_DUPLEX(1U)
+
+#define S_1G_PCS2_TX_MODE_QUIET 4
+#define V_1G_PCS2_TX_MODE_QUIET(x) ((x) << S_1G_PCS2_TX_MODE_QUIET)
+#define F_1G_PCS2_TX_MODE_QUIET V_1G_PCS2_TX_MODE_QUIET(1U)
+
+#define S_1G_PCS2_TX_LPI_ACTIVE 3
+#define V_1G_PCS2_TX_LPI_ACTIVE(x) ((x) << S_1G_PCS2_TX_LPI_ACTIVE)
+#define F_1G_PCS2_TX_LPI_ACTIVE V_1G_PCS2_TX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS2_RX_MODE_QUIET 2
+#define V_1G_PCS2_RX_MODE_QUIET(x) ((x) << S_1G_PCS2_RX_MODE_QUIET)
+#define F_1G_PCS2_RX_MODE_QUIET V_1G_PCS2_RX_MODE_QUIET(1U)
+
+#define S_1G_PCS2_RX_LPI_ACTIVE 1
+#define V_1G_PCS2_RX_LPI_ACTIVE(x) ((x) << S_1G_PCS2_RX_LPI_ACTIVE)
+#define F_1G_PCS2_RX_LPI_ACTIVE V_1G_PCS2_RX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS2_RX_WAKE_ERR 0
+#define V_1G_PCS2_RX_WAKE_ERR(x) ((x) << S_1G_PCS2_RX_WAKE_ERR)
+#define F_1G_PCS2_RX_WAKE_ERR V_1G_PCS2_RX_WAKE_ERR(1U)
+
+#define A_MAC_1G_PCS3_STATUS 0x3816c
+
+#define S_1G_PCS3_LOOPBACK 12
+#define V_1G_PCS3_LOOPBACK(x) ((x) << S_1G_PCS3_LOOPBACK)
+#define F_1G_PCS3_LOOPBACK V_1G_PCS3_LOOPBACK(1U)
+
+#define S_1G_PCS3_LINK_STATUS 11
+#define V_1G_PCS3_LINK_STATUS(x) ((x) << S_1G_PCS3_LINK_STATUS)
+#define F_1G_PCS3_LINK_STATUS V_1G_PCS3_LINK_STATUS(1U)
+
+#define S_1G_PCS3_RX_SYNC 10
+#define V_1G_PCS3_RX_SYNC(x) ((x) << S_1G_PCS3_RX_SYNC)
+#define F_1G_PCS3_RX_SYNC V_1G_PCS3_RX_SYNC(1U)
+
+#define S_1G_PCS3_AN_DONE 9
+#define V_1G_PCS3_AN_DONE(x) ((x) << S_1G_PCS3_AN_DONE)
+#define F_1G_PCS3_AN_DONE V_1G_PCS3_AN_DONE(1U)
+
+#define S_1G_PCS3_PGRCVD 8
+#define V_1G_PCS3_PGRCVD(x) ((x) << S_1G_PCS3_PGRCVD)
+#define F_1G_PCS3_PGRCVD V_1G_PCS3_PGRCVD(1U)
+
+#define S_1G_PCS3_SPEED_SEL 6
+#define M_1G_PCS3_SPEED_SEL 0x3U
+#define V_1G_PCS3_SPEED_SEL(x) ((x) << S_1G_PCS3_SPEED_SEL)
+#define G_1G_PCS3_SPEED_SEL(x) (((x) >> S_1G_PCS3_SPEED_SEL) & M_1G_PCS3_SPEED_SEL)
+
+#define S_1G_PCS3_HALF_DUPLEX 5
+#define V_1G_PCS3_HALF_DUPLEX(x) ((x) << S_1G_PCS3_HALF_DUPLEX)
+#define F_1G_PCS3_HALF_DUPLEX V_1G_PCS3_HALF_DUPLEX(1U)
+
+#define S_1G_PCS3_TX_MODE_QUIET 4
+#define V_1G_PCS3_TX_MODE_QUIET(x) ((x) << S_1G_PCS3_TX_MODE_QUIET)
+#define F_1G_PCS3_TX_MODE_QUIET V_1G_PCS3_TX_MODE_QUIET(1U)
+
+#define S_1G_PCS3_TX_LPI_ACTIVE 3
+#define V_1G_PCS3_TX_LPI_ACTIVE(x) ((x) << S_1G_PCS3_TX_LPI_ACTIVE)
+#define F_1G_PCS3_TX_LPI_ACTIVE V_1G_PCS3_TX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS3_RX_MODE_QUIET 2
+#define V_1G_PCS3_RX_MODE_QUIET(x) ((x) << S_1G_PCS3_RX_MODE_QUIET)
+#define F_1G_PCS3_RX_MODE_QUIET V_1G_PCS3_RX_MODE_QUIET(1U)
+
+#define S_1G_PCS3_RX_LPI_ACTIVE 1
+#define V_1G_PCS3_RX_LPI_ACTIVE(x) ((x) << S_1G_PCS3_RX_LPI_ACTIVE)
+#define F_1G_PCS3_RX_LPI_ACTIVE V_1G_PCS3_RX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS3_RX_WAKE_ERR 0
+#define V_1G_PCS3_RX_WAKE_ERR(x) ((x) << S_1G_PCS3_RX_WAKE_ERR)
+#define F_1G_PCS3_RX_WAKE_ERR V_1G_PCS3_RX_WAKE_ERR(1U)
+
+#define A_MAC_PCS_LPI_STATUS_0 0x38170
+
+#define S_TX_LPI_STATE 0
+#define M_TX_LPI_STATE 0xffffffU
+#define V_TX_LPI_STATE(x) ((x) << S_TX_LPI_STATE)
+#define G_TX_LPI_STATE(x) (((x) >> S_TX_LPI_STATE) & M_TX_LPI_STATE)
+
+#define A_MAC_PCS_LPI_STATUS_1 0x38174
+
+#define S_TX_LPI_MODE 0
+#define M_TX_LPI_MODE 0xffffU
+#define V_TX_LPI_MODE(x) ((x) << S_TX_LPI_MODE)
+#define G_TX_LPI_MODE(x) (((x) >> S_TX_LPI_MODE) & M_TX_LPI_MODE)
+
+#define A_MAC_PCS_LPI_STATUS_2 0x38178
+
+#define S_RX_LPI_MODE 24
+#define M_RX_LPI_MODE 0xffU
+#define V_RX_LPI_MODE(x) ((x) << S_RX_LPI_MODE)
+#define G_RX_LPI_MODE(x) (((x) >> S_RX_LPI_MODE) & M_RX_LPI_MODE)
+
+#define S_RX_LPI_STATE 0
+#define M_RX_LPI_STATE 0xffffffU
+#define V_RX_LPI_STATE(x) ((x) << S_RX_LPI_STATE)
+#define G_RX_LPI_STATE(x) (((x) >> S_RX_LPI_STATE) & M_RX_LPI_STATE)
+
+#define A_MAC_PCS_LPI_STATUS_3 0x3817c
+
+#define S_T7_RX_LPI_ACTIVE 0
+#define M_T7_RX_LPI_ACTIVE 0xffU
+#define V_T7_RX_LPI_ACTIVE(x) ((x) << S_T7_RX_LPI_ACTIVE)
+#define G_T7_RX_LPI_ACTIVE(x) (((x) >> S_T7_RX_LPI_ACTIVE) & M_T7_RX_LPI_ACTIVE)
+
+#define A_MAC_TX0_CLK_DIV 0x38180
+#define A_MAC_TX1_CLK_DIV 0x38184
+#define A_MAC_TX2_CLK_DIV 0x38188
+#define A_MAC_TX3_CLK_DIV 0x3818c
+#define A_MAC_TX4_CLK_DIV 0x38190
+#define A_MAC_TX5_CLK_DIV 0x38194
+#define A_MAC_TX6_CLK_DIV 0x38198
+#define A_MAC_TX7_CLK_DIV 0x3819c
+#define A_MAC_RX0_CLK_DIV 0x381a0
+#define A_MAC_RX1_CLK_DIV 0x381a4
+#define A_MAC_RX2_CLK_DIV 0x381a8
+#define A_MAC_RX3_CLK_DIV 0x381ac
+#define A_MAC_RX4_CLK_DIV 0x381b0
+#define A_MAC_RX5_CLK_DIV 0x381b4
+#define A_MAC_RX6_CLK_DIV 0x381b8
+#define A_MAC_RX7_CLK_DIV 0x381bc
+#define A_MAC_SYNC_E_CDR_LANE_SEL 0x381c0
+
+#define S_CML_MUX_SEL 11
+#define V_CML_MUX_SEL(x) ((x) << S_CML_MUX_SEL)
+#define F_CML_MUX_SEL V_CML_MUX_SEL(1U)
+
+#define S_CMOS_OUT_EN 10
+#define V_CMOS_OUT_EN(x) ((x) << S_CMOS_OUT_EN)
+#define F_CMOS_OUT_EN V_CMOS_OUT_EN(1U)
+
+#define S_CML_OUT_EN 9
+#define V_CML_OUT_EN(x) ((x) << S_CML_OUT_EN)
+#define F_CML_OUT_EN V_CML_OUT_EN(1U)
+
+#define S_LOC_FAULT_PORT_SEL 6
+#define M_LOC_FAULT_PORT_SEL 0x3U
+#define V_LOC_FAULT_PORT_SEL(x) ((x) << S_LOC_FAULT_PORT_SEL)
+#define G_LOC_FAULT_PORT_SEL(x) (((x) >> S_LOC_FAULT_PORT_SEL) & M_LOC_FAULT_PORT_SEL)
+
+#define S_TX_CDR_LANE_SEL 3
+#define M_TX_CDR_LANE_SEL 0x7U
+#define V_TX_CDR_LANE_SEL(x) ((x) << S_TX_CDR_LANE_SEL)
+#define G_TX_CDR_LANE_SEL(x) (((x) >> S_TX_CDR_LANE_SEL) & M_TX_CDR_LANE_SEL)
+
+#define S_RX_CDR_LANE_SEL 0
+#define M_RX_CDR_LANE_SEL 0x7U
+#define V_RX_CDR_LANE_SEL(x) ((x) << S_RX_CDR_LANE_SEL)
+#define G_RX_CDR_LANE_SEL(x) (((x) >> S_RX_CDR_LANE_SEL) & M_RX_CDR_LANE_SEL)
+
+#define A_MAC_DEBUG_PL_IF_1 0x381c4
+#define A_MAC_SIGNAL_DETECT_CTRL 0x381f0
+
+#define S_SIGNAL_DET_LN7 15
+#define V_SIGNAL_DET_LN7(x) ((x) << S_SIGNAL_DET_LN7)
+#define F_SIGNAL_DET_LN7 V_SIGNAL_DET_LN7(1U)
+
+#define S_SIGNAL_DET_LN6 14
+#define V_SIGNAL_DET_LN6(x) ((x) << S_SIGNAL_DET_LN6)
+#define F_SIGNAL_DET_LN6 V_SIGNAL_DET_LN6(1U)
+
+#define S_SIGNAL_DET_LN5 13
+#define V_SIGNAL_DET_LN5(x) ((x) << S_SIGNAL_DET_LN5)
+#define F_SIGNAL_DET_LN5 V_SIGNAL_DET_LN5(1U)
+
+#define S_SIGNAL_DET_LN4 12
+#define V_SIGNAL_DET_LN4(x) ((x) << S_SIGNAL_DET_LN4)
+#define F_SIGNAL_DET_LN4 V_SIGNAL_DET_LN4(1U)
+
+#define S_SIGNAL_DET_LN3 11
+#define V_SIGNAL_DET_LN3(x) ((x) << S_SIGNAL_DET_LN3)
+#define F_SIGNAL_DET_LN3 V_SIGNAL_DET_LN3(1U)
+
+#define S_SIGNAL_DET_LN2 10
+#define V_SIGNAL_DET_LN2(x) ((x) << S_SIGNAL_DET_LN2)
+#define F_SIGNAL_DET_LN2 V_SIGNAL_DET_LN2(1U)
+
+#define S_SIGNAL_DET_LN1 9
+#define V_SIGNAL_DET_LN1(x) ((x) << S_SIGNAL_DET_LN1)
+#define F_SIGNAL_DET_LN1 V_SIGNAL_DET_LN1(1U)
+
+#define S_SIGNAL_DET_LN0 8
+#define V_SIGNAL_DET_LN0(x) ((x) << S_SIGNAL_DET_LN0)
+#define F_SIGNAL_DET_LN0 V_SIGNAL_DET_LN0(1U)
+
+#define S_SIGDETCTRL_LN7 7
+#define V_SIGDETCTRL_LN7(x) ((x) << S_SIGDETCTRL_LN7)
+#define F_SIGDETCTRL_LN7 V_SIGDETCTRL_LN7(1U)
+
+#define S_SIGDETCTRL_LN6 6
+#define V_SIGDETCTRL_LN6(x) ((x) << S_SIGDETCTRL_LN6)
+#define F_SIGDETCTRL_LN6 V_SIGDETCTRL_LN6(1U)
+
+#define S_SIGDETCTRL_LN5 5
+#define V_SIGDETCTRL_LN5(x) ((x) << S_SIGDETCTRL_LN5)
+#define F_SIGDETCTRL_LN5 V_SIGDETCTRL_LN5(1U)
+
+#define S_SIGDETCTRL_LN4 4
+#define V_SIGDETCTRL_LN4(x) ((x) << S_SIGDETCTRL_LN4)
+#define F_SIGDETCTRL_LN4 V_SIGDETCTRL_LN4(1U)
+
+#define S_SIGDETCTRL_LN3 3
+#define V_SIGDETCTRL_LN3(x) ((x) << S_SIGDETCTRL_LN3)
+#define F_SIGDETCTRL_LN3 V_SIGDETCTRL_LN3(1U)
+
+#define S_SIGDETCTRL_LN2 2
+#define V_SIGDETCTRL_LN2(x) ((x) << S_SIGDETCTRL_LN2)
+#define F_SIGDETCTRL_LN2 V_SIGDETCTRL_LN2(1U)
+
+#define S_SIGDETCTRL_LN1 1
+#define V_SIGDETCTRL_LN1(x) ((x) << S_SIGDETCTRL_LN1)
+#define F_SIGDETCTRL_LN1 V_SIGDETCTRL_LN1(1U)
+
+#define S_SIGDETCTRL_LN0 0
+#define V_SIGDETCTRL_LN0(x) ((x) << S_SIGDETCTRL_LN0)
+#define F_SIGDETCTRL_LN0 V_SIGDETCTRL_LN0(1U)
+
+#define A_MAC_FPGA_STATUS_FRM_BOARD 0x381f4
+
+#define S_SFP3_RX_LOS 15
+#define V_SFP3_RX_LOS(x) ((x) << S_SFP3_RX_LOS)
+#define F_SFP3_RX_LOS V_SFP3_RX_LOS(1U)
+
+#define S_SFP3_TX_FAULT 14
+#define V_SFP3_TX_FAULT(x) ((x) << S_SFP3_TX_FAULT)
+#define F_SFP3_TX_FAULT V_SFP3_TX_FAULT(1U)
+
+#define S_SFP3_MOD_PRES 13
+#define V_SFP3_MOD_PRES(x) ((x) << S_SFP3_MOD_PRES)
+#define F_SFP3_MOD_PRES V_SFP3_MOD_PRES(1U)
+
+#define S_SFP2_RX_LOS 12
+#define V_SFP2_RX_LOS(x) ((x) << S_SFP2_RX_LOS)
+#define F_SFP2_RX_LOS V_SFP2_RX_LOS(1U)
+
+#define S_SFP2_TX_FAULT 11
+#define V_SFP2_TX_FAULT(x) ((x) << S_SFP2_TX_FAULT)
+#define F_SFP2_TX_FAULT V_SFP2_TX_FAULT(1U)
+
+#define S_SFP2_MOD_PRES 10
+#define V_SFP2_MOD_PRES(x) ((x) << S_SFP2_MOD_PRES)
+#define F_SFP2_MOD_PRES V_SFP2_MOD_PRES(1U)
+
+#define S_SFP1_RX_LOS 9
+#define V_SFP1_RX_LOS(x) ((x) << S_SFP1_RX_LOS)
+#define F_SFP1_RX_LOS V_SFP1_RX_LOS(1U)
+
+#define S_SFP1_TX_FAULT 8
+#define V_SFP1_TX_FAULT(x) ((x) << S_SFP1_TX_FAULT)
+#define F_SFP1_TX_FAULT V_SFP1_TX_FAULT(1U)
+
+#define S_SFP1_MOD_PRES 7
+#define V_SFP1_MOD_PRES(x) ((x) << S_SFP1_MOD_PRES)
+#define F_SFP1_MOD_PRES V_SFP1_MOD_PRES(1U)
+
+#define S_SFP0_RX_LOS 6
+#define V_SFP0_RX_LOS(x) ((x) << S_SFP0_RX_LOS)
+#define F_SFP0_RX_LOS V_SFP0_RX_LOS(1U)
+
+#define S_SFP0_TX_FAULT 5
+#define V_SFP0_TX_FAULT(x) ((x) << S_SFP0_TX_FAULT)
+#define F_SFP0_TX_FAULT V_SFP0_TX_FAULT(1U)
+
+#define S_SFP0_MOD_PRES 4
+#define V_SFP0_MOD_PRES(x) ((x) << S_SFP0_MOD_PRES)
+#define F_SFP0_MOD_PRES V_SFP0_MOD_PRES(1U)
+
+#define S_QSFP1_INT_L 3
+#define V_QSFP1_INT_L(x) ((x) << S_QSFP1_INT_L)
+#define F_QSFP1_INT_L V_QSFP1_INT_L(1U)
+
+#define S_QSFP1_MOD_PRES 2
+#define V_QSFP1_MOD_PRES(x) ((x) << S_QSFP1_MOD_PRES)
+#define F_QSFP1_MOD_PRES V_QSFP1_MOD_PRES(1U)
+
+#define S_QSFP0_INT_L 1
+#define V_QSFP0_INT_L(x) ((x) << S_QSFP0_INT_L)
+#define F_QSFP0_INT_L V_QSFP0_INT_L(1U)
+
+#define S_QSFP0_MOD_PRES 0
+#define V_QSFP0_MOD_PRES(x) ((x) << S_QSFP0_MOD_PRES)
+#define F_QSFP0_MOD_PRES V_QSFP0_MOD_PRES(1U)
+
+#define A_MAC_FPGA_CONTROL_TO_BOARD 0x381f8
+
+#define S_T7_1_LB_MODE 10
+#define M_T7_1_LB_MODE 0x3U
+#define V_T7_1_LB_MODE(x) ((x) << S_T7_1_LB_MODE)
+#define G_T7_1_LB_MODE(x) (((x) >> S_T7_1_LB_MODE) & M_T7_1_LB_MODE)
+
+#define S_SFP3_TX_DISABLE 9
+#define V_SFP3_TX_DISABLE(x) ((x) << S_SFP3_TX_DISABLE)
+#define F_SFP3_TX_DISABLE V_SFP3_TX_DISABLE(1U)
+
+#define S_SFP2_TX_DISABLE 8
+#define V_SFP2_TX_DISABLE(x) ((x) << S_SFP2_TX_DISABLE)
+#define F_SFP2_TX_DISABLE V_SFP2_TX_DISABLE(1U)
+
+#define S_SFP1_TX_DISABLE 7
+#define V_SFP1_TX_DISABLE(x) ((x) << S_SFP1_TX_DISABLE)
+#define F_SFP1_TX_DISABLE V_SFP1_TX_DISABLE(1U)
+
+#define S_SFP0_TX_DISABLE 6
+#define V_SFP0_TX_DISABLE(x) ((x) << S_SFP0_TX_DISABLE)
+#define F_SFP0_TX_DISABLE V_SFP0_TX_DISABLE(1U)
+
+#define S_QSFP1_LPMODE 5
+#define V_QSFP1_LPMODE(x) ((x) << S_QSFP1_LPMODE)
+#define F_QSFP1_LPMODE V_QSFP1_LPMODE(1U)
+
+#define S_QSFP1_MODSEL_L 4
+#define V_QSFP1_MODSEL_L(x) ((x) << S_QSFP1_MODSEL_L)
+#define F_QSFP1_MODSEL_L V_QSFP1_MODSEL_L(1U)
+
+#define S_QSFP1_RESET_L 3
+#define V_QSFP1_RESET_L(x) ((x) << S_QSFP1_RESET_L)
+#define F_QSFP1_RESET_L V_QSFP1_RESET_L(1U)
+
+#define S_QSFP0_LPMODE 2
+#define V_QSFP0_LPMODE(x) ((x) << S_QSFP0_LPMODE)
+#define F_QSFP0_LPMODE V_QSFP0_LPMODE(1U)
+
+#define S_QSFP0_MODSEL_L 1
+#define V_QSFP0_MODSEL_L(x) ((x) << S_QSFP0_MODSEL_L)
+#define F_QSFP0_MODSEL_L V_QSFP0_MODSEL_L(1U)
+
+#define S_QSFP0_RESET_L 0
+#define V_QSFP0_RESET_L(x) ((x) << S_QSFP0_RESET_L)
+#define F_QSFP0_RESET_L V_QSFP0_RESET_L(1U)
+
+#define A_MAC_FPGA_LINK_STATUS 0x381fc
+
+#define S_PORT3_FPGA_LINK_UP 3
+#define V_PORT3_FPGA_LINK_UP(x) ((x) << S_PORT3_FPGA_LINK_UP)
+#define F_PORT3_FPGA_LINK_UP V_PORT3_FPGA_LINK_UP(1U)
+
+#define S_PORT2_FPGA_LINK_UP 2
+#define V_PORT2_FPGA_LINK_UP(x) ((x) << S_PORT2_FPGA_LINK_UP)
+#define F_PORT2_FPGA_LINK_UP V_PORT2_FPGA_LINK_UP(1U)
+
+#define S_PORT1_FPGA_LINK_UP 1
+#define V_PORT1_FPGA_LINK_UP(x) ((x) << S_PORT1_FPGA_LINK_UP)
+#define F_PORT1_FPGA_LINK_UP V_PORT1_FPGA_LINK_UP(1U)
+
+#define S_PORT0_FPGA_LINK_UP 0
+#define V_PORT0_FPGA_LINK_UP(x) ((x) << S_PORT0_FPGA_LINK_UP)
+#define F_PORT0_FPGA_LINK_UP V_PORT0_FPGA_LINK_UP(1U)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_REVISION 0x38200
+
+#define S_MTIP_REV_400G_0 0
+#define M_MTIP_REV_400G_0 0xffU
+#define V_MTIP_REV_400G_0(x) ((x) << S_MTIP_REV_400G_0)
+#define G_MTIP_REV_400G_0(x) (((x) >> S_MTIP_REV_400G_0) & M_MTIP_REV_400G_0)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_SCRATCH 0x38204
+#define A_MAC_MTIP_MAC400G_0_MTIP_COMMAND_CONFIG 0x38208
+
+#define S_INV_LOOP 31
+#define V_INV_LOOP(x) ((x) << S_INV_LOOP)
+#define F_INV_LOOP V_INV_LOOP(1U)
+
+#define S_TX_FLUSH_ENABLE_400G_0 22
+#define V_TX_FLUSH_ENABLE_400G_0(x) ((x) << S_TX_FLUSH_ENABLE_400G_0)
+#define F_TX_FLUSH_ENABLE_400G_0 V_TX_FLUSH_ENABLE_400G_0(1U)
+
+#define S_PHY_LOOPBACK_EN_400G 10
+#define V_PHY_LOOPBACK_EN_400G(x) ((x) << S_PHY_LOOPBACK_EN_400G)
+#define F_PHY_LOOPBACK_EN_400G V_PHY_LOOPBACK_EN_400G(1U)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_ADDR_0 0x3820c
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_ADDR_1 0x38210
+#define A_MAC_MTIP_MAC400G_0_MTIP_FRM_LENGTH 0x38214
+#define A_MAC_MTIP_MAC400G_0_MTIP_RX_FIFO_SECTIONS 0x3821c
+#define A_MAC_MTIP_MAC400G_0_MTIP_TX_FIFO_SECTIONS 0x38220
+#define A_MAC_MTIP_MAC400G_0_MTIP_RX_FIFO_ALMOST_F_E 0x38224
+#define A_MAC_MTIP_MAC400G_0_MTIP_TX_FIFO_ALMOST_F_E 0x38228
+#define A_MAC_MTIP_MAC400G_0_MTIP_HASHTABLE_LOAD 0x3822c
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_STATUS 0x38240
+#define A_MAC_MTIP_MAC400G_0_MTIP_TX_IPG_LENGTH 0x38244
+
+#define S_T7_IPG 19
+#define M_T7_IPG 0x1fffU
+#define V_T7_IPG(x) ((x) << S_T7_IPG)
+#define G_T7_IPG(x) (((x) >> S_T7_IPG) & M_T7_IPG)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL01_PAUSE_QUANTA 0x38254
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL23_PAUSE_QUANTA 0x38258
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL45_PAUSE_QUANTA 0x3825c
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL67_PAUSE_QUANTA 0x38260
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL01_PAUSE_QUANTA_THRESH 0x38264
+
+#define S_CL1_PAUSE_QUANTA_THRESH 16
+#define M_CL1_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL1_PAUSE_QUANTA_THRESH(x) ((x) << S_CL1_PAUSE_QUANTA_THRESH)
+#define G_CL1_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL1_PAUSE_QUANTA_THRESH) & M_CL1_PAUSE_QUANTA_THRESH)
+
+#define S_CL0_PAUSE_QUANTA_THRESH 0
+#define M_CL0_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL0_PAUSE_QUANTA_THRESH(x) ((x) << S_CL0_PAUSE_QUANTA_THRESH)
+#define G_CL0_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL0_PAUSE_QUANTA_THRESH) & M_CL0_PAUSE_QUANTA_THRESH)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL23_PAUSE_QUANTA_THRESH 0x38268
+
+#define S_CL3_PAUSE_QUANTA_THRESH 16
+#define M_CL3_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL3_PAUSE_QUANTA_THRESH(x) ((x) << S_CL3_PAUSE_QUANTA_THRESH)
+#define G_CL3_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL3_PAUSE_QUANTA_THRESH) & M_CL3_PAUSE_QUANTA_THRESH)
+
+#define S_CL2_PAUSE_QUANTA_THRESH 0
+#define M_CL2_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL2_PAUSE_QUANTA_THRESH(x) ((x) << S_CL2_PAUSE_QUANTA_THRESH)
+#define G_CL2_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL2_PAUSE_QUANTA_THRESH) & M_CL2_PAUSE_QUANTA_THRESH)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL45_PAUSE_QUANTA_THRESH 0x3826c
+
+#define S_CL5_PAUSE_QUANTA_THRESH 16
+#define M_CL5_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL5_PAUSE_QUANTA_THRESH(x) ((x) << S_CL5_PAUSE_QUANTA_THRESH)
+#define G_CL5_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL5_PAUSE_QUANTA_THRESH) & M_CL5_PAUSE_QUANTA_THRESH)
+
+#define S_CL4_PAUSE_QUANTA_THRESH 0
+#define M_CL4_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL4_PAUSE_QUANTA_THRESH(x) ((x) << S_CL4_PAUSE_QUANTA_THRESH)
+#define G_CL4_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL4_PAUSE_QUANTA_THRESH) & M_CL4_PAUSE_QUANTA_THRESH)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL67_PAUSE_QUANTA_THRESH 0x38270
+
+#define S_CL7_PAUSE_QUANTA_THRESH 16
+#define M_CL7_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL7_PAUSE_QUANTA_THRESH(x) ((x) << S_CL7_PAUSE_QUANTA_THRESH)
+#define G_CL7_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL7_PAUSE_QUANTA_THRESH) & M_CL7_PAUSE_QUANTA_THRESH)
+
+#define S_CL6_PAUSE_QUANTA_THRESH 0
+#define M_CL6_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL6_PAUSE_QUANTA_THRESH(x) ((x) << S_CL6_PAUSE_QUANTA_THRESH)
+#define G_CL6_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL6_PAUSE_QUANTA_THRESH) & M_CL6_PAUSE_QUANTA_THRESH)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_RX_PAUSE_STATUS 0x38274
+
+#define S_RX_PAUSE_STATUS 0
+#define M_RX_PAUSE_STATUS 0xffU
+#define V_RX_PAUSE_STATUS(x) ((x) << S_RX_PAUSE_STATUS)
+#define G_RX_PAUSE_STATUS(x) (((x) >> S_RX_PAUSE_STATUS) & M_RX_PAUSE_STATUS)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_TS_TIMESTAMP 0x3827c
+#define A_MAC_MTIP_MAC400G_0_MTIP_XIF_MODE 0x38280
+#define A_MAC_MTIP_MAC400G_1_MTIP_REVISION 0x38300
+
+#define S_MTIP_REV_400G_1 0
+#define M_MTIP_REV_400G_1 0xffU
+#define V_MTIP_REV_400G_1(x) ((x) << S_MTIP_REV_400G_1)
+#define G_MTIP_REV_400G_1(x) (((x) >> S_MTIP_REV_400G_1) & M_MTIP_REV_400G_1)
+
+#define A_MAC_MTIP_MAC400G_1_MTIP_SCRATCH 0x38304
+#define A_MAC_MTIP_MAC400G_1_MTIP_COMMAND_CONFIG 0x38308
+
+#define S_TX_FLUSH_ENABLE_400G_1 22
+#define V_TX_FLUSH_ENABLE_400G_1(x) ((x) << S_TX_FLUSH_ENABLE_400G_1)
+#define F_TX_FLUSH_ENABLE_400G_1 V_TX_FLUSH_ENABLE_400G_1(1U)
+
+#define S_PHY_LOOPBACK_EN_400G_1 10
+#define V_PHY_LOOPBACK_EN_400G_1(x) ((x) << S_PHY_LOOPBACK_EN_400G_1)
+#define F_PHY_LOOPBACK_EN_400G_1 V_PHY_LOOPBACK_EN_400G_1(1U)
+
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_ADDR_0 0x3830c
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_ADDR_1 0x38310
+#define A_MAC_MTIP_MAC400G_1_MTIP_FRM_LENGTH 0x38314
+#define A_MAC_MTIP_MAC400G_1_MTIP_RX_FIFO_SECTIONS 0x3831c
+#define A_MAC_MTIP_MAC400G_1_MTIP_TX_FIFO_SECTIONS 0x38320
+#define A_MAC_MTIP_MAC400G_1_MTIP_RX_FIFO_ALMOST_F_E 0x38324
+#define A_MAC_MTIP_MAC400G_1_MTIP_TX_FIFO_ALMOST_F_E 0x38328
+#define A_MAC_MTIP_MAC400G_1_MTIP_HASHTABLE_LOAD 0x3832c
+
+#define S_ENABLE_MCAST_RX_400G_1 8
+#define V_ENABLE_MCAST_RX_400G_1(x) ((x) << S_ENABLE_MCAST_RX_400G_1)
+#define F_ENABLE_MCAST_RX_400G_1 V_ENABLE_MCAST_RX_400G_1(1U)
+
+#define S_HASHTABLE_ADDR_400G_1 0
+#define M_HASHTABLE_ADDR_400G_1 0x3fU
+#define V_HASHTABLE_ADDR_400G_1(x) ((x) << S_HASHTABLE_ADDR_400G_1)
+#define G_HASHTABLE_ADDR_400G_1(x) (((x) >> S_HASHTABLE_ADDR_400G_1) & M_HASHTABLE_ADDR_400G_1)
+
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_STATUS 0x38340
+#define A_MAC_MTIP_MAC400G_1_MTIP_TX_IPG_LENGTH 0x38344
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL01_PAUSE_QUANTA 0x38354
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL23_PAUSE_QUANTA 0x38358
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL45_PAUSE_QUANTA 0x3835c
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL67_PAUSE_QUANTA 0x38360
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL01_PAUSE_QUANTA_THRESH 0x38364
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL23_PAUSE_QUANTA_THRESH 0x38368
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL45_PAUSE_QUANTA_THRESH 0x3836c
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL67_PAUSE_QUANTA_THRESH 0x38370
+#define A_MAC_MTIP_MAC400G_1_MTIP_RX_PAUSE_STATUS 0x38374
+#define A_MAC_MTIP_MAC400G_1_MTIP_TS_TIMESTAMP 0x3837c
+#define A_MAC_MTIP_MAC400G_1_MTIP_XIF_MODE 0x38380
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_CONTROL_1 0x38400
+
+#define S_T7_SPEED_SELECTION 2
+#define V_T7_SPEED_SELECTION(x) ((x) << S_T7_SPEED_SELECTION)
+#define F_T7_SPEED_SELECTION V_T7_SPEED_SELECTION(1U)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_STATUS_1 0x38404
+
+#define S_400G_RX_LINK_STATUS 2
+#define V_400G_RX_LINK_STATUS(x) ((x) << S_400G_RX_LINK_STATUS)
+#define F_400G_RX_LINK_STATUS V_400G_RX_LINK_STATUS(1U)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DEVICE_ID0 0x38408
+
+#define S_400G_DEVICE_ID0_0 0
+#define M_400G_DEVICE_ID0_0 0xffffU
+#define V_400G_DEVICE_ID0_0(x) ((x) << S_400G_DEVICE_ID0_0)
+#define G_400G_DEVICE_ID0_0(x) (((x) >> S_400G_DEVICE_ID0_0) & M_400G_DEVICE_ID0_0)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DEVICE_ID1 0x3840c
+
+#define S_400G_DEVICE_ID1_0 0
+#define M_400G_DEVICE_ID1_0 0xffffU
+#define V_400G_DEVICE_ID1_0(x) ((x) << S_400G_DEVICE_ID1_0)
+#define G_400G_DEVICE_ID1_0(x) (((x) >> S_400G_DEVICE_ID1_0) & M_400G_DEVICE_ID1_0)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_SPEED_ABILITY 0x38410
+
+#define S_400G_CAPABLE_0 9
+#define V_400G_CAPABLE_0(x) ((x) << S_400G_CAPABLE_0)
+#define F_400G_CAPABLE_0 V_400G_CAPABLE_0(1U)
+
+#define S_200G_CAPABLE_0 8
+#define V_200G_CAPABLE_0(x) ((x) << S_200G_CAPABLE_0)
+#define F_200G_CAPABLE_0 V_200G_CAPABLE_0(1U)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DEVICES_IN_PKG1 0x38414
+
+#define S_DEVICE_PACKAGE 3
+#define V_DEVICE_PACKAGE(x) ((x) << S_DEVICE_PACKAGE)
+#define F_DEVICE_PACKAGE V_DEVICE_PACKAGE(1U)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DEVICES_IN_PKG2 0x38418
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_CONTROL_2 0x3841c
+
+#define S_400G_PCS_TYPE_SELECTION_0 0
+#define M_400G_PCS_TYPE_SELECTION_0 0xfU
+#define V_400G_PCS_TYPE_SELECTION_0(x) ((x) << S_400G_PCS_TYPE_SELECTION_0)
+#define G_400G_PCS_TYPE_SELECTION_0(x) (((x) >> S_400G_PCS_TYPE_SELECTION_0) & M_400G_PCS_TYPE_SELECTION_0)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_STATUS_2 0x38420
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_STATUS_3 0x38424
+
+#define S_T7_DEVICE_PRESENT 2
+#define M_T7_DEVICE_PRESENT 0x3fffU
+#define V_T7_DEVICE_PRESENT(x) ((x) << S_T7_DEVICE_PRESENT)
+#define G_T7_DEVICE_PRESENT(x) (((x) >> S_T7_DEVICE_PRESENT) & M_T7_DEVICE_PRESENT)
+
+#define S_400GBASE_R 1
+#define V_400GBASE_R(x) ((x) << S_400GBASE_R)
+#define F_400GBASE_R V_400GBASE_R(1U)
+
+#define S_200GBASE_R 0
+#define V_200GBASE_R(x) ((x) << S_200GBASE_R)
+#define F_200GBASE_R V_200GBASE_R(1U)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_PKG_ID0 0x38438
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_PKG_ID1 0x3843c
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_BASE_R_STATUS_1 0x38480
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_BASE_R_STATUS_2 0x38484
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_BASE_R_TEST_CONTROL 0x384a8
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_BASE_R_TEST_ERR_CNT 0x384ac
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_BER_HIGH_ORDER_CNT 0x384b0
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_ERR_BLK_HIGH_ORDER_CNT 0x384b4
+
+#define S_HIGH_ORDER 15
+#define V_HIGH_ORDER(x) ((x) << S_HIGH_ORDER)
+#define F_HIGH_ORDER V_HIGH_ORDER(1U)
+
+#define S_ERROR_BLOCK_COUNTER 0
+#define M_ERROR_BLOCK_COUNTER 0x3fffU
+#define V_ERROR_BLOCK_COUNTER(x) ((x) << S_ERROR_BLOCK_COUNTER)
+#define G_ERROR_BLOCK_COUNTER(x) (((x) >> S_ERROR_BLOCK_COUNTER) & M_ERROR_BLOCK_COUNTER)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_MULTI_LANE_ALIGN_STATUS_1 0x384c8
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_MULTI_LANE_ALIGN_STATUS_2 0x384cc
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_MULTI_LANE_ALIGN_STATUS_3 0x384d0
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_MULTI_LANE_ALIGN_STATUS_4 0x384d4
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_0_MAPPING 0x384d8
+
+#define S_T7_LANE_0_MAPPING 0
+#define M_T7_LANE_0_MAPPING 0xfU
+#define V_T7_LANE_0_MAPPING(x) ((x) << S_T7_LANE_0_MAPPING)
+#define G_T7_LANE_0_MAPPING(x) (((x) >> S_T7_LANE_0_MAPPING) & M_T7_LANE_0_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_1_MAPPING 0x384dc
+
+#define S_T7_LANE_1_MAPPING 0
+#define M_T7_LANE_1_MAPPING 0xfU
+#define V_T7_LANE_1_MAPPING(x) ((x) << S_T7_LANE_1_MAPPING)
+#define G_T7_LANE_1_MAPPING(x) (((x) >> S_T7_LANE_1_MAPPING) & M_T7_LANE_1_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_2_MAPPING 0x384e0
+
+#define S_T7_LANE_2_MAPPING 0
+#define M_T7_LANE_2_MAPPING 0xfU
+#define V_T7_LANE_2_MAPPING(x) ((x) << S_T7_LANE_2_MAPPING)
+#define G_T7_LANE_2_MAPPING(x) (((x) >> S_T7_LANE_2_MAPPING) & M_T7_LANE_2_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_3_MAPPING 0x384e4
+
+#define S_T7_LANE_3_MAPPING 0
+#define M_T7_LANE_3_MAPPING 0xfU
+#define V_T7_LANE_3_MAPPING(x) ((x) << S_T7_LANE_3_MAPPING)
+#define G_T7_LANE_3_MAPPING(x) (((x) >> S_T7_LANE_3_MAPPING) & M_T7_LANE_3_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_4_MAPPING 0x384e8
+
+#define S_T7_LANE_4_MAPPING 0
+#define M_T7_LANE_4_MAPPING 0xfU
+#define V_T7_LANE_4_MAPPING(x) ((x) << S_T7_LANE_4_MAPPING)
+#define G_T7_LANE_4_MAPPING(x) (((x) >> S_T7_LANE_4_MAPPING) & M_T7_LANE_4_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_5_MAPPING 0x384ec
+
+#define S_T7_LANE_5_MAPPING 0
+#define M_T7_LANE_5_MAPPING 0xfU
+#define V_T7_LANE_5_MAPPING(x) ((x) << S_T7_LANE_5_MAPPING)
+#define G_T7_LANE_5_MAPPING(x) (((x) >> S_T7_LANE_5_MAPPING) & M_T7_LANE_5_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_6_MAPPING 0x384f0
+
+#define S_T7_LANE_6_MAPPING 0
+#define M_T7_LANE_6_MAPPING 0xfU
+#define V_T7_LANE_6_MAPPING(x) ((x) << S_T7_LANE_6_MAPPING)
+#define G_T7_LANE_6_MAPPING(x) (((x) >> S_T7_LANE_6_MAPPING) & M_T7_LANE_6_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_7_MAPPING 0x384f4
+
+#define S_T7_LANE_7_MAPPING 0
+#define M_T7_LANE_7_MAPPING 0xfU
+#define V_T7_LANE_7_MAPPING(x) ((x) << S_T7_LANE_7_MAPPING)
+#define G_T7_LANE_7_MAPPING(x) (((x) >> S_T7_LANE_7_MAPPING) & M_T7_LANE_7_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_8_MAPPING 0x384f8
+
+#define S_T7_LANE_8_MAPPING 0
+#define M_T7_LANE_8_MAPPING 0xfU
+#define V_T7_LANE_8_MAPPING(x) ((x) << S_T7_LANE_8_MAPPING)
+#define G_T7_LANE_8_MAPPING(x) (((x) >> S_T7_LANE_8_MAPPING) & M_T7_LANE_8_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_9_MAPPING 0x384fc
+
+#define S_T7_LANE_9_MAPPING 0
+#define M_T7_LANE_9_MAPPING 0xfU
+#define V_T7_LANE_9_MAPPING(x) ((x) << S_T7_LANE_9_MAPPING)
+#define G_T7_LANE_9_MAPPING(x) (((x) >> S_T7_LANE_9_MAPPING) & M_T7_LANE_9_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_10_MAPPING 0x38500
+
+#define S_T7_LANE_10_MAPPING 0
+#define M_T7_LANE_10_MAPPING 0xfU
+#define V_T7_LANE_10_MAPPING(x) ((x) << S_T7_LANE_10_MAPPING)
+#define G_T7_LANE_10_MAPPING(x) (((x) >> S_T7_LANE_10_MAPPING) & M_T7_LANE_10_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_11_MAPPING 0x38504
+
+#define S_T7_LANE_11_MAPPING 0
+#define M_T7_LANE_11_MAPPING 0xfU
+#define V_T7_LANE_11_MAPPING(x) ((x) << S_T7_LANE_11_MAPPING)
+#define G_T7_LANE_11_MAPPING(x) (((x) >> S_T7_LANE_11_MAPPING) & M_T7_LANE_11_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_12_MAPPING 0x38508
+
+#define S_T7_LANE_12_MAPPING 0
+#define M_T7_LANE_12_MAPPING 0xfU
+#define V_T7_LANE_12_MAPPING(x) ((x) << S_T7_LANE_12_MAPPING)
+#define G_T7_LANE_12_MAPPING(x) (((x) >> S_T7_LANE_12_MAPPING) & M_T7_LANE_12_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_13_MAPPING 0x3850c
+
+#define S_T7_LANE_13_MAPPING 0
+#define M_T7_LANE_13_MAPPING 0xfU
+#define V_T7_LANE_13_MAPPING(x) ((x) << S_T7_LANE_13_MAPPING)
+#define G_T7_LANE_13_MAPPING(x) (((x) >> S_T7_LANE_13_MAPPING) & M_T7_LANE_13_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_14_MAPPING 0x38510
+
+#define S_T7_LANE_14_MAPPING 0
+#define M_T7_LANE_14_MAPPING 0xfU
+#define V_T7_LANE_14_MAPPING(x) ((x) << S_T7_LANE_14_MAPPING)
+#define G_T7_LANE_14_MAPPING(x) (((x) >> S_T7_LANE_14_MAPPING) & M_T7_LANE_14_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_15_MAPPING 0x38514
+
+#define S_T7_LANE_15_MAPPING 0
+#define M_T7_LANE_15_MAPPING 0xfU
+#define V_T7_LANE_15_MAPPING(x) ((x) << S_T7_LANE_15_MAPPING)
+#define G_T7_LANE_15_MAPPING(x) (((x) >> S_T7_LANE_15_MAPPING) & M_T7_LANE_15_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_SCRATCH 0x38600
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_CORE_REVISION 0x38604
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_CL_INTVL 0x38608
+
+#define S_T7_VL_INTVL 0
+#define M_T7_VL_INTVL 0xffffU
+#define V_T7_VL_INTVL(x) ((x) << S_T7_VL_INTVL)
+#define G_T7_VL_INTVL(x) (((x) >> S_T7_VL_INTVL) & M_T7_VL_INTVL)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_TX_LANE_THRESH 0x3860c
+
+#define S_TX_LANE_THRESH 0
+#define M_TX_LANE_THRESH 0xfU
+#define V_TX_LANE_THRESH(x) ((x) << S_TX_LANE_THRESH)
+#define G_TX_LANE_THRESH(x) (((x) >> S_TX_LANE_THRESH) & M_TX_LANE_THRESH)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_TX_CDMII_PACE 0x3861c
+
+#define S_TX_CDMII_PACE 0
+#define M_TX_CDMII_PACE 0xfU
+#define V_TX_CDMII_PACE(x) ((x) << S_TX_CDMII_PACE)
+#define G_TX_CDMII_PACE(x) (((x) >> S_TX_CDMII_PACE) & M_TX_CDMII_PACE)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_AM_0 0x38620
+
+#define S_AM_0 0
+#define M_AM_0 0xffffU
+#define V_AM_0(x) ((x) << S_AM_0)
+#define G_AM_0(x) (((x) >> S_AM_0) & M_AM_0)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_AM_1 0x38624
+
+#define S_AM_1 0
+#define M_AM_1 0xffffU
+#define V_AM_1(x) ((x) << S_AM_1)
+#define G_AM_1(x) (((x) >> S_AM_1) & M_AM_1)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DBGINFO0 0x38800
+
+#define S_DBGINFO0 0
+#define M_DBGINFO0 0xffffU
+#define V_DBGINFO0(x) ((x) << S_DBGINFO0)
+#define G_DBGINFO0(x) (((x) >> S_DBGINFO0) & M_DBGINFO0)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DBGINFO1 0x38804
+
+#define S_DBGINFO1 0
+#define M_DBGINFO1 0xffffU
+#define V_DBGINFO1(x) ((x) << S_DBGINFO1)
+#define G_DBGINFO1(x) (((x) >> S_DBGINFO1) & M_DBGINFO1)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DBGINFO2 0x38808
+
+#define S_DBGINFO2 0
+#define M_DBGINFO2 0xffffU
+#define V_DBGINFO2(x) ((x) << S_DBGINFO2)
+#define G_DBGINFO2(x) (((x) >> S_DBGINFO2) & M_DBGINFO2)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DBGINFO3 0x3880c
+
+#define S_DBGINFO3 0
+#define M_DBGINFO3 0xffffU
+#define V_DBGINFO3(x) ((x) << S_DBGINFO3)
+#define G_DBGINFO3(x) (((x) >> S_DBGINFO3) & M_DBGINFO3)
+
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_CONTROL_1 0x38900
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_STATUS_1 0x38904
+
+#define S_400G_RX_LINK_STATUS_1 2
+#define V_400G_RX_LINK_STATUS_1(x) ((x) << S_400G_RX_LINK_STATUS_1)
+#define F_400G_RX_LINK_STATUS_1 V_400G_RX_LINK_STATUS_1(1U)
+
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DEVICE_ID0 0x38908
+
+#define S_400G_DEVICE_ID0_1 0
+#define M_400G_DEVICE_ID0_1 0xffffU
+#define V_400G_DEVICE_ID0_1(x) ((x) << S_400G_DEVICE_ID0_1)
+#define G_400G_DEVICE_ID0_1(x) (((x) >> S_400G_DEVICE_ID0_1) & M_400G_DEVICE_ID0_1)
+
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DEVICE_ID1 0x3890c
+
+#define S_400G_DEVICE_ID1_1 0
+#define M_400G_DEVICE_ID1_1 0xffffU
+#define V_400G_DEVICE_ID1_1(x) ((x) << S_400G_DEVICE_ID1_1)
+#define G_400G_DEVICE_ID1_1(x) (((x) >> S_400G_DEVICE_ID1_1) & M_400G_DEVICE_ID1_1)
+
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_SPEED_ABILITY 0x38910
+
+#define S_400G_CAPABLE_1 9
+#define V_400G_CAPABLE_1(x) ((x) << S_400G_CAPABLE_1)
+#define F_400G_CAPABLE_1 V_400G_CAPABLE_1(1U)
+
+#define S_200G_CAPABLE_1 8
+#define V_200G_CAPABLE_1(x) ((x) << S_200G_CAPABLE_1)
+#define F_200G_CAPABLE_1 V_200G_CAPABLE_1(1U)
+
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DEVICES_IN_PKG1 0x38914
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DEVICES_IN_PKG2 0x38918
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_CONTROL_2 0x3891c
+
+#define S_400G_PCS_TYPE_SELECTION_1 0
+#define M_400G_PCS_TYPE_SELECTION_1 0xfU
+#define V_400G_PCS_TYPE_SELECTION_1(x) ((x) << S_400G_PCS_TYPE_SELECTION_1)
+#define G_400G_PCS_TYPE_SELECTION_1(x) (((x) >> S_400G_PCS_TYPE_SELECTION_1) & M_400G_PCS_TYPE_SELECTION_1)
+
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_STATUS_2 0x38920
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_STATUS_3 0x38924
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_PKG_ID0 0x38938
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_PKG_ID1 0x3893c
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_BASE_R_STATUS_1 0x38980
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_BASE_R_STATUS_2 0x38984
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_BASE_R_TEST_CONTROL 0x389a8
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_BASE_R_TEST_ERR_CNT 0x389ac
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_BER_HIGH_ORDER_CNT 0x389b0
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_ERR_BLK_HIGH_ORDER_CNT 0x389b4
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_MULTI_LANE_ALIGN_STATUS_1 0x389c8
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_MULTI_LANE_ALIGN_STATUS_2 0x389cc
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_MULTI_LANE_ALIGN_STATUS_3 0x389d0
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_MULTI_LANE_ALIGN_STATUS_4 0x389d4
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_0_MAPPING 0x389d8
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_1_MAPPING 0x389dc
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_2_MAPPING 0x389e0
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_3_MAPPING 0x389e4
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_4_MAPPING 0x389e8
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_5_MAPPING 0x389ec
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_6_MAPPING 0x389f0
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_7_MAPPING 0x389f4
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_8_MAPPING 0x389f8
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_9_MAPPING 0x389fc
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_10_MAPPING 0x38a00
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_11_MAPPING 0x38a04
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_12_MAPPING 0x38a08
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_13_MAPPING 0x38a0c
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_14_MAPPING 0x38a10
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_15_MAPPING 0x38a14
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_SCRATCH 0x38b00
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_CORE_REVISION 0x38b04
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_CL_INTVL 0x38b08
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_TX_LANE_THRESH 0x38b0c
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_TX_CDMII_PACE 0x38b1c
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_AM_0 0x38b20
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_AM_1 0x38b24
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DBGINFO0 0x38d00
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DBGINFO1 0x38d04
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DBGINFO2 0x38d08
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DBGINFO3 0x38d0c
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_0 0x38e00
+
+#define S_TC_PAD_ALTER 10
+#define V_TC_PAD_ALTER(x) ((x) << S_TC_PAD_ALTER)
+#define F_TC_PAD_ALTER V_TC_PAD_ALTER(1U)
+
+#define S_TC_PAD_VALUE 9
+#define V_TC_PAD_VALUE(x) ((x) << S_TC_PAD_VALUE)
+#define F_TC_PAD_VALUE V_TC_PAD_VALUE(1U)
+
+#define S_KP_ENABLE 8
+#define V_KP_ENABLE(x) ((x) << S_KP_ENABLE)
+#define F_KP_ENABLE V_KP_ENABLE(1U)
+
+#define S_AM16_COPY_DIS 3
+#define V_AM16_COPY_DIS(x) ((x) << S_AM16_COPY_DIS)
+#define F_AM16_COPY_DIS V_AM16_COPY_DIS(1U)
+
+#define S_RS_FEC_DEGRADE_OPTION_ENA 2
+#define V_RS_FEC_DEGRADE_OPTION_ENA(x) ((x) << S_RS_FEC_DEGRADE_OPTION_ENA)
+#define F_RS_FEC_DEGRADE_OPTION_ENA V_RS_FEC_DEGRADE_OPTION_ENA(1U)
+
+#define A_MAC_MTIP_RS_FEC_STATUS_0_0 0x38e04
+
+#define S_FEC_STATUS_0_14 14
+#define V_FEC_STATUS_0_14(x) ((x) << S_FEC_STATUS_0_14)
+#define F_FEC_STATUS_0_14 V_FEC_STATUS_0_14(1U)
+
+#define S_FEC_STATUS_0_11 8
+#define M_FEC_STATUS_0_11 0xfU
+#define V_FEC_STATUS_0_11(x) ((x) << S_FEC_STATUS_0_11)
+#define G_FEC_STATUS_0_11(x) (((x) >> S_FEC_STATUS_0_11) & M_FEC_STATUS_0_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED0_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED0_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED0_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED0_0 V_RS_FEC_DEGRADE_SER_RECEIVED0_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED0_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED0_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED0_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED0_1 V_RS_FEC_DEGRADE_SER_RECEIVED0_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED0_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED0_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED0_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED0_2 V_RS_FEC_DEGRADE_SER_RECEIVED0_2(1U)
+
+#define S_FEC_STATUS_0_4 4
+#define V_FEC_STATUS_0_4(x) ((x) << S_FEC_STATUS_0_4)
+#define F_FEC_STATUS_0_4 V_FEC_STATUS_0_4(1U)
+
+#define S_FEC_STATUS_0_3 3
+#define V_FEC_STATUS_0_3(x) ((x) << S_FEC_STATUS_0_3)
+#define F_FEC_STATUS_0_3 V_FEC_STATUS_0_3(1U)
+
+#define S_FEC_STATUS_0_2 2
+#define V_FEC_STATUS_0_2(x) ((x) << S_FEC_STATUS_0_2)
+#define F_FEC_STATUS_0_2 V_FEC_STATUS_0_2(1U)
+
+#define S_FEC_STATUS_0_1 1
+#define V_FEC_STATUS_0_1(x) ((x) << S_FEC_STATUS_0_1)
+#define F_FEC_STATUS_0_1 V_FEC_STATUS_0_1(1U)
+
+#define S_FEC_STATUS_0_0 0
+#define V_FEC_STATUS_0_0(x) ((x) << S_FEC_STATUS_0_0)
+#define F_FEC_STATUS_0_0 V_FEC_STATUS_0_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_0 0x38e08
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_0 0x38e0c
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_0 0x38e10
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_0 0x38e14
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_0 0x38e18
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_0 0x38e1c
+
+#define S_DEC_TRESH 0
+#define M_DEC_TRESH 0x3fU
+#define V_DEC_TRESH(x) ((x) << S_DEC_TRESH)
+#define G_DEC_TRESH(x) (((x) >> S_DEC_TRESH) & M_DEC_TRESH)
+
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_1 0x38e20
+#define A_MAC_MTIP_RS_FEC_STATUS_0_1 0x38e24
+
+#define S_FEC_STATUS_1_14 14
+#define V_FEC_STATUS_1_14(x) ((x) << S_FEC_STATUS_1_14)
+#define F_FEC_STATUS_1_14 V_FEC_STATUS_1_14(1U)
+
+#define S_FEC_STATUS_1_11 8
+#define M_FEC_STATUS_1_11 0xfU
+#define V_FEC_STATUS_1_11(x) ((x) << S_FEC_STATUS_1_11)
+#define G_FEC_STATUS_1_11(x) (((x) >> S_FEC_STATUS_1_11) & M_FEC_STATUS_1_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED1_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED1_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED1_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED1_0 V_RS_FEC_DEGRADE_SER_RECEIVED1_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED1_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED1_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED1_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED1_1 V_RS_FEC_DEGRADE_SER_RECEIVED1_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED1_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED1_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED1_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED1_2 V_RS_FEC_DEGRADE_SER_RECEIVED1_2(1U)
+
+#define S_FEC_STATUS_1_4 4
+#define V_FEC_STATUS_1_4(x) ((x) << S_FEC_STATUS_1_4)
+#define F_FEC_STATUS_1_4 V_FEC_STATUS_1_4(1U)
+
+#define S_FEC_STATUS_1_3 3
+#define V_FEC_STATUS_1_3(x) ((x) << S_FEC_STATUS_1_3)
+#define F_FEC_STATUS_1_3 V_FEC_STATUS_1_3(1U)
+
+#define S_FEC_STATUS_1_2 2
+#define V_FEC_STATUS_1_2(x) ((x) << S_FEC_STATUS_1_2)
+#define F_FEC_STATUS_1_2 V_FEC_STATUS_1_2(1U)
+
+#define S_FEC_STATUS_1_1 1
+#define V_FEC_STATUS_1_1(x) ((x) << S_FEC_STATUS_1_1)
+#define F_FEC_STATUS_1_1 V_FEC_STATUS_1_1(1U)
+
+#define S_FEC_STATUS_1_0 0
+#define V_FEC_STATUS_1_0(x) ((x) << S_FEC_STATUS_1_0)
+#define F_FEC_STATUS_1_0 V_FEC_STATUS_1_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_1 0x38e28
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_1 0x38e2c
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_1 0x38e30
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_1 0x38e34
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_1 0x38e38
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_1 0x38e3c
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_2 0x38e40
+#define A_MAC_MTIP_RS_FEC_STATUS_0_2 0x38e44
+
+#define S_FEC_STATUS_2_14 14
+#define V_FEC_STATUS_2_14(x) ((x) << S_FEC_STATUS_2_14)
+#define F_FEC_STATUS_2_14 V_FEC_STATUS_2_14(1U)
+
+#define S_FEC_STATUS_2_11 8
+#define M_FEC_STATUS_2_11 0xfU
+#define V_FEC_STATUS_2_11(x) ((x) << S_FEC_STATUS_2_11)
+#define G_FEC_STATUS_2_11(x) (((x) >> S_FEC_STATUS_2_11) & M_FEC_STATUS_2_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED2_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED2_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED2_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED2_0 V_RS_FEC_DEGRADE_SER_RECEIVED2_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED2_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED2_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED2_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED2_1 V_RS_FEC_DEGRADE_SER_RECEIVED2_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED2_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED2_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED2_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED2_2 V_RS_FEC_DEGRADE_SER_RECEIVED2_2(1U)
+
+#define S_FEC_STATUS_2_4 4
+#define V_FEC_STATUS_2_4(x) ((x) << S_FEC_STATUS_2_4)
+#define F_FEC_STATUS_2_4 V_FEC_STATUS_2_4(1U)
+
+#define S_FEC_STATUS_2_3 3
+#define V_FEC_STATUS_2_3(x) ((x) << S_FEC_STATUS_2_3)
+#define F_FEC_STATUS_2_3 V_FEC_STATUS_2_3(1U)
+
+#define S_FEC_STATUS_2_2 2
+#define V_FEC_STATUS_2_2(x) ((x) << S_FEC_STATUS_2_2)
+#define F_FEC_STATUS_2_2 V_FEC_STATUS_2_2(1U)
+
+#define S_FEC_STATUS_2_1 1
+#define V_FEC_STATUS_2_1(x) ((x) << S_FEC_STATUS_2_1)
+#define F_FEC_STATUS_2_1 V_FEC_STATUS_2_1(1U)
+
+#define S_FEC_STATUS_2_0 0
+#define V_FEC_STATUS_2_0(x) ((x) << S_FEC_STATUS_2_0)
+#define F_FEC_STATUS_2_0 V_FEC_STATUS_2_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_2 0x38e48
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_2 0x38e4c
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_2 0x38e50
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_2 0x38e54
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_2 0x38e58
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_2 0x38e5c
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_3 0x38e60
+#define A_MAC_MTIP_RS_FEC_STATUS_0_3 0x38e64
+
+#define S_FEC_STATUS_3_14 14
+#define V_FEC_STATUS_3_14(x) ((x) << S_FEC_STATUS_3_14)
+#define F_FEC_STATUS_3_14 V_FEC_STATUS_3_14(1U)
+
+#define S_FEC_STATUS_3_11 8
+#define M_FEC_STATUS_3_11 0xfU
+#define V_FEC_STATUS_3_11(x) ((x) << S_FEC_STATUS_3_11)
+#define G_FEC_STATUS_3_11(x) (((x) >> S_FEC_STATUS_3_11) & M_FEC_STATUS_3_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED3_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED3_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED3_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED3_0 V_RS_FEC_DEGRADE_SER_RECEIVED3_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED3_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED3_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED3_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED3_1 V_RS_FEC_DEGRADE_SER_RECEIVED3_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED3_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED3_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED3_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED3_2 V_RS_FEC_DEGRADE_SER_RECEIVED3_2(1U)
+
+#define S_FEC_STATUS_3_4 4
+#define V_FEC_STATUS_3_4(x) ((x) << S_FEC_STATUS_3_4)
+#define F_FEC_STATUS_3_4 V_FEC_STATUS_3_4(1U)
+
+#define S_FEC_STATUS_3_3 3
+#define V_FEC_STATUS_3_3(x) ((x) << S_FEC_STATUS_3_3)
+#define F_FEC_STATUS_3_3 V_FEC_STATUS_3_3(1U)
+
+#define S_FEC_STATUS_3_2 2
+#define V_FEC_STATUS_3_2(x) ((x) << S_FEC_STATUS_3_2)
+#define F_FEC_STATUS_3_2 V_FEC_STATUS_3_2(1U)
+
+#define S_FEC_STATUS_3_1 1
+#define V_FEC_STATUS_3_1(x) ((x) << S_FEC_STATUS_3_1)
+#define F_FEC_STATUS_3_1 V_FEC_STATUS_3_1(1U)
+
+#define S_FEC_STATUS_3_0 0
+#define V_FEC_STATUS_3_0(x) ((x) << S_FEC_STATUS_3_0)
+#define F_FEC_STATUS_3_0 V_FEC_STATUS_3_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_3 0x38e68
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_3 0x38e6c
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_3 0x38e70
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_3 0x38e74
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_3 0x38e78
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_3 0x38e7c
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_4 0x38e80
+#define A_MAC_MTIP_RS_FEC_STATUS_0_4 0x38e84
+
+#define S_FEC_STATUS_4_14 14
+#define V_FEC_STATUS_4_14(x) ((x) << S_FEC_STATUS_4_14)
+#define F_FEC_STATUS_4_14 V_FEC_STATUS_4_14(1U)
+
+#define S_FEC_STATUS_4_11 8
+#define M_FEC_STATUS_4_11 0xfU
+#define V_FEC_STATUS_4_11(x) ((x) << S_FEC_STATUS_4_11)
+#define G_FEC_STATUS_4_11(x) (((x) >> S_FEC_STATUS_4_11) & M_FEC_STATUS_4_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED4_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED4_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED4_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED4_0 V_RS_FEC_DEGRADE_SER_RECEIVED4_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED4_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED4_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED4_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED4_1 V_RS_FEC_DEGRADE_SER_RECEIVED4_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED4_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED4_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED4_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED4_2 V_RS_FEC_DEGRADE_SER_RECEIVED4_2(1U)
+
+#define S_FEC_STATUS_4_4 4
+#define V_FEC_STATUS_4_4(x) ((x) << S_FEC_STATUS_4_4)
+#define F_FEC_STATUS_4_4 V_FEC_STATUS_4_4(1U)
+
+#define S_FEC_STATUS_4_3 3
+#define V_FEC_STATUS_4_3(x) ((x) << S_FEC_STATUS_4_3)
+#define F_FEC_STATUS_4_3 V_FEC_STATUS_4_3(1U)
+
+#define S_FEC_STATUS_4_2 2
+#define V_FEC_STATUS_4_2(x) ((x) << S_FEC_STATUS_4_2)
+#define F_FEC_STATUS_4_2 V_FEC_STATUS_4_2(1U)
+
+#define S_FEC_STATUS_4_1 1
+#define V_FEC_STATUS_4_1(x) ((x) << S_FEC_STATUS_4_1)
+#define F_FEC_STATUS_4_1 V_FEC_STATUS_4_1(1U)
+
+#define S_FEC_STATUS_4_0 0
+#define V_FEC_STATUS_4_0(x) ((x) << S_FEC_STATUS_4_0)
+#define F_FEC_STATUS_4_0 V_FEC_STATUS_4_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_4 0x38e88
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_4 0x38e8c
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_4 0x38e90
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_4 0x38e94
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_4 0x38e98
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_4 0x38e9c
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_5 0x38ea0
+#define A_MAC_MTIP_RS_FEC_STATUS_0_5 0x38ea4
+
+#define S_FEC_STATUS_5_14 14
+#define V_FEC_STATUS_5_14(x) ((x) << S_FEC_STATUS_5_14)
+#define F_FEC_STATUS_5_14 V_FEC_STATUS_5_14(1U)
+
+#define S_FEC_STATUS_5_11 8
+#define M_FEC_STATUS_5_11 0xfU
+#define V_FEC_STATUS_5_11(x) ((x) << S_FEC_STATUS_5_11)
+#define G_FEC_STATUS_5_11(x) (((x) >> S_FEC_STATUS_5_11) & M_FEC_STATUS_5_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED5_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED5_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED5_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED5_0 V_RS_FEC_DEGRADE_SER_RECEIVED5_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED5_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED5_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED5_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED5_1 V_RS_FEC_DEGRADE_SER_RECEIVED5_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED5_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED5_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED5_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED5_2 V_RS_FEC_DEGRADE_SER_RECEIVED5_2(1U)
+
+#define S_FEC_STATUS_5_4 4
+#define V_FEC_STATUS_5_4(x) ((x) << S_FEC_STATUS_5_4)
+#define F_FEC_STATUS_5_4 V_FEC_STATUS_5_4(1U)
+
+#define S_FEC_STATUS_5_3 3
+#define V_FEC_STATUS_5_3(x) ((x) << S_FEC_STATUS_5_3)
+#define F_FEC_STATUS_5_3 V_FEC_STATUS_5_3(1U)
+
+#define S_FEC_STATUS_5_2 2
+#define V_FEC_STATUS_5_2(x) ((x) << S_FEC_STATUS_5_2)
+#define F_FEC_STATUS_5_2 V_FEC_STATUS_5_2(1U)
+
+#define S_FEC_STATUS_5_1 1
+#define V_FEC_STATUS_5_1(x) ((x) << S_FEC_STATUS_5_1)
+#define F_FEC_STATUS_5_1 V_FEC_STATUS_5_1(1U)
+
+#define S_FEC_STATUS_5_0 0
+#define V_FEC_STATUS_5_0(x) ((x) << S_FEC_STATUS_5_0)
+#define F_FEC_STATUS_5_0 V_FEC_STATUS_5_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_5 0x38ea8
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_5 0x38eac
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_5 0x38eb0
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_5 0x38eb4
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_5 0x38eb8
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_5 0x38ebc
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_6 0x38ec0
+#define A_MAC_MTIP_RS_FEC_STATUS_0_6 0x38ec4
+
+#define S_FEC_STATUS_6_14 14
+#define V_FEC_STATUS_6_14(x) ((x) << S_FEC_STATUS_6_14)
+#define F_FEC_STATUS_6_14 V_FEC_STATUS_6_14(1U)
+
+#define S_FEC_STATUS_6_11 8
+#define M_FEC_STATUS_6_11 0xfU
+#define V_FEC_STATUS_6_11(x) ((x) << S_FEC_STATUS_6_11)
+#define G_FEC_STATUS_6_11(x) (((x) >> S_FEC_STATUS_6_11) & M_FEC_STATUS_6_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED6_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED6_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED6_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED6_0 V_RS_FEC_DEGRADE_SER_RECEIVED6_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED6_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED6_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED6_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED6_1 V_RS_FEC_DEGRADE_SER_RECEIVED6_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED6_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED6_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED6_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED6_2 V_RS_FEC_DEGRADE_SER_RECEIVED6_2(1U)
+
+#define S_FEC_STATUS_6_4 4
+#define V_FEC_STATUS_6_4(x) ((x) << S_FEC_STATUS_6_4)
+#define F_FEC_STATUS_6_4 V_FEC_STATUS_6_4(1U)
+
+#define S_FEC_STATUS_6_3 3
+#define V_FEC_STATUS_6_3(x) ((x) << S_FEC_STATUS_6_3)
+#define F_FEC_STATUS_6_3 V_FEC_STATUS_6_3(1U)
+
+#define S_FEC_STATUS_6_2 2
+#define V_FEC_STATUS_6_2(x) ((x) << S_FEC_STATUS_6_2)
+#define F_FEC_STATUS_6_2 V_FEC_STATUS_6_2(1U)
+
+#define S_FEC_STATUS_6_1 1
+#define V_FEC_STATUS_6_1(x) ((x) << S_FEC_STATUS_6_1)
+#define F_FEC_STATUS_6_1 V_FEC_STATUS_6_1(1U)
+
+#define S_FEC_STATUS_6_0 0
+#define V_FEC_STATUS_6_0(x) ((x) << S_FEC_STATUS_6_0)
+#define F_FEC_STATUS_6_0 V_FEC_STATUS_6_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_6 0x38ec8
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_6 0x38ecc
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_6 0x38ed0
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_6 0x38ed4
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_6 0x38ed8
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_6 0x38edc
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_7 0x38ee0
+#define A_MAC_MTIP_RS_FEC_STATUS_0_7 0x38ee4
+
+#define S_FEC_STATUS_7_14 14
+#define V_FEC_STATUS_7_14(x) ((x) << S_FEC_STATUS_7_14)
+#define F_FEC_STATUS_7_14 V_FEC_STATUS_7_14(1U)
+
+#define S_FEC_STATUS_7_11 8
+#define M_FEC_STATUS_7_11 0xfU
+#define V_FEC_STATUS_7_11(x) ((x) << S_FEC_STATUS_7_11)
+#define G_FEC_STATUS_7_11(x) (((x) >> S_FEC_STATUS_7_11) & M_FEC_STATUS_7_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED7_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED7_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED7_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED7_0 V_RS_FEC_DEGRADE_SER_RECEIVED7_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED7_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED7_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED7_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED7_1 V_RS_FEC_DEGRADE_SER_RECEIVED7_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED7_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED7_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED7_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED7_2 V_RS_FEC_DEGRADE_SER_RECEIVED7_2(1U)
+
+#define S_FEC_STATUS_7_4 4
+#define V_FEC_STATUS_7_4(x) ((x) << S_FEC_STATUS_7_4)
+#define F_FEC_STATUS_7_4 V_FEC_STATUS_7_4(1U)
+
+#define S_FEC_STATUS_7_3 3
+#define V_FEC_STATUS_7_3(x) ((x) << S_FEC_STATUS_7_3)
+#define F_FEC_STATUS_7_3 V_FEC_STATUS_7_3(1U)
+
+#define S_FEC_STATUS_7_2 2
+#define V_FEC_STATUS_7_2(x) ((x) << S_FEC_STATUS_7_2)
+#define F_FEC_STATUS_7_2 V_FEC_STATUS_7_2(1U)
+
+#define S_FEC_STATUS_7_1 1
+#define V_FEC_STATUS_7_1(x) ((x) << S_FEC_STATUS_7_1)
+#define F_FEC_STATUS_7_1 V_FEC_STATUS_7_1(1U)
+
+#define S_FEC_STATUS_7_0 0
+#define V_FEC_STATUS_7_0(x) ((x) << S_FEC_STATUS_7_0)
+#define F_FEC_STATUS_7_0 V_FEC_STATUS_7_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_7 0x38ee8
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_7 0x38eec
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_7 0x38ef0
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_7 0x38ef4
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_7 0x38ef8
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_7 0x38efc
+/*
+ * HISER_* registers: 16-bit codeword-count / threshold / time window values.
+ * "HISER" presumably corresponds to the RS-FEC hi_ser (high symbol error
+ * rate) monitor of IEEE 802.3 Clause 91 -- confirm against the MTIP databook.
+ */
+#define A_MAC_MTIP_RS_FEC_HISER_CW 0x38f00
+
+#define S_HISER_CW 0
+#define M_HISER_CW 0xffffU
+#define V_HISER_CW(x) ((x) << S_HISER_CW)
+#define G_HISER_CW(x) (((x) >> S_HISER_CW) & M_HISER_CW)
+
+#define A_MAC_MTIP_RS_FEC_HISER_THRESH 0x38f04
+
+#define S_HISER_THRESH 0
+#define M_HISER_THRESH 0xffffU
+#define V_HISER_THRESH(x) ((x) << S_HISER_THRESH)
+#define G_HISER_THRESH(x) (((x) >> S_HISER_THRESH) & M_HISER_THRESH)
+
+#define A_MAC_MTIP_RS_FEC_HISER_TIME 0x38f08
+
+#define S_HISER_TIME 0
+#define M_HISER_TIME 0xffffU
+#define V_HISER_TIME(x) ((x) << S_HISER_TIME)
+#define G_HISER_TIME(x) (((x) >> S_HISER_TIME) & M_HISER_TIME)
+
+#define A_MAC_MTIP_RS_DEGRADE_SET_CW 0x38f10
+
+#define S_DEGRADE_SET_CW 0
+#define M_DEGRADE_SET_CW 0xffffU
+#define V_DEGRADE_SET_CW(x) ((x) << S_DEGRADE_SET_CW)
+#define G_DEGRADE_SET_CW(x) (((x) >> S_DEGRADE_SET_CW) & M_DEGRADE_SET_CW)
+
+#define A_MAC_MTIP_RS_DEGRADE_SET_CW_HI 0x38f14
+
+#define S_DEGRADE_SET_CW_HI 0
+#define M_DEGRADE_SET_CW_HI 0xffffU
+#define V_DEGRADE_SET_CW_HI(x) ((x) << S_DEGRADE_SET_CW_HI)
+#define G_DEGRADE_SET_CW_HI(x) (((x) >> S_DEGRADE_SET_CW_HI) & M_DEGRADE_SET_CW_HI)
+
+#define A_MAC_MTIP_RS_DEGRADE_SET_THRESH 0x38f18
+
+#define S_DEGRADE_SET_THRESH 0
+#define M_DEGRADE_SET_THRESH 0xffffU
+#define V_DEGRADE_SET_THRESH(x) ((x) << S_DEGRADE_SET_THRESH)
+#define G_DEGRADE_SET_THRESH(x) (((x) >> S_DEGRADE_SET_THRESH) & M_DEGRADE_SET_THRESH)
+
+#define A_MAC_MTIP_RS_DEGRADE_SET_THRESH_HI 0x38f1c
+
+#define S_DEGRADE_SET_THRESH_HI 0
+#define M_DEGRADE_SET_THRESH_HI 0xffffU
+#define V_DEGRADE_SET_THRESH_HI(x) ((x) << S_DEGRADE_SET_THRESH_HI)
+#define G_DEGRADE_SET_THRESH_HI(x) (((x) >> S_DEGRADE_SET_THRESH_HI) & M_DEGRADE_SET_THRESH_HI)
+
+/*
+ * NOTE(review): register and field names in this group are inconsistent as
+ * generated -- e.g. the fields of A_MAC_MTIP_RS_DEGRADE_CLEAR are named
+ * S_DEGRADE_SET_CLEAR, and A_MAC_MTIP_RS_DEGRADE_SET_CLEAR_HI pairs with
+ * A_MAC_MTIP_RS_DEGRADE_CLEAR_THRESH.  The names come straight from the
+ * register-generation tool; keep them as-is so references elsewhere match.
+ */
+#define A_MAC_MTIP_RS_DEGRADE_CLEAR 0x38f20
+
+#define S_DEGRADE_SET_CLEAR 0
+#define M_DEGRADE_SET_CLEAR 0xffffU
+#define V_DEGRADE_SET_CLEAR(x) ((x) << S_DEGRADE_SET_CLEAR)
+#define G_DEGRADE_SET_CLEAR(x) (((x) >> S_DEGRADE_SET_CLEAR) & M_DEGRADE_SET_CLEAR)
+
+#define A_MAC_MTIP_RS_DEGRADE_SET_CLEAR_HI 0x38f24
+
+#define S_DEGRADE_SET_CLEAR_HI 0
+#define M_DEGRADE_SET_CLEAR_HI 0xffffU
+#define V_DEGRADE_SET_CLEAR_HI(x) ((x) << S_DEGRADE_SET_CLEAR_HI)
+#define G_DEGRADE_SET_CLEAR_HI(x) (((x) >> S_DEGRADE_SET_CLEAR_HI) & M_DEGRADE_SET_CLEAR_HI)
+
+#define A_MAC_MTIP_RS_DEGRADE_CLEAR_THRESH 0x38f28
+
+#define S_DEGRADE_SET_CLEAR_THRESH 0
+#define M_DEGRADE_SET_CLEAR_THRESH 0xffffU
+#define V_DEGRADE_SET_CLEAR_THRESH(x) ((x) << S_DEGRADE_SET_CLEAR_THRESH)
+#define G_DEGRADE_SET_CLEAR_THRESH(x) (((x) >> S_DEGRADE_SET_CLEAR_THRESH) & M_DEGRADE_SET_CLEAR_THRESH)
+
+#define A_MAC_MTIP_RS_DEGRADE_SET_CLEAR_THRESH_HI 0x38f2c
+
+#define S_DEGRADE_SET_CLEAR_THRESH_HI 0
+#define M_DEGRADE_SET_CLEAR_THRESH_HI 0xffffU
+#define V_DEGRADE_SET_CLEAR_THRESH_HI(x) ((x) << S_DEGRADE_SET_CLEAR_THRESH_HI)
+#define G_DEGRADE_SET_CLEAR_THRESH_HI(x) (((x) >> S_DEGRADE_SET_CLEAR_THRESH_HI) & M_DEGRADE_SET_CLEAR_THRESH_HI)
+
+/*
+ * Per-lane VLn_0/VLn_1 register pairs, 16 lanes at consecutive 8-byte
+ * strides (0x38f80 + 8*n).  "VL" presumably = virtual lane (PCS alignment
+ * markers) -- confirm against the MTIP RS-FEC databook.
+ */
+#define A_MAC_MTIP_RS_VL0_0 0x38f80
+#define A_MAC_MTIP_RS_VL0_1 0x38f84
+#define A_MAC_MTIP_RS_VL1_0 0x38f88
+#define A_MAC_MTIP_RS_VL1_1 0x38f8c
+#define A_MAC_MTIP_RS_VL2_0 0x38f90
+#define A_MAC_MTIP_RS_VL2_1 0x38f94
+#define A_MAC_MTIP_RS_VL3_0 0x38f98
+#define A_MAC_MTIP_RS_VL3_1 0x38f9c
+#define A_MAC_MTIP_RS_VL4_0 0x38fa0
+#define A_MAC_MTIP_RS_VL4_1 0x38fa4
+#define A_MAC_MTIP_RS_VL5_0 0x38fa8
+#define A_MAC_MTIP_RS_VL5_1 0x38fac
+#define A_MAC_MTIP_RS_VL6_0 0x38fb0
+#define A_MAC_MTIP_RS_VL6_1 0x38fb4
+#define A_MAC_MTIP_RS_VL7_0 0x38fb8
+#define A_MAC_MTIP_RS_VL7_1 0x38fbc
+#define A_MAC_MTIP_RS_VL8_0 0x38fc0
+#define A_MAC_MTIP_RS_VL8_1 0x38fc4
+#define A_MAC_MTIP_RS_VL9_0 0x38fc8
+#define A_MAC_MTIP_RS_VL9_1 0x38fcc
+#define A_MAC_MTIP_RS_VL10_0 0x38fd0
+#define A_MAC_MTIP_RS_VL10_1 0x38fd4
+#define A_MAC_MTIP_RS_VL11_0 0x38fd8
+#define A_MAC_MTIP_RS_VL11_1 0x38fdc
+#define A_MAC_MTIP_RS_VL12_0 0x38fe0
+#define A_MAC_MTIP_RS_VL12_1 0x38fe4
+#define A_MAC_MTIP_RS_VL13_0 0x38fe8
+#define A_MAC_MTIP_RS_VL13_1 0x38fec
+#define A_MAC_MTIP_RS_VL14_0 0x38ff0
+#define A_MAC_MTIP_RS_VL14_1 0x38ff4
+#define A_MAC_MTIP_RS_VL15_0 0x38ff8
+#define A_MAC_MTIP_RS_VL15_1 0x38ffc
+#define A_MAC_MTIP_RS_FEC_SYMBLERR0_LO 0x39000
+#define A_MAC_MTIP_RS_FEC_SYMBLERR0_HI 0x39004
+#define A_MAC_MTIP_RS_FEC_SYMBLERR1_LO 0x39008
+#define A_MAC_MTIP_RS_FEC_SYMBLERR1_HI 0x3900c
+#define A_MAC_MTIP_RS_FEC_SYMBLERR2_LO 0x39010
+#define A_MAC_MTIP_RS_FEC_SYMBLERR2_HI 0x39014
+#define A_MAC_MTIP_RS_FEC_SYMBLERR3_LO 0x39018
+#define A_MAC_MTIP_RS_FEC_SYMBLERR3_HI 0x3901c
+#define A_MAC_MTIP_RS_FEC_SYMBLERR4_LO 0x39020
+
+#define S_RS_FEC_SYMBLERR4_LO 0
+#define V_RS_FEC_SYMBLERR4_LO(x) ((x) << S_RS_FEC_SYMBLERR4_LO)
+#define F_RS_FEC_SYMBLERR4_LO V_RS_FEC_SYMBLERR4_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR4_HI 0x39024
+
+#define S_RS_FEC_SYMBLERR4_HI 0
+#define V_RS_FEC_SYMBLERR4_HI(x) ((x) << S_RS_FEC_SYMBLERR4_HI)
+#define F_RS_FEC_SYMBLERR4_HI V_RS_FEC_SYMBLERR4_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR5_LO 0x39028
+
+#define S_RS_FEC_SYMBLERR5_LO 0
+#define V_RS_FEC_SYMBLERR5_LO(x) ((x) << S_RS_FEC_SYMBLERR5_LO)
+#define F_RS_FEC_SYMBLERR5_LO V_RS_FEC_SYMBLERR5_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR5_HI 0x3902c
+
+#define S_RS_FEC_SYMBLERR5_HI 0
+#define V_RS_FEC_SYMBLERR5_HI(x) ((x) << S_RS_FEC_SYMBLERR5_HI)
+#define F_RS_FEC_SYMBLERR5_HI V_RS_FEC_SYMBLERR5_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR6_LO 0x39030
+
+#define S_RS_FEC_SYMBLERR6_LO 0
+#define V_RS_FEC_SYMBLERR6_LO(x) ((x) << S_RS_FEC_SYMBLERR6_LO)
+#define F_RS_FEC_SYMBLERR6_LO V_RS_FEC_SYMBLERR6_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR6_HI 0x39034
+
+#define S_RS_FEC_SYMBLERR6_HI 0
+#define V_RS_FEC_SYMBLERR6_HI(x) ((x) << S_RS_FEC_SYMBLERR6_HI)
+#define F_RS_FEC_SYMBLERR6_HI V_RS_FEC_SYMBLERR6_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR7_LO 0x39038
+
+#define S_RS_FEC_SYMBLERR7_LO 0
+#define V_RS_FEC_SYMBLERR7_LO(x) ((x) << S_RS_FEC_SYMBLERR7_LO)
+#define F_RS_FEC_SYMBLERR7_LO V_RS_FEC_SYMBLERR7_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR7_HI 0x3903c
+
+#define S_RS_FEC_SYMBLERR7_HI 0
+#define V_RS_FEC_SYMBLERR7_HI(x) ((x) << S_RS_FEC_SYMBLERR7_HI)
+#define F_RS_FEC_SYMBLERR7_HI V_RS_FEC_SYMBLERR7_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR8_LO 0x39040
+
+#define S_RS_FEC_SYMBLERR8_LO 0
+#define V_RS_FEC_SYMBLERR8_LO(x) ((x) << S_RS_FEC_SYMBLERR8_LO)
+#define F_RS_FEC_SYMBLERR8_LO V_RS_FEC_SYMBLERR8_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR8_HI 0x39044
+
+#define S_RS_FEC_SYMBLERR8_HI 0
+#define V_RS_FEC_SYMBLERR8_HI(x) ((x) << S_RS_FEC_SYMBLERR8_HI)
+#define F_RS_FEC_SYMBLERR8_HI V_RS_FEC_SYMBLERR8_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR9_LO 0x39048
+
+#define S_RS_FEC_SYMBLERR9_LO 0
+#define V_RS_FEC_SYMBLERR9_LO(x) ((x) << S_RS_FEC_SYMBLERR9_LO)
+#define F_RS_FEC_SYMBLERR9_LO V_RS_FEC_SYMBLERR9_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR9_HI 0x3904c
+
+#define S_RS_FEC_SYMBLERR9_HI 0
+#define V_RS_FEC_SYMBLERR9_HI(x) ((x) << S_RS_FEC_SYMBLERR9_HI)
+#define F_RS_FEC_SYMBLERR9_HI V_RS_FEC_SYMBLERR9_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR10_LO 0x39050
+
+#define S_RS_FEC_SYMBLERR10_LO 0
+#define V_RS_FEC_SYMBLERR10_LO(x) ((x) << S_RS_FEC_SYMBLERR10_LO)
+#define F_RS_FEC_SYMBLERR10_LO V_RS_FEC_SYMBLERR10_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR10_HI 0x39054
+
+#define S_RS_FEC_SYMBLERR10_HI 0
+#define V_RS_FEC_SYMBLERR10_HI(x) ((x) << S_RS_FEC_SYMBLERR10_HI)
+#define F_RS_FEC_SYMBLERR10_HI V_RS_FEC_SYMBLERR10_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR11_LO 0x39058
+
+#define S_RS_FEC_SYMBLERR11_LO 0
+#define V_RS_FEC_SYMBLERR11_LO(x) ((x) << S_RS_FEC_SYMBLERR11_LO)
+#define F_RS_FEC_SYMBLERR11_LO V_RS_FEC_SYMBLERR11_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR11_HI 0x3905c
+
+#define S_RS_FEC_SYMBLERR11_HI 0
+#define V_RS_FEC_SYMBLERR11_HI(x) ((x) << S_RS_FEC_SYMBLERR11_HI)
+#define F_RS_FEC_SYMBLERR11_HI V_RS_FEC_SYMBLERR11_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR12_LO 0x39060
+
+#define S_RS_FEC_SYMBLERR12_LO 0
+#define V_RS_FEC_SYMBLERR12_LO(x) ((x) << S_RS_FEC_SYMBLERR12_LO)
+#define F_RS_FEC_SYMBLERR12_LO V_RS_FEC_SYMBLERR12_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR12_HI 0x39064
+
+#define S_RS_FEC_SYMBLERR12_HI 0
+#define V_RS_FEC_SYMBLERR12_HI(x) ((x) << S_RS_FEC_SYMBLERR12_HI)
+#define F_RS_FEC_SYMBLERR12_HI V_RS_FEC_SYMBLERR12_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR13_LO 0x39068
+
+#define S_RS_FEC_SYMBLERR13_LO 0
+#define V_RS_FEC_SYMBLERR13_LO(x) ((x) << S_RS_FEC_SYMBLERR13_LO)
+#define F_RS_FEC_SYMBLERR13_LO V_RS_FEC_SYMBLERR13_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR13_HI 0x3906c
+
+#define S_RS_FEC_SYMBLERR13_HI 0
+#define V_RS_FEC_SYMBLERR13_HI(x) ((x) << S_RS_FEC_SYMBLERR13_HI)
+#define F_RS_FEC_SYMBLERR13_HI V_RS_FEC_SYMBLERR13_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR14_LO 0x39070
+
+#define S_RS_FEC_SYMBLERR14_LO 0
+#define V_RS_FEC_SYMBLERR14_LO(x) ((x) << S_RS_FEC_SYMBLERR14_LO)
+#define F_RS_FEC_SYMBLERR14_LO V_RS_FEC_SYMBLERR14_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR14_HI 0x39074
+
+#define S_RS_FEC_SYMBLERR14_HI 0
+#define V_RS_FEC_SYMBLERR14_HI(x) ((x) << S_RS_FEC_SYMBLERR14_HI)
+#define F_RS_FEC_SYMBLERR14_HI V_RS_FEC_SYMBLERR14_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR15_LO 0x39078
+
+#define S_RS_FEC_SYMBLERR15_LO 0
+#define V_RS_FEC_SYMBLERR15_LO(x) ((x) << S_RS_FEC_SYMBLERR15_LO)
+#define F_RS_FEC_SYMBLERR15_LO V_RS_FEC_SYMBLERR15_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR15_HI 0x3907c
+
+#define S_RS_FEC_SYMBLERR15_HI 0
+#define V_RS_FEC_SYMBLERR15_HI(x) ((x) << S_RS_FEC_SYMBLERR15_HI)
+#define F_RS_FEC_SYMBLERR15_HI V_RS_FEC_SYMBLERR15_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_VENDOR_CONTROL 0x39080
+#define A_MAC_MTIP_RS_FEC_VENDOR_INFO_1 0x39084
+
+#define S_VENDOR_INFO_1_AMPS_LOCK 0
+#define V_VENDOR_INFO_1_AMPS_LOCK(x) ((x) << S_VENDOR_INFO_1_AMPS_LOCK)
+#define F_VENDOR_INFO_1_AMPS_LOCK V_VENDOR_INFO_1_AMPS_LOCK(1U)
+
+#define A_MAC_MTIP_RS_FEC_VENDOR_INFO_2 0x39088
+
+#define S_VENDOR_INFO_2_AMPS_LOCK 0
+#define M_VENDOR_INFO_2_AMPS_LOCK 0xffffU
+#define V_VENDOR_INFO_2_AMPS_LOCK(x) ((x) << S_VENDOR_INFO_2_AMPS_LOCK)
+#define G_VENDOR_INFO_2_AMPS_LOCK(x) (((x) >> S_VENDOR_INFO_2_AMPS_LOCK) & M_VENDOR_INFO_2_AMPS_LOCK)
+
+#define A_MAC_MTIP_RS_FEC_VENDOR_REVISION 0x3908c
+#define A_MAC_MTIP_RS_FEC_VENDOR_ALIGN_STATUS 0x39090
+
+#define S_RS_FEC_VENDOR_ALIGN_STATUS 0
+#define M_RS_FEC_VENDOR_ALIGN_STATUS 0xffffU
+#define V_RS_FEC_VENDOR_ALIGN_STATUS(x) ((x) << S_RS_FEC_VENDOR_ALIGN_STATUS)
+#define G_RS_FEC_VENDOR_ALIGN_STATUS(x) (((x) >> S_RS_FEC_VENDOR_ALIGN_STATUS) & M_RS_FEC_VENDOR_ALIGN_STATUS)
+
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_0 0x39100
+
+#define S_FEC74_FEC_ABILITY_0_B1 1
+#define V_FEC74_FEC_ABILITY_0_B1(x) ((x) << S_FEC74_FEC_ABILITY_0_B1)
+#define F_FEC74_FEC_ABILITY_0_B1 V_FEC74_FEC_ABILITY_0_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_0_B0 0
+#define V_FEC74_FEC_ABILITY_0_B0(x) ((x) << S_FEC74_FEC_ABILITY_0_B0)
+#define F_FEC74_FEC_ABILITY_0_B0 V_FEC74_FEC_ABILITY_0_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_0 0x39104
+
+#define S_FEC_ENABLE_ERROR_INDICATION 1
+#define V_FEC_ENABLE_ERROR_INDICATION(x) ((x) << S_FEC_ENABLE_ERROR_INDICATION)
+#define F_FEC_ENABLE_ERROR_INDICATION V_FEC_ENABLE_ERROR_INDICATION(1U)
+
+#define S_T7_FEC_ENABLE 0
+#define V_T7_FEC_ENABLE(x) ((x) << S_T7_FEC_ENABLE)
+#define F_T7_FEC_ENABLE V_T7_FEC_ENABLE(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_STATUS_0 0x39108
+
+#define S_FEC_LOCKED_1 1
+#define V_FEC_LOCKED_1(x) ((x) << S_FEC_LOCKED_1)
+#define F_FEC_LOCKED_1 V_FEC_LOCKED_1(1U)
+
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_0 0x3910c
+
+#define S_VL0_CCW_LO 0
+#define M_VL0_CCW_LO 0xffffU
+#define V_VL0_CCW_LO(x) ((x) << S_VL0_CCW_LO)
+#define G_VL0_CCW_LO(x) (((x) >> S_VL0_CCW_LO) & M_VL0_CCW_LO)
+
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_0 0x39110
+
+#define S_VL0_NCCW_LO 0
+#define M_VL0_NCCW_LO 0xffffU
+#define V_VL0_NCCW_LO(x) ((x) << S_VL0_NCCW_LO)
+#define G_VL0_NCCW_LO(x) (((x) >> S_VL0_NCCW_LO) & M_VL0_NCCW_LO)
+
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_0 0x39114
+
+#define S_VL1_CCW_LO 0
+#define M_VL1_CCW_LO 0xffffU
+#define V_VL1_CCW_LO(x) ((x) << S_VL1_CCW_LO)
+#define G_VL1_CCW_LO(x) (((x) >> S_VL1_CCW_LO) & M_VL1_CCW_LO)
+
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_0 0x39118
+
+#define S_VL1_NCCW_LO 0
+#define M_VL1_NCCW_LO 0xffffU
+#define V_VL1_NCCW_LO(x) ((x) << S_VL1_NCCW_LO)
+#define G_VL1_NCCW_LO(x) (((x) >> S_VL1_NCCW_LO) & M_VL1_NCCW_LO)
+
+#define A_MAC_MTIP_FEC74_COUNTER_HI_0 0x3911c
+
+#define S_COUNTER_HI 0
+#define M_COUNTER_HI 0xffffU
+#define V_COUNTER_HI(x) ((x) << S_COUNTER_HI)
+#define G_COUNTER_HI(x) (((x) >> S_COUNTER_HI) & M_COUNTER_HI)
+
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_1 0x39120
+
+#define S_FEC74_FEC_ABILITY_1_B1 1
+#define V_FEC74_FEC_ABILITY_1_B1(x) ((x) << S_FEC74_FEC_ABILITY_1_B1)
+#define F_FEC74_FEC_ABILITY_1_B1 V_FEC74_FEC_ABILITY_1_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_1_B0 0
+#define V_FEC74_FEC_ABILITY_1_B0(x) ((x) << S_FEC74_FEC_ABILITY_1_B0)
+#define F_FEC74_FEC_ABILITY_1_B0 V_FEC74_FEC_ABILITY_1_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_1 0x39124
+#define A_MAC_MTIP_FEC74_FEC_STATUS_1 0x39128
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_1 0x3912c
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_1 0x39130
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_1 0x39134
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_1 0x39138
+#define A_MAC_MTIP_FEC74_COUNTER_HI_1 0x3913c
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_2 0x39140
+
+#define S_FEC74_FEC_ABILITY_2_B1 1
+#define V_FEC74_FEC_ABILITY_2_B1(x) ((x) << S_FEC74_FEC_ABILITY_2_B1)
+#define F_FEC74_FEC_ABILITY_2_B1 V_FEC74_FEC_ABILITY_2_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_2_B0 0
+#define V_FEC74_FEC_ABILITY_2_B0(x) ((x) << S_FEC74_FEC_ABILITY_2_B0)
+#define F_FEC74_FEC_ABILITY_2_B0 V_FEC74_FEC_ABILITY_2_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_2 0x39144
+#define A_MAC_MTIP_FEC74_FEC_STATUS_2 0x39148
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_2 0x3914c
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_2 0x39150
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_2 0x39154
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_2 0x39158
+#define A_MAC_MTIP_FEC74_COUNTER_HI_2 0x3915c
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_3 0x39160
+
+#define S_FEC74_FEC_ABILITY_3_B1 1
+#define V_FEC74_FEC_ABILITY_3_B1(x) ((x) << S_FEC74_FEC_ABILITY_3_B1)
+#define F_FEC74_FEC_ABILITY_3_B1 V_FEC74_FEC_ABILITY_3_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_3_B0 0
+#define V_FEC74_FEC_ABILITY_3_B0(x) ((x) << S_FEC74_FEC_ABILITY_3_B0)
+#define F_FEC74_FEC_ABILITY_3_B0 V_FEC74_FEC_ABILITY_3_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_3 0x39164
+#define A_MAC_MTIP_FEC74_FEC_STATUS_3 0x39168
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_3 0x3916c
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_3 0x39170
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_3 0x39174
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_3 0x39178
+#define A_MAC_MTIP_FEC74_COUNTER_HI_3 0x3917c
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_4 0x39180
+
+#define S_FEC74_FEC_ABILITY_4_B1 1
+#define V_FEC74_FEC_ABILITY_4_B1(x) ((x) << S_FEC74_FEC_ABILITY_4_B1)
+#define F_FEC74_FEC_ABILITY_4_B1 V_FEC74_FEC_ABILITY_4_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_4_B0 0
+#define V_FEC74_FEC_ABILITY_4_B0(x) ((x) << S_FEC74_FEC_ABILITY_4_B0)
+#define F_FEC74_FEC_ABILITY_4_B0 V_FEC74_FEC_ABILITY_4_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_4 0x39184
+#define A_MAC_MTIP_FEC74_FEC_STATUS_4 0x39188
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_4 0x3918c
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_4 0x39190
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_4 0x39194
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_4 0x39198
+#define A_MAC_MTIP_FEC74_COUNTER_HI_4 0x3919c
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_5 0x391a0
+
+#define S_FEC74_FEC_ABILITY_5_B1 1
+#define V_FEC74_FEC_ABILITY_5_B1(x) ((x) << S_FEC74_FEC_ABILITY_5_B1)
+#define F_FEC74_FEC_ABILITY_5_B1 V_FEC74_FEC_ABILITY_5_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_5_B0 0
+#define V_FEC74_FEC_ABILITY_5_B0(x) ((x) << S_FEC74_FEC_ABILITY_5_B0)
+#define F_FEC74_FEC_ABILITY_5_B0 V_FEC74_FEC_ABILITY_5_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_5 0x391a4
+#define A_MAC_MTIP_FEC74_FEC_STATUS_5 0x391a8
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_5 0x391ac
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_5 0x391b0
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_5 0x391b4
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_5 0x391b8
+#define A_MAC_MTIP_FEC74_COUNTER_HI_5 0x391bc
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_6 0x391c0
+
+#define S_FEC74_FEC_ABILITY_6_B1 1
+#define V_FEC74_FEC_ABILITY_6_B1(x) ((x) << S_FEC74_FEC_ABILITY_6_B1)
+#define F_FEC74_FEC_ABILITY_6_B1 V_FEC74_FEC_ABILITY_6_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_6_B0 0
+#define V_FEC74_FEC_ABILITY_6_B0(x) ((x) << S_FEC74_FEC_ABILITY_6_B0)
+#define F_FEC74_FEC_ABILITY_6_B0 V_FEC74_FEC_ABILITY_6_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_6 0x391c4
+#define A_MAC_MTIP_FEC74_FEC_STATUS_6 0x391c8
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_6 0x391cc
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_6 0x391d0
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_6 0x391d4
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_6 0x391d8
+#define A_MAC_MTIP_FEC74_COUNTER_HI_6 0x391dc
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_7 0x391e0
+
+#define S_FEC74_FEC_ABILITY_7_B1 1
+#define V_FEC74_FEC_ABILITY_7_B1(x) ((x) << S_FEC74_FEC_ABILITY_7_B1)
+#define F_FEC74_FEC_ABILITY_7_B1 V_FEC74_FEC_ABILITY_7_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_7_B0 0
+#define V_FEC74_FEC_ABILITY_7_B0(x) ((x) << S_FEC74_FEC_ABILITY_7_B0)
+#define F_FEC74_FEC_ABILITY_7_B0 V_FEC74_FEC_ABILITY_7_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_7 0x391e4
+#define A_MAC_MTIP_FEC74_FEC_STATUS_7 0x391e8
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_7 0x391ec
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_7 0x391f0
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_7 0x391f4
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_7 0x391f8
+#define A_MAC_MTIP_FEC74_COUNTER_HI_7 0x391fc
+#define A_MAC_BEAN0_CTL 0x39200
+#define A_MAC_BEAN0_STATUS 0x39204
+#define A_MAC_BEAN0_ABILITY_0 0x39208
+
+#define S_BEAN0_REM_FAULT 13
+#define V_BEAN0_REM_FAULT(x) ((x) << S_BEAN0_REM_FAULT)
+#define F_BEAN0_REM_FAULT V_BEAN0_REM_FAULT(1U)
+
+#define A_MAC_BEAN0_ABILITY_1 0x3920c
+#define A_MAC_BEAN0_ABILITY_2 0x39210
+
+#define S_BEAN0_AB_2_15_12 12
+#define M_BEAN0_AB_2_15_12 0xfU
+#define V_BEAN0_AB_2_15_12(x) ((x) << S_BEAN0_AB_2_15_12)
+#define G_BEAN0_AB_2_15_12(x) (((x) >> S_BEAN0_AB_2_15_12) & M_BEAN0_AB_2_15_12)
+
+#define S_BEAN0_AB_2_11_0 0
+#define M_BEAN0_AB_2_11_0 0xfffU
+#define V_BEAN0_AB_2_11_0(x) ((x) << S_BEAN0_AB_2_11_0)
+#define G_BEAN0_AB_2_11_0(x) (((x) >> S_BEAN0_AB_2_11_0) & M_BEAN0_AB_2_11_0)
+
+#define A_MAC_BEAN0_REM_ABILITY_0 0x39214
+
+#define S_BEAN0_ABL_REM_FAULT 13
+#define V_BEAN0_ABL_REM_FAULT(x) ((x) << S_BEAN0_ABL_REM_FAULT)
+#define F_BEAN0_ABL_REM_FAULT V_BEAN0_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN0_REM_ABILITY_1 0x39218
+#define A_MAC_BEAN0_REM_ABILITY_2 0x3921c
+
+#define S_BEAN0_REM_AB_15_12 12
+#define M_BEAN0_REM_AB_15_12 0xfU
+#define V_BEAN0_REM_AB_15_12(x) ((x) << S_BEAN0_REM_AB_15_12)
+#define G_BEAN0_REM_AB_15_12(x) (((x) >> S_BEAN0_REM_AB_15_12) & M_BEAN0_REM_AB_15_12)
+
+#define S_BEAN0_REM_AB_11_0 0
+#define M_BEAN0_REM_AB_11_0 0xfffU
+#define V_BEAN0_REM_AB_11_0(x) ((x) << S_BEAN0_REM_AB_11_0)
+#define G_BEAN0_REM_AB_11_0(x) (((x) >> S_BEAN0_REM_AB_11_0) & M_BEAN0_REM_AB_11_0)
+
+#define A_MAC_BEAN0_MS_COUNT 0x39220
+#define A_MAC_BEAN0_XNP_0 0x39224
+#define A_MAC_BEAN0_XNP_1 0x39228
+#define A_MAC_BEAN0_XNP_2 0x3922c
+#define A_MAC_LP_BEAN0_XNP_0 0x39230
+#define A_MAC_LP_BEAN0_XNP_1 0x39234
+#define A_MAC_LP_BEAN0_XNP_2 0x39238
+#define A_MAC_BEAN0_ETH_STATUS 0x3923c
+
+#define S_5GKR 15
+#define V_5GKR(x) ((x) << S_5GKR)
+#define F_5GKR V_5GKR(1U)
+
+#define S_2P5GKX 14
+#define V_2P5GKX(x) ((x) << S_2P5GKX)
+#define F_2P5GKX V_2P5GKX(1U)
+
+#define S_25G_KR 13
+#define V_25G_KR(x) ((x) << S_25G_KR)
+#define F_25G_KR V_25G_KR(1U)
+
+#define S_25G_KR_S 12
+#define V_25G_KR_S(x) ((x) << S_25G_KR_S)
+#define F_25G_KR_S V_25G_KR_S(1U)
+
+#define S_RS_FEC 7
+#define V_RS_FEC(x) ((x) << S_RS_FEC)
+#define F_RS_FEC V_RS_FEC(1U)
+
+#define S_FC_FEC 4
+#define V_FC_FEC(x) ((x) << S_FC_FEC)
+#define F_FC_FEC V_FC_FEC(1U)
+
+#define A_MAC_BEAN0_ETH_STATUS_2 0x39240
+
+#define S_RS_FEC_NEGOTIATED 6
+#define V_RS_FEC_NEGOTIATED(x) ((x) << S_RS_FEC_NEGOTIATED)
+#define F_RS_FEC_NEGOTIATED V_RS_FEC_NEGOTIATED(1U)
+
+#define S_400GKR4CR4 5
+#define V_400GKR4CR4(x) ((x) << S_400GKR4CR4)
+#define F_400GKR4CR4 V_400GKR4CR4(1U)
+
+#define S_200GKR2CR2 4
+#define V_200GKR2CR2(x) ((x) << S_200GKR2CR2)
+#define F_200GKR2CR2 V_200GKR2CR2(1U)
+
+#define S_100GKR1CR1 3
+#define V_100GKR1CR1(x) ((x) << S_100GKR1CR1)
+#define F_100GKR1CR1 V_100GKR1CR1(1U)
+
+#define S_200GKR4CR4 2
+#define V_200GKR4CR4(x) ((x) << S_200GKR4CR4)
+#define F_200GKR4CR4 V_200GKR4CR4(1U)
+
+#define S_100GKR2CR2 1
+#define V_100GKR2CR2(x) ((x) << S_100GKR2CR2)
+#define F_100GKR2CR2 V_100GKR2CR2(1U)
+
+#define S_50GKRCR 0
+#define V_50GKRCR(x) ((x) << S_50GKRCR)
+#define F_50GKRCR V_50GKRCR(1U)
+
+#define A_MAC_BEAN1_CTL 0x39300
+#define A_MAC_BEAN1_STATUS 0x39304
+#define A_MAC_BEAN1_ABILITY_0 0x39308
+
+#define S_BEAN1_REM_FAULT 13
+#define V_BEAN1_REM_FAULT(x) ((x) << S_BEAN1_REM_FAULT)
+#define F_BEAN1_REM_FAULT V_BEAN1_REM_FAULT(1U)
+
+#define A_MAC_BEAN1_ABILITY_1 0x3930c
+#define A_MAC_BEAN1_ABILITY_2 0x39310
+
+#define S_BEAN1_AB_2_15_12 12
+#define M_BEAN1_AB_2_15_12 0xfU
+#define V_BEAN1_AB_2_15_12(x) ((x) << S_BEAN1_AB_2_15_12)
+#define G_BEAN1_AB_2_15_12(x) (((x) >> S_BEAN1_AB_2_15_12) & M_BEAN1_AB_2_15_12)
+
+#define S_BEAN1_AB_2_11_0 0
+#define M_BEAN1_AB_2_11_0 0xfffU
+#define V_BEAN1_AB_2_11_0(x) ((x) << S_BEAN1_AB_2_11_0)
+#define G_BEAN1_AB_2_11_0(x) (((x) >> S_BEAN1_AB_2_11_0) & M_BEAN1_AB_2_11_0)
+
+#define A_MAC_BEAN1_REM_ABILITY_0 0x39314
+
+#define S_BEAN1_ABL_REM_FAULT 13
+#define V_BEAN1_ABL_REM_FAULT(x) ((x) << S_BEAN1_ABL_REM_FAULT)
+#define F_BEAN1_ABL_REM_FAULT V_BEAN1_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN1_REM_ABILITY_1 0x39318
+#define A_MAC_BEAN1_REM_ABILITY_2 0x3931c
+
+#define S_BEAN1_REM_AB_15_12 12
+#define M_BEAN1_REM_AB_15_12 0xfU
+#define V_BEAN1_REM_AB_15_12(x) ((x) << S_BEAN1_REM_AB_15_12)
+#define G_BEAN1_REM_AB_15_12(x) (((x) >> S_BEAN1_REM_AB_15_12) & M_BEAN1_REM_AB_15_12)
+
+#define S_BEAN1_REM_AB_11_0 0
+#define M_BEAN1_REM_AB_11_0 0xfffU
+#define V_BEAN1_REM_AB_11_0(x) ((x) << S_BEAN1_REM_AB_11_0)
+#define G_BEAN1_REM_AB_11_0(x) (((x) >> S_BEAN1_REM_AB_11_0) & M_BEAN1_REM_AB_11_0)
+
+#define A_MAC_BEAN1_MS_COUNT 0x39320
+#define A_MAC_BEAN1_XNP_0 0x39324
+#define A_MAC_BEAN1_XNP_1 0x39328
+#define A_MAC_BEAN1_XNP_2 0x3932c
+#define A_MAC_LP_BEAN1_XNP_0 0x39330
+#define A_MAC_LP_BEAN1_XNP_1 0x39334
+#define A_MAC_LP_BEAN1_XNP_2 0x39338
+#define A_MAC_BEAN1_ETH_STATUS 0x3933c
+#define A_MAC_BEAN1_ETH_STATUS_2 0x39340
+#define A_MAC_BEAN2_CTL 0x39400
+#define A_MAC_BEAN2_STATUS 0x39404
+#define A_MAC_BEAN2_ABILITY_0 0x39408
+
+#define S_BEAN2_REM_FAULT 13
+#define V_BEAN2_REM_FAULT(x) ((x) << S_BEAN2_REM_FAULT)
+#define F_BEAN2_REM_FAULT V_BEAN2_REM_FAULT(1U)
+
+#define A_MAC_BEAN2_ABILITY_1 0x3940c
+#define A_MAC_BEAN2_ABILITY_2 0x39410
+
+#define S_BEAN2_AB_2_15_12 12
+#define M_BEAN2_AB_2_15_12 0xfU
+#define V_BEAN2_AB_2_15_12(x) ((x) << S_BEAN2_AB_2_15_12)
+#define G_BEAN2_AB_2_15_12(x) (((x) >> S_BEAN2_AB_2_15_12) & M_BEAN2_AB_2_15_12)
+
+#define S_BEAN2_AB_2_11_0 0
+#define M_BEAN2_AB_2_11_0 0xfffU
+#define V_BEAN2_AB_2_11_0(x) ((x) << S_BEAN2_AB_2_11_0)
+#define G_BEAN2_AB_2_11_0(x) (((x) >> S_BEAN2_AB_2_11_0) & M_BEAN2_AB_2_11_0)
+
+#define A_MAC_BEAN2_REM_ABILITY_0 0x39414
+
+#define S_BEAN2_ABL_REM_FAULT 13
+#define V_BEAN2_ABL_REM_FAULT(x) ((x) << S_BEAN2_ABL_REM_FAULT)
+#define F_BEAN2_ABL_REM_FAULT V_BEAN2_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN2_REM_ABILITY_1 0x39418
+#define A_MAC_BEAN2_REM_ABILITY_2 0x3941c
+
+#define S_BEAN2_REM_AB_15_12 12
+#define M_BEAN2_REM_AB_15_12 0xfU
+#define V_BEAN2_REM_AB_15_12(x) ((x) << S_BEAN2_REM_AB_15_12)
+#define G_BEAN2_REM_AB_15_12(x) (((x) >> S_BEAN2_REM_AB_15_12) & M_BEAN2_REM_AB_15_12)
+
+#define S_BEAN2_REM_AB_11_0 0
+#define M_BEAN2_REM_AB_11_0 0xfffU
+#define V_BEAN2_REM_AB_11_0(x) ((x) << S_BEAN2_REM_AB_11_0)
+#define G_BEAN2_REM_AB_11_0(x) (((x) >> S_BEAN2_REM_AB_11_0) & M_BEAN2_REM_AB_11_0)
+
+#define A_MAC_BEAN2_MS_COUNT 0x39420
+#define A_MAC_BEAN2_XNP_0 0x39424
+#define A_MAC_BEAN2_XNP_1 0x39428
+#define A_MAC_BEAN2_XNP_2 0x3942c
+#define A_MAC_LP_BEAN2_XNP_0 0x39430
+#define A_MAC_LP_BEAN2_XNP_1 0x39434
+#define A_MAC_LP_BEAN2_XNP_2 0x39438
+#define A_MAC_BEAN2_ETH_STATUS 0x3943c
+#define A_MAC_BEAN2_ETH_STATUS_2 0x39440
+#define A_MAC_BEAN3_CTL 0x39500
+#define A_MAC_BEAN3_STATUS 0x39504
+#define A_MAC_BEAN3_ABILITY_0 0x39508
+
+#define S_BEAN3_REM_FAULT 13
+#define V_BEAN3_REM_FAULT(x) ((x) << S_BEAN3_REM_FAULT)
+#define F_BEAN3_REM_FAULT V_BEAN3_REM_FAULT(1U)
+
+#define A_MAC_BEAN3_ABILITY_1 0x3950c
+#define A_MAC_BEAN3_ABILITY_2 0x39510
+
+#define S_BEAN3_AB_2_15_12 12
+#define M_BEAN3_AB_2_15_12 0xfU
+#define V_BEAN3_AB_2_15_12(x) ((x) << S_BEAN3_AB_2_15_12)
+#define G_BEAN3_AB_2_15_12(x) (((x) >> S_BEAN3_AB_2_15_12) & M_BEAN3_AB_2_15_12)
+
+#define S_BEAN3_AB_2_11_0 0
+#define M_BEAN3_AB_2_11_0 0xfffU
+#define V_BEAN3_AB_2_11_0(x) ((x) << S_BEAN3_AB_2_11_0)
+#define G_BEAN3_AB_2_11_0(x) (((x) >> S_BEAN3_AB_2_11_0) & M_BEAN3_AB_2_11_0)
+
+#define A_MAC_BEAN3_REM_ABILITY_0 0x39514
+
+#define S_BEAN3_ABL_REM_FAULT 13
+#define V_BEAN3_ABL_REM_FAULT(x) ((x) << S_BEAN3_ABL_REM_FAULT)
+#define F_BEAN3_ABL_REM_FAULT V_BEAN3_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN3_REM_ABILITY_1 0x39518
+#define A_MAC_BEAN3_REM_ABILITY_2 0x3951c
+
+#define S_BEAN3_REM_AB_15_12 12
+#define M_BEAN3_REM_AB_15_12 0xfU
+#define V_BEAN3_REM_AB_15_12(x) ((x) << S_BEAN3_REM_AB_15_12)
+#define G_BEAN3_REM_AB_15_12(x) (((x) >> S_BEAN3_REM_AB_15_12) & M_BEAN3_REM_AB_15_12)
+
+#define S_BEAN3_REM_AB_11_0 0
+#define M_BEAN3_REM_AB_11_0 0xfffU
+#define V_BEAN3_REM_AB_11_0(x) ((x) << S_BEAN3_REM_AB_11_0)
+#define G_BEAN3_REM_AB_11_0(x) (((x) >> S_BEAN3_REM_AB_11_0) & M_BEAN3_REM_AB_11_0)
+
+#define A_MAC_BEAN3_MS_COUNT 0x39520
+#define A_MAC_BEAN3_XNP_0 0x39524
+#define A_MAC_BEAN3_XNP_1 0x39528
+#define A_MAC_BEAN3_XNP_2 0x3952c
+#define A_MAC_LP_BEAN3_XNP_0 0x39530
+#define A_MAC_LP_BEAN3_XNP_1 0x39534
+#define A_MAC_LP_BEAN3_XNP_2 0x39538
+#define A_MAC_BEAN3_ETH_STATUS 0x3953c
+#define A_MAC_BEAN3_ETH_STATUS_2 0x39540
+#define A_MAC_BEAN4_CTL 0x39600
+#define A_MAC_BEAN4_STATUS 0x39604
+#define A_MAC_BEAN4_ABILITY_0 0x39608
+
+#define S_BEAN4_REM_FAULT 13
+#define V_BEAN4_REM_FAULT(x) ((x) << S_BEAN4_REM_FAULT)
+#define F_BEAN4_REM_FAULT V_BEAN4_REM_FAULT(1U)
+
+#define A_MAC_BEAN4_ABILITY_1 0x3960c
+#define A_MAC_BEAN4_ABILITY_2 0x39610
+
+#define S_BEAN4_AB_2_15_12 12
+#define M_BEAN4_AB_2_15_12 0xfU
+#define V_BEAN4_AB_2_15_12(x) ((x) << S_BEAN4_AB_2_15_12)
+#define G_BEAN4_AB_2_15_12(x) (((x) >> S_BEAN4_AB_2_15_12) & M_BEAN4_AB_2_15_12)
+
+#define S_BEAN4_AB_2_11_0 0
+#define M_BEAN4_AB_2_11_0 0xfffU
+#define V_BEAN4_AB_2_11_0(x) ((x) << S_BEAN4_AB_2_11_0)
+#define G_BEAN4_AB_2_11_0(x) (((x) >> S_BEAN4_AB_2_11_0) & M_BEAN4_AB_2_11_0)
+
+#define A_MAC_BEAN4_REM_ABILITY_0 0x39614
+
+#define S_BEAN4_ABL_REM_FAULT 13
+#define V_BEAN4_ABL_REM_FAULT(x) ((x) << S_BEAN4_ABL_REM_FAULT)
+#define F_BEAN4_ABL_REM_FAULT V_BEAN4_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN4_REM_ABILITY_1 0x39618
+#define A_MAC_BEAN4_REM_ABILITY_2 0x3961c
+
+#define S_BEAN4_REM_AB_15_12 12
+#define M_BEAN4_REM_AB_15_12 0xfU
+#define V_BEAN4_REM_AB_15_12(x) ((x) << S_BEAN4_REM_AB_15_12)
+#define G_BEAN4_REM_AB_15_12(x) (((x) >> S_BEAN4_REM_AB_15_12) & M_BEAN4_REM_AB_15_12)
+
+#define S_BEAN4_REM_AB_11_0 0
+#define M_BEAN4_REM_AB_11_0 0xfffU
+#define V_BEAN4_REM_AB_11_0(x) ((x) << S_BEAN4_REM_AB_11_0)
+#define G_BEAN4_REM_AB_11_0(x) (((x) >> S_BEAN4_REM_AB_11_0) & M_BEAN4_REM_AB_11_0)
+
+#define A_MAC_BEAN4_MS_COUNT 0x39620
+#define A_MAC_BEAN4_XNP_0 0x39624
+#define A_MAC_BEAN4_XNP_1 0x39628
+#define A_MAC_BEAN4_XNP_2 0x3962c
+#define A_MAC_LP_BEAN4_XNP_0 0x39630
+#define A_MAC_LP_BEAN4_XNP_1 0x39634
+#define A_MAC_LP_BEAN4_XNP_2 0x39638
+#define A_MAC_BEAN4_ETH_STATUS 0x3963c
+#define A_MAC_BEAN4_ETH_STATUS_2 0x39640
+#define A_MAC_BEAN5_CTL 0x39700
+#define A_MAC_BEAN5_STATUS 0x39704
+#define A_MAC_BEAN5_ABILITY_0 0x39708
+
+#define S_BEAN5_REM_FAULT 13
+#define V_BEAN5_REM_FAULT(x) ((x) << S_BEAN5_REM_FAULT)
+#define F_BEAN5_REM_FAULT V_BEAN5_REM_FAULT(1U)
+
+#define A_MAC_BEAN5_ABILITY_1 0x3970c
+#define A_MAC_BEAN5_ABILITY_2 0x39710
+
+#define S_BEAN5_AB_2_15_12 12
+#define M_BEAN5_AB_2_15_12 0xfU
+#define V_BEAN5_AB_2_15_12(x) ((x) << S_BEAN5_AB_2_15_12)
+#define G_BEAN5_AB_2_15_12(x) (((x) >> S_BEAN5_AB_2_15_12) & M_BEAN5_AB_2_15_12)
+
+#define S_BEAN5_AB_2_11_0 0
+#define M_BEAN5_AB_2_11_0 0xfffU
+#define V_BEAN5_AB_2_11_0(x) ((x) << S_BEAN5_AB_2_11_0)
+#define G_BEAN5_AB_2_11_0(x) (((x) >> S_BEAN5_AB_2_11_0) & M_BEAN5_AB_2_11_0)
+
+#define A_MAC_BEAN5_REM_ABILITY_0 0x39714
+
+#define S_BEAN5_ABL_REM_FAULT 13
+#define V_BEAN5_ABL_REM_FAULT(x) ((x) << S_BEAN5_ABL_REM_FAULT)
+#define F_BEAN5_ABL_REM_FAULT V_BEAN5_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN5_REM_ABILITY_1 0x39718
+#define A_MAC_BEAN5_REM_ABILITY_2 0x3971c
+
+#define S_BEAN5_REM_AB_15_12 12
+#define M_BEAN5_REM_AB_15_12 0xfU
+#define V_BEAN5_REM_AB_15_12(x) ((x) << S_BEAN5_REM_AB_15_12)
+#define G_BEAN5_REM_AB_15_12(x) (((x) >> S_BEAN5_REM_AB_15_12) & M_BEAN5_REM_AB_15_12)
+
+#define S_BEAN5_REM_AB_11_0 0
+#define M_BEAN5_REM_AB_11_0 0xfffU
+#define V_BEAN5_REM_AB_11_0(x) ((x) << S_BEAN5_REM_AB_11_0)
+#define G_BEAN5_REM_AB_11_0(x) (((x) >> S_BEAN5_REM_AB_11_0) & M_BEAN5_REM_AB_11_0)
+
+#define A_MAC_BEAN5_MS_COUNT 0x39720
+#define A_MAC_BEAN5_XNP_0 0x39724
+#define A_MAC_BEAN5_XNP_1 0x39728
+#define A_MAC_BEAN5_XNP_2 0x3972c
+#define A_MAC_LP_BEAN5_XNP_0 0x39730
+#define A_MAC_LP_BEAN5_XNP_1 0x39734
+#define A_MAC_LP_BEAN5_XNP_2 0x39738
+#define A_MAC_BEAN5_ETH_STATUS 0x3973c
+#define A_MAC_BEAN5_ETH_STATUS_2 0x39740
+#define A_MAC_BEAN6_CTL 0x39800
+#define A_MAC_BEAN6_STATUS 0x39804
+#define A_MAC_BEAN6_ABILITY_0 0x39808
+
+#define S_BEAN6_REM_FAULT 13
+#define V_BEAN6_REM_FAULT(x) ((x) << S_BEAN6_REM_FAULT)
+#define F_BEAN6_REM_FAULT V_BEAN6_REM_FAULT(1U)
+
+#define A_MAC_BEAN6_ABILITY_1 0x3980c
+#define A_MAC_BEAN6_ABILITY_2 0x39810
+
+#define S_BEAN6_AB_2_15_12 12
+#define M_BEAN6_AB_2_15_12 0xfU
+#define V_BEAN6_AB_2_15_12(x) ((x) << S_BEAN6_AB_2_15_12)
+#define G_BEAN6_AB_2_15_12(x) (((x) >> S_BEAN6_AB_2_15_12) & M_BEAN6_AB_2_15_12)
+
+#define S_BEAN6_AB_2_11_0 0
+#define M_BEAN6_AB_2_11_0 0xfffU
+#define V_BEAN6_AB_2_11_0(x) ((x) << S_BEAN6_AB_2_11_0)
+#define G_BEAN6_AB_2_11_0(x) (((x) >> S_BEAN6_AB_2_11_0) & M_BEAN6_AB_2_11_0)
+
+#define A_MAC_BEAN6_REM_ABILITY_0 0x39814
+
+#define S_BEAN6_ABL_REM_FAULT 13
+#define V_BEAN6_ABL_REM_FAULT(x) ((x) << S_BEAN6_ABL_REM_FAULT)
+#define F_BEAN6_ABL_REM_FAULT V_BEAN6_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN6_REM_ABILITY_1 0x39818
+#define A_MAC_BEAN6_REM_ABILITY_2 0x3981c
+
+#define S_BEAN6_REM_AB_15_12 12
+#define M_BEAN6_REM_AB_15_12 0xfU
+#define V_BEAN6_REM_AB_15_12(x) ((x) << S_BEAN6_REM_AB_15_12)
+#define G_BEAN6_REM_AB_15_12(x) (((x) >> S_BEAN6_REM_AB_15_12) & M_BEAN6_REM_AB_15_12)
+
+#define S_BEAN6_REM_AB_11_0 0
+#define M_BEAN6_REM_AB_11_0 0xfffU
+#define V_BEAN6_REM_AB_11_0(x) ((x) << S_BEAN6_REM_AB_11_0)
+#define G_BEAN6_REM_AB_11_0(x) (((x) >> S_BEAN6_REM_AB_11_0) & M_BEAN6_REM_AB_11_0)
+
+#define A_MAC_BEAN6_MS_COUNT 0x39820
+#define A_MAC_BEAN6_XNP_0 0x39824
+#define A_MAC_BEAN6_XNP_1 0x39828
+#define A_MAC_BEAN6_XNP_2 0x3982c
+#define A_MAC_LP_BEAN6_XNP_0 0x39830
+#define A_MAC_LP_BEAN6_XNP_1 0x39834
+#define A_MAC_LP_BEAN6_XNP_2 0x39838
+#define A_MAC_BEAN6_ETH_STATUS 0x3983c
+#define A_MAC_BEAN6_ETH_STATUS_2 0x39840
+#define A_MAC_BEAN7_CTL 0x39900
+#define A_MAC_BEAN7_STATUS 0x39904
+#define A_MAC_BEAN7_ABILITY_0 0x39908
+
+#define S_BEAN7_REM_FAULT 13
+#define V_BEAN7_REM_FAULT(x) ((x) << S_BEAN7_REM_FAULT)
+#define F_BEAN7_REM_FAULT V_BEAN7_REM_FAULT(1U)
+
+#define A_MAC_BEAN7_ABILITY_1 0x3990c
+#define A_MAC_BEAN7_ABILITY_2 0x39910
+
+#define S_BEAN7_AB_2_15_12 12
+#define M_BEAN7_AB_2_15_12 0xfU
+#define V_BEAN7_AB_2_15_12(x) ((x) << S_BEAN7_AB_2_15_12)
+#define G_BEAN7_AB_2_15_12(x) (((x) >> S_BEAN7_AB_2_15_12) & M_BEAN7_AB_2_15_12)
+
+#define S_BEAN7_AB_2_11_0 0
+#define M_BEAN7_AB_2_11_0 0xfffU
+#define V_BEAN7_AB_2_11_0(x) ((x) << S_BEAN7_AB_2_11_0)
+#define G_BEAN7_AB_2_11_0(x) (((x) >> S_BEAN7_AB_2_11_0) & M_BEAN7_AB_2_11_0)
+
+#define A_MAC_BEAN7_REM_ABILITY_0 0x39914
+
+#define S_BEAN7_ABL_REM_FAULT 13
+#define V_BEAN7_ABL_REM_FAULT(x) ((x) << S_BEAN7_ABL_REM_FAULT)
+#define F_BEAN7_ABL_REM_FAULT V_BEAN7_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN7_REM_ABILITY_1 0x39918
+#define A_MAC_BEAN7_REM_ABILITY_2 0x3991c
+
+#define S_BEAN7_REM_AB_15_12 12
+#define M_BEAN7_REM_AB_15_12 0xfU
+#define V_BEAN7_REM_AB_15_12(x) ((x) << S_BEAN7_REM_AB_15_12)
+#define G_BEAN7_REM_AB_15_12(x) (((x) >> S_BEAN7_REM_AB_15_12) & M_BEAN7_REM_AB_15_12)
+
+#define S_BEAN7_REM_AB_11_0 0
+#define M_BEAN7_REM_AB_11_0 0xfffU
+#define V_BEAN7_REM_AB_11_0(x) ((x) << S_BEAN7_REM_AB_11_0)
+#define G_BEAN7_REM_AB_11_0(x) (((x) >> S_BEAN7_REM_AB_11_0) & M_BEAN7_REM_AB_11_0)
+
+#define A_MAC_BEAN7_MS_COUNT 0x39920
+#define A_MAC_BEAN7_XNP_0 0x39924
+#define A_MAC_BEAN7_XNP_1 0x39928
+#define A_MAC_BEAN7_XNP_2 0x3992c
+#define A_MAC_LP_BEAN7_XNP_0 0x39930
+#define A_MAC_LP_BEAN7_XNP_1 0x39934
+#define A_MAC_LP_BEAN7_XNP_2 0x39938
+#define A_MAC_BEAN7_ETH_STATUS 0x3993c
+#define A_MAC_BEAN7_ETH_STATUS_2 0x39940
+#define A_MAC_MTIP_ETHERSTATS_DATA_HI 0x39a00
+#define A_MAC_MTIP_ETHERSTATS_STATN_STATUS 0x39a04
+#define A_MAC_MTIP_ETHERSTATS_STATN_CONFIG 0x39a08
+
+#define S_T7_RESET 31
+#define V_T7_RESET(x) ((x) << S_T7_RESET)
+#define F_T7_RESET V_T7_RESET(1U)
+
+#define A_MAC_MTIP_ETHERSTATS_STATN_CONTROL 0x39a0c
+
+#define S_CMD_CLEAR_TX 31
+#define V_CMD_CLEAR_TX(x) ((x) << S_CMD_CLEAR_TX)
+#define F_CMD_CLEAR_TX V_CMD_CLEAR_TX(1U)
+
+#define S_CMD_CLEAR_RX 30
+#define V_CMD_CLEAR_RX(x) ((x) << S_CMD_CLEAR_RX)
+#define F_CMD_CLEAR_RX V_CMD_CLEAR_RX(1U)
+
+#define S_CLEAR_PRE 29
+#define V_CLEAR_PRE(x) ((x) << S_CLEAR_PRE)
+#define F_CLEAR_PRE V_CLEAR_PRE(1U)
+
+#define S_CMD_CAPTURE_TX 28
+#define V_CMD_CAPTURE_TX(x) ((x) << S_CMD_CAPTURE_TX)
+#define F_CMD_CAPTURE_TX V_CMD_CAPTURE_TX(1U)
+
+#define S_CMD_CAPTURE_RX 27
+#define V_CMD_CAPTURE_RX(x) ((x) << S_CMD_CAPTURE_RX)
+#define F_CMD_CAPTURE_RX V_CMD_CAPTURE_RX(1U)
+
+#define S_PORTMASK 0
+#define M_PORTMASK 0xffU
+#define V_PORTMASK(x) ((x) << S_PORTMASK)
+#define G_PORTMASK(x) (((x) >> S_PORTMASK) & M_PORTMASK)
+
+#define A_MAC_MTIP_ETHERSTATS_STATN_CLEARVALUE_LO 0x39a10
+
+#define S_STATN_CLEARVALUE_LO 0
+#define V_STATN_CLEARVALUE_LO(x) ((x) << S_STATN_CLEARVALUE_LO)
+#define F_STATN_CLEARVALUE_LO V_STATN_CLEARVALUE_LO(1U)
+
+#define A_MAC_MTIP_ETHERSTATS_STATN_CLEARVALUE_HI 0x39a14
+
+#define S_STATN_CLEARVALUE_HI 0
+#define V_STATN_CLEARVALUE_HI(x) ((x) << S_STATN_CLEARVALUE_HI)
+#define F_STATN_CLEARVALUE_HI V_STATN_CLEARVALUE_HI(1U)
+
+#define A_MAC_MTIP_ETHERSTATS_DATA_HI_1 0x39a1c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_0 0x39a20
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_1 0x39a24
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_2 0x39a28
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_3 0x39a2c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_4 0x39a30
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_5 0x39a34
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_6 0x39a38
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_7 0x39a3c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_8 0x39a40
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_9 0x39a44
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_10 0x39a48
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_11 0x39a4c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_12 0x39a50
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_13 0x39a54
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_14 0x39a58
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_15 0x39a5c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_16 0x39a60
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_17 0x39a64
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_18 0x39a68
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_19 0x39a6c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_20 0x39a70
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_21 0x39a74
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_22 0x39a78
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_23 0x39a7c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_24 0x39a80
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_25 0x39a84
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_26 0x39a88
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_27 0x39a8c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_28 0x39a90
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_29 0x39a94
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_30 0x39a98
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_31 0x39a9c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_32 0x39aa0
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_33 0x39aa4
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_34 0x39aa8
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSOCTETS 0x39b00
+#define A_MAC_MTIP_ETHERSTATS0_OCTETSRECEIVEDOK 0x39b04
+#define A_MAC_MTIP_ETHERSTATS0_AALIGNMENTERRORS 0x39b08
+#define A_MAC_MTIP_ETHERSTATS0_APAUSEMACCTRLFRAMESRECEIVED 0x39b0c
+#define A_MAC_MTIP_ETHERSTATS0_AFRAMETOOLONGERRORS 0x39b10
+#define A_MAC_MTIP_ETHERSTATS0_AINRANGELENGTHERRORS 0x39b14
+#define A_MAC_MTIP_ETHERSTATS0_AFRAMESRECEIVEDOK 0x39b18
+#define A_MAC_MTIP_ETHERSTATS0_AFRAMECHECKSEQUENCEERRORS 0x39b1c
+#define A_MAC_MTIP_ETHERSTATS0_VLANRECEIVEDOK 0x39b20
+#define A_MAC_MTIP_ETHERSTATS0_IFINERRORS_RX 0x39b24
+#define A_MAC_MTIP_ETHERSTATS0_IFINUCASTPKTS_RX 0x39b28
+#define A_MAC_MTIP_ETHERSTATS0_IFINMULTICASTPKTS_RX 0x39b2c
+#define A_MAC_MTIP_ETHERSTATS0_IFINBROADCASTPKTS_RX 0x39b30
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSDROPEVENTS_RX 0x39b34
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS_RX 0x39b38
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSUNDERSIZEPKTS_RX 0x39b3c
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS64OCTETS_RX 0x39b40
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS65TO127OCTETS_RX 0x39b44
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS128TO255OCTETS_RX 0x39b48
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS256TO511OCTETS_RX 0x39b4c
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS512TO1023OCTETS_RX 0x39b50
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS1024TO1518OCTETS_RX 0x39b54
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS1519TOMAXOCTETS_RX 0x39b58
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSOVERSIZEPKTS_RX 0x39b5c
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSJABBERS_RX 0x39b60
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSFRAGMENTS_RX 0x39b64
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_0_RX 0x39b68
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_1_RX 0x39b6c
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_2_RX 0x39b70
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_3_RX 0x39b74
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_4_RX 0x39b78
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_5_RX 0x39b7c
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_6_RX 0x39b80
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_7_RX 0x39b84
+#define A_MAC_MTIP_ETHERSTATS0_AMACCONTROLFRAMESRECEIVED_RX 0x39b88
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSOCTETS 0x39b8c
+#define A_MAC_MTIP_ETHERSTATS1_OCTETSRECEIVEDOK 0x39b90
+#define A_MAC_MTIP_ETHERSTATS1_AALIGNMENTERRORS 0x39b94
+#define A_MAC_MTIP_ETHERSTATS1_APAUSEMACCTRLFRAMESRECEIVED 0x39b98
+#define A_MAC_MTIP_ETHERSTATS1_AFRAMETOOLONGERRORS 0x39b9c
+#define A_MAC_MTIP_ETHERSTATS1_AINRANGELENGTHERRORS 0x39ba0
+#define A_MAC_MTIP_ETHERSTATS1_AFRAMESRECEIVEDOK 0x39ba4
+#define A_MAC_MTIP_ETHERSTATS1_AFRAMECHECKSEQUENCEERRORS 0x39ba8
+#define A_MAC_MTIP_ETHERSTATS1_VLANRECEIVEDOK 0x39bac
+#define A_MAC_MTIP_ETHERSTATS1_IFINERRORS_RX 0x39bb0
+#define A_MAC_MTIP_ETHERSTATS1_IFINUCASTPKTS_RX 0x39bb4
+#define A_MAC_MTIP_ETHERSTATS1_IFINMULTICASTPKTS_RX 0x39bb8
+#define A_MAC_MTIP_ETHERSTATS1_IFINBROADCASTPKTS_RX 0x39bbc
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSDROPEVENTS_RX 0x39bc0
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS_RX 0x39bc4
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSUNDERSIZEPKTS_RX 0x39bc8
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS64OCTETS_RX 0x39bcc
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS65TO127OCTETS_RX 0x39bd0
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS128TO255OCTETS_RX 0x39bd4
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS256TO511OCTETS_RX 0x39bd8
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS512TO1023OCTETS_RX 0x39bdc
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS1024TO1518OCTETS_RX 0x39be0
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS1519TOMAXOCTETS_RX 0x39be4
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSOVERSIZEPKTS_RX 0x39be8
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSJABBERS_RX 0x39bec
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSFRAGMENTS_RX 0x39bf0
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_0_RX 0x39bf4
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_1_RX 0x39bf8
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_2_RX 0x39bfc
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_3_RX 0x39c00
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_4_RX 0x39c04
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_5_RX 0x39c08
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_6_RX 0x39c0c
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_7_RX 0x39c10
+#define A_MAC_MTIP_ETHERSTATS1_AMACCONTROLFRAMESRECEIVED_RX 0x39c14
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSOCTETS 0x39c18
+#define A_MAC_MTIP_ETHERSTATS2_OCTETSRECEIVEDOK 0x39c1c
+#define A_MAC_MTIP_ETHERSTATS2_AALIGNMENTERRORS 0x39c20
+#define A_MAC_MTIP_ETHERSTATS2_APAUSEMACCTRLFRAMESRECEIVED 0x39c24
+#define A_MAC_MTIP_ETHERSTATS2_AFRAMETOOLONGERRORS 0x39c28
+#define A_MAC_MTIP_ETHERSTATS2_AINRANGELENGTHERRORS 0x39c2c
+#define A_MAC_MTIP_ETHERSTATS2_AFRAMESRECEIVEDOK 0x39c30
+#define A_MAC_MTIP_ETHERSTATS2_AFRAMECHECKSEQUENCEERRORS 0x39c34
+#define A_MAC_MTIP_ETHERSTATS2_VLANRECEIVEDOK 0x39c38
+#define A_MAC_MTIP_ETHERSTATS2_IFINERRORS_RX 0x39c3c
+#define A_MAC_MTIP_ETHERSTATS2_IFINUCASTPKTS_RX 0x39c40
+#define A_MAC_MTIP_ETHERSTATS2_IFINMULTICASTPKTS_RX 0x39c44
+#define A_MAC_MTIP_ETHERSTATS2_IFINBROADCASTPKTS_RX 0x39c48
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSDROPEVENTS_RX 0x39c4c
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS_RX 0x39c50
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSUNDERSIZEPKTS_RX 0x39c54
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS64OCTETS_RX 0x39c58
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS65TO127OCTETS_RX 0x39c5c
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS128TO255OCTETS_RX 0x39c60
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS256TO511OCTETS_RX 0x39c64
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS512TO1023OCTETS_RX 0x39c68
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS1024TO1518OCTETS_RX 0x39c6c
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS1519TOMAXOCTETS_RX 0x39c70
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSOVERSIZEPKTS_RX 0x39c74
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSJABBERS_RX 0x39c78
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSFRAGMENTS_RX 0x39c7c
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_0_RX 0x39c80
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_1_RX 0x39c84
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_2_RX 0x39c88
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_3_RX 0x39c8c
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_4_RX 0x39c90
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_5_RX 0x39c94
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_6_RX 0x39c98
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_7_RX 0x39c9c
+#define A_MAC_MTIP_ETHERSTATS2_AMACCONTROLFRAMESRECEIVED_RX 0x39ca0
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSOCTETS 0x39ca4
+#define A_MAC_MTIP_ETHERSTATS3_OCTETSRECEIVEDOK 0x39ca8
+#define A_MAC_MTIP_ETHERSTATS3_AALIGNMENTERRORS 0x39cac
+#define A_MAC_MTIP_ETHERSTATS3_APAUSEMACCTRLFRAMESRECEIVED 0x39cb0
+#define A_MAC_MTIP_ETHERSTATS3_AFRAMETOOLONGERRORS 0x39cb4
+#define A_MAC_MTIP_ETHERSTATS3_AINRANGELENGTHERRORS 0x39cb8
+#define A_MAC_MTIP_ETHERSTATS3_AFRAMESRECEIVEDOK 0x39cbc
+#define A_MAC_MTIP_ETHERSTATS3_AFRAMECHECKSEQUENCEERRORS 0x39cc0
+#define A_MAC_MTIP_ETHERSTATS3_VLANRECEIVEDOK 0x39cc4
+#define A_MAC_MTIP_ETHERSTATS3_IFINERRORS_RX 0x39cc8
+#define A_MAC_MTIP_ETHERSTATS3_IFINUCASTPKTS_RX 0x39ccc
+#define A_MAC_MTIP_ETHERSTATS3_IFINMULTICASTPKTS_RX 0x39cd0
+#define A_MAC_MTIP_ETHERSTATS3_IFINBROADCASTPKTS_RX 0x39cd4
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSDROPEVENTS_RX 0x39cd8
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS_RX 0x39cdc
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSUNDERSIZEPKTS_RX 0x39ce0
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS64OCTETS_RX 0x39ce4
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS65TO127OCTETS_RX 0x39ce8
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS128TO255OCTETS_RX 0x39cec
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS256TO511OCTETS_RX 0x39cf0
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS512TO1023OCTETS_RX 0x39cf4
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS1024TO1518OCTETS_RX 0x39cf8
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS1519TOMAXOCTETS_RX 0x39cfc
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSOVERSIZEPKTS_RX 0x39d00
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSJABBERS_RX 0x39d04
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSFRAGMENTS_RX 0x39d08
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_0_RX 0x39d0c
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_1_RX 0x39d10
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_2_RX 0x39d14
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_3_RX 0x39d18
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_4_RX 0x39d1c
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_5_RX 0x39d20
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_6_RX 0x39d24
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_7_RX 0x39d28
+#define A_MAC_MTIP_ETHERSTATS3_AMACCONTROLFRAMESRECEIVED_RX 0x39d2c
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSOCTETS_TX 0x39d30
+#define A_MAC_MTIP_ETHERSTATS0_OCTETSTRANSMITTEDOK_TX 0x39d34
+#define A_MAC_MTIP_ETHERSTATS0_APAUSEMACCTRLFRAMESTRANSMITTED_TX 0x39d38
+#define A_MAC_MTIP_ETHERSTATS0_AFRAMESTRANSMITTEDOK_TX 0x39d3c
+#define A_MAC_MTIP_ETHERSTATS0_VLANTRANSMITTEDOK_TX 0x39d40
+#define A_MAC_MTIP_ETHERSTATS0_IFOUTERRORS_TX 0x39d44
+#define A_MAC_MTIP_ETHERSTATS0_IFOUTUCASTPKTS_TX 0x39d48
+#define A_MAC_MTIP_ETHERSTATS0IFOUTMULTICASTPKTS_TX 0x39d4c
+#define A_MAC_MTIP_ETHERSTATS0_IFOUTBROADCASTPKTS_TX 0x39d50
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS64OCTETS_TX 0x39d54
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS65TO127OCTETS_TX 0x39d58
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS128TO255OCTETS_TX 0x39d5c
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS256TO511OCTETS_TX 0x39d60
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS512TO1023OCTETS_TX 0x39d64
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS1024TO1518OCTETS_TX 0x39d68
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS1519TOMAXOCTETS_TX 0x39d6c
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_0_TX 0x39d70
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_1_TX 0x39d74
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_2_TX 0x39d78
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_3_TX 0x39d7c
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_4_TX 0x39d80
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_5_TX 0x39d84
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_6_TX 0x39d88
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_7_TX 0x39d8c
+#define A_MAC_MTIP_ETHERSTATS0_AMACCONTROLFRAMESTRANSMITTED_TX 0x39d90
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS_TX 0x39d94
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSOCTETS_TX 0x39d98
+#define A_MAC_MTIP_ETHERSTATS1_OCTETSTRANSMITTEDOK_TX 0x39d9c
+#define A_MAC_MTIP_ETHERSTATS1_APAUSEMACCTRLFRAMESTRANSMITTED_TX 0x39da0
+#define A_MAC_MTIP_ETHERSTATS1_AFRAMESTRANSMITTEDOK_TX 0x39da4
+#define A_MAC_MTIP_ETHERSTATS1_VLANTRANSMITTEDOK_TX 0x39da8
+#define A_MAC_MTIP_ETHERSTATS1_IFOUTERRORS_TX 0x39dac
+#define A_MAC_MTIP_ETHERSTATS1_IFOUTUCASTPKTS_TX 0x39db0
+#define A_MAC_MTIP_ETHERSTATS1IFOUTMULTICASTPKTS_TX 0x39db4
+#define A_MAC_MTIP_ETHERSTATS1_IFOUTBROADCASTPKTS_TX 0x39db8
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS64OCTETS_TX 0x39dbc
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS65TO127OCTETS_TX 0x39dc0
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS128TO255OCTETS_TX 0x39dc4
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS256TO511OCTETS_TX 0x39dc8
+/*
+ * MAC MTIP per-channel Ethernet TX statistics counter offsets
+ * (ETHERSTATS1..ETHERSTATS3 register groups; each group repeats the
+ * same counter layout at consecutive addresses).  Auto-generated.
+ */
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS512TO1023OCTETS_TX 0x39dcc
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS1024TO1518OCTETS_TX 0x39dd0
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS1519TOMAXOCTETS_TX 0x39dd4
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_0_TX 0x39dd8
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_1_TX 0x39ddc
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_2_TX 0x39de0
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_3_TX 0x39de4
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_4_TX 0x39de8
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_5_TX 0x39dec
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_6_TX 0x39df0
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_7_TX 0x39df4
+#define A_MAC_MTIP_ETHERSTATS1_AMACCONTROLFRAMESTRANSMITTED_TX 0x39df8
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS_TX 0x39dfc
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSOCTETS_TX 0x39e00
+#define A_MAC_MTIP_ETHERSTATS2_OCTETSTRANSMITTEDOK_TX 0x39e04
+#define A_MAC_MTIP_ETHERSTATS2_APAUSEMACCTRLFRAMESTRANSMITTED_TX 0x39e08
+#define A_MAC_MTIP_ETHERSTATS2_AFRAMESTRANSMITTEDOK_TX 0x39e0c
+#define A_MAC_MTIP_ETHERSTATS2_VLANTRANSMITTEDOK_TX 0x39e10
+#define A_MAC_MTIP_ETHERSTATS2_IFOUTERRORS_TX 0x39e14
+#define A_MAC_MTIP_ETHERSTATS2_IFOUTUCASTPKTS_TX 0x39e18
+/*
+ * NOTE(review): the next name is missing the '_' between "ETHERSTATS2"
+ * and "IFOUT..." that all sibling names have — presumably a register
+ * generator artifact; left as-is because renaming would break existing
+ * references to this generated header.
+ */
+#define A_MAC_MTIP_ETHERSTATS2IFOUTMULTICASTPKTS_TX 0x39e1c
+#define A_MAC_MTIP_ETHERSTATS2_IFOUTBROADCASTPKTS_TX 0x39e20
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS64OCTETS_TX 0x39e24
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS65TO127OCTETS_TX 0x39e28
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS128TO255OCTETS_TX 0x39e2c
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS256TO511OCTETS_TX 0x39e30
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS512TO1023OCTETS_TX 0x39e34
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS1024TO1518OCTETS_TX 0x39e38
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS1519TOMAXOCTETS_TX 0x39e3c
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_0_TX 0x39e40
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_1_TX 0x39e44
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_2_TX 0x39e48
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_3_TX 0x39e4c
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_4_TX 0x39e50
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_5_TX 0x39e54
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_6_TX 0x39e58
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_7_TX 0x39e5c
+#define A_MAC_MTIP_ETHERSTATS2_AMACCONTROLFRAMESTRANSMITTED_TX 0x39e60
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS_TX 0x39e64
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSOCTETS_TX 0x39e68
+#define A_MAC_MTIP_ETHERSTATS3_OCTETSTRANSMITTEDOK_TX 0x39e6c
+#define A_MAC_MTIP_ETHERSTATS3_APAUSEMACCTRLFRAMESTRANSMITTED_TX 0x39e70
+#define A_MAC_MTIP_ETHERSTATS3_AFRAMESTRANSMITTEDOK_TX 0x39e74
+#define A_MAC_MTIP_ETHERSTATS3_VLANTRANSMITTEDOK_TX 0x39e78
+#define A_MAC_MTIP_ETHERSTATS3_IFOUTERRORS_TX 0x39e7c
+#define A_MAC_MTIP_ETHERSTATS3_IFOUTUCASTPKTS_TX 0x39e80
+/* NOTE(review): '_' missing here too (cf. ETHERSTATS2 above); kept as generated. */
+#define A_MAC_MTIP_ETHERSTATS3IFOUTMULTICASTPKTS_TX 0x39e84
+#define A_MAC_MTIP_ETHERSTATS3_IFOUTBROADCASTPKTS_TX 0x39e88
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS64OCTETS_TX 0x39e8c
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS65TO127OCTETS_TX 0x39e90
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS128TO255OCTETS_TX 0x39e94
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS256TO511OCTETS_TX 0x39e98
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS512TO1023OCTETS_TX 0x39e9c
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS1024TO1518OCTETS_TX 0x39ea0
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS1519TOMAXOCTETS_TX 0x39ea4
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_0_TX 0x39ea8
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_1_TX 0x39eac
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_2_TX 0x39eb0
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_3_TX 0x39eb4
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_4_TX 0x39eb8
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_5_TX 0x39ebc
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_6_TX 0x39ec0
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_7_TX 0x39ec4
+#define A_MAC_MTIP_ETHERSTATS3_AMACCONTROLFRAMESTRANSMITTED_TX 0x39ec8
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS_TX 0x39ecc
+/*
+ * MAC IOS register block.  Field accessor macro convention used
+ * throughout this header:
+ *   S_<NAME>  bit position (shift) of the field
+ *   M_<NAME>  field mask (unshifted)
+ *   V_<NAME>(x)  place a value into the field
+ *   F_<NAME>  single-bit flag (V_<NAME>(1U))
+ *   G_<NAME>(x)  extract the field from a register value
+ */
+#define A_MAC_IOS_CTRL 0x3a000
+
+#define S_SUB_BLOCK_SEL 28
+#define M_SUB_BLOCK_SEL 0x7U
+#define V_SUB_BLOCK_SEL(x) ((x) << S_SUB_BLOCK_SEL)
+#define G_SUB_BLOCK_SEL(x) (((x) >> S_SUB_BLOCK_SEL) & M_SUB_BLOCK_SEL)
+
+#define S_QUAD_BROADCAST_EN 24
+#define V_QUAD_BROADCAST_EN(x) ((x) << S_QUAD_BROADCAST_EN)
+#define F_QUAD_BROADCAST_EN V_QUAD_BROADCAST_EN(1U)
+
+#define S_AUTO_INCR 20
+#define V_AUTO_INCR(x) ((x) << S_AUTO_INCR)
+#define F_AUTO_INCR V_AUTO_INCR(1U)
+
+/* "T7_" prefix disambiguates from an identically-named field elsewhere. */
+#define S_T7_2_ADDR 0
+#define M_T7_2_ADDR 0x7ffffU
+#define V_T7_2_ADDR(x) ((x) << S_T7_2_ADDR)
+#define G_T7_2_ADDR(x) (((x) >> S_T7_2_ADDR) & M_T7_2_ADDR)
+
+/* Data register paired with the A_MAC_IOS_CTRL indirect-address register. */
+#define A_MAC_IOS_DATA 0x3a004
+#define A_MAC_IOS_BGR_RST 0x3a050
+
+#define S_BGR_RSTN 0
+#define V_BGR_RSTN(x) ((x) << S_BGR_RSTN)
+#define F_BGR_RSTN V_BGR_RSTN(1U)
+
+#define A_MAC_IOS_BGR_CFG 0x3a054
+
+#define S_SOC_REFCLK_EN 0
+#define V_SOC_REFCLK_EN(x) ((x) << S_SOC_REFCLK_EN)
+#define F_SOC_REFCLK_EN V_SOC_REFCLK_EN(1U)
+
+/* Per-quad reset controls: per-channel (CH0..CH3), quad, and PLL reset bits. */
+#define A_MAC_IOS_QUAD0_CFG 0x3a058
+
+#define S_QUAD0_CH3_RSTN 5
+#define V_QUAD0_CH3_RSTN(x) ((x) << S_QUAD0_CH3_RSTN)
+#define F_QUAD0_CH3_RSTN V_QUAD0_CH3_RSTN(1U)
+
+#define S_QUAD0_CH2_RSTN 4
+#define V_QUAD0_CH2_RSTN(x) ((x) << S_QUAD0_CH2_RSTN)
+#define F_QUAD0_CH2_RSTN V_QUAD0_CH2_RSTN(1U)
+
+#define S_QUAD0_CH1_RSTN 3
+#define V_QUAD0_CH1_RSTN(x) ((x) << S_QUAD0_CH1_RSTN)
+#define F_QUAD0_CH1_RSTN V_QUAD0_CH1_RSTN(1U)
+
+#define S_QUAD0_CH0_RSTN 2
+#define V_QUAD0_CH0_RSTN(x) ((x) << S_QUAD0_CH0_RSTN)
+#define F_QUAD0_CH0_RSTN V_QUAD0_CH0_RSTN(1U)
+
+#define S_QUAD0_RSTN 1
+#define V_QUAD0_RSTN(x) ((x) << S_QUAD0_RSTN)
+#define F_QUAD0_RSTN V_QUAD0_RSTN(1U)
+
+#define S_PLL0_RSTN 0
+#define V_PLL0_RSTN(x) ((x) << S_PLL0_RSTN)
+#define F_PLL0_RSTN V_PLL0_RSTN(1U)
+
+/* Quad 1: same layout as A_MAC_IOS_QUAD0_CFG. */
+#define A_MAC_IOS_QUAD1_CFG 0x3a05c
+
+#define S_QUAD1_CH3_RSTN 5
+#define V_QUAD1_CH3_RSTN(x) ((x) << S_QUAD1_CH3_RSTN)
+#define F_QUAD1_CH3_RSTN V_QUAD1_CH3_RSTN(1U)
+
+#define S_QUAD1_CH2_RSTN 4
+#define V_QUAD1_CH2_RSTN(x) ((x) << S_QUAD1_CH2_RSTN)
+#define F_QUAD1_CH2_RSTN V_QUAD1_CH2_RSTN(1U)
+
+#define S_QUAD1_CH1_RSTN 3
+#define V_QUAD1_CH1_RSTN(x) ((x) << S_QUAD1_CH1_RSTN)
+#define F_QUAD1_CH1_RSTN V_QUAD1_CH1_RSTN(1U)
+
+#define S_QUAD1_CH0_RSTN 2
+#define V_QUAD1_CH0_RSTN(x) ((x) << S_QUAD1_CH0_RSTN)
+#define F_QUAD1_CH0_RSTN V_QUAD1_CH0_RSTN(1U)
+
+#define S_QUAD1_RSTN 1
+#define V_QUAD1_RSTN(x) ((x) << S_QUAD1_RSTN)
+#define F_QUAD1_RSTN V_QUAD1_RSTN(1U)
+
+#define S_PLL1_RSTN 0
+#define V_PLL1_RSTN(x) ((x) << S_PLL1_RSTN)
+#define F_PLL1_RSTN V_PLL1_RSTN(1U)
+
+#define A_MAC_IOS_SCRATCHPAD0 0x3a060
+#define A_MAC_IOS_SCRATCHPAD1 0x3a064
+#define A_MAC_IOS_SCRATCHPAD2 0x3a068
+#define A_MAC_IOS_SCRATCHPAD3 0x3a06c
+
+#define S_DATA0 1
+#define M_DATA0 0x7fffffffU
+#define V_DATA0(x) ((x) << S_DATA0)
+#define G_DATA0(x) (((x) >> S_DATA0) & M_DATA0)
+
+#define S_I2C_MODE 0
+#define V_I2C_MODE(x) ((x) << S_I2C_MODE)
+#define F_I2C_MODE V_I2C_MODE(1U)
+
+#define A_MAC_IOS_BGR_DBG_COUNTER 0x3a070
+#define A_MAC_IOS_QUAD0_DBG_COUNTER 0x3a074
+#define A_MAC_IOS_PLL0_DBG_COUNTER 0x3a078
+#define A_MAC_IOS_QUAD1_DBG_COUNTER 0x3a07c
+#define A_MAC_IOS_PLL1_DBG_COUNTER 0x3a080
+#define A_MAC_IOS_DBG_CLK_CFG 0x3a084
+
+#define S_DBG_CLK_MUX_GPIO 3
+#define V_DBG_CLK_MUX_GPIO(x) ((x) << S_DBG_CLK_MUX_GPIO)
+#define F_DBG_CLK_MUX_GPIO V_DBG_CLK_MUX_GPIO(1U)
+
+#define S_DBG_CLK_MUX_SEL 0
+#define M_DBG_CLK_MUX_SEL 0x7U
+#define V_DBG_CLK_MUX_SEL(x) ((x) << S_DBG_CLK_MUX_SEL)
+#define G_DBG_CLK_MUX_SEL(x) (((x) >> S_DBG_CLK_MUX_SEL) & M_DBG_CLK_MUX_SEL)
+
+/*
+ * Per-quad interrupt enable registers.  Each quad exposes, per lane
+ * (0..3): training failure/complete, AN TX, signal detect, CDR
+ * loss-of-lock, and loss-of-signal bits, plus one mailbox bit.
+ */
+#define A_MAC_IOS_INTR_EN_QUAD0 0x3a090
+
+#define S_Q0_MAILBOX_INT_ASSERT 24
+#define V_Q0_MAILBOX_INT_ASSERT(x) ((x) << S_Q0_MAILBOX_INT_ASSERT)
+#define F_Q0_MAILBOX_INT_ASSERT V_Q0_MAILBOX_INT_ASSERT(1U)
+
+#define S_Q0_TRAINING_FAILURE_3_ASSERT 23
+#define V_Q0_TRAINING_FAILURE_3_ASSERT(x) ((x) << S_Q0_TRAINING_FAILURE_3_ASSERT)
+#define F_Q0_TRAINING_FAILURE_3_ASSERT V_Q0_TRAINING_FAILURE_3_ASSERT(1U)
+
+#define S_Q0_TRAINING_FAILURE_2_ASSERT 22
+#define V_Q0_TRAINING_FAILURE_2_ASSERT(x) ((x) << S_Q0_TRAINING_FAILURE_2_ASSERT)
+#define F_Q0_TRAINING_FAILURE_2_ASSERT V_Q0_TRAINING_FAILURE_2_ASSERT(1U)
+
+#define S_Q0_TRAINING_FAILURE_1_ASSERT 21
+#define V_Q0_TRAINING_FAILURE_1_ASSERT(x) ((x) << S_Q0_TRAINING_FAILURE_1_ASSERT)
+#define F_Q0_TRAINING_FAILURE_1_ASSERT V_Q0_TRAINING_FAILURE_1_ASSERT(1U)
+
+#define S_Q0_TRAINING_FAILURE_0_ASSERT 20
+#define V_Q0_TRAINING_FAILURE_0_ASSERT(x) ((x) << S_Q0_TRAINING_FAILURE_0_ASSERT)
+#define F_Q0_TRAINING_FAILURE_0_ASSERT V_Q0_TRAINING_FAILURE_0_ASSERT(1U)
+
+#define S_Q0_TRAINING_COMPLETE_3_ASSERT 19
+#define V_Q0_TRAINING_COMPLETE_3_ASSERT(x) ((x) << S_Q0_TRAINING_COMPLETE_3_ASSERT)
+#define F_Q0_TRAINING_COMPLETE_3_ASSERT V_Q0_TRAINING_COMPLETE_3_ASSERT(1U)
+
+#define S_Q0_TRAINING_COMPLETE_2_ASSERT 18
+#define V_Q0_TRAINING_COMPLETE_2_ASSERT(x) ((x) << S_Q0_TRAINING_COMPLETE_2_ASSERT)
+#define F_Q0_TRAINING_COMPLETE_2_ASSERT V_Q0_TRAINING_COMPLETE_2_ASSERT(1U)
+
+#define S_Q0_TRAINING_COMPLETE_1_ASSERT 17
+#define V_Q0_TRAINING_COMPLETE_1_ASSERT(x) ((x) << S_Q0_TRAINING_COMPLETE_1_ASSERT)
+#define F_Q0_TRAINING_COMPLETE_1_ASSERT V_Q0_TRAINING_COMPLETE_1_ASSERT(1U)
+
+#define S_Q0_TRAINING_COMPLETE_0_ASSERT 16
+#define V_Q0_TRAINING_COMPLETE_0_ASSERT(x) ((x) << S_Q0_TRAINING_COMPLETE_0_ASSERT)
+#define F_Q0_TRAINING_COMPLETE_0_ASSERT V_Q0_TRAINING_COMPLETE_0_ASSERT(1U)
+
+#define S_Q0_AN_TX_INT_3_ASSERT 15
+#define V_Q0_AN_TX_INT_3_ASSERT(x) ((x) << S_Q0_AN_TX_INT_3_ASSERT)
+#define F_Q0_AN_TX_INT_3_ASSERT V_Q0_AN_TX_INT_3_ASSERT(1U)
+
+#define S_Q0_AN_TX_INT_2_ASSERT 14
+#define V_Q0_AN_TX_INT_2_ASSERT(x) ((x) << S_Q0_AN_TX_INT_2_ASSERT)
+#define F_Q0_AN_TX_INT_2_ASSERT V_Q0_AN_TX_INT_2_ASSERT(1U)
+
+#define S_Q0_AN_TX_INT_1_ASSERT 13
+#define V_Q0_AN_TX_INT_1_ASSERT(x) ((x) << S_Q0_AN_TX_INT_1_ASSERT)
+#define F_Q0_AN_TX_INT_1_ASSERT V_Q0_AN_TX_INT_1_ASSERT(1U)
+
+#define S_Q0_AN_TX_INT_0_ASSERT 12
+#define V_Q0_AN_TX_INT_0_ASSERT(x) ((x) << S_Q0_AN_TX_INT_0_ASSERT)
+#define F_Q0_AN_TX_INT_0_ASSERT V_Q0_AN_TX_INT_0_ASSERT(1U)
+
+#define S_Q0_SIGNAL_DETECT_3_ASSERT 11
+#define V_Q0_SIGNAL_DETECT_3_ASSERT(x) ((x) << S_Q0_SIGNAL_DETECT_3_ASSERT)
+#define F_Q0_SIGNAL_DETECT_3_ASSERT V_Q0_SIGNAL_DETECT_3_ASSERT(1U)
+
+#define S_Q0_SIGNAL_DETECT_2_ASSERT 10
+#define V_Q0_SIGNAL_DETECT_2_ASSERT(x) ((x) << S_Q0_SIGNAL_DETECT_2_ASSERT)
+#define F_Q0_SIGNAL_DETECT_2_ASSERT V_Q0_SIGNAL_DETECT_2_ASSERT(1U)
+
+#define S_Q0_SIGNAL_DETECT_1_ASSERT 9
+#define V_Q0_SIGNAL_DETECT_1_ASSERT(x) ((x) << S_Q0_SIGNAL_DETECT_1_ASSERT)
+#define F_Q0_SIGNAL_DETECT_1_ASSERT V_Q0_SIGNAL_DETECT_1_ASSERT(1U)
+
+#define S_Q0_SIGNAL_DETECT_0_ASSERT 8
+#define V_Q0_SIGNAL_DETECT_0_ASSERT(x) ((x) << S_Q0_SIGNAL_DETECT_0_ASSERT)
+#define F_Q0_SIGNAL_DETECT_0_ASSERT V_Q0_SIGNAL_DETECT_0_ASSERT(1U)
+
+#define S_Q0_CDR_LOL_3_ASSERT 7
+#define V_Q0_CDR_LOL_3_ASSERT(x) ((x) << S_Q0_CDR_LOL_3_ASSERT)
+#define F_Q0_CDR_LOL_3_ASSERT V_Q0_CDR_LOL_3_ASSERT(1U)
+
+#define S_Q0_CDR_LOL_2_ASSERT 6
+#define V_Q0_CDR_LOL_2_ASSERT(x) ((x) << S_Q0_CDR_LOL_2_ASSERT)
+#define F_Q0_CDR_LOL_2_ASSERT V_Q0_CDR_LOL_2_ASSERT(1U)
+
+#define S_Q0_CDR_LOL_1_ASSERT 5
+#define V_Q0_CDR_LOL_1_ASSERT(x) ((x) << S_Q0_CDR_LOL_1_ASSERT)
+#define F_Q0_CDR_LOL_1_ASSERT V_Q0_CDR_LOL_1_ASSERT(1U)
+
+#define S_Q0_CDR_LOL_0_ASSERT 4
+#define V_Q0_CDR_LOL_0_ASSERT(x) ((x) << S_Q0_CDR_LOL_0_ASSERT)
+#define F_Q0_CDR_LOL_0_ASSERT V_Q0_CDR_LOL_0_ASSERT(1U)
+
+#define S_Q0_LOS_3_ASSERT 3
+#define V_Q0_LOS_3_ASSERT(x) ((x) << S_Q0_LOS_3_ASSERT)
+#define F_Q0_LOS_3_ASSERT V_Q0_LOS_3_ASSERT(1U)
+
+#define S_Q0_LOS_2_ASSERT 2
+#define V_Q0_LOS_2_ASSERT(x) ((x) << S_Q0_LOS_2_ASSERT)
+#define F_Q0_LOS_2_ASSERT V_Q0_LOS_2_ASSERT(1U)
+
+#define S_Q0_LOS_1_ASSERT 1
+#define V_Q0_LOS_1_ASSERT(x) ((x) << S_Q0_LOS_1_ASSERT)
+#define F_Q0_LOS_1_ASSERT V_Q0_LOS_1_ASSERT(1U)
+
+#define S_Q0_LOS_0_ASSERT 0
+#define V_Q0_LOS_0_ASSERT(x) ((x) << S_Q0_LOS_0_ASSERT)
+#define F_Q0_LOS_0_ASSERT V_Q0_LOS_0_ASSERT(1U)
+
+/* Cause register; presumably same bit layout as the EN register — confirm. */
+#define A_MAC_IOS_INTR_CAUSE_QUAD0 0x3a094
+/* Quad 1: same field layout as quad 0, Q1_* names. */
+#define A_MAC_IOS_INTR_EN_QUAD1 0x3a098
+
+#define S_Q1_MAILBOX_INT_ASSERT 24
+#define V_Q1_MAILBOX_INT_ASSERT(x) ((x) << S_Q1_MAILBOX_INT_ASSERT)
+#define F_Q1_MAILBOX_INT_ASSERT V_Q1_MAILBOX_INT_ASSERT(1U)
+
+#define S_Q1_TRAINING_FAILURE_3_ASSERT 23
+#define V_Q1_TRAINING_FAILURE_3_ASSERT(x) ((x) << S_Q1_TRAINING_FAILURE_3_ASSERT)
+#define F_Q1_TRAINING_FAILURE_3_ASSERT V_Q1_TRAINING_FAILURE_3_ASSERT(1U)
+
+#define S_Q1_TRAINING_FAILURE_2_ASSERT 22
+#define V_Q1_TRAINING_FAILURE_2_ASSERT(x) ((x) << S_Q1_TRAINING_FAILURE_2_ASSERT)
+#define F_Q1_TRAINING_FAILURE_2_ASSERT V_Q1_TRAINING_FAILURE_2_ASSERT(1U)
+
+#define S_Q1_TRAINING_FAILURE_1_ASSERT 21
+#define V_Q1_TRAINING_FAILURE_1_ASSERT(x) ((x) << S_Q1_TRAINING_FAILURE_1_ASSERT)
+#define F_Q1_TRAINING_FAILURE_1_ASSERT V_Q1_TRAINING_FAILURE_1_ASSERT(1U)
+
+#define S_Q1_TRAINING_FAILURE_0_ASSERT 20
+#define V_Q1_TRAINING_FAILURE_0_ASSERT(x) ((x) << S_Q1_TRAINING_FAILURE_0_ASSERT)
+#define F_Q1_TRAINING_FAILURE_0_ASSERT V_Q1_TRAINING_FAILURE_0_ASSERT(1U)
+
+#define S_Q1_TRAINING_COMPLETE_3_ASSERT 19
+#define V_Q1_TRAINING_COMPLETE_3_ASSERT(x) ((x) << S_Q1_TRAINING_COMPLETE_3_ASSERT)
+#define F_Q1_TRAINING_COMPLETE_3_ASSERT V_Q1_TRAINING_COMPLETE_3_ASSERT(1U)
+
+#define S_Q1_TRAINING_COMPLETE_2_ASSERT 18
+#define V_Q1_TRAINING_COMPLETE_2_ASSERT(x) ((x) << S_Q1_TRAINING_COMPLETE_2_ASSERT)
+#define F_Q1_TRAINING_COMPLETE_2_ASSERT V_Q1_TRAINING_COMPLETE_2_ASSERT(1U)
+
+#define S_Q1_TRAINING_COMPLETE_1_ASSERT 17
+#define V_Q1_TRAINING_COMPLETE_1_ASSERT(x) ((x) << S_Q1_TRAINING_COMPLETE_1_ASSERT)
+#define F_Q1_TRAINING_COMPLETE_1_ASSERT V_Q1_TRAINING_COMPLETE_1_ASSERT(1U)
+
+#define S_Q1_TRAINING_COMPLETE_0_ASSERT 16
+#define V_Q1_TRAINING_COMPLETE_0_ASSERT(x) ((x) << S_Q1_TRAINING_COMPLETE_0_ASSERT)
+#define F_Q1_TRAINING_COMPLETE_0_ASSERT V_Q1_TRAINING_COMPLETE_0_ASSERT(1U)
+
+#define S_Q1_AN_TX_INT_3_ASSERT 15
+#define V_Q1_AN_TX_INT_3_ASSERT(x) ((x) << S_Q1_AN_TX_INT_3_ASSERT)
+#define F_Q1_AN_TX_INT_3_ASSERT V_Q1_AN_TX_INT_3_ASSERT(1U)
+
+#define S_Q1_AN_TX_INT_2_ASSERT 14
+#define V_Q1_AN_TX_INT_2_ASSERT(x) ((x) << S_Q1_AN_TX_INT_2_ASSERT)
+#define F_Q1_AN_TX_INT_2_ASSERT V_Q1_AN_TX_INT_2_ASSERT(1U)
+
+#define S_Q1_AN_TX_INT_1_ASSERT 13
+#define V_Q1_AN_TX_INT_1_ASSERT(x) ((x) << S_Q1_AN_TX_INT_1_ASSERT)
+#define F_Q1_AN_TX_INT_1_ASSERT V_Q1_AN_TX_INT_1_ASSERT(1U)
+
+#define S_Q1_AN_TX_INT_0_ASSERT 12
+#define V_Q1_AN_TX_INT_0_ASSERT(x) ((x) << S_Q1_AN_TX_INT_0_ASSERT)
+#define F_Q1_AN_TX_INT_0_ASSERT V_Q1_AN_TX_INT_0_ASSERT(1U)
+
+#define S_Q1_SIGNAL_DETECT_3_ASSERT 11
+#define V_Q1_SIGNAL_DETECT_3_ASSERT(x) ((x) << S_Q1_SIGNAL_DETECT_3_ASSERT)
+#define F_Q1_SIGNAL_DETECT_3_ASSERT V_Q1_SIGNAL_DETECT_3_ASSERT(1U)
+
+#define S_Q1_SIGNAL_DETECT_2_ASSERT 10
+#define V_Q1_SIGNAL_DETECT_2_ASSERT(x) ((x) << S_Q1_SIGNAL_DETECT_2_ASSERT)
+#define F_Q1_SIGNAL_DETECT_2_ASSERT V_Q1_SIGNAL_DETECT_2_ASSERT(1U)
+
+#define S_Q1_SIGNAL_DETECT_1_ASSERT 9
+#define V_Q1_SIGNAL_DETECT_1_ASSERT(x) ((x) << S_Q1_SIGNAL_DETECT_1_ASSERT)
+#define F_Q1_SIGNAL_DETECT_1_ASSERT V_Q1_SIGNAL_DETECT_1_ASSERT(1U)
+
+#define S_Q1_SIGNAL_DETECT_0_ASSERT 8
+#define V_Q1_SIGNAL_DETECT_0_ASSERT(x) ((x) << S_Q1_SIGNAL_DETECT_0_ASSERT)
+#define F_Q1_SIGNAL_DETECT_0_ASSERT V_Q1_SIGNAL_DETECT_0_ASSERT(1U)
+
+#define S_Q1_CDR_LOL_3_ASSERT 7
+#define V_Q1_CDR_LOL_3_ASSERT(x) ((x) << S_Q1_CDR_LOL_3_ASSERT)
+#define F_Q1_CDR_LOL_3_ASSERT V_Q1_CDR_LOL_3_ASSERT(1U)
+
+#define S_Q1_CDR_LOL_2_ASSERT 6
+#define V_Q1_CDR_LOL_2_ASSERT(x) ((x) << S_Q1_CDR_LOL_2_ASSERT)
+#define F_Q1_CDR_LOL_2_ASSERT V_Q1_CDR_LOL_2_ASSERT(1U)
+
+#define S_Q1_CDR_LOL_1_ASSERT 5
+#define V_Q1_CDR_LOL_1_ASSERT(x) ((x) << S_Q1_CDR_LOL_1_ASSERT)
+#define F_Q1_CDR_LOL_1_ASSERT V_Q1_CDR_LOL_1_ASSERT(1U)
+
+#define S_Q1_CDR_LOL_0_ASSERT 4
+#define V_Q1_CDR_LOL_0_ASSERT(x) ((x) << S_Q1_CDR_LOL_0_ASSERT)
+#define F_Q1_CDR_LOL_0_ASSERT V_Q1_CDR_LOL_0_ASSERT(1U)
+
+#define S_Q1_LOS_3_ASSERT 3
+#define V_Q1_LOS_3_ASSERT(x) ((x) << S_Q1_LOS_3_ASSERT)
+#define F_Q1_LOS_3_ASSERT V_Q1_LOS_3_ASSERT(1U)
+
+#define S_Q1_LOS_2_ASSERT 2
+#define V_Q1_LOS_2_ASSERT(x) ((x) << S_Q1_LOS_2_ASSERT)
+#define F_Q1_LOS_2_ASSERT V_Q1_LOS_2_ASSERT(1U)
+
+#define S_Q1_LOS_1_ASSERT 1
+#define V_Q1_LOS_1_ASSERT(x) ((x) << S_Q1_LOS_1_ASSERT)
+#define F_Q1_LOS_1_ASSERT V_Q1_LOS_1_ASSERT(1U)
+
+#define S_Q1_LOS_0_ASSERT 0
+#define V_Q1_LOS_0_ASSERT(x) ((x) << S_Q1_LOS_0_ASSERT)
+#define F_Q1_LOS_0_ASSERT V_Q1_LOS_0_ASSERT(1U)
+
+#define A_MAC_MTIP_PCS_1G_0_CONTROL 0x3e000
+
+#define S_SPEED_SEL_1 13
+#define V_SPEED_SEL_1(x) ((x) << S_SPEED_SEL_1)
+#define F_SPEED_SEL_1 V_SPEED_SEL_1(1U)
+
+#define S_AUTO_NEG_ENA 12
+#define V_AUTO_NEG_ENA(x) ((x) << S_AUTO_NEG_ENA)
+#define F_AUTO_NEG_ENA V_AUTO_NEG_ENA(1U)
+
+#define S_T7_POWER_DOWN 11
+#define V_T7_POWER_DOWN(x) ((x) << S_T7_POWER_DOWN)
+#define F_T7_POWER_DOWN V_T7_POWER_DOWN(1U)
+
+#define S_RESTART_AUTO_NEG 9
+#define V_RESTART_AUTO_NEG(x) ((x) << S_RESTART_AUTO_NEG)
+#define F_RESTART_AUTO_NEG V_RESTART_AUTO_NEG(1U)
+
+#define S_SPEED_SEL_0 6
+#define V_SPEED_SEL_0(x) ((x) << S_SPEED_SEL_0)
+#define F_SPEED_SEL_0 V_SPEED_SEL_0(1U)
+
+#define A_MAC_MTIP_PCS_1G_0_STATUS 0x3e004
+
+#define S_100BASE_T4 15
+#define V_100BASE_T4(x) ((x) << S_100BASE_T4)
+#define F_100BASE_T4 V_100BASE_T4(1U)
+
+#define S_100BASE_X_FULL_DUPLEX 14
+#define V_100BASE_X_FULL_DUPLEX(x) ((x) << S_100BASE_X_FULL_DUPLEX)
+#define F_100BASE_X_FULL_DUPLEX V_100BASE_X_FULL_DUPLEX(1U)
+
+#define S_100BASE_X_HALF_DUPLEX 13
+#define V_100BASE_X_HALF_DUPLEX(x) ((x) << S_100BASE_X_HALF_DUPLEX)
+#define F_100BASE_X_HALF_DUPLEX V_100BASE_X_HALF_DUPLEX(1U)
+
+#define S_10MBPS_FULL_DUPLEX 12
+#define V_10MBPS_FULL_DUPLEX(x) ((x) << S_10MBPS_FULL_DUPLEX)
+#define F_10MBPS_FULL_DUPLEX V_10MBPS_FULL_DUPLEX(1U)
+
+#define S_10MBPS_HALF_DUPLEX 11
+#define V_10MBPS_HALF_DUPLEX(x) ((x) << S_10MBPS_HALF_DUPLEX)
+#define F_10MBPS_HALF_DUPLEX V_10MBPS_HALF_DUPLEX(1U)
+
+#define S_100BASE_T2_HALF_DUPLEX1 10
+#define V_100BASE_T2_HALF_DUPLEX1(x) ((x) << S_100BASE_T2_HALF_DUPLEX1)
+#define F_100BASE_T2_HALF_DUPLEX1 V_100BASE_T2_HALF_DUPLEX1(1U)
+
+#define S_100BASE_T2_HALF_DUPLEX0 9
+#define V_100BASE_T2_HALF_DUPLEX0(x) ((x) << S_100BASE_T2_HALF_DUPLEX0)
+#define F_100BASE_T2_HALF_DUPLEX0 V_100BASE_T2_HALF_DUPLEX0(1U)
+
+#define S_T7_EXTENDED_STATUS 8
+#define V_T7_EXTENDED_STATUS(x) ((x) << S_T7_EXTENDED_STATUS)
+#define F_T7_EXTENDED_STATUS V_T7_EXTENDED_STATUS(1U)
+
+#define S_AUTO_NEG_COMPLETE 5
+#define V_AUTO_NEG_COMPLETE(x) ((x) << S_AUTO_NEG_COMPLETE)
+#define F_AUTO_NEG_COMPLETE V_AUTO_NEG_COMPLETE(1U)
+
+#define S_T7_REMOTE_FAULT 4
+#define V_T7_REMOTE_FAULT(x) ((x) << S_T7_REMOTE_FAULT)
+#define F_T7_REMOTE_FAULT V_T7_REMOTE_FAULT(1U)
+
+#define S_AUTO_NEG_ABILITY 3
+#define V_AUTO_NEG_ABILITY(x) ((x) << S_AUTO_NEG_ABILITY)
+#define F_AUTO_NEG_ABILITY V_AUTO_NEG_ABILITY(1U)
+
+#define S_JABBER_DETECT 1
+#define V_JABBER_DETECT(x) ((x) << S_JABBER_DETECT)
+#define F_JABBER_DETECT V_JABBER_DETECT(1U)
+
+#define S_EXTENDED_CAPABILITY 0
+#define V_EXTENDED_CAPABILITY(x) ((x) << S_EXTENDED_CAPABILITY)
+#define F_EXTENDED_CAPABILITY V_EXTENDED_CAPABILITY(1U)
+
+#define A_MAC_MTIP_PCS_1G_0_PHY_IDENTIFIER_0 0x3e008
+#define A_MAC_MTIP_PCS_1G_0_PHY_IDENTIFIER_1 0x3e00c
+#define A_MAC_MTIP_PCS_1G_0_DEV_ABILITY 0x3e010
+
+#define S_EEE_CLOCK_STOP_ENABLE 8
+#define V_EEE_CLOCK_STOP_ENABLE(x) ((x) << S_EEE_CLOCK_STOP_ENABLE)
+#define F_EEE_CLOCK_STOP_ENABLE V_EEE_CLOCK_STOP_ENABLE(1U)
+
+#define A_MAC_MTIP_PCS_1G_0_PARTNER_ABILITY 0x3e014
+
+#define S_COPPER_LINK_STATUS 15
+#define V_COPPER_LINK_STATUS(x) ((x) << S_COPPER_LINK_STATUS)
+#define F_COPPER_LINK_STATUS V_COPPER_LINK_STATUS(1U)
+
+#define S_COPPER_DUPLEX_STATUS 12
+#define V_COPPER_DUPLEX_STATUS(x) ((x) << S_COPPER_DUPLEX_STATUS)
+#define F_COPPER_DUPLEX_STATUS V_COPPER_DUPLEX_STATUS(1U)
+
+#define S_COPPER_SPEED 10
+#define M_COPPER_SPEED 0x3U
+#define V_COPPER_SPEED(x) ((x) << S_COPPER_SPEED)
+#define G_COPPER_SPEED(x) (((x) >> S_COPPER_SPEED) & M_COPPER_SPEED)
+
+#define S_EEE_CAPABILITY 9
+#define V_EEE_CAPABILITY(x) ((x) << S_EEE_CAPABILITY)
+#define F_EEE_CAPABILITY V_EEE_CAPABILITY(1U)
+
+#define S_EEE_CLOCK_STOP_CAPABILITY 8
+#define V_EEE_CLOCK_STOP_CAPABILITY(x) ((x) << S_EEE_CLOCK_STOP_CAPABILITY)
+#define F_EEE_CLOCK_STOP_CAPABILITY V_EEE_CLOCK_STOP_CAPABILITY(1U)
+
+#define A_MAC_MTIP_PCS_1G_0_AN_EXPANSION 0x3e018
+#define A_MAC_MTIP_PCS_1G_0_NP_TX 0x3e01c
+#define A_MAC_MTIP_PCS_1G_0_LP_NP_RX 0x3e020
+
+#define S_T7_DATA 0
+#define M_T7_DATA 0x7ffU
+#define V_T7_DATA(x) ((x) << S_T7_DATA)
+#define G_T7_DATA(x) (((x) >> S_T7_DATA) & M_T7_DATA)
+
+#define A_MAC_MTIP_PCS_1G_0_EXTENDED_STATUS 0x3e03c
+#define A_MAC_MTIP_PCS_1G_0_SCRATCH 0x3e040
+#define A_MAC_MTIP_PCS_1G_0_REV 0x3e044
+#define A_MAC_MTIP_PCS_1G_0_LINK_TIMER_0 0x3e048
+
+#define S_LINK_TIMER_VAL 0
+#define M_LINK_TIMER_VAL 0xffffU
+#define V_LINK_TIMER_VAL(x) ((x) << S_LINK_TIMER_VAL)
+#define G_LINK_TIMER_VAL(x) (((x) >> S_LINK_TIMER_VAL) & M_LINK_TIMER_VAL)
+
+#define A_MAC_MTIP_PCS_1G_0_LINK_TIMER_1 0x3e04c
+
+#define S_T7_LINK_TIMER_VAL 0
+#define M_T7_LINK_TIMER_VAL 0x1fU
+#define V_T7_LINK_TIMER_VAL(x) ((x) << S_T7_LINK_TIMER_VAL)
+#define G_T7_LINK_TIMER_VAL(x) (((x) >> S_T7_LINK_TIMER_VAL) & M_T7_LINK_TIMER_VAL)
+
+#define A_MAC_MTIP_PCS_1G_0_IF_MODE 0x3e050
+#define A_MAC_MTIP_PCS_1G_0_DEC_ERR_CNT 0x3e054
+#define A_MAC_MTIP_PCS_1G_0_VENDOR_CONTROL 0x3e058
+
+#define S_SGPCS_ENA_ST 15
+#define V_SGPCS_ENA_ST(x) ((x) << S_SGPCS_ENA_ST)
+#define F_SGPCS_ENA_ST V_SGPCS_ENA_ST(1U)
+
+#define S_T7_CFG_CLOCK_RATE 4
+#define M_T7_CFG_CLOCK_RATE 0xfU
+#define V_T7_CFG_CLOCK_RATE(x) ((x) << S_T7_CFG_CLOCK_RATE)
+#define G_T7_CFG_CLOCK_RATE(x) (((x) >> S_T7_CFG_CLOCK_RATE) & M_T7_CFG_CLOCK_RATE)
+
+#define S_SGPCS_ENA_R 0
+#define V_SGPCS_ENA_R(x) ((x) << S_SGPCS_ENA_R)
+#define F_SGPCS_ENA_R V_SGPCS_ENA_R(1U)
+
+#define A_MAC_MTIP_PCS_1G_0_SD_BIT_SLIP 0x3e05c
+
+#define S_SD_BIT_SLIP 0
+#define M_SD_BIT_SLIP 0xfU
+#define V_SD_BIT_SLIP(x) ((x) << S_SD_BIT_SLIP)
+#define G_SD_BIT_SLIP(x) (((x) >> S_SD_BIT_SLIP) & M_SD_BIT_SLIP)
+
+#define A_MAC_MTIP_PCS_1G_1_CONTROL 0x3e100
+#define A_MAC_MTIP_PCS_1G_1_STATUS 0x3e104
+#define A_MAC_MTIP_PCS_1G_1_PHY_IDENTIFIER_0 0x3e108
+#define A_MAC_MTIP_PCS_1G_1_PHY_IDENTIFIER_1 0x3e10c
+#define A_MAC_MTIP_PCS_1G_1_DEV_ABILITY 0x3e110
+#define A_MAC_MTIP_PCS_1G_1_PARTNER_ABILITY 0x3e114
+#define A_MAC_MTIP_PCS_1G_1_AN_EXPANSION 0x3e118
+#define A_MAC_MTIP_PCS_1G_1_NP_TX 0x3e11c
+#define A_MAC_MTIP_PCS_1G_1_LP_NP_RX 0x3e120
+#define A_MAC_MTIP_PCS_1G_1_EXTENDED_STATUS 0x3e13c
+#define A_MAC_MTIP_PCS_1G_1_SCRATCH 0x3e140
+#define A_MAC_MTIP_PCS_1G_1_REV 0x3e144
+#define A_MAC_MTIP_PCS_1G_1_LINK_TIMER_0 0x3e148
+#define A_MAC_MTIP_PCS_1G_1_LINK_TIMER_1 0x3e14c
+#define A_MAC_MTIP_PCS_1G_1_IF_MODE 0x3e150
+#define A_MAC_MTIP_PCS_1G_1_DEC_ERR_CNT 0x3e154
+#define A_MAC_MTIP_PCS_1G_1_VENDOR_CONTROL 0x3e158
+#define A_MAC_MTIP_PCS_1G_1_SD_BIT_SLIP 0x3e15c
+#define A_MAC_MTIP_PCS_1G_2_CONTROL 0x3e200
+#define A_MAC_MTIP_PCS_1G_2_STATUS 0x3e204
+#define A_MAC_MTIP_PCS_1G_2_PHY_IDENTIFIER_0 0x3e208
+#define A_MAC_MTIP_PCS_1G_2_PHY_IDENTIFIER_1 0x3e20c
+#define A_MAC_MTIP_PCS_1G_2_DEV_ABILITY 0x3e210
+#define A_MAC_MTIP_PCS_1G_2_PARTNER_ABILITY 0x3e214
+#define A_MAC_MTIP_PCS_1G_2_AN_EXPANSION 0x3e218
+#define A_MAC_MTIP_PCS_1G_2_NP_TX 0x3e21c
+#define A_MAC_MTIP_PCS_1G_2_LP_NP_RX 0x3e220
+#define A_MAC_MTIP_PCS_1G_2_EXTENDED_STATUS 0x3e23c
+#define A_MAC_MTIP_PCS_1G_2_SCRATCH 0x3e240
+#define A_MAC_MTIP_PCS_1G_2_REV 0x3e244
+#define A_MAC_MTIP_PCS_1G_2_LINK_TIMER_0 0x3e248
+#define A_MAC_MTIP_PCS_1G_2_LINK_TIMER_1 0x3e24c
+#define A_MAC_MTIP_PCS_1G_2_IF_MODE 0x3e250
+#define A_MAC_MTIP_PCS_1G_2_DEC_ERR_CNT 0x3e254
+#define A_MAC_MTIP_PCS_1G_2_VENDOR_CONTROL 0x3e258
+#define A_MAC_MTIP_PCS_1G_2_SD_BIT_SLIP 0x3e25c
+#define A_MAC_MTIP_PCS_1G_3_CONTROL 0x3e300
+#define A_MAC_MTIP_PCS_1G_3_STATUS 0x3e304
+#define A_MAC_MTIP_PCS_1G_3_PHY_IDENTIFIER_0 0x3e308
+#define A_MAC_MTIP_PCS_1G_3_PHY_IDENTIFIER_1 0x3e30c
+#define A_MAC_MTIP_PCS_1G_3_DEV_ABILITY 0x3e310
+#define A_MAC_MTIP_PCS_1G_3_PARTNER_ABILITY 0x3e314
+#define A_MAC_MTIP_PCS_1G_3_AN_EXPANSION 0x3e318
+#define A_MAC_MTIP_PCS_1G_3_NP_TX 0x3e31c
+#define A_MAC_MTIP_PCS_1G_3_LP_NP_RX 0x3e320
+#define A_MAC_MTIP_PCS_1G_3_EXTENDED_STATUS 0x3e33c
+#define A_MAC_MTIP_PCS_1G_3_SCRATCH 0x3e340
+#define A_MAC_MTIP_PCS_1G_3_REV 0x3e344
+#define A_MAC_MTIP_PCS_1G_3_LINK_TIMER_0 0x3e348
+#define A_MAC_MTIP_PCS_1G_3_LINK_TIMER_1 0x3e34c
+#define A_MAC_MTIP_PCS_1G_3_IF_MODE 0x3e350
+#define A_MAC_MTIP_PCS_1G_3_DEC_ERR_CNT 0x3e354
+#define A_MAC_MTIP_PCS_1G_3_VENDOR_CONTROL 0x3e358
+#define A_MAC_MTIP_PCS_1G_3_SD_BIT_SLIP 0x3e35c
+/* MAC DPLL control (CTRL_0..CTRL_7) and status (STATUS_0..STATUS_2) registers. */
+#define A_MAC_DPLL_CTRL_0 0x3f000
+
+#define S_LOCAL_FAULT_OVRD 18
+#define V_LOCAL_FAULT_OVRD(x) ((x) << S_LOCAL_FAULT_OVRD)
+#define F_LOCAL_FAULT_OVRD V_LOCAL_FAULT_OVRD(1U)
+
+#define S_LOCAL_FAULT_HOLD_EN 17
+#define V_LOCAL_FAULT_HOLD_EN(x) ((x) << S_LOCAL_FAULT_HOLD_EN)
+#define F_LOCAL_FAULT_HOLD_EN V_LOCAL_FAULT_HOLD_EN(1U)
+
+#define S_DPLL_RST 16
+#define V_DPLL_RST(x) ((x) << S_DPLL_RST)
+#define F_DPLL_RST V_DPLL_RST(1U)
+
+#define S_CNTOFFSET 0
+#define M_CNTOFFSET 0xffffU
+#define V_CNTOFFSET(x) ((x) << S_CNTOFFSET)
+#define G_CNTOFFSET(x) (((x) >> S_CNTOFFSET) & M_CNTOFFSET)
+
+#define A_MAC_DPLL_CTRL_1 0x3f004
+
+#define S_DELAYK 0
+#define M_DELAYK 0xffffffU
+#define V_DELAYK(x) ((x) << S_DELAYK)
+#define G_DELAYK(x) (((x) >> S_DELAYK) & M_DELAYK)
+
+#define A_MAC_DPLL_CTRL_2 0x3f008
+
+#define S_DIVFFB 16
+#define M_DIVFFB 0xffffU
+#define V_DIVFFB(x) ((x) << S_DIVFFB)
+#define G_DIVFFB(x) (((x) >> S_DIVFFB) & M_DIVFFB)
+
+#define S_DIVFIN 0
+#define M_DIVFIN 0xffffU
+#define V_DIVFIN(x) ((x) << S_DIVFIN)
+#define G_DIVFIN(x) (((x) >> S_DIVFIN) & M_DIVFIN)
+
+#define A_MAC_DPLL_CTRL_3 0x3f00c
+
+#define S_ISHIFT_HOLD 28
+#define M_ISHIFT_HOLD 0xfU
+#define V_ISHIFT_HOLD(x) ((x) << S_ISHIFT_HOLD)
+#define G_ISHIFT_HOLD(x) (((x) >> S_ISHIFT_HOLD) & M_ISHIFT_HOLD)
+
+#define S_ISHIFT 24
+#define M_ISHIFT 0xfU
+#define V_ISHIFT(x) ((x) << S_ISHIFT)
+#define G_ISHIFT(x) (((x) >> S_ISHIFT) & M_ISHIFT)
+
+#define S_INT_PRESET 12
+#define M_INT_PRESET 0xfffU
+#define V_INT_PRESET(x) ((x) << S_INT_PRESET)
+#define G_INT_PRESET(x) (((x) >> S_INT_PRESET) & M_INT_PRESET)
+
+#define S_FMI 4
+#define M_FMI 0xffU
+#define V_FMI(x) ((x) << S_FMI)
+#define G_FMI(x) (((x) >> S_FMI) & M_FMI)
+
+#define S_DPLL_PROGRAM 3
+#define V_DPLL_PROGRAM(x) ((x) << S_DPLL_PROGRAM)
+#define F_DPLL_PROGRAM V_DPLL_PROGRAM(1U)
+
+#define S_PRESET_EN 2
+#define V_PRESET_EN(x) ((x) << S_PRESET_EN)
+#define F_PRESET_EN V_PRESET_EN(1U)
+
+#define S_ONTARGETOV 1
+#define V_ONTARGETOV(x) ((x) << S_ONTARGETOV)
+#define F_ONTARGETOV V_ONTARGETOV(1U)
+
+#define S_FDONLY 0
+#define V_FDONLY(x) ((x) << S_FDONLY)
+#define F_FDONLY V_FDONLY(1U)
+
+#define A_MAC_DPLL_CTRL_4 0x3f010
+
+#define S_FKI 24
+#define M_FKI 0x1fU
+#define V_FKI(x) ((x) << S_FKI)
+#define G_FKI(x) (((x) >> S_FKI) & M_FKI)
+
+#define S_FRAC_PRESET 0
+#define M_FRAC_PRESET 0xffffffU
+#define V_FRAC_PRESET(x) ((x) << S_FRAC_PRESET)
+#define G_FRAC_PRESET(x) (((x) >> S_FRAC_PRESET) & M_FRAC_PRESET)
+
+#define A_MAC_DPLL_CTRL_5 0x3f014
+
+#define S_PH_STEP_CNT_HOLD 24
+#define M_PH_STEP_CNT_HOLD 0x1fU
+#define V_PH_STEP_CNT_HOLD(x) ((x) << S_PH_STEP_CNT_HOLD)
+#define G_PH_STEP_CNT_HOLD(x) (((x) >> S_PH_STEP_CNT_HOLD) & M_PH_STEP_CNT_HOLD)
+
+#define S_CFG_RESET 23
+#define V_CFG_RESET(x) ((x) << S_CFG_RESET)
+#define F_CFG_RESET V_CFG_RESET(1U)
+
+#define S_PH_STEP_CNT 16
+#define M_PH_STEP_CNT 0x1fU
+#define V_PH_STEP_CNT(x) ((x) << S_PH_STEP_CNT)
+#define G_PH_STEP_CNT(x) (((x) >> S_PH_STEP_CNT) & M_PH_STEP_CNT)
+
+#define S_OTDLY 0
+#define M_OTDLY 0xffffU
+#define V_OTDLY(x) ((x) << S_OTDLY)
+#define G_OTDLY(x) (((x) >> S_OTDLY) & M_OTDLY)
+
+#define A_MAC_DPLL_CTRL_6 0x3f018
+
+#define S_TARGETCNT 16
+#define M_TARGETCNT 0xffffU
+#define V_TARGETCNT(x) ((x) << S_TARGETCNT)
+#define G_TARGETCNT(x) (((x) >> S_TARGETCNT) & M_TARGETCNT)
+
+#define S_PKP 8
+#define M_PKP 0x1fU
+#define V_PKP(x) ((x) << S_PKP)
+#define G_PKP(x) (((x) >> S_PKP) & M_PKP)
+
+#define S_PMP 0
+#define M_PMP 0xffU
+#define V_PMP(x) ((x) << S_PMP)
+#define G_PMP(x) (((x) >> S_PMP) & M_PMP)
+
+#define A_MAC_DPLL_CTRL_7 0x3f01c
+#define A_MAC_DPLL_STATUS_0 0x3f020
+
+#define S_FRAC 0
+#define M_FRAC 0xffffffU
+#define V_FRAC(x) ((x) << S_FRAC)
+#define G_FRAC(x) (((x) >> S_FRAC) & M_FRAC)
+
+#define A_MAC_DPLL_STATUS_1 0x3f024
+
+#define S_FRAC_PD_OUT 0
+#define M_FRAC_PD_OUT 0xffffffU
+#define V_FRAC_PD_OUT(x) ((x) << S_FRAC_PD_OUT)
+#define G_FRAC_PD_OUT(x) (((x) >> S_FRAC_PD_OUT) & M_FRAC_PD_OUT)
+
+#define A_MAC_DPLL_STATUS_2 0x3f028
+
+#define S_INT 12
+#define M_INT 0xfffU
+#define V_INT(x) ((x) << S_INT)
+#define G_INT(x) (((x) >> S_INT) & M_INT)
+
+#define S_INT_PD_OUT 0
+#define M_INT_PD_OUT 0xfffU
+#define V_INT_PD_OUT(x) ((x) << S_INT_PD_OUT)
+#define G_INT_PD_OUT(x) (((x) >> S_INT_PD_OUT) & M_INT_PD_OUT)
+
+/* Fractional-N PLL control/status registers. */
+#define A_MAC_FRAC_N_PLL_CTRL_0 0x3f02c
+
+#define S_FRAC_N_DSKEWCALCNT 29
+#define M_FRAC_N_DSKEWCALCNT 0x7U
+#define V_FRAC_N_DSKEWCALCNT(x) ((x) << S_FRAC_N_DSKEWCALCNT)
+#define G_FRAC_N_DSKEWCALCNT(x) (((x) >> S_FRAC_N_DSKEWCALCNT) & M_FRAC_N_DSKEWCALCNT)
+
+#define S_PLLEN 28
+#define V_PLLEN(x) ((x) << S_PLLEN)
+#define F_PLLEN V_PLLEN(1U)
+
+#define S_T7_BYPASS 24
+#define M_T7_BYPASS 0xfU
+#define V_T7_BYPASS(x) ((x) << S_T7_BYPASS)
+#define G_T7_BYPASS(x) (((x) >> S_T7_BYPASS) & M_T7_BYPASS)
+
+/* Post-divider pairs A/B for output dividers 3..0. */
+#define S_POSTDIV3A 21
+#define M_POSTDIV3A 0x7U
+#define V_POSTDIV3A(x) ((x) << S_POSTDIV3A)
+#define G_POSTDIV3A(x) (((x) >> S_POSTDIV3A) & M_POSTDIV3A)
+
+#define S_POSTDIV3B 18
+#define M_POSTDIV3B 0x7U
+#define V_POSTDIV3B(x) ((x) << S_POSTDIV3B)
+#define G_POSTDIV3B(x) (((x) >> S_POSTDIV3B) & M_POSTDIV3B)
+
+#define S_POSTDIV2A 15
+#define M_POSTDIV2A 0x7U
+#define V_POSTDIV2A(x) ((x) << S_POSTDIV2A)
+#define G_POSTDIV2A(x) (((x) >> S_POSTDIV2A) & M_POSTDIV2A)
+
+#define S_POSTDIV2B 12
+#define M_POSTDIV2B 0x7U
+#define V_POSTDIV2B(x) ((x) << S_POSTDIV2B)
+#define G_POSTDIV2B(x) (((x) >> S_POSTDIV2B) & M_POSTDIV2B)
+
+#define S_POSTDIV1A 9
+#define M_POSTDIV1A 0x7U
+#define V_POSTDIV1A(x) ((x) << S_POSTDIV1A)
+#define G_POSTDIV1A(x) (((x) >> S_POSTDIV1A) & M_POSTDIV1A)
+
+#define S_POSTDIV1B 6
+#define M_POSTDIV1B 0x7U
+#define V_POSTDIV1B(x) ((x) << S_POSTDIV1B)
+#define G_POSTDIV1B(x) (((x) >> S_POSTDIV1B) & M_POSTDIV1B)
+
+#define S_POSTDIV0A 3
+#define M_POSTDIV0A 0x7U
+#define V_POSTDIV0A(x) ((x) << S_POSTDIV0A)
+#define G_POSTDIV0A(x) (((x) >> S_POSTDIV0A) & M_POSTDIV0A)
+
+#define S_POSTDIV0B 0
+#define M_POSTDIV0B 0x7U
+#define V_POSTDIV0B(x) ((x) << S_POSTDIV0B)
+#define G_POSTDIV0B(x) (((x) >> S_POSTDIV0B) & M_POSTDIV0B)
+
+#define A_MAC_FRAC_N_PLL_CTRL_1 0x3f030
+
+#define S_FRAC_N_FRAC_N_FOUTEN 28
+#define M_FRAC_N_FRAC_N_FOUTEN 0xfU
+#define V_FRAC_N_FRAC_N_FOUTEN(x) ((x) << S_FRAC_N_FRAC_N_FOUTEN)
+#define G_FRAC_N_FRAC_N_FOUTEN(x) (((x) >> S_FRAC_N_FRAC_N_FOUTEN) & M_FRAC_N_FRAC_N_FOUTEN)
+
+#define S_FRAC_N_DSKEWCALIN 16
+#define M_FRAC_N_DSKEWCALIN 0xfffU
+#define V_FRAC_N_DSKEWCALIN(x) ((x) << S_FRAC_N_DSKEWCALIN)
+#define G_FRAC_N_DSKEWCALIN(x) (((x) >> S_FRAC_N_DSKEWCALIN) & M_FRAC_N_DSKEWCALIN)
+
+#define S_FRAC_N_REFDIV 10
+#define M_FRAC_N_REFDIV 0x3fU
+#define V_FRAC_N_REFDIV(x) ((x) << S_FRAC_N_REFDIV)
+#define G_FRAC_N_REFDIV(x) (((x) >> S_FRAC_N_REFDIV) & M_FRAC_N_REFDIV)
+
+#define S_FRAC_N_DSMEN 9
+#define V_FRAC_N_DSMEN(x) ((x) << S_FRAC_N_DSMEN)
+#define F_FRAC_N_DSMEN V_FRAC_N_DSMEN(1U)
+
+#define S_FRAC_N_PLLEN 8
+#define V_FRAC_N_PLLEN(x) ((x) << S_FRAC_N_PLLEN)
+#define F_FRAC_N_PLLEN V_FRAC_N_PLLEN(1U)
+
+#define S_FRAC_N_DACEN 7
+#define V_FRAC_N_DACEN(x) ((x) << S_FRAC_N_DACEN)
+#define F_FRAC_N_DACEN V_FRAC_N_DACEN(1U)
+
+#define S_FRAC_N_POSTDIV0PRE 6
+#define V_FRAC_N_POSTDIV0PRE(x) ((x) << S_FRAC_N_POSTDIV0PRE)
+#define F_FRAC_N_POSTDIV0PRE V_FRAC_N_POSTDIV0PRE(1U)
+
+#define S_FRAC_N_DSKEWCALBYP 5
+#define V_FRAC_N_DSKEWCALBYP(x) ((x) << S_FRAC_N_DSKEWCALBYP)
+#define F_FRAC_N_DSKEWCALBYP V_FRAC_N_DSKEWCALBYP(1U)
+
+#define S_FRAC_N_DSKEWFASTCAL 4
+#define V_FRAC_N_DSKEWFASTCAL(x) ((x) << S_FRAC_N_DSKEWFASTCAL)
+#define F_FRAC_N_DSKEWFASTCAL V_FRAC_N_DSKEWFASTCAL(1U)
+
+#define S_FRAC_N_DSKEWCALEN 3
+#define V_FRAC_N_DSKEWCALEN(x) ((x) << S_FRAC_N_DSKEWCALEN)
+#define F_FRAC_N_DSKEWCALEN V_FRAC_N_DSKEWCALEN(1U)
+
+#define S_FRAC_N_FREFCMLEN 2
+#define V_FRAC_N_FREFCMLEN(x) ((x) << S_FRAC_N_FREFCMLEN)
+#define F_FRAC_N_FREFCMLEN V_FRAC_N_FREFCMLEN(1U)
+
+#define A_MAC_FRAC_N_PLL_STATUS_0 0x3f034
+
+#define S_DSKEWCALLOCK 12
+#define V_DSKEWCALLOCK(x) ((x) << S_DSKEWCALLOCK)
+#define F_DSKEWCALLOCK V_DSKEWCALLOCK(1U)
+
+#define S_DSKEWCALOUT 0
+#define M_DSKEWCALOUT 0xfffU
+#define V_DSKEWCALOUT(x) ((x) << S_DSKEWCALOUT)
+#define G_DSKEWCALOUT(x) (((x) >> S_DSKEWCALOUT) & M_DSKEWCALOUT)
+
+/*
+ * PCS status: 2-bit TSU fields per MII interface instance
+ * (XLGMII7..0 and CGMII3..0 for TX; CDMII/... for RX in STATUS_1).
+ */
+#define A_MAC_MTIP_PCS_STATUS_0 0x3f100
+
+#define S_XLGMII7_TX_TSU 22
+#define M_XLGMII7_TX_TSU 0x3U
+#define V_XLGMII7_TX_TSU(x) ((x) << S_XLGMII7_TX_TSU)
+#define G_XLGMII7_TX_TSU(x) (((x) >> S_XLGMII7_TX_TSU) & M_XLGMII7_TX_TSU)
+
+#define S_XLGMII6_TX_TSU 20
+#define M_XLGMII6_TX_TSU 0x3U
+#define V_XLGMII6_TX_TSU(x) ((x) << S_XLGMII6_TX_TSU)
+#define G_XLGMII6_TX_TSU(x) (((x) >> S_XLGMII6_TX_TSU) & M_XLGMII6_TX_TSU)
+
+#define S_XLGMII5_TX_TSU 18
+#define M_XLGMII5_TX_TSU 0x3U
+#define V_XLGMII5_TX_TSU(x) ((x) << S_XLGMII5_TX_TSU)
+#define G_XLGMII5_TX_TSU(x) (((x) >> S_XLGMII5_TX_TSU) & M_XLGMII5_TX_TSU)
+
+#define S_XLGMII4_TX_TSU 16
+#define M_XLGMII4_TX_TSU 0x3U
+#define V_XLGMII4_TX_TSU(x) ((x) << S_XLGMII4_TX_TSU)
+#define G_XLGMII4_TX_TSU(x) (((x) >> S_XLGMII4_TX_TSU) & M_XLGMII4_TX_TSU)
+
+#define S_XLGMII3_TX_TSU 14
+#define M_XLGMII3_TX_TSU 0x3U
+#define V_XLGMII3_TX_TSU(x) ((x) << S_XLGMII3_TX_TSU)
+#define G_XLGMII3_TX_TSU(x) (((x) >> S_XLGMII3_TX_TSU) & M_XLGMII3_TX_TSU)
+
+#define S_XLGMII2_TX_TSU 12
+#define M_XLGMII2_TX_TSU 0x3U
+#define V_XLGMII2_TX_TSU(x) ((x) << S_XLGMII2_TX_TSU)
+#define G_XLGMII2_TX_TSU(x) (((x) >> S_XLGMII2_TX_TSU) & M_XLGMII2_TX_TSU)
+
+#define S_XLGMII1_TX_TSU 10
+#define M_XLGMII1_TX_TSU 0x3U
+#define V_XLGMII1_TX_TSU(x) ((x) << S_XLGMII1_TX_TSU)
+#define G_XLGMII1_TX_TSU(x) (((x) >> S_XLGMII1_TX_TSU) & M_XLGMII1_TX_TSU)
+
+#define S_XLGMII0_TX_TSU 8
+#define M_XLGMII0_TX_TSU 0x3U
+#define V_XLGMII0_TX_TSU(x) ((x) << S_XLGMII0_TX_TSU)
+#define G_XLGMII0_TX_TSU(x) (((x) >> S_XLGMII0_TX_TSU) & M_XLGMII0_TX_TSU)
+
+#define S_CGMII3_TX_TSU 6
+#define M_CGMII3_TX_TSU 0x3U
+#define V_CGMII3_TX_TSU(x) ((x) << S_CGMII3_TX_TSU)
+#define G_CGMII3_TX_TSU(x) (((x) >> S_CGMII3_TX_TSU) & M_CGMII3_TX_TSU)
+
+#define S_CGMII2_TX_TSU 4
+#define M_CGMII2_TX_TSU 0x3U
+#define V_CGMII2_TX_TSU(x) ((x) << S_CGMII2_TX_TSU)
+#define G_CGMII2_TX_TSU(x) (((x) >> S_CGMII2_TX_TSU) & M_CGMII2_TX_TSU)
+
+#define S_CGMII1_TX_TSU 2
+#define M_CGMII1_TX_TSU 0x3U
+#define V_CGMII1_TX_TSU(x) ((x) << S_CGMII1_TX_TSU)
+#define G_CGMII1_TX_TSU(x) (((x) >> S_CGMII1_TX_TSU) & M_CGMII1_TX_TSU)
+
+#define S_CGMII0_TX_TSU 0
+#define M_CGMII0_TX_TSU 0x3U
+#define V_CGMII0_TX_TSU(x) ((x) << S_CGMII0_TX_TSU)
+#define G_CGMII0_TX_TSU(x) (((x) >> S_CGMII0_TX_TSU) & M_CGMII0_TX_TSU)
+
+#define A_MAC_MTIP_PCS_STATUS_1 0x3f104
+
+#define S_CDMII1_RX_TSU 26
+#define M_CDMII1_RX_TSU 0x3U
+#define V_CDMII1_RX_TSU(x) ((x) << S_CDMII1_RX_TSU)
+#define G_CDMII1_RX_TSU(x) (((x) >> S_CDMII1_RX_TSU) & M_CDMII1_RX_TSU)
+
+
+#define S_CDMII0_RX_TSU 24
+#define M_CDMII0_RX_TSU 0x3U
+#define V_CDMII0_RX_TSU(x) ((x) << S_CDMII0_RX_TSU)
+#define G_CDMII0_RX_TSU(x) (((x) >> S_CDMII0_RX_TSU) & M_CDMII0_RX_TSU)
+
+#define S_XLGMII7_RX_TSU 22
+#define M_XLGMII7_RX_TSU 0x3U
+#define V_XLGMII7_RX_TSU(x) ((x) << S_XLGMII7_RX_TSU)
+#define G_XLGMII7_RX_TSU(x) (((x) >> S_XLGMII7_RX_TSU) & M_XLGMII7_RX_TSU)
+
+#define S_XLGMII6_RX_TSU 20
+#define M_XLGMII6_RX_TSU 0x3U
+#define V_XLGMII6_RX_TSU(x) ((x) << S_XLGMII6_RX_TSU)
+#define G_XLGMII6_RX_TSU(x) (((x) >> S_XLGMII6_RX_TSU) & M_XLGMII6_RX_TSU)
+
+#define S_XLGMII5_RX_TSU 18
+#define M_XLGMII5_RX_TSU 0x3U
+#define V_XLGMII5_RX_TSU(x) ((x) << S_XLGMII5_RX_TSU)
+#define G_XLGMII5_RX_TSU(x) (((x) >> S_XLGMII5_RX_TSU) & M_XLGMII5_RX_TSU)
+
+#define S_XLGMII4_RX_TSU 16
+#define M_XLGMII4_RX_TSU 0x3U
+#define V_XLGMII4_RX_TSU(x) ((x) << S_XLGMII4_RX_TSU)
+#define G_XLGMII4_RX_TSU(x) (((x) >> S_XLGMII4_RX_TSU) & M_XLGMII4_RX_TSU)
+
+#define S_XLGMII3_RX_TSU 14
+#define M_XLGMII3_RX_TSU 0x3U
+#define V_XLGMII3_RX_TSU(x) ((x) << S_XLGMII3_RX_TSU)
+#define G_XLGMII3_RX_TSU(x) (((x) >> S_XLGMII3_RX_TSU) & M_XLGMII3_RX_TSU)
+
+#define S_XLGMII2_RX_TSU 12
+#define M_XLGMII2_RX_TSU 0x3U
+#define V_XLGMII2_RX_TSU(x) ((x) << S_XLGMII2_RX_TSU)
+#define G_XLGMII2_RX_TSU(x) (((x) >> S_XLGMII2_RX_TSU) & M_XLGMII2_RX_TSU)
+
+#define S_XLGMII1_RX_TSU 10
+#define M_XLGMII1_RX_TSU 0x3U
+#define V_XLGMII1_RX_TSU(x) ((x) << S_XLGMII1_RX_TSU)
+#define G_XLGMII1_RX_TSU(x) (((x) >> S_XLGMII1_RX_TSU) & M_XLGMII1_RX_TSU)
+
+#define S_XLGMII0_RX_TSU 8
+#define M_XLGMII0_RX_TSU 0x3U
+#define V_XLGMII0_RX_TSU(x) ((x) << S_XLGMII0_RX_TSU)
+#define G_XLGMII0_RX_TSU(x) (((x) >> S_XLGMII0_RX_TSU) & M_XLGMII0_RX_TSU)
+
+#define S_CGMII3_RX_TSU 6
+#define M_CGMII3_RX_TSU 0x3U
+#define V_CGMII3_RX_TSU(x) ((x) << S_CGMII3_RX_TSU)
+#define G_CGMII3_RX_TSU(x) (((x) >> S_CGMII3_RX_TSU) & M_CGMII3_RX_TSU)
+
+#define S_CGMII2_RX_TSU 4
+#define M_CGMII2_RX_TSU 0x3U
+#define V_CGMII2_RX_TSU(x) ((x) << S_CGMII2_RX_TSU)
+#define G_CGMII2_RX_TSU(x) (((x) >> S_CGMII2_RX_TSU) & M_CGMII2_RX_TSU)
+
+#define S_CGMII1_RX_TSU 2
+#define M_CGMII1_RX_TSU 0x3U
+#define V_CGMII1_RX_TSU(x) ((x) << S_CGMII1_RX_TSU)
+#define G_CGMII1_RX_TSU(x) (((x) >> S_CGMII1_RX_TSU) & M_CGMII1_RX_TSU)
+
+#define S_CGMII0_RX_TSU 0
+#define M_CGMII0_RX_TSU 0x3U
+#define V_CGMII0_RX_TSU(x) ((x) << S_CGMII0_RX_TSU)
+#define G_CGMII0_RX_TSU(x) (((x) >> S_CGMII0_RX_TSU) & M_CGMII0_RX_TSU)
+
+#define A_MAC_MTIP_PCS_STATUS_2 0x3f108
+
+#define S_SD_BIT_SLIP_0 0
+#define M_SD_BIT_SLIP_0 0x3fffffffU
+#define V_SD_BIT_SLIP_0(x) ((x) << S_SD_BIT_SLIP_0)
+#define G_SD_BIT_SLIP_0(x) (((x) >> S_SD_BIT_SLIP_0) & M_SD_BIT_SLIP_0)
+
+#define A_MAC_MTIP_PCS_STATUS_3 0x3f10c
+
+#define S_SD_BIT_SLIP_1 0
+#define M_SD_BIT_SLIP_1 0x3ffffU
+#define V_SD_BIT_SLIP_1(x) ((x) << S_SD_BIT_SLIP_1)
+#define G_SD_BIT_SLIP_1(x) (((x) >> S_SD_BIT_SLIP_1) & M_SD_BIT_SLIP_1)
+
+#define A_MAC_MTIP_PCS_STATUS_4 0x3f110
+
+#define S_TSU_RX_SD 0
+#define M_TSU_RX_SD 0xffffU
+#define V_TSU_RX_SD(x) ((x) << S_TSU_RX_SD)
+#define G_TSU_RX_SD(x) (((x) >> S_TSU_RX_SD) & M_TSU_RX_SD)
+
+#define A_MAC_MTIP_PCS_STATUS_5 0x3f114
+
+#define S_RSFEC_XSTATS_STRB 0
+#define M_RSFEC_XSTATS_STRB 0xffffffU
+#define V_RSFEC_XSTATS_STRB(x) ((x) << S_RSFEC_XSTATS_STRB)
+#define G_RSFEC_XSTATS_STRB(x) (((x) >> S_RSFEC_XSTATS_STRB) & M_RSFEC_XSTATS_STRB)
+
+#define A_MAC_MTIP_PCS_STATUS_6 0x3f118
+#define A_MAC_MTIP_PCS_STATUS_7 0x3f11c
+#define A_MAC_MTIP_MAC_10G_100G_STATUS_0 0x3f120
+
+#define S_TSV_XON_STB_2 24
+#define M_TSV_XON_STB_2 0xffU
+#define V_TSV_XON_STB_2(x) ((x) << S_TSV_XON_STB_2)
+#define G_TSV_XON_STB_2(x) (((x) >> S_TSV_XON_STB_2) & M_TSV_XON_STB_2)
+
+#define S_TSV_XOFF_STB_2 16
+#define M_TSV_XOFF_STB_2 0xffU
+#define V_TSV_XOFF_STB_2(x) ((x) << S_TSV_XOFF_STB_2)
+#define G_TSV_XOFF_STB_2(x) (((x) >> S_TSV_XOFF_STB_2) & M_TSV_XOFF_STB_2)
+
+#define S_RSV_XON_STB_2 8
+#define M_RSV_XON_STB_2 0xffU
+#define V_RSV_XON_STB_2(x) ((x) << S_RSV_XON_STB_2)
+#define G_RSV_XON_STB_2(x) (((x) >> S_RSV_XON_STB_2) & M_RSV_XON_STB_2)
+
+#define S_RSV_XOFF_STB_2 0
+#define M_RSV_XOFF_STB_2 0xffU
+#define V_RSV_XOFF_STB_2(x) ((x) << S_RSV_XOFF_STB_2)
+#define G_RSV_XOFF_STB_2(x) (((x) >> S_RSV_XOFF_STB_2) & M_RSV_XOFF_STB_2)
+
+#define A_MAC_MTIP_MAC_10G_100G_STATUS_1 0x3f124
+
+#define S_TSV_XON_STB_3 24
+#define M_TSV_XON_STB_3 0xffU
+#define V_TSV_XON_STB_3(x) ((x) << S_TSV_XON_STB_3)
+#define G_TSV_XON_STB_3(x) (((x) >> S_TSV_XON_STB_3) & M_TSV_XON_STB_3)
+
+#define S_TSV_XOFF_STB_3 16
+#define M_TSV_XOFF_STB_3 0xffU
+#define V_TSV_XOFF_STB_3(x) ((x) << S_TSV_XOFF_STB_3)
+#define G_TSV_XOFF_STB_3(x) (((x) >> S_TSV_XOFF_STB_3) & M_TSV_XOFF_STB_3)
+
+#define S_RSV_XON_STB_3 8
+#define M_RSV_XON_STB_3 0xffU
+#define V_RSV_XON_STB_3(x) ((x) << S_RSV_XON_STB_3)
+#define G_RSV_XON_STB_3(x) (((x) >> S_RSV_XON_STB_3) & M_RSV_XON_STB_3)
+
+#define S_RSV_XOFF_STB_3 0
+#define M_RSV_XOFF_STB_3 0xffU
+#define V_RSV_XOFF_STB_3(x) ((x) << S_RSV_XOFF_STB_3)
+#define G_RSV_XOFF_STB_3(x) (((x) >> S_RSV_XOFF_STB_3) & M_RSV_XOFF_STB_3)
+
+#define A_MAC_MTIP_MAC_10G_100G_STATUS_2 0x3f128
+
+#define S_TSV_XON_STB_4 24
+#define M_TSV_XON_STB_4 0xffU
+#define V_TSV_XON_STB_4(x) ((x) << S_TSV_XON_STB_4)
+#define G_TSV_XON_STB_4(x) (((x) >> S_TSV_XON_STB_4) & M_TSV_XON_STB_4)
+
+#define S_TSV_XOFF_STB_4 16
+#define M_TSV_XOFF_STB_4 0xffU
+#define V_TSV_XOFF_STB_4(x) ((x) << S_TSV_XOFF_STB_4)
+#define G_TSV_XOFF_STB_4(x) (((x) >> S_TSV_XOFF_STB_4) & M_TSV_XOFF_STB_4)
+
+#define S_RSV_XON_STB_4 8
+#define M_RSV_XON_STB_4 0xffU
+#define V_RSV_XON_STB_4(x) ((x) << S_RSV_XON_STB_4)
+#define G_RSV_XON_STB_4(x) (((x) >> S_RSV_XON_STB_4) & M_RSV_XON_STB_4)
+
+#define S_RSV_XOFF_STB_4 0
+#define M_RSV_XOFF_STB_4 0xffU
+#define V_RSV_XOFF_STB_4(x) ((x) << S_RSV_XOFF_STB_4)
+#define G_RSV_XOFF_STB_4(x) (((x) >> S_RSV_XOFF_STB_4) & M_RSV_XOFF_STB_4)
+
+#define A_MAC_MTIP_MAC_10G_100G_STATUS_3 0x3f12c
+
+#define S_TSV_XON_STB_5 24
+#define M_TSV_XON_STB_5 0xffU
+#define V_TSV_XON_STB_5(x) ((x) << S_TSV_XON_STB_5)
+#define G_TSV_XON_STB_5(x) (((x) >> S_TSV_XON_STB_5) & M_TSV_XON_STB_5)
+
+#define S_TSV_XOFF_STB_5 16
+#define M_TSV_XOFF_STB_5 0xffU
+#define V_TSV_XOFF_STB_5(x) ((x) << S_TSV_XOFF_STB_5)
+#define G_TSV_XOFF_STB_5(x) (((x) >> S_TSV_XOFF_STB_5) & M_TSV_XOFF_STB_5)
+
+#define S_RSV_XON_STB_5 8
+#define M_RSV_XON_STB_5 0xffU
+#define V_RSV_XON_STB_5(x) ((x) << S_RSV_XON_STB_5)
+#define G_RSV_XON_STB_5(x) (((x) >> S_RSV_XON_STB_5) & M_RSV_XON_STB_5)
+
+#define S_RSV_XOFF_STB_5 0
+#define M_RSV_XOFF_STB_5 0xffU
+#define V_RSV_XOFF_STB_5(x) ((x) << S_RSV_XOFF_STB_5)
+#define G_RSV_XOFF_STB_5(x) (((x) >> S_RSV_XOFF_STB_5) & M_RSV_XOFF_STB_5)
+
+#define A_MAC_MTIP_MAC_10G_100G_STATUS_4 0x3f130
+
+#define S_TX_SFD_O_5 19
+#define V_TX_SFD_O_5(x) ((x) << S_TX_SFD_O_5)
+#define F_TX_SFD_O_5 V_TX_SFD_O_5(1U)
+
+#define S_TX_SFD_O_4 18
+#define V_TX_SFD_O_4(x) ((x) << S_TX_SFD_O_4)
+#define F_TX_SFD_O_4 V_TX_SFD_O_4(1U)
+
+#define S_TX_SFD_O_3 17
+#define V_TX_SFD_O_3(x) ((x) << S_TX_SFD_O_3)
+#define F_TX_SFD_O_3 V_TX_SFD_O_3(1U)
+
+#define S_TX_SFD_O_2 16
+#define V_TX_SFD_O_2(x) ((x) << S_TX_SFD_O_2)
+#define F_TX_SFD_O_2 V_TX_SFD_O_2(1U)
+
+#define S_RX_SFD_O_5 15
+#define V_RX_SFD_O_5(x) ((x) << S_RX_SFD_O_5)
+#define F_RX_SFD_O_5 V_RX_SFD_O_5(1U)
+
+#define S_RX_SFD_O_4 14
+#define V_RX_SFD_O_4(x) ((x) << S_RX_SFD_O_4)
+#define F_RX_SFD_O_4 V_RX_SFD_O_4(1U)
+
+#define S_RX_SFD_O_3 13
+#define V_RX_SFD_O_3(x) ((x) << S_RX_SFD_O_3)
+#define F_RX_SFD_O_3 V_RX_SFD_O_3(1U)
+
+#define S_RX_SFD_O_2 12
+#define V_RX_SFD_O_2(x) ((x) << S_RX_SFD_O_2)
+#define F_RX_SFD_O_2 V_RX_SFD_O_2(1U)
+
+#define S_RX_SFD_SHIFT_O_5 11
+#define V_RX_SFD_SHIFT_O_5(x) ((x) << S_RX_SFD_SHIFT_O_5)
+#define F_RX_SFD_SHIFT_O_5 V_RX_SFD_SHIFT_O_5(1U)
+
+#define S_RX_SFD_SHIFT_O_4 10
+#define V_RX_SFD_SHIFT_O_4(x) ((x) << S_RX_SFD_SHIFT_O_4)
+#define F_RX_SFD_SHIFT_O_4 V_RX_SFD_SHIFT_O_4(1U)
+
+#define S_RX_SFD_SHIFT_O_3 9
+#define V_RX_SFD_SHIFT_O_3(x) ((x) << S_RX_SFD_SHIFT_O_3)
+#define F_RX_SFD_SHIFT_O_3 V_RX_SFD_SHIFT_O_3(1U)
+
+#define S_RX_SFD_SHIFT_O_2 8
+#define V_RX_SFD_SHIFT_O_2(x) ((x) << S_RX_SFD_SHIFT_O_2)
+#define F_RX_SFD_SHIFT_O_2 V_RX_SFD_SHIFT_O_2(1U)
+
+#define S_TX_SFD_SHIFT_O_5 7
+#define V_TX_SFD_SHIFT_O_5(x) ((x) << S_TX_SFD_SHIFT_O_5)
+#define F_TX_SFD_SHIFT_O_5 V_TX_SFD_SHIFT_O_5(1U)
+
+#define S_TX_SFD_SHIFT_O_4 6
+#define V_TX_SFD_SHIFT_O_4(x) ((x) << S_TX_SFD_SHIFT_O_4)
+#define F_TX_SFD_SHIFT_O_4 V_TX_SFD_SHIFT_O_4(1U)
+
+#define S_TX_SFD_SHIFT_O_3 5
+#define V_TX_SFD_SHIFT_O_3(x) ((x) << S_TX_SFD_SHIFT_O_3)
+#define F_TX_SFD_SHIFT_O_3 V_TX_SFD_SHIFT_O_3(1U)
+
+#define S_TX_SFD_SHIFT_O_2 4
+#define V_TX_SFD_SHIFT_O_2(x) ((x) << S_TX_SFD_SHIFT_O_2)
+#define F_TX_SFD_SHIFT_O_2 V_TX_SFD_SHIFT_O_2(1U)
+
+#define S_TS_SFD_ENA_5 3
+#define V_TS_SFD_ENA_5(x) ((x) << S_TS_SFD_ENA_5)
+#define F_TS_SFD_ENA_5 V_TS_SFD_ENA_5(1U)
+
+#define S_TS_SFD_ENA_4 2
+#define V_TS_SFD_ENA_4(x) ((x) << S_TS_SFD_ENA_4)
+#define F_TS_SFD_ENA_4 V_TS_SFD_ENA_4(1U)
+
+#define S_TS_SFD_ENA_3 1
+#define V_TS_SFD_ENA_3(x) ((x) << S_TS_SFD_ENA_3)
+#define F_TS_SFD_ENA_3 V_TS_SFD_ENA_3(1U)
+
+#define S_TS_SFD_ENA_2 0
+#define V_TS_SFD_ENA_2(x) ((x) << S_TS_SFD_ENA_2)
+#define F_TS_SFD_ENA_2 V_TS_SFD_ENA_2(1U)
+
+#define A_MAC_STS_CONFIG 0x3f200
+
+#define S_STS_ENA 30
+#define V_STS_ENA(x) ((x) << S_STS_ENA)
+#define F_STS_ENA V_STS_ENA(1U)
+
+#define S_N_PPS_ENA 29
+#define V_N_PPS_ENA(x) ((x) << S_N_PPS_ENA)
+#define F_N_PPS_ENA V_N_PPS_ENA(1U)
+
+#define S_STS_RESET 28
+#define V_STS_RESET(x) ((x) << S_STS_RESET)
+#define F_STS_RESET V_STS_RESET(1U)
+
+#define S_DEBOUNCE_CNT 0
+#define M_DEBOUNCE_CNT 0xfffffffU
+#define V_DEBOUNCE_CNT(x) ((x) << S_DEBOUNCE_CNT)
+#define G_DEBOUNCE_CNT(x) (((x) >> S_DEBOUNCE_CNT) & M_DEBOUNCE_CNT)
+
+#define A_MAC_STS_COUNTER 0x3f204
+#define A_MAC_STS_COUNT_1 0x3f208
+#define A_MAC_STS_COUNT_2 0x3f20c
+#define A_MAC_STS_N_PPS_COUNT_HI 0x3f210
+#define A_MAC_STS_N_PPS_COUNT_LO 0x3f214
+#define A_MAC_STS_N_PPS_COUNTER 0x3f218
+#define A_MAC_BGR_PQ0_FIRMWARE_COMMON_0 0x4030
+
+#define S_MAC_BGR_BGR_REG_APB_SEL 0
+#define V_MAC_BGR_BGR_REG_APB_SEL(x) ((x) << S_MAC_BGR_BGR_REG_APB_SEL)
+#define F_MAC_BGR_BGR_REG_APB_SEL V_MAC_BGR_BGR_REG_APB_SEL(1U)
+
+#define A_MAC_BGR_TOP_DIG_CTRL1_REG_LSB 0x4430
+
+#define S_MAC_BGR_BGR_REFCLK_CTRL_BYPASS 15
+#define V_MAC_BGR_BGR_REFCLK_CTRL_BYPASS(x) ((x) << S_MAC_BGR_BGR_REFCLK_CTRL_BYPASS)
+#define F_MAC_BGR_BGR_REFCLK_CTRL_BYPASS V_MAC_BGR_BGR_REFCLK_CTRL_BYPASS(1U)
+
+#define S_MAC_BGR_BGR_COREREFCLK_SEL 14
+#define V_MAC_BGR_BGR_COREREFCLK_SEL(x) ((x) << S_MAC_BGR_BGR_COREREFCLK_SEL)
+#define F_MAC_BGR_BGR_COREREFCLK_SEL V_MAC_BGR_BGR_COREREFCLK_SEL(1U)
+
+#define S_MAC_BGR_BGR_TEST_CLK_DIV 8
+#define M_MAC_BGR_BGR_TEST_CLK_DIV 0x7U
+#define V_MAC_BGR_BGR_TEST_CLK_DIV(x) ((x) << S_MAC_BGR_BGR_TEST_CLK_DIV)
+#define G_MAC_BGR_BGR_TEST_CLK_DIV(x) (((x) >> S_MAC_BGR_BGR_TEST_CLK_DIV) & M_MAC_BGR_BGR_TEST_CLK_DIV)
+
+#define S_MAC_BGR_BGR_TEST_CLK_EN 7
+#define V_MAC_BGR_BGR_TEST_CLK_EN(x) ((x) << S_MAC_BGR_BGR_TEST_CLK_EN)
+#define F_MAC_BGR_BGR_TEST_CLK_EN V_MAC_BGR_BGR_TEST_CLK_EN(1U)
+
+#define S_MAC_BGR_BGR_TEST_CLK_BGRSEL 5
+#define M_MAC_BGR_BGR_TEST_CLK_BGRSEL 0x3U
+#define V_MAC_BGR_BGR_TEST_CLK_BGRSEL(x) ((x) << S_MAC_BGR_BGR_TEST_CLK_BGRSEL)
+#define G_MAC_BGR_BGR_TEST_CLK_BGRSEL(x) (((x) >> S_MAC_BGR_BGR_TEST_CLK_BGRSEL) & M_MAC_BGR_BGR_TEST_CLK_BGRSEL)
+
+#define S_MAC_BGR_BGR_TEST_CLK_SEL 0
+#define M_MAC_BGR_BGR_TEST_CLK_SEL 0x1fU
+#define V_MAC_BGR_BGR_TEST_CLK_SEL(x) ((x) << S_MAC_BGR_BGR_TEST_CLK_SEL)
+#define G_MAC_BGR_BGR_TEST_CLK_SEL(x) (((x) >> S_MAC_BGR_BGR_TEST_CLK_SEL) & M_MAC_BGR_BGR_TEST_CLK_SEL)
+
+#define A_MAC_BGR_PQ0_FIRMWARE_SEQ0_0 0x6000
+
+#define S_MAC_BGR_BGR_REG_PRG_EN 0
+#define V_MAC_BGR_BGR_REG_PRG_EN(x) ((x) << S_MAC_BGR_BGR_REG_PRG_EN)
+#define F_MAC_BGR_BGR_REG_PRG_EN V_MAC_BGR_BGR_REG_PRG_EN(1U)
+
+#define A_MAC_BGR_PQ0_FIRMWARE_SEQ0_1 0x6020
+
+#define S_MAC_BGR_BGR_REG_GPO 0
+#define V_MAC_BGR_BGR_REG_GPO(x) ((x) << S_MAC_BGR_BGR_REG_GPO)
+#define F_MAC_BGR_BGR_REG_GPO V_MAC_BGR_BGR_REG_GPO(1U)
+
+#define A_MAC_BGR_MGMT_SPINE_MACRO_PMA_0 0x40000
+
+#define S_MAC_BGR_CUREFCLKSEL1 0
+#define M_MAC_BGR_CUREFCLKSEL1 0x3U
+#define V_MAC_BGR_CUREFCLKSEL1(x) ((x) << S_MAC_BGR_CUREFCLKSEL1)
+#define G_MAC_BGR_CUREFCLKSEL1(x) (((x) >> S_MAC_BGR_CUREFCLKSEL1) & M_MAC_BGR_CUREFCLKSEL1)
+
+#define A_MAC_BGR_REFCLK_CONTROL_1 0x40004
+
+#define S_MAC_BGR_IM_CUREFCLKLR_EN 0
+#define V_MAC_BGR_IM_CUREFCLKLR_EN(x) ((x) << S_MAC_BGR_IM_CUREFCLKLR_EN)
+#define F_MAC_BGR_IM_CUREFCLKLR_EN V_MAC_BGR_IM_CUREFCLKLR_EN(1U)
+
+#define A_MAC_BGR_REFCLK_CONTROL_2 0x40080
+
+#define S_MAC_BGR_IM_REF_EN 0
+#define V_MAC_BGR_IM_REF_EN(x) ((x) << S_MAC_BGR_IM_REF_EN)
+#define F_MAC_BGR_IM_REF_EN V_MAC_BGR_IM_REF_EN(1U)
+
+#define A_MAC_PLL0_PLL_TOP_CUPLL_LOCK 0x4438
+
+#define S_MAC_PLL0_PLL2_LOCK_STATUS 2
+#define V_MAC_PLL0_PLL2_LOCK_STATUS(x) ((x) << S_MAC_PLL0_PLL2_LOCK_STATUS)
+#define F_MAC_PLL0_PLL2_LOCK_STATUS V_MAC_PLL0_PLL2_LOCK_STATUS(1U)
+
+#define S_MAC_PLL0_PLL1_LOCK_STATUS 1
+#define V_MAC_PLL0_PLL1_LOCK_STATUS(x) ((x) << S_MAC_PLL0_PLL1_LOCK_STATUS)
+#define F_MAC_PLL0_PLL1_LOCK_STATUS V_MAC_PLL0_PLL1_LOCK_STATUS(1U)
+
+#define S_MAC_PLL0_PLL0_LOCK_STATUS 0
+#define V_MAC_PLL0_PLL0_LOCK_STATUS(x) ((x) << S_MAC_PLL0_PLL0_LOCK_STATUS)
+#define F_MAC_PLL0_PLL0_LOCK_STATUS V_MAC_PLL0_PLL0_LOCK_STATUS(1U)
+
+#define A_MAC_PLL0_PLL_PQ0_FIRMWARE_SEQ0_1 0x6020
+
+#define S_MAC_PLL0_PLL_PRG_EN 0
+#define M_MAC_PLL0_PLL_PRG_EN 0xfU
+#define V_MAC_PLL0_PLL_PRG_EN(x) ((x) << S_MAC_PLL0_PLL_PRG_EN)
+#define G_MAC_PLL0_PLL_PRG_EN(x) (((x) >> S_MAC_PLL0_PLL_PRG_EN) & M_MAC_PLL0_PLL_PRG_EN)
+
+#define A_MAC_PLL0_PLL_CMUTOP_KV16_MGMT_PLL_MACRO_SELECT_0 0x7fc00
+
+#define S_MAC_PLL0_PMA_MACRO_SELECT 0
+#define M_MAC_PLL0_PMA_MACRO_SELECT 0x3ffU
+#define V_MAC_PLL0_PMA_MACRO_SELECT(x) ((x) << S_MAC_PLL0_PMA_MACRO_SELECT)
+#define G_MAC_PLL0_PMA_MACRO_SELECT(x) (((x) >> S_MAC_PLL0_PMA_MACRO_SELECT) & M_MAC_PLL0_PMA_MACRO_SELECT)
+
+#define A_MAC_PLL1_PLL_TOP_CUPLL_LOCK 0x4438
+
+#define S_MAC_PLL1_PLL2_LOCK_STATUS 2
+#define V_MAC_PLL1_PLL2_LOCK_STATUS(x) ((x) << S_MAC_PLL1_PLL2_LOCK_STATUS)
+#define F_MAC_PLL1_PLL2_LOCK_STATUS V_MAC_PLL1_PLL2_LOCK_STATUS(1U)
+
+#define S_MAC_PLL1_PLL1_LOCK_STATUS 1
+#define V_MAC_PLL1_PLL1_LOCK_STATUS(x) ((x) << S_MAC_PLL1_PLL1_LOCK_STATUS)
+#define F_MAC_PLL1_PLL1_LOCK_STATUS V_MAC_PLL1_PLL1_LOCK_STATUS(1U)
+
+#define S_MAC_PLL1_PLL0_LOCK_STATUS 0
+#define V_MAC_PLL1_PLL0_LOCK_STATUS(x) ((x) << S_MAC_PLL1_PLL0_LOCK_STATUS)
+#define F_MAC_PLL1_PLL0_LOCK_STATUS V_MAC_PLL1_PLL0_LOCK_STATUS(1U)
+
+#define A_MAC_PLL1_PLL_PQ0_FIRMWARE_SEQ0_1 0x6020
+
+#define S_MAC_PLL1_PLL_PRG_EN 0
+#define M_MAC_PLL1_PLL_PRG_EN 0xfU
+#define V_MAC_PLL1_PLL_PRG_EN(x) ((x) << S_MAC_PLL1_PLL_PRG_EN)
+#define G_MAC_PLL1_PLL_PRG_EN(x) (((x) >> S_MAC_PLL1_PLL_PRG_EN) & M_MAC_PLL1_PLL_PRG_EN)
+
+#define A_MAC_PLL1_PLL_CMUTOP_KV16_MGMT_PLL_MACRO_SELECT_0 0x7fc00
+
+#define S_MAC_PLL1_PMA_MACRO_SELECT 0
+#define M_MAC_PLL1_PMA_MACRO_SELECT 0x3ffU
+#define V_MAC_PLL1_PMA_MACRO_SELECT(x) ((x) << S_MAC_PLL1_PMA_MACRO_SELECT)
+#define G_MAC_PLL1_PMA_MACRO_SELECT(x) (((x) >> S_MAC_PLL1_PMA_MACRO_SELECT) & M_MAC_PLL1_PMA_MACRO_SELECT)
+
+/* registers for module CRYPTO_0 */
+#define CRYPTO_0_BASE_ADDR 0x44000
+
+#define A_TLS_TX_CH_CONFIG 0x44000
+
+#define S_SMALL_LEN_THRESH 16
+#define M_SMALL_LEN_THRESH 0xffffU
+#define V_SMALL_LEN_THRESH(x) ((x) << S_SMALL_LEN_THRESH)
+#define G_SMALL_LEN_THRESH(x) (((x) >> S_SMALL_LEN_THRESH) & M_SMALL_LEN_THRESH)
+
+#define S_CIPH0_CTL_SEL 12
+#define M_CIPH0_CTL_SEL 0x7U
+#define V_CIPH0_CTL_SEL(x) ((x) << S_CIPH0_CTL_SEL)
+#define G_CIPH0_CTL_SEL(x) (((x) >> S_CIPH0_CTL_SEL) & M_CIPH0_CTL_SEL)
+
+#define S_CIPHN_CTL_SEL 9
+#define M_CIPHN_CTL_SEL 0x7U
+#define V_CIPHN_CTL_SEL(x) ((x) << S_CIPHN_CTL_SEL)
+#define G_CIPHN_CTL_SEL(x) (((x) >> S_CIPHN_CTL_SEL) & M_CIPHN_CTL_SEL)
+
+#define S_MAC_CTL_SEL 6
+#define M_MAC_CTL_SEL 0x7U
+#define V_MAC_CTL_SEL(x) ((x) << S_MAC_CTL_SEL)
+#define G_MAC_CTL_SEL(x) (((x) >> S_MAC_CTL_SEL) & M_MAC_CTL_SEL)
+
+#define S_CIPH0_XOR_SEL 5
+#define V_CIPH0_XOR_SEL(x) ((x) << S_CIPH0_XOR_SEL)
+#define F_CIPH0_XOR_SEL V_CIPH0_XOR_SEL(1U)
+
+#define S_CIPHN_XOR_SEL 4
+#define V_CIPHN_XOR_SEL(x) ((x) << S_CIPHN_XOR_SEL)
+#define F_CIPHN_XOR_SEL V_CIPHN_XOR_SEL(1U)
+
+#define S_MAC_XOR_SEL 3
+#define V_MAC_XOR_SEL(x) ((x) << S_MAC_XOR_SEL)
+#define F_MAC_XOR_SEL V_MAC_XOR_SEL(1U)
+
+#define S_CIPH0_DP_SEL 2
+#define V_CIPH0_DP_SEL(x) ((x) << S_CIPH0_DP_SEL)
+#define F_CIPH0_DP_SEL V_CIPH0_DP_SEL(1U)
+
+#define S_CIPHN_DP_SEL 1
+#define V_CIPHN_DP_SEL(x) ((x) << S_CIPHN_DP_SEL)
+#define F_CIPHN_DP_SEL V_CIPHN_DP_SEL(1U)
+
+#define S_MAC_DP_SEL 0
+#define V_MAC_DP_SEL(x) ((x) << S_MAC_DP_SEL)
+#define F_MAC_DP_SEL V_MAC_DP_SEL(1U)
+
+#define A_TLS_TX_CH_PERR_INJECT 0x44004
+#define A_TLS_TX_CH_INT_ENABLE 0x44008
+
+#define S_KEYLENERR 3
+#define V_KEYLENERR(x) ((x) << S_KEYLENERR)
+#define F_KEYLENERR V_KEYLENERR(1U)
+
+#define S_INTF1_PERR 2
+#define V_INTF1_PERR(x) ((x) << S_INTF1_PERR)
+#define F_INTF1_PERR V_INTF1_PERR(1U)
+
+#define S_INTF0_PERR 1
+#define V_INTF0_PERR(x) ((x) << S_INTF0_PERR)
+#define F_INTF0_PERR V_INTF0_PERR(1U)
+
+#define A_TLS_TX_CH_INT_CAUSE 0x4400c
+
+#define S_KEX_CERR 4
+#define V_KEX_CERR(x) ((x) << S_KEX_CERR)
+#define F_KEX_CERR V_KEX_CERR(1U)
+
+#define A_TLS_TX_CH_PERR_ENABLE 0x44010
+#define A_TLS_TX_CH_DEBUG_FLAGS 0x44014
+#define A_TLS_TX_CH_HMACCTRL_CFG 0x44020
+#define A_TLS_TX_CH_ERR_RSP_HDR 0x44024
+#define A_TLS_TX_CH_HANG_TIMEOUT 0x44028
+
+#define S_T7_TIMEOUT 0
+#define M_T7_TIMEOUT 0xffU
+#define V_T7_TIMEOUT(x) ((x) << S_T7_TIMEOUT)
+#define G_T7_TIMEOUT(x) (((x) >> S_T7_TIMEOUT) & M_T7_TIMEOUT)
+
+#define A_TLS_TX_CH_DBG_STEP_CTRL 0x44030
+
+#define S_DBG_STEP_CTRL 1
+#define V_DBG_STEP_CTRL(x) ((x) << S_DBG_STEP_CTRL)
+#define F_DBG_STEP_CTRL V_DBG_STEP_CTRL(1U)
+
+#define S_DBG_STEP_EN 0
+#define V_DBG_STEP_EN(x) ((x) << S_DBG_STEP_EN)
+#define F_DBG_STEP_EN V_DBG_STEP_EN(1U)
+
+#define A_TLS_TX_DBG_SELL_DATA 0x44714
+#define A_TLS_TX_DBG_SELH_DATA 0x44718
+#define A_TLS_TX_DBG_SEL_CTRL 0x44730
+#define A_TLS_TX_GLOBAL_CONFIG 0x447c0
+
+#define S_QUIC_EN 2
+#define V_QUIC_EN(x) ((x) << S_QUIC_EN)
+#define F_QUIC_EN V_QUIC_EN(1U)
+
+#define S_IPSEC_IDX_UPD_EN 1
+#define V_IPSEC_IDX_UPD_EN(x) ((x) << S_IPSEC_IDX_UPD_EN)
+#define F_IPSEC_IDX_UPD_EN V_IPSEC_IDX_UPD_EN(1U)
+
+#define S_IPSEC_IDX_CTL 0
+#define V_IPSEC_IDX_CTL(x) ((x) << S_IPSEC_IDX_CTL)
+#define F_IPSEC_IDX_CTL V_IPSEC_IDX_CTL(1U)
+
+#define A_TLS_TX_CGEN 0x447f0
+
+#define S_CHCGEN 0
+#define M_CHCGEN 0x3fU
+#define V_CHCGEN(x) ((x) << S_CHCGEN)
+#define G_CHCGEN(x) (((x) >> S_CHCGEN) & M_CHCGEN)
+
+#define A_TLS_TX_IND_ADDR 0x447f8
+
+#define S_T7_3_ADDR 0
+#define M_T7_3_ADDR 0xfffU
+#define V_T7_3_ADDR(x) ((x) << S_T7_3_ADDR)
+#define G_T7_3_ADDR(x) (((x) >> S_T7_3_ADDR) & M_T7_3_ADDR)
+
+#define A_TLS_TX_IND_DATA 0x447fc
+#define A_TLS_TX_CH_IND_ING_BYTE_CNT_LO 0x0
+#define A_TLS_TX_CH_IND_ING_BYTE_CNT_HI 0x1
+#define A_TLS_TX_CH_IND_ING_PKT_CNT 0x2
+#define A_TLS_TX_CH_IND_DISPATCH_PKT_CNT 0x4
+#define A_TLS_TX_CH_IND_ERROR_CNTS0 0x5
+#define A_TLS_TX_CH_IND_DEC_ERROR_CNTS 0x7
+#define A_TLS_TX_CH_IND_DBG_SPP_CFG 0x1f
+
+#define S_DIS_IF_ERR 11
+#define V_DIS_IF_ERR(x) ((x) << S_DIS_IF_ERR)
+#define F_DIS_IF_ERR V_DIS_IF_ERR(1U)
+
+#define S_DIS_ERR_MSG 10
+#define V_DIS_ERR_MSG(x) ((x) << S_DIS_ERR_MSG)
+#define F_DIS_ERR_MSG V_DIS_ERR_MSG(1U)
+
+#define S_DIS_BP_SEQF 9
+#define V_DIS_BP_SEQF(x) ((x) << S_DIS_BP_SEQF)
+#define F_DIS_BP_SEQF V_DIS_BP_SEQF(1U)
+
+#define S_DIS_BP_LENF 8
+#define V_DIS_BP_LENF(x) ((x) << S_DIS_BP_LENF)
+#define F_DIS_BP_LENF V_DIS_BP_LENF(1U)
+
+#define S_DIS_KEX_ERR 6
+#define V_DIS_KEX_ERR(x) ((x) << S_DIS_KEX_ERR)
+#define F_DIS_KEX_ERR V_DIS_KEX_ERR(1U)
+
+#define S_CLR_STS 5
+#define V_CLR_STS(x) ((x) << S_CLR_STS)
+#define F_CLR_STS V_CLR_STS(1U)
+
+#define S_TGL_CNT 4
+#define V_TGL_CNT(x) ((x) << S_TGL_CNT)
+#define F_TGL_CNT V_TGL_CNT(1U)
+
+#define S_ENB_PAZ 3
+#define V_ENB_PAZ(x) ((x) << S_ENB_PAZ)
+#define F_ENB_PAZ V_ENB_PAZ(1U)
+
+#define S_DIS_NOP 2
+#define V_DIS_NOP(x) ((x) << S_DIS_NOP)
+#define F_DIS_NOP V_DIS_NOP(1U)
+
+#define S_DIS_CPL_ERR 1
+#define V_DIS_CPL_ERR(x) ((x) << S_DIS_CPL_ERR)
+#define F_DIS_CPL_ERR V_DIS_CPL_ERR(1U)
+
+#define S_DIS_OFF_ERR 0
+#define V_DIS_OFF_ERR(x) ((x) << S_DIS_OFF_ERR)
+#define F_DIS_OFF_ERR V_DIS_OFF_ERR(1U)
+
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID0 0x20
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID1 0x21
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID2 0x22
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID3 0x23
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID4 0x24
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID5 0x25
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID6 0x26
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID7 0x27
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_CPL_W0 0x28
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_CPL_W1 0x29
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_CPL_W2 0x2a
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_CPL_W3 0x2b
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_SMD_W0 0x2c
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_SMD_W1 0x2d
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_SMD_W2 0x2e
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_SMD_W3 0x2f
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_ERR 0x30
+#define A_TLS_TX_CH_IND_DBG_SPP_SFO_BP 0x31
+#define A_TLS_TX_CH_IND_DBG_SPP_SFO_CTL_M 0x32
+#define A_TLS_TX_CH_IND_DBG_SPP_SFO_CTL_L 0x33
+#define A_TLS_TX_CH_IND_DBG_PKT_STAT 0x3f
+
+/* registers for module CRYPTO_1 */
+#define CRYPTO_1_BASE_ADDR 0x45000
+
+/* registers for module CRYPTO_KEY */
+#define CRYPTO_KEY_BASE_ADDR 0x46000
+
+#define A_CRYPTO_KEY_CONFIG 0x46000
+
+#define S_ESNWIN 1
+#define M_ESNWIN 0x7U
+#define V_ESNWIN(x) ((x) << S_ESNWIN)
+#define G_ESNWIN(x) (((x) >> S_ESNWIN) & M_ESNWIN)
+
+#define S_INGKEY96 0
+#define V_INGKEY96(x) ((x) << S_INGKEY96)
+#define F_INGKEY96 V_INGKEY96(1U)
+
+#define A_CRYPTO_KEY_RST 0x46004
+
+#define S_CORE1RST 1
+#define V_CORE1RST(x) ((x) << S_CORE1RST)
+#define F_CORE1RST V_CORE1RST(1U)
+
+#define S_CORE0RST 0
+#define V_CORE0RST(x) ((x) << S_CORE0RST)
+#define F_CORE0RST V_CORE0RST(1U)
+
+#define A_CRYPTO_KEY_INT_ENABLE 0x46008
+
+#define S_MA_FIFO_PERR 22
+#define V_MA_FIFO_PERR(x) ((x) << S_MA_FIFO_PERR)
+#define F_MA_FIFO_PERR V_MA_FIFO_PERR(1U)
+
+#define S_MA_RSP_PERR 21
+#define V_MA_RSP_PERR(x) ((x) << S_MA_RSP_PERR)
+#define F_MA_RSP_PERR V_MA_RSP_PERR(1U)
+
+#define S_ING_CACHE_DATA_PERR 19
+#define V_ING_CACHE_DATA_PERR(x) ((x) << S_ING_CACHE_DATA_PERR)
+#define F_ING_CACHE_DATA_PERR V_ING_CACHE_DATA_PERR(1U)
+
+#define S_ING_CACHE_TAG_PERR 18
+#define V_ING_CACHE_TAG_PERR(x) ((x) << S_ING_CACHE_TAG_PERR)
+#define F_ING_CACHE_TAG_PERR V_ING_CACHE_TAG_PERR(1U)
+
+#define S_LKP_KEY_REQ_PERR 17
+#define V_LKP_KEY_REQ_PERR(x) ((x) << S_LKP_KEY_REQ_PERR)
+#define F_LKP_KEY_REQ_PERR V_LKP_KEY_REQ_PERR(1U)
+
+#define S_LKP_CLIP_TCAM_PERR 16
+#define V_LKP_CLIP_TCAM_PERR(x) ((x) << S_LKP_CLIP_TCAM_PERR)
+#define F_LKP_CLIP_TCAM_PERR V_LKP_CLIP_TCAM_PERR(1U)
+
+#define S_LKP_MAIN_TCAM_PERR 15
+#define V_LKP_MAIN_TCAM_PERR(x) ((x) << S_LKP_MAIN_TCAM_PERR)
+#define F_LKP_MAIN_TCAM_PERR V_LKP_MAIN_TCAM_PERR(1U)
+
+#define S_EGR_KEY_REQ_PERR 14
+#define V_EGR_KEY_REQ_PERR(x) ((x) << S_EGR_KEY_REQ_PERR)
+#define F_EGR_KEY_REQ_PERR V_EGR_KEY_REQ_PERR(1U)
+
+#define S_EGR_CACHE_DATA_PERR 13
+#define V_EGR_CACHE_DATA_PERR(x) ((x) << S_EGR_CACHE_DATA_PERR)
+#define F_EGR_CACHE_DATA_PERR V_EGR_CACHE_DATA_PERR(1U)
+
+#define S_EGR_CACHE_TAG_PERR 12
+#define V_EGR_CACHE_TAG_PERR(x) ((x) << S_EGR_CACHE_TAG_PERR)
+#define F_EGR_CACHE_TAG_PERR V_EGR_CACHE_TAG_PERR(1U)
+
+#define S_CIM_PERR 11
+#define V_CIM_PERR(x) ((x) << S_CIM_PERR)
+#define F_CIM_PERR V_CIM_PERR(1U)
+
+#define S_MA_INV_RSP_TAG 10
+#define V_MA_INV_RSP_TAG(x) ((x) << S_MA_INV_RSP_TAG)
+#define F_MA_INV_RSP_TAG V_MA_INV_RSP_TAG(1U)
+
+#define S_ING_KEY_RANGE_ERR 9
+#define V_ING_KEY_RANGE_ERR(x) ((x) << S_ING_KEY_RANGE_ERR)
+#define F_ING_KEY_RANGE_ERR V_ING_KEY_RANGE_ERR(1U)
+
+#define S_ING_MFIFO_OVFL 8
+#define V_ING_MFIFO_OVFL(x) ((x) << S_ING_MFIFO_OVFL)
+#define F_ING_MFIFO_OVFL V_ING_MFIFO_OVFL(1U)
+
+#define S_LKP_REQ_OVFL 7
+#define V_LKP_REQ_OVFL(x) ((x) << S_LKP_REQ_OVFL)
+#define F_LKP_REQ_OVFL V_LKP_REQ_OVFL(1U)
+
+#define S_EOK_WAIT_ERR 6
+#define V_EOK_WAIT_ERR(x) ((x) << S_EOK_WAIT_ERR)
+#define F_EOK_WAIT_ERR V_EOK_WAIT_ERR(1U)
+
+#define S_EGR_KEY_RANGE_ERR 5
+#define V_EGR_KEY_RANGE_ERR(x) ((x) << S_EGR_KEY_RANGE_ERR)
+#define F_EGR_KEY_RANGE_ERR V_EGR_KEY_RANGE_ERR(1U)
+
+#define S_EGR_MFIFO_OVFL 4
+#define V_EGR_MFIFO_OVFL(x) ((x) << S_EGR_MFIFO_OVFL)
+#define F_EGR_MFIFO_OVFL V_EGR_MFIFO_OVFL(1U)
+
+#define S_SEQ_WRAP_HP_OVFL 3
+#define V_SEQ_WRAP_HP_OVFL(x) ((x) << S_SEQ_WRAP_HP_OVFL)
+#define F_SEQ_WRAP_HP_OVFL V_SEQ_WRAP_HP_OVFL(1U)
+
+#define S_SEQ_WRAP_LP_OVFL 2
+#define V_SEQ_WRAP_LP_OVFL(x) ((x) << S_SEQ_WRAP_LP_OVFL)
+#define F_SEQ_WRAP_LP_OVFL V_SEQ_WRAP_LP_OVFL(1U)
+
+#define S_EGR_SEQ_WRAP_HP 1
+#define V_EGR_SEQ_WRAP_HP(x) ((x) << S_EGR_SEQ_WRAP_HP)
+#define F_EGR_SEQ_WRAP_HP V_EGR_SEQ_WRAP_HP(1U)
+
+#define S_EGR_SEQ_WRAP_LP 0
+#define V_EGR_SEQ_WRAP_LP(x) ((x) << S_EGR_SEQ_WRAP_LP)
+#define F_EGR_SEQ_WRAP_LP V_EGR_SEQ_WRAP_LP(1U)
+
+#define A_CRYPTO_KEY_INT_CAUSE 0x4600c
+#define A_CRYPTO_KEY_PERR_ENABLE 0x46010
+#define A_CRYPTO_KEY_EGR_SEQ_WRAP_LP_KEY_ID 0x46018
+
+#define S_KEY_VALID 31
+#define V_KEY_VALID(x) ((x) << S_KEY_VALID)
+#define F_KEY_VALID V_KEY_VALID(1U)
+
+#define S_KEY_ID 0
+#define M_KEY_ID 0x7fffffffU
+#define V_KEY_ID(x) ((x) << S_KEY_ID)
+#define G_KEY_ID(x) (((x) >> S_KEY_ID) & M_KEY_ID)
+
+#define A_CRYPTO_KEY_EGR_SEQ_WRAP_HP_KEY_ID 0x4601c
+#define A_CRYPTO_KEY_TCAM_DATA0 0x46020
+#define A_CRYPTO_KEY_TCAM_DATA1 0x46024
+#define A_CRYPTO_KEY_TCAM_DATA2 0x46028
+#define A_CRYPTO_KEY_TCAM_DATA3 0x4602c
+#define A_CRYPTO_KEY_TCAM_CTL 0x46030
+
+#define S_SRCHMHIT 21
+#define V_SRCHMHIT(x) ((x) << S_SRCHMHIT)
+#define F_SRCHMHIT V_SRCHMHIT(1U)
+
+#define S_T7_BUSY 20
+#define V_T7_BUSY(x) ((x) << S_T7_BUSY)
+#define F_T7_BUSY V_T7_BUSY(1U)
+
+#define S_SRCHHIT 19
+#define V_SRCHHIT(x) ((x) << S_SRCHHIT)
+#define F_SRCHHIT V_SRCHHIT(1U)
+
+#define S_IPVERSION 18
+#define V_IPVERSION(x) ((x) << S_IPVERSION)
+#define F_IPVERSION V_IPVERSION(1U)
+
+#define S_BITSEL 17
+#define V_BITSEL(x) ((x) << S_BITSEL)
+#define F_BITSEL V_BITSEL(1U)
+
+#define S_TCAMSEL 16
+#define V_TCAMSEL(x) ((x) << S_TCAMSEL)
+#define F_TCAMSEL V_TCAMSEL(1U)
+
+#define S_CMDTYPE 14
+#define M_CMDTYPE 0x3U
+#define V_CMDTYPE(x) ((x) << S_CMDTYPE)
+#define G_CMDTYPE(x) (((x) >> S_CMDTYPE) & M_CMDTYPE)
+
+#define S_TCAMINDEX 0
+#define M_TCAMINDEX 0x3fffU
+#define V_TCAMINDEX(x) ((x) << S_TCAMINDEX)
+#define G_TCAMINDEX(x) (((x) >> S_TCAMINDEX) & M_TCAMINDEX)
+
+#define A_CRYPTO_KEY_TCAM_CONFIG 0x46034
+
+#define S_T7_CLTCAMDEEPSLEEP_STAT 3
+#define V_T7_CLTCAMDEEPSLEEP_STAT(x) ((x) << S_T7_CLTCAMDEEPSLEEP_STAT)
+#define F_T7_CLTCAMDEEPSLEEP_STAT V_T7_CLTCAMDEEPSLEEP_STAT(1U)
+
+#define S_T7_TCAMDEEPSLEEP_STAT 2
+#define V_T7_TCAMDEEPSLEEP_STAT(x) ((x) << S_T7_TCAMDEEPSLEEP_STAT)
+#define F_T7_TCAMDEEPSLEEP_STAT V_T7_TCAMDEEPSLEEP_STAT(1U)
+
+#define S_T7_CLTCAMDEEPSLEEP 1
+#define V_T7_CLTCAMDEEPSLEEP(x) ((x) << S_T7_CLTCAMDEEPSLEEP)
+#define F_T7_CLTCAMDEEPSLEEP V_T7_CLTCAMDEEPSLEEP(1U)
+
+#define S_T7_TCAMDEEPSLEEP 0
+#define V_T7_TCAMDEEPSLEEP(x) ((x) << S_T7_TCAMDEEPSLEEP)
+#define F_T7_TCAMDEEPSLEEP V_T7_TCAMDEEPSLEEP(1U)
+
+#define A_CRYPTO_KEY_TX_CMM_CONFIG 0x46040
+#define A_CRYPTO_KEY_TX_TNL_BASE 0x46044
+#define A_CRYPTO_KEY_TX_TRN_BASE 0x46048
+#define A_CRYPTO_KEY_TX_MAX_KEYS 0x4604c
+
+#define S_TNL_MAX 16
+#define M_TNL_MAX 0xffffU
+#define V_TNL_MAX(x) ((x) << S_TNL_MAX)
+#define G_TNL_MAX(x) (((x) >> S_TNL_MAX) & M_TNL_MAX)
+
+#define S_TRN_MAX 0
+#define M_TRN_MAX 0xffffU
+#define V_TRN_MAX(x) ((x) << S_TRN_MAX)
+#define G_TRN_MAX(x) (((x) >> S_TRN_MAX) & M_TRN_MAX)
+
+#define A_CRYPTO_KEY_TX_SEQ_STAT 0x46050
+
+#define S_ESN 24
+#define V_ESN(x) ((x) << S_ESN)
+#define F_ESN V_ESN(1U)
+
+#define S_SEQHI 20
+#define M_SEQHI 0xfU
+#define V_SEQHI(x) ((x) << S_SEQHI)
+#define G_SEQHI(x) (((x) >> S_SEQHI) & M_SEQHI)
+
+#define S_KEYID 0
+#define M_KEYID 0xfffffU
+#define V_KEYID(x) ((x) << S_KEYID)
+#define G_KEYID(x) (((x) >> S_KEYID) & M_KEYID)
+
+#define A_CRYPTO_KEY_RX_CMM_CONFIG 0x46060
+#define A_CRYPTO_KEY_RX_BASE 0x46064
+#define A_CRYPTO_KEY_RX_MAX_KEYS 0x46068
+
+#define S_MAXKEYS 0
+#define M_MAXKEYS 0xffffU
+#define V_MAXKEYS(x) ((x) << S_MAXKEYS)
+#define G_MAXKEYS(x) (((x) >> S_MAXKEYS) & M_MAXKEYS)
+
+#define A_CRYPTO_KEY_CRYPTO_REVISION 0x4606c
+#define A_CRYPTO_KEY_RX_SEQ_STAT 0x46070
+#define A_CRYPTO_KEY_TCAM_BIST_CTRL 0x46074
+#define A_CRYPTO_KEY_TCAM_BIST_CB_PASS 0x46078
+#define A_CRYPTO_KEY_TCAM_BIST_CB_BUSY 0x4607c
+#define A_CRYPTO_KEY_DBG_SEL_CTRL 0x46080
+
+#define S_SEL_OVR_EN 16
+#define V_SEL_OVR_EN(x) ((x) << S_SEL_OVR_EN)
+#define F_SEL_OVR_EN V_SEL_OVR_EN(1U)
+
+#define S_T7_1_SELH 8
+#define M_T7_1_SELH 0xffU
+#define V_T7_1_SELH(x) ((x) << S_T7_1_SELH)
+#define G_T7_1_SELH(x) (((x) >> S_T7_1_SELH) & M_T7_1_SELH)
+
+#define S_T7_1_SELL 0
+#define M_T7_1_SELL 0xffU
+#define V_T7_1_SELL(x) ((x) << S_T7_1_SELL)
+#define G_T7_1_SELL(x) (((x) >> S_T7_1_SELL) & M_T7_1_SELL)
+
+#define A_CRYPTO_KEY_DBG_SELL_DATA 0x46084
+#define A_CRYPTO_KEY_DBG_SELH_DATA 0x46088
+
+/* registers for module ARM */
+#define ARM_BASE_ADDR 0x47000
+
+#define A_ARM_CPU_POR_RST 0x47000
+
+#define S_CPUPORRSTN3 3
+#define V_CPUPORRSTN3(x) ((x) << S_CPUPORRSTN3)
+#define F_CPUPORRSTN3 V_CPUPORRSTN3(1U)
+
+#define S_CPUPORRSTN2 2
+#define V_CPUPORRSTN2(x) ((x) << S_CPUPORRSTN2)
+#define F_CPUPORRSTN2 V_CPUPORRSTN2(1U)
+
+#define S_CPUPORRSTN1 1
+#define V_CPUPORRSTN1(x) ((x) << S_CPUPORRSTN1)
+#define F_CPUPORRSTN1 V_CPUPORRSTN1(1U)
+
+#define S_CPUPORRSTN0 0
+#define V_CPUPORRSTN0(x) ((x) << S_CPUPORRSTN0)
+#define F_CPUPORRSTN0 V_CPUPORRSTN0(1U)
+
+#define A_ARM_CPU_CORE_RST 0x47004
+
+#define S_CPUCORERSTN3 3
+#define V_CPUCORERSTN3(x) ((x) << S_CPUCORERSTN3)
+#define F_CPUCORERSTN3 V_CPUCORERSTN3(1U)
+
+#define S_CPUCORERSTN2 2
+#define V_CPUCORERSTN2(x) ((x) << S_CPUCORERSTN2)
+#define F_CPUCORERSTN2 V_CPUCORERSTN2(1U)
+
+#define S_CPUCORERSTN1 1
+#define V_CPUCORERSTN1(x) ((x) << S_CPUCORERSTN1)
+#define F_CPUCORERSTN1 V_CPUCORERSTN1(1U)
+
+#define S_CPUCORERSTN0 0
+#define V_CPUCORERSTN0(x) ((x) << S_CPUCORERSTN0)
+#define F_CPUCORERSTN0 V_CPUCORERSTN0(1U)
+
+#define A_ARM_CPU_WARM_RST_REQ 0x47008
+
+#define S_CPUWARMRSTREQ3 3
+#define V_CPUWARMRSTREQ3(x) ((x) << S_CPUWARMRSTREQ3)
+#define F_CPUWARMRSTREQ3 V_CPUWARMRSTREQ3(1U)
+
+#define S_CPUWARMRSTREQ2 2
+#define V_CPUWARMRSTREQ2(x) ((x) << S_CPUWARMRSTREQ2)
+#define F_CPUWARMRSTREQ2 V_CPUWARMRSTREQ2(1U)
+
+#define S_CPUWARMRSTREQ1 1
+#define V_CPUWARMRSTREQ1(x) ((x) << S_CPUWARMRSTREQ1)
+#define F_CPUWARMRSTREQ1 V_CPUWARMRSTREQ1(1U)
+
+#define S_CPUWARMRSTREQ0 0
+#define V_CPUWARMRSTREQ0(x) ((x) << S_CPUWARMRSTREQ0)
+#define F_CPUWARMRSTREQ0 V_CPUWARMRSTREQ0(1U)
+
+#define A_ARM_CPU_L2_RST 0x4700c
+
+#define S_CPUL2RSTN 0
+#define V_CPUL2RSTN(x) ((x) << S_CPUL2RSTN)
+#define F_CPUL2RSTN V_CPUL2RSTN(1U)
+
+#define A_ARM_CPU_L2_RST_DIS 0x47010
+
+#define S_CPUL2RSTDISABLE 0
+#define V_CPUL2RSTDISABLE(x) ((x) << S_CPUL2RSTDISABLE)
+#define F_CPUL2RSTDISABLE V_CPUL2RSTDISABLE(1U)
+
+#define A_ARM_CPU_PRESET_DBG 0x47014
+
+#define S_CPUPRESETDBGN 0
+#define V_CPUPRESETDBGN(x) ((x) << S_CPUPRESETDBGN)
+#define F_CPUPRESETDBGN V_CPUPRESETDBGN(1U)
+
+#define A_ARM_PL_DMA_AW_OFFSET 0x47018
+
+#define S_PL_DMA_AW_OFFSET 0
+#define M_PL_DMA_AW_OFFSET 0x3fffffffU
+#define V_PL_DMA_AW_OFFSET(x) ((x) << S_PL_DMA_AW_OFFSET)
+#define G_PL_DMA_AW_OFFSET(x) (((x) >> S_PL_DMA_AW_OFFSET) & M_PL_DMA_AW_OFFSET)
+
+#define A_ARM_PL_DMA_AR_OFFSET 0x4701c
+
+#define S_PL_DMA_AR_OFFSET 0
+#define M_PL_DMA_AR_OFFSET 0x3fffffffU
+#define V_PL_DMA_AR_OFFSET(x) ((x) << S_PL_DMA_AR_OFFSET)
+#define G_PL_DMA_AR_OFFSET(x) (((x) >> S_PL_DMA_AR_OFFSET) & M_PL_DMA_AR_OFFSET)
+
+#define A_ARM_CPU_RESET_VECTOR_BASE_ADDR0 0x47020
+#define A_ARM_CPU_RESET_VECTOR_BASE_ADDR1 0x47024
+
+#define S_CPURESETVECBA1 0
+#define M_CPURESETVECBA1 0x3ffU
+#define V_CPURESETVECBA1(x) ((x) << S_CPURESETVECBA1)
+#define G_CPURESETVECBA1(x) (((x) >> S_CPURESETVECBA1) & M_CPURESETVECBA1)
+
+#define A_ARM_CPU_PMU_EVENT 0x47028
+
+#define S_CPUPMUEVENT 0
+#define M_CPUPMUEVENT 0x1ffffffU
+#define V_CPUPMUEVENT(x) ((x) << S_CPUPMUEVENT)
+#define G_CPUPMUEVENT(x) (((x) >> S_CPUPMUEVENT) & M_CPUPMUEVENT)
+
+#define A_ARM_DMA_RST 0x4702c
+
+#define S_DMA_PL_RST_N 0
+#define V_DMA_PL_RST_N(x) ((x) << S_DMA_PL_RST_N)
+#define F_DMA_PL_RST_N V_DMA_PL_RST_N(1U)
+
+#define A_ARM_PLM_RID_CFG 0x4703c
+#define A_ARM_PLM_EROM_CFG 0x47040
+#define A_ARM_PL_ARM_HDR_CFG 0x4704c
+#define A_ARM_RC_INT_STATUS 0x4705c
+
+#define S_RC_INT_STATUS_REG 0
+#define M_RC_INT_STATUS_REG 0x3fU
+#define V_RC_INT_STATUS_REG(x) ((x) << S_RC_INT_STATUS_REG)
+#define G_RC_INT_STATUS_REG(x) (((x) >> S_RC_INT_STATUS_REG) & M_RC_INT_STATUS_REG)
+
+#define A_ARM_CPU_DBG_PWR_UP_REQ 0x47060
+
+#define S_CPUDBGPWRUPREQ3 3
+#define V_CPUDBGPWRUPREQ3(x) ((x) << S_CPUDBGPWRUPREQ3)
+#define F_CPUDBGPWRUPREQ3 V_CPUDBGPWRUPREQ3(1U)
+
+#define S_CPUDBGPWRUPREQ2 2
+#define V_CPUDBGPWRUPREQ2(x) ((x) << S_CPUDBGPWRUPREQ2)
+#define F_CPUDBGPWRUPREQ2 V_CPUDBGPWRUPREQ2(1U)
+
+#define S_CPUDBGPWRUPREQ1 1
+#define V_CPUDBGPWRUPREQ1(x) ((x) << S_CPUDBGPWRUPREQ1)
+#define F_CPUDBGPWRUPREQ1 V_CPUDBGPWRUPREQ1(1U)
+
+#define S_CPUDBGPWRUPREQ0 0
+#define V_CPUDBGPWRUPREQ0(x) ((x) << S_CPUDBGPWRUPREQ0)
+#define F_CPUDBGPWRUPREQ0 V_CPUDBGPWRUPREQ0(1U)
+
+#define A_ARM_CPU_STANDBY_WFE_WFI 0x47064
+
+#define S_CPUSTANDBYWFIL2 8
+#define V_CPUSTANDBYWFIL2(x) ((x) << S_CPUSTANDBYWFIL2)
+#define F_CPUSTANDBYWFIL2 V_CPUSTANDBYWFIL2(1U)
+
+#define S_CPUSTANDBYWFI3 7
+#define V_CPUSTANDBYWFI3(x) ((x) << S_CPUSTANDBYWFI3)
+#define F_CPUSTANDBYWFI3 V_CPUSTANDBYWFI3(1U)
+
+#define S_CPUSTANDBYWFI2 6
+#define V_CPUSTANDBYWFI2(x) ((x) << S_CPUSTANDBYWFI2)
+#define F_CPUSTANDBYWFI2 V_CPUSTANDBYWFI2(1U)
+
+#define S_CPUSTANDBYWFI1 5
+#define V_CPUSTANDBYWFI1(x) ((x) << S_CPUSTANDBYWFI1)
+#define F_CPUSTANDBYWFI1 V_CPUSTANDBYWFI1(1U)
+
+#define S_CPUSTANDBYWFI0 4
+#define V_CPUSTANDBYWFI0(x) ((x) << S_CPUSTANDBYWFI0)
+#define F_CPUSTANDBYWFI0 V_CPUSTANDBYWFI0(1U)
+
+#define S_CPUSTANDBYWFE3 3
+#define V_CPUSTANDBYWFE3(x) ((x) << S_CPUSTANDBYWFE3)
+#define F_CPUSTANDBYWFE3 V_CPUSTANDBYWFE3(1U)
+
+#define S_CPUSTANDBYWFE2 2
+#define V_CPUSTANDBYWFE2(x) ((x) << S_CPUSTANDBYWFE2)
+#define F_CPUSTANDBYWFE2 V_CPUSTANDBYWFE2(1U)
+
+#define S_CPUSTANDBYWFE1 1
+#define V_CPUSTANDBYWFE1(x) ((x) << S_CPUSTANDBYWFE1)
+#define F_CPUSTANDBYWFE1 V_CPUSTANDBYWFE1(1U)
+
+#define S_CPUSTANDBYWFE0 0
+#define V_CPUSTANDBYWFE0(x) ((x) << S_CPUSTANDBYWFE0)
+#define F_CPUSTANDBYWFE0 V_CPUSTANDBYWFE0(1U)
+
+#define A_ARM_CPU_SMPEN 0x47068
+
+#define S_CPUSMPEN3 3
+#define V_CPUSMPEN3(x) ((x) << S_CPUSMPEN3)
+#define F_CPUSMPEN3 V_CPUSMPEN3(1U)
+
+#define S_CPUSMPEN2 2
+#define V_CPUSMPEN2(x) ((x) << S_CPUSMPEN2)
+#define F_CPUSMPEN2 V_CPUSMPEN2(1U)
+
+#define S_CPUSMPEN1 1
+#define V_CPUSMPEN1(x) ((x) << S_CPUSMPEN1)
+#define F_CPUSMPEN1 V_CPUSMPEN1(1U)
+
+#define S_CPUSMPEN0 0
+#define V_CPUSMPEN0(x) ((x) << S_CPUSMPEN0)
+#define F_CPUSMPEN0 V_CPUSMPEN0(1U)
+
+#define A_ARM_CPU_QACTIVE 0x4706c
+
+#define S_CPUQACTIVE3 3
+#define V_CPUQACTIVE3(x) ((x) << S_CPUQACTIVE3)
+#define F_CPUQACTIVE3 V_CPUQACTIVE3(1U)
+
+#define S_CPUQACTIVE2 2
+#define V_CPUQACTIVE2(x) ((x) << S_CPUQACTIVE2)
+#define F_CPUQACTIVE2 V_CPUQACTIVE2(1U)
+
+#define S_CPUQACTIVE1 1
+#define V_CPUQACTIVE1(x) ((x) << S_CPUQACTIVE1)
+#define F_CPUQACTIVE1 V_CPUQACTIVE1(1U)
+
+#define S_CPUQACTIVE0 0
+#define V_CPUQACTIVE0(x) ((x) << S_CPUQACTIVE0)
+#define F_CPUQACTIVE0 V_CPUQACTIVE0(1U)
+
+#define A_ARM_CPU_QREQ 0x47070
+
+#define S_CPUL2FLUSHREQ 5
+#define V_CPUL2FLUSHREQ(x) ((x) << S_CPUL2FLUSHREQ)
+#define F_CPUL2FLUSHREQ V_CPUL2FLUSHREQ(1U)
+
+#define S_CPUL2QREQN 4
+#define V_CPUL2QREQN(x) ((x) << S_CPUL2QREQN)
+#define F_CPUL2QREQN V_CPUL2QREQN(1U)
+
+#define S_CPUQREQ3N 3
+#define V_CPUQREQ3N(x) ((x) << S_CPUQREQ3N)
+#define F_CPUQREQ3N V_CPUQREQ3N(1U)
+
+#define S_CPUQREQ2N 2
+#define V_CPUQREQ2N(x) ((x) << S_CPUQREQ2N)
+#define F_CPUQREQ2N V_CPUQREQ2N(1U)
+
+#define S_CPUQREQ1N 1
+#define V_CPUQREQ1N(x) ((x) << S_CPUQREQ1N)
+#define F_CPUQREQ1N V_CPUQREQ1N(1U)
+
+#define S_CPUQREQ0N 0
+#define V_CPUQREQ0N(x) ((x) << S_CPUQREQ0N)
+#define F_CPUQREQ0N V_CPUQREQ0N(1U)
+
+#define A_ARM_CPU_QREQ_STATUS 0x47074
+
+#define S_CPUL2FLUSHDONE 10
+#define V_CPUL2FLUSHDONE(x) ((x) << S_CPUL2FLUSHDONE)
+#define F_CPUL2FLUSHDONE V_CPUL2FLUSHDONE(1U)
+
+#define S_CPUL2QDENY 9
+#define V_CPUL2QDENY(x) ((x) << S_CPUL2QDENY)
+#define F_CPUL2QDENY V_CPUL2QDENY(1U)
+
+#define S_CPUL2QACCEPTN 8
+#define V_CPUL2QACCEPTN(x) ((x) << S_CPUL2QACCEPTN)
+#define F_CPUL2QACCEPTN V_CPUL2QACCEPTN(1U)
+
+#define S_CPUQDENY3 7
+#define V_CPUQDENY3(x) ((x) << S_CPUQDENY3)
+#define F_CPUQDENY3 V_CPUQDENY3(1U)
+
+#define S_CPUQDENY2 6
+#define V_CPUQDENY2(x) ((x) << S_CPUQDENY2)
+#define F_CPUQDENY2 V_CPUQDENY2(1U)
+
+#define S_CPUQDENY1 5
+#define V_CPUQDENY1(x) ((x) << S_CPUQDENY1)
+#define F_CPUQDENY1 V_CPUQDENY1(1U)
+
+#define S_CPUQDENY0 4
+#define V_CPUQDENY0(x) ((x) << S_CPUQDENY0)
+#define F_CPUQDENY0 V_CPUQDENY0(1U)
+
+#define S_CPUQACCEPT3N 3
+#define V_CPUQACCEPT3N(x) ((x) << S_CPUQACCEPT3N)
+#define F_CPUQACCEPT3N V_CPUQACCEPT3N(1U)
+
+#define S_CPUQACCEPT2N 2
+#define V_CPUQACCEPT2N(x) ((x) << S_CPUQACCEPT2N)
+#define F_CPUQACCEPT2N V_CPUQACCEPT2N(1U)
+
+#define S_CPUQACCEPT1N 1
+#define V_CPUQACCEPT1N(x) ((x) << S_CPUQACCEPT1N)
+#define F_CPUQACCEPT1N V_CPUQACCEPT1N(1U)
+
+#define S_CPUQACCEPT0N 0
+#define V_CPUQACCEPT0N(x) ((x) << S_CPUQACCEPT0N)
+#define F_CPUQACCEPT0N V_CPUQACCEPT0N(1U)
+
+#define A_ARM_CPU_DBG_EN 0x47078
+
+#define S_CPUDBGL1RSTDISABLE 28
+#define V_CPUDBGL1RSTDISABLE(x) ((x) << S_CPUDBGL1RSTDISABLE)
+#define F_CPUDBGL1RSTDISABLE V_CPUDBGL1RSTDISABLE(1U)
+
+#define S_CPUDBGRSTREQ3 27
+#define V_CPUDBGRSTREQ3(x) ((x) << S_CPUDBGRSTREQ3)
+#define F_CPUDBGRSTREQ3 V_CPUDBGRSTREQ3(1U)
+
+#define S_CPUDBGRSTREQ2 26
+#define V_CPUDBGRSTREQ2(x) ((x) << S_CPUDBGRSTREQ2)
+#define F_CPUDBGRSTREQ2 V_CPUDBGRSTREQ2(1U)
+
+#define S_CPUDBGRSTREQ1 25
+#define V_CPUDBGRSTREQ1(x) ((x) << S_CPUDBGRSTREQ1)
+#define F_CPUDBGRSTREQ1 V_CPUDBGRSTREQ1(1U)
+
+#define S_CPUDBGRSTREQ0 24
+#define V_CPUDBGRSTREQ0(x) ((x) << S_CPUDBGRSTREQ0)
+#define F_CPUDBGRSTREQ0 V_CPUDBGRSTREQ0(1U)
+
+#define S_CPUDBGPWRDUP3 23
+#define V_CPUDBGPWRDUP3(x) ((x) << S_CPUDBGPWRDUP3)
+#define F_CPUDBGPWRDUP3 V_CPUDBGPWRDUP3(1U)
+
+#define S_CPUDBGPWRDUP2 22
+#define V_CPUDBGPWRDUP2(x) ((x) << S_CPUDBGPWRDUP2)
+#define F_CPUDBGPWRDUP2 V_CPUDBGPWRDUP2(1U)
+
+#define S_CPUDBGPWRDUP1 21
+#define V_CPUDBGPWRDUP1(x) ((x) << S_CPUDBGPWRDUP1)
+#define F_CPUDBGPWRDUP1 V_CPUDBGPWRDUP1(1U)
+
+#define S_CPUDBGPWRDUP0 20
+#define V_CPUDBGPWRDUP0(x) ((x) << S_CPUDBGPWRDUP0)
+#define F_CPUDBGPWRDUP0 V_CPUDBGPWRDUP0(1U)
+
+#define S_CPUEXTDBGREQ3 19
+#define V_CPUEXTDBGREQ3(x) ((x) << S_CPUEXTDBGREQ3)
+#define F_CPUEXTDBGREQ3 V_CPUEXTDBGREQ3(1U)
+
+#define S_CPUEXTDBGREQ2 18
+#define V_CPUEXTDBGREQ2(x) ((x) << S_CPUEXTDBGREQ2)
+#define F_CPUEXTDBGREQ2 V_CPUEXTDBGREQ2(1U)
+
+#define S_CPUEXTDBGREQ1 17
+#define V_CPUEXTDBGREQ1(x) ((x) << S_CPUEXTDBGREQ1)
+#define F_CPUEXTDBGREQ1 V_CPUEXTDBGREQ1(1U)
+
+#define S_CPUEXTDBGREQ0 16
+#define V_CPUEXTDBGREQ0(x) ((x) << S_CPUEXTDBGREQ0)
+#define F_CPUEXTDBGREQ0 V_CPUEXTDBGREQ0(1U)
+
+#define S_CPUSPNIDEN3 15
+#define V_CPUSPNIDEN3(x) ((x) << S_CPUSPNIDEN3)
+#define F_CPUSPNIDEN3 V_CPUSPNIDEN3(1U)
+
+#define S_CPUSPNIDEN2 14
+#define V_CPUSPNIDEN2(x) ((x) << S_CPUSPNIDEN2)
+#define F_CPUSPNIDEN2 V_CPUSPNIDEN2(1U)
+
+#define S_CPUSPNIDEN1 13
+#define V_CPUSPNIDEN1(x) ((x) << S_CPUSPNIDEN1)
+#define F_CPUSPNIDEN1 V_CPUSPNIDEN1(1U)
+
+#define S_CPUSPNIDEN0 12
+#define V_CPUSPNIDEN0(x) ((x) << S_CPUSPNIDEN0)
+#define F_CPUSPNIDEN0 V_CPUSPNIDEN0(1U)
+
+#define S_CPUSPDBGEN3 11
+#define V_CPUSPDBGEN3(x) ((x) << S_CPUSPDBGEN3)
+#define F_CPUSPDBGEN3 V_CPUSPDBGEN3(1U)
+
+#define S_CPUSPDBGEN2 10
+#define V_CPUSPDBGEN2(x) ((x) << S_CPUSPDBGEN2)
+#define F_CPUSPDBGEN2 V_CPUSPDBGEN2(1U)
+
+#define S_CPUSPDBGEN1 9
+#define V_CPUSPDBGEN1(x) ((x) << S_CPUSPDBGEN1)
+#define F_CPUSPDBGEN1 V_CPUSPDBGEN1(1U)
+
+#define S_CPUSPDBGEN0 8
+#define V_CPUSPDBGEN0(x) ((x) << S_CPUSPDBGEN0)
+#define F_CPUSPDBGEN0 V_CPUSPDBGEN0(1U)
+
+#define S_CPUNIDEN3 7
+#define V_CPUNIDEN3(x) ((x) << S_CPUNIDEN3)
+#define F_CPUNIDEN3 V_CPUNIDEN3(1U)
+
+#define S_CPUNIDEN2 6
+#define V_CPUNIDEN2(x) ((x) << S_CPUNIDEN2)
+#define F_CPUNIDEN2 V_CPUNIDEN2(1U)
+
+#define S_CPUNIDEN1 5
+#define V_CPUNIDEN1(x) ((x) << S_CPUNIDEN1)
+#define F_CPUNIDEN1 V_CPUNIDEN1(1U)
+
+#define S_CPUNIDEN0 4
+#define V_CPUNIDEN0(x) ((x) << S_CPUNIDEN0)
+#define F_CPUNIDEN0 V_CPUNIDEN0(1U)
+
+#define S_CPUDBGEN3 3
+#define V_CPUDBGEN3(x) ((x) << S_CPUDBGEN3)
+#define F_CPUDBGEN3 V_CPUDBGEN3(1U)
+
+#define S_CPUDBGEN2 2
+#define V_CPUDBGEN2(x) ((x) << S_CPUDBGEN2)
+#define F_CPUDBGEN2 V_CPUDBGEN2(1U)
+
+#define S_CPUDBGEN1 1
+#define V_CPUDBGEN1(x) ((x) << S_CPUDBGEN1)
+#define F_CPUDBGEN1 V_CPUDBGEN1(1U)
+
+#define S_CPUDBGEN0 0
+#define V_CPUDBGEN0(x) ((x) << S_CPUDBGEN0)
+#define F_CPUDBGEN0 V_CPUDBGEN0(1U)
+
+#define A_ARM_CPU_DBG_ACK 0x4707c
+
+#define S_CPUDBGNOPWRDWN3 11
+#define V_CPUDBGNOPWRDWN3(x) ((x) << S_CPUDBGNOPWRDWN3)
+#define F_CPUDBGNOPWRDWN3 V_CPUDBGNOPWRDWN3(1U)
+
+#define S_CPUDBGNOPWRDWN2 10
+#define V_CPUDBGNOPWRDWN2(x) ((x) << S_CPUDBGNOPWRDWN2)
+#define F_CPUDBGNOPWRDWN2 V_CPUDBGNOPWRDWN2(1U)
+
+#define S_CPUDBGNOPWRDWN1 9
+#define V_CPUDBGNOPWRDWN1(x) ((x) << S_CPUDBGNOPWRDWN1)
+#define F_CPUDBGNOPWRDWN1 V_CPUDBGNOPWRDWN1(1U)
+
+#define S_CPUDBGNOPWRDWN0 8
+#define V_CPUDBGNOPWRDWN0(x) ((x) << S_CPUDBGNOPWRDWN0)
+#define F_CPUDBGNOPWRDWN0 V_CPUDBGNOPWRDWN0(1U)
+
+#define S_CPUDGNRSTREQ3 7
+#define V_CPUDGNRSTREQ3(x) ((x) << S_CPUDGNRSTREQ3)
+#define F_CPUDGNRSTREQ3 V_CPUDGNRSTREQ3(1U)
+
+#define S_CPUDGNRSTREQ2 6
+#define V_CPUDGNRSTREQ2(x) ((x) << S_CPUDGNRSTREQ2)
+#define F_CPUDGNRSTREQ2 V_CPUDGNRSTREQ2(1U)
+
+#define S_CPUDGNRSTREQ1 5
+#define V_CPUDGNRSTREQ1(x) ((x) << S_CPUDGNRSTREQ1)
+#define F_CPUDGNRSTREQ1 V_CPUDGNRSTREQ1(1U)
+
+#define S_CPUDGNRSTREQ0 4
+#define V_CPUDGNRSTREQ0(x) ((x) << S_CPUDGNRSTREQ0)
+#define F_CPUDGNRSTREQ0 V_CPUDGNRSTREQ0(1U)
+
+#define S_CPUDBGACK3 3
+#define V_CPUDBGACK3(x) ((x) << S_CPUDBGACK3)
+#define F_CPUDBGACK3 V_CPUDBGACK3(1U)
+
+#define S_CPUDBGACK2 2
+#define V_CPUDBGACK2(x) ((x) << S_CPUDBGACK2)
+#define F_CPUDBGACK2 V_CPUDBGACK2(1U)
+
+#define S_CPUDBGACK1 1
+#define V_CPUDBGACK1(x) ((x) << S_CPUDBGACK1)
+#define F_CPUDBGACK1 V_CPUDBGACK1(1U)
+
+#define S_CPUDBGACK0 0
+#define V_CPUDBGACK0(x) ((x) << S_CPUDBGACK0)
+#define F_CPUDBGACK0 V_CPUDBGACK0(1U)
+
+#define A_ARM_CPU_PMU_SNAPSHOT_REQ 0x47080
+
+#define S_CPUPMUSNAPSHOTREQ3 3
+#define V_CPUPMUSNAPSHOTREQ3(x) ((x) << S_CPUPMUSNAPSHOTREQ3)
+#define F_CPUPMUSNAPSHOTREQ3 V_CPUPMUSNAPSHOTREQ3(1U)
+
+#define S_CPUPMUSNAPSHOTREQ2 2
+#define V_CPUPMUSNAPSHOTREQ2(x) ((x) << S_CPUPMUSNAPSHOTREQ2)
+#define F_CPUPMUSNAPSHOTREQ2 V_CPUPMUSNAPSHOTREQ2(1U)
+
+#define S_CPUPMUSNAPSHOTREQ1 1
+#define V_CPUPMUSNAPSHOTREQ1(x) ((x) << S_CPUPMUSNAPSHOTREQ1)
+#define F_CPUPMUSNAPSHOTREQ1 V_CPUPMUSNAPSHOTREQ1(1U)
+
+#define S_CPUPMUSNAPSHOTREQ0 0
+#define V_CPUPMUSNAPSHOTREQ0(x) ((x) << S_CPUPMUSNAPSHOTREQ0)
+#define F_CPUPMUSNAPSHOTREQ0 V_CPUPMUSNAPSHOTREQ0(1U)
+
+#define A_ARM_CPU_PMU_SNAPSHOT_ACK 0x47084
+
+#define S_CPUPMUSNAPSHOTACK3 3
+#define V_CPUPMUSNAPSHOTACK3(x) ((x) << S_CPUPMUSNAPSHOTACK3)
+#define F_CPUPMUSNAPSHOTACK3 V_CPUPMUSNAPSHOTACK3(1U)
+
+#define S_CPUPMUSNAPSHOTACK2 2
+#define V_CPUPMUSNAPSHOTACK2(x) ((x) << S_CPUPMUSNAPSHOTACK2)
+#define F_CPUPMUSNAPSHOTACK2 V_CPUPMUSNAPSHOTACK2(1U)
+
+#define S_CPUPMUSNAPSHOTACK1 1
+#define V_CPUPMUSNAPSHOTACK1(x) ((x) << S_CPUPMUSNAPSHOTACK1)
+#define F_CPUPMUSNAPSHOTACK1 V_CPUPMUSNAPSHOTACK1(1U)
+
+#define S_CPUPMUSNAPSHOTACK0 0
+#define V_CPUPMUSNAPSHOTACK0(x) ((x) << S_CPUPMUSNAPSHOTACK0)
+#define F_CPUPMUSNAPSHOTACK0 V_CPUPMUSNAPSHOTACK0(1U)
+
+#define A_ARM_EMMC_CTRL 0x47088
+
+#define S_EMMC_DATA_P2 24
+#define M_EMMC_DATA_P2 0xffU
+#define V_EMMC_DATA_P2(x) ((x) << S_EMMC_DATA_P2)
+#define G_EMMC_DATA_P2(x) (((x) >> S_EMMC_DATA_P2) & M_EMMC_DATA_P2)
+
+#define S_EMMC_DATA_P1 16
+#define M_EMMC_DATA_P1 0xffU
+#define V_EMMC_DATA_P1(x) ((x) << S_EMMC_DATA_P1)
+#define G_EMMC_DATA_P1(x) (((x) >> S_EMMC_DATA_P1) & M_EMMC_DATA_P1)
+
+#define S_EMMC_CMD_P2 15
+#define V_EMMC_CMD_P2(x) ((x) << S_EMMC_CMD_P2)
+#define F_EMMC_CMD_P2 V_EMMC_CMD_P2(1U)
+
+#define S_EMMC_CMD_P1 14
+#define V_EMMC_CMD_P1(x) ((x) << S_EMMC_CMD_P1)
+#define F_EMMC_CMD_P1 V_EMMC_CMD_P1(1U)
+
+#define S_EMMC_RST_P2 13
+#define V_EMMC_RST_P2(x) ((x) << S_EMMC_RST_P2)
+#define F_EMMC_RST_P2 V_EMMC_RST_P2(1U)
+
+#define S_EMMC_RST_P1 12
+#define V_EMMC_RST_P1(x) ((x) << S_EMMC_RST_P1)
+#define F_EMMC_RST_P1 V_EMMC_RST_P1(1U)
+
+#define S_EMMC_GP_IN_P2 10
+#define M_EMMC_GP_IN_P2 0x3U
+#define V_EMMC_GP_IN_P2(x) ((x) << S_EMMC_GP_IN_P2)
+#define G_EMMC_GP_IN_P2(x) (((x) >> S_EMMC_GP_IN_P2) & M_EMMC_GP_IN_P2)
+
+#define S_EMMC_GP_IN_P1 8
+#define M_EMMC_GP_IN_P1 0x3U
+#define V_EMMC_GP_IN_P1(x) ((x) << S_EMMC_GP_IN_P1)
+#define G_EMMC_GP_IN_P1(x) (((x) >> S_EMMC_GP_IN_P1) & M_EMMC_GP_IN_P1)
+
+#define S_EMMC_CLK_SEL 0
+#define M_EMMC_CLK_SEL 0xffU
+#define V_EMMC_CLK_SEL(x) ((x) << S_EMMC_CLK_SEL)
+#define G_EMMC_CLK_SEL(x) (((x) >> S_EMMC_CLK_SEL) & M_EMMC_CLK_SEL)
+
+#define A_ARM_CPU_CFG_END_VINI_TE 0x4708c
+
+#define S_CPUSYSBARDISABLE 23
+#define V_CPUSYSBARDISABLE(x) ((x) << S_CPUSYSBARDISABLE)
+#define F_CPUSYSBARDISABLE V_CPUSYSBARDISABLE(1U)
+
+#define S_CPUBROADCACHEMAIN 22
+#define V_CPUBROADCACHEMAIN(x) ((x) << S_CPUBROADCACHEMAIN)
+#define F_CPUBROADCACHEMAIN V_CPUBROADCACHEMAIN(1U)
+
+#define S_CPUBROADOUTER 21
+#define V_CPUBROADOUTER(x) ((x) << S_CPUBROADOUTER)
+#define F_CPUBROADOUTER V_CPUBROADOUTER(1U)
+
+#define S_CPUBROADINNER 20
+#define V_CPUBROADINNER(x) ((x) << S_CPUBROADINNER)
+#define F_CPUBROADINNER V_CPUBROADINNER(1U)
+
+#define S_CPUCRYPTODISABLE3 19
+#define V_CPUCRYPTODISABLE3(x) ((x) << S_CPUCRYPTODISABLE3)
+#define F_CPUCRYPTODISABLE3 V_CPUCRYPTODISABLE3(1U)
+
+#define S_CPUCRYPTODISABLE2 18
+#define V_CPUCRYPTODISABLE2(x) ((x) << S_CPUCRYPTODISABLE2)
+#define F_CPUCRYPTODISABLE2 V_CPUCRYPTODISABLE2(1U)
+
+#define S_CPUCRYPTODISABLE1 17
+#define V_CPUCRYPTODISABLE1(x) ((x) << S_CPUCRYPTODISABLE1)
+#define F_CPUCRYPTODISABLE1 V_CPUCRYPTODISABLE1(1U)
+
+#define S_CPUCRYPTODISABLE0 16
+#define V_CPUCRYPTODISABLE0(x) ((x) << S_CPUCRYPTODISABLE0)
+#define F_CPUCRYPTODISABLE0 V_CPUCRYPTODISABLE0(1U)
+
+#define S_CPUAA64NAA323 15
+#define V_CPUAA64NAA323(x) ((x) << S_CPUAA64NAA323)
+#define F_CPUAA64NAA323 V_CPUAA64NAA323(1U)
+
+#define S_CPUAA64NAA322 14
+#define V_CPUAA64NAA322(x) ((x) << S_CPUAA64NAA322)
+#define F_CPUAA64NAA322 V_CPUAA64NAA322(1U)
+
+#define S_CPUAA64NAA321 13
+#define V_CPUAA64NAA321(x) ((x) << S_CPUAA64NAA321)
+#define F_CPUAA64NAA321 V_CPUAA64NAA321(1U)
+
+#define S_CPUAA64NAA320 12
+#define V_CPUAA64NAA320(x) ((x) << S_CPUAA64NAA320)
+#define F_CPUAA64NAA320 V_CPUAA64NAA320(1U)
+
+#define S_CPUCFGTE3 11
+#define V_CPUCFGTE3(x) ((x) << S_CPUCFGTE3)
+#define F_CPUCFGTE3 V_CPUCFGTE3(1U)
+
+#define S_CPUCFGTE2 10
+#define V_CPUCFGTE2(x) ((x) << S_CPUCFGTE2)
+#define F_CPUCFGTE2 V_CPUCFGTE2(1U)
+
+#define S_CPUCFGTE1 9
+#define V_CPUCFGTE1(x) ((x) << S_CPUCFGTE1)
+#define F_CPUCFGTE1 V_CPUCFGTE1(1U)
+
+#define S_CPUCFGTE0 8
+#define V_CPUCFGTE0(x) ((x) << S_CPUCFGTE0)
+#define F_CPUCFGTE0 V_CPUCFGTE0(1U)
+
+#define S_CPUVINIHI3 7
+#define V_CPUVINIHI3(x) ((x) << S_CPUVINIHI3)
+#define F_CPUVINIHI3 V_CPUVINIHI3(1U)
+
+#define S_CPUVINIHI2 6
+#define V_CPUVINIHI2(x) ((x) << S_CPUVINIHI2)
+#define F_CPUVINIHI2 V_CPUVINIHI2(1U)
+
+#define S_CPUVINIHI1 5
+#define V_CPUVINIHI1(x) ((x) << S_CPUVINIHI1)
+#define F_CPUVINIHI1 V_CPUVINIHI1(1U)
+
+#define S_CPUVINIHI0 4
+#define V_CPUVINIHI0(x) ((x) << S_CPUVINIHI0)
+#define F_CPUVINIHI0 V_CPUVINIHI0(1U)
+
+#define S_CPUCFGEND3 3
+#define V_CPUCFGEND3(x) ((x) << S_CPUCFGEND3)
+#define F_CPUCFGEND3 V_CPUCFGEND3(1U)
+
+#define S_CPUCFGEND2 2
+#define V_CPUCFGEND2(x) ((x) << S_CPUCFGEND2)
+#define F_CPUCFGEND2 V_CPUCFGEND2(1U)
+
+#define S_CPUCFGEND1 1
+#define V_CPUCFGEND1(x) ((x) << S_CPUCFGEND1)
+#define F_CPUCFGEND1 V_CPUCFGEND1(1U)
+
+#define S_CPUCFGEND0 0
+#define V_CPUCFGEND0(x) ((x) << S_CPUCFGEND0)
+#define F_CPUCFGEND0 V_CPUCFGEND0(1U)
+
+#define A_ARM_CPU_CP15_SDISABLE 0x47090
+
+#define S_CPUCP15SDISABLE3 3
+#define V_CPUCP15SDISABLE3(x) ((x) << S_CPUCP15SDISABLE3)
+#define F_CPUCP15SDISABLE3 V_CPUCP15SDISABLE3(1U)
+
+#define S_CPUCP15SDISABLE2 2
+#define V_CPUCP15SDISABLE2(x) ((x) << S_CPUCP15SDISABLE2)
+#define F_CPUCP15SDISABLE2 V_CPUCP15SDISABLE2(1U)
+
+#define S_CPUCP15SDISABLE1 1
+#define V_CPUCP15SDISABLE1(x) ((x) << S_CPUCP15SDISABLE1)
+#define F_CPUCP15SDISABLE1 V_CPUCP15SDISABLE1(1U)
+
+#define S_CPUCP15SDISABLE0 0
+#define V_CPUCP15SDISABLE0(x) ((x) << S_CPUCP15SDISABLE0)
+#define F_CPUCP15SDISABLE0 V_CPUCP15SDISABLE0(1U)
+
+#define A_ARM_CPU_CLUSTER_ID_AFF 0x47094
+
+#define S_CPUCLUSTERIDAFF2 8
+#define M_CPUCLUSTERIDAFF2 0xffU
+#define V_CPUCLUSTERIDAFF2(x) ((x) << S_CPUCLUSTERIDAFF2)
+#define G_CPUCLUSTERIDAFF2(x) (((x) >> S_CPUCLUSTERIDAFF2) & M_CPUCLUSTERIDAFF2)
+
+#define S_CPUCLUSTERIDAFF1 0
+#define M_CPUCLUSTERIDAFF1 0xffU
+#define V_CPUCLUSTERIDAFF1(x) ((x) << S_CPUCLUSTERIDAFF1)
+#define G_CPUCLUSTERIDAFF1(x) (((x) >> S_CPUCLUSTERIDAFF1) & M_CPUCLUSTERIDAFF1)
+
+#define A_ARM_CPU_CLK_CFG 0x47098
+
+#define S_CPUACINACTIVEM 1
+#define V_CPUACINACTIVEM(x) ((x) << S_CPUACINACTIVEM)
+#define F_CPUACINACTIVEM V_CPUACINACTIVEM(1U)
+
+#define S_CPUACLKENM 0
+#define V_CPUACLKENM(x) ((x) << S_CPUACLKENM)
+#define F_CPUACLKENM V_CPUACLKENM(1U)
+
+#define A_ARM_NVME_DB_EMU_INT_CAUSE 0x4709c
+
+#define S_INVALID_BRESP 3
+#define V_INVALID_BRESP(x) ((x) << S_INVALID_BRESP)
+#define F_INVALID_BRESP V_INVALID_BRESP(1U)
+
+#define S_DATA_LEN_OF 2
+#define V_DATA_LEN_OF(x) ((x) << S_DATA_LEN_OF)
+#define F_DATA_LEN_OF V_DATA_LEN_OF(1U)
+
+#define S_INVALID_EMU_ADDR 1
+#define V_INVALID_EMU_ADDR(x) ((x) << S_INVALID_EMU_ADDR)
+#define F_INVALID_EMU_ADDR V_INVALID_EMU_ADDR(1U)
+
+#define S_INVALID_AXI_ADDR_CFG 0
+#define V_INVALID_AXI_ADDR_CFG(x) ((x) << S_INVALID_AXI_ADDR_CFG)
+#define F_INVALID_AXI_ADDR_CFG V_INVALID_AXI_ADDR_CFG(1U)
+
+#define A_ARM_CS_RST 0x470c0
+
+#define S_ATCLKEN 9
+#define V_ATCLKEN(x) ((x) << S_ATCLKEN)
+#define F_ATCLKEN V_ATCLKEN(1U)
+
+#define S_CXAPBICRSTN 8
+#define V_CXAPBICRSTN(x) ((x) << S_CXAPBICRSTN)
+#define F_CXAPBICRSTN V_CXAPBICRSTN(1U)
+
+#define S_CSDBGEN 7
+#define V_CSDBGEN(x) ((x) << S_CSDBGEN)
+#define F_CSDBGEN V_CSDBGEN(1U)
+
+#define S_JTAGNPOTRST 6
+#define V_JTAGNPOTRST(x) ((x) << S_JTAGNPOTRST)
+#define F_JTAGNPOTRST V_JTAGNPOTRST(1U)
+
+#define S_JTAGNTRST 5
+#define V_JTAGNTRST(x) ((x) << S_JTAGNTRST)
+#define F_JTAGNTRST V_JTAGNTRST(1U)
+
+#define S_PADDR31S0 4
+#define V_PADDR31S0(x) ((x) << S_PADDR31S0)
+#define F_PADDR31S0 V_PADDR31S0(1U)
+
+#define S_CTICLKEN 3
+#define V_CTICLKEN(x) ((x) << S_CTICLKEN)
+#define F_CTICLKEN V_CTICLKEN(1U)
+
+#define S_PCLKENDBG 2
+#define V_PCLKENDBG(x) ((x) << S_PCLKENDBG)
+#define F_PCLKENDBG V_PCLKENDBG(1U)
+
+#define S_CPU_NIDEN 1
+#define V_CPU_NIDEN(x) ((x) << S_CPU_NIDEN)
+#define F_CPU_NIDEN V_CPU_NIDEN(1U)
+
+#define S_CPU_DBGEN 0
+#define V_CPU_DBGEN(x) ((x) << S_CPU_DBGEN)
+#define F_CPU_DBGEN V_CPU_DBGEN(1U)
+
+#define A_ARM_CS_ADDRL 0x470c4
+#define A_ARM_CS_ADDRH 0x470c8
+#define A_ARM_CS_DFT_CONTROL 0x470cc
+
+#define S_DFTMBISTADDR 5
+#define M_DFTMBISTADDR 0x7ffU
+#define V_DFTMBISTADDR(x) ((x) << S_DFTMBISTADDR)
+#define G_DFTMBISTADDR(x) (((x) >> S_DFTMBISTADDR) & M_DFTMBISTADDR)
+
+#define S_DFTMTESTON 3
+#define V_DFTMTESTON(x) ((x) << S_DFTMTESTON)
+#define F_DFTMTESTON V_DFTMTESTON(1U)
+
+#define S_DFTMBISTCE 2
+#define V_DFTMBISTCE(x) ((x) << S_DFTMBISTCE)
+#define F_DFTMBISTCE V_DFTMBISTCE(1U)
+
+#define S_DFTMBITWR 1
+#define V_DFTMBITWR(x) ((x) << S_DFTMBITWR)
+#define F_DFTMBITWR V_DFTMBITWR(1U)
+
+#define S_DFTSE 0
+#define V_DFTSE(x) ((x) << S_DFTSE)
+#define F_DFTSE V_DFTSE(1U)
+
+#define A_ARM_CS_DFT_IN 0x470d0
+#define A_ARM_CS_DFT_OUT 0x470d4
+#define A_ARM_CPU_EVENT_I 0x47100
+
+#define S_CPUEVENTI 0
+#define V_CPUEVENTI(x) ((x) << S_CPUEVENTI)
+#define F_CPUEVENTI V_CPUEVENTI(1U)
+
+#define A_ARM_CPU_EVENT_O 0x47104
+
+#define S_CPUEVENTO 0
+#define V_CPUEVENTO(x) ((x) << S_CPUEVENTO)
+#define F_CPUEVENTO V_CPUEVENTO(1U)
+
+#define A_ARM_CPU_CLR_EXMON_REQ 0x47108
+
+#define S_CPUCLREXMONREQ 0
+#define V_CPUCLREXMONREQ(x) ((x) << S_CPUCLREXMONREQ)
+#define F_CPUCLREXMONREQ V_CPUCLREXMONREQ(1U)
+
+#define A_ARM_CPU_CLR_EXMON_ACK 0x4710c
+
+#define S_CPUCLREXMONACK 0
+#define V_CPUCLREXMONACK(x) ((x) << S_CPUCLREXMONACK)
+#define F_CPUCLREXMONACK V_CPUCLREXMONACK(1U)
+
+#define A_ARM_UART_MSTR_RXD 0x47110
+#define A_ARM_UART_MSTR_RXC 0x47114
+
+#define S_UART_MSTR_RXC 0
+#define V_UART_MSTR_RXC(x) ((x) << S_UART_MSTR_RXC)
+#define F_UART_MSTR_RXC V_UART_MSTR_RXC(1U)
+
+#define A_ARM_UART_MSTR_TXD 0x47118
+#define A_ARM_UART_MSTR_TXC 0x4711c
+
+#define S_T7_INT 1
+#define V_T7_INT(x) ((x) << S_T7_INT)
+#define F_T7_INT V_T7_INT(1U)
+
+#define S_UART_MSTC_TXC 0
+#define V_UART_MSTC_TXC(x) ((x) << S_UART_MSTC_TXC)
+#define F_UART_MSTC_TXC V_UART_MSTC_TXC(1U)
+
+#define A_ARM_UART_SLV_SEL 0x47120
+
+#define S_UART_SLV_SEL 0
+#define V_UART_SLV_SEL(x) ((x) << S_UART_SLV_SEL)
+#define F_UART_SLV_SEL V_UART_SLV_SEL(1U)
+
+#define A_ARM_CPU_PERIPH_BASE 0x47124
+#define A_ARM_PERR_INT_ENB2 0x47128
+#define A_ARM_PERR_ENABLE2 0x4712c
+#define A_ARM_UART_CONFIG 0x47130
+#define A_ARM_UART_STAT 0x47134
+
+#define S_RSV1 6
+#define M_RSV1 0x3ffffffU
+#define V_RSV1(x) ((x) << S_RSV1)
+#define G_RSV1(x) (((x) >> S_RSV1) & M_RSV1)
+
+#define S_RXFRMERR 5
+#define V_RXFRMERR(x) ((x) << S_RXFRMERR)
+#define F_RXFRMERR V_RXFRMERR(1U)
+
+#define S_RXPARERR 4
+#define V_RXPARERR(x) ((x) << S_RXPARERR)
+#define F_RXPARERR V_RXPARERR(1U)
+
+#define S_RXOVRN 3
+#define V_RXOVRN(x) ((x) << S_RXOVRN)
+#define F_RXOVRN V_RXOVRN(1U)
+
+#define S_CTL_RXRDY 2
+#define V_CTL_RXRDY(x) ((x) << S_CTL_RXRDY)
+#define F_CTL_RXRDY V_CTL_RXRDY(1U)
+
+#define S_TXOVRN 1
+#define V_TXOVRN(x) ((x) << S_TXOVRN)
+#define F_TXOVRN V_TXOVRN(1U)
+
+#define S_CTL_TXRDY 0
+#define V_CTL_TXRDY(x) ((x) << S_CTL_TXRDY)
+#define F_CTL_TXRDY V_CTL_TXRDY(1U)
+
+#define A_ARM_UART_TX_DATA 0x47138
+
+#define S_TX_DATA 0
+#define M_TX_DATA 0xffU
+#define V_TX_DATA(x) ((x) << S_TX_DATA)
+#define G_TX_DATA(x) (((x) >> S_TX_DATA) & M_TX_DATA)
+
+#define A_ARM_UART_RX_DATA 0x4713c
+
+#define S_RX_DATA 0
+#define M_RX_DATA 0xffU
+#define V_RX_DATA(x) ((x) << S_RX_DATA)
+#define G_RX_DATA(x) (((x) >> S_RX_DATA) & M_RX_DATA)
+
+#define A_ARM_UART_DBG0 0x47140
+#define A_ARM_UART_DBG1 0x47144
+#define A_ARM_UART_DBG2 0x47148
+#define A_ARM_UART_DBG3 0x4714c
+#define A_ARM_ARM_CPU_PC0 0x47150
+#define A_ARM_ARM_CPU_PC1 0x47154
+#define A_ARM_ARM_UART_INT_CAUSE 0x47158
+
+#define S_RX_FIFO_NOT_EMPTY 1
+#define V_RX_FIFO_NOT_EMPTY(x) ((x) << S_RX_FIFO_NOT_EMPTY)
+#define F_RX_FIFO_NOT_EMPTY V_RX_FIFO_NOT_EMPTY(1U)
+
+#define S_TX_FIFO_EMPTY 0
+#define V_TX_FIFO_EMPTY(x) ((x) << S_TX_FIFO_EMPTY)
+#define F_TX_FIFO_EMPTY V_TX_FIFO_EMPTY(1U)
+
+#define A_ARM_ARM_UART_INT_EN 0x4715c
+
+#define S_RX_FIFO_INT_NOT_EMPTY 1
+#define V_RX_FIFO_INT_NOT_EMPTY(x) ((x) << S_RX_FIFO_INT_NOT_EMPTY)
+#define F_RX_FIFO_INT_NOT_EMPTY V_RX_FIFO_INT_NOT_EMPTY(1U)
+
+#define S_TX_FIFO_INT_EMPTY 0
+#define V_TX_FIFO_INT_EMPTY(x) ((x) << S_TX_FIFO_INT_EMPTY)
+#define F_TX_FIFO_INT_EMPTY V_TX_FIFO_INT_EMPTY(1U)
+
+#define A_ARM_ARM_UART_GPIO_SEL 0x47160
+
+#define S_PC_SEL 1
+#define M_PC_SEL 0x7U
+#define V_PC_SEL(x) ((x) << S_PC_SEL)
+#define G_PC_SEL(x) (((x) >> S_PC_SEL) & M_PC_SEL)
+
+#define S_UART_GPIO_SEL 0
+#define V_UART_GPIO_SEL(x) ((x) << S_UART_GPIO_SEL)
+#define F_UART_GPIO_SEL V_UART_GPIO_SEL(1U)
+
+#define A_ARM_ARM_SCRATCH_PAD0 0x47164
+#define A_ARM_ARM_SCRATCH_PAD1 0x47168
+#define A_ARM_ARM_SCRATCH_PAD2 0x4716c
+#define A_ARM_PERR_INT_CAUSE0 0x47170
+
+#define S_INIC_WRDATA_FIFO_PERR 31
+#define V_INIC_WRDATA_FIFO_PERR(x) ((x) << S_INIC_WRDATA_FIFO_PERR)
+#define F_INIC_WRDATA_FIFO_PERR V_INIC_WRDATA_FIFO_PERR(1U)
+
+#define S_INIC_RDATA_FIFO_PERR 30
+#define V_INIC_RDATA_FIFO_PERR(x) ((x) << S_INIC_RDATA_FIFO_PERR)
+#define F_INIC_RDATA_FIFO_PERR V_INIC_RDATA_FIFO_PERR(1U)
+
+#define S_MSI_MEM_PERR 29
+#define V_MSI_MEM_PERR(x) ((x) << S_MSI_MEM_PERR)
+#define F_MSI_MEM_PERR V_MSI_MEM_PERR(1U)
+
+#define S_ARM_DB_SRAM_PERR 27
+#define M_ARM_DB_SRAM_PERR 0x3U
+#define V_ARM_DB_SRAM_PERR(x) ((x) << S_ARM_DB_SRAM_PERR)
+#define G_ARM_DB_SRAM_PERR(x) (((x) >> S_ARM_DB_SRAM_PERR) & M_ARM_DB_SRAM_PERR)
+
+#define S_EMMC_FIFOPARINT 26
+#define V_EMMC_FIFOPARINT(x) ((x) << S_EMMC_FIFOPARINT)
+#define F_EMMC_FIFOPARINT V_EMMC_FIFOPARINT(1U)
+
+#define S_ICB_RAM_PERR 25
+#define V_ICB_RAM_PERR(x) ((x) << S_ICB_RAM_PERR)
+#define F_ICB_RAM_PERR V_ICB_RAM_PERR(1U)
+
+#define S_MESS2AXI4_WRFIFO_PERR 24
+#define V_MESS2AXI4_WRFIFO_PERR(x) ((x) << S_MESS2AXI4_WRFIFO_PERR)
+#define F_MESS2AXI4_WRFIFO_PERR V_MESS2AXI4_WRFIFO_PERR(1U)
+
+#define S_RC_WFIFO_OUTPERR 23
+#define V_RC_WFIFO_OUTPERR(x) ((x) << S_RC_WFIFO_OUTPERR)
+#define F_RC_WFIFO_OUTPERR V_RC_WFIFO_OUTPERR(1U)
+
+#define S_RC_SRAM_PERR 21
+#define M_RC_SRAM_PERR 0x3U
+#define V_RC_SRAM_PERR(x) ((x) << S_RC_SRAM_PERR)
+#define G_RC_SRAM_PERR(x) (((x) >> S_RC_SRAM_PERR) & M_RC_SRAM_PERR)
+
+#define S_MSI_FIFO_PAR_ERR 20
+#define V_MSI_FIFO_PAR_ERR(x) ((x) << S_MSI_FIFO_PAR_ERR)
+#define F_MSI_FIFO_PAR_ERR V_MSI_FIFO_PAR_ERR(1U)
+
+#define S_INIC2MA_INTFPERR 19
+#define V_INIC2MA_INTFPERR(x) ((x) << S_INIC2MA_INTFPERR)
+#define F_INIC2MA_INTFPERR V_INIC2MA_INTFPERR(1U)
+
+#define S_RDATAFIFO0_PERR 18
+#define V_RDATAFIFO0_PERR(x) ((x) << S_RDATAFIFO0_PERR)
+#define F_RDATAFIFO0_PERR V_RDATAFIFO0_PERR(1U)
+
+#define S_RDATAFIFO1_PERR 17
+#define V_RDATAFIFO1_PERR(x) ((x) << S_RDATAFIFO1_PERR)
+#define F_RDATAFIFO1_PERR V_RDATAFIFO1_PERR(1U)
+
+#define S_WRDATAFIFO0_PERR 16
+#define V_WRDATAFIFO0_PERR(x) ((x) << S_WRDATAFIFO0_PERR)
+#define F_WRDATAFIFO0_PERR V_WRDATAFIFO0_PERR(1U)
+
+#define S_WRDATAFIFO1_PERR 15
+#define V_WRDATAFIFO1_PERR(x) ((x) << S_WRDATAFIFO1_PERR)
+#define F_WRDATAFIFO1_PERR V_WRDATAFIFO1_PERR(1U)
+
+#define S_WR512DATAFIFO0_PERR 14
+#define V_WR512DATAFIFO0_PERR(x) ((x) << S_WR512DATAFIFO0_PERR)
+#define F_WR512DATAFIFO0_PERR V_WR512DATAFIFO0_PERR(1U)
+
+#define S_WR512DATAFIFO1_PERR 13
+#define V_WR512DATAFIFO1_PERR(x) ((x) << S_WR512DATAFIFO1_PERR)
+#define F_WR512DATAFIFO1_PERR V_WR512DATAFIFO1_PERR(1U)
+
+#define S_ROBUFF_PARERR3 12
+#define V_ROBUFF_PARERR3(x) ((x) << S_ROBUFF_PARERR3)
+#define F_ROBUFF_PARERR3 V_ROBUFF_PARERR3(1U)
+
+#define S_ROBUFF_PARERR2 11
+#define V_ROBUFF_PARERR2(x) ((x) << S_ROBUFF_PARERR2)
+#define F_ROBUFF_PARERR2 V_ROBUFF_PARERR2(1U)
+
+#define S_ROBUFF_PARERR1 10
+#define V_ROBUFF_PARERR1(x) ((x) << S_ROBUFF_PARERR1)
+#define F_ROBUFF_PARERR1 V_ROBUFF_PARERR1(1U)
+
+#define S_ROBUFF_PARERR0 9
+#define V_ROBUFF_PARERR0(x) ((x) << S_ROBUFF_PARERR0)
+#define F_ROBUFF_PARERR0 V_ROBUFF_PARERR0(1U)
+
+#define S_MA2AXI_REQDATAPARERR 8
+#define V_MA2AXI_REQDATAPARERR(x) ((x) << S_MA2AXI_REQDATAPARERR)
+#define F_MA2AXI_REQDATAPARERR V_MA2AXI_REQDATAPARERR(1U)
+
+#define S_MA2AXI_REQCTLPARERR 7
+#define V_MA2AXI_REQCTLPARERR(x) ((x) << S_MA2AXI_REQCTLPARERR)
+#define F_MA2AXI_REQCTLPARERR V_MA2AXI_REQCTLPARERR(1U)
+
+#define S_MA_RSPPERR 6
+#define V_MA_RSPPERR(x) ((x) << S_MA_RSPPERR)
+#define F_MA_RSPPERR V_MA_RSPPERR(1U)
+
+#define S_PCIE2MA_REQCTLPARERR 5
+#define V_PCIE2MA_REQCTLPARERR(x) ((x) << S_PCIE2MA_REQCTLPARERR)
+#define F_PCIE2MA_REQCTLPARERR V_PCIE2MA_REQCTLPARERR(1U)
+
+#define S_PCIE2MA_REQDATAPARERR 4
+#define V_PCIE2MA_REQDATAPARERR(x) ((x) << S_PCIE2MA_REQDATAPARERR)
+#define F_PCIE2MA_REQDATAPARERR V_PCIE2MA_REQDATAPARERR(1U)
+
+#define S_INIC2MA_REQCTLPARERR 3
+#define V_INIC2MA_REQCTLPARERR(x) ((x) << S_INIC2MA_REQCTLPARERR)
+#define F_INIC2MA_REQCTLPARERR V_INIC2MA_REQCTLPARERR(1U)
+
+#define S_INIC2MA_REQDATAPARERR 2
+#define V_INIC2MA_REQDATAPARERR(x) ((x) << S_INIC2MA_REQDATAPARERR)
+#define F_INIC2MA_REQDATAPARERR V_INIC2MA_REQDATAPARERR(1U)
+
+#define S_MA_RSPUE 1
+#define V_MA_RSPUE(x) ((x) << S_MA_RSPUE)
+#define F_MA_RSPUE V_MA_RSPUE(1U)
+
+#define S_APB2PL_RSPDATAPERR 0
+#define V_APB2PL_RSPDATAPERR(x) ((x) << S_APB2PL_RSPDATAPERR)
+#define F_APB2PL_RSPDATAPERR V_APB2PL_RSPDATAPERR(1U)
+
+#define A_ARM_PERR_INT_ENB0 0x47174
+#define A_ARM_SCRATCH_PAD3 0x47178
+
+#define S_ECO_43187 31
+#define V_ECO_43187(x) ((x) << S_ECO_43187)
+#define F_ECO_43187 V_ECO_43187(1U)
+
+#define S_TIMER_SEL 28
+#define M_TIMER_SEL 0x7U
+#define V_TIMER_SEL(x) ((x) << S_TIMER_SEL)
+#define G_TIMER_SEL(x) (((x) >> S_TIMER_SEL) & M_TIMER_SEL)
+
+#define S_TIMER 4
+#define M_TIMER 0xffffffU
+#define V_TIMER(x) ((x) << S_TIMER)
+#define G_TIMER(x) (((x) >> S_TIMER) & M_TIMER)
+
+#define S_T7_1_INT 0
+#define M_T7_1_INT 0x3U
+#define V_T7_1_INT(x) ((x) << S_T7_1_INT)
+#define G_T7_1_INT(x) (((x) >> S_T7_1_INT) & M_T7_1_INT)
+
+#define A_ARM_PERR_INT_CAUSE2 0x4717c
+
+#define S_INIC_WSTRB_FIFO_PERR 31
+#define V_INIC_WSTRB_FIFO_PERR(x) ((x) << S_INIC_WSTRB_FIFO_PERR)
+#define F_INIC_WSTRB_FIFO_PERR V_INIC_WSTRB_FIFO_PERR(1U)
+
+#define S_INIC_BID_FIFO_PERR 30
+#define V_INIC_BID_FIFO_PERR(x) ((x) << S_INIC_BID_FIFO_PERR)
+#define F_INIC_BID_FIFO_PERR V_INIC_BID_FIFO_PERR(1U)
+
+#define S_CC_SRAM_PKA_PERR 29
+#define V_CC_SRAM_PKA_PERR(x) ((x) << S_CC_SRAM_PKA_PERR)
+#define F_CC_SRAM_PKA_PERR V_CC_SRAM_PKA_PERR(1U)
+
+#define S_CC_SRAM_SEC_PERR 28
+#define V_CC_SRAM_SEC_PERR(x) ((x) << S_CC_SRAM_SEC_PERR)
+#define F_CC_SRAM_SEC_PERR V_CC_SRAM_SEC_PERR(1U)
+
+#define S_MESS2AXI4_PARERR 27
+#define V_MESS2AXI4_PARERR(x) ((x) << S_MESS2AXI4_PARERR)
+#define F_MESS2AXI4_PARERR V_MESS2AXI4_PARERR(1U)
+
+#define S_CCI2INIC_INTF_PARERR 26
+#define V_CCI2INIC_INTF_PARERR(x) ((x) << S_CCI2INIC_INTF_PARERR)
+#define F_CCI2INIC_INTF_PARERR V_CCI2INIC_INTF_PARERR(1U)
+
+#define A_ARM_MA2AXI_AW_ATTR 0x47180
+
+#define S_AWLOCKR1 29
+#define V_AWLOCKR1(x) ((x) << S_AWLOCKR1)
+#define F_AWLOCKR1 V_AWLOCKR1(1U)
+
+#define S_AWCACHER1 25
+#define M_AWCACHER1 0xfU
+#define V_AWCACHER1(x) ((x) << S_AWCACHER1)
+#define G_AWCACHER1(x) (((x) >> S_AWCACHER1) & M_AWCACHER1)
+
+#define S_AWPROTR1 21
+#define M_AWPROTR1 0xfU
+#define V_AWPROTR1(x) ((x) << S_AWPROTR1)
+#define G_AWPROTR1(x) (((x) >> S_AWPROTR1) & M_AWPROTR1)
+
+#define S_AWSNOOPR1 18
+#define M_AWSNOOPR1 0x7U
+#define V_AWSNOOPR1(x) ((x) << S_AWSNOOPR1)
+#define G_AWSNOOPR1(x) (((x) >> S_AWSNOOPR1) & M_AWSNOOPR1)
+
+#define S_AWDOMAINR1 16
+#define M_AWDOMAINR1 0x3U
+#define V_AWDOMAINR1(x) ((x) << S_AWDOMAINR1)
+#define G_AWDOMAINR1(x) (((x) >> S_AWDOMAINR1) & M_AWDOMAINR1)
+
+#define S_AWLOCKR0 13
+#define V_AWLOCKR0(x) ((x) << S_AWLOCKR0)
+#define F_AWLOCKR0 V_AWLOCKR0(1U)
+
+#define S_AWCACHER0 9
+#define M_AWCACHER0 0xfU
+#define V_AWCACHER0(x) ((x) << S_AWCACHER0)
+#define G_AWCACHER0(x) (((x) >> S_AWCACHER0) & M_AWCACHER0)
+
+#define S_AWPROTR0 5
+#define M_AWPROTR0 0xfU
+#define V_AWPROTR0(x) ((x) << S_AWPROTR0)
+#define G_AWPROTR0(x) (((x) >> S_AWPROTR0) & M_AWPROTR0)
+
+#define S_AWSNOOPR0 2
+#define M_AWSNOOPR0 0x7U
+#define V_AWSNOOPR0(x) ((x) << S_AWSNOOPR0)
+#define G_AWSNOOPR0(x) (((x) >> S_AWSNOOPR0) & M_AWSNOOPR0)
+
+#define S_AWDOMAINR0 0
+#define M_AWDOMAINR0 0x3U
+#define V_AWDOMAINR0(x) ((x) << S_AWDOMAINR0)
+#define G_AWDOMAINR0(x) (((x) >> S_AWDOMAINR0) & M_AWDOMAINR0)
+
+#define A_ARM_MA2AXI_AR_ATTR 0x47184
+
+#define S_ARLOCKR1 29
+#define V_ARLOCKR1(x) ((x) << S_ARLOCKR1)
+#define F_ARLOCKR1 V_ARLOCKR1(1U)
+
+#define S_ARCACHER1 25
+#define M_ARCACHER1 0xfU
+#define V_ARCACHER1(x) ((x) << S_ARCACHER1)
+#define G_ARCACHER1(x) (((x) >> S_ARCACHER1) & M_ARCACHER1)
+
+#define S_ARPROTR1 21
+#define M_ARPROTR1 0xfU
+#define V_ARPROTR1(x) ((x) << S_ARPROTR1)
+#define G_ARPROTR1(x) (((x) >> S_ARPROTR1) & M_ARPROTR1)
+
+#define S_ARSNOOPR1 18
+#define M_ARSNOOPR1 0x7U
+#define V_ARSNOOPR1(x) ((x) << S_ARSNOOPR1)
+#define G_ARSNOOPR1(x) (((x) >> S_ARSNOOPR1) & M_ARSNOOPR1)
+
+#define S_ARDOMAINR1 16
+#define M_ARDOMAINR1 0x3U
+#define V_ARDOMAINR1(x) ((x) << S_ARDOMAINR1)
+#define G_ARDOMAINR1(x) (((x) >> S_ARDOMAINR1) & M_ARDOMAINR1)
+
+#define S_ARLOCKR0 13
+#define V_ARLOCKR0(x) ((x) << S_ARLOCKR0)
+#define F_ARLOCKR0 V_ARLOCKR0(1U)
+
+#define S_ARCACHER0 9
+#define M_ARCACHER0 0xfU
+#define V_ARCACHER0(x) ((x) << S_ARCACHER0)
+#define G_ARCACHER0(x) (((x) >> S_ARCACHER0) & M_ARCACHER0)
+
+#define S_ARPROTR0 5
+#define M_ARPROTR0 0xfU
+#define V_ARPROTR0(x) ((x) << S_ARPROTR0)
+#define G_ARPROTR0(x) (((x) >> S_ARPROTR0) & M_ARPROTR0)
+
+#define S_ARSNOOPR0 2
+#define M_ARSNOOPR0 0x7U
+#define V_ARSNOOPR0(x) ((x) << S_ARSNOOPR0)
+#define G_ARSNOOPR0(x) (((x) >> S_ARSNOOPR0) & M_ARSNOOPR0)
+
+#define S_ARDOMAINR0 0
+#define M_ARDOMAINR0 0x3U
+#define V_ARDOMAINR0(x) ((x) << S_ARDOMAINR0)
+#define G_ARDOMAINR0(x) (((x) >> S_ARDOMAINR0) & M_ARDOMAINR0)
+
+#define A_ARM_MA2AXI_SNOOP_RGN 0x47188
+
+#define S_SNOOP_END 16
+#define M_SNOOP_END 0xffffU
+#define V_SNOOP_END(x) ((x) << S_SNOOP_END)
+#define G_SNOOP_END(x) (((x) >> S_SNOOP_END) & M_SNOOP_END)
+
+#define S_SNOOP_START 0
+#define M_SNOOP_START 0xffffU
+#define V_SNOOP_START(x) ((x) << S_SNOOP_START)
+#define G_SNOOP_START(x) (((x) >> S_SNOOP_START) & M_SNOOP_START)
+
+#define A_ARM_PERIPHERAL_INT_CAUSE 0x4718c
+
+#define S_TIMER_INT 5
+#define V_TIMER_INT(x) ((x) << S_TIMER_INT)
+#define F_TIMER_INT V_TIMER_INT(1U)
+
+#define S_NVME_INT 4
+#define V_NVME_INT(x) ((x) << S_NVME_INT)
+#define F_NVME_INT V_NVME_INT(1U)
+
+#define S_EMMC_WAKEUP_INT 3
+#define V_EMMC_WAKEUP_INT(x) ((x) << S_EMMC_WAKEUP_INT)
+#define F_EMMC_WAKEUP_INT V_EMMC_WAKEUP_INT(1U)
+
+#define S_EMMC_INT 2
+#define V_EMMC_INT(x) ((x) << S_EMMC_INT)
+#define F_EMMC_INT V_EMMC_INT(1U)
+
+#define S_USB_MC_INT 1
+#define V_USB_MC_INT(x) ((x) << S_USB_MC_INT)
+#define F_USB_MC_INT V_USB_MC_INT(1U)
+
+#define S_USB_DMA_INT 0
+#define V_USB_DMA_INT(x) ((x) << S_USB_DMA_INT)
+#define F_USB_DMA_INT V_USB_DMA_INT(1U)
+
+#define A_ARM_SCRATCH_PAD4 0x47190
+
+#define S_PAD4 15
+#define M_PAD4 0x1ffffU
+#define V_PAD4(x) ((x) << S_PAD4)
+#define G_PAD4(x) (((x) >> S_PAD4) & M_PAD4)
+
+#define S_ARM_DB_CNT 0
+#define M_ARM_DB_CNT 0x7fffU
+#define V_ARM_DB_CNT(x) ((x) << S_ARM_DB_CNT)
+#define G_ARM_DB_CNT(x) (((x) >> S_ARM_DB_CNT) & M_ARM_DB_CNT)
+
+#define A_ARM_SCRATCH_PAD5 0x47194
+#define A_ARM_SCRATCH_PAD6 0x47198
+#define A_ARM_SCRATCH_PAD7 0x4719c
+#define A_ARM_NVME_DB_EMU_INDEX 0x471a0
+#define A_ARM_NVME_DB_EMU_REGION_CTL 0x471a4
+
+#define S_WINDOW_EN 4
+#define V_WINDOW_EN(x) ((x) << S_WINDOW_EN)
+#define F_WINDOW_EN V_WINDOW_EN(1U)
+
+#define S_RGN2_INT_EN 3
+#define V_RGN2_INT_EN(x) ((x) << S_RGN2_INT_EN)
+#define F_RGN2_INT_EN V_RGN2_INT_EN(1U)
+
+#define S_RGN1_INT_EN 2
+#define V_RGN1_INT_EN(x) ((x) << S_RGN1_INT_EN)
+#define F_RGN1_INT_EN V_RGN1_INT_EN(1U)
+
+#define S_QUEUE_EN 1
+#define V_QUEUE_EN(x) ((x) << S_QUEUE_EN)
+#define F_QUEUE_EN V_QUEUE_EN(1U)
+
+#define S_RGN0_INT_EN 0
+#define V_RGN0_INT_EN(x) ((x) << S_RGN0_INT_EN)
+#define F_RGN0_INT_EN V_RGN0_INT_EN(1U)
+
+#define A_ARM_NVME_DB_EMU_DEVICE_CTL 0x471a8
+
+#define S_DEVICE_SIZE 8
+#define M_DEVICE_SIZE 0xfU
+#define V_DEVICE_SIZE(x) ((x) << S_DEVICE_SIZE)
+#define G_DEVICE_SIZE(x) (((x) >> S_DEVICE_SIZE) & M_DEVICE_SIZE)
+
+#define S_RGN1_SIZE 4
+#define M_RGN1_SIZE 0xfU
+#define V_RGN1_SIZE(x) ((x) << S_RGN1_SIZE)
+#define G_RGN1_SIZE(x) (((x) >> S_RGN1_SIZE) & M_RGN1_SIZE)
+
+#define S_RGN0_SIZE 0
+#define M_RGN0_SIZE 0xfU
+#define V_RGN0_SIZE(x) ((x) << S_RGN0_SIZE)
+#define G_RGN0_SIZE(x) (((x) >> S_RGN0_SIZE) & M_RGN0_SIZE)
+
+#define A_ARM_NVME_DB_EMU_WINDOW_START_ADDR 0x471b0
+
+#define S_T7_4_ADDR 0
+#define M_T7_4_ADDR 0xfffffffU
+#define V_T7_4_ADDR(x) ((x) << S_T7_4_ADDR)
+#define G_T7_4_ADDR(x) (((x) >> S_T7_4_ADDR) & M_T7_4_ADDR)
+
+#define A_ARM_NVME_DB_EMU_WINDOW_END_ADDR 0x471b4
+#define A_ARM_NVME_DB_EMU_QBASE_ADDR 0x471b8
+#define A_ARM_NVME_DB_EMU_QUEUE_CID 0x471bc
+
+#define S_T7_CID 0
+#define M_T7_CID 0x1ffffU
+#define V_T7_CID(x) ((x) << S_T7_CID)
+#define G_T7_CID(x) (((x) >> S_T7_CID) & M_T7_CID)
+
+#define A_ARM_NVME_DB_EMU_QUEUE_CTL 0x471c0
+
+#define S_INT_EN 27
+#define V_INT_EN(x) ((x) << S_INT_EN)
+#define F_INT_EN V_INT_EN(1U)
+
+#define S_THRESHOLD 10
+#define M_THRESHOLD 0x1ffffU
+#define V_THRESHOLD(x) ((x) << S_THRESHOLD)
+#define G_THRESHOLD(x) (((x) >> S_THRESHOLD) & M_THRESHOLD)
+
+#define S_T7_1_SIZE 0
+#define M_T7_1_SIZE 0x3ffU
+#define V_T7_1_SIZE(x) ((x) << S_T7_1_SIZE)
+#define G_T7_1_SIZE(x) (((x) >> S_T7_1_SIZE) & M_T7_1_SIZE)
+
+#define A_ARM_NVME_DB_EMU_MSIX_ADDR_L 0x471c4
+#define A_ARM_NVME_DB_EMU_MSIX_ADDR_H 0x471c8
+#define A_ARM_NVME_DB_EMU_MSIX_OFFSET 0x471cc
+#define A_ARM_NVME_DB_EMU_QUEUE_MSIX_ADDR_L 0x471d0
+#define A_ARM_NVME_DB_EMU_QUEUE_MSIX_ADDR_H 0x471d4
+#define A_ARM_NVME_DB_EMU_QUEUE_MSIX_OFFSET 0x471d8
+#define A_ARM_CERR_INT_CAUSE0 0x471dc
+
+#define S_WRDATA_FIFO0_CERR 31
+#define V_WRDATA_FIFO0_CERR(x) ((x) << S_WRDATA_FIFO0_CERR)
+#define F_WRDATA_FIFO0_CERR V_WRDATA_FIFO0_CERR(1U)
+
+#define S_WRDATA_FIFO1_CERR 30
+#define V_WRDATA_FIFO1_CERR(x) ((x) << S_WRDATA_FIFO1_CERR)
+#define F_WRDATA_FIFO1_CERR V_WRDATA_FIFO1_CERR(1U)
+
+#define S_WR512DATAFIFO0_CERR 29
+#define V_WR512DATAFIFO0_CERR(x) ((x) << S_WR512DATAFIFO0_CERR)
+#define F_WR512DATAFIFO0_CERR V_WR512DATAFIFO0_CERR(1U)
+
+#define S_WR512DATAFIFO1_CERR 28
+#define V_WR512DATAFIFO1_CERR(x) ((x) << S_WR512DATAFIFO1_CERR)
+#define F_WR512DATAFIFO1_CERR V_WR512DATAFIFO1_CERR(1U)
+
+#define S_RDATAFIFO0_CERR 27
+#define V_RDATAFIFO0_CERR(x) ((x) << S_RDATAFIFO0_CERR)
+#define F_RDATAFIFO0_CERR V_RDATAFIFO0_CERR(1U)
+
+#define S_RDATAFIFO1_CERR 26
+#define V_RDATAFIFO1_CERR(x) ((x) << S_RDATAFIFO1_CERR)
+#define F_RDATAFIFO1_CERR V_RDATAFIFO1_CERR(1U)
+
+#define S_ROBUFF_CORERR0 25
+#define V_ROBUFF_CORERR0(x) ((x) << S_ROBUFF_CORERR0)
+#define F_ROBUFF_CORERR0 V_ROBUFF_CORERR0(1U)
+
+#define S_ROBUFF_CORERR1 24
+#define V_ROBUFF_CORERR1(x) ((x) << S_ROBUFF_CORERR1)
+#define F_ROBUFF_CORERR1 V_ROBUFF_CORERR1(1U)
+
+#define S_ROBUFF_CORERR2 23
+#define V_ROBUFF_CORERR2(x) ((x) << S_ROBUFF_CORERR2)
+#define F_ROBUFF_CORERR2 V_ROBUFF_CORERR2(1U)
+
+#define S_ROBUFF_CORERR3 22
+#define V_ROBUFF_CORERR3(x) ((x) << S_ROBUFF_CORERR3)
+#define F_ROBUFF_CORERR3 V_ROBUFF_CORERR3(1U)
+
+#define S_MA2AXI_RSPDATACORERR 21
+#define V_MA2AXI_RSPDATACORERR(x) ((x) << S_MA2AXI_RSPDATACORERR)
+#define F_MA2AXI_RSPDATACORERR V_MA2AXI_RSPDATACORERR(1U)
+
+#define S_RC_SRAM_CERR 19
+#define M_RC_SRAM_CERR 0x3U
+#define V_RC_SRAM_CERR(x) ((x) << S_RC_SRAM_CERR)
+#define G_RC_SRAM_CERR(x) (((x) >> S_RC_SRAM_CERR) & M_RC_SRAM_CERR)
+
+#define S_RC_WFIFO_OUTCERR 18
+#define V_RC_WFIFO_OUTCERR(x) ((x) << S_RC_WFIFO_OUTCERR)
+#define F_RC_WFIFO_OUTCERR V_RC_WFIFO_OUTCERR(1U)
+
+#define S_RC_RSPFIFO_CERR 17
+#define V_RC_RSPFIFO_CERR(x) ((x) << S_RC_RSPFIFO_CERR)
+#define F_RC_RSPFIFO_CERR V_RC_RSPFIFO_CERR(1U)
+
+#define S_MSI_MEM_CERR 16
+#define V_MSI_MEM_CERR(x) ((x) << S_MSI_MEM_CERR)
+#define F_MSI_MEM_CERR V_MSI_MEM_CERR(1U)
+
+#define S_INIC_WRDATA_FIFO_CERR 15
+#define V_INIC_WRDATA_FIFO_CERR(x) ((x) << S_INIC_WRDATA_FIFO_CERR)
+#define F_INIC_WRDATA_FIFO_CERR V_INIC_WRDATA_FIFO_CERR(1U)
+
+#define S_INIC_RDATAFIFO_CERR 14
+#define V_INIC_RDATAFIFO_CERR(x) ((x) << S_INIC_RDATAFIFO_CERR)
+#define F_INIC_RDATAFIFO_CERR V_INIC_RDATAFIFO_CERR(1U)
+
+#define S_ARM_DB_SRAM_CERR 12
+#define M_ARM_DB_SRAM_CERR 0x3U
+#define V_ARM_DB_SRAM_CERR(x) ((x) << S_ARM_DB_SRAM_CERR)
+#define G_ARM_DB_SRAM_CERR(x) (((x) >> S_ARM_DB_SRAM_CERR) & M_ARM_DB_SRAM_CERR)
+
+#define S_ICB_RAM_CERR 11
+#define V_ICB_RAM_CERR(x) ((x) << S_ICB_RAM_CERR)
+#define F_ICB_RAM_CERR V_ICB_RAM_CERR(1U)
+
+#define S_CC_SRAM_PKA_CERR 10
+#define V_CC_SRAM_PKA_CERR(x) ((x) << S_CC_SRAM_PKA_CERR)
+#define F_CC_SRAM_PKA_CERR V_CC_SRAM_PKA_CERR(1U)
+
+#define S_CC_SRAM_SEC_CERR 9
+#define V_CC_SRAM_SEC_CERR(x) ((x) << S_CC_SRAM_SEC_CERR)
+#define F_CC_SRAM_SEC_CERR V_CC_SRAM_SEC_CERR(1U)
+
+#define A_ARM_NVME_DB_EMU_QUEUE_CTL_2 0x471e0
+
+#define S_INTERRUPT_CLEAR 0
+#define V_INTERRUPT_CLEAR(x) ((x) << S_INTERRUPT_CLEAR)
+#define F_INTERRUPT_CLEAR V_INTERRUPT_CLEAR(1U)
+
+#define A_ARM_PERIPHERAL_INT_ENB 0x471e4
+#define A_ARM_CERR_INT_ENB0 0x471e8
+#define A_ARM_CPU_DBG_ROM_ADDR0 0x47200
+
+#define S_CPUDBGROMADDR0 0
+#define M_CPUDBGROMADDR0 0xfffffU
+#define V_CPUDBGROMADDR0(x) ((x) << S_CPUDBGROMADDR0)
+#define G_CPUDBGROMADDR0(x) (((x) >> S_CPUDBGROMADDR0) & M_CPUDBGROMADDR0)
+
+#define A_ARM_CPU_DBG_ROM_ADDR1 0x47204
+
+#define S_CPUDBGROMADDR1 0
+#define M_CPUDBGROMADDR1 0x3ffU
+#define V_CPUDBGROMADDR1(x) ((x) << S_CPUDBGROMADDR1)
+#define G_CPUDBGROMADDR1(x) (((x) >> S_CPUDBGROMADDR1) & M_CPUDBGROMADDR1)
+
+#define A_ARM_CPU_DBG_ROM_ADDR_VALID 0x47208
+
+#define S_CPUDBGROMADDRVALID 0
+#define V_CPUDBGROMADDRVALID(x) ((x) << S_CPUDBGROMADDRVALID)
+#define F_CPUDBGROMADDRVALID V_CPUDBGROMADDRVALID(1U)
+
+#define A_ARM_PERR_ENABLE0 0x4720c
+#define A_ARM_SRAM2_WRITE_DATA3 0x47210
+#define A_ARM_SRAM2_READ_DATA3 0x4721c
+#define A_ARM_CPU_DFT_CFG 0x47220
+
+#define S_CPUMBISTREQ 11
+#define V_CPUMBISTREQ(x) ((x) << S_CPUMBISTREQ)
+#define F_CPUMBISTREQ V_CPUMBISTREQ(1U)
+
+#define S_CPUMBISTRSTN 10
+#define V_CPUMBISTRSTN(x) ((x) << S_CPUMBISTRSTN)
+#define F_CPUMBISTRSTN V_CPUMBISTRSTN(1U)
+
+#define S_CPUDFTDFTSE 9
+#define V_CPUDFTDFTSE(x) ((x) << S_CPUDFTDFTSE)
+#define F_CPUDFTDFTSE V_CPUDFTDFTSE(1U)
+
+#define S_CPUDFTRSTDISABLE 8
+#define V_CPUDFTRSTDISABLE(x) ((x) << S_CPUDFTRSTDISABLE)
+#define F_CPUDFTRSTDISABLE V_CPUDFTRSTDISABLE(1U)
+
+#define S_CPUDFTRAMDISABLE 7
+#define V_CPUDFTRAMDISABLE(x) ((x) << S_CPUDFTRAMDISABLE)
+#define F_CPUDFTRAMDISABLE V_CPUDFTRAMDISABLE(1U)
+
+#define S_CPUDFTMCPDISABLE 6
+#define V_CPUDFTMCPDISABLE(x) ((x) << S_CPUDFTMCPDISABLE)
+#define F_CPUDFTMCPDISABLE V_CPUDFTMCPDISABLE(1U)
+
+#define S_CPUDFTL2CLKDISABLE 5
+#define V_CPUDFTL2CLKDISABLE(x) ((x) << S_CPUDFTL2CLKDISABLE)
+#define F_CPUDFTL2CLKDISABLE V_CPUDFTL2CLKDISABLE(1U)
+
+#define S_CPUDFTCLKDISABLE3 4
+#define V_CPUDFTCLKDISABLE3(x) ((x) << S_CPUDFTCLKDISABLE3)
+#define F_CPUDFTCLKDISABLE3 V_CPUDFTCLKDISABLE3(1U)
+
+#define S_CPUDFTCLKDISABLE2 3
+#define V_CPUDFTCLKDISABLE2(x) ((x) << S_CPUDFTCLKDISABLE2)
+#define F_CPUDFTCLKDISABLE2 V_CPUDFTCLKDISABLE2(1U)
+
+#define S_CPUDFTCLKDISABLE1 2
+#define V_CPUDFTCLKDISABLE1(x) ((x) << S_CPUDFTCLKDISABLE1)
+#define F_CPUDFTCLKDISABLE1 V_CPUDFTCLKDISABLE1(1U)
+
+#define S_CPUDFTCLKDISABLE0 1
+#define V_CPUDFTCLKDISABLE0(x) ((x) << S_CPUDFTCLKDISABLE0)
+#define F_CPUDFTCLKDISABLE0 V_CPUDFTCLKDISABLE0(1U)
+
+#define S_CPUDFTCLKBYPASS 0
+#define V_CPUDFTCLKBYPASS(x) ((x) << S_CPUDFTCLKBYPASS)
+#define F_CPUDFTCLKBYPASS V_CPUDFTCLKBYPASS(1U)
+
+#define A_ARM_APB_CFG 0x47224
+
+#define S_APB_CFG 0
+#define M_APB_CFG 0x3ffffU
+#define V_APB_CFG(x) ((x) << S_APB_CFG)
+#define G_APB_CFG(x) (((x) >> S_APB_CFG) & M_APB_CFG)
+
+#define A_ARM_EMMC_BUFS 0x47228
+
+#define S_EMMC_BUFS_OEN 2
+#define M_EMMC_BUFS_OEN 0x3U
+#define V_EMMC_BUFS_OEN(x) ((x) << S_EMMC_BUFS_OEN)
+#define G_EMMC_BUFS_OEN(x) (((x) >> S_EMMC_BUFS_OEN) & M_EMMC_BUFS_OEN)
+
+#define S_EMMC_BUFS_I 0
+#define M_EMMC_BUFS_I 0x3U
+#define V_EMMC_BUFS_I(x) ((x) << S_EMMC_BUFS_I)
+#define G_EMMC_BUFS_I(x) (((x) >> S_EMMC_BUFS_I) & M_EMMC_BUFS_I)
+
+#define A_ARM_SWP_EN 0x4722c
+#define A_ARM_ADB_PWR_DWN_REQ_N 0x47230
+
+#define S_ADBPWRDWNREQN 0
+#define V_ADBPWRDWNREQN(x) ((x) << S_ADBPWRDWNREQN)
+#define F_ADBPWRDWNREQN V_ADBPWRDWNREQN(1U)
+
+#define A_ARM_GIC_USER 0x47238
+
+#define S_CPU_GIC_USER 0
+#define M_CPU_GIC_USER 0x7fU
+#define V_CPU_GIC_USER(x) ((x) << S_CPU_GIC_USER)
+#define G_CPU_GIC_USER(x) (((x) >> S_CPU_GIC_USER) & M_CPU_GIC_USER)
+
+#define A_ARM_DBPROC_SRAM_TH_ADDR 0x47240
+
+#define S_DBPROC_TH_ADDR 0
+#define M_DBPROC_TH_ADDR 0x1ffU
+#define V_DBPROC_TH_ADDR(x) ((x) << S_DBPROC_TH_ADDR)
+#define G_DBPROC_TH_ADDR(x) (((x) >> S_DBPROC_TH_ADDR) & M_DBPROC_TH_ADDR)
+
+#define A_ARM_DBPROC_SRAM_TH_READ_DATA0 0x47244
+#define A_ARM_DBPROC_SRAM_TH_READ_DATA1 0x47248
+#define A_ARM_DBPROC_SRAM_TH_READ_DATA2 0x4724c
+#define A_ARM_DBPROC_SRAM_TH_READ_DATA3 0x47250
+#define A_ARM_DBPROC_SRAM_TH_WR_DATA0 0x47254
+#define A_ARM_DBPROC_SRAM_TH_WR_DATA1 0x47258
+#define A_ARM_DBPROC_SRAM_TH_WR_DATA2 0x4725c
+#define A_ARM_DBPROC_SRAM_TH_WR_DATA3 0x47260
+#define A_ARM_SWP_EN_2 0x47264
+
+#define S_SWP_EN_2 0
+#define M_SWP_EN_2 0x3U
+#define V_SWP_EN_2(x) ((x) << S_SWP_EN_2)
+#define G_SWP_EN_2(x) (((x) >> S_SWP_EN_2) & M_SWP_EN_2)
+
+#define A_ARM_GIC_ERR 0x47268
+
+#define S_ECC_FATAL 1
+#define V_ECC_FATAL(x) ((x) << S_ECC_FATAL)
+#define F_ECC_FATAL V_ECC_FATAL(1U)
+
+#define S_AXIM_ERR 0
+#define V_AXIM_ERR(x) ((x) << S_AXIM_ERR)
+#define F_AXIM_ERR V_AXIM_ERR(1U)
+
+#define A_ARM_CPU_STAT 0x4726c
+
+#define S_CPU_L2_QACTIVE 12
+#define V_CPU_L2_QACTIVE(x) ((x) << S_CPU_L2_QACTIVE)
+#define F_CPU_L2_QACTIVE V_CPU_L2_QACTIVE(1U)
+
+#define S_WAKEUPM_O_ADB 11
+#define V_WAKEUPM_O_ADB(x) ((x) << S_WAKEUPM_O_ADB)
+#define F_WAKEUPM_O_ADB V_WAKEUPM_O_ADB(1U)
+
+#define S_PWRQACTIVEM_ADB 10
+#define V_PWRQACTIVEM_ADB(x) ((x) << S_PWRQACTIVEM_ADB)
+#define F_PWRQACTIVEM_ADB V_PWRQACTIVEM_ADB(1U)
+
+#define S_CLKQACTIVEM_ADB 9
+#define V_CLKQACTIVEM_ADB(x) ((x) << S_CLKQACTIVEM_ADB)
+#define F_CLKQACTIVEM_ADB V_CLKQACTIVEM_ADB(1U)
+
+#define S_CLKQDENYM_ADB 8
+#define V_CLKQDENYM_ADB(x) ((x) << S_CLKQDENYM_ADB)
+#define F_CLKQDENYM_ADB V_CLKQDENYM_ADB(1U)
+
+#define S_CLKQACCEPTNM_ADB 7
+#define V_CLKQACCEPTNM_ADB(x) ((x) << S_CLKQACCEPTNM_ADB)
+#define F_CLKQACCEPTNM_ADB V_CLKQACCEPTNM_ADB(1U)
+
+#define S_WAKEUPS_O_ADB 6
+#define V_WAKEUPS_O_ADB(x) ((x) << S_WAKEUPS_O_ADB)
+#define F_WAKEUPS_O_ADB V_WAKEUPS_O_ADB(1U)
+
+#define S_PWRQACTIVES_ADB 5
+#define V_PWRQACTIVES_ADB(x) ((x) << S_PWRQACTIVES_ADB)
+#define F_PWRQACTIVES_ADB V_PWRQACTIVES_ADB(1U)
+
+#define S_CLKQACTIVES_ADB 4
+#define V_CLKQACTIVES_ADB(x) ((x) << S_CLKQACTIVES_ADB)
+#define F_CLKQACTIVES_ADB V_CLKQACTIVES_ADB(1U)
+
+#define S_CLKQDENYS_ADB 3
+#define V_CLKQDENYS_ADB(x) ((x) << S_CLKQDENYS_ADB)
+#define F_CLKQDENYS_ADB V_CLKQDENYS_ADB(1U)
+
+#define S_CLKQACCEPTNS_ADB 2
+#define V_CLKQACCEPTNS_ADB(x) ((x) << S_CLKQACCEPTNS_ADB)
+#define F_CLKQACCEPTNS_ADB V_CLKQACCEPTNS_ADB(1U)
+
+#define S_PWRQDENYS_ADB 1
+#define V_PWRQDENYS_ADB(x) ((x) << S_PWRQDENYS_ADB)
+#define F_PWRQDENYS_ADB V_PWRQDENYS_ADB(1U)
+
+#define S_PWRQACCEPTNS_ADB 0
+#define V_PWRQACCEPTNS_ADB(x) ((x) << S_PWRQACCEPTNS_ADB)
+#define F_PWRQACCEPTNS_ADB V_PWRQACCEPTNS_ADB(1U)
+
+#define A_ARM_DEBUG_INT_WRITE_DATA 0x47270
+
+#define S_DEBUG_INT_WRITE_DATA 0
+#define M_DEBUG_INT_WRITE_DATA 0xfffU
+#define V_DEBUG_INT_WRITE_DATA(x) ((x) << S_DEBUG_INT_WRITE_DATA)
+#define G_DEBUG_INT_WRITE_DATA(x) (((x) >> S_DEBUG_INT_WRITE_DATA) & M_DEBUG_INT_WRITE_DATA)
+
+#define A_ARM_DEBUG_INT_STAT 0x47274
+
+#define S_DEBUG_INT_STATUS_REG 0
+#define M_DEBUG_INT_STATUS_REG 0xfffU
+#define V_DEBUG_INT_STATUS_REG(x) ((x) << S_DEBUG_INT_STATUS_REG)
+#define G_DEBUG_INT_STATUS_REG(x) (((x) >> S_DEBUG_INT_STATUS_REG) & M_DEBUG_INT_STATUS_REG)
+
+#define A_ARM_DEBUG_STAT 0x47278
+
+#define S_ARM_DEBUG_STAT 0
+#define M_ARM_DEBUG_STAT 0x3fffU
+#define V_ARM_DEBUG_STAT(x) ((x) << S_ARM_DEBUG_STAT)
+#define G_ARM_DEBUG_STAT(x) (((x) >> S_ARM_DEBUG_STAT) & M_ARM_DEBUG_STAT)
+
+#define A_ARM_SIZE_STAT 0x4727c
+
+#define S_ARM_SIZE_STAT 0
+#define M_ARM_SIZE_STAT 0x3fffffffU
+#define V_ARM_SIZE_STAT(x) ((x) << S_ARM_SIZE_STAT)
+#define G_ARM_SIZE_STAT(x) (((x) >> S_ARM_SIZE_STAT) & M_ARM_SIZE_STAT)
+
+#define A_ARM_CCI_CFG0 0x47280
+
+#define S_CCIBROADCASTCACHEMAINT 28
+#define M_CCIBROADCASTCACHEMAINT 0x7U
+#define V_CCIBROADCASTCACHEMAINT(x) ((x) << S_CCIBROADCASTCACHEMAINT)
+#define G_CCIBROADCASTCACHEMAINT(x) (((x) >> S_CCIBROADCASTCACHEMAINT) & M_CCIBROADCASTCACHEMAINT)
+
+#define S_CCISTRIPINGGRANULE 25
+#define M_CCISTRIPINGGRANULE 0x7U
+#define V_CCISTRIPINGGRANULE(x) ((x) << S_CCISTRIPINGGRANULE)
+#define G_CCISTRIPINGGRANULE(x) (((x) >> S_CCISTRIPINGGRANULE) & M_CCISTRIPINGGRANULE)
+
+#define S_CCIPERIPHBASE 0
+#define M_CCIPERIPHBASE 0x1ffffffU
+#define V_CCIPERIPHBASE(x) ((x) << S_CCIPERIPHBASE)
+#define G_CCIPERIPHBASE(x) (((x) >> S_CCIPERIPHBASE) & M_CCIPERIPHBASE)
+
+#define A_ARM_CCI_CFG1 0x47284
+
+#define S_CCIDFTRSTDISABLE 18
+#define V_CCIDFTRSTDISABLE(x) ((x) << S_CCIDFTRSTDISABLE)
+#define F_CCIDFTRSTDISABLE V_CCIDFTRSTDISABLE(1U)
+
+#define S_CCISPNIDEN 17
+#define V_CCISPNIDEN(x) ((x) << S_CCISPNIDEN)
+#define F_CCISPNIDEN V_CCISPNIDEN(1U)
+
+#define S_CCINIDEN 16
+#define V_CCINIDEN(x) ((x) << S_CCINIDEN)
+#define F_CCINIDEN V_CCINIDEN(1U)
+
+#define S_CCIACCHANNELN 11
+#define M_CCIACCHANNELN 0x1fU
+#define V_CCIACCHANNELN(x) ((x) << S_CCIACCHANNELN)
+#define G_CCIACCHANNELN(x) (((x) >> S_CCIACCHANNELN) & M_CCIACCHANNELN)
+
+#define S_CCIQOSOVERRIDE 6
+#define M_CCIQOSOVERRIDE 0x1fU
+#define V_CCIQOSOVERRIDE(x) ((x) << S_CCIQOSOVERRIDE)
+#define G_CCIQOSOVERRIDE(x) (((x) >> S_CCIQOSOVERRIDE) & M_CCIQOSOVERRIDE)
+
+#define S_CCIBUFFERABLEOVERRIDE 3
+#define M_CCIBUFFERABLEOVERRIDE 0x7U
+#define V_CCIBUFFERABLEOVERRIDE(x) ((x) << S_CCIBUFFERABLEOVERRIDE)
+#define G_CCIBUFFERABLEOVERRIDE(x) (((x) >> S_CCIBUFFERABLEOVERRIDE) & M_CCIBUFFERABLEOVERRIDE)
+
+#define S_CCIBARRIERTERMINATE 0
+#define M_CCIBARRIERTERMINATE 0x7U
+#define V_CCIBARRIERTERMINATE(x) ((x) << S_CCIBARRIERTERMINATE)
+#define G_CCIBARRIERTERMINATE(x) (((x) >> S_CCIBARRIERTERMINATE) & M_CCIBARRIERTERMINATE)
+
+#define A_ARM_CCI_CFG2 0x47288
+
+#define S_CCIADDRMAP15 30
+#define M_CCIADDRMAP15 0x3U
+#define V_CCIADDRMAP15(x) ((x) << S_CCIADDRMAP15)
+#define G_CCIADDRMAP15(x) (((x) >> S_CCIADDRMAP15) & M_CCIADDRMAP15)
+
+#define S_CCIADDRMAP14 28
+#define M_CCIADDRMAP14 0x3U
+#define V_CCIADDRMAP14(x) ((x) << S_CCIADDRMAP14)
+#define G_CCIADDRMAP14(x) (((x) >> S_CCIADDRMAP14) & M_CCIADDRMAP14)
+
+#define S_CCIADDRMAP13 26
+#define M_CCIADDRMAP13 0x3U
+#define V_CCIADDRMAP13(x) ((x) << S_CCIADDRMAP13)
+#define G_CCIADDRMAP13(x) (((x) >> S_CCIADDRMAP13) & M_CCIADDRMAP13)
+
+#define S_CCIADDRMAP12 24
+#define M_CCIADDRMAP12 0x3U
+#define V_CCIADDRMAP12(x) ((x) << S_CCIADDRMAP12)
+#define G_CCIADDRMAP12(x) (((x) >> S_CCIADDRMAP12) & M_CCIADDRMAP12)
+
+#define S_CCIADDRMAP11 22
+#define M_CCIADDRMAP11 0x3U
+#define V_CCIADDRMAP11(x) ((x) << S_CCIADDRMAP11)
+#define G_CCIADDRMAP11(x) (((x) >> S_CCIADDRMAP11) & M_CCIADDRMAP11)
+
+#define S_CCIADDRMAP10 20
+#define M_CCIADDRMAP10 0x3U
+#define V_CCIADDRMAP10(x) ((x) << S_CCIADDRMAP10)
+#define G_CCIADDRMAP10(x) (((x) >> S_CCIADDRMAP10) & M_CCIADDRMAP10)
+
+#define S_CCIADDRMAP9 18
+#define M_CCIADDRMAP9 0x3U
+#define V_CCIADDRMAP9(x) ((x) << S_CCIADDRMAP9)
+#define G_CCIADDRMAP9(x) (((x) >> S_CCIADDRMAP9) & M_CCIADDRMAP9)
+
+#define S_CCIADDRMAP8 16
+#define M_CCIADDRMAP8 0x3U
+#define V_CCIADDRMAP8(x) ((x) << S_CCIADDRMAP8)
+#define G_CCIADDRMAP8(x) (((x) >> S_CCIADDRMAP8) & M_CCIADDRMAP8)
+
+#define S_CCIADDRMAP7 14
+#define M_CCIADDRMAP7 0x3U
+#define V_CCIADDRMAP7(x) ((x) << S_CCIADDRMAP7)
+#define G_CCIADDRMAP7(x) (((x) >> S_CCIADDRMAP7) & M_CCIADDRMAP7)
+
+#define S_CCIADDRMAP6 12
+#define M_CCIADDRMAP6 0x3U
+#define V_CCIADDRMAP6(x) ((x) << S_CCIADDRMAP6)
+#define G_CCIADDRMAP6(x) (((x) >> S_CCIADDRMAP6) & M_CCIADDRMAP6)
+
+#define S_CCIADDRMAP5 10
+#define M_CCIADDRMAP5 0x3U
+#define V_CCIADDRMAP5(x) ((x) << S_CCIADDRMAP5)
+#define G_CCIADDRMAP5(x) (((x) >> S_CCIADDRMAP5) & M_CCIADDRMAP5)
+
+#define S_CCIADDRMAP4 8
+#define M_CCIADDRMAP4 0x3U
+#define V_CCIADDRMAP4(x) ((x) << S_CCIADDRMAP4)
+#define G_CCIADDRMAP4(x) (((x) >> S_CCIADDRMAP4) & M_CCIADDRMAP4)
+
+#define S_CCIADDRMAP3 6
+#define M_CCIADDRMAP3 0x3U
+#define V_CCIADDRMAP3(x) ((x) << S_CCIADDRMAP3)
+#define G_CCIADDRMAP3(x) (((x) >> S_CCIADDRMAP3) & M_CCIADDRMAP3)
+
+#define S_CCIADDRMAP2 4
+#define M_CCIADDRMAP2 0x3U
+#define V_CCIADDRMAP2(x) ((x) << S_CCIADDRMAP2)
+#define G_CCIADDRMAP2(x) (((x) >> S_CCIADDRMAP2) & M_CCIADDRMAP2)
+
+#define S_CCIADDRMAP1 2
+#define M_CCIADDRMAP1 0x3U
+#define V_CCIADDRMAP1(x) ((x) << S_CCIADDRMAP1)
+#define G_CCIADDRMAP1(x) (((x) >> S_CCIADDRMAP1) & M_CCIADDRMAP1)
+
+#define S_CCIADDRMAP0 0
+#define M_CCIADDRMAP0 0x3U
+#define V_CCIADDRMAP0(x) ((x) << S_CCIADDRMAP0)
+#define G_CCIADDRMAP0(x) (((x) >> S_CCIADDRMAP0) & M_CCIADDRMAP0)
+
+#define A_ARM_CCI_STATUS 0x4728c
+
+#define S_CCICACTIVE 6
+#define V_CCICACTIVE(x) ((x) << S_CCICACTIVE)
+#define F_CCICACTIVE V_CCICACTIVE(1U)
+
+#define S_CCICSYSACK 5
+#define V_CCICSYSACK(x) ((x) << S_CCICSYSACK)
+#define F_CCICSYSACK V_CCICSYSACK(1U)
+
+#define S_CCINEVNTCNTOVERFLOW 0
+#define M_CCINEVNTCNTOVERFLOW 0x1fU
+#define V_CCINEVNTCNTOVERFLOW(x) ((x) << S_CCINEVNTCNTOVERFLOW)
+#define G_CCINEVNTCNTOVERFLOW(x) (((x) >> S_CCINEVNTCNTOVERFLOW) & M_CCINEVNTCNTOVERFLOW)
+
+#define A_ARM_CCIM_CCI_QVN_MASTER_CFG 0x47290
+
+#define S_CCIVWREADYVN3M 20
+#define V_CCIVWREADYVN3M(x) ((x) << S_CCIVWREADYVN3M)
+#define F_CCIVWREADYVN3M V_CCIVWREADYVN3M(1U)
+
+#define S_CCIVAWREADYVN3M 19
+#define V_CCIVAWREADYVN3M(x) ((x) << S_CCIVAWREADYVN3M)
+#define F_CCIVAWREADYVN3M V_CCIVAWREADYVN3M(1U)
+
+#define S_CCIVARREADYVN3M 18
+#define V_CCIVARREADYVN3M(x) ((x) << S_CCIVARREADYVN3M)
+#define F_CCIVARREADYVN3M V_CCIVARREADYVN3M(1U)
+
+#define S_CCIVWREADYVN2M 17
+#define V_CCIVWREADYVN2M(x) ((x) << S_CCIVWREADYVN2M)
+#define F_CCIVWREADYVN2M V_CCIVWREADYVN2M(1U)
+
+#define S_CCIVAWREADYVN2M 16
+#define V_CCIVAWREADYVN2M(x) ((x) << S_CCIVAWREADYVN2M)
+#define F_CCIVAWREADYVN2M V_CCIVAWREADYVN2M(1U)
+
+#define S_CCIVARREADYVN2M 15
+#define V_CCIVARREADYVN2M(x) ((x) << S_CCIVARREADYVN2M)
+#define F_CCIVARREADYVN2M V_CCIVARREADYVN2M(1U)
+
+#define S_CCIVWREADYVN1M 14
+#define V_CCIVWREADYVN1M(x) ((x) << S_CCIVWREADYVN1M)
+#define F_CCIVWREADYVN1M V_CCIVWREADYVN1M(1U)
+
+#define S_CCIVAWREADYVN1M 13
+#define V_CCIVAWREADYVN1M(x) ((x) << S_CCIVAWREADYVN1M)
+#define F_CCIVAWREADYVN1M V_CCIVAWREADYVN1M(1U)
+
+#define S_CCIVARREADYVN1M 12
+#define V_CCIVARREADYVN1M(x) ((x) << S_CCIVARREADYVN1M)
+#define F_CCIVARREADYVN1M V_CCIVARREADYVN1M(1U)
+
+#define S_CCIVWREADYVN0M 11
+#define V_CCIVWREADYVN0M(x) ((x) << S_CCIVWREADYVN0M)
+#define F_CCIVWREADYVN0M V_CCIVWREADYVN0M(1U)
+
+#define S_CCIVAWREADYVN0M 10
+#define V_CCIVAWREADYVN0M(x) ((x) << S_CCIVAWREADYVN0M)
+#define F_CCIVAWREADYVN0M V_CCIVAWREADYVN0M(1U)
+
+#define S_CCIVARREADYVN0M 9
+#define V_CCIVARREADYVN0M(x) ((x) << S_CCIVARREADYVN0M)
+#define F_CCIVARREADYVN0M V_CCIVARREADYVN0M(1U)
+
+#define S_CCIQVNPREALLOCWM 5
+#define M_CCIQVNPREALLOCWM 0xfU
+#define V_CCIQVNPREALLOCWM(x) ((x) << S_CCIQVNPREALLOCWM)
+#define G_CCIQVNPREALLOCWM(x) (((x) >> S_CCIQVNPREALLOCWM) & M_CCIQVNPREALLOCWM)
+
+#define S_CCIQVNPREALLOCRM 1
+#define M_CCIQVNPREALLOCRM 0xfU
+#define V_CCIQVNPREALLOCRM(x) ((x) << S_CCIQVNPREALLOCRM)
+#define G_CCIQVNPREALLOCRM(x) (((x) >> S_CCIQVNPREALLOCRM) & M_CCIQVNPREALLOCRM)
+
+#define S_CCIQVNENABLEM 0
+#define V_CCIQVNENABLEM(x) ((x) << S_CCIQVNENABLEM)
+#define F_CCIQVNENABLEM V_CCIQVNENABLEM(1U)
+
+#define A_ARM_CCIM_CCI_QVN_MASTER_STATUS 0x47294
+
+#define S_CCIVWVALIDN3M 31
+#define V_CCIVWVALIDN3M(x) ((x) << S_CCIVWVALIDN3M)
+#define F_CCIVWVALIDN3M V_CCIVWVALIDN3M(1U)
+
+#define S_CCIVAWVALIDN3M 30
+#define V_CCIVAWVALIDN3M(x) ((x) << S_CCIVAWVALIDN3M)
+#define F_CCIVAWVALIDN3M V_CCIVAWVALIDN3M(1U)
+
+#define S_CCIVAWQOSN3M 29
+#define V_CCIVAWQOSN3M(x) ((x) << S_CCIVAWQOSN3M)
+#define F_CCIVAWQOSN3M V_CCIVAWQOSN3M(1U)
+
+#define S_CCIVARVALIDN3M 28
+#define V_CCIVARVALIDN3M(x) ((x) << S_CCIVARVALIDN3M)
+#define F_CCIVARVALIDN3M V_CCIVARVALIDN3M(1U)
+
+#define S_CCIVARQOSN3M 24
+#define M_CCIVARQOSN3M 0xfU
+#define V_CCIVARQOSN3M(x) ((x) << S_CCIVARQOSN3M)
+#define G_CCIVARQOSN3M(x) (((x) >> S_CCIVARQOSN3M) & M_CCIVARQOSN3M)
+
+#define S_CCIVWVALIDN2M 23
+#define V_CCIVWVALIDN2M(x) ((x) << S_CCIVWVALIDN2M)
+#define F_CCIVWVALIDN2M V_CCIVWVALIDN2M(1U)
+
+#define S_CCIVAWVALIDN2M 22
+#define V_CCIVAWVALIDN2M(x) ((x) << S_CCIVAWVALIDN2M)
+#define F_CCIVAWVALIDN2M V_CCIVAWVALIDN2M(1U)
+
+#define S_CCIVAWQOSN2M 21
+#define V_CCIVAWQOSN2M(x) ((x) << S_CCIVAWQOSN2M)
+#define F_CCIVAWQOSN2M V_CCIVAWQOSN2M(1U)
+
+#define S_CCIVARVALIDN2M 20
+#define V_CCIVARVALIDN2M(x) ((x) << S_CCIVARVALIDN2M)
+#define F_CCIVARVALIDN2M V_CCIVARVALIDN2M(1U)
+
+#define S_CCIVARQOSN2M 16
+#define M_CCIVARQOSN2M 0xfU
+#define V_CCIVARQOSN2M(x) ((x) << S_CCIVARQOSN2M)
+#define G_CCIVARQOSN2M(x) (((x) >> S_CCIVARQOSN2M) & M_CCIVARQOSN2M)
+
+#define S_CCIVWVALIDN1M 15
+#define V_CCIVWVALIDN1M(x) ((x) << S_CCIVWVALIDN1M)
+#define F_CCIVWVALIDN1M V_CCIVWVALIDN1M(1U)
+
+#define S_CCIVAWVALIDN1M 14
+#define V_CCIVAWVALIDN1M(x) ((x) << S_CCIVAWVALIDN1M)
+#define F_CCIVAWVALIDN1M V_CCIVAWVALIDN1M(1U)
+
+#define S_CCIVAWQOSN1M 13
+#define V_CCIVAWQOSN1M(x) ((x) << S_CCIVAWQOSN1M)
+#define F_CCIVAWQOSN1M V_CCIVAWQOSN1M(1U)
+
+#define S_CCIVARVALIDN1M 12
+#define V_CCIVARVALIDN1M(x) ((x) << S_CCIVARVALIDN1M)
+#define F_CCIVARVALIDN1M V_CCIVARVALIDN1M(1U)
+
+#define S_CCIVARQOSN1M 8
+#define M_CCIVARQOSN1M 0xfU
+#define V_CCIVARQOSN1M(x) ((x) << S_CCIVARQOSN1M)
+#define G_CCIVARQOSN1M(x) (((x) >> S_CCIVARQOSN1M) & M_CCIVARQOSN1M)
+
+#define S_CCIVWVALIDN0M 7
+#define V_CCIVWVALIDN0M(x) ((x) << S_CCIVWVALIDN0M)
+#define F_CCIVWVALIDN0M V_CCIVWVALIDN0M(1U)
+
+#define S_CCIVAWVALIDN0M 6
+#define V_CCIVAWVALIDN0M(x) ((x) << S_CCIVAWVALIDN0M)
+#define F_CCIVAWVALIDN0M V_CCIVAWVALIDN0M(1U)
+
+#define S_CCIVAWQOSN0M 5
+#define V_CCIVAWQOSN0M(x) ((x) << S_CCIVAWQOSN0M)
+#define F_CCIVAWQOSN0M V_CCIVAWQOSN0M(1U)
+
+#define S_CCIVARVALIDN0M 4
+#define V_CCIVARVALIDN0M(x) ((x) << S_CCIVARVALIDN0M)
+#define F_CCIVARVALIDN0M V_CCIVARVALIDN0M(1U)
+
+#define S_CCIVARQOSN0M 0
+#define M_CCIVARQOSN0M 0xfU
+#define V_CCIVARQOSN0M(x) ((x) << S_CCIVARQOSN0M)
+#define G_CCIVARQOSN0M(x) (((x) >> S_CCIVARQOSN0M) & M_CCIVARQOSN0M)
+
+#define A_ARM_CCIS_CCI_QVN_SLAVE_CFG 0x472d0
+
+#define S_CCIQVNVNETS 0
+#define M_CCIQVNVNETS 0x3U
+#define V_CCIQVNVNETS(x) ((x) << S_CCIQVNVNETS)
+#define G_CCIQVNVNETS(x) (((x) >> S_CCIQVNVNETS) & M_CCIQVNVNETS)
+
+#define A_ARM_CCIS_CCI_QVN_SLAVE_STATUS 0x472d4
+
+#define S_CCIEVNTAWQOS 4
+#define M_CCIEVNTAWQOS 0xfU
+#define V_CCIEVNTAWQOS(x) ((x) << S_CCIEVNTAWQOS)
+#define G_CCIEVNTAWQOS(x) (((x) >> S_CCIEVNTAWQOS) & M_CCIEVNTAWQOS)
+
+#define S_CCIEVNTARQOS 0
+#define M_CCIEVNTARQOS 0xfU
+#define V_CCIEVNTARQOS(x) ((x) << S_CCIEVNTARQOS)
+#define G_CCIEVNTARQOS(x) (((x) >> S_CCIEVNTARQOS) & M_CCIEVNTARQOS)
+
+#define A_ARM_CCI_EVNTBUS 0x47300
+#define A_ARM_CCI_RST_N 0x47318
+
+#define S_CCIRSTN 0
+#define V_CCIRSTN(x) ((x) << S_CCIRSTN)
+#define F_CCIRSTN V_CCIRSTN(1U)
+
+#define A_ARM_CCI_CSYREQ 0x4731c
+
+#define S_CCICSYSREQ 0
+#define V_CCICSYSREQ(x) ((x) << S_CCICSYSREQ)
+#define F_CCICSYSREQ V_CCICSYSREQ(1U)
+
+#define A_ARM_CCI_TR_DEBUGS0 0x47320
+
+#define S_CCIS0RCNT 24
+#define M_CCIS0RCNT 0xffU
+#define V_CCIS0RCNT(x) ((x) << S_CCIS0RCNT)
+#define G_CCIS0RCNT(x) (((x) >> S_CCIS0RCNT) & M_CCIS0RCNT)
+
+#define S_CCIS0ARCNT 16
+#define M_CCIS0ARCNT 0xffU
+#define V_CCIS0ARCNT(x) ((x) << S_CCIS0ARCNT)
+#define G_CCIS0ARCNT(x) (((x) >> S_CCIS0ARCNT) & M_CCIS0ARCNT)
+
+#define S_CCIS0WCNT 8
+#define M_CCIS0WCNT 0xffU
+#define V_CCIS0WCNT(x) ((x) << S_CCIS0WCNT)
+#define G_CCIS0WCNT(x) (((x) >> S_CCIS0WCNT) & M_CCIS0WCNT)
+
+#define S_CCIS0AWCNT 0
+#define M_CCIS0AWCNT 0xffU
+#define V_CCIS0AWCNT(x) ((x) << S_CCIS0AWCNT)
+#define G_CCIS0AWCNT(x) (((x) >> S_CCIS0AWCNT) & M_CCIS0AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGS1 0x47324
+
+#define S_CCIS1RCNT 24
+#define M_CCIS1RCNT 0xffU
+#define V_CCIS1RCNT(x) ((x) << S_CCIS1RCNT)
+#define G_CCIS1RCNT(x) (((x) >> S_CCIS1RCNT) & M_CCIS1RCNT)
+
+#define S_CCIS1ARCNT 16
+#define M_CCIS1ARCNT 0xffU
+#define V_CCIS1ARCNT(x) ((x) << S_CCIS1ARCNT)
+#define G_CCIS1ARCNT(x) (((x) >> S_CCIS1ARCNT) & M_CCIS1ARCNT)
+
+#define S_CCIS1WCNT 8
+#define M_CCIS1WCNT 0xffU
+#define V_CCIS1WCNT(x) ((x) << S_CCIS1WCNT)
+#define G_CCIS1WCNT(x) (((x) >> S_CCIS1WCNT) & M_CCIS1WCNT)
+
+#define S_CCIS1AWCNT 0
+#define M_CCIS1AWCNT 0xffU
+#define V_CCIS1AWCNT(x) ((x) << S_CCIS1AWCNT)
+#define G_CCIS1AWCNT(x) (((x) >> S_CCIS1AWCNT) & M_CCIS1AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGS2 0x47328
+
+#define S_CCIS2RCNT 24
+#define M_CCIS2RCNT 0xffU
+#define V_CCIS2RCNT(x) ((x) << S_CCIS2RCNT)
+#define G_CCIS2RCNT(x) (((x) >> S_CCIS2RCNT) & M_CCIS2RCNT)
+
+#define S_CCIS2ARCNT 16
+#define M_CCIS2ARCNT 0xffU
+#define V_CCIS2ARCNT(x) ((x) << S_CCIS2ARCNT)
+#define G_CCIS2ARCNT(x) (((x) >> S_CCIS2ARCNT) & M_CCIS2ARCNT)
+
+#define S_CCIS2WCNT 8
+#define M_CCIS2WCNT 0xffU
+#define V_CCIS2WCNT(x) ((x) << S_CCIS2WCNT)
+#define G_CCIS2WCNT(x) (((x) >> S_CCIS2WCNT) & M_CCIS2WCNT)
+
+#define S_CCIS2AWCNT 0
+#define M_CCIS2AWCNT 0xffU
+#define V_CCIS2AWCNT(x) ((x) << S_CCIS2AWCNT)
+#define G_CCIS2AWCNT(x) (((x) >> S_CCIS2AWCNT) & M_CCIS2AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGS3 0x4732c
+
+#define S_CCIS3RCNT 24
+#define M_CCIS3RCNT 0xffU
+#define V_CCIS3RCNT(x) ((x) << S_CCIS3RCNT)
+#define G_CCIS3RCNT(x) (((x) >> S_CCIS3RCNT) & M_CCIS3RCNT)
+
+#define S_CCIS3ARCNT 16
+#define M_CCIS3ARCNT 0xffU
+#define V_CCIS3ARCNT(x) ((x) << S_CCIS3ARCNT)
+#define G_CCIS3ARCNT(x) (((x) >> S_CCIS3ARCNT) & M_CCIS3ARCNT)
+
+#define S_CCIS3WCNT 8
+#define M_CCIS3WCNT 0xffU
+#define V_CCIS3WCNT(x) ((x) << S_CCIS3WCNT)
+#define G_CCIS3WCNT(x) (((x) >> S_CCIS3WCNT) & M_CCIS3WCNT)
+
+#define S_CCIS3AWCNT 0
+#define M_CCIS3AWCNT 0xffU
+#define V_CCIS3AWCNT(x) ((x) << S_CCIS3AWCNT)
+#define G_CCIS3AWCNT(x) (((x) >> S_CCIS3AWCNT) & M_CCIS3AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGS4 0x47330
+
+#define S_CCIS4RCNT 24
+#define M_CCIS4RCNT 0xffU
+#define V_CCIS4RCNT(x) ((x) << S_CCIS4RCNT)
+#define G_CCIS4RCNT(x) (((x) >> S_CCIS4RCNT) & M_CCIS4RCNT)
+
+#define S_CCIS4ARCNT 16
+#define M_CCIS4ARCNT 0xffU
+#define V_CCIS4ARCNT(x) ((x) << S_CCIS4ARCNT)
+#define G_CCIS4ARCNT(x) (((x) >> S_CCIS4ARCNT) & M_CCIS4ARCNT)
+
+#define S_CCIS4WCNT 8
+#define M_CCIS4WCNT 0xffU
+#define V_CCIS4WCNT(x) ((x) << S_CCIS4WCNT)
+#define G_CCIS4WCNT(x) (((x) >> S_CCIS4WCNT) & M_CCIS4WCNT)
+
+#define S_CCIS4AWCNT 0
+#define M_CCIS4AWCNT 0xffU
+#define V_CCIS4AWCNT(x) ((x) << S_CCIS4AWCNT)
+#define G_CCIS4AWCNT(x) (((x) >> S_CCIS4AWCNT) & M_CCIS4AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGS34 0x47334
+
+#define S_CCIS4RSPCNT 24
+#define M_CCIS4RSPCNT 0xffU
+#define V_CCIS4RSPCNT(x) ((x) << S_CCIS4RSPCNT)
+#define G_CCIS4RSPCNT(x) (((x) >> S_CCIS4RSPCNT) & M_CCIS4RSPCNT)
+
+#define S_CCIS4ACCNT 16
+#define M_CCIS4ACCNT 0xffU
+#define V_CCIS4ACCNT(x) ((x) << S_CCIS4ACCNT)
+#define G_CCIS4ACCNT(x) (((x) >> S_CCIS4ACCNT) & M_CCIS4ACCNT)
+
+#define S_CCIS3RSPCNT 8
+#define M_CCIS3RSPCNT 0xffU
+#define V_CCIS3RSPCNT(x) ((x) << S_CCIS3RSPCNT)
+#define G_CCIS3RSPCNT(x) (((x) >> S_CCIS3RSPCNT) & M_CCIS3RSPCNT)
+
+#define S_CCIS3ACCNT 0
+#define M_CCIS3ACCNT 0xffU
+#define V_CCIS3ACCNT(x) ((x) << S_CCIS3ACCNT)
+#define G_CCIS3ACCNT(x) (((x) >> S_CCIS3ACCNT) & M_CCIS3ACCNT)
+
+#define A_ARM_CCI_TR_DEBUGM0 0x47338
+
+#define S_CCIM0RCNT 24
+#define M_CCIM0RCNT 0xffU
+#define V_CCIM0RCNT(x) ((x) << S_CCIM0RCNT)
+#define G_CCIM0RCNT(x) (((x) >> S_CCIM0RCNT) & M_CCIM0RCNT)
+
+#define S_CCIM0ARCNT 16
+#define M_CCIM0ARCNT 0xffU
+#define V_CCIM0ARCNT(x) ((x) << S_CCIM0ARCNT)
+#define G_CCIM0ARCNT(x) (((x) >> S_CCIM0ARCNT) & M_CCIM0ARCNT)
+
+#define S_CCIM0WCNT 8
+#define M_CCIM0WCNT 0xffU
+#define V_CCIM0WCNT(x) ((x) << S_CCIM0WCNT)
+#define G_CCIM0WCNT(x) (((x) >> S_CCIM0WCNT) & M_CCIM0WCNT)
+
+#define S_CCIM0AWCNT 0
+#define M_CCIM0AWCNT 0xffU
+#define V_CCIM0AWCNT(x) ((x) << S_CCIM0AWCNT)
+#define G_CCIM0AWCNT(x) (((x) >> S_CCIM0AWCNT) & M_CCIM0AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGM1 0x4733c
+
+#define S_CCIM1RCNT 24
+#define M_CCIM1RCNT 0xffU
+#define V_CCIM1RCNT(x) ((x) << S_CCIM1RCNT)
+#define G_CCIM1RCNT(x) (((x) >> S_CCIM1RCNT) & M_CCIM1RCNT)
+
+#define S_CCIM1ARCNT 16
+#define M_CCIM1ARCNT 0xffU
+#define V_CCIM1ARCNT(x) ((x) << S_CCIM1ARCNT)
+#define G_CCIM1ARCNT(x) (((x) >> S_CCIM1ARCNT) & M_CCIM1ARCNT)
+
+#define S_CCIM1WCNT 8
+#define M_CCIM1WCNT 0xffU
+#define V_CCIM1WCNT(x) ((x) << S_CCIM1WCNT)
+#define G_CCIM1WCNT(x) (((x) >> S_CCIM1WCNT) & M_CCIM1WCNT)
+
+#define S_CCIM1AWCNT 0
+#define M_CCIM1AWCNT 0xffU
+#define V_CCIM1AWCNT(x) ((x) << S_CCIM1AWCNT)
+#define G_CCIM1AWCNT(x) (((x) >> S_CCIM1AWCNT) & M_CCIM1AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGM2 0x47340
+
+#define S_CCIM2RCNT 24
+#define M_CCIM2RCNT 0xffU
+#define V_CCIM2RCNT(x) ((x) << S_CCIM2RCNT)
+#define G_CCIM2RCNT(x) (((x) >> S_CCIM2RCNT) & M_CCIM2RCNT)
+
+#define S_CCIM2ARCNT 16
+#define M_CCIM2ARCNT 0xffU
+#define V_CCIM2ARCNT(x) ((x) << S_CCIM2ARCNT)
+#define G_CCIM2ARCNT(x) (((x) >> S_CCIM2ARCNT) & M_CCIM2ARCNT)
+
+#define S_CCIM2WCNT 8
+#define M_CCIM2WCNT 0xffU
+#define V_CCIM2WCNT(x) ((x) << S_CCIM2WCNT)
+#define G_CCIM2WCNT(x) (((x) >> S_CCIM2WCNT) & M_CCIM2WCNT)
+
+#define S_CCIM2AWCNT 0
+#define M_CCIM2AWCNT 0xffU
+#define V_CCIM2AWCNT(x) ((x) << S_CCIM2AWCNT)
+#define G_CCIM2AWCNT(x) (((x) >> S_CCIM2AWCNT) & M_CCIM2AWCNT)
+
+#define A_ARM_MA_TR_DEBUG 0x47344
+
+#define S_MA1_RD_CNT 24
+#define M_MA1_RD_CNT 0xffU
+#define V_MA1_RD_CNT(x) ((x) << S_MA1_RD_CNT)
+#define G_MA1_RD_CNT(x) (((x) >> S_MA1_RD_CNT) & M_MA1_RD_CNT)
+
+#define S_MA1_WR_CNT 16
+#define M_MA1_WR_CNT 0xffU
+#define V_MA1_WR_CNT(x) ((x) << S_MA1_WR_CNT)
+#define G_MA1_WR_CNT(x) (((x) >> S_MA1_WR_CNT) & M_MA1_WR_CNT)
+
+#define S_MA0_RD_CNT 8
+#define M_MA0_RD_CNT 0xffU
+#define V_MA0_RD_CNT(x) ((x) << S_MA0_RD_CNT)
+#define G_MA0_RD_CNT(x) (((x) >> S_MA0_RD_CNT) & M_MA0_RD_CNT)
+
+#define S_MA0_WR_CNT 0
+#define M_MA0_WR_CNT 0xffU
+#define V_MA0_WR_CNT(x) ((x) << S_MA0_WR_CNT)
+#define G_MA0_WR_CNT(x) (((x) >> S_MA0_WR_CNT) & M_MA0_WR_CNT)
+
+#define A_ARM_GP_INT 0x47348
+
+#define S_GP_INT 0
+#define M_GP_INT 0xffU
+#define V_GP_INT(x) ((x) << S_GP_INT)
+#define G_GP_INT(x) (((x) >> S_GP_INT) & M_GP_INT)
+
+#define A_ARM_DMA_CFG0 0x47350
+#define A_ARM_DMA_CFG1 0x47354
+
+#define S_DMABOOTPERIPHNS 16
+#define M_DMABOOTPERIPHNS 0x3ffU
+#define V_DMABOOTPERIPHNS(x) ((x) << S_DMABOOTPERIPHNS)
+#define G_DMABOOTPERIPHNS(x) (((x) >> S_DMABOOTPERIPHNS) & M_DMABOOTPERIPHNS)
+
+#define S_DMABOOTIRQNS 4
+#define M_DMABOOTIRQNS 0x3ffU
+#define V_DMABOOTIRQNS(x) ((x) << S_DMABOOTIRQNS)
+#define G_DMABOOTIRQNS(x) (((x) >> S_DMABOOTIRQNS) & M_DMABOOTIRQNS)
+
+#define S_DMABOOTMANAGERNS 1
+#define V_DMABOOTMANAGERNS(x) ((x) << S_DMABOOTMANAGERNS)
+#define F_DMABOOTMANAGERNS V_DMABOOTMANAGERNS(1U)
+
+#define S_DMABOOTFROMPC 0
+#define V_DMABOOTFROMPC(x) ((x) << S_DMABOOTFROMPC)
+#define F_DMABOOTFROMPC V_DMABOOTFROMPC(1U)
+
+#define A_ARM_ARM_CFG0 0x47380
+
+#define S_MESSAGEBYPASS_DATA 2
+#define V_MESSAGEBYPASS_DATA(x) ((x) << S_MESSAGEBYPASS_DATA)
+#define F_MESSAGEBYPASS_DATA V_MESSAGEBYPASS_DATA(1U)
+
+#define S_MESSAGEBYPASS 1
+#define V_MESSAGEBYPASS(x) ((x) << S_MESSAGEBYPASS)
+#define F_MESSAGEBYPASS V_MESSAGEBYPASS(1U)
+
+#define S_PCIEBYPASS 0
+#define V_PCIEBYPASS(x) ((x) << S_PCIEBYPASS)
+#define F_PCIEBYPASS V_PCIEBYPASS(1U)
+
+#define A_ARM_ARM_CFG1 0x47384
+#define A_ARM_ARM_CFG2 0x47390
+#define A_ARM_PCIE_MA_ADDR_REGION0 0x47400
+
+#define S_ADDRREG0 0
+#define M_ADDRREG0 0xfffffffU
+#define V_ADDRREG0(x) ((x) << S_ADDRREG0)
+#define G_ADDRREG0(x) (((x) >> S_ADDRREG0) & M_ADDRREG0)
+
+#define A_ARM_PCIE_MA_ADDR_REGION1 0x47404
+
+#define S_ADDRREG1 0
+#define M_ADDRREG1 0xfffffffU
+#define V_ADDRREG1(x) ((x) << S_ADDRREG1)
+#define G_ADDRREG1(x) (((x) >> S_ADDRREG1) & M_ADDRREG1)
+
+#define A_ARM_PCIE_MA_ADDR_REGION2 0x47408
+
+#define S_ADDRREG2 0
+#define M_ADDRREG2 0xfffffffU
+#define V_ADDRREG2(x) ((x) << S_ADDRREG2)
+#define G_ADDRREG2(x) (((x) >> S_ADDRREG2) & M_ADDRREG2)
+
+#define A_ARM_PCIE_MA_ADDR_REGION3 0x4740c
+
+#define S_ADDRREG3 0
+#define M_ADDRREG3 0xfffffffU
+#define V_ADDRREG3(x) ((x) << S_ADDRREG3)
+#define G_ADDRREG3(x) (((x) >> S_ADDRREG3) & M_ADDRREG3)
+
+#define A_ARM_PCIE_MA_ADDR_REGION4 0x47410
+
+#define S_ADDRREG4 0
+#define M_ADDRREG4 0xfffffffU
+#define V_ADDRREG4(x) ((x) << S_ADDRREG4)
+#define G_ADDRREG4(x) (((x) >> S_ADDRREG4) & M_ADDRREG4)
+
+#define A_ARM_PCIE_MA_ADDR_REGION5 0x47414
+
+#define S_ADDRREG5 0
+#define M_ADDRREG5 0xfffffffU
+#define V_ADDRREG5(x) ((x) << S_ADDRREG5)
+#define G_ADDRREG5(x) (((x) >> S_ADDRREG5) & M_ADDRREG5)
+
+#define A_ARM_PCIE_MA_ADDR_REGION6 0x47418
+
+#define S_ADDRREG6 0
+#define M_ADDRREG6 0xfffffffU
+#define V_ADDRREG6(x) ((x) << S_ADDRREG6)
+#define G_ADDRREG6(x) (((x) >> S_ADDRREG6) & M_ADDRREG6)
+
+#define A_ARM_PCIE_MA_ADDR_REGION7 0x4741c
+
+#define S_ADDRREG7 0
+#define M_ADDRREG7 0xfffffffU
+#define V_ADDRREG7(x) ((x) << S_ADDRREG7)
+#define G_ADDRREG7(x) (((x) >> S_ADDRREG7) & M_ADDRREG7)
+
+#define A_ARM_INTERRUPT_GEN 0x47420
+
+#define S_INT_GEN 0
+#define M_INT_GEN 0x3U
+#define V_INT_GEN(x) ((x) << S_INT_GEN)
+#define G_INT_GEN(x) (((x) >> S_INT_GEN) & M_INT_GEN)
+
+#define A_ARM_INTERRUPT_CLEAR 0x47424
+
+#define S_INT_CLEAR 0
+#define M_INT_CLEAR 0x3U
+#define V_INT_CLEAR(x) ((x) << S_INT_CLEAR)
+#define G_INT_CLEAR(x) (((x) >> S_INT_CLEAR) & M_INT_CLEAR)
+
+#define A_ARM_DEBUG_STATUS_0 0x47428
+#define A_ARM_DBPROC_CONTROL 0x4742c
+
+#define S_NO_OF_INTERRUPTS 0
+#define M_NO_OF_INTERRUPTS 0x3U
+#define V_NO_OF_INTERRUPTS(x) ((x) << S_NO_OF_INTERRUPTS)
+#define G_NO_OF_INTERRUPTS(x) (((x) >> S_NO_OF_INTERRUPTS) & M_NO_OF_INTERRUPTS)
+
+#define A_ARM_PERR_INT_CAUSE1 0x47430
+
+#define S_ARWFIFO0_PERR 31
+#define V_ARWFIFO0_PERR(x) ((x) << S_ARWFIFO0_PERR)
+#define F_ARWFIFO0_PERR V_ARWFIFO0_PERR(1U)
+
+#define S_ARWFIFO1_PERR 30
+#define V_ARWFIFO1_PERR(x) ((x) << S_ARWFIFO1_PERR)
+#define F_ARWFIFO1_PERR V_ARWFIFO1_PERR(1U)
+
+#define S_ARWIDFIFO0_PERR 29
+#define V_ARWIDFIFO0_PERR(x) ((x) << S_ARWIDFIFO0_PERR)
+#define F_ARWIDFIFO0_PERR V_ARWIDFIFO0_PERR(1U)
+
+#define S_ARWIDFIFO1_PERR 28
+#define V_ARWIDFIFO1_PERR(x) ((x) << S_ARWIDFIFO1_PERR)
+#define F_ARWIDFIFO1_PERR V_ARWIDFIFO1_PERR(1U)
+
+#define S_ARIDFIFO0_PERR 27
+#define V_ARIDFIFO0_PERR(x) ((x) << S_ARIDFIFO0_PERR)
+#define F_ARIDFIFO0_PERR V_ARIDFIFO0_PERR(1U)
+
+#define S_ARIDFIFO1_PERR 26
+#define V_ARIDFIFO1_PERR(x) ((x) << S_ARIDFIFO1_PERR)
+#define F_ARIDFIFO1_PERR V_ARIDFIFO1_PERR(1U)
+
+#define S_RRSPADDR_FIFO0_PERR 25
+#define V_RRSPADDR_FIFO0_PERR(x) ((x) << S_RRSPADDR_FIFO0_PERR)
+#define F_RRSPADDR_FIFO0_PERR V_RRSPADDR_FIFO0_PERR(1U)
+
+#define S_RRSPADDR_FIFO1_PERR 24
+#define V_RRSPADDR_FIFO1_PERR(x) ((x) << S_RRSPADDR_FIFO1_PERR)
+#define F_RRSPADDR_FIFO1_PERR V_RRSPADDR_FIFO1_PERR(1U)
+
+#define S_WRSTRB_FIFO0_PERR 23
+#define V_WRSTRB_FIFO0_PERR(x) ((x) << S_WRSTRB_FIFO0_PERR)
+#define F_WRSTRB_FIFO0_PERR V_WRSTRB_FIFO0_PERR(1U)
+
+#define S_WRSTRB_FIFO1_PERR 22
+#define V_WRSTRB_FIFO1_PERR(x) ((x) << S_WRSTRB_FIFO1_PERR)
+#define F_WRSTRB_FIFO1_PERR V_WRSTRB_FIFO1_PERR(1U)
+
+#define S_MA2AXI_RSPDATAPARERR 21
+#define V_MA2AXI_RSPDATAPARERR(x) ((x) << S_MA2AXI_RSPDATAPARERR)
+#define F_MA2AXI_RSPDATAPARERR V_MA2AXI_RSPDATAPARERR(1U)
+
+#define S_MA2AXI_DATA_PAR_ERR 20
+#define V_MA2AXI_DATA_PAR_ERR(x) ((x) << S_MA2AXI_DATA_PAR_ERR)
+#define F_MA2AXI_DATA_PAR_ERR V_MA2AXI_DATA_PAR_ERR(1U)
+
+#define S_MA2AXI_WR_ORD_FIFO_PARERR 19
+#define V_MA2AXI_WR_ORD_FIFO_PARERR(x) ((x) << S_MA2AXI_WR_ORD_FIFO_PARERR)
+#define F_MA2AXI_WR_ORD_FIFO_PARERR V_MA2AXI_WR_ORD_FIFO_PARERR(1U)
+
+#define S_NVME_DB_EMU_TRACKER_FIFO_PERR 18
+#define V_NVME_DB_EMU_TRACKER_FIFO_PERR(x) ((x) << S_NVME_DB_EMU_TRACKER_FIFO_PERR)
+#define F_NVME_DB_EMU_TRACKER_FIFO_PERR V_NVME_DB_EMU_TRACKER_FIFO_PERR(1U)
+
+#define S_NVME_DB_EMU_QUEUE_AW_ADDR_FIFO_PERR 17
+#define V_NVME_DB_EMU_QUEUE_AW_ADDR_FIFO_PERR(x) ((x) << S_NVME_DB_EMU_QUEUE_AW_ADDR_FIFO_PERR)
+#define F_NVME_DB_EMU_QUEUE_AW_ADDR_FIFO_PERR V_NVME_DB_EMU_QUEUE_AW_ADDR_FIFO_PERR(1U)
+
+#define S_NVME_DB_EMU_INTERRUPT_OFFSET_FIFO_PERR 16
+#define V_NVME_DB_EMU_INTERRUPT_OFFSET_FIFO_PERR(x) ((x) << S_NVME_DB_EMU_INTERRUPT_OFFSET_FIFO_PERR)
+#define F_NVME_DB_EMU_INTERRUPT_OFFSET_FIFO_PERR V_NVME_DB_EMU_INTERRUPT_OFFSET_FIFO_PERR(1U)
+
+#define S_NVME_DB_EMU_ID_FIFO0_PERR 15
+#define V_NVME_DB_EMU_ID_FIFO0_PERR(x) ((x) << S_NVME_DB_EMU_ID_FIFO0_PERR)
+#define F_NVME_DB_EMU_ID_FIFO0_PERR V_NVME_DB_EMU_ID_FIFO0_PERR(1U)
+
+#define S_NVME_DB_EMU_ID_FIFO1_PERR 14
+#define V_NVME_DB_EMU_ID_FIFO1_PERR(x) ((x) << S_NVME_DB_EMU_ID_FIFO1_PERR)
+#define F_NVME_DB_EMU_ID_FIFO1_PERR V_NVME_DB_EMU_ID_FIFO1_PERR(1U)
+
+#define S_RC_ARWFIFO_PERR 13
+#define V_RC_ARWFIFO_PERR(x) ((x) << S_RC_ARWFIFO_PERR)
+#define F_RC_ARWFIFO_PERR V_RC_ARWFIFO_PERR(1U)
+
+#define S_RC_ARIDBURSTADDRFIFO_PERR 12
+#define V_RC_ARIDBURSTADDRFIFO_PERR(x) ((x) << S_RC_ARIDBURSTADDRFIFO_PERR)
+#define F_RC_ARIDBURSTADDRFIFO_PERR V_RC_ARIDBURSTADDRFIFO_PERR(1U)
+
+#define S_RC_CFG_FIFO_PERR 11
+#define V_RC_CFG_FIFO_PERR(x) ((x) << S_RC_CFG_FIFO_PERR)
+#define F_RC_CFG_FIFO_PERR V_RC_CFG_FIFO_PERR(1U)
+
+#define S_RC_RSPFIFO_PERR 10
+#define V_RC_RSPFIFO_PERR(x) ((x) << S_RC_RSPFIFO_PERR)
+#define F_RC_RSPFIFO_PERR V_RC_RSPFIFO_PERR(1U)
+
+#define S_INIC_ARIDFIFO_PERR 9
+#define V_INIC_ARIDFIFO_PERR(x) ((x) << S_INIC_ARIDFIFO_PERR)
+#define F_INIC_ARIDFIFO_PERR V_INIC_ARIDFIFO_PERR(1U)
+
+#define S_INIC_ARWFIFO_PERR 8
+#define V_INIC_ARWFIFO_PERR(x) ((x) << S_INIC_ARWFIFO_PERR)
+#define F_INIC_ARWFIFO_PERR V_INIC_ARWFIFO_PERR(1U)
+
+#define S_AXI2MA_128_RD_ADDR_SIZE_FIFO_PERR 7
+#define V_AXI2MA_128_RD_ADDR_SIZE_FIFO_PERR(x) ((x) << S_AXI2MA_128_RD_ADDR_SIZE_FIFO_PERR)
+#define F_AXI2MA_128_RD_ADDR_SIZE_FIFO_PERR V_AXI2MA_128_RD_ADDR_SIZE_FIFO_PERR(1U)
+
+#define S_AXI2RC_128_RD_ADDR_SIZE_FIFO_PERR 6
+#define V_AXI2RC_128_RD_ADDR_SIZE_FIFO_PERR(x) ((x) << S_AXI2RC_128_RD_ADDR_SIZE_FIFO_PERR)
+#define F_AXI2RC_128_RD_ADDR_SIZE_FIFO_PERR V_AXI2RC_128_RD_ADDR_SIZE_FIFO_PERR(1U)
+
+#define S_ARM_MA_512B_RD_ADDR_SIZE_FIFO0_PERR 5
+#define V_ARM_MA_512B_RD_ADDR_SIZE_FIFO0_PERR(x) ((x) << S_ARM_MA_512B_RD_ADDR_SIZE_FIFO0_PERR)
+#define F_ARM_MA_512B_RD_ADDR_SIZE_FIFO0_PERR V_ARM_MA_512B_RD_ADDR_SIZE_FIFO0_PERR(1U)
+
+#define S_ARM_MA_512B_RD_ADDR_SIZE_FIFO1_PERR 4
+#define V_ARM_MA_512B_RD_ADDR_SIZE_FIFO1_PERR(x) ((x) << S_ARM_MA_512B_RD_ADDR_SIZE_FIFO1_PERR)
+#define F_ARM_MA_512B_RD_ADDR_SIZE_FIFO1_PERR V_ARM_MA_512B_RD_ADDR_SIZE_FIFO1_PERR(1U)
+
+#define S_ARM_MA_512B_ARB_FIFO_PERR 3
+#define V_ARM_MA_512B_ARB_FIFO_PERR(x) ((x) << S_ARM_MA_512B_ARB_FIFO_PERR)
+#define F_ARM_MA_512B_ARB_FIFO_PERR V_ARM_MA_512B_ARB_FIFO_PERR(1U)
+
+#define S_PCIE_INIC_MA_ARB_FIFO_PERR 2
+#define V_PCIE_INIC_MA_ARB_FIFO_PERR(x) ((x) << S_PCIE_INIC_MA_ARB_FIFO_PERR)
+#define F_PCIE_INIC_MA_ARB_FIFO_PERR V_PCIE_INIC_MA_ARB_FIFO_PERR(1U)
+
+#define S_PCIE_INIC_ARB_RSPPERR 1
+#define V_PCIE_INIC_ARB_RSPPERR(x) ((x) << S_PCIE_INIC_ARB_RSPPERR)
+#define F_PCIE_INIC_ARB_RSPPERR V_PCIE_INIC_ARB_RSPPERR(1U)
+
+#define S_ITE_CACHE_PERR 0
+#define V_ITE_CACHE_PERR(x) ((x) << S_ITE_CACHE_PERR)
+#define F_ITE_CACHE_PERR V_ITE_CACHE_PERR(1U)
+
+#define A_ARM_PERR_INT_ENB1 0x47434
+#define A_ARM_PERR_ENABLE1 0x47438
+#define A_ARM_DEBUG_STATUS_1 0x4743c
+#define A_ARM_PCIE_MA_ADDR_REGION_DST 0x47440
+
+#define S_ADDRREGDST 0
+#define M_ADDRREGDST 0x1ffU
+#define V_ADDRREGDST(x) ((x) << S_ADDRREGDST)
+#define G_ADDRREGDST(x) (((x) >> S_ADDRREGDST) & M_ADDRREGDST)
+
+#define A_ARM_ERR_INT_CAUSE0 0x47444
+
+#define S_STRB0_ERROR 31
+#define V_STRB0_ERROR(x) ((x) << S_STRB0_ERROR)
+#define F_STRB0_ERROR V_STRB0_ERROR(1U)
+
+#define S_STRB1_ERROR 30
+#define V_STRB1_ERROR(x) ((x) << S_STRB1_ERROR)
+#define F_STRB1_ERROR V_STRB1_ERROR(1U)
+
+#define S_PCIE_INIC_MA_ARB_INV_RSP_TAG 29
+#define V_PCIE_INIC_MA_ARB_INV_RSP_TAG(x) ((x) << S_PCIE_INIC_MA_ARB_INV_RSP_TAG)
+#define F_PCIE_INIC_MA_ARB_INV_RSP_TAG V_PCIE_INIC_MA_ARB_INV_RSP_TAG(1U)
+
+#define S_ERROR0_NOCMD_DATA 28
+#define V_ERROR0_NOCMD_DATA(x) ((x) << S_ERROR0_NOCMD_DATA)
+#define F_ERROR0_NOCMD_DATA V_ERROR0_NOCMD_DATA(1U)
+
+#define S_ERROR1_NOCMD_DATA 27
+#define V_ERROR1_NOCMD_DATA(x) ((x) << S_ERROR1_NOCMD_DATA)
+#define F_ERROR1_NOCMD_DATA V_ERROR1_NOCMD_DATA(1U)
+
+#define S_INIC_STRB_ERROR 26
+#define V_INIC_STRB_ERROR(x) ((x) << S_INIC_STRB_ERROR)
+#define F_INIC_STRB_ERROR V_INIC_STRB_ERROR(1U)
+
+#define A_ARM_ERR_INT_ENB0 0x47448
+#define A_ARM_DEBUG_INDEX 0x47450
+#define A_ARM_DEBUG_DATA_HIGH 0x47454
+#define A_ARM_DEBUG_DATA_LOW 0x47458
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_BA0 0x47500
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_BA1 0x47504
+
+#define S_BASEADDRESS 0
+#define M_BASEADDRESS 0x3U
+#define V_BASEADDRESS(x) ((x) << S_BASEADDRESS)
+#define G_BASEADDRESS(x) (((x) >> S_BASEADDRESS) & M_BASEADDRESS)
+
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_CFG0 0x47508
+
+#define S_WATERMARK 16
+#define M_WATERMARK 0x3ffU
+#define V_WATERMARK(x) ((x) << S_WATERMARK)
+#define G_WATERMARK(x) (((x) >> S_WATERMARK) & M_WATERMARK)
+
+#define S_SIZEMAX 0
+#define M_SIZEMAX 0x3ffU
+#define V_SIZEMAX(x) ((x) << S_SIZEMAX)
+#define G_SIZEMAX(x) (((x) >> S_SIZEMAX) & M_SIZEMAX)
+
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_CFG1 0x4750c
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_CFG2 0x47510
+
+#define S_CPUREADADDRESS 0
+#define M_CPUREADADDRESS 0x3ffU
+#define V_CPUREADADDRESS(x) ((x) << S_CPUREADADDRESS)
+#define G_CPUREADADDRESS(x) (((x) >> S_CPUREADADDRESS) & M_CPUREADADDRESS)
+
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_CFG3 0x47514
+
+#define S_CPUREADADDRESSVLD 0
+#define V_CPUREADADDRESSVLD(x) ((x) << S_CPUREADADDRESSVLD)
+#define F_CPUREADADDRESSVLD V_CPUREADADDRESSVLD(1U)
+
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_CFG4 0x47518
+#define A_ARM_APB2MSI_INTERRUPT_0_STATUS 0x47600
+#define A_ARM_APB2MSI_INTERRUPT_1_STATUS 0x47604
+#define A_ARM_APB2MSI_INTERRUPT_2_STATUS 0x47608
+#define A_ARM_APB2MSI_INTERRUPT_3_STATUS 0x4760c
+#define A_ARM_APB2MSI_INTERRUPT_0_ENABLE 0x47610
+#define A_ARM_APB2MSI_INTERRUPT_1_ENABLE 0x47614
+#define A_ARM_APB2MSI_INTERRUPT_2_ENABLE 0x47618
+#define A_ARM_APB2MSI_INTERRUPT_3_ENABLE 0x4761c
+#define A_ARM_APB2MSI_INTERRUPT_PRIORITY_LEVEL 0x47620
+
+#define S_ARM_APB2MSI_INT_PRIORITY_LEVEL 0
+#define M_ARM_APB2MSI_INT_PRIORITY_LEVEL 0x7U
+#define V_ARM_APB2MSI_INT_PRIORITY_LEVEL(x) ((x) << S_ARM_APB2MSI_INT_PRIORITY_LEVEL)
+#define G_ARM_APB2MSI_INT_PRIORITY_LEVEL(x) (((x) >> S_ARM_APB2MSI_INT_PRIORITY_LEVEL) & M_ARM_APB2MSI_INT_PRIORITY_LEVEL)
+
+#define A_ARM_APB2MSI_MEM_READ_ADDR 0x47624
+
+#define S_ARM_APB2MSI_MEM_READ_ADDR 0
+#define M_ARM_APB2MSI_MEM_READ_ADDR 0x7fU
+#define V_ARM_APB2MSI_MEM_READ_ADDR(x) ((x) << S_ARM_APB2MSI_MEM_READ_ADDR)
+#define G_ARM_APB2MSI_MEM_READ_ADDR(x) (((x) >> S_ARM_APB2MSI_MEM_READ_ADDR) & M_ARM_APB2MSI_MEM_READ_ADDR)
+
+#define A_ARM_MSI_MEMORY_DATA 0x47628
+#define A_ARM_MSI_MEMORY_ADDR 0x4762c
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_CFG5 0x47630
+
+#define S_CONFIGDONE 0
+#define V_CONFIGDONE(x) ((x) << S_CONFIGDONE)
+#define F_CONFIGDONE V_CONFIGDONE(1U)
+
+#define A_ARM_AXI2MA_TIMERCNT 0x47640
+#define A_ARM_AXI2MA_TRTYPE 0x47644
+
+#define S_ARMMA2AXI1ARTRTYPE 3
+#define V_ARMMA2AXI1ARTRTYPE(x) ((x) << S_ARMMA2AXI1ARTRTYPE)
+#define F_ARMMA2AXI1ARTRTYPE V_ARMMA2AXI1ARTRTYPE(1U)
+
+#define S_ARMMA2AXI1AWTRTYPE 2
+#define V_ARMMA2AXI1AWTRTYPE(x) ((x) << S_ARMMA2AXI1AWTRTYPE)
+#define F_ARMMA2AXI1AWTRTYPE V_ARMMA2AXI1AWTRTYPE(1U)
+
+#define S_ARMMA2AXI0ARTRTYPE 1
+#define V_ARMMA2AXI0ARTRTYPE(x) ((x) << S_ARMMA2AXI0ARTRTYPE)
+#define F_ARMMA2AXI0ARTRTYPE V_ARMMA2AXI0ARTRTYPE(1U)
+
+#define S_ARMMA2AXI0AWTRTYPE 0
+#define V_ARMMA2AXI0AWTRTYPE(x) ((x) << S_ARMMA2AXI0AWTRTYPE)
+#define F_ARMMA2AXI0AWTRTYPE V_ARMMA2AXI0AWTRTYPE(1U)
+
+#define A_ARM_AXI2PCIE_VENDOR 0x47660
+
+#define S_T7_VENDORID 4
+#define M_T7_VENDORID 0xffffU
+#define V_T7_VENDORID(x) ((x) << S_T7_VENDORID)
+#define G_T7_VENDORID(x) (((x) >> S_T7_VENDORID) & M_T7_VENDORID)
+
+#define S_OBFFCODE 0
+#define M_OBFFCODE 0xfU
+#define V_OBFFCODE(x) ((x) << S_OBFFCODE)
+#define G_OBFFCODE(x) (((x) >> S_OBFFCODE) & M_OBFFCODE)
+
+#define A_ARM_AXI2PCIE_VENMSGHDR_DW3 0x47664
+#define A_ARM_CLUSTER_SEL 0x47668
+
+#define S_ARM_CLUSTER_SEL 0
+#define V_ARM_CLUSTER_SEL(x) ((x) << S_ARM_CLUSTER_SEL)
+#define F_ARM_CLUSTER_SEL V_ARM_CLUSTER_SEL(1U)
+
+#define A_ARM_PWRREQ_PERMIT_ADB 0x4766c
+
+#define S_PWRQ_PERMIT_DENY_SAR 1
+#define V_PWRQ_PERMIT_DENY_SAR(x) ((x) << S_PWRQ_PERMIT_DENY_SAR)
+#define F_PWRQ_PERMIT_DENY_SAR V_PWRQ_PERMIT_DENY_SAR(1U)
+
+#define S_PWRQREQNS_ADB 0
+#define V_PWRQREQNS_ADB(x) ((x) << S_PWRQREQNS_ADB)
+#define F_PWRQREQNS_ADB V_PWRQREQNS_ADB(1U)
+
+#define A_ARM_CLK_REQ_ADB 0x47670
+
+#define S_CLKQREQNS_ADB 0
+#define V_CLKQREQNS_ADB(x) ((x) << S_CLKQREQNS_ADB)
+#define F_CLKQREQNS_ADB V_CLKQREQNS_ADB(1U)
+
+#define A_ARM_WAKEUPM 0x47674
+
+#define S_DFTRSTDISABLEM_ADB 2
+#define V_DFTRSTDISABLEM_ADB(x) ((x) << S_DFTRSTDISABLEM_ADB)
+#define F_DFTRSTDISABLEM_ADB V_DFTRSTDISABLEM_ADB(1U)
+
+#define S_DFTRSTDISABLES_ADB 1
+#define V_DFTRSTDISABLES_ADB(x) ((x) << S_DFTRSTDISABLES_ADB)
+#define F_DFTRSTDISABLES_ADB V_DFTRSTDISABLES_ADB(1U)
+
+#define S_WAKEUPM_I_ADB 0
+#define V_WAKEUPM_I_ADB(x) ((x) << S_WAKEUPM_I_ADB)
+#define F_WAKEUPM_I_ADB V_WAKEUPM_I_ADB(1U)
+
+#define A_ARM_CC_APB_FILTERING 0x47678
+
+#define S_CC_DFTSCANMODE 11
+#define V_CC_DFTSCANMODE(x) ((x) << S_CC_DFTSCANMODE)
+#define F_CC_DFTSCANMODE V_CC_DFTSCANMODE(1U)
+
+#define S_CC_OTP_FILTERING_DISABLE 10
+#define V_CC_OTP_FILTERING_DISABLE(x) ((x) << S_CC_OTP_FILTERING_DISABLE)
+#define F_CC_OTP_FILTERING_DISABLE V_CC_OTP_FILTERING_DISABLE(1U)
+
+#define S_CC_APB_FILTERING 0
+#define M_CC_APB_FILTERING 0x3ffU
+#define V_CC_APB_FILTERING(x) ((x) << S_CC_APB_FILTERING)
+#define G_CC_APB_FILTERING(x) (((x) >> S_CC_APB_FILTERING) & M_CC_APB_FILTERING)
+
+#define A_ARM_DCU_EN0 0x4767c
+#define A_ARM_DCU_EN1 0x47680
+#define A_ARM_DCU_EN2 0x47684
+#define A_ARM_DCU_EN3 0x47688
+#define A_ARM_DCU_LOCK0 0x4768c
+#define A_ARM_DCU_LOCK1 0x47690
+#define A_ARM_DCU_LOCK2 0x47694
+#define A_ARM_DCU_LOCK3 0x47698
+#define A_ARM_GPPC 0x4769c
+
+#define S_CC_SEC_DEBUG_RESET 24
+#define V_CC_SEC_DEBUG_RESET(x) ((x) << S_CC_SEC_DEBUG_RESET)
+#define F_CC_SEC_DEBUG_RESET V_CC_SEC_DEBUG_RESET(1U)
+
+#define S_CC_DFTSE 23
+#define V_CC_DFTSE(x) ((x) << S_CC_DFTSE)
+#define F_CC_DFTSE V_CC_DFTSE(1U)
+
+#define S_CC_DFTCGEN 22
+#define V_CC_DFTCGEN(x) ((x) << S_CC_DFTCGEN)
+#define F_CC_DFTCGEN V_CC_DFTCGEN(1U)
+
+#define S_CC_DFTRAMHOLD 21
+#define V_CC_DFTRAMHOLD(x) ((x) << S_CC_DFTRAMHOLD)
+#define F_CC_DFTRAMHOLD V_CC_DFTRAMHOLD(1U)
+
+#define S_CC_LOCK_BITS 12
+#define M_CC_LOCK_BITS 0x1ffU
+#define V_CC_LOCK_BITS(x) ((x) << S_CC_LOCK_BITS)
+#define G_CC_LOCK_BITS(x) (((x) >> S_CC_LOCK_BITS) & M_CC_LOCK_BITS)
+
+#define S_CC_LCS_IS_VALID 11
+#define V_CC_LCS_IS_VALID(x) ((x) << S_CC_LCS_IS_VALID)
+#define F_CC_LCS_IS_VALID V_CC_LCS_IS_VALID(1U)
+
+#define S_CC_LCS 8
+#define M_CC_LCS 0x7U
+#define V_CC_LCS(x) ((x) << S_CC_LCS)
+#define G_CC_LCS(x) (((x) >> S_CC_LCS) & M_CC_LCS)
+
+#define S_CC_GPPC 0
+#define M_CC_GPPC 0xffU
+#define V_CC_GPPC(x) ((x) << S_CC_GPPC)
+#define G_CC_GPPC(x) (((x) >> S_CC_GPPC) & M_CC_GPPC)
+
+#define A_ARM_EMMC 0x47700
+
+#define S_EMMC_CARD_CLK_EN 31
+#define V_EMMC_CARD_CLK_EN(x) ((x) << S_EMMC_CARD_CLK_EN)
+#define F_EMMC_CARD_CLK_EN V_EMMC_CARD_CLK_EN(1U)
+
+#define S_EMMC_LED_CONTROL 30
+#define V_EMMC_LED_CONTROL(x) ((x) << S_EMMC_LED_CONTROL)
+#define F_EMMC_LED_CONTROL V_EMMC_LED_CONTROL(1U)
+
+#define S_EMMC_UHS1_SWVOLT_EN 29
+#define V_EMMC_UHS1_SWVOLT_EN(x) ((x) << S_EMMC_UHS1_SWVOLT_EN)
+#define F_EMMC_UHS1_SWVOLT_EN V_EMMC_UHS1_SWVOLT_EN(1U)
+
+#define S_EMMC_UHS1_DRV_STH 27
+#define M_EMMC_UHS1_DRV_STH 0x3U
+#define V_EMMC_UHS1_DRV_STH(x) ((x) << S_EMMC_UHS1_DRV_STH)
+#define G_EMMC_UHS1_DRV_STH(x) (((x) >> S_EMMC_UHS1_DRV_STH) & M_EMMC_UHS1_DRV_STH)
+
+#define S_EMMC_SD_VDD1_ON 26
+#define V_EMMC_SD_VDD1_ON(x) ((x) << S_EMMC_SD_VDD1_ON)
+#define F_EMMC_SD_VDD1_ON V_EMMC_SD_VDD1_ON(1U)
+
+#define S_EMMC_SD_VDD1_SEL 23
+#define M_EMMC_SD_VDD1_SEL 0x7U
+#define V_EMMC_SD_VDD1_SEL(x) ((x) << S_EMMC_SD_VDD1_SEL)
+#define G_EMMC_SD_VDD1_SEL(x) (((x) >> S_EMMC_SD_VDD1_SEL) & M_EMMC_SD_VDD1_SEL)
+
+#define S_EMMC_INTCLK_EN 22
+#define V_EMMC_INTCLK_EN(x) ((x) << S_EMMC_INTCLK_EN)
+#define F_EMMC_INTCLK_EN V_EMMC_INTCLK_EN(1U)
+
+#define S_EMMC_CARD_CLK_FREQ_SEL 12
+#define M_EMMC_CARD_CLK_FREQ_SEL 0x3ffU
+#define V_EMMC_CARD_CLK_FREQ_SEL(x) ((x) << S_EMMC_CARD_CLK_FREQ_SEL)
+#define G_EMMC_CARD_CLK_FREQ_SEL(x) (((x) >> S_EMMC_CARD_CLK_FREQ_SEL) & M_EMMC_CARD_CLK_FREQ_SEL)
+
+#define S_EMMC_CARD_CLK_GEN_SEL 11
+#define V_EMMC_CARD_CLK_GEN_SEL(x) ((x) << S_EMMC_CARD_CLK_GEN_SEL)
+#define F_EMMC_CARD_CLK_GEN_SEL V_EMMC_CARD_CLK_GEN_SEL(1U)
+
+#define S_EMMC_CLK2CARD_ON 10
+#define V_EMMC_CLK2CARD_ON(x) ((x) << S_EMMC_CLK2CARD_ON)
+#define F_EMMC_CLK2CARD_ON V_EMMC_CLK2CARD_ON(1U)
+
+#define S_EMMC_CARD_CLK_STABLE 9
+#define V_EMMC_CARD_CLK_STABLE(x) ((x) << S_EMMC_CARD_CLK_STABLE)
+#define F_EMMC_CARD_CLK_STABLE V_EMMC_CARD_CLK_STABLE(1U)
+
+#define S_EMMC_INT_BCLK_STABLE 8
+#define V_EMMC_INT_BCLK_STABLE(x) ((x) << S_EMMC_INT_BCLK_STABLE)
+#define F_EMMC_INT_BCLK_STABLE V_EMMC_INT_BCLK_STABLE(1U)
+
+#define S_EMMC_INT_ACLK_STABLE 7
+#define V_EMMC_INT_ACLK_STABLE(x) ((x) << S_EMMC_INT_ACLK_STABLE)
+#define F_EMMC_INT_ACLK_STABLE V_EMMC_INT_ACLK_STABLE(1U)
+
+#define S_EMMC_INT_TMCLK_STABLE 6
+#define V_EMMC_INT_TMCLK_STABLE(x) ((x) << S_EMMC_INT_TMCLK_STABLE)
+#define F_EMMC_INT_TMCLK_STABLE V_EMMC_INT_TMCLK_STABLE(1U)
+
+#define S_EMMC_HOST_REG_VOL_STABLE 5
+#define V_EMMC_HOST_REG_VOL_STABLE(x) ((x) << S_EMMC_HOST_REG_VOL_STABLE)
+#define F_EMMC_HOST_REG_VOL_STABLE V_EMMC_HOST_REG_VOL_STABLE(1U)
+
+#define S_EMMC_CARD_DETECT_N 4
+#define V_EMMC_CARD_DETECT_N(x) ((x) << S_EMMC_CARD_DETECT_N)
+#define F_EMMC_CARD_DETECT_N V_EMMC_CARD_DETECT_N(1U)
+
+#define S_EMMC_CARD_WRITE_PROT 3
+#define V_EMMC_CARD_WRITE_PROT(x) ((x) << S_EMMC_CARD_WRITE_PROT)
+#define F_EMMC_CARD_WRITE_PROT V_EMMC_CARD_WRITE_PROT(1U)
+
+#define S_EMMC_GP_IN 2
+#define V_EMMC_GP_IN(x) ((x) << S_EMMC_GP_IN)
+#define F_EMMC_GP_IN V_EMMC_GP_IN(1U)
+
+#define S_EMMC_TEST_SCAN_MODE 1
+#define V_EMMC_TEST_SCAN_MODE(x) ((x) << S_EMMC_TEST_SCAN_MODE)
+#define F_EMMC_TEST_SCAN_MODE V_EMMC_TEST_SCAN_MODE(1U)
+
+#define S_EMMC_FIFOINJDATAERR 0
+#define V_EMMC_FIFOINJDATAERR(x) ((x) << S_EMMC_FIFOINJDATAERR)
+#define F_EMMC_FIFOINJDATAERR V_EMMC_FIFOINJDATAERR(1U)
+
+#define A_ARM_WAKEUPS 0x47704
+
+#define S_WAKEUPS_I_ADB 0
+#define V_WAKEUPS_I_ADB(x) ((x) << S_WAKEUPS_I_ADB)
+#define F_WAKEUPS_I_ADB V_WAKEUPS_I_ADB(1U)
+
+#define A_ARM_CLKREQNM_ADB 0x47708
+
+#define S_CLKQREQNM_ADB 0
+#define V_CLKQREQNM_ADB(x) ((x) << S_CLKQREQNM_ADB)
+#define F_CLKQREQNM_ADB V_CLKQREQNM_ADB(1U)
+
+#define A_ARM_ATOMICDATA0_0 0x4770c
+#define A_ARM_ATOMICDATA1_0 0x47710
+#define A_ARM_NVME_DB_EMU_INT_ENABLE 0x47740
+#define A_ARM_TCAM_WRITE_DATA 0x47744
+
+#define S_TCAM_WRITE_DATA 0
+#define M_TCAM_WRITE_DATA 0x3fffffffU
+#define V_TCAM_WRITE_DATA(x) ((x) << S_TCAM_WRITE_DATA)
+#define G_TCAM_WRITE_DATA(x) (((x) >> S_TCAM_WRITE_DATA) & M_TCAM_WRITE_DATA)
+
+#define A_ARM_TCAM_WRITE_ADDR 0x47748
+
+#define S_TCAM_WRITE_ADDR 0
+#define M_TCAM_WRITE_ADDR 0x1ffU
+#define V_TCAM_WRITE_ADDR(x) ((x) << S_TCAM_WRITE_ADDR)
+#define G_TCAM_WRITE_ADDR(x) (((x) >> S_TCAM_WRITE_ADDR) & M_TCAM_WRITE_ADDR)
+
+#define A_ARM_TCAM_READ_ADDR 0x4774c
+
+#define S_TCAM_READ_ADDR 0
+#define M_TCAM_READ_ADDR 0x1ffU
+#define V_TCAM_READ_ADDR(x) ((x) << S_TCAM_READ_ADDR)
+#define G_TCAM_READ_ADDR(x) (((x) >> S_TCAM_READ_ADDR) & M_TCAM_READ_ADDR)
+
+#define A_ARM_TCAM_CTL 0x47750
+
+#define S_TCAMCBBUSY 6
+#define V_TCAMCBBUSY(x) ((x) << S_TCAMCBBUSY)
+#define F_TCAMCBBUSY V_TCAMCBBUSY(1U)
+
+#define S_TCAMCBPASS 5
+#define V_TCAMCBPASS(x) ((x) << S_TCAMCBPASS)
+#define F_TCAMCBPASS V_TCAMCBPASS(1U)
+
+#define S_TCAMCBSTART 4
+#define V_TCAMCBSTART(x) ((x) << S_TCAMCBSTART)
+#define F_TCAMCBSTART V_TCAMCBSTART(1U)
+
+#define S_TCAMRSTCB 3
+#define V_TCAMRSTCB(x) ((x) << S_TCAMRSTCB)
+#define F_TCAMRSTCB V_TCAMRSTCB(1U)
+
+#define S_TCAM_REQBITPOS 2
+#define V_TCAM_REQBITPOS(x) ((x) << S_TCAM_REQBITPOS)
+#define F_TCAM_REQBITPOS V_TCAM_REQBITPOS(1U)
+
+#define S_TCAM_WRITE 1
+#define V_TCAM_WRITE(x) ((x) << S_TCAM_WRITE)
+#define F_TCAM_WRITE V_TCAM_WRITE(1U)
+
+#define S_TCAM_ENABLE 0
+#define V_TCAM_ENABLE(x) ((x) << S_TCAM_ENABLE)
+#define F_TCAM_ENABLE V_TCAM_ENABLE(1U)
+
+#define A_ARM_TCAM_READ_DATA 0x4775c
+
+#define S_TCAM_READ_DATA 0
+#define M_TCAM_READ_DATA 0x3fffffffU
+#define V_TCAM_READ_DATA(x) ((x) << S_TCAM_READ_DATA)
+#define G_TCAM_READ_DATA(x) (((x) >> S_TCAM_READ_DATA) & M_TCAM_READ_DATA)
+
+#define A_ARM_SRAM1_WRITE_DATA 0x47760
+
+#define S_SRAM1_WRITE_DATA 0
+#define M_SRAM1_WRITE_DATA 0x7fffffU
+#define V_SRAM1_WRITE_DATA(x) ((x) << S_SRAM1_WRITE_DATA)
+#define G_SRAM1_WRITE_DATA(x) (((x) >> S_SRAM1_WRITE_DATA) & M_SRAM1_WRITE_DATA)
+
+#define A_ARM_SRAM1_WRITE_ADDR 0x47764
+
+#define S_SRAM1_WRITE_ADDR 0
+#define M_SRAM1_WRITE_ADDR 0x1ffU
+#define V_SRAM1_WRITE_ADDR(x) ((x) << S_SRAM1_WRITE_ADDR)
+#define G_SRAM1_WRITE_ADDR(x) (((x) >> S_SRAM1_WRITE_ADDR) & M_SRAM1_WRITE_ADDR)
+
+#define A_ARM_SRAM1_READ_ADDR 0x47768
+
+#define S_SRAM1_READ_ADDR 0
+#define M_SRAM1_READ_ADDR 0x1ffU
+#define V_SRAM1_READ_ADDR(x) ((x) << S_SRAM1_READ_ADDR)
+#define G_SRAM1_READ_ADDR(x) (((x) >> S_SRAM1_READ_ADDR) & M_SRAM1_READ_ADDR)
+
+#define A_ARM_SRAM1_CTL 0x4776c
+
+#define S_SRAM1_WRITE 1
+#define V_SRAM1_WRITE(x) ((x) << S_SRAM1_WRITE)
+#define F_SRAM1_WRITE V_SRAM1_WRITE(1U)
+
+#define S_SRAM1_ENABLE 0
+#define V_SRAM1_ENABLE(x) ((x) << S_SRAM1_ENABLE)
+#define F_SRAM1_ENABLE V_SRAM1_ENABLE(1U)
+
+#define A_ARM_SRAM1_READ_DATA 0x47770
+
+#define S_SRAM1_READ_DATA 0
+#define M_SRAM1_READ_DATA 0x7fffffU
+#define V_SRAM1_READ_DATA(x) ((x) << S_SRAM1_READ_DATA)
+#define G_SRAM1_READ_DATA(x) (((x) >> S_SRAM1_READ_DATA) & M_SRAM1_READ_DATA)
+
+#define A_ARM_SRAM2_WRITE_DATA0 0x47774
+#define A_ARM_SRAM2_WRITE_DATA1 0x47778
+#define A_ARM_SRAM2_WRITE_DATA2 0x4777c
+#define A_ARM_SRAM2_WRITE_ADDR 0x47780
+
+#define S_SRAM2_WRITE_ADDR 0
+#define M_SRAM2_WRITE_ADDR 0x1fffU
+#define V_SRAM2_WRITE_ADDR(x) ((x) << S_SRAM2_WRITE_ADDR)
+#define G_SRAM2_WRITE_ADDR(x) (((x) >> S_SRAM2_WRITE_ADDR) & M_SRAM2_WRITE_ADDR)
+
+#define A_ARM_SRAM2_READ_ADDR 0x47784
+
+#define S_SRAM2_READ_ADDR 0
+#define M_SRAM2_READ_ADDR 0x1fffU
+#define V_SRAM2_READ_ADDR(x) ((x) << S_SRAM2_READ_ADDR)
+#define G_SRAM2_READ_ADDR(x) (((x) >> S_SRAM2_READ_ADDR) & M_SRAM2_READ_ADDR)
+
+#define A_ARM_SRAM2_CTL 0x47788
+
+#define S_SRAM2_WRITE 1
+#define V_SRAM2_WRITE(x) ((x) << S_SRAM2_WRITE)
+#define F_SRAM2_WRITE V_SRAM2_WRITE(1U)
+
+#define S_SRAM2_ENABLE 0
+#define V_SRAM2_ENABLE(x) ((x) << S_SRAM2_ENABLE)
+#define F_SRAM2_ENABLE V_SRAM2_ENABLE(1U)
+
+#define A_ARM_SRAM2_READ_DATA0 0x4778c
+#define A_ARM_SRAM2_READ_DATA1 0x47790
+#define A_ARM_SRAM2_READ_DATA2 0x47794
+#define A_ARM_DBPROC_SRAM_CTL 0x47798
+
+#define S_DBPROC_RD_EN 0
+#define V_DBPROC_RD_EN(x) ((x) << S_DBPROC_RD_EN)
+#define F_DBPROC_RD_EN V_DBPROC_RD_EN(1U)
+
+#define A_ARM_DBPROC_SRAM_READ_ADDR 0x4779c
+
+#define S_DBPROC_RD_ADDR 0
+#define M_DBPROC_RD_ADDR 0x1ffU
+#define V_DBPROC_RD_ADDR(x) ((x) << S_DBPROC_RD_ADDR)
+#define G_DBPROC_RD_ADDR(x) (((x) >> S_DBPROC_RD_ADDR) & M_DBPROC_RD_ADDR)
+
+#define A_ARM_DBPROC_SRAM_READ_DATA0 0x477a0
+#define A_ARM_DBPROC_SRAM_READ_DATA1 0x477a4
+#define A_ARM_DBPROC_SRAM_READ_DATA2 0x477a8
+#define A_ARM_DBPROC_SRAM_READ_DATA3 0x477ac
+#define A_ARM_ATOMICDATA0_1 0x477b0
+#define A_ARM_ATOMICDATA1_1 0x477b4
+#define A_ARM_SPIDEN 0x477b8
+
+#define S_SPIDEN 0
+#define V_SPIDEN(x) ((x) << S_SPIDEN)
+#define F_SPIDEN V_SPIDEN(1U)
+
+#define A_ARM_RC_INT_WRITE_DATA 0x477bc
+
+#define S_RC_INT_STATUS_WRITE_DATA 0
+#define M_RC_INT_STATUS_WRITE_DATA 0x3fU
+#define V_RC_INT_STATUS_WRITE_DATA(x) ((x) << S_RC_INT_STATUS_WRITE_DATA)
+#define G_RC_INT_STATUS_WRITE_DATA(x) (((x) >> S_RC_INT_STATUS_WRITE_DATA) & M_RC_INT_STATUS_WRITE_DATA)
+
+#define A_ARM_DFT_MBI 0x477c4
+
+#define S_MBISTREQ 3
+#define V_MBISTREQ(x) ((x) << S_MBISTREQ)
+#define F_MBISTREQ V_MBISTREQ(1U)
+
+#define S_MBISTRESETN 2
+#define V_MBISTRESETN(x) ((x) << S_MBISTRESETN)
+#define F_MBISTRESETN V_MBISTRESETN(1U)
+
+#define S_DFTRAMHOLD 1
+#define V_DFTRAMHOLD(x) ((x) << S_DFTRAMHOLD)
+#define F_DFTRAMHOLD V_DFTRAMHOLD(1U)
+
+#define S_DFTCGEN 0
+#define V_DFTCGEN(x) ((x) << S_DFTCGEN)
+#define F_DFTCGEN V_DFTCGEN(1U)
+
+#define A_ARM_DBPROC_SRAM_TH_CTL 0x477c8
+
+#define S_DBPROC_TH_WR_EN 1
+#define V_DBPROC_TH_WR_EN(x) ((x) << S_DBPROC_TH_WR_EN)
+#define F_DBPROC_TH_WR_EN V_DBPROC_TH_WR_EN(1U)
+
+#define S_DBPROC_TH_RD_EN 0
+#define V_DBPROC_TH_RD_EN(x) ((x) << S_DBPROC_TH_RD_EN)
+#define F_DBPROC_TH_RD_EN V_DBPROC_TH_RD_EN(1U)
+
+#define A_ARM_MBISTACK 0x477d4
+
+#define S_MBISTACK 0
+#define V_MBISTACK(x) ((x) << S_MBISTACK)
+#define F_MBISTACK V_MBISTACK(1U)
+
+#define A_ARM_MBISTADDR 0x477d8
+
+#define S_MBISTADDR 0
+#define M_MBISTADDR 0xfffU
+#define V_MBISTADDR(x) ((x) << S_MBISTADDR)
+#define G_MBISTADDR(x) (((x) >> S_MBISTADDR) & M_MBISTADDR)
+
+#define A_ARM_MBISTREADEN 0x477dc
+
+#define S_MBISTREADEN 0
+#define V_MBISTREADEN(x) ((x) << S_MBISTREADEN)
+#define F_MBISTREADEN V_MBISTREADEN(1U)
+
+#define A_ARM_MBISTWRITEEN 0x477e0
+
+#define S_MBISTWRITEEN 0
+#define V_MBISTWRITEEN(x) ((x) << S_MBISTWRITEEN)
+#define F_MBISTWRITEEN V_MBISTWRITEEN(1U)
+
+#define A_ARM_MBISTARRAY 0x477e4
+
+#define S_MBISTARRAY 0
+#define M_MBISTARRAY 0x3U
+#define V_MBISTARRAY(x) ((x) << S_MBISTARRAY)
+#define G_MBISTARRAY(x) (((x) >> S_MBISTARRAY) & M_MBISTARRAY)
+
+#define A_ARM_MBISTCFG 0x477e8
+
+#define S_MBISTCFG 0
+#define V_MBISTCFG(x) ((x) << S_MBISTCFG)
+#define F_MBISTCFG V_MBISTCFG(1U)
+
+#define A_ARM_MBISTINDATA0 0x477ec
+#define A_ARM_MBISTINDATA1 0x477f0
+#define A_ARM_MBISTOUTDATA1 0x477f4
+#define A_ARM_MBISTOUTDATA0 0x477f8
+#define A_ARM_NVME_DB_EMU_EN 0x477fc
+
+#define S_NVME_DB_EN 0
+#define V_NVME_DB_EN(x) ((x) << S_NVME_DB_EN)
+#define F_NVME_DB_EN V_NVME_DB_EN(1U)
+
+/* registers for module MC_T70 */
+/*
+ * Generated field-macro conventions used throughout this block:
+ *   S_<FIELD>    bit shift of the field within the register
+ *   M_<FIELD>    field mask (already right-justified, applied after shift)
+ *   V_<FIELD>(x) place a value into the field's bit position
+ *   G_<FIELD>(x) extract the field's value from a register word
+ *   F_<FIELD>    single-bit flag form, V_<FIELD>(1U)
+ */
+#define MC_T70_BASE_ADDR 0x48000
+
+/*
+ * NOTE(review): A_MC_IND_ADDR/A_MC_IND_DATA appear to form an indirect
+ * access window (address register + data register), with T7_AUTOINCR
+ * presumably selecting an auto-increment mode for the 25-bit target
+ * address in IND_ADDR_ADDR — TODO confirm against the MC block spec.
+ */
+#define A_MC_IND_ADDR 0x48000
+
+#define S_T7_AUTOINCR 30
+#define M_T7_AUTOINCR 0x3U
+#define V_T7_AUTOINCR(x) ((x) << S_T7_AUTOINCR)
+#define G_T7_AUTOINCR(x) (((x) >> S_T7_AUTOINCR) & M_T7_AUTOINCR)
+
+#define S_IND_ADDR_ADDR 0
+#define M_IND_ADDR_ADDR 0x1ffffffU
+#define V_IND_ADDR_ADDR(x) ((x) << S_IND_ADDR_ADDR)
+#define G_IND_ADDR_ADDR(x) (((x) >> S_IND_ADDR_ADDR) & M_IND_ADDR_ADDR)
+
+#define A_MC_IND_DATA 0x48004
+#define A_MC_DBG_CTL 0x48018
+#define A_MC_DBG_DATA 0x4801c
+#define A_T7_MC_P_DDRPHY_RST_CTRL 0x49300
+#define A_T7_MC_P_PERFORMANCE_CTRL 0x49304
+#define A_T7_MC_P_ECC_CTRL 0x49308
+
+#define S_BISTECCHBWCTL 7
+#define M_BISTECCHBWCTL 0x3U
+#define V_BISTECCHBWCTL(x) ((x) << S_BISTECCHBWCTL)
+#define G_BISTECCHBWCTL(x) (((x) >> S_BISTECCHBWCTL) & M_BISTECCHBWCTL)
+
+#define S_BISTTESTMODE 6
+#define V_BISTTESTMODE(x) ((x) << S_BISTTESTMODE)
+#define F_BISTTESTMODE V_BISTTESTMODE(1U)
+
+#define S_RMW_CTL_CFG 4
+#define M_RMW_CTL_CFG 0x3U
+#define V_RMW_CTL_CFG(x) ((x) << S_RMW_CTL_CFG)
+#define G_RMW_CTL_CFG(x) (((x) >> S_RMW_CTL_CFG) & M_RMW_CTL_CFG)
+
+#define A_MC_P_DDRCTL_INT_ENABLE 0x4930c
+
+#define S_HIF_WDATA_PTR_ADDR_ERR_DCH1_ENABLE 5
+#define V_HIF_WDATA_PTR_ADDR_ERR_DCH1_ENABLE(x) ((x) << S_HIF_WDATA_PTR_ADDR_ERR_DCH1_ENABLE)
+#define F_HIF_WDATA_PTR_ADDR_ERR_DCH1_ENABLE V_HIF_WDATA_PTR_ADDR_ERR_DCH1_ENABLE(1U)
+
+#define S_HIF_RDATA_CRC_ERR_DCH1_ENABLE 4
+#define V_HIF_RDATA_CRC_ERR_DCH1_ENABLE(x) ((x) << S_HIF_RDATA_CRC_ERR_DCH1_ENABLE)
+#define F_HIF_RDATA_CRC_ERR_DCH1_ENABLE V_HIF_RDATA_CRC_ERR_DCH1_ENABLE(1U)
+
+#define S_HIF_RDATA_ADDR_ERR_DCH1_ENABLE 3
+#define V_HIF_RDATA_ADDR_ERR_DCH1_ENABLE(x) ((x) << S_HIF_RDATA_ADDR_ERR_DCH1_ENABLE)
+#define F_HIF_RDATA_ADDR_ERR_DCH1_ENABLE V_HIF_RDATA_ADDR_ERR_DCH1_ENABLE(1U)
+
+#define S_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_ENABLE 2
+#define V_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_ENABLE(x) ((x) << S_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_ENABLE)
+#define F_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_ENABLE V_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_ENABLE(1U)
+
+#define S_HIF_RDATA_CRC_ERR_INTR_DCH0_ENABLE 1
+#define V_HIF_RDATA_CRC_ERR_INTR_DCH0_ENABLE(x) ((x) << S_HIF_RDATA_CRC_ERR_INTR_DCH0_ENABLE)
+#define F_HIF_RDATA_CRC_ERR_INTR_DCH0_ENABLE V_HIF_RDATA_CRC_ERR_INTR_DCH0_ENABLE(1U)
+
+#define S_HIF_RDATA_ADDR_ERR_INTR_DCH0_ENABLE 0
+#define V_HIF_RDATA_ADDR_ERR_INTR_DCH0_ENABLE(x) ((x) << S_HIF_RDATA_ADDR_ERR_INTR_DCH0_ENABLE)
+#define F_HIF_RDATA_ADDR_ERR_INTR_DCH0_ENABLE V_HIF_RDATA_ADDR_ERR_INTR_DCH0_ENABLE(1U)
+
+#define A_MC_P_DDRCTL_INT_CAUSE 0x49310
+
+#define S_WR_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE 25
+#define V_WR_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE(x) ((x) << S_WR_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE)
+#define F_WR_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE V_WR_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE(1U)
+
+#define S_WR_CRC_ERR_INTR_DCH1_CAUSE 24
+#define V_WR_CRC_ERR_INTR_DCH1_CAUSE(x) ((x) << S_WR_CRC_ERR_INTR_DCH1_CAUSE)
+#define F_WR_CRC_ERR_INTR_DCH1_CAUSE V_WR_CRC_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_CAPAR_ERR_MAX_REACHED_INTR_DCH1_CAUSE 23
+#define V_CAPAR_ERR_MAX_REACHED_INTR_DCH1_CAUSE(x) ((x) << S_CAPAR_ERR_MAX_REACHED_INTR_DCH1_CAUSE)
+#define F_CAPAR_ERR_MAX_REACHED_INTR_DCH1_CAUSE V_CAPAR_ERR_MAX_REACHED_INTR_DCH1_CAUSE(1U)
+
+#define S_RD_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE 22
+#define V_RD_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE(x) ((x) << S_RD_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE)
+#define F_RD_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE V_RD_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE(1U)
+
+#define S_DERATE_TEMP_LIMIT_INTR_DCH1_CAUSE 21
+#define V_DERATE_TEMP_LIMIT_INTR_DCH1_CAUSE(x) ((x) << S_DERATE_TEMP_LIMIT_INTR_DCH1_CAUSE)
+#define F_DERATE_TEMP_LIMIT_INTR_DCH1_CAUSE V_DERATE_TEMP_LIMIT_INTR_DCH1_CAUSE(1U)
+
+#define S_SWCMD_ERR_INTR_DCH1_CAUSE 20
+#define V_SWCMD_ERR_INTR_DCH1_CAUSE(x) ((x) << S_SWCMD_ERR_INTR_DCH1_CAUSE)
+#define F_SWCMD_ERR_INTR_DCH1_CAUSE V_SWCMD_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_DUCMD_ERR_INTR_DCH1_CAUSE 19
+#define V_DUCMD_ERR_INTR_DCH1_CAUSE(x) ((x) << S_DUCMD_ERR_INTR_DCH1_CAUSE)
+#define F_DUCMD_ERR_INTR_DCH1_CAUSE V_DUCMD_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_LCCMD_ERR_INTR_DCH1_CAUSE 18
+#define V_LCCMD_ERR_INTR_DCH1_CAUSE(x) ((x) << S_LCCMD_ERR_INTR_DCH1_CAUSE)
+#define F_LCCMD_ERR_INTR_DCH1_CAUSE V_LCCMD_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_CTRLUPD_ERR_INTR_DCH1_CAUSE 17
+#define V_CTRLUPD_ERR_INTR_DCH1_CAUSE(x) ((x) << S_CTRLUPD_ERR_INTR_DCH1_CAUSE)
+#define F_CTRLUPD_ERR_INTR_DCH1_CAUSE V_CTRLUPD_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_RFM_ALERT_INTR_DCH1_CAUSE 16
+#define V_RFM_ALERT_INTR_DCH1_CAUSE(x) ((x) << S_RFM_ALERT_INTR_DCH1_CAUSE)
+#define F_RFM_ALERT_INTR_DCH1_CAUSE V_RFM_ALERT_INTR_DCH1_CAUSE(1U)
+
+#define S_WR_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE 15
+#define V_WR_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE(x) ((x) << S_WR_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE)
+#define F_WR_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE V_WR_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE(1U)
+
+#define S_WR_CRC_ERR_INTR_DCH0_CAUSE 14
+#define V_WR_CRC_ERR_INTR_DCH0_CAUSE(x) ((x) << S_WR_CRC_ERR_INTR_DCH0_CAUSE)
+#define F_WR_CRC_ERR_INTR_DCH0_CAUSE V_WR_CRC_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_CAPAR_ERR_MAX_REACHED_INTR_DCH0_CAUSE 13
+#define V_CAPAR_ERR_MAX_REACHED_INTR_DCH0_CAUSE(x) ((x) << S_CAPAR_ERR_MAX_REACHED_INTR_DCH0_CAUSE)
+#define F_CAPAR_ERR_MAX_REACHED_INTR_DCH0_CAUSE V_CAPAR_ERR_MAX_REACHED_INTR_DCH0_CAUSE(1U)
+
+#define S_RD_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE 12
+#define V_RD_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE(x) ((x) << S_RD_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE)
+#define F_RD_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE V_RD_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE(1U)
+
+#define S_DERATE_TEMP_LIMIT_INTR_DCH0_CAUSE 11
+#define V_DERATE_TEMP_LIMIT_INTR_DCH0_CAUSE(x) ((x) << S_DERATE_TEMP_LIMIT_INTR_DCH0_CAUSE)
+#define F_DERATE_TEMP_LIMIT_INTR_DCH0_CAUSE V_DERATE_TEMP_LIMIT_INTR_DCH0_CAUSE(1U)
+
+#define S_SWCMD_ERR_INTR_DCH0_CAUSE 10
+#define V_SWCMD_ERR_INTR_DCH0_CAUSE(x) ((x) << S_SWCMD_ERR_INTR_DCH0_CAUSE)
+#define F_SWCMD_ERR_INTR_DCH0_CAUSE V_SWCMD_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_DUCMD_ERR_INTR_DCH0_CAUSE 9
+#define V_DUCMD_ERR_INTR_DCH0_CAUSE(x) ((x) << S_DUCMD_ERR_INTR_DCH0_CAUSE)
+#define F_DUCMD_ERR_INTR_DCH0_CAUSE V_DUCMD_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_LCCMD_ERR_INTR_DCH0_CAUSE 8
+#define V_LCCMD_ERR_INTR_DCH0_CAUSE(x) ((x) << S_LCCMD_ERR_INTR_DCH0_CAUSE)
+#define F_LCCMD_ERR_INTR_DCH0_CAUSE V_LCCMD_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_CTRLUPD_ERR_INTR_DCH0_CAUSE 7
+#define V_CTRLUPD_ERR_INTR_DCH0_CAUSE(x) ((x) << S_CTRLUPD_ERR_INTR_DCH0_CAUSE)
+#define F_CTRLUPD_ERR_INTR_DCH0_CAUSE V_CTRLUPD_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_RFM_ALERT_INTR_DCH0_CAUSE 6
+#define V_RFM_ALERT_INTR_DCH0_CAUSE(x) ((x) << S_RFM_ALERT_INTR_DCH0_CAUSE)
+#define F_RFM_ALERT_INTR_DCH0_CAUSE V_RFM_ALERT_INTR_DCH0_CAUSE(1U)
+
+#define S_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH1_CAUSE 5
+#define V_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH1_CAUSE(x) ((x) << S_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH1_CAUSE)
+#define F_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH1_CAUSE V_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_HIF_RDATA_CRC_ERR_INTR_DCH1_CAUSE 4
+#define V_HIF_RDATA_CRC_ERR_INTR_DCH1_CAUSE(x) ((x) << S_HIF_RDATA_CRC_ERR_INTR_DCH1_CAUSE)
+#define F_HIF_RDATA_CRC_ERR_INTR_DCH1_CAUSE V_HIF_RDATA_CRC_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_HIF_RDATA_ADDR_ERR_INTR_DCH1_CAUSE 3
+#define V_HIF_RDATA_ADDR_ERR_INTR_DCH1_CAUSE(x) ((x) << S_HIF_RDATA_ADDR_ERR_INTR_DCH1_CAUSE)
+#define F_HIF_RDATA_ADDR_ERR_INTR_DCH1_CAUSE V_HIF_RDATA_ADDR_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_CAUSE 2
+#define V_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_CAUSE(x) ((x) << S_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_CAUSE)
+#define F_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_CAUSE V_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_HIF_RDATA_CRC_ERR_INTR_DCH0_CAUSE 1
+#define V_HIF_RDATA_CRC_ERR_INTR_DCH0_CAUSE(x) ((x) << S_HIF_RDATA_CRC_ERR_INTR_DCH0_CAUSE)
+#define F_HIF_RDATA_CRC_ERR_INTR_DCH0_CAUSE V_HIF_RDATA_CRC_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_HIF_RDATA_ADDR_ERR_INTR_DCH0_CAUSE 0
+#define V_HIF_RDATA_ADDR_ERR_INTR_DCH0_CAUSE(x) ((x) << S_HIF_RDATA_ADDR_ERR_INTR_DCH0_CAUSE)
+#define F_HIF_RDATA_ADDR_ERR_INTR_DCH0_CAUSE V_HIF_RDATA_ADDR_ERR_INTR_DCH0_CAUSE(1U)
+
+#define A_T7_MC_P_PAR_ENABLE 0x49314
+
+#define S_HIF_WDATA_Q_PARERR_DCH1_ENABLE 13
+#define V_HIF_WDATA_Q_PARERR_DCH1_ENABLE(x) ((x) << S_HIF_WDATA_Q_PARERR_DCH1_ENABLE)
+#define F_HIF_WDATA_Q_PARERR_DCH1_ENABLE V_HIF_WDATA_Q_PARERR_DCH1_ENABLE(1U)
+
+#define S_DDRCTL_ECC_CE_PAR_DCH1_ENABLE 12
+#define V_DDRCTL_ECC_CE_PAR_DCH1_ENABLE(x) ((x) << S_DDRCTL_ECC_CE_PAR_DCH1_ENABLE)
+#define F_DDRCTL_ECC_CE_PAR_DCH1_ENABLE V_DDRCTL_ECC_CE_PAR_DCH1_ENABLE(1U)
+
+#define S_DDRCTL_ECC_CE_PAR_DCH0_ENABLE 11
+#define V_DDRCTL_ECC_CE_PAR_DCH0_ENABLE(x) ((x) << S_DDRCTL_ECC_CE_PAR_DCH0_ENABLE)
+#define F_DDRCTL_ECC_CE_PAR_DCH0_ENABLE V_DDRCTL_ECC_CE_PAR_DCH0_ENABLE(1U)
+
+#define S_DDRCTL_ECC_UE_PAR_DCH1_ENABLE 10
+#define V_DDRCTL_ECC_UE_PAR_DCH1_ENABLE(x) ((x) << S_DDRCTL_ECC_UE_PAR_DCH1_ENABLE)
+#define F_DDRCTL_ECC_UE_PAR_DCH1_ENABLE V_DDRCTL_ECC_UE_PAR_DCH1_ENABLE(1U)
+
+#define S_DDRCTL_ECC_UE_PAR_DCH0_ENABLE 9
+#define V_DDRCTL_ECC_UE_PAR_DCH0_ENABLE(x) ((x) << S_DDRCTL_ECC_UE_PAR_DCH0_ENABLE)
+#define F_DDRCTL_ECC_UE_PAR_DCH0_ENABLE V_DDRCTL_ECC_UE_PAR_DCH0_ENABLE(1U)
+
+#define S_WDATARAM_PARERR_DCH1_ENABLE 8
+#define V_WDATARAM_PARERR_DCH1_ENABLE(x) ((x) << S_WDATARAM_PARERR_DCH1_ENABLE)
+#define F_WDATARAM_PARERR_DCH1_ENABLE V_WDATARAM_PARERR_DCH1_ENABLE(1U)
+
+#define S_WDATARAM_PARERR_DCH0_ENABLE 7
+#define V_WDATARAM_PARERR_DCH0_ENABLE(x) ((x) << S_WDATARAM_PARERR_DCH0_ENABLE)
+#define F_WDATARAM_PARERR_DCH0_ENABLE V_WDATARAM_PARERR_DCH0_ENABLE(1U)
+
+#define S_BIST_ADDR_FIFO_PARERR_ENABLE 6
+#define V_BIST_ADDR_FIFO_PARERR_ENABLE(x) ((x) << S_BIST_ADDR_FIFO_PARERR_ENABLE)
+#define F_BIST_ADDR_FIFO_PARERR_ENABLE V_BIST_ADDR_FIFO_PARERR_ENABLE(1U)
+
+#define S_BIST_ERR_ADDR_FIFO_PARERR_ENABLE 5
+#define V_BIST_ERR_ADDR_FIFO_PARERR_ENABLE(x) ((x) << S_BIST_ERR_ADDR_FIFO_PARERR_ENABLE)
+#define F_BIST_ERR_ADDR_FIFO_PARERR_ENABLE V_BIST_ERR_ADDR_FIFO_PARERR_ENABLE(1U)
+
+#define S_HIF_WDATA_Q_PARERR_DCH0_ENABLE 4
+#define V_HIF_WDATA_Q_PARERR_DCH0_ENABLE(x) ((x) << S_HIF_WDATA_Q_PARERR_DCH0_ENABLE)
+#define F_HIF_WDATA_Q_PARERR_DCH0_ENABLE V_HIF_WDATA_Q_PARERR_DCH0_ENABLE(1U)
+
+#define S_HIF_RSPDATA_Q_PARERR_DCH1_ENABLE 3
+#define V_HIF_RSPDATA_Q_PARERR_DCH1_ENABLE(x) ((x) << S_HIF_RSPDATA_Q_PARERR_DCH1_ENABLE)
+#define F_HIF_RSPDATA_Q_PARERR_DCH1_ENABLE V_HIF_RSPDATA_Q_PARERR_DCH1_ENABLE(1U)
+
+#define S_HIF_RSPDATA_Q_PARERR_DCH0_ENABLE 2
+#define V_HIF_RSPDATA_Q_PARERR_DCH0_ENABLE(x) ((x) << S_HIF_RSPDATA_Q_PARERR_DCH0_ENABLE)
+#define F_HIF_RSPDATA_Q_PARERR_DCH0_ENABLE V_HIF_RSPDATA_Q_PARERR_DCH0_ENABLE(1U)
+
+#define S_HIF_WDATA_MASK_FIFO_PARERR_DCH1_ENABLE 1
+#define V_HIF_WDATA_MASK_FIFO_PARERR_DCH1_ENABLE(x) ((x) << S_HIF_WDATA_MASK_FIFO_PARERR_DCH1_ENABLE)
+#define F_HIF_WDATA_MASK_FIFO_PARERR_DCH1_ENABLE V_HIF_WDATA_MASK_FIFO_PARERR_DCH1_ENABLE(1U)
+
+#define S_HIF_WDATA_MASK_FIFO_PARERR_DCH0_ENABLE 0
+#define V_HIF_WDATA_MASK_FIFO_PARERR_DCH0_ENABLE(x) ((x) << S_HIF_WDATA_MASK_FIFO_PARERR_DCH0_ENABLE)
+#define F_HIF_WDATA_MASK_FIFO_PARERR_DCH0_ENABLE V_HIF_WDATA_MASK_FIFO_PARERR_DCH0_ENABLE(1U)
+
+#define A_T7_MC_P_PAR_CAUSE 0x49318
+
+#define S_HIF_WDATA_Q_PARERR_DCH1_CAUSE 13
+#define V_HIF_WDATA_Q_PARERR_DCH1_CAUSE(x) ((x) << S_HIF_WDATA_Q_PARERR_DCH1_CAUSE)
+#define F_HIF_WDATA_Q_PARERR_DCH1_CAUSE V_HIF_WDATA_Q_PARERR_DCH1_CAUSE(1U)
+
+#define S_DDRCTL_ECC_CE_PAR_DCH1_CAUSE 12
+#define V_DDRCTL_ECC_CE_PAR_DCH1_CAUSE(x) ((x) << S_DDRCTL_ECC_CE_PAR_DCH1_CAUSE)
+#define F_DDRCTL_ECC_CE_PAR_DCH1_CAUSE V_DDRCTL_ECC_CE_PAR_DCH1_CAUSE(1U)
+
+#define S_DDRCTL_ECC_CE_PAR_DCH0_CAUSE 11
+#define V_DDRCTL_ECC_CE_PAR_DCH0_CAUSE(x) ((x) << S_DDRCTL_ECC_CE_PAR_DCH0_CAUSE)
+#define F_DDRCTL_ECC_CE_PAR_DCH0_CAUSE V_DDRCTL_ECC_CE_PAR_DCH0_CAUSE(1U)
+
+#define S_DDRCTL_ECC_UE_PAR_DCH1_CAUSE 10
+#define V_DDRCTL_ECC_UE_PAR_DCH1_CAUSE(x) ((x) << S_DDRCTL_ECC_UE_PAR_DCH1_CAUSE)
+#define F_DDRCTL_ECC_UE_PAR_DCH1_CAUSE V_DDRCTL_ECC_UE_PAR_DCH1_CAUSE(1U)
+
+#define S_DDRCTL_ECC_UE_PAR_DCH0_CAUSE 9
+#define V_DDRCTL_ECC_UE_PAR_DCH0_CAUSE(x) ((x) << S_DDRCTL_ECC_UE_PAR_DCH0_CAUSE)
+#define F_DDRCTL_ECC_UE_PAR_DCH0_CAUSE V_DDRCTL_ECC_UE_PAR_DCH0_CAUSE(1U)
+
+#define S_WDATARAM_PARERR_DCH1_CAUSE 8
+#define V_WDATARAM_PARERR_DCH1_CAUSE(x) ((x) << S_WDATARAM_PARERR_DCH1_CAUSE)
+#define F_WDATARAM_PARERR_DCH1_CAUSE V_WDATARAM_PARERR_DCH1_CAUSE(1U)
+
+#define S_WDATARAM_PARERR_DCH0_CAUSE 7
+#define V_WDATARAM_PARERR_DCH0_CAUSE(x) ((x) << S_WDATARAM_PARERR_DCH0_CAUSE)
+#define F_WDATARAM_PARERR_DCH0_CAUSE V_WDATARAM_PARERR_DCH0_CAUSE(1U)
+
+#define S_BIST_ADDR_FIFO_PARERR_CAUSE 6
+#define V_BIST_ADDR_FIFO_PARERR_CAUSE(x) ((x) << S_BIST_ADDR_FIFO_PARERR_CAUSE)
+#define F_BIST_ADDR_FIFO_PARERR_CAUSE V_BIST_ADDR_FIFO_PARERR_CAUSE(1U)
+
+#define S_BIST_ERR_ADDR_FIFO_PARERR_CAUSE 5
+#define V_BIST_ERR_ADDR_FIFO_PARERR_CAUSE(x) ((x) << S_BIST_ERR_ADDR_FIFO_PARERR_CAUSE)
+#define F_BIST_ERR_ADDR_FIFO_PARERR_CAUSE V_BIST_ERR_ADDR_FIFO_PARERR_CAUSE(1U)
+
+#define S_HIF_WDATA_Q_PARERR_DCH0_CAUSE 4
+#define V_HIF_WDATA_Q_PARERR_DCH0_CAUSE(x) ((x) << S_HIF_WDATA_Q_PARERR_DCH0_CAUSE)
+#define F_HIF_WDATA_Q_PARERR_DCH0_CAUSE V_HIF_WDATA_Q_PARERR_DCH0_CAUSE(1U)
+
+#define S_HIF_RSPDATA_Q_PARERR_DCH1_CAUSE 3
+#define V_HIF_RSPDATA_Q_PARERR_DCH1_CAUSE(x) ((x) << S_HIF_RSPDATA_Q_PARERR_DCH1_CAUSE)
+#define F_HIF_RSPDATA_Q_PARERR_DCH1_CAUSE V_HIF_RSPDATA_Q_PARERR_DCH1_CAUSE(1U)
+
+#define S_HIF_RSPDATA_Q_PARERR_DCH0_CAUSE 2
+#define V_HIF_RSPDATA_Q_PARERR_DCH0_CAUSE(x) ((x) << S_HIF_RSPDATA_Q_PARERR_DCH0_CAUSE)
+#define F_HIF_RSPDATA_Q_PARERR_DCH0_CAUSE V_HIF_RSPDATA_Q_PARERR_DCH0_CAUSE(1U)
+
+#define S_HIF_WDATA_MASK_FIFO_PARERR_DCH1_CAUSE 1
+#define V_HIF_WDATA_MASK_FIFO_PARERR_DCH1_CAUSE(x) ((x) << S_HIF_WDATA_MASK_FIFO_PARERR_DCH1_CAUSE)
+#define F_HIF_WDATA_MASK_FIFO_PARERR_DCH1_CAUSE V_HIF_WDATA_MASK_FIFO_PARERR_DCH1_CAUSE(1U)
+
+#define S_HIF_WDATA_MASK_FIFO_PARERR_DCH0_CAUSE 0
+#define V_HIF_WDATA_MASK_FIFO_PARERR_DCH0_CAUSE(x) ((x) << S_HIF_WDATA_MASK_FIFO_PARERR_DCH0_CAUSE)
+#define F_HIF_WDATA_MASK_FIFO_PARERR_DCH0_CAUSE V_HIF_WDATA_MASK_FIFO_PARERR_DCH0_CAUSE(1U)
+
+#define A_T7_MC_P_INT_ENABLE 0x4931c
+
+#define S_DDRPHY_INT_ENABLE 4
+#define V_DDRPHY_INT_ENABLE(x) ((x) << S_DDRPHY_INT_ENABLE)
+#define F_DDRPHY_INT_ENABLE V_DDRPHY_INT_ENABLE(1U)
+
+#define S_DDRCTL_INT_ENABLE 3
+#define V_DDRCTL_INT_ENABLE(x) ((x) << S_DDRCTL_INT_ENABLE)
+#define F_DDRCTL_INT_ENABLE V_DDRCTL_INT_ENABLE(1U)
+
+#define S_T7_ECC_CE_INT_ENABLE 2
+#define V_T7_ECC_CE_INT_ENABLE(x) ((x) << S_T7_ECC_CE_INT_ENABLE)
+#define F_T7_ECC_CE_INT_ENABLE V_T7_ECC_CE_INT_ENABLE(1U)
+
+#define S_T7_ECC_UE_INT_ENABLE 1
+#define V_T7_ECC_UE_INT_ENABLE(x) ((x) << S_T7_ECC_UE_INT_ENABLE)
+#define F_T7_ECC_UE_INT_ENABLE V_T7_ECC_UE_INT_ENABLE(1U)
+
+#define A_T7_MC_P_INT_CAUSE 0x49320
+
+#define S_DDRPHY_INT_CAUSE 4
+#define V_DDRPHY_INT_CAUSE(x) ((x) << S_DDRPHY_INT_CAUSE)
+#define F_DDRPHY_INT_CAUSE V_DDRPHY_INT_CAUSE(1U)
+
+#define S_DDRCTL_INT_CAUSE 3
+#define V_DDRCTL_INT_CAUSE(x) ((x) << S_DDRCTL_INT_CAUSE)
+#define F_DDRCTL_INT_CAUSE V_DDRCTL_INT_CAUSE(1U)
+
+#define S_T7_ECC_CE_INT_CAUSE 2
+#define V_T7_ECC_CE_INT_CAUSE(x) ((x) << S_T7_ECC_CE_INT_CAUSE)
+#define F_T7_ECC_CE_INT_CAUSE V_T7_ECC_CE_INT_CAUSE(1U)
+
+#define S_T7_ECC_UE_INT_CAUSE 1
+#define V_T7_ECC_UE_INT_CAUSE(x) ((x) << S_T7_ECC_UE_INT_CAUSE)
+#define F_T7_ECC_UE_INT_CAUSE V_T7_ECC_UE_INT_CAUSE(1U)
+
+#define A_MC_P_ECC_UE_INT_ENABLE 0x49324
+
+#define S_BIST_RSP_SRAM_UERR_ENABLE 0
+#define V_BIST_RSP_SRAM_UERR_ENABLE(x) ((x) << S_BIST_RSP_SRAM_UERR_ENABLE)
+#define F_BIST_RSP_SRAM_UERR_ENABLE V_BIST_RSP_SRAM_UERR_ENABLE(1U)
+
+#define A_MC_P_ECC_UE_INT_CAUSE 0x49328
+
+#define S_BIST_RSP_SRAM_UERR_CAUSE 0
+#define V_BIST_RSP_SRAM_UERR_CAUSE(x) ((x) << S_BIST_RSP_SRAM_UERR_CAUSE)
+#define F_BIST_RSP_SRAM_UERR_CAUSE V_BIST_RSP_SRAM_UERR_CAUSE(1U)
+
+#define A_T7_MC_P_ECC_STATUS 0x4932c
+#define A_T7_MC_P_PHY_CTRL 0x49330
+#define A_T7_MC_P_STATIC_CFG_STATUS 0x49334
+
+#define S_DFIFREQRATIO 27
+#define V_DFIFREQRATIO(x) ((x) << S_DFIFREQRATIO)
+#define F_DFIFREQRATIO V_DFIFREQRATIO(1U)
+
+#define S_STATIC_DDR5_HBW_CHANNEL 3
+#define V_STATIC_DDR5_HBW_CHANNEL(x) ((x) << S_STATIC_DDR5_HBW_CHANNEL)
+#define F_STATIC_DDR5_HBW_CHANNEL V_STATIC_DDR5_HBW_CHANNEL(1U)
+
+#define S_STATIC_DDR5_HBW 2
+#define V_STATIC_DDR5_HBW(x) ((x) << S_STATIC_DDR5_HBW)
+#define F_STATIC_DDR5_HBW V_STATIC_DDR5_HBW(1U)
+
+#define S_T7_STATIC_WIDTH 1
+#define V_T7_STATIC_WIDTH(x) ((x) << S_T7_STATIC_WIDTH)
+#define F_T7_STATIC_WIDTH V_T7_STATIC_WIDTH(1U)
+
+#define A_T7_MC_P_CORE_PCTL_STAT 0x49338
+#define A_T7_MC_P_DEBUG_CNT 0x4933c
+#define A_T7_MC_CE_ERR_DATA_RDATA 0x49340
+#define A_T7_MC_UE_ERR_DATA_RDATA 0x49380
+#define A_T7_MC_CE_ADDR 0x493c0
+#define A_T7_MC_UE_ADDR 0x493c4
+#define A_T7_MC_P_DEEP_SLEEP 0x493c8
+#define A_T7_MC_P_FPGA_BONUS 0x493cc
+#define A_T7_MC_P_DEBUG_CFG 0x493d0
+#define A_T7_MC_P_DEBUG_RPT 0x493d4
+#define A_T7_MC_P_PHY_ADR_CK_EN 0x493d8
+#define A_MC_P_WDATARAM_INIT 0x493dc
+
+#define S_ENABLE_DCH1 1
+#define V_ENABLE_DCH1(x) ((x) << S_ENABLE_DCH1)
+#define F_ENABLE_DCH1 V_ENABLE_DCH1(1U)
+
+#define S_ENABLE_DCH0 0
+#define V_ENABLE_DCH0(x) ((x) << S_ENABLE_DCH0)
+#define F_ENABLE_DCH0 V_ENABLE_DCH0(1U)
+
+#define A_T7_MC_CE_ERR_ECC_DATA0 0x493e0
+#define A_T7_MC_CE_ERR_ECC_DATA1 0x493e4
+#define A_T7_MC_UE_ERR_ECC_DATA0 0x493e8
+#define A_T7_MC_UE_ERR_ECC_DATA1 0x493ec
+#define A_T7_MC_P_RMW_PRIO 0x493f0
+#define A_T7_MC_P_BIST_CMD 0x49400
+
+#define S_FIFO_ERROR_FLAG 30
+#define V_FIFO_ERROR_FLAG(x) ((x) << S_FIFO_ERROR_FLAG)
+#define F_FIFO_ERROR_FLAG V_FIFO_ERROR_FLAG(1U)
+
+#define A_T7_MC_P_BIST_CMD_ADDR 0x49404
+
+#define S_T7_VALUE 0
+#define M_T7_VALUE 0x1fffffffU
+#define V_T7_VALUE(x) ((x) << S_T7_VALUE)
+#define G_T7_VALUE(x) (((x) >> S_T7_VALUE) & M_T7_VALUE)
+
+#define A_MC_P_BIST_NUM_BURST 0x49408
+#define A_T7_MC_P_BIST_DATA_PATTERN 0x4940c
+
+#define S_DATA_TYPE 0
+#define M_DATA_TYPE 0xfU
+#define V_DATA_TYPE(x) ((x) << S_DATA_TYPE)
+#define G_DATA_TYPE(x) (((x) >> S_DATA_TYPE) & M_DATA_TYPE)
+
+#define A_T7_MC_P_BIST_CRC_SEED 0x49410
+#define A_T7_MC_P_BIST_NUM_ERR 0x49460
+#define A_MC_P_BIST_ERR_ADDR 0x49464
+
+#define S_ERROR_ADDR 0
+#define M_ERROR_ADDR 0x3fffffffU
+#define V_ERROR_ADDR(x) ((x) << S_ERROR_ADDR)
+#define G_ERROR_ADDR(x) (((x) >> S_ERROR_ADDR) & M_ERROR_ADDR)
+
+#define A_MC_P_BIST_USER_RWEDATA 0x49468
+/*
+ * NOTE(review): the A_MC_REGB_DDRC_* definitions below drop back to the
+ * 0x10380+ range, well below MC_T70_BASE_ADDR (0x48000). These are
+ * presumably offsets within the DDR controller's own register space
+ * (e.g. reached through the MC_IND_ADDR/MC_IND_DATA indirect window)
+ * rather than direct bus addresses — TODO confirm against the access
+ * path used by the driver.
+ */
+#define A_MC_REGB_DDRC_CH0_SCHED0 0x10380
+
+#define S_OPT_VPRW_SCH 31
+#define V_OPT_VPRW_SCH(x) ((x) << S_OPT_VPRW_SCH)
+#define F_OPT_VPRW_SCH V_OPT_VPRW_SCH(1U)
+
+#define S_DIS_SPECULATIVE_ACT 30
+#define V_DIS_SPECULATIVE_ACT(x) ((x) << S_DIS_SPECULATIVE_ACT)
+#define F_DIS_SPECULATIVE_ACT V_DIS_SPECULATIVE_ACT(1U)
+
+#define S_OPT_ACT_LAT 27
+#define V_OPT_ACT_LAT(x) ((x) << S_OPT_ACT_LAT)
+#define F_OPT_ACT_LAT V_OPT_ACT_LAT(1U)
+
+#define S_LPR_NUM_ENTRIES 8
+#define M_LPR_NUM_ENTRIES 0x3fU
+#define V_LPR_NUM_ENTRIES(x) ((x) << S_LPR_NUM_ENTRIES)
+#define G_LPR_NUM_ENTRIES(x) (((x) >> S_LPR_NUM_ENTRIES) & M_LPR_NUM_ENTRIES)
+
+#define S_AUTOPRE_RMW 7
+#define V_AUTOPRE_RMW(x) ((x) << S_AUTOPRE_RMW)
+#define F_AUTOPRE_RMW V_AUTOPRE_RMW(1U)
+
+#define S_DIS_OPT_NTT_BY_PRE 6
+#define V_DIS_OPT_NTT_BY_PRE(x) ((x) << S_DIS_OPT_NTT_BY_PRE)
+#define F_DIS_OPT_NTT_BY_PRE V_DIS_OPT_NTT_BY_PRE(1U)
+
+#define S_DIS_OPT_NTT_BY_ACT 5
+#define V_DIS_OPT_NTT_BY_ACT(x) ((x) << S_DIS_OPT_NTT_BY_ACT)
+#define F_DIS_OPT_NTT_BY_ACT V_DIS_OPT_NTT_BY_ACT(1U)
+
+#define S_OPT_WRCAM_FILL_LEVEL 4
+#define V_OPT_WRCAM_FILL_LEVEL(x) ((x) << S_OPT_WRCAM_FILL_LEVEL)
+#define F_OPT_WRCAM_FILL_LEVEL V_OPT_WRCAM_FILL_LEVEL(1U)
+
+#define S_PAGECLOSE 2
+#define V_PAGECLOSE(x) ((x) << S_PAGECLOSE)
+#define F_PAGECLOSE V_PAGECLOSE(1U)
+
+#define S_PREFER_WRITE 1
+#define V_PREFER_WRITE(x) ((x) << S_PREFER_WRITE)
+#define F_PREFER_WRITE V_PREFER_WRITE(1U)
+
+#define A_MC_REGB_DDRC_CH0_ECCCFG0 0x10600
+
+#define S_DIS_SCRUB 23
+#define V_DIS_SCRUB(x) ((x) << S_DIS_SCRUB)
+#define F_DIS_SCRUB V_DIS_SCRUB(1U)
+
+#define S_ECC_TYPE 4
+#define M_ECC_TYPE 0x3U
+#define V_ECC_TYPE(x) ((x) << S_ECC_TYPE)
+#define G_ECC_TYPE(x) (((x) >> S_ECC_TYPE) & M_ECC_TYPE)
+
+#define S_TEST_MODE 3
+#define V_TEST_MODE(x) ((x) << S_TEST_MODE)
+#define F_TEST_MODE V_TEST_MODE(1U)
+
+#define S_ECC_MODE 0
+#define M_ECC_MODE 0x7U
+#define V_ECC_MODE(x) ((x) << S_ECC_MODE)
+#define G_ECC_MODE(x) (((x) >> S_ECC_MODE) & M_ECC_MODE)
+
+#define A_MC_REGB_DDRC_CH0_ECCCFG1 0x10604
+
+#define S_DATA_POISON_BIT 1
+#define V_DATA_POISON_BIT(x) ((x) << S_DATA_POISON_BIT)
+#define F_DATA_POISON_BIT V_DATA_POISON_BIT(1U)
+
+#define S_DATA_POISON_EN 0
+#define V_DATA_POISON_EN(x) ((x) << S_DATA_POISON_EN)
+#define F_DATA_POISON_EN V_DATA_POISON_EN(1U)
+
+#define A_MC_REGB_DDRC_CH0_ECCSTAT 0x10608
+
+#define S_ECC_UNCORRECTED_ERR 16
+#define M_ECC_UNCORRECTED_ERR 0xffU
+#define V_ECC_UNCORRECTED_ERR(x) ((x) << S_ECC_UNCORRECTED_ERR)
+#define G_ECC_UNCORRECTED_ERR(x) (((x) >> S_ECC_UNCORRECTED_ERR) & M_ECC_UNCORRECTED_ERR)
+
+#define S_ECC_CORRECTED_ERR 8
+#define M_ECC_CORRECTED_ERR 0xffU
+#define V_ECC_CORRECTED_ERR(x) ((x) << S_ECC_CORRECTED_ERR)
+#define G_ECC_CORRECTED_ERR(x) (((x) >> S_ECC_CORRECTED_ERR) & M_ECC_CORRECTED_ERR)
+
+#define S_ECC_CORRECTED_BIT_NUM 0
+#define M_ECC_CORRECTED_BIT_NUM 0x7fU
+#define V_ECC_CORRECTED_BIT_NUM(x) ((x) << S_ECC_CORRECTED_BIT_NUM)
+#define G_ECC_CORRECTED_BIT_NUM(x) (((x) >> S_ECC_CORRECTED_BIT_NUM) & M_ECC_CORRECTED_BIT_NUM)
+
+#define A_MC_REGB_DDRC_CH0_ECCCTL 0x1060c
+
+#define S_ECC_UNCORRECTED_ERR_INTR_FORCE 17
+#define V_ECC_UNCORRECTED_ERR_INTR_FORCE(x) ((x) << S_ECC_UNCORRECTED_ERR_INTR_FORCE)
+#define F_ECC_UNCORRECTED_ERR_INTR_FORCE V_ECC_UNCORRECTED_ERR_INTR_FORCE(1U)
+
+#define S_ECC_CORRECTED_ERR_INTR_FORCE 16
+#define V_ECC_CORRECTED_ERR_INTR_FORCE(x) ((x) << S_ECC_CORRECTED_ERR_INTR_FORCE)
+#define F_ECC_CORRECTED_ERR_INTR_FORCE V_ECC_CORRECTED_ERR_INTR_FORCE(1U)
+
+#define S_ECC_UNCORRECTED_ERR_INTR_EN 9
+#define V_ECC_UNCORRECTED_ERR_INTR_EN(x) ((x) << S_ECC_UNCORRECTED_ERR_INTR_EN)
+#define F_ECC_UNCORRECTED_ERR_INTR_EN V_ECC_UNCORRECTED_ERR_INTR_EN(1U)
+
+#define S_ECC_CORRECTED_ERR_INTR_EN 8
+#define V_ECC_CORRECTED_ERR_INTR_EN(x) ((x) << S_ECC_CORRECTED_ERR_INTR_EN)
+#define F_ECC_CORRECTED_ERR_INTR_EN V_ECC_CORRECTED_ERR_INTR_EN(1U)
+
+#define S_ECC_UNCORR_ERR_CNT_CLR 3
+#define V_ECC_UNCORR_ERR_CNT_CLR(x) ((x) << S_ECC_UNCORR_ERR_CNT_CLR)
+#define F_ECC_UNCORR_ERR_CNT_CLR V_ECC_UNCORR_ERR_CNT_CLR(1U)
+
+#define S_ECC_CORR_ERR_CNT_CLR 2
+#define V_ECC_CORR_ERR_CNT_CLR(x) ((x) << S_ECC_CORR_ERR_CNT_CLR)
+#define F_ECC_CORR_ERR_CNT_CLR V_ECC_CORR_ERR_CNT_CLR(1U)
+
+#define S_ECC_UNCORRECTED_ERR_CLR 1
+#define V_ECC_UNCORRECTED_ERR_CLR(x) ((x) << S_ECC_UNCORRECTED_ERR_CLR)
+#define F_ECC_UNCORRECTED_ERR_CLR V_ECC_UNCORRECTED_ERR_CLR(1U)
+
+#define S_ECC_CORRECTED_ERR_CLR 0
+#define V_ECC_CORRECTED_ERR_CLR(x) ((x) << S_ECC_CORRECTED_ERR_CLR)
+#define F_ECC_CORRECTED_ERR_CLR V_ECC_CORRECTED_ERR_CLR(1U)
+
+#define A_MC_REGB_DDRC_CH0_ECCERRCNT 0x10610
+
+#define S_ECC_UNCORR_ERR_CNT 16
+#define M_ECC_UNCORR_ERR_CNT 0xffffU
+#define V_ECC_UNCORR_ERR_CNT(x) ((x) << S_ECC_UNCORR_ERR_CNT)
+#define G_ECC_UNCORR_ERR_CNT(x) (((x) >> S_ECC_UNCORR_ERR_CNT) & M_ECC_UNCORR_ERR_CNT)
+
+#define S_ECC_CORR_ERR_CNT 0
+#define M_ECC_CORR_ERR_CNT 0xffffU
+#define V_ECC_CORR_ERR_CNT(x) ((x) << S_ECC_CORR_ERR_CNT)
+#define G_ECC_CORR_ERR_CNT(x) (((x) >> S_ECC_CORR_ERR_CNT) & M_ECC_CORR_ERR_CNT)
+
+#define A_MC_REGB_DDRC_CH0_ECCCADDR0 0x10614
+
+#define S_ECC_CORR_RANK 24
+#define V_ECC_CORR_RANK(x) ((x) << S_ECC_CORR_RANK)
+#define F_ECC_CORR_RANK V_ECC_CORR_RANK(1U)
+
+#define S_ECC_CORR_ROW 0
+#define M_ECC_CORR_ROW 0x3ffffU
+#define V_ECC_CORR_ROW(x) ((x) << S_ECC_CORR_ROW)
+#define G_ECC_CORR_ROW(x) (((x) >> S_ECC_CORR_ROW) & M_ECC_CORR_ROW)
+
+#define A_MC_REGB_DDRC_CH0_ECCCADDR1 0x10618
+
+#define S_ECC_CORR_BG 24
+#define M_ECC_CORR_BG 0x7U
+#define V_ECC_CORR_BG(x) ((x) << S_ECC_CORR_BG)
+#define G_ECC_CORR_BG(x) (((x) >> S_ECC_CORR_BG) & M_ECC_CORR_BG)
+
+#define S_ECC_CORR_BANK 16
+#define M_ECC_CORR_BANK 0x3U
+#define V_ECC_CORR_BANK(x) ((x) << S_ECC_CORR_BANK)
+#define G_ECC_CORR_BANK(x) (((x) >> S_ECC_CORR_BANK) & M_ECC_CORR_BANK)
+
+#define S_ECC_CORR_COL 0
+#define M_ECC_CORR_COL 0x7ffU
+#define V_ECC_CORR_COL(x) ((x) << S_ECC_CORR_COL)
+#define G_ECC_CORR_COL(x) (((x) >> S_ECC_CORR_COL) & M_ECC_CORR_COL)
+
+#define A_MC_REGB_DDRC_CH0_ECCCSYN0 0x1061c
+#define A_MC_REGB_DDRC_CH0_ECCCSYN1 0x10620
+#define A_MC_REGB_DDRC_CH0_ECCCSYN2 0x10624
+
+#define S_CB_CORR_SYNDROME 16
+#define M_CB_CORR_SYNDROME 0xffU
+#define V_CB_CORR_SYNDROME(x) ((x) << S_CB_CORR_SYNDROME)
+#define G_CB_CORR_SYNDROME(x) (((x) >> S_CB_CORR_SYNDROME) & M_CB_CORR_SYNDROME)
+
+#define S_ECC_CORR_SYNDROMES_71_64 0
+#define M_ECC_CORR_SYNDROMES_71_64 0xffU
+#define V_ECC_CORR_SYNDROMES_71_64(x) ((x) << S_ECC_CORR_SYNDROMES_71_64)
+#define G_ECC_CORR_SYNDROMES_71_64(x) (((x) >> S_ECC_CORR_SYNDROMES_71_64) & M_ECC_CORR_SYNDROMES_71_64)
+
+#define A_MC_REGB_DDRC_CH0_ECCBITMASK0 0x10628
+#define A_MC_REGB_DDRC_CH0_ECCBITMASK1 0x1062c
+#define A_MC_REGB_DDRC_CH0_ECCBITMASK2 0x10630
+
+#define S_ECC_CORR_BIT_MASK_71_64 0
+#define M_ECC_CORR_BIT_MASK_71_64 0xffU
+#define V_ECC_CORR_BIT_MASK_71_64(x) ((x) << S_ECC_CORR_BIT_MASK_71_64)
+#define G_ECC_CORR_BIT_MASK_71_64(x) (((x) >> S_ECC_CORR_BIT_MASK_71_64) & M_ECC_CORR_BIT_MASK_71_64)
+
+#define A_MC_REGB_DDRC_CH0_ECCUADDR0 0x10634
+
+#define S_ECC_UNCORR_RANK 24
+#define V_ECC_UNCORR_RANK(x) ((x) << S_ECC_UNCORR_RANK)
+#define F_ECC_UNCORR_RANK V_ECC_UNCORR_RANK(1U)
+
+#define S_ECC_UNCORR_ROW 0
+#define M_ECC_UNCORR_ROW 0x3ffffU
+#define V_ECC_UNCORR_ROW(x) ((x) << S_ECC_UNCORR_ROW)
+#define G_ECC_UNCORR_ROW(x) (((x) >> S_ECC_UNCORR_ROW) & M_ECC_UNCORR_ROW)
+
+#define A_MC_REGB_DDRC_CH0_ECCUADDR1 0x10638
+
+#define S_ECC_UNCORR_BG 24
+#define M_ECC_UNCORR_BG 0x7U
+#define V_ECC_UNCORR_BG(x) ((x) << S_ECC_UNCORR_BG)
+#define G_ECC_UNCORR_BG(x) (((x) >> S_ECC_UNCORR_BG) & M_ECC_UNCORR_BG)
+
+#define S_ECC_UNCORR_BANK 16
+#define M_ECC_UNCORR_BANK 0x3U
+#define V_ECC_UNCORR_BANK(x) ((x) << S_ECC_UNCORR_BANK)
+#define G_ECC_UNCORR_BANK(x) (((x) >> S_ECC_UNCORR_BANK) & M_ECC_UNCORR_BANK)
+
+#define S_ECC_UNCORR_COL 0
+#define M_ECC_UNCORR_COL 0x7ffU
+#define V_ECC_UNCORR_COL(x) ((x) << S_ECC_UNCORR_COL)
+#define G_ECC_UNCORR_COL(x) (((x) >> S_ECC_UNCORR_COL) & M_ECC_UNCORR_COL)
+
+#define A_MC_REGB_DDRC_CH0_ECCUSYN0 0x1063c
+#define A_MC_REGB_DDRC_CH0_ECCUSYN1 0x10640
+#define A_MC_REGB_DDRC_CH0_ECCUSYN2 0x10644
+
+#define S_CB_UNCORR_SYNDROME 16
+#define M_CB_UNCORR_SYNDROME 0xffU
+#define V_CB_UNCORR_SYNDROME(x) ((x) << S_CB_UNCORR_SYNDROME)
+#define G_CB_UNCORR_SYNDROME(x) (((x) >> S_CB_UNCORR_SYNDROME) & M_CB_UNCORR_SYNDROME)
+
+#define S_ECC_UNCORR_SYNDROMES_71_64 0
+#define M_ECC_UNCORR_SYNDROMES_71_64 0xffU
+#define V_ECC_UNCORR_SYNDROMES_71_64(x) ((x) << S_ECC_UNCORR_SYNDROMES_71_64)
+#define G_ECC_UNCORR_SYNDROMES_71_64(x) (((x) >> S_ECC_UNCORR_SYNDROMES_71_64) & M_ECC_UNCORR_SYNDROMES_71_64)
+
+#define A_MC_REGB_DDRC_CH0_ECCPOISONADDR0 0x10648
+
+#define S_ECC_POISON_RANK 24
+#define V_ECC_POISON_RANK(x) ((x) << S_ECC_POISON_RANK)
+#define F_ECC_POISON_RANK V_ECC_POISON_RANK(1U)
+
+#define S_ECC_POISON_COL 0
+#define M_ECC_POISON_COL 0xfffU
+#define V_ECC_POISON_COL(x) ((x) << S_ECC_POISON_COL)
+#define G_ECC_POISON_COL(x) (((x) >> S_ECC_POISON_COL) & M_ECC_POISON_COL)
+
+#define A_MC_REGB_DDRC_CH0_ECCPOISONADDR1 0x1064c
+
+#define S_ECC_POISON_BG 28
+#define M_ECC_POISON_BG 0x7U
+#define V_ECC_POISON_BG(x) ((x) << S_ECC_POISON_BG)
+#define G_ECC_POISON_BG(x) (((x) >> S_ECC_POISON_BG) & M_ECC_POISON_BG)
+
+#define S_ECC_POISON_BANK 24
+#define M_ECC_POISON_BANK 0x3U
+#define V_ECC_POISON_BANK(x) ((x) << S_ECC_POISON_BANK)
+#define G_ECC_POISON_BANK(x) (((x) >> S_ECC_POISON_BANK) & M_ECC_POISON_BANK)
+
+#define S_ECC_POISON_ROW 0
+#define M_ECC_POISON_ROW 0x3ffffU
+#define V_ECC_POISON_ROW(x) ((x) << S_ECC_POISON_ROW)
+#define G_ECC_POISON_ROW(x) (((x) >> S_ECC_POISON_ROW) & M_ECC_POISON_ROW)
+
+#define A_MC_REGB_DDRC_CH0_ECCPOISONPAT0 0x10658
+#define A_MC_REGB_DDRC_CH0_ECCPOISONPAT1 0x1065c
+#define A_MC_REGB_DDRC_CH0_ECCPOISONPAT2 0x10660
+
+#define S_ECC_POISON_DATA_71_64 0
+#define M_ECC_POISON_DATA_71_64 0xffU
+#define V_ECC_POISON_DATA_71_64(x) ((x) << S_ECC_POISON_DATA_71_64)
+#define G_ECC_POISON_DATA_71_64(x) (((x) >> S_ECC_POISON_DATA_71_64) & M_ECC_POISON_DATA_71_64)
+
+#define A_MC_REGB_DDRC_CH0_ECCCFG2 0x10668
+
+#define S_FLIP_BIT_POS1 24
+#define M_FLIP_BIT_POS1 0x7fU
+#define V_FLIP_BIT_POS1(x) ((x) << S_FLIP_BIT_POS1)
+#define G_FLIP_BIT_POS1(x) (((x) >> S_FLIP_BIT_POS1) & M_FLIP_BIT_POS1)
+
+#define S_FLIP_BIT_POS0 16
+#define M_FLIP_BIT_POS0 0x7fU
+#define V_FLIP_BIT_POS0(x) ((x) << S_FLIP_BIT_POS0)
+#define G_FLIP_BIT_POS0(x) (((x) >> S_FLIP_BIT_POS0) & M_FLIP_BIT_POS0)
+
+#define A_MC_REGB_DDRC_CH1_ECCCTL 0x1160c
+#define A_MC_REGB_DDRC_CH1_ECCERRCNT 0x11610
+#define A_MC_REGB_DDRC_CH1_ECCCADDR0 0x11614
+#define A_MC_REGB_DDRC_CH1_ECCCADDR1 0x11618
+#define A_MC_REGB_DDRC_CH1_ECCCSYN0 0x1161c
+#define A_MC_REGB_DDRC_CH1_ECCCSYN1 0x11620
+#define A_MC_REGB_DDRC_CH1_ECCCSYN2 0x11624
+#define A_MC_REGB_DDRC_CH1_ECCBITMASK0 0x11628
+#define A_MC_REGB_DDRC_CH1_ECCBITMASK1 0x1162c
+#define A_MC_REGB_DDRC_CH1_ECCBITMASK2 0x11630
+#define A_MC_REGB_DDRC_CH1_ECCUADDR0 0x11634
+#define A_MC_REGB_DDRC_CH1_ECCUADDR1 0x11638
+#define A_MC_REGB_DDRC_CH1_ECCUSYN0 0x1163c
+#define A_MC_REGB_DDRC_CH1_ECCUSYN1 0x11640
+#define A_MC_REGB_DDRC_CH1_ECCUSYN2 0x11644
+/*
+ * NOTE(review): the DWC_DDRPHYA register values here are consecutive
+ * integers (0x20100, 0x20101, 0x20102, ...), unlike the 4-byte-stride
+ * addresses above — they look like 16-bit PHY CSR indices rather than
+ * byte offsets, so they presumably need index-to-address translation
+ * (or an indirect CSR access mechanism) before use — TODO confirm.
+ */
+#define A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTENABLE 0x20100
+
+#define S_PHYSTICKYUNLOCKEN 15
+#define V_PHYSTICKYUNLOCKEN(x) ((x) << S_PHYSTICKYUNLOCKEN)
+#define F_PHYSTICKYUNLOCKEN V_PHYSTICKYUNLOCKEN(1U)
+
+#define S_PHYBSIEN 14
+#define V_PHYBSIEN(x) ((x) << S_PHYBSIEN)
+#define F_PHYBSIEN V_PHYBSIEN(1U)
+
+#define S_PHYANIBRCVERREN 13
+#define V_PHYANIBRCVERREN(x) ((x) << S_PHYANIBRCVERREN)
+#define F_PHYANIBRCVERREN V_PHYANIBRCVERREN(1U)
+
+#define S_PHYD5ACSM1PARITYEN 12
+#define V_PHYD5ACSM1PARITYEN(x) ((x) << S_PHYD5ACSM1PARITYEN)
+#define F_PHYD5ACSM1PARITYEN V_PHYD5ACSM1PARITYEN(1U)
+
+#define S_PHYD5ACSM0PARITYEN 11
+#define V_PHYD5ACSM0PARITYEN(x) ((x) << S_PHYD5ACSM0PARITYEN)
+#define F_PHYD5ACSM0PARITYEN V_PHYD5ACSM0PARITYEN(1U)
+
+#define S_PHYRXFIFOCHECKEN 10
+#define V_PHYRXFIFOCHECKEN(x) ((x) << S_PHYRXFIFOCHECKEN)
+#define F_PHYRXFIFOCHECKEN V_PHYRXFIFOCHECKEN(1U)
+
+#define S_PHYTXPPTEN 9
+#define V_PHYTXPPTEN(x) ((x) << S_PHYTXPPTEN)
+#define F_PHYTXPPTEN V_PHYTXPPTEN(1U)
+
+#define S_PHYECCEN 8
+#define V_PHYECCEN(x) ((x) << S_PHYECCEN)
+#define F_PHYECCEN V_PHYECCEN(1U)
+
+#define S_PHYFWRESERVEDEN 3
+#define M_PHYFWRESERVEDEN 0x1fU
+#define V_PHYFWRESERVEDEN(x) ((x) << S_PHYFWRESERVEDEN)
+#define G_PHYFWRESERVEDEN(x) (((x) >> S_PHYFWRESERVEDEN) & M_PHYFWRESERVEDEN)
+
+#define S_PHYTRNGFAILEN 2
+#define V_PHYTRNGFAILEN(x) ((x) << S_PHYTRNGFAILEN)
+#define F_PHYTRNGFAILEN V_PHYTRNGFAILEN(1U)
+
+#define S_PHYINITCMPLTEN 1
+#define V_PHYINITCMPLTEN(x) ((x) << S_PHYINITCMPLTEN)
+#define F_PHYINITCMPLTEN V_PHYINITCMPLTEN(1U)
+
+#define S_PHYTRNGCMPLTEN 0
+#define V_PHYTRNGCMPLTEN(x) ((x) << S_PHYTRNGCMPLTEN)
+#define F_PHYTRNGCMPLTEN V_PHYTRNGCMPLTEN(1U)
+
+#define A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTFWCONTROL 0x20101
+
+#define S_PHYFWRESERVEDFW 3
+#define M_PHYFWRESERVEDFW 0x1fU
+#define V_PHYFWRESERVEDFW(x) ((x) << S_PHYFWRESERVEDFW)
+#define G_PHYFWRESERVEDFW(x) (((x) >> S_PHYFWRESERVEDFW) & M_PHYFWRESERVEDFW)
+
+#define S_PHYTRNGFAILFW 2
+#define V_PHYTRNGFAILFW(x) ((x) << S_PHYTRNGFAILFW)
+#define F_PHYTRNGFAILFW V_PHYTRNGFAILFW(1U)
+
+#define S_PHYINITCMPLTFW 1
+#define V_PHYINITCMPLTFW(x) ((x) << S_PHYINITCMPLTFW)
+#define F_PHYINITCMPLTFW V_PHYINITCMPLTFW(1U)
+
+#define S_PHYTRNGCMPLTFW 0
+#define V_PHYTRNGCMPLTFW(x) ((x) << S_PHYTRNGCMPLTFW)
+#define F_PHYTRNGCMPLTFW V_PHYTRNGCMPLTFW(1U)
+
+#define A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTMASK 0x20102
+
+#define S_PHYSTICKYUNLOCKMSK 15
+#define V_PHYSTICKYUNLOCKMSK(x) ((x) << S_PHYSTICKYUNLOCKMSK)
+#define F_PHYSTICKYUNLOCKMSK V_PHYSTICKYUNLOCKMSK(1U)
+
+#define S_PHYBSIMSK 14
+#define V_PHYBSIMSK(x) ((x) << S_PHYBSIMSK)
+#define F_PHYBSIMSK V_PHYBSIMSK(1U)
+
+#define S_PHYANIBRCVERRMSK 13
+#define V_PHYANIBRCVERRMSK(x) ((x) << S_PHYANIBRCVERRMSK)
+#define F_PHYANIBRCVERRMSK V_PHYANIBRCVERRMSK(1U)
+
+#define S_PHYD5ACSM1PARITYMSK 12
+#define V_PHYD5ACSM1PARITYMSK(x) ((x) << S_PHYD5ACSM1PARITYMSK)
+#define F_PHYD5ACSM1PARITYMSK V_PHYD5ACSM1PARITYMSK(1U)
+
+#define S_PHYD5ACSM0PARITYMSK 11
+#define V_PHYD5ACSM0PARITYMSK(x) ((x) << S_PHYD5ACSM0PARITYMSK)
+#define F_PHYD5ACSM0PARITYMSK V_PHYD5ACSM0PARITYMSK(1U)
+
+#define S_PHYRXFIFOCHECKMSK 10
+#define V_PHYRXFIFOCHECKMSK(x) ((x) << S_PHYRXFIFOCHECKMSK)
+#define F_PHYRXFIFOCHECKMSK V_PHYRXFIFOCHECKMSK(1U)
+
+#define S_PHYTXPPTMSK 9
+#define V_PHYTXPPTMSK(x) ((x) << S_PHYTXPPTMSK)
+#define F_PHYTXPPTMSK V_PHYTXPPTMSK(1U)
+
+#define S_PHYECCMSK 8
+#define V_PHYECCMSK(x) ((x) << S_PHYECCMSK)
+#define F_PHYECCMSK V_PHYECCMSK(1U)
+
+#define S_PHYFWRESERVEDMSK 3
+#define M_PHYFWRESERVEDMSK 0x1fU
+#define V_PHYFWRESERVEDMSK(x) ((x) << S_PHYFWRESERVEDMSK)
+#define G_PHYFWRESERVEDMSK(x) (((x) >> S_PHYFWRESERVEDMSK) & M_PHYFWRESERVEDMSK)
+
+#define S_PHYTRNGFAILMSK 2
+#define V_PHYTRNGFAILMSK(x) ((x) << S_PHYTRNGFAILMSK)
+#define F_PHYTRNGFAILMSK V_PHYTRNGFAILMSK(1U)
+
+#define S_PHYINITCMPLTMSK 1
+#define V_PHYINITCMPLTMSK(x) ((x) << S_PHYINITCMPLTMSK)
+#define F_PHYINITCMPLTMSK V_PHYINITCMPLTMSK(1U)
+
+#define S_PHYTRNGCMPLTMSK 0
+#define V_PHYTRNGCMPLTMSK(x) ((x) << S_PHYTRNGCMPLTMSK)
+#define F_PHYTRNGCMPLTMSK V_PHYTRNGCMPLTMSK(1U)
+
+#define A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTCLEAR 0x20103
+
+#define S_PHYSTICKYUNLOCKCLR 15
+#define V_PHYSTICKYUNLOCKCLR(x) ((x) << S_PHYSTICKYUNLOCKCLR)
+#define F_PHYSTICKYUNLOCKCLR V_PHYSTICKYUNLOCKCLR(1U)
+
+#define S_PHYBSICLR 14
+#define V_PHYBSICLR(x) ((x) << S_PHYBSICLR)
+#define F_PHYBSICLR V_PHYBSICLR(1U)
+
+#define S_PHYANIBRCVERRCLR 13
+#define V_PHYANIBRCVERRCLR(x) ((x) << S_PHYANIBRCVERRCLR)
+#define F_PHYANIBRCVERRCLR V_PHYANIBRCVERRCLR(1U)
+
+#define S_PHYD5ACSM1PARITYCLR 12
+#define V_PHYD5ACSM1PARITYCLR(x) ((x) << S_PHYD5ACSM1PARITYCLR)
+#define F_PHYD5ACSM1PARITYCLR V_PHYD5ACSM1PARITYCLR(1U)
+
+#define S_PHYD5ACSM0PARITYCLR 11
+#define V_PHYD5ACSM0PARITYCLR(x) ((x) << S_PHYD5ACSM0PARITYCLR)
+#define F_PHYD5ACSM0PARITYCLR V_PHYD5ACSM0PARITYCLR(1U)
+
+#define S_PHYRXFIFOCHECKCLR 10
+#define V_PHYRXFIFOCHECKCLR(x) ((x) << S_PHYRXFIFOCHECKCLR)
+#define F_PHYRXFIFOCHECKCLR V_PHYRXFIFOCHECKCLR(1U)
+
+#define S_PHYTXPPTCLR 9
+#define V_PHYTXPPTCLR(x) ((x) << S_PHYTXPPTCLR)
+#define F_PHYTXPPTCLR V_PHYTXPPTCLR(1U)
+
+#define S_PHYECCCLR 8
+#define V_PHYECCCLR(x) ((x) << S_PHYECCCLR)
+#define F_PHYECCCLR V_PHYECCCLR(1U)
+
+#define S_PHYFWRESERVEDCLR 3
+#define M_PHYFWRESERVEDCLR 0x1fU
+#define V_PHYFWRESERVEDCLR(x) ((x) << S_PHYFWRESERVEDCLR)
+#define G_PHYFWRESERVEDCLR(x) (((x) >> S_PHYFWRESERVEDCLR) & M_PHYFWRESERVEDCLR)
+
+#define S_PHYTRNGFAILCLR 2
+#define V_PHYTRNGFAILCLR(x) ((x) << S_PHYTRNGFAILCLR)
+#define F_PHYTRNGFAILCLR V_PHYTRNGFAILCLR(1U)
+
+#define S_PHYINITCMPLTCLR 1
+#define V_PHYINITCMPLTCLR(x) ((x) << S_PHYINITCMPLTCLR)
+#define F_PHYINITCMPLTCLR V_PHYINITCMPLTCLR(1U)
+
+#define S_PHYTRNGCMPLTCLR 0
+#define V_PHYTRNGCMPLTCLR(x) ((x) << S_PHYTRNGCMPLTCLR)
+#define F_PHYTRNGCMPLTCLR V_PHYTRNGCMPLTCLR(1U)
+
+#define A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTSTATUS 0x20104
+
+#define S_PHYSTICKYUNLOCKERR 15
+#define V_PHYSTICKYUNLOCKERR(x) ((x) << S_PHYSTICKYUNLOCKERR)
+#define F_PHYSTICKYUNLOCKERR V_PHYSTICKYUNLOCKERR(1U)
+
+#define S_PHYBSIINT 14
+#define V_PHYBSIINT(x) ((x) << S_PHYBSIINT)
+#define F_PHYBSIINT V_PHYBSIINT(1U)
+
+#define S_PHYANIBRCVERR 13
+#define V_PHYANIBRCVERR(x) ((x) << S_PHYANIBRCVERR)
+#define F_PHYANIBRCVERR V_PHYANIBRCVERR(1U)
+
+#define S_PHYD5ACSM1PARITYERR 12
+#define V_PHYD5ACSM1PARITYERR(x) ((x) << S_PHYD5ACSM1PARITYERR)
+#define F_PHYD5ACSM1PARITYERR V_PHYD5ACSM1PARITYERR(1U)
+
+#define S_PHYD5ACSM0PARITYERR 11
+#define V_PHYD5ACSM0PARITYERR(x) ((x) << S_PHYD5ACSM0PARITYERR)
+#define F_PHYD5ACSM0PARITYERR V_PHYD5ACSM0PARITYERR(1U)
+
+#define S_PHYRXFIFOCHECKERR 10
+#define V_PHYRXFIFOCHECKERR(x) ((x) << S_PHYRXFIFOCHECKERR)
+#define F_PHYRXFIFOCHECKERR V_PHYRXFIFOCHECKERR(1U)
+
+#define S_PHYRXTXPPTERR 9
+#define V_PHYRXTXPPTERR(x) ((x) << S_PHYRXTXPPTERR)
+#define F_PHYRXTXPPTERR V_PHYRXTXPPTERR(1U)
+
+#define S_PHYECCERR 8
+#define V_PHYECCERR(x) ((x) << S_PHYECCERR)
+#define F_PHYECCERR V_PHYECCERR(1U)
+
+#define S_PHYFWRESERVED 3
+#define M_PHYFWRESERVED 0x1fU
+#define V_PHYFWRESERVED(x) ((x) << S_PHYFWRESERVED)
+#define G_PHYFWRESERVED(x) (((x) >> S_PHYFWRESERVED) & M_PHYFWRESERVED)
+
+#define S_PHYTRNGFAIL 2
+#define V_PHYTRNGFAIL(x) ((x) << S_PHYTRNGFAIL)
+#define F_PHYTRNGFAIL V_PHYTRNGFAIL(1U)
+
+#define S_PHYINITCMPLT 1
+#define V_PHYINITCMPLT(x) ((x) << S_PHYINITCMPLT)
+#define F_PHYINITCMPLT V_PHYINITCMPLT(1U)
+
+#define S_PHYTRNGCMPLT 0
+#define V_PHYTRNGCMPLT(x) ((x) << S_PHYTRNGCMPLT)
+#define F_PHYTRNGCMPLT V_PHYTRNGCMPLT(1U)
+
+#define A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTOVERRIDE 0x20107
+
+#define S_PHYINTERRUPTOVERRIDE 0
+#define M_PHYINTERRUPTOVERRIDE 0xffffU
+#define V_PHYINTERRUPTOVERRIDE(x) ((x) << S_PHYINTERRUPTOVERRIDE)
+#define G_PHYINTERRUPTOVERRIDE(x) (((x) >> S_PHYINTERRUPTOVERRIDE) & M_PHYINTERRUPTOVERRIDE)
+
+/* registers for module MC_T71 */
+#define MC_T71_BASE_ADDR 0x58000
+
+/* registers for module GCACHE */
+#define GCACHE_BASE_ADDR 0x51400
+
+#define A_GCACHE_MODE_SEL0 0x51400
+
+#define S_GC_MA_RSP 16
+#define V_GC_MA_RSP(x) ((x) << S_GC_MA_RSP)
+#define F_GC_MA_RSP V_GC_MA_RSP(1U)
+
+#define A_GCACHE_MEMZONE0_REGION1 0x51404
+
+#define S_REGION_EN1 18
+#define V_REGION_EN1(x) ((x) << S_REGION_EN1)
+#define F_REGION_EN1 V_REGION_EN1(1U)
+
+#define S_EDC_REGION1 17
+#define V_EDC_REGION1(x) ((x) << S_EDC_REGION1)
+#define F_EDC_REGION1 V_EDC_REGION1(1U)
+
+#define S_CACHE_REGION1 16
+#define V_CACHE_REGION1(x) ((x) << S_CACHE_REGION1)
+#define F_CACHE_REGION1 V_CACHE_REGION1(1U)
+
+#define S_END1 0
+#define M_END1 0xffffU
+#define V_END1(x) ((x) << S_END1)
+#define G_END1(x) (((x) >> S_END1) & M_END1)
+
+#define A_GCACHE_MEMZONE0_REGION2 0x51408
+
+#define S_REGION_EN2 18
+#define V_REGION_EN2(x) ((x) << S_REGION_EN2)
+#define F_REGION_EN2 V_REGION_EN2(1U)
+
+#define S_EDC_REGION2 17
+#define V_EDC_REGION2(x) ((x) << S_EDC_REGION2)
+#define F_EDC_REGION2 V_EDC_REGION2(1U)
+
+#define S_CACHE_REGION2 16
+#define V_CACHE_REGION2(x) ((x) << S_CACHE_REGION2)
+#define F_CACHE_REGION2 V_CACHE_REGION2(1U)
+
+#define S_END2 0
+#define M_END2 0xffffU
+#define V_END2(x) ((x) << S_END2)
+#define G_END2(x) (((x) >> S_END2) & M_END2)
+
+#define A_GCACHE_MEMZONE0_REGION3 0x5140c
+
+#define S_REGION_EN3 18
+#define V_REGION_EN3(x) ((x) << S_REGION_EN3)
+#define F_REGION_EN3 V_REGION_EN3(1U)
+
+#define S_EDC_REGION3 17
+#define V_EDC_REGION3(x) ((x) << S_EDC_REGION3)
+#define F_EDC_REGION3 V_EDC_REGION3(1U)
+
+#define S_CACHE_REGION3 16
+#define V_CACHE_REGION3(x) ((x) << S_CACHE_REGION3)
+#define F_CACHE_REGION3 V_CACHE_REGION3(1U)
+
+#define S_END3 0
+#define M_END3 0xffffU
+#define V_END3(x) ((x) << S_END3)
+#define G_END3(x) (((x) >> S_END3) & M_END3)
+
+#define A_GCACHE_MEMZONE0_REGION4 0x51410
+
+#define S_REGION_EN4 18
+#define V_REGION_EN4(x) ((x) << S_REGION_EN4)
+#define F_REGION_EN4 V_REGION_EN4(1U)
+
+#define S_EDC_REGION4 17
+#define V_EDC_REGION4(x) ((x) << S_EDC_REGION4)
+#define F_EDC_REGION4 V_EDC_REGION4(1U)
+
+#define S_CACHE_REGION4 16
+#define V_CACHE_REGION4(x) ((x) << S_CACHE_REGION4)
+#define F_CACHE_REGION4 V_CACHE_REGION4(1U)
+
+#define S_END4 0
+#define M_END4 0xffffU
+#define V_END4(x) ((x) << S_END4)
+#define G_END4(x) (((x) >> S_END4) & M_END4)
+
+#define A_GCACHE_MEMZONE0_REGION5 0x51414
+
+#define S_REGION_EN5 18
+#define V_REGION_EN5(x) ((x) << S_REGION_EN5)
+#define F_REGION_EN5 V_REGION_EN5(1U)
+
+#define S_EDC_REGION5 17
+#define V_EDC_REGION5(x) ((x) << S_EDC_REGION5)
+#define F_EDC_REGION5 V_EDC_REGION5(1U)
+
+#define S_CACHE_REGION5 16
+#define V_CACHE_REGION5(x) ((x) << S_CACHE_REGION5)
+#define F_CACHE_REGION5 V_CACHE_REGION5(1U)
+
+#define S_END5 0
+#define M_END5 0xffffU
+#define V_END5(x) ((x) << S_END5)
+#define G_END5(x) (((x) >> S_END5) & M_END5)
+
+#define A_GCACHE_MEMZONE0_REGION6 0x51418
+
+#define S_REGION_EN6 18
+#define V_REGION_EN6(x) ((x) << S_REGION_EN6)
+#define F_REGION_EN6 V_REGION_EN6(1U)
+
+#define S_EDC_REGION6 17
+#define V_EDC_REGION6(x) ((x) << S_EDC_REGION6)
+#define F_EDC_REGION6 V_EDC_REGION6(1U)
+
+#define S_CACHE_REGION6 16
+#define V_CACHE_REGION6(x) ((x) << S_CACHE_REGION6)
+#define F_CACHE_REGION6 V_CACHE_REGION6(1U)
+
+#define S_END6 0
+#define M_END6 0xffffU
+#define V_END6(x) ((x) << S_END6)
+#define G_END6(x) (((x) >> S_END6) & M_END6)
+
+#define A_GCACHE_MEMZONE0_REGION7 0x5141c
+
+#define S_REGION_EN7 18
+#define V_REGION_EN7(x) ((x) << S_REGION_EN7)
+#define F_REGION_EN7 V_REGION_EN7(1U)
+
+#define S_EDC_REGION7 17
+#define V_EDC_REGION7(x) ((x) << S_EDC_REGION7)
+#define F_EDC_REGION7 V_EDC_REGION7(1U)
+
+#define S_CACHE_REGION7 16
+#define V_CACHE_REGION7(x) ((x) << S_CACHE_REGION7)
+#define F_CACHE_REGION7 V_CACHE_REGION7(1U)
+
+#define S_END7 0
+#define M_END7 0xffffU
+#define V_END7(x) ((x) << S_END7)
+#define G_END7(x) (((x) >> S_END7) & M_END7)
+
+#define A_GCACHE_MEMZONE0_REGION8 0x51420
+
+#define S_REGION_EN8 18
+#define V_REGION_EN8(x) ((x) << S_REGION_EN8)
+#define F_REGION_EN8 V_REGION_EN8(1U)
+
+#define S_EDC_REGION8 17
+#define V_EDC_REGION8(x) ((x) << S_EDC_REGION8)
+#define F_EDC_REGION8 V_EDC_REGION8(1U)
+
+#define S_CACHE_REGION8 16
+#define V_CACHE_REGION8(x) ((x) << S_CACHE_REGION8)
+#define F_CACHE_REGION8 V_CACHE_REGION8(1U)
+
+#define S_END8 0
+#define M_END8 0xffffU
+#define V_END8(x) ((x) << S_END8)
+#define G_END8(x) (((x) >> S_END8) & M_END8)
+
+#define A_GCACHE_REG0_BASE_MSB 0x51424
+#define A_GCACHE_MEMZONE0_REGION1_MSB 0x51428
+
+#define S_START1 0
+#define M_START1 0xffffU
+#define V_START1(x) ((x) << S_START1)
+#define G_START1(x) (((x) >> S_START1) & M_START1)
+
+#define A_GCACHE_MEMZONE0_REGION2_MSB 0x5142c
+
+#define S_START2 0
+#define M_START2 0xffffU
+#define V_START2(x) ((x) << S_START2)
+#define G_START2(x) (((x) >> S_START2) & M_START2)
+
+#define A_GCACHE_MEMZONE0_REGION3_MSB 0x51430
+
+#define S_START3 0
+#define M_START3 0xffffU
+#define V_START3(x) ((x) << S_START3)
+#define G_START3(x) (((x) >> S_START3) & M_START3)
+
+#define A_GCACHE_MEMZONE0_REGION4_MSB 0x51434
+
+#define S_START4 0
+#define M_START4 0xffffU
+#define V_START4(x) ((x) << S_START4)
+#define G_START4(x) (((x) >> S_START4) & M_START4)
+
+#define A_GCACHE_MEMZONE0_REGION5_MSB 0x51438
+
+#define S_START5 0
+#define M_START5 0xffffU
+#define V_START5(x) ((x) << S_START5)
+#define G_START5(x) (((x) >> S_START5) & M_START5)
+
+#define A_GCACHE_MEMZONE0_REGION6_MSB 0x5143c
+
+#define S_START6 0
+#define M_START6 0xffffU
+#define V_START6(x) ((x) << S_START6)
+#define G_START6(x) (((x) >> S_START6) & M_START6)
+
+#define A_GCACHE_MEMZONE0_REGION7_MSB 0x51440
+
+#define S_START7 0
+#define M_START7 0xffffU
+#define V_START7(x) ((x) << S_START7)
+#define G_START7(x) (((x) >> S_START7) & M_START7)
+
+#define A_GCACHE_MEMZONE0_REGION8_MSB 0x51444
+
+#define S_START8 0
+#define M_START8 0xffffU
+#define V_START8(x) ((x) << S_START8)
+#define G_START8(x) (((x) >> S_START8) & M_START8)
+
+#define A_GCACHE_MODE_SEL1 0x51448
+#define A_GCACHE_MEMZONE1_REGION1 0x5144c
+#define A_GCACHE_MEMZONE1_REGION2 0x51450
+#define A_GCACHE_MEMZONE1_REGION3 0x51454
+#define A_GCACHE_MEMZONE1_REGION4 0x51458
+#define A_GCACHE_MEMZONE1_REGION5 0x5145c
+#define A_GCACHE_MEMZONE1_REGION6 0x51460
+#define A_GCACHE_MEMZONE1_REGION7 0x51464
+#define A_GCACHE_MEMZONE1_REGION8 0x51468
+#define A_GCACHE_MEMZONE1_REGION1_MSB 0x5146c
+#define A_GCACHE_MEMZONE1_REGION2_MSB 0x51470
+#define A_GCACHE_MEMZONE1_REGION3_MSB 0x51474
+#define A_GCACHE_MEMZONE1_REGION4_MSB 0x51478
+#define A_GCACHE_MEMZONE1_REGION5_MSB 0x5147c
+#define A_GCACHE_MEMZONE1_REGION6_MSB 0x51480
+#define A_GCACHE_MEMZONE1_REGION7_MSB 0x51484
+#define A_GCACHE_MEMZONE1_REGION8_MSB 0x51488
+#define A_GCACHE_HMA_MC1_EN 0x5148c
+
+#define S_MC1_EN 1
+#define V_MC1_EN(x) ((x) << S_MC1_EN)
+#define F_MC1_EN V_MC1_EN(1U)
+
+#define S_HMA_EN 0
+#define V_HMA_EN(x) ((x) << S_HMA_EN)
+#define F_HMA_EN V_HMA_EN(1U)
+
+#define A_GCACHE_P_BIST_CMD 0x51490
+#define A_GCACHE_P_BIST_CMD_ADDR 0x51494
+#define A_GCACHE_P_BIST_CMD_LEN 0x51498
+#define A_GCACHE_P_BIST_DATA_PATTERN 0x5149c
+#define A_GCACHE_P_BIST_USER_WDATA0 0x514a0
+#define A_GCACHE_P_BIST_USER_WDATA1 0x514a4
+#define A_GCACHE_P_BIST_USER_WDATA2 0x514a8
+#define A_GCACHE_P_BIST_NUM_ERR 0x514ac
+#define A_GCACHE_P_BIST_ERR_FIRST_ADDR 0x514b0
+#define A_GCACHE_P_BIST_STATUS_RDATA 0x514b4
+#define A_GCACHE_P_BIST_CRC_SEED 0x514fc
+#define A_GCACHE_CACHE_SIZE 0x51500
+
+#define S_HMA_2MB 1
+#define V_HMA_2MB(x) ((x) << S_HMA_2MB)
+#define F_HMA_2MB V_HMA_2MB(1U)
+
+#define S_MC0_2MB 0
+#define V_MC0_2MB(x) ((x) << S_MC0_2MB)
+#define F_MC0_2MB V_MC0_2MB(1U)
+
+#define A_GCACHE_HINT_MAPPING 0x51504
+
+#define S_CLIENT_HINT_EN 16
+#define M_CLIENT_HINT_EN 0x7fffU
+#define V_CLIENT_HINT_EN(x) ((x) << S_CLIENT_HINT_EN)
+#define G_CLIENT_HINT_EN(x) (((x) >> S_CLIENT_HINT_EN) & M_CLIENT_HINT_EN)
+
+#define S_HINT_ADDR_SPLIT_EN 8
+#define V_HINT_ADDR_SPLIT_EN(x) ((x) << S_HINT_ADDR_SPLIT_EN)
+#define F_HINT_ADDR_SPLIT_EN V_HINT_ADDR_SPLIT_EN(1U)
+
+#define S_TP_HINT_HMA_MC 2
+#define V_TP_HINT_HMA_MC(x) ((x) << S_TP_HINT_HMA_MC)
+#define F_TP_HINT_HMA_MC V_TP_HINT_HMA_MC(1U)
+
+#define S_CIM_HINT_HMA_MC 1
+#define V_CIM_HINT_HMA_MC(x) ((x) << S_CIM_HINT_HMA_MC)
+#define F_CIM_HINT_HMA_MC V_CIM_HINT_HMA_MC(1U)
+
+#define S_LE_HINT_HMA_MC 0
+#define V_LE_HINT_HMA_MC(x) ((x) << S_LE_HINT_HMA_MC)
+#define F_LE_HINT_HMA_MC V_LE_HINT_HMA_MC(1U)
+
+#define A_GCACHE_PERF_EN 0x51508
+
+#define S_PERF_CLEAR_GC1 3
+#define V_PERF_CLEAR_GC1(x) ((x) << S_PERF_CLEAR_GC1)
+#define F_PERF_CLEAR_GC1 V_PERF_CLEAR_GC1(1U)
+
+#define S_PERF_CLEAR_GC0 2
+#define V_PERF_CLEAR_GC0(x) ((x) << S_PERF_CLEAR_GC0)
+#define F_PERF_CLEAR_GC0 V_PERF_CLEAR_GC0(1U)
+
+#define S_PERF_EN_GC1 1
+#define V_PERF_EN_GC1(x) ((x) << S_PERF_EN_GC1)
+#define F_PERF_EN_GC1 V_PERF_EN_GC1(1U)
+
+#define S_PERF_EN_GC0 0
+#define V_PERF_EN_GC0(x) ((x) << S_PERF_EN_GC0)
+#define F_PERF_EN_GC0 V_PERF_EN_GC0(1U)
+
+#define A_GCACHE_PERF_GC0_RD_HIT 0x5150c
+#define A_GCACHE_PERF_GC1_RD_HIT 0x51510
+#define A_GCACHE_PERF_GC0_WR_HIT 0x51514
+#define A_GCACHE_PERF_GC1_WR_HIT 0x51518
+#define A_GCACHE_PERF_GC0_RD_MISS 0x5151c
+#define A_GCACHE_PERF_GC1_RD_MISS 0x51520
+#define A_GCACHE_PERF_GC0_WR_MISS 0x51524
+#define A_GCACHE_PERF_GC1_WR_MISS 0x51528
+#define A_GCACHE_PERF_GC0_RD_REQ 0x5152c
+#define A_GCACHE_PERF_GC1_RD_REQ 0x51530
+#define A_GCACHE_PERF_GC0_WR_REQ 0x51534
+#define A_GCACHE_PERF_GC1_WR_REQ 0x51538
+#define A_GCACHE_PAR_CAUSE 0x5153c
+
+#define S_GC1_SRAM_RSP_DATAQ_PERR_PAR_CAUSE 27
+#define V_GC1_SRAM_RSP_DATAQ_PERR_PAR_CAUSE(x) ((x) << S_GC1_SRAM_RSP_DATAQ_PERR_PAR_CAUSE)
+#define F_GC1_SRAM_RSP_DATAQ_PERR_PAR_CAUSE V_GC1_SRAM_RSP_DATAQ_PERR_PAR_CAUSE(1U)
+
+#define S_GC0_SRAM_RSP_DATAQ_PERR_PAR_CAUSE 26
+#define V_GC0_SRAM_RSP_DATAQ_PERR_PAR_CAUSE(x) ((x) << S_GC0_SRAM_RSP_DATAQ_PERR_PAR_CAUSE)
+#define F_GC0_SRAM_RSP_DATAQ_PERR_PAR_CAUSE V_GC0_SRAM_RSP_DATAQ_PERR_PAR_CAUSE(1U)
+
+#define S_GC1_WQDATA_FIFO_PERR_PAR_CAUSE 25
+#define V_GC1_WQDATA_FIFO_PERR_PAR_CAUSE(x) ((x) << S_GC1_WQDATA_FIFO_PERR_PAR_CAUSE)
+#define F_GC1_WQDATA_FIFO_PERR_PAR_CAUSE V_GC1_WQDATA_FIFO_PERR_PAR_CAUSE(1U)
+
+#define S_GC0_WQDATA_FIFO_PERR_PAR_CAUSE 24
+#define V_GC0_WQDATA_FIFO_PERR_PAR_CAUSE(x) ((x) << S_GC0_WQDATA_FIFO_PERR_PAR_CAUSE)
+#define F_GC0_WQDATA_FIFO_PERR_PAR_CAUSE V_GC0_WQDATA_FIFO_PERR_PAR_CAUSE(1U)
+
+#define S_GC1_RDTAG_QUEUE_PERR_PAR_CAUSE 23
+#define V_GC1_RDTAG_QUEUE_PERR_PAR_CAUSE(x) ((x) << S_GC1_RDTAG_QUEUE_PERR_PAR_CAUSE)
+#define F_GC1_RDTAG_QUEUE_PERR_PAR_CAUSE V_GC1_RDTAG_QUEUE_PERR_PAR_CAUSE(1U)
+
+#define S_GC0_RDTAG_QUEUE_PERR_PAR_CAUSE 22
+#define V_GC0_RDTAG_QUEUE_PERR_PAR_CAUSE(x) ((x) << S_GC0_RDTAG_QUEUE_PERR_PAR_CAUSE)
+#define F_GC0_RDTAG_QUEUE_PERR_PAR_CAUSE V_GC0_RDTAG_QUEUE_PERR_PAR_CAUSE(1U)
+
+#define S_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE 21
+#define V_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE(x) ((x) << S_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE)
+#define F_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE V_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE(1U)
+
+#define S_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE 20
+#define V_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE(x) ((x) << S_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE)
+#define F_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE V_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE(1U)
+
+#define S_GC1_RSP_PERR_PAR_CAUSE 19
+#define V_GC1_RSP_PERR_PAR_CAUSE(x) ((x) << S_GC1_RSP_PERR_PAR_CAUSE)
+#define F_GC1_RSP_PERR_PAR_CAUSE V_GC1_RSP_PERR_PAR_CAUSE(1U)
+
+#define S_GC0_RSP_PERR_PAR_CAUSE 18
+#define V_GC0_RSP_PERR_PAR_CAUSE(x) ((x) << S_GC0_RSP_PERR_PAR_CAUSE)
+#define F_GC0_RSP_PERR_PAR_CAUSE V_GC0_RSP_PERR_PAR_CAUSE(1U)
+
+#define S_GC1_LRU_UERR_PAR_CAUSE 17
+#define V_GC1_LRU_UERR_PAR_CAUSE(x) ((x) << S_GC1_LRU_UERR_PAR_CAUSE)
+#define F_GC1_LRU_UERR_PAR_CAUSE V_GC1_LRU_UERR_PAR_CAUSE(1U)
+
+#define S_GC0_LRU_UERR_PAR_CAUSE 16
+#define V_GC0_LRU_UERR_PAR_CAUSE(x) ((x) << S_GC0_LRU_UERR_PAR_CAUSE)
+#define F_GC0_LRU_UERR_PAR_CAUSE V_GC0_LRU_UERR_PAR_CAUSE(1U)
+
+#define S_GC1_TAG_UERR_PAR_CAUSE 15
+#define V_GC1_TAG_UERR_PAR_CAUSE(x) ((x) << S_GC1_TAG_UERR_PAR_CAUSE)
+#define F_GC1_TAG_UERR_PAR_CAUSE V_GC1_TAG_UERR_PAR_CAUSE(1U)
+
+#define S_GC0_TAG_UERR_PAR_CAUSE 14
+#define V_GC0_TAG_UERR_PAR_CAUSE(x) ((x) << S_GC0_TAG_UERR_PAR_CAUSE)
+#define F_GC0_TAG_UERR_PAR_CAUSE V_GC0_TAG_UERR_PAR_CAUSE(1U)
+
+#define S_GC1_LRU_CERR_PAR_CAUSE 13
+#define V_GC1_LRU_CERR_PAR_CAUSE(x) ((x) << S_GC1_LRU_CERR_PAR_CAUSE)
+#define F_GC1_LRU_CERR_PAR_CAUSE V_GC1_LRU_CERR_PAR_CAUSE(1U)
+
+#define S_GC0_LRU_CERR_PAR_CAUSE 12
+#define V_GC0_LRU_CERR_PAR_CAUSE(x) ((x) << S_GC0_LRU_CERR_PAR_CAUSE)
+#define F_GC0_LRU_CERR_PAR_CAUSE V_GC0_LRU_CERR_PAR_CAUSE(1U)
+
+#define S_GC1_TAG_CERR_PAR_CAUSE 11
+#define V_GC1_TAG_CERR_PAR_CAUSE(x) ((x) << S_GC1_TAG_CERR_PAR_CAUSE)
+#define F_GC1_TAG_CERR_PAR_CAUSE V_GC1_TAG_CERR_PAR_CAUSE(1U)
+
+#define S_GC0_TAG_CERR_PAR_CAUSE 10
+#define V_GC0_TAG_CERR_PAR_CAUSE(x) ((x) << S_GC0_TAG_CERR_PAR_CAUSE)
+#define F_GC0_TAG_CERR_PAR_CAUSE V_GC0_TAG_CERR_PAR_CAUSE(1U)
+
+#define S_GC1_CE_PAR_CAUSE 9
+#define V_GC1_CE_PAR_CAUSE(x) ((x) << S_GC1_CE_PAR_CAUSE)
+#define F_GC1_CE_PAR_CAUSE V_GC1_CE_PAR_CAUSE(1U)
+
+#define S_GC0_CE_PAR_CAUSE 8
+#define V_GC0_CE_PAR_CAUSE(x) ((x) << S_GC0_CE_PAR_CAUSE)
+#define F_GC0_CE_PAR_CAUSE V_GC0_CE_PAR_CAUSE(1U)
+
+#define S_GC1_UE_PAR_CAUSE 7
+#define V_GC1_UE_PAR_CAUSE(x) ((x) << S_GC1_UE_PAR_CAUSE)
+#define F_GC1_UE_PAR_CAUSE V_GC1_UE_PAR_CAUSE(1U)
+
+#define S_GC0_UE_PAR_CAUSE 6
+#define V_GC0_UE_PAR_CAUSE(x) ((x) << S_GC0_UE_PAR_CAUSE)
+#define F_GC0_UE_PAR_CAUSE V_GC0_UE_PAR_CAUSE(1U)
+
+#define S_GC1_CMD_PAR_CAUSE 5
+#define V_GC1_CMD_PAR_CAUSE(x) ((x) << S_GC1_CMD_PAR_CAUSE)
+#define F_GC1_CMD_PAR_CAUSE V_GC1_CMD_PAR_CAUSE(1U)
+
+#define S_GC1_DATA_PAR_CAUSE 4
+#define V_GC1_DATA_PAR_CAUSE(x) ((x) << S_GC1_DATA_PAR_CAUSE)
+#define F_GC1_DATA_PAR_CAUSE V_GC1_DATA_PAR_CAUSE(1U)
+
+#define S_GC0_CMD_PAR_CAUSE 3
+#define V_GC0_CMD_PAR_CAUSE(x) ((x) << S_GC0_CMD_PAR_CAUSE)
+#define F_GC0_CMD_PAR_CAUSE V_GC0_CMD_PAR_CAUSE(1U)
+
+#define S_GC0_DATA_PAR_CAUSE 2
+#define V_GC0_DATA_PAR_CAUSE(x) ((x) << S_GC0_DATA_PAR_CAUSE)
+#define F_GC0_DATA_PAR_CAUSE V_GC0_DATA_PAR_CAUSE(1U)
+
+#define S_ILLADDRACCESS1_PAR_CAUSE 1
+#define V_ILLADDRACCESS1_PAR_CAUSE(x) ((x) << S_ILLADDRACCESS1_PAR_CAUSE)
+#define F_ILLADDRACCESS1_PAR_CAUSE V_ILLADDRACCESS1_PAR_CAUSE(1U)
+
+#define S_ILLADDRACCESS0_PAR_CAUSE 0
+#define V_ILLADDRACCESS0_PAR_CAUSE(x) ((x) << S_ILLADDRACCESS0_PAR_CAUSE)
+#define F_ILLADDRACCESS0_PAR_CAUSE V_ILLADDRACCESS0_PAR_CAUSE(1U)
+
+#define A_GCACHE_PAR_ENABLE 0x51540
+
+#define S_GC1_SRAM_RSP_DATAQ_PERR_PAR_ENABLE 27
+#define V_GC1_SRAM_RSP_DATAQ_PERR_PAR_ENABLE(x) ((x) << S_GC1_SRAM_RSP_DATAQ_PERR_PAR_ENABLE)
+#define F_GC1_SRAM_RSP_DATAQ_PERR_PAR_ENABLE V_GC1_SRAM_RSP_DATAQ_PERR_PAR_ENABLE(1U)
+
+#define S_GC0_SRAM_RSP_DATAQ_PERR_PAR_ENABLE 26
+#define V_GC0_SRAM_RSP_DATAQ_PERR_PAR_ENABLE(x) ((x) << S_GC0_SRAM_RSP_DATAQ_PERR_PAR_ENABLE)
+#define F_GC0_SRAM_RSP_DATAQ_PERR_PAR_ENABLE V_GC0_SRAM_RSP_DATAQ_PERR_PAR_ENABLE(1U)
+
+#define S_GC1_WQDATA_FIFO_PERR_PAR_ENABLE 25
+#define V_GC1_WQDATA_FIFO_PERR_PAR_ENABLE(x) ((x) << S_GC1_WQDATA_FIFO_PERR_PAR_ENABLE)
+#define F_GC1_WQDATA_FIFO_PERR_PAR_ENABLE V_GC1_WQDATA_FIFO_PERR_PAR_ENABLE(1U)
+
+#define S_GC0_WQDATA_FIFO_PERR_PAR_ENABLE 24
+#define V_GC0_WQDATA_FIFO_PERR_PAR_ENABLE(x) ((x) << S_GC0_WQDATA_FIFO_PERR_PAR_ENABLE)
+#define F_GC0_WQDATA_FIFO_PERR_PAR_ENABLE V_GC0_WQDATA_FIFO_PERR_PAR_ENABLE(1U)
+
+#define S_GC1_RDTAG_QUEUE_PERR_PAR_ENABLE 23
+#define V_GC1_RDTAG_QUEUE_PERR_PAR_ENABLE(x) ((x) << S_GC1_RDTAG_QUEUE_PERR_PAR_ENABLE)
+#define F_GC1_RDTAG_QUEUE_PERR_PAR_ENABLE V_GC1_RDTAG_QUEUE_PERR_PAR_ENABLE(1U)
+
+#define S_GC0_RDTAG_QUEUE_PERR_PAR_ENABLE 22
+#define V_GC0_RDTAG_QUEUE_PERR_PAR_ENABLE(x) ((x) << S_GC0_RDTAG_QUEUE_PERR_PAR_ENABLE)
+#define F_GC0_RDTAG_QUEUE_PERR_PAR_ENABLE V_GC0_RDTAG_QUEUE_PERR_PAR_ENABLE(1U)
+
+#define S_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE 21
+#define V_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE(x) ((x) << S_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE)
+#define F_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE V_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE(1U)
+
+#define S_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE 20
+#define V_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE(x) ((x) << S_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE)
+#define F_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE V_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE(1U)
+
+#define S_GC1_RSP_PERR_PAR_ENABLE 19
+#define V_GC1_RSP_PERR_PAR_ENABLE(x) ((x) << S_GC1_RSP_PERR_PAR_ENABLE)
+#define F_GC1_RSP_PERR_PAR_ENABLE V_GC1_RSP_PERR_PAR_ENABLE(1U)
+
+#define S_GC0_RSP_PERR_PAR_ENABLE 18
+#define V_GC0_RSP_PERR_PAR_ENABLE(x) ((x) << S_GC0_RSP_PERR_PAR_ENABLE)
+#define F_GC0_RSP_PERR_PAR_ENABLE V_GC0_RSP_PERR_PAR_ENABLE(1U)
+
+#define S_GC1_LRU_UERR_PAR_ENABLE 17
+#define V_GC1_LRU_UERR_PAR_ENABLE(x) ((x) << S_GC1_LRU_UERR_PAR_ENABLE)
+#define F_GC1_LRU_UERR_PAR_ENABLE V_GC1_LRU_UERR_PAR_ENABLE(1U)
+
+#define S_GC0_LRU_UERR_PAR_ENABLE 16
+#define V_GC0_LRU_UERR_PAR_ENABLE(x) ((x) << S_GC0_LRU_UERR_PAR_ENABLE)
+#define F_GC0_LRU_UERR_PAR_ENABLE V_GC0_LRU_UERR_PAR_ENABLE(1U)
+
+#define S_GC1_TAG_UERR_PAR_ENABLE 15
+#define V_GC1_TAG_UERR_PAR_ENABLE(x) ((x) << S_GC1_TAG_UERR_PAR_ENABLE)
+#define F_GC1_TAG_UERR_PAR_ENABLE V_GC1_TAG_UERR_PAR_ENABLE(1U)
+
+#define S_GC0_TAG_UERR_PAR_ENABLE 14
+#define V_GC0_TAG_UERR_PAR_ENABLE(x) ((x) << S_GC0_TAG_UERR_PAR_ENABLE)
+#define F_GC0_TAG_UERR_PAR_ENABLE V_GC0_TAG_UERR_PAR_ENABLE(1U)
+
+#define S_GC1_LRU_CERR_PAR_ENABLE 13
+#define V_GC1_LRU_CERR_PAR_ENABLE(x) ((x) << S_GC1_LRU_CERR_PAR_ENABLE)
+#define F_GC1_LRU_CERR_PAR_ENABLE V_GC1_LRU_CERR_PAR_ENABLE(1U)
+
+#define S_GC0_LRU_CERR_PAR_ENABLE 12
+#define V_GC0_LRU_CERR_PAR_ENABLE(x) ((x) << S_GC0_LRU_CERR_PAR_ENABLE)
+#define F_GC0_LRU_CERR_PAR_ENABLE V_GC0_LRU_CERR_PAR_ENABLE(1U)
+
+#define S_GC1_TAG_CERR_PAR_ENABLE 11
+#define V_GC1_TAG_CERR_PAR_ENABLE(x) ((x) << S_GC1_TAG_CERR_PAR_ENABLE)
+#define F_GC1_TAG_CERR_PAR_ENABLE V_GC1_TAG_CERR_PAR_ENABLE(1U)
+
+#define S_GC0_TAG_CERR_PAR_ENABLE 10
+#define V_GC0_TAG_CERR_PAR_ENABLE(x) ((x) << S_GC0_TAG_CERR_PAR_ENABLE)
+#define F_GC0_TAG_CERR_PAR_ENABLE V_GC0_TAG_CERR_PAR_ENABLE(1U)
+
+#define S_GC1_CE_PAR_ENABLE 9
+#define V_GC1_CE_PAR_ENABLE(x) ((x) << S_GC1_CE_PAR_ENABLE)
+#define F_GC1_CE_PAR_ENABLE V_GC1_CE_PAR_ENABLE(1U)
+
+#define S_GC0_CE_PAR_ENABLE 8
+#define V_GC0_CE_PAR_ENABLE(x) ((x) << S_GC0_CE_PAR_ENABLE)
+#define F_GC0_CE_PAR_ENABLE V_GC0_CE_PAR_ENABLE(1U)
+
+#define S_GC1_UE_PAR_ENABLE 7
+#define V_GC1_UE_PAR_ENABLE(x) ((x) << S_GC1_UE_PAR_ENABLE)
+#define F_GC1_UE_PAR_ENABLE V_GC1_UE_PAR_ENABLE(1U)
+
+#define S_GC0_UE_PAR_ENABLE 6
+#define V_GC0_UE_PAR_ENABLE(x) ((x) << S_GC0_UE_PAR_ENABLE)
+#define F_GC0_UE_PAR_ENABLE V_GC0_UE_PAR_ENABLE(1U)
+
+#define S_GC1_CMD_PAR_ENABLE 5
+#define V_GC1_CMD_PAR_ENABLE(x) ((x) << S_GC1_CMD_PAR_ENABLE)
+#define F_GC1_CMD_PAR_ENABLE V_GC1_CMD_PAR_ENABLE(1U)
+
+#define S_GC1_DATA_PAR_ENABLE 4
+#define V_GC1_DATA_PAR_ENABLE(x) ((x) << S_GC1_DATA_PAR_ENABLE)
+#define F_GC1_DATA_PAR_ENABLE V_GC1_DATA_PAR_ENABLE(1U)
+
+#define S_GC0_CMD_PAR_ENABLE 3
+#define V_GC0_CMD_PAR_ENABLE(x) ((x) << S_GC0_CMD_PAR_ENABLE)
+#define F_GC0_CMD_PAR_ENABLE V_GC0_CMD_PAR_ENABLE(1U)
+
+#define S_GC0_DATA_PAR_ENABLE 2
+#define V_GC0_DATA_PAR_ENABLE(x) ((x) << S_GC0_DATA_PAR_ENABLE)
+#define F_GC0_DATA_PAR_ENABLE V_GC0_DATA_PAR_ENABLE(1U)
+
+#define S_ILLADDRACCESS1_PAR_ENABLE 1
+#define V_ILLADDRACCESS1_PAR_ENABLE(x) ((x) << S_ILLADDRACCESS1_PAR_ENABLE)
+#define F_ILLADDRACCESS1_PAR_ENABLE V_ILLADDRACCESS1_PAR_ENABLE(1U)
+
+#define S_ILLADDRACCESS0_PAR_ENABLE 0
+#define V_ILLADDRACCESS0_PAR_ENABLE(x) ((x) << S_ILLADDRACCESS0_PAR_ENABLE)
+#define F_ILLADDRACCESS0_PAR_ENABLE V_ILLADDRACCESS0_PAR_ENABLE(1U)
+
+#define A_GCACHE_INT_ENABLE 0x51544
+
+#define S_GC1_SRAM_RSP_DATAQ_PERR_INT_ENABLE 27
+#define V_GC1_SRAM_RSP_DATAQ_PERR_INT_ENABLE(x) ((x) << S_GC1_SRAM_RSP_DATAQ_PERR_INT_ENABLE)
+#define F_GC1_SRAM_RSP_DATAQ_PERR_INT_ENABLE V_GC1_SRAM_RSP_DATAQ_PERR_INT_ENABLE(1U)
+
+#define S_GC0_SRAM_RSP_DATAQ_PERR_INT_ENABLE 26
+#define V_GC0_SRAM_RSP_DATAQ_PERR_INT_ENABLE(x) ((x) << S_GC0_SRAM_RSP_DATAQ_PERR_INT_ENABLE)
+#define F_GC0_SRAM_RSP_DATAQ_PERR_INT_ENABLE V_GC0_SRAM_RSP_DATAQ_PERR_INT_ENABLE(1U)
+
+#define S_GC1_WQDATA_FIFO_PERR_INT_ENABLE 25
+#define V_GC1_WQDATA_FIFO_PERR_INT_ENABLE(x) ((x) << S_GC1_WQDATA_FIFO_PERR_INT_ENABLE)
+#define F_GC1_WQDATA_FIFO_PERR_INT_ENABLE V_GC1_WQDATA_FIFO_PERR_INT_ENABLE(1U)
+
+#define S_GC0_WQDATA_FIFO_PERR_INT_ENABLE 24
+#define V_GC0_WQDATA_FIFO_PERR_INT_ENABLE(x) ((x) << S_GC0_WQDATA_FIFO_PERR_INT_ENABLE)
+#define F_GC0_WQDATA_FIFO_PERR_INT_ENABLE V_GC0_WQDATA_FIFO_PERR_INT_ENABLE(1U)
+
+#define S_GC1_RDTAG_QUEUE_PERR_INT_ENABLE 23
+#define V_GC1_RDTAG_QUEUE_PERR_INT_ENABLE(x) ((x) << S_GC1_RDTAG_QUEUE_PERR_INT_ENABLE)
+#define F_GC1_RDTAG_QUEUE_PERR_INT_ENABLE V_GC1_RDTAG_QUEUE_PERR_INT_ENABLE(1U)
+
+#define S_GC0_RDTAG_QUEUE_PERR_INT_ENABLE 22
+#define V_GC0_RDTAG_QUEUE_PERR_INT_ENABLE(x) ((x) << S_GC0_RDTAG_QUEUE_PERR_INT_ENABLE)
+#define F_GC0_RDTAG_QUEUE_PERR_INT_ENABLE V_GC0_RDTAG_QUEUE_PERR_INT_ENABLE(1U)
+
+#define S_GC1_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE 21
+#define V_GC1_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE(x) ((x) << S_GC1_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE)
+#define F_GC1_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE V_GC1_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE(1U)
+
+#define S_GC0_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE 20
+#define V_GC0_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE(x) ((x) << S_GC0_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE)
+#define F_GC0_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE V_GC0_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE(1U)
+
+#define S_GC1_RSP_PERR_INT_ENABLE 19
+#define V_GC1_RSP_PERR_INT_ENABLE(x) ((x) << S_GC1_RSP_PERR_INT_ENABLE)
+#define F_GC1_RSP_PERR_INT_ENABLE V_GC1_RSP_PERR_INT_ENABLE(1U)
+
+#define S_GC0_RSP_PERR_INT_ENABLE 18
+#define V_GC0_RSP_PERR_INT_ENABLE(x) ((x) << S_GC0_RSP_PERR_INT_ENABLE)
+#define F_GC0_RSP_PERR_INT_ENABLE V_GC0_RSP_PERR_INT_ENABLE(1U)
+
+#define S_GC1_LRU_UERR_INT_ENABLE 17
+#define V_GC1_LRU_UERR_INT_ENABLE(x) ((x) << S_GC1_LRU_UERR_INT_ENABLE)
+#define F_GC1_LRU_UERR_INT_ENABLE V_GC1_LRU_UERR_INT_ENABLE(1U)
+
+#define S_GC0_LRU_UERR_INT_ENABLE 16
+#define V_GC0_LRU_UERR_INT_ENABLE(x) ((x) << S_GC0_LRU_UERR_INT_ENABLE)
+#define F_GC0_LRU_UERR_INT_ENABLE V_GC0_LRU_UERR_INT_ENABLE(1U)
+
+#define S_GC1_TAG_UERR_INT_ENABLE 15
+#define V_GC1_TAG_UERR_INT_ENABLE(x) ((x) << S_GC1_TAG_UERR_INT_ENABLE)
+#define F_GC1_TAG_UERR_INT_ENABLE V_GC1_TAG_UERR_INT_ENABLE(1U)
+
+#define S_GC0_TAG_UERR_INT_ENABLE 14
+#define V_GC0_TAG_UERR_INT_ENABLE(x) ((x) << S_GC0_TAG_UERR_INT_ENABLE)
+#define F_GC0_TAG_UERR_INT_ENABLE V_GC0_TAG_UERR_INT_ENABLE(1U)
+
+#define S_GC1_LRU_CERR_INT_ENABLE 13
+#define V_GC1_LRU_CERR_INT_ENABLE(x) ((x) << S_GC1_LRU_CERR_INT_ENABLE)
+#define F_GC1_LRU_CERR_INT_ENABLE V_GC1_LRU_CERR_INT_ENABLE(1U)
+
+#define S_GC0_LRU_CERR_INT_ENABLE 12
+#define V_GC0_LRU_CERR_INT_ENABLE(x) ((x) << S_GC0_LRU_CERR_INT_ENABLE)
+#define F_GC0_LRU_CERR_INT_ENABLE V_GC0_LRU_CERR_INT_ENABLE(1U)
+
+#define S_GC1_TAG_CERR_INT_ENABLE 11
+#define V_GC1_TAG_CERR_INT_ENABLE(x) ((x) << S_GC1_TAG_CERR_INT_ENABLE)
+#define F_GC1_TAG_CERR_INT_ENABLE V_GC1_TAG_CERR_INT_ENABLE(1U)
+
+#define S_GC0_TAG_CERR_INT_ENABLE 10
+#define V_GC0_TAG_CERR_INT_ENABLE(x) ((x) << S_GC0_TAG_CERR_INT_ENABLE)
+#define F_GC0_TAG_CERR_INT_ENABLE V_GC0_TAG_CERR_INT_ENABLE(1U)
+
+#define S_GC1_CE_INT_ENABLE 9
+#define V_GC1_CE_INT_ENABLE(x) ((x) << S_GC1_CE_INT_ENABLE)
+#define F_GC1_CE_INT_ENABLE V_GC1_CE_INT_ENABLE(1U)
+
+#define S_GC0_CE_INT_ENABLE 8
+#define V_GC0_CE_INT_ENABLE(x) ((x) << S_GC0_CE_INT_ENABLE)
+#define F_GC0_CE_INT_ENABLE V_GC0_CE_INT_ENABLE(1U)
+
+#define S_GC1_UE_INT_ENABLE 7
+#define V_GC1_UE_INT_ENABLE(x) ((x) << S_GC1_UE_INT_ENABLE)
+#define F_GC1_UE_INT_ENABLE V_GC1_UE_INT_ENABLE(1U)
+
+#define S_GC0_UE_INT_ENABLE 6
+#define V_GC0_UE_INT_ENABLE(x) ((x) << S_GC0_UE_INT_ENABLE)
+#define F_GC0_UE_INT_ENABLE V_GC0_UE_INT_ENABLE(1U)
+
+#define S_GC1_CMD_PAR_INT_ENABLE 5
+#define V_GC1_CMD_PAR_INT_ENABLE(x) ((x) << S_GC1_CMD_PAR_INT_ENABLE)
+#define F_GC1_CMD_PAR_INT_ENABLE V_GC1_CMD_PAR_INT_ENABLE(1U)
+
+#define S_GC1_DATA_PAR_INT_ENABLE 4
+#define V_GC1_DATA_PAR_INT_ENABLE(x) ((x) << S_GC1_DATA_PAR_INT_ENABLE)
+#define F_GC1_DATA_PAR_INT_ENABLE V_GC1_DATA_PAR_INT_ENABLE(1U)
+
+#define S_GC0_CMD_PAR_INT_ENABLE 3
+#define V_GC0_CMD_PAR_INT_ENABLE(x) ((x) << S_GC0_CMD_PAR_INT_ENABLE)
+#define F_GC0_CMD_PAR_INT_ENABLE V_GC0_CMD_PAR_INT_ENABLE(1U)
+
+#define S_GC0_DATA_PAR_INT_ENABLE 2
+#define V_GC0_DATA_PAR_INT_ENABLE(x) ((x) << S_GC0_DATA_PAR_INT_ENABLE)
+#define F_GC0_DATA_PAR_INT_ENABLE V_GC0_DATA_PAR_INT_ENABLE(1U)
+
+#define S_ILLADDRACCESS1_INT_ENABLE 1
+#define V_ILLADDRACCESS1_INT_ENABLE(x) ((x) << S_ILLADDRACCESS1_INT_ENABLE)
+#define F_ILLADDRACCESS1_INT_ENABLE V_ILLADDRACCESS1_INT_ENABLE(1U)
+
+#define S_ILLADDRACCESS0_INT_ENABLE 0
+#define V_ILLADDRACCESS0_INT_ENABLE(x) ((x) << S_ILLADDRACCESS0_INT_ENABLE)
+#define F_ILLADDRACCESS0_INT_ENABLE V_ILLADDRACCESS0_INT_ENABLE(1U)
+
+#define A_GCACHE_INT_CAUSE 0x51548
+
+#define S_GC1_SRAM_RSP_DATAQ_PERR_INT_CAUSE 27
+#define V_GC1_SRAM_RSP_DATAQ_PERR_INT_CAUSE(x) ((x) << S_GC1_SRAM_RSP_DATAQ_PERR_INT_CAUSE)
+#define F_GC1_SRAM_RSP_DATAQ_PERR_INT_CAUSE V_GC1_SRAM_RSP_DATAQ_PERR_INT_CAUSE(1U)
+
+#define S_GC0_SRAM_RSP_DATAQ_PERR_INT_CAUSE 26
+#define V_GC0_SRAM_RSP_DATAQ_PERR_INT_CAUSE(x) ((x) << S_GC0_SRAM_RSP_DATAQ_PERR_INT_CAUSE)
+#define F_GC0_SRAM_RSP_DATAQ_PERR_INT_CAUSE V_GC0_SRAM_RSP_DATAQ_PERR_INT_CAUSE(1U)
+
+#define S_GC1_WQDATA_FIFO_PERR_INT_CAUSE 25
+#define V_GC1_WQDATA_FIFO_PERR_INT_CAUSE(x) ((x) << S_GC1_WQDATA_FIFO_PERR_INT_CAUSE)
+#define F_GC1_WQDATA_FIFO_PERR_INT_CAUSE V_GC1_WQDATA_FIFO_PERR_INT_CAUSE(1U)
+
+#define S_GC0_WQDATA_FIFO_PERR_INT_CAUSE 24
+#define V_GC0_WQDATA_FIFO_PERR_INT_CAUSE(x) ((x) << S_GC0_WQDATA_FIFO_PERR_INT_CAUSE)
+#define F_GC0_WQDATA_FIFO_PERR_INT_CAUSE V_GC0_WQDATA_FIFO_PERR_INT_CAUSE(1U)
+
+#define S_GC1_RDTAG_QUEUE_PERR_INT_CAUSE 23
+#define V_GC1_RDTAG_QUEUE_PERR_INT_CAUSE(x) ((x) << S_GC1_RDTAG_QUEUE_PERR_INT_CAUSE)
+#define F_GC1_RDTAG_QUEUE_PERR_INT_CAUSE V_GC1_RDTAG_QUEUE_PERR_INT_CAUSE(1U)
+
+#define S_GC0_RDTAG_QUEUE_PERR_INT_CAUSE 22
+#define V_GC0_RDTAG_QUEUE_PERR_INT_CAUSE(x) ((x) << S_GC0_RDTAG_QUEUE_PERR_INT_CAUSE)
+#define F_GC0_RDTAG_QUEUE_PERR_INT_CAUSE V_GC0_RDTAG_QUEUE_PERR_INT_CAUSE(1U)
+
+#define S_GC1_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE 21
+#define V_GC1_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE(x) ((x) << S_GC1_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE)
+#define F_GC1_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE V_GC1_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE(1U)
+
+#define S_GC0_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE 20
+#define V_GC0_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE(x) ((x) << S_GC0_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE)
+#define F_GC0_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE V_GC0_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE(1U)
+
+#define S_GC1_RSP_PERR_INT_CAUSE 19
+#define V_GC1_RSP_PERR_INT_CAUSE(x) ((x) << S_GC1_RSP_PERR_INT_CAUSE)
+#define F_GC1_RSP_PERR_INT_CAUSE V_GC1_RSP_PERR_INT_CAUSE(1U)
+
+#define S_GC0_RSP_PERR_INT_CAUSE 18
+#define V_GC0_RSP_PERR_INT_CAUSE(x) ((x) << S_GC0_RSP_PERR_INT_CAUSE)
+#define F_GC0_RSP_PERR_INT_CAUSE V_GC0_RSP_PERR_INT_CAUSE(1U)
+
+#define S_GC1_LRU_UERR_INT_CAUSE 17
+#define V_GC1_LRU_UERR_INT_CAUSE(x) ((x) << S_GC1_LRU_UERR_INT_CAUSE)
+#define F_GC1_LRU_UERR_INT_CAUSE V_GC1_LRU_UERR_INT_CAUSE(1U)
+
+#define S_GC0_LRU_UERR_INT_CAUSE 16
+#define V_GC0_LRU_UERR_INT_CAUSE(x) ((x) << S_GC0_LRU_UERR_INT_CAUSE)
+#define F_GC0_LRU_UERR_INT_CAUSE V_GC0_LRU_UERR_INT_CAUSE(1U)
+
+#define S_GC1_TAG_UERR_INT_CAUSE 15
+#define V_GC1_TAG_UERR_INT_CAUSE(x) ((x) << S_GC1_TAG_UERR_INT_CAUSE)
+#define F_GC1_TAG_UERR_INT_CAUSE V_GC1_TAG_UERR_INT_CAUSE(1U)
+
+#define S_GC0_TAG_UERR_INT_CAUSE 14
+#define V_GC0_TAG_UERR_INT_CAUSE(x) ((x) << S_GC0_TAG_UERR_INT_CAUSE)
+#define F_GC0_TAG_UERR_INT_CAUSE V_GC0_TAG_UERR_INT_CAUSE(1U)
+
+#define S_GC1_LRU_CERR_INT_CAUSE 13
+#define V_GC1_LRU_CERR_INT_CAUSE(x) ((x) << S_GC1_LRU_CERR_INT_CAUSE)
+#define F_GC1_LRU_CERR_INT_CAUSE V_GC1_LRU_CERR_INT_CAUSE(1U)
+
+#define S_GC0_LRU_CERR_INT_CAUSE 12
+#define V_GC0_LRU_CERR_INT_CAUSE(x) ((x) << S_GC0_LRU_CERR_INT_CAUSE)
+#define F_GC0_LRU_CERR_INT_CAUSE V_GC0_LRU_CERR_INT_CAUSE(1U)
+
+#define S_GC1_TAG_CERR_INT_CAUSE 11
+#define V_GC1_TAG_CERR_INT_CAUSE(x) ((x) << S_GC1_TAG_CERR_INT_CAUSE)
+#define F_GC1_TAG_CERR_INT_CAUSE V_GC1_TAG_CERR_INT_CAUSE(1U)
+
+#define S_GC0_TAG_CERR_INT_CAUSE 10
+#define V_GC0_TAG_CERR_INT_CAUSE(x) ((x) << S_GC0_TAG_CERR_INT_CAUSE)
+#define F_GC0_TAG_CERR_INT_CAUSE V_GC0_TAG_CERR_INT_CAUSE(1U)
+
+#define S_GC1_CE_INT_CAUSE 9
+#define V_GC1_CE_INT_CAUSE(x) ((x) << S_GC1_CE_INT_CAUSE)
+#define F_GC1_CE_INT_CAUSE V_GC1_CE_INT_CAUSE(1U)
+
+#define S_GC0_CE_INT_CAUSE 8
+#define V_GC0_CE_INT_CAUSE(x) ((x) << S_GC0_CE_INT_CAUSE)
+#define F_GC0_CE_INT_CAUSE V_GC0_CE_INT_CAUSE(1U)
+
+#define S_GC1_UE_INT_CAUSE 7
+#define V_GC1_UE_INT_CAUSE(x) ((x) << S_GC1_UE_INT_CAUSE)
+#define F_GC1_UE_INT_CAUSE V_GC1_UE_INT_CAUSE(1U)
+
+#define S_GC0_UE_INT_CAUSE 6
+#define V_GC0_UE_INT_CAUSE(x) ((x) << S_GC0_UE_INT_CAUSE)
+#define F_GC0_UE_INT_CAUSE V_GC0_UE_INT_CAUSE(1U)
+
+#define S_GC1_CMD_PAR_INT_CAUSE 5
+#define V_GC1_CMD_PAR_INT_CAUSE(x) ((x) << S_GC1_CMD_PAR_INT_CAUSE)
+#define F_GC1_CMD_PAR_INT_CAUSE V_GC1_CMD_PAR_INT_CAUSE(1U)
+
+#define S_GC1_DATA_PAR_INT_CAUSE 4
+#define V_GC1_DATA_PAR_INT_CAUSE(x) ((x) << S_GC1_DATA_PAR_INT_CAUSE)
+#define F_GC1_DATA_PAR_INT_CAUSE V_GC1_DATA_PAR_INT_CAUSE(1U)
+
+#define S_GC0_CMD_PAR_INT_CAUSE 3
+#define V_GC0_CMD_PAR_INT_CAUSE(x) ((x) << S_GC0_CMD_PAR_INT_CAUSE)
+#define F_GC0_CMD_PAR_INT_CAUSE V_GC0_CMD_PAR_INT_CAUSE(1U)
+
+#define S_GC0_DATA_PAR_INT_CAUSE 2
+#define V_GC0_DATA_PAR_INT_CAUSE(x) ((x) << S_GC0_DATA_PAR_INT_CAUSE)
+#define F_GC0_DATA_PAR_INT_CAUSE V_GC0_DATA_PAR_INT_CAUSE(1U)
+
+#define S_ILLADDRACCESS1_INT_CAUSE 1
+#define V_ILLADDRACCESS1_INT_CAUSE(x) ((x) << S_ILLADDRACCESS1_INT_CAUSE)
+#define F_ILLADDRACCESS1_INT_CAUSE V_ILLADDRACCESS1_INT_CAUSE(1U)
+
+#define S_ILLADDRACCESS0_INT_CAUSE 0
+#define V_ILLADDRACCESS0_INT_CAUSE(x) ((x) << S_ILLADDRACCESS0_INT_CAUSE)
+#define F_ILLADDRACCESS0_INT_CAUSE V_ILLADDRACCESS0_INT_CAUSE(1U)
+
+#define A_GCACHE_DBG_SEL_CTRL 0x51550
+
+#define S_DBG_SEL_CTRLSEL_OVR_EN 31
+#define V_DBG_SEL_CTRLSEL_OVR_EN(x) ((x) << S_DBG_SEL_CTRLSEL_OVR_EN)
+#define F_DBG_SEL_CTRLSEL_OVR_EN V_DBG_SEL_CTRLSEL_OVR_EN(1U)
+
+#define S_T7_DEBUG_HI 16
+#define V_T7_DEBUG_HI(x) ((x) << S_T7_DEBUG_HI)
+#define F_T7_DEBUG_HI V_T7_DEBUG_HI(1U)
+
+#define S_DBG_SEL_CTRLSELH 8
+#define M_DBG_SEL_CTRLSELH 0xffU
+#define V_DBG_SEL_CTRLSELH(x) ((x) << S_DBG_SEL_CTRLSELH)
+#define G_DBG_SEL_CTRLSELH(x) (((x) >> S_DBG_SEL_CTRLSELH) & M_DBG_SEL_CTRLSELH)
+
+#define S_DBG_SEL_CTRLSELL 0
+#define M_DBG_SEL_CTRLSELL 0xffU
+#define V_DBG_SEL_CTRLSELL(x) ((x) << S_DBG_SEL_CTRLSELL)
+#define G_DBG_SEL_CTRLSELL(x) (((x) >> S_DBG_SEL_CTRLSELL) & M_DBG_SEL_CTRLSELL)
+
+#define A_GCACHE_LOCAL_DEBUG_RPT 0x51554
+#define A_GCACHE_DBG_ILL_ACC 0x5155c
+#define A_GCACHE_DBG_ILL_ADDR0 0x51560
+#define A_GCACHE_DBG_ILL_ADDR1 0x51564
+#define A_GCACHE_GC0_DBG_ADDR_0_32 0x51568
+#define A_GCACHE_GC0_DBG_ADDR_32_32 0x5156c
+#define A_GCACHE_GC0_DBG_ADDR_64_32 0x51570
+#define A_GCACHE_GC0_DBG_ADDR_96_32 0x51574
+#define A_GCACHE_GC0_DBG_ADDR_0_64 0x51578
+#define A_GCACHE_GC0_DBG_ADDR_64_64 0x5157c
+#define A_GCACHE_GC0_DBG_ADDR_0_96 0x51580
+#define A_GCACHE_GC0_DBG_ADDR_32_96 0x51584
+#define A_GCACHE_GC1_DBG_ADDR_0_32 0x5158c
+#define A_GCACHE_GC1_DBG_ADDR_32_32 0x51590
+#define A_GCACHE_GC1_DBG_ADDR_64_32 0x51594
+#define A_GCACHE_GC1_DBG_ADDR_96_32 0x51598
+#define A_GCACHE_GC1_DBG_ADDR_0_64 0x5159c
+#define A_GCACHE_GC1_DBG_ADDR_64_64 0x515a0
+#define A_GCACHE_GC1_DBG_ADDR_0_96 0x515a4
+#define A_GCACHE_GC1_DBG_ADDR_32_96 0x515a8
+#define A_GCACHE_GC0_DBG_ADDR_32_64 0x515ac
+#define A_GCACHE_GC1_DBG_ADDR_32_64 0x515b0
+#define A_GCACHE_PERF_GC0_EVICT 0x515b4
+#define A_GCACHE_PERF_GC1_EVICT 0x515b8
+#define A_GCACHE_PERF_GC0_CE_COUNT 0x515bc
+#define A_GCACHE_PERF_GC1_CE_COUNT 0x515c0
+#define A_GCACHE_PERF_GC0_UE_COUNT 0x515c4
+#define A_GCACHE_PERF_GC1_UE_COUNT 0x515c8
+#define A_GCACHE_DBG_CTL 0x515f0
+#define A_GCACHE_DBG_DATA 0x515f4
diff --git a/sys/dev/cxgbe/common/t4_regs_values.h b/sys/dev/cxgbe/common/t4_regs_values.h
index 830828097802..6485fa50bd08 100644
--- a/sys/dev/cxgbe/common/t4_regs_values.h
+++ b/sys/dev/cxgbe/common/t4_regs_values.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011, 2016 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2016, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -269,6 +268,7 @@
#define X_WINDOW_SHIFT 10
#define X_PCIEOFST_SHIFT 10
+#define X_T7_MEMOFST_SHIFT 4
/*
* TP definitions.
@@ -284,6 +284,10 @@
#define S_FT_FIRST S_FCOE
#define S_FT_LAST S_FRAGMENTATION
+#define S_T7_FT_FIRST S_IPSECIDX
+#define S_T7_FT_LAST S_TCPFLAGS
+
+#define W_FT_IPSECIDX 12
#define W_FT_FCOE 1
#define W_FT_PORT 3
#define W_FT_VNIC_ID 17
@@ -294,17 +298,9 @@
#define W_FT_MACMATCH 9
#define W_FT_MPSHITTYPE 3
#define W_FT_FRAGMENTATION 1
-
-#define M_FT_FCOE ((1ULL << W_FT_FCOE) - 1)
-#define M_FT_PORT ((1ULL << W_FT_PORT) - 1)
-#define M_FT_VNIC_ID ((1ULL << W_FT_VNIC_ID) - 1)
-#define M_FT_VLAN ((1ULL << W_FT_VLAN) - 1)
-#define M_FT_TOS ((1ULL << W_FT_TOS) - 1)
-#define M_FT_PROTOCOL ((1ULL << W_FT_PROTOCOL) - 1)
-#define M_FT_ETHERTYPE ((1ULL << W_FT_ETHERTYPE) - 1)
-#define M_FT_MACMATCH ((1ULL << W_FT_MACMATCH) - 1)
-#define M_FT_MPSHITTYPE ((1ULL << W_FT_MPSHITTYPE) - 1)
-#define M_FT_FRAGMENTATION ((1ULL << W_FT_FRAGMENTATION) - 1)
+#define W_FT_ROCE 1
+#define W_FT_SYNONLY 1
+#define W_FT_TCPFLAGS 12
/*
* Some of the Compressed Filter Tuple fields have internal structure. These
@@ -327,6 +323,6 @@
#define S_FT_VNID_ID_VLD 16
#define V_FT_VNID_ID_VLD(x) ((x) << S_FT_VNID_ID_VLD)
-#define F_FT_VNID_ID_VLD(x) V_FT_VNID_ID_VLD(1U)
+#define F_FT_VNID_ID_VLD V_FT_VNID_ID_VLD(1U)
#endif /* __T4_REGS_VALUES_H__ */
diff --git a/sys/dev/cxgbe/common/t4_tcb.h b/sys/dev/cxgbe/common/t4_tcb.h
index f9631ba58418..8bff15f04e7a 100644
--- a/sys/dev/cxgbe/common/t4_tcb.h
+++ b/sys/dev/cxgbe/common/t4_tcb.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011, 2016 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2016, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -340,10 +339,9 @@
/* 1023:1020 */
#define W_TCB_ULP_EXT 31
-#define S_TCP_ULP_EXT 28
+#define S_TCB_ULP_EXT 28
#define M_TCB_ULP_EXT 0xfULL
-#define V_TCB_ULP_EXT(x) ((x) << S_TCP_ULP_EXT)
-
+#define V_TCB_ULP_EXT(x) ((x) << S_TCB_ULP_EXT)
/* 840:832 */
#define W_TCB_IRS_ULP 26
@@ -495,31 +493,31 @@
#define M_TCB_RX_DDP_BUF1_TAG 0xffffffffULL
#define V_TCB_RX_DDP_BUF1_TAG(x) ((x) << S_TCB_RX_DDP_BUF1_TAG)
-/* 855:832 */
+/* 855:832 */
#define W_TCB_RX_TLS_BUF_OFFSET 26
#define S_TCB_RX_TLS_BUF_OFFSET 0
#define M_TCB_RX_TLS_BUF_OFFSET 0xffffffULL
#define V_TCB_RX_TLS_BUF_OFFSET(x) ((x) << S_TCB_RX_TLS_BUF_OFFSET)
-/* 876:856 */
+/* 879:856 */
#define W_TCB_RX_TLS_BUF_LEN 26
#define S_TCB_RX_TLS_BUF_LEN 24
#define M_TCB_RX_TLS_BUF_LEN 0xffffffULL
#define V_TCB_RX_TLS_BUF_LEN(x) ((__u64)(x) << S_TCB_RX_TLS_BUF_LEN)
-/* 895:880 */
-#define W_TCB_RX_TLS_FLAGS 26
-#define S_TCB_RX_TLS_FLAGS 48
+/* 895:880 */
+#define W_TCB_RX_TLS_FLAGS 27
+#define S_TCB_RX_TLS_FLAGS 16
#define M_TCB_RX_TLS_FLAGS 0xffffULL
#define V_TCB_RX_TLS_FLAGS(x) ((__u64)(x) << S_TCB_RX_TLS_FLAGS)
-/* 959:896 */
-#define W_TCB_TLS_SEQ 28
-#define S_TCB_TLS_SEQ 0
-#define M_TCB_TLS_SEQ 0xffffffffffffffffULL
-#define V_TCB_TLS_SEQ(x) ((__u64)(x) << S_TCB_TLS_SEQ)
+/* 959:896 */
+#define W_TCB_RX_TLS_SEQ 28
+#define S_TCB_RX_TLS_SEQ 0
+#define M_TCB_RX_TLS_SEQ 0xffffffffffffffffULL
+#define V_TCB_RX_TLS_SEQ(x) ((__u64)(x) << S_TCB_RX_TLS_SEQ)
-/* 991:960 */
+/* 991:960 */
#define W_TCB_RX_TLS_BUF_TAG 30
#define S_TCB_RX_TLS_BUF_TAG 0
#define M_TCB_RX_TLS_BUF_TAG 0xffffffffULL
@@ -531,17 +529,113 @@
#define M_TCB_RX_TLS_KEY_TAG 0xffffffffULL
#define V_TCB_RX_TLS_KEY_TAG(x) ((x) << S_TCB_RX_TLS_KEY_TAG)
+#define S_TF_TLS_ENABLE 0
+#define V_TF_TLS_ENABLE(x) ((x) << S_TF_TLS_ENABLE)
+
+#define S_TF_TLS_ACTIVE 1
+#define V_TF_TLS_ACTIVE(x) ((x) << S_TF_TLS_ACTIVE)
+
+#define S_TF_TLS_CONTROL 2
+#define V_TF_TLS_CONTROL(x) ((x) << S_TF_TLS_CONTROL)
+
#define S_TF_TLS_KEY_SIZE 7
#define V_TF_TLS_KEY_SIZE(x) ((x) << S_TF_TLS_KEY_SIZE)
-#define S_TF_TLS_CONTROL 2
-#define V_TF_TLS_CONTROL(x) ((x) << S_TF_TLS_CONTROL)
+/* 853:832 */
+#define W_TCB_TPT_OFFSET 26
+#define S_TCB_TPT_OFFSET 0
+#define M_TCB_TPT_OFFSET 0x3fffffULL
+#define V_TCB_TPT_OFFSET(x) ((x) << S_TCB_TPT_OFFSET)
+
+/* 863:854 */
+#define W_TCB_T10_CONFIG 26
+#define S_TCB_T10_CONFIG 22
+#define M_TCB_T10_CONFIG 0x3ffULL
+#define V_TCB_T10_CONFIG(x) ((x) << S_TCB_T10_CONFIG)
+
+/* 871:864 */
+#define W_TCB_PDU_HLEN 27
+#define S_TCB_PDU_HLEN 0
+#define M_TCB_PDU_HLEN 0xffULL
+#define V_TCB_PDU_HLEN(x) ((x) << S_TCB_PDU_HLEN)
+
+/* 879:872 */
+#define W_TCB_PDU_PDO 27
+#define S_TCB_PDU_PDO 8
+#define M_TCB_PDU_PDO 0xffULL
+#define V_TCB_PDU_PDO(x) ((x) << S_TCB_PDU_PDO)
-#define S_TF_TLS_ACTIVE 1
-#define V_TF_TLS_ACTIVE(x) ((x) << S_TF_TLS_ACTIVE)
+/* 895:880 */
+#define W_TCB_N_CQ_IDX_RQ 27
+#define S_TCB_N_CQ_IDX_RQ 16
+#define M_TCB_N_CQ_IDX_RQ 0xffffULL
+#define V_TCB_N_CQ_IDX_RQ(x) ((x) << S_TCB_N_CQ_IDX_RQ)
+
+/* 900:896 */
+#define W_TCB_NVMT_PDA 28
+#define S_TCB_NVMT_PDA 0
+#define M_TCB_NVMT_PDA 0x1fULL
+#define V_TCB_NVMT_PDA(x) ((x) << S_TCB_NVMT_PDA)
+
+/* 911:901 */
+#define W_TCB_RSVD 28
+#define S_TCB_RSVD 5
+#define M_TCB_RSVD 0x7ffULL
+#define V_TCB_RSVD(x) ((x) << S_TCB_RSVD)
-#define S_TF_TLS_ENABLE 0
-#define V_TF_TLS_ENABLE(x) ((x) << S_TF_TLS_ENABLE)
+/* 927:912 */
+#define W_TCB_N_PD_ID 28
+#define S_TCB_N_PD_ID 16
+#define M_TCB_N_PD_ID 0xffffULL
+#define V_TCB_N_PD_ID(x) ((x) << S_TCB_N_PD_ID)
+
+/* 929:928 */
+#define W_TCB_CMP_IMM_SZ 29
+#define S_TCB_CMP_IMM_SZ 0
+#define M_TCB_CMP_IMM_SZ 0x3ULL
+#define V_TCB_CMP_IMM_SZ(x) ((x) << S_TCB_CMP_IMM_SZ)
+
+/* 931:930 */
+#define W_TCB_PDU_DGST_FLAGS 29
+#define S_TCB_PDU_DGST_FLAGS 2
+#define M_TCB_PDU_DGST_FLAGS 0x3ULL
+#define V_TCB_PDU_DGST_FLAGS(x) ((x) << S_TCB_PDU_DGST_FLAGS)
+
+/* 959:932 */
+#define W_TCB_RSVD1 29
+#define S_TCB_RSVD1 4
+#define M_TCB_RSVD1 0xfffffffULL
+#define V_TCB_RSVD1(x) ((x) << S_TCB_RSVD1)
+
+/* 985:960 */
+#define W_TCB_N_RQ_START 30
+#define S_TCB_N_RQ_START 0
+#define M_TCB_N_RQ_START 0x3ffffffULL
+#define V_TCB_N_RQ_START(x) ((x) << S_TCB_N_RQ_START)
+
+/* 998:986 */
+#define W_TCB_N_RQ_MSN 30
+#define S_TCB_N_RQ_MSN 26
+#define M_TCB_N_RQ_MSN 0x1fffULL
+#define V_TCB_N_RQ_MSN(x) ((__u64)(x) << S_TCB_N_RQ_MSN)
+
+/* 1002:999 */
+#define W_TCB_N_RQ_MAX_OFFSET 31
+#define S_TCB_N_RQ_MAX_OFFSET 7
+#define M_TCB_N_RQ_MAX_OFFSET 0xfULL
+#define V_TCB_N_RQ_MAX_OFFSET(x) ((x) << S_TCB_N_RQ_MAX_OFFSET)
+
+/* 1015:1003 */
+#define W_TCB_N_RQ_WRITE_PTR 31
+#define S_TCB_N_RQ_WRITE_PTR 11
+#define M_TCB_N_RQ_WRITE_PTR 0x1fffULL
+#define V_TCB_N_RQ_WRITE_PTR(x) ((x) << S_TCB_N_RQ_WRITE_PTR)
+
+/* 1023:1016 */
+#define W_TCB_N_PDU_TYPE 31
+#define S_TCB_N_PDU_TYPE 24
+#define M_TCB_N_PDU_TYPE 0xffULL
+#define V_TCB_N_PDU_TYPE(x) ((x) << S_TCB_N_PDU_TYPE)
#define S_TF_MIGRATING 0
#define V_TF_MIGRATING(x) ((x) << S_TF_MIGRATING)
@@ -549,15 +643,24 @@
#define S_TF_NON_OFFLOAD 1
#define V_TF_NON_OFFLOAD(x) ((x) << S_TF_NON_OFFLOAD)
+#define S_TF_FILTER 1
+#define V_TF_FILTER(x) ((x) << S_TF_FILTER)
+
#define S_TF_LOCK_TID 2
#define V_TF_LOCK_TID(x) ((x) << S_TF_LOCK_TID)
#define S_TF_KEEPALIVE 3
#define V_TF_KEEPALIVE(x) ((x) << S_TF_KEEPALIVE)
+#define S_TF_DROP_ENCAPS_HDR 3
+#define V_TF_DROP_ENCAPS_HDR(x) ((x) << S_TF_DROP_ENCAPS_HDR)
+
#define S_TF_DACK 4
#define V_TF_DACK(x) ((x) << S_TF_DACK)
+#define S_TF_COUNT_HITS 4
+#define V_TF_COUNT_HITS(x) ((x) << S_TF_COUNT_HITS)
+
#define S_TF_DACK_MSS 5
#define V_TF_DACK_MSS(x) ((x) << S_TF_DACK_MSS)
@@ -567,6 +670,9 @@
#define S_TF_NAGLE 7
#define V_TF_NAGLE(x) ((x) << S_TF_NAGLE)
+#define S_TF_REMOVE_VLAN 7
+#define V_TF_REMOVE_VLAN(x) ((x) << S_TF_REMOVE_VLAN)
+
#define S_TF_SSWS_DISABLED 8
#define V_TF_SSWS_DISABLED(x) ((x) << S_TF_SSWS_DISABLED)
@@ -576,15 +682,24 @@
#define S_TF_RX_FLOW_CONTROL_DISABLE 10
#define V_TF_RX_FLOW_CONTROL_DISABLE(x) ((x) << S_TF_RX_FLOW_CONTROL_DISABLE)
+#define S_TF_NAT_SEQ_CHECK 10
+#define V_TF_NAT_SEQ_CHECK(x) ((x) << S_TF_NAT_SEQ_CHECK)
+
#define S_TF_RX_CHANNEL 11
#define V_TF_RX_CHANNEL(x) ((x) << S_TF_RX_CHANNEL)
#define S_TF_TX_CHANNEL0 12
#define V_TF_TX_CHANNEL0(x) ((x) << S_TF_TX_CHANNEL0)
+#define S_TF_LPBK_TX_CHANNEL0 12
+#define V_TF_LPBK_TX_CHANNEL0(x) ((x) << S_TF_LPBK_TX_CHANNEL0)
+
#define S_TF_TX_CHANNEL1 13
#define V_TF_TX_CHANNEL1(x) ((x) << S_TF_TX_CHANNEL1)
+#define S_TF_LPBK_TX_CHANNEL1 13
+#define V_TF_LPBK_TX_CHANNEL1(x) ((x) << S_TF_LPBK_TX_CHANNEL1)
+
#define S_TF_TX_QUIESCE 14
#define V_TF_TX_QUIESCE(x) ((x) << S_TF_TX_QUIESCE)
@@ -607,6 +722,10 @@
#define M_TF_TX_QUEUE 0x7ULL
#define V_TF_TX_QUEUE(x) ((x) << S_TF_TX_QUEUE)
+#define S_TF_NAT_MODE 18
+#define M_TF_NAT_MODE 0x7ULL
+#define V_TF_NAT_MODE(x) ((x) << S_TF_NAT_MODE)
+
#define S_TF_TURBO 21
#define V_TF_TURBO(x) ((x) << S_TF_TURBO)
@@ -652,8 +771,8 @@
#define S_TF_RCV_COALESCE_HEARTBEAT 32
#define V_TF_RCV_COALESCE_HEARTBEAT(x) ((__u64)(x) << S_TF_RCV_COALESCE_HEARTBEAT)
-#define S_TF_INIT 33
-#define V_TF_INIT(x) ((__u64)(x) << S_TF_INIT)
+#define S_TF_RSS_FW 33
+#define V_TF_RSS_FW(x) ((__u64)(x) << S_TF_RSS_FW)
#define S_TF_ACTIVE_OPEN 34
#define V_TF_ACTIVE_OPEN(x) ((__u64)(x) << S_TF_ACTIVE_OPEN)
@@ -712,12 +831,21 @@
#define S_TF_RECV_SCALE 52
#define V_TF_RECV_SCALE(x) ((__u64)(x) << S_TF_RECV_SCALE)
+#define S_TF_NAT_FLAG_CHECK 52
+#define V_TF_NAT_FLAG_CHECK(x) ((__u64)(x) << S_TF_NAT_FLAG_CHECK)
+
#define S_TF_RECV_TSTMP 53
#define V_TF_RECV_TSTMP(x) ((__u64)(x) << S_TF_RECV_TSTMP)
+#define S_TF_LPBK_TX_LPBK 53
+#define V_TF_LPBK_TX_LPBK(x) ((__u64)(x) << S_TF_LPBK_TX_LPBK)
+
#define S_TF_RECV_SACK 54
#define V_TF_RECV_SACK(x) ((__u64)(x) << S_TF_RECV_SACK)
+#define S_TF_SWAP_MAC_ADDR 54
+#define V_TF_SWAP_MAC_ADDR(x) ((__u64)(x) << S_TF_SWAP_MAC_ADDR)
+
#define S_TF_PEND_CTL0 55
#define V_TF_PEND_CTL0(x) ((__u64)(x) << S_TF_PEND_CTL0)
@@ -751,6 +879,9 @@
#define S_TF_CCTRL_RFR 62
#define V_TF_CCTRL_RFR(x) ((__u64)(x) << S_TF_CCTRL_RFR)
+#define S_TF_INSERT_VLAN 62
+#define V_TF_INSERT_VLAN(x) ((__u64)(x) << S_TF_INSERT_VLAN)
+
#define S_TF_CORE_BYPASS 63
#define V_TF_CORE_BYPASS(x) ((__u64)(x) << S_TF_CORE_BYPASS)
@@ -772,6 +903,9 @@
#define S_TF_DDP_RX2TX 21
#define V_TF_DDP_RX2TX(x) ((x) << S_TF_DDP_RX2TX)
+#define S_TF_DDP_INDICATE_FLL 22
+#define V_TF_DDP_INDICATE_FLL(x) ((x) << S_TF_DDP_INDICATE_FLL)
+
#define S_TF_DDP_BUF0_VALID 24
#define V_TF_DDP_BUF0_VALID(x) ((x) << S_TF_DDP_BUF0_VALID)
diff --git a/sys/dev/cxgbe/crypto/t4_crypto.c b/sys/dev/cxgbe/crypto/t4_crypto.c
index 2c83b10b13d6..80e31b1159fd 100644
--- a/sys/dev/cxgbe/crypto/t4_crypto.c
+++ b/sys/dev/cxgbe/crypto/t4_crypto.c
@@ -208,6 +208,7 @@ struct ccr_softc {
counter_u64_t stats_pad_error;
counter_u64_t stats_sglist_error;
counter_u64_t stats_process_error;
+ counter_u64_t stats_pointer_error;
counter_u64_t stats_sw_fallback;
struct sysctl_ctx_list ctx;
@@ -458,8 +459,9 @@ ccr_populate_wreq(struct ccr_softc *sc, struct ccr_session *s,
crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
V_ULP_TXPKT_DATAMODIFY(0) |
- V_ULP_TXPKT_CHANNELID(s->port->tx_channel_id) |
+ V_T7_ULP_TXPKT_CHANNELID(s->port->tx_channel_id) |
V_ULP_TXPKT_DEST(0) |
+ (is_t7(sc->adapter) ? V_ULP_TXPKT_CMDMORE(1) : 0) |
V_ULP_TXPKT_FID(sc->first_rxq_id) | V_ULP_TXPKT_RO(1));
crwr->ulptx.len = htobe32(
((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16));
@@ -545,7 +547,7 @@ ccr_hash(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
crwr->sec_cpl.op_ivinsrtofst = htobe32(
V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
- V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
+ V_T7_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(0));
@@ -705,7 +707,7 @@ ccr_cipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
crwr->sec_cpl.op_ivinsrtofst = htobe32(
V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
- V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
+ V_T7_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
@@ -1006,7 +1008,7 @@ ccr_eta(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
crwr->sec_cpl.op_ivinsrtofst = htobe32(
V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
- V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
+ V_T7_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
@@ -1293,7 +1295,7 @@ ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
crwr->sec_cpl.op_ivinsrtofst = htobe32(
V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
- V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
+ V_T7_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
@@ -1645,7 +1647,7 @@ ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
crwr->sec_cpl.op_ivinsrtofst = htobe32(
V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
- V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
+ V_T7_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
@@ -1883,6 +1885,9 @@ ccr_sysctls(struct ccr_softc *sc)
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "process_error",
CTLFLAG_RD, &sc->stats_process_error,
"Requests failed during queueing");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "pointer_error",
+ CTLFLAG_RD, &sc->stats_pointer_error,
+ "Requests with a misaligned request pointer");
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "sw_fallback",
CTLFLAG_RD, &sc->stats_sw_fallback,
"Requests processed by falling back to software");
@@ -1932,13 +1937,15 @@ ccr_init_port(struct ccr_softc *sc, int port)
"Too many ports to fit in port_mask");
/*
- * Completions for crypto requests on port 1 can sometimes
+ * Completions for crypto requests on port 1 on T6 can sometimes
* return a stale cookie value due to a firmware bug. Disable
* requests on port 1 by default on affected firmware.
*/
- if (sc->adapter->params.fw_vers >= FW_VERSION32(1, 25, 4, 0) ||
- port == 0)
- sc->port_mask |= 1u << port;
+ if (port != 0 && is_t6(sc->adapter) &&
+ sc->adapter->params.fw_vers < FW_VERSION32(1, 25, 4, 0))
+ return;
+
+ sc->port_mask |= 1u << port;
}
static int
@@ -1988,6 +1995,7 @@ ccr_attach(device_t dev)
sc->stats_pad_error = counter_u64_alloc(M_WAITOK);
sc->stats_sglist_error = counter_u64_alloc(M_WAITOK);
sc->stats_process_error = counter_u64_alloc(M_WAITOK);
+ sc->stats_pointer_error = counter_u64_alloc(M_WAITOK);
sc->stats_sw_fallback = counter_u64_alloc(M_WAITOK);
ccr_sysctls(sc);
@@ -2034,6 +2042,7 @@ ccr_detach(device_t dev)
counter_u64_free(sc->stats_pad_error);
counter_u64_free(sc->stats_sglist_error);
counter_u64_free(sc->stats_process_error);
+ counter_u64_free(sc->stats_pointer_error);
counter_u64_free(sc->stats_sw_fallback);
for_each_port(sc->adapter, i) {
ccr_free_port(sc, i);
@@ -2531,6 +2540,16 @@ ccr_process(device_t dev, struct cryptop *crp, int hint)
s = crypto_get_driver_session(crp->crp_session);
sc = device_get_softc(dev);
+ /*
+ * Request pointers with the low bit set in the pointer can't
+ * be stored as the cookie in the CPL_FW6_PLD reply.
+ */
+ if (((uintptr_t)crp & CPL_FW6_COOKIE_MASK) != 0) {
+ counter_u64_add(sc->stats_pointer_error, 1);
+ error = EINVAL;
+ goto out_unlocked;
+ }
+
mtx_lock(&s->lock);
error = ccr_populate_sglist(s->sg_input, &crp->crp_buf);
if (error == 0 && CRYPTO_HAS_OUTPUT_BUFFER(crp))
@@ -2637,6 +2656,7 @@ ccr_process(device_t dev, struct cryptop *crp, int hint)
out:
mtx_unlock(&s->lock);
+out_unlocked:
if (error) {
crp->crp_etype = error;
crypto_done(crp);
@@ -2646,7 +2666,7 @@ out:
}
static int
-do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
+fw6_pld_ccr(struct sge_iq *iq, const struct rss_header *rss,
struct mbuf *m)
{
struct ccr_softc *sc;
@@ -2661,7 +2681,7 @@ do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
else
cpl = (const void *)(rss + 1);
- crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]);
+ crp = (struct cryptop *)(uintptr_t)CPL_FW6_PLD_COOKIE(cpl);
s = crypto_get_driver_session(crp->crp_session);
status = be64toh(cpl->data[0]);
if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status))
@@ -2715,10 +2735,12 @@ ccr_modevent(module_t mod, int cmd, void *arg)
switch (cmd) {
case MOD_LOAD:
- t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld);
+ t4_register_shared_cpl_handler(CPL_FW6_PLD, fw6_pld_ccr,
+ CPL_FW6_COOKIE_CCR);
return (0);
case MOD_UNLOAD:
- t4_register_cpl_handler(CPL_FW6_PLD, NULL);
+ t4_register_shared_cpl_handler(CPL_FW6_PLD, NULL,
+ CPL_FW6_COOKIE_CCR);
return (0);
default:
return (EOPNOTSUPP);
@@ -2745,7 +2767,9 @@ static driver_t ccr_driver = {
sizeof(struct ccr_softc)
};
-DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_modevent, NULL);
+DRIVER_MODULE(ccr, chnex, ccr_driver, ccr_modevent, NULL);
+DRIVER_MODULE(ccr, t6nex, ccr_driver, NULL, NULL);
MODULE_VERSION(ccr, 1);
MODULE_DEPEND(ccr, crypto, 1, 1, 1);
+MODULE_DEPEND(ccr, chnex, 1, 1, 1);
MODULE_DEPEND(ccr, t6nex, 1, 1, 1);
diff --git a/sys/dev/cxgbe/crypto/t4_crypto.h b/sys/dev/cxgbe/crypto/t4_crypto.h
index 452e48d20dfd..71c9ec3903ef 100644
--- a/sys/dev/cxgbe/crypto/t4_crypto.h
+++ b/sys/dev/cxgbe/crypto/t4_crypto.h
@@ -139,6 +139,7 @@ struct phys_sge_pairs {
#define SCMD_PROTO_VERSION_TLS_1_2 0
#define SCMD_PROTO_VERSION_TLS_1_1 1
#define SCMD_PROTO_VERSION_GENERIC 4
+#define SCMD_PROTO_VERSION_TLS_1_3 8
#define SCMD_CIPH_MODE_NOP 0
#define SCMD_CIPH_MODE_AES_CBC 1
diff --git a/sys/dev/cxgbe/crypto/t4_keyctx.c b/sys/dev/cxgbe/crypto/t4_keyctx.c
index 50e339ac2e05..b85e50fd6cb1 100644
--- a/sys/dev/cxgbe/crypto/t4_keyctx.c
+++ b/sys/dev/cxgbe/crypto/t4_keyctx.c
@@ -437,10 +437,16 @@ t4_tls_key_info_size(const struct ktls_session *tls)
int
t4_tls_proto_ver(const struct ktls_session *tls)
{
- if (tls->params.tls_vminor == TLS_MINOR_VER_ONE)
+ switch (tls->params.tls_vminor) {
+ case TLS_MINOR_VER_ONE:
return (SCMD_PROTO_VERSION_TLS_1_1);
- else
+ case TLS_MINOR_VER_TWO:
return (SCMD_PROTO_VERSION_TLS_1_2);
+ case TLS_MINOR_VER_THREE:
+ return (SCMD_PROTO_VERSION_TLS_1_3);
+ default:
+ __assert_unreachable();
+ }
}
int
@@ -492,6 +498,17 @@ t4_tls_hmac_ctrl(const struct ktls_session *tls)
}
static int
+tls_seqnum_ctrl(const struct ktls_session *tls)
+{
+ switch (tls->params.tls_vminor) {
+ case TLS_MINOR_VER_THREE:
+ return (0);
+ default:
+ return (3);
+ }
+}
+
+static int
tls_cipher_key_size(const struct ktls_session *tls)
{
switch (tls->params.cipher_key_len) {
@@ -557,7 +574,7 @@ t4_tls_key_ctx(const struct ktls_session *tls, int direction,
kctx->u.rxhdr.authmode_to_rxvalid =
V_TLS_KEYCTX_TX_WR_AUTHMODE(t4_tls_auth_mode(tls)) |
- V_TLS_KEYCTX_TX_WR_SEQNUMCTRL(3) |
+ V_TLS_KEYCTX_TX_WR_SEQNUMCTRL(tls_seqnum_ctrl(tls)) |
V_TLS_KEYCTX_TX_WR_RXVALID(1);
kctx->u.rxhdr.ivpresent_to_rxmk_size =
@@ -607,7 +624,8 @@ t4_tls_key_ctx(const struct ktls_session *tls, int direction,
_Static_assert(offsetof(struct tx_keyctx_hdr, txsalt) ==
offsetof(struct rx_keyctx_hdr, rxsalt),
"salt offset mismatch");
- memcpy(kctx->u.txhdr.txsalt, tls->params.iv, SALT_SIZE);
+ memcpy(kctx->u.txhdr.txsalt, tls->params.iv,
+ tls->params.iv_len);
t4_init_gmac_hash(tls->params.cipher_key,
tls->params.cipher_key_len, hash);
} else {
@@ -665,6 +683,10 @@ t4_write_tlskey_wr(const struct ktls_session *tls, int direction, int tid,
kwr->reneg_to_write_rx = V_KEY_GET_LOC(direction == KTLS_TX ?
KEY_WRITE_TX : KEY_WRITE_RX);
+ /* We don't need to use V_T7_ULP_MEMIO_DATA_LEN in this routine. */
+ _Static_assert(V_T7_ULP_MEMIO_DATA_LEN(TLS_KEY_CONTEXT_SZ >> 5) ==
+ V_ULP_MEMIO_DATA_LEN(TLS_KEY_CONTEXT_SZ >> 5), "datalen mismatch");
+
/* master command */
kwr->cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
V_T5_ULP_MEMIO_ORDER(1) | V_T5_ULP_MEMIO_IMM(1));
diff --git a/sys/dev/cxgbe/crypto/t6_kern_tls.c b/sys/dev/cxgbe/crypto/t6_kern_tls.c
index 04bb6c944050..454b2e264a0e 100644
--- a/sys/dev/cxgbe/crypto/t6_kern_tls.c
+++ b/sys/dev/cxgbe/crypto/t6_kern_tls.c
@@ -2003,7 +2003,7 @@ t6_ktls_write_wr(struct sge_txq *txq, void *dst, struct mbuf *m,
if (tlsp->l2te)
t4_l2t_release(tlsp->l2te);
tlsp->l2te = t4_l2t_alloc_tls(tlsp->sc, txq, dst, &ndesc,
- vlan_tag, tlsp->vi->pi->lport, eh->ether_dhost);
+ vlan_tag, tlsp->vi->pi->hw_port, eh->ether_dhost);
if (tlsp->l2te == NULL)
CXGBE_UNIMPLEMENTED("failed to allocate TLS L2TE");
if (ndesc != 0) {
diff --git a/sys/dev/cxgbe/crypto/t7_kern_tls.c b/sys/dev/cxgbe/crypto/t7_kern_tls.c
new file mode 100644
index 000000000000..217459126361
--- /dev/null
+++ b/sys/dev/cxgbe/crypto/t7_kern_tls.c
@@ -0,0 +1,2196 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 Chelsio Communications
+ * Written by: John Baldwin <jhb@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#include "opt_kern_tls.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/ktr.h>
+#include <sys/ktls.h>
+#include <sys/sglist.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/sockbuf.h>
+#include <netinet/in.h>
+#include <netinet/in_pcb.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp_var.h>
+#include <opencrypto/cryptodev.h>
+#include <opencrypto/xform.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include "common/common.h"
+#include "common/t4_regs.h"
+#include "common/t4_regs_values.h"
+#include "common/t4_tcb.h"
+#include "t4_l2t.h"
+#include "t4_clip.h"
+#include "t4_mp_ring.h"
+#include "crypto/t4_crypto.h"
+
+#if defined(INET) || defined(INET6)
+
+#define TLS_HEADER_LENGTH 5
+
+struct tls_scmd {
+ __be32 seqno_numivs;
+ __be32 ivgen_hdrlen;
+};
+
+struct tlspcb {
+ struct m_snd_tag com;
+ struct vi_info *vi; /* virtual interface */
+ struct adapter *sc;
+ struct sge_txq *txq;
+
+ int tx_key_addr;
+ bool inline_key;
+ bool tls13;
+ unsigned char enc_mode;
+
+ struct tls_scmd scmd0;
+ struct tls_scmd scmd0_partial;
+ struct tls_scmd scmd0_short;
+
+ unsigned int tx_key_info_size;
+
+ uint16_t prev_mss;
+
+ /* Fields used for GCM records using GHASH state. */
+ uint16_t ghash_offset;
+ uint64_t ghash_tls_seqno;
+ char ghash[AES_GMAC_HASH_LEN];
+ bool ghash_valid;
+ bool ghash_pending;
+ bool ghash_lcb;
+ bool queue_mbufs;
+ uint8_t rx_chid;
+ uint16_t rx_qid;
+ struct mbufq pending_mbufs;
+
+ /*
+ * Only used outside of setup and teardown when using inline
+ * keys or for partial GCM mode.
+ */
+ struct tls_keyctx keyctx;
+};
+
+static void t7_tls_tag_free(struct m_snd_tag *mst);
+static int ktls_setup_keys(struct tlspcb *tlsp,
+ const struct ktls_session *tls, struct sge_txq *txq);
+
+static void *zero_buffer;
+static vm_paddr_t zero_buffer_pa;
+
+static const struct if_snd_tag_sw t7_tls_tag_sw = {
+ .snd_tag_free = t7_tls_tag_free,
+ .type = IF_SND_TAG_TYPE_TLS
+};
+
+static inline struct tlspcb *
+mst_to_tls(struct m_snd_tag *t)
+{
+ return (__containerof(t, struct tlspcb, com));
+}
+
+static struct tlspcb *
+alloc_tlspcb(struct ifnet *ifp, struct vi_info *vi, int flags)
+{
+ struct port_info *pi = vi->pi;
+ struct adapter *sc = pi->adapter;
+ struct tlspcb *tlsp;
+
+ tlsp = malloc(sizeof(*tlsp), M_CXGBE, M_ZERO | flags);
+ if (tlsp == NULL)
+ return (NULL);
+
+ m_snd_tag_init(&tlsp->com, ifp, &t7_tls_tag_sw);
+ tlsp->vi = vi;
+ tlsp->sc = sc;
+ tlsp->tx_key_addr = -1;
+ tlsp->ghash_offset = -1;
+ tlsp->rx_chid = pi->rx_chan;
+ tlsp->rx_qid = sc->sge.rxq[pi->vi->first_rxq].iq.abs_id;
+ mbufq_init(&tlsp->pending_mbufs, INT_MAX);
+
+ return (tlsp);
+}
+
+int
+t7_tls_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
+ struct m_snd_tag **pt)
+{
+ const struct ktls_session *tls;
+ struct tlspcb *tlsp;
+ struct adapter *sc;
+ struct vi_info *vi;
+ struct inpcb *inp;
+ struct sge_txq *txq;
+ int error, iv_size, keyid, mac_first;
+
+ tls = params->tls.tls;
+
+ /* TLS 1.1 through TLS 1.3 are currently supported. */
+ if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
+ tls->params.tls_vminor < TLS_MINOR_VER_ONE ||
+ tls->params.tls_vminor > TLS_MINOR_VER_THREE)
+ return (EPROTONOSUPPORT);
+
+ /* Sanity check values in *tls. */
+ switch (tls->params.cipher_algorithm) {
+ case CRYPTO_AES_CBC:
+ /* XXX: Explicitly ignore any provided IV. */
+ switch (tls->params.cipher_key_len) {
+ case 128 / 8:
+ case 192 / 8:
+ case 256 / 8:
+ break;
+ default:
+ return (EINVAL);
+ }
+ switch (tls->params.auth_algorithm) {
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384_HMAC:
+ break;
+ default:
+ return (EPROTONOSUPPORT);
+ }
+ iv_size = AES_BLOCK_LEN;
+ mac_first = 1;
+ break;
+ case CRYPTO_AES_NIST_GCM_16:
+ switch (tls->params.cipher_key_len) {
+ case 128 / 8:
+ case 192 / 8:
+ case 256 / 8:
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ /*
+		 * The IV size for TLS 1.2 is the size of the explicit
+		 * IV in the record header.  For TLS 1.3 it is the size
+		 * of the sequence number.
+ */
+ iv_size = 8;
+ mac_first = 0;
+ break;
+ default:
+ return (EPROTONOSUPPORT);
+ }
+
+ vi = if_getsoftc(ifp);
+ sc = vi->adapter;
+
+ tlsp = alloc_tlspcb(ifp, vi, M_WAITOK);
+
+ /*
+	 * A pointer with any of its low bits set can't be
+	 * stored as the cookie in the CPL_FW6_PLD reply.
+ */
+ if (((uintptr_t)tlsp & CPL_FW6_COOKIE_MASK) != 0) {
+ error = EINVAL;
+ goto failed;
+ }
+
+ tlsp->tls13 = tls->params.tls_vminor == TLS_MINOR_VER_THREE;
+
+ if (sc->tlst.inline_keys)
+ keyid = -1;
+ else
+ keyid = t4_alloc_tls_keyid(sc);
+ if (keyid < 0) {
+ CTR(KTR_CXGBE, "%s: %p using immediate key ctx", __func__,
+ tlsp);
+ tlsp->inline_key = true;
+ } else {
+ tlsp->tx_key_addr = keyid;
+ CTR(KTR_CXGBE, "%s: %p allocated TX key addr %#x", __func__,
+ tlsp, tlsp->tx_key_addr);
+ }
+
+ inp = params->tls.inp;
+ INP_RLOCK(inp);
+ if (inp->inp_flags & INP_DROPPED) {
+ INP_RUNLOCK(inp);
+ error = ECONNRESET;
+ goto failed;
+ }
+
+ txq = &sc->sge.txq[vi->first_txq];
+ if (inp->inp_flowtype != M_HASHTYPE_NONE)
+ txq += ((inp->inp_flowid % (vi->ntxq - vi->rsrv_noflowq)) +
+ vi->rsrv_noflowq);
+ tlsp->txq = txq;
+ INP_RUNLOCK(inp);
+
+ error = ktls_setup_keys(tlsp, tls, txq);
+ if (error)
+ goto failed;
+
+ tlsp->enc_mode = t4_tls_cipher_mode(tls);
+ tlsp->tx_key_info_size = t4_tls_key_info_size(tls);
+
+ /* The SCMD fields used when encrypting a full TLS record. */
+ if (tlsp->tls13)
+ tlsp->scmd0.seqno_numivs = V_SCMD_SEQ_NO_CTRL(0);
+ else
+ tlsp->scmd0.seqno_numivs = V_SCMD_SEQ_NO_CTRL(3);
+ tlsp->scmd0.seqno_numivs |=
+ V_SCMD_PROTO_VERSION(t4_tls_proto_ver(tls)) |
+ V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
+ V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
+ V_SCMD_CIPH_MODE(tlsp->enc_mode) |
+ V_SCMD_AUTH_MODE(t4_tls_auth_mode(tls)) |
+ V_SCMD_HMAC_CTRL(t4_tls_hmac_ctrl(tls)) |
+ V_SCMD_IV_SIZE(iv_size / 2) | V_SCMD_NUM_IVS(1);
+ tlsp->scmd0.seqno_numivs = htobe32(tlsp->scmd0.seqno_numivs);
+
+ tlsp->scmd0.ivgen_hdrlen = V_SCMD_IV_GEN_CTRL(0) |
+ V_SCMD_TLS_FRAG_ENABLE(0);
+ if (tlsp->inline_key)
+ tlsp->scmd0.ivgen_hdrlen |= V_SCMD_KEY_CTX_INLINE(1);
+
+ /*
+ * The SCMD fields used when encrypting a short TLS record
+ * (no trailer and possibly a truncated payload).
+ */
+ tlsp->scmd0_short.seqno_numivs = V_SCMD_SEQ_NO_CTRL(0) |
+ V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
+ V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
+ V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
+ V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_NOP) |
+ V_SCMD_HMAC_CTRL(SCMD_HMAC_CTRL_NOP) |
+ V_SCMD_IV_SIZE(AES_BLOCK_LEN / 2) | V_SCMD_NUM_IVS(0);
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM)
+ tlsp->scmd0_short.seqno_numivs |=
+ V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_CTR);
+ else
+ tlsp->scmd0_short.seqno_numivs |=
+ V_SCMD_CIPH_MODE(tlsp->enc_mode);
+ tlsp->scmd0_short.seqno_numivs =
+ htobe32(tlsp->scmd0_short.seqno_numivs);
+
+ tlsp->scmd0_short.ivgen_hdrlen = V_SCMD_IV_GEN_CTRL(0) |
+ V_SCMD_TLS_FRAG_ENABLE(0) | V_SCMD_AADIVDROP(1);
+ if (tlsp->inline_key)
+ tlsp->scmd0_short.ivgen_hdrlen |= V_SCMD_KEY_CTX_INLINE(1);
+
+ /*
+ * The SCMD fields used when encrypting a short TLS record
+ * using a partial GHASH.
+ */
+ tlsp->scmd0_partial.seqno_numivs = V_SCMD_SEQ_NO_CTRL(0) |
+ V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
+ V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
+ V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
+ V_SCMD_CIPH_MODE(tlsp->enc_mode) |
+ V_SCMD_AUTH_MODE(t4_tls_auth_mode(tls)) |
+ V_SCMD_HMAC_CTRL(t4_tls_hmac_ctrl(tls)) |
+ V_SCMD_IV_SIZE(AES_BLOCK_LEN / 2) | V_SCMD_NUM_IVS(1);
+ tlsp->scmd0_partial.seqno_numivs =
+ htobe32(tlsp->scmd0_partial.seqno_numivs);
+
+ tlsp->scmd0_partial.ivgen_hdrlen = V_SCMD_IV_GEN_CTRL(0) |
+ V_SCMD_TLS_FRAG_ENABLE(0) | V_SCMD_AADIVDROP(1) |
+ V_SCMD_KEY_CTX_INLINE(1);
+
+ TXQ_LOCK(txq);
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM)
+ txq->kern_tls_gcm++;
+ else
+ txq->kern_tls_cbc++;
+ TXQ_UNLOCK(txq);
+ *pt = &tlsp->com;
+ return (0);
+
+failed:
+ m_snd_tag_rele(&tlsp->com);
+ return (error);
+}
+
+static int
+ktls_setup_keys(struct tlspcb *tlsp, const struct ktls_session *tls,
+ struct sge_txq *txq)
+{
+ struct tls_key_req *kwr;
+ struct tls_keyctx *kctx;
+ void *items[1];
+ struct mbuf *m;
+ int error;
+
+ /*
+ * Store the salt and keys in the key context. For
+ * connections with an inline key, this key context is passed
+ * as immediate data in each work request. For connections
+ * storing the key in DDR, a work request is used to store a
+ * copy of the key context in DDR.
+ */
+ t4_tls_key_ctx(tls, KTLS_TX, &tlsp->keyctx);
+ if (tlsp->inline_key)
+ return (0);
+
+ /* Populate key work request. */
+ m = alloc_wr_mbuf(TLS_KEY_WR_SZ, M_NOWAIT);
+ if (m == NULL) {
+ CTR(KTR_CXGBE, "%s: %p failed to alloc WR mbuf", __func__,
+ tlsp);
+ return (ENOMEM);
+ }
+ m->m_pkthdr.snd_tag = m_snd_tag_ref(&tlsp->com);
+ m->m_pkthdr.csum_flags |= CSUM_SND_TAG;
+ kwr = mtod(m, void *);
+ memset(kwr, 0, TLS_KEY_WR_SZ);
+
+ t4_write_tlskey_wr(tls, KTLS_TX, 0, 0, tlsp->tx_key_addr, kwr);
+ kctx = (struct tls_keyctx *)(kwr + 1);
+ memcpy(kctx, &tlsp->keyctx, sizeof(*kctx));
+
+ /*
+ * Place the key work request in the transmit queue. It
+ * should be sent to the NIC before any TLS packets using this
+ * session.
+ */
+ items[0] = m;
+ error = mp_ring_enqueue(txq->r, items, 1, 1);
+ if (error)
+ m_free(m);
+ else
+ CTR(KTR_CXGBE, "%s: %p sent key WR", __func__, tlsp);
+ return (error);
+}
+
+static u_int
+ktls_base_wr_size(struct tlspcb *tlsp, bool inline_key)
+{
+ u_int wr_len;
+
+ wr_len = sizeof(struct fw_ulptx_wr); // 16
+ wr_len += sizeof(struct ulp_txpkt); // 8
+ wr_len += sizeof(struct ulptx_idata); // 8
+ wr_len += sizeof(struct cpl_tx_sec_pdu);// 32
+ if (inline_key)
+ wr_len += tlsp->tx_key_info_size;
+ else {
+ wr_len += sizeof(struct ulptx_sc_memrd);// 8
+ wr_len += sizeof(struct ulptx_idata); // 8
+ }
+ /* SplitMode CPL_RX_PHYS_DSGL here if needed. */
+ /* CPL_TX_*_LSO here if needed. */
+ wr_len += sizeof(struct cpl_tx_pkt_core);// 16
+ return (wr_len);
+}
+
+static u_int
+ktls_sgl_size(u_int nsegs)
+{
+ u_int wr_len;
+
+ /* First segment is part of ulptx_sgl. */
+ nsegs--;
+
+ wr_len = sizeof(struct ulptx_sgl);
+ wr_len += 8 * ((3 * nsegs) / 2 + (nsegs & 1));
+ return (wr_len);
+}
+
+/*
+ * A request that doesn't need to generate the TLS trailer is a short
+ * record. For these requests, part of the TLS record payload is
+ * encrypted without invoking the MAC.
+ *
+ * Returns true if this record should be sent as a short record. In
+ * either case, the remaining outputs describe the how much of the
+ * TLS record to send as input to the crypto block and the amount of
+ * crypto output to trim via SplitMode:
+ *
+ * *header_len - Number of bytes of TLS header to pass as immediate
+ * data
+ *
+ * *offset - Start offset of TLS record payload to pass as DSGL data
+ *
+ * *plen - Length of TLS record payload to pass as DSGL data
+ *
+ * *leading_waste - amount of non-packet-header bytes to drop at the
+ * start of the crypto output
+ *
+ * *trailing_waste - amount of crypto output to drop from the end
+ */
+static bool
+ktls_is_short_record(struct tlspcb *tlsp, struct mbuf *m_tls, u_int tlen,
+ u_int rlen, u_int *header_len, u_int *offset, u_int *plen,
+ u_int *leading_waste, u_int *trailing_waste, bool send_partial_ghash,
+ bool request_ghash)
+{
+ u_int new_tlen, trailer_len;
+
+ MPASS(tlen > m_tls->m_epg_hdrlen);
+
+ /*
+ * For TLS 1.3 treat the inner record type stored as the first
+ * byte of the trailer as part of the payload rather than part
+ * of the trailer.
+ */
+ trailer_len = m_tls->m_epg_trllen;
+ if (tlsp->tls13)
+ trailer_len--;
+
+ /*
+ * Default to sending the full record as input to the crypto
+ * engine and relying on SplitMode to drop any waste.
+ */
+ *header_len = m_tls->m_epg_hdrlen;
+ *offset = 0;
+ *plen = rlen - (m_tls->m_epg_hdrlen + trailer_len);
+ *leading_waste = mtod(m_tls, vm_offset_t);
+ *trailing_waste = rlen - tlen;
+ if (!tlsp->sc->tlst.short_records)
+ return (false);
+
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_CBC) {
+ /*
+ * For AES-CBC we have to send input from the start of
+ * the TLS record payload that is a multiple of the
+ * block size. new_tlen rounds up tlen to the end of
+ * the containing AES block. If this last block
+ * overlaps with the trailer, send the full record to
+ * generate the MAC.
+ */
+ new_tlen = TLS_HEADER_LENGTH +
+ roundup2(tlen - TLS_HEADER_LENGTH, AES_BLOCK_LEN);
+ if (rlen - new_tlen < trailer_len)
+ return (false);
+
+ *trailing_waste = new_tlen - tlen;
+ *plen = new_tlen - m_tls->m_epg_hdrlen;
+ } else {
+ if (rlen - tlen < trailer_len ||
+ (rlen - tlen == trailer_len && request_ghash)) {
+ /*
+ * For AES-GCM we have to send the full record
+ * if the end overlaps with the trailer and a
+ * partial GHASH isn't being sent.
+ */
+ if (!send_partial_ghash)
+ return (false);
+
+ /*
+ * Will need to treat any excess trailer bytes as
+ * trailing waste. *trailing_waste is already
+ * correct.
+ */
+ } else {
+ /*
+ * We can use AES-CTR or AES-GCM in partial GHASH
+ * mode to encrypt a partial PDU.
+ *
+ * The last block can be partially encrypted
+ * without any trailing waste.
+ */
+ *trailing_waste = 0;
+ *plen = tlen - m_tls->m_epg_hdrlen;
+ }
+
+ /*
+ * If this request starts at the first byte of the
+ * payload (so the previous request sent the full TLS
+ * header as a tunnel packet) and a partial GHASH is
+ * being requested, the full TLS header must be sent
+ * as input for the GHASH.
+ */
+ if (mtod(m_tls, vm_offset_t) == m_tls->m_epg_hdrlen &&
+ request_ghash)
+ return (true);
+
+ /*
+ * In addition, we can minimize leading waste by
+ * starting encryption at the start of the closest AES
+ * block.
+ */
+ if (mtod(m_tls, vm_offset_t) >= m_tls->m_epg_hdrlen) {
+ *header_len = 0;
+ *offset = mtod(m_tls, vm_offset_t) -
+ m_tls->m_epg_hdrlen;
+ if (*offset >= *plen)
+ *offset = *plen;
+ else
+ *offset = rounddown2(*offset, AES_BLOCK_LEN);
+
+ /*
+ * If the request is just bytes from the trailer,
+ * trim the offset to the end of the payload.
+ */
+ *offset = min(*offset, *plen);
+ *plen -= *offset;
+ *leading_waste -= (m_tls->m_epg_hdrlen + *offset);
+ }
+ }
+ return (true);
+}
+
+/* Size of the AES-GCM TLS AAD for a given connection. */
+static int
+ktls_gcm_aad_len(struct tlspcb *tlsp)
+{
+ return (tlsp->tls13 ? sizeof(struct tls_aead_data_13) :
+ sizeof(struct tls_aead_data));
+}
+
+static int
+ktls_wr_len(struct tlspcb *tlsp, struct mbuf *m, struct mbuf *m_tls,
+ int *nsegsp)
+{
+ const struct tls_record_layer *hdr;
+ u_int header_len, imm_len, offset, plen, rlen, tlen, wr_len;
+ u_int leading_waste, trailing_waste;
+ bool inline_key, last_ghash_frag, request_ghash, send_partial_ghash;
+ bool short_record;
+
+ M_ASSERTEXTPG(m_tls);
+
+ /*
+ * The relative offset of the last byte to send from the TLS
+ * record.
+ */
+ tlen = mtod(m_tls, vm_offset_t) + m_tls->m_len;
+ if (tlen <= m_tls->m_epg_hdrlen) {
+ /*
+ * For requests that only want to send the TLS header,
+ * send a tunnelled packet as immediate data.
+ */
+ wr_len = sizeof(struct fw_eth_tx_pkt_wr) +
+ sizeof(struct cpl_tx_pkt_core) +
+ roundup2(m->m_len + m_tls->m_len, 16);
+ if (wr_len > SGE_MAX_WR_LEN) {
+ CTR(KTR_CXGBE,
+ "%s: %p TLS header-only packet too long (len %d)",
+ __func__, tlsp, m->m_len + m_tls->m_len);
+ }
+
+ /* This should always be the last TLS record in a chain. */
+ MPASS(m_tls->m_next == NULL);
+ *nsegsp = 0;
+ return (wr_len);
+ }
+
+ hdr = (void *)m_tls->m_epg_hdr;
+ rlen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length);
+
+ /*
+ * See if this request might make use of GHASH state. This
+ * errs on the side of over-budgeting the WR size.
+ */
+ last_ghash_frag = false;
+ request_ghash = false;
+ send_partial_ghash = false;
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM &&
+ tlsp->sc->tlst.partial_ghash && tlsp->sc->tlst.short_records) {
+ u_int trailer_len;
+
+ trailer_len = m_tls->m_epg_trllen;
+ if (tlsp->tls13)
+ trailer_len--;
+ KASSERT(trailer_len == AES_GMAC_HASH_LEN,
+ ("invalid trailer length for AES-GCM"));
+
+ /* Is this the start of a TLS record? */
+ if (mtod(m_tls, vm_offset_t) <= m_tls->m_epg_hdrlen) {
+ /*
+ * Might use partial GHASH if this doesn't
+ * send the full record.
+ */
+ if (tlen < rlen) {
+ if (tlen < (rlen - trailer_len))
+ send_partial_ghash = true;
+ request_ghash = true;
+ }
+ } else {
+ send_partial_ghash = true;
+ if (tlen < rlen)
+ request_ghash = true;
+ if (tlen >= (rlen - trailer_len))
+ last_ghash_frag = true;
+ }
+ }
+
+ /*
+ * Assume not sending partial GHASH for this call to get the
+ * larger size.
+ */
+ short_record = ktls_is_short_record(tlsp, m_tls, tlen, rlen,
+ &header_len, &offset, &plen, &leading_waste, &trailing_waste,
+ false, request_ghash);
+
+ inline_key = send_partial_ghash || tlsp->inline_key;
+
+ /* Calculate the size of the work request. */
+ wr_len = ktls_base_wr_size(tlsp, inline_key);
+
+ if (send_partial_ghash)
+ wr_len += AES_GMAC_HASH_LEN;
+
+ if (leading_waste != 0 || trailing_waste != 0) {
+ /*
+ * Partial records might require a SplitMode
+ * CPL_RX_PHYS_DSGL.
+ */
+ wr_len += sizeof(struct cpl_t7_rx_phys_dsgl);
+ }
+
+ /* Budget for an LSO header even if we don't use it. */
+ wr_len += sizeof(struct cpl_tx_pkt_lso_core);
+
+ /*
+ * Headers (including the TLS header) are always sent as
+ * immediate data. Short records include a raw AES IV as
+ * immediate data. TLS 1.3 non-short records include a
+ * placeholder for the sequence number as immediate data.
+ * Short records using a partial hash may also need to send
+ * TLS AAD. If a partial hash might be sent, assume a short
+ * record to get the larger size.
+ */
+ imm_len = m->m_len + header_len;
+ if (short_record || send_partial_ghash) {
+ imm_len += AES_BLOCK_LEN;
+ if (send_partial_ghash && header_len != 0)
+ imm_len += ktls_gcm_aad_len(tlsp);
+ } else if (tlsp->tls13)
+ imm_len += sizeof(uint64_t);
+ wr_len += roundup2(imm_len, 16);
+
+ /*
+ * TLS record payload via DSGL. For partial GCM mode we
+ * might need an extra SG entry for a placeholder.
+ */
+ *nsegsp = sglist_count_mbuf_epg(m_tls, m_tls->m_epg_hdrlen + offset,
+ plen);
+ wr_len += ktls_sgl_size(*nsegsp + (last_ghash_frag ? 1 : 0));
+
+ if (request_ghash) {
+ /* AES-GCM records might return a partial hash. */
+ wr_len += sizeof(struct ulp_txpkt);
+ wr_len += sizeof(struct ulptx_idata);
+ wr_len += sizeof(struct cpl_tx_tls_ack);
+ wr_len += sizeof(struct rss_header) +
+ sizeof(struct cpl_fw6_pld);
+ wr_len += AES_GMAC_HASH_LEN;
+ }
+
+ wr_len = roundup2(wr_len, 16);
+ return (wr_len);
+}
+
+/* Queue the next pending packet. */
+static void
+ktls_queue_next_packet(struct tlspcb *tlsp, bool enqueue_only)
+{
+#ifdef KTR
+ struct ether_header *eh;
+ struct tcphdr *tcp;
+ tcp_seq tcp_seqno;
+#endif
+ struct mbuf *m;
+ void *items[1];
+ int rc;
+
+ TXQ_LOCK_ASSERT_OWNED(tlsp->txq);
+ KASSERT(tlsp->queue_mbufs, ("%s: mbufs not being queued for %p",
+ __func__, tlsp));
+ for (;;) {
+ m = mbufq_dequeue(&tlsp->pending_mbufs);
+ if (m == NULL) {
+ tlsp->queue_mbufs = false;
+ return;
+ }
+
+#ifdef KTR
+ eh = mtod(m, struct ether_header *);
+ tcp = (struct tcphdr *)((char *)eh + m->m_pkthdr.l2hlen +
+ m->m_pkthdr.l3hlen);
+ tcp_seqno = ntohl(tcp->th_seq);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: pkt len %d TCP seq %u", __func__,
+ m->m_pkthdr.len, tcp_seqno);
+#endif
+#endif
+
+ items[0] = m;
+ if (enqueue_only)
+ rc = mp_ring_enqueue_only(tlsp->txq->r, items, 1);
+ else {
+ TXQ_UNLOCK(tlsp->txq);
+ rc = mp_ring_enqueue(tlsp->txq->r, items, 1, 256);
+ TXQ_LOCK(tlsp->txq);
+ }
+ if (__predict_true(rc == 0))
+ return;
+
+ CTR(KTR_CXGBE, "%s: pkt len %d TCP seq %u dropped", __func__,
+ m->m_pkthdr.len, tcp_seqno);
+ m_freem(m);
+ }
+}
+
+int
+t7_ktls_parse_pkt(struct mbuf *m)
+{
+ struct tlspcb *tlsp;
+ struct ether_header *eh;
+ struct ip *ip;
+ struct ip6_hdr *ip6;
+ struct tcphdr *tcp;
+ struct mbuf *m_tls;
+ void *items[1];
+ int error, nsegs;
+ u_int wr_len, tot_len;
+ uint16_t eh_type;
+
+ /*
+ * Locate headers in initial mbuf.
+ *
+ * XXX: This assumes all of the headers are in the initial mbuf.
+ * Could perhaps use m_advance() like parse_pkt() if that turns
+ * out to not be true.
+ */
+ M_ASSERTPKTHDR(m);
+ MPASS(m->m_pkthdr.snd_tag != NULL);
+ tlsp = mst_to_tls(m->m_pkthdr.snd_tag);
+
+ if (m->m_len <= sizeof(*eh) + sizeof(*ip)) {
+ CTR(KTR_CXGBE, "%s: %p header mbuf too short", __func__, tlsp);
+ return (EINVAL);
+ }
+ eh = mtod(m, struct ether_header *);
+ eh_type = ntohs(eh->ether_type);
+ if (eh_type == ETHERTYPE_VLAN) {
+ struct ether_vlan_header *evh = (void *)eh;
+
+ eh_type = ntohs(evh->evl_proto);
+ m->m_pkthdr.l2hlen = sizeof(*evh);
+ } else
+ m->m_pkthdr.l2hlen = sizeof(*eh);
+
+ switch (eh_type) {
+ case ETHERTYPE_IP:
+ ip = (struct ip *)(eh + 1);
+ if (ip->ip_p != IPPROTO_TCP) {
+ CTR(KTR_CXGBE, "%s: %p mbuf not IPPROTO_TCP", __func__,
+ tlsp);
+ return (EINVAL);
+ }
+ m->m_pkthdr.l3hlen = ip->ip_hl * 4;
+ break;
+ case ETHERTYPE_IPV6:
+ ip6 = (struct ip6_hdr *)(eh + 1);
+ if (ip6->ip6_nxt != IPPROTO_TCP) {
+ CTR(KTR_CXGBE, "%s: %p, mbuf not IPPROTO_TCP (%u)",
+ __func__, tlsp, ip6->ip6_nxt);
+ return (EINVAL);
+ }
+ m->m_pkthdr.l3hlen = sizeof(struct ip6_hdr);
+ break;
+ default:
+ CTR(KTR_CXGBE, "%s: %p mbuf not ETHERTYPE_IP{,V6}", __func__,
+ tlsp);
+ return (EINVAL);
+ }
+ if (m->m_len < m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
+ sizeof(*tcp)) {
+ CTR(KTR_CXGBE, "%s: %p header mbuf too short (2)", __func__,
+ tlsp);
+ return (EINVAL);
+ }
+ tcp = (struct tcphdr *)((char *)(eh + 1) + m->m_pkthdr.l3hlen);
+ m->m_pkthdr.l4hlen = tcp->th_off * 4;
+
+ /* Bail if there is TCP payload before the TLS record. */
+ if (m->m_len != m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
+ m->m_pkthdr.l4hlen) {
+ CTR(KTR_CXGBE,
+ "%s: %p header mbuf bad length (%d + %d + %d != %d)",
+ __func__, tlsp, m->m_pkthdr.l2hlen, m->m_pkthdr.l3hlen,
+ m->m_pkthdr.l4hlen, m->m_len);
+ return (EINVAL);
+ }
+
+ /* Assume all headers are in 'm' for now. */
+ MPASS(m->m_next != NULL);
+ MPASS(m->m_next->m_flags & M_EXTPG);
+
+ tot_len = 0;
+
+ /*
+ * Each of the remaining mbufs in the chain should reference a
+ * TLS record.
+ */
+ for (m_tls = m->m_next; m_tls != NULL; m_tls = m_tls->m_next) {
+ MPASS(m_tls->m_flags & M_EXTPG);
+
+ wr_len = ktls_wr_len(tlsp, m, m_tls, &nsegs);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: %p wr_len %d nsegs %d", __func__, tlsp,
+ wr_len, nsegs);
+#endif
+ if (wr_len > SGE_MAX_WR_LEN || nsegs > TX_SGL_SEGS)
+ return (EFBIG);
+ tot_len += roundup2(wr_len, EQ_ESIZE);
+
+ /*
+ * Store 'nsegs' for the first TLS record in the
+ * header mbuf's metadata.
+ */
+ if (m_tls == m->m_next)
+ set_mbuf_nsegs(m, nsegs);
+ }
+
+ MPASS(tot_len != 0);
+ set_mbuf_len16(m, tot_len / 16);
+
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
+ /* Defer packets beyond what has been sent so far. */
+ TXQ_LOCK(tlsp->txq);
+ if (tlsp->queue_mbufs) {
+ error = mbufq_enqueue(&tlsp->pending_mbufs, m);
+ if (error == 0) {
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE,
+ "%s: %p len16 %d nsegs %d TCP seq %u deferred",
+ __func__, tlsp, mbuf_len16(m),
+ mbuf_nsegs(m), ntohl(tcp->th_seq));
+#endif
+ }
+ TXQ_UNLOCK(tlsp->txq);
+ return (error);
+ }
+ tlsp->queue_mbufs = true;
+ TXQ_UNLOCK(tlsp->txq);
+ }
+
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: %p len16 %d nsegs %d", __func__, tlsp,
+ mbuf_len16(m), mbuf_nsegs(m));
+#endif
+ items[0] = m;
+ error = mp_ring_enqueue(tlsp->txq->r, items, 1, 256);
+ if (__predict_false(error != 0)) {
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
+ TXQ_LOCK(tlsp->txq);
+ ktls_queue_next_packet(tlsp, false);
+ TXQ_UNLOCK(tlsp->txq);
+ }
+ }
+ return (error);
+}
+
+static inline bool
+needs_vlan_insertion(struct mbuf *m)
+{
+
+ M_ASSERTPKTHDR(m);
+
+ return (m->m_flags & M_VLANTAG);
+}
+
+static inline uint64_t
+pkt_ctrl1(struct sge_txq *txq, struct mbuf *m, uint16_t eh_type)
+{
+ uint64_t ctrl1;
+
+ /* Checksums are always offloaded */
+ if (eh_type == ETHERTYPE_IP) {
+ ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) |
+ V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
+ V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
+ } else {
+ MPASS(m->m_pkthdr.l3hlen == sizeof(struct ip6_hdr));
+ ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP6) |
+ V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
+ V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
+ }
+ txq->txcsum++;
+
+ /* VLAN tag insertion */
+ if (needs_vlan_insertion(m)) {
+ ctrl1 |= F_TXPKT_VLAN_VLD |
+ V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
+ txq->vlan_insertion++;
+ }
+
+ return (ctrl1);
+}
+
+static inline void *
+write_lso_cpl(void *cpl, struct mbuf *m0, uint16_t mss, uint16_t eh_type,
+ int total_len)
+{
+ struct cpl_tx_pkt_lso_core *lso;
+ uint32_t ctrl;
+
+ KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
+ m0->m_pkthdr.l4hlen > 0,
+ ("%s: mbuf %p needs TSO but missing header lengths",
+ __func__, m0));
+
+ ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) |
+ F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
+ V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) |
+ V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) |
+ V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
+ if (eh_type == ETHERTYPE_IPV6)
+ ctrl |= F_LSO_IPV6;
+
+ lso = cpl;
+ lso->lso_ctrl = htobe32(ctrl);
+ lso->ipid_ofst = htobe16(0);
+ lso->mss = htobe16(mss);
+ lso->seqno_offset = htobe32(0);
+ lso->len = htobe32(total_len);
+
+ return (lso + 1);
+}
+
+static inline void *
+write_tx_tls_ack(void *dst, u_int rx_chid, u_int hash_len, bool ghash_lcb)
+{
+ struct cpl_tx_tls_ack *cpl;
+ uint32_t flags;
+
+ flags = ghash_lcb ? F_CPL_TX_TLS_ACK_LCB : F_CPL_TX_TLS_ACK_PHASH;
+ cpl = dst;
+ cpl->op_to_Rsvd2 = htobe32(V_CPL_TX_TLS_ACK_OPCODE(CPL_TX_TLS_ACK) |
+ V_T7_CPL_TX_TLS_ACK_RXCHID(rx_chid) | F_CPL_TX_TLS_ACK_ULPTXLPBK |
+ flags);
+
+ /* 32 == AckEncCpl, 16 == LCB */
+ cpl->PldLen = htobe32(V_CPL_TX_TLS_ACK_PLDLEN(32 + 16 + hash_len));
+ cpl->Rsvd3 = 0;
+
+ return (cpl + 1);
+}
+
+static inline void *
+write_fw6_pld(void *dst, u_int rx_chid, u_int rx_qid, u_int hash_len,
+ uint64_t cookie)
+{
+ struct rss_header *rss;
+ struct cpl_fw6_pld *cpl;
+
+ rss = dst;
+ memset(rss, 0, sizeof(*rss));
+ rss->opcode = CPL_FW6_PLD;
+ rss->qid = htobe16(rx_qid);
+ rss->channel = rx_chid;
+
+ cpl = (void *)(rss + 1);
+ memset(cpl, 0, sizeof(*cpl));
+ cpl->opcode = CPL_FW6_PLD;
+ cpl->len = htobe16(hash_len);
+ cpl->data[1] = htobe64(cookie);
+
+ return (cpl + 1);
+}
+
+static inline void *
+write_split_mode_rx_phys(void *dst, struct mbuf *m, struct mbuf *m_tls,
+ u_int crypto_hdr_len, u_int leading_waste, u_int trailing_waste)
+{
+ struct cpl_t7_rx_phys_dsgl *cpl;
+ uint16_t *len;
+ uint8_t numsge;
+
+ /* Forward first (3) and third (1) segments. */
+ numsge = 0xa;
+
+ cpl = dst;
+ cpl->ot.opcode = CPL_RX_PHYS_DSGL;
+ cpl->PhysAddrFields_lo_to_NumSGE =
+ htobe32(F_CPL_T7_RX_PHYS_DSGL_SPLITMODE |
+ V_CPL_T7_RX_PHYS_DSGL_NUMSGE(numsge));
+
+ len = (uint16_t *)(cpl->RSSCopy);
+
+ /*
+ * First segment always contains packet headers as well as
+ * transmit-related CPLs.
+ */
+ len[0] = htobe16(crypto_hdr_len);
+
+ /*
+ * Second segment is "gap" of data to drop at the front of the
+ * TLS record.
+ */
+ len[1] = htobe16(leading_waste);
+
+ /* Third segment is how much of the TLS record to send. */
+ len[2] = htobe16(m_tls->m_len);
+
+ /* Fourth segment is how much data to drop at the end. */
+ len[3] = htobe16(trailing_waste);
+
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: forward %u skip %u forward %u skip %u",
+ __func__, be16toh(len[0]), be16toh(len[1]), be16toh(len[2]),
+ be16toh(len[3]));
+#endif
+ return (cpl + 1);
+}
+
+/*
+ * If the SGL ends on an address that is not 16 byte aligned, this function will
+ * add a 0 filled flit at the end.
+ */
+static void *
+write_gl_to_buf(struct sglist *gl, caddr_t to)
+{
+ struct sglist_seg *seg;
+ __be64 *flitp;
+ struct ulptx_sgl *usgl;
+ int i, nflits, nsegs;
+
+ KASSERT(((uintptr_t)to & 0xf) == 0,
+ ("%s: SGL must start at a 16 byte boundary: %p", __func__, to));
+
+ nsegs = gl->sg_nseg;
+ MPASS(nsegs > 0);
+
+ nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2;
+ flitp = (__be64 *)to;
+ seg = &gl->sg_segs[0];
+ usgl = (void *)flitp;
+
+ usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
+ V_ULPTX_NSGE(nsegs));
+ usgl->len0 = htobe32(seg->ss_len);
+ usgl->addr0 = htobe64(seg->ss_paddr);
+ seg++;
+
+ for (i = 0; i < nsegs - 1; i++, seg++) {
+ usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len);
+ usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr);
+ }
+ if (i & 1)
+ usgl->sge[i / 2].len[1] = htobe32(0);
+ flitp += nflits;
+
+ if (nflits & 1) {
+ MPASS(((uintptr_t)flitp) & 0xf);
+ *flitp++ = 0;
+ }
+
+ MPASS((((uintptr_t)flitp) & 0xf) == 0);
+ return (flitp);
+}
+
+static inline void
+copy_to_txd(struct sge_eq *eq, const char *from, caddr_t *to, int len)
+{
+
+ MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
+ MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);
+
+ if (__predict_true((uintptr_t)(*to) + len <=
+ (uintptr_t)&eq->desc[eq->sidx])) {
+ bcopy(from, *to, len);
+ (*to) += len;
+ if ((uintptr_t)(*to) == (uintptr_t)&eq->desc[eq->sidx])
+ (*to) = (caddr_t)eq->desc;
+ } else {
+ int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to);
+
+ bcopy(from, *to, portion);
+ from += portion;
+ portion = len - portion; /* remaining */
+ bcopy(from, (void *)eq->desc, portion);
+ (*to) = (caddr_t)eq->desc + portion;
+ }
+}
+
+/*
+ * Emit a plain (unencrypted, "tunnelled") Ethernet/IP/TCP packet as a
+ * FW_ETH_TX_PKT_WR with all payload as immediate data.  Used for
+ * requests that only need to send a TLS header or a previously-computed
+ * trailer, where no crypto engine work is required.  The IP (or IPv6)
+ * length and the TCP sequence number are rewritten to match this
+ * packet; 'src'/'len' supply the payload that follows the copied
+ * packet headers from 'm'.  Returns the number of tx descriptors used.
+ */
+static int
+ktls_write_tunnel_packet(struct sge_txq *txq, void *dst, struct mbuf *m,
+ const void *src, u_int len, u_int available, tcp_seq tcp_seqno, u_int pidx,
+ uint16_t eh_type, bool last_wr)
+{
+ struct tx_sdesc *txsd;
+ struct fw_eth_tx_pkt_wr *wr;
+ struct cpl_tx_pkt_core *cpl;
+ uint32_t ctrl;
+ int len16, ndesc, pktlen;
+ struct ether_header *eh;
+ struct ip *ip, newip;
+ struct ip6_hdr *ip6, newip6;
+ struct tcphdr *tcp, newtcp;
+ caddr_t out;
+
+ TXQ_LOCK_ASSERT_OWNED(txq);
+ M_ASSERTPKTHDR(m);
+
+ /* Total wire length: packet headers from 'm' plus payload. */
+ wr = dst;
+ pktlen = m->m_len + len;
+ ctrl = sizeof(struct cpl_tx_pkt_core) + pktlen;
+ len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + ctrl, 16);
+ ndesc = tx_len16_to_desc(len16);
+ MPASS(ndesc <= available);
+
+ /* Firmware work request header */
+ /* TODO: Handle VF work request. */
+ wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
+ V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));
+
+ ctrl = V_FW_WR_LEN16(len16);
+ wr->equiq_to_len16 = htobe32(ctrl);
+ wr->r3 = 0;
+
+ cpl = (void *)(wr + 1);
+
+ /* CPL header */
+ cpl->ctrl0 = txq->cpl_ctrl0;
+ cpl->pack = 0;
+ cpl->len = htobe16(pktlen);
+
+ out = (void *)(cpl + 1);
+
+ /* Copy over Ethernet header. */
+ eh = mtod(m, struct ether_header *);
+ copy_to_txd(&txq->eq, (caddr_t)eh, &out, m->m_pkthdr.l2hlen);
+
+ /* Fixup length in IP header and copy out. */
+ if (eh_type == ETHERTYPE_IP) {
+ ip = (void *)((char *)eh + m->m_pkthdr.l2hlen);
+ newip = *ip;
+ newip.ip_len = htons(pktlen - m->m_pkthdr.l2hlen);
+ copy_to_txd(&txq->eq, (caddr_t)&newip, &out, sizeof(newip));
+ if (m->m_pkthdr.l3hlen > sizeof(*ip))
+ copy_to_txd(&txq->eq, (caddr_t)(ip + 1), &out,
+ m->m_pkthdr.l3hlen - sizeof(*ip));
+ } else {
+ ip6 = (void *)((char *)eh + m->m_pkthdr.l2hlen);
+ newip6 = *ip6;
+ newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen -
+ sizeof(*ip6));
+ copy_to_txd(&txq->eq, (caddr_t)&newip6, &out, sizeof(newip6));
+ MPASS(m->m_pkthdr.l3hlen == sizeof(*ip6));
+ }
+ cpl->ctrl1 = htobe64(pkt_ctrl1(txq, m, eh_type));
+
+ /* Set sequence number in TCP header. */
+ tcp = (void *)((char *)eh + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen);
+ newtcp = *tcp;
+ newtcp.th_seq = htonl(tcp_seqno);
+ copy_to_txd(&txq->eq, (caddr_t)&newtcp, &out, sizeof(newtcp));
+
+ /* Copy rest of TCP header. */
+ copy_to_txd(&txq->eq, (caddr_t)(tcp + 1), &out, m->m_len -
+ (m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + sizeof(*tcp)));
+
+ /* Copy the payload data. */
+ copy_to_txd(&txq->eq, src, &out, len);
+ txq->imm_wrs++;
+
+ txq->txpkt_wrs++;
+
+ /* Only the final WR of a chain owns (and later frees) the mbuf. */
+ txsd = &txq->sdesc[pidx];
+ if (last_wr)
+ txsd->m = m;
+ else
+ txsd->m = NULL;
+ txsd->desc_used = ndesc;
+
+ return (ndesc);
+}
+
+/*
+ * Construct the FW_ULPTX_WR work request that transmits (part of) one
+ * TLS record 'm_tls' via the crypto engine.  Handles full records,
+ * short (partial) records using AES-CTR or partial-GHASH mode,
+ * header-only and trailer-only requests (sent as plain tunnelled
+ * packets instead), SplitMode trimming of unwanted crypto output, LSO
+ * for records larger than one MSS, and optionally requests an updated
+ * partial GHASH from the hardware via a second ULP_TX_PKT.  Returns
+ * the number of tx descriptors consumed.  Called with the txq lock
+ * held.
+ */
+static int
+ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
+ void *dst, struct mbuf *m, struct tcphdr *tcp, struct mbuf *m_tls,
+ u_int available, tcp_seq tcp_seqno, u_int pidx, uint16_t eh_type,
+ uint16_t mss)
+{
+ struct sge_eq *eq = &txq->eq;
+ struct tx_sdesc *txsd;
+ struct fw_ulptx_wr *wr;
+ struct ulp_txpkt *txpkt;
+ struct ulptx_sc_memrd *memrd;
+ struct ulptx_idata *idata;
+ struct cpl_tx_sec_pdu *sec_pdu;
+ struct cpl_tx_pkt_core *tx_pkt;
+ const struct tls_record_layer *hdr;
+ struct ip *ip;
+ struct ip6_hdr *ip6;
+ struct tcphdr *newtcp;
+ char *iv, *out;
+ u_int aad_start, aad_stop;
+ u_int auth_start, auth_stop, auth_insert;
+ u_int cipher_start, cipher_stop, iv_offset;
+ u_int header_len, offset, plen, rlen, tlen;
+ u_int imm_len, ndesc, nsegs, txpkt_lens[2], wr_len;
+ u_int cpl_len, crypto_hdr_len, post_key_context_len;
+ u_int leading_waste, trailing_waste;
+ u_short ip_len;
+ bool inline_key, ghash_lcb, last_ghash_frag, last_wr, need_lso;
+ bool request_ghash, send_partial_ghash, short_record, split_mode;
+ bool using_scratch;
+
+ MPASS(tlsp->txq == txq);
+ M_ASSERTEXTPG(m_tls);
+
+ /* Final work request for this mbuf chain? */
+ last_wr = (m_tls->m_next == NULL);
+
+ /*
+ * The relative offset of the last byte to send from the TLS
+ * record.
+ */
+ tlen = mtod(m_tls, vm_offset_t) + m_tls->m_len;
+ if (tlen <= m_tls->m_epg_hdrlen) {
+ /*
+ * For requests that only want to send the TLS header,
+ * send a tunnelled packet as immediate data.
+ */
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: %p header-only TLS record %u", __func__,
+ tlsp, (u_int)m_tls->m_epg_seqno);
+#endif
+ /* This should always be the last TLS record in a chain. */
+ MPASS(last_wr);
+
+ txq->kern_tls_header++;
+
+ return (ktls_write_tunnel_packet(txq, dst, m,
+ (char *)m_tls->m_epg_hdr + mtod(m_tls, vm_offset_t),
+ m_tls->m_len, available, tcp_seqno, pidx, eh_type,
+ last_wr));
+ }
+
+ /* Locate the TLS header. */
+ hdr = (void *)m_tls->m_epg_hdr;
+ rlen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length);
+
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: offset %lu len %u TCP seq %u TLS record %u",
+ __func__, mtod(m_tls, vm_offset_t), m_tls->m_len, tcp_seqno,
+ (u_int)m_tls->m_epg_seqno);
+#endif
+
+ /* Should this request make use of GHASH state? */
+ ghash_lcb = false;
+ last_ghash_frag = false;
+ request_ghash = false;
+ send_partial_ghash = false;
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM &&
+ tlsp->sc->tlst.partial_ghash && tlsp->sc->tlst.short_records) {
+ u_int trailer_len;
+
+ trailer_len = m_tls->m_epg_trllen;
+ if (tlsp->tls13)
+ trailer_len--;
+ KASSERT(trailer_len == AES_GMAC_HASH_LEN,
+ ("invalid trailer length for AES-GCM"));
+
+ /* Is this the start of a TLS record? */
+ if (mtod(m_tls, vm_offset_t) <= m_tls->m_epg_hdrlen) {
+ /*
+ * If this is the very first TLS record or
+ * if this is a newer TLS record, request a partial
+ * hash, but not if we are going to send the whole
+ * thing.
+ */
+ if ((tlsp->ghash_tls_seqno == 0 ||
+ tlsp->ghash_tls_seqno < m_tls->m_epg_seqno) &&
+ tlen < rlen) {
+ /*
+ * If we are only missing part or all
+ * of the trailer, send a normal full
+ * record but request the hash.
+ * Otherwise, use partial GHASH mode.
+ */
+ if (tlen >= (rlen - trailer_len))
+ ghash_lcb = true;
+ else
+ send_partial_ghash = true;
+ request_ghash = true;
+ tlsp->ghash_tls_seqno = m_tls->m_epg_seqno;
+ }
+ } else if (tlsp->ghash_tls_seqno == m_tls->m_epg_seqno &&
+ tlsp->ghash_valid) {
+ /*
+ * Compute the offset of the first AES block as
+ * is done in ktls_is_short_record.
+ */
+ if (rlen - tlen < trailer_len)
+ plen = rlen - (m_tls->m_epg_hdrlen +
+ trailer_len);
+ else
+ plen = tlen - m_tls->m_epg_hdrlen;
+ offset = mtod(m_tls, vm_offset_t) - m_tls->m_epg_hdrlen;
+ if (offset >= plen)
+ offset = plen;
+ else
+ offset = rounddown2(offset, AES_BLOCK_LEN);
+ if (tlsp->ghash_offset == offset) {
+ if (offset == plen) {
+ /*
+ * Send a partial trailer as a
+ * tunnelled packet as
+ * immediate data.
+ */
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE,
+ "%s: %p trailer-only TLS record %u",
+ __func__, tlsp,
+ (u_int)m_tls->m_epg_seqno);
+#endif
+
+ txq->kern_tls_trailer++;
+
+ offset = mtod(m_tls, vm_offset_t) -
+ (m_tls->m_epg_hdrlen + plen);
+ KASSERT(offset <= AES_GMAC_HASH_LEN,
+ ("offset outside of trailer"));
+ return (ktls_write_tunnel_packet(txq,
+ dst, m, tlsp->ghash + offset,
+ m_tls->m_len, available, tcp_seqno,
+ pidx, eh_type, last_wr));
+ }
+
+ /*
+ * If this request sends the end of
+ * the payload, it is the last
+ * fragment.
+ */
+ if (tlen >= (rlen - trailer_len)) {
+ last_ghash_frag = true;
+ ghash_lcb = true;
+ }
+
+ /*
+ * Only use partial GCM mode (rather
+ * than an AES-CTR short record) if
+ * there is input auth data to pass to
+ * the GHASH. That is true so long as
+ * there is at least one full block of
+ * payload data, or if the remaining
+ * payload data is the final partial
+ * block.
+ */
+ if (plen - offset >= GMAC_BLOCK_LEN ||
+ last_ghash_frag) {
+ send_partial_ghash = true;
+
+ /*
+ * If not sending the complete
+ * end of the record, this is
+ * a middle request so needs
+ * to request an updated
+ * partial hash.
+ */
+ if (tlen < rlen)
+ request_ghash = true;
+ }
+ }
+ }
+ }
+
+ short_record = ktls_is_short_record(tlsp, m_tls, tlen, rlen,
+ &header_len, &offset, &plen, &leading_waste, &trailing_waste,
+ send_partial_ghash, request_ghash);
+
+ if (short_record) {
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE,
+ "%s: %p short TLS record %u hdr %u offs %u plen %u",
+ __func__, tlsp, (u_int)m_tls->m_epg_seqno, header_len,
+ offset, plen);
+ if (send_partial_ghash) {
+ if (header_len != 0)
+ CTR(KTR_CXGBE, "%s: %p sending initial GHASH",
+ __func__, tlsp);
+ else
+ CTR(KTR_CXGBE, "%s: %p sending partial GHASH for offset %u%s",
+ __func__, tlsp, tlsp->ghash_offset,
+ last_ghash_frag ? ", last_frag" : "");
+ }
+#endif
+ KASSERT(send_partial_ghash || !request_ghash,
+ ("requesting but not sending partial hash for short record"));
+ } else {
+ KASSERT(!send_partial_ghash,
+ ("sending partial hash with full record"));
+ }
+
+ if (tlen < rlen && m_tls->m_next == NULL &&
+ (tcp->th_flags & TH_FIN) != 0) {
+ txq->kern_tls_fin_short++;
+#ifdef INVARIANTS
+ panic("%s: FIN on short TLS record", __func__);
+#endif
+ }
+
+ /*
+ * Use cached value for first record in chain if not using
+ * partial GCM mode. ktls_parse_pkt() calculates nsegs based
+ * on send_partial_ghash being false.
+ */
+ if (m->m_next == m_tls && !send_partial_ghash)
+ nsegs = mbuf_nsegs(m);
+ else
+ nsegs = sglist_count_mbuf_epg(m_tls,
+ m_tls->m_epg_hdrlen + offset, plen);
+
+ /* Determine if we need an LSO header. */
+ need_lso = (m_tls->m_len > mss);
+
+ /* Calculate the size of the TLS work request. */
+ inline_key = send_partial_ghash || tlsp->inline_key;
+ wr_len = ktls_base_wr_size(tlsp, inline_key);
+
+ if (send_partial_ghash) {
+ /* Inline key context includes partial hash in OPAD. */
+ wr_len += AES_GMAC_HASH_LEN;
+ }
+
+ /*
+ * SplitMode is required if there is any thing we need to trim
+ * from the crypto output, either at the front or end of the
+ * record. Note that short records might not need trimming.
+ */
+ split_mode = leading_waste != 0 || trailing_waste != 0;
+ if (split_mode) {
+ /*
+ * Partial records require a SplitMode
+ * CPL_RX_PHYS_DSGL.
+ */
+ wr_len += sizeof(struct cpl_t7_rx_phys_dsgl);
+ }
+
+ if (need_lso)
+ wr_len += sizeof(struct cpl_tx_pkt_lso_core);
+
+ imm_len = m->m_len + header_len;
+ if (short_record) {
+ imm_len += AES_BLOCK_LEN;
+ if (send_partial_ghash && header_len != 0)
+ imm_len += ktls_gcm_aad_len(tlsp);
+ } else if (tlsp->tls13)
+ imm_len += sizeof(uint64_t);
+ wr_len += roundup2(imm_len, 16);
+ wr_len += ktls_sgl_size(nsegs + (last_ghash_frag ? 1 : 0));
+ wr_len = roundup2(wr_len, 16);
+ txpkt_lens[0] = wr_len - sizeof(*wr);
+
+ if (request_ghash) {
+ /*
+ * Requesting the hash entails a second ULP_TX_PKT
+ * containing CPL_TX_TLS_ACK, CPL_FW6_PLD, and space
+ * for the hash.
+ */
+ txpkt_lens[1] = sizeof(struct ulp_txpkt);
+ txpkt_lens[1] += sizeof(struct ulptx_idata);
+ txpkt_lens[1] += sizeof(struct cpl_tx_tls_ack);
+ txpkt_lens[1] += sizeof(struct rss_header) +
+ sizeof(struct cpl_fw6_pld);
+ txpkt_lens[1] += AES_GMAC_HASH_LEN;
+ wr_len += txpkt_lens[1];
+ } else
+ txpkt_lens[1] = 0;
+
+ ndesc = howmany(wr_len, EQ_ESIZE);
+ MPASS(ndesc <= available);
+
+ /*
+ * Use the per-txq scratch pad if near the end of the ring to
+ * simplify handling of wrap-around.
+ */
+ using_scratch = (eq->sidx - pidx < ndesc);
+ if (using_scratch)
+ wr = (void *)txq->ss;
+ else
+ wr = dst;
+
+ /* FW_ULPTX_WR */
+ wr->op_to_compl = htobe32(V_FW_WR_OP(FW_ULPTX_WR));
+ wr->flowid_len16 = htobe32(F_FW_ULPTX_WR_DATA |
+ V_FW_WR_LEN16(wr_len / 16));
+ wr->cookie = 0;
+
+ /* ULP_TXPKT */
+ txpkt = (void *)(wr + 1);
+ txpkt->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
+ V_ULP_TXPKT_DATAMODIFY(0) |
+ V_T7_ULP_TXPKT_CHANNELID(tlsp->vi->pi->port_id) |
+ V_ULP_TXPKT_DEST(0) |
+ V_ULP_TXPKT_CMDMORE(request_ghash ? 1 : 0) |
+ V_ULP_TXPKT_FID(txq->eq.cntxt_id) | V_ULP_TXPKT_RO(1));
+ txpkt->len = htobe32(howmany(txpkt_lens[0], 16));
+
+ /* ULPTX_IDATA sub-command */
+ idata = (void *)(txpkt + 1);
+ idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
+ V_ULP_TX_SC_MORE(1));
+ idata->len = sizeof(struct cpl_tx_sec_pdu);
+
+ /*
+ * After the key context comes CPL_RX_PHYS_DSGL, CPL_TX_*, and
+ * immediate data containing headers. When using an inline
+ * key, these are counted as part of this ULPTX_IDATA. When
+ * reading the key from memory, these are part of a separate
+ * ULPTX_IDATA.
+ */
+ cpl_len = sizeof(struct cpl_tx_pkt_core);
+ if (need_lso)
+ cpl_len += sizeof(struct cpl_tx_pkt_lso_core);
+ if (split_mode)
+ cpl_len += sizeof(struct cpl_t7_rx_phys_dsgl);
+ post_key_context_len = cpl_len + imm_len;
+
+ if (inline_key) {
+ idata->len += tlsp->tx_key_info_size + post_key_context_len;
+ if (send_partial_ghash) {
+ /* Partial GHASH in key context. */
+ idata->len += AES_GMAC_HASH_LEN;
+ }
+ }
+ idata->len = htobe32(idata->len);
+
+ /* CPL_TX_SEC_PDU */
+ sec_pdu = (void *)(idata + 1);
+
+ /*
+ * Packet headers are passed through unchanged by the crypto
+ * engine by marking them as header data in SCMD0.
+ */
+ crypto_hdr_len = m->m_len;
+
+ if (send_partial_ghash) {
+ /*
+ * For short records using a partial hash, the TLS
+ * header is counted as header data in SCMD0. TLS AAD
+ * is next (if AAD is present) followed by the AES-CTR
+ * IV. Last is the cipher region for the payload.
+ */
+ if (header_len != 0) {
+ aad_start = 1;
+ aad_stop = ktls_gcm_aad_len(tlsp);
+ } else {
+ aad_start = 0;
+ aad_stop = 0;
+ }
+ iv_offset = aad_stop + 1;
+ cipher_start = iv_offset + AES_BLOCK_LEN;
+ cipher_stop = 0;
+ if (last_ghash_frag) {
+ auth_start = cipher_start;
+ auth_stop = AES_GMAC_HASH_LEN;
+ auth_insert = auth_stop;
+ } else if (plen < GMAC_BLOCK_LEN) {
+ /*
+ * A request that sends part of the first AES
+ * block will only have AAD.
+ */
+ KASSERT(header_len != 0,
+ ("%s: partial GHASH with no auth", __func__));
+ auth_start = 0;
+ auth_stop = 0;
+ auth_insert = 0;
+ } else {
+ auth_start = cipher_start;
+ auth_stop = plen % GMAC_BLOCK_LEN;
+ auth_insert = 0;
+ }
+
+ sec_pdu->pldlen = htobe32(aad_stop + AES_BLOCK_LEN + plen +
+ (last_ghash_frag ? AES_GMAC_HASH_LEN : 0));
+
+ /*
+ * For short records, the TLS header is treated as
+ * header data.
+ */
+ crypto_hdr_len += header_len;
+
+ /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
+ sec_pdu->seqno_numivs = tlsp->scmd0_partial.seqno_numivs;
+ sec_pdu->ivgen_hdrlen = tlsp->scmd0_partial.ivgen_hdrlen;
+ if (last_ghash_frag)
+ sec_pdu->ivgen_hdrlen |= V_SCMD_LAST_FRAG(1);
+ else
+ sec_pdu->ivgen_hdrlen |= V_SCMD_MORE_FRAGS(1);
+ sec_pdu->ivgen_hdrlen = htobe32(sec_pdu->ivgen_hdrlen |
+ V_SCMD_HDR_LEN(crypto_hdr_len));
+
+ txq->kern_tls_partial_ghash++;
+ } else if (short_record) {
+ /*
+ * For short records without a partial hash, the TLS
+ * header is counted as header data in SCMD0 and the
+ * IV is next, followed by a cipher region for the
+ * payload.
+ */
+ aad_start = 0;
+ aad_stop = 0;
+ iv_offset = 1;
+ auth_start = 0;
+ auth_stop = 0;
+ auth_insert = 0;
+ cipher_start = AES_BLOCK_LEN + 1;
+ cipher_stop = 0;
+
+ sec_pdu->pldlen = htobe32(AES_BLOCK_LEN + plen);
+
+ /*
+ * For short records, the TLS header is treated as
+ * header data.
+ */
+ crypto_hdr_len += header_len;
+
+ /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
+ sec_pdu->seqno_numivs = tlsp->scmd0_short.seqno_numivs;
+ sec_pdu->ivgen_hdrlen = htobe32(
+ tlsp->scmd0_short.ivgen_hdrlen |
+ V_SCMD_HDR_LEN(crypto_hdr_len));
+
+ txq->kern_tls_short++;
+ } else {
+ /*
+ * AAD is TLS header. IV is after AAD for TLS < 1.3.
+ * For TLS 1.3, a placeholder for the TLS sequence
+ * number is provided as an IV before the AAD. The
+ * cipher region starts after the AAD and IV. See
+ * comments in ccr_authenc() and ccr_gmac() in
+ * t4_crypto.c regarding cipher and auth start/stop
+ * values.
+ */
+ if (tlsp->tls13) {
+ iv_offset = 1;
+ aad_start = 1 + sizeof(uint64_t);
+ aad_stop = sizeof(uint64_t) + TLS_HEADER_LENGTH;
+ cipher_start = aad_stop + 1;
+ } else {
+ aad_start = 1;
+ aad_stop = TLS_HEADER_LENGTH;
+ iv_offset = TLS_HEADER_LENGTH + 1;
+ cipher_start = m_tls->m_epg_hdrlen + 1;
+ }
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
+ cipher_stop = 0;
+ auth_start = cipher_start;
+ auth_stop = 0;
+ auth_insert = 0;
+ } else {
+ cipher_stop = 0;
+ auth_start = cipher_start;
+ auth_stop = 0;
+ auth_insert = 0;
+ }
+
+ sec_pdu->pldlen = htobe32((tlsp->tls13 ? sizeof(uint64_t) : 0) +
+ m_tls->m_epg_hdrlen + plen);
+
+ /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
+ sec_pdu->seqno_numivs = tlsp->scmd0.seqno_numivs;
+ sec_pdu->ivgen_hdrlen = htobe32(tlsp->scmd0.ivgen_hdrlen |
+ V_SCMD_HDR_LEN(crypto_hdr_len));
+
+ if (split_mode)
+ txq->kern_tls_partial++;
+ else
+ txq->kern_tls_full++;
+ }
+ sec_pdu->op_ivinsrtofst = htobe32(
+ V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
+ V_CPL_TX_SEC_PDU_CPLLEN(cpl_len / 8) |
+ V_CPL_TX_SEC_PDU_PLACEHOLDER(send_partial_ghash ? 1 : 0) |
+ V_CPL_TX_SEC_PDU_IVINSRTOFST(iv_offset));
+ sec_pdu->aadstart_cipherstop_hi = htobe32(
+ V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
+ V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
+ V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
+ V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
+ sec_pdu->cipherstop_lo_authinsert = htobe32(
+ V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
+ V_CPL_TX_SEC_PDU_AUTHSTART(auth_start) |
+ V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) |
+ V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
+
+ if (send_partial_ghash && last_ghash_frag) {
+ uint64_t aad_len, cipher_len;
+
+ aad_len = ktls_gcm_aad_len(tlsp);
+ cipher_len = rlen - (m_tls->m_epg_hdrlen + AES_GMAC_HASH_LEN);
+ sec_pdu->scmd1 = htobe64(aad_len << 44 | cipher_len);
+ } else
+ sec_pdu->scmd1 = htobe64(m_tls->m_epg_seqno);
+
+ /* Key context */
+ out = (void *)(sec_pdu + 1);
+ if (inline_key) {
+ memcpy(out, &tlsp->keyctx, tlsp->tx_key_info_size);
+ if (send_partial_ghash) {
+ /* Mark the OPAD slot as holding the partial GHASH. */
+ struct tls_keyctx *keyctx = (void *)out;
+
+ keyctx->u.txhdr.ctxlen++;
+ keyctx->u.txhdr.dualck_to_txvalid &= ~htobe16(
+ V_KEY_CONTEXT_MK_SIZE(M_KEY_CONTEXT_MK_SIZE));
+ keyctx->u.txhdr.dualck_to_txvalid |= htobe16(
+ F_KEY_CONTEXT_OPAD_PRESENT |
+ V_KEY_CONTEXT_MK_SIZE(0));
+ }
+ out += tlsp->tx_key_info_size;
+ if (send_partial_ghash) {
+ /* Initial fragment starts from a zero hash. */
+ if (header_len != 0)
+ memset(out, 0, AES_GMAC_HASH_LEN);
+ else
+ memcpy(out, tlsp->ghash, AES_GMAC_HASH_LEN);
+ out += AES_GMAC_HASH_LEN;
+ }
+ } else {
+ /* ULPTX_SC_MEMRD to read key context. */
+ memrd = (void *)out;
+ memrd->cmd_to_len = htobe32(V_ULPTX_CMD(ULP_TX_SC_MEMRD) |
+ V_ULP_TX_SC_MORE(1) |
+ V_ULPTX_LEN16(tlsp->tx_key_info_size >> 4));
+ memrd->addr = htobe32(tlsp->tx_key_addr >> 5);
+
+ /* ULPTX_IDATA for CPL_TX_* and headers. */
+ idata = (void *)(memrd + 1);
+ idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
+ V_ULP_TX_SC_MORE(1));
+ idata->len = htobe32(post_key_context_len);
+
+ out = (void *)(idata + 1);
+ }
+
+ /* CPL_RX_PHYS_DSGL */
+ if (split_mode) {
+ crypto_hdr_len = sizeof(struct cpl_tx_pkt_core);
+ if (need_lso)
+ crypto_hdr_len += sizeof(struct cpl_tx_pkt_lso_core);
+ crypto_hdr_len += m->m_len;
+ out = write_split_mode_rx_phys(out, m, m_tls, crypto_hdr_len,
+ leading_waste, trailing_waste);
+ }
+
+ /* CPL_TX_PKT_LSO */
+ if (need_lso) {
+ out = write_lso_cpl(out, m, mss, eh_type, m->m_len +
+ m_tls->m_len);
+ txq->tso_wrs++;
+ }
+
+ /* CPL_TX_PKT_XT */
+ tx_pkt = (void *)out;
+ tx_pkt->ctrl0 = txq->cpl_ctrl0;
+ tx_pkt->ctrl1 = htobe64(pkt_ctrl1(txq, m, eh_type));
+ tx_pkt->pack = 0;
+ tx_pkt->len = htobe16(m->m_len + m_tls->m_len);
+
+ /* Copy the packet headers. */
+ out = (void *)(tx_pkt + 1);
+ memcpy(out, mtod(m, char *), m->m_len);
+
+ /* Modify the packet length in the IP header. */
+ ip_len = m->m_len + m_tls->m_len - m->m_pkthdr.l2hlen;
+ if (eh_type == ETHERTYPE_IP) {
+ ip = (void *)(out + m->m_pkthdr.l2hlen);
+ be16enc(&ip->ip_len, ip_len);
+ } else {
+ ip6 = (void *)(out + m->m_pkthdr.l2hlen);
+ be16enc(&ip6->ip6_plen, ip_len - sizeof(*ip6));
+ }
+
+ /* Modify sequence number and flags in TCP header. */
+ newtcp = (void *)(out + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen);
+ be32enc(&newtcp->th_seq, tcp_seqno);
+ if (!last_wr)
+ newtcp->th_flags = tcp->th_flags & ~(TH_PUSH | TH_FIN);
+ out += m->m_len;
+
+ /*
+ * Insert placeholder for sequence number as IV for TLS 1.3
+ * non-short records.
+ */
+ if (tlsp->tls13 && !short_record) {
+ memset(out, 0, sizeof(uint64_t));
+ out += sizeof(uint64_t);
+ }
+
+ /* Populate the TLS header */
+ memcpy(out, m_tls->m_epg_hdr, header_len);
+ out += header_len;
+
+ /* TLS AAD for short records using a partial hash. */
+ if (send_partial_ghash && header_len != 0) {
+ if (tlsp->tls13) {
+ struct tls_aead_data_13 ad;
+
+ ad.type = hdr->tls_type;
+ ad.tls_vmajor = hdr->tls_vmajor;
+ ad.tls_vminor = hdr->tls_vminor;
+ ad.tls_length = hdr->tls_length;
+ memcpy(out, &ad, sizeof(ad));
+ out += sizeof(ad);
+ } else {
+ struct tls_aead_data ad;
+ uint16_t cipher_len;
+
+ cipher_len = rlen -
+ (m_tls->m_epg_hdrlen + AES_GMAC_HASH_LEN);
+ ad.seq = htobe64(m_tls->m_epg_seqno);
+ ad.type = hdr->tls_type;
+ ad.tls_vmajor = hdr->tls_vmajor;
+ ad.tls_vminor = hdr->tls_vminor;
+ ad.tls_length = htons(cipher_len);
+ memcpy(out, &ad, sizeof(ad));
+ out += sizeof(ad);
+ }
+ }
+
+ /* AES IV for a short record. */
+ if (short_record) {
+ iv = out;
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
+ memcpy(iv, tlsp->keyctx.u.txhdr.txsalt, SALT_SIZE);
+ if (tlsp->tls13) {
+ uint64_t value;
+
+ value = be64dec(tlsp->keyctx.u.txhdr.txsalt +
+ 4);
+ value ^= m_tls->m_epg_seqno;
+ be64enc(iv + 4, value);
+ } else
+ memcpy(iv + 4, hdr + 1, 8);
+ /* Counter for the first AES block being sent. */
+ if (send_partial_ghash)
+ be32enc(iv + 12, 1 + offset / AES_BLOCK_LEN);
+ else
+ be32enc(iv + 12, 2 + offset / AES_BLOCK_LEN);
+ } else
+ memcpy(iv, hdr + 1, AES_BLOCK_LEN);
+ out += AES_BLOCK_LEN;
+ }
+
+ if (imm_len % 16 != 0) {
+ if (imm_len % 8 != 0) {
+ /* Zero pad to an 8-byte boundary. */
+ memset(out, 0, 8 - (imm_len % 8));
+ out += 8 - (imm_len % 8);
+ }
+
+ /*
+ * Insert a ULP_TX_SC_NOOP if needed so the SGL is
+ * 16-byte aligned.
+ */
+ if (imm_len % 16 <= 8) {
+ idata = (void *)out;
+ idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP) |
+ V_ULP_TX_SC_MORE(1));
+ idata->len = htobe32(0);
+ out = (void *)(idata + 1);
+ }
+ }
+
+ /* SGL for record payload */
+ sglist_reset(txq->gl);
+ if (sglist_append_mbuf_epg(txq->gl, m_tls, m_tls->m_epg_hdrlen + offset,
+ plen) != 0) {
+#ifdef INVARIANTS
+ panic("%s: failed to append sglist", __func__);
+#endif
+ }
+ if (last_ghash_frag) {
+ if (sglist_append_phys(txq->gl, zero_buffer_pa,
+ AES_GMAC_HASH_LEN) != 0) {
+#ifdef INVARIANTS
+ panic("%s: failed to append sglist (2)", __func__);
+#endif
+ }
+ }
+ out = write_gl_to_buf(txq->gl, out);
+
+ if (request_ghash) {
+ /* ULP_TXPKT */
+ txpkt = (void *)out;
+ txpkt->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
+ V_ULP_TXPKT_DATAMODIFY(0) |
+ V_T7_ULP_TXPKT_CHANNELID(tlsp->vi->pi->port_id) |
+ V_ULP_TXPKT_DEST(0) |
+ V_ULP_TXPKT_FID(txq->eq.cntxt_id) | V_ULP_TXPKT_RO(1));
+ txpkt->len = htobe32(howmany(txpkt_lens[1], 16));
+
+ /* ULPTX_IDATA sub-command */
+ idata = (void *)(txpkt + 1);
+ idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
+ V_ULP_TX_SC_MORE(0));
+ idata->len = sizeof(struct cpl_tx_tls_ack);
+ idata->len += sizeof(struct rss_header) +
+ sizeof(struct cpl_fw6_pld);
+ idata->len += AES_GMAC_HASH_LEN;
+ idata->len = htobe32(idata->len);
+ out = (void *)(idata + 1);
+
+ /* CPL_TX_TLS_ACK */
+ out = write_tx_tls_ack(out, tlsp->rx_chid, AES_GMAC_HASH_LEN,
+ ghash_lcb);
+
+ /* CPL_FW6_PLD */
+ out = write_fw6_pld(out, tlsp->rx_chid, tlsp->rx_qid,
+ AES_GMAC_HASH_LEN, (uintptr_t)tlsp | CPL_FW6_COOKIE_KTLS);
+
+ /* Space for partial hash. */
+ memset(out, 0, AES_GMAC_HASH_LEN);
+ out += AES_GMAC_HASH_LEN;
+
+ tlsp->ghash_pending = true;
+ tlsp->ghash_valid = false;
+ tlsp->ghash_lcb = ghash_lcb;
+ if (last_ghash_frag)
+ tlsp->ghash_offset = offset + plen;
+ else
+ tlsp->ghash_offset = rounddown2(offset + plen,
+ GMAC_BLOCK_LEN);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: %p requesting GHASH for offset %u",
+ __func__, tlsp, tlsp->ghash_offset);
+#endif
+ /* Reference dropped in ktls_fw6_pld() when the hash arrives. */
+ m_snd_tag_ref(&tlsp->com);
+
+ txq->kern_tls_ghash_requested++;
+ }
+
+ /* Copy the WR built in the scratch pad into the real ring. */
+ if (using_scratch) {
+ out = dst;
+ copy_to_txd(eq, txq->ss, &out, wr_len);
+ }
+
+ txq->kern_tls_records++;
+ txq->kern_tls_octets += m_tls->m_len;
+ if (split_mode) {
+ txq->kern_tls_splitmode++;
+ txq->kern_tls_waste += leading_waste + trailing_waste;
+ }
+ if (need_lso)
+ txq->kern_tls_lso++;
+
+ /* Only the final WR of a chain owns (and later frees) the mbuf. */
+ txsd = &txq->sdesc[pidx];
+ if (last_wr)
+ txsd->m = m;
+ else
+ txsd->m = NULL;
+ txsd->desc_used = ndesc;
+
+ return (ndesc);
+}
+
+/*
+ * Top-level kTLS transmit entry point: walk the M_EXTPG TLS-record
+ * mbufs chained after the packet-header mbuf 'm' and emit one work
+ * request per record via ktls_write_tls_wr().  Returns the total
+ * number of tx descriptors consumed.  Called with the txq lock held.
+ */
+int
+t7_ktls_write_wr(struct sge_txq *txq, void *dst, struct mbuf *m,
+ u_int available)
+{
+ struct sge_eq *eq = &txq->eq;
+ struct tlspcb *tlsp;
+ struct tcphdr *tcp;
+ struct mbuf *m_tls;
+ struct ether_header *eh;
+ tcp_seq tcp_seqno;
+ u_int ndesc, pidx, totdesc;
+ uint16_t eh_type, mss;
+
+ TXQ_LOCK_ASSERT_OWNED(txq);
+ M_ASSERTPKTHDR(m);
+ MPASS(m->m_pkthdr.snd_tag != NULL);
+ tlsp = mst_to_tls(m->m_pkthdr.snd_tag);
+
+ /* Resolve the real ethertype, looking through a VLAN header. */
+ totdesc = 0;
+ eh = mtod(m, struct ether_header *);
+ eh_type = ntohs(eh->ether_type);
+ if (eh_type == ETHERTYPE_VLAN) {
+ struct ether_vlan_header *evh = (void *)eh;
+
+ eh_type = ntohs(evh->evl_proto);
+ }
+
+ tcp = (struct tcphdr *)((char *)eh + m->m_pkthdr.l2hlen +
+ m->m_pkthdr.l3hlen);
+ pidx = eq->pidx;
+
+ /* Determine MSS. */
+ if (m->m_pkthdr.csum_flags & CSUM_TSO) {
+ mss = m->m_pkthdr.tso_segsz;
+ tlsp->prev_mss = mss;
+ } else if (tlsp->prev_mss != 0)
+ mss = tlsp->prev_mss;
+ else
+ mss = if_getmtu(tlsp->vi->ifp) -
+ (m->m_pkthdr.l3hlen + m->m_pkthdr.l4hlen);
+
+ /* Fetch the starting TCP sequence number for this chain. */
+ tcp_seqno = ntohl(tcp->th_seq);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: pkt len %d TCP seq %u", __func__, m->m_pkthdr.len,
+ tcp_seqno);
+#endif
+ KASSERT(!tlsp->ghash_pending, ("%s: GHASH pending for send", __func__));
+
+ /*
+ * Iterate over each TLS record constructing a work request
+ * for that record.
+ */
+ for (m_tls = m->m_next; m_tls != NULL; m_tls = m_tls->m_next) {
+ MPASS(m_tls->m_flags & M_EXTPG);
+
+ ndesc = ktls_write_tls_wr(tlsp, txq, dst, m, tcp, m_tls,
+ available - totdesc, tcp_seqno, pidx, eh_type, mss);
+ totdesc += ndesc;
+ IDXINCR(pidx, ndesc, eq->sidx);
+ dst = &eq->desc[pidx];
+
+ tcp_seqno += m_tls->m_len;
+ }
+
+ /*
+ * Queue another packet if this was a GCM request that didn't
+ * request a GHASH response.
+ */
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM && !tlsp->ghash_pending)
+ ktls_queue_next_packet(tlsp, true);
+
+ MPASS(totdesc <= available);
+ return (totdesc);
+}
+
+/*
+ * Final release of a kTLS send tag: return the on-card key-context
+ * slot (if one was allocated) and free the tlspcb.  Invoked when the
+ * last reference on the m_snd_tag is dropped.
+ */
+static void
+t7_tls_tag_free(struct m_snd_tag *mst)
+{
+ struct adapter *sc;
+ struct tlspcb *tlsp;
+
+ tlsp = mst_to_tls(mst);
+ sc = tlsp->sc;
+
+ CTR2(KTR_CXGBE, "%s: %p", __func__, tlsp);
+
+ if (tlsp->tx_key_addr >= 0)
+ t4_free_tls_keyid(sc, tlsp->tx_key_addr);
+
+ KASSERT(mbufq_len(&tlsp->pending_mbufs) == 0,
+ ("%s: pending mbufs", __func__));
+
+ zfree(tlsp, M_CXGBE);
+}
+
+/*
+ * CPL_FW6_PLD handler for kTLS: receives the partial GHASH previously
+ * requested by ktls_write_tls_wr(), stores it in the tlspcb, and kicks
+ * off the next queued packet.  Also drops the snd_tag reference taken
+ * when the hash was requested.
+ */
+static int
+ktls_fw6_pld(struct sge_iq *iq, const struct rss_header *rss,
+ struct mbuf *m)
+{
+ const struct cpl_fw6_pld *cpl;
+ struct tlspcb *tlsp;
+ const void *ghash;
+
+ if (m != NULL)
+ cpl = mtod(m, const void *);
+ else
+ cpl = (const void *)(rss + 1);
+
+ /* The cookie carries the tlspcb pointer for this reply. */
+ tlsp = (struct tlspcb *)(uintptr_t)CPL_FW6_PLD_COOKIE(cpl);
+ KASSERT(cpl->data[0] == 0, ("%s: error status returned", __func__));
+
+ TXQ_LOCK(tlsp->txq);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: %p received GHASH for offset %u%s", __func__, tlsp,
+ tlsp->ghash_offset, tlsp->ghash_lcb ? " in LCB" : "");
+#endif
+ /* Hash location depends on whether it was delivered in the LCB. */
+ if (tlsp->ghash_lcb)
+ ghash = &cpl->data[2];
+ else
+ ghash = cpl + 1;
+ memcpy(tlsp->ghash, ghash, AES_GMAC_HASH_LEN);
+ tlsp->ghash_valid = true;
+ tlsp->ghash_pending = false;
+ tlsp->txq->kern_tls_ghash_received++;
+
+ ktls_queue_next_packet(tlsp, false);
+ TXQ_UNLOCK(tlsp->txq);
+
+ /* Matches the m_snd_tag_ref() taken when the hash was requested. */
+ m_snd_tag_rele(&tlsp->com);
+ m_freem(m);
+ return (0);
+}
+
+/*
+ * Module load hook: allocate the zeroed, aligned DMA-able buffer used
+ * as the final-fragment GHASH placeholder and register the shared
+ * CPL_FW6_PLD handler for kTLS replies.
+ */
+void
+t7_ktls_modload(void)
+{
+ zero_buffer = malloc_aligned(AES_GMAC_HASH_LEN, AES_GMAC_HASH_LEN,
+ M_CXGBE, M_ZERO | M_WAITOK);
+ zero_buffer_pa = vtophys(zero_buffer);
+ t4_register_shared_cpl_handler(CPL_FW6_PLD, ktls_fw6_pld,
+ CPL_FW6_COOKIE_KTLS);
+}
+
+/*
+ * Module unload hook: undo t7_ktls_modload() — free the zero buffer
+ * and unregister the CPL_FW6_PLD handler.
+ */
+void
+t7_ktls_modunload(void)
+{
+ free(zero_buffer, M_CXGBE);
+ t4_register_shared_cpl_handler(CPL_FW6_PLD, NULL, CPL_FW6_COOKIE_KTLS);
+}
+
+#else
+
+/* Stub when kernel TLS support is compiled out: no tags available. */
+int
+t7_tls_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
+ struct m_snd_tag **pt)
+{
+ return (ENXIO);
+}
+
+/* Stub when kernel TLS support is compiled out: reject all packets. */
+int
+t7_ktls_parse_pkt(struct mbuf *m)
+{
+ return (EINVAL);
+}
+
+/*
+ * Stub when kernel TLS support is compiled out; unreachable because
+ * t7_ktls_parse_pkt() rejects every packet before transmit.
+ */
+int
+t7_ktls_write_wr(struct sge_txq *txq, void *dst, struct mbuf *m,
+ u_int available)
+{
+ panic("can't happen");
+}
+
+/* Stub when kernel TLS support is compiled out: nothing to set up. */
+void
+t7_ktls_modload(void)
+{
+}
+
+/* Stub when kernel TLS support is compiled out: nothing to tear down. */
+void
+t7_ktls_modunload(void)
+{
+}
+
+#endif
diff --git a/sys/dev/cxgbe/cudbg/cudbg_flash_utils.c b/sys/dev/cxgbe/cudbg/cudbg_flash_utils.c
index b8e6eeba0280..2cd24c635325 100644
--- a/sys/dev/cxgbe/cudbg/cudbg_flash_utils.c
+++ b/sys/dev/cxgbe/cudbg/cudbg_flash_utils.c
@@ -32,19 +32,6 @@
#include "cudbg.h"
#include "cudbg_lib_common.h"
-enum {
- SF_ATTEMPTS = 10, /* max retries for SF operations */
-
- /* flash command opcodes */
- SF_PROG_PAGE = 2, /* program page */
- SF_WR_DISABLE = 4, /* disable writes */
- SF_RD_STATUS = 5, /* read status register */
- SF_WR_ENABLE = 6, /* enable writes */
- SF_RD_DATA_FAST = 0xb, /* read flash */
- SF_RD_ID = 0x9f, /* read ID */
- SF_ERASE_SECTOR = 0xd8, /* erase sector */
-};
-
int write_flash(struct adapter *adap, u32 start_sec, void *data, u32 size);
int read_flash(struct adapter *adap, u32 start_sec , void *data, u32 size,
u32 start_address);
@@ -56,10 +43,12 @@ update_skip_size(struct cudbg_flash_sec_info *sec_info, u32 size)
}
static
-void set_sector_availability(struct cudbg_flash_sec_info *sec_info,
- int sector_nu, int avail)
+void set_sector_availability(struct adapter *adap,
+ struct cudbg_flash_sec_info *sec_info, int sector_nu, int avail)
{
- sector_nu -= CUDBG_START_SEC;
+ int start = t4_flash_loc_start(adap, FLASH_LOC_CUDBG, NULL);
+
+ sector_nu -= start / SF_SEC_SIZE;
if (avail)
set_dbg_bitmap(sec_info->sec_bitmap, sector_nu);
else
@@ -68,13 +57,17 @@ void set_sector_availability(struct cudbg_flash_sec_info *sec_info,
/* This function will return empty sector available for filling */
static int
-find_empty_sec(struct cudbg_flash_sec_info *sec_info)
+find_empty_sec(struct adapter *adap, struct cudbg_flash_sec_info *sec_info)
{
int i, index, bit;
-
- for (i = CUDBG_START_SEC; i < CUDBG_SF_MAX_SECTOR; i++) {
- index = (i - CUDBG_START_SEC) / 8;
- bit = (i - CUDBG_START_SEC) % 8;
+ unsigned int len = 0;
+ int start = t4_flash_loc_start(adap, FLASH_LOC_CUDBG, &len);
+
+ start /= SF_SEC_SIZE; /* addr -> sector */
+ len /= SF_SEC_SIZE;
+ for (i = start; i < start + len; i++) {
+ index = (i - start) / 8;
+ bit = (i - start) % 8;
if (!(sec_info->sec_bitmap[index] & (1 << bit)))
return i;
}
@@ -102,7 +95,7 @@ static void update_headers(void *handle, struct cudbg_buffer *dbg_buff,
data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
sizeof(struct cudbg_hdr);
total_hdr_size = data_hdr_size + sizeof(struct cudbg_flash_hdr);
- sec_hdr_start_addr = CUDBG_SF_SECTOR_SIZE - total_hdr_size;
+ sec_hdr_start_addr = SF_SEC_SIZE - total_hdr_size;
sec_hdr = sec_info->sec_data + sec_hdr_start_addr;
flash_hdr = (struct cudbg_flash_hdr *)(sec_hdr);
@@ -166,11 +159,13 @@ int cudbg_write_flash(void *handle, u64 timestamp, void *data,
u32 space_left;
int rc = 0;
int sec;
+ unsigned int cudbg_max_size = 0;
+ t4_flash_loc_start(adap, FLASH_LOC_CUDBG, &cudbg_max_size);
data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
sizeof(struct cudbg_hdr);
total_hdr_size = data_hdr_size + sizeof(struct cudbg_flash_hdr);
- sec_hdr_start_addr = CUDBG_SF_SECTOR_SIZE - total_hdr_size;
+ sec_hdr_start_addr = SF_SEC_SIZE - total_hdr_size;
sec_data_size = sec_hdr_start_addr;
cudbg_init->print("\tWriting %u bytes to flash\n", cur_entity_size);
@@ -191,12 +186,12 @@ int cudbg_write_flash(void *handle, u64 timestamp, void *data,
flash_hdr = (struct cudbg_flash_hdr *)(sec_info->sec_data +
sec_hdr_start_addr);
- if (flash_hdr->data_len > CUDBG_FLASH_SIZE) {
+ if (flash_hdr->data_len > cudbg_max_size) {
rc = CUDBG_STATUS_FLASH_FULL;
goto out;
}
- space_left = CUDBG_FLASH_SIZE - flash_hdr->data_len;
+ space_left = cudbg_max_size - flash_hdr->data_len;
if (cur_entity_size > space_left) {
rc = CUDBG_STATUS_FLASH_FULL;
@@ -204,10 +199,11 @@ int cudbg_write_flash(void *handle, u64 timestamp, void *data,
}
while (cur_entity_size > 0) {
- sec = find_empty_sec(sec_info);
+ sec = find_empty_sec(adap, sec_info);
if (sec_info->par_sec) {
sec_data_offset = sec_info->par_sec_offset;
- set_sector_availability(sec_info, sec_info->par_sec, 0);
+ set_sector_availability(adap, sec_info,
+ sec_info->par_sec, 0);
sec_info->par_sec = 0;
sec_info->par_sec_offset = 0;
@@ -230,13 +226,12 @@ int cudbg_write_flash(void *handle, u64 timestamp, void *data,
(void *)((char *)dbg_buff->data + start_offset),
tmp_size);
- rc = write_flash(adap, sec, sec_info->sec_data,
- CUDBG_SF_SECTOR_SIZE);
+ rc = write_flash(adap, sec, sec_info->sec_data, SF_SEC_SIZE);
if (rc)
goto out;
cur_entity_size -= tmp_size;
- set_sector_availability(sec_info, sec, 1);
+ set_sector_availability(adap, sec_info, sec, 1);
start_offset += tmp_size;
}
out:
@@ -247,19 +242,14 @@ int write_flash(struct adapter *adap, u32 start_sec, void *data, u32 size)
{
unsigned int addr;
unsigned int i, n;
- unsigned int sf_sec_size;
int rc = 0;
u8 *ptr = (u8 *)data;
- sf_sec_size = adap->params.sf_size/adap->params.sf_nsec;
-
- addr = start_sec * CUDBG_SF_SECTOR_SIZE;
- i = DIV_ROUND_UP(size,/* # of sectors spanned */
- sf_sec_size);
+ addr = start_sec * SF_SEC_SIZE;
+ i = DIV_ROUND_UP(size, SF_SEC_SIZE);
- rc = t4_flash_erase_sectors(adap, start_sec,
- start_sec + i - 1);
+ rc = t4_flash_erase_sectors(adap, start_sec, start_sec + i - 1);
/*
* If size == 0 then we're simply erasing the FLASH sectors associated
* with the on-adapter OptionROM Configuration File.
@@ -337,6 +327,9 @@ int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
u32 data_offset = 0;
u32 i, j;
int rc;
+ unsigned int cudbg_len = 0;
+ int cudbg_start_sec = t4_flash_loc_start(adap, FLASH_LOC_CUDBG,
+ &cudbg_len) / SF_SEC_SIZE;
rc = t4_get_flash_params(adap);
if (rc) {
@@ -348,7 +341,7 @@ int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
sizeof(struct cudbg_hdr);
total_hdr_size = data_hdr_size + sizeof(struct cudbg_flash_hdr);
- sec_hdr_start_addr = CUDBG_SF_SECTOR_SIZE - total_hdr_size;
+ sec_hdr_start_addr = SF_SEC_SIZE - total_hdr_size;
if (!data_flag) {
/* fill header */
@@ -357,14 +350,14 @@ int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
* have older filled sector also
*/
memset(&flash_hdr, 0, sizeof(struct cudbg_flash_hdr));
- rc = read_flash(adap, CUDBG_START_SEC, &flash_hdr,
+ rc = read_flash(adap, cudbg_start_sec, &flash_hdr,
sizeof(struct cudbg_flash_hdr),
sec_hdr_start_addr);
if (flash_hdr.signature == CUDBG_FL_SIGNATURE) {
sec_info->max_timestamp = flash_hdr.timestamp;
} else {
- rc = read_flash(adap, CUDBG_START_SEC + 1,
+ rc = read_flash(adap, cudbg_start_sec + 1,
&flash_hdr,
sizeof(struct cudbg_flash_hdr),
sec_hdr_start_addr);
@@ -383,8 +376,8 @@ int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
/* finding max sequence number because max sequenced
* sector has updated header
*/
- for (i = CUDBG_START_SEC; i <
- CUDBG_SF_MAX_SECTOR; i++) {
+ for (i = cudbg_start_sec; i < cudbg_start_sec +
+ cudbg_len / SF_SEC_SIZE; i++) {
memset(&flash_hdr, 0,
sizeof(struct cudbg_flash_hdr));
rc = read_flash(adap, i, &flash_hdr,
@@ -423,7 +416,8 @@ int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
/* finding sector sequence sorted */
for (i = 1; i <= sec_info->max_seq_no; i++) {
- for (j = CUDBG_START_SEC; j < CUDBG_SF_MAX_SECTOR; j++) {
+ for (j = cudbg_start_sec; j < cudbg_start_sec +
+ cudbg_len / SF_SEC_SIZE; j++) {
memset(&flash_hdr, 0, sizeof(struct cudbg_flash_hdr));
rc = read_flash(adap, j, &flash_hdr,
sizeof(struct cudbg_flash_hdr),
@@ -434,10 +428,8 @@ int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
sec_info->max_timestamp ==
flash_hdr.timestamp &&
flash_hdr.sec_seq_no == i) {
- if (size + total_hdr_size >
- CUDBG_SF_SECTOR_SIZE)
- tmp_size = CUDBG_SF_SECTOR_SIZE -
- total_hdr_size;
+ if (size + total_hdr_size > SF_SEC_SIZE)
+ tmp_size = SF_SEC_SIZE - total_hdr_size;
else
tmp_size = size;
@@ -468,7 +460,7 @@ int read_flash(struct adapter *adap, u32 start_sec , void *data, u32 size,
unsigned int addr, i, n;
int rc;
u32 *ptr = (u32 *)data;
- addr = start_sec * CUDBG_SF_SECTOR_SIZE + start_address;
+ addr = start_sec * SF_SEC_SIZE + start_address;
size = size / 4;
for (i = 0; i < size; i += SF_PAGE_SIZE) {
if ((size - i) < SF_PAGE_SIZE)
diff --git a/sys/dev/cxgbe/cudbg/cudbg_lib.c b/sys/dev/cxgbe/cudbg/cudbg_lib.c
index a36c53f68223..f0273349263a 100644
--- a/sys/dev/cxgbe/cudbg/cudbg_lib.c
+++ b/sys/dev/cxgbe/cudbg/cudbg_lib.c
@@ -155,23 +155,25 @@ static int wr_entity_to_flash(void *handle, struct cudbg_buffer *dbg_buff,
u32 flash_data_offset;
u32 data_hdr_size;
int rc = -1;
+ unsigned int cudbg_len;
data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
sizeof(struct cudbg_hdr);
+ t4_flash_loc_start(cudbg_init->adap, FLASH_LOC_CUDBG, &cudbg_len);
- flash_data_offset = (FLASH_CUDBG_NSECS *
+ flash_data_offset = ((cudbg_len / SF_SEC_SIZE) *
(sizeof(struct cudbg_flash_hdr) +
data_hdr_size)) +
(cur_entity_data_offset - data_hdr_size);
- if (flash_data_offset > CUDBG_FLASH_SIZE) {
+ if (flash_data_offset > cudbg_len) {
update_skip_size(sec_info, cur_entity_size);
if (cudbg_init->verbose)
cudbg_init->print("Large entity skipping...\n");
return rc;
}
- remain_flash_size = CUDBG_FLASH_SIZE - flash_data_offset;
+ remain_flash_size = cudbg_len - flash_data_offset;
if (cur_entity_size > remain_flash_size) {
update_skip_size(sec_info, cur_entity_size);
@@ -1292,6 +1294,7 @@ static int collect_macstats(struct cudbg_init *pdbg_init,
mac_stats_buff->port_count = n;
for (i = 0; i < mac_stats_buff->port_count; i++)
+ /* Incorrect, should use hport instead of i */
t4_get_port_stats(padap, i, &mac_stats_buff->stats[i]);
rc = write_compression_hdr(&scratch_buff, dbg_buff);
@@ -1967,7 +1970,7 @@ static int collect_fw_devlog(struct cudbg_init *pdbg_init,
u32 offset;
int rc = 0, i;
- rc = t4_init_devlog_params(padap, 1);
+ rc = t4_init_devlog_ncores_params(padap, 1);
if (rc < 0) {
pdbg_init->print("%s(), t4_init_devlog_params failed!, rc: "\
diff --git a/sys/dev/cxgbe/cudbg/cudbg_lib_common.h b/sys/dev/cxgbe/cudbg/cudbg_lib_common.h
index 86390eb4399d..b6a85f436db0 100644
--- a/sys/dev/cxgbe/cudbg/cudbg_lib_common.h
+++ b/sys/dev/cxgbe/cudbg/cudbg_lib_common.h
@@ -59,11 +59,6 @@
#include "common/t4_hw.h"
#endif
-#define CUDBG_SF_MAX_SECTOR (FLASH_CUDBG_START_SEC + FLASH_CUDBG_NSECS)
-#define CUDBG_SF_SECTOR_SIZE SF_SEC_SIZE
-#define CUDBG_START_SEC FLASH_CUDBG_START_SEC
-#define CUDBG_FLASH_SIZE FLASH_CUDBG_MAX_SIZE
-
#define CUDBG_EXT_DATA_BIT 0
#define CUDBG_EXT_DATA_VALID (1 << CUDBG_EXT_DATA_BIT)
@@ -121,7 +116,7 @@ struct cudbg_flash_sec_info {
u32 hdr_data_len; /* Total data */
u32 skip_size; /* Total size of large entities. */
u64 max_timestamp;
- char sec_data[CUDBG_SF_SECTOR_SIZE];
+ char sec_data[SF_SEC_SIZE];
u8 sec_bitmap[8];
};
diff --git a/sys/dev/cxgbe/cxgbei/icl_cxgbei.c b/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
index d805642541d3..9cdfd0fb9652 100644
--- a/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
+++ b/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
@@ -976,42 +976,6 @@ icl_cxgbei_setsockopt(struct icl_conn *ic, struct socket *so, int sspace,
return (0);
}
-/*
- * Request/response structure used to find out the adapter offloading a socket.
- */
-struct find_ofld_adapter_rr {
- struct socket *so;
- struct adapter *sc; /* result */
-};
-
-static void
-find_offload_adapter(struct adapter *sc, void *arg)
-{
- struct find_ofld_adapter_rr *fa = arg;
- struct socket *so = fa->so;
- struct tom_data *td = sc->tom_softc;
- struct tcpcb *tp;
- struct inpcb *inp;
-
- /* Non-TCP were filtered out earlier. */
- MPASS(so->so_proto->pr_protocol == IPPROTO_TCP);
-
- if (fa->sc != NULL)
- return; /* Found already. */
-
- if (td == NULL)
- return; /* TOE not enabled on this adapter. */
-
- inp = sotoinpcb(so);
- INP_WLOCK(inp);
- if ((inp->inp_flags & INP_DROPPED) == 0) {
- tp = intotcpcb(inp);
- if (tp->t_flags & TF_TOE && tp->tod == &td->tod)
- fa->sc = sc; /* Found. */
- }
- INP_WUNLOCK(inp);
-}
-
static bool
is_memfree(struct adapter *sc)
{
@@ -1025,48 +989,6 @@ is_memfree(struct adapter *sc)
return (true);
}
-/* XXXNP: move this to t4_tom. */
-static void
-send_iscsi_flowc_wr(struct adapter *sc, struct toepcb *toep, int maxlen)
-{
- struct wrqe *wr;
- struct fw_flowc_wr *flowc;
- const u_int nparams = 1;
- u_int flowclen;
- struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
-
- flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);
-
- wr = alloc_wrqe(roundup2(flowclen, 16), &toep->ofld_txq->wrq);
- if (wr == NULL) {
- /* XXX */
- panic("%s: allocation failure.", __func__);
- }
- flowc = wrtod(wr);
- memset(flowc, 0, wr->wr_len);
-
- flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
- V_FW_FLOWC_WR_NPARAMS(nparams));
- flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
- V_FW_WR_FLOWID(toep->tid));
-
- flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
- flowc->mnemval[0].val = htobe32(maxlen);
-
- KASSERT(howmany(flowclen, 16) <= MAX_OFLD_TX_SDESC_CREDITS,
- ("%s: tx_credits %u too large", __func__, howmany(flowclen, 16)));
- txsd->tx_credits = howmany(flowclen, 16);
- txsd->plen = 0;
- KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
- ("%s: not enough credits (%d)", __func__, toep->tx_credits));
- toep->tx_credits -= txsd->tx_credits;
- if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
- toep->txsd_pidx = 0;
- toep->txsd_avail--;
-
- t4_wrq_tx(sc, wr);
-}
-
static void
set_ulp_mode_iscsi(struct adapter *sc, struct toepcb *toep, u_int ulp_submode)
{
@@ -1095,7 +1017,6 @@ int
icl_cxgbei_conn_handoff(struct icl_conn *ic, int fd)
{
struct icl_cxgbei_conn *icc = ic_to_icc(ic);
- struct find_ofld_adapter_rr fa;
struct file *fp;
struct socket *so;
struct inpcb *inp;
@@ -1139,15 +1060,11 @@ icl_cxgbei_conn_handoff(struct icl_conn *ic, int fd)
fdrop(fp, curthread);
ICL_CONN_UNLOCK(ic);
- /* Find the adapter offloading this socket. */
- fa.sc = NULL;
- fa.so = so;
- t4_iterate(find_offload_adapter, &fa);
- if (fa.sc == NULL) {
+ icc->sc = find_offload_adapter(so);
+ if (icc->sc == NULL) {
error = EINVAL;
goto out;
}
- icc->sc = fa.sc;
max_rx_pdu_len = ISCSI_BHS_SIZE + ic->ic_max_recv_data_segment_length;
max_tx_pdu_len = ISCSI_BHS_SIZE + ic->ic_max_send_data_segment_length;
@@ -1205,7 +1122,7 @@ icl_cxgbei_conn_handoff(struct icl_conn *ic, int fd)
toep->params.ulp_mode = ULP_MODE_ISCSI;
toep->ulpcb = icc;
- send_iscsi_flowc_wr(icc->sc, toep,
+ send_txdataplen_max_flowc_wr(icc->sc, toep,
roundup(max_iso_pdus * max_tx_pdu_len, tp->t_maxseg));
set_ulp_mode_iscsi(icc->sc, toep, icc->ulp_submode);
INP_WUNLOCK(inp);
@@ -1778,7 +1695,6 @@ cxgbei_limits(struct adapter *sc, void *arg)
static int
cxgbei_limits_fd(struct icl_drv_limits *idl, int fd)
{
- struct find_ofld_adapter_rr fa;
struct file *fp;
struct socket *so;
struct adapter *sc;
@@ -1801,17 +1717,13 @@ cxgbei_limits_fd(struct icl_drv_limits *idl, int fd)
return (EINVAL);
}
- /* Find the adapter offloading this socket. */
- fa.sc = NULL;
- fa.so = so;
- t4_iterate(find_offload_adapter, &fa);
- if (fa.sc == NULL) {
+ sc = find_offload_adapter(so);
+ if (sc == NULL) {
fdrop(fp, curthread);
return (ENXIO);
}
fdrop(fp, curthread);
- sc = fa.sc;
error = begin_synchronized_op(sc, NULL, HOLD_LOCK, "t4lims");
if (error != 0)
return (error);
diff --git a/sys/dev/cxgbe/firmware/t4fw_interface.h b/sys/dev/cxgbe/firmware/t4fw_interface.h
index 2794bae9474b..5874f0343b03 100644
--- a/sys/dev/cxgbe/firmware/t4fw_interface.h
+++ b/sys/dev/cxgbe/firmware/t4fw_interface.h
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2012-2017 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2012-2017, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -67,8 +66,8 @@ enum fw_retval {
FW_FCOE_NO_XCHG = 136, /* */
FW_SCSI_RSP_ERR = 137, /* */
FW_ERR_RDEV_IMPL_LOGO = 138, /* */
- FW_SCSI_UNDER_FLOW_ERR = 139, /* */
- FW_SCSI_OVER_FLOW_ERR = 140, /* */
+ FW_SCSI_UNDER_FLOW_ERR = 139, /* */
+ FW_SCSI_OVER_FLOW_ERR = 140, /* */
FW_SCSI_DDP_ERR = 141, /* DDP error*/
FW_SCSI_TASK_ERR = 142, /* No SCSI tasks available */
FW_SCSI_IO_BLOCK = 143, /* IO is going to be blocked due to resource failure */
@@ -85,7 +84,7 @@ enum fw_memtype {
FW_MEMTYPE_FLASH = 0x4,
FW_MEMTYPE_INTERNAL = 0x5,
FW_MEMTYPE_EXTMEM1 = 0x6,
- FW_MEMTYPE_HMA = 0x7,
+ FW_MEMTYPE_HMA = 0x7,
};
/******************************************************************************
@@ -106,10 +105,14 @@ enum fw_wr_opcodes {
FW_OFLD_CONNECTION_WR = 0x2f,
FW_FLOWC_WR = 0x0a,
FW_OFLD_TX_DATA_WR = 0x0b,
+ FW_OFLD_TX_DATA_V2_WR = 0x0f,
FW_CMD_WR = 0x10,
FW_ETH_TX_PKT_VM_WR = 0x11,
FW_ETH_TX_PKTS_VM_WR = 0x12,
FW_RI_RES_WR = 0x0c,
+ FW_QP_RES_WR = FW_RI_RES_WR,
+ /* iwarp wr used from rdma kernel and user space */
+ FW_V2_NVMET_TX_DATA_WR = 0x13,
FW_RI_RDMA_WRITE_WR = 0x14,
FW_RI_SEND_WR = 0x15,
FW_RI_RDMA_READ_WR = 0x16,
@@ -118,6 +121,15 @@ enum fw_wr_opcodes {
FW_RI_FR_NSMR_WR = 0x19,
FW_RI_FR_NSMR_TPTE_WR = 0x20,
FW_RI_RDMA_WRITE_CMPL_WR = 0x21,
+ /* rocev2 wr used from rdma kernel and user space */
+ FW_RI_V2_RDMA_WRITE_WR = 0x22,
+ FW_RI_V2_SEND_WR = 0x23,
+ FW_RI_V2_RDMA_READ_WR = 0x24,
+ FW_RI_V2_BIND_MW_WR = 0x25,
+ FW_RI_V2_FR_NSMR_WR = 0x26,
+ FW_RI_V2_ATOMIC_WR = 0x27,
+ FW_NVMET_V2_FR_NSMR_WR = 0x28,
+ FW_RI_V2_INV_LSTAG_WR = 0x1e,
FW_RI_INV_LSTAG_WR = 0x1a,
FW_RI_SEND_IMMEDIATE_WR = 0x15,
FW_RI_ATOMIC_WR = 0x16,
@@ -138,10 +150,11 @@ enum fw_wr_opcodes {
FW_POFCOE_TCB_WR = 0x42,
FW_POFCOE_ULPTX_WR = 0x43,
FW_ISCSI_TX_DATA_WR = 0x45,
- FW_PTP_TX_PKT_WR = 0x46,
+ FW_PTP_TX_PKT_WR = 0x46,
FW_TLSTX_DATA_WR = 0x68,
FW_TLS_TUNNEL_OFLD_WR = 0x69,
FW_CRYPTO_LOOKASIDE_WR = 0x6d,
+ FW_CRYPTO_UPDATE_SA_WR = 0x6e,
FW_COISCSI_TGT_WR = 0x70,
FW_COISCSI_TGT_CONN_WR = 0x71,
FW_COISCSI_TGT_XMIT_WR = 0x72,
@@ -149,7 +162,8 @@ enum fw_wr_opcodes {
FW_ISNS_WR = 0x75,
FW_ISNS_XMIT_WR = 0x76,
FW_FILTER2_WR = 0x77,
- FW_LASTC2E_WR = 0x80
+ /* FW_LASTC2E_WR = 0x80 */
+ FW_LASTC2E_WR = 0xB0
};
/*
@@ -308,7 +322,7 @@ enum fw_filter_wr_cookie {
enum fw_filter_wr_nat_mode {
FW_FILTER_WR_NATMODE_NONE = 0,
- FW_FILTER_WR_NATMODE_DIP ,
+ FW_FILTER_WR_NATMODE_DIP,
FW_FILTER_WR_NATMODE_DIPDP,
FW_FILTER_WR_NATMODE_DIPDPSIP,
FW_FILTER_WR_NATMODE_DIPDPSP,
@@ -387,7 +401,7 @@ struct fw_filter2_wr {
__u8 newlip[16];
__u8 newfip[16];
__be32 natseqcheck;
- __be32 r9;
+ __be32 rocev2_qpn;
__be64 r10;
__be64 r11;
__be64 r12;
@@ -675,6 +689,19 @@ struct fw_filter2_wr {
#define G_FW_FILTER_WR_MATCHTYPEM(x) \
(((x) >> S_FW_FILTER_WR_MATCHTYPEM) & M_FW_FILTER_WR_MATCHTYPEM)
+#define S_FW_FILTER2_WR_ROCEV2 31
+#define M_FW_FILTER2_WR_ROCEV2 0x1
+#define V_FW_FILTER2_WR_ROCEV2(x) ((x) << S_FW_FILTER2_WR_ROCEV2)
+#define G_FW_FILTER2_WR_ROCEV2(x) \
+ (((x) >> S_FW_FILTER2_WR_ROCEV2) & M_FW_FILTER2_WR_ROCEV2)
+#define F_FW_FILTER2_WR_ROCEV2 V_FW_FILTER2_WR_ROCEV2(1U)
+
+#define S_FW_FILTER2_WR_QPN 0
+#define M_FW_FILTER2_WR_QPN 0xffffff
+#define V_FW_FILTER2_WR_QPN(x) ((x) << S_FW_FILTER2_WR_QPN)
+#define G_FW_FILTER2_WR_QPN(x) \
+ (((x) >> S_FW_FILTER2_WR_QPN) & M_FW_FILTER2_WR_QPN)
+
struct fw_ulptx_wr {
__be32 op_to_compl;
__be32 flowid_len16;
@@ -1034,7 +1061,10 @@ enum fw_flowc_mnem {
FW_FLOWC_MNEM_SND_SCALE = 13,
FW_FLOWC_MNEM_RCV_SCALE = 14,
FW_FLOWC_MNEM_ULP_MODE = 15,
- FW_FLOWC_MNEM_MAX = 16,
+ FW_FLOWC_MNEM_EQID = 16,
+ FW_FLOWC_MNEM_CONG_ALG = 17,
+ FW_FLOWC_MNEM_TXDATAPLEN_MIN = 18,
+ FW_FLOWC_MNEM_MAX = 19,
};
struct fw_flowc_mnemval {
@@ -1153,6 +1183,55 @@ struct fw_ofld_tx_data_wr {
#define G_FW_ISCSI_TX_DATA_WR_FLAGS_LO(x) \
(((x) >> S_FW_ISCSI_TX_DATA_WR_FLAGS_LO) & M_FW_ISCSI_TX_DATA_WR_FLAGS_LO)
+struct fw_ofld_tx_data_v2_wr {
+ __be32 op_to_immdlen;
+ __be32 flowid_len16;
+ __be32 r4;
+ __be16 r5;
+ __be16 wrid;
+ __be32 r6;
+ __be32 seqno;
+ __be32 plen;
+ __be32 lsodisable_to_flags;
+};
+
+#define S_FW_OFLD_TX_DATA_V2_WR_LSODISABLE 31
+#define M_FW_OFLD_TX_DATA_V2_WR_LSODISABLE 0x1
+#define V_FW_OFLD_TX_DATA_V2_WR_LSODISABLE(x) \
+ ((x) << S_FW_OFLD_TX_DATA_V2_WR_LSODISABLE)
+#define G_FW_OFLD_TX_DATA_V2_WR_LSODISABLE(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_V2_WR_LSODISABLE) & \
+ M_FW_OFLD_TX_DATA_V2_WR_LSODISABLE)
+#define F_FW_OFLD_TX_DATA_V2_WR_LSODISABLE \
+ V_FW_OFLD_TX_DATA_V2_WR_LSODISABLE(1U)
+
+#define S_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD 30
+#define M_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD 0x1
+#define V_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD(x) \
+ ((x) << S_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD)
+#define G_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD) & \
+ M_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD)
+#define F_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD \
+ V_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD(1U)
+
+#define S_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE 29
+#define M_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE 0x1
+#define V_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE(x) \
+ ((x) << S_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE)
+#define G_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE) & \
+ M_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE)
+#define F_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE \
+ V_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE(1U)
+
+#define S_FW_OFLD_TX_DATA_V2_WR_FLAGS 0
+#define M_FW_OFLD_TX_DATA_V2_WR_FLAGS 0xfffffff
+#define V_FW_OFLD_TX_DATA_V2_WR_FLAGS(x) \
+ ((x) << S_FW_OFLD_TX_DATA_V2_WR_FLAGS)
+#define G_FW_OFLD_TX_DATA_V2_WR_FLAGS(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_V2_WR_FLAGS) & M_FW_OFLD_TX_DATA_V2_WR_FLAGS)
+
struct fw_cmd_wr {
__be32 op_dma;
__be32 len16_pkd;
@@ -1218,8 +1297,15 @@ enum fw_ri_wr_opcode {
FW_RI_FAST_REGISTER = 0xd,
FW_RI_LOCAL_INV = 0xe,
#endif
+ /* Chelsio specific */
FW_RI_SGE_EC_CR_RETURN = 0xf,
FW_RI_WRITE_IMMEDIATE = FW_RI_RDMA_INIT,
+ FW_RI_SEND_IMMEDIATE = FW_RI_RDMA_INIT,
+
+ FW_RI_ROCEV2_SEND = 0x0,
+ FW_RI_ROCEV2_WRITE = 0x0,
+ FW_RI_ROCEV2_SEND_WITH_INV = 0x5,
+ FW_RI_ROCEV2_SEND_IMMEDIATE = 0xa,
};
enum fw_ri_wr_flags {
@@ -1229,7 +1315,8 @@ enum fw_ri_wr_flags {
FW_RI_READ_FENCE_FLAG = 0x08,
FW_RI_LOCAL_FENCE_FLAG = 0x10,
FW_RI_RDMA_READ_INVALIDATE = 0x20,
- FW_RI_RDMA_WRITE_WITH_IMMEDIATE = 0x40
+ FW_RI_RDMA_WRITE_WITH_IMMEDIATE = 0x40,
+ //FW_RI_REPLAYED_WR_FLAG = 0x80,
};
enum fw_ri_mpa_attrs {
@@ -1522,18 +1609,302 @@ struct fw_ri_cqe {
#define G_FW_RI_CQE_TYPE(x) \
(((x) >> S_FW_RI_CQE_TYPE) & M_FW_RI_CQE_TYPE)
-enum fw_ri_res_type {
+enum fw_res_type {
FW_RI_RES_TYPE_SQ,
FW_RI_RES_TYPE_RQ,
FW_RI_RES_TYPE_CQ,
FW_RI_RES_TYPE_SRQ,
+ FW_QP_RES_TYPE_SQ = FW_RI_RES_TYPE_SQ,
+ FW_QP_RES_TYPE_CQ = FW_RI_RES_TYPE_CQ,
};
-enum fw_ri_res_op {
+enum fw_res_op {
FW_RI_RES_OP_WRITE,
FW_RI_RES_OP_RESET,
+ FW_QP_RES_OP_WRITE = FW_RI_RES_OP_WRITE,
+ FW_QP_RES_OP_RESET = FW_RI_RES_OP_RESET,
+};
+
+enum fw_qp_transport_type {
+ FW_QP_TRANSPORT_TYPE_IWARP,
+ FW_QP_TRANSPORT_TYPE_ROCEV2_UD,
+ FW_QP_TRANSPORT_TYPE_ROCEV2_RC,
+ FW_QP_TRANSPORT_TYPE_ROCEV2_XRC_INI,
+ FW_QP_TRANSPORT_TYPE_ROCEV2_XRC_TGT,
+ FW_QP_TRANSPORT_TYPE_NVMET,
+ FW_QP_TRANSPORT_TYPE_TOE,
+ FW_QP_TRANSPORT_TYPE_ISCSI,
+};
+
+struct fw_qp_res {
+ union fw_qp_restype {
+ struct fw_qp_res_sqrq {
+ __u8 restype;
+ __u8 op;
+ __be16 r3;
+ __be32 eqid;
+ __be32 r4[2];
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+ } sqrq;
+ struct fw_qp_res_cq {
+ __u8 restype;
+ __u8 op;
+ __be16 r3;
+ __be32 iqid;
+ __be32 r4[2];
+ __be32 iqandst_to_iqandstindex;
+ __be16 iqdroprss_to_iqesize;
+ __be16 iqsize;
+ __be64 iqaddr;
+ __be32 iqns_iqro;
+ __be32 r6_lo;
+ __be64 r7;
+ } cq;
+ } u;
+};
+
+struct fw_qp_res_wr {
+ __be32 op_to_nres;
+ __be32 len16_pkd;
+ __u64 cookie;
+#ifndef C99_NOT_SUPPORTED
+ struct fw_qp_res res[0];
+#endif
};
+#define S_FW_QP_RES_WR_TRANSPORT_TYPE 16
+#define M_FW_QP_RES_WR_TRANSPORT_TYPE 0x7
+#define V_FW_QP_RES_WR_TRANSPORT_TYPE(x) \
+ ((x) << S_FW_QP_RES_WR_TRANSPORT_TYPE)
+#define G_FW_QP_RES_WR_TRANSPORT_TYPE(x) \
+ (((x) >> S_FW_QP_RES_WR_TRANSPORT_TYPE) & M_FW_QP_RES_WR_TRANSPORT_TYPE)
+
+#define S_FW_QP_RES_WR_VFN 8
+#define M_FW_QP_RES_WR_VFN 0xff
+#define V_FW_QP_RES_WR_VFN(x) ((x) << S_FW_QP_RES_WR_VFN)
+#define G_FW_QP_RES_WR_VFN(x) \
+ (((x) >> S_FW_QP_RES_WR_VFN) & M_FW_QP_RES_WR_VFN)
+
+#define S_FW_QP_RES_WR_NRES 0
+#define M_FW_QP_RES_WR_NRES 0xff
+#define V_FW_QP_RES_WR_NRES(x) ((x) << S_FW_QP_RES_WR_NRES)
+#define G_FW_QP_RES_WR_NRES(x) \
+ (((x) >> S_FW_QP_RES_WR_NRES) & M_FW_QP_RES_WR_NRES)
+
+#define S_FW_QP_RES_WR_FETCHSZM 26
+#define M_FW_QP_RES_WR_FETCHSZM 0x1
+#define V_FW_QP_RES_WR_FETCHSZM(x) ((x) << S_FW_QP_RES_WR_FETCHSZM)
+#define G_FW_QP_RES_WR_FETCHSZM(x) \
+ (((x) >> S_FW_QP_RES_WR_FETCHSZM) & M_FW_QP_RES_WR_FETCHSZM)
+#define F_FW_QP_RES_WR_FETCHSZM V_FW_QP_RES_WR_FETCHSZM(1U)
+
+#define S_FW_QP_RES_WR_STATUSPGNS 25
+#define M_FW_QP_RES_WR_STATUSPGNS 0x1
+#define V_FW_QP_RES_WR_STATUSPGNS(x) ((x) << S_FW_QP_RES_WR_STATUSPGNS)
+#define G_FW_QP_RES_WR_STATUSPGNS(x) \
+ (((x) >> S_FW_QP_RES_WR_STATUSPGNS) & M_FW_QP_RES_WR_STATUSPGNS)
+#define F_FW_QP_RES_WR_STATUSPGNS V_FW_QP_RES_WR_STATUSPGNS(1U)
+
+#define S_FW_QP_RES_WR_STATUSPGRO 24
+#define M_FW_QP_RES_WR_STATUSPGRO 0x1
+#define V_FW_QP_RES_WR_STATUSPGRO(x) ((x) << S_FW_QP_RES_WR_STATUSPGRO)
+#define G_FW_QP_RES_WR_STATUSPGRO(x) \
+ (((x) >> S_FW_QP_RES_WR_STATUSPGRO) & M_FW_QP_RES_WR_STATUSPGRO)
+#define F_FW_QP_RES_WR_STATUSPGRO V_FW_QP_RES_WR_STATUSPGRO(1U)
+
+#define S_FW_QP_RES_WR_FETCHNS 23
+#define M_FW_QP_RES_WR_FETCHNS 0x1
+#define V_FW_QP_RES_WR_FETCHNS(x) ((x) << S_FW_QP_RES_WR_FETCHNS)
+#define G_FW_QP_RES_WR_FETCHNS(x) \
+ (((x) >> S_FW_QP_RES_WR_FETCHNS) & M_FW_QP_RES_WR_FETCHNS)
+#define F_FW_QP_RES_WR_FETCHNS V_FW_QP_RES_WR_FETCHNS(1U)
+
+#define S_FW_QP_RES_WR_FETCHRO 22
+#define M_FW_QP_RES_WR_FETCHRO 0x1
+#define V_FW_QP_RES_WR_FETCHRO(x) ((x) << S_FW_QP_RES_WR_FETCHRO)
+#define G_FW_QP_RES_WR_FETCHRO(x) \
+ (((x) >> S_FW_QP_RES_WR_FETCHRO) & M_FW_QP_RES_WR_FETCHRO)
+#define F_FW_QP_RES_WR_FETCHRO V_FW_QP_RES_WR_FETCHRO(1U)
+
+#define S_FW_QP_RES_WR_HOSTFCMODE 20
+#define M_FW_QP_RES_WR_HOSTFCMODE 0x3
+#define V_FW_QP_RES_WR_HOSTFCMODE(x) ((x) << S_FW_QP_RES_WR_HOSTFCMODE)
+#define G_FW_QP_RES_WR_HOSTFCMODE(x) \
+ (((x) >> S_FW_QP_RES_WR_HOSTFCMODE) & M_FW_QP_RES_WR_HOSTFCMODE)
+
+#define S_FW_QP_RES_WR_CPRIO 19
+#define M_FW_QP_RES_WR_CPRIO 0x1
+#define V_FW_QP_RES_WR_CPRIO(x) ((x) << S_FW_QP_RES_WR_CPRIO)
+#define G_FW_QP_RES_WR_CPRIO(x) \
+ (((x) >> S_FW_QP_RES_WR_CPRIO) & M_FW_QP_RES_WR_CPRIO)
+#define F_FW_QP_RES_WR_CPRIO V_FW_QP_RES_WR_CPRIO(1U)
+
+#define S_FW_QP_RES_WR_ONCHIP 18
+#define M_FW_QP_RES_WR_ONCHIP 0x1
+#define V_FW_QP_RES_WR_ONCHIP(x) ((x) << S_FW_QP_RES_WR_ONCHIP)
+#define G_FW_QP_RES_WR_ONCHIP(x) \
+ (((x) >> S_FW_QP_RES_WR_ONCHIP) & M_FW_QP_RES_WR_ONCHIP)
+#define F_FW_QP_RES_WR_ONCHIP V_FW_QP_RES_WR_ONCHIP(1U)
+
+#define S_FW_QP_RES_WR_PCIECHN 16
+#define M_FW_QP_RES_WR_PCIECHN 0x3
+#define V_FW_QP_RES_WR_PCIECHN(x) ((x) << S_FW_QP_RES_WR_PCIECHN)
+#define G_FW_QP_RES_WR_PCIECHN(x) \
+ (((x) >> S_FW_QP_RES_WR_PCIECHN) & M_FW_QP_RES_WR_PCIECHN)
+
+#define S_FW_QP_RES_WR_IQID 0
+#define M_FW_QP_RES_WR_IQID 0xffff
+#define V_FW_QP_RES_WR_IQID(x) ((x) << S_FW_QP_RES_WR_IQID)
+#define G_FW_QP_RES_WR_IQID(x) \
+ (((x) >> S_FW_QP_RES_WR_IQID) & M_FW_QP_RES_WR_IQID)
+
+#define S_FW_QP_RES_WR_DCAEN 31
+#define M_FW_QP_RES_WR_DCAEN 0x1
+#define V_FW_QP_RES_WR_DCAEN(x) ((x) << S_FW_QP_RES_WR_DCAEN)
+#define G_FW_QP_RES_WR_DCAEN(x) \
+ (((x) >> S_FW_QP_RES_WR_DCAEN) & M_FW_QP_RES_WR_DCAEN)
+#define F_FW_QP_RES_WR_DCAEN V_FW_QP_RES_WR_DCAEN(1U)
+
+#define S_FW_QP_RES_WR_DCACPU 26
+#define M_FW_QP_RES_WR_DCACPU 0x1f
+#define V_FW_QP_RES_WR_DCACPU(x) ((x) << S_FW_QP_RES_WR_DCACPU)
+#define G_FW_QP_RES_WR_DCACPU(x) \
+ (((x) >> S_FW_QP_RES_WR_DCACPU) & M_FW_QP_RES_WR_DCACPU)
+
+#define S_FW_QP_RES_WR_FBMIN 23
+#define M_FW_QP_RES_WR_FBMIN 0x7
+#define V_FW_QP_RES_WR_FBMIN(x) ((x) << S_FW_QP_RES_WR_FBMIN)
+#define G_FW_QP_RES_WR_FBMIN(x) \
+ (((x) >> S_FW_QP_RES_WR_FBMIN) & M_FW_QP_RES_WR_FBMIN)
+
+#define S_FW_QP_RES_WR_FBMAX 20
+#define M_FW_QP_RES_WR_FBMAX 0x7
+#define V_FW_QP_RES_WR_FBMAX(x) ((x) << S_FW_QP_RES_WR_FBMAX)
+#define G_FW_QP_RES_WR_FBMAX(x) \
+ (((x) >> S_FW_QP_RES_WR_FBMAX) & M_FW_QP_RES_WR_FBMAX)
+
+#define S_FW_QP_RES_WR_CIDXFTHRESHO 19
+#define M_FW_QP_RES_WR_CIDXFTHRESHO 0x1
+#define V_FW_QP_RES_WR_CIDXFTHRESHO(x) ((x) << S_FW_QP_RES_WR_CIDXFTHRESHO)
+#define G_FW_QP_RES_WR_CIDXFTHRESHO(x) \
+ (((x) >> S_FW_QP_RES_WR_CIDXFTHRESHO) & M_FW_QP_RES_WR_CIDXFTHRESHO)
+#define F_FW_QP_RES_WR_CIDXFTHRESHO V_FW_QP_RES_WR_CIDXFTHRESHO(1U)
+
+#define S_FW_QP_RES_WR_CIDXFTHRESH 16
+#define M_FW_QP_RES_WR_CIDXFTHRESH 0x7
+#define V_FW_QP_RES_WR_CIDXFTHRESH(x) ((x) << S_FW_QP_RES_WR_CIDXFTHRESH)
+#define G_FW_QP_RES_WR_CIDXFTHRESH(x) \
+ (((x) >> S_FW_QP_RES_WR_CIDXFTHRESH) & M_FW_QP_RES_WR_CIDXFTHRESH)
+
+#define S_FW_QP_RES_WR_EQSIZE 0
+#define M_FW_QP_RES_WR_EQSIZE 0xffff
+#define V_FW_QP_RES_WR_EQSIZE(x) ((x) << S_FW_QP_RES_WR_EQSIZE)
+#define G_FW_QP_RES_WR_EQSIZE(x) \
+ (((x) >> S_FW_QP_RES_WR_EQSIZE) & M_FW_QP_RES_WR_EQSIZE)
+
+#define S_FW_QP_RES_WR_IQANDST 15
+#define M_FW_QP_RES_WR_IQANDST 0x1
+#define V_FW_QP_RES_WR_IQANDST(x) ((x) << S_FW_QP_RES_WR_IQANDST)
+#define G_FW_QP_RES_WR_IQANDST(x) \
+ (((x) >> S_FW_QP_RES_WR_IQANDST) & M_FW_QP_RES_WR_IQANDST)
+#define F_FW_QP_RES_WR_IQANDST V_FW_QP_RES_WR_IQANDST(1U)
+
+#define S_FW_QP_RES_WR_IQANUS 14
+#define M_FW_QP_RES_WR_IQANUS 0x1
+#define V_FW_QP_RES_WR_IQANUS(x) ((x) << S_FW_QP_RES_WR_IQANUS)
+#define G_FW_QP_RES_WR_IQANUS(x) \
+ (((x) >> S_FW_QP_RES_WR_IQANUS) & M_FW_QP_RES_WR_IQANUS)
+#define F_FW_QP_RES_WR_IQANUS V_FW_QP_RES_WR_IQANUS(1U)
+
+#define S_FW_QP_RES_WR_IQANUD 12
+#define M_FW_QP_RES_WR_IQANUD 0x3
+#define V_FW_QP_RES_WR_IQANUD(x) ((x) << S_FW_QP_RES_WR_IQANUD)
+#define G_FW_QP_RES_WR_IQANUD(x) \
+ (((x) >> S_FW_QP_RES_WR_IQANUD) & M_FW_QP_RES_WR_IQANUD)
+
+#define S_FW_QP_RES_WR_IQANDSTINDEX 0
+#define M_FW_QP_RES_WR_IQANDSTINDEX 0xfff
+#define V_FW_QP_RES_WR_IQANDSTINDEX(x) ((x) << S_FW_QP_RES_WR_IQANDSTINDEX)
+#define G_FW_QP_RES_WR_IQANDSTINDEX(x) \
+ (((x) >> S_FW_QP_RES_WR_IQANDSTINDEX) & M_FW_QP_RES_WR_IQANDSTINDEX)
+
+#define S_FW_QP_RES_WR_IQDROPRSS 15
+#define M_FW_QP_RES_WR_IQDROPRSS 0x1
+#define V_FW_QP_RES_WR_IQDROPRSS(x) ((x) << S_FW_QP_RES_WR_IQDROPRSS)
+#define G_FW_QP_RES_WR_IQDROPRSS(x) \
+ (((x) >> S_FW_QP_RES_WR_IQDROPRSS) & M_FW_QP_RES_WR_IQDROPRSS)
+#define F_FW_QP_RES_WR_IQDROPRSS V_FW_QP_RES_WR_IQDROPRSS(1U)
+
+#define S_FW_QP_RES_WR_IQGTSMODE 14
+#define M_FW_QP_RES_WR_IQGTSMODE 0x1
+#define V_FW_QP_RES_WR_IQGTSMODE(x) ((x) << S_FW_QP_RES_WR_IQGTSMODE)
+#define G_FW_QP_RES_WR_IQGTSMODE(x) \
+ (((x) >> S_FW_QP_RES_WR_IQGTSMODE) & M_FW_QP_RES_WR_IQGTSMODE)
+#define F_FW_QP_RES_WR_IQGTSMODE V_FW_QP_RES_WR_IQGTSMODE(1U)
+
+#define S_FW_QP_RES_WR_IQPCIECH 12
+#define M_FW_QP_RES_WR_IQPCIECH 0x3
+#define V_FW_QP_RES_WR_IQPCIECH(x) ((x) << S_FW_QP_RES_WR_IQPCIECH)
+#define G_FW_QP_RES_WR_IQPCIECH(x) \
+ (((x) >> S_FW_QP_RES_WR_IQPCIECH) & M_FW_QP_RES_WR_IQPCIECH)
+
+#define S_FW_QP_RES_WR_IQDCAEN 11
+#define M_FW_QP_RES_WR_IQDCAEN 0x1
+#define V_FW_QP_RES_WR_IQDCAEN(x) ((x) << S_FW_QP_RES_WR_IQDCAEN)
+#define G_FW_QP_RES_WR_IQDCAEN(x) \
+ (((x) >> S_FW_QP_RES_WR_IQDCAEN) & M_FW_QP_RES_WR_IQDCAEN)
+#define F_FW_QP_RES_WR_IQDCAEN V_FW_QP_RES_WR_IQDCAEN(1U)
+
+#define S_FW_QP_RES_WR_IQDCACPU 6
+#define M_FW_QP_RES_WR_IQDCACPU 0x1f
+#define V_FW_QP_RES_WR_IQDCACPU(x) ((x) << S_FW_QP_RES_WR_IQDCACPU)
+#define G_FW_QP_RES_WR_IQDCACPU(x) \
+ (((x) >> S_FW_QP_RES_WR_IQDCACPU) & M_FW_QP_RES_WR_IQDCACPU)
+
+#define S_FW_QP_RES_WR_IQINTCNTTHRESH 4
+#define M_FW_QP_RES_WR_IQINTCNTTHRESH 0x3
+#define V_FW_QP_RES_WR_IQINTCNTTHRESH(x) \
+ ((x) << S_FW_QP_RES_WR_IQINTCNTTHRESH)
+#define G_FW_QP_RES_WR_IQINTCNTTHRESH(x) \
+ (((x) >> S_FW_QP_RES_WR_IQINTCNTTHRESH) & M_FW_QP_RES_WR_IQINTCNTTHRESH)
+
+#define S_FW_QP_RES_WR_IQO 3
+#define M_FW_QP_RES_WR_IQO 0x1
+#define V_FW_QP_RES_WR_IQO(x) ((x) << S_FW_QP_RES_WR_IQO)
+#define G_FW_QP_RES_WR_IQO(x) \
+ (((x) >> S_FW_QP_RES_WR_IQO) & M_FW_QP_RES_WR_IQO)
+#define F_FW_QP_RES_WR_IQO V_FW_QP_RES_WR_IQO(1U)
+
+#define S_FW_QP_RES_WR_IQCPRIO 2
+#define M_FW_QP_RES_WR_IQCPRIO 0x1
+#define V_FW_QP_RES_WR_IQCPRIO(x) ((x) << S_FW_QP_RES_WR_IQCPRIO)
+#define G_FW_QP_RES_WR_IQCPRIO(x) \
+ (((x) >> S_FW_QP_RES_WR_IQCPRIO) & M_FW_QP_RES_WR_IQCPRIO)
+#define F_FW_QP_RES_WR_IQCPRIO V_FW_QP_RES_WR_IQCPRIO(1U)
+
+#define S_FW_QP_RES_WR_IQESIZE 0
+#define M_FW_QP_RES_WR_IQESIZE 0x3
+#define V_FW_QP_RES_WR_IQESIZE(x) ((x) << S_FW_QP_RES_WR_IQESIZE)
+#define G_FW_QP_RES_WR_IQESIZE(x) \
+ (((x) >> S_FW_QP_RES_WR_IQESIZE) & M_FW_QP_RES_WR_IQESIZE)
+
+#define S_FW_QP_RES_WR_IQNS 31
+#define M_FW_QP_RES_WR_IQNS 0x1
+#define V_FW_QP_RES_WR_IQNS(x) ((x) << S_FW_QP_RES_WR_IQNS)
+#define G_FW_QP_RES_WR_IQNS(x) \
+ (((x) >> S_FW_QP_RES_WR_IQNS) & M_FW_QP_RES_WR_IQNS)
+#define F_FW_QP_RES_WR_IQNS V_FW_QP_RES_WR_IQNS(1U)
+
+#define S_FW_QP_RES_WR_IQRO 30
+#define M_FW_QP_RES_WR_IQRO 0x1
+#define V_FW_QP_RES_WR_IQRO(x) ((x) << S_FW_QP_RES_WR_IQRO)
+#define G_FW_QP_RES_WR_IQRO(x) \
+ (((x) >> S_FW_QP_RES_WR_IQRO) & M_FW_QP_RES_WR_IQRO)
+#define F_FW_QP_RES_WR_IQRO V_FW_QP_RES_WR_IQRO(1U)
+
+
struct fw_ri_res {
union fw_ri_restype {
struct fw_ri_res_sqrq {
@@ -1586,6 +1957,13 @@ struct fw_ri_res_wr {
#endif
};
+#define S_FW_RI_RES_WR_TRANSPORT_TYPE 16
+#define M_FW_RI_RES_WR_TRANSPORT_TYPE 0x7
+#define V_FW_RI_RES_WR_TRANSPORT_TYPE(x) \
+ ((x) << S_FW_RI_RES_WR_TRANSPORT_TYPE)
+#define G_FW_RI_RES_WR_TRANSPORT_TYPE(x) \
+ (((x) >> S_FW_RI_RES_WR_TRANSPORT_TYPE) & M_FW_RI_RES_WR_TRANSPORT_TYPE)
+
#define S_FW_RI_RES_WR_VFN 8
#define M_FW_RI_RES_WR_VFN 0xff
#define V_FW_RI_RES_WR_VFN(x) ((x) << S_FW_RI_RES_WR_VFN)
@@ -2092,8 +2470,18 @@ enum fw_ri_init_rqeqid_srq {
FW_RI_INIT_RQEQID_SRQ = 1 << 31,
};
+enum fw_nvmet_ulpsubmode {
+ FW_NVMET_ULPSUBMODE_HCRC = 0x1<<0,
+ FW_NVMET_ULPSUBMODE_DCRC = 0x1<<1,
+ FW_NVMET_ULPSUBMODE_ING_DIR = 0x1<<2,
+ FW_NVMET_ULPSUBMODE_SRQ_ENABLE = 0x1<<3,
+ FW_NVMET_ULPSUBMODE_PER_PDU_CMP = 0x1<<4,
+ FW_NVMET_ULPSUBMODE_PI_ENABLE = 0x1<<5,
+ FW_NVMET_ULPSUBMODE_USER_MODE = 0x1<<6,
+};
+
struct fw_ri_wr {
- __be32 op_compl;
+ __be32 op_compl; /* op_to_transport_type */
__be32 flowid_len16;
__u64 cookie;
union fw_ri {
@@ -2123,6 +2511,55 @@ struct fw_ri_wr {
struct fw_ri_send_wr send;
} u;
} init;
+ struct fw_ri_rocev2_init {
+ __u8 type;
+ __u8 r3[3];
+ __u8 rocev2_flags;
+ __u8 qp_caps;
+ __be16 nrqe;
+ __be32 pdid;
+ __be32 qpid;
+ __be32 sq_eqid;
+ __be32 rq_eqid;
+ __be32 scqid;
+ __be32 rcqid;
+ __be32 ord_max;
+ __be32 ird_max;
+ __be32 psn_pkd;
+ __be32 epsn_pkd;
+ __be32 hwrqsize;
+ __be32 hwrqaddr;
+ __be32 q_key;
+ __u8 pkthdrsize;
+ __u8 r;
+ __be16 p_key;
+ //struct cpl_tx_tnl_lso tnl_lso;
+ __u8 tnl_lso[48]; /* cpl_tx_tnl_lso + cpl_tx_pkt_xt */
+#ifndef C99_NOT_SUPPORTED
+ struct fw_ri_immd pkthdr[0];
+#endif
+ } rocev2_init;
+ struct fw_ri_nvmet_init {
+ __u8 type;
+ __u8 r3[3];
+ __u8 nvmt_flags;
+ __u8 qp_caps;
+ __be16 nrqe;
+ __be32 pdid;
+ __be32 qpid;
+ __be32 sq_eqid;
+ __be32 rq_eqid;
+ __be32 scqid;
+ __be32 rcqid;
+ __be32 r4[4];
+ __be32 hwrqsize;
+ __be32 hwrqaddr;
+ __u8 ulpsubmode;
+ __u8 nvmt_pda_cmp_imm_sz;
+ __be16 r7;
+ __be32 tpt_offset_t10_config;
+ __be32 r8[2];
+ } nvmet_init;
struct fw_ri_fini {
__u8 type;
__u8 r3[7];
@@ -2137,6 +2574,12 @@ struct fw_ri_wr {
} u;
};
+#define S_FW_RI_WR_TRANSPORT_TYPE 16
+#define M_FW_RI_WR_TRANSPORT_TYPE 0x7
+#define V_FW_RI_WR_TRANSPORT_TYPE(x) ((x) << S_FW_RI_WR_TRANSPORT_TYPE)
+#define G_FW_RI_WR_TRANSPORT_TYPE(x) \
+ (((x) >> S_FW_RI_WR_TRANSPORT_TYPE) & M_FW_RI_WR_TRANSPORT_TYPE)
+
#define S_FW_RI_WR_MPAREQBIT 7
#define M_FW_RI_WR_MPAREQBIT 0x1
#define V_FW_RI_WR_MPAREQBIT(x) ((x) << S_FW_RI_WR_MPAREQBIT)
@@ -2157,6 +2600,414 @@ struct fw_ri_wr {
#define G_FW_RI_WR_P2PTYPE(x) \
(((x) >> S_FW_RI_WR_P2PTYPE) & M_FW_RI_WR_P2PTYPE)
+/* Accessors for the packed fields of the rocev2/nvmet init payloads:
+ * 24-bit PSN/EPSN, NVMe PDA and completion-immediate size, and the
+ * combined TPT-offset (22 bits) / T10 config (10 bits) word. */
+#define S_FW_RI_WR_PSN 0
+#define M_FW_RI_WR_PSN 0xffffff
+#define V_FW_RI_WR_PSN(x) ((x) << S_FW_RI_WR_PSN)
+#define G_FW_RI_WR_PSN(x) (((x) >> S_FW_RI_WR_PSN) & M_FW_RI_WR_PSN)
+
+#define S_FW_RI_WR_EPSN 0
+#define M_FW_RI_WR_EPSN 0xffffff
+#define V_FW_RI_WR_EPSN(x) ((x) << S_FW_RI_WR_EPSN)
+#define G_FW_RI_WR_EPSN(x) (((x) >> S_FW_RI_WR_EPSN) & M_FW_RI_WR_EPSN)
+
+#define S_FW_RI_WR_NVMT_PDA 3
+#define M_FW_RI_WR_NVMT_PDA 0x1f
+#define V_FW_RI_WR_NVMT_PDA(x) ((x) << S_FW_RI_WR_NVMT_PDA)
+#define G_FW_RI_WR_NVMT_PDA(x) \
+ (((x) >> S_FW_RI_WR_NVMT_PDA) & M_FW_RI_WR_NVMT_PDA)
+
+#define S_FW_RI_WR_CMP_IMM_SZ 1
+#define M_FW_RI_WR_CMP_IMM_SZ 0x3
+#define V_FW_RI_WR_CMP_IMM_SZ(x) ((x) << S_FW_RI_WR_CMP_IMM_SZ)
+#define G_FW_RI_WR_CMP_IMM_SZ(x) \
+ (((x) >> S_FW_RI_WR_CMP_IMM_SZ) & M_FW_RI_WR_CMP_IMM_SZ)
+
+#define S_FW_RI_WR_TPT_OFFSET 10
+#define M_FW_RI_WR_TPT_OFFSET 0x3fffff
+#define V_FW_RI_WR_TPT_OFFSET(x) ((x) << S_FW_RI_WR_TPT_OFFSET)
+#define G_FW_RI_WR_TPT_OFFSET(x) \
+ (((x) >> S_FW_RI_WR_TPT_OFFSET) & M_FW_RI_WR_TPT_OFFSET)
+
+#define S_FW_RI_WR_T10_CONFIG 0
+#define M_FW_RI_WR_T10_CONFIG 0x3ff
+#define V_FW_RI_WR_T10_CONFIG(x) ((x) << S_FW_RI_WR_T10_CONFIG)
+#define G_FW_RI_WR_T10_CONFIG(x) \
+ (((x) >> S_FW_RI_WR_T10_CONFIG) & M_FW_RI_WR_T10_CONFIG)
+
+
+/******************************************************************************
+ * R o C E V 2 W O R K R E Q U E S T s
+ **************************************/
+/*
+ * RoCEv2 work-request opcodes.  The values appear to follow the IBTA
+ * Base Transport Header opcode numbering (RC group at 0x00.., XRC
+ * group at 0xa0..); note 0x15/0xb5 are intentionally skipped in both
+ * groups -- confirm against the IB spec before relying on this.
+ */
+enum fw_rocev2_wr_opcode {
+ /* RC */
+ FW_ROCEV2_RC_SEND_FIRST = 0x00,
+ FW_ROCEV2_RC_SEND_MIDDLE = 0x01,
+ FW_ROCEV2_RC_SEND_LAST = 0x02,
+ FW_ROCEV2_RC_SEND_LAST_WITH_IMMD = 0x03,
+ FW_ROCEV2_RC_SEND_ONLY = 0x04,
+ FW_ROCEV2_RC_SEND_ONLY_WITH_IMMD = 0x05,
+ FW_ROCEV2_RC_RDMA_WRITE_FIRST = 0x06,
+ FW_ROCEV2_RC_RDMA_WRITE_MIDDLE = 0x07,
+ FW_ROCEV2_RC_RDMA_WRITE_LAST = 0x08,
+ FW_ROCEV2_RC_RDMA_WRITE_LAST_WITH_IMMD = 0x09,
+ FW_ROCEV2_RC_RDMA_WRITE_ONLY = 0x0a,
+ FW_ROCEV2_RC_RDMA_WRITE_ONLY_WITH_IMMD = 0x0b,
+ FW_ROCEV2_RC_RDMA_READ_REQ = 0x0c,
+ FW_ROCEV2_RC_RDMA_READ_RESP_FIRST = 0x0d,
+ FW_ROCEV2_RC_RDMA_READ_RESP_MIDDLE = 0x0e,
+ FW_ROCEV2_RC_RDMA_READ_RESP_LAST = 0x0f,
+ FW_ROCEV2_RC_RDMA_READ_RESP_ONLY = 0x10,
+ FW_ROCEV2_RC_ACK = 0x11,
+ FW_ROCEV2_RC_ATOMIC_ACK = 0x12,
+ FW_ROCEV2_RC_CMP_SWAP = 0x13,
+ FW_ROCEV2_RC_FETCH_ADD = 0x14,
+ FW_ROCEV2_RC_SEND_LAST_WITH_INV = 0x16,
+ FW_ROCEV2_RC_SEND_ONLY_WITH_INV = 0x17,
+
+ /* XRC */
+ FW_ROCEV2_XRC_SEND_FIRST = 0xa0,
+ FW_ROCEV2_XRC_SEND_MIDDLE = 0xa1,
+ FW_ROCEV2_XRC_SEND_LAST = 0xa2,
+ FW_ROCEV2_XRC_SEND_LAST_WITH_IMMD = 0xa3,
+ FW_ROCEV2_XRC_SEND_ONLY = 0xa4,
+ FW_ROCEV2_XRC_SEND_ONLY_WITH_IMMD = 0xa5,
+ FW_ROCEV2_XRC_RDMA_WRITE_FIRST = 0xa6,
+ FW_ROCEV2_XRC_RDMA_WRITE_MIDDLE = 0xa7,
+ FW_ROCEV2_XRC_RDMA_WRITE_LAST = 0xa8,
+ FW_ROCEV2_XRC_RDMA_WRITE_LAST_WITH_IMMD = 0xa9,
+ FW_ROCEV2_XRC_RDMA_WRITE_ONLY = 0xaa,
+ FW_ROCEV2_XRC_RDMA_WRITE_ONLY_WITH_IMMD = 0xab,
+ FW_ROCEV2_XRC_RDMA_READ_REQ = 0xac,
+ FW_ROCEV2_XRC_RDMA_READ_RESP_FIRST = 0xad,
+ FW_ROCEV2_XRC_RDMA_READ_RESP_MIDDLE = 0xae,
+ FW_ROCEV2_XRC_RDMA_READ_RESP_LAST = 0xaf,
+ FW_ROCEV2_XRC_RDMA_READ_RESP_ONLY = 0xb0,
+ FW_ROCEV2_XRC_ACK = 0xb1,
+ FW_ROCEV2_XRC_ATOMIC_ACK = 0xb2,
+ FW_ROCEV2_XRC_CMP_SWAP = 0xb3,
+ FW_ROCEV2_XRC_FETCH_ADD = 0xb4,
+ FW_ROCEV2_XRC_SEND_LAST_WITH_INV = 0xb6,
+ FW_ROCEV2_XRC_SEND_ONLY_WITH_INV = 0xb7,
+};
+
+/* NOTE(review): dead placeholder kept for a future CQE error enum. */
+#if 0
+enum fw_rocev2_cqe_err {
+ /* TODO */
+};
+#endif
+
+/*
+ * v2 RDMA WRITE work request.  psn_pkd carries the 24-bit starting PSN
+ * (accessors below); the payload follows as either immediate data or
+ * a scatter list when C99 flexible array members are available.
+ */
+struct fw_ri_v2_rdma_write_wr {
+ __u8 opcode;
+ __u8 v2_flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2; /* set to 0 */
+ __be32 psn_pkd;
+ __be32 r4[2];
+ __be32 r5;
+ __be32 immd_data;
+ __be64 to_sink;
+ __be32 stag_sink;
+ __be32 plen;
+#ifndef C99_NOT_SUPPORTED
+ union {
+ struct fw_ri_immd immd_src[0];
+ struct fw_ri_isgl isgl_src[0];
+ } u;
+#endif
+};
+
+#define S_FW_RI_V2_RDMA_WRITE_WR_PSN 0
+#define M_FW_RI_V2_RDMA_WRITE_WR_PSN 0xffffff
+#define V_FW_RI_V2_RDMA_WRITE_WR_PSN(x) ((x) << S_FW_RI_V2_RDMA_WRITE_WR_PSN)
+#define G_FW_RI_V2_RDMA_WRITE_WR_PSN(x) \
+ (((x) >> S_FW_RI_V2_RDMA_WRITE_WR_PSN) & M_FW_RI_V2_RDMA_WRITE_WR_PSN)
+
+/*
+ * v2 SEND work request.  sendop_psn packs the send opcode (bits 31:24)
+ * and the 24-bit PSN (accessors below).  stag_inv is presumably the
+ * STag to invalidate for send-with-invalidate -- TODO confirm.
+ */
+struct fw_ri_v2_send_wr {
+ __u8 opcode;
+ __u8 v2_flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2; /* set to 0 */
+ __be32 stag_inv;
+ __be32 plen;
+ __be32 sendop_psn;
+ __u8 immdlen;
+ __u8 r3[3];
+ __be32 r4;
+ /* CPL_TX_TNL_LSO, CPL_TX_PKT_XT and Eth/IP/UDP/BTH
+ * headers in UD QP case, align size to 16B */
+#ifndef C99_NOT_SUPPORTED
+ union {
+ struct fw_ri_immd immd_src[0];
+ struct fw_ri_isgl isgl_src[0];
+ } u;
+#endif
+};
+
+#define S_FW_RI_V2_SEND_WR_SENDOP 24
+#define M_FW_RI_V2_SEND_WR_SENDOP 0xff
+#define V_FW_RI_V2_SEND_WR_SENDOP(x) ((x) << S_FW_RI_V2_SEND_WR_SENDOP)
+#define G_FW_RI_V2_SEND_WR_SENDOP(x) \
+ (((x) >> S_FW_RI_V2_SEND_WR_SENDOP) & M_FW_RI_V2_SEND_WR_SENDOP)
+
+#define S_FW_RI_V2_SEND_WR_PSN 0
+#define M_FW_RI_V2_SEND_WR_PSN 0xffffff
+#define V_FW_RI_V2_SEND_WR_PSN(x) ((x) << S_FW_RI_V2_SEND_WR_PSN)
+#define G_FW_RI_V2_SEND_WR_PSN(x) \
+ (((x) >> S_FW_RI_V2_SEND_WR_PSN) & M_FW_RI_V2_SEND_WR_PSN)
+
+/*
+ * v2 RDMA READ work request.  psn_pkd holds the 24-bit PSN (accessors
+ * below); isgl_sink describes the local sink buffers (per the inline
+ * comment: up to 4 SGEs for RoCEv2, 1 for iWARP).
+ */
+struct fw_ri_v2_rdma_read_wr {
+ __u8 opcode;
+ __u8 v2_flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2; /* set to 0 */
+ __be32 psn_pkd;
+ __be64 to_src;
+ __be32 stag_src;
+ __be32 plen;
+ struct fw_ri_isgl isgl_sink; /* RRQ, max 4 nsge in rocev2, 1 in iwarp */
+};
+
+#define S_FW_RI_V2_RDMA_READ_WR_PSN 0
+#define M_FW_RI_V2_RDMA_READ_WR_PSN 0xffffff
+#define V_FW_RI_V2_RDMA_READ_WR_PSN(x) ((x) << S_FW_RI_V2_RDMA_READ_WR_PSN)
+#define G_FW_RI_V2_RDMA_READ_WR_PSN(x) \
+ (((x) >> S_FW_RI_V2_RDMA_READ_WR_PSN) & M_FW_RI_V2_RDMA_READ_WR_PSN)
+
+/*
+ * v2 ATOMIC work request header.  atomicop_psn packs the atomic
+ * opcode (bits 31:28) and the 24-bit PSN (accessors below).
+ */
+struct fw_ri_v2_atomic_wr {
+ __u8 opcode;
+ __u8 v2_flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2; /* set to 0 */
+ __be32 atomicop_psn;
+};
+
+#define S_FW_RI_V2_ATOMIC_WR_ATOMICOP 28
+#define M_FW_RI_V2_ATOMIC_WR_ATOMICOP 0xf
+#define V_FW_RI_V2_ATOMIC_WR_ATOMICOP(x) \
+ ((x) << S_FW_RI_V2_ATOMIC_WR_ATOMICOP)
+#define G_FW_RI_V2_ATOMIC_WR_ATOMICOP(x) \
+ (((x) >> S_FW_RI_V2_ATOMIC_WR_ATOMICOP) & M_FW_RI_V2_ATOMIC_WR_ATOMICOP)
+
+#define S_FW_RI_V2_ATOMIC_WR_PSN 0
+#define M_FW_RI_V2_ATOMIC_WR_PSN 0xffffff
+#define V_FW_RI_V2_ATOMIC_WR_PSN(x) ((x) << S_FW_RI_V2_ATOMIC_WR_PSN)
+#define G_FW_RI_V2_ATOMIC_WR_PSN(x) \
+ (((x) >> S_FW_RI_V2_ATOMIC_WR_PSN) & M_FW_RI_V2_ATOMIC_WR_PSN)
+
+/*
+ * v2 memory-window BIND work request.  qpbinde_to_dcacpu packs the
+ * QP-bind-enable, no-snoop and DCA-CPU fields (accessors below).
+ */
+struct fw_ri_v2_bind_mw_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2;
+ __be32 r5;
+ __be32 r6[2];
+ __u8 qpbinde_to_dcacpu;
+ __u8 pgsz_shift;
+ __u8 addr_type;
+ __u8 mem_perms;
+ __be32 stag_mr;
+ __be32 stag_mw;
+ __be32 r3;
+ __be64 len_mw;
+ __be64 va_fbo;
+ __be64 r4;
+};
+
+
+#define S_FW_RI_V2_BIND_MW_WR_QPBINDE 6
+#define M_FW_RI_V2_BIND_MW_WR_QPBINDE 0x1
+#define V_FW_RI_V2_BIND_MW_WR_QPBINDE(x) \
+ ((x) << S_FW_RI_V2_BIND_MW_WR_QPBINDE)
+#define G_FW_RI_V2_BIND_MW_WR_QPBINDE(x) \
+ (((x) >> S_FW_RI_V2_BIND_MW_WR_QPBINDE) & M_FW_RI_V2_BIND_MW_WR_QPBINDE)
+#define F_FW_RI_V2_BIND_MW_WR_QPBINDE V_FW_RI_V2_BIND_MW_WR_QPBINDE(1U)
+
+#define S_FW_RI_V2_BIND_MW_WR_NS 5
+#define M_FW_RI_V2_BIND_MW_WR_NS 0x1
+#define V_FW_RI_V2_BIND_MW_WR_NS(x) ((x) << S_FW_RI_V2_BIND_MW_WR_NS)
+#define G_FW_RI_V2_BIND_MW_WR_NS(x) \
+ (((x) >> S_FW_RI_V2_BIND_MW_WR_NS) & M_FW_RI_V2_BIND_MW_WR_NS)
+#define F_FW_RI_V2_BIND_MW_WR_NS V_FW_RI_V2_BIND_MW_WR_NS(1U)
+
+#define S_FW_RI_V2_BIND_MW_WR_DCACPU 0
+#define M_FW_RI_V2_BIND_MW_WR_DCACPU 0x1f
+#define V_FW_RI_V2_BIND_MW_WR_DCACPU(x) ((x) << S_FW_RI_V2_BIND_MW_WR_DCACPU)
+#define G_FW_RI_V2_BIND_MW_WR_DCACPU(x) \
+ (((x) >> S_FW_RI_V2_BIND_MW_WR_DCACPU) & M_FW_RI_V2_BIND_MW_WR_DCACPU)
+
+/*
+ * v2 fast-register NSMR work request: registers a memory region
+ * (stag, 64-bit length and VA split into hi/lo words).
+ * qpbinde_to_dcacpu packs the same control fields as the bind-MW WR
+ * (accessors below).
+ */
+struct fw_ri_v2_fr_nsmr_wr {
+ __u8 opcode;
+ __u8 v2_flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2;
+ __be32 r3;
+ __be32 r4[2];
+ __u8 qpbinde_to_dcacpu;
+ __u8 pgsz_shift;
+ __u8 addr_type;
+ __u8 mem_perms;
+ __be32 stag;
+ __be32 len_hi;
+ __be32 len_lo;
+ __be32 va_hi;
+ __be32 va_lo_fbo;
+};
+
+#define S_FW_RI_V2_FR_NSMR_WR_QPBINDE 6
+#define M_FW_RI_V2_FR_NSMR_WR_QPBINDE 0x1
+#define V_FW_RI_V2_FR_NSMR_WR_QPBINDE(x) \
+ ((x) << S_FW_RI_V2_FR_NSMR_WR_QPBINDE)
+#define G_FW_RI_V2_FR_NSMR_WR_QPBINDE(x) \
+ (((x) >> S_FW_RI_V2_FR_NSMR_WR_QPBINDE) & M_FW_RI_V2_FR_NSMR_WR_QPBINDE)
+#define F_FW_RI_V2_FR_NSMR_WR_QPBINDE V_FW_RI_V2_FR_NSMR_WR_QPBINDE(1U)
+
+#define S_FW_RI_V2_FR_NSMR_WR_NS 5
+#define M_FW_RI_V2_FR_NSMR_WR_NS 0x1
+#define V_FW_RI_V2_FR_NSMR_WR_NS(x) ((x) << S_FW_RI_V2_FR_NSMR_WR_NS)
+#define G_FW_RI_V2_FR_NSMR_WR_NS(x) \
+ (((x) >> S_FW_RI_V2_FR_NSMR_WR_NS) & M_FW_RI_V2_FR_NSMR_WR_NS)
+#define F_FW_RI_V2_FR_NSMR_WR_NS V_FW_RI_V2_FR_NSMR_WR_NS(1U)
+
+#define S_FW_RI_V2_FR_NSMR_WR_DCACPU 0
+#define M_FW_RI_V2_FR_NSMR_WR_DCACPU 0x1f
+#define V_FW_RI_V2_FR_NSMR_WR_DCACPU(x) ((x) << S_FW_RI_V2_FR_NSMR_WR_DCACPU)
+#define G_FW_RI_V2_FR_NSMR_WR_DCACPU(x) \
+ (((x) >> S_FW_RI_V2_FR_NSMR_WR_DCACPU) & M_FW_RI_V2_FR_NSMR_WR_DCACPU)
+
+/******************************************************************************
+ * N V M E - T C P W O R K R E Q U E S T s
+ *****************************************************************************/
+
+/*
+ * NVMe v2 fast-register WR: writes immediate or DSGL data (or resets
+ * memory) at mem_write_addr32.  op_to_wrid packs the TPTE_PBL and
+ * RESET_MEM flags plus a 16-bit WRID (accessors below).  The
+ * anonymous union (C11) selects the length field by mode.
+ */
+struct fw_nvmet_v2_fr_nsmr_wr {
+ __be32 op_to_wrid;
+ __be32 flowid_len16;
+ __be32 r3;
+ __be32 r4;
+ __be32 mem_write_addr32;
+ __u8 r5;
+ __u8 imm_data_len32;
+ union {
+ __be16 dsgl_data_len32;
+ __be16 reset_mem_len32;
+ };
+ __be64 r6;
+};
+
+#define S_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL 23
+#define M_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL 0x1
+#define V_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL(x) \
+ ((x) << S_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL)
+#define G_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL(x) \
+ (((x) >> S_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL) & \
+ M_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL)
+#define F_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL \
+ V_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL(1U)
+
+#define S_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM 22
+#define M_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM 0x1
+#define V_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM(x) \
+ ((x) << S_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM)
+#define G_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM(x) \
+ (((x) >> S_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM) & \
+ M_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM)
+#define F_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM \
+ V_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM(1U)
+
+#define S_FW_NVMET_V2_FR_NSMR_WR_WRID 0
+#define M_FW_NVMET_V2_FR_NSMR_WR_WRID 0xffff
+#define V_FW_NVMET_V2_FR_NSMR_WR_WRID(x) \
+ ((x) << S_FW_NVMET_V2_FR_NSMR_WR_WRID)
+#define G_FW_NVMET_V2_FR_NSMR_WR_WRID(x) \
+ (((x) >> S_FW_NVMET_V2_FR_NSMR_WR_WRID) & M_FW_NVMET_V2_FR_NSMR_WR_WRID)
+
+/*
+ * v2 NVMe TX data work request.  flags_hi_to_flags_lo packs the
+ * hi/lo flag fields around per-WR ULP submode bits (ISO, PI, DCRC,
+ * HCRC -- accessors below); optional immediate data (fw_tx_pi_hdr,
+ * iso cpl, nvmet header) precedes the dsgl/isgl payload union.
+ */
+struct fw_v2_nvmet_tx_data_wr {
+ __be32 op_to_immdlen;
+ __be32 flowid_len16;
+ __be32 r4;
+ __be16 r5;
+ __be16 wrid;
+ __be32 r6;
+ __be32 seqno;
+ __be32 plen;
+ __be32 flags_hi_to_flags_lo;
+ /* optional immdlen data (fw_tx_pi_hdr, iso cpl, nvmet header etc) */
+#ifndef C99_NOT_SUPPORTED
+ union {
+ struct fw_ri_dsgl dsgl_src[0];
+ struct fw_ri_isgl isgl_src[0];
+ } u;
+#endif
+};
+
+#define S_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI 10
+#define M_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI 0x3fffff
+#define V_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI(x) \
+ ((x) << S_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI)
+#define G_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI(x) \
+ (((x) >> S_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI) & \
+ M_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI)
+
+#define S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO 9
+#define M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO 0x1
+#define V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO(x) \
+ ((x) << S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO)
+#define G_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO(x) \
+ (((x) >> S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO) & \
+ M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO)
+#define F_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO \
+ V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO(1U)
+
+#define S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI 8
+#define M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI 0x1
+#define V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI(x) \
+ ((x) << S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI)
+#define G_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI(x) \
+ (((x) >> S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI) & \
+ M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI)
+#define F_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI \
+ V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI(1U)
+
+#define S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC 7
+#define M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC 0x1
+#define V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC(x) \
+ ((x) << S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC)
+#define G_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC(x) \
+ (((x) >> S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC) & \
+ M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC)
+#define F_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC \
+ V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC(1U)
+
+#define S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC 6
+#define M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC 0x1
+#define V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC(x) \
+ ((x) << S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC)
+#define G_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC(x) \
+ (((x) >> S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC) & \
+ M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC)
+#define F_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC \
+ V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC(1U)
+
+#define S_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO 0
+#define M_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO 0x3f
+#define V_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO(x) \
+ ((x) << S_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO)
+#define G_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO(x) \
+ (((x) >> S_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO) & \
+ M_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO)
+
+
/******************************************************************************
* F O i S C S I W O R K R E Q U E S T s
*********************************************/
@@ -3827,17 +4678,17 @@ struct fw_pi_error {
(((x) >> S_FW_PI_ERROR_ERR_TYPE) & M_FW_PI_ERROR_ERR_TYPE)
struct fw_tlstx_data_wr {
- __be32 op_to_immdlen;
- __be32 flowid_len16;
- __be32 plen;
- __be32 lsodisable_to_flags;
- __be32 r5;
- __be32 ctxloc_to_exp;
- __be16 mfs;
- __be16 adjustedplen_pkd;
- __be16 expinplenmax_pkd;
- __u8 pdusinplenmax_pkd;
- __u8 r10;
+ __be32 op_to_immdlen;
+ __be32 flowid_len16;
+ __be32 plen;
+ __be32 lsodisable_to_flags;
+ __be32 r5;
+ __be32 ctxloc_to_exp;
+ __be16 mfs;
+ __be16 adjustedplen_pkd;
+ __be16 expinplenmax_pkd;
+ __u8 pdusinplenmax_pkd;
+ __u8 r10;
};
#define S_FW_TLSTX_DATA_WR_OPCODE 24
@@ -4092,6 +4943,265 @@ struct fw_tls_tunnel_ofld_wr {
__be32 r4;
};
+/*
+ * IPsec SA update work request: SPI, destination/local IP (hi/lo
+ * 64-bit halves, presumably to cover IPv6), an egress- or
+ * ingress-format SA word block, and AES key material (128/192/256
+ * with a precomputed H value -- looks like AES-GCM GHASH; confirm
+ * against the firmware spec).  Field accessors follow below.
+ */
+struct fw_crypto_update_sa_wr {
+ __u8 opcode;
+ __u8 saop_to_txrx;
+ __u8 vfn;
+ __u8 r1;
+ __u8 r2[3];
+ __u8 len16;
+ __be64 cookie;
+ __be16 r3;
+ __be16 ipsecidx;
+ __be32 SPI;
+ __be64 dip_hi;
+ __be64 dip_lo;
+ __be64 lip_hi;
+ __be64 lip_lo;
+ union fw_crypto_update_sa_sa {
+ struct egress_sa {
+ __be32 valid_SPI_hi;
+ __be32 SPI_lo_eSeqNum_hi;
+ __be32 eSeqNum_lo_Salt_hi;
+ __be32 Salt_lo_to_keyID;
+ } egress;
+ struct ingress_sa {
+ __be32 valid_to_iSeqNum_hi;
+ __be32 iSeqNum_mi;
+ __be32 iSeqNum_lo_Salt_hi;
+ __be32 Salt_lo_to_IPVer;
+ } ingress;
+ } sa;
+ union fw_crypto_update_sa_key {
+ struct _aes128 {
+ __u8 key128[16];
+ __u8 H128[16];
+ __u8 rsvd[16];
+ } aes128;
+ struct _aes192 {
+ __u8 key192[24];
+ __be64 r3;
+ __u8 H192[16];
+ } aes192;
+ struct _aes256 {
+ __u8 key256[32];
+ __u8 H256[16];
+ } aes256;
+ } key;
+};
+
+/* Accessors for fw_crypto_update_sa_wr: the saop_to_txrx control
+ * byte, then the egress SA word block (valid/SPI/eSeqNum/Salt/keyID
+ * fields straddle 32-bit word boundaries, hence the _HI/_LO splits). */
+#define S_FW_CRYPTO_UPDATE_SA_WR_SAOP 2
+#define M_FW_CRYPTO_UPDATE_SA_WR_SAOP 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_SAOP(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SAOP)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SAOP(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SAOP) & M_FW_CRYPTO_UPDATE_SA_WR_SAOP)
+#define F_FW_CRYPTO_UPDATE_SA_WR_SAOP V_FW_CRYPTO_UPDATE_SA_WR_SAOP(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_MODE 1
+#define M_FW_CRYPTO_UPDATE_SA_WR_MODE 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_MODE(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_MODE)
+#define G_FW_CRYPTO_UPDATE_SA_WR_MODE(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_MODE) & M_FW_CRYPTO_UPDATE_SA_WR_MODE)
+#define F_FW_CRYPTO_UPDATE_SA_WR_MODE V_FW_CRYPTO_UPDATE_SA_WR_MODE(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_TXRX 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_TXRX 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_TXRX(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_TXRX)
+#define G_FW_CRYPTO_UPDATE_SA_WR_TXRX(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_TXRX) & M_FW_CRYPTO_UPDATE_SA_WR_TXRX)
+#define F_FW_CRYPTO_UPDATE_SA_WR_TXRX V_FW_CRYPTO_UPDATE_SA_WR_TXRX(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_VALID 31
+#define M_FW_CRYPTO_UPDATE_SA_WR_VALID 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_VALID(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_VALID)
+#define G_FW_CRYPTO_UPDATE_SA_WR_VALID(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_VALID) & M_FW_CRYPTO_UPDATE_SA_WR_VALID)
+#define F_FW_CRYPTO_UPDATE_SA_WR_VALID V_FW_CRYPTO_UPDATE_SA_WR_VALID(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SPI_HI 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_SPI_HI 0x7fffffff
+#define V_FW_CRYPTO_UPDATE_SA_WR_SPI_HI(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SPI_HI)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SPI_HI(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SPI_HI) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_SPI_HI)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SPI_LO 31
+#define M_FW_CRYPTO_UPDATE_SA_WR_SPI_LO 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_SPI_LO(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SPI_LO)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SPI_LO(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SPI_LO) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_SPI_LO)
+#define F_FW_CRYPTO_UPDATE_SA_WR_SPI_LO V_FW_CRYPTO_UPDATE_SA_WR_SPI_LO(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI 0x7fffffff
+#define V_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO 7
+#define M_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO 0x1ffffff
+#define V_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SALT_HI 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_SALT_HI 0x7f
+#define V_FW_CRYPTO_UPDATE_SA_WR_SALT_HI(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SALT_HI)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SALT_HI(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SALT_HI) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_SALT_HI)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SALT_LO 7
+#define M_FW_CRYPTO_UPDATE_SA_WR_SALT_LO 0x1ffffff
+#define V_FW_CRYPTO_UPDATE_SA_WR_SALT_LO(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SALT_LO)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SALT_LO(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SALT_LO) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_SALT_LO)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_KEYLEN 5
+#define M_FW_CRYPTO_UPDATE_SA_WR_KEYLEN 0x3
+#define V_FW_CRYPTO_UPDATE_SA_WR_KEYLEN(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_KEYLEN)
+#define G_FW_CRYPTO_UPDATE_SA_WR_KEYLEN(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_KEYLEN) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_KEYLEN)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE 4
+#define M_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE)
+#define F_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE \
+ V_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_KEYID 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_KEYID 0xf
+#define V_FW_CRYPTO_UPDATE_SA_WR_KEYID(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_KEYID)
+#define G_FW_CRYPTO_UPDATE_SA_WR_KEYID(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_KEYID) & M_FW_CRYPTO_UPDATE_SA_WR_KEYID)
+
+/* Ingress SA word-block accessors.
+ * NOTE(review): VALID, SALT_HI/SALT_LO, KEYLEN and MODE below are
+ * also #define'd with identical bodies in the egress group above;
+ * identical redefinition is legal C, but the duplicates could be
+ * dropped when this generated header is next regenerated. */
+#define S_FW_CRYPTO_UPDATE_SA_WR_VALID 31
+#define M_FW_CRYPTO_UPDATE_SA_WR_VALID 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_VALID(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_VALID)
+#define G_FW_CRYPTO_UPDATE_SA_WR_VALID(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_VALID) & M_FW_CRYPTO_UPDATE_SA_WR_VALID)
+#define F_FW_CRYPTO_UPDATE_SA_WR_VALID V_FW_CRYPTO_UPDATE_SA_WR_VALID(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_EGKEYID 12
+#define M_FW_CRYPTO_UPDATE_SA_WR_EGKEYID 0xfff
+#define V_FW_CRYPTO_UPDATE_SA_WR_EGKEYID(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_EGKEYID)
+#define G_FW_CRYPTO_UPDATE_SA_WR_EGKEYID(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_EGKEYID) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_EGKEYID)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN 11
+#define M_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN)
+#define G_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN)
+#define F_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN \
+ V_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW 7
+#define M_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW 0xf
+#define V_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI 0x7f
+#define V_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO 7
+#define M_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO 0x1ffffff
+#define V_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SALT_HI 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_SALT_HI 0x7f
+#define V_FW_CRYPTO_UPDATE_SA_WR_SALT_HI(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SALT_HI)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SALT_HI(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SALT_HI) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_SALT_HI)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SALT_LO 7
+#define M_FW_CRYPTO_UPDATE_SA_WR_SALT_LO 0x1ffffff
+#define V_FW_CRYPTO_UPDATE_SA_WR_SALT_LO(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SALT_LO)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SALT_LO(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SALT_LO) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_SALT_LO)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_KEYLEN 5
+#define M_FW_CRYPTO_UPDATE_SA_WR_KEYLEN 0x3
+#define V_FW_CRYPTO_UPDATE_SA_WR_KEYLEN(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_KEYLEN)
+#define G_FW_CRYPTO_UPDATE_SA_WR_KEYLEN(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_KEYLEN) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_KEYLEN)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH 3
+#define M_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH 0x3
+#define V_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ESNEN 2
+#define M_FW_CRYPTO_UPDATE_SA_WR_ESNEN 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_ESNEN(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ESNEN)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ESNEN(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ESNEN) & M_FW_CRYPTO_UPDATE_SA_WR_ESNEN)
+#define F_FW_CRYPTO_UPDATE_SA_WR_ESNEN V_FW_CRYPTO_UPDATE_SA_WR_ESNEN(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_MODE 1
+#define M_FW_CRYPTO_UPDATE_SA_WR_MODE 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_MODE(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_MODE)
+#define G_FW_CRYPTO_UPDATE_SA_WR_MODE(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_MODE) & M_FW_CRYPTO_UPDATE_SA_WR_MODE)
+#define F_FW_CRYPTO_UPDATE_SA_WR_MODE V_FW_CRYPTO_UPDATE_SA_WR_MODE(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_IPVER 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_IPVER 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_IPVER(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_IPVER)
+#define G_FW_CRYPTO_UPDATE_SA_WR_IPVER(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_IPVER) & M_FW_CRYPTO_UPDATE_SA_WR_IPVER)
+#define F_FW_CRYPTO_UPDATE_SA_WR_IPVER V_FW_CRYPTO_UPDATE_SA_WR_IPVER(1U)
+
/******************************************************************************
* C O M M A N D s
*********************/
@@ -4157,11 +5267,12 @@ enum fw_cmd_opcodes {
FW_FCOE_SPARAMS_CMD = 0x35,
FW_FCOE_STATS_CMD = 0x37,
FW_FCOE_FCF_CMD = 0x38,
- FW_DCB_IEEE_CMD = 0x3a,
- FW_DIAG_CMD = 0x3d,
+ FW_DCB_IEEE_CMD = 0x3a,
+ FW_DIAG_CMD = 0x3d,
FW_PTP_CMD = 0x3e,
FW_HMA_CMD = 0x3f,
- FW_LASTC2E_CMD = 0x40,
+ FW_JBOF_WIN_REG_CMD = 0x40,
+ FW_LASTC2E_CMD = 0x41,
FW_ERROR_CMD = 0x80,
FW_DEBUG_CMD = 0x81,
};
@@ -4246,7 +5357,7 @@ enum fw_ldst_addrspc {
FW_LDST_ADDRSPC_FUNC = 0x0028,
FW_LDST_ADDRSPC_FUNC_PCIE = 0x0029,
FW_LDST_ADDRSPC_FUNC_I2C = 0x002A, /* legacy */
- FW_LDST_ADDRSPC_LE = 0x0030,
+ FW_LDST_ADDRSPC_LE = 0x0030,
FW_LDST_ADDRSPC_I2C = 0x0038,
FW_LDST_ADDRSPC_PCIE_CFGS = 0x0040,
FW_LDST_ADDRSPC_PCIE_DBG = 0x0041,
@@ -4665,11 +5776,17 @@ enum fw_caps_config_nic {
enum fw_caps_config_toe {
FW_CAPS_CONFIG_TOE = 0x00000001,
+ FW_CAPS_CONFIG_TOE_SENDPATH = 0x00000002,
};
enum fw_caps_config_rdma {
FW_CAPS_CONFIG_RDMA_RDDP = 0x00000001,
FW_CAPS_CONFIG_RDMA_RDMAC = 0x00000002,
+ FW_CAPS_CONFIG_RDMA_ROCEV2 = 0x00000004,
+};
+
+/* Device capability bit(s) reported in fw_caps_config_cmd.nvmecaps. */
+enum fw_caps_config_nvme {
+ FW_CAPS_CONFIG_NVME_TCP = 0x00000001,
+};
enum fw_caps_config_iscsi {
@@ -4687,8 +5804,9 @@ enum fw_caps_config_iscsi {
enum fw_caps_config_crypto {
FW_CAPS_CONFIG_CRYPTO_LOOKASIDE = 0x00000001,
FW_CAPS_CONFIG_TLSKEYS = 0x00000002,
- FW_CAPS_CONFIG_IPSEC_INLINE = 0x00000004,
+ FW_CAPS_CONFIG_IPSEC_INLINE = 0x00000004, /* NIC over ipsecofld */
FW_CAPS_CONFIG_TLS_HW = 0x00000008,
+ FW_CAPS_CONFIG_OFLD_OVER_IPSEC_INLINE = 0x00000010,/* ofld over ipsecofld */
};
enum fw_caps_config_fcoe {
@@ -4716,7 +5834,7 @@ struct fw_caps_config_cmd {
__be16 nbmcaps;
__be16 linkcaps;
__be16 switchcaps;
- __be16 r3;
+ __be16 nvmecaps;
__be16 niccaps;
__be16 toecaps;
__be16 rdmacaps;
@@ -4840,6 +5958,8 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_DEV_512SGL_MR = 0x30,
FW_PARAMS_PARAM_DEV_KTLS_HW = 0x31,
FW_PARAMS_PARAM_DEV_VI_ENABLE_INGRESS_AFTER_LINKUP = 0x32,
+ FW_PARAMS_PARAM_DEV_TID_QID_SEL_MASK = 0x33,
+ FW_PARAMS_PARAM_DEV_TX_TPCHMAP = 0x3A,
};
/*
@@ -4911,6 +6031,8 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_TDDP_END = 0x0A,
FW_PARAMS_PARAM_PFVF_ISCSI_START = 0x0B,
FW_PARAMS_PARAM_PFVF_ISCSI_END = 0x0C,
+ /* no separate STAG/PBL START/END for nvmet.
+ * use same rdma stag/pbl memory range */
FW_PARAMS_PARAM_PFVF_STAG_START = 0x0D,
FW_PARAMS_PARAM_PFVF_STAG_END = 0x0E,
FW_PARAMS_PARAM_PFVF_RQ_START = 0x1F,
@@ -4943,7 +6065,7 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_HPFILTER_START = 0x32,
FW_PARAMS_PARAM_PFVF_HPFILTER_END = 0x33,
FW_PARAMS_PARAM_PFVF_TLS_START = 0x34,
- FW_PARAMS_PARAM_PFVF_TLS_END = 0x35,
+ FW_PARAMS_PARAM_PFVF_TLS_END = 0x35,
FW_PARAMS_PARAM_PFVF_RAWF_START = 0x36,
FW_PARAMS_PARAM_PFVF_RAWF_END = 0x37,
FW_PARAMS_PARAM_PFVF_RSSKEYINFO = 0x38,
@@ -4955,6 +6077,13 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_GET_SMT_START = 0x3E,
FW_PARAMS_PARAM_PFVF_GET_SMT_SIZE = 0x3F,
FW_PARAMS_PARAM_PFVF_LINK_STATE = 0x40,
+ FW_PARAMS_PARAM_PFVF_RRQ_START = 0x41,
+ FW_PARAMS_PARAM_PFVF_RRQ_END = 0x42,
+ FW_PARAMS_PARAM_PFVF_PKTHDR_START = 0x43,
+ FW_PARAMS_PARAM_PFVF_PKTHDR_END = 0x44,
+ FW_PARAMS_PARAM_PFVF_NIPSEC_TUNNEL = 0x45,
+ FW_PARAMS_PARAM_PFVF_NIPSEC_TRANSPORT = 0x46,
+ FW_PARAMS_PARAM_PFVF_OFLD_NIPSEC_TUNNEL = 0x47,
};
/*
@@ -4984,6 +6113,19 @@ enum fw_params_param_dmaq {
FW_PARAMS_PARAM_DMAQ_FLM_DCA = 0x30
};
+/* T7 layout of the DMAQ CONM context fields: congestion-type mode in
+ * bits 1:0 and a 4-bit channel vector at bits 5:2. */
+#define S_T7_DMAQ_CONM_CTXT_CNGTPMODE 0
+#define M_T7_DMAQ_CONM_CTXT_CNGTPMODE 0x3
+#define V_T7_DMAQ_CONM_CTXT_CNGTPMODE(x) ((x) << S_T7_DMAQ_CONM_CTXT_CNGTPMODE)
+#define G_T7_DMAQ_CONM_CTXT_CNGTPMODE(x) \
+ (((x) >> S_T7_DMAQ_CONM_CTXT_CNGTPMODE) & M_T7_DMAQ_CONM_CTXT_CNGTPMODE)
+
+#define S_T7_DMAQ_CONM_CTXT_CH_VEC 2
+#define M_T7_DMAQ_CONM_CTXT_CH_VEC 0xf
+#define V_T7_DMAQ_CONM_CTXT_CH_VEC(x) ((x) << S_T7_DMAQ_CONM_CTXT_CH_VEC)
+#define G_T7_DMAQ_CONM_CTXT_CH_VEC(x) \
+ (((x) >> S_T7_DMAQ_CONM_CTXT_CH_VEC) & M_T7_DMAQ_CONM_CTXT_CH_VEC)
+
+
/*
* chnet parameters
*/
@@ -5199,7 +6341,8 @@ struct fw_pfvf_cmd {
enum fw_iq_type {
FW_IQ_TYPE_FL_INT_CAP,
FW_IQ_TYPE_NO_FL_INT_CAP,
- FW_IQ_TYPE_VF_CQ
+ FW_IQ_TYPE_VF_CQ,
+ FW_IQ_TYPE_CQ,
};
enum fw_iq_iqtype {
@@ -5787,6 +6930,12 @@ struct fw_eq_mngt_cmd {
(((x) >> S_FW_EQ_MNGT_CMD_EQSTOP) & M_FW_EQ_MNGT_CMD_EQSTOP)
#define F_FW_EQ_MNGT_CMD_EQSTOP V_FW_EQ_MNGT_CMD_EQSTOP(1U)
+#define S_FW_EQ_MNGT_CMD_COREGROUP 16
+#define M_FW_EQ_MNGT_CMD_COREGROUP 0x3f
+#define V_FW_EQ_MNGT_CMD_COREGROUP(x) ((x) << S_FW_EQ_MNGT_CMD_COREGROUP)
+#define G_FW_EQ_MNGT_CMD_COREGROUP(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_COREGROUP) & M_FW_EQ_MNGT_CMD_COREGROUP)
+
#define S_FW_EQ_MNGT_CMD_CMPLIQID 20
#define M_FW_EQ_MNGT_CMD_CMPLIQID 0xfff
#define V_FW_EQ_MNGT_CMD_CMPLIQID(x) ((x) << S_FW_EQ_MNGT_CMD_CMPLIQID)
@@ -5977,6 +7126,12 @@ struct fw_eq_eth_cmd {
(((x) >> S_FW_EQ_ETH_CMD_EQSTOP) & M_FW_EQ_ETH_CMD_EQSTOP)
#define F_FW_EQ_ETH_CMD_EQSTOP V_FW_EQ_ETH_CMD_EQSTOP(1U)
+#define S_FW_EQ_ETH_CMD_COREGROUP 16
+#define M_FW_EQ_ETH_CMD_COREGROUP 0x3f
+#define V_FW_EQ_ETH_CMD_COREGROUP(x) ((x) << S_FW_EQ_ETH_CMD_COREGROUP)
+#define G_FW_EQ_ETH_CMD_COREGROUP(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_COREGROUP) & M_FW_EQ_ETH_CMD_COREGROUP)
+
#define S_FW_EQ_ETH_CMD_EQID 0
#define M_FW_EQ_ETH_CMD_EQID 0xfffff
#define V_FW_EQ_ETH_CMD_EQID(x) ((x) << S_FW_EQ_ETH_CMD_EQID)
@@ -6190,6 +7345,12 @@ struct fw_eq_ctrl_cmd {
(((x) >> S_FW_EQ_CTRL_CMD_EQSTOP) & M_FW_EQ_CTRL_CMD_EQSTOP)
#define F_FW_EQ_CTRL_CMD_EQSTOP V_FW_EQ_CTRL_CMD_EQSTOP(1U)
+#define S_FW_EQ_CTRL_CMD_COREGROUP 16
+#define M_FW_EQ_CTRL_CMD_COREGROUP 0x3f
+#define V_FW_EQ_CTRL_CMD_COREGROUP(x) ((x) << S_FW_EQ_CTRL_CMD_COREGROUP)
+#define G_FW_EQ_CTRL_CMD_COREGROUP(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_COREGROUP) & M_FW_EQ_CTRL_CMD_COREGROUP)
+
#define S_FW_EQ_CTRL_CMD_CMPLIQID 20
#define M_FW_EQ_CTRL_CMD_CMPLIQID 0xfff
#define V_FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << S_FW_EQ_CTRL_CMD_CMPLIQID)
@@ -6377,6 +7538,12 @@ struct fw_eq_ofld_cmd {
(((x) >> S_FW_EQ_OFLD_CMD_EQSTOP) & M_FW_EQ_OFLD_CMD_EQSTOP)
#define F_FW_EQ_OFLD_CMD_EQSTOP V_FW_EQ_OFLD_CMD_EQSTOP(1U)
+#define S_FW_EQ_OFLD_CMD_COREGROUP 16
+#define M_FW_EQ_OFLD_CMD_COREGROUP 0x3f
+#define V_FW_EQ_OFLD_CMD_COREGROUP(x) ((x) << S_FW_EQ_OFLD_CMD_COREGROUP)
+#define G_FW_EQ_OFLD_CMD_COREGROUP(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_COREGROUP) & M_FW_EQ_OFLD_CMD_COREGROUP)
+
#define S_FW_EQ_OFLD_CMD_EQID 0
#define M_FW_EQ_OFLD_CMD_EQID 0xfffff
#define V_FW_EQ_OFLD_CMD_EQID(x) ((x) << S_FW_EQ_OFLD_CMD_EQID)
@@ -7285,7 +8452,8 @@ fec_supported(uint32_t caps)
{
return ((caps & (FW_PORT_CAP32_SPEED_25G | FW_PORT_CAP32_SPEED_50G |
- FW_PORT_CAP32_SPEED_100G)) != 0);
+ FW_PORT_CAP32_SPEED_100G | FW_PORT_CAP32_SPEED_200G |
+ FW_PORT_CAP32_SPEED_400G)) != 0);
}
enum fw_port_action {
@@ -7799,6 +8967,8 @@ enum fw_port_type {
FW_PORT_TYPE_SFP28 = 20, /* No, 1, 25G/10G/1G */
FW_PORT_TYPE_KR_SFP28 = 21, /* No, 1, 25G/10G/1G using Backplane */
FW_PORT_TYPE_KR_XLAUI = 22, /* No, 4, 40G/10G/1G, No AN*/
+ FW_PORT_TYPE_SFP56 = 26,
+ FW_PORT_TYPE_QSFP56 = 27,
FW_PORT_TYPE_NONE = M_FW_PORT_CMD_PTYPE
};
@@ -8862,7 +10032,9 @@ struct fw_devlog_cmd {
__u8 r2[7];
__be32 memtype_devlog_memaddr16_devlog;
__be32 memsize_devlog;
- __be32 r3[2];
+ __u8 num_devlog;
+ __u8 r3[3];
+ __be32 r4;
};
#define S_FW_DEVLOG_CMD_MEMTYPE_DEVLOG 28
@@ -9786,6 +10958,45 @@ struct fw_hma_cmd {
#define G_FW_HMA_CMD_ADDR_SIZE(x) \
(((x) >> S_FW_HMA_CMD_ADDR_SIZE) & M_FW_HMA_CMD_ADDR_SIZE)
+struct fw_jbof_win_reg_cmd {
+ __be32 op_pkd;
+ __be32 alloc_to_len16;
+ __be32 window_num_pcie_params;
+ __be32 window_size;
+ __be64 bus_addr;
+ __be64 phy_address;
+};
+
+#define S_FW_JBOF_WIN_REG_CMD_ALLOC 31
+#define M_FW_JBOF_WIN_REG_CMD_ALLOC 0x1
+#define V_FW_JBOF_WIN_REG_CMD_ALLOC(x) ((x) << S_FW_JBOF_WIN_REG_CMD_ALLOC)
+#define G_FW_JBOF_WIN_REG_CMD_ALLOC(x) \
+ (((x) >> S_FW_JBOF_WIN_REG_CMD_ALLOC) & M_FW_JBOF_WIN_REG_CMD_ALLOC)
+#define F_FW_JBOF_WIN_REG_CMD_ALLOC V_FW_JBOF_WIN_REG_CMD_ALLOC(1U)
+
+#define S_FW_JBOF_WIN_REG_CMD_FREE 30
+#define M_FW_JBOF_WIN_REG_CMD_FREE 0x1
+#define V_FW_JBOF_WIN_REG_CMD_FREE(x) ((x) << S_FW_JBOF_WIN_REG_CMD_FREE)
+#define G_FW_JBOF_WIN_REG_CMD_FREE(x) \
+ (((x) >> S_FW_JBOF_WIN_REG_CMD_FREE) & M_FW_JBOF_WIN_REG_CMD_FREE)
+#define F_FW_JBOF_WIN_REG_CMD_FREE V_FW_JBOF_WIN_REG_CMD_FREE(1U)
+
+#define S_FW_JBOF_WIN_REG_CMD_WINDOW_NUM 7
+#define M_FW_JBOF_WIN_REG_CMD_WINDOW_NUM 0xf
+#define V_FW_JBOF_WIN_REG_CMD_WINDOW_NUM(x) \
+ ((x) << S_FW_JBOF_WIN_REG_CMD_WINDOW_NUM)
+#define G_FW_JBOF_WIN_REG_CMD_WINDOW_NUM(x) \
+ (((x) >> S_FW_JBOF_WIN_REG_CMD_WINDOW_NUM) & \
+ M_FW_JBOF_WIN_REG_CMD_WINDOW_NUM)
+
+#define S_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS 0
+#define M_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS 0x7f
+#define V_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS(x) \
+ ((x) << S_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS)
+#define G_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS(x) \
+ (((x) >> S_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS) & \
+ M_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS)
+
/******************************************************************************
* P C I E F W R E G I S T E R
**************************************/
@@ -9914,8 +11125,15 @@ enum pcie_fw_eval {
*/
#define PCIE_FW_PF_DEVLOG 7
+#define S_PCIE_FW_PF_DEVLOG_COUNT_MSB 31
+#define M_PCIE_FW_PF_DEVLOG_COUNT_MSB 0x1
+#define V_PCIE_FW_PF_DEVLOG_COUNT_MSB(x) \
+ ((x) << S_PCIE_FW_PF_DEVLOG_COUNT_MSB)
+#define G_PCIE_FW_PF_DEVLOG_COUNT_MSB(x) \
+ (((x) >> S_PCIE_FW_PF_DEVLOG_COUNT_MSB) & M_PCIE_FW_PF_DEVLOG_COUNT_MSB)
+
#define S_PCIE_FW_PF_DEVLOG_NENTRIES128 28
-#define M_PCIE_FW_PF_DEVLOG_NENTRIES128 0xf
+#define M_PCIE_FW_PF_DEVLOG_NENTRIES128 0x7
#define V_PCIE_FW_PF_DEVLOG_NENTRIES128(x) \
((x) << S_PCIE_FW_PF_DEVLOG_NENTRIES128)
#define G_PCIE_FW_PF_DEVLOG_NENTRIES128(x) \
@@ -9928,8 +11146,15 @@ enum pcie_fw_eval {
#define G_PCIE_FW_PF_DEVLOG_ADDR16(x) \
(((x) >> S_PCIE_FW_PF_DEVLOG_ADDR16) & M_PCIE_FW_PF_DEVLOG_ADDR16)
+#define S_PCIE_FW_PF_DEVLOG_COUNT_LSB 3
+#define M_PCIE_FW_PF_DEVLOG_COUNT_LSB 0x1
+#define V_PCIE_FW_PF_DEVLOG_COUNT_LSB(x) \
+ ((x) << S_PCIE_FW_PF_DEVLOG_COUNT_LSB)
+#define G_PCIE_FW_PF_DEVLOG_COUNT_LSB(x) \
+ (((x) >> S_PCIE_FW_PF_DEVLOG_COUNT_LSB) & M_PCIE_FW_PF_DEVLOG_COUNT_LSB)
+
#define S_PCIE_FW_PF_DEVLOG_MEMTYPE 0
-#define M_PCIE_FW_PF_DEVLOG_MEMTYPE 0xf
+#define M_PCIE_FW_PF_DEVLOG_MEMTYPE 0x7
#define V_PCIE_FW_PF_DEVLOG_MEMTYPE(x) ((x) << S_PCIE_FW_PF_DEVLOG_MEMTYPE)
#define G_PCIE_FW_PF_DEVLOG_MEMTYPE(x) \
(((x) >> S_PCIE_FW_PF_DEVLOG_MEMTYPE) & M_PCIE_FW_PF_DEVLOG_MEMTYPE)
@@ -9969,7 +11194,8 @@ struct fw_hdr {
enum fw_hdr_chip {
FW_HDR_CHIP_T4,
FW_HDR_CHIP_T5,
- FW_HDR_CHIP_T6
+ FW_HDR_CHIP_T6,
+ FW_HDR_CHIP_T7
};
#define S_FW_HDR_FW_VER_MAJOR 24
@@ -10015,6 +11241,11 @@ enum {
T6FW_VERSION_MINOR = 27,
T6FW_VERSION_MICRO = 5,
T6FW_VERSION_BUILD = 0,
+
+ T7FW_VERSION_MAJOR = 2,
+ T7FW_VERSION_MINOR = 0,
+ T7FW_VERSION_MICRO = 0,
+ T7FW_VERSION_BUILD = 0,
};
enum {
@@ -10050,6 +11281,17 @@ enum {
T6FW_HDR_INTFVER_ISCSI = 0x00,
T6FW_HDR_INTFVER_FCOEPDU= 0x00,
T6FW_HDR_INTFVER_FCOE = 0x00,
+
+ /* T7
+ */
+ T7FW_HDR_INTFVER_NIC = 0x00,
+ T7FW_HDR_INTFVER_VNIC = 0x00,
+ T7FW_HDR_INTFVER_OFLD = 0x00,
+ T7FW_HDR_INTFVER_RI = 0x00,
+ T7FW_HDR_INTFVER_ISCSIPDU= 0x00,
+ T7FW_HDR_INTFVER_ISCSI = 0x00,
+ T7FW_HDR_INTFVER_FCOEPDU= 0x00,
+ T7FW_HDR_INTFVER_FCOE = 0x00,
};
#define FW_VERSION32(MAJOR, MINOR, MICRO, BUILD) ( \
@@ -10085,7 +11327,7 @@ struct fw_ephy_hdr {
enum {
FW_EPHY_HDR_MAGIC = 0x65706879,
};
-
+
struct fw_ifconf_dhcp_info {
__be32 addr;
__be32 mask;
diff --git a/sys/dev/cxgbe/firmware/t7fw_cfg.txt b/sys/dev/cxgbe/firmware/t7fw_cfg.txt
new file mode 100644
index 000000000000..499af3675bd9
--- /dev/null
+++ b/sys/dev/cxgbe/firmware/t7fw_cfg.txt
@@ -0,0 +1,644 @@
+# Chelsio T7 Factory Default configuration file.
+#
+# Copyright (C) 2014-2015 Chelsio Communications. All rights reserved.
+#
+# DO NOT MODIFY THIS FILE UNDER ANY CIRCUMSTANCES. MODIFICATION OF THIS FILE
+# WILL RESULT IN A NON-FUNCTIONAL ADAPTER AND MAY RESULT IN PHYSICAL DAMAGE
+# TO ADAPTERS.
+
+
+# This file provides the default, power-on configuration for 2-port T7-based
+# adapters shipped from the factory. These defaults are designed to address
+# the needs of the vast majority of Terminator customers. The basic idea is to
+# have a default configuration which allows a customer to plug a Terminator
+# adapter in and have it work regardless of OS, driver or application except in
+# the most unusual and/or demanding customer applications.
+#
+# Many of the Terminator resources which are described by this configuration
+# are finite. This requires balancing the configuration/operation needs of
+# device drivers across OSes and a large number of customer application.
+#
+# Some of the more important resources to allocate and their constraints are:
+# 1. Virtual Interfaces: 256.
+# 2. Ingress Queues with Free Lists: 1024.
+# 3. Egress Queues: 128K.
+# 4. MSI-X Vectors: 1088.
+# 5. Multi-Port Support (MPS) TCAM: 336 entries to support MAC destination
+# address matching on Ingress Packets.
+#
+# Some of the important OS/Driver resource needs are:
+# 6. Some OS Drivers will manage all resources through a single Physical
+# Function (currently PF4 but it could be any Physical Function).
+# 7. Some OS Drivers will manage different ports and functions (NIC,
+# storage, etc.) on different Physical Functions. For example, NIC
+# functions for ports 0-1 on PF0-1, FCoE on PF4, iSCSI on PF5, etc.
+#
+# Some of the customer application needs which need to be accommodated:
+# 8. Some customers will want to support large CPU count systems with
+# good scaling. Thus, we'll need to accommodate a number of
+# Ingress Queues and MSI-X Vectors to allow up to some number of CPUs
+# to be involved per port and per application function. For example,
+# in the case where all ports and application functions will be
+# managed via a single Unified PF and we want to accommodate scaling up
+# to 8 CPUs, we would want:
+#
+# 2 ports *
+# 3 application functions (NIC, FCoE, iSCSI) per port *
+# 16 Ingress Queue/MSI-X Vectors per application function
+#
+# for a total of 96 Ingress Queues and MSI-X Vectors on the Unified PF.
+# (Plus a few for Firmware Event Queues, etc.)
+#
+# 9. Some customers will want to use PCI-E SR-IOV Capability to allow Virtual
+#    Machines to directly access T7 functionality via SR-IOV Virtual Functions
+# and "PCI Device Passthrough" -- this is especially true for the NIC
+# application functionality.
+#
+
+
+# Global configuration settings.
+#
+[global]
+ rss_glb_config_mode = basicvirtual
+ rss_glb_config_options = tnlmapen,hashtoeplitz,tnlalllkp
+
+ # PL_TIMEOUT register
+ pl_timeout_value = 200 # the timeout value in units of us
+
+ # The following Scatter Gather Engine (SGE) settings assume a 4KB Host
+ # Page Size and a 64B L1 Cache Line Size. It programs the
+ # EgrStatusPageSize and IngPadBoundary to 64B and the PktShift to 2.
+ # If a Master PF Driver finds itself on a machine with different
+ # parameters, then the Master PF Driver is responsible for initializing
+ # these parameters to appropriate values.
+ #
+ # Notes:
+ # 1. The Free List Buffer Sizes below are raw and the firmware will
+ # round them up to the Ingress Padding Boundary.
+ # 2. The SGE Timer Values below are expressed below in microseconds.
+ # The firmware will convert these values to Core Clock Ticks when
+ # it processes the configuration parameters.
+ #
+ reg[0x1008] = 0x40810/0x21c70 # SGE_CONTROL
+ reg[0x100c] = 0x22222222 # SGE_HOST_PAGE_SIZE
+ reg[0x10a0] = 0x01040810 # SGE_INGRESS_RX_THRESHOLD
+ reg[0x1044] = 4096 # SGE_FL_BUFFER_SIZE0
+ reg[0x1048] = 65536 # SGE_FL_BUFFER_SIZE1
+ reg[0x104c] = 1536 # SGE_FL_BUFFER_SIZE2
+ reg[0x1050] = 9024 # SGE_FL_BUFFER_SIZE3
+ reg[0x1054] = 9216 # SGE_FL_BUFFER_SIZE4
+ reg[0x1058] = 2048 # SGE_FL_BUFFER_SIZE5
+ reg[0x105c] = 128 # SGE_FL_BUFFER_SIZE6
+ reg[0x1060] = 8192 # SGE_FL_BUFFER_SIZE7
+ reg[0x1064] = 16384 # SGE_FL_BUFFER_SIZE8
+ reg[0x10a4] = 0xa000a000/0xf000f000 # SGE_DBFIFO_STATUS
+ reg[0x10a8] = 0x402000/0x402000 # SGE_DOORBELL_CONTROL
+ sge_timer_value = 5, 10, 20, 50, 100, 200 # SGE_TIMER_VALUE* in usecs
+ reg[0x10c4] = 0x20000000/0x20000000 # GK_CONTROL, enable 5th thread
+ reg[0x173c] = 0x2/0x2
+
+ reg[0x1750] = 0x01000000/0x03c00000 # RDMA_INV_Handling = 1
+ # terminate_status_en = 0
+ # DISABLE = 0
+
+ #DBQ Timer duration = 1 cclk cycle duration * (sge_dbq_timertick+1) * sge_dbq_timer
+ #SGE DBQ tick value. All timers are multiple of this value
+ sge_dbq_timertick = 50 #in usecs
+ sge_dbq_timer = 1, 2, 4, 6, 8, 10, 12, 16
+
+ #CIM_QUEUE_FEATURE_DISABLE.obq_eom_enable bit needs to be set to 1 for CmdMore handling support
+ reg[0x7c4c] = 0x20/0x20
+
+ # enable TP_OUT_CONFIG.IPIDSPLITMODE
+ reg[0x7d04] = 0x00010000/0x00010000
+
+ reg[0x7dc0] = 0x0e2f8849 # TP_SHIFT_CNT
+
+ reg[0x46004] = 0x3/0x3 #Crypto core reset
+
+ #Tick granularities in kbps
+ tsch_ticks = 100000, 10000, 1000, 10
+
+ # TP_VLAN_PRI_MAP to select filter tuples and enable ServerSram
+ # filter control: compact, fcoemask
+ # server sram : srvrsram
+ # filter tuples : fragmentation, mpshittype, macmatch, ethertype,
+ # protocol, tos, vlan, vnic_id, port, fcoe
+	# valid filterModes are described in the Terminator 5 Data Book
+ filterMode = fcoemask, srvrsram, ipsec, rocev2, fragmentation, mpshittype, protocol, vlan, port, fcoe
+
+ # filter tuples enforced in LE active region (equal to or subset of filterMode)
+ filterMask = protocol, ipsec, rocev2, fcoe
+
+ # Percentage of dynamic memory (in either the EDRAM or external MEM)
+ # to use for TP RX payload
+ tp_pmrx = 30
+
+ # TP RX payload page size
+ tp_pmrx_pagesize = 64K
+
+ # Percentage of dynamic memory (in either the EDRAM or external MEM)
+ # to use for TP TX payload
+ tp_pmtx = 50
+
+ # TP TX payload page size
+ tp_pmtx_pagesize = 64K
+
+ # TP OFLD MTUs
+ tp_mtus = 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
+
+ # enable TP_OUT_CONFIG.IPIDSPLITMODE and CRXPKTENC
+ reg[0x7d04] = 0x00010008/0x00010008
+
+ # TP_GLOBAL_CONFIG
+ reg[0x7d08] = 0x00000800/0x00000800 # set IssFromCplEnable
+
+ # TP_PC_CONFIG
+ reg[0x7d48] = 0x00000000/0x00000400 # clear EnableFLMError
+
+ # TP_PARA_REG0
+ reg[0x7d60] = 0x06000000/0x07000000 # set InitCWND to 6
+
+ # ULPRX iSCSI Page Sizes
+ reg[0x19168] = 0x04020100 # 64K, 16K, 8K and 4K
+
+ # LE_DB_CONFIG
+ reg[0x19c04] = 0x00400000/0x00440000 # LE Server SRAM Enable,
+ # LE IPv4 compression disabled
+ # LE_DB_HASH_CONFIG
+ reg[0x19c28] = 0x00800000/0x01f00000 # LE Hash bucket size 8,
+
+ # ULP_TX_CONFIG
+ reg[0x8dc0] = 0x00000104/0x02000104 # Enable ITT on PI err
+ # Enable more error msg for ...
+ # TPT error.
+ # Err2uP = 0
+
+ #ULP_RX_CTL1
+ reg[0x19330] = 0x000000f0/0x000000f0 # RDMA_Invld_Msg_Dis = 3
+ # ROCE_Invld_Msg_Dis = 3
+
+	#Enable iscsi completion moderation feature, disable rdma invalidate in ulptx
+ reg[0x1925c] = 0x000041c0/0x000031d0 # Enable offset decrement after
+ # PI extraction and before DDP.
+ # ulp insert pi source info in
+ # DIF.
+ # Enable iscsi hdr cmd mode.
+ # iscsi force cmd mode.
+ # Enable iscsi cmp mode.
+ # terminate_status_en = 0
+
+ #ULP_RX_CQE_GEN_EN
+ reg[0x19250] = 0x0/0x3 # Termimate_msg = 0
+ # Terminate_with_err = 0
+
+ gc_disable = 3 # 3 - disable gc for hma/mc1 and mc0,
+ # 2 - disable gc for mc1/hma enable mc0,
+ # 1 - enable gc for mc1/hma disable mc0,
+ # 0 - enable gc for mc1/hma and for mc0,
+ # default gc enabled.
+
+ # HMA configuration (uncomment following lines to enable HMA)
+ hma_size = 92 # Size (in MBs) of host memory expected
+ hma_regions = iscsi,rrq,tls,ddp,pmrx,stag,pbl,rq # What all regions to place in host memory
+
+ #mc[0]=0
+ #mc[1]=0
+
+# Some "definitions" to make the rest of this a bit more readable. We support
+# 4 ports, 3 functions (NIC, FCoE and iSCSI), scaling up to 8 "CPU Queue Sets"
+# per function per port ...
+#
+# NMSIX = 1088 # available MSI-X Vectors
+# NVI = 256 # available Virtual Interfaces
+# NMPSTCAM = 336 # MPS TCAM entries
+#
+# NPORTS = 2 # ports
+# NCPUS = 16 # CPUs we want to support scalably
+# NFUNCS = 3 # functions per port (NIC, FCoE, iSCSI)
+
+# Breakdown of Virtual Interface/Queue/Interrupt resources for the "Unified
+# PF" which many OS Drivers will use to manage most or all functions.
+#
+# Each Ingress Queue can use one MSI-X interrupt but some Ingress Queues can
+# use Forwarded Interrupt Ingress Queues. For these latter, an Ingress Queue
+# would be created and the Queue ID of a Forwarded Interrupt Ingress Queue
+# will be specified as the "Ingress Queue Asynchronous Destination Index."
+# Thus, the number of MSI-X Vectors assigned to the Unified PF will be less
+# than or equal to the number of Ingress Queues ...
+#
+# NVI_NIC = 4 # NIC access to NPORTS
+# NFLIQ_NIC = 32 # NIC Ingress Queues with Free Lists
+# NETHCTRL_NIC = 32 # NIC Ethernet Control/TX Queues
+# NEQ_NIC = 64 # NIC Egress Queues (FL, ETHCTRL/TX)
+# NMPSTCAM_NIC = 16 # NIC MPS TCAM Entries (NPORTS*4)
+# NMSIX_NIC = 32 # NIC MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_OFLD = 0 # Offload uses NIC function to access ports
+# NFLIQ_OFLD = 16 # Offload Ingress Queues with Free Lists
+# NETHCTRL_OFLD = 0 # Offload Ethernet Control/TX Queues
+# NEQ_OFLD = 16 # Offload Egress Queues (FL)
+# NMPSTCAM_OFLD = 0 # Offload MPS TCAM Entries (uses NIC's)
+# NMSIX_OFLD = 16 # Offload MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_RDMA = 0 # RDMA uses NIC function to access ports
+# NFLIQ_RDMA = 4 # RDMA Ingress Queues with Free Lists
+# NETHCTRL_RDMA = 0 # RDMA Ethernet Control/TX Queues
+# NEQ_RDMA = 4 # RDMA Egress Queues (FL)
+# NMPSTCAM_RDMA = 0 # RDMA MPS TCAM Entries (uses NIC's)
+# NMSIX_RDMA = 4 # RDMA MSI-X Interrupt Vectors (FLIQ)
+#
+# NEQ_WD = 128 # Wire Direct TX Queues and FLs
+# NETHCTRL_WD = 64 # Wire Direct TX Queues
+# NFLIQ_WD = 64 ` # Wire Direct Ingress Queues with Free Lists
+#
+# NVI_ISCSI = 4 # ISCSI access to NPORTS
+# NFLIQ_ISCSI = 4 # ISCSI Ingress Queues with Free Lists
+# NETHCTRL_ISCSI = 0 # ISCSI Ethernet Control/TX Queues
+# NEQ_ISCSI = 4 # ISCSI Egress Queues (FL)
+# NMPSTCAM_ISCSI = 4 # ISCSI MPS TCAM Entries (NPORTS)
+# NMSIX_ISCSI = 4 # ISCSI MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_FCOE = 4 # FCOE access to NPORTS
+# NFLIQ_FCOE = 34 # FCOE Ingress Queues with Free Lists
+# NETHCTRL_FCOE = 32 # FCOE Ethernet Control/TX Queues
+# NEQ_FCOE = 66 # FCOE Egress Queues (FL)
+# NMPSTCAM_FCOE = 32 # FCOE MPS TCAM Entries (NPORTS)
+# NMSIX_FCOE = 34 # FCOE MSI-X Interrupt Vectors (FLIQ)
+
+# Two extra Ingress Queues per function for Firmware Events and Forwarded
+# Interrupts, and two extra interrupts per function for Firmware Events (or a
+# Forwarded Interrupt Queue) and General Interrupts per function.
+#
+# NFLIQ_EXTRA = 6 # "extra" Ingress Queues 2*NFUNCS (Firmware and
+# # Forwarded Interrupts
+# NMSIX_EXTRA = 6 # extra interrupts 2*NFUNCS (Firmware and
+# # General Interrupts
+
+# Microsoft HyperV resources. The HyperV Virtual Ingress Queues will have
+# their interrupts forwarded to another set of Forwarded Interrupt Queues.
+#
+# NVI_HYPERV = 16 # VMs we want to support
+# NVIIQ_HYPERV = 2 # Virtual Ingress Queues with Free Lists per VM
+# NFLIQ_HYPERV = 40 # VIQs + NCPUS Forwarded Interrupt Queues
+# NEQ_HYPERV = 32 # VIQs Free Lists
+# NMPSTCAM_HYPERV = 16 # MPS TCAM Entries (NVI_HYPERV)
+# NMSIX_HYPERV = 8 # NCPUS Forwarded Interrupt Queues
+
+# Adding all of the above Unified PF resource needs together: (NIC + OFLD +
+# RDMA + ISCSI + FCOE + EXTRA + HYPERV)
+#
+# NVI_UNIFIED = 28
+# NFLIQ_UNIFIED = 106
+# NETHCTRL_UNIFIED = 32
+# NEQ_UNIFIED = 124
+# NMPSTCAM_UNIFIED = 40
+#
+# The sum of all the MSI-X resources above is 74 MSI-X Vectors but we'll round
+# that up to 128 to make sure the Unified PF doesn't run out of resources.
+#
+# NMSIX_UNIFIED = 128
+#
+# The Storage PFs could need up to NPORTS*NCPUS + NMSIX_EXTRA MSI-X Vectors
+# which is 34 but they're probably safe with 32.
+#
+# NMSIX_STORAGE = 32
+
+# Note: The UnifiedPF is PF4 which doesn't have any Virtual Functions
+# associated with it. Thus, the MSI-X Vector allocations we give to the
+# UnifiedPF aren't inherited by any Virtual Functions. As a result we can
+# provision many more Virtual Functions than we can if the UnifiedPF were
+# one of PF0-1.
+#
+
+# All of the below PCI-E parameters are actually stored in various *_init.txt
+# files. We include them below essentially as comments.
+#
+# For PF0-1 we assign 8 vectors each for NIC Ingress Queues of the associated
+# ports 0-1.
+#
+# For PF4, the Unified PF, we give it an MSI-X Table Size as outlined above.
+#
+# For PF5-6 we assign enough MSI-X Vectors to support FCoE and iSCSI
+# storage applications across all four possible ports.
+#
+# Additionally, since the UnifiedPF isn't one of the per-port Physical
+# Functions, we give the UnifiedPF and the PF0-1 Physical Functions
+# different PCI Device IDs which will allow Unified and Per-Port Drivers
+# to directly select the type of Physical Function to which they wish to be
+# attached.
+#
+# Note that the actual values used for the PCI-E Intellectual Property will be
+# 1 less than those below since that's the way it "counts" things. For
+# readability, we use the number we actually mean ...
+#
+# PF0_INT = 8 # NCPUS
+# PF1_INT = 8 # NCPUS
+# PF0_3_INT = 32 # PF0_INT + PF1_INT + PF2_INT + PF3_INT
+#
+# PF4_INT = 128 # NMSIX_UNIFIED
+# PF5_INT = 32 # NMSIX_STORAGE
+# PF6_INT = 32 # NMSIX_STORAGE
+# PF7_INT = 0 # Nothing Assigned
+# PF4_7_INT = 192 # PF4_INT + PF5_INT + PF6_INT + PF7_INT
+#
+# PF0_7_INT = 224 # PF0_3_INT + PF4_7_INT
+#
+# With the above we can get 17 VFs/PF0-3 (limited by 336 MPS TCAM entries)
+# but we'll lower that to 16 to make our total 64 and a nice power of 2 ...
+#
+# NVF = 16
+
+
+# For those OSes which manage different ports on different PFs, we need
+# only enough resources to support a single port's NIC application functions
+# on PF0-3. The below assumes that we're only doing NIC with NCPUS "Queue
+# Sets" for ports 0-3. The FCoE and iSCSI functions for such OSes will be
+# managed on the "storage PFs" (see below).
+#
+
+[function "0"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port
+
+
+[function "1"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port
+
+[function "2"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ #pmask = 0x4 # access to only one port
+ pmask = 0x1 # access to only one port
+
+[function "3"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ #pmask = 0x2 # access to only one port
+
+# Some OS Drivers manage all application functions for all ports via PF4.
+# Thus we need to provide a large number of resources here. For Egress
+# Queues we need to account for both TX Queues as well as Free List Queues
+# (because the host is responsible for producing Free List Buffers for the
+# hardware to consume).
+#
+
+[function "4"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 28 # NVI_UNIFIED
+ niqflint = 170 # NFLIQ_UNIFIED + NLFIQ_WD
+ nethctrl = 224 # NETHCTRL_UNIFIED + NETHCTRL_WD
+ neq = 252 # NEQ_UNIFIED + NEQ_WD
+ nqpcq = 12288
+ nexactf = 40 # NMPSTCAM_UNIFIED
+ nrawf = 4
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nethofld = 1024 # number of user mode ethernet flow contexts
+ ncrypto_lookaside = 32
+ nclip = 320 # number of clip region entries
+ nfilter = 480 # number of filter region entries
+ nserver = 480 # number of server region entries
+ nhash = 12288 # number of hash region entries
+ nhpfilter = 64 # number of high priority filter region entries
+ #protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif, tlskeys, crypto_lookaside, ipsec_inline, rocev2, nic_hashfilter, ofld_sendpath
+ protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif, tlskeys, crypto_lookaside, ipsec_inline, rocev2, nic_hashfilter, nvme_tcp
+ tp_l2t = 3072
+ tp_ddp = 2
+ tp_ddp_iscsi = 2
+ tp_tls_key = 3
+ tp_tls_mxrxsize = 33792 # 32768 + 1024, governs max rx data, pm max xfer len, rx coalesce sizes
+ tp_stag = 2
+ tp_pbl = 5
+ tp_rq = 7
+ tp_rrq = 4
+ tp_srq = 128
+ nipsec_tunnel16 = 64 # in unit of 16
+ nipsec_transport16 = 191 # in unit of 16
+
+
+# We have FCoE and iSCSI storage functions on PF5 and PF6 each of which may
+# need to have Virtual Interfaces on each of the four ports with up to NCPUS
+# "Queue Sets" each.
+#
+[function "5"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NPORTS
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 64 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
+ nexactf = 16 # (NPORTS *(no of snmc grp + 1 hw mac) + 1 anmc grp)) rounded to 16.
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nserver = 16
+ nhash = 1536
+ tp_l2t = 508
+ protocol = iscsi_initiator_fofld
+ tp_ddp_iscsi = 2
+ iscsi_ntask = 2048
+ iscsi_nsess = 2048
+ iscsi_nconn_per_session = 1
+ iscsi_ninitiator_instance = 64
+
+[function "6"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NPORTS
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 66 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX) + 2 (EXTRA)
+ nexactf = 32 # NPORTS + adding 28 exact entries for FCoE
+ # which is OK since < MIN(SUM PF0..3, PF4)
+ # and we never load PF0..3 and PF4 concurrently
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nhash = 1536
+ tp_l2t = 4
+ protocol = fcoe_initiator
+ tp_ddp = 1
+ fcoe_nfcf = 16
+ fcoe_nvnp = 32
+ fcoe_nssn = 1024
+
+# Following function 7 is used by embedded ARM to communicate to
+# the firmware.
+[function "7"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NVI_UNIFIED
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nexactf = 8 # NPORTS + DCBX +
+ nfilter = 16 # number of filter region entries
+ #nhpfilter = 16 # number of high priority filter region entries
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 64 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
+ nserver = 16
+ nhash = 1024
+ tp_l2t = 512
+ protocol = nic_vm, ofld, rddp, rdmac, tlskeys, ipsec_inline, rocev2, nvme_tcp
+
+# The following function, 1023, is not an actual PCIE function but is used to
+# configure and reserve firmware internal resources that come from the global
+# resource pool.
+#
+[function "1023"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NVI_UNIFIED
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nexactf = 8 # NPORTS + DCBX +
+ nfilter = 16 # number of filter region entries
+ #nhpfilter = 0 # number of high priority filter region entries
+
+
+# For Virtual functions, we only allow NIC functionality and we only allow
+# access to one port (1 << PF). Note that because of limitations in the
+# Scatter Gather Engine (SGE) hardware which checks writes to VF KDOORBELL
+# and GTS registers, the number of Ingress and Egress Queues must be a power
+# of 2.
+#
+[function "0/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port ...
+
+
+[function "1/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port ...
+
+[function "2/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port ...
+
+
+[function "3/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port ...
+
+# MPS features a 196608 bytes ingress buffer that is used for ingress buffering
+# for packets from the wire as well as the loopback path of the L2 switch. The
+# following params control how the buffer memory is distributed and the L2 flow
+# control settings:
+#
+# bg_mem: %-age of mem to use for port/buffer group
+# lpbk_mem: %-age of port/bg mem to use for loopback
+# hwm: high watermark; bytes available when starting to send pause
+# frames (in units of 0.1 MTU)
+# lwm: low watermark; bytes remaining when sending 'unpause' frame
+#            (in units of 0.1 MTU)
+# dwm: minimum delta between high and low watermark (in units of 100
+# Bytes)
+#
+[port "0"]
+ #dcb = ppp, dcbx, b2b # configure for DCB PPP and enable DCBX offload
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+
+[port "1"]
+ #dcb = ppp, dcbx, b2b
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+[port "2"]
+ #dcb = ppp, dcbx, b2b # configure for DCB PPP and enable DCBX offload
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+
+[port "3"]
+ #dcb = ppp, dcbx, b2b
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+[fini]
+ version = 0x1425001d
+ checksum = 0x684e23fb
+
+# Total resources used by above allocations:
+# Virtual Interfaces: 104
+# Ingress Queues/w Free Lists and Interrupts: 526
+# Egress Queues: 702
+# MPS TCAM Entries: 336
+# MSI-X Vectors: 736
+# Virtual Functions: 64
diff --git a/sys/dev/cxgbe/firmware/t7fw_cfg_fpga.txt b/sys/dev/cxgbe/firmware/t7fw_cfg_fpga.txt
new file mode 100644
index 000000000000..f06f059f4112
--- /dev/null
+++ b/sys/dev/cxgbe/firmware/t7fw_cfg_fpga.txt
@@ -0,0 +1,530 @@
+# Chelsio T6 Factory Default configuration file.
+#
+# Copyright (C) 2014-2015 Chelsio Communications. All rights reserved.
+#
+# DO NOT MODIFY THIS FILE UNDER ANY CIRCUMSTANCES. MODIFICATION OF THIS FILE
+# WILL RESULT IN A NON-FUNCTIONAL ADAPTER AND MAY RESULT IN PHYSICAL DAMAGE
+# TO ADAPTERS.
+
+
+# This file provides the default, power-on configuration for 2-port T6-based
+# adapters shipped from the factory. These defaults are designed to address
+# the needs of the vast majority of Terminator customers. The basic idea is to
+# have a default configuration which allows a customer to plug a Terminator
+# adapter in and have it work regardless of OS, driver or application except in
+# the most unusual and/or demanding customer applications.
+#
+# Many of the Terminator resources which are described by this configuration
+# are finite. This requires balancing the configuration/operation needs of
+# device drivers across OSes and a large number of customer application.
+#
+# Some of the more important resources to allocate and their constraints are:
+# 1. Virtual Interfaces: 256.
+# 2. Ingress Queues with Free Lists: 1024.
+# 3. Egress Queues: 128K.
+# 4. MSI-X Vectors: 1088.
+# 5. Multi-Port Support (MPS) TCAM: 336 entries to support MAC destination
+# address matching on Ingress Packets.
+#
+# Some of the important OS/Driver resource needs are:
+# 6. Some OS Drivers will manage all resources through a single Physical
+# Function (currently PF4 but it could be any Physical Function).
+# 7. Some OS Drivers will manage different ports and functions (NIC,
+# storage, etc.) on different Physical Functions. For example, NIC
+# functions for ports 0-1 on PF0-1, FCoE on PF4, iSCSI on PF5, etc.
+#
+# Some of the customer application needs which need to be accommodated:
+# 8. Some customers will want to support large CPU count systems with
+# good scaling. Thus, we'll need to accommodate a number of
+# Ingress Queues and MSI-X Vectors to allow up to some number of CPUs
+# to be involved per port and per application function. For example,
+# in the case where all ports and application functions will be
+# managed via a single Unified PF and we want to accommodate scaling up
+# to 8 CPUs, we would want:
+#
+# 2 ports *
+# 3 application functions (NIC, FCoE, iSCSI) per port *
+# 16 Ingress Queue/MSI-X Vectors per application function
+#
+# for a total of 96 Ingress Queues and MSI-X Vectors on the Unified PF.
+# (Plus a few for Firmware Event Queues, etc.)
+#
+# 9. Some customers will want to use PCI-E SR-IOV Capability to allow Virtual
+# Machines to directly access T6 functionality via SR-IOV Virtual Functions
+# and "PCI Device Passthrough" -- this is especially true for the NIC
+# application functionality.
+#
+
+
+# Global configuration settings.
+#
+[global]
+ rss_glb_config_mode = basicvirtual
+ rss_glb_config_options = tnlmapen,hashtoeplitz,tnlalllkp
+
+ # PL_TIMEOUT register
+ pl_timeout_value = 1000 # the timeout value in units of us
+
+ # The following Scatter Gather Engine (SGE) settings assume a 4KB Host
+ # Page Size and a 64B L1 Cache Line Size. It programs the
+ # EgrStatusPageSize and IngPadBoundary to 64B and the PktShift to 2.
+ # If a Master PF Driver finds itself on a machine with different
+ # parameters, then the Master PF Driver is responsible for initializing
+ # these parameters to appropriate values.
+ #
+ # Notes:
+ # 1. The Free List Buffer Sizes below are raw and the firmware will
+ # round them up to the Ingress Padding Boundary.
+ # 2. The SGE Timer Values below are expressed below in microseconds.
+ # The firmware will convert these values to Core Clock Ticks when
+ # it processes the configuration parameters.
+ #
+ reg[0x1008] = 0x40810/0x21c70 # SGE_CONTROL
+ reg[0x100c] = 0x22222222 # SGE_HOST_PAGE_SIZE
+ reg[0x10a0] = 0x01040810 # SGE_INGRESS_RX_THRESHOLD
+ reg[0x1044] = 4096 # SGE_FL_BUFFER_SIZE0
+ reg[0x1048] = 65536 # SGE_FL_BUFFER_SIZE1
+ reg[0x104c] = 1536 # SGE_FL_BUFFER_SIZE2
+ reg[0x1050] = 9024 # SGE_FL_BUFFER_SIZE3
+ reg[0x1054] = 9216 # SGE_FL_BUFFER_SIZE4
+ reg[0x1058] = 2048 # SGE_FL_BUFFER_SIZE5
+ reg[0x105c] = 128 # SGE_FL_BUFFER_SIZE6
+ reg[0x1060] = 8192 # SGE_FL_BUFFER_SIZE7
+ reg[0x1064] = 16384 # SGE_FL_BUFFER_SIZE8
+ reg[0x10a4] = 0xa000a000/0xf000f000 # SGE_DBFIFO_STATUS
+ reg[0x10a8] = 0x402000/0x402000 # SGE_DOORBELL_CONTROL
+ sge_timer_value = 5, 10, 20, 50, 100, 200 # SGE_TIMER_VALUE* in usecs
+ reg[0x10c4] = 0x20000000/0x20000000 # GK_CONTROL, enable 5th thread
+ reg[0x173c] = 0x2/0x2
+
+ reg[0x1750] = 0x01000000/0x03c00000 # RDMA_INV_Handling = 1
+ # terminate_status_en = 0
+ # DISABLE = 0
+
+ #DBQ Timer duration = 1 cclk cycle duration * (sge_dbq_timertick+1) * sge_dbq_timer
+ #SGE DBQ tick value. All timers are multiple of this value
+ sge_dbq_timertick = 1 #in usecs
+ sge_dbq_timer = 1, 2, 4, 6, 8, 10, 12, 16
+ # enable TP_OUT_CONFIG.IPIDSPLITMODE
+ reg[0x7d04] = 0x00010000/0x00010000
+
+ reg[0x7dc0] = 0x0e2f8849 # TP_SHIFT_CNT
+
+ reg[0x46004] = 0x3/0x3 # Crypto core reset
+ reg[0x46000] = 0xa/0xe # 16K ESH Hi Extraction window
+
+ #Tick granularities in kbps
+ tsch_ticks = 1000, 100, 10, 1
+
+ # TP_VLAN_PRI_MAP to select filter tuples and enable ServerSram
+ # filter control: compact, fcoemask
+ # server sram : srvrsram
+ # filter tuples : fragmentation, mpshittype, macmatch, ethertype,
+ # protocol, tos, vlan, vnic_id, port, fcoe
+	# valid filterModes are described in the Terminator 5 Data Book
+ filterMode = fcoemask, srvrsram, ipsec, rocev2, fragmentation, mpshittype, protocol, vlan, port, fcoe
+
+ # filter tuples enforced in LE active region (equal to or subset of filterMode)
+ filterMask = protocol, ipsec, rocev2, fcoe
+
+ # Percentage of dynamic memory (in either the EDRAM or external MEM)
+ # to use for TP RX payload
+ tp_pmrx = 30
+
+ # TP RX payload page size
+ tp_pmrx_pagesize = 64K
+
+ # Percentage of dynamic memory (in either the EDRAM or external MEM)
+ # to use for TP TX payload
+ tp_pmtx = 50
+
+ # TP TX payload page size
+ tp_pmtx_pagesize = 64K
+
+ # TP OFLD MTUs
+ tp_mtus = 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
+
+ # enable TP_OUT_CONFIG.IPIDSPLITMODE and CRXPKTENC
+ reg[0x7d04] = 0x00010008/0x00010008
+
+ # TP_GLOBAL_CONFIG
+ reg[0x7d08] = 0x00000800/0x00000800 # set IssFromCplEnable
+
+ # TP_PC_CONFIG
+ reg[0x7d48] = 0x00000000/0x00000400 # clear EnableFLMError
+
+ # TP_PARA_REG0
+ reg[0x7d60] = 0x06000000/0x07000000 # set InitCWND to 6
+
+ # ULPRX iSCSI Page Sizes
+ reg[0x19168] = 0x04020100 # 64K, 16K, 8K and 4K
+
+ # LE_DB_CONFIG
+ reg[0x19c04] = 0x00400000/0x00440000 # LE Server SRAM Enable,
+ # LE IPv4 compression disabled
+ # LE_DB_HASH_CONFIG
+ reg[0x19c28] = 0x00800000/0x01f00000 # LE Hash bucket size 8,
+
+ # ULP_TX_CONFIG
+ reg[0x8dc0] = 0x00000104/0x02000104 # Enable ITT on PI err
+ # Enable more error msg for ...
+ # TPT error.
+ # Err2uP = 0
+
+ #ULP_RX_CTL1
+ reg[0x19330] = 0x000000f0/0x000000f0 # RDMA_Invld_Msg_Dis = 3
+ # ROCE_Invld_Msg_Dis = 3
+
+	#Enable iscsi completion moderation feature, disable rdma invalidate in ulptx
+ reg[0x1925c] = 0x000041c0/0x000031d0 # Enable offset decrement after
+ # PI extraction and before DDP.
+ # ulp insert pi source info in
+ # DIF.
+ # Enable iscsi hdr cmd mode.
+ # iscsi force cmd mode.
+ # Enable iscsi cmp mode.
+ # terminate_status_en = 0
+
+ #ULP_RX_CQE_GEN_EN
+	reg[0x19250] = 0x0/0x3		# Terminate_msg = 0
+ # Terminate_with_err = 0
+
+ #gc_disable = 3 # 3 - disable gc for hma/mc1 and mc0,
+ # 2 - disable gc for mc1/hma enable mc0,
+ # 1 - enable gc for mc1/hma disable mc0,
+ # 0 - enable gc for mc1/hma and for mc0,
+ # default gc enabled.
+
+ # HMA configuration (uncomment following lines to enable HMA)
+ hma_size = 92 # Size (in MBs) of host memory expected
+ hma_regions = iscsi,rrq,tls,ddp,pmrx,stag,pbl,rq # What all regions to place in host memory
+
+ #mc[0]=0
+ #mc[1]=0
+
+# Some "definitions" to make the rest of this a bit more readable. We support
+# 4 ports, 3 functions (NIC, FCoE and iSCSI), scaling up to 8 "CPU Queue Sets"
+# per function per port ...
+#
+# NMSIX = 1088 # available MSI-X Vectors
+# NVI = 256 # available Virtual Interfaces
+# NMPSTCAM = 336 # MPS TCAM entries
+#
+# NPORTS = 2 # ports
+# NCPUS = 16 # CPUs we want to support scalably
+# NFUNCS = 3 # functions per port (NIC, FCoE, iSCSI)
+
+# Breakdown of Virtual Interface/Queue/Interrupt resources for the "Unified
+# PF" which many OS Drivers will use to manage most or all functions.
+#
+# Each Ingress Queue can use one MSI-X interrupt but some Ingress Queues can
+# use Forwarded Interrupt Ingress Queues. For these latter, an Ingress Queue
+# would be created and the Queue ID of a Forwarded Interrupt Ingress Queue
+# will be specified as the "Ingress Queue Asynchronous Destination Index."
+# Thus, the number of MSI-X Vectors assigned to the Unified PF will be less
+# than or equal to the number of Ingress Queues ...
+#
+# NVI_NIC = 4 # NIC access to NPORTS
+# NFLIQ_NIC = 32 # NIC Ingress Queues with Free Lists
+# NETHCTRL_NIC = 32 # NIC Ethernet Control/TX Queues
+# NEQ_NIC = 64 # NIC Egress Queues (FL, ETHCTRL/TX)
+# NMPSTCAM_NIC = 16 # NIC MPS TCAM Entries (NPORTS*4)
+# NMSIX_NIC = 32 # NIC MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_OFLD = 0 # Offload uses NIC function to access ports
+# NFLIQ_OFLD = 16 # Offload Ingress Queues with Free Lists
+# NETHCTRL_OFLD = 0 # Offload Ethernet Control/TX Queues
+# NEQ_OFLD = 16 # Offload Egress Queues (FL)
+# NMPSTCAM_OFLD = 0 # Offload MPS TCAM Entries (uses NIC's)
+# NMSIX_OFLD = 16 # Offload MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_RDMA = 0 # RDMA uses NIC function to access ports
+# NFLIQ_RDMA = 4 # RDMA Ingress Queues with Free Lists
+# NETHCTRL_RDMA = 0 # RDMA Ethernet Control/TX Queues
+# NEQ_RDMA = 4 # RDMA Egress Queues (FL)
+# NMPSTCAM_RDMA = 0 # RDMA MPS TCAM Entries (uses NIC's)
+# NMSIX_RDMA = 4 # RDMA MSI-X Interrupt Vectors (FLIQ)
+#
+# NEQ_WD = 128 # Wire Direct TX Queues and FLs
+# NETHCTRL_WD = 64 # Wire Direct TX Queues
+# NFLIQ_WD = 64		# Wire Direct Ingress Queues with Free Lists
+#
+# NVI_ISCSI = 4 # ISCSI access to NPORTS
+# NFLIQ_ISCSI = 4 # ISCSI Ingress Queues with Free Lists
+# NETHCTRL_ISCSI = 0 # ISCSI Ethernet Control/TX Queues
+# NEQ_ISCSI = 4 # ISCSI Egress Queues (FL)
+# NMPSTCAM_ISCSI = 4 # ISCSI MPS TCAM Entries (NPORTS)
+# NMSIX_ISCSI = 4 # ISCSI MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_FCOE = 4 # FCOE access to NPORTS
+# NFLIQ_FCOE = 34 # FCOE Ingress Queues with Free Lists
+# NETHCTRL_FCOE = 32 # FCOE Ethernet Control/TX Queues
+# NEQ_FCOE = 66 # FCOE Egress Queues (FL)
+# NMPSTCAM_FCOE = 32 # FCOE MPS TCAM Entries (NPORTS)
+# NMSIX_FCOE = 34 # FCOE MSI-X Interrupt Vectors (FLIQ)
+
+# Two extra Ingress Queues per function for Firmware Events and Forwarded
+# Interrupts, and two extra interrupts per function for Firmware Events (or a
+# Forwarded Interrupt Queue) and General Interrupts per function.
+#
+# NFLIQ_EXTRA = 6 # "extra" Ingress Queues 2*NFUNCS (Firmware and
+# # Forwarded Interrupts
+# NMSIX_EXTRA = 6 # extra interrupts 2*NFUNCS (Firmware and
+# # General Interrupts
+
+# Microsoft HyperV resources. The HyperV Virtual Ingress Queues will have
+# their interrupts forwarded to another set of Forwarded Interrupt Queues.
+#
+# NVI_HYPERV = 16 # VMs we want to support
+# NVIIQ_HYPERV = 2 # Virtual Ingress Queues with Free Lists per VM
+# NFLIQ_HYPERV = 40 # VIQs + NCPUS Forwarded Interrupt Queues
+# NEQ_HYPERV = 32 # VIQs Free Lists
+# NMPSTCAM_HYPERV = 16 # MPS TCAM Entries (NVI_HYPERV)
+# NMSIX_HYPERV = 8 # NCPUS Forwarded Interrupt Queues
+
+# Adding all of the above Unified PF resource needs together: (NIC + OFLD +
+# RDMA + ISCSI + FCOE + EXTRA + HYPERV)
+#
+# NVI_UNIFIED = 28
+# NFLIQ_UNIFIED = 106
+# NETHCTRL_UNIFIED = 32
+# NEQ_UNIFIED = 124
+# NMPSTCAM_UNIFIED = 40
+#
+# The sum of all the MSI-X resources above is 74 MSI-X Vectors but we'll round
+# that up to 128 to make sure the Unified PF doesn't run out of resources.
+#
+# NMSIX_UNIFIED = 128
+#
+# The Storage PFs could need up to NPORTS*NCPUS + NMSIX_EXTRA MSI-X Vectors
+# which is 34 but they're probably safe with 32.
+#
+# NMSIX_STORAGE = 32
+
+# Note: The UnifiedPF is PF4 which doesn't have any Virtual Functions
+# associated with it. Thus, the MSI-X Vector allocations we give to the
+# UnifiedPF aren't inherited by any Virtual Functions. As a result we can
+# provision many more Virtual Functions than we can if the UnifiedPF were
+# one of PF0-1.
+#
+
+# All of the below PCI-E parameters are actually stored in various *_init.txt
+# files. We include them below essentially as comments.
+#
+# For PF0-1 we assign 8 vectors each for NIC Ingress Queues of the associated
+# ports 0-1.
+#
+# For PF4, the Unified PF, we give it an MSI-X Table Size as outlined above.
+#
+# For PF5-6 we assign enough MSI-X Vectors to support FCoE and iSCSI
+# storage applications across all four possible ports.
+#
+# Additionally, since the UnifiedPF isn't one of the per-port Physical
+# Functions, we give the UnifiedPF and the PF0-1 Physical Functions
+# different PCI Device IDs which will allow Unified and Per-Port Drivers
+# to directly select the type of Physical Function to which they wish to be
+# attached.
+#
+# Note that the actual values used for the PCI-E Intellectual Property will be
+# 1 less than those below since that's the way it "counts" things. For
+# readability, we use the number we actually mean ...
+#
+# PF0_INT = 8 # NCPUS
+# PF1_INT = 8 # NCPUS
+# PF0_3_INT = 32 # PF0_INT + PF1_INT + PF2_INT + PF3_INT
+#
+# PF4_INT = 128 # NMSIX_UNIFIED
+# PF5_INT = 32 # NMSIX_STORAGE
+# PF6_INT = 32 # NMSIX_STORAGE
+# PF7_INT = 0 # Nothing Assigned
+# PF4_7_INT = 192 # PF4_INT + PF5_INT + PF6_INT + PF7_INT
+#
+# PF0_7_INT = 224 # PF0_3_INT + PF4_7_INT
+#
+# With the above we can get 17 VFs/PF0-3 (limited by 336 MPS TCAM entries)
+# but we'll lower that to 16 to make our total 64 and a nice power of 2 ...
+#
+# NVF = 16
+
+
+# For those OSes which manage different ports on different PFs, we need
+# only enough resources to support a single port's NIC application functions
+# on PF0-3. The below assumes that we're only doing NIC with NCPUS "Queue
+# Sets" for ports 0-3. The FCoE and iSCSI functions for such OSes will be
+# managed on the "storage PFs" (see below).
+#
+
+# Some OS Drivers manage all application functions for all ports via PF4.
+# Thus we need to provide a large number of resources here. For Egress
+# Queues we need to account for both TX Queues as well as Free List Queues
+# (because the host is responsible for producing Free List Buffers for the
+# hardware to consume).
+#
+[function "0"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 28 # NVI_UNIFIED
+ niqflint = 170 # NFLIQ_UNIFIED + NLFIQ_WD
+ nethctrl = 96 # NETHCTRL_UNIFIED + NETHCTRL_WD
+ neq = 252 # NEQ_UNIFIED + NEQ_WD
+ nqpcq = 12288
+ nexactf = 40 # NMPSTCAM_UNIFIED
+ nrawf = 4
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nethofld = 1024 # number of user mode ethernet flow contexts
+ ncrypto_lookaside = 32
+ nclip = 32 # number of clip region entries
+ nfilter = 48 # number of filter region entries
+ nserver = 48 # number of server region entries
+ nhash = 12288 # number of hash region entries
+ nhpfilter = 64 # number of high priority filter region entries
+ #protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif, tlskeys, crypto_lookaside, ipsec_inline, rocev2, nic_hashfilter, ofld_sendpath
+ protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif, tlskeys, crypto_lookaside, ipsec_inline, rocev2, nic_hashfilter, nvme_tcp
+ tp_l2t = 3072
+ tp_ddp = 2
+ tp_ddp_iscsi = 2
+ tp_tls_key = 3
+ tp_tls_mxrxsize = 33792 # 32768 + 1024, governs max rx data, pm max xfer len, rx coalesce sizes
+ tp_stag = 2
+ tp_pbl = 5
+ tp_rq = 7
+ tp_rrq = 4
+ tp_srq = 128
+ nipsec_tunnel16 = 64 # in unit of 16
+ nipsec_transport16 = 191 # in unit of 16
+
+
+# We have FCoE and iSCSI storage functions on PF5 and PF6 each of which may
+# need to have Virtual Interfaces on each of the four ports with up to NCPUS
+# "Queue Sets" each.
+#
+[function "1"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NPORTS
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 64 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
+ nexactf = 16 # (NPORTS *(no of snmc grp + 1 hw mac) + 1 anmc grp)) rounded to 16.
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nserver = 16
+ nhash = 2048
+ tp_l2t = 1020
+ protocol = iscsi_initiator_fofld
+ tp_ddp_iscsi = 2
+ iscsi_ntask = 2048
+ iscsi_nsess = 2048
+ iscsi_nconn_per_session = 1
+ iscsi_ninitiator_instance = 64
+
+
+# The following function, 1023, is not an actual PCIE function but is used to
+# configure and reserve firmware internal resources that come from the global
+# resource pool.
+#
+[function "1023"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NVI_UNIFIED
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nexactf = 8 # NPORTS + DCBX +
+ nfilter = 16 # number of filter region entries
+ #nhpfilter = 0 # number of high priority filter region entries
+
+
+# For Virtual functions, we only allow NIC functionality and we only allow
+# access to one port (1 << PF). Note that because of limitations in the
+# Scatter Gather Engine (SGE) hardware which checks writes to VF KDOORBELL
+# and GTS registers, the number of Ingress and Egress Queues must be a power
+# of 2.
+#
+[function "0/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port ...
+
+
+[function "1/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port ...
+
+
+# MPS features a 196608 bytes ingress buffer that is used for ingress buffering
+# for packets from the wire as well as the loopback path of the L2 switch. The
+# following params control how the buffer memory is distributed and the L2 flow
+# control settings:
+#
+# bg_mem: %-age of mem to use for port/buffer group
+# lpbk_mem: %-age of port/bg mem to use for loopback
+# hwm: high watermark; bytes available when starting to send pause
+# frames (in units of 0.1 MTU)
+# lwm: low watermark; bytes remaining when sending 'unpause' frame
+# (in units of 0.1 MTU)
+# dwm: minimum delta between high and low watermark (in units of 100
+# Bytes)
+#
+[port "0"]
+ dcb = ppp, dcbx, b2b # configure for DCB PPP and enable DCBX offload
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+
+[port "1"]
+ dcb = ppp, dcbx, b2b
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+[port "2"]
+ dcb = ppp, dcbx, b2b # configure for DCB PPP and enable DCBX offload
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+
+[port "3"]
+ dcb = ppp, dcbx, b2b
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+[fini]
+ version = 0x1425001d
+ checksum = 0x22432d98
+
+# Total resources used by above allocations:
+# Virtual Interfaces: 104
+# Ingress Queues/w Free Lists and Interrupts: 526
+# Egress Queues: 702
+# MPS TCAM Entries: 336
+# MSI-X Vectors: 736
+# Virtual Functions: 64
diff --git a/sys/dev/cxgbe/firmware/t7fw_cfg_uwire.txt b/sys/dev/cxgbe/firmware/t7fw_cfg_uwire.txt
new file mode 100644
index 000000000000..0bca1c194af8
--- /dev/null
+++ b/sys/dev/cxgbe/firmware/t7fw_cfg_uwire.txt
@@ -0,0 +1,644 @@
+# Chelsio T6 Factory Default configuration file.
+#
+# Copyright (C) 2014-2015 Chelsio Communications. All rights reserved.
+#
+# DO NOT MODIFY THIS FILE UNDER ANY CIRCUMSTANCES. MODIFICATION OF THIS FILE
+# WILL RESULT IN A NON-FUNCTIONAL ADAPTER AND MAY RESULT IN PHYSICAL DAMAGE
+# TO ADAPTERS.
+
+
+# This file provides the default, power-on configuration for 2-port T6-based
+# adapters shipped from the factory. These defaults are designed to address
+# the needs of the vast majority of Terminator customers. The basic idea is to
+# have a default configuration which allows a customer to plug a Terminator
+# adapter in and have it work regardless of OS, driver or application except in
+# the most unusual and/or demanding customer applications.
+#
+# Many of the Terminator resources which are described by this configuration
+# are finite. This requires balancing the configuration/operation needs of
+# device drivers across OSes and a large number of customer application.
+#
+# Some of the more important resources to allocate and their constraints are:
+# 1. Virtual Interfaces: 256.
+# 2. Ingress Queues with Free Lists: 1024.
+# 3. Egress Queues: 128K.
+# 4. MSI-X Vectors: 1088.
+# 5. Multi-Port Support (MPS) TCAM: 336 entries to support MAC destination
+# address matching on Ingress Packets.
+#
+# Some of the important OS/Driver resource needs are:
+# 6. Some OS Drivers will manage all resources through a single Physical
+# Function (currently PF4 but it could be any Physical Function).
+# 7. Some OS Drivers will manage different ports and functions (NIC,
+# storage, etc.) on different Physical Functions. For example, NIC
+# functions for ports 0-1 on PF0-1, FCoE on PF4, iSCSI on PF5, etc.
+#
+# Some of the customer application needs which need to be accommodated:
+# 8. Some customers will want to support large CPU count systems with
+# good scaling. Thus, we'll need to accommodate a number of
+# Ingress Queues and MSI-X Vectors to allow up to some number of CPUs
+# to be involved per port and per application function. For example,
+# in the case where all ports and application functions will be
+# managed via a single Unified PF and we want to accommodate scaling up
+# to 8 CPUs, we would want:
+#
+# 2 ports *
+# 3 application functions (NIC, FCoE, iSCSI) per port *
+# 16 Ingress Queue/MSI-X Vectors per application function
+#
+# for a total of 96 Ingress Queues and MSI-X Vectors on the Unified PF.
+# (Plus a few for Firmware Event Queues, etc.)
+#
+# 9. Some customers will want to use PCI-E SR-IOV Capability to allow Virtual
+# Machines to directly access T6 functionality via SR-IOV Virtual Functions
+# and "PCI Device Passthrough" -- this is especially true for the NIC
+# application functionality.
+#
+
+
+# Global configuration settings.
+#
+[global]
+ rss_glb_config_mode = basicvirtual
+ rss_glb_config_options = tnlmapen,hashtoeplitz,tnlalllkp
+
+ # PL_TIMEOUT register
+ pl_timeout_value = 200 # the timeout value in units of us
+
+ # The following Scatter Gather Engine (SGE) settings assume a 4KB Host
+ # Page Size and a 64B L1 Cache Line Size. It programs the
+ # EgrStatusPageSize and IngPadBoundary to 64B and the PktShift to 2.
+ # If a Master PF Driver finds itself on a machine with different
+ # parameters, then the Master PF Driver is responsible for initializing
+ # these parameters to appropriate values.
+ #
+ # Notes:
+ # 1. The Free List Buffer Sizes below are raw and the firmware will
+ # round them up to the Ingress Padding Boundary.
+ # 2. The SGE Timer Values below are expressed below in microseconds.
+ # The firmware will convert these values to Core Clock Ticks when
+ # it processes the configuration parameters.
+ #
+ reg[0x1008] = 0x40810/0x21c70 # SGE_CONTROL
+ reg[0x100c] = 0x22222222 # SGE_HOST_PAGE_SIZE
+ reg[0x10a0] = 0x01040810 # SGE_INGRESS_RX_THRESHOLD
+ reg[0x1044] = 4096 # SGE_FL_BUFFER_SIZE0
+ reg[0x1048] = 65536 # SGE_FL_BUFFER_SIZE1
+ reg[0x104c] = 1536 # SGE_FL_BUFFER_SIZE2
+ reg[0x1050] = 9024 # SGE_FL_BUFFER_SIZE3
+ reg[0x1054] = 9216 # SGE_FL_BUFFER_SIZE4
+ reg[0x1058] = 2048 # SGE_FL_BUFFER_SIZE5
+ reg[0x105c] = 128 # SGE_FL_BUFFER_SIZE6
+ reg[0x1060] = 8192 # SGE_FL_BUFFER_SIZE7
+ reg[0x1064] = 16384 # SGE_FL_BUFFER_SIZE8
+ reg[0x10a4] = 0xa000a000/0xf000f000 # SGE_DBFIFO_STATUS
+ reg[0x10a8] = 0x402000/0x402000 # SGE_DOORBELL_CONTROL
+ sge_timer_value = 5, 10, 20, 50, 100, 200 # SGE_TIMER_VALUE* in usecs
+ reg[0x10c4] = 0x20000000/0x20000000 # GK_CONTROL, enable 5th thread
+ reg[0x173c] = 0x2/0x2
+
+ reg[0x1750] = 0x01000000/0x03c00000 # RDMA_INV_Handling = 1
+ # terminate_status_en = 0
+ # DISABLE = 0
+
+ #DBQ Timer duration = 1 cclk cycle duration * (sge_dbq_timertick+1) * sge_dbq_timer
+ #SGE DBQ tick value. All timers are multiple of this value
+ sge_dbq_timertick = 50 #in usecs
+ sge_dbq_timer = 1, 2, 4, 6, 8, 10, 12, 16
+
+ #CIM_QUEUE_FEATURE_DISABLE.obq_eom_enable bit needs to be set to 1 for CmdMore handling support
+ reg[0x7c4c] = 0x20/0x20
+
+ # enable TP_OUT_CONFIG.IPIDSPLITMODE
+ reg[0x7d04] = 0x00010000/0x00010000
+
+ reg[0x7dc0] = 0x0e2f8849 # TP_SHIFT_CNT
+
+ reg[0x46004] = 0x3/0x3 #Crypto core reset
+
+ #Tick granularities in kbps
+ tsch_ticks = 100000, 10000, 1000, 10
+
+ # TP_VLAN_PRI_MAP to select filter tuples and enable ServerSram
+ # filter control: compact, fcoemask
+ # server sram : srvrsram
+ # filter tuples : fragmentation, mpshittype, macmatch, ethertype,
+ # protocol, tos, vlan, vnic_id, port, fcoe
+	# valid filterModes are described in the Terminator 5 Data Book
+ filterMode = fcoemask, srvrsram, ipsec, rocev2, fragmentation, mpshittype, protocol, vlan, port, fcoe
+
+ # filter tuples enforced in LE active region (equal to or subset of filterMode)
+ filterMask = protocol, ipsec, rocev2, fcoe
+
+ # Percentage of dynamic memory (in either the EDRAM or external MEM)
+ # to use for TP RX payload
+ tp_pmrx = 30
+
+ # TP RX payload page size
+ tp_pmrx_pagesize = 64K
+
+ # Percentage of dynamic memory (in either the EDRAM or external MEM)
+ # to use for TP TX payload
+ tp_pmtx = 50
+
+ # TP TX payload page size
+ tp_pmtx_pagesize = 64K
+
+ # TP OFLD MTUs
+ tp_mtus = 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
+
+ # enable TP_OUT_CONFIG.IPIDSPLITMODE and CRXPKTENC
+ reg[0x7d04] = 0x00010008/0x00010008
+
+ # TP_GLOBAL_CONFIG
+ reg[0x7d08] = 0x00000800/0x00000800 # set IssFromCplEnable
+
+ # TP_PC_CONFIG
+ reg[0x7d48] = 0x00000000/0x00000400 # clear EnableFLMError
+
+ # TP_PARA_REG0
+ reg[0x7d60] = 0x06000000/0x07000000 # set InitCWND to 6
+
+ # ULPRX iSCSI Page Sizes
+ reg[0x19168] = 0x04020100 # 64K, 16K, 8K and 4K
+
+ # LE_DB_CONFIG
+ reg[0x19c04] = 0x00400000/0x00440000 # LE Server SRAM Enable,
+ # LE IPv4 compression disabled
+ # LE_DB_HASH_CONFIG
+ reg[0x19c28] = 0x00800000/0x01f00000 # LE Hash bucket size 8,
+
+ # ULP_TX_CONFIG
+ reg[0x8dc0] = 0x00000104/0x02000104 # Enable ITT on PI err
+ # Enable more error msg for ...
+ # TPT error.
+ # Err2uP = 0
+
+ #ULP_RX_CTL1
+ reg[0x19330] = 0x000000f0/0x000000f0 # RDMA_Invld_Msg_Dis = 3
+ # ROCE_Invld_Msg_Dis = 3
+
+	#Enable iscsi completion moderation feature, disable rdma invalidate in ulptx
+ reg[0x1925c] = 0x000041c0/0x000031d0 # Enable offset decrement after
+ # PI extraction and before DDP.
+ # ulp insert pi source info in
+ # DIF.
+ # Enable iscsi hdr cmd mode.
+ # iscsi force cmd mode.
+ # Enable iscsi cmp mode.
+ # terminate_status_en = 0
+
+ #ULP_RX_CQE_GEN_EN
+	reg[0x19250] = 0x0/0x3		# Terminate_msg = 0
+ # Terminate_with_err = 0
+
+ gc_disable = 3 # 3 - disable gc for hma/mc1 and mc0,
+ # 2 - disable gc for mc1/hma enable mc0,
+ # 1 - enable gc for mc1/hma disable mc0,
+ # 0 - enable gc for mc1/hma and for mc0,
+ # default gc enabled.
+
+ # HMA configuration (uncomment following lines to enable HMA)
+ hma_size = 92 # Size (in MBs) of host memory expected
+ hma_regions = iscsi,rrq,tls,ddp,pmrx,stag,pbl,rq # What all regions to place in host memory
+
+ #mc[0]=0
+ #mc[1]=0
+
+# Some "definitions" to make the rest of this a bit more readable. We support
+# 4 ports, 3 functions (NIC, FCoE and iSCSI), scaling up to 8 "CPU Queue Sets"
+# per function per port ...
+#
+# NMSIX = 1088 # available MSI-X Vectors
+# NVI = 256 # available Virtual Interfaces
+# NMPSTCAM = 336 # MPS TCAM entries
+#
+# NPORTS = 2 # ports
+# NCPUS = 16 # CPUs we want to support scalably
+# NFUNCS = 3 # functions per port (NIC, FCoE, iSCSI)
+
+# Breakdown of Virtual Interface/Queue/Interrupt resources for the "Unified
+# PF" which many OS Drivers will use to manage most or all functions.
+#
+# Each Ingress Queue can use one MSI-X interrupt but some Ingress Queues can
+# use Forwarded Interrupt Ingress Queues. For these latter, an Ingress Queue
+# would be created and the Queue ID of a Forwarded Interrupt Ingress Queue
+# will be specified as the "Ingress Queue Asynchronous Destination Index."
+# Thus, the number of MSI-X Vectors assigned to the Unified PF will be less
+# than or equal to the number of Ingress Queues ...
+#
+# NVI_NIC = 4 # NIC access to NPORTS
+# NFLIQ_NIC = 32 # NIC Ingress Queues with Free Lists
+# NETHCTRL_NIC = 32 # NIC Ethernet Control/TX Queues
+# NEQ_NIC = 64 # NIC Egress Queues (FL, ETHCTRL/TX)
+# NMPSTCAM_NIC = 16 # NIC MPS TCAM Entries (NPORTS*4)
+# NMSIX_NIC = 32 # NIC MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_OFLD = 0 # Offload uses NIC function to access ports
+# NFLIQ_OFLD = 16 # Offload Ingress Queues with Free Lists
+# NETHCTRL_OFLD = 0 # Offload Ethernet Control/TX Queues
+# NEQ_OFLD = 16 # Offload Egress Queues (FL)
+# NMPSTCAM_OFLD = 0 # Offload MPS TCAM Entries (uses NIC's)
+# NMSIX_OFLD = 16 # Offload MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_RDMA = 0 # RDMA uses NIC function to access ports
+# NFLIQ_RDMA = 4 # RDMA Ingress Queues with Free Lists
+# NETHCTRL_RDMA = 0 # RDMA Ethernet Control/TX Queues
+# NEQ_RDMA = 4 # RDMA Egress Queues (FL)
+# NMPSTCAM_RDMA = 0 # RDMA MPS TCAM Entries (uses NIC's)
+# NMSIX_RDMA = 4 # RDMA MSI-X Interrupt Vectors (FLIQ)
+#
+# NEQ_WD = 128 # Wire Direct TX Queues and FLs
+# NETHCTRL_WD = 64 # Wire Direct TX Queues
+# NFLIQ_WD = 64		# Wire Direct Ingress Queues with Free Lists
+#
+# NVI_ISCSI = 4 # ISCSI access to NPORTS
+# NFLIQ_ISCSI = 4 # ISCSI Ingress Queues with Free Lists
+# NETHCTRL_ISCSI = 0 # ISCSI Ethernet Control/TX Queues
+# NEQ_ISCSI = 4 # ISCSI Egress Queues (FL)
+# NMPSTCAM_ISCSI = 4 # ISCSI MPS TCAM Entries (NPORTS)
+# NMSIX_ISCSI = 4 # ISCSI MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_FCOE = 4 # FCOE access to NPORTS
+# NFLIQ_FCOE = 34 # FCOE Ingress Queues with Free Lists
+# NETHCTRL_FCOE = 32 # FCOE Ethernet Control/TX Queues
+# NEQ_FCOE = 66 # FCOE Egress Queues (FL)
+# NMPSTCAM_FCOE = 32 # FCOE MPS TCAM Entries (NPORTS)
+# NMSIX_FCOE = 34 # FCOE MSI-X Interrupt Vectors (FLIQ)
+
+# Two extra Ingress Queues per function for Firmware Events and Forwarded
+# Interrupts, and two extra interrupts per function for Firmware Events (or a
+# Forwarded Interrupt Queue) and General Interrupts per function.
+#
+# NFLIQ_EXTRA = 6 # "extra" Ingress Queues 2*NFUNCS (Firmware and
+# # Forwarded Interrupts
+# NMSIX_EXTRA = 6 # extra interrupts 2*NFUNCS (Firmware and
+# # General Interrupts
+
+# Microsoft HyperV resources. The HyperV Virtual Ingress Queues will have
+# their interrupts forwarded to another set of Forwarded Interrupt Queues.
+#
+# NVI_HYPERV = 16 # VMs we want to support
+# NVIIQ_HYPERV = 2 # Virtual Ingress Queues with Free Lists per VM
+# NFLIQ_HYPERV = 40 # VIQs + NCPUS Forwarded Interrupt Queues
+# NEQ_HYPERV = 32 # VIQs Free Lists
+# NMPSTCAM_HYPERV = 16 # MPS TCAM Entries (NVI_HYPERV)
+# NMSIX_HYPERV = 8 # NCPUS Forwarded Interrupt Queues
+
+# Adding all of the above Unified PF resource needs together: (NIC + OFLD +
+# RDMA + ISCSI + FCOE + EXTRA + HYPERV)
+#
+# NVI_UNIFIED = 28
+# NFLIQ_UNIFIED = 106
+# NETHCTRL_UNIFIED = 32
+# NEQ_UNIFIED = 124
+# NMPSTCAM_UNIFIED = 40
+#
+# The sum of all the MSI-X resources above is 74 MSI-X Vectors but we'll round
+# that up to 128 to make sure the Unified PF doesn't run out of resources.
+#
+# NMSIX_UNIFIED = 128
+#
+# The Storage PFs could need up to NPORTS*NCPUS + NMSIX_EXTRA MSI-X Vectors
+# which is 34 but they're probably safe with 32.
+#
+# NMSIX_STORAGE = 32
+
+# Note: The UnifiedPF is PF4 which doesn't have any Virtual Functions
+# associated with it. Thus, the MSI-X Vector allocations we give to the
+# UnifiedPF aren't inherited by any Virtual Functions. As a result we can
+# provision many more Virtual Functions than we can if the UnifiedPF were
+# one of PF0-1.
+#
+
+# All of the below PCI-E parameters are actually stored in various *_init.txt
+# files. We include them below essentially as comments.
+#
+# For PF0-1 we assign 8 vectors each for NIC Ingress Queues of the associated
+# ports 0-1.
+#
+# For PF4, the Unified PF, we give it an MSI-X Table Size as outlined above.
+#
+# For PF5-6 we assign enough MSI-X Vectors to support FCoE and iSCSI
+# storage applications across all four possible ports.
+#
+# Additionally, since the UnifiedPF isn't one of the per-port Physical
+# Functions, we give the UnifiedPF and the PF0-1 Physical Functions
+# different PCI Device IDs which will allow Unified and Per-Port Drivers
+# to directly select the type of Physical Function to which they wish to be
+# attached.
+#
+# Note that the actual values used for the PCI-E Intellectual Property will be
+# 1 less than those below since that's the way it "counts" things. For
+# readability, we use the number we actually mean ...
+#
+# PF0_INT = 8 # NCPUS
+# PF1_INT = 8 # NCPUS
+# PF0_3_INT = 32 # PF0_INT + PF1_INT + PF2_INT + PF3_INT
+#
+# PF4_INT = 128 # NMSIX_UNIFIED
+# PF5_INT = 32 # NMSIX_STORAGE
+# PF6_INT = 32 # NMSIX_STORAGE
+# PF7_INT = 0 # Nothing Assigned
+# PF4_7_INT = 192 # PF4_INT + PF5_INT + PF6_INT + PF7_INT
+#
+# PF0_7_INT = 224 # PF0_3_INT + PF4_7_INT
+#
+# With the above we can get 17 VFs/PF0-3 (limited by 336 MPS TCAM entries)
+# but we'll lower that to 16 to make our total 64 and a nice power of 2 ...
+#
+# NVF = 16
+
+
+# For those OSes which manage different ports on different PFs, we need
+# only enough resources to support a single port's NIC application functions
+# on PF0-3. The below assumes that we're only doing NIC with NCPUS "Queue
+# Sets" for ports 0-3. The FCoE and iSCSI functions for such OSes will be
+# managed on the "storage PFs" (see below).
+#
+
+[function "0"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port
+
+
+[function "1"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port
+
+[function "2"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ #pmask = 0x4 # access to only one port
+ pmask = 0x1 # access to only one port
+
+[function "3"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ #pmask = 0x2 # access to only one port
+
+# Some OS Drivers manage all application functions for all ports via PF4.
+# Thus we need to provide a large number of resources here. For Egress
+# Queues we need to account for both TX Queues as well as Free List Queues
+# (because the host is responsible for producing Free List Buffers for the
+# hardware to consume).
+#
+
+[function "4"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 28 # NVI_UNIFIED
+ niqflint = 170 # NFLIQ_UNIFIED + NLFIQ_WD
+ nethctrl = 224 # NETHCTRL_UNIFIED + NETHCTRL_WD
+ neq = 252 # NEQ_UNIFIED + NEQ_WD
+ nqpcq = 12288
+ nexactf = 40 # NMPSTCAM_UNIFIED
+ nrawf = 4
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nethofld = 1024 # number of user mode ethernet flow contexts
+ ncrypto_lookaside = 32
+ nclip = 320 # number of clip region entries
+ nfilter = 480 # number of filter region entries
+ nserver = 480 # number of server region entries
+ nhash = 12288 # number of hash region entries
+ nhpfilter = 64 # number of high priority filter region entries
+ #protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif, tlskeys, crypto_lookaside, ipsec_inline, rocev2, nic_hashfilter, ofld_sendpath
+ protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif, tlskeys, crypto_lookaside, ipsec_inline, rocev2, nic_hashfilter, nvme_tcp
+ tp_l2t = 3072
+ tp_ddp = 2
+ tp_ddp_iscsi = 2
+ tp_tls_key = 3
+ tp_tls_mxrxsize = 33792 # 32768 + 1024, governs max rx data, pm max xfer len, rx coalesce sizes
+ tp_stag = 2
+ tp_pbl = 5
+ tp_rq = 7
+ tp_rrq = 4
+ tp_srq = 128
+ nipsec_tunnel16 = 64 # in unit of 16
+ nipsec_transport16 = 191 # in unit of 16
+
+
+# We have FCoE and iSCSI storage functions on PF5 and PF6 each of which may
+# need to have Virtual Interfaces on each of the four ports with up to NCPUS
+# "Queue Sets" each.
+#
+[function "5"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NPORTS
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 64 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
+ nexactf = 16 # (NPORTS *(no of snmc grp + 1 hw mac) + 1 anmc grp)) rounded to 16.
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nserver = 16
+ nhash = 1536
+ tp_l2t = 508
+ protocol = iscsi_initiator_fofld
+ tp_ddp_iscsi = 2
+ iscsi_ntask = 2048
+ iscsi_nsess = 2048
+ iscsi_nconn_per_session = 1
+ iscsi_ninitiator_instance = 64
+
+[function "6"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NPORTS
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 66 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX) + 2 (EXTRA)
+ nexactf = 32 # NPORTS + adding 28 exact entries for FCoE
+ # which is OK since < MIN(SUM PF0..3, PF4)
+ # and we never load PF0..3 and PF4 concurrently
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nhash = 1536
+ tp_l2t = 4
+ protocol = fcoe_initiator
+ tp_ddp = 1
+ fcoe_nfcf = 16
+ fcoe_nvnp = 32
+ fcoe_nssn = 1024
+
+# Following function 7 is used by embedded ARM to communicate to
+# the firmware.
+[function "7"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NVI_UNIFIED
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nexactf = 8 # NPORTS + DCBX +
+ nfilter = 16 # number of filter region entries
+ #nhpfilter = 16 # number of high priority filter region entries
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 64 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
+ nserver = 16
+ nhash = 1024
+ tp_l2t = 512
+ protocol = nic_vm, ofld, rddp, rdmac, tlskeys, ipsec_inline, rocev2, nvme_tcp
+
+# The following function, 1023, is not an actual PCIE function but is used to
+# configure and reserve firmware internal resources that come from the global
+# resource pool.
+#
+[function "1023"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NVI_UNIFIED
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nexactf = 8 # NPORTS + DCBX +
+ nfilter = 16 # number of filter region entries
+ #nhpfilter = 0 # number of high priority filter region entries
+
+
+# For Virtual functions, we only allow NIC functionality and we only allow
+# access to one port (1 << PF). Note that because of limitations in the
+# Scatter Gather Engine (SGE) hardware which checks writes to VF KDOORBELL
+# and GTS registers, the number of Ingress and Egress Queues must be a power
+# of 2.
+#
+[function "0/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port ...
+
+
+[function "1/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port ...
+
+[function "2/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port ...
+
+
+[function "3/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port ...
+
+# MPS features a 196608 bytes ingress buffer that is used for ingress buffering
+# for packets from the wire as well as the loopback path of the L2 switch. The
+# following params control how the buffer memory is distributed and the L2 flow
+# control settings:
+#
+# bg_mem: %-age of mem to use for port/buffer group
+# lpbk_mem: %-age of port/bg mem to use for loopback
+# hwm: high watermark; bytes available when starting to send pause
+# frames (in units of 0.1 MTU)
+# lwm: low watermark; bytes remaining when sending 'unpause' frame
+# (in units of 0.1 MTU)
+# dwm: minimum delta between high and low watermark (in units of 100
+# Bytes)
+#
+[port "0"]
+ dcb = ppp, dcbx # configure for DCB PPP and enable DCBX offload
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+
+[port "1"]
+ dcb = ppp, dcbx
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+[port "2"]
+ dcb = ppp, dcbx # configure for DCB PPP and enable DCBX offload
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+
+[port "3"]
+ dcb = ppp, dcbx
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+[fini]
+ version = 0x1425001d
+ checksum = 0x5cab62d4
+
+# Total resources used by above allocations:
+# Virtual Interfaces: 104
+# Ingress Queues/w Free Lists and Interrupts: 526
+# Egress Queues: 702
+# MPS TCAM Entries: 336
+# MSI-X Vectors: 736
+# Virtual Functions: 64
diff --git a/sys/dev/cxgbe/iw_cxgbe/device.c b/sys/dev/cxgbe/iw_cxgbe/device.c
index 3c4d269f6c69..4610f91e96ac 100644
--- a/sys/dev/cxgbe/iw_cxgbe/device.c
+++ b/sys/dev/cxgbe/iw_cxgbe/device.c
@@ -132,26 +132,21 @@ c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->stats.rqt.total = sc->vres.rq.size;
rdev->stats.qid.total = sc->vres.qp.size;
- rc = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
+ rc = c4iw_init_resource(rdev, T4_MAX_NUM_PD);
if (rc) {
device_printf(sc->dev, "error %d initializing resources\n", rc);
goto err1;
}
- rc = c4iw_pblpool_create(rdev);
- if (rc) {
- device_printf(sc->dev, "error %d initializing pbl pool\n", rc);
- goto err2;
- }
rc = c4iw_rqtpool_create(rdev);
if (rc) {
device_printf(sc->dev, "error %d initializing rqt pool\n", rc);
- goto err3;
+ goto err2;
}
rdev->status_page = (struct t4_dev_status_page *)
__get_free_page(GFP_KERNEL);
if (!rdev->status_page) {
rc = -ENOMEM;
- goto err4;
+ goto err3;
}
rdev->status_page->qp_start = sc->vres.qp.start;
rdev->status_page->qp_size = sc->vres.qp.size;
@@ -168,15 +163,13 @@ c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
if (!rdev->free_workq) {
rc = -ENOMEM;
- goto err5;
+ goto err4;
}
return (0);
-err5:
- free_page((unsigned long)rdev->status_page);
err4:
- c4iw_rqtpool_destroy(rdev);
+ free_page((unsigned long)rdev->status_page);
err3:
- c4iw_pblpool_destroy(rdev);
+ c4iw_rqtpool_destroy(rdev);
err2:
c4iw_destroy_resource(&rdev->resource);
err1:
@@ -186,7 +179,6 @@ err1:
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
free_page((unsigned long)rdev->status_page);
- c4iw_pblpool_destroy(rdev);
c4iw_rqtpool_destroy(rdev);
c4iw_destroy_resource(&rdev->resource);
}
diff --git a/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h b/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
index ca2595b65b02..47ce10562c66 100644
--- a/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
+++ b/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
@@ -99,7 +99,6 @@ struct c4iw_id_table {
};
struct c4iw_resource {
- struct c4iw_id_table tpt_table;
struct c4iw_id_table qid_table;
struct c4iw_id_table pdid_table;
};
@@ -904,11 +903,9 @@ int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
struct l2t_entry *l2t);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
-int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
+int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_pdid);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
-int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
-void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
diff --git a/sys/dev/cxgbe/iw_cxgbe/mem.c b/sys/dev/cxgbe/iw_cxgbe/mem.c
index 4a1adc118b7c..ae0aa0edc17a 100644
--- a/sys/dev/cxgbe/iw_cxgbe/mem.c
+++ b/sys/dev/cxgbe/iw_cxgbe/mem.c
@@ -56,46 +56,23 @@ mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
static int
_c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr, u32 len,
- void *data, int wait)
+ dma_addr_t data, int wait)
{
struct adapter *sc = rdev->adap;
- struct ulp_mem_io *ulpmc;
- struct ulptx_sgl *sgl;
u8 wr_len;
int ret = 0;
struct c4iw_wr_wait wr_wait;
struct wrqe *wr;
- addr &= 0x7FFFFFF;
-
if (wait)
c4iw_init_wr_wait(&wr_wait);
- wr_len = roundup(sizeof *ulpmc + sizeof *sgl, 16);
+ wr_len = T4_WRITE_MEM_DMA_LEN;
wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
if (wr == NULL)
return -ENOMEM;
- ulpmc = wrtod(wr);
-
- memset(ulpmc, 0, wr_len);
- INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);
- ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
- (wait ? F_FW_WR_COMPL : 0));
- ulpmc->wr.wr_lo = wait ? (u64)(unsigned long)&wr_wait : 0;
- ulpmc->wr.wr_mid = cpu_to_be32(V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
- ulpmc->cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
- V_T5_ULP_MEMIO_ORDER(1) |
- V_T5_ULP_MEMIO_FID(sc->sge.ofld_rxq[0].iq.abs_id));
- ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(len>>5));
- ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(ulpmc->wr), 16));
- ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr));
-
- sgl = (struct ulptx_sgl *)(ulpmc + 1);
- sgl->cmd_nsge = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
- V_ULPTX_NSGE(1));
- sgl->len0 = cpu_to_be32(len);
- sgl->addr0 = cpu_to_be64((u64)data);
-
+ t4_write_mem_dma_wr(sc, wrtod(wr), wr_len, 0, addr, len, data,
+ wait ? (u64)(unsigned long)&wr_wait : 0);
t4_wrq_tx(sc, wr);
if (wait)
@@ -108,70 +85,32 @@ static int
_c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
struct adapter *sc = rdev->adap;
- struct ulp_mem_io *ulpmc;
- struct ulptx_idata *ulpsc;
- u8 wr_len, *to_dp, *from_dp;
+ u8 wr_len, *from_dp;
int copy_len, num_wqe, i, ret = 0;
struct c4iw_wr_wait wr_wait;
struct wrqe *wr;
- u32 cmd;
-
- cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
- cmd |= cpu_to_be32(F_T5_ULP_MEMIO_IMM);
-
- addr &= 0x7FFFFFF;
CTR3(KTR_IW_CXGBE, "%s addr 0x%x len %u", __func__, addr, len);
- num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
c4iw_init_wr_wait(&wr_wait);
+ num_wqe = DIV_ROUND_UP(len, T4_MAX_INLINE_SIZE);
+ from_dp = data;
for (i = 0; i < num_wqe; i++) {
-
- copy_len = min(len, C4IW_MAX_INLINE_SIZE);
- wr_len = roundup(sizeof *ulpmc + sizeof *ulpsc +
- roundup(copy_len, T4_ULPTX_MIN_IO), 16);
+ copy_len = min(len, T4_MAX_INLINE_SIZE);
+ wr_len = T4_WRITE_MEM_INLINE_LEN(copy_len);
wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
if (wr == NULL)
return -ENOMEM;
- ulpmc = wrtod(wr);
-
- memset(ulpmc, 0, wr_len);
- INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);
-
- if (i == (num_wqe-1)) {
- ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
- F_FW_WR_COMPL);
- ulpmc->wr.wr_lo =
- (__force __be64)(unsigned long) &wr_wait;
- } else
- ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR));
- ulpmc->wr.wr_mid = cpu_to_be32(
- V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
-
- ulpmc->cmd = cmd;
- ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(
- DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
- ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(ulpmc->wr),
- 16));
- ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr + i * 3));
-
- ulpsc = (struct ulptx_idata *)(ulpmc + 1);
- ulpsc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
- ulpsc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));
-
- to_dp = (u8 *)(ulpsc + 1);
- from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
- if (data)
- memcpy(to_dp, from_dp, copy_len);
- else
- memset(to_dp, 0, copy_len);
- if (copy_len % T4_ULPTX_MIN_IO)
- memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
- (copy_len % T4_ULPTX_MIN_IO));
+ t4_write_mem_inline_wr(sc, wrtod(wr), wr_len, 0, addr, copy_len,
+ from_dp, i == (num_wqe - 1) ?
+ (__force __be64)(unsigned long) &wr_wait : 0);
t4_wrq_tx(sc, wr);
- len -= C4IW_MAX_INLINE_SIZE;
- }
+ if (from_dp != NULL)
+ from_dp += T4_MAX_INLINE_SIZE;
+ addr += T4_MAX_INLINE_SIZE >> 5;
+ len -= T4_MAX_INLINE_SIZE;
+ }
ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, NULL, __func__);
return ret;
}
@@ -201,7 +140,7 @@ _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
dmalen = T4_ULPTX_MAX_DMA;
remain -= dmalen;
ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen,
- (void *)daddr, !remain);
+ daddr, !remain);
if (ret)
goto out;
addr += dmalen >> 5;
@@ -263,8 +202,8 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
stag_idx = (*stag) >> 8;
if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
- stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
- if (!stag_idx) {
+ stag_idx = t4_stag_alloc(rdev->adap, 1);
+ if (stag_idx == T4_STAG_UNSET) {
mutex_lock(&rdev->stats.lock);
rdev->stats.stag.fail++;
mutex_unlock(&rdev->stats.lock);
@@ -309,7 +248,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
sizeof(tpt), &tpt);
if (reset_tpt_entry) {
- c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
+ t4_stag_free(rdev->adap, stag_idx, 1);
mutex_lock(&rdev->stats.lock);
rdev->stats.stag.cur -= 32;
mutex_unlock(&rdev->stats.lock);
diff --git a/sys/dev/cxgbe/iw_cxgbe/resource.c b/sys/dev/cxgbe/iw_cxgbe/resource.c
index 644ea0c631bf..cd20f1eafdd6 100644
--- a/sys/dev/cxgbe/iw_cxgbe/resource.c
+++ b/sys/dev/cxgbe/iw_cxgbe/resource.c
@@ -59,13 +59,9 @@ static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
}
/* nr_* must be power of 2 */
-int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
+int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_pdid)
{
int err = 0;
- err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
- C4IW_ID_TABLE_F_RANDOM);
- if (err)
- goto tpt_err;
err = c4iw_init_qid_table(rdev);
if (err)
goto qid_err;
@@ -77,8 +73,6 @@ int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
pdid_err:
c4iw_id_table_free(&rdev->resource.qid_table);
qid_err:
- c4iw_id_table_free(&rdev->resource.tpt_table);
- tpt_err:
return -ENOMEM;
}
@@ -243,7 +237,6 @@ void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
- c4iw_id_table_free(&rscp->tpt_table);
c4iw_id_table_free(&rscp->qid_table);
c4iw_id_table_free(&rscp->pdid_table);
}
@@ -254,12 +247,9 @@ void c4iw_destroy_resource(struct c4iw_resource *rscp)
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
- unsigned long addr;
+ u32 addr;
- vmem_xalloc(rdev->pbl_arena, roundup(size, (1 << MIN_PBL_SHIFT)),
- 4, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
- M_FIRSTFIT|M_NOWAIT, &addr);
- CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, (u32)addr, size);
+ addr = t4_pblpool_alloc(rdev->adap, size);
mutex_lock(&rdev->stats.lock);
if (addr) {
rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
@@ -268,33 +258,15 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
} else
rdev->stats.pbl.fail++;
mutex_unlock(&rdev->stats.lock);
- return (u32)addr;
+ return addr;
}
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
- CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, addr, size);
mutex_lock(&rdev->stats.lock);
rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
mutex_unlock(&rdev->stats.lock);
- vmem_xfree(rdev->pbl_arena, addr, roundup(size,(1 << MIN_PBL_SHIFT)));
-}
-
-int c4iw_pblpool_create(struct c4iw_rdev *rdev)
-{
- rdev->pbl_arena = vmem_create("PBL_MEM_POOL",
- rdev->adap->vres.pbl.start,
- rdev->adap->vres.pbl.size,
- 1, 0, M_FIRSTFIT| M_NOWAIT);
- if (!rdev->pbl_arena)
- return -ENOMEM;
-
- return 0;
-}
-
-void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
-{
- vmem_destroy(rdev->pbl_arena);
+ t4_pblpool_free(rdev->adap, addr, size);
}
/* RQT Memory Manager. */
diff --git a/sys/dev/cxgbe/iw_cxgbe/t4.h b/sys/dev/cxgbe/iw_cxgbe/t4.h
index 48f85cf7965b..ffb610420640 100644
--- a/sys/dev/cxgbe/iw_cxgbe/t4.h
+++ b/sys/dev/cxgbe/iw_cxgbe/t4.h
@@ -64,7 +64,6 @@
#define T4_MAX_NUM_PD 65536
#define T4_MAX_MR_SIZE (~0ULL)
#define T4_PAGESIZE_MASK 0xffffffff000 /* 4KB-8TB */
-#define T4_STAG_UNSET 0xffffffff
#define T4_FW_MAJ 0
#define A_PCIE_MA_SYNC 0x30b4
diff --git a/sys/dev/cxgbe/offload.h b/sys/dev/cxgbe/offload.h
index b57c03f076b5..91a43785aaca 100644
--- a/sys/dev/cxgbe/offload.h
+++ b/sys/dev/cxgbe/offload.h
@@ -229,7 +229,17 @@ struct iw_tunables {
struct tls_tunables {
int inline_keys;
- int combo_wrs;
+ union {
+ struct {
+ /* T6 only. */
+ int combo_wrs;
+ };
+ struct {
+ /* T7 only. */
+ int short_records;
+ int partial_ghash;
+ };
+ };
};
#ifdef TCP_OFFLOAD
diff --git a/sys/dev/cxgbe/t4_filter.c b/sys/dev/cxgbe/t4_filter.c
index 8d4552116d96..4b583b67ba07 100644
--- a/sys/dev/cxgbe/t4_filter.c
+++ b/sys/dev/cxgbe/t4_filter.c
@@ -322,48 +322,85 @@ remove_hftid(struct adapter *sc, struct filter_entry *f)
LIST_REMOVE(f, link_tid);
}
-/*
- * Input: driver's 32b filter mode.
- * Returns: hardware filter mode (bits to set in vlan_pri_map) for the input.
- */
static uint16_t
-mode_to_fconf(uint32_t mode)
+mode_to_fconf_t4(uint32_t mode)
{
uint32_t fconf = 0;
if (mode & T4_FILTER_IP_FRAGMENT)
fconf |= F_FRAGMENTATION;
-
if (mode & T4_FILTER_MPS_HIT_TYPE)
fconf |= F_MPSHITTYPE;
-
if (mode & T4_FILTER_MAC_IDX)
fconf |= F_MACMATCH;
-
if (mode & T4_FILTER_ETH_TYPE)
fconf |= F_ETHERTYPE;
-
if (mode & T4_FILTER_IP_PROTO)
fconf |= F_PROTOCOL;
-
if (mode & T4_FILTER_IP_TOS)
fconf |= F_TOS;
-
if (mode & T4_FILTER_VLAN)
fconf |= F_VLAN;
-
if (mode & T4_FILTER_VNIC)
fconf |= F_VNIC_ID;
-
if (mode & T4_FILTER_PORT)
fconf |= F_PORT;
-
if (mode & T4_FILTER_FCoE)
fconf |= F_FCOE;
return (fconf);
}
+static uint16_t
+mode_to_fconf_t7(uint32_t mode)
+{
+ uint32_t fconf = 0;
+
+ if (mode & T4_FILTER_TCPFLAGS)
+ fconf |= F_TCPFLAGS;
+ if (mode & T4_FILTER_SYNONLY)
+ fconf |= F_SYNONLY;
+ if (mode & T4_FILTER_ROCE)
+ fconf |= F_ROCE;
+ if (mode & T4_FILTER_IP_FRAGMENT)
+ fconf |= F_T7_FRAGMENTATION;
+ if (mode & T4_FILTER_MPS_HIT_TYPE)
+ fconf |= F_T7_MPSHITTYPE;
+ if (mode & T4_FILTER_MAC_IDX)
+ fconf |= F_T7_MACMATCH;
+ if (mode & T4_FILTER_ETH_TYPE)
+ fconf |= F_T7_ETHERTYPE;
+ if (mode & T4_FILTER_IP_PROTO)
+ fconf |= F_T7_PROTOCOL;
+ if (mode & T4_FILTER_IP_TOS)
+ fconf |= F_T7_TOS;
+ if (mode & T4_FILTER_VLAN)
+ fconf |= F_T7_VLAN;
+ if (mode & T4_FILTER_VNIC)
+ fconf |= F_T7_VNIC_ID;
+ if (mode & T4_FILTER_PORT)
+ fconf |= F_T7_PORT;
+ if (mode & T4_FILTER_FCoE)
+ fconf |= F_T7_FCOE;
+ if (mode & T4_FILTER_IPSECIDX)
+ fconf |= F_IPSECIDX;
+
+ return (fconf);
+}
+
+/*
+ * Input: driver's 32b filter mode.
+ * Returns: hardware filter mode (bits to set in vlan_pri_map) for the input.
+ */
+static uint16_t
+mode_to_fconf(struct adapter *sc, uint32_t mode)
+{
+ if (chip_id(sc) >= CHELSIO_T7)
+ return (mode_to_fconf_t7(mode));
+ else
+ return (mode_to_fconf_t4(mode));
+}
+
/*
* Input: driver's 32b filter mode.
* Returns: hardware vnic mode (ingress config) matching the input.
@@ -389,65 +426,100 @@ check_fspec_against_fconf_iconf(struct adapter *sc,
struct tp_params *tpp = &sc->params.tp;
uint32_t fconf = 0;
- if (fs->val.frag || fs->mask.frag)
- fconf |= F_FRAGMENTATION;
-
- if (fs->val.matchtype || fs->mask.matchtype)
- fconf |= F_MPSHITTYPE;
-
- if (fs->val.macidx || fs->mask.macidx)
- fconf |= F_MACMATCH;
-
- if (fs->val.ethtype || fs->mask.ethtype)
- fconf |= F_ETHERTYPE;
-
- if (fs->val.proto || fs->mask.proto)
- fconf |= F_PROTOCOL;
-
- if (fs->val.tos || fs->mask.tos)
- fconf |= F_TOS;
-
- if (fs->val.vlan_vld || fs->mask.vlan_vld)
- fconf |= F_VLAN;
-
- if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
- if (tpp->vnic_mode != FW_VNIC_MODE_OUTER_VLAN)
- return (EINVAL);
- fconf |= F_VNIC_ID;
- }
-
- if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
- if (tpp->vnic_mode != FW_VNIC_MODE_PF_VF)
- return (EINVAL);
- fconf |= F_VNIC_ID;
- }
-
+ if (chip_id(sc) >= CHELSIO_T7) {
+ if (fs->val.tcpflags || fs->mask.tcpflags)
+ fconf |= F_TCPFLAGS;
+ if (fs->val.synonly || fs->mask.synonly)
+ fconf |= F_SYNONLY;
+ if (fs->val.roce || fs->mask.roce)
+ fconf |= F_ROCE;
+ if (fs->val.frag || fs->mask.frag)
+ fconf |= F_T7_FRAGMENTATION;
+ if (fs->val.matchtype || fs->mask.matchtype)
+ fconf |= F_T7_MPSHITTYPE;
+ if (fs->val.macidx || fs->mask.macidx)
+ fconf |= F_T7_MACMATCH;
+ if (fs->val.ethtype || fs->mask.ethtype)
+ fconf |= F_T7_ETHERTYPE;
+ if (fs->val.proto || fs->mask.proto)
+ fconf |= F_T7_PROTOCOL;
+ if (fs->val.tos || fs->mask.tos)
+ fconf |= F_T7_TOS;
+ if (fs->val.vlan_vld || fs->mask.vlan_vld)
+ fconf |= F_T7_VLAN;
+ if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
+ if (tpp->vnic_mode != FW_VNIC_MODE_OUTER_VLAN)
+ return (EINVAL);
+ fconf |= F_T7_VNIC_ID;
+ }
+ if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
+ if (tpp->vnic_mode != FW_VNIC_MODE_PF_VF)
+ return (EINVAL);
+ fconf |= F_T7_VNIC_ID;
+ }
#ifdef notyet
- if (fs->val.encap_vld || fs->mask.encap_vld) {
- if (tpp->vnic_mode != FW_VNIC_MODE_ENCAP_EN);
+ if (fs->val.encap_vld || fs->mask.encap_vld) {
+ if (tpp->vnic_mode != FW_VNIC_MODE_ENCAP_EN);
+ return (EINVAL);
+ fconf |= F_T7_VNIC_ID;
+ }
+#endif
+ if (fs->val.iport || fs->mask.iport)
+ fconf |= F_T7_PORT;
+ if (fs->val.fcoe || fs->mask.fcoe)
+ fconf |= F_T7_FCOE;
+ if (fs->val.ipsecidx || fs->mask.ipsecidx)
+ fconf |= F_IPSECIDX;
+ } else {
+ if (fs->val.tcpflags || fs->mask.tcpflags ||
+ fs->val.synonly || fs->mask.synonly ||
+ fs->val.roce || fs->mask.roce ||
+ fs->val.ipsecidx || fs->mask.ipsecidx)
return (EINVAL);
- fconf |= F_VNIC_ID;
- }
+ if (fs->val.frag || fs->mask.frag)
+ fconf |= F_FRAGMENTATION;
+ if (fs->val.matchtype || fs->mask.matchtype)
+ fconf |= F_MPSHITTYPE;
+ if (fs->val.macidx || fs->mask.macidx)
+ fconf |= F_MACMATCH;
+ if (fs->val.ethtype || fs->mask.ethtype)
+ fconf |= F_ETHERTYPE;
+ if (fs->val.proto || fs->mask.proto)
+ fconf |= F_PROTOCOL;
+ if (fs->val.tos || fs->mask.tos)
+ fconf |= F_TOS;
+ if (fs->val.vlan_vld || fs->mask.vlan_vld)
+ fconf |= F_VLAN;
+ if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
+ if (tpp->vnic_mode != FW_VNIC_MODE_OUTER_VLAN)
+ return (EINVAL);
+ fconf |= F_VNIC_ID;
+ }
+ if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
+ if (tpp->vnic_mode != FW_VNIC_MODE_PF_VF)
+ return (EINVAL);
+ fconf |= F_VNIC_ID;
+ }
+#ifdef notyet
+ if (fs->val.encap_vld || fs->mask.encap_vld) {
+ if (tpp->vnic_mode != FW_VNIC_MODE_ENCAP_EN);
+ return (EINVAL);
+ fconf |= F_VNIC_ID;
+ }
#endif
-
- if (fs->val.iport || fs->mask.iport)
- fconf |= F_PORT;
-
- if (fs->val.fcoe || fs->mask.fcoe)
- fconf |= F_FCOE;
-
+ if (fs->val.iport || fs->mask.iport)
+ fconf |= F_PORT;
+ if (fs->val.fcoe || fs->mask.fcoe)
+ fconf |= F_FCOE;
+ }
if ((tpp->filter_mode | fconf) != tpp->filter_mode)
return (E2BIG);
return (0);
}
-/*
- * Input: hardware filter configuration (filter mode/mask, ingress config).
- * Input: driver's 32b filter mode matching the input.
- */
static uint32_t
-fconf_to_mode(uint16_t hwmode, int vnic_mode)
+fconf_to_mode_t4(uint16_t hwmode, int vnic_mode)
{
uint32_t mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
@@ -488,6 +560,69 @@ fconf_to_mode(uint16_t hwmode, int vnic_mode)
return (mode);
}
+static uint32_t
+fconf_to_mode_t7(uint16_t hwmode, int vnic_mode)
+{
+ uint32_t mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
+ T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
+
+ if (hwmode & F_TCPFLAGS)
+ mode |= T4_FILTER_TCPFLAGS;
+ if (hwmode & F_SYNONLY)
+ mode |= T4_FILTER_SYNONLY;
+ if (hwmode & F_ROCE)
+ mode |= T4_FILTER_ROCE;
+ if (hwmode & F_T7_FRAGMENTATION)
+ mode |= T4_FILTER_IP_FRAGMENT;
+ if (hwmode & F_T7_MPSHITTYPE)
+ mode |= T4_FILTER_MPS_HIT_TYPE;
+ if (hwmode & F_T7_MACMATCH)
+ mode |= T4_FILTER_MAC_IDX;
+ if (hwmode & F_T7_ETHERTYPE)
+ mode |= T4_FILTER_ETH_TYPE;
+ if (hwmode & F_T7_PROTOCOL)
+ mode |= T4_FILTER_IP_PROTO;
+ if (hwmode & F_T7_TOS)
+ mode |= T4_FILTER_IP_TOS;
+ if (hwmode & F_T7_VLAN)
+ mode |= T4_FILTER_VLAN;
+ if (hwmode & F_T7_VNIC_ID)
+ mode |= T4_FILTER_VNIC; /* real meaning depends on vnic_mode. */
+ if (hwmode & F_T7_PORT)
+ mode |= T4_FILTER_PORT;
+ if (hwmode & F_T7_FCOE)
+ mode |= T4_FILTER_FCoE;
+ if (hwmode & F_IPSECIDX)
+ mode |= T4_FILTER_IPSECIDX;
+
+ switch (vnic_mode) {
+ case FW_VNIC_MODE_PF_VF:
+ mode |= T4_FILTER_IC_VNIC;
+ break;
+ case FW_VNIC_MODE_ENCAP_EN:
+ mode |= T4_FILTER_IC_ENCAP;
+ break;
+ case FW_VNIC_MODE_OUTER_VLAN:
+ default:
+ break;
+ }
+
+ return (mode);
+}
+
+/*
+ * Input: hardware filter configuration (filter mode/mask, ingress config).
+ * Output: driver's 32b filter mode matching the input.
+ */
+static inline uint32_t
+fconf_to_mode(struct adapter *sc, uint16_t hwmode, int vnic_mode)
+{
+ if (chip_id(sc) >= CHELSIO_T7)
+ return (fconf_to_mode_t7(hwmode, vnic_mode));
+ else
+ return (fconf_to_mode_t4(hwmode, vnic_mode));
+}
+
int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
@@ -499,7 +634,7 @@ get_filter_mode(struct adapter *sc, uint32_t *mode)
/* Non-zero incoming value in mode means "hashfilter mode". */
filter_mode = *mode ? tp->filter_mask : tp->filter_mode;
- *mode = fconf_to_mode(filter_mode, tp->vnic_mode);
+ *mode = fconf_to_mode(sc, filter_mode, tp->vnic_mode);
return (0);
}
@@ -512,7 +647,7 @@ set_filter_mode(struct adapter *sc, uint32_t mode)
uint16_t fconf;
iconf = mode_to_iconf(mode);
- fconf = mode_to_fconf(mode);
+ fconf = mode_to_fconf(sc, mode);
if ((iconf == -1 || iconf == tp->vnic_mode) && fconf == tp->filter_mode)
return (0); /* Nothing to do */
@@ -554,7 +689,7 @@ set_filter_mask(struct adapter *sc, uint32_t mode)
uint16_t fmask;
iconf = mode_to_iconf(mode);
- fmask = mode_to_fconf(mode);
+ fmask = mode_to_fconf(sc, mode);
if ((iconf == -1 || iconf == tp->vnic_mode) && fmask == tp->filter_mask)
return (0); /* Nothing to do */
@@ -811,71 +946,138 @@ hashfilter_ntuple(struct adapter *sc, const struct t4_filter_specification *fs,
struct tp_params *tp = &sc->params.tp;
uint16_t fmask;
- *ftuple = fmask = 0;
-
/*
* Initialize each of the fields which we care about which are present
* in the Compressed Filter Tuple.
*/
- if (tp->vlan_shift >= 0 && fs->mask.vlan) {
- *ftuple |= (uint64_t)(F_FT_VLAN_VLD | fs->val.vlan) <<
- tp->vlan_shift;
- fmask |= F_VLAN;
- }
-
- if (tp->port_shift >= 0 && fs->mask.iport) {
- *ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
- fmask |= F_PORT;
- }
-
- if (tp->protocol_shift >= 0 && fs->mask.proto) {
- *ftuple |= (uint64_t)fs->val.proto << tp->protocol_shift;
- fmask |= F_PROTOCOL;
- }
-
- if (tp->tos_shift >= 0 && fs->mask.tos) {
- *ftuple |= (uint64_t)(fs->val.tos) << tp->tos_shift;
- fmask |= F_TOS;
- }
-
- if (tp->vnic_shift >= 0 && fs->mask.vnic) {
- /* vnic_mode was already validated. */
- if (tp->vnic_mode == FW_VNIC_MODE_PF_VF)
- MPASS(fs->mask.pfvf_vld);
- else if (tp->vnic_mode == FW_VNIC_MODE_OUTER_VLAN)
- MPASS(fs->mask.ovlan_vld);
+#define SFF(V, S) ((uint64_t)(V) << S) /* Shifted Filter Field. */
+ *ftuple = fmask = 0;
+ if (chip_id(sc) >= CHELSIO_T7) {
+ if (tp->ipsecidx_shift >= 0 && fs->mask.ipsecidx) {
+ *ftuple |= SFF(fs->val.ipsecidx, tp->ipsecidx_shift);
+ fmask |= F_IPSECIDX;
+ }
+ if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
+ *ftuple |= SFF(fs->val.fcoe, tp->fcoe_shift);
+ fmask |= F_T7_FCOE;
+ }
+ if (tp->port_shift >= 0 && fs->mask.iport) {
+ *ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
+ fmask |= F_T7_PORT;
+ }
+ if (tp->vnic_shift >= 0 && fs->mask.vnic) {
+ /* vnic_mode was already validated. */
+ if (tp->vnic_mode == FW_VNIC_MODE_PF_VF)
+ MPASS(fs->mask.pfvf_vld);
+ else if (tp->vnic_mode == FW_VNIC_MODE_OUTER_VLAN)
+ MPASS(fs->mask.ovlan_vld);
#ifdef notyet
- else if (tp->vnic_mode == FW_VNIC_MODE_ENCAP_EN)
- MPASS(fs->mask.encap_vld);
+ else if (tp->vnic_mode == FW_VNIC_MODE_ENCAP_EN)
+ MPASS(fs->mask.encap_vld);
#endif
- *ftuple |= ((1ULL << 16) | fs->val.vnic) << tp->vnic_shift;
- fmask |= F_VNIC_ID;
- }
-
- if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
- *ftuple |= (uint64_t)(fs->val.macidx) << tp->macmatch_shift;
- fmask |= F_MACMATCH;
- }
-
- if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
- *ftuple |= (uint64_t)(fs->val.ethtype) << tp->ethertype_shift;
- fmask |= F_ETHERTYPE;
- }
-
- if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
- *ftuple |= (uint64_t)(fs->val.matchtype) << tp->matchtype_shift;
- fmask |= F_MPSHITTYPE;
- }
-
- if (tp->frag_shift >= 0 && fs->mask.frag) {
- *ftuple |= (uint64_t)(fs->val.frag) << tp->frag_shift;
- fmask |= F_FRAGMENTATION;
- }
-
- if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
- *ftuple |= (uint64_t)(fs->val.fcoe) << tp->fcoe_shift;
- fmask |= F_FCOE;
+ *ftuple |= SFF(F_FT_VNID_ID_VLD | fs->val.vnic, tp->vnic_shift);
+ fmask |= F_T7_VNIC_ID;
+ }
+ if (tp->vlan_shift >= 0 && fs->mask.vlan) {
+ *ftuple |= SFF(F_FT_VLAN_VLD | fs->val.vlan, tp->vlan_shift);
+ fmask |= F_T7_VLAN;
+ }
+ if (tp->tos_shift >= 0 && fs->mask.tos) {
+ *ftuple |= SFF(fs->val.tos, tp->tos_shift);
+ fmask |= F_T7_TOS;
+ }
+ if (tp->protocol_shift >= 0 && fs->mask.proto) {
+ *ftuple |= SFF(fs->val.proto, tp->protocol_shift);
+ fmask |= F_T7_PROTOCOL;
+ }
+ if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
+ *ftuple |= SFF(fs->val.ethtype, tp->ethertype_shift);
+ fmask |= F_T7_ETHERTYPE;
+ }
+ if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
+ *ftuple |= SFF(fs->val.macidx, tp->macmatch_shift);
+ fmask |= F_T7_MACMATCH;
+ }
+ if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
+ *ftuple |= SFF(fs->val.matchtype, tp->matchtype_shift);
+ fmask |= F_T7_MPSHITTYPE;
+ }
+ if (tp->frag_shift >= 0 && fs->mask.frag) {
+ *ftuple |= SFF(fs->val.frag, tp->frag_shift);
+ fmask |= F_T7_FRAGMENTATION;
+ }
+ if (tp->roce_shift >= 0 && fs->mask.roce) {
+ *ftuple |= SFF(fs->val.roce, tp->roce_shift);
+ fmask |= F_ROCE;
+ }
+ if (tp->synonly_shift >= 0 && fs->mask.synonly) {
+ *ftuple |= SFF(fs->val.synonly, tp->synonly_shift);
+ fmask |= F_SYNONLY;
+ }
+ if (tp->tcpflags_shift >= 0 && fs->mask.tcpflags) {
+ /* Shift by tcpflags_shift; using synonly_shift here was a
+ * copy-paste error that packed tcpflags at the wrong bit
+ * position in the compressed filter tuple. */
+ *ftuple |= SFF(fs->val.tcpflags, tp->tcpflags_shift);
+ fmask |= F_TCPFLAGS;
+ }
+ } else {
+ if (fs->mask.ipsecidx || fs->mask.roce || fs->mask.synonly ||
+ fs->mask.tcpflags) {
+ MPASS(tp->ipsecidx_shift == -1);
+ MPASS(tp->roce_shift == -1);
+ MPASS(tp->synonly_shift == -1);
+ MPASS(tp->tcpflags_shift == -1);
+ return (EINVAL);
+ }
+ if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
+ *ftuple |= SFF(fs->val.fcoe, tp->fcoe_shift);
+ fmask |= F_FCOE;
+ }
+ if (tp->port_shift >= 0 && fs->mask.iport) {
+ *ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
+ fmask |= F_PORT;
+ }
+ if (tp->vnic_shift >= 0 && fs->mask.vnic) {
+ /* vnic_mode was already validated. */
+ if (tp->vnic_mode == FW_VNIC_MODE_PF_VF)
+ MPASS(fs->mask.pfvf_vld);
+ else if (tp->vnic_mode == FW_VNIC_MODE_OUTER_VLAN)
+ MPASS(fs->mask.ovlan_vld);
+#ifdef notyet
+ else if (tp->vnic_mode == FW_VNIC_MODE_ENCAP_EN)
+ MPASS(fs->mask.encap_vld);
+#endif
+ *ftuple |= SFF(F_FT_VNID_ID_VLD | fs->val.vnic, tp->vnic_shift);
+ fmask |= F_VNIC_ID;
+ }
+ if (tp->vlan_shift >= 0 && fs->mask.vlan) {
+ *ftuple |= SFF(F_FT_VLAN_VLD | fs->val.vlan, tp->vlan_shift);
+ fmask |= F_VLAN;
+ }
+ if (tp->tos_shift >= 0 && fs->mask.tos) {
+ *ftuple |= SFF(fs->val.tos, tp->tos_shift);
+ fmask |= F_TOS;
+ }
+ if (tp->protocol_shift >= 0 && fs->mask.proto) {
+ *ftuple |= SFF(fs->val.proto, tp->protocol_shift);
+ fmask |= F_PROTOCOL;
+ }
+ if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
+ *ftuple |= SFF(fs->val.ethtype, tp->ethertype_shift);
+ fmask |= F_ETHERTYPE;
+ }
+ if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
+ *ftuple |= SFF(fs->val.macidx, tp->macmatch_shift);
+ fmask |= F_MACMATCH;
+ }
+ if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
+ *ftuple |= SFF(fs->val.matchtype, tp->matchtype_shift);
+ fmask |= F_MPSHITTYPE;
+ }
+ if (tp->frag_shift >= 0 && fs->mask.frag) {
+ *ftuple |= SFF(fs->val.frag, tp->frag_shift);
+ fmask |= F_FRAGMENTATION;
+ }
}
+#undef SFF
/* A hashfilter must conform to the hardware filter mask. */
if (fmask != tp->filter_mask)
@@ -1195,11 +1397,19 @@ set_tcb_field(struct adapter *sc, u_int tid, uint16_t word, uint64_t mask,
return (ENOMEM);
bzero(req, sizeof(*req));
INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid);
- if (no_reply == 0) {
- req->reply_ctrl = htobe16(V_QUEUENO(sc->sge.fwq.abs_id) |
- V_NO_REPLY(0));
- } else
- req->reply_ctrl = htobe16(V_NO_REPLY(1));
+ if (no_reply) {
+ req->reply_ctrl = htobe16(F_NO_REPLY);
+ } else {
+ const int qid = sc->sge.fwq.abs_id;
+
+ if (chip_id(sc) >= CHELSIO_T7) {
+ req->reply_ctrl = htobe16(V_T7_QUEUENO(qid) |
+ V_T7_REPLY_CHAN(0) | V_NO_REPLY(0));
+ } else {
+ req->reply_ctrl = htobe16(V_QUEUENO(qid) |
+ V_REPLY_CHAN(0) | V_NO_REPLY(0));
+ }
+ }
req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(CPL_COOKIE_HASHFILTER));
req->mask = htobe64(mask);
req->val = htobe64(val);
@@ -1594,7 +1804,7 @@ static int
act_open_cpl_len16(struct adapter *sc, int isipv6)
{
int idx;
- static const int sz_table[3][2] = {
+ static const int sz_table[4][2] = {
{
howmany(sizeof (struct cpl_act_open_req), 16),
howmany(sizeof (struct cpl_act_open_req6), 16)
@@ -1607,10 +1817,14 @@ act_open_cpl_len16(struct adapter *sc, int isipv6)
howmany(sizeof (struct cpl_t6_act_open_req), 16),
howmany(sizeof (struct cpl_t6_act_open_req6), 16)
},
+ {
+ howmany(sizeof (struct cpl_t7_act_open_req), 16),
+ howmany(sizeof (struct cpl_t7_act_open_req6), 16)
+ },
};
MPASS(chip_id(sc) >= CHELSIO_T4);
- idx = min(chip_id(sc) - CHELSIO_T4, 2);
+ idx = min(chip_id(sc) - CHELSIO_T4, 3);
return (sz_table[idx][!!isipv6]);
}
diff --git a/sys/dev/cxgbe/t4_ioctl.h b/sys/dev/cxgbe/t4_ioctl.h
index ba9a17dbaddf..f7c8ee24d596 100644
--- a/sys/dev/cxgbe/t4_ioctl.h
+++ b/sys/dev/cxgbe/t4_ioctl.h
@@ -64,6 +64,7 @@ enum {
T4_SET_FILTER_MASK, /* set filter mask (hashfilter mode) */
T4_HOLD_CLIP_ADDR, /* add ref on an IP in the CLIP */
T4_RELEASE_CLIP_ADDR, /* remove ref from an IP in the CLIP */
+ T4_GET_SGE_CTXT, /* get SGE context for a queue */
};
struct t4_reg {
@@ -119,6 +120,10 @@ struct t4_i2c_data {
#define T4_FILTER_MAC_IDX 0x2000 /* MPS MAC address match index */
#define T4_FILTER_MPS_HIT_TYPE 0x4000 /* MPS match type */
#define T4_FILTER_IP_FRAGMENT 0x8000 /* IP fragment */
+#define T4_FILTER_IPSECIDX 0x10000
+#define T4_FILTER_ROCE 0x20000
+#define T4_FILTER_SYNONLY 0x40000
+#define T4_FILTER_TCPFLAGS 0x80000
/*
* T4_FILTER_VNIC's real meaning depends on the ingress config.
*/
@@ -199,6 +204,10 @@ struct t4_filter_tuple {
uint32_t vlan_vld:1; /* VLAN valid */
uint32_t ovlan_vld:1; /* outer VLAN tag valid, value in "vnic" */
uint32_t pfvf_vld:1; /* VNIC id (PF/VF) valid, value in "vnic" */
+ uint32_t roce:1;
+ uint32_t synonly:1;
+ uint32_t tcpflags:6;
+ uint32_t ipsecidx:12;
};
struct t4_filter_specification {
@@ -322,6 +331,7 @@ struct t4_sched_queue {
};
#define T4_SGE_CONTEXT_SIZE 24
+#define T7_SGE_CONTEXT_SIZE 28
enum {
SGE_CONTEXT_EGRESS,
SGE_CONTEXT_INGRESS,
@@ -335,6 +345,12 @@ struct t4_sge_context {
uint32_t data[T4_SGE_CONTEXT_SIZE / 4];
};
+struct t4_sge_ctxt {
+ uint32_t mem_id;
+ uint32_t cid;
+ uint32_t data[T7_SGE_CONTEXT_SIZE / 4];
+};
+
struct t4_mem_range {
uint32_t addr;
uint32_t len;
@@ -444,4 +460,5 @@ struct t4_clip_addr {
#define CHELSIO_T4_SET_FILTER_MASK _IOW('f', T4_SET_FILTER_MASK, uint32_t)
#define CHELSIO_T4_HOLD_CLIP_ADDR _IOW('f', T4_HOLD_CLIP_ADDR, struct t4_clip_addr)
#define CHELSIO_T4_RELEASE_CLIP_ADDR _IOW('f', T4_RELEASE_CLIP_ADDR, struct t4_clip_addr)
+#define CHELSIO_T4_GET_SGE_CTXT _IOWR('f', T4_GET_SGE_CTXT, struct t4_sge_ctxt)
#endif
diff --git a/sys/dev/cxgbe/t4_iov.c b/sys/dev/cxgbe/t4_iov.c
index bfd1613e9795..452ebaaf0172 100644
--- a/sys/dev/cxgbe/t4_iov.c
+++ b/sys/dev/cxgbe/t4_iov.c
@@ -119,6 +119,28 @@ struct {
{0x6085, "Chelsio T6240-SO 85"},
{0x6086, "Chelsio T6225-SO-CR 86"},
{0x6087, "Chelsio T6225-CR 87"},
+}, t7iov_pciids[] = {
+ {0xd000, "Chelsio Terminator 7 FPGA"}, /* T7 PE12K FPGA */
+ {0x7000, "Chelsio T72200-DBG"}, /* 2 x 200G, debug */
+ {0x7001, "Chelsio T7250"}, /* 2 x 10/25/50G, 1 mem */
+ {0x7002, "Chelsio S7250"}, /* 2 x 10/25/50G, nomem */
+ {0x7003, "Chelsio T7450"}, /* 4 x 10/25/50G, 1 mem */
+ {0x7004, "Chelsio S7450"}, /* 4 x 10/25/50G, nomem */
+ {0x7005, "Chelsio T72200"}, /* 2 x 40/100/200G, 1 mem */
+ {0x7006, "Chelsio S72200"}, /* 2 x 40/100/200G, nomem */
+ {0x7007, "Chelsio T72200-FH"}, /* 2 x 40/100/200G, 2 mem */
+ {0x7008, "Chelsio T71400"}, /* 1 x 400G, nomem */
+ {0x7009, "Chelsio S7210-BT"}, /* 2 x 10GBASE-T, nomem */
+ {0x700a, "Chelsio T7450-RC"}, /* 4 x 10/25/50G, 1 mem, RC */
+ {0x700b, "Chelsio T72200-RC"}, /* 2 x 40/100/200G, 1 mem, RC */
+ {0x700c, "Chelsio T72200-FH-RC"}, /* 2 x 40/100/200G, 2 mem, RC */
+ {0x700d, "Chelsio S72200-OCP3"}, /* 2 x 40/100/200G OCP3 */
+ {0x700e, "Chelsio S7450-OCP3"}, /* 4 x 1/20/25/50G OCP3 */
+ {0x700f, "Chelsio S7410-BT-OCP3"}, /* 4 x 10GBASE-T OCP3 */
+ {0x7010, "Chelsio S7210-BT-A"}, /* 2 x 10GBASE-T */
+ {0x7011, "Chelsio T7_MAYRA_7"}, /* Motherboard */
+
+ {0x7080, "Custom T7"},
};
static inline uint32_t
@@ -191,6 +213,26 @@ t6iov_probe(device_t dev)
}
+/*
+ * Probe for the T7 virtualization companion driver ("chiov", the T7
+ * counterpart of t4iov/t5iov/t6iov).  Matches the Chelsio PCI vendor ID
+ * and a device ID from the t7iov_pciids table, sets the description, and
+ * quiets the device.
+ */
static int
+chiov_probe(device_t dev)
+{
+ uint16_t d;
+ size_t i;
+
+ if (pci_get_vendor(dev) != PCI_VENDOR_ID_CHELSIO)
+ return (ENXIO);
+
+ d = pci_get_device(dev);
+ for (i = 0; i < nitems(t7iov_pciids); i++) {
+ if (d == t7iov_pciids[i].device) {
+ device_set_desc(dev, t7iov_pciids[i].desc);
+ device_quiet(dev);
+ return (BUS_PROBE_DEFAULT);
+ }
+ }
+ return (ENXIO);
+}
+
+static int
t4iov_attach(device_t dev)
{
struct t4iov_softc *sc;
@@ -460,6 +502,28 @@ static driver_t t6iov_driver = {
sizeof(struct t4iov_softc)
};
+static device_method_t chiov_methods[] = {
+ DEVMETHOD(device_probe, chiov_probe),
+ DEVMETHOD(device_attach, t4iov_attach),
+ DEVMETHOD(device_detach, t4iov_detach),
+
+#ifdef PCI_IOV
+ DEVMETHOD(pci_iov_init, t4iov_iov_init),
+ DEVMETHOD(pci_iov_uninit, t4iov_iov_uninit),
+ DEVMETHOD(pci_iov_add_vf, t4iov_add_vf),
+#endif
+
+ DEVMETHOD(t4_attach_child, t4iov_attach_child),
+ DEVMETHOD(t4_detach_child, t4iov_detach_child),
+
+ DEVMETHOD_END
+};
+
+static driver_t chiov_driver = {
+ "chiov",
+ chiov_methods,
+ sizeof(struct t4iov_softc)
+};
DRIVER_MODULE(t4iov, pci, t4iov_driver, 0, 0);
MODULE_VERSION(t4iov, 1);
@@ -468,3 +532,6 @@ MODULE_VERSION(t5iov, 1);
DRIVER_MODULE(t6iov, pci, t6iov_driver, 0, 0);
MODULE_VERSION(t6iov, 1);
+
+DRIVER_MODULE(chiov, pci, chiov_driver, 0, 0);
+MODULE_VERSION(chiov, 1);
diff --git a/sys/dev/cxgbe/t4_l2t.c b/sys/dev/cxgbe/t4_l2t.c
index b1307bf2ace5..5f9c26a0f720 100644
--- a/sys/dev/cxgbe/t4_l2t.c
+++ b/sys/dev/cxgbe/t4_l2t.c
@@ -119,7 +119,7 @@ find_or_alloc_l2e(struct l2t_data *d, uint16_t vlan, uint8_t port, uint8_t *dmac
first_free = e;
} else if (e->state == L2T_STATE_SWITCHING &&
memcmp(e->dmac, dmac, ETHER_ADDR_LEN) == 0 &&
- e->vlan == vlan && e->lport == port)
+ e->vlan == vlan && e->hw_port == port)
return (e); /* Found existing entry that matches. */
}
@@ -156,7 +156,7 @@ mk_write_l2e(struct adapter *sc, struct l2t_entry *e, int sync, int reply,
INIT_TP_WR(req, 0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, idx |
V_SYNC_WR(sync) | V_TID_QID(e->iqid)));
- req->params = htons(V_L2T_W_PORT(e->lport) | V_L2T_W_NOREPLY(!reply));
+ req->params = htons(V_L2T_W_PORT(e->hw_port) | V_L2T_W_NOREPLY(!reply));
req->l2t_idx = htons(idx);
req->vlan = htons(e->vlan);
memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
@@ -227,7 +227,7 @@ t4_l2t_alloc_tls(struct adapter *sc, struct sge_txq *txq, void *dst,
e = &d->l2tab[i];
if (e->state != L2T_STATE_TLS)
continue;
- if (e->vlan == vlan && e->lport == port &&
+ if (e->vlan == vlan && e->hw_port == port &&
e->wrq == (struct sge_wrq *)txq &&
memcmp(e->dmac, eth_addr, ETHER_ADDR_LEN) == 0) {
if (atomic_fetchadd_int(&e->refcnt, 1) == 0) {
@@ -263,7 +263,7 @@ t4_l2t_alloc_tls(struct adapter *sc, struct sge_txq *txq, void *dst,
/* Initialize the entry. */
e->state = L2T_STATE_TLS;
e->vlan = vlan;
- e->lport = port;
+ e->hw_port = port;
e->iqid = sc->sge.fwq.abs_id;
e->wrq = (struct sge_wrq *)txq;
memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
@@ -303,7 +303,7 @@ t4_l2t_alloc_switching(struct adapter *sc, uint16_t vlan, uint8_t port,
e->iqid = sc->sge.fwq.abs_id;
e->state = L2T_STATE_SWITCHING;
e->vlan = vlan;
- e->lport = port;
+ e->hw_port = port;
memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
atomic_store_rel_int(&e->refcnt, 1);
atomic_subtract_int(&d->nfree, 1);
@@ -313,7 +313,7 @@ t4_l2t_alloc_switching(struct adapter *sc, uint16_t vlan, uint8_t port,
e = NULL;
} else {
MPASS(e->vlan == vlan);
- MPASS(e->lport == port);
+ MPASS(e->hw_port == port);
atomic_add_int(&e->refcnt, 1);
}
}
@@ -488,7 +488,7 @@ sysctl_l2t(SYSCTL_HANDLER_ARGS)
" %u %2u %c %5u %s",
e->idx, ip, e->dmac[0], e->dmac[1], e->dmac[2],
e->dmac[3], e->dmac[4], e->dmac[5],
- e->vlan & 0xfff, vlan_prio(e), e->lport,
+ e->vlan & 0xfff, vlan_prio(e), e->hw_port,
l2e_state(e), atomic_load_acq_int(&e->refcnt),
e->ifp ? if_name(e->ifp) : "-");
skip:
diff --git a/sys/dev/cxgbe/t4_l2t.h b/sys/dev/cxgbe/t4_l2t.h
index 13e085bb7467..989d2d5ec8f3 100644
--- a/sys/dev/cxgbe/t4_l2t.h
+++ b/sys/dev/cxgbe/t4_l2t.h
@@ -71,7 +71,7 @@ struct l2t_entry {
volatile int refcnt; /* entry reference count */
uint16_t hash; /* hash bucket the entry is on */
uint8_t ipv6; /* entry is for an IPv6 address */
- uint8_t lport; /* associated offload logical port */
+ uint8_t hw_port; /* associated hardware port idx */
uint8_t dmac[ETHER_ADDR_LEN]; /* next hop's MAC address */
};
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
index 9756a6945384..22d2f504c257 100644
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2025 Chelsio Communications.
* Written by: Navdeep Parhar <np@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
@@ -241,6 +240,45 @@ static driver_t vcc_driver = {
sizeof(struct vi_info)
};
+/* T7+ bus driver interface */
+static int ch_probe(device_t);
+static device_method_t ch_methods[] = {
+ DEVMETHOD(device_probe, ch_probe),
+ DEVMETHOD(device_attach, t4_attach),
+ DEVMETHOD(device_detach, t4_detach),
+ DEVMETHOD(device_suspend, t4_suspend),
+ DEVMETHOD(device_resume, t4_resume),
+
+ DEVMETHOD(bus_child_location, t4_child_location),
+ DEVMETHOD(bus_reset_prepare, t4_reset_prepare),
+ DEVMETHOD(bus_reset_post, t4_reset_post),
+
+ DEVMETHOD(t4_is_main_ready, t4_ready),
+ DEVMETHOD(t4_read_port_device, t4_read_port_device),
+
+ DEVMETHOD_END
+};
+static driver_t ch_driver = {
+ "chnex",
+ ch_methods,
+ sizeof(struct adapter)
+};
+
+
+/* T7+ port (che) interface */
+static driver_t che_driver = {
+ "che",
+ cxgbe_methods,
+ sizeof(struct port_info)
+};
+
+/* T7+ VI (vche) interface */
+static driver_t vche_driver = {
+ "vche",
+ vcxgbe_methods,
+ sizeof(struct vi_info)
+};
+
/* ifnet interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(if_t, unsigned long, caddr_t);
@@ -519,6 +557,9 @@ static int t4_fec = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fec, CTLFLAG_RDTUN, &t4_fec, 0,
"Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)");
+static const char *
+t4_fec_bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD1\5RSVD2\6auto\7module";
+
/*
* Controls when the driver sets the FORCE_FEC bit in the L1_CFG32 that it
* issues to the firmware. If the firmware doesn't support FORCE_FEC then the
@@ -570,6 +611,10 @@ static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
SYSCTL_INT(_hw_cxgbe, OID_AUTO, switchcaps_allowed, CTLFLAG_RDTUN,
&t4_switchcaps_allowed, 0, "Default switch capabilities");
+static int t4_nvmecaps_allowed = 0;
+SYSCTL_INT(_hw_cxgbe, OID_AUTO, nvmecaps_allowed, CTLFLAG_RDTUN,
+ &t4_nvmecaps_allowed, 0, "Default NVMe capabilities");
+
#ifdef RATELIMIT
static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
FW_CAPS_CONFIG_NIC_HASHFILTER | FW_CAPS_CONFIG_NIC_ETHOFLD;
@@ -716,6 +761,14 @@ SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, inline_keys, CTLFLAG_RDTUN,
static int t4_tls_combo_wrs = 0;
SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, combo_wrs, CTLFLAG_RDTUN, &t4_tls_combo_wrs,
0, "Attempt to combine TCB field updates with TLS record work requests.");
+
+static int t4_tls_short_records = 1;
+SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, short_records, CTLFLAG_RDTUN,
+ &t4_tls_short_records, 0, "Use cipher-only mode for short records.");
+
+static int t4_tls_partial_ghash = 1;
+SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, partial_ghash, CTLFLAG_RDTUN,
+ &t4_tls_partial_ghash, 0, "Use partial GHASH for AES-GCM records.");
#endif
/* Functions used by VIs to obtain unique MAC addresses for each VI. */
@@ -809,17 +862,20 @@ static int sysctl_requested_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_module_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
static int sysctl_force_fec(SYSCTL_HANDLER_ARGS);
+static int sysctl_handle_t4_portstat64(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
static int sysctl_vdd(SYSCTL_HANDLER_ARGS);
static int sysctl_reset_sensor(SYSCTL_HANDLER_ARGS);
static int sysctl_loadavg(SYSCTL_HANDLER_ARGS);
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
-static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
+static int sysctl_cim_ibq(SYSCTL_HANDLER_ARGS);
+static int sysctl_cim_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
+static int sysctl_cim_qcfg_t7(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tid_stats(SYSCTL_HANDLER_ARGS);
@@ -831,6 +887,7 @@ static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
+static int sysctl_mps_tcam_t7(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
@@ -855,7 +912,7 @@ static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS);
#endif
-static int get_sge_context(struct adapter *, struct t4_sge_context *);
+static int get_sge_context(struct adapter *, int, uint32_t, int, uint32_t *);
static int load_fw(struct adapter *, struct t4_data *);
static int load_cfg(struct adapter *, struct t4_data *);
static int load_boot(struct adapter *, struct t4_bootrom *);
@@ -960,6 +1017,29 @@ struct {
{0x6485, "Custom T6240-SO"},
{0x6486, "Custom T6225-SO-CR"},
{0x6487, "Custom T6225-CR"},
+}, t7_pciids[] = {
+ {0xd000, "Chelsio Terminator 7 FPGA"}, /* T7 PE12K FPGA */
+ {0x7400, "Chelsio T72200-DBG"}, /* 2 x 200G, debug */
+ {0x7401, "Chelsio T7250"}, /* 2 x 10/25/50G, 1 mem */
+ {0x7402, "Chelsio S7250"}, /* 2 x 10/25/50G, nomem */
+ {0x7403, "Chelsio T7450"}, /* 4 x 10/25/50G, 1 mem */
+ {0x7404, "Chelsio S7450"}, /* 4 x 10/25/50G, nomem */
+ {0x7405, "Chelsio T72200"}, /* 2 x 40/100/200G, 1 mem */
+ {0x7406, "Chelsio S72200"}, /* 2 x 40/100/200G, nomem */
+ {0x7407, "Chelsio T72200-FH"}, /* 2 x 40/100/200G, 2 mem */
+ {0x7408, "Chelsio S71400"}, /* 1 x 400G, nomem */
+ {0x7409, "Chelsio S7210-BT"}, /* 2 x 10GBASE-T, nomem */
+ {0x740a, "Chelsio T7450-RC"}, /* 4 x 10/25/50G, 1 mem, RC */
+ {0x740b, "Chelsio T72200-RC"}, /* 2 x 40/100/200G, 1 mem, RC */
+ {0x740c, "Chelsio T72200-FH-RC"}, /* 2 x 40/100/200G, 2 mem, RC */
+ {0x740d, "Chelsio S72200-OCP3"}, /* 2 x 40/100/200G OCP3 */
+ {0x740e, "Chelsio S7450-OCP3"}, /* 4 x 1/20/25/50G OCP3 */
+ {0x740f, "Chelsio S7410-BT-OCP3"}, /* 4 x 10GBASE-T OCP3 */
+ {0x7410, "Chelsio S7210-BT-A"}, /* 2 x 10GBASE-T */
+ {0x7411, "Chelsio T7_MAYRA_7"}, /* Motherboard */
+
+ /* Custom */
+ {0x7480, "Custom T7"},
};
#ifdef TCP_OFFLOAD
@@ -1042,6 +1122,31 @@ t6_probe(device_t dev)
return (ENXIO);
}
+/*
+ * Probe for the T7 main nexus driver ("chnex").  Matches the Chelsio PCI
+ * vendor ID and a device ID from t7_pciids.  For the T7 FPGA (0xd000)
+ * only function 0 is claimed.
+ */
+static int
+ch_probe(device_t dev)
+{
+ int i;
+ uint16_t v = pci_get_vendor(dev);
+ uint16_t d = pci_get_device(dev);
+ uint8_t f = pci_get_function(dev);
+
+ if (v != PCI_VENDOR_ID_CHELSIO)
+ return (ENXIO);
+
+ /* Attach only to PF0 of the FPGA */
+ if (d == 0xd000 && f != 0)
+ return (ENXIO);
+
+ for (i = 0; i < nitems(t7_pciids); i++) {
+ if (d == t7_pciids[i].device) {
+ device_set_desc(dev, t7_pciids[i].desc);
+ return (BUS_PROBE_DEFAULT);
+ }
+ }
+
+ return (ENXIO);
+}
+
static void
t5_attribute_workaround(device_t dev)
{
@@ -1091,6 +1196,13 @@ static const struct devnames devnames[] = {
.pf03_drv_name = "t6iov",
.vf_nexus_name = "t6vf",
.vf_ifnet_name = "ccv"
+ }, {
+ .nexus_name = "chnex",
+ .ifnet_name = "che",
+ .vi_ifnet_name = "vche",
+ .pf03_drv_name = "chiov",
+ .vf_nexus_name = "chvf",
+ .vf_ifnet_name = "chev"
}
};
@@ -1100,12 +1212,13 @@ t4_init_devnames(struct adapter *sc)
int id;
id = chip_id(sc);
- if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
- sc->names = &devnames[id - CHELSIO_T4];
- else {
+ if (id < CHELSIO_T4) {
device_printf(sc->dev, "chip id %d is not supported.\n", id);
sc->names = NULL;
- }
+ } else if (id - CHELSIO_T4 < nitems(devnames))
+ sc->names = &devnames[id - CHELSIO_T4];
+ else
+ sc->names = &devnames[nitems(devnames) - 1];
}
static int
@@ -1277,6 +1390,7 @@ t4_attach(device_t dev)
goto done; /* error message displayed already */
memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
+ memset(sc->port_map, 0xff, sizeof(sc->port_map));
/* Prepare the adapter for operation. */
buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
@@ -1309,7 +1423,7 @@ t4_attach(device_t dev)
* will work even in "recovery mode".
*/
setup_memwin(sc);
- if (t4_init_devlog_params(sc, 0) == 0)
+ if (t4_init_devlog_ncores_params(sc, 0) == 0)
fixup_devlog_params(sc);
make_dev_args_init(&mda);
mda.mda_devsw = &t4_cdevsw;
@@ -1407,14 +1521,16 @@ t4_attach(device_t dev)
}
if (is_bt(pi->port_type))
- setbit(&sc->bt_map, pi->tx_chan);
+ setbit(&sc->bt_map, pi->hw_port);
else
- MPASS(!isset(&sc->bt_map, pi->tx_chan));
+ MPASS(!isset(&sc->bt_map, pi->hw_port));
snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
device_get_nameunit(dev), i);
mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
- sc->chan_map[pi->tx_chan] = i;
+ for (j = 0; j < sc->params.tp.lb_nchan; j++)
+ sc->chan_map[pi->tx_chan + j] = i;
+ sc->port_map[pi->hw_port] = i;
/*
* The MPS counter for FCS errors doesn't work correctly on the
@@ -1424,10 +1540,8 @@ t4_attach(device_t dev)
*/
if (is_t6(sc))
pi->fcs_reg = -1;
- else {
- pi->fcs_reg = t4_port_reg(sc, pi->tx_chan,
- A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L);
- }
+ else
+ pi->fcs_reg = A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L;
pi->fcs_base = 0;
/* All VIs on this port share this media. */
@@ -1467,6 +1581,7 @@ t4_attach(device_t dev)
sc->intr_count = iaq.nirq;
s = &sc->sge;
+ s->nctrlq = max(sc->params.nports, sc->params.ncores);
s->nrxq = nports * iaq.nrxq;
s->ntxq = nports * iaq.ntxq;
if (num_vis > 1) {
@@ -1521,7 +1636,7 @@ t4_attach(device_t dev)
MPASS(s->niq <= s->iqmap_sz);
MPASS(s->neq <= s->eqmap_sz);
- s->ctrlq = malloc(nports * sizeof(struct sge_wrq), M_CXGBE,
+ s->ctrlq = malloc(s->nctrlq * sizeof(struct sge_wrq), M_CXGBE,
M_ZERO | M_WAITOK);
s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
M_ZERO | M_WAITOK);
@@ -1548,6 +1663,7 @@ t4_attach(device_t dev)
if (sc->vres.key.size != 0)
sc->key_map = vmem_create("T4TLS key map", sc->vres.key.start,
sc->vres.key.size, 32, 0, M_FIRSTFIT | M_WAITOK);
+ t4_init_tpt(sc);
/*
* Second pass over the ports. This time we know the number of rx and
@@ -1849,6 +1965,7 @@ t4_detach_common(device_t dev)
#endif
if (sc->key_map)
vmem_destroy(sc->key_map);
+ t4_free_tpt(sc);
#ifdef INET6
t4_destroy_clip_table(sc);
#endif
@@ -2156,6 +2273,7 @@ struct adapter_pre_reset_state {
uint16_t nbmcaps;
uint16_t linkcaps;
uint16_t switchcaps;
+ uint16_t nvmecaps;
uint16_t niccaps;
uint16_t toecaps;
uint16_t rdmacaps;
@@ -2187,6 +2305,7 @@ save_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o)
o->nbmcaps = sc->nbmcaps;
o->linkcaps = sc->linkcaps;
o->switchcaps = sc->switchcaps;
+ o->nvmecaps = sc->nvmecaps;
o->niccaps = sc->niccaps;
o->toecaps = sc->toecaps;
o->rdmacaps = sc->rdmacaps;
@@ -2225,6 +2344,7 @@ compare_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o)
COMPARE_CAPS(nbm);
COMPARE_CAPS(link);
COMPARE_CAPS(switch);
+ COMPARE_CAPS(nvme);
COMPARE_CAPS(nic);
COMPARE_CAPS(toe);
COMPARE_CAPS(rdma);
@@ -2417,11 +2537,7 @@ restart_lld(struct adapter *sc)
}
if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
- t4_write_reg(sc, is_t4(sc) ?
- A_MPS_TRC_RSS_CONTROL :
- A_MPS_T5_TRC_RSS_CONTROL,
- V_RSSCONTROL(pi->tx_chan) |
- V_QUEUENUMBER(sc->traceq));
+ t4_set_trace_rss_control(sc, pi->tx_chan, sc->traceq);
pi->flags |= HAS_TRACEQ;
}
@@ -3407,7 +3523,7 @@ cxgbe_snd_tag_alloc(if_t ifp, union if_snd_tag_alloc_params *params,
if (is_t6(vi->pi->adapter))
error = t6_tls_tag_alloc(ifp, params, pt);
else
- error = EOPNOTSUPP;
+ error = t7_tls_tag_alloc(ifp, params, pt);
break;
}
#endif
@@ -3534,6 +3650,8 @@ port_mword(struct port_info *pi, uint32_t speed)
case FW_PORT_TYPE_CR_QSFP:
case FW_PORT_TYPE_CR2_QSFP:
case FW_PORT_TYPE_SFP28:
+ case FW_PORT_TYPE_SFP56:
+ case FW_PORT_TYPE_QSFP56:
/* Pluggable transceiver */
switch (pi->mod_type) {
case FW_PORT_MOD_TYPE_LR:
@@ -3551,6 +3669,8 @@ port_mword(struct port_info *pi, uint32_t speed)
return (IFM_50G_LR2);
case FW_PORT_CAP32_SPEED_100G:
return (IFM_100G_LR4);
+ case FW_PORT_CAP32_SPEED_200G:
+ return (IFM_200G_LR4);
}
break;
case FW_PORT_MOD_TYPE_SR:
@@ -3567,6 +3687,8 @@ port_mword(struct port_info *pi, uint32_t speed)
return (IFM_50G_SR2);
case FW_PORT_CAP32_SPEED_100G:
return (IFM_100G_SR4);
+ case FW_PORT_CAP32_SPEED_200G:
+ return (IFM_200G_SR4);
}
break;
case FW_PORT_MOD_TYPE_ER:
@@ -3588,6 +3710,8 @@ port_mword(struct port_info *pi, uint32_t speed)
return (IFM_50G_CR2);
case FW_PORT_CAP32_SPEED_100G:
return (IFM_100G_CR4);
+ case FW_PORT_CAP32_SPEED_200G:
+ return (IFM_200G_CR4_PAM4);
}
break;
case FW_PORT_MOD_TYPE_LRM:
@@ -3597,6 +3721,8 @@ port_mword(struct port_info *pi, uint32_t speed)
case FW_PORT_MOD_TYPE_DR:
if (speed == FW_PORT_CAP32_SPEED_100G)
return (IFM_100G_DR);
+ if (speed == FW_PORT_CAP32_SPEED_200G)
+ return (IFM_200G_DR4);
break;
case FW_PORT_MOD_TYPE_NA:
MPASS(0); /* Not pluggable? */
@@ -3684,7 +3810,7 @@ alloc_extra_vi(struct adapter *sc, struct port_info *pi, struct vi_info *vi)
("%s: VI %s doesn't have a MAC func", __func__,
device_get_nameunit(vi->dev)));
func = vi_mac_funcs[index];
- rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
+ rc = t4_alloc_vi_func(sc, sc->mbox, pi->hw_port, sc->pf, 0, 1,
vi->hw_addr, &vi->rss_size, &vi->vfvld, &vi->vin, func, 0);
if (rc < 0) {
CH_ERR(vi, "failed to allocate virtual interface %d"
@@ -3954,7 +4080,7 @@ setup_memwin(struct adapter *sc)
const struct memwin_init *mw_init;
struct memwin *mw;
int i;
- uint32_t bar0;
+ uint32_t bar0, reg;
if (is_t4(sc)) {
/*
@@ -3982,9 +4108,10 @@ setup_memwin(struct adapter *sc)
mw->mw_aperture = mw_init->aperture;
mw->mw_curpos = 0;
}
- t4_write_reg(sc,
- PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
- (mw->mw_base + bar0) | V_BIR(0) |
+ reg = chip_id(sc) > CHELSIO_T6 ?
+ PCIE_MEM_ACCESS_T7_REG(A_T7_PCIE_MEM_ACCESS_BASE_WIN, i) :
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i);
+ t4_write_reg(sc, reg, (mw->mw_base + bar0) | V_BIR(0) |
V_WINDOW(ilog2(mw->mw_aperture) - 10));
rw_wlock(&mw->mw_lock);
position_memwin(sc, i, mw->mw_curpos);
@@ -3992,7 +4119,7 @@ setup_memwin(struct adapter *sc)
}
/* flush */
- t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
+ t4_read_reg(sc, reg);
}
/*
@@ -4005,8 +4132,7 @@ static void
position_memwin(struct adapter *sc, int idx, uint32_t addr)
{
struct memwin *mw;
- uint32_t pf;
- uint32_t reg;
+ uint32_t pf, reg, val;
MPASS(idx >= 0 && idx < NUM_MEMWIN);
mw = &sc->memwin[idx];
@@ -4019,8 +4145,14 @@ position_memwin(struct adapter *sc, int idx, uint32_t addr)
pf = V_PFNUM(sc->pf);
mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */
}
- reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx);
- t4_write_reg(sc, reg, mw->mw_curpos | pf);
+ if (chip_id(sc) > CHELSIO_T6) {
+ reg = PCIE_MEM_ACCESS_T7_REG(A_PCIE_MEM_ACCESS_OFFSET0, idx);
+ val = (mw->mw_curpos >> X_T7_MEMOFST_SHIFT) | pf;
+ } else {
+ reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx);
+ val = mw->mw_curpos | pf;
+ }
+ t4_write_reg(sc, reg, val);
t4_read_reg(sc, reg); /* flush */
}
@@ -4453,8 +4585,27 @@ calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype,
iaq->nrxq_vi = t4_nrxq_vi;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
if (is_offload(sc) || is_ethoffload(sc)) {
- iaq->nofldtxq = t4_nofldtxq;
- iaq->nofldtxq_vi = t4_nofldtxq_vi;
+ if (sc->params.tid_qid_sel_mask == 0) {
+ iaq->nofldtxq = t4_nofldtxq;
+ iaq->nofldtxq_vi = t4_nofldtxq_vi;
+ } else {
+ iaq->nofldtxq = roundup(t4_nofldtxq, sc->params.ncores);
+ iaq->nofldtxq_vi = roundup(t4_nofldtxq_vi,
+ sc->params.ncores);
+ if (iaq->nofldtxq != t4_nofldtxq)
+ device_printf(sc->dev,
+ "nofldtxq updated (%d -> %d) for correct"
+ " operation with %d firmware cores.\n",
+ t4_nofldtxq, iaq->nofldtxq,
+ sc->params.ncores);
+ if (iaq->num_vis > 1 &&
+ iaq->nofldtxq_vi != t4_nofldtxq_vi)
+ device_printf(sc->dev,
+ "nofldtxq_vi updated (%d -> %d) for correct"
+ " operation with %d firmware cores.\n",
+ t4_nofldtxq_vi, iaq->nofldtxq_vi,
+ sc->params.ncores);
+ }
}
#endif
#ifdef TCP_OFFLOAD
@@ -4555,6 +4706,9 @@ calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype,
 		if (iaq->nofldrxq > 0) {
 			iaq->nofldrxq = 1;
-			iaq->nofldtxq = 1;
+			if (sc->params.tid_qid_sel_mask == 0)
+				iaq->nofldtxq = 1;
+			else
+				iaq->nofldtxq = sc->params.ncores;
 		}
iaq->nnmtxq = 0;
iaq->nnmrxq = 0;
@@ -4567,9 +4722,10 @@ done:
MPASS(iaq->nirq > 0);
MPASS(iaq->nrxq > 0);
MPASS(iaq->ntxq > 0);
- if (itype == INTR_MSI) {
+ if (itype == INTR_MSI)
MPASS(powerof2(iaq->nirq));
- }
+ if (sc->params.tid_qid_sel_mask != 0)
+ MPASS(iaq->nofldtxq % sc->params.ncores == 0);
}
static int
@@ -4711,6 +4867,22 @@ struct fw_info {
.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
.intfver_fcoe = FW_INTFVER(T6, FCOE),
},
+ }, {
+ .chip = CHELSIO_T7,
+ .kld_name = "t7fw_cfg",
+ .fw_mod_name = "t7fw",
+ .fw_h = {
+ .chip = FW_HDR_CHIP_T7,
+ .fw_ver = htobe32(FW_VERSION(T7)),
+ .intfver_nic = FW_INTFVER(T7, NIC),
+ .intfver_vnic = FW_INTFVER(T7, VNIC),
+ .intfver_ofld = FW_INTFVER(T7, OFLD),
+ .intfver_ri = FW_INTFVER(T7, RI),
+ .intfver_iscsipdu = FW_INTFVER(T7, ISCSIPDU),
+ .intfver_iscsi = FW_INTFVER(T7, ISCSI),
+ .intfver_fcoepdu = FW_INTFVER(T7, FCOEPDU),
+ .intfver_fcoe = FW_INTFVER(T7, FCOE),
+ },
}
};
@@ -5032,7 +5204,7 @@ done:
static int
copy_cfg_file_to_card(struct adapter *sc, char *cfg_file,
- uint32_t mtype, uint32_t moff)
+ uint32_t mtype, uint32_t moff, u_int maxlen)
{
struct fw_info *fw_info;
const struct firmware *dcfg, *rcfg = NULL;
@@ -5084,10 +5256,10 @@ copy_cfg_file_to_card(struct adapter *sc, char *cfg_file,
cflen = rcfg->datasize & ~3;
}
- if (cflen > FLASH_CFG_MAX_SIZE) {
+ if (cflen > maxlen) {
device_printf(sc->dev,
"config file too long (%d, max allowed is %d).\n",
- cflen, FLASH_CFG_MAX_SIZE);
+ cflen, maxlen);
rc = EINVAL;
goto done;
}
@@ -5112,6 +5284,7 @@ struct caps_allowed {
uint16_t nbmcaps;
uint16_t linkcaps;
uint16_t switchcaps;
+ uint16_t nvmecaps;
uint16_t niccaps;
uint16_t toecaps;
uint16_t rdmacaps;
@@ -5139,6 +5312,8 @@ apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
int rc;
struct fw_caps_config_cmd caps;
uint32_t mtype, moff, finicsum, cfcsum, param, val;
+ unsigned int maxlen = 0;
+ const int cfg_addr = t4_flash_cfg_addr(sc, &maxlen);
rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
if (rc != 0) {
@@ -5155,7 +5330,7 @@ apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
} else if (strncmp(cfg_file, FLASH_CF, sizeof(t4_cfg_file)) == 0) {
mtype = FW_MEMTYPE_FLASH;
- moff = t4_flash_cfg_addr(sc);
+ moff = cfg_addr;
caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) |
@@ -5179,7 +5354,7 @@ apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) |
FW_LEN16(caps));
- rc = copy_cfg_file_to_card(sc, cfg_file, mtype, moff);
+ rc = copy_cfg_file_to_card(sc, cfg_file, mtype, moff, maxlen);
if (rc != 0) {
device_printf(sc->dev,
"failed to upload config file to card: %d.\n", rc);
@@ -5213,6 +5388,7 @@ apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
LIMIT_CAPS(nbm);
LIMIT_CAPS(link);
LIMIT_CAPS(switch);
+ LIMIT_CAPS(nvme);
LIMIT_CAPS(nic);
LIMIT_CAPS(toe);
LIMIT_CAPS(rdma);
@@ -5278,6 +5454,7 @@ partition_resources(struct adapter *sc)
COPY_CAPS(nbm);
COPY_CAPS(link);
COPY_CAPS(switch);
+ COPY_CAPS(nvme);
COPY_CAPS(nic);
COPY_CAPS(toe);
COPY_CAPS(rdma);
@@ -5354,7 +5531,7 @@ get_params__pre_init(struct adapter *sc)
sc->params.vpd.cclk = val[1];
/* Read device log parameters. */
- rc = -t4_init_devlog_params(sc, 1);
+ rc = -t4_init_devlog_ncores_params(sc, 1);
if (rc == 0)
fixup_devlog_params(sc);
else {
@@ -5508,6 +5685,14 @@ get_params__post_init(struct adapter *sc)
}
}
+ if (sc->params.ncores > 1) {
+ MPASS(chip_id(sc) >= CHELSIO_T7);
+
+ param[0] = FW_PARAM_DEV(TID_QID_SEL_MASK);
+ rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
+ sc->params.tid_qid_sel_mask = rc == 0 ? val[0] : 0;
+ }
+
/*
* The parameters that follow may not be available on all firmwares. We
* query them individually rather than in a compound query because old
@@ -5533,6 +5718,14 @@ get_params__post_init(struct adapter *sc)
else
sc->params.tp_ch_map = UINT32_MAX; /* Not a legal value. */
+ param[0] = FW_PARAM_DEV(TX_TPCHMAP);
+ val[0] = 0;
+ rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
+ if (rc == 0)
+ sc->params.tx_tp_ch_map = val[0];
+ else
+ sc->params.tx_tp_ch_map = UINT32_MAX; /* Not a legal value. */
+
/*
* Determine whether the firmware supports the filter2 work request.
*/
@@ -5604,6 +5797,7 @@ get_params__post_init(struct adapter *sc)
READ_CAPS(nbmcaps);
READ_CAPS(linkcaps);
READ_CAPS(switchcaps);
+ READ_CAPS(nvmecaps);
READ_CAPS(niccaps);
READ_CAPS(toecaps);
READ_CAPS(rdmacaps);
@@ -5946,9 +6140,13 @@ set_params__post_init(struct adapter *sc)
#ifdef KERN_TLS
if (is_ktls(sc)) {
sc->tlst.inline_keys = t4_tls_inline_keys;
- sc->tlst.combo_wrs = t4_tls_combo_wrs;
- if (t4_kern_tls != 0 && is_t6(sc))
+ if (t4_kern_tls != 0 && is_t6(sc)) {
+ sc->tlst.combo_wrs = t4_tls_combo_wrs;
t6_config_kern_tls(sc, true);
+ } else {
+ sc->tlst.short_records = t4_tls_short_records;
+ sc->tlst.partial_ghash = t4_tls_partial_ghash;
+ }
}
#endif
return (0);
@@ -6220,7 +6418,7 @@ apply_link_config(struct port_info *pi)
MPASS(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS);
#endif
if (!(sc->flags & IS_VF)) {
- rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
+ rc = -t4_link_l1cfg(sc, sc->mbox, pi->hw_port, lc);
if (rc != 0) {
device_printf(pi->dev, "l1cfg failed: %d\n", rc);
return (rc);
@@ -6581,9 +6779,7 @@ cxgbe_init_synchronized(struct vi_info *vi)
*/
if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
- t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL :
- A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
- V_QUEUENUMBER(sc->traceq));
+ t4_set_trace_rss_control(sc, pi->tx_chan, sc->traceq);
pi->flags |= HAS_TRACEQ;
}
@@ -7443,7 +7639,7 @@ cxgbe_refresh_stats(struct vi_info *vi)
pi = vi->pi;
sc = vi->adapter;
tnl_cong_drops = 0;
- t4_get_port_stats(sc, pi->port_id, &pi->stats);
+ t4_get_port_stats(sc, pi->hw_port, &pi->stats);
chan_map = pi->rx_e_chan_map;
while (chan_map) {
i = ffs(chan_map) - 1;
@@ -7481,6 +7677,150 @@ vi_tick(void *arg)
callout_schedule(&vi->tick, hz);
}
+/* CIM inbound queues */
+static const char *t4_ibq[CIM_NUM_IBQ] = {
+ "ibq_tp0", "ibq_tp1", "ibq_ulp", "ibq_sge0", "ibq_sge1", "ibq_ncsi"
+};
+static const char *t7_ibq[CIM_NUM_IBQ_T7] = {
+ "ibq_tp0", "ibq_tp1", "ibq_tp2", "ibq_tp3", "ibq_ulp", "ibq_sge0",
+ "ibq_sge1", "ibq_ncsi", NULL, "ibq_ipc1", "ibq_ipc2", "ibq_ipc3",
+ "ibq_ipc4", "ibq_ipc5", "ibq_ipc6", "ibq_ipc7"
+};
+static const char *t7_ibq_sec[] = {
+ "ibq_tp0", "ibq_tp1", "ibq_tp2", "ibq_tp3", "ibq_ulp", "ibq_sge0",
+ NULL, NULL, NULL, "ibq_ipc0"
+};
+
+/* CIM outbound queues */
+static const char *t4_obq[CIM_NUM_OBQ_T5] = {
+ "obq_ulp0", "obq_ulp1", "obq_ulp2", "obq_ulp3", "obq_sge", "obq_ncsi",
+ "obq_sge_rx_q0", "obq_sge_rx_q1" /* These two are T5/T6 only */
+};
+static const char *t7_obq[CIM_NUM_OBQ_T7] = {
+ "obq_ulp0", "obq_ulp1", "obq_ulp2", "obq_ulp3", "obq_sge", "obq_ncsi",
+ "obq_sge_rx_q0", NULL, NULL, "obq_ipc1", "obq_ipc2", "obq_ipc3",
+ "obq_ipc4", "obq_ipc5", "obq_ipc6", "obq_ipc7"
+};
+static const char *t7_obq_sec[] = {
+ "obq_ulp0", "obq_ulp1", "obq_ulp2", "obq_ulp3", "obq_sge", NULL,
+ "obq_sge_rx_q0", NULL, NULL, "obq_ipc0"
+};
+
+static void
+cim_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
+ struct sysctl_oid_list *c0)
+{
+ struct sysctl_oid *oid;
+ struct sysctl_oid_list *children1;
+ int i, j, qcount;
+ char s[16];
+ const char **qname;
+
+ oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "cim",
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CIM block");
+ c0 = SYSCTL_CHILDREN(oid);
+
+ SYSCTL_ADD_U8(ctx, c0, OID_AUTO, "ncores", CTLFLAG_RD, NULL,
+ sc->params.ncores, "# of active CIM cores");
+
+ for (i = 0; i < sc->params.ncores; i++) {
+ snprintf(s, sizeof(s), "%u", i);
+ oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, s,
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CIM core");
+ children1 = SYSCTL_CHILDREN(oid);
+
+ /*
+ * CTLFLAG_SKIP because the misc.devlog sysctl already displays
+ * the log for all cores. Use this sysctl to get the log for a
+ * particular core only.
+ */
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, "devlog",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_SKIP,
+ sc, i, sysctl_devlog, "A", "firmware's device log");
+
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, "loadavg",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
+ sysctl_loadavg, "A",
+ "microprocessor load averages (select firmwares only)");
+
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, "qcfg",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
+ chip_id(sc) > CHELSIO_T6 ? sysctl_cim_qcfg_t7 : sysctl_cim_qcfg,
+ "A", "Queue configuration");
+
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, "la",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
+ sysctl_cim_la, "A", "Logic analyzer");
+
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, "ma_la",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
+ sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
+
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, "pif_la",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
+ sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
+
+ /* IBQs */
+ switch (chip_id(sc)) {
+ case CHELSIO_T4:
+ case CHELSIO_T5:
+ case CHELSIO_T6:
+ qname = &t4_ibq[0];
+ qcount = nitems(t4_ibq);
+ break;
+ case CHELSIO_T7:
+ default:
+ if (i == 0) {
+ qname = &t7_ibq[0];
+ qcount = nitems(t7_ibq);
+ } else {
+ qname = &t7_ibq_sec[0];
+ qcount = nitems(t7_ibq_sec);
+ }
+ break;
+ }
+ MPASS(qcount <= sc->chip_params->cim_num_ibq);
+ for (j = 0; j < qcount; j++) {
+ if (qname[j] == NULL)
+ continue;
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, qname[j],
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
+ (i << 16) | j, sysctl_cim_ibq, "A", NULL);
+ }
+
+ /* OBQs */
+ switch (chip_id(sc)) {
+ case CHELSIO_T4:
+ qname = t4_obq;
+ qcount = CIM_NUM_OBQ;
+ break;
+ case CHELSIO_T5:
+ case CHELSIO_T6:
+ qname = t4_obq;
+ qcount = nitems(t4_obq);
+ break;
+ case CHELSIO_T7:
+ default:
+ if (i == 0) {
+ qname = t7_obq;
+ qcount = nitems(t7_obq);
+ } else {
+ qname = t7_obq_sec;
+ qcount = nitems(t7_obq_sec);
+ }
+ break;
+ }
+ MPASS(qcount <= sc->chip_params->cim_num_obq);
+ for (j = 0; j < qcount; j++) {
+ if (qname[j] == NULL)
+ continue;
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, qname[j],
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
+ (i << 16) | j, sysctl_cim_obq, "A", NULL);
+ }
+ }
+}
+
/*
* Should match fw_caps_config_<foo> enums in t4fw_interface.h
*/
@@ -7490,17 +7830,18 @@ static char *caps_decoder[] = {
"\20\001INGRESS\002EGRESS", /* 2: switch */
"\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */
"\006HASHFILTER\007ETHOFLD",
- "\20\001TOE", /* 4: TOE */
- "\20\001RDDP\002RDMAC", /* 5: RDMA */
+ "\20\001TOE\002SENDPATH", /* 4: TOE */
+ "\20\001RDDP\002RDMAC\003ROCEv2", /* 5: RDMA */
"\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */
"\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD"
"\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD"
"\007T10DIF"
"\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD",
 	"\20\001LOOKASIDE\002TLSKEYS\003IPSEC_INLINE"	/* 7: Crypto */
-	    "\004TLS_HW",
+	    "\004TLS_HW\005TOE_IPSEC",
"\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */
"\004PO_INITIATOR\005PO_TARGET",
+ "\20\001NVMe_TCP", /* 9: NVMe */
};
void
@@ -7605,6 +7946,7 @@ t4_sysctls(struct adapter *sc)
SYSCTL_CAP(nbmcaps, 0, "NBM");
SYSCTL_CAP(linkcaps, 1, "link");
SYSCTL_CAP(switchcaps, 2, "switch");
+ SYSCTL_CAP(nvmecaps, 9, "NVMe");
SYSCTL_CAP(niccaps, 3, "NIC");
SYSCTL_CAP(toecaps, 4, "TCP offload");
SYSCTL_CAP(rdmacaps, 5, "RDMA");
@@ -7623,11 +7965,6 @@ t4_sysctls(struct adapter *sc)
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
sysctl_reset_sensor, "I", "reset the chip's temperature sensor.");
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "loadavg",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_loadavg, "A",
- "microprocessor load averages (debug firmwares only)");
-
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "core_vdd",
CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_vdd,
"I", "core Vdd (in mV)");
@@ -7659,81 +7996,7 @@ t4_sysctls(struct adapter *sc)
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
sysctl_cctrl, "A", "congestion control");
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 1,
- sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 2,
- sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 3,
- sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 4,
- sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 5,
- sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_cim_la, "A", "CIM logic analyzer");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 0 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 1 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 2 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 3 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 4 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 5 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
-
- if (chip_id(sc) > CHELSIO_T4) {
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 6 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A",
- "CIM OBQ 6 (SGE0-RX)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 7 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A",
- "CIM OBQ 7 (SGE1-RX)");
- }
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_cim_qcfg, "A", "CIM queue configuration");
+ cim_sysctls(sc, ctx, children);
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
@@ -7748,8 +8011,8 @@ t4_sysctls(struct adapter *sc)
sysctl_tid_stats, "A", "tid stats");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_devlog, "A", "firmware's device log");
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, -1,
+ sysctl_devlog, "A", "firmware's device log (all cores)");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
@@ -7783,7 +8046,8 @@ t4_sysctls(struct adapter *sc)
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- chip_id(sc) <= CHELSIO_T5 ? sysctl_mps_tcam : sysctl_mps_tcam_t6,
+ chip_id(sc) >= CHELSIO_T7 ? sysctl_mps_tcam_t7 :
+ (chip_id(sc) >= CHELSIO_T6 ? sysctl_mps_tcam_t6 : sysctl_mps_tcam),
"A", "MPS TCAM entries");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
@@ -7855,6 +8119,14 @@ t4_sysctls(struct adapter *sc)
CTLFLAG_RW, &sc->tlst.combo_wrs, 0, "Attempt to "
"combine TCB field updates with TLS record work "
"requests.");
+ else {
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "short_records",
+ CTLFLAG_RW, &sc->tlst.short_records, 0,
+ "Use cipher-only mode for short records.");
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "partial_ghash",
+ CTLFLAG_RW, &sc->tlst.partial_ghash, 0,
+ "Use partial GHASH for AES-GCM records.");
+ }
}
#endif
@@ -8230,86 +8502,112 @@ cxgbe_sysctls(struct port_info *pi)
&pi->tx_parse_error, 0,
"# of tx packets with invalid length or # of segments");
-#define T4_REGSTAT(name, stat, desc) \
- SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \
- CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, \
- t4_port_reg(sc, pi->tx_chan, A_MPS_PORT_STAT_##stat##_L), \
- sysctl_handle_t4_reg64, "QU", desc)
-
-/* We get these from port_stats and they may be stale by up to 1s */
-#define T4_PORTSTAT(name, desc) \
- SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
- &pi->stats.name, desc)
-
- T4_REGSTAT(tx_octets, TX_PORT_BYTES, "# of octets in good frames");
- T4_REGSTAT(tx_frames, TX_PORT_FRAMES, "total # of good frames");
- T4_REGSTAT(tx_bcast_frames, TX_PORT_BCAST, "# of broadcast frames");
- T4_REGSTAT(tx_mcast_frames, TX_PORT_MCAST, "# of multicast frames");
- T4_REGSTAT(tx_ucast_frames, TX_PORT_UCAST, "# of unicast frames");
- T4_REGSTAT(tx_error_frames, TX_PORT_ERROR, "# of error frames");
- T4_REGSTAT(tx_frames_64, TX_PORT_64B, "# of tx frames in this range");
- T4_REGSTAT(tx_frames_65_127, TX_PORT_65B_127B, "# of tx frames in this range");
- T4_REGSTAT(tx_frames_128_255, TX_PORT_128B_255B, "# of tx frames in this range");
- T4_REGSTAT(tx_frames_256_511, TX_PORT_256B_511B, "# of tx frames in this range");
- T4_REGSTAT(tx_frames_512_1023, TX_PORT_512B_1023B, "# of tx frames in this range");
- T4_REGSTAT(tx_frames_1024_1518, TX_PORT_1024B_1518B, "# of tx frames in this range");
- T4_REGSTAT(tx_frames_1519_max, TX_PORT_1519B_MAX, "# of tx frames in this range");
- T4_REGSTAT(tx_drop, TX_PORT_DROP, "# of dropped tx frames");
- T4_REGSTAT(tx_pause, TX_PORT_PAUSE, "# of pause frames transmitted");
- T4_REGSTAT(tx_ppp0, TX_PORT_PPP0, "# of PPP prio 0 frames transmitted");
- T4_REGSTAT(tx_ppp1, TX_PORT_PPP1, "# of PPP prio 1 frames transmitted");
- T4_REGSTAT(tx_ppp2, TX_PORT_PPP2, "# of PPP prio 2 frames transmitted");
- T4_REGSTAT(tx_ppp3, TX_PORT_PPP3, "# of PPP prio 3 frames transmitted");
- T4_REGSTAT(tx_ppp4, TX_PORT_PPP4, "# of PPP prio 4 frames transmitted");
- T4_REGSTAT(tx_ppp5, TX_PORT_PPP5, "# of PPP prio 5 frames transmitted");
- T4_REGSTAT(tx_ppp6, TX_PORT_PPP6, "# of PPP prio 6 frames transmitted");
- T4_REGSTAT(tx_ppp7, TX_PORT_PPP7, "# of PPP prio 7 frames transmitted");
-
- T4_REGSTAT(rx_octets, RX_PORT_BYTES, "# of octets in good frames");
- T4_REGSTAT(rx_frames, RX_PORT_FRAMES, "total # of good frames");
- T4_REGSTAT(rx_bcast_frames, RX_PORT_BCAST, "# of broadcast frames");
- T4_REGSTAT(rx_mcast_frames, RX_PORT_MCAST, "# of multicast frames");
- T4_REGSTAT(rx_ucast_frames, RX_PORT_UCAST, "# of unicast frames");
- T4_REGSTAT(rx_too_long, RX_PORT_MTU_ERROR, "# of frames exceeding MTU");
- T4_REGSTAT(rx_jabber, RX_PORT_MTU_CRC_ERROR, "# of jabber frames");
+#define T4_LBSTAT(name, stat, desc) do { \
+ if (sc->params.tp.lb_mode) { \
+ SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, \
+ A_MPS_PORT_STAT_##stat##_L, \
+ sysctl_handle_t4_portstat64, "QU", desc); \
+ } else { \
+ SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, \
+ t4_port_reg(sc, pi->tx_chan, A_MPS_PORT_STAT_##stat##_L), \
+ sysctl_handle_t4_reg64, "QU", desc); \
+ } \
+} while (0)
+
+ T4_LBSTAT(tx_octets, TX_PORT_BYTES, "# of octets in good frames");
+ T4_LBSTAT(tx_frames, TX_PORT_FRAMES, "total # of good frames");
+ T4_LBSTAT(tx_bcast_frames, TX_PORT_BCAST, "# of broadcast frames");
+ T4_LBSTAT(tx_mcast_frames, TX_PORT_MCAST, "# of multicast frames");
+ T4_LBSTAT(tx_ucast_frames, TX_PORT_UCAST, "# of unicast frames");
+ T4_LBSTAT(tx_error_frames, TX_PORT_ERROR, "# of error frames");
+ T4_LBSTAT(tx_frames_64, TX_PORT_64B, "# of tx frames in this range");
+ T4_LBSTAT(tx_frames_65_127, TX_PORT_65B_127B, "# of tx frames in this range");
+ T4_LBSTAT(tx_frames_128_255, TX_PORT_128B_255B, "# of tx frames in this range");
+ T4_LBSTAT(tx_frames_256_511, TX_PORT_256B_511B, "# of tx frames in this range");
+ T4_LBSTAT(tx_frames_512_1023, TX_PORT_512B_1023B, "# of tx frames in this range");
+ T4_LBSTAT(tx_frames_1024_1518, TX_PORT_1024B_1518B, "# of tx frames in this range");
+ T4_LBSTAT(tx_frames_1519_max, TX_PORT_1519B_MAX, "# of tx frames in this range");
+ T4_LBSTAT(tx_drop, TX_PORT_DROP, "# of dropped tx frames");
+ T4_LBSTAT(tx_pause, TX_PORT_PAUSE, "# of pause frames transmitted");
+ T4_LBSTAT(tx_ppp0, TX_PORT_PPP0, "# of PPP prio 0 frames transmitted");
+ T4_LBSTAT(tx_ppp1, TX_PORT_PPP1, "# of PPP prio 1 frames transmitted");
+ T4_LBSTAT(tx_ppp2, TX_PORT_PPP2, "# of PPP prio 2 frames transmitted");
+ T4_LBSTAT(tx_ppp3, TX_PORT_PPP3, "# of PPP prio 3 frames transmitted");
+ T4_LBSTAT(tx_ppp4, TX_PORT_PPP4, "# of PPP prio 4 frames transmitted");
+ T4_LBSTAT(tx_ppp5, TX_PORT_PPP5, "# of PPP prio 5 frames transmitted");
+ T4_LBSTAT(tx_ppp6, TX_PORT_PPP6, "# of PPP prio 6 frames transmitted");
+ T4_LBSTAT(tx_ppp7, TX_PORT_PPP7, "# of PPP prio 7 frames transmitted");
+
+ T4_LBSTAT(rx_octets, RX_PORT_BYTES, "# of octets in good frames");
+ T4_LBSTAT(rx_frames, RX_PORT_FRAMES, "total # of good frames");
+ T4_LBSTAT(rx_bcast_frames, RX_PORT_BCAST, "# of broadcast frames");
+ T4_LBSTAT(rx_mcast_frames, RX_PORT_MCAST, "# of multicast frames");
+ T4_LBSTAT(rx_ucast_frames, RX_PORT_UCAST, "# of unicast frames");
+ T4_LBSTAT(rx_too_long, RX_PORT_MTU_ERROR, "# of frames exceeding MTU");
+ T4_LBSTAT(rx_jabber, RX_PORT_MTU_CRC_ERROR, "# of jabber frames");
if (is_t6(sc)) {
- T4_PORTSTAT(rx_fcs_err,
+ /* Read from port_stats and may be stale by up to 1s */
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "rx_fcs_err",
+ CTLFLAG_RD, &pi->stats.rx_fcs_err,
"# of frames received with bad FCS since last link up");
} else {
- T4_REGSTAT(rx_fcs_err, RX_PORT_CRC_ERROR,
+ T4_LBSTAT(rx_fcs_err, RX_PORT_CRC_ERROR,
"# of frames received with bad FCS");
}
- T4_REGSTAT(rx_len_err, RX_PORT_LEN_ERROR, "# of frames received with length error");
- T4_REGSTAT(rx_symbol_err, RX_PORT_SYM_ERROR, "symbol errors");
- T4_REGSTAT(rx_runt, RX_PORT_LESS_64B, "# of short frames received");
- T4_REGSTAT(rx_frames_64, RX_PORT_64B, "# of rx frames in this range");
- T4_REGSTAT(rx_frames_65_127, RX_PORT_65B_127B, "# of rx frames in this range");
- T4_REGSTAT(rx_frames_128_255, RX_PORT_128B_255B, "# of rx frames in this range");
- T4_REGSTAT(rx_frames_256_511, RX_PORT_256B_511B, "# of rx frames in this range");
- T4_REGSTAT(rx_frames_512_1023, RX_PORT_512B_1023B, "# of rx frames in this range");
- T4_REGSTAT(rx_frames_1024_1518, RX_PORT_1024B_1518B, "# of rx frames in this range");
- T4_REGSTAT(rx_frames_1519_max, RX_PORT_1519B_MAX, "# of rx frames in this range");
- T4_REGSTAT(rx_pause, RX_PORT_PAUSE, "# of pause frames received");
- T4_REGSTAT(rx_ppp0, RX_PORT_PPP0, "# of PPP prio 0 frames received");
- T4_REGSTAT(rx_ppp1, RX_PORT_PPP1, "# of PPP prio 1 frames received");
- T4_REGSTAT(rx_ppp2, RX_PORT_PPP2, "# of PPP prio 2 frames received");
- T4_REGSTAT(rx_ppp3, RX_PORT_PPP3, "# of PPP prio 3 frames received");
- T4_REGSTAT(rx_ppp4, RX_PORT_PPP4, "# of PPP prio 4 frames received");
- T4_REGSTAT(rx_ppp5, RX_PORT_PPP5, "# of PPP prio 5 frames received");
- T4_REGSTAT(rx_ppp6, RX_PORT_PPP6, "# of PPP prio 6 frames received");
- T4_REGSTAT(rx_ppp7, RX_PORT_PPP7, "# of PPP prio 7 frames received");
-
- T4_PORTSTAT(rx_ovflow0, "# drops due to buffer-group 0 overflows");
- T4_PORTSTAT(rx_ovflow1, "# drops due to buffer-group 1 overflows");
- T4_PORTSTAT(rx_ovflow2, "# drops due to buffer-group 2 overflows");
- T4_PORTSTAT(rx_ovflow3, "# drops due to buffer-group 3 overflows");
- T4_PORTSTAT(rx_trunc0, "# of buffer-group 0 truncated packets");
- T4_PORTSTAT(rx_trunc1, "# of buffer-group 1 truncated packets");
- T4_PORTSTAT(rx_trunc2, "# of buffer-group 2 truncated packets");
- T4_PORTSTAT(rx_trunc3, "# of buffer-group 3 truncated packets");
+ T4_LBSTAT(rx_len_err, RX_PORT_LEN_ERROR, "# of frames received with length error");
+ T4_LBSTAT(rx_symbol_err, RX_PORT_SYM_ERROR, "symbol errors");
+ T4_LBSTAT(rx_runt, RX_PORT_LESS_64B, "# of short frames received");
+ T4_LBSTAT(rx_frames_64, RX_PORT_64B, "# of rx frames in this range");
+ T4_LBSTAT(rx_frames_65_127, RX_PORT_65B_127B, "# of rx frames in this range");
+ T4_LBSTAT(rx_frames_128_255, RX_PORT_128B_255B, "# of rx frames in this range");
+ T4_LBSTAT(rx_frames_256_511, RX_PORT_256B_511B, "# of rx frames in this range");
+ T4_LBSTAT(rx_frames_512_1023, RX_PORT_512B_1023B, "# of rx frames in this range");
+ T4_LBSTAT(rx_frames_1024_1518, RX_PORT_1024B_1518B, "# of rx frames in this range");
+ T4_LBSTAT(rx_frames_1519_max, RX_PORT_1519B_MAX, "# of rx frames in this range");
+ T4_LBSTAT(rx_pause, RX_PORT_PAUSE, "# of pause frames received");
+ T4_LBSTAT(rx_ppp0, RX_PORT_PPP0, "# of PPP prio 0 frames received");
+ T4_LBSTAT(rx_ppp1, RX_PORT_PPP1, "# of PPP prio 1 frames received");
+ T4_LBSTAT(rx_ppp2, RX_PORT_PPP2, "# of PPP prio 2 frames received");
+ T4_LBSTAT(rx_ppp3, RX_PORT_PPP3, "# of PPP prio 3 frames received");
+ T4_LBSTAT(rx_ppp4, RX_PORT_PPP4, "# of PPP prio 4 frames received");
+ T4_LBSTAT(rx_ppp5, RX_PORT_PPP5, "# of PPP prio 5 frames received");
+ T4_LBSTAT(rx_ppp6, RX_PORT_PPP6, "# of PPP prio 6 frames received");
+ T4_LBSTAT(rx_ppp7, RX_PORT_PPP7, "# of PPP prio 7 frames received");
+#undef T4_LBSTAT
+
+#define T4_REGSTAT(name, stat, desc) do { \
+ SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, \
+ A_MPS_STAT_##stat##_L, sysctl_handle_t4_reg64, "QU", desc); \
+} while (0)
+ if (pi->mps_bg_map & 1) {
+ T4_REGSTAT(rx_ovflow0, RX_BG_0_MAC_DROP_FRAME,
+ "# drops due to buffer-group 0 overflows");
+ T4_REGSTAT(rx_trunc0, RX_BG_0_MAC_TRUNC_FRAME,
+ "# of buffer-group 0 truncated packets");
+ }
+ if (pi->mps_bg_map & 2) {
+ T4_REGSTAT(rx_ovflow1, RX_BG_1_MAC_DROP_FRAME,
+ "# drops due to buffer-group 1 overflows");
+ T4_REGSTAT(rx_trunc1, RX_BG_1_MAC_TRUNC_FRAME,
+ "# of buffer-group 1 truncated packets");
+ }
+ if (pi->mps_bg_map & 4) {
+ T4_REGSTAT(rx_ovflow2, RX_BG_2_MAC_DROP_FRAME,
+ "# drops due to buffer-group 2 overflows");
+ T4_REGSTAT(rx_trunc2, RX_BG_2_MAC_TRUNC_FRAME,
+ "# of buffer-group 2 truncated packets");
+ }
+ if (pi->mps_bg_map & 8) {
+ T4_REGSTAT(rx_ovflow3, RX_BG_3_MAC_DROP_FRAME,
+ "# drops due to buffer-group 3 overflows");
+ T4_REGSTAT(rx_trunc3, RX_BG_3_MAC_TRUNC_FRAME,
+ "# of buffer-group 3 truncated packets");
+ }
#undef T4_REGSTAT
-#undef T4_PORTSTAT
}
static int
@@ -8452,14 +8750,14 @@ sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS)
vi->flags |= TX_USES_VM_WR;
if_sethwtsomaxsegcount(vi->ifp, TX_SGL_SEGS_VM_TSO);
ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan));
+ V_TXPKT_INTF(pi->hw_port));
if (!(sc->flags & IS_VF))
npkt--;
} else {
vi->flags &= ~TX_USES_VM_WR;
if_sethwtsomaxsegcount(vi->ifp, TX_SGL_SEGS_TSO);
ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
+ V_TXPKT_INTF(pi->hw_port) | V_TXPKT_PF(sc->pf) |
V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
}
for_each_txq(vi, i, txq) {
@@ -8669,13 +8967,12 @@ sysctl_link_fec(SYSCTL_HANDLER_ARGS)
struct link_config *lc = &pi->link_cfg;
int rc;
struct sbuf *sb;
- static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD1\5RSVD2";
sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (sb == NULL)
return (ENOMEM);
if (lc->link_ok)
- sbuf_printf(sb, "%b", lc->fec, bits);
+ sbuf_printf(sb, "%b", lc->fec, t4_fec_bits);
else
sbuf_printf(sb, "no link");
rc = sbuf_finish(sb);
@@ -8695,14 +8992,12 @@ sysctl_requested_fec(SYSCTL_HANDLER_ARGS)
if (req->newptr == NULL) {
struct sbuf *sb;
- static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2"
- "\5RSVD3\6auto\7module";
sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (sb == NULL)
return (ENOMEM);
- sbuf_printf(sb, "%b", lc->requested_fec, bits);
+ sbuf_printf(sb, "%b", lc->requested_fec, t4_fec_bits);
rc = sbuf_finish(sb);
sbuf_delete(sb);
} else {
@@ -8771,7 +9066,6 @@ sysctl_module_fec(SYSCTL_HANDLER_ARGS)
int rc;
int8_t fec;
struct sbuf *sb;
- static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2\5RSVD3";
sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (sb == NULL)
@@ -8805,7 +9099,7 @@ sysctl_module_fec(SYSCTL_HANDLER_ARGS)
if (fec == 0)
fec = FEC_NONE;
PORT_UNLOCK(pi);
- sbuf_printf(sb, "%b", fec & M_FW_PORT_CAP32_FEC, bits);
+ sbuf_printf(sb, "%b", fec & M_FW_PORT_CAP32_FEC, t4_fec_bits);
}
rc = sbuf_finish(sb);
done:
@@ -8913,6 +9207,31 @@ sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
}
static int
+sysctl_handle_t4_portstat64(SYSCTL_HANDLER_ARGS)
+{
+ struct port_info *pi = arg1;
+ struct adapter *sc = pi->adapter;
+ int rc, i, reg = arg2;
+ uint64_t val;
+
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ val = 0;
+ for (i = 0; i < sc->params.tp.lb_nchan; i++) {
+ val += t4_read_reg64(sc,
+ t4_port_reg(sc, pi->tx_chan + i, reg));
+ }
+ rc = 0;
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc == 0)
+ rc = sysctl_handle_64(oidp, &val, 0, req);
+ return (rc);
+}
+
+static int
sysctl_temperature(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
@@ -9012,6 +9331,10 @@ sysctl_loadavg(SYSCTL_HANDLER_ARGS)
struct sbuf *sb;
int rc;
uint32_t param, val;
+ uint8_t coreid = (uint8_t)arg2;
+
+ KASSERT(coreid < sc->params.ncores,
+ ("%s: bad coreid %u\n", __func__, coreid));
rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg");
if (rc)
@@ -9020,7 +9343,8 @@ sysctl_loadavg(SYSCTL_HANDLER_ARGS)
rc = ENXIO;
else {
param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD);
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD) |
+ V_FW_PARAMS_PARAM_Y(coreid);
rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
}
end_synchronized_op(sc, 0);
@@ -9086,50 +9410,30 @@ done:
return (rc);
}
-static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
- "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
- "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
- "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
-};
-
static int
-sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
+sysctl_cim_ibq(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
struct sbuf *sb;
- int rc, i, n, qid = arg2;
+ int rc, i, n, qid, coreid;
uint32_t *buf, *p;
- char *qtype;
- u_int cim_num_obq = sc->chip_params->cim_num_obq;
- KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
- ("%s: bad qid %d\n", __func__, qid));
+ qid = arg2 & 0xffff;
+ coreid = arg2 >> 16;
- if (qid < CIM_NUM_IBQ) {
- /* inbound queue */
- qtype = "IBQ";
- n = 4 * CIM_IBQ_SIZE;
- buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
- mtx_lock(&sc->reg_lock);
- if (hw_off_limits(sc))
- rc = -ENXIO;
- else
- rc = t4_read_cim_ibq(sc, qid, buf, n);
- mtx_unlock(&sc->reg_lock);
- } else {
- /* outbound queue */
- qtype = "OBQ";
- qid -= CIM_NUM_IBQ;
- n = 4 * cim_num_obq * CIM_OBQ_SIZE;
- buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
- mtx_lock(&sc->reg_lock);
- if (hw_off_limits(sc))
- rc = -ENXIO;
- else
- rc = t4_read_cim_obq(sc, qid, buf, n);
- mtx_unlock(&sc->reg_lock);
- }
+ KASSERT(qid >= 0 && qid < sc->chip_params->cim_num_ibq,
+ ("%s: bad ibq qid %d\n", __func__, qid));
+ KASSERT(coreid >= 0 && coreid < sc->params.ncores,
+ ("%s: bad coreid %d\n", __func__, coreid));
+ n = 4 * CIM_IBQ_SIZE;
+ buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = -ENXIO;
+ else
+ rc = t4_read_cim_ibq_core(sc, coreid, qid, buf, n);
+ mtx_unlock(&sc->reg_lock);
if (rc < 0) {
rc = -rc;
goto done;
@@ -9141,12 +9445,58 @@ sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
rc = ENOMEM;
goto done;
}
-
- sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
for (i = 0, p = buf; i < n; i += 16, p += 4)
sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
p[2], p[3]);
+ rc = sbuf_finish(sb);
+ sbuf_delete(sb);
+done:
+ free(buf, M_CXGBE);
+ return (rc);
+}
+
+static int
+sysctl_cim_obq(SYSCTL_HANDLER_ARGS)
+{
+ struct adapter *sc = arg1;
+ struct sbuf *sb;
+ int rc, i, n, qid, coreid;
+ uint32_t *buf, *p;
+
+ qid = arg2 & 0xffff;
+ coreid = arg2 >> 16;
+
+ KASSERT(qid >= 0 && qid < sc->chip_params->cim_num_obq,
+ ("%s: bad obq qid %d\n", __func__, qid));
+ KASSERT(coreid >= 0 && coreid < sc->params.ncores,
+ ("%s: bad coreid %d\n", __func__, coreid));
+
+ n = 6 * CIM_OBQ_SIZE * 4;
+ buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = -ENXIO;
+ else
+ rc = t4_read_cim_obq_core(sc, coreid, qid, buf, n);
+ mtx_unlock(&sc->reg_lock);
+ if (rc < 0) {
+ rc = -rc;
+ goto done;
+ }
+ n = rc * sizeof(uint32_t); /* rc has # of words actually read */
+ rc = sysctl_wire_old_buffer(req, 0);
+ if (rc != 0)
+ goto done;
+
+ sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
+ if (sb == NULL) {
+ rc = ENOMEM;
+ goto done;
+ }
+ for (i = 0, p = buf; i < n; i += 16, p += 4)
+ sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
+ p[2], p[3]);
rc = sbuf_finish(sb);
sbuf_delete(sb);
done:
@@ -9217,7 +9567,7 @@ sbuf_cim_la6(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg)
}
static int
-sbuf_cim_la(struct adapter *sc, struct sbuf *sb, int flags)
+sbuf_cim_la(struct adapter *sc, int coreid, struct sbuf *sb, int flags)
{
uint32_t cfg, *buf;
int rc;
@@ -9232,9 +9582,10 @@ sbuf_cim_la(struct adapter *sc, struct sbuf *sb, int flags)
if (hw_off_limits(sc))
rc = ENXIO;
else {
- rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
+ rc = -t4_cim_read_core(sc, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
+ &cfg);
if (rc == 0)
- rc = -t4_cim_read_la(sc, buf, NULL);
+ rc = -t4_cim_read_la_core(sc, coreid, buf, NULL);
}
mtx_unlock(&sc->reg_lock);
if (rc == 0) {
@@ -9251,6 +9602,7 @@ static int
sysctl_cim_la(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
+ int coreid = arg2;
struct sbuf *sb;
int rc;
@@ -9258,7 +9610,7 @@ sysctl_cim_la(SYSCTL_HANDLER_ARGS)
if (sb == NULL)
return (ENOMEM);
- rc = sbuf_cim_la(sc, sb, M_WAITOK);
+ rc = sbuf_cim_la(sc, coreid, sb, M_WAITOK);
if (rc == 0)
rc = sbuf_finish(sb);
sbuf_delete(sb);
@@ -9295,7 +9647,7 @@ dump_cimla(struct adapter *sc)
device_get_nameunit(sc->dev));
return;
}
- rc = sbuf_cim_la(sc, &sb, M_WAITOK);
+ rc = sbuf_cim_la(sc, 0, &sb, M_WAITOK);
if (rc == 0) {
rc = sbuf_finish(&sb);
if (rc == 0) {
@@ -9419,6 +9771,13 @@ sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
+ static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
+ "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
+ "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
+ "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
+ };
+
+ MPASS(chip_id(sc) < CHELSIO_T7);
cim_num_obq = sc->chip_params->cim_num_obq;
if (is_t4(sc)) {
@@ -9471,6 +9830,104 @@ sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
}
static int
+sysctl_cim_qcfg_t7(SYSCTL_HANDLER_ARGS)
+{
+ struct adapter *sc = arg1;
+ u_int coreid = arg2;
+ struct sbuf *sb;
+ int rc, i;
+ u_int addr;
+ uint16_t base[CIM_NUM_IBQ_T7 + CIM_NUM_OBQ_T7];
+ uint16_t size[CIM_NUM_IBQ_T7 + CIM_NUM_OBQ_T7];
+ uint16_t thres[CIM_NUM_IBQ_T7];
+ uint32_t obq_wr[2 * CIM_NUM_OBQ_T7], *wr = obq_wr;
+ uint32_t stat[4 * (CIM_NUM_IBQ_T7 + CIM_NUM_OBQ_T7)], *p = stat;
+ static const char * const qname_ibq_t7[] = {
+ "TP0", "TP1", "TP2", "TP3", "ULP", "SGE0", "SGE1", "NC-SI",
+ "RSVD", "IPC1", "IPC2", "IPC3", "IPC4", "IPC5", "IPC6", "IPC7",
+ };
+ static const char * const qname_obq_t7[] = {
+ "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", "SGE0-RX",
+ "RSVD", "RSVD", "IPC1", "IPC2", "IPC3", "IPC4", "IPC5",
+ "IPC6", "IPC7"
+ };
+ static const char * const qname_ibq_sec_t7[] = {
+ "TP0", "TP1", "TP2", "TP3", "ULP", "SGE0", "RSVD", "RSVD",
+ "RSVD", "IPC0", "RSVD", "RSVD", "RSVD", "RSVD", "RSVD", "RSVD",
+ };
+ static const char * const qname_obq_sec_t7[] = {
+ "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "RSVD", "SGE0-RX",
+ "RSVD", "RSVD", "IPC0", "RSVD", "RSVD", "RSVD", "RSVD",
+ "RSVD", "RSVD",
+ };
+
+ MPASS(chip_id(sc) >= CHELSIO_T7);
+
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ rc = -t4_cim_read_core(sc, 1, coreid,
+ A_T7_UP_IBQ_0_SHADOW_RDADDR, 4 * CIM_NUM_IBQ_T7, stat);
+ if (rc != 0)
+ goto unlock;
+
+ rc = -t4_cim_read_core(sc, 1, coreid,
+ A_T7_UP_OBQ_0_SHADOW_RDADDR, 4 * CIM_NUM_OBQ_T7,
+ &stat[4 * CIM_NUM_IBQ_T7]);
+ if (rc != 0)
+ goto unlock;
+
+ addr = A_T7_UP_OBQ_0_SHADOW_REALADDR;
+ for (i = 0; i < CIM_NUM_OBQ_T7 * 2; i++, addr += 8) {
+ rc = -t4_cim_read_core(sc, 1, coreid, addr, 1,
+ &obq_wr[i]);
+ if (rc != 0)
+ goto unlock;
+ }
+ t4_read_cimq_cfg_core(sc, coreid, base, size, thres);
+ }
+unlock:
+ mtx_unlock(&sc->reg_lock);
+ if (rc)
+ return (rc);
+
+ sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
+ if (sb == NULL)
+ return (ENOMEM);
+
+ sbuf_printf(sb,
+ " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail");
+
+ for (i = 0; i < CIM_NUM_IBQ_T7; i++, p += 4) {
+ if (!size[i])
+ continue;
+
+ sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u",
+ coreid == 0 ? qname_ibq_t7[i] : qname_ibq_sec_t7[i],
+ base[i], size[i], thres[i], G_IBQRDADDR(p[0]) & 0xfff,
+ G_IBQWRADDR(p[1]) & 0xfff, G_QUESOPCNT(p[3]),
+ G_QUEEOPCNT(p[3]), G_T7_QUEREMFLITS(p[2]) * 16);
+ }
+
+ for ( ; i < CIM_NUM_IBQ_T7 + CIM_NUM_OBQ_T7; i++, p += 4, wr += 2) {
+ if (!size[i])
+ continue;
+
+ sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u",
+ coreid == 0 ? qname_obq_t7[i - CIM_NUM_IBQ_T7] :
+ qname_obq_sec_t7[i - CIM_NUM_IBQ_T7],
+ base[i], size[i], G_QUERDADDR(p[0]) & 0xfff,
+ wr[0] << 1, G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
+ G_T7_QUEREMFLITS(p[2]) * 16);
+ }
+
+ rc = sbuf_finish(sb);
+ sbuf_delete(sb);
+ return (rc);
+}
+
+static int
sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
@@ -9612,18 +10069,25 @@ static const char * const devlog_facility_strings[] = {
};
static int
-sbuf_devlog(struct adapter *sc, struct sbuf *sb, int flags)
+sbuf_devlog(struct adapter *sc, int coreid, struct sbuf *sb, int flags)
{
int i, j, rc, nentries, first = 0;
struct devlog_params *dparams = &sc->params.devlog;
struct fw_devlog_e *buf, *e;
+ uint32_t addr, size;
uint64_t ftstamp = UINT64_MAX;
+ KASSERT(coreid >= 0 && coreid < sc->params.ncores,
+ ("%s: bad coreid %d\n", __func__, coreid));
+
if (dparams->addr == 0)
return (ENXIO);
+ size = dparams->size / sc->params.ncores;
+ addr = dparams->addr + coreid * size;
+
MPASS(flags == M_WAITOK || flags == M_NOWAIT);
- buf = malloc(dparams->size, M_CXGBE, M_ZERO | flags);
+ buf = malloc(size, M_CXGBE, M_ZERO | flags);
if (buf == NULL)
return (ENOMEM);
@@ -9631,13 +10095,12 @@ sbuf_devlog(struct adapter *sc, struct sbuf *sb, int flags)
if (hw_off_limits(sc))
rc = ENXIO;
else
- rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf,
- dparams->size);
+ rc = read_via_memwin(sc, 1, addr, (void *)buf, size);
mtx_unlock(&sc->reg_lock);
if (rc != 0)
goto done;
- nentries = dparams->size / sizeof(struct fw_devlog_e);
+ nentries = size / sizeof(struct fw_devlog_e);
for (i = 0; i < nentries; i++) {
e = &buf[i];
@@ -9689,14 +10152,24 @@ static int
sysctl_devlog(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
- int rc;
+ int rc, i, coreid = arg2;
struct sbuf *sb;
sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
return (ENOMEM);
-
- rc = sbuf_devlog(sc, sb, M_WAITOK);
+ if (coreid == -1) {
+ /* -1 means all cores */
+ for (i = rc = 0; i < sc->params.ncores && rc == 0; i++) {
+ if (sc->params.ncores > 0)
+ sbuf_printf(sb, "=== CIM core %u ===\n", i);
+ rc = sbuf_devlog(sc, i, sb, M_WAITOK);
+ }
+ } else {
+ KASSERT(coreid >= 0 && coreid < sc->params.ncores,
+ ("%s: bad coreid %d\n", __func__, coreid));
+ rc = sbuf_devlog(sc, coreid, sb, M_WAITOK);
+ }
if (rc == 0)
rc = sbuf_finish(sb);
sbuf_delete(sb);
@@ -9706,7 +10179,7 @@ sysctl_devlog(SYSCTL_HANDLER_ARGS)
static void
dump_devlog(struct adapter *sc)
{
- int rc;
+ int rc, i;
struct sbuf sb;
if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb) {
@@ -9714,13 +10187,15 @@ dump_devlog(struct adapter *sc)
device_get_nameunit(sc->dev));
return;
}
- rc = sbuf_devlog(sc, &sb, M_WAITOK);
+ for (i = rc = 0; i < sc->params.ncores && rc == 0; i++) {
+ if (sc->params.ncores > 0)
+ sbuf_printf(&sb, "=== CIM core %u ===\n", i);
+ rc = sbuf_devlog(sc, i, &sb, M_WAITOK);
+ }
if (rc == 0) {
- rc = sbuf_finish(&sb);
- if (rc == 0) {
- log(LOG_DEBUG, "%s: device log follows.\n%s",
- device_get_nameunit(sc->dev), sbuf_data(&sb));
- }
+ sbuf_finish(&sb);
+ log(LOG_DEBUG, "%s: device log follows.\n%s",
+ device_get_nameunit(sc->dev), sbuf_data(&sb));
}
sbuf_delete(&sb);
}
@@ -9909,16 +10384,16 @@ sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
}
struct mem_desc {
- u_int base;
- u_int limit;
+ uint64_t base;
+ uint64_t limit;
u_int idx;
};
static int
mem_desc_cmp(const void *a, const void *b)
{
- const u_int v1 = ((const struct mem_desc *)a)->base;
- const u_int v2 = ((const struct mem_desc *)b)->base;
+ const uint64_t v1 = ((const struct mem_desc *)a)->base;
+ const uint64_t v2 = ((const struct mem_desc *)b)->base;
if (v1 < v2)
return (-1);
@@ -9929,10 +10404,9 @@ mem_desc_cmp(const void *a, const void *b)
}
static void
-mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
- unsigned int to)
+mem_region_show(struct sbuf *sb, const char *name, uint64_t from, uint64_t to)
{
- unsigned int size;
+ uintmax_t size;
if (from == to)
return;
@@ -9941,8 +10415,12 @@ mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
if (size == 0)
return;
- /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
- sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
+ if (from > UINT32_MAX || to > UINT32_MAX)
+ sbuf_printf(sb, "%-18s 0x%012jx-0x%012jx [%ju]\n", name,
+ (uintmax_t)from, (uintmax_t)to, size);
+ else
+ sbuf_printf(sb, "%-18s 0x%08jx-0x%08jx [%ju]\n", name,
+ (uintmax_t)from, (uintmax_t)to, size);
}
static int
@@ -9950,7 +10428,7 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
struct sbuf *sb;
- int rc, i, n;
+ int rc, i, n, nchan;
uint32_t lo, hi, used, free, alloc;
static const char *memory[] = {
"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:", "HMA:"
@@ -9961,12 +10439,14 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
"RQUDP region:", "PBL region:", "TXPBL region:",
- "TLSKey region:", "DBVFIFO region:", "ULPRX state:",
- "ULPTX state:", "On-chip queues:",
+ "TLSKey region:", "RRQ region:", "NVMe STAG region:",
+ "NVMe RQ region:", "NVMe RXPBL region:", "NVMe TPT region:",
+ "NVMe TXPBL region:", "DBVFIFO region:", "ULPRX state:",
+ "ULPTX state:", "RoCE RRQ region:", "On-chip queues:",
};
struct mem_desc avail[4];
struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */
- struct mem_desc *md = mem;
+ struct mem_desc *md;
rc = sysctl_wire_old_buffer(req, 0);
if (rc != 0)
@@ -9992,36 +10472,91 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
if (lo & F_EDRAM0_ENABLE) {
hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
- avail[i].base = G_EDRAM0_BASE(hi) << 20;
- avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
+ if (chip_id(sc) >= CHELSIO_T7) {
+ avail[i].base = (uint64_t)G_T7_EDRAM0_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_T7_EDRAM0_SIZE(hi) << 20);
+ } else {
+ avail[i].base = (uint64_t)G_EDRAM0_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_EDRAM0_SIZE(hi) << 20);
+ }
avail[i].idx = 0;
i++;
}
if (lo & F_EDRAM1_ENABLE) {
hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
- avail[i].base = G_EDRAM1_BASE(hi) << 20;
- avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
+ if (chip_id(sc) >= CHELSIO_T7) {
+ avail[i].base = (uint64_t)G_T7_EDRAM1_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_T7_EDRAM1_SIZE(hi) << 20);
+ } else {
+ avail[i].base = (uint64_t)G_EDRAM1_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_EDRAM1_SIZE(hi) << 20);
+ }
avail[i].idx = 1;
i++;
}
if (lo & F_EXT_MEM_ENABLE) {
- hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
- avail[i].base = G_EXT_MEM_BASE(hi) << 20;
- avail[i].limit = avail[i].base + (G_EXT_MEM_SIZE(hi) << 20);
- avail[i].idx = is_t5(sc) ? 3 : 2; /* Call it MC0 for T5 */
+ switch (chip_id(sc)) {
+ case CHELSIO_T4:
+ case CHELSIO_T6:
+ hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
+ avail[i].base = (uint64_t)G_EXT_MEM_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_EXT_MEM_SIZE(hi) << 20);
+ avail[i].idx = 2;
+ break;
+ case CHELSIO_T5:
+ hi = t4_read_reg(sc, A_MA_EXT_MEMORY0_BAR);
+ avail[i].base = (uint64_t)G_EXT_MEM0_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_EXT_MEM0_SIZE(hi) << 20);
+ avail[i].idx = 3; /* Call it MC0 for T5 */
+ break;
+ default:
+ hi = t4_read_reg(sc, A_MA_EXT_MEMORY0_BAR);
+ avail[i].base = (uint64_t)G_T7_EXT_MEM0_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_T7_EXT_MEM0_SIZE(hi) << 20);
+ avail[i].idx = 3; /* Call it MC0 for T7+ */
+ break;
+ }
i++;
}
- if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) {
+ if (lo & F_EXT_MEM1_ENABLE && !(lo & F_MC_SPLIT)) {
+ /* Only T5 and T7+ have 2 MCs. */
+ MPASS(is_t5(sc) || chip_id(sc) >= CHELSIO_T7);
+
hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
- avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
- avail[i].limit = avail[i].base + (G_EXT_MEM1_SIZE(hi) << 20);
+ if (chip_id(sc) >= CHELSIO_T7) {
+ avail[i].base = (uint64_t)G_T7_EXT_MEM1_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_T7_EXT_MEM1_SIZE(hi) << 20);
+ } else {
+ avail[i].base = (uint64_t)G_EXT_MEM1_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_EXT_MEM1_SIZE(hi) << 20);
+ }
avail[i].idx = 4;
i++;
}
- if (is_t6(sc) && lo & F_HMA_MUX) {
- hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
- avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
- avail[i].limit = avail[i].base + (G_EXT_MEM1_SIZE(hi) << 20);
+ if (lo & F_HMA_MUX) {
+ /* Only T6+ have HMA. */
+ MPASS(chip_id(sc) >= CHELSIO_T6);
+
+ if (chip_id(sc) >= CHELSIO_T7) {
+ hi = t4_read_reg(sc, A_MA_HOST_MEMORY_BAR);
+ avail[i].base = (uint64_t)G_HMATARGETBASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_T7_HMA_SIZE(hi) << 20);
+ } else {
+ hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
+ avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_EXT_MEM1_SIZE(hi) << 20);
+ }
avail[i].idx = 5;
i++;
}
@@ -10030,6 +10565,7 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
goto done;
qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
+ md = &mem[0];
(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
@@ -10065,22 +10601,52 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
}
md++;
-#define ulp_region(reg) \
- md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
- (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
+#define ulp_region(reg) do {\
+ const u_int shift = chip_id(sc) >= CHELSIO_T7 ? 4 : 0; \
+ md->base = (uint64_t)t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT) << shift; \
+ md->limit = (uint64_t)t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT) << shift; \
+ md->limit += (1 << shift) - 1; \
+ md++; \
+ } while (0)
+
+#define hide_ulp_region() do { \
+ md->base = 0; \
+ md->idx = nitems(region); \
+ md++; \
+ } while (0)
ulp_region(RX_ISCSI);
ulp_region(RX_TDDP);
ulp_region(TX_TPT);
ulp_region(RX_STAG);
ulp_region(RX_RQ);
- ulp_region(RX_RQUDP);
+ if (chip_id(sc) < CHELSIO_T7)
+ ulp_region(RX_RQUDP);
+ else
+ hide_ulp_region();
ulp_region(RX_PBL);
ulp_region(TX_PBL);
- if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) {
+ if (chip_id(sc) >= CHELSIO_T6)
ulp_region(RX_TLS_KEY);
+ else
+ hide_ulp_region();
+ if (chip_id(sc) >= CHELSIO_T7) {
+ ulp_region(RX_RRQ);
+ ulp_region(RX_NVME_TCP_STAG);
+ ulp_region(RX_NVME_TCP_RQ);
+ ulp_region(RX_NVME_TCP_PBL);
+ ulp_region(TX_NVME_TCP_TPT);
+ ulp_region(TX_NVME_TCP_PBL);
+ } else {
+ hide_ulp_region();
+ hide_ulp_region();
+ hide_ulp_region();
+ hide_ulp_region();
+ hide_ulp_region();
+ hide_ulp_region();
}
#undef ulp_region
+#undef hide_ulp_region
md->base = 0;
if (is_t4(sc))
@@ -10111,6 +10677,15 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
md->limit = 0;
md++;
+ if (chip_id(sc) >= CHELSIO_T7) {
+ t4_tp_pio_read(sc, &lo, 1, A_TP_ROCE_RRQ_BASE, false);
+ md->base = lo;
+ } else {
+ md->base = 0;
+ md->idx = nitems(region);
+ }
+ md++;
+
md->base = sc->vres.ocq.start;
if (sc->vres.ocq.size)
md->limit = md->base + sc->vres.ocq.size - 1;
@@ -10143,31 +10718,41 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
mem[i].limit);
}
- sbuf_printf(sb, "\n");
lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
- mem_region_show(sb, "uP RAM:", lo, hi);
+ if (hi != lo - 1) {
+ sbuf_printf(sb, "\n");
+ mem_region_show(sb, "uP RAM:", lo, hi);
+ }
lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
- mem_region_show(sb, "uP Extmem2:", lo, hi);
+ if (hi != lo - 1)
+ mem_region_show(sb, "uP Extmem2:", lo, hi);
lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
- for (i = 0, free = 0; i < 2; i++)
+ if (chip_id(sc) >= CHELSIO_T7)
+ nchan = 1 << G_T7_PMRXNUMCHN(lo);
+ else
+ nchan = lo & F_PMRXNUMCHN ? 2 : 1;
+ for (i = 0, free = 0; i < nchan; i++)
free += G_FREERXPAGECOUNT(t4_read_reg(sc, A_TP_FLM_FREE_RX_CNT));
sbuf_printf(sb, "\n%u Rx pages (%u free) of size %uKiB for %u channels\n",
G_PMRXMAXPAGE(lo), free,
- t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
- (lo & F_PMRXNUMCHN) ? 2 : 1);
+ t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10, nchan);
lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
- for (i = 0, free = 0; i < 4; i++)
+ if (chip_id(sc) >= CHELSIO_T7)
+ nchan = 1 << G_T7_PMTXNUMCHN(lo);
+ else
+ nchan = 1 << G_PMTXNUMCHN(lo);
+ for (i = 0, free = 0; i < nchan; i++)
free += G_FREETXPAGECOUNT(t4_read_reg(sc, A_TP_FLM_FREE_TX_CNT));
sbuf_printf(sb, "%u Tx pages (%u free) of size %u%ciB for %u channels\n",
G_PMTXMAXPAGE(lo), free,
hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
- hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
+ hi >= (1 << 20) ? 'M' : 'K', nchan);
sbuf_printf(sb, "%u p-structs (%u free)\n",
t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT),
G_FREEPSTRUCTCOUNT(t4_read_reg(sc, A_TP_FLM_FREE_PS_CNT)));
@@ -10184,7 +10769,7 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
used = G_USED(lo);
alloc = G_ALLOC(lo);
}
- /* For T6 these are MAC buffer groups */
+ /* For T6+ these are MAC buffer groups */
sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
i, used, alloc);
}
@@ -10200,7 +10785,7 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
used = G_USED(lo);
alloc = G_ALLOC(lo);
}
- /* For T6 these are MAC buffer groups */
+ /* For T6+ these are MAC buffer groups */
sbuf_printf(sb,
"\nLoopback %d using %u pages out of %u allocated",
i, used, alloc);
@@ -10329,7 +10914,7 @@ sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
struct sbuf *sb;
int rc, i;
- MPASS(chip_id(sc) > CHELSIO_T5);
+ MPASS(chip_id(sc) == CHELSIO_T6);
sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
@@ -10338,7 +10923,7 @@ sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask"
" IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF"
" Replication"
- " P0 P1 P2 P3 ML\n");
+ " P0 P1 P2 P3 ML");
rc = 0;
for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
@@ -10503,6 +11088,206 @@ sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
}
static int
+sysctl_mps_tcam_t7(SYSCTL_HANDLER_ARGS)
+{
+ struct adapter *sc = arg1;
+ struct sbuf *sb;
+ int rc, i;
+
+ MPASS(chip_id(sc) >= CHELSIO_T7);
+
+ sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
+ if (sb == NULL)
+ return (ENOMEM);
+
+ sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask"
+ " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF"
+ " Replication"
+ " P0 P1 P2 P3 ML");
+
+ rc = 0;
+ for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
+ uint8_t dip_hit, vlan_vld, lookup_type, port_num;
+ uint16_t ivlan;
+ uint64_t tcamx, tcamy, val, mask;
+ uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy;
+ uint8_t addr[ETHER_ADDR_LEN];
+
+ /* Read tcamy */
+ ctl = (V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0));
+ if (chip_rev(sc) == 0) {
+ if (i < 256)
+ ctl |= V_CTLTCAMINDEX(i) | V_T7_CTLTCAMSEL(0);
+ else
+ ctl |= V_CTLTCAMINDEX(i - 256) | V_T7_CTLTCAMSEL(1);
+ } else {
+#if 0
+ ctl = (V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0));
+#endif
+ if (i < 512)
+ ctl |= V_CTLTCAMINDEX(i) | V_T7_CTLTCAMSEL(0);
+ else if (i < 1024)
+ ctl |= V_CTLTCAMINDEX(i - 512) | V_T7_CTLTCAMSEL(1);
+ else
+ ctl |= V_CTLTCAMINDEX(i - 1024) | V_T7_CTLTCAMSEL(2);
+ }
+
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
+ val = t4_read_reg(sc, A_MPS_CLS_TCAM0_RDATA1_REQ_ID1);
+ tcamy = G_DMACH(val) << 32;
+ tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM0_RDATA0_REQ_ID1);
+ data2 = t4_read_reg(sc, A_MPS_CLS_TCAM0_RDATA2_REQ_ID1);
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ break;
+
+ lookup_type = G_DATALKPTYPE(data2);
+ port_num = G_DATAPORTNUM(data2);
+ if (lookup_type && lookup_type != M_DATALKPTYPE) {
+ /* Inner header VNI */
+ vniy = (((data2 & F_DATAVIDH2) |
+ G_DATAVIDH1(data2)) << 16) | G_VIDL(val);
+ dip_hit = data2 & F_DATADIPHIT;
+ vlan_vld = 0;
+ } else {
+ vniy = 0;
+ dip_hit = 0;
+ vlan_vld = data2 & F_DATAVIDH2;
+ ivlan = G_VIDL(val);
+ }
+
+ ctl |= V_CTLXYBITSEL(1);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
+ val = t4_read_reg(sc, A_MPS_CLS_TCAM0_RDATA1_REQ_ID1);
+ tcamx = G_DMACH(val) << 32;
+ tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM0_RDATA0_REQ_ID1);
+ data2 = t4_read_reg(sc, A_MPS_CLS_TCAM0_RDATA2_REQ_ID1);
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ break;
+
+ if (lookup_type && lookup_type != M_DATALKPTYPE) {
+ /* Inner header VNI mask */
+ vnix = (((data2 & F_DATAVIDH2) |
+ G_DATAVIDH1(data2)) << 16) | G_VIDL(val);
+ } else
+ vnix = 0;
+
+ if (tcamx & tcamy)
+ continue;
+ tcamxy2valmask(tcamx, tcamy, addr, &mask);
+
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ if (chip_rev(sc) == 0) {
+ cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
+ cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
+ } else {
+ t4_write_reg(sc, A_MPS_CLS_SRAM_H,
+ V_SRAMWRN(0) | V_SRAMINDEX(i));
+ cls_lo = t4_read_reg(sc, A_MPS_CLS_SRAM_L);
+ cls_hi = t4_read_reg(sc, A_MPS_CLS_SRAM_H);
+ }
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ break;
+
+ if (lookup_type && lookup_type != M_DATALKPTYPE) {
+ sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
+ "%012jx %06x %06x - - %3c"
+ " I %4x %3c %#x%4u%4d", i, addr[0],
+ addr[1], addr[2], addr[3], addr[4], addr[5],
+ (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N',
+ port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
+ G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
+ cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
+ } else {
+ sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
+ "%012jx - - ", i, addr[0], addr[1],
+ addr[2], addr[3], addr[4], addr[5],
+ (uintmax_t)mask);
+
+ if (vlan_vld)
+ sbuf_printf(sb, "%4u Y ", ivlan);
+ else
+ sbuf_printf(sb, " - N ");
+
+ sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d",
+ lookup_type ? 'I' : 'O', port_num,
+ cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
+ G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
+ cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
+ }
+
+ if (cls_lo & F_T6_REPLICATE) {
+ struct fw_ldst_cmd ldst_cmd;
+
+ memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+ ldst_cmd.op_to_addrspace =
+ htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ |
+ V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
+ ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
+ ldst_cmd.u.mps.rplc.fid_idx =
+ htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
+ V_FW_LDST_CMD_IDX(i));
+
+ rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
+ "t6mps");
+ if (rc)
+ break;
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
+ sizeof(ldst_cmd), &ldst_cmd);
+ end_synchronized_op(sc, 0);
+ if (rc != 0)
+ break;
+ else {
+ sbuf_printf(sb, " %08x %08x %08x %08x"
+ " %08x %08x %08x %08x",
+ be32toh(ldst_cmd.u.mps.rplc.rplc255_224),
+ be32toh(ldst_cmd.u.mps.rplc.rplc223_192),
+ be32toh(ldst_cmd.u.mps.rplc.rplc191_160),
+ be32toh(ldst_cmd.u.mps.rplc.rplc159_128),
+ be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
+ be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
+ be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
+ be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
+ }
+ } else
+ sbuf_printf(sb, "%72s", "");
+
+ sbuf_printf(sb, "%4u%3u%3u%3u %#x",
+ G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo),
+ G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo),
+ (cls_lo >> S_T6_MULTILISTEN0) & 0xf);
+ }
+
+ if (rc)
+ (void) sbuf_finish(sb);
+ else
+ rc = sbuf_finish(sb);
+ sbuf_delete(sb);
+
+ return (rc);
+}
+
+static int
sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
@@ -10543,6 +11328,7 @@ sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
int rc, i;
uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS];
uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS];
+ uint32_t stats[T7_PM_RX_CACHE_NSTATS];
static const char *tx_stats[MAX_PM_NSTATS] = {
"Read:", "Write bypass:", "Write mem:", "Bypass + mem:",
"Tx FIFO wait", NULL, "Tx latency"
@@ -10559,12 +11345,14 @@ sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
else {
t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
+ if (chip_id(sc) >= CHELSIO_T7)
+ t4_pmrx_cache_get_stats(sc, stats);
}
mtx_unlock(&sc->reg_lock);
if (rc != 0)
return (rc);
- sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
+ sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
return (ENOMEM);
@@ -10599,6 +11387,61 @@ sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
rx_cyc[i]);
}
+ if (chip_id(sc) >= CHELSIO_T7) {
+ i = 0;
+ sbuf_printf(sb, "\n\nPM RX Cache Stats\n");
+ sbuf_printf(sb, "%-40s %u\n", "ReqWrite", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "ReqReadInv", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "ReqReadNoInv", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Write Split Request",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Normal Read Split (Read Invalidate)", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Feedback Read Split (Read NoInvalidate)",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Write Hit", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Normal Read Hit",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Feedback Read Hit",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Normal Read Hit Full Avail",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Normal Read Hit Full UnAvail",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Normal Read Hit Partial Avail",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "FB Read Hit Full Avail",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "FB Read Hit Full UnAvail",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "FB Read Hit Partial Avail",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Normal Read Full Free",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Normal Read Part-avail Mul-Regions",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "FB Read Part-avail Mul-Regions",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Write Miss FL Used",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Write Miss LRU Used",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Write Miss LRU-Multiple Evict", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Write Hit Increasing Islands", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Normal Read Island Read split", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Write Overflow Eviction",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u", "Read Overflow Eviction",
+ stats[i++]);
+ }
+
rc = sbuf_finish(sb);
sbuf_delete(sb);
@@ -11609,15 +12452,17 @@ sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS)
#endif
static int
-get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
+get_sge_context(struct adapter *sc, int mem_id, uint32_t cid, int len,
+ uint32_t *data)
{
int rc;
- if (cntxt->cid > M_CTXTQID)
+ if (len < sc->chip_params->sge_ctxt_size)
+ return (ENOBUFS);
+ if (cid > M_CTXTQID)
return (EINVAL);
-
- if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
- cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
+ if (mem_id != CTXT_EGRESS && mem_id != CTXT_INGRESS &&
+ mem_id != CTXT_FLM && mem_id != CTXT_CNM)
return (EINVAL);
rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
@@ -11630,8 +12475,7 @@ get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
}
if (sc->flags & FW_OK) {
- rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
- &cntxt->data[0]);
+ rc = -t4_sge_ctxt_rd(sc, sc->mbox, cid, mem_id, data);
if (rc == 0)
goto done;
}
@@ -11640,7 +12484,7 @@ get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
* Read via firmware failed or wasn't even attempted. Read directly via
* the backdoor.
*/
- rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
+ rc = -t4_sge_ctxt_rd_bd(sc, cid, mem_id, data);
done:
end_synchronized_op(sc, 0);
return (rc);
@@ -12048,10 +12892,11 @@ clear_stats(struct adapter *sc, u_int port_id)
mtx_lock(&sc->reg_lock);
if (!hw_off_limits(sc)) {
/* MAC stats */
- t4_clr_port_stats(sc, pi->tx_chan);
+ t4_clr_port_stats(sc, pi->hw_port);
if (is_t6(sc)) {
if (pi->fcs_reg != -1)
- pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg);
+ pi->fcs_base = t4_read_reg64(sc,
+ t4_port_reg(sc, pi->tx_chan, pi->fcs_reg));
else
pi->stats.rx_fcs_err = 0;
}
@@ -12114,12 +12959,21 @@ clear_stats(struct adapter *sc, u_int port_id)
txq->kern_tls_full = 0;
txq->kern_tls_octets = 0;
txq->kern_tls_waste = 0;
- txq->kern_tls_options = 0;
txq->kern_tls_header = 0;
- txq->kern_tls_fin = 0;
txq->kern_tls_fin_short = 0;
txq->kern_tls_cbc = 0;
txq->kern_tls_gcm = 0;
+ if (is_t6(sc)) {
+ txq->kern_tls_options = 0;
+ txq->kern_tls_fin = 0;
+ } else {
+ txq->kern_tls_ghash_received = 0;
+ txq->kern_tls_ghash_requested = 0;
+ txq->kern_tls_lso = 0;
+ txq->kern_tls_partial_ghash = 0;
+ txq->kern_tls_splitmode = 0;
+ txq->kern_tls_trailer = 0;
+ }
mp_ring_reset_stats(txq->r);
}
@@ -12264,14 +13118,12 @@ t4_os_link_changed(struct port_info *pi)
if (is_t6(sc)) {
if (lc->link_ok) {
if (lc->speed > 25000 ||
- (lc->speed == 25000 && lc->fec == FEC_RS)) {
- pi->fcs_reg = T5_PORT_REG(pi->tx_chan,
- A_MAC_PORT_AFRAMECHECKSEQUENCEERRORS);
- } else {
- pi->fcs_reg = T5_PORT_REG(pi->tx_chan,
- A_MAC_PORT_MTIP_1G10G_RX_CRCERRORS);
- }
- pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg);
+ (lc->speed == 25000 && lc->fec == FEC_RS))
+ pi->fcs_reg = A_MAC_PORT_AFRAMECHECKSEQUENCEERRORS;
+ else
+ pi->fcs_reg = A_MAC_PORT_MTIP_1G10G_RX_CRCERRORS;
+ pi->fcs_base = t4_read_reg64(sc,
+ t4_port_reg(sc, pi->tx_chan, pi->fcs_reg));
pi->stats.rx_fcs_err = 0;
} else {
pi->fcs_reg = -1;
@@ -12404,9 +13256,13 @@ t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
case CHELSIO_T4_DEL_FILTER:
rc = del_filter(sc, (struct t4_filter *)data);
break;
- case CHELSIO_T4_GET_SGE_CONTEXT:
- rc = get_sge_context(sc, (struct t4_sge_context *)data);
+ case CHELSIO_T4_GET_SGE_CONTEXT: {
+ struct t4_sge_context *ctxt = (struct t4_sge_context *)data;
+
+ rc = get_sge_context(sc, ctxt->mem_id, ctxt->cid,
+ sizeof(ctxt->data), &ctxt->data[0]);
break;
+ }
case CHELSIO_T4_LOAD_FW:
rc = load_fw(sc, (struct t4_data *)data);
break;
@@ -12452,6 +13308,13 @@ t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
case CHELSIO_T4_RELEASE_CLIP_ADDR:
rc = release_clip_addr(sc, (struct t4_clip_addr *)data);
break;
+ case CHELSIO_T4_GET_SGE_CTXT: {
+ struct t4_sge_ctxt *ctxt = (struct t4_sge_ctxt *)data;
+
+ rc = get_sge_context(sc, ctxt->mem_id, ctxt->cid,
+ sizeof(ctxt->data), &ctxt->data[0]);
+ break;
+ }
default:
rc = ENOTTY;
}
@@ -12898,7 +13761,9 @@ t4_dump_mem(struct adapter *sc, u_int addr, u_int len)
{
uint32_t base, j, off, pf, reg, save, win_pos;
- reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
+ reg = chip_id(sc) > CHELSIO_T6 ?
+ PCIE_MEM_ACCESS_T7_REG(A_PCIE_MEM_ACCESS_OFFSET0, 2) :
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
save = t4_read_reg(sc, reg);
base = sc->memwin[2].mw_base;
@@ -12910,6 +13775,8 @@ t4_dump_mem(struct adapter *sc, u_int addr, u_int len)
win_pos = addr & ~0x7f; /* start must be 128B aligned */
}
off = addr - win_pos;
+ if (chip_id(sc) > CHELSIO_T6)
+ win_pos >>= X_T7_MEMOFST_SHIFT;
t4_write_reg(sc, reg, win_pos | pf);
t4_read_reg(sc, reg);
@@ -13274,6 +14141,7 @@ mod_event(module_t mod, int cmd, void *arg)
#endif
#ifdef KERN_TLS
t6_ktls_modload();
+ t7_ktls_modload();
#endif
t4_tracer_modload();
tweak_tunables();
@@ -13337,6 +14205,7 @@ mod_event(module_t mod, int cmd, void *arg)
vxlan_stop_evtag);
t4_tracer_modunload();
#ifdef KERN_TLS
+ t7_ktls_modunload();
t6_ktls_modunload();
#endif
#ifdef INET6
@@ -13383,6 +14252,14 @@ MODULE_DEPEND(t6nex, firmware, 1, 1, 1);
MODULE_DEPEND(t6nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
+DRIVER_MODULE(chnex, pci, ch_driver, mod_event, 0);
+MODULE_VERSION(chnex, 1);
+MODULE_DEPEND(chnex, crypto, 1, 1, 1);
+MODULE_DEPEND(chnex, firmware, 1, 1, 1);
+#ifdef DEV_NETMAP
+MODULE_DEPEND(chnex, netmap, 1, 1, 1);
+#endif /* DEV_NETMAP */
+
DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, 0, 0);
MODULE_VERSION(cxgbe, 1);
@@ -13392,6 +14269,9 @@ MODULE_VERSION(cxl, 1);
DRIVER_MODULE(cc, t6nex, cc_driver, 0, 0);
MODULE_VERSION(cc, 1);
+DRIVER_MODULE(che, chnex, che_driver, 0, 0);
+MODULE_VERSION(che, 1);
+
DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, 0, 0);
MODULE_VERSION(vcxgbe, 1);
@@ -13400,3 +14280,6 @@ MODULE_VERSION(vcxl, 1);
DRIVER_MODULE(vcc, cc, vcc_driver, 0, 0);
MODULE_VERSION(vcc, 1);
+
+DRIVER_MODULE(vche, che, vche_driver, 0, 0);
+MODULE_VERSION(vche, 1);
diff --git a/sys/dev/cxgbe/t4_mp_ring.c b/sys/dev/cxgbe/t4_mp_ring.c
index 531fd356728e..916c363a0c2a 100644
--- a/sys/dev/cxgbe/t4_mp_ring.c
+++ b/sys/dev/cxgbe/t4_mp_ring.c
@@ -305,7 +305,6 @@ failed:
}
void
-
mp_ring_free(struct mp_ring *r)
{
int i;
@@ -472,6 +471,86 @@ mp_ring_enqueue(struct mp_ring *r, void **items, int n, int budget)
return (0);
}
+/*
+ * Enqueue n items but never drain the ring. Can be called
+ * to enqueue new items while draining the ring.
+ *
+ * Returns an errno.
+ */
+int
+mp_ring_enqueue_only(struct mp_ring *r, void **items, int n)
+{
+ union ring_state os, ns;
+ uint16_t pidx_start, pidx_stop;
+ int i;
+
+ MPASS(items != NULL);
+ MPASS(n > 0);
+
+ /*
+ * Reserve room for the new items. Our reservation, if successful, is
+ * from 'pidx_start' to 'pidx_stop'.
+ */
+ os.state = atomic_load_64(&r->state);
+
+ /* Should only be used from the drain callback. */
+ MPASS(os.flags == BUSY || os.flags == TOO_BUSY ||
+ os.flags == TAKING_OVER);
+
+ for (;;) {
+ if (__predict_false(space_available(r, os) < n)) {
+ /* Not enough room in the ring. */
+ counter_u64_add(r->dropped, n);
+ return (ENOBUFS);
+ }
+
+ /* There is room in the ring. */
+
+ ns.state = os.state;
+ ns.pidx_head = increment_idx(r, os.pidx_head, n);
+ critical_enter();
+ if (atomic_fcmpset_64(&r->state, &os.state, ns.state))
+ break;
+ critical_exit();
+ cpu_spinwait();
+ };
+
+ pidx_start = os.pidx_head;
+ pidx_stop = ns.pidx_head;
+
+ /*
+ * Wait for other producers who got in ahead of us to enqueue their
+ * items, one producer at a time. It is our turn when the ring's
+ * pidx_tail reaches the beginning of our reservation (pidx_start).
+ */
+ while (ns.pidx_tail != pidx_start) {
+ cpu_spinwait();
+ ns.state = atomic_load_64(&r->state);
+ }
+
+ /* Now it is our turn to fill up the area we reserved earlier. */
+ i = pidx_start;
+ do {
+ r->items[i] = *items++;
+ if (__predict_false(++i == r->size))
+ i = 0;
+ } while (i != pidx_stop);
+
+ /*
+ * Update the ring's pidx_tail. The release style atomic guarantees
+ * that the items are visible to any thread that sees the updated pidx.
+ */
+ os.state = atomic_load_64(&r->state);
+ do {
+ ns.state = os.state;
+ ns.pidx_tail = pidx_stop;
+ } while (atomic_fcmpset_rel_64(&r->state, &os.state, ns.state) == 0);
+ critical_exit();
+
+ counter_u64_add(r->not_consumer, 1);
+ return (0);
+}
+
void
mp_ring_check_drainage(struct mp_ring *r, int budget)
{
diff --git a/sys/dev/cxgbe/t4_mp_ring.h b/sys/dev/cxgbe/t4_mp_ring.h
index 949174b9056d..07b15906cd43 100644
--- a/sys/dev/cxgbe/t4_mp_ring.h
+++ b/sys/dev/cxgbe/t4_mp_ring.h
@@ -62,6 +62,7 @@ int mp_ring_alloc(struct mp_ring **, int, void *, ring_drain_t,
ring_can_drain_t, struct malloc_type *, struct mtx *, int);
void mp_ring_free(struct mp_ring *);
int mp_ring_enqueue(struct mp_ring *, void **, int, int);
+int mp_ring_enqueue_only(struct mp_ring *, void **, int);
void mp_ring_check_drainage(struct mp_ring *, int);
void mp_ring_reset_stats(struct mp_ring *);
bool mp_ring_is_idle(struct mp_ring *);
diff --git a/sys/dev/cxgbe/t4_netmap.c b/sys/dev/cxgbe/t4_netmap.c
index e53fb5054316..0135bec6e2c1 100644
--- a/sys/dev/cxgbe/t4_netmap.c
+++ b/sys/dev/cxgbe/t4_netmap.c
@@ -232,7 +232,7 @@ alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx)
nm_txq->nid = idx;
nm_txq->iqidx = iqidx;
nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
- V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
+ V_TXPKT_INTF(pi->hw_port) | V_TXPKT_PF(sc->pf) |
V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
if (sc->params.fw_vers >= FW_VERSION32(1, 24, 11, 0))
nm_txq->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
@@ -276,7 +276,7 @@ free_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
static int
alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
{
- int rc, cntxt_id;
+ int rc, cntxt_id, cong_map;
__be32 v;
struct adapter *sc = vi->adapter;
struct port_info *pi = vi->pi;
@@ -284,7 +284,6 @@ alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
struct netmap_adapter *na = NA(vi->ifp);
struct fw_iq_cmd c;
const int cong_drop = nm_cong_drop;
- const int cong_map = pi->rx_e_chan_map;
MPASS(na != NULL);
MPASS(nm_rxq->iq_desc != NULL);
@@ -314,13 +313,17 @@ alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
V_FW_IQ_CMD_VIID(vi->viid) |
V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
- c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
+ c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->hw_port) |
F_FW_IQ_CMD_IQGTSMODE |
V_FW_IQ_CMD_IQINTCNTTHRESH(0) |
V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
c.iqsize = htobe16(vi->qsize_rxq);
c.iqaddr = htobe64(nm_rxq->iq_ba);
if (cong_drop != -1) {
+ if (chip_id(sc) >= CHELSIO_T7)
+ cong_map = 1 << pi->hw_port;
+ else
+ cong_map = pi->rx_e_chan_map;
c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN |
V_FW_IQ_CMD_FL0CNGCHMAP(cong_map) | F_FW_IQ_CMD_FL0CONGCIF |
F_FW_IQ_CMD_FL0CONGEN);
@@ -421,15 +424,19 @@ alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
V_FW_EQ_ETH_CMD_VFN(0));
c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
- if (nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID)
- c.alloc_to_len16 |= htobe32(F_FW_EQ_ETH_CMD_ALLOC);
- else
+ if (nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID) {
+ const int core = sc->params.ncores > 1 ?
+ nm_txq->nid % sc->params.ncores : 0;
+
+ c.alloc_to_len16 |= htobe32(F_FW_EQ_ETH_CMD_ALLOC |
+ V_FW_EQ_ETH_CMD_COREGROUP(core));
+ } else
c.eqid_pkd = htobe32(V_FW_EQ_ETH_CMD_EQID(nm_txq->cntxt_id));
c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
c.fetchszm_to_iqid =
htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
- V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
+ V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->hw_port) | F_FW_EQ_ETH_CMD_FETCHRO |
V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id));
c.dcaen_to_eqsize =
htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
diff --git a/sys/dev/cxgbe/t4_sched.c b/sys/dev/cxgbe/t4_sched.c
index 2186c8aa2ac0..65c2720d692c 100644
--- a/sys/dev/cxgbe/t4_sched.c
+++ b/sys/dev/cxgbe/t4_sched.c
@@ -272,7 +272,7 @@ update_tx_sched(void *context, int pending)
}
rc = -t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED,
FW_SCHED_PARAMS_LEVEL_CL_RL, tc->mode, tc->rateunit,
- tc->ratemode, pi->tx_chan, j, 0, tc->maxrate, 0,
+ tc->ratemode, pi->hw_port, j, 0, tc->maxrate, 0,
tc->pktsize, tc->burstsize, 1);
end_synchronized_op(sc, 0);
@@ -291,7 +291,7 @@ update_tx_sched(void *context, int pending)
"params: mode %d, rateunit %d, ratemode %d, "
"channel %d, minrate %d, maxrate %d, pktsize %d, "
"burstsize %d\n", j, rc, tc->mode, tc->rateunit,
- tc->ratemode, pi->tx_chan, 0, tc->maxrate,
+ tc->ratemode, pi->hw_port, 0, tc->maxrate,
tc->pktsize, tc->burstsize);
}
}
@@ -839,7 +839,7 @@ failed:
cst->tx_total = cst->tx_credits;
cst->plen = 0;
cst->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
+ V_TXPKT_INTF(pi->hw_port) | V_TXPKT_PF(sc->pf) |
V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
/*
diff --git a/sys/dev/cxgbe/t4_sge.c b/sys/dev/cxgbe/t4_sge.c
index 86454bc4fe10..2f9cb1a4ebb5 100644
--- a/sys/dev/cxgbe/t4_sge.c
+++ b/sys/dev/cxgbe/t4_sge.c
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2025 Chelsio Communications.
* Written by: Navdeep Parhar <np@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
@@ -259,17 +258,20 @@ static void free_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *);
static void add_ofld_rxq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *,
struct sge_ofld_rxq *);
#endif
-static int ctrl_eq_alloc(struct adapter *, struct sge_eq *);
-static int eth_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
+static int ctrl_eq_alloc(struct adapter *, struct sge_eq *, int);
+static int eth_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *,
+ int);
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
-static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
+static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *,
+ int);
#endif
static int alloc_eq(struct adapter *, struct sge_eq *, struct sysctl_ctx_list *,
struct sysctl_oid *);
static void free_eq(struct adapter *, struct sge_eq *);
static void add_eq_sysctls(struct adapter *, struct sysctl_ctx_list *,
struct sysctl_oid *, struct sge_eq *);
-static int alloc_eq_hwq(struct adapter *, struct vi_info *, struct sge_eq *);
+static int alloc_eq_hwq(struct adapter *, struct vi_info *, struct sge_eq *,
+ int);
static int free_eq_hwq(struct adapter *, struct vi_info *, struct sge_eq *);
static int alloc_wrq(struct adapter *, struct vi_info *, struct sge_wrq *,
struct sysctl_ctx_list *, struct sysctl_oid *);
@@ -348,6 +350,7 @@ cpl_handler_t l2t_write_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t act_open_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t abort_rpl_rss_handlers[NUM_CPL_COOKIES];
cpl_handler_t fw4_ack_handlers[NUM_CPL_COOKIES];
+cpl_handler_t fw6_pld_handlers[NUM_CPL_FW6_COOKIES];
void
t4_register_an_handler(an_handler_t h)
@@ -477,6 +480,21 @@ fw4_ack_handler(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
return (fw4_ack_handlers[cookie](iq, rss, m));
}
+static int
+fw6_pld_handler(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
+{
+ const struct cpl_fw6_pld *cpl;
+ uint64_t cookie;
+
+ if (m != NULL)
+ cpl = mtod(m, const void *);
+ else
+ cpl = (const void *)(rss + 1);
+ cookie = be64toh(cpl->data[1]) & CPL_FW6_COOKIE_MASK;
+
+ return (fw6_pld_handlers[cookie](iq, rss, m));
+}
+
static void
t4_init_shared_cpl_handlers(void)
{
@@ -486,6 +504,7 @@ t4_init_shared_cpl_handlers(void)
t4_register_cpl_handler(CPL_ACT_OPEN_RPL, act_open_rpl_handler);
t4_register_cpl_handler(CPL_ABORT_RPL_RSS, abort_rpl_rss_handler);
t4_register_cpl_handler(CPL_FW4_ACK, fw4_ack_handler);
+ t4_register_cpl_handler(CPL_FW6_PLD, fw6_pld_handler);
}
void
@@ -494,8 +513,12 @@ t4_register_shared_cpl_handler(int opcode, cpl_handler_t h, int cookie)
uintptr_t *loc;
MPASS(opcode < nitems(t4_cpl_handler));
- MPASS(cookie > CPL_COOKIE_RESERVED);
- MPASS(cookie < NUM_CPL_COOKIES);
+ if (opcode == CPL_FW6_PLD) {
+ MPASS(cookie < NUM_CPL_FW6_COOKIES);
+ } else {
+ MPASS(cookie > CPL_COOKIE_RESERVED);
+ MPASS(cookie < NUM_CPL_COOKIES);
+ }
MPASS(t4_cpl_handler[opcode] != NULL);
switch (opcode) {
@@ -514,6 +537,9 @@ t4_register_shared_cpl_handler(int opcode, cpl_handler_t h, int cookie)
case CPL_FW4_ACK:
loc = (uintptr_t *)&fw4_ack_handlers[cookie];
break;
+ case CPL_FW6_PLD:
+ loc = (uintptr_t *)&fw6_pld_handlers[cookie];
+ break;
default:
MPASS(0);
return;
@@ -1064,9 +1090,9 @@ t4_setup_adapter_queues(struct adapter *sc)
*/
/*
- * Control queues, one per port.
+ * Control queues. At least one per port and per internal core.
*/
- for_each_port(sc, i) {
+ for (i = 0; i < sc->sge.nctrlq; i++) {
rc = alloc_ctrlq(sc, i);
if (rc != 0)
return (rc);
@@ -1087,7 +1113,7 @@ t4_teardown_adapter_queues(struct adapter *sc)
if (sc->sge.ctrlq != NULL) {
MPASS(!(sc->flags & IS_VF)); /* VFs don't allocate ctrlq. */
- for_each_port(sc, i)
+ for (i = 0; i < sc->sge.nctrlq; i++)
free_ctrlq(sc, i);
}
free_fwq(sc);
@@ -2701,9 +2727,14 @@ restart:
#endif
#ifdef KERN_TLS
if (mst != NULL && mst->sw->type == IF_SND_TAG_TYPE_TLS) {
+ struct vi_info *vi = if_getsoftc(mst->ifp);
+
cflags |= MC_TLS;
set_mbuf_cflags(m0, cflags);
- rc = t6_ktls_parse_pkt(m0);
+ if (is_t6(vi->pi->adapter))
+ rc = t6_ktls_parse_pkt(m0);
+ else
+ rc = t7_ktls_parse_pkt(m0);
if (rc != 0)
goto fail;
return (EINPROGRESS);
@@ -3273,7 +3304,10 @@ skip_coalescing:
#ifdef KERN_TLS
} else if (mbuf_cflags(m0) & MC_TLS) {
ETHER_BPF_MTAP(ifp, m0);
- n = t6_ktls_write_wr(txq, wr, m0, avail);
+ if (is_t6(sc))
+ n = t6_ktls_write_wr(txq, wr, m0, avail);
+ else
+ n = t7_ktls_write_wr(txq, wr, m0, avail);
#endif
} else {
ETHER_BPF_MTAP(ifp, m0);
@@ -3414,6 +3448,7 @@ init_eq(struct adapter *sc, struct sge_eq *eq, int eqtype, int qsize,
eq->type = eqtype;
eq->port_id = port_id;
eq->tx_chan = sc->port[port_id]->tx_chan;
+ eq->hw_port = sc->port[port_id]->hw_port;
eq->iq = iq;
eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE;
strlcpy(eq->lockname, name, sizeof(eq->lockname));
@@ -3577,7 +3612,7 @@ alloc_iq_fl_hwq(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl)
V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
V_FW_IQ_CMD_VIID(vi->viid) |
V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
- c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
+ c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->hw_port) |
F_FW_IQ_CMD_IQGTSMODE |
V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) |
V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
@@ -3585,7 +3620,13 @@ alloc_iq_fl_hwq(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl)
c.iqaddr = htobe64(iq->ba);
c.iqns_to_fl0congen = htobe32(V_FW_IQ_CMD_IQTYPE(iq->qtype));
if (iq->cong_drop != -1) {
- cong_map = iq->qtype == IQ_ETH ? pi->rx_e_chan_map : 0;
+ if (iq->qtype == IQ_ETH) {
+ if (chip_id(sc) >= CHELSIO_T7)
+ cong_map = 1 << pi->hw_port;
+ else
+ cong_map = pi->rx_e_chan_map;
+ } else
+ cong_map = 0;
c.iqns_to_fl0congen |= htobe32(F_FW_IQ_CMD_IQFLINTCONGEN);
}
@@ -3842,7 +3883,7 @@ alloc_ctrlq(struct adapter *sc, int idx)
struct sysctl_oid *oid;
struct sge_wrq *ctrlq = &sc->sge.ctrlq[idx];
- MPASS(idx < sc->params.nports);
+ MPASS(idx < sc->sge.nctrlq);
if (!(ctrlq->eq.flags & EQ_SW_ALLOCATED)) {
MPASS(!(ctrlq->eq.flags & EQ_HW_ALLOCATED));
@@ -3854,8 +3895,8 @@ alloc_ctrlq(struct adapter *sc, int idx)
snprintf(name, sizeof(name), "%s ctrlq%d",
device_get_nameunit(sc->dev), idx);
- init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, idx,
- &sc->sge.fwq, name);
+ init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE,
+ idx % sc->params.nports, &sc->sge.fwq, name);
rc = alloc_wrq(sc, NULL, ctrlq, &sc->ctx, oid);
if (rc != 0) {
CH_ERR(sc, "failed to allocate ctrlq%d: %d\n", idx, rc);
@@ -3870,7 +3911,7 @@ alloc_ctrlq(struct adapter *sc, int idx)
MPASS(ctrlq->nwr_pending == 0);
MPASS(ctrlq->ndesc_needed == 0);
- rc = alloc_eq_hwq(sc, NULL, &ctrlq->eq);
+ rc = alloc_eq_hwq(sc, NULL, &ctrlq->eq, idx);
if (rc != 0) {
CH_ERR(sc, "failed to create hw ctrlq%d: %d\n", idx, rc);
return (rc);
@@ -3938,14 +3979,19 @@ t4_sge_set_conm_context(struct adapter *sc, int cntxt_id, int cong_drop,
param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
V_FW_PARAMS_PARAM_YZ(cntxt_id);
- val = V_CONMCTXT_CNGTPMODE(cong_mode);
- if (cong_mode == X_CONMCTXT_CNGTPMODE_CHANNEL ||
- cong_mode == X_CONMCTXT_CNGTPMODE_BOTH) {
- for (i = 0, ch_map = 0; i < 4; i++) {
- if (cong_map & (1 << i))
- ch_map |= 1 << (i << cng_ch_bits_log);
+ if (chip_id(sc) >= CHELSIO_T7) {
+ val = V_T7_DMAQ_CONM_CTXT_CNGTPMODE(cong_mode) |
+ V_T7_DMAQ_CONM_CTXT_CH_VEC(cong_map);
+ } else {
+ val = V_CONMCTXT_CNGTPMODE(cong_mode);
+ if (cong_mode == X_CONMCTXT_CNGTPMODE_CHANNEL ||
+ cong_mode == X_CONMCTXT_CNGTPMODE_BOTH) {
+ for (i = 0, ch_map = 0; i < 4; i++) {
+ if (cong_map & (1 << i))
+ ch_map |= 1 << (i << cng_ch_bits_log);
+ }
+ val |= V_CONMCTXT_CNGCHMAP(ch_map);
}
- val |= V_CONMCTXT_CNGCHMAP(ch_map);
}
rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
if (rc != 0) {
@@ -4253,24 +4299,26 @@ qsize_to_fthresh(int qsize)
}
static int
-ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
+ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq, int idx)
{
- int rc, cntxt_id;
+ int rc, cntxt_id, core;
struct fw_eq_ctrl_cmd c;
int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
+ core = sc->params.tid_qid_sel_mask != 0 ? idx % sc->params.ncores : 0;
bzero(&c, sizeof(c));
c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) |
V_FW_EQ_CTRL_CMD_VFN(0));
c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC |
+ V_FW_EQ_CTRL_CMD_COREGROUP(core) |
F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid));
c.physeqid_pkd = htobe32(0);
c.fetchszm_to_iqid =
htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
- V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) |
+ V_FW_EQ_CTRL_CMD_PCIECHN(eq->hw_port) |
F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid));
c.dcaen_to_eqsize =
htobe32(V_FW_EQ_CTRL_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
@@ -4282,8 +4330,8 @@ ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
if (rc != 0) {
- CH_ERR(sc, "failed to create hw ctrlq for tx_chan %d: %d\n",
- eq->tx_chan, rc);
+ CH_ERR(sc, "failed to create hw ctrlq for port %d: %d\n",
+ eq->port_id, rc);
return (rc);
}
@@ -4299,24 +4347,26 @@ ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
}
static int
-eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
+eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq, int idx)
{
- int rc, cntxt_id;
+ int rc, cntxt_id, core;
struct fw_eq_eth_cmd c;
int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
+ core = sc->params.ncores > 1 ? idx % sc->params.ncores : 0;
bzero(&c, sizeof(c));
c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
V_FW_EQ_ETH_CMD_VFN(0));
c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
+ V_FW_EQ_ETH_CMD_COREGROUP(core) |
F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
c.fetchszm_to_iqid =
htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
- V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
+ V_FW_EQ_ETH_CMD_PCIECHN(eq->hw_port) | F_FW_EQ_ETH_CMD_FETCHRO |
V_FW_EQ_ETH_CMD_IQID(eq->iqid));
c.dcaen_to_eqsize =
htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
@@ -4344,23 +4394,44 @@ eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
}
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
+/*
+ * ncores number of uP cores.
+ * nq number of queues for this VI
+ * idx queue index
+ */
+static inline int
+qidx_to_core(int ncores, int nq, int idx)
+{
+ MPASS(nq % ncores == 0);
+ MPASS(idx >= 0 && idx < nq);
+
+ return (idx * ncores / nq);
+}
+
static int
-ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
+ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq,
+ int idx)
{
- int rc, cntxt_id;
+ int rc, cntxt_id, core;
struct fw_eq_ofld_cmd c;
int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
+ if (sc->params.tid_qid_sel_mask != 0)
+ core = qidx_to_core(sc->params.ncores, vi->nofldtxq, idx);
+ else
+ core = 0;
+
bzero(&c, sizeof(c));
c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) |
V_FW_EQ_OFLD_CMD_VFN(0));
c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC |
+ V_FW_EQ_OFLD_CMD_COREGROUP(core) |
F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
c.fetchszm_to_iqid =
htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
- V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) |
+ V_FW_EQ_OFLD_CMD_PCIECHN(eq->hw_port) |
F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid));
c.dcaen_to_eqsize =
htobe32(V_FW_EQ_OFLD_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
@@ -4449,7 +4520,7 @@ add_eq_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
}
static int
-alloc_eq_hwq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
+alloc_eq_hwq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq, int idx)
{
int rc;
@@ -4464,16 +4535,16 @@ alloc_eq_hwq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
switch (eq->type) {
case EQ_CTRL:
- rc = ctrl_eq_alloc(sc, eq);
+ rc = ctrl_eq_alloc(sc, eq, idx);
break;
case EQ_ETH:
- rc = eth_eq_alloc(sc, vi, eq);
+ rc = eth_eq_alloc(sc, vi, eq, idx);
break;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
case EQ_OFLD:
- rc = ofld_eq_alloc(sc, vi, eq);
+ rc = ofld_eq_alloc(sc, vi, eq, idx);
break;
#endif
@@ -4653,7 +4724,7 @@ failed:
if (!(eq->flags & EQ_HW_ALLOCATED)) {
MPASS(eq->flags & EQ_SW_ALLOCATED);
- rc = alloc_eq_hwq(sc, vi, eq);
+ rc = alloc_eq_hwq(sc, vi, eq, idx);
if (rc != 0) {
CH_ERR(vi, "failed to create hw txq%d: %d\n", idx, rc);
return (rc);
@@ -4678,10 +4749,10 @@ failed:
if (vi->flags & TX_USES_VM_WR)
txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan));
+ V_TXPKT_INTF(pi->hw_port));
else
txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
+ V_TXPKT_INTF(pi->hw_port) | V_TXPKT_PF(sc->pf) |
V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
txq->tc_idx = -1;
@@ -4788,18 +4859,46 @@ add_txq_sysctls(struct vi_info *vi, struct sysctl_ctx_list *ctx,
SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_waste",
CTLFLAG_RD, &txq->kern_tls_waste,
"# of octets DMAd but not transmitted in NIC TLS records");
- SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_options",
- CTLFLAG_RD, &txq->kern_tls_options,
- "# of NIC TLS options-only packets transmitted");
SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_header",
CTLFLAG_RD, &txq->kern_tls_header,
"# of NIC TLS header-only packets transmitted");
- SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_fin",
- CTLFLAG_RD, &txq->kern_tls_fin,
- "# of NIC TLS FIN-only packets transmitted");
SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_fin_short",
CTLFLAG_RD, &txq->kern_tls_fin_short,
"# of NIC TLS padded FIN packets on short TLS records");
+ if (is_t6(sc)) {
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_options", CTLFLAG_RD,
+ &txq->kern_tls_options,
+ "# of NIC TLS options-only packets transmitted");
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_fin", CTLFLAG_RD, &txq->kern_tls_fin,
+ "# of NIC TLS FIN-only packets transmitted");
+ } else {
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_ghash_received", CTLFLAG_RD,
+ &txq->kern_tls_ghash_received,
+ "# of NIC TLS GHASHes received");
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_ghash_requested", CTLFLAG_RD,
+ &txq->kern_tls_ghash_requested,
+ "# of NIC TLS GHASHes requested");
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_lso", CTLFLAG_RD,
+ &txq->kern_tls_lso,
+ "# of NIC TLS records transmitted using LSO");
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_partial_ghash", CTLFLAG_RD,
+ &txq->kern_tls_partial_ghash,
+ "# of NIC TLS records encrypted using a partial GHASH");
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_splitmode", CTLFLAG_RD,
+ &txq->kern_tls_splitmode,
+ "# of NIC TLS records using SplitMode");
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_trailer", CTLFLAG_RD,
+ &txq->kern_tls_trailer,
+ "# of NIC TLS trailer-only packets transmitted");
+ }
SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_cbc",
CTLFLAG_RD, &txq->kern_tls_cbc,
"# of NIC TLS sessions using AES-CBC");
@@ -4869,7 +4968,7 @@ alloc_ofld_txq(struct vi_info *vi, struct sge_ofld_txq *ofld_txq, int idx)
MPASS(eq->flags & EQ_SW_ALLOCATED);
MPASS(ofld_txq->wrq.nwr_pending == 0);
MPASS(ofld_txq->wrq.ndesc_needed == 0);
- rc = alloc_eq_hwq(sc, vi, eq);
+ rc = alloc_eq_hwq(sc, vi, eq, idx);
if (rc != 0) {
CH_ERR(vi, "failed to create hw ofld_txq%d: %d\n", idx,
rc);
@@ -5418,7 +5517,8 @@ write_tnl_lso_cpl(void *cpl, struct mbuf *m0)
m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen +
m0->m_pkthdr.l5hlen) |
V_CPL_TX_TNL_LSO_TNLTYPE(TX_TNL_TYPE_VXLAN));
- tnl_lso->r1 = 0;
+ tnl_lso->ipsecen_to_rocev2 = 0;
+ tnl_lso->roce_eth = 0;
/* Inner headers. */
ctrl = V_CPL_TX_TNL_LSO_ETHHDRLEN(
@@ -6583,10 +6683,11 @@ send_etid_flowc_wr(struct cxgbe_rate_tag *cst, struct port_info *pi,
V_FW_WR_FLOWID(cst->etid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
flowc->mnemval[0].val = htobe32(pfvf);
+ /* Firmware expects hw port and will translate to channel itself. */
flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
- flowc->mnemval[1].val = htobe32(pi->tx_chan);
+ flowc->mnemval[1].val = htobe32(pi->hw_port);
flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
- flowc->mnemval[2].val = htobe32(pi->tx_chan);
+ flowc->mnemval[2].val = htobe32(pi->hw_port);
flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
flowc->mnemval[3].val = htobe32(cst->iqid);
flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_EOSTATE;
diff --git a/sys/dev/cxgbe/t4_tpt.c b/sys/dev/cxgbe/t4_tpt.c
new file mode 100644
index 000000000000..d18eabb026f1
--- /dev/null
+++ b/sys/dev/cxgbe/t4_tpt.c
@@ -0,0 +1,193 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023 Chelsio Communications, Inc.
+ * Written by: John Baldwin <jhb@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "common/common.h"
+
+/*
+ * Support routines to manage TPT entries used for both RDMA and NVMe
+ * offloads. This includes allocating STAG indices and managing the
+ * PBL pool.
+ */
+
+#define T4_ULPTX_MIN_IO 32
+#define T4_MAX_INLINE_SIZE 96
+#define T4_ULPTX_MAX_DMA 1024
+
+/* PBL and STAG Memory Managers. */
+
+#define MIN_PBL_SHIFT 5 /* 32B == min PBL size (4 entries) */
+
+uint32_t
+t4_pblpool_alloc(struct adapter *sc, int size)
+{
+ vmem_addr_t addr;
+
+ if (vmem_xalloc(sc->pbl_arena, roundup(size, (1 << MIN_PBL_SHIFT)),
+ 4, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, M_FIRSTFIT | M_NOWAIT,
+ &addr) != 0)
+ return (0);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: addr 0x%lx size %d", __func__, addr, size);
+#endif
+ return (addr);
+}
+
+void
+t4_pblpool_free(struct adapter *sc, uint32_t addr, int size)
+{
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: addr 0x%x size %d", __func__, addr, size);
+#endif
+ vmem_xfree(sc->pbl_arena, addr, roundup(size, (1 << MIN_PBL_SHIFT)));
+}
+
+uint32_t
+t4_stag_alloc(struct adapter *sc, int size)
+{
+ vmem_addr_t stag_idx;
+
+ if (vmem_alloc(sc->stag_arena, size, M_FIRSTFIT | M_NOWAIT,
+ &stag_idx) != 0)
+ return (T4_STAG_UNSET);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: idx 0x%lx size %d", __func__, stag_idx, size);
+#endif
+ return (stag_idx);
+}
+
+void
+t4_stag_free(struct adapter *sc, uint32_t stag_idx, int size)
+{
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: idx 0x%x size %d", __func__, stag_idx, size);
+#endif
+ vmem_free(sc->stag_arena, stag_idx, size);
+}
+
+void
+t4_init_tpt(struct adapter *sc)
+{
+ if (sc->vres.pbl.size != 0)
+ sc->pbl_arena = vmem_create("PBL_MEM_POOL", sc->vres.pbl.start,
+ sc->vres.pbl.size, 1, 0, M_FIRSTFIT | M_WAITOK);
+ if (sc->vres.stag.size != 0)
+ sc->stag_arena = vmem_create("STAG", 1,
+ sc->vres.stag.size >> 5, 1, 0, M_FIRSTFIT | M_WAITOK);
+}
+
+void
+t4_free_tpt(struct adapter *sc)
+{
+ if (sc->pbl_arena != NULL)
+ vmem_destroy(sc->pbl_arena);
+ if (sc->stag_arena != NULL)
+ vmem_destroy(sc->stag_arena);
+}
+
+/*
+ * TPT support routines. TPT entries are stored in the STAG adapter
+ * memory region and are written to via ULP_TX_MEM_WRITE commands in
+ * FW_ULPTX_WR work requests.
+ */
+
+void
+t4_write_mem_dma_wr(struct adapter *sc, void *wr, int wr_len, int tid,
+ uint32_t addr, uint32_t len, vm_paddr_t data, uint64_t cookie)
+{
+ struct ulp_mem_io *ulpmc;
+ struct ulptx_sgl *sgl;
+
+ MPASS(wr_len == T4_WRITE_MEM_DMA_LEN);
+
+ addr &= 0x7FFFFFF;
+
+ memset(wr, 0, wr_len);
+ ulpmc = wr;
+ INIT_ULPTX_WR(ulpmc, wr_len, 0, tid);
+ if (cookie != 0) {
+ ulpmc->wr.wr_hi |= htobe32(F_FW_WR_COMPL);
+ ulpmc->wr.wr_lo = cookie;
+ }
+ ulpmc->cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
+ V_T5_ULP_MEMIO_ORDER(1) |
+ V_T5_ULP_MEMIO_FID(sc->sge.ofld_rxq[0].iq.abs_id));
+ if (chip_id(sc) >= CHELSIO_T7)
+ ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(len >> 5));
+ else
+ ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(len >> 5));
+ ulpmc->len16 = htobe32((tid << 8) |
+ DIV_ROUND_UP(wr_len - sizeof(ulpmc->wr), 16));
+ ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(addr));
+
+ sgl = (struct ulptx_sgl *)(ulpmc + 1);
+ sgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | V_ULPTX_NSGE(1));
+ sgl->len0 = htobe32(len);
+ sgl->addr0 = htobe64(data);
+}
+
+void
+t4_write_mem_inline_wr(struct adapter *sc, void *wr, int wr_len, int tid,
+ uint32_t addr, uint32_t len, void *data, uint64_t cookie)
+{
+ struct ulp_mem_io *ulpmc;
+ struct ulptx_idata *ulpsc;
+
+ MPASS(len > 0 && len <= T4_MAX_INLINE_SIZE);
+ MPASS(wr_len == T4_WRITE_MEM_INLINE_LEN(len));
+
+ addr &= 0x7FFFFFF;
+
+ memset(wr, 0, wr_len);
+ ulpmc = wr;
+ INIT_ULPTX_WR(ulpmc, wr_len, 0, tid);
+
+ if (cookie != 0) {
+ ulpmc->wr.wr_hi |= htobe32(F_FW_WR_COMPL);
+ ulpmc->wr.wr_lo = cookie;
+ }
+
+ ulpmc->cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
+ F_T5_ULP_MEMIO_IMM);
+
+ if (chip_id(sc) >= CHELSIO_T7)
+ ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(
+ DIV_ROUND_UP(len, T4_ULPTX_MIN_IO)));
+ else
+ ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(
+ DIV_ROUND_UP(len, T4_ULPTX_MIN_IO)));
+ ulpmc->len16 = htobe32((tid << 8) |
+ DIV_ROUND_UP(wr_len - sizeof(ulpmc->wr), 16));
+ ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(addr));
+
+ ulpsc = (struct ulptx_idata *)(ulpmc + 1);
+ ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
+ ulpsc->len = htobe32(roundup(len, T4_ULPTX_MIN_IO));
+
+ if (data != NULL)
+ memcpy(ulpsc + 1, data, len);
+}
diff --git a/sys/dev/cxgbe/t4_tracer.c b/sys/dev/cxgbe/t4_tracer.c
index 80689a543e83..4f8d28626bc9 100644
--- a/sys/dev/cxgbe/t4_tracer.c
+++ b/sys/dev/cxgbe/t4_tracer.c
@@ -123,9 +123,8 @@ static int
t4_cloner_match(struct if_clone *ifc, const char *name)
{
- if (strncmp(name, "t4nex", 5) != 0 &&
- strncmp(name, "t5nex", 5) != 0 &&
- strncmp(name, "t6nex", 5) != 0)
+ if (strncmp(name, "t4nex", 5) != 0 && strncmp(name, "t5nex", 5) != 0 &&
+ strncmp(name, "t6nex", 5) != 0 && strncmp(name, "chnex", 5) != 0)
return (0);
if (name[5] < '0' || name[5] > '9')
return (0);
diff --git a/sys/dev/cxgbe/t4_vf.c b/sys/dev/cxgbe/t4_vf.c
index b7b08e226a57..89dae02e9332 100644
--- a/sys/dev/cxgbe/t4_vf.c
+++ b/sys/dev/cxgbe/t4_vf.c
@@ -125,6 +125,28 @@ struct {
{0x6885, "Chelsio T6240-SO 85 VF"},
{0x6886, "Chelsio T6225-SO-CR 86 VF"},
{0x6887, "Chelsio T6225-CR 87 VF"},
+}, t7vf_pciids[] = {
+ {0xd800, "Chelsio T7 FPGA VF"}, /* T7 PE12K FPGA */
+ {0x7800, "Chelsio T72200-DBG VF"}, /* 2 x 200G, debug */
+ {0x7801, "Chelsio T7250 VF"}, /* 2 x 10/25/50G, 1 mem */
+ {0x7802, "Chelsio S7250 VF"}, /* 2 x 10/25/50G, nomem */
+ {0x7803, "Chelsio T7450 VF"}, /* 4 x 10/25/50G, 1 mem */
+ {0x7804, "Chelsio S7450 VF"}, /* 4 x 10/25/50G, nomem */
+ {0x7805, "Chelsio T72200 VF"}, /* 2 x 40/100/200G, 1 mem */
+ {0x7806, "Chelsio S72200 VF"}, /* 2 x 40/100/200G, nomem */
+ {0x7807, "Chelsio T72200-FH VF"}, /* 2 x 40/100/200G, 2 mem */
+ {0x7808, "Chelsio T71400 VF"}, /* 1 x 400G, nomem */
+ {0x7809, "Chelsio S7210-BT VF"}, /* 2 x 10GBASE-T, nomem */
+ {0x780a, "Chelsio T7450-RC VF"}, /* 4 x 10/25/50G, 1 mem, RC */
+ {0x780b, "Chelsio T72200-RC VF"}, /* 2 x 40/100/200G, 1 mem, RC */
+ {0x780c, "Chelsio T72200-FH-RC VF"}, /* 2 x 40/100/200G, 2 mem, RC */
+ {0x780d, "Chelsio S72200-OCP3 VF"}, /* 2 x 40/100/200G OCP3 */
+ {0x780e, "Chelsio S7450-OCP3 VF"}, /* 4 x 1/10/25/50G OCP3 */
+ {0x780f, "Chelsio S7410-BT-OCP3 VF"}, /* 4 x 10GBASE-T OCP3 */
+ {0x7810, "Chelsio S7210-BT-A VF"}, /* 2 x 10GBASE-T */
+ {0x7811, "Chelsio T7_MAYRA_7 VF"}, /* Motherboard */
+
+ {0x7880, "Custom T7 VF"},
};
static d_ioctl_t t4vf_ioctl;
@@ -183,6 +205,22 @@ t6vf_probe(device_t dev)
return (ENXIO);
}
+static int
+chvf_probe(device_t dev)
+{
+ uint16_t d;
+ size_t i;
+
+ d = pci_get_device(dev);
+ for (i = 0; i < nitems(t7vf_pciids); i++) {
+ if (d == t7vf_pciids[i].device) {
+ device_set_desc(dev, t7vf_pciids[i].desc);
+ return (BUS_PROBE_DEFAULT);
+ }
+ }
+ return (ENXIO);
+}
+
#define FW_PARAM_DEV(param) \
(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
@@ -956,6 +994,20 @@ static driver_t t6vf_driver = {
sizeof(struct adapter)
};
+static device_method_t chvf_methods[] = {
+ DEVMETHOD(device_probe, chvf_probe),
+ DEVMETHOD(device_attach, t4vf_attach),
+ DEVMETHOD(device_detach, t4_detach_common),
+
+ DEVMETHOD_END
+};
+
+static driver_t chvf_driver = {
+ "chvf",
+ chvf_methods,
+ sizeof(struct adapter)
+};
+
static driver_t cxgbev_driver = {
"cxgbev",
cxgbe_methods,
@@ -974,6 +1026,12 @@ static driver_t ccv_driver = {
sizeof(struct port_info)
};
+static driver_t chev_driver = {
+ "chev",
+ cxgbe_methods,
+ sizeof(struct port_info)
+};
+
DRIVER_MODULE(t4vf, pci, t4vf_driver, 0, 0);
MODULE_VERSION(t4vf, 1);
MODULE_DEPEND(t4vf, t4nex, 1, 1, 1);
@@ -986,6 +1044,10 @@ DRIVER_MODULE(t6vf, pci, t6vf_driver, 0, 0);
MODULE_VERSION(t6vf, 1);
MODULE_DEPEND(t6vf, t6nex, 1, 1, 1);
+DRIVER_MODULE(chvf, pci, chvf_driver, 0, 0);
+MODULE_VERSION(chvf, 1);
+MODULE_DEPEND(chvf, chnex, 1, 1, 1);
+
DRIVER_MODULE(cxgbev, t4vf, cxgbev_driver, 0, 0);
MODULE_VERSION(cxgbev, 1);
@@ -994,3 +1056,6 @@ MODULE_VERSION(cxlv, 1);
DRIVER_MODULE(ccv, t6vf, ccv_driver, 0, 0);
MODULE_VERSION(ccv, 1);
+
+DRIVER_MODULE(chev, chvf, chev_driver, 0, 0);
+MODULE_VERSION(chev, 1);
diff --git a/sys/dev/cxgbe/tom/t4_connect.c b/sys/dev/cxgbe/tom/t4_connect.c
index 99e4c222996d..c236ee060bc2 100644
--- a/sys/dev/cxgbe/tom/t4_connect.c
+++ b/sys/dev/cxgbe/tom/t4_connect.c
@@ -89,6 +89,12 @@ do_act_establish(struct sge_iq *iq, const struct rss_header *rss,
INP_WLOCK(inp);
toep->tid = tid;
insert_tid(sc, tid, toep, inp->inp_vflag & INP_IPV6 ? 2 : 1);
+ if (sc->params.tid_qid_sel_mask != 0) {
+ update_tid_qid_sel(toep->vi, &toep->params, tid);
+ toep->ofld_txq = &sc->sge.ofld_txq[toep->params.txq_idx];
+ toep->ctrlq = &sc->sge.ctrlq[toep->params.ctrlq_idx];
+ }
+
if (inp->inp_flags & INP_DROPPED) {
/* socket closed by the kernel before hw told us it connected */
@@ -205,7 +211,7 @@ static inline int
act_open_cpl_size(struct adapter *sc, int isipv6)
{
int idx;
- static const int sz_table[3][2] = {
+ static const int sz_table[4][2] = {
{
sizeof (struct cpl_act_open_req),
sizeof (struct cpl_act_open_req6)
@@ -218,10 +224,14 @@ act_open_cpl_size(struct adapter *sc, int isipv6)
sizeof (struct cpl_t6_act_open_req),
sizeof (struct cpl_t6_act_open_req6)
},
+ {
+ sizeof (struct cpl_t7_act_open_req),
+ sizeof (struct cpl_t7_act_open_req6)
+ },
};
MPASS(chip_id(sc) >= CHELSIO_T4);
- idx = min(chip_id(sc) - CHELSIO_T4, 2);
+ idx = min(chip_id(sc) - CHELSIO_T4, 3);
return (sz_table[idx][!!isipv6]);
}
@@ -255,6 +265,7 @@ t4_connect(struct toedev *tod, struct socket *so, struct nhop_object *nh,
struct offload_settings settings;
struct epoch_tracker et;
uint16_t vid = 0xfff, pcp = 0;
+ uint64_t ntuple;
INP_WLOCK_ASSERT(inp);
KASSERT(nam->sa_family == AF_INET || nam->sa_family == AF_INET6,
@@ -308,10 +319,12 @@ t4_connect(struct toedev *tod, struct socket *so, struct nhop_object *nh,
qid_atid = V_TID_QID(toep->ofld_rxq->iq.abs_id) | V_TID_TID(toep->tid) |
V_TID_COOKIE(CPL_COOKIE_TOM);
+ ntuple = select_ntuple(vi, toep->l2te);
if (isipv6) {
struct cpl_act_open_req6 *cpl = wrtod(wr);
struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;
+ struct cpl_t7_act_open_req6 *cpl7 = (void *)cpl;
if ((inp->inp_vflag & INP_IPV6) == 0)
DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);
@@ -323,18 +336,23 @@ t4_connect(struct toedev *tod, struct socket *so, struct nhop_object *nh,
switch (chip_id(sc)) {
case CHELSIO_T4:
INIT_TP_WR(cpl, 0);
- cpl->params = select_ntuple(vi, toep->l2te);
+ cpl->params = htobe32((uint32_t)ntuple);
break;
case CHELSIO_T5:
INIT_TP_WR(cpl5, 0);
cpl5->iss = htobe32(tp->iss);
- cpl5->params = select_ntuple(vi, toep->l2te);
+ cpl5->params = htobe64(V_FILTER_TUPLE(ntuple));
break;
case CHELSIO_T6:
- default:
INIT_TP_WR(cpl6, 0);
cpl6->iss = htobe32(tp->iss);
- cpl6->params = select_ntuple(vi, toep->l2te);
+ cpl6->params = htobe64(V_FILTER_TUPLE(ntuple));
+ break;
+ case CHELSIO_T7:
+ default:
+ INIT_TP_WR(cpl7, 0);
+ cpl7->iss = htobe32(tp->iss);
+ cpl7->params = htobe64(V_T7_FILTER_TUPLE(ntuple));
break;
}
OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
@@ -356,23 +374,28 @@ t4_connect(struct toedev *tod, struct socket *so, struct nhop_object *nh,
struct cpl_act_open_req *cpl = wrtod(wr);
struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
struct cpl_t6_act_open_req *cpl6 = (void *)cpl;
+ struct cpl_t7_act_open_req *cpl7 = (void *)cpl;
switch (chip_id(sc)) {
case CHELSIO_T4:
INIT_TP_WR(cpl, 0);
- cpl->params = select_ntuple(vi, toep->l2te);
+ cpl->params = htobe32((uint32_t)ntuple);
break;
case CHELSIO_T5:
INIT_TP_WR(cpl5, 0);
cpl5->iss = htobe32(tp->iss);
- cpl5->params = select_ntuple(vi, toep->l2te);
+ cpl5->params = htobe64(V_FILTER_TUPLE(ntuple));
break;
case CHELSIO_T6:
- default:
INIT_TP_WR(cpl6, 0);
cpl6->iss = htobe32(tp->iss);
- cpl6->params = select_ntuple(vi, toep->l2te);
+ cpl6->params = htobe64(V_FILTER_TUPLE(ntuple));
break;
+ case CHELSIO_T7:
+ default:
+ INIT_TP_WR(cpl7, 0);
+ cpl7->iss = htobe32(tp->iss);
+ cpl7->params = htobe64(V_T7_FILTER_TUPLE(ntuple));
}
OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
qid_atid));
diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c
index be20ea42474e..84e31efa8b58 100644
--- a/sys/dev/cxgbe/tom/t4_cpl_io.c
+++ b/sys/dev/cxgbe/tom/t4_cpl_io.c
@@ -127,8 +127,9 @@ send_flowc_wr(struct toepcb *toep, struct tcpcb *tp)
paramidx = 0;
FLOWC_PARAM(PFNVFN, pfvf);
- FLOWC_PARAM(CH, pi->tx_chan);
- FLOWC_PARAM(PORT, pi->tx_chan);
+ /* Firmware expects hw port and will translate to channel itself. */
+ FLOWC_PARAM(CH, pi->hw_port);
+ FLOWC_PARAM(PORT, pi->hw_port);
FLOWC_PARAM(IQID, toep->ofld_rxq->iq.abs_id);
FLOWC_PARAM(SNDBUF, toep->params.sndbuf);
if (tp) {
@@ -2050,9 +2051,18 @@ write_set_tcb_field(struct adapter *sc, void *dst, struct toepcb *toep,
}
INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid);
- req->reply_ctrl = htobe16(V_QUEUENO(toep->ofld_rxq->iq.abs_id));
- if (reply == 0)
- req->reply_ctrl |= htobe16(F_NO_REPLY);
+ if (reply == 0) {
+ req->reply_ctrl = htobe16(F_NO_REPLY);
+ } else {
+ const int qid = toep->ofld_rxq->iq.abs_id;
+ if (chip_id(sc) >= CHELSIO_T7) {
+ req->reply_ctrl = htobe16(V_T7_QUEUENO(qid) |
+ V_T7_REPLY_CHAN(0) | V_NO_REPLY(0));
+ } else {
+ req->reply_ctrl = htobe16(V_QUEUENO(qid) |
+ V_REPLY_CHAN(0) | V_NO_REPLY(0));
+ }
+ }
req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(cookie));
req->mask = htobe64(mask);
req->val = htobe64(val);
diff --git a/sys/dev/cxgbe/tom/t4_ddp.c b/sys/dev/cxgbe/tom/t4_ddp.c
index da0753296532..35fb1061d867 100644
--- a/sys/dev/cxgbe/tom/t4_ddp.c
+++ b/sys/dev/cxgbe/tom/t4_ddp.c
@@ -1655,7 +1655,10 @@ t4_write_page_pods_for_ps(struct adapter *sc, struct sge_wrq *wrq, int tid,
INIT_ULPTX_WR(ulpmc, len, 0, 0);
ulpmc->cmd = cmd;
- ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
+ if (chip_id(sc) >= CHELSIO_T7)
+ ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(chunk >> 5));
+ else
+ ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk >> 5));
ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
@@ -1842,7 +1845,10 @@ t4_write_page_pods_for_bio(struct adapter *sc, struct toepcb *toep,
ulpmc = mtod(m, struct ulp_mem_io *);
INIT_ULPTX_WR(ulpmc, len, 0, toep->tid);
ulpmc->cmd = cmd;
- ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
+ if (chip_id(sc) >= CHELSIO_T7)
+ ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(chunk >> 5));
+ else
+ ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk >> 5));
ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
@@ -1922,7 +1928,10 @@ t4_write_page_pods_for_buf(struct adapter *sc, struct toepcb *toep,
INIT_ULPTX_WR(ulpmc, len, 0, toep->tid);
ulpmc->cmd = cmd;
- ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
+ if (chip_id(sc) >= CHELSIO_T7)
+ ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(chunk >> 5));
+ else
+ ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk >> 5));
ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
@@ -2013,7 +2022,10 @@ t4_write_page_pods_for_sgl(struct adapter *sc, struct toepcb *toep,
INIT_ULPTX_WR(ulpmc, len, 0, toep->tid);
ulpmc->cmd = cmd;
- ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
+ if (chip_id(sc) >= CHELSIO_T7)
+ ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(chunk >> 5));
+ else
+ ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk >> 5));
ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
diff --git a/sys/dev/cxgbe/tom/t4_listen.c b/sys/dev/cxgbe/tom/t4_listen.c
index 06c495dcafc3..b879f6883f25 100644
--- a/sys/dev/cxgbe/tom/t4_listen.c
+++ b/sys/dev/cxgbe/tom/t4_listen.c
@@ -508,10 +508,11 @@ send_flowc_wr_synqe(struct adapter *sc, struct synq_entry *synqe)
V_FW_WR_FLOWID(synqe->tid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
flowc->mnemval[0].val = htobe32(pfvf);
+ /* Firmware expects hw port and will translate to channel itself. */
flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
- flowc->mnemval[1].val = htobe32(pi->tx_chan);
+ flowc->mnemval[1].val = htobe32(pi->hw_port);
flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
- flowc->mnemval[2].val = htobe32(pi->tx_chan);
+ flowc->mnemval[2].val = htobe32(pi->hw_port);
flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
flowc->mnemval[3].val = htobe32(ofld_rxq->iq.abs_id);
flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDBUF;
@@ -1507,6 +1508,8 @@ found:
init_conn_params(vi, &settings, &inc, so, &cpl->tcpopt, e->idx,
&synqe->params);
+ if (sc->params.tid_qid_sel_mask != 0)
+ update_tid_qid_sel(vi, &synqe->params, tid);
/*
* If all goes well t4_syncache_respond will get called during
diff --git a/sys/dev/cxgbe/tom/t4_tls.c b/sys/dev/cxgbe/tom/t4_tls.c
index ad72c6a6b025..bbcc1c88c3db 100644
--- a/sys/dev/cxgbe/tom/t4_tls.c
+++ b/sys/dev/cxgbe/tom/t4_tls.c
@@ -207,7 +207,7 @@ int
tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
{
struct adapter *sc = td_adapter(toep->td);
- int error, explicit_iv_size, mac_first;
+ int error, iv_size, mac_first;
if (!can_tls_offload(sc))
return (EINVAL);
@@ -228,6 +228,21 @@ tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
}
}
+ /* TLS 1.1 through TLS 1.3 are currently supported. */
+ if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
+ tls->params.tls_vminor < TLS_MINOR_VER_ONE ||
+ tls->params.tls_vminor > TLS_MINOR_VER_THREE) {
+ return (EPROTONOSUPPORT);
+ }
+
+ /* TLS 1.3 is only supported on T7+. */
+ if (tls->params.tls_vminor == TLS_MINOR_VER_THREE) {
+ if (is_t6(sc)) {
+ return (EPROTONOSUPPORT);
+ }
+ }
+
+ /* Sanity check values in *tls. */
switch (tls->params.cipher_algorithm) {
case CRYPTO_AES_CBC:
/* XXX: Explicitly ignore any provided IV. */
@@ -247,13 +262,10 @@ tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
default:
return (EPROTONOSUPPORT);
}
- explicit_iv_size = AES_BLOCK_LEN;
+ iv_size = AES_BLOCK_LEN;
mac_first = 1;
break;
case CRYPTO_AES_NIST_GCM_16:
- if (tls->params.iv_len != SALT_SIZE) {
- return (EINVAL);
- }
switch (tls->params.cipher_key_len) {
case 128 / 8:
case 192 / 8:
@@ -262,20 +274,19 @@ tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
default:
return (EINVAL);
}
- explicit_iv_size = 8;
+
+ /*
+ * The IV size for TLS 1.2 is the explicit IV in the
+ * record header. For TLS 1.3 it is the size of the
+ * sequence number.
+ */
+ iv_size = 8;
mac_first = 0;
break;
default:
return (EPROTONOSUPPORT);
}
- /* Only TLS 1.1 and TLS 1.2 are currently supported. */
- if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
- tls->params.tls_vminor < TLS_MINOR_VER_ONE ||
- tls->params.tls_vminor > TLS_MINOR_VER_TWO) {
- return (EPROTONOSUPPORT);
- }
-
/* Bail if we already have a key. */
if (direction == KTLS_TX) {
if (toep->tls.tx_key_addr != -1)
@@ -289,6 +300,7 @@ tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
if (error)
return (error);
+ toep->tls.tls13 = tls->params.tls_vminor == TLS_MINOR_VER_THREE;
if (direction == KTLS_TX) {
toep->tls.scmd0.seqno_numivs =
(V_SCMD_SEQ_NO_CTRL(3) |
@@ -298,14 +310,14 @@ tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
V_SCMD_CIPH_MODE(t4_tls_cipher_mode(tls)) |
V_SCMD_AUTH_MODE(t4_tls_auth_mode(tls)) |
V_SCMD_HMAC_CTRL(t4_tls_hmac_ctrl(tls)) |
- V_SCMD_IV_SIZE(explicit_iv_size / 2));
+ V_SCMD_IV_SIZE(iv_size / 2));
toep->tls.scmd0.ivgen_hdrlen =
(V_SCMD_IV_GEN_CTRL(1) |
V_SCMD_KEY_CTX_INLINE(0) |
V_SCMD_TLS_FRAG_ENABLE(1));
- toep->tls.iv_len = explicit_iv_size;
+ toep->tls.iv_len = iv_size;
toep->tls.frag_size = tls->params.max_frame_len;
toep->tls.fcplenmax = get_tp_plen_max(tls);
toep->tls.expn_per_ulp = tls->params.tls_hlen +
@@ -352,7 +364,8 @@ tls_uninit_toep(struct toepcb *toep)
static void
write_tlstx_wr(struct fw_tlstx_data_wr *txwr, struct toepcb *toep,
- unsigned int plen, unsigned int expn, uint8_t credits, int shove)
+ unsigned int plen, unsigned int expn, uint8_t credits, int shove,
+ int num_ivs)
{
struct tls_ofld_info *tls_ofld = &toep->tls;
unsigned int len = plen + expn;
@@ -365,7 +378,7 @@ write_tlstx_wr(struct fw_tlstx_data_wr *txwr, struct toepcb *toep,
txwr->plen = htobe32(len);
txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(ULP_MODE_TLS) |
V_TX_URG(0) | /* F_T6_TX_FORCE | */ V_TX_SHOVE(shove));
- txwr->ctxloc_to_exp = htobe32(V_FW_TLSTX_DATA_WR_NUMIVS(1) |
+ txwr->ctxloc_to_exp = htobe32(V_FW_TLSTX_DATA_WR_NUMIVS(num_ivs) |
V_FW_TLSTX_DATA_WR_EXP(expn) |
V_FW_TLSTX_DATA_WR_CTXLOC(TLS_SFO_WR_CONTEXTLOC_DDR) |
V_FW_TLSTX_DATA_WR_IVDSGL(0) |
@@ -381,20 +394,20 @@ write_tlstx_wr(struct fw_tlstx_data_wr *txwr, struct toepcb *toep,
static void
write_tlstx_cpl(struct cpl_tx_tls_sfo *cpl, struct toepcb *toep,
- struct tls_hdr *tls_hdr, unsigned int plen, uint64_t seqno)
+ struct tls_hdr *tls_hdr, unsigned int plen, uint8_t rec_type,
+ uint64_t seqno)
{
struct tls_ofld_info *tls_ofld = &toep->tls;
int data_type, seglen;
seglen = plen;
- data_type = tls_content_type(tls_hdr->type);
+ data_type = tls_content_type(rec_type);
cpl->op_to_seg_len = htobe32(V_CPL_TX_TLS_SFO_OPCODE(CPL_TX_TLS_SFO) |
V_CPL_TX_TLS_SFO_DATA_TYPE(data_type) |
V_CPL_TX_TLS_SFO_CPL_LEN(2) | V_CPL_TX_TLS_SFO_SEG_LEN(seglen));
cpl->pld_len = htobe32(plen);
if (data_type == CPL_TX_TLS_SFO_TYPE_CUSTOM)
- cpl->type_protover = htobe32(
- V_CPL_TX_TLS_SFO_TYPE(tls_hdr->type));
+ cpl->type_protover = htobe32(V_CPL_TX_TLS_SFO_TYPE(rec_type));
cpl->seqno_numivs = htobe32(tls_ofld->scmd0.seqno_numivs |
V_SCMD_NUM_IVS(1));
cpl->ivgen_hdrlen = htobe32(tls_ofld->scmd0.ivgen_hdrlen);
@@ -498,6 +511,7 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
int tls_size, tx_credits, shove, sowwakeup;
struct ofld_tx_sdesc *txsd;
char *buf;
+ bool tls13;
INP_WLOCK_ASSERT(inp);
KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
@@ -533,6 +547,7 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
return;
}
+ tls13 = toep->tls.tls13;
txsd = &toep->txsd[toep->txsd_pidx];
for (;;) {
tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
@@ -599,9 +614,11 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
sizeof(struct cpl_tx_tls_sfo) +
sizeof(struct ulptx_idata) + sizeof(struct ulptx_sc_memrd);
- /* Explicit IVs for AES-CBC and AES-GCM are <= 16. */
- MPASS(toep->tls.iv_len <= AES_BLOCK_LEN);
- wr_len += AES_BLOCK_LEN;
+ if (!tls13) {
+ /* Explicit IVs for AES-CBC and AES-GCM are <= 16. */
+ MPASS(toep->tls.iv_len <= AES_BLOCK_LEN);
+ wr_len += AES_BLOCK_LEN;
+ }
/* Account for SGL in work request length. */
nsegs = count_ext_pgs_segs(m);
@@ -671,8 +688,10 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
expn_size = m->m_epg_hdrlen +
m->m_epg_trllen;
tls_size = m->m_len - expn_size;
- write_tlstx_wr(txwr, toep, tls_size, expn_size, credits, shove);
- write_tlstx_cpl(cpl, toep, thdr, tls_size, m->m_epg_seqno);
+ write_tlstx_wr(txwr, toep, tls_size, expn_size, credits, shove,
+ tls13 ? 0 : 1);
+ write_tlstx_cpl(cpl, toep, thdr, tls_size,
+ tls13 ? m->m_epg_record_type : thdr->type, m->m_epg_seqno);
idata = (struct ulptx_idata *)(cpl + 1);
idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
@@ -683,10 +702,12 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
V_ULPTX_LEN16(toep->tls.tx_key_info_size >> 4));
memrd->addr = htobe32(toep->tls.tx_key_addr >> 5);
- /* Copy IV. */
buf = (char *)(memrd + 1);
- memcpy(buf, thdr + 1, toep->tls.iv_len);
- buf += AES_BLOCK_LEN;
+ if (!tls13) {
+ /* Copy IV. */
+ memcpy(buf, thdr + 1, toep->tls.iv_len);
+ buf += AES_BLOCK_LEN;
+ }
write_ktlstx_sgl(buf, m, nsegs);
@@ -808,8 +829,8 @@ do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
struct sockbuf *sb;
struct mbuf *tls_data;
struct tls_get_record *tgr;
- struct mbuf *control;
- int pdu_length, trailer_len;
+ struct mbuf *control, *n;
+ int pdu_length, resid, trailer_len;
#if defined(KTR) || defined(INVARIANTS)
int len;
#endif
@@ -857,7 +878,9 @@ do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
/*
* The payload of this CPL is the TLS header followed by
- * additional fields.
+ * additional fields. For TLS 1.3 the type field holds the
+ * inner record type and the length field has been updated to
+ * strip the inner record type, padding, and MAC.
*/
KASSERT(m->m_len >= sizeof(*tls_hdr_pkt),
("%s: payload too small", __func__));
@@ -869,7 +892,14 @@ do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
("%s: sequence mismatch", __func__));
}
- /* Report decryption errors as EBADMSG. */
+ /*
+ * Report decryption errors as EBADMSG.
+ *
+ * XXX: To support rekeying for TLS 1.3 this will eventually
+ * have to be updated to recrypt the data with the old key and
+ * then decrypt with the new key. Punt for now as KTLS
+ * doesn't yet support rekeying.
+ */
if ((tls_hdr_pkt->res_to_mac_error & M_TLSRX_HDR_PKT_ERROR) != 0) {
CTR4(KTR_CXGBE, "%s: tid %u TLS error %#x ddp_vld %#x",
__func__, toep->tid, tls_hdr_pkt->res_to_mac_error,
@@ -887,6 +917,33 @@ do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
return (0);
}
+ /* For TLS 1.3 trim the header and trailer. */
+ if (toep->tls.tls13) {
+ KASSERT(tls_data != NULL, ("%s: TLS 1.3 record without data",
+ __func__));
+ MPASS(tls_data->m_pkthdr.len == pdu_length);
+ m_adj(tls_data, sizeof(struct tls_record_layer));
+ if (tls_data->m_pkthdr.len > be16toh(tls_hdr_pkt->length))
+ tls_data->m_pkthdr.len = be16toh(tls_hdr_pkt->length);
+ resid = tls_data->m_pkthdr.len;
+ if (resid == 0) {
+ m_freem(tls_data);
+ tls_data = NULL;
+ } else {
+ for (n = tls_data;; n = n->m_next) {
+ if (n->m_len < resid) {
+ resid -= n->m_len;
+ continue;
+ }
+
+ n->m_len = resid;
+ m_freem(n->m_next);
+ n->m_next = NULL;
+ break;
+ }
+ }
+ }
+
/* Handle data received after the socket is closed. */
sb = &so->so_rcv;
SOCKBUF_LOCK(sb);
@@ -1091,33 +1148,60 @@ out:
}
/*
- * Send a work request setting multiple TCB fields to enable
- * ULP_MODE_TLS.
+ * Send a work request setting one or more TCB fields to partially or
+ * fully enable ULP_MODE_TLS.
+ *
+ * - If resid == 0, the socket buffer ends at a record boundary
+ * (either empty or contains one or more complete records). Switch
+ * to ULP_MODE_TLS (if not already) and enable TLS decryption.
+ *
+ * - If resid != 0, the socket buffer contains a partial record. In
+ * this case, switch to ULP_MODE_TLS partially and configure the TCB
+ * to pass along the remaining resid bytes undecrypted. Once they
+ * arrive, this is called again with resid == 0 and enables TLS
+ * decryption.
*/
static void
-tls_update_tcb(struct adapter *sc, struct toepcb *toep, uint64_t seqno)
+tls_update_tcb(struct adapter *sc, struct toepcb *toep, uint64_t seqno,
+ size_t resid)
{
struct mbuf *m;
struct work_request_hdr *wrh;
struct ulp_txpkt *ulpmc;
int fields, key_offset, len;
- KASSERT(ulp_mode(toep) == ULP_MODE_NONE,
- ("%s: tid %d already ULP_MODE_TLS", __func__, toep->tid));
+ /*
+ * If we are already in ULP_MODE_TLS, then we should now be at
+ * a record boundary and ready to finish enabling TLS RX.
+ */
+ KASSERT(resid == 0 || ulp_mode(toep) == ULP_MODE_NONE,
+ ("%s: tid %d needs %zu more data but already ULP_MODE_TLS",
+ __func__, toep->tid, resid));
fields = 0;
+ if (ulp_mode(toep) == ULP_MODE_NONE) {
+ /* 2 writes for the overlay region */
+ fields += 2;
+ }
- /* 2 writes for the overlay region */
- fields += 2;
+ if (resid == 0) {
+ /* W_TCB_TLS_SEQ */
+ fields++;
- /* W_TCB_TLS_SEQ */
- fields++;
+ /* W_TCB_ULP_RAW */
+ fields++;
+ } else {
+ /* W_TCB_PDU_LEN */
+ fields++;
- /* W_TCB_ULP_RAW */
- fields++;
+ /* W_TCB_ULP_RAW */
+ fields++;
+ }
- /* W_TCB_ULP_TYPE */
- fields ++;
+ if (ulp_mode(toep) == ULP_MODE_NONE) {
+ /* W_TCB_ULP_TYPE */
+ fields ++;
+ }
/* W_TCB_T_FLAGS */
fields++;
@@ -1136,43 +1220,78 @@ tls_update_tcb(struct adapter *sc, struct toepcb *toep, uint64_t seqno)
INIT_ULPTX_WRH(wrh, len, 1, toep->tid); /* atomic */
ulpmc = (struct ulp_txpkt *)(wrh + 1);
- /*
- * Clear the TLS overlay region: 1023:832.
- *
- * Words 26/27 are always set to zero. Words 28/29
- * contain seqno and are set when enabling TLS
- * decryption. Word 30 is zero and Word 31 contains
- * the keyid.
- */
- ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 26,
- 0xffffffffffffffff, 0);
+ if (ulp_mode(toep) == ULP_MODE_NONE) {
+ /*
+ * Clear the TLS overlay region: 1023:832.
+ *
+ * Words 26/27 are always set to zero. Words 28/29
+ * contain seqno and are set when enabling TLS
+ * decryption. Word 30 is zero and Word 31 contains
+ * the keyid.
+ */
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 26,
+ 0xffffffffffffffff, 0);
- /*
- * RX key tags are an index into the key portion of MA
- * memory stored as an offset from the base address in
- * units of 64 bytes.
- */
- key_offset = toep->tls.rx_key_addr - sc->vres.key.start;
- ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 30,
- 0xffffffffffffffff,
- (uint64_t)V_TCB_RX_TLS_KEY_TAG(key_offset / 64) << 32);
-
- CTR3(KTR_CXGBE, "%s: tid %d enable TLS seqno %lu", __func__,
- toep->tid, seqno);
- ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_TLS_SEQ,
- V_TCB_TLS_SEQ(M_TCB_TLS_SEQ), V_TCB_TLS_SEQ(seqno));
- ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_ULP_RAW,
- V_TCB_ULP_RAW(M_TCB_ULP_RAW),
- V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) | V_TF_TLS_CONTROL(1) |
- V_TF_TLS_ACTIVE(1) | V_TF_TLS_ENABLE(1))));
-
- toep->flags &= ~TPF_TLS_STARTING;
- toep->flags |= TPF_TLS_RECEIVE;
-
- /* Set the ULP mode to ULP_MODE_TLS. */
- toep->params.ulp_mode = ULP_MODE_TLS;
- ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_ULP_TYPE,
- V_TCB_ULP_TYPE(M_TCB_ULP_TYPE), V_TCB_ULP_TYPE(ULP_MODE_TLS));
+ /*
+ * RX key tags are an index into the key portion of MA
+ * memory stored as an offset from the base address in
+ * units of 64 bytes.
+ */
+ key_offset = toep->tls.rx_key_addr - sc->vres.key.start;
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 30,
+ 0xffffffffffffffff,
+ (uint64_t)V_TCB_RX_TLS_KEY_TAG(key_offset / 64) << 32);
+ }
+
+ if (resid == 0) {
+ /*
+ * The socket buffer is empty or only contains
+ * complete TLS records: Set the sequence number and
+ * enable TLS decryption.
+ */
+ CTR3(KTR_CXGBE, "%s: tid %d enable TLS seqno %lu", __func__,
+ toep->tid, seqno);
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
+ W_TCB_RX_TLS_SEQ, V_TCB_RX_TLS_SEQ(M_TCB_RX_TLS_SEQ),
+ V_TCB_RX_TLS_SEQ(seqno));
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
+ W_TCB_ULP_RAW, V_TCB_ULP_RAW(M_TCB_ULP_RAW),
+ V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) | V_TF_TLS_CONTROL(1) |
+ V_TF_TLS_ACTIVE(1) | V_TF_TLS_ENABLE(1))));
+
+ toep->flags &= ~TPF_TLS_STARTING;
+ toep->flags |= TPF_TLS_RECEIVE;
+ } else {
+ /*
+ * The socket buffer ends with a partial record with a
+ * full header and needs at least 6 bytes.
+ *
+ * Set PDU length. This is treating the 'resid' bytes
+ * as a TLS PDU, so the first 5 bytes are a fake
+ * header and the rest are the PDU length.
+ */
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
+ W_TCB_PDU_LEN, V_TCB_PDU_LEN(M_TCB_PDU_LEN),
+ V_TCB_PDU_LEN(resid - sizeof(struct tls_hdr)));
+ CTR3(KTR_CXGBE, "%s: tid %d setting PDU_LEN to %zu",
+ __func__, toep->tid, resid - sizeof(struct tls_hdr));
+
+ /* Clear all bits in ULP_RAW except for ENABLE. */
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
+ W_TCB_ULP_RAW, V_TCB_ULP_RAW(M_TCB_ULP_RAW),
+ V_TCB_ULP_RAW(V_TF_TLS_ENABLE(1)));
+
+ /* Wait for 'resid' bytes to be delivered as CPL_RX_DATA. */
+ toep->tls.rx_resid = resid;
+ }
+
+ if (ulp_mode(toep) == ULP_MODE_NONE) {
+ /* Set the ULP mode to ULP_MODE_TLS. */
+ toep->params.ulp_mode = ULP_MODE_TLS;
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
+ W_TCB_ULP_TYPE, V_TCB_ULP_TYPE(M_TCB_ULP_TYPE),
+ V_TCB_ULP_TYPE(ULP_MODE_TLS));
+ }
/* Clear TF_RX_QUIESCE. */
ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_T_FLAGS,
@@ -1205,7 +1324,8 @@ tls_check_rx_sockbuf(struct adapter *sc, struct toepcb *toep,
* size of a TLS record, re-enable receive and pause again once
* we get more data to try again.
*/
- if (!have_header || resid != 0) {
+ if (!have_header || (resid != 0 && (resid < sizeof(struct tls_hdr) ||
+ is_t6(sc)))) {
CTR(KTR_CXGBE, "%s: tid %d waiting for more data", __func__,
toep->tid);
toep->flags &= ~TPF_TLS_RX_QUIESCED;
@@ -1213,7 +1333,7 @@ tls_check_rx_sockbuf(struct adapter *sc, struct toepcb *toep,
return;
}
- tls_update_tcb(sc, toep, seqno);
+ tls_update_tcb(sc, toep, seqno, resid);
}
void
diff --git a/sys/dev/cxgbe/tom/t4_tls.h b/sys/dev/cxgbe/tom/t4_tls.h
index 753a30890fdc..6faf946e9e3c 100644
--- a/sys/dev/cxgbe/tom/t4_tls.h
+++ b/sys/dev/cxgbe/tom/t4_tls.h
@@ -74,6 +74,7 @@ struct tls_ofld_info {
unsigned short adjusted_plen;
unsigned short expn_per_ulp;
unsigned short pdus_per_ulp;
+ bool tls13;
struct tls_scmd scmd0;
u_int iv_len;
unsigned int tx_key_info_size;
diff --git a/sys/dev/cxgbe/tom/t4_tom.c b/sys/dev/cxgbe/tom/t4_tom.c
index 0b54fdaa5c80..53a945f8b4cc 100644
--- a/sys/dev/cxgbe/tom/t4_tom.c
+++ b/sys/dev/cxgbe/tom/t4_tom.c
@@ -182,7 +182,7 @@ init_toepcb(struct vi_info *vi, struct toepcb *toep)
}
toep->ofld_txq = &sc->sge.ofld_txq[cp->txq_idx];
toep->ofld_rxq = &sc->sge.ofld_rxq[cp->rxq_idx];
- toep->ctrlq = &sc->sge.ctrlq[pi->port_id];
+ toep->ctrlq = &sc->sge.ctrlq[cp->ctrlq_idx];
tls_init_toep(toep);
MPASS(ulp_mode(toep) != ULP_MODE_TCPDDP);
@@ -494,8 +494,15 @@ send_get_tcb(struct adapter *sc, u_int tid)
bzero(cpl, sizeof(*cpl));
INIT_TP_WR(cpl, tid);
OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_GET_TCB, tid));
- cpl->reply_ctrl = htobe16(V_REPLY_CHAN(0) |
- V_QUEUENO(sc->sge.ofld_rxq[0].iq.cntxt_id));
+ if (chip_id(sc) >= CHELSIO_T7) {
+ cpl->reply_ctrl =
+ htobe16(V_T7_QUEUENO(sc->sge.ofld_rxq[0].iq.cntxt_id) |
+ V_T7_REPLY_CHAN(0) | V_NO_REPLY(0));
+ } else {
+ cpl->reply_ctrl =
+ htobe16(V_QUEUENO(sc->sge.ofld_rxq[0].iq.cntxt_id) |
+ V_REPLY_CHAN(0) | V_NO_REPLY(0));
+ }
cpl->cookie = 0xff;
commit_wrq_wr(&sc->sge.ctrlq[0], cpl, &cookie);
@@ -1221,7 +1228,7 @@ select_ntuple(struct vi_info *vi, struct l2t_entry *e)
ntuple |= (uint64_t)(F_FT_VLAN_VLD | e->vlan) << tp->vlan_shift;
if (tp->port_shift >= 0)
- ntuple |= (uint64_t)e->lport << tp->port_shift;
+ ntuple |= (uint64_t)e->hw_port << tp->port_shift;
if (tp->protocol_shift >= 0)
ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift;
@@ -1232,10 +1239,7 @@ select_ntuple(struct vi_info *vi, struct l2t_entry *e)
tp->vnic_shift;
}
- if (is_t4(sc))
- return (htobe32((uint32_t)ntuple));
- else
- return (htobe64(V_FILTER_TUPLE(ntuple)));
+ return (ntuple);
}
/*
@@ -1326,6 +1330,9 @@ init_conn_params(struct vi_info *vi , struct offload_settings *s,
*/
cp->mtu_idx = find_best_mtu_idx(sc, inc, s);
+ /* Control queue. */
+ cp->ctrlq_idx = vi->pi->port_id;
+
/* Tx queue for this connection. */
if (s->txq == QUEUE_RANDOM)
q_idx = arc4random();
@@ -1438,6 +1445,32 @@ init_conn_params(struct vi_info *vi , struct offload_settings *s,
cp->emss = 0;
}
+void
+update_tid_qid_sel(struct vi_info *vi, struct conn_params *cp, int tid)
+{
+ struct adapter *sc = vi->adapter;
+ const int mask = sc->params.tid_qid_sel_mask;
+ struct sge_ofld_txq *ofld_txq = &sc->sge.ofld_txq[cp->txq_idx];
+ uint32_t ngroup;
+ int g, nqpg;
+
+ cp->ctrlq_idx = ofld_txq_group(tid, mask);
+ CTR(KTR_CXGBE, "tid %u is on core %u", tid, cp->ctrlq_idx);
+ if ((ofld_txq->wrq.eq.cntxt_id & mask) == (tid & mask))
+ return;
+
+ ngroup = 1 << bitcount32(mask);
+ MPASS(vi->nofldtxq % ngroup == 0);
+ g = ofld_txq_group(tid, mask);
+ nqpg = vi->nofldtxq / ngroup;
+ cp->txq_idx = vi->first_ofld_txq + g * nqpg + arc4random() % nqpg;
+#ifdef INVARIANTS
+ MPASS(cp->txq_idx < vi->first_ofld_txq + vi->nofldtxq);
+ ofld_txq = &sc->sge.ofld_txq[cp->txq_idx];
+ MPASS((ofld_txq->wrq.eq.cntxt_id & mask) == (tid & mask));
+#endif
+}
+
int
negative_advice(int status)
{
@@ -2233,6 +2266,98 @@ t4_aio_queue_tom(struct socket *so, struct kaiocb *job)
return (0);
}
+/*
+ * Request/response structure used to find out the adapter offloading
+ * a socket.
+ */
+struct find_offload_adapter_data {
+ struct socket *so;
+ struct adapter *sc; /* result */
+};
+
+static void
+find_offload_adapter_cb(struct adapter *sc, void *arg)
+{
+ struct find_offload_adapter_data *fa = arg;
+ struct socket *so = fa->so;
+ struct tom_data *td = sc->tom_softc;
+ struct tcpcb *tp;
+ struct inpcb *inp;
+
+ /* Non-TCP were filtered out earlier. */
+ MPASS(so->so_proto->pr_protocol == IPPROTO_TCP);
+
+ if (fa->sc != NULL)
+ return; /* Found already. */
+
+ if (td == NULL)
+ return; /* TOE not enabled on this adapter. */
+
+ inp = sotoinpcb(so);
+ INP_WLOCK(inp);
+ if ((inp->inp_flags & INP_DROPPED) == 0) {
+ tp = intotcpcb(inp);
+ if (tp->t_flags & TF_TOE && tp->tod == &td->tod)
+ fa->sc = sc; /* Found. */
+ }
+ INP_WUNLOCK(inp);
+}
+
+struct adapter *
+find_offload_adapter(struct socket *so)
+{
+ struct find_offload_adapter_data fa;
+
+ fa.sc = NULL;
+ fa.so = so;
+ t4_iterate(find_offload_adapter_cb, &fa);
+ return (fa.sc);
+}
+
+void
+send_txdataplen_max_flowc_wr(struct adapter *sc, struct toepcb *toep,
+ int maxlen)
+{
+ struct wrqe *wr;
+ struct fw_flowc_wr *flowc;
+ const u_int nparams = 1;
+ u_int flowclen;
+ struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
+
+ CTR(KTR_CXGBE, "%s: tid %u maxlen=%d", __func__, toep->tid, maxlen);
+
+ flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);
+
+ wr = alloc_wrqe(roundup2(flowclen, 16), &toep->ofld_txq->wrq);
+ if (wr == NULL) {
+ /* XXX */
+ panic("%s: allocation failure.", __func__);
+ }
+ flowc = wrtod(wr);
+ memset(flowc, 0, wr->wr_len);
+
+ flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
+ V_FW_FLOWC_WR_NPARAMS(nparams));
+ flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
+ V_FW_WR_FLOWID(toep->tid));
+
+ flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
+ flowc->mnemval[0].val = htobe32(maxlen);
+
+ KASSERT(howmany(flowclen, 16) <= MAX_OFLD_TX_SDESC_CREDITS,
+ ("%s: tx_credits %u too large", __func__, howmany(flowclen, 16)));
+ txsd->tx_credits = howmany(flowclen, 16);
+ txsd->plen = 0;
+ KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
+ ("%s: not enough credits (%d)", __func__, toep->tx_credits));
+ toep->tx_credits -= txsd->tx_credits;
+ if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
+ toep->txsd_pidx = 0;
+ toep->txsd_avail--;
+
+ t4_wrq_tx(sc, wr);
+}
+
static int
t4_tom_mod_load(void)
{
diff --git a/sys/dev/cxgbe/tom/t4_tom.h b/sys/dev/cxgbe/tom/t4_tom.h
index 4fb87d92d91e..c8c2d432b8f1 100644
--- a/sys/dev/cxgbe/tom/t4_tom.h
+++ b/sys/dev/cxgbe/tom/t4_tom.h
@@ -113,6 +113,7 @@ struct conn_params {
int8_t mtu_idx;
int8_t ulp_mode;
int8_t tx_align;
+ int8_t ctrlq_idx; /* ctrlq = &sc->sge.ctrlq[ctrlq_idx] */
int16_t txq_idx; /* ofld_txq = &sc->sge.ofld_txq[txq_idx] */
int16_t rxq_idx; /* ofld_rxq = &sc->sge.ofld_rxq[rxq_idx] */
int16_t l2t_idx;
@@ -477,11 +478,14 @@ int select_rcv_wscale(void);
void init_conn_params(struct vi_info *, struct offload_settings *,
struct in_conninfo *, struct socket *, const struct tcp_options *, int16_t,
struct conn_params *cp);
+void update_tid_qid_sel(struct vi_info *, struct conn_params *, int);
__be64 calc_options0(struct vi_info *, struct conn_params *);
__be32 calc_options2(struct vi_info *, struct conn_params *);
uint64_t select_ntuple(struct vi_info *, struct l2t_entry *);
int negative_advice(int);
int add_tid_to_history(struct adapter *, u_int);
+struct adapter *find_offload_adapter(struct socket *);
+void send_txdataplen_max_flowc_wr(struct adapter *, struct toepcb *, int);
void t4_pcb_detach(struct toedev *, struct tcpcb *);
/* t4_connect.c */
@@ -582,4 +586,10 @@ int tls_tx_key(struct toepcb *);
void tls_uninit_toep(struct toepcb *);
int tls_alloc_ktls(struct toepcb *, struct ktls_session *, int);
+/* t4_tpt.c */
+uint32_t t4_pblpool_alloc(struct adapter *, int);
+void t4_pblpool_free(struct adapter *, uint32_t, int);
+int t4_pblpool_create(struct adapter *);
+void t4_pblpool_destroy(struct adapter *);
+
#endif
diff --git a/sys/dev/cxgbe/tom/t4_tom_l2t.c b/sys/dev/cxgbe/tom/t4_tom_l2t.c
index 3fd0d5ca41d4..e245c2b6fd5b 100644
--- a/sys/dev/cxgbe/tom/t4_tom_l2t.c
+++ b/sys/dev/cxgbe/tom/t4_tom_l2t.c
@@ -403,7 +403,7 @@ t4_l2t_get(struct port_info *pi, if_t ifp, struct sockaddr *sa)
l2_store(sa, e);
e->ifp = ifp;
e->hash = hash;
- e->lport = pi->lport;
+ e->hw_port = pi->hw_port;
e->wrq = &sc->sge.ctrlq[pi->port_id];
e->iqid = sc->sge.ofld_rxq[pi->vi[0].first_ofld_rxq].iq.abs_id;
atomic_store_rel_int(&e->refcnt, 1);
diff --git a/sys/dev/gpio/gpioc.c b/sys/dev/gpio/gpioc.c
index 5a60f939dc78..6c6f79227166 100644
--- a/sys/dev/gpio/gpioc.c
+++ b/sys/dev/gpio/gpioc.c
@@ -704,7 +704,7 @@ gpioc_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
* npins isn't a horrible fifo size for that either.
*/
priv->numevents = priv->sc->sc_npins * 2;
- priv->events = malloc(priv->numevents * sizeof(struct gpio_event_detail),
+ priv->events = malloc(priv->numevents * sizeof(struct gpioc_pin_event),
M_GPIOC, M_WAITOK | M_ZERO);
priv->evidx_head = priv->evidx_tail = 0;
@@ -793,6 +793,7 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
struct gpio_access_32 *a32;
struct gpio_config_32 *c32;
struct gpio_event_config *evcfg;
+ struct gpioc_pin_event *tmp;
uint32_t caps, intrflags;
switch (cmd) {
@@ -908,27 +909,35 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
res = devfs_get_cdevpriv((void **)&priv);
if (res != 0)
break;
- /* If any pins have been configured, changes aren't allowed. */
- if (!SLIST_EMPTY(&priv->pins)) {
- res = EINVAL;
- break;
- }
if (evcfg->gp_report_type != GPIO_EVENT_REPORT_DETAIL &&
evcfg->gp_report_type != GPIO_EVENT_REPORT_SUMMARY) {
res = EINVAL;
break;
}
- priv->report_option = evcfg->gp_report_type;
/* Reallocate the events buffer if the user wants it bigger. */
- if (priv->report_option == GPIO_EVENT_REPORT_DETAIL &&
+ tmp = NULL;
+ if (evcfg->gp_report_type == GPIO_EVENT_REPORT_DETAIL &&
priv->numevents < evcfg->gp_fifo_size) {
+ tmp = malloc(evcfg->gp_fifo_size *
+ sizeof(struct gpioc_pin_event), M_GPIOC,
+ M_WAITOK | M_ZERO);
+ }
+ mtx_lock(&priv->mtx);
+ /* If any pins have been configured, changes aren't allowed. */
+ if (!SLIST_EMPTY(&priv->pins)) {
+ mtx_unlock(&priv->mtx);
+ free(tmp, M_GPIOC);
+ res = EINVAL;
+ break;
+ }
+ if (tmp != NULL) {
free(priv->events, M_GPIOC);
+ priv->events = tmp;
priv->numevents = evcfg->gp_fifo_size;
- priv->events = malloc(priv->numevents *
- sizeof(struct gpio_event_detail), M_GPIOC,
- M_WAITOK | M_ZERO);
priv->evidx_head = priv->evidx_tail = 0;
}
+ priv->report_option = evcfg->gp_report_type;
+ mtx_unlock(&priv->mtx);
break;
case FIONBIO:
/*
diff --git a/sys/dev/gpio/pl061.c b/sys/dev/gpio/pl061.c
index 32109e5982bc..9996b0253c7d 100644
--- a/sys/dev/gpio/pl061.c
+++ b/sys/dev/gpio/pl061.c
@@ -558,8 +558,7 @@ static device_method_t pl061_methods[] = {
/* Bus interface */
DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
- DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
- DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
/* GPIO protocol */
DEVMETHOD(gpio_get_bus, pl061_get_bus),
diff --git a/sys/dev/hid/ietp.c b/sys/dev/hid/ietp.c
index 73a5cb7414d4..a9d0295fb121 100644
--- a/sys/dev/hid/ietp.c
+++ b/sys/dev/hid/ietp.c
@@ -199,17 +199,32 @@ static const struct hid_device_id ietp_iic_devs[] = {
IETP_IIC_DEV("ELAN1000"),
};
-static uint8_t const ietp_dummy_rdesc[] = {
+static uint8_t const ietp_dummy_rdesc_lo[] = {
0x05, HUP_GENERIC_DESKTOP, /* Usage Page (Generic Desktop Ctrls) */
0x09, HUG_MOUSE, /* Usage (Mouse) */
0xA1, 0x01, /* Collection (Application) */
0x09, 0x01, /* Usage (0x01) */
+ 0x15, 0x00, /* Logical Minimum (0) */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
0x95, IETP_REPORT_LEN_LO, /* Report Count (IETP_REPORT_LEN_LO) */
0x75, 0x08, /* Report Size (8) */
0x81, 0x02, /* Input (Data,Var,Abs) */
0xC0, /* End Collection */
};
+static uint8_t const ietp_dummy_rdesc_hi[] = {
+ 0x05, HUP_GENERIC_DESKTOP, /* Usage Page (Generic Desktop Ctrls) */
+ 0x09, HUG_MOUSE, /* Usage (Mouse) */
+ 0xA1, 0x01, /* Collection (Application) */
+ 0x09, 0x01, /* Usage (0x01) */
+ 0x15, 0x00, /* Logical Minimum (0) */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
+ 0x95, IETP_REPORT_LEN_HI, /* Report Count (IETP_REPORT_LEN_HI) */
+ 0x75, 0x08, /* Report Size (8) */
+ 0x81, 0x02, /* Input (Data,Var,Abs) */
+ 0xC0, /* End Collection */
+};
+
static const struct evdev_methods ietp_evdev_methods = {
.ev_open = &ietp_ev_open,
.ev_close = &ietp_ev_close,
@@ -433,28 +448,38 @@ ietp_res2dpmm(uint8_t res, bool hi_precision)
static void
ietp_iic_identify(driver_t *driver, device_t parent)
{
- void *d_ptr;
- hid_size_t d_len;
- int isize;
- uint8_t iid;
+ device_t iichid = device_get_parent(parent);
+ static const uint16_t reg = IETP_PATTERN;
+ uint16_t addr = iicbus_get_addr(iichid) << 1;
+ uint8_t resp[2];
+ uint8_t cmd[2] = { reg & 0xff, (reg >> 8) & 0xff };
+ struct iic_msg msgs[2] = {
+ { addr, IIC_M_WR | IIC_M_NOSTOP, sizeof(cmd), cmd },
+ { addr, IIC_M_RD, sizeof(resp), resp },
+ };
+ struct iic_rdwr_data ird = { msgs, nitems(msgs) };
+ uint8_t pattern;
 if (HIDBUS_LOOKUP_ID(parent, ietp_iic_devs) == NULL)
 return;
- if (hid_get_report_descr(parent, &d_ptr, &d_len) != 0)
+
+ if (device_get_devclass(iichid) != devclass_find("iichid"))
 return;
- /*
- * Some Elantech trackpads have a mangled HID report descriptor, which
- * reads as having an incorrect input size (i.e. < IETP_REPORT_LEN_LO).
- * If the input size is incorrect, load a dummy report descriptor.
- */
+ DPRINTF("Read reg 0x%04x with size %zu\n", reg, sizeof(resp));
- isize = hid_report_size_max(d_ptr, d_len, hid_input, &iid);
- if (isize >= IETP_REPORT_LEN_LO)
+ if (hid_ioctl(parent, I2CRDWR, (uintptr_t)&ird) != 0)
 return;
- hid_set_report_descr(parent, ietp_dummy_rdesc,
- sizeof(ietp_dummy_rdesc));
+ DPRINTF("Response: %*D\n", (int)sizeof(resp), resp, " ");
+
+ pattern = (resp[0] == 0xFF && resp[1] == 0xFF) ? 0 : resp[1];
+ if (pattern >= 0x02)
+ hid_set_report_descr(parent, ietp_dummy_rdesc_hi,
+ sizeof(ietp_dummy_rdesc_hi));
+ else
+ hid_set_report_descr(parent, ietp_dummy_rdesc_lo,
+ sizeof(ietp_dummy_rdesc_lo));
}
static int
diff --git a/sys/dev/hid/u2f.c b/sys/dev/hid/u2f.c
index ac2eba7a499d..08f1a5ceedba 100644
--- a/sys/dev/hid/u2f.c
+++ b/sys/dev/hid/u2f.c
@@ -47,6 +47,7 @@
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
+#include <sys/taskqueue.h>
#include <sys/uio.h>
#include <dev/evdev/input.h>
@@ -78,6 +79,7 @@ struct u2f_softc {
struct cdev *dev;
struct mtx sc_mtx; /* hidbus private mutex */
+ struct task sc_kqtask; /* kqueue task */
void *sc_rdesc;
hid_size_t sc_rdesc_size;
hid_size_t sc_isize;
@@ -121,6 +123,7 @@ static device_probe_t u2f_probe;
static device_attach_t u2f_attach;
static device_detach_t u2f_detach;
+static void u2f_kqtask(void *context, int pending);
static int u2f_kqread(struct knote *, long);
static void u2f_kqdetach(struct knote *);
static void u2f_notify(struct u2f_softc *);
@@ -174,6 +177,7 @@ u2f_attach(device_t dev)
mtx_init(&sc->sc_mtx, "u2f lock", NULL, MTX_DEF);
knlist_init_mtx(&sc->sc_rsel.si_note, &sc->sc_mtx);
+ TASK_INIT(&sc->sc_kqtask, 0, u2f_kqtask, sc);
make_dev_args_init(&mda);
mda.mda_flags = MAKEDEV_WAITOK;
@@ -189,7 +193,7 @@ u2f_attach(device_t dev)
u2f_detach(dev);
return (error);
}
-#ifdef U2F_MAKE_UHID_ALIAS
+#ifndef U2F_DROP_UHID_ALIAS
(void)make_dev_alias(sc->dev, "uhid%d", device_get_unit(dev));
#endif
@@ -217,6 +221,7 @@ u2f_detach(device_t dev)
destroy_dev(sc->dev);
}
+ taskqueue_drain(taskqueue_thread, &sc->sc_kqtask);
hid_intr_stop(sc->sc_dev);
knlist_clear(&sc->sc_rsel.si_note, 0);
@@ -519,6 +524,14 @@ u2f_kqfilter(struct cdev *dev, struct knote *kn)
return (0);
}
+static void
+u2f_kqtask(void *context, int pending)
+{
+ struct u2f_softc *sc = context;
+
+ hid_intr_start(sc->sc_dev);
+}
+
static int
u2f_kqread(struct knote *kn, long hint)
{
@@ -533,7 +546,7 @@ u2f_kqread(struct knote *kn, long hint)
} else {
ret = sc->sc_state.data ? 1 : 0;
if (!sc->sc_state.data)
- hid_intr_start(sc->sc_dev);
+ taskqueue_enqueue(taskqueue_thread, &sc->sc_kqtask);
}
return (ret);
@@ -574,10 +587,10 @@ static device_method_t u2f_methods[] = {
};
static driver_t u2f_driver = {
-#ifdef U2F_MAKE_UHID_ALIAS
- "uhid",
+#ifdef U2F_DROP_UHID_ALIAS
+ "uf2",
#else
- "u2f",
+ "uhid",
#endif
u2f_methods,
sizeof(struct u2f_softc)
diff --git a/sys/dev/iicbus/iichid.c b/sys/dev/iicbus/iichid.c
index fdb4816b8bd9..5ca3f1b84e48 100644
--- a/sys/dev/iicbus/iichid.c
+++ b/sys/dev/iicbus/iichid.c
@@ -540,7 +540,7 @@ iichid_sampling_task(void *context, int pending)
error = iichid_cmd_read(sc, sc->intr_buf, sc->intr_bufsize, &actual);
if (error == 0) {
if (actual > 0) {
- sc->intr_handler(sc->intr_ctx, sc->intr_buf + 2, actual);
+ sc->intr_handler(sc->intr_ctx, sc->intr_buf + 2, actual - 2);
sc->missing_samples = 0;
if (sc->dup_size != actual ||
memcmp(sc->dup_buf, sc->intr_buf, actual) != 0) {
@@ -607,7 +607,7 @@ iichid_intr(void *context)
if (sc->power_on && sc->open) {
if (actual != 0)
sc->intr_handler(sc->intr_ctx, sc->intr_buf + 2,
- actual);
+ actual - 2);
else
DPRINTF(sc, "no data received\n");
}
@@ -816,12 +816,13 @@ iichid_intr_setup(device_t dev, device_t child __unused, hid_intr_t intr,
sc = device_get_softc(dev);
/*
- * Do not rely just on wMaxInputLength, as some devices (which?)
- * may set it to a wrong length. Also find the longest input report
- * in report descriptor, and add two for the length field.
+ * Start with wMaxInputLength to follow HID-over-I2C specs. Than if
+ * semi-HID device like ietp(4) requested changing of input buffer
+ * size with report descriptor overloading, find the longest input
+ * report in the descriptor, and add two for the length field.
*/
- rdesc->rdsize = 2 +
- MAX(rdesc->isize, le16toh(sc->desc.wMaxInputLength));
+ rdesc->rdsize = rdesc->rdsize == 0 ?
+ le16toh(sc->desc.wMaxInputLength) - 2 : rdesc->isize;
/* Write and get/set_report sizes are limited by I2C-HID protocol. */
rdesc->grsize = rdesc->srsize = IICHID_SIZE_MAX;
rdesc->wrsize = IICHID_SIZE_MAX;
@@ -831,7 +832,7 @@ iichid_intr_setup(device_t dev, device_t child __unused, hid_intr_t intr,
sc->intr_handler = intr;
sc->intr_ctx = context;
- sc->intr_bufsize = rdesc->rdsize;
+ sc->intr_bufsize = rdesc->rdsize + 2;
sc->intr_buf = realloc(sc->intr_buf, sc->intr_bufsize,
M_DEVBUF, M_WAITOK | M_ZERO);
#ifdef IICHID_SAMPLING
@@ -1093,7 +1094,8 @@ iichid_probe(device_t dev)
}
if (le16toh(sc->desc.wHIDDescLength) != 30 ||
- le16toh(sc->desc.bcdVersion) != 0x100) {
+ le16toh(sc->desc.bcdVersion) != 0x100 ||
+ le16toh(sc->desc.wMaxInputLength) < 2) {
DPRINTF(sc, "HID descriptor is broken\n");
return (ENXIO);
}
diff --git a/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_fs.c b/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_fs.c
index fb9ca94278db..d1f454a5ec41 100644
--- a/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_fs.c
+++ b/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_fs.c
@@ -1134,6 +1134,11 @@ static int tx_add_kspi_rule(struct mlx5e_ipsec_sa_entry *sa_entry,
setup_fte_no_frags(spec);
setup_fte_reg_a_with_tag(spec, sa_entry->kspi);
+ if (sa_entry->vid != VLAN_NONE)
+ setup_fte_vid(spec, sa_entry->vid);
+ else
+ setup_fte_no_vid(spec);
+
rule = mlx5_add_flow_rules(tx->ft.sa_kspi, spec, flow_act, dest, num_dest);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1169,6 +1174,10 @@ static int tx_add_reqid_ip_rules(struct mlx5e_ipsec_sa_entry *sa_entry,
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
if(attrs->reqid) {
+ if (sa_entry->vid != VLAN_NONE)
+ setup_fte_vid(spec, sa_entry->vid);
+ else
+ setup_fte_no_vid(spec);
setup_fte_no_frags(spec);
setup_fte_reg_c0(spec, attrs->reqid);
rule = mlx5_add_flow_rules(tx->ft.sa, spec, flow_act, dest, num_dest);
@@ -1181,6 +1190,11 @@ static int tx_add_reqid_ip_rules(struct mlx5e_ipsec_sa_entry *sa_entry,
memset(spec, 0, sizeof(*spec));
}
+ if (sa_entry->vid != VLAN_NONE)
+ setup_fte_vid(spec, sa_entry->vid);
+ else
+ setup_fte_no_vid(spec);
+
if (attrs->family == AF_INET)
setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
else
@@ -1322,6 +1336,11 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
goto err_mod_header;
}
+ if (attrs->vid != VLAN_NONE)
+ setup_fte_vid(spec, attrs->vid);
+ else
+ setup_fte_no_vid(spec);
+
flow_act.flags |= FLOW_ACT_NO_APPEND;
dest[dstn].ft = tx->ft.sa;
dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
diff --git a/sys/dev/sound/pci/hda/hdaa.c b/sys/dev/sound/pci/hda/hdaa.c
index 1e486b01b168..5dbb5c4f4453 100644
--- a/sys/dev/sound/pci/hda/hdaa.c
+++ b/sys/dev/sound/pci/hda/hdaa.c
@@ -532,9 +532,11 @@ static void
hdaa_presence_handler(struct hdaa_widget *w)
{
struct hdaa_devinfo *devinfo = w->devinfo;
- struct hdaa_audio_as *as;
+ struct hdaa_audio_as *as, *asp;
+ char buf[32];
uint32_t res;
- int connected, old;
+ int connected, old, i;
+ bool active;
if (w->enable == 0 || w->type !=
HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
@@ -552,13 +554,6 @@ hdaa_presence_handler(struct hdaa_widget *w)
if (connected == old)
return;
w->wclass.pin.connected = connected;
- HDA_BOOTVERBOSE(
- if (connected || old != 2) {
- device_printf(devinfo->dev,
- "Pin sense: nid=%d sense=0x%08x (%sconnected)\n",
- w->nid, res, !connected ? "dis" : "");
- }
- );
as = &devinfo->as[w->bindas];
if (as->hpredir >= 0 && as->pins[15] == w->nid)
@@ -567,6 +562,38 @@ hdaa_presence_handler(struct hdaa_widget *w)
hdaa_autorecsrc_handler(as, w);
if (old != 2)
hdaa_channels_handler(as);
+
+ if (connected || old != 2) {
+ HDA_BOOTVERBOSE(
+ device_printf(devinfo->dev,
+ "Pin sense: nid=%d sense=0x%08x (%sconnected)\n",
+ w->nid, res, !connected ? "dis" : "");
+ );
+ if (as->hpredir >= 0)
+ return;
+ for (i = 0, active = false; i < devinfo->num_devs; i++) {
+ if (device_get_unit(devinfo->devs[i].dev) == snd_unit) {
+ active = true;
+ break;
+ }
+ }
+ /* Proceed only if we are currently using this codec. */
+ if (!active)
+ return;
+ for (i = 0; i < devinfo->ascnt; i++) {
+ asp = &devinfo->as[i];
+ if (!asp->enable)
+ continue;
+ if ((connected && asp->index == as->index) ||
+ (!connected && asp->dir == as->dir)) {
+ snprintf(buf, sizeof(buf), "cdev=dsp%d",
+ device_get_unit(asp->pdevinfo->dev));
+ devctl_notify("SND", "CONN",
+ asp->dir == HDAA_CTL_IN ? "IN" : "OUT", buf);
+ break;
+ }
+ }
+ }
}
/*
@@ -6194,15 +6221,15 @@ hdaa_configure(device_t dev)
);
hdaa_patch_direct(devinfo);
HDA_BOOTHVERBOSE(
- device_printf(dev, "Pin sense init...\n");
- );
- hdaa_sense_init(devinfo);
- HDA_BOOTHVERBOSE(
device_printf(dev, "Creating PCM devices...\n");
);
hdaa_unlock(devinfo);
hdaa_create_pcms(devinfo);
hdaa_lock(devinfo);
+ HDA_BOOTHVERBOSE(
+ device_printf(dev, "Pin sense init...\n");
+ );
+ hdaa_sense_init(devinfo);
HDA_BOOTVERBOSE(
if (devinfo->quirks != 0) {
diff --git a/sys/dev/sound/pcm/dsp.c b/sys/dev/sound/pcm/dsp.c
index da38f52021ae..fe5576baf017 100644
--- a/sys/dev/sound/pcm/dsp.c
+++ b/sys/dev/sound/pcm/dsp.c
@@ -671,6 +671,43 @@ dsp_ioctl_channel(struct dsp_cdevpriv *priv, struct pcm_channel *ch,
return (0);
}
+#ifdef COMPAT_FREEBSD32
+typedef struct _snd_chan_param32 {
+ uint32_t play_rate;
+ uint32_t rec_rate;
+ uint32_t play_format;
+ uint32_t rec_format;
+} snd_chan_param32;
+#define AIOGFMT32 _IOC_NEWTYPE(AIOGFMT, snd_chan_param32)
+#define AIOSFMT32 _IOC_NEWTYPE(AIOSFMT, snd_chan_param32)
+
+typedef struct _snd_capabilities32 {
+ uint32_t rate_min, rate_max;
+ uint32_t formats;
+ uint32_t bufsize;
+ uint32_t mixers;
+ uint32_t inputs;
+ uint16_t left, right;
+} snd_capabilities32;
+#define AIOGCAP32 _IOC_NEWTYPE(AIOGCAP, snd_capabilities32)
+
+typedef struct audio_errinfo32
+{
+ int32_t play_underruns;
+ int32_t rec_overruns;
+ uint32_t play_ptradjust;
+ uint32_t rec_ptradjust;
+ int32_t play_errorcount;
+ int32_t rec_errorcount;
+ int32_t play_lasterror;
+ int32_t rec_lasterror;
+ int32_t play_errorparm;
+ int32_t rec_errorparm;
+ int32_t filler[16];
+} audio_errinfo32;
+#define SNDCTL_DSP_GETERROR32 _IOC_NEWTYPE(SNDCTL_DSP_GETERROR, audio_errinfo32)
+#endif
+
static int
dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
struct thread *td)
@@ -829,9 +866,25 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
case AIOSFMT:
case AIOGFMT:
+#ifdef COMPAT_FREEBSD32
+ case AIOSFMT32:
+ case AIOGFMT32:
+#endif
{
snd_chan_param *p = (snd_chan_param *)arg;
+#ifdef COMPAT_FREEBSD32
+ snd_chan_param32 *p32 = (snd_chan_param32 *)arg;
+ snd_chan_param param;
+
+ if (cmd == AIOSFMT32) {
+ p = &param;
+ p->play_rate = p32->play_rate;
+ p->rec_rate = p32->rec_rate;
+ p->play_format = p32->play_format;
+ p->rec_format = p32->rec_format;
+ }
+#endif
if (cmd == AIOSFMT &&
((p->play_format != 0 && p->play_rate == 0) ||
(p->rec_format != 0 && p->rec_rate == 0))) {
@@ -872,15 +925,41 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
p->rec_format = 0;
}
PCM_RELEASE_QUICK(d);
+#ifdef COMPAT_FREEBSD32
+ if (cmd == AIOSFMT32 || cmd == AIOGFMT32) {
+ p32->play_rate = p->play_rate;
+ p32->rec_rate = p->rec_rate;
+ p32->play_format = p->play_format;
+ p32->rec_format = p->rec_format;
+ }
+#endif
}
break;
case AIOGCAP: /* get capabilities */
+#ifdef COMPAT_FREEBSD32
+ case AIOGCAP32:
+#endif
{
snd_capabilities *p = (snd_capabilities *)arg;
struct pcmchan_caps *pcaps = NULL, *rcaps = NULL;
struct cdev *pdev;
-
+#ifdef COMPAT_FREEBSD32
+ snd_capabilities32 *p32 = (snd_capabilities32 *)arg;
+ snd_capabilities capabilities;
+
+ if (cmd == AIOGCAP32) {
+ p = &capabilities;
+ p->rate_min = p32->rate_min;
+ p->rate_max = p32->rate_max;
+ p->formats = p32->formats;
+ p->bufsize = p32->bufsize;
+ p->mixers = p32->mixers;
+ p->inputs = p32->inputs;
+ p->left = p32->left;
+ p->right = p32->right;
+ }
+#endif
PCM_LOCK(d);
if (rdch) {
CHN_LOCK(rdch);
@@ -913,6 +992,18 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
if (rdch)
CHN_UNLOCK(rdch);
PCM_UNLOCK(d);
+#ifdef COMPAT_FREEBSD32
+ if (cmd == AIOGCAP32) {
+ p32->rate_min = p->rate_min;
+ p32->rate_max = p->rate_max;
+ p32->formats = p->formats;
+ p32->bufsize = p->bufsize;
+ p32->mixers = p->mixers;
+ p32->inputs = p->inputs;
+ p32->left = p->left;
+ p32->right = p->right;
+ }
+#endif
}
break;
@@ -1635,6 +1726,9 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
break;
case SNDCTL_DSP_GETERROR:
+#ifdef COMPAT_FREEBSD32
+ case SNDCTL_DSP_GETERROR32:
+#endif
/*
* OSSv4 docs: "All errors and counters will automatically be
* cleared to zeroes after the call so each call will return only
@@ -1644,6 +1738,14 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
*/
{
audio_errinfo *ei = (audio_errinfo *)arg;
+#ifdef COMPAT_FREEBSD32
+ audio_errinfo errinfo;
+ audio_errinfo32 *ei32 = (audio_errinfo32 *)arg;
+
+ if (cmd == SNDCTL_DSP_GETERROR32) {
+ ei = &errinfo;
+ }
+#endif
bzero((void *)ei, sizeof(*ei));
@@ -1659,6 +1761,21 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
rdch->xruns = 0;
CHN_UNLOCK(rdch);
}
+#ifdef COMPAT_FREEBSD32
+ if (cmd == SNDCTL_DSP_GETERROR32) {
+ bzero((void *)ei32, sizeof(*ei32));
+ ei32->play_underruns = ei->play_underruns;
+ ei32->rec_overruns = ei->rec_overruns;
+ ei32->play_ptradjust = ei->play_ptradjust;
+ ei32->rec_ptradjust = ei->rec_ptradjust;
+ ei32->play_errorcount = ei->play_errorcount;
+ ei32->rec_errorcount = ei->rec_errorcount;
+ ei32->play_lasterror = ei->play_lasterror;
+ ei32->rec_lasterror = ei->rec_lasterror;
+ ei32->play_errorparm = ei->play_errorparm;
+ ei32->rec_errorparm = ei->rec_errorparm;
+ }
+#endif
}
break;
diff --git a/sys/dev/thunderbolt/hcm.c b/sys/dev/thunderbolt/hcm.c
new file mode 100644
index 000000000000..b8f703fc3b52
--- /dev/null
+++ b/sys/dev/thunderbolt/hcm.c
@@ -0,0 +1,223 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_thunderbolt.h"
+
+/* Host Configuration Manager (HCM) for USB4 and later TB3 */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/taskqueue.h>
+#include <sys/gsb_crc32.h>
+#include <sys/endian.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+#include <machine/stdarg.h>
+
+#include <dev/thunderbolt/nhi_reg.h>
+#include <dev/thunderbolt/nhi_var.h>
+#include <dev/thunderbolt/tb_reg.h>
+#include <dev/thunderbolt/tb_var.h>
+#include <dev/thunderbolt/tb_debug.h>
+#include <dev/thunderbolt/tbcfg_reg.h>
+#include <dev/thunderbolt/router_var.h>
+#include <dev/thunderbolt/hcm_var.h>
+
+static void hcm_cfg_task(void *, int);
+
+int
+hcm_attach(struct nhi_softc *nsc)
+{
+ struct hcm_softc *hcm;
+
+ tb_debug(nsc, DBG_HCM|DBG_EXTRA, "hcm_attach called\n");
+
+ hcm = malloc(sizeof(struct hcm_softc), M_THUNDERBOLT, M_NOWAIT|M_ZERO);
+ if (hcm == NULL) {
+ tb_debug(nsc, DBG_HCM, "Cannot allocate hcm object\n");
+ return (ENOMEM);
+ }
+
+ hcm->dev = nsc->dev;
+ hcm->nsc = nsc;
+ nsc->hcm = hcm;
+
+ hcm->taskqueue = taskqueue_create("hcm_event", M_NOWAIT,
+ taskqueue_thread_enqueue, &hcm->taskqueue);
+ if (hcm->taskqueue == NULL)
+ return (ENOMEM);
+ taskqueue_start_threads(&hcm->taskqueue, 1, PI_DISK, "tbhcm%d_tq",
+ device_get_unit(nsc->dev));
+ TASK_INIT(&hcm->cfg_task, 0, hcm_cfg_task, hcm);
+
+ return (0);
+}
+
+int
+hcm_detach(struct nhi_softc *nsc)
+{
+ struct hcm_softc *hcm;
+
+ hcm = nsc->hcm;
+ if (hcm->taskqueue)
+ taskqueue_free(hcm->taskqueue);
+
+ return (0);
+}
+
+int
+hcm_router_discover(struct hcm_softc *hcm)
+{
+
+ taskqueue_enqueue(hcm->taskqueue, &hcm->cfg_task);
+
+ return (0);
+}
+
+static void
+hcm_cfg_task(void *arg, int pending)
+{
+ struct hcm_softc *hcm;
+ struct router_softc *rsc;
+ struct router_cfg_cap cap;
+ struct tb_cfg_router *cfg;
+ struct tb_cfg_adapter *adp;
+ struct tb_cfg_cap_lane *lane;
+ uint32_t *buf;
+ uint8_t *u;
+ u_int error, i, offset;
+
+ hcm = (struct hcm_softc *)arg;
+
+ tb_debug(hcm, DBG_HCM|DBG_EXTRA, "hcm_cfg_task called\n");
+
+ buf = malloc(8 * 4, M_THUNDERBOLT, M_NOWAIT|M_ZERO);
+ if (buf == NULL) {
+ tb_debug(hcm, DBG_HCM, "Cannot alloc memory for discovery\n");
+ return;
+ }
+
+ rsc = hcm->nsc->root_rsc;
+ error = tb_config_router_read(rsc, 0, 5, buf);
+ if (error != 0) {
+ free(buf, M_THUNDERBOLT);
+ return;
+ }
+
+ cfg = (struct tb_cfg_router *)buf;
+
+ cap.space = TB_CFG_CS_ROUTER;
+ cap.adap = 0;
+ cap.next_cap = GET_ROUTER_CS_NEXT_CAP(cfg);
+ while (cap.next_cap != 0) {
+ error = tb_config_next_cap(rsc, &cap);
+ if (error != 0)
+ break;
+
+ if ((cap.cap_id == TB_CFG_CAP_VSEC) && (cap.vsc_len == 0)) {
+ tb_debug(hcm, DBG_HCM, "Router Cap= %d, vsec= %d, "
+ "len= %d, next_cap= %d\n", cap.cap_id,
+ cap.vsc_id, cap.vsec_len, cap.next_cap);
+ } else if (cap.cap_id == TB_CFG_CAP_VSC) {
+ tb_debug(hcm, DBG_HCM, "Router cap= %d, vsc= %d, "
+ "len= %d, next_cap= %d\n", cap.cap_id,
+ cap.vsc_id, cap.vsc_len, cap.next_cap);
+ } else
+ tb_debug(hcm, DBG_HCM, "Router cap= %d, "
+ "next_cap= %d\n", cap.cap_id, cap.next_cap);
+ if (cap.next_cap > TB_CFG_CAP_OFFSET_MAX)
+ cap.next_cap = 0;
+ }
+
+ u = (uint8_t *)buf;
+ error = tb_config_get_lc_uuid(rsc, u);
+ if (error == 0) {
+ tb_debug(hcm, DBG_HCM, "Router LC UUID: %02x%02x%02x%02x-"
+ "%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x\n",
+ u[0], u[1], u[2], u[3], u[4], u[5], u[6], u[7], u[8],
+ u[9], u[10], u[11], u[12], u[13], u[14], u[15]);
+ } else
+ tb_printf(hcm, "Error finding LC registers: %d\n", error);
+
+ for (i = 1; i <= rsc->max_adap; i++) {
+ error = tb_config_adapter_read(rsc, i, 0, 8, buf);
+ if (error != 0) {
+ tb_debug(hcm, DBG_HCM, "Adapter %d: no adapter\n", i);
+ continue;
+ }
+ adp = (struct tb_cfg_adapter *)buf;
+ tb_debug(hcm, DBG_HCM, "Adapter %d: %s, max_counters= 0x%08x,"
+ " adapter_num= %d\n", i,
+ tb_get_string(GET_ADP_CS_TYPE(adp), tb_adapter_type),
+ GET_ADP_CS_MAX_COUNTERS(adp), GET_ADP_CS_ADP_NUM(adp));
+
+ if (GET_ADP_CS_TYPE(adp) != ADP_CS2_LANE)
+ continue;
+
+ error = tb_config_find_adapter_cap(rsc, i, TB_CFG_CAP_LANE,
+ &offset);
+ if (error)
+ continue;
+
+ error = tb_config_adapter_read(rsc, i, offset, 3, buf);
+ if (error)
+ continue;
+
+ lane = (struct tb_cfg_cap_lane *)buf;
+ tb_debug(hcm, DBG_HCM, "Lane Adapter State= %s %s\n",
+ tb_get_string((lane->current_lws & CAP_LANE_STATE_MASK),
+ tb_adapter_state), (lane->targ_lwp & CAP_LANE_DISABLE) ?
+ "disabled" : "enabled");
+
+ if ((lane->current_lws & CAP_LANE_STATE_MASK) ==
+ CAP_LANE_STATE_CL0) {
+ tb_route_t newr;
+
+ newr.hi = rsc->route.hi;
+ newr.lo = rsc->route.lo | (i << rsc->depth * 8);
+
+ tb_printf(hcm, "want to add router at 0x%08x%08x\n",
+ newr.hi, newr.lo);
+ error = tb_router_attach(rsc, newr);
+ tb_printf(rsc, "tb_router_attach returned %d\n", error);
+ }
+ }
+
+ free(buf, M_THUNDERBOLT);
+}
diff --git a/sys/dev/thunderbolt/hcm_var.h b/sys/dev/thunderbolt/hcm_var.h
new file mode 100644
index 000000000000..a11c8e9b6a92
--- /dev/null
+++ b/sys/dev/thunderbolt/hcm_var.h
@@ -0,0 +1,47 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _HCM_VAR_H
+#define _HCM_VAR_H
+
+/*
+ * Software context for the host connection manager (HCM) bound to one
+ * NHI controller instance.
+ */
+struct hcm_softc {
+ u_int debug; /* debug flag bitmask */
+ device_t dev;
+ struct nhi_softc *nsc; /* parent NHI controller */
+
+ struct task cfg_task; /* deferred config work; presumably router discovery — verify against hcm.c */
+ struct taskqueue *taskqueue; /* taskqueue servicing cfg_task */
+};
+
+int hcm_attach(struct nhi_softc *);
+int hcm_detach(struct nhi_softc *);
+int hcm_router_discover(struct hcm_softc *);
+
+#endif /* _HCM_VAR_H */
diff --git a/sys/dev/thunderbolt/nhi.c b/sys/dev/thunderbolt/nhi.c
new file mode 100644
index 000000000000..205e69c16253
--- /dev/null
+++ b/sys/dev/thunderbolt/nhi.c
@@ -0,0 +1,1170 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_thunderbolt.h"
+
+/* PCIe interface for Thunderbolt Native Host Interface (nhi) */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/taskqueue.h>
+#include <sys/gsb_crc32.h>
+#include <sys/endian.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+#include <machine/stdarg.h>
+
+#include <dev/thunderbolt/nhi_reg.h>
+#include <dev/thunderbolt/nhi_var.h>
+#include <dev/thunderbolt/tb_reg.h>
+#include <dev/thunderbolt/tb_var.h>
+#include <dev/thunderbolt/tb_debug.h>
+#include <dev/thunderbolt/hcm_var.h>
+#include <dev/thunderbolt/tbcfg_reg.h>
+#include <dev/thunderbolt/router_var.h>
+#include <dev/thunderbolt/tb_dev.h>
+#include "tb_if.h"
+
+static int nhi_alloc_ring(struct nhi_softc *, int, int, int,
+ struct nhi_ring_pair **);
+static void nhi_free_ring(struct nhi_ring_pair *);
+static void nhi_free_rings(struct nhi_softc *);
+static int nhi_configure_ring(struct nhi_softc *, struct nhi_ring_pair *);
+static int nhi_activate_ring(struct nhi_ring_pair *);
+static int nhi_deactivate_ring(struct nhi_ring_pair *);
+static int nhi_alloc_ring0(struct nhi_softc *);
+static void nhi_free_ring0(struct nhi_softc *);
+static void nhi_fill_rx_ring(struct nhi_softc *, struct nhi_ring_pair *);
+static int nhi_init(struct nhi_softc *);
+static void nhi_post_init(void *);
+static int nhi_tx_enqueue(struct nhi_ring_pair *, struct nhi_cmd_frame *);
+static int nhi_setup_sysctl(struct nhi_softc *);
+
+SYSCTL_NODE(_hw, OID_AUTO, nhi, CTLFLAG_RD, 0, "NHI Driver Parameters");
+
+MALLOC_DEFINE(M_NHI, "nhi", "nhi driver memory");
+
+#ifndef NHI_DEBUG_LEVEL
+#define NHI_DEBUG_LEVEL 0
+#endif
+
+/* 0 = default, 1 = force-on, 2 = force-off */
+#ifndef NHI_FORCE_HCM
+#define NHI_FORCE_HCM 0
+#endif
+
+/*
+ * Fetch driver tunables, in increasing order of precedence: compiled-in
+ * defaults, values inherited from the upstream thunderbolt switch node,
+ * global hw.nhi.* tunables, then per-instance dev.nhi.N.* tunables.
+ * Results land in sc->debug, sc->max_ring_count, and sc->force_hcm.
+ */
+void
+nhi_get_tunables(struct nhi_softc *sc)
+{
+	devclass_t dc;
+	device_t ufp;
+	char tmpstr[80], oid[80];
+	u_int val;
+
+	/* Set local defaults */
+	sc->debug = NHI_DEBUG_LEVEL;
+	sc->max_ring_count = NHI_DEFAULT_NUM_RINGS;
+	sc->force_hcm = NHI_FORCE_HCM;
+
+	/* Inherit setting from the upstream thunderbolt switch node */
+	val = TB_GET_DEBUG(sc->dev, &sc->debug);
+	if (val != 0) {
+		dc = devclass_find("tbolt");
+		if (dc != NULL) {
+			ufp = devclass_get_device(dc, device_get_unit(sc->dev));
+			if (ufp != NULL)
+				TB_GET_DEBUG(ufp, &sc->debug);
+		} else {
+			if (TUNABLE_STR_FETCH("hw.tbolt.debug_level", oid,
+			    80) != 0)
+				tb_parse_debug(&sc->debug, oid);
+		}
+	}
+
+	/*
+	 * Grab global variables.  Allow nhi debug flags to override
+	 * thunderbolt debug flags, if present.
+	 */
+	bzero(oid, 80);
+	if (TUNABLE_STR_FETCH("hw.nhi.debug_level", oid, 80) != 0)
+		tb_parse_debug(&sc->debug, oid);
+	if (TUNABLE_INT_FETCH("hw.nhi.max_rings", &val) != 0) {
+		/* Clamp to [1, NHI_MAX_NUM_RINGS] */
+		val = min(val, NHI_MAX_NUM_RINGS);
+		sc->max_ring_count = max(val, 1);
+	}
+	if (TUNABLE_INT_FETCH("hw.nhi.force_hcm", &val) != 0)
+		sc->force_hcm = val;
+
+	/* Grab instance variables */
+	bzero(oid, 80);
+	snprintf(tmpstr, sizeof(tmpstr), "dev.nhi.%d.debug_level",
+	    device_get_unit(sc->dev));
+	if (TUNABLE_STR_FETCH(tmpstr, oid, 80) != 0)
+		tb_parse_debug(&sc->debug, oid);
+	snprintf(tmpstr, sizeof(tmpstr), "dev.nhi.%d.max_rings",
+	    device_get_unit(sc->dev));
+	if (TUNABLE_INT_FETCH(tmpstr, &val) != 0) {
+		val = min(val, NHI_MAX_NUM_RINGS);
+		sc->max_ring_count = max(val, 1);
+	}
+	/*
+	 * Fix: the OID was misspelled "dev, nhi.%d.force_hcm", so the
+	 * per-instance force_hcm tunable could never match.
+	 */
+	snprintf(tmpstr, sizeof(tmpstr), "dev.nhi.%d.force_hcm",
+	    device_get_unit(sc->dev));
+	if (TUNABLE_INT_FETCH(tmpstr, &val) != 0)
+		sc->force_hcm = val;
+
+	return;
+}
+
+/*
+ * Decide whether the host connection manager capability is enabled.
+ * USB4 controllers get it by default; the force_hcm tunable can force
+ * the capability on or off regardless of the controller type.
+ */
+static void
+nhi_configure_caps(struct nhi_softc *sc)
+{
+
+	if ((sc->force_hcm == NHI_FORCE_HCM_ON) || NHI_IS_USB4(sc))
+		sc->caps |= NHI_CAP_HCM;
+	if (sc->force_hcm == NHI_FORCE_HCM_OFF)
+		sc->caps &= ~NHI_CAP_HCM;
+}
+
+/*
+ * Grab a free TX command frame from the ring under the ring lock.
+ * Returns NULL when no frame is available.
+ */
+struct nhi_cmd_frame *
+nhi_alloc_tx_frame(struct nhi_ring_pair *r)
+{
+	struct nhi_cmd_frame *frame;
+
+	mtx_lock(&r->mtx);
+	frame = nhi_alloc_tx_frame_locked(r);
+	mtx_unlock(&r->mtx);
+	return (frame);
+}
+
+/*
+ * Return a TX command frame to the ring's free list, taking the ring
+ * lock around the locked variant.
+ */
+void
+nhi_free_tx_frame(struct nhi_ring_pair *r, struct nhi_cmd_frame *cmd)
+{
+ mtx_lock(&r->mtx);
+ nhi_free_tx_frame_locked(r, cmd);
+ mtx_unlock(&r->mtx);
+}
+
+/*
+ * Push a command and data dword through the mailbox to the firmware.
+ * Response is either good, error, or timeout. Commands that return data
+ * do so by reading OUTMAILDATA.
+ *
+ * Returns 0 on success, EBUSY if the mailbox is already in use (either
+ * by this driver or by an in-flight firmware op), ETIMEDOUT if the
+ * firmware never acknowledges, or EINVAL if the firmware flags an error.
+ * Sleeps via DELAY(); not suitable for interrupt context.
+ */
+int
+nhi_inmail_cmd(struct nhi_softc *sc, uint32_t cmd, uint32_t data)
+{
+ uint32_t val;
+ u_int error, timeout;
+
+ mtx_lock(&sc->nhi_mtx);
+ /*
+ * XXX Should a defer/reschedule happen here, or is it not worth
+ * worrying about?
+ */
+ if (sc->hwflags & NHI_MBOX_BUSY) {
+ mtx_unlock(&sc->nhi_mtx);
+ tb_debug(sc, DBG_MBOX, "Driver busy with mailbox\n");
+ return (EBUSY);
+ }
+ sc->hwflags |= NHI_MBOX_BUSY;
+
+ val = nhi_read_reg(sc, TBT_INMAILCMD);
+ tb_debug(sc, DBG_MBOX|DBG_FULL, "Reading INMAILCMD= 0x%08x\n", val);
+ if (val & INMAILCMD_ERROR)
+ tb_debug(sc, DBG_MBOX, "Error already set in INMAILCMD\n");
+ if (val & INMAILCMD_OPREQ) {
+ mtx_unlock(&sc->nhi_mtx);
+ tb_debug(sc, DBG_MBOX,
+ "INMAILCMD request already in progress\n");
+ return (EBUSY);
+ }
+
+ /* Data must be staged before the command/doorbell write. */
+ nhi_write_reg(sc, TBT_INMAILDATA, data);
+ nhi_write_reg(sc, TBT_INMAILCMD, cmd | INMAILCMD_OPREQ);
+
+ /* Poll at 1s intervals */
+ timeout = NHI_MAILBOX_TIMEOUT;
+ while (timeout--) {
+ DELAY(1000000);
+ val = nhi_read_reg(sc, TBT_INMAILCMD);
+ tb_debug(sc, DBG_MBOX|DBG_EXTRA,
+ "Polling INMAILCMD= 0x%08x\n", val);
+ /* Firmware clears OPREQ when the command completes. */
+ if ((val & INMAILCMD_OPREQ) == 0)
+ break;
+ }
+ sc->hwflags &= ~NHI_MBOX_BUSY;
+ mtx_unlock(&sc->nhi_mtx);
+
+ /* OPREQ still set means the firmware never answered. */
+ error = 0;
+ if (val & INMAILCMD_OPREQ) {
+ tb_printf(sc, "Timeout waiting for mailbox\n");
+ error = ETIMEDOUT;
+ }
+ if (val & INMAILCMD_ERROR) {
+ tb_printf(sc, "Firmware reports error in mailbox\n");
+ error = EINVAL;
+ }
+
+ return (error);
+}
+
+/*
+ * Pull command status and data from the firmware mailbox.  Returns
+ * EINVAL if no result pointer was supplied, otherwise 0 with the
+ * OUTMAILCMD register contents stored in *val.
+ */
+int
+nhi_outmail_cmd(struct nhi_softc *sc, uint32_t *val)
+{
+
+	if (val != NULL) {
+		*val = nhi_read_reg(sc, TBT_OUTMAILCMD);
+		return (0);
+	}
+	return (EINVAL);
+}
+
+/*
+ * Core attach: set up sysctls, the control mutex, capabilities, and the
+ * Ring0 DMA channel, register the device interface, then start the host
+ * connection manager (if enabled) and initialize the controller.
+ * Returns 0 on success or an errno; caller is expected to run
+ * nhi_detach() on failure.
+ */
+int
+nhi_attach(struct nhi_softc *sc)
+{
+	uint32_t val;
+	int error = 0;
+
+	if ((error = nhi_setup_sysctl(sc)) != 0)
+		return (error);
+
+	mtx_init(&sc->nhi_mtx, "nhimtx", "NHI Control Mutex", MTX_DEF);
+
+	nhi_configure_caps(sc);
+
+	/*
+	 * Get the number of TX/RX paths. This sizes some of the register
+	 * arrays during allocation and initialization. USB4 spec says that
+	 * the max is 21. Alpine Ridge appears to default to 12.
+	 */
+	val = GET_HOST_CAPS_PATHS(nhi_read_reg(sc, NHI_HOST_CAPS));
+	tb_debug(sc, DBG_INIT|DBG_NOISY, "Total Paths= %d\n", val);
+	if ((val == 0) || (val > 21) || ((NHI_IS_AR(sc) && val != 12))) {
+		tb_printf(sc, "WARN: unexpected number of paths: %d\n", val);
+		/* return (ENXIO); */
+	}
+	sc->path_count = val;
+
+	SLIST_INIT(&sc->ring_list);
+
+	error = nhi_pci_configure_interrupts(sc);
+	if (error == 0)
+		error = nhi_alloc_ring0(sc);
+	if (error == 0) {
+		nhi_configure_ring(sc, sc->ring0);
+		nhi_activate_ring(sc->ring0);
+		nhi_fill_rx_ring(sc, sc->ring0);
+	}
+
+	if (error == 0)
+		error = tbdev_add_interface(sc);
+
+	if ((error == 0) && (NHI_USE_ICM(sc)))
+		tb_printf(sc, "WARN: device uses an internal connection manager\n");
+	/*
+	 * Fix: a stray ';' terminated the condition below, so hcm_attach()
+	 * ran unconditionally, even after earlier errors or when the host
+	 * connection manager was disabled.
+	 */
+	if ((error == 0) && (NHI_USE_HCM(sc)))
+		error = hcm_attach(sc);
+
+	if (error == 0)
+		error = nhi_init(sc);
+
+	return (error);
+}
+
+/*
+ * Tear down the controller in roughly reverse order of nhi_attach():
+ * connection manager, router tree, device interface, interrupts, and
+ * finally the DMA rings and the control mutex.  Always returns 0.
+ */
+int
+nhi_detach(struct nhi_softc *sc)
+{
+
+ if (NHI_USE_HCM(sc))
+ hcm_detach(sc);
+
+ if (sc->root_rsc != NULL)
+ tb_router_detach(sc->root_rsc);
+
+ tbdev_remove_interface(sc);
+
+ nhi_pci_disable_interrupts(sc);
+
+ nhi_free_ring0(sc);
+
+ /* XXX Should the rings be marked as !VALID in the descriptors? */
+ nhi_free_rings(sc);
+
+ mtx_destroy(&sc->nhi_mtx);
+
+ return (0);
+}
+
+/*
+ * bus_dmamap_load() callback: record the bus address of the single
+ * mapped segment, or 0 if the load failed or was fragmented.
+ */
+static void
+nhi_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+	bus_addr_t *busaddr = arg;
+
+	if (error != 0 || nsegs != 1)
+		*busaddr = 0;
+	else
+		*busaddr = segs[0].ds_addr;
+}
+
+/*
+ * Allocate a TX/RX ring pair: the tracking structure (with the TX and RX
+ * command pointer arrays appended), its mutex, and the DMA-able buffer
+ * descriptor rings.  Depths are assumed to be powers of two (masks are
+ * derived as depth - 1).  On success *rp is set and 0 is returned;
+ * errors return EINVAL/ENOMEM with all partial allocations released.
+ */
+static int
+nhi_alloc_ring(struct nhi_softc *sc, int ringnum, int tx_depth, int rx_depth,
+    struct nhi_ring_pair **rp)
+{
+	bus_dma_template_t t;
+	bus_addr_t ring_busaddr;
+	struct nhi_ring_pair *r;
+	int ring_size, error;
+	u_int rxring_len, txring_len;
+	char *ring;
+
+	if (ringnum >= sc->max_ring_count) {
+		tb_debug(sc, DBG_INIT, "Tried to allocate ring number %d\n",
+		    ringnum);
+		return (EINVAL);
+	}
+
+	/* Allocate the ring structure and the RX/TX ring trackers together. */
+	rxring_len = rx_depth * sizeof(void *);
+	txring_len = tx_depth * sizeof(void *);
+	r = malloc(sizeof(struct nhi_ring_pair) + rxring_len + txring_len,
+	    M_NHI, M_NOWAIT|M_ZERO);
+	if (r == NULL) {
+		tb_printf(sc, "ERROR: Cannot allocate ring memory\n");
+		return (ENOMEM);
+	}
+
+	r->sc = sc;
+	TAILQ_INIT(&r->tx_head);
+	TAILQ_INIT(&r->rx_head);
+	r->ring_num = ringnum;
+	r->tx_ring_depth = tx_depth;
+	r->tx_ring_mask = tx_depth - 1;
+	r->rx_ring_depth = rx_depth;
+	r->rx_ring_mask = rx_depth - 1;
+	r->rx_pici_reg = NHI_RX_RING_PICI + ringnum * 16;
+	r->tx_pici_reg = NHI_TX_RING_PICI + ringnum * 16;
+	r->rx_cmd_ring = (struct nhi_cmd_frame **)((uint8_t *)r + sizeof (*r));
+	r->tx_cmd_ring = (struct nhi_cmd_frame **)((uint8_t *)r->rx_cmd_ring +
+	    rxring_len);
+
+	/* Fix: the name previously had a stray '\n' embedded in it. */
+	snprintf(r->name, NHI_RING_NAMELEN, "nhiring%d", ringnum);
+	mtx_init(&r->mtx, r->name, "NHI Ring Lock", MTX_DEF);
+	tb_debug(sc, DBG_INIT | DBG_FULL, "Allocated ring context at %p, "
+	    "mutex %p\n", r, &r->mtx);
+
+	/* Allocate the RX and TX buffer descriptor rings */
+	ring_size = sizeof(struct nhi_tx_buffer_desc) * r->tx_ring_depth;
+	ring_size += sizeof(struct nhi_rx_buffer_desc) * r->rx_ring_depth;
+	tb_debug(sc, DBG_INIT | DBG_FULL, "Ring %d ring_size= %d\n",
+	    ringnum, ring_size);
+
+	bus_dma_template_init(&t, sc->parent_dmat);
+	t.alignment = 4;
+	t.maxsize = t.maxsegsize = ring_size;
+	t.nsegments = 1;
+	if ((error = bus_dma_template_tag(&t, &r->ring_dmat)) != 0) {
+		tb_printf(sc, "Cannot allocate ring %d DMA tag: %d\n",
+		    ringnum, error);
+		/* Fix: don't leak the ring structure on error. */
+		mtx_destroy(&r->mtx);
+		free(r, M_NHI);
+		return (ENOMEM);
+	}
+	if (bus_dmamem_alloc(r->ring_dmat, (void **)&ring, BUS_DMA_NOWAIT,
+	    &r->ring_map)) {
+		tb_printf(sc, "Cannot allocate ring memory\n");
+		/* Fix: release the tag and ring structure on error. */
+		bus_dma_tag_destroy(r->ring_dmat);
+		mtx_destroy(&r->mtx);
+		free(r, M_NHI);
+		return (ENOMEM);
+	}
+	bzero(ring, ring_size);
+	bus_dmamap_load(r->ring_dmat, r->ring_map, ring, ring_size,
+	    nhi_memaddr_cb, &ring_busaddr, 0);
+
+	r->ring = ring;
+
+	/* TX descriptors first, RX descriptors follow in the same buffer. */
+	r->tx_ring = (union nhi_ring_desc *)(ring);
+	r->tx_ring_busaddr = ring_busaddr;
+	ring += sizeof(struct nhi_tx_buffer_desc) * r->tx_ring_depth;
+	ring_busaddr += sizeof(struct nhi_tx_buffer_desc) * r->tx_ring_depth;
+
+	r->rx_ring = (union nhi_ring_desc *)(ring);
+	r->rx_ring_busaddr = ring_busaddr;
+
+	tb_debug(sc, DBG_INIT | DBG_EXTRA, "Ring %d: RX %p [0x%jx] "
+	    "TX %p [0x%jx]\n", ringnum, r->tx_ring, r->tx_ring_busaddr,
+	    r->rx_ring, r->rx_ring_busaddr);
+
+	*rp = r;
+	return (0);
+}
+
+/*
+ * Release one ring pair's hardware state and DMA resources: deactivate
+ * it in the controller, then unload/free/destroy the descriptor memory
+ * and its tag, and destroy the ring mutex.  The structure itself is
+ * freed by the caller (see nhi_free_rings()).
+ */
+static void
+nhi_free_ring(struct nhi_ring_pair *r)
+{
+
+ tb_debug(r->sc, DBG_INIT, "Freeing ring %d resources\n", r->ring_num);
+ nhi_deactivate_ring(r);
+
+ /* tx_ring_busaddr doubles as the "descriptor map is loaded" flag */
+ if (r->tx_ring_busaddr != 0) {
+ bus_dmamap_unload(r->ring_dmat, r->ring_map);
+ r->tx_ring_busaddr = 0;
+ }
+ if (r->ring != NULL) {
+ bus_dmamem_free(r->ring_dmat, r->ring, r->ring_map);
+ r->ring = NULL;
+ }
+ if (r->ring_dmat != NULL) {
+ bus_dma_tag_destroy(r->ring_dmat);
+ r->ring_dmat = NULL;
+ }
+ mtx_destroy(&r->mtx);
+}
+
+/*
+ * Free every ring pair on the softc's ring list.  Each ring's hardware
+ * and DMA resources are released before it is unlinked and freed.
+ * NOTE(review): the SLIST_FIRST peek happens outside nhi_mtx while the
+ * removal is locked — appears to rely on detach-time quiescence; confirm.
+ */
+static void
+nhi_free_rings(struct nhi_softc *sc)
+{
+ struct nhi_ring_pair *r;
+
+ while ((r = SLIST_FIRST(&sc->ring_list)) != NULL) {
+ nhi_free_ring(r);
+ mtx_lock(&sc->nhi_mtx);
+ SLIST_REMOVE_HEAD(&sc->ring_list, ring_link);
+ mtx_unlock(&sc->nhi_mtx);
+ free(r, M_NHI);
+ }
+
+ return;
+}
+
+/*
+ * Program one ring pair's base addresses and sizes into the controller
+ * register file.  Each ring number owns a 16-byte register stride.
+ * Always returns 0.
+ */
+static int
+nhi_configure_ring(struct nhi_softc *sc, struct nhi_ring_pair *ring)
+{
+ bus_addr_t busaddr;
+ uint32_t val;
+ int idx;
+
+ idx = ring->ring_num * 16;
+
+ /* Program the TX ring address and size */
+ busaddr = ring->tx_ring_busaddr;
+ nhi_write_reg(sc, NHI_TX_RING_ADDR_LO + idx, busaddr & 0xffffffff);
+ nhi_write_reg(sc, NHI_TX_RING_ADDR_HI + idx, busaddr >> 32);
+ nhi_write_reg(sc, NHI_TX_RING_SIZE + idx, ring->tx_ring_depth);
+ nhi_write_reg(sc, NHI_TX_RING_TABLE_TIMESTAMP + idx, 0x0);
+ tb_debug(sc, DBG_INIT, "TX Ring %d TX_RING_SIZE= 0x%x\n",
+ ring->ring_num, ring->tx_ring_depth);
+
+ /* Program the RX ring address and size */
+ busaddr = ring->rx_ring_busaddr;
+ /* RX size register packs buffer size (high 16) and depth (low 16) */
+ val = (ring->rx_buffer_size << 16) | ring->rx_ring_depth;
+ nhi_write_reg(sc, NHI_RX_RING_ADDR_LO + idx, busaddr & 0xffffffff);
+ nhi_write_reg(sc, NHI_RX_RING_ADDR_HI + idx, busaddr >> 32);
+ nhi_write_reg(sc, NHI_RX_RING_SIZE + idx, val);
+ nhi_write_reg(sc, NHI_RX_RING_TABLE_BASE1 + idx, 0xffffffff);
+ tb_debug(sc, DBG_INIT, "RX Ring %d RX_RING_SIZE= 0x%x\n",
+ ring->ring_num, val);
+
+ return (0);
+}
+
+/*
+ * Enable a ring pair in the controller: hook up its interrupt first,
+ * then mark the TX and RX table entries valid (raw mode).  The ring
+ * table registers use a 32-byte stride per ring.  Always returns 0.
+ */
+static int
+nhi_activate_ring(struct nhi_ring_pair *ring)
+{
+	struct nhi_softc *sc = ring->sc;
+	int offset;
+
+	nhi_pci_enable_interrupt(ring);
+
+	offset = ring->ring_num * 32;
+	tb_debug(sc, DBG_INIT, "Activating ring %d at idx %d\n",
+	    ring->ring_num, offset);
+	nhi_write_reg(sc, NHI_TX_RING_TABLE_BASE0 + offset,
+	    TX_TABLE_RAW | TX_TABLE_VALID);
+	nhi_write_reg(sc, NHI_RX_RING_TABLE_BASE0 + offset,
+	    RX_TABLE_RAW | RX_TABLE_VALID);
+
+	return (0);
+}
+
+/*
+ * Disable a ring pair in the controller: clear the TX/RX ring table
+ * entries (32-byte stride), then zero the ring sizes (16-byte stride).
+ * Always returns 0.
+ */
+static int
+nhi_deactivate_ring(struct nhi_ring_pair *r)
+{
+	struct nhi_softc *sc = r->sc;
+	int idx;
+
+	idx = r->ring_num * 32;
+	/* Fix: debug message typo "Deactiving" */
+	tb_debug(sc, DBG_INIT, "Deactivating ring %d at idx %d\n",
+	    r->ring_num, idx);
+	nhi_write_reg(sc, NHI_TX_RING_TABLE_BASE0 + idx, 0);
+	nhi_write_reg(sc, NHI_RX_RING_TABLE_BASE0 + idx, 0);
+
+	idx = r->ring_num * 16;
+	tb_debug(sc, DBG_INIT, "Setting ring %d sizes to 0\n", r->ring_num);
+	nhi_write_reg(sc, NHI_TX_RING_SIZE + idx, 0);
+	nhi_write_reg(sc, NHI_RX_RING_SIZE + idx, 0);
+
+	return (0);
+}
+
+/*
+ * Allocate and populate Ring0, the control channel used for router
+ * configuration traffic.  Sets up the ring pair, a single DMA buffer
+ * carved into fixed-size RX and TX frames, and the command trackers,
+ * then binds the ring to interrupt vector 0.
+ * Returns 0 on success or ENOMEM; partial allocations are cleaned up by
+ * nhi_free_ring0()/nhi_free_rings() during detach.
+ */
+static int
+nhi_alloc_ring0(struct nhi_softc *sc)
+{
+	bus_addr_t frames_busaddr;
+	bus_dma_template_t t;
+	struct nhi_intr_tracker *trkr;
+	struct nhi_ring_pair *r;
+	struct nhi_cmd_frame *cmd;
+	char *frames;
+	int error, size, i;
+
+	if ((error = nhi_alloc_ring(sc, 0, NHI_RING0_TX_DEPTH,
+	    NHI_RING0_RX_DEPTH, &r)) != 0) {
+		tb_printf(sc, "Error allocating control ring\n");
+		return (error);
+	}
+
+	r->rx_buffer_size = NHI_RING0_FRAME_SIZE;/* Control packets are small */
+
+	/* Allocate the RX and TX buffers that are used for Ring0 comms */
+	size = r->tx_ring_depth * NHI_RING0_FRAME_SIZE;
+	size += r->rx_ring_depth * NHI_RING0_FRAME_SIZE;
+
+	bus_dma_template_init(&t, sc->parent_dmat);
+	t.maxsize = t.maxsegsize = size;
+	t.nsegments = 1;
+	if (bus_dma_template_tag(&t, &sc->ring0_dmat)) {
+		tb_printf(sc, "Error allocating control ring buffer tag\n");
+		return (ENOMEM);
+	}
+
+	if (bus_dmamem_alloc(sc->ring0_dmat, (void **)&frames, BUS_DMA_NOWAIT,
+	    &sc->ring0_map) != 0) {
+		tb_printf(sc, "Error allocating control ring memory\n");
+		return (ENOMEM);
+	}
+	bzero(frames, size);
+	bus_dmamap_load(sc->ring0_dmat, sc->ring0_map, frames, size,
+	    nhi_memaddr_cb, &frames_busaddr, 0);
+	sc->ring0_frames_busaddr = frames_busaddr;
+	sc->ring0_frames = frames;
+
+	/* Allocate the driver command trackers */
+	sc->ring0_cmds = malloc(sizeof(struct nhi_cmd_frame) *
+	    (r->tx_ring_depth + r->rx_ring_depth), M_NHI, M_NOWAIT | M_ZERO);
+	if (sc->ring0_cmds == NULL)
+		return (ENOMEM);
+
+	/* Initialize the RX frames so they can be used */
+	mtx_lock(&r->mtx);
+	for (i = 0; i < r->rx_ring_depth; i++) {
+		cmd = &sc->ring0_cmds[i];
+		cmd->data = (uint32_t *)(frames + NHI_RING0_FRAME_SIZE * i);
+		cmd->data_busaddr = frames_busaddr + NHI_RING0_FRAME_SIZE * i;
+		cmd->flags = CMD_MAPPED;
+		cmd->idx = i;
+		TAILQ_INSERT_TAIL(&r->rx_head, cmd, cm_link);
+	}
+
+	/*
+	 * Initialize the TX frames.
+	 * Fix: the loop bound was off by one ("- 1"), which left the last
+	 * TX frame uninitialized and off the free list.
+	 */
+	for ( ; i < r->tx_ring_depth + r->rx_ring_depth; i++) {
+		cmd = &sc->ring0_cmds[i];
+		cmd->data = (uint32_t *)(frames + NHI_RING0_FRAME_SIZE * i);
+		cmd->data_busaddr = frames_busaddr + NHI_RING0_FRAME_SIZE * i;
+		cmd->flags = CMD_MAPPED;
+		cmd->idx = i;
+		nhi_free_tx_frame_locked(r, cmd);
+	}
+	mtx_unlock(&r->mtx);
+
+	/* Do a 1:1 mapping of rings to interrupt vectors. */
+	/* XXX Should be abstracted */
+	trkr = &sc->intr_trackers[0];
+	trkr->ring = r;
+	r->tracker = trkr;
+
+	/* XXX Should be an array */
+	sc->ring0 = r;
+	SLIST_INSERT_HEAD(&sc->ring_list, r, ring_link);
+
+	return (0);
+}
+
+/*
+ * Release Ring0's command trackers and frame buffer DMA resources.
+ * Each step is guarded so this is safe to call after a partial
+ * nhi_alloc_ring0() failure.  The ring pair itself is freed separately
+ * via nhi_free_rings().
+ */
+static void
+nhi_free_ring0(struct nhi_softc *sc)
+{
+ if (sc->ring0_cmds != NULL) {
+ free(sc->ring0_cmds, M_NHI);
+ sc->ring0_cmds = NULL;
+ }
+
+ /* Non-zero busaddr means the frame buffer map is still loaded */
+ if (sc->ring0_frames_busaddr != 0) {
+ bus_dmamap_unload(sc->ring0_dmat, sc->ring0_map);
+ sc->ring0_frames_busaddr = 0;
+ }
+
+ if (sc->ring0_frames != NULL) {
+ bus_dmamem_free(sc->ring0_dmat, sc->ring0_frames,
+ sc->ring0_map);
+ sc->ring0_frames = NULL;
+ }
+
+ if (sc->ring0_dmat != NULL)
+ bus_dma_tag_destroy(sc->ring0_dmat);
+
+ return;
+}
+
+/*
+ * Post every free RX command frame into the RX descriptor ring and hand
+ * the buffers to the hardware with a single consumer-index write at the
+ * end.  Called once at ring bring-up; assumes the ring population is
+ * fixed thereafter.
+ */
+static void
+nhi_fill_rx_ring(struct nhi_softc *sc, struct nhi_ring_pair *rp)
+{
+ struct nhi_cmd_frame *cmd;
+ struct nhi_rx_buffer_desc *desc;
+ u_int ci;
+
+ /* Assume that we never grow or shrink the ring population */
+ rp->rx_ci = ci = 0;
+ rp->rx_pi = 0;
+
+ do {
+ cmd = TAILQ_FIRST(&rp->rx_head);
+ if (cmd == NULL)
+ break;
+ TAILQ_REMOVE(&rp->rx_head, cmd, cm_link);
+ desc = &rp->rx_ring[ci].rx;
+ /* Buffers were pre-mapped at allocation time (CMD_MAPPED) */
+ if ((cmd->flags & CMD_MAPPED) == 0)
+ panic("Need rx buffer mapping code");
+
+ desc->addr_lo = cmd->data_busaddr & 0xffffffff;
+ desc->addr_hi = (cmd->data_busaddr >> 32) & 0xffffffff;
+ desc->offset = 0;
+ desc->flags = RX_BUFFER_DESC_RS | RX_BUFFER_DESC_IE;
+ rp->rx_ci = ci;
+ rp->rx_cmd_ring[ci] = cmd;
+ tb_debug(sc, DBG_RXQ | DBG_FULL,
+ "Updating ring%d ci= %d cmd= %p, busaddr= 0x%jx\n",
+ rp->ring_num, ci, cmd, cmd->data_busaddr);
+
+ /* Stop once the ring wraps back around to the producer index */
+ ci = (rp->rx_ci + 1) & rp->rx_ring_mask;
+ } while (ci != rp->rx_pi);
+
+ /* Update the CI in one shot */
+ tb_debug(sc, DBG_RXQ, "Writing RX CI= %d\n", rp->rx_ci);
+ nhi_write_reg(sc, rp->rx_pici_reg, rp->rx_ci);
+
+ return;
+}
+
+/*
+ * Final controller initialization: enable interrupt auto-clear, attach
+ * the root router (route 0), and schedule nhi_post_init() to run once
+ * interrupts are fully configured at boot.
+ * Returns 0 on success or an errno from the router attach / intrhook.
+ */
+static int
+nhi_init(struct nhi_softc *sc)
+{
+ tb_route_t root_route = {0x0, 0x0};
+ uint32_t val;
+ int error;
+
+ tb_debug(sc, DBG_INIT, "Initializing NHI\n");
+
+ /* Set interrupt Auto-ACK */
+ val = nhi_read_reg(sc, NHI_DMA_MISC);
+ tb_debug(sc, DBG_INIT|DBG_FULL, "Read NHI_DMA_MISC= 0x%08x\n", val);
+ val |= DMA_MISC_INT_AUTOCLEAR;
+ tb_debug(sc, DBG_INIT, "Setting interrupt auto-ACK, 0x%08x\n", val);
+ nhi_write_reg(sc, NHI_DMA_MISC, val);
+
+ if (NHI_IS_AR(sc) || NHI_IS_TR(sc) || NHI_IS_ICL(sc))
+ tb_printf(sc, "WARN: device uses an internal connection manager\n");
+
+ /*
+ * Populate the controller (local) UUID, necessary for cross-domain
+ * communications.
+ if (NHI_IS_ICL(sc))
+ nhi_pci_get_uuid(sc);
+ */
+
+ /*
+ * Attach the router to the root thunderbolt bridge now that the DMA
+ * channel is configured and ready.
+ * The root router always has a route of 0x0...0, so set it statically
+ * here.
+ */
+ if ((error = tb_router_attach_root(sc, root_route)) != 0)
+ tb_printf(sc, "tb_router_attach_root() error."
+ " The driver should be loaded at boot\n");
+
+ if (error == 0) {
+ /* Defer UUID reporting until interrupts are live at boot */
+ sc->ich.ich_func = nhi_post_init;
+ sc->ich.ich_arg = sc;
+ error = config_intrhook_establish(&sc->ich);
+ if (error)
+ tb_printf(sc, "Failed to establish config hook\n");
+ }
+
+ return (error);
+}
+
+/*
+ * Config-intrhook callback: report the root router's LC UUID and the
+ * controller UUID once the system is far enough along for config-space
+ * reads, then tear down the hook.
+ */
+static void
+nhi_post_init(void *arg)
+{
+ struct nhi_softc *sc;
+ uint8_t *u;
+ int error;
+
+ sc = (struct nhi_softc *)arg;
+ tb_debug(sc, DBG_INIT | DBG_EXTRA, "nhi_post_init\n");
+
+ bzero(sc->lc_uuid, 16);
+ error = tb_config_get_lc_uuid(sc->root_rsc, sc->lc_uuid);
+ if (error == 0) {
+ u = sc->lc_uuid;
+ /* Bytes are printed high-to-low; presumably matches the
+ * stored endianness — confirm against hcm's low-to-high form */
+ tb_printf(sc, "Root Router LC UUID: %02x%02x%02x%02x-"
+ "%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x\n",
+ u[15], u[14], u[13], u[12], u[11], u[10], u[9], u[8], u[7],
+ u[6], u[5], u[4], u[3], u[2], u[1], u[0]);
+ } else
+ tb_printf(sc, "Error finding LC registers: %d\n", error);
+
+ u = sc->uuid;
+ tb_printf(sc, "Root Router UUID: %02x%02x%02x%02x-"
+ "%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x\n",
+ u[15], u[14], u[13], u[12], u[11], u[10], u[9], u[8], u[7],
+ u[6], u[5], u[4], u[3], u[2], u[1], u[0]);
+
+ config_intrhook_disestablish(&sc->ich);
+}
+
+/*
+ * Place one command frame on the TX descriptor ring and ring the
+ * doorbell.  Returns EINVAL if the frame exceeds the 4KB descriptor
+ * limit, EBUSY if the ring is full, 0 on success.
+ */
+static int
+nhi_tx_enqueue(struct nhi_ring_pair *r, struct nhi_cmd_frame *cmd)
+{
+ struct nhi_softc *sc;
+ struct nhi_tx_buffer_desc *desc;
+ uint16_t pi;
+
+ sc = r->sc;
+
+ /* A length of 0 means 4096. Can't have longer lengths */
+ if (cmd->req_len > TX_BUFFER_DESC_LEN_MASK + 1) {
+ tb_debug(sc, DBG_TXQ, "Error: TX frame too big\n");
+ return (EINVAL);
+ }
+ cmd->req_len &= TX_BUFFER_DESC_LEN_MASK;
+
+ mtx_lock(&r->mtx);
+ desc = &r->tx_ring[r->tx_pi].tx;
+ pi = (r->tx_pi + 1) & r->tx_ring_mask;
+ /* PI catching up to CI means the ring is full */
+ if (pi == r->tx_ci) {
+ mtx_unlock(&r->mtx);
+ return (EBUSY);
+ }
+ r->tx_cmd_ring[r->tx_pi] = cmd;
+ r->tx_pi = pi;
+
+ desc->addr_lo = htole32(cmd->data_busaddr & 0xffffffff);
+ desc->addr_hi = htole32(cmd->data_busaddr >> 32);
+ desc->eof_len = htole16((cmd->pdf << TX_BUFFER_DESC_EOF_SHIFT) |
+ cmd->req_len);
+ desc->flags_sof = cmd->pdf | TX_BUFFER_DESC_IE | TX_BUFFER_DESC_RS;
+ desc->offset = 0;
+ desc->payload_time = 0;
+
+ tb_debug(sc, DBG_TXQ, "enqueue TXdescIdx= %d cmdidx= %d len= %d, "
+ "busaddr= 0x%jx\n", r->tx_pi, cmd->idx, cmd->req_len,
+ cmd->data_busaddr);
+
+ /* Doorbell: publish the new producer index to the hardware */
+ nhi_write_reg(sc, r->tx_pici_reg, pi << TX_RING_PI_SHIFT | r->tx_ci);
+ mtx_unlock(&r->mtx);
+ return (0);
+}
+
+/*
+ * No scheduling happens for now.  Ring0 scheduling is done in the TB
+ * layer.  On a full ring (EBUSY) the doorbell is re-written so the
+ * hardware sees the current producer index; the caller may retry.
+ */
+int
+nhi_tx_schedule(struct nhi_ring_pair *r, struct nhi_cmd_frame *cmd)
+{
+	int rv;
+
+	rv = nhi_tx_enqueue(r, cmd);
+	if (rv == EBUSY) {
+		nhi_write_reg(r->sc, r->tx_pici_reg,
+		    r->tx_pi << TX_RING_PI_SHIFT | r->tx_ci);
+	}
+	return (rv);
+}
+
+/*
+ * Send a command and wait for completion.  Polled commands (CMD_POLLED)
+ * spin and drive the interrupt handler manually; otherwise the caller
+ * sleeps until the completion path issues a wakeup or the timeout
+ * expires.  Returns 0 on success, or ERESTART / EWOULDBLOCK /
+ * ETIMEDOUT / another errno on failure.
+ */
+int
+nhi_tx_synchronous(struct nhi_ring_pair *r, struct nhi_cmd_frame *cmd)
+{
+	int error, count;
+
+	if ((error = nhi_tx_schedule(r, cmd)) != 0)
+		return (error);
+
+	if (cmd->flags & CMD_POLLED) {
+		error = 0;
+		count = cmd->timeout * 100;
+
+		/* Enter the loop at least once */
+		while ((count-- > 0) && (cmd->flags & CMD_REQ_COMPLETE) == 0) {
+			DELAY(10000);
+			rmb();
+			nhi_intr(r->tracker);
+		}
+	} else {
+		error = msleep(cmd, &r->mtx, PCATCH, "nhi_tx", cmd->timeout);
+		/*
+		 * Fix: this test was inverted (!= 0), which reported every
+		 * successful wakeup with a completed command as EWOULDBLOCK.
+		 * Only a wakeup without completion is a failure here.
+		 */
+		if ((error == 0) && (cmd->flags & CMD_REQ_COMPLETE) == 0)
+			error = EWOULDBLOCK;
+	}
+
+	if ((cmd->flags & CMD_REQ_COMPLETE) == 0)
+		error = ETIMEDOUT;
+
+	tb_debug(r->sc, DBG_TXQ|DBG_FULL, "tx_synchronous done waiting, "
+	    "err= %d, TX_COMPLETE= %d\n", error,
+	    !!(cmd->flags & CMD_REQ_COMPLETE));
+
+	if (error == ERESTART) {
+		tb_printf(r->sc, "TX command interrupted\n");
+	} else if ((error == EWOULDBLOCK) || (error == ETIMEDOUT)) {
+		tb_printf(r->sc, "TX command timed out\n");
+	} else if (error != 0) {
+		tb_printf(r->sc, "TX command failed error= %d\n", error);
+	}
+
+	return (error);
+}
+
+/*
+ * Handle one completed TX descriptor: mark the command complete and
+ * dispatch to the PDF-specific callback if one is registered, otherwise
+ * recycle the frame.  Always returns 0.
+ */
+static int
+nhi_tx_complete(struct nhi_ring_pair *r, struct nhi_tx_buffer_desc *desc,
+ struct nhi_cmd_frame *cmd)
+{
+ struct nhi_softc *sc;
+ struct nhi_pdf_dispatch *txpdf;
+ u_int sof;
+
+ sc = r->sc;
+ sof = desc->flags_sof & TX_BUFFER_DESC_SOF_MASK;
+ tb_debug(sc, DBG_TXQ, "Recovered TX pdf= %s cmdidx= %d flags= 0x%x\n",
+ tb_get_string(sof, nhi_frame_pdf), cmd->idx, desc->flags_sof);
+
+ if ((desc->flags_sof & TX_BUFFER_DESC_DONE) == 0)
+ tb_debug(sc, DBG_TXQ,
+ "warning, TX descriptor DONE flag not set\n");
+
+ /* XXX Atomics */
+ cmd->flags |= CMD_REQ_COMPLETE;
+
+ /* Callback owns the frame if registered for this PDF */
+ txpdf = &r->tracker->txpdf[sof];
+ if (txpdf->cb != NULL) {
+ tb_debug(sc, DBG_INTR|DBG_TXQ, "Calling PDF TX callback\n");
+ txpdf->cb(txpdf->context, (union nhi_ring_desc *)desc, cmd);
+ return (0);
+ }
+
+ tb_debug(sc, DBG_TXQ, "Unhandled TX complete %s\n",
+ tb_get_string(sof, nhi_frame_pdf));
+ nhi_free_tx_frame(r, cmd);
+
+ return (0);
+}
+
+/*
+ * Handle one received frame: dispatch to the PDF-specific RX callback
+ * if one is registered.  The buffer is re-posted by the caller, so a
+ * callback must copy any data it needs before returning.
+ * Always returns 0; unhandled frames are dropped with a debug message.
+ */
+static int
+nhi_rx_complete(struct nhi_ring_pair *r, struct nhi_rx_post_desc *desc,
+ struct nhi_cmd_frame *cmd)
+{
+ struct nhi_softc *sc;
+ struct nhi_pdf_dispatch *rxpdf;
+ u_int eof, len;
+
+ sc = r->sc;
+ /* eof_len packs the PDF (high bits) and payload length (low bits) */
+ eof = desc->eof_len >> RX_BUFFER_DESC_EOF_SHIFT;
+ len = desc->eof_len & RX_BUFFER_DESC_LEN_MASK;
+ tb_debug(sc, DBG_INTR|DBG_RXQ,
+ "Recovered RX pdf= %s len= %d cmdidx= %d, busaddr= 0x%jx\n",
+ tb_get_string(eof, nhi_frame_pdf), len, cmd->idx,
+ cmd->data_busaddr);
+
+ rxpdf = &r->tracker->rxpdf[eof];
+ if (rxpdf->cb != NULL) {
+ tb_debug(sc, DBG_INTR|DBG_RXQ, "Calling PDF RX callback\n");
+ rxpdf->cb(rxpdf->context, (union nhi_ring_desc *)desc, cmd);
+ return (0);
+ }
+
+ tb_debug(sc, DBG_INTR, "Unhandled RX frame %s\n",
+ tb_get_string(eof, nhi_frame_pdf));
+
+ return (0);
+}
+
+/*
+ * Install TX and RX dispatch callbacks, keyed by PDF (0-15), on a
+ * ring's interrupt tracker.  Each of tx/rx is an optional array
+ * terminated by a NULL cb.  Returns EINVAL for a bad PDF or missing
+ * tracker, EBUSY if a slot is already claimed, 0 on success.
+ * NOTE(review): a partial registration is not rolled back on error.
+ */
+int
+nhi_register_pdf(struct nhi_ring_pair *rp, struct nhi_dispatch *tx,
+ struct nhi_dispatch *rx)
+{
+ struct nhi_intr_tracker *trkr;
+ struct nhi_pdf_dispatch *slot;
+
+ KASSERT(rp != NULL, ("ring_pair is null\n"));
+ tb_debug(rp->sc, DBG_INTR|DBG_EXTRA, "nhi_register_pdf called\n");
+
+ trkr = rp->tracker;
+ if (trkr == NULL) {
+ tb_debug(rp->sc, DBG_INTR, "Invalid tracker\n");
+ return (EINVAL);
+ }
+
+ tb_debug(rp->sc, DBG_INTR|DBG_EXTRA, "Registering TX interrupts\n");
+ if (tx != NULL) {
+ while (tx->cb != NULL) {
+ if ((tx->pdf < 0) || (tx->pdf > 15))
+ return (EINVAL);
+ slot = &trkr->txpdf[tx->pdf];
+ if (slot->cb != NULL) {
+ tb_debug(rp->sc, DBG_INTR,
+ "Attempted to register busy callback\n");
+ return (EBUSY);
+ }
+ slot->cb = tx->cb;
+ slot->context = tx->context;
+ tb_debug(rp->sc, DBG_INTR,
+ "Registered TX callback for PDF %d\n", tx->pdf);
+ tx++;
+ }
+ }
+
+ tb_debug(rp->sc, DBG_INTR|DBG_EXTRA, "Registering RX interrupts\n");
+ if (rx != NULL) {
+ while (rx->cb != NULL) {
+ if ((rx->pdf < 0) || (rx->pdf > 15))
+ return (EINVAL);
+ slot = &trkr->rxpdf[rx->pdf];
+ if (slot->cb != NULL) {
+ tb_debug(rp->sc, DBG_INTR,
+ "Attempted to register busy callback\n");
+ return (EBUSY);
+ }
+ slot->cb = rx->cb;
+ slot->context = rx->context;
+ tb_debug(rp->sc, DBG_INTR,
+ "Registered RX callback for PDF %d\n", rx->pdf);
+ rx++;
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * Remove TX and RX dispatch callbacks, keyed by PDF (0-15), from a
+ * ring's interrupt tracker.  Mirrors nhi_register_pdf(): each of tx/rx
+ * is an optional NULL-cb-terminated array.  Returns EINVAL for a bad
+ * PDF or missing tracker, 0 otherwise.
+ */
+int
+nhi_deregister_pdf(struct nhi_ring_pair *rp, struct nhi_dispatch *tx,
+    struct nhi_dispatch *rx)
+{
+	struct nhi_intr_tracker *trkr;
+	struct nhi_pdf_dispatch *slot;
+
+	/* Fix: debug message was copy-pasted from nhi_register_pdf() */
+	tb_debug(rp->sc, DBG_INTR|DBG_EXTRA, "nhi_deregister_pdf called\n");
+
+	trkr = rp->tracker;
+	/* Fix: guard against a missing tracker, as nhi_register_pdf() does */
+	if (trkr == NULL) {
+		tb_debug(rp->sc, DBG_INTR, "Invalid tracker\n");
+		return (EINVAL);
+	}
+
+	if (tx != NULL) {
+		while (tx->cb != NULL) {
+			if ((tx->pdf < 0) || (tx->pdf > 15))
+				return (EINVAL);
+			slot = &trkr->txpdf[tx->pdf];
+			slot->cb = NULL;
+			slot->context = NULL;
+			tx++;
+		}
+	}
+
+	if (rx != NULL) {
+		while (rx->cb != NULL) {
+			if ((rx->pdf < 0) || (rx->pdf > 15))
+				return (EINVAL);
+			slot = &trkr->rxpdf[rx->pdf];
+			slot->cb = NULL;
+			slot->context = NULL;
+			rx++;
+		}
+	}
+
+	return (0);
+}
+
+/*
+ * The CI and PI indexes are not read from the hardware. We track them in
+ * software, so we know where in the ring to start a scan on an interrupt.
+ * All we have to do is check for the appropriate Done bit in the next
+ * descriptor, and we know if we have reached the last descriptor that the
+ * hardware touched. This technique saves at least 2 MEMIO reads per
+ * interrupt.
+ */
+void
+nhi_intr(void *data)
+{
+ union nhi_ring_desc *rxd;
+ struct nhi_cmd_frame *cmd;
+ struct nhi_intr_tracker *trkr = data;
+ struct nhi_softc *sc;
+ struct nhi_ring_pair *r;
+ struct nhi_tx_buffer_desc *txd;
+ uint32_t val, old_ci;
+ u_int count;
+
+ sc = trkr->sc;
+
+ tb_debug(sc, DBG_INTR|DBG_FULL, "Interrupt @ vector %d\n",
+ trkr->vector);
+ /* Vector not bound to a ring; nothing to scan */
+ if ((r = trkr->ring) == NULL)
+ return;
+
+ /*
+ * Process TX completions from the adapter. Only go through
+ * the ring once to prevent unbounded looping.
+ */
+ count = r->tx_ring_depth;
+ while (count-- > 0) {
+ txd = &r->tx_ring[r->tx_ci].tx;
+ if ((txd->flags_sof & TX_BUFFER_DESC_DONE) == 0)
+ break;
+ cmd = r->tx_cmd_ring[r->tx_ci];
+ tb_debug(sc, DBG_INTR|DBG_TXQ|DBG_FULL,
+ "Found tx cmdidx= %d cmd= %p\n", r->tx_ci, cmd);
+
+ /* Pass the completion up the stack */
+ nhi_tx_complete(r, txd, cmd);
+
+ /*
+ * Advance to the next item in the ring via the cached
+ * copy of the CI. Clear the flags so we can detect
+ * a new done condition the next time the ring wraps
+ * around. Anything higher up the stack that needs this
+ * field should have already copied it.
+ *
+ * XXX is a memory barrier needed?
+ */
+ txd->flags_sof = 0;
+ r->tx_ci = (r->tx_ci + 1) & r->tx_ring_mask;
+ }
+
+ /* Process RX packets from the adapter */
+ count = r->rx_ring_depth;
+ old_ci = r->rx_ci;
+
+ while (count-- > 0) {
+ tb_debug(sc, DBG_INTR|DBG_RXQ|DBG_FULL,
+ "Checking RX descriptor at %d\n", r->rx_pi);
+
+ /* Look up RX descriptor and cmd */
+ rxd = &r->rx_ring[r->rx_pi];
+ tb_debug(sc, DBG_INTR|DBG_RXQ|DBG_FULL,
+ "rx desc len= 0x%04x flags= 0x%04x\n", rxd->rxpost.eof_len,
+ rxd->rxpost.flags_sof);
+ if ((rxd->rxpost.flags_sof & RX_BUFFER_DESC_DONE) == 0)
+ break;
+ cmd = r->rx_cmd_ring[r->rx_pi];
+ tb_debug(sc, DBG_INTR|DBG_RXQ|DBG_FULL,
+ "Found rx cmdidx= %d cmd= %p\n", r->rx_pi, cmd);
+
+ /*
+ * Pass the RX frame up the stack. RX frames are re-used
+ * in-place, so their contents must be copied before this
+ * function returns.
+ *
+ * XXX Rings other than Ring0 might want to have a different
+ * re-use and re-populate policy
+ */
+ nhi_rx_complete(r, &rxd->rxpost, cmd);
+
+ /*
+ * Advance the CI and move forward to the next item in the
+ * ring via our cached copy of the PI. Clear out the
+ * length field so we can detect a new RX frame when the
+ * ring wraps around. Reset the flags of the descriptor.
+ */
+ rxd->rxpost.eof_len = 0;
+ rxd->rx.flags = RX_BUFFER_DESC_RS | RX_BUFFER_DESC_IE;
+ r->rx_ci = (r->rx_ci + 1) & r->rx_ring_mask;
+ r->rx_pi = (r->rx_pi + 1) & r->rx_ring_mask;
+ }
+
+ /*
+ * Tell the firmware about the new RX CI
+ *
+ * XXX There's a chance this will overwrite an update to the PI.
+ * Is that OK? We keep our own copy of the PI and never read it from
+ * hardware. However, will overwriting it result in a missed
+ * interrupt?
+ */
+ if (r->rx_ci != old_ci) {
+ val = r->rx_pi << RX_RING_PI_SHIFT | r->rx_ci;
+ tb_debug(sc, DBG_INTR | DBG_RXQ,
+ "Writing new RX PICI= 0x%08x\n", val);
+ nhi_write_reg(sc, r->rx_pici_reg, val);
+ }
+}
+
+/*
+ * Create the per-device sysctl nodes (debug_level, max_rings,
+ * force_hcm) under the device's sysctl tree.  Returns EINVAL if the
+ * tree cannot be found, since later code relies on these nodes.
+ */
+static int
+nhi_setup_sysctl(struct nhi_softc *sc)
+{
+ struct sysctl_ctx_list *ctx = NULL;
+ struct sysctl_oid *tree = NULL;
+
+ ctx = device_get_sysctl_ctx(sc->dev);
+ if (ctx != NULL)
+ tree = device_get_sysctl_tree(sc->dev);
+
+ /*
+ * Not being able to create sysctls is going to hamper other
+ * parts of the driver.
+ */
+ if (tree == NULL) {
+ tb_printf(sc, "Error: cannot create sysctl nodes\n");
+ return (EINVAL);
+ }
+ sc->sysctl_tree = tree;
+ sc->sysctl_ctx = ctx;
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
+ OID_AUTO, "debug_level", CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_MPSAFE,
+ &sc->debug, 0, tb_debug_sysctl, "A", "Thunderbolt debug level");
+ SYSCTL_ADD_U16(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "max_rings", CTLFLAG_RD, &sc->max_ring_count, 0,
+ "Max number of rings available");
+ SYSCTL_ADD_U8(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "force_hcm", CTLFLAG_RD, &sc->force_hcm, 0,
+ "Force on/off the function of the host connection manager");
+
+ return (0);
+}
diff --git a/sys/dev/thunderbolt/nhi_pci.c b/sys/dev/thunderbolt/nhi_pci.c
new file mode 100644
index 000000000000..7dacff523cef
--- /dev/null
+++ b/sys/dev/thunderbolt/nhi_pci.c
@@ -0,0 +1,529 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_thunderbolt.h"
+
+/* PCIe interface for Thunderbolt Native Host Interface */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/param.h>
+#include <sys/endian.h>
+#include <sys/taskqueue.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/stdarg.h>
+#include <sys/rman.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pci_private.h>
+
+#include <dev/thunderbolt/tb_reg.h>
+#include <dev/thunderbolt/nhi_reg.h>
+#include <dev/thunderbolt/nhi_var.h>
+#include <dev/thunderbolt/tbcfg_reg.h>
+#include <dev/thunderbolt/router_var.h>
+#include <dev/thunderbolt/tb_debug.h>
+#include "tb_if.h"
+
+static int nhi_pci_probe(device_t);
+static int nhi_pci_attach(device_t);
+static int nhi_pci_detach(device_t);
+static int nhi_pci_suspend(device_t);
+static int nhi_pci_resume(device_t);
+static void nhi_pci_free(struct nhi_softc *);
+static int nhi_pci_allocate_interrupts(struct nhi_softc *);
+static void nhi_pci_free_interrupts(struct nhi_softc *);
+static int nhi_pci_icl_poweron(struct nhi_softc *);
+
+static device_method_t nhi_methods[] = {
+ DEVMETHOD(device_probe, nhi_pci_probe),
+ DEVMETHOD(device_attach, nhi_pci_attach),
+ DEVMETHOD(device_detach, nhi_pci_detach),
+ DEVMETHOD(device_suspend, nhi_pci_suspend),
+ DEVMETHOD(device_resume, nhi_pci_resume),
+
+ DEVMETHOD(tb_find_ufp, tb_generic_find_ufp),
+ DEVMETHOD(tb_get_debug, tb_generic_get_debug),
+
+ DEVMETHOD_END
+};
+
+static driver_t nhi_pci_driver = {
+ "nhi",
+ nhi_methods,
+ sizeof(struct nhi_softc)
+};
+
+struct nhi_ident {
+ uint16_t vendor;
+ uint16_t device;
+ uint16_t subvendor;
+ uint16_t subdevice;
+ uint32_t flags;
+ const char *desc;
+} nhi_identifiers[] = {
+ { VENDOR_INTEL, DEVICE_AR_2C_NHI, 0xffff, 0xffff, NHI_TYPE_AR,
+ "Thunderbolt 3 NHI (Alpine Ridge 2C)" },
+ { VENDOR_INTEL, DEVICE_AR_DP_B_NHI, 0xffff, 0xffff, NHI_TYPE_AR,
+ "Thunderbolt 3 NHI (Alpine Ridge 4C Rev B)" },
+ { VENDOR_INTEL, DEVICE_AR_DP_C_NHI, 0xffff, 0xffff, NHI_TYPE_AR,
+ "Thunderbolt 3 NHI (Alpine Ridge 4C Rev C)" },
+ { VENDOR_INTEL, DEVICE_AR_LP_NHI, 0xffff, 0xffff, NHI_TYPE_AR,
+ "Thunderbolt 3 NHI (Alpine Ridge LP 2C)" },
+ { VENDOR_INTEL, DEVICE_ICL_NHI_0, 0xffff, 0xffff, NHI_TYPE_ICL,
+ "Thunderbolt 3 NHI Port 0 (IceLake)" },
+ { VENDOR_INTEL, DEVICE_ICL_NHI_1, 0xffff, 0xffff, NHI_TYPE_ICL,
+ "Thunderbolt 3 NHI Port 1 (IceLake)" },
+ { VENDOR_AMD, DEVICE_PINK_SARDINE_0, 0xffff, 0xffff, NHI_TYPE_USB4,
+ "USB4 NHI Port 0 (Pink Sardine)" },
+ { VENDOR_AMD, DEVICE_PINK_SARDINE_1, 0xffff, 0xffff, NHI_TYPE_USB4,
+ "USB4 NHI Port 1 (Pink Sardine)" },
+ { 0, 0, 0, 0, 0, NULL }
+};
+
+DRIVER_MODULE_ORDERED(nhi, pci, nhi_pci_driver, NULL, NULL,
+ SI_ORDER_ANY);
+MODULE_PNP_INFO("U16:vendor;U16:device;V16:subvendor;V16:subdevice;U32:#;D:#",
+ pci, nhi, nhi_identifiers, nitems(nhi_identifiers) - 1);
+
+/*
+ * Match 'dev' against the nhi_identifiers table by PCI vendor/device ID.
+ * A subvendor/subdevice of 0xffff in a table entry acts as a wildcard.
+ * Returns the matching entry, or NULL if the device is not supported.
+ */
+static struct nhi_ident *
+nhi_find_ident(device_t dev)
+{
+	struct nhi_ident *n;
+
+	for (n = nhi_identifiers; n->vendor != 0; n++) {
+		if (n->vendor != pci_get_vendor(dev))
+			continue;
+		if (n->device != pci_get_device(dev))
+			continue;
+		if ((n->subvendor != 0xffff) &&
+		    (n->subvendor != pci_get_subvendor(dev)))
+			continue;
+		if ((n->subdevice != 0xffff) &&
+		    (n->subdevice != pci_get_subdevice(dev)))
+			continue;
+		return (n);
+	}
+
+	return (NULL);
+}
+
+/*
+ * Probe: claim the device if it appears in the ident table, unless the
+ * "tb" device has been administratively disabled via device hints.
+ */
+static int
+nhi_pci_probe(device_t dev)
+{
+	struct nhi_ident *n;
+
+	if (resource_disabled("tb", 0))
+		return (ENXIO);
+	if ((n = nhi_find_ident(dev)) != NULL) {
+		device_set_desc(dev, n->desc);
+		return (BUS_PROBE_DEFAULT);
+	}
+	return (ENXIO);
+}
+
+/*
+ * Attach: locate the Upstream Facing Port, power on IceLake parts, map
+ * BAR0, create the parent DMA tag, allocate MSI-X vectors, and hand off
+ * to nhi_attach() for controller setup.  On any failure after interrupt
+ * allocation, nhi_pci_detach() unwinds whatever was set up.
+ */
+static int
+nhi_pci_attach(device_t dev)
+{
+	devclass_t dc;
+	bus_dma_template_t t;
+	struct nhi_softc *sc;
+	struct nhi_ident *n;
+	int error = 0;
+
+	sc = device_get_softc(dev);
+	bzero(sc, sizeof(*sc));
+	sc->dev = dev;
+	/*
+	 * NOTE(review): n is dereferenced without a NULL check; this relies
+	 * on probe having matched the same table — confirm attach cannot be
+	 * reached via a wildcard/forced attach path.
+	 */
+	n = nhi_find_ident(dev);
+	sc->hwflags = n->flags;
+	nhi_get_tunables(sc);
+
+	tb_debug(sc, DBG_INIT|DBG_FULL, "busmaster status was %s\n",
+	    (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_BUSMASTEREN)
+	    ? "enabled" : "disabled");
+	pci_enable_busmaster(dev);
+
+	/*
+	 * Ask the parent bus for the UFP; if that fails, fall back to
+	 * pairing with the same-numbered unit in the "tbolt" devclass.
+	 */
+	sc->ufp = NULL;
+	if ((TB_FIND_UFP(dev, &sc->ufp) != 0) || (sc->ufp == NULL)) {
+		dc = devclass_find("tbolt");
+		if (dc != NULL)
+			sc->ufp = devclass_get_device(dc, device_get_unit(dev));
+	}
+	if (sc->ufp == NULL)
+		tb_printf(sc, "Cannot find Upstream Facing Port\n");
+	else
+		tb_printf(sc, "Upstream Facing Port is %s\n",
+		    device_get_nameunit(sc->ufp));
+
+	/* IceLake needs the force-power handshake before MMIO is usable */
+	if (NHI_IS_ICL(sc)) {
+		if ((error = nhi_pci_icl_poweron(sc)) != 0)
+			return (error);
+	}
+
+
+	/* Allocate BAR0 DMA registers */
+	sc->regs_rid = PCIR_BAR(0);
+	if ((sc->regs_resource = bus_alloc_resource_any(dev,
+	    SYS_RES_MEMORY, &sc->regs_rid, RF_ACTIVE)) == NULL) {
+		tb_printf(sc, "Cannot allocate PCI registers\n");
+		return (ENXIO);
+	}
+	sc->regs_btag = rman_get_bustag(sc->regs_resource);
+	sc->regs_bhandle = rman_get_bushandle(sc->regs_resource);
+
+	/* Allocate parent DMA tag */
+	bus_dma_template_init(&t, bus_get_dma_tag(dev));
+	if (bus_dma_template_tag(&t, &sc->parent_dmat) != 0) {
+		tb_printf(sc, "Cannot allocate parent DMA tag\n");
+		nhi_pci_free(sc);
+		return (ENOMEM);
+	}
+
+	error = nhi_pci_allocate_interrupts(sc);
+	if (error == 0)
+		error = nhi_attach(sc);
+	if (error != 0)
+		nhi_pci_detach(sc->dev);
+	return (error);
+}
+
+/*
+ * Detach: tear down the core driver state, then release all PCI
+ * resources.  Also used as the unwind path from a failed attach.
+ */
+static int
+nhi_pci_detach(device_t dev)
+{
+	struct nhi_softc *sc;
+
+	sc = device_get_softc(dev);
+
+	nhi_detach(sc);
+	nhi_pci_free(sc);
+
+	return (0);
+}
+
+/* Suspend placeholder; no device state is saved yet. */
+static int
+nhi_pci_suspend(device_t dev)
+{
+
+	return (0);
+}
+
+/* Resume placeholder; no device state is restored yet. */
+static int
+nhi_pci_resume(device_t dev)
+{
+
+	return (0);
+}
+
+/*
+ * Release PCI resources: interrupts, the parent DMA tag, and the BAR0
+ * register mapping.  Each field is NULL-checked and cleared, so this is
+ * safe to call on a partially initialized softc.
+ */
+static void
+nhi_pci_free(struct nhi_softc *sc)
+{
+
+	nhi_pci_free_interrupts(sc);
+
+	if (sc->parent_dmat != NULL) {
+		bus_dma_tag_destroy(sc->parent_dmat);
+		sc->parent_dmat = NULL;
+	}
+
+	if (sc->regs_resource != NULL) {
+		bus_release_resource(sc->dev, SYS_RES_MEMORY,
+		    sc->regs_rid, sc->regs_resource);
+		sc->regs_resource = NULL;
+	}
+
+	return;
+}
+
+/*
+ * Allocate MSI-X vectors, clamped to [1, NHI_MSIX_MAX].  The PBA and
+ * vector-table BARs are mapped first if the device reports them.  The
+ * number of messages actually granted is recorded in sc->msix_count
+ * (0 on failure).  Returns 0 on success or the pci_alloc_msix() error.
+ */
+static int
+nhi_pci_allocate_interrupts(struct nhi_softc *sc)
+{
+	int msgs, error = 0;
+
+	/* Map the Pending Bit Array and Vector Table BARs for MSI-X */
+	sc->irq_pba_rid = pci_msix_pba_bar(sc->dev);
+	sc->irq_table_rid = pci_msix_table_bar(sc->dev);
+
+	if (sc->irq_pba_rid != -1)
+		sc->irq_pba = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
+		    &sc->irq_pba_rid, RF_ACTIVE);
+	if (sc->irq_table_rid != -1)
+		sc->irq_table = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
+		    &sc->irq_table_rid, RF_ACTIVE);
+
+	msgs = pci_msix_count(sc->dev);
+	tb_debug(sc, DBG_INIT|DBG_INTR|DBG_FULL,
+	    "Counted %d MSI-X messages\n", msgs);
+	msgs = min(msgs, NHI_MSIX_MAX);
+	msgs = max(msgs, 1);
+	/* Always true after the clamp above; one vector is always tried */
+	if (msgs != 0) {
+		tb_debug(sc, DBG_INIT|DBG_INTR, "Attempting to allocate %d "
+		    "MSI-X interrupts\n", msgs);
+		error = pci_alloc_msix(sc->dev, &msgs);
+		tb_debug(sc, DBG_INIT|DBG_INTR|DBG_FULL,
+		    "pci_alloc_msix return msgs= %d, error= %d\n", msgs, error);
+	}
+
+	if ((error != 0) || (msgs <= 0)) {
+		tb_printf(sc, "Failed to allocate any interrupts\n");
+		msgs = 0;
+	}
+
+	sc->msix_count = msgs;
+	return (error);
+}
+
+/*
+ * Tear down interrupt handlers and release IRQ resources, the MSI-X
+ * allocation, the table/PBA BAR mappings, and the tracker array that
+ * nhi_pci_configure_interrupts() allocated.
+ */
+static void
+nhi_pci_free_interrupts(struct nhi_softc *sc)
+{
+	int i;
+
+	for (i = 0; i < sc->msix_count; i++) {
+		bus_teardown_intr(sc->dev, sc->irqs[i], sc->intrhand[i]);
+		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid[i],
+		    sc->irqs[i]);
+	}
+
+	pci_release_msi(sc->dev);
+
+	if (sc->irq_table != NULL) {
+		bus_release_resource(sc->dev, SYS_RES_MEMORY,
+		    sc->irq_table_rid, sc->irq_table);
+		sc->irq_table = NULL;
+	}
+
+	if (sc->irq_pba != NULL) {
+		bus_release_resource(sc->dev, SYS_RES_MEMORY,
+		    sc->irq_pba_rid, sc->irq_pba);
+		sc->irq_pba = NULL;
+	}
+
+	if (sc->intr_trackers != NULL)
+		free(sc->intr_trackers, M_NHI);
+	return;
+}
+
+/*
+ * Allocate one interrupt tracker per MSI-X message and set up the IRQ
+ * resource and handler for each vector; the tracker is the per-vector
+ * context handed to nhi_intr().  Also programs a 128us interrupt
+ * throttle on all 16 ITR registers.
+ *
+ * Returns 0 on success or an errno on failure.  (error is initialized
+ * to 0 so that an empty loop or an early break from a failed resource
+ * allocation cannot return an uninitialized value.)
+ */
+int
+nhi_pci_configure_interrupts(struct nhi_softc *sc)
+{
+	struct nhi_intr_tracker *trkr;
+	int rid, i, error;
+
+	nhi_pci_disable_interrupts(sc);
+
+	sc->intr_trackers = malloc(sizeof(struct nhi_intr_tracker) *
+	    sc->msix_count, M_NHI, M_ZERO | M_NOWAIT);
+	if (sc->intr_trackers == NULL) {
+		tb_debug(sc, DBG_INIT, "Cannot allocate intr trackers\n");
+		return (ENOMEM);
+	}
+
+	error = 0;
+	for (i = 0; i < sc->msix_count; i++) {
+		rid = i + 1;	/* SYS_RES_IRQ rids for MSI-X are 1-based */
+		trkr = &sc->intr_trackers[i];
+		trkr->sc = sc;
+		trkr->ring = NULL;
+		trkr->vector = i;
+
+		sc->irq_rid[i] = rid;
+		sc->irqs[i] = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
+		    &sc->irq_rid[i], RF_ACTIVE);
+		if (sc->irqs[i] == NULL) {
+			tb_debug(sc, DBG_INIT,
+			    "Cannot allocate interrupt RID %d\n",
+			    sc->irq_rid[i]);
+			error = ENOMEM;
+			break;
+		}
+		error = bus_setup_intr(sc->dev, sc->irqs[i], INTR_TYPE_BIO |
+		    INTR_MPSAFE, NULL, nhi_intr, trkr, &sc->intrhand[i]);
+		if (error) {
+			tb_debug(sc, DBG_INIT,
+			    "cannot setup interrupt RID %d\n", sc->irq_rid[i]);
+			break;
+		}
+	}
+
+	tb_debug(sc, DBG_INIT, "Set up %d interrupts\n", sc->msix_count);
+
+	/* Set the interrupt throttle rate to 128us */
+	for (i = 0; i < 16; i ++)
+		nhi_write_reg(sc, NHI_ITR0 + i * 4, 0x1f4);
+
+	return (error);
+}
+
+/*
+ * Replace a bit field at 'offset' (in bits, across the caller's ivr[]
+ * word array) with 'val' under 'mask'.  NOTE: mutates its 'offset'
+ * argument (offset %= 32) and requires locals 'reg' and 'ivr[]' to be
+ * in scope at the expansion site.
+ */
+#define NHI_SET_INTERRUPT(offset, mask, val)	\
+do {						\
+	reg = offset / 32;			\
+	offset %= 32;				\
+	ivr[reg] &= ~(mask << offset);		\
+	ivr[reg] |= (val << offset);		\
+} while (0)
+
+/*
+ * Route this ring's TX and RX events to the MSI-X vector matching its
+ * ring number (Nearly Empty always goes to vector 15), then unmask the
+ * ring's bits in the Interrupt Mask Registers.
+ */
+void
+nhi_pci_enable_interrupt(struct nhi_ring_pair *r)
+{
+	struct nhi_softc *sc = r->sc;
+	uint32_t ivr[5];
+	u_int offset, reg;
+
+	tb_debug(sc, DBG_INIT|DBG_INTR, "Enabling interrupts for ring %d\n",
+	    r->ring_num);
+	/*
+	 * Compute the routing between event type and MSI-X vector.
+	 * 4 bits per descriptor.
+	 */
+	ivr[0] = nhi_read_reg(sc, NHI_IVR0);
+	ivr[1] = nhi_read_reg(sc, NHI_IVR1);
+	ivr[2] = nhi_read_reg(sc, NHI_IVR2);
+	ivr[3] = nhi_read_reg(sc, NHI_IVR3);
+	ivr[4] = nhi_read_reg(sc, NHI_IVR4);
+
+	/* Program TX */
+	offset = (r->ring_num + IVR_TX_OFFSET) * 4;
+	NHI_SET_INTERRUPT(offset, 0x0f, r->ring_num);
+
+	/* Now program RX */
+	offset = (r->ring_num + IVR_RX_OFFSET) * 4;
+	NHI_SET_INTERRUPT(offset, 0x0f, r->ring_num);
+
+	/* Last, program Nearly Empty.  This one always goes to vector 15 */
+	offset = (r->ring_num + IVR_NE_OFFSET) * 4;
+	NHI_SET_INTERRUPT(offset, 0x0f, 0x0f);
+
+	nhi_write_reg(sc, NHI_IVR0, ivr[0]);
+	nhi_write_reg(sc, NHI_IVR1, ivr[1]);
+	nhi_write_reg(sc, NHI_IVR2, ivr[2]);
+	nhi_write_reg(sc, NHI_IVR3, ivr[3]);
+	nhi_write_reg(sc, NHI_IVR4, ivr[4]);
+
+	tb_debug(sc, DBG_INIT|DBG_INTR|DBG_FULL,
+	    "Wrote IVR 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+	    ivr[0], ivr[1], ivr[2], ivr[3], ivr[4]);
+
+	/* Now do the Interrupt Mask Register, 1 bit per descriptor */
+	ivr[0] = nhi_read_reg(sc, NHI_IMR0);
+	ivr[1] = nhi_read_reg(sc, NHI_IMR1);
+
+	/* Tx */
+	offset = r->ring_num + IMR_TX_OFFSET;
+	NHI_SET_INTERRUPT(offset, 0x01, 1);
+
+	/* Rx */
+	offset = r->ring_num + IMR_RX_OFFSET;
+	NHI_SET_INTERRUPT(offset, 0x01, 1);
+
+	/* NE */
+	offset = r->ring_num + IMR_NE_OFFSET;
+	NHI_SET_INTERRUPT(offset, 0x01, 1);
+
+	nhi_write_reg(sc, NHI_IMR0, ivr[0]);
+	nhi_write_reg(sc, NHI_IMR1, ivr[1]);
+	tb_debug(sc, DBG_INIT|DBG_FULL,
+	    "Wrote IMR 0x%08x 0x%08x\n", ivr[0], ivr[1]);
+}
+
+/*
+ * Mask all ring interrupts, clear all vector routing, and drain any
+ * pending status (the ISR registers are clear-on-read).
+ */
+void
+nhi_pci_disable_interrupts(struct nhi_softc *sc)
+{
+
+	tb_debug(sc, DBG_INIT, "Disabling interrupts\n");
+	nhi_write_reg(sc, NHI_IMR0, 0);
+	nhi_write_reg(sc, NHI_IMR1, 0);
+	nhi_write_reg(sc, NHI_IVR0, 0);
+	nhi_write_reg(sc, NHI_IVR1, 0);
+	nhi_write_reg(sc, NHI_IVR2, 0);
+	nhi_write_reg(sc, NHI_IVR3, 0);
+	nhi_write_reg(sc, NHI_IVR4, 0);
+
+	/* Dummy reads to clear pending bits */
+	nhi_read_reg(sc, NHI_ISR0);
+	nhi_read_reg(sc, NHI_ISR1);
+}
+
+/*
+ * Icelake controllers need to be notified of power-on: if the firmware
+ * is not already up, assert FORCEPWR in vendor capability 22 and poll
+ * for FWREADY.  Returns 0 once ready, ETIMEDOUT otherwise.
+ *
+ * NOTE(review): the poll busy-waits via DELAY() for up to 15 seconds;
+ * acceptable only because this runs once from attach context.
+ */
+static int
+nhi_pci_icl_poweron(struct nhi_softc *sc)
+{
+	device_t dev;
+	uint32_t val;
+	int i, error = 0;
+
+	dev = sc->dev;
+	val = pci_read_config(dev, ICL_VSCAP_9, 4);
+	tb_debug(sc, DBG_INIT, "icl_poweron val= 0x%x\n", val);
+	if (val & ICL_VSCAP9_FWREADY)
+		return (0);
+
+	val = pci_read_config(dev, ICL_VSCAP_22, 4);
+	val |= ICL_VSCAP22_FORCEPWR;
+	tb_debug(sc, DBG_INIT|DBG_FULL, "icl_poweron writing 0x%x\n", val);
+	pci_write_config(dev, ICL_VSCAP_22, val, 4);
+
+	/* Poll once per second, 15 tries */
+	error = ETIMEDOUT;
+	for (i = 0; i < 15; i++) {
+		DELAY(1000000);
+		val = pci_read_config(dev, ICL_VSCAP_9, 4);
+		if (val & ICL_VSCAP9_FWREADY) {
+			error = 0;
+			break;
+		}
+	}
+
+	return (error);
+}
+
+/*
+ * Icelake and Alderlake controllers store their UUID in PCI config space.
+ * Only the low 64 bits come from the device (VSCAP 10/11); the high 64
+ * bits are filled with all-ones.  Always returns 0.
+ */
+int
+nhi_pci_get_uuid(struct nhi_softc *sc)
+{
+	device_t dev;
+	uint32_t val[4];
+
+	dev = sc->dev;
+	val[0] = pci_read_config(dev, ICL_VSCAP_10, 4);
+	val[1] = pci_read_config(dev, ICL_VSCAP_11, 4);
+	val[2] = 0xffffffff;
+	val[3] = 0xffffffff;
+
+	bcopy(val, &sc->uuid, 16);
+	return (0);
+}
diff --git a/sys/dev/thunderbolt/nhi_reg.h b/sys/dev/thunderbolt/nhi_reg.h
new file mode 100644
index 000000000000..6e71f4c9646b
--- /dev/null
+++ b/sys/dev/thunderbolt/nhi_reg.h
@@ -0,0 +1,332 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Thunderbolt 3 register definitions
+ */
+
+/* $FreeBSD$ */
+
+#ifndef _NHI_REG_H
+#define _NHI_REG_H
+
+/* Some common definitions */
+#define TBT_SEC_NONE 0x00
+#define TBT_SEC_USER 0x01
+#define TBT_SEC_SECURE 0x02
+#define TBT_SEC_DP 0x03
+
+#define GENMASK(h, l) (((~0U) >> (31 - (h))) ^ ((~0U) >> (31 - (l)) >> 1))
+
+/* PCI Vendor and Device ID's */
+#define VENDOR_INTEL 0x8086
+#define DEVICE_AR_2C_NHI 0x1575
+#define DEVICE_AR_DP_B_NHI 0x1577
+#define DEVICE_AR_DP_C_NHI 0x15d2
+#define DEVICE_AR_LP_NHI 0x15bf
+#define DEVICE_ICL_NHI_0 0x8a17
+#define DEVICE_ICL_NHI_1 0x8a0d
+
+#define VENDOR_AMD 0x1022
+#define DEVICE_PINK_SARDINE_0 0x1668
+#define DEVICE_PINK_SARDINE_1 0x1669
+
+/* * * MMIO Registers
+ * * Ring buffer registers
+ *
+ * 32 transmit and receive rings are available, with Ring 0 being the most
+ * important one. The ring descriptors are 16 bytes each, and each set of
+ * TX and RX descriptors are packed together. There are only definitions
+ * for the Ring 0 addresses, others can be directly computed.
+ */
+#define NHI_TX_RING_ADDR_LO 0x00000
+#define NHI_TX_RING_ADDR_HI 0x00004
+#define NHI_TX_RING_PICI 0x00008
+#define TX_RING_CI_MASK GENMASK(15, 0)
+#define TX_RING_PI_SHIFT 16
+#define NHI_TX_RING_SIZE 0x0000c
+
+#define NHI_RX_RING_ADDR_LO 0x08000
+#define NHI_RX_RING_ADDR_HI 0x08004
+#define NHI_RX_RING_PICI 0x08008
+#define RX_RING_CI_MASK GENMASK(15, 0)
+#define RX_RING_PI_SHIFT 16
+#define NHI_RX_RING_SIZE 0x0800c
+#define RX_RING_BUF_SIZE_SHIFT 16
+
+/*
+ * One 32-bit status register encodes one status bit per ring, indicating that
+ * the watermark from the control descriptor has been reached.
+ */
+#define NHI_RX_RING_STATUS 0x19400
+
+/*
+ * TX and RX Tables. These are 32 byte control fields for each ring.
+ * Only 8 bytes are controllable by the host software, the rest are a
+ * shadow copy by the controller of the current packet that's being
+ * processed.
+ */
+#define NHI_TX_RING_TABLE_BASE0 0x19800
+#define TX_TABLE_INTERVAL_MASK GENMASK(23,0) /* Isoch interval 256ns */
+#define TX_TABLE_ITE (1 << 27) /* Isoch tx enable */
+#define TX_TABLE_E2E (1 << 28) /* End-to-end flow control */
+#define TX_TABLE_NS (1 << 29) /* PCIe No Snoop */
+#define TX_TABLE_RAW (1 << 30) /* Raw (1)/frame(0) mode */
+#define TX_TABLE_VALID (1 << 31) /* Table entry is valid */
+#define NHI_TX_RING_TABLE_TIMESTAMP 0x19804
+
+#define NHI_RX_RING_TABLE_BASE0 0x29800
+#define RX_TABLE_TX_E2E_HOPID_SHIFT (1 << 12)
+#define RX_TABLE_E2E (1 << 28) /* End-to-end flow control */
+#define RX_TABLE_NS (1 << 29) /* PCIe No Snoop */
+#define RX_TABLE_RAW (1 << 30) /* Raw (1)/frame(0) mode */
+#define RX_TABLE_VALID (1 << 31) /* Table entry is valid */
+#define NHI_RX_RING_TABLE_BASE1 0x29804
+#define RX_TABLE_EOF_MASK (1 << 0)
+#define RX_TABLE_SOF_MASK (1 << 16)
+
+/* * Interrupt Control/Status Registers
+ * Interrupt Status Register (ISR)
+ * Interrupt status for RX, TX, and Nearly Empty events, one bit per
+ * MSI-X vector. Clear on read.
+ * Only 12 bits per operation, instead of 16? I guess it relates to the
+ * number of paths, advertised in the HOST_CAPS register, which is wired to
+ * 0x0c for Alpine Ridge.
+ */
+#define NHI_ISR0 0x37800
+#define ISR0_TX_DESC_SHIFT 0
+#define ISR0_RX_DESC_SHIFT 12
+#define ISR0_RX_EMPTY_SHIFT 24
+#define NHI_ISR1 0x37804
+#define ISR1_RX_EMPTY_SHIFT 0
+
+/* * Interrupt Status Clear, corresponds to ISR0/ISR1. Write Only */
+#define NHI_ISC0 0x37808
+#define NHI_ISC1 0x3780c
+
+/* * Interrupt Status Set, corresponds to ISR0/ISR1. Write Only */
+#define NHI_ISS0 0x37810
+#define NHI_ISS1 0x37814
+
+/* * Interrupt Mask, corresponds to ISR0/ISR1. Read-Write */
+#define NHI_IMR0 0x38200
+#define NHI_IMR1 0x38204
+#define IMR_TX_OFFSET 0
+#define IMR_RX_OFFSET 12
+#define IMR_NE_OFFSET 24
+
+/* * Interrupt Mask Clear, corresponds to ISR0/ISR1. Write-only */
+#define NHI_IMC0 0x38208
+#define NHI_IMC1 0x3820c
+
+/* * Interrupt Mask Set, corresponds to ISR0/ISR1. Write-only */
+#define NHI_IMS0 0x38210
+#define NHI_IMS1 0x38214
+
+/*
+ * Interrupt Throttle Rate. One 32 bit register per interrupt,
+ * 16 registers for the 16 MSI-X interrupts. Interval is in 256ns
+ * increments.
+ */
+#define NHI_ITR0 0x38c00
+#define ITR_INTERVAL_SHIFT 0
+#define ITR_COUNTER_SHIFT 16
+
+/*
+ * Interrupt Vector Allocation.
+ * There are 12 4-bit descriptors for TX, 12 4-bit descriptors for RX,
+ * and 12 4-bit descriptors for Nearly Empty. Each descriptor holds
+ * the numerical value of the MSI-X vector that will receive the
+ * corresponding interrupt.
+ * Bits 0-31 of IVR0 and 0-15 of IVR1 are for TX
+ * Bits 16-31 of IVR1 and 0-31 of IVR2 are for RX
+ * Bits 0-31 of IVR3 and 0-15 of IVR4 are for Nearly Empty
+ */
+#define NHI_IVR0 0x38c40
+#define NHI_IVR1 0x38c44
+#define NHI_IVR2 0x38c48
+#define NHI_IVR3 0x38c4c
+#define NHI_IVR4 0x38c50
+#define IVR_TX_OFFSET 0
+#define IVR_RX_OFFSET 12
+#define IVR_NE_OFFSET 24
+
+/* Native Host Interface Control registers */
+#define NHI_HOST_CAPS 0x39640
+#define GET_HOST_CAPS_PATHS(val) ((val) & 0x3f)
+
+/*
+ * This definition comes from the Linux driver. In the USB4 spec, this
+ * register is named Host Interface Control, and the Interrupt Autoclear bit
+ * is at bit17, not bit2. The Linux driver doesn't seem to acknowledge this.
+ */
+#define NHI_DMA_MISC 0x39864
+#define DMA_MISC_INT_AUTOCLEAR (1 << 2)
+
+/* Thunderbolt firmware mailbox registers */
+#define TBT_INMAILDATA 0x39900
+
+#define TBT_INMAILCMD 0x39904
+#define INMAILCMD_CMD_MASK 0xff
+#define INMAILCMD_SAVE_CONNECTED 0x05
+#define INMAILCMD_DISCONNECT_PCIE 0x06
+#define INMAILCMD_DRIVER_UNLOAD_DISCONNECT 0x07
+#define INMAILCMD_DISCONNECT_PORTA 0x10
+#define INMAILCMD_DISCONNECT_PORTB 0x11
+#define INMAILCMD_SETMODE_CERT_TB_1ST_DEPTH 0x20
+#define INMAILCMD_SETMODE_ANY_TB_1ST_DEPTH 0x21
+#define INMAILCMD_SETMODE_CERT_TB_ANY_DEPTH 0x22
+#define INMAILCMD_SETMODE_ANY_TB_ANY_DEPTH 0x23
+#define INMAILCMD_CIO_RESET 0xf0
+#define INMAILCMD_ERROR (1 << 30)
+#define INMAILCMD_OPREQ (1 << 31)
+
+#define TBT_OUTMAILCMD 0x3990c
+#define OUTMAILCMD_STATUS_BUSY (1 << 12)
+#define OUTMAILCMD_OPMODE_MASK 0xf00
+#define OUTMAILCMD_OPMODE_SAFE 0x000
+#define OUTMAILCMD_OPMODE_AUTH 0x100
+#define OUTMAILCMD_OPMODE_ENDPOINT 0x200
+#define OUTMAILCMD_OPMODE_CM_FULL 0x300
+
+#define TBT_FW_STATUS 0x39944
+#define FWSTATUS_ENABLE (1 << 0)
+#define FWSTATUS_INVERT (1 << 1)
+#define FWSTATUS_START (1 << 2)
+#define FWSTATUS_CIO_RESET (1 << 30)
+#define FWSTATUS_CM_READY (1 << 31)
+
+/*
+ * Link Controller (LC) registers. These are in the Vendor Specific
+ * Extended Capability registers in PCICFG.
+ */
+#define AR_LC_MBOX_OUT 0x4c
+#define ICL_LC_MBOX_OUT 0xf0
+#define LC_MBOXOUT_VALID (1 << 0)
+#define LC_MBOXOUT_CMD_SHIFT 1
+#define LC_MBOXOUT_CMD_MASK (0x7f << LC_MBOXOUT_CMD_SHIFT)
+#define LC_MBOXOUT_CMD_GO2SX (0x02 << LC_MBOXOUT_CMD_SHIFT)
+#define LC_MBOXOUT_CMD_GO2SX_NOWAKE (0x03 << LC_MBOXOUT_CMD_SHIFT)
+#define LC_MBOXOUT_CMD_SXEXIT_TBT (0x04 << LC_MBOXOUT_CMD_SHIFT)
+#define LC_MBOXOUT_CMD_SXEXIT_NOTBT (0x05 << LC_MBOXOUT_CMD_SHIFT)
+#define LC_MBOXOUT_CMD_OS_UP (0x06 << LC_MBOXOUT_CMD_SHIFT)
+#define LC_MBOXOUT_DATA_SHIFT 8
+#define SET_LC_MBOXOUT_DATA(val) ((val) << LC_MBOXOUT_DATA_SHIFT)
+
+#define AR_LC_MBOX_IN 0x48
+#define ICL_LC_MBOX_IN 0xec
+#define LC_MBOXIN_DONE (1 << 0)
+#define LC_MBOXIN_CMD_SHIFT 1
+#define LC_MBOXIN_CMD_MASK (0x7f << LC_MBOXIN_CMD_SHIFT)
+#define LC_MBOXIN_DATA_SHIFT 8
+#define GET_LC_MBOXIN_DATA(val) ((val) >> LC_MBOXIN_DATA_SHIFT)
+
+/* Other Vendor Specific registers */
+#define AR_VSCAP_1C 0x1c
+#define AR_VSCAP_B0 0xb0
+
+#define ICL_VSCAP_9 0xc8
+#define ICL_VSCAP9_FWREADY (1 << 31)
+#define ICL_VSCAP_10 0xcc
+#define ICL_VSCAP_11 0xd0
+#define ICL_VSCAP_22 0xfc
+#define ICL_VSCAP22_FORCEPWR (1 << 1)
+
+/* * Data structures
+ * Transmit buffer descriptor, 12.3.1. Must be aligned on a 4byte boundary.
+ */
+struct nhi_tx_buffer_desc {
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint16_t eof_len;
+#define TX_BUFFER_DESC_LEN_MASK 0xfff
+#define TX_BUFFER_DESC_EOF_SHIFT 12
+ uint8_t flags_sof;
+#define TX_BUFFER_DESC_SOF_MASK 0xf
+#define TX_BUFFER_DESC_IDE (1 << 4) /* Isoch DMA enable */
+#define TX_BUFFER_DESC_DONE (1 << 5) /* Descriptor Done */
+#define TX_BUFFER_DESC_RS (1 << 6) /* Request Status/Done */
+#define TX_BUFFER_DESC_IE (1 << 7) /* Interrupt Enable */
+ uint8_t offset;
+ uint32_t payload_time;
+} __packed;
+
+/*
+ * Receive buffer descriptor, 12.4.1. 4 byte aligned. This goes into
+ * the descriptor ring, but changes into the _post form when the
+ * controller uses it.
+ */
+struct nhi_rx_buffer_desc {
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint16_t reserved0;
+ uint8_t flags;
+#define RX_BUFFER_DESC_RS (1 << 6) /* Request Status/Done */
+#define RX_BUFFER_DESC_IE (1 << 7) /* Interrupt Enable */
+ uint8_t offset;
+ uint32_t reserved1;
+} __packed;
+
+/*
+ * Receive buffer descriptor, after the controller fills it in
+ */
+struct nhi_rx_post_desc {
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint16_t eof_len;
+#define RX_BUFFER_DESC_LEN_MASK 0xfff
+#define RX_BUFFER_DESC_EOF_SHIFT 12
+ uint8_t flags_sof;
+#define RX_BUFFER_DESC_SOF_MASK 0xf
+#define RX_BUFFER_DESC_CRC_ERROR (1 << 4) /* CRC error (frame mode) */
+#define RX_BUFFER_DESC_DONE (1 << 5) /* Descriptor Done */
+#define RX_BUFFER_DESC_OVERRUN (1 << 6) /* Buffer overrun */
+#define RX_BUFFER_DESC_IE (1 << 7) /* Interrupt Enable */
+ uint8_t offset;
+ uint32_t payload_time;
+} __packed;
+
+union nhi_ring_desc {
+ struct nhi_tx_buffer_desc tx;
+ struct nhi_rx_buffer_desc rx;
+ struct nhi_rx_post_desc rxpost;
+ uint32_t dword[4];
+};
+
+/* Protocol Defined Field (PDF) */
+#define PDF_READ 0x01
+#define PDF_WRITE 0x02
+#define PDF_NOTIFY 0x03
+#define PDF_NOTIFY_ACK 0x04
+#define PDF_HOTPLUG 0x05
+#define PDF_XDOMAIN_REQ 0x06
+#define PDF_XDOMAIN_RESP 0x07
+/* Thunderbolt-only */
+#define PDF_CM_EVENT 0x0a
+#define PDF_CM_REQ 0x0b
+#define PDF_CM_RESP 0x0c
+
+#endif /* _NHI_REG_H */
diff --git a/sys/dev/thunderbolt/nhi_var.h b/sys/dev/thunderbolt/nhi_var.h
new file mode 100644
index 000000000000..2b9e878af47d
--- /dev/null
+++ b/sys/dev/thunderbolt/nhi_var.h
@@ -0,0 +1,277 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Thunderbolt 3 / Native Host Interface driver variables
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NHI_VAR
+#define _NHI_VAR
+
+MALLOC_DECLARE(M_NHI);
+
+#define NHI_MSIX_MAX 32
+#define NHI_RING0_TX_DEPTH 16
+#define NHI_RING0_RX_DEPTH 16
+#define NHI_DEFAULT_NUM_RINGS 1
+#define NHI_MAX_NUM_RINGS 32 /* XXX 2? */
+#define NHI_RING0_FRAME_SIZE 256
+#define NHI_MAILBOX_TIMEOUT 15
+
+#define NHI_CMD_TIMEOUT 3 /* 3 seconds */
+
+struct nhi_softc;
+struct nhi_ring_pair;
+struct nhi_intr_tracker;
+struct nhi_cmd_frame;
+struct hcm_softc;
+struct router_softc;
+
+struct nhi_cmd_frame {
+ TAILQ_ENTRY(nhi_cmd_frame) cm_link;
+ uint32_t *data;
+ bus_addr_t data_busaddr;
+ u_int req_len;
+ uint16_t flags;
+#define CMD_MAPPED (1 << 0)
+#define CMD_POLLED (1 << 1)
+#define CMD_REQ_COMPLETE (1 << 2)
+#define CMD_RESP_COMPLETE (1 << 3)
+#define CMD_RESP_OVERRUN (1 << 4)
+ uint16_t retries;
+ uint16_t pdf;
+ uint16_t idx;
+
+ void *context;
+ u_int timeout;
+
+ uint32_t *resp_buffer;
+ u_int resp_len;
+};
+
+#define NHI_RING_NAMELEN 16
+struct nhi_ring_pair {
+ struct nhi_softc *sc;
+
+ union nhi_ring_desc *tx_ring;
+ union nhi_ring_desc *rx_ring;
+
+ uint16_t tx_pi;
+ uint16_t tx_ci;
+ uint16_t rx_pi;
+ uint16_t rx_ci;
+
+ uint16_t rx_pici_reg;
+ uint16_t tx_pici_reg;
+
+ struct nhi_cmd_frame **rx_cmd_ring;
+ struct nhi_cmd_frame **tx_cmd_ring;
+
+ struct mtx mtx;
+ char name[NHI_RING_NAMELEN];
+ struct nhi_intr_tracker *tracker;
+ SLIST_ENTRY(nhi_ring_pair) ring_link;
+
+ TAILQ_HEAD(, nhi_cmd_frame) tx_head;
+ TAILQ_HEAD(, nhi_cmd_frame) rx_head;
+
+ uint16_t tx_ring_depth;
+ uint16_t tx_ring_mask;
+ uint16_t rx_ring_depth;
+ uint16_t rx_ring_mask;
+ uint16_t rx_buffer_size;
+ u_char ring_num;
+
+ bus_dma_tag_t ring_dmat;
+ bus_dmamap_t ring_map;
+ void *ring;
+ bus_addr_t tx_ring_busaddr;
+ bus_addr_t rx_ring_busaddr;
+
+ bus_dma_tag_t frames_dmat;
+ bus_dmamap_t frames_map;
+ void *frames;
+ bus_addr_t tx_frames_busaddr;
+ bus_addr_t rx_frames_busaddr;
+};
+
+/* PDF-indexed array of dispatch routines for interrupts */
+typedef void (nhi_ring_cb_t)(void *, union nhi_ring_desc *,
+ struct nhi_cmd_frame *);
+struct nhi_pdf_dispatch {
+ nhi_ring_cb_t *cb;
+ void *context;
+};
+
+struct nhi_intr_tracker {
+ struct nhi_softc *sc;
+ struct nhi_ring_pair *ring;
+ struct nhi_pdf_dispatch txpdf[16];
+ struct nhi_pdf_dispatch rxpdf[16];
+ u_int vector;
+};
+
+struct nhi_softc {
+ device_t dev;
+ device_t ufp;
+ u_int debug;
+ u_int hwflags;
+#define NHI_TYPE_UNKNOWN 0x00
+#define NHI_TYPE_AR 0x01 /* Alpine Ridge */
+#define NHI_TYPE_TR 0x02 /* Titan Ridge */
+#define NHI_TYPE_ICL 0x03 /* IceLake */
+#define NHI_TYPE_MR 0x04 /* Maple Ridge */
+#define NHI_TYPE_ADL 0x05 /* AlderLake */
+#define NHI_TYPE_USB4 0x0f
+#define NHI_TYPE_MASK 0x0f
+#define NHI_MBOX_BUSY 0x10
+ u_int caps;
+#define NHI_CAP_ICM 0x01
+#define NHI_CAP_HCM 0x02
+#define NHI_USE_ICM(sc) ((sc)->caps & NHI_CAP_ICM)
+#define NHI_USE_HCM(sc) ((sc)->caps & NHI_CAP_HCM)
+ struct hcm_softc *hcm;
+ struct router_softc *root_rsc;
+
+ struct nhi_ring_pair *ring0;
+ struct nhi_intr_tracker *intr_trackers;
+
+ uint16_t path_count;
+ uint16_t max_ring_count;
+
+ struct mtx nhi_mtx;
+ SLIST_HEAD(, nhi_ring_pair) ring_list;
+
+ int msix_count;
+ struct resource *irqs[NHI_MSIX_MAX];
+ void *intrhand[NHI_MSIX_MAX];
+ int irq_rid[NHI_MSIX_MAX];
+ struct resource *irq_pba;
+ int irq_pba_rid;
+ struct resource *irq_table;
+ int irq_table_rid;
+
+ struct resource *regs_resource;
+ bus_space_handle_t regs_bhandle;
+ bus_space_tag_t regs_btag;
+ int regs_rid;
+
+ bus_dma_tag_t parent_dmat;
+
+ bus_dma_tag_t ring0_dmat;
+ bus_dmamap_t ring0_map;
+ void *ring0_frames;
+ bus_addr_t ring0_frames_busaddr;
+ struct nhi_cmd_frame *ring0_cmds;
+
+ struct sysctl_ctx_list *sysctl_ctx;
+ struct sysctl_oid *sysctl_tree;
+
+ struct intr_config_hook ich;
+
+ uint8_t force_hcm;
+#define NHI_FORCE_HCM_DEFAULT 0x00
+#define NHI_FORCE_HCM_ON 0x01
+#define NHI_FORCE_HCM_OFF 0x02
+
+ uint8_t uuid[16];
+ uint8_t lc_uuid[16];
+};
+
+struct nhi_dispatch {
+ uint8_t pdf;
+ nhi_ring_cb_t *cb;
+ void *context;
+};
+
+#define NHI_IS_AR(sc) (((sc)->hwflags & NHI_TYPE_MASK) == NHI_TYPE_AR)
+#define NHI_IS_TR(sc) (((sc)->hwflags & NHI_TYPE_MASK) == NHI_TYPE_TR)
+#define NHI_IS_ICL(sc) (((sc)->hwflags & NHI_TYPE_MASK) == NHI_TYPE_ICL)
+#define NHI_IS_USB4(sc) (((sc)->hwflags & NHI_TYPE_MASK) == NHI_TYPE_USB4)
+
+int nhi_pci_configure_interrupts(struct nhi_softc *sc);
+void nhi_pci_enable_interrupt(struct nhi_ring_pair *r);
+void nhi_pci_disable_interrupts(struct nhi_softc *sc);
+int nhi_pci_get_uuid(struct nhi_softc *sc);
+int nhi_read_lc_mailbox(struct nhi_softc *, u_int reg, uint32_t *val);
+int nhi_write_lc_mailbox(struct nhi_softc *, u_int reg, uint32_t val);
+
+void nhi_get_tunables(struct nhi_softc *);
+int nhi_attach(struct nhi_softc *);
+int nhi_detach(struct nhi_softc *);
+
+struct nhi_cmd_frame * nhi_alloc_tx_frame(struct nhi_ring_pair *);
+void nhi_free_tx_frame(struct nhi_ring_pair *, struct nhi_cmd_frame *);
+
+int nhi_inmail_cmd(struct nhi_softc *, uint32_t, uint32_t);
+int nhi_outmail_cmd(struct nhi_softc *, uint32_t *);
+
+int nhi_tx_schedule(struct nhi_ring_pair *, struct nhi_cmd_frame *);
+int nhi_tx_synchronous(struct nhi_ring_pair *, struct nhi_cmd_frame *);
+void nhi_intr(void *);
+
+int nhi_register_pdf(struct nhi_ring_pair *, struct nhi_dispatch *,
+ struct nhi_dispatch *);
+int nhi_deregister_pdf(struct nhi_ring_pair *, struct nhi_dispatch *,
+ struct nhi_dispatch *);
+
+/* Low level read/write MMIO registers */
+/* Read a 32-bit MMIO register; device registers are little-endian. */
+static __inline uint32_t
+nhi_read_reg(struct nhi_softc *sc, u_int offset)
+{
+	return (le32toh(bus_space_read_4(sc->regs_btag, sc->regs_bhandle,
+	    offset)));
+}
+
+/* Write a 32-bit MMIO register, converting to device (little) endian. */
+static __inline void
+nhi_write_reg(struct nhi_softc *sc, u_int offset, uint32_t val)
+{
+	bus_space_write_4(sc->regs_btag, sc->regs_bhandle, offset,
+	    htole32(val));
+}
+
+/*
+ * Pop a free command frame off the ring's TX freelist.  The caller must
+ * hold the ring lock.  Returns NULL when the freelist is empty.
+ */
+static __inline struct nhi_cmd_frame *
+nhi_alloc_tx_frame_locked(struct nhi_ring_pair *r)
+{
+	struct nhi_cmd_frame *frame;
+
+	frame = TAILQ_FIRST(&r->tx_head);
+	if (frame == NULL)
+		return (NULL);
+	TAILQ_REMOVE(&r->tx_head, frame, cm_link);
+	return (frame);
+}
+
+/*
+ * Return a command frame to the ring's TX freelist.  The caller must
+ * hold the ring lock.  The DMA mapping is kept (CMD_MAPPED survives)
+ * so the frame can be reused without remapping.
+ */
+static __inline void
+nhi_free_tx_frame_locked(struct nhi_ring_pair *r, struct nhi_cmd_frame *cmd)
+{
+	/* Clear all flags except for MAPPED */
+	cmd->flags &= CMD_MAPPED;
+	cmd->resp_buffer = NULL;
+	TAILQ_INSERT_TAIL(&r->tx_head, cmd, cm_link);
+}
+
+#endif /* _NHI_VAR */
diff --git a/sys/dev/thunderbolt/nhi_wmi.c b/sys/dev/thunderbolt/nhi_wmi.c
new file mode 100644
index 000000000000..3feba3bcd8d1
--- /dev/null
+++ b/sys/dev/thunderbolt/nhi_wmi.c
@@ -0,0 +1,198 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_acpi.h"
+#include "opt_thunderbolt.h"
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/uio.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/sbuf.h>
+#include <sys/module.h>
+#include <sys/sysctl.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+#include <dev/acpica/acpivar.h>
+#include "acpi_wmi_if.h"
+
+ACPI_MODULE_NAME("THUNDERBOLT-NHI-WMI")
+
+/* GUID under which the platform firmware publishes Thunderbolt WMI methods. */
+#define ACPI_INTEL_THUNDERBOLT_GUID "86CCFD48-205E-4A77-9C48-2021CBEDE341"
+
+/* Per-device state for the Thunderbolt NHI WMI endpoint. */
+struct nhi_wmi_softc {
+ device_t dev;
+ device_t wmi_dev;
+ /* Last force_power value successfully written via nhi_wmi_sysctl(). */
+ u_int state;
+ char *guid;
+ struct sysctl_ctx_list *sysctl_ctx;
+ struct sysctl_oid *sysctl_tree;
+};
+
+/* Serializes WMI method evaluation across all instances. */
+ACPI_SERIAL_DECL(nhi_wmi, "Thunderbolt NHI WMI device");
+
+static void nhi_wmi_identify(driver_t *driver, device_t parent);
+static int nhi_wmi_probe(device_t dev);
+static int nhi_wmi_attach(device_t dev);
+static int nhi_wmi_detach(device_t dev);
+static int nhi_wmi_sysctl(SYSCTL_HANDLER_ARGS);
+static int nhi_wmi_evaluate_method(struct nhi_wmi_softc *sc,
+ int method, uint32_t arg0, uint32_t *retval);
+
+static device_method_t nhi_wmi_methods[] = {
+ DEVMETHOD(device_identify, nhi_wmi_identify),
+ DEVMETHOD(device_probe, nhi_wmi_probe),
+ DEVMETHOD(device_attach, nhi_wmi_attach),
+ DEVMETHOD(device_detach, nhi_wmi_detach),
+
+ DEVMETHOD_END
+};
+
+static driver_t nhi_wmi_driver = {
+ "nhi_wmi",
+ nhi_wmi_methods,
+ sizeof(struct nhi_wmi_softc)
+};
+
+/* Attach as a child of acpi_wmi; depends on both the WMI and ACPI modules. */
+DRIVER_MODULE(nhi_wmi, acpi_wmi, nhi_wmi_driver,
+ NULL, NULL);
+MODULE_DEPEND(nhi_wmi, acpi_wmi, 1, 1, 1);
+MODULE_DEPEND(nhi_wmi, acpi, 1, 1, 1);
+
+/*
+ * Bus identify hook: add one nhi_wmi child under the acpi_wmi parent when
+ * the Intel Thunderbolt GUID is published and no child exists yet.
+ */
+static void
+nhi_wmi_identify(driver_t *driver, device_t parent)
+{
+
+	if (acpi_disabled("nhi_wmi") != 0)
+		return;
+	if (device_find_child(parent, "nhi_wmi", -1) != NULL)
+		return;
+	if (ACPI_WMI_PROVIDES_GUID_STRING(parent,
+	    ACPI_INTEL_THUNDERBOLT_GUID) == 0)
+		return;
+
+	if (BUS_ADD_CHILD(parent, 0, "nhi_wmi", -1) == NULL)
+		device_printf(parent, "failed to add nhi_wmi\n");
+}
+
+/* Probe: claim the device only if the parent provides the Intel TB GUID. */
+static int
+nhi_wmi_probe(device_t dev)
+{
+	device_t parent;
+
+	parent = device_get_parent(dev);
+	if (ACPI_WMI_PROVIDES_GUID_STRING(parent,
+	    ACPI_INTEL_THUNDERBOLT_GUID) == 0)
+		return (EINVAL);
+
+	device_set_desc(dev, "Thunderbolt WMI Endpoint");
+	return (BUS_PROBE_DEFAULT);
+}
+
+/*
+ * Attach: record the parent WMI device and publish the GUID and the
+ * force_power control under the device's sysctl tree.
+ */
+static int
+nhi_wmi_attach(device_t dev)
+{
+	struct nhi_wmi_softc *sc;
+	struct sysctl_ctx_list *ctx;
+	struct sysctl_oid_list *children;
+
+	sc = device_get_softc(dev);
+	sc->dev = dev;
+	sc->wmi_dev = device_get_parent(dev);
+	sc->sysctl_ctx = device_get_sysctl_ctx(dev);
+	sc->sysctl_tree = device_get_sysctl_tree(dev);
+	sc->state = 0;
+	sc->guid = ACPI_INTEL_THUNDERBOLT_GUID;
+
+	ctx = sc->sysctl_ctx;
+	children = SYSCTL_CHILDREN(sc->sysctl_tree);
+	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "GUID", CTLFLAG_RD,
+	    sc->guid, 0, "WMI GUID");
+	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "force_power",
+	    CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_MPSAFE, sc, 0,
+	    nhi_wmi_sysctl, "I", "Force controller power on");
+
+	return (0);
+}
+
+/*
+ * Detach: no private resources to release.  The sysctl nodes were created
+ * on the device's own sysctl context, which the bus framework tears down
+ * automatically -- TODO confirm no WMI deregistration is required.
+ */
+static int
+nhi_wmi_detach(device_t dev)
+{
+
+ return (0);
+}
+
+/*
+ * Sysctl handler for "force_power".  Reads report the cached state as 0/1;
+ * writes evaluate WMI method 1 with the new value and cache it on success.
+ * WMI evaluation is serialized via the nhi_wmi ACPI serial lock.
+ */
+static int
+nhi_wmi_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ struct nhi_wmi_softc *sc;
+ int error, arg;
+
+ sc = (struct nhi_wmi_softc *)arg1;
+ arg = !!sc->state;
+ error = sysctl_handle_int(oidp, &arg, 0, req);
+ /* newptr != NULL means this is a write, not just a read. */
+ if (!error && req->newptr != NULL) {
+ ACPI_SERIAL_BEGIN(nhi_wmi);
+ error = nhi_wmi_evaluate_method(sc, 1, arg, NULL);
+ ACPI_SERIAL_END(nhi_wmi);
+ if (error == 0)
+ sc->state = arg;
+ }
+ return (error);
+}
+
+/*
+ * Evaluate a method under the Intel Thunderbolt WMI GUID with a single
+ * 32-bit input argument.  On success, an integer result (if the firmware
+ * returned one) is stored through retval; otherwise 0 is stored.
+ * Returns 0 on success or EINVAL on ACPI failure.
+ */
+static int
+nhi_wmi_evaluate_method(struct nhi_wmi_softc *sc, int method, uint32_t arg0,
+    uint32_t *retval)
+{
+	ACPI_OBJECT *res;
+	ACPI_BUFFER in, out;
+	uint32_t result, params[1];
+
+	params[0] = arg0;
+	in.Pointer = params;
+	in.Length = sizeof(params);
+	out.Pointer = NULL;
+	out.Length = ACPI_ALLOCATE_BUFFER;
+
+	if (ACPI_FAILURE(ACPI_WMI_EVALUATE_CALL(sc->wmi_dev,
+	    ACPI_INTEL_THUNDERBOLT_GUID, 0, method, &in, &out))) {
+		AcpiOsFree(out.Pointer);
+		return (EINVAL);
+	}
+
+	result = 0;
+	res = out.Pointer;
+	if (res != NULL && res->Type == ACPI_TYPE_INTEGER)
+		result = (uint32_t)res->Integer.Value;
+	AcpiOsFree(out.Pointer);
+
+	if (retval != NULL)
+		*retval = result;
+	return (0);
+}
diff --git a/sys/dev/thunderbolt/router.c b/sys/dev/thunderbolt/router.c
new file mode 100644
index 000000000000..a3b418d77fac
--- /dev/null
+++ b/sys/dev/thunderbolt/router.c
@@ -0,0 +1,939 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_thunderbolt.h"
+
+/* Config space access for switches, ports, and devices in TB3 and USB4 */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/taskqueue.h>
+#include <sys/gsb_crc32.h>
+#include <sys/endian.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+#include <machine/stdarg.h>
+
+#include <dev/thunderbolt/nhi_reg.h>
+#include <dev/thunderbolt/nhi_var.h>
+#include <dev/thunderbolt/tb_reg.h>
+#include <dev/thunderbolt/tb_var.h>
+#include <dev/thunderbolt/tbcfg_reg.h>
+#include <dev/thunderbolt/router_var.h>
+#include <dev/thunderbolt/tb_debug.h>
+
+static int router_alloc_cmd(struct router_softc *, struct router_command **);
+static void router_free_cmd(struct router_softc *, struct router_command *);
+static int _tb_router_attach(struct router_softc *);
+static void router_prepare_read(struct router_softc *, struct router_command *,
+ int);
+static int _tb_config_read(struct router_softc *, u_int, u_int, u_int, u_int,
+ uint32_t *, void *, struct router_command **);
+static int router_schedule(struct router_softc *, struct router_command *);
+static int router_schedule_locked(struct router_softc *,
+ struct router_command *);
+static nhi_ring_cb_t router_complete_intr;
+static nhi_ring_cb_t router_response_intr;
+static nhi_ring_cb_t router_notify_intr;
+
+#define CFG_DEFAULT_RETRIES 3
+#define CFG_DEFAULT_TIMEOUT 2
+
+/*
+ * Walk the router topology tree from the root, following the route string
+ * one hop (one byte) at a time, and return the router whose route matches.
+ * Returns ENOENT if the route terminates without a match, EINVAL if the
+ * tree is inconsistent (hop out of range or uninitialized adapter array).
+ */
+static int
+router_lookup_device(struct router_softc *sc, tb_route_t route,
+ struct router_softc **dev)
+{
+ struct router_softc *cursor;
+ uint64_t search_rt, remainder_rt, this_rt;
+ uint8_t hop;
+
+ KASSERT(dev != NULL, ("dev cannot be NULL\n"));
+
+ cursor = tb_config_get_root(sc);
+ /* Routes are 64-bit values split across route.hi/route.lo. */
+ remainder_rt = search_rt = route.lo | ((uint64_t)route.hi << 32);
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA,
+ "%s: Searching for router 0x%016jx\n", __func__, search_rt);
+
+ while (cursor != NULL) {
+ this_rt = TB_ROUTE(cursor);
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA,
+ "Comparing cursor route 0x%016jx\n", this_rt);
+ if (this_rt == search_rt)
+ break;
+
+ /* Prepare to go to the next hop node in the route */
+ hop = remainder_rt & 0xff;
+ remainder_rt >>= 8;
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA,
+ "hop= 0x%02x, remainder= 0x%016jx\n", hop, remainder_rt);
+
+ /*
+ * An adapter index of 0x0 is only for the host interface
+ * adapter on the root route. The only time that
+ * it's valid for searches is when you're looking for the
+ * root route, and that case has already been handled.
+ */
+ if (hop == 0) {
+ tb_debug(sc, DBG_ROUTER,
+ "End of route chain, route not found\n");
+ return (ENOENT);
+ }
+
+ if (hop > cursor->max_adap) {
+ tb_debug(sc, DBG_ROUTER,
+ "Route hop out of range for parent\n");
+ return (EINVAL);
+ }
+
+ if (cursor->adapters == NULL) {
+ tb_debug(sc, DBG_ROUTER,
+ "Error, router not fully initialized\n");
+ return (EINVAL);
+ }
+
+ /* Descend to the child hanging off this adapter index. */
+ cursor = cursor->adapters[hop];
+ }
+
+ if (cursor == NULL)
+ return (ENOENT);
+
+ *dev = cursor;
+ return (0);
+}
+
+/*
+ * Link a newly created router into its parent's adapter table.  Validates
+ * that the child's route is exactly one hop deeper than the parent's
+ * (depth check plus the shifted-route range check) and that the slot for
+ * the final hop is in range and unoccupied.
+ * Returns 0, EINVAL on a malformed route, or EEXIST on a duplicate.
+ */
+static int
+router_insert(struct router_softc *sc, struct router_softc *parent)
+{
+ uint64_t this_rt;
+ uint8_t this_hop;
+
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "router_insert called\n");
+
+ if (parent == NULL) {
+ tb_debug(sc, DBG_ROUTER, "Parent cannot be NULL in insert\n");
+ return (EINVAL);
+ }
+
+ this_rt = TB_ROUTE(sc);
+ /*
+ * The route must fit in 'depth' bytes (anything left after shifting
+ * out depth*8 bits beyond one byte means the route is too long), and
+ * the child must sit directly below the parent.
+ */
+ if (((this_rt >> (sc->depth * 8)) > 0xffULL) ||
+ (parent->depth + 1 != sc->depth)) {
+ tb_debug(sc, DBG_ROUTER, "Added route 0x%08x%08x is not a "
+ "direct child of the parent route 0x%08x%08x\n",
+ sc->route.hi, sc->route.lo, parent->route.hi,
+ parent->route.lo);
+ return (EINVAL);
+ }
+
+ /* NOTE(review): last-hop extraction shifts by depth*8 -- looks like it
+ * expects the final hop byte in the top used byte; confirm against the
+ * depth check above. */
+ this_hop = (uint8_t)(this_rt >> (sc->depth * 8));
+
+ tb_debug(sc, DBG_ROUTER, "Inserting route 0x%08x%08x with last hop "
+ "of 0x%02x and depth of %d\n", sc->route.hi, sc->route.lo,
+ this_hop, sc->depth);
+
+ if (this_hop > parent->max_adap) {
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA,
+ "Inserted route is out of range of the parent\n");
+ return (EINVAL);
+ }
+
+ if (parent->adapters[this_hop] != NULL) {
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA,
+ "Inserted route already exists\n");
+ return (EEXIST);
+ }
+
+ parent->adapters[this_hop] = sc;
+
+ tb_debug(sc, DBG_ROUTER, "Added router 0x%08x%08x to parent "
+ "0x%08x%08x\n", sc->route.hi, sc->route.lo, parent->route.hi,
+ parent->route.lo);
+ return (0);
+}
+
+/*
+ * Register PDF dispatch handlers on ring0: TX completions for READ/WRITE
+ * go to router_complete_intr, RX responses for READ/WRITE go to
+ * router_response_intr, and NOTIFY events go to router_notify_intr.
+ * Each table is terminated by a zeroed sentinel entry.
+ */
+static int
+router_register_interrupts(struct router_softc *sc)
+{
+ struct nhi_dispatch tx[] = { { PDF_READ, router_complete_intr, sc },
+ { PDF_WRITE, router_complete_intr, sc },
+ { 0, NULL, NULL } };
+ struct nhi_dispatch rx[] = { { PDF_READ, router_response_intr, sc },
+ { PDF_WRITE, router_response_intr, sc },
+ { PDF_NOTIFY, router_notify_intr, sc },
+ { 0, NULL, NULL } };
+
+ return (nhi_register_pdf(sc->ring0, tx, rx));
+}
+
+/*
+ * Allocate and attach a router at the given route, inheriting ring and
+ * debug state from the parent and linking it into the topology tree.
+ * Returns 0 or an errno; on failure no state is leaked.
+ */
+int
+tb_router_attach(struct router_softc *parent, tb_route_t route)
+{
+	struct router_softc *sc;
+	int error;
+
+	tb_debug(parent, DBG_ROUTER|DBG_EXTRA, "tb_router_attach called\n");
+
+	sc = malloc(sizeof(*sc), M_THUNDERBOLT, M_ZERO|M_NOWAIT);
+	if (sc == NULL) {
+		tb_debug(parent, DBG_ROUTER, "Cannot allocate router\n");
+		return (ENOMEM);
+	}
+
+	sc->dev = parent->dev;
+	sc->debug = parent->debug;
+	sc->ring0 = parent->ring0;
+	sc->route = route;
+	sc->nsc = parent->nsc;
+
+	mtx_init(&sc->mtx, "tbcfg", "Thunderbolt Router Config", MTX_DEF);
+	TAILQ_INIT(&sc->cmd_queue);
+
+	/*
+	 * Insert into the parent's adapter table before touching config
+	 * space.  A bad or duplicate route must abort the attach; the
+	 * original code ignored this error and leaked the softc.
+	 */
+	error = router_insert(sc, parent);
+	if (error != 0) {
+		mtx_destroy(&sc->mtx);
+		free(sc, M_THUNDERBOLT);
+		return (error);
+	}
+
+	return (_tb_router_attach(sc));
+}
+
+/*
+ * Create and attach the root router, the semi-virtual router representing
+ * the NHI DMA engine itself.  Also registers the ring0 PDF interrupt
+ * handlers (safe only once this router exists) and copies the discovered
+ * UUID back into the NHI softc.
+ */
+int
+tb_router_attach_root(struct nhi_softc *nsc, tb_route_t route)
+{
+ struct router_softc *sc;
+ int error;
+
+ tb_debug(nsc, DBG_ROUTER|DBG_EXTRA, "tb_router_attach_root called\n");
+
+ sc = malloc(sizeof(*sc), M_THUNDERBOLT, M_ZERO|M_NOWAIT);
+ if (sc == NULL) {
+ tb_debug(nsc, DBG_ROUTER, "Cannot allocate root router\n");
+ return (ENOMEM);
+ }
+
+ sc->dev = nsc->dev;
+ sc->debug = nsc->debug;
+ sc->ring0 = nsc->ring0;
+ sc->route = route;
+ sc->nsc = nsc;
+
+ mtx_init(&sc->mtx, "tbcfg", "Thunderbolt Router Config", MTX_DEF);
+ TAILQ_INIT(&sc->cmd_queue);
+
+ /*
+ * This router is semi-virtual and represents the router that's part
+ * of the NHI DMA engine. Commands can't be issued to the topology
+ * until the NHI is initialized and this router is initialized, so
+ * there's no point in registering router interrupts earlier than this,
+ * even if other routers are found first.
+ */
+ tb_config_set_root(sc);
+ error = router_register_interrupts(sc);
+ if (error) {
+ tb_router_detach(sc);
+ return (error);
+ }
+
+ error = _tb_router_attach(sc);
+ if (error)
+ return (error);
+
+ /* Publish the root router's 16-byte UUID to the NHI layer. */
+ bcopy((uint8_t *)sc->uuid, nsc->uuid, 16);
+ return (0);
+}
+
+/*
+ * Common attach tail: read the first 9 dwords of the router config space
+ * (polled, since interrupts may not be usable yet), record the upstream
+ * adapter, adapter count, depth, and UUID, then allocate the downstream
+ * adapter pointer array.
+ */
+static int
+_tb_router_attach(struct router_softc *sc)
+{
+ struct tb_cfg_router *cfg;
+ uint32_t *buf;
+ int error, up;
+
+ /* 9 dwords covers the router config header read below. */
+ buf = malloc(9 * 4, M_THUNDERBOLT, M_NOWAIT|M_ZERO);
+ if (buf == NULL)
+ return (ENOMEM);
+
+ error = tb_config_router_read_polled(sc, 0, 9, buf);
+ if (error != 0) {
+ free(buf, M_THUNDERBOLT);
+ return (error);
+ }
+
+ cfg = (struct tb_cfg_router *)buf;
+ up = GET_ROUTER_CS_UPSTREAM_ADAP(cfg);
+ sc->max_adap = GET_ROUTER_CS_MAX_ADAP(cfg);
+ sc->depth = GET_ROUTER_CS_DEPTH(cfg);
+ sc->uuid[0] = cfg->uuid_lo;
+ sc->uuid[1] = cfg->uuid_hi;
+ /* Upper half of the UUID is filled with a sentinel pattern here. */
+ sc->uuid[2] = 0xffffffff;
+ sc->uuid[3] = 0xffffffff;
+ tb_debug(sc, DBG_ROUTER, "Router upstream_port= %d, max_port= %d, "
+ "depth= %d\n", up, sc->max_adap, sc->depth);
+ free(buf, M_THUNDERBOLT);
+
+ /* Downstream adapters are indexed in the array allocated here. */
+ sc->max_adap = MIN(sc->max_adap, ROUTER_CS1_MAX_ADAPTERS);
+ sc->adapters = malloc((1 + sc->max_adap) * sizeof(void *),
+ M_THUNDERBOLT, M_NOWAIT|M_ZERO);
+ if (sc->adapters == NULL) {
+ tb_debug(sc, DBG_ROUTER,
+ "Cannot allocate downstream adapter memory\n");
+ /* NOTE(review): sc itself is not freed on this path; the
+ * caller keeps the partially initialized router -- confirm
+ * intended cleanup responsibility. */
+ return (ENOMEM);
+ }
+
+ tb_debug(sc, DBG_ROUTER, "Router created, route 0x%08x%08x\n",
+ sc->route.hi, sc->route.lo);
+
+ return (0);
+}
+
+/*
+ * Tear down a router.  Fails with EBUSY if configuration commands are
+ * still queued against it; otherwise destroys the lock and frees the
+ * adapter array and the softc.
+ */
+int
+tb_router_detach(struct router_softc *sc)
+{
+
+	tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "tb_router_detach called\n");
+
+	if (TAILQ_FIRST(&sc->cmd_queue) != NULL)
+		return (EBUSY);
+
+	mtx_destroy(&sc->mtx);
+
+	if (sc->adapters != NULL)
+		free(sc->adapters, M_THUNDERBOLT);
+
+	/*
+	 * sc was already dereferenced above, so the original NULL guard
+	 * before this free was dead code.
+	 */
+	free(sc, M_THUNDERBOLT);
+
+	return (0);
+}
+
+/*
+ * Completion callback for config reads issued via tb_config_read*().
+ * Copies the response into the caller's buffer (unless a notify event
+ * aborted the command), clears the inflight slot, wakes the sleeping or
+ * polling requester, and kicks the queue to start the next command.
+ */
+static void
+router_get_config_cb(struct router_softc *sc, struct router_command *cmd,
+ void *arg)
+{
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "router_get_config_cb called\n");
+
+ /*
+ * Only do the copy if the command didn't have a notify event thrown.
+ * These events serve as asynchronous exception signals, which is
+ * cumbersome.
+ */
+ if (cmd->ev == 0)
+ bcopy((uint8_t *)cmd->resp_buffer,
+ (uint8_t *)cmd->callback_arg, cmd->dwlen * 4);
+
+ mtx_lock(&sc->mtx);
+ sc->inflight_cmd = NULL;
+
+ /* Sleeping callers are woken; polled callers watch for the flag. */
+ if ((cmd->flags & RCMD_POLLED) == 0)
+ wakeup(cmd);
+ else
+ cmd->flags |= RCMD_POLL_COMPLETE;
+
+ router_schedule_locked(sc, NULL);
+ mtx_unlock(&sc->mtx);
+}
+
+/*
+ * Synchronous (sleeping) config-space read.  Schedules the command and
+ * sleeps on it; on timeout (EWOULDBLOCK from msleep) the command is
+ * rescheduled up to cmd->retries additional times.  A notify event on the
+ * command turns the result into EINVAL.  Must not be called from a
+ * context that cannot sleep.
+ */
+int
+tb_config_read(struct router_softc *sc, u_int space, u_int adapter,
+ u_int offset, u_int dwlen, uint32_t *buf)
+{
+ struct router_command *cmd;
+ int error, retries;
+
+ if ((error = _tb_config_read(sc, space, adapter, offset, dwlen, buf,
+ router_get_config_cb, &cmd)) != 0)
+ return (error);
+
+ /* retries-- >= 0 gives the initial attempt plus cmd->retries more. */
+ retries = cmd->retries;
+ mtx_lock(&sc->mtx);
+ while (retries-- >= 0) {
+ error = router_schedule_locked(sc, cmd);
+ if (error)
+ break;
+
+ error = msleep(cmd, &sc->mtx, 0, "tbtcfg", cmd->timeout * hz);
+ if (error != EWOULDBLOCK)
+ break;
+ /* Timed out: drop the inflight claim before retrying. */
+ sc->inflight_cmd = NULL;
+ tb_debug(sc, DBG_ROUTER, "Config command timed out, retries=%d\n", retries);
+ }
+
+ if (cmd->ev != 0)
+ error = EINVAL;
+ router_free_cmd(sc, cmd);
+ mtx_unlock(&sc->mtx);
+ return (error);
+}
+
+/*
+ * Polled config-space read for contexts that cannot sleep (early attach).
+ * Busy-waits in 100ms steps for up to cmd->timeout seconds per attempt,
+ * retrying on timeout.  A notify event on the command yields EINVAL.
+ */
+int
+tb_config_read_polled(struct router_softc *sc, u_int space, u_int adapter,
+    u_int offset, u_int dwlen, uint32_t *buf)
+{
+	struct router_command *cmd;
+	int error, retries, timeout;
+
+	if ((error = _tb_config_read(sc, space, adapter, offset, dwlen, buf,
+	    router_get_config_cb, &cmd)) != 0)
+		return (error);
+
+	retries = cmd->retries;
+	cmd->flags |= RCMD_POLLED;
+
+	mtx_lock(&sc->mtx);
+	while (retries-- >= 0) {
+		error = router_schedule_locked(sc, cmd);
+		if (error)
+			break;
+		mtx_unlock(&sc->mtx);
+
+		/*
+		 * Re-arm the poll budget (in microseconds) for every
+		 * attempt.  The original armed it only once, so every
+		 * retry after the first timed out immediately.
+		 */
+		timeout = cmd->timeout * 1000000;
+		while (timeout > 0) {
+			DELAY(100 * 1000);
+			if ((cmd->flags & RCMD_POLL_COMPLETE) != 0)
+				break;
+			timeout -= 100 * 1000;
+		}
+
+		mtx_lock(&sc->mtx);
+		if ((cmd->flags & RCMD_POLL_COMPLETE) == 0) {
+			error = ETIMEDOUT;
+			sc->inflight_cmd = NULL;
+			tb_debug(sc, DBG_ROUTER,
+			    "Config command timed out, retries=%d\n",
+			    retries);
+			continue;
+		}
+		break;
+	}
+
+	if (cmd->ev != 0)
+		error = EINVAL;
+	router_free_cmd(sc, cmd);
+	mtx_unlock(&sc->mtx);
+	return (error);
+}
+
+/*
+ * Asynchronous config-space read.  Returns as soon as the command is
+ * queued; the supplied callback fires from the completion interrupt.
+ */
+int
+tb_config_read_async(struct router_softc *sc, u_int space, u_int adapter,
+    u_int offset, u_int dwlen, uint32_t *buf, void *cb)
+{
+	struct router_command *cmd;
+	int error;
+
+	error = _tb_config_read(sc, space, adapter, offset, dwlen, buf, cb,
+	    &cmd);
+	if (error != 0)
+		return (error);
+
+	return (router_schedule(sc, cmd));
+}
+
+/*
+ * Build (but do not schedule) a config-space read command: allocate the
+ * command, fill in the read request frame, and attach the completion
+ * callback and destination buffer.  On success the command is returned
+ * through rcmd for the caller to schedule and later free.
+ */
+static int
+_tb_config_read(struct router_softc *sc, u_int space, u_int adapter,
+ u_int offset, u_int dwlen, uint32_t *buf, void *cb,
+ struct router_command **rcmd)
+{
+ struct router_command *cmd;
+ struct tb_cfg_read *msg;
+ int error;
+
+ if ((error = router_alloc_cmd(sc, &cmd)) != 0)
+ return (error);
+
+ msg = router_get_frame_data(cmd);
+ bzero(msg, sizeof(*msg));
+ msg->route.hi = sc->route.hi;
+ msg->route.lo = sc->route.lo;
+ msg->addr_attrs = TB_CONFIG_ADDR(0, space, adapter, dwlen, offset);
+ cmd->callback = cb;
+ cmd->callback_arg = buf;
+ cmd->dwlen = dwlen;
+ /* Byte-swapping and CRC are applied by router_prepare_read(). */
+ router_prepare_read(sc, cmd, sizeof(*msg));
+
+ if (rcmd != NULL)
+ *rcmd = cmd;
+
+ return (0);
+}
+
+/*
+ * Config-space write.
+ * XXX Stub: not yet implemented.  All arguments are ignored and success
+ * is returned unconditionally.
+ */
+int
+tb_config_write(struct router_softc *sc, u_int space, u_int adapter,
+ u_int offset, u_int dwlen, uint32_t *buf)
+{
+
+ return(0);
+}
+
+/*
+ * Allocate a router command and its backing NHI TX frame.  Returns 0 on
+ * success, ENOMEM if the command cannot be allocated, or EBUSY when the
+ * ring has no free TX frames.
+ */
+static int
+router_alloc_cmd(struct router_softc *sc, struct router_command **rcmd)
+{
+	struct router_command *newcmd;
+
+	tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "router_alloc_cmd\n");
+
+	newcmd = malloc(sizeof(*newcmd), M_THUNDERBOLT, M_ZERO|M_NOWAIT);
+	if (newcmd == NULL) {
+		tb_debug(sc, DBG_ROUTER, "Cannot allocate cmd/response\n");
+		return (ENOMEM);
+	}
+
+	newcmd->nhicmd = nhi_alloc_tx_frame(sc->ring0);
+	if (newcmd->nhicmd == NULL) {
+		tb_debug(sc, DBG_ROUTER, "Cannot allocate command frame\n");
+		free(newcmd, M_THUNDERBOLT);
+		return (EBUSY);
+	}
+
+	newcmd->sc = sc;
+	*rcmd = newcmd;
+	tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "Allocated command with index %d\n",
+	    newcmd->nhicmd->idx);
+
+	return (0);
+}
+
+/*
+ * Release a router command, returning its NHI TX frame to the ring.
+ * A NULL cmd is tolerated and ignored.
+ */
+static void
+router_free_cmd(struct router_softc *sc, struct router_command *cmd)
+{
+
+	tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "router_free_cmd\n");
+
+	if (cmd == NULL)
+		return;
+
+	if (cmd->nhicmd != NULL) {
+		tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "Freeing nhi command %d\n",
+		    cmd->nhicmd->idx);
+		nhi_free_tx_frame(sc->ring0, cmd->nhicmd);
+	}
+	free(cmd, M_THUNDERBOLT);
+}
+
+/*
+ * Finalize a read request frame for transmission: byte-swap the payload
+ * to big-endian wire order, append the CRC as the final dword, and fill
+ * in the NHI frame bookkeeping (PDF type, lengths, response buffer) and
+ * the command's retry/timeout policy.  'len' is the total request length
+ * in bytes, including the trailing 4-byte CRC.
+ */
+static void
+router_prepare_read(struct router_softc *sc, struct router_command *cmd,
+ int len)
+{
+ struct nhi_cmd_frame *nhicmd;
+ uint32_t *msg;
+ int msglen, i;
+
+ KASSERT(cmd != NULL, ("cmd cannot be NULL\n"));
+ KASSERT(len != 0, ("Invalid zero-length command\n"));
+ KASSERT(len % 4 == 0, ("Message must be 32bit padded\n"));
+
+ nhicmd = cmd->nhicmd;
+ /* msglen is the payload dword count, excluding the CRC slot. */
+ msglen = (len - 4) / 4;
+ for (i = 0; i < msglen; i++)
+ nhicmd->data[i] = htobe32(nhicmd->data[i]);
+
+ /* CRC is computed over the already-swapped payload bytes. */
+ msg = (uint32_t *)nhicmd->data;
+ msg[msglen] = htobe32(tb_calc_crc(nhicmd->data, len-4));
+
+ nhicmd->pdf = PDF_READ;
+ nhicmd->req_len = len;
+
+ nhicmd->timeout = NHI_CMD_TIMEOUT;
+ nhicmd->retries = 0;
+ nhicmd->resp_buffer = (uint32_t *)cmd->resp_buffer;
+ /* Response carries dwlen data dwords plus 3 dwords of framing. */
+ nhicmd->resp_len = (cmd->dwlen + 3) * 4;
+ nhicmd->context = cmd;
+
+ cmd->retries = CFG_DEFAULT_RETRIES;
+ cmd->timeout = CFG_DEFAULT_TIMEOUT;
+
+ return;
+}
+
+/* Locked wrapper around router_schedule_locked(). */
+static int
+router_schedule(struct router_softc *sc, struct router_command *cmd)
+{
+	int rv;
+
+	mtx_lock(&sc->mtx);
+	rv = router_schedule_locked(sc, cmd);
+	mtx_unlock(&sc->mtx);
+	return (rv);
+}
+
+/*
+ * Queue a command (if non-NULL) and, while no command is in flight, pull
+ * the next queued command and hand it to the NHI TX ring.  Only one
+ * command is in flight per router at a time.  EBUSY from the ring is
+ * absorbed by re-queueing the command at the head.  Caller holds sc->mtx.
+ */
+static int
+router_schedule_locked(struct router_softc *sc, struct router_command *cmd)
+{
+ struct nhi_cmd_frame *nhicmd;
+ int error;
+
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "router_schedule\n");
+
+ if (cmd != NULL)
+ TAILQ_INSERT_TAIL(&sc->cmd_queue, cmd, link);
+
+ while ((sc->inflight_cmd == NULL) &&
+ ((cmd = TAILQ_FIRST(&sc->cmd_queue)) != NULL)) {
+
+ TAILQ_REMOVE(&sc->cmd_queue, cmd, link);
+ nhicmd = cmd->nhicmd;
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA,
+ "Scheduling command with index %d\n", nhicmd->idx);
+ sc->inflight_cmd = cmd;
+ if ((error = nhi_tx_schedule(sc->ring0, nhicmd)) != 0) {
+ tb_debug(sc, DBG_ROUTER, "nhi ring error "
+ "%d\n", error);
+ sc->inflight_cmd = NULL;
+ /* Ring full: keep the command for a later kick. */
+ if (error == EBUSY) {
+ TAILQ_INSERT_HEAD(&sc->cmd_queue, cmd, link);
+ error = 0;
+ }
+ break;
+ }
+ }
+
+ return (error);
+}
+
+/*
+ * TX completion interrupt for READ/WRITE requests.  The command callback
+ * is invoked only once the matching response has also been seen
+ * (CMD_RESP_COMPLETE set by router_response_intr).
+ */
+static void
+router_complete_intr(void *context, union nhi_ring_desc *ring,
+    struct nhi_cmd_frame *nhicmd)
+{
+	struct router_command *cmd;
+	struct router_softc *sc;
+
+	KASSERT(context != NULL, ("context cannot be NULL\n"));
+	KASSERT(nhicmd != NULL, ("nhicmd cannot be NULL\n"));
+
+	cmd = (struct router_command *)(nhicmd->context);
+	sc = cmd->sc;
+	tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "router_complete_intr called\n");
+
+	if ((nhicmd->flags & CMD_RESP_COMPLETE) != 0)
+		cmd->callback(sc, cmd, cmd->callback_arg);
+}
+
+/*
+ * RX interrupt for READ/WRITE responses.  Decodes the route from the
+ * (big-endian) response, looks up the target router, byte-swaps any read
+ * payload into its inflight command's response buffer, and fires the
+ * command callback once both request and response have completed.
+ */
+static void
+router_response_intr(void *context, union nhi_ring_desc *ring, struct nhi_cmd_frame *nhicmd)
+{
+ struct router_softc *sc, *dev;
+ struct tb_cfg_read_resp *read;
+ struct tb_cfg_write_resp *write;
+ struct router_command *cmd;
+ tb_route_t route;
+ u_int error, i, eof, len;
+ uint32_t attrs;
+
+ KASSERT(context != NULL, ("context cannot be NULL\n"));
+
+ sc = (struct router_softc *)context;
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "router_response_intr called\n");
+
+ /* The PDF type of the response lives in the EOF field. */
+ eof = ring->rxpost.eof_len >> RX_BUFFER_DESC_EOF_SHIFT;
+
+ /* 'len' is assigned only on the read path and used only there. */
+ if (eof == PDF_WRITE) {
+ write = (struct tb_cfg_write_resp *)nhicmd->data;
+ route.hi = be32toh(write->route.hi);
+ route.lo = be32toh(write->route.lo);
+ } else {
+ read = (struct tb_cfg_read_resp *)nhicmd->data;
+ route.hi = be32toh(read->route.hi);
+ route.lo = be32toh(read->route.lo);
+ attrs = be32toh(read->addr_attrs);
+ len = (attrs & TB_CFG_SIZE_MASK) >> TB_CFG_SIZE_SHIFT;
+ }
+
+ /* XXX Is this a problem? */
+ if ((route.hi & 0x80000000) == 0)
+ tb_debug(sc, DBG_ROUTER, "Invalid route\n");
+ /* Strip the CM bit before matching against the topology tree. */
+ route.hi &= ~0x80000000;
+
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "Looking up route 0x%08x%08x\n",
+ route.hi, route.lo);
+
+ error = router_lookup_device(sc, route, &dev);
+ if (error != 0 || dev == NULL) {
+ tb_debug(sc, DBG_ROUTER, "Cannot find device, error= %d\n",
+ error);
+ return;
+ }
+
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "Found device %s route 0x%08x%08x, "
+ "inflight_cmd= %p\n", device_get_nameunit(dev->dev), dev->route.hi,
+ dev->route.lo, dev->inflight_cmd);
+
+ cmd = dev->inflight_cmd;
+ if (cmd == NULL) {
+ tb_debug(dev, DBG_ROUTER, "Null inflight cmd\n");
+ return;
+ }
+
+ /* NOTE(review): 'len' is firmware-provided and is not checked
+ * against resp_len before this copy -- confirm upper bound. */
+ if (eof == PDF_READ) {
+ for (i = 0; i < len; i++)
+ cmd->nhicmd->resp_buffer[i] = be32toh(read->data[i]);
+ }
+
+ cmd->nhicmd->flags |= CMD_RESP_COMPLETE;
+ if (cmd->nhicmd->flags & CMD_REQ_COMPLETE) {
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "TX_COMPLETE set\n");
+ cmd->callback(dev, cmd, cmd->callback_arg);
+ }
+
+ return;
+}
+
+/*
+ * RX interrupt for NOTIFY packets.  Decodes the event and adapter from
+ * the frame and, for error/ack events, records the event code on the
+ * inflight command and completes it via its callback so the waiter can
+ * observe the failure (tb_config_read* maps cmd->ev != 0 to EINVAL).
+ */
+static void
+router_notify_intr(void *context, union nhi_ring_desc *ring, struct nhi_cmd_frame *nhicmd)
+{
+ struct router_softc *sc;
+ struct router_command *cmd;
+ struct tb_cfg_notify event;
+ u_int ev, adap;
+
+ KASSERT(context != NULL, ("context cannot be NULL\n"));
+
+ sc = (struct router_softc *)context;
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "router_notify_intr called\n");
+
+ /* Notify frames are three big-endian dwords: route hi/lo + event. */
+ event.route.hi = be32toh(nhicmd->data[0]);
+ event.route.lo = be32toh(nhicmd->data[1]);
+ event.event_adap = be32toh(nhicmd->data[2]);
+
+ ev = GET_NOTIFY_EVENT(&event);
+ adap = GET_NOTIFY_ADAPTER(&event);
+
+ tb_debug(sc, DBG_ROUTER, "Event route 0x%08x%08x adap %d code %s\n",
+ event.route.hi, event.route.lo, adap,
+ tb_get_string(ev, tb_notify_event));
+
+ switch (ev) {
+ case TB_CFG_ERR_CONN:
+ case TB_CFG_ERR_LINK:
+ case TB_CFG_ERR_ADDR:
+ case TB_CFG_ERR_ADP:
+ case TB_CFG_ERR_ENUM:
+ case TB_CFG_ERR_NUA:
+ case TB_CFG_ERR_LEN:
+ case TB_CFG_ERR_HEC:
+ case TB_CFG_ERR_FC:
+ case TB_CFG_ERR_PLUG:
+ case TB_CFG_ERR_LOCK:
+ case TB_CFG_HP_ACK:
+ case TB_CFG_DP_BW:
+ if (sc->inflight_cmd != NULL) {
+ cmd = sc->inflight_cmd;
+ cmd->ev = ev;
+ cmd->callback(sc, cmd, cmd->callback_arg);
+ }
+ break;
+ default:
+ /* Other events are logged above and otherwise ignored. */
+ break;
+ }
+ return;
+}
+
+/*
+ * Advance a capability-list walk by one step.  Reads the capability
+ * header at cap->next_cap and updates cap with its id, successor, and
+ * current offset.  For router VSC/VSEC capabilities, a second read pulls
+ * the vendor-specific header to get the real length and successor.
+ * Returns 0 or an errno.
+ */
+int
+tb_config_next_cap(struct router_softc *sc, struct router_cfg_cap *cap)
+{
+	union tb_cfg_cap *tbcap;
+	uint32_t *buf;
+	uint16_t current;
+	int error;
+
+	KASSERT(cap != NULL, ("cap cannot be NULL\n"));
+	KASSERT(cap->next_cap != 0, ("next_cap cannot be 0\n"));
+
+	/* The original did not check this allocation before use. */
+	buf = malloc(sizeof(*tbcap), M_THUNDERBOLT, M_NOWAIT|M_ZERO);
+	if (buf == NULL)
+		return (ENOMEM);
+
+	current = cap->next_cap;
+	error = tb_config_read(sc, cap->space, cap->adap, current, 1, buf);
+	if (error) {
+		/* The original leaked buf on this path. */
+		free(buf, M_THUNDERBOLT);
+		return (error);
+	}
+
+	tbcap = (union tb_cfg_cap *)buf;
+	cap->cap_id = tbcap->hdr.cap_id;
+	cap->next_cap = tbcap->hdr.next_cap;
+	cap->current_cap = current;
+
+	if ((cap->space != TB_CFG_CS_ROUTER) &&
+	    (tbcap->hdr.cap_id != TB_CFG_CAP_VSC)) {
+		free(buf, M_THUNDERBOLT);
+		return (0);
+	}
+
+	/*
+	 * The original discarded this read's status and then tested a
+	 * stale 'error' that was always 0.
+	 */
+	error = tb_config_read(sc, cap->space, cap->adap, current, 2, buf);
+	if (error) {
+		free(buf, M_THUNDERBOLT);
+		return (error);
+	}
+
+	cap->vsc_id = tbcap->vsc.vsc_id;
+	cap->vsc_len = tbcap->vsc.len;
+	/* A zero VSC length means this is the extended (VSEC) form. */
+	if (tbcap->vsc.len == 0) {
+		cap->next_cap = tbcap->vsec.vsec_next_cap;
+		cap->vsec_len = tbcap->vsec.vsec_len;
+	}
+
+	free(buf, M_THUNDERBOLT);
+	return (0);
+}
+
+/*
+ * Walk a capability list (starting at cap->next_cap) until an entry
+ * matching the requested cap_id/vsc_id pair is found.  On success,
+ * cap->current_cap holds the matching offset.  Returns 0, EINVAL if the
+ * list ends or an offset is out of range, or the error from the walk.
+ */
+int
+tb_config_find_cap(struct router_softc *sc, struct router_cfg_cap *cap)
+{
+	u_int cap_id, vsc_id;
+	int error;
+
+	tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "tb_config_find_cap called\n");
+
+	cap_id = cap->cap_id;
+	vsc_id = cap->vsc_id;
+
+	cap->cap_id = cap->vsc_id = 0;
+	while ((cap->cap_id != cap_id) || (cap->vsc_id != vsc_id)) {
+		tb_debug(sc, DBG_ROUTER|DBG_EXTRA,
+		    "Looking for cap %d at offset %d\n", cap->cap_id,
+		    cap->next_cap);
+		if ((cap->next_cap == 0) ||
+		    (cap->next_cap > TB_CFG_CAP_OFFSET_MAX))
+			return (EINVAL);
+		error = tb_config_next_cap(sc, cap);
+		if (error)
+			/*
+			 * Propagate the failure; the original broke out
+			 * and returned 0, reporting success on error.
+			 */
+			return (error);
+	}
+
+	return (0);
+}
+
+/*
+ * Locate a capability in a router's own config space.  Reads the router
+ * header to find the head of the capability list, then walks it looking
+ * for the cap/vsc pair.  On success the offset is stored in *offset.
+ */
+int
+tb_config_find_router_cap(struct router_softc *sc, u_int cap, u_int vsc, u_int *offset)
+{
+	struct tb_cfg_router *cfg;
+	struct router_cfg_cap rcap;
+	uint32_t *buf;
+	int error;
+
+	buf = malloc(8 * 4, M_THUNDERBOLT, M_NOWAIT|M_ZERO);
+	if (buf == NULL)
+		return (ENOMEM);
+
+	error = tb_config_router_read(sc, 0, 5, buf);
+	if (error == 0) {
+		cfg = (struct tb_cfg_router *)buf;
+		rcap.space = TB_CFG_CS_ROUTER;
+		rcap.adap = 0;
+		rcap.next_cap = GET_ROUTER_CS_NEXT_CAP(cfg);
+		rcap.cap_id = cap;
+		rcap.vsc_id = vsc;
+		error = tb_config_find_cap(sc, &rcap);
+		if (error == 0)
+			*offset = rcap.current_cap;
+	}
+
+	free(buf, M_THUNDERBOLT);
+	return (error);
+}
+
+/* Convenience wrapper: find a Vendor Specific Capability on the router. */
+int
+tb_config_find_router_vsc(struct router_softc *sc, u_int cap, u_int *offset)
+{
+
+ return (tb_config_find_router_cap(sc, TB_CFG_CAP_VSC, cap, offset));
+}
+
+/* Convenience wrapper: find an extended (VSEC) capability on the router. */
+int
+tb_config_find_router_vsec(struct router_softc *sc, u_int cap, u_int *offset)
+{
+
+ return (tb_config_find_router_cap(sc, TB_CFG_CAP_VSEC, cap, offset));
+}
+
+/*
+ * Locate a capability in an adapter's config space.  Reads the adapter
+ * header to find the head of its capability list, then walks it.  On
+ * success the capability offset is stored in *offset.
+ */
+int
+tb_config_find_adapter_cap(struct router_softc *sc, u_int adap, u_int cap, u_int *offset)
+{
+	struct tb_cfg_adapter *cfg;
+	struct router_cfg_cap rcap;
+	uint32_t *buf;
+	int error;
+
+	buf = malloc(8 * 4, M_THUNDERBOLT, M_NOWAIT|M_ZERO);
+	if (buf == NULL)
+		return (ENOMEM);
+
+	error = tb_config_adapter_read(sc, adap, 0, 8, buf);
+	if (error == 0) {
+		cfg = (struct tb_cfg_adapter *)buf;
+		rcap.space = TB_CFG_CS_ADAPTER;
+		rcap.adap = adap;
+		rcap.next_cap = GET_ADP_CS_NEXT_CAP(cfg);
+		rcap.cap_id = cap;
+		rcap.vsc_id = 0;
+		error = tb_config_find_cap(sc, &rcap);
+		if (error == 0)
+			*offset = rcap.current_cap;
+	}
+
+	free(buf, M_THUNDERBOLT);
+	return (error);
+}
+
+/*
+ * Fetch the 16-byte UUID from the router's Link Controller VSEC
+ * registers into the caller-supplied buffer.
+ */
+int
+tb_config_get_lc_uuid(struct router_softc *rsc, uint8_t *uuid)
+{
+	uint32_t buf[8];
+	u_int error, offset;
+
+	bzero(buf, sizeof(buf));
+
+	/* Find the LC register block, then read 4 dwords of UUID. */
+	error = tb_config_find_router_vsec(rsc, TB_CFG_VSEC_LC, &offset);
+	if (error != 0) {
+		tb_debug(rsc, DBG_ROUTER, "Error finding LC registers: %d\n",
+		    error);
+		return (error);
+	}
+
+	error = tb_config_router_read(rsc, offset + TB_LC_UUID, 4, buf);
+	if (error != 0) {
+		tb_debug(rsc, DBG_ROUTER, "Error fetching UUID: %d\n", error);
+		return (error);
+	}
+
+	bcopy(buf, uuid, 16);
+	return (0);
+}
diff --git a/sys/dev/thunderbolt/router_var.h b/sys/dev/thunderbolt/router_var.h
new file mode 100644
index 000000000000..8366ede852e7
--- /dev/null
+++ b/sys/dev/thunderbolt/router_var.h
@@ -0,0 +1,242 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ROUTER_VAR_H
+#define _ROUTER_VAR_H
+
+struct router_softc;
+struct router_command;
+struct router_topo;
+
+typedef void (*router_callback_t)(struct router_softc *,
+ struct router_command *, void *);
+
+/*
+ * One queued or in-flight config-space transaction on ring0.
+ */
+struct router_command {
+ TAILQ_ENTRY(router_command) link;
+ struct router_softc *sc;
+ struct nhi_cmd_frame *nhicmd; /* TX frame backing this command */
+ u_int flags;
+#define RCMD_POLLED (1 << 0)
+#define RCMD_POLL_COMPLETE (1 << 1)
+ int resp_len;
+ router_callback_t callback; /* completion callback */
+ void *callback_arg;
+ u_int dwlen;
+ u_int timeout;
+ int retries;
+ u_int ev;
+ uint8_t resp_buffer[NHI_RING0_FRAME_SIZE]; /* copy of the response */
+};
+
+/*
+ * Per-router state.  One instance exists for every router discovered
+ * in the Thunderbolt topology, keyed by its route.
+ */
+struct router_softc {
+ TAILQ_ENTRY(router_softc) link;
+ u_int debug; /* debug mask, see tb_debug.h */
+ tb_route_t route;
+ device_t dev;
+ struct nhi_softc *nsc; /* owning host interface */
+
+ struct mtx mtx;
+ struct nhi_ring_pair *ring0;
+ TAILQ_HEAD(,router_command) cmd_queue;
+
+ /* Only one command is outstanding at a time. */
+ struct router_command *inflight_cmd;
+
+ uint8_t depth;
+ uint8_t max_adap;
+
+ /* Per-adapter child routers, indexed by adapter number. */
+ struct router_softc **adapters;
+
+ uint32_t uuid[4];
+};
+
+/*
+ * Cursor for walking a capability list in a config space; filled in by
+ * the caller and advanced by tb_config_next_cap()/tb_config_find_cap().
+ */
+struct router_cfg_cap {
+ uint16_t current_cap; /* offset of the capability just visited */
+ uint16_t next_cap; /* offset of the next capability, 0 at end */
+ uint32_t space; /* which config space is being walked */
+ uint8_t adap;
+ uint8_t cap_id; /* capability id being searched for */
+ uint8_t vsc_id; /* VSC/VSEC sub-id, 0 if not applicable */
+ uint8_t vsc_len;
+ uint16_t vsec_len;
+};
+
+int tb_router_attach(struct router_softc *, tb_route_t);
+int tb_router_attach_root(struct nhi_softc *, tb_route_t);
+int tb_router_detach(struct router_softc *);
+int tb_config_read(struct router_softc *, u_int, u_int, u_int, u_int,
+ uint32_t *);
+int tb_config_read_polled(struct router_softc *, u_int, u_int, u_int, u_int,
+ uint32_t *);
+int tb_config_read_async(struct router_softc *, u_int, u_int, u_int, u_int,
+ uint32_t *, void *);
+int tb_config_write(struct router_softc *, u_int, u_int, u_int, u_int,
+ uint32_t *);
+int tb_config_next_cap(struct router_softc *, struct router_cfg_cap *);
+int tb_config_find_cap(struct router_softc *, struct router_cfg_cap *);
+int tb_config_find_router_cap(struct router_softc *, u_int, u_int, u_int *);
+int tb_config_find_router_vsc(struct router_softc *, u_int, u_int *);
+int tb_config_find_router_vsec(struct router_softc *, u_int, u_int *);
+int tb_config_find_adapter_cap(struct router_softc *, u_int, u_int, u_int *);
+int tb_config_get_lc_uuid(struct router_softc *, uint8_t *);
+
+/*
+ * Build a config-space address dword.  Every argument and the whole
+ * expansion are parenthesized so expression arguments cannot
+ * mis-associate with the shifts and OR operators.
+ */
+#define TB_CONFIG_ADDR(seq, space, adapter, dwlen, offset) \
+ ((((seq) << TB_CFG_SEQ_SHIFT) | (space) | \
+ ((adapter) << TB_CFG_ADAPTER_SHIFT) | ((dwlen) << TB_CFG_SIZE_SHIFT) | \
+ ((offset) & TB_CFG_ADDR_MASK)))
+
+/*
+ * Compose the 64-bit route from its hi/lo halves.  The expansion is
+ * fully parenthesized; without the outer parentheses an expression such
+ * as TB_ROUTE(r) & mask would bind the mask to route.lo only.
+ */
+#define TB_ROUTE(router) \
+ (((uint64_t)(router)->route.hi << 32) | (router)->route.lo)
+
+/* Return a pointer to the data payload of the command's TX frame. */
+static __inline void *
+router_get_frame_data(struct router_command *cmd)
+{
+ return ((void *)cmd->nhicmd->data);
+}
+
+/*
+ * Read the Router config space for the router referred to in the softc.
+ * addr - The dword offset in the config space
+ * dwlen - The number of dwords
+ * buf - must be large enough to hold the number of dwords requested.
+ */
+static __inline int
+tb_config_router_read(struct router_softc *sc, u_int addr, u_int dwlen,
+ uint32_t *buf)
+{
+ return (tb_config_read(sc, TB_CFG_CS_ROUTER, 0, addr, dwlen, buf));
+}
+
+static __inline int
+tb_config_router_read_polled(struct router_softc *sc, u_int addr, u_int dwlen,
+ uint32_t *buf)
+{
+ return (tb_config_read_polled(sc, TB_CFG_CS_ROUTER, 0, addr, dwlen, buf));
+}
+
+/*
+ * Write the Router config space for the router referred to in the softc.
+ * addr - The dword offset in the config space
+ * dwlen - The number of dwords
+ * buf - must be large enough to hold the number of dwords requested.
+ */
+static __inline int
+tb_config_router_write(struct router_softc *sc, u_int addr, u_int dwlen,
+ uint32_t *buf)
+{
+ return (tb_config_write(sc, TB_CFG_CS_ROUTER, 0, addr, dwlen, buf));
+}
+
+/*
+ * Read the Adapter config space for the router referred to in the softc.
+ * adap - Adapter number
+ * addr - The dword offset in the config space
+ * dwlen - The number of dwords
+ * buf - must be large enough to hold the number of dwords requested.
+ */
+static __inline int
+tb_config_adapter_read(struct router_softc *sc, u_int adap, u_int addr,
+ u_int dwlen, uint32_t *buf)
+{
+ return (tb_config_read(sc, TB_CFG_CS_ADAPTER, adap, addr, dwlen, buf));
+}
+
+/*
+ * Write the Adapter config space for the router referred to in the softc.
+ * adap - Adapter number
+ * addr - The dword offset in the config space
+ * dwlen - The number of dwords
+ * buf - must hold the number of dwords to be written.
+ */
+static __inline int
+tb_config_adapter_write(struct router_softc *sc, u_int adap, u_int addr,
+ u_int dwlen, uint32_t *buf)
+{
+ return (tb_config_write(sc, TB_CFG_CS_ADAPTER, adap, addr, dwlen, buf));
+}
+
+/*
+ * Read the Path config space for the router referred to in the softc.
+ * Each path entry is 2 dwords, hence the doubling below.
+ * adap - Adapter number
+ * hopid - HopID of the path
+ * num - The number of adjacent paths
+ * buf - must be large enough to hold the number of dwords requested.
+ */
+static __inline int
+tb_config_path_read(struct router_softc *sc, u_int adap, u_int hopid,
+ u_int num, uint32_t *buf)
+{
+ return (tb_config_read(sc, TB_CFG_CS_PATH, adap, hopid * 2,
+     num * 2, buf));
+}
+
+/*
+ * Write the Path config space for the router referred to in the softc.
+ * Each path entry is 2 dwords, hence the doubling below.
+ * adap - Adapter number
+ * hopid - HopID of the path
+ * num - The number of adjacent paths
+ * buf - must hold the number of dwords to be written.
+ */
+static __inline int
+tb_config_path_write(struct router_softc *sc, u_int adap, u_int hopid,
+ u_int num, uint32_t *buf)
+{
+ return (tb_config_write(sc, TB_CFG_CS_PATH, adap, hopid * 2,
+     num * 2, buf));
+}
+
+/*
+ * Read the Counters config space for the router referred to in the softc.
+ * Counters come in sets of 3 dwords.
+ * adap - Adapter number
+ * set - The counter set index
+ * num - The number of adjacent counter sets to read
+ * buf - must be large enough to hold the number of dwords requested.
+ */
+static __inline int
+tb_config_counters_read(struct router_softc *sc, u_int adap, u_int set,
+ u_int num, uint32_t *buf)
+{
+ return (tb_config_read(sc, TB_CFG_CS_COUNTERS, adap, set * 3,
+ num * 3, buf));
+}
+
+/* Record this router as the root of the topology on its host interface. */
+static __inline void
+tb_config_set_root(struct router_softc *sc)
+{
+ sc->nsc->root_rsc = sc;
+}
+
+/* Return the root router recorded on this router's host interface. */
+static __inline void *
+tb_config_get_root(struct router_softc *sc)
+{
+ return (sc->nsc->root_rsc);
+}
+
+#endif /* _ROUTER_VAR_H */
diff --git a/sys/dev/thunderbolt/tb_acpi_pcib.c b/sys/dev/thunderbolt/tb_acpi_pcib.c
new file mode 100644
index 000000000000..947df3688535
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_acpi_pcib.c
@@ -0,0 +1,181 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_acpi.h"
+#include "opt_thunderbolt.h"
+
+/* ACPI identified PCIe bridge for Thunderbolt */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/param.h>
+#include <sys/endian.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/stdarg.h>
+#include <sys/rman.h>
+
+#include <machine/pci_cfgreg.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcib_private.h>
+#include <dev/pci/pci_private.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+#include <dev/acpica/acpivar.h>
+#include <dev/acpica/acpi_pcibvar.h>
+#include <machine/md_var.h>
+
+#include <dev/thunderbolt/tb_reg.h>
+#include <dev/thunderbolt/tb_pcib.h>
+#include <dev/thunderbolt/nhi_var.h>
+#include <dev/thunderbolt/nhi_reg.h>
+#include <dev/thunderbolt/tbcfg_reg.h>
+#include <dev/thunderbolt/tb_debug.h>
+
+static int tb_acpi_pcib_probe(device_t);
+static int tb_acpi_pcib_attach(device_t);
+static int tb_acpi_pcib_detach(device_t);
+
+/* ACPI attachment for Thunderbolt Bridges */
+
+/*
+ * Probe for an ACPI-enumerated PCI-PCI bridge that is a Thunderbolt
+ * bridge.  Returns a bus probe priority (negative) on a match, or
+ * ENXIO when the device is not eligible.
+ */
+static int
+tb_acpi_pcib_probe(device_t dev)
+{
+ char desc[TB_DESC_MAX], desc1[TB_DESC_MAX];
+ int val;
+
+ /* Only ACPI-visible PCI-PCI bridges with config access qualify. */
+ if (pci_get_class(dev) != PCIC_BRIDGE ||
+ pci_get_subclass(dev) != PCIS_BRIDGE_PCI ||
+ acpi_disabled("pci"))
+ return (ENXIO);
+ if (acpi_get_handle(dev) == NULL)
+ return (ENXIO);
+ if (pci_cfgregopen() == 0)
+ return (ENXIO);
+
+ /*
+ * A negative value from tb_pcib_probe_common() means this is a
+ * Thunderbolt bridge.  Bump the priority by one so this ACPI-aware
+ * attachment wins over the conventional Thunderbolt PCIb driver.
+ */
+ if ((val = tb_pcib_probe_common(dev, desc)) < 0) {
+ val++;
+ snprintf(desc1, TB_DESC_MAX, "ACPI %s", desc);
+ device_set_desc_copy(dev, desc1);
+ }
+
+ return (val);
+}
+
+/*
+ * Attach the ACPI-aware Thunderbolt bridge: do the common Thunderbolt
+ * bridge setup, cache the ACPI handle, nudge the platform firmware,
+ * then finish standard pcib attachment.
+ */
+static int
+tb_acpi_pcib_attach(device_t dev)
+{
+ struct tb_pcib_softc *sc;
+ int error;
+
+ error = tb_pcib_attach_common(dev);
+ if (error)
+ return (error);
+
+ sc = device_get_softc(dev);
+ sc->ap_handle = acpi_get_handle(dev);
+ KASSERT(sc->ap_handle != NULL, ("ACPI handle cannot be NULL\n"));
+
+ /* Execute OSUP in case the BIOS didn't */
+ if (TB_IS_ROOT(sc)) {
+ ACPI_OBJECT_LIST list;
+ ACPI_OBJECT arg;
+ ACPI_BUFFER buf;
+ ACPI_STATUS s;
+
+ tb_debug(sc, DBG_BRIDGE, "Executing OSUP\n");
+
+ /* OSUP takes a single integer argument of 0. */
+ list.Pointer = &arg;
+ list.Count = 1;
+ arg.Integer.Value = 0;
+ arg.Type = ACPI_TYPE_INTEGER;
+ buf.Length = ACPI_ALLOCATE_BUFFER;
+ buf.Pointer = NULL;
+
+ /*
+ * NOTE(review): the return status is only logged; OSUP failure
+ * is treated as non-fatal here — confirm that is intended.
+ */
+ s = AcpiEvaluateObject(sc->ap_handle, "\\_GPE.OSUP", &list,
+ &buf);
+ tb_debug(sc, DBG_BRIDGE|DBG_FULL,
+ "ACPI returned %d, buf= %p\n", s, buf.Pointer);
+ if (buf.Pointer != NULL)
+ tb_debug(sc, DBG_BRIDGE|DBG_FULL, "buffer= 0x%x\n",
+ *(uint32_t *)buf.Pointer);
+
+ AcpiOsFree(buf.Pointer);
+ }
+
+ /* Standard pcib setup, then fetch the ACPI routing table. */
+ pcib_attach_common(dev);
+ acpi_pcib_fetch_prt(dev, &sc->ap_prt);
+
+ return (pcib_attach_child(dev));
+}
+
+/*
+ * Detach the bridge; the ACPI PRT buffer is released only after the
+ * generic pcib detach succeeds.
+ */
+static int
+tb_acpi_pcib_detach(device_t dev)
+{
+ struct tb_pcib_softc *sc;
+ int error;
+
+ sc = device_get_softc(dev);
+ tb_debug(sc, DBG_BRIDGE|DBG_ROUTER|DBG_EXTRA, "tb_acpi_pcib_detach\n");
+
+ error = pcib_detach(dev);
+ if (error == 0)
+ AcpiOsFree(sc->ap_prt.Pointer);
+ return (error);
+}
+
+static device_method_t tb_acpi_pcib_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, tb_acpi_pcib_probe),
+ DEVMETHOD(device_attach, tb_acpi_pcib_attach),
+ DEVMETHOD(device_detach, tb_acpi_pcib_detach),
+
+ /* Thunderbolt interface is inherited */
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_2(tbolt, tb_acpi_pcib_driver, tb_acpi_pcib_methods,
+ sizeof(struct tb_pcib_softc), pcib_driver, tb_pcib_driver);
+DRIVER_MODULE_ORDERED(tb_acpi_pcib, pci, tb_acpi_pcib_driver,
+ NULL, NULL, SI_ORDER_MIDDLE);
+MODULE_DEPEND(tb_acpi_pcib, acpi, 1, 1, 1);
diff --git a/sys/dev/thunderbolt/tb_debug.c b/sys/dev/thunderbolt/tb_debug.c
new file mode 100644
index 000000000000..f455ee72e9f6
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_debug.c
@@ -0,0 +1,334 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_thunderbolt.h"
+
+/* PCIe bridge for Thunderbolt */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/sbuf.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+#include <sys/gsb_crc32.h>
+#include <sys/endian.h>
+
+#include <machine/bus.h>
+#include <machine/stdarg.h>
+
+#include <dev/thunderbolt/nhi_reg.h>
+#include <dev/thunderbolt/nhi_var.h>
+#include <dev/thunderbolt/tb_reg.h>
+#include <dev/thunderbolt/tb_var.h>
+#include <dev/thunderbolt/tbcfg_reg.h>
+#include <dev/thunderbolt/tb_debug.h>
+
+tb_string_t nhi_outmailcmd_opmode[] = {
+ { 0x000, "Safe Mode" },
+ { 0x100, "Authentication Mode" },
+ { 0x200, "Endpoint Mode" },
+ { 0x300, "Connection Manager Fully Functional" },
+ { 0, NULL }
+};
+
+tb_string_t nhi_frame_pdf[] = {
+ { 0x01, "PDF_READ" },
+ { 0x02, "PDF_WRITE" },
+ { 0x03, "PDF_NOTIFY" },
+ { 0x04, "PDF_NOTIFY_ACK" },
+ { 0x05, "PDF_HOTPLUG" },
+ { 0x06, "PDF_XDOMAIN_REQ" },
+ { 0x07, "PDF_XDOMAIN_RESP" },
+ { 0x0a, "PDF_CM_EVENT" },
+ { 0x0b, "PDF_CM_REQ" },
+ { 0x0c, "PDF_CM_RESP" },
+ { 0, NULL }
+};
+
+tb_string_t tb_security_level[] = {
+ { TBSEC_NONE, "None" },
+ { TBSEC_USER, "User" },
+ { TBSEC_SECURE, "Secure Authorization" },
+ { TBSEC_DP, "Display Port" },
+ { TBSEC_UNKNOWN,"Unknown" },
+ { 0, NULL }
+};
+
+tb_string_t tb_mbox_connmode[] = {
+ { INMAILCMD_SETMODE_CERT_TB_1ST_DEPTH, "Certified/1st" },
+ { INMAILCMD_SETMODE_ANY_TB_1ST_DEPTH, "Any/1st" },
+ { INMAILCMD_SETMODE_CERT_TB_ANY_DEPTH, "Certified/Any" },
+ { INMAILCMD_SETMODE_ANY_TB_ANY_DEPTH, "Any/Any" },
+ { 0, NULL }
+};
+
+tb_string_t tb_device_power[] = {
+ { 0x0, "Self-powered" },
+ { 0x1, "Normal power" },
+ { 0x2, "High power" },
+ { 0x3, "Unknown power draw" },
+ { 0, NULL }
+};
+
+tb_string_t tb_notify_code[] = {
+ { 0x03, "DEVCONN" },
+ { 0x04, "DISCONN" },
+ { 0x05, "DPCONN" },
+ { 0x06, "DOMCONN" },
+ { 0x07, "DOMDISCONN" },
+ { 0x08, "DPCHANGE" },
+ { 0x09, "I2C" },
+ { 0x0a, "RTD3" },
+ { 0, NULL }
+};
+
+tb_string_t tb_adapter_type[] = {
+ { ADP_CS2_UNSUPPORTED, "Unsupported Adapter" },
+ { ADP_CS2_LANE, "Lane Adapter" },
+ { ADP_CS2_HOSTIF, "Host Interface Adapter" },
+ { ADP_CS2_PCIE_DFP, "Downstream PCIe Adapter" },
+ { ADP_CS2_PCIE_UFP, "Upstream PCIe Adapter" },
+ { ADP_CS2_DP_OUT, "DP OUT Adapter" },
+ { ADP_CS2_DP_IN, "DP IN Adapter" },
+ { ADP_CS2_USB3_DFP, "Downstream USB3 Adapter" },
+ { ADP_CS2_USB3_UFP, "Upstream USB3 Adapter" },
+ { 0, NULL }
+};
+
+tb_string_t tb_adapter_state[] = {
+ { CAP_LANE_STATE_DISABLE, "Disabled" },
+ { CAP_LANE_STATE_TRAINING, "Training" },
+ { CAP_LANE_STATE_CL0, "CL0" },
+ { CAP_LANE_STATE_TXCL0, "TX CL0s" },
+ { CAP_LANE_STATE_RXCL0, "RX CL0s" },
+ { CAP_LANE_STATE_CL1, "CL1" },
+ { CAP_LANE_STATE_CL2, "CL2" },
+ { CAP_LANE_STATE_CLD, "CLd" },
+ { 0, NULL }
+};
+
+tb_string_t tb_notify_event[] = {
+ { TB_CFG_ERR_CONN, "Connection error" },
+ { TB_CFG_ERR_LINK, "Link error" },
+ { TB_CFG_ERR_ADDR, "Addressing error" },
+ { TB_CFG_ERR_ADP, "Invalid adapter" },
+ { TB_CFG_ERR_ENUM, "Enumeration error" },
+ { TB_CFG_ERR_NUA, "Adapter not enumerated" },
+ { TB_CFG_ERR_LEN, "Invalid request length" },
+ { TB_CFG_ERR_HEC, "Invalid packet header" },
+ { TB_CFG_ERR_FC, "Flow control error" },
+ { TB_CFG_ERR_PLUG, "Hot plug error" },
+ { TB_CFG_ERR_LOCK, "Adapter locked" },
+ { TB_CFG_HP_ACK, "Hotplug acknowledgement" },
+ { TB_CFG_DP_BW, "Display port bandwidth change" },
+ { 0, NULL }
+};
+
+/*
+ * Map a numeric key to its human-readable name using a NULL-terminated
+ * lookup table.  Returns "<null>" for a missing table and "<unknown>"
+ * for a key with no entry.
+ */
+const char *
+tb_get_string(uintmax_t key, tb_string_t *table)
+{
+ tb_string_t *entry;
+
+ if (table == NULL)
+  return ("<null>");
+
+ for (entry = table; entry->value != NULL; entry++) {
+  if (entry->key == key)
+   return (entry->value);
+ }
+
+ return ("<unknown>");
+}
+
+/*
+ * Mapping of sysctl token names to debug flag bits.  Each flag appears
+ * exactly once; the original table listed "info" twice.
+ */
+static struct tb_debug_string {
+ char *name;
+ int flag;
+} tb_debug_strings[] = {
+ {"info", DBG_INFO},
+ {"init", DBG_INIT},
+ {"rxq", DBG_RXQ},
+ {"txq", DBG_TXQ},
+ {"intr", DBG_INTR},
+ {"tb", DBG_TB},
+ {"mbox", DBG_MBOX},
+ {"bridge", DBG_BRIDGE},
+ {"cfg", DBG_CFG},
+ {"router", DBG_ROUTER},
+ {"port", DBG_PORT},
+ {"hcm", DBG_HCM},
+ {"extra", DBG_EXTRA},
+ {"noisy", DBG_NOISY},
+ {"full", DBG_FULL}
+};
+
+enum tb_debug_level_combiner {
+ COMB_NONE,
+ COMB_ADD,
+ COMB_SUB
+};
+
+/*
+ * Sysctl handler for the per-device debug mask.  Reads report the mask
+ * in hex followed by the symbolic names of the set bits; writes accept
+ * the syntax handled by tb_parse_debug().
+ */
+int
+tb_debug_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ struct sbuf *sbuf;
+#if defined (THUNDERBOLT_DEBUG) && (THUNDERBOLT_DEBUG > 0)
+ struct tb_debug_string *string;
+ char *buffer;
+ size_t i, sz;
+ u_int *debug;
+ int len;
+#endif
+ int error;
+
+ error = sysctl_wire_old_buffer(req, 0);
+ if (error != 0)
+  return (error);
+
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+
+#if defined (THUNDERBOLT_DEBUG) && (THUNDERBOLT_DEBUG > 0)
+ debug = (u_int *)arg1;
+
+ sbuf_printf(sbuf, "%#x", *debug);
+
+ sz = sizeof(tb_debug_strings) / sizeof(tb_debug_strings[0]);
+ for (i = 0; i < sz; i++) {
+  string = &tb_debug_strings[i];
+  if (*debug & string->flag)
+   sbuf_printf(sbuf, ",%s", string->name);
+ }
+
+ error = sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+ if (error || req->newptr == NULL)
+  return (error);
+
+ len = req->newlen - req->newidx;
+ if (len == 0)
+  return (0);
+
+ /*
+  * Allocate one extra zeroed byte so the user-supplied string is
+  * always NUL-terminated before tb_parse_debug() walks it with
+  * strsep(); the original allocation had no guaranteed terminator.
+  */
+ buffer = malloc(len + 1, M_THUNDERBOLT, M_ZERO|M_WAITOK);
+ error = SYSCTL_IN(req, buffer, len);
+ /* Only parse the input if the copy from userland succeeded. */
+ if (error == 0)
+  tb_parse_debug(debug, buffer);
+
+ free(buffer, M_THUNDERBOLT);
+#else
+ sbuf_printf(sbuf, "debugging unavailable");
+ error = sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+#endif
+
+ return (error);
+}
+
+/*
+ * Parse a debug-mask string and update *debug.  The string may start
+ * with '+' (add flags) or '-' (clear flags); otherwise the mask is
+ * replaced.  Tokens are separated by ':' or ',' and may be numeric
+ * (any strtol base-0 form) or one of the names in tb_debug_strings.
+ * The list buffer is modified in place by strsep().
+ */
+void
+tb_parse_debug(u_int *debug, char *list)
+{
+ struct tb_debug_string *string;
+ enum tb_debug_level_combiner op;
+ char *token, *endtoken;
+ size_t sz;
+ int flags, i;
+
+ if (list == NULL || *list == '\0')
+  return;
+
+ /* A leading +/- selects how the parsed flags combine with *debug. */
+ if (*list == '+') {
+  op = COMB_ADD;
+  list++;
+ } else if (*list == '-') {
+  op = COMB_SUB;
+  list++;
+ } else
+  op = COMB_NONE;
+ if (*list == '\0')
+  return;
+
+ flags = 0;
+ sz = sizeof(tb_debug_strings) / sizeof(tb_debug_strings[0]);
+ while ((token = strsep(&list, ":,")) != NULL) {
+
+  /* Handle integer flags */
+  flags |= strtol(token, &endtoken, 0);
+  if (token != endtoken)
+   continue;
+
+  /* Handle text flags */
+  for (i = 0; i < sz; i++) {
+   string = &tb_debug_strings[i];
+   if (strcasecmp(token, string->name) == 0) {
+    flags |= string->flag;
+    break;
+   }
+  }
+ }
+
+ switch (op) {
+ case COMB_NONE:
+  *debug = flags;
+  break;
+ case COMB_ADD:
+  *debug |= flags;
+  break;
+ case COMB_SUB:
+  *debug &= (~flags);
+  break;
+ }
+ return;
+}
+
+/*
+ * Conditionally print a debug message.  'debug' is the softc's mask and
+ * 'val' the message's required bits: the top two bits carry the verbosity
+ * level and the low 30 bits the subsystem.  The message prints when the
+ * mask's level is at least the message's level and at least one subsystem
+ * bit matches.  Compiles to nothing without THUNDERBOLT_DEBUG.
+ */
+void
+tbdbg_dprintf(device_t dev, u_int debug, u_int val, const char *fmt, ...)
+{
+#if defined(THUNDERBOLT_DEBUG) && (THUNDERBOLT_DEBUG > 0)
+ va_list ap;
+ u_int lvl, dbg;
+
+ lvl = debug & 0xc0000000;
+ dbg = debug & 0x3fffffff;
+ va_start(ap, fmt);
+ if ((lvl >= (val & 0xc0000000)) &&
+     ((dbg & (val & 0x3fffffff)) != 0)) {
+  /* Emit the device prefix, then the formatted message. */
+  device_printf(dev, "");
+  vprintf(fmt, ap);
+ }
+ va_end(ap);
+#endif
+}
diff --git a/sys/dev/thunderbolt/tb_debug.h b/sys/dev/thunderbolt/tb_debug.h
new file mode 100644
index 000000000000..4f5584420882
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_debug.h
@@ -0,0 +1,93 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Thunderbolt 3 driver debug strings
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _TB_DEBUG_H
+#define _TB_DEBUG_H
+
+typedef struct {
+ uintmax_t key;
+ const char * value;
+} tb_string_t;
+
+const char * tb_get_string(uintmax_t, tb_string_t *);
+int tb_debug_sysctl(SYSCTL_HANDLER_ARGS);
+void tb_parse_debug(u_int *, char *);
+
+extern tb_string_t nhi_outmailcmd_opmode[];
+extern tb_string_t nhi_frame_pdf[];
+extern tb_string_t tb_security_level[];
+extern tb_string_t tb_rdy_connmode[];
+extern tb_string_t tb_mbox_connmode[];
+extern tb_string_t tb_device_power[];
+extern tb_string_t tb_notify_code[];
+extern tb_string_t tb_adapter_type[];
+extern tb_string_t tb_adapter_state[];
+extern tb_string_t tb_notify_event[];
+
+enum {
+ /* Debug subsystems */
+ DBG_NONE = 0,
+ DBG_INIT = (1 << 0),
+ DBG_INFO = (1 << 1),
+ DBG_RXQ = (1 << 2),
+ DBG_TXQ = (1 << 3),
+ DBG_INTR = (1 << 4),
+ DBG_TB = (1 << 5),
+ DBG_MBOX = (1 << 6),
+ DBG_BRIDGE = (1 << 7),
+ DBG_CFG = (1 << 8),
+ DBG_ROUTER = (1 << 9),
+ DBG_PORT = (1 << 10),
+ DBG_HCM = (1 << 11),
+ /* Debug levels */
+ DBG_EXTRA = (1 << 30),
+ DBG_NOISY = (1 << 31),
+ DBG_FULL = DBG_EXTRA | DBG_NOISY
+};
+
+/*
+ * Macros to wrap printing.
+ * Each softc type needs a `dev` and `debug` field. Do tbdbg_printf as a
+ * function to make format errors more clear during compile.
+ */
+void tbdbg_dprintf(device_t dev, u_int debug, u_int val, const char *fmt, ...) __printflike(4, 5);
+
+#if defined(THUNDERBOLT_DEBUG) && (THUNDERBOLT_DEBUG > 0)
+#define tb_debug(sc, level, fmt...) \
+ tbdbg_dprintf((sc)->dev, (sc)->debug, level, ##fmt)
+#else
+#define tb_debug(sc, level, fmt...)
+#endif
+#define tb_printf(sc, fmt...) \
+ device_printf((sc)->dev, ##fmt)
+
+#endif /* _TB_DEBUG_H */
diff --git a/sys/dev/thunderbolt/tb_dev.c b/sys/dev/thunderbolt/tb_dev.c
new file mode 100644
index 000000000000..7ea545dee0c3
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_dev.c
@@ -0,0 +1,331 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_thunderbolt.h"
+
+/* Userspace control device for USB4 / TB3 */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/nv.h>
+#include <sys/taskqueue.h>
+#include <sys/gsb_crc32.h>
+#include <sys/endian.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+#include <machine/stdarg.h>
+
+#include <dev/thunderbolt/nhi_reg.h>
+#include <dev/thunderbolt/nhi_var.h>
+#include <dev/thunderbolt/tb_reg.h>
+#include <dev/thunderbolt/tb_var.h>
+#include <dev/thunderbolt/tbcfg_reg.h>
+#include <dev/thunderbolt/router_var.h>
+#include <dev/thunderbolt/tb_debug.h>
+#include <dev/thunderbolt/tb_dev.h>
+#include <dev/thunderbolt/tb_ioctl.h>
+
+struct tbdev_if;
+struct tbdev_dm;
+struct tbdev_rt;
+
+struct tbdev_if {
+ TAILQ_ENTRY(tbdev_if) dev_next;
+ char name[SPECNAMELEN];
+};
+
+struct tbdev_dm {
+ TAILQ_ENTRY(tbdev_dm) dev_next;
+ char uid[16];
+};
+
+struct tbdev_rt {
+ TAILQ_ENTRY(tbdev_rt) dev_next;
+ uint64_t route;
+};
+
+static int tbdev_static_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td);
+
+static struct cdevsw tbdev_static_devsw = {
+ .d_version = D_VERSION,
+ .d_ioctl = tbdev_static_ioctl,
+ .d_name = "tbt"
+};
+static struct cdev *tb_dev = NULL;
+
+static TAILQ_HEAD(, tbdev_if) tbdev_head = TAILQ_HEAD_INITIALIZER(tbdev_head);
+static TAILQ_HEAD(, tbdev_dm) tbdomain_head = TAILQ_HEAD_INITIALIZER(tbdomain_head);
+static TAILQ_HEAD(, tbdev_rt) tbrouter_head = TAILQ_HEAD_INITIALIZER(tbrouter_head);
+
+static struct mtx tbdev_mtx;
+MTX_SYSINIT(tbdev_mtx, &tbdev_mtx, "TBT Device Mutex", MTX_DEF);
+
+MALLOC_DEFINE(M_THUNDERBOLT, "thunderbolt", "memory for thunderbolt");
+
+/*
+ * Create the singleton /dev control node at boot.  Failure is logged
+ * but non-fatal; the driver proper can still operate without it.
+ */
+static void
+tbdev_init(void *arg)
+{
+
+ tb_dev = make_dev(&tbdev_static_devsw, 0, UID_ROOT, GID_OPERATOR,
+     0644, TBT_DEVICE_NAME);
+ if (tb_dev == NULL)
+  printf("Cannot create Thunderbolt system device\n");
+
+ return;
+}
+
+SYSINIT(tbdev_init, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, tbdev_init, NULL);
+
+/* Tear down the control node created by tbdev_init(). */
+static void
+tbdev_uninit(void *arg)
+{
+ if (tb_dev != NULL) {
+  destroy_dev(tb_dev);
+  tb_dev = NULL;
+ }
+}
+
+SYSUNINIT(tbdev_uninit, SI_SUB_KICK_SCHEDULER, SI_ORDER_ANY, tbdev_uninit, NULL);
+
+/*
+ * Register an NHI host interface with the control device so userland
+ * can discover it by name.  Returns 0 or ENOMEM.
+ */
+int
+tbdev_add_interface(struct nhi_softc *nhi)
+{
+ struct tbdev_if *ifce;
+
+ ifce = malloc(sizeof(struct tbdev_if), M_THUNDERBOLT, M_ZERO|M_NOWAIT);
+ if (ifce == NULL)
+  return (ENOMEM);
+
+ strlcpy(ifce->name, device_get_nameunit(nhi->dev), SPECNAMELEN);
+ mtx_lock(&tbdev_mtx);
+ TAILQ_INSERT_TAIL(&tbdev_head, ifce, dev_next);
+ mtx_unlock(&tbdev_mtx);
+
+ return (0);
+}
+
+/*
+ * Unregister an NHI host interface by name.  Removing an interface that
+ * was never registered is a harmless no-op; always returns 0.
+ */
+int
+tbdev_remove_interface(struct nhi_softc *nhi)
+{
+ struct tbdev_if *ifce = NULL, *if_back;
+ const char *name;
+
+ name = device_get_nameunit(nhi->dev);
+ mtx_lock(&tbdev_mtx);
+ /* SAFE variant: the matched element is removed inside the loop. */
+ TAILQ_FOREACH_SAFE(ifce, &tbdev_head, dev_next, if_back) {
+  if (strncmp(name, ifce->name, SPECNAMELEN) == 0) {
+   TAILQ_REMOVE(&tbdev_head, ifce, dev_next);
+   break;
+  }
+ }
+ mtx_unlock(&tbdev_mtx);
+
+ /* Free outside the lock; ifce is NULL when no match was found. */
+ if (ifce != NULL)
+  free(ifce, M_THUNDERBOLT);
+
+ return (0);
+}
+
+/* Placeholder: domain registration is not implemented yet. */
+int
+tbdev_add_domain(void *domain)
+{
+
+ return (0);
+}
+
+/* Placeholder: domain removal is not implemented yet. */
+int
+tbdev_remove_domain(void *domain)
+{
+
+ return (0);
+}
+
+/* Placeholder: router registration is not implemented yet. */
+int
+tbdev_add_router(struct router_softc *rt)
+{
+
+ return (0);
+}
+
+/* Placeholder: router removal is not implemented yet. */
+int
+tbdev_remove_router(struct router_softc *rt)
+{
+
+ return (0);
+}
+
+/*
+ * TBT_DISCOVER ioctl handler: unpack the user's nvlist, look up the
+ * requested object class (interface / domain / router), append the
+ * matching entries to the nvlist, and pack it back to userland.
+ *
+ * Fixes over the original: the unsupported-type branch no longer leaks
+ * tbdev_mtx, and every error path after the nvlist is created releases
+ * both the nvlist and any packed buffer.
+ */
+static int
+tbdev_discover(caddr_t addr)
+{
+ nvlist_t *nvl = NULL;
+ struct tbt_ioc *ioc = (struct tbt_ioc *)addr;
+ struct tbdev_if *dev;
+ struct tbdev_dm *dm;
+ struct tbdev_rt *rt;
+ void *nvlpacked = NULL;
+ const char *cmd = NULL;
+ int error = 0;
+
+ /* Validate the user-supplied buffer description up front. */
+ if ((ioc->data == NULL) || (ioc->size == 0)) {
+  printf("data or size is 0\n");
+  return (EINVAL);
+ }
+
+ if ((ioc->len == 0) || (ioc->len > TBT_IOCMAXLEN) ||
+     (ioc->len > ioc->size)) {
+  printf("len is wrong\n");
+  return (EINVAL);
+ }
+
+ nvlpacked = malloc(ioc->len, M_THUNDERBOLT, M_NOWAIT);
+ if (nvlpacked == NULL) {
+  printf("cannot allocate nvlpacked\n");
+  return (ENOMEM);
+ }
+
+ error = copyin(ioc->data, nvlpacked, ioc->len);
+ if (error) {
+  free(nvlpacked, M_THUNDERBOLT);
+  printf("error %d from copyin\n", error);
+  return (error);
+ }
+
+ nvl = nvlist_unpack(nvlpacked, ioc->len, NV_FLAG_NO_UNIQUE);
+ if (nvl == NULL) {
+  free(nvlpacked, M_THUNDERBOLT);
+  printf("cannot unpack nvlist\n");
+  return (EINVAL);
+ }
+ /* The scratch copyin buffer is no longer needed. */
+ free(nvlpacked, M_THUNDERBOLT);
+ nvlpacked = NULL;
+
+ if (nvlist_exists_string(nvl, TBT_DISCOVER_TYPE))
+  cmd = nvlist_get_string(nvl, TBT_DISCOVER_TYPE);
+ if (cmd == NULL) {
+  printf("cannot find type string\n");
+  error = EINVAL;
+  goto out;
+ }
+
+ mtx_lock(&tbdev_mtx);
+ if (strncmp(cmd, TBT_DISCOVER_IFACE, TBT_NAMLEN) == 0) {
+  TAILQ_FOREACH(dev, &tbdev_head, dev_next)
+   nvlist_add_string(nvl, TBT_DISCOVER_IFACE, dev->name);
+ } else if (strncmp(cmd, TBT_DISCOVER_DOMAIN, TBT_NAMLEN) == 0) {
+  TAILQ_FOREACH(dm, &tbdomain_head, dev_next)
+   nvlist_add_string(nvl, TBT_DISCOVER_DOMAIN, dm->uid);
+ } else if (strncmp(cmd, TBT_DISCOVER_ROUTER, TBT_NAMLEN) == 0) {
+  TAILQ_FOREACH(rt, &tbrouter_head, dev_next)
+   nvlist_add_number(nvl, TBT_DISCOVER_ROUTER, rt->route);
+ } else {
+  /* The original jumped to 'out' here with the mutex held. */
+  mtx_unlock(&tbdev_mtx);
+  printf("cannot find supported type\n");
+  error = EINVAL;
+  goto out;
+ }
+ mtx_unlock(&tbdev_mtx);
+
+ /* nvlist_add_* failures are latched in the nvlist's error state. */
+ error = nvlist_error(nvl);
+ if (error != 0) {
+  printf("error %d state in nvlist\n", error);
+  goto out;
+ }
+
+ nvlpacked = nvlist_pack(nvl, &ioc->len);
+ if (nvlpacked == NULL) {
+  printf("cannot allocate new packed buffer\n");
+  error = ENOMEM;
+  goto out;
+ }
+ if (ioc->size < ioc->len) {
+  printf("packed buffer is too big to copyout\n");
+  error = ENOSPC;
+  goto out;
+ }
+
+ error = copyout(nvlpacked, ioc->data, ioc->len);
+ if (error)
+  printf("error %d on copyout\n", error);
+
+out:
+ /* From here, nvlpacked only ever comes from nvlist_pack(). */
+ if (nvlpacked != NULL)
+  free(nvlpacked, M_NVLIST);
+ if (nvl != NULL)
+  nvlist_destroy(nvl);
+
+ return (error);
+}
+
+/*
+ * TBT_REQUEST ioctl handler.  Packs a response nvlist back to userland.
+ *
+ * NOTE(review): 'nvl' is never populated (or even created) before being
+ * packed; this looks like placeholder logic awaiting a real request
+ * handler — confirm intended behavior.  The packed buffer, when one is
+ * produced, is now freed on every path; the original leaked it.
+ */
+static int
+tbdev_request(caddr_t addr)
+{
+ struct tbt_ioc *ioc = (struct tbt_ioc *)addr;
+ nvlist_t *nvl = NULL;
+ void *nvlpacked = NULL;
+ int error = 0;
+
+ if ((ioc->data == NULL) || (ioc->size == 0))
+  return (ENOMEM);
+
+ nvlpacked = nvlist_pack(nvl, &ioc->len);
+ if (nvlpacked == NULL)
+  return (ENOMEM);
+ if (ioc->size < ioc->len) {
+  free(nvlpacked, M_NVLIST);
+  return (ENOSPC);
+ }
+
+ error = copyout(nvlpacked, ioc->data, ioc->len);
+ free(nvlpacked, M_NVLIST);
+ return (error);
+}
+
+/*
+ * Dispatch ioctls on the static /dev/tbt control node.  Unknown
+ * commands return EINVAL.
+ */
+static int
+tbdev_static_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
+    struct thread *td)
+{
+ int error = 0;
+
+ switch (cmd) {
+ case TBT_DISCOVER:
+  error = tbdev_discover(addr);
+  break;
+ case TBT_REQUEST:
+  error = tbdev_request(addr);
+  break;
+ default:
+  error = EINVAL;
+ }
+
+ return (error);
+}
diff --git a/sys/dev/thunderbolt/tb_dev.h b/sys/dev/thunderbolt/tb_dev.h
new file mode 100644
index 000000000000..c40a7fbc3d5a
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_dev.h
@@ -0,0 +1,41 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _TB_DEV_H
+#define _TB_DEV_H
+
+/*
+ * Registration hooks for publishing Thunderbolt objects (NHI interfaces,
+ * domains, routers) so they can be reported through the tbtctl control
+ * device's discover ioctl.
+ */
+int tbdev_add_interface(struct nhi_softc *);
+int tbdev_remove_interface(struct nhi_softc *);
+int tbdev_add_domain(void *);
+int tbdev_remove_domain(void *);
+int tbdev_add_router(struct router_softc *);
+int tbdev_remove_router(struct router_softc *);
+
+#endif /* _TB_DEV_H */
diff --git a/sys/dev/thunderbolt/tb_if.m b/sys/dev/thunderbolt/tb_if.m
new file mode 100644
index 000000000000..8b0918811a5d
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_if.m
@@ -0,0 +1,121 @@
+#-
+# SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+#
+# Copyright (c) 2022 Scott Long
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $FreeBSD$
+#
+
+#include <sys/bus.h>
+#include <sys/types.h>
+#include <dev/thunderbolt/tb_reg.h>
+
+INTERFACE tb;
+
+CODE {
+ struct nhi_softc;
+
+	int
+	tb_generic_find_ufp(device_t dev, device_t *ufp)
+	{
+		device_t parent;
+
+		/*
+		 * Default method: delegate the UFP lookup one level up the
+		 * device tree; the topology root has no parent that
+		 * implements this and so terminates with EOPNOTSUPP.
+		 */
+		parent = device_get_parent(dev);
+		if (parent == NULL)
+			return (EOPNOTSUPP);
+
+		return (TB_FIND_UFP(parent, ufp));
+	}
+
+	int
+	tb_generic_get_debug(device_t dev, u_int *debug)
+	{
+		device_t parent;
+
+		/* Default method: inherit the debug level from the parent. */
+		parent = device_get_parent(dev);
+		if (parent == NULL)
+			return (EOPNOTSUPP);
+
+		return (TB_GET_DEBUG(parent, debug));
+	}
+
+
+}
+
+HEADER {
+ struct nhi_softc;
+
+ struct tb_lcmbox_cmd {
+ uint32_t cmd;
+ uint32_t cmd_resp;
+ uint32_t data_in;
+ uint32_t data_out;
+ };
+
+ int tb_generic_find_ufp(device_t, device_t *);
+ int tb_generic_get_debug(device_t, u_int *);
+}
+
+#
+# Read the LC Mailbox
+#
+METHOD int lc_mailbox {
+ device_t dev;
+ struct tb_lcmbox_cmd *cmd;
+};
+
+#
+# Read from the PCIE2CIO port
+#
+METHOD int pcie2cio_read {
+ device_t dev;
+ u_int space;
+ u_int port;
+ u_int index;
+ uint32_t *val;
+}
+
+#
+# Write to the PCIE2CIO port
+#
+METHOD int pcie2cio_write {
+ device_t dev;
+ u_int space;
+ u_int port;
+ u_int index;
+ uint32_t val;
+}
+
+#
+# Return the device that's the upstream facing port
+#
+METHOD int find_ufp {
+ device_t dev;
+ device_t *ufp;
+} DEFAULT tb_generic_find_ufp;
+
+METHOD int get_debug {
+ device_t dev;
+ u_int *debug;
+} DEFAULT tb_generic_get_debug;
diff --git a/sys/dev/thunderbolt/tb_ioctl.h b/sys/dev/thunderbolt/tb_ioctl.h
new file mode 100644
index 000000000000..60fafb091cef
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_ioctl.h
@@ -0,0 +1,52 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _TB_IOCTL_H
+#define _TB_IOCTL_H
+
+struct tbt_ioc {
+	void *data;	/* user-supplied buffer for the nvlist */
+	size_t size;	/* size of the user-supplied buffer */
+	size_t len;	/* amount of data in the nvlist */
+};
+
+#define TBT_NAMLEN	16		/* max length of an nvlist key name */
+#define TBT_DEVICE_NAME	"tbtctl"	/* /dev node for the control device */
+#define TBT_IOCMAXLEN	4096		/* cap on a packed nvlist payload */
+
+/* Enumerate interfaces/domains/routers; "type" selects which list. */
+#define TBT_DISCOVER	_IOWR('h', 1, struct tbt_ioc)
+#define TBT_DISCOVER_TYPE	"type"
+#define TBT_DISCOVER_IFACE	"iface"
+#define TBT_DISCOVER_DOMAIN	"domain"
+#define TBT_DISCOVER_ROUTER	"router"
+
+/* Submit an nvlist-encoded request (not yet implemented). */
+#define TBT_REQUEST	_IOWR('h', 2, struct tbt_ioc)
+
+#endif /* _TB_IOCTL_H */
diff --git a/sys/dev/thunderbolt/tb_pcib.c b/sys/dev/thunderbolt/tb_pcib.c
new file mode 100644
index 000000000000..00738984ad1c
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_pcib.c
@@ -0,0 +1,614 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_acpi.h"
+#include "opt_thunderbolt.h"
+
+/* PCIe bridge for Thunderbolt */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/param.h>
+#include <sys/endian.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/stdarg.h>
+#include <sys/rman.h>
+
+#include <machine/pci_cfgreg.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcib_private.h>
+#include <dev/pci/pci_private.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+#include <dev/acpica/acpivar.h>
+#include <dev/acpica/acpi_pcibvar.h>
+#include <machine/md_var.h>
+
+#include <dev/thunderbolt/tb_reg.h>
+#include <dev/thunderbolt/tb_pcib.h>
+#include <dev/thunderbolt/nhi_var.h>
+#include <dev/thunderbolt/nhi_reg.h>
+#include <dev/thunderbolt/tbcfg_reg.h>
+#include <dev/thunderbolt/tb_debug.h>
+#include "tb_if.h"
+
+static int tb_pcib_probe(device_t);
+static int tb_pcib_attach(device_t);
+static int tb_pcib_detach(device_t);
+static int tb_pcib_lc_mailbox(device_t, struct tb_lcmbox_cmd *);
+static int tb_pcib_pcie2cio_read(device_t, u_int, u_int, u_int,
+ uint32_t *);
+static int tb_pcib_pcie2cio_write(device_t, u_int, u_int, u_int, uint32_t);
+static int tb_pcib_find_ufp(device_t, device_t *);
+static int tb_pcib_get_debug(device_t, u_int *);
+
+static int tb_pci_probe(device_t);
+static int tb_pci_attach(device_t);
+static int tb_pci_detach(device_t);
+
+struct tb_pcib_ident {
+ uint16_t vendor;
+ uint16_t device;
+ uint16_t subvendor;
+ uint16_t subdevice;
+ uint32_t flags; /* This follows the tb_softc flags */
+ const char *desc;
+} tb_pcib_identifiers[] = {
+ { VENDOR_INTEL, TB_DEV_AR_2C, 0xffff, 0xffff, TB_GEN_TB3|TB_HWIF_AR,
+ "Thunderbolt 3 PCI-PCI Bridge (Alpine Ridge 2C)" },
+ { VENDOR_INTEL, TB_DEV_AR_LP, 0xffff, 0xffff, TB_GEN_TB3|TB_HWIF_AR,
+ "Thunderbolt 3 PCI-PCI Bridge (Alpine Ridge LP)" },
+ { VENDOR_INTEL, TB_DEV_AR_C_4C, 0xffff, 0xffff, TB_GEN_TB3|TB_HWIF_AR,
+ "Thunderbolt 3 PCI-PCI Bridge (Alpine Ridge C 4C)" },
+ { VENDOR_INTEL, TB_DEV_AR_C_2C, 0xffff, 0xffff, TB_GEN_TB3|TB_HWIF_AR,
+ "Thunderbolt 3 PCI-PCI Bridge C (Alpine Ridge C 2C)" },
+ { VENDOR_INTEL, TB_DEV_ICL_0, 0xffff, 0xffff, TB_GEN_TB3|TB_HWIF_ICL,
+ "Thunderbolt 3 PCI-PCI Bridge (IceLake)" },
+ { VENDOR_INTEL, TB_DEV_ICL_1, 0xffff, 0xffff, TB_GEN_TB3|TB_HWIF_ICL,
+ "Thunderbolt 3 PCI-PCI Bridge (IceLake)" },
+ { 0, 0, 0, 0, 0, NULL }
+};
+
+/*
+ * Look up this device's PCI IDs in the identification table.  Subvendor
+ * and subdevice entries of 0xffff act as wildcards.  Returns NULL when
+ * the device is not a recognized Thunderbolt bridge.
+ */
+static struct tb_pcib_ident *
+tb_pcib_find_ident(device_t dev)
+{
+	struct tb_pcib_ident *entry;
+	uint16_t vendor, device, subvendor, subdevice;
+
+	vendor = pci_get_vendor(dev);
+	device = pci_get_device(dev);
+	subvendor = pci_get_subvendor(dev);
+	subdevice = pci_get_subdevice(dev);
+
+	for (entry = tb_pcib_identifiers; entry->vendor != 0; entry++) {
+		if (entry->vendor != vendor)
+			continue;
+		if (entry->device != device)
+			continue;
+		if ((entry->subvendor != 0xffff) &&
+		    (entry->subvendor != subvendor))
+			continue;
+		if ((entry->subdevice != 0xffff) &&
+		    (entry->subdevice != subdevice))
+			continue;
+		return (entry);
+	}
+
+	return (NULL);
+}
+
+/*
+ * Initialize sc->debug from loader tunables.  The global
+ * hw.tbolt.debug_level is applied first, then the per-instance
+ * dev.tbolt.<unit>.debug_level may override it.
+ */
+static void
+tb_pcib_get_tunables(struct tb_pcib_softc *sc)
+{
+	char tmpstr[80], oid[80];
+
+	/* Set the default */
+	sc->debug = 0;
+
+	/* Grab global variables */
+	bzero(oid, sizeof(oid));
+	if (TUNABLE_STR_FETCH("hw.tbolt.debug_level", oid, sizeof(oid)) != 0)
+		tb_parse_debug(&sc->debug, oid);
+
+	/* Grab instance variables */
+	bzero(oid, sizeof(oid));
+	snprintf(tmpstr, sizeof(tmpstr), "dev.tbolt.%d.debug_level",
+	    device_get_unit(sc->dev));
+	if (TUNABLE_STR_FETCH(tmpstr, oid, sizeof(oid)) != 0)
+		tb_parse_debug(&sc->debug, oid);
+}
+
+/*
+ * Hook up the "debug_level" sysctl under this device's node.  Returns
+ * EINVAL if the device sysctl context/tree is unavailable.
+ */
+static int
+tb_pcib_setup_sysctl(struct tb_pcib_softc *sc)
+{
+	struct sysctl_ctx_list *ctx;
+	struct sysctl_oid *tree;
+
+	ctx = device_get_sysctl_ctx(sc->dev);
+	tree = (ctx != NULL) ? device_get_sysctl_tree(sc->dev) : NULL;
+	if (tree == NULL) {
+		tb_printf(sc, "Error: cannot create sysctl nodes\n");
+		return (EINVAL);
+	}
+	sc->sysctl_tree = tree;
+	sc->sysctl_ctx = ctx;
+
+	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
+	    OID_AUTO, "debug_level", CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_MPSAFE,
+	    &sc->debug, 0, tb_debug_sysctl, "A", "Thunderbolt debug level");
+
+	return (0);
+}
+
+/*
+ * This is used for both the PCI and ACPI attachments. It shouldn't return
+ * 0, doing so will force the ACPI attachment to fail.
+ */
+int
+tb_pcib_probe_common(device_t dev, char *desc)
+{
+	struct tb_pcib_ident *n;
+	device_t ufp;
+	const char *suffix;
+
+	/* Unrecognized devices are not ours. */
+	n = tb_pcib_find_ident(dev);
+	if (n == NULL)
+		return (ENXIO);
+
+	/* Annotate the description with the port direction. */
+	ufp = NULL;
+	if ((TB_FIND_UFP(dev, &ufp) == 0) && (ufp == dev))
+		suffix = "(Upstream port)";
+	else
+		suffix = "(Downstream port)";
+	snprintf(desc, TB_DESC_MAX, "%s %s", n->desc, suffix);
+	return (BUS_PROBE_VENDOR);
+}
+
+static int
+tb_pcib_probe(device_t dev)
+{
+	char desc[TB_DESC_MAX];
+	int val;
+
+	/* Probe values <= 0 indicate a successful match. */
+	val = tb_pcib_probe_common(dev, desc);
+	if (val <= 0)
+		device_set_desc_copy(dev, desc);
+
+	return (val);
+}
+
+/*
+ * Shared attach body for the PCI and ACPI attachments: identify the
+ * bridge, set up tunables/sysctls, enable busmastering, work out the
+ * port direction, locate the vendor VSEC, and apply hardware fixups.
+ */
+int
+tb_pcib_attach_common(device_t dev)
+{
+	device_t ufp;
+	struct tb_pcib_ident *n;
+	struct tb_pcib_softc *sc;
+	uint32_t val;
+	int error;
+
+	sc = device_get_softc(dev);
+	sc->dev = dev;
+	sc->vsec = -1;
+
+	/* Probe already matched, so the ident must be present. */
+	n = tb_pcib_find_ident(dev);
+	KASSERT(n != NULL, ("Cannot find TB ident"));
+	sc->flags = n->flags;
+
+	tb_pcib_get_tunables(sc);
+	tb_pcib_setup_sysctl(sc);
+
+	/* XXX Is this necessary for ACPI attachments? */
+	tb_debug(sc, DBG_BRIDGE, "busmaster status was %s\n",
+	    (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_BUSMASTEREN)
+	    ? "enabled" : "disabled");
+	pci_enable_busmaster(dev);
+
+	/*
+	 * Determine if this is an upstream or downstream facing device, and
+	 * whether it's the root of the Thunderbolt topology.  It's too bad
+	 * that there aren't unique PCI ID's to help with this.
+	 */
+	ufp = NULL;
+	if ((TB_FIND_UFP(dev, &ufp) == 0) && (ufp != NULL)) {
+		if (ufp == dev) {
+			sc->flags |= TB_FLAGS_ISUFP;
+			/* EOPNOTSUPP from the parent means we're topmost. */
+			if (TB_FIND_UFP(device_get_parent(dev), NULL) ==
+			    EOPNOTSUPP) {
+				sc->flags |= TB_FLAGS_ISROOT;
+			}
+		}
+	}
+
+	/*
+	 * Find the PCI Vendor Specific Extended Capability.  It's the magic
+	 * wand to configuring the Thunderbolt root bridges.
+	 */
+	if (TB_IS_AR(sc) || TB_IS_TR(sc)) {
+		error = pci_find_extcap(dev, PCIZ_VENDOR, &sc->vsec);
+		if (error) {
+			tb_printf(sc, "Cannot find VSEC capability: %d\n",
+			    error);
+			return (ENXIO);
+		}
+	}
+
+	/*
+	 * Take the AR bridge out of low-power mode.
+	 * XXX AR only?  (The "1 ||" currently applies this to all roots.)
+	 */
+	if ((1 || TB_IS_AR(sc)) && TB_IS_ROOT(sc)) {
+		struct tb_lcmbox_cmd cmd;
+
+		cmd.cmd = LC_MBOXOUT_CMD_SXEXIT_TBT;
+		cmd.data_in = 0;
+
+		error = TB_LC_MAILBOX(dev, &cmd);
+		tb_debug(sc, DBG_BRIDGE, "SXEXIT returned error= %d resp= 0x%x "
+		    "data= 0x%x\n", error, cmd.cmd_resp, cmd.data_out);
+	}
+
+	/* The downstream facing port on AR needs some help */
+	if (TB_IS_AR(sc) && TB_IS_DFP(sc)) {
+		tb_debug(sc, DBG_BRIDGE, "Doing AR L1 fixup\n");
+		val = pci_read_config(dev, sc->vsec + AR_VSCAP_1C, 4);
+		tb_debug(sc, DBG_BRIDGE|DBG_FULL, "VSEC+0x1c= 0x%08x\n", val);
+		val |= (1 << 8);
+		pci_write_config(dev, sc->vsec + AR_VSCAP_1C, val, 4);
+
+		val = pci_read_config(dev, sc->vsec + AR_VSCAP_B0, 4);
+		tb_debug(sc, DBG_BRIDGE|DBG_FULL, "VSEC+0xb0= 0x%08x\n", val);
+		val |= (1 << 12);
+		pci_write_config(dev, sc->vsec + AR_VSCAP_B0, val, 4);
+	}
+
+	return (0);
+}
+
+static int
+tb_pcib_attach(device_t dev)
+{
+	int error;
+
+	/* Thunderbolt-specific setup first, then the generic bridge attach. */
+	if ((error = tb_pcib_attach_common(dev)) != 0)
+		return (error);
+	return (pcib_attach(dev));
+}
+
+/* Tear down the bridge; the GO2SX power-down is currently disabled. */
+static int
+tb_pcib_detach(device_t dev)
+{
+	struct tb_pcib_softc *sc;
+	int error;
+
+	sc = device_get_softc(dev);
+
+	tb_debug(sc, DBG_BRIDGE|DBG_ROUTER|DBG_EXTRA, "tb_pcib_detach\n");
+
+	/* Put the AR bridge back to sleep */
+	/* XXX disable this until power control for downstream switches works */
+	if (0 && TB_IS_ROOT(sc)) {
+		struct tb_lcmbox_cmd cmd;
+
+		/*
+		 * NOTE(review): the debug string below says "SXEXIT" but the
+		 * command sent is GO2SX -- update the message when enabling.
+		 */
+		cmd.cmd = LC_MBOXOUT_CMD_GO2SX;
+		cmd.data_in = 0;
+
+		error = TB_LC_MAILBOX(dev, &cmd);
+		tb_debug(sc, DBG_BRIDGE, "SXEXIT returned error= %d resp= 0x%x "
+		    "data= 0x%x\n", error, cmd.cmd_resp, cmd.data_out);
+	}
+
+	return (pcib_detach(dev));
+}
+
+/* Read/write the Link Controller registers in CFG space */
+static int
+tb_pcib_lc_mailbox(device_t dev, struct tb_lcmbox_cmd *cmd)
+{
+	struct tb_pcib_softc *sc;
+	uint32_t regcmd, result;
+	uint16_t m_in, m_out;
+	int vsec, i;
+
+	sc = device_get_softc(dev);
+	vsec = TB_PCIB_VSEC(dev);
+	if (vsec == -1)
+		return (EOPNOTSUPP);
+
+	/* Mailbox register offsets differ per hardware interface. */
+	if (TB_IS_AR(sc)) {
+		m_in = AR_LC_MBOX_IN;
+		m_out = AR_LC_MBOX_OUT;
+	} else if (TB_IS_ICL(sc)) {
+		m_in = ICL_LC_MBOX_IN;
+		m_out = ICL_LC_MBOX_OUT;
+	} else
+		return (EOPNOTSUPP);
+
+	/* Set the valid bit to signal we're sending a command */
+	regcmd = LC_MBOXOUT_VALID | (cmd->cmd & LC_MBOXOUT_CMD_MASK);
+	regcmd |= (cmd->data_in << LC_MBOXOUT_DATA_SHIFT);
+	tb_debug(sc, DBG_BRIDGE|DBG_FULL, "Writing LC cmd 0x%x\n", regcmd);
+	pci_write_config(dev, vsec + m_out, regcmd, 4);
+
+	/* Poll up to 10 x 1s for the firmware to set the DONE bit. */
+	for (i = 0; i < 10; i++) {
+		pause("nhi", 1 * hz);
+		result = pci_read_config(dev, vsec + m_in, 4);
+		tb_debug(sc, DBG_BRIDGE|DBG_FULL, "LC Mailbox= 0x%08x\n",
+		    result);
+		if ((result & LC_MBOXIN_DONE) != 0)
+			break;
+	}
+
+	/* Clear the valid bit to signal we're done sending the command */
+	pci_write_config(dev, vsec + m_out, 0, 4);
+
+	cmd->cmd_resp = result & LC_MBOXIN_CMD_MASK;
+	/*
+	 * NOTE(review): data_out is extracted with LC_MBOXIN_CMD_SHIFT;
+	 * a data-specific shift (cf. LC_MBOXOUT_DATA_SHIFT on the write
+	 * side) looks intended -- confirm against the LC register layout.
+	 */
+	cmd->data_out = result >> LC_MBOXIN_CMD_SHIFT;
+
+	if ((result & LC_MBOXIN_DONE) == 0)
+		return (ETIMEDOUT);
+
+	return (0);
+}
+
+/*
+ * Wait for a PCIE2CIO transaction to complete.  Currently a stub that
+ * always reports ETIMEDOUT; the disabled body still carries Linuxisms
+ * (jiffies, time_before, 2-arg msleep) and must be ported before use.
+ */
+static int
+tb_pcib_pcie2cio_wait(device_t dev, u_int timeout)
+{
+#if 0
+	uint32_t val;
+	int vsec;
+
+	vsec = TB_PCIB_VSEC(dev);
+	do {
+		pci_read_config(dev, vsec + PCIE2CIO_CMD, &val);
+		if ((val & PCIE2CIO_CMD_START) == 0) {
+			if (val & PCIE2CIO_CMD_TIMEOUT)
+				break;
+			return (0);
+		}
+
+		msleep(50);
+	} while (time_before(jiffies, end));
+
+#endif
+	return (ETIMEDOUT);
+}
+
+/* Read a dword from the PCIE2CIO port.  Stub: always succeeds as a no-op. */
+static int
+tb_pcib_pcie2cio_read(device_t dev, u_int space, u_int port, u_int offset,
+    uint32_t *val)
+{
+#if 0
+	/*
+	 * NOTE(review): this disabled body uses 'index' but the parameter
+	 * is named 'offset', and it calls pci2cio_wait_completion() which
+	 * doesn't exist here (cf. tb_pcib_pcie2cio_wait).  Fix both before
+	 * enabling.
+	 */
+	uint32_t cmd;
+	int ret, vsec;
+
+	vsec = TB_PCIB_VSEC(dev);
+	if (vsec == -1)
+		return (EOPNOTSUPP);
+
+	cmd = index;
+	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
+	cmd |= (space << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
+	cmd |= PCIE2CIO_CMD_START;
+	pci_write_config(dev, vsec + PCIE2CIO_CMD, cmd, 4);
+
+	if ((ret = pci2cio_wait_completion(dev, 5000)) != 0)
+		return (ret);
+
+	*val = pci_read_config(dev, vsec + PCIE2CIO_RDDATA, 4);
+#endif
+	return (0);
+}
+
+/* Write a dword to the PCIE2CIO port.  Stub: only calls the wait stub. */
+static int
+tb_pcib_pcie2cio_write(device_t dev, u_int space, u_int port, u_int offset,
+    uint32_t val)
+{
+#if 0
+	/*
+	 * NOTE(review): this disabled body uses 'index' but the parameter
+	 * is named 'offset', and the final pci_write_config() is missing
+	 * its width argument.  Fix both before enabling.
+	 */
+	uint32_t cmd;
+	int ret, vsec;
+
+	vsec = TB_PCIB_VSEC(dev);
+	if (vsec == -1)
+		return (EOPNOTSUPP);
+
+	pci_write_config(dev, vsec + PCIE2CIO_WRDATA, val, 4);
+
+	cmd = index;
+	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
+	cmd |= (space << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
+	cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
+	pci_write_config(dev, vsec + PCIE2CIO_CMD, cmd);
+
+#endif
+	return (tb_pcib_pcie2cio_wait(dev, 5000));
+}
+
+/*
+ * The Upstream Facing Port (UFP) in a switch is special, it's the function
+ * that responds to some of the special programming mailboxes. It can't be
+ * differentiated by PCI ID, so a heuristic approach to identifying it is
+ * required.
+ */
+static int
+tb_pcib_find_ufp(device_t dev, device_t *ufp)
+{
+	device_t upstream;
+	struct tb_pcib_softc *sc;
+	uint32_t vsec, val;
+	int error;
+
+	upstream = NULL;
+	sc = device_get_softc(dev);
+	if (sc == NULL)
+		return (EOPNOTSUPP);
+
+	/* Fast path: the flag was already determined at attach time. */
+	if (TB_IS_UFP(sc)) {
+		upstream = dev;
+		error = 0;
+		goto out;
+	}
+
+	/*
+	 * This register is supposed to be filled in on the upstream port
+	 * and tells how many downstream ports there are.  It doesn't seem
+	 * to get filled in on AR host controllers, but is on various
+	 * peripherals.
+	 */
+	error = pci_find_extcap(dev, PCIZ_VENDOR, &vsec);
+	if (error == 0) {
+		/* Low 5 bits of VSEC+0x18 = downstream port count. */
+		val = pci_read_config(dev, vsec + 0x18, 4);
+		if ((val & 0x1f) > 0) {
+			upstream = dev;
+			goto out;
+		}
+	}
+
+	/*
+	 * Since we can't trust that the VSEC register is filled in, the only
+	 * other option is to see if we're at the top of the topology, which
+	 * implies that we're at the upstream port of the host controller.
+	 */
+	error = TB_FIND_UFP(device_get_parent(dev), ufp);
+	if (error == EOPNOTSUPP) {
+		/* No TB-aware parent: we are topmost, hence the UFP. */
+		upstream = dev;
+		error = 0;
+		goto out;
+	} else
+		/* The parent answered (or failed); pass its result through. */
+		return (error);
+
+out:
+	/* ufp may be NULL when the caller only wants the yes/no answer. */
+	if (ufp != NULL)
+		*ufp = upstream;
+
+	return (error);
+}
+
+static int
+tb_pcib_get_debug(device_t dev, u_int *debug)
+{
+	struct tb_pcib_softc *sc;
+
+	/* Report this bridge's debug mask to the caller. */
+	sc = device_get_softc(dev);
+	if (sc == NULL || debug == NULL)
+		return (EOPNOTSUPP);
+	*debug = sc->debug;
+
+	return (0);
+}
+
+static device_method_t tb_pcib_methods[] = {
+ DEVMETHOD(device_probe, tb_pcib_probe),
+ DEVMETHOD(device_attach, tb_pcib_attach),
+ DEVMETHOD(device_detach, tb_pcib_detach),
+
+ DEVMETHOD(tb_lc_mailbox, tb_pcib_lc_mailbox),
+ DEVMETHOD(tb_pcie2cio_read, tb_pcib_pcie2cio_read),
+ DEVMETHOD(tb_pcie2cio_write, tb_pcib_pcie2cio_write),
+
+ DEVMETHOD(tb_find_ufp, tb_pcib_find_ufp),
+ DEVMETHOD(tb_get_debug, tb_pcib_get_debug),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(tbolt, tb_pcib_driver, tb_pcib_methods,
+ sizeof(struct tb_pcib_softc), pcib_driver);
+DRIVER_MODULE_ORDERED(tb_pcib, pci, tb_pcib_driver,
+ NULL, NULL, SI_ORDER_MIDDLE);
+MODULE_DEPEND(tb_pcib, pci, 1, 1, 1);
+MODULE_PNP_INFO("U16:vendor;U16:device;U16:subvendor;U16:subdevice;U32:#;D:#",
+ pci, tb_pcib, tb_pcib_identifiers, nitems(tb_pcib_identifiers) - 1);
+
+static int
+tb_pci_probe(device_t dev)
+{
+	struct tb_pcib_ident *ident;
+	const char *desc;
+
+	/* Match only when the parent bridge is a known TB device. */
+	ident = tb_pcib_find_ident(device_get_parent(dev));
+	if (ident == NULL)
+		return (ENXIO);
+
+	switch (ident->flags & TB_GEN_MASK) {
+	case TB_GEN_TB1:
+		desc = "Thunderbolt 1 Link";
+		break;
+	case TB_GEN_TB2:
+		desc = "Thunderbolt 2 Link";
+		break;
+	case TB_GEN_TB3:
+		desc = "Thunderbolt 3 Link";
+		break;
+	case TB_GEN_USB4:
+		desc = "USB4 Link";
+		break;
+	case TB_GEN_UNK:
+		/* Fallthrough */
+	default:
+		desc = "Thunderbolt Link";
+		break;
+	}
+	device_set_desc(dev, desc);
+	return (BUS_PROBE_VENDOR);
+}
+
+/* Thin wrapper: defer all attach work to the generic PCI bus code. */
+static int
+tb_pci_attach(device_t dev)
+{
+
+	return (pci_attach(dev));
+}
+
+/* Thin wrapper: defer all detach work to the generic PCI bus code. */
+static int
+tb_pci_detach(device_t dev)
+{
+
+	return (pci_detach(dev));
+}
+
+static device_method_t tb_pci_methods[] = {
+ DEVMETHOD(device_probe, tb_pci_probe),
+ DEVMETHOD(device_attach, tb_pci_attach),
+ DEVMETHOD(device_detach, tb_pci_detach),
+
+ DEVMETHOD(tb_find_ufp, tb_generic_find_ufp),
+ DEVMETHOD(tb_get_debug, tb_generic_get_debug),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(pci, tb_pci_driver, tb_pci_methods, sizeof(struct pci_softc),
+ pci_driver);
+DRIVER_MODULE(tb_pci, pcib, tb_pci_driver, NULL, NULL);
+MODULE_DEPEND(tb_pci, pci, 1, 1, 1);
+MODULE_VERSION(tb_pci, 1);
diff --git a/sys/dev/thunderbolt/tb_pcib.h b/sys/dev/thunderbolt/tb_pcib.h
new file mode 100644
index 000000000000..6928e866a083
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_pcib.h
@@ -0,0 +1,93 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Thunderbolt PCIe bridge/switch definitions
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _TB_PCIB_H
+#define _TB_PCIB_H
+
+DECLARE_CLASS(tb_pcib_driver);
+
+/*
+ * The order of the fields is very important.  Class inheritance relies on
+ * implicitly knowing the location of the first 3 fields.
+ */
+struct tb_pcib_softc {
+	struct pcib_softc	pcibsc;
+	ACPI_HANDLE		ap_handle;
+	ACPI_BUFFER		ap_prt;
+	device_t		dev;
+	u_int			debug;
+	int			vsec;
+	int			flags;
+	struct sysctl_ctx_list	*sysctl_ctx;
+	struct sysctl_oid	*sysctl_tree;
+};
+
+/* Flags for tb_softc */
+#define TB_GEN_UNK	0x00
+#define TB_GEN_TB1	0x01
+#define TB_GEN_TB2	0x02
+#define TB_GEN_TB3	0x03
+#define TB_GEN_USB4	0x04
+#define TB_GEN_MASK	0x0f
+#define TB_HWIF_UNK	0x00
+#define TB_HWIF_AR	0x10
+#define TB_HWIF_TR	0x20
+#define TB_HWIF_ICL	0x30
+#define TB_HWIF_USB4	0x40
+#define TB_HWIF_MASK	0xf0
+#define TB_FLAGS_ISROOT	0x100
+#define TB_FLAGS_ISUFP	0x200
+
+#define TB_IS_AR(sc)	(((sc)->flags & TB_HWIF_MASK) == TB_HWIF_AR)
+#define TB_IS_TR(sc)	(((sc)->flags & TB_HWIF_MASK) == TB_HWIF_TR)
+#define TB_IS_ICL(sc)	(((sc)->flags & TB_HWIF_MASK) == TB_HWIF_ICL)
+#define TB_IS_USB4(sc)	(((sc)->flags & TB_HWIF_MASK) == TB_HWIF_USB4)
+
+#define TB_IS_ROOT(sc)	(((sc)->flags & TB_FLAGS_ISROOT) != 0)
+#define TB_IS_UFP(sc)	(((sc)->flags & TB_FLAGS_ISUFP) != 0)
+#define TB_IS_DFP(sc)	(((sc)->flags & TB_FLAGS_ISUFP) == 0)
+
+/* PCI IDs for the TB bridges */
+#define TB_DEV_AR_2C	0x1576
+#define TB_DEV_AR_LP	0x15c0
+#define TB_DEV_AR_C_4C	0x15d3
+#define TB_DEV_AR_C_2C	0x15da
+#define TB_DEV_ICL_0	0x8a1d
+#define TB_DEV_ICL_1	0x8a21
+
+/*
+ * Fetch the cached VSEC offset from the softc.  No trailing semicolon:
+ * the macro must be usable inside expressions, not just as a statement.
+ */
+#define TB_PCIB_VSEC(dev) \
+	(((struct tb_pcib_softc *)(device_get_softc(dev)))->vsec)
+#define TB_DESC_MAX	80
+
+int tb_pcib_probe_common(device_t, char *);
+int tb_pcib_attach_common(device_t dev);
+
+#endif /* _TB_PCIB_H */
diff --git a/sys/dev/thunderbolt/tb_reg.h b/sys/dev/thunderbolt/tb_reg.h
new file mode 100644
index 000000000000..b065e01e6972
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_reg.h
@@ -0,0 +1,52 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Thunderbolt Variables
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _TB_REG_H
+#define _TB_REG_H
+
+/* Security levels; TBSEC_UNKNOWN is a local sentinel, not a wire value. */
+#define TBSEC_NONE	0x00
+#define TBSEC_USER	0x01
+#define TBSEC_SECURE	0x02
+#define TBSEC_DP	0x03
+#define TBSEC_UNKNOWN	0xff
+
+/*
+ * SW-FW commands and responses.  These are sent over Ring0 to communicate
+ * with the fabric and the TBT Connection Manager firmware.
+ */
+
+/* 64-bit topology route, split into high/low dwords. */
+typedef struct {
+	uint32_t	hi;
+	uint32_t	lo;
+} __packed tb_route_t;
+
+#endif /* _TB_REG_H */
diff --git a/sys/dev/thunderbolt/tb_var.h b/sys/dev/thunderbolt/tb_var.h
new file mode 100644
index 000000000000..4874c420300e
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_var.h
@@ -0,0 +1,54 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Thunderbolt firmware connection manager functions.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _TB_VAR_H
+#define _TB_VAR_H
+
+/* Topology position: link index and hop depth (signed to allow -1). */
+typedef struct {
+	int8_t	link;
+	int8_t	depth;
+} tb_addr_t;
+
+MALLOC_DECLARE(M_THUNDERBOLT);
+
+#define TB_VENDOR_LEN	48
+#define TB_MODEL_LEN	48
+#define TB_MAX_LINKS	4
+#define TB_MAX_DEPTH	6
+
+/*
+ * CRC32C with inverted seed and inverted result (i.e. the standard
+ * finalized CRC-32C form, built on the kernel's calculate_crc32c()).
+ */
+static __inline uint32_t
+tb_calc_crc(void *data, u_int len)
+{
+	return ( ~ (calculate_crc32c(~0L, data, len)));
+}
+
+#endif /* _TB_VAR_H */
diff --git a/sys/dev/thunderbolt/tbcfg_reg.h b/sys/dev/thunderbolt/tbcfg_reg.h
new file mode 100644
index 000000000000..bb68faa543b0
--- /dev/null
+++ b/sys/dev/thunderbolt/tbcfg_reg.h
@@ -0,0 +1,363 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Thunderbolt3/USB4 config space register definitions
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _TBCFG_REG_H
+#define _TBCFG_REG_H
+
+/* Config space read request, 6.4.2.3 */
+struct tb_cfg_read {
+ tb_route_t route;
+ uint32_t addr_attrs;
+#define TB_CFG_ADDR_SHIFT 0
+#define TB_CFG_ADDR_MASK GENMASK(12,0)
+#define TB_CFG_SIZE_SHIFT 13
+#define TB_CFG_SIZE_MASK GENMASK(18,13)
+#define TB_CFG_ADAPTER_SHIFT 19
+#define TB_CFG_ADAPTER_MASK GENMASK(24,19)
+#define TB_CFG_CS_PATH (0x00 << 25)
+#define TB_CFG_CS_ADAPTER (0x01 << 25)
+#define TB_CFG_CS_ROUTER (0x02 << 25)
+#define TB_CFG_CS_COUNTERS (0x03 << 25)
+#define TB_CFG_SEQ_SHIFT 27
+/* Was "(28,27)" — a bare comma expression, missing the GENMASK() wrapper. */
+#define TB_CFG_SEQ_MASK GENMASK(28,27)
+ uint32_t crc;
+} __packed; /* packed like every other on-wire config packet here */
+
+/* Config space read response, 6.4.2.4 */
+struct tb_cfg_read_resp {
+ tb_route_t route;
+ uint32_t addr_attrs;
+ uint32_t data[0]; /* Up to 60 dwords */
+ /* uint32_t crc is at the end */
+} __packed;
+
+/* Config space write request, 6.4.2.5 */
+struct tb_cfg_write {
+ tb_route_t route;
+ uint32_t addr_attrs;
+ uint32_t data[0]; /* Up to 60 dwords; zero-length trailing array */
+ /* uint32_t crc is at the end */
+} __packed;
+
+/* Config space write response, 6.4.2.6 */
+struct tb_cfg_write_resp {
+ tb_route_t route;
+ uint32_t addr_attrs;
+ uint32_t crc;
+} __packed;
+
+/* Config space event, 6.4.2.7 */
+struct tb_cfg_notify {
+ tb_route_t route;
+ uint32_t event_adap;
+#define TB_CFG_EVENT_MASK GENMASK(7,0)
+#define GET_NOTIFY_EVENT(n) ((n)->event_adap & TB_CFG_EVENT_MASK)
+/* Event codes: error codes and notifications share this field. */
+#define TB_CFG_ERR_CONN 0x00
+#define TB_CFG_ERR_LINK 0x01
+#define TB_CFG_ERR_ADDR 0x02
+#define TB_CFG_ERR_ADP 0x04
+#define TB_CFG_ERR_ENUM 0x08
+#define TB_CFG_ERR_NUA 0x09
+#define TB_CFG_ERR_LEN 0x0b
+#define TB_CFG_ERR_HEC 0x0c
+#define TB_CFG_ERR_FC 0x0d
+#define TB_CFG_ERR_PLUG 0x0e
+#define TB_CFG_ERR_LOCK 0x0f
+#define TB_CFG_HP_ACK 0x07
+#define TB_CFG_DP_BW 0x20
+#define TB_CFG_EVENT_ADAPTER_SHIFT 8
+#define TB_CFG_EVENT_ADAPTER_MASK GENMASK(13,8)
+#define GET_NOTIFY_ADAPTER(n) (((n)->event_adap & \
+ TB_CFG_EVENT_ADAPTER_MASK) >> \
+ TB_CFG_EVENT_ADAPTER_SHIFT)
+/* Plug/unplug status encoded in the top two bits of event_adap. */
+#define TB_CFG_PG_NONE 0x00000000
+#define TB_CFG_PG_PLUG 0x80000000
+#define TB_CFG_PG_UNPLUG 0xc0000000
+ uint32_t crc;
+} __packed;
+
+/* Config space event acknowledgement, 6.4.2.8 */
+struct tb_cfg_notify_ack {
+ tb_route_t route;
+ uint32_t crc;
+} __packed;
+
+/* Config space hot plug event, 6.4.2.10 */
+struct tb_cfg_hotplug {
+ tb_route_t route;
+ uint32_t adapter_attrs;
+#define TB_CFG_ADPT_MASK GENMASK(5,0)
+/* Unsigned shifts: left-shifting into bit 31 of a signed int is UB. */
+#define TB_CFG_UPG_PLUG (0x0U << 31)
+#define TB_CFG_UPG_UNPLUG (0x1U << 31)
+ uint32_t crc;
+} __packed;
+
+/* Config space inter-domain request, 6.4.2.11 */
+struct tb_cfg_xdomain {
+ tb_route_t route;
+ uint32_t data[0]; /* variable-length payload */
+ /* uint32_t crc is at the end */
+} __packed;
+
+/* Config space inter-domain response, 6.4.2.12 */
+struct tb_cfg_xdomain_resp {
+ tb_route_t route;
+ uint32_t data[0]; /* variable-length payload */
+ /* uint32_t crc is at the end */
+} __packed;
+
+/* Config space router basic registers 8.2.1.1 */
+struct tb_cfg_router {
+ uint16_t vendor_id; /* ROUTER_CS_0 */
+ uint16_t product_id;
+ uint32_t router_cs_1; /* ROUTER_CS_1 */
+#define ROUTER_CS1_NEXT_CAP_MASK GENMASK(7,0)
+#define GET_ROUTER_CS_NEXT_CAP(r) (r->router_cs_1 & \
+ ROUTER_CS1_NEXT_CAP_MASK)
+#define ROUTER_CS1_UPSTREAM_SHIFT 8
+#define ROUTER_CS1_UPSTREAM_MASK GENMASK(13,8)
+#define GET_ROUTER_CS_UPSTREAM_ADAP(r) ((r->router_cs_1 & \
+ ROUTER_CS1_UPSTREAM_MASK) >> \
+ ROUTER_CS1_UPSTREAM_SHIFT)
+#define ROUTER_CS1_MAX_SHIFT 14
+#define ROUTER_CS1_MAX_MASK GENMASK(19,14)
+#define GET_ROUTER_CS_MAX_ADAP(r) ((r->router_cs_1 & \
+ ROUTER_CS1_MAX_MASK) >> \
+ ROUTER_CS1_MAX_SHIFT)
+#define ROUTER_CS1_MAX_ADAPTERS 64
+#define ROUTER_CS1_DEPTH_SHIFT 20
+#define ROUTER_CS1_DEPTH_MASK GENMASK(22,20)
+#define GET_ROUTER_CS_DEPTH(r) ((r->router_cs_1 & \
+ ROUTER_CS1_DEPTH_MASK) >> \
+ ROUTER_CS1_DEPTH_SHIFT)
+#define ROUTER_CS1_REVISION_SHIFT 24
+#define ROUTER_CS1_REVISION_MASK GENMASK(31,24)
+/* Fixed: was object-like but referenced an undefined 'r'; now takes (r). */
+#define GET_ROUTER_CS_REVISION(r) ((r->router_cs_1 & \
+ ROUTER_CS1_REVISION_MASK) >> \
+ ROUTER_CS1_REVISION_SHIFT)
+ uint32_t topology_lo; /* ROUTER_CS_2 */
+ uint32_t topology_hi; /* ROUTER_CS_3 */
+#define CFG_TOPOLOGY_VALID (1U << 31) /* unsigned: bit-31 shift UB */
+ uint8_t notification_timeout; /* ROUTER_CS_4 */
+ uint8_t cm_version;
+#define CFG_CM_USB4 0x10
+ uint8_t rsrvd1;
+ uint8_t usb4_version;
+#define CFG_USB4_V1_0 0x10
+ uint32_t flags_cs5; /* ROUTER_CS_5 */
+#define CFG_CS5_SLP (1 << 0)
+#define CFG_CS5_WOP (1 << 1)
+#define CFG_CS5_WOU (1 << 2)
+#define CFG_CS5_DP (1 << 3)
+#define CFG_CS5_C3S (1 << 23)
+#define CFG_CS5_PTO (1 << 24)
+#define CFG_CS5_UTO (1 << 25)
+#define CFG_CS5_HCO (1 << 26)
+#define CFG_CS5_CV (1U << 31) /* unsigned: bit-31 shift UB */
+ uint32_t flags_cs6; /* ROUTER_CS_6 */
+#define CFG_CS6_SLPR (1 << 0)
+#define CFG_CS6_TNS (1 << 1)
+#define CFG_CS6_WAKE_PCIE (1 << 2)
+#define CFG_CS6_WAKE_USB3 (1 << 3)
+#define CFG_CS6_WAKE_DP (1 << 4)
+#define CFG_CS6_HCI (1 << 18)
+#define CFG_CS6_RR (1 << 24)
+#define CFG_CS6_CR (1 << 25)
+ uint32_t uuid_hi; /* ROUTER_CS_7 */
+ uint32_t uuid_lo; /* ROUTER_CS_8 */
+ uint32_t data[16]; /* ROUTER_CS_9-24 */
+ uint32_t metadata; /* ROUTER_CS_25 */
+ uint32_t opcode_status; /* ROUTER_CS_26 */
+/* TBD: Opcodes and status */
+#define CFG_ONS (1 << 30)
+#define CFG_OV (1U << 31) /* unsigned: bit-31 shift UB */
+} __packed;
+
+#define TB_CFG_CAP_OFFSET_MAX 0xfff
+
+/* Config space router capability header 8.2.1.3/8.2.1.4 */
+struct tb_cfg_cap_hdr {
+ uint8_t next_cap;
+ uint8_t cap_id;
+} __packed;
+
+/* Config space router TMU registers 8.2.1.2 */
+struct tb_cfg_cap_tmu {
+ struct tb_cfg_cap_hdr hdr;
+#define TB_CFG_CAP_TMU 0x03
+} __packed;
+
+struct tb_cfg_vsc_cap {
+ struct tb_cfg_cap_hdr hdr;
+#define TB_CFG_CAP_VSC 0x05
+ uint8_t vsc_id;
+ uint8_t len;
+} __packed;
+
+/*
+ * NOTE(review): VSC and VSEC share cap_id 0x05; the two layouts are
+ * presumably distinguished by the len field — confirm against the spec.
+ */
+struct tb_cfg_vsec_cap {
+ struct tb_cfg_cap_hdr hdr;
+#define TB_CFG_CAP_VSEC 0x05
+ uint8_t vsec_id;
+ uint8_t len;
+ uint16_t vsec_next_cap;
+ uint16_t vsec_len;
+} __packed;
+
+union tb_cfg_cap {
+ struct tb_cfg_cap_hdr hdr;
+ struct tb_cfg_cap_tmu tmu;
+ struct tb_cfg_vsc_cap vsc;
+ struct tb_cfg_vsec_cap vsec;
+} __packed;
+
+#define TB_CFG_VSC_PLUG 0x01 /* Hot Plug and DROM */
+
+#define TB_CFG_VSEC_LC 0x06 /* Link Controller */
+#define TB_LC_DESC 0x02 /* LC Descriptor fields */
+#define TB_LC_DESC_NUM_LC_MASK GENMASK(3, 0)
+#define TB_LC_DESC_SIZE_SHIFT 8
+#define TB_LC_DESC_SIZE_MASK GENMASK(15, 8)
+#define TB_LC_DESC_PORT_SHIFT 16
+#define TB_LC_DESC_PORT_MASK GENMASK(27, 16)
+#define TB_LC_UUID 0x03
+#define TB_LC_DP_SINK 0x10 /* Display Port config */
+#define TB_LC_PORT_ATTR 0x8d /* Port attributes */
+#define TB_LC_PORT_ATTR_BE (1 << 12) /* Bonding enabled */
+#define TB_LC_SX_CTRL 0x96 /* Sleep control */
+#define TB_LC_SX_CTRL_WOC (1 << 1)
+#define TB_LC_SX_CTRL_WOD (1 << 2)
+#define TB_LC_SX_CTRL_WOU4 (1 << 5)
+#define TB_LC_SX_CTRL_WOP (1 << 6)
+#define TB_LC_SX_CTRL_L1C (1 << 16)
+#define TB_LC_SX_CTRL_L1D (1 << 17)
+#define TB_LC_SX_CTRL_L2C (1 << 20)
+#define TB_LC_SX_CTRL_L2D (1 << 21)
+#define TB_LC_SX_CTRL_UFP (1 << 30)
+#define TB_LC_SX_CTRL_SLP (1 << 31)
+#define TB_LC_POWER 0x740
+
+/* Config space adapter basic registers 8.2.2.1 */
+struct tb_cfg_adapter {
+ uint16_t vendor_id; /* ADP CS0 */
+ uint16_t product_id;
+ uint32_t adp_cs1; /* ADP CS1 */
+#define ADP_CS1_NEXT_CAP_MASK GENMASK(7,0)
+#define GET_ADP_CS_NEXT_CAP(a) (a->adp_cs1 & \
+ ADP_CS1_NEXT_CAP_MASK)
+#define ADP_CS1_COUNTER_SHIFT 8
+#define ADP_CS1_COUNTER_MASK GENMASK(18,8)
+#define GET_ADP_CS_MAX_COUNTERS(a) ((a->adp_cs1 & \
+ ADP_CS1_COUNTER_MASK) >> \
+ ADP_CS1_COUNTER_SHIFT)
+#define CFG_COUNTER_CONFIG_FLAG (1 << 19)
+ uint32_t adp_cs2; /* ADP CS2 */
+#define ADP_CS2_TYPE_MASK GENMASK(23,0)
+#define GET_ADP_CS_TYPE(a) (a->adp_cs2 & ADP_CS2_TYPE_MASK)
+#define ADP_CS2_UNSUPPORTED 0x000000
+#define ADP_CS2_LANE 0x000001
+#define ADP_CS2_HOSTIF 0x000002
+#define ADP_CS2_PCIE_DFP 0x100101
+#define ADP_CS2_PCIE_UFP 0x100102
+#define ADP_CS2_DP_OUT 0x0e0102
+#define ADP_CS2_DP_IN 0x0e0101
+#define ADP_CS2_USB3_DFP 0x200101
+#define ADP_CS2_USB3_UFP 0x200102
+ uint32_t adp_cs3; /* ADP CS 3 */
+#define ADP_CS3_ADP_NUM_SHIFT 20
+#define ADP_CS3_ADP_NUM_MASK GENMASK(25,20)
+#define GET_ADP_CS_ADP_NUM(a) ((a->adp_cs3 & \
+ ADP_CS3_ADP_NUM_MASK) >> \
+ ADP_CS3_ADP_NUM_SHIFT)
+#define CFG_ADP_HEC_ERROR (1 << 29)
+#define CFG_ADP_FC_ERROR (1 << 30)
+#define CFG_ADP_SBC (1U << 31) /* unsigned: bit-31 shift UB */
+} __packed;
+
+/* Config space lane adapter capability 8.2.2.3 */
+struct tb_cfg_cap_lane {
+ struct tb_cfg_cap_hdr hdr; /* LANE_ADP_CS_0 */
+#define TB_CFG_CAP_LANE 0x01
+ /* Supported link/width/power */
+ uint16_t supp_lwp;
+#define CAP_LANE_LINK_MASK GENMASK(3,0)
+#define CAP_LANE_LINK_GEN3 0x0004
+#define CAP_LANE_LINK_GEN2 0x0008
+#define CAP_LANE_WIDTH_MASK GENMASK(9,4)
+#define CAP_LANE_WIDTH_1X 0x0010
+#define CAP_LANE_WIDTH_2X 0x0020
+#define CAP_LANE_POWER_CL0 0x0400
+#define CAP_LANE_POWER_CL1 0x0800
+#define CAP_LANE_POWER_CL2 0x1000
+ /* Target link/width/power */
+ uint16_t targ_lwp; /* LANE_ADP_CS_1 */
+#define CAP_LANE_TARGET_GEN2 0x0008
+#define CAP_LANE_TARGET_GEN3 0x000c
+#define CAP_LANE_TARGET_SINGLE 0x0010
+#define CAP_LANE_TARGET_DUAL 0x0030
+#define CAP_LANE_DISABLE 0x4000
+#define CAP_LANE_BONDING 0x8000
+ /* Current link/width/state */
+ uint16_t current_lws;
+/* Same definitions as supp_lwp for bits 0 - 9 */
+#define CAP_LANE_STATE_SHIFT 10
+#define CAP_LANE_STATE_MASK GENMASK(13,10)
+#define CAP_LANE_STATE_DISABLE (0x0 << CAP_LANE_STATE_SHIFT)
+#define CAP_LANE_STATE_TRAINING (0x1 << CAP_LANE_STATE_SHIFT)
+#define CAP_LANE_STATE_CL0 (0x2 << CAP_LANE_STATE_SHIFT)
+#define CAP_LANE_STATE_TXCL0 (0x3 << CAP_LANE_STATE_SHIFT)
+#define CAP_LANE_STATE_RXCL0 (0x4 << CAP_LANE_STATE_SHIFT)
+#define CAP_LANE_STATE_CL1 (0x5 << CAP_LANE_STATE_SHIFT)
+#define CAP_LANE_STATE_CL2 (0x6 << CAP_LANE_STATE_SHIFT)
+#define CAP_LANE_STATE_CLD (0x7 << CAP_LANE_STATE_SHIFT)
+#define CAP_LANE_PMS 0x4000
+ /* Logical Layer Errors */
+ uint16_t lle; /* LANE_ADP_CS_2 */
+#define CAP_LANE_LLE_MASK GENMASK(6,0)
+#define CAP_LANE_LLE_ALE 0x01
+#define CAP_LANE_LLE_OSE 0x02
+#define CAP_LANE_LLE_TE 0x04
+#define CAP_LANE_LLE_EBE 0x08
+#define CAP_LANE_LLE_DBE 0x10
+#define CAP_LANE_LLE_RDE 0x20
+#define CAP_LANE_LLE_RST 0x40
+ uint16_t lle_enable;
+} __packed;
+
+/* Config space path registers 8.2.3.1 */
+/* NOTE(review): placeholder; empty structs are a GNU extension, not ISO C. */
+struct tb_cfg_path {
+} __packed;
+
+/* Config space counter registers 8.2.4 */
+/* NOTE(review): placeholder, fields not yet filled in. */
+struct tb_cfg_counters {
+} __packed;
+
+#endif /* _TBCFG_REG_H */
diff --git a/sys/dev/virtio/network/if_vtnet.c b/sys/dev/virtio/network/if_vtnet.c
index 528ff3372097..471c6b3714b2 100644
--- a/sys/dev/virtio/network/if_vtnet.c
+++ b/sys/dev/virtio/network/if_vtnet.c
@@ -281,7 +281,7 @@ static int vtnet_tso_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN,
&vtnet_tso_disable, 0, "Disables TSO");
-static int vtnet_lro_disable = 0;
+static int vtnet_lro_disable = 1;
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN,
&vtnet_lro_disable, 0, "Disables hardware LRO");
@@ -1153,11 +1153,9 @@ vtnet_setup_interface(struct vtnet_softc *sc)
}
if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
- if_setcapabilitiesbit(ifp, IFCAP_RXCSUM, 0);
-#ifdef notyet
/* BMV: Rx checksums not distinguished between IPv4 and IPv6. */
+ if_setcapabilitiesbit(ifp, IFCAP_RXCSUM, 0);
if_setcapabilitiesbit(ifp, IFCAP_RXCSUM_IPV6, 0);
-#endif
if (vtnet_tunable_int(sc, "fixup_needs_csum",
vtnet_fixup_needs_csum) != 0)
@@ -1347,14 +1345,22 @@ vtnet_ioctl_ifcap(struct vtnet_softc *sc, struct ifreq *ifr)
VTNET_CORE_LOCK_ASSERT(sc);
- if (mask & IFCAP_TXCSUM)
+ if (mask & IFCAP_TXCSUM) {
if_togglecapenable(ifp, IFCAP_TXCSUM);
- if (mask & IFCAP_TXCSUM_IPV6)
+ if_togglehwassist(ifp, VTNET_CSUM_OFFLOAD);
+ }
+ if (mask & IFCAP_TXCSUM_IPV6) {
if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6);
- if (mask & IFCAP_TSO4)
+ if_togglehwassist(ifp, VTNET_CSUM_OFFLOAD_IPV6);
+ }
+ if (mask & IFCAP_TSO4) {
if_togglecapenable(ifp, IFCAP_TSO4);
- if (mask & IFCAP_TSO6)
+ if_togglehwassist(ifp, IFCAP_TSO4);
+ }
+ if (mask & IFCAP_TSO6) {
if_togglecapenable(ifp, IFCAP_TSO6);
+ if_togglehwassist(ifp, IFCAP_TSO6);
+ }
if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) {
/*
@@ -1370,27 +1376,20 @@ vtnet_ioctl_ifcap(struct vtnet_softc *sc, struct ifreq *ifr)
if ((mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) ==
IFCAP_LRO && vtnet_software_lro(sc))
reinit = update = 0;
-
- if (mask & IFCAP_RXCSUM)
+ /*
+ * VirtIO does not distinguish between receive checksum offload
+ * for IPv4 and IPv6 packets, so treat them as a pair.
+ */
+ if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
if_togglecapenable(ifp, IFCAP_RXCSUM);
- if (mask & IFCAP_RXCSUM_IPV6)
if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6);
+ }
if (mask & IFCAP_LRO)
if_togglecapenable(ifp, IFCAP_LRO);
-
- /*
- * VirtIO does not distinguish between IPv4 and IPv6 checksums
- * so treat them as a pair. Guest TSO (LRO) requires receive
- * checksums.
- */
- if (if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
- if_setcapenablebit(ifp, IFCAP_RXCSUM, 0);
-#ifdef notyet
- if_setcapenablebit(ifp, IFCAP_RXCSUM_IPV6, 0);
-#endif
- } else
- if_setcapenablebit(ifp, 0,
- (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO));
+ /* Both SW and HW TCP LRO require receive checksum offload. */
+ if ((if_getcapenable(ifp) &
+ (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) == 0)
+ if_setcapenablebit(ifp, 0, IFCAP_LRO);
}
if (mask & IFCAP_VLAN_HWFILTER) {
diff --git a/sys/dev/watchdog/watchdog.c b/sys/dev/watchdog/watchdog.c
index e1b2e08c3f10..c599db56bf95 100644
--- a/sys/dev/watchdog/watchdog.c
+++ b/sys/dev/watchdog/watchdog.c
@@ -204,6 +204,7 @@ wd_valid_act(int act)
return true;
}
+#ifdef COMPAT_FREEBSD14
static int
wd_ioctl_patpat(caddr_t data)
{
@@ -223,6 +224,7 @@ wd_ioctl_patpat(caddr_t data)
return (wdog_kern_pat(u));
}
+#endif
static int
wd_get_time_left(struct thread *td, time_t *remainp)