aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorZbigniew Bodek <zbb@FreeBSD.org>2016-01-26 14:45:25 +0000
committerZbigniew Bodek <zbb@FreeBSD.org>2016-01-26 14:45:25 +0000
commit3098c7f4883d83f28e887bc376250a6d2806ab20 (patch)
tree49151933a7593d0c55feea1851dda509caa4ef44
parent2fb9714dac14c7278c6c0a9383c2d7fb5329f967 (diff)
downloadsrc-vendor/alpine-hal/2.7.tar.gz
src-vendor/alpine-hal/2.7.zip
Update alpine-hal/dist/ accordingly after r294828vendor/alpine-hal/2.7
alpine-hal/dist was omitted in the previous commit. Files added here. HAL version: 2.7 Obtained from: Semihalf Sponsored by: Annapurna Labs
-rw-r--r--al_hal_iofic.c291
-rw-r--r--al_hal_serdes.c3228
-rw-r--r--al_hal_serdes.h1125
-rw-r--r--al_hal_serdes_internal_regs.h750
-rw-r--r--al_hal_serdes_regs.h495
-rw-r--r--al_hal_udma.h672
-rw-r--r--al_hal_udma_config.c1373
-rw-r--r--al_hal_udma_config.h755
-rw-r--r--al_hal_udma_debug.c497
-rw-r--r--al_hal_udma_debug.h134
-rw-r--r--al_hal_udma_iofic.c151
-rw-r--r--al_hal_udma_iofic.h614
-rw-r--r--al_hal_udma_iofic_regs.h66
-rw-r--r--al_hal_udma_main.c618
-rw-r--r--al_hal_udma_regs.h104
-rw-r--r--al_hal_udma_regs_gen.h414
-rw-r--r--al_hal_udma_regs_m2s.h1159
-rw-r--r--al_hal_udma_regs_s2m.h998
-rw-r--r--eth/al_hal_an_lt_wrapper_regs.h264
-rw-r--r--eth/al_hal_eth.h2381
-rw-r--r--eth/al_hal_eth_alu.h95
-rw-r--r--eth/al_hal_eth_ec_regs.h3362
-rw-r--r--eth/al_hal_eth_kr.c1030
-rw-r--r--eth/al_hal_eth_kr.h372
-rw-r--r--eth/al_hal_eth_mac_regs.h1809
-rw-r--r--eth/al_hal_eth_main.c5260
26 files changed, 28017 insertions, 0 deletions
diff --git a/al_hal_iofic.c b/al_hal_iofic.c
new file mode 100644
index 000000000000..28467f2e3b87
--- /dev/null
+++ b/al_hal_iofic.c
@@ -0,0 +1,291 @@
+/*-
+*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @{
+ * @file al_hal_iofic.c
+ *
+ * @brief interrupt controller hal
+ *
+ */
+
+#include "al_hal_iofic.h"
+#include "al_hal_iofic_regs.h"
+
+/*
+ * Configure the interrupt control register of a given group.
+ * Interrupts remain masked after this call.
+ */
+int al_iofic_config(void __iomem *regs_base, int group, uint32_t flags)
+{
+	struct al_iofic_regs __iomem *iofic;
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	iofic = (struct al_iofic_regs __iomem *)regs_base;
+	al_reg_write32(&iofic->ctrl[group].int_control_grp, flags);
+
+	return 0;
+}
+
+/*
+ * Configure the moderation timer resolution for a given group
+ * (read-modify-write of the MOD_RES field in the group control register).
+ */
+int al_iofic_moder_res_config(void __iomem *regs_base, int group,
+		uint8_t resolution)
+{
+	struct al_iofic_regs __iomem *iofic;
+	uint32_t val;
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	iofic = (struct al_iofic_regs __iomem *)regs_base;
+	val = al_reg_read32(&iofic->ctrl[group].int_control_grp);
+	AL_REG_FIELD_SET(val, INT_CONTROL_GRP_MOD_RES_MASK,
+			 INT_CONTROL_GRP_MOD_RES_SHIFT, resolution);
+	al_reg_write32(&iofic->ctrl[group].int_control_grp, val);
+
+	return 0;
+}
+
+/*
+ * Configure the moderation timer interval for a given legacy interrupt group
+ * (read-modify-write of the MOD_INTV field in the group control register).
+ */
+int al_iofic_legacy_moder_interval_config(void __iomem *regs_base, int group,
+		uint8_t interval)
+{
+	struct al_iofic_regs __iomem *iofic;
+	uint32_t val;
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	iofic = (struct al_iofic_regs __iomem *)regs_base;
+	val = al_reg_read32(&iofic->ctrl[group].int_control_grp);
+	AL_REG_FIELD_SET(val, INT_CONTROL_GRP_MOD_INTV_MASK,
+			 INT_CONTROL_GRP_MOD_INTV_SHIFT, interval);
+	al_reg_write32(&iofic->ctrl[group].int_control_grp, val);
+
+	return 0;
+}
+
+
+/*
+ * Configure the moderation timer interval for a given MSI-X vector
+ * (read-modify-write of the per-vector moderation register).
+ */
+int al_iofic_msix_moder_interval_config(void __iomem *regs_base, int group,
+		uint8_t vector, uint8_t interval)
+{
+	struct al_iofic_regs __iomem *iofic;
+	uint32_t val;
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	iofic = (struct al_iofic_regs __iomem *)regs_base;
+	val = al_reg_read32(&iofic->grp_int_mod[group][vector].grp_int_mod_reg);
+	AL_REG_FIELD_SET(val, INT_MOD_INTV_MASK,
+			 INT_MOD_INTV_SHIFT, interval);
+	al_reg_write32(&iofic->grp_int_mod[group][vector].grp_int_mod_reg, val);
+
+	return 0;
+}
+
+/*
+ * Configure the VMID attributes for a given MSI-X vector: the VMID value and
+ * its enable bit are written together to the per-vector VMID register.
+ */
+int al_iofic_msix_vmid_attributes_config(void __iomem *regs_base, int group,
+		uint8_t vector, uint32_t vmid, uint8_t vmid_en)
+{
+	struct al_iofic_regs __iomem *iofic;
+	uint32_t val = 0;
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	AL_REG_FIELD_SET(val, INT_MSIX_VMID_MASK, INT_MSIX_VMID_SHIFT, vmid);
+	AL_REG_BIT_VAL_SET(val, INT_MSIX_VMID_EN_SHIFT, vmid_en);
+
+	iofic = (struct al_iofic_regs __iomem *)regs_base;
+	al_reg_write32(&iofic->grp_int_mod[group][vector].grp_int_vmid_reg, val);
+
+	return 0;
+}
+
+/*
+ * Return the address of the unmask (mask-clear) register for a given group.
+ */
+uint32_t __iomem * al_iofic_unmask_offset_get(void __iomem *regs_base, int group)
+{
+	struct al_iofic_regs __iomem *iofic;
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	iofic = (struct al_iofic_regs __iomem *)regs_base;
+	return &iofic->ctrl[group].int_mask_clear_grp;
+}
+
+
+/*
+ * Unmask specific interrupts for a given group.
+ *
+ * Uses the dedicated mask-clear register, so the mask register itself need
+ * not be read: writing 0 to a bit unmasks it, writing 1 has no effect
+ * (hence the inverted mask).
+ */
+void al_iofic_unmask(void __iomem *regs_base, int group, uint32_t mask)
+{
+	struct al_iofic_regs __iomem *iofic;
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	iofic = (struct al_iofic_regs __iomem *)regs_base;
+	al_reg_write32_relaxed(&iofic->ctrl[group].int_mask_clear_grp, ~mask);
+}
+
+/*
+ * Mask specific interrupts for a given group
+ * (read-modify-write: OR the requested bits into the mask register).
+ */
+void al_iofic_mask(void __iomem *regs_base, int group, uint32_t mask)
+{
+	struct al_iofic_regs __iomem *iofic;
+	uint32_t val;
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	iofic = (struct al_iofic_regs __iomem *)regs_base;
+	val = al_reg_read32(&iofic->ctrl[group].int_mask_grp);
+	al_reg_write32(&iofic->ctrl[group].int_mask_grp, val | mask);
+}
+
+/*
+ * Read the interrupt mask register of a given group.
+ */
+uint32_t al_iofic_read_mask(void __iomem *regs_base, int group)
+{
+	struct al_iofic_regs __iomem *iofic;
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	iofic = (struct al_iofic_regs __iomem *)regs_base;
+	return al_reg_read32(&iofic->ctrl[group].int_mask_grp);
+}
+
+/*
+ * Read the interrupt cause register of a given group.
+ */
+uint32_t al_iofic_read_cause(void __iomem *regs_base, int group)
+{
+	struct al_iofic_regs __iomem *iofic;
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	iofic = (struct al_iofic_regs __iomem *)regs_base;
+	return al_reg_read32(&iofic->ctrl[group].int_cause_grp);
+}
+
+/*
+ * Clear bits in the interrupt cause register of a given group.
+ * The register takes an inverted mask: writing 0 clears a bit, writing 1
+ * has no effect.
+ */
+void al_iofic_clear_cause(void __iomem *regs_base, int group, uint32_t mask)
+{
+	struct al_iofic_regs __iomem *iofic;
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	iofic = (struct al_iofic_regs __iomem *)regs_base;
+	al_reg_write32(&iofic->ctrl[group].int_cause_grp, ~mask);
+}
+
+/*
+ * Set bits in the interrupt cause register of a given group
+ * (via the dedicated cause-set register).
+ */
+void al_iofic_set_cause(void __iomem *regs_base, int group, uint32_t mask)
+{
+	struct al_iofic_regs __iomem *iofic;
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	iofic = (struct al_iofic_regs __iomem *)regs_base;
+	al_reg_write32(&iofic->ctrl[group].int_cause_set_grp, mask);
+}
+
+
+/*
+ * Set the UDMA abort mask register for a given group.
+ * NOTE(review): the bit polarity (whether a set bit allows or prevents the
+ * UDMA abort) is not visible here - confirm against the IOFIC register spec.
+ */
+void al_iofic_abort_mask(void __iomem *regs_base, int group, uint32_t mask)
+{
+	struct al_iofic_regs __iomem *iofic;
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	iofic = (struct al_iofic_regs __iomem *)regs_base;
+	al_reg_write32(&iofic->ctrl[group].int_abort_msk_grp, mask);
+}
+
+/*
+ * Trigger all interrupts that are waiting for moderation timers to expire.
+ *
+ * Sets the moderation-reset bit in the group interrupt control register via
+ * a read-modify-write, preserving the other control bits.
+ *
+ * Fix: the original repeated the same two al_assert() calls twice; the
+ * duplicate pair and the redundant 'reg = 0' initializer were removed.
+ */
+void al_iofic_interrupt_moderation_reset(void __iomem *regs_base, int group)
+{
+	struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base);
+	uint32_t reg;
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	reg = al_reg_read32(&regs->ctrl[group].int_control_grp);
+	reg |= INT_CONTROL_GRP_MOD_RST;
+
+	al_reg_write32(&regs->ctrl[group].int_control_grp, reg);
+}
+
+/** @} end of interrupt controller group */
diff --git a/al_hal_serdes.c b/al_hal_serdes.c
new file mode 100644
index 000000000000..bb34d13c765f
--- /dev/null
+++ b/al_hal_serdes.c
@@ -0,0 +1,3228 @@
+/*-
+*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "al_hal_serdes.h"
+#include "al_hal_serdes_regs.h"
+#include "al_hal_serdes_internal_regs.h"
+
+#define SRDS_CORE_REG_ADDR(page, type, offset)\
+ (((page) << 13) | ((type) << 12) | (offset))
+
+/* Link Training configuration */
+#define AL_SERDES_TX_DEEMPH_SUM_MAX 0x1b
+
+/* c configurations */
+#define AL_SERDES_TX_DEEMPH_C_ZERO_MAX_VAL 0x1b
+#define AL_SERDES_TX_DEEMPH_C_ZERO_MIN_VAL 0
+#define AL_SERDES_TX_DEEMPH_C_ZERO_PRESET AL_SERDES_TX_DEEMPH_C_ZERO_MAX_VAL
+
+/* c(+1) configurations */
+#define AL_SERDES_TX_DEEMPH_C_PLUS_MAX_VAL 0x9
+#define AL_SERDES_TX_DEEMPH_C_PLUS_MIN_VAL 0
+#define AL_SERDES_TX_DEEMPH_C_PLUS_PRESET AL_SERDES_TX_DEEMPH_C_PLUS_MIN_VAL
+
+/* c(-1) configurations */
+#define AL_SERDES_TX_DEEMPH_C_MINUS_MAX_VAL 0x6
+#define AL_SERDES_TX_DEEMPH_C_MINUS_MIN_VAL 0
+#define AL_SERDES_TX_DEEMPH_C_MINUS_PRESET AL_SERDES_TX_DEEMPH_C_MINUS_MIN_VAL
+
+/* Rx equal total delay = MDELAY * TRIES */
+#define AL_SERDES_RX_EQUAL_MDELAY 10
+#define AL_SERDES_RX_EQUAL_TRIES 50
+
+/* Rx eye calculation delay = MDELAY * TRIES */
+#define AL_SERDES_RX_EYE_CAL_MDELAY 50
+#define AL_SERDES_RX_EYE_CAL_TRIES 70
+
+
+/**
+ * Prototypes for _lane_ compatibility
+ */
+int al_serdes_lane_read(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ enum al_serdes_reg_type type,
+ uint16_t offset,
+ uint8_t *data);
+
+int al_serdes_lane_write(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ enum al_serdes_reg_type type,
+ uint16_t offset,
+ uint8_t data);
+
+
+/**
+ * SERDES core reg/lane read
+ */
+static inline uint8_t al_serdes_grp_reg_read(
+ struct al_serdes_group_info *grp_info,
+ enum al_serdes_reg_page page,
+ enum al_serdes_reg_type type,
+ uint16_t offset);
+
+static inline uint8_t al_serdes_grp_lane_read(
+ struct al_serdes_group_info *grp_info,
+ enum al_serdes_lane page,
+ enum al_serdes_reg_type type,
+ uint16_t offset);
+
+/**
+ * SERDES core reg/lane write
+ */
+static inline void al_serdes_grp_reg_write(
+ struct al_serdes_group_info *grp_info,
+ enum al_serdes_reg_page page,
+ enum al_serdes_reg_type type,
+ uint16_t offset,
+ uint8_t data);
+
+static inline void al_serdes_grp_lane_write(
+ struct al_serdes_group_info *grp_info,
+ enum al_serdes_lane lane,
+ enum al_serdes_reg_type type,
+ uint16_t offset,
+ uint8_t data);
+
+/**
+ * SERDES core masked reg/lane write
+ */
+static inline void al_serdes_grp_reg_masked_write(
+ struct al_serdes_group_info *grp_info,
+ enum al_serdes_reg_page page,
+ enum al_serdes_reg_type type,
+ uint16_t offset,
+ uint8_t mask,
+ uint8_t data);
+
+/**
+ * Lane Rx rate change software flow disable
+ */
+static void _al_serdes_lane_rx_rate_change_sw_flow_dis(
+ struct al_serdes_group_info *grp_info,
+ enum al_serdes_lane lane);
+
+/**
+ * Group Rx rate change software flow enable if all conditions met
+ */
+static void al_serdes_group_rx_rate_change_sw_flow_dis(
+ struct al_serdes_group_info *grp_info);
+
+/**
+ * Lane Rx rate change software flow enable if all conditions met
+ */
+static void _al_serdes_lane_rx_rate_change_sw_flow_en_cond(
+ struct al_serdes_group_info *grp_info,
+ enum al_serdes_lane lane);
+
+/**
+ * Group Rx rate change software flow enable if all conditions met
+ */
+static void al_serdes_group_rx_rate_change_sw_flow_en_cond(
+ struct al_serdes_group_info *grp_info);
+
+
+static inline void al_serdes_grp_lane_masked_write(
+ struct al_serdes_group_info *grp_info,
+ enum al_serdes_lane lane,
+ enum al_serdes_reg_type type,
+ uint16_t offset,
+ uint8_t mask,
+ uint8_t data);
+
+/******************************************************************************/
+/******************************************************************************/
+/*
+ * Initialize a SERDES handle: record the back-pointer and the per-group
+ * register base inside each group-info entry of the object.
+ *
+ * @return 0 on success
+ */
+int al_serdes_handle_init(
+	void __iomem *serdes_regs_base,
+	struct al_serdes_obj *obj)
+{
+	struct al_serdes_regs *group_regs;
+	int grp_idx;
+
+	al_dbg("%s(%p, %p)\n", __func__, serdes_regs_base, obj);
+
+	al_assert(serdes_regs_base);
+
+	/* Groups are laid out as a contiguous array of register blocks */
+	group_regs = (struct al_serdes_regs *)serdes_regs_base;
+	for (grp_idx = 0; grp_idx < AL_SRDS_NUM_GROUPS; grp_idx++) {
+		obj->grp_info[grp_idx].pobj = obj;
+		obj->grp_info[grp_idx].regs_base = &group_regs[grp_idx];
+	}
+
+	return 0;
+}
+
+/******************************************************************************/
+/******************************************************************************/
+/*
+ * Read a single SERDES register.
+ *
+ * @param obj	 SERDES handle
+ * @param grp	 group (A..D)
+ * @param page	 register page (per-lane page or the common page)
+ * @param type	 register type (PMA / PCS)
+ * @param offset register offset within the page
+ * @param data	 returned register value
+ *
+ * @return 0 on success
+ */
+int al_serdes_reg_read(
+	struct al_serdes_obj *obj,
+	enum al_serdes_group grp,
+	enum al_serdes_reg_page page,
+	enum al_serdes_reg_type type,
+	uint16_t offset,
+	uint8_t *data)
+{
+	al_dbg("%s(%p, %d, %d, %d, %u)\n",
+		__func__, obj, grp, page, type, offset);
+
+	al_assert(obj);
+	al_assert(data);
+	al_assert(((int)grp) >= AL_SRDS_GRP_A);
+	al_assert(((int)grp) <= AL_SRDS_GRP_D);
+	al_assert(((int)page) >= AL_SRDS_REG_PAGE_0_LANE_0);
+	al_assert(((int)page) <= AL_SRDS_REG_PAGE_4_COMMON);
+	al_assert(((int)type) >= AL_SRDS_REG_TYPE_PMA);
+	al_assert(((int)type) <= AL_SRDS_REG_TYPE_PCS);
+
+	*data = al_serdes_grp_reg_read(&obj->grp_info[grp], page, type, offset);
+
+	al_dbg("%s: return(%u)\n", __func__, *data);
+
+	return 0;
+}
+
+/*
+ * Lane-oriented read wrapper: a lane maps 1:1 onto its per-lane register
+ * page, so this simply forwards to al_serdes_reg_read().
+ */
+int al_serdes_lane_read(
+	struct al_serdes_obj *obj,
+	enum al_serdes_group grp,
+	enum al_serdes_lane lane,
+	enum al_serdes_reg_type type,
+	uint16_t offset,
+	uint8_t *data)
+{
+	return al_serdes_reg_read(
+		obj, grp, (enum al_serdes_reg_page)lane, type, offset, data);
+}
+/******************************************************************************/
+/******************************************************************************/
+/*
+ * Write a single SERDES register.
+ *
+ * @param obj	 SERDES handle
+ * @param grp	 group (A..D)
+ * @param page	 register page (per-lane page or the all-lanes broadcast page)
+ * @param type	 register type (PMA / PCS)
+ * @param offset register offset within the page
+ * @param data	 value to write
+ *
+ * @return 0 on success
+ *
+ * NOTE(review): the page upper bound here is AL_SRDS_REG_PAGE_0123_LANES_0123
+ * (lane broadcast), while al_serdes_reg_read() only allows up to
+ * AL_SRDS_REG_PAGE_4_COMMON - presumably broadcast is write-only; confirm the
+ * enum ordering in al_hal_serdes.h.
+ */
+int al_serdes_reg_write(
+	struct al_serdes_obj *obj,
+	enum al_serdes_group grp,
+	enum al_serdes_reg_page page,
+	enum al_serdes_reg_type type,
+	uint16_t offset,
+	uint8_t data)
+{
+	int status = 0;
+
+	al_dbg(
+		"%s(%p, %d, %d, %d, %u, %u)\n",
+		__func__,
+		obj,
+		grp,
+		page,
+		type,
+		offset,
+		data);
+
+	al_assert(obj);
+	al_assert(((int)grp) >= AL_SRDS_GRP_A);
+	al_assert(((int)grp) <= AL_SRDS_GRP_D);
+	al_assert(((int)page) >= AL_SRDS_REG_PAGE_0_LANE_0);
+	al_assert(((int)page) <= AL_SRDS_REG_PAGE_0123_LANES_0123);
+	al_assert(((int)type) >= AL_SRDS_REG_TYPE_PMA);
+	al_assert(((int)type) <= AL_SRDS_REG_TYPE_PCS);
+
+	al_serdes_grp_reg_write(
+		&obj->grp_info[grp],
+		page,
+		type,
+		offset,
+		data);
+
+	return status;
+}
+
+/*
+ * Lane-oriented write wrapper: a lane maps 1:1 onto its per-lane register
+ * page, so this simply forwards to al_serdes_reg_write().
+ */
+int al_serdes_lane_write(
+	struct al_serdes_obj *obj,
+	enum al_serdes_group grp,
+	enum al_serdes_lane lane,
+	enum al_serdes_reg_type type,
+	uint16_t offset,
+	uint8_t data)
+{
+	return al_serdes_reg_write(
+		obj, grp, (enum al_serdes_reg_page)lane, type, offset, data);
+}
+/******************************************************************************/
+/******************************************************************************/
+#if (SERDES_IREG_FLD_PCSRX_DATAWIDTH_REG_NUM != SERDES_IREG_FLD_PCSTX_DATAWIDTH_REG_NUM)
+#error "Wrong assumption!"
+#endif
+#if (SERDES_IREG_FLD_PCSRX_DIVRATE_REG_NUM != SERDES_IREG_FLD_PCSTX_DIVRATE_REG_NUM)
+#error "Wrong assumption!"
+#endif
+#if (SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN_REG_NUM != SERDES_IREG_FLD_CMNPCS_LOCWREN_REG_NUM)
+#error "Wrong assumption!"
+#endif
+#if (SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN_REG_NUM != SERDES_IREG_FLD_CMNPCSBIST_LOCWREN_REG_NUM)
+#error "Wrong assumption!"
+#endif
+#if (SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN_REG_NUM != SERDES_IREG_FLD_CMNPCSPSTATE_LOCWREN_REG_NUM)
+#error "Wrong assumption!"
+#endif
+#if (SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN_REG_NUM != SERDES_IREG_FLD_LB_LOCWREN_REG_NUM)
+#error "Wrong assumption!"
+#endif
+#if (SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN_REG_NUM != SERDES_IREG_FLD_PCSRX_LOCWREN_REG_NUM)
+#error "Wrong assumption!"
+#endif
+#if (SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN_REG_NUM != SERDES_IREG_FLD_PCSRXBIST_LOCWREN_REG_NUM)
+#error "Wrong assumption!"
+#endif
+#if (SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN_REG_NUM != SERDES_IREG_FLD_PCSRXEQ_LOCWREN_REG_NUM)
+#error "Wrong assumption!"
+#endif
+#if (SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN_REG_NUM != SERDES_IREG_FLD_PCSTX_LOCWREN_REG_NUM)
+#error "Wrong assumption!"
+#endif
+/*
+ * Enable BIST-related register overrides for a whole SERDES group:
+ * force 20-bit PCS data width and the divider matching the requested rate on
+ * every lane, clear the local-write-enable protections so the BIST logic can
+ * drive the registers, enable the common PCS transmitter, and force the RX
+ * lock-to-reference override per lane.
+ *
+ * Fix: the original issued the identical common-page LOCWREN masked write
+ * twice back-to-back; the duplicate was removed. A 'break' was also added to
+ * the default switch case.
+ */
+void al_serdes_bist_overrides_enable(
+	struct al_serdes_obj *obj,
+	enum al_serdes_group grp,
+	enum al_serdes_rate rate)
+{
+	struct al_serdes_group_info *grp_info = &obj->grp_info[grp];
+	int i;
+
+	uint8_t rx_rate_val;
+	uint8_t tx_rate_val;
+
+	switch (rate) {
+	case AL_SRDS_RATE_1_8:
+		rx_rate_val = SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_8;
+		tx_rate_val = SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_8;
+		break;
+	case AL_SRDS_RATE_1_4:
+		rx_rate_val = SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_4;
+		tx_rate_val = SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_4;
+		break;
+	case AL_SRDS_RATE_1_2:
+		rx_rate_val = SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_2;
+		tx_rate_val = SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_2;
+		break;
+	case AL_SRDS_RATE_FULL:
+		rx_rate_val = SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_1;
+		tx_rate_val = SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_1;
+		break;
+	default:
+		al_err("%s: invalid rate (%d)\n", __func__, rate);
+		al_assert(0);
+		/* fall back to full rate so the writes below stay defined */
+		rx_rate_val = SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_1;
+		tx_rate_val = SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_1;
+		break;
+	}
+
+	/* Per lane: force 20-bit data width and the requested divider rate */
+	for (i = 0; i < AL_SRDS_NUM_LANES; i++) {
+		al_serdes_grp_reg_masked_write(
+			grp_info,
+			(enum al_serdes_reg_page)i,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_PCSRX_DATAWIDTH_REG_NUM,
+			SERDES_IREG_FLD_PCSRX_DATAWIDTH_MASK |
+			SERDES_IREG_FLD_PCSTX_DATAWIDTH_MASK,
+			SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_20 |
+			SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_20);
+
+		al_serdes_grp_reg_masked_write(
+			grp_info,
+			(enum al_serdes_reg_page)i,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_PCSRX_DIVRATE_REG_NUM,
+			SERDES_IREG_FLD_PCSRX_DIVRATE_MASK |
+			SERDES_IREG_FLD_PCSTX_DIVRATE_MASK,
+			rx_rate_val | tx_rate_val);
+	}
+
+	/*
+	 * Clear the common-page local-write-enable bits (the original code
+	 * issued this identical masked write twice; once is sufficient).
+	 */
+	al_serdes_grp_reg_masked_write(
+		grp_info,
+		AL_SRDS_REG_PAGE_4_COMMON,
+		AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN_REG_NUM,
+		SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN |
+		SERDES_IREG_FLD_CMNPCS_LOCWREN |
+		SERDES_IREG_FLD_CMNPCSBIST_LOCWREN |
+		SERDES_IREG_FLD_CMNPCSPSTATE_LOCWREN,
+		0);
+
+	al_serdes_grp_reg_masked_write(
+		grp_info,
+		AL_SRDS_REG_PAGE_4_COMMON,
+		AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_PCS_LOCWREN_REG_NUM,
+		SERDES_IREG_FLD_PCS_LOCWREN,
+		0);
+
+	/* Enable the common PCS transmitter */
+	al_serdes_grp_reg_masked_write(
+		grp_info,
+		AL_SRDS_REG_PAGE_4_COMMON,
+		AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_CMNPCS_TXENABLE_REG_NUM,
+		SERDES_IREG_FLD_CMNPCS_TXENABLE,
+		SERDES_IREG_FLD_CMNPCS_TXENABLE);
+
+	/* Per lane: clear local-write-enables, force RX lock-to-reference */
+	for (i = 0; i < AL_SRDS_NUM_LANES; i++) {
+		al_serdes_grp_reg_masked_write(
+			grp_info,
+			(enum al_serdes_reg_page)i,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN_REG_NUM,
+			SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN |
+			SERDES_IREG_FLD_LB_LOCWREN |
+			SERDES_IREG_FLD_PCSRX_LOCWREN |
+			SERDES_IREG_FLD_PCSRXBIST_LOCWREN |
+			SERDES_IREG_FLD_PCSRXEQ_LOCWREN |
+			SERDES_IREG_FLD_PCSTX_LOCWREN,
+			0);
+
+		al_serdes_grp_reg_masked_write(
+			grp_info,
+			(enum al_serdes_reg_page)i,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_PCSTXBIST_LOCWREN_REG_NUM,
+			SERDES_IREG_FLD_PCSTXBIST_LOCWREN,
+			0);
+
+		al_serdes_grp_reg_masked_write(
+			grp_info,
+			(enum al_serdes_reg_page)i,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN_REG_NUM,
+			SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN,
+			0);
+
+		al_serdes_grp_reg_masked_write(
+			grp_info,
+			(enum al_serdes_reg_page)i,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXLOCK2REF_OVREN_REG_NUM,
+			SERDES_IREG_FLD_RXLOCK2REF_OVREN,
+			SERDES_IREG_FLD_RXLOCK2REF_OVREN);
+	}
+}
+
+/******************************************************************************/
+/******************************************************************************/
+/*
+ * Disable BIST-related register overrides for a whole SERDES group: restore
+ * the local-write-enable protections cleared by
+ * al_serdes_bist_overrides_enable().
+ */
+void al_serdes_bist_overrides_disable(
+	struct al_serdes_obj *obj,
+	enum al_serdes_group grp)
+{
+	struct al_serdes_group_info *grp_info = &obj->grp_info[grp];
+	int lane;
+
+	/* Restore the common-page BIST local-write-enable */
+	al_serdes_grp_reg_masked_write(
+		grp_info,
+		AL_SRDS_REG_PAGE_4_COMMON,
+		AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN_REG_NUM,
+		SERDES_IREG_FLD_CMNPCSBIST_LOCWREN,
+		SERDES_IREG_FLD_CMNPCSBIST_LOCWREN);
+
+	/* Restore the per-lane loopback and BIST local-write-enables */
+	for (lane = 0; lane < AL_SRDS_NUM_LANES; lane++) {
+		al_serdes_grp_reg_masked_write(
+			grp_info,
+			(enum al_serdes_reg_page)lane,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN_REG_NUM,
+			SERDES_IREG_FLD_LB_LOCWREN |
+			SERDES_IREG_FLD_PCSRXBIST_LOCWREN,
+			SERDES_IREG_FLD_LB_LOCWREN |
+			SERDES_IREG_FLD_PCSRXBIST_LOCWREN);
+
+		al_serdes_grp_reg_masked_write(
+			grp_info,
+			(enum al_serdes_reg_page)lane,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_PCSTXBIST_LOCWREN_REG_NUM,
+			SERDES_IREG_FLD_PCSTXBIST_LOCWREN,
+			SERDES_IREG_FLD_PCSTXBIST_LOCWREN);
+	}
+}
+
+/******************************************************************************/
+/******************************************************************************/
+/*
+ * Change the RX divider rate of every lane in a group.
+ * An invalid rate is logged and treated as full rate.
+ */
+void al_serdes_rx_rate_change(
+	struct al_serdes_obj *obj,
+	enum al_serdes_group grp,
+	enum al_serdes_rate rate)
+{
+	struct al_serdes_group_info *grp_info = &obj->grp_info[grp];
+	uint8_t divrate;
+	int lane;
+
+	switch (rate) {
+	case AL_SRDS_RATE_1_8:
+		divrate = SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_8;
+		break;
+	case AL_SRDS_RATE_1_4:
+		divrate = SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_4;
+		break;
+	case AL_SRDS_RATE_1_2:
+		divrate = SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_2;
+		break;
+	case AL_SRDS_RATE_FULL:
+		divrate = SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_1;
+		break;
+	default:
+		al_err("%s: invalid rate (%d)\n", __func__, rate);
+		divrate = SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_1;
+		break;
+	}
+
+	for (lane = 0; lane < AL_SRDS_NUM_LANES; lane++) {
+		al_serdes_grp_reg_masked_write(
+			grp_info,
+			(enum al_serdes_reg_page)lane,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_PCSRX_DIVRATE_REG_NUM,
+			SERDES_IREG_FLD_PCSRX_DIVRATE_MASK,
+			divrate);
+	}
+}
+
+/******************************************************************************/
+/******************************************************************************/
+/*
+ * Set the power mode of a SERDES group's synthesizer.
+ * When entering power-down the RX rate change software flow is disabled
+ * first; when leaving power-down it is conditionally re-enabled afterwards.
+ */
+void al_serdes_group_pm_set(
+	struct al_serdes_obj *obj,
+	enum al_serdes_group grp,
+	enum al_serdes_pm pm)
+{
+	struct al_serdes_group_info *grp_info = &obj->grp_info[grp];
+	uint8_t synth_pm_val;
+
+	switch (pm) {
+	case AL_SRDS_PM_PD:
+		synth_pm_val = SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_PD;
+		break;
+	case AL_SRDS_PM_P2:
+		synth_pm_val = SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P2;
+		break;
+	case AL_SRDS_PM_P1:
+		synth_pm_val = SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P1;
+		break;
+	case AL_SRDS_PM_P0S:
+		synth_pm_val = SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P0S;
+		break;
+	case AL_SRDS_PM_P0:
+		synth_pm_val = SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P0;
+		break;
+	default:
+		al_err("%s: invalid power mode (%d)\n", __func__, pm);
+		al_assert(0);
+		synth_pm_val = SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P0;
+		break;
+	}
+
+	if (pm == AL_SRDS_PM_PD)
+		al_serdes_group_rx_rate_change_sw_flow_dis(grp_info);
+
+	al_serdes_grp_reg_masked_write(
+		grp_info,
+		AL_SRDS_REG_PAGE_4_COMMON,
+		AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_REG_NUM,
+		SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_MASK,
+		synth_pm_val);
+
+	if (pm != AL_SRDS_PM_PD)
+		al_serdes_group_rx_rate_change_sw_flow_en_cond(grp_info);
+}
+
+/******************************************************************************/
+/******************************************************************************/
+/*
+ * Enable the lane RX rate change software flow by writing a fixed sequence of
+ * raw PMA register values (offsets 201-205).
+ * NOTE(review): the offsets/values are magic numbers with no named fields in
+ * this file; offset 205 is deliberately written twice (0x7f then 0xff), which
+ * looks like a required toggle of its MSB - confirm against the SERDES spec.
+ */
+void al_serdes_lane_rx_rate_change_sw_flow_en(
+	struct al_serdes_obj *obj,
+	enum al_serdes_group grp,
+	enum al_serdes_lane lane)
+{
+	al_serdes_lane_write(obj, grp, lane, AL_SRDS_REG_TYPE_PMA, 201, 0xfc);
+	al_serdes_lane_write(obj, grp, lane, AL_SRDS_REG_TYPE_PMA, 202, 0xff);
+	al_serdes_lane_write(obj, grp, lane, AL_SRDS_REG_TYPE_PMA, 203, 0xff);
+	al_serdes_lane_write(obj, grp, lane, AL_SRDS_REG_TYPE_PMA, 204, 0xff);
+	al_serdes_lane_write(obj, grp, lane, AL_SRDS_REG_TYPE_PMA, 205, 0x7f);
+	al_serdes_lane_write(obj, grp, lane, AL_SRDS_REG_TYPE_PMA, 205, 0xff);
+}
+
+/******************************************************************************/
+/******************************************************************************/
+/*
+ * Disable the lane RX rate change software flow.
+ * Writes 0x7f to raw PMA register 205, i.e. clears the bit that the enable
+ * sequence sets last - presumably the flow-enable bit; confirm against the
+ * SERDES spec.
+ */
+void al_serdes_lane_rx_rate_change_sw_flow_dis(
+	struct al_serdes_obj *obj,
+	enum al_serdes_group grp,
+	enum al_serdes_lane lane)
+{
+	al_serdes_lane_write(obj, grp, lane, AL_SRDS_REG_TYPE_PMA, 205, 0x7f);
+}
+
+/******************************************************************************/
+/******************************************************************************/
+/*
+ * Enable or disable the PCIe rate override for a single lane.
+ */
+void al_serdes_lane_pcie_rate_override_enable_set(
+	struct al_serdes_obj *obj,
+	enum al_serdes_group grp,
+	enum al_serdes_lane lane,
+	al_bool en)
+{
+	uint8_t ena_val = en ? SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_ENA : 0;
+
+	al_serdes_grp_reg_masked_write(
+		&obj->grp_info[grp],
+		(enum al_serdes_reg_page)lane,
+		AL_SRDS_REG_TYPE_PCS,
+		SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_ENA_REG_NUM,
+		SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_ENA,
+		ena_val);
+}
+
+/******************************************************************************/
+/******************************************************************************/
+/*
+ * Check whether the PCIe rate override is enabled for a single lane.
+ */
+al_bool al_serdes_lane_pcie_rate_override_is_enabled(
+	struct al_serdes_obj *obj,
+	enum al_serdes_group grp,
+	enum al_serdes_lane lane)
+{
+	uint8_t val;
+
+	val = al_serdes_grp_lane_read(
+		&obj->grp_info[grp],
+		lane,
+		AL_SRDS_REG_TYPE_PCS,
+		SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_ENA_REG_NUM);
+
+	return (val & SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_ENA) ?
+		AL_TRUE : AL_FALSE;
+}
+
+/******************************************************************************/
+/******************************************************************************/
+/*
+ * Get the PCIe rate override value of a single lane.
+ */
+enum al_serdes_pcie_rate al_serdes_lane_pcie_rate_get(
+	struct al_serdes_obj *obj,
+	enum al_serdes_group grp,
+	enum al_serdes_lane lane)
+{
+	uint8_t val;
+
+	val = al_serdes_grp_reg_read(
+		&obj->grp_info[grp],
+		(enum al_serdes_reg_page)lane,
+		AL_SRDS_REG_TYPE_PCS,
+		SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_REG_NUM);
+
+	return (val & SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_MASK) >>
+		SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_SHIFT;
+}
+
+/******************************************************************************/
+/******************************************************************************/
+/*
+ * Set the PCIe rate override value of a single lane.
+ */
+void al_serdes_lane_pcie_rate_set(
+	struct al_serdes_obj *obj,
+	enum al_serdes_group grp,
+	enum al_serdes_lane lane,
+	enum al_serdes_pcie_rate rate)
+{
+	al_serdes_grp_reg_masked_write(
+		&obj->grp_info[grp],
+		(enum al_serdes_reg_page)lane,
+		AL_SRDS_REG_TYPE_PCS,
+		SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_REG_NUM,
+		SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_MASK,
+		rate << SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_SHIFT);
+}
+
+/******************************************************************************/
+/******************************************************************************/
+/*
+ * Set the RX and TX power modes of a single lane.
+ *
+ * Translates each requested power mode to its register field encoding, then
+ * writes the lane RX and TX PCS power-state fields. When the RX side is being
+ * powered down, the lane RX rate change software flow is disabled first; when
+ * RX is not powered down it is conditionally re-enabled afterwards.
+ */
+void al_serdes_lane_pm_set(
+	struct al_serdes_obj *obj,
+	enum al_serdes_group grp,
+	enum al_serdes_lane lane,
+	enum al_serdes_pm rx_pm,
+	enum al_serdes_pm tx_pm)
+{
+	struct al_serdes_group_info *grp_info = &obj->grp_info[grp];
+
+	uint8_t rx_pm_val;
+	uint8_t tx_pm_val;
+
+	/* Map the RX power mode to its LANEPCSPSTATE_RX field encoding */
+	switch (rx_pm) {
+	case AL_SRDS_PM_PD:
+		rx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_PD;
+		break;
+	case AL_SRDS_PM_P2:
+		rx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P2;
+		break;
+	case AL_SRDS_PM_P1:
+		rx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P1;
+		break;
+	case AL_SRDS_PM_P0S:
+		rx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P0S;
+		break;
+	case AL_SRDS_PM_P0:
+		rx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P0;
+		break;
+	default:
+		al_err("%s: invalid rx power mode (%d)\n", __func__, rx_pm);
+		al_assert(0);
+		/* fallback value used if al_assert is non-fatal */
+		rx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P0;
+	}
+
+	/* Map the TX power mode to its LANEPCSPSTATE_TX field encoding */
+	switch (tx_pm) {
+	case AL_SRDS_PM_PD:
+		tx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_PD;
+		break;
+	case AL_SRDS_PM_P2:
+		tx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P2;
+		break;
+	case AL_SRDS_PM_P1:
+		tx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P1;
+		break;
+	case AL_SRDS_PM_P0S:
+		tx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P0S;
+		break;
+	case AL_SRDS_PM_P0:
+		tx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P0;
+		break;
+	default:
+		al_err("%s: invalid tx power mode (%d)\n", __func__, tx_pm);
+		al_assert(0);
+		/* fallback value used if al_assert is non-fatal */
+		tx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P0;
+	}
+
+	/* RX power-down: stop the RX rate change software flow first */
+	if (rx_pm == AL_SRDS_PM_PD)
+		_al_serdes_lane_rx_rate_change_sw_flow_dis(grp_info, lane);
+
+	al_serdes_grp_reg_masked_write(
+		grp_info,
+		(enum al_serdes_reg_page)lane,
+		AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_LANEPCSPSTATE_RX_REG_NUM,
+		SERDES_IREG_FLD_LANEPCSPSTATE_RX_MASK,
+		rx_pm_val);
+
+	al_serdes_grp_reg_masked_write(
+		grp_info,
+		(enum al_serdes_reg_page)lane,
+		AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_LANEPCSPSTATE_TX_REG_NUM,
+		SERDES_IREG_FLD_LANEPCSPSTATE_TX_MASK,
+		tx_pm_val);
+
+	/* RX active again: re-enable the software flow if conditions allow */
+	if (rx_pm != AL_SRDS_PM_PD)
+		_al_serdes_lane_rx_rate_change_sw_flow_en_cond(grp_info, lane);
+}
+
+/******************************************************************************/
+/******************************************************************************/
+void al_serdes_pma_hard_reset_group(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ al_bool enable)
+{
+ struct al_serdes_group_info *grp_info = &obj->grp_info[grp];
+
+ if (enable)
+ al_serdes_group_rx_rate_change_sw_flow_dis(grp_info);
+
+ /* Enable Hard Reset Override */
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_REG_NUM,
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_MASK,
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_VAL_REGS);
+
+ /* Assert/Deassert Hard Reset Override */
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_REG_NUM,
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_MASK,
+ enable ?
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_VAL_ASSERT :
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_VAL_DEASSERT);
+
+ if (!enable)
+ al_serdes_group_rx_rate_change_sw_flow_en_cond(grp_info);
+}
+
+/******************************************************************************/
+/******************************************************************************/
+void al_serdes_pma_hard_reset_lane(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ al_bool enable)
+{
+ struct al_serdes_group_info *grp_info = &obj->grp_info[grp];
+
+ if (enable)
+ _al_serdes_lane_rx_rate_change_sw_flow_dis(grp_info, lane);
+
+ /* Enable Hard Reset Override */
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ (enum al_serdes_reg_page)lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_REG_NUM,
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_MASK,
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_VAL_REGS);
+
+ /* Assert/Deassert Hard Reset Override */
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ (enum al_serdes_reg_page)lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_REG_NUM,
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_MASK,
+ enable ?
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_VAL_ASSERT :
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_VAL_DEASSERT);
+
+ if (!enable)
+ _al_serdes_lane_rx_rate_change_sw_flow_en_cond(grp_info, lane);
+}
+
+/******************************************************************************/
+/******************************************************************************/
+#if (SERDES_IREG_FLD_LB_RX2TXUNTIMEDEN_REG_NUM !=\
+ SERDES_IREG_FLD_LB_TX2RXBUFTIMEDEN_REG_NUM) ||\
+ (SERDES_IREG_FLD_LB_TX2RXBUFTIMEDEN_REG_NUM !=\
+ SERDES_IREG_FLD_LB_TX2RXIOTIMEDEN_REG_NUM) ||\
+ (SERDES_IREG_FLD_LB_TX2RXIOTIMEDEN_REG_NUM !=\
+ SERDES_IREG_FLD_LB_PARRX2TXTIMEDEN_REG_NUM) ||\
+ (SERDES_IREG_FLD_LB_PARRX2TXTIMEDEN_REG_NUM !=\
+ SERDES_IREG_FLD_LB_CDRCLK2TXEN_REG_NUM)
+#error Wrong assumption
+#endif
+
+void al_serdes_loopback_control(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ enum al_serdes_lb_mode mode)
+{
+ struct al_serdes_group_info *grp_info = &obj->grp_info[grp];
+ uint8_t val = 0;
+
+ switch (mode) {
+ case AL_SRDS_LB_MODE_OFF:
+ break;
+ case AL_SRDS_LB_MODE_PMA_IO_UN_TIMED_RX_TO_TX:
+ val = SERDES_IREG_FLD_LB_RX2TXUNTIMEDEN;
+ break;
+ case AL_SRDS_LB_MODE_PMA_INTERNALLY_BUFFERED_SERIAL_TX_TO_RX:
+ val = SERDES_IREG_FLD_LB_TX2RXBUFTIMEDEN;
+ break;
+ case AL_SRDS_LB_MODE_PMA_SERIAL_TX_IO_TO_RX_IO:
+ val = SERDES_IREG_FLD_LB_TX2RXIOTIMEDEN;
+ break;
+ case AL_SRDS_LB_MODE_PMA_PARALLEL_RX_TO_TX:
+ val = SERDES_IREG_FLD_LB_PARRX2TXTIMEDEN |
+ SERDES_IREG_FLD_LB_CDRCLK2TXEN;
+ break;
+ default:
+ al_err("%s: invalid mode (%d)\n", __func__, mode);
+ al_assert(0);
+ }
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ (enum al_serdes_reg_page)lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_LB_RX2TXUNTIMEDEN_REG_NUM,
+ SERDES_IREG_FLD_LB_RX2TXUNTIMEDEN |
+ SERDES_IREG_FLD_LB_TX2RXBUFTIMEDEN |
+ SERDES_IREG_FLD_LB_TX2RXIOTIMEDEN |
+ SERDES_IREG_FLD_LB_PARRX2TXTIMEDEN |
+ SERDES_IREG_FLD_LB_CDRCLK2TXEN,
+ val);
+}
+
+/******************************************************************************/
+/******************************************************************************/
+void al_serdes_bist_pattern_select(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_bist_pattern pattern,
+ uint8_t *user_data)
+{
+ struct al_serdes_group_info *grp_info = &obj->grp_info[grp];
+ uint8_t val = 0;
+
+ switch (pattern) {
+ case AL_SRDS_BIST_PATTERN_USER:
+ al_assert(user_data);
+ val = SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_USER;
+ break;
+ case AL_SRDS_BIST_PATTERN_PRBS7:
+ val = SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS7;
+ break;
+ case AL_SRDS_BIST_PATTERN_PRBS23:
+ val = SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS23;
+ break;
+ case AL_SRDS_BIST_PATTERN_PRBS31:
+ val = SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS31;
+ break;
+ case AL_SRDS_BIST_PATTERN_CLK1010:
+ val = SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_CLK1010;
+ break;
+ default:
+ al_err("%s: invalid pattern (%d)\n", __func__, pattern);
+ al_assert(0);
+ }
+
+ if (pattern == AL_SRDS_BIST_PATTERN_USER) {
+ int i;
+
+ for (i = 0; i < SERDES_IREG_FLD_TX_BIST_PAT_NUM_BYTES; i++)
+ al_serdes_grp_reg_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_TX_BIST_PAT_REG_NUM(i),
+ user_data[i]);
+ }
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_CMNPCSBIST_MODESEL_REG_NUM,
+ SERDES_IREG_FLD_CMNPCSBIST_MODESEL_MASK,
+ val);
+}
+
+/******************************************************************************/
+/******************************************************************************/
+void al_serdes_bist_tx_enable(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ al_bool enable)
+{
+ struct al_serdes_group_info *grp_info = &obj->grp_info[grp];
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ (enum al_serdes_reg_page)lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_PCSTXBIST_EN_REG_NUM,
+ SERDES_IREG_FLD_PCSTXBIST_EN,
+ enable ? SERDES_IREG_FLD_PCSTXBIST_EN : 0);
+}
+
+/******************************************************************************/
+/******************************************************************************/
/*
 * Inject a single bit error into the transmitted BIST stream.
 *
 * The error-injection bit is pulsed: it is first set and then cleared
 * with two back-to-back masked writes to the same register, so the
 * write order below must not be changed.
 *
 * @param obj SerDes handle
 * @param grp SerDes group to inject the error on
 */
void al_serdes_bist_tx_err_inject(
    struct al_serdes_obj *obj,
    enum al_serdes_group grp)
{
    struct al_serdes_group_info *grp_info = &obj->grp_info[grp];

    /* Assert the bit-error enable */
    al_serdes_grp_reg_masked_write(
        grp_info,
        AL_SRDS_REG_PAGE_4_COMMON,
        AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_FLD_TXBIST_BITERROR_EN_REG_NUM,
        SERDES_IREG_FLD_TXBIST_BITERROR_EN,
        SERDES_IREG_FLD_TXBIST_BITERROR_EN);

    /* De-assert it again, completing the pulse */
    al_serdes_grp_reg_masked_write(
        grp_info,
        AL_SRDS_REG_PAGE_4_COMMON,
        AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_FLD_TXBIST_BITERROR_EN_REG_NUM,
        SERDES_IREG_FLD_TXBIST_BITERROR_EN,
        0);
}
+
+/******************************************************************************/
+/******************************************************************************/
+void al_serdes_bist_rx_enable(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ al_bool enable)
+{
+ struct al_serdes_group_info *grp_info = &obj->grp_info[grp];
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ (enum al_serdes_reg_page)lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_PCSRXBIST_EN_REG_NUM,
+ SERDES_IREG_FLD_PCSRXBIST_EN,
+ enable ? SERDES_IREG_FLD_PCSRXBIST_EN : 0);
+}
+
+/******************************************************************************/
+/******************************************************************************/
+#if (SERDES_IREG_FLD_RXBIST_ERRCOUNT_OVERFLOW_REG_NUM !=\
+ SERDES_IREG_FLD_RXBIST_RXLOCKED_REG_NUM)
+#error Wrong assumption
+#endif
+
+void al_serdes_bist_rx_status(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ al_bool *is_locked,
+ al_bool *err_cnt_overflow,
+ uint16_t *err_cnt)
+{
+ struct al_serdes_group_info *grp_info = &obj->grp_info[grp];
+ uint8_t status_reg_val;
+ uint16_t err_cnt_msb_reg_val;
+ uint16_t err_cnt_lsb_reg_val;
+
+ status_reg_val = al_serdes_grp_reg_read(
+ grp_info,
+ (enum al_serdes_reg_page)lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXBIST_RXLOCKED_REG_NUM);
+
+ err_cnt_msb_reg_val = al_serdes_grp_reg_read(
+ grp_info,
+ (enum al_serdes_reg_page)lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXBIST_ERRCOUNT_MSB_REG_NUM);
+
+ err_cnt_lsb_reg_val = al_serdes_grp_reg_read(
+ grp_info,
+ (enum al_serdes_reg_page)lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXBIST_ERRCOUNT_LSB_REG_NUM);
+
+ *is_locked =
+ (status_reg_val & SERDES_IREG_FLD_RXBIST_RXLOCKED) ?
+ AL_TRUE : AL_FALSE;
+
+ *err_cnt_overflow =
+ (status_reg_val & SERDES_IREG_FLD_RXBIST_ERRCOUNT_OVERFLOW) ?
+ AL_TRUE : AL_FALSE;
+
+ *err_cnt = (err_cnt_msb_reg_val << 8) + err_cnt_lsb_reg_val;
+}
+
+/******************************************************************************/
+/******************************************************************************/
+static inline uint8_t al_serdes_grp_reg_read(
+ struct al_serdes_group_info *grp_info,
+ enum al_serdes_reg_page page,
+ enum al_serdes_reg_type type,
+ uint16_t offset)
+{
+ al_reg_write32(
+ &grp_info->regs_base->gen.reg_addr,
+ SRDS_CORE_REG_ADDR(page, type, offset));
+
+ return al_reg_read32(&grp_info->regs_base->gen.reg_data);
+}
+
+static inline uint8_t al_serdes_grp_lane_read(
+ struct al_serdes_group_info *grp_info,
+ enum al_serdes_lane page,
+ enum al_serdes_reg_type type,
+ uint16_t offset)
+{
+ return al_serdes_grp_reg_read(grp_info, (enum al_serdes_reg_page)page,
+ type, offset);
+}
+
+/******************************************************************************/
+/******************************************************************************/
+static inline void al_serdes_grp_reg_write(
+ struct al_serdes_group_info *grp_info,
+ enum al_serdes_reg_page page,
+ enum al_serdes_reg_type type,
+ uint16_t offset,
+ uint8_t data)
+{
+ al_reg_write32(
+ &grp_info->regs_base->gen.reg_addr,
+ SRDS_CORE_REG_ADDR(page, type, offset));
+
+ al_reg_write32(&grp_info->regs_base->gen.reg_data, data);
+}
+
+
+static inline void al_serdes_grp_lane_write(
+ struct al_serdes_group_info *grp_info,
+ enum al_serdes_lane lane,
+ enum al_serdes_reg_type type,
+ uint16_t offset,
+ uint8_t data)
+{
+ al_serdes_grp_reg_write(grp_info, (enum al_serdes_reg_page)lane,
+ type, offset, data);
+}
+
+/******************************************************************************/
+/******************************************************************************/
/*
 * Delay for at least @cnt nanoseconds.
 *
 * al_udelay() works in microseconds, so the nanosecond count is
 * rounded up to the next whole microsecond.
 */
static inline void al_serdes_ns_delay(int cnt)
{
    int usec = (cnt + 999) / 1000;

    al_udelay(usec);
}
+
+/******************************************************************************/
+/******************************************************************************/
+static inline void al_serdes_grp_reg_masked_write(
+ struct al_serdes_group_info *grp_info,
+ enum al_serdes_reg_page page,
+ enum al_serdes_reg_type type,
+ uint16_t offset,
+ uint8_t mask,
+ uint8_t data)
+{
+ uint8_t val;
+ enum al_serdes_reg_page start_page = page;
+ enum al_serdes_reg_page end_page = page;
+ enum al_serdes_reg_page iter_page;
+
+ if (page == AL_SRDS_REG_PAGE_0123_LANES_0123) {
+ start_page = AL_SRDS_REG_PAGE_0_LANE_0;
+ end_page = AL_SRDS_REG_PAGE_3_LANE_3;
+ }
+
+ for(iter_page = start_page; iter_page <= end_page; ++iter_page) {
+ val = al_serdes_grp_reg_read(grp_info, iter_page, type, offset);
+ val &= ~mask;
+ val |= data;
+ al_serdes_grp_reg_write(grp_info, iter_page, type, offset, val);
+ }
+}
+
+static inline void al_serdes_grp_lane_masked_write(
+ struct al_serdes_group_info *grp_info,
+ enum al_serdes_lane lane,
+ enum al_serdes_reg_type type,
+ uint16_t offset,
+ uint8_t mask,
+ uint8_t data)
+{
+ al_serdes_grp_reg_masked_write(grp_info, (enum al_serdes_reg_page)lane,
+ type, offset, mask, data);
+}
+
+/******************************************************************************/
+/******************************************************************************/
/*
 * Disable the Rx rate-change software flow on one lane, if it is
 * currently enabled.
 *
 * PMA lane registers 201-205 are compared against the exact signature
 * (0xfc, 0xff, 0xff, 0xff, 0xff) that indicates the flow is fully
 * enabled; only then is bit 7 of register 205 (RSTPDOVR_RX_OVREN)
 * cleared.  The register numbers and signature values come from the
 * HAL's SW-flow programming sequence -- the register map itself is not
 * visible in this file, so treat them as opaque.
 *
 * Must be called with a concrete lane, not the all-lanes selector.
 */
static void _al_serdes_lane_rx_rate_change_sw_flow_dis(
    struct al_serdes_group_info *grp_info,
    enum al_serdes_lane lane)
{
    al_bool lane_sw_flow_enabled;

    /* Broadcast lane selector is not allowed here */
    al_assert(lane != AL_SRDS_LANES_0123);

    /* Flow counts as enabled only if the full 5-register signature matches */
    lane_sw_flow_enabled =
        (al_serdes_grp_reg_read(grp_info, (enum al_serdes_reg_page)lane,
            AL_SRDS_REG_TYPE_PMA, 201) == 0xfc) &&
        (al_serdes_grp_reg_read(grp_info, (enum al_serdes_reg_page)lane,
            AL_SRDS_REG_TYPE_PMA, 202) == 0xff) &&
        (al_serdes_grp_reg_read(grp_info, (enum al_serdes_reg_page)lane,
            AL_SRDS_REG_TYPE_PMA, 203) == 0xff) &&
        (al_serdes_grp_reg_read(grp_info, (enum al_serdes_reg_page)lane,
            AL_SRDS_REG_TYPE_PMA, 204) == 0xff) &&
        (al_serdes_grp_reg_read(grp_info, (enum al_serdes_reg_page)lane,
            AL_SRDS_REG_TYPE_PMA, 205) == 0xff);

    /**
     * Disable the Rx rate change software flow by clearing bit 7 of lane PMA register 205
     * (RSTPDOVR_RX_OVREN)
     */
    if (lane_sw_flow_enabled) {
        al_dbg("%s(%d): actually disabling\n", __func__, lane);
        al_serdes_grp_reg_masked_write(grp_info, (enum al_serdes_reg_page)lane,
            AL_SRDS_REG_TYPE_PMA, 205, 0x80, 0x00);
    }
}
+
+/******************************************************************************/
+/******************************************************************************/
+static void al_serdes_group_rx_rate_change_sw_flow_dis(
+ struct al_serdes_group_info *grp_info)
+{
+ int lane;
+
+ for (lane = AL_SRDS_LANE_0; lane < AL_SRDS_NUM_LANES; lane++)
+ _al_serdes_lane_rx_rate_change_sw_flow_dis(grp_info, lane);
+}
+
+/******************************************************************************/
+/******************************************************************************/
/*
 * Conditionally re-enable the Rx rate-change software flow on a lane.
 *
 * The flow is only switched back on when all of the following hold:
 *  - registers 201-205 carry the "flow configured but disabled"
 *    signature (205 == 0x7f, i.e. everything except the enable bit);
 *  - neither the group-level nor the lane-level hard-reset override
 *    is asserted;
 *  - neither the group synthesizer nor the lane Rx is powered down.
 * This is the inverse operation of
 * _al_serdes_lane_rx_rate_change_sw_flow_dis().
 *
 * Must be called with a concrete lane, not the all-lanes selector.
 */
static void _al_serdes_lane_rx_rate_change_sw_flow_en_cond(
    struct al_serdes_group_info *grp_info,
    enum al_serdes_lane lane)
{
    al_bool lane_sw_flow_almost_enabled;
    al_bool group_reset_enabled;
    al_bool lane_reset_enabled;
    al_bool group_pd_enabled;
    al_bool lane_pd_enabled;

    /* Broadcast lane selector is not allowed here */
    al_assert(lane != AL_SRDS_LANES_0123);

    /* Signature check: flow configured, enable bit (bit 7 of 205) clear */
    lane_sw_flow_almost_enabled =
        (al_serdes_grp_reg_read(grp_info, (enum al_serdes_reg_page)lane,
            AL_SRDS_REG_TYPE_PMA, 201) == 0xfc) &&
        (al_serdes_grp_reg_read(grp_info, (enum al_serdes_reg_page)lane,
            AL_SRDS_REG_TYPE_PMA, 202) == 0xff) &&
        (al_serdes_grp_reg_read(grp_info, (enum al_serdes_reg_page)lane,
            AL_SRDS_REG_TYPE_PMA, 203) == 0xff) &&
        (al_serdes_grp_reg_read(grp_info, (enum al_serdes_reg_page)lane,
            AL_SRDS_REG_TYPE_PMA, 204) == 0xff) &&
        (al_serdes_grp_reg_read(grp_info, (enum al_serdes_reg_page)lane,
            AL_SRDS_REG_TYPE_PMA, 205) == 0x7f);

    /* Group hard-reset override enabled AND asserted */
    group_reset_enabled =
        ((al_serdes_grp_reg_read(
            grp_info, AL_SRDS_REG_PAGE_4_COMMON, AL_SRDS_REG_TYPE_PMA,
            SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_REG_NUM) &
            SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_MASK) ==
            SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_VAL_REGS) &&
        ((al_serdes_grp_reg_read(
            grp_info, AL_SRDS_REG_PAGE_4_COMMON, AL_SRDS_REG_TYPE_PMA,
            SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_REG_NUM) &
            SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_MASK) ==
            SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_VAL_ASSERT);

    /* Lane hard-reset override enabled AND asserted */
    lane_reset_enabled =
        ((al_serdes_grp_reg_read(
            grp_info, (enum al_serdes_reg_page)lane, AL_SRDS_REG_TYPE_PMA,
            SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_REG_NUM) &
            SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_MASK) ==
            SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_VAL_REGS) &&
        ((al_serdes_grp_reg_read(
            grp_info, (enum al_serdes_reg_page)lane, AL_SRDS_REG_TYPE_PMA,
            SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_REG_NUM) &
            SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_MASK) ==
            SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_VAL_ASSERT);

    /* Group synthesizer powered down? */
    group_pd_enabled =
        (al_serdes_grp_reg_read(
            grp_info, AL_SRDS_REG_PAGE_4_COMMON, AL_SRDS_REG_TYPE_PMA,
            SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_REG_NUM) &
            SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_MASK) ==
            SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_PD;

    /* Lane Rx powered down? */
    lane_pd_enabled =
        (al_serdes_grp_reg_read(
            grp_info, (enum al_serdes_reg_page)lane, AL_SRDS_REG_TYPE_PMA,
            SERDES_IREG_FLD_LANEPCSPSTATE_RX_REG_NUM) &
            SERDES_IREG_FLD_LANEPCSPSTATE_RX_MASK) ==
            SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_PD;

    /**
     * Enable the Rx rate change software flow by setting bit 7 of lane PMA register 205
     * (RSTPDOVR_RX_OVREN)
     */
    if (lane_sw_flow_almost_enabled && !group_reset_enabled && !lane_reset_enabled &&
        !group_pd_enabled && !lane_pd_enabled) {
        al_dbg("%s(%d): actually enabling\n", __func__, lane);

        /* Settle delay before re-enabling -- timing per HAL flow */
        al_serdes_ns_delay(500);
        al_serdes_grp_reg_masked_write(grp_info, (enum al_serdes_reg_page)lane,
            AL_SRDS_REG_TYPE_PMA, 205, 0x80, 0x80);
    }
}
+
+/******************************************************************************/
+/******************************************************************************/
+static void al_serdes_group_rx_rate_change_sw_flow_en_cond(
+ struct al_serdes_group_info *grp_info)
+{
+ int lane;
+
+ for (lane = AL_SRDS_LANE_0; lane < AL_SRDS_NUM_LANES; lane++)
+ _al_serdes_lane_rx_rate_change_sw_flow_en_cond(grp_info, lane);
+}
+
+/******************************************************************************/
+/******************************************************************************/
+int al_serdes_eye_measure_run(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ uint32_t timeout,
+ unsigned int *value)
+{
+ uint32_t reg = 0;
+ uint32_t i;
+ struct serdes_lane *lane_regs;
+
+ lane_regs = &obj->grp_info[grp].regs_base->lane[lane];
+
+ al_reg_write32(&lane_regs->ictl_multi_rxeq,
+ SERDES_LANE_ICTL_MULTI_RXEQ_START_L_A);
+
+ for (i = 0 ; i < timeout ; i++) {
+ reg = al_reg_read32(&lane_regs->octl_multi);
+
+ if (reg & SERDES_LANE_OCTL_MULTI_RXEQ_DONE_L_A)
+ break;
+
+ al_msleep(10);
+ }
+
+ if (i == timeout) {
+ al_err("%s: measure eye failed on timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ *value = al_reg_read32(&lane_regs->odat_multi_rxeq);
+
+ al_reg_write32(&lane_regs->ictl_multi_rxeq, 0);
+
+ return 0;
+}
+
+/******************************************************************************/
+/******************************************************************************/
/*
 * Sample a single point of the Rx eye diagram.
 *
 * Configures the roam/eye-measurement hardware to sample the received
 * eye at horizontal position @x and vertical position @y, runs one
 * measurement cycle of roughly 100000 samples and returns the error
 * count accumulated at that point.  The sample-count registers and the
 * BER-contour write-enable bits are restored before returning.
 *
 * @param obj     SerDes handle
 * @param grp     SerDes group
 * @param lane    lane to sample (a single lane or the all-lanes page)
 * @param x       horizontal (phase) roam position
 * @param y       vertical (voltage) roam position
 * @param timeout max completion-poll iterations, 1 us apart
 * @param value   error count at the sampled point (out)
 *
 * @returns 0 on success, -ETIMEDOUT if the measurement did not finish
 */
int al_serdes_eye_diag_sample(
    struct al_serdes_obj *obj,
    enum al_serdes_group grp,
    enum al_serdes_lane lane,
    unsigned int x,
    int y,
    unsigned int timeout,
    unsigned int *value)
{
    enum al_serdes_reg_page page = (enum al_serdes_reg_page)lane;
    struct al_serdes_group_info *grp_info;
    uint32_t i;
    uint8_t sample_count_orig_msb;
    uint8_t sample_count_orig_lsb;

    /* Validate handle and that group/page are within range */
    al_assert(obj);
    al_assert(((int)grp) >= AL_SRDS_GRP_A);
    al_assert(((int)grp) <= AL_SRDS_GRP_D);
    al_assert(((int)page) >= AL_SRDS_REG_PAGE_0_LANE_0);
    al_assert(((int)page) <= AL_SRDS_REG_PAGE_0123_LANES_0123);

    grp_info = &obj->grp_info[grp];

    /* Obtain sample count by reading RXCALROAMEYEMEAS_COUNT */
    sample_count_orig_msb = al_serdes_grp_reg_read(grp_info,
        AL_SRDS_REG_PAGE_4_COMMON, AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_REG_NUM);
    sample_count_orig_lsb = al_serdes_grp_reg_read(grp_info,
        AL_SRDS_REG_PAGE_4_COMMON, AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_REG_NUM);

    /* Set sample count to ~100000 samples (0x1388 * 8 -- per HAL flow) */
    al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
        AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_REG_NUM, 0x13);
    al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
        AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_REG_NUM, 0x88);

    /* BER Contour Overwrite: clear the local write-enables so the
     * measurement inputs below take effect */
    al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN_REG_NUM,
        SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN,
        0);
    al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN_REG_NUM,
        SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN,
        0);
    al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM,
        SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN,
        0);

    /* RXROAM_XORBITSEL = 0x1 or 0x0 */
    al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_FLD_RXROAM_XORBITSEL_REG_NUM,
        SERDES_IREG_FLD_RXROAM_XORBITSEL,
        SERDES_IREG_FLD_RXROAM_XORBITSEL_2ND);

    /* Set X */
    al_serdes_grp_reg_write(grp_info, page, AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_FLD_RXCALROAMXADJUST_REG_NUM, x);

    /* Set Y -- mapped to the register's split encoding for the upper
     * and lower halves of the eye (encoding per HAL; register map not
     * visible here) */
    al_serdes_grp_reg_write(grp_info, page, AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_FLD_RXCALROAMYADJUST_REG_NUM,
        y < 32 ? 31 - y : y + 1);

    /* Start Measurement by setting RXCALROAMEYEMEASIN_CYCLEEN = 0x1 */
    al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_REG_NUM,
        SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_START,
        SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_START);

    /* Check RXCALROAMEYEMEASDONE Signal (Polling Until 0x1) */
    for (i = 0 ; i < timeout ; i++) {
        if (al_serdes_grp_reg_read(grp_info, page, AL_SRDS_REG_TYPE_PMA,
                SERDES_IREG_FLD_RXCALROAMEYEMEASDONE_REG_NUM) &
                SERDES_IREG_FLD_RXCALROAMEYEMEASDONE)
            break;
        al_udelay(1);
    }
    if (i == timeout) {
        /* NOTE(review): on timeout, CYCLEEN / LOCWREN / sample count
         * are NOT restored -- presumably the caller resets the lane;
         * verify against callers */
        al_err("%s: eye diagram sampling timed out!\n", __func__);
        return -ETIMEDOUT;
    }

    /* Stop Measurement by setting RXCALROAMEYEMEASIN_CYCLEEN = 0x0 */
    al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_REG_NUM,
        SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_START,
        0);

    /* Obtain Error Counts by reading RXCALROAMEYEMEAS_ACC */
    *value = ((unsigned int)al_serdes_grp_reg_read(grp_info, page,
        AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_FLD_RXCALROAMEYEMEAS_ACC_MSB_REG_NUM)) << 8 |
        al_serdes_grp_reg_read(grp_info, page, AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_FLD_RXCALROAMEYEMEAS_ACC_LSB_REG_NUM);

    /* BER Contour Overwrite: restore the local write-enables */
    al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN_REG_NUM,
        SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN,
        SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN);
    al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN_REG_NUM,
        SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN,
        SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN);
    al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM,
        SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN,
        SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN);

    /* Restore sample count */
    al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
        AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_REG_NUM,
        sample_count_orig_msb);
    al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
        AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_REG_NUM,
        sample_count_orig_lsb);

    return 0;
}
+
+/******************************************************************************/
+/******************************************************************************/
+static void al_serdes_tx_deemph_set(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ uint32_t c_zero,
+ uint32_t c_plus_1,
+ uint32_t c_minus_1)
+{
+ al_serdes_grp_lane_masked_write(
+ &obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_TX_DRV_1_REG_NUM,
+ SERDES_IREG_TX_DRV_1_LEVN_MASK,
+ ((c_zero + c_plus_1 + c_minus_1)
+ << SERDES_IREG_TX_DRV_1_LEVN_SHIFT));
+
+ al_serdes_grp_lane_masked_write(
+ &obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_TX_DRV_2_REG_NUM,
+ SERDES_IREG_TX_DRV_2_LEVNM1_MASK,
+ (c_plus_1 << SERDES_IREG_TX_DRV_2_LEVNM1_SHIFT));
+
+ al_serdes_grp_lane_masked_write(
+ &obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_TX_DRV_3_REG_NUM,
+ SERDES_IREG_TX_DRV_3_LEVNP1_MASK,
+ (c_minus_1 << SERDES_IREG_TX_DRV_3_LEVNP1_SHIFT));
+}
+
+static void al_serdes_tx_deemph_get(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ uint32_t *c_zero,
+ uint32_t *c_plus_1,
+ uint32_t *c_minus_1)
+{
+ uint32_t reg = 0;
+
+ reg = al_serdes_grp_lane_read(
+ &obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_TX_DRV_2_REG_NUM);
+
+ *c_plus_1 = ((reg & SERDES_IREG_TX_DRV_2_LEVNM1_MASK) >>
+ SERDES_IREG_TX_DRV_2_LEVNM1_SHIFT);
+
+ reg = al_serdes_grp_lane_read(
+ &obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_TX_DRV_3_REG_NUM);
+
+ *c_minus_1 = ((reg & SERDES_IREG_TX_DRV_3_LEVNP1_MASK) >>
+ SERDES_IREG_TX_DRV_3_LEVNP1_SHIFT);
+
+ reg = al_serdes_grp_lane_read(
+ &obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_TX_DRV_1_REG_NUM);
+
+ *c_zero = (((reg & SERDES_IREG_TX_DRV_1_LEVN_MASK) >>
+ SERDES_IREG_TX_DRV_1_LEVN_SHIFT) - *c_plus_1 - *c_minus_1);
+}
+
+al_bool al_serdes_tx_deemph_inc(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ enum al_serdes_tx_deemph_param param)
+{
+ al_bool ret = AL_TRUE;
+ uint32_t c0;
+ uint32_t c1;
+ uint32_t c_1;
+
+ al_serdes_tx_deemph_get(obj, grp, lane, &c0, &c1, &c_1);
+
+ al_dbg("%s: current txdeemph: c0 = 0x%x c1 = 0x%x c-1 = 0x%x\n",
+ __func__, c0, c1, c_1);
+
+ switch (param) {
+ case AL_SERDES_TX_DEEMP_C_ZERO:
+
+ if (c0 == AL_SERDES_TX_DEEMPH_C_ZERO_MAX_VAL)
+ return AL_FALSE;
+
+ c0++;
+
+ break;
+ case AL_SERDES_TX_DEEMP_C_PLUS:
+
+ if (c1 == AL_SERDES_TX_DEEMPH_C_PLUS_MAX_VAL)
+ return AL_FALSE;
+
+ c1++;
+
+ break;
+ case AL_SERDES_TX_DEEMP_C_MINUS:
+
+ if (c_1 == AL_SERDES_TX_DEEMPH_C_MINUS_MAX_VAL)
+ return AL_FALSE;
+
+ c_1++;
+
+ break;
+ }
+
+ if ((c0 + c1 + c_1) > AL_SERDES_TX_DEEMPH_SUM_MAX) {
+ al_dbg("%s: sum of all tx de-emphasis over the max limit\n",
+ __func__);
+
+ return AL_FALSE;
+ }
+
+ al_dbg("%s: new txdeemph: c0 = 0x%x c1 = 0x%x c-1 = 0x%x\n",
+ __func__, c0, c1, c_1);
+
+ al_serdes_tx_deemph_set(obj, grp, lane, c0, c1, c_1);
+
+ return ret;
+}
+
+al_bool al_serdes_tx_deemph_dec(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ enum al_serdes_tx_deemph_param param)
+{
+ al_bool ret = AL_TRUE;
+ uint32_t c0;
+ uint32_t c1;
+ uint32_t c_1;
+
+ al_serdes_tx_deemph_get(obj, grp, lane, &c0, &c1, &c_1);
+
+ al_dbg("%s: current txdeemph: c0 = 0x%x c1 = 0x%x c-1 = 0x%x\n",
+ __func__, c0, c1, c_1);
+
+ switch (param) {
+ case AL_SERDES_TX_DEEMP_C_ZERO:
+
+ if (c0 == AL_SERDES_TX_DEEMPH_C_ZERO_MIN_VAL)
+ return AL_FALSE;
+
+ c0--;
+
+ break;
+ case AL_SERDES_TX_DEEMP_C_PLUS:
+
+ if (c1 == AL_SERDES_TX_DEEMPH_C_PLUS_MIN_VAL)
+ return AL_FALSE;
+
+ c1--;
+
+ break;
+ case AL_SERDES_TX_DEEMP_C_MINUS:
+
+ if (c_1 == AL_SERDES_TX_DEEMPH_C_MINUS_MIN_VAL)
+ return AL_FALSE;
+
+ c_1--;
+
+ break;
+ }
+
+ al_dbg("%s: new txdeemph: c0 = 0x%x c1 = 0x%x c-1 = 0x%x\n",
+ __func__, c0, c1, c_1);
+
+ al_serdes_tx_deemph_set(obj, grp, lane, c0, c1, c_1);
+
+ return ret;
+}
+
+void al_serdes_tx_deemph_preset(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane)
+{
+ uint32_t c0;
+ uint32_t c1;
+ uint32_t c_1;
+
+ c0 = AL_SERDES_TX_DEEMPH_C_ZERO_PRESET;
+
+ c1 = AL_SERDES_TX_DEEMPH_C_PLUS_PRESET;
+
+ c_1 = AL_SERDES_TX_DEEMPH_C_MINUS_PRESET;
+
+ al_dbg("preset: new txdeemph: c0 = 0x%x c1 = 0x%x c-1 = 0x%x\n",
+ c0, c1, c_1);
+
+ al_serdes_tx_deemph_set(obj, grp, lane, c0, c1, c_1);
+}
+
+al_bool al_serdes_signal_is_detected(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane)
+{
+ uint32_t reg = 0;
+
+ reg = al_serdes_grp_lane_read(
+ &obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXRANDET_REG_NUM);
+
+ return ((reg & SERDES_IREG_FLD_RXRANDET_STAT) ? AL_TRUE : AL_FALSE);
+}
+
/*
 * Program the advanced Tx driver parameters of a lane.
 *
 * Note the inverted sense of the override-enable bit: when
 * params->override is false, SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN is SET
 * (and nothing else is written); when override is true the bit is
 * CLEARED and the three TX_DRV registers are programmed.  This matches
 * al_serdes_tx_advanced_params_get(), which reports override as the
 * bit being zero.
 *
 * @param obj    SerDes handle
 * @param grp    SerDes group
 * @param lane   lane to configure
 * @param params driver settings (amplitude, drive units, taps, slew)
 */
void al_serdes_tx_advanced_params_set(struct al_serdes_obj *obj,
    enum al_serdes_group grp,
    enum al_serdes_lane lane,
    struct al_serdes_adv_tx_params *params)
{
    uint8_t reg = 0;

    if(!params->override) {
        /* Not overriding: set the enable bit and leave the TX_DRV
         * registers alone */
        al_serdes_grp_lane_masked_write(&obj->grp_info[grp],
            lane,
            AL_SRDS_REG_TYPE_PMA,
            SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN_REG_NUM,
            SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN,
            SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN);

        return;
    }

    /* Overriding: clear the enable bit so the values below apply */
    al_serdes_grp_lane_masked_write(&obj->grp_info[grp],
        lane,
        AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN_REG_NUM,
        SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN,
        0);

    /* TX_DRV_1: amplitude (HLEV) and total driver units (LEVN) */
    AL_REG_FIELD_SET(reg,
        SERDES_IREG_TX_DRV_1_HLEV_MASK,
        SERDES_IREG_TX_DRV_1_HLEV_SHIFT,
        params->amp);

    AL_REG_FIELD_SET(reg,
        SERDES_IREG_TX_DRV_1_LEVN_MASK,
        SERDES_IREG_TX_DRV_1_LEVN_SHIFT,
        params->total_driver_units);

    al_serdes_grp_lane_write(&obj->grp_info[grp],
        lane,
        AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_TX_DRV_1_REG_NUM,
        reg);

    /* TX_DRV_2: post-cursor taps C+1 (LEVNM1) and C+2 (LEVNM2) */
    reg = 0;
    AL_REG_FIELD_SET(reg,
        SERDES_IREG_TX_DRV_2_LEVNM1_MASK,
        SERDES_IREG_TX_DRV_2_LEVNM1_SHIFT,
        params->c_plus_1);

    AL_REG_FIELD_SET(reg,
        SERDES_IREG_TX_DRV_2_LEVNM2_MASK,
        SERDES_IREG_TX_DRV_2_LEVNM2_SHIFT,
        params->c_plus_2);

    al_serdes_grp_lane_write(&obj->grp_info[grp],
        lane,
        AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_TX_DRV_2_REG_NUM,
        reg);

    /* TX_DRV_3: pre-cursor tap C-1 (LEVNP1) and slew rate */
    reg = 0;
    AL_REG_FIELD_SET(reg,
        SERDES_IREG_TX_DRV_3_LEVNP1_MASK,
        SERDES_IREG_TX_DRV_3_LEVNP1_SHIFT,
        params->c_minus_1);

    AL_REG_FIELD_SET(reg,
        SERDES_IREG_TX_DRV_3_SLEW_MASK,
        SERDES_IREG_TX_DRV_3_SLEW_SHIFT,
        params->slew_rate);

    al_serdes_grp_lane_write(&obj->grp_info[grp],
        lane,
        AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_TX_DRV_3_REG_NUM,
        reg);

}
+
/*
 * Read back the advanced Tx driver parameters of a lane.
 *
 * Decodes the three TX_DRV registers into the parameter struct.  The
 * override flag is reported as true when the TX_DRV_OVERRIDE_EN bit is
 * CLEAR, mirroring the inverted encoding used by
 * al_serdes_tx_advanced_params_set().
 *
 * @param obj       SerDes handle
 * @param grp       SerDes group
 * @param lane      lane to query
 * @param tx_params decoded driver settings (out)
 */
void al_serdes_tx_advanced_params_get(struct al_serdes_obj *obj,
    enum al_serdes_group grp,
    enum al_serdes_lane lane,
    struct al_serdes_adv_tx_params *tx_params)
{
    uint8_t reg_val = 0;

    /* TX_DRV_1: amplitude (HLEV) and total driver units (LEVN) */
    al_serdes_lane_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_TX_DRV_1_REG_NUM,
        &reg_val);
    tx_params->amp = (reg_val & SERDES_IREG_TX_DRV_1_HLEV_MASK) >>
        SERDES_IREG_TX_DRV_1_HLEV_SHIFT;
    tx_params->total_driver_units = (reg_val &
        SERDES_IREG_TX_DRV_1_LEVN_MASK) >>
        SERDES_IREG_TX_DRV_1_LEVN_SHIFT;

    /* TX_DRV_2: post-cursor taps C+1 (LEVNM1) and C+2 (LEVNM2) */
    al_serdes_lane_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_TX_DRV_2_REG_NUM,
        &reg_val);
    tx_params->c_plus_1 = (reg_val & SERDES_IREG_TX_DRV_2_LEVNM1_MASK) >>
        SERDES_IREG_TX_DRV_2_LEVNM1_SHIFT;
    tx_params->c_plus_2 = (reg_val & SERDES_IREG_TX_DRV_2_LEVNM2_MASK) >>
        SERDES_IREG_TX_DRV_2_LEVNM2_SHIFT;

    /* TX_DRV_3: pre-cursor tap C-1 (LEVNP1) and slew rate */
    al_serdes_lane_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_TX_DRV_3_REG_NUM,
        &reg_val);
    tx_params->c_minus_1 = (reg_val & SERDES_IREG_TX_DRV_3_LEVNP1_MASK) >>
        SERDES_IREG_TX_DRV_3_LEVNP1_SHIFT;
    tx_params->slew_rate = (reg_val & SERDES_IREG_TX_DRV_3_SLEW_MASK) >>
        SERDES_IREG_TX_DRV_3_SLEW_SHIFT;

    /* Override is active when the enable bit is CLEAR (inverted sense) */
    al_serdes_lane_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
        SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN_REG_NUM,
        &reg_val);
    tx_params->override = ((reg_val & SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN) == 0);
}
+
+
+void al_serdes_rx_advanced_params_set(struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ struct al_serdes_adv_rx_params *params)
+{
+ uint8_t reg = 0;
+
+ if(!params->override) {
+ al_serdes_grp_lane_masked_write(&obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN_REG_NUM,
+ SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN,
+ SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN);
+
+ return;
+ }
+
+ al_serdes_grp_lane_masked_write(&obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN_REG_NUM,
+ SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN,
+ 0);
+
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_RX_CALEQ_1_DCGAIN_MASK,
+ SERDES_IREG_RX_CALEQ_1_DCGAIN_SHIFT,
+ params->dcgain);
+
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_RX_CALEQ_1_DFEPSTAP3DB_MASK,
+ SERDES_IREG_RX_CALEQ_1_DFEPSTAP3DB_SHIFT,
+ params->dfe_3db_freq);
+
+ al_serdes_grp_lane_write(&obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_RX_CALEQ_1_REG_NUM,
+ reg);
+
+ reg = 0;
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_RX_CALEQ_2_DFEPSTAPGAIN_MASK,
+ SERDES_IREG_RX_CALEQ_2_DFEPSTAPGAIN_SHIFT,
+ params->dfe_gain);
+
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_RX_CALEQ_2_DFETAP1GAIN_MASK,
+ SERDES_IREG_RX_CALEQ_2_DFETAP1GAIN_SHIFT,
+ params->dfe_first_tap_ctrl);
+
+ al_serdes_grp_lane_write(&obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_RX_CALEQ_2_REG_NUM,
+ reg);
+
+ reg = 0;
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_RX_CALEQ_3_DFETAP2GAIN_MASK,
+ SERDES_IREG_RX_CALEQ_3_DFETAP2GAIN_SHIFT,
+ params->dfe_secound_tap_ctrl);
+
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_RX_CALEQ_3_DFETAP3GAIN_MASK,
+ SERDES_IREG_RX_CALEQ_3_DFETAP3GAIN_SHIFT,
+ params->dfe_third_tap_ctrl);
+
+ al_serdes_grp_lane_write(&obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_RX_CALEQ_3_REG_NUM,
+ reg);
+
+ reg = 0;
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_RX_CALEQ_4_DFETAP4GAIN_MASK,
+ SERDES_IREG_RX_CALEQ_4_DFETAP4GAIN_SHIFT,
+ params->dfe_fourth_tap_ctrl);
+
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_RX_CALEQ_4_LOFREQAGCGAIN_MASK,
+ SERDES_IREG_RX_CALEQ_4_LOFREQAGCGAIN_SHIFT,
+ params->low_freq_agc_gain);
+
+ al_serdes_grp_lane_write(&obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_RX_CALEQ_4_REG_NUM,
+ reg);
+
+ reg = 0;
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_RX_CALEQ_5_PRECAL_CODE_SEL_MASK,
+ SERDES_IREG_RX_CALEQ_5_PRECAL_CODE_SEL_SHIFT,
+ params->precal_code_sel);
+
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_RX_CALEQ_5_HIFREQAGCCAP_MASK,
+ SERDES_IREG_RX_CALEQ_5_HIFREQAGCCAP_SHIFT,
+ params->high_freq_agc_boost);
+
+ al_serdes_grp_lane_write(&obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_RX_CALEQ_5_REG_NUM,
+ reg);
+}
+
+static inline void al_serdes_common_cfg_eth(struct al_serdes_group_info *grp_info)
+{
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXEQ_LOOKUP_CODE_EN_REG_NUM,
+ SERDES_IREG_FLD_RXEQ_LOOKUP_CODE_EN_MASK,
+ (0x1 << SERDES_IREG_FLD_RXEQ_LOOKUP_CODE_EN_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXEQ_LOOKUP_LASTCODE_REG_NUM,
+ SERDES_IREG_FLD_RXEQ_LOOKUP_LASTCODE_MASK,
+ (0 << SERDES_IREG_FLD_RXEQ_LOOKUP_LASTCODE_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXEQ_COARSE_RUN1_MASK_REG_NUM,
+ SERDES_IREG_FLD_RXEQ_COARSE_RUN1_MASK_MASK,
+ (0x2 << SERDES_IREG_FLD_RXEQ_COARSE_RUN1_MASK_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXEQ_COARSE_RUN2_MASK_REG_NUM,
+ SERDES_IREG_FLD_RXEQ_COARSE_RUN2_MASK_MASK,
+ (0 << SERDES_IREG_FLD_RXEQ_COARSE_RUN2_MASK_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXEQ_COARSE_STEP_REG_NUM,
+ SERDES_IREG_FLD_RXEQ_COARSE_STEP_MASK,
+ (0x1 << SERDES_IREG_FLD_RXEQ_COARSE_STEP_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXEQ_COARSE_ITER_NUM_REG_NUM,
+ SERDES_IREG_FLD_RXEQ_COARSE_ITER_NUM_MASK,
+ (0x1 << SERDES_IREG_FLD_RXEQ_COARSE_ITER_NUM_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXEQ_FINE_RUN1_MASK_REG_NUM,
+ SERDES_IREG_FLD_RXEQ_FINE_RUN1_MASK_MASK,
+ (0xf0 << SERDES_IREG_FLD_RXEQ_FINE_RUN1_MASK_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXEQ_FINE_RUN2_MASK_REG_NUM,
+ SERDES_IREG_FLD_RXEQ_FINE_RUN2_MASK_MASK,
+ (0 << SERDES_IREG_FLD_RXEQ_FINE_RUN2_MASK_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXEQ_FINE_STEP_REG_NUM,
+ SERDES_IREG_FLD_RXEQ_FINE_STEP_MASK,
+ (1 << SERDES_IREG_FLD_RXEQ_FINE_STEP_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXEQ_FINE_ITER_NUM_REG_NUM,
+ SERDES_IREG_FLD_RXEQ_FINE_ITER_NUM_MASK,
+ (0x8 << SERDES_IREG_FLD_RXEQ_FINE_ITER_NUM_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD1_REG_NUM,
+ SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD1_MASK,
+ (0 << SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD1_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD2_REG_NUM,
+ SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD2_MASK,
+ (0x64 << SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD2_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_REG_NUM,
+ SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_MASK,
+ (0x3 << SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_REG_NUM,
+ SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_MASK,
+ (0x1 << SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_REG_NUM,
+ SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_MASK,
+ (3 << SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_REG_NUM,
+ SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_MASK,
+ (1 << SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_REG_NUM,
+ SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_MASK,
+ (0xc << SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_REG_NUM,
+ SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_MASK,
+ (0xcc << SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_SHIFT));
+}
+
/*
 * Per-group state captured before a SerDes mode change and written back
 * afterwards: the pipe-reset register, and per-lane RX polarity and
 * ipd_multi register values (see the _save/_restore helpers below).
 */
struct al_serdes_mode_rx_tx_inv_state {
	/* AL_TRUE when state was captured and should be written back */
	al_bool restore;
	/* Snapshot of the group's gen.irst (pipe reset) register */
	uint32_t pipe_rst;
	/* Per-lane ipd_multi register values (presumably power-down controls
	 * — TODO confirm against register spec) */
	uint32_t ipd_multi[AL_SRDS_NUM_LANES];
	/* Per-lane PMA RX polarity (SERDES_IREG_FLD_POLARITY_RX) values */
	uint8_t inv_value[AL_SRDS_NUM_LANES];
};
+
+static void al_serdes_mode_rx_tx_inv_state_save(
+ struct al_serdes_group_info *grp_info,
+ struct al_serdes_mode_rx_tx_inv_state *state)
+{
+ if (al_reg_read32(&grp_info->regs_base->gen.irst) & SERDES_GEN_IRST_POR_B_A) {
+ int i;
+
+ state->restore = AL_TRUE;
+ state->pipe_rst = al_reg_read32(&grp_info->regs_base->gen.irst);
+
+ for (i = 0; i < AL_SRDS_NUM_LANES; i++) {
+ state->inv_value[i] = al_serdes_grp_reg_read(
+ grp_info,
+ i,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_POLARITY_RX_REG_NUM);
+ state->ipd_multi[i] =
+ al_reg_read32(&grp_info->regs_base->lane[i].ipd_multi);
+ }
+ } else {
+ state->restore = AL_FALSE;
+ }
+}
+
+static void al_serdes_mode_rx_tx_inv_state_restore(
+ struct al_serdes_group_info *grp_info,
+ struct al_serdes_mode_rx_tx_inv_state *state)
+{
+ if (state->restore) {
+ int i;
+
+ for (i = 0; i < AL_SRDS_NUM_LANES; i++) {
+ al_serdes_grp_reg_write(
+ grp_info,
+ i,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_POLARITY_RX_REG_NUM,
+ state->inv_value[i]);
+ al_reg_write32(
+ &grp_info->regs_base->lane[i].ipd_multi, state->ipd_multi[i]);
+ al_reg_write32_masked(
+ &grp_info->regs_base->gen.irst,
+ (SERDES_GEN_IRST_PIPE_RST_L0_B_A_SEL >> i) |
+ (SERDES_GEN_IRST_PIPE_RST_L0_B_A >> i),
+ state->pipe_rst);
+ }
+ }
+}
+
+void al_serdes_mode_set_sgmii(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp)
+{
+ struct al_serdes_group_info *grp_info;
+ struct al_serdes_mode_rx_tx_inv_state rx_tx_inv_state;
+
+ al_assert(obj);
+ al_assert(((int)grp) >= AL_SRDS_GRP_A);
+ al_assert(((int)grp) <= AL_SRDS_GRP_D);
+
+ grp_info = &obj->grp_info[grp];
+
+ al_serdes_mode_rx_tx_inv_state_save(grp_info, &rx_tx_inv_state);
+
+ al_reg_write32(&grp_info->regs_base->gen.irst, 0x000000);
+ al_reg_write32(&grp_info->regs_base->lane[0].ictl_multi, 0x10110010);
+ al_reg_write32(&grp_info->regs_base->lane[1].ictl_multi, 0x10110010);
+ al_reg_write32(&grp_info->regs_base->lane[2].ictl_multi, 0x10110010);
+ al_reg_write32(&grp_info->regs_base->lane[3].ictl_multi, 0x10110010);
+ al_reg_write32(&grp_info->regs_base->gen.ipd_multi_synth , 0x0001);
+ al_reg_write32(&grp_info->regs_base->lane[0].ipd_multi, 0x0003);
+ al_reg_write32(&grp_info->regs_base->lane[1].ipd_multi, 0x0003);
+ al_reg_write32(&grp_info->regs_base->lane[2].ipd_multi, 0x0003);
+ al_reg_write32(&grp_info->regs_base->lane[3].ipd_multi, 0x0003);
+ al_reg_write32(&grp_info->regs_base->gen.ictl_pcs , 0);
+ al_reg_write32(&grp_info->regs_base->gen.irst, 0x001000);
+ al_serdes_ns_delay(800);
+ al_reg_write32(&grp_info->regs_base->gen.irst, 0x000000);
+ al_serdes_ns_delay(500);
+ al_reg_write32(&grp_info->regs_base->gen.irst, 0x001000);
+ al_serdes_ns_delay(500);
+
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 101, 183);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 102, 183);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 103, 12);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 104, 12);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 105, 26);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 106, 26);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 107, 2);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 108, 2);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 109, 17);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 110, 13);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 101, 153);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 102, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 103, 108);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 104, 183);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 105, 183);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 106, 12);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 107, 12);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 108, 26);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 109, 26);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 110, 7);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 111, 12);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 112, 8);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 113, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 114, 8);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 115, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 116, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 117, 179);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 118, 246);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 119, 208);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 120, 239);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 121, 251);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 122, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 123, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 124, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 125, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 126, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 127, 211);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 128, 211);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 129, 226);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 130, 239);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 131, 251);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 132, 251);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 133, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 134, 239);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 135, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 136, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 137, 211);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 138, 211);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 139, 226);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 140, 239);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 141, 251);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 142, 251);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 143, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 144, 239);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 145, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 146, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 147, 251);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 148, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 149, 63);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 150, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 151, 100);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 152, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 153, 4);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 154, 2);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 155, 5);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 156, 5);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 157, 4);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 158, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 159, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 160, 8);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 161, 4);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 162, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 163, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 164, 4);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0_LANE_0,
+ AL_SRDS_REG_TYPE_PMA, 7, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_1_LANE_1,
+ AL_SRDS_REG_TYPE_PMA, 7, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_2_LANE_2,
+ AL_SRDS_REG_TYPE_PMA, 7, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_3_LANE_3,
+ AL_SRDS_REG_TYPE_PMA, 7, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 13, 16);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 48, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 49, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 54, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 55, 180);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 93, 2);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 165, 3);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 41, 6);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 354, 3);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 355, 58);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 356, 9);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 357, 3);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 358, 62);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 359, 12);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 701, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 87, 0x1f);
+
+ al_serdes_common_cfg_eth(grp_info);
+
+ al_serdes_mode_rx_tx_inv_state_restore(grp_info, &rx_tx_inv_state);
+
+ al_reg_write32(&grp_info->regs_base->gen.irst, 0x0011F0);
+ al_serdes_ns_delay(500);
+}
+
+void al_serdes_mode_set_kr(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp)
+{
+ struct al_serdes_group_info *grp_info;
+ struct al_serdes_mode_rx_tx_inv_state rx_tx_inv_state;
+
+ al_assert(obj);
+ al_assert(((int)grp) >= AL_SRDS_GRP_A);
+ al_assert(((int)grp) <= AL_SRDS_GRP_D);
+
+ grp_info = &obj->grp_info[grp];
+
+ al_serdes_mode_rx_tx_inv_state_save(grp_info, &rx_tx_inv_state);
+
+ al_reg_write32(&grp_info->regs_base->gen.irst, 0x000000);
+ al_reg_write32(&grp_info->regs_base->lane[0].ictl_multi, 0x30330030);
+ al_reg_write32(&grp_info->regs_base->lane[1].ictl_multi, 0x30330030);
+ al_reg_write32(&grp_info->regs_base->lane[2].ictl_multi, 0x30330030);
+ al_reg_write32(&grp_info->regs_base->lane[3].ictl_multi, 0x30330030);
+ al_reg_write32(&grp_info->regs_base->gen.ipd_multi_synth , 0x0001);
+ al_reg_write32(&grp_info->regs_base->lane[0].ipd_multi, 0x0003);
+ al_reg_write32(&grp_info->regs_base->lane[1].ipd_multi, 0x0003);
+ al_reg_write32(&grp_info->regs_base->lane[2].ipd_multi, 0x0003);
+ al_reg_write32(&grp_info->regs_base->lane[3].ipd_multi, 0x0003);
+ al_reg_write32(&grp_info->regs_base->gen.ictl_pcs , 0);
+ al_reg_write32(&grp_info->regs_base->gen.irst, 0x001000);
+ al_serdes_ns_delay(800);
+ al_reg_write32(&grp_info->regs_base->gen.irst, 0x000000);
+ al_serdes_ns_delay(500);
+ al_reg_write32(&grp_info->regs_base->gen.irst, 0x001000);
+ al_serdes_ns_delay(500);
+
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 101, 189);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 102, 189);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 103, 6);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 104, 6);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 105, 27);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 106, 27);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 107, 1);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 108, 1);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 109, 119);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 110, 5);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 101, 170);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 102, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 103, 108);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 104, 189);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 105, 189);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 106, 6);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 107, 6);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 108, 27);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 109, 27);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 110, 7);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 111, 12);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 112, 16);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 113, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 114, 16);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 115, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 116, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 117, 179);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 118, 246);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 119, 208);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 120, 239);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 121, 251);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 122, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 123, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 124, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 125, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 126, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 127, 211);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 128, 211);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 129, 226);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 130, 239);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 131, 251);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 132, 251);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 133, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 134, 239);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 135, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 136, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 137, 211);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 138, 211);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 139, 226);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 140, 239);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 141, 251);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 142, 251);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 143, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 144, 239);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 145, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 146, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 147, 251);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 148, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 149, 63);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 150, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 151, 50);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 152, 17);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 153, 2);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 154, 1);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 155, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 156, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 157, 4);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 158, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 159, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 160, 8);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 161, 4);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 162, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 163, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 164, 4);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0_LANE_0,
+ AL_SRDS_REG_TYPE_PMA, 7, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_1_LANE_1,
+ AL_SRDS_REG_TYPE_PMA, 7, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_2_LANE_2,
+ AL_SRDS_REG_TYPE_PMA, 7, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_3_LANE_3,
+ AL_SRDS_REG_TYPE_PMA, 7, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 13, 16);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 48, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 49, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 54, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 55, 149); /*Was 182*/
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 93, 2);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 165, 3);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 41, 6);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 354, 3);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 355, 58);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 356, 9);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 357, 3);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 358, 62);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 359, 12);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 701, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 87, 0x1f);
+
+ al_serdes_common_cfg_eth(grp_info);
+
+ al_serdes_mode_rx_tx_inv_state_restore(grp_info, &rx_tx_inv_state);
+
+ al_reg_write32(&grp_info->regs_base->gen.irst, 0x0011F0);
+ al_serdes_ns_delay(500);
+}
+
+/*
+ * Read back the advanced RX parameters of a lane from the per-lane CALEQ
+ * registers; each register packs two fields, extracted via mask + shift.
+ */
+void al_serdes_rx_advanced_params_get(struct al_serdes_obj *obj,
+				      enum al_serdes_group grp,
+				      enum al_serdes_lane lane,
+				      struct al_serdes_adv_rx_params* rx_params)
+{
+	uint8_t v;
+
+	/* CALEQ 1: DC gain and DFE post-tap 3dB frequency */
+	al_serdes_lane_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_RX_CALEQ_1_REG_NUM, &v);
+	rx_params->dcgain =
+		(v & SERDES_IREG_RX_CALEQ_1_DCGAIN_MASK) >>
+		SERDES_IREG_RX_CALEQ_1_DCGAIN_SHIFT;
+	rx_params->dfe_3db_freq =
+		(v & SERDES_IREG_RX_CALEQ_1_DFEPSTAP3DB_MASK) >>
+		SERDES_IREG_RX_CALEQ_1_DFEPSTAP3DB_SHIFT;
+
+	/* CALEQ 2: DFE post-tap gain and first tap control */
+	al_serdes_lane_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_RX_CALEQ_2_REG_NUM, &v);
+	rx_params->dfe_gain =
+		(v & SERDES_IREG_RX_CALEQ_2_DFEPSTAPGAIN_MASK) >>
+		SERDES_IREG_RX_CALEQ_2_DFEPSTAPGAIN_SHIFT;
+	rx_params->dfe_first_tap_ctrl =
+		(v & SERDES_IREG_RX_CALEQ_2_DFETAP1GAIN_MASK) >>
+		SERDES_IREG_RX_CALEQ_2_DFETAP1GAIN_SHIFT;
+
+	/* CALEQ 3: DFE second and third tap control */
+	al_serdes_lane_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_RX_CALEQ_3_REG_NUM, &v);
+	rx_params->dfe_secound_tap_ctrl =
+		(v & SERDES_IREG_RX_CALEQ_3_DFETAP2GAIN_MASK) >>
+		SERDES_IREG_RX_CALEQ_3_DFETAP2GAIN_SHIFT;
+	rx_params->dfe_third_tap_ctrl =
+		(v & SERDES_IREG_RX_CALEQ_3_DFETAP3GAIN_MASK) >>
+		SERDES_IREG_RX_CALEQ_3_DFETAP3GAIN_SHIFT;
+
+	/* CALEQ 4: DFE fourth tap control and low-frequency AGC gain */
+	al_serdes_lane_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_RX_CALEQ_4_REG_NUM, &v);
+	rx_params->dfe_fourth_tap_ctrl =
+		(v & SERDES_IREG_RX_CALEQ_4_DFETAP4GAIN_MASK) >>
+		SERDES_IREG_RX_CALEQ_4_DFETAP4GAIN_SHIFT;
+	rx_params->low_freq_agc_gain =
+		(v & SERDES_IREG_RX_CALEQ_4_LOFREQAGCGAIN_MASK) >>
+		SERDES_IREG_RX_CALEQ_4_LOFREQAGCGAIN_SHIFT;
+
+	/* CALEQ 5: precal code select and high-frequency AGC boost */
+	al_serdes_lane_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_RX_CALEQ_5_REG_NUM, &v);
+	rx_params->precal_code_sel =
+		(v & SERDES_IREG_RX_CALEQ_5_PRECAL_CODE_SEL_MASK) >>
+		SERDES_IREG_RX_CALEQ_5_PRECAL_CODE_SEL_SHIFT;
+	rx_params->high_freq_agc_boost =
+		(v & SERDES_IREG_RX_CALEQ_5_HIFREQAGCCAP_MASK) >>
+		SERDES_IREG_RX_CALEQ_5_HIFREQAGCCAP_SHIFT;
+
+	/* 'override' reports true when the RX_DRV_OVERRIDE_EN bit is clear */
+	al_serdes_lane_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN_REG_NUM, &v);
+	rx_params->override = !(v & SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN);
+}
+
+#if ( SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN_REG_NUM != \
+ SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN_REG_NUM || \
+ SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN_REG_NUM != \
+ SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN_REG_NUM)
+#error Wrong assumption
+#endif
+/*
+ * Run RX equalization on a single SERDES lane and return its eye score.
+ *
+ * Returns the Roam Eye mechanism to internal control, overrides the RxEQ
+ * start bit via PMA, starts the calibration, polls for completion, reads
+ * the "best eye" metric and restores every register it touched.  The RxEQ
+ * process is stopped and the saved registers are restored on the timeout
+ * path as well, so the lane is never left with stale PMA overrides.
+ *
+ * @param obj	The object context
+ * @param grp	The SERDES group
+ * @param lane	The SERDES lane within the group
+ *
+ * @return the eye score ((MSB & 0xFF) << 6, plus the masked LSB) on
+ *	   success, -1 on timeout
+ */
+int al_serdes_rx_equalization(
+	struct al_serdes_obj *obj,
+	enum al_serdes_group grp,
+	enum al_serdes_lane lane)
+{
+	uint8_t serdes_ireg_fld_rxcalroamyadjust_locwren_val;
+	uint8_t serdes_ireg_fld_rxroam_xorbitsel_val;
+	uint8_t serdes_ireg_fld_pcsrxeq_locwren_val;
+	uint8_t serdes_ireg_fld_rxcal_locwren_val;
+	uint8_t temp_val;
+	uint8_t done = 0;	/* initialized: the poll loop may be skipped */
+
+	int test_score;
+	int i;
+
+	/*
+	 * Make sure the Roam Eye mechanism is not overridden:
+	 * - RXCALEYEDIAGFSMIN_LOCWREN = 1: Rx 4-point Eye process internal
+	 * - RXCALROAMEYEMEASIN_LOCWREN = 1: Eye Roam latch internal
+	 * - RXCALROAMXADJUST_LOCWREN = 1: Eye Roam latch 'X adjust' internal
+	 * - RXCALROAMYADJUST_LOCWREN = 1: Eye Roam latch 'Y adjust' internal
+	 * - RXROAM_XORBITSEL = 0: Eye Roam latch works on the right eye
+	 *   position (0 is the value needed for most cases)
+	 * The original settings are restored at the end.
+	 */
+
+	/* Save the current values for restoring them at the end */
+	al_serdes_lane_read(
+		obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM,
+		&serdes_ireg_fld_rxcal_locwren_val);
+	al_serdes_lane_read(
+		obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM,
+		&serdes_ireg_fld_rxcalroamyadjust_locwren_val);
+	al_serdes_lane_read(
+		obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_RXROAM_XORBITSEL_REG_NUM,
+		&serdes_ireg_fld_rxroam_xorbitsel_val);
+	al_serdes_lane_read(
+		obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_PCSRXEQ_LOCWREN_REG_NUM,
+		&serdes_ireg_fld_pcsrxeq_locwren_val);
+
+	/*
+	 * Set RXCALEYEDIAGFSMIN_LOCWREN, RXCALROAMEYEMEASIN_LOCWREN and
+	 * RXCALROAMXADJUST_LOCWREN to return 4pt-RxEye and EyeRoam latch
+	 * to internal logic.
+	 * NOTE(review): the original comment claimed RX_DRV_OVERRIDE_EN is
+	 * cleared here, but the code sets it; the code behavior is kept
+	 * unchanged - confirm against the SerDes spec.
+	 */
+	temp_val = serdes_ireg_fld_rxcal_locwren_val;
+	temp_val |= SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN;
+	temp_val |= SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN;
+	temp_val |= SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN;
+	temp_val |= SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN;
+
+	al_serdes_lane_write(
+		obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM,
+		temp_val);
+
+	/*
+	 * Set RXCALROAMYADJUST_LOCWREN to return EyeRoam latch Y to
+	 * internal logic
+	 */
+	temp_val = serdes_ireg_fld_rxcalroamyadjust_locwren_val |
+		SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN;
+	al_serdes_lane_write(
+		obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM,
+		temp_val);
+
+	/* Clear RXROAM_XORBITSEL (XORBITSEL=0, needed for the eye mapping) */
+	temp_val = serdes_ireg_fld_rxroam_xorbitsel_val &
+		~SERDES_IREG_FLD_RXROAM_XORBITSEL;
+	al_serdes_lane_write(
+		obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_RXROAM_XORBITSEL_REG_NUM,
+		temp_val);
+
+	/*
+	 * Take control from the internal pin over the RxEQ process:
+	 * clear PCSRXEQ_LOCWREN to override RxEQ via PMA
+	 */
+	temp_val = serdes_ireg_fld_pcsrxeq_locwren_val &
+		~SERDES_IREG_FLD_PCSRXEQ_LOCWREN;
+	al_serdes_lane_write(
+		obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_PCSRXEQ_LOCWREN_REG_NUM,
+		temp_val);
+
+	/*
+	 * Start/stop of the RxEQ calibration is via PCSRXEQ_START
+	 * (1 = start, 0 = stop): clear it first so the calibration starts
+	 * fresh from 'stop'.
+	 */
+	al_serdes_lane_read(
+		obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_PCSRXEQ_START_REG_NUM,
+		&temp_val);
+	temp_val &= ~SERDES_IREG_FLD_PCSRXEQ_START;
+	al_serdes_lane_write(
+		obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_PCSRXEQ_START_REG_NUM,
+		temp_val);
+
+	/* Set PCSRXEQ_START to begin the RxEQ calibration */
+	temp_val |= SERDES_IREG_FLD_PCSRXEQ_START;
+	al_serdes_lane_write(
+		obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_PCSRXEQ_START_REG_NUM,
+		temp_val);
+
+	/* Poll on RxEQ calibration completion (RXEQ_DONE: 1 = done) */
+	for (i = 0; i < AL_SERDES_RX_EQUAL_TRIES; ++i) {
+		al_serdes_lane_read(
+			obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALROAMEYEMEASDONE_REG_NUM,
+			&done);
+		done &= SERDES_IREG_FLD_RXEQ_DONE;
+
+		if (done)
+			break;
+		al_msleep(AL_SERDES_RX_EQUAL_MDELAY);
+	}
+
+	if (!done)
+		al_err("%s: Timeout!\n", __func__);
+
+	/* Stop the RxEQ process - on the timeout path as well */
+	temp_val &= ~SERDES_IREG_FLD_PCSRXEQ_START;
+	al_serdes_lane_write(
+		obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_PCSRXEQ_START_REG_NUM,
+		temp_val);
+
+	if (done) {
+		/* Get score: 8 MSBs shifted left by 6, plus the masked LSB */
+		al_serdes_lane_read(
+			obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_RXEQ_BEST_EYE_MSB_VAL_REG_NUM,
+			&temp_val);
+		test_score = (int)((temp_val & 0xFF) << 6);
+		al_serdes_lane_read(
+			obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_RXEQ_BEST_EYE_LSB_VAL_REG_NUM,
+			&temp_val);
+		test_score += (int)(temp_val &
+			SERDES_IREG_RXEQ_BEST_EYE_LSB_VAL_MASK);
+	} else {
+		test_score = -1;
+	}
+
+	/* Restore the saved values - on both the success and timeout paths */
+	al_serdes_lane_write(
+		obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM,
+		serdes_ireg_fld_rxcal_locwren_val);
+	al_serdes_lane_write(
+		obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM,
+		serdes_ireg_fld_rxcalroamyadjust_locwren_val);
+	al_serdes_lane_write(
+		obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_RXROAM_XORBITSEL_REG_NUM,
+		serdes_ireg_fld_rxroam_xorbitsel_val);
+	al_serdes_lane_write(
+		obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_PCSRXEQ_LOCWREN_REG_NUM,
+		serdes_ireg_fld_pcsrxeq_locwren_val);
+
+	return test_score;
+}
+
+#if ( SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM != \
+ SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN_REG_NUM || \
+ SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM != \
+ SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN_REG_NUM || \
+ SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM != \
+ SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN_REG_NUM)
+#error Wrong assumption
+#endif
+/*
+ * Measure the RX eye width and height of a SERDES lane.
+ *
+ * Saves the eye-diagnostic FSM configuration and the lock/override bits,
+ * hands the eye-diagnostic FSM to internal control, runs an X-only sweep
+ * (width) followed by a Y-only sweep (height), combining the MSB/LSB eye
+ * metrics into *width / *height, then stops the FSM and restores every
+ * saved register - on the error and timeout paths as well.
+ *
+ * Bug fixes vs. the previous revision:
+ * - '*width =+ ...' / '*height =+ ...' were plain assignments of a
+ *   unary-plus expression, discarding the MSB portion; changed to '+='.
+ * - 'status' is now initialized (the poll loop may be skipped).
+ * - Registers are restored and the FSM stopped on error/timeout too.
+ *
+ * @param obj	 The object context
+ * @param grp	 The SERDES group
+ * @param lane	 The SERDES lane within the group
+ * @param width	 Measured eye width output
+ * @param height Measured eye height output
+ *
+ * @return 0 on success, -1 on measurement error or timeout
+ */
+int al_serdes_calc_eye_size(
+	struct al_serdes_obj *obj,
+	enum al_serdes_group grp,
+	enum al_serdes_lane lane,
+	int* width,
+	int* height)
+{
+	uint8_t rxcaleyediagfsm_x_y_valweight_val;
+	uint8_t rxcaleyediagfsm_xvalcoarse_val;
+	uint8_t rxcaleyediagfsm_xvalfine_val;
+	uint8_t rxcaleyediagfsm_yvalcoarse_val;
+	uint8_t rxcaleyediagfsm_yvalfine_val;
+	uint8_t rxlock2ref_locwren_val;
+	uint8_t rxcal_locwren_val;
+	uint8_t rxcalroamyadjust_locwren_val;
+	uint8_t rxlock2ref_ovren_val;
+
+	int rc = 0;
+	int i;
+	uint8_t status = 0;	/* initialized: the poll loop may be skipped */
+	uint8_t reg_value;
+
+	/* Save Registers */
+	al_serdes_lane_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXLOCK2REF_LOCWREN_REG_NUM,
+			&rxlock2ref_locwren_val);
+	al_serdes_lane_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM,
+			&rxcal_locwren_val);
+	al_serdes_lane_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM,
+			&rxcalroamyadjust_locwren_val);
+	al_serdes_lane_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXLOCK2REF_OVREN_REG_NUM,
+			&rxlock2ref_ovren_val);
+
+	al_serdes_reg_read(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_X_Y_VALWEIGHT_REG_NUM,
+			&rxcaleyediagfsm_x_y_valweight_val);
+	al_serdes_reg_read(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_REG_NUM,
+			&rxcaleyediagfsm_xvalcoarse_val);
+	al_serdes_reg_read(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_REG_NUM,
+			&rxcaleyediagfsm_xvalfine_val);
+	al_serdes_reg_read(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_REG_NUM,
+			&rxcaleyediagfsm_yvalcoarse_val);
+	al_serdes_reg_read(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_REG_NUM,
+			&rxcaleyediagfsm_yvalfine_val);
+
+	/*
+	 * Clear RXCALEYEDIAGFSMIN_LOCWREN to override RxEQ via PMA;
+	 * set RXCALROAMEYEMEASIN_LOCWREN and RXCALROAMXADJUST_LOCWREN
+	 * to keep the Eye Diag Roam controlled internally
+	 */
+	al_serdes_grp_lane_masked_write(&obj->grp_info[grp],
+			lane,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN |
+			SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN |
+			SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN,
+			SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN |
+			SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN);
+	/*
+	 * Set RXCALROAMYADJUST_LOCWREN to keep the Eye Diag Roam
+	 * controlled internally
+	 */
+	al_serdes_grp_lane_masked_write(&obj->grp_info[grp],
+			lane,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM,
+			SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN,
+			SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN);
+
+	/*
+	 * Clear RXROAM_XORBITSEL (XORBITSEL=0, needed for the eye mapping);
+	 * set RXLOCK2REF_OVREN (keep lock to data, preventing data hit)
+	 */
+	al_serdes_grp_lane_masked_write(&obj->grp_info[grp],
+			lane,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXROAM_XORBITSEL_REG_NUM,
+			SERDES_IREG_FLD_RXLOCK2REF_OVREN |
+			SERDES_IREG_FLD_RXROAM_XORBITSEL,
+			SERDES_IREG_FLD_RXLOCK2REF_OVREN);
+
+	/* Clear RXLOCK2REF_LOCWREN to override control */
+	al_serdes_grp_lane_masked_write(&obj->grp_info[grp],
+			lane,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXLOCK2REF_LOCWREN_REG_NUM,
+			SERDES_IREG_FLD_RXLOCK2REF_LOCWREN,
+			0);
+
+	/* Width Calculation */
+
+	/* Return Value = 0*Y + 1*X */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_X_Y_VALWEIGHT_REG_NUM,
+			0x01);
+	/* X coarse scan step = 3 */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_REG_NUM,
+			0x03);
+	/* X fine scan step = 1 */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_REG_NUM,
+			0x01);
+	/* Y coarse scan step = 0 */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_REG_NUM,
+			0x00);
+	/* Y fine scan step = 0 */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_REG_NUM,
+			0x00);
+
+	/* Set RXCALEYEDIAGFSMIN_START to start the eye measurement */
+	al_serdes_grp_lane_masked_write(&obj->grp_info[grp],
+			lane,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START_REG_NUM,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START);
+
+	for (i = 0; i < AL_SERDES_RX_EYE_CAL_TRIES; ++i) {
+		/* Check if the eye measurement is done */
+		al_serdes_lane_read(
+			obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE_REG_NUM,
+			&status);
+		if (status & SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE)
+			break;
+		al_msleep(AL_SERDES_RX_EYE_CAL_MDELAY);
+	}
+
+	if (status & SERDES_IREG_FLD_RXCALEYEDIAGFSM_ERR) {
+		al_err("%s: eye measure error!\n", __func__);
+		rc = -1;
+		goto stop_restore;
+	}
+
+	if (!(status & SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE)) {
+		al_err("%s: eye measure timeout!\n", __func__);
+		rc = -1;
+		goto stop_restore;
+	}
+
+	/*
+	 * Read the eye opening metric:
+	 * EYESUM_MSB shifted left by 6, plus the masked EYESUM_LSB
+	 * ('MAKE' is the header's spelling of the LSB mask macro)
+	 */
+	al_serdes_lane_read(
+		obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB_REG_NUM,
+		&reg_value);
+	*width = reg_value << 6;
+	al_serdes_lane_read(
+		obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_REG_NUM,
+		&reg_value);
+	*width += reg_value & SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_MAKE;
+
+	/* Clear RXCALEYEDIAGFSMIN_START to stop the eye measurement */
+	al_serdes_grp_lane_masked_write(&obj->grp_info[grp],
+			lane,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START_REG_NUM,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START,
+			0);
+
+	/* Height Calculation */
+
+	/* Return Value = 1*Y + 0*X */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_X_Y_VALWEIGHT_REG_NUM,
+			0x10);
+	/* X coarse scan step = 0 */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_REG_NUM,
+			0x00);
+	/* X fine scan step = 0 */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_REG_NUM,
+			0x00);
+	/* Y coarse scan step = 3 */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_REG_NUM,
+			0x03);
+	/* Y fine scan step = 1 */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_REG_NUM,
+			0x01);
+
+	/* Set RXCALEYEDIAGFSMIN_START to start the eye measurement */
+	al_serdes_grp_lane_masked_write(&obj->grp_info[grp],
+			lane,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START_REG_NUM,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START);
+
+	for (i = 0; i < AL_SERDES_RX_EYE_CAL_TRIES; ++i) {
+		/* Check if the eye measurement is done */
+		al_serdes_lane_read(
+			obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE_REG_NUM,
+			&status);
+		if (status & SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE)
+			break;
+		al_msleep(AL_SERDES_RX_EYE_CAL_MDELAY);
+	}
+
+	if (status & SERDES_IREG_FLD_RXCALEYEDIAGFSM_ERR) {
+		al_err("%s: eye measure error!\n", __func__);
+		rc = -1;
+		goto stop_restore;
+	}
+
+	if (!(status & SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE)) {
+		al_err("%s: eye measure timeout!\n", __func__);
+		rc = -1;
+		goto stop_restore;
+	}
+
+	/*
+	 * Read the eye opening metric:
+	 * EYESUM_MSB shifted left by 6, plus the masked EYESUM_LSB
+	 */
+	al_serdes_lane_read(
+		obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB_REG_NUM,
+		&reg_value);
+	*height = reg_value << 6;
+	al_serdes_lane_read(
+		obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_REG_NUM,
+		&reg_value);
+	*height += reg_value & SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_MAKE;
+
+stop_restore:
+	/* Clear RXCALEYEDIAGFSMIN_START to stop the eye measurement */
+	al_serdes_grp_lane_masked_write(&obj->grp_info[grp],
+			lane,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START_REG_NUM,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START,
+			0);
+
+	/* Restore Registers - on the error paths as well */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_X_Y_VALWEIGHT_REG_NUM,
+			rxcaleyediagfsm_x_y_valweight_val);
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_REG_NUM,
+			rxcaleyediagfsm_xvalcoarse_val);
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_REG_NUM,
+			rxcaleyediagfsm_xvalfine_val);
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_REG_NUM,
+			rxcaleyediagfsm_yvalcoarse_val);
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_REG_NUM,
+			rxcaleyediagfsm_yvalfine_val);
+
+	al_serdes_lane_write(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXLOCK2REF_LOCWREN_REG_NUM,
+			rxlock2ref_locwren_val);
+	al_serdes_lane_write(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM,
+			rxcal_locwren_val);
+	al_serdes_lane_write(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM,
+			rxcalroamyadjust_locwren_val);
+	al_serdes_lane_write(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXLOCK2REF_OVREN_REG_NUM,
+			rxlock2ref_ovren_val);
+	return rc;
+}
+
+/*
+ * Configure SRIS (separate reference clock) parameters for a SERDES group.
+ *
+ * Writes the 16-bit PPM drift count/max and synth PPM drift max values
+ * (split into low/high byte registers) into the group-common PMA page,
+ * then programs the PCS elastic-buffer full/read-threshold fields for
+ * D2R1 and PCIe Gen3 via masked writes.
+ *
+ * @param obj		The object context
+ * @param grp		The SERDES group
+ * @param params	SRIS parameters to apply
+ */
+void al_serdes_sris_config(
+	struct al_serdes_obj *obj,
+	enum al_serdes_group grp,
+	struct al_serdes_sris_params *params)
+{
+	struct al_serdes_group_info *grp_info = &obj->grp_info[grp];
+
+	/* PPM drift count: low byte, then high byte */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_PPMDRIFTCOUNT1_REG_NUM,
+			(params->ppm_drift_count & AL_FIELD_MASK(7, 0)) >> 0);
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_PPMDRIFTCOUNT2_REG_NUM,
+			(params->ppm_drift_count & AL_FIELD_MASK(15, 8)) >> 8);
+
+	/* PPM drift max: low byte, then high byte */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_PPMDRIFTMAX1_REG_NUM,
+			(params->ppm_drift_max & AL_FIELD_MASK(7, 0)) >> 0);
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_PPMDRIFTMAX2_REG_NUM,
+			(params->ppm_drift_max & AL_FIELD_MASK(15, 8)) >> 8);
+
+	/* Synthesizer PPM drift max: low byte, then high byte */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_SYNTHPPMDRIFTMAX1_REG_NUM,
+			(params->synth_ppm_drift_max & AL_FIELD_MASK(7, 0)) >> 0);
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_SYNTHPPMDRIFTMAX2_REG_NUM,
+			(params->synth_ppm_drift_max & AL_FIELD_MASK(15, 8)) >> 8);
+
+	/* PCS elastic-buffer thresholds (masked field writes) */
+	al_serdes_grp_reg_masked_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, AL_SRDS_REG_TYPE_PCS,
+			SERDES_IREG_FLD_PCS_EBUF_FULL_D2R1_REG_NUM,
+			SERDES_IREG_FLD_PCS_EBUF_FULL_D2R1_REG_MASK,
+			(params->full_d2r1)
+			<< SERDES_IREG_FLD_PCS_EBUF_FULL_D2R1_REG_SHIFT);
+
+	al_serdes_grp_reg_masked_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, AL_SRDS_REG_TYPE_PCS,
+			SERDES_IREG_FLD_PCS_EBUF_FULL_PCIE_G3_REG_NUM,
+			SERDES_IREG_FLD_PCS_EBUF_FULL_PCIE_G3_REG_MASK,
+			(params->full_pcie_g3)
+			<< SERDES_IREG_FLD_PCS_EBUF_FULL_PCIE_G3_REG_SHIFT);
+
+	al_serdes_grp_reg_masked_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, AL_SRDS_REG_TYPE_PCS,
+			SERDES_IREG_FLD_PCS_EBUF_RD_THRESHOLD_D2R1_REG_NUM,
+			SERDES_IREG_FLD_PCS_EBUF_RD_THRESHOLD_D2R1_REG_MASK,
+			(params->rd_threshold_d2r1)
+			<< SERDES_IREG_FLD_PCS_EBUF_RD_THRESHOLD_D2R1_REG_SHIFT);
+
+	al_serdes_grp_reg_masked_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON, AL_SRDS_REG_TYPE_PCS,
+			SERDES_IREG_FLD_PCS_EBUF_RD_THRESHOLD_PCIE_G3_REG_NUM,
+			SERDES_IREG_FLD_PCS_EBUF_RD_THRESHOLD_PCIE_G3_REG_MASK,
+			(params->rd_threshold_pcie_g3)
+			<< SERDES_IREG_FLD_PCS_EBUF_RD_THRESHOLD_PCIE_G3_REG_SHIFT);
+}
diff --git a/al_hal_serdes.h b/al_hal_serdes.h
new file mode 100644
index 000000000000..37aec839b2f2
--- /dev/null
+++ b/al_hal_serdes.h
@@ -0,0 +1,1125 @@
+/*-
+*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_serdes_api API
+ * SerDes HAL driver API
+ * @ingroup group_serdes SerDes
+ * @{
+ *
+ * @file al_hal_serdes.h
+ *
+ * @brief Header file for the SerDes HAL driver
+ *
+ */
+
+#ifndef __AL_HAL_SERDES_H__
+#define __AL_HAL_SERDES_H__
+
+#include "al_hal_common.h"
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+struct al_serdes_obj;
+
+/** SerDes group selection (one of four groups: A-D) */
+enum al_serdes_group {
+	AL_SRDS_GRP_A = 0,
+	AL_SRDS_GRP_B,
+	AL_SRDS_GRP_C,
+	AL_SRDS_GRP_D,
+
+	AL_SRDS_NUM_GROUPS,
+};
+
+/** Per-group context, filled automatically by al_serdes_handle_init */
+struct al_serdes_group_info {
+	/*
+	 * Group parent object - filled automatically by al_serdes_handle_init
+	 */
+	struct al_serdes_obj *pobj;
+
+	/*
+	 * Group specific register base - filled automatically by
+	 * al_serdes_handle_init
+	 */
+	struct al_serdes_regs __iomem *regs_base;
+};
+
+/** SerDes handle: holds the per-group context for all groups */
+struct al_serdes_obj {
+	struct al_serdes_group_info grp_info[AL_SRDS_NUM_GROUPS];
+};
+
+/**
+ * SerDes register page selection: pages 0-3 address a single lane,
+ * page 4 the group-common registers, and page 7 lanes 0-3 together.
+ */
+enum al_serdes_reg_page {
+	AL_SRDS_REG_PAGE_0_LANE_0 = 0,
+	AL_SRDS_REG_PAGE_1_LANE_1,
+	AL_SRDS_REG_PAGE_2_LANE_2,
+	AL_SRDS_REG_PAGE_3_LANE_3,
+	AL_SRDS_REG_PAGE_4_COMMON,
+	AL_SRDS_REG_PAGE_0123_LANES_0123 = 7,
+};
+
+/** SerDes register type within a page: PMA or PCS register space */
+enum al_serdes_reg_type {
+	AL_SRDS_REG_TYPE_PMA = 0,
+	AL_SRDS_REG_TYPE_PCS,
+};
+
+/**
+ * SerDes lane selection; values deliberately alias the matching
+ * register-page values (see enum al_serdes_reg_page).
+ */
+enum al_serdes_lane {
+	AL_SRDS_LANE_0 = AL_SRDS_REG_PAGE_0_LANE_0,
+	AL_SRDS_LANE_1 = AL_SRDS_REG_PAGE_1_LANE_1,
+	AL_SRDS_LANE_2 = AL_SRDS_REG_PAGE_2_LANE_2,
+	AL_SRDS_LANE_3 = AL_SRDS_REG_PAGE_3_LANE_3,
+
+	AL_SRDS_NUM_LANES,
+	AL_SRDS_LANES_0123 = AL_SRDS_REG_PAGE_0123_LANES_0123,
+};
+
+/** Serdes loopback mode */
+enum al_serdes_lb_mode {
+ /** No loopback */
+ AL_SRDS_LB_MODE_OFF,
+
+ /**
+ * Transmits the untimed, partial equalized RX signal out the transmit
+ * IO pins.
+ * No clock used (untimed)
+ */
+ AL_SRDS_LB_MODE_PMA_IO_UN_TIMED_RX_TO_TX,
+
+ /**
+ * Loops back the TX serializer output into the CDR.
+ * CDR recovered bit clock used (without attenuation)
+ */
+ AL_SRDS_LB_MODE_PMA_INTERNALLY_BUFFERED_SERIAL_TX_TO_RX,
+
+ /**
+ * Loops back the TX driver IO signal to the RX IO pins
+ * CDR recovered bit clock used (only through IO)
+ */
+ AL_SRDS_LB_MODE_PMA_SERIAL_TX_IO_TO_RX_IO,
+
+ /**
+ * Parallel loopback from the PMA receive lane data ports, to the
+ * transmit lane data ports
+ * CDR recovered bit clock used
+ */
+ AL_SRDS_LB_MODE_PMA_PARALLEL_RX_TO_TX,
+
+ /** Loops received data after elastic buffer to transmit path */
+ AL_SRDS_LB_MODE_PCS_PIPE,
+
+ /** Loops TX data (to PMA) to RX path (instead of PMA data) */
+ AL_SRDS_LB_MODE_PCS_NEAR_END,
+
+ /** Loops receive data prior to interface block to transmit path */
+ AL_SRDS_LB_MODE_PCS_FAR_END,
+};
+
+/** Serdes BIST pattern */
+enum al_serdes_bist_pattern {
+ AL_SRDS_BIST_PATTERN_USER,
+ AL_SRDS_BIST_PATTERN_PRBS7,
+ AL_SRDS_BIST_PATTERN_PRBS23,
+ AL_SRDS_BIST_PATTERN_PRBS31,
+ AL_SRDS_BIST_PATTERN_CLK1010,
+};
+
+/** SerDes group rate */
+enum al_serdes_rate {
+ AL_SRDS_RATE_1_8,
+ AL_SRDS_RATE_1_4,
+ AL_SRDS_RATE_1_2,
+ AL_SRDS_RATE_FULL,
+};
+
+/** SerDes power mode */
+enum al_serdes_pm {
+ AL_SRDS_PM_PD,
+ AL_SRDS_PM_P2,
+ AL_SRDS_PM_P1,
+ AL_SRDS_PM_P0S,
+ AL_SRDS_PM_P0,
+};
+
+/** SerDes PCIe Rate - values are important for proper behavior */
+enum al_serdes_pcie_rate {
+ AL_SRDS_PCIE_RATE_GEN1 = 0,
+ AL_SRDS_PCIE_RATE_GEN2,
+ AL_SRDS_PCIE_RATE_GEN3,
+};
+
+/**
+ * Initializes a SERDES object
+ *
+ * @param serdes_regs_base
+ * The SERDES register file base pointer
+ *
+ * @param obj
+ * An allocated, non initialized object context
+ *
+ *
+ * @return 0 if no error found.
+ *
+ */
+int al_serdes_handle_init(
+ void __iomem *serdes_regs_base,
+ struct al_serdes_obj *obj);
+
+/**
+ * SERDES register read
+ *
+ * Reads a SERDES register
+ *
+ * @param obj
+ * The object context
+ *
+ * @param grp
+ * The SERDES group
+ *
+ * @param page
+ * The SERDES register page within the group
+ *
+ * @param type
+ * The SERDES register type (PMA /PCS)
+ *
+ * @param offset
+ * The SERDES register offset (0 - 4095)
+ *
+ * @param data
+ * The read data
+ *
+ *
+ * @return 0 if no error found.
+ *
+ */
+int al_serdes_reg_read(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_reg_page page,
+ enum al_serdes_reg_type type,
+ uint16_t offset,
+ uint8_t *data);
+
+/**
+ * SERDES register write
+ *
+ * Writes a SERDES register
+ *
+ * @param obj
+ * The object context
+ *
+ * @param grp
+ * The SERDES group
+ *
+ * @param page
+ * The SERDES register page within the group
+ *
+ * @param type
+ * The SERDES register type (PMA /PCS)
+ *
+ * @param offset
+ * The SERDES register offset (0 - 4095)
+ *
+ * @param data
+ * The data to write
+ *
+ *
+ * @return 0 if no error found.
+ *
+ */
+int al_serdes_reg_write(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_reg_page page,
+ enum al_serdes_reg_type type,
+ uint16_t offset,
+ uint8_t data);
+
+/**
+ * Enable BIST required overrides
+ *
+ * @param obj
+ * The object context
+ * @param grp
+ * The SERDES group
+ * @param rate
+ * The required speed rate
+ */
+void al_serdes_bist_overrides_enable(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_rate rate);
+
+/**
+ * Disable BIST required overrides
+ *
+ * @param obj
+ * The object context
+ * @param grp
+ * The SERDES group
+ */
+void al_serdes_bist_overrides_disable(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp);
+
+/**
+ * Rx rate change
+ *
+ * @param obj
+ * The object context
+ * @param grp
+ * The SERDES group
+ * @param rate
+ * The Rx required rate
+ */
+void al_serdes_rx_rate_change(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_rate rate);
+
+/**
+ * SERDES lane Rx rate change software flow enable
+ *
+ * @param obj
+ * The object context
+ * @param grp
+ * The SERDES group
+ * @param lane
+ * The SERDES lane within the group
+ */
+void al_serdes_lane_rx_rate_change_sw_flow_en(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane);
+
+/**
+ * SERDES lane Rx rate change software flow disable
+ *
+ * @param obj
+ * The object context
+ * @param grp
+ * The SERDES group
+ * @param lane
+ * The SERDES lane within the group
+ */
+void al_serdes_lane_rx_rate_change_sw_flow_dis(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane);
+
+/**
+ * PCIe lane rate override check
+ *
+ * @param obj
+ * The object context
+ * @param grp
+ * The SERDES group
+ * @param lane
+ * The SERDES lane within the group
+ * @returns AL_TRUE if the override is enabled
+ */
+al_bool al_serdes_lane_pcie_rate_override_is_enabled(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane);
+
+/**
+ * PCIe lane rate override control
+ *
+ * @param obj
+ * The object context
+ * @param grp
+ * The SERDES group
+ * @param lane
+ * The SERDES lane within the group
+ * @param en
+ * Enable/disable
+ */
+void al_serdes_lane_pcie_rate_override_enable_set(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ al_bool en);
+
+/**
+ * PCIe lane rate get
+ *
+ * @param obj
+ * The object context
+ * @param grp
+ * The SERDES group
+ * @param lane
+ * The SERDES lane within the group
+ */
+enum al_serdes_pcie_rate al_serdes_lane_pcie_rate_get(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane);
+
+/**
+ * PCIe lane rate set
+ *
+ * @param obj
+ * The object context
+ * @param grp
+ * The SERDES group
+ * @param lane
+ * The SERDES lane within the group
+ * @param rate
+ * The required rate
+ */
+void al_serdes_lane_pcie_rate_set(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ enum al_serdes_pcie_rate rate);
+
+/**
+ * SERDES group power mode control
+ *
+ * @param obj
+ * The object context
+ * @param grp
+ * The SERDES group
+ * @param pm
+ * The required power mode
+ */
+void al_serdes_group_pm_set(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_pm pm);
+
+/**
+ * SERDES lane power mode control
+ *
+ * @param obj
+ * The object context
+ * @param grp
+ * The SERDES group
+ * @param lane
+ * The SERDES lane within the group
+ * @param rx_pm
+ * The required RX power mode
+ * @param tx_pm
+ * The required TX power mode
+ */
+void al_serdes_lane_pm_set(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ enum al_serdes_pm rx_pm,
+ enum al_serdes_pm tx_pm);
+
+/**
+ * SERDES group PMA hard reset
+ *
+ * Controls Serdes group PMA hard reset
+ *
+ * @param obj
+ * The object context
+ *
+ * @param grp
+ * The SERDES group
+ *
+ * @param enable
+ * Enable/disable hard reset
+ */
+void al_serdes_pma_hard_reset_group(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ al_bool enable);
+
+/**
+ * SERDES lane PMA hard reset
+ *
+ * Controls Serdes lane PMA hard reset
+ *
+ * @param obj
+ * The object context
+ *
+ * @param grp
+ * The SERDES group
+ *
+ * @param lane
+ * The SERDES lane within the group
+ *
+ * @param enable
+ * Enable/disable hard reset
+ */
+void al_serdes_pma_hard_reset_lane(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ al_bool enable);
+
+/**
+ * SERDES loopback control
+ *
+ * Controls the loopback
+ *
+ * @param obj
+ * The object context
+ *
+ * @param grp
+ * The SERDES group
+ *
+ * @param lane
+ * The SERDES lane within the group
+ *
+ * @param mode
+ * The requested loopback mode
+ *
+ */
+void al_serdes_loopback_control(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ enum al_serdes_lb_mode mode);
+
+/**
+ * SERDES BIST pattern selection
+ *
+ * Selects the BIST pattern to be used
+ *
+ * @param obj
+ * The object context
+ *
+ * @param grp
+ * The SERDES group
+ *
+ * @param pattern
+ * The pattern to set
+ *
+ * @param user_data
+ * The pattern user data (when pattern == AL_SRDS_BIST_PATTERN_USER)
+ * 80 bits (8 bytes array)
+ *
+ */
+void al_serdes_bist_pattern_select(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_bist_pattern pattern,
+ uint8_t *user_data);
+
+/**
+ * SERDES BIST TX Enable
+ *
+ * Enables/disables TX BIST per lane
+ *
+ * @param obj
+ * The object context
+ *
+ * @param grp
+ * The SERDES group
+ *
+ * @param lane
+ * The SERDES lane within the group
+ *
+ * @param enable
+ * Enable or disable TX BIST
+ */
+void al_serdes_bist_tx_enable(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ al_bool enable);
+
+/**
+ * SERDES BIST TX single bit error injection
+ *
+ * Injects single bit error during a TX BIST
+ *
+ * @param obj
+ * The object context
+ *
+ * @param grp
+ * The SERDES group
+ */
+void al_serdes_bist_tx_err_inject(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp);
+
+/**
+ * SERDES BIST RX Enable
+ *
+ * Enables/disables RX BIST per lane
+ *
+ * @param obj
+ * The object context
+ *
+ * @param grp
+ * The SERDES group
+ *
+ * @param lane
+ * The SERDES lane within the group
+ *
+ * @param enable
+ *		Enable or disable RX BIST
+ */
+void al_serdes_bist_rx_enable(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ al_bool enable);
+
+/**
+ * SERDES BIST RX status
+ *
+ * Checks the RX BIST status for a specific SERDES lane
+ *
+ * @param obj
+ * The object context
+ *
+ * @param grp
+ * The SERDES group
+ *
+ * @param lane
+ * The SERDES lane within the group
+ *
+ * @param is_locked
+ * An indication whether RX BIST is locked
+ *
+ * @param err_cnt_overflow
+ *		An indication whether error count overflow occurred
+ *
+ * @param err_cnt
+ * Current bit error count
+ */
+void al_serdes_bist_rx_status(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ al_bool *is_locked,
+ al_bool *err_cnt_overflow,
+ uint16_t *err_cnt);
+
+/**
+ * SERDES Digital Test Bus
+ *
+ * Samples the digital test bus of a specific SERDES lane
+ *
+ * @param obj
+ * The object context
+ *
+ * @param grp
+ * The SERDES group
+ *
+ * @param lane
+ * The SERDES lane within the group
+ *
+ * @param sel
+ * The selected sampling group (0 - 31)
+ *
+ * @param sampled_data
+ * The sampled data (5 bytes array)
+ *
+ *
+ * @return 0 if no error found.
+ *
+ */
+int al_serdes_digital_test_bus(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ uint8_t sel,
+ uint8_t *sampled_data);
+
+
+/* KR link training */
+/**
+ * Set the tx de-emphasis to preset values
+ *
+ * @param obj The object context
+ *
+ * @param grp The SERDES group
+ *
+ * @param lane The SERDES lane within the group
+ *
+ */
+void al_serdes_tx_deemph_preset(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane);
+
+/**
+ * Tx de-emphasis parameters.
+ *
+ * Selects which de-emphasis coefficient al_serdes_tx_deemph_inc() /
+ * al_serdes_tx_deemph_dec() adjust during KR link training.
+ */
+enum al_serdes_tx_deemph_param {
+	AL_SERDES_TX_DEEMP_C_ZERO,	/**< c(0)  - main cursor */
+	AL_SERDES_TX_DEEMP_C_PLUS,	/**< c(1)  - first post-cursor */
+	AL_SERDES_TX_DEEMP_C_MINUS,	/**< c(-1) - first pre-cursor */
+};
+
+/**
+ * Increase tx de-emphasis param.
+ *
+ * @param obj The object context
+ *
+ * @param grp The SERDES group
+ *
+ * @param lane The SERDES lane within the group
+ *
+ * @param param which tx de-emphasis to change
+ *
+ * @return false in case max is reached. true otherwise.
+ */
+al_bool al_serdes_tx_deemph_inc(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ enum al_serdes_tx_deemph_param param);
+
+/**
+ * Decrease tx de-emphasis param.
+ *
+ * @param obj The object context
+ *
+ * @param grp The SERDES group
+ *
+ * @param lane The SERDES lane within the group
+ *
+ * @param param which tx de-emphasis to change
+ *
+ * @return false in case min is reached. true otherwise.
+ */
+al_bool al_serdes_tx_deemph_dec(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ enum al_serdes_tx_deemph_param param);
+
+/**
+ * run Rx eye measurement.
+ *
+ * @param obj The object context
+ *
+ * @param grp The SERDES group
+ *
+ * @param lane The SERDES lane within the group
+ *
+ * @param timeout timeout in uSec
+ *
+ * @param value Rx eye measurement value
+ * (0 - completely closed eye, 0xffff - completely open eye).
+ *
+ * @return 0 if no error found.
+ */
+int al_serdes_eye_measure_run(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ uint32_t timeout,
+ unsigned int *value);
+
+/**
+ * Eye diagram single sampling
+ *
+ * @param obj The object context
+ *
+ * @param grp The SERDES group
+ *
+ * @param lane The SERDES lane within the group
+ *
+ * @param x Sampling X position (0 - 63 --> -1.00 UI ... 1.00 UI)
+ *
+ * @param y Sampling Y position (0 - 62 --> 500mV ... -500mV)
+ *
+ * @param timeout timeout in uSec
+ *
+ * @param value Eye diagram sample value (BER - 0x0000 - 0xffff)
+ *
+ * @return 0 if no error found.
+ */
+int al_serdes_eye_diag_sample(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ unsigned int x,
+ int y,
+ unsigned int timeout,
+ unsigned int *value);
+
+/**
+ * Check if signal is detected
+ *
+ * @param obj The object context
+ *
+ * @param grp The SERDES group
+ *
+ * @param lane The SERDES lane within the group
+ *
+ * @return true if signal is detected. false otherwise.
+ */
+al_bool al_serdes_signal_is_detected(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane);
+
+
+struct al_serdes_adv_tx_params {
+	/*
+	 * Selects where the TX driver input values are taken from.
+	 * When set to true the values are taken from the internal registers,
+	 * which will be overridden by the following parameters.
+	 * When set to false the values are taken from external pins (the
+	 * other parameters in this struct are then not needed).
+	 */
+	al_bool				override;
+	/*
+	 * Transmit Amplitude control signal. Used to define the full-scale
+	 * maximum swing of the driver.
+	 *	000 - Not Supported
+	 *	001 - 952mVdiff-pkpk
+	 *	010 - 1024mVdiff-pkpk
+	 *	011 - 1094mVdiff-pkpk
+	 *	100 - 1163mVdiff-pkpk
+	 *	101 - 1227mVdiff-pkpk
+	 *	110 - 1283mVdiff-pkpk
+	 *	111 - 1331mVdiff-pkpk
+	 */
+	uint8_t				amp;
+	/* Defines the total number of driver units allocated in the driver */
+	uint8_t				total_driver_units;
+	/* Defines the total number of driver units allocated to the
+	 * first post-cursor (C+1) tap. */
+	uint8_t				c_plus_1;
+	/* Defines the total number of driver units allocated to the
+	 * second post-cursor (C+2) tap. */
+	uint8_t				c_plus_2;
+	/* Defines the total number of driver units allocated to the
+	 * first pre-cursor (C-1) tap. */
+	uint8_t				c_minus_1;
+	/* TX driver Slew Rate control:
+	 *	00 - 31ps
+	 *	01 - 33ps
+	 *	10 - 68ps
+	 *	11 - 170ps
+	 */
+	uint8_t				slew_rate;
+};
+
+struct al_serdes_adv_rx_params {
+	/*
+	 * Selects where the RX equalizer input values are taken from.
+	 * When set to true the values are taken from the internal registers,
+	 * which will be overridden by the following parameters.
+	 * When set to false the values are taken from the equalization
+	 * results (the other parameters in this struct are then not needed).
+	 */
+	al_bool				override;
+	/* RX agc high frequency dc gain:
+	 * -3'b000: -3dB
+	 * -3'b001: -2.5dB
+	 * -3'b010: -2dB
+	 * -3'b011: -1.5dB
+	 * -3'b100: -1dB
+	 * -3'b101: -0.5dB
+	 * -3'b110: -0dB
+	 * -3'b111: 0.5dB
+	 */
+	uint8_t				dcgain;
+	/* DFE post-shaping tap 3dB frequency
+	 * -3'b000: 684MHz
+	 * -3'b001: 576MHz
+	 * -3'b010: 514MHz
+	 * -3'b011: 435MHz
+	 * -3'b100: 354MHz
+	 * -3'b101: 281MHz
+	 * -3'b110: 199MHz
+	 * -3'b111: 125MHz
+	 */
+	uint8_t				dfe_3db_freq;
+	/* DFE post-shaping tap gain
+	 * 0: no pulse shaping tap
+	 * 1: -24mVpeak
+	 * 2: -45mVpeak
+	 * 3: -64mVpeak
+	 * 4: -80mVpeak
+	 * 5: -93mVpeak
+	 * 6: -101mVpeak
+	 * 7: -105mVpeak
+	 */
+	uint8_t				dfe_gain;
+	/* DFE first tap gain control
+	 * -4'b0000: +1mVpeak
+	 * -4'b0001: +10mVpeak
+	 * ....
+	 * -4'b0110: +55mVpeak
+	 * -4'b0111: +64mVpeak
+	 * -4'b1000: -1mVpeak
+	 * -4'b1001: -10mVpeak
+	 * ....
+	 * -4'b1110: -55mVpeak
+	 * -4'b1111: -64mVpeak
+	 */
+	uint8_t				dfe_first_tap_ctrl;
+	/* DFE second tap gain control
+	 * (the "secound" spelling of the field below is part of the shipped API)
+	 * -4'b0000: +0mVpeak
+	 * -4'b0001: +9mVpeak
+	 * ....
+	 * -4'b0110: +46mVpeak
+	 * -4'b0111: +53mVpeak
+	 * -4'b1000: -0mVpeak
+	 * -4'b1001: -9mVpeak
+	 * ....
+	 * -4'b1110: -46mVpeak
+	 * -4'b1111: -53mVpeak
+	 */
+	uint8_t				dfe_secound_tap_ctrl;
+	/* DFE third tap gain control
+	 * -4'b0000: +0mVpeak
+	 * -4'b0001: +7mVpeak
+	 * ....
+	 * -4'b0110: +38mVpeak
+	 * -4'b0111: +44mVpeak
+	 * -4'b1000: -0mVpeak
+	 * -4'b1001: -7mVpeak
+	 * ....
+	 * -4'b1110: -38mVpeak
+	 * -4'b1111: -44mVpeak
+	 */
+	uint8_t				dfe_third_tap_ctrl;
+	/* DFE fourth tap gain control
+	 * -4'b0000: +0mVpeak
+	 * -4'b0001: +6mVpeak
+	 * ....
+	 * -4'b0110: +29mVpeak
+	 * -4'b0111: +33mVpeak
+	 * -4'b1000: -0mVpeak
+	 * -4'b1001: -6mVpeak
+	 * ....
+	 * -4'b1110: -29mVpeak
+	 * -4'b1111: -33mVpeak
+	 */
+	uint8_t				dfe_fourth_tap_ctrl;
+	/* Low frequency agc gain (att) select
+	 * -3'b000: Disconnected
+	 * -3'b001: -18.5dB
+	 * -3'b010: -12.5dB
+	 * -3'b011: -9dB
+	 * -3'b100: -6.5dB
+	 * -3'b101: -4.5dB
+	 * -3'b110: -2.9dB
+	 * -3'b111: -1.6dB
+	 */
+	uint8_t				low_freq_agc_gain;
+	/* Provides a RX Equalizer pre-hint, prior to beginning
+	 * adaptive equalization */
+	uint8_t				precal_code_sel;
+	/* High frequency agc boost control
+	 * Min d0: Boost ~4dB
+	 * Max d31: Boost ~20dB
+	 */
+	uint8_t				high_freq_agc_boost;
+};
+
+/**
+ * configure tx advanced parameters
+ *
+ * @param obj The object context
+ *
+ * @param grp The SERDES group
+ *
+ * @param lane The SERDES lane within the group
+ *
+ * @param params pointer to the tx parameters
+ */
+void al_serdes_tx_advanced_params_set(struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ struct al_serdes_adv_tx_params *params);
+
+/**
+ * read tx advanced parameters
+ *
+ * @param obj The object context
+ *
+ * @param grp The SERDES group
+ *
+ * @param lane The SERDES lane within the group
+ *
+ * @param params pointer to the tx parameters
+ */
+void al_serdes_tx_advanced_params_get(struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ struct al_serdes_adv_tx_params *params);
+
+/**
+ * configure rx advanced parameters
+ *
+ * @param obj The object context
+ *
+ * @param grp The SERDES group
+ *
+ * @param lane The SERDES lane within the group
+ *
+ * @param params pointer to the rx parameters
+ */
+void al_serdes_rx_advanced_params_set(struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ struct al_serdes_adv_rx_params *params);
+
+/**
+ * read rx advanced parameters
+ *
+ * @param obj The object context
+ *
+ * @param grp The SERDES group
+ *
+ * @param lane The SERDES lane within the group
+ *
+ * @param params pointer to the rx parameters
+ */
+void al_serdes_rx_advanced_params_get(struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ struct al_serdes_adv_rx_params* params);
+
+/**
+ * Switch entire SerDes group to SGMII mode based on 156.25 MHz reference clock
+ *
+ * @param obj The object context
+ *
+ * @param grp The SERDES group
+ */
+void al_serdes_mode_set_sgmii(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp);
+
+/**
+ * Switch entire SerDes group to KR mode based on 156.25 MHz reference clock
+ *
+ * @param obj The object context
+ *
+ * @param grp The SERDES group
+ */
+void al_serdes_mode_set_kr(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp);
+
+/**
+ * performs SerDes HW equalization test and update equalization parameters
+ *
+ * @param obj the object context
+ *
+ * @param grp the SERDES group
+ *
+ * @param lane The SERDES lane within the group
+ */
+int al_serdes_rx_equalization(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane);
+
+/**
+ * performs Rx equalization and compute the width and height of the eye
+ *
+ * @param obj the object context
+ *
+ * @param grp the SERDES group
+ *
+ * @param lane The SERDES lane within the group
+ *
+ * @param width the output width of the eye
+ *
+ * @param height the output height of the eye
+ */
+int al_serdes_calc_eye_size(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ int* width,
+ int* height);
+
+/**
+ * SRIS (Separate Refclk Independent SSC) parameters,
+ * consumed by al_serdes_sris_config().
+ */
+struct al_serdes_sris_params {
+	/* Controls the frequency accuracy threshold (ppm) for lock detection CDR.
+	 * NOTE(review): this comment nearly duplicates ppm_drift_max's below;
+	 * given the name, presumably a drift event count rather than a ppm
+	 * threshold - confirm against the implementation. */
+	uint16_t	ppm_drift_count;
+	/* Controls the frequency accuracy threshold (ppm) for lock detection in the CDR */
+	uint16_t	ppm_drift_max;
+	/* Controls the frequency accuracy threshold (ppm) for lock detection in PLL */
+	uint16_t	synth_ppm_drift_max;
+	/* Elastic buffer full threshold for PCIE modes: GEN1/GEN2 */
+	uint8_t		full_d2r1;
+	/* Elastic buffer full threshold for PCIE modes: GEN3 */
+	uint8_t		full_pcie_g3;
+	/* Elastic buffer midpoint threshold.
+	 * Sets the depth of the buffer while in PCIE mode, GEN1/GEN2
+	 */
+	uint8_t		rd_threshold_d2r1;
+	/* Elastic buffer midpoint threshold.
+	 * Sets the depth of the buffer while in PCIE mode, GEN3
+	 */
+	uint8_t		rd_threshold_pcie_g3;
+};
+
+/**
+ * SRIS: Separate Refclk Independent SSC (Spread Spectrum Clocking)
+ * Currently available only for PCIe interfaces.
+ * When working with local Refclk, same SRIS configuration in both serdes sides
+ * (EP and RC in PCIe interface) is required.
+ *
+ * performs SRIS configuration according to params
+ *
+ * @param obj the object context
+ *
+ * @param grp the SERDES group
+ *
+ * @param params the SRIS parameters
+ */
+void al_serdes_sris_config(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ struct al_serdes_sris_params *params);
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+
+/* *INDENT-ON* */
+#endif /* __AL_SRDS__ */
+
+/** @} end of SERDES group */
+
diff --git a/al_hal_serdes_internal_regs.h b/al_hal_serdes_internal_regs.h
new file mode 100644
index 000000000000..8f53469bc4cf
--- /dev/null
+++ b/al_hal_serdes_internal_regs.h
@@ -0,0 +1,750 @@
+/*-
+*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#ifndef __AL_SERDES_INTERNAL_REGS_H__
+#define __AL_SERDES_INTERNAL_REGS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*******************************************************************************
+ * Per lane register fields
+ ******************************************************************************/
+/*
+ * RX and TX lane hard reset
+ * 0 - Hard reset is asserted
+ * 1 - Hard reset is de-asserted
+ */
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_REG_NUM 2
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_MASK 0x01
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_VAL_ASSERT 0x00
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_VAL_DEASSERT 0x01
+
+/*
+ * RX and TX lane hard reset control
+ * 0 - Hard reset is taken from the interface pins
+ * 1 - Hard reset is taken from registers
+ */
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_REG_NUM 2
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_MASK 0x02
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_VAL_IFACE 0x00
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_VAL_REGS 0x02
+
+/* RX lane power state control */
+#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_REG_NUM 3
+#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_MASK 0x1f
+#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_PD 0x01
+#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P2 0x02
+#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P1 0x04
+#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P0S 0x08
+#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P0 0x10
+
+/* TX lane power state control */
+#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_REG_NUM 4
+#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_MASK 0x1f
+#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_PD 0x01
+#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P2 0x02
+#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P1 0x04
+#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P0S 0x08
+#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P0 0x10
+
+/* RX lane word width */
+#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_REG_NUM 5
+#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_MASK 0x07
+#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_8 0x00
+#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_10 0x01
+#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_16 0x02
+#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_20 0x03
+#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_32 0x04
+#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_40 0x05
+
+/* TX lane word width */
+#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_REG_NUM 5
+#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_MASK 0x70
+#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_8 0x00
+#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_10 0x10
+#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_16 0x20
+#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_20 0x30
+#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_32 0x40
+#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_40 0x50
+
+/* RX lane rate select */
+#define SERDES_IREG_FLD_PCSRX_DIVRATE_REG_NUM 6
+#define SERDES_IREG_FLD_PCSRX_DIVRATE_MASK 0x07
+#define SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_8 0x00
+#define SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_4 0x01
+#define SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_2 0x02
+#define SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_1 0x03
+
+/* TX lane rate select */
+#define SERDES_IREG_FLD_PCSTX_DIVRATE_REG_NUM 6
+#define SERDES_IREG_FLD_PCSTX_DIVRATE_MASK 0x70
+#define SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_8 0x00
+#define SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_4 0x10
+#define SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_2 0x20
+#define SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_1 0x30
+
+/*
+ * PMA serial RX-to-TX loop-back enable (from AGC to IO Driver). Serial receive
+ * to transmit loopback: 0 - Disables loopback 1 - Transmits the untimed,
+ * partial equalized RX signal out the transmit IO pins
+ */
+#define SERDES_IREG_FLD_LB_RX2TXUNTIMEDEN_REG_NUM 7
+#define SERDES_IREG_FLD_LB_RX2TXUNTIMEDEN 0x10
+
+/*
+ * PMA TX-to-RX buffered serial loop-back enable (bypasses IO Driver). Serial
+ * transmit to receive buffered loopback: 0 - Disables loopback 1 - Loops back
+ * the TX serializer output into the CDR
+ */
+#define SERDES_IREG_FLD_LB_TX2RXBUFTIMEDEN_REG_NUM 7
+#define SERDES_IREG_FLD_LB_TX2RXBUFTIMEDEN 0x20
+
+/*
+ * PMA TX-to-RX I/O serial loop-back enable (loop back done directly from TX to
+ * RX pads). Serial IO loopback from the transmit lane IO pins to the receive
+ * lane IO pins: 0 - Disables loopback 1 - Loops back the driver IO signal to
+ * the RX IO pins
+ */
+#define SERDES_IREG_FLD_LB_TX2RXIOTIMEDEN_REG_NUM 7
+#define SERDES_IREG_FLD_LB_TX2RXIOTIMEDEN 0x40
+
+/*
+ * PMA Parallel RX-to-TX loop-back enable. Parallel loopback from the PMA
+ * receive lane 20-bit data ports, to the transmit lane 20-bit data ports 0 -
+ * Disables loopback 1 - Loops back the 20-bit receive data port to the
+ * transmitter
+ */
+#define SERDES_IREG_FLD_LB_PARRX2TXTIMEDEN_REG_NUM 7
+#define SERDES_IREG_FLD_LB_PARRX2TXTIMEDEN 0x80
+
+/*
+ * PMA CDR recovered-clock loopback enable; asserted when PARRX2TXTIMEDEN is 1.
+ * Transmit bit clock select: 0 - Selects synthesizer bit clock for transmit 1
+ * - Selects CDR clock for transmit
+ */
+#define SERDES_IREG_FLD_LB_CDRCLK2TXEN_REG_NUM 7
+#define SERDES_IREG_FLD_LB_CDRCLK2TXEN 0x01
+
+/* Receive lane BIST enable. Active High */
+#define SERDES_IREG_FLD_PCSRXBIST_EN_REG_NUM 8
+#define SERDES_IREG_FLD_PCSRXBIST_EN 0x01
+
+/* TX lane BIST enable. Active High */
+#define SERDES_IREG_FLD_PCSTXBIST_EN_REG_NUM 8
+#define SERDES_IREG_FLD_PCSTXBIST_EN 0x02
+
+/*
+ * RX BIST completion signal 0 - Indicates test is not completed 1 - Indicates
+ * the test has completed, and will remain high until a new test is initiated
+ */
+#define SERDES_IREG_FLD_RXBIST_DONE_REG_NUM 8
+#define SERDES_IREG_FLD_RXBIST_DONE 0x04
+
+/*
+ * RX BIST error count overflow indicator. Indicates an overflow in the number
+ * of byte errors identified during the course of the test. This word is stable
+ * to sample when *_DONE_* signal has asserted
+ */
+#define SERDES_IREG_FLD_RXBIST_ERRCOUNT_OVERFLOW_REG_NUM 8
+#define SERDES_IREG_FLD_RXBIST_ERRCOUNT_OVERFLOW 0x08
+
+/*
+ * RX BIST locked indicator 0 - Indicates BIST is not word locked and error
+ * comparisons have not begun yet 1 - Indicates BIST is word locked and error
+ * comparisons have begun
+ */
+#define SERDES_IREG_FLD_RXBIST_RXLOCKED_REG_NUM 8
+#define SERDES_IREG_FLD_RXBIST_RXLOCKED 0x10
+
+/*
+ * RX BIST error count word. Indicates the number of byte errors identified
+ * during the course of the test. This word is stable to sample when *_DONE_*
+ * signal has asserted
+ */
+#define SERDES_IREG_FLD_RXBIST_ERRCOUNT_MSB_REG_NUM 9
+#define SERDES_IREG_FLD_RXBIST_ERRCOUNT_LSB_REG_NUM 10
+
+/* Tx params */
+#define SERDES_IREG_TX_DRV_1_REG_NUM 21
+#define SERDES_IREG_TX_DRV_1_HLEV_MASK 0x7
+#define SERDES_IREG_TX_DRV_1_HLEV_SHIFT 0
+#define SERDES_IREG_TX_DRV_1_LEVN_MASK 0xf8
+#define SERDES_IREG_TX_DRV_1_LEVN_SHIFT 3
+
+#define SERDES_IREG_TX_DRV_2_REG_NUM 22
+#define SERDES_IREG_TX_DRV_2_LEVNM1_MASK 0xf
+#define SERDES_IREG_TX_DRV_2_LEVNM1_SHIFT 0
+#define SERDES_IREG_TX_DRV_2_LEVNM2_MASK 0x30
+#define SERDES_IREG_TX_DRV_2_LEVNM2_SHIFT 4
+
+#define SERDES_IREG_TX_DRV_3_REG_NUM 23
+#define SERDES_IREG_TX_DRV_3_LEVNP1_MASK 0x7
+#define SERDES_IREG_TX_DRV_3_LEVNP1_SHIFT 0
+#define SERDES_IREG_TX_DRV_3_SLEW_MASK 0x18
+#define SERDES_IREG_TX_DRV_3_SLEW_SHIFT 3
+
+/* Rx params */
+#define SERDES_IREG_RX_CALEQ_1_REG_NUM 24
+#define SERDES_IREG_RX_CALEQ_1_DCGAIN_MASK 0x7
+#define SERDES_IREG_RX_CALEQ_1_DCGAIN_SHIFT 0
+/* DFE post-shaping tap 3dB frequency */
+#define SERDES_IREG_RX_CALEQ_1_DFEPSTAP3DB_MASK 0x38
+#define SERDES_IREG_RX_CALEQ_1_DFEPSTAP3DB_SHIFT 3
+
+#define SERDES_IREG_RX_CALEQ_2_REG_NUM 25
+/* DFE post-shaping tap gain */
+#define SERDES_IREG_RX_CALEQ_2_DFEPSTAPGAIN_MASK 0x7
+#define SERDES_IREG_RX_CALEQ_2_DFEPSTAPGAIN_SHIFT 0
+/* DFE first tap gain control */
+#define SERDES_IREG_RX_CALEQ_2_DFETAP1GAIN_MASK 0x78
+#define SERDES_IREG_RX_CALEQ_2_DFETAP1GAIN_SHIFT 3
+
+#define SERDES_IREG_RX_CALEQ_3_REG_NUM 26
+#define SERDES_IREG_RX_CALEQ_3_DFETAP2GAIN_MASK 0xf
+#define SERDES_IREG_RX_CALEQ_3_DFETAP2GAIN_SHIFT 0
+#define SERDES_IREG_RX_CALEQ_3_DFETAP3GAIN_MASK 0xf0
+#define SERDES_IREG_RX_CALEQ_3_DFETAP3GAIN_SHIFT 4
+
+#define SERDES_IREG_RX_CALEQ_4_REG_NUM 27
+#define SERDES_IREG_RX_CALEQ_4_DFETAP4GAIN_MASK 0xf
+#define SERDES_IREG_RX_CALEQ_4_DFETAP4GAIN_SHIFT 0
+#define SERDES_IREG_RX_CALEQ_4_LOFREQAGCGAIN_MASK 0x70
+#define SERDES_IREG_RX_CALEQ_4_LOFREQAGCGAIN_SHIFT 4
+
+#define SERDES_IREG_RX_CALEQ_5_REG_NUM 28
+#define SERDES_IREG_RX_CALEQ_5_PRECAL_CODE_SEL_MASK 0x7
+#define SERDES_IREG_RX_CALEQ_5_PRECAL_CODE_SEL_SHIFT 0
+#define SERDES_IREG_RX_CALEQ_5_HIFREQAGCCAP_MASK 0xf8
+#define SERDES_IREG_RX_CALEQ_5_HIFREQAGCCAP_SHIFT 3
+
+/* RX lane best eye point measurement result */
+#define SERDES_IREG_RXEQ_BEST_EYE_MSB_VAL_REG_NUM 29
+#define SERDES_IREG_RXEQ_BEST_EYE_LSB_VAL_REG_NUM 30
+#define SERDES_IREG_RXEQ_BEST_EYE_LSB_VAL_MASK 0x3F
+
+/*
+ * Adaptive RX Equalization enable
+ * 0 - Disables adaptive RX equalization.
+ * 1 - Enables adaptive RX equalization.
+ */
+#define SERDES_IREG_FLD_PCSRXEQ_START_REG_NUM 31
+#define SERDES_IREG_FLD_PCSRXEQ_START (1 << 0)
+
+/*
+ * Enables an eye diagram measurement
+ * within the PHY.
+ * 0 - Disables eye diagram measurement
+ * 1 - Enables eye diagram measurement
+ */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START_REG_NUM 31
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START (1 << 1)
+
+
+/*
+ * RX lane single roam eye point measurement start signal.
+ * If asserted, single measurement at fix XADJUST and YADJUST is started.
+ */
+#define SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_REG_NUM 31
+#define SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_START (1 << 2)
+
+
+/*
+ * PHY Eye diagram measurement status
+ * signal
+ * 0 - Indicates eye diagram results are not
+ * valid for sampling
+ * 1 - Indicates eye diagram is complete and
+ * results are valid for sampling
+ */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE_REG_NUM 32
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE (1 << 0)
+
+/*
+ * Eye diagram error signal. Indicates if the
+ * measurement was invalid because the eye
+ * diagram was interrupted by the link entering
+ * electrical idle.
+ * 0 - Indicates eye diagram is valid
+ * 1- Indicates an error occurred, and the eye
+ * diagram measurement should be re-run
+ */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_ERR_REG_NUM 32
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_ERR (1 << 1)
+
+/*
+ * PHY Adaptive Equalization status
+ * 0 - Indicates Adaptive Equalization results are not valid for sampling
+ * 1 - Indicates Adaptive Equalization is complete and results are valid for
+ * sampling
+ */
+#define SERDES_IREG_FLD_RXCALROAMEYEMEASDONE_REG_NUM 32
+#define SERDES_IREG_FLD_RXCALROAMEYEMEASDONE (1 << 2)
+
+/*
+ * PHY Adaptive Equalization Status Signal
+ * 0 - Indicates adaptive equalization results
+ * are not valid for sampling
+ * 1 - Indicates adaptive equalization is
+ * complete and results are valid for sampling.
+ */
+#define SERDES_IREG_FLD_RXEQ_DONE_REG_NUM 32
+#define SERDES_IREG_FLD_RXEQ_DONE (1 << 3)
+
+
+/*
+ * 7-bit eye diagram time adjust control
+ * - 6-bits per UI
+ * - spans 2 UI
+ */
+#define SERDES_IREG_FLD_RXCALROAMXADJUST_REG_NUM 33
+
+/* 6-bit eye diagram voltage adjust control - spans +/-300mVdiff */
+#define SERDES_IREG_FLD_RXCALROAMYADJUST_REG_NUM 34
+
+/*
+ * Eye diagram status signal. Safe for
+ * sampling when *DONE* signal has
+ * asserted
+ * 14'h0000 - Completely Closed Eye
+ * 14'h3FFF - Completely Open Eye
+ * NOTE(review): original said 14'hFFFF, inconsistent with a 14-bit value
+ * (MSB mask 0xFF + LSB mask 0x3F below = 14 bits) - confirm actual range.
+ */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB_REG_NUM 35
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB_MAKE 0xFF
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB_SHIFT 0
+
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_REG_NUM 36
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_MAKE 0x3F
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_SHIFT 0
+
+/*
+ * RX lane single roam eye point measurement result.
+ * If 0, eye is open at current XADJUST and YADJUST settings.
+ */
+#define SERDES_IREG_FLD_RXCALROAMEYEMEAS_ACC_MSB_REG_NUM 37
+#define SERDES_IREG_FLD_RXCALROAMEYEMEAS_ACC_LSB_REG_NUM 38
+
+/*
+ * Override enable for CDR lock to reference clock
+ * 0 - CDR is always locked to reference
+ * 1 - CDR operation mode (Lock2Reference or Lock2data are controlled internally
+ * depending on the incoming signal and ppm status)
+ */
+#define SERDES_IREG_FLD_RXLOCK2REF_OVREN_REG_NUM 39
+#define SERDES_IREG_FLD_RXLOCK2REF_OVREN (1 << 1)
+
+/*
+ * Selects Eye to capture based on edge
+ * 0 - Capture 1st Eye in Eye Diagram
+ * 1 - Capture 2nd Eye in Eye Diagram measurement
+ */
+#define SERDES_IREG_FLD_RXROAM_XORBITSEL_REG_NUM 39
+#define SERDES_IREG_FLD_RXROAM_XORBITSEL (1 << 2)
+#define SERDES_IREG_FLD_RXROAM_XORBITSEL_1ST 0
+#define SERDES_IREG_FLD_RXROAM_XORBITSEL_2ND (1 << 2)
+
+/*
+ * RX Signal detect. 0 indicates no signal, 1 indicates signal detected.
+ */
+#define SERDES_IREG_FLD_RXRANDET_REG_NUM 41
+#define SERDES_IREG_FLD_RXRANDET_STAT 0x20
+
+/*
+ * RX data polarity inversion control:
+ * 1'b0: no inversion
+ * 1'b1: invert polarity
+ */
+#define SERDES_IREG_FLD_POLARITY_RX_REG_NUM 46
+#define SERDES_IREG_FLD_POLARITY_RX_INV (1 << 0)
+
+/*
+ * TX data polarity inversion control:
+ * 1'b0: no inversion
+ * 1'b1: invert polarity
+ */
+#define SERDES_IREG_FLD_POLARITY_TX_REG_NUM 46
+#define SERDES_IREG_FLD_POLARITY_TX_INV (1 << 1)
+
+/* LANEPCSPSTATE* override enable (Active low) */
+#define SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN_REG_NUM 85
+#define SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN (1 << 0)
+
+/* LB* override enable (Active low) */
+#define SERDES_IREG_FLD_LB_LOCWREN_REG_NUM 85
+#define SERDES_IREG_FLD_LB_LOCWREN (1 << 1)
+
+/* PCSRX* override enable (Active low) */
+#define SERDES_IREG_FLD_PCSRX_LOCWREN_REG_NUM 85
+#define SERDES_IREG_FLD_PCSRX_LOCWREN (1 << 4)
+
+/* PCSRXBIST* override enable (Active low) */
+#define SERDES_IREG_FLD_PCSRXBIST_LOCWREN_REG_NUM 85
+#define SERDES_IREG_FLD_PCSRXBIST_LOCWREN (1 << 5)
+
+/* PCSRXEQ* override enable (Active low) */
+#define SERDES_IREG_FLD_PCSRXEQ_LOCWREN_REG_NUM 85
+#define SERDES_IREG_FLD_PCSRXEQ_LOCWREN (1 << 6)
+
+/* PCSTX* override enable (Active low) */
+#define SERDES_IREG_FLD_PCSTX_LOCWREN_REG_NUM 85
+#define SERDES_IREG_FLD_PCSTX_LOCWREN (1 << 7)
+
+/*
+ * group registers:
+ * SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN,
+ * SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN
+ * SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN
+ */
+#define SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM 86
+
+/* PCSTXBIST* override enable (Active low) */
+#define SERDES_IREG_FLD_PCSTXBIST_LOCWREN_REG_NUM 86
+#define SERDES_IREG_FLD_PCSTXBIST_LOCWREN (1 << 0)
+
+/* Override RX_CALCEQ through the internal registers (Active low) */
+#define SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN_REG_NUM 86
+#define SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN (1 << 3)
+
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN_REG_NUM 86
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN (1 << 4)
+
+
+/* RXCALROAMEYEMEASIN* override enable - Active Low */
+#define SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN_REG_NUM 86
+#define SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN (1 << 6)
+
+/* RXCALROAMXADJUST* override enable - Active Low */
+#define SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN_REG_NUM 86
+#define SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN (1 << 7)
+
+/* RXCALROAMYADJUST* override enable - Active Low */
+#define SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM 87
+#define SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN (1 << 0)
+
+/* RXCDRCALFOSC* override enable. Active Low */
+#define SERDES_IREG_FLD_RXCDRCALFOSC_LOCWREN_REG_NUM 87
+#define SERDES_IREG_FLD_RXCDRCALFOSC_LOCWREN (1 << 1)
+
+/* Over-write enable for RXEYEDIAGFSM_INITXVAL */
+#define SERDES_IREG_FLD_RXEYEDIAGFSM_LOCWREN_REG_NUM 87
+#define SERDES_IREG_FLD_RXEYEDIAGFSM_LOCWREN (1 << 2)
+
+/* Over-write enable for CMNCLKGENMUXSEL_TXINTERNAL */
+#define SERDES_IREG_FLD_RXTERMHIZ_LOCWREN_REG_NUM 87
+#define SERDES_IREG_FLD_RXTERMHIZ_LOCWREN (1 << 3)
+
+/* TXCALTCLKDUTY* override enable. Active Low */
+#define SERDES_IREG_FLD_TXCALTCLKDUTY_LOCWREN_REG_NUM 87
+#define SERDES_IREG_FLD_TXCALTCLKDUTY_LOCWREN (1 << 4)
+
+/* Override TX_DRV through the internal registers (Active low) */
+#define SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN_REG_NUM 87
+#define SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN (1 << 5)
+
+/*******************************************************************************
+ * Common lane register fields - PMA
+ ******************************************************************************/
+/*
+ * Common lane hard reset control
+ * 0 - Hard reset is taken from the interface pins
+ * 1 - Hard reset is taken from registers
+ */
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_REG_NUM 2
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_MASK 0x01
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_VAL_IFACE 0x00
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_VAL_REGS 0x01
+
+/*
+ * Common lane hard reset
+ * 0 - Hard reset is asserted
+ * 1 - Hard reset is de-asserted
+ */
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_REG_NUM 2
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_MASK 0x02
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_VAL_ASSERT 0x00
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_VAL_DEASSERT 0x02
+
+/* Synth power state control */
+#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_REG_NUM 3
+#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_MASK 0x1f
+#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_PD 0x01
+#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P2 0x02
+#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P1 0x04
+#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P0S 0x08
+#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P0 0x10
+
+/* Transmit datapath FIFO enable (Active High) */
+#define SERDES_IREG_FLD_CMNPCS_TXENABLE_REG_NUM 8
+#define SERDES_IREG_FLD_CMNPCS_TXENABLE (1 << 2)
+
+/*
+ * RX loss-of-signal (LOS) detector enable
+ * - 0 - disable
+ * - 1 - enable
+ */
+#define SERDES_IREG_FLD_RXLOSDET_ENABLE_REG_NUM 13
+#define SERDES_IREG_FLD_RXLOSDET_ENABLE AL_BIT(4)
+
+/* Signal Detect Threshold Level */
+#define SERDES_IREG_FLD_RXELECIDLE_SIGDETTHRESH_REG_NUM 15
+#define SERDES_IREG_FLD_RXELECIDLE_SIGDETTHRESH_MASK AL_FIELD_MASK(2, 0)
+
+/* LOS Detect Threshold Level */
+#define SERDES_IREG_FLD_RXLOSDET_THRESH_REG_NUM 15
+#define SERDES_IREG_FLD_RXLOSDET_THRESH_MASK AL_FIELD_MASK(4, 3)
+#define SERDES_IREG_FLD_RXLOSDET_THRESH_SHIFT 3
+
+#define SERDES_IREG_FLD_RXEQ_COARSE_ITER_NUM_REG_NUM 30
+#define SERDES_IREG_FLD_RXEQ_COARSE_ITER_NUM_MASK 0x7f
+#define SERDES_IREG_FLD_RXEQ_COARSE_ITER_NUM_SHIFT 0
+
+#define SERDES_IREG_FLD_RXEQ_FINE_ITER_NUM_REG_NUM 31
+#define SERDES_IREG_FLD_RXEQ_FINE_ITER_NUM_MASK 0x7f
+#define SERDES_IREG_FLD_RXEQ_FINE_ITER_NUM_SHIFT 0
+
+#define SERDES_IREG_FLD_RXEQ_COARSE_RUN1_MASK_REG_NUM 32
+#define SERDES_IREG_FLD_RXEQ_COARSE_RUN1_MASK_MASK 0xff
+#define SERDES_IREG_FLD_RXEQ_COARSE_RUN1_MASK_SHIFT 0
+
+#define SERDES_IREG_FLD_RXEQ_COARSE_RUN2_MASK_REG_NUM 33
+#define SERDES_IREG_FLD_RXEQ_COARSE_RUN2_MASK_MASK 0x1
+#define SERDES_IREG_FLD_RXEQ_COARSE_RUN2_MASK_SHIFT 0
+
+#define SERDES_IREG_FLD_RXEQ_COARSE_STEP_REG_NUM 33
+#define SERDES_IREG_FLD_RXEQ_COARSE_STEP_MASK 0x3e
+#define SERDES_IREG_FLD_RXEQ_COARSE_STEP_SHIFT 1
+
+#define SERDES_IREG_FLD_RXEQ_FINE_RUN1_MASK_REG_NUM 34
+#define SERDES_IREG_FLD_RXEQ_FINE_RUN1_MASK_MASK 0xff
+#define SERDES_IREG_FLD_RXEQ_FINE_RUN1_MASK_SHIFT 0
+
+#define SERDES_IREG_FLD_RXEQ_FINE_RUN2_MASK_REG_NUM 35
+#define SERDES_IREG_FLD_RXEQ_FINE_RUN2_MASK_MASK 0x1
+#define SERDES_IREG_FLD_RXEQ_FINE_RUN2_MASK_SHIFT 0
+
+#define SERDES_IREG_FLD_RXEQ_FINE_STEP_REG_NUM 35
+#define SERDES_IREG_FLD_RXEQ_FINE_STEP_MASK 0x3e
+#define SERDES_IREG_FLD_RXEQ_FINE_STEP_SHIFT 1
+
+#define SERDES_IREG_FLD_RXEQ_LOOKUP_CODE_EN_REG_NUM 36
+#define SERDES_IREG_FLD_RXEQ_LOOKUP_CODE_EN_MASK 0xff
+#define SERDES_IREG_FLD_RXEQ_LOOKUP_CODE_EN_SHIFT 0
+
+#define SERDES_IREG_FLD_RXEQ_LOOKUP_LASTCODE_REG_NUM 37
+#define SERDES_IREG_FLD_RXEQ_LOOKUP_LASTCODE_MASK 0x7
+#define SERDES_IREG_FLD_RXEQ_LOOKUP_LASTCODE_SHIFT 0
+
+#define SERDES_IREG_FLD_RXEQ_DCGAIN_LUP0_REG_NUM 43
+#define SERDES_IREG_FLD_RXEQ_DCGAIN_LUP0_MASK 0x7
+#define SERDES_IREG_FLD_RXEQ_DCGAIN_LUP0_SHIFT 0
+
+#define SERDES_IREG_FLD_TX_BIST_PAT_REG_NUM(byte_num) (56 + (byte_num))
+#define SERDES_IREG_FLD_TX_BIST_PAT_NUM_BYTES 10
+
+/*
+ * Selects the transmit BIST mode:
+ * 0 - Uses the 80-bit internal memory pattern (w/ OOB)
+ * 1 - Uses a 2^7 PRBS pattern
+ * 2 - Uses a 2^23 PRBS pattern
+ * 3 - Uses a 2^31 PRBS pattern
+ * 4 - Uses a 1010 clock pattern
+ * 5 and above - Reserved
+ */
+#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_REG_NUM 80
+#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_MASK 0x07
+#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_USER 0x00
+#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS7 0x01
+#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS23 0x02
+#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS31 0x03
+#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_CLK1010 0x04
+
+/* Single-Bit error injection enable (on posedge) */
+#define SERDES_IREG_FLD_TXBIST_BITERROR_EN_REG_NUM 80
+#define SERDES_IREG_FLD_TXBIST_BITERROR_EN 0x20
+
+/* CMNPCIEGEN3* override enable (Active Low) */
+#define SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN_REG_NUM 95
+#define SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN (1 << 2)
+
+/* CMNPCS* override enable (Active Low) */
+#define SERDES_IREG_FLD_CMNPCS_LOCWREN_REG_NUM 95
+#define SERDES_IREG_FLD_CMNPCS_LOCWREN (1 << 3)
+
+/* CMNPCSBIST* override enable (Active Low) */
+#define SERDES_IREG_FLD_CMNPCSBIST_LOCWREN_REG_NUM 95
+#define SERDES_IREG_FLD_CMNPCSBIST_LOCWREN (1 << 4)
+
+/* CMNPCSPSTATE* override enable (Active Low) */
+#define SERDES_IREG_FLD_CMNPCSPSTATE_LOCWREN_REG_NUM 95
+#define SERDES_IREG_FLD_CMNPCSPSTATE_LOCWREN (1 << 5)
+
+/* PCS_EN* override enable (Active Low) */
+#define SERDES_IREG_FLD_PCS_LOCWREN_REG_NUM 96
+#define SERDES_IREG_FLD_PCS_LOCWREN (1 << 3)
+
+/* Eye diagram sample count */
+#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_REG_NUM 150
+#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_MASK 0xff
+#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_SHIFT 0
+
+#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_REG_NUM 151
+#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_MASK 0xff
+#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_SHIFT 0
+
+/* RXLOCK2REF* override control (presumably active low like the other *_LOCWREN overrides) */
+#define SERDES_IREG_FLD_RXLOCK2REF_LOCWREN_REG_NUM 230
+#define SERDES_IREG_FLD_RXLOCK2REF_LOCWREN (1 << 0)
+
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD1_REG_NUM 623
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD1_MASK 0xff
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD1_SHIFT 0
+
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD2_REG_NUM 624
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD2_MASK 0xff
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD2_SHIFT 0
+
+/* X and Y coefficient return value */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_X_Y_VALWEIGHT_REG_NUM 626
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALWEIGHT_MASK 0x0F
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALWEIGHT_SHIFT 0
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALWEIGHT_MASK 0xF0
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALWEIGHT_SHIFT 4
+
+/* X coarse scan step */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_REG_NUM 627
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_MASK 0x7F
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_SHIFT 0
+
+/* X fine scan step */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_REG_NUM 628
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_MASK 0x7F
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_SHIFT 0
+
+/* Y coarse scan step */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_REG_NUM 629
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_MASK 0x0F
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_SHIFT 0
+
+/* Y fine scan step */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_REG_NUM 630
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_MASK 0x0F
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_SHIFT 0
+
+#define SERDES_IREG_FLD_PPMDRIFTCOUNT1_REG_NUM 157
+
+#define SERDES_IREG_FLD_PPMDRIFTCOUNT2_REG_NUM 158
+
+#define SERDES_IREG_FLD_PPMDRIFTMAX1_REG_NUM 159
+
+#define SERDES_IREG_FLD_PPMDRIFTMAX2_REG_NUM 160
+
+#define SERDES_IREG_FLD_SYNTHPPMDRIFTMAX1_REG_NUM 163
+
+#define SERDES_IREG_FLD_SYNTHPPMDRIFTMAX2_REG_NUM 164
+
+/*******************************************************************************
+ * Common lane register fields - PCS
+ ******************************************************************************/
+#define SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_REG_NUM 3
+#define SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_MASK AL_FIELD_MASK(5, 4)
+#define SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_SHIFT 4
+
+#define SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_ENA_REG_NUM 6
+#define SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_ENA AL_BIT(2)
+
+#define SERDES_IREG_FLD_PCS_EBUF_FULL_D2R1_REG_NUM 18
+#define SERDES_IREG_FLD_PCS_EBUF_FULL_D2R1_REG_MASK 0x1F
+#define SERDES_IREG_FLD_PCS_EBUF_FULL_D2R1_REG_SHIFT 0
+
+#define SERDES_IREG_FLD_PCS_EBUF_FULL_PCIE_G3_REG_NUM 19
+#define SERDES_IREG_FLD_PCS_EBUF_FULL_PCIE_G3_REG_MASK 0x7C
+#define SERDES_IREG_FLD_PCS_EBUF_FULL_PCIE_G3_REG_SHIFT 2
+
+#define SERDES_IREG_FLD_PCS_EBUF_RD_THRESHOLD_D2R1_REG_NUM 20
+#define SERDES_IREG_FLD_PCS_EBUF_RD_THRESHOLD_D2R1_REG_MASK 0x1F
+#define SERDES_IREG_FLD_PCS_EBUF_RD_THRESHOLD_D2R1_REG_SHIFT 0
+
+#define SERDES_IREG_FLD_PCS_EBUF_RD_THRESHOLD_PCIE_G3_REG_NUM 21
+#define SERDES_IREG_FLD_PCS_EBUF_RD_THRESHOLD_PCIE_G3_REG_MASK 0x7C
+#define SERDES_IREG_FLD_PCS_EBUF_RD_THRESHOLD_PCIE_G3_REG_SHIFT 2
+
+#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_ITER_NUM_REG_NUM 22
+#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_ITER_NUM_MASK 0x7f
+#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_ITER_NUM_SHIFT 0
+
+#define SERDES_IREG_FLD_PCS_RXEQ_FINE_ITER_NUM_REG_NUM 34
+#define SERDES_IREG_FLD_PCS_RXEQ_FINE_ITER_NUM_MASK 0x7f
+#define SERDES_IREG_FLD_PCS_RXEQ_FINE_ITER_NUM_SHIFT 0
+
+#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_RUN1_MASK_REG_NUM 23
+#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_RUN1_MASK_MASK 0xff
+#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_RUN1_MASK_SHIFT 0
+
+#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_RUN2_MASK_REG_NUM 22
+#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_RUN2_MASK_MASK 0x80
+#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_RUN2_MASK_SHIFT 7
+
+#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_STEP_REG_NUM 24
+#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_STEP_MASK 0x3e
+#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_STEP_SHIFT 1
+
+#define SERDES_IREG_FLD_PCS_RXEQ_FINE_RUN1_MASK_REG_NUM 35
+#define SERDES_IREG_FLD_PCS_RXEQ_FINE_RUN1_MASK_MASK 0xff
+#define SERDES_IREG_FLD_PCS_RXEQ_FINE_RUN1_MASK_SHIFT 0
+
+#define SERDES_IREG_FLD_PCS_RXEQ_FINE_RUN2_MASK_REG_NUM 34
+#define SERDES_IREG_FLD_PCS_RXEQ_FINE_RUN2_MASK_MASK 0x80
+#define SERDES_IREG_FLD_PCS_RXEQ_FINE_RUN2_MASK_SHIFT 7
+
+#define SERDES_IREG_FLD_PCS_RXEQ_FINE_STEP_REG_NUM 36
+#define SERDES_IREG_FLD_PCS_RXEQ_FINE_STEP_MASK 0x1f
+#define SERDES_IREG_FLD_PCS_RXEQ_FINE_STEP_SHIFT 0
+
+#define SERDES_IREG_FLD_PCS_RXEQ_LOOKUP_CODE_EN_REG_NUM 37
+#define SERDES_IREG_FLD_PCS_RXEQ_LOOKUP_CODE_EN_MASK 0xff
+#define SERDES_IREG_FLD_PCS_RXEQ_LOOKUP_CODE_EN_SHIFT 0
+
+#define SERDES_IREG_FLD_PCS_RXEQ_LOOKUP_LASTCODE_REG_NUM 36
+#define SERDES_IREG_FLD_PCS_RXEQ_LOOKUP_LASTCODE_MASK 0xe0
+#define SERDES_IREG_FLD_PCS_RXEQ_LOOKUP_LASTCODE_SHIFT 5
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_serdes_REG_H */
+
diff --git a/al_hal_serdes_regs.h b/al_hal_serdes_regs.h
new file mode 100644
index 000000000000..1af7a918e215
--- /dev/null
+++ b/al_hal_serdes_regs.h
@@ -0,0 +1,495 @@
+/*-
+*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @{
+ * @file al_hal_serdes_regs.h
+ *
+ * @brief SerDes registers
+ *
+ */
+
+#ifndef __AL_HAL_SERDES_REGS_H__
+#define __AL_HAL_SERDES_REGS_H__
+
+#include "al_hal_plat_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+* Unit Registers
+*/
+
+/*
+ * SerDes general (per-unit) registers, located at offset 0x100 of the unit
+ * (see struct al_serdes_regs).  The reg_addr/reg_data pair provides indirect
+ * access to the SerDes internal register file.
+ */
+struct serdes_gen {
+	/* [0x0] SerDes Registers Version */
+	uint32_t version;
+	uint32_t rsrvd_0[3];
+	/*
+	 * [0x10] SerDes register file address (16-bit value, see
+	 * SERDES_GEN_REG_ADDR_VAL_MASK)
+	 */
+	uint32_t reg_addr;
+	/*
+	 * [0x14] SerDes register file data (8-bit value, see
+	 * SERDES_GEN_REG_DATA_VAL_MASK)
+	 */
+	uint32_t reg_data;
+	uint32_t rsrvd_1[2];
+	/* [0x20] SerDes control - multi-lane BIST mode select */
+	uint32_t ictl_multi_bist;
+	/* [0x24] SerDes control - PCS enable */
+	uint32_t ictl_pcs;
+	/* [0x28] SerDes control - PMA reference clock selection/bypass */
+	uint32_t ictl_pma;
+	uint32_t rsrvd_2;
+	/* [0x30] SerDes control - synthesizer power down */
+	uint32_t ipd_multi_synth;
+	/* [0x34] SerDes control - reset control (pipe/hard/POR bits) */
+	uint32_t irst;
+	/* [0x38] SerDes control - synthesizer ready indication */
+	uint32_t octl_multi_synthready;
+	/* [0x3c] SerDes control - synthesizer status indication */
+	uint32_t octl_multi_synthstatus;
+	/* [0x40] SerDes control - output clock select */
+	uint32_t clk_out;
+	/* Pad the structure to 0x100 bytes (64 words total) */
+	uint32_t rsrvd[47];
+};
+/*
+ * SerDes per-lane registers; one instance per lane, located at offset
+ * 0x200 + 0x80 * lane within the unit (see struct al_serdes_regs).
+ * Naming suggests ictl_, idat_ and ipd_ registers are control inputs while
+ * octl_ and odat_ registers are status outputs - see the SERDES_LANE_*
+ * field definitions below.
+ */
+struct serdes_lane {
+	uint32_t rsrvd1[4];
+	/* [0x10] SerDes status - lane TX status */
+	uint32_t octl_pma;
+	/* [0x14] SerDes control */
+	uint32_t ictl_multi_andme;
+	/* [0x18] SerDes control - loopback enables */
+	uint32_t ictl_multi_lb;
+	/* [0x1c] SerDes control - RX BIST enable */
+	uint32_t ictl_multi_rxbist;
+	/* [0x20] SerDes control - TX BIST enable */
+	uint32_t ictl_multi_txbist;
+	/* [0x24] SerDes control - power state, data width, TX/RX rate, TX amp */
+	uint32_t ictl_multi;
+	/* [0x28] SerDes control - RX equalizer enable/start */
+	uint32_t ictl_multi_rxeq;
+	/* [0x2c] SerDes control */
+	uint32_t ictl_multi_rxeq_l_low;
+	/* [0x30] SerDes control */
+	uint32_t ictl_multi_rxeq_l_high;
+	/* [0x34] SerDes control - RX eye diagram measurement start */
+	uint32_t ictl_multi_rxeyediag;
+	/* [0x38] SerDes control - TX de-emphasis */
+	uint32_t ictl_multi_txdeemph;
+	/* [0x3c] SerDes control - TX margin */
+	uint32_t ictl_multi_txmargin;
+	/* [0x40] SerDes control - TX swing */
+	uint32_t ictl_multi_txswing;
+	/* [0x44] SerDes control - TX electrical idle */
+	uint32_t idat_multi;
+	/* [0x48] SerDes control - TX/RX power down */
+	uint32_t ipd_multi;
+	/* [0x4c] SerDes control - RX BIST done/locked status */
+	uint32_t octl_multi_rxbist;
+	/* [0x50] SerDes control - lane ready/status outputs */
+	uint32_t octl_multi;
+	/* [0x54] SerDes control - RX eye diagram status/result */
+	uint32_t octl_multi_rxeyediag;
+	/* [0x58] SerDes control - RX BIST error count */
+	uint32_t odat_multi_rxbist;
+	/* [0x5c] SerDes control - RX equalizer best eye value */
+	uint32_t odat_multi_rxeq;
+	/* [0x60] SerDes control - RX data-valid masking/overrides */
+	uint32_t multi_rx_dvalid;
+	/* [0x64] SerDes control - reserved in/out bits */
+	uint32_t reserved;
+	/* Pad the structure to 0x80 bytes (32 words total) */
+	uint32_t rsrvd[6];
+};
+
+/*
+ * SerDes unit register file layout:
+ *   [0x000] reserved
+ *   [0x100] general (per-unit) registers
+ *   [0x200] per-lane registers - 4 lanes, 0x80 bytes each
+ */
+struct al_serdes_regs {
+	uint32_t rsrvd_0[64];
+	struct serdes_gen gen; /* [0x100] */
+	struct serdes_lane lane[4]; /* [0x200] */
+};
+
+
+/*
+* Registers Fields
+*/
+
+
+/**** version register ****/
+/* Revision number (Minor) */
+#define SERDES_GEN_VERSION_RELEASE_NUM_MINOR_MASK 0x000000FF
+#define SERDES_GEN_VERSION_RELEASE_NUM_MINOR_SHIFT 0
+/* Revision number (Major) */
+#define SERDES_GEN_VERSION_RELEASE_NUM_MAJOR_MASK 0x0000FF00
+#define SERDES_GEN_VERSION_RELEASE_NUM_MAJOR_SHIFT 8
+/* Date of release */
+#define SERDES_GEN_VERSION_DATE_DAY_MASK 0x001F0000
+#define SERDES_GEN_VERSION_DATE_DAY_SHIFT 16
+/* Month of release */
+#define SERDES_GEN_VERSION_DATA_MONTH_MASK 0x01E00000
+#define SERDES_GEN_VERSION_DATA_MONTH_SHIFT 21
+/* Year of release (starting from 2000) */
+#define SERDES_GEN_VERSION_DATE_YEAR_MASK 0x3E000000
+#define SERDES_GEN_VERSION_DATE_YEAR_SHIFT 25
+/* Reserved */
+#define SERDES_GEN_VERSION_RESERVED_MASK 0xC0000000
+#define SERDES_GEN_VERSION_RESERVED_SHIFT 30
+
+/**** reg_addr register ****/
+/* Address value */
+#define SERDES_GEN_REG_ADDR_VAL_MASK 0x0000FFFF
+#define SERDES_GEN_REG_ADDR_VAL_SHIFT 0
+
+/**** reg_data register ****/
+/* Data value */
+#define SERDES_GEN_REG_DATA_VAL_MASK 0x000000FF
+#define SERDES_GEN_REG_DATA_VAL_SHIFT 0
+
+/**** ICTL_MULTI_BIST register ****/
+
+#define SERDES_GEN_ICTL_MULTI_BIST_MODESEL_NT_MASK 0x00000007
+#define SERDES_GEN_ICTL_MULTI_BIST_MODESEL_NT_SHIFT 0
+
+/**** ICTL_PCS register ****/
+
+#define SERDES_GEN_ICTL_PCS_EN_NT (1 << 0)
+
+/**** ICTL_PMA register ****/
+
+#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_MASK 0x00000007
+#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_SHIFT 0
+
+#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_REF \
+ (0 << (SERDES_GEN_ICTL_PMA_REF_SEL_NT_SHIFT))
+#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_R2L \
+ (3 << (SERDES_GEN_ICTL_PMA_REF_SEL_NT_SHIFT))
+#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_L2R \
+ (4 << (SERDES_GEN_ICTL_PMA_REF_SEL_NT_SHIFT))
+
+#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_MASK 0x00000070
+#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_SHIFT 4
+
+#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_0 \
+ (0 << (SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_SHIFT))
+#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_REF \
+ (2 << (SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_SHIFT))
+#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_R2L \
+ (3 << (SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_SHIFT))
+
+#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_MASK 0x00000700
+#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_SHIFT 8
+
+#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_0 \
+ (0 << (SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_SHIFT))
+#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_REF \
+ (2 << (SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_SHIFT))
+#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_L2R \
+ (3 << (SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_SHIFT))
+
+#define SERDES_GEN_ICTL_PMA_TXENABLE_A_SRC (1 << 11)
+#define SERDES_GEN_ICTL_PMA_TXENABLE_A_SRC_THIS (0 << 11)
+#define SERDES_GEN_ICTL_PMA_TXENABLE_A_SRC_MASTER (1 << 11)
+
+#define SERDES_GEN_ICTL_PMA_TXENABLE_A (1 << 12)
+
+#define SERDES_GEN_ICTL_PMA_SYNTHCKBYPASSEN_NT (1 << 13)
+
+/**** IPD_MULTI_SYNTH register ****/
+
+#define SERDES_GEN_IPD_MULTI_SYNTH_B (1 << 0)
+
+/**** IRST register ****/
+
+#define SERDES_GEN_IRST_PIPE_RST_L3_B_A (1 << 0)
+
+#define SERDES_GEN_IRST_PIPE_RST_L2_B_A (1 << 1)
+
+#define SERDES_GEN_IRST_PIPE_RST_L1_B_A (1 << 2)
+
+#define SERDES_GEN_IRST_PIPE_RST_L0_B_A (1 << 3)
+
+#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L3_B_A (1 << 4)
+
+#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L2_B_A (1 << 5)
+
+#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L1_B_A (1 << 6)
+
+#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L0_B_A (1 << 7)
+
+#define SERDES_GEN_IRST_MULTI_HARD_SYNTH_B_A (1 << 8)
+
+#define SERDES_GEN_IRST_POR_B_A (1 << 12)
+
+#define SERDES_GEN_IRST_PIPE_RST_L3_B_A_SEL (1 << 16)
+
+#define SERDES_GEN_IRST_PIPE_RST_L2_B_A_SEL (1 << 17)
+
+#define SERDES_GEN_IRST_PIPE_RST_L1_B_A_SEL (1 << 18)
+
+#define SERDES_GEN_IRST_PIPE_RST_L0_B_A_SEL (1 << 19)
+
+#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L3_B_A_SEL (1 << 20)
+
+#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L2_B_A_SEL (1 << 21)
+
+#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L1_B_A_SEL (1 << 22)
+
+#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L0_B_A_SEL (1 << 23)
+
+/**** OCTL_MULTI_SYNTHREADY register ****/
+
+#define SERDES_GEN_OCTL_MULTI_SYNTHREADY_A (1 << 0)
+
+/**** OCTL_MULTI_SYNTHSTATUS register ****/
+
+#define SERDES_GEN_OCTL_MULTI_SYNTHSTATUS_A (1 << 0)
+
+/**** clk_out register ****/
+
+#define SERDES_GEN_CLK_OUT_SEL_MASK 0x0000003F
+#define SERDES_GEN_CLK_OUT_SEL_SHIFT 0
+
+/**** OCTL_PMA register ****/
+
+#define SERDES_LANE_OCTL_PMA_TXSTATUS_L_A (1 << 0)
+
+/**** ICTL_MULTI_ANDME register ****/
+
+#define SERDES_LANE_ICTL_MULTI_ANDME_EN_L_A (1 << 0)
+
+#define SERDES_LANE_ICTL_MULTI_ANDME_EN_L_A_SEL (1 << 1)
+
+/**** ICTL_MULTI_LB register ****/
+
+#define SERDES_LANE_ICTL_MULTI_LB_TX2RXIOTIMEDEN_L_NT (1 << 0)
+
+#define SERDES_LANE_ICTL_MULTI_LB_TX2RXBUFTIMEDEN_L_NT (1 << 1)
+
+#define SERDES_LANE_ICTL_MULTI_LB_RX2TXUNTIMEDEN_L_NT (1 << 2)
+
+#define SERDES_LANE_ICTL_MULTI_LB_PARRX2TXTIMEDEN_L_NT (1 << 3)
+
+#define SERDES_LANE_ICTL_MULTI_LB_CDRCLK2TXEN_L_NT (1 << 4)
+
+#define SERDES_LANE_ICTL_MULTI_LB_TX2RXBUFTIMEDEN_L_NT_SEL (1 << 8)
+
+#define SERDES_LANE_ICTL_MULTI_LB_RX2TXUNTIMEDEN_L_NT_SEL (1 << 9)
+
+/**** ICTL_MULTI_RXBIST register ****/
+
+#define SERDES_LANE_ICTL_MULTI_RXBIST_EN_L_A (1 << 0)
+
+/**** ICTL_MULTI_TXBIST register ****/
+
+#define SERDES_LANE_ICTL_MULTI_TXBIST_EN_L_A (1 << 0)
+
+/**** ICTL_MULTI register ****/
+
+#define SERDES_LANE_ICTL_MULTI_PSTATE_L_MASK 0x00000003
+#define SERDES_LANE_ICTL_MULTI_PSTATE_L_SHIFT 0
+
+#define SERDES_LANE_ICTL_MULTI_PSTATE_L_SEL (1 << 2)
+
+#define SERDES_LANE_ICTL_MULTI_RXDATAWIDTH_L_MASK 0x00000070
+#define SERDES_LANE_ICTL_MULTI_RXDATAWIDTH_L_SHIFT 4
+
+#define SERDES_LANE_ICTL_MULTI_RXOVRCDRLOCK2DATAEN_L_A (1 << 8)
+
+#define SERDES_LANE_ICTL_MULTI_RXOVRCDRLOCK2DATA_L_A (1 << 9)
+
+#define SERDES_LANE_ICTL_MULTI_TXBEACON_L_A (1 << 12)
+
+#define SERDES_LANE_ICTL_MULTI_TXDETECTRXREQ_L_A (1 << 13)
+
+#define SERDES_LANE_ICTL_MULTI_RXRATE_L_MASK 0x00070000
+#define SERDES_LANE_ICTL_MULTI_RXRATE_L_SHIFT 16
+
+#define SERDES_LANE_ICTL_MULTI_RXRATE_L_SEL (1 << 19)
+
+#define SERDES_LANE_ICTL_MULTI_TXRATE_L_MASK 0x00700000
+#define SERDES_LANE_ICTL_MULTI_TXRATE_L_SHIFT 20
+
+#define SERDES_LANE_ICTL_MULTI_TXRATE_L_SEL (1 << 23)
+
+#define SERDES_LANE_ICTL_MULTI_TXAMP_L_MASK 0x07000000
+#define SERDES_LANE_ICTL_MULTI_TXAMP_L_SHIFT 24
+
+#define SERDES_LANE_ICTL_MULTI_TXAMP_EN_L (1 << 27)
+
+#define SERDES_LANE_ICTL_MULTI_TXDATAWIDTH_L_MASK 0x70000000
+#define SERDES_LANE_ICTL_MULTI_TXDATAWIDTH_L_SHIFT 28
+
+/**** ICTL_MULTI_RXEQ register ****/
+
+#define SERDES_LANE_ICTL_MULTI_RXEQ_EN_L (1 << 0)
+
+#define SERDES_LANE_ICTL_MULTI_RXEQ_START_L_A (1 << 1)
+
+#define SERDES_LANE_ICTL_MULTI_RXEQ_PRECAL_CODE_SEL_MASK 0x00000070
+#define SERDES_LANE_ICTL_MULTI_RXEQ_PRECAL_CODE_SEL_SHIFT 4
+
+/**** ICTL_MULTI_RXEQ_L_high register ****/
+
+#define SERDES_LANE_ICTL_MULTI_RXEQ_L_HIGH_VAL (1 << 0)
+
+/**** ICTL_MULTI_RXEYEDIAG register ****/
+
+#define SERDES_LANE_ICTL_MULTI_RXEYEDIAG_START_L_A (1 << 0)
+
+/**** ICTL_MULTI_TXDEEMPH register ****/
+
+#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_L_MASK 0x0003FFFF
+#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_L_SHIFT 0
+
+#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_ZERO_MASK 0x7c0
+#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_ZERO_SHIFT 6
+#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_PLUS_MASK 0xf000
+#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_PLUS_SHIFT 12
+#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_MINUS_MASK 0x7
+#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_MINUS_SHIFT 0
+
+/**** ICTL_MULTI_TXMARGIN register ****/
+
+#define SERDES_LANE_ICTL_MULTI_TXMARGIN_L_MASK 0x00000007
+#define SERDES_LANE_ICTL_MULTI_TXMARGIN_L_SHIFT 0
+
+/**** ICTL_MULTI_TXSWING register ****/
+
+#define SERDES_LANE_ICTL_MULTI_TXSWING_L (1 << 0)
+
+/**** IDAT_MULTI register ****/
+
+#define SERDES_LANE_IDAT_MULTI_TXELECIDLE_L_MASK 0x0000000F
+#define SERDES_LANE_IDAT_MULTI_TXELECIDLE_L_SHIFT 0
+
+#define SERDES_LANE_IDAT_MULTI_TXELECIDLE_L_SEL (1 << 4)
+
+/**** IPD_MULTI register ****/
+
+#define SERDES_LANE_IPD_MULTI_TX_L_B (1 << 0)
+
+#define SERDES_LANE_IPD_MULTI_RX_L_B (1 << 1)
+
+/**** OCTL_MULTI_RXBIST register ****/
+
+#define SERDES_LANE_OCTL_MULTI_RXBIST_DONE_L_A (1 << 0)
+
+#define SERDES_LANE_OCTL_MULTI_RXBIST_RXLOCKED_L_A (1 << 1)
+
+/**** OCTL_MULTI register ****/
+
+#define SERDES_LANE_OCTL_MULTI_RXCDRLOCK2DATA_L_A (1 << 0)
+
+#define SERDES_LANE_OCTL_MULTI_RXEQ_DONE_L_A (1 << 1)
+
+#define SERDES_LANE_OCTL_MULTI_RXREADY_L_A (1 << 2)
+
+#define SERDES_LANE_OCTL_MULTI_RXSTATUS_L_A (1 << 3)
+
+#define SERDES_LANE_OCTL_MULTI_TXREADY_L_A (1 << 4)
+
+#define SERDES_LANE_OCTL_MULTI_TXDETECTRXSTAT_L_A (1 << 5)
+
+#define SERDES_LANE_OCTL_MULTI_TXDETECTRXACK_L_A (1 << 6)
+
+#define SERDES_LANE_OCTL_MULTI_RXSIGNALDETECT_L_A (1 << 7)
+
+/**** OCTL_MULTI_RXEYEDIAG register ****/
+
+#define SERDES_LANE_OCTL_MULTI_RXEYEDIAG_STAT_L_A_MASK 0x00003FFF
+#define SERDES_LANE_OCTL_MULTI_RXEYEDIAG_STAT_L_A_SHIFT 0
+
+#define SERDES_LANE_OCTL_MULTI_RXEYEDIAG_DONE_L_A (1 << 16)
+
+#define SERDES_LANE_OCTL_MULTI_RXEYEDIAG_ERR_L_A (1 << 17)
+
+/**** ODAT_MULTI_RXBIST register ****/
+
+#define SERDES_LANE_ODAT_MULTI_RXBIST_ERRCOUNT_L_A_MASK 0x0000FFFF
+#define SERDES_LANE_ODAT_MULTI_RXBIST_ERRCOUNT_L_A_SHIFT 0
+
+#define SERDES_LANE_ODAT_MULTI_RXBIST_ERRCOUNT_OVERFLOW_L_A (1 << 16)
+
+/**** ODAT_MULTI_RXEQ register ****/
+
+#define SERDES_LANE_ODAT_MULTI_RXEQ_BEST_EYE_VAL_L_A_MASK 0x00003FFF
+#define SERDES_LANE_ODAT_MULTI_RXEQ_BEST_EYE_VAL_L_A_SHIFT 0
+
+/**** MULTI_RX_DVALID register ****/
+
+#define SERDES_LANE_MULTI_RX_DVALID_MASK_CDR_LOCK (1 << 0)
+
+#define SERDES_LANE_MULTI_RX_DVALID_MASK_SIGNALDETECT (1 << 1)
+
+#define SERDES_LANE_MULTI_RX_DVALID_MASK_TX_READY (1 << 2)
+
+#define SERDES_LANE_MULTI_RX_DVALID_MASK_RX_READY (1 << 3)
+
+#define SERDES_LANE_MULTI_RX_DVALID_MASK_SYNT_READY (1 << 4)
+
+#define SERDES_LANE_MULTI_RX_DVALID_MASK_RX_ELECIDLE (1 << 5)
+
+#define SERDES_LANE_MULTI_RX_DVALID_MUX_SEL_MASK 0x00FF0000
+#define SERDES_LANE_MULTI_RX_DVALID_MUX_SEL_SHIFT 16
+
+#define SERDES_LANE_MULTI_RX_DVALID_PS_00_SEL (1 << 24)
+
+#define SERDES_LANE_MULTI_RX_DVALID_PS_00_VAL (1 << 25)
+
+#define SERDES_LANE_MULTI_RX_DVALID_PS_01_SEL (1 << 26)
+
+#define SERDES_LANE_MULTI_RX_DVALID_PS_01_VAL (1 << 27)
+
+#define SERDES_LANE_MULTI_RX_DVALID_PS_10_SEL (1 << 28)
+
+#define SERDES_LANE_MULTI_RX_DVALID_PS_10_VAL (1 << 29)
+
+#define SERDES_LANE_MULTI_RX_DVALID_PS_11_SEL (1 << 30)
+
+#define SERDES_LANE_MULTI_RX_DVALID_PS_11_VAL (1 << 31)
+
+/**** reserved register ****/
+
+#define SERDES_LANE_RESERVED_OUT_MASK 0x000000FF
+#define SERDES_LANE_RESERVED_OUT_SHIFT 0
+
+#define SERDES_LANE_RESERVED_IN_MASK 0x00FF0000
+#define SERDES_LANE_RESERVED_IN_SHIFT 16
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_HAL_SERDES_REGS_H__ */
+
+/** @} end of ... group */
+
+
diff --git a/al_hal_udma.h b/al_hal_udma.h
new file mode 100644
index 000000000000..a1bdb4fe8dbb
--- /dev/null
+++ b/al_hal_udma.h
@@ -0,0 +1,672 @@
+/*-
+*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_udma_api API
+ * @ingroup group_udma
+ * UDMA API
+ * @{
+ * @}
+ *
+ * @defgroup group_udma_main UDMA Main
+ * @ingroup group_udma_api
+ * UDMA main API
+ * @{
+ * @file al_hal_udma.h
+ *
+ * @brief C Header file for the Universal DMA HAL driver
+ *
+ */
+
+#ifndef __AL_HAL_UDMA_H__
+#define __AL_HAL_UDMA_H__
+
+#include "al_hal_common.h"
+#include "al_hal_udma_regs.h"
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/* Maximum number of queues per UDMA instance (sizes al_udma.udma_q[]) */
+#define DMA_MAX_Q 4
+/* Ring size limits, in descriptors */
+#define AL_UDMA_MIN_Q_SIZE 4
+#define AL_UDMA_MAX_Q_SIZE (1 << 16) /* hw can do more, but we limit it */
+
+/* Default Max number of descriptors supported per action */
+#define AL_UDMA_DEFAULT_MAX_ACTN_DESCS 16
+
+/* UDMA hardware revision IDs (see al_udma.rev_id) */
+#define AL_UDMA_REV_ID_0 0
+#define AL_UDMA_REV_ID_1 1
+#define AL_UDMA_REV_ID_2 2
+
+/* Ring id is 2 bits wide; used to track submission/completion wrap-around */
+#define DMA_RING_ID_MASK 0x3
+/* New registers ?? */
+/* Statistics - TBD */
+
+/** UDMA submission descriptor
+ *
+ * A 16-byte, 16-byte-aligned ring entry. The same memory is interpreted
+ * as TX data, TX metadata or RX depending on queue direction and the
+ * control bits; all variants share the len_ctrl first word.
+ */
+union al_udma_desc {
+	/* TX */
+	struct {
+		uint32_t len_ctrl;	/* length + AL_M2S_DESC_* control flags */
+		uint32_t meta_ctrl;	/* metadata control word */
+		uint64_t buf_ptr;	/* buffer physical address (VMID in bits 48+) */
+	} tx;
+	/* TX Meta, used by upper layer */
+	struct {
+		uint32_t len_ctrl;	/* control flags (AL_M2S_DESC_META_DATA set) */
+		uint32_t meta_ctrl;
+		uint32_t meta1;		/* upper-layer defined metadata */
+		uint32_t meta2;
+	} tx_meta;
+	/* RX */
+	struct {
+		uint32_t len_ctrl;	/* length + AL_S2M_DESC_* control flags */
+		uint32_t buf2_ptr_lo;	/* low 32 bits of the second buffer address */
+		uint64_t buf1_ptr;	/* first buffer physical address */
+	} rx;
+} __packed_a16;
+
+/* TX desc length and control fields (tx.len_ctrl word) */
+
+#define AL_M2S_DESC_CONCAT AL_BIT(31) /* concatenate */
+#define AL_M2S_DESC_DMB AL_BIT(30)
+ /** Data Memory Barrier */
+#define AL_M2S_DESC_NO_SNOOP_H AL_BIT(29)
+#define AL_M2S_DESC_INT_EN AL_BIT(28) /** enable interrupt */
+#define AL_M2S_DESC_LAST AL_BIT(27) /* last descriptor of the packet */
+#define AL_M2S_DESC_FIRST AL_BIT(26) /* first descriptor of the packet */
+#define AL_M2S_DESC_RING_ID_SHIFT 24
+#define AL_M2S_DESC_RING_ID_MASK (0x3 << AL_M2S_DESC_RING_ID_SHIFT)
+#define AL_M2S_DESC_META_DATA AL_BIT(23)
+#define AL_M2S_DESC_DUMMY AL_BIT(22) /* for Metadata only */
+#define AL_M2S_DESC_LEN_ADJ_SHIFT 20
+#define AL_M2S_DESC_LEN_ADJ_MASK (0x7 << AL_M2S_DESC_LEN_ADJ_SHIFT)
+#define AL_M2S_DESC_LEN_SHIFT 0
+#define AL_M2S_DESC_LEN_MASK (0xfffff << AL_M2S_DESC_LEN_SHIFT) /* 20-bit length */
+
+/* RX desc length and control fields (rx.len_ctrl word) */
+#define AL_S2M_DESC_DUAL_BUF AL_BIT(31)
+#define AL_S2M_DESC_NO_SNOOP_H AL_BIT(29)
+#define AL_S2M_DESC_INT_EN AL_BIT(28) /** enable interrupt */
+#define AL_S2M_DESC_RING_ID_SHIFT 24
+#define AL_S2M_DESC_RING_ID_MASK (0x3 << AL_S2M_DESC_RING_ID_SHIFT)
+#define AL_S2M_DESC_LEN_SHIFT 0
+#define AL_S2M_DESC_LEN_MASK (0xffff << AL_S2M_DESC_LEN_SHIFT)
+#define AL_S2M_DESC_LEN2_SHIFT 16
+#define AL_S2M_DESC_LEN2_MASK (0x3fff << AL_S2M_DESC_LEN2_SHIFT)
+#define AL_S2M_DESC_LEN2_GRANULARITY_SHIFT 6
+
+/* TX/RX descriptor VMID field (in the buffer address 64 bit field) */
+#define AL_UDMA_DESC_VMID_SHIFT 48
+
+/** UDMA completion descriptor
+ *
+ * 4-byte-aligned entry written by hardware into the completion ring.
+ * Only the first control/metadata word is defined here; the full
+ * completion entry size is given per queue by cdesc_size.
+ */
+union al_udma_cdesc {
+	/* TX completion */
+	struct {
+		uint32_t ctrl_meta;	/* flags: AL_UDMA_CDESC_* */
+	} al_desc_comp_tx;
+	/* RX completion */
+	struct {
+		/* TBD */
+		uint32_t ctrl_meta;	/* flags: AL_UDMA_CDESC_* */
+	} al_desc_comp_rx;
+} __packed_a4;
+
+/* TX/RX common completion desc ctrl_meta fields */
+#define AL_UDMA_CDESC_ERROR AL_BIT(31)
+#define AL_UDMA_CDESC_BUF1_USED AL_BIT(30)
+#define AL_UDMA_CDESC_DDP AL_BIT(29)
+#define AL_UDMA_CDESC_LAST AL_BIT(27) /* last completion of the packet */
+#define AL_UDMA_CDESC_FIRST AL_BIT(26) /* first completion of the packet */
+/* word 2 */
+#define AL_UDMA_CDESC_BUF2_USED AL_BIT(31)
+#define AL_UDMA_CDESC_BUF2_LEN_SHIFT 16
+#define AL_UDMA_CDESC_BUF2_LEN_MASK AL_FIELD_MASK(29, 16)
+/** Basic Buffer structure */
+struct al_buf {
+	al_phys_addr_t addr; /**< Buffer physical address */
+	uint32_t len; /**< Buffer length in bytes */
+};
+
+/** Block is a set of buffers that belong to same source or destination */
+struct al_block {
+	struct al_buf *bufs; /**< The buffers of the block */
+	uint32_t num; /**< Number of buffers of the block */
+
+	/**
+	 * VMID to be assigned to the block descriptors
+	 * Requires VMID in descriptor to be enabled for the specific UDMA
+	 * queue.
+	 */
+	uint16_t vmid;
+};
+
+/** UDMA type: direction of the DMA engine */
+enum al_udma_type {
+	UDMA_TX, /**< memory-to-stream (M2S) */
+	UDMA_RX  /**< stream-to-memory (S2M) */
+};
+
+/** UDMA state (set via al_udma_state_set(), read via al_udma_state_get()) */
+enum al_udma_state {
+	UDMA_DISABLE = 0,
+	UDMA_IDLE,
+	UDMA_NORMAL,	/* normal operation */
+	UDMA_ABORT,
+	UDMA_RESET
+};
+
+extern const char *const al_udma_states_name[];
+
+/** UDMA Q specific parameters from upper layer
+ *
+ * The upper layer allocates the submission and completion rings and
+ * passes their CPU and physical addresses here; the HAL never allocates
+ * ring memory itself.
+ */
+struct al_udma_q_params {
+	uint32_t size; /**< ring size (in descriptors), submission and
+			* completion rings must have same size
+			*/
+	union al_udma_desc *desc_base; /**< cpu address for submission ring
+					* descriptors
+					*/
+	al_phys_addr_t desc_phy_base; /**< submission ring descriptors
+				       * physical base address
+				       */
+#ifdef __FreeBSD__
+	bus_dma_tag_t desc_phy_base_tag;	/* busdma tag/map backing the ring */
+	bus_dmamap_t desc_phy_base_map;
+#endif
+	uint8_t *cdesc_base; /**< completion descriptors pointer, NULL */
+			     /* means no completion update */
+	al_phys_addr_t cdesc_phy_base; /**< completion descriptors ring
+					* physical base address
+					*/
+#ifdef __FreeBSD__
+	bus_dma_tag_t cdesc_phy_base_tag;	/* busdma tag/map backing the ring */
+	bus_dmamap_t cdesc_phy_base_map;
+#endif
+	uint32_t cdesc_size; /**< size (in bytes) of a single dma completion
+			      * descriptor
+			      */
+
+	uint8_t adapter_rev_id; /**< PCI adapter revision ID */
+};
+
+/** UDMA parameters from upper layer (input to al_udma_init()) */
+struct al_udma_params {
+	struct unit_regs __iomem *udma_regs_base;	/**< mapped UDMA register base */
+	enum al_udma_type type; /**< Tx or Rx */
+	uint8_t num_of_queues; /**< number of queues supported by the UDMA */
+	const char *name; /**< the upper layer must keep the string area */
+};
+
+/* Forward declaration */
+struct al_udma;
+
+/** SW status of a queue (tracked in al_udma_q.status, not read from HW) */
+enum al_udma_queue_status {
+	AL_QUEUE_NOT_INITIALIZED = 0,	/* al_udma_q_init() not yet called */
+	AL_QUEUE_DISABLED,
+	AL_QUEUE_ENABLED,
+	AL_QUEUE_ABORTED
+};
+
+/** UDMA Queue private data structure
+ *
+ * Per-queue state for one submission ring + one completion ring.
+ * Cache-line aligned; hot fields used on the fast path (indices,
+ * ring pointers) come first.
+ */
+struct __cache_aligned al_udma_q {
+	uint16_t size_mask; /**< mask used for pointers wrap around
+			     * equals to size - 1
+			     */
+	union udma_q_regs __iomem *q_regs; /**< pointer to the per queue UDMA
+					    * registers
+					    */
+	union al_udma_desc *desc_base_ptr; /**< base address submission ring
+					    * descriptors
+					    */
+	uint16_t next_desc_idx; /**< index to the next available submission
+				 * descriptor
+				 */
+
+	uint32_t desc_ring_id; /**< current submission ring id (2 bits wide) */
+
+	uint8_t *cdesc_base_ptr;/**< completion descriptors pointer, NULL */
+				/* means no completion */
+	uint32_t cdesc_size; /**< size (in bytes) of the udma completion ring
+			      * descriptor
+			      */
+	uint16_t next_cdesc_idx; /**< index in descriptors for next completing
+				  * ring descriptor
+				  */
+	uint8_t *end_cdesc_ptr; /**< last valid completion entry;
+				 * used for wrap around detection
+				 */
+	uint16_t comp_head_idx; /**< completion ring head pointer register
+				 * shadow
+				 */
+	volatile union al_udma_cdesc *comp_head_ptr; /**< when working in get_packet mode
+						      * we maintain pointer instead of the
+						      * above idx
+						      */
+
+	uint32_t pkt_crnt_descs; /**< holds the number of processed descriptors
+				  * of the current packet
+				  */
+	uint32_t comp_ring_id; /**< current completion Ring Id (2 bits wide) */
+
+
+	al_phys_addr_t desc_phy_base; /**< submission desc. physical base */
+	al_phys_addr_t cdesc_phy_base; /**< completion desc. physical base */
+
+	uint32_t flags; /**< flags used for completion modes */
+	uint32_t size; /**< ring size in descriptors */
+	enum al_udma_queue_status status;	/**< SW queue state */
+	struct al_udma *udma; /**< pointer to parent UDMA */
+	uint32_t qid; /**< the index number of the queue */
+
+	/*
+	 * The following fields are duplicated from the UDMA parent adapter
+	 * due to performance considerations.
+	 */
+	uint8_t adapter_rev_id; /**< PCI adapter revision ID */
+};
+
+/** UDMA engine handle; filled by al_udma_init() */
+struct al_udma {
+	const char *name;	/* upper-layer owned name string */
+	enum al_udma_type type; /* Tx or Rx */
+	enum al_udma_state state;	/* last state set by SW */
+	uint8_t num_of_queues; /* number of queues supported by the UDMA */
+	union udma_regs __iomem *udma_regs; /* pointer to the UDMA registers */
+	struct udma_gen_regs *gen_regs; /* pointer to the Gen registers*/
+	struct al_udma_q udma_q[DMA_MAX_Q]; /* Array of UDMA Qs pointers */
+	unsigned int rev_id; /* UDMA revision ID */
+};
+
+
+/*
+ * Configurations
+ */
+
+/* Initializations functions */
+/**
+ * Initialize the udma engine
+ *
+ * @param udma udma data structure
+ * @param udma_params udma parameters from upper layer
+ *
+ * @return 0 on success. -EINVAL otherwise.
+ */
+int al_udma_init(struct al_udma *udma, struct al_udma_params *udma_params);
+
+/**
+ * Initialize the udma queue data structure
+ *
+ * @param udma
+ * @param qid
+ * @param q_params
+ *
+ * @return 0 if no error found.
+ * -EINVAL if the qid is out of range
+ * -EIO if queue was already initialized
+ */
+
+int al_udma_q_init(struct al_udma *udma, uint32_t qid,
+ struct al_udma_q_params *q_params);
+
+/**
+ * Reset a udma queue
+ *
+ * Prior to calling this function make sure:
+ * 1. Queue interrupts are masked
+ * 2. No additional descriptors are written to the descriptor ring of the queue
+ * 3. No completed descriptors are being fetched
+ *
+ * The queue can be initialized again using 'al_udma_q_init'
+ *
+ * @param udma_q
+ *
+ * @return 0 if no error found.
+ */
+
+int al_udma_q_reset(struct al_udma_q *udma_q);
+
+/**
+ * return (by reference) a pointer to a specific queue data structure.
+ * this pointer needed for calling functions (i.e. al_udma_desc_action_add) that
+ * require this pointer as input argument.
+ *
+ * @param udma udma data structure
+ * @param qid queue index
+ * @param q_handle pointer to the location where the queue structure pointer
+ * written to.
+ *
+ * @return 0 on success. -EINVAL otherwise.
+ */
+int al_udma_q_handle_get(struct al_udma *udma, uint32_t qid,
+ struct al_udma_q **q_handle);
+
+/**
+ * Change the UDMA's state
+ *
+ * @param udma udma data structure
+ * @param state the target state
+ *
+ * @return 0
+ */
+int al_udma_state_set(struct al_udma *udma, enum al_udma_state state);
+
+/**
+ * return the current UDMA hardware state
+ *
+ * @param udma udma handle
+ *
+ * @return the UDMA state as reported by the hardware.
+ */
+enum al_udma_state al_udma_state_get(struct al_udma *udma);
+
+/*
+ * Action handling
+ */
+
+/**
+ * get number of descriptors that can be submitted to the udma.
+ * keep one free descriptor to simplify full/empty management
+ * @param udma_q queue handle
+ *
+ * @return num of free descriptors.
+ */
+static INLINE uint32_t al_udma_available_get(struct al_udma_q *udma_q)
+{
+	uint16_t avail;
+
+	/* one slot is always left unused so full and empty are distinct */
+	avail = udma_q->next_cdesc_idx - (udma_q->next_desc_idx + 1);
+	avail &= udma_q->size_mask;
+
+	return (uint32_t) avail;
+}
+
+/**
+ * check if queue has pending descriptors
+ *
+ * @param udma_q queue handle
+ *
+ * @return AL_TRUE if descriptors are submitted to completion ring and still
+ * not completed (with ack). AL_FALSE otherwise.
+ */
+static INLINE al_bool al_udma_is_empty(struct al_udma_q *udma_q)
+{
+	uint16_t outstanding;
+
+	outstanding = (udma_q->next_cdesc_idx - udma_q->next_desc_idx) &
+		udma_q->size_mask;
+
+	return (outstanding == 0) ? AL_TRUE : AL_FALSE;
+}
+
+/**
+ * get next available descriptor
+ * @param udma_q queue handle
+ *
+ * @return pointer to the next available descriptor
+ */
+static INLINE union al_udma_desc *al_udma_desc_get(struct al_udma_q *udma_q)
+{
+	union al_udma_desc *cur;
+	uint16_t idx;
+
+	al_assert(udma_q);
+
+	idx = udma_q->next_desc_idx;
+	cur = udma_q->desc_base_ptr + idx;
+
+	/* advance the index, wrapping around at the end of the ring */
+	udma_q->next_desc_idx = (uint16_t)(idx + 1) & udma_q->size_mask;
+
+	return cur;
+}
+
+/**
+ * get ring id for the last allocated descriptor
+ * @param udma_q
+ *
+ * @return ring id for the last allocated descriptor
+ * this function must be called each time a new descriptor is allocated
+ * by the al_udma_desc_get(), unless ring id is ignored.
+ */
+static INLINE uint32_t al_udma_ring_id_get(struct al_udma_q *udma_q)
+{
+	uint32_t ring_id;
+
+	al_assert(udma_q);
+
+	ring_id = udma_q->desc_ring_id;
+
+	/* calculate the ring id of the next desc */
+	/* if next_desc points to first desc, then queue wrapped around */
+	/* Fix: the comparison belongs inside unlikely() - previously the
+	 * branch-prediction hint wrapped only the index, so the hint
+	 * annotated the wrong expression (unlikely(x) == 0). */
+	if (unlikely(udma_q->next_desc_idx == 0))
+		udma_q->desc_ring_id = (udma_q->desc_ring_id + 1) &
+			DMA_RING_ID_MASK;
+	return ring_id;
+}
+
+/* add DMA action - trigger the engine */
+/**
+ * add num descriptors to the submission queue.
+ *
+ * Writes 'num' to the queue's descriptor-ring tail-pointer increment
+ * register (drtp_inc), after a local data memory barrier that makes the
+ * descriptor writes visible to the DMA before the doorbell.
+ *
+ * @param udma_q queue handle
+ * @param num number of descriptors to add to the queues ring.
+ *
+ * @return 0;
+ */
+static INLINE int al_udma_desc_action_add(struct al_udma_q *udma_q,
+	uint32_t num)
+{
+	uint32_t *addr;
+
+	al_assert(udma_q);
+	al_assert((num > 0) && (num <= udma_q->size));
+
+	addr = &udma_q->q_regs->rings.drtp_inc;
+	/* make sure data written to the descriptors will be visible by the */
+	/* DMA */
+	al_local_data_memory_barrier();
+
+	/*
+	 * As we explicitly invoke the synchronization function
+	 * (al_local_data_memory_barrier()), then we can use the relaxed
+	 * register write version.
+	 */
+	al_reg_write32_relaxed(addr, num);
+
+	return 0;
+}
+
+#define cdesc_is_first(flags) ((flags) & AL_UDMA_CDESC_FIRST)
+#define cdesc_is_last(flags) ((flags) & AL_UDMA_CDESC_LAST)
+
+/**
+ * return pointer to the cdesc + offset descriptors. wrap around when needed.
+ *
+ * @param udma_q queue handle
+ * @param cdesc pointer that set by this function
+ * @param offset offset descriptors
+ *
+ */
+static INLINE volatile union al_udma_cdesc *al_cdesc_next(
+	struct al_udma_q		*udma_q,
+	volatile union al_udma_cdesc	*cdesc,
+	uint32_t			offset)
+{
+	volatile uint8_t *tmp;
+
+	/* Fix: validate the arguments BEFORE dereferencing them - the
+	 * original computed tmp (reading udma_q->cdesc_size) in the
+	 * initializer, ahead of the asserts, making them ineffective. */
+	al_assert(udma_q);
+	al_assert(cdesc);
+
+	tmp = (volatile uint8_t *) cdesc + offset * udma_q->cdesc_size;
+
+	/* if wrap around */
+	if (unlikely((tmp > udma_q->end_cdesc_ptr)))
+		return (union al_udma_cdesc *)
+			(udma_q->cdesc_base_ptr +
+			(tmp - udma_q->end_cdesc_ptr - udma_q->cdesc_size));
+
+	return (volatile union al_udma_cdesc *) tmp;
+}
+
+/**
+ * check if the flags of the descriptor indicates that is new one
+ * the function uses the ring id from the descriptor flags to know whether it
+ * new one by comparing it with the current ring id of the queue
+ *
+ * @param udma_q queue handle
+ * @param flags the flags of the completion descriptor
+ *
+ * @return AL_TRUE if the completion descriptor is new one.
+ * AL_FALSE if it old one.
+ */
+static INLINE al_bool al_udma_new_cdesc(struct al_udma_q *udma_q,
+	uint32_t flags)
+{
+	uint32_t desc_ring_id;
+
+	desc_ring_id = (flags & AL_M2S_DESC_RING_ID_MASK) >>
+		AL_M2S_DESC_RING_ID_SHIFT;
+
+	return (desc_ring_id == udma_q->comp_ring_id) ? AL_TRUE : AL_FALSE;
+}
+
+/**
+ * get next completion descriptor
+ * this function will also increment the completion ring id when the ring wraps
+ * around
+ *
+ * @param udma_q queue handle
+ * @param cdesc current completion descriptor
+ *
+ * @return pointer to the completion descriptor that follows the one pointed by
+ * cdesc
+ */
+static INLINE volatile union al_udma_cdesc *al_cdesc_next_update(
+	struct al_udma_q *udma_q,
+	volatile union al_udma_cdesc *cdesc)
+{
+	volatile uint8_t *cur = (volatile uint8_t *) cdesc;
+
+	if (unlikely(cur == udma_q->end_cdesc_ptr)) {
+		/* reached the last entry: wrap to the base of the ring and
+		 * advance the 2-bit completion ring id */
+		udma_q->comp_ring_id =
+			(udma_q->comp_ring_id + 1) & DMA_RING_ID_MASK;
+		return (union al_udma_cdesc *) udma_q->cdesc_base_ptr;
+	}
+
+	/* otherwise just step one completion entry forward */
+	return (volatile union al_udma_cdesc *) (cur + udma_q->cdesc_size);
+}
+
+/**
+ * get next completed packet from completion ring of the queue
+ *
+ * @param udma_q udma queue handle
+ * @param desc pointer that set by this function to the first descriptor
+ * note: desc is valid only when return value is not zero
+ * @return number of descriptors that belong to the packet. 0 means no completed
+ * full packet was found.
+ * If the descriptors found in the completion queue don't form full packet (no
+ * desc with LAST flag), then this function will do the following:
+ * (1) save the number of processed descriptors.
+ * (2) save last processed descriptor, so next time it called, it will resume
+ * from there.
+ * (3) return 0.
+ * note: the descriptors that belong to the completed packet will still be
+ * considered as used, that means the upper layer is safe to access those
+ * descriptors when this function returns. the al_udma_cdesc_ack() should be
+ * called to inform the udma driver that those descriptors are freed.
+ */
+uint32_t al_udma_cdesc_packet_get(
+ struct al_udma_q *udma_q,
+ volatile union al_udma_cdesc **desc);
+
+/** get completion descriptor pointer from its index */
+#define al_udma_cdesc_idx_to_ptr(udma_q, idx) \
+ ((volatile union al_udma_cdesc *) ((udma_q)->cdesc_base_ptr + \
+ (idx) * (udma_q)->cdesc_size))
+
+
+/**
+ * return number of all completed descriptors in the completion ring
+ *
+ * Reads the completion ring head pointer register (crhp), refreshes the
+ * SW shadow (comp_head_idx), and returns how many entries lie between
+ * the last acknowledged index and the HW head.
+ *
+ * @param udma_q udma queue handle
+ * @param cdesc pointer that set by this function to the first descriptor
+ * note: desc is valid only when return value is not zero
+ * note: pass NULL if not interested
+ * @return number of descriptors. 0 means no completed descriptors were found.
+ * note: the descriptors that belong to the completed packet will still be
+ * considered as used, that means the upper layer is safe to access those
+ * descriptors when this function returns. the al_udma_cdesc_ack() should be
+ * called to inform the udma driver that those descriptors are freed.
+ */
+static INLINE uint32_t al_udma_cdesc_get_all(
+	struct al_udma_q *udma_q,
+	volatile union al_udma_cdesc **cdesc)
+{
+	uint16_t count = 0;
+
+	al_assert(udma_q);
+
+	/* low 16 bits of crhp hold the head index */
+	udma_q->comp_head_idx = (uint16_t)
+		(al_reg_read32(&udma_q->q_regs->rings.crhp) &
+		0xFFFF);
+
+	/* distance from SW tail to HW head, modulo ring size */
+	count = (udma_q->comp_head_idx - udma_q->next_cdesc_idx) &
+		udma_q->size_mask;
+
+	if (cdesc)
+		*cdesc = al_udma_cdesc_idx_to_ptr(udma_q, udma_q->next_cdesc_idx);
+
+	return (uint32_t)count;
+}
+
+/**
+ * acknowledge the driver that the upper layer completed processing completion
+ * descriptors
+ *
+ * @param udma_q udma queue handle
+ * @param num number of descriptors to acknowledge
+ *
+ * @return 0
+ */
+static INLINE int al_udma_cdesc_ack(struct al_udma_q *udma_q, uint32_t num)
+{
+	al_assert(udma_q);
+
+	/* advance the SW completion index, wrapping at the ring size */
+	udma_q->next_cdesc_idx =
+		(udma_q->next_cdesc_idx + num) & udma_q->size_mask;
+
+	return 0;
+}
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+
+#endif /* __AL_HAL_UDMA_H__ */
+/** @} end of UDMA group */
diff --git a/al_hal_udma_config.c b/al_hal_udma_config.c
new file mode 100644
index 000000000000..a06f78983080
--- /dev/null
+++ b/al_hal_udma_config.c
@@ -0,0 +1,1373 @@
+/*-
+*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @file al_hal_udma_config.c
+ *
+ * @brief Universal DMA HAL driver for configurations
+ *
+ */
+
+#include <al_hal_common.h>
+#include <al_hal_udma_regs.h>
+#include <al_hal_udma_config.h>
+
+/**************** Misc configurations *********************/
+/** Configure AXI generic configuration
+ *
+ * Programs the AXI timeout, arbitration promotion and the per-direction
+ * endianness-swap enables of the UDMA generic AXI block.
+ *
+ * @param axi_regs generic AXI register block
+ * @param axi requested configuration
+ * @return 0 (always succeeds)
+ */
+int al_udma_axi_set(struct udma_gen_axi *axi_regs,
+		struct al_udma_axi_conf *axi)
+{
+	uint32_t reg;
+
+	al_reg_write32(&axi_regs->cfg_1, axi->axi_timeout);
+
+	/* read-modify-write: only the arbitration promotion field changes */
+	reg = al_reg_read32(&axi_regs->cfg_2);
+	reg &= ~UDMA_GEN_AXI_CFG_2_ARB_PROMOTION_MASK;
+	reg |= axi->arb_promotion;
+	al_reg_write32(&axi_regs->cfg_2, reg);
+
+	/* set/clear each endianness swap enable per the request */
+	reg = al_reg_read32(&axi_regs->endian_cfg);
+	if (axi->swap_8_bytes == AL_TRUE)
+		reg |= UDMA_GEN_AXI_ENDIAN_CFG_SWAP_64B_EN;
+	else
+		reg &= ~UDMA_GEN_AXI_ENDIAN_CFG_SWAP_64B_EN;
+
+	if (axi->swap_s2m_data == AL_TRUE)
+		reg |= UDMA_GEN_AXI_ENDIAN_CFG_SWAP_S2M_DATA;
+	else
+		reg &= ~UDMA_GEN_AXI_ENDIAN_CFG_SWAP_S2M_DATA;
+
+	if (axi->swap_s2m_desc == AL_TRUE)
+		reg |= UDMA_GEN_AXI_ENDIAN_CFG_SWAP_S2M_DESC;
+	else
+		reg &= ~UDMA_GEN_AXI_ENDIAN_CFG_SWAP_S2M_DESC;
+
+	if (axi->swap_m2s_data == AL_TRUE)
+		reg |= UDMA_GEN_AXI_ENDIAN_CFG_SWAP_M2S_DATA;
+	else
+		reg &= ~UDMA_GEN_AXI_ENDIAN_CFG_SWAP_M2S_DATA;
+
+	if (axi->swap_m2s_desc == AL_TRUE)
+		reg |= UDMA_GEN_AXI_ENDIAN_CFG_SWAP_M2S_DESC;
+	else
+		reg &= ~UDMA_GEN_AXI_ENDIAN_CFG_SWAP_M2S_DESC;
+
+	al_reg_write32(&axi_regs->endian_cfg, reg);
+	return 0;
+}
+
+/* Configure UDMA AXI M2S configuration */
+/** Configure AXI M2S submaster
+ *
+ * Programs one M2S AXI submaster: id/cache/burst into cfg_1, axuser/
+ * size/qos/prot into cfg_2, and max beats into cfg_max_beats.
+ * NOTE(review): the COMP_WR_CFG_* field macros are reused for all three
+ * submasters (comp write, data read, desc read) - the callers pass
+ * different register addresses, so the field layout is presumably shared
+ * across these registers.
+ *
+ * @param m2s_sm submaster attributes
+ * @param cfg_1 first config register of the submaster
+ * @param cfg_2 second config register of the submaster
+ * @param cfg_max_beats register holding the max-AXI-beats field
+ * @return 0 (always succeeds)
+ */
+static int al_udma_m2s_axi_sm_set(struct al_udma_axi_submaster *m2s_sm,
+	uint32_t *cfg_1, uint32_t *cfg_2,
+	uint32_t *cfg_max_beats)
+{
+	uint32_t reg;
+	reg = al_reg_read32(cfg_1);
+	reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_1_AWID_MASK;
+	reg |= m2s_sm->id & UDMA_AXI_M2S_COMP_WR_CFG_1_AWID_MASK;
+	reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_1_AWCACHE_MASK;
+	reg |= (m2s_sm->cache_type <<
+			UDMA_AXI_M2S_COMP_WR_CFG_1_AWCACHE_SHIFT) &
+			UDMA_AXI_M2S_COMP_WR_CFG_1_AWCACHE_MASK;
+	reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_1_AWBURST_MASK;
+	reg |= (m2s_sm->burst << UDMA_AXI_M2S_COMP_WR_CFG_1_AWBURST_SHIFT) &
+			UDMA_AXI_M2S_COMP_WR_CFG_1_AWBURST_MASK;
+	al_reg_write32(cfg_1, reg);
+
+	reg = al_reg_read32(cfg_2);
+	reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_2_AWUSER_MASK;
+	reg |= m2s_sm->used_ext & UDMA_AXI_M2S_COMP_WR_CFG_2_AWUSER_MASK;
+	reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_2_AWSIZE_MASK;
+	reg |= (m2s_sm->bus_size <<
+			UDMA_AXI_M2S_COMP_WR_CFG_2_AWSIZE_SHIFT) &
+			UDMA_AXI_M2S_COMP_WR_CFG_2_AWSIZE_MASK;
+	reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_2_AWQOS_MASK;
+	reg |= (m2s_sm->qos << UDMA_AXI_M2S_COMP_WR_CFG_2_AWQOS_SHIFT) &
+			UDMA_AXI_M2S_COMP_WR_CFG_2_AWQOS_MASK;
+	reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_2_AWPROT_MASK;
+	reg |= (m2s_sm->prot << UDMA_AXI_M2S_COMP_WR_CFG_2_AWPROT_SHIFT) &
+			UDMA_AXI_M2S_COMP_WR_CFG_2_AWPROT_MASK;
+	al_reg_write32(cfg_2, reg);
+
+	reg = al_reg_read32(cfg_max_beats);
+	reg &= ~UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK;
+	reg |= m2s_sm->max_beats &
+			UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK;
+	al_reg_write32(cfg_max_beats, reg);
+
+	return 0;
+}
+
+/** Configure UDMA AXI M2S configuration
+ *
+ * Programs all three M2S AXI submasters (completion write, data read,
+ * descriptor read) and then the shared M2S settings: boundary-break,
+ * minimum beats and outstanding-transaction limits.
+ *
+ * @param udma UDMA handle (must be the M2S engine)
+ * @param axi_m2s requested configuration
+ * @return 0 (always succeeds)
+ */
+int al_udma_m2s_axi_set(struct al_udma *udma,
+					struct al_udma_m2s_axi_conf *axi_m2s)
+{
+	uint32_t reg;
+
+	al_udma_m2s_axi_sm_set(&axi_m2s->comp_write,
+			       &udma->udma_regs->m2s.axi_m2s.comp_wr_cfg_1,
+			       &udma->udma_regs->m2s.axi_m2s.comp_wr_cfg_2,
+			       &udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1);
+
+	al_udma_m2s_axi_sm_set(&axi_m2s->data_read,
+			       &udma->udma_regs->m2s.axi_m2s.data_rd_cfg_1,
+			       &udma->udma_regs->m2s.axi_m2s.data_rd_cfg_2,
+			       &udma->udma_regs->m2s.axi_m2s.data_rd_cfg);
+
+	al_udma_m2s_axi_sm_set(&axi_m2s->desc_read,
+			       &udma->udma_regs->m2s.axi_m2s.desc_rd_cfg_1,
+			       &udma->udma_regs->m2s.axi_m2s.desc_rd_cfg_2,
+			       &udma->udma_regs->m2s.axi_m2s.desc_rd_cfg_3);
+
+	/* break data reads on max-boundary crossings if requested */
+	reg = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.data_rd_cfg);
+	if (axi_m2s->break_on_max_boundary == AL_TRUE)
+		reg |= UDMA_AXI_M2S_DATA_RD_CFG_ALWAYS_BREAK_ON_MAX_BOUDRY;
+	else
+		reg &= ~UDMA_AXI_M2S_DATA_RD_CFG_ALWAYS_BREAK_ON_MAX_BOUDRY;
+	al_reg_write32(&udma->udma_regs->m2s.axi_m2s.data_rd_cfg, reg);
+
+	reg = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1);
+	reg &= ~UDMA_AXI_M2S_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK;
+	reg |= (axi_m2s->min_axi_beats <<
+			UDMA_AXI_M2S_DESC_WR_CFG_1_MIN_AXI_BEATS_SHIFT) &
+			UDMA_AXI_M2S_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK;
+	al_reg_write32(&udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1, reg);
+
+	/* outstanding transaction limits: data read, desc read, completion
+	 * requests, completion data writes */
+	reg = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.ostand_cfg);
+	reg &= ~UDMA_AXI_M2S_OSTAND_CFG_MAX_DATA_RD_MASK;
+	reg |= axi_m2s->ostand_max_data_read &
+			UDMA_AXI_M2S_OSTAND_CFG_MAX_DATA_RD_MASK;
+	reg &= ~UDMA_AXI_M2S_OSTAND_CFG_MAX_DESC_RD_MASK;
+	reg |= (axi_m2s->ostand_max_desc_read <<
+			UDMA_AXI_M2S_OSTAND_CFG_MAX_DESC_RD_SHIFT) &
+			UDMA_AXI_M2S_OSTAND_CFG_MAX_DESC_RD_MASK;
+	reg &= ~UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_REQ_MASK;
+	reg |= (axi_m2s->ostand_max_comp_req <<
+			UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_REQ_SHIFT) &
+			UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_REQ_MASK;
+	reg &= ~UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_DATA_WR_MASK;
+	reg |= (axi_m2s->ostand_max_comp_write <<
+			UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_DATA_WR_SHIFT) &
+			UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_DATA_WR_MASK;
+	al_reg_write32(&udma->udma_regs->m2s.axi_m2s.ostand_cfg, reg);
+	return 0;
+}
+
+/** Configure AXI S2M submaster
+ *
+ * Programs one S2M AXI submaster: id/cache/burst into cfg_1, axuser/
+ * size/qos/prot into cfg_2, and max beats into cfg_max_beats.
+ * NOTE(review): as in the M2S variant, the COMP_WR_CFG_* field macros
+ * are reused for the other submasters' registers - field layout is
+ * presumably identical across them.
+ *
+ * @param s2m_sm submaster attributes
+ * @param cfg_1 first config register of the submaster
+ * @param cfg_2 second config register of the submaster
+ * @param cfg_max_beats register holding the max-AXI-beats field
+ * @return 0 (always succeeds)
+ */
+static int al_udma_s2m_axi_sm_set(struct al_udma_axi_submaster *s2m_sm,
+	uint32_t *cfg_1, uint32_t *cfg_2,
+	uint32_t *cfg_max_beats)
+{
+	uint32_t reg;
+	reg = al_reg_read32(cfg_1);
+	reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_1_AWID_MASK;
+	reg |= s2m_sm->id & UDMA_AXI_S2M_COMP_WR_CFG_1_AWID_MASK;
+	reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_1_AWCACHE_MASK;
+	reg |= (s2m_sm->cache_type <<
+			UDMA_AXI_S2M_COMP_WR_CFG_1_AWCACHE_SHIFT) &
+			UDMA_AXI_S2M_COMP_WR_CFG_1_AWCACHE_MASK;
+	reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_1_AWBURST_MASK;
+	reg |= (s2m_sm->burst << UDMA_AXI_S2M_COMP_WR_CFG_1_AWBURST_SHIFT) &
+			UDMA_AXI_S2M_COMP_WR_CFG_1_AWBURST_MASK;
+	al_reg_write32(cfg_1, reg);
+
+	reg = al_reg_read32(cfg_2);
+	reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_2_AWUSER_MASK;
+	reg |= s2m_sm->used_ext & UDMA_AXI_S2M_COMP_WR_CFG_2_AWUSER_MASK;
+	reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_2_AWSIZE_MASK;
+	reg |= (s2m_sm->bus_size << UDMA_AXI_S2M_COMP_WR_CFG_2_AWSIZE_SHIFT) &
+			UDMA_AXI_S2M_COMP_WR_CFG_2_AWSIZE_MASK;
+	reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_2_AWQOS_MASK;
+	reg |= (s2m_sm->qos << UDMA_AXI_S2M_COMP_WR_CFG_2_AWQOS_SHIFT) &
+			UDMA_AXI_S2M_COMP_WR_CFG_2_AWQOS_MASK;
+	reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_2_AWPROT_MASK;
+	reg |= (s2m_sm->prot << UDMA_AXI_S2M_COMP_WR_CFG_2_AWPROT_SHIFT) &
+			UDMA_AXI_S2M_COMP_WR_CFG_2_AWPROT_MASK;
+	al_reg_write32(cfg_2, reg);
+
+	reg = al_reg_read32(cfg_max_beats);
+	reg &= ~UDMA_AXI_S2M_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK;
+	reg |= s2m_sm->max_beats &
+			UDMA_AXI_S2M_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK;
+	al_reg_write32(cfg_max_beats, reg);
+
+	return 0;
+}
+
+/** Configure UDMA AXI S2M configuration
+ *
+ * Programs all three S2M AXI submasters (data write, descriptor read,
+ * completion write) and then the shared S2M settings: boundary-break,
+ * minimum beats, and read/write outstanding-transaction limits.
+ *
+ * @param udma UDMA handle (must be the S2M engine)
+ * @param axi_s2m requested configuration
+ * @return 0 (always succeeds)
+ */
+int al_udma_s2m_axi_set(struct al_udma *udma,
+				struct al_udma_s2m_axi_conf *axi_s2m)
+{
+
+	uint32_t reg;
+
+	al_udma_s2m_axi_sm_set(&axi_s2m->data_write,
+			       &udma->udma_regs->s2m.axi_s2m.data_wr_cfg_1,
+			       &udma->udma_regs->s2m.axi_s2m.data_wr_cfg_2,
+			       &udma->udma_regs->s2m.axi_s2m.data_wr_cfg);
+
+	al_udma_s2m_axi_sm_set(&axi_s2m->desc_read,
+			       &udma->udma_regs->s2m.axi_s2m.desc_rd_cfg_4,
+			       &udma->udma_regs->s2m.axi_s2m.desc_rd_cfg_5,
+			       &udma->udma_regs->s2m.axi_s2m.desc_rd_cfg_3);
+
+	al_udma_s2m_axi_sm_set(&axi_s2m->comp_write,
+			       &udma->udma_regs->s2m.axi_s2m.comp_wr_cfg_1,
+			       &udma->udma_regs->s2m.axi_s2m.comp_wr_cfg_2,
+			       &udma->udma_regs->s2m.axi_s2m.desc_wr_cfg_1);
+
+	/* break descriptor reads on max-boundary crossings if requested */
+	reg = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.desc_rd_cfg_3);
+	if (axi_s2m->break_on_max_boundary == AL_TRUE)
+		reg |= UDMA_AXI_S2M_DESC_RD_CFG_3_ALWAYS_BREAK_ON_MAX_BOUDRY;
+	else
+		reg &= ~UDMA_AXI_S2M_DESC_RD_CFG_3_ALWAYS_BREAK_ON_MAX_BOUDRY;
+	al_reg_write32(&udma->udma_regs->s2m.axi_s2m.desc_rd_cfg_3, reg);
+
+	reg = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.desc_wr_cfg_1);
+	reg &= ~UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK;
+	reg |= (axi_s2m->min_axi_beats <<
+			UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_SHIFT) &
+			UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK;
+	al_reg_write32(&udma->udma_regs->s2m.axi_s2m.desc_wr_cfg_1, reg);
+
+	/* read-side outstanding limits: desc reads and stream acks */
+	reg = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_rd);
+	reg &= ~UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_DESC_RD_OSTAND_MASK;
+	reg |= axi_s2m->ostand_max_desc_read &
+			UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_DESC_RD_OSTAND_MASK;
+
+	reg &= ~UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_STREAM_ACK_MASK;
+	reg |= (axi_s2m->ack_fifo_depth <<
+			UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_STREAM_ACK_SHIFT) &
+			UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_STREAM_ACK_MASK;
+
+	al_reg_write32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_rd, reg);
+
+	/* write-side outstanding limits: data requests/beats, completion
+	 * requests and completion data writes */
+	reg = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_wr);
+	reg &= ~UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_WR_OSTAND_MASK;
+	reg |= axi_s2m->ostand_max_data_req &
+			UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_WR_OSTAND_MASK;
+	reg &= ~UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_BEATS_WR_OSTAND_MASK;
+	reg |= (axi_s2m->ostand_max_data_write <<
+		UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_BEATS_WR_OSTAND_SHIFT) &
+		UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_BEATS_WR_OSTAND_MASK;
+	reg &= ~UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_REQ_MASK;
+	reg |= (axi_s2m->ostand_max_comp_req <<
+			UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_REQ_SHIFT) &
+			UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_REQ_MASK;
+	reg &= ~UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_OSTAND_MASK;
+	reg |= (axi_s2m->ostand_max_comp_write <<
+		UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_OSTAND_SHIFT) &
+		UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_OSTAND_MASK;
+	al_reg_write32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_wr, reg);
+	return 0;
+}
+
+/** M2S packet len configuration
+ *
+ * Programs the maximum packet size (and the 64K-as-zero length encoding)
+ * into the M2S cfg_len register.
+ *
+ * Fixes vs. original: the two halves of the error string were
+ * concatenated without a space ("...exceeds thesupported..."), and the
+ * ENCODE_64K bit was cleared redundantly before the if/else that sets
+ * or clears it anyway.
+ *
+ * @param udma UDMA handle; must be the M2S (TX) engine
+ * @param conf requested packet length configuration
+ * @return 0 on success, -EINVAL if max_pkt_size exceeds the HW limit
+ */
+int al_udma_m2s_packet_size_cfg_set(struct al_udma *udma,
+				struct al_udma_m2s_pkt_len_conf *conf)
+{
+	uint32_t reg = al_reg_read32(&udma->udma_regs->m2s.m2s.cfg_len);
+	uint32_t max_supported_size = UDMA_M2S_CFG_LEN_MAX_PKT_SIZE_MASK;
+
+	al_assert(udma->type == UDMA_TX);
+
+	if (conf->encode_64k_as_zero == AL_TRUE)
+		max_supported_size += 1; /* 64K */
+
+	if (conf->max_pkt_size > max_supported_size) {
+		al_err("udma [%s]: requested max_pkt_size (0x%x) exceeds the "
+			"supported limit (0x%x)\n", udma->name,
+			conf->max_pkt_size, max_supported_size);
+		return -EINVAL;
+	}
+
+	if (conf->encode_64k_as_zero == AL_TRUE)
+		reg |= UDMA_M2S_CFG_LEN_ENCODE_64K;
+	else
+		reg &= ~UDMA_M2S_CFG_LEN_ENCODE_64K;
+
+	reg &= ~UDMA_M2S_CFG_LEN_MAX_PKT_SIZE_MASK;
+	reg |= conf->max_pkt_size;
+
+	al_reg_write32(&udma->udma_regs->m2s.m2s.cfg_len, reg);
+	return 0;
+}
+
+/** Report Error - to be used for abort */
+void al_udma_err_report(struct al_udma *udma __attribute__((__unused__)))
+{
+	/* Placeholder: error reporting (for abort flows) is not implemented */
+}
+
+/** Statistics - TBD */
+void al_udma_stats_get(struct al_udma *udma __attribute__((__unused__)))
+{
+	/* Placeholder: statistics collection is not implemented yet (TBD) */
+}
+
+/** Configure UDMA M2S descriptor prefetch */
+/*
+ * Configure the M2S UDMA descriptor prefetch engine.
+ *
+ * @param udma UDMA handle
+ * @param conf prefetch configuration (FIFO depths, arbiter mode,
+ *	       burst thresholds and packet limits)
+ *
+ * @return 0 on success, -EINVAL if conf->sch_mode is not SRR or STRICT.
+ */
+int al_udma_m2s_pref_set(struct al_udma *udma,
+				struct al_udma_m2s_desc_pref_conf *conf)
+{
+	uint32_t reg;
+
+	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_1);
+	reg &= ~UDMA_M2S_RD_DESC_PREF_CFG_1_FIFO_DEPTH_MASK;
+	/* mask the value so an out-of-range depth cannot corrupt
+	 * neighboring fields of desc_pref_cfg_1 */
+	reg |= conf->desc_fifo_depth &
+		UDMA_M2S_RD_DESC_PREF_CFG_1_FIFO_DEPTH_MASK;
+	al_reg_write32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_1, reg);
+
+	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_2);
+
+	/* prefetch arbiter: forced round robin vs. strict priority */
+	if (conf->sch_mode == SRR)
+		reg |= UDMA_M2S_RD_DESC_PREF_CFG_2_PREF_FORCE_RR;
+	else if (conf->sch_mode == STRICT)
+		reg &= ~UDMA_M2S_RD_DESC_PREF_CFG_2_PREF_FORCE_RR;
+	else {
+		al_err("udma [%s]: requested descriptor preferch arbiter "
+			"mode (%d) is invalid\n", udma->name, conf->sch_mode);
+		return -EINVAL;
+	}
+	reg &= ~UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_MASK;
+	reg |= conf->max_desc_per_packet &
+		UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_MASK;
+	al_reg_write32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_2, reg);
+
+	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_3);
+	reg &= ~UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK;
+	reg |= conf->min_burst_below_thr &
+		UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK;
+
+	reg &= ~UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK;
+	reg |= (conf->min_burst_above_thr <<
+		UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT) &
+		UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK;
+
+	reg &= ~UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_MASK;
+	reg |= (conf->pref_thr <<
+		UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT) &
+		UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_MASK;
+
+	al_reg_write32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_3, reg);
+
+	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.data_cfg);
+	reg &= ~UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_MASK;
+	reg |= conf->data_fifo_depth &
+		UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_MASK;
+
+	reg &= ~UDMA_M2S_RD_DATA_CFG_MAX_PKT_LIMIT_MASK;
+	reg |= (conf->max_pkt_limit
+			<< UDMA_M2S_RD_DATA_CFG_MAX_PKT_LIMIT_SHIFT) &
+		UDMA_M2S_RD_DATA_CFG_MAX_PKT_LIMIT_MASK;
+	al_reg_write32(&udma->udma_regs->m2s.m2s_rd.data_cfg, reg);
+
+	return 0;
+}
+
+/** Get the M2S UDMA descriptor prefetch */
+/*
+ * Read back the M2S UDMA descriptor prefetch configuration.
+ *
+ * @param udma UDMA handle
+ * @param conf output: decoded prefetch configuration
+ *
+ * @return 0 always.
+ */
+int al_udma_m2s_pref_get(struct al_udma *udma,
+				struct al_udma_m2s_desc_pref_conf *conf)
+{
+	uint32_t reg;
+
+	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_1);
+	conf->desc_fifo_depth =
+	    AL_REG_FIELD_GET(reg, UDMA_M2S_RD_DESC_PREF_CFG_1_FIFO_DEPTH_MASK,
+			UDMA_M2S_RD_DESC_PREF_CFG_1_FIFO_DEPTH_SHIFT);
+
+	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_2);
+	/* the arbiter mode is encoded in the FORCE_RR bit -- mirror of
+	 * al_udma_m2s_pref_set() (the old code tested MAX_DESC_PER_PKT_MASK
+	 * here by mistake) */
+	if (reg & UDMA_M2S_RD_DESC_PREF_CFG_2_PREF_FORCE_RR)
+		conf->sch_mode = SRR;
+	else
+		conf->sch_mode = STRICT;
+	conf->max_desc_per_packet =
+	    AL_REG_FIELD_GET(reg,
+			UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_MASK,
+			UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_SHIFT);
+
+	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_3);
+
+	conf->min_burst_below_thr =
+	    AL_REG_FIELD_GET(reg,
+			UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK,
+			UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_SHIFT);
+
+	conf->min_burst_above_thr =
+	    AL_REG_FIELD_GET(reg,
+			UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK,
+			UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT);
+
+	conf->pref_thr = AL_REG_FIELD_GET(reg,
+			UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_MASK,
+			UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT);
+	return 0;
+}
+
+/* set max descriptors */
+int al_udma_m2s_max_descs_set(struct al_udma *udma, uint8_t max_descs)
+{
+	uint32_t thr = max_descs;
+	uint32_t min_burst = 4;
+
+	al_assert(max_descs <= AL_UDMA_M2S_MAX_ALLOWED_DESCS_PER_PACKET);
+	al_assert(max_descs > 0);
+
+	/* A larger prefetch threshold lets descriptors be fetched in bigger
+	 * bursts; below 8, clamp the threshold to 4 so min_burst_above_thr
+	 * can remain >= 4 */
+	if (thr >= 8)
+		min_burst = 8;
+	else
+		thr = 4;
+
+	al_reg_write32_masked(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_2,
+			      UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_MASK,
+			      max_descs << UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_SHIFT);
+
+	al_reg_write32_masked(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_3,
+			      UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_MASK |
+			      UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK,
+			      (thr << UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT) |
+			      (min_burst << UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT));
+
+	return 0;
+}
+
+/* set s2m max descriptors */
+int al_udma_s2m_max_descs_set(struct al_udma *udma, uint8_t max_descs)
+{
+	uint32_t thr = max_descs;
+	uint32_t min_burst = 4;
+
+	al_assert(max_descs <= AL_UDMA_S2M_MAX_ALLOWED_DESCS_PER_PACKET);
+	al_assert(max_descs > 0);
+
+	/* A larger prefetch threshold lets descriptors be fetched in bigger
+	 * bursts; below 8, clamp the threshold to 4 so min_burst_above_thr
+	 * can remain >= 4 */
+	if (thr >= 8)
+		min_burst = 8;
+	else
+		thr = 4;
+
+	al_reg_write32_masked(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_3,
+			      UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_MASK |
+			      UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK,
+			      (thr << UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT) |
+			      (min_burst << UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT));
+
+	return 0;
+}
+
+int al_udma_s2m_full_line_write_set(struct al_udma *udma, al_bool enable)
+{
+	uint32_t mode_bit = 0;
+
+	if (enable == AL_TRUE) {
+		mode_bit = UDMA_S2M_WR_DATA_CFG_2_FULL_LINE_MODE;
+		al_info("udma [%s]: full line write enabled\n", udma->name);
+	}
+
+	/* only the FULL_LINE_MODE bit of data_cfg_2 is touched */
+	al_reg_write32_masked(&udma->udma_regs->s2m.s2m_wr.data_cfg_2,
+			      UDMA_S2M_WR_DATA_CFG_2_FULL_LINE_MODE,
+			      mode_bit);
+	return 0;
+}
+
+/** Configure S2M UDMA descriptor prefetch */
+/*
+ * Configure the S2M UDMA descriptor prefetch engine.
+ *
+ * @param udma UDMA handle
+ * @param conf prefetch configuration (FIFO depth, arbiter mode, queue
+ *	       promotion, prediction, thresholds)
+ *
+ * @return 0 on success, -EINVAL if conf->sch_mode is not SRR or STRICT.
+ */
+int al_udma_s2m_pref_set(struct al_udma *udma,
+				struct al_udma_s2m_desc_pref_conf *conf)
+{
+	uint32_t reg;
+
+	reg = al_reg_read32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_1);
+	reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_1_FIFO_DEPTH_MASK;
+	/* mask the value so an out-of-range depth cannot corrupt
+	 * neighboring fields of desc_pref_cfg_1 */
+	reg |= conf->desc_fifo_depth &
+		UDMA_S2M_RD_DESC_PREF_CFG_1_FIFO_DEPTH_MASK;
+	al_reg_write32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_1, reg);
+
+	reg = al_reg_read32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_2);
+
+	/* prefetch arbiter: forced round robin vs. strict priority */
+	if (conf->sch_mode == SRR)
+		reg |= UDMA_S2M_RD_DESC_PREF_CFG_2_PREF_FORCE_RR;
+	else if (conf->sch_mode == STRICT)
+		reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_2_PREF_FORCE_RR;
+	else {
+		al_err("udma [%s]: requested descriptor preferch arbiter "
+			"mode (%d) is invalid\n", udma->name, conf->sch_mode);
+		return -EINVAL;
+	}
+	if (conf->q_promotion == AL_TRUE)
+		reg |= UDMA_S2M_RD_DESC_PREF_CFG_2_Q_PROMOTION;
+	else
+		reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_2_Q_PROMOTION;
+
+	if (conf->force_promotion == AL_TRUE)
+		reg |= UDMA_S2M_RD_DESC_PREF_CFG_2_FORCE_PROMOTION;
+	else
+		reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_2_FORCE_PROMOTION;
+
+	if (conf->en_pref_prediction == AL_TRUE)
+		reg |= UDMA_S2M_RD_DESC_PREF_CFG_2_EN_PREF_PREDICTION;
+	else
+		reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_2_EN_PREF_PREDICTION;
+
+	reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_2_PROMOTION_TH_MASK;
+	reg |= (conf->promotion_th
+			<< UDMA_S2M_RD_DESC_PREF_CFG_2_PROMOTION_TH_SHIFT) &
+		UDMA_S2M_RD_DESC_PREF_CFG_2_PROMOTION_TH_MASK;
+
+	al_reg_write32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_2, reg);
+
+	reg = al_reg_read32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_3);
+	reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_MASK;
+	reg |= (conf->pref_thr << UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT) &
+		UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_MASK;
+
+	reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK;
+	reg |= conf->min_burst_below_thr &
+		UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK;
+
+	reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK;
+	/* mask with the ABOVE_THR mask (the old code masked this shifted
+	 * value with the BELOW_THR mask -- copy/paste bug) */
+	reg |= (conf->min_burst_above_thr <<
+		UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT) &
+		UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK;
+
+	al_reg_write32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_3, reg);
+
+	reg = al_reg_read32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_4);
+	reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_4_A_FULL_THR_MASK;
+	reg |= conf->a_full_thr & UDMA_S2M_RD_DESC_PREF_CFG_4_A_FULL_THR_MASK;
+	al_reg_write32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_4, reg);
+
+	return 0;
+}
+
+/* Configure S2M UDMA data write */
+int al_udma_s2m_data_write_set(struct al_udma *udma,
+				struct al_udma_s2m_data_write_conf *conf)
+{
+	uint32_t reg;
+
+	/* data_cfg_1: FIFO depth, in-flight packet limit, FIFO margin */
+	reg = al_reg_read32(&udma->udma_regs->s2m.s2m_wr.data_cfg_1);
+	reg &= ~UDMA_S2M_WR_DATA_CFG_1_DATA_FIFO_DEPTH_MASK;
+	reg |= conf->data_fifo_depth &
+			UDMA_S2M_WR_DATA_CFG_1_DATA_FIFO_DEPTH_MASK;
+	reg &= ~UDMA_S2M_WR_DATA_CFG_1_MAX_PKT_LIMIT_MASK;
+	reg |= (conf->max_pkt_limit <<
+			UDMA_S2M_WR_DATA_CFG_1_MAX_PKT_LIMIT_SHIFT) &
+			UDMA_S2M_WR_DATA_CFG_1_MAX_PKT_LIMIT_MASK;
+	reg &= ~UDMA_S2M_WR_DATA_CFG_1_FIFO_MARGIN_MASK;
+	reg |= (conf->fifo_margin <<
+			UDMA_S2M_WR_DATA_CFG_1_FIFO_MARGIN_SHIFT) &
+			UDMA_S2M_WR_DATA_CFG_1_FIFO_MARGIN_MASK;
+	al_reg_write32(&udma->udma_regs->s2m.s2m_wr.data_cfg_1, reg);
+
+	/* data_cfg_2: descriptor wait timer plus the no-descriptor
+	 * behavior flag bits; conf->flags is applied only within the
+	 * set of recognized flag bits so unrelated bits stay intact */
+	reg = al_reg_read32(&udma->udma_regs->s2m.s2m_wr.data_cfg_2);
+	reg &= ~UDMA_S2M_WR_DATA_CFG_2_DESC_WAIT_TIMER_MASK;
+	reg |= conf->desc_wait_timer &
+			UDMA_S2M_WR_DATA_CFG_2_DESC_WAIT_TIMER_MASK;
+	reg &= ~(UDMA_S2M_WR_DATA_CFG_2_DROP_IF_NO_DESC |
+			UDMA_S2M_WR_DATA_CFG_2_HINT_IF_NO_DESC |
+			UDMA_S2M_WR_DATA_CFG_2_WAIT_FOR_PREF |
+			UDMA_S2M_WR_DATA_CFG_2_FULL_LINE_MODE |
+			UDMA_S2M_WR_DATA_CFG_2_DIRECT_HDR_USE_BUF1);
+	reg |= conf->flags &
+			(UDMA_S2M_WR_DATA_CFG_2_DROP_IF_NO_DESC |
+			UDMA_S2M_WR_DATA_CFG_2_HINT_IF_NO_DESC |
+			UDMA_S2M_WR_DATA_CFG_2_WAIT_FOR_PREF |
+			UDMA_S2M_WR_DATA_CFG_2_FULL_LINE_MODE |
+			UDMA_S2M_WR_DATA_CFG_2_DIRECT_HDR_USE_BUF1);
+	al_reg_write32(&udma->udma_regs->s2m.s2m_wr.data_cfg_2, reg);
+
+	return 0;
+}
+
+/* Configure S2M UDMA completion */
+int al_udma_s2m_completion_set(struct al_udma *udma,
+				struct al_udma_s2m_completion_conf *conf)
+{
+	/* cfg_1c: completion descriptor size, word counting, queue
+	 * promotion / forced round-robin, minimum free queue entries */
+	uint32_t reg = al_reg_read32(&udma->udma_regs->s2m.s2m_comp.cfg_1c);
+	reg &= ~UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK;
+	reg |= conf->desc_size & UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK;
+	if (conf->cnt_words == AL_TRUE)
+		reg |= UDMA_S2M_COMP_CFG_1C_CNT_WORDS;
+	else
+		reg &= ~UDMA_S2M_COMP_CFG_1C_CNT_WORDS;
+	if (conf->q_promotion == AL_TRUE)
+		reg |= UDMA_S2M_COMP_CFG_1C_Q_PROMOTION;
+	else
+		reg &= ~UDMA_S2M_COMP_CFG_1C_Q_PROMOTION;
+	if (conf->force_rr == AL_TRUE)
+		reg |= UDMA_S2M_COMP_CFG_1C_FORCE_RR;
+	else
+		reg &= ~UDMA_S2M_COMP_CFG_1C_FORCE_RR;
+	reg &= ~UDMA_S2M_COMP_CFG_1C_Q_FREE_MIN_MASK;
+	reg |= (conf->q_free_min << UDMA_S2M_COMP_CFG_1C_Q_FREE_MIN_SHIFT) &
+		UDMA_S2M_COMP_CFG_1C_Q_FREE_MIN_MASK;
+	al_reg_write32(&udma->udma_regs->s2m.s2m_comp.cfg_1c, reg);
+
+	/* cfg_2c: completion and unacknowledged FIFO depths */
+	reg = al_reg_read32(&udma->udma_regs->s2m.s2m_comp.cfg_2c);
+	reg &= ~UDMA_S2M_COMP_CFG_2C_COMP_FIFO_DEPTH_MASK;
+	reg |= conf->comp_fifo_depth
+				& UDMA_S2M_COMP_CFG_2C_COMP_FIFO_DEPTH_MASK;
+	reg &= ~UDMA_S2M_COMP_CFG_2C_UNACK_FIFO_DEPTH_MASK;
+	reg |= (conf->unack_fifo_depth
+				<< UDMA_S2M_COMP_CFG_2C_UNACK_FIFO_DEPTH_SHIFT) &
+			UDMA_S2M_COMP_CFG_2C_UNACK_FIFO_DEPTH_MASK;
+	al_reg_write32(&udma->udma_regs->s2m.s2m_comp.cfg_2c, reg);
+
+	/* application-acknowledge timeout is a full 32-bit register */
+	al_reg_write32(&udma->udma_regs->s2m.s2m_comp.cfg_application_ack,
+		       conf->timeout);
+	return 0;
+}
+
+/** Configure the M2S UDMA scheduling mode */
+/*
+ * Configure the M2S UDMA DWRR (deficit weighted round robin) scheduler.
+ *
+ * @param udma  UDMA handle
+ * @param sched DWRR enable, packet mode, weight increment, increment
+ *		factor and deficit counter initial value
+ *
+ * @return 0 always.
+ */
+int al_udma_m2s_sc_set(struct al_udma *udma,
+					struct al_udma_m2s_dwrr_conf *sched)
+{
+	uint32_t reg = al_reg_read32(&udma->udma_regs->m2s.m2s_dwrr.cfg_sched);
+
+	if (sched->enable_dwrr == AL_TRUE)
+		reg |= UDMA_M2S_DWRR_CFG_SCHED_EN_DWRR;
+	else
+		reg &= ~UDMA_M2S_DWRR_CFG_SCHED_EN_DWRR;
+
+	if (sched->pkt_mode == AL_TRUE)
+		reg |= UDMA_M2S_DWRR_CFG_SCHED_PKT_MODE_EN;
+	else
+		reg &= ~UDMA_M2S_DWRR_CFG_SCHED_PKT_MODE_EN;
+
+	/* mask each shifted value so out-of-range configuration cannot
+	 * spill into neighboring fields (the old code ORed unmasked) */
+	reg &= ~UDMA_M2S_DWRR_CFG_SCHED_WEIGHT_INC_MASK;
+	reg |= (sched->weight << UDMA_M2S_DWRR_CFG_SCHED_WEIGHT_INC_SHIFT) &
+		UDMA_M2S_DWRR_CFG_SCHED_WEIGHT_INC_MASK;
+	reg &= ~UDMA_M2S_DWRR_CFG_SCHED_INC_FACTOR_MASK;
+	reg |= (sched->inc_factor << UDMA_M2S_DWRR_CFG_SCHED_INC_FACTOR_SHIFT) &
+		UDMA_M2S_DWRR_CFG_SCHED_INC_FACTOR_MASK;
+	al_reg_write32(&udma->udma_regs->m2s.m2s_dwrr.cfg_sched, reg);
+
+	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_dwrr.ctrl_deficit_cnt);
+	reg &= ~UDMA_M2S_DWRR_CTRL_DEFICIT_CNT_INIT_MASK;
+	reg |= sched->deficit_init_val &
+		UDMA_M2S_DWRR_CTRL_DEFICIT_CNT_INIT_MASK;
+	al_reg_write32(&udma->udma_regs->m2s.m2s_dwrr.ctrl_deficit_cnt, reg);
+
+	return 0;
+}
+
+/** Configure the M2S UDMA rate limitation */
+int al_udma_m2s_rlimit_set(struct al_udma *udma,
+				struct al_udma_m2s_rlimit_mode *mode)
+{
+	uint32_t val;
+
+	/* general config: packet-mode enable bit and short cycle size */
+	val = al_reg_read32(&udma->udma_regs->m2s.m2s_rate_limiter.gen_cfg);
+	val &= ~(UDMA_M2S_RATE_LIMITER_GEN_CFG_PKT_MODE_EN |
+		 UDMA_M2S_RATE_LIMITER_GEN_CFG_SHORT_CYCLE_SIZE_MASK);
+	if (mode->pkt_mode_en == AL_TRUE)
+		val |= UDMA_M2S_RATE_LIMITER_GEN_CFG_PKT_MODE_EN;
+	val |= mode->short_cycle_sz &
+		UDMA_M2S_RATE_LIMITER_GEN_CFG_SHORT_CYCLE_SIZE_MASK;
+	al_reg_write32(&udma->udma_regs->m2s.m2s_rate_limiter.gen_cfg, val);
+
+	/* token counter initial value */
+	val = al_reg_read32(&udma->udma_regs->m2s.m2s_rate_limiter.ctrl_token);
+	val &= ~UDMA_M2S_RATE_LIMITER_CTRL_TOKEN_RST_MASK;
+	val |= mode->token_init_val &
+		UDMA_M2S_RATE_LIMITER_CTRL_TOKEN_RST_MASK;
+	al_reg_write32(&udma->udma_regs->m2s.m2s_rate_limiter.ctrl_token, val);
+
+	return 0;
+}
+
+int al_udma_m2s_rlimit_reset(struct al_udma *udma)
+{
+	uint32_t val;
+
+	/* setting the RST bit restarts the rate limiter cycle counter */
+	val = al_reg_read32(
+		&udma->udma_regs->m2s.m2s_rate_limiter.ctrl_cycle_cnt);
+	al_reg_write32(&udma->udma_regs->m2s.m2s_rate_limiter.ctrl_cycle_cnt,
+		       val | UDMA_M2S_RATE_LIMITER_CTRL_CYCLE_CNT_RST);
+	return 0;
+}
+
+/** Configure the Stream/Q rate limitation */
+static int al_udma_common_rlimit_set(struct udma_rlimit_common *regs,
+		struct al_udma_m2s_rlimit_cfg *conf)
+{
+	uint32_t reg = al_reg_read32(&regs->cfg_1s);
+	/* mask max burst size, and enable/pause control bits */
+	reg &= ~UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_MAX_BURST_SIZE_MASK;
+	reg &= ~UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_EN;
+	reg &= ~UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_PAUSE;
+	reg |= conf->max_burst_sz &
+		UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_MAX_BURST_SIZE_MASK;
+	al_reg_write32(&regs->cfg_1s, reg);
+
+	/* long cycle size (in short cycles) */
+	reg = al_reg_read32(&regs->cfg_cycle);
+	reg &= ~UDMA_M2S_STREAM_RATE_LIMITER_CFG_CYCLE_LONG_CYCLE_SIZE_MASK;
+	reg |= conf->long_cycle_sz &
+		UDMA_M2S_STREAM_RATE_LIMITER_CFG_CYCLE_LONG_CYCLE_SIZE_MASK;
+	al_reg_write32(&regs->cfg_cycle, reg);
+
+	/* tokens added per long cycle */
+	reg = al_reg_read32(&regs->cfg_token_size_1);
+	reg &= ~UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_1_LONG_CYCLE_MASK;
+	reg |= conf->long_cycle &
+		UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_1_LONG_CYCLE_MASK;
+	al_reg_write32(&regs->cfg_token_size_1, reg);
+
+	/* tokens added per short cycle */
+	reg = al_reg_read32(&regs->cfg_token_size_2);
+	reg &= ~UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_2_SHORT_CYCLE_MASK;
+	reg |= conf->short_cycle &
+		UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_2_SHORT_CYCLE_MASK;
+	al_reg_write32(&regs->cfg_token_size_2, reg);
+
+	reg = al_reg_read32(&regs->mask);
+	reg &= ~0xf; /* only bits 0-3 defined */
+	reg |= conf->mask & 0xf;
+	al_reg_write32(&regs->mask, reg);
+
+	return 0;
+}
+
+static int al_udma_common_rlimit_act(struct udma_rlimit_common *regs,
+		enum al_udma_m2s_rlimit_action act)
+{
+	uint32_t val;
+
+	/* each action sets one control bit in the relevant register */
+	if (act == AL_UDMA_STRM_RLIMIT_ENABLE) {
+		val = al_reg_read32(&regs->cfg_1s);
+		al_reg_write32(&regs->cfg_1s,
+			val | UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_EN);
+	} else if (act == AL_UDMA_STRM_RLIMIT_PAUSE) {
+		val = al_reg_read32(&regs->cfg_1s);
+		al_reg_write32(&regs->cfg_1s,
+			val | UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_PAUSE);
+	} else if (act == AL_UDMA_STRM_RLIMIT_RESET) {
+		val = al_reg_read32(&regs->sw_ctrl);
+		al_reg_write32(&regs->sw_ctrl,
+			val | UDMA_M2S_STREAM_RATE_LIMITER_SW_CTRL_RST_TOKEN_CNT);
+	} else {
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/** Configure the M2S Stream rate limitation */
+int al_udma_m2s_strm_rlimit_set(struct al_udma *udma,
+				struct al_udma_m2s_rlimit_cfg *conf)
+{
+	/* delegate to the common stream/queue rate limiter helper */
+	return al_udma_common_rlimit_set(
+		&udma->udma_regs->m2s.m2s_stream_rate_limiter.rlimit, conf);
+}
+
+int al_udma_m2s_strm_rlimit_act(struct al_udma *udma,
+				enum al_udma_m2s_rlimit_action act)
+{
+	int rc = al_udma_common_rlimit_act(
+		&udma->udma_regs->m2s.m2s_stream_rate_limiter.rlimit, act);
+
+	/* the helper only returns 0 or -EINVAL */
+	if (rc == -EINVAL)
+		al_err("udma [%s]: udma stream rate limit invalid action "
+				"(%d)\n", udma->name, act);
+	return rc;
+}
+
+/** Configure the M2S UDMA Q rate limitation */
+int al_udma_m2s_q_rlimit_set(struct al_udma_q *udma_q,
+				struct al_udma_m2s_rlimit_cfg *conf)
+{
+	/* delegate to the common stream/queue rate limiter helper */
+	return al_udma_common_rlimit_set(&udma_q->q_regs->m2s_q.rlimit, conf);
+}
+
+int al_udma_m2s_q_rlimit_act(struct al_udma_q *udma_q,
+				enum al_udma_m2s_rlimit_action act)
+{
+	int rc = al_udma_common_rlimit_act(&udma_q->q_regs->m2s_q.rlimit, act);
+
+	/* the helper only returns 0 or -EINVAL */
+	if (rc == -EINVAL)
+		al_err("udma [%s %d]: udma stream rate limit invalid action "
+				"(%d)\n",
+				udma_q->udma->name, udma_q->qid, act);
+	return rc;
+}
+
+/** Configure the M2S UDMA Q scheduling mode */
+int al_udma_m2s_q_sc_set(struct al_udma_q *udma_q,
+					struct al_udma_m2s_q_dwrr_conf *conf)
+{
+	uint32_t reg = al_reg_read32(&udma_q->q_regs->m2s_q.dwrr_cfg_1);
+
+	/* dwrr_cfg_1: max deficit counter size and strict-priority flag */
+	reg &= ~UDMA_M2S_Q_DWRR_CFG_1_MAX_DEFICIT_CNT_SIZE_MASK;
+	reg |= conf->max_deficit_cnt_sz &
+		UDMA_M2S_Q_DWRR_CFG_1_MAX_DEFICIT_CNT_SIZE_MASK;
+	if (conf->strict == AL_TRUE)
+		reg |= UDMA_M2S_Q_DWRR_CFG_1_STRICT;
+	else
+		reg &= ~UDMA_M2S_Q_DWRR_CFG_1_STRICT;
+	al_reg_write32(&udma_q->q_regs->m2s_q.dwrr_cfg_1, reg);
+
+	/* NOTE(review): the axi_qos value written below is immediately
+	 * cleared by the second '&= ~..._Q_QOS_MASK' and overwritten with
+	 * q_qos, so axi_qos currently has no effect. This looks like the
+	 * wrong mask/shift macro was used for the axi_qos field -- confirm
+	 * against the dwrr_cfg_2 register layout before changing. */
+	reg = al_reg_read32(&udma_q->q_regs->m2s_q.dwrr_cfg_2);
+	reg &= ~UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_MASK;
+	reg |= (conf->axi_qos << UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_SHIFT) &
+		UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_MASK;
+	reg &= ~UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_MASK;
+	reg |= conf->q_qos & UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_MASK;
+	al_reg_write32(&udma_q->q_regs->m2s_q.dwrr_cfg_2, reg);
+
+	/* dwrr_cfg_3: queue weight */
+	reg = al_reg_read32(&udma_q->q_regs->m2s_q.dwrr_cfg_3);
+	reg &= ~UDMA_M2S_Q_DWRR_CFG_3_WEIGHT_MASK;
+	reg |= conf->weight & UDMA_M2S_Q_DWRR_CFG_3_WEIGHT_MASK;
+	al_reg_write32(&udma_q->q_regs->m2s_q.dwrr_cfg_3, reg);
+
+	return 0;
+}
+
+int al_udma_m2s_q_sc_pause(struct al_udma_q *udma_q, al_bool set)
+{
+	uint32_t val = al_reg_read32(&udma_q->q_regs->m2s_q.dwrr_cfg_1);
+
+	/* clear-then-set: only the PAUSE bit is modified */
+	val &= ~UDMA_M2S_Q_DWRR_CFG_1_PAUSE;
+	if (set == AL_TRUE)
+		val |= UDMA_M2S_Q_DWRR_CFG_1_PAUSE;
+	al_reg_write32(&udma_q->q_regs->m2s_q.dwrr_cfg_1, val);
+
+	return 0;
+}
+
+int al_udma_m2s_q_sc_reset(struct al_udma_q *udma_q)
+{
+	uint32_t val = al_reg_read32(&udma_q->q_regs->m2s_q.dwrr_sw_ctrl);
+
+	/* RST_CNT resets the queue's DWRR deficit counter */
+	al_reg_write32(&udma_q->q_regs->m2s_q.dwrr_sw_ctrl,
+		       val | UDMA_M2S_Q_DWRR_SW_CTRL_RST_CNT);
+
+	return 0;
+}
+
+/** M2S UDMA completion and application timeouts */
+int al_udma_m2s_comp_timeouts_set(struct al_udma *udma,
+					struct al_udma_m2s_comp_timeouts *conf)
+{
+	uint32_t reg = al_reg_read32(&udma->udma_regs->m2s.m2s_comp.cfg_1c);
+
+	/* completion arbiter: forced round robin vs. strict priority */
+	if (conf->sch_mode == SRR)
+		reg |= UDMA_M2S_COMP_CFG_1C_FORCE_RR;
+	else if (conf->sch_mode == STRICT)
+		reg &= ~UDMA_M2S_COMP_CFG_1C_FORCE_RR;
+	else {
+		al_err("udma [%s]: requested completion descriptor preferch "
+				"arbiter mode (%d) is invalid\n",
+				udma->name, conf->sch_mode);
+		return -EINVAL;
+	}
+	if (conf->enable_q_promotion == AL_TRUE)
+		reg |= UDMA_M2S_COMP_CFG_1C_Q_PROMOTION;
+	else
+		reg &= ~UDMA_M2S_COMP_CFG_1C_Q_PROMOTION;
+	/* completion / unacknowledged FIFO depths */
+	reg &= ~UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH_MASK;
+	reg |=
+	    conf->comp_fifo_depth << UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH_SHIFT;
+
+	reg &= ~UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH_MASK;
+	reg |= conf->unack_fifo_depth
+			<< UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH_SHIFT;
+	al_reg_write32(&udma->udma_regs->m2s.m2s_comp.cfg_1c, reg);
+
+	/* descriptor-write coalescing timeout (full 32-bit register) */
+	al_reg_write32(&udma->udma_regs->m2s.m2s_comp.cfg_coal
+							, conf->coal_timeout);
+
+	/* application acknowledge timeout field */
+	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_comp.cfg_application_ack);
+	reg &= ~UDMA_M2S_COMP_CFG_APPLICATION_ACK_TOUT_MASK;
+	reg |= conf->app_timeout << UDMA_M2S_COMP_CFG_APPLICATION_ACK_TOUT_SHIFT;
+	al_reg_write32(&udma->udma_regs->m2s.m2s_comp.cfg_application_ack, reg);
+	return 0;
+}
+
+int al_udma_m2s_comp_timeouts_get(struct al_udma *udma,
+					struct al_udma_m2s_comp_timeouts *conf)
+{
+	uint32_t reg = al_reg_read32(&udma->udma_regs->m2s.m2s_comp.cfg_1c);
+
+	/* decode cfg_1c: arbiter mode, queue promotion, FIFO depths */
+	if (reg & UDMA_M2S_COMP_CFG_1C_FORCE_RR)
+		conf->sch_mode = SRR;
+	else
+		conf->sch_mode = STRICT;
+
+	if (reg & UDMA_M2S_COMP_CFG_1C_Q_PROMOTION)
+		conf->enable_q_promotion = AL_TRUE;
+	else
+		conf->enable_q_promotion = AL_FALSE;
+
+	conf->comp_fifo_depth =
+	    AL_REG_FIELD_GET(reg,
+				UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH_MASK,
+				UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH_SHIFT);
+	conf->unack_fifo_depth =
+	    AL_REG_FIELD_GET(reg,
+				UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH_MASK,
+				UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH_SHIFT);
+
+	/* coalescing timeout occupies the whole register */
+	conf->coal_timeout = al_reg_read32(
+					&udma->udma_regs->m2s.m2s_comp.cfg_coal);
+
+	reg = al_reg_read32(
+			&udma->udma_regs->m2s.m2s_comp.cfg_application_ack);
+
+	conf->app_timeout =
+	    AL_REG_FIELD_GET(reg,
+			UDMA_M2S_COMP_CFG_APPLICATION_ACK_TOUT_MASK,
+			UDMA_M2S_COMP_CFG_APPLICATION_ACK_TOUT_SHIFT);
+
+	return 0;
+}
+
+/**
+ * S2M UDMA configure no descriptors behaviour
+ */
+/*
+ * Configure S2M behavior when no Rx descriptors are available:
+ * drop and/or hint (interrupt) plus the descriptor wait timeout.
+ *
+ * @return 0 on success, -EINVAL for drop with a zero timeout (the
+ *	   hardware would wait forever instead of dropping).
+ */
+int al_udma_s2m_no_desc_cfg_set(struct al_udma *udma, al_bool drop_packet, al_bool gen_interrupt, uint32_t wait_for_desc_timeout)
+{
+	uint32_t reg;
+
+	reg = al_reg_read32(&udma->udma_regs->s2m.s2m_wr.data_cfg_2);
+
+	if ((drop_packet == AL_TRUE) && (wait_for_desc_timeout == 0)) {
+		al_err("udma [%s]: setting timeout to 0 will cause the udma to wait forever instead of dropping the packet\n", udma->name);
+		return -EINVAL;
+	}
+
+	if (drop_packet == AL_TRUE)
+		reg |= UDMA_S2M_WR_DATA_CFG_2_DROP_IF_NO_DESC;
+	else
+		reg &= ~UDMA_S2M_WR_DATA_CFG_2_DROP_IF_NO_DESC;
+
+	if (gen_interrupt == AL_TRUE)
+		reg |= UDMA_S2M_WR_DATA_CFG_2_HINT_IF_NO_DESC;
+	else
+		reg &= ~UDMA_S2M_WR_DATA_CFG_2_HINT_IF_NO_DESC;
+
+	AL_REG_FIELD_SET(reg, UDMA_S2M_WR_DATA_CFG_2_DESC_WAIT_TIMER_MASK, UDMA_S2M_WR_DATA_CFG_2_DESC_WAIT_TIMER_SHIFT, wait_for_desc_timeout);
+
+	al_reg_write32(&udma->udma_regs->s2m.s2m_wr.data_cfg_2, reg);
+
+	return 0;
+}
+
+/* S2M UDMA configure a queue's completion update */
+/* ("updade" in the name is a historical typo, kept for API compatibility) */
+int al_udma_s2m_q_compl_updade_config(struct al_udma_q *udma_q, al_bool enable)
+{
+	uint32_t val = al_reg_read32(&udma_q->q_regs->s2m_q.comp_cfg);
+
+	val &= ~UDMA_S2M_Q_COMP_CFG_EN_COMP_RING_UPDATE;
+	if (enable == AL_TRUE)
+		val |= UDMA_S2M_Q_COMP_CFG_EN_COMP_RING_UPDATE;
+
+	al_reg_write32(&udma_q->q_regs->s2m_q.comp_cfg, val);
+
+	return 0;
+}
+
+/* S2M UDMA configure a queue's completion descriptors coalescing */
+int al_udma_s2m_q_compl_coal_config(struct al_udma_q *udma_q, al_bool enable, uint32_t
+		coal_timeout)
+{
+	uint32_t val = al_reg_read32(&udma_q->q_regs->s2m_q.comp_cfg);
+
+	/* the hardware bit is a *disable* flag, hence the inversion */
+	val &= ~UDMA_S2M_Q_COMP_CFG_DIS_COMP_COAL;
+	if (enable != AL_TRUE)
+		val |= UDMA_S2M_Q_COMP_CFG_DIS_COMP_COAL;
+
+	al_reg_write32(&udma_q->q_regs->s2m_q.comp_cfg, val);
+
+	/* coalescing timeout occupies its own register */
+	al_reg_write32(&udma_q->q_regs->s2m_q.comp_cfg_2, coal_timeout);
+	return 0;
+}
+
+/* S2M UDMA configure completion descriptors write burst parameters */
+int al_udma_s2m_compl_desc_burst_config(struct al_udma *udma, uint16_t
+		burst_size)
+{
+	uint16_t beats;
+
+	switch (burst_size) {
+	case 64:
+	case 128:
+	case 256:
+		break;
+	default:
+		al_err("%s: invalid burst_size value (%d)\n", __func__,
+				burst_size);
+		return -EINVAL;
+	}
+
+	/* convert burst size from bytes to 16-byte AXI beats */
+	beats = burst_size / 16;
+	al_reg_write32_masked(&udma->udma_regs->s2m.axi_s2m.desc_wr_cfg_1,
+			UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK |
+			UDMA_AXI_S2M_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK,
+			beats << UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_SHIFT |
+			beats << UDMA_AXI_S2M_DESC_WR_CFG_1_MAX_AXI_BEATS_SHIFT);
+	return 0;
+}
+
+/* S2M UDMA configure a queue's completion descriptors header split */
+int al_udma_s2m_q_compl_hdr_split_config(struct al_udma_q *udma_q, al_bool enable,
+					 al_bool force_hdr_split, uint32_t hdr_len)
+{
+	uint32_t val = al_reg_read32(&udma_q->q_regs->s2m_q.pkt_cfg);
+
+	/* start from a clean state: split disabled, size zeroed */
+	val &= ~(UDMA_S2M_Q_PKT_CFG_HDR_SPLIT_SIZE_MASK |
+		 UDMA_S2M_Q_PKT_CFG_EN_HDR_SPLIT |
+		 UDMA_S2M_Q_PKT_CFG_FORCE_HDR_SPLIT);
+
+	if (enable == AL_TRUE) {
+		val |= UDMA_S2M_Q_PKT_CFG_EN_HDR_SPLIT |
+			(hdr_len & UDMA_S2M_Q_PKT_CFG_HDR_SPLIT_SIZE_MASK);
+		if (force_hdr_split == AL_TRUE)
+			val |= UDMA_S2M_Q_PKT_CFG_FORCE_HDR_SPLIT;
+	}
+
+	al_reg_write32(&udma_q->q_regs->s2m_q.pkt_cfg, val);
+
+	return 0;
+}
+
+/* S2M UDMA per queue completion configuration */
+int al_udma_s2m_q_comp_set(struct al_udma_q *udma_q,
+					struct al_udma_s2m_q_comp_conf *conf)
+{
+	/* comp_cfg: ring update enable and coalescing disable bits */
+	uint32_t reg = al_reg_read32(&udma_q->q_regs->s2m_q.comp_cfg);
+	if (conf->en_comp_ring_update == AL_TRUE)
+		reg |= UDMA_S2M_Q_COMP_CFG_EN_COMP_RING_UPDATE;
+	else
+		reg &= ~UDMA_S2M_Q_COMP_CFG_EN_COMP_RING_UPDATE;
+
+	if (conf->dis_comp_coal == AL_TRUE)
+		reg |= UDMA_S2M_Q_COMP_CFG_DIS_COMP_COAL;
+	else
+		reg &= ~UDMA_S2M_Q_COMP_CFG_DIS_COMP_COAL;
+
+	al_reg_write32(&udma_q->q_regs->s2m_q.comp_cfg, reg);
+
+	/* coalescing timer occupies its own register */
+	al_reg_write32(&udma_q->q_regs->s2m_q.comp_cfg_2, conf->comp_timer);
+
+	/* pkt_cfg: header split size / force / enable */
+	reg = al_reg_read32(&udma_q->q_regs->s2m_q.pkt_cfg);
+
+	reg &= ~UDMA_S2M_Q_PKT_CFG_HDR_SPLIT_SIZE_MASK;
+	reg |= conf->hdr_split_size & UDMA_S2M_Q_PKT_CFG_HDR_SPLIT_SIZE_MASK;
+	if (conf->force_hdr_split == AL_TRUE)
+		reg |= UDMA_S2M_Q_PKT_CFG_FORCE_HDR_SPLIT;
+	else
+		reg &= ~UDMA_S2M_Q_PKT_CFG_FORCE_HDR_SPLIT;
+	if (conf->en_hdr_split == AL_TRUE)
+		reg |= UDMA_S2M_Q_PKT_CFG_EN_HDR_SPLIT;
+	else
+		reg &= ~UDMA_S2M_Q_PKT_CFG_EN_HDR_SPLIT;
+
+	al_reg_write32(&udma_q->q_regs->s2m_q.pkt_cfg, reg);
+
+	/* qos_cfg: per-queue QoS value */
+	reg = al_reg_read32(&udma_q->q_regs->s2m_q.qos_cfg);
+	reg &= ~UDMA_S2M_QOS_CFG_Q_QOS_MASK;
+	reg |= conf->q_qos & UDMA_S2M_QOS_CFG_Q_QOS_MASK;
+	al_reg_write32(&udma_q->q_regs->s2m_q.qos_cfg, reg);
+
+	return 0;
+}
+
+/* UDMA VMID control configuration */
+void al_udma_gen_vmid_conf_set(
+	struct unit_regs *unit_regs,
+	struct al_udma_gen_vmid_conf *conf)
+{
+	unsigned int rev_id;
+
+	/* cfg_vmid_0 packs four per-queue enable bits for each of the
+	 * four enable groups (Tx desc, Tx queue, Rx desc, Rx queue):
+	 * queue N contributes bit N within its group's 4-bit field */
+	al_reg_write32_masked(
+		&unit_regs->gen.vmid.cfg_vmid_0,
+		UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_DESC_EN_MASK |
+		UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_QUEUE_EN_MASK |
+		UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_DESC_EN_MASK |
+		UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_QUEUE_EN_MASK,
+		(((conf->tx_q_conf[0].desc_en << 0) |
+		  (conf->tx_q_conf[1].desc_en << 1) |
+		  (conf->tx_q_conf[2].desc_en << 2) |
+		  (conf->tx_q_conf[3].desc_en << 3)) <<
+		 UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_DESC_EN_SHIFT) |
+		(((conf->tx_q_conf[0].queue_en << 0) |
+		  (conf->tx_q_conf[1].queue_en << 1) |
+		  (conf->tx_q_conf[2].queue_en << 2) |
+		  (conf->tx_q_conf[3].queue_en << 3)) <<
+		 UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_QUEUE_EN_SHIFT) |
+		(((conf->rx_q_conf[0].desc_en << 0) |
+		  (conf->rx_q_conf[1].desc_en << 1) |
+		  (conf->rx_q_conf[2].desc_en << 2) |
+		  (conf->rx_q_conf[3].desc_en << 3)) <<
+		 UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_DESC_EN_SHIFT) |
+		(((conf->rx_q_conf[0].queue_en << 0) |
+		  (conf->rx_q_conf[1].queue_en << 1) |
+		  (conf->rx_q_conf[2].queue_en << 2) |
+		  (conf->rx_q_conf[3].queue_en << 3)) <<
+		 UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_QUEUE_EN_SHIFT));
+
+	/* VMID per queue: two queue VMIDs packed per 32-bit register */
+	al_reg_write32(
+		&unit_regs->gen.vmid.cfg_vmid_1,
+		(conf->tx_q_conf[0].vmid <<
+		 UDMA_GEN_VMID_CFG_VMID_1_TX_Q_0_VMID_SHIFT) |
+		(conf->tx_q_conf[1].vmid <<
+		 UDMA_GEN_VMID_CFG_VMID_1_TX_Q_1_VMID_SHIFT));
+
+	al_reg_write32(
+		&unit_regs->gen.vmid.cfg_vmid_2,
+		(conf->tx_q_conf[2].vmid <<
+		 UDMA_GEN_VMID_CFG_VMID_2_TX_Q_2_VMID_SHIFT) |
+		(conf->tx_q_conf[3].vmid <<
+		 UDMA_GEN_VMID_CFG_VMID_2_TX_Q_3_VMID_SHIFT));
+
+	al_reg_write32(
+		&unit_regs->gen.vmid.cfg_vmid_3,
+		(conf->rx_q_conf[0].vmid <<
+		 UDMA_GEN_VMID_CFG_VMID_3_RX_Q_0_VMID_SHIFT) |
+		(conf->rx_q_conf[1].vmid <<
+		 UDMA_GEN_VMID_CFG_VMID_3_RX_Q_1_VMID_SHIFT));
+
+	al_reg_write32(
+		&unit_regs->gen.vmid.cfg_vmid_4,
+		(conf->rx_q_conf[2].vmid <<
+		 UDMA_GEN_VMID_CFG_VMID_4_RX_Q_2_VMID_SHIFT) |
+		(conf->rx_q_conf[3].vmid <<
+		 UDMA_GEN_VMID_CFG_VMID_4_RX_Q_3_VMID_SHIFT));
+
+	/* VMADDR per queue -- these registers only exist on UDMA rev 2+ */
+	rev_id = al_udma_get_revision(unit_regs);
+	if (rev_id >= AL_UDMA_REV_ID_REV2) {
+		al_reg_write32(
+			&unit_regs->gen.vmaddr.cfg_vmaddr_0,
+			(conf->tx_q_conf[0].vmaddr <<
+			 UDMA_GEN_VMADDR_CFG_VMADDR_0_TX_Q_0_VMADDR_SHIFT) |
+			(conf->tx_q_conf[1].vmaddr <<
+			 UDMA_GEN_VMADDR_CFG_VMADDR_0_TX_Q_1_VMADDR_SHIFT));
+
+		al_reg_write32(
+			&unit_regs->gen.vmaddr.cfg_vmaddr_1,
+			(conf->tx_q_conf[2].vmaddr <<
+			 UDMA_GEN_VMADDR_CFG_VMADDR_1_TX_Q_2_VMADDR_SHIFT) |
+			(conf->tx_q_conf[3].vmaddr <<
+			 UDMA_GEN_VMADDR_CFG_VMADDR_1_TX_Q_3_VMADDR_SHIFT));
+
+		al_reg_write32(
+			&unit_regs->gen.vmaddr.cfg_vmaddr_2,
+			(conf->rx_q_conf[0].vmaddr <<
+			 UDMA_GEN_VMADDR_CFG_VMADDR_2_RX_Q_0_VMADDR_SHIFT) |
+			(conf->rx_q_conf[1].vmaddr <<
+			 UDMA_GEN_VMADDR_CFG_VMADDR_2_RX_Q_1_VMADDR_SHIFT));
+
+		al_reg_write32(
+			&unit_regs->gen.vmaddr.cfg_vmaddr_3,
+			(conf->rx_q_conf[2].vmaddr <<
+			 UDMA_GEN_VMADDR_CFG_VMADDR_3_RX_Q_2_VMADDR_SHIFT) |
+			(conf->rx_q_conf[3].vmaddr <<
+			 UDMA_GEN_VMADDR_CFG_VMADDR_3_RX_Q_3_VMADDR_SHIFT));
+	}
+}
+
+/* UDMA VMID MSIX control configuration */
+void al_udma_gen_vmid_msix_conf_set(
+	struct unit_regs *unit_regs,
+	struct al_udma_gen_vmid_msix_conf *conf)
+{
+	uint32_t bits = 0;
+
+	if (conf->access_en)
+		bits |= UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_ACCESS_EN;
+	if (conf->sel)
+		bits |= UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_SEL;
+
+	/* only the MSIX access-enable and select bits are modified */
+	al_reg_write32_masked(
+		&unit_regs->gen.vmid.cfg_vmid_0,
+		UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_ACCESS_EN |
+		UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_SEL,
+		bits);
+}
+
+/* UDMA VMID control advanced Tx queue configuration */
+void al_udma_gen_vmid_advanced_tx_q_conf(
+	struct al_udma_q *q,
+	struct al_udma_gen_vmid_advanced_tx_q_conf *conf)
+{
+	struct udma_gen_regs *gen_regs = q->udma->gen_regs;
+	/* per-queue VMPR register block, indexed by queue id */
+	struct udma_gen_vmpr *vmpr = &gen_regs->vmpr[q->qid];
+
+	/* cfg_vmpr_0: address-high select and the three VMID enables
+	 * (data / prefetch / completion) */
+	al_reg_write32_masked(
+		&vmpr->cfg_vmpr_0,
+		UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_HISEL_MASK |
+		UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_DATA_VMID_EN |
+		UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_PREF_VMID_EN |
+		UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_CMPL_VMID_EN,
+		conf->tx_q_addr_hi_sel |
+		((conf->tx_q_data_vmid_en == AL_TRUE) ?
+		 UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_DATA_VMID_EN : 0) |
+		((conf->tx_q_prefetch_vmid_en == AL_TRUE) ?
+		 UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_PREF_VMID_EN : 0) |
+		((conf->tx_q_compl_vmid_en == AL_TRUE) ?
+		 UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_CMPL_VMID_EN : 0));
+
+	/* cfg_vmpr_1: Tx queue address-high value (full register) */
+	al_reg_write32(
+		&vmpr->cfg_vmpr_1,
+		conf->tx_q_addr_hi);
+
+	/* cfg_vmpr_2: prefetch and completion VMIDs */
+	al_reg_write32_masked(
+		&vmpr->cfg_vmpr_2,
+		UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_PREF_VMID_MASK |
+		UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_CMPL_VMID_MASK,
+		(conf->tx_q_prefetch_vmid <<
+		 UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_PREF_VMID_SHIFT) |
+		(conf->tx_q_compl_vmid <<
+		 UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_CMPL_VMID_SHIFT));
+
+	/* cfg_vmpr_3: data VMID value and its per-bit select mask */
+	al_reg_write32_masked(
+		&vmpr->cfg_vmpr_3,
+		UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_MASK |
+		UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SEL_MASK,
+		(conf->tx_q_data_vmid <<
+		 UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SHIFT) |
+		(conf->tx_q_data_vmid_mask <<
+		 UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SEL_SHIFT));
+}
+
+/** UDMA VMID control advanced Rx queue configuration */
+void al_udma_gen_vmid_advanced_rx_q_conf(
+	struct al_udma_q *q,
+	struct al_udma_gen_vmid_advanced_rx_q_conf *conf)
+{
+	struct udma_gen_regs *gen_regs = q->udma->gen_regs;
+	/* per-queue VMPR register block, indexed by queue id */
+	struct udma_gen_vmpr *vmpr = &gen_regs->vmpr[q->qid];
+
+	/* cfg_vmpr_4: address-high selects and VMID enables for buffer 1,
+	 * buffer 2, DDP, prefetch and completion */
+	al_reg_write32_masked(
+		&vmpr->cfg_vmpr_4,
+		UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_HISEL_MASK |
+		UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_VMID_EN |
+		UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_HISEL_MASK |
+		UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_VMID_EN |
+		UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_HISEL_MASK |
+		UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_VMID_EN |
+		UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_PREF_VMID_EN |
+		UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_CMPL_VMID_EN,
+		(conf->rx_q_addr_hi_sel <<
+		 UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_HISEL_SHIFT) |
+		((conf->rx_q_data_vmid_en == AL_TRUE) ?
+		 UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_VMID_EN : 0) |
+		(conf->rx_q_data_buff2_addr_hi_sel <<
+		 UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_HISEL_SHIFT) |
+		((conf->rx_q_data_buff2_vmid_en == AL_TRUE) ?
+		 UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_VMID_EN : 0) |
+		(conf->rx_q_ddp_addr_hi_sel <<
+		 UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_HISEL_SHIFT) |
+		((conf->rx_q_ddp_vmid_en == AL_TRUE) ?
+		 UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_VMID_EN : 0) |
+		((conf->rx_q_prefetch_vmid_en == AL_TRUE) ?
+		 UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_PREF_VMID_EN : 0) |
+		((conf->rx_q_compl_vmid_en == AL_TRUE) ?
+		 UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_CMPL_VMID_EN : 0));
+
+	/* cfg_vmpr_6: prefetch and completion VMIDs */
+	al_reg_write32_masked(
+		&vmpr->cfg_vmpr_6,
+		UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_PREF_VMID_MASK |
+		UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_CMPL_VMID_MASK,
+		(conf->rx_q_prefetch_vmid <<
+		 UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_PREF_VMID_SHIFT) |
+		(conf->rx_q_compl_vmid <<
+		 UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_CMPL_VMID_SHIFT));
+
+	/* cfg_vmpr_7: buffer-1 VMID value and its per-bit select mask */
+	al_reg_write32_masked(
+		&vmpr->cfg_vmpr_7,
+		UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_MASK |
+		UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SEL_MASK,
+		(conf->rx_q_data_vmid <<
+		 UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SHIFT) |
+		(conf->rx_q_data_vmid_mask <<
+		 UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SEL_SHIFT));
+
+	/* cfg_vmpr_8: buffer-2 VMID value and its per-bit select mask */
+	al_reg_write32_masked(
+		&vmpr->cfg_vmpr_8,
+		UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_MASK |
+		UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SEL_MASK,
+		(conf->rx_q_data_buff2_vmid <<
+		 UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SHIFT) |
+		(conf->rx_q_data_buff2_mask <<
+		 UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SEL_SHIFT));
+
+	/* cfg_vmpr_9: DDP VMID value and its per-bit select mask */
+	al_reg_write32_masked(
+		&vmpr->cfg_vmpr_9,
+		UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_MASK |
+		UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SEL_MASK,
+		(conf->rx_q_ddp_vmid <<
+		 UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SHIFT) |
+		(conf->rx_q_ddp_mask <<
+		 UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SEL_SHIFT));
+
+	/* address-high values occupy whole registers */
+	al_reg_write32(
+		&vmpr->cfg_vmpr_10,
+		conf->rx_q_addr_hi);
+
+	al_reg_write32(
+		&vmpr->cfg_vmpr_11,
+		conf->rx_q_data_buff2_addr_hi);
+
+	al_reg_write32(
+		&vmpr->cfg_vmpr_12,
+		conf->rx_q_ddp_addr_hi);
+}
+
+/* UDMA header split buffer 2 Rx queue configuration */
+void al_udma_gen_hdr_split_buff2_rx_q_conf(
+	struct al_udma_q *q,
+	struct al_udma_gen_hdr_split_buff2_q_conf *conf)
+{
+	struct udma_gen_vmpr *vmpr = &q->udma->gen_regs->vmpr[q->qid];
+
+	/* select the source of the buffer-2 MSB address bits */
+	al_reg_write32_masked(
+		&vmpr->cfg_vmpr_4,
+		UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_MSB_ADDR_SEL_MASK,
+		conf->add_msb_sel <<
+		UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_MSB_ADDR_SEL_SHIFT);
+
+	/* program the MSB address value itself */
+	al_reg_write32(&vmpr->cfg_vmpr_5, conf->addr_msb);
+}
+
diff --git a/al_hal_udma_config.h b/al_hal_udma_config.h
new file mode 100644
index 000000000000..b742e1824c92
--- /dev/null
+++ b/al_hal_udma_config.h
@@ -0,0 +1,755 @@
+/*-
+*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_udma_config UDMA Config
+ * @ingroup group_udma_api
+ * UDMA Config API
+ * @{
+ * @file al_hal_udma_config.h
+ *
+ * @brief C Header file for the Universal DMA HAL driver for configuration APIs
+ *
+ */
+
+#ifndef __AL_HAL_UDMA_CONFIG_H__
+#define __AL_HAL_UDMA_CONFIG_H__
+
+#include <al_hal_udma.h>
+
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/** Scheduling mode */
+enum al_udma_sch_mode {
+ STRICT, /* Strict priority */
+ SRR, /* Simple Round Robin */
+ DWRR /* Deficit Weighted Round Robin */
+};
+
+/** AXI configuration */
+struct al_udma_axi_conf {
+ uint32_t axi_timeout; /* Timeout for AXI transactions */
+ uint8_t arb_promotion; /* arbitration promotion */
+ al_bool swap_8_bytes; /* enable 8 bytes swap instead of 4 bytes */
+ al_bool swap_s2m_data;
+ al_bool swap_s2m_desc;
+ al_bool swap_m2s_data;
+ al_bool swap_m2s_desc;
+};
+
+/** UDMA AXI sub-master configuration (shared by the M2S and S2M masters) */
+struct al_udma_axi_submaster {
+ uint8_t id; /* AXI ID */
+ uint8_t cache_type; /* AXI cache type (presumably AxCACHE — confirm) */
+ uint8_t burst; /* AXI burst type — TODO confirm encoding */
+ uint16_t used_ext;
+ uint8_t bus_size;
+ uint8_t qos; /* AXI QoS */
+ uint8_t prot; /* AXI protection bits */
+ uint8_t max_beats;
+};
+
+/** UDMA AXI M2S configuration */
+struct al_udma_m2s_axi_conf {
+ struct al_udma_axi_submaster comp_write;
+ struct al_udma_axi_submaster data_read;
+ struct al_udma_axi_submaster desc_read;
+ al_bool break_on_max_boundary; /* Data read break on max boundary */
+ uint8_t min_axi_beats; /* Minimum burst for writing completion desc. */
+ uint8_t ostand_max_data_read;
+ uint8_t ostand_max_desc_read;
+ uint8_t ostand_max_comp_req;
+ uint8_t ostand_max_comp_write;
+};
+
+/** UDMA AXI S2M configuration */
+struct al_udma_s2m_axi_conf {
+ struct al_udma_axi_submaster data_write;
+ struct al_udma_axi_submaster desc_read;
+ struct al_udma_axi_submaster comp_write;
+ al_bool break_on_max_boundary; /* Data read break on max boundary */
+ uint8_t min_axi_beats; /* Minimum burst for writing completion desc. */
+ uint8_t ostand_max_data_req;
+ uint8_t ostand_max_data_write;
+ uint8_t ostand_max_comp_req;
+ uint8_t ostand_max_comp_write;
+ uint8_t ostand_max_desc_read;
+ uint8_t ack_fifo_depth; /* size of the stream application ack fifo */
+};
+
+/** M2S error logging */
+struct al_udma_err_log {
+ uint32_t error_status;
+ uint32_t header[4];
+};
+
+/** M2S max packet size configuration */
+struct al_udma_m2s_pkt_len_conf {
+ uint32_t max_pkt_size;
+ al_bool encode_64k_as_zero;
+};
+
+/** M2S Descriptor Prefetch configuration */
+struct al_udma_m2s_desc_pref_conf {
+ uint8_t desc_fifo_depth;
+ enum al_udma_sch_mode sch_mode; /* Scheduling mode
+ * (either strict or RR) */
+
+ uint8_t max_desc_per_packet; /* max number of descriptors to
+ * prefetch */
+ /* in one burst (5b) */
+ uint8_t pref_thr;
+ uint8_t min_burst_above_thr; /* min burst size when fifo above
+ * pref_thr (4b)
+ */
+ uint8_t min_burst_below_thr; /* min burst size when fifo below
+ * pref_thr (4b)
+ */
+ uint8_t max_pkt_limit; /* maximum number of packets in the data
+ * read FIFO, defined based on header
+ * FIFO size
+ */
+ uint16_t data_fifo_depth; /* maximum number of data beats in the
+ * data read FIFO,
+ * defined based on header FIFO size
+ */
+};
+
+/** S2M Descriptor Prefetch configuration */
+struct al_udma_s2m_desc_pref_conf {
+ uint8_t desc_fifo_depth;
+ enum al_udma_sch_mode sch_mode; /* Scheduling mode *
+ * (either strict or RR)
+ */
+
+ al_bool q_promotion; /* enable promotion */
+ al_bool force_promotion; /* force promotion */
+ al_bool en_pref_prediction; /* enable prefetch prediction */
+ uint8_t promotion_th; /* Threshold for queue promotion */
+
+ uint8_t pref_thr;
+ uint8_t min_burst_above_thr; /* min burst size when fifo above
+ * pref_thr (4b)
+ */
+ uint8_t min_burst_below_thr; /* min burst size when fifo below
+ * pref_thr (4b)
+ */
+ uint8_t a_full_thr; /* almost full threshold */
+};
+
+/** S2M Data write configuration */
+struct al_udma_s2m_data_write_conf {
+ uint16_t data_fifo_depth; /* maximum number of data beats in the
+ * data write FIFO, defined based on
+ * header FIFO size
+ */
+ uint8_t max_pkt_limit; /* maximum number of packets in the
+ * data write FIFO,defined based on
+ * header FIFO size
+ */
+ uint8_t fifo_margin;
+ uint32_t desc_wait_timer; /* waiting time for the host to write
+ * new descriptor to the queue
+ * (for the current packet in process)
+ */
+ uint32_t flags; /* bitwise of flags of s2m
+ * data_cfg_2 register
+ */
+};
+
+/** S2M Completion configuration */
+struct al_udma_s2m_completion_conf {
+ uint8_t desc_size; /* Size of completion descriptor
+ * in words
+ */
+ al_bool cnt_words; /* Completion fifo in use counter:
+ * AL_TRUE words, AL_FALSE descriptors
+ */
+ al_bool q_promotion; /* Enable promotion of the current
+ * unack in progress */
+ /* in the completion write scheduler */
+ al_bool force_rr; /* force RR arbitration in the
+ * scheduler
+ */
+ uint8_t q_free_min; /* minimum number of free completion
+ * entries
+ */
+ /* to qualify for promotion */
+
+ uint16_t comp_fifo_depth; /* Size of completion fifo in words */
+ uint16_t unack_fifo_depth; /* Size of unacked fifo in descs */
+ uint32_t timeout; /* Ack timeout from stream interface */
+};
+
+/** M2S UDMA DWRR configuration */
+struct al_udma_m2s_dwrr_conf {
+ al_bool enable_dwrr;
+ uint8_t inc_factor;
+ uint8_t weight;
+ al_bool pkt_mode;
+ uint32_t deficit_init_val;
+};
+
+/** M2S DMA Rate Limitation mode */
+struct al_udma_m2s_rlimit_mode {
+ al_bool pkt_mode_en;
+ uint16_t short_cycle_sz;
+ uint32_t token_init_val;
+};
+
+/** M2S Stream/Q Rate Limitation */
+struct al_udma_m2s_rlimit_cfg {
+ uint32_t max_burst_sz; /* maximum number of accumulated bytes in the
+ * token counter
+ */
+ uint16_t long_cycle_sz; /* number of short cycles between token fill */
+ uint32_t long_cycle; /* number of bits to add in each long cycle */
+ uint32_t short_cycle; /* number of bits to add in each cycle */
+ uint32_t mask; /* mask the different types of rate limiters */
+};
+
+enum al_udma_m2s_rlimit_action {
+ AL_UDMA_STRM_RLIMIT_ENABLE,
+ AL_UDMA_STRM_RLIMIT_PAUSE,
+ AL_UDMA_STRM_RLIMIT_RESET
+};
+
+/** M2S UDMA Q scheduling configuration */
+struct al_udma_m2s_q_dwrr_conf {
+ uint32_t max_deficit_cnt_sz; /*maximum number of accumulated bytes
+ * in the deficit counter
+ */
+ al_bool strict; /* bypass DWRR */
+ uint8_t axi_qos;
+ uint16_t q_qos;
+ uint8_t weight;
+};
+
+/** M2S UDMA / UDMA Q scheduling configuration */
+struct al_udma_m2s_sc {
+ enum al_udma_sch_mode sch_mode; /* Scheduling Mode */
+ struct al_udma_m2s_dwrr_conf dwrr; /* DWRR configuration */
+};
+
+/** UDMA / UDMA Q rate limitation configuration */
+struct al_udma_m2s_rlimit {
+ struct al_udma_m2s_rlimit_mode rlimit_mode;
+ /* rate limitation enablers */
+#if 0
+ struct al_udma_tkn_bkt_conf token_bkt; /* Token Bucket configuration */
+#endif
+};
+
+/** UDMA Data read configuration */
+struct al_udma_m2s_data_rd_conf {
+ uint8_t max_rd_d_beats; /* max burst size for reading data
+ * (in AXI beats-128b) (5b)
+ */
+ uint8_t max_rd_d_out_req; /* max number of outstanding data
+ * read requests (6b)
+ */
+ uint16_t max_rd_d_out_beats; /* max num. of data read beats (10b) */
+};
+
+/** M2S UDMA completion and application timeouts */
+struct al_udma_m2s_comp_timeouts {
+ enum al_udma_sch_mode sch_mode; /* Scheduling mode
+ * (either strict or RR)
+ */
+ al_bool enable_q_promotion;
+ uint8_t unack_fifo_depth; /* unacked desc fifo size */
+ uint8_t comp_fifo_depth; /* desc fifo size */
+ uint32_t coal_timeout; /* (24b) */
+ uint32_t app_timeout; /* (24b) */
+};
+
+/** S2M UDMA per queue completion configuration */
+struct al_udma_s2m_q_comp_conf {
+ al_bool dis_comp_coal; /* disable completion coalescing */
+ al_bool en_comp_ring_update; /* enable writing completion descs */
+ uint32_t comp_timer; /* completion coalescing timer */
+ al_bool en_hdr_split; /* enable header split */
+ al_bool force_hdr_split; /* force header split */
+ uint16_t hdr_split_size; /* size used for the header split */
+ uint8_t q_qos; /* queue QoS */
+};
+
+/** UDMA per queue VMID control configuration */
+struct al_udma_gen_vmid_q_conf {
+ /* Enable usage of the VMID per queue according to 'vmid' */
+ al_bool queue_en;
+
+ /* Enable usage of the VMID from the descriptor buffer address 63:48 */
+ al_bool desc_en;
+
+ /* VMID to be applied when 'queue_en' is asserted */
+ uint16_t vmid;
+
+ /* VMADDR to be applied to msbs when 'desc_en' is asserted.
+ * Relevant for revisions >= AL_UDMA_REV_ID_REV2 */
+ uint16_t vmaddr;
+};
+
+/** UDMA VMID control configuration */
+struct al_udma_gen_vmid_conf {
+ /* TX queue configuration */
+ struct al_udma_gen_vmid_q_conf tx_q_conf[DMA_MAX_Q];
+
+ /* RX queue configuration */
+ struct al_udma_gen_vmid_q_conf rx_q_conf[DMA_MAX_Q];
+};
+
+/** UDMA VMID MSIX control configuration */
+struct al_udma_gen_vmid_msix_conf {
+ /* Enable write to all VMID_n registers in the MSI-X Controller */
+ al_bool access_en;
+
+ /* use VMID_n [7:0] from MSI-X Controller for MSI-X message */
+ al_bool sel;
+};
+
+/** UDMA per Tx queue advanced VMID control configuration */
+struct al_udma_gen_vmid_advanced_tx_q_conf {
+ /**********************************************************************
+ * Tx Data VMID
+ **********************************************************************/
+ /* Tx data VMID enable */
+ al_bool tx_q_data_vmid_en;
+
+ /*
+ * For Tx data reads, replacement bits for the original address.
+ * The number of bits replaced is determined according to
+ * 'tx_q_addr_hi_sel'
+ */
+ unsigned int tx_q_addr_hi;
+
+ /*
+ * For Tx data reads, 6 bits serving the number of bits taken from the
+ * extra register on account of bits coming from the original address
+ * field.
+ * When 'tx_q_addr_hi_sel'=32 all of 'tx_q_addr_hi' will be taken.
+ * When 'tx_q_addr_hi_sel'=0 none of it will be taken, and when any
+ * value in between, it will start from the MSB bit and sweep down as
+ * many bits as needed. For example if 'tx_q_addr_hi_sel'=8, the final
+ * address [63:56] will carry 'tx_q_addr_hi'[31:24] while [55:32] will
+ * carry the original buffer address[55:32].
+ */
+ unsigned int tx_q_addr_hi_sel;
+
+ /*
+ * Tx data read VMID
+ * Masked per bit with 'tx_q_data_vmid_mask'
+ */
+ unsigned int tx_q_data_vmid;
+
+ /*
+ * Tx data read VMID mask
+ * Each '1' selects from the buffer address, each '0' selects from
+ * 'tx_q_data_vmid'
+ */
+ unsigned int tx_q_data_vmid_mask;
+
+ /**********************************************************************
+ * Tx prefetch VMID
+ **********************************************************************/
+ /* Tx prefetch VMID enable */
+ al_bool tx_q_prefetch_vmid_en;
+
+ /* Tx prefetch VMID */
+ unsigned int tx_q_prefetch_vmid;
+
+ /**********************************************************************
+ * Tx completion VMID
+ **********************************************************************/
+ /* Tx completion VMID enable */
+ al_bool tx_q_compl_vmid_en;
+
+ /* Tx completion VMID */
+ unsigned int tx_q_compl_vmid;
+};
+
+/** UDMA per Rx queue advanced VMID control configuration */
+struct al_udma_gen_vmid_advanced_rx_q_conf {
+ /**********************************************************************
+ * Rx Data VMID
+ **********************************************************************/
+ /* Rx data VMID enable */
+ al_bool rx_q_data_vmid_en;
+
+ /*
+ * For Rx data writes, replacement bits for the original address.
+ * The number of bits replaced is determined according to
+ * 'rx_q_addr_hi_sel'
+ */
+ unsigned int rx_q_addr_hi;
+
+ /*
+ * For Rx data writes, 6 bits serving the number of bits taken from the
+ * extra register on account of bits coming from the original address
+ * field.
+ */
+ unsigned int rx_q_addr_hi_sel;
+
+ /*
+ * Rx data write VMID
+ * Masked per bit with 'rx_q_data_vmid_mask'
+ */
+ unsigned int rx_q_data_vmid;
+
+ /* Rx data write VMID mask */
+ unsigned int rx_q_data_vmid_mask;
+
+ /**********************************************************************
+ * Rx Data Buffer 2 VMID
+ **********************************************************************/
+ /* Rx data buff2 VMID enable */
+ al_bool rx_q_data_buff2_vmid_en;
+
+ /*
+ * For Rx data buff2 writes, replacement bits for the original address.
+ * The number of bits replaced is determined according to
+ * 'rx_q_data_buff2_addr_hi_sel'
+ */
+ unsigned int rx_q_data_buff2_addr_hi;
+
+ /*
+ * For Rx data buff2 writes, 6 bits serving the number of bits taken
+ * from the extra register on account of bits coming from the original
+ * address field.
+ */
+ unsigned int rx_q_data_buff2_addr_hi_sel;
+
+ /*
+ * Rx data buff2 write VMID
+ * Masked per bit with 'rx_q_data_buff2_mask'
+ */
+ unsigned int rx_q_data_buff2_vmid;
+
+ /* Rx data buff2 write VMID mask */
+ unsigned int rx_q_data_buff2_mask;
+
+ /**********************************************************************
+ * Rx DDP VMID
+ **********************************************************************/
+ /* Rx DDP write VMID enable */
+ al_bool rx_q_ddp_vmid_en;
+
+ /*
+ * For Rx DDP writes, replacement bits for the original address.
+ * The number of bits replaced is determined according to
+ * 'rx_q_ddp_addr_hi_sel'
+ */
+ unsigned int rx_q_ddp_addr_hi;
+
+ /*
+ * For Rx DDP writes, 6 bits serving the number of bits taken from the
+ * extra register on account of bits coming from the original address
+ * field.
+ */
+ unsigned int rx_q_ddp_addr_hi_sel;
+
+ /*
+ * Rx DDP write VMID
+ * Masked per bit with 'rx_q_ddp_mask'
+ */
+ unsigned int rx_q_ddp_vmid;
+
+ /* Rx DDP write VMID mask */
+ unsigned int rx_q_ddp_mask;
+
+ /**********************************************************************
+ * Rx prefetch VMID
+ **********************************************************************/
+ /* Rx prefetch VMID enable */
+ al_bool rx_q_prefetch_vmid_en;
+
+ /* Rx prefetch VMID */
+ unsigned int rx_q_prefetch_vmid;
+
+ /**********************************************************************
+ * Rx completion VMID
+ **********************************************************************/
+ /* Rx completion VMID enable */
+ al_bool rx_q_compl_vmid_en;
+
+ /* Rx completion VMID */
+ unsigned int rx_q_compl_vmid;
+};
+
+/**
+ * Header split, buffer 2 per queue configuration
+ * When header split is enabled, Buffer_2 is used as an address for the header
+ * data. Buffer_2 is defined as 32-bits in the RX descriptor and it is defined
+ * that the MSB ([63:32]) of Buffer_1 is used as address [63:32] for the header
+ * address.
+ */
+struct al_udma_gen_hdr_split_buff2_q_conf {
+ /*
+ * MSB of the 64-bit address (bits [63:32]) that can be used for header
+ * split for this queue
+ */
+ unsigned int addr_msb;
+
+ /*
+ * Determine how to select the MSB (bits [63:32]) of the address when
+ * header split is enabled (4 bits, one per byte)
+ * - Bits [3:0]:
+ * [0] - selector for bits [39:32]
+ * [1] - selector for bits [47:40]
+ * [2] - selector for bits [55:48]
+ * [3] - selector for bits [63:56]
+ * - Bit value:
+ * 0 - Use Buffer_1 (legacy operation)
+ * 1 - Use the queue configuration 'addr_msb'
+ */
+ unsigned int add_msb_sel;
+};
+
+/* Report Error - to be used for abort */
+void al_udma_err_report(struct al_udma *udma);
+
+/* Statistics - TBD */
+void al_udma_stats_get(struct al_udma *udma);
+
+/* Misc configurations */
+/* Configure AXI configuration */
+int al_udma_axi_set(struct udma_gen_axi *axi_regs,
+ struct al_udma_axi_conf *axi);
+
+/* Configure UDMA AXI M2S configuration */
+int al_udma_m2s_axi_set(struct al_udma *udma,
+ struct al_udma_m2s_axi_conf *axi_m2s);
+
+/* Configure UDMA AXI S2M configuration */
+int al_udma_s2m_axi_set(struct al_udma *udma,
+ struct al_udma_s2m_axi_conf *axi_s2m);
+
+/* Configure M2S packet len */
+int al_udma_m2s_packet_size_cfg_set(struct al_udma *udma,
+ struct al_udma_m2s_pkt_len_conf *conf);
+
+/* Configure M2S UDMA descriptor prefetch */
+int al_udma_m2s_pref_set(struct al_udma *udma,
+ struct al_udma_m2s_desc_pref_conf *conf);
+int al_udma_m2s_pref_get(struct al_udma *udma,
+ struct al_udma_m2s_desc_pref_conf *conf);
+
+/* set m2s packet's max descriptors (including meta descriptors) */
+#define AL_UDMA_M2S_MAX_ALLOWED_DESCS_PER_PACKET 31
+int al_udma_m2s_max_descs_set(struct al_udma *udma, uint8_t max_descs);
+
+/* set s2m packets' max descriptors */
+#define AL_UDMA_S2M_MAX_ALLOWED_DESCS_PER_PACKET 31
+int al_udma_s2m_max_descs_set(struct al_udma *udma, uint8_t max_descs);
+
+
+/* Configure S2M UDMA descriptor prefetch */
+int al_udma_s2m_pref_set(struct al_udma *udma,
+ struct al_udma_s2m_desc_pref_conf *conf);
+/*
+ * NOTE(review): this re-declares al_udma_m2s_pref_get (already declared
+ * above under the M2S section); it looks like a copy-paste of the M2S pair
+ * and was presumably meant to be an S2M "get" counterpart. Confirm against
+ * the HAL sources before changing, since al_udma_s2m_pref_get may not be
+ * implemented.
+ */
+int al_udma_m2s_pref_get(struct al_udma *udma,
+ struct al_udma_m2s_desc_pref_conf *conf);
+
+/* Configure S2M UDMA data write */
+int al_udma_s2m_data_write_set(struct al_udma *udma,
+ struct al_udma_s2m_data_write_conf *conf);
+
+/* Configure the S2M full line write feature */
+int al_udma_s2m_full_line_write_set(struct al_udma *udma, al_bool enable);
+
+/* Configure S2M UDMA completion */
+int al_udma_s2m_completion_set(struct al_udma *udma,
+ struct al_udma_s2m_completion_conf *conf);
+
+/* Configure the M2S UDMA scheduling mode */
+int al_udma_m2s_sc_set(struct al_udma *udma,
+ struct al_udma_m2s_dwrr_conf *sched);
+
+/* Configure the M2S UDMA rate limitation */
+int al_udma_m2s_rlimit_set(struct al_udma *udma,
+ struct al_udma_m2s_rlimit_mode *mode);
+int al_udma_m2s_rlimit_reset(struct al_udma *udma);
+
+/* Configure the M2S Stream rate limitation */
+int al_udma_m2s_strm_rlimit_set(struct al_udma *udma,
+ struct al_udma_m2s_rlimit_cfg *conf);
+int al_udma_m2s_strm_rlimit_act(struct al_udma *udma,
+ enum al_udma_m2s_rlimit_action act);
+
+/* Configure the M2S UDMA Q rate limitation */
+int al_udma_m2s_q_rlimit_set(struct al_udma_q *udma_q,
+ struct al_udma_m2s_rlimit_cfg *conf);
+int al_udma_m2s_q_rlimit_act(struct al_udma_q *udma_q,
+ enum al_udma_m2s_rlimit_action act);
+
+/* Configure the M2S UDMA Q scheduling mode */
+int al_udma_m2s_q_sc_set(struct al_udma_q *udma_q,
+ struct al_udma_m2s_q_dwrr_conf *conf);
+int al_udma_m2s_q_sc_pause(struct al_udma_q *udma_q, al_bool set);
+int al_udma_m2s_q_sc_reset(struct al_udma_q *udma_q);
+
+/* M2S UDMA completion and application timeouts */
+int al_udma_m2s_comp_timeouts_set(struct al_udma *udma,
+ struct al_udma_m2s_comp_timeouts *conf);
+int al_udma_m2s_comp_timeouts_get(struct al_udma *udma,
+ struct al_udma_m2s_comp_timeouts *conf);
+
+/* UDMA get revision: extract the programming-ID field of the revision reg */
+static INLINE unsigned int al_udma_get_revision(struct unit_regs __iomem *unit_regs)
+{
+ uint32_t rev_reg = al_reg_read32(&unit_regs->gen.dma_misc.revision);
+
+ return (rev_reg & UDMA_GEN_DMA_MISC_REVISION_PROGRAMMING_ID_MASK) >>
+ UDMA_GEN_DMA_MISC_REVISION_PROGRAMMING_ID_SHIFT;
+}
+
+/**
+ * S2M UDMA Configure the expected behavior of Rx/S2M UDMA when there are no Rx Descriptors.
+ *
+ * @param udma
+ * @param drop_packet when set to true, the UDMA will drop packet.
+ * @param gen_interrupt when set to true, the UDMA will generate
+ * no_desc_hint interrupt when a packet received and the UDMA
+ * doesn't find enough free descriptors for it.
+ * @param wait_for_desc_timeout timeout in SB cycles to wait for new
+ * descriptors before dropping the packets.
+ * Notes:
+ * - The hint interrupt is raised immediately without waiting
+ * for new descs.
+ * - value 0 means wait forever.
+ *
+ * Notes:
+ * - When gen_interrupt is set, the API won't program the iofic to unmask this
+ * interrupt; in this case the caller should take care of doing that unmask
+ * using the al_udma_iofic_config() API.
+ *
+ * - The hardware's default configuration is: no drop packet, generate hint
+ * interrupt.
+ * - This API must be called once and before enabling the UDMA
+ *
+ * @return 0 if no error found.
+ */
+int al_udma_s2m_no_desc_cfg_set(struct al_udma *udma, al_bool drop_packet, al_bool gen_interrupt, uint32_t wait_for_desc_timeout);
+
+/**
+ * S2M UDMA configure a queue's completion update
+ *
+ * @param udma_q
+ * @param enable set to true to enable completion update
+ *
+ * Completion update had better be disabled for Tx queues, as those
+ * descriptors don't carry useful information; disabling it saves DMA
+ * accesses.
+ *
+ * NOTE(review): "updade" in the function name is a typo, kept as-is since
+ * renaming would break existing callers.
+ *
+ * @return 0 if no error found.
+ */
+int al_udma_s2m_q_compl_updade_config(struct al_udma_q *udma_q, al_bool enable);
+
+/**
+ * S2M UDMA configure a queue's completion descriptors coalescing
+ *
+ * @param udma_q
+ * @param enable set to true to enable completion coalescing
+ * @param coal_timeout in South Bridge cycles.
+ *
+ * @return 0 if no error found.
+ */
+int al_udma_s2m_q_compl_coal_config(struct al_udma_q *udma_q, al_bool enable, uint32_t coal_timeout);
+
+/**
+ * S2M UDMA configure completion descriptors write burst parameters
+ *
+ * @param udma
+ * @param burst_size completion descriptors write burst size in bytes.
+ *
+ * @return 0 if no error found.
+ */
+int al_udma_s2m_compl_desc_burst_config(struct al_udma *udma,
+ uint16_t burst_size);
+
+/**
+ * S2M UDMA configure a queue's completion header split
+ *
+ * @param udma_q
+ * @param enable set to true to enable completion header split
+ * @param force_hdr_split the header split length will be taken from the queue configuration
+ * @param hdr_len header split length.
+ *
+ * @return 0 if no error found.
+ */
+int al_udma_s2m_q_compl_hdr_split_config(struct al_udma_q *udma_q,
+ al_bool enable,
+ al_bool force_hdr_split,
+ uint32_t hdr_len);
+
+/* S2M UDMA per queue completion configuration */
+int al_udma_s2m_q_comp_set(struct al_udma_q *udma_q,
+ struct al_udma_s2m_q_comp_conf *conf);
+
+/** UDMA VMID control configuration */
+void al_udma_gen_vmid_conf_set(
+ struct unit_regs __iomem *unit_regs,
+ struct al_udma_gen_vmid_conf *conf);
+
+/** UDMA VMID MSIX control configuration */
+void al_udma_gen_vmid_msix_conf_set(
+ struct unit_regs __iomem *unit_regs,
+ struct al_udma_gen_vmid_msix_conf *conf);
+
+/** UDMA VMID control advanced Tx queue configuration */
+void al_udma_gen_vmid_advanced_tx_q_conf(
+ struct al_udma_q *q,
+ struct al_udma_gen_vmid_advanced_tx_q_conf *conf);
+
+/** UDMA VMID control advanced Rx queue configuration */
+void al_udma_gen_vmid_advanced_rx_q_conf(
+ struct al_udma_q *q,
+ struct al_udma_gen_vmid_advanced_rx_q_conf *conf);
+
+/** UDMA header split buffer 2 Rx queue configuration */
+void al_udma_gen_hdr_split_buff2_rx_q_conf(
+ struct al_udma_q *q,
+ struct al_udma_gen_hdr_split_buff2_q_conf *conf);
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+/** @} end of UDMA config group */
+#endif /* __AL_HAL_UDMA_CONFIG_H__ */
diff --git a/al_hal_udma_debug.c b/al_hal_udma_debug.c
new file mode 100644
index 000000000000..c6b9bf4b9bf0
--- /dev/null
+++ b/al_hal_udma_debug.c
@@ -0,0 +1,497 @@
+/*-
+*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @file al_hal_udma_debug.c
+ *
+ * @brief Universal DMA HAL driver for debug
+ *
+ */
+
+#define DEBUG
+
+#include <al_hal_common.h>
+#include <al_hal_udma_regs.h>
+#include <al_hal_udma_debug.h>
+
+/* Dump the M2S (TX) AXI master configuration registers to the debug log. */
+static void al_udma_regs_m2s_axi_print(struct al_udma *udma)
+{
+	al_dbg("M2S AXI regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, comp_wr_cfg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, comp_wr_cfg_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, data_rd_cfg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, data_rd_cfg_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, desc_rd_cfg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, desc_rd_cfg_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, data_rd_cfg);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, desc_rd_cfg_3);
+
+	/* desc_wr_cfg_1: also decode the min/max AXI burst beat fields */
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, desc_wr_cfg_1);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, axi_m2s,
+			desc_wr_cfg_1,
+			max_axi_beats,
+			UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, axi_m2s,
+			desc_wr_cfg_1,
+			min_axi_beats,
+			UDMA_AXI_M2S_DESC_WR_CFG_1_MIN_AXI_BEATS);
+
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, ostand_cfg);
+}
+
+/*
+ * Dump the M2S (TX) general/state/error registers; the state register is
+ * additionally decoded into its per-sub-block state fields.
+ */
+static void al_udma_regs_m2s_general_print(struct al_udma *udma)
+{
+	al_dbg("M2S general regs:\n");
+
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, state);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s, state,
+			comp_ctrl,
+			UDMA_M2S_STATE_COMP_CTRL);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s, state,
+			stream_if,
+			UDMA_M2S_STATE_STREAM_IF);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s, state,
+			rd_ctrl,
+			UDMA_M2S_STATE_DATA_RD_CTRL);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s, state,
+			desc_pref,
+			UDMA_M2S_STATE_DESC_PREF);
+
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, err_log_mask);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, log_0);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, log_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, log_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, log_3);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, data_fifo_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, header_fifo_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, unack_fifo_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, check_en);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, fifo_en);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, cfg_len);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, stream_cfg);
+}
+
+/* Dump the M2S descriptor-prefetch / data-read configuration registers. */
+static void al_udma_regs_m2s_rd_print(struct al_udma *udma)
+{
+	al_dbg("M2S read regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_rd, desc_pref_cfg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_rd, desc_pref_cfg_2);
+
+	/* desc_pref_cfg_3: also decode the burst/threshold fields */
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_rd, desc_pref_cfg_3);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s_rd,
+			desc_pref_cfg_3,
+			min_burst_below_thr,
+			UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s_rd,
+			desc_pref_cfg_3,
+			min_burst_above_thr,
+			UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s_rd,
+			desc_pref_cfg_3,
+			pref_thr,
+			UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR);
+
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_rd, data_cfg);
+}
+
+/* Dump the M2S DWRR (deficit weighted round robin) scheduler register. */
+static void al_udma_regs_m2s_dwrr_print(struct al_udma *udma)
+{
+	al_dbg("M2S DWRR regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_dwrr, cfg_sched);
+}
+
+/* Dump the M2S global rate limiter configuration register. */
+static void al_udma_regs_m2s_rate_limiter_print(struct al_udma *udma)
+{
+	al_dbg("M2S rate limiter regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_rate_limiter, gen_cfg);
+}
+
+/* Dump the M2S per-stream rate limiter configuration registers. */
+static void al_udma_regs_m2s_stream_rate_limiter_print(struct al_udma *udma)
+{
+	al_dbg("M2S stream rate limiter regs:\n");
+
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stream_rate_limiter,
+			rlimit.cfg_1s);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stream_rate_limiter,
+			rlimit.cfg_cycle);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stream_rate_limiter,
+			rlimit.cfg_token_size_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stream_rate_limiter,
+			rlimit.cfg_token_size_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stream_rate_limiter,
+			rlimit.mask);
+}
+
+/*
+ * Dump the M2S completion configuration registers; cfg_1c is additionally
+ * decoded into its FIFO depth / queue promotion / round-robin fields.
+ */
+static void al_udma_regs_m2s_comp_print(struct al_udma *udma)
+{
+	al_dbg("M2S completion regs:\n");
+
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_comp, cfg_1c);
+
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s_comp, cfg_1c,
+			comp_fifo_depth,
+			UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s_comp, cfg_1c,
+			unack_fifo_depth,
+			UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH);
+	AL_UDMA_PRINT_REG_BIT(udma, " ", "\n", m2s, m2s_comp, cfg_1c,
+			q_promotion,
+			UDMA_M2S_COMP_CFG_1C_Q_PROMOTION);
+	AL_UDMA_PRINT_REG_BIT(udma, " ", "\n", m2s, m2s_comp, cfg_1c,
+			force_rr,
+			UDMA_M2S_COMP_CFG_1C_FORCE_RR);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s_comp, cfg_1c,
+			q_free_min,
+			UDMA_M2S_COMP_CFG_1C_Q_FREE_MIN);
+
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_comp, cfg_coal);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_comp, cfg_application_ack);
+}
+
+/* Dump the M2S statistics counters (packets, bytes, descriptors, acks). */
+static void al_udma_regs_m2s_stat_print(struct al_udma *udma)
+{
+	al_dbg("M2S statistics regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, cfg_st);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, tx_pkt);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, tx_bytes_low);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, tx_bytes_high);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, prefed_desc);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, comp_pkt);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, comp_desc);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, ack_pkts);
+}
+
+/* Dump the M2S feature (capability) registers. */
+static void al_udma_regs_m2s_feature_print(struct al_udma *udma)
+{
+	al_dbg("M2S feature regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_feature, reg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_feature, reg_3);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_feature, reg_4);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_feature, reg_5);
+}
+
+/*
+ * Dump the M2S registers of a single queue.
+ *
+ * The sel_* status registers report the queue selected by the M2S indirect
+ * control register, so that register is written with qid first.
+ */
+static void al_udma_regs_m2s_q_print(struct al_udma *udma, uint32_t qid)
+{
+	al_dbg("M2S Q[%d] status regs:\n", qid);
+	/* Select the queue whose status the sel_* registers report */
+	al_reg_write32(&udma->udma_regs->m2s.m2s.indirect_ctrl, qid);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, sel_pref_fifo_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, sel_comp_fifo_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, sel_rate_limit_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, sel_dwrr_status);
+
+	al_dbg("M2S Q[%d] regs:\n", qid);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], cfg);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tdrbp_low);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tdrbp_high);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tdrl);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tdrhp);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tdrtp);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tdcp);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tcrbp_low);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tcrbp_high);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tcrhp);
+
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], rlimit.cfg_1s);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], rlimit.cfg_cycle);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid],
+			rlimit.cfg_token_size_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid],
+			rlimit.cfg_token_size_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], rlimit.mask);
+
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], dwrr_cfg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], dwrr_cfg_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], dwrr_cfg_3);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], comp_cfg);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], q_tx_pkt);
+}
+
+/* Dump the S2M (RX) AXI master configuration registers. */
+static void al_udma_regs_s2m_axi_print(struct al_udma *udma)
+{
+	al_dbg("S2M AXI regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, data_wr_cfg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, data_wr_cfg_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, desc_rd_cfg_4);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, desc_rd_cfg_5);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, comp_wr_cfg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, comp_wr_cfg_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, data_wr_cfg);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, desc_rd_cfg_3);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, desc_wr_cfg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, ostand_cfg_rd);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, ostand_cfg_wr);
+}
+
+/* Dump the S2M (RX) general/state/error registers. */
+static void al_udma_regs_s2m_general_print(struct al_udma *udma)
+{
+	al_dbg("S2M general regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, state);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, err_log_mask);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, log_0);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, log_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, log_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, log_3);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, s_data_fifo_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, s_header_fifo_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, axi_data_fifo_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, unack_fifo_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, check_en);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, fifo_en);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, stream_cfg);
+}
+
+/* Dump the S2M descriptor-prefetch configuration registers. */
+static void al_udma_regs_s2m_rd_print(struct al_udma *udma)
+{
+	al_dbg("S2M read regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_rd, desc_pref_cfg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_rd, desc_pref_cfg_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_rd, desc_pref_cfg_3);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_rd, desc_pref_cfg_4);
+}
+
+/*
+ * Dump the S2M data write configuration registers.
+ *
+ * The original code printed data_cfg_1 twice (copy/paste error) and never
+ * printed data_cfg_2; print each register exactly once.
+ */
+static void al_udma_regs_s2m_wr_print(struct al_udma *udma)
+{
+	al_dbg("S2M write regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_wr, data_cfg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_wr, data_cfg_2);
+}
+
+/* Dump the S2M completion configuration registers. */
+static void al_udma_regs_s2m_comp_print(struct al_udma *udma)
+{
+	al_dbg("S2M completion regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_comp, cfg_1c);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_comp, cfg_2c);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_comp, cfg_application_ack);
+}
+
+/* Dump the S2M statistics counters (drops, bytes, descriptors, acks). */
+static void al_udma_regs_s2m_stat_print(struct al_udma *udma)
+{
+	al_dbg("S2M statistics regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, drop_pkt);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, rx_bytes_low);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, rx_bytes_high);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, prefed_desc);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, comp_pkt);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, comp_desc);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, ack_pkts);
+}
+
+/* Dump the S2M feature (capability) registers. */
+static void al_udma_regs_s2m_feature_print(struct al_udma *udma)
+{
+	al_dbg("S2M feature regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_feature, reg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_feature, reg_3);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_feature, reg_4);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_feature, reg_5);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_feature, reg_6);
+}
+
+/*
+ * Dump the S2M registers of a single queue.
+ *
+ * The sel_* status registers report the queue selected via the S2M
+ * indirect control register. The original code wrote the M2S indirect
+ * control register (m2s.m2s.indirect_ctrl) instead - a copy/paste from
+ * the M2S variant - so the selected-queue status printed below was that
+ * of whichever queue the M2S side last selected. Write the S2M indirect
+ * control register.
+ */
+static void al_udma_regs_s2m_q_print(struct al_udma *udma, uint32_t qid)
+{
+	al_dbg("S2M Q[%d] status regs:\n", qid);
+	/* Select the queue whose status the S2M sel_* registers report */
+	al_reg_write32(&udma->udma_regs->s2m.s2m.indirect_ctrl, qid);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, sel_pref_fifo_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, sel_comp_fifo_status);
+
+	al_dbg("S2M Q[%d] regs:\n", qid);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], cfg);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rdrbp_low);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rdrbp_high);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rdrl);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rdrhp);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rdrtp);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rdcp);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rcrbp_low);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rcrbp_high);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rcrhp);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rcrhp_internal);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], comp_cfg);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], comp_cfg_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], pkt_cfg);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], qos_cfg);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], q_rx_pkt);
+}
+
+/*
+ * Print udma registers selected by 'mask' (see AL_UDMA_DEBUG_* bits).
+ *
+ * Dispatches to the M2S printers for a TX udma and to the S2M printers
+ * otherwise; bits [0..DMA_MAX_Q-1] select individual queues.
+ * A NULL udma is silently ignored.
+ */
+void al_udma_regs_print(struct al_udma *udma, unsigned int mask)
+{
+	uint32_t i;
+
+	if (!udma)
+		return;
+
+	if (udma->type == UDMA_TX) {
+		if (mask & AL_UDMA_DEBUG_AXI)
+			al_udma_regs_m2s_axi_print(udma);
+		if (mask & AL_UDMA_DEBUG_GENERAL)
+			al_udma_regs_m2s_general_print(udma);
+		if (mask & AL_UDMA_DEBUG_READ)
+			al_udma_regs_m2s_rd_print(udma);
+		if (mask & AL_UDMA_DEBUG_DWRR)
+			al_udma_regs_m2s_dwrr_print(udma);
+		if (mask & AL_UDMA_DEBUG_RATE_LIMITER)
+			al_udma_regs_m2s_rate_limiter_print(udma);
+		if (mask & AL_UDMA_DEBUG_STREAM_RATE_LIMITER)
+			al_udma_regs_m2s_stream_rate_limiter_print(udma);
+		if (mask & AL_UDMA_DEBUG_COMP)
+			al_udma_regs_m2s_comp_print(udma);
+		if (mask & AL_UDMA_DEBUG_STAT)
+			al_udma_regs_m2s_stat_print(udma);
+		if (mask & AL_UDMA_DEBUG_FEATURE)
+			al_udma_regs_m2s_feature_print(udma);
+		for (i = 0; i < DMA_MAX_Q; i++) {
+			if (mask & AL_UDMA_DEBUG_QUEUE(i))
+				al_udma_regs_m2s_q_print(udma, i);
+		}
+	} else {
+		if (mask & AL_UDMA_DEBUG_AXI)
+			al_udma_regs_s2m_axi_print(udma);
+		if (mask & AL_UDMA_DEBUG_GENERAL)
+			al_udma_regs_s2m_general_print(udma);
+		if (mask & AL_UDMA_DEBUG_READ)
+			al_udma_regs_s2m_rd_print(udma);
+		if (mask & AL_UDMA_DEBUG_WRITE)
+			al_udma_regs_s2m_wr_print(udma);
+		if (mask & AL_UDMA_DEBUG_COMP)
+			al_udma_regs_s2m_comp_print(udma);
+		if (mask & AL_UDMA_DEBUG_STAT)
+			al_udma_regs_s2m_stat_print(udma);
+		if (mask & AL_UDMA_DEBUG_FEATURE)
+			al_udma_regs_s2m_feature_print(udma);
+		for (i = 0; i < DMA_MAX_Q; i++) {
+			if (mask & AL_UDMA_DEBUG_QUEUE(i))
+				al_udma_regs_s2m_q_print(udma, i);
+		}
+	}
+}
+
+/*
+ * Print the software state of one udma queue structure.
+ *
+ * NULL udma or an out-of-range qid is silently ignored.
+ *
+ * NOTE(review): "%016llx" expects unsigned long long, while the arguments
+ * are cast to uint64_t (which may be unsigned long on LP64) - confirm the
+ * platform printf conventions, or cast to unsigned long long / use PRIx64.
+ */
+void al_udma_q_struct_print(struct al_udma *udma, uint32_t qid)
+{
+	struct al_udma_q *queue;
+
+	if (!udma)
+		return;
+
+	if (qid >= DMA_MAX_Q)
+		return;
+
+	queue = &udma->udma_q[qid];
+
+	al_dbg("Q[%d] struct:\n", qid);
+	al_dbg(" size_mask = 0x%08x\n", (uint32_t)queue->size_mask);
+	al_dbg(" q_regs = %p\n", queue->q_regs);
+	al_dbg(" desc_base_ptr = %p\n", queue->desc_base_ptr);
+	al_dbg(" next_desc_idx = %d\n", (uint16_t)queue->next_desc_idx);
+	al_dbg(" desc_ring_id = %d\n", (uint32_t)queue->desc_ring_id);
+	al_dbg(" cdesc_base_ptr = %p\n", queue->cdesc_base_ptr);
+	al_dbg(" cdesc_size = %d\n", (uint32_t)queue->cdesc_size);
+	al_dbg(" next_cdesc_idx = %d\n", (uint16_t)queue->next_cdesc_idx);
+	al_dbg(" end_cdesc_ptr = %p\n", queue->end_cdesc_ptr);
+	al_dbg(" comp_head_idx = %d\n", (uint16_t)queue->comp_head_idx);
+	al_dbg(" comp_head_ptr = %p\n", queue->comp_head_ptr);
+	al_dbg(" pkt_crnt_descs = %d\n", (uint32_t)queue->pkt_crnt_descs);
+	al_dbg(" comp_ring_id = %d\n", (uint32_t)queue->comp_ring_id);
+	al_dbg(" desc_phy_base = 0x%016llx\n", (uint64_t)queue->desc_phy_base);
+	al_dbg(" cdesc_phy_base = 0x%016llx\n",
+			(uint64_t)queue->cdesc_phy_base);
+	al_dbg(" flags = 0x%08x\n", (uint32_t)queue->flags);
+	al_dbg(" size = %d\n", (uint32_t)queue->size);
+	al_dbg(" status = %d\n", (uint32_t)queue->status);
+	al_dbg(" udma = %p\n", queue->udma);
+	al_dbg(" qid = %d\n", (uint32_t)queue->qid);
+}
+
+/*
+ * Print the raw descriptor words of a queue's submission or completion
+ * ring (16/8/4 byte descriptors supported).
+ *
+ * NULL udma, an out-of-range qid, or an unallocated ring is a no-op.
+ *
+ * Fix: the per-descriptor address was computed as
+ * (uint32_t)base_ptr + i * desc_size, which truncates the pointer on
+ * 64-bit platforms; use char * arithmetic instead.
+ */
+void al_udma_ring_print(struct al_udma *udma, uint32_t qid,
+		enum al_udma_ring_type rtype)
+{
+	struct al_udma_q *queue;
+	uint32_t desc_size;
+	void *base_ptr;
+	uint32_t i;
+
+	if (!udma)
+		return;
+
+	if (qid >= DMA_MAX_Q)
+		return;
+
+	queue = &udma->udma_q[qid];
+	if (rtype == AL_RING_SUBMISSION) {
+		base_ptr = queue->desc_base_ptr;
+		desc_size = sizeof(union al_udma_desc);
+		if (base_ptr)
+			al_dbg("Q[%d] submission ring pointers:\n", qid);
+		else {
+			al_dbg("Q[%d] submission ring is not allocated\n", qid);
+			return;
+		}
+	} else {
+		base_ptr = queue->cdesc_base_ptr;
+		desc_size = queue->cdesc_size;
+		if (base_ptr)
+			al_dbg("Q[%d] completion ring pointers:\n", qid);
+		else {
+			al_dbg("Q[%d] completion ring is not allocated\n", qid);
+			return;
+		}
+	}
+
+	for (i = 0; i < queue->size; i++) {
+		/* char * arithmetic keeps the full pointer width on 64-bit */
+		uint32_t *curr_addr =
+			(uint32_t *)((char *)base_ptr + i * desc_size);
+
+		if (desc_size == 16)
+			al_dbg("[%04d](%p): %08x %08x %08x %08x\n",
+					i,
+					curr_addr,
+					(uint32_t)*curr_addr,
+					(uint32_t)*(curr_addr+1),
+					(uint32_t)*(curr_addr+2),
+					(uint32_t)*(curr_addr+3));
+		else if (desc_size == 8)
+			al_dbg("[%04d](%p): %08x %08x\n",
+					i,
+					curr_addr,
+					(uint32_t)*curr_addr,
+					(uint32_t)*(curr_addr+1));
+		else if (desc_size == 4)
+			al_dbg("[%04d](%p): %08x\n",
+					i,
+					curr_addr,
+					(uint32_t)*curr_addr);
+		else
+			break;	/* unsupported descriptor size */
+	}
+}
diff --git a/al_hal_udma_debug.h b/al_hal_udma_debug.h
new file mode 100644
index 000000000000..7bd1d972917a
--- /dev/null
+++ b/al_hal_udma_debug.h
@@ -0,0 +1,134 @@
+/*-
+*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_udma_debug UDMA Debug
+ * @ingroup group_udma_api
+ * UDMA Debug
+ * @{
+ * @file al_hal_udma_debug.h
+ *
+ * @brief C Header file for the Universal DMA HAL driver for debug APIs
+ *
+ */
+
+#ifndef __AL_HAL_UDMA_DEBUG_H__
+#define __AL_HAL_UDMA_DEBUG_H__
+
+#include <al_hal_udma.h>
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/* UDMA register print helper macros */
+
+/* Print a whole register: "<PREFIX><reg name> = 0x<value><POSTFIX>" */
+#define AL_UDMA_PRINT_REG(UDMA, PREFIX, POSTFIX, TYPE, GROUP, REG) \
+	al_dbg(PREFIX #REG " = 0x%08x" POSTFIX, al_reg_read32( \
+			&(UDMA->udma_regs->TYPE.GROUP.REG)))
+
+/*
+ * Print a multi-bit field of a register.
+ *
+ * Fix: the mask/shift expression must be parenthesized - '>>' binds
+ * tighter than '&' in C, so the original unparenthesized form computed
+ * value & (MASK >> SHIFT) and printed a wrong, unshifted field value.
+ */
+#define AL_UDMA_PRINT_REG_FIELD( \
+	UDMA, PREFIX, POSTFIX, FMT, TYPE, GROUP, REG, LBL, FIELD) \
+	al_dbg(PREFIX #LBL " = " FMT POSTFIX, (al_reg_read32( \
+			&(UDMA->udma_regs->TYPE.GROUP.REG)) \
+			& FIELD ## _MASK) >> FIELD ## _SHIFT)
+
+/* Print a single-bit flag of a register as 0 or 1 */
+#define AL_UDMA_PRINT_REG_BIT( \
+	UDMA, PREFIX, POSTFIX, TYPE, GROUP, REG, LBL, FIELD) \
+	al_dbg(PREFIX #LBL " = %d" POSTFIX, ((al_reg_read32( \
+			&(UDMA->udma_regs->TYPE.GROUP.REG)) \
+			& FIELD) != 0))
+
+/*
+ * UDMA register print mask definitions for al_udma_regs_print():
+ * bits [0..DMA_MAX_Q-1] select individual queues, the bits above them
+ * select register groups.
+ */
+#define AL_UDMA_DEBUG_QUEUE(n)			AL_BIT(n)
+#define AL_UDMA_DEBUG_AXI			AL_BIT(DMA_MAX_Q)
+#define AL_UDMA_DEBUG_GENERAL			AL_BIT(DMA_MAX_Q + 1)
+#define AL_UDMA_DEBUG_READ			AL_BIT(DMA_MAX_Q + 2)
+#define AL_UDMA_DEBUG_WRITE			AL_BIT(DMA_MAX_Q + 3)
+#define AL_UDMA_DEBUG_DWRR			AL_BIT(DMA_MAX_Q + 4)
+#define AL_UDMA_DEBUG_RATE_LIMITER		AL_BIT(DMA_MAX_Q + 5)
+#define AL_UDMA_DEBUG_STREAM_RATE_LIMITER	AL_BIT(DMA_MAX_Q + 6)
+#define AL_UDMA_DEBUG_COMP			AL_BIT(DMA_MAX_Q + 7)
+#define AL_UDMA_DEBUG_STAT			AL_BIT(DMA_MAX_Q + 8)
+#define AL_UDMA_DEBUG_FEATURE			AL_BIT(DMA_MAX_Q + 9)
+#define AL_UDMA_DEBUG_ALL			0xFFFFFFFF
+
+/* Debug functions */
+
+/**
+ * Print udma registers according to the provided mask
+ *
+ * @param udma udma data structure
+ * @param mask mask that specifies which registers groups to print
+ * e.g. AL_UDMA_DEBUG_AXI prints AXI registers, AL_UDMA_DEBUG_ALL prints all
+ * registers
+ */
+void al_udma_regs_print(struct al_udma *udma, unsigned int mask);
+
+/**
+ * Print udma queue software structure
+ *
+ * @param udma udma data structure
+ * @param qid queue index
+ */
+void al_udma_q_struct_print(struct al_udma *udma, uint32_t qid);
+
+/** UDMA ring type, selects which ring al_udma_ring_print() dumps */
+enum al_udma_ring_type {
+	AL_RING_SUBMISSION,	/**< descriptor (submission) ring */
+	AL_RING_COMPLETION	/**< completion descriptor ring */
+};
+
+/**
+ * Print the ring entries for the specified queue index and ring type
+ * (submission/completion)
+ *
+ * @param udma udma data structure
+ * @param qid queue index
+ * @param rtype udma ring type
+ */
+void al_udma_ring_print(struct al_udma *udma, uint32_t qid,
+ enum al_udma_ring_type rtype);
+
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+#endif /* __AL_HAL_UDMA_DEBUG_H__ */
+/** @} end of UDMA debug group */
diff --git a/al_hal_udma_iofic.c b/al_hal_udma_iofic.c
new file mode 100644
index 000000000000..d6ba485296c3
--- /dev/null
+++ b/al_hal_udma_iofic.c
@@ -0,0 +1,151 @@
+/*-
+*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @{
+ * @file al_hal_udma_iofic.c
+ *
+ * @brief unit interrupts configurations
+ *
+ */
+
+#include "al_hal_udma_iofic.h"
+#include "al_hal_udma_regs.h"
+
+/*
+ * Configure the main interrupt controller registers for the given
+ * interrupt mode; all interrupts are kept masked.
+ *
+ * Returns 0 on success, -EINVAL if the mode is not supported.
+ */
+static int al_udma_main_iofic_config(struct al_iofic_regs __iomem *base,
+				    enum al_iofic_mode mode)
+{
+	switch (mode) {
+	case AL_IOFIC_MODE_LEGACY:
+		al_iofic_config(base, AL_INT_GROUP_A,
+				INT_CONTROL_GRP_SET_ON_POSEDGE |
+				INT_CONTROL_GRP_MASK_MSI_X |
+				INT_CONTROL_GRP_CLEAR_ON_READ);
+		al_iofic_config(base, AL_INT_GROUP_B,
+				INT_CONTROL_GRP_CLEAR_ON_READ |
+				INT_CONTROL_GRP_MASK_MSI_X);
+		al_iofic_config(base, AL_INT_GROUP_C,
+				INT_CONTROL_GRP_CLEAR_ON_READ |
+				INT_CONTROL_GRP_MASK_MSI_X);
+		al_iofic_config(base, AL_INT_GROUP_D,
+				INT_CONTROL_GRP_SET_ON_POSEDGE |
+				INT_CONTROL_GRP_MASK_MSI_X |
+				INT_CONTROL_GRP_CLEAR_ON_READ);
+		break;
+	case AL_IOFIC_MODE_MSIX_PER_Q:
+		al_iofic_config(base, AL_INT_GROUP_A,
+				INT_CONTROL_GRP_SET_ON_POSEDGE |
+				INT_CONTROL_GRP_AUTO_MASK |
+				INT_CONTROL_GRP_AUTO_CLEAR);
+		al_iofic_config(base, AL_INT_GROUP_B,
+				INT_CONTROL_GRP_AUTO_CLEAR |
+				INT_CONTROL_GRP_AUTO_MASK |
+				INT_CONTROL_GRP_CLEAR_ON_READ);
+		al_iofic_config(base, AL_INT_GROUP_C,
+				INT_CONTROL_GRP_AUTO_CLEAR |
+				INT_CONTROL_GRP_AUTO_MASK |
+				INT_CONTROL_GRP_CLEAR_ON_READ);
+		al_iofic_config(base, AL_INT_GROUP_D,
+				INT_CONTROL_GRP_SET_ON_POSEDGE |
+				INT_CONTROL_GRP_CLEAR_ON_READ |
+				INT_CONTROL_GRP_MASK_MSI_X);
+		break;
+	case AL_IOFIC_MODE_MSIX_PER_GROUP:
+		al_iofic_config(base, AL_INT_GROUP_A,
+				INT_CONTROL_GRP_SET_ON_POSEDGE |
+				INT_CONTROL_GRP_AUTO_CLEAR |
+				INT_CONTROL_GRP_AUTO_MASK);
+		al_iofic_config(base, AL_INT_GROUP_B,
+				INT_CONTROL_GRP_CLEAR_ON_READ |
+				INT_CONTROL_GRP_MASK_MSI_X);
+		al_iofic_config(base, AL_INT_GROUP_C,
+				INT_CONTROL_GRP_CLEAR_ON_READ |
+				INT_CONTROL_GRP_MASK_MSI_X);
+		al_iofic_config(base, AL_INT_GROUP_D,
+				INT_CONTROL_GRP_SET_ON_POSEDGE |
+				INT_CONTROL_GRP_CLEAR_ON_READ |
+				INT_CONTROL_GRP_MASK_MSI_X);
+		break;
+	default:
+		al_err("%s: invalid mode (%d)\n", __func__, mode);
+		return -EINVAL;
+	}
+
+	al_dbg("%s: base.%p mode %d\n", __func__, base, mode);
+	return 0;
+}
+
+/*
+ * Configure the UDMA interrupt registers: set up the main controller for
+ * the given mode (interrupts kept masked), then unmask the secondary
+ * controller's M2S (group A) and S2M (group B) error interrupts that are
+ * not disabled and set their abort masks.
+ *
+ * Returns 0 on success, or the error from the main controller setup.
+ */
+int al_udma_iofic_config(struct unit_regs __iomem *regs, enum al_iofic_mode mode,
+	uint32_t m2s_errors_disable,
+	uint32_t m2s_aborts_disable,
+	uint32_t s2m_errors_disable,
+	uint32_t s2m_aborts_disable)
+{
+	int rc;
+
+	rc = al_udma_main_iofic_config(&regs->gen.interrupt_regs.main_iofic, mode);
+	if (rc != 0)
+		return rc;
+
+	/* ~disable = the error bits to keep enabled */
+	al_iofic_unmask(&regs->gen.interrupt_regs.secondary_iofic_ctrl, AL_INT_GROUP_A, ~m2s_errors_disable);
+	al_iofic_abort_mask(&regs->gen.interrupt_regs.secondary_iofic_ctrl, AL_INT_GROUP_A, m2s_aborts_disable);
+
+	al_iofic_unmask(&regs->gen.interrupt_regs.secondary_iofic_ctrl, AL_INT_GROUP_B, ~s2m_errors_disable);
+	al_iofic_abort_mask(&regs->gen.interrupt_regs.secondary_iofic_ctrl, AL_INT_GROUP_B, s2m_aborts_disable);
+
+	al_dbg("%s base.%p mode %d\n", __func__, regs, mode);
+	return 0;
+}
+
+/*
+ * Return the address of the unmask register for the given controller
+ * level and interrupt group (asserts that the pair is valid).
+ */
+uint32_t __iomem * al_udma_iofic_unmask_offset_get(
+	struct unit_regs __iomem *regs,
+	enum al_udma_iofic_level level,
+	int group)
+{
+	al_assert(al_udma_iofic_level_and_group_valid(level, group));
+	return al_iofic_unmask_offset_get(al_udma_iofic_reg_base_get(regs, level), group);
+}
+
+/** @} end of UDMA group */
diff --git a/al_hal_udma_iofic.h b/al_hal_udma_iofic.h
new file mode 100644
index 000000000000..9e7950048374
--- /dev/null
+++ b/al_hal_udma_iofic.h
@@ -0,0 +1,614 @@
+/*-
+*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_udma_interrupts UDMA I/O Fabric Interrupt Controller
+ * @ingroup group_udma_api
+ * UDMA IOFIC API
+ * @{
+ * @file al_hal_udma_iofic.h
+ *
+ * @brief C header file for programming the interrupt controller found
+ * in UDMA-based units. These APIs rely on and use some of the Interrupt
+ * controller API under al_hal_iofic.h
+ */
+
+#ifndef __AL_HAL_UDMA_IOFIC_H__
+#define __AL_HAL_UDMA_IOFIC_H__
+
+#include <al_hal_common.h>
+#include <al_hal_iofic.h>
+#include <al_hal_udma_regs.h>
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/**
+ * Interrupt Mode
+ * This is the interrupt mode for the primary interrupt level. The secondary
+ * interrupt level does not have mode and it is always a level sensitive
+ * interrupt that is reflected in group D of the primary.
+ */
+enum al_iofic_mode {
+ AL_IOFIC_MODE_LEGACY, /**< level-sensitive interrupt wire */
+ AL_IOFIC_MODE_MSIX_PER_Q, /**< per UDMA queue MSI-X interrupt */
+ AL_IOFIC_MODE_MSIX_PER_GROUP
+};
+
+/** interrupt controller level (primary/secondary) */
+enum al_udma_iofic_level {
+ AL_UDMA_IOFIC_LEVEL_PRIMARY,
+ AL_UDMA_IOFIC_LEVEL_SECONDARY
+};
+
+/*
+ * The next four groups represents the standard 4 groups in the primary
+ * interrupt controller of each bus-master unit in the I/O Fabric.
+ * The first two groups can be used when accessing the secondary interrupt
+ * controller as well.
+ */
+#define AL_INT_GROUP_A 0 /**< summary of the below events */
+#define AL_INT_GROUP_B 1 /**< RX completion queues */
+#define AL_INT_GROUP_C 2 /**< TX completion queues */
+#define AL_INT_GROUP_D 3 /**< Misc */
+
+/*******************************************************************************
+ * Primary interrupt controller, group A bits
+ ******************************************************************************/
+/* Group A bits which are just summary bits of GROUP B, C and D */
+#define AL_INT_GROUP_A_GROUP_B_SUM AL_BIT(0)
+#define AL_INT_GROUP_A_GROUP_C_SUM AL_BIT(1)
+#define AL_INT_GROUP_A_GROUP_D_SUM AL_BIT(2)
+
+/*******************************************************************************
+ * MSIX entry indices
+ ******************************************************************************/
+/** MSIX entry index for summary of group D in group A */
+#define AL_INT_MSIX_GROUP_A_SUM_D_IDX 2
+/** MSIX entry index for RX completion queue 0 */
+#define AL_INT_MSIX_RX_COMPLETION_START 3
+
+/*******************************************************************************
+ * Primary interrupt controller, group D bits
+ ******************************************************************************/
+#define AL_INT_GROUP_D_CROSS_MAIL_BOXES \
+ (AL_BIT(0) | AL_BIT(1) | AL_BIT(2) | AL_BIT(3))
+/** Summary of secondary interrupt controller, group A */
+#define AL_INT_GROUP_D_M2S AL_BIT(8)
+/** Summary of secondary interrupt controller, group B */
+#define AL_INT_GROUP_D_S2M AL_BIT(9)
+#define AL_INT_GROUP_D_SW_TIMER_INT AL_BIT(10)
+#define AL_INT_GROUP_D_APP_EXT_INT AL_BIT(11)
+#define AL_INT_GROUP_D_ALL \
+ AL_INT_GROUP_D_CROSS_MAIL_BOXES | \
+ AL_INT_GROUP_D_M2S | \
+ AL_INT_GROUP_D_S2M | \
+ AL_INT_GROUP_D_SW_TIMER_INT | \
+ AL_INT_GROUP_D_APP_EXT_INT
+
+/*
+ * Until this point, all description above is for Groups A/B/C/D in the PRIMARY
+ * Interrupt controller.
+ * Following are definitions related to the secondary interrupt controller with
+ * two cause registers (group A and group B) that covers UDMA M2S/S2M errors.
+ * Secondary interrupt controller summary bits are not mapped to the Processor
+ * GIC directly, rather they are represented in Group D of the primary interrupt
+ * controller.
+ */
+
+/******************************************************************************
+ * Secondary interrupt Controller, Group A, which holds the TX (M2S) error
+ * interrupt bits
+ ******************************************************************************/
+
+/**
+ * MSIx response
+ * MSIX Bus generator response error, the Bus response received with error indication
+ */
+#define AL_INT_2ND_GROUP_A_M2S_MSIX_RESP AL_BIT(27)
+/**
+ * MSIx timeout MSIX Bus generator timeout error.
+ * The generator didn't receive bus response for the MSIx write transaction.
+ */
+#define AL_INT_2ND_GROUP_A_M2S_MSIX_TO AL_BIT(26)
+/** Prefetch header buffer parity error */
+#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_HDR_PARITY AL_BIT(25)
+/** Prefetch descriptor buffer parity error */
+#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_DESC_PARITY AL_BIT(24)
+/** Data buffer parity error */
+#define AL_INT_2ND_GROUP_A_M2S_DATA_PARITY AL_BIT(23)
+/** Data header buffer parity error */
+#define AL_INT_2ND_GROUP_A_M2S_HDR_PARITY AL_BIT(22)
+/** Completion coalescing buffer parity error */
+#define AL_INT_2ND_GROUP_A_M2S_COMPL_COAL_PARITY AL_BIT(21)
+/** UNACK packets buffer parity error */
+#define AL_INT_2ND_GROUP_A_M2S_UNACK_PKT_PARITY AL_BIT(20)
+/** ACK packets buffer parity error */
+#define AL_INT_2ND_GROUP_A_M2S_ACK_PKT_PARITY AL_BIT(19)
+/** AXI data buffer parity error */
+#define AL_INT_2ND_GROUP_A_M2S_AX_DATA_PARITY AL_BIT(18)
+/**
+ * Prefetch Ring ID error
+ * A wrong RingId was received while prefetching submission descriptor. This
+ * could indicate a software bug or hardware failure, unless the UDMA is
+ * working in a mode to ignore RingId (the al_udma_iofic_config() API can be
+ * used to configure the UDMA to ignore the Ring ID check)
+ */
+#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_RING_ID AL_BIT(17)
+/**
+ * Prefetch last
+ * Error in last bit indication of the descriptor
+ * Descriptor with Last bit asserted is read from the queue to the prefetch
+ * FIFO when the prefetch engine is not in a middle of packet processing (a
+ * descriptor with First bit asserted should be read first to indicate start of
+ * packet)
+ */
+#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_LAST AL_BIT(16)
+/**
+ * Prefetch first
+ * Error in first bit indication of the descriptor
+ * Descriptor with First bit asserted is read from the queue to the prefetch
+ * FIFO while the prefetch engine is in a middle of packet processing ( a
+ * descriptor with Last bit asserted should be read to indicate end of packet
+ * before starting a new one)
+ */
+#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_FIRST AL_BIT(15)
+/**
+ * Prefetch max descriptors
+ * Number of descriptors per packet exceeds the configurable maximum
+ * descriptors per packet. This could indicate a software bug or a hardware
+ * failure. (The al_udma_m2s_max_descs_set() API is used to configure the
+ * maximum descriptors per packet)
+ */
+#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_MAX_DESC AL_BIT(14)
+/**
+ * Packet length
+ * Packet length exceeds the configurable maximum packet size. The
+ * al_udma_m2s_packet_size_cfg_set() API is used to configure the maximum
+ * packet size)
+ */
+#define AL_INT_2ND_GROUP_A_M2S_PKT_LEN AL_BIT(13)
+/**
+ * Prefetch AXI timeout
+ * Bus request to I/O Fabric timeout error
+ */
+#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_AXI_TO AL_BIT(12)
+/**
+ * Prefetch AXI response
+ * Bus response from I/O Fabric error
+ */
+#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_AXI_RESP AL_BIT(11)
+/**
+ * Prefetch AXI parity
+ * Bus parity error on descriptor being prefetched
+ */
+#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_AXI_PARITY AL_BIT(10)
+/**
+ * Data AXI timeout
+ * Bus request to I/O Fabric timeout error
+ */
+#define AL_INT_2ND_GROUP_A_M2S_DATA_AXI_TO AL_BIT(9)
+/**
+ * Data AXI response
+ * Bus response from I/O Fabric error
+ */
+#define AL_INT_2ND_GROUP_A_M2S_DATA_AXI_RESP AL_BIT(8)
+/**
+ * Data AXI parity
+ * Bus parity error on data being read
+ */
+#define AL_INT_2ND_GROUP_A_M2S_DATA_AXI_PARITY AL_BIT(7)
+/**
+ * Completion AXI timeout
+ * Bus request to I/O Fabric timeout error
+ */
+#define AL_INT_2ND_GROUP_A_M2S_CONPL_AXI_TO AL_BIT(6)
+/**
+ * Completion AXI response
+ * Bus response from I/O Fabric error
+ */
+#define AL_INT_2ND_GROUP_A_M2S_COMPL_AXI_RESP AL_BIT(5)
+/**
+ * Completion AXI parity
+ * Bus generator internal SRAM parity error
+ */
+#define AL_INT_2ND_GROUP_A_M2S_COMP_AXI_PARITY AL_BIT(4)
+/**
+ * Stream timeout
+ * Application stream interface timeout indicating a failure at the Application
+ * layer (RAID, Ethernet etc)
+ */
+#define AL_INT_2ND_GROUP_A_M2S_STRM_TO AL_BIT(3)
+/**
+ * Stream response
+ * Application stream interface response error indicating a failure at the
+ * Application layer (RAID, Ethernet etc)
+ */
+#define AL_INT_2ND_GROUP_A_M2S_STRM_RESP AL_BIT(2)
+/**
+ * Stream parity
+ * Application stream interface parity error indicating a failure at the
+ * Application layer (RAID, Ethernet etc)
+ */
+#define AL_INT_2ND_GROUP_A_M2S_STRM_PARITY AL_BIT(1)
+/**
+ * Stream completion mismatch
+ * Application stream interface, packet serial mismatch error indicating a
+ * failure at the Application layer (RAID, Ethernet etc)
+ */
+#define AL_INT_2ND_GROUP_A_M2S_STRM_COMPL_MISMATCH AL_BIT(0)
+
+/*******************************************************************************
+ * Secondary interrupt Controller, Group B, which holds the RX (S2M) error
+ * interrupt bits
+ ******************************************************************************/
+
+/** Prefetch descriptor buffer parity error */
+#define AL_INT_2ND_GROUP_B_S2M_PREFETCH_DESC_PARITY AL_BIT(30)
+/** Completion coalescing buffer parity error */
+#define AL_INT_2ND_GROUP_B_S2M_COMPL_COAL_PARITY AL_BIT(29)
+/** PRE-UNACK packets buffer parity error */
+#define AL_INT_2ND_GROUP_B_S2M_PRE_UNACK_PKT_PARITY AL_BIT(28)
+/** UNACK packets buffer parity error */
+#define AL_INT_2ND_GROUP_B_S2M_UNACK_PKT_PARITY AL_BIT(27)
+/** Data buffer parity error */
+#define AL_INT_2ND_GROUP_B_S2M_DATA_PARITY AL_BIT(26)
+/** Data header buffer parity error */
+#define AL_INT_2ND_GROUP_B_S2M_DATA_HDR_PARITY AL_BIT(25)
+/**
+ * Packet length
+ * Application stream interface, Data counter length mismatch with metadata
+ * packet length indicating a failure at the Application layer (RAID, Ethernet
+ * etc)
+ */
+#define AL_INT_2ND_GROUP_B_S2M_PKT_LEN AL_BIT(24)
+/**
+ * Stream last
+ * Application stream interface, error in Last bit indication, this error is
+ * asserted when a 'last' indication is asserted on the stream interface
+ * (between the application and the UDMA) when the interface is not in the
+ * middle of packet, meaning that there was no 'first' indication before. This
+ * indicates a failure at the application layer.
+ */
+#define AL_INT_2ND_GROUP_B_S2M_STRM_LAST AL_BIT(23)
+/**
+ * Stream first
+ * Application stream interface error in first bit indication, this error is
+ * asserted when a 'first' indication is asserted on the stream interface
+ * (between the application and the UDMA) when the interface is in the middle
+ * of packet, meaning that there was a 'first' indication before and the UDMA
+ * is waiting for a 'last' indication to end the packet. This indicates a
+ * failure at the application layer.
+ */
+#define AL_INT_2ND_GROUP_B_S2M_STRM_FIRST AL_BIT(22)
+/**
+ * Stream data
+ * Application stream interface, error indication during data transaction
+ */
+#define AL_INT_2ND_GROUP_B_S2M_STRM_DATA AL_BIT(21)
+/**
+ * Stream Data parity
+ * Application stream interface, parity error during data transaction
+ */
+#define AL_INT_2ND_GROUP_B_S2M_STRM_DATA_PARITY AL_BIT(20)
+/**
+ * Stream Header error
+ * Application stream interface, error indication during header transaction
+ */
+#define AL_INT_2ND_GROUP_B_S2M_STRM_HDR AL_BIT(19)
+/**
+ * Stream Header parity
+ * Application stream interface, parity error during header transaction
+ */
+#define AL_INT_2ND_GROUP_B_S2M_STRM_HDR_PARITY AL_BIT(18)
+/**
+ * Completion UNACK
+ * Completion write, UNACK timeout due to completion FIFO back pressure
+ */
+#define AL_INT_2ND_GROUP_B_S2M_COMPL_UNACK AL_BIT(17)
+/**
+ * Completion stream
+ * Completion write, UNACK timeout due to stream ACK FIFO back pressure
+ */
+#define AL_INT_2ND_GROUP_B_S2M_COMPL_STRM AL_BIT(16)
+/**
+ * Completion AXI timeout
+ * Bus request to I/O Fabric timeout error
+ */
+#define AL_INT_2ND_GROUP_B_S2M_COMPL_AXI_TO AL_BIT(15)
+/**
+ * Completion AXI response
+ * Bus response from I/O Fabric error
+ */
+#define AL_INT_2ND_GROUP_B_S2M_COMPL_AXI_RESP AL_BIT(14)
+/**
+ * Completion AXI parity
+ * Completion Bus generator internal SRAM parity error
+ */
+#define AL_INT_2ND_GROUP_B_S2M_COMPL_AXI_PARITY AL_BIT(13)
+/**
+ * Prefetch saturate
+ * Prefetch engine, packet length counter saturated (32 bit) , this is caused
+ * by an error at the application layer which sends packet data without
+ * 'last'/'first' indication.
+ */
+#define AL_INT_2ND_GROUP_B_S2M_PREFETCH_SAT AL_BIT(12)
+/**
+ * Prefetch ring ID
+ * Prefetch engine, Ring ID is not matching the expected RingID. This could
+ * indicate a software bug or hardware failure, unless the UDMA is working in a
+ * mode to ignore RingId (the al_udma_iofic_config() API can be used to
+ * configure the UDMA to ignore the Ring ID check)
+ */
+#define AL_INT_2ND_GROUP_B_S2M_PREFETCH_RING_ID AL_BIT(11)
+/**
+ * Prefetch AXI timeout
+ * Bus request to I/O Fabric timeout error
+ */
+#define AL_INT_2ND_GROUP_B_S2M_PREFETCH_AXI_TO AL_BIT(10)
+/**
+ * Prefetch AXI response
+ * Bus response from I/O Fabric error
+ */
+#define AL_INT_2ND_GROUP_B_S2M_PREFETCH_AXI_RESP AL_BIT(9)
+/**
+ * Prefetch AXI parity
+ * Bus parity error on descriptor being prefetched
+ */
+#define AL_INT_2ND_GROUP_B_S2M_PREFETCH_AXI_PARITY AL_BIT(8)
+/**
+ * No descriptors hint
+ * Data write, Hint to the SW that there are not enough descriptors in the
+ * queue for the current received packet. This is considered a hint and not an
+ * error, as it could be a normal situation in certain application. The S2M
+ * UDMA behavior when it runs out of Rx Descriptor is controlled by driver
+ * which can use this hint to add more descriptors to the Rx queue.
+ */
+#define AL_INT_2ND_GROUP_B_S2M_NO_DESC_HINT AL_BIT(7)
+/**
+ * No descriptors timeout
+ * Data write, Timeout indication when there are not enough descriptors for the
+ * current packet and the timeout expires. The S2M UDMA behavior when it runs
+ * out of Rx Descriptor is controlled by driver which can use this hint to add
+ * more descriptors to the Rx queue. The al_udma_s2m_no_desc_cfg_set() is used
+ * to configure the UDMA S2M timeout and behavior when there are no Rx
+ * descriptors for the received packet.
+ */
+#define AL_INT_2ND_GROUP_B_S2M_NO_DESC_TO AL_BIT(6)
+/**
+ * Promotion indication
+ * Data write, the data write engine checks the queue number of the two packets
+ * at the head of the data FIFO, the data write engine notify the prefetch
+ * engine to promote these queue numbers in the prefetch scheduler to make sure
+ * that these queue will have RX descriptors for these packets. This error
+ * indicates that the prefetch promotion didn't work for the second packet in
+ * the FIFO. This is an indication used for system debug and not an error.
+ */
+#define AL_INT_2ND_GROUP_B_S2M_PROM_IND AL_BIT(5)
+/**
+ * Header split ignored
+ * Data write, The application requested header split but the buffer descriptor
+ * doesn't include a second buffer for the header
+ */
+#define AL_INT_2ND_GROUP_B_S2M_HDR_SPLT_IGNORED AL_BIT(4)
+/**
+ * Header split length
+ * Data write, The application requested header split and the length of the
+ * second buffer allocated for the header is not enough for the requested
+ * header length. The remaining of the header is written to buffer 1 (data
+ * buffer).
+ */
+#define AL_INT_2ND_GROUP_B_S2M_HDR_SPLT_LEN AL_BIT(3)
+/**
+ * Data AXI timeout
+ * Bus request to I/O Fabric timeout error
+ */
+#define AL_INT_2ND_GROUP_B_S2M_DATA_AXI_TO AL_BIT(2)
+/**
+ * Data AXI response
+ * Bus response from I/O Fabric error
+ */
+#define AL_INT_2ND_GROUP_B_S2M_DATA_AXI_RESP AL_BIT(1)
+/**
+ * Data AXI parity
+ * Bus parity error on data being read
+ */
+#define AL_INT_2ND_GROUP_B_S2M_DATA_AXI_PARITY AL_BIT(0)
+
+/*******************************************************************************
+ * Configurations
+ ******************************************************************************/
+
+/**
+ * Configure the UDMA interrupt controller registers; interrupts are kept
+ * masked.
+ * This is a static setting that should be applied while initializing the
+ * interrupt controller within a given UDMA, and should not be modified during
+ * runtime unless the UDMA is completely disabled. The first argument sets the
+ * interrupt and MSIX modes. The m2s/s2m errors/aborts are a set of bit-wise
+ * masks defining the behaviour of the UDMA once an error happens: the _aborts
+ * mask will put the UDMA in abort state once an error happens; the _errors
+ * bitmask will indicate an error in the secondary cause register but will not
+ * abort. The bit-masks used by _errors_disable and _aborts_disable are
+ * described in 'AL_INT_2ND_GROUP_A_*' and 'AL_INT_2ND_GROUP_B_*'
+ *
+ * @param regs pointer to unit registers
+ * @param mode interrupt scheme mode (legacy, MSI-X..)
+ * @param m2s_errors_disable
+ * This is a bit-wise mask, to indicate which one of the error causes in
+ * secondary interrupt group_A should generate an interrupt. When a bit is
+ * set, the error cause is ignored.
+ * Recommended value: 0 (enable all errors).
+ * @param m2s_aborts_disable
+ * This is a bit-wise mask, to indicate which one of the error causes in
+ * secondary interrupt group_A should automatically put the UDMA in
+ * abort state. When a bit is set, the error cause does not cause an abort.
+ * Recommended value: 0 (enable all aborts).
+ * @param s2m_errors_disable
+ * This is a bit-wise mask, to indicate which one of the error causes in
+ * secondary interrupt group_B should generate an interrupt. When a bit is
+ * set, the error cause is ignored.
+ * Recommended value: 0xE0 (disable hint errors).
+ * @param s2m_aborts_disable
+ * This is a bit-wise mask, to indicate which one of the error causes in
+ * secondary interrupt group_B should automatically put the UDMA in
+ * abort state. When a bit is set, the error cause does not cause an abort.
+ * Recommended value: 0xE0 (disable hint aborts).
+ *
+ * @return 0 on success. -EINVAL otherwise.
+ */
+int al_udma_iofic_config(struct unit_regs __iomem *regs,
+ enum al_iofic_mode mode,
+ uint32_t m2s_errors_disable,
+ uint32_t m2s_aborts_disable,
+ uint32_t s2m_errors_disable,
+ uint32_t s2m_aborts_disable);
+/**
+ * return the offset of the unmask register for a given group.
+ * this function can be used when the upper layer wants to directly
+ * access the unmask register and bypass the al_udma_iofic_unmask() API.
+ *
+ * @param regs pointer to udma registers
+ * @param level the interrupt controller level (primary / secondary)
+ * @param group the interrupt group ('AL_INT_GROUP_*')
+ * @return the offset of the unmask register.
+ */
+uint32_t __iomem * al_udma_iofic_unmask_offset_get(
+ struct unit_regs __iomem *regs,
+ enum al_udma_iofic_level level,
+ int group);
+
+/**
+ * Get the interrupt controller base address for either the primary or secondary
+ * interrupt controller
+ *
+ * @param regs pointer to udma unit registers
+ * @param level the interrupt controller level (primary / secondary)
+ *
+ * @returns base address of the selected controller's register block
+ * (main_iofic for primary, secondary_iofic_ctrl for secondary)
+ */
+static INLINE void __iomem *al_udma_iofic_reg_base_get(
+ struct unit_regs __iomem *regs,
+ enum al_udma_iofic_level level)
+{
+ void __iomem *iofic_regs = (level == AL_UDMA_IOFIC_LEVEL_PRIMARY) ?
+ (void __iomem *)&regs->gen.interrupt_regs.main_iofic :
+ (void __iomem *)&regs->gen.interrupt_regs.secondary_iofic_ctrl;
+
+ return iofic_regs;
+}
+
+/**
+ * Check the interrupt controller level/group validity
+ * (primary level accepts groups 0..3 / A..D; secondary accepts 0..1 / A..B)
+ *
+ * @param level the interrupt controller level (primary / secondary)
+ * @param group the interrupt group ('AL_INT_GROUP_*')
+ *
+ * @returns 0 - invalid, 1 - valid
+ *
+ */
+static INLINE int al_udma_iofic_level_and_group_valid(
+ enum al_udma_iofic_level level,
+ int group)
+{
+ if (((level == AL_UDMA_IOFIC_LEVEL_PRIMARY) && (group >= 0) && (group < 4)) ||
+ ((level == AL_UDMA_IOFIC_LEVEL_SECONDARY) && (group >= 0) && (group < 2)))
+ return 1;
+
+ return 0;
+}
+/**
+ * unmask specific interrupts for a given group
+ * this function uses the interrupt mask clear register to guarantee atomicity;
+ * it's safe to call it while the mask is changed by the HW (auto mask) or another cpu.
+ *
+ * @param regs pointer to udma unit registers
+ * @param level the interrupt controller level (primary / secondary)
+ * @param group the interrupt group ('AL_INT_GROUP_*')
+ * @param mask bitwise of interrupts to unmask, set bits will be unmasked.
+ */
+static INLINE void al_udma_iofic_unmask(
+ struct unit_regs __iomem *regs,
+ enum al_udma_iofic_level level,
+ int group,
+ uint32_t mask)
+{
+ al_assert(al_udma_iofic_level_and_group_valid(level, group));
+ al_iofic_unmask(al_udma_iofic_reg_base_get(regs, level), group, mask);
+}
+
+/**
+ * mask specific interrupts for a given group
+ * this function modifies the interrupt mask register (read-modify-write, not
+ * atomic); the caller must make sure the mask is not changed by another cpu.
+ *
+ * @param regs pointer to udma unit registers
+ * @param level the interrupt controller level (primary / secondary)
+ * @param group the interrupt group ('AL_INT_GROUP_*')
+ * @param mask bitwise of interrupts to mask, set bits will be masked.
+ */
+static INLINE void al_udma_iofic_mask(
+ struct unit_regs __iomem *regs,
+ enum al_udma_iofic_level level,
+ int group,
+ uint32_t mask)
+{
+ al_assert(al_udma_iofic_level_and_group_valid(level, group));
+ al_iofic_mask(al_udma_iofic_reg_base_get(regs, level), group, mask);
+}
+
+/**
+ * read the interrupt cause register for a given group and return its value;
+ * this will clear the set bits if the Clear on Read mode is enabled.
+ * @param regs pointer to udma unit registers
+ * @param level the interrupt controller level (primary / secondary)
+ * @param group the interrupt group ('AL_INT_GROUP_*')
+ */
+static INLINE uint32_t al_udma_iofic_read_cause(
+ struct unit_regs __iomem *regs,
+ enum al_udma_iofic_level level,
+ int group)
+{
+ al_assert(al_udma_iofic_level_and_group_valid(level, group));
+ return al_iofic_read_cause(al_udma_iofic_reg_base_get(regs, level), group);
+}
+
+#endif
+/** @} end of UDMA group */
diff --git a/al_hal_udma_iofic_regs.h b/al_hal_udma_iofic_regs.h
new file mode 100644
index 000000000000..8e53aa673cce
--- /dev/null
+++ b/al_hal_udma_iofic_regs.h
@@ -0,0 +1,66 @@
+/*-
+*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __AL_HAL_UDMA_IOFIC_REG_H
+#define __AL_HAL_UDMA_IOFIC_REG_H
+
+#include "al_hal_iofic_regs.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** This structure covers all interrupt registers of a given UDMA. It is
+ * built of an al_iofic_regs, which is the common I/O Fabric Interrupt
+ * Controller (IOFIC), plus two additional interrupt groups dedicated to the
+ * application-specific engine attached to the UDMA; the interrupt summary
+ * of those two groups is routed to group D of the main controller.
+ */
+struct udma_iofic_regs {
+ struct al_iofic_regs main_iofic; /* common IOFIC (primary controller) */
+ uint32_t rsrvd1[(0x1c00) >> 2]; /* reserved space (0x1c00 bytes) */
+ struct al_iofic_grp_ctrl secondary_iofic_ctrl[2]; /* secondary groups A and B */
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_HAL_UDMA_IOFIC_REG_H */
+
+
+
+
diff --git a/al_hal_udma_main.c b/al_hal_udma_main.c
new file mode 100644
index 000000000000..6e9919b3596c
--- /dev/null
+++ b/al_hal_udma_main.c
@@ -0,0 +1,618 @@
+/*-
+*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @{
+ * @file al_hal_udma_main.c
+ *
+ * @brief Universal DMA HAL driver for main functions (initialization, data path)
+ *
+ */
+
+#include <al_hal_udma.h>
+#include <al_hal_udma_config.h>
+
+#define AL_UDMA_Q_RST_TOUT 10000 /* Queue reset timeout [uSecs] */
+
+#define UDMA_STATE_IDLE 0x0
+#define UDMA_STATE_NORMAL 0x1
+#define UDMA_STATE_ABORT 0x2
+#define UDMA_STATE_RESERVED 0x3
+
+const char *const al_udma_states_name[] = {
+ "Disable",
+ "Idle",
+ "Normal",
+ "Abort",
+ "Reset"
+};
+
+#define AL_UDMA_INITIAL_RING_ID 1
+
+/* dma_q flags */
+#define AL_UDMA_Q_FLAGS_IGNORE_RING_ID AL_BIT(0)
+#define AL_UDMA_Q_FLAGS_NO_COMP_UPDATE AL_BIT(1)
+#define AL_UDMA_Q_FLAGS_EN_COMP_COAL AL_BIT(2)
+
+
+static void al_udma_set_defaults(struct al_udma *udma)
+{
+ uint32_t tmp;
+ uint8_t rev_id = udma->rev_id;
+
+ if (udma->type == UDMA_TX) {
+ struct unit_regs* tmp_unit_regs =
+ (struct unit_regs*)udma->udma_regs;
+
+ /* Setting the data fifo depth to 4K (256 strips of 16B)
+ * This allows the UDMA to have 16 outstanding writes */
+ if (rev_id >= AL_UDMA_REV_ID_2) {
+ al_reg_write32_masked(&tmp_unit_regs->m2s.m2s_rd.data_cfg,
+ UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_MASK,
+ 256 << UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_SHIFT);
+ }
+
+ if (rev_id == AL_UDMA_REV_ID_0)
+ /* disable AXI timeout for M0*/
+ al_reg_write32(&tmp_unit_regs->gen.axi.cfg_1, 0);
+ else
+ /* set AXI timeout to 1M (~2.6 ms) */
+ al_reg_write32(&tmp_unit_regs->gen.axi.cfg_1, 1000000);
+
+ al_reg_write32(&tmp_unit_regs->m2s.m2s_comp.cfg_application_ack
+ , 0); /* Ack time out */
+
+
+ if (rev_id == AL_UDMA_REV_ID_0) {
+ tmp = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1);
+ tmp &= ~UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK;
+ tmp |= 4 << UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_SHIFT;
+ al_reg_write32(&udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1
+ , tmp);
+ }
+
+ }
+ if (udma->type == UDMA_RX) {
+ al_reg_write32(
+ &udma->udma_regs->s2m.s2m_comp.cfg_application_ack, 0);
+ /* Ack time out */
+
+ }
+}
+/**
+ * misc queue configurations (currently TX-only: rate-limit mask setup)
+ *
+ * @param udma_q udma queue data structure
+ *
+ * @return 0
+ */
+static int al_udma_q_config(struct al_udma_q *udma_q)
+{
+	uint32_t *reg_addr;
+	uint32_t val;
+
+	if (udma_q->udma->type == UDMA_TX) {
+		reg_addr = &udma_q->q_regs->m2s_q.rlimit.mask;
+
+		val = al_reg_read32(reg_addr);
+		/* clear the mask bit: enable the internal DMB pause condition */
+		val &= ~UDMA_M2S_Q_RATE_LIMIT_MASK_INTERNAL_PAUSE_DMB;
+		al_reg_write32(reg_addr, val);
+	}
+	return 0;
+}
+
+/**
+ * set the queue's completion configuration register
+ *
+ * @param udma_q udma queue data structure
+ *
+ * @return 0
+ */
+static int al_udma_q_config_compl(struct al_udma_q *udma_q)
+{
+ uint32_t *reg_addr;
+ uint32_t val;
+
+ if (udma_q->udma->type == UDMA_TX)
+ reg_addr = &udma_q->q_regs->m2s_q.comp_cfg;
+ else
+ reg_addr = &udma_q->q_regs->s2m_q.comp_cfg;
+
+ val = al_reg_read32(reg_addr);
+
+ if (udma_q->flags & AL_UDMA_Q_FLAGS_NO_COMP_UPDATE)
+ val &= ~UDMA_M2S_Q_COMP_CFG_EN_COMP_RING_UPDATE;
+ else
+ val |= UDMA_M2S_Q_COMP_CFG_EN_COMP_RING_UPDATE;
+
+ if (udma_q->flags & AL_UDMA_Q_FLAGS_EN_COMP_COAL)
+ val &= ~UDMA_M2S_Q_COMP_CFG_DIS_COMP_COAL;
+ else
+ val |= UDMA_M2S_Q_COMP_CFG_DIS_COMP_COAL;
+
+ al_reg_write32(reg_addr, val);
+
+ /* set the completion queue size */
+ if (udma_q->udma->type == UDMA_RX) {
+ val = al_reg_read32(
+ &udma_q->udma->udma_regs->s2m.s2m_comp.cfg_1c);
+ val &= ~UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK;
+ /* the register expects it to be in words */
+ val |= (udma_q->cdesc_size >> 2)
+ & UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK;
+ al_reg_write32(&udma_q->udma->udma_regs->s2m.s2m_comp.cfg_1c
+ , val);
+ }
+ return 0;
+}
+
+/**
+ * reset the queues pointers (Head, Tail, etc) and set the base addresses
+ *
+ * @param udma_q udma queue data structure
+ */
+static int al_udma_q_set_pointers(struct al_udma_q *udma_q)
+{
+ /* reset the descriptors ring pointers */
+ /* assert descriptor base address aligned. */
+ al_assert((AL_ADDR_LOW(udma_q->desc_phy_base) &
+ ~UDMA_M2S_Q_TDRBP_LOW_ADDR_MASK) == 0);
+ al_reg_write32(&udma_q->q_regs->rings.drbp_low,
+ AL_ADDR_LOW(udma_q->desc_phy_base));
+ al_reg_write32(&udma_q->q_regs->rings.drbp_high,
+ AL_ADDR_HIGH(udma_q->desc_phy_base));
+
+ al_reg_write32(&udma_q->q_regs->rings.drl, udma_q->size);
+
+ /* if completion ring update disabled */
+ if (udma_q->cdesc_base_ptr == NULL) {
+ udma_q->flags |= AL_UDMA_Q_FLAGS_NO_COMP_UPDATE;
+ } else {
+ /* reset the completion descriptors ring pointers */
+ /* assert completion base address aligned. */
+ al_assert((AL_ADDR_LOW(udma_q->cdesc_phy_base) &
+ ~UDMA_M2S_Q_TCRBP_LOW_ADDR_MASK) == 0);
+ al_reg_write32(&udma_q->q_regs->rings.crbp_low,
+ AL_ADDR_LOW(udma_q->cdesc_phy_base));
+ al_reg_write32(&udma_q->q_regs->rings.crbp_high,
+ AL_ADDR_HIGH(udma_q->cdesc_phy_base));
+ }
+ al_udma_q_config_compl(udma_q);
+ return 0;
+}
+
+/**
+ * enable/disable udma queue
+ *
+ * @param udma_q udma queue data structure
+ * @param enable non-zero value enables the queue; zero disables it
+ *
+ * @return 0
+ */
+static int al_udma_q_enable(struct al_udma_q *udma_q, int enable)
+{
+ uint32_t reg = al_reg_read32(&udma_q->q_regs->rings.cfg);
+
+ if (enable) {
+ reg |= (UDMA_M2S_Q_CFG_EN_PREF | UDMA_M2S_Q_CFG_EN_SCHEDULING);
+ udma_q->status = AL_QUEUE_ENABLED;
+ } else {
+ reg &= ~(UDMA_M2S_Q_CFG_EN_PREF | UDMA_M2S_Q_CFG_EN_SCHEDULING);
+ udma_q->status = AL_QUEUE_DISABLED;
+ }
+ al_reg_write32(&udma_q->q_regs->rings.cfg, reg);
+ return 0;
+}
+
+
+/************************ API functions ***************************************/
+
+/* Initializations functions */
+/*
+ * Initialize the udma engine
+ */
+int al_udma_init(struct al_udma *udma, struct al_udma_params *udma_params)
+{
+ int i;
+
+ al_assert(udma);
+
+ if (udma_params->num_of_queues > DMA_MAX_Q) {
+ al_err("udma: invalid num_of_queues parameter\n");
+ return -EINVAL;
+ }
+
+ udma->type = udma_params->type;
+ udma->num_of_queues = udma_params->num_of_queues;
+ udma->gen_regs = &udma_params->udma_regs_base->gen;
+
+ if (udma->type == UDMA_TX)
+ udma->udma_regs = (union udma_regs *)&udma_params->udma_regs_base->m2s;
+ else
+ udma->udma_regs = (union udma_regs *)&udma_params->udma_regs_base->s2m;
+
+ udma->rev_id = al_udma_get_revision(udma_params->udma_regs_base);
+
+ if (udma_params->name == NULL)
+ udma->name = "";
+ else
+ udma->name = udma_params->name;
+
+ udma->state = UDMA_DISABLE;
+ for (i = 0; i < DMA_MAX_Q; i++) {
+ udma->udma_q[i].status = AL_QUEUE_NOT_INITIALIZED;
+ }
+ /* initialize configuration registers to correct values */
+ al_udma_set_defaults(udma);
+ al_dbg("udma [%s] initialized. base %p\n", udma->name,
+ udma->udma_regs);
+ return 0;
+}
+
+/*
+ * Initialize the udma queue data structure (validates size, sets regs base,
+ * ring pointers and enables the queue)
+ */
+int al_udma_q_init(struct al_udma *udma, uint32_t qid,
+		struct al_udma_q_params *q_params)
+{
+	struct al_udma_q *udma_q;
+
+	al_assert(udma);
+	al_assert(q_params);
+
+	if (qid >= udma->num_of_queues) {
+		al_err("udma: invalid queue id (%d)\n", qid);
+		return -EINVAL;
+	}
+
+	if (udma->udma_q[qid].status == AL_QUEUE_ENABLED) {
+		al_err("udma: queue (%d) already enabled!\n", qid);
+		return -EIO;
+	}
+
+	if (q_params->size < AL_UDMA_MIN_Q_SIZE) {
+		al_err("udma: queue (%d) size too small\n", qid);
+		return -EINVAL;
+	}
+
+	if (q_params->size > AL_UDMA_MAX_Q_SIZE) {
+		al_err("udma: queue (%d) size too large\n", qid);
+		return -EINVAL;
+	}
+
+	if (q_params->size & (q_params->size - 1)) {
+		al_err("udma: queue (%d) size (%d) must be power of 2\n",
+			 qid, q_params->size);
+		return -EINVAL;
+	}
+
+	udma_q = &udma->udma_q[qid];
+	/* set the queue's regs base address */
+	if (udma->type == UDMA_TX)
+		udma_q->q_regs = (union udma_q_regs __iomem *)
+					&udma->udma_regs->m2s.m2s_q[qid];
+	else
+		udma_q->q_regs = (union udma_q_regs __iomem *)
+					&udma->udma_regs->s2m.s2m_q[qid];
+
+	udma_q->adapter_rev_id = q_params->adapter_rev_id;
+	udma_q->size = q_params->size;
+	udma_q->size_mask = q_params->size - 1;
+	udma_q->desc_base_ptr = q_params->desc_base;
+	udma_q->desc_phy_base = q_params->desc_phy_base;
+	udma_q->cdesc_base_ptr = q_params->cdesc_base;
+	udma_q->cdesc_phy_base = q_params->cdesc_phy_base;
+	udma_q->cdesc_size = q_params->cdesc_size;
+
+	udma_q->next_desc_idx = 0;
+	udma_q->next_cdesc_idx = 0;
+	udma_q->end_cdesc_ptr = (uint8_t *) udma_q->cdesc_base_ptr +
+	    (udma_q->size - 1) * udma_q->cdesc_size;
+	udma_q->comp_head_idx = 0;
+	udma_q->comp_head_ptr = (union al_udma_cdesc *)udma_q->cdesc_base_ptr;
+	udma_q->desc_ring_id = AL_UDMA_INITIAL_RING_ID;
+	udma_q->comp_ring_id = AL_UDMA_INITIAL_RING_ID;
+#if 0
+	udma_q->desc_ctrl_bits = AL_UDMA_INITIAL_RING_ID <<
+			AL_M2S_DESC_RING_ID_SHIFT;
+#endif
+	udma_q->pkt_crnt_descs = 0;
+	udma_q->flags = 0;
+	udma_q->status = AL_QUEUE_DISABLED;
+	udma_q->udma = udma;
+	udma_q->qid = qid;
+
+	/* start hardware configuration: */
+	al_udma_q_config(udma_q);
+	/* reset the queue pointers */
+	al_udma_q_set_pointers(udma_q);
+
+	/* enable the q */
+	al_udma_q_enable(udma_q, 1);
+
+	al_dbg("udma [%s %d]: %s q init. size 0x%x\n"
+			"  desc ring info: phys base 0x%llx virt base %p\n"
+			"  cdesc ring info: phys base 0x%llx virt base %p "
+			"entry size 0x%x",
+			udma_q->udma->name, udma_q->qid,
+			udma->type == UDMA_TX ? "Tx" : "Rx",
+			q_params->size,
+			(unsigned long long)q_params->desc_phy_base,
+			q_params->desc_base,
+			(unsigned long long)q_params->cdesc_phy_base,
+			q_params->cdesc_base,
+			q_params->cdesc_size);
+
+	return 0;
+}
+
+/*
+ * Reset a udma queue
+ */
+int al_udma_q_reset(struct al_udma_q *udma_q)
+{
+ unsigned int remaining_time = AL_UDMA_Q_RST_TOUT;
+ uint32_t *status_reg;
+ uint32_t *dcp_reg;
+ uint32_t *crhp_reg;
+ uint32_t *q_sw_ctrl_reg;
+
+ al_assert(udma_q);
+
+ /* De-assert scheduling and prefetch */
+ al_udma_q_enable(udma_q, 0);
+
+ /* Wait for scheduling and prefetch to stop */
+ status_reg = &udma_q->q_regs->rings.status;
+
+ while (remaining_time) {
+ uint32_t status = al_reg_read32(status_reg);
+
+ if (!(status & (UDMA_M2S_Q_STATUS_PREFETCH |
+ UDMA_M2S_Q_STATUS_SCHEDULER)))
+ break;
+
+ remaining_time--;
+ al_udelay(1);
+ }
+
+ if (!remaining_time) {
+ al_err("udma [%s %d]: %s timeout waiting for prefetch and "
+ "scheduler disable\n", udma_q->udma->name, udma_q->qid,
+ __func__);
+ return -ETIMEDOUT;
+ }
+
+ /* Wait for the completion queue to reach to the same pointer as the
+ * prefetch stopped at ([TR]DCP == [TR]CRHP) */
+ dcp_reg = &udma_q->q_regs->rings.dcp;
+ crhp_reg = &udma_q->q_regs->rings.crhp;
+
+ while (remaining_time) {
+ uint32_t dcp = al_reg_read32(dcp_reg);
+ uint32_t crhp = al_reg_read32(crhp_reg);
+
+ if (dcp == crhp)
+ break;
+
+ remaining_time--;
+ al_udelay(1);
+ };
+
+ if (!remaining_time) {
+ al_err("udma [%s %d]: %s timeout waiting for dcp==crhp\n",
+ udma_q->udma->name, udma_q->qid, __func__);
+ return -ETIMEDOUT;
+ }
+
+ /* Assert the queue reset */
+ if (udma_q->udma->type == UDMA_TX)
+ q_sw_ctrl_reg = &udma_q->q_regs->m2s_q.q_sw_ctrl;
+ else
+ q_sw_ctrl_reg = &udma_q->q_regs->s2m_q.q_sw_ctrl;
+
+ al_reg_write32(q_sw_ctrl_reg, UDMA_M2S_Q_SW_CTRL_RST_Q);
+
+ return 0;
+}
+
+/*
+ * return (by reference) a pointer to a specific queue data structure.
+ */
+int al_udma_q_handle_get(struct al_udma *udma, uint32_t qid,
+ struct al_udma_q **q_handle)
+{
+
+ al_assert(udma);
+ al_assert(q_handle);
+
+ if (unlikely(qid >= udma->num_of_queues)) {
+ al_err("udma [%s]: invalid queue id (%d)\n", udma->name, qid);
+ return -EINVAL;
+ }
+ *q_handle = &udma->udma_q[qid];
+ return 0;
+}
+
+/*
+ * Change the UDMA's state
+ */
+int al_udma_state_set(struct al_udma *udma, enum al_udma_state state)
+{
+ uint32_t reg;
+
+ al_assert(udma != NULL);
+ if (state == udma->state)
+ al_dbg("udma [%s]: requested state identical to "
+ "current state (%d)\n", udma->name, state);
+
+ al_dbg("udma [%s]: change state from (%s) to (%s)\n",
+ udma->name, al_udma_states_name[udma->state],
+ al_udma_states_name[state]);
+
+ reg = 0;
+ switch (state) {
+ case UDMA_DISABLE:
+ reg |= UDMA_M2S_CHANGE_STATE_DIS;
+ break;
+ case UDMA_NORMAL:
+ reg |= UDMA_M2S_CHANGE_STATE_NORMAL;
+ break;
+ case UDMA_ABORT:
+ reg |= UDMA_M2S_CHANGE_STATE_ABORT;
+ break;
+ default:
+ al_err("udma: invalid state (%d)\n", state);
+ return -EINVAL;
+ }
+
+ if (udma->type == UDMA_TX)
+ al_reg_write32(&udma->udma_regs->m2s.m2s.change_state, reg);
+ else
+ al_reg_write32(&udma->udma_regs->s2m.s2m.change_state, reg);
+
+ udma->state = state;
+ return 0;
+}
+
+/*
+ * return the current UDMA hardware state
+ */
+enum al_udma_state al_udma_state_get(struct al_udma *udma)
+{
+ uint32_t state_reg;
+ uint32_t comp_ctrl;
+ uint32_t stream_if;
+ uint32_t data_rd;
+ uint32_t desc_pref;
+
+ if (udma->type == UDMA_TX)
+ state_reg = al_reg_read32(&udma->udma_regs->m2s.m2s.state);
+ else
+ state_reg = al_reg_read32(&udma->udma_regs->s2m.s2m.state);
+
+ comp_ctrl = AL_REG_FIELD_GET(state_reg,
+ UDMA_M2S_STATE_COMP_CTRL_MASK,
+ UDMA_M2S_STATE_COMP_CTRL_SHIFT);
+ stream_if = AL_REG_FIELD_GET(state_reg,
+ UDMA_M2S_STATE_STREAM_IF_MASK,
+ UDMA_M2S_STATE_STREAM_IF_SHIFT);
+ data_rd = AL_REG_FIELD_GET(state_reg,
+ UDMA_M2S_STATE_DATA_RD_CTRL_MASK,
+ UDMA_M2S_STATE_DATA_RD_CTRL_SHIFT);
+ desc_pref = AL_REG_FIELD_GET(state_reg,
+ UDMA_M2S_STATE_DESC_PREF_MASK,
+ UDMA_M2S_STATE_DESC_PREF_SHIFT);
+
+ al_assert(comp_ctrl != UDMA_STATE_RESERVED);
+ al_assert(stream_if != UDMA_STATE_RESERVED);
+ al_assert(data_rd != UDMA_STATE_RESERVED);
+ al_assert(desc_pref != UDMA_STATE_RESERVED);
+
+ /* if any of the states is abort then return abort */
+ if ((comp_ctrl == UDMA_STATE_ABORT) || (stream_if == UDMA_STATE_ABORT)
+ || (data_rd == UDMA_STATE_ABORT)
+ || (desc_pref == UDMA_STATE_ABORT))
+ return UDMA_ABORT;
+
+ /* if any of the states is normal then return normal */
+ if ((comp_ctrl == UDMA_STATE_NORMAL)
+ || (stream_if == UDMA_STATE_NORMAL)
+ || (data_rd == UDMA_STATE_NORMAL)
+ || (desc_pref == UDMA_STATE_NORMAL))
+ return UDMA_NORMAL;
+
+ return UDMA_IDLE;
+}
+
+/*
+ * Action handling
+ */
+
+/*
+ * get next completed packet from completion ring of the queue
+ */
+uint32_t al_udma_cdesc_packet_get(
+ struct al_udma_q *udma_q,
+ volatile union al_udma_cdesc **cdesc)
+{
+ uint32_t count;
+ volatile union al_udma_cdesc *curr;
+ uint32_t comp_flags;
+
+ /* this function requires the completion ring update */
+ al_assert(!(udma_q->flags & AL_UDMA_Q_FLAGS_NO_COMP_UPDATE));
+
+ /* comp_head points to the last comp desc that was processed */
+ curr = udma_q->comp_head_ptr;
+ comp_flags = swap32_from_le(curr->al_desc_comp_tx.ctrl_meta);
+
+ /* check if the completion descriptor is new */
+ if (unlikely(al_udma_new_cdesc(udma_q, comp_flags) == AL_FALSE))
+ return 0;
+ /* if new desc found, increment the current packets descriptors */
+ count = udma_q->pkt_crnt_descs + 1;
+ while (!cdesc_is_last(comp_flags)) {
+ curr = al_cdesc_next_update(udma_q, curr);
+ comp_flags = swap32_from_le(curr->al_desc_comp_tx.ctrl_meta);
+ if (unlikely(al_udma_new_cdesc(udma_q, comp_flags)
+ == AL_FALSE)) {
+ /* the current packet here doesn't have all */
+ /* descriptors completed. log the current desc */
+ /* location and number of completed descriptors so */
+ /* far. then return */
+ udma_q->pkt_crnt_descs = count;
+ udma_q->comp_head_ptr = curr;
+ return 0;
+ }
+ count++;
+ /* check against max descs per packet. */
+ al_assert(count <= udma_q->size);
+ }
+ /* return back the first descriptor of the packet */
+ *cdesc = al_udma_cdesc_idx_to_ptr(udma_q, udma_q->next_cdesc_idx);
+ udma_q->pkt_crnt_descs = 0;
+ udma_q->comp_head_ptr = al_cdesc_next_update(udma_q, curr);
+
+ al_dbg("udma [%s %d]: packet completed. first desc %p (ixd 0x%x)"
+ " descs %d\n", udma_q->udma->name, udma_q->qid, *cdesc,
+ udma_q->next_cdesc_idx, count);
+
+ return count;
+}
+
+/** @} end of UDMA group */
diff --git a/al_hal_udma_regs.h b/al_hal_udma_regs.h
new file mode 100644
index 000000000000..ed37215ae445
--- /dev/null
+++ b/al_hal_udma_regs.h
@@ -0,0 +1,104 @@
+/*-
+*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @{
+ * @file al_hal_udma_regs.h
+ *
+ * @brief udma registers definition
+ *
+ *
+ */
+#ifndef __AL_HAL_UDMA_REG_H
+#define __AL_HAL_UDMA_REG_H
+
+#include "al_hal_udma_regs_m2s.h"
+#include "al_hal_udma_regs_s2m.h"
+#include "al_hal_udma_regs_gen.h"
+
+#define AL_UDMA_REV_ID_REV0 0
+#define AL_UDMA_REV_ID_REV1 1
+#define AL_UDMA_REV_ID_REV2 2
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** UDMA registers, either m2s or s2m */
+union udma_regs {
+ struct udma_m2s_regs m2s;
+ struct udma_s2m_regs s2m;
+};
+
+struct unit_regs {
+ struct udma_m2s_regs m2s;
+ uint32_t rsrvd0[(0x10000 - sizeof(struct udma_m2s_regs)) >> 2];
+ struct udma_s2m_regs s2m;
+ uint32_t rsrvd1[((0x1C000 - 0x10000) - sizeof(struct udma_s2m_regs)) >> 2];
+ struct udma_gen_regs gen;
+};
+
+/** UDMA submission and completion registers; M2S and S2M UDMAs share the same structure */
+struct udma_rings_regs {
+ uint32_t rsrvd0[8];
+ uint32_t cfg; /* Descriptor ring configuration */
+ uint32_t status; /* Descriptor ring status and information */
+ uint32_t drbp_low; /* Descriptor Ring Base Pointer [31:4] */
+ uint32_t drbp_high; /* Descriptor Ring Base Pointer [63:32] */
+ uint32_t drl; /* Descriptor Ring Length[23:2] */
+ uint32_t drhp; /* Descriptor Ring Head Pointer */
+ uint32_t drtp_inc; /* Descriptor Tail Pointer increment */
+ uint32_t drtp; /* Descriptor Tail Pointer */
+ uint32_t dcp; /* Descriptor Current Pointer */
+ uint32_t crbp_low; /* Completion Ring Base Pointer [31:4] */
+ uint32_t crbp_high; /* Completion Ring Base Pointer [63:32] */
+ uint32_t crhp; /* Completion Ring Head Pointer */
+ uint32_t crhp_internal; /* Completion Ring Head Pointer internal, before AX ... */
+};
+
+/** M2S and S2M generic structure of Q registers */
+union udma_q_regs {
+ struct udma_rings_regs rings;
+ struct udma_m2s_q m2s_q;
+ struct udma_s2m_q s2m_q;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_HAL_UDMA_REG_H */
+/** @} end of UDMA group */
diff --git a/al_hal_udma_regs_gen.h b/al_hal_udma_regs_gen.h
new file mode 100644
index 000000000000..89f94b85a56b
--- /dev/null
+++ b/al_hal_udma_regs_gen.h
@@ -0,0 +1,414 @@
+/*-
+*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @file al_hal_udma_regs_gen.h
+ *
+ * @brief C Header file for the UDMA general registers
+ *
+ */
+
+#ifndef __AL_HAL_UDMA_GEN_REG_H
+#define __AL_HAL_UDMA_GEN_REG_H
+
+#include "al_hal_udma_iofic_regs.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+* Unit Registers
+*/
+
+
+
+struct udma_gen_dma_misc {
+ /* [0x0] Reserved register for the interrupt controller */
+ uint32_t int_cfg;
+ /* [0x4] Revision register */
+ uint32_t revision;
+ /* [0x8] Reserved for future use */
+ uint32_t general_cfg_1;
+ /* [0xc] Reserved for future use */
+ uint32_t general_cfg_2;
+ /* [0x10] Reserved for future use */
+ uint32_t general_cfg_3;
+ /* [0x14] Reserved for future use */
+ uint32_t general_cfg_4;
+ /* [0x18] General timer configuration */
+ uint32_t general_cfg_5;
+ uint32_t rsrvd[57];
+};
+struct udma_gen_mailbox {
+ /*
+ * [0x0] Mailbox interrupt generator.
+ * Generates interrupt to neighbor DMA
+ */
+ uint32_t interrupt;
+ /* [0x4] Mailbox message data out */
+ uint32_t msg_out;
+ /* [0x8] Mailbox message data in */
+ uint32_t msg_in;
+ uint32_t rsrvd[13];
+};
+struct udma_gen_axi {
+	/* [0x0] Configuration of the AXI masters */
+	uint32_t cfg_1;
+	/* [0x4] Configuration of the AXI masters */
+	uint32_t cfg_2;
+	/* [0x8] Configuration of the AXI masters. Endianness configuration */
+	uint32_t endian_cfg;
+	uint32_t rsrvd[61];
+};
+struct udma_gen_sram_ctrl {
+ /* [0x0] Timing configuration */
+ uint32_t timing;
+};
+struct udma_gen_vmid {
+ /* [0x0] VMID control */
+ uint32_t cfg_vmid_0;
+ /* [0x4] TX queue 0/1 VMID */
+ uint32_t cfg_vmid_1;
+ /* [0x8] TX queue 2/3 VMID */
+ uint32_t cfg_vmid_2;
+ /* [0xc] RX queue 0/1 VMID */
+ uint32_t cfg_vmid_3;
+ /* [0x10] RX queue 2/3 VMID */
+ uint32_t cfg_vmid_4;
+};
+struct udma_gen_vmaddr {
+ /* [0x0] TX queue 0/1 VMADDR */
+ uint32_t cfg_vmaddr_0;
+ /* [0x4] TX queue 2/3 VMADDR */
+ uint32_t cfg_vmaddr_1;
+ /* [0x8] RX queue 0/1 VMADDR */
+ uint32_t cfg_vmaddr_2;
+ /* [0xc] RX queue 2/3 VMADDR */
+ uint32_t cfg_vmaddr_3;
+};
+struct udma_gen_vmpr {
+	/* [0x0] TX VMPR control */
+	uint32_t cfg_vmpr_0;
+	/* [0x4] TX VMPR Address High Register */
+	uint32_t cfg_vmpr_1;
+	/* [0x8] TX queue VMID values */
+	uint32_t cfg_vmpr_2;
+	/* [0xc] TX queue VMID values */
+	uint32_t cfg_vmpr_3;
+	/* [0x10] RX VMPR control */
+	uint32_t cfg_vmpr_4;
+	/* [0x14] RX VMPR Buffer2 MSB address */
+	uint32_t cfg_vmpr_5;
+	/* [0x18] RX queue VMID values */
+	uint32_t cfg_vmpr_6;
+	/* [0x1c] RX queue BUF1 VMID values */
+	uint32_t cfg_vmpr_7;
+	/* [0x20] RX queue BUF2 VMID values */
+	uint32_t cfg_vmpr_8;
+	/* [0x24] RX queue Direct Data Placement VMID values */
+	uint32_t cfg_vmpr_9;
+	/* [0x28] RX VMPR BUF1 Address High Register */
+	uint32_t cfg_vmpr_10;
+	/* [0x2c] RX VMPR BUF2 Address High Register */
+	uint32_t cfg_vmpr_11;
+	/* [0x30] RX VMPR DDP Address High Register */
+	uint32_t cfg_vmpr_12;
+	uint32_t rsrvd[3];
+};
+
+struct udma_gen_regs {
+ struct udma_iofic_regs interrupt_regs; /* [0x0000] */
+ struct udma_gen_dma_misc dma_misc; /* [0x2080] */
+ struct udma_gen_mailbox mailbox[4]; /* [0x2180] */
+ struct udma_gen_axi axi; /* [0x2280] */
+ struct udma_gen_sram_ctrl sram_ctrl[25]; /* [0x2380] */
+ uint32_t rsrvd_1[2];
+ struct udma_gen_vmid vmid; /* [0x23ec] */
+ struct udma_gen_vmaddr vmaddr; /* [0x2400] */
+ uint32_t rsrvd_2[252];
+ struct udma_gen_vmpr vmpr[4]; /* [0x2800] */
+};
+
+
+/*
+* Registers Fields
+*/
+
+
+/**** int_cfg register ****/
+/*
+ * MSIX data width
+ * 1 - 64 bit
+ * 0 – 32 bit
+ */
+#define UDMA_GEN_DMA_MISC_INT_CFG_MSIX_64 (1 << 0)
+/* General configuration */
+#define UDMA_GEN_DMA_MISC_INT_CFG_RESERVED_3_1_MASK 0x0000000E
+#define UDMA_GEN_DMA_MISC_INT_CFG_RESERVED_3_1_SHIFT 1
+/* MSIx AXI QoS */
+#define UDMA_GEN_DMA_MISC_INT_CFG_MSIX_AXI_QOS_MASK 0x00000070
+#define UDMA_GEN_DMA_MISC_INT_CFG_MSIX_AXI_QOS_SHIFT 4
+
+#define UDMA_GEN_DMA_MISC_INT_CFG_RESERVED_31_7_MASK 0xFFFFFF80
+#define UDMA_GEN_DMA_MISC_INT_CFG_RESERVED_31_7_SHIFT 7
+
+/**** revision register ****/
+/* Design programming interface revision ID */
+#define UDMA_GEN_DMA_MISC_REVISION_PROGRAMMING_ID_MASK 0x00000FFF
+#define UDMA_GEN_DMA_MISC_REVISION_PROGRAMMING_ID_SHIFT 0
+/* Design minor revision ID */
+#define UDMA_GEN_DMA_MISC_REVISION_MINOR_ID_MASK 0x00FFF000
+#define UDMA_GEN_DMA_MISC_REVISION_MINOR_ID_SHIFT 12
+/* Design major revision ID */
+#define UDMA_GEN_DMA_MISC_REVISION_MAJOR_ID_MASK 0xFF000000
+#define UDMA_GEN_DMA_MISC_REVISION_MAJOR_ID_SHIFT 24
+
+/**** Interrupt register ****/
+/* Generate interrupt to another DMA */
+#define UDMA_GEN_MAILBOX_INTERRUPT_SET (1 << 0)
+
+/**** cfg_2 register ****/
+/*
+ * Enable arbitration promotion.
+ * Increment master priority after configured number of arbitration cycles
+ */
+#define UDMA_GEN_AXI_CFG_2_ARB_PROMOTION_MASK 0x0000000F
+#define UDMA_GEN_AXI_CFG_2_ARB_PROMOTION_SHIFT 0
+
+/**** endian_cfg register ****/
+/* Swap M2S descriptor read and completion descriptor write. */
+#define UDMA_GEN_AXI_ENDIAN_CFG_SWAP_M2S_DESC (1 << 0)
+/* Swap M2S data read. */
+#define UDMA_GEN_AXI_ENDIAN_CFG_SWAP_M2S_DATA (1 << 1)
+/* Swap S2M descriptor read and completion descriptor write. */
+#define UDMA_GEN_AXI_ENDIAN_CFG_SWAP_S2M_DESC (1 << 2)
+/* Swap S2M data write. */
+#define UDMA_GEN_AXI_ENDIAN_CFG_SWAP_S2M_DATA (1 << 3)
+/*
+ * Swap 32 or 64 bit mode:
+ * 0 - Swap groups of 4 bytes
+ * 1 - Swap groups of 8 bytes
+ */
+#define UDMA_GEN_AXI_ENDIAN_CFG_SWAP_64B_EN (1 << 4)
+
+/**** timing register ****/
+/* Write margin */
+#define UDMA_GEN_SRAM_CTRL_TIMING_RMA_MASK 0x0000000F
+#define UDMA_GEN_SRAM_CTRL_TIMING_RMA_SHIFT 0
+/* Write margin enable */
+#define UDMA_GEN_SRAM_CTRL_TIMING_RMEA (1 << 8)
+/* Read margin */
+#define UDMA_GEN_SRAM_CTRL_TIMING_RMB_MASK 0x000F0000
+#define UDMA_GEN_SRAM_CTRL_TIMING_RMB_SHIFT 16
+/* Read margin enable */
+#define UDMA_GEN_SRAM_CTRL_TIMING_RMEB (1 << 24)
+
+/**** cfg_vmid_0 register ****/
+/* For M2S queues 3:0, enable usage of the VMID from the buffer address 63:56 */
+#define UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_DESC_EN_MASK 0x0000000F
+#define UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_DESC_EN_SHIFT 0
+/*
+ * For M2S queues 3:0, enable usage of the VMID from the configuration register
+ * (cfg_vmid_1/2 used for M2S queue_x)
+ */
+#define UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_QUEUE_EN_MASK 0x000000F0
+#define UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_QUEUE_EN_SHIFT 4
+/* use VMID_n [7:0] from MSI-X Controller for MSI-X message */
+#define UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_SEL (1 << 8)
+/* Enable write to all VMID_n registers in the MSI-X Controller */
+#define UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_ACCESS_EN (1 << 9)
+/* For S2M queues 3:0, enable usage of the VMID from the buffer address 63:56 */
+#define UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_DESC_EN_MASK 0x000F0000
+#define UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_DESC_EN_SHIFT 16
+/*
+ * For S2M queues 3:0, enable usage of the VMID from the configuration register
+ * (cfg_vmid_3/4 used for M2S queue_x)
+ */
+#define UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_QUEUE_EN_MASK 0x00F00000
+#define UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_QUEUE_EN_SHIFT 20
+
+/**** cfg_vmid_1 register ****/
+/* TX queue 0 VMID value */
+#define UDMA_GEN_VMID_CFG_VMID_1_TX_Q_0_VMID_MASK 0x0000FFFF
+#define UDMA_GEN_VMID_CFG_VMID_1_TX_Q_0_VMID_SHIFT 0
+/* TX queue 1 VMID value */
+#define UDMA_GEN_VMID_CFG_VMID_1_TX_Q_1_VMID_MASK 0xFFFF0000
+#define UDMA_GEN_VMID_CFG_VMID_1_TX_Q_1_VMID_SHIFT 16
+
+/**** cfg_vmid_2 register ****/
+/* TX queue 2 VMID value */
+#define UDMA_GEN_VMID_CFG_VMID_2_TX_Q_2_VMID_MASK 0x0000FFFF
+#define UDMA_GEN_VMID_CFG_VMID_2_TX_Q_2_VMID_SHIFT 0
+/* TX queue 3 VMID value */
+#define UDMA_GEN_VMID_CFG_VMID_2_TX_Q_3_VMID_MASK 0xFFFF0000
+#define UDMA_GEN_VMID_CFG_VMID_2_TX_Q_3_VMID_SHIFT 16
+
+/**** cfg_vmid_3 register ****/
+/* RX queue 0 VMID value */
+#define UDMA_GEN_VMID_CFG_VMID_3_RX_Q_0_VMID_MASK 0x0000FFFF
+#define UDMA_GEN_VMID_CFG_VMID_3_RX_Q_0_VMID_SHIFT 0
+/* RX queue 1 VMID value */
+#define UDMA_GEN_VMID_CFG_VMID_3_RX_Q_1_VMID_MASK 0xFFFF0000
+#define UDMA_GEN_VMID_CFG_VMID_3_RX_Q_1_VMID_SHIFT 16
+
+/**** cfg_vmid_4 register ****/
+/* RX queue 2 VMID value */
+#define UDMA_GEN_VMID_CFG_VMID_4_RX_Q_2_VMID_MASK 0x0000FFFF
+#define UDMA_GEN_VMID_CFG_VMID_4_RX_Q_2_VMID_SHIFT 0
+/* RX queue 3 VMID value */
+#define UDMA_GEN_VMID_CFG_VMID_4_RX_Q_3_VMID_MASK 0xFFFF0000
+#define UDMA_GEN_VMID_CFG_VMID_4_RX_Q_3_VMID_SHIFT 16
+
+/**** cfg_vmaddr_0 register ****/
+/* TX queue 0 VMADDR value */
+#define UDMA_GEN_VMADDR_CFG_VMADDR_0_TX_Q_0_VMADDR_MASK 0x0000FFFF
+#define UDMA_GEN_VMADDR_CFG_VMADDR_0_TX_Q_0_VMADDR_SHIFT 0
+/* TX queue 1 VMADDR value */
+#define UDMA_GEN_VMADDR_CFG_VMADDR_0_TX_Q_1_VMADDR_MASK 0xFFFF0000
+#define UDMA_GEN_VMADDR_CFG_VMADDR_0_TX_Q_1_VMADDR_SHIFT 16
+
+/**** cfg_vmaddr_1 register ****/
+/* TX queue 2 VMADDR value */
+#define UDMA_GEN_VMADDR_CFG_VMADDR_1_TX_Q_2_VMADDR_MASK 0x0000FFFF
+#define UDMA_GEN_VMADDR_CFG_VMADDR_1_TX_Q_2_VMADDR_SHIFT 0
+/* TX queue 3 VMADDR value */
+#define UDMA_GEN_VMADDR_CFG_VMADDR_1_TX_Q_3_VMADDR_MASK 0xFFFF0000
+#define UDMA_GEN_VMADDR_CFG_VMADDR_1_TX_Q_3_VMADDR_SHIFT 16
+
+/**** cfg_vmaddr_2 register ****/
+/* RX queue 0 VMADDR value */
+#define UDMA_GEN_VMADDR_CFG_VMADDR_2_RX_Q_0_VMADDR_MASK 0x0000FFFF
+#define UDMA_GEN_VMADDR_CFG_VMADDR_2_RX_Q_0_VMADDR_SHIFT 0
+/* RX queue 1 VMADDR value */
+#define UDMA_GEN_VMADDR_CFG_VMADDR_2_RX_Q_1_VMADDR_MASK 0xFFFF0000
+#define UDMA_GEN_VMADDR_CFG_VMADDR_2_RX_Q_1_VMADDR_SHIFT 16
+
+/**** cfg_vmaddr_3 register ****/
+/* RX queue 2 VMADDR value */
+#define UDMA_GEN_VMADDR_CFG_VMADDR_3_RX_Q_2_VMADDR_MASK 0x0000FFFF
+#define UDMA_GEN_VMADDR_CFG_VMADDR_3_RX_Q_2_VMADDR_SHIFT 0
+/* RX queue 3 VMADDR value */
+#define UDMA_GEN_VMADDR_CFG_VMADDR_3_RX_Q_3_VMADDR_MASK 0xFFFF0000
+#define UDMA_GEN_VMADDR_CFG_VMADDR_3_RX_Q_3_VMADDR_SHIFT 16
+
+/**** cfg_vmpr_0 register ****/
+/* TX High Address Select Per Q */
+#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_HISEL_MASK 0x0000003F
+#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_HISEL_SHIFT 0
+/* TX Data VMID Enable Per Q */
+#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_DATA_VMID_EN (1 << 7)
+/* TX Prefetch VMID Enable Per Q */
+#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_PREF_VMID_EN (1 << 28)
+/* TX Completions VMID Enable Per Q */
+#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_CMPL_VMID_EN (1 << 29)
+
+/**** cfg_vmpr_2 register ****/
+/* TX queue Prefetch VMID */
+#define UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_PREF_VMID_MASK 0x0000FFFF
+#define UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_PREF_VMID_SHIFT 0
+/* TX queue Completion VMID */
+#define UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_CMPL_VMID_MASK 0xFFFF0000
+#define UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_CMPL_VMID_SHIFT 16
+
+/**** cfg_vmpr_3 register ****/
+/* TX queue Data VMID */
+#define UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_MASK 0x0000FFFF
+#define UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SHIFT 0
+/* TX queue Data VMID select */
+#define UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SEL_MASK 0xFFFF0000
+#define UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SEL_SHIFT 16
+
+/**** cfg_vmpr_4 register ****/
+/* RX Data Buffer1 - High Address Select Per Q */
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_HISEL_MASK 0x0000003F
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_HISEL_SHIFT 0
+/* RX Data Buffer1 VMID Enable Per Q */
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_VMID_EN (1 << 7)
+/* RX Data Buffer2 - High Address Select Per Q */
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_HISEL_MASK 0x00003F00
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_HISEL_SHIFT 8
+/* RX Data Buffer2 VMID Enable Per Q */
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_VMID_EN (1 << 15)
+/* RX Direct Data Placement - High Address Select Per Q */
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_HISEL_MASK 0x003F0000
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_HISEL_SHIFT 16
+/* RX Direct Data Placement VMID Enable Per Q */
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_VMID_EN (1 << 23)
+/* RX Buffer 2 MSB address word selects per bytes, per queue */
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_MSB_ADDR_SEL_MASK 0x0F000000
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_MSB_ADDR_SEL_SHIFT 24
+/* RX Prefetch VMID Enable Per Q */
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_PREF_VMID_EN (1 << 28)
+/* RX Completions VMID Enable Per Q */
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_CMPL_VMID_EN (1 << 29)
+
+/**** cfg_vmpr_6 register ****/
+/* RX queue Prefetch VMID */
+#define UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_PREF_VMID_MASK 0x0000FFFF
+#define UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_PREF_VMID_SHIFT 0
+/* RX queue Completion VMID */
+#define UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_CMPL_VMID_MASK 0xFFFF0000
+#define UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_CMPL_VMID_SHIFT 16
+
+/**** cfg_vmpr_7 register ****/
+/* RX queue Data Buffer 1 VMID */
+#define UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_MASK 0x0000FFFF
+#define UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SHIFT 0
+/* RX queue Data Buffer 1 VMID select */
+#define UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SEL_MASK 0xFFFF0000
+#define UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SEL_SHIFT 16
+
+/**** cfg_vmpr_8 register ****/
+/* RX queue Data Buffer 2 VMID */
+#define UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_MASK 0x0000FFFF
+#define UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SHIFT 0
+/* RX queue Data Buffer 2 VMID select */
+#define UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SEL_MASK 0xFFFF0000
+#define UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SEL_SHIFT 16
+
+/**** cfg_vmpr_9 register ****/
+/* RX queue DDP VMID */
+#define UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_MASK 0x0000FFFF
+#define UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SHIFT 0
+/* RX queue DDP VMID select */
+#define UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SEL_MASK 0xFFFF0000
+#define UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SEL_SHIFT 16
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_HAL_UDMA_GEN_REG_H */
diff --git a/al_hal_udma_regs_m2s.h b/al_hal_udma_regs_m2s.h
new file mode 100644
index 000000000000..06cea8db8f6d
--- /dev/null
+++ b/al_hal_udma_regs_m2s.h
@@ -0,0 +1,1159 @@
+/*-
+*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @file al_hal_udma_regs_m2s.h
+ *
+ * @brief C Header file for the UDMA M2S registers
+ *
+ */
+
+#ifndef __AL_HAL_UDMA_M2S_REG_H
+#define __AL_HAL_UDMA_M2S_REG_H
+
+#include "al_hal_plat_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+* Unit Registers
+*/
+
+
+
+struct udma_axi_m2s {
+ /* [0x0] Completion write master configuration */
+ uint32_t comp_wr_cfg_1;
+ /* [0x4] Completion write master configuration */
+ uint32_t comp_wr_cfg_2;
+ /* [0x8] Data read master configuration */
+ uint32_t data_rd_cfg_1;
+ /* [0xc] Data read master configuration */
+ uint32_t data_rd_cfg_2;
+ /* [0x10] Descriptor read master configuration */
+ uint32_t desc_rd_cfg_1;
+ /* [0x14] Descriptor read master configuration */
+ uint32_t desc_rd_cfg_2;
+ /* [0x18] Data read master configuration */
+ uint32_t data_rd_cfg;
+ /* [0x1c] Descriptors read master configuration */
+ uint32_t desc_rd_cfg_3;
+ /* [0x20] Descriptors write master configuration (completion) */
+ uint32_t desc_wr_cfg_1;
+ /* [0x24] AXI outstanding configuration */
+ uint32_t ostand_cfg;
+ uint32_t rsrvd[54];
+};
+struct udma_m2s {
+ /*
+ * [0x0] DMA state.
+ * 00 - No pending tasks
+ * 01 - Normal (active)
+ * 10 - Abort (error condition)
+ * 11 - Reserved
+ */
+ uint32_t state;
+ /* [0x4] CPU request to change DMA state */
+ uint32_t change_state;
+ uint32_t rsrvd_0;
+ /*
+ * [0xc] M2S DMA error log mask.
+ * Each error has an interrupt controller cause bit.
+ * This register determines if these errors cause the M2S DMA to log the
+ * error condition.
+ * 0 - Log is enabled.
+ * 1 - Log is masked.
+ */
+ uint32_t err_log_mask;
+ uint32_t rsrvd_1;
+ /*
+ * [0x14] DMA header log.
+ * Sample the packet header that caused the error.
+ */
+ uint32_t log_0;
+ /*
+ * [0x18] DMA header log.
+ * Sample the packet header that caused the error.
+ */
+ uint32_t log_1;
+ /*
+ * [0x1c] DMA header log.
+ * Sample the packet header that caused the error.
+ */
+ uint32_t log_2;
+ /*
+ * [0x20] DMA header log.
+ * Sample the packet header that caused the error.
+ */
+ uint32_t log_3;
+ /* [0x24] DMA clear error log */
+ uint32_t clear_err_log;
+ /* [0x28] M2S data FIFO status */
+ uint32_t data_fifo_status;
+ /* [0x2c] M2S header FIFO status */
+ uint32_t header_fifo_status;
+ /* [0x30] M2S unack FIFO status */
+ uint32_t unack_fifo_status;
+ /* [0x34] Select queue for debug */
+ uint32_t indirect_ctrl;
+ /*
+ * [0x38] M2S prefetch FIFO status.
+ * Status of the selected queue in M2S_indirect_ctrl
+ */
+ uint32_t sel_pref_fifo_status;
+ /*
+ * [0x3c] M2S completion FIFO status.
+ * Status of the selected queue in M2S_indirect_ctrl
+ */
+ uint32_t sel_comp_fifo_status;
+ /*
+ * [0x40] M2S rate limit status.
+ * Status of the selected queue in M2S_indirect_ctrl
+ */
+ uint32_t sel_rate_limit_status;
+ /*
+ * [0x44] M2S DWRR scheduler status.
+ * Status of the selected queue in M2S_indirect_ctrl
+ */
+ uint32_t sel_dwrr_status;
+ /* [0x48] M2S state machine and FIFO clear control */
+ uint32_t clear_ctrl;
+ /* [0x4c] Misc Check enable */
+ uint32_t check_en;
+ /* [0x50] M2S FIFO enable control, internal */
+ uint32_t fifo_en;
+ /* [0x54] M2S packet length configuration */
+ uint32_t cfg_len;
+ /* [0x58] Stream interface configuration */
+ uint32_t stream_cfg;
+ uint32_t rsrvd[41];
+};
+struct udma_m2s_rd {
+ /* [0x0] M2S descriptor prefetch configuration */
+ uint32_t desc_pref_cfg_1;
+ /* [0x4] M2S descriptor prefetch configuration */
+ uint32_t desc_pref_cfg_2;
+ /* [0x8] M2S descriptor prefetch configuration */
+ uint32_t desc_pref_cfg_3;
+ uint32_t rsrvd_0;
+ /* [0x10] Data burst read configuration */
+ uint32_t data_cfg;
+ uint32_t rsrvd[11];
+};
+struct udma_m2s_dwrr {
+ /* [0x0] Tx DMA DWRR scheduler configuration */
+ uint32_t cfg_sched;
+ /* [0x4] Token bucket rate limit control */
+ uint32_t ctrl_deficit_cnt;
+ uint32_t rsrvd[14];
+};
+struct udma_m2s_rate_limiter {
+ /* [0x0] Token bucket rate limit configuration */
+ uint32_t gen_cfg;
+ /*
+ * [0x4] Token bucket rate limit control.
+ * Controls the cycle counters.
+ */
+ uint32_t ctrl_cycle_cnt;
+ /*
+ * [0x8] Token bucket rate limit control.
+ * Controls the token bucket counter.
+ */
+ uint32_t ctrl_token;
+ uint32_t rsrvd[13];
+};
+
+struct udma_rlimit_common {
+ /* [0x0] Token bucket configuration */
+ uint32_t cfg_1s;
+ /* [0x4] Token bucket rate limit configuration */
+ uint32_t cfg_cycle;
+ /* [0x8] Token bucket rate limit configuration */
+ uint32_t cfg_token_size_1;
+ /* [0xc] Token bucket rate limit configuration */
+ uint32_t cfg_token_size_2;
+ /* [0x10] Token bucket rate limit configuration */
+ uint32_t sw_ctrl;
+ /*
+ * [0x14] Mask the different types of rate limiter.
+ * 0 - Rate limit is active.
+ * 1 - Rate limit is masked.
+ */
+ uint32_t mask;
+};
+
+struct udma_m2s_stream_rate_limiter {
+ struct udma_rlimit_common rlimit;
+ uint32_t rsrvd[10];
+};
+struct udma_m2s_comp {
+ /* [0x0] Completion controller configuration */
+ uint32_t cfg_1c;
+ /* [0x4] Completion controller coalescing configuration */
+ uint32_t cfg_coal;
+ /* [0x8] Completion controller application acknowledge configuration */
+ uint32_t cfg_application_ack;
+ uint32_t rsrvd[61];
+};
+struct udma_m2s_stat {
+ /* [0x0] Statistics counters configuration */
+ uint32_t cfg_st;
+ /* [0x4] Counting number of descriptors with First-bit set. */
+ uint32_t tx_pkt;
+ /*
+ * [0x8] Counting the net length of the data buffers [64-bit]
+ * Should be read before tx_bytes_high
+ */
+ uint32_t tx_bytes_low;
+ /*
+ * [0xc] Counting the net length of the data buffers [64-bit],
+ * Should be read after tx_bytes_low (value is sampled when reading
+ * tx_bytes_low)
+ */
+ uint32_t tx_bytes_high;
+ /* [0x10] Total number of descriptors read from the host memory */
+ uint32_t prefed_desc;
+ /* [0x14] Number of packets read from the unack FIFO */
+ uint32_t comp_pkt;
+ /* [0x18] Number of descriptors written into the completion ring */
+ uint32_t comp_desc;
+ /*
+ * [0x1c] Number of acknowledged packets.
+ * (acknowledge received from the stream interface)
+ */
+ uint32_t ack_pkts;
+ uint32_t rsrvd[56];
+};
+struct udma_m2s_feature {
+ /*
+ * [0x0] M2S Feature register.
+ * M2S instantiation parameters
+ */
+ uint32_t reg_1;
+ /* [0x4] Reserved M2S feature register */
+ uint32_t reg_2;
+ /*
+ * [0x8] M2S Feature register.
+ * M2S instantiation parameters
+ */
+ uint32_t reg_3;
+ /*
+ * [0xc] M2S Feature register.
+ * M2S instantiation parameters
+ */
+ uint32_t reg_4;
+ /*
+ * [0x10] M2S Feature register.
+ * M2S instantiation parameters
+ */
+ uint32_t reg_5;
+ uint32_t rsrvd[59];
+};
+struct udma_m2s_q {
+ uint32_t rsrvd_0[8];
+ /* [0x20] M2S descriptor ring configuration */
+ uint32_t cfg;
+ /* [0x24] M2S descriptor ring status and information */
+ uint32_t status;
+ /* [0x28] TX Descriptor Ring Base Pointer [31:4] */
+ uint32_t tdrbp_low;
+ /* [0x2c] TX Descriptor Ring Base Pointer [63:32] */
+ uint32_t tdrbp_high;
+ /*
+ * [0x30] TX Descriptor Ring Length[23:2]
+ */
+ uint32_t tdrl;
+ /* [0x34] TX Descriptor Ring Head Pointer */
+ uint32_t tdrhp;
+ /* [0x38] Tx Descriptor Tail Pointer increment */
+ uint32_t tdrtp_inc;
+ /* [0x3c] Tx Descriptor Tail Pointer */
+ uint32_t tdrtp;
+ /* [0x40] TX Descriptor Current Pointer */
+ uint32_t tdcp;
+ /* [0x44] Tx Completion Ring Base Pointer [31:4] */
+ uint32_t tcrbp_low;
+ /* [0x48] TX Completion Ring Base Pointer [63:32] */
+ uint32_t tcrbp_high;
+ /* [0x4c] TX Completion Ring Head Pointer */
+ uint32_t tcrhp;
+ /*
+ * [0x50] Tx Completion Ring Head Pointer internal (Before the
+ * coalescing FIFO)
+ */
+ uint32_t tcrhp_internal;
+ uint32_t rsrvd_1[3];
+ /* [0x60] Rate limit configuration */
+ struct udma_rlimit_common rlimit;
+ uint32_t rsrvd_2[2];
+ /* [0x80] DWRR scheduler configuration */
+ uint32_t dwrr_cfg_1;
+ /* [0x84] DWRR scheduler configuration */
+ uint32_t dwrr_cfg_2;
+ /* [0x88] DWRR scheduler configuration */
+ uint32_t dwrr_cfg_3;
+ /* [0x8c] DWRR scheduler software control */
+ uint32_t dwrr_sw_ctrl;
+ uint32_t rsrvd_3[4];
+ /* [0xa0] Completion controller configuration */
+ uint32_t comp_cfg;
+ uint32_t rsrvd_4[3];
+ /* [0xb0] SW control */
+ uint32_t q_sw_ctrl;
+ uint32_t rsrvd_5[3];
+ /* [0xc0] Number of M2S Tx packets after the scheduler */
+ uint32_t q_tx_pkt;
+ uint32_t rsrvd[975];
+};
+
+struct udma_m2s_regs {
+ uint32_t rsrvd_0[64];
+ struct udma_axi_m2s axi_m2s; /* [0x100] */
+ struct udma_m2s m2s; /* [0x200] */
+ struct udma_m2s_rd m2s_rd; /* [0x300] */
+ struct udma_m2s_dwrr m2s_dwrr; /* [0x340] */
+ struct udma_m2s_rate_limiter m2s_rate_limiter; /* [0x380] */
+ struct udma_m2s_stream_rate_limiter m2s_stream_rate_limiter; /* [0x3c0] */
+ struct udma_m2s_comp m2s_comp; /* [0x400] */
+ struct udma_m2s_stat m2s_stat; /* [0x500] */
+ struct udma_m2s_feature m2s_feature; /* [0x600] */
+ uint32_t rsrvd_1[576];
+ struct udma_m2s_q m2s_q[4]; /* [0x1000] */
+};
+
+
+/*
+* Registers Fields
+*/
+
+
+/**** comp_wr_cfg_1 register ****/
+/* AXI write ID (AWID) */
+#define UDMA_AXI_M2S_COMP_WR_CFG_1_AWID_MASK 0x000000FF
+#define UDMA_AXI_M2S_COMP_WR_CFG_1_AWID_SHIFT 0
+/* Cache Type */
+#define UDMA_AXI_M2S_COMP_WR_CFG_1_AWCACHE_MASK 0x000F0000
+#define UDMA_AXI_M2S_COMP_WR_CFG_1_AWCACHE_SHIFT 16
+/* Burst type */
+#define UDMA_AXI_M2S_COMP_WR_CFG_1_AWBURST_MASK 0x03000000
+#define UDMA_AXI_M2S_COMP_WR_CFG_1_AWBURST_SHIFT 24
+
+/**** comp_wr_cfg_2 register ****/
+/* User extension */
+#define UDMA_AXI_M2S_COMP_WR_CFG_2_AWUSER_MASK 0x000FFFFF
+#define UDMA_AXI_M2S_COMP_WR_CFG_2_AWUSER_SHIFT 0
+/* Bus size, 128-bit */
+#define UDMA_AXI_M2S_COMP_WR_CFG_2_AWSIZE_MASK 0x00700000
+#define UDMA_AXI_M2S_COMP_WR_CFG_2_AWSIZE_SHIFT 20
+/*
+ * AXI Master QoS.
+ * Used for arbitration between AXI masters
+ */
+#define UDMA_AXI_M2S_COMP_WR_CFG_2_AWQOS_MASK 0x07000000
+#define UDMA_AXI_M2S_COMP_WR_CFG_2_AWQOS_SHIFT 24
+/* Protection Type */
+#define UDMA_AXI_M2S_COMP_WR_CFG_2_AWPROT_MASK 0x70000000
+#define UDMA_AXI_M2S_COMP_WR_CFG_2_AWPROT_SHIFT 28
+
+/**** data_rd_cfg_1 register ****/
+/* AXI read ID (ARID) */
+#define UDMA_AXI_M2S_DATA_RD_CFG_1_ARID_MASK 0x000000FF
+#define UDMA_AXI_M2S_DATA_RD_CFG_1_ARID_SHIFT 0
+/* Cache Type */
+#define UDMA_AXI_M2S_DATA_RD_CFG_1_ARCACHE_MASK 0x000F0000
+#define UDMA_AXI_M2S_DATA_RD_CFG_1_ARCACHE_SHIFT 16
+/* Burst type */
+#define UDMA_AXI_M2S_DATA_RD_CFG_1_ARBURST_MASK 0x03000000
+#define UDMA_AXI_M2S_DATA_RD_CFG_1_ARBURST_SHIFT 24
+
+/**** data_rd_cfg_2 register ****/
+/* User extension */
+#define UDMA_AXI_M2S_DATA_RD_CFG_2_ARUSER_MASK 0x000FFFFF
+#define UDMA_AXI_M2S_DATA_RD_CFG_2_ARUSER_SHIFT 0
+/* Bus size, 128-bit */
+#define UDMA_AXI_M2S_DATA_RD_CFG_2_ARSIZE_MASK 0x00700000
+#define UDMA_AXI_M2S_DATA_RD_CFG_2_ARSIZE_SHIFT 20
+/*
+ * AXI Master QoS.
+ * Used for arbitration between AXI masters
+ */
+#define UDMA_AXI_M2S_DATA_RD_CFG_2_ARQOS_MASK 0x07000000
+#define UDMA_AXI_M2S_DATA_RD_CFG_2_ARQOS_SHIFT 24
+/* Protection Type */
+#define UDMA_AXI_M2S_DATA_RD_CFG_2_ARPROT_MASK 0x70000000
+#define UDMA_AXI_M2S_DATA_RD_CFG_2_ARPROT_SHIFT 28
+
+/**** desc_rd_cfg_1 register ****/
+/* AXI read ID (ARID) */
+#define UDMA_AXI_M2S_DESC_RD_CFG_1_ARID_MASK 0x000000FF
+#define UDMA_AXI_M2S_DESC_RD_CFG_1_ARID_SHIFT 0
+/* Cache Type */
+#define UDMA_AXI_M2S_DESC_RD_CFG_1_ARCACHE_MASK 0x000F0000
+#define UDMA_AXI_M2S_DESC_RD_CFG_1_ARCACHE_SHIFT 16
+/* Burst type */
+#define UDMA_AXI_M2S_DESC_RD_CFG_1_ARBURST_MASK 0x03000000
+#define UDMA_AXI_M2S_DESC_RD_CFG_1_ARBURST_SHIFT 24
+
+/**** desc_rd_cfg_2 register ****/
+/* User extension */
+#define UDMA_AXI_M2S_DESC_RD_CFG_2_ARUSER_MASK 0x000FFFFF
+#define UDMA_AXI_M2S_DESC_RD_CFG_2_ARUSER_SHIFT 0
+/* Bus size, 128-bit */
+#define UDMA_AXI_M2S_DESC_RD_CFG_2_ARSIZE_MASK 0x00700000
+#define UDMA_AXI_M2S_DESC_RD_CFG_2_ARSIZE_SHIFT 20
+/*
+ * AXI Master QoS
+ * Used for arbitration between AXI masters
+ */
+#define UDMA_AXI_M2S_DESC_RD_CFG_2_ARQOS_MASK 0x07000000
+#define UDMA_AXI_M2S_DESC_RD_CFG_2_ARQOS_SHIFT 24
+/* Protection Type */
+#define UDMA_AXI_M2S_DESC_RD_CFG_2_ARPROT_MASK 0x70000000
+#define UDMA_AXI_M2S_DESC_RD_CFG_2_ARPROT_SHIFT 28
+
+/**** data_rd_cfg register ****/
+/*
+ * Defines the maximum number of AXI beats for a single AXI burst.
+ * This value is used for a burst split decision.
+ */
+#define UDMA_AXI_M2S_DATA_RD_CFG_MAX_AXI_BEATS_MASK 0x000000FF
+#define UDMA_AXI_M2S_DATA_RD_CFG_MAX_AXI_BEATS_SHIFT 0
+/*
+ * Enable breaking data read request.
+ * Aligned to max_AXI_beats when the total read size is less than max_AXI_beats
+ */
+#define UDMA_AXI_M2S_DATA_RD_CFG_ALWAYS_BREAK_ON_MAX_BOUDRY (1 << 16)
+
+/**** desc_rd_cfg_3 register ****/
+/*
+ * Defines the maximum number of AXI beats for a single AXI burst.
+ * This value is used for a burst split decision.
+ * Maximum burst size for reading data (in AXI beats, 128-bits)
+ * (default - 16 beats, 256 bytes)
+ */
+#define UDMA_AXI_M2S_DESC_RD_CFG_3_MAX_AXI_BEATS_MASK 0x000000FF
+#define UDMA_AXI_M2S_DESC_RD_CFG_3_MAX_AXI_BEATS_SHIFT 0
+/*
+ * Enable breaking descriptor read request.
+ * Aligned to max_AXI_beats when the total read size is less than max_AXI_beats.
+ */
+#define UDMA_AXI_M2S_DESC_RD_CFG_3_ALWAYS_BREAK_ON_MAX_BOUDRY (1 << 16)
+
+/**** desc_wr_cfg_1 register ****/
+/*
+ * Defines the maximum number of AXI beats for a single AXI burst.
+ * This value is used for a burst split decision.
+ */
+#define UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK 0x000000FF
+#define UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_SHIFT 0
+/*
+ * Minimum burst for writing completion descriptors.
+ * Defined in AXI beats
+ * 4 Descriptors per beat.
+ * Value must be aligned to cache lines (64 bytes).
+ * Default value is 2 cache lines, 32 descriptors, 8 beats.
+ */
+#define UDMA_AXI_M2S_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK 0x00FF0000
+#define UDMA_AXI_M2S_DESC_WR_CFG_1_MIN_AXI_BEATS_SHIFT 16
+
+/**** ostand_cfg register ****/
+/* Maximum number of outstanding data reads to the AXI (AXI transactions) */
+#define UDMA_AXI_M2S_OSTAND_CFG_MAX_DATA_RD_MASK 0x0000003F
+#define UDMA_AXI_M2S_OSTAND_CFG_MAX_DATA_RD_SHIFT 0
+/*
+ * Maximum number of outstanding descriptor reads to the AXI (AXI transactions)
+ */
+#define UDMA_AXI_M2S_OSTAND_CFG_MAX_DESC_RD_MASK 0x00003F00
+#define UDMA_AXI_M2S_OSTAND_CFG_MAX_DESC_RD_SHIFT 8
+/*
+ * Maximum number of outstanding descriptor writes to the AXI (AXI transactions)
+ */
+#define UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_REQ_MASK 0x003F0000
+#define UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_REQ_SHIFT 16
+/*
+ * Maximum number of outstanding data beats for descriptor write to AXI (AXI
+ * beats)
+ */
+#define UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_DATA_WR_MASK 0xFF000000
+#define UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_DATA_WR_SHIFT 24
+
+/**** state register ****/
+/* Completion control */
+#define UDMA_M2S_STATE_COMP_CTRL_MASK 0x00000003
+#define UDMA_M2S_STATE_COMP_CTRL_SHIFT 0
+/* Stream interface */
+#define UDMA_M2S_STATE_STREAM_IF_MASK 0x00000030
+#define UDMA_M2S_STATE_STREAM_IF_SHIFT 4
+/* Data read control */
+#define UDMA_M2S_STATE_DATA_RD_CTRL_MASK 0x00000300
+#define UDMA_M2S_STATE_DATA_RD_CTRL_SHIFT 8
+/* Descriptor prefetch */
+#define UDMA_M2S_STATE_DESC_PREF_MASK 0x00003000
+#define UDMA_M2S_STATE_DESC_PREF_SHIFT 12
+
+/**** change_state register ****/
+/* Start normal operation */
+#define UDMA_M2S_CHANGE_STATE_NORMAL (1 << 0)
+/* Stop normal operation */
+#define UDMA_M2S_CHANGE_STATE_DIS (1 << 1)
+/*
+ * Stop all machines.
+ * (Prefetch, scheduling, completion and stream interface)
+ */
+#define UDMA_M2S_CHANGE_STATE_ABORT (1 << 2)
+
+/**** err_log_mask register ****/
+/*
+ * Mismatch of packet serial number.
+ * (between first packet in the unacknowledged FIFO and received ack from the
+ * stream)
+ */
+#define UDMA_M2S_ERR_LOG_MASK_COMP_PKT_MISMATCH (1 << 0)
+/* Parity error */
+#define UDMA_M2S_ERR_LOG_MASK_STREAM_AXI_PARITY (1 << 1)
+/* AXI response error */
+#define UDMA_M2S_ERR_LOG_MASK_STREAM_AXI_RESPONSE (1 << 2)
+/* AXI timeout (ack not received) */
+#define UDMA_M2S_ERR_LOG_MASK_STREAM_AXI_TOUT (1 << 3)
+/* Parity error */
+#define UDMA_M2S_ERR_LOG_MASK_COMP_AXI_PARITY (1 << 4)
+/* AXI response error */
+#define UDMA_M2S_ERR_LOG_MASK_COMP_AXI_RESPONSE (1 << 5)
+/* AXI timeout */
+#define UDMA_M2S_ERR_LOG_MASK_COMP_AXI_TOUT (1 << 6)
+/* Parity error */
+#define UDMA_M2S_ERR_LOG_MASK_DATA_AXI_PARITY (1 << 7)
+/* AXI response error */
+#define UDMA_M2S_ERR_LOG_MASK_DATA_AXI_RESPONSE (1 << 8)
+/* AXI timeout */
+#define UDMA_M2S_ERR_LOG_MASK_DATA_AXI_TOUT (1 << 9)
+/* Parity error */
+#define UDMA_M2S_ERR_LOG_MASK_PREF_AXI_PARITY (1 << 10)
+/* AXI response error */
+#define UDMA_M2S_ERR_LOG_MASK_PREF_AXI_RESPONSE (1 << 11)
+/* AXI timeout */
+#define UDMA_M2S_ERR_LOG_MASK_PREF_AXI_TOUT (1 << 12)
+/* Packet length error */
+#define UDMA_M2S_ERR_LOG_MASK_PREF_PKT_LEN_OVERFLOW (1 << 13)
+/* Maximum number of descriptors per packet error */
+#define UDMA_M2S_ERR_LOG_MASK_PREF_MAX_DESC_CNT (1 << 14)
+/* Error in first bit indication of the descriptor */
+#define UDMA_M2S_ERR_LOG_MASK_PREF_FIRST (1 << 15)
+/* Error in last bit indication of the descriptor */
+#define UDMA_M2S_ERR_LOG_MASK_PREF_LAST (1 << 16)
+/* Ring_ID error */
+#define UDMA_M2S_ERR_LOG_MASK_PREF_RING_ID (1 << 17)
+/* Data buffer parity error */
+#define UDMA_M2S_ERR_LOG_MASK_DATA_BUFF_PARITY (1 << 18)
+/* Internal error */
+#define UDMA_M2S_ERR_LOG_MASK_INTERNAL_MASK 0xFFF80000
+#define UDMA_M2S_ERR_LOG_MASK_INTERNAL_SHIFT 19
+
+/**** clear_err_log register ****/
+/* Clear error log */
+#define UDMA_M2S_CLEAR_ERR_LOG_CLEAR (1 << 0)
+
+/**** data_fifo_status register ****/
+/* FIFO used indication */
+#define UDMA_M2S_DATA_FIFO_STATUS_USED_MASK 0x0000FFFF
+#define UDMA_M2S_DATA_FIFO_STATUS_USED_SHIFT 0
+/* FIFO empty indication */
+#define UDMA_M2S_DATA_FIFO_STATUS_EMPTY (1 << 24)
+/* FIFO full indication */
+#define UDMA_M2S_DATA_FIFO_STATUS_FULL (1 << 28)
+
+/**** header_fifo_status register ****/
+/* FIFO used indication */
+#define UDMA_M2S_HEADER_FIFO_STATUS_USED_MASK 0x0000FFFF
+#define UDMA_M2S_HEADER_FIFO_STATUS_USED_SHIFT 0
+/* FIFO empty indication */
+#define UDMA_M2S_HEADER_FIFO_STATUS_EMPTY (1 << 24)
+/* FIFO full indication */
+#define UDMA_M2S_HEADER_FIFO_STATUS_FULL (1 << 28)
+
+/**** unack_fifo_status register ****/
+/* FIFO used indication */
+#define UDMA_M2S_UNACK_FIFO_STATUS_USED_MASK 0x0000FFFF
+#define UDMA_M2S_UNACK_FIFO_STATUS_USED_SHIFT 0
+/* FIFO empty indication */
+#define UDMA_M2S_UNACK_FIFO_STATUS_EMPTY (1 << 24)
+/* FIFO full indication */
+#define UDMA_M2S_UNACK_FIFO_STATUS_FULL (1 << 28)
+
+/**** indirect_ctrl register ****/
+/* Selected queue for status read */
+#define UDMA_M2S_INDIRECT_CTRL_Q_NUM_MASK 0x00000FFF
+#define UDMA_M2S_INDIRECT_CTRL_Q_NUM_SHIFT 0
+
+/**** sel_pref_fifo_status register ****/
+/* FIFO used indication */
+#define UDMA_M2S_SEL_PREF_FIFO_STATUS_USED_MASK 0x0000FFFF
+#define UDMA_M2S_SEL_PREF_FIFO_STATUS_USED_SHIFT 0
+/* FIFO empty indication */
+#define UDMA_M2S_SEL_PREF_FIFO_STATUS_EMPTY (1 << 24)
+/* FIFO full indication */
+#define UDMA_M2S_SEL_PREF_FIFO_STATUS_FULL (1 << 28)
+
+/**** sel_comp_fifo_status register ****/
+/* FIFO used indication */
+#define UDMA_M2S_SEL_COMP_FIFO_STATUS_USED_MASK 0x0000FFFF
+#define UDMA_M2S_SEL_COMP_FIFO_STATUS_USED_SHIFT 0
+/* FIFO empty indication */
+#define UDMA_M2S_SEL_COMP_FIFO_STATUS_EMPTY (1 << 24)
+/* FIFO full indication */
+#define UDMA_M2S_SEL_COMP_FIFO_STATUS_FULL (1 << 28)
+
+/**** sel_rate_limit_status register ****/
+/* Token counter */
+#define UDMA_M2S_SEL_RATE_LIMIT_STATUS_TOKEN_CNT_MASK 0x00FFFFFF
+#define UDMA_M2S_SEL_RATE_LIMIT_STATUS_TOKEN_CNT_SHIFT 0
+
+/**** sel_dwrr_status register ****/
+/* Deficit counter */
+#define UDMA_M2S_SEL_DWRR_STATUS_DEFICIT_CNT_MASK 0x00FFFFFF
+#define UDMA_M2S_SEL_DWRR_STATUS_DEFICIT_CNT_SHIFT 0
+
+/**** cfg_len register ****/
+/* Maximum packet size for the M2S */
+#define UDMA_M2S_CFG_LEN_MAX_PKT_SIZE_MASK 0x000FFFFF
+#define UDMA_M2S_CFG_LEN_MAX_PKT_SIZE_SHIFT 0
+/*
+ * Length encoding for 64K.
+ * 0 - length 0x0000 = 0
+ * 1 - length 0x0000 = 64k
+ */
+#define UDMA_M2S_CFG_LEN_ENCODE_64K (1 << 24)
+
+/**** stream_cfg register ****/
+/*
+ * Disables the stream interface operation.
+ * Changing to 1 stops at the end of packet transmission.
+ */
+#define UDMA_M2S_STREAM_CFG_DISABLE (1 << 0)
+/*
+ * Configuration of the stream FIFO read control.
+ * 0 - Cut through
+ * 1 - Threshold based
+ */
+#define UDMA_M2S_STREAM_CFG_RD_MODE (1 << 1)
+/* Minimum number of beats to start packet transmission. */
+#define UDMA_M2S_STREAM_CFG_RD_TH_MASK 0x0003FF00
+#define UDMA_M2S_STREAM_CFG_RD_TH_SHIFT 8
+
+/**** desc_pref_cfg_1 register ****/
+/* Size of the descriptor prefetch FIFO (in descriptors) */
+#define UDMA_M2S_RD_DESC_PREF_CFG_1_FIFO_DEPTH_MASK 0x000000FF
+#define UDMA_M2S_RD_DESC_PREF_CFG_1_FIFO_DEPTH_SHIFT 0
+
+/**** desc_pref_cfg_2 register ****/
+/* Maximum number of descriptors per packet */
+#define UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_MASK 0x0000001F
+#define UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_SHIFT 0
+/*
+ * Force RR arbitration in the prefetch arbiter.
+ * 0 -Standard arbitration based on queue QoS
+ * 1 - Force Round Robin arbitration
+ */
+#define UDMA_M2S_RD_DESC_PREF_CFG_2_PREF_FORCE_RR (1 << 16)
+
+/**** desc_pref_cfg_3 register ****/
+/*
+ * Minimum descriptor burst size when prefetch FIFO level is below the
+ * descriptor prefetch threshold
+ * (must be 1)
+ */
+#define UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK 0x0000000F
+#define UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_SHIFT 0
+/*
+ * Minimum descriptor burst size when prefetch FIFO level is above the
+ * descriptor prefetch threshold
+ */
+#define UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK 0x000000F0
+#define UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT 4
+/*
+ * Descriptor fetch threshold.
+ * Used as a threshold to determine the allowed minimum descriptor burst size.
+ * (Must be at least max_desc_per_pkt)
+ */
+#define UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_MASK 0x0000FF00
+#define UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT 8
+
+/**** data_cfg register ****/
+/*
+ * Maximum number of data beats in the data read FIFO.
+ * Defined based on data FIFO size
+ * (default FIFO size 2KB → 128 beats)
+ */
+#define UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_MASK 0x000003FF
+#define UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_SHIFT 0
+/*
+ * Maximum number of packets in the data read FIFO.
+ * Defined based on header FIFO size
+ */
+#define UDMA_M2S_RD_DATA_CFG_MAX_PKT_LIMIT_MASK 0x00FF0000
+#define UDMA_M2S_RD_DATA_CFG_MAX_PKT_LIMIT_SHIFT 16
+
+/**** cfg_sched register ****/
+/*
+ * Enable the DWRR scheduler.
+ * If this bit is 0, queues with same QoS will be served with RR scheduler.
+ */
+#define UDMA_M2S_DWRR_CFG_SCHED_EN_DWRR (1 << 0)
+/*
+ * Scheduler operation mode.
+ * 0 - Byte mode
+ * 1 - Packet mode
+ */
+#define UDMA_M2S_DWRR_CFG_SCHED_PKT_MODE_EN (1 << 4)
+/*
+ * Enable incrementing the weight factor between DWRR iterations.
+ * 00 - Don't increase the increment factor.
+ * 01 - Increment once
+ * 10 - Increment exponential
+ * 11 - Reserved
+ */
+#define UDMA_M2S_DWRR_CFG_SCHED_WEIGHT_INC_MASK 0x00000300
+#define UDMA_M2S_DWRR_CFG_SCHED_WEIGHT_INC_SHIFT 8
+/*
+ * Increment factor power of 2.
+ * 7 --> 128 bytes
+ * This is the factor used to multiply the weight.
+ */
+#define UDMA_M2S_DWRR_CFG_SCHED_INC_FACTOR_MASK 0x000F0000
+#define UDMA_M2S_DWRR_CFG_SCHED_INC_FACTOR_SHIFT 16
+
+/**** ctrl_deficit_cnt register ****/
+/*
+ * Init value for the deficit counter.
+ * Initializes the deficit counters of all queues to this value any time this
+ * register is written.
+ */
+#define UDMA_M2S_DWRR_CTRL_DEFICIT_CNT_INIT_MASK 0x00FFFFFF
+#define UDMA_M2S_DWRR_CTRL_DEFICIT_CNT_INIT_SHIFT 0
+
+/**** gen_cfg register ****/
+/* Size of the basic token fill cycle, system clock cycles */
+#define UDMA_M2S_RATE_LIMITER_GEN_CFG_SHORT_CYCLE_SIZE_MASK 0x0000FFFF
+#define UDMA_M2S_RATE_LIMITER_GEN_CFG_SHORT_CYCLE_SIZE_SHIFT 0
+/*
+ * Rate limiter operation mode.
+ * 0 - Byte mode
+ * 1 - Packet mode
+ */
+#define UDMA_M2S_RATE_LIMITER_GEN_CFG_PKT_MODE_EN (1 << 24)
+
+/**** ctrl_cycle_cnt register ****/
+/* Reset the short and long cycle counters. */
+#define UDMA_M2S_RATE_LIMITER_CTRL_CYCLE_CNT_RST (1 << 0)
+
+/**** ctrl_token register ****/
+/*
+ * Init value for the token counter.
+ * Initializes the token counters of all queues to this value any time this
+ * register is written.
+ */
+#define UDMA_M2S_RATE_LIMITER_CTRL_TOKEN_RST_MASK 0x00FFFFFF
+#define UDMA_M2S_RATE_LIMITER_CTRL_TOKEN_RST_SHIFT 0
+
+/**** cfg_1s register ****/
+/* Maximum number of accumulated bytes in the token counter */
+#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_MAX_BURST_SIZE_MASK 0x00FFFFFF
+#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_MAX_BURST_SIZE_SHIFT 0
+/* Enable the rate limiter. */
+#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_EN (1 << 24)
+/* Stop token fill. */
+#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_PAUSE (1 << 25)
+
+/**** cfg_cycle register ****/
+/* Number of short cycles between token fills */
+#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_CYCLE_LONG_CYCLE_SIZE_MASK 0x0000FFFF
+#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_CYCLE_LONG_CYCLE_SIZE_SHIFT 0
+
+/**** cfg_token_size_1 register ****/
+/* Number of bits to add in each long cycle */
+#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_1_LONG_CYCLE_MASK 0x0007FFFF
+#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_1_LONG_CYCLE_SHIFT 0
+
+/**** cfg_token_size_2 register ****/
+/* Number of bits to add in each short cycle */
+#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_2_SHORT_CYCLE_MASK 0x0007FFFF
+#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_2_SHORT_CYCLE_SHIFT 0
+
+/**** sw_ctrl register ****/
+/* Reset the token bucket counter. */
+#define UDMA_M2S_STREAM_RATE_LIMITER_SW_CTRL_RST_TOKEN_CNT (1 << 0)
+
+/**** mask register ****/
+/* Mask the external rate limiter. */
+#define UDMA_M2S_STREAM_RATE_LIMITER_MASK_EXTERNAL_RATE_LIMITER (1 << 0)
+/* Mask the internal rate limiter. */
+#define UDMA_M2S_STREAM_RATE_LIMITER_MASK_INTERNAL_RATE_LIMITER (1 << 1)
+/* Mask the external application pause interface. */
+#define UDMA_M2S_STREAM_RATE_LIMITER_MASK_EXTERNAL_PAUSE (1 << 3)
+
+/**** cfg_1c register ****/
+/*
+ * Completion FIFO size
+ * (descriptors per queue)
+ */
+#define UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH_MASK 0x000000FF
+#define UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH_SHIFT 0
+/*
+ * Unacknowledged FIFO size.
+ * (descriptors)
+ */
+#define UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH_MASK 0x0001FF00
+#define UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH_SHIFT 8
+/*
+ * Enable promotion.
+ * Enable the promotion of the current queue in progress for the completion
+ * write scheduler.
+ */
+#define UDMA_M2S_COMP_CFG_1C_Q_PROMOTION (1 << 24)
+/* Force RR arbitration in the completion arbiter */
+#define UDMA_M2S_COMP_CFG_1C_FORCE_RR (1 << 25)
+/* Minimum number of free completion entries to qualify for promotion */
+#define UDMA_M2S_COMP_CFG_1C_Q_FREE_MIN_MASK 0xF0000000
+#define UDMA_M2S_COMP_CFG_1C_Q_FREE_MIN_SHIFT 28
+
+/**** cfg_application_ack register ****/
+/*
+ * Acknowledge timeout timer.
+ * (ACK from the application through the stream interface)
+ */
+#define UDMA_M2S_COMP_CFG_APPLICATION_ACK_TOUT_MASK 0x00FFFFFF
+#define UDMA_M2S_COMP_CFG_APPLICATION_ACK_TOUT_SHIFT 0
+
+/**** cfg_st register ****/
+/* Use additional length value for all statistics counters. */
+#define UDMA_M2S_STAT_CFG_ST_USE_EXTRA_LEN (1 << 0)
+
+/**** reg_1 register ****/
+/*
+ * Read the size of the descriptor prefetch FIFO
+ * (descriptors).
+ */
+#define UDMA_M2S_FEATURE_REG_1_DESC_PREFERCH_FIFO_DEPTH_MASK 0x000000FF
+#define UDMA_M2S_FEATURE_REG_1_DESC_PREFERCH_FIFO_DEPTH_SHIFT 0
+
+/**** reg_3 register ****/
+/*
+ * Maximum number of data beats in the data read FIFO.
+ * Defined based on data FIFO size
+ * (default FIFO size 2KB -> 128 beats)
+ */
+#define UDMA_M2S_FEATURE_REG_3_DATA_FIFO_DEPTH_MASK 0x000003FF
+#define UDMA_M2S_FEATURE_REG_3_DATA_FIFO_DEPTH_SHIFT 0
+/*
+ * Maximum number of packets in the data read FIFO.
+ * Defined based on header FIFO size
+ */
+#define UDMA_M2S_FEATURE_REG_3_DATA_RD_MAX_PKT_LIMIT_MASK 0x00FF0000
+#define UDMA_M2S_FEATURE_REG_3_DATA_RD_MAX_PKT_LIMIT_SHIFT 16
+
+/**** reg_4 register ****/
+/*
+ * Size of the completion FIFO of each queue
+ * (words)
+ */
+#define UDMA_M2S_FEATURE_REG_4_COMP_FIFO_DEPTH_MASK 0x000000FF
+#define UDMA_M2S_FEATURE_REG_4_COMP_FIFO_DEPTH_SHIFT 0
+/* Size of the unacknowledged FIFO (descriptors) */
+#define UDMA_M2S_FEATURE_REG_4_COMP_UNACK_FIFO_DEPTH_MASK 0x0001FF00
+#define UDMA_M2S_FEATURE_REG_4_COMP_UNACK_FIFO_DEPTH_SHIFT 8
+
+/**** reg_5 register ****/
+/* Maximum number of outstanding data reads to AXI */
+#define UDMA_M2S_FEATURE_REG_5_MAX_DATA_RD_OSTAND_MASK 0x0000003F
+#define UDMA_M2S_FEATURE_REG_5_MAX_DATA_RD_OSTAND_SHIFT 0
+/* Maximum number of outstanding descriptor reads to AXI */
+#define UDMA_M2S_FEATURE_REG_5_MAX_DESC_RD_OSTAND_MASK 0x00003F00
+#define UDMA_M2S_FEATURE_REG_5_MAX_DESC_RD_OSTAND_SHIFT 8
+/*
+ * Maximum number of outstanding descriptor writes to AXI.
+ * (AXI transactions)
+ */
+#define UDMA_M2S_FEATURE_REG_5_MAX_COMP_REQ_MASK 0x003F0000
+#define UDMA_M2S_FEATURE_REG_5_MAX_COMP_REQ_SHIFT 16
+/*
+ * Maximum number of outstanding data beats for descriptor write to AXI.
+ * (AXI beats)
+ */
+#define UDMA_M2S_FEATURE_REG_5_MAX_COMP_DATA_WR_OSTAND_MASK 0xFF000000
+#define UDMA_M2S_FEATURE_REG_5_MAX_COMP_DATA_WR_OSTAND_SHIFT 24
+
+/**** cfg register ****/
+/*
+ * Length offset to be used for each packet from this queue.
+ * (length offset is used for the scheduler and rate limiter).
+ */
+#define UDMA_M2S_Q_CFG_PKT_LEN_OFFSET_MASK 0x0000FFFF
+#define UDMA_M2S_Q_CFG_PKT_LEN_OFFSET_SHIFT 0
+/*
+ * Enable operation of this queue.
+ * Start prefetch.
+ */
+#define UDMA_M2S_Q_CFG_EN_PREF (1 << 16)
+/*
+ * Enable operation of this queue.
+ * Start scheduling.
+ */
+#define UDMA_M2S_Q_CFG_EN_SCHEDULING (1 << 17)
+/* Allow prefetch of less than minimum prefetch burst size. */
+#define UDMA_M2S_Q_CFG_ALLOW_LT_MIN_PREF (1 << 20)
+/* Configure the AXI AWCACHE for completion write. */
+#define UDMA_M2S_Q_CFG_AXI_AWCACHE_COMP_MASK 0x0F000000
+#define UDMA_M2S_Q_CFG_AXI_AWCACHE_COMP_SHIFT 24
+/*
+ * AXI QoS for the selected queue.
+ * This value is used in AXI transactions associated with this queue and the
+ * prefetch and completion arbiters.
+ */
+#define UDMA_M2S_Q_CFG_AXI_QOS_MASK 0x70000000
+#define UDMA_M2S_Q_CFG_AXI_QOS_SHIFT 28
+
+/**** status register ****/
+/* Indicates how many entries are used in the queue */
+#define UDMA_M2S_Q_STATUS_Q_USED_MASK 0x01FFFFFF
+#define UDMA_M2S_Q_STATUS_Q_USED_SHIFT 0
+/*
+ * prefetch status
+ * 0 – prefetch operation is stopped
+ * 1 – prefetch is operational
+ */
+#define UDMA_M2S_Q_STATUS_PREFETCH (1 << 28)
+/*
+ * Queue scheduler status
+ * 0 – queue is not active and not participating in scheduling
+ * 1 – queue is active and participating in the scheduling process
+ */
+#define UDMA_M2S_Q_STATUS_SCHEDULER (1 << 29)
+/* Queue is suspended due to DMB */
+#define UDMA_M2S_Q_STATUS_Q_DMB (1 << 30)
+/*
+ * Queue full indication.
+ * (used by the host when head pointer equals tail pointer).
+ */
+#define UDMA_M2S_Q_STATUS_Q_FULL (1 << 31)
+/*
+ * M2S Descriptor Ring Base address [31:4].
+ * Value of the base address of the M2S descriptor ring
+ * [3:0] - 0 - 16B alignment is enforced
+ * ([11:4] should be 0 for 4KB alignment)
+ */
+#define UDMA_M2S_Q_TDRBP_LOW_ADDR_MASK 0xFFFFFFF0
+#define UDMA_M2S_Q_TDRBP_LOW_ADDR_SHIFT 4
+
+/**** TDRL register ****/
+/*
+ * Length of the descriptor ring.
+ * (descriptors)
+ * Associated with the ring base address, ends at maximum burst size alignment.
+ */
+#define UDMA_M2S_Q_TDRL_OFFSET_MASK 0x00FFFFFF
+#define UDMA_M2S_Q_TDRL_OFFSET_SHIFT 0
+
+/**** TDRHP register ****/
+/*
+ * Relative offset of the next descriptor that needs to be read into the
+ * prefetch FIFO.
+ * Incremented when the DMA reads valid descriptors from the host memory to the
+ * prefetch FIFO.
+ * Note that this is the offset in # of descriptors and not in byte address.
+ */
+#define UDMA_M2S_Q_TDRHP_OFFSET_MASK 0x00FFFFFF
+#define UDMA_M2S_Q_TDRHP_OFFSET_SHIFT 0
+/* Ring ID */
+#define UDMA_M2S_Q_TDRHP_RING_ID_MASK 0xC0000000
+#define UDMA_M2S_Q_TDRHP_RING_ID_SHIFT 30
+
+/**** TDRTP_inc register ****/
+/* Increments the value in Q_TDRTP (descriptors) */
+#define UDMA_M2S_Q_TDRTP_INC_VAL_MASK 0x00FFFFFF
+#define UDMA_M2S_Q_TDRTP_INC_VAL_SHIFT 0
+
+/**** TDRTP register ****/
+/*
+ * Relative offset of the next free descriptor in the host memory.
+ * Note that this is the offset in # of descriptors and not in byte address.
+ */
+#define UDMA_M2S_Q_TDRTP_OFFSET_MASK 0x00FFFFFF
+#define UDMA_M2S_Q_TDRTP_OFFSET_SHIFT 0
+/* Ring ID */
+#define UDMA_M2S_Q_TDRTP_RING_ID_MASK 0xC0000000
+#define UDMA_M2S_Q_TDRTP_RING_ID_SHIFT 30
+
+/**** TDCP register ****/
+/*
+ * Relative offset of the first descriptor in the prefetch FIFO.
+ * This is the next descriptor that will be read by the scheduler.
+ */
+#define UDMA_M2S_Q_TDCP_OFFSET_MASK 0x00FFFFFF
+#define UDMA_M2S_Q_TDCP_OFFSET_SHIFT 0
+/* Ring ID */
+#define UDMA_M2S_Q_TDCP_RING_ID_MASK 0xC0000000
+#define UDMA_M2S_Q_TDCP_RING_ID_SHIFT 30
+/*
+ * M2S Descriptor Ring Base address [31:4].
+ * Value of the base address of the M2S descriptor ring
+ * [3:0] - 0 - 16B alignment is enforced
+ * ([11:4] should be 0 for 4KB alignment)
+ * NOTE:
+ * Length of the descriptor ring (in descriptors) associated with the ring base
+ * address. Ends at maximum burst size alignment.
+ */
+#define UDMA_M2S_Q_TCRBP_LOW_ADDR_MASK 0xFFFFFFF0
+#define UDMA_M2S_Q_TCRBP_LOW_ADDR_SHIFT 4
+
+/**** TCRHP register ****/
+/*
+ * Relative offset of the next descriptor that needs to be updated by the
+ * completion controller.
+ * Note: This is in descriptors and not in byte address.
+ */
+#define UDMA_M2S_Q_TCRHP_OFFSET_MASK 0x00FFFFFF
+#define UDMA_M2S_Q_TCRHP_OFFSET_SHIFT 0
+/* Ring ID */
+#define UDMA_M2S_Q_TCRHP_RING_ID_MASK 0xC0000000
+#define UDMA_M2S_Q_TCRHP_RING_ID_SHIFT 30
+
+/**** TCRHP_internal register ****/
+/*
+ * Relative offset of the next descriptor that needs to be updated by the
+ * completion controller.
+ * Note: This is in descriptors and not in byte address.
+ */
+#define UDMA_M2S_Q_TCRHP_INTERNAL_OFFSET_MASK 0x00FFFFFF
+#define UDMA_M2S_Q_TCRHP_INTERNAL_OFFSET_SHIFT 0
+/* Ring ID */
+#define UDMA_M2S_Q_TCRHP_INTERNAL_RING_ID_MASK 0xC0000000
+#define UDMA_M2S_Q_TCRHP_INTERNAL_RING_ID_SHIFT 30
+
+/**** rate_limit_cfg_1 register ****/
+/* Maximum number of accumulated bytes in the token counter. */
+#define UDMA_M2S_Q_RATE_LIMIT_CFG_1_MAX_BURST_SIZE_MASK 0x00FFFFFF
+#define UDMA_M2S_Q_RATE_LIMIT_CFG_1_MAX_BURST_SIZE_SHIFT 0
+/* Enable the rate limiter. */
+#define UDMA_M2S_Q_RATE_LIMIT_CFG_1_EN (1 << 24)
+/* Stop token fill. */
+#define UDMA_M2S_Q_RATE_LIMIT_CFG_1_PAUSE (1 << 25)
+
+/**** rate_limit_cfg_cycle register ****/
+/* Number of short cycles between token fills */
+#define UDMA_M2S_Q_RATE_LIMIT_CFG_CYCLE_LONG_CYCLE_SIZE_MASK 0x0000FFFF
+#define UDMA_M2S_Q_RATE_LIMIT_CFG_CYCLE_LONG_CYCLE_SIZE_SHIFT 0
+
+/**** rate_limit_cfg_token_size_1 register ****/
+/* Number of bits to add in each long cycle */
+#define UDMA_M2S_Q_RATE_LIMIT_CFG_TOKEN_SIZE_1_LONG_CYCLE_MASK 0x0007FFFF
+#define UDMA_M2S_Q_RATE_LIMIT_CFG_TOKEN_SIZE_1_LONG_CYCLE_SHIFT 0
+
+/**** rate_limit_cfg_token_size_2 register ****/
+/* Number of bits to add in each cycle */
+#define UDMA_M2S_Q_RATE_LIMIT_CFG_TOKEN_SIZE_2_SHORT_CYCLE_MASK 0x0007FFFF
+#define UDMA_M2S_Q_RATE_LIMIT_CFG_TOKEN_SIZE_2_SHORT_CYCLE_SHIFT 0
+
+/**** rate_limit_sw_ctrl register ****/
+/* Reset the token bucket counter. */
+#define UDMA_M2S_Q_RATE_LIMIT_SW_CTRL_RST_TOKEN_CNT (1 << 0)
+
+/**** rate_limit_mask register ****/
+/* Mask the external rate limiter. */
+#define UDMA_M2S_Q_RATE_LIMIT_MASK_EXTERNAL_RATE_LIMITER (1 << 0)
+/* Mask the internal rate limiter. */
+#define UDMA_M2S_Q_RATE_LIMIT_MASK_INTERNAL_RATE_LIMITER (1 << 1)
+/*
+ * Mask the internal pause mechanism for DMB.
+ * (Data Memory Barrier).
+ */
+#define UDMA_M2S_Q_RATE_LIMIT_MASK_INTERNAL_PAUSE_DMB (1 << 2)
+/* Mask the external application pause interface. */
+#define UDMA_M2S_Q_RATE_LIMIT_MASK_EXTERNAL_PAUSE (1 << 3)
+
+/**** dwrr_cfg_1 register ****/
+/* Maximum number of accumulated bytes in the deficit counter */
+#define UDMA_M2S_Q_DWRR_CFG_1_MAX_DEFICIT_CNT_SIZE_MASK 0x00FFFFFF
+#define UDMA_M2S_Q_DWRR_CFG_1_MAX_DEFICIT_CNT_SIZE_SHIFT 0
+/* Bypass the DWRR. */
+#define UDMA_M2S_Q_DWRR_CFG_1_STRICT (1 << 24)
+/* Stop deficit counter increment. */
+#define UDMA_M2S_Q_DWRR_CFG_1_PAUSE (1 << 25)
+
+/**** dwrr_cfg_2 register ****/
+/*
+ * Value for the queue QoS.
+ * Queues with the same QoS value are scheduled with RR/DWRR.
+ * Only LOG(number of queues) is used.
+ */
+#define UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_MASK 0x000000FF
+#define UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_SHIFT 0
+
+/**** dwrr_cfg_3 register ****/
+/* Queue weight */
+#define UDMA_M2S_Q_DWRR_CFG_3_WEIGHT_MASK 0x000000FF
+#define UDMA_M2S_Q_DWRR_CFG_3_WEIGHT_SHIFT 0
+
+/**** dwrr_sw_ctrl register ****/
+/* Reset the DWRR deficit counter. */
+#define UDMA_M2S_Q_DWRR_SW_CTRL_RST_CNT (1 << 0)
+
+/**** comp_cfg register ****/
+/* Enable writing to the completion ring */
+#define UDMA_M2S_Q_COMP_CFG_EN_COMP_RING_UPDATE (1 << 0)
+/* Disable the completion coalescing function. */
+#define UDMA_M2S_Q_COMP_CFG_DIS_COMP_COAL (1 << 1)
+
+/**** q_sw_ctrl register ****/
+/*
+ * Reset the DMB hardware barrier
+ * (enable queue operation).
+ */
+#define UDMA_M2S_Q_SW_CTRL_RST_DMB (1 << 0)
+/* Reset the tail pointer hardware. */
+#define UDMA_M2S_Q_SW_CTRL_RST_TAIL_PTR (1 << 1)
+/* Reset the head pointer hardware. */
+#define UDMA_M2S_Q_SW_CTRL_RST_HEAD_PTR (1 << 2)
+/* Reset the current pointer hardware. */
+#define UDMA_M2S_Q_SW_CTRL_RST_CURRENT_PTR (1 << 3)
+/* Reset the queue */
+#define UDMA_M2S_Q_SW_CTRL_RST_Q (1 << 8)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_HAL_UDMA_M2S_REG_H */
diff --git a/al_hal_udma_regs_s2m.h b/al_hal_udma_regs_s2m.h
new file mode 100644
index 000000000000..4b3149b97ae6
--- /dev/null
+++ b/al_hal_udma_regs_s2m.h
@@ -0,0 +1,998 @@
+/*-
+*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @file al_hal_udma_regs_s2m.h
+ *
+ * @brief C Header file for the UDMA S2M registers
+ *
+ */
+
+#ifndef __AL_HAL_UDMA_S2M_REG_H
+#define __AL_HAL_UDMA_S2M_REG_H
+
+#include "al_hal_plat_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+* Unit Registers
+*/
+
+
+
+struct udma_axi_s2m {
+	/* [0x0] Data write master configuration (AWID, AWCACHE, AWBURST) */
+	uint32_t data_wr_cfg_1;
+	/* [0x4] Data write master configuration (AWUSER, AWSIZE, AWQOS, AWPROT) */
+	uint32_t data_wr_cfg_2;
+	/* [0x8] Descriptor read master configuration (ARID, ARCACHE, ARBURST) */
+	uint32_t desc_rd_cfg_4;
+	/* [0xc] Descriptor read master configuration (ARUSER, ARSIZE, ARQOS, ARPROT) */
+	uint32_t desc_rd_cfg_5;
+	/* [0x10] Completion write master configuration (AWID, AWCACHE, AWBURST) */
+	uint32_t comp_wr_cfg_1;
+	/* [0x14] Completion write master configuration (AWUSER, AWSIZE, AWQOS, AWPROT) */
+	uint32_t comp_wr_cfg_2;
+	/* [0x18] Data write master configuration (max. AXI beats per burst) */
+	uint32_t data_wr_cfg;
+	/* [0x1c] Descriptors read master configuration (burst split decision) */
+	uint32_t desc_rd_cfg_3;
+	/* [0x20] Completion descriptors write master configuration (min/max beats) */
+	uint32_t desc_wr_cfg_1;
+	/* [0x24] AXI outstanding read configuration (desc reads, stream ACKs) */
+	uint32_t ostand_cfg_rd;
+	/* [0x28] AXI outstanding write configuration (data/completion writes) */
+	uint32_t ostand_cfg_wr;
+	uint32_t rsrvd[53];
+};
+struct udma_s2m {
+	/*
+	 * [0x0] DMA state
+	 * 00 - No pending tasks
+	 * 01 - Normal (active)
+	 * 10 - Abort (error condition)
+	 * 11 - Reserved
+	 */
+	uint32_t state;
+	/* [0x4] CPU request to change DMA state */
+	uint32_t change_state;
+	uint32_t rsrvd_0;
+	/*
+	 * [0xc] S2M DMA error log mask.
+	 * Each error has an interrupt controller cause bit.
+	 * This register determines if these errors cause the S2M DMA to log the
+	 * error condition.
+	 * 0 - Log is enabled
+	 * 1 - Log is masked.
+	 */
+	uint32_t err_log_mask;
+	uint32_t rsrvd_1;
+	/*
+	 * [0x14] DMA header log (word 0)
+	 * Samples the packet header that caused the error
+	 */
+	uint32_t log_0;
+	/*
+	 * [0x18] DMA header log (word 1)
+	 * Samples the packet header that caused the error.
+	 */
+	uint32_t log_1;
+	/*
+	 * [0x1c] DMA header log (word 2)
+	 * Samples the packet header that caused the error.
+	 */
+	uint32_t log_2;
+	/*
+	 * [0x20] DMA header log (word 3)
+	 * Samples the packet header that caused the error
+	 */
+	uint32_t log_3;
+	/* [0x24] DMA clear error log */
+	uint32_t clear_err_log;
+	/* [0x28] S2M stream data FIFO status */
+	uint32_t s_data_fifo_status;
+	/* [0x2c] S2M stream header FIFO status */
+	uint32_t s_header_fifo_status;
+	/* [0x30] S2M AXI data FIFO status */
+	uint32_t axi_data_fifo_status;
+	/* [0x34] S2M unacknowledged-packets FIFO status */
+	uint32_t unack_fifo_status;
+	/* [0x38] Selects the queue reported by the sel_* status registers */
+	uint32_t indirect_ctrl;
+	/*
+	 * [0x3c] S2M prefetch FIFO status.
+	 * Status of the queue selected in S2M_indirect_ctrl
+	 */
+	uint32_t sel_pref_fifo_status;
+	/*
+	 * [0x40] S2M completion FIFO status.
+	 * Status of the queue selected in S2M_indirect_ctrl
+	 */
+	uint32_t sel_comp_fifo_status;
+	/* [0x44] S2M state machine and FIFO clear control */
+	uint32_t clear_ctrl;
+	/* [0x48] S2M Misc Check enable */
+	uint32_t check_en;
+	/* [0x4c] S2M FIFO enable control, internal */
+	uint32_t fifo_en;
+	/* [0x50] Stream interface configuration */
+	uint32_t stream_cfg;
+	uint32_t rsrvd[43];
+};
+struct udma_s2m_rd {
+	/* [0x0] S2M descriptor prefetch configuration (prefetch FIFO depth) */
+	uint32_t desc_pref_cfg_1;
+	/* [0x4] S2M descriptor prefetch configuration (queue promotion) */
+	uint32_t desc_pref_cfg_2;
+	/* [0x8] S2M descriptor prefetch configuration (min. burst sizes) */
+	uint32_t desc_pref_cfg_3;
+	/* [0xc] S2M descriptor prefetch configuration */
+	uint32_t desc_pref_cfg_4;
+	uint32_t rsrvd[12];
+};
+struct udma_s2m_wr {
+	/* [0x0] Stream (Rx) data FIFO configuration */
+	uint32_t data_cfg_1;
+	/* [0x4] Data write configuration */
+	uint32_t data_cfg_2;
+	uint32_t rsrvd[14];
+};
+struct udma_s2m_comp {
+	/* [0x0] Completion controller configuration */
+	uint32_t cfg_1c;
+	/* [0x4] Completion controller configuration */
+	uint32_t cfg_2c;
+	uint32_t rsrvd_0;
+	/* [0xc] Application acknowledge configuration (ACK timeout; cf. M2S) */
+	uint32_t cfg_application_ack;
+	uint32_t rsrvd[12];
+};
+struct udma_s2m_stat {
+	uint32_t rsrvd_0;
+	/* [0x4] Number of dropped packets */
+	uint32_t drop_pkt;
+	/*
+	 * [0x8] Counting the net length of the data buffers [64-bit]
+	 * Should be read before rx_bytes_high
+	 */
+	uint32_t rx_bytes_low;
+	/*
+	 * [0xc] Counting the net length of the data buffers [64-bit]
+	 * Should be read after rx_bytes_low (the value of this register is
+	 * sampled when rx_bytes_low is read)
+	 */
+	uint32_t rx_bytes_high;
+	/* [0x10] Total number of descriptors read from the host memory */
+	uint32_t prefed_desc;
+	/* [0x14] Number of packets written into the completion ring */
+	uint32_t comp_pkt;
+	/* [0x18] Number of descriptors written into the completion ring */
+	uint32_t comp_desc;
+	/*
+	 * [0x1c] Number of acknowledged packets.
+	 * (acknowledge sent to the stream interface)
+	 */
+	uint32_t ack_pkts;
+	uint32_t rsrvd[56];
+};
+struct udma_s2m_feature {
+	/*
+	 * [0x0] S2M Feature register.
+	 * S2M instantiation parameters.
+	 */
+	uint32_t reg_1;
+	/* [0x4] Reserved S2M feature register */
+	uint32_t reg_2;
+	/*
+	 * [0x8] S2M Feature register.
+	 * S2M instantiation parameters.
+	 */
+	uint32_t reg_3;
+	/*
+	 * [0xc] S2M Feature register.
+	 * S2M instantiation parameters.
+	 */
+	uint32_t reg_4;
+	/*
+	 * [0x10] S2M Feature register.
+	 * S2M instantiation parameters.
+	 */
+	uint32_t reg_5;
+	/* [0x14] S2M Feature register. S2M instantiation parameters. */
+	uint32_t reg_6;
+	uint32_t rsrvd[58];
+};
+struct udma_s2m_q {
+	uint32_t rsrvd_0[8];
+	/* [0x20] S2M Descriptor ring configuration */
+	uint32_t cfg;
+	/* [0x24] S2M Descriptor ring status and information */
+	uint32_t status;
+	/* [0x28] Rx Descriptor Ring Base Pointer [31:4] */
+	uint32_t rdrbp_low;
+	/* [0x2c] Rx Descriptor Ring Base Pointer [63:32] */
+	uint32_t rdrbp_high;
+	/*
+	 * [0x30] Rx Descriptor Ring Length [23:2]
+	 */
+	uint32_t rdrl;
+	/* [0x34] RX Descriptor Ring Head Pointer */
+	uint32_t rdrhp;
+	/* [0x38] Rx Descriptor Tail Pointer increment */
+	uint32_t rdrtp_inc;
+	/* [0x3c] Rx Descriptor Tail Pointer */
+	uint32_t rdrtp;
+	/* [0x40] RX Descriptor Current Pointer */
+	uint32_t rdcp;
+	/* [0x44] Rx Completion Ring Base Pointer [31:4] */
+	uint32_t rcrbp_low;
+	/* [0x48] Rx Completion Ring Base Pointer [63:32] */
+	uint32_t rcrbp_high;
+	/* [0x4c] Rx Completion Ring Head Pointer */
+	uint32_t rcrhp;
+	/*
+	 * [0x50] RX Completion Ring Head Pointer internal.
+	 * (Before the coalescing FIFO)
+	 */
+	uint32_t rcrhp_internal;
+	/* [0x54] Completion controller configuration for the queue */
+	uint32_t comp_cfg;
+	/* [0x58] Completion controller configuration for the queue */
+	uint32_t comp_cfg_2;
+	/* [0x5c] Packet handler configuration */
+	uint32_t pkt_cfg;
+	/* [0x60] Queue QoS configuration */
+	uint32_t qos_cfg;
+	/* [0x64] Queue software control (DMB - Data Memory Barrier) */
+	uint32_t q_sw_ctrl;
+	/* [0x68] Number of S2M Rx packets after completion */
+	uint32_t q_rx_pkt;
+	uint32_t rsrvd[997];
+};
+
+struct udma_s2m_regs {
+	uint32_t rsrvd_0[64];
+	struct udma_axi_s2m axi_s2m; /* [0x100] AXI masters configuration */
+	struct udma_s2m s2m; /* [0x200] General S2M state/config */
+	struct udma_s2m_rd s2m_rd; /* [0x300] Descriptor prefetch */
+	struct udma_s2m_wr s2m_wr; /* [0x340] Data write */
+	struct udma_s2m_comp s2m_comp; /* [0x380] Completion controller */
+	uint32_t rsrvd_1[80];
+	struct udma_s2m_stat s2m_stat; /* [0x500] Statistics counters */
+	struct udma_s2m_feature s2m_feature; /* [0x600] Feature/instantiation info */
+	uint32_t rsrvd_2[576];
+	struct udma_s2m_q s2m_q[4]; /* [0x1000] Per-queue registers, 4KB each */
+};
+
+
+/*
+* Registers Fields
+*/
+
+
+/**** data_wr_cfg_1 register ****/
+/* AXI write ID (AWID) */
+#define UDMA_AXI_S2M_DATA_WR_CFG_1_AWID_MASK 0x000000FF
+#define UDMA_AXI_S2M_DATA_WR_CFG_1_AWID_SHIFT 0
+/* Cache Type */
+#define UDMA_AXI_S2M_DATA_WR_CFG_1_AWCACHE_MASK 0x000F0000
+#define UDMA_AXI_S2M_DATA_WR_CFG_1_AWCACHE_SHIFT 16
+/* Burst type */
+#define UDMA_AXI_S2M_DATA_WR_CFG_1_AWBURST_MASK 0x03000000
+#define UDMA_AXI_S2M_DATA_WR_CFG_1_AWBURST_SHIFT 24
+
+/**** data_wr_cfg_2 register ****/
+/* User extension */
+#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWUSER_MASK 0x000FFFFF
+#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWUSER_SHIFT 0
+/* Bus size, 128-bit */
+#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWSIZE_MASK 0x00700000
+#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWSIZE_SHIFT 20
+/*
+ * AXI Master QoS.
+ * Used for arbitration between AXI masters
+ */
+#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWQOS_MASK 0x07000000
+#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWQOS_SHIFT 24
+/* Protection Type */
+#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWPROT_MASK 0x70000000
+#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWPROT_SHIFT 28
+
+/**** desc_rd_cfg_4 register ****/
+/* AXI read ID (ARID) */
+#define UDMA_AXI_S2M_DESC_RD_CFG_4_ARID_MASK 0x000000FF
+#define UDMA_AXI_S2M_DESC_RD_CFG_4_ARID_SHIFT 0
+/* Cache Type */
+#define UDMA_AXI_S2M_DESC_RD_CFG_4_ARCACHE_MASK 0x000F0000
+#define UDMA_AXI_S2M_DESC_RD_CFG_4_ARCACHE_SHIFT 16
+/* Burst type */
+#define UDMA_AXI_S2M_DESC_RD_CFG_4_ARBURST_MASK 0x03000000
+#define UDMA_AXI_S2M_DESC_RD_CFG_4_ARBURST_SHIFT 24
+
+/**** desc_rd_cfg_5 register ****/
+/* User extension */
+#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARUSER_MASK 0x000FFFFF
+#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARUSER_SHIFT 0
+/* Bus size, 128-bit */
+#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARSIZE_MASK 0x00700000
+#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARSIZE_SHIFT 20
+/*
+ * AXI Master QoS.
+ * Used for arbitration between AXI masters
+ */
+#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARQOS_MASK 0x07000000
+#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARQOS_SHIFT 24
+/* Protection Type */
+#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARPROT_MASK 0x70000000
+#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARPROT_SHIFT 28
+
+/**** comp_wr_cfg_1 register ****/
+/* AXI write ID (AWID) */
+#define UDMA_AXI_S2M_COMP_WR_CFG_1_AWID_MASK 0x000000FF
+#define UDMA_AXI_S2M_COMP_WR_CFG_1_AWID_SHIFT 0
+/* Cache Type */
+#define UDMA_AXI_S2M_COMP_WR_CFG_1_AWCACHE_MASK 0x000F0000
+#define UDMA_AXI_S2M_COMP_WR_CFG_1_AWCACHE_SHIFT 16
+/* Burst type */
+#define UDMA_AXI_S2M_COMP_WR_CFG_1_AWBURST_MASK 0x03000000
+#define UDMA_AXI_S2M_COMP_WR_CFG_1_AWBURST_SHIFT 24
+
+/**** comp_wr_cfg_2 register ****/
+/* User extension */
+#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWUSER_MASK 0x000FFFFF
+#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWUSER_SHIFT 0
+/* Bus size, 128-bit */
+#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWSIZE_MASK 0x00700000
+#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWSIZE_SHIFT 20
+/*
+ * AXI Master QoS.
+ * Used for arbitration between AXI masters
+ */
+#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWQOS_MASK 0x07000000
+#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWQOS_SHIFT 24
+/* Protection Type */
+#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWPROT_MASK 0x70000000
+#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWPROT_SHIFT 28
+
+/**** data_wr_cfg register ****/
+/*
+ * Defines the maximum number of AXI beats for a single AXI burst. This value is
+ * used for the burst split decision.
+ */
+#define UDMA_AXI_S2M_DATA_WR_CFG_MAX_AXI_BEATS_MASK 0x000000FF
+#define UDMA_AXI_S2M_DATA_WR_CFG_MAX_AXI_BEATS_SHIFT 0
+
+/**** desc_rd_cfg_3 register ****/
+/*
+ * Defines the maximum number of AXI beats for a single AXI burst. This value is
+ * used for the burst split decision.
+ */
+#define UDMA_AXI_S2M_DESC_RD_CFG_3_MAX_AXI_BEATS_MASK 0x000000FF
+#define UDMA_AXI_S2M_DESC_RD_CFG_3_MAX_AXI_BEATS_SHIFT 0
+/*
+ * Enables breaking descriptor read request.
+ * Aligned to max_AXI_beats when the total read size is less than max_AXI_beats.
+ */
+#define UDMA_AXI_S2M_DESC_RD_CFG_3_ALWAYS_BREAK_ON_MAX_BOUDRY (1 << 16)
+
+/**** desc_wr_cfg_1 register ****/
+/*
+ * Defines the maximum number of AXI beats for a single AXI burst. This value is
+ * used for the burst split decision.
+ */
+#define UDMA_AXI_S2M_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK 0x000000FF
+#define UDMA_AXI_S2M_DESC_WR_CFG_1_MAX_AXI_BEATS_SHIFT 0
+/*
+ * Minimum burst for writing completion descriptors.
+ * (AXI beats).
+ * Value must be aligned to cache lines (64 bytes).
+ * Default value is 2 cache lines, 8 beats.
+ */
+#define UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK 0x00FF0000
+#define UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_SHIFT 16
+
+/**** ostand_cfg_rd register ****/
+/*
+ * Maximum number of outstanding descriptor reads to the AXI.
+ * (AXI transactions).
+ */
+#define UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_DESC_RD_OSTAND_MASK 0x0000003F
+#define UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_DESC_RD_OSTAND_SHIFT 0
+/* Maximum number of outstanding stream acknowledges. */
+#define UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_STREAM_ACK_MASK 0x001F0000
+#define UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_STREAM_ACK_SHIFT 16
+
+/**** ostand_cfg_wr register ****/
+/*
+ * Maximum number of outstanding data writes to the AXI.
+ * (AXI transactions).
+ */
+#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_WR_OSTAND_MASK 0x0000003F
+#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_WR_OSTAND_SHIFT 0
+/*
+ * Maximum number of outstanding data beats for data write to AXI.
+ * (AXI beats).
+ */
+#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_BEATS_WR_OSTAND_MASK 0x0000FF00
+#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_BEATS_WR_OSTAND_SHIFT 8
+/*
+ * Maximum number of outstanding descriptor writes to the AXI.
+ * (AXI transactions).
+ */
+#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_REQ_MASK 0x003F0000
+#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_REQ_SHIFT 16
+/*
+ * Maximum number of outstanding data beats for descriptor write to AXI.
+ * (AXI beats).
+ */
+#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_OSTAND_MASK 0xFF000000
+#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_OSTAND_SHIFT 24
+
+/**** state register ****/
+
+#define UDMA_S2M_STATE_COMP_CTRL_MASK 0x00000003
+#define UDMA_S2M_STATE_COMP_CTRL_SHIFT 0
+
+#define UDMA_S2M_STATE_STREAM_IF_MASK 0x00000030
+#define UDMA_S2M_STATE_STREAM_IF_SHIFT 4
+
+#define UDMA_S2M_STATE_DATA_WR_CTRL_MASK 0x00000300
+#define UDMA_S2M_STATE_DATA_WR_CTRL_SHIFT 8
+
+#define UDMA_S2M_STATE_DESC_PREF_MASK 0x00003000
+#define UDMA_S2M_STATE_DESC_PREF_SHIFT 12
+
+#define UDMA_S2M_STATE_AXI_WR_DATA_MASK 0x00030000
+#define UDMA_S2M_STATE_AXI_WR_DATA_SHIFT 16
+
+/**** change_state register ****/
+/* Start normal operation */
+#define UDMA_S2M_CHANGE_STATE_NORMAL (1 << 0)
+/* Stop normal operation */
+#define UDMA_S2M_CHANGE_STATE_DIS (1 << 1)
+/*
+ * Stop all machines.
+ * (Prefetch, scheduling, completion and stream interface)
+ */
+#define UDMA_S2M_CHANGE_STATE_ABORT (1 << 2)
+
+/**** clear_err_log register ****/
+/* Clear error log */
+#define UDMA_S2M_CLEAR_ERR_LOG_CLEAR (1 << 0)
+
+/**** s_data_fifo_status register ****/
+/* FIFO used indication */
+#define UDMA_S2M_S_DATA_FIFO_STATUS_USED_MASK 0x0000FFFF
+#define UDMA_S2M_S_DATA_FIFO_STATUS_USED_SHIFT 0
+/* FIFO empty indication */
+#define UDMA_S2M_S_DATA_FIFO_STATUS_EMPTY (1 << 24)
+/* FIFO full indication */
+#define UDMA_S2M_S_DATA_FIFO_STATUS_FULL (1 << 28)
+
+/**** s_header_fifo_status register ****/
+/* FIFO used indication */
+#define UDMA_S2M_S_HEADER_FIFO_STATUS_USED_MASK 0x0000FFFF
+#define UDMA_S2M_S_HEADER_FIFO_STATUS_USED_SHIFT 0
+/* FIFO empty indication */
+#define UDMA_S2M_S_HEADER_FIFO_STATUS_EMPTY (1 << 24)
+/* FIFO full indication */
+#define UDMA_S2M_S_HEADER_FIFO_STATUS_FULL (1 << 28)
+
+/**** axi_data_fifo_status register ****/
+/* FIFO used indication */
+#define UDMA_S2M_AXI_DATA_FIFO_STATUS_USED_MASK 0x0000FFFF
+#define UDMA_S2M_AXI_DATA_FIFO_STATUS_USED_SHIFT 0
+/* FIFO empty indication */
+#define UDMA_S2M_AXI_DATA_FIFO_STATUS_EMPTY (1 << 24)
+/* FIFO full indication */
+#define UDMA_S2M_AXI_DATA_FIFO_STATUS_FULL (1 << 28)
+
+/**** unack_fifo_status register ****/
+/* FIFO used indication */
+#define UDMA_S2M_UNACK_FIFO_STATUS_USED_MASK 0x0000FFFF
+#define UDMA_S2M_UNACK_FIFO_STATUS_USED_SHIFT 0
+/* FIFO empty indication */
+#define UDMA_S2M_UNACK_FIFO_STATUS_EMPTY (1 << 24)
+/* FIFO full indication */
+#define UDMA_S2M_UNACK_FIFO_STATUS_FULL (1 << 28)
+
+/**** indirect_ctrl register ****/
+/* Selected queue for status read */
+#define UDMA_S2M_INDIRECT_CTRL_Q_NUM_MASK 0x00000FFF
+#define UDMA_S2M_INDIRECT_CTRL_Q_NUM_SHIFT 0
+
+/**** sel_pref_fifo_status register ****/
+/* FIFO used indication */
+#define UDMA_S2M_SEL_PREF_FIFO_STATUS_USED_MASK 0x0000FFFF
+#define UDMA_S2M_SEL_PREF_FIFO_STATUS_USED_SHIFT 0
+/* FIFO empty indication */
+#define UDMA_S2M_SEL_PREF_FIFO_STATUS_EMPTY (1 << 24)
+/* FIFO full indication */
+#define UDMA_S2M_SEL_PREF_FIFO_STATUS_FULL (1 << 28)
+
+/**** sel_comp_fifo_status register ****/
+/* FIFO used indication */
+#define UDMA_S2M_SEL_COMP_FIFO_STATUS_USED_MASK 0x0000FFFF
+#define UDMA_S2M_SEL_COMP_FIFO_STATUS_USED_SHIFT 0
+/* Coalescing ACTIVE FSM state indication. */
+#define UDMA_S2M_SEL_COMP_FIFO_STATUS_COAL_ACTIVE_STATE_MASK 0x00300000
+#define UDMA_S2M_SEL_COMP_FIFO_STATUS_COAL_ACTIVE_STATE_SHIFT 20
+/* FIFO empty indication */
+#define UDMA_S2M_SEL_COMP_FIFO_STATUS_EMPTY (1 << 24)
+/* FIFO full indication */
+#define UDMA_S2M_SEL_COMP_FIFO_STATUS_FULL (1 << 28)
+
+/**** stream_cfg register ****/
+/*
+ * Disables the stream interface operation.
+ * Changing to 1 stops at the end of packet reception.
+ */
+#define UDMA_S2M_STREAM_CFG_DISABLE (1 << 0)
+/*
+ * Flush the stream interface operation.
+ * Changing to 1 stops at the end of packet reception and assert ready to the
+ * stream I/F.
+ */
+#define UDMA_S2M_STREAM_CFG_FLUSH (1 << 4)
+/* Stop descriptor prefetch when the stream is disabled and the S2M is idle. */
+#define UDMA_S2M_STREAM_CFG_STOP_PREFETCH (1 << 8)
+
+/**** desc_pref_cfg_1 register ****/
+/*
+ * Size of the descriptor prefetch FIFO.
+ * (descriptors)
+ */
+#define UDMA_S2M_RD_DESC_PREF_CFG_1_FIFO_DEPTH_MASK 0x000000FF
+#define UDMA_S2M_RD_DESC_PREF_CFG_1_FIFO_DEPTH_SHIFT 0
+
+/**** desc_pref_cfg_2 register ****/
+/* Enable promotion of the current queue in progress */
+#define UDMA_S2M_RD_DESC_PREF_CFG_2_Q_PROMOTION (1 << 0)
+/* Force promotion of the current queue in progress */
+#define UDMA_S2M_RD_DESC_PREF_CFG_2_FORCE_PROMOTION (1 << 1)
+/* Enable prefetch prediction of next packet in line. */
+#define UDMA_S2M_RD_DESC_PREF_CFG_2_EN_PREF_PREDICTION (1 << 2)
+/*
+ * Threshold for queue promotion.
+ * Queue is promoted for prefetch if there are less descriptors in the prefetch
+ * FIFO than the threshold
+ */
+#define UDMA_S2M_RD_DESC_PREF_CFG_2_PROMOTION_TH_MASK 0x0000FF00
+#define UDMA_S2M_RD_DESC_PREF_CFG_2_PROMOTION_TH_SHIFT 8
+/*
+ * Force RR arbitration in the prefetch arbiter.
+ * 0 - Standard arbitration based on queue QoS
+ * 1 - Force round robin arbitration
+ */
+#define UDMA_S2M_RD_DESC_PREF_CFG_2_PREF_FORCE_RR (1 << 16)
+
+/**** desc_pref_cfg_3 register ****/
+/*
+ * Minimum descriptor burst size when prefetch FIFO level is below the
+ * descriptor prefetch threshold
+ * (must be 1)
+ */
+#define UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK 0x0000000F
+#define UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_SHIFT 0
+/*
+ * Minimum descriptor burst size when prefetch FIFO level is above the
+ * descriptor prefetch threshold
+ */
+#define UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK 0x000000F0
+#define UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT 4
+/*
+ * Descriptor fetch threshold.
+ * Used as a threshold to determine the allowed minimum descriptor burst size.
+ * (Must be at least "max_desc_per_pkt")
+ */
+#define UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_MASK 0x0000FF00
+#define UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT 8
+
+/**** desc_pref_cfg_4 register ****/
+/*
+ * Used as a threshold for generating almost FULL indication to the application
+ */
+#define UDMA_S2M_RD_DESC_PREF_CFG_4_A_FULL_THR_MASK 0x000000FF
+#define UDMA_S2M_RD_DESC_PREF_CFG_4_A_FULL_THR_SHIFT 0
+
+/**** data_cfg_1 register ****/
+/*
+ * Maximum number of data beats in the data write FIFO.
+ * Defined based on data FIFO size
+ * (default FIFO size 512B → 32 beats)
+ */
+#define UDMA_S2M_WR_DATA_CFG_1_DATA_FIFO_DEPTH_MASK 0x000003FF
+#define UDMA_S2M_WR_DATA_CFG_1_DATA_FIFO_DEPTH_SHIFT 0
+/*
+ * Maximum number of packets in the data write FIFO.
+ * Defined based on header FIFO size
+ */
+#define UDMA_S2M_WR_DATA_CFG_1_MAX_PKT_LIMIT_MASK 0x00FF0000
+#define UDMA_S2M_WR_DATA_CFG_1_MAX_PKT_LIMIT_SHIFT 16
+/*
+ * Internal use
+ * Data FIFO margin
+ */
+#define UDMA_S2M_WR_DATA_CFG_1_FIFO_MARGIN_MASK 0xFF000000
+#define UDMA_S2M_WR_DATA_CFG_1_FIFO_MARGIN_SHIFT 24
+
+/**** data_cfg_2 register ****/
+/*
+ * Drop timer.
+ * Waiting time for the host to write new descriptor to the queue
+ * (for the current packet in process)
+ */
+#define UDMA_S2M_WR_DATA_CFG_2_DESC_WAIT_TIMER_MASK 0x00FFFFFF
+#define UDMA_S2M_WR_DATA_CFG_2_DESC_WAIT_TIMER_SHIFT 0
+/*
+ * Drop enable.
+ * Enable packet drop if there are no available descriptors in the system for
+ * this queue
+ */
+#define UDMA_S2M_WR_DATA_CFG_2_DROP_IF_NO_DESC (1 << 27)
+/*
+ * Lack of descriptors hint.
+ * Generate interrupt when a packet is waiting but there are no available
+ * descriptors in the queue
+ */
+#define UDMA_S2M_WR_DATA_CFG_2_HINT_IF_NO_DESC (1 << 28)
+/*
+ * Drop conditions
+ * Wait until a descriptor is available in the prefetch FIFO or the host before
+ * dropping packet.
+ * 1 - Drop if a descriptor is not available in the prefetch.
+ * 0 - Drop if a descriptor is not available in the system
+ */
+#define UDMA_S2M_WR_DATA_CFG_2_WAIT_FOR_PREF (1 << 29)
+/*
+ * DRAM write optimization
+ * 0 - Data write with byte enable
+ * 1 - Data write is always in Full AXI bus width (128 bit)
+ */
+#define UDMA_S2M_WR_DATA_CFG_2_FULL_LINE_MODE (1 << 30)
+/*
+ * Direct data write address
+ * 1 - Use buffer 1 instead of buffer 2 when direct data placement is used with
+ * header split.
+ * 0 - Use buffer 2 for the header.
+ */
+#define UDMA_S2M_WR_DATA_CFG_2_DIRECT_HDR_USE_BUF1 (1 << 31)
+
+/**** cfg_1c register ****/
+/*
+ * Completion descriptor size.
+ * (words)
+ */
+#define UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK 0x0000000F
+#define UDMA_S2M_COMP_CFG_1C_DESC_SIZE_SHIFT 0
+/*
+ * Completion queue counter configuration.
+ * Completion FIFO in use counter measured in words or descriptors
+ * 1 - Words
+ * 0 - Descriptors
+ */
+#define UDMA_S2M_COMP_CFG_1C_CNT_WORDS (1 << 8)
+/*
+ * Enable promotion of the current queue in progress in the completion write
+ * scheduler.
+ */
+#define UDMA_S2M_COMP_CFG_1C_Q_PROMOTION (1 << 12)
+/* Force RR arbitration in the completion arbiter */
+#define UDMA_S2M_COMP_CFG_1C_FORCE_RR (1 << 16)
+/* Minimum number of free completion entries to qualify for promotion */
+#define UDMA_S2M_COMP_CFG_1C_Q_FREE_MIN_MASK 0xF0000000
+#define UDMA_S2M_COMP_CFG_1C_Q_FREE_MIN_SHIFT 28
+
+/**** cfg_2c register ****/
+/*
+ * Completion FIFO size.
+ * (words per queue)
+ */
+#define UDMA_S2M_COMP_CFG_2C_COMP_FIFO_DEPTH_MASK 0x00000FFF
+#define UDMA_S2M_COMP_CFG_2C_COMP_FIFO_DEPTH_SHIFT 0
+/*
+ * Unacknowledged FIFO size.
+ * (descriptors)
+ */
+#define UDMA_S2M_COMP_CFG_2C_UNACK_FIFO_DEPTH_MASK 0x0FFF0000
+#define UDMA_S2M_COMP_CFG_2C_UNACK_FIFO_DEPTH_SHIFT 16
+
+/**** reg_1 register ****/
+/*
+ * Descriptor prefetch FIFO size
+ * (descriptors)
+ */
+#define UDMA_S2M_FEATURE_REG_1_DESC_PREFERCH_FIFO_DEPTH_MASK 0x000000FF
+#define UDMA_S2M_FEATURE_REG_1_DESC_PREFERCH_FIFO_DEPTH_SHIFT 0
+
+/**** reg_3 register ****/
+/*
+ * Maximum number of data beats in the data write FIFO.
+ * Defined based on data FIFO size
+ * (default FIFO size 512B → 32 beats)
+ */
+#define UDMA_S2M_FEATURE_REG_3_DATA_FIFO_DEPTH_MASK 0x000003FF
+#define UDMA_S2M_FEATURE_REG_3_DATA_FIFO_DEPTH_SHIFT 0
+/*
+ * Maximum number of packets in the data write FIFO.
+ * Defined based on header FIFO size
+ */
+#define UDMA_S2M_FEATURE_REG_3_DATA_WR_MAX_PKT_LIMIT_MASK 0x00FF0000
+#define UDMA_S2M_FEATURE_REG_3_DATA_WR_MAX_PKT_LIMIT_SHIFT 16
+
+/**** reg_4 register ****/
+/*
+ * Completion FIFO size.
+ * (words per queue)
+ */
+#define UDMA_S2M_FEATURE_REG_4_COMP_FIFO_DEPTH_MASK 0x00000FFF
+#define UDMA_S2M_FEATURE_REG_4_COMP_FIFO_DEPTH_SHIFT 0
+/*
+ * Unacknowledged FIFO size.
+ * (descriptors)
+ */
+#define UDMA_S2M_FEATURE_REG_4_COMP_UNACK_FIFO_DEPTH_MASK 0x0FFF0000
+#define UDMA_S2M_FEATURE_REG_4_COMP_UNACK_FIFO_DEPTH_SHIFT 16
+
+/**** reg_5 register ****/
+/* Maximum number of outstanding data writes to the AXI */
+#define UDMA_S2M_FEATURE_REG_5_MAX_DATA_WR_OSTAND_MASK 0x0000003F
+#define UDMA_S2M_FEATURE_REG_5_MAX_DATA_WR_OSTAND_SHIFT 0
+/*
+ * Maximum number of outstanding data beats for data write to AXI.
+ * (AXI beats)
+ */
+#define UDMA_S2M_FEATURE_REG_5_MAX_DATA_BEATS_WR_OSTAND_MASK 0x0000FF00
+#define UDMA_S2M_FEATURE_REG_5_MAX_DATA_BEATS_WR_OSTAND_SHIFT 8
+/*
+ * Maximum number of outstanding descriptor reads to the AXI.
+ * (AXI transactions)
+ */
+#define UDMA_S2M_FEATURE_REG_5_MAX_COMP_REQ_MASK 0x003F0000
+#define UDMA_S2M_FEATURE_REG_5_MAX_COMP_REQ_SHIFT 16
+/*
+ * Maximum number of outstanding data beats for descriptor write to AXI.
+ * (AXI beats)
+ */
+#define UDMA_S2M_FEATURE_REG_5_MAX_COMP_DATA_WR_OSTAND_MASK 0xFF000000
+#define UDMA_S2M_FEATURE_REG_5_MAX_COMP_DATA_WR_OSTAND_SHIFT 24
+
+/**** reg_6 register ****/
+/* Maximum number of outstanding descriptor reads to the AXI */
+#define UDMA_S2M_FEATURE_REG_6_MAX_DESC_RD_OSTAND_MASK 0x0000003F
+#define UDMA_S2M_FEATURE_REG_6_MAX_DESC_RD_OSTAND_SHIFT 0
+/* Maximum number of outstanding stream acknowledges */
+#define UDMA_S2M_FEATURE_REG_6_MAX_STREAM_ACK_MASK 0x001F0000
+#define UDMA_S2M_FEATURE_REG_6_MAX_STREAM_ACK_SHIFT 16
+
+/**** cfg register ****/
+/*
+ * Configure the AXI AWCACHE
+ * for header write.
+ */
+#define UDMA_S2M_Q_CFG_AXI_AWCACHE_HDR_MASK 0x0000000F
+#define UDMA_S2M_Q_CFG_AXI_AWCACHE_HDR_SHIFT 0
+/*
+ * Configure the AXI AWCACHE
+ * for data write.
+ */
+#define UDMA_S2M_Q_CFG_AXI_AWCACHE_DATA_MASK 0x000000F0
+#define UDMA_S2M_Q_CFG_AXI_AWCACHE_DATA_SHIFT 4
+/*
+ * Enable operation of this queue.
+ * Start prefetch.
+ */
+#define UDMA_S2M_Q_CFG_EN_PREF (1 << 16)
+/* Enables the reception of packets from the stream to this queue */
+#define UDMA_S2M_Q_CFG_EN_STREAM (1 << 17)
+/* Allow prefetch of less than minimum prefetch burst size. */
+#define UDMA_S2M_Q_CFG_ALLOW_LT_MIN_PREF (1 << 20)
+/*
+ * Configure the AXI AWCACHE
+ * for completion descriptor write
+ */
+#define UDMA_S2M_Q_CFG_AXI_AWCACHE_COMP_MASK 0x0F000000
+#define UDMA_S2M_Q_CFG_AXI_AWCACHE_COMP_SHIFT 24
+/*
+ * AXI QoS
+ * This value is used in AXI transactions associated with this queue and the
+ * prefetch and completion arbiters.
+ */
+#define UDMA_S2M_Q_CFG_AXI_QOS_MASK 0x70000000
+#define UDMA_S2M_Q_CFG_AXI_QOS_SHIFT 28
+
+/**** status register ****/
+/* Indicates how many entries are used in the Queue */
+#define UDMA_S2M_Q_STATUS_Q_USED_MASK 0x01FFFFFF
+#define UDMA_S2M_Q_STATUS_Q_USED_SHIFT 0
+/*
+ * prefetch status
+ * 0 - prefetch operation is stopped
+ * 1 - prefetch is operational
+ */
+#define UDMA_S2M_Q_STATUS_PREFETCH (1 << 28)
+/*
+ * Queue receive status
+ * 0 - queue RX operation is stopped
+ * 1 - RX queue is active and processing packets
+ */
+#define UDMA_S2M_Q_STATUS_RX (1 << 29)
+/*
+ * Indicates if the queue is full.
+ * (Used by the host when head pointer equals tail pointer)
+ */
+#define UDMA_S2M_Q_STATUS_Q_FULL (1 << 31)
+
+/**** RDRBP_low register ****/
+/*
+ * S2M Descriptor Ring Base address [31:4].
+ * Value of the base address of the S2M descriptor ring
+ * [3:0] - 0 - 16B alignment is enforced
+ * ([11:4] should be 0 for 4KB alignment)
+ */
+#define UDMA_S2M_Q_RDRBP_LOW_ADDR_MASK 0xFFFFFFF0
+#define UDMA_S2M_Q_RDRBP_LOW_ADDR_SHIFT 4
+
+/**** RDRL register ****/
+/*
+ * Length of the descriptor ring.
+ * (descriptors)
+ * Associated with the ring base address ends at maximum burst size alignment
+ */
+#define UDMA_S2M_Q_RDRL_OFFSET_MASK 0x00FFFFFF
+#define UDMA_S2M_Q_RDRL_OFFSET_SHIFT 0
+
+/**** RDRHP register ****/
+/*
+ * Relative offset of the next descriptor that needs to be read into the
+ * prefetch FIFO.
+ * Incremented when the DMA reads valid descriptors from the host memory to the
+ * prefetch FIFO.
+ * Note that this is the offset in # of descriptors and not in byte address.
+ */
+#define UDMA_S2M_Q_RDRHP_OFFSET_MASK 0x00FFFFFF
+#define UDMA_S2M_Q_RDRHP_OFFSET_SHIFT 0
+/* Ring ID */
+#define UDMA_S2M_Q_RDRHP_RING_ID_MASK 0xC0000000
+#define UDMA_S2M_Q_RDRHP_RING_ID_SHIFT 30
+
+/**** RDRTP_inc register ****/
+/*
+ * Increments the value in Q_RDRTP with the value written to this field in
+ * number of descriptors.
+ */
+#define UDMA_S2M_Q_RDRTP_INC_VAL_MASK 0x00FFFFFF
+#define UDMA_S2M_Q_RDRTP_INC_VAL_SHIFT 0
+
+/**** RDRTP register ****/
+/*
+ * Relative offset of the next free descriptor in the host memory.
+ * Note that this is the offset in # of descriptors and not in byte address.
+ */
+#define UDMA_S2M_Q_RDRTP_OFFSET_MASK 0x00FFFFFF
+#define UDMA_S2M_Q_RDRTP_OFFSET_SHIFT 0
+/* Ring ID */
+#define UDMA_S2M_Q_RDRTP_RING_ID_MASK 0xC0000000
+#define UDMA_S2M_Q_RDRTP_RING_ID_SHIFT 30
+
+/**** RDCP register ****/
+/* Relative offset of the first descriptor in the prefetch FIFO. */
+#define UDMA_S2M_Q_RDCP_OFFSET_MASK 0x00FFFFFF
+#define UDMA_S2M_Q_RDCP_OFFSET_SHIFT 0
+/* Ring ID */
+#define UDMA_S2M_Q_RDCP_RING_ID_MASK 0xC0000000
+#define UDMA_S2M_Q_RDCP_RING_ID_SHIFT 30
+
+/**** RCRBP_low register ****/
+/*
+ * S2M Descriptor Ring Base address [31:4].
+ * Value of the base address of the S2M descriptor ring
+ * [3:0] - 0 - 16B alignment is enforced
+ * ([11:4] Must be 0 for 4KB alignment)
+ * NOTE:
+ * Length of the descriptor ring (in descriptors) associated with the ring base
+ * address ends at maximum burst size alignment
+ */
+#define UDMA_S2M_Q_RCRBP_LOW_ADDR_MASK 0xFFFFFFF0
+#define UDMA_S2M_Q_RCRBP_LOW_ADDR_SHIFT 4
+
+/**** RCRHP register ****/
+/*
+ * Relative offset of the next descriptor that needs to be updated by the
+ * completion controller.
+ * Note: This is in descriptors and not in byte address.
+ */
+#define UDMA_S2M_Q_RCRHP_OFFSET_MASK 0x00FFFFFF
+#define UDMA_S2M_Q_RCRHP_OFFSET_SHIFT 0
+/* Ring ID */
+#define UDMA_S2M_Q_RCRHP_RING_ID_MASK 0xC0000000
+#define UDMA_S2M_Q_RCRHP_RING_ID_SHIFT 30
+
+/**** RCRHP_internal register ****/
+/*
+ * Relative offset of the next descriptor that needs to be updated by the
+ * completion controller.
+ * Note: This is in descriptors and not in byte address.
+ */
+#define UDMA_S2M_Q_RCRHP_INTERNAL_OFFSET_MASK 0x00FFFFFF
+#define UDMA_S2M_Q_RCRHP_INTERNAL_OFFSET_SHIFT 0
+/* Ring ID */
+#define UDMA_S2M_Q_RCRHP_INTERNAL_RING_ID_MASK 0xC0000000
+#define UDMA_S2M_Q_RCRHP_INTERNAL_RING_ID_SHIFT 30
+
+/**** comp_cfg register ****/
+/* Enables writing to the completion ring. */
+#define UDMA_S2M_Q_COMP_CFG_EN_COMP_RING_UPDATE (1 << 0)
+/* Disables the completion coalescing function. */
+#define UDMA_S2M_Q_COMP_CFG_DIS_COMP_COAL (1 << 1)
+/* Reserved */
+#define UDMA_S2M_Q_COMP_CFG_FIRST_PKT_PROMOTION (1 << 2)
+/*
+ * Buffer 2 location.
+ * Determines the position of the buffer 2 length in the S2M completion
+ * descriptor.
+ * 0 - WORD 1 [31:16]
+ * 1 - WORD 2 [31:16]
+ */
+#define UDMA_S2M_Q_COMP_CFG_BUF2_LEN_LOCATION (1 << 3)
+
+/**** pkt_cfg register ****/
+/* Header size. (bytes) */
+#define UDMA_S2M_Q_PKT_CFG_HDR_SPLIT_SIZE_MASK 0x0000FFFF
+#define UDMA_S2M_Q_PKT_CFG_HDR_SPLIT_SIZE_SHIFT 0
+/* Force header split */
+#define UDMA_S2M_Q_PKT_CFG_FORCE_HDR_SPLIT (1 << 16)
+/* Enable header split. */
+#define UDMA_S2M_Q_PKT_CFG_EN_HDR_SPLIT (1 << 17)
+
+/**** qos_cfg register ****/
+/* Queue QoS */
+#define UDMA_S2M_QOS_CFG_Q_QOS_MASK 0x000000FF
+#define UDMA_S2M_QOS_CFG_Q_QOS_SHIFT 0
+
+/**** q_sw_ctrl register ****/
+/* Reset the tail pointer hardware. */
+#define UDMA_S2M_Q_SW_CTRL_RST_TAIL_PTR (1 << 1)
+/* Reset the head pointer hardware. */
+#define UDMA_S2M_Q_SW_CTRL_RST_HEAD_PTR (1 << 2)
+/* Reset the current pointer hardware. */
+#define UDMA_S2M_Q_SW_CTRL_RST_CURRENT_PTR (1 << 3)
+/* Reset the prefetch FIFO */
+#define UDMA_S2M_Q_SW_CTRL_RST_PREFETCH (1 << 4)
+/* Reset the queue */
+#define UDMA_S2M_Q_SW_CTRL_RST_Q (1 << 8)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_HAL_UDMA_S2M_REG_H */
diff --git a/eth/al_hal_an_lt_wrapper_regs.h b/eth/al_hal_an_lt_wrapper_regs.h
new file mode 100644
index 000000000000..72b5cc66fb44
--- /dev/null
+++ b/eth/al_hal_an_lt_wrapper_regs.h
@@ -0,0 +1,264 @@
+/*-
+*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @{
+ * @file al_hal_an_lt_wrapper_regs.h
+ *
+ * @brief ... registers
+ *
+ */
+
+#ifndef __AL_HAL_AN_LT_wrapper_REGS_H__
+#define __AL_HAL_AN_LT_wrapper_REGS_H__
+
+#include "al_hal_plat_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+* Unit Registers
+*/
+
+
+
+/*
+ * General register group of the AN/LT wrapper; mapped at offset 0x100 of
+ * struct al_an_lt_wrapper_regs. Offsets in the comments below are relative
+ * to the start of this group.
+ */
+struct al_an_lt_wrapper_gen {
+ /* [0x0] AN LT wrapper Version */
+ uint32_t version;
+ /* [0x4] AN LT general configuration */
+ uint32_t cfg;
+ /* [0x8] Reserved - pads the group to 16 words (0x40 bytes) */
+ uint32_t rsrvd[14];
+};
+/*
+ * Per-unit AN/LT register window; the addr/data pair provides indirect
+ * access to the AN/LT register file. Three instances exist, starting at
+ * offset 0x140 of struct al_an_lt_wrapper_regs (see an_lt[3]).
+ */
+struct al_an_lt_wrapper_an_lt {
+ /* [0x0] AN LT register file address */
+ uint32_t addr;
+ /* [0x4] PCS register file data */
+ uint32_t data;
+ /* [0x8] AN LT control signals */
+ uint32_t ctrl;
+ /* [0xc] AN LT status signals */
+ uint32_t status;
+ /* [0x10] Reserved - pads each instance to 8 words (0x20 bytes) */
+ uint32_t rsrvd[4];
+};
+
+/*
+ * AN/LT unit selection by SerDes data-bus width.
+ * NOTE(review): values 0..2 presumably index al_an_lt_wrapper_regs.an_lt[]
+ * - TODO confirm against the callers in the Ethernet HAL.
+ */
+enum al_eth_an_lt_unit {
+ AL_ETH_AN_LT_UNIT_32_BIT = 0,
+ AL_ETH_AN_LT_UNIT_20_BIT = 1,
+ AL_ETH_AN_LT_UNIT_16_BIT = 2,
+};
+
+/*
+ * Top-level AN/LT wrapper register map.
+ * rsrvd_0 covers offsets 0x0-0xff; the gen group occupies 0x100-0x13f and
+ * the three per-unit windows start at 0x140 (0x20 bytes each).
+ */
+struct al_an_lt_wrapper_regs {
+ uint32_t rsrvd_0[64];
+ struct al_an_lt_wrapper_gen gen; /* [0x100] */
+ struct al_an_lt_wrapper_an_lt an_lt[3]; /* [0x140] */
+};
+
+
+/*
+* Registers Fields
+*/
+
+
+/**** version register ****/
+/* Revision number (Minor) */
+#define AN_LT_WRAPPER_GEN_VERSION_RELEASE_NUM_MINOR_MASK 0x000000FF
+#define AN_LT_WRAPPER_GEN_VERSION_RELEASE_NUM_MINOR_SHIFT 0
+/* Revision number (Major) */
+#define AN_LT_WRAPPER_GEN_VERSION_RELEASE_NUM_MAJOR_MASK 0x0000FF00
+#define AN_LT_WRAPPER_GEN_VERSION_RELEASE_NUM_MAJOR_SHIFT 8
+/* Date of release */
+#define AN_LT_WRAPPER_GEN_VERSION_DATE_DAY_MASK 0x001F0000
+#define AN_LT_WRAPPER_GEN_VERSION_DATE_DAY_SHIFT 16
+/* Month of release */
+#define AN_LT_WRAPPER_GEN_VERSION_DATA_MONTH_MASK 0x01E00000
+#define AN_LT_WRAPPER_GEN_VERSION_DATA_MONTH_SHIFT 21
+/* Year of release (starting from 2000) */
+#define AN_LT_WRAPPER_GEN_VERSION_DATE_YEAR_MASK 0x3E000000
+#define AN_LT_WRAPPER_GEN_VERSION_DATE_YEAR_SHIFT 25
+/* Reserved */
+#define AN_LT_WRAPPER_GEN_VERSION_RESERVED_MASK 0xC0000000
+#define AN_LT_WRAPPER_GEN_VERSION_RESERVED_SHIFT 30
+
+/**** cfg register ****/
+/*
+ * selection between different bus widths:
+ * 0 – 16
+ * 1 – 20
+ * 2 – 32
+ * 3 – N/A
+ */
+#define AN_LT_WRAPPER_GEN_CFG_AN_LT_SEL_RX_MASK 0x00000003
+#define AN_LT_WRAPPER_GEN_CFG_AN_LT_SEL_RX_SHIFT 0
+/*
+ * selection between different bus widths:
+ * 0 – 16
+ * 1 – 20
+ * 2 – 32
+ * 3 – N/A
+ */
+#define AN_LT_WRAPPER_GEN_CFG_AN_LT_SEL_TX_MASK 0x0000000C
+#define AN_LT_WRAPPER_GEN_CFG_AN_LT_SEL_TX_SHIFT 2
+/* bypass the AN/LT block */
+#define AN_LT_WRAPPER_GEN_CFG_BYPASS_RX (1 << 4)
+/* bypass the AN/LT block */
+#define AN_LT_WRAPPER_GEN_CFG_BYPASS_TX (1 << 5)
+
+/**** addr register ****/
+/* Address value */
+#define AN_LT_WRAPPER_AN_LT_ADDR_VAL_MASK 0x000007FF
+#define AN_LT_WRAPPER_AN_LT_ADDR_VAL_SHIFT 0
+
+/**** data register ****/
+/* Data value */
+#define AN_LT_WRAPPER_AN_LT_DATA_VAL_MASK 0x0000FFFF
+#define AN_LT_WRAPPER_AN_LT_DATA_VAL_SHIFT 0
+
+/**** ctrl register ****/
+/*
+ * Default Auto-Negotiation Enable. If ‘1’, the auto-negotiation process will
+ * start after reset de-assertion. The application can also start the
+ * auto-negotiation process by writing the KXAN_CONTROL.an_enable bit with ‘1’.
+ * Important: This signal is OR'ed with the KXAN_CONTROL.an_enable bit. Hence,
+ * when asserted (1) the application is unable to disable autonegotiation and
+ * writing the an_enable bit has no effect.
+ * Note: Even if enabled by this pin, the application must write the correct
+ * abilities in the KXAN_ABILITY_1/2/3 registers within 60ms from reset
+ * deassertion (break_link_timer).
+ */
+#define AN_LT_WRAPPER_AN_LT_CTRL_AN_ENA (1 << 0)
+/*
+ * If set to 1, the Arbitration State Machine reached the TRANSMIT_DISABLE
+ * state.
+ */
+#define AN_LT_WRAPPER_AN_LT_CTRL_AN_DIS_TIMER (1 << 1)
+
+#define AN_LT_WRAPPER_AN_LT_CTRL_LINK_STATUS_KX (1 << 4)
+
+#define AN_LT_WRAPPER_AN_LT_CTRL_LINK_STATUS_KX4 (1 << 5)
+
+#define AN_LT_WRAPPER_AN_LT_CTRL_LINK_STATUS (1 << 6)
+/*
+ * PHY LOS indication selection
+ * 0 - Select input from the SerDes
+ * 1 - Select register value from phy_los_in_def
+ */
+#define AN_LT_WRAPPER_AN_LT_CTRL_PHY_LOS_IN_SEL (1 << 8)
+/* PHY LOS default value */
+#define AN_LT_WRAPPER_AN_LT_CTRL_PHY_LOS_IN_DEF (1 << 9)
+/* PHY LOS polarity */
+#define AN_LT_WRAPPER_AN_LT_CTRL_PHY_LOS_IN_POL (1 << 10)
+/*
+ * PHY LOS indication selection
+ * 0 – select AN output
+ * 1 - Select register value from phy_los_out_def
+ * 2 - Select input from the SerDes
+ * 3 – 0
+ */
+#define AN_LT_WRAPPER_AN_LT_CTRL_PHY_LOS_OUT_SEL_MASK 0x00003000
+#define AN_LT_WRAPPER_AN_LT_CTRL_PHY_LOS_OUT_SEL_SHIFT 12
+/* PHY LOS default value */
+#define AN_LT_WRAPPER_AN_LT_CTRL_PHY_LOS_OUT_DEF (1 << 14)
+/* PHY LOS polarity */
+#define AN_LT_WRAPPER_AN_LT_CTRL_PHY_LOS_OUT_POL (1 << 15)
+
+/**** status register ****/
+/* Auto-Negotiation Done. If ‘1’, the auto-negotiation process has completed. */
+#define AN_LT_WRAPPER_AN_LT_STATUS_AN_DONE (1 << 0)
+/*
+ * If set to 1, auto-negotiation is enabled on the link. It represents the
+ * enable control bit KXAN_CONTROL.an_enable. When set to 1, the signals
+ * an_status/an_select are valid.
+ */
+#define AN_LT_WRAPPER_AN_LT_STATUS_AN_VAL (1 << 1)
+/*
+ * If set to 0, auto-negotiation is in progress, if set to 1, the Arbitration
+ * State Machine reached the AN_GOOD_CHECK state (i.e. before autonegotiation is
+ * done, but the link no longer is used to transfer DME pages). Stays asserted
+ * also during AN_GOOD (autoneg done).
+ */
+#define AN_LT_WRAPPER_AN_LT_STATUS_AN_STATUS (1 << 2)
+/*
+ * Selected Technology. Becomes valid when an_status is 1.
+ * The selection mode number (from 0 to 24) corresponds to the Technology
+ * Ability (A0-A24) from the ability pages (see 4.3.2.3 page 13). The mode
+ * selection is based on the matching technology abilities and priority.
+ * A value of 31 is an invalid setting that indicates that no common technology
+ * could be resolved. The application should then inspect the base page results
+ * to determine if the link is operable or not.
+ */
+#define AN_LT_WRAPPER_AN_LT_STATUS_AN_SELECT_MASK 0x000001F0
+#define AN_LT_WRAPPER_AN_LT_STATUS_AN_SELECT_SHIFT 4
+/*
+ * If set to 1, the Arbitration State Machine reached the TRANSMIT_DISABLE state
+ */
+#define AN_LT_WRAPPER_AN_LT_STATUS_AN_TR_DIS_STATUS (1 << 16)
+/*
+ * FEC Enable. Asserts when autonegotiation base page exchange identified both
+ * link partners advertising FEC capability and at least one is requesting FEC.
+ * The signal stays constant following base page exchange until autonegotiation
+ * is disabled or restarted.
+ * Note: the information can also be extracted from the base page exchange or
+ * the BP_ETH_STATUS register.
+ */
+#define AN_LT_WRAPPER_AN_LT_STATUS_FEC_ENA (1 << 17)
+/*
+ * Link Training Frame Lock. If set to 1 the training frame delineation has been
+ * acquired.
+ */
+#define AN_LT_WRAPPER_AN_LT_STATUS_LT_LOCK (1 << 20)
+/*
+ * If set to 0, link-training is in progress, if set to 1, the training is
+ * completed and the PCS datapath has been enabled (phy_los_out no longer
+ * gated).
+ */
+#define AN_LT_WRAPPER_AN_LT_STATUS_LT_STATUS (1 << 21)
+/*
+ * If set to 1, link-training is enabled on the link. It represents the enable
+ * control bit PMD Control.training enable. When set to 1, the signal lt_status
+ * is valid
+ */
+#define AN_LT_WRAPPER_AN_LT_STATUS_LT_VAL (1 << 22)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_HAL_AN_LT_wrapper_REGS_H__ */
+
+/** @} end of ... group */
+
+
diff --git a/eth/al_hal_eth.h b/eth/al_hal_eth.h
new file mode 100644
index 000000000000..86108b0df4c1
--- /dev/null
+++ b/eth/al_hal_eth.h
@@ -0,0 +1,2381 @@
+/*-
+*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_eth_api API
+ * Ethernet Controller HAL driver API
+ * @ingroup group_eth
+ * @{
+ * @file al_hal_eth.h
+ *
+ * @brief Header file for Unified GbE and 10GbE Ethernet Controllers This is a
+ * common header file that covers both Standard and Advanced Controller
+ *
+ *
+ */
+
+#ifndef __AL_HAL_ETH_H__
+#define __AL_HAL_ETH_H__
+
+#include "al_hal_common.h"
+#include "al_hal_udma.h"
+#include "al_hal_eth_alu.h"
+#ifdef AL_ETH_EX
+#include "al_hal_eth_ex.h"
+#include "al_hal_eth_ex_internal.h"
+#endif
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+#ifndef AL_ETH_PKT_MAX_BUFS
+#ifndef AL_ETH_EX
+#define AL_ETH_PKT_MAX_BUFS 19
+#else
+#define AL_ETH_PKT_MAX_BUFS 29
+#endif
+#endif
+
+#define AL_ETH_UDMA_TX_QUEUES 4
+#define AL_ETH_UDMA_RX_QUEUES 4
+
+/* PCI Adapter Device/Revision ID */
+#define AL_ETH_DEV_ID_STANDARD 0x0001
+#define AL_ETH_DEV_ID_ADVANCED 0x0002
+#define AL_ETH_REV_ID_0 0 /* Alpine V1 Rev 0 */
+#define AL_ETH_REV_ID_1 1 /* Alpine V1 Rev 1 */
+#define AL_ETH_REV_ID_2 2 /* Alpine V2 basic */
+#define AL_ETH_REV_ID_3 3 /* Alpine V2 advanced */
+
+/* PCI BARs */
+#define AL_ETH_UDMA_BAR 0
+#define AL_ETH_EC_BAR 4
+#define AL_ETH_MAC_BAR 2
+
+#define AL_ETH_MAX_FRAME_LEN 10000
+#define AL_ETH_MIN_FRAME_LEN 60
+
+#define AL_ETH_TSO_MSS_MAX_IDX 8
+#define AL_ETH_TSO_MSS_MIN_VAL 1
+/*TODO: update with correct value*/
+#define AL_ETH_TSO_MSS_MAX_VAL (AL_ETH_MAX_FRAME_LEN - 200)
+
+/*
+ * Protocol index values used by the controller's parser/forwarding tables.
+ * NOTE(review): the numeric values appear to be hardware-defined table
+ * indices - do not renumber.
+ */
+enum AL_ETH_PROTO_ID {
+ AL_ETH_PROTO_ID_UNKNOWN = 0,
+ AL_ETH_PROTO_ID_IPv4 = 8,
+ AL_ETH_PROTO_ID_IPv6 = 11,
+ AL_ETH_PROTO_ID_TCP = 12,
+ AL_ETH_PROTO_ID_UDP = 13,
+ AL_ETH_PROTO_ID_FCOE = 21,
+ AL_ETH_PROTO_ID_GRH = 22, /**< RoCE L3 header */
+ AL_ETH_PROTO_ID_BTH = 23, /**< RoCE L4 header */
+ AL_ETH_PROTO_ID_ANY = 32, /**< for sw usage only */
+};
+/* Number of protocol index entries (valid indices are 0..AL_ETH_PROTO_ID_ANY-1) */
+#define AL_ETH_PROTOCOLS_NUM (AL_ETH_PROTO_ID_ANY)
+
+/* TX tunneling (encapsulation) mode; note that value 2 is unused here */
+enum AL_ETH_TX_TUNNEL_MODE {
+ AL_ETH_NO_TUNNELING = 0,
+ AL_ETH_TUNNEL_NO_UDP = 1, /* NVGRE / IP over IP */
+ AL_ETH_TUNNEL_WITH_UDP = 3, /* VXLAN */
+};
+
+#define AL_ETH_RX_THASH_TABLE_SIZE (1 << 8)
+#define AL_ETH_RX_FSM_TABLE_SIZE (1 << 7)
+#define AL_ETH_RX_CTRL_TABLE_SIZE (1 << 11)
+#define AL_ETH_RX_HASH_KEY_NUM 10
+#define AL_ETH_FWD_MAC_NUM 32
+#define AL_ETH_FWD_MAC_HASH_NUM 256
+#define AL_ETH_FWD_PBITS_TABLE_NUM (1 << 3)
+#define AL_ETH_FWD_PRIO_TABLE_NUM (1 << 3)
+#define AL_ETH_FWD_VID_TABLE_NUM (1 << 12)
+#define AL_ETH_FWD_DSCP_TABLE_NUM (1 << 8)
+#define AL_ETH_FWD_TC_TABLE_NUM (1 << 8)
+
+/** MAC media mode: selects the operating mode/interface of the Ethernet MAC */
+enum al_eth_mac_mode {
+ AL_ETH_MAC_MODE_RGMII,
+ AL_ETH_MAC_MODE_SGMII,
+ AL_ETH_MAC_MODE_SGMII_2_5G,
+ AL_ETH_MAC_MODE_10GbE_Serial, /**< Applies to XFI and KR modes */
+ AL_ETH_MAC_MODE_10G_SGMII, /**< SGMII using the 10G MAC, don't use*/
+ AL_ETH_MAC_MODE_XLG_LL_40G, /**< applies to 40G mode using the 40G low latency (LL) MAC */
+ AL_ETH_MAC_MODE_KR_LL_25G, /**< applies to 25G mode using the 10/25G low latency (LL) MAC */
+ AL_ETH_MAC_MODE_XLG_LL_50G /**< applies to 50G mode using the 40/50G low latency (LL) MAC */
+};
+
+/*
+ * Link/MAC capability report.
+ * HD/FD = half/full duplex at the named speed (Mbps).
+ */
+struct al_eth_capabilities {
+ al_bool speed_10_HD;
+ al_bool speed_10_FD;
+ al_bool speed_100_HD;
+ al_bool speed_100_FD;
+ al_bool speed_1000_HD;
+ al_bool speed_1000_FD;
+ al_bool speed_10000_HD;
+ al_bool speed_10000_FD;
+ al_bool pfc; /**< priority flow control */
+ al_bool eee; /**< Energy Efficient Ethernet */
+};
+
+/** interface type used for MDIO (access via the 1G MAC or the 10G MAC) */
+enum al_eth_mdio_if {
+ AL_ETH_MDIO_IF_1G_MAC = 0,
+ AL_ETH_MDIO_IF_10G_MAC = 1
+};
+
+/** MDIO protocol type (IEEE 802.3 Clause 22 or Clause 45 frame format) */
+enum al_eth_mdio_type {
+ AL_ETH_MDIO_TYPE_CLAUSE_22 = 0,
+ AL_ETH_MDIO_TYPE_CLAUSE_45 = 1
+};
+
+/** flow control mode: global link pause vs. per-priority flow control (PFC) */
+enum al_eth_flow_control_type {
+ AL_ETH_FLOW_CONTROL_TYPE_LINK_PAUSE,
+ AL_ETH_FLOW_CONTROL_TYPE_PFC
+};
+
+/*
+ * Tx to Rx switching decision type: selects what the forwarding decision is
+ * based on (destination MAC, VLAN table, both, or an explicit bitmap).
+ */
+enum al_eth_tx_switch_dec_type {
+ AL_ETH_TX_SWITCH_TYPE_MAC = 0,
+ AL_ETH_TX_SWITCH_TYPE_VLAN_TABLE = 1,
+ AL_ETH_TX_SWITCH_TYPE_VLAN_TABLE_AND_MAC = 2,
+ AL_ETH_TX_SWITCH_TYPE_BITMAP = 3
+};
+
+/*
+ * Tx to Rx VLAN ID selection type.
+ * NOTE(review): presumably selects which VLAN ID (original, new, default, or
+ * final tag 1/2) feeds the VLAN-table switching lookup - confirm in HAL code.
+ */
+enum al_eth_tx_switch_vid_sel_type {
+ AL_ETH_TX_SWITCH_VID_SEL_TYPE_VLAN1 = 0,
+ AL_ETH_TX_SWITCH_VID_SEL_TYPE_VLAN2 = 1,
+ AL_ETH_TX_SWITCH_VID_SEL_TYPE_NEW_VLAN1 = 2,
+ AL_ETH_TX_SWITCH_VID_SEL_TYPE_NEW_VLAN2 = 3,
+ AL_ETH_TX_SWITCH_VID_SEL_TYPE_DEFAULT_VLAN1 = 4,
+ AL_ETH_TX_SWITCH_VID_SEL_TYPE_FINAL_VLAN1 = 5
+};
+
+/** Rx descriptor configurations */
+/* Note: when selecting rx descriptor field to inner packet, then that field
+* will be set according to inner packet when packet is tunneled, for non-tunneled
+* packets, the field will be set according to the packets header */
+
+/** selection of the value reported in the LRO_context_value Metadata field */
+enum al_eth_rx_desc_lro_context_val_res {
+ AL_ETH_LRO_CONTEXT_VALUE = 0, /**< LRO_context_value */
+ AL_ETH_L4_OFFSET = 1, /**< L4_offset */
+};
+
+/** selection of the L4 offset in the Metadata (outer vs. inner header) */
+enum al_eth_rx_desc_l4_offset_sel {
+ AL_ETH_L4_OFFSET_OUTER = 0, /**< set L4 offset of the outer packet */
+ AL_ETH_L4_OFFSET_INNER = 1, /**< set L4 offset of the inner packet */
+};
+
+/** selection of the L4 checksum result in the Metadata (inner only, or
+ * inner AND outer combined) */
+enum al_eth_rx_desc_l4_chk_res_sel {
+ AL_ETH_L4_INNER_CHK = 0, /**< L4 checksum */
+ AL_ETH_L4_INNER_OUTER_CHK = 1, /**< Logic AND between outer and inner
+ L4 checksum result */
+};
+
+/** selection of the L3 checksum result in the Metadata; types 1-3 fold in
+ * the RoCE/FCoE CRC result in different ways */
+enum al_eth_rx_desc_l3_chk_res_sel {
+ AL_ETH_L3_CHK_TYPE_0 = 0, /**< L3 checksum */
+ AL_ETH_L3_CHK_TYPE_1 = 1, /**< L3 checksum or RoCE/FCoE CRC,
+ based on outer header */
+ AL_ETH_L3_CHK_TYPE_2 = 2, /**< If tunnel exist = 0,
+ L3 checksum or RoCE/FCoE CRC,
+ based on outer header.
+ Else,
+ logic AND between outer L3 checksum
+ (Ipv4) and inner CRC (RoCE or FcoE) */
+ AL_ETH_L3_CHK_TYPE_3 = 3, /**< combination of the L3 checksum result and
+ CRC result,based on the checksum and
+ RoCE/FCoE CRC input selections. */
+};
+
+/** selection of the L3 protocol index in the Metadata */
+enum al_eth_rx_desc_l3_proto_idx_sel {
+ AL_ETH_L3_PROTO_IDX_OUTER = 0, /**< set L3 proto index of the outer packet */
+ AL_ETH_L3_PROTO_IDX_INNER = 1, /**< set L3 proto index of the inner packet */
+};
+
+/** selection of the L3 offset in the Metadata */
+enum al_eth_rx_desc_l3_offset_sel {
+ AL_ETH_L3_OFFSET_OUTER = 0, /**< set L3 offset of the outer packet */
+ AL_ETH_L3_OFFSET_INNER = 1, /**< set L3 offset of the inner packet */
+};
+
+
+/** selection of the L4 protocol index in the Metadata */
+enum al_eth_rx_desc_l4_proto_idx_sel {
+ AL_ETH_L4_PROTO_IDX_OUTER = 0, /**< set L4 proto index of the outer packet */
+ AL_ETH_L4_PROTO_IDX_INNER = 1, /**< set L4 proto index of the inner packet */
+};
+
+/** selection of the frag indication in the Metadata */
+enum al_eth_rx_desc_frag_sel {
+ AL_ETH_FRAG_OUTER = 0, /**< set frag of the outer packet */
+ AL_ETH_FRAG_INNER = 1, /**< set frag of the inner packet */
+};
+
+/** Ethernet Rx completion descriptor */
+typedef struct {
+ uint32_t ctrl_meta;
+ uint32_t len;
+ uint32_t word2;
+ uint32_t word3;
+} al_eth_rx_cdesc;
+
/** Flow Control parameters */
struct al_eth_flow_control_params{
	enum al_eth_flow_control_type type; /**< flow control type */
	al_bool obay_enable; /**< obey received pause frames (stop tx) */
	al_bool gen_enable; /**< generate pause frames */
	uint16_t rx_fifo_th_high; /**< rx FIFO high threshold */
	uint16_t rx_fifo_th_low; /**< rx FIFO low threshold */
	uint16_t quanta; /**< pause quanta value sent in generated pause frames */
	uint16_t quanta_th; /**< quanta threshold */
	uint8_t prio_q_map[4][8]; /**< for each UDMA, defines the mapping between
				   * PFC priority and queues(in bit mask).
				   * same mapping used for obey and generation.
				   * for example:
				   * if prio_q_map[1][7] = 0xC, then TX queues 2
				   * and 3 of UDMA 1 will be stopped when pause
				   * received with priority 7, also, when RX queues
				   * 2 and 3 of UDMA 1 become almost full, then
				   * pause frame with priority 7 will be sent.
				   *
				   *note:
				   * 1) if a specific queue is not used, the caller
				   * must set its prio_q_map to 0, otherwise that
				   * queue will make the controller keep sending
				   * PAUSE packets.
				   * 2) queues of unused UDMA must be treated as above.
				   * 3) when working in LINK PAUSE mode, only entries at
				   * priority 0 will be considered.
				   */
};
+
/* Packet Tx flags */
#define AL_ETH_TX_FLAGS_TSO		AL_BIT(7)  /**< Enable TCP/UDP segmentation offloading */
#define AL_ETH_TX_FLAGS_IPV4_L3_CSUM	AL_BIT(13) /**< Enable IPv4 header checksum calculation */
#define AL_ETH_TX_FLAGS_L4_CSUM		AL_BIT(14) /**< Enable TCP/UDP checksum calculation */
#define AL_ETH_TX_FLAGS_L4_PARTIAL_CSUM	AL_BIT(17) /**< L4 partial checksum calculation */
/* Note: bit 16 is shared; its meaning depends on the controller revision (V2 vs V3) */
#define AL_ETH_TX_FLAGS_L2_MACSEC_PKT	AL_BIT(16) /**< L2 Packet type 802_3 or 802_3_MACSEC, V2 */
#define AL_ETH_TX_FLAGS_ENCRYPT		AL_BIT(16) /**< Enable TX packet encryption, V3 */
#define AL_ETH_TX_FLAGS_L2_DIS_FCS	AL_BIT(15) /**< Disable CRC calculation*/
#define AL_ETH_TX_FLAGS_TS		AL_BIT(21) /**< Timestamp the packet */

#define AL_ETH_TX_FLAGS_INT		AL_M2S_DESC_INT_EN
#define AL_ETH_TX_FLAGS_NO_SNOOP	AL_M2S_DESC_NO_SNOOP_H
+
/** this structure is used for tx packet meta data */
struct al_eth_meta_data{
	uint8_t store :1; /**< store the meta into the queues cache */
	uint8_t words_valid :4; /**< valid bit per word */

	uint8_t vlan1_cfi_sel:2;
	uint8_t vlan2_vid_sel:2;
	uint8_t vlan2_cfi_sel:2;
	uint8_t vlan2_pbits_sel:2;
	uint8_t vlan2_ether_sel:2;

	uint16_t vlan1_new_vid:12;
	uint8_t vlan1_new_cfi :1;
	uint8_t vlan1_new_pbits :3;
	uint16_t vlan2_new_vid:12;
	uint8_t vlan2_new_cfi :1;
	uint8_t vlan2_new_pbits :3;

	uint8_t l3_header_len; /**< in bytes */
	uint8_t l3_header_offset; /**< in bytes */
	uint8_t l4_header_len; /**< in words(32-bits) */

	/* rev 0 specific */
	uint8_t mss_idx_sel:3; /**< for TSO, select the register that holds the MSS */

	/* rev 1 specific */
	uint8_t ts_index:4; /**< index of register where to store the tx timestamp */
	uint16_t mss_val :14; /**< for TSO, set the mss value */
	uint8_t outer_l3_offset; /**< for tunneling mode. up to 64 bytes */
	uint8_t outer_l3_len; /**< for tunneling mode. up to 128 bytes */
};
+
/* Packet Rx flags when adding buffer to receive queue */

/*
 * VMID to be assigned to the packet descriptors
 * Requires VMID in descriptor to be enabled for the specific UDMA
 * queue.
 */
#define AL_ETH_RX_FLAGS_VMID_MASK	AL_FIELD_MASK(15, 0)
#define AL_ETH_RX_FLAGS_NO_SNOOP	AL_M2S_DESC_NO_SNOOP_H
#define AL_ETH_RX_FLAGS_INT		AL_M2S_DESC_INT_EN
#define AL_ETH_RX_FLAGS_DUAL_BUF	AL_BIT(31)

/* Packet Rx flags set by HW when receiving packet */
#define AL_ETH_RX_ERROR			AL_BIT(16) /**< layer 2 errors (FCS, bad len, etc) */
#define AL_ETH_RX_FLAGS_L4_CSUM_ERR	AL_BIT(14) /**< L4 checksum error indication */
#define AL_ETH_RX_FLAGS_L3_CSUM_ERR	AL_BIT(13) /**< L3 checksum error indication */

/* Packet Rx flags - word 3 in Rx completion descriptor */
#define AL_ETH_RX_FLAGS_CRC		AL_BIT(31)
#define AL_ETH_RX_FLAGS_L3_CSUM_2	AL_BIT(30)
#define AL_ETH_RX_FLAGS_L4_CSUM_2	AL_BIT(29)
#define AL_ETH_RX_FLAGS_SW_SRC_PORT_SHIFT	13
#define AL_ETH_RX_FLAGS_SW_SRC_PORT_MASK	AL_FIELD_MASK(15, 13)
/* Note: the LRO context value and the L4 offset share bits [10:3]; which one
 * is reported is chosen by al_eth_rx_desc_config() (lro_sel) */
#define AL_ETH_RX_FLAGS_LRO_CONTEXT_VAL_SHIFT	3
#define AL_ETH_RX_FLAGS_LRO_CONTEXT_VAL_MASK	AL_FIELD_MASK(10, 3)
#define AL_ETH_RX_FLAGS_L4_OFFSET_SHIFT		3
#define AL_ETH_RX_FLAGS_L4_OFFSET_MASK		AL_FIELD_MASK(10, 3)
#define AL_ETH_RX_FLAGS_PRIORITY_SHIFT		0
#define AL_ETH_RX_FLAGS_PRIORITY_MASK		AL_FIELD_MASK(2, 0)
+
/** packet structure. used for packet transmission and reception */
struct al_eth_pkt{
	uint32_t flags; /**< see flags above, depends on context(tx or rx) */
	enum AL_ETH_PROTO_ID l3_proto_idx; /**< L3 protocol index */
	enum AL_ETH_PROTO_ID l4_proto_idx; /**< L4 protocol index */
	uint8_t source_vlan_count:2;
	uint8_t vlan_mod_add_count:2;
	uint8_t vlan_mod_del_count:2;
	uint8_t vlan_mod_v1_ether_sel:2;
	uint8_t vlan_mod_v1_vid_sel:2;
	uint8_t vlan_mod_v1_pbits_sel:2;

	/* rev 1 specific */
	enum AL_ETH_TX_TUNNEL_MODE tunnel_mode;
	enum AL_ETH_PROTO_ID outer_l3_proto_idx; /**< for tunneling mode */

	/*
	 * VMID to be assigned to the packet descriptors
	 * Requires VMID in descriptor to be enabled for the specific UDMA
	 * queue.
	 */
	uint16_t vmid;

	uint32_t rx_header_len; /**< header buffer length of rx packet, not used */
	struct al_eth_meta_data *meta; /**< if null, then no meta added */
#ifdef AL_ETH_RX_DESC_RAW_GET
	uint32_t rx_desc_raw[4]; /**< raw copy of the rx completion descriptor */
#endif
	uint16_t rxhash; /**< rx hash result */
	uint16_t l3_offset; /**< L3 header offset */

#ifdef AL_ETH_EX
	struct al_eth_ext_metadata *ext_meta_data;
#endif

	uint8_t num_of_bufs; /**< number of valid entries in bufs[] */
	struct al_buf bufs[AL_ETH_PKT_MAX_BUFS];
};
+
struct al_ec_regs;


/** Ethernet Adapter private data structure used by this driver */
struct al_hal_eth_adapter{
	uint8_t rev_id; /**< PCI adapter revision ID */
	uint8_t udma_id; /**< the id of the UDMA used by this adapter */
	struct unit_regs __iomem * unit_regs;
	void __iomem *udma_regs_base;
	struct al_ec_regs __iomem *ec_regs_base; /**< Ethernet controller registers */
	void __iomem *ec_ints_base; /**< Ethernet controller interrupt registers */
	struct al_eth_mac_regs __iomem *mac_regs_base;
	struct interrupt_controller_ctrl __iomem *mac_ints_base;

	char *name; /**< the upper layer must keep the string area */

	struct al_udma tx_udma; /**< Tx UDMA handle */
	/* uint8_t tx_queues;*//* number of tx queues */
	struct al_udma rx_udma; /**< Rx UDMA handle */
	/* uint8_t rx_queues;*//* number of tx queues */

	uint8_t enable_rx_parser; /**< config and enable rx parsing */

	enum al_eth_flow_control_type fc_type; /**< flow control*/

	enum al_eth_mac_mode mac_mode;
	enum al_eth_mdio_if mdio_if; /**< which mac mdio interface to use */
	enum al_eth_mdio_type mdio_type; /**< mdio protocol type */
	al_bool	shared_mdio_if; /**< when AL_TRUE, the mdio interface is shared with other controllers.*/
	uint8_t curr_lt_unit; /**< current link-training unit */
#ifdef AL_ETH_EX
	struct al_eth_ex_state ex_state;
#endif
};
+
/** parameters passed from the upper layer to al_eth_adapter_init() */
struct al_eth_adapter_params{
	uint8_t rev_id; /**< PCI adapter revision ID */
	uint8_t udma_id; /**< the id of the UDMA used by this adapter */
	uint8_t enable_rx_parser; /**< when true, the rx epe parser will be enabled */
	void __iomem *udma_regs_base; /**< UDMA register base address */
	void __iomem *ec_regs_base; /**< Ethernet controller registers base address
				     * can be null if the function is virtual
				     */
	void __iomem *mac_regs_base; /**< Ethernet MAC registers base address
				      * can be null if the function is virtual
				      */
	char *name; /**< the upper layer must keep the string area */
};
+
/* adapter management */
/**
 * initialize the ethernet adapter's DMA
 * - initialize the adapter data structure
 * - initialize the Tx and Rx UDMA
 * - enable the Tx and Rx UDMA, the rings will be still disabled at this point.
 *
 * @param adapter pointer to the private structure
 * @param params the parameters passed from upper layer
 *
 * @return 0 on success. otherwise on failure.
 */
int al_eth_adapter_init(struct al_hal_eth_adapter *adapter, struct al_eth_adapter_params *params);

/**
 * stop the DMA of the ethernet adapter
 *
 * @param adapter pointer to the private structure
 *
 * @return 0 on success. otherwise on failure.
 */
int al_eth_adapter_stop(struct al_hal_eth_adapter *adapter);

/**
 * reset the ethernet adapter
 * NOTE(review): undocumented in the original header; presumably resets the
 * adapter's DMA engines — confirm semantics against the implementation.
 *
 * @param adapter pointer to the private structure
 *
 * @return 0 on success. otherwise on failure.
 */
int al_eth_adapter_reset(struct al_hal_eth_adapter *adapter);

/**
 * enable the ec and mac interrupts
 *
 * @param adapter pointer to the private structure
 *
 * @return 0 on success. otherwise on failure.
 */
int al_eth_ec_mac_ints_config(struct al_hal_eth_adapter *adapter);

/**
 * ec and mac interrupt service routine
 * read and print asserted interrupts
 *
 * @param adapter pointer to the private structure
 *
 * @return 0 on success. otherwise on failure.
 */
int al_eth_ec_mac_isr(struct al_hal_eth_adapter *adapter);

/* Q management */
/**
 * Configure and enable a queue ring
 *
 * @param adapter pointer to the private structure
 * @param type tx or rx
 * @param qid queue index
 * @param q_params queue parameters
 *
 * @return 0 on success. otherwise on failure.
 */
int al_eth_queue_config(struct al_hal_eth_adapter *adapter, enum al_udma_type type, uint32_t qid,
			struct al_udma_q_params *q_params);


/**
 * enable a queue if it was previously disabled
 *
 * @param adapter pointer to the private structure
 * @param type tx or rx
 * @param qid queue index
 *
 * @return -EPERM (not implemented yet).
 */
int al_eth_queue_enable(struct al_hal_eth_adapter *adapter, enum al_udma_type type, uint32_t qid);

/**
 * disable a queue
 *
 * @param adapter pointer to the private structure
 * @param type tx or rx
 * @param qid queue index
 *
 * @return -EPERM (not implemented yet).
 */
int al_eth_queue_disable(struct al_hal_eth_adapter *adapter, enum al_udma_type type, uint32_t qid);
+
/* MAC layer */

/**
 * configure the mac media type.
 * this function only sets the mode, but not the speed as certain mac modes
 * support multiple speeds as will be negotiated by the link layer.
 *
 * @param adapter pointer to the private structure.
 * @param mode media mode
 *
 * @return 0 on success. negative errno on failure.
 */
int al_eth_mac_config(struct al_hal_eth_adapter *adapter, enum al_eth_mac_mode mode);

/**
 * stop the mac tx and rx paths.
 *
 * @param adapter pointer to the private structure.
 *
 * @return 0 on success. negative error on failure.
 */
int al_eth_mac_stop(struct al_hal_eth_adapter *adapter);

/**
 * start the mac tx and rx paths.
 *
 * @param adapter pointer to the private structure.
 *
 * @return 0 on success. negative error on failure.
 */
int al_eth_mac_start(struct al_hal_eth_adapter *adapter);


/**
 * get the adapter capabilities (speed, duplex,..)
 * this function must not be called before configuring the mac mode using al_eth_mac_config()
 *
 * @param adapter pointer to the private structure.
 * @param caps pointer to structure that will be updated by this function
 *
 * @return 0 on success. negative errno on failure.
 */
int al_eth_capabilities_get(struct al_hal_eth_adapter *adapter, struct al_eth_capabilities *caps);

/**
 * update link auto negotiation speed and duplex mode
 * this function assumes the mac mode already set using the al_eth_mac_config()
 * function.
 *
 * @param adapter pointer to the private structure
 * @param force_1000_base_x set to AL_TRUE to force the mac to work on 1000baseX
 *	  (not relevant to RGMII)
 * @param an_enable set to AL_TRUE to enable auto negotiation
 *	  (not relevant to RGMII)
 * @param speed in mega bits, e.g 1000 stands for 1Gbps (relevant only in case
 *	  an_enable is AL_FALSE)
 * @param full_duplex set to AL_TRUE to enable full duplex mode (relevant only
 *	  in case an_enable is AL_FALSE)
 *
 * @return 0 on success. otherwise on failure.
 */
int al_eth_mac_link_config(struct al_hal_eth_adapter *adapter,
			   al_bool force_1000_base_x,
			   al_bool an_enable,
			   uint32_t speed,
			   al_bool full_duplex);
/**
 * Enable/Disable Loopback mode
 *
 * @param adapter pointer to the private structure
 * @param enable set to non-zero to enable loopback mode, 0 to disable it
 *	  (original doc incorrectly said "full duplex mode")
 *
 * @return 0 on success. otherwise on failure.
 */
int al_eth_mac_loopback_config(struct al_hal_eth_adapter *adapter, int enable);

/**
 * configure minimum and maximum rx packet length
 *
 * @param adapter pointer to the private structure
 * @param min_rx_len minimum rx packet length
 * @param max_rx_len maximum rx packet length
 * both length limits in bytes and it includes the MAC Layer header and FCS.
 *
 * @return 0 on success, otherwise on failure.
 */
int al_eth_rx_pkt_limit_config(struct al_hal_eth_adapter *adapter, uint32_t min_rx_len, uint32_t max_rx_len);
+
+
/* MDIO */

/* Reference clock frequency (platform specific) */
enum al_eth_ref_clk_freq {
	AL_ETH_REF_FREQ_375_MHZ		= 0,
	AL_ETH_REF_FREQ_187_5_MHZ	= 1,
	AL_ETH_REF_FREQ_250_MHZ		= 2,
	AL_ETH_REF_FREQ_500_MHZ		= 3,
	AL_ETH_REF_FREQ_428_MHZ         = 4,
};

/**
 * configure the MDIO hardware interface
 *
 * @param adapter pointer to the private structure
 * @param mdio_type clause type
 * @param shared_mdio_if set to AL_TRUE if multiple controllers are using the
 *	  same MDIO pins of the chip
 * @param ref_clk_freq reference clock frequency
 * @param mdio_clk_freq_khz the required MDC/MDIO clock frequency [Khz]
 *
 * @return 0 on success, otherwise on failure.
 */
int al_eth_mdio_config(struct al_hal_eth_adapter *adapter,
		       enum al_eth_mdio_type mdio_type,
		       al_bool shared_mdio_if,
		       enum al_eth_ref_clk_freq ref_clk_freq,
		       unsigned int mdio_clk_freq_khz);

/**
 * read mdio register
 * this function uses polling mode, and as the mdio is slow interface, it might
 * block the cpu for long time (milliseconds).
 *
 * @param adapter pointer to the private structure
 * @param phy_addr address of mdio phy
 * @param device address of mdio device (used only in CLAUSE 45)
 * @param reg index of the register
 * @param val pointer for read value of the register
 *
 * @return 0 on success, negative errno on failure
 */
int al_eth_mdio_read(struct al_hal_eth_adapter *adapter, uint32_t phy_addr,
		     uint32_t device, uint32_t reg, uint16_t *val);

/**
 * write mdio register
 * this function uses polling mode, and as the mdio is slow interface, it might
 * block the cpu for long time (milliseconds).
 *
 * @param adapter pointer to the private structure
 * @param phy_addr address of mdio phy
 * @param device address of mdio device (used only in CLAUSE 45)
 * @param reg index of the register
 * @param val value to write
 *
 * @return 0 on success, negative errno on failure
 */
int al_eth_mdio_write(struct al_hal_eth_adapter *adapter, uint32_t phy_addr,
		      uint32_t device, uint32_t reg, uint16_t val);
+
+/* TX */
+/**
+ * get number of free tx descriptors
+ *
+ * @param adapter adapter handle
+ * @param qid queue index
+ *
+ * @return num of free descriptors.
+ */
+static INLINE uint32_t al_eth_tx_available_get(struct al_hal_eth_adapter *adapter,
+ uint32_t qid)
+{
+ struct al_udma_q *udma_q;
+
+ al_udma_q_handle_get(&adapter->tx_udma, qid, &udma_q);
+
+ return al_udma_available_get(udma_q);
+}
+
/**
 * prepare packet descriptors in tx queue.
 *
 * This function prepares the descriptors for the given packet in the tx
 * submission ring. the caller must call al_eth_tx_pkt_action() below
 * in order to notify the hardware about the new descriptors.
 *
 * @param tx_dma_q pointer to UDMA tx queue
 * @param pkt the packet to transmit
 *
 * @return number of descriptors used for this packet, 0 if no free
 * room in the descriptors ring
 */
int al_eth_tx_pkt_prepare(struct al_udma_q *tx_dma_q, struct al_eth_pkt *pkt);


/**
 * Trigger the DMA about previously added tx descriptors.
 *
 * @param tx_dma_q pointer to UDMA tx queue
 * @param tx_descs number of descriptors to notify the DMA about.
 * the tx_descs can be sum of descriptor numbers of multiple prepared packets,
 * this way the caller can use this function to notify the DMA about multiple
 * packets.
 */
void al_eth_tx_dma_action(struct al_udma_q *tx_dma_q, uint32_t tx_descs);

/**
 * get number of completed tx descriptors, upper layer should derive from
 * this information which packets were completed.
 *
 * @param tx_dma_q pointer to UDMA tx queue
 *
 * @return number of completed tx descriptors.
 */
int al_eth_comp_tx_get(struct al_udma_q *tx_dma_q);

/**
 * configure a TSO MSS val
 *
 * the TSO MSS vals are preconfigured values for MSS stored in hardware and the
 * packet could use them when not working in MSS explicit mode.
 *
 * @param adapter pointer to the private structure
 * @param idx the mss index
 * @param mss_val the MSS value
 *
 * @return 0 on success. otherwise on failure.
 */
int al_eth_tso_mss_config(struct al_hal_eth_adapter *adapter, uint8_t idx, uint32_t mss_val);
+
/* RX */
/**
 * Config the RX descriptor fields
 *
 * (parameters listed in prototype order; the original doc omitted
 * l3_offset_sel)
 *
 * @param adapter pointer to the private structure
 * @param lro_sel select LRO context or l4 offset
 * @param l4_offset_sel select l4 offset source
 * @param l3_offset_sel select l3 offset source
 * @param l4_sel select the l4 checksum result
 * @param l3_sel select the l3 checksum result
 * @param l3_proto_sel select the l3 protocol index source
 * @param l4_proto_sel select the l4 protocol index source
 * @param frag_sel select the frag indication source
 */
void al_eth_rx_desc_config(
			struct al_hal_eth_adapter *adapter,
			enum al_eth_rx_desc_lro_context_val_res lro_sel,
			enum al_eth_rx_desc_l4_offset_sel l4_offset_sel,
			enum al_eth_rx_desc_l3_offset_sel l3_offset_sel,
			enum al_eth_rx_desc_l4_chk_res_sel l4_sel,
			enum al_eth_rx_desc_l3_chk_res_sel l3_sel,
			enum al_eth_rx_desc_l3_proto_idx_sel l3_proto_sel,
			enum al_eth_rx_desc_l4_proto_idx_sel l4_proto_sel,
			enum al_eth_rx_desc_frag_sel frag_sel);

/**
 * Configure RX header split
 *
 * @param adapter pointer to the private structure
 * @param enable header split when AL_TRUE
 * @param header_len length in bytes of the header split, this value used when
 * CTRL TABLE header split len select is set to
 * AL_ETH_CTRL_TABLE_HDR_SPLIT_LEN_SEL_REG, in this case the controller will
 * store the first header_len bytes into buf2, then the rest (if any) into buf1.
 * when CTRL_TABLE header split len select set to other value, then the header len
 * determined according to the parser, and the header_len parameter is not
 * used.
 *
 * @return 0 on success. otherwise on failure.
 */
int al_eth_rx_header_split_config(struct al_hal_eth_adapter *adapter, al_bool enable, uint32_t header_len);

/**
 * enable / disable header split in the udma queue.
 * length will be taken from the udma configuration to enable different length per queue.
 *
 * @param adapter pointer to the private structure
 * @param enable header split when AL_TRUE
 * @param qid the queue id to enable/disable header split
 * @param header_len in what len the udma will cut the header
 *
 * @return 0 on success.
 */
int al_eth_rx_header_split_force_len_config(struct al_hal_eth_adapter *adapter,
					al_bool enable,
					uint32_t qid,
					uint32_t header_len);
+
/**
 * add buffer to receive queue
 *
 * @param rx_dma_q pointer to UDMA rx queue
 * @param buf pointer to data buffer
 * @param flags bitwise of AL_ETH_RX_FLAGS
 * @param header_buf this is not used so far and header_buf should be set to
 * NULL.
 *
 * @return 0 on success. otherwise on failure.
 */
int al_eth_rx_buffer_add(struct al_udma_q *rx_dma_q,
			      struct al_buf *buf, uint32_t flags,
			      struct al_buf *header_buf);

/**
 * notify the hw engine about rx descriptors that were added to the receive queue
 *
 * @param rx_dma_q pointer to UDMA rx queue
 * @param descs_num number of rx descriptors
 */
void al_eth_rx_buffer_action(struct al_udma_q *rx_dma_q,
				uint32_t descs_num);

/**
 * get packet from RX completion ring
 *
 * @param rx_dma_q pointer to UDMA rx queue
 * @param pkt pointer to a packet data structure, this function fills this
 * structure with the information about the received packet. the buffers
 * structures filled only with the length of the data written into the buffer,
 * the address fields are not updated as the upper layer can retrieve this
 * information by itself because the hardware uses the buffers in the same order
 * were those buffers inserted into the ring of the receive queue.
 * this structure should be allocated by the caller function.
 *
 * @return return number of descriptors or 0 if no completed packet found.
 */
uint32_t al_eth_pkt_rx(struct al_udma_q *rx_dma_q, struct al_eth_pkt *pkt);
+
+
+/* RX parser table */
+struct al_eth_epe_p_reg_entry {
+ uint32_t data;
+ uint32_t mask;
+ uint32_t ctrl;
+};
+
+struct al_eth_epe_control_entry {
+ uint32_t data[6];
+};
+
+/**
+ * update rx parser entry
+ *
+ * @param adapter pointer to the private structure
+ * @param idx the protocol index to update
+ * @param reg_entry contents of parser register entry
+ * @param control entry contents of control table entry
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_rx_parser_entry_update(struct al_hal_eth_adapter *adapter, uint32_t idx,
+ struct al_eth_epe_p_reg_entry *reg_entry,
+ struct al_eth_epe_control_entry *control_entry);
+
/* Flow Steering and filtering */
/** set a thash (Toeplitz hash) table entry: maps a hash result to udma/queue */
int al_eth_thash_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint8_t udma, uint32_t queue);

/* FSM table bits */
/** FSM table has 7 bits input address:
 * bits[2:0] are the outer packet's type (IPv4, TCP...)
 * bits[5:3] are the inner packet's type
 * bit[6] is set when packet is tunneled.
 *
 * The output of each entry:
 * bits[1:0] - input selection: selects the input for the thash (2/4 tuple, inner/outer)
 * bit[2] - selects whether to use thash output, or default values for the queue and udma
 * bits[6:3] default UDMA mask: the UDMAs to select when bit 2 above was unset
 * bits[9:5] default queue: the queue index to select when bit 2 above was unset
 *
 * NOTE(review): the UDMA mask bits[6:3] and the queue shift (bit 5, see
 * AL_ETH_FSM_DATA_DEFAULT_Q_SHIFT below) appear to overlap at bits 5-6 —
 * confirm the exact field layout against the hardware spec.
 */

#define AL_ETH_FSM_ENTRY_IPV4_TCP	     0
#define AL_ETH_FSM_ENTRY_IPV4_UDP	     1
#define AL_ETH_FSM_ENTRY_IPV6_TCP	     2
#define AL_ETH_FSM_ENTRY_IPV6_UDP	     3
#define AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP     4
#define AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP     5
#define AL_ETH_FSM_ENTRY_IPV4_FRAGMENTED     6
#define AL_ETH_FSM_ENTRY_NOT_IP		     7

/* helpers to build/decompose a 7-bit FSM table index */
#define AL_ETH_FSM_ENTRY_OUTER(idx)	((idx) & 7)
#define AL_ETH_FSM_ENTRY_INNER(idx)	(((idx) >> 3) & 7)
#define AL_ETH_FSM_ENTRY_TUNNELED(idx)	(((idx) >> 6) & 1)

/* FSM DATA format */
#define AL_ETH_FSM_DATA_OUTER_2_TUPLE	0
#define AL_ETH_FSM_DATA_OUTER_4_TUPLE	1
#define AL_ETH_FSM_DATA_INNER_2_TUPLE	2
#define AL_ETH_FSM_DATA_INNER_4_TUPLE	3

#define AL_ETH_FSM_DATA_HASH_SEL	(1 << 2)

#define AL_ETH_FSM_DATA_DEFAULT_Q_SHIFT		5
#define AL_ETH_FSM_DATA_DEFAULT_UDMA_SHIFT	3

/* set fsm table entry */
int al_eth_fsm_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint32_t entry);
+
/* match values for the control table index fields; the _ANY value matches
 * all possible values of that field */
enum AL_ETH_FWD_CTRL_IDX_VLAN_TABLE_OUT {
	AL_ETH_FWD_CTRL_IDX_VLAN_TABLE_OUT_0 = 0,
	AL_ETH_FWD_CTRL_IDX_VLAN_TABLE_OUT_1 = 1,
	AL_ETH_FWD_CTRL_IDX_VLAN_TABLE_OUT_ANY = 2,
};

enum AL_ETH_FWD_CTRL_IDX_TUNNEL {
	AL_ETH_FWD_CTRL_IDX_TUNNEL_NOT_EXIST = 0,
	AL_ETH_FWD_CTRL_IDX_TUNNEL_EXIST = 1,
	AL_ETH_FWD_CTRL_IDX_TUNNEL_ANY = 2,
};

enum AL_ETH_FWD_CTRL_IDX_VLAN {
	AL_ETH_FWD_CTRL_IDX_VLAN_NOT_EXIST = 0,
	AL_ETH_FWD_CTRL_IDX_VLAN_EXIST = 1,
	AL_ETH_FWD_CTRL_IDX_VLAN_ANY = 2,
};

enum AL_ETH_FWD_CTRL_IDX_MAC_TABLE {
	AL_ETH_FWD_CTRL_IDX_MAC_TABLE_NO_MATCH = 0,
	AL_ETH_FWD_CTRL_IDX_MAC_TABLE_MATCH = 1,
	AL_ETH_FWD_CTRL_IDX_MAC_TABLE_ANY = 2,
};

enum AL_ETH_FWD_CTRL_IDX_MAC_DA_TYPE {
	AL_ETH_FWD_CTRL_IDX_MAC_DA_TYPE_UC = 0, /**< unicast */
	AL_ETH_FWD_CTRL_IDX_MAC_DA_TYPE_MC = 1, /**< multicast */
	AL_ETH_FWD_CTRL_IDX_MAC_DA_TYPE_BC = 2, /**< broadcast */
	AL_ETH_FWD_CTRL_IDX_MAC_DA_TYPE_ANY = 4, /**< for sw usage */
};

/**
 * This structure defines the index or group of indices within the control table.
 * each field has a special enum value (with _ANY suffix) that indicates all
 * possible values of that field.
 */
struct al_eth_fwd_ctrl_table_index {
	enum AL_ETH_FWD_CTRL_IDX_VLAN_TABLE_OUT	vlan_table_out;
	enum AL_ETH_FWD_CTRL_IDX_TUNNEL		tunnel_exist;
	enum AL_ETH_FWD_CTRL_IDX_VLAN		vlan_exist;
	enum AL_ETH_FWD_CTRL_IDX_MAC_TABLE	mac_table_match;
	enum AL_ETH_PROTO_ID			protocol_id;
	enum AL_ETH_FWD_CTRL_IDX_MAC_DA_TYPE	mac_type;
};
+
/** Source selection for the packet priority used by the Rx forwarding engine.
 *
 * NOTE(review): the original code had AL_ETH_CTRL_TABLE_PRIO_SEL_REG6 = 7,
 * which duplicated REG5 and left a gap before REG7 = 9. The surrounding
 * sequence (REG1..REG8 = 3..10, then VAL_3 = 11, VAL_0 = 12) makes this an
 * apparent typo; REG6 is fixed to 8 here. Confirm against the hardware
 * specification before relying on the REG5/REG6 selections.
 */
enum AL_ETH_CTRL_TABLE_PRIO_SEL {
	AL_ETH_CTRL_TABLE_PRIO_SEL_PBITS_TABLE	= 0,
	AL_ETH_CTRL_TABLE_PRIO_SEL_DSCP_TABLE	= 1,
	AL_ETH_CTRL_TABLE_PRIO_SEL_TC_TABLE	= 2,
	AL_ETH_CTRL_TABLE_PRIO_SEL_REG1		= 3,
	AL_ETH_CTRL_TABLE_PRIO_SEL_REG2		= 4,
	AL_ETH_CTRL_TABLE_PRIO_SEL_REG3		= 5,
	AL_ETH_CTRL_TABLE_PRIO_SEL_REG4		= 6,
	AL_ETH_CTRL_TABLE_PRIO_SEL_REG5		= 7,
	AL_ETH_CTRL_TABLE_PRIO_SEL_REG6		= 8,	/* was 7 (duplicate of REG5) */
	AL_ETH_CTRL_TABLE_PRIO_SEL_REG7		= 9,
	AL_ETH_CTRL_TABLE_PRIO_SEL_REG8		= 10,
	AL_ETH_CTRL_TABLE_PRIO_SEL_VAL_3	= 11,
	AL_ETH_CTRL_TABLE_PRIO_SEL_VAL_0	= 12,
};
/** where to select the initial queue from */
enum AL_ETH_CTRL_TABLE_QUEUE_SEL_1 {
	AL_ETH_CTRL_TABLE_QUEUE_SEL_1_PRIO_TABLE	= 0,
	AL_ETH_CTRL_TABLE_QUEUE_SEL_1_THASH_TABLE	= 1,
	AL_ETH_CTRL_TABLE_QUEUE_SEL_1_MAC_TABLE	= 2,
	AL_ETH_CTRL_TABLE_QUEUE_SEL_1_MHASH_TABLE	= 3,
	AL_ETH_CTRL_TABLE_QUEUE_SEL_1_REG1	= 4,
	AL_ETH_CTRL_TABLE_QUEUE_SEL_1_REG2	= 5,
	AL_ETH_CTRL_TABLE_QUEUE_SEL_1_REG3	= 6,
	AL_ETH_CTRL_TABLE_QUEUE_SEL_1_REG4	= 7,
	/* NOTE(review): values 8-11 are unused here — presumably reserved;
	 * confirm against the hardware spec */
	AL_ETH_CTRL_TABLE_QUEUE_SEL_1_VAL_3	= 12,
	AL_ETH_CTRL_TABLE_QUEUE_SEL_1_VAL_0	= 13,
};

/** target queue will be built up from the priority and initial queue */
enum AL_ETH_CTRL_TABLE_QUEUE_SEL_2 {
	AL_ETH_CTRL_TABLE_QUEUE_SEL_2_PRIO_TABLE	= 0, /**< target queue is the output of priority table */
	AL_ETH_CTRL_TABLE_QUEUE_SEL_2_PRIO		= 1, /**< target queue is the priority */
	AL_ETH_CTRL_TABLE_QUEUE_SEL_2_PRIO_QUEUE	= 2, /**< target queue is initial queue[0], priority[1] */
	AL_ETH_CTRL_TABLE_QUEUE_SEL_2_NO_PRIO		= 3, /**< target queue is the initial */
};

/** where to select the target UDMA from */
enum AL_ETH_CTRL_TABLE_UDMA_SEL {
	AL_ETH_CTRL_TABLE_UDMA_SEL_THASH_TABLE	= 0,
	AL_ETH_CTRL_TABLE_UDMA_SEL_THASH_AND_VLAN	= 1,
	AL_ETH_CTRL_TABLE_UDMA_SEL_VLAN_TABLE	= 2,
	AL_ETH_CTRL_TABLE_UDMA_SEL_VLAN_AND_MAC	= 3,
	AL_ETH_CTRL_TABLE_UDMA_SEL_MAC_TABLE	= 4,
	AL_ETH_CTRL_TABLE_UDMA_SEL_MAC_AND_MHASH	= 5,
	AL_ETH_CTRL_TABLE_UDMA_SEL_MHASH_TABLE	= 6,
	AL_ETH_CTRL_TABLE_UDMA_SEL_REG1		= 7,
	AL_ETH_CTRL_TABLE_UDMA_SEL_REG2		= 8,
	AL_ETH_CTRL_TABLE_UDMA_SEL_REG3		= 9,
	AL_ETH_CTRL_TABLE_UDMA_SEL_REG4		= 10,
	AL_ETH_CTRL_TABLE_UDMA_SEL_REG5		= 11,
	AL_ETH_CTRL_TABLE_UDMA_SEL_REG6		= 12,
	AL_ETH_CTRL_TABLE_UDMA_SEL_REG7		= 13,
	AL_ETH_CTRL_TABLE_UDMA_SEL_REG8		= 14,
	AL_ETH_CTRL_TABLE_UDMA_SEL_VAL_0	= 15,
};

/** where to take the header-split length from */
enum AL_ETH_CTRL_TABLE_HDR_SPLIT_LEN_SEL {
	AL_ETH_CTRL_TABLE_HDR_SPLIT_LEN_SEL_0 = 0,
	AL_ETH_CTRL_TABLE_HDR_SPLIT_LEN_SEL_REG = 1, /**< select header len from the hdr_split register (set by al_eth_rx_header_split_config())*/
	AL_ETH_CTRL_TABLE_HDR_SPLIT_LEN_SEL_OUTER_L3_OFFSET = 2,
	AL_ETH_CTRL_TABLE_HDR_SPLIT_LEN_SEL_OUTER_L4_OFFSET = 3,
	AL_ETH_CTRL_TABLE_HDR_SPLIT_LEN_SEL_TUNNEL_START_OFFSET = 4,
	AL_ETH_CTRL_TABLE_HDR_SPLIT_LEN_SEL_INNER_L3_OFFSET = 5,
	AL_ETH_CTRL_TABLE_HDR_SPLIT_LEN_SEL_INNER_L4_OFFSET = 6,
};
+
/** one Rx forwarding control table entry */
struct al_eth_fwd_ctrl_table_entry {
	enum AL_ETH_CTRL_TABLE_PRIO_SEL		prio_sel; /**< priority source */
	enum AL_ETH_CTRL_TABLE_QUEUE_SEL_1	queue_sel_1; /**< queue id source */
	enum AL_ETH_CTRL_TABLE_QUEUE_SEL_2	queue_sel_2; /**< mix queue id with priority */
	enum AL_ETH_CTRL_TABLE_UDMA_SEL		udma_sel; /**< target UDMA source */
	enum AL_ETH_CTRL_TABLE_HDR_SPLIT_LEN_SEL hdr_split_len_sel; /**< header split length source */
	al_bool					filter; /**< set to AL_TRUE to enable filtering */
};
/**
 * Configure default control table entry
 *
 * @param adapter pointer to the private structure
 * @param use_table set to AL_TRUE if control table is used, when set to AL_FALSE
 * then control table will be bypassed and the entry value will be used.
 * @param entry defines the value to be used when bypassing control table.
 *
 * @return 0 on success. otherwise on failure.
 */
int al_eth_ctrl_table_def_set(struct al_hal_eth_adapter *adapter,
			      al_bool use_table,
			      struct al_eth_fwd_ctrl_table_entry *entry);

/**
 * Configure control table entry
 *
 * @param adapter pointer to the private structure
 * @param index the entry index within the control table.
 * @param entry the value to write to the control table entry
 *
 * @return 0 on success. otherwise on failure.
 */
int al_eth_ctrl_table_set(struct al_hal_eth_adapter *adapter,
			  struct al_eth_fwd_ctrl_table_index *index,
			  struct al_eth_fwd_ctrl_table_entry *entry);

/* raw (pre-encoded 32-bit value) variants of the two functions above */
int al_eth_ctrl_table_raw_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint32_t entry);
int al_eth_ctrl_table_def_raw_set(struct al_hal_eth_adapter *adapter, uint32_t val);
+
/**
 * Configure hash key initial registers
 * Those registers define the initial key values, those values used for
 * the THASH and MHASH hash functions.
 *
 * @param adapter pointer to the private structure
 * @param idx the register index
 * @param val the register value
 *
 * @return 0 on success. otherwise on failure.
 */
int al_eth_hash_key_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint32_t val);

/** one forwarding MAC table entry */
struct al_eth_fwd_mac_table_entry {
	uint8_t		addr[6]; /**< byte 0 is the first byte seen on the wire */
	uint8_t		mask[6]; /**< per-byte compare mask applied to addr */
	al_bool		tx_valid; /**< entry is valid for the tx direction */
	uint8_t		tx_target;
	al_bool		rx_valid; /**< entry is valid for the rx direction */
	uint8_t		udma_mask; /**< target udma */
	uint8_t		qid; /**< target queue */
	al_bool		filter; /**< set to AL_TRUE to enable filtering */
};

/**
 * Configure mac table entry
 * The HW traverses this table and looks for a match from the lowest index;
 * when the packet's MAC DA & mask == addr, and the valid bit is set, then a
 * match occurs.
 *
 * @param adapter pointer to the private structure
 * @param idx the entry index within the mac table.
 * @param entry the contents of the MAC table entry
 *
 * @return 0 on success. otherwise on failure.
 */
int al_eth_fwd_mac_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx,
			     struct al_eth_fwd_mac_table_entry *entry);

/* raw (pre-encoded register value) variants of the MAC table accessors */
int al_eth_fwd_mac_addr_raw_set(struct al_hal_eth_adapter *adapter, uint32_t idx,
				uint32_t addr_lo, uint32_t addr_hi, uint32_t mask_lo, uint32_t mask_hi);
int al_eth_fwd_mac_ctrl_raw_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint32_t ctrl);

/* store/read a MAC address to/from the controller registers */
int al_eth_mac_addr_store(void * __iomem ec_base, uint32_t idx, uint8_t *addr);
int al_eth_mac_addr_read(void * __iomem ec_base, uint32_t idx, uint8_t *addr);
+
/**
 * Configure pbits table entry
 * The HW uses this table to translate between vlan pbits field to priority.
 * The vlan pbits is used as the index of this table.
 *
 * @param adapter pointer to the private structure
 * @param idx the entry index within the table.
 * @param prio the priority to set for this entry
 *
 * @return 0 on success. otherwise on failure.
 */
int al_eth_fwd_pbits_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint8_t prio);

/**
 * Configure priority table entry
 * The HW uses this table to translate between priority to queue index.
 * The priority is used as the index of this table.
 *
 * @param adapter pointer to the private structure
 * @param prio the entry index within the table.
 * @param qid the queue index to set for this entry (priority).
 *
 * @return 0 on success. otherwise on failure.
 */
int al_eth_fwd_priority_table_set(struct al_hal_eth_adapter *adapter, uint8_t prio, uint8_t qid);

/**
 * Configure DSCP table entry
 * The HW uses this table to translate between IPv4 DSCP field to priority.
 * The IPv4 byte 1 (DSCP+ECN) used as index to this table.
 *
 * @param adapter pointer to the private structure
 * @param idx the entry index within the table.
 * @param prio the priority value to set for this entry
 *	  (original doc incorrectly said "queue index")
 *
 * @return 0 on success. otherwise on failure.
 */
int al_eth_fwd_dscp_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint8_t prio);

/**
 * Configure TC table entry
 * The HW uses this table to translate between IPv6 TC field to priority.
 * The IPv6 TC used as index to this table.
 *
 * @param adapter pointer to the private structure
 * @param idx the entry index within the table.
 * @param prio the priority value to set for this entry
 *	  (original doc incorrectly said "queue index")
 *
 * @return 0 on success. otherwise on failure.
 */
int al_eth_fwd_tc_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint8_t prio);

/**
 * Configure MAC HASH table entry
 * The HW uses 8 bits from the hash result on the MAC DA as index to this table.
 *
 * @param adapter pointer to the private structure
 * @param idx the entry index within the table.
 * @param udma_mask the target udma to set for this entry.
 * @param qid the target queue index to set for this entry.
 *
 * @return 0 on success. otherwise on failure.
 */
int al_eth_fwd_mhash_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint8_t udma_mask, uint8_t qid);
+/** Entry of the VLAN ID forwarding table (see al_eth_fwd_vid_table_set()). */
+struct al_eth_fwd_vid_table_entry {
+ uint8_t control:1; /**< used as input for the control table */
+ uint8_t filter:1; /**< set to 1 to enable filtering */
+ uint8_t udma_mask:4; /**< target udmas */
+};
+
+/**
+ * Configure default vlan table entry
+ *
+ * @param adapter pointer to the private structure
+ * @param use_table set to AL_TRUE if vlan table is used, when set to AL_FALSE
+ * then vid table will be bypassed and the default_entry value will be used.
+ * @param default_entry defines the value to be used when bypassing vid table.
+ * @param default_vlan defines the value to be used when an untagged packet is
+ * received. This value is used only for steering and filtering control;
+ * the packet's data is not changed.
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_fwd_vid_config_set(struct al_hal_eth_adapter *adapter, al_bool use_table,
+ struct al_eth_fwd_vid_table_entry *default_entry,
+ uint32_t default_vlan);
+/**
+ * Configure vlan table entry
+ *
+ * @param adapter pointer to the private structure
+ * @param idx the entry index within the vlan table. The HW uses the vlan id
+ * field of the packet when accessing this table.
+ * @param entry the value to write to the vlan table entry
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_fwd_vid_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx,
+ struct al_eth_fwd_vid_table_entry *entry);
+
+
+/**
+ * Configure default UDMA register
+ * When the control table entry udma selection set to AL_ETH_CTRL_TABLE_UDMA_SEL_REG<n>,
+ * then the target UDMA will be set according to the register n of the default
+ * UDMA registers.
+ *
+ * @param adapter pointer to the private structure
+ * @param idx the index of the default register.
+ * @param udma_mask the value of the register.
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_fwd_default_udma_config(struct al_hal_eth_adapter *adapter, uint32_t idx,
+ uint8_t udma_mask);
+
+/**
+ * Configure default queue register
+ * When the control table entry queue selection 1 set to AL_ETH_CTRL_TABLE_QUEUE_SEL_1_REG<n>,
+ * then the target queue will be set according to the register n of the default
+ * queue registers.
+ *
+ * @param adapter pointer to the private structure
+ * @param idx the index of the default register.
+ * @param qid the value of the register.
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_fwd_default_queue_config(struct al_hal_eth_adapter *adapter, uint32_t idx,
+ uint8_t qid);
+
+/**
+ * Configure default priority register
+ * When the control table entry queue selection 1 set to AL_ETH_CTRL_TABLE_PRIO_SEL_1_REG<n>,
+ * then the target priority will be set according to the register n of the default
+ * priority registers.
+ *
+ * @param adapter pointer to the private structure
+ * @param idx the index of the default register.
+ * @param prio the value of the register.
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_fwd_default_priority_config(struct al_hal_eth_adapter *adapter, uint32_t idx,
+ uint8_t prio);
+
+
+
+/* filter undetected MAC DA */
+#define AL_ETH_RFW_FILTER_UNDET_MAC (1 << 0)
+/* filter specific MAC DA based on MAC table output */
+#define AL_ETH_RFW_FILTER_DET_MAC (1 << 1)
+/* filter all tagged */
+#define AL_ETH_RFW_FILTER_TAGGED (1 << 2)
+/* filter all untagged */
+#define AL_ETH_RFW_FILTER_UNTAGGED (1 << 3)
+/* filter all broadcast */
+#define AL_ETH_RFW_FILTER_BC (1 << 4)
+/* filter all multicast */
+#define AL_ETH_RFW_FILTER_MC (1 << 5)
+/* filter packet based on parser drop */
+#define AL_ETH_RFW_FILTER_PARSE (1 << 6)
+/* filter packet based on VLAN table output */
+#define AL_ETH_RFW_FILTER_VLAN_VID (1 << 7)
+/* filter packet based on control table output */
+#define AL_ETH_RFW_FILTER_CTRL_TABLE (1 << 8)
+/* filter packet based on protocol index */
+#define AL_ETH_RFW_FILTER_PROT_INDEX (1 << 9)
+/* filter packet based on WoL decision */
+#define AL_ETH_RFW_FILTER_WOL (1 << 10)
+
+
+/** Rx filtering configuration, consumed by al_eth_filter_config(). */
+struct al_eth_filter_params {
+ al_bool enable; /**< enable/disable packet filtering */
+ uint32_t filters; /**< bitmask of AL_ETH_RFW_FILTER.. for filters to enable */
+ al_bool filter_proto[AL_ETH_PROTOCOLS_NUM]; /**< set AL_TRUE for protocols to filter */
+};
+
+/** Rx filter override configuration, consumed by al_eth_filter_override_config(). */
+struct al_eth_filter_override_params {
+ uint32_t filters; /**< bitmask of AL_ETH_RFW_FILTER.. for filters to override */
+ uint8_t udma; /**< target udma id */
+ uint8_t qid; /**< target queue id */
+};
+
+/**
+ * Configure the receive filters
+ * this function enables/disables filtering packets and which filtering
+ * types to apply.
+ * Filters that are indicated in tables (MAC table, VLAN and Control tables)
+ * are not configured by this function. This function only enables/disables
+ * respecting the filter indication from those tables.
+ *
+ * @param adapter pointer to the private structure
+ * @param params the parameters passed from upper layer
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_filter_config(struct al_hal_eth_adapter *adapter, struct al_eth_filter_params *params);
+
+/**
+ * Configure the receive override filters
+ * This function controls whether to force forwarding filtered packets
+ * to a specific UDMA/queue. The override filters apply only for
+ * filters that enabled by al_eth_filter_config().
+ *
+ * @param adapter pointer to the private structure
+ * @param params override config parameters
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_filter_override_config(struct al_hal_eth_adapter *adapter,
+ struct al_eth_filter_override_params *params);
+
+
+int al_eth_switching_config_set(struct al_hal_eth_adapter *adapter, uint8_t udma_id, uint8_t forward_all_to_mac, uint8_t enable_int_switching,
+ enum al_eth_tx_switch_vid_sel_type vid_sel_type,
+ enum al_eth_tx_switch_dec_type uc_dec,
+ enum al_eth_tx_switch_dec_type mc_dec,
+ enum al_eth_tx_switch_dec_type bc_dec);
+int al_eth_switching_default_bitmap_set(struct al_hal_eth_adapter *adapter, uint8_t udma_id, uint8_t udma_uc_bitmask,
+ uint8_t udma_mc_bitmask,uint8_t udma_bc_bitmask);
+int al_eth_flow_control_config(struct al_hal_eth_adapter *adapter, struct al_eth_flow_control_params *params);
+
+/** EEE (Energy Efficient Ethernet) configuration, used by al_eth_eee_config() and al_eth_eee_get(). */
+struct al_eth_eee_params{
+ uint8_t enable; /**< non-zero to enable EEE */
+ uint32_t tx_eee_timer; /**< time in cycles the interface delays prior to entering eee state */
+ uint32_t min_interval; /**< minimum interval in cycles between two eee states */
+ uint32_t stop_cnt; /**< time in cycles to stop Tx mac i/f after getting out of eee state */
+};
+
+/**
+ * configure EEE mode
+ * @param adapter pointer to the private structure.
+ * @param params pointer to the eee input parameters.
+ *
+ * @return return 0 on success. otherwise on failure.
+ */
+int al_eth_eee_config(struct al_hal_eth_adapter *adapter, struct al_eth_eee_params *params);
+
+/**
+ * get EEE configuration
+ * @param adapter pointer to the private structure.
+ * @param params pointer to the eee output parameters.
+ *
+ * @return return 0 on success. otherwise on failure.
+ */
+int al_eth_eee_get(struct al_hal_eth_adapter *adapter, struct al_eth_eee_params *params);
+
+int al_eth_vlan_mod_config(struct al_hal_eth_adapter *adapter, uint8_t udma_id, uint16_t udma_etype, uint16_t vlan1_data, uint16_t vlan2_data);
+
+/* Timestamp
+ * This is a generic time-stamp mechanism that can be used as generic to
+ * time-stamp every received or transmit packet it can also support IEEE 1588v2
+ * PTP time synchronization protocol.
+ * In addition to time-stamp, an internal system time is maintained. For
+ * further accuracy, the chip support transmit/receive clock synchronization
+ * including recovery of master clock from one of the ports and distributing it
+ * to the rest of the ports - that is outside the scope of the Ethernet
+ * Controller - please refer to Annapurna Labs Alpine Hardware Wiki
+ */
+
+/* Timestamp management APIs */
+
+/**
+ * prepare the adapter for timestamping packets.
+ * Rx timestamps requires using 8 words (8x4 bytes) rx completion descriptor
+ * size as the timestamp value added into word 4.
+ *
+ * This function should be called after al_eth_mac_config() and before
+ * enabling the queues.
+ * @param adapter pointer to the private structure.
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_ts_init(struct al_hal_eth_adapter *adapter);
+
+/* Timestamp data path APIs */
+
+/*
+ * This is the size of the on-chip array that keeps the time-stamp of the
+ * latest transmitted packets
+ */
+#define AL_ETH_PTH_TX_SAMPLES_NUM 16
+
+/**
+ * read Timestamp sample value of previously transmitted packet.
+ *
+ * The adapter includes AL_ETH_PTH_TX_SAMPLES_NUM timestamp samples for tx
+ * packets, those samples shared for all the UDMAs and queues. the al_eth_pkt
+ * data structure includes the index of which sample to use for the packet
+ * to transmit. It's the caller's responsibility to manage those samples,
+ * for example, when using an index, the caller must make sure the packet
+ * is completed and the tx time is sampled before using that index for
+ * another packet.
+ *
+ * This function should be called after the completion indication of the
+ * tx packet. however, there is a little chance that the timestamp sample
+ * won't be updated yet, thus this function must be called again when it
+ * returns -EAGAIN.
+ * @param adapter pointer to the private structure.
+ * @param ts_index the index (out of 16) of the timestamp register
+ * @param timestamp the timestamp value in 2^18 femtoseconds resolution.
+ * @return -EAGAIN if the sample was not updated yet. 0 when the sample
+ * was updated and no errors found.
+ */
+int al_eth_tx_ts_val_get(struct al_hal_eth_adapter *adapter, uint8_t ts_index,
+ uint32_t *timestamp);
+
+/* Timestamp PTH (PTP Timestamp Handler) control and times management */
+/** PTH epoch time: whole seconds plus a sub-second portion in femtoseconds */
+struct al_eth_pth_time {
+ uint32_t seconds; /**< seconds */
+ uint64_t femto; /**< femto seconds */
+};
+
+/**
+ * Read the systime value
+ * This API should not be used to get the timestamp of packets.
+ * The HW maintains 50 bits for the sub-seconds portion in femto resolution,
+ * but this function reads only the 32 MSB bits since the LSB provides
+ * sub-nanoseconds accuracy, which is not needed.
+ * @param adapter pointer to the private structure.
+ * @param systime pointer to structure where the time will be stored.
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_pth_systime_read(struct al_hal_eth_adapter *adapter,
+ struct al_eth_pth_time *systime);
+
+/**
+ * Set the clock period to a given value.
+ * The systime will be incremented by this value on each posedge of the
+ * adapters internal clock which driven by the SouthBridge clock.
+ * @param adapter pointer to the private structure.
+ * @param clk_period the clock period in femto seconds.
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_pth_clk_period_write(struct al_hal_eth_adapter *adapter,
+ uint64_t clk_period);
+
+/** Methods for updating the systime using triggers */
+enum al_eth_pth_update_method {
+ AL_ETH_PTH_UPDATE_METHOD_SET = 0, /**< Set the time in int/ext update time */
+ AL_ETH_PTH_UPDATE_METHOD_INC = 1, /**< increment */
+ AL_ETH_PTH_UPDATE_METHOD_DEC = 2, /**< decrement */
+ AL_ETH_PTH_UPDATE_METHOD_ADD_TO_LAST = 3, /**< Set to last time + int/ext update time.*/
+};
+
+/** Systime internal update trigger types */
+enum al_eth_pth_int_trig {
+ AL_ETH_PTH_INT_TRIG_OUT_PULSE_0 = 0, /**< use output pulse as trigger */
+ AL_ETH_PTH_INT_TRIG_REG_WRITE = 1, /**< use the int update register
+ * write as a trigger
+ */
+};
+
+/** Parameters for internal trigger update, consumed by al_eth_pth_int_update_config() */
+struct al_eth_pth_int_update_params {
+ al_bool enable; /**< enable internal trigger update */
+ enum al_eth_pth_update_method method; /**< internal trigger update
+ * method
+ */
+ enum al_eth_pth_int_trig trigger; /**< which internal trigger to
+ * use
+ */
+};
+
+/**
+ * Configure the systime internal update
+ *
+ * @param adapter pointer to the private structure.
+ * @param params the configuration of the internal update.
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_pth_int_update_config(struct al_hal_eth_adapter *adapter,
+ struct al_eth_pth_int_update_params *params);
+
+/**
+ * set internal update time
+ *
+ * The update time used when updating the systime with
+ * internal update method.
+ *
+ * @param adapter pointer to the private structure.
+ * @param time the internal update time value
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_pth_int_update_time_set(struct al_hal_eth_adapter *adapter,
+ struct al_eth_pth_time *time);
+
+/** Parameters for external trigger update, consumed by al_eth_pth_ext_update_config() */
+struct al_eth_pth_ext_update_params {
+ uint8_t triggers; /**< bitmask of external triggers to enable */
+ enum al_eth_pth_update_method method; /**< external trigger update
+ * method
+ */
+};
+
+/**
+ * Configure the systime external update.
+ * external update triggered by external signals such as GPIO or pulses
+ * from other eth controllers on the SoC.
+ *
+ * @param adapter pointer to the private structure.
+ * @param params the configuration of the external update.
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_pth_ext_update_config(struct al_hal_eth_adapter *adapter,
+ struct al_eth_pth_ext_update_params *params);
+
+/**
+ * set external update time
+ *
+ * The update time used when updating the systime with
+ * external update method.
+ * @param adapter pointer to the private structure.
+ * @param time the external update time value
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_pth_ext_update_time_set(struct al_hal_eth_adapter *adapter,
+ struct al_eth_pth_time *time);
+/**
+ * set the read compensation delay
+ *
+ * When reading the systime, the HW adds this value to compensate
+ * read latency.
+ *
+ * @param adapter pointer to the private structure.
+ * @param subseconds the read latency delay in femto seconds.
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_pth_read_compensation_set(struct al_hal_eth_adapter *adapter,
+ uint64_t subseconds);
+/**
+ * set the internal write compensation delay
+ *
+ * When updating the systime due to an internal trigger's event, the HW adds
+ * this value to compensate latency.
+ *
+ * @param adapter pointer to the private structure.
+ * @param subseconds the write latency delay in femto seconds.
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_pth_int_write_compensation_set(struct al_hal_eth_adapter *adapter,
+ uint64_t subseconds);
+
+/**
+ * set the external write compensation delay
+ *
+ * When updating the systime due to an external trigger's event, the HW adds
+ * this value to compensate pulse propagation latency.
+ *
+ * @param adapter pointer to the private structure.
+ * @param subseconds the write latency delay in femto seconds.
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_pth_ext_write_compensation_set(struct al_hal_eth_adapter *adapter,
+ uint64_t subseconds);
+
+/**
+ * set the sync compensation delay
+ *
+ * When the adapter passes systime from PTH to MAC to do the packets
+ * timestamping, the sync compensation delay is added to systime value to
+ * compensate the latency between the PTH and the MAC.
+ *
+ * @param adapter pointer to the private structure.
+ * @param subseconds the sync latency delay in femto seconds.
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_pth_sync_compensation_set(struct al_hal_eth_adapter *adapter,
+ uint64_t subseconds);
+
+/** Number of PTH output pulses */
+#define AL_ETH_PTH_PULSE_OUT_NUM 8
+/** Output pulse configuration, consumed by al_eth_pth_pulse_out_config() */
+struct al_eth_pth_pulse_out_params {
+ uint8_t index; /**< id of the pulse (0..7) */
+ al_bool enable; /**< enable this output pulse */
+ al_bool periodic; /**< when true, generate periodic pulse (PPS) */
+ uint8_t period_sec; /**< for periodic pulse, this is seconds
+ * portion of the period time
+ */
+ uint32_t period_us; /**< this is microseconds portion of the
+ * period
+ */
+ struct al_eth_pth_time start_time; /**< when to start pulse triggering */
+ uint64_t pulse_width; /**< pulse width in femto seconds */
+};
+
+/**
+ * Configure an output pulse
+ * This function configures an output pulse coming from the internal System
+ * Time. This is typically a 1 Hz pulse that is used to synchronize the
+ * rest of the components of the system. This API configure the Ethernet
+ * Controller pulse. An additional set up is required to configure the chip
+ * General Purpose I/O (GPIO) to enable the chip output pin.
+ *
+ * @param adapter pointer to the private structure.
+ * @param params output pulse configuration.
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_pth_pulse_out_config(struct al_hal_eth_adapter *adapter,
+ struct al_eth_pth_pulse_out_params *params);
+
+/* link */
+/** Link status information, filled by al_eth_link_status_get() */
+struct al_eth_link_status {
+ al_bool link_up; /**< AL_TRUE when the link is up */
+};
+
+/**
+ * get link status
+ *
+ * this function should be used when no external phy is used to get
+ * information about the link
+ *
+ * @param adapter pointer to the private structure.
+ * @param status pointer to struct where to set link information
+ *
+ * @return return 0 on success. otherwise on failure.
+ */
+int al_eth_link_status_get(struct al_hal_eth_adapter *adapter, struct al_eth_link_status *status);
+
+/**
+ * Set LEDs to represent link status.
+ *
+ * @param adapter pointer to the private structure.
+ * @param link_is_up boolean indicating current link status.
+ * In case link is down the leds will be turned off.
+ * In case link is up the leds will be turned on, that means
+ * leds will be blinking on traffic and will be constantly lighting
+ * on inactive link
+ * @return return 0 on success. otherwise on failure.
+ */
+int al_eth_led_set(struct al_hal_eth_adapter *adapter, al_bool link_is_up);
+
+/* get statistics */
+
+/** MAC statistics counters, filled by al_eth_mac_stats_get().
+ * Counter names follow the IEEE 802.3 / RMON MIB attribute naming.
+ */
+struct al_eth_mac_stats{
+ /* sum the data and padding octets (i.e. without header and FCS) received with a valid frame. */
+ uint64_t aOctetsReceivedOK;
+ /* sum of Payload and padding octets of frames transmitted without error*/
+ uint64_t aOctetsTransmittedOK;
+ /* total number of packets received. Good and bad packets */
+ uint32_t etherStatsPkts;
+ /* number of received unicast packets */
+ uint32_t ifInUcastPkts;
+ /* number of received multicast packets */
+ uint32_t ifInMulticastPkts;
+ /* number of received broadcast packets */
+ uint32_t ifInBroadcastPkts;
+ /* Number of frames received with FIFO Overflow, CRC, Payload Length, Jabber and Oversized, Alignment or PHY/PCS error indication */
+ uint32_t ifInErrors;
+
+ /* number of transmitted unicast packets */
+ uint32_t ifOutUcastPkts;
+ /* number of transmitted multicast packets */
+ uint32_t ifOutMulticastPkts;
+ /* number of transmitted broadcast packets */
+ uint32_t ifOutBroadcastPkts;
+ /* number of frames transmitted with FIFO Overflow, FIFO Underflow or Controller indicated error */
+ uint32_t ifOutErrors;
+
+ /* number of Frame received without error (Including Pause Frames). */
+ uint32_t aFramesReceivedOK;
+ /* number of Frames transmitted without error (Including Pause Frames) */
+ uint32_t aFramesTransmittedOK;
+ /* number of packets received with less than 64 octets */
+ uint32_t etherStatsUndersizePkts;
+ /* Too short frames with CRC error, available only for RGMII and 1G Serial modes */
+ uint32_t etherStatsFragments;
+ /* Too long frames with CRC error */
+ uint32_t etherStatsJabbers;
+ /* packet that exceeds the valid maximum programmed frame length */
+ uint32_t etherStatsOversizePkts;
+ /* number of frames received with a CRC error */
+ uint32_t aFrameCheckSequenceErrors;
+ /* number of frames received with alignment error */
+ uint32_t aAlignmentErrors;
+ /* number of dropped packets due to FIFO overflow */
+ uint32_t etherStatsDropEvents;
+ /* number of transmitted pause frames. */
+ uint32_t aPAUSEMACCtrlFramesTransmitted;
+ /* number of received pause frames. */
+ uint32_t aPAUSEMACCtrlFramesReceived;
+ /* frame received exceeded the maximum length programmed with register FRM_LGTH, available only for 10G modes */
+ uint32_t aFrameTooLongErrors;
+ /* received frame with bad length/type (between 46 and 0x600 or less
+ * than 46 for packets longer than 64), available only for 10G modes */
+ uint32_t aInRangeLengthErrors;
+ /* Valid VLAN tagged frames transmitted */
+ uint32_t VLANTransmittedOK;
+ /* Valid VLAN tagged frames received */
+ uint32_t VLANReceivedOK;
+ /* Total number of octets received. Good and bad packets */
+ uint32_t etherStatsOctets;
+
+ /* packets of 64 octets length is received (good and bad frames are counted) */
+ uint32_t etherStatsPkts64Octets;
+ /* Frames (good and bad) with 65 to 127 octets */
+ uint32_t etherStatsPkts65to127Octets;
+ /* Frames (good and bad) with 128 to 255 octets */
+ uint32_t etherStatsPkts128to255Octets;
+ /* Frames (good and bad) with 256 to 511 octets */
+ uint32_t etherStatsPkts256to511Octets;
+ /* Frames (good and bad) with 512 to 1023 octets */
+ uint32_t etherStatsPkts512to1023Octets;
+ /* Frames (good and bad) with 1024 to 1518 octets */
+ uint32_t etherStatsPkts1024to1518Octets;
+ /* frames with 1519 bytes to the maximum length programmed in the register FRAME_LENGTH. */
+ uint32_t etherStatsPkts1519toX;
+
+ /* EEE event counters — NOTE(review): Rx/Tx direction inferred from names only; confirm against HW spec */
+ uint32_t eee_in;
+ uint32_t eee_out;
+};
+
+/**
+ * get mac statistics
+ * @param adapter pointer to the private structure.
+ * @param stats pointer to structure that will be filled with statistics.
+ *
+ * @return return 0 on success. otherwise on failure.
+ */
+int al_eth_mac_stats_get(struct al_hal_eth_adapter *adapter, struct al_eth_mac_stats *stats);
+
+/** Ethernet Controller statistics counters, filled by al_eth_ec_stats_get() */
+struct al_eth_ec_stats{
+ /* Rx Frequency adjust FIFO input packets */
+ uint32_t faf_in_rx_pkt;
+ /* Rx Frequency adjust FIFO input short error packets */
+ uint32_t faf_in_rx_short;
+ /* Rx Frequency adjust FIFO input long error packets */
+ uint32_t faf_in_rx_long;
+ /* Rx Frequency adjust FIFO output packets */
+ uint32_t faf_out_rx_pkt;
+ /* Rx Frequency adjust FIFO output short error packets */
+ uint32_t faf_out_rx_short;
+ /* Rx Frequency adjust FIFO output long error packets */
+ uint32_t faf_out_rx_long;
+ /* Rx Frequency adjust FIFO output drop packets */
+ uint32_t faf_out_drop;
+ /* Number of packets written into the Rx FIFO (without FIFO error indication) */
+ uint32_t rxf_in_rx_pkt;
+ /* Number of error packets written into the Rx FIFO (with FIFO error indication, */
+ /* FIFO full indication during packet reception) */
+ uint32_t rxf_in_fifo_err;
+ /* Number of packets read from Rx FIFO 1 */
+ uint32_t lbf_in_rx_pkt;
+ /* Number of packets read from Rx FIFO 2 (loopback FIFO) */
+ /* NOTE(review): field name suggests an error counter but the comment describes FIFO 2 reads — confirm against HW spec */
+ uint32_t lbf_in_fifo_err;
+ /* Rx FIFO output drop packets from FIFO 1 */
+ uint32_t rxf_out_rx_1_pkt;
+ /* Rx FIFO output drop packets from FIFO 2 (loop back) */
+ uint32_t rxf_out_rx_2_pkt;
+ /* Rx FIFO output drop packets from FIFO 1 */
+ uint32_t rxf_out_drop_1_pkt;
+ /* Rx FIFO output drop packets from FIFO 2 (loop back) */
+ uint32_t rxf_out_drop_2_pkt;
+ /* Rx Parser 1, input packet counter */
+ uint32_t rpe_1_in_rx_pkt;
+ /* Rx Parser 1, output packet counter */
+ uint32_t rpe_1_out_rx_pkt;
+ /* Rx Parser 2, input packet counter */
+ uint32_t rpe_2_in_rx_pkt;
+ /* Rx Parser 2, output packet counter */
+ uint32_t rpe_2_out_rx_pkt;
+ /* Rx Parser 3 (MACsec), input packet counter */
+ uint32_t rpe_3_in_rx_pkt;
+ /* Rx Parser 3 (MACsec), output packet counter */
+ uint32_t rpe_3_out_rx_pkt;
+ /* Tx parser, input packet counter */
+ uint32_t tpe_in_tx_pkt;
+ /* Tx parser, output packet counter */
+ uint32_t tpe_out_tx_pkt;
+ /* Tx packet modification, input packet counter */
+ uint32_t tpm_tx_pkt;
+ /* Tx forwarding input packet counter */
+ uint32_t tfw_in_tx_pkt;
+ /* Tx forwarding input packet counter */
+ uint32_t tfw_out_tx_pkt;
+ /* Rx forwarding input packet counter */
+ uint32_t rfw_in_rx_pkt;
+ /* Rx Forwarding, packet with VLAN command drop indication */
+ uint32_t rfw_in_vlan_drop;
+ /* Rx Forwarding, packets with parse drop indication */
+ uint32_t rfw_in_parse_drop;
+ /* Rx Forwarding, multicast packets */
+ uint32_t rfw_in_mc;
+ /* Rx Forwarding, broadcast packets */
+ uint32_t rfw_in_bc;
+ /* Rx Forwarding, tagged packets */
+ uint32_t rfw_in_vlan_exist;
+ /* Rx Forwarding, untagged packets */
+ uint32_t rfw_in_vlan_nexist;
+ /* Rx Forwarding, packets with MAC address drop indication (from the MAC address table) */
+ uint32_t rfw_in_mac_drop;
+ /* Rx Forwarding, packets with undetected MAC address */
+ uint32_t rfw_in_mac_ndet_drop;
+ /* Rx Forwarding, packets with drop indication from the control table */
+ uint32_t rfw_in_ctrl_drop;
+ /* Rx Forwarding, packets with L3_protocol_index drop indication */
+ uint32_t rfw_in_prot_i_drop;
+ /* EEE, number of times the system went into EEE state */
+ uint32_t eee_in;
+};
+
+/**
+ * get ec statistics
+ * @param adapter pointer to the private structure.
+ * @param stats pointer to structure that will be filled with statistics.
+ *
+ * @return return 0 on success. otherwise on failure.
+ */
+int al_eth_ec_stats_get(struct al_hal_eth_adapter *adapter, struct al_eth_ec_stats *stats);
+
+/** Per-UDMA Ethernet Controller statistics counters, filled by al_eth_ec_stat_udma_get() */
+struct al_eth_ec_stat_udma{
+ /* Rx forwarding output packet counter */
+ uint32_t rfw_out_rx_pkt;
+ /* Rx forwarding output drop packet counter */
+ uint32_t rfw_out_drop;
+ /* Multi-stream write, number of Rx packets */
+ uint32_t msw_in_rx_pkt;
+ /* Multi-stream write, number of dropped packets at SOP, Q full indication */
+ uint32_t msw_drop_q_full;
+ /* Multi-stream write, number of dropped packets at SOP */
+ uint32_t msw_drop_sop;
+ /* Multi-stream write, number of dropped packets at EOP, */
+ /*EOP was written with error indication (not all packet data was written) */
+ uint32_t msw_drop_eop;
+ /* Multi-stream write, number of packets written to the stream FIFO with EOP and without packet loss */
+ uint32_t msw_wr_eop;
+ /* Multi-stream write, number of packets read from the FIFO into the stream */
+ uint32_t msw_out_rx_pkt;
+ /* Number of transmitted packets without TSO enabled */
+ uint32_t tso_no_tso_pkt;
+ /* Number of transmitted packets with TSO enabled */
+ uint32_t tso_tso_pkt;
+ /* Number of TSO segments that were generated */
+ uint32_t tso_seg_pkt;
+ /* Number of TSO segments that required padding */
+ uint32_t tso_pad_pkt;
+ /* Tx Packet modification, MAC SA spoof error */
+ uint32_t tpm_tx_spoof;
+ /* Tx MAC interface, input packet counter */
+ uint32_t tmi_in_tx_pkt;
+ /* Tx MAC interface, number of packets forwarded to the MAC */
+ uint32_t tmi_out_to_mac;
+ /* Tx MAC interface, number of packets forwarded to the Rx data path */
+ uint32_t tmi_out_to_rx;
+ /* Tx MAC interface, number of transmitted bytes on queue 0 */
+ uint32_t tx_q0_bytes;
+ /* Tx MAC interface, number of transmitted bytes on queue 1 */
+ uint32_t tx_q1_bytes;
+ /* Tx MAC interface, number of transmitted bytes on queue 2 */
+ uint32_t tx_q2_bytes;
+ /* Tx MAC interface, number of transmitted bytes on queue 3 */
+ uint32_t tx_q3_bytes;
+ /* Tx MAC interface, number of transmitted packets on queue 0 */
+ uint32_t tx_q0_pkts;
+ /* Tx MAC interface, number of transmitted packets on queue 1 */
+ uint32_t tx_q1_pkts;
+ /* Tx MAC interface, number of transmitted packets on queue 2 */
+ uint32_t tx_q2_pkts;
+ /* Tx MAC interface, number of transmitted packets on queue 3 */
+ uint32_t tx_q3_pkts;
+};
+
+/**
+ * get per_udma statistics
+ * @param adapter pointer to the private structure.
+ * @param idx udma_id value
+ * @param stats pointer to structure that will be filled with statistics.
+ *
+ * @return return 0 on success. otherwise on failure.
+ */
+int al_eth_ec_stat_udma_get(struct al_hal_eth_adapter *adapter, uint8_t idx, struct al_eth_ec_stat_udma *stats);
+
+/* traffic control */
+
+/**
+ * perform Function Level Reset RMN
+ *
+ * Addressing RMN: 714
+ *
+ * @param pci_read_config_u32 pointer to function that reads register from pci header
+ * @param pci_write_config_u32 pointer to function that writes register from pci header
+ * @param handle pointer passes to the above functions as first parameter
+ * @param mac_base base address of the MAC registers
+ *
+ * @return 0.
+ */
+int al_eth_flr_rmn(int (* pci_read_config_u32)(void *handle, int where, uint32_t *val),
+ int (* pci_write_config_u32)(void *handle, int where, uint32_t val),
+ void *handle,
+ void __iomem *mac_base);
+
+/**
+ * perform Function Level Reset RMN but restore registers that contain board specific data
+ *
+ * the data that save and restored is the board params and mac addresses
+ *
+ * @param pci_read_config_u32 pointer to function that reads register from pci header
+ * @param pci_write_config_u32 pointer to function that writes register from pci header
+ * @param handle pointer passes to the above functions as first parameter
+ * @param mac_base base address of the MAC registers
+ * @param ec_base base address of the Ethernet Controller registers
+ * @param mac_addresses_num number of mac addresses to restore
+ *
+ * @return 0.
+ */
+int al_eth_flr_rmn_restore_params(int (* pci_read_config_u32)(void *handle, int where, uint32_t *val),
+ int (* pci_write_config_u32)(void *handle, int where, uint32_t val),
+ void *handle,
+ void __iomem *mac_base,
+ void __iomem *ec_base,
+ int mac_addresses_num);
+
+/* board specific information (media type, phy address, etc.) */
+
+
+/** Board media type, stored in the board params by the boot loader */
+enum al_eth_board_media_type {
+ AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT = 0,
+ AL_ETH_BOARD_MEDIA_TYPE_RGMII = 1,
+ AL_ETH_BOARD_MEDIA_TYPE_10GBASE_SR = 2,
+ AL_ETH_BOARD_MEDIA_TYPE_SGMII = 3,
+ AL_ETH_BOARD_MEDIA_TYPE_1000BASE_X = 4,
+ AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT_AUTO_SPEED = 5,
+ AL_ETH_BOARD_MEDIA_TYPE_SGMII_2_5G = 6,
+ AL_ETH_BOARD_MEDIA_TYPE_NBASE_T = 7,
+};
+
+/** MDIO bus frequency selection */
+enum al_eth_board_mdio_freq {
+ AL_ETH_BOARD_MDIO_FREQ_2_5_MHZ = 0,
+ AL_ETH_BOARD_MDIO_FREQ_1_MHZ = 1,
+};
+
+/** External PHY management interface type */
+enum al_eth_board_ext_phy_if {
+ AL_ETH_BOARD_PHY_IF_MDIO = 0,
+ AL_ETH_BOARD_PHY_IF_XMDIO = 1,
+ AL_ETH_BOARD_PHY_IF_I2C = 2,
+
+};
+
+/** Auto-negotiation mode: in-band (over the serial link) or out-of-band */
+enum al_eth_board_auto_neg_mode {
+ AL_ETH_BOARD_AUTONEG_OUT_OF_BAND = 0,
+ AL_ETH_BOARD_AUTONEG_IN_BAND = 1,
+
+};
+
+/* the 1G MAC active speed when auto-negotiation is disabled */
+enum al_eth_board_1g_speed {
+ AL_ETH_BOARD_1G_SPEED_1000M = 0,
+ AL_ETH_BOARD_1G_SPEED_100M = 1,
+ AL_ETH_BOARD_1G_SPEED_10M = 2,
+};
+
+enum al_eth_retimer_channel {
+ AL_ETH_RETIMER_CHANNEL_A = 0,
+ AL_ETH_RETIMER_CHANNEL_B = 1,
+ AL_ETH_RETIMER_CHANNEL_C = 2,
+ AL_ETH_RETIMER_CHANNEL_D = 3,
+ AL_ETH_RETIMER_CHANNEL_MAX = 4
+};
+
+/* list of supported retimers */
+enum al_eth_retimer_type {
+ AL_ETH_RETIMER_BR_210 = 0,
+ AL_ETH_RETIMER_BR_410 = 1,
+
+ AL_ETH_RETIMER_TYPE_MAX = 4,
+};
+
+/** structure represents the board information. this info set by boot loader
+ * and read by OS driver.
+ */
+struct al_eth_board_params {
+ enum al_eth_board_media_type media_type;
+ al_bool phy_exist; /**< external phy exist */
+ uint8_t phy_mdio_addr; /**< mdio address of external phy */
+ al_bool sfp_plus_module_exist; /**< SFP+ module connected */
+ al_bool autoneg_enable; /**< enable Auto-Negotiation */
+ al_bool kr_lt_enable; /**< enable KR Link-Training */
+ al_bool kr_fec_enable; /**< enable KR FEC */
+ enum al_eth_board_mdio_freq mdio_freq; /**< MDIO frequency */
+ uint8_t i2c_adapter_id; /**< identifier for the i2c adapter to use to access SFP+ module */
+ enum al_eth_board_ext_phy_if phy_if; /**< phy interface */
+ enum al_eth_board_auto_neg_mode an_mode; /**< auto-negotiation mode (in-band / out-of-band) */
+ uint8_t serdes_grp; /**< serdes's group id */
+ uint8_t serdes_lane; /**< serdes's lane id */
+ enum al_eth_ref_clk_freq ref_clk_freq; /**< reference clock frequency */
+ al_bool dont_override_serdes; /**< prevent override serdes parameters */
+ al_bool force_1000_base_x; /**< set mac to 1000 base-x mode (instead sgmii) */
+ al_bool an_disable; /**< disable auto negotiation */
+ enum al_eth_board_1g_speed speed; /**< port speed if AN disabled */
+ al_bool half_duplex; /**< force half duplex if AN disabled */
+ al_bool fc_disable; /**< disable flow control */
+ al_bool retimer_exist; /**< a retimer exists on the board */
+ uint8_t retimer_bus_id; /**< in what i2c bus the retimer is on */
+ uint8_t retimer_i2c_addr; /**< i2c address of the retimer */
+ enum al_eth_retimer_channel retimer_channel; /**< what channel connected to this port */
+ al_bool dac; /**< assume direct attached cable is connected if auto detect is off or failed */
+ uint8_t dac_len; /**< assume this cable length if auto detect is off or failed */
+ enum al_eth_retimer_type retimer_type; /**< the type of the specific retimer */
+};
+
+/**
+ * set board parameter of the eth port
+ * this function is used to set the board parameters into scratchpad
+ * registers. those parameters can be read later by the OS driver.
+ *
+ * @param mac_base the virtual address of the mac registers (PCI BAR 2)
+ * @param params pointer to the structure that includes the parameters
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_board_params_set(void * __iomem mac_base, struct al_eth_board_params *params);
+
+/**
+ * get board parameter of the eth port
+ * this function is used to get the board parameters from scratchpad
+ * registers.
+ *
+ * @param mac_base the virtual address of the mac registers (PCI BAR 2)
+ * @param params pointer to structure where the parameters will be stored.
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_board_params_get(void * __iomem mac_base, struct al_eth_board_params *params);
+
+/*
+ * Wake-On-Lan (WoL)
+ *
+ * The following few functions configure the Wake-On-Lan packet detection
+ * inside the Integrated Ethernet MAC.
+ *
+ * There are other alternative ways to set WoL, such using the
+ * external 1000Base-T transceiver to set WoL mode.
+ *
+ * These APIs do not set the system-wide power-state, nor are they responsible for the
+ * transition from Sleep to Normal power state.
+ *
+ * For system level considerations, please refer to Annapurna Labs Alpine Wiki.
+ */
+/* Interrupt enable WoL MAC DA Unicast detected packet */
+#define AL_ETH_WOL_INT_UNICAST AL_BIT(0)
+/* Interrupt enable WoL L2 Multicast detected packet */
+#define AL_ETH_WOL_INT_MULTICAST AL_BIT(1)
+/* Interrupt enable WoL L2 Broadcast detected packet */
+#define AL_ETH_WOL_INT_BROADCAST AL_BIT(2)
+/* Interrupt enable WoL IPv4 detected packet */
+#define AL_ETH_WOL_INT_IPV4 AL_BIT(3)
+/* Interrupt enable WoL IPv6 detected packet */
+#define AL_ETH_WOL_INT_IPV6 AL_BIT(4)
+/* Interrupt enable WoL EtherType+MAC DA detected packet */
+#define AL_ETH_WOL_INT_ETHERTYPE_DA AL_BIT(5)
+/* Interrupt enable WoL EtherType+L2 Broadcast detected packet */
+#define AL_ETH_WOL_INT_ETHERTYPE_BC AL_BIT(6)
+/* Interrupt enable WoL parser detected packet */
+#define AL_ETH_WOL_INT_PARSER AL_BIT(7)
+/* Interrupt enable WoL magic detected packet */
+#define AL_ETH_WOL_INT_MAGIC AL_BIT(8)
+/* Interrupt enable WoL magic+password detected packet */
+#define AL_ETH_WOL_INT_MAGIC_PSWD AL_BIT(9)
+
+/* Forward enable WoL MAC DA Unicast detected packet */
+#define AL_ETH_WOL_FWRD_UNICAST AL_BIT(0)
+/* Forward enable WoL L2 Multicast detected packet */
+#define AL_ETH_WOL_FWRD_MULTICAST AL_BIT(1)
+/* Forward enable WoL L2 Broadcast detected packet */
+#define AL_ETH_WOL_FWRD_BROADCAST AL_BIT(2)
+/* Forward enable WoL IPv4 detected packet */
+#define AL_ETH_WOL_FWRD_IPV4 AL_BIT(3)
+/* Forward enable WoL IPv6 detected packet */
+#define AL_ETH_WOL_FWRD_IPV6 AL_BIT(4)
+/* Forward enable WoL EtherType+MAC DA detected packet */
+#define AL_ETH_WOL_FWRD_ETHERTYPE_DA AL_BIT(5)
+/* Forward enable WoL EtherType+L2 Broadcast detected packet */
+#define AL_ETH_WOL_FWRD_ETHERTYPE_BC AL_BIT(6)
+/* Forward enable WoL parser detected packet */
+#define AL_ETH_WOL_FWRD_PARSER AL_BIT(7)
+
+struct al_eth_wol_params {
+ uint8_t *dest_addr; /**< 6 bytes array of destination address for
+ magic packet detection */
+ uint8_t *pswd; /**< 6 bytes array of the password to use */
+ uint8_t *ipv4; /**< 4 bytes array of the ipv4 to use.
+ example: for ip = 192.168.1.2
+ ipv4[0]=2, ipv4[1]=1, ipv4[2]=168, ipv4[3]=192 */
+ uint8_t *ipv6; /**< 16 bytes array of the ipv6 to use.
+ example: ip = 2607:f0d0:1002:0051:0000:0000:5231:1234
+ ipv6[0]=34, ipv6[1]=12, ipv6[2]=31 .. */
+ uint16_t ethr_type1; /**< first ethertype to use */
+ uint16_t ethr_type2; /**< second ethertype to use */
+ uint16_t forward_mask; /**< bitmask of AL_ETH_WOL_FWRD_* of the packet
+ types needed to be forward. */
+ uint16_t int_mask; /**< bitmask of AL_ETH_WOL_INT_* of the packet types
+ that will send interrupt to wake the system. */
+};
+
+/**
+ * enable the wol mechanism
+ * set what type of packets will wake up the system and what type of packets
+ * need to be forwarded after the system is up
+ *
+ * beside this function wol filter also need to be set by
+ * calling al_eth_filter_config with AL_ETH_RFW_FILTER_WOL
+ *
+ * @param adapter pointer to the private structure
+ * @param wol the parameters needed to configure the wol
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_wol_enable(
+ struct al_hal_eth_adapter *adapter,
+ struct al_eth_wol_params *wol);
+
+/**
+ * Disable the WoL mechanism.
+ *
+ * @param adapter pointer to the private structure
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_wol_disable(
+ struct al_hal_eth_adapter *adapter);
+
+/**
+ * Configure tx fwd vlan table entry
+ *
+ * @param adapter pointer to the private structure
+ * @param idx the entry index within the vlan table. The HW uses the vlan id
+ * field of the packet when accessing this table.
+ * @param udma_mask vlan table value that indicates that the packet should be forward back to
+ * the udmas, through the Rx path (udma_mask is one-hot representation)
+ * @param fwd_to_mac vlan table value that indicates that the packet should be forward to mac
+ *
+ * @return 0 on success. otherwise on failure.
+ */
+int al_eth_tx_fwd_vid_table_set(struct al_hal_eth_adapter *adapter, uint32_t idx, uint8_t udma_mask, al_bool fwd_to_mac);
+
+/** Tx Generic protocol detect Cam compare table entry */
+struct al_eth_tx_gpd_cam_entry {
+ enum AL_ETH_PROTO_ID l3_proto_idx;
+ enum AL_ETH_PROTO_ID l4_proto_idx;
+ enum AL_ETH_TX_TUNNEL_MODE tunnel_control;
+ uint8_t source_vlan_count:2;
+ uint8_t tx_gpd_cam_ctrl:1;
+ uint8_t l3_proto_idx_mask:5;
+ uint8_t l4_proto_idx_mask:5;
+ uint8_t tunnel_control_mask:3;
+ uint8_t source_vlan_count_mask:2;
+};
+
+/** Rx Generic protocol detect Cam compare table entry */
+struct al_eth_rx_gpd_cam_entry {
+ enum AL_ETH_PROTO_ID outer_l3_proto_idx;
+ enum AL_ETH_PROTO_ID outer_l4_proto_idx;
+ enum AL_ETH_PROTO_ID inner_l3_proto_idx;
+ enum AL_ETH_PROTO_ID inner_l4_proto_idx;
+ uint8_t parse_ctrl;
+ uint8_t outer_l3_len;
+ uint8_t l3_priority;
+ uint8_t l4_dst_port_lsb;
+ uint8_t rx_gpd_cam_ctrl:1;
+ uint8_t outer_l3_proto_idx_mask:5;
+ uint8_t outer_l4_proto_idx_mask:5;
+ uint8_t inner_l3_proto_idx_mask:5;
+ uint8_t inner_l4_proto_idx_mask:5;
+ uint8_t parse_ctrl_mask;
+ uint8_t outer_l3_len_mask;
+ uint8_t l3_priority_mask;
+ uint8_t l4_dst_port_lsb_mask;
+};
+
+enum AL_ETH_TX_GCP_ALU_OPSEL {
+ AL_ETH_TX_GCP_ALU_L3_OFFSET = 0,
+ AL_ETH_TX_GCP_ALU_OUTER_L3_OFFSET = 1,
+ AL_ETH_TX_GCP_ALU_L3_LEN = 2,
+ AL_ETH_TX_GCP_ALU_OUTER_L3_LEN = 3,
+ AL_ETH_TX_GCP_ALU_L4_OFFSET = 4,
+ AL_ETH_TX_GCP_ALU_L4_LEN = 5,
+ AL_ETH_TX_GCP_ALU_TABLE_VAL = 10
+};
+
+enum AL_ETH_RX_GCP_ALU_OPSEL {
+ AL_ETH_RX_GCP_ALU_OUTER_L3_OFFSET = 0,
+ AL_ETH_RX_GCP_ALU_INNER_L3_OFFSET = 1,
+ AL_ETH_RX_GCP_ALU_OUTER_L4_OFFSET = 2,
+ AL_ETH_RX_GCP_ALU_INNER_L4_OFFSET = 3,
+ AL_ETH_RX_GCP_ALU_OUTER_L3_HDR_LEN_LAT = 4,
+ AL_ETH_RX_GCP_ALU_INNER_L3_HDR_LEN_LAT = 5,
+ AL_ETH_RX_GCP_ALU_OUTER_L3_HDR_LEN_SEL = 6,
+ AL_ETH_RX_GCP_ALU_INNER_L3_HDR_LEN_SEL = 7,
+ AL_ETH_RX_GCP_ALU_PARSE_RESULT_VECTOR_OFFSET_1 = 8,
+ AL_ETH_RX_GCP_ALU_PARSE_RESULT_VECTOR_OFFSET_2 = 9,
+ AL_ETH_RX_GCP_ALU_TABLE_VAL = 10
+};
+
+/** Tx Generic crc parameters table entry */
+
+struct al_eth_tx_gcp_table_entry {
+ uint8_t poly_sel:1;
+ uint8_t crc32_bit_comp:1;
+ uint8_t crc32_bit_swap:1;
+ uint8_t crc32_byte_swap:1;
+ uint8_t data_bit_swap:1;
+ uint8_t data_byte_swap:1;
+ uint8_t trail_size:4;
+ uint8_t head_size:8;
+ uint8_t head_calc:1;
+ uint8_t mask_polarity:1;
+ enum AL_ETH_ALU_OPCODE tx_alu_opcode_1;
+ enum AL_ETH_ALU_OPCODE tx_alu_opcode_2;
+ enum AL_ETH_ALU_OPCODE tx_alu_opcode_3;
+ enum AL_ETH_TX_GCP_ALU_OPSEL tx_alu_opsel_1;
+ enum AL_ETH_TX_GCP_ALU_OPSEL tx_alu_opsel_2;
+ enum AL_ETH_TX_GCP_ALU_OPSEL tx_alu_opsel_3;
+ enum AL_ETH_TX_GCP_ALU_OPSEL tx_alu_opsel_4;
+ uint32_t gcp_mask[6];
+ uint32_t crc_init;
+ uint8_t gcp_table_res:7;
+ uint16_t alu_val:9;
+};
+
+/** Rx Generic crc parameters table entry */
+
+struct al_eth_rx_gcp_table_entry {
+ uint8_t poly_sel:1;
+ uint8_t crc32_bit_comp:1;
+ uint8_t crc32_bit_swap:1;
+ uint8_t crc32_byte_swap:1;
+ uint8_t data_bit_swap:1;
+ uint8_t data_byte_swap:1;
+ uint8_t trail_size:4;
+ uint8_t head_size:8;
+ uint8_t head_calc:1;
+ uint8_t mask_polarity:1;
+ enum AL_ETH_ALU_OPCODE rx_alu_opcode_1;
+ enum AL_ETH_ALU_OPCODE rx_alu_opcode_2;
+ enum AL_ETH_ALU_OPCODE rx_alu_opcode_3;
+ enum AL_ETH_RX_GCP_ALU_OPSEL rx_alu_opsel_1;
+ enum AL_ETH_RX_GCP_ALU_OPSEL rx_alu_opsel_2;
+ enum AL_ETH_RX_GCP_ALU_OPSEL rx_alu_opsel_3;
+ enum AL_ETH_RX_GCP_ALU_OPSEL rx_alu_opsel_4;
+ uint32_t gcp_mask[6];
+ uint32_t crc_init;
+ uint32_t gcp_table_res:27;
+ uint16_t alu_val:9;
+};
+
+/** Tx per_protocol_number crc & l3_checksum & l4_checksum command table entry */
+
+struct al_eth_tx_crc_chksum_replace_cmd_for_protocol_num_entry {
+ al_bool crc_en_00; /*from Tx_buffer_descriptor: enable_l4_checksum is 0 ,enable_l3_checksum is 0 */
+ al_bool crc_en_01; /*from Tx_buffer_descriptor: enable_l4_checksum is 0 ,enable_l3_checksum is 1 */
+ al_bool crc_en_10; /*from Tx_buffer_descriptor: enable_l4_checksum is 1 ,enable_l3_checksum is 0 */
+ al_bool crc_en_11; /*from Tx_buffer_descriptor: enable_l4_checksum is 1 ,enable_l3_checksum is 1 */
+ al_bool l4_csum_en_00; /*from Tx_buffer_descriptor: enable_l4_checksum is 0 ,enable_l3_checksum is 0 */
+ al_bool l4_csum_en_01; /*from Tx_buffer_descriptor: enable_l4_checksum is 0 ,enable_l3_checksum is 1 */
+ al_bool l4_csum_en_10; /*from Tx_buffer_descriptor: enable_l4_checksum is 1 ,enable_l3_checksum is 0 */
+ al_bool l4_csum_en_11; /*from Tx_buffer_descriptor: enable_l4_checksum is 1 ,enable_l3_checksum is 1 */
+ al_bool l3_csum_en_00; /*from Tx_buffer_descriptor: enable_l4_checksum is 0 ,enable_l3_checksum is 0 */
+ al_bool l3_csum_en_01; /*from Tx_buffer_descriptor: enable_l4_checksum is 0 ,enable_l3_checksum is 1 */
+ al_bool l3_csum_en_10; /*from Tx_buffer_descriptor: enable_l4_checksum is 1 ,enable_l3_checksum is 0 */
+ al_bool l3_csum_en_11; /*from Tx_buffer_descriptor: enable_l4_checksum is 1 ,enable_l3_checksum is 1 */
+};
+
+/**
+ * Configure tx_gpd_entry
+ *
+ * @param adapter pointer to the private structure
+ * @param idx the entry index
+ * @param tx_gpd_entry entry data for the Tx protocol detect Cam compare table
+ *
+ * @return 0 on success. otherwise on failure.
+ *
+ */
+int al_eth_tx_protocol_detect_table_entry_set(struct al_hal_eth_adapter *adapter, uint32_t idx,
+ struct al_eth_tx_gpd_cam_entry *tx_gpd_entry);
+
+/**
+ * Configure tx_gcp_entry
+ *
+ * @param adapter pointer to the private structure
+ * @param idx the entry index
+ * @param tx_gcp_entry entry data for the Tx Generic crc parameters table
+ *
+ * @return 0 on success. otherwise on failure.
+ *
+ */
+int al_eth_tx_generic_crc_table_entry_set(struct al_hal_eth_adapter *adapter, uint32_t idx,
+ struct al_eth_tx_gcp_table_entry *tx_gcp_entry);
+
+/**
+ * Configure tx_crc_chksum_replace_cmd_entry
+ *
+ * @param adapter pointer to the private structure
+ * @param idx the entry index
+ * @param tx_replace_entry entry data for the Tx crc_&_l3_checksum_&_l4_checksum replace command table
+ *
+ * @return 0 on success. otherwise on failure.
+ *
+ */
+int al_eth_tx_crc_chksum_replace_cmd_entry_set(struct al_hal_eth_adapter *adapter, uint32_t idx,
+ struct al_eth_tx_crc_chksum_replace_cmd_for_protocol_num_entry *tx_replace_entry);
+
+/**
+ * Configure rx_gpd_entry
+ *
+ * @param adapter pointer to the private structure
+ * @param idx the entry index
+ * @param rx_gpd_entry entry data for the Rx protocol detect Cam compare table
+ *
+ * @return 0 on success. otherwise on failure.
+ *
+ */
+int al_eth_rx_protocol_detect_table_entry_set(struct al_hal_eth_adapter *adapter, uint32_t idx,
+ struct al_eth_rx_gpd_cam_entry *rx_gpd_entry);
+
+/**
+ * Configure rx_gcp_entry
+ *
+ * @param adapter pointer to the private structure
+ * @param idx the entry index
+ * @param rx_gcp_entry entry data for the Rx Generic crc parameters table
+ *
+ *
+ * @return 0 on success. otherwise on failure.
+ *
+ */
+int al_eth_rx_generic_crc_table_entry_set(struct al_hal_eth_adapter *adapter, uint32_t idx,
+ struct al_eth_rx_gcp_table_entry *rx_gcp_entry);
+
+/**
+ * Configure tx_gpd_table and regs
+ *
+ * @param adapter pointer to the private structure
+ *
+ */
+int al_eth_tx_protocol_detect_table_init(struct al_hal_eth_adapter *adapter);
+
+/**
+ * Configure crc_chksum_replace_cmd_table
+ *
+ * @param adapter pointer to the private structure
+ *
+ */
+int al_eth_tx_crc_chksum_replace_cmd_init(struct al_hal_eth_adapter *adapter);
+
+/**
+ * Configure tx_gcp_table and regs
+ *
+ * @param adapter pointer to the private structure
+ *
+ */
+int al_eth_tx_generic_crc_table_init(struct al_hal_eth_adapter *adapter);
+
+/**
+ * Configure rx_gpd_table and regs
+ *
+ * @param adapter pointer to the private structure
+ *
+ */
+int al_eth_rx_protocol_detect_table_init(struct al_hal_eth_adapter *adapter);
+
+/**
+ * Configure rx_gcp_table and regs
+ *
+ * @param adapter pointer to the private structure
+ *
+ */
+int al_eth_rx_generic_crc_table_init(struct al_hal_eth_adapter *adapter);
+
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+#endif /* __AL_HAL_ETH_H__ */
+/** @} end of Ethernet group */
diff --git a/eth/al_hal_eth_alu.h b/eth/al_hal_eth_alu.h
new file mode 100644
index 000000000000..2f5f1fa2301e
--- /dev/null
+++ b/eth/al_hal_eth_alu.h
@@ -0,0 +1,95 @@
+/*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_eth_alu_api API
+ * Ethernet Controller generic ALU API
+ * @ingroup group_eth
+ * @{
+ * @file al_hal_eth_alu.h
+ *
+ * @brief Header file for control parameters for the generic ALU unit in the Ethernet Datapath for Advanced Ethernet port.
+ *
+ */
+
+#ifndef __AL_HAL_ETH_ALU_H__
+#define __AL_HAL_ETH_ALU_H__
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+enum AL_ETH_ALU_OPCODE
+{
+ AL_ALU_FWD_A = 0,
+ AL_ALU_ARITHMETIC_ADD = 1,
+ AL_ALU_ARITHMETIC_SUBTRACT = 2,
+ AL_ALU_BITWISE_AND = 3,
+ AL_ALU_BITWISE_OR = 4,
+ AL_ALU_SHIFT_RIGHT_A_BY_B = 5,
+ AL_ALU_SHIFT_LEFT_A_BY_B = 6,
+ AL_ALU_BITWISE_XOR = 7,
+ AL_ALU_FWD_INV_A = 16,
+ AL_ALU_ARITHMETIC_ADD_INV_A_AND_B = 17,
+ AL_ALU_ARITHMETIC_SUBTRACT_INV_A_AND_B = 18,
+ AL_ALU_BITWISE_AND_INV_A_AND_B = 19,
+ AL_ALU_BITWISE_OR_INV_A_AND_B = 20,
+ AL_ALU_SHIFT_RIGHT_INV_A_BY_B = 21,
+ AL_ALU_SHIFT_LEFT_INV_A_BY_B = 22,
+ AL_ALU_BITWISE_XOR_INV_A_AND_B = 23,
+ AL_ALU_ARITHMETIC_ADD_A_AND_INV_B = 33,
+ AL_ALU_ARITHMETIC_SUBTRACT_A_AND_INV_B = 34,
+ AL_ALU_BITWISE_AND_A_AND_INV_B = 35,
+ AL_ALU_BITWISE_OR_A_AND_INV_B = 36,
+ AL_ALU_SHIFT_RIGHT_A_BY_INV_B = 37,
+ AL_ALU_SHIFT_LEFT_A_BY_INV_B = 38,
+ AL_ALU_BITWISE_XOR_A_AND_INV_B = 39,
+ AL_ALU_ARITHMETIC_ADD_INV_A_AND_INV_B = 49,
+ AL_ALU_ARITHMETIC_SUBTRACT_INV_A_AND = 50,
+ AL_ALU_BITWISE_AND_INV_A_AND_INV_B = 51,
+ AL_ALU_BITWISE_OR_INV_A_AND_INV_B = 52,
+ AL_ALU_SHIFT_RIGHT_INV_A_BY_INV_B = 53,
+ AL_ALU_SHIFT_LEFT_INV_A_BY_INV_B = 54,
+ AL_ALU_BITWISE_XOR_INV_A_AND_INV_B = 55
+};
+
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+#endif /* __AL_HAL_ETH_ALU_H__ */
+/** @} end of Ethernet group */
diff --git a/eth/al_hal_eth_ec_regs.h b/eth/al_hal_eth_ec_regs.h
new file mode 100644
index 000000000000..153e0d57a452
--- /dev/null
+++ b/eth/al_hal_eth_ec_regs.h
@@ -0,0 +1,3362 @@
+/*-
+*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @{
+ * @file al_hal_eth_ec_regs.h
+ *
+ * @brief Ethernet controller registers
+ *
+ */
+
+#ifndef __AL_HAL_EC_REG_H
+#define __AL_HAL_EC_REG_H
+
+#include "al_hal_plat_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+* Unit Registers
+*/
+
+
+
+struct al_ec_gen {
+ /* [0x0] Ethernet controller Version */
+ uint32_t version;
+ /* [0x4] Enable modules operation. */
+ uint32_t en;
+ /* [0x8] Enable FIFO operation on the EC side. */
+ uint32_t fifo_en;
+ /* [0xc] General L2 configuration for the Ethernet controlle ... */
+ uint32_t l2;
+ /* [0x10] Configure protocol index values */
+ uint32_t cfg_i;
+ /* [0x14] Configure protocol index values (extended protocols ... */
+ uint32_t cfg_i_ext;
+ /* [0x18] Enable modules operation (extended operations). */
+ uint32_t en_ext;
+ uint32_t rsrvd[9];
+};
+struct al_ec_mac {
+ /* [0x0] General configuration of the MAC side of the Ethern ... */
+ uint32_t gen;
+ /* [0x4] Minimum packet size */
+ uint32_t min_pkt;
+ /* [0x8] Maximum packet size */
+ uint32_t max_pkt;
+ uint32_t rsrvd[13];
+};
+struct al_ec_rxf {
+ /* [0x0] Rx FIFO input controller configuration 1 */
+ uint32_t cfg_1;
+ /* [0x4] Rx FIFO input controller configuration 2 */
+ uint32_t cfg_2;
+ /* [0x8] Threshold to start reading packet from the Rx FIFO */
+ uint32_t rd_fifo;
+ /* [0xc] Threshold to stop writing packet to the Rx FIFO */
+ uint32_t wr_fifo;
+ /* [0x10] Threshold to stop writing packet to the loopback FI ... */
+ uint32_t lb_fifo;
+ /* [0x14] Rx FIFO input controller loopback FIFO configuratio ... */
+ uint32_t cfg_lb;
+ /* [0x18] Configuration for dropping packet at the FIFO outpu ... */
+ uint32_t out_drop;
+ uint32_t rsrvd[25];
+};
+struct al_ec_epe {
+ /* [0x0] Ethernet parsing engine configuration 1 */
+ uint32_t parse_cfg;
+ /* [0x4] Protocol index action table address */
+ uint32_t act_table_addr;
+ /* [0x8] Protocol index action table data */
+ uint32_t act_table_data_1;
+ /* [0xc] Protocol index action table data */
+ uint32_t act_table_data_2;
+ /* [0x10] Protocol index action table data */
+ uint32_t act_table_data_3;
+ /* [0x14] Protocol index action table data */
+ uint32_t act_table_data_4;
+ /* [0x18] Protocol index action table data */
+ uint32_t act_table_data_5;
+ /* [0x1c] Protocol index action table data */
+ uint32_t act_table_data_6;
+ /* [0x20] Input result vector, default values for parser inpu ... */
+ uint32_t res_def;
+ /* [0x24] Result input vector selection */
+ uint32_t res_in;
+ uint32_t rsrvd[6];
+};
+struct al_ec_epe_res {
+ /* [0x0] Parser result vector pointer */
+ uint32_t p1;
+ /* [0x4] Parser result vector pointer */
+ uint32_t p2;
+ /* [0x8] Parser result vector pointer */
+ uint32_t p3;
+ /* [0xc] Parser result vector pointer */
+ uint32_t p4;
+ /* [0x10] Parser result vector pointer */
+ uint32_t p5;
+ /* [0x14] Parser result vector pointer */
+ uint32_t p6;
+ /* [0x18] Parser result vector pointer */
+ uint32_t p7;
+ /* [0x1c] Parser result vector pointer */
+ uint32_t p8;
+ /* [0x20] Parser result vector pointer */
+ uint32_t p9;
+ /* [0x24] Parser result vector pointer */
+ uint32_t p10;
+ /* [0x28] Parser result vector pointer */
+ uint32_t p11;
+ /* [0x2c] Parser result vector pointer */
+ uint32_t p12;
+ /* [0x30] Parser result vector pointer */
+ uint32_t p13;
+ /* [0x34] Parser result vector pointer */
+ uint32_t p14;
+ /* [0x38] Parser result vector pointer */
+ uint32_t p15;
+ /* [0x3c] Parser result vector pointer */
+ uint32_t p16;
+ /* [0x40] Parser result vector pointer */
+ uint32_t p17;
+ /* [0x44] Parser result vector pointer */
+ uint32_t p18;
+ /* [0x48] Parser result vector pointer */
+ uint32_t p19;
+ /* [0x4c] Parser result vector pointer */
+ uint32_t p20;
+ uint32_t rsrvd[12];
+};
+struct al_ec_epe_h {
+ /* [0x0] Header length, support for header length table for ... */
+ uint32_t hdr_len;
+};
+struct al_ec_epe_p {
+ /* [0x0] Data for comparison */
+ uint32_t comp_data;
+ /* [0x4] Mask for comparison */
+ uint32_t comp_mask;
+ /* [0x8] Compare control */
+ uint32_t comp_ctrl;
+ uint32_t rsrvd[4];
+};
+struct al_ec_epe_a {
+ /* [0x0] Protocol index action register */
+ uint32_t prot_act;
+};
+struct al_ec_rfw {
+ /* [0x0] Tuple (4/2) Hash configuration */
+ uint32_t thash_cfg_1;
+ /* [0x4] Tuple (4/2) Hash configuration */
+ uint32_t thash_cfg_2;
+ /* [0x8] MAC Hash configuration */
+ uint32_t mhash_cfg_1;
+ /* [0xc] MAC Hash configuration */
+ uint32_t mhash_cfg_2;
+ /* [0x10] MAC Hash configuration */
+ uint32_t hdr_split;
+ /* [0x14] Masking the errors described in register rxf_drop ... */
+ uint32_t meta_err;
+ /* [0x18] Configuration for generating the MetaData for the R ... */
+ uint32_t meta;
+ /* [0x1c] Configuration for generating the MetaData for the R ... */
+ uint32_t filter;
+ /* [0x20] 4 tupple hash table address */
+ uint32_t thash_table_addr;
+ /* [0x24] 4 tupple hash table data */
+ uint32_t thash_table_data;
+ /* [0x28] MAC hash table address */
+ uint32_t mhash_table_addr;
+ /* [0x2c] MAC hash table data */
+ uint32_t mhash_table_data;
+ /* [0x30] VLAN table address */
+ uint32_t vid_table_addr;
+ /* [0x34] VLAN table data */
+ uint32_t vid_table_data;
+ /* [0x38] VLAN p-bits table address */
+ uint32_t pbits_table_addr;
+ /* [0x3c] VLAN p-bits table data */
+ uint32_t pbits_table_data;
+ /* [0x40] DSCP table address */
+ uint32_t dscp_table_addr;
+ /* [0x44] DSCP table data */
+ uint32_t dscp_table_data;
+ /* [0x48] TC table address */
+ uint32_t tc_table_addr;
+ /* [0x4c] TC table data */
+ uint32_t tc_table_data;
+ /* [0x50] Control table address */
+ uint32_t ctrl_table_addr;
+ /* [0x54] Control table data */
+ uint32_t ctrl_table_data;
+ /* [0x58] Forwarding output configuration */
+ uint32_t out_cfg;
+ /* [0x5c] Flow steering mechanism,
+Table address */
+ uint32_t fsm_table_addr;
+ /* [0x60] Flow steering mechanism,
+Table data */
+ uint32_t fsm_table_data;
+ /* [0x64] Selection of data to be used in packet forwarding0 ... */
+ uint32_t ctrl_sel;
+ /* [0x68] Default VLAN data, used for untagged packets */
+ uint32_t default_vlan;
+ /* [0x6c] Default HASH output values */
+ uint32_t default_hash;
+ /* [0x70] Default override values, if a packet was filtered b ... */
+ uint32_t default_or;
+ /* [0x74] Latched information when a drop condition occurred */
+ uint32_t drop_latch;
+ /* [0x78] Check sum calculation configuration */
+ uint32_t checksum;
+ /* [0x7c] LRO offload engine configuration register */
+ uint32_t lro_cfg_1;
+ /* [0x80] LRO offload engine Check rules configurations for I ... */
+ uint32_t lro_check_ipv4;
+ /* [0x84] LRO offload engine IPv4 values configuration */
+ uint32_t lro_ipv4;
+ /* [0x88] LRO offload engine Check rules configurations for I ... */
+ uint32_t lro_check_ipv6;
+ /* [0x8c] LRO offload engine IPv6 values configuration */
+ uint32_t lro_ipv6;
+ /* [0x90] LRO offload engine Check rules configurations for T ... */
+ uint32_t lro_check_tcp;
+ /* [0x94] LRO offload engine IPv6 values configuration */
+ uint32_t lro_tcp;
+ /* [0x98] LRO offload engine Check rules configurations for U ... */
+ uint32_t lro_check_udp;
+ /* [0x9c] LRO offload engine Check rules configurations for U ... */
+ uint32_t lro_check_l2;
+ /* [0xa0] LRO offload engine Check rules configurations for U ... */
+ uint32_t lro_check_gen;
+ /* [0xa4] Rules for storing packet information into the cache ... */
+ uint32_t lro_store;
+ /* [0xa8] VLAN table default */
+ uint32_t vid_table_def;
+ /* [0xac] Control table default */
+ uint32_t ctrl_table_def;
+ /* [0xb0] Additional configuration 0 */
+ uint32_t cfg_a_0;
+ /* [0xb4] Tuple (4/2) Hash configuration (extended for RoCE a ... */
+ uint32_t thash_cfg_3;
+ /* [0xb8] Tuple (4/2) Hash configuration , mask for the input ... */
+ uint32_t thash_mask_outer_ipv6;
+ /* [0xbc] Tuple (4/2) Hash configuration , mask for the input ... */
+ uint32_t thash_mask_outer;
+ /* [0xc0] Tuple (4/2) Hash configuration , mask for the input ... */
+ uint32_t thash_mask_inner_ipv6;
+ /* [0xc4] Tuple (4/2) Hash configuration , mask for the input ... */
+ uint32_t thash_mask_inner;
+ uint32_t rsrvd[10];
+};
+struct al_ec_rfw_udma {
+ /* [0x0] Per UDMA default configuration */
+ uint32_t def_cfg;
+};
+struct al_ec_rfw_hash {
+ /* [0x0] key configuration (320 bits) */
+ uint32_t key;
+};
+struct al_ec_rfw_priority {
+ /* [0x0] Priority to queue mapping configuration */
+ uint32_t queue;
+};
+struct al_ec_rfw_default {
+ /* [0x0] Default forwarding configuration options */
+ uint32_t opt_1;
+};
+struct al_ec_fwd_mac {
+ /* [0x0] MAC address data [31:0] */
+ uint32_t data_l;
+ /* [0x4] MAC address data [15:0] */
+ uint32_t data_h;
+ /* [0x8] MAC address mask [31:0] */
+ uint32_t mask_l;
+ /* [0xc] MAC address mask [15:0] */
+ uint32_t mask_h;
+ /* [0x10] MAC compare control */
+ uint32_t ctrl;
+};
+struct al_ec_msw {
+ /* [0x0] Configuration for unicast packets */
+ uint32_t uc;
+ /* [0x4] Configuration for multicast packets */
+ uint32_t mc;
+ /* [0x8] Configuration for broadcast packets */
+ uint32_t bc;
+ uint32_t rsrvd[3];
+};
+struct al_ec_tso {
+ /* [0x0] Input configuration */
+ uint32_t in_cfg;
+ /* [0x4] MetaData default cache table address */
+ uint32_t cache_table_addr;
+ /* [0x8] MetaData default cache table data */
+ uint32_t cache_table_data_1;
+ /* [0xc] MetaData default cache table data */
+ uint32_t cache_table_data_2;
+ /* [0x10] MetaData default cache table data */
+ uint32_t cache_table_data_3;
+ /* [0x14] MetaData default cache table data */
+ uint32_t cache_table_data_4;
+ /* [0x18] TCP control bit operation for first segment */
+ uint32_t ctrl_first;
+ /* [0x1c] TCP control bit operation for middle segments */
+ uint32_t ctrl_middle;
+ /* [0x20] TCP control bit operation for last segment */
+ uint32_t ctrl_last;
+ /* [0x24] Additional TSO configurations */
+ uint32_t cfg_add_0;
+ /* [0x28] TSO configuration for tunnelled packets */
+ uint32_t cfg_tunnel;
+ uint32_t rsrvd[13];
+};
+struct al_ec_tso_sel {
+ /* [0x0] MSS value */
+ uint32_t mss;
+};
+struct al_ec_tpe {
+ /* [0x0] Parsing configuration */
+ uint32_t parse;
+ uint32_t rsrvd[15];
+};
+struct al_ec_tpm_udma {
+ /* [0x0] Default VLAN data */
+ uint32_t vlan_data;
+ /* [0x4] UDMA MAC SA information for spoofing */
+ uint32_t mac_sa_1;
+ /* [0x8] UDMA MAC SA information for spoofing */
+ uint32_t mac_sa_2;
+};
+struct al_ec_tpm_sel {
+ /* [0x0] Ethertype values for VLAN modification */
+ uint32_t etype;
+};
+struct al_ec_tfw {
+ /* [0x0] Tx FIFO Wr configuration */
+ uint32_t tx_wr_fifo;
+ /* [0x4] VLAN table address */
+ uint32_t tx_vid_table_addr;
+ /* [0x8] VLAN table data */
+ uint32_t tx_vid_table_data;
+ /* [0xc] Tx FIFO Rd configuration */
+ uint32_t tx_rd_fifo;
+ /* [0x10] Tx FIFO Rd configuration, checksum insertion */
+ uint32_t tx_checksum;
+ /* [0x14] Tx forwarding general configuration register */
+ uint32_t tx_gen;
+ /* [0x18] Tx spoofing configuration */
+ uint32_t tx_spf;
+ /* [0x1c] TX data FIFO status */
+ uint32_t data_fifo;
+ /* [0x20] Tx control FIFO status */
+ uint32_t ctrl_fifo;
+ /* [0x24] Tx header FIFO status */
+ uint32_t hdr_fifo;
+ uint32_t rsrvd[14];
+};
+struct al_ec_tfw_udma {
+ /* [0x0] Default GMDA output bitmap for unicast packet */
+ uint32_t uc_udma;
+ /* [0x4] Default GMDA output bitmap for multicast packet */
+ uint32_t mc_udma;
+ /* [0x8] Default GMDA output bitmap for broadcast packet */
+ uint32_t bc_udma;
+ /* [0xc] Tx spoofing configuration */
+ uint32_t spf_cmd;
+ /* [0x10] Forwarding decision control */
+ uint32_t fwd_dec;
+ uint32_t rsrvd;
+};
+struct al_ec_tmi {
+ /* [0x0] Forward packets back to the Rx data path for local ... */
+ uint32_t tx_cfg;
+ uint32_t rsrvd[3];
+};
+struct al_ec_efc {
+ /* [0x0] Mask of pause_on [7:0] for the Ethernet controller ... */
+ uint32_t ec_pause;
+ /* [0x4] Mask of Ethernet controller Almost Full indication ... */
+ uint32_t ec_xoff;
+ /* [0x8] Mask for generating XON indication pulse */
+ uint32_t xon;
+ /* [0xc] Mask for generating GPIO output XOFF indication fro ... */
+ uint32_t gpio;
+ /* [0x10] Rx FIFO threshold for generating the Almost Full in ... */
+ uint32_t rx_fifo_af;
+ /* [0x14] Rx FIFO threshold for generating the Almost Full in ... */
+ uint32_t rx_fifo_hyst;
+ /* [0x18] Rx FIFO threshold for generating the Almost Full in ... */
+ uint32_t stat;
+ /* [0x1c] XOFF timer for the 1G MACSets the interval (in SB_C ... */
+ uint32_t xoff_timer_1g;
+ /* [0x20] PFC force flow control generation */
+ uint32_t ec_pfc;
+ uint32_t rsrvd[3];
+};
+struct al_ec_fc_udma {
+ /* [0x0] Mask of "pause_on" [0] for all queues */
+ uint32_t q_pause_0;
+ /* [0x4] Mask of "pause_on" [1] for all queues */
+ uint32_t q_pause_1;
+ /* [0x8] Mask of "pause_on" [2] for all queues */
+ uint32_t q_pause_2;
+ /* [0xc] Mask of "pause_on" [3] for all queues */
+ uint32_t q_pause_3;
+ /* [0x10] Mask of "pause_on" [4] for all queues */
+ uint32_t q_pause_4;
+ /* [0x14] Mask of "pause_on" [5] for all queues */
+ uint32_t q_pause_5;
+ /* [0x18] Mask of "pause_on" [6] for all queues */
+ uint32_t q_pause_6;
+ /* [0x1c] Mask of "pause_on" [7] for all queues */
+ uint32_t q_pause_7;
+ /* [0x20] Mask of external GPIO input pause [0] for all queue ... */
+ uint32_t q_gpio_0;
+ /* [0x24] Mask of external GPIO input pause [1] for all queue ... */
+ uint32_t q_gpio_1;
+ /* [0x28] Mask of external GPIO input pause [2] for all queue ... */
+ uint32_t q_gpio_2;
+ /* [0x2c] Mask of external GPIO input pause [3] for all queue ... */
+ uint32_t q_gpio_3;
+ /* [0x30] Mask of external GPIO input [4] for all queues */
+ uint32_t q_gpio_4;
+ /* [0x34] Mask of external GPIO input [5] for all queues */
+ uint32_t q_gpio_5;
+ /* [0x38] Mask of external GPIO input [6] for all queues */
+ uint32_t q_gpio_6;
+ /* [0x3c] Mask of external GPIO input [7] for all queues */
+ uint32_t q_gpio_7;
+ /* [0x40] Mask of "pause_on" [7:0] for the UDMA stream inter ... */
+ uint32_t s_pause;
+ /* [0x44] Mask of Rx Almost Full indication for generating XO ... */
+ uint32_t q_xoff_0;
+ /* [0x48] Mask of Rx Almost Full indication for generating XO ... */
+ uint32_t q_xoff_1;
+ /* [0x4c] Mask of Rx Almost Full indication for generating XO ... */
+ uint32_t q_xoff_2;
+ /* [0x50] Mask of Rx Almost Full indication for generating XO ... */
+ uint32_t q_xoff_3;
+ /* [0x54] Mask of Rx Almost Full indication for generating XO ... */
+ uint32_t q_xoff_4;
+ /* [0x58] Mask of Rx Almost Full indication for generating XO ... */
+ uint32_t q_xoff_5;
+ /* [0x5c] Mask of Rx Almost Full indication for generating XO ... */
+ uint32_t q_xoff_6;
+ /* [0x60] Mask of Rx Almost Full indication for generating XO ... */
+ uint32_t q_xoff_7;
+ uint32_t rsrvd[7];
+};
+struct al_ec_tpg_rpa_res {
+ /* [0x0] NOT used */
+ uint32_t not_used;
+ uint32_t rsrvd[63];
+};
+struct al_ec_eee {
+ /* [0x0] EEE configuration */
+ uint32_t cfg_e;
+ /* [0x4] Number of clocks to get into EEE mode. */
+ uint32_t pre_cnt;
+ /* [0x8] Number of clocks to stop MAC EEE mode after getting ... */
+ uint32_t post_cnt;
+ /* [0xc] Number of clocks to stop the Tx MAC interface after ... */
+ uint32_t stop_cnt;
+ /* [0x10] EEE status */
+ uint32_t stat_eee;
+ uint32_t rsrvd[59];
+};
+struct al_ec_stat {
+ /* [0x0] Rx Frequency adjust FIFO input packets */
+ uint32_t faf_in_rx_pkt;
+ /* [0x4] Rx Frequency adjust FIFO input short error packets */
+ uint32_t faf_in_rx_short;
+ /* [0x8] Rx Frequency adjust FIFO input long error packets */
+ uint32_t faf_in_rx_long;
+ /* [0xc] Rx Frequency adjust FIFO output packets */
+ uint32_t faf_out_rx_pkt;
+ /* [0x10] Rx Frequency adjust FIFO output short error packets ... */
+ uint32_t faf_out_rx_short;
+ /* [0x14] Rx Frequency adjust FIFO output long error packets */
+ uint32_t faf_out_rx_long;
+ /* [0x18] Rx Frequency adjust FIFO output drop packets */
+ uint32_t faf_out_drop;
+ /* [0x1c] Number of packets written into the Rx FIFO (without ... */
+ uint32_t rxf_in_rx_pkt;
+ /* [0x20] Number of error packets written into the Rx FIFO (w ... */
+ uint32_t rxf_in_fifo_err;
+ /* [0x24] Number of packets written into the loopback FIFO (w ... */
+ uint32_t lbf_in_rx_pkt;
+ /* [0x28] Number of error packets written into the loopback F ... */
+ uint32_t lbf_in_fifo_err;
+ /* [0x2c] Number of packets read from Rx FIFO 1 */
+ uint32_t rxf_out_rx_1_pkt;
+ /* [0x30] Number of packets read from Rx FIFO 2 (loopback FIF ... */
+ uint32_t rxf_out_rx_2_pkt;
+ /* [0x34] Rx FIFO output drop packets from FIFO 1 */
+ uint32_t rxf_out_drop_1_pkt;
+ /* [0x38] Rx FIFO output drop packets from FIFO 2 (loopback) */
+ uint32_t rxf_out_drop_2_pkt;
+ /* [0x3c] Rx Parser 1, input packet counter */
+ uint32_t rpe_1_in_rx_pkt;
+ /* [0x40] Rx Parser 1, output packet counter */
+ uint32_t rpe_1_out_rx_pkt;
+ /* [0x44] Rx Parser 2, input packet counter */
+ uint32_t rpe_2_in_rx_pkt;
+ /* [0x48] Rx Parser 2, output packet counter */
+ uint32_t rpe_2_out_rx_pkt;
+ /* [0x4c] Rx Parser 3 (MACsec), input packet counter */
+ uint32_t rpe_3_in_rx_pkt;
+ /* [0x50] Rx Parser 3 (MACsec), output packet counter */
+ uint32_t rpe_3_out_rx_pkt;
+ /* [0x54] Tx parser, input packet counter */
+ uint32_t tpe_in_tx_pkt;
+ /* [0x58] Tx parser, output packet counter */
+ uint32_t tpe_out_tx_pkt;
+ /* [0x5c] Tx packet modification, input packet counter */
+ uint32_t tpm_tx_pkt;
+ /* [0x60] Tx forwarding input packet counter */
+ uint32_t tfw_in_tx_pkt;
+ /* [0x64] Tx forwarding input packet counter */
+ uint32_t tfw_out_tx_pkt;
+ /* [0x68] Rx forwarding input packet counter */
+ uint32_t rfw_in_rx_pkt;
+ /* [0x6c] Rx Forwarding, packet with VLAN command drop indica ... */
+ uint32_t rfw_in_vlan_drop;
+ /* [0x70] Rx Forwarding, packets with parse drop indication */
+ uint32_t rfw_in_parse_drop;
+ /* [0x74] Rx Forwarding, multicast packets */
+ uint32_t rfw_in_mc;
+ /* [0x78] Rx Forwarding, broadcast packets */
+ uint32_t rfw_in_bc;
+ /* [0x7c] Rx Forwarding, tagged packets */
+ uint32_t rfw_in_vlan_exist;
+ /* [0x80] Rx Forwarding, untagged packets */
+ uint32_t rfw_in_vlan_nexist;
+ /* [0x84] Rx Forwarding, packets with MAC address drop indica ... */
+ uint32_t rfw_in_mac_drop;
+ /* [0x88] Rx Forwarding, packets with undetected MAC address */
+ uint32_t rfw_in_mac_ndet_drop;
+ /* [0x8c] Rx Forwarding, packets with drop indication from th ... */
+ uint32_t rfw_in_ctrl_drop;
+ /* [0x90] Rx Forwarding, packets with L3_protocol_index drop ... */
+ uint32_t rfw_in_prot_i_drop;
+ /* [0x94] EEE, number of times the system went into EEE state ... */
+ uint32_t eee_in;
+ uint32_t rsrvd[90];
+};
+struct al_ec_stat_udma {
+ /* [0x0] Rx forwarding output packet counter */
+ uint32_t rfw_out_rx_pkt;
+ /* [0x4] Rx forwarding output drop packet counter */
+ uint32_t rfw_out_drop;
+ /* [0x8] Multi-stream write, number of Rx packets */
+ uint32_t msw_in_rx_pkt;
+ /* [0xc] Multi-stream write, number of dropped packets at SO ... */
+ uint32_t msw_drop_q_full;
+ /* [0x10] Multi-stream write, number of dropped packets at SO ... */
+ uint32_t msw_drop_sop;
+ /* [0x14] Multi-stream write, number of dropped packets at EO ... */
+ uint32_t msw_drop_eop;
+ /* [0x18] Multi-stream write, number of packets written to th ... */
+ uint32_t msw_wr_eop;
+ /* [0x1c] Multi-stream write, number of packets read from the ... */
+ uint32_t msw_out_rx_pkt;
+ /* [0x20] Number of transmitted packets without TSO enabled */
+ uint32_t tso_no_tso_pkt;
+ /* [0x24] Number of transmitted packets with TSO enabled */
+ uint32_t tso_tso_pkt;
+ /* [0x28] Number of TSO segments that were generated */
+ uint32_t tso_seg_pkt;
+ /* [0x2c] Number of TSO segments that required padding */
+ uint32_t tso_pad_pkt;
+ /* [0x30] Tx Packet modification, MAC SA spoof error */
+ uint32_t tpm_tx_spoof;
+ /* [0x34] Tx MAC interface, input packet counter */
+ uint32_t tmi_in_tx_pkt;
+ /* [0x38] Tx MAC interface, number of packets forwarded to th ... */
+ uint32_t tmi_out_to_mac;
+ /* [0x3c] Tx MAC interface, number of packets forwarded to th ... */
+ uint32_t tmi_out_to_rx;
+ /* [0x40] Tx MAC interface, number of transmitted bytes */
+ uint32_t tx_q0_bytes;
+ /* [0x44] Tx MAC interface, number of transmitted bytes */
+ uint32_t tx_q1_bytes;
+ /* [0x48] Tx MAC interface, number of transmitted bytes */
+ uint32_t tx_q2_bytes;
+ /* [0x4c] Tx MAC interface, number of transmitted bytes */
+ uint32_t tx_q3_bytes;
+ /* [0x50] Tx MAC interface, number of transmitted packets */
+ uint32_t tx_q0_pkts;
+ /* [0x54] Tx MAC interface, number of transmitted packets */
+ uint32_t tx_q1_pkts;
+ /* [0x58] Tx MAC interface, number of transmitted packets */
+ uint32_t tx_q2_pkts;
+ /* [0x5c] Tx MAC interface, number of transmitted packets */
+ uint32_t tx_q3_pkts;
+ uint32_t rsrvd[40];
+};
+struct al_ec_msp {
+ /* [0x0] Ethernet parsing engine configuration 1 */
+ uint32_t p_parse_cfg;
+ /* [0x4] Protocol index action table address */
+ uint32_t p_act_table_addr;
+ /* [0x8] Protocol index action table data */
+ uint32_t p_act_table_data_1;
+ /* [0xc] Protocol index action table data */
+ uint32_t p_act_table_data_2;
+ /* [0x10] Protocol index action table data */
+ uint32_t p_act_table_data_3;
+ /* [0x14] Protocol index action table data */
+ uint32_t p_act_table_data_4;
+ /* [0x18] Protocol index action table data */
+ uint32_t p_act_table_data_5;
+ /* [0x1c] Protocol index action table data */
+ uint32_t p_act_table_data_6;
+ /* [0x20] Input result vector, default values for parser inpu ... */
+ uint32_t p_res_def;
+ /* [0x24] Result input vector selection */
+ uint32_t p_res_in;
+ uint32_t rsrvd[6];
+};
+struct al_ec_msp_p {
+ /* [0x0] Header length, support for header length table for ... */
+ uint32_t h_hdr_len;
+};
+struct al_ec_msp_c {
+ /* [0x0] Data for comparison */
+ uint32_t p_comp_data;
+ /* [0x4] Mask for comparison */
+ uint32_t p_comp_mask;
+ /* [0x8] Compare control */
+ uint32_t p_comp_ctrl;
+ uint32_t rsrvd[4];
+};
+struct al_ec_wol {
+	/* [0x0] WoL enable configuration, Packet forwarding and inte ... */
+	uint32_t wol_en;
+	/* [0x4] Password for magic_password packet detection - bits ... */
+	uint32_t magic_pswd_l;
+	/* [0x8] Password for magic_password packet detection - 47: ... */
+	uint32_t magic_pswd_h;
+	/* [0xc] Configured L3 Destination IP address for WoL IPv6 p ... */
+	uint32_t ipv6_dip_word0;
+	/* [0x10] Configured L3 Destination IP address for WoL IPv6 p ... */
+	uint32_t ipv6_dip_word1;
+	/* [0x14] Configured L3 Destination IP address for WoL IPv6 p ... */
+	uint32_t ipv6_dip_word2;
+	/* [0x18] Configured L3 Destination IP address for WoL IPv6 p ... */
+	uint32_t ipv6_dip_word3;
+	/* [0x1c] Configured L3 Destination IP address for WoL IPv4 p ... */
+	uint32_t ipv4_dip;
+	/* [0x20] Configured EtherType for WoL EtherType_da/EtherType ... */
+	uint32_t ethertype;
+	uint32_t rsrvd[7];
+};
+struct al_ec_pth {
+ /* [0x0] System time counter (Time of Day) */
+ uint32_t system_time_seconds;
+ /* [0x4] System time subseconds in a second (MSBs) */
+ uint32_t system_time_subseconds_msb;
+ /* [0x8] System time subseconds in a second (LSBs) */
+ uint32_t system_time_subseconds_lsb;
+ /* [0xc] Clock period in femtoseconds (MSB) */
+ uint32_t clock_period_msb;
+ /* [0x10] Clock period in femtoseconds (LSB) */
+ uint32_t clock_period_lsb;
+ /* [0x14] Control register for internal updates to the system ... */
+ uint32_t int_update_ctrl;
+ /* [0x18] Value to update system_time_seconds with */
+ uint32_t int_update_seconds;
+ /* [0x1c] Value to update system_time_subseconds_msb with */
+ uint32_t int_update_subseconds_msb;
+ /* [0x20] Value to update system_time_subseconds_lsb with */
+ uint32_t int_update_subseconds_lsb;
+ /* [0x24] Control register for external updates to the system ... */
+ uint32_t ext_update_ctrl;
+ /* [0x28] Value to update system_time_seconds with */
+ uint32_t ext_update_seconds;
+ /* [0x2c] Value to update system_time_subseconds_msb with */
+ uint32_t ext_update_subseconds_msb;
+ /* [0x30] Value to update system_time_subseconds_lsb with */
+ uint32_t ext_update_subseconds_lsb;
+ /* [0x34] This value represents the APB transaction delay fro ... */
+ uint32_t read_compensation_subseconds_msb;
+ /* [0x38] This value represents the APB transaction delay fro ... */
+ uint32_t read_compensation_subseconds_lsb;
+ /* [0x3c] This value is used for two purposes:1 */
+ uint32_t int_write_compensation_subseconds_msb;
+ /* [0x40] This value is used for two purposes:1 */
+ uint32_t int_write_compensation_subseconds_lsb;
+ /* [0x44] This value represents the number of cycles it for a ... */
+ uint32_t ext_write_compensation_subseconds_msb;
+ /* [0x48] This value represents the number of cycles it for a ... */
+ uint32_t ext_write_compensation_subseconds_lsb;
+ /* [0x4c] Value to be added to system_time before transferrin ... */
+ uint32_t sync_compensation_subseconds_msb;
+ /* [0x50] Value to be added to system_time before transferrin ... */
+ uint32_t sync_compensation_subseconds_lsb;
+ uint32_t rsrvd[11];
+};
+struct al_ec_pth_egress {
+ /* [0x0] Control register for egress trigger #k */
+ uint32_t trigger_ctrl;
+ /* [0x4] threshold for next egress trigger (#k) - secondsWri ... */
+ uint32_t trigger_seconds;
+ /* [0x8] Threshold for next egress trigger (#k) - subseconds ... */
+ uint32_t trigger_subseconds_msb;
+ /* [0xc] threshold for next egress trigger (#k) - subseconds ... */
+ uint32_t trigger_subseconds_lsb;
+ /* [0x10] External output pulse width (subseconds_msb)(Atomic ... */
+ uint32_t pulse_width_subseconds_msb;
+ /* [0x14] External output pulse width (subseconds_lsb)(Atomic ... */
+ uint32_t pulse_width_subseconds_lsb;
+ uint32_t rsrvd[2];
+};
+struct al_ec_pth_db {
+ /* [0x0] timestamp[k], in resolution of 2^18 femtosec =~ 0 */
+ uint32_t ts;
+ /* [0x4] Timestamp entry is valid */
+ uint32_t qual;
+ uint32_t rsrvd[4];
+};
+struct al_ec_gen_v3 {
+ /* [0x0] Bypass enable */
+ uint32_t bypass;
+ /* [0x4] Rx Completion descriptor */
+ uint32_t rx_comp_desc;
+ /* [0x8] general configuration */
+ uint32_t conf;
+ uint32_t rsrvd[13];
+};
+struct al_ec_tfw_v3 {
+	/* [0x0] Generic protocol detect Cam compare table address */
+	uint32_t tx_gpd_cam_addr;
+	/* [0x4] Tx Generic protocol detect Cam compare data_1 (low) ... */
+	uint32_t tx_gpd_cam_data_1;
+	/* [0x8] Tx Generic protocol detect Cam compare data_2 (high ... */
+	uint32_t tx_gpd_cam_data_2;
+	/* [0xc] Tx Generic protocol detect Cam compare mask_1 (low) ... */
+	uint32_t tx_gpd_cam_mask_1;
+	/* [0x10] Tx Generic protocol detect Cam compare mask_2 (high ... */
+	uint32_t tx_gpd_cam_mask_2;
+	/* [0x14] Tx Generic protocol detect Cam compare control */
+	uint32_t tx_gpd_cam_ctrl;
+	/* [0x18] Tx Generic crc parameters legacy */
+	uint32_t tx_gcp_legacy;
+	/* [0x1c] Tx Generic crc parameters table address */
+	uint32_t tx_gcp_table_addr;
+	/* [0x20] Tx Generic crc parameters table general */
+	uint32_t tx_gcp_table_gen;
+	/* [0x24] Tx Generic crc parameters table mask word 1 */
+	uint32_t tx_gcp_table_mask_1;
+	/* [0x28] Tx Generic crc parameters table mask word 2 */
+	uint32_t tx_gcp_table_mask_2;
+	/* [0x2c] Tx Generic crc parameters table mask word 3 */
+	uint32_t tx_gcp_table_mask_3;
+	/* [0x30] Tx Generic crc parameters table mask word 4 */
+	uint32_t tx_gcp_table_mask_4;
+	/* [0x34] Tx Generic crc parameters table mask word 5 */
+	uint32_t tx_gcp_table_mask_5;
+	/* [0x38] Tx Generic crc parameters table mask word 6 */
+	uint32_t tx_gcp_table_mask_6;
+	/* [0x3c] Tx Generic crc parameters table crc init */
+	uint32_t tx_gcp_table_crc_init;
+	/* [0x40] Tx Generic crc parameters table result configuration ... */
+	uint32_t tx_gcp_table_res;
+	/* [0x44] Tx Generic crc parameters table alu opcode */
+	uint32_t tx_gcp_table_alu_opcode;
+	/* [0x48] Tx Generic crc parameters table alu opsel */
+	uint32_t tx_gcp_table_alu_opsel;
+	/* [0x4c] Tx Generic crc parameters table alu constant value */
+	uint32_t tx_gcp_table_alu_val;
+	/* [0x50] Tx CRC/Checksum replace */
+	uint32_t crc_csum_replace;
+	/* [0x54] CRC/Checksum replace table address */
+	uint32_t crc_csum_replace_table_addr;
+	/* [0x58] CRC/Checksum replace table */
+	uint32_t crc_csum_replace_table;
+	uint32_t rsrvd[9];
+};
+
+struct al_ec_rfw_v3 {
+	/* [0x0] Rx Generic protocol detect Cam compare table addres ... */
+	uint32_t rx_gpd_cam_addr;
+	/* [0x4] Rx Generic protocol detect Cam compare data_1 (low) ... */
+	uint32_t rx_gpd_cam_data_1;
+	/* [0x8] Rx Generic protocol detect Cam compare data_2 (high ... */
+	uint32_t rx_gpd_cam_data_2;
+	/* [0xc] Rx Generic protocol detect Cam compare mask_1 (low) ... */
+	uint32_t rx_gpd_cam_mask_1;
+	/* [0x10] Rx Generic protocol detect Cam compare mask_2 (high ... */
+	uint32_t rx_gpd_cam_mask_2;
+	/* [0x14] Rx Generic protocol detect Cam compare control */
+	uint32_t rx_gpd_cam_ctrl;
+	/* [0x18] Generic protocol detect Parser result vector pointe ... */
+	uint32_t gpd_p1;
+	/* [0x1c] Generic protocol detect Parser result vector pointe ... */
+	uint32_t gpd_p2;
+	/* [0x20] Generic protocol detect Parser result vector pointe ... */
+	uint32_t gpd_p3;
+	/* [0x24] Generic protocol detect Parser result vector pointe ... */
+	uint32_t gpd_p4;
+	/* [0x28] Generic protocol detect Parser result vector pointe ... */
+	uint32_t gpd_p5;
+	/* [0x2c] Generic protocol detect Parser result vector pointe ... */
+	uint32_t gpd_p6;
+	/* [0x30] Generic protocol detect Parser result vector pointe ... */
+	uint32_t gpd_p7;
+	/* [0x34] Generic protocol detect Parser result vector pointe ... */
+	uint32_t gpd_p8;
+	/* [0x38] Rx Generic crc parameters legacy */
+	uint32_t rx_gcp_legacy;
+	/* [0x3c] Rx Generic crc parameters table address */
+	uint32_t rx_gcp_table_addr;
+	/* [0x40] Rx Generic crc parameters table general */
+	uint32_t rx_gcp_table_gen;
+	/* [0x44] Rx Generic crc parameters table mask word 1 */
+	uint32_t rx_gcp_table_mask_1;
+	/* [0x48] Rx Generic crc parameters table mask word 2 */
+	uint32_t rx_gcp_table_mask_2;
+	/* [0x4c] Rx Generic crc parameters table mask word 3 */
+	uint32_t rx_gcp_table_mask_3;
+	/* [0x50] Rx Generic crc parameters table mask word 4 */
+	uint32_t rx_gcp_table_mask_4;
+	/* [0x54] Rx Generic crc parameters table mask word 5 */
+	uint32_t rx_gcp_table_mask_5;
+	/* [0x58] Rx Generic crc parameters table mask word 6 */
+	uint32_t rx_gcp_table_mask_6;
+	/* [0x5c] Rx Generic crc parameters table crc init */
+	uint32_t rx_gcp_table_crc_init;
+	/* [0x60] Rx Generic crc parameters table result configuration ... */
+	uint32_t rx_gcp_table_res;
+	/* [0x64] Rx Generic crc parameters table alu opcode */
+	uint32_t rx_gcp_table_alu_opcode;
+	/* [0x68] Rx Generic crc parameters table alu opsel */
+	uint32_t rx_gcp_table_alu_opsel;
+	/* [0x6c] Rx Generic crc parameters table alu constant value ... */
+	uint32_t rx_gcp_table_alu_val;
+	/* [0x70] Generic crc engine parameters alu Parser result vect ... */
+	uint32_t rx_gcp_alu_p1;
+	/* [0x74] Generic crc engine parameters alu Parser result vec ... */
+	uint32_t rx_gcp_alu_p2;
+	/* [0x78] Header split control table address */
+	uint32_t hs_ctrl_table_addr;
+	/* [0x7c] Header split control table */
+	uint32_t hs_ctrl_table;
+	/* [0x80] Header split control alu opcode */
+	uint32_t hs_ctrl_table_alu_opcode;
+	/* [0x84] Header split control alu opsel */
+	uint32_t hs_ctrl_table_alu_opsel;
+	/* [0x88] Header split control alu constant value */
+	uint32_t hs_ctrl_table_alu_val;
+	/* [0x8c] Header split control configuration */
+	uint32_t hs_ctrl_cfg;
+	/* [0x90] Header split control alu Parser result vector point ... */
+	uint32_t hs_ctrl_alu_p1;
+	/* [0x94] Header split control alu Parser result vector point ... */
+	uint32_t hs_ctrl_alu_p2;
+	uint32_t rsrvd[26];
+};
+struct al_ec_crypto {
+ /* [0x0] Tx inline crypto configuration */
+ uint32_t tx_config;
+ /* [0x4] Rx inline crypto configuration */
+ uint32_t rx_config;
+ /* [0x8] reserved FFU */
+ uint32_t tx_override;
+ /* [0xc] reserved FFU */
+ uint32_t rx_override;
+ /* [0x10] inline XTS alpha [31:0] */
+ uint32_t xts_alpha_1;
+ /* [0x14] inline XTS alpha [63:32] */
+ uint32_t xts_alpha_2;
+ /* [0x18] inline XTS alpha [95:64] */
+ uint32_t xts_alpha_3;
+ /* [0x1c] inline XTS alpha [127:96] */
+ uint32_t xts_alpha_4;
+ /* [0x20] inline XTS sector ID increment [31:0] */
+ uint32_t xts_sector_id_1;
+ /* [0x24] inline XTS sector ID increment [63:32] */
+ uint32_t xts_sector_id_2;
+ /* [0x28] inline XTS sector ID increment [95:64] */
+ uint32_t xts_sector_id_3;
+ /* [0x2c] inline XTS sector ID increment [127:96] */
+ uint32_t xts_sector_id_4;
+ /* [0x30] IV formation configuration */
+ uint32_t tx_enc_iv_construction;
+ /* [0x34] IV formation configuration */
+ uint32_t rx_enc_iv_construction;
+ /* [0x38] IV formation configuration */
+ uint32_t rx_enc_iv_map;
+ /*
+ [0x3c] effectively shorten shift-registers used for
+ eop-pkt-trim, in order to improve performance.
+ Each value must be built of consecutive 1's (bypassed regs),
+ and then consecutive 0's (non-bypassed regs)
+ */
+ uint32_t tx_pkt_trim_len;
+ /*
+ [0x40] effectively shorten shift-registers used for
+ eop-pkt-trim, in order to improve performance.
+ Each value must be built of consecutive 1's (bypassed regs),
+ and then consecutive 0's (non-bypassed regs)
+ */
+ uint32_t rx_pkt_trim_len;
+ /* [0x44] reserved FFU */
+ uint32_t tx_reserved;
+ /* [0x48] reserved FFU */
+ uint32_t rx_reserved;
+ uint32_t rsrvd[13];
+};
+struct al_ec_crypto_perf_cntr {
+ /* [0x0] */
+ uint32_t total_tx_pkts;
+ /* [0x4] */
+ uint32_t total_rx_pkts;
+ /* [0x8] */
+ uint32_t total_tx_secured_pkts;
+ /* [0xc] */
+ uint32_t total_rx_secured_pkts;
+ /* [0x10] */
+ uint32_t total_tx_secured_pkts_cipher_mode;
+ /* [0x14] */
+ uint32_t total_tx_secured_pkts_cipher_mode_cmpr;
+ /* [0x18] */
+ uint32_t total_rx_secured_pkts_cipher_mode;
+ /* [0x1c] */
+ uint32_t total_rx_secured_pkts_cipher_mode_cmpr;
+ /* [0x20] */
+ uint32_t total_tx_secured_bytes_low;
+ /* [0x24] */
+ uint32_t total_tx_secured_bytes_high;
+ /* [0x28] */
+ uint32_t total_rx_secured_bytes_low;
+ /* [0x2c] */
+ uint32_t total_rx_secured_bytes_high;
+ /* [0x30] */
+ uint32_t total_tx_sign_calcs;
+ /* [0x34] */
+ uint32_t total_rx_sign_calcs;
+ /* [0x38] */
+ uint32_t total_tx_sign_errs;
+ /* [0x3c] */
+ uint32_t total_rx_sign_errs;
+};
+struct al_ec_crypto_tx_tid {
+ /* [0x0] tid_default_entry */
+ uint32_t def_val;
+};
+
+struct al_ec_regs {
+ uint32_t rsrvd_0[32];
+ struct al_ec_gen gen; /* [0x80] */
+ struct al_ec_mac mac; /* [0xc0] */
+ struct al_ec_rxf rxf; /* [0x100] */
+ struct al_ec_epe epe[2]; /* [0x180] */
+ struct al_ec_epe_res epe_res; /* [0x200] */
+ struct al_ec_epe_h epe_h[32]; /* [0x280] */
+ struct al_ec_epe_p epe_p[32]; /* [0x300] */
+ struct al_ec_epe_a epe_a[32]; /* [0x680] */
+ struct al_ec_rfw rfw; /* [0x700] */
+ struct al_ec_rfw_udma rfw_udma[4]; /* [0x7f0] */
+ struct al_ec_rfw_hash rfw_hash[10]; /* [0x800] */
+ struct al_ec_rfw_priority rfw_priority[8]; /* [0x828] */
+ struct al_ec_rfw_default rfw_default[8]; /* [0x848] */
+ struct al_ec_fwd_mac fwd_mac[32]; /* [0x868] */
+ struct al_ec_msw msw; /* [0xae8] */
+ struct al_ec_tso tso; /* [0xb00] */
+ struct al_ec_tso_sel tso_sel[8]; /* [0xb60] */
+ struct al_ec_tpe tpe; /* [0xb80] */
+ struct al_ec_tpm_udma tpm_udma[4]; /* [0xbc0] */
+ struct al_ec_tpm_sel tpm_sel[4]; /* [0xbf0] */
+ struct al_ec_tfw tfw; /* [0xc00] */
+ struct al_ec_tfw_udma tfw_udma[4]; /* [0xc60] */
+ struct al_ec_tmi tmi; /* [0xcc0] */
+ struct al_ec_efc efc; /* [0xcd0] */
+ struct al_ec_fc_udma fc_udma[4]; /* [0xd00] */
+ struct al_ec_tpg_rpa_res tpg_rpa_res; /* [0xf00] */
+ struct al_ec_eee eee; /* [0x1000] */
+ struct al_ec_stat stat; /* [0x1100] */
+ struct al_ec_stat_udma stat_udma[4]; /* [0x1300] */
+ struct al_ec_msp msp; /* [0x1700] */
+ struct al_ec_msp_p msp_p[32]; /* [0x1740] */
+ struct al_ec_msp_c msp_c[32]; /* [0x17c0] */
+ uint32_t rsrvd_1[16];
+ struct al_ec_wol wol; /* [0x1b80] */
+ uint32_t rsrvd_2[80];
+ struct al_ec_pth pth; /* [0x1d00] */
+ struct al_ec_pth_egress pth_egress[8]; /* [0x1d80] */
+ struct al_ec_pth_db pth_db[16]; /* [0x1e80] */
+ uint32_t rsrvd_3[416];
+ struct al_ec_gen_v3 gen_v3; /* [0x2680] */
+ struct al_ec_tfw_v3 tfw_v3; /* [0x26c0] */
+ struct al_ec_rfw_v3 rfw_v3; /* [0x2740] */
+ struct al_ec_crypto crypto; /* [0x2840] */
+ struct al_ec_crypto_perf_cntr crypto_perf_cntr[2]; /* [0x28c0] */
+ uint32_t rsrvd_4[48];
+ struct al_ec_crypto_tx_tid crypto_tx_tid[8]; /* [0x2a00] */
+};
+
+
+/*
+* Registers Fields
+*/
+
+
+/**** version register ****/
+/* Revision number (Minor) */
+#define EC_GEN_VERSION_RELEASE_NUM_MINOR_MASK 0x000000FF
+#define EC_GEN_VERSION_RELEASE_NUM_MINOR_SHIFT 0
+/* Revision number (Major) */
+#define EC_GEN_VERSION_RELEASE_NUM_MAJOR_MASK 0x0000FF00
+#define EC_GEN_VERSION_RELEASE_NUM_MAJOR_SHIFT 8
+/* Day of release */
+#define EC_GEN_VERSION_DATE_DAY_MASK 0x001F0000
+#define EC_GEN_VERSION_DATE_DAY_SHIFT 16
+/* Month of release */
+#define EC_GEN_VERSION_DATA_MONTH_MASK 0x01E00000
+#define EC_GEN_VERSION_DATA_MONTH_SHIFT 21
+/* Year of release (starting from 2000) */
+#define EC_GEN_VERSION_DATE_YEAR_MASK 0x3E000000
+#define EC_GEN_VERSION_DATE_YEAR_SHIFT 25
+/* Reserved */
+#define EC_GEN_VERSION_RESERVED_MASK 0xC0000000
+#define EC_GEN_VERSION_RESERVED_SHIFT 30
+
+/**** en register ****/
+/* Enable Frequency adjust FIFO input controller operation. */
+#define EC_GEN_EN_FAF_IN (1 << 0)
+/* Enable Frequency adjust FIFO output controller operation. */
+#define EC_GEN_EN_FAF_OUT (1 << 1)
+/* Enable Rx FIFO input controller 1 operation. */
+#define EC_GEN_EN_RXF_IN (1 << 2)
+/* Enable Rx FIFO output controller operation. */
+#define EC_GEN_EN_RXF_OUT (1 << 3)
+/* Enable Rx forwarding input controller operation. */
+#define EC_GEN_EN_RFW_IN (1 << 4)
+/* Enable Rx forwarding output controller operation. */
+#define EC_GEN_EN_RFW_OUT (1 << 5)
+/* Enable Rx multi-stream write controller operation. */
+#define EC_GEN_EN_MSW_IN (1 << 6)
+/* Enable Rx first parsing engine output operation. */
+#define EC_GEN_EN_RPE_1_OUT (1 << 7)
+/* Enable Rx first parsing engine input operation. */
+#define EC_GEN_EN_RPE_1_IN (1 << 8)
+/* Enable Rx second parsing engine output operation. */
+#define EC_GEN_EN_RPE_2_OUT (1 << 9)
+/* Enable Rx second parsing engine input operation. */
+#define EC_GEN_EN_RPE_2_IN (1 << 10)
+/* Enable Rx MACsec parsing engine output operation. */
+#define EC_GEN_EN_RPE_3_OUT (1 << 11)
+/* Enable Rx MACsec parsing engine input operation. */
+#define EC_GEN_EN_RPE_3_IN (1 << 12)
+/* Enable Loopback FIFO input controller 1 operation. */
+#define EC_GEN_EN_LBF_IN (1 << 13)
+/* Enable Rx packet analyzer operation. */
+#define EC_GEN_EN_RPA (1 << 14)
+
+#define EC_GEN_EN_RESERVED_15 (1 << 15)
+/* Enable Tx stream interface operation. */
+#define EC_GEN_EN_TSO (1 << 16)
+/* Enable Tx parser input controller operation. */
+#define EC_GEN_EN_TPE_IN (1 << 17)
+/* Enable Tx parser output controller operation. */
+#define EC_GEN_EN_TPE_OUT (1 << 18)
+/* Enable Tx packet modification operation. */
+#define EC_GEN_EN_TPM (1 << 19)
+/* Enable Tx forwarding input controller operation. */
+#define EC_GEN_EN_TFW_IN (1 << 20)
+/* Enable Tx forwarding output controller operation. */
+#define EC_GEN_EN_TFW_OUT (1 << 21)
+/* Enable Tx MAC interface controller operation. */
+#define EC_GEN_EN_TMI (1 << 22)
+/* Enable Tx packet generator operation. */
+#define EC_GEN_EN_TPG (1 << 23)
+
+#define EC_GEN_EN_RESERVED_31_MASK 0xFF000000
+#define EC_GEN_EN_RESERVED_31_SHIFT 24
+
+/**** fifo_en register ****/
+/* Enable Frequency adjust FIFO operation (input). */
+#define EC_GEN_FIFO_EN_FAF_IN (1 << 0)
+/* Enable Frequency adjust FIFO operation (output). */
+#define EC_GEN_FIFO_EN_FAF_OUT (1 << 1)
+/* Enable Rx FIFO operation. */
+#define EC_GEN_FIFO_EN_RX_FIFO (1 << 2)
+/* Enable Rx forwarding FIFO operation. */
+#define EC_GEN_FIFO_EN_RFW_FIFO (1 << 3)
+/* Enable Rx multi-stream write FIFO operation */
+#define EC_GEN_FIFO_EN_MSW_FIFO (1 << 4)
+/* Enable Rx first parser FIFO operation. */
+#define EC_GEN_FIFO_EN_RPE_1_FIFO (1 << 5)
+/* Enable Rx second parser FIFO operation. */
+#define EC_GEN_FIFO_EN_RPE_2_FIFO (1 << 6)
+/* Enable Rx MACsec parser FIFO operation. */
+#define EC_GEN_FIFO_EN_RPE_3_FIFO (1 << 7)
+/* Enable Loopback FIFO operation. */
+#define EC_GEN_FIFO_EN_LB_FIFO (1 << 8)
+
+#define EC_GEN_FIFO_EN_RESERVED_15_9_MASK 0x0000FE00
+#define EC_GEN_FIFO_EN_RESERVED_15_9_SHIFT 9
+/* Enable Tx parser FIFO operation. */
+#define EC_GEN_FIFO_EN_TPE_FIFO (1 << 16)
+/* Enable Tx forwarding FIFO operation. */
+#define EC_GEN_FIFO_EN_TFW_FIFO (1 << 17)
+
+#define EC_GEN_FIFO_EN_RESERVED_31_18_MASK 0xFFFC0000
+#define EC_GEN_FIFO_EN_RESERVED_31_18_SHIFT 18
+
+/**** l2 register ****/
+/* Size of a 802.3 Ethernet header (DA+SA) */
+#define EC_GEN_L2_SIZE_802_3_MASK 0x0000003F
+#define EC_GEN_L2_SIZE_802_3_SHIFT 0
+/* Size of a 802.3 + MACsec 8 byte header */
+#define EC_GEN_L2_SIZE_802_3_MS_8_MASK 0x00003F00
+#define EC_GEN_L2_SIZE_802_3_MS_8_SHIFT 8
+/* Offset of the L2 header from the beginning of the packet. */
+#define EC_GEN_L2_OFFSET_MASK 0x7F000000
+#define EC_GEN_L2_OFFSET_SHIFT 24
+
+/**** cfg_i register ****/
+/* IPv4 protocol index */
+#define EC_GEN_CFG_I_IPV4_INDEX_MASK 0x0000001F
+#define EC_GEN_CFG_I_IPV4_INDEX_SHIFT 0
+/* IPv6 protocol index */
+#define EC_GEN_CFG_I_IPV6_INDEX_MASK 0x000003E0
+#define EC_GEN_CFG_I_IPV6_INDEX_SHIFT 5
+/* TCP protocol index */
+#define EC_GEN_CFG_I_TCP_INDEX_MASK 0x00007C00
+#define EC_GEN_CFG_I_TCP_INDEX_SHIFT 10
+/* UDP protocol index */
+#define EC_GEN_CFG_I_UDP_INDEX_MASK 0x000F8000
+#define EC_GEN_CFG_I_UDP_INDEX_SHIFT 15
+/* MACsec with 8 bytes SecTAG */
+#define EC_GEN_CFG_I_MACSEC_8_INDEX_MASK 0x01F00000
+#define EC_GEN_CFG_I_MACSEC_8_INDEX_SHIFT 20
+/* MACsec with 16 bytes SecTAG */
+#define EC_GEN_CFG_I_MACSEC_16_INDEX_MASK 0x3E000000
+#define EC_GEN_CFG_I_MACSEC_16_INDEX_SHIFT 25
+
+/**** cfg_i_ext register ****/
+/* FcoE protocol index */
+#define EC_GEN_CFG_I_EXT_FCOE_INDEX_MASK 0x0000001F
+#define EC_GEN_CFG_I_EXT_FCOE_INDEX_SHIFT 0
+/* RoCE protocol index */
+#define EC_GEN_CFG_I_EXT_ROCE_INDEX_L3_1_MASK 0x000003E0
+#define EC_GEN_CFG_I_EXT_ROCE_INDEX_L3_1_SHIFT 5
+/* RoCE protocol index */
+#define EC_GEN_CFG_I_EXT_ROCE_INDEX_L3_2_MASK 0x00007C00
+#define EC_GEN_CFG_I_EXT_ROCE_INDEX_L3_2_SHIFT 10
+/* RoCE protocol index */
+#define EC_GEN_CFG_I_EXT_ROCE_INDEX_L4_MASK 0x000F8000
+#define EC_GEN_CFG_I_EXT_ROCE_INDEX_L4_SHIFT 15
+
+/**** en_ext register ****/
+/* Enable Usage of Ethernet port memories for testing */
+#define EC_GEN_EN_EXT_MEM_FOR_TEST_MASK 0x0000000F
+#define EC_GEN_EN_EXT_MEM_FOR_TEST_SHIFT 0
+#define EC_GEN_EN_EXT_MEM_FOR_TEST_VAL_EN \
+ (0xa << EC_GEN_EN_EXT_MEM_FOR_TEST_SHIFT)
+#define EC_GEN_EN_EXT_MEM_FOR_TEST_VAL_DIS \
+ (0x0 << EC_GEN_EN_EXT_MEM_FOR_TEST_SHIFT)
+/* Enable MAC loop back (Rx --> Tx, after MAC layer) for 802 */
+#define EC_GEN_EN_EXT_MAC_LB (1 << 4)
+/* CRC forward value for the MAC Tx when working in loopback mod ... */
+#define EC_GEN_EN_EXT_MAC_LB_CRC_FWD (1 << 5)
+/* Ready signal configuration when in loopback mode: 00 - Ready f ... */
+#define EC_GEN_EN_EXT_MAC_LB_READY_CFG_MASK 0x000000C0
+#define EC_GEN_EN_EXT_MAC_LB_READY_CFG_SHIFT 6
+/* Bypass the PTH completion update. */
+#define EC_GEN_EN_EXT_PTH_COMPLETION_BYPASS (1 << 16)
+/* Selection between the 1G and 10G MAC:
+0 - 1G
+1 - 10G */
+#define EC_GEN_EN_EXT_PTH_1_10_SEL (1 << 17)
+/* avoid timestamping every pkt in 1G */
+#define EC_GEN_EN_EXT_PTH_CFG_1G_TIMESTAMP_OPT (1 << 18)
+/* Selection between descriptor caching options (WORD selection) ... */
+#define EC_GEN_EN_EXT_CACHE_WORD_SPLIT (1 << 20)
+
+/**** gen register ****/
+/* Enable swap of input byte order */
+#define EC_MAC_GEN_SWAP_IN_BYTE (1 << 0)
+
+/**** min_pkt register ****/
+/* Minimum packet size */
+#define EC_MAC_MIN_PKT_SIZE_MASK 0x000FFFFF
+#define EC_MAC_MIN_PKT_SIZE_SHIFT 0
+
+/**** max_pkt register ****/
+/* Maximum packet size */
+#define EC_MAC_MAX_PKT_SIZE_MASK 0x000FFFFF
+#define EC_MAC_MAX_PKT_SIZE_SHIFT 0
+
+/**** cfg_1 register ****/
+/* Drop packet at the ingress: 0 - Packets are not dropped at the ... */
+#define EC_RXF_CFG_1_DROP_AT_INGRESS (1 << 0)
+/* Accept packet criteria at start of packet indication */
+#define EC_RXF_CFG_1_SOP_ACCEPT (1 << 1)
+/* Select the arbiter between Rx packets and Tx packets (packets ... */
+#define EC_RXF_CFG_1_ARB_SEL (1 << 2)
+/* Arbiter priority when strict priority is selected in arb_sel: 0 ... */
+#define EC_RXF_CFG_1_ARB_P (1 << 3)
+/* Force loopback operation */
+#define EC_RXF_CFG_1_FORCE_LB (1 << 4)
+/* Forwarding selection between Rx path and/or packet analyzer */
+#define EC_RXF_CFG_1_FWD_SEL_MASK 0x00000300
+#define EC_RXF_CFG_1_FWD_SEL_SHIFT 8
+
+/**** cfg_2 register ****/
+/* FIFO USED threshold for accepting new packets, low threshold ... */
+#define EC_RXF_CFG_2_FIFO_USED_TH_L_MASK 0x0000FFFF
+#define EC_RXF_CFG_2_FIFO_USED_TH_L_SHIFT 0
+/* FIFO USED threshold for accepting new packets, high threshold ... */
+#define EC_RXF_CFG_2_FIFO_USED_TH_H_MASK 0xFFFF0000
+#define EC_RXF_CFG_2_FIFO_USED_TH_H_SHIFT 16
+
+/**** rd_fifo register ****/
+/* Minimum number of entries in the data FIFO to start reading p ... */
+#define EC_RXF_RD_FIFO_TH_DATA_MASK 0x0000FFFF
+#define EC_RXF_RD_FIFO_TH_DATA_SHIFT 0
+/* Enable cut through operation */
+#define EC_RXF_RD_FIFO_EN_CUT_TH (1 << 16)
+
+/**** wr_fifo register ****/
+
+#define EC_RXF_WR_FIFO_TH_DATA_MASK 0x0000FFFF
+#define EC_RXF_WR_FIFO_TH_DATA_SHIFT 0
+
+#define EC_RXF_WR_FIFO_TH_INFO_MASK 0xFFFF0000
+#define EC_RXF_WR_FIFO_TH_INFO_SHIFT 16
+
+/**** lb_fifo register ****/
+
+#define EC_RXF_LB_FIFO_TH_DATA_MASK 0x0000FFFF
+#define EC_RXF_LB_FIFO_TH_DATA_SHIFT 0
+
+#define EC_RXF_LB_FIFO_TH_INFO_MASK 0xFFFF0000
+#define EC_RXF_LB_FIFO_TH_INFO_SHIFT 16
+
+/**** cfg_lb register ****/
+/* FIFO USED threshold for accepting new packets */
+#define EC_RXF_CFG_LB_FIFO_USED_TH_INT_MASK 0x0000FFFF
+#define EC_RXF_CFG_LB_FIFO_USED_TH_INT_SHIFT 0
+/* FIFO USED threshold for generating ready for the Tx path */
+#define EC_RXF_CFG_LB_FIFO_USED_TH_EXT_MASK 0xFFFF0000
+#define EC_RXF_CFG_LB_FIFO_USED_TH_EXT_SHIFT 16
+
+/**** out_drop register ****/
+
+#define EC_RXF_OUT_DROP_MAC_ERR (1 << 0)
+
+#define EC_RXF_OUT_DROP_MAC_COL (1 << 1)
+
+#define EC_RXF_OUT_DROP_MAC_DEC (1 << 2)
+
+#define EC_RXF_OUT_DROP_MAC_LEN (1 << 3)
+
+#define EC_RXF_OUT_DROP_MAC_PHY (1 << 4)
+
+#define EC_RXF_OUT_DROP_MAC_FIFO (1 << 5)
+
+#define EC_RXF_OUT_DROP_MAC_FCS (1 << 6)
+
+#define EC_RXF_OUT_DROP_MAC_ETYPE (1 << 7)
+
+#define EC_RXF_OUT_DROP_EC_LEN (1 << 8)
+
+#define EC_RXF_OUT_DROP_EC_FIFO (1 << 9)
+
+/**** parse_cfg register ****/
+/* MAX number of beats for packet parsing */
+#define EC_EPE_PARSE_CFG_MAX_BEATS_MASK 0x000000FF
+#define EC_EPE_PARSE_CFG_MAX_BEATS_SHIFT 0
+/* MAX number of parsing iterations for packet parsing */
+#define EC_EPE_PARSE_CFG_MAX_ITER_MASK 0x0000FF00
+#define EC_EPE_PARSE_CFG_MAX_ITER_SHIFT 8
+
+/**** act_table_addr register ****/
+/* Address for accessing the table */
+#define EC_EPE_ACT_TABLE_ADDR_VAL_MASK 0x0000001F
+#define EC_EPE_ACT_TABLE_ADDR_VAL_SHIFT 0
+
+/**** act_table_data_1 register ****/
+/* Table data: [5:0] - Offset to next protocol [bytes], [6] - Next p ... */
+#define EC_EPE_ACT_TABLE_DATA_1_VAL_MASK 0x03FFFFFF
+#define EC_EPE_ACT_TABLE_DATA_1_VAL_SHIFT 0
+
+/**** act_table_data_2 register ****/
+/* Table Data: [8:0] - Offset to data in the packet [bits], [17:9] ... */
+#define EC_EPE_ACT_TABLE_DATA_2_VAL_MASK 0x1FFFFFFF
+#define EC_EPE_ACT_TABLE_DATA_2_VAL_SHIFT 0
+
+/**** act_table_data_3 register ****/
+/* Table Data [8:0] - Offset to data in the packet [bits] [17:9 ... */
+#define EC_EPE_ACT_TABLE_DATA_3_VAL_MASK 0x1FFFFFFF
+#define EC_EPE_ACT_TABLE_DATA_3_VAL_SHIFT 0
+
+/**** act_table_data_4 register ****/
+/* Table data: [7:0] - Offset to header length location in the pac ... */
+#define EC_EPE_ACT_TABLE_DATA_4_VAL_MASK 0x0FFFFFFF
+#define EC_EPE_ACT_TABLE_DATA_4_VAL_SHIFT 0
+
+/**** act_table_data_6 register ****/
+/* Table data: [0] - WR header length, [10:1] - Write header length ... */
+#define EC_EPE_ACT_TABLE_DATA_6_VAL_MASK 0x007FFFFF
+#define EC_EPE_ACT_TABLE_DATA_6_VAL_SHIFT 0
+
+/**** res_in register ****/
+/* Selector for input parse_en: 0 - Input vector, 1 - Default value ... */
+#define EC_EPE_RES_IN_SEL_PARSE_EN (1 << 0)
+/* Selector for input protocol_index: 0 - Input vector, 1 - Defaul ... */
+#define EC_EPE_RES_IN_SEL_PROT_INDEX (1 << 1)
+/* Selector for input hdr_offset: 0 - Input vector, 1 - Default va ... */
+#define EC_EPE_RES_IN_SEL_HDR_OFFSET (1 << 2)
+
+/**** p1 register ****/
+/* Location of the input protocol index in the parser result vec ... */
+#define EC_EPE_RES_P1_IN_PROT_INDEX_MASK 0x000003FF
+#define EC_EPE_RES_P1_IN_PROT_INDEX_SHIFT 0
+
+/**** p2 register ****/
+/* Location of the input offset in the parser result vector */
+#define EC_EPE_RES_P2_IN_OFFSET_MASK 0x000003FF
+#define EC_EPE_RES_P2_IN_OFFSET_SHIFT 0
+
+/**** p3 register ****/
+/* Location of the input parse enable in the parser result vecto ... */
+#define EC_EPE_RES_P3_IN_PARSE_EN_MASK 0x000003FF
+#define EC_EPE_RES_P3_IN_PARSE_EN_SHIFT 0
+
+/**** p4 register ****/
+/* Location of the control bits in the parser result vector */
+#define EC_EPE_RES_P4_CTRL_BITS_MASK 0x000003FF
+#define EC_EPE_RES_P4_CTRL_BITS_SHIFT 0
+
+/**** p5 register ****/
+/* Location of the MAC DA in the parser result vector */
+#define EC_EPE_RES_P5_DA_MASK 0x000003FF
+#define EC_EPE_RES_P5_DA_SHIFT 0
+
+/**** p6 register ****/
+/* Location of the MAC SA in the parser result vector */
+#define EC_EPE_RES_P6_SA_MASK 0x000003FF
+#define EC_EPE_RES_P6_SA_SHIFT 0
+
+/**** p7 register ****/
+/* Location of the first VLAN in the parser result vector */
+#define EC_EPE_RES_P7_VLAN_1_MASK 0x000003FF
+#define EC_EPE_RES_P7_VLAN_1_SHIFT 0
+
+/**** p8 register ****/
+/* Location of the second VLAN in the parser result vector */
+#define EC_EPE_RES_P8_VLAN_2_MASK 0x000003FF
+#define EC_EPE_RES_P8_VLAN_2_SHIFT 0
+
+/**** p9 register ****/
+/* Location of the L3 protocol index in the parser result vector ... */
+#define EC_EPE_RES_P9_L3_PROT_INDEX_MASK 0x000003FF
+#define EC_EPE_RES_P9_L3_PROT_INDEX_SHIFT 0
+
+/**** p10 register ****/
+/* Location of the L3 offset in the parser result vector */
+#define EC_EPE_RES_P10_L3_OFFSET_MASK 0x000003FF
+#define EC_EPE_RES_P10_L3_OFFSET_SHIFT 0
+
+/**** p11 register ****/
+/* Location of the L3 SIP in the parser result vector */
+#define EC_EPE_RES_P11_L3_SIP_MASK 0x000003FF
+#define EC_EPE_RES_P11_L3_SIP_SHIFT 0
+
+/**** p12 register ****/
+/* Location of the L3 DIP in the parser result vector */
+#define EC_EPE_RES_P12_L3_DIP_MASK 0x000003FF
+#define EC_EPE_RES_P12_L3_DIP_SHIFT 0
+
+/**** p13 register ****/
+/* Location of the L3 priority in the parser result vector */
+#define EC_EPE_RES_P13_L3_PRIORITY_MASK 0x000003FF
+#define EC_EPE_RES_P13_L3_PRIORITY_SHIFT 0
+
+/**** p14 register ****/
+/* Location of the L3 header length in the parser result vector */
+#define EC_EPE_RES_P14_L3_HDR_LEN_MASK 0x000003FF
+#define EC_EPE_RES_P14_L3_HDR_LEN_SHIFT 0
+
+/**** p15 register ****/
+/* Location of the L4 protocol index in the parser result vector ... */
+#define EC_EPE_RES_P15_L4_PROT_INDEX_MASK 0x000003FF
+#define EC_EPE_RES_P15_L4_PROT_INDEX_SHIFT 0
+
+/**** p16 register ****/
+/* Location of the L4 source port in the parser result vector */
+#define EC_EPE_RES_P16_L4_SRC_PORT_MASK 0x000003FF
+#define EC_EPE_RES_P16_L4_SRC_PORT_SHIFT 0
+
+/**** p17 register ****/
+/* Location of the L4 destination port in the parser result vect ... */
+#define EC_EPE_RES_P17_L4_DST_PORT_MASK 0x000003FF
+#define EC_EPE_RES_P17_L4_DST_PORT_SHIFT 0
+
+/**** p18 register ****/
+/* Location of the L4 offset in the parser result vector */
+#define EC_EPE_RES_P18_L4_OFFSET_MASK 0x000003FF
+#define EC_EPE_RES_P18_L4_OFFSET_SHIFT 0
+
+/**** p19 register ****/
+/* Location of the Ether type in the parser result vector when w ... */
+#define EC_EPE_RES_P19_WOL_ETYPE_MASK 0x000003FF
+#define EC_EPE_RES_P19_WOL_ETYPE_SHIFT 0
+
+/**** p20 register ****/
+/* Location of the RoCE QP number field in the parser result vec ... */
+#define EC_EPE_RES_P20_ROCE_QPN_MASK 0x000003FF
+#define EC_EPE_RES_P20_ROCE_QPN_SHIFT 0
+
+/**** hdr_len register ****/
+/* Value for selecting table 1 */
+#define EC_EPE_H_HDR_LEN_TABLE_1_MASK 0x000000FF
+#define EC_EPE_H_HDR_LEN_TABLE_1_SHIFT 0
+/* Value for selecting table 2 */
+#define EC_EPE_H_HDR_LEN_TABLE_2_MASK 0x00FF0000
+#define EC_EPE_H_HDR_LEN_TABLE_2_SHIFT 16
+
+/**** comp_data register ****/
+/* Data 1 for comparison */
+#define EC_EPE_P_COMP_DATA_DATA_1_MASK 0x0000FFFF
+#define EC_EPE_P_COMP_DATA_DATA_1_SHIFT 0
+/* Data 2 for comparison
+[18:16] - Stage
+[24:19] - Branch ID */
+#define EC_EPE_P_COMP_DATA_DATA_2_MASK 0x01FF0000
+#define EC_EPE_P_COMP_DATA_DATA_2_SHIFT 16
+
+/**** comp_mask register ****/
+/* Data 1 for comparison */
+#define EC_EPE_P_COMP_MASK_DATA_1_MASK 0x0000FFFF
+#define EC_EPE_P_COMP_MASK_DATA_1_SHIFT 0
+/* Data 2 for comparison
+[18:16] - Stage
+[24:19] -