author     Bartosz Sobczak <bartosz.sobczak@intel.com>  2022-12-22 01:10:15 +0000
committer  Eric Joyner <erj@FreeBSD.org>                2023-02-08 00:24:06 +0000
commit     c051c1ff703cf279c80490c143a99bec0ce98e7f
tree       ef039a3e4e498601e3eca7d2069b90e7183d594e
parent     e894da6d9a1d3a96bdac5f330c3c94d75be8f83b
download   src-c051c1ff703cf279c80490c143a99bec0ce98e7f.tar.gz
           src-c051c1ff703cf279c80490c143a99bec0ce98e7f.zip
irdma(4): Upgrade driver to 1.1.5-k

This upgrades the current irdma driver (in support of RDMA on the
Intel(R) Ethernet Controller E810) to version 1.1.5-k.

Change summary:
- refactor defines for hardware registers
- rereg_mr verb added in libirdma
- fix print warning during compilation
- rt_ros2priority macro fix
- irdma.4 validated with mandoc
- fix nd6_resolve usage
- add libirdma_query_device
- sysctl for irdma version
- aeq_alloc_db fix
- dwork_flush protected with qp refcount
- PFC fixes

Signed-off-by: Eric Joyner <erj@FreeBSD.org>

Reviewed by:		erj@
Relnotes:		yes
Sponsored by:		Intel Corporation
Differential Revision:	https://reviews.freebsd.org/D36944

(cherry picked from commit 777e472cd86b9394d07bf96c19dbafc2e1ff4fdc)
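A minimal sketch of the "refactor defines for hardware registers" item,
distilled from the irdma_defs.h hunks below; the old_/new_ helper names
are illustrative only, not part of the commit:

/* Before: each field carried a shift (_S) and a hand-built mask (_M). */
#define IRDMA_CQ_MAJERR_S	16
#define IRDMA_CQ_MAJERR_M	(0xffffULL << IRDMA_CQ_MAJERR_S)
#define old_get_majerr(q3)	(((q3) & IRDMA_CQ_MAJERR_M) >> IRDMA_CQ_MAJERR_S)

/* After: the mask is a GENMASK bit range consumed via FIELD_GET().
 * The _S define survives because this port's FIELD_PREP/FIELD_GET
 * fallbacks paste mask##_S rather than deriving the shift. */
#define IRDMA_CQ_MAJERR		GENMASK_ULL(31, 16)
#define new_get_majerr(q3)	FIELD_GET(IRDMA_CQ_MAJERR, (q3))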
-rw-r--r--  contrib/ofed/libirdma/abi.h          |   10
-rw-r--r--  contrib/ofed/libirdma/i40iw_hw.h     |    4
-rw-r--r--  contrib/ofed/libirdma/irdma.h        |   18
-rw-r--r--  contrib/ofed/libirdma/irdma_defs.h   |  259
-rw-r--r--  contrib/ofed/libirdma/irdma_uk.c     |  774
-rw-r--r--  contrib/ofed/libirdma/irdma_umain.c  |   28
-rw-r--r--  contrib/ofed/libirdma/irdma_umain.h  |   30
-rw-r--r--  contrib/ofed/libirdma/irdma_uquery.h |   50
-rw-r--r--  contrib/ofed/libirdma/irdma_user.h   |  227
-rw-r--r--  contrib/ofed/libirdma/irdma_uverbs.c |  377
-rw-r--r--  contrib/ofed/libirdma/libirdma.map   |    8
-rw-r--r--  contrib/ofed/libirdma/osdep.h        |   17
-rw-r--r--  share/man/man4/irdma.4               |  168
-rw-r--r--  sys/dev/irdma/fbsd_kcompat.c         |   17
-rw-r--r--  sys/dev/irdma/fbsd_kcompat.h         |   19
-rw-r--r--  sys/dev/irdma/icrdma.c               |    7
-rw-r--r--  sys/dev/irdma/icrdma_hw.c            |   39
-rw-r--r--  sys/dev/irdma/icrdma_hw.h            |   35
-rw-r--r--  sys/dev/irdma/irdma-abi.h            |    2
-rw-r--r--  sys/dev/irdma/irdma.h                |  158
-rw-r--r--  sys/dev/irdma/irdma_cm.c             |  110
-rw-r--r--  sys/dev/irdma/irdma_cm.h             |   22
-rw-r--r--  sys/dev/irdma/irdma_ctrl.c           | 1330
-rw-r--r--  sys/dev/irdma/irdma_defs.h           | 1678
-rw-r--r--  sys/dev/irdma/irdma_hmc.c            |   33
-rw-r--r--  sys/dev/irdma/irdma_hmc.h            |   12
-rw-r--r--  sys/dev/irdma/irdma_hw.c             |  205
-rw-r--r--  sys/dev/irdma/irdma_kcompat.c        |  348
-rw-r--r--  sys/dev/irdma/irdma_main.h           |   23
-rw-r--r--  sys/dev/irdma/irdma_pble.c           |    4
-rw-r--r--  sys/dev/irdma/irdma_protos.h         |   13
-rw-r--r--  sys/dev/irdma/irdma_puda.c           |  146
-rw-r--r--  sys/dev/irdma/irdma_type.h           |   99
-rw-r--r--  sys/dev/irdma/irdma_uda.c            |   79
-rw-r--r--  sys/dev/irdma/irdma_uda_d.h          |  361
-rw-r--r--  sys/dev/irdma/irdma_uk.c             |  766
-rw-r--r--  sys/dev/irdma/irdma_user.h           |  227
-rw-r--r--  sys/dev/irdma/irdma_utils.c          |  160
-rw-r--r--  sys/dev/irdma/irdma_verbs.c          |  706
-rw-r--r--  sys/dev/irdma/irdma_verbs.h          |   27
-rw-r--r--  sys/dev/irdma/irdma_ws.c             |    4
-rw-r--r--  sys/dev/irdma/osdep.h                |    6
42 files changed, 4321 insertions, 4285 deletions
diff --git a/contrib/ofed/libirdma/abi.h b/contrib/ofed/libirdma/abi.h
index ff7a2828efe0..e45a7b49caf8 100644
--- a/contrib/ofed/libirdma/abi.h
+++ b/contrib/ofed/libirdma/abi.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (C) 2019 - 2020 Intel Corporation
+ * Copyright (C) 2019 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -146,6 +146,14 @@ struct irdma_ureg_mr {
__u16 sq_pages;
};
+struct irdma_urereg_mr {
+ struct ibv_rereg_mr ibv_cmd;
+ __u16 reg_type; /* enum irdma_memreg_type */
+ __u16 cq_pages;
+ __u16 rq_pages;
+ __u16 sq_pages;
+
+};
struct irdma_ucreate_ah_resp {
struct ibv_create_ah_resp ibv_resp;
__u32 ah_id;
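The irdma_urereg_mr struct added here carries the new rereg_mr verb from
the change summary. A hypothetical caller-side sketch, assuming
IRDMA_MEMREG_TYPE_MEM is the plain memory-region value of
enum irdma_memreg_type (the enum is referenced but not shown in this hunk):

struct irdma_urereg_mr cmd = {0};

cmd.reg_type = IRDMA_MEMREG_TYPE_MEM;	/* assumed enum value */
cmd.cq_pages = 0;			/* page counts apply only to QP/CQ regions */
cmd.rq_pages = 0;
cmd.sq_pages = 0;
/* cmd.ibv_cmd is then filled by the libibverbs rereg command path. */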
diff --git a/contrib/ofed/libirdma/i40iw_hw.h b/contrib/ofed/libirdma/i40iw_hw.h
index 38c7e37c35c9..c51d89a0fcb2 100644
--- a/contrib/ofed/libirdma/i40iw_hw.h
+++ b/contrib/ofed/libirdma/i40iw_hw.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2020 Intel Corporation
+ * Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -50,11 +50,11 @@ enum i40iw_device_caps_const {
I40IW_MAX_CQ_SIZE = 1048575,
I40IW_MAX_OUTBOUND_MSG_SIZE = 2147483647,
I40IW_MAX_INBOUND_MSG_SIZE = 2147483647,
+ I40IW_MIN_WQ_SIZE = 4 /* WQEs */,
};
#define I40IW_QP_WQE_MIN_SIZE 32
#define I40IW_QP_WQE_MAX_SIZE 128
-#define I40IW_QP_SW_MIN_WQSIZE 4
#define I40IW_MAX_RQ_WQE_SHIFT 2
#define I40IW_MAX_QUANTA_PER_WR 2
diff --git a/contrib/ofed/libirdma/irdma.h b/contrib/ofed/libirdma/irdma.h
index 27fa3d53d3e8..1dd09c36c7ea 100644
--- a/contrib/ofed/libirdma/irdma.h
+++ b/contrib/ofed/libirdma/irdma.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2017 - 2021 Intel Corporation
+ * Copyright (c) 2017 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -39,13 +39,15 @@
#define RDMA_BIT2(type, a) ((u##type) 1UL << a)
#define RDMA_MASK3(type, mask, shift) ((u##type) mask << shift)
#define MAKEMASK(m, s) ((m) << (s))
-#define IRDMA_WQEALLOC_WQE_DESC_INDEX_S 20
-#define IRDMA_WQEALLOC_WQE_DESC_INDEX_M (0xfff << IRDMA_WQEALLOC_WQE_DESC_INDEX_S)
+
+#define IRDMA_WQEALLOC_WQE_DESC_INDEX_S 20
+#define IRDMA_WQEALLOC_WQE_DESC_INDEX GENMASK(31, 20)
enum irdma_vers {
- IRDMA_GEN_RSVD,
- IRDMA_GEN_1,
- IRDMA_GEN_2,
+ IRDMA_GEN_RSVD = 0,
+ IRDMA_GEN_1 = 1,
+ IRDMA_GEN_2 = 2,
+ IRDMA_GEN_MAX = 2,
};
struct irdma_uk_attrs {
@@ -58,8 +60,7 @@ struct irdma_uk_attrs {
u32 min_hw_cq_size;
u32 max_hw_cq_size;
u16 max_hw_sq_chunk;
- u16 max_hw_wq_size;
- u16 min_sw_wq_size;
+ u16 min_hw_wq_size;
u8 hw_rev;
};
@@ -68,6 +69,7 @@ struct irdma_hw_attrs {
u64 max_hw_outbound_msg_size;
u64 max_hw_inbound_msg_size;
u64 max_mr_size;
+ u64 page_size_cap;
u32 min_hw_qp_id;
u32 min_hw_aeq_size;
u32 max_hw_aeq_size;
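page_size_cap, added to irdma_hw_attrs above, reads as a bitmap of
supported page sizes. A sketch under that assumption (the helper name is
hypothetical):

static bool
irdma_page_size_supported(const struct irdma_hw_attrs *attrs, u64 pg_sz)
{
	return (attrs->page_size_cap & pg_sz) != 0;
}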
diff --git a/contrib/ofed/libirdma/irdma_defs.h b/contrib/ofed/libirdma/irdma_defs.h
index 8fb9f1e2b622..932993fd44ce 100644
--- a/contrib/ofed/libirdma/irdma_defs.h
+++ b/contrib/ofed/libirdma/irdma_defs.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2021 Intel Corporation
+ * Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -75,7 +75,6 @@
#define IRDMA_CQE_QTYPE_RQ 0
#define IRDMA_CQE_QTYPE_SQ 1
-#define IRDMA_QP_SW_MIN_WQSIZE 8u /* in WRs*/
#define IRDMA_QP_WQE_MIN_SIZE 32
#define IRDMA_QP_WQE_MAX_SIZE 256
#define IRDMA_QP_WQE_MIN_QUANTA 1
@@ -85,9 +84,11 @@
#define IRDMA_SQ_RSVD 258
#define IRDMA_RQ_RSVD 1
-#define IRDMA_FEATURE_RTS_AE 1ULL
-#define IRDMA_FEATURE_CQ_RESIZE 2ULL
-#define IRDMA_FEATURE_RELAX_RQ_ORDER 4ULL
+#define IRDMA_FEATURE_RTS_AE BIT_ULL(0)
+#define IRDMA_FEATURE_CQ_RESIZE BIT_ULL(1)
+#define IRDMA_FEATURE_RELAX_RQ_ORDER BIT_ULL(2)
+#define IRDMA_FEATURE_64_BYTE_CQE BIT_ULL(5)
+
#define IRDMAQP_OP_RDMA_WRITE 0x00
#define IRDMAQP_OP_RDMA_READ 0x01
#define IRDMAQP_OP_RDMA_SEND 0x03
@@ -106,262 +107,198 @@
#define LS_32_1(val, bits) ((u32)((val) << (bits)))
#define RS_32_1(val, bits) ((u32)((val) >> (bits)))
#endif
-#define LS_64(val, field) (((u64)(val) << field ## _S) & (field ## _M))
-#define RS_64(val, field) ((u64)((val) & field ## _M) >> field ## _S)
-#define LS_32(val, field) (((val) << field ## _S) & (field ## _M))
-#define RS_32(val, field) (((val) & field ## _M) >> field ## _S)
+#ifndef GENMASK_ULL
+#define GENMASK_ULL(high, low) ((0xFFFFFFFFFFFFFFFFULL >> (64ULL - ((high) - (low) + 1ULL))) << (low))
+#endif /* GENMASK_ULL */
+#ifndef GENMASK
+#define GENMASK(high, low) ((0xFFFFFFFFUL >> (32UL - ((high) - (low) + 1UL))) << (low))
+#endif /* GENMASK */
+#ifndef FIELD_PREP
+#define FIELD_PREP(mask, val) (((u64)(val) << mask##_S) & (mask))
+#define FIELD_GET(mask, val) (((val) & mask) >> mask##_S)
+#endif /* FIELD_PREP */
#define IRDMA_CQPHC_QPCTX_S 0
-#define IRDMA_CQPHC_QPCTX_M \
- (0xffffffffffffffffULL << IRDMA_CQPHC_QPCTX_S)
-
-/* iWARP QP Doorbell shadow area */
+#define IRDMA_CQPHC_QPCTX GENMASK_ULL(63, 0)
#define IRDMA_QP_DBSA_HW_SQ_TAIL_S 0
-#define IRDMA_QP_DBSA_HW_SQ_TAIL_M \
- (0x7fffULL << IRDMA_QP_DBSA_HW_SQ_TAIL_S)
-
-/* Completion Queue Doorbell shadow area */
+#define IRDMA_QP_DBSA_HW_SQ_TAIL GENMASK_ULL(14, 0)
#define IRDMA_CQ_DBSA_CQEIDX_S 0
-#define IRDMA_CQ_DBSA_CQEIDX_M (0xfffffULL << IRDMA_CQ_DBSA_CQEIDX_S)
-
+#define IRDMA_CQ_DBSA_CQEIDX GENMASK_ULL(19, 0)
#define IRDMA_CQ_DBSA_SW_CQ_SELECT_S 0
-#define IRDMA_CQ_DBSA_SW_CQ_SELECT_M \
- (0x3fffULL << IRDMA_CQ_DBSA_SW_CQ_SELECT_S)
-
+#define IRDMA_CQ_DBSA_SW_CQ_SELECT GENMASK_ULL(13, 0)
#define IRDMA_CQ_DBSA_ARM_NEXT_S 14
-#define IRDMA_CQ_DBSA_ARM_NEXT_M BIT_ULL(IRDMA_CQ_DBSA_ARM_NEXT_S)
-
+#define IRDMA_CQ_DBSA_ARM_NEXT BIT_ULL(14)
#define IRDMA_CQ_DBSA_ARM_NEXT_SE_S 15
-#define IRDMA_CQ_DBSA_ARM_NEXT_SE_M BIT_ULL(IRDMA_CQ_DBSA_ARM_NEXT_SE_S)
-
+#define IRDMA_CQ_DBSA_ARM_NEXT_SE BIT_ULL(15)
#define IRDMA_CQ_DBSA_ARM_SEQ_NUM_S 16
-#define IRDMA_CQ_DBSA_ARM_SEQ_NUM_M \
- (0x3ULL << IRDMA_CQ_DBSA_ARM_SEQ_NUM_S)
+#define IRDMA_CQ_DBSA_ARM_SEQ_NUM GENMASK_ULL(17, 16)
/* CQP and iWARP Completion Queue */
#define IRDMA_CQ_QPCTX_S IRDMA_CQPHC_QPCTX_S
-#define IRDMA_CQ_QPCTX_M IRDMA_CQPHC_QPCTX_M
+#define IRDMA_CQ_QPCTX IRDMA_CQPHC_QPCTX
#define IRDMA_CQ_MINERR_S 0
-#define IRDMA_CQ_MINERR_M (0xffffULL << IRDMA_CQ_MINERR_S)
-
+#define IRDMA_CQ_MINERR GENMASK_ULL(15, 0)
#define IRDMA_CQ_MAJERR_S 16
-#define IRDMA_CQ_MAJERR_M (0xffffULL << IRDMA_CQ_MAJERR_S)
-
+#define IRDMA_CQ_MAJERR GENMASK_ULL(31, 16)
#define IRDMA_CQ_WQEIDX_S 32
-#define IRDMA_CQ_WQEIDX_M (0x7fffULL << IRDMA_CQ_WQEIDX_S)
-
+#define IRDMA_CQ_WQEIDX GENMASK_ULL(46, 32)
#define IRDMA_CQ_EXTCQE_S 50
-#define IRDMA_CQ_EXTCQE_M BIT_ULL(IRDMA_CQ_EXTCQE_S)
-
+#define IRDMA_CQ_EXTCQE BIT_ULL(50)
#define IRDMA_OOO_CMPL_S 54
-#define IRDMA_OOO_CMPL_M BIT_ULL(IRDMA_OOO_CMPL_S)
-
+#define IRDMA_OOO_CMPL BIT_ULL(54)
#define IRDMA_CQ_ERROR_S 55
-#define IRDMA_CQ_ERROR_M BIT_ULL(IRDMA_CQ_ERROR_S)
-
+#define IRDMA_CQ_ERROR BIT_ULL(55)
#define IRDMA_CQ_SQ_S 62
-#define IRDMA_CQ_SQ_M BIT_ULL(IRDMA_CQ_SQ_S)
+#define IRDMA_CQ_SQ BIT_ULL(62)
#define IRDMA_CQ_VALID_S 63
-#define IRDMA_CQ_VALID_M BIT_ULL(IRDMA_CQ_VALID_S)
-
-#define IRDMA_CQ_IMMVALID_S 62
-#define IRDMA_CQ_IMMVALID_M BIT_ULL(IRDMA_CQ_IMMVALID_S)
-
+#define IRDMA_CQ_VALID BIT_ULL(63)
+#define IRDMA_CQ_IMMVALID BIT_ULL(62)
#define IRDMA_CQ_UDSMACVALID_S 61
-#define IRDMA_CQ_UDSMACVALID_M BIT_ULL(IRDMA_CQ_UDSMACVALID_S)
-
+#define IRDMA_CQ_UDSMACVALID BIT_ULL(61)
#define IRDMA_CQ_UDVLANVALID_S 60
-#define IRDMA_CQ_UDVLANVALID_M BIT_ULL(IRDMA_CQ_UDVLANVALID_S)
-
+#define IRDMA_CQ_UDVLANVALID BIT_ULL(60)
#define IRDMA_CQ_UDSMAC_S 0
-#define IRDMA_CQ_UDSMAC_M (0xffffffffffffULL << IRDMA_CQ_UDSMAC_S)
-
+#define IRDMA_CQ_UDSMAC GENMASK_ULL(47, 0)
#define IRDMA_CQ_UDVLAN_S 48
-#define IRDMA_CQ_UDVLAN_M (0xffffULL << IRDMA_CQ_UDVLAN_S)
+#define IRDMA_CQ_UDVLAN GENMASK_ULL(63, 48)
#define IRDMA_CQ_IMMDATA_S 0
-#define IRDMA_CQ_IMMDATA_M (0xffffffffffffffffULL << IRDMA_CQ_IMMVALID_S)
-
+#define IRDMA_CQ_IMMVALID_S 62
+#define IRDMA_CQ_IMMDATA GENMASK_ULL(125, 62)
#define IRDMA_CQ_IMMDATALOW32_S 0
-#define IRDMA_CQ_IMMDATALOW32_M (0xffffffffULL << IRDMA_CQ_IMMDATALOW32_S)
-
+#define IRDMA_CQ_IMMDATALOW32 GENMASK_ULL(31, 0)
#define IRDMA_CQ_IMMDATAUP32_S 32
-#define IRDMA_CQ_IMMDATAUP32_M (0xffffffffULL << IRDMA_CQ_IMMDATAUP32_S)
-
+#define IRDMA_CQ_IMMDATAUP32 GENMASK_ULL(63, 32)
#define IRDMACQ_PAYLDLEN_S 0
-#define IRDMACQ_PAYLDLEN_M (0xffffffffULL << IRDMACQ_PAYLDLEN_S)
-
-#define IRDMACQ_TCPSEQNUMRTT_S 32
-#define IRDMACQ_TCPSEQNUMRTT_M (0xffffffffULL << IRDMACQ_TCPSEQNUMRTT_S)
-
+#define IRDMACQ_PAYLDLEN GENMASK_ULL(31, 0)
+#define IRDMACQ_TCPSQN_ROCEPSN_RTT_TS_S 32
+#define IRDMACQ_TCPSQN_ROCEPSN_RTT_TS GENMASK_ULL(63, 32)
#define IRDMACQ_INVSTAG_S 0
-#define IRDMACQ_INVSTAG_M (0xffffffffULL << IRDMACQ_INVSTAG_S)
-
+#define IRDMACQ_INVSTAG GENMASK_ULL(31, 0)
#define IRDMACQ_QPID_S 32
-#define IRDMACQ_QPID_M (0xffffffULL << IRDMACQ_QPID_S)
+#define IRDMACQ_QPID GENMASK_ULL(55, 32)
#define IRDMACQ_UDSRCQPN_S 0
-#define IRDMACQ_UDSRCQPN_M (0xffffffffULL << IRDMACQ_UDSRCQPN_S)
-
+#define IRDMACQ_UDSRCQPN GENMASK_ULL(31, 0)
#define IRDMACQ_PSHDROP_S 51
-#define IRDMACQ_PSHDROP_M BIT_ULL(IRDMACQ_PSHDROP_S)
-
+#define IRDMACQ_PSHDROP BIT_ULL(51)
#define IRDMACQ_STAG_S 53
-#define IRDMACQ_STAG_M BIT_ULL(IRDMACQ_STAG_S)
-
+#define IRDMACQ_STAG BIT_ULL(53)
#define IRDMACQ_IPV4_S 53
-#define IRDMACQ_IPV4_M BIT_ULL(IRDMACQ_IPV4_S)
-
+#define IRDMACQ_IPV4 BIT_ULL(53)
#define IRDMACQ_SOEVENT_S 54
-#define IRDMACQ_SOEVENT_M BIT_ULL(IRDMACQ_SOEVENT_S)
-
+#define IRDMACQ_SOEVENT BIT_ULL(54)
#define IRDMACQ_OP_S 56
-#define IRDMACQ_OP_M (0x3fULL << IRDMACQ_OP_S)
+#define IRDMACQ_OP GENMASK_ULL(61, 56)
/* Manage Push Page - MPP */
#define IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1 0xffff
#define IRDMA_INVALID_PUSH_PAGE_INDEX 0xffffffff
-/* iwarp QP SQ WQE common fields */
#define IRDMAQPSQ_OPCODE_S 32
-#define IRDMAQPSQ_OPCODE_M (0x3fULL << IRDMAQPSQ_OPCODE_S)
-
+#define IRDMAQPSQ_OPCODE GENMASK_ULL(37, 32)
#define IRDMAQPSQ_COPY_HOST_PBL_S 43
-#define IRDMAQPSQ_COPY_HOST_PBL_M BIT_ULL(IRDMAQPSQ_COPY_HOST_PBL_S)
-
+#define IRDMAQPSQ_COPY_HOST_PBL BIT_ULL(43)
#define IRDMAQPSQ_ADDFRAGCNT_S 38
-#define IRDMAQPSQ_ADDFRAGCNT_M (0xfULL << IRDMAQPSQ_ADDFRAGCNT_S)
-
+#define IRDMAQPSQ_ADDFRAGCNT GENMASK_ULL(41, 38)
#define IRDMAQPSQ_PUSHWQE_S 56
-#define IRDMAQPSQ_PUSHWQE_M BIT_ULL(IRDMAQPSQ_PUSHWQE_S)
-
+#define IRDMAQPSQ_PUSHWQE BIT_ULL(56)
#define IRDMAQPSQ_STREAMMODE_S 58
-#define IRDMAQPSQ_STREAMMODE_M BIT_ULL(IRDMAQPSQ_STREAMMODE_S)
-
+#define IRDMAQPSQ_STREAMMODE BIT_ULL(58)
#define IRDMAQPSQ_WAITFORRCVPDU_S 59
-#define IRDMAQPSQ_WAITFORRCVPDU_M BIT_ULL(IRDMAQPSQ_WAITFORRCVPDU_S)
-
+#define IRDMAQPSQ_WAITFORRCVPDU BIT_ULL(59)
#define IRDMAQPSQ_READFENCE_S 60
-#define IRDMAQPSQ_READFENCE_M BIT_ULL(IRDMAQPSQ_READFENCE_S)
-
+#define IRDMAQPSQ_READFENCE BIT_ULL(60)
#define IRDMAQPSQ_LOCALFENCE_S 61
-#define IRDMAQPSQ_LOCALFENCE_M BIT_ULL(IRDMAQPSQ_LOCALFENCE_S)
-
+#define IRDMAQPSQ_LOCALFENCE BIT_ULL(61)
#define IRDMAQPSQ_UDPHEADER_S 61
-#define IRDMAQPSQ_UDPHEADER_M BIT_ULL(IRDMAQPSQ_UDPHEADER_S)
-
+#define IRDMAQPSQ_UDPHEADER BIT_ULL(61)
#define IRDMAQPSQ_L4LEN_S 42
-#define IRDMAQPSQ_L4LEN_M ((u64)0xF << IRDMAQPSQ_L4LEN_S)
-
+#define IRDMAQPSQ_L4LEN GENMASK_ULL(45, 42)
#define IRDMAQPSQ_SIGCOMPL_S 62
-#define IRDMAQPSQ_SIGCOMPL_M BIT_ULL(IRDMAQPSQ_SIGCOMPL_S)
-
+#define IRDMAQPSQ_SIGCOMPL BIT_ULL(62)
#define IRDMAQPSQ_VALID_S 63
-#define IRDMAQPSQ_VALID_M BIT_ULL(IRDMAQPSQ_VALID_S)
+#define IRDMAQPSQ_VALID BIT_ULL(63)
#define IRDMAQPSQ_FRAG_TO_S IRDMA_CQPHC_QPCTX_S
-#define IRDMAQPSQ_FRAG_TO_M IRDMA_CQPHC_QPCTX_M
-
+#define IRDMAQPSQ_FRAG_TO IRDMA_CQPHC_QPCTX
#define IRDMAQPSQ_FRAG_VALID_S 63
-#define IRDMAQPSQ_FRAG_VALID_M BIT_ULL(IRDMAQPSQ_FRAG_VALID_S)
-
+#define IRDMAQPSQ_FRAG_VALID BIT_ULL(63)
#define IRDMAQPSQ_FRAG_LEN_S 32
-#define IRDMAQPSQ_FRAG_LEN_M (0x7fffffffULL << IRDMAQPSQ_FRAG_LEN_S)
-
+#define IRDMAQPSQ_FRAG_LEN GENMASK_ULL(62, 32)
#define IRDMAQPSQ_FRAG_STAG_S 0
-#define IRDMAQPSQ_FRAG_STAG_M (0xffffffffULL << IRDMAQPSQ_FRAG_STAG_S)
-
+#define IRDMAQPSQ_FRAG_STAG GENMASK_ULL(31, 0)
#define IRDMAQPSQ_GEN1_FRAG_LEN_S 0
-#define IRDMAQPSQ_GEN1_FRAG_LEN_M (0xffffffffULL << IRDMAQPSQ_GEN1_FRAG_LEN_S)
-
+#define IRDMAQPSQ_GEN1_FRAG_LEN GENMASK_ULL(31, 0)
#define IRDMAQPSQ_GEN1_FRAG_STAG_S 32
-#define IRDMAQPSQ_GEN1_FRAG_STAG_M (0xffffffffULL << IRDMAQPSQ_GEN1_FRAG_STAG_S)
-
+#define IRDMAQPSQ_GEN1_FRAG_STAG GENMASK_ULL(63, 32)
#define IRDMAQPSQ_REMSTAGINV_S 0
-#define IRDMAQPSQ_REMSTAGINV_M (0xffffffffULL << IRDMAQPSQ_REMSTAGINV_S)
-
+#define IRDMAQPSQ_REMSTAGINV GENMASK_ULL(31, 0)
#define IRDMAQPSQ_DESTQKEY_S 0
-#define IRDMAQPSQ_DESTQKEY_M (0xffffffffULL << IRDMAQPSQ_DESTQKEY_S)
-
+#define IRDMAQPSQ_DESTQKEY GENMASK_ULL(31, 0)
#define IRDMAQPSQ_DESTQPN_S 32
-#define IRDMAQPSQ_DESTQPN_M (0x00ffffffULL << IRDMAQPSQ_DESTQPN_S)
-
+#define IRDMAQPSQ_DESTQPN GENMASK_ULL(55, 32)
#define IRDMAQPSQ_AHID_S 0
-#define IRDMAQPSQ_AHID_M (0x0001ffffULL << IRDMAQPSQ_AHID_S)
-
+#define IRDMAQPSQ_AHID GENMASK_ULL(16, 0)
#define IRDMAQPSQ_INLINEDATAFLAG_S 57
-#define IRDMAQPSQ_INLINEDATAFLAG_M BIT_ULL(IRDMAQPSQ_INLINEDATAFLAG_S)
+#define IRDMAQPSQ_INLINEDATAFLAG BIT_ULL(57)
#define IRDMA_INLINE_VALID_S 7
-
#define IRDMAQPSQ_INLINEDATALEN_S 48
-#define IRDMAQPSQ_INLINEDATALEN_M \
- (0xffULL << IRDMAQPSQ_INLINEDATALEN_S)
+#define IRDMAQPSQ_INLINEDATALEN GENMASK_ULL(55, 48)
#define IRDMAQPSQ_IMMDATAFLAG_S 47
-#define IRDMAQPSQ_IMMDATAFLAG_M \
- BIT_ULL(IRDMAQPSQ_IMMDATAFLAG_S)
+#define IRDMAQPSQ_IMMDATAFLAG BIT_ULL(47)
#define IRDMAQPSQ_REPORTRTT_S 46
-#define IRDMAQPSQ_REPORTRTT_M \
- BIT_ULL(IRDMAQPSQ_REPORTRTT_S)
+#define IRDMAQPSQ_REPORTRTT BIT_ULL(46)
#define IRDMAQPSQ_IMMDATA_S 0
-#define IRDMAQPSQ_IMMDATA_M \
- (0xffffffffffffffffULL << IRDMAQPSQ_IMMDATA_S)
-
-/* rdma write */
+#define IRDMAQPSQ_IMMDATA GENMASK_ULL(63, 0)
#define IRDMAQPSQ_REMSTAG_S 0
-#define IRDMAQPSQ_REMSTAG_M (0xffffffffULL << IRDMAQPSQ_REMSTAG_S)
+#define IRDMAQPSQ_REMSTAG GENMASK_ULL(31, 0)
#define IRDMAQPSQ_REMTO_S IRDMA_CQPHC_QPCTX_S
-#define IRDMAQPSQ_REMTO_M IRDMA_CQPHC_QPCTX_M
+#define IRDMAQPSQ_REMTO IRDMA_CQPHC_QPCTX
-/* memory window */
#define IRDMAQPSQ_STAGRIGHTS_S 48
-#define IRDMAQPSQ_STAGRIGHTS_M (0x1fULL << IRDMAQPSQ_STAGRIGHTS_S)
-
+#define IRDMAQPSQ_STAGRIGHTS GENMASK_ULL(52, 48)
#define IRDMAQPSQ_VABASEDTO_S 53
-#define IRDMAQPSQ_VABASEDTO_M BIT_ULL(IRDMAQPSQ_VABASEDTO_S)
-
+#define IRDMAQPSQ_VABASEDTO BIT_ULL(53)
#define IRDMAQPSQ_MEMWINDOWTYPE_S 54
-#define IRDMAQPSQ_MEMWINDOWTYPE_M BIT_ULL(IRDMAQPSQ_MEMWINDOWTYPE_S)
+#define IRDMAQPSQ_MEMWINDOWTYPE BIT_ULL(54)
#define IRDMAQPSQ_MWLEN_S IRDMA_CQPHC_QPCTX_S
-#define IRDMAQPSQ_MWLEN_M IRDMA_CQPHC_QPCTX_M
-
+#define IRDMAQPSQ_MWLEN IRDMA_CQPHC_QPCTX
#define IRDMAQPSQ_PARENTMRSTAG_S 32
-#define IRDMAQPSQ_PARENTMRSTAG_M \
- (0xffffffffULL << IRDMAQPSQ_PARENTMRSTAG_S)
-
+#define IRDMAQPSQ_PARENTMRSTAG GENMASK_ULL(63, 32)
#define IRDMAQPSQ_MWSTAG_S 0
-#define IRDMAQPSQ_MWSTAG_M (0xffffffffULL << IRDMAQPSQ_MWSTAG_S)
+#define IRDMAQPSQ_MWSTAG GENMASK_ULL(31, 0)
#define IRDMAQPSQ_BASEVA_TO_FBO_S IRDMA_CQPHC_QPCTX_S
-#define IRDMAQPSQ_BASEVA_TO_FBO_M IRDMA_CQPHC_QPCTX_M
+#define IRDMAQPSQ_BASEVA_TO_FBO IRDMA_CQPHC_QPCTX
-/* Local Invalidate */
#define IRDMAQPSQ_LOCSTAG_S 0
-#define IRDMAQPSQ_LOCSTAG_M (0xffffffffULL << IRDMAQPSQ_LOCSTAG_S)
+#define IRDMAQPSQ_LOCSTAG GENMASK_ULL(31, 0)
/* iwarp QP RQ WQE common fields */
#define IRDMAQPRQ_ADDFRAGCNT_S IRDMAQPSQ_ADDFRAGCNT_S
-#define IRDMAQPRQ_ADDFRAGCNT_M IRDMAQPSQ_ADDFRAGCNT_M
+#define IRDMAQPRQ_ADDFRAGCNT IRDMAQPSQ_ADDFRAGCNT
#define IRDMAQPRQ_VALID_S IRDMAQPSQ_VALID_S
-#define IRDMAQPRQ_VALID_M IRDMAQPSQ_VALID_M
+#define IRDMAQPRQ_VALID IRDMAQPSQ_VALID
#define IRDMAQPRQ_COMPLCTX_S IRDMA_CQPHC_QPCTX_S
-#define IRDMAQPRQ_COMPLCTX_M IRDMA_CQPHC_QPCTX_M
+#define IRDMAQPRQ_COMPLCTX IRDMA_CQPHC_QPCTX
#define IRDMAQPRQ_FRAG_LEN_S IRDMAQPSQ_FRAG_LEN_S
-#define IRDMAQPRQ_FRAG_LEN_M IRDMAQPSQ_FRAG_LEN_M
+#define IRDMAQPRQ_FRAG_LEN IRDMAQPSQ_FRAG_LEN
#define IRDMAQPRQ_STAG_S IRDMAQPSQ_FRAG_STAG_S
-#define IRDMAQPRQ_STAG_M IRDMAQPSQ_FRAG_STAG_M
+#define IRDMAQPRQ_STAG IRDMAQPSQ_FRAG_STAG
#define IRDMAQPRQ_TO_S IRDMAQPSQ_FRAG_TO_S
-#define IRDMAQPRQ_TO_M IRDMAQPSQ_FRAG_TO_M
+#define IRDMAQPRQ_TO IRDMAQPSQ_FRAG_TO
#define IRDMAPFINT_OICR_HMC_ERR_M BIT(26)
#define IRDMAPFINT_OICR_PE_PUSH_M BIT(27)
@@ -500,6 +437,12 @@
IRDMA_RING_MOVE_HEAD(_ring, _retcode); \
}
+enum irdma_protocol_used {
+ IRDMA_ANY_PROTOCOL = 0,
+ IRDMA_IWARP_PROTOCOL_ONLY = 1,
+ IRDMA_ROCE_PROTOCOL_ONLY = 2,
+};
+
enum irdma_qp_wqe_size {
IRDMA_WQE_SIZE_32 = 32,
IRDMA_WQE_SIZE_64 = 64,
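With the IRDMA_FEATURE_* defines above rewritten as single BIT_ULL()
flags, a capability check reduces to a mask test. A minimal sketch,
assuming feature_flags is the u64 capability word in irdma_uk_attrs:

if (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RELAX_RQ_ORDER) {
	/* device tolerates relaxed RQ completion ordering */
}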
diff --git a/contrib/ofed/libirdma/irdma_uk.c b/contrib/ofed/libirdma/irdma_uk.c
index 2f77c132d296..5201ad692dc1 100644
--- a/contrib/ofed/libirdma/irdma_uk.c
+++ b/contrib/ofed/libirdma/irdma_uk.c
@@ -51,15 +51,15 @@ irdma_set_fragment(__le64 * wqe, u32 offset, struct irdma_sge *sge,
{
if (sge) {
set_64bit_val(wqe, offset,
- LS_64(sge->tag_off, IRDMAQPSQ_FRAG_TO));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
set_64bit_val(wqe, offset + IRDMA_BYTE_8,
- LS_64(valid, IRDMAQPSQ_VALID) |
- LS_64(sge->len, IRDMAQPSQ_FRAG_LEN) |
- LS_64(sge->stag, IRDMAQPSQ_FRAG_STAG));
+ FIELD_PREP(IRDMAQPSQ_VALID, valid) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->len) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->stag));
} else {
set_64bit_val(wqe, offset, 0);
set_64bit_val(wqe, offset + IRDMA_BYTE_8,
- LS_64(valid, IRDMAQPSQ_VALID));
+ FIELD_PREP(IRDMAQPSQ_VALID, valid));
}
}
@@ -76,10 +76,10 @@ irdma_set_fragment_gen_1(__le64 * wqe, u32 offset,
{
if (sge) {
set_64bit_val(wqe, offset,
- LS_64(sge->tag_off, IRDMAQPSQ_FRAG_TO));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
set_64bit_val(wqe, offset + IRDMA_BYTE_8,
- LS_64(sge->len, IRDMAQPSQ_GEN1_FRAG_LEN) |
- LS_64(sge->stag, IRDMAQPSQ_GEN1_FRAG_STAG));
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->len) |
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->stag));
} else {
set_64bit_val(wqe, offset, 0);
set_64bit_val(wqe, offset + IRDMA_BYTE_8, 0);
@@ -87,16 +87,24 @@ irdma_set_fragment_gen_1(__le64 * wqe, u32 offset,
}
/**
+ * irdma_nop_hdr - Format header section of noop WQE
+ * @qp: hw qp ptr
+ */
+static inline u64 irdma_nop_hdr(struct irdma_qp_uk *qp){
+ return FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, false) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+}
+
+/**
* irdma_nop_1 - insert a NOP wqe
* @qp: hw qp ptr
*/
static int
irdma_nop_1(struct irdma_qp_uk *qp)
{
- u64 hdr;
__le64 *wqe;
u32 wqe_idx;
- bool signaled = false;
if (!qp->sq_ring.head)
return EINVAL;
@@ -110,14 +118,10 @@ irdma_nop_1(struct irdma_qp_uk *qp)
set_64bit_val(wqe, IRDMA_BYTE_8, 0);
set_64bit_val(wqe, IRDMA_BYTE_16, 0);
- hdr = LS_64(IRDMAQP_OP_NOP, IRDMAQPSQ_OPCODE) |
- LS_64(signaled, IRDMAQPSQ_SIGCOMPL) |
- LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
-
/* make sure WQE is written before valid bit is set */
udma_to_device_barrier();
- set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+ set_64bit_val(wqe, IRDMA_BYTE_24, irdma_nop_hdr(qp));
return 0;
}
@@ -160,7 +164,7 @@ irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
/* read the doorbell shadow area */
get_64bit_val(qp->shadow_area, IRDMA_BYTE_0, &temp);
- hw_sq_tail = (u32)RS_64(temp, IRDMA_QP_DBSA_HW_SQ_TAIL);
+ hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
if (sw_sq_head != qp->initial_ring.head) {
if (qp->push_dropped) {
@@ -191,7 +195,7 @@ static void
irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
{
set_32bit_val(qp->push_db, 0,
- LS_32(wqe_idx >> 3, IRDMA_WQEALLOC_WQE_DESC_INDEX) | qp->qp_id);
+ FIELD_PREP(IRDMA_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | qp->qp_id);
qp->initial_ring.head = qp->sq_ring.head;
qp->push_mode = true;
qp->push_dropped = false;
@@ -220,31 +224,32 @@ irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 * wqe, u16 quanta,
* irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
* @qp: hw qp ptr
* @wqe_idx: return wqe index
- * @quanta: size of WR in quanta
+ * @quanta: (in/out) ptr to size of WR in quanta. Modified in case pad is needed
* @total_size: size of WR in bytes
* @info: info on WR
*/
__le64 *
irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
- u16 quanta, u32 total_size,
+ u16 *quanta, u32 total_size,
struct irdma_post_sq_info *info)
{
__le64 *wqe;
__le64 *wqe_0 = NULL;
u32 nop_wqe_idx;
- u16 avail_quanta;
+ u16 avail_quanta, wqe_quanta = *quanta;
u16 i;
avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
(IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
qp->uk_attrs->max_hw_sq_chunk);
- if (quanta <= avail_quanta) {
+
+ if (*quanta <= avail_quanta) {
/* WR fits in current chunk */
- if (quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
+ if (*quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
return NULL;
} else {
/* Need to pad with NOP */
- if (quanta + avail_quanta >
+ if (*quanta + avail_quanta >
IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
return NULL;
@@ -262,17 +267,19 @@ irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
if (!*wqe_idx)
qp->swqe_polarity = !qp->swqe_polarity;
- IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta);
+ IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, *quanta);
+
+ irdma_clr_wqes(qp, *wqe_idx);
wqe = qp->sq_base[*wqe_idx].elem;
- if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && quanta == 1 &&
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && wqe_quanta == 1 &&
(IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
- wqe_0[3] = htole64(LS_64(!qp->swqe_polarity, IRDMAQPSQ_VALID));
+ wqe_0[3] = htole64(FIELD_PREP(IRDMAQPSQ_VALID, !qp->swqe_polarity));
}
qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
- qp->sq_wrtrk_array[*wqe_idx].quanta = quanta;
+ qp->sq_wrtrk_array[*wqe_idx].quanta = wqe_quanta;
qp->sq_wrtrk_array[*wqe_idx].signaled = info->signaled;
return wqe;
@@ -344,20 +351,17 @@ irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
if (ret_code)
return ret_code;
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
- info);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
if (!wqe)
return ENOSPC;
- irdma_clr_wqes(qp, wqe_idx);
-
qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
set_64bit_val(wqe, IRDMA_BYTE_16,
- LS_64(op_info->rem_addr.tag_off, IRDMAQPSQ_FRAG_TO));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
if (info->imm_data_valid) {
set_64bit_val(wqe, IRDMA_BYTE_0,
- LS_64(info->imm_data, IRDMAQPSQ_IMMDATA));
+ FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
i = 0;
} else {
qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0,
@@ -382,28 +386,24 @@ irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
++addl_frag_cnt;
}
- if (!op_info->rem_addr.stag && !total_size)
- op_info->rem_addr.stag = 0x1234;
- hdr = LS_64(op_info->rem_addr.stag, IRDMAQPSQ_REMSTAG) |
- LS_64(info->op_type, IRDMAQPSQ_OPCODE) |
- LS_64((info->imm_data_valid ? 1 : 0), IRDMAQPSQ_IMMDATAFLAG) |
- LS_64((info->report_rtt ? 1 : 0), IRDMAQPSQ_REPORTRTT) |
- LS_64(addl_frag_cnt, IRDMAQPSQ_ADDFRAGCNT) |
- LS_64((info->push_wqe ? 1 : 0), IRDMAQPSQ_PUSHWQE) |
- LS_64(read_fence, IRDMAQPSQ_READFENCE) |
- LS_64(info->local_fence, IRDMAQPSQ_LOCALFENCE) |
- LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |
- LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
+ FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
+ FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe) {
+ if (info->push_wqe)
irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+ else if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -443,8 +443,7 @@ irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
if (ret_code)
return ret_code;
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
- info);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
if (!wqe)
return ENOSPC;
@@ -453,8 +452,6 @@ irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
qp->ord_cnt = 0;
}
- irdma_clr_wqes(qp, wqe_idx);
-
qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
addl_frag_cnt = op_info->num_lo_sges > 1 ?
(op_info->num_lo_sges - 1) : 0;
@@ -478,28 +475,26 @@ irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
++addl_frag_cnt;
}
set_64bit_val(wqe, IRDMA_BYTE_16,
- LS_64(op_info->rem_addr.tag_off, IRDMAQPSQ_FRAG_TO));
- hdr = LS_64(op_info->rem_addr.stag, IRDMAQPSQ_REMSTAG) |
- LS_64((info->report_rtt ? 1 : 0), IRDMAQPSQ_REPORTRTT) |
- LS_64(addl_frag_cnt, IRDMAQPSQ_ADDFRAGCNT) |
- LS_64((inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ),
- IRDMAQPSQ_OPCODE) |
- LS_64((info->push_wqe ? 1 : 0), IRDMAQPSQ_PUSHWQE) |
- LS_64(info->read_fence || qp->force_fence || ord_fence ? 1 : 0,
- IRDMAQPSQ_READFENCE) |
- LS_64(local_fence, IRDMAQPSQ_LOCALFENCE) |
- LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |
- LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE,
+ (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
+ FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE,
+ info->read_fence || ord_fence ? 1 : 0) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe) {
+ if (info->push_wqe)
irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+ else if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -540,21 +535,19 @@ irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
if (ret_code)
return ret_code;
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
- info);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
if (!wqe)
return ENOSPC;
- irdma_clr_wqes(qp, wqe_idx);
-
read_fence |= info->read_fence;
addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
if (info->imm_data_valid) {
set_64bit_val(wqe, IRDMA_BYTE_0,
- LS_64(info->imm_data, IRDMAQPSQ_IMMDATA));
+ FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
i = 0;
} else {
- qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, op_info->sg_list,
+ qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0,
+ frag_cnt ? op_info->sg_list : NULL,
qp->swqe_polarity);
i = 1;
}
@@ -575,31 +568,30 @@ irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
}
set_64bit_val(wqe, IRDMA_BYTE_16,
- LS_64(op_info->qkey, IRDMAQPSQ_DESTQKEY) |
- LS_64(op_info->dest_qp, IRDMAQPSQ_DESTQPN));
- hdr = LS_64(info->stag_to_inv, IRDMAQPSQ_REMSTAG) |
- LS_64(op_info->ah_id, IRDMAQPSQ_AHID) |
- LS_64((info->imm_data_valid ? 1 : 0), IRDMAQPSQ_IMMDATAFLAG) |
- LS_64((info->report_rtt ? 1 : 0), IRDMAQPSQ_REPORTRTT) |
- LS_64(info->op_type, IRDMAQPSQ_OPCODE) |
- LS_64(addl_frag_cnt, IRDMAQPSQ_ADDFRAGCNT) |
- LS_64((info->push_wqe ? 1 : 0), IRDMAQPSQ_PUSHWQE) |
- LS_64(read_fence, IRDMAQPSQ_READFENCE) |
- LS_64(info->local_fence, IRDMAQPSQ_LOCALFENCE) |
- LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |
- LS_64(info->udp_hdr, IRDMAQPSQ_UDPHEADER) |
- LS_64(info->l4len, IRDMAQPSQ_L4LEN) |
- LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+ FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
+ FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
+ FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
+ FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
+ (info->imm_data_valid ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
+ FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
+ FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe) {
+ if (info->push_wqe)
irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+ else if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -615,29 +607,45 @@ irdma_set_mw_bind_wqe_gen_1(__le64 * wqe,
{
set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)op_info->va);
set_64bit_val(wqe, IRDMA_BYTE_8,
- LS_64(op_info->mw_stag, IRDMAQPSQ_PARENTMRSTAG) |
- LS_64(op_info->mr_stag, IRDMAQPSQ_MWSTAG));
+ FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) |
+ FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag));
set_64bit_val(wqe, IRDMA_BYTE_16, op_info->bind_len);
}
/**
* irdma_copy_inline_data_gen_1 - Copy inline data to wqe
- * @dest: pointer to wqe
- * @src: pointer to inline data
- * @len: length of inline data to copy
+ * @wqe: pointer to wqe
+ * @sge_list: table of pointers to inline data
+ * @num_sges: number of SGEs
* @polarity: compatibility parameter
*/
static void
-irdma_copy_inline_data_gen_1(u8 *dest, u8 *src, u32 len,
- u8 polarity)
+irdma_copy_inline_data_gen_1(u8 *wqe, struct irdma_sge *sge_list,
+ u32 num_sges, u8 polarity)
{
- if (len <= IRDMA_BYTE_16) {
- irdma_memcpy(dest, src, len);
- } else {
- irdma_memcpy(dest, src, IRDMA_BYTE_16);
- src += IRDMA_BYTE_16;
- dest = dest + IRDMA_BYTE_32;
- irdma_memcpy(dest, src, len - IRDMA_BYTE_16);
+ u32 quanta_bytes_remaining = 16;
+ u32 i;
+
+ for (i = 0; i < num_sges; i++) {
+ u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].tag_off;
+ u32 sge_len = sge_list[i].len;
+
+ while (sge_len) {
+ u32 bytes_copied;
+
+ bytes_copied = min(sge_len, quanta_bytes_remaining);
+ irdma_memcpy(wqe, cur_sge, bytes_copied);
+ wqe += bytes_copied;
+ cur_sge += bytes_copied;
+ quanta_bytes_remaining -= bytes_copied;
+ sge_len -= bytes_copied;
+
+ if (!quanta_bytes_remaining) {
+ /* Remaining inline bytes reside after the hdr */
+ wqe += 16;
+ quanta_bytes_remaining = 32;
+ }
+ }
}
}
@@ -662,43 +670,59 @@ irdma_set_mw_bind_wqe(__le64 * wqe,
{
set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)op_info->va);
set_64bit_val(wqe, IRDMA_BYTE_8,
- LS_64(op_info->mr_stag, IRDMAQPSQ_PARENTMRSTAG) |
- LS_64(op_info->mw_stag, IRDMAQPSQ_MWSTAG));
+ FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
+ FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
set_64bit_val(wqe, IRDMA_BYTE_16, op_info->bind_len);
}
/**
* irdma_copy_inline_data - Copy inline data to wqe
- * @dest: pointer to wqe
- * @src: pointer to inline data
- * @len: length of inline data to copy
+ * @wqe: pointer to wqe
+ * @sge_list: table of pointers to inline data
+ * @num_sges: number of SGEs
* @polarity: polarity of wqe valid bit
*/
static void
-irdma_copy_inline_data(u8 *dest, u8 *src, u32 len, u8 polarity)
+irdma_copy_inline_data(u8 *wqe, struct irdma_sge *sge_list, u32 num_sges,
+ u8 polarity)
{
u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
- u32 copy_size;
-
- dest += IRDMA_BYTE_8;
- if (len <= IRDMA_BYTE_8) {
- irdma_memcpy(dest, src, len);
- return;
- }
-
- *((u64 *)dest) = *((u64 *)src);
- len -= IRDMA_BYTE_8;
- src += IRDMA_BYTE_8;
- dest += IRDMA_BYTE_24; /* point to additional 32 byte quanta */
-
- while (len) {
- copy_size = len < 31 ? len : 31;
- irdma_memcpy(dest, src, copy_size);
- *(dest + 31) = inline_valid;
- len -= copy_size;
- dest += IRDMA_BYTE_32;
- src += copy_size;
+ u32 quanta_bytes_remaining = 8;
+ u32 i;
+ bool first_quanta = true;
+
+ wqe += 8;
+
+ for (i = 0; i < num_sges; i++) {
+ u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].tag_off;
+ u32 sge_len = sge_list[i].len;
+
+ while (sge_len) {
+ u32 bytes_copied;
+
+ bytes_copied = min(sge_len, quanta_bytes_remaining);
+ irdma_memcpy(wqe, cur_sge, bytes_copied);
+ wqe += bytes_copied;
+ cur_sge += bytes_copied;
+ quanta_bytes_remaining -= bytes_copied;
+ sge_len -= bytes_copied;
+
+ if (!quanta_bytes_remaining) {
+ quanta_bytes_remaining = 31;
+
+ /* Remaining inline bytes reside after the hdr */
+ if (first_quanta) {
+ first_quanta = false;
+ wqe += 16;
+ } else {
+ *wqe = inline_valid;
+ wqe++;
+ }
+ }
+ }
}
+ if (!first_quanta && quanta_bytes_remaining < 31)
+ *(wqe + quanta_bytes_remaining) = inline_valid;
}
/**
@@ -737,59 +761,62 @@ irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info, bool post_sq)
{
__le64 *wqe;
- struct irdma_inline_rdma_write *op_info;
+ struct irdma_rdma_write *op_info;
u64 hdr = 0;
u32 wqe_idx;
bool read_fence = false;
u16 quanta;
+ u32 i, total_size = 0;
info->push_wqe = qp->push_db ? true : false;
- op_info = &info->op.inline_rdma_write;
+ op_info = &info->op.rdma_write;
+
+ if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
+ return EINVAL;
+
+ for (i = 0; i < op_info->num_lo_sges; i++)
+ total_size += op_info->lo_sg_list[i].len;
- if (op_info->len > qp->max_inline_data)
+ if (unlikely(total_size > qp->max_inline_data))
return EINVAL;
- quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
- info);
+ quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
if (!wqe)
return ENOSPC;
- irdma_clr_wqes(qp, wqe_idx);
-
qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
read_fence |= info->read_fence;
set_64bit_val(wqe, IRDMA_BYTE_16,
- LS_64(op_info->rem_addr.tag_off, IRDMAQPSQ_FRAG_TO));
-
- hdr = LS_64(op_info->rem_addr.stag, IRDMAQPSQ_REMSTAG) |
- LS_64(info->op_type, IRDMAQPSQ_OPCODE) |
- LS_64(op_info->len, IRDMAQPSQ_INLINEDATALEN) |
- LS_64(info->report_rtt ? 1 : 0, IRDMAQPSQ_REPORTRTT) |
- LS_64(1, IRDMAQPSQ_INLINEDATAFLAG) |
- LS_64(info->imm_data_valid ? 1 : 0, IRDMAQPSQ_IMMDATAFLAG) |
- LS_64(info->push_wqe ? 1 : 0, IRDMAQPSQ_PUSHWQE) |
- LS_64(read_fence, IRDMAQPSQ_READFENCE) |
- LS_64(info->local_fence, IRDMAQPSQ_LOCALFENCE) |
- LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |
- LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
+
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
+ FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
+ FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe ? 1 : 0) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
if (info->imm_data_valid)
set_64bit_val(wqe, IRDMA_BYTE_0,
- LS_64(info->imm_data, IRDMAQPSQ_IMMDATA));
+ FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
+
+ qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list,
+ op_info->num_lo_sges, qp->swqe_polarity);
- qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
- qp->swqe_polarity);
udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe) {
+ if (info->push_wqe)
irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+ else if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -805,62 +832,65 @@ irdma_uk_inline_send(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info, bool post_sq)
{
__le64 *wqe;
- struct irdma_post_inline_send *op_info;
+ struct irdma_post_send *op_info;
u64 hdr;
u32 wqe_idx;
bool read_fence = false;
u16 quanta;
+ u32 i, total_size = 0;
info->push_wqe = qp->push_db ? true : false;
- op_info = &info->op.inline_send;
+ op_info = &info->op.send;
- if (op_info->len > qp->max_inline_data)
+ if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
return EINVAL;
- quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
- info);
+ for (i = 0; i < op_info->num_sges; i++)
+ total_size += op_info->sg_list[i].len;
+
+ if (unlikely(total_size > qp->max_inline_data))
+ return EINVAL;
+
+ quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
if (!wqe)
return ENOSPC;
- irdma_clr_wqes(qp, wqe_idx);
-
set_64bit_val(wqe, IRDMA_BYTE_16,
- LS_64(op_info->qkey, IRDMAQPSQ_DESTQKEY) |
- LS_64(op_info->dest_qp, IRDMAQPSQ_DESTQPN));
+ FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
+ FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
read_fence |= info->read_fence;
- hdr = LS_64(info->stag_to_inv, IRDMAQPSQ_REMSTAG) |
- LS_64(op_info->ah_id, IRDMAQPSQ_AHID) |
- LS_64(info->op_type, IRDMAQPSQ_OPCODE) |
- LS_64(op_info->len, IRDMAQPSQ_INLINEDATALEN) |
- LS_64((info->imm_data_valid ? 1 : 0), IRDMAQPSQ_IMMDATAFLAG) |
- LS_64((info->report_rtt ? 1 : 0), IRDMAQPSQ_REPORTRTT) |
- LS_64(1, IRDMAQPSQ_INLINEDATAFLAG) |
- LS_64((info->push_wqe ? 1 : 0), IRDMAQPSQ_PUSHWQE) |
- LS_64(read_fence, IRDMAQPSQ_READFENCE) |
- LS_64(info->local_fence, IRDMAQPSQ_LOCALFENCE) |
- LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |
- LS_64(info->udp_hdr, IRDMAQPSQ_UDPHEADER) |
- LS_64(info->l4len, IRDMAQPSQ_L4LEN) |
- LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
+ FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
+ FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
+ (info->imm_data_valid ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
+ FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
+ FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
if (info->imm_data_valid)
set_64bit_val(wqe, IRDMA_BYTE_0,
- LS_64(info->imm_data, IRDMAQPSQ_IMMDATA));
- qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
- qp->swqe_polarity);
+ FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
+ qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list,
+ op_info->num_sges, qp->swqe_polarity);
udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe) {
+ if (info->push_wqe)
irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+ else if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
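Caller-side sketch of the inline API change above (an assumption, not
part of this commit): with the flat data/len fields of the old
irdma_post_inline_send gone, an inline send is described by the same
scatter-gather list a regular send uses. buf, len, and wr_id stand in
for the caller's payload and work-request id:

struct irdma_post_sq_info info = {0};
struct irdma_sge sge = {0};

sge.tag_off = (u64)(uintptr_t)buf;	/* virtual address of payload */
sge.len = len;

info.wr_id = wr_id;
info.op_type = IRDMA_OP_TYPE_SEND;
info.op.send.sg_list = &sge;
info.op.send.num_sges = 1;
ret = irdma_uk_inline_send(qp, &info, true);	/* post_sq = true */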
@@ -882,41 +912,36 @@ irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
u32 wqe_idx;
bool local_fence = false;
struct irdma_sge sge = {0};
+ u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;
info->push_wqe = qp->push_db ? true : false;
op_info = &info->op.inv_local_stag;
local_fence = info->local_fence;
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
- 0, info);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, 0, info);
if (!wqe)
return ENOSPC;
- irdma_clr_wqes(qp, wqe_idx);
-
sge.stag = op_info->target_stag;
qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, &sge, 0);
set_64bit_val(wqe, IRDMA_BYTE_16, 0);
- hdr = LS_64(IRDMA_OP_TYPE_INV_STAG, IRDMAQPSQ_OPCODE) |
- LS_64((info->push_wqe ? 1 : 0), IRDMAQPSQ_PUSHWQE) |
- LS_64(info->read_fence, IRDMAQPSQ_READFENCE) |
- LS_64(local_fence, IRDMAQPSQ_LOCALFENCE) |
- LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |
- LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
+ FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
- post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+ if (info->push_wqe)
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ else if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -936,44 +961,39 @@ irdma_uk_mw_bind(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
u64 hdr;
u32 wqe_idx;
bool local_fence;
+ u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;
info->push_wqe = qp->push_db ? true : false;
op_info = &info->op.bind_window;
local_fence = info->local_fence;
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
- 0, info);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, 0, info);
if (!wqe)
return ENOSPC;
- irdma_clr_wqes(qp, wqe_idx);
-
qp->wqe_ops.iw_set_mw_bind_wqe(wqe, op_info);
- hdr = LS_64(IRDMA_OP_TYPE_BIND_MW, IRDMAQPSQ_OPCODE) |
- LS_64(((op_info->ena_reads << 2) | (op_info->ena_writes << 3)),
- IRDMAQPSQ_STAGRIGHTS) |
- LS_64((op_info->addressing_type == IRDMA_ADDR_TYPE_VA_BASED ? 1 : 0),
- IRDMAQPSQ_VABASEDTO) |
- LS_64((op_info->mem_window_type_1 ? 1 : 0),
- IRDMAQPSQ_MEMWINDOWTYPE) |
- LS_64((info->push_wqe ? 1 : 0), IRDMAQPSQ_PUSHWQE) |
- LS_64(info->read_fence, IRDMAQPSQ_READFENCE) |
- LS_64(local_fence, IRDMAQPSQ_LOCALFENCE) |
- LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |
- LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_BIND_MW) |
+ FIELD_PREP(IRDMAQPSQ_STAGRIGHTS,
+ ((op_info->ena_reads << 2) | (op_info->ena_writes << 3))) |
+ FIELD_PREP(IRDMAQPSQ_VABASEDTO,
+ (op_info->addressing_type == IRDMA_ADDR_TYPE_VA_BASED ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_MEMWINDOWTYPE,
+ (op_info->mem_window_type_1 ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
- post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+ if (info->push_wqe)
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ else if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -1020,8 +1040,8 @@ irdma_uk_post_receive(struct irdma_qp_uk *qp,
}
set_64bit_val(wqe, IRDMA_BYTE_16, 0);
- hdr = LS_64(addl_frag_cnt, IRDMAQPSQ_ADDFRAGCNT) |
- LS_64(qp->rwqe_polarity, IRDMAQPSQ_VALID);
+ hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity);
udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
@@ -1061,17 +1081,17 @@ irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
get_64bit_val(cq->shadow_area, 32, &temp_val);
- sw_cq_sel = (u16)RS_64(temp_val, IRDMA_CQ_DBSA_SW_CQ_SELECT);
+ sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
sw_cq_sel += cq_cnt;
- arm_seq_num = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_SEQ_NUM);
- arm_next_se = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_NEXT_SE);
- arm_next = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_NEXT);
+ arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
+ arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
+ arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val);
- temp_val = LS_64(arm_seq_num, IRDMA_CQ_DBSA_ARM_SEQ_NUM) |
- LS_64(sw_cq_sel, IRDMA_CQ_DBSA_SW_CQ_SELECT) |
- LS_64(arm_next_se, IRDMA_CQ_DBSA_ARM_NEXT_SE) |
- LS_64(arm_next, IRDMA_CQ_DBSA_ARM_NEXT);
+ temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
+ FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);
set_64bit_val(cq->shadow_area, 32, temp_val);
}
@@ -1093,17 +1113,17 @@ irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
cq->armed = true;
get_64bit_val(cq->shadow_area, IRDMA_BYTE_32, &temp_val);
- arm_seq_num = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_SEQ_NUM);
+ arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
arm_seq_num++;
- sw_cq_sel = (u16)RS_64(temp_val, IRDMA_CQ_DBSA_SW_CQ_SELECT);
- arm_next_se = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_NEXT_SE);
+ sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
+ arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
arm_next_se |= 1;
if (cq_notify == IRDMA_CQ_COMPL_EVENT)
arm_next = 1;
- temp_val = LS_64(arm_seq_num, IRDMA_CQ_DBSA_ARM_SEQ_NUM) |
- LS_64(sw_cq_sel, IRDMA_CQ_DBSA_SW_CQ_SELECT) |
- LS_64(arm_next_se, IRDMA_CQ_DBSA_ARM_NEXT_SE) |
- LS_64(arm_next, IRDMA_CQ_DBSA_ARM_NEXT);
+ temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
+ FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);
set_64bit_val(cq->shadow_area, IRDMA_BYTE_32, temp_val);
@@ -1123,12 +1143,12 @@ irdma_copy_quanta(__le64 * dst, __le64 * src, u32 offset, bool flip,
get_64bit_val(src, offset + 8, &val);
if (flip)
- val ^= IRDMAQPSQ_VALID_M;
+ val ^= IRDMAQPSQ_VALID;
set_64bit_val(dst, offset + 8, val);
get_64bit_val(src, offset + 24, &val);
if (flip)
- val ^= IRDMAQPSQ_VALID_M;
+ val ^= IRDMAQPSQ_VALID;
if (barrier)
udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(dst, offset + 24, val);
@@ -1160,7 +1180,8 @@ irdma_repost_rq_wqes(struct irdma_qp_uk *qp, u32 start_idx,
u64 val;
libirdma_debug("reposting_wqes: from start_idx=%d to end_idx = %d\n", start_idx, end_idx);
- pthread_spin_lock(qp->lock);
+ if (pthread_spin_lock(qp->lock))
+ return;
while (start_idx != end_idx) {
IRDMA_RING_SET_TAIL(qp->rq_ring, start_idx + 1);
src_wqe = qp->rq_base[start_idx * qp->rq_wqe_size_multiplier].elem;
@@ -1168,7 +1189,7 @@ irdma_repost_rq_wqes(struct irdma_qp_uk *qp, u32 start_idx,
/* Check to see if polarity has changed */
get_64bit_val(src_wqe, 24, &val);
- if (RS_64(val, IRDMAQPSQ_VALID) != qp->rwqe_polarity)
+ if (FIELD_GET(IRDMAQPSQ_VALID, val) != qp->rwqe_polarity)
flip_polarity = true;
else
flip_polarity = false;
@@ -1242,7 +1263,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
__le64 *cqe;
struct irdma_qp_uk *qp;
struct irdma_ring *pring = NULL;
- u32 wqe_idx, q_type;
+ u32 wqe_idx;
int ret_code;
bool move_cq_head = true;
u8 polarity;
@@ -1255,14 +1276,14 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);
get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
- polarity = (u8)RS_64(qword3, IRDMA_CQ_VALID);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
if (polarity != cq->polarity)
return ENOENT;
/* Ensure CQE contents are read after valid bit is checked */
udma_from_device_barrier();
- ext_valid = (bool)RS_64(qword3, IRDMA_CQ_EXTCQE);
+ ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
if (ext_valid) {
u64 qword6, qword7;
u32 peek_head;
@@ -1270,12 +1291,12 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
if (cq->avoid_mem_cflct) {
ext_cqe = (__le64 *) ((u8 *)cqe + 32);
get_64bit_val(ext_cqe, IRDMA_BYTE_24, &qword7);
- polarity = (u8)RS_64(qword7, IRDMA_CQ_VALID);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
} else {
peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
ext_cqe = cq->cq_base[peek_head].buf;
get_64bit_val(ext_cqe, IRDMA_BYTE_24, &qword7);
- polarity = (u8)RS_64(qword7, IRDMA_CQ_VALID);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
if (!peek_head)
polarity ^= 1;
}
@@ -1285,19 +1306,19 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
/* Ensure ext CQE contents are read after ext valid bit is checked */
udma_from_device_barrier();
- info->imm_valid = (bool)RS_64(qword7, IRDMA_CQ_IMMVALID);
+ info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7);
if (info->imm_valid) {
u64 qword4;
get_64bit_val(ext_cqe, IRDMA_BYTE_0, &qword4);
- info->imm_data = (u32)RS_64(qword4, IRDMA_CQ_IMMDATALOW32);
+ info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4);
}
- info->ud_smac_valid = (bool)RS_64(qword7, IRDMA_CQ_UDSMACVALID);
- info->ud_vlan_valid = (bool)RS_64(qword7, IRDMA_CQ_UDVLANVALID);
+ info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
+ info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
if (info->ud_smac_valid || info->ud_vlan_valid) {
get_64bit_val(ext_cqe, IRDMA_BYTE_16, &qword6);
if (info->ud_vlan_valid)
- info->ud_vlan = (u16)RS_64(qword6, IRDMA_CQ_UDVLAN);
+ info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
if (info->ud_smac_valid) {
info->ud_smac[5] = qword6 & 0xFF;
info->ud_smac[4] = (qword6 >> 8) & 0xFF;
@@ -1313,23 +1334,26 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
info->ud_vlan_valid = false;
}
- q_type = (u8)RS_64(qword3, IRDMA_CQ_SQ);
- info->error = (bool)RS_64(qword3, IRDMA_CQ_ERROR);
- info->push_dropped = (bool)RS_64(qword3, IRDMACQ_PSHDROP);
- info->ipv4 = (bool)RS_64(qword3, IRDMACQ_IPV4);
+ info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
+ info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
+ info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
+ info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
if (info->error) {
- info->major_err = RS_64(qword3, IRDMA_CQ_MAJERR);
- info->minor_err = RS_64(qword3, IRDMA_CQ_MINERR);
- if (info->major_err == IRDMA_FLUSH_MAJOR_ERR) {
- info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
+ info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
+ info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
+ switch (info->major_err) {
+ case IRDMA_FLUSH_MAJOR_ERR:
/* Set the min error to standard flush error code for remaining cqes */
if (info->minor_err != FLUSH_GENERAL_ERR) {
- qword3 &= ~IRDMA_CQ_MINERR_M;
- qword3 |= LS_64(FLUSH_GENERAL_ERR, IRDMA_CQ_MINERR);
+ qword3 &= ~IRDMA_CQ_MINERR;
+ qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
set_64bit_val(cqe, IRDMA_BYTE_24, qword3);
}
- } else {
+ info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
+ break;
+ default:
info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
+ break;
}
} else {
info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
@@ -1338,22 +1362,23 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
get_64bit_val(cqe, IRDMA_BYTE_0, &qword0);
get_64bit_val(cqe, IRDMA_BYTE_16, &qword2);
- info->tcp_seq_num_rtt = (u32)RS_64(qword0, IRDMACQ_TCPSEQNUMRTT);
- info->qp_id = (u32)RS_64(qword2, IRDMACQ_QPID);
- info->ud_src_qpn = (u32)RS_64(qword2, IRDMACQ_UDSRCQPN);
+ info->stat.raw = (u32)FIELD_GET(IRDMACQ_TCPSQN_ROCEPSN_RTT_TS, qword0);
+ info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
+ info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);
get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
- info->solicited_event = (bool)RS_64(qword3, IRDMACQ_SOEVENT);
+ info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
qp = (struct irdma_qp_uk *)(irdma_uintptr) comp_ctx;
if (!qp || qp->destroy_pending) {
ret_code = EFAULT;
goto exit;
}
- wqe_idx = (u32)RS_64(qword3, IRDMA_CQ_WQEIDX);
+ wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
info->qp_handle = (irdma_qp_handle) (irdma_uintptr) qp;
+ info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
- if (q_type == IRDMA_CQE_QTYPE_RQ) {
+ if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
u32 array_idx;
ret_code = irdma_skip_duplicate_flush_cmpl(qp->rq_ring,
@@ -1386,16 +1411,11 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
}
}
- info->bytes_xfered = (u32)RS_64(qword0, IRDMACQ_PAYLDLEN);
+ info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
- if (info->imm_valid)
- info->op_type = IRDMA_OP_TYPE_REC_IMM;
- else
- info->op_type = IRDMA_OP_TYPE_REC;
-
- if (qword3 & IRDMACQ_STAG_M) {
+ if (qword3 & IRDMACQ_STAG) {
info->stag_invalid_set = true;
- info->inv_stag = (u32)RS_64(qword2, IRDMACQ_INVSTAG);
+ info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
} else {
info->stag_invalid_set = false;
}
@@ -1439,11 +1459,16 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
info->signaled = qp->sq_wrtrk_array[wqe_idx].signaled;
if (!info->comp_status)
info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
- info->op_type = (u8)RS_64(qword3, IRDMACQ_OP);
+ info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
IRDMA_RING_SET_TAIL(qp->sq_ring,
wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
} else {
+ if (pthread_spin_lock(qp->lock)) {
+ ret_code = ENOENT;
+ goto exit;
+ }
if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
+ pthread_spin_unlock(qp->lock);
ret_code = ENOENT;
goto exit;
}
@@ -1451,27 +1476,29 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
do {
__le64 *sw_wqe;
u64 wqe_qword;
- u8 op_type;
u32 tail;
tail = qp->sq_ring.tail;
sw_wqe = qp->sq_base[tail].elem;
get_64bit_val(sw_wqe, IRDMA_BYTE_24,
&wqe_qword);
- op_type = (u8)RS_64(wqe_qword, IRDMAQPSQ_OPCODE);
- info->op_type = op_type;
+ info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
IRDMA_RING_SET_TAIL(qp->sq_ring,
tail + qp->sq_wrtrk_array[tail].quanta);
- if (op_type != IRDMAQP_OP_NOP) {
+ if (info->op_type != IRDMAQP_OP_NOP) {
info->wr_id = qp->sq_wrtrk_array[tail].wrid;
info->signaled = qp->sq_wrtrk_array[tail].signaled;
info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
break;
}
} while (1);
+
+ if (info->op_type == IRDMA_OP_TYPE_BIND_MW && info->minor_err == FLUSH_PROT_ERR)
+ info->minor_err = FLUSH_MW_BIND_ERR;
qp->sq_flush_seen = true;
if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
qp->sq_flush_complete = true;
+ pthread_spin_unlock(qp->lock);
}
pring = &qp->sq_ring;
}
@@ -1479,9 +1506,10 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
ret_code = 0;
exit:
- if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED)
+ if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
if (pring && IRDMA_RING_MORE_WORK(*pring))
move_cq_head = false;
+ }
if (move_cq_head) {
IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
@@ -1500,8 +1528,8 @@ exit:
set_64bit_val(cq->shadow_area, IRDMA_BYTE_0,
IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
} else {
- qword3 &= ~IRDMA_CQ_WQEIDX_M;
- qword3 |= LS_64(pring->tail, IRDMA_CQ_WQEIDX);
+ qword3 &= ~IRDMA_CQ_WQEIDX;
+ qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
set_64bit_val(cqe, IRDMA_BYTE_24, qword3);
}
@@ -1509,11 +1537,11 @@ exit:
}
/**
- * irdma_qp_round_up - return round up qp wq depth
+ * irdma_round_up_wq - return round up qp wq depth
* @wqdepth: wq depth in quanta to round up
*/
static int
-irdma_qp_round_up(u32 wqdepth)
+irdma_round_up_wq(u32 wqdepth)
{
int scount = 1;
@@ -1557,34 +1585,34 @@ irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
}
/*
- * irdma_get_sqdepth - get SQ depth (quanta) @max_hw_wq_quanta: HW SQ size limit @sq_size: SQ size @shift: shift which
+ * irdma_get_sqdepth - get SQ depth (quanta) @uk_attrs: qp HW attributes @sq_size: SQ size @shift: shift which
* determines size of WQE @sqdepth: depth of SQ
*/
int
-irdma_get_sqdepth(u32 max_hw_wq_quanta, u32 sq_size, u8 shift, u32 *sqdepth)
+irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth)
{
- *sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);
+ *sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);
- if (*sqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
- *sqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
- else if (*sqdepth > max_hw_wq_quanta)
+ if (*sqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
+ *sqdepth = uk_attrs->min_hw_wq_size << shift;
+ else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
return EINVAL;
return 0;
}
/*
- * irdma_get_rqdepth - get RQ/SRQ depth (quanta) @max_hw_rq_quanta: HW RQ/SRQ size limit @rq_size: RQ/SRQ size @shift:
- * shift which determines size of WQE @rqdepth: depth of RQ/SRQ
+ * irdma_get_rqdepth - get RQ depth (quanta) @uk_attrs: qp HW attributes @rq_size: SRQ size @shift: shift which
+ * determines size of WQE @rqdepth: depth of RQ/SRQ
*/
int
-irdma_get_rqdepth(u32 max_hw_rq_quanta, u32 rq_size, u8 shift, u32 *rqdepth)
+irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth)
{
- *rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);
+ *rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);
- if (*rqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
- *rqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
- else if (*rqdepth > max_hw_rq_quanta)
+ if (*rqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
+ *rqdepth = uk_attrs->min_hw_wq_size << shift;
+ else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
return EINVAL;
return 0;
@@ -1626,6 +1654,80 @@ irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
}
/**
+ * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
+ * @ukinfo: qp initialization info
+ * @sq_shift: Returns shift of SQ
+ * @rq_shift: Returns shift of RQ
+ */
+void
+irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
+ u8 *rq_shift)
+{
+ bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2 ? true : false;
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs,
+ imm_support ? ukinfo->max_sq_frag_cnt + 1 :
+ ukinfo->max_sq_frag_cnt,
+ ukinfo->max_inline_data, sq_shift);
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
+ rq_shift);
+
+ if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
+ if (ukinfo->abi_ver > 4)
+ *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
+ }
+}
+
+/**
+ * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size.
+ * @ukinfo: qp initialization info
+ * @sq_depth: Returns depth of SQ
+ * @sq_shift: Returns shift of SQ
+ */
+int
+irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *sq_depth, u8 *sq_shift)
+{
+ bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2 ? true : false;
+ int status;
+ irdma_get_wqe_shift(ukinfo->uk_attrs,
+ imm_support ? ukinfo->max_sq_frag_cnt + 1 :
+ ukinfo->max_sq_frag_cnt,
+ ukinfo->max_inline_data, sq_shift);
+ status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size,
+ *sq_shift, sq_depth);
+
+ return status;
+}
+
+/**
+ * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size.
+ * @ukinfo: qp initialization info
+ * @rq_depth: Returns depth of RQ
+ * @rq_shift: Returns shift of RQ
+ */
+int
+irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *rq_depth, u8 *rq_shift)
+{
+ int status;
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
+ rq_shift);
+
+ if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
+ if (ukinfo->abi_ver > 4)
+ *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
+ }
+
+ status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size,
+ *rq_shift, rq_depth);
+
+ return status;
+}
+
+/**
* irdma_uk_qp_init - initialize shared qp
* @qp: hw qp (user and kernel)
* @info: qp initialization info
@@ -1640,23 +1742,12 @@ irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
{
int ret_code = 0;
u32 sq_ring_size;
- u8 sqshift, rqshift;
qp->uk_attrs = info->uk_attrs;
if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
return EINVAL;
- irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift);
- if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) {
- irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt,
- info->max_inline_data, &sqshift);
- if (info->abi_ver > 4)
- rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
- } else {
- irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt + 1,
- info->max_inline_data, &sqshift);
- }
qp->qp_caps = info->qp_caps;
qp->sq_base = info->sq;
qp->rq_base = info->rq;
@@ -1672,7 +1763,7 @@ irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
qp->sq_size = info->sq_size;
qp->push_mode = false;
qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
- sq_ring_size = qp->sq_size << sqshift;
+ sq_ring_size = qp->sq_size << info->sq_shift;
IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
if (info->first_sq_wq) {
@@ -1687,9 +1778,9 @@ irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
qp->rq_size = info->rq_size;
qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
qp->max_inline_data = info->max_inline_data;
- qp->rq_wqe_size = rqshift;
+ qp->rq_wqe_size = info->rq_shift;
IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
- qp->rq_wqe_size_multiplier = 1 << rqshift;
+ qp->rq_wqe_size_multiplier = 1 << info->rq_shift;
if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
qp->wqe_ops = iw_wqe_uk_ops_gen_1;
else
@@ -1739,7 +1830,7 @@ irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
else
cqe = cq->cq_base[cq_head].buf;
get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
- polarity = (u8)RS_64(qword3, IRDMA_CQ_VALID);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
if (polarity != temp)
break;
@@ -1769,28 +1860,29 @@ irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
u64 hdr;
u32 wqe_idx;
struct irdma_post_sq_info info = {0};
+ u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;
- info.push_wqe = false;
+ info.push_wqe = qp->push_db ? true : false;
info.wr_id = wr_id;
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
- 0, &info);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, 0, &info);
if (!wqe)
return ENOSPC;
- irdma_clr_wqes(qp, wqe_idx);
-
set_64bit_val(wqe, IRDMA_BYTE_0, 0);
set_64bit_val(wqe, IRDMA_BYTE_8, 0);
set_64bit_val(wqe, IRDMA_BYTE_16, 0);
- hdr = LS_64(IRDMAQP_OP_NOP, IRDMAQPSQ_OPCODE) |
- LS_64(signaled, IRDMAQPSQ_SIGCOMPL) |
- LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (post_sq)
+
+ if (info.push_wqe)
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ else if (post_sq)
irdma_uk_qp_post_wr(qp);
return 0;
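
Note on the conversion running through the hunks above: the old RS_64()/LS_64() shift-based accessors are replaced throughout by mask-based FIELD_GET()/FIELD_PREP() helpers, which is why the paired _M mask macros (e.g. IRDMA_CQ_MINERR_M) disappear in favor of a single mask define. A minimal standalone sketch of those semantics, assuming contiguous single-field masks; EXAMPLE_MINERR is a hypothetical mask for illustration, not one of the driver's IRDMA_CQ_* definitions:

#include <stdint.h>
#include <stdio.h>

/* Shift of a contiguous mask's lowest set bit; mask must be non-zero. */
#define BF_SHIFT(mask)		(__builtin_ctzll(mask))

#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> BF_SHIFT(mask))
#define FIELD_PREP(mask, val)	(((uint64_t)(val) << BF_SHIFT(mask)) & (mask))

#define EXAMPLE_MINERR	0x0000ffff00000000ULL	/* hypothetical field mask */

int main(void)
{
	uint64_t qword3 = 0;

	/* Pack a field, then read it back, mirroring the CQE updates above. */
	qword3 &= ~EXAMPLE_MINERR;
	qword3 |= FIELD_PREP(EXAMPLE_MINERR, 0x1234);
	printf("field = 0x%llx\n",
	    (unsigned long long)FIELD_GET(EXAMPLE_MINERR, qword3)); /* 0x1234 */
	return 0;
}

Deriving the shift from the mask itself is what allows a single define per field instead of separate shift and mask macros.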
diff --git a/contrib/ofed/libirdma/irdma_umain.c b/contrib/ofed/libirdma/irdma_umain.c
index c26ac69d9014..8d27f648f969 100644
--- a/contrib/ofed/libirdma/irdma_umain.c
+++ b/contrib/ofed/libirdma/irdma_umain.c
@@ -39,6 +39,7 @@
#include <stdlib.h>
#include "irdma_umain.h"
#include "irdma-abi.h"
+#include "irdma_uquery.h"
#include "ice_devids.h"
#include "i40e_devids.h"
@@ -48,7 +49,7 @@
/**
* Driver version
*/
-char libirdma_version[] = "0.0.51-k";
+char libirdma_version[] = "1.1.5-k";
unsigned int irdma_dbg;
@@ -118,6 +119,28 @@ static struct ibv_context_ops irdma_ctx_ops = {
.detach_mcast = irdma_udetach_mcast,
};
+/**
+ * libirdma_query_device - fill libirdma_device structure
+ * @ctx_in - ibv_context identifying device
+ * @out - libirdma_device structure to fill with queried info
+ *
+ * ctx_in is not used at the moment
+ */
+int
+libirdma_query_device(struct ibv_context *ctx_in, struct libirdma_device *out)
+{
+ if (!out)
+ return EIO;
+ if (sizeof(out->lib_ver) < sizeof(libirdma_version))
+ return ERANGE;
+
+ out->query_ver = 1;
+ snprintf(out->lib_ver, min(sizeof(libirdma_version), sizeof(out->lib_ver)),
+ "%s", libirdma_version);
+
+ return 0;
+}
+
static int
irdma_init_context(struct verbs_device *vdev,
struct ibv_context *ctx, int cmd_fd)
@@ -147,6 +170,7 @@ irdma_init_context(struct verbs_device *vdev,
iwvctx->uk_attrs.max_hw_sq_chunk = resp.max_hw_sq_chunk;
iwvctx->uk_attrs.max_hw_cq_size = resp.max_hw_cq_size;
iwvctx->uk_attrs.min_hw_cq_size = resp.min_hw_cq_size;
+ iwvctx->uk_attrs.min_hw_wq_size = IRDMA_MIN_WQ_SIZE_GEN2;
iwvctx->abi_ver = IRDMA_ABI_VER;
mmap_key = resp.db_mmap_key;
@@ -180,8 +204,6 @@ irdma_cleanup_context(struct verbs_device *device,
{
struct irdma_uvcontext *iwvctx;
- printf("%s %s CALL\n", __FILE__, __func__);
-
iwvctx = container_of(ibctx, struct irdma_uvcontext, ibv_ctx);
irdma_ufree_pd(&iwvctx->iwupd->ibv_pd);
munmap(iwvctx->db, IRDMA_HW_PAGE_SIZE);
diff --git a/contrib/ofed/libirdma/irdma_umain.h b/contrib/ofed/libirdma/irdma_umain.h
index a040c92728b3..f6dafc6bf39a 100644
--- a/contrib/ofed/libirdma/irdma_umain.h
+++ b/contrib/ofed/libirdma/irdma_umain.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (C) 2019 - 2020 Intel Corporation
+ * Copyright (C) 2019 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -46,12 +46,6 @@
#include "i40iw_hw.h"
#include "irdma_user.h"
-#ifndef likely
-#define likely(x) __builtin_expect((x), 1)
-#endif
-#ifndef unlikely
-#define unlikely(x) __builtin_expect((x), 0)
-#endif
#define PFX "libirdma-"
#define IRDMA_BASE_PUSH_PAGE 1
@@ -62,13 +56,12 @@
LIST_HEAD(list_head, irdma_cq_buf);
LIST_HEAD(list_head_cmpl, irdma_cmpl_gen);
-enum irdma_supported_wc_flags {
- IRDMA_CQ_SUPPORTED_WC_FLAGS = IBV_WC_EX_WITH_BYTE_LEN
+enum irdma_supported_wc_flags_ex {
+ IRDMA_STANDARD_WC_FLAGS_EX = IBV_WC_EX_WITH_BYTE_LEN
| IBV_WC_EX_WITH_IMM
| IBV_WC_EX_WITH_QP_NUM
| IBV_WC_EX_WITH_SRC_QP
- | IBV_WC_EX_WITH_SL
- | IBV_WC_EX_WITH_COMPLETION_TIMESTAMP,
+ | IBV_WC_EX_WITH_SL,
};
struct irdma_udevice {
@@ -103,8 +96,11 @@ struct irdma_cq_buf {
LIST_ENTRY(irdma_cq_buf) list;
struct irdma_cq_uk cq;
struct verbs_mr vmr;
+ size_t buf_size;
};
+extern pthread_mutex_t sigusr1_wait_mutex;
+
struct verbs_cq {
union {
struct ibv_cq cq;
@@ -128,7 +124,6 @@ struct irdma_ucq {
bool arm_sol;
bool skip_sol;
int comp_vector;
- uint32_t report_rtt;
struct irdma_uqp *uqp;
struct irdma_cq_uk cq;
struct list_head resize_list;
@@ -139,7 +134,6 @@ struct irdma_ucq {
struct irdma_uqp {
struct ibv_qp ibv_qp;
- struct ibv_qp_attr attr;
struct irdma_ucq *send_cq;
struct irdma_ucq *recv_cq;
struct verbs_mr vmr;
@@ -154,16 +148,10 @@ struct irdma_uqp {
struct ibv_recv_wr *pend_rx_wr;
struct irdma_qp_uk qp;
enum ibv_qp_type qp_type;
- enum ibv_qp_attr_mask attr_mask;
struct irdma_sge *recv_sges;
pthread_t flush_thread;
};
-struct irdma_umr {
- struct verbs_mr vmr;
- uint32_t acc_flags;
-};
-
/* irdma_uverbs.c */
int irdma_uquery_device_ex(struct ibv_context *context,
const struct ibv_query_device_ex_input *input,
@@ -176,6 +164,10 @@ int irdma_uquery_device(struct ibv_context *, struct ibv_device_attr *);
struct ibv_mr *irdma_ureg_mr(struct ibv_pd *pd, void *addr, size_t length,
int access);
int irdma_udereg_mr(struct ibv_mr *mr);
+
+int irdma_urereg_mr(struct verbs_mr *mr, int flags, struct ibv_pd *pd, void *addr,
+ size_t length, int access);
+
struct ibv_mw *irdma_ualloc_mw(struct ibv_pd *pd, enum ibv_mw_type type);
int irdma_ubind_mw(struct ibv_qp *qp, struct ibv_mw *mw,
struct ibv_mw_bind *mw_bind);
diff --git a/contrib/ofed/libirdma/irdma_uquery.h b/contrib/ofed/libirdma/irdma_uquery.h
new file mode 100644
index 000000000000..cf56818e4d51
--- /dev/null
+++ b/contrib/ofed/libirdma/irdma_uquery.h
@@ -0,0 +1,50 @@
+/*-
+ * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+ *
+ * Copyright (C) 2022 Intel Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+
+#ifndef IRDMA_UQUERY_H
+#define IRDMA_UQUERY_H
+
+#include <infiniband/verbs.h>
+#include "osdep.h"
+
+struct libirdma_device {
+ uint32_t query_ver;
+ char lib_ver[32];
+ uint8_t rsvd[128];
+};
+
+int libirdma_query_device(struct ibv_context *ctx_in, struct libirdma_device *out);
+#endif /* IRDMA_UQUERY_H */
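
Together with the libirdma_query_device() implementation added in irdma_umain.c, this new header gives applications a run-time way to read the library version. A usage sketch; print_irdma_lib_ver() is an illustrative helper, not part of the exported API, and error handling is abbreviated:

#include <stdio.h>
#include <infiniband/verbs.h>
#include "irdma_uquery.h"

static int
print_irdma_lib_ver(struct ibv_context *ctx)
{
	struct libirdma_device dev = { 0 };
	int err;

	/* The current implementation ignores ctx_in, per its comment. */
	err = libirdma_query_device(ctx, &dev);
	if (err)
		return err;
	printf("query_ver %u, lib_ver %s\n", dev.query_ver, dev.lib_ver);
	return 0;
}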
diff --git a/contrib/ofed/libirdma/irdma_user.h b/contrib/ofed/libirdma/irdma_user.h
index 7fe83b8c399f..089619e1e3b1 100644
--- a/contrib/ofed/libirdma/irdma_user.h
+++ b/contrib/ofed/libirdma/irdma_user.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2021 Intel Corporation
+ * Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -53,7 +53,7 @@
#define irdma_address_list u64 *
#define irdma_sgl struct irdma_sge *
-#define IRDMA_MAX_MR_SIZE 0x200000000000ULL
+#define IRDMA_MAX_MR_SIZE 0x200000000000ULL
#define IRDMA_ACCESS_FLAGS_LOCALREAD 0x01
#define IRDMA_ACCESS_FLAGS_LOCALWRITE 0x02
@@ -80,7 +80,97 @@
#define IRDMA_OP_TYPE_REC 0x3e
#define IRDMA_OP_TYPE_REC_IMM 0x3f
-#define IRDMA_FLUSH_MAJOR_ERR 1
+#define IRDMA_FLUSH_MAJOR_ERR 1
+#define IRDMA_SRQFLUSH_RSVD_MAJOR_ERR 0xfffe
+
+/* Async Events codes */
+#define IRDMA_AE_AMP_UNALLOCATED_STAG 0x0102
+#define IRDMA_AE_AMP_INVALID_STAG 0x0103
+#define IRDMA_AE_AMP_BAD_QP 0x0104
+#define IRDMA_AE_AMP_BAD_PD 0x0105
+#define IRDMA_AE_AMP_BAD_STAG_KEY 0x0106
+#define IRDMA_AE_AMP_BAD_STAG_INDEX 0x0107
+#define IRDMA_AE_AMP_BOUNDS_VIOLATION 0x0108
+#define IRDMA_AE_AMP_RIGHTS_VIOLATION 0x0109
+#define IRDMA_AE_AMP_TO_WRAP 0x010a
+#define IRDMA_AE_AMP_FASTREG_VALID_STAG 0x010c
+#define IRDMA_AE_AMP_FASTREG_MW_STAG 0x010d
+#define IRDMA_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e
+#define IRDMA_AE_AMP_FASTREG_INVALID_LENGTH 0x0110
+#define IRDMA_AE_AMP_INVALIDATE_SHARED 0x0111
+#define IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112
+#define IRDMA_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113
+#define IRDMA_AE_AMP_MWBIND_VALID_STAG 0x0114
+#define IRDMA_AE_AMP_MWBIND_OF_MR_STAG 0x0115
+#define IRDMA_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116
+#define IRDMA_AE_AMP_MWBIND_TO_MW_STAG 0x0117
+#define IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118
+#define IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119
+#define IRDMA_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a
+#define IRDMA_AE_AMP_MWBIND_BIND_DISABLED 0x011b
+#define IRDMA_AE_PRIV_OPERATION_DENIED 0x011c
+#define IRDMA_AE_AMP_INVALIDATE_TYPE1_MW 0x011d
+#define IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW 0x011e
+#define IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG 0x011f
+#define IRDMA_AE_AMP_MWBIND_WRONG_TYPE 0x0120
+#define IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH 0x0121
+#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132
+#define IRDMA_AE_UDA_XMIT_BAD_PD 0x0133
+#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134
+#define IRDMA_AE_UDA_L4LEN_INVALID 0x0135
+#define IRDMA_AE_BAD_CLOSE 0x0201
+#define IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202
+#define IRDMA_AE_CQ_OPERATION_ERROR 0x0203
+#define IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205
+#define IRDMA_AE_STAG_ZERO_INVALID 0x0206
+#define IRDMA_AE_IB_RREQ_AND_Q1_FULL 0x0207
+#define IRDMA_AE_IB_INVALID_REQUEST 0x0208
+#define IRDMA_AE_WQE_UNEXPECTED_OPCODE 0x020a
+#define IRDMA_AE_WQE_INVALID_PARAMETER 0x020b
+#define IRDMA_AE_WQE_INVALID_FRAG_DATA 0x020c
+#define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d
+#define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e
+#define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220
+#define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
+#define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
+#define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
+#define IRDMA_AE_DDP_UBE_INVALID_MO 0x0305
+#define IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306
+#define IRDMA_AE_DDP_UBE_INVALID_QN 0x0307
+#define IRDMA_AE_DDP_NO_L_BIT 0x0308
+#define IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311
+#define IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312
+#define IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313
+#define IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314
+#define IRDMA_AE_ROCE_RSP_LENGTH_ERROR 0x0316
+#define IRDMA_AE_ROCE_EMPTY_MCG 0x0380
+#define IRDMA_AE_ROCE_BAD_MC_IP_ADDR 0x0381
+#define IRDMA_AE_ROCE_BAD_MC_QPID 0x0382
+#define IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH 0x0383
+#define IRDMA_AE_INVALID_ARP_ENTRY 0x0401
+#define IRDMA_AE_INVALID_TCP_OPTION_RCVD 0x0402
+#define IRDMA_AE_STALE_ARP_ENTRY 0x0403
+#define IRDMA_AE_INVALID_AH_ENTRY 0x0406
+#define IRDMA_AE_LLP_CLOSE_COMPLETE 0x0501
+#define IRDMA_AE_LLP_CONNECTION_RESET 0x0502
+#define IRDMA_AE_LLP_FIN_RECEIVED 0x0503
+#define IRDMA_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504
+#define IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505
+#define IRDMA_AE_LLP_SEGMENT_TOO_SMALL 0x0507
+#define IRDMA_AE_LLP_SYN_RECEIVED 0x0508
+#define IRDMA_AE_LLP_TERMINATE_RECEIVED 0x0509
+#define IRDMA_AE_LLP_TOO_MANY_RETRIES 0x050a
+#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
+#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
+#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
+#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
+#define IRDMA_AE_RESET_SENT 0x0601
+#define IRDMA_AE_TERMINATE_SENT 0x0602
+#define IRDMA_AE_RESET_NOT_SENT 0x0603
+#define IRDMA_AE_LCE_QP_CATASTROPHIC 0x0700
+#define IRDMA_AE_LCE_FUNCTION_CATASTROPHIC 0x0701
+#define IRDMA_AE_LCE_CQ_CATASTROPHIC 0x0702
+#define IRDMA_AE_QP_SUSPEND_COMPLETE 0x0900
enum irdma_device_caps_const {
IRDMA_WQE_SIZE = 4,
@@ -121,6 +211,7 @@ enum irdma_device_caps_const {
IRDMA_Q2_BUF_SIZE = 256,
IRDMA_QP_CTX_SIZE = 256,
IRDMA_MAX_PDS = 262144,
+ IRDMA_MIN_WQ_SIZE_GEN2 = 8,
};
enum irdma_addressing_type {
@@ -137,9 +228,15 @@ enum irdma_flush_opcode {
FLUSH_REM_OP_ERR,
FLUSH_LOC_LEN_ERR,
FLUSH_FATAL_ERR,
+ FLUSH_RETRY_EXC_ERR,
FLUSH_MW_BIND_ERR,
FLUSH_REM_INV_REQ_ERR,
- FLUSH_RETRY_EXC_ERR,
+};
+
+enum irdma_qp_event_type {
+ IRDMA_QP_EVENT_CATASTROPHIC,
+ IRDMA_QP_EVENT_ACCESS_ERR,
+ IRDMA_QP_EVENT_REQ_ERR,
};
enum irdma_cmpl_status {
@@ -195,7 +292,7 @@ struct irdma_sge {
struct irdma_ring {
volatile u32 head;
- volatile u32 tail;
+ volatile u32 tail; /* effective tail */
u32 size;
};
@@ -215,14 +312,6 @@ struct irdma_post_send {
u32 ah_id;
};
-struct irdma_post_inline_send {
- void *data;
- u32 len;
- u32 qkey;
- u32 dest_qp;
- u32 ah_id;
-};
-
struct irdma_post_rq_info {
u64 wr_id;
irdma_sgl sg_list;
@@ -235,12 +324,6 @@ struct irdma_rdma_write {
struct irdma_sge rem_addr;
};
-struct irdma_inline_rdma_write {
- void *data;
- u32 len;
- struct irdma_sge rem_addr;
-};
-
struct irdma_rdma_read {
irdma_sgl lo_sg_list;
u32 num_lo_sges;
@@ -283,8 +366,6 @@ struct irdma_post_sq_info {
struct irdma_rdma_read rdma_read;
struct irdma_bind_window bind_window;
struct irdma_inv_local_stag inv_local_stag;
- struct irdma_inline_rdma_write inline_rdma_write;
- struct irdma_post_inline_send inline_send;
} op;
};
@@ -292,7 +373,6 @@ struct irdma_cq_poll_info {
u64 wr_id;
irdma_qp_handle qp_handle;
u32 bytes_xfered;
- u32 tcp_seq_num_rtt;
u32 qp_id;
u32 ud_src_qpn;
u32 imm_data;
@@ -303,6 +383,7 @@ struct irdma_cq_poll_info {
u16 ud_vlan;
u8 ud_smac[6];
u8 op_type;
+ u8 q_type;
bool stag_invalid_set:1; /* or L_R_Key set */
bool push_dropped:1;
bool error:1;
@@ -312,6 +393,17 @@ struct irdma_cq_poll_info {
bool ud_smac_valid:1;
bool imm_valid:1;
bool signaled:1;
+ union {
+ u32 tcp_sqn;
+ u32 roce_psn;
+ u32 rtt;
+ u32 raw;
+ } stat;
+};
+
+struct qp_err_code {
+ enum irdma_flush_opcode flush_code;
+ enum irdma_qp_event_type event_type;
};
int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
@@ -336,7 +428,7 @@ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
bool post_sq);
struct irdma_wqe_uk_ops {
- void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity);
+ void (*iw_copy_inline_data)(u8 *dest, struct irdma_sge *sge_list, u32 num_sges, u8 polarity);
u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct irdma_sge *sge,
u8 valid);
@@ -354,6 +446,12 @@ int irdma_uk_cq_init(struct irdma_cq_uk *cq,
struct irdma_cq_uk_init_info *info);
int irdma_uk_qp_init(struct irdma_qp_uk *qp,
struct irdma_qp_uk_init_info *info);
+void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
+ u8 *rq_shift);
+int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *sq_depth, u8 *sq_shift);
+int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *rq_depth, u8 *rq_shift);
struct irdma_sq_uk_wr_trk_info {
u64 wrid;
u32 wr_len;
@@ -407,7 +505,6 @@ struct irdma_qp_uk {
bool destroy_pending:1; /* Indicates the QP is being destroyed */
void *back_qp;
pthread_spinlock_t *lock;
- bool force_fence;
u8 dbg_rq_flushed;
u16 ord_cnt;
u8 sq_flush_seen;
@@ -444,8 +541,12 @@ struct irdma_qp_uk_init_info {
u32 max_sq_frag_cnt;
u32 max_rq_frag_cnt;
u32 max_inline_data;
+ u32 sq_depth;
+ u32 rq_depth;
u8 first_sq_wq;
u8 type;
+ u8 sq_shift;
+ u8 rq_shift;
u8 rd_fence_rate;
int abi_ver;
bool legacy_mode;
@@ -462,7 +563,7 @@ struct irdma_cq_uk_init_info {
};
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
- u16 quanta, u32 total_size,
+ u16 *quanta, u32 total_size,
struct irdma_post_sq_info *info);
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
int irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
@@ -471,9 +572,81 @@ int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta);
int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
u32 inline_data, u8 *shift);
-int irdma_get_sqdepth(u32 max_hw_wq_quanta, u32 sq_size, u8 shift, u32 *wqdepth);
-int irdma_get_rqdepth(u32 max_hw_rq_quanta, u32 rq_size, u8 shift, u32 *wqdepth);
+int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth);
+int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth);
+int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift, u32 *srqdepth);
void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
u32 wqe_idx, bool post_sq);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
+
+static inline struct qp_err_code irdma_ae_to_qp_err_code(u16 ae_id)
+{
+ struct qp_err_code qp_err = { 0 };
+
+ switch (ae_id) {
+ case IRDMA_AE_AMP_BOUNDS_VIOLATION:
+ case IRDMA_AE_AMP_INVALID_STAG:
+ case IRDMA_AE_AMP_RIGHTS_VIOLATION:
+ case IRDMA_AE_AMP_UNALLOCATED_STAG:
+ case IRDMA_AE_AMP_BAD_PD:
+ case IRDMA_AE_AMP_BAD_QP:
+ case IRDMA_AE_AMP_BAD_STAG_KEY:
+ case IRDMA_AE_AMP_BAD_STAG_INDEX:
+ case IRDMA_AE_AMP_TO_WRAP:
+ case IRDMA_AE_PRIV_OPERATION_DENIED:
+ qp_err.flush_code = FLUSH_PROT_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ break;
+ case IRDMA_AE_UDA_XMIT_BAD_PD:
+ case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
+ qp_err.flush_code = FLUSH_LOC_QP_OP_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ case IRDMA_AE_UDA_L4LEN_INVALID:
+ case IRDMA_AE_DDP_UBE_INVALID_MO:
+ case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+ qp_err.flush_code = FLUSH_LOC_LEN_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
+ case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
+ qp_err.flush_code = FLUSH_REM_ACCESS_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ break;
+ case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
+ case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
+ case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
+ case IRDMA_AE_AMP_MWBIND_VALID_STAG:
+ qp_err.flush_code = FLUSH_MW_BIND_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ break;
+ case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+ qp_err.flush_code = FLUSH_RETRY_EXC_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_IB_INVALID_REQUEST:
+ qp_err.flush_code = FLUSH_REM_INV_REQ_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_REQ_ERR;
+ break;
+ case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
+ case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
+ case IRDMA_AE_IB_REMOTE_OP_ERROR:
+ qp_err.flush_code = FLUSH_REM_OP_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_LCE_QP_CATASTROPHIC:
+ qp_err.flush_code = FLUSH_FATAL_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ default:
+ qp_err.flush_code = FLUSH_GENERAL_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ }
+
+ return qp_err;
+}
#endif /* IRDMA_USER_H */
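
The new irdma_ae_to_qp_err_code() helper centralizes the mapping from asynchronous event codes to a flush opcode plus QP event type. A sketch of how a caller might consume it; report_ae() and the printf reporting are illustrative only, not driver code:

#include <stdio.h>
#include "irdma_user.h"

static void
report_ae(u16 ae_id)
{
	struct qp_err_code err = irdma_ae_to_qp_err_code(ae_id);

	/* Unlisted AEs fall back to FLUSH_GENERAL_ERR with a
	 * catastrophic event type, per the default case above. */
	printf("ae 0x%04x -> flush %d, event %d\n",
	    ae_id, err.flush_code, err.event_type);
}

int main(void)
{
	report_ae(IRDMA_AE_LLP_TOO_MANY_RETRIES); /* FLUSH_RETRY_EXC_ERR */
	return 0;
}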
diff --git a/contrib/ofed/libirdma/irdma_uverbs.c b/contrib/ofed/libirdma/irdma_uverbs.c
index 64759b2965ce..58c5c6cf617c 100644
--- a/contrib/ofed/libirdma/irdma_uverbs.c
+++ b/contrib/ofed/libirdma/irdma_uverbs.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (C) 2019 - 2021 Intel Corporation
+ * Copyright (C) 2019 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -46,6 +46,7 @@
#include <sys/stat.h>
#include <fcntl.h>
#include <stdbool.h>
+#include <infiniband/opcode.h>
#include "irdma_umain.h"
#include "abi.h"
@@ -137,7 +138,7 @@ irdma_ualloc_pd(struct ibv_context *context)
struct irdma_upd *iwupd;
int err;
- iwupd = malloc(sizeof(*iwupd));
+ iwupd = calloc(1, sizeof(*iwupd));
if (!iwupd)
return NULL;
@@ -163,6 +164,7 @@ err_free:
int
irdma_ufree_pd(struct ibv_pd *pd)
{
+ struct irdma_uvcontext *iwvctx = container_of(pd->context, struct irdma_uvcontext, ibv_ctx);
struct irdma_upd *iwupd;
int ret;
@@ -188,27 +190,44 @@ struct ibv_mr *
irdma_ureg_mr(struct ibv_pd *pd, void *addr, size_t length,
int access)
{
- struct irdma_umr *umr;
- struct irdma_ureg_mr cmd;
+ struct verbs_mr *vmr;
+ struct irdma_ureg_mr cmd = {};
struct ibv_reg_mr_resp resp;
int err;
- umr = malloc(sizeof(*umr));
- if (!umr)
+ vmr = malloc(sizeof(*vmr));
+ if (!vmr)
return NULL;
cmd.reg_type = IRDMA_MEMREG_TYPE_MEM;
err = ibv_cmd_reg_mr(pd, addr, length,
- (uintptr_t)addr, access, &umr->vmr.ibv_mr, &cmd.ibv_cmd,
+ (uintptr_t)addr, access, &vmr->ibv_mr, &cmd.ibv_cmd,
sizeof(cmd), &resp, sizeof(resp));
if (err) {
- free(umr);
+ free(vmr);
errno = err;
return NULL;
}
- umr->acc_flags = access;
- return &umr->vmr.ibv_mr;
+ return &vmr->ibv_mr;
+}
+
+/*
+ * irdma_urereg_mr - re-register memory region @vmr: mr that was allocated @flags: bit mask indicating which MR
+ * attributes are modified @pd: pd of the mr @addr: user address of the memory region @length: length of the memory
+ * @access: access allowed on this mr
+ */
+int
+irdma_urereg_mr(struct verbs_mr *vmr, int flags, struct ibv_pd *pd,
+ void *addr, size_t length, int access)
+{
+ struct irdma_urereg_mr cmd = {};
+ struct ibv_rereg_mr_resp resp;
+
+ cmd.reg_type = IRDMA_MEMREG_TYPE_MEM;
+ return ibv_cmd_rereg_mr(&vmr->ibv_mr, flags, addr, length, (uintptr_t)addr,
+ access, pd, &cmd.ibv_cmd, sizeof(cmd), &resp,
+ sizeof(resp));
}
/**
@@ -218,19 +237,15 @@ irdma_ureg_mr(struct ibv_pd *pd, void *addr, size_t length,
int
irdma_udereg_mr(struct ibv_mr *mr)
{
- struct irdma_umr *umr;
struct verbs_mr *vmr;
int ret;
vmr = container_of(mr, struct verbs_mr, ibv_mr);
- umr = container_of(vmr, struct irdma_umr, vmr);
ret = ibv_cmd_dereg_mr(mr);
if (ret)
return ret;
- free(umr);
-
return 0;
}
@@ -245,6 +260,7 @@ irdma_ualloc_mw(struct ibv_pd *pd, enum ibv_mw_type type)
struct ibv_mw *mw;
struct ibv_alloc_mw cmd;
struct ibv_alloc_mw_resp resp;
+ int err;
mw = calloc(1, sizeof(*mw));
if (!mw)
@@ -273,7 +289,6 @@ irdma_ubind_mw(struct ibv_qp *qp, struct ibv_mw *mw,
{
struct ibv_mw_bind_info *bind_info = &mw_bind->bind_info;
struct verbs_mr *vmr;
- struct irdma_umr *umr;
struct ibv_send_wr wr = {};
struct ibv_send_wr *bad_wr;
@@ -284,11 +299,10 @@ irdma_ubind_mw(struct ibv_qp *qp, struct ibv_mw *mw,
if (bind_info->mr) {
vmr = verbs_get_mr(bind_info->mr);
- umr = container_of(vmr, struct irdma_umr, vmr);
if (vmr->mr_type != IBV_MR_TYPE_MR)
return ENOTSUP;
- if (umr->acc_flags & IBV_ACCESS_ZERO_BASED)
+ if (vmr->access & IBV_ACCESS_ZERO_BASED)
return EINVAL;
if (mw->pd != bind_info->mr->pd)
@@ -356,14 +370,15 @@ irdma_free_hw_buf(void *buf, size_t size)
* get_cq_size - returns actual cqe needed by HW
* @ncqe: minimum cqes requested by application
* @hw_rev: HW generation
+ * @cqe_64byte_ena: enable 64byte cqe
*/
static inline int
-get_cq_size(int ncqe, u8 hw_rev)
+get_cq_size(int ncqe, u8 hw_rev, bool cqe_64byte_ena)
{
ncqe++;
/* Completions with immediate require 1 extra entry */
- if (hw_rev > IRDMA_GEN_1)
+ if (!cqe_64byte_ena && hw_rev > IRDMA_GEN_1)
ncqe *= 2;
if (ncqe < IRDMA_U_MINCQ_SIZE)
@@ -372,8 +387,11 @@ get_cq_size(int ncqe, u8 hw_rev)
return ncqe;
}
-static inline size_t get_cq_total_bytes(u32 cq_size) {
- return roundup(cq_size * sizeof(struct irdma_cqe), IRDMA_HW_PAGE_SIZE);
+static inline size_t get_cq_total_bytes(u32 cq_size, bool cqe_64byte_ena) {
+ if (cqe_64byte_ena)
+ return roundup(cq_size * sizeof(struct irdma_extended_cqe), IRDMA_HW_PAGE_SIZE);
+ else
+ return roundup(cq_size * sizeof(struct irdma_cqe), IRDMA_HW_PAGE_SIZE);
}
/**
@@ -401,17 +419,22 @@ ucreate_cq(struct ibv_context *context,
u32 cq_pages;
int ret, ncqe;
u8 hw_rev;
+ bool cqe_64byte_ena;
iwvctx = container_of(context, struct irdma_uvcontext, ibv_ctx);
uk_attrs = &iwvctx->uk_attrs;
hw_rev = uk_attrs->hw_rev;
- if (ext_cq && hw_rev == IRDMA_GEN_1) {
- errno = EOPNOTSUPP;
- return NULL;
+ if (ext_cq) {
+ u32 supported_flags = IRDMA_STANDARD_WC_FLAGS_EX;
+
+ if (hw_rev == IRDMA_GEN_1 || attr_ex->wc_flags & ~supported_flags) {
+ errno = EOPNOTSUPP;
+ return NULL;
+ }
}
- if (attr_ex->cqe < IRDMA_MIN_CQ_SIZE || attr_ex->cqe > uk_attrs->max_hw_cq_size) {
+ if (attr_ex->cqe < uk_attrs->min_hw_cq_size || attr_ex->cqe > uk_attrs->max_hw_cq_size - 1) {
errno = EINVAL;
return NULL;
}
@@ -428,11 +451,12 @@ ucreate_cq(struct ibv_context *context,
return NULL;
}
- info.cq_size = get_cq_size(attr_ex->cqe, hw_rev);
+ cqe_64byte_ena = uk_attrs->feature_flags & IRDMA_FEATURE_64_BYTE_CQE ? true : false;
+ info.cq_size = get_cq_size(attr_ex->cqe, hw_rev, cqe_64byte_ena);
iwucq->comp_vector = attr_ex->comp_vector;
LIST_INIT(&iwucq->resize_list);
LIST_INIT(&iwucq->cmpl_generated);
- total_size = get_cq_total_bytes(info.cq_size);
+ total_size = get_cq_total_bytes(info.cq_size, cqe_64byte_ena);
cq_pages = total_size >> IRDMA_HW_PAGE_SHIFT;
if (!(uk_attrs->feature_flags & IRDMA_FEATURE_CQ_RESIZE))
@@ -462,7 +486,7 @@ ucreate_cq(struct ibv_context *context,
if (uk_attrs->feature_flags & IRDMA_FEATURE_CQ_RESIZE) {
info.shadow_area = irdma_alloc_hw_buf(IRDMA_DB_SHADOW_AREA_SIZE);
if (!info.shadow_area)
- goto err_dereg_mr;
+ goto err_alloc_shadow;
memset(info.shadow_area, 0, IRDMA_DB_SHADOW_AREA_SIZE);
reg_mr_shadow_cmd.reg_type = IRDMA_MEMREG_TYPE_CQ;
@@ -474,8 +498,9 @@ ucreate_cq(struct ibv_context *context,
&reg_mr_shadow_cmd.ibv_cmd, sizeof(reg_mr_shadow_cmd),
&reg_mr_shadow_resp, sizeof(reg_mr_shadow_resp));
if (ret) {
+ irdma_free_hw_buf(info.shadow_area, IRDMA_DB_SHADOW_AREA_SIZE);
errno = ret;
- goto err_dereg_shadow;
+ goto err_alloc_shadow;
}
iwucq->vmr_shadow_area.ibv_mr.pd = &iwvctx->iwupd->ibv_pd;
@@ -491,28 +516,30 @@ ucreate_cq(struct ibv_context *context,
ret = ibv_cmd_create_cq_ex(context, attr_ex, &iwucq->verbs_cq.cq_ex,
&cmd.ibv_cmd, sizeof(cmd.ibv_cmd), sizeof(cmd), &resp.ibv_resp,
sizeof(resp.ibv_resp), sizeof(resp));
+ attr_ex->cqe = ncqe;
if (ret) {
errno = ret;
- goto err_dereg_shadow;
+ goto err_create_cq;
}
if (ext_cq)
irdma_ibvcq_ex_fill_priv_funcs(iwucq, attr_ex);
info.cq_id = resp.cq_id;
- /* Do not report the cqe's burned by HW */
+	/* Do not report the CQEs reserved for immediate completions and burned by HW */
iwucq->verbs_cq.cq.cqe = ncqe;
-
+ if (cqe_64byte_ena)
+ info.avoid_mem_cflct = true;
info.cqe_alloc_db = (u32 *)((u8 *)iwvctx->db + IRDMA_DB_CQ_OFFSET);
irdma_uk_cq_init(&iwucq->cq, &info);
-
return &iwucq->verbs_cq.cq_ex;
-err_dereg_shadow:
- ibv_cmd_dereg_mr(&iwucq->vmr.ibv_mr);
+err_create_cq:
if (iwucq->vmr_shadow_area.ibv_mr.handle) {
ibv_cmd_dereg_mr(&iwucq->vmr_shadow_area.ibv_mr);
- irdma_free_hw_buf(info.shadow_area, IRDMA_HW_PAGE_SIZE);
+ irdma_free_hw_buf(info.shadow_area, IRDMA_DB_SHADOW_AREA_SIZE);
}
+err_alloc_shadow:
+ ibv_cmd_dereg_mr(&iwucq->vmr.ibv_mr);
err_dereg_mr:
irdma_free_hw_buf(info.cq_base, total_size);
err_cq_base:
@@ -545,11 +572,6 @@ struct ibv_cq_ex *
irdma_ucreate_cq_ex(struct ibv_context *context,
struct ibv_cq_init_attr_ex *attr_ex)
{
- if (attr_ex->wc_flags & ~IRDMA_CQ_SUPPORTED_WC_FLAGS) {
- errno = EOPNOTSUPP;
- return NULL;
- }
-
return ucreate_cq(context, attr_ex, true);
}
@@ -561,7 +583,7 @@ static void
irdma_free_cq_buf(struct irdma_cq_buf *cq_buf)
{
ibv_cmd_dereg_mr(&cq_buf->vmr.ibv_mr);
- irdma_free_hw_buf(cq_buf->cq.cq_base, get_cq_total_bytes(cq_buf->cq.cq_size));
+ irdma_free_hw_buf(cq_buf->cq.cq_base, cq_buf->buf_size);
free(cq_buf);
}
@@ -645,7 +667,7 @@ irdma_cq_empty(struct irdma_ucq *iwucq)
ukcq = &iwucq->cq;
cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
get_64bit_val(cqe, 24, &qword3);
- polarity = (__u8) RS_64(qword3, IRDMA_CQ_VALID);
+ polarity = (__u8) FIELD_GET(IRDMA_CQ_VALID, qword3);
return polarity != ukcq->polarity;
}
@@ -680,7 +702,7 @@ irdma_generate_flush_completions(struct irdma_uqp *iwuqp)
cmpl->cpi.wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
sw_wqe = qp->sq_base[wqe_idx].elem;
get_64bit_val(sw_wqe, 24, &wqe_qword);
- cmpl->cpi.op_type = (__u8) RS_64(wqe_qword, IRDMAQPSQ_OPCODE);
+ cmpl->cpi.op_type = (__u8) FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
/* remove the SQ WR by moving SQ tail */
IRDMA_RING_SET_TAIL(*sq_ring, sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
LIST_INSERT_HEAD(&iwuqp->send_cq->cmpl_generated, cmpl, list);
@@ -794,6 +816,55 @@ irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode)
}
}
+static inline void
+set_ib_wc_op_sq(struct irdma_cq_poll_info *cur_cqe, struct ibv_wc *entry)
+{
+ switch (cur_cqe->op_type) {
+ case IRDMA_OP_TYPE_RDMA_WRITE:
+ case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
+ entry->opcode = IBV_WC_RDMA_WRITE;
+ break;
+ case IRDMA_OP_TYPE_RDMA_READ:
+ entry->opcode = IBV_WC_RDMA_READ;
+ break;
+ case IRDMA_OP_TYPE_SEND_SOL:
+ case IRDMA_OP_TYPE_SEND_SOL_INV:
+ case IRDMA_OP_TYPE_SEND_INV:
+ case IRDMA_OP_TYPE_SEND:
+ entry->opcode = IBV_WC_SEND;
+ break;
+ case IRDMA_OP_TYPE_BIND_MW:
+ entry->opcode = IBV_WC_BIND_MW;
+ break;
+ case IRDMA_OP_TYPE_INV_STAG:
+ entry->opcode = IBV_WC_LOCAL_INV;
+ break;
+ default:
+ entry->status = IBV_WC_GENERAL_ERR;
+ printf("%s: Invalid opcode = %d in CQE\n",
+ __func__, cur_cqe->op_type);
+ }
+}
+
+static inline void
+set_ib_wc_op_rq(struct irdma_cq_poll_info *cur_cqe,
+ struct ibv_wc *entry, bool send_imm_support)
+{
+ if (!send_imm_support) {
+ entry->opcode = cur_cqe->imm_valid ? IBV_WC_RECV_RDMA_WITH_IMM :
+ IBV_WC_RECV;
+ return;
+ }
+ switch (cur_cqe->op_type) {
+ case IBV_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
+ case IBV_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
+ entry->opcode = IBV_WC_RECV_RDMA_WITH_IMM;
+ break;
+ default:
+ entry->opcode = IBV_WC_RECV;
+ }
+}
+
/**
* irdma_process_cqe_ext - process current cqe for extended CQ
* @cur_cqe - current cqe info
@@ -830,9 +901,8 @@ irdma_process_cqe(struct ibv_wc *entry, struct irdma_cq_poll_info *cur_cqe)
ib_qp = qp->back_qp;
if (cur_cqe->error) {
- if (cur_cqe->comp_status == IRDMA_COMPL_STATUS_FLUSHED)
- entry->status = (cur_cqe->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ?
- irdma_flush_err_to_ib_wc_status(cur_cqe->minor_err) : IBV_WC_GENERAL_ERR;
+ entry->status = (cur_cqe->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ?
+ irdma_flush_err_to_ib_wc_status(cur_cqe->minor_err) : IBV_WC_GENERAL_ERR;
entry->vendor_err = cur_cqe->major_err << 16 |
cur_cqe->minor_err;
} else {
@@ -844,47 +914,17 @@ irdma_process_cqe(struct ibv_wc *entry, struct irdma_cq_poll_info *cur_cqe)
entry->wc_flags |= IBV_WC_WITH_IMM;
}
- switch (cur_cqe->op_type) {
- case IRDMA_OP_TYPE_RDMA_WRITE:
- case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
- entry->opcode = IBV_WC_RDMA_WRITE;
- break;
- case IRDMA_OP_TYPE_RDMA_READ:
- entry->opcode = IBV_WC_RDMA_READ;
- break;
- case IRDMA_OP_TYPE_SEND_SOL:
- case IRDMA_OP_TYPE_SEND_SOL_INV:
- case IRDMA_OP_TYPE_SEND_INV:
- case IRDMA_OP_TYPE_SEND:
- entry->opcode = IBV_WC_SEND;
- break;
- case IRDMA_OP_TYPE_BIND_MW:
- entry->opcode = IBV_WC_BIND_MW;
- break;
- case IRDMA_OP_TYPE_REC:
- entry->opcode = IBV_WC_RECV;
- if (ib_qp->qp_type != IBV_QPT_UD &&
- cur_cqe->stag_invalid_set) {
- entry->invalidated_rkey = cur_cqe->inv_stag;
- entry->wc_flags |= IBV_WC_WITH_INV;
- }
- break;
- case IRDMA_OP_TYPE_REC_IMM:
- entry->opcode = IBV_WC_RECV_RDMA_WITH_IMM;
+ if (cur_cqe->q_type == IRDMA_CQE_QTYPE_SQ) {
+ set_ib_wc_op_sq(cur_cqe, entry);
+ } else {
+ set_ib_wc_op_rq(cur_cqe, entry,
+ qp->qp_caps & IRDMA_SEND_WITH_IMM ?
+ true : false);
if (ib_qp->qp_type != IBV_QPT_UD &&
cur_cqe->stag_invalid_set) {
entry->invalidated_rkey = cur_cqe->inv_stag;
entry->wc_flags |= IBV_WC_WITH_INV;
}
- break;
- case IRDMA_OP_TYPE_INV_STAG:
- entry->opcode = IBV_WC_LOCAL_INV;
- break;
- default:
- entry->status = IBV_WC_GENERAL_ERR;
- printf("%s: Invalid opcode = %d in CQE\n",
- __func__, cur_cqe->op_type);
- return;
}
if (ib_qp->qp_type == IBV_QPT_UD) {
@@ -1111,20 +1151,6 @@ irdma_end_poll(struct ibv_cq_ex *ibvcq_ex)
pthread_spin_unlock(&iwucq->lock);
}
-/**
- * irdma_wc_read_completion_ts - Get completion timestamp
- * @ibvcq_ex: ibv extended CQ
- *
- * Get completion timestamp in HCA clock units
- */
-static uint64_t irdma_wc_read_completion_ts(struct ibv_cq_ex *ibvcq_ex){
- struct irdma_ucq *iwucq = container_of(ibvcq_ex, struct irdma_ucq,
- verbs_cq.cq_ex);
-#define HCA_CORE_CLOCK_800_MHZ 800
-
- return iwucq->cur_cqe.tcp_seq_num_rtt / HCA_CORE_CLOCK_800_MHZ;
-}
-
static enum ibv_wc_opcode
irdma_wc_read_opcode(struct ibv_cq_ex *ibvcq_ex)
{
@@ -1255,11 +1281,6 @@ irdma_ibvcq_ex_fill_priv_funcs(struct irdma_ucq *iwucq,
ibvcq_ex->end_poll = irdma_end_poll;
ibvcq_ex->next_poll = irdma_next_poll;
- if (attr_ex->wc_flags & IBV_WC_EX_WITH_COMPLETION_TIMESTAMP) {
- ibvcq_ex->read_completion_ts = irdma_wc_read_completion_ts;
- iwucq->report_rtt = true;
- }
-
ibvcq_ex->read_opcode = irdma_wc_read_opcode;
ibvcq_ex->read_vendor_err = irdma_wc_read_vendor_err;
ibvcq_ex->read_wc_flags = irdma_wc_read_wc_flags;
@@ -1403,15 +1424,13 @@ irdma_destroy_vmapped_qp(struct irdma_uqp *iwuqp)
* @pd: pd for the qp
* @attr: attributes of qp passed
* @resp: response back from create qp
- * @sqdepth: depth of sq
- * @rqdepth: depth of rq
- * @info: info for initializing user level qp
+ * @info: uk info for initializing user level qp
* @abi_ver: abi version of the create qp command
*/
static int
irdma_vmapped_qp(struct irdma_uqp *iwuqp, struct ibv_pd *pd,
- struct ibv_qp_init_attr *attr, int sqdepth,
- int rqdepth, struct irdma_qp_uk_init_info *info,
+ struct ibv_qp_init_attr *attr,
+ struct irdma_qp_uk_init_info *info,
bool legacy_mode)
{
struct irdma_ucreate_qp cmd = {};
@@ -1421,8 +1440,8 @@ irdma_vmapped_qp(struct irdma_uqp *iwuqp, struct ibv_pd *pd,
struct ibv_reg_mr_resp reg_mr_resp = {};
int ret;
- sqsize = roundup(sqdepth * IRDMA_QP_WQE_MIN_SIZE, IRDMA_HW_PAGE_SIZE);
- rqsize = roundup(rqdepth * IRDMA_QP_WQE_MIN_SIZE, IRDMA_HW_PAGE_SIZE);
+ sqsize = roundup(info->sq_depth * IRDMA_QP_WQE_MIN_SIZE, IRDMA_HW_PAGE_SIZE);
+ rqsize = roundup(info->rq_depth * IRDMA_QP_WQE_MIN_SIZE, IRDMA_HW_PAGE_SIZE);
totalqpsize = rqsize + sqsize + IRDMA_DB_SHADOW_AREA_SIZE;
info->sq = irdma_alloc_hw_buf(totalqpsize);
iwuqp->buf_size = totalqpsize;
@@ -1491,8 +1510,6 @@ irdma_ucreate_qp(struct ibv_pd *pd,
struct irdma_uk_attrs *uk_attrs;
struct irdma_uvcontext *iwvctx;
struct irdma_uqp *iwuqp;
- u32 sqdepth, rqdepth;
- u8 sqshift, rqshift;
int status;
if (attr->qp_type != IBV_QPT_RC && attr->qp_type != IBV_QPT_UD) {
@@ -1512,12 +1529,15 @@ irdma_ucreate_qp(struct ibv_pd *pd,
return NULL;
}
- irdma_get_wqe_shift(uk_attrs,
- uk_attrs->hw_rev > IRDMA_GEN_1 ? attr->cap.max_send_sge + 1 :
- attr->cap.max_send_sge,
- attr->cap.max_inline_data, &sqshift);
- status = irdma_get_sqdepth(uk_attrs->max_hw_wq_quanta,
- attr->cap.max_send_wr, sqshift, &sqdepth);
+ info.uk_attrs = uk_attrs;
+ info.sq_size = attr->cap.max_send_wr;
+ info.rq_size = attr->cap.max_recv_wr;
+ info.max_sq_frag_cnt = attr->cap.max_send_sge;
+ info.max_rq_frag_cnt = attr->cap.max_recv_sge;
+ info.max_inline_data = attr->cap.max_inline_data;
+ info.abi_ver = iwvctx->abi_ver;
+
+ status = irdma_uk_calc_depth_shift_sq(&info, &info.sq_depth, &info.sq_shift);
if (status) {
printf("%s: invalid SQ attributes, max_send_wr=%d max_send_sge=%d max_inline=%d\n",
__func__, attr->cap.max_send_wr, attr->cap.max_send_sge,
@@ -1526,14 +1546,7 @@ irdma_ucreate_qp(struct ibv_pd *pd,
return NULL;
}
- if (uk_attrs->hw_rev == IRDMA_GEN_1 && iwvctx->abi_ver > 4)
- rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
- else
- irdma_get_wqe_shift(uk_attrs, attr->cap.max_recv_sge, 0,
- &rqshift);
-
- status = irdma_get_rqdepth(uk_attrs->max_hw_rq_quanta,
- attr->cap.max_recv_wr, rqshift, &rqdepth);
+ status = irdma_uk_calc_depth_shift_rq(&info, &info.rq_depth, &info.rq_shift);
if (status) {
printf("%s: invalid RQ attributes, recv_wr=%d recv_sge=%d\n",
__func__, attr->cap.max_recv_wr, attr->cap.max_recv_sge);
@@ -1550,31 +1563,35 @@ irdma_ucreate_qp(struct ibv_pd *pd,
if (pthread_spin_init(&iwuqp->lock, PTHREAD_PROCESS_PRIVATE))
goto err_free_qp;
- info.sq_size = sqdepth >> sqshift;
- info.rq_size = rqdepth >> rqshift;
- attr->cap.max_send_wr = info.sq_size;
- attr->cap.max_recv_wr = info.rq_size;
+ info.sq_size = info.sq_depth >> info.sq_shift;
+ info.rq_size = info.rq_depth >> info.rq_shift;
+ /**
+	 * For older ABI versions (less than 6), the raw SQ and RQ
+	 * quanta are passed back in cap.max_send_wr and cap.max_recv_wr,
+	 * since those kernels had no way of calculating the actual qp size.
+ */
+ if (iwvctx->abi_ver <= 5) {
+ attr->cap.max_send_wr = info.sq_size;
+ attr->cap.max_recv_wr = info.rq_size;
+ }
- info.uk_attrs = uk_attrs;
- info.max_sq_frag_cnt = attr->cap.max_send_sge;
- info.max_rq_frag_cnt = attr->cap.max_recv_sge;
iwuqp->recv_sges = calloc(attr->cap.max_recv_sge, sizeof(*iwuqp->recv_sges));
if (!iwuqp->recv_sges)
goto err_destroy_lock;
info.wqe_alloc_db = (u32 *)iwvctx->db;
info.legacy_mode = iwvctx->legacy_mode;
- info.sq_wrtrk_array = calloc(sqdepth, sizeof(*info.sq_wrtrk_array));
+ info.sq_wrtrk_array = calloc(info.sq_depth, sizeof(*info.sq_wrtrk_array));
if (!info.sq_wrtrk_array)
goto err_free_rsges;
- info.rq_wrid_array = calloc(rqdepth, sizeof(*info.rq_wrid_array));
+ info.rq_wrid_array = calloc(info.rq_depth, sizeof(*info.rq_wrid_array));
if (!info.rq_wrid_array)
goto err_free_sq_wrtrk;
iwuqp->sq_sig_all = attr->sq_sig_all;
iwuqp->qp_type = attr->qp_type;
- status = irdma_vmapped_qp(iwuqp, pd, attr, sqdepth, rqdepth, &info, iwvctx->legacy_mode);
+ status = irdma_vmapped_qp(iwuqp, pd, attr, &info, iwvctx->legacy_mode);
if (status) {
errno = status;
goto err_free_rq_wrid;
@@ -1583,18 +1600,15 @@ irdma_ucreate_qp(struct ibv_pd *pd,
iwuqp->qp.back_qp = iwuqp;
iwuqp->qp.lock = &iwuqp->lock;
- info.max_sq_frag_cnt = attr->cap.max_send_sge;
- info.max_rq_frag_cnt = attr->cap.max_recv_sge;
- info.max_inline_data = attr->cap.max_inline_data;
- iwuqp->qp.force_fence = true;
status = irdma_uk_qp_init(&iwuqp->qp, &info);
if (status) {
errno = status;
goto err_free_vmap_qp;
}
- attr->cap.max_send_wr = (sqdepth - IRDMA_SQ_RSVD) >> sqshift;
- attr->cap.max_recv_wr = (rqdepth - IRDMA_RQ_RSVD) >> rqshift;
+ attr->cap.max_send_wr = (info.sq_depth - IRDMA_SQ_RSVD) >> info.sq_shift;
+ attr->cap.max_recv_wr = (info.rq_depth - IRDMA_RQ_RSVD) >> info.rq_shift;
+
return &iwuqp->ibv_qp;
err_free_vmap_qp:
@@ -1649,8 +1663,6 @@ irdma_umodify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask)
iwuqp = container_of(qp, struct irdma_uqp, ibv_qp);
iwvctx = container_of(qp->context, struct irdma_uvcontext, ibv_ctx);
- iwuqp->attr_mask = attr_mask;
- memcpy(&iwuqp->attr, attr, sizeof(iwuqp->attr));
if (iwuqp->qp.qp_caps & IRDMA_PUSH_MODE && attr_mask & IBV_QP_STATE &&
iwvctx->uk_attrs.hw_rev > IRDMA_GEN_1) {
@@ -1707,13 +1719,13 @@ irdma_issue_flush(struct ibv_qp *qp, bool sq_flush, bool rq_flush)
{
struct irdma_umodify_qp_resp resp = {};
struct irdma_modify_qp_cmd cmd_ex = {};
- struct irdma_uqp *iwuqp;
+ struct ibv_qp_attr attr = {};
+ attr.qp_state = IBV_QPS_ERR;
cmd_ex.sq_flush = sq_flush;
cmd_ex.rq_flush = rq_flush;
- iwuqp = container_of(qp, struct irdma_uqp, ibv_qp);
- ibv_cmd_modify_qp_ex(qp, &iwuqp->attr, iwuqp->attr_mask,
+ ibv_cmd_modify_qp_ex(qp, &attr, IBV_QP_STATE,
&cmd_ex.ibv_cmd,
sizeof(cmd_ex.ibv_cmd),
sizeof(cmd_ex), &resp.ibv_resp,
@@ -1857,8 +1869,6 @@ irdma_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
info.signaled = true;
if (ib_wr->send_flags & IBV_SEND_FENCE)
info.read_fence = true;
- if (iwuqp->send_cq->report_rtt)
- info.report_rtt = true;
switch (ib_wr->opcode) {
case IBV_WR_SEND_WITH_IMM:
@@ -1885,31 +1895,21 @@ irdma_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
info.op_type = IRDMA_OP_TYPE_SEND_INV;
info.stag_to_inv = ib_wr->imm_data;
}
- if (ib_wr->send_flags & IBV_SEND_INLINE) {
- info.op.inline_send.data = (void *)(uintptr_t)ib_wr->sg_list[0].addr;
- info.op.inline_send.len = ib_wr->sg_list[0].length;
- if (ib_qp->qp_type == IBV_QPT_UD) {
- struct irdma_uah *ah = container_of(ib_wr->wr.ud.ah,
- struct irdma_uah, ibv_ah);
-
- info.op.inline_send.ah_id = ah->ah_id;
- info.op.inline_send.qkey = ib_wr->wr.ud.remote_qkey;
- info.op.inline_send.dest_qp = ib_wr->wr.ud.remote_qpn;
- }
+ info.op.send.num_sges = ib_wr->num_sge;
+ info.op.send.sg_list = (struct irdma_sge *)ib_wr->sg_list;
+ if (ib_qp->qp_type == IBV_QPT_UD) {
+ struct irdma_uah *ah = container_of(ib_wr->wr.ud.ah,
+ struct irdma_uah, ibv_ah);
+
+ info.op.send.ah_id = ah->ah_id;
+ info.op.send.qkey = ib_wr->wr.ud.remote_qkey;
+ info.op.send.dest_qp = ib_wr->wr.ud.remote_qpn;
+ }
+
+ if (ib_wr->send_flags & IBV_SEND_INLINE)
err = irdma_uk_inline_send(&iwuqp->qp, &info, false);
- } else {
- info.op.send.num_sges = ib_wr->num_sge;
- info.op.send.sg_list = (struct irdma_sge *)ib_wr->sg_list;
- if (ib_qp->qp_type == IBV_QPT_UD) {
- struct irdma_uah *ah = container_of(ib_wr->wr.ud.ah,
- struct irdma_uah, ibv_ah);
-
- info.op.inline_send.ah_id = ah->ah_id;
- info.op.inline_send.qkey = ib_wr->wr.ud.remote_qkey;
- info.op.inline_send.dest_qp = ib_wr->wr.ud.remote_qpn;
- }
+ else
err = irdma_uk_send(&iwuqp->qp, &info, false);
- }
break;
case IBV_WR_RDMA_WRITE_WITH_IMM:
if (iwuqp->qp.qp_caps & IRDMA_WRITE_WITH_IMM) {
@@ -1926,19 +1926,14 @@ irdma_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
else
info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
- if (ib_wr->send_flags & IBV_SEND_INLINE) {
- info.op.inline_rdma_write.data = (void *)(uintptr_t)ib_wr->sg_list[0].addr;
- info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
- info.op.inline_rdma_write.rem_addr.tag_off = ib_wr->wr.rdma.remote_addr;
- info.op.inline_rdma_write.rem_addr.stag = ib_wr->wr.rdma.rkey;
+ info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
+ info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
+ info.op.rdma_write.rem_addr.tag_off = ib_wr->wr.rdma.remote_addr;
+ info.op.rdma_write.rem_addr.stag = ib_wr->wr.rdma.rkey;
+ if (ib_wr->send_flags & IBV_SEND_INLINE)
err = irdma_uk_inline_rdma_write(&iwuqp->qp, &info, false);
- } else {
- info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
- info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
- info.op.rdma_write.rem_addr.tag_off = ib_wr->wr.rdma.remote_addr;
- info.op.rdma_write.rem_addr.stag = ib_wr->wr.rdma.rkey;
+ else
err = irdma_uk_rdma_write(&iwuqp->qp, &info, false);
- }
break;
case IBV_WR_RDMA_READ:
if (ib_wr->num_sge > uk_attrs->max_hw_read_sges) {
@@ -1965,9 +1960,8 @@ irdma_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
info.op.bind_window.mw_stag = ib_wr->bind_mw.rkey;
} else {
struct verbs_mr *vmr = verbs_get_mr(ib_wr->bind_mw.bind_info.mr);
- struct irdma_umr *umr = container_of(vmr, struct irdma_umr, vmr);
- if (umr->acc_flags & IBV_ACCESS_ZERO_BASED) {
+ if (vmr->access & IBV_ACCESS_ZERO_BASED) {
err = EINVAL;
break;
}
@@ -2085,7 +2079,7 @@ irdma_ucreate_ah(struct ibv_pd *ibpd, struct ibv_ah_attr *attr)
{
struct irdma_uah *ah;
union ibv_gid sgid;
- struct irdma_ucreate_ah_resp resp;
+ struct irdma_ucreate_ah_resp resp = {};
int err;
err = ibv_query_gid(ibpd->context, attr->port_num, attr->grh.sgid_index,
@@ -2182,6 +2176,7 @@ irdma_uresize_cq(struct ibv_cq *cq, int cqe)
u32 cq_pages;
int cqe_needed;
int ret = 0;
+ bool cqe_64byte_ena;
iwucq = container_of(cq, struct irdma_ucq, verbs_cq.cq);
iwvctx = container_of(cq->context, struct irdma_uvcontext, ibv_ctx);
@@ -2190,20 +2185,17 @@ irdma_uresize_cq(struct ibv_cq *cq, int cqe)
if (!(uk_attrs->feature_flags & IRDMA_FEATURE_CQ_RESIZE))
return EOPNOTSUPP;
- if (cqe > IRDMA_MAX_CQ_SIZE)
+ if (cqe < uk_attrs->min_hw_cq_size || cqe > uk_attrs->max_hw_cq_size - 1)
return EINVAL;
- cqe_needed = cqe + 1;
- if (uk_attrs->hw_rev > IRDMA_GEN_1)
- cqe_needed *= 2;
+ cqe_64byte_ena = uk_attrs->feature_flags & IRDMA_FEATURE_64_BYTE_CQE ? true : false;
- if (cqe_needed < IRDMA_U_MINCQ_SIZE)
- cqe_needed = IRDMA_U_MINCQ_SIZE;
+ cqe_needed = get_cq_size(cqe, uk_attrs->hw_rev, cqe_64byte_ena);
if (cqe_needed == iwucq->cq.cq_size)
return 0;
- cq_size = get_cq_total_bytes(cqe_needed);
+ cq_size = get_cq_total_bytes(cqe_needed, cqe_64byte_ena);
cq_pages = cq_size >> IRDMA_HW_PAGE_SHIFT;
cq_base = irdma_alloc_hw_buf(cq_size);
if (!cq_base)
@@ -2239,6 +2231,7 @@ irdma_uresize_cq(struct ibv_cq *cq, int cqe)
goto err_resize;
memcpy(&cq_buf->cq, &iwucq->cq, sizeof(cq_buf->cq));
+ cq_buf->buf_size = cq_size;
cq_buf->vmr = iwucq->vmr;
iwucq->vmr = new_mr;
irdma_uk_cq_resize(&iwucq->cq, cq_base, cqe_needed);
diff --git a/contrib/ofed/libirdma/libirdma.map b/contrib/ofed/libirdma/libirdma.map
index 0db347c48281..95be5c389138 100644
--- a/contrib/ofed/libirdma/libirdma.map
+++ b/contrib/ofed/libirdma/libirdma.map
@@ -1,10 +1,8 @@
/* Export symbols should be added below according to
Documentation/versioning.md document. */
IRDMA_1.0 {
- global: *;
- local: *;
+ global:
+ libirdma_query_device;
+ local: *;
};
-IRDMA_1.1 {
- global: *;
-} IRDMA_1.0;
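
With the map file now exporting only libirdma_query_device and marking everything else local, the rest of the library's internals are hidden from consumers. A quick standalone check of that visibility via dlopen(3)/dlsym(3); the shared-object name passed to dlopen is an assumption for illustration:

#include <dlfcn.h>
#include <stdio.h>

int main(void)
{
	void *h = dlopen("libirdma.so", RTLD_NOW);

	if (!h) {
		fprintf(stderr, "dlopen: %s\n", dlerror());
		return 1;
	}
	/* Exported per the version script; internal symbols stay hidden. */
	printf("libirdma_query_device: %s\n",
	    dlsym(h, "libirdma_query_device") ? "exported" : "hidden");
	dlclose(h);
	return 0;
}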
diff --git a/contrib/ofed/libirdma/osdep.h b/contrib/ofed/libirdma/osdep.h
index 70daf4d9d3e7..4c226c8e5992 100644
--- a/contrib/ofed/libirdma/osdep.h
+++ b/contrib/ofed/libirdma/osdep.h
@@ -91,6 +91,13 @@
#define SPEED_100000 100000
#define BIT_ULL(a) (1ULL << (a))
+#define min(a, b) ((a) > (b) ? (b) : (a))
+#ifndef likely
+#define likely(x) __builtin_expect((x), 1)
+#endif
+#ifndef unlikely
+#define unlikely(x) __builtin_expect((x), 0)
+#endif
#define __aligned_u64 uint64_t __aligned(8)
@@ -131,7 +138,7 @@ do { \
#define irdma_dev_warn(a, b, ...) printf(b, ##__VA_ARGS__) /*dev_warn(a, b)*/
#define irdma_dev_info(a, b, ...) printf(b, ##__VA_ARGS__)
#define irdma_pr_warn printf
-#define ibdev_err(ibdev, fmt, ...) dev_err(&((ibdev)->dev), fmt, ##__VA_ARGS__)
+#define ibdev_err(ibdev, fmt, ...) printf("%s:"fmt, (ibdev)->name, ##__VA_ARGS__)
#define dump_struct(s, sz, name) \
do { \
@@ -166,8 +173,13 @@ struct irdma_sc_vsi;
#define irdma_usec_delay(x) DELAY(x)
#define mdelay(x) DELAY((x) * 1000)
-#define rt_tos2priority(tos) (((tos >> 1) & 0x8 >> 1) | ((tos >> 2) ^ ((tos >> 3) << 1)))
+#define rt_tos2priority(tos) (tos >> 5)
#define ah_attr_to_dmac(attr) ((attr).dmac)
+#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \
+ ib_modify_qp_is_ok(cur_state, next_state, type, mask)
+#define kc_typeq_ib_wr const
+#define kc_ifp_find ip_ifp_find
+#define kc_ifp6_find ip6_ifp_find
#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \
ib_gid_to_network_type(gid_type, gid)
#define irdma_del_timer_compat(tt) del_timer((tt))
@@ -207,6 +219,7 @@ enum ibv_mr_type {
struct verbs_mr {
struct ibv_mr ibv_mr;
enum ibv_mr_type mr_type;
+ int access;
};
#define verbs_get_mr(mr) container_of((mr), struct verbs_mr, ibv_mr)
#endif
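
One of the fixes listed in the change summary is the rt_tos2priority() macro above: the 802.1p priority corresponds to the IP precedence field, i.e. the top three bits of the ToS byte, which the corrected definition extracts with a plain right shift. A minimal standalone check of that mapping, with ToS values chosen for illustration:

#include <assert.h>

#define rt_tos2priority(tos)	((tos) >> 5)

int main(void)
{
	assert(rt_tos2priority(0x00) == 0); /* routine traffic */
	assert(rt_tos2priority(0xb8) == 5); /* EF (DSCP 46) -> priority 5 */
	assert(rt_tos2priority(0xe0) == 7); /* network control */
	return 0;
}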
diff --git a/share/man/man4/irdma.4 b/share/man/man4/irdma.4
index c8da2ad795dd..078d50923d22 100644
--- a/share/man/man4/irdma.4
+++ b/share/man/man4/irdma.4
@@ -39,8 +39,8 @@
.Nd RDMA FreeBSD driver for Intel(R) Ethernet Controller E810
.Sh SYNOPSIS
This module relies on
-.Xr if_ice 4
-.Bl -tag -nested-width indent
+.Xr ice 4
+.Bl -tag -width indent
.It The following kernel options should be included in the configuration:
.Cd options OFED
.Cd options OFED_DEBUG_INIT
@@ -52,8 +52,9 @@ This module relies on
.Ss Features
The
.Nm
-driver provides RDMA protocol support on RDMA-capable Intel Ethernet 800 Series NICs which are supported by
-.Xr if_ice 4
+driver provides RDMA protocol support on RDMA-capable Intel Ethernet 800 Series
+NICs which are supported by
+.Xr ice 4
.
.Pp
The driver supports both iWARP and RoCEv2 protocols.
@@ -66,48 +67,65 @@ prompt before booting the kernel or stored in
.Bl -tag -width indent
.It Va dev.irdma<interface_number>.roce_enable
enables RoCEv2 protocol usage on <interface_number> interface.
-.Pp By default RoCEv2 protocol is used.
+.Pp
+By default RoCEv2 protocol is used.
.It Va dev.irdma<interface_number>.dcqcn_cc_cfg_valid
-indicates that all DCQCN parameters are valid and should be updated in registers or QP context.
+indicates that all DCQCN parameters are valid and should be updated in
+registers or QP context.
.Pp
-Setting this parameter to 1 means that settings in
-.Em dcqcn_min_dec_factor, dcqcn_min_rate_MBps, dcqcn_F, dcqcn_T,
-.Em dcqcn_B, dcqcn_rai_factor, dcqcn_hai_factor, dcqcn_rreduce_mperiod
-are taken into account. Otherwise default values are used.
+Setting this parameter to 1 means that settings in
+.Em dcqcn_min_dec_factor , dcqcn_min_rate_MBps , dcqcn_F , dcqcn_T ,
+.Em dcqcn_B, dcqcn_rai_factor, dcqcn_hai_factor, dcqcn_rreduce_mperiod
+are taken into account.
+Otherwise default values are used.
.Pp
Note: "roce_enable" must also be set for this tunable to take effect.
.It Va dev.irdma<interface_number>.dcqcn_min_dec_factor
-The minimum factor by which the current transmit rate can be changed when processing a CNP. Value is given as a percentage (1-100).
+The minimum factor by which the current transmit rate can be changed when
+processing a CNP.
+Value is given as a percentage (1-100).
.Pp
-Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
+Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable
+to take effect.
.It Va dev.irdma<interface_number>.dcqcn_min_rate_MBps
The minimum value, in Mbits per second, for the rate limit.
.Pp
-Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
+Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable
+to take effect.
.It Va dev.irdma<interface_number>.dcqcn_F
The number of times to stay in each stage of bandwidth recovery.
.Pp
-Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
+Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable
+to take effect.
.It Va dev.irdma<interface_number>.dcqcn_T
-The number of microseconds that should elapse before increasing the CWND in DCQCN mode.
+The number of microseconds that should elapse before increasing the CWND
+in DCQCN mode.
.Pp
-Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
+Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable
+to take effect.
.It Va dev.irdma<interface_number>.dcqcn_B
The number of bytes to transmit before updating CWND in DCQCN mode.
.Pp
-Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
+Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable
+to take effect.
.It Va dev.irdma<interface_number>.dcqcn_rai_factor
The number of MSS to add to the congestion window in additive increase mode.
.Pp
-Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
+Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable
+to take effect.
.It Va dev.irdma<interface_number>.dcqcn_hai_factor
The number of MSS to add to the congestion window in hyperactive increase mode.
.Pp
-Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
+Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable
+to take effect.
.It Va dev.irdma<interface_number>.dcqcn_rreduce_mperiod
-The minimum time between 2 consecutive rate reductions for a single flow. Rate reduction will occur only if a CNP is received during the relevant time interval.
+The minimum time between 2 consecutive rate reductions for a single flow.
+Rate reduction will occur only if a CNP is received during the relevant time
+interval.
.Pp
-Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
+Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable
+to take effect.
+.El
.Ss SYSCTL PROCEDURES
Sysctl controls are available for runtime adjustments.
.Bl -tag -width indent
@@ -120,64 +138,60 @@ enables the DCQCN algorithm for RoCEv2.
.Pp
Note: "roce_enable" must also be set for this sysctl to take effect.
.Pp
-Note: The change may be set at any time, but it will be applied only to newly created QPs.
+Note: The change may be set at any time, but it will be applied only to newly
+created QPs.
+.El
.Ss TESTING
.Bl -enum
.It
To load the irdma driver, run:
-.Bl -tag -width indent
-.It
+.Bd -literal -offset indent
kldload irdma
-.El
-If if_ice is not already loaded, the system will load it on its own. Please check whether the value of sysctl
+.Ed
+If if_ice is not already loaded, the system will load it on its own.
+Please check whether the value of sysctl
.Va hw.ice.irdma
-is 1, if the irdma driver is not loading. To change the value put:
-.Bl -tag -width indent
-.It
+is 1, if the irdma driver is not loading.
+To change the value, put:
+.Bd -literal -offset indent
hw.ice.irdma=1
-.El
-to
+.Ed
+in
.Pa /boot/loader.conf
and reboot.
.It
To check that the driver was loaded, run:
-.Bl -tag -width indent
-.It
+.Bd -literal -offset indent
sysctl -a | grep infiniband
-.El
+.Ed
Typically, if everything goes well, around 190 entries per PF will appear.
.It
-Each interface of the card may work in either iWARP or RoCEv2 mode. To enable RoCEv2 compatibility, add:
-.Bl -tag -width indent
-.It
+Each interface of the card may work in either iWARP or RoCEv2 mode.
+To enable RoCEv2 compatibility, add:
+.Bd -literal -offset indent
dev.irdma<interface_number>.roce_enable=1
-.El
+.Ed
where <interface_number> is a desired ice interface number on which
-RoCEv2 protocol needs to be enabled, to:
-.Bl -tag -width indent
-.It
+RoCEv2 protocol needs to be enabled, into:
.Pa /boot/loader.conf
-.El
-for instance:
+, for instance:
.Bl -tag -width indent
-.It
-dev.irdma0.roce_enable=0
-.It
-dev.irdma1.roce_enable=1
+.It dev.irdma0.roce_enable=0
+.It dev.irdma1.roce_enable=1
.El
-will keep iWARP mode on ice0 and enable RoCEv2 mode on interface ice1. The RoCEv2 mode is the default.
-.Dl
+will keep iWARP mode on ice0 and enable RoCEv2 mode on interface ice1.
+The RoCEv2 mode is the default.
+.Pp
To check irdma roce_enable status, run:
-.Bl -tag -width indent
-.It
+.Bd -literal -offset indent
sysctl dev.irdma<interface_number>.roce_enable
-.El
+.Ed
for instance:
-.Bl -tag -width indent
-.It
+.Bd -literal -offset indent
sysctl dev.irdma2.roce_enable
-.El
-with returned value of '0' indicate the iWARP mode, and the value of '1' indicate the RoCEv2 mode.
+.Ed
+with a returned value of '0' indicating iWARP mode, and a value of '1'
+indicating RoCEv2 mode.
.Pp
Note: An interface configured in one mode will not be able to connect
to a node configured in another mode.
@@ -187,44 +201,42 @@ DCB and Priority Flow Controller (PFC) are not currently supported which
may lead to significant performance loss or connectivity issues.
.It
Enable flow control in the ice driver:
-.Bl -tag -width indent
-.It
+.Bd -literal -offset indent
sysctl dev.ice.<interface_number>.fc=3
-.El
-Enable flow control on the switch your system is connected to. See your
-switch documentation for details.
+.Ed
+Enable flow control on the switch your system is connected to.
+See your switch documentation for details.
.It
The source code for krping software is provided with the kernel in
-/usr/src/sys/contrib/rdma/krping/. To compile the software, change
-directory to /usr/src/sys/modules/rdma/krping/ and invoke the following:
+/usr/src/sys/contrib/rdma/krping/.
+To compile the software, change directory to
+/usr/src/sys/modules/rdma/krping/ and invoke the following:
.Bl -tag -width indent
-.It
-make clean
-.It
-make
-.It
-make install
+.It make clean
+.It make
+.It make install
+.It kldload krping
.El
.It
Start a krping server on one machine:
-.Bl -tag -width indent
-.It
- echo size=64,count=1,port=6601,addr=100.0.0.189,server > /dev/krping
-.El
+.Bd -literal -offset indent
+echo size=64,count=1,port=6601,addr=100.0.0.189,server > /dev/krping
+.Ed
.It
Connect a client from another machine:
-.Bl -tag -width indent
-.It
- echo size=64,count=1,port=6601,addr=100.0.0.189,client > /dev/krping
+.Bd -literal -offset indent
+echo size=64,count=1,port=6601,addr=100.0.0.189,client > /dev/krping
+.Ed
.El
.Sh SUPPORT
For general information and support, go to the Intel support website at:
.Lk http://support.intel.com/ .
.Pp
-If an issue is identified with this driver with a supported adapter, email all the specific information related to the issue to
+If an issue is identified with this driver with a supported adapter, email all
+the specific information related to the issue to
.Mt freebsd@intel.com .
.Sh SEE ALSO
-.Xr if_ice 4
+.Xr ice 4
.Sh AUTHORS
.An -nosplit
The
diff --git a/sys/dev/irdma/fbsd_kcompat.c b/sys/dev/irdma/fbsd_kcompat.c
index cc6dd5c947ae..e0b3bce5ec4f 100644
--- a/sys/dev/irdma/fbsd_kcompat.c
+++ b/sys/dev/irdma/fbsd_kcompat.c
@@ -41,6 +41,7 @@
#include <netinet/in_fib.h>
#include <netinet6/in6_fib.h>
#include <net/route/nhop.h>
+#include <net/if_llatbl.h>
/* additional QP debugging option. Keep false unless needed */
bool irdma_upload_context = false;
@@ -389,7 +390,8 @@ irdma_get_dst_mac(struct irdma_cm_node *cm_node, struct sockaddr *dst_sin, u8 *d
if (dst_sin->sa_family == AF_INET) {
err = arpresolve(ifp, gateway, NULL, nexthop, dst_mac, NULL, &lle);
} else if (dst_sin->sa_family == AF_INET6) {
- err = nd6_resolve(ifp, gateway, NULL, nexthop, dst_mac, NULL, &lle);
+ err = nd6_resolve(ifp, LLE_SF(AF_INET6, gateway), NULL, nexthop,
+ dst_mac, NULL, &lle);
} else {
err = -EPROTONOSUPPORT;
}
@@ -467,15 +469,20 @@ int
irdma_resolve_neigh_lpb_chk(struct irdma_device *iwdev, struct irdma_cm_node *cm_node,
struct irdma_cm_info *cm_info)
{
+#ifdef VIMAGE
struct rdma_cm_id *rdma_id = (struct rdma_cm_id *)cm_node->cm_id->context;
struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
+#endif
int arpindex;
int oldarpindex;
+ bool is_lpb = false;
- if ((cm_node->ipv4 &&
- irdma_ipv4_is_lpb(vnet, cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
- (!cm_node->ipv4 &&
- irdma_ipv6_is_lpb(cm_node->loc_addr, cm_node->rem_addr))) {
+ CURVNET_SET_QUIET(vnet);
+ is_lpb = cm_node->ipv4 ?
+ irdma_ipv4_is_lpb(cm_node->loc_addr[0], cm_node->rem_addr[0]) :
+ irdma_ipv6_is_lpb(cm_node->loc_addr, cm_node->rem_addr);
+ CURVNET_RESTORE();
+ if (is_lpb) {
cm_node->do_lpb = true;
arpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
NULL,
diff --git a/sys/dev/irdma/fbsd_kcompat.h b/sys/dev/irdma/fbsd_kcompat.h
index 60be5e74d91d..3b03189e41af 100644
--- a/sys/dev/irdma/fbsd_kcompat.h
+++ b/sys/dev/irdma/fbsd_kcompat.h
@@ -56,6 +56,7 @@
BUILD_BUG_ON_ZERO( \
!__same_type(((struct drv_struct *)NULL)->member, \
struct ib_struct)))
+
#define set_ibdev_dma_device(ibdev, dev) \
ibdev.dma_device = (dev)
#define set_max_sge(props, rf) \
@@ -73,7 +74,6 @@
#define IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION IB_CQ_FLAGS_TIMESTAMP_COMPLETION
#define kc_irdma_destroy_qp(ibqp, udata) irdma_destroy_qp(ibqp, udata)
-
#ifndef IB_QP_ATTR_STANDARD_BITS
#define IB_QP_ATTR_STANDARD_BITS GENMASK(20, 0)
#endif
@@ -81,12 +81,15 @@
#define IRDMA_QOS_MODE_VLAN 0x0
#define IRDMA_QOS_MODE_DSCP 0x1
+#define IRDMA_VER_LEN 24
+
void kc_set_roce_uverbs_cmd_mask(struct irdma_device *iwdev);
void kc_set_rdma_uverbs_cmd_mask(struct irdma_device *iwdev);
struct irdma_tunable_info {
struct sysctl_ctx_list irdma_sysctl_ctx;
struct sysctl_oid *irdma_sysctl_tree;
+ char drv_ver[IRDMA_VER_LEN];
u8 roce_ena;
};
@@ -168,6 +171,7 @@ struct irdma_device *kc_irdma_get_device(struct ifnet *netdev);
void kc_irdma_put_device(struct irdma_device *iwdev);
void kc_set_loc_seq_num_mss(struct irdma_cm_node *cm_node);
+u16 kc_rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn);
void irdma_get_dev_fw_str(struct ib_device *dev, char *str, size_t str_len);
@@ -185,11 +189,19 @@ void irdma_dcqcn_tunables_init(struct irdma_pci_f *rf);
u32 irdma_create_stag(struct irdma_device *iwdev);
void irdma_free_stag(struct irdma_device *iwdev, u32 stag);
+int irdma_hwdereg_mr(struct ib_mr *ib_mr);
+int irdma_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 len,
+ u64 virt, int new_access, struct ib_pd *new_pd,
+ struct ib_udata *udata);
struct irdma_mr;
struct irdma_cq;
struct irdma_cq_buf;
struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg, struct ib_udata *udata);
+int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
+ u16 access);
+struct ib_mr *irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len,
+ u64 virt, struct ib_udata *udata);
int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr);
struct ib_mw *irdma_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
struct ib_udata *udata);
@@ -204,6 +216,11 @@ int irdma_setup_kmode_qp(struct irdma_device *iwdev,
struct irdma_qp *iwqp,
struct irdma_qp_init_info *info,
struct ib_qp_init_attr *init_attr);
+int irdma_setup_umode_qp(struct ib_udata *udata,
+ struct irdma_device *iwdev,
+ struct irdma_qp *iwqp,
+ struct irdma_qp_init_info *info,
+ struct ib_qp_init_attr *init_attr);
void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
struct irdma_qp_host_ctx_info *ctx_info);
void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
diff --git a/sys/dev/irdma/icrdma.c b/sys/dev/irdma/icrdma.c
index c5a42eb863e6..ed67dfdb8847 100644
--- a/sys/dev/irdma/icrdma.c
+++ b/sys/dev/irdma/icrdma.c
@@ -53,7 +53,7 @@
/**
* Driver version
*/
-char irdma_driver_version[] = "0.0.51-k";
+char irdma_driver_version[] = "1.1.5-k";
#define pf_if_d(peer) peer->ifp->if_dunit
@@ -103,6 +103,11 @@ irdma_init_tunable(struct irdma_pci_f *rf, uint8_t pf_id)
(rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? "iWARP" : "RoCEv2",
rf->tun_info.roce_ena);
+ snprintf(rf->tun_info.drv_ver, IRDMA_VER_LEN, "%s", irdma_driver_version);
+ SYSCTL_ADD_STRING(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
+ OID_AUTO, "drv_ver", CTLFLAG_RDTUN, rf->tun_info.drv_ver,
+ IRDMA_VER_LEN, "driver version");
+
irdma_dcqcn_tunables_init(rf);
}
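With the string registered above, the running driver version should be readable back at runtime, presumably as sysctl dev.irdma<interface_number>.drv_ver, since the node hangs off the same per-device tunable tree as roce_enable.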
diff --git a/sys/dev/irdma/icrdma_hw.c b/sys/dev/irdma/icrdma_hw.c
index 4a1d2a17269e..7013f06ca8c3 100644
--- a/sys/dev/irdma/icrdma_hw.c
+++ b/sys/dev/irdma/icrdma_hw.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2017 - 2021 Intel Corporation
+ * Copyright (c) 2017 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -71,21 +71,23 @@ static u32 icrdma_regs[IRDMA_MAX_REGS] = {
};
static u64 icrdma_masks[IRDMA_MAX_MASKS] = {
- ICRDMA_CCQPSTATUS_CCQP_DONE_M,
- ICRDMA_CCQPSTATUS_CCQP_ERR_M,
- ICRDMA_CQPSQ_STAG_PDID_M,
- ICRDMA_CQPSQ_CQ_CEQID_M,
- ICRDMA_CQPSQ_CQ_CQID_M,
- ICRDMA_COMMIT_FPM_CQCNT_M,
+ ICRDMA_CCQPSTATUS_CCQP_DONE,
+ ICRDMA_CCQPSTATUS_CCQP_ERR,
+ ICRDMA_CQPSQ_STAG_PDID,
+ ICRDMA_CQPSQ_CQ_CEQID,
+ ICRDMA_CQPSQ_CQ_CQID,
+ ICRDMA_COMMIT_FPM_CQCNT,
+ ICRDMA_CQPSQ_UPESD_HMCFNID
};
-static u64 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
+static u8 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
ICRDMA_CCQPSTATUS_CCQP_DONE_S,
ICRDMA_CCQPSTATUS_CCQP_ERR_S,
ICRDMA_CQPSQ_STAG_PDID_S,
ICRDMA_CQPSQ_CQ_CEQID_S,
ICRDMA_CQPSQ_CQ_CQID_S,
ICRDMA_COMMIT_FPM_CQCNT_S,
+ ICRDMA_CQPSQ_UPESD_HMCFNID_S
};
/**
@@ -101,9 +103,10 @@ icrdma_ena_irq(struct irdma_sc_dev *dev, u32 idx)
if (dev->ceq_itr && dev->aeq->msix_idx != idx)
interval = dev->ceq_itr >> 1; /* 2 usec units */
- val = LS_64(0, IRDMA_GLINT_DYN_CTL_ITR_INDX) |
- LS_64(interval, IRDMA_GLINT_DYN_CTL_INTERVAL) |
- IRDMA_GLINT_DYN_CTL_INTENA_M | IRDMA_GLINT_DYN_CTL_CLEARPBA_M;
+ val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_ITR_INDX, IRDMA_IDX_ITR0) |
+ FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTERVAL, interval) |
+ FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, true) |
+ FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, true);
writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
}
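This hunk is representative of the register-define refactor throughout the commit: the LS_64(value, FIELD) shift-and-mask pairs give way to the Linux-style FIELD_PREP(FIELD_MASK, value), where a single GENMASK-built mask encodes both a field's position and its width. A simplified standalone model of the idiom (the real linuxkpi macros add type and constness checking on top):

    #include <stdint.h>
    #include <stdio.h>

    /* simplified models, assuming h >= l and 64-bit fields */
    #define GENMASK_ULL(h, l) \
            ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
    #define FIELD_PREP(mask, val) \
            (((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))
    #define FIELD_GET(mask, reg) \
            (((uint64_t)(reg) & (mask)) >> __builtin_ctzll(mask))

    #define DEMO_INTERVAL GENMASK_ULL(16, 5) /* bits 16..5, cf. GLINT INTERVAL */

    int
    main(void)
    {
            uint64_t reg = FIELD_PREP(DEMO_INTERVAL, 42);

            printf("reg=0x%jx interval=%ju\n", (uintmax_t)reg,
                (uintmax_t)FIELD_GET(DEMO_INTERVAL, reg));
            return 0;
    }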
@@ -131,9 +134,9 @@ icrdma_cfg_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
{
u32 reg_val;
- reg_val = enable ? IRDMA_GLINT_CEQCTL_CAUSE_ENA_M : 0;
+ reg_val = enable ? IRDMA_GLINT_CEQCTL_CAUSE_ENA : 0;
reg_val |= (idx << IRDMA_GLINT_CEQCTL_MSIX_INDX_S) |
- IRDMA_GLINT_CEQCTL_ITR_INDX_M;
+ IRDMA_GLINT_CEQCTL_ITR_INDX;
writel(reg_val, dev->hw_regs[IRDMA_GLINT_CEQCTL] + ceq_id);
}
@@ -224,7 +227,7 @@ icrdma_init_hw(struct irdma_sc_dev *dev)
dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
dev->irq_ops = &icrdma_irq_ops;
dev->hw_stats_map = icrdma_hw_stat_map;
-
+ dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE;
dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
@@ -232,8 +235,7 @@ icrdma_init_hw(struct irdma_sc_dev *dev)
dev->hw_attrs.uk_attrs.max_hw_wq_frags = ICRDMA_MAX_WQ_FRAGMENT_COUNT;
dev->hw_attrs.uk_attrs.max_hw_read_sges = ICRDMA_MAX_SGE_RD;
- dev->hw_attrs.uk_attrs.max_hw_wq_size = IRDMA_QP_WQE_MAX_SIZE;
- dev->hw_attrs.uk_attrs.min_sw_wq_size = IRDMA_QP_SW_MIN_WQSIZE;
+ dev->hw_attrs.uk_attrs.min_hw_wq_size = ICRDMA_MIN_WQ_SIZE;
dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
disable_tx_spad(dev->hw);
disable_prefetch(dev->hw);
@@ -320,6 +322,9 @@ irdma_is_config_ok(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi)
#define IRDMA_CWND_NO_FC 0x1
#define IRDMA_CWND_FC 0x18
+#define IRDMA_RTOMIN_NO_FC 0x5
+#define IRDMA_RTOMIN_FC 0x32
+
#define IRDMA_ACKCREDS_NO_FC 0x02
#define IRDMA_ACKCREDS_FC 0x06
@@ -405,7 +410,7 @@ disable_tx_spad(struct irdma_hw *hw)
wr32(hw, GLPE_WQMTXIDXDATA, wqm_data);
}
-#define GL_RDPU_CNTRL 0x52054
+#define GL_RDPU_CNTRL 0x52054
void
rdpu_ackreqpmthresh(struct irdma_hw *hw)
{
diff --git a/sys/dev/irdma/icrdma_hw.h b/sys/dev/irdma/icrdma_hw.h
index 4da0d7fcc9e8..0b0b46c0d567 100644
--- a/sys/dev/irdma/icrdma_hw.h
+++ b/sys/dev/irdma/icrdma_hw.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2019 - 2020 Intel Corporation
+ * Copyright (c) 2017 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -101,28 +101,29 @@
#define ICRDMA_VF_DB_ADDR_OFFSET (64 * 1024)
-/* CCQSTATUS */
-#define ICRDMA_CCQPSTATUS_CCQP_DONE_S 0
-#define ICRDMA_CCQPSTATUS_CCQP_DONE_M (0x1ULL << ICRDMA_CCQPSTATUS_CCQP_DONE_S)
-#define ICRDMA_CCQPSTATUS_CCQP_ERR_S 31
-#define ICRDMA_CCQPSTATUS_CCQP_ERR_M (0x1ULL << ICRDMA_CCQPSTATUS_CCQP_ERR_S)
-#define ICRDMA_CQPSQ_STAG_PDID_S 46
-#define ICRDMA_CQPSQ_STAG_PDID_M (0x3ffffULL << ICRDMA_CQPSQ_STAG_PDID_S)
-#define ICRDMA_CQPSQ_CQ_CEQID_S 22
-#define ICRDMA_CQPSQ_CQ_CEQID_M (0x3ffULL << ICRDMA_CQPSQ_CQ_CEQID_S)
-#define ICRDMA_CQPSQ_CQ_CQID_S 0
-#define ICRDMA_CQPSQ_CQ_CQID_M (0x7ffffULL << ICRDMA_CQPSQ_CQ_CQID_S)
-#define ICRDMA_COMMIT_FPM_CQCNT_S 0
-#define ICRDMA_COMMIT_FPM_CQCNT_M (0xfffffULL << ICRDMA_COMMIT_FPM_CQCNT_S)
+#define ICRDMA_CCQPSTATUS_CCQP_DONE_S 0
+#define ICRDMA_CCQPSTATUS_CCQP_DONE BIT_ULL(0)
+#define ICRDMA_CCQPSTATUS_CCQP_ERR_S 31
+#define ICRDMA_CCQPSTATUS_CCQP_ERR BIT_ULL(31)
+#define ICRDMA_CQPSQ_STAG_PDID_S 46
+#define ICRDMA_CQPSQ_STAG_PDID GENMASK_ULL(63, 46)
+#define ICRDMA_CQPSQ_CQ_CEQID_S 22
+#define ICRDMA_CQPSQ_CQ_CEQID GENMASK_ULL(31, 22)
+#define ICRDMA_CQPSQ_CQ_CQID_S 0
+#define ICRDMA_CQPSQ_CQ_CQID GENMASK_ULL(18, 0)
+#define ICRDMA_COMMIT_FPM_CQCNT_S 0
+#define ICRDMA_COMMIT_FPM_CQCNT GENMASK_ULL(19, 0)
+#define ICRDMA_CQPSQ_UPESD_HMCFNID_S 0
+#define ICRDMA_CQPSQ_UPESD_HMCFNID GENMASK_ULL(5, 0)
enum icrdma_device_caps_const {
ICRDMA_MAX_WQ_FRAGMENT_COUNT = 13,
ICRDMA_MAX_SGE_RD = 13,
-
ICRDMA_MAX_STATS_COUNT = 128,
- ICRDMA_MAX_IRD_SIZE = 127,
- ICRDMA_MAX_ORD_SIZE = 255,
+ ICRDMA_MAX_IRD_SIZE = 32,
+ ICRDMA_MAX_ORD_SIZE = 64,
+ ICRDMA_MIN_WQ_SIZE = 8 /* WQEs */,
};
diff --git a/sys/dev/irdma/irdma-abi.h b/sys/dev/irdma/irdma-abi.h
index 779c14fa30ac..4e4d8e63a9d0 100644
--- a/sys/dev/irdma/irdma-abi.h
+++ b/sys/dev/irdma/irdma-abi.h
@@ -2,7 +2,7 @@
* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB)
*
*
- * Copyright (c) 2006 - 2021 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2022 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
diff --git a/sys/dev/irdma/irdma.h b/sys/dev/irdma/irdma.h
index 793ba3c2ae39..fd72036397f9 100644
--- a/sys/dev/irdma/irdma.h
+++ b/sys/dev/irdma/irdma.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2017 - 2021 Intel Corporation
+ * Copyright (c) 2017 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -39,81 +39,74 @@
#define RDMA_BIT2(type, a) ((u##type) 1UL << a)
#define RDMA_MASK3(type, mask, shift) ((u##type) mask << shift)
#define MAKEMASK(m, s) ((m) << (s))
-#define IRDMA_WQEALLOC_WQE_DESC_INDEX_S 20
-#define IRDMA_WQEALLOC_WQE_DESC_INDEX_M (0xfff << IRDMA_WQEALLOC_WQE_DESC_INDEX_S)
-#define IRDMA_CQPTAIL_WQTAIL_S 0
-#define IRDMA_CQPTAIL_WQTAIL_M (0x7ff << IRDMA_CQPTAIL_WQTAIL_S)
-
-#define IRDMA_CQPTAIL_CQP_OP_ERR_S 31
-#define IRDMA_CQPTAIL_CQP_OP_ERR_M (0x1 << IRDMA_CQPTAIL_CQP_OP_ERR_S)
-
-#define IRDMA_CQPERRCODES_CQP_MINOR_CODE_S 0
-#define IRDMA_CQPERRCODES_CQP_MINOR_CODE_M (0xffff << IRDMA_CQPERRCODES_CQP_MINOR_CODE_S)
-#define IRDMA_CQPERRCODES_CQP_MAJOR_CODE_S 16
-#define IRDMA_CQPERRCODES_CQP_MAJOR_CODE_M (0xffff << IRDMA_CQPERRCODES_CQP_MAJOR_CODE_S)
-
-#define IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE_S 4
-#define IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE_M (0x3 << IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE_S)
-
-#define IRDMA_GLINT_RATE_INTERVAL_S 0
-#define IRDMA_GLINT_RATE_INTERVAL_M (0x3c << IRDMA_GLINT_RATE_INTERVAL_S)
-
-#define IRDMA_GLINT_RATE_INTRL_ENA_S 6
-#define IRDMA_GLINT_RATE_INTRL_ENA_M BIT(6)
-
-#define IRDMA_GLINT_DYN_CTL_INTENA_S 0
-#define IRDMA_GLINT_DYN_CTL_INTENA_M (0x1 << IRDMA_GLINT_DYN_CTL_INTENA_S)
-
-#define IRDMA_GLINT_DYN_CTL_CLEARPBA_S 1
-#define IRDMA_GLINT_DYN_CTL_CLEARPBA_M (0x1 << IRDMA_GLINT_DYN_CTL_CLEARPBA_S)
-
-#define IRDMA_GLINT_DYN_CTL_ITR_INDX_S 3
-#define IRDMA_GLINT_DYN_CTL_ITR_INDX_M (0x3 << IRDMA_GLINT_DYN_CTL_ITR_INDX_S)
-
-#define IRDMA_GLINT_DYN_CTL_INTERVAL_S 5
-#define IRDMA_GLINT_DYN_CTL_INTERVAL_M (0xfff << IRDMA_GLINT_DYN_CTL_INTERVAL_S)
-
-#define IRDMA_GLINT_CEQCTL_ITR_INDX_S 11
-#define IRDMA_GLINT_CEQCTL_ITR_INDX_M (0x3 << IRDMA_GLINT_CEQCTL_ITR_INDX_S)
-
-#define IRDMA_GLINT_CEQCTL_CAUSE_ENA_S 30
-#define IRDMA_GLINT_CEQCTL_CAUSE_ENA_M (0x1 << IRDMA_GLINT_CEQCTL_CAUSE_ENA_S)
-
-#define IRDMA_GLINT_CEQCTL_MSIX_INDX_S 0
-#define IRDMA_GLINT_CEQCTL_MSIX_INDX_M (0x7ff << IRDMA_GLINT_CEQCTL_MSIX_INDX_S)
-
-#define IRDMA_PFINT_AEQCTL_MSIX_INDX_S 0
-#define IRDMA_PFINT_AEQCTL_MSIX_INDX_M (0x7ff << IRDMA_PFINT_AEQCTL_MSIX_INDX_S)
-
-#define IRDMA_PFINT_AEQCTL_ITR_INDX_S 11
-#define IRDMA_PFINT_AEQCTL_ITR_INDX_M (0x3 << IRDMA_PFINT_AEQCTL_ITR_INDX_S)
-
-#define IRDMA_PFINT_AEQCTL_CAUSE_ENA_S 30
-#define IRDMA_PFINT_AEQCTL_CAUSE_ENA_M (0x1 << IRDMA_PFINT_AEQCTL_CAUSE_ENA_S)
-
-#define IRDMA_PFHMC_PDINV_PMSDIDX_S 0
-#define IRDMA_PFHMC_PDINV_PMSDIDX_M (0xfff << IRDMA_PFHMC_PDINV_PMSDIDX_S)
-
-#define IRDMA_PFHMC_PDINV_PMSDPARTSEL_S 15
-#define IRDMA_PFHMC_PDINV_PMSDPARTSEL_M (0x1 << IRDMA_PFHMC_PDINV_PMSDPARTSEL_S)
-
-#define IRDMA_PFHMC_PDINV_PMPDIDX_S 16
-#define IRDMA_PFHMC_PDINV_PMPDIDX_M (0x1ff << IRDMA_PFHMC_PDINV_PMPDIDX_S)
-
-#define IRDMA_PFHMC_SDDATALOW_PMSDVALID_S 0
-#define IRDMA_PFHMC_SDDATALOW_PMSDVALID_M (0x1 << IRDMA_PFHMC_SDDATALOW_PMSDVALID_S)
-#define IRDMA_PFHMC_SDDATALOW_PMSDTYPE_S 1
-#define IRDMA_PFHMC_SDDATALOW_PMSDTYPE_M (0x1 << IRDMA_PFHMC_SDDATALOW_PMSDTYPE_S)
-#define IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT_S 2
-#define IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT_M (0x3ff << IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT_S)
-#define IRDMA_PFHMC_SDDATALOW_PMSDDATALOW_S 12
-#define IRDMA_PFHMC_SDDATALOW_PMSDDATALOW_M (0xfffff << IRDMA_PFHMC_SDDATALOW_PMSDDATALOW_S)
-
-#define IRDMA_PFHMC_SDCMD_PMSDWR_S 31
-#define IRDMA_PFHMC_SDCMD_PMSDWR_M (0x1 << IRDMA_PFHMC_SDCMD_PMSDWR_S)
-
-#define IRDMA_INVALID_CQ_IDX 0xffffffff
+#define IRDMA_WQEALLOC_WQE_DESC_INDEX_S 20
+#define IRDMA_WQEALLOC_WQE_DESC_INDEX GENMASK(31, 20)
+
+#define IRDMA_CQPTAIL_WQTAIL_S 0
+#define IRDMA_CQPTAIL_WQTAIL GENMASK(10, 0)
+#define IRDMA_CQPTAIL_CQP_OP_ERR_S 31
+#define IRDMA_CQPTAIL_CQP_OP_ERR BIT(31)
+
+#define IRDMA_CQPERRCODES_CQP_MINOR_CODE_S 0
+#define IRDMA_CQPERRCODES_CQP_MINOR_CODE GENMASK(15, 0)
+#define IRDMA_CQPERRCODES_CQP_MAJOR_CODE_S 16
+#define IRDMA_CQPERRCODES_CQP_MAJOR_CODE GENMASK(31, 16)
+#define IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE_S 4
+#define IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE GENMASK(5, 4)
+#define IRDMA_GLINT_RATE_INTERVAL_S 0
+#define IRDMA_GLINT_RATE_INTERVAL GENMASK(4, 0)
+#define IRDMA_GLINT_RATE_INTRL_ENA_S 6
+#define IRDMA_GLINT_RATE_INTRL_ENA_M BIT(6)
+#define IRDMA_GLINT_RATE_INTRL_ENA BIT(6)
+
+#define IRDMA_GLINT_DYN_CTL_INTENA_S 0
+#define IRDMA_GLINT_DYN_CTL_INTENA BIT(0)
+#define IRDMA_GLINT_DYN_CTL_CLEARPBA_S 1
+#define IRDMA_GLINT_DYN_CTL_CLEARPBA BIT(1)
+#define IRDMA_GLINT_DYN_CTL_ITR_INDX_S 3
+#define IRDMA_GLINT_DYN_CTL_ITR_INDX GENMASK(4, 3)
+#define IRDMA_GLINT_DYN_CTL_INTERVAL_S 5
+#define IRDMA_GLINT_DYN_CTL_INTERVAL GENMASK(16, 5)
+#define IRDMA_GLINT_CEQCTL_ITR_INDX_S 11
+#define IRDMA_GLINT_CEQCTL_ITR_INDX GENMASK(12, 11)
+#define IRDMA_GLINT_CEQCTL_CAUSE_ENA_S 30
+#define IRDMA_GLINT_CEQCTL_CAUSE_ENA BIT(30)
+#define IRDMA_GLINT_CEQCTL_MSIX_INDX_S 0
+#define IRDMA_GLINT_CEQCTL_MSIX_INDX GENMASK(10, 0)
+#define IRDMA_PFINT_AEQCTL_MSIX_INDX_S 0
+#define IRDMA_PFINT_AEQCTL_MSIX_INDX GENMASK(10, 0)
+#define IRDMA_PFINT_AEQCTL_ITR_INDX_S 11
+#define IRDMA_PFINT_AEQCTL_ITR_INDX GENMASK(12, 11)
+#define IRDMA_PFINT_AEQCTL_CAUSE_ENA_S 30
+#define IRDMA_PFINT_AEQCTL_CAUSE_ENA BIT(30)
+#define IRDMA_PFHMC_PDINV_PMSDIDX_S 0
+#define IRDMA_PFHMC_PDINV_PMSDIDX GENMASK(11, 0)
+#define IRDMA_PFHMC_PDINV_PMSDPARTSEL_S 15
+#define IRDMA_PFHMC_PDINV_PMSDPARTSEL BIT(15)
+#define IRDMA_PFHMC_PDINV_PMPDIDX_S 16
+#define IRDMA_PFHMC_PDINV_PMPDIDX GENMASK(24, 16)
+#define IRDMA_PFHMC_SDDATALOW_PMSDVALID_S 0
+#define IRDMA_PFHMC_SDDATALOW_PMSDVALID BIT(0)
+#define IRDMA_PFHMC_SDDATALOW_PMSDTYPE_S 1
+#define IRDMA_PFHMC_SDDATALOW_PMSDTYPE BIT(1)
+#define IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT_S 2
+#define IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT GENMASK(11, 2)
+#define IRDMA_PFHMC_SDDATALOW_PMSDDATALOW_S 12
+#define IRDMA_PFHMC_SDDATALOW_PMSDDATALOW GENMASK(31, 12)
+#define IRDMA_PFHMC_SDCMD_PMSDWR_S 31
+#define IRDMA_PFHMC_SDCMD_PMSDWR BIT(31)
+#define IRDMA_PFHMC_SDCMD_PMSDPARTSEL_S 15
+#define IRDMA_PFHMC_SDCMD_PMSDPARTSEL BIT(15)
+
+#define IRDMA_INVALID_CQ_IDX 0xffffffff
+
+enum irdma_dyn_idx_t {
+ IRDMA_IDX_ITR0 = 0,
+ IRDMA_IDX_ITR1 = 1,
+ IRDMA_IDX_ITR2 = 2,
+ IRDMA_IDX_NOITR = 3,
+};
enum irdma_registers {
IRDMA_CQPTAIL,
@@ -149,6 +142,7 @@ enum irdma_shifts {
IRDMA_CQPSQ_CQ_CEQID_S,
IRDMA_CQPSQ_CQ_CQID_S,
IRDMA_COMMIT_FPM_CQCNT_S,
+ IRDMA_CQPSQ_UPESD_HMCFNID_S,
IRDMA_MAX_SHIFTS,
};
@@ -159,6 +153,7 @@ enum irdma_masks {
IRDMA_CQPSQ_CQ_CEQID_M,
IRDMA_CQPSQ_CQ_CQID_M,
IRDMA_COMMIT_FPM_CQCNT_M,
+ IRDMA_CQPSQ_UPESD_HMCFNID_M,
IRDMA_MAX_MASKS, /* Must be last entry */
};
@@ -174,7 +169,7 @@ struct irdma_mcast_grp_ctx_entry_info {
struct irdma_mcast_grp_info {
u8 dest_mac_addr[ETH_ALEN];
u16 vlan_id;
- u8 hmc_fcn_id;
+ u16 hmc_fcn_id;
bool ipv4_valid:1;
bool vlan_valid:1;
u16 mg_id;
@@ -186,9 +181,10 @@ struct irdma_mcast_grp_info {
};
enum irdma_vers {
- IRDMA_GEN_RSVD,
- IRDMA_GEN_1,
- IRDMA_GEN_2,
+ IRDMA_GEN_RSVD = 0,
+ IRDMA_GEN_1 = 1,
+ IRDMA_GEN_2 = 2,
+ IRDMA_GEN_MAX = 2,
};
struct irdma_uk_attrs {
@@ -201,8 +197,7 @@ struct irdma_uk_attrs {
u32 min_hw_cq_size;
u32 max_hw_cq_size;
u16 max_hw_sq_chunk;
- u16 max_hw_wq_size;
- u16 min_sw_wq_size;
+ u16 min_hw_wq_size;
u8 hw_rev;
};
@@ -211,6 +206,7 @@ struct irdma_hw_attrs {
u64 max_hw_outbound_msg_size;
u64 max_hw_inbound_msg_size;
u64 max_mr_size;
+ u64 page_size_cap;
u32 min_hw_qp_id;
u32 min_hw_aeq_size;
u32 max_hw_aeq_size;
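The new page_size_cap member advertises supported registration page sizes as a bitmap of the Linux SZ_* constants (as initialized in icrdma_init_hw above), so a caller can test membership with a plain AND. A trivial sketch under that assumption:

    #include <stdint.h>
    #include <stdio.h>

    #define SZ_4K 0x00001000ULL /* standard Linux SZ_* values */
    #define SZ_2M 0x00200000ULL
    #define SZ_1G 0x40000000ULL

    int
    main(void)
    {
            uint64_t page_size_cap = SZ_4K | SZ_2M | SZ_1G; /* as in the driver */

            printf("2M pages %ssupported\n",
                (page_size_cap & SZ_2M) ? "" : "not ");
            return 0;
    }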
diff --git a/sys/dev/irdma/irdma_cm.c b/sys/dev/irdma/irdma_cm.c
index 410774226e78..8cfd62a790ab 100644
--- a/sys/dev/irdma/irdma_cm.c
+++ b/sys/dev/irdma/irdma_cm.c
@@ -1628,6 +1628,7 @@ irdma_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
{
struct ifnet *ip_dev = NULL;
struct in6_addr laddr6;
+ u16 scope_id = 0;
irdma_copy_ip_htonl(laddr6.__u6_addr.__u6_addr32, addr);
if (vlan_id)
@@ -1635,7 +1636,11 @@ irdma_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
if (mac)
eth_zero_addr(mac);
- ip_dev = ip6_ifp_find(&init_net, laddr6, 0);
+ if (IN6_IS_SCOPE_LINKLOCAL(&laddr6) ||
+ IN6_IS_ADDR_MC_INTFACELOCAL(&laddr6))
+ scope_id = ntohs(laddr6.__u6_addr.__u6_addr16[1]);
+
+ ip_dev = ip6_ifp_find(&init_net, laddr6, scope_id);
if (ip_dev) {
if (vlan_id)
*vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
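The lookup fix above accounts for KAME-embedded scope: for link-local or interface-local addresses the FreeBSD stack stores the zone id in the second 16-bit word of the in6_addr (a word that is always zero on the wire), and ip6_ifp_find needs it to pick the right interface. A small userland-flavored sketch of the extraction, with a hand-built address standing in for kernel state:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>

    int
    main(void)
    {
            struct in6_addr a;
            uint16_t w1, scope_id = 0;

            inet_pton(AF_INET6, "fe80::1", &a);
            a.s6_addr[2] = 0;       /* emulate the kernel's embedded form:  */
            a.s6_addr[3] = 3;       /* zone id 3 in the 2nd 16-bit word     */

            if (IN6_IS_ADDR_LINKLOCAL(&a)) {
                    memcpy(&w1, &a.s6_addr[2], sizeof(w1));
                    scope_id = ntohs(w1); /* same idea as the hunk above */
            }
            printf("scope_id = %u\n", scope_id); /* prints 3 */
            return 0;
    }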
@@ -2055,15 +2060,9 @@ irdma_add_hte_node(struct irdma_cm_core *cm_core,
* @rem_addr: remote address
*/
bool
-irdma_ipv4_is_lpb(struct vnet *vnet, u32 loc_addr, u32 rem_addr)
+irdma_ipv4_is_lpb(u32 loc_addr, u32 rem_addr)
{
- bool ret;
-
- CURVNET_SET_QUIET(vnet);
- ret = ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr);
- CURVNET_RESTORE();
-
- return (ret);
+ return ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr);
}
/**
@@ -2089,10 +2088,12 @@ irdma_ipv6_is_lpb(u32 *loc_addr, u32 *rem_addr)
static int
irdma_cm_create_ah(struct irdma_cm_node *cm_node, bool wait)
{
- struct rdma_cm_id *rdma_id = (struct rdma_cm_id *)cm_node->cm_id->context;
- struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
struct irdma_ah_info ah_info = {0};
struct irdma_device *iwdev = cm_node->iwdev;
+#ifdef VIMAGE
+ struct rdma_cm_id *rdma_id = (struct rdma_cm_id *)cm_node->cm_id->context;
+ struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
+#endif
ether_addr_copy(ah_info.mac_addr, IF_LLADDR(iwdev->netdev));
@@ -2104,9 +2105,12 @@ irdma_cm_create_ah(struct irdma_cm_node *cm_node, bool wait)
ah_info.ipv4_valid = true;
ah_info.dest_ip_addr[0] = cm_node->rem_addr[0];
ah_info.src_ip_addr[0] = cm_node->loc_addr[0];
- ah_info.do_lpbk = irdma_ipv4_is_lpb(vnet,
- ah_info.src_ip_addr[0],
+#ifdef VIMAGE
+ CURVNET_SET_QUIET(vnet);
+ ah_info.do_lpbk = irdma_ipv4_is_lpb(ah_info.src_ip_addr[0],
ah_info.dest_ip_addr[0]);
+ CURVNET_RESTORE();
+#endif
} else {
memcpy(ah_info.dest_ip_addr, cm_node->rem_addr,
sizeof(ah_info.dest_ip_addr));
@@ -2235,10 +2239,8 @@ err:
}
static void
-irdma_cm_node_free_cb(struct rcu_head *rcu_head)
+irdma_destroy_connection(struct irdma_cm_node *cm_node)
{
- struct irdma_cm_node *cm_node =
- container_of(rcu_head, struct irdma_cm_node, rcu_head);
struct irdma_cm_core *cm_core = cm_node->cm_core;
struct irdma_qp *iwqp;
struct irdma_cm_info nfo;
@@ -2286,7 +2288,6 @@ irdma_cm_node_free_cb(struct rcu_head *rcu_head)
}
cm_core->cm_free_ah(cm_node);
- kfree(cm_node);
}
/**
@@ -2314,8 +2315,9 @@ irdma_rem_ref_cm_node(struct irdma_cm_node *cm_node)
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
- /* wait for all list walkers to exit their grace period */
- call_rcu(&cm_node->rcu_head, irdma_cm_node_free_cb);
+ irdma_destroy_connection(cm_node);
+
+ kfree_rcu(cm_node, rcu_head);
}
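The teardown rework above moves all connection cleanup out of the RCU callback path: irdma_destroy_connection() now runs synchronously in the unreference path, and kfree_rcu() defers only the final free until readers traversing the connection list have finished, instead of running the whole destructor in callback context. A schematic of the new shape, with the grace-period wait stubbed out so the skeleton compiles standalone (not the driver's actual code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct cm_node {
            char name[16];
            /* struct rcu_head rcu_head; -- used by kfree_rcu() in the driver */
    };

    static void
    grace_period_stub(void)
    {
            /* real code: kfree_rcu(cm_node, rcu_head) defers only the free */
    }

    static void
    destroy_connection(struct cm_node *n) /* heavyweight teardown, inline now */
    {
            printf("tearing down %s synchronously\n", n->name);
    }

    int
    main(void)
    {
            struct cm_node *n = calloc(1, sizeof(*n));

            if (n == NULL)
                    return 1;
            strcpy(n->name, "cm0");
            destroy_connection(n);  /* new scheme: tear down immediately ... */
            grace_period_stub();    /* ... free only after readers are done  */
            free(n);
            return 0;
    }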
/**
@@ -3410,12 +3412,6 @@ irdma_cm_disconn_true(struct irdma_qp *iwqp)
}
cm_id = iwqp->cm_id;
- /* make sure we havent already closed this connection */
- if (!cm_id) {
- spin_unlock_irqrestore(&iwqp->lock, flags);
- return;
- }
-
original_hw_tcp_state = iwqp->hw_tcp_state;
original_ibqp_state = iwqp->ibqp_state;
last_ae = iwqp->last_aeq;
@@ -3437,11 +3433,11 @@ irdma_cm_disconn_true(struct irdma_qp *iwqp)
disconn_status = -ECONNRESET;
}
- if ((original_hw_tcp_state == IRDMA_TCP_STATE_CLOSED ||
- original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
- last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
- last_ae == IRDMA_AE_BAD_CLOSE ||
- last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset)) {
+ if (original_hw_tcp_state == IRDMA_TCP_STATE_CLOSED ||
+ original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
+ last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
+ last_ae == IRDMA_AE_BAD_CLOSE ||
+ last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset || !cm_id) {
issue_close = 1;
iwqp->cm_id = NULL;
qp->term_flags = 0;
@@ -3453,10 +3449,6 @@ irdma_cm_disconn_true(struct irdma_qp *iwqp)
spin_unlock_irqrestore(&iwqp->lock, flags);
if (issue_flush && !iwqp->sc_qp.qp_uk.destroy_pending) {
- if (!iwqp->user_mode)
- queue_delayed_work(iwqp->iwdev->cleanup_wq,
- &iwqp->dwork_flush,
- msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_FLUSH_RQ |
IRDMA_FLUSH_WAIT);
@@ -4193,10 +4185,6 @@ irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
struct irdma_cm_node *cm_node;
struct list_head teardown_list;
struct ib_qp_attr attr;
- struct irdma_sc_vsi *vsi = &iwdev->vsi;
- struct irdma_sc_qp *sc_qp;
- struct irdma_qp *qp;
- int i;
INIT_LIST_HEAD(&teardown_list);
@@ -4213,50 +4201,4 @@ irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
irdma_cm_disconn(cm_node->iwqp);
irdma_rem_ref_cm_node(cm_node);
}
- if (!iwdev->roce_mode)
- return;
-
- INIT_LIST_HEAD(&teardown_list);
- for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
- mutex_lock(&vsi->qos[i].qos_mutex);
- list_for_each_safe(list_node, list_core_temp,
- &vsi->qos[i].qplist) {
- u32 qp_ip[4];
-
- sc_qp = container_of(list_node, struct irdma_sc_qp,
- list);
- if (sc_qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_RC)
- continue;
-
- qp = sc_qp->qp_uk.back_qp;
- if (!disconnect_all) {
- if (nfo->ipv4)
- qp_ip[0] = qp->udp_info.local_ipaddr[3];
- else
- memcpy(qp_ip,
- &qp->udp_info.local_ipaddr[0],
- sizeof(qp_ip));
- }
-
- if (disconnect_all ||
- (nfo->vlan_id == (qp->udp_info.vlan_tag & EVL_VLID_MASK) &&
- !memcmp(qp_ip, ipaddr, nfo->ipv4 ? 4 : 16))) {
- spin_lock(&iwdev->rf->qptable_lock);
- if (iwdev->rf->qp_table[sc_qp->qp_uk.qp_id]) {
- irdma_qp_add_ref(&qp->ibqp);
- list_add(&qp->teardown_entry,
- &teardown_list);
- }
- spin_unlock(&iwdev->rf->qptable_lock);
- }
- }
- mutex_unlock(&vsi->qos[i].qos_mutex);
- }
-
- list_for_each_safe(list_node, list_core_temp, &teardown_list) {
- qp = container_of(list_node, struct irdma_qp, teardown_entry);
- attr.qp_state = IB_QPS_ERR;
- irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
- irdma_qp_rem_ref(&qp->ibqp);
- }
}
diff --git a/sys/dev/irdma/irdma_cm.h b/sys/dev/irdma/irdma_cm.h
index d3d55977cf3c..52f3e267a9b1 100644
--- a/sys/dev/irdma/irdma_cm.h
+++ b/sys/dev/irdma/irdma_cm.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2021 Intel Corporation
+ * Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -74,7 +74,7 @@
#define TCP_OPTIONS_PADDING 3
#define IRDMA_DEFAULT_RETRYS 64
-#define IRDMA_DEFAULT_RETRANS 8
+#define IRDMA_DEFAULT_RETRANS 32
#define IRDMA_DEFAULT_TTL 0x40
#define IRDMA_DEFAULT_RTT_VAR 6
#define IRDMA_DEFAULT_SS_THRESH 0x3fffffff
@@ -192,14 +192,6 @@ enum irdma_cm_event_type {
IRDMA_CM_EVENT_ABORTED,
};
-struct irdma_bth { /* Base Trasnport Header */
- u8 opcode;
- u8 flags;
- __be16 pkey;
- __be32 qpn;
- __be32 apsn;
-};
-
struct ietf_mpa_v1 {
u8 key[IETF_MPA_KEY_SIZE];
u8 flags;
@@ -426,8 +418,8 @@ int irdma_schedule_cm_timer(struct irdma_cm_node *cm_node,
static inline u8 irdma_tos2dscp(u8 tos)
{
#define IRDMA_DSCP_S 2
-#define IRDMA_DSCP_M (0x3f << IRDMA_DSCP_S)
- return RS_32(tos, IRDMA_DSCP);
+#define IRDMA_DSCP GENMASK(7, 2)
+ return FIELD_GET(IRDMA_DSCP, tos);
}
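irdma_tos2dscp() above now reads the six DSCP bits with FIELD_GET instead of the RS_32 shift macro; the result is identical. A one-liner check of the mapping (EF traffic, TOS 0xb8, is DSCP 46):

    #include <stdio.h>

    #define IRDMA_DSCP_MASK 0xfcu /* GENMASK(7, 2), as in the hunk above */

    static unsigned
    tos2dscp(unsigned tos)
    {
            return (tos & IRDMA_DSCP_MASK) >> 2; /* FIELD_GET(IRDMA_DSCP, tos) */
    }

    int
    main(void)
    {
            printf("tos 0xb8 -> dscp %u\n", tos2dscp(0xb8)); /* prints 46 */
            return 0;
    }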
int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
@@ -435,16 +427,16 @@ int irdma_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int irdma_create_listen(struct iw_cm_id *cm_id, int backlog);
int irdma_destroy_listen(struct iw_cm_id *cm_id);
-int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, u8 *mac);
+int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, const u8 *mac);
void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
struct irdma_cm_info *nfo,
bool disconnect_all);
int irdma_cm_start(struct irdma_device *dev);
int irdma_cm_stop(struct irdma_device *dev);
-bool irdma_ipv4_is_lpb(struct vnet *, u32 loc_addr, u32 rem_addr);
+bool irdma_ipv4_is_lpb(u32 loc_addr, u32 rem_addr);
bool irdma_ipv6_is_lpb(u32 *loc_addr, u32 *rem_addr);
int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr,
- u8 *mac_addr, u32 action);
+ const u8 *mac_addr, u32 action);
bool irdma_port_in_use(struct irdma_cm_core *cm_core, u16 port);
void irdma_send_ack(struct irdma_cm_node *cm_node);
void irdma_lpb_nop(struct irdma_sc_qp *qp);
diff --git a/sys/dev/irdma/irdma_ctrl.c b/sys/dev/irdma/irdma_ctrl.c
index cfc9b161d643..4b45da1a8866 100644
--- a/sys/dev/irdma/irdma_ctrl.c
+++ b/sys/dev/irdma/irdma_ctrl.c
@@ -156,18 +156,16 @@ void
irdma_change_l2params(struct irdma_sc_vsi *vsi,
struct irdma_l2params *l2params)
{
+ if (l2params->tc_changed) {
+ vsi->tc_change_pending = false;
+ irdma_set_qos_info(vsi, l2params);
+ irdma_sc_suspend_resume_qps(vsi, IRDMA_OP_RESUME);
+ }
if (l2params->mtu_changed) {
vsi->mtu = l2params->mtu;
if (vsi->ieq)
irdma_reinitialize_ieq(vsi);
}
-
- if (!l2params->tc_changed)
- return;
-
- vsi->tc_change_pending = false;
- irdma_set_qos_info(vsi, l2params);
- irdma_sc_suspend_resume_qps(vsi, IRDMA_OP_RESUME);
}
/**
@@ -253,10 +251,10 @@ irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
set_64bit_val(wqe, IRDMA_BYTE_16, temp);
hdr = info->arp_index |
- LS_64(IRDMA_CQP_OP_MANAGE_ARP, IRDMA_CQPSQ_OPCODE) |
- LS_64((info->permanent ? 1 : 0), IRDMA_CQPSQ_MAT_PERMANENT) |
- LS_64(1, IRDMA_CQPSQ_MAT_ENTRYVALID) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
+ FIELD_PREP(IRDMA_CQPSQ_MAT_PERMANENT, info->permanent) |
+ FIELD_PREP(IRDMA_CQPSQ_MAT_ENTRYVALID, true) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -287,8 +285,9 @@ irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
if (!wqe)
return -ENOSPC;
- hdr = arp_index | LS_64(IRDMA_CQP_OP_MANAGE_ARP, IRDMA_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ hdr = arp_index |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -322,9 +321,9 @@ irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
set_64bit_val(wqe, IRDMA_BYTE_16, info->port);
- hdr = LS_64(IRDMA_CQP_OP_MANAGE_APBVT, IRDMA_CQPSQ_OPCODE) |
- LS_64(info->add, IRDMA_CQPSQ_MAPT_ADDPORT) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_APBVT) |
+ FIELD_PREP(IRDMA_CQPSQ_MAPT_ADDPORT, info->add) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -375,48 +374,48 @@ irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
LS_64_1(info->mac_addr[1], 32) | LS_64_1(info->mac_addr[0], 40);
set_64bit_val(wqe, IRDMA_BYTE_0, temp);
- qw1 = LS_64(info->qp_num, IRDMA_CQPSQ_QHASH_QPN) |
- LS_64(info->dest_port, IRDMA_CQPSQ_QHASH_DEST_PORT);
+ qw1 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QPN, info->qp_num) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_DEST_PORT, info->dest_port);
if (info->ipv4_valid) {
set_64bit_val(wqe, IRDMA_BYTE_48,
- LS_64(info->dest_ip[0], IRDMA_CQPSQ_QHASH_ADDR3));
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[0]));
} else {
set_64bit_val(wqe, IRDMA_BYTE_56,
- LS_64(info->dest_ip[0], IRDMA_CQPSQ_QHASH_ADDR0) |
- LS_64(info->dest_ip[1], IRDMA_CQPSQ_QHASH_ADDR1));
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->dest_ip[0]) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->dest_ip[1]));
set_64bit_val(wqe, IRDMA_BYTE_48,
- LS_64(info->dest_ip[2], IRDMA_CQPSQ_QHASH_ADDR2) |
- LS_64(info->dest_ip[3], IRDMA_CQPSQ_QHASH_ADDR3));
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->dest_ip[2]) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[3]));
}
- qw2 = LS_64(vsi->qos[info->user_pri].qs_handle,
- IRDMA_CQPSQ_QHASH_QS_HANDLE);
+ qw2 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QS_HANDLE,
+ vsi->qos[info->user_pri].qs_handle);
if (info->vlan_valid)
- qw2 |= LS_64(info->vlan_id, IRDMA_CQPSQ_QHASH_VLANID);
+ qw2 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANID, info->vlan_id);
set_64bit_val(wqe, IRDMA_BYTE_16, qw2);
if (info->entry_type == IRDMA_QHASH_TYPE_TCP_ESTABLISHED) {
- qw1 |= LS_64(info->src_port, IRDMA_CQPSQ_QHASH_SRC_PORT);
+ qw1 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_SRC_PORT, info->src_port);
if (!info->ipv4_valid) {
set_64bit_val(wqe, IRDMA_BYTE_40,
- LS_64(info->src_ip[0], IRDMA_CQPSQ_QHASH_ADDR0) |
- LS_64(info->src_ip[1], IRDMA_CQPSQ_QHASH_ADDR1));
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->src_ip[0]) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->src_ip[1]));
set_64bit_val(wqe, IRDMA_BYTE_32,
- LS_64(info->src_ip[2], IRDMA_CQPSQ_QHASH_ADDR2) |
- LS_64(info->src_ip[3], IRDMA_CQPSQ_QHASH_ADDR3));
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->src_ip[2]) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[3]));
} else {
set_64bit_val(wqe, IRDMA_BYTE_32,
- LS_64(info->src_ip[0], IRDMA_CQPSQ_QHASH_ADDR3));
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[0]));
}
}
set_64bit_val(wqe, IRDMA_BYTE_8, qw1);
- temp = LS_64(cqp->polarity, IRDMA_CQPSQ_QHASH_WQEVALID) |
- LS_64(IRDMA_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY,
- IRDMA_CQPSQ_QHASH_OPCODE) |
- LS_64(info->manage, IRDMA_CQPSQ_QHASH_MANAGE) |
- LS_64(info->ipv4_valid, IRDMA_CQPSQ_QHASH_IPV4VALID) |
- LS_64(info->vlan_valid, IRDMA_CQPSQ_QHASH_VLANVALID) |
- LS_64(info->entry_type, IRDMA_CQPSQ_QHASH_ENTRYTYPE);
+ temp = FIELD_PREP(IRDMA_CQPSQ_QHASH_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_OPCODE,
+ IRDMA_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_MANAGE, info->manage) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_IPV4VALID, info->ipv4_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANVALID, info->vlan_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ENTRYTYPE, info->entry_type);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, temp);
@@ -471,7 +470,6 @@ irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
return -EINVAL;
qp->llp_stream_handle = (void *)(-1);
- qp->qp_uk.force_fence = true;
qp->hw_sq_size = irdma_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
IRDMA_QUEUE_TYPE_SQ_RQ);
irdma_debug(qp->dev, IRDMA_DEBUG_WQE,
@@ -533,17 +531,19 @@ irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info,
set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa);
hdr = qp->qp_uk.qp_id |
- LS_64(IRDMA_CQP_OP_CREATE_QP, IRDMA_CQPSQ_OPCODE) |
- LS_64((info->ord_valid ? 1 : 0), IRDMA_CQPSQ_QP_ORDVALID) |
- LS_64(info->tcp_ctx_valid, IRDMA_CQPSQ_QP_TOECTXVALID) |
- LS_64(info->mac_valid, IRDMA_CQPSQ_QP_MACVALID) |
- LS_64(qp->qp_uk.qp_type, IRDMA_CQPSQ_QP_QPTYPE) |
- LS_64(qp->virtual_map, IRDMA_CQPSQ_QP_VQ) |
- LS_64(info->force_lpb, IRDMA_CQPSQ_QP_FORCELOOPBACK) |
- LS_64(info->cq_num_valid, IRDMA_CQPSQ_QP_CQNUMVALID) |
- LS_64(info->arp_cache_idx_valid, IRDMA_CQPSQ_QP_ARPTABIDXVALID) |
- LS_64(info->next_iwarp_state, IRDMA_CQPSQ_QP_NEXTIWSTATE) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, info->ord_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
+ info->arp_cache_idx_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -589,28 +589,32 @@ irdma_sc_qp_modify(struct irdma_sc_qp *qp, struct irdma_modify_qp_info *info,
}
set_64bit_val(wqe, IRDMA_BYTE_8,
- LS_64(info->new_mss, IRDMA_CQPSQ_QP_NEWMSS) |
- LS_64(term_len, IRDMA_CQPSQ_QP_TERMLEN));
+ FIELD_PREP(IRDMA_CQPSQ_QP_NEWMSS, info->new_mss) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_TERMLEN, term_len));
set_64bit_val(wqe, IRDMA_BYTE_16, qp->hw_host_ctx_pa);
set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa);
hdr = qp->qp_uk.qp_id |
- LS_64(IRDMA_CQP_OP_MODIFY_QP, IRDMA_CQPSQ_OPCODE) |
- LS_64(info->ord_valid, IRDMA_CQPSQ_QP_ORDVALID) |
- LS_64(info->tcp_ctx_valid, IRDMA_CQPSQ_QP_TOECTXVALID) |
- LS_64(info->cached_var_valid, IRDMA_CQPSQ_QP_CACHEDVARVALID) |
- LS_64(qp->virtual_map, IRDMA_CQPSQ_QP_VQ) |
- LS_64(info->force_lpb, IRDMA_CQPSQ_QP_FORCELOOPBACK) |
- LS_64(info->cq_num_valid, IRDMA_CQPSQ_QP_CQNUMVALID) |
- LS_64(info->mac_valid, IRDMA_CQPSQ_QP_MACVALID) |
- LS_64(qp->qp_uk.qp_type, IRDMA_CQPSQ_QP_QPTYPE) |
- LS_64(info->mss_change, IRDMA_CQPSQ_QP_MSSCHANGE) |
- LS_64(info->remove_hash_idx, IRDMA_CQPSQ_QP_REMOVEHASHENTRY) |
- LS_64(term_actions, IRDMA_CQPSQ_QP_TERMACT) |
- LS_64(info->reset_tcp_conn, IRDMA_CQPSQ_QP_RESETCON) |
- LS_64(info->arp_cache_idx_valid, IRDMA_CQPSQ_QP_ARPTABIDXVALID) |
- LS_64(info->next_iwarp_state, IRDMA_CQPSQ_QP_NEXTIWSTATE) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_QP) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, info->ord_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_CACHEDVARVALID,
+ info->cached_var_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_MSSCHANGE, info->mss_change) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY,
+ info->remove_hash_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_TERMACT, term_actions) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_RESETCON, info->reset_tcp_conn) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
+ info->arp_cache_idx_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -648,11 +652,11 @@ irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa);
hdr = qp->qp_uk.qp_id |
- LS_64(IRDMA_CQP_OP_DESTROY_QP, IRDMA_CQPSQ_OPCODE) |
- LS_64(qp->qp_uk.qp_type, IRDMA_CQPSQ_QP_QPTYPE) |
- LS_64(ignore_mw_bnd, IRDMA_CQPSQ_QP_IGNOREMWBOUND) |
- LS_64(remove_hash_idx, IRDMA_CQPSQ_QP_REMOVEHASHENTRY) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_QP) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_IGNOREMWBOUND, ignore_mw_bnd) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY, remove_hash_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -728,20 +732,20 @@ irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 * qp_ctx,
push_idx = qp->push_idx;
}
set_64bit_val(qp_ctx, IRDMA_BYTE_0,
- LS_64(qp->qp_uk.rq_wqe_size, IRDMAQPC_RQWQESIZE) |
- LS_64(qp->rcv_tph_en, IRDMAQPC_RCVTPHEN) |
- LS_64(qp->xmit_tph_en, IRDMAQPC_XMITTPHEN) |
- LS_64(qp->rq_tph_en, IRDMAQPC_RQTPHEN) |
- LS_64(qp->sq_tph_en, IRDMAQPC_SQTPHEN) |
- LS_64(push_idx, IRDMAQPC_PPIDX) |
- LS_64(push_mode_en, IRDMAQPC_PMENA) |
- LS_64(roce_info->pd_id >> 16, IRDMAQPC_PDIDXHI) |
- LS_64(roce_info->dctcp_en, IRDMAQPC_DC_TCP_EN) |
- LS_64(roce_info->err_rq_idx_valid, IRDMAQPC_ERR_RQ_IDX_VALID) |
- LS_64(roce_info->is_qp1, IRDMAQPC_ISQP1) |
- LS_64(roce_info->roce_tver, IRDMAQPC_ROCE_TVER) |
- LS_64(udp->ipv4, IRDMAQPC_IPV4) |
- LS_64(udp->insert_vlan_tag, IRDMAQPC_INSERTVLANTAG));
+ FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
+ FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
+ FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
+ FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
+ FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
+ FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
+ FIELD_PREP(IRDMAQPC_PMENA, push_mode_en) |
+ FIELD_PREP(IRDMAQPC_PDIDXHI, roce_info->pd_id >> 16) |
+ FIELD_PREP(IRDMAQPC_DC_TCP_EN, roce_info->dctcp_en) |
+ FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID, roce_info->err_rq_idx_valid) |
+ FIELD_PREP(IRDMAQPC_ISQP1, roce_info->is_qp1) |
+ FIELD_PREP(IRDMAQPC_ROCE_TVER, roce_info->roce_tver) |
+ FIELD_PREP(IRDMAQPC_IPV4, udp->ipv4) |
+ FIELD_PREP(IRDMAQPC_INSERTVLANTAG, udp->insert_vlan_tag));
set_64bit_val(qp_ctx, IRDMA_BYTE_8, qp->sq_pa);
set_64bit_val(qp_ctx, IRDMA_BYTE_16, qp->rq_pa);
if (roce_info->dcqcn_en || roce_info->dctcp_en) {
@@ -750,80 +754,81 @@ irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 * qp_ctx,
}
set_64bit_val(qp_ctx, IRDMA_BYTE_24,
- LS_64(qp->hw_rq_size, IRDMAQPC_RQSIZE) |
- LS_64(qp->hw_sq_size, IRDMAQPC_SQSIZE) |
- LS_64(udp->ttl, IRDMAQPC_TTL) | LS_64(udp->tos, IRDMAQPC_TOS) |
- LS_64(udp->src_port, IRDMAQPC_SRCPORTNUM) |
- LS_64(udp->dst_port, IRDMAQPC_DESTPORTNUM));
+ FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
+ FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size) |
+ FIELD_PREP(IRDMAQPC_TTL, udp->ttl) | FIELD_PREP(IRDMAQPC_TOS, udp->tos) |
+ FIELD_PREP(IRDMAQPC_SRCPORTNUM, udp->src_port) |
+ FIELD_PREP(IRDMAQPC_DESTPORTNUM, udp->dst_port));
set_64bit_val(qp_ctx, IRDMA_BYTE_32,
- LS_64(udp->dest_ip_addr[2], IRDMAQPC_DESTIPADDR2) |
- LS_64(udp->dest_ip_addr[3], IRDMAQPC_DESTIPADDR3));
+ FIELD_PREP(IRDMAQPC_DESTIPADDR2, udp->dest_ip_addr[2]) |
+ FIELD_PREP(IRDMAQPC_DESTIPADDR3, udp->dest_ip_addr[3]));
set_64bit_val(qp_ctx, IRDMA_BYTE_40,
- LS_64(udp->dest_ip_addr[0], IRDMAQPC_DESTIPADDR0) |
- LS_64(udp->dest_ip_addr[1], IRDMAQPC_DESTIPADDR1));
+ FIELD_PREP(IRDMAQPC_DESTIPADDR0, udp->dest_ip_addr[0]) |
+ FIELD_PREP(IRDMAQPC_DESTIPADDR1, udp->dest_ip_addr[1]));
set_64bit_val(qp_ctx, IRDMA_BYTE_48,
- LS_64(udp->snd_mss, IRDMAQPC_SNDMSS) |
- LS_64(udp->vlan_tag, IRDMAQPC_VLANTAG) |
- LS_64(udp->arp_idx, IRDMAQPC_ARPIDX));
+ FIELD_PREP(IRDMAQPC_SNDMSS, udp->snd_mss) |
+ FIELD_PREP(IRDMAQPC_VLANTAG, udp->vlan_tag) |
+ FIELD_PREP(IRDMAQPC_ARPIDX, udp->arp_idx));
set_64bit_val(qp_ctx, IRDMA_BYTE_56,
- LS_64(roce_info->p_key, IRDMAQPC_PKEY) |
- LS_64(roce_info->pd_id, IRDMAQPC_PDIDX) |
- LS_64(roce_info->ack_credits, IRDMAQPC_ACKCREDITS) |
- LS_64(udp->flow_label, IRDMAQPC_FLOWLABEL));
+ FIELD_PREP(IRDMAQPC_PKEY, roce_info->p_key) |
+ FIELD_PREP(IRDMAQPC_PDIDX, roce_info->pd_id) |
+ FIELD_PREP(IRDMAQPC_ACKCREDITS, roce_info->ack_credits) |
+ FIELD_PREP(IRDMAQPC_FLOWLABEL, udp->flow_label));
set_64bit_val(qp_ctx, IRDMA_BYTE_64,
- LS_64(roce_info->qkey, IRDMAQPC_QKEY) |
- LS_64(roce_info->dest_qp, IRDMAQPC_DESTQP));
+ FIELD_PREP(IRDMAQPC_QKEY, roce_info->qkey) |
+ FIELD_PREP(IRDMAQPC_DESTQP, roce_info->dest_qp));
set_64bit_val(qp_ctx, IRDMA_BYTE_80,
- LS_64(udp->psn_nxt, IRDMAQPC_PSNNXT) |
- LS_64(udp->lsn, IRDMAQPC_LSN));
- set_64bit_val(qp_ctx, IRDMA_BYTE_88, LS_64(udp->epsn, IRDMAQPC_EPSN));
+ FIELD_PREP(IRDMAQPC_PSNNXT, udp->psn_nxt) |
+ FIELD_PREP(IRDMAQPC_LSN, udp->lsn));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_88,
+ FIELD_PREP(IRDMAQPC_EPSN, udp->epsn));
set_64bit_val(qp_ctx, IRDMA_BYTE_96,
- LS_64(udp->psn_max, IRDMAQPC_PSNMAX) |
- LS_64(udp->psn_una, IRDMAQPC_PSNUNA));
+ FIELD_PREP(IRDMAQPC_PSNMAX, udp->psn_max) |
+ FIELD_PREP(IRDMAQPC_PSNUNA, udp->psn_una));
set_64bit_val(qp_ctx, IRDMA_BYTE_112,
- LS_64(udp->cwnd, IRDMAQPC_CWNDROCE));
+ FIELD_PREP(IRDMAQPC_CWNDROCE, udp->cwnd));
set_64bit_val(qp_ctx, IRDMA_BYTE_128,
- LS_64(roce_info->err_rq_idx, IRDMAQPC_ERR_RQ_IDX) |
- LS_64(udp->rnr_nak_thresh, IRDMAQPC_RNRNAK_THRESH) |
- LS_64(udp->rexmit_thresh, IRDMAQPC_REXMIT_THRESH) |
- LS_64(roce_info->rtomin, IRDMAQPC_RTOMIN));
+ FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, roce_info->err_rq_idx) |
+ FIELD_PREP(IRDMAQPC_RNRNAK_THRESH, udp->rnr_nak_thresh) |
+ FIELD_PREP(IRDMAQPC_REXMIT_THRESH, udp->rexmit_thresh) |
+ FIELD_PREP(IRDMAQPC_RTOMIN, roce_info->rtomin));
set_64bit_val(qp_ctx, IRDMA_BYTE_136,
- LS_64(info->send_cq_num, IRDMAQPC_TXCQNUM) |
- LS_64(info->rcv_cq_num, IRDMAQPC_RXCQNUM));
+ FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
+ FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
set_64bit_val(qp_ctx, IRDMA_BYTE_144,
- LS_64(info->stats_idx, IRDMAQPC_STAT_INDEX));
+ FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
set_64bit_val(qp_ctx, IRDMA_BYTE_152, mac);
set_64bit_val(qp_ctx, IRDMA_BYTE_160,
- LS_64(roce_info->ord_size, IRDMAQPC_ORDSIZE) |
- LS_64(irdma_sc_get_encoded_ird_size(roce_info->ird_size), IRDMAQPC_IRDSIZE) |
- LS_64(roce_info->wr_rdresp_en, IRDMAQPC_WRRDRSPOK) |
- LS_64(roce_info->rd_en, IRDMAQPC_RDOK) |
- LS_64(info->stats_idx_valid, IRDMAQPC_USESTATSINSTANCE) |
- LS_64(roce_info->bind_en, IRDMAQPC_BINDEN) |
- LS_64(roce_info->fast_reg_en, IRDMAQPC_FASTREGEN) |
- LS_64(roce_info->dcqcn_en, IRDMAQPC_DCQCNENABLE) |
- LS_64(roce_info->rcv_no_icrc, IRDMAQPC_RCVNOICRC) |
- LS_64(roce_info->fw_cc_enable, IRDMAQPC_FW_CC_ENABLE) |
- LS_64(roce_info->udprivcq_en, IRDMAQPC_UDPRIVCQENABLE) |
- LS_64(roce_info->priv_mode_en, IRDMAQPC_PRIVEN) |
- LS_64(roce_info->timely_en, IRDMAQPC_TIMELYENABLE));
+ FIELD_PREP(IRDMAQPC_ORDSIZE, roce_info->ord_size) |
+ FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(roce_info->ird_size)) |
+ FIELD_PREP(IRDMAQPC_WRRDRSPOK, roce_info->wr_rdresp_en) |
+ FIELD_PREP(IRDMAQPC_RDOK, roce_info->rd_en) |
+ FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
+ FIELD_PREP(IRDMAQPC_BINDEN, roce_info->bind_en) |
+ FIELD_PREP(IRDMAQPC_FASTREGEN, roce_info->fast_reg_en) |
+ FIELD_PREP(IRDMAQPC_DCQCNENABLE, roce_info->dcqcn_en) |
+ FIELD_PREP(IRDMAQPC_RCVNOICRC, roce_info->rcv_no_icrc) |
+ FIELD_PREP(IRDMAQPC_FW_CC_ENABLE, roce_info->fw_cc_enable) |
+ FIELD_PREP(IRDMAQPC_UDPRIVCQENABLE, roce_info->udprivcq_en) |
+ FIELD_PREP(IRDMAQPC_PRIVEN, roce_info->priv_mode_en) |
+ FIELD_PREP(IRDMAQPC_TIMELYENABLE, roce_info->timely_en));
set_64bit_val(qp_ctx, IRDMA_BYTE_168,
- LS_64(info->qp_compl_ctx, IRDMAQPC_QPCOMPCTX));
+ FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
set_64bit_val(qp_ctx, IRDMA_BYTE_176,
- LS_64(qp->sq_tph_val, IRDMAQPC_SQTPHVAL) |
- LS_64(qp->rq_tph_val, IRDMAQPC_RQTPHVAL) |
- LS_64(qp->qs_handle, IRDMAQPC_QSHANDLE));
+ FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
+ FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
+ FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
set_64bit_val(qp_ctx, IRDMA_BYTE_184,
- LS_64(udp->local_ipaddr[3], IRDMAQPC_LOCAL_IPADDR3) |
- LS_64(udp->local_ipaddr[2], IRDMAQPC_LOCAL_IPADDR2));
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, udp->local_ipaddr[3]) |
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, udp->local_ipaddr[2]));
set_64bit_val(qp_ctx, IRDMA_BYTE_192,
- LS_64(udp->local_ipaddr[1], IRDMAQPC_LOCAL_IPADDR1) |
- LS_64(udp->local_ipaddr[0], IRDMAQPC_LOCAL_IPADDR0));
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, udp->local_ipaddr[1]) |
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, udp->local_ipaddr[0]));
set_64bit_val(qp_ctx, IRDMA_BYTE_200,
- LS_64(roce_info->t_high, IRDMAQPC_THIGH) |
- LS_64(roce_info->t_low, IRDMAQPC_TLOW));
+ FIELD_PREP(IRDMAQPC_THIGH, roce_info->t_high) |
+ FIELD_PREP(IRDMAQPC_TLOW, roce_info->t_low));
set_64bit_val(qp_ctx, IRDMA_BYTE_208,
- LS_64(info->rem_endpoint_idx, IRDMAQPC_REMENDPOINTIDX));
+ FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "QP_HOST CTX WQE", qp_ctx,
IRDMA_QP_CTX_SIZE);
@@ -844,9 +849,9 @@ irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
if (!wqe)
return -ENOSPC;
- hdr = LS_64(IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY,
- IRDMA_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
+ IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -884,9 +889,10 @@ irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
set_64bit_val(wqe, IRDMA_BYTE_32, temp);
- header = LS_64(info->entry_idx, IRDMA_CQPSQ_MLM_TABLEIDX) |
- LS_64(IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE, IRDMA_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, info->entry_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE,
+ IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -919,11 +925,12 @@ irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
return -ENOSPC;
- header = LS_64(entry_idx, IRDMA_CQPSQ_MLM_TABLEIDX) |
- LS_64(IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE, IRDMA_CQPSQ_OPCODE) |
- LS_64(1, IRDMA_CQPSQ_MLM_FREEENTRY) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID) |
- LS_64(ignore_ref_count, IRDMA_CQPSQ_MLM_IGNORE_REF_CNT);
+ header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, entry_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE,
+ IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
+ FIELD_PREP(IRDMA_CQPSQ_MLM_FREEENTRY, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_MLM_IGNORE_REF_CNT, ignore_ref_count);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -970,45 +977,47 @@ irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 * qp_ctx,
push_mode_en = 1;
push_idx = qp->push_idx;
}
- qw0 = LS_64(qp->qp_uk.rq_wqe_size, IRDMAQPC_RQWQESIZE) |
- LS_64(qp->rcv_tph_en, IRDMAQPC_RCVTPHEN) |
- LS_64(qp->xmit_tph_en, IRDMAQPC_XMITTPHEN) |
- LS_64(qp->rq_tph_en, IRDMAQPC_RQTPHEN) |
- LS_64(qp->sq_tph_en, IRDMAQPC_SQTPHEN) |
- LS_64(push_idx, IRDMAQPC_PPIDX) |
- LS_64(push_mode_en, IRDMAQPC_PMENA);
+ qw0 = FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
+ FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
+ FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
+ FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
+ FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
+ FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
+ FIELD_PREP(IRDMAQPC_PMENA, push_mode_en);
set_64bit_val(qp_ctx, IRDMA_BYTE_8, qp->sq_pa);
set_64bit_val(qp_ctx, IRDMA_BYTE_16, qp->rq_pa);
- qw3 = LS_64(qp->hw_rq_size, IRDMAQPC_RQSIZE) |
- LS_64(qp->hw_sq_size, IRDMAQPC_SQSIZE);
+ qw3 = FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
+ FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size);
if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
- qw3 |= LS_64(qp->src_mac_addr_idx, IRDMAQPC_GEN1_SRCMACADDRIDX);
+ qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX,
+ qp->src_mac_addr_idx);
set_64bit_val(qp_ctx, IRDMA_BYTE_136,
- LS_64(info->send_cq_num, IRDMAQPC_TXCQNUM) |
- LS_64(info->rcv_cq_num, IRDMAQPC_RXCQNUM));
+ FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
+ FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
set_64bit_val(qp_ctx, IRDMA_BYTE_168,
- LS_64(info->qp_compl_ctx, IRDMAQPC_QPCOMPCTX));
+ FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
set_64bit_val(qp_ctx, IRDMA_BYTE_176,
- LS_64(qp->sq_tph_val, IRDMAQPC_SQTPHVAL) |
- LS_64(qp->rq_tph_val, IRDMAQPC_RQTPHVAL) |
- LS_64(qp->qs_handle, IRDMAQPC_QSHANDLE) |
- LS_64(qp->ieq_qp, IRDMAQPC_EXCEPTION_LAN_QUEUE));
+ FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
+ FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
+ FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle) |
+ FIELD_PREP(IRDMAQPC_EXCEPTION_LAN_QUEUE, qp->ieq_qp));
if (info->iwarp_info_valid) {
- qw0 |= LS_64(iw->ddp_ver, IRDMAQPC_DDP_VER) |
- LS_64(iw->rdmap_ver, IRDMAQPC_RDMAP_VER) |
- LS_64(iw->dctcp_en, IRDMAQPC_DC_TCP_EN) |
- LS_64(iw->ecn_en, IRDMAQPC_ECN_EN) |
- LS_64(iw->ib_rd_en, IRDMAQPC_IBRDENABLE) |
- LS_64(iw->pd_id >> 16, IRDMAQPC_PDIDXHI) |
- LS_64(iw->err_rq_idx_valid, IRDMAQPC_ERR_RQ_IDX_VALID);
- qw7 |= LS_64(iw->pd_id, IRDMAQPC_PDIDX);
- qw16 |= LS_64(iw->err_rq_idx, IRDMAQPC_ERR_RQ_IDX) |
- LS_64(iw->rtomin, IRDMAQPC_RTOMIN);
+ qw0 |= FIELD_PREP(IRDMAQPC_DDP_VER, iw->ddp_ver) |
+ FIELD_PREP(IRDMAQPC_RDMAP_VER, iw->rdmap_ver) |
+ FIELD_PREP(IRDMAQPC_DC_TCP_EN, iw->dctcp_en) |
+ FIELD_PREP(IRDMAQPC_ECN_EN, iw->ecn_en) |
+ FIELD_PREP(IRDMAQPC_IBRDENABLE, iw->ib_rd_en) |
+ FIELD_PREP(IRDMAQPC_PDIDXHI, iw->pd_id >> 16) |
+ FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID,
+ iw->err_rq_idx_valid);
+ qw7 |= FIELD_PREP(IRDMAQPC_PDIDX, iw->pd_id);
+ qw16 |= FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, iw->err_rq_idx) |
+ FIELD_PREP(IRDMAQPC_RTOMIN, iw->rtomin);
set_64bit_val(qp_ctx, IRDMA_BYTE_144,
- LS_64(qp->q2_pa >> 8, IRDMAQPC_Q2ADDR) |
- LS_64(info->stats_idx, IRDMAQPC_STAT_INDEX));
+ FIELD_PREP(IRDMAQPC_Q2ADDR, qp->q2_pa >> 8) |
+ FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
mac = LS_64_1(iw->mac_addr[5], 16) |
@@ -1020,103 +1029,104 @@ irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 * qp_ctx,
}
set_64bit_val(qp_ctx, IRDMA_BYTE_152,
- mac | LS_64(iw->last_byte_sent, IRDMAQPC_LASTBYTESENT));
+ mac | FIELD_PREP(IRDMAQPC_LASTBYTESENT, iw->last_byte_sent));
set_64bit_val(qp_ctx, IRDMA_BYTE_160,
- LS_64(iw->ord_size, IRDMAQPC_ORDSIZE) |
- LS_64(irdma_sc_get_encoded_ird_size(iw->ird_size), IRDMAQPC_IRDSIZE) |
- LS_64(iw->wr_rdresp_en, IRDMAQPC_WRRDRSPOK) |
- LS_64(iw->rd_en, IRDMAQPC_RDOK) |
- LS_64(iw->snd_mark_en, IRDMAQPC_SNDMARKERS) |
- LS_64(iw->bind_en, IRDMAQPC_BINDEN) |
- LS_64(iw->fast_reg_en, IRDMAQPC_FASTREGEN) |
- LS_64(iw->priv_mode_en, IRDMAQPC_PRIVEN) |
- LS_64(info->stats_idx_valid, IRDMAQPC_USESTATSINSTANCE) |
- LS_64(1, IRDMAQPC_IWARPMODE) |
- LS_64(iw->rcv_mark_en, IRDMAQPC_RCVMARKERS) |
- LS_64(iw->align_hdrs, IRDMAQPC_ALIGNHDRS) |
- LS_64(iw->rcv_no_mpa_crc, IRDMAQPC_RCVNOMPACRC) |
- LS_64(iw->rcv_mark_offset, IRDMAQPC_RCVMARKOFFSET) |
- LS_64(iw->snd_mark_offset, IRDMAQPC_SNDMARKOFFSET) |
- LS_64(iw->timely_en, IRDMAQPC_TIMELYENABLE));
+ FIELD_PREP(IRDMAQPC_ORDSIZE, iw->ord_size) |
+ FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(iw->ird_size)) |
+ FIELD_PREP(IRDMAQPC_WRRDRSPOK, iw->wr_rdresp_en) |
+ FIELD_PREP(IRDMAQPC_RDOK, iw->rd_en) |
+ FIELD_PREP(IRDMAQPC_SNDMARKERS, iw->snd_mark_en) |
+ FIELD_PREP(IRDMAQPC_BINDEN, iw->bind_en) |
+ FIELD_PREP(IRDMAQPC_FASTREGEN, iw->fast_reg_en) |
+ FIELD_PREP(IRDMAQPC_PRIVEN, iw->priv_mode_en) |
+ FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
+ FIELD_PREP(IRDMAQPC_IWARPMODE, 1) |
+ FIELD_PREP(IRDMAQPC_RCVMARKERS, iw->rcv_mark_en) |
+ FIELD_PREP(IRDMAQPC_ALIGNHDRS, iw->align_hdrs) |
+ FIELD_PREP(IRDMAQPC_RCVNOMPACRC, iw->rcv_no_mpa_crc) |
+ FIELD_PREP(IRDMAQPC_RCVMARKOFFSET, iw->rcv_mark_offset) |
+ FIELD_PREP(IRDMAQPC_SNDMARKOFFSET, iw->snd_mark_offset) |
+ FIELD_PREP(IRDMAQPC_TIMELYENABLE, iw->timely_en));
}
if (info->tcp_info_valid) {
- qw0 |= LS_64(tcp->ipv4, IRDMAQPC_IPV4) |
- LS_64(tcp->no_nagle, IRDMAQPC_NONAGLE) |
- LS_64(tcp->insert_vlan_tag, IRDMAQPC_INSERTVLANTAG) |
- LS_64(tcp->time_stamp, IRDMAQPC_TIMESTAMP) |
- LS_64(tcp->cwnd_inc_limit, IRDMAQPC_LIMIT) |
- LS_64(tcp->drop_ooo_seg, IRDMAQPC_DROPOOOSEG) |
- LS_64(tcp->dup_ack_thresh, IRDMAQPC_DUPACK_THRESH);
+ qw0 |= FIELD_PREP(IRDMAQPC_IPV4, tcp->ipv4) |
+ FIELD_PREP(IRDMAQPC_NONAGLE, tcp->no_nagle) |
+ FIELD_PREP(IRDMAQPC_INSERTVLANTAG,
+ tcp->insert_vlan_tag) |
+ FIELD_PREP(IRDMAQPC_TIMESTAMP, tcp->time_stamp) |
+ FIELD_PREP(IRDMAQPC_LIMIT, tcp->cwnd_inc_limit) |
+ FIELD_PREP(IRDMAQPC_DROPOOOSEG, tcp->drop_ooo_seg) |
+ FIELD_PREP(IRDMAQPC_DUPACK_THRESH, tcp->dup_ack_thresh);
if (iw->ecn_en || iw->dctcp_en) {
tcp->tos &= ~ECN_CODE_PT_MASK;
tcp->tos |= ECN_CODE_PT_VAL;
}
- qw3 |= LS_64(tcp->ttl, IRDMAQPC_TTL) |
- LS_64(tcp->avoid_stretch_ack, IRDMAQPC_AVOIDSTRETCHACK) |
- LS_64(tcp->tos, IRDMAQPC_TOS) |
- LS_64(tcp->src_port, IRDMAQPC_SRCPORTNUM) |
- LS_64(tcp->dst_port, IRDMAQPC_DESTPORTNUM);
+ qw3 |= FIELD_PREP(IRDMAQPC_TTL, tcp->ttl) |
+ FIELD_PREP(IRDMAQPC_AVOIDSTRETCHACK, tcp->avoid_stretch_ack) |
+ FIELD_PREP(IRDMAQPC_TOS, tcp->tos) |
+ FIELD_PREP(IRDMAQPC_SRCPORTNUM, tcp->src_port) |
+ FIELD_PREP(IRDMAQPC_DESTPORTNUM, tcp->dst_port);
if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
- qw3 |= LS_64(tcp->src_mac_addr_idx,
- IRDMAQPC_GEN1_SRCMACADDRIDX);
+ qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX, tcp->src_mac_addr_idx);
qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
}
set_64bit_val(qp_ctx, IRDMA_BYTE_32,
- LS_64(tcp->dest_ip_addr[2], IRDMAQPC_DESTIPADDR2) |
- LS_64(tcp->dest_ip_addr[3], IRDMAQPC_DESTIPADDR3));
+ FIELD_PREP(IRDMAQPC_DESTIPADDR2, tcp->dest_ip_addr[2]) |
+ FIELD_PREP(IRDMAQPC_DESTIPADDR3, tcp->dest_ip_addr[3]));
set_64bit_val(qp_ctx, IRDMA_BYTE_40,
- LS_64(tcp->dest_ip_addr[0], IRDMAQPC_DESTIPADDR0) |
- LS_64(tcp->dest_ip_addr[1], IRDMAQPC_DESTIPADDR1));
+ FIELD_PREP(IRDMAQPC_DESTIPADDR0, tcp->dest_ip_addr[0]) |
+ FIELD_PREP(IRDMAQPC_DESTIPADDR1, tcp->dest_ip_addr[1]));
set_64bit_val(qp_ctx, IRDMA_BYTE_48,
- LS_64(tcp->snd_mss, IRDMAQPC_SNDMSS) |
- LS_64(tcp->syn_rst_handling, IRDMAQPC_SYN_RST_HANDLING) |
- LS_64(tcp->vlan_tag, IRDMAQPC_VLANTAG) |
- LS_64(tcp->arp_idx, IRDMAQPC_ARPIDX));
- qw7 |= LS_64(tcp->flow_label, IRDMAQPC_FLOWLABEL) |
- LS_64(tcp->wscale, IRDMAQPC_WSCALE) |
- LS_64(tcp->ignore_tcp_opt, IRDMAQPC_IGNORE_TCP_OPT) |
- LS_64(tcp->ignore_tcp_uns_opt,
- IRDMAQPC_IGNORE_TCP_UNS_OPT) |
- LS_64(tcp->tcp_state, IRDMAQPC_TCPSTATE) |
- LS_64(tcp->rcv_wscale, IRDMAQPC_RCVSCALE) |
- LS_64(tcp->snd_wscale, IRDMAQPC_SNDSCALE);
+ FIELD_PREP(IRDMAQPC_SNDMSS, tcp->snd_mss) |
+ FIELD_PREP(IRDMAQPC_SYN_RST_HANDLING, tcp->syn_rst_handling) |
+ FIELD_PREP(IRDMAQPC_VLANTAG, tcp->vlan_tag) |
+ FIELD_PREP(IRDMAQPC_ARPIDX, tcp->arp_idx));
+ qw7 |= FIELD_PREP(IRDMAQPC_FLOWLABEL, tcp->flow_label) |
+ FIELD_PREP(IRDMAQPC_WSCALE, tcp->wscale) |
+ FIELD_PREP(IRDMAQPC_IGNORE_TCP_OPT,
+ tcp->ignore_tcp_opt) |
+ FIELD_PREP(IRDMAQPC_IGNORE_TCP_UNS_OPT,
+ tcp->ignore_tcp_uns_opt) |
+ FIELD_PREP(IRDMAQPC_TCPSTATE, tcp->tcp_state) |
+ FIELD_PREP(IRDMAQPC_RCVSCALE, tcp->rcv_wscale) |
+ FIELD_PREP(IRDMAQPC_SNDSCALE, tcp->snd_wscale);
set_64bit_val(qp_ctx, IRDMA_BYTE_72,
- LS_64(tcp->time_stamp_recent, IRDMAQPC_TIMESTAMP_RECENT) |
- LS_64(tcp->time_stamp_age, IRDMAQPC_TIMESTAMP_AGE));
+ FIELD_PREP(IRDMAQPC_TIMESTAMP_RECENT, tcp->time_stamp_recent) |
+ FIELD_PREP(IRDMAQPC_TIMESTAMP_AGE, tcp->time_stamp_age));
set_64bit_val(qp_ctx, IRDMA_BYTE_80,
- LS_64(tcp->snd_nxt, IRDMAQPC_SNDNXT) |
- LS_64(tcp->snd_wnd, IRDMAQPC_SNDWND));
+ FIELD_PREP(IRDMAQPC_SNDNXT, tcp->snd_nxt) |
+ FIELD_PREP(IRDMAQPC_SNDWND, tcp->snd_wnd));
set_64bit_val(qp_ctx, IRDMA_BYTE_88,
- LS_64(tcp->rcv_nxt, IRDMAQPC_RCVNXT) |
- LS_64(tcp->rcv_wnd, IRDMAQPC_RCVWND));
+ FIELD_PREP(IRDMAQPC_RCVNXT, tcp->rcv_nxt) |
+ FIELD_PREP(IRDMAQPC_RCVWND, tcp->rcv_wnd));
set_64bit_val(qp_ctx, IRDMA_BYTE_96,
- LS_64(tcp->snd_max, IRDMAQPC_SNDMAX) |
- LS_64(tcp->snd_una, IRDMAQPC_SNDUNA));
+ FIELD_PREP(IRDMAQPC_SNDMAX, tcp->snd_max) |
+ FIELD_PREP(IRDMAQPC_SNDUNA, tcp->snd_una));
set_64bit_val(qp_ctx, IRDMA_BYTE_104,
- LS_64(tcp->srtt, IRDMAQPC_SRTT) |
- LS_64(tcp->rtt_var, IRDMAQPC_RTTVAR));
+ FIELD_PREP(IRDMAQPC_SRTT, tcp->srtt) |
+ FIELD_PREP(IRDMAQPC_RTTVAR, tcp->rtt_var));
set_64bit_val(qp_ctx, IRDMA_BYTE_112,
- LS_64(tcp->ss_thresh, IRDMAQPC_SSTHRESH) |
- LS_64(tcp->cwnd, IRDMAQPC_CWND));
+ FIELD_PREP(IRDMAQPC_SSTHRESH, tcp->ss_thresh) |
+ FIELD_PREP(IRDMAQPC_CWND, tcp->cwnd));
set_64bit_val(qp_ctx, IRDMA_BYTE_120,
- LS_64(tcp->snd_wl1, IRDMAQPC_SNDWL1) |
- LS_64(tcp->snd_wl2, IRDMAQPC_SNDWL2));
- qw16 |= LS_64(tcp->max_snd_window, IRDMAQPC_MAXSNDWND) |
- LS_64(tcp->rexmit_thresh, IRDMAQPC_REXMIT_THRESH);
+ FIELD_PREP(IRDMAQPC_SNDWL1, tcp->snd_wl1) |
+ FIELD_PREP(IRDMAQPC_SNDWL2, tcp->snd_wl2));
+ qw16 |= FIELD_PREP(IRDMAQPC_MAXSNDWND, tcp->max_snd_window) |
+ FIELD_PREP(IRDMAQPC_REXMIT_THRESH, tcp->rexmit_thresh);
set_64bit_val(qp_ctx, IRDMA_BYTE_184,
- LS_64(tcp->local_ipaddr[3], IRDMAQPC_LOCAL_IPADDR3) |
- LS_64(tcp->local_ipaddr[2], IRDMAQPC_LOCAL_IPADDR2));
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, tcp->local_ipaddr[3]) |
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, tcp->local_ipaddr[2]));
set_64bit_val(qp_ctx, IRDMA_BYTE_192,
- LS_64(tcp->local_ipaddr[1], IRDMAQPC_LOCAL_IPADDR1) |
- LS_64(tcp->local_ipaddr[0], IRDMAQPC_LOCAL_IPADDR0));
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, tcp->local_ipaddr[1]) |
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, tcp->local_ipaddr[0]));
set_64bit_val(qp_ctx, IRDMA_BYTE_200,
- LS_64(iw->t_high, IRDMAQPC_THIGH) |
- LS_64(iw->t_low, IRDMAQPC_TLOW));
+ FIELD_PREP(IRDMAQPC_THIGH, iw->t_high) |
+ FIELD_PREP(IRDMAQPC_TLOW, iw->t_low));
set_64bit_val(qp_ctx, IRDMA_BYTE_208,
- LS_64(info->rem_endpoint_idx, IRDMAQPC_REMENDPOINTIDX));
+ FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
}
set_64bit_val(qp_ctx, IRDMA_BYTE_0, qw0);
@@ -1145,6 +1155,9 @@ irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
u64 hdr;
enum irdma_page_size page_size;
+ if (!info->total_len && !info->all_memory)
+ return -EINVAL;
+
if (info->page_size == 0x40000000)
page_size = IRDMA_PAGE_SIZE_1G;
else if (info->page_size == 0x200000)
@@ -1159,25 +1172,25 @@ irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
set_64bit_val(wqe, IRDMA_BYTE_8,
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) |
- LS_64(info->total_len, IRDMA_CQPSQ_STAG_STAGLEN));
+ FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len));
set_64bit_val(wqe, IRDMA_BYTE_16,
- LS_64(info->stag_idx, IRDMA_CQPSQ_STAG_IDX));
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
set_64bit_val(wqe, IRDMA_BYTE_40,
- LS_64(info->hmc_fcn_index, IRDMA_CQPSQ_STAG_HMCFNIDX));
+ FIELD_PREP(IRDMA_CQPSQ_STAG_HMCFNIDX, info->hmc_fcn_index));
if (info->chunk_size)
set_64bit_val(wqe, IRDMA_BYTE_48,
- LS_64(info->first_pm_pbl_idx, IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX));
-
- hdr = LS_64(IRDMA_CQP_OP_ALLOC_STAG, IRDMA_CQPSQ_OPCODE) |
- LS_64(1, IRDMA_CQPSQ_STAG_MR) |
- LS_64(info->access_rights, IRDMA_CQPSQ_STAG_ARIGHTS) |
- LS_64(info->chunk_size, IRDMA_CQPSQ_STAG_LPBLSIZE) |
- LS_64(page_size, IRDMA_CQPSQ_STAG_HPAGESIZE) |
- LS_64(info->remote_access, IRDMA_CQPSQ_STAG_REMACCENABLED) |
- LS_64(info->use_hmc_fcn_index, IRDMA_CQPSQ_STAG_USEHMCFNIDX) |
- LS_64(info->use_pf_rid, IRDMA_CQPSQ_STAG_USEPFRID) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_idx));
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, info->remote_access) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
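irdma_sc_alloc_stag() and irdma_sc_mr_reg_non_shared() both gain the same early guard: a zero total_len is now rejected unless the new all_memory flag marks the registration as deliberately covering everything. Alongside it, the raw page_size byte count is mapped to the hardware enum; condensed from the hunks here, with the 4K fallback presumed since the diff elides it:

	if (!info->total_len && !info->all_memory)
		return -EINVAL;                  /* zero length only valid with all_memory */

	if (info->page_size == 0x40000000)       /* 1 GB page */
		page_size = IRDMA_PAGE_SIZE_1G;
	else if (info->page_size == 0x200000)    /* 2 MB page */
		page_size = IRDMA_PAGE_SIZE_2M;
	else
		page_size = IRDMA_PAGE_SIZE_4K;  /* presumed fallback */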
@@ -1211,6 +1224,9 @@ irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
u8 addr_type;
enum irdma_page_size page_size;
+ if (!info->total_len && !info->all_memory)
+ return -EINVAL;
+
if (info->page_size == 0x40000000)
page_size = IRDMA_PAGE_SIZE_1G;
else if (info->page_size == 0x200000)
@@ -1240,30 +1256,30 @@ irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
(info->addr_type == IRDMA_ADDR_TYPE_VA_BASED ?
info->va : fbo));
set_64bit_val(wqe, IRDMA_BYTE_8,
- LS_64(info->total_len, IRDMA_CQPSQ_STAG_STAGLEN) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len) |
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
set_64bit_val(wqe, IRDMA_BYTE_16,
- LS_64(info->stag_key, IRDMA_CQPSQ_STAG_KEY) |
- LS_64(info->stag_idx, IRDMA_CQPSQ_STAG_IDX));
+ FIELD_PREP(IRDMA_CQPSQ_STAG_KEY, info->stag_key) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
if (!info->chunk_size)
set_64bit_val(wqe, IRDMA_BYTE_32, info->reg_addr_pa);
else
set_64bit_val(wqe, IRDMA_BYTE_48,
- LS_64(info->first_pm_pbl_index, IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX));
+ FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_index));
set_64bit_val(wqe, IRDMA_BYTE_40, info->hmc_fcn_index);
addr_type = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ? 1 : 0;
- hdr = LS_64(IRDMA_CQP_OP_REG_MR, IRDMA_CQPSQ_OPCODE) |
- LS_64(1, IRDMA_CQPSQ_STAG_MR) |
- LS_64(info->chunk_size, IRDMA_CQPSQ_STAG_LPBLSIZE) |
- LS_64(page_size, IRDMA_CQPSQ_STAG_HPAGESIZE) |
- LS_64(info->access_rights, IRDMA_CQPSQ_STAG_ARIGHTS) |
- LS_64(remote_access, IRDMA_CQPSQ_STAG_REMACCENABLED) |
- LS_64(addr_type, IRDMA_CQPSQ_STAG_VABASEDTO) |
- LS_64(info->use_hmc_fcn_index, IRDMA_CQPSQ_STAG_USEHMCFNIDX) |
- LS_64(info->use_pf_rid, IRDMA_CQPSQ_STAG_USEPFRID) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_REG_MR) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, remote_access) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_VABASEDTO, addr_type) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -1300,11 +1316,11 @@ irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
set_64bit_val(wqe, IRDMA_BYTE_8,
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
set_64bit_val(wqe, IRDMA_BYTE_16,
- LS_64(info->stag_idx, IRDMA_CQPSQ_STAG_IDX));
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
- hdr = LS_64(IRDMA_CQP_OP_DEALLOC_STAG, IRDMA_CQPSQ_OPCODE) |
- LS_64(info->mr, IRDMA_CQPSQ_STAG_MR) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DEALLOC_STAG) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_MR, info->mr) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -1341,13 +1357,13 @@ irdma_sc_mw_alloc(struct irdma_sc_dev *dev,
set_64bit_val(wqe, IRDMA_BYTE_8,
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
set_64bit_val(wqe, IRDMA_BYTE_16,
- LS_64(info->mw_stag_index, IRDMA_CQPSQ_STAG_IDX));
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->mw_stag_index));
- hdr = LS_64(IRDMA_CQP_OP_ALLOC_STAG, IRDMA_CQPSQ_OPCODE) |
- LS_64(info->mw_wide, IRDMA_CQPSQ_STAG_MWTYPE) |
- LS_64(info->mw1_bind_dont_vldt_key,
- IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_MWTYPE, info->mw_wide) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY,
+ info->mw1_bind_dont_vldt_key) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -1374,6 +1390,7 @@ irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
u64 temp, hdr;
__le64 *wqe;
u32 wqe_idx;
+ u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;
enum irdma_page_size page_size;
struct irdma_post_sq_info sq_info = {0};
@@ -1388,13 +1405,10 @@ irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
sq_info.signaled = info->signaled;
sq_info.push_wqe = info->push_wqe;
- wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx,
- IRDMA_QP_WQE_MIN_QUANTA, 0, &sq_info);
+ wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, &quanta, 0, &sq_info);
if (!wqe)
return -ENOSPC;
- irdma_clr_wqes(&qp->qp_uk, wqe_idx);
-
qp->qp_uk.sq_wrtrk_array[wqe_idx].signaled = info->signaled;
irdma_debug(qp->dev, IRDMA_DEBUG_MR,
"wr_id[%llxh] wqe_idx[%04d] location[%p]\n", (unsigned long long)info->wr_id,
@@ -1404,39 +1418,37 @@ irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
(uintptr_t)info->va : info->fbo;
set_64bit_val(wqe, IRDMA_BYTE_0, temp);
- temp = RS_64(info->first_pm_pbl_index >> 16, IRDMAQPSQ_FIRSTPMPBLIDXHI);
+ temp = FIELD_GET(IRDMAQPSQ_FIRSTPMPBLIDXHI,
+ info->first_pm_pbl_index >> 16);
set_64bit_val(wqe, IRDMA_BYTE_8,
- LS_64(temp, IRDMAQPSQ_FIRSTPMPBLIDXHI) |
- LS_64(info->reg_addr_pa >> IRDMAQPSQ_PBLADDR_S, IRDMAQPSQ_PBLADDR));
+ FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXHI, temp) |
+ FIELD_PREP(IRDMAQPSQ_PBLADDR, info->reg_addr_pa >> IRDMA_HW_PAGE_SHIFT));
set_64bit_val(wqe, IRDMA_BYTE_16,
info->total_len |
- LS_64(info->first_pm_pbl_index, IRDMAQPSQ_FIRSTPMPBLIDXLO));
-
- hdr = LS_64(info->stag_key, IRDMAQPSQ_STAGKEY) |
- LS_64(info->stag_idx, IRDMAQPSQ_STAGINDEX) |
- LS_64(IRDMAQP_OP_FAST_REGISTER, IRDMAQPSQ_OPCODE) |
- LS_64(info->chunk_size, IRDMAQPSQ_LPBLSIZE) |
- LS_64(page_size, IRDMAQPSQ_HPAGESIZE) |
- LS_64(info->access_rights, IRDMAQPSQ_STAGRIGHTS) |
- LS_64(info->addr_type, IRDMAQPSQ_VABASEDTO) |
- LS_64((sq_info.push_wqe ? 1 : 0), IRDMAQPSQ_PUSHWQE) |
- LS_64(info->read_fence, IRDMAQPSQ_READFENCE) |
- LS_64(info->local_fence, IRDMAQPSQ_LOCALFENCE) |
- LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |
- LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID);
+ FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXLO, info->first_pm_pbl_index));
+
+ hdr = FIELD_PREP(IRDMAQPSQ_STAGKEY, info->stag_key) |
+ FIELD_PREP(IRDMAQPSQ_STAGINDEX, info->stag_idx) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_FAST_REGISTER) |
+ FIELD_PREP(IRDMAQPSQ_LPBLSIZE, info->chunk_size) |
+ FIELD_PREP(IRDMAQPSQ_HPAGESIZE, page_size) |
+ FIELD_PREP(IRDMAQPSQ_STAGRIGHTS, info->access_rights) |
+ FIELD_PREP(IRDMAQPSQ_VABASEDTO, info->addr_type) |
+ FIELD_PREP(IRDMAQPSQ_PUSHWQE, (sq_info.push_wqe ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "FAST_REG WQE", wqe,
IRDMA_QP_WQE_MIN_SIZE);
- if (sq_info.push_wqe) {
- irdma_qp_push_wqe(&qp->qp_uk, wqe, IRDMA_QP_WQE_MIN_QUANTA,
- wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(&qp->qp_uk);
- }
+ if (sq_info.push_wqe)
+ irdma_qp_push_wqe(&qp->qp_uk, wqe, quanta, wqe_idx, post_sq);
+ else if (post_sq)
+ irdma_uk_qp_post_wr(&qp->qp_uk);
return 0;
}
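irdma_sc_mr_fast_register() now asks for its WQE through the quanta-by-pointer form of irdma_qp_get_next_send_wqe(): the caller requests IRDMA_QP_WQE_MIN_QUANTA and the uk layer may raise the count to whatever the ring actually consumed, which the push path then uses instead of a hard-coded constant. The explicit irdma_clr_wqes() call disappears here, presumably folded into the allocation path. A sketch of the convention, with the signature inferred from this diff:

	u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;    /* request the minimum */
	u32 wqe_idx;
	__le64 *wqe;

	/* callee may bump quanta if it had to allocate a larger slot */
	wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, &quanta, 0, &sq_info);
	if (!wqe)
		return -ENOSPC;

	/* push exactly as many quanta as were consumed */
	if (sq_info.push_wqe)
		irdma_qp_push_wqe(&qp->qp_uk, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(&qp->qp_uk);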
@@ -1456,9 +1468,9 @@ irdma_sc_gen_rts_ae(struct irdma_sc_qp *qp)
wqe = qp_uk->sq_base[1].elem;
- hdr = LS_64(IRDMAQP_OP_NOP, IRDMAQPSQ_OPCODE) |
- LS_64(1, IRDMAQPSQ_LOCALFENCE) |
- LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID);
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, 1) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -1466,8 +1478,8 @@ irdma_sc_gen_rts_ae(struct irdma_sc_qp *qp)
IRDMA_QP_WQE_MIN_SIZE);
wqe = qp_uk->sq_base[2].elem;
- hdr = LS_64(IRDMAQP_OP_GEN_RTS_AE, IRDMAQPSQ_OPCODE) |
- LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID);
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_GEN_RTS_AE) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -1482,7 +1494,7 @@ irdma_sc_gen_rts_ae(struct irdma_sc_qp *qp)
* @size: size of lsmm buffer
* @stag: stag of lsmm buffer
*/
-int
+void
irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
irdma_stag stag)
{
@@ -1496,20 +1508,20 @@ irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)lsmm_buf);
if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
set_64bit_val(wqe, IRDMA_BYTE_8,
- LS_64(size, IRDMAQPSQ_GEN1_FRAG_LEN) |
- LS_64(stag, IRDMAQPSQ_GEN1_FRAG_STAG));
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size) |
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, stag));
} else {
set_64bit_val(wqe, IRDMA_BYTE_8,
- LS_64(size, IRDMAQPSQ_FRAG_LEN) |
- LS_64(stag, IRDMAQPSQ_FRAG_STAG) |
- LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID));
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_STAG, stag) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
}
set_64bit_val(wqe, IRDMA_BYTE_16, 0);
- hdr = LS_64(IRDMAQP_OP_RDMA_SEND, IRDMAQPSQ_OPCODE) |
- LS_64(1, IRDMAQPSQ_STREAMMODE) |
- LS_64(1, IRDMAQPSQ_WAITFORRCVPDU) |
- LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID);
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) |
+ FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) |
+ FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -1519,8 +1531,6 @@ irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
irdma_sc_gen_rts_ae(qp);
-
- return 0;
}
/**
@@ -1529,7 +1539,7 @@ irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
* @lsmm_buf: buffer with lsmm message
* @size: size of lsmm buffer
*/
-int
+void
irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size)
{
__le64 *wqe;
@@ -1543,25 +1553,23 @@ irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size)
if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1)
set_64bit_val(wqe, IRDMA_BYTE_8,
- LS_64(size, IRDMAQPSQ_GEN1_FRAG_LEN));
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size));
else
set_64bit_val(wqe, IRDMA_BYTE_8,
- LS_64(size, IRDMAQPSQ_FRAG_LEN) |
- LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID));
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
set_64bit_val(wqe, IRDMA_BYTE_16, 0);
- hdr = LS_64(IRDMAQP_OP_RDMA_SEND, IRDMAQPSQ_OPCODE) |
- LS_64(1, IRDMAQPSQ_STREAMMODE) |
- LS_64(1, IRDMAQPSQ_WAITFORRCVPDU) |
- LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID);
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) |
+ FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) |
+ FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "SEND_LSMM_NOSTAG WQE", wqe,
IRDMA_QP_WQE_MIN_SIZE);
-
- return 0;
}
/**
@@ -1569,7 +1577,7 @@ irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size)
* @qp: sc qp struct
* @read: Do read0 or write0
*/
-int
+void
irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read)
{
__le64 *wqe;
@@ -1584,26 +1592,25 @@ irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read)
if (read) {
if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
set_64bit_val(wqe, IRDMA_BYTE_8,
- LS_64(0xabcd, IRDMAQPSQ_GEN1_FRAG_STAG));
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, 0xabcd));
} else {
set_64bit_val(wqe, IRDMA_BYTE_8,
- (u64)0xabcd | LS_64(qp->qp_uk.swqe_polarity,
- IRDMAQPSQ_VALID));
+ (u64)0xabcd | FIELD_PREP(IRDMAQPSQ_VALID,
+ qp->qp_uk.swqe_polarity));
}
- hdr = LS_64(0x1234, IRDMAQPSQ_REMSTAG) |
- LS_64(IRDMAQP_OP_RDMA_READ, IRDMAQPSQ_OPCODE) |
- LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID);
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, 0x1234) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_READ) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
} else {
if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
set_64bit_val(wqe, IRDMA_BYTE_8, 0);
} else {
set_64bit_val(wqe, IRDMA_BYTE_8,
- LS_64(qp->qp_uk.swqe_polarity,
- IRDMAQPSQ_VALID));
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
}
- hdr = LS_64(IRDMAQP_OP_RDMA_WRITE, IRDMAQPSQ_OPCODE) |
- LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID);
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_WRITE) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
}
irdma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -1615,8 +1622,6 @@ irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read)
if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
irdma_sc_gen_rts_ae(qp);
-
- return 0;
}
/**
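irdma_sc_send_lsmm(), irdma_sc_send_lsmm_nostag() and irdma_sc_send_rtt() all drop their int return type: each unconditionally returned 0, so callers gained nothing from checking it. The resulting prototypes, as implied by this diff:

	void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
				irdma_stag stag);
	void irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf,
				       u32 size);
	void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read);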
@@ -2009,8 +2014,6 @@ irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
vsi->mtu = info->params->mtu;
vsi->exception_lan_q = info->exception_lan_q;
vsi->vsi_idx = info->pf_data_vsi_num;
- if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
- vsi->fcn_id = info->dev->hmc_fn_id;
irdma_set_qos_info(vsi, info->params);
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
@@ -2029,31 +2032,20 @@ irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
}
/**
- * irdma_get_fcn_id - Return the function id
+ * irdma_get_stats_idx - Return stats index
* @vsi: pointer to the vsi
*/
-static u8 irdma_get_fcn_id(struct irdma_sc_vsi *vsi){
+static u8 irdma_get_stats_idx(struct irdma_sc_vsi *vsi){
struct irdma_stats_inst_info stats_info = {0};
struct irdma_sc_dev *dev = vsi->dev;
- u8 fcn_id = IRDMA_INVALID_FCN_ID;
- u8 start_idx, max_stats, i;
- if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
if (!irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_ALLOCATE,
&stats_info))
return stats_info.stats_idx;
}
- start_idx = 1;
- max_stats = 16;
- for (i = start_idx; i < max_stats; i++)
- if (!dev->fcn_id_array[i]) {
- fcn_id = i;
- dev->fcn_id_array[i] = true;
- break;
- }
-
- return fcn_id;
+ return IRDMA_INVALID_STATS_IDX;
}
/**
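The fcn_id bookkeeping gives way to an explicit stats index: irdma_get_fcn_id() becomes irdma_get_stats_idx(), GEN_2 and later hardware allocates a statistics instance through the CQP, and the GEN_1 software scan of fcn_id_array is deleted outright. Failure is no longer fatal either; irdma_vsi_stats_init() below simply falls back to the function id when no instance is available. The allocate/free calls now pair symmetrically, roughly:

	struct irdma_stats_inst_info stats_info = {0};

	/* setup: ask the CQP for a dedicated stats instance (GEN_2+) */
	if (!irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_ALLOCATE, &stats_info))
		vsi->stats_idx = stats_info.stats_idx;

	/* teardown: return it through the same interface */
	stats_info.stats_idx = vsi->stats_idx;
	irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_FREE, &stats_info);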
@@ -2065,12 +2057,12 @@ int
irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
struct irdma_vsi_stats_info *info)
{
- u8 fcn_id = info->fcn_id;
struct irdma_dma_mem *stats_buff_mem;
vsi->pestat = info->pestat;
vsi->pestat->hw = vsi->dev->hw;
vsi->pestat->vsi = vsi;
+
stats_buff_mem = &vsi->pestat->gather_info.stats_buff_mem;
stats_buff_mem->size = IRDMA_GATHER_STATS_BUF_SIZE * 2;
stats_buff_mem->va = irdma_allocate_dma_mem(vsi->pestat->hw,
@@ -2085,24 +2077,21 @@ irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
IRDMA_GATHER_STATS_BUF_SIZE);
irdma_hw_stats_start_timer(vsi);
- if (info->alloc_fcn_id)
- fcn_id = irdma_get_fcn_id(vsi);
- if (fcn_id == IRDMA_INVALID_FCN_ID)
- goto stats_error;
-
- vsi->stats_fcn_id_alloc = info->alloc_fcn_id;
- vsi->fcn_id = fcn_id;
- if (info->alloc_fcn_id) {
- vsi->pestat->gather_info.use_stats_inst = true;
- vsi->pestat->gather_info.stats_inst_index = fcn_id;
- }
- return 0;
+ /* When stat allocation is not required, default to fcn_id. */
+ vsi->stats_idx = info->fcn_id;
+ if (info->alloc_stats_inst) {
+ u8 stats_idx = irdma_get_stats_idx(vsi);
-stats_error:
- irdma_free_dma_mem(vsi->pestat->hw, stats_buff_mem);
+ if (stats_idx != IRDMA_INVALID_STATS_IDX) {
+ vsi->stats_inst_alloc = true;
+ vsi->stats_idx = stats_idx;
+ vsi->pestat->gather_info.use_stats_inst = true;
+ vsi->pestat->gather_info.stats_inst_index = stats_idx;
+ }
+ }
- return -EIO;
+ return 0;
}
/**
@@ -2113,23 +2102,19 @@ void
irdma_vsi_stats_free(struct irdma_sc_vsi *vsi)
{
struct irdma_stats_inst_info stats_info = {0};
- u8 fcn_id = vsi->fcn_id;
struct irdma_sc_dev *dev = vsi->dev;
- if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
- if (vsi->stats_fcn_id_alloc) {
- stats_info.stats_idx = vsi->fcn_id;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ if (vsi->stats_inst_alloc) {
+ stats_info.stats_idx = vsi->stats_idx;
irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_FREE,
&stats_info);
}
- } else {
- if (vsi->stats_fcn_id_alloc &&
- fcn_id < vsi->dev->hw_attrs.max_stat_inst)
- vsi->dev->fcn_id_array[fcn_id] = false;
}
if (!vsi->pestat)
return;
+
irdma_hw_stats_stop_timer(vsi);
irdma_free_dma_mem(vsi->pestat->hw,
&vsi->pestat->gather_info.stats_buff_mem);
@@ -2179,15 +2164,16 @@ irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
return -ENOSPC;
set_64bit_val(wqe, IRDMA_BYTE_40,
- LS_64(info->hmc_fcn_index, IRDMA_CQPSQ_STATS_HMC_FCN_INDEX));
+ FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fcn_index));
set_64bit_val(wqe, IRDMA_BYTE_32, info->stats_buff_mem.pa);
- temp = LS_64(cqp->polarity, IRDMA_CQPSQ_STATS_WQEVALID) |
- LS_64(info->use_stats_inst, IRDMA_CQPSQ_STATS_USE_INST) |
- LS_64(info->stats_inst_index, IRDMA_CQPSQ_STATS_INST_INDEX) |
- LS_64(info->use_hmc_fcn_index,
- IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX) |
- LS_64(IRDMA_CQP_OP_GATHER_STATS, IRDMA_CQPSQ_STATS_OP);
+ temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_USE_INST, info->use_stats_inst) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX,
+ info->stats_inst_index) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX,
+ info->use_hmc_fcn_index) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_GATHER_STATS);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, temp);
@@ -2223,12 +2209,13 @@ irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
return -ENOSPC;
set_64bit_val(wqe, IRDMA_BYTE_40,
- LS_64(info->hmc_fn_id, IRDMA_CQPSQ_STATS_HMC_FCN_INDEX));
- temp = LS_64(cqp->polarity, IRDMA_CQPSQ_STATS_WQEVALID) |
- LS_64(alloc, IRDMA_CQPSQ_STATS_ALLOC_INST) |
- LS_64(info->use_hmc_fcn_index, IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX) |
- LS_64(info->stats_idx, IRDMA_CQPSQ_STATS_INST_INDEX) |
- LS_64(IRDMA_CQP_OP_MANAGE_STATS, IRDMA_CQPSQ_STATS_OP);
+ FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fn_id));
+ temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_ALLOC_INST, alloc) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX,
+ info->use_hmc_fcn_index) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX, info->stats_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_MANAGE_STATS);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -2265,13 +2252,14 @@ irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
set_64bit_val(wqe, IRDMA_BYTE_0, temp);
set_64bit_val(wqe, IRDMA_BYTE_40,
- LS_64(info->cnp_up_override, IRDMA_CQPSQ_UP_CNPOVERRIDE) |
- LS_64(info->hmc_fcn_idx, IRDMA_CQPSQ_UP_HMCFCNIDX));
-
- temp = LS_64(cqp->polarity, IRDMA_CQPSQ_UP_WQEVALID) |
- LS_64(info->use_vlan, IRDMA_CQPSQ_UP_USEVLAN) |
- LS_64(info->use_cnp_up_override, IRDMA_CQPSQ_UP_USEOVERRIDE) |
- LS_64(IRDMA_CQP_OP_UP_MAP, IRDMA_CQPSQ_UP_OP);
+ FIELD_PREP(IRDMA_CQPSQ_UP_CNPOVERRIDE, info->cnp_up_override) |
+ FIELD_PREP(IRDMA_CQPSQ_UP_HMCFCNIDX, info->hmc_fcn_idx));
+
+ temp = FIELD_PREP(IRDMA_CQPSQ_UP_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_UP_USEVLAN, info->use_vlan) |
+ FIELD_PREP(IRDMA_CQPSQ_UP_USEOVERRIDE,
+ info->use_cnp_up_override) |
+ FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_UP_MAP);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, temp);
@@ -2303,18 +2291,18 @@ irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
return -ENOSPC;
set_64bit_val(wqe, IRDMA_BYTE_32,
- LS_64(info->vsi, IRDMA_CQPSQ_WS_VSI) |
- LS_64(info->weight, IRDMA_CQPSQ_WS_WEIGHT));
-
- temp = LS_64(cqp->polarity, IRDMA_CQPSQ_WS_WQEVALID) |
- LS_64(node_op, IRDMA_CQPSQ_WS_NODEOP) |
- LS_64(info->enable, IRDMA_CQPSQ_WS_ENABLENODE) |
- LS_64(info->type_leaf, IRDMA_CQPSQ_WS_NODETYPE) |
- LS_64(info->prio_type, IRDMA_CQPSQ_WS_PRIOTYPE) |
- LS_64(info->tc, IRDMA_CQPSQ_WS_TC) |
- LS_64(IRDMA_CQP_OP_WORK_SCHED_NODE, IRDMA_CQPSQ_WS_OP) |
- LS_64(info->parent_id, IRDMA_CQPSQ_WS_PARENTID) |
- LS_64(info->id, IRDMA_CQPSQ_WS_NODEID);
+ FIELD_PREP(IRDMA_CQPSQ_WS_VSI, info->vsi) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_WEIGHT, info->weight));
+
+ temp = FIELD_PREP(IRDMA_CQPSQ_WS_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_NODEOP, node_op) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_ENABLENODE, info->enable) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_NODETYPE, info->type_leaf) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_PRIOTYPE, info->prio_type) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_TC, info->tc) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_OP, IRDMA_CQP_OP_WORK_SCHED_NODE) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_PARENTID, info->parent_id) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_NODEID, info->id);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, temp);
@@ -2364,25 +2352,30 @@ irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
if (info->userflushcode) {
if (flush_rq)
- temp |= LS_64(info->rq_minor_code, IRDMA_CQPSQ_FWQE_RQMNERR) |
- LS_64(info->rq_major_code, IRDMA_CQPSQ_FWQE_RQMJERR);
+ temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMNERR,
+ info->rq_minor_code) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMJERR,
+ info->rq_major_code);
if (flush_sq)
- temp |= LS_64(info->sq_minor_code, IRDMA_CQPSQ_FWQE_SQMNERR) |
- LS_64(info->sq_major_code, IRDMA_CQPSQ_FWQE_SQMJERR);
+ temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMNERR,
+ info->sq_minor_code) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMJERR,
+ info->sq_major_code);
}
set_64bit_val(wqe, IRDMA_BYTE_16, temp);
temp = (info->generate_ae) ?
- info->ae_code | LS_64(info->ae_src, IRDMA_CQPSQ_FWQE_AESOURCE) : 0;
+ info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
+ info->ae_src) : 0;
set_64bit_val(wqe, IRDMA_BYTE_8, temp);
hdr = qp->qp_uk.qp_id |
- LS_64(IRDMA_CQP_OP_FLUSH_WQES, IRDMA_CQPSQ_OPCODE) |
- LS_64(info->generate_ae, IRDMA_CQPSQ_FWQE_GENERATE_AE) |
- LS_64(info->userflushcode, IRDMA_CQPSQ_FWQE_USERFLCODE) |
- LS_64(flush_sq, IRDMA_CQPSQ_FWQE_FLUSHSQ) |
- LS_64(flush_rq, IRDMA_CQPSQ_FWQE_FLUSHRQ) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_FLUSH_WQES) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, info->generate_ae) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_USERFLCODE, info->userflushcode) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHSQ, flush_sq) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHRQ, flush_rq) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -2417,12 +2410,14 @@ irdma_sc_gen_ae(struct irdma_sc_qp *qp,
if (!wqe)
return -ENOSPC;
- temp = info->ae_code | LS_64(info->ae_src, IRDMA_CQPSQ_FWQE_AESOURCE);
+ temp = info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
+ info->ae_src);
set_64bit_val(wqe, IRDMA_BYTE_8, temp);
- hdr = qp->qp_uk.qp_id | LS_64(IRDMA_CQP_OP_GEN_AE, IRDMA_CQPSQ_OPCODE) |
- LS_64(1, IRDMA_CQPSQ_FWQE_GENERATE_AE) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ hdr = qp->qp_uk.qp_id | FIELD_PREP(IRDMA_CQPSQ_OPCODE,
+ IRDMA_CQP_OP_GEN_AE) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -2457,12 +2452,12 @@ irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
set_64bit_val(wqe, IRDMA_BYTE_16, info->buf_pa);
- hdr = LS_64(info->qp_id, IRDMA_CQPSQ_UCTX_QPID) |
- LS_64(IRDMA_CQP_OP_UPLOAD_CONTEXT, IRDMA_CQPSQ_OPCODE) |
- LS_64(info->qp_type, IRDMA_CQPSQ_UCTX_QPTYPE) |
- LS_64(info->raw_format, IRDMA_CQPSQ_UCTX_RAWFORMAT) |
- LS_64(info->freeze_qp, IRDMA_CQPSQ_UCTX_FREEZEQP) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ hdr = FIELD_PREP(IRDMA_CQPSQ_UCTX_QPID, info->qp_id) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPLOAD_CONTEXT) |
+ FIELD_PREP(IRDMA_CQPSQ_UCTX_QPTYPE, info->qp_type) |
+ FIELD_PREP(IRDMA_CQPSQ_UCTX_RAWFORMAT, info->raw_format) |
+ FIELD_PREP(IRDMA_CQPSQ_UCTX_FREEZEQP, info->freeze_qp) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -2499,11 +2494,11 @@ irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
return -ENOSPC;
set_64bit_val(wqe, IRDMA_BYTE_16, info->qs_handle);
- hdr = LS_64(info->push_idx, IRDMA_CQPSQ_MPP_PPIDX) |
- LS_64(info->push_page_type, IRDMA_CQPSQ_MPP_PPTYPE) |
- LS_64(IRDMA_CQP_OP_MANAGE_PUSH_PAGES, IRDMA_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID) |
- LS_64(info->free_page, IRDMA_CQPSQ_MPP_FREE_PAGE);
+ hdr = FIELD_PREP(IRDMA_CQPSQ_MPP_PPIDX, info->push_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_MPP_PPTYPE, info->push_page_type) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_PUSH_PAGES) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_MPP_FREE_PAGE, info->free_page);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -2533,9 +2528,9 @@ irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
if (!wqe)
return -ENOSPC;
- hdr = LS_64(qp->qp_uk.qp_id, IRDMA_CQPSQ_SUSPENDQP_QPID) |
- LS_64(IRDMA_CQP_OP_SUSPEND_QP, IRDMA_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ hdr = FIELD_PREP(IRDMA_CQPSQ_SUSPENDQP_QPID, qp->qp_uk.qp_id) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_SUSPEND_QP) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -2565,11 +2560,11 @@ irdma_sc_resume_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
return -ENOSPC;
set_64bit_val(wqe, IRDMA_BYTE_16,
- LS_64(qp->qs_handle, IRDMA_CQPSQ_RESUMEQP_QSHANDLE));
+ FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QSHANDLE, qp->qs_handle));
- hdr = LS_64(qp->qp_uk.qp_id, IRDMA_CQPSQ_RESUMEQP_QPID) |
- LS_64(IRDMA_CQP_OP_RESUME_QP, IRDMA_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ hdr = FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QPID, qp->qp_uk.qp_id) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_RESUME_QP) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -2671,29 +2666,29 @@ irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch,
set_64bit_val(wqe, IRDMA_BYTE_0, cq->cq_uk.cq_size);
set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1));
set_64bit_val(wqe, IRDMA_BYTE_16,
- LS_64(cq->shadow_read_threshold,
- IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
+ FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
set_64bit_val(wqe, IRDMA_BYTE_32, (cq->virtual_map ? 0 : cq->cq_pa));
set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa);
set_64bit_val(wqe, IRDMA_BYTE_48,
- LS_64((cq->virtual_map ? cq->first_pm_pbl_idx : 0),
- IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX));
+ FIELD_PREP(IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX, (cq->virtual_map ? cq->first_pm_pbl_idx : 0)));
set_64bit_val(wqe, IRDMA_BYTE_56,
- LS_64(cq->tph_val, IRDMA_CQPSQ_TPHVAL) |
- LS_64(cq->vsi->vsi_idx, IRDMA_CQPSQ_VSIIDX));
+ FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
+ FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
hdr = FLD_LS_64(cq->dev, cq->cq_uk.cq_id, IRDMA_CQPSQ_CQ_CQID) |
FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
IRDMA_CQPSQ_CQ_CEQID) |
- LS_64(IRDMA_CQP_OP_CREATE_CQ, IRDMA_CQPSQ_OPCODE) |
- LS_64(cq->pbl_chunk_size, IRDMA_CQPSQ_CQ_LPBLSIZE) |
- LS_64(check_overflow, IRDMA_CQPSQ_CQ_CHKOVERFLOW) |
- LS_64(cq->virtual_map, IRDMA_CQPSQ_CQ_VIRTMAP) |
- LS_64(cq->ceqe_mask, IRDMA_CQPSQ_CQ_ENCEQEMASK) |
- LS_64(cq->ceq_id_valid, IRDMA_CQPSQ_CQ_CEQIDVALID) |
- LS_64(cq->tph_en, IRDMA_CQPSQ_TPHEN) |
- LS_64(cq->cq_uk.avoid_mem_cflct, IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, check_overflow) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT,
+ cq->cq_uk.avoid_mem_cflct) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
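One detail worth noting in the CQ hunks: fields whose placement differs between hardware revisions, such as IRDMA_CQPSQ_CQ_CQID and IRDMA_CQPSQ_CQ_CEQID, keep the device-aware FLD_LS_64() wrapper, while fields with a fixed layout move to plain FIELD_PREP(). Condensed from the hunk above:

	hdr = FLD_LS_64(cq->dev, cq->cq_uk.cq_id, IRDMA_CQPSQ_CQ_CQID) | /* layout varies by hw_rev */
	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |   /* layout fixed */
	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);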
@@ -2738,14 +2733,14 @@ irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq)
hdr = cq->cq_uk.cq_id |
FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
IRDMA_CQPSQ_CQ_CEQID) |
- LS_64(IRDMA_CQP_OP_DESTROY_CQ, IRDMA_CQPSQ_OPCODE) |
- LS_64(cq->pbl_chunk_size, IRDMA_CQPSQ_CQ_LPBLSIZE) |
- LS_64(cq->virtual_map, IRDMA_CQPSQ_CQ_VIRTMAP) |
- LS_64(cq->ceqe_mask, IRDMA_CQPSQ_CQ_ENCEQEMASK) |
- LS_64(cq->ceq_id_valid, IRDMA_CQPSQ_CQ_CEQIDVALID) |
- LS_64(cq->tph_en, IRDMA_CQPSQ_TPHEN) |
- LS_64(cq->cq_uk.avoid_mem_cflct, IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, cq->cq_uk.avoid_mem_cflct) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -2803,25 +2798,25 @@ irdma_sc_cq_modify(struct irdma_sc_cq *cq,
set_64bit_val(wqe, IRDMA_BYTE_0, info->cq_size);
set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1));
set_64bit_val(wqe, IRDMA_BYTE_16,
- LS_64(info->shadow_read_threshold,
- IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
+ FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, info->shadow_read_threshold));
set_64bit_val(wqe, IRDMA_BYTE_32, info->cq_pa);
set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa);
set_64bit_val(wqe, IRDMA_BYTE_48, info->first_pm_pbl_idx);
set_64bit_val(wqe, IRDMA_BYTE_56,
- LS_64(cq->tph_val, IRDMA_CQPSQ_TPHVAL) |
- LS_64(cq->vsi->vsi_idx, IRDMA_CQPSQ_VSIIDX));
+ FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
+ FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
hdr = cq->cq_uk.cq_id |
- LS_64(IRDMA_CQP_OP_MODIFY_CQ, IRDMA_CQPSQ_OPCODE) |
- LS_64(info->cq_resize, IRDMA_CQPSQ_CQ_CQRESIZE) |
- LS_64(info->pbl_chunk_size, IRDMA_CQPSQ_CQ_LPBLSIZE) |
- LS_64(info->check_overflow, IRDMA_CQPSQ_CQ_CHKOVERFLOW) |
- LS_64(info->virtual_map, IRDMA_CQPSQ_CQ_VIRTMAP) |
- LS_64(cq->ceqe_mask, IRDMA_CQPSQ_CQ_ENCEQEMASK) |
- LS_64(cq->tph_en, IRDMA_CQPSQ_TPHEN) |
- LS_64(cq->cq_uk.avoid_mem_cflct, IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_CQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CQRESIZE, info->cq_resize) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, info->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, info->check_overflow) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, info->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
+ FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT,
+ cq->cq_uk.avoid_mem_cflct) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -2840,15 +2835,15 @@ irdma_sc_cq_modify(struct irdma_sc_cq *cq,
* @dev: sc device struct
*/
void
-irdma_check_cqp_progress(struct irdma_cqp_timeout *timeout, struct irdma_sc_dev *dev)
+irdma_check_cqp_progress(struct irdma_cqp_timeout *timeout,
+ struct irdma_sc_dev *dev)
{
if (timeout->compl_cqp_cmds != dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]) {
timeout->compl_cqp_cmds = dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
timeout->count = 0;
- } else {
- if (dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] !=
- timeout->compl_cqp_cmds)
- timeout->count++;
+ } else if (timeout->compl_cqp_cmds !=
+ dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS]) {
+ timeout->count++;
}
}
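irdma_check_cqp_progress() is flattened from a nested if into an else-if chain with unchanged semantics: any advance in the completed-command count restarts the timeout window, and the stall counter only ages while requested commands remain outstanding. A self-contained sketch of the same logic, using the driver's u64/u32 typedefs:

	struct cqp_timeout_sketch {
		u64 compl_cqp_cmds;   /* last observed completion count */
		u32 count;            /* consecutive polls without progress */
	};

	static void
	check_progress_sketch(struct cqp_timeout_sketch *t, u64 completed,
			      u64 requested)
	{
		if (t->compl_cqp_cmds != completed) {
			t->compl_cqp_cmds = completed;  /* progress: reset window */
			t->count = 0;
		} else if (t->compl_cqp_cmds != requested) {
			t->count++;                     /* pending work, no progress */
		}
	}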
@@ -2864,8 +2859,8 @@ irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val,
u32 *tail, u32 *error)
{
*val = readl(cqp->dev->hw_regs[IRDMA_CQPTAIL]);
- *tail = RS_32(*val, IRDMA_CQPTAIL_WQTAIL);
- *error = RS_32(*val, IRDMA_CQPTAIL_CQP_OP_ERR);
+ *tail = FIELD_GET(IRDMA_CQPTAIL_WQTAIL, *val);
+ *error = FIELD_GET(IRDMA_CQPTAIL_CQP_OP_ERR, *val);
}
/**
@@ -2918,13 +2913,16 @@ static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 * buf,
switch (rsrc_idx) {
case IRDMA_HMC_IW_QP:
- obj_info[rsrc_idx].cnt = (u32)RS_64(temp, IRDMA_COMMIT_FPM_QPCNT);
+ obj_info[rsrc_idx].cnt = (u32)FIELD_GET(IRDMA_COMMIT_FPM_QPCNT, temp);
break;
case IRDMA_HMC_IW_CQ:
obj_info[rsrc_idx].cnt = (u32)FLD_RS_64(dev, temp, IRDMA_COMMIT_FPM_CQCNT);
break;
case IRDMA_HMC_IW_APBVT_ENTRY:
- obj_info[rsrc_idx].cnt = 1;
+ if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
+ obj_info[rsrc_idx].cnt = 1;
+ else
+ obj_info[rsrc_idx].cnt = 0;
break;
default:
obj_info[rsrc_idx].cnt = (u32)temp;
@@ -3066,18 +3064,18 @@ irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 * buf,
obj_info = hmc_info->hmc_obj;
get_64bit_val(buf, IRDMA_BYTE_0, &temp);
- hmc_info->first_sd_index = (u16)RS_64(temp, IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX);
- max_pe_sds = (u16)RS_64(temp, IRDMA_QUERY_FPM_MAX_PE_SDS);
+ hmc_info->first_sd_index = (u16)FIELD_GET(IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX, temp);
+ max_pe_sds = (u16)FIELD_GET(IRDMA_QUERY_FPM_MAX_PE_SDS, temp);
hmc_fpm_misc->max_sds = max_pe_sds;
hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
get_64bit_val(buf, 8, &temp);
- obj_info[IRDMA_HMC_IW_QP].max_cnt = (u32)RS_64(temp, IRDMA_QUERY_FPM_MAX_QPS);
+ obj_info[IRDMA_HMC_IW_QP].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_QPS, temp);
size = (u32)RS_64_1(temp, 32);
obj_info[IRDMA_HMC_IW_QP].size = LS_64_1(1, size);
get_64bit_val(buf, 16, &temp);
- obj_info[IRDMA_HMC_IW_CQ].max_cnt = (u32)RS_64(temp, IRDMA_QUERY_FPM_MAX_CQS);
+ obj_info[IRDMA_HMC_IW_CQ].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_CQS, temp);
size = (u32)RS_64_1(temp, 32);
obj_info[IRDMA_HMC_IW_CQ].size = LS_64_1(1, size);
@@ -3093,7 +3091,7 @@ irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 * buf,
get_64bit_val(buf, 64, &temp);
obj_info[IRDMA_HMC_IW_XFFL].max_cnt = (u32)temp;
obj_info[IRDMA_HMC_IW_XFFL].size = 4;
- hmc_fpm_misc->xf_block_size = RS_64(temp, IRDMA_QUERY_FPM_XFBLOCKSIZE);
+ hmc_fpm_misc->xf_block_size = FIELD_GET(IRDMA_QUERY_FPM_XFBLOCKSIZE, temp);
if (!hmc_fpm_misc->xf_block_size)
return -EINVAL;
@@ -3102,7 +3100,7 @@ irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 * buf,
obj_info[IRDMA_HMC_IW_Q1FL].max_cnt = (u32)temp;
obj_info[IRDMA_HMC_IW_Q1FL].size = 4;
- hmc_fpm_misc->q1_block_size = RS_64(temp, IRDMA_QUERY_FPM_Q1BLOCKSIZE);
+ hmc_fpm_misc->q1_block_size = FIELD_GET(IRDMA_QUERY_FPM_Q1BLOCKSIZE, temp);
if (!hmc_fpm_misc->q1_block_size)
return -EINVAL;
@@ -3113,9 +3111,9 @@ irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 * buf,
obj_info[IRDMA_HMC_IW_PBLE].size = 8;
get_64bit_val(buf, 120, &temp);
- hmc_fpm_misc->max_ceqs = RS_64(temp, IRDMA_QUERY_FPM_MAX_CEQS);
- hmc_fpm_misc->ht_multiplier = RS_64(temp, IRDMA_QUERY_FPM_HTMULTIPLIER);
- hmc_fpm_misc->timer_bucket = RS_64(temp, IRDMA_QUERY_FPM_TIMERBUCKET);
+ hmc_fpm_misc->max_ceqs = FIELD_GET(IRDMA_QUERY_FPM_MAX_CEQS, temp);
+ hmc_fpm_misc->ht_multiplier = FIELD_GET(IRDMA_QUERY_FPM_HTMULTIPLIER, temp);
+ hmc_fpm_misc->timer_bucket = FIELD_GET(IRDMA_QUERY_FPM_TIMERBUCKET, temp);
if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
return 0;
irdma_sc_decode_fpm_query(buf, 96, obj_info, IRDMA_HMC_IW_FSIMC);
@@ -3125,7 +3123,7 @@ irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 * buf,
get_64bit_val(buf, IRDMA_BYTE_136, &temp);
obj_info[IRDMA_HMC_IW_RRFFL].max_cnt = (u32)temp;
obj_info[IRDMA_HMC_IW_RRFFL].size = 4;
- hmc_fpm_misc->rrf_block_size = RS_64(temp, IRDMA_QUERY_FPM_RRFBLOCKSIZE);
+ hmc_fpm_misc->rrf_block_size = FIELD_GET(IRDMA_QUERY_FPM_RRFBLOCKSIZE, temp);
if (!hmc_fpm_misc->rrf_block_size &&
obj_info[IRDMA_HMC_IW_RRFFL].max_cnt)
return -EINVAL;
@@ -3137,7 +3135,7 @@ irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 * buf,
get_64bit_val(buf, IRDMA_BYTE_168, &temp);
obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp;
obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4;
- hmc_fpm_misc->ooiscf_block_size = RS_64(temp, IRDMA_QUERY_FPM_OOISCFBLOCKSIZE);
+ hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
if (!hmc_fpm_misc->ooiscf_block_size &&
obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
return -EINVAL;
@@ -3296,45 +3294,48 @@ irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err)
spin_lock_init(&cqp->dev->cqp_lock);
- temp = LS_64(cqp->hw_sq_size, IRDMA_CQPHC_SQSIZE) |
- LS_64(cqp->struct_ver, IRDMA_CQPHC_SVER) |
- LS_64(cqp->disable_packed, IRDMA_CQPHC_DISABLE_PFPDUS) |
- LS_64(cqp->ceqs_per_vf, IRDMA_CQPHC_CEQPERVF);
+ temp = FIELD_PREP(IRDMA_CQPHC_SQSIZE, cqp->hw_sq_size) |
+ FIELD_PREP(IRDMA_CQPHC_SVER, cqp->struct_ver) |
+ FIELD_PREP(IRDMA_CQPHC_DISABLE_PFPDUS, cqp->disable_packed) |
+ FIELD_PREP(IRDMA_CQPHC_CEQPERVF, cqp->ceqs_per_vf);
if (hw_rev >= IRDMA_GEN_2) {
- temp |= LS_64(cqp->rocev2_rto_policy, IRDMA_CQPHC_ROCEV2_RTO_POLICY) |
- LS_64(cqp->protocol_used, IRDMA_CQPHC_PROTOCOL_USED);
+ temp |= FIELD_PREP(IRDMA_CQPHC_ROCEV2_RTO_POLICY,
+ cqp->rocev2_rto_policy) |
+ FIELD_PREP(IRDMA_CQPHC_PROTOCOL_USED,
+ cqp->protocol_used);
}
set_64bit_val(cqp->host_ctx, IRDMA_BYTE_0, temp);
set_64bit_val(cqp->host_ctx, IRDMA_BYTE_8, cqp->sq_pa);
- temp = LS_64(cqp->ena_vf_count, IRDMA_CQPHC_ENABLED_VFS) |
- LS_64(cqp->hmc_profile, IRDMA_CQPHC_HMC_PROFILE);
+ temp = FIELD_PREP(IRDMA_CQPHC_ENABLED_VFS, cqp->ena_vf_count) |
+ FIELD_PREP(IRDMA_CQPHC_HMC_PROFILE, cqp->hmc_profile);
if (hw_rev >= IRDMA_GEN_2)
- temp |= LS_64(cqp->en_rem_endpoint_trk, IRDMA_CQPHC_EN_REM_ENDPOINT_TRK);
+ temp |= FIELD_PREP(IRDMA_CQPHC_EN_REM_ENDPOINT_TRK,
+ cqp->en_rem_endpoint_trk);
set_64bit_val(cqp->host_ctx, IRDMA_BYTE_16, temp);
set_64bit_val(cqp->host_ctx, IRDMA_BYTE_24, (uintptr_t)cqp);
- temp = LS_64(cqp->hw_maj_ver, IRDMA_CQPHC_HW_MAJVER) |
- LS_64(cqp->hw_min_ver, IRDMA_CQPHC_HW_MINVER);
+ temp = FIELD_PREP(IRDMA_CQPHC_HW_MAJVER, cqp->hw_maj_ver) |
+ FIELD_PREP(IRDMA_CQPHC_HW_MINVER, cqp->hw_min_ver);
if (hw_rev >= IRDMA_GEN_2) {
- temp |= LS_64(cqp->dcqcn_params.min_rate, IRDMA_CQPHC_MIN_RATE) |
- LS_64(cqp->dcqcn_params.min_dec_factor, IRDMA_CQPHC_MIN_DEC_FACTOR);
+ temp |= FIELD_PREP(IRDMA_CQPHC_MIN_RATE, cqp->dcqcn_params.min_rate) |
+ FIELD_PREP(IRDMA_CQPHC_MIN_DEC_FACTOR, cqp->dcqcn_params.min_dec_factor);
}
set_64bit_val(cqp->host_ctx, IRDMA_BYTE_32, temp);
set_64bit_val(cqp->host_ctx, IRDMA_BYTE_40, 0);
temp = 0;
if (hw_rev >= IRDMA_GEN_2) {
- temp |= LS_64(cqp->dcqcn_params.dcqcn_t, IRDMA_CQPHC_DCQCN_T) |
- LS_64(cqp->dcqcn_params.rai_factor, IRDMA_CQPHC_RAI_FACTOR) |
- LS_64(cqp->dcqcn_params.hai_factor, IRDMA_CQPHC_HAI_FACTOR);
+ temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_T, cqp->dcqcn_params.dcqcn_t) |
+ FIELD_PREP(IRDMA_CQPHC_RAI_FACTOR, cqp->dcqcn_params.rai_factor) |
+ FIELD_PREP(IRDMA_CQPHC_HAI_FACTOR, cqp->dcqcn_params.hai_factor);
}
set_64bit_val(cqp->host_ctx, IRDMA_BYTE_48, temp);
temp = 0;
if (hw_rev >= IRDMA_GEN_2) {
- temp |= LS_64(cqp->dcqcn_params.dcqcn_b, IRDMA_CQPHC_DCQCN_B) |
- LS_64(cqp->dcqcn_params.dcqcn_f, IRDMA_CQPHC_DCQCN_F) |
- LS_64(cqp->dcqcn_params.cc_cfg_valid, IRDMA_CQPHC_CC_CFG_VALID) |
- LS_64(cqp->dcqcn_params.rreduce_mperiod, IRDMA_CQPHC_RREDUCE_MPERIOD);
+ temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_B, cqp->dcqcn_params.dcqcn_b) |
+ FIELD_PREP(IRDMA_CQPHC_DCQCN_F, cqp->dcqcn_params.dcqcn_f) |
+ FIELD_PREP(IRDMA_CQPHC_CC_CFG_VALID, cqp->dcqcn_params.cc_cfg_valid) |
+ FIELD_PREP(IRDMA_CQPHC_RREDUCE_MPERIOD, cqp->dcqcn_params.rreduce_mperiod);
}
set_64bit_val(cqp->host_ctx, IRDMA_BYTE_56, temp);
irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQP_HOST_CTX WQE",
@@ -3366,8 +3367,8 @@ err:
spin_lock_destroy(&cqp->dev->cqp_lock);
irdma_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
err_code = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
- *min_err = RS_32(err_code, IRDMA_CQPERRCODES_CQP_MINOR_CODE);
- *maj_err = RS_32(err_code, IRDMA_CQPERRCODES_CQP_MAJOR_CODE);
+ *min_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MINOR_CODE, err_code);
+ *maj_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MAJOR_CODE, err_code);
return ret_code;
}
@@ -3425,24 +3426,26 @@ irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
/**
* irdma_sc_cqp_destroy - destroy cqp during close
* @cqp: struct for cqp hw
+ * @free_hwcqp: true for regular cqp destroy; false for reset path
*/
int
-irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp)
+irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp, bool free_hwcqp)
{
u32 cnt = 0, val;
int ret_code = 0;
- writel(0, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
- writel(0, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
- do {
- if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
- ret_code = -ETIMEDOUT;
- break;
- }
- irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count);
- val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
- } while (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_DONE));
-
+ if (free_hwcqp) {
+ writel(0, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
+ writel(0, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
+ do {
+ if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
+ ret_code = -ETIMEDOUT;
+ break;
+ }
+ irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count);
+ val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
+ } while (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_DONE));
+ }
irdma_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
spin_lock_destroy(&cqp->dev->cqp_lock);
return ret_code;
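irdma_sc_cqp_destroy() grows a free_hwcqp flag so the reset path can skip touching hardware: only a regular close zeroes IRDMA_CCQPHIGH/IRDMA_CCQPLOW and polls IRDMA_CCQPSTATUS for completion, while the DMA buffer and lock are released either way. Hypothetical call sites illustrating the split:

	/* normal close: quiesce the CQP in hardware, then free resources */
	irdma_sc_cqp_destroy(cqp, true);

	/* reset path: hardware is already gone, just release host state */
	irdma_sc_cqp_destroy(cqp, false);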
@@ -3461,14 +3464,14 @@ irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
u8 arm_seq_num;
get_64bit_val(ccq->cq_uk.shadow_area, IRDMA_BYTE_32, &temp_val);
- sw_cq_sel = (u16)RS_64(temp_val, IRDMA_CQ_DBSA_SW_CQ_SELECT);
- arm_next_se = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_NEXT_SE);
- arm_seq_num = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_SEQ_NUM);
+ sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
+ arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
+ arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
arm_seq_num++;
- temp_val = LS_64(arm_seq_num, IRDMA_CQ_DBSA_ARM_SEQ_NUM) |
- LS_64(sw_cq_sel, IRDMA_CQ_DBSA_SW_CQ_SELECT) |
- LS_64(arm_next_se, IRDMA_CQ_DBSA_ARM_NEXT_SE) |
- LS_64(1, IRDMA_CQ_DBSA_ARM_NEXT);
+ temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
+ FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, 1);
set_64bit_val(ccq->cq_uk.shadow_area, IRDMA_BYTE_32, temp_val);
irdma_wmb(); /* make sure shadow area is updated before arming */
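
Arming is a read-modify-write of a single 64-bit shadow-area word: the live
fields come out with FIELD_GET, the arm sequence number is bumped, and
everything is re-packed with FIELD_PREP plus the ARM_NEXT bit. A standalone
sketch of just the bit manipulation, with the IRDMA_CQ_DBSA_* layout copied
from the irdma_defs.h hunks below:

    #include <stdint.h>

    typedef uint64_t u64;
    typedef uint16_t u16;
    typedef uint8_t u8;

    #define BIT_ULL(n) (1ULL << (n))
    #define GENMASK_ULL(h, l) \
            ((0xFFFFFFFFFFFFFFFFULL >> (64ULL - ((h) - (l) + 1ULL))) << (l))
    #define FIELD_PREP(mask, val) (((u64)(val) << mask##_S) & (mask))
    #define FIELD_GET(mask, val) (((val) & mask) >> mask##_S)

    #define IRDMA_CQ_DBSA_SW_CQ_SELECT_S 0
    #define IRDMA_CQ_DBSA_SW_CQ_SELECT   GENMASK_ULL(13, 0)
    #define IRDMA_CQ_DBSA_ARM_NEXT_S     14
    #define IRDMA_CQ_DBSA_ARM_NEXT       BIT_ULL(14)
    #define IRDMA_CQ_DBSA_ARM_NEXT_SE_S  15
    #define IRDMA_CQ_DBSA_ARM_NEXT_SE    BIT_ULL(15)
    #define IRDMA_CQ_DBSA_ARM_SEQ_NUM_S  16
    #define IRDMA_CQ_DBSA_ARM_SEQ_NUM    GENMASK_ULL(17, 16)

    /* Bump the 2-bit arm sequence number, preserve the CQ select and
     * solicited-event bits, and set ARM_NEXT. */
    static u64
    rearm_shadow_word(u64 old)
    {
            u8 seq = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, old) + 1;
            u16 sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, old);
            u8 se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, old);

            return FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, seq) |
                   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sel) |
                   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, se) |
                   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, 1);
    }
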
@@ -3499,29 +3502,30 @@ irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
cqe = IRDMA_GET_CURRENT_CQ_ELEM(&ccq->cq_uk);
get_64bit_val(cqe, IRDMA_BYTE_24, &temp);
- polarity = (u8)RS_64(temp, IRDMA_CQ_VALID);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, temp);
if (polarity != ccq->cq_uk.polarity)
return -ENOENT;
get_64bit_val(cqe, IRDMA_BYTE_8, &qp_ctx);
cqp = (struct irdma_sc_cqp *)(irdma_uintptr) qp_ctx;
- info->error = (bool)RS_64(temp, IRDMA_CQ_ERROR);
+ info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, temp);
info->maj_err_code = IRDMA_CQPSQ_MAJ_NO_ERROR;
- info->min_err_code = (u16)RS_64(temp, IRDMA_CQ_MINERR);
+ info->min_err_code = (u16)FIELD_GET(IRDMA_CQ_MINERR, temp);
if (info->error) {
- info->maj_err_code = (u16)RS_64(temp, IRDMA_CQ_MAJERR);
+ info->maj_err_code = (u16)FIELD_GET(IRDMA_CQ_MAJERR, temp);
error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
irdma_debug(cqp->dev, IRDMA_DEBUG_CQP,
"CQPERRCODES error_code[x%08X]\n", error);
}
- wqe_idx = (u32)RS_64(temp, IRDMA_CQ_WQEIDX);
+ wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, temp);
info->scratch = cqp->scratch_array[wqe_idx];
get_64bit_val(cqe, IRDMA_BYTE_16, &temp1);
- info->op_ret_val = (u32)RS_64(temp1, IRDMA_CCQ_OPRETVAL);
+ info->op_ret_val = (u32)FIELD_GET(IRDMA_CCQ_OPRETVAL, temp1);
+
get_64bit_val(cqp->sq_base[wqe_idx].elem, IRDMA_BYTE_24, &temp1);
- info->op_code = (u8)RS_64(temp1, IRDMA_CQPSQ_OPCODE);
+ info->op_code = (u8)FIELD_GET(IRDMA_CQPSQ_OPCODE, temp1);
info->cqp = cqp;
/* move the head for cq */
@@ -3562,9 +3566,6 @@ irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code,
if (cnt++ > 100 * cqp->dev->hw_attrs.max_done_count)
return -ETIMEDOUT;
- if (cqp->dev->no_cqp)
- return -ETIMEDOUT;
-
if (irdma_sc_ccq_get_cqe_info(ccq, &info)) {
irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count);
continue;
@@ -3606,11 +3607,11 @@ irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp,
if (!wqe)
return -ENOSPC;
- hdr = LS_64(info->vf_id, IRDMA_CQPSQ_MHMC_VFIDX) |
- LS_64(IRDMA_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE,
- IRDMA_CQPSQ_OPCODE) |
- LS_64(info->free_fcn, IRDMA_CQPSQ_MHMC_FREEPMFN) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ hdr = FIELD_PREP(IRDMA_CQPSQ_MHMC_VFIDX, info->vf_id) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE,
+ IRDMA_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE) |
+ FIELD_PREP(IRDMA_CQPSQ_MHMC_FREEPMFN, info->free_fcn) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -3647,7 +3648,7 @@ irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp)
*/
static int
irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
- u8 hmc_fn_id,
+ u16 hmc_fn_id,
struct irdma_dma_mem *commit_fpm_mem,
bool post_sq, u8 wait_type)
{
@@ -3663,9 +3664,9 @@ irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
set_64bit_val(wqe, IRDMA_BYTE_16, hmc_fn_id);
set_64bit_val(wqe, IRDMA_BYTE_32, commit_fpm_mem->pa);
- hdr = LS_64(IRDMA_COMMIT_FPM_BUF_SIZE, IRDMA_CQPSQ_BUFSIZE) |
- LS_64(IRDMA_CQP_OP_COMMIT_FPM_VAL, IRDMA_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ hdr = FIELD_PREP(IRDMA_CQPSQ_BUFSIZE, IRDMA_COMMIT_FPM_BUF_SIZE) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_COMMIT_FPM_VAL) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -3710,7 +3711,7 @@ irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp)
*/
static int
irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
- u8 hmc_fn_id,
+ u16 hmc_fn_id,
struct irdma_dma_mem *query_fpm_mem,
bool post_sq, u8 wait_type)
{
@@ -3726,8 +3727,8 @@ irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
set_64bit_val(wqe, IRDMA_BYTE_16, hmc_fn_id);
set_64bit_val(wqe, IRDMA_BYTE_32, query_fpm_mem->pa);
- hdr = LS_64(IRDMA_CQP_OP_QUERY_FPM_VAL, IRDMA_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_QUERY_FPM_VAL) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -3800,7 +3801,6 @@ irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-
static int
irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch,
bool post_sq)
@@ -3819,15 +3819,15 @@ irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch,
set_64bit_val(wqe, IRDMA_BYTE_48,
(ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
set_64bit_val(wqe, IRDMA_BYTE_56,
- LS_64(ceq->tph_val, IRDMA_CQPSQ_TPHVAL) |
- LS_64(ceq->vsi->vsi_idx, IRDMA_CQPSQ_VSIIDX));
- hdr = LS_64(ceq->ceq_id, IRDMA_CQPSQ_CEQ_CEQID) |
- LS_64(IRDMA_CQP_OP_CREATE_CEQ, IRDMA_CQPSQ_OPCODE) |
- LS_64(ceq->pbl_chunk_size, IRDMA_CQPSQ_CEQ_LPBLSIZE) |
- LS_64(ceq->virtual_map, IRDMA_CQPSQ_CEQ_VMAP) |
- LS_64(ceq->itr_no_expire, IRDMA_CQPSQ_CEQ_ITRNOEXPIRE) |
- LS_64(ceq->tph_en, IRDMA_CQPSQ_TPHEN) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ FIELD_PREP(IRDMA_CQPSQ_TPHVAL, ceq->tph_val) |
+ FIELD_PREP(IRDMA_CQPSQ_VSIIDX, ceq->vsi->vsi_idx));
+ hdr = FIELD_PREP(IRDMA_CQPSQ_CEQ_CEQID, ceq->ceq_id) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CEQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_CEQ_ITRNOEXPIRE, ceq->itr_no_expire) |
+ FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -3919,11 +3919,11 @@ irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq)
set_64bit_val(wqe, IRDMA_BYTE_16, ceq->elem_cnt);
set_64bit_val(wqe, IRDMA_BYTE_48, ceq->first_pm_pbl_idx);
hdr = ceq->ceq_id |
- LS_64(IRDMA_CQP_OP_DESTROY_CEQ, IRDMA_CQPSQ_OPCODE) |
- LS_64(ceq->pbl_chunk_size, IRDMA_CQPSQ_CEQ_LPBLSIZE) |
- LS_64(ceq->virtual_map, IRDMA_CQPSQ_CEQ_VMAP) |
- LS_64(ceq->tph_en, IRDMA_CQPSQ_TPHEN) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CEQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -3960,7 +3960,7 @@ irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq)
cq_idx = 0;
ceqe = IRDMA_GET_CURRENT_CEQ_ELEM(ceq);
get_64bit_val(ceqe, IRDMA_BYTE_0, &temp);
- polarity = (u8)RS_64(temp, IRDMA_CEQE_VALID);
+ polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
if (polarity != ceq->polarity)
return NULL;
@@ -4019,13 +4019,13 @@ irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq)
ceqe = IRDMA_GET_CEQ_ELEM_AT_POS(ceq, next);
get_64bit_val(ceqe, IRDMA_BYTE_0, &temp);
- polarity = (u8)RS_64(temp, IRDMA_CEQE_VALID);
+ polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
if (polarity != ceq_polarity)
return;
next_cq = (struct irdma_sc_cq *)(irdma_uintptr) LS_64_1(temp, 1);
if (cq == next_cq)
- set_64bit_val(ceqe, IRDMA_BYTE_0, temp & IRDMA_CEQE_VALID_M);
+ set_64bit_val(ceqe, IRDMA_BYTE_0, temp & IRDMA_CEQE_VALID);
next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, i);
if (!next)
@@ -4094,10 +4094,10 @@ irdma_sc_aeq_create(struct irdma_sc_aeq *aeq, u64 scratch,
set_64bit_val(wqe, IRDMA_BYTE_48,
(aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));
- hdr = LS_64(IRDMA_CQP_OP_CREATE_AEQ, IRDMA_CQPSQ_OPCODE) |
- LS_64(aeq->pbl_chunk_size, IRDMA_CQPSQ_AEQ_LPBLSIZE) |
- LS_64(aeq->virtual_map, IRDMA_CQPSQ_AEQ_VMAP) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_AEQ) |
+ FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -4116,9 +4116,8 @@ irdma_sc_aeq_create(struct irdma_sc_aeq *aeq, u64 scratch,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static int
-irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch,
- bool post_sq)
+int
+irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch, bool post_sq)
{
__le64 *wqe;
struct irdma_sc_cqp *cqp;
@@ -4134,10 +4133,10 @@ irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch,
return -ENOSPC;
set_64bit_val(wqe, IRDMA_BYTE_16, aeq->elem_cnt);
set_64bit_val(wqe, IRDMA_BYTE_48, aeq->first_pm_pbl_idx);
- hdr = LS_64(IRDMA_CQP_OP_DESTROY_AEQ, IRDMA_CQPSQ_OPCODE) |
- LS_64(aeq->pbl_chunk_size, IRDMA_CQPSQ_AEQ_LPBLSIZE) |
- LS_64(aeq->virtual_map, IRDMA_CQPSQ_AEQ_VMAP) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_AEQ) |
+ FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -4160,29 +4159,28 @@ irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
{
u64 temp, compl_ctx;
__le64 *aeqe;
- u16 wqe_idx;
u8 ae_src;
u8 polarity;
aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq);
get_64bit_val(aeqe, IRDMA_BYTE_0, &compl_ctx);
get_64bit_val(aeqe, IRDMA_BYTE_8, &temp);
- polarity = (u8)RS_64(temp, IRDMA_AEQE_VALID);
+ polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp);
if (aeq->polarity != polarity)
return -ENOENT;
irdma_debug_buf(aeq->dev, IRDMA_DEBUG_WQE, "AEQ_ENTRY WQE", aeqe, 16);
- ae_src = (u8)RS_64(temp, IRDMA_AEQE_AESRC);
- wqe_idx = (u16)RS_64(temp, IRDMA_AEQE_WQDESCIDX);
- info->qp_cq_id = (u32)RS_64(temp, IRDMA_AEQE_QPCQID_LOW) |
- ((u32)RS_64(temp, IRDMA_AEQE_QPCQID_HI) << 18);
- info->ae_id = (u16)RS_64(temp, IRDMA_AEQE_AECODE);
- info->tcp_state = (u8)RS_64(temp, IRDMA_AEQE_TCPSTATE);
- info->iwarp_state = (u8)RS_64(temp, IRDMA_AEQE_IWSTATE);
- info->q2_data_written = (u8)RS_64(temp, IRDMA_AEQE_Q2DATA);
- info->aeqe_overflow = (bool)RS_64(temp, IRDMA_AEQE_OVERFLOW);
+ ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC, temp);
+ info->wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp);
+ info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_LOW, temp) |
+ ((u32)FIELD_GET(IRDMA_AEQE_QPCQID_HI, temp) << 18);
+ info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE, temp);
+ info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE, temp);
+ info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE, temp);
+ info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA, temp);
+ info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW, temp);
info->ae_src = ae_src;
switch (info->ae_id) {
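
One subtlety in this decode: the QP/CQ id is 19 bits wide but split across
non-contiguous AEQE fields, so bit 46 of the event word is re-inserted as
bit 18 of the id. A standalone sketch using the IRDMA_AEQE_QPCQID_* layouts
from the irdma_defs.h hunks below:

    #include <stdint.h>

    typedef uint64_t u64;
    typedef uint32_t u32;

    #define BIT_ULL(n) (1ULL << (n))
    #define GENMASK_ULL(h, l) \
            ((0xFFFFFFFFFFFFFFFFULL >> (64ULL - ((h) - (l) + 1ULL))) << (l))
    #define FIELD_GET(mask, val) (((val) & mask) >> mask##_S)

    #define IRDMA_AEQE_QPCQID_LOW_S 0
    #define IRDMA_AEQE_QPCQID_LOW   GENMASK_ULL(17, 0)
    #define IRDMA_AEQE_QPCQID_HI_S  46
    #define IRDMA_AEQE_QPCQID_HI    BIT_ULL(46)

    /* Reassemble the 19-bit id: bit 46 of the AEQE word becomes bit 18. */
    static u32
    aeqe_qp_cq_id(u64 temp)
    {
            return (u32)FIELD_GET(IRDMA_AEQE_QPCQID_LOW, temp) |
                   ((u32)FIELD_GET(IRDMA_AEQE_QPCQID_HI, temp) << 18);
    }
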
@@ -4258,7 +4256,6 @@ irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
case IRDMA_AE_SOURCE_RQ_0011:
info->qp = true;
info->rq = true;
- info->wqe_idx = wqe_idx;
info->compl_ctx = compl_ctx;
break;
case IRDMA_AE_SOURCE_CQ:
@@ -4272,7 +4269,6 @@ irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
case IRDMA_AE_SOURCE_SQ_0111:
info->qp = true;
info->sq = true;
- info->wqe_idx = wqe_idx;
info->compl_ctx = compl_ctx;
break;
case IRDMA_AE_SOURCE_IN_WR:
@@ -4307,7 +4303,7 @@ irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
int
irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
{
- writel(count, dev->hw_regs[IRDMA_AEQALLOC]);
+ db_wr32(count, dev->aeq_alloc_db);
return 0;
}
@@ -4433,12 +4429,12 @@ irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq)
hdr = ccq->cq_uk.cq_id |
FLD_LS_64(ccq->dev, (ccq->ceq_id_valid ? ccq->ceq_id : 0),
IRDMA_CQPSQ_CQ_CEQID) |
- LS_64(IRDMA_CQP_OP_DESTROY_CQ, IRDMA_CQPSQ_OPCODE) |
- LS_64(ccq->ceqe_mask, IRDMA_CQPSQ_CQ_ENCEQEMASK) |
- LS_64(ccq->ceq_id_valid, IRDMA_CQPSQ_CQ_CEQIDVALID) |
- LS_64(ccq->tph_en, IRDMA_CQPSQ_TPHEN) |
- LS_64(ccq->cq_uk.avoid_mem_cflct, IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, ccq->ceqe_mask) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, ccq->ceq_id_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_TPHEN, ccq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, ccq->cq_uk.avoid_mem_cflct) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -4464,7 +4460,7 @@ irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq)
* @hmc_fn_id: hmc function id
*/
int
-irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id)
+irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u16 hmc_fn_id)
{
struct irdma_hmc_info *hmc_info;
struct irdma_hmc_fpm_misc *hmc_fpm_misc;
@@ -4500,7 +4496,7 @@ irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id)
* @hmc_fn_id: hmc function id
*/
static int
-irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, u8 hmc_fn_id)
+irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, u16 hmc_fn_id)
{
struct irdma_hmc_obj_info *obj_info;
__le64 *buf;
@@ -4593,27 +4589,27 @@ cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp,
} else {
data = 0;
}
- data |= LS_64(info->hmc_fn_id, IRDMA_CQPSQ_UPESD_HMCFNID);
+ data |= FLD_LS_64(cqp->dev, info->hmc_fn_id, IRDMA_CQPSQ_UPESD_HMCFNID);
set_64bit_val(wqe, IRDMA_BYTE_16, data);
switch (wqe_entries) {
case 3:
set_64bit_val(wqe, IRDMA_BYTE_48,
- (LS_64(info->entry[2].cmd, IRDMA_CQPSQ_UPESD_SDCMD) |
- LS_64(1, IRDMA_CQPSQ_UPESD_ENTRY_VALID)));
+ (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[2].cmd) |
+ FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1)));
set_64bit_val(wqe, IRDMA_BYTE_56, info->entry[2].data);
/* fallthrough */
case 2:
set_64bit_val(wqe, IRDMA_BYTE_32,
- (LS_64(info->entry[1].cmd, IRDMA_CQPSQ_UPESD_SDCMD) |
- LS_64(1, IRDMA_CQPSQ_UPESD_ENTRY_VALID)));
+ (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[1].cmd) |
+ FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1)));
set_64bit_val(wqe, IRDMA_BYTE_40, info->entry[1].data);
/* fallthrough */
case 1:
set_64bit_val(wqe, IRDMA_BYTE_0,
- LS_64(info->entry[0].cmd, IRDMA_CQPSQ_UPESD_SDCMD));
+ FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[0].cmd));
set_64bit_val(wqe, IRDMA_BYTE_8, info->entry[0].data);
break;
@@ -4621,9 +4617,9 @@ cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp,
break;
}
- hdr = LS_64(IRDMA_CQP_OP_UPDATE_PE_SDS, IRDMA_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID) |
- LS_64(mem_entries, IRDMA_CQPSQ_UPESD_ENTRY_COUNT);
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPDATE_PE_SDS) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_COUNT, mem_entries);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -4692,7 +4688,7 @@ irdma_update_sds_noccq(struct irdma_sc_dev *dev,
*/
int
irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
- u8 hmc_fn_id, bool post_sq,
+ u16 hmc_fn_id, bool post_sq,
bool poll_registers)
{
u64 hdr;
@@ -4704,10 +4700,11 @@ irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
return -ENOSPC;
set_64bit_val(wqe, IRDMA_BYTE_16,
- LS_64(hmc_fn_id, IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID));
+ FIELD_PREP(IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID, hmc_fn_id));
- hdr = LS_64(IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED, IRDMA_CQPSQ_OPCODE) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
+ IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -4792,9 +4789,10 @@ irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
temp = buf->pa;
set_64bit_val(wqe, IRDMA_BYTE_32, temp);
- temp = LS_64(cqp->polarity, IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID) |
- LS_64(buf->size, IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN) |
- LS_64(IRDMA_CQP_OP_QUERY_RDMA_FEATURES, IRDMA_CQPSQ_UP_OP);
+ temp = FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID,
+ cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN, buf->size) |
+ FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_QUERY_RDMA_FEATURES);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, temp);
@@ -4835,7 +4833,7 @@ irdma_get_rdma_features(struct irdma_sc_dev *dev)
goto exit;
get_64bit_val(feat_buf.va, IRDMA_BYTE_0, &temp);
- feat_cnt = (u16)RS_64(temp, IRDMA_FEATURE_CNT);
+ feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
if (feat_cnt < IRDMA_MIN_FEATURES) {
ret_code = -EINVAL;
goto exit;
@@ -4856,7 +4854,7 @@ irdma_get_rdma_features(struct irdma_sc_dev *dev)
goto exit;
get_64bit_val(feat_buf.va, IRDMA_BYTE_0, &temp);
- feat_cnt = (u16)RS_64(temp, IRDMA_FEATURE_CNT);
+ feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
if (feat_cnt < IRDMA_MIN_FEATURES) {
ret_code = -EINVAL;
goto exit;
@@ -4869,7 +4867,7 @@ irdma_get_rdma_features(struct irdma_sc_dev *dev)
for (byte_idx = 0, feat_idx = 0; feat_idx < min(feat_cnt, (u16)IRDMA_MAX_FEATURES);
feat_idx++, byte_idx += 8) {
get_64bit_val(feat_buf.va, byte_idx, &temp);
- feat_type = RS_64(temp, IRDMA_FEATURE_TYPE);
+ feat_type = FIELD_GET(IRDMA_FEATURE_TYPE, temp);
dev->feature_info[feat_type] = temp;
}
exit:
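
Feature words are self-describing: the loop above indexes dev->feature_info[]
by each word's FEATURE_TYPE field, and consumers then test capabilities as bit
flags (the BIT_ULL IRDMA_FEATURE_* values in the irdma_defs.h hunks below). A
sketch of one such test; the FLAGS_IDX slot number is an assumption for
illustration only:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t u64;

    #define BIT_ULL(n)                (1ULL << (n))
    #define IRDMA_FEATURE_RTS_AE      BIT_ULL(0)
    #define IRDMA_FEATURE_CQ_RESIZE   BIT_ULL(1)
    #define IRDMA_FEATURE_64_BYTE_CQE BIT_ULL(5)

    #define FLAGS_IDX 2	/* assumed slot of the capability-flags word */

    static bool
    supports_cq_resize(const u64 feature_info[])
    {
            return (feature_info[FLAGS_IDX] & IRDMA_FEATURE_CQ_RESIZE) != 0;
    }
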
@@ -4990,8 +4988,8 @@ irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt;
hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt =
hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].max_cnt;
-
- hmc_info->hmc_obj[IRDMA_HMC_IW_APBVT_ENTRY].cnt = 1;
+ if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_APBVT_ENTRY].cnt = 1;
while (irdma_q1_cnt(dev, hmc_info, qpwanted) > hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].max_cnt)
qpwanted /= 2;
@@ -5109,7 +5107,7 @@ irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
mem_size = sizeof(struct irdma_hmc_sd_entry) *
(hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
virt_mem.size = mem_size;
- virt_mem.va = kzalloc(virt_mem.size, GFP_ATOMIC);
+ virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL);
if (!virt_mem.va) {
irdma_debug(dev, IRDMA_DEBUG_HMC,
"failed to allocate memory for sd_entry buffer\n");
@@ -5144,7 +5142,6 @@ irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
status = irdma_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
pcmdinfo->in.u.aeq_destroy.scratch,
pcmdinfo->post_sq);
-
break;
case IRDMA_OP_CEQ_CREATE:
status = irdma_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
@@ -5410,9 +5407,6 @@ irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
int status = 0;
unsigned long flags;
- if (dev->no_cqp)
- return -EFAULT;
-
spin_lock_irqsave(&dev->cqp_lock, flags);
if (list_empty(&dev->cqp_cmd_head) && !irdma_cqp_ring_full(dev->cqp))
status = irdma_exec_cqp_cmd(dev, pcmdinfo);
@@ -5455,9 +5449,10 @@ void
irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable)
{
u32 reg_val;
- reg_val = enable ? IRDMA_PFINT_AEQCTL_CAUSE_ENA_M : 0;
- reg_val |= (idx << IRDMA_PFINT_AEQCTL_MSIX_INDX_S) |
- IRDMA_PFINT_AEQCTL_ITR_INDX_M;
+ reg_val = FIELD_PREP(IRDMA_PFINT_AEQCTL_CAUSE_ENA, enable) |
+ FIELD_PREP(IRDMA_PFINT_AEQCTL_MSIX_INDX, idx) |
+ FIELD_PREP(IRDMA_PFINT_AEQCTL_ITR_INDX, IRDMA_IDX_NOITR);
+
writel(reg_val, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
}
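
A bool folds directly into FIELD_PREP as a 0/1 value for the single-bit
cause-enable field, replacing the previous open-coded shift-and-OR. A
standalone sketch with assumed bit positions (the real IRDMA_PFINT_AEQCTL_*
ranges live in icrdma_hw.h and are not visible in this diff):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t u32;
    typedef uint64_t u64;

    #define GENMASK(h, l) \
            ((0xFFFFFFFFUL >> (32UL - ((h) - (l) + 1UL))) << (l))
    #define FIELD_PREP(mask, val) (((u64)(val) << mask##_S) & (mask))

    /* Assumed layout for illustration only: MSI-X index in bits 10:0,
     * ITR index in bits 12:11, cause-enable in bit 30. */
    #define AEQCTL_MSIX_INDX_S 0
    #define AEQCTL_MSIX_INDX   GENMASK(10, 0)
    #define AEQCTL_ITR_INDX_S  11
    #define AEQCTL_ITR_INDX    GENMASK(12, 11)
    #define AEQCTL_CAUSE_ENA_S 30
    #define AEQCTL_CAUSE_ENA   (1UL << 30)
    #define IDX_NOITR          3

    static u32
    aeqctl_val(u32 idx, bool enable)
    {
            return (u32)(FIELD_PREP(AEQCTL_CAUSE_ENA, enable) |
                FIELD_PREP(AEQCTL_MSIX_INDX, idx) |
                FIELD_PREP(AEQCTL_ITR_INDX, IDX_NOITR));
    }
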
@@ -5514,13 +5509,11 @@ irdma_sc_init_hw(struct irdma_sc_dev *dev)
/**
* irdma_sc_dev_init - Initialize control part of device
- * @ver: version
* @dev: Device pointer
* @info: Device init info
*/
int
-irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
- struct irdma_device_init_info *info)
+irdma_sc_dev_init(struct irdma_sc_dev *dev, struct irdma_device_init_info *info)
{
u32 val;
int ret_code = 0;
@@ -5562,14 +5555,13 @@ irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
dev->hw_attrs.max_sleep_count = IRDMA_SLEEP_COUNT;
dev->hw_attrs.max_cqp_compl_wait_time_ms = CQP_COMPL_WAIT_TIME_MS;
- dev->hw_attrs.uk_attrs.hw_rev = ver;
irdma_sc_init_hw(dev);
if (irdma_wait_pe_ready(dev))
return -ETIMEDOUT;
val = readl(dev->hw_regs[IRDMA_GLPCI_LBARCTRL]);
- db_size = (u8)RS_32(val, IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE);
+ db_size = (u8)FIELD_GET(IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE, val);
if (db_size != IRDMA_PE_DB_SIZE_4M && db_size != IRDMA_PE_DB_SIZE_8M) {
irdma_debug(dev, IRDMA_DEBUG_DEV,
"RDMA PE doorbell is not enabled in CSR val 0x%x db_size=%d\n",
diff --git a/sys/dev/irdma/irdma_defs.h b/sys/dev/irdma/irdma_defs.h
index e0775f53810d..9150ae5cc47f 100644
--- a/sys/dev/irdma/irdma_defs.h
+++ b/sys/dev/irdma/irdma_defs.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2021 Intel Corporation
+ * Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -48,83 +48,12 @@
#define IRDMA_PE_DB_SIZE_4M 1
#define IRDMA_PE_DB_SIZE_8M 2
-#define IRDMA_DDP_VER 1
-#define IRDMA_RDMAP_VER 1
-
-#define IRDMA_RDMA_MODE_RDMAC 0
-#define IRDMA_RDMA_MODE_IETF 1
-
-#define IRDMA_STAG_STATE_INVALID 0
-#define IRDMA_STAG_STATE_VALID 1
-#define IRDMA_STAG_TYPE_SHARED 0
-#define IRDMA_STAG_TYPE_NONSHARED 1
-
-#define QS_HANDLE_UNKNOWN 0xffff
-#define USER_PRI_UNKNOWN 0xff
-
-#define IRDMA_INVALID_WQE_INDEX 0xffffffff
-
-#define IRDMA_CQP_SW_SQSIZE_8 8
-#define IRDMA_CQP_SW_SQSIZE_16 16
-#define IRDMA_CQP_SW_SQSIZE_32 32
-#define IRDMA_CQP_SW_SQSIZE_64 64
-#define IRDMA_CQP_SW_SQSIZE_128 128
-#define IRDMA_CQP_SW_SQSIZE_256 256
-#define IRDMA_CQP_SW_SQSIZE_512 512
-#define IRDMA_CQP_SW_SQSIZE_1024 1024
-
-#define IRDMA_CQP_HW_SQSIZE_4 1
-#define IRDMA_CQP_HW_SQSIZE_8 2
-#define IRDMA_CQP_HW_SQSIZE_16 3
-#define IRDMA_CQP_HW_SQSIZE_32 4
-#define IRDMA_CQP_HW_SQSIZE_64 5
-#define IRDMA_CQP_HW_SQSIZE_128 6
-#define IRDMA_CQP_HW_SQSIZE_256 7
-#define IRDMA_CQP_HW_SQSIZE_512 8
-#define IRDMA_CQP_HW_SQSIZE_1024 9
-#define IRDMA_CQP_HW_SQSIZE_2048 10
-
-/* WQE size considering 32 bytes per WQE*/
-#define IRDMAQP_SW_WQSIZE_8 8 /* 256 bytes */
-#define IRDMAQP_SW_WQSIZE_16 16 /* 512 bytes */
-#define IRDMAQP_SW_WQSIZE_32 32 /* 1024 bytes */
-#define IRDMAQP_SW_WQSIZE_64 64 /* 2048 bytes */
-#define IRDMAQP_SW_WQSIZE_128 128 /* 4096 bytes */
-#define IRDMAQP_SW_WQSIZE_256 256 /* 8192 bytes */
-#define IRDMAQP_SW_WQSIZE_512 512 /* 16384 bytes */
-#define IRDMAQP_SW_WQSIZE_1024 1024 /* 32768 bytes */
-#define IRDMAQP_SW_WQSIZE_2048 2048 /* 65536 bytes */
-#define IRDMAQP_SW_WQSIZE_4096 4096 /* 131072 bytes */
-#define IRDMAQP_SW_WQSIZE_8192 8192 /* 262144 bytes */
-#define IRDMAQP_SW_WQSIZE_16384 16384 /* 524288 bytes */
-#define IRDMAQP_SW_WQSIZE_32768 32768 /* 1048576 bytes */
-
-#define IRDMAQP_HW_WQSIZE_8 1
-#define IRDMAQP_HW_WQSIZE_16 2
-#define IRDMAQP_HW_WQSIZE_32 3
-#define IRDMAQP_HW_WQSIZE_64 4
-#define IRDMAQP_HW_WQSIZE_128 5
-#define IRDMAQP_HW_WQSIZE_256 6
-#define IRDMAQP_HW_WQSIZE_512 7
-#define IRDMAQP_HW_WQSIZE_1024 8
-#define IRDMAQP_HW_WQSIZE_2048 9
-#define IRDMAQP_HW_WQSIZE_4096 10
-#define IRDMAQP_HW_WQSIZE_8192 11
-#define IRDMAQP_HW_WQSIZE_16384 12
-#define IRDMAQP_HW_WQSIZE_32768 13
-
#define IRDMA_IRD_HW_SIZE_4 0
#define IRDMA_IRD_HW_SIZE_16 1
#define IRDMA_IRD_HW_SIZE_64 2
#define IRDMA_IRD_HW_SIZE_128 3
#define IRDMA_IRD_HW_SIZE_256 4
-enum irdma_protocol_used {
- IRDMA_ANY_PROTOCOL = 0,
- IRDMA_IWARP_PROTOCOL_ONLY = 1,
- IRDMA_ROCE_PROTOCOL_ONLY = 2,
-};
-
#define IRDMA_QP_STATE_INVALID 0
#define IRDMA_QP_STATE_IDLE 1
#define IRDMA_QP_STATE_RTS 2
@@ -245,6 +174,7 @@ enum irdma_protocol_used {
#define IRDMA_QP_SW_MAX_WQ_QUANTA 32768
#define IRDMA_QP_SW_MAX_SQ_QUANTA 32768
#define IRDMA_QP_SW_MAX_RQ_QUANTA 32768
+
#define IRDMA_MAX_QP_WRS(max_quanta_per_wr) \
((IRDMA_QP_SW_MAX_WQ_QUANTA - IRDMA_SQ_RSVD) / (max_quanta_per_wr))
@@ -263,7 +193,6 @@ enum irdma_protocol_used {
#define IRDMA_CQE_QTYPE_RQ 0
#define IRDMA_CQE_QTYPE_SQ 1
-#define IRDMA_QP_SW_MIN_WQSIZE 8u /* in WRs*/
#define IRDMA_QP_WQE_MIN_SIZE 32
#define IRDMA_QP_WQE_MAX_SIZE 256
#define IRDMA_QP_WQE_MIN_QUANTA 1
@@ -273,9 +202,11 @@ enum irdma_protocol_used {
#define IRDMA_SQ_RSVD 258
#define IRDMA_RQ_RSVD 1
-#define IRDMA_FEATURE_RTS_AE 1ULL
-#define IRDMA_FEATURE_CQ_RESIZE 2ULL
-#define IRDMA_FEATURE_RELAX_RQ_ORDER 4ULL
+#define IRDMA_FEATURE_RTS_AE BIT_ULL(0)
+#define IRDMA_FEATURE_CQ_RESIZE BIT_ULL(1)
+#define IRDMA_FEATURE_RELAX_RQ_ORDER BIT_ULL(2)
+#define IRDMA_FEATURE_64_BYTE_CQE BIT_ULL(5)
+
#define IRDMAQP_OP_RDMA_WRITE 0x00
#define IRDMAQP_OP_RDMA_READ 0x01
#define IRDMAQP_OP_RDMA_SEND 0x03
@@ -342,7 +273,7 @@ enum irdma_cqp_op_type {
IRDMA_OP_DELETE_LOCAL_MAC_ENTRY = 49,
IRDMA_OP_CQ_MODIFY = 50,
- /* Must be last entry*/
+ /* Must be last entry */
IRDMA_MAX_CQP_OPS = 51,
};
@@ -394,105 +325,22 @@ enum irdma_cqp_op_type {
#define IRDMA_CQP_OP_GATHER_STATS 0x2e
#define IRDMA_CQP_OP_UP_MAP 0x2f
-/* Async Events codes */
-#define IRDMA_AE_AMP_UNALLOCATED_STAG 0x0102
-#define IRDMA_AE_AMP_INVALID_STAG 0x0103
-#define IRDMA_AE_AMP_BAD_QP 0x0104
-#define IRDMA_AE_AMP_BAD_PD 0x0105
-#define IRDMA_AE_AMP_BAD_STAG_KEY 0x0106
-#define IRDMA_AE_AMP_BAD_STAG_INDEX 0x0107
-#define IRDMA_AE_AMP_BOUNDS_VIOLATION 0x0108
-#define IRDMA_AE_AMP_RIGHTS_VIOLATION 0x0109
-#define IRDMA_AE_AMP_TO_WRAP 0x010a
-#define IRDMA_AE_AMP_FASTREG_VALID_STAG 0x010c
-#define IRDMA_AE_AMP_FASTREG_MW_STAG 0x010d
-#define IRDMA_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e
-#define IRDMA_AE_AMP_FASTREG_INVALID_LENGTH 0x0110
-#define IRDMA_AE_AMP_INVALIDATE_SHARED 0x0111
-#define IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112
-#define IRDMA_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113
-#define IRDMA_AE_AMP_MWBIND_VALID_STAG 0x0114
-#define IRDMA_AE_AMP_MWBIND_OF_MR_STAG 0x0115
-#define IRDMA_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116
-#define IRDMA_AE_AMP_MWBIND_TO_MW_STAG 0x0117
-#define IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118
-#define IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119
-#define IRDMA_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a
-#define IRDMA_AE_AMP_MWBIND_BIND_DISABLED 0x011b
-#define IRDMA_AE_PRIV_OPERATION_DENIED 0x011c
-#define IRDMA_AE_AMP_INVALIDATE_TYPE1_MW 0x011d
-#define IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW 0x011e
-#define IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG 0x011f
-#define IRDMA_AE_AMP_MWBIND_WRONG_TYPE 0x0120
-#define IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH 0x0121
-#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132
-#define IRDMA_AE_UDA_XMIT_BAD_PD 0x0133
-#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134
-#define IRDMA_AE_UDA_L4LEN_INVALID 0x0135
-#define IRDMA_AE_BAD_CLOSE 0x0201
-#define IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202
-#define IRDMA_AE_CQ_OPERATION_ERROR 0x0203
-#define IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205
-#define IRDMA_AE_STAG_ZERO_INVALID 0x0206
-#define IRDMA_AE_IB_RREQ_AND_Q1_FULL 0x0207
-#define IRDMA_AE_IB_INVALID_REQUEST 0x0208
-#define IRDMA_AE_WQE_UNEXPECTED_OPCODE 0x020a
-#define IRDMA_AE_WQE_INVALID_PARAMETER 0x020b
-#define IRDMA_AE_WQE_INVALID_FRAG_DATA 0x020c
-#define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d
-#define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e
-#define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220
-#define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
-#define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
-#define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
-#define IRDMA_AE_DDP_UBE_INVALID_MO 0x0305
-#define IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306
-#define IRDMA_AE_DDP_UBE_INVALID_QN 0x0307
-#define IRDMA_AE_DDP_NO_L_BIT 0x0308
-#define IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311
-#define IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312
-#define IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313
-#define IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314
-#define IRDMA_AE_ROCE_RSP_LENGTH_ERROR 0x0316
-#define IRDMA_AE_ROCE_EMPTY_MCG 0x0380
-#define IRDMA_AE_ROCE_BAD_MC_IP_ADDR 0x0381
-#define IRDMA_AE_ROCE_BAD_MC_QPID 0x0382
-#define IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH 0x0383
-#define IRDMA_AE_INVALID_ARP_ENTRY 0x0401
-#define IRDMA_AE_INVALID_TCP_OPTION_RCVD 0x0402
-#define IRDMA_AE_STALE_ARP_ENTRY 0x0403
-#define IRDMA_AE_INVALID_AH_ENTRY 0x0406
-#define IRDMA_AE_LLP_CLOSE_COMPLETE 0x0501
-#define IRDMA_AE_LLP_CONNECTION_RESET 0x0502
-#define IRDMA_AE_LLP_FIN_RECEIVED 0x0503
-#define IRDMA_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504
-#define IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505
-#define IRDMA_AE_LLP_SEGMENT_TOO_SMALL 0x0507
-#define IRDMA_AE_LLP_SYN_RECEIVED 0x0508
-#define IRDMA_AE_LLP_TERMINATE_RECEIVED 0x0509
-#define IRDMA_AE_LLP_TOO_MANY_RETRIES 0x050a
-#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
-#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
-#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
-#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
-#define IRDMA_AE_RESET_SENT 0x0601
-#define IRDMA_AE_TERMINATE_SENT 0x0602
-#define IRDMA_AE_RESET_NOT_SENT 0x0603
-#define IRDMA_AE_LCE_QP_CATASTROPHIC 0x0700
-#define IRDMA_AE_LCE_FUNCTION_CATASTROPHIC 0x0701
-#define IRDMA_AE_LCE_CQ_CATASTROPHIC 0x0702
-#define IRDMA_AE_QP_SUSPEND_COMPLETE 0x0900
-
#ifndef LS_64_1
#define LS_64_1(val, bits) ((u64)(uintptr_t)(val) << (bits))
#define RS_64_1(val, bits) ((u64)(uintptr_t)(val) >> (bits))
#define LS_32_1(val, bits) ((u32)((val) << (bits)))
#define RS_32_1(val, bits) ((u32)((val) >> (bits)))
#endif
-#define LS_64(val, field) (((u64)(val) << field ## _S) & (field ## _M))
-#define RS_64(val, field) ((u64)((val) & field ## _M) >> field ## _S)
-#define LS_32(val, field) (((val) << field ## _S) & (field ## _M))
-#define RS_32(val, field) (((val) & field ## _M) >> field ## _S)
+#ifndef GENMASK_ULL
+#define GENMASK_ULL(high, low) ((0xFFFFFFFFFFFFFFFFULL >> (64ULL - ((high) - (low) + 1ULL))) << (low))
+#endif /* GENMASK_ULL */
+#ifndef GENMASK
+#define GENMASK(high, low) ((0xFFFFFFFFUL >> (32UL - ((high) - (low) + 1UL))) << (low))
+#endif /* GENMASK */
+#ifndef FIELD_PREP
+#define FIELD_PREP(mask, val) (((u64)(val) << mask##_S) & (mask))
+#define FIELD_GET(mask, val) (((val) & mask) >> mask##_S)
+#endif /* FIELD_PREP */
#define FLD_LS_64(dev, val, field) \
(((u64)(val) << (dev)->hw_shifts[field ## _S]) & (dev)->hw_masks[field ## _M])
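
Unlike the Linux kernel's FIELD_PREP/FIELD_GET, which derive the shift from
the mask at compile time, this fallback still relies on each field's companion
##_S macro, which is why every converted define in this file keeps its _S
line. A standalone round-trip sketch (local macro copies; the demo field
mirrors the IRDMA_CQPSQ_OPCODE range, bits 37:32):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;

    #define GENMASK_ULL(h, l) \
            ((0xFFFFFFFFFFFFFFFFULL >> (64ULL - ((h) - (l) + 1ULL))) << (l))
    #define FIELD_PREP(mask, val) (((u64)(val) << mask##_S) & (mask))
    #define FIELD_GET(mask, val) (((val) & mask) >> mask##_S)

    /* Demo field occupying bits 37:32, like IRDMA_CQPSQ_OPCODE. */
    #define DEMO_OPCODE_S 32
    #define DEMO_OPCODE   GENMASK_ULL(37, 32)

    int
    main(void)
    {
            u64 hdr = FIELD_PREP(DEMO_OPCODE, 0x2e);	/* pack */

            /* prints hdr=0x0000002e00000000 field=0x2e */
            printf("hdr=0x%016llx field=0x%02llx\n",
                (unsigned long long)hdr,
                (unsigned long long)FIELD_GET(DEMO_OPCODE, hdr));
            return 0;
    }
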
@@ -511,954 +359,594 @@ enum irdma_cqp_op_type {
#define IRDMA_MAX_STATS_64 0xffffffffffffffffULL
#define IRDMA_MAX_CQ_READ_THRESH 0x3FFFF
-/* ILQ CQP hash table fields */
#define IRDMA_CQPSQ_QHASH_VLANID_S 32
-#define IRDMA_CQPSQ_QHASH_VLANID_M \
- ((u64)0xfff << IRDMA_CQPSQ_QHASH_VLANID_S)
-
+#define IRDMA_CQPSQ_QHASH_VLANID GENMASK_ULL(43, 32)
#define IRDMA_CQPSQ_QHASH_QPN_S 32
-#define IRDMA_CQPSQ_QHASH_QPN_M \
- ((u64)0x3ffff << IRDMA_CQPSQ_QHASH_QPN_S)
-
+#define IRDMA_CQPSQ_QHASH_QPN GENMASK_ULL(49, 32)
#define IRDMA_CQPSQ_QHASH_QS_HANDLE_S 0
-#define IRDMA_CQPSQ_QHASH_QS_HANDLE_M ((u64)0x3ff << IRDMA_CQPSQ_QHASH_QS_HANDLE_S)
-
+#define IRDMA_CQPSQ_QHASH_QS_HANDLE GENMASK_ULL(9, 0)
#define IRDMA_CQPSQ_QHASH_SRC_PORT_S 16
-#define IRDMA_CQPSQ_QHASH_SRC_PORT_M \
- ((u64)0xffff << IRDMA_CQPSQ_QHASH_SRC_PORT_S)
-
+#define IRDMA_CQPSQ_QHASH_SRC_PORT GENMASK_ULL(31, 16)
#define IRDMA_CQPSQ_QHASH_DEST_PORT_S 0
-#define IRDMA_CQPSQ_QHASH_DEST_PORT_M \
- ((u64)0xffff << IRDMA_CQPSQ_QHASH_DEST_PORT_S)
-
+#define IRDMA_CQPSQ_QHASH_DEST_PORT GENMASK_ULL(15, 0)
#define IRDMA_CQPSQ_QHASH_ADDR0_S 32
-#define IRDMA_CQPSQ_QHASH_ADDR0_M \
- ((u64)0xffffffff << IRDMA_CQPSQ_QHASH_ADDR0_S)
-
+#define IRDMA_CQPSQ_QHASH_ADDR0 GENMASK_ULL(63, 32)
#define IRDMA_CQPSQ_QHASH_ADDR1_S 0
-#define IRDMA_CQPSQ_QHASH_ADDR1_M \
- ((u64)0xffffffff << IRDMA_CQPSQ_QHASH_ADDR1_S)
-
+#define IRDMA_CQPSQ_QHASH_ADDR1 GENMASK_ULL(31, 0)
#define IRDMA_CQPSQ_QHASH_ADDR2_S 32
-#define IRDMA_CQPSQ_QHASH_ADDR2_M \
- ((u64)0xffffffff << IRDMA_CQPSQ_QHASH_ADDR2_S)
-
+#define IRDMA_CQPSQ_QHASH_ADDR2 GENMASK_ULL(63, 32)
#define IRDMA_CQPSQ_QHASH_ADDR3_S 0
-#define IRDMA_CQPSQ_QHASH_ADDR3_M \
- ((u64)0xffffffff << IRDMA_CQPSQ_QHASH_ADDR3_S)
-
+#define IRDMA_CQPSQ_QHASH_ADDR3 GENMASK_ULL(31, 0)
#define IRDMA_CQPSQ_QHASH_WQEVALID_S 63
-#define IRDMA_CQPSQ_QHASH_WQEVALID_M \
- BIT_ULL(IRDMA_CQPSQ_QHASH_WQEVALID_S)
+#define IRDMA_CQPSQ_QHASH_WQEVALID BIT_ULL(63)
#define IRDMA_CQPSQ_QHASH_OPCODE_S 32
-#define IRDMA_CQPSQ_QHASH_OPCODE_M \
- ((u64)0x3f << IRDMA_CQPSQ_QHASH_OPCODE_S)
-
+#define IRDMA_CQPSQ_QHASH_OPCODE GENMASK_ULL(37, 32)
#define IRDMA_CQPSQ_QHASH_MANAGE_S 61
-#define IRDMA_CQPSQ_QHASH_MANAGE_M \
- ((u64)0x3 << IRDMA_CQPSQ_QHASH_MANAGE_S)
-
+#define IRDMA_CQPSQ_QHASH_MANAGE GENMASK_ULL(62, 61)
#define IRDMA_CQPSQ_QHASH_IPV4VALID_S 60
-#define IRDMA_CQPSQ_QHASH_IPV4VALID_M \
- BIT_ULL(IRDMA_CQPSQ_QHASH_IPV4VALID_S)
-
+#define IRDMA_CQPSQ_QHASH_IPV4VALID BIT_ULL(60)
#define IRDMA_CQPSQ_QHASH_VLANVALID_S 59
-#define IRDMA_CQPSQ_QHASH_VLANVALID_M \
- BIT_ULL(IRDMA_CQPSQ_QHASH_VLANVALID_S)
-
+#define IRDMA_CQPSQ_QHASH_VLANVALID BIT_ULL(59)
#define IRDMA_CQPSQ_QHASH_ENTRYTYPE_S 42
-#define IRDMA_CQPSQ_QHASH_ENTRYTYPE_M \
- ((u64)0x7 << IRDMA_CQPSQ_QHASH_ENTRYTYPE_S)
-
-/* Stats */
+#define IRDMA_CQPSQ_QHASH_ENTRYTYPE GENMASK_ULL(44, 42)
#define IRDMA_CQPSQ_STATS_WQEVALID_S 63
-#define IRDMA_CQPSQ_STATS_WQEVALID_M \
- BIT_ULL(IRDMA_CQPSQ_STATS_WQEVALID_S)
-
+#define IRDMA_CQPSQ_STATS_WQEVALID BIT_ULL(63)
#define IRDMA_CQPSQ_STATS_ALLOC_INST_S 62
-#define IRDMA_CQPSQ_STATS_ALLOC_INST_M \
- BIT_ULL(IRDMA_CQPSQ_STATS_ALLOC_INST_S)
-
+#define IRDMA_CQPSQ_STATS_ALLOC_INST BIT_ULL(62)
#define IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX_S 60
-#define IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX_M \
- BIT_ULL(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX_S)
-
+#define IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX BIT_ULL(60)
#define IRDMA_CQPSQ_STATS_USE_INST_S 61
-#define IRDMA_CQPSQ_STATS_USE_INST_M \
- BIT_ULL(IRDMA_CQPSQ_STATS_USE_INST_S)
-
+#define IRDMA_CQPSQ_STATS_USE_INST BIT_ULL(61)
#define IRDMA_CQPSQ_STATS_OP_S 32
-#define IRDMA_CQPSQ_STATS_OP_M \
- ((u64)0x3f << IRDMA_CQPSQ_STATS_OP_S)
-
+#define IRDMA_CQPSQ_STATS_OP GENMASK_ULL(37, 32)
#define IRDMA_CQPSQ_STATS_INST_INDEX_S 0
-#define IRDMA_CQPSQ_STATS_INST_INDEX_M \
- ((u64)0x7f << IRDMA_CQPSQ_STATS_INST_INDEX_S)
-
+#define IRDMA_CQPSQ_STATS_INST_INDEX GENMASK_ULL(6, 0)
#define IRDMA_CQPSQ_STATS_HMC_FCN_INDEX_S 0
-#define IRDMA_CQPSQ_STATS_HMC_FCN_INDEX_M \
- ((u64)0x3f << IRDMA_CQPSQ_STATS_HMC_FCN_INDEX_S)
-
-/* WS - Work Scheduler */
+#define IRDMA_CQPSQ_STATS_HMC_FCN_INDEX GENMASK_ULL(15, 0)
#define IRDMA_CQPSQ_WS_WQEVALID_S 63
-#define IRDMA_CQPSQ_WS_WQEVALID_M \
- BIT_ULL(IRDMA_CQPSQ_WS_WQEVALID_S)
-
+#define IRDMA_CQPSQ_WS_WQEVALID BIT_ULL(63)
#define IRDMA_CQPSQ_WS_NODEOP_S 52
-#define IRDMA_CQPSQ_WS_NODEOP_M \
- ((u64)0x3 << IRDMA_CQPSQ_WS_NODEOP_S)
+#define IRDMA_CQPSQ_WS_NODEOP GENMASK_ULL(55, 52)
#define IRDMA_CQPSQ_WS_ENABLENODE_S 62
-#define IRDMA_CQPSQ_WS_ENABLENODE_M \
- BIT_ULL(IRDMA_CQPSQ_WS_ENABLENODE_S)
-
+#define IRDMA_CQPSQ_WS_ENABLENODE BIT_ULL(62)
#define IRDMA_CQPSQ_WS_NODETYPE_S 61
-#define IRDMA_CQPSQ_WS_NODETYPE_M \
- BIT_ULL(IRDMA_CQPSQ_WS_NODETYPE_S)
-
+#define IRDMA_CQPSQ_WS_NODETYPE BIT_ULL(61)
#define IRDMA_CQPSQ_WS_PRIOTYPE_S 59
-#define IRDMA_CQPSQ_WS_PRIOTYPE_M \
- ((u64)0x3 << IRDMA_CQPSQ_WS_PRIOTYPE_S)
-
+#define IRDMA_CQPSQ_WS_PRIOTYPE GENMASK_ULL(60, 59)
#define IRDMA_CQPSQ_WS_TC_S 56
-#define IRDMA_CQPSQ_WS_TC_M \
- ((u64)0x7 << IRDMA_CQPSQ_WS_TC_S)
-
+#define IRDMA_CQPSQ_WS_TC GENMASK_ULL(58, 56)
#define IRDMA_CQPSQ_WS_VMVFTYPE_S 54
-#define IRDMA_CQPSQ_WS_VMVFTYPE_M \
- ((u64)0x3 << IRDMA_CQPSQ_WS_VMVFTYPE_S)
-
+#define IRDMA_CQPSQ_WS_VMVFTYPE GENMASK_ULL(55, 54)
#define IRDMA_CQPSQ_WS_VMVFNUM_S 42
-#define IRDMA_CQPSQ_WS_VMVFNUM_M \
- ((u64)0x3ff << IRDMA_CQPSQ_WS_VMVFNUM_S)
-
+#define IRDMA_CQPSQ_WS_VMVFNUM GENMASK_ULL(51, 42)
#define IRDMA_CQPSQ_WS_OP_S 32
-#define IRDMA_CQPSQ_WS_OP_M \
- ((u64)0x3f << IRDMA_CQPSQ_WS_OP_S)
-
+#define IRDMA_CQPSQ_WS_OP GENMASK_ULL(37, 32)
#define IRDMA_CQPSQ_WS_PARENTID_S 16
-#define IRDMA_CQPSQ_WS_PARENTID_M \
- ((u64)0x3ff << IRDMA_CQPSQ_WS_PARENTID_S)
-
+#define IRDMA_CQPSQ_WS_PARENTID GENMASK_ULL(25, 16)
#define IRDMA_CQPSQ_WS_NODEID_S 0
-#define IRDMA_CQPSQ_WS_NODEID_M \
- ((u64)0x3ff << IRDMA_CQPSQ_WS_NODEID_S)
-
+#define IRDMA_CQPSQ_WS_NODEID GENMASK_ULL(9, 0)
#define IRDMA_CQPSQ_WS_VSI_S 48
-#define IRDMA_CQPSQ_WS_VSI_M \
- ((u64)0x3ff << IRDMA_CQPSQ_WS_VSI_S)
-
+#define IRDMA_CQPSQ_WS_VSI GENMASK_ULL(57, 48)
#define IRDMA_CQPSQ_WS_WEIGHT_S 32
-#define IRDMA_CQPSQ_WS_WEIGHT_M \
- ((u64)0x7f << IRDMA_CQPSQ_WS_WEIGHT_S)
+#define IRDMA_CQPSQ_WS_WEIGHT GENMASK_ULL(38, 32)
-/* UP to UP mapping */
#define IRDMA_CQPSQ_UP_WQEVALID_S 63
-#define IRDMA_CQPSQ_UP_WQEVALID_M \
- BIT_ULL(IRDMA_CQPSQ_UP_WQEVALID_S)
-
+#define IRDMA_CQPSQ_UP_WQEVALID BIT_ULL(63)
#define IRDMA_CQPSQ_UP_USEVLAN_S 62
-#define IRDMA_CQPSQ_UP_USEVLAN_M \
- BIT_ULL(IRDMA_CQPSQ_UP_USEVLAN_S)
-
+#define IRDMA_CQPSQ_UP_USEVLAN BIT_ULL(62)
#define IRDMA_CQPSQ_UP_USEOVERRIDE_S 61
-#define IRDMA_CQPSQ_UP_USEOVERRIDE_M \
- BIT_ULL(IRDMA_CQPSQ_UP_USEOVERRIDE_S)
-
+#define IRDMA_CQPSQ_UP_USEOVERRIDE BIT_ULL(61)
#define IRDMA_CQPSQ_UP_OP_S 32
-#define IRDMA_CQPSQ_UP_OP_M \
- ((u64)0x3f << IRDMA_CQPSQ_UP_OP_S)
-
+#define IRDMA_CQPSQ_UP_OP GENMASK_ULL(37, 32)
#define IRDMA_CQPSQ_UP_HMCFCNIDX_S 0
-#define IRDMA_CQPSQ_UP_HMCFCNIDX_M \
- ((u64)0x3f << IRDMA_CQPSQ_UP_HMCFCNIDX_S)
-
+#define IRDMA_CQPSQ_UP_HMCFCNIDX GENMASK_ULL(5, 0)
#define IRDMA_CQPSQ_UP_CNPOVERRIDE_S 32
-#define IRDMA_CQPSQ_UP_CNPOVERRIDE_M \
- ((u64)0x3f << IRDMA_CQPSQ_UP_CNPOVERRIDE_S)
-
-/* Query RDMA features*/
+#define IRDMA_CQPSQ_UP_CNPOVERRIDE GENMASK_ULL(37, 32)
#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID_S 63
-#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID_M \
- BIT_ULL(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID_S)
-
+#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID BIT_ULL(63)
#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN_S 0
-#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN_M \
- ((u64)0xffffffff << IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN_S)
-
+#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN GENMASK_ULL(31, 0)
#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_OP_S 32
-#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_OP_M \
- ((u64)0x3f << IRDMA_CQPSQ_QUERY_RDMA_FEATURES_OP_S)
-
+#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_OP GENMASK_ULL(37, 32)
#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_HW_MODEL_USED_S 32
-#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_HW_MODEL_USED_M \
- (0xffffULL << IRDMA_CQPSQ_QUERY_RDMA_FEATURES_HW_MODEL_USED_S)
-
+#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_HW_MODEL_USED GENMASK_ULL(47, 32)
#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_HW_MAJOR_VERSION_S 16
-#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_HW_MAJOR_VERSION_M \
- (0xffULL << IRDMA_CQPSQ_QUERY_RDMA_FEATURES_HW_MAJOR_VERSION_S)
-
+#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_HW_MAJOR_VERSION GENMASK_ULL(23, 16)
#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_HW_MINOR_VERSION_S 0
-#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_HW_MINOR_VERSION_M \
- (0xffULL << IRDMA_CQPSQ_QUERY_RDMA_FEATURES_HW_MINOR_VERSION_S)
-
-/* CQP Host Context */
+#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_HW_MINOR_VERSION GENMASK_ULL(7, 0)
#define IRDMA_CQPHC_SQSIZE_S 8
-#define IRDMA_CQPHC_SQSIZE_M (0xfULL << IRDMA_CQPHC_SQSIZE_S)
-
+#define IRDMA_CQPHC_SQSIZE GENMASK_ULL(11, 8)
#define IRDMA_CQPHC_DISABLE_PFPDUS_S 1
-#define IRDMA_CQPHC_DISABLE_PFPDUS_M BIT_ULL(IRDMA_CQPHC_DISABLE_PFPDUS_S)
-
+#define IRDMA_CQPHC_DISABLE_PFPDUS BIT_ULL(1)
#define IRDMA_CQPHC_ROCEV2_RTO_POLICY_S 2
-#define IRDMA_CQPHC_ROCEV2_RTO_POLICY_M BIT_ULL(IRDMA_CQPHC_ROCEV2_RTO_POLICY_S)
-
+#define IRDMA_CQPHC_ROCEV2_RTO_POLICY BIT_ULL(2)
#define IRDMA_CQPHC_PROTOCOL_USED_S 3
-#define IRDMA_CQPHC_PROTOCOL_USED_M (0x3ULL << IRDMA_CQPHC_PROTOCOL_USED_S)
-
+#define IRDMA_CQPHC_PROTOCOL_USED GENMASK_ULL(4, 3)
#define IRDMA_CQPHC_MIN_RATE_S 48
-#define IRDMA_CQPHC_MIN_RATE_M (0xfULL << IRDMA_CQPHC_MIN_RATE_S)
-
+#define IRDMA_CQPHC_MIN_RATE GENMASK_ULL(51, 48)
#define IRDMA_CQPHC_MIN_DEC_FACTOR_S 56
-#define IRDMA_CQPHC_MIN_DEC_FACTOR_M (0xfULL << IRDMA_CQPHC_MIN_DEC_FACTOR_S)
-
+#define IRDMA_CQPHC_MIN_DEC_FACTOR GENMASK_ULL(59, 56)
#define IRDMA_CQPHC_DCQCN_T_S 0
-#define IRDMA_CQPHC_DCQCN_T_M (0xffffULL << IRDMA_CQPHC_DCQCN_T_S)
-
+#define IRDMA_CQPHC_DCQCN_T GENMASK_ULL(15, 0)
#define IRDMA_CQPHC_HAI_FACTOR_S 32
-#define IRDMA_CQPHC_HAI_FACTOR_M \
- (0xffffULL << IRDMA_CQPHC_HAI_FACTOR_S)
-
+#define IRDMA_CQPHC_HAI_FACTOR GENMASK_ULL(47, 32)
#define IRDMA_CQPHC_RAI_FACTOR_S 48
-#define IRDMA_CQPHC_RAI_FACTOR_M \
- (0xffffULL << IRDMA_CQPHC_RAI_FACTOR_S)
-
+#define IRDMA_CQPHC_RAI_FACTOR GENMASK_ULL(63, 48)
#define IRDMA_CQPHC_DCQCN_B_S 0
-#define IRDMA_CQPHC_DCQCN_B_M (0x1ffffffULL << IRDMA_CQPHC_DCQCN_B_S)
-
+#define IRDMA_CQPHC_DCQCN_B GENMASK_ULL(24, 0)
#define IRDMA_CQPHC_DCQCN_F_S 25
-#define IRDMA_CQPHC_DCQCN_F_M (0x7ULL << IRDMA_CQPHC_DCQCN_F_S)
-
+#define IRDMA_CQPHC_DCQCN_F GENMASK_ULL(27, 25)
#define IRDMA_CQPHC_CC_CFG_VALID_S 31
-#define IRDMA_CQPHC_CC_CFG_VALID_M BIT_ULL(IRDMA_CQPHC_CC_CFG_VALID_S)
-
+#define IRDMA_CQPHC_CC_CFG_VALID BIT_ULL(31)
#define IRDMA_CQPHC_RREDUCE_MPERIOD_S 32
-#define IRDMA_CQPHC_RREDUCE_MPERIOD_M \
- (0xffffffffULL << IRDMA_CQPHC_RREDUCE_MPERIOD_S)
-
+#define IRDMA_CQPHC_RREDUCE_MPERIOD GENMASK_ULL(63, 32)
#define IRDMA_CQPHC_HW_MINVER_S 0
-#define IRDMA_CQPHC_HW_MINVER_M (0xffffULL << IRDMA_CQPHC_HW_MINVER_S)
+#define IRDMA_CQPHC_HW_MINVER GENMASK_ULL(15, 0)
#define IRDMA_CQPHC_HW_MAJVER_GEN_1 0
#define IRDMA_CQPHC_HW_MAJVER_GEN_2 1
#define IRDMA_CQPHC_HW_MAJVER_GEN_3 2
-
#define IRDMA_CQPHC_HW_MAJVER_S 16
-#define IRDMA_CQPHC_HW_MAJVER_M (0xffffULL << IRDMA_CQPHC_HW_MAJVER_S)
-
+#define IRDMA_CQPHC_HW_MAJVER GENMASK_ULL(31, 16)
#define IRDMA_CQPHC_CEQPERVF_S 32
-#define IRDMA_CQPHC_CEQPERVF_M (0xffULL << IRDMA_CQPHC_CEQPERVF_S)
+#define IRDMA_CQPHC_CEQPERVF GENMASK_ULL(39, 32)
#define IRDMA_CQPHC_EN_REM_ENDPOINT_TRK_S 3
-#define IRDMA_CQPHC_EN_REM_ENDPOINT_TRK_M BIT_ULL(IRDMA_CQPHC_EN_REM_ENDPOINT_TRK_S)
+#define IRDMA_CQPHC_EN_REM_ENDPOINT_TRK BIT_ULL(3)
#define IRDMA_CQPHC_ENABLED_VFS_S 32
-#define IRDMA_CQPHC_ENABLED_VFS_M (0x3fULL << IRDMA_CQPHC_ENABLED_VFS_S)
+#define IRDMA_CQPHC_ENABLED_VFS GENMASK_ULL(37, 32)
#define IRDMA_CQPHC_HMC_PROFILE_S 0
-#define IRDMA_CQPHC_HMC_PROFILE_M (0x7ULL << IRDMA_CQPHC_HMC_PROFILE_S)
-
+#define IRDMA_CQPHC_HMC_PROFILE GENMASK_ULL(2, 0)
#define IRDMA_CQPHC_SVER_S 24
-#define IRDMA_CQPHC_SVER_M (0xffULL << IRDMA_CQPHC_SVER_S)
-
+#define IRDMA_CQPHC_SVER GENMASK_ULL(31, 24)
#define IRDMA_CQPHC_SQBASE_S 9
-#define IRDMA_CQPHC_SQBASE_M \
- (0xfffffffffffffeULL << IRDMA_CQPHC_SQBASE_S)
+#define IRDMA_CQPHC_SQBASE GENMASK_ULL(63, 9)
#define IRDMA_CQPHC_QPCTX_S 0
-#define IRDMA_CQPHC_QPCTX_M \
- (0xffffffffffffffffULL << IRDMA_CQPHC_QPCTX_S)
-
-/* iWARP QP Doorbell shadow area */
+#define IRDMA_CQPHC_QPCTX GENMASK_ULL(63, 0)
#define IRDMA_QP_DBSA_HW_SQ_TAIL_S 0
-#define IRDMA_QP_DBSA_HW_SQ_TAIL_M \
- (0x7fffULL << IRDMA_QP_DBSA_HW_SQ_TAIL_S)
-
-/* Completion Queue Doorbell shadow area */
+#define IRDMA_QP_DBSA_HW_SQ_TAIL GENMASK_ULL(14, 0)
#define IRDMA_CQ_DBSA_CQEIDX_S 0
-#define IRDMA_CQ_DBSA_CQEIDX_M (0xfffffULL << IRDMA_CQ_DBSA_CQEIDX_S)
-
+#define IRDMA_CQ_DBSA_CQEIDX GENMASK_ULL(19, 0)
#define IRDMA_CQ_DBSA_SW_CQ_SELECT_S 0
-#define IRDMA_CQ_DBSA_SW_CQ_SELECT_M \
- (0x3fffULL << IRDMA_CQ_DBSA_SW_CQ_SELECT_S)
-
+#define IRDMA_CQ_DBSA_SW_CQ_SELECT GENMASK_ULL(13, 0)
#define IRDMA_CQ_DBSA_ARM_NEXT_S 14
-#define IRDMA_CQ_DBSA_ARM_NEXT_M BIT_ULL(IRDMA_CQ_DBSA_ARM_NEXT_S)
-
+#define IRDMA_CQ_DBSA_ARM_NEXT BIT_ULL(14)
#define IRDMA_CQ_DBSA_ARM_NEXT_SE_S 15
-#define IRDMA_CQ_DBSA_ARM_NEXT_SE_M BIT_ULL(IRDMA_CQ_DBSA_ARM_NEXT_SE_S)
-
+#define IRDMA_CQ_DBSA_ARM_NEXT_SE BIT_ULL(15)
#define IRDMA_CQ_DBSA_ARM_SEQ_NUM_S 16
-#define IRDMA_CQ_DBSA_ARM_SEQ_NUM_M \
- (0x3ULL << IRDMA_CQ_DBSA_ARM_SEQ_NUM_S)
+#define IRDMA_CQ_DBSA_ARM_SEQ_NUM GENMASK_ULL(17, 16)
/* CQP and iWARP Completion Queue */
#define IRDMA_CQ_QPCTX_S IRDMA_CQPHC_QPCTX_S
-#define IRDMA_CQ_QPCTX_M IRDMA_CQPHC_QPCTX_M
+#define IRDMA_CQ_QPCTX IRDMA_CQPHC_QPCTX
#define IRDMA_CCQ_OPRETVAL_S 0
-#define IRDMA_CCQ_OPRETVAL_M (0xffffffffULL << IRDMA_CCQ_OPRETVAL_S)
+#define IRDMA_CCQ_OPRETVAL GENMASK_ULL(31, 0)
#define IRDMA_CQ_MINERR_S 0
-#define IRDMA_CQ_MINERR_M (0xffffULL << IRDMA_CQ_MINERR_S)
-
+#define IRDMA_CQ_MINERR GENMASK_ULL(15, 0)
#define IRDMA_CQ_MAJERR_S 16
-#define IRDMA_CQ_MAJERR_M (0xffffULL << IRDMA_CQ_MAJERR_S)
-
+#define IRDMA_CQ_MAJERR GENMASK_ULL(31, 16)
#define IRDMA_CQ_WQEIDX_S 32
-#define IRDMA_CQ_WQEIDX_M (0x7fffULL << IRDMA_CQ_WQEIDX_S)
-
+#define IRDMA_CQ_WQEIDX GENMASK_ULL(46, 32)
#define IRDMA_CQ_EXTCQE_S 50
-#define IRDMA_CQ_EXTCQE_M BIT_ULL(IRDMA_CQ_EXTCQE_S)
-
+#define IRDMA_CQ_EXTCQE BIT_ULL(50)
#define IRDMA_OOO_CMPL_S 54
-#define IRDMA_OOO_CMPL_M BIT_ULL(IRDMA_OOO_CMPL_S)
-
+#define IRDMA_OOO_CMPL BIT_ULL(54)
#define IRDMA_CQ_ERROR_S 55
-#define IRDMA_CQ_ERROR_M BIT_ULL(IRDMA_CQ_ERROR_S)
-
+#define IRDMA_CQ_ERROR BIT_ULL(55)
#define IRDMA_CQ_SQ_S 62
-#define IRDMA_CQ_SQ_M BIT_ULL(IRDMA_CQ_SQ_S)
+#define IRDMA_CQ_SQ BIT_ULL(62)
#define IRDMA_CQ_VALID_S 63
-#define IRDMA_CQ_VALID_M BIT_ULL(IRDMA_CQ_VALID_S)
-
-#define IRDMA_CQ_IMMVALID_S 62
-#define IRDMA_CQ_IMMVALID_M BIT_ULL(IRDMA_CQ_IMMVALID_S)
-
+#define IRDMA_CQ_VALID BIT_ULL(63)
+#define IRDMA_CQ_IMMVALID BIT_ULL(62)
#define IRDMA_CQ_UDSMACVALID_S 61
-#define IRDMA_CQ_UDSMACVALID_M BIT_ULL(IRDMA_CQ_UDSMACVALID_S)
-
+#define IRDMA_CQ_UDSMACVALID BIT_ULL(61)
#define IRDMA_CQ_UDVLANVALID_S 60
-#define IRDMA_CQ_UDVLANVALID_M BIT_ULL(IRDMA_CQ_UDVLANVALID_S)
-
+#define IRDMA_CQ_UDVLANVALID BIT_ULL(60)
#define IRDMA_CQ_UDSMAC_S 0
-#define IRDMA_CQ_UDSMAC_M (0xffffffffffffULL << IRDMA_CQ_UDSMAC_S)
-
+#define IRDMA_CQ_UDSMAC GENMASK_ULL(47, 0)
#define IRDMA_CQ_UDVLAN_S 48
-#define IRDMA_CQ_UDVLAN_M (0xffffULL << IRDMA_CQ_UDVLAN_S)
+#define IRDMA_CQ_UDVLAN GENMASK_ULL(63, 48)
#define IRDMA_CQ_IMMDATA_S 0
-#define IRDMA_CQ_IMMDATA_M (0xffffffffffffffffULL << IRDMA_CQ_IMMVALID_S)
-
+#define IRDMA_CQ_IMMVALID_S 62
+#define IRDMA_CQ_IMMDATA GENMASK_ULL(125, 62)
#define IRDMA_CQ_IMMDATALOW32_S 0
-#define IRDMA_CQ_IMMDATALOW32_M (0xffffffffULL << IRDMA_CQ_IMMDATALOW32_S)
-
+#define IRDMA_CQ_IMMDATALOW32 GENMASK_ULL(31, 0)
#define IRDMA_CQ_IMMDATAUP32_S 32
-#define IRDMA_CQ_IMMDATAUP32_M (0xffffffffULL << IRDMA_CQ_IMMDATAUP32_S)
-
+#define IRDMA_CQ_IMMDATAUP32 GENMASK_ULL(63, 32)
#define IRDMACQ_PAYLDLEN_S 0
-#define IRDMACQ_PAYLDLEN_M (0xffffffffULL << IRDMACQ_PAYLDLEN_S)
-
-#define IRDMACQ_TCPSEQNUMRTT_S 32
-#define IRDMACQ_TCPSEQNUMRTT_M (0xffffffffULL << IRDMACQ_TCPSEQNUMRTT_S)
-
+#define IRDMACQ_PAYLDLEN GENMASK_ULL(31, 0)
+#define IRDMACQ_TCPSQN_ROCEPSN_RTT_TS_S 32
+#define IRDMACQ_TCPSQN_ROCEPSN_RTT_TS GENMASK_ULL(63, 32)
#define IRDMACQ_INVSTAG_S 0
-#define IRDMACQ_INVSTAG_M (0xffffffffULL << IRDMACQ_INVSTAG_S)
-
+#define IRDMACQ_INVSTAG GENMASK_ULL(31, 0)
#define IRDMACQ_QPID_S 32
-#define IRDMACQ_QPID_M (0xffffffULL << IRDMACQ_QPID_S)
+#define IRDMACQ_QPID GENMASK_ULL(55, 32)
#define IRDMACQ_UDSRCQPN_S 0
-#define IRDMACQ_UDSRCQPN_M (0xffffffffULL << IRDMACQ_UDSRCQPN_S)
-
+#define IRDMACQ_UDSRCQPN GENMASK_ULL(31, 0)
#define IRDMACQ_PSHDROP_S 51
-#define IRDMACQ_PSHDROP_M BIT_ULL(IRDMACQ_PSHDROP_S)
-
+#define IRDMACQ_PSHDROP BIT_ULL(51)
#define IRDMACQ_STAG_S 53
-#define IRDMACQ_STAG_M BIT_ULL(IRDMACQ_STAG_S)
-
+#define IRDMACQ_STAG BIT_ULL(53)
#define IRDMACQ_IPV4_S 53
-#define IRDMACQ_IPV4_M BIT_ULL(IRDMACQ_IPV4_S)
-
+#define IRDMACQ_IPV4 BIT_ULL(53)
#define IRDMACQ_SOEVENT_S 54
-#define IRDMACQ_SOEVENT_M BIT_ULL(IRDMACQ_SOEVENT_S)
-
+#define IRDMACQ_SOEVENT BIT_ULL(54)
#define IRDMACQ_OP_S 56
-#define IRDMACQ_OP_M (0x3fULL << IRDMACQ_OP_S)
+#define IRDMACQ_OP GENMASK_ULL(61, 56)
-/* CEQE format */
#define IRDMA_CEQE_CQCTX_S 0
-#define IRDMA_CEQE_CQCTX_M \
- (0x7fffffffffffffffULL << IRDMA_CEQE_CQCTX_S)
-
+#define IRDMA_CEQE_CQCTX GENMASK_ULL(62, 0)
#define IRDMA_CEQE_VALID_S 63
-#define IRDMA_CEQE_VALID_M BIT_ULL(IRDMA_CEQE_VALID_S)
+#define IRDMA_CEQE_VALID BIT_ULL(63)
/* AEQE format */
#define IRDMA_AEQE_COMPCTX_S IRDMA_CQPHC_QPCTX_S
-#define IRDMA_AEQE_COMPCTX_M IRDMA_CQPHC_QPCTX_M
-
+#define IRDMA_AEQE_COMPCTX IRDMA_CQPHC_QPCTX
#define IRDMA_AEQE_QPCQID_LOW_S 0
-#define IRDMA_AEQE_QPCQID_LOW_M (0x3ffffULL << IRDMA_AEQE_QPCQID_LOW_S)
-
+#define IRDMA_AEQE_QPCQID_LOW GENMASK_ULL(17, 0)
#define IRDMA_AEQE_QPCQID_HI_S 46
-#define IRDMA_AEQE_QPCQID_HI_M BIT_ULL(IRDMA_AEQE_QPCQID_HI_S)
-
+#define IRDMA_AEQE_QPCQID_HI BIT_ULL(46)
#define IRDMA_AEQE_WQDESCIDX_S 18
-#define IRDMA_AEQE_WQDESCIDX_M (0x7fffULL << IRDMA_AEQE_WQDESCIDX_S)
-
+#define IRDMA_AEQE_WQDESCIDX GENMASK_ULL(32, 18)
#define IRDMA_AEQE_OVERFLOW_S 33
-#define IRDMA_AEQE_OVERFLOW_M BIT_ULL(IRDMA_AEQE_OVERFLOW_S)
-
+#define IRDMA_AEQE_OVERFLOW BIT_ULL(33)
#define IRDMA_AEQE_AECODE_S 34
-#define IRDMA_AEQE_AECODE_M (0xfffULL << IRDMA_AEQE_AECODE_S)
-
+#define IRDMA_AEQE_AECODE GENMASK_ULL(45, 34)
#define IRDMA_AEQE_AESRC_S 50
-#define IRDMA_AEQE_AESRC_M (0xfULL << IRDMA_AEQE_AESRC_S)
-
+#define IRDMA_AEQE_AESRC GENMASK_ULL(53, 50)
#define IRDMA_AEQE_IWSTATE_S 54
-#define IRDMA_AEQE_IWSTATE_M (0x7ULL << IRDMA_AEQE_IWSTATE_S)
-
+#define IRDMA_AEQE_IWSTATE GENMASK_ULL(56, 54)
#define IRDMA_AEQE_TCPSTATE_S 57
-#define IRDMA_AEQE_TCPSTATE_M (0xfULL << IRDMA_AEQE_TCPSTATE_S)
-
+#define IRDMA_AEQE_TCPSTATE GENMASK_ULL(60, 57)
#define IRDMA_AEQE_Q2DATA_S 61
-#define IRDMA_AEQE_Q2DATA_M (0x3ULL << IRDMA_AEQE_Q2DATA_S)
-
+#define IRDMA_AEQE_Q2DATA GENMASK_ULL(62, 61)
#define IRDMA_AEQE_VALID_S 63
-#define IRDMA_AEQE_VALID_M BIT_ULL(IRDMA_AEQE_VALID_S)
+#define IRDMA_AEQE_VALID BIT_ULL(63)
#define IRDMA_UDA_QPSQ_NEXT_HDR_S 16
-#define IRDMA_UDA_QPSQ_NEXT_HDR_M ((u64)0xff << IRDMA_UDA_QPSQ_NEXT_HDR_S)
-
+#define IRDMA_UDA_QPSQ_NEXT_HDR GENMASK_ULL(23, 16)
#define IRDMA_UDA_QPSQ_OPCODE_S 32
-#define IRDMA_UDA_QPSQ_OPCODE_M ((u64)0x3f << IRDMA_UDA_QPSQ_OPCODE_S)
-
+#define IRDMA_UDA_QPSQ_OPCODE GENMASK_ULL(37, 32)
#define IRDMA_UDA_QPSQ_L4LEN_S 42
-#define IRDMA_UDA_QPSQ_L4LEN_M ((u64)0xf << IRDMA_UDA_QPSQ_L4LEN_S)
-
+#define IRDMA_UDA_QPSQ_L4LEN GENMASK_ULL(45, 42)
#define IRDMA_GEN1_UDA_QPSQ_L4LEN_S 24
-#define IRDMA_GEN1_UDA_QPSQ_L4LEN_M ((u64)0xf << IRDMA_GEN1_UDA_QPSQ_L4LEN_S)
-
+#define IRDMA_GEN1_UDA_QPSQ_L4LEN GENMASK_ULL(27, 24)
#define IRDMA_UDA_QPSQ_AHIDX_S 0
-#define IRDMA_UDA_QPSQ_AHIDX_M ((u64)0x1ffff << IRDMA_UDA_QPSQ_AHIDX_S)
-
+#define IRDMA_UDA_QPSQ_AHIDX GENMASK_ULL(16, 0)
#define IRDMA_UDA_QPSQ_VALID_S 63
-#define IRDMA_UDA_QPSQ_VALID_M \
- BIT_ULL(IRDMA_UDA_QPSQ_VALID_S)
-
+#define IRDMA_UDA_QPSQ_VALID BIT_ULL(63)
#define IRDMA_UDA_QPSQ_SIGCOMPL_S 62
-#define IRDMA_UDA_QPSQ_SIGCOMPL_M BIT_ULL(IRDMA_UDA_QPSQ_SIGCOMPL_S)
-
+#define IRDMA_UDA_QPSQ_SIGCOMPL BIT_ULL(62)
#define IRDMA_UDA_QPSQ_MACLEN_S 56
-#define IRDMA_UDA_QPSQ_MACLEN_M \
- ((u64)0x7f << IRDMA_UDA_QPSQ_MACLEN_S)
-
+#define IRDMA_UDA_QPSQ_MACLEN GENMASK_ULL(62, 56)
#define IRDMA_UDA_QPSQ_IPLEN_S 48
-#define IRDMA_UDA_QPSQ_IPLEN_M \
- ((u64)0x7f << IRDMA_UDA_QPSQ_IPLEN_S)
-
+#define IRDMA_UDA_QPSQ_IPLEN GENMASK_ULL(54, 48)
#define IRDMA_UDA_QPSQ_L4T_S 30
-#define IRDMA_UDA_QPSQ_L4T_M \
- ((u64)0x3 << IRDMA_UDA_QPSQ_L4T_S)
-
+#define IRDMA_UDA_QPSQ_L4T GENMASK_ULL(31, 30)
#define IRDMA_UDA_QPSQ_IIPT_S 28
-#define IRDMA_UDA_QPSQ_IIPT_M \
- ((u64)0x3 << IRDMA_UDA_QPSQ_IIPT_S)
-
+#define IRDMA_UDA_QPSQ_IIPT GENMASK_ULL(29, 28)
#define IRDMA_UDA_PAYLOADLEN_S 0
-#define IRDMA_UDA_PAYLOADLEN_M ((u64)0x3fff << IRDMA_UDA_PAYLOADLEN_S)
-
+#define IRDMA_UDA_PAYLOADLEN GENMASK_ULL(13, 0)
#define IRDMA_UDA_HDRLEN_S 16
-#define IRDMA_UDA_HDRLEN_M ((u64)0x1ff << IRDMA_UDA_HDRLEN_S)
-
+#define IRDMA_UDA_HDRLEN GENMASK_ULL(24, 16)
#define IRDMA_VLAN_TAG_VALID_S 50
-#define IRDMA_VLAN_TAG_VALID_M BIT_ULL(IRDMA_VLAN_TAG_VALID_S)
-
+#define IRDMA_VLAN_TAG_VALID BIT_ULL(50)
#define IRDMA_UDA_L3PROTO_S 0
-#define IRDMA_UDA_L3PROTO_M ((u64)0x3 << IRDMA_UDA_L3PROTO_S)
-
+#define IRDMA_UDA_L3PROTO GENMASK_ULL(1, 0)
#define IRDMA_UDA_L4PROTO_S 16
-#define IRDMA_UDA_L4PROTO_M ((u64)0x3 << IRDMA_UDA_L4PROTO_S)
-
+#define IRDMA_UDA_L4PROTO GENMASK_ULL(17, 16)
#define IRDMA_UDA_QPSQ_DOLOOPBACK_S 44
-#define IRDMA_UDA_QPSQ_DOLOOPBACK_M \
- BIT_ULL(IRDMA_UDA_QPSQ_DOLOOPBACK_S)
-
-/* CQP SQ WQE common fields */
+#define IRDMA_UDA_QPSQ_DOLOOPBACK BIT_ULL(44)
#define IRDMA_CQPSQ_BUFSIZE_S 0
-#define IRDMA_CQPSQ_BUFSIZE_M (0xffffffffULL << IRDMA_CQPSQ_BUFSIZE_S)
-
+#define IRDMA_CQPSQ_BUFSIZE GENMASK_ULL(31, 0)
#define IRDMA_CQPSQ_OPCODE_S 32
-#define IRDMA_CQPSQ_OPCODE_M (0x3fULL << IRDMA_CQPSQ_OPCODE_S)
-
+#define IRDMA_CQPSQ_OPCODE GENMASK_ULL(37, 32)
#define IRDMA_CQPSQ_WQEVALID_S 63
-#define IRDMA_CQPSQ_WQEVALID_M BIT_ULL(IRDMA_CQPSQ_WQEVALID_S)
-
+#define IRDMA_CQPSQ_WQEVALID BIT_ULL(63)
#define IRDMA_CQPSQ_TPHVAL_S 0
-#define IRDMA_CQPSQ_TPHVAL_M (0xffULL << IRDMA_CQPSQ_TPHVAL_S)
+#define IRDMA_CQPSQ_TPHVAL GENMASK_ULL(7, 0)
#define IRDMA_CQPSQ_VSIIDX_S 8
-#define IRDMA_CQPSQ_VSIIDX_M (0x3ffULL << IRDMA_CQPSQ_VSIIDX_S)
-
+#define IRDMA_CQPSQ_VSIIDX GENMASK_ULL(17, 8)
#define IRDMA_CQPSQ_TPHEN_S 60
-#define IRDMA_CQPSQ_TPHEN_M BIT_ULL(IRDMA_CQPSQ_TPHEN_S)
+#define IRDMA_CQPSQ_TPHEN BIT_ULL(60)
#define IRDMA_CQPSQ_PBUFADDR_S IRDMA_CQPHC_QPCTX_S
-#define IRDMA_CQPSQ_PBUFADDR_M IRDMA_CQPHC_QPCTX_M
+#define IRDMA_CQPSQ_PBUFADDR IRDMA_CQPHC_QPCTX
/* Create/Modify/Destroy QP */
#define IRDMA_CQPSQ_QP_NEWMSS_S 32
-#define IRDMA_CQPSQ_QP_NEWMSS_M (0x3fffULL << IRDMA_CQPSQ_QP_NEWMSS_S)
-
+#define IRDMA_CQPSQ_QP_NEWMSS GENMASK_ULL(45, 32)
#define IRDMA_CQPSQ_QP_TERMLEN_S 48
-#define IRDMA_CQPSQ_QP_TERMLEN_M (0xfULL << IRDMA_CQPSQ_QP_TERMLEN_S)
+#define IRDMA_CQPSQ_QP_TERMLEN GENMASK_ULL(51, 48)
#define IRDMA_CQPSQ_QP_QPCTX_S IRDMA_CQPHC_QPCTX_S
-#define IRDMA_CQPSQ_QP_QPCTX_M IRDMA_CQPHC_QPCTX_M
+#define IRDMA_CQPSQ_QP_QPCTX IRDMA_CQPHC_QPCTX
#define IRDMA_CQPSQ_QP_QPID_S 0
#define IRDMA_CQPSQ_QP_QPID_M (0xFFFFFFUL)
#define IRDMA_CQPSQ_QP_OP_S 32
#define IRDMA_CQPSQ_QP_OP_M IRDMACQ_OP_M
-
#define IRDMA_CQPSQ_QP_ORDVALID_S 42
-#define IRDMA_CQPSQ_QP_ORDVALID_M BIT_ULL(IRDMA_CQPSQ_QP_ORDVALID_S)
-
+#define IRDMA_CQPSQ_QP_ORDVALID BIT_ULL(42)
#define IRDMA_CQPSQ_QP_TOECTXVALID_S 43
-#define IRDMA_CQPSQ_QP_TOECTXVALID_M \
- BIT_ULL(IRDMA_CQPSQ_QP_TOECTXVALID_S)
-
+#define IRDMA_CQPSQ_QP_TOECTXVALID BIT_ULL(43)
#define IRDMA_CQPSQ_QP_CACHEDVARVALID_S 44
-#define IRDMA_CQPSQ_QP_CACHEDVARVALID_M \
- BIT_ULL(IRDMA_CQPSQ_QP_CACHEDVARVALID_S)
-
+#define IRDMA_CQPSQ_QP_CACHEDVARVALID BIT_ULL(44)
#define IRDMA_CQPSQ_QP_VQ_S 45
-#define IRDMA_CQPSQ_QP_VQ_M BIT_ULL(IRDMA_CQPSQ_QP_VQ_S)
-
+#define IRDMA_CQPSQ_QP_VQ BIT_ULL(45)
#define IRDMA_CQPSQ_QP_FORCELOOPBACK_S 46
-#define IRDMA_CQPSQ_QP_FORCELOOPBACK_M \
- BIT_ULL(IRDMA_CQPSQ_QP_FORCELOOPBACK_S)
-
+#define IRDMA_CQPSQ_QP_FORCELOOPBACK BIT_ULL(46)
#define IRDMA_CQPSQ_QP_CQNUMVALID_S 47
-#define IRDMA_CQPSQ_QP_CQNUMVALID_M \
- BIT_ULL(IRDMA_CQPSQ_QP_CQNUMVALID_S)
-
+#define IRDMA_CQPSQ_QP_CQNUMVALID BIT_ULL(47)
#define IRDMA_CQPSQ_QP_QPTYPE_S 48
-#define IRDMA_CQPSQ_QP_QPTYPE_M (0x7ULL << IRDMA_CQPSQ_QP_QPTYPE_S)
-
+#define IRDMA_CQPSQ_QP_QPTYPE GENMASK_ULL(50, 48)
#define IRDMA_CQPSQ_QP_MACVALID_S 51
-#define IRDMA_CQPSQ_QP_MACVALID_M BIT_ULL(IRDMA_CQPSQ_QP_MACVALID_S)
-
+#define IRDMA_CQPSQ_QP_MACVALID BIT_ULL(51)
#define IRDMA_CQPSQ_QP_MSSCHANGE_S 52
-#define IRDMA_CQPSQ_QP_MSSCHANGE_M BIT_ULL(IRDMA_CQPSQ_QP_MSSCHANGE_S)
+#define IRDMA_CQPSQ_QP_MSSCHANGE BIT_ULL(52)
#define IRDMA_CQPSQ_QP_IGNOREMWBOUND_S 54
-#define IRDMA_CQPSQ_QP_IGNOREMWBOUND_M \
- BIT_ULL(IRDMA_CQPSQ_QP_IGNOREMWBOUND_S)
-
+#define IRDMA_CQPSQ_QP_IGNOREMWBOUND BIT_ULL(54)
#define IRDMA_CQPSQ_QP_REMOVEHASHENTRY_S 55
-#define IRDMA_CQPSQ_QP_REMOVEHASHENTRY_M \
- BIT_ULL(IRDMA_CQPSQ_QP_REMOVEHASHENTRY_S)
-
+#define IRDMA_CQPSQ_QP_REMOVEHASHENTRY BIT_ULL(55)
#define IRDMA_CQPSQ_QP_TERMACT_S 56
-#define IRDMA_CQPSQ_QP_TERMACT_M (0x3ULL << IRDMA_CQPSQ_QP_TERMACT_S)
-
+#define IRDMA_CQPSQ_QP_TERMACT GENMASK_ULL(57, 56)
#define IRDMA_CQPSQ_QP_RESETCON_S 58
-#define IRDMA_CQPSQ_QP_RESETCON_M BIT_ULL(IRDMA_CQPSQ_QP_RESETCON_S)
-
+#define IRDMA_CQPSQ_QP_RESETCON BIT_ULL(58)
#define IRDMA_CQPSQ_QP_ARPTABIDXVALID_S 59
-#define IRDMA_CQPSQ_QP_ARPTABIDXVALID_M \
- BIT_ULL(IRDMA_CQPSQ_QP_ARPTABIDXVALID_S)
-
+#define IRDMA_CQPSQ_QP_ARPTABIDXVALID BIT_ULL(59)
#define IRDMA_CQPSQ_QP_NEXTIWSTATE_S 60
-#define IRDMA_CQPSQ_QP_NEXTIWSTATE_M \
- (0x7ULL << IRDMA_CQPSQ_QP_NEXTIWSTATE_S)
+#define IRDMA_CQPSQ_QP_NEXTIWSTATE GENMASK_ULL(62, 60)
#define IRDMA_CQPSQ_QP_DBSHADOWADDR_S IRDMA_CQPHC_QPCTX_S
-#define IRDMA_CQPSQ_QP_DBSHADOWADDR_M IRDMA_CQPHC_QPCTX_M
+#define IRDMA_CQPSQ_QP_DBSHADOWADDR IRDMA_CQPHC_QPCTX
-/* Create/Modify/Destroy CQ */
#define IRDMA_CQPSQ_CQ_CQSIZE_S 0
-#define IRDMA_CQPSQ_CQ_CQSIZE_M (0x1fffffULL << IRDMA_CQPSQ_CQ_CQSIZE_S)
-
+#define IRDMA_CQPSQ_CQ_CQSIZE GENMASK_ULL(20, 0)
#define IRDMA_CQPSQ_CQ_CQCTX_S 0
-#define IRDMA_CQPSQ_CQ_CQCTX_M \
- (0x7fffffffffffffffULL << IRDMA_CQPSQ_CQ_CQCTX_S)
-
+#define IRDMA_CQPSQ_CQ_CQCTX GENMASK_ULL(62, 0)
#define IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD_S 0
-#define IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD_M \
- (0x3ffff << IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD_S)
+#define IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD GENMASK(17, 0)
#define IRDMA_CQPSQ_CQ_OP_S 32
-#define IRDMA_CQPSQ_CQ_OP_M (0x3fULL << IRDMA_CQPSQ_CQ_OP_S)
-
+#define IRDMA_CQPSQ_CQ_OP GENMASK_ULL(37, 32)
#define IRDMA_CQPSQ_CQ_CQRESIZE_S 43
-#define IRDMA_CQPSQ_CQ_CQRESIZE_M BIT_ULL(IRDMA_CQPSQ_CQ_CQRESIZE_S)
-
+#define IRDMA_CQPSQ_CQ_CQRESIZE BIT_ULL(43)
#define IRDMA_CQPSQ_CQ_LPBLSIZE_S 44
-#define IRDMA_CQPSQ_CQ_LPBLSIZE_M (3ULL << IRDMA_CQPSQ_CQ_LPBLSIZE_S)
-
+#define IRDMA_CQPSQ_CQ_LPBLSIZE GENMASK_ULL(45, 44)
#define IRDMA_CQPSQ_CQ_CHKOVERFLOW_S 46
-#define IRDMA_CQPSQ_CQ_CHKOVERFLOW_M \
- BIT_ULL(IRDMA_CQPSQ_CQ_CHKOVERFLOW_S)
-
+#define IRDMA_CQPSQ_CQ_CHKOVERFLOW BIT_ULL(46)
#define IRDMA_CQPSQ_CQ_VIRTMAP_S 47
-#define IRDMA_CQPSQ_CQ_VIRTMAP_M BIT_ULL(IRDMA_CQPSQ_CQ_VIRTMAP_S)
-
+#define IRDMA_CQPSQ_CQ_VIRTMAP BIT_ULL(47)
#define IRDMA_CQPSQ_CQ_ENCEQEMASK_S 48
-#define IRDMA_CQPSQ_CQ_ENCEQEMASK_M \
- BIT_ULL(IRDMA_CQPSQ_CQ_ENCEQEMASK_S)
-
+#define IRDMA_CQPSQ_CQ_ENCEQEMASK BIT_ULL(48)
#define IRDMA_CQPSQ_CQ_CEQIDVALID_S 49
-#define IRDMA_CQPSQ_CQ_CEQIDVALID_M \
- BIT_ULL(IRDMA_CQPSQ_CQ_CEQIDVALID_S)
-
+#define IRDMA_CQPSQ_CQ_CEQIDVALID BIT_ULL(49)
#define IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT_S 61
-#define IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT_M \
- BIT_ULL(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT_S)
-
+#define IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT BIT_ULL(61)
#define IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX_S 0
-#define IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX_M \
- (0xfffffffULL << IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX_S)
+#define IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
/* Allocate/Register/Register Shared/Deallocate Stag */
#define IRDMA_CQPSQ_STAG_VA_FBO_S IRDMA_CQPHC_QPCTX_S
-#define IRDMA_CQPSQ_STAG_VA_FBO_M IRDMA_CQPHC_QPCTX_M
-
+#define IRDMA_CQPSQ_STAG_VA_FBO IRDMA_CQPHC_QPCTX
#define IRDMA_CQPSQ_STAG_STAGLEN_S 0
-#define IRDMA_CQPSQ_STAG_STAGLEN_M \
- (0x3fffffffffffULL << IRDMA_CQPSQ_STAG_STAGLEN_S)
-
+#define IRDMA_CQPSQ_STAG_STAGLEN GENMASK_ULL(45, 0)
#define IRDMA_CQPSQ_STAG_KEY_S 0
-#define IRDMA_CQPSQ_STAG_KEY_M (0xffULL << IRDMA_CQPSQ_STAG_KEY_S)
-
+#define IRDMA_CQPSQ_STAG_KEY GENMASK_ULL(7, 0)
#define IRDMA_CQPSQ_STAG_IDX_S 8
-#define IRDMA_CQPSQ_STAG_IDX_M (0xffffffULL << IRDMA_CQPSQ_STAG_IDX_S)
-
+#define IRDMA_CQPSQ_STAG_IDX GENMASK_ULL(31, 8)
#define IRDMA_CQPSQ_STAG_PARENTSTAGIDX_S 32
-#define IRDMA_CQPSQ_STAG_PARENTSTAGIDX_M \
- (0xffffffULL << IRDMA_CQPSQ_STAG_PARENTSTAGIDX_S)
-
+#define IRDMA_CQPSQ_STAG_PARENTSTAGIDX GENMASK_ULL(55, 32)
#define IRDMA_CQPSQ_STAG_MR_S 43
-#define IRDMA_CQPSQ_STAG_MR_M BIT_ULL(IRDMA_CQPSQ_STAG_MR_S)
-
+#define IRDMA_CQPSQ_STAG_MR BIT_ULL(43)
#define IRDMA_CQPSQ_STAG_MWTYPE_S 42
-#define IRDMA_CQPSQ_STAG_MWTYPE_M BIT_ULL(IRDMA_CQPSQ_STAG_MWTYPE_S)
-
+#define IRDMA_CQPSQ_STAG_MWTYPE BIT_ULL(42)
#define IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY_S 58
-#define IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY_M \
- BIT_ULL(IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY_S)
+#define IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY BIT_ULL(58)
#define IRDMA_CQPSQ_STAG_LPBLSIZE_S IRDMA_CQPSQ_CQ_LPBLSIZE_S
#define IRDMA_CQPSQ_STAG_LPBLSIZE_M IRDMA_CQPSQ_CQ_LPBLSIZE_M
-
+#define IRDMA_CQPSQ_STAG_LPBLSIZE IRDMA_CQPSQ_CQ_LPBLSIZE
#define IRDMA_CQPSQ_STAG_HPAGESIZE_S 46
-#define IRDMA_CQPSQ_STAG_HPAGESIZE_M \
- ((u64)3 << IRDMA_CQPSQ_STAG_HPAGESIZE_S)
-
+#define IRDMA_CQPSQ_STAG_HPAGESIZE GENMASK_ULL(47, 46)
#define IRDMA_CQPSQ_STAG_ARIGHTS_S 48
-#define IRDMA_CQPSQ_STAG_ARIGHTS_M \
- (0x1fULL << IRDMA_CQPSQ_STAG_ARIGHTS_S)
-
+#define IRDMA_CQPSQ_STAG_ARIGHTS GENMASK_ULL(52, 48)
#define IRDMA_CQPSQ_STAG_REMACCENABLED_S 53
-#define IRDMA_CQPSQ_STAG_REMACCENABLED_M \
- BIT_ULL(IRDMA_CQPSQ_STAG_REMACCENABLED_S)
-
+#define IRDMA_CQPSQ_STAG_REMACCENABLED BIT_ULL(53)
#define IRDMA_CQPSQ_STAG_VABASEDTO_S 59
-#define IRDMA_CQPSQ_STAG_VABASEDTO_M \
- BIT_ULL(IRDMA_CQPSQ_STAG_VABASEDTO_S)
-
+#define IRDMA_CQPSQ_STAG_VABASEDTO BIT_ULL(59)
#define IRDMA_CQPSQ_STAG_USEHMCFNIDX_S 60
-#define IRDMA_CQPSQ_STAG_USEHMCFNIDX_M \
- BIT_ULL(IRDMA_CQPSQ_STAG_USEHMCFNIDX_S)
-
+#define IRDMA_CQPSQ_STAG_USEHMCFNIDX BIT_ULL(60)
#define IRDMA_CQPSQ_STAG_USEPFRID_S 61
-#define IRDMA_CQPSQ_STAG_USEPFRID_M \
- BIT_ULL(IRDMA_CQPSQ_STAG_USEPFRID_S)
+#define IRDMA_CQPSQ_STAG_USEPFRID BIT_ULL(61)
#define IRDMA_CQPSQ_STAG_PBA_S IRDMA_CQPHC_QPCTX_S
-#define IRDMA_CQPSQ_STAG_PBA_M IRDMA_CQPHC_QPCTX_M
-
+#define IRDMA_CQPSQ_STAG_PBA IRDMA_CQPHC_QPCTX
#define IRDMA_CQPSQ_STAG_HMCFNIDX_S 0
-#define IRDMA_CQPSQ_STAG_HMCFNIDX_M \
- (0x3fULL << IRDMA_CQPSQ_STAG_HMCFNIDX_S)
+#define IRDMA_CQPSQ_STAG_HMCFNIDX GENMASK_ULL(5, 0)
#define IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX_S 0
-#define IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX_M \
- (0xfffffffULL << IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX_S)
+#define IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
#define IRDMA_CQPSQ_QUERYSTAG_IDX_S IRDMA_CQPSQ_STAG_IDX_S
-#define IRDMA_CQPSQ_QUERYSTAG_IDX_M IRDMA_CQPSQ_STAG_IDX_M
-
-/* Manage Local MAC Table - MLM */
+#define IRDMA_CQPSQ_QUERYSTAG_IDX IRDMA_CQPSQ_STAG_IDX
#define IRDMA_CQPSQ_MLM_TABLEIDX_S 0
-#define IRDMA_CQPSQ_MLM_TABLEIDX_M \
- (0x3fULL << IRDMA_CQPSQ_MLM_TABLEIDX_S)
-
+#define IRDMA_CQPSQ_MLM_TABLEIDX GENMASK_ULL(5, 0)
#define IRDMA_CQPSQ_MLM_FREEENTRY_S 62
-#define IRDMA_CQPSQ_MLM_FREEENTRY_M \
- BIT_ULL(IRDMA_CQPSQ_MLM_FREEENTRY_S)
-
+#define IRDMA_CQPSQ_MLM_FREEENTRY BIT_ULL(62)
#define IRDMA_CQPSQ_MLM_IGNORE_REF_CNT_S 61
-#define IRDMA_CQPSQ_MLM_IGNORE_REF_CNT_M \
- BIT_ULL(IRDMA_CQPSQ_MLM_IGNORE_REF_CNT_S)
-
+#define IRDMA_CQPSQ_MLM_IGNORE_REF_CNT BIT_ULL(61)
#define IRDMA_CQPSQ_MLM_MAC0_S 0
-#define IRDMA_CQPSQ_MLM_MAC0_M (0xffULL << IRDMA_CQPSQ_MLM_MAC0_S)
-
+#define IRDMA_CQPSQ_MLM_MAC0 GENMASK_ULL(7, 0)
#define IRDMA_CQPSQ_MLM_MAC1_S 8
-#define IRDMA_CQPSQ_MLM_MAC1_M (0xffULL << IRDMA_CQPSQ_MLM_MAC1_S)
-
+#define IRDMA_CQPSQ_MLM_MAC1 GENMASK_ULL(15, 8)
#define IRDMA_CQPSQ_MLM_MAC2_S 16
-#define IRDMA_CQPSQ_MLM_MAC2_M (0xffULL << IRDMA_CQPSQ_MLM_MAC2_S)
-
+#define IRDMA_CQPSQ_MLM_MAC2 GENMASK_ULL(23, 16)
#define IRDMA_CQPSQ_MLM_MAC3_S 24
-#define IRDMA_CQPSQ_MLM_MAC3_M (0xffULL << IRDMA_CQPSQ_MLM_MAC3_S)
-
+#define IRDMA_CQPSQ_MLM_MAC3 GENMASK_ULL(31, 24)
#define IRDMA_CQPSQ_MLM_MAC4_S 32
-#define IRDMA_CQPSQ_MLM_MAC4_M (0xffULL << IRDMA_CQPSQ_MLM_MAC4_S)
-
+#define IRDMA_CQPSQ_MLM_MAC4 GENMASK_ULL(39, 32)
#define IRDMA_CQPSQ_MLM_MAC5_S 40
-#define IRDMA_CQPSQ_MLM_MAC5_M (0xffULL << IRDMA_CQPSQ_MLM_MAC5_S)
-
-/* Manage ARP Table - MAT */
+#define IRDMA_CQPSQ_MLM_MAC5 GENMASK_ULL(47, 40)
#define IRDMA_CQPSQ_MAT_REACHMAX_S 0
-#define IRDMA_CQPSQ_MAT_REACHMAX_M \
- (0xffffffffULL << IRDMA_CQPSQ_MAT_REACHMAX_S)
-
+#define IRDMA_CQPSQ_MAT_REACHMAX GENMASK_ULL(31, 0)
#define IRDMA_CQPSQ_MAT_MACADDR_S 0
-#define IRDMA_CQPSQ_MAT_MACADDR_M \
- (0xffffffffffffULL << IRDMA_CQPSQ_MAT_MACADDR_S)
-
+#define IRDMA_CQPSQ_MAT_MACADDR GENMASK_ULL(47, 0)
#define IRDMA_CQPSQ_MAT_ARPENTRYIDX_S 0
-#define IRDMA_CQPSQ_MAT_ARPENTRYIDX_M \
- (0xfffULL << IRDMA_CQPSQ_MAT_ARPENTRYIDX_S)
-
+#define IRDMA_CQPSQ_MAT_ARPENTRYIDX GENMASK_ULL(11, 0)
#define IRDMA_CQPSQ_MAT_ENTRYVALID_S 42
-#define IRDMA_CQPSQ_MAT_ENTRYVALID_M \
- BIT_ULL(IRDMA_CQPSQ_MAT_ENTRYVALID_S)
-
+#define IRDMA_CQPSQ_MAT_ENTRYVALID BIT_ULL(42)
#define IRDMA_CQPSQ_MAT_PERMANENT_S 43
-#define IRDMA_CQPSQ_MAT_PERMANENT_M \
- BIT_ULL(IRDMA_CQPSQ_MAT_PERMANENT_S)
-
+#define IRDMA_CQPSQ_MAT_PERMANENT BIT_ULL(43)
#define IRDMA_CQPSQ_MAT_QUERY_S 44
-#define IRDMA_CQPSQ_MAT_QUERY_M BIT_ULL(IRDMA_CQPSQ_MAT_QUERY_S)
-
-/* Manage VF PBLE Backing Pages - MVPBP*/
+#define IRDMA_CQPSQ_MAT_QUERY BIT_ULL(44)
#define IRDMA_CQPSQ_MVPBP_PD_ENTRY_CNT_S 0
-#define IRDMA_CQPSQ_MVPBP_PD_ENTRY_CNT_M \
- (0x3ffULL << IRDMA_CQPSQ_MVPBP_PD_ENTRY_CNT_S)
-
+#define IRDMA_CQPSQ_MVPBP_PD_ENTRY_CNT GENMASK_ULL(9, 0)
#define IRDMA_CQPSQ_MVPBP_FIRST_PD_INX_S 16
-#define IRDMA_CQPSQ_MVPBP_FIRST_PD_INX_M \
- (0x1ffULL << IRDMA_CQPSQ_MVPBP_FIRST_PD_INX_S)
-
+#define IRDMA_CQPSQ_MVPBP_FIRST_PD_INX GENMASK_ULL(24, 16)
#define IRDMA_CQPSQ_MVPBP_SD_INX_S 32
-#define IRDMA_CQPSQ_MVPBP_SD_INX_M \
- (0xfffULL << IRDMA_CQPSQ_MVPBP_SD_INX_S)
-
+#define IRDMA_CQPSQ_MVPBP_SD_INX GENMASK_ULL(43, 32)
#define IRDMA_CQPSQ_MVPBP_INV_PD_ENT_S 62
-#define IRDMA_CQPSQ_MVPBP_INV_PD_ENT_M \
- BIT_ULL(IRDMA_CQPSQ_MVPBP_INV_PD_ENT_S)
-
+#define IRDMA_CQPSQ_MVPBP_INV_PD_ENT BIT_ULL(62)
#define IRDMA_CQPSQ_MVPBP_PD_PLPBA_S 3
-#define IRDMA_CQPSQ_MVPBP_PD_PLPBA_M \
- (0x1fffffffffffffffULL << IRDMA_CQPSQ_MVPBP_PD_PLPBA_S)
+#define IRDMA_CQPSQ_MVPBP_PD_PLPBA GENMASK_ULL(63, 3)
/* Manage Push Page - MPP */
#define IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1 0xffff
#define IRDMA_INVALID_PUSH_PAGE_INDEX 0xffffffff
#define IRDMA_CQPSQ_MPP_QS_HANDLE_S 0
-#define IRDMA_CQPSQ_MPP_QS_HANDLE_M \
- (0x3ffULL << IRDMA_CQPSQ_MPP_QS_HANDLE_S)
-
+#define IRDMA_CQPSQ_MPP_QS_HANDLE GENMASK_ULL(9, 0)
#define IRDMA_CQPSQ_MPP_PPIDX_S 0
-#define IRDMA_CQPSQ_MPP_PPIDX_M (0x3ffULL << IRDMA_CQPSQ_MPP_PPIDX_S)
-
+#define IRDMA_CQPSQ_MPP_PPIDX GENMASK_ULL(9, 0)
#define IRDMA_CQPSQ_MPP_PPTYPE_S 60
-#define IRDMA_CQPSQ_MPP_PPTYPE_M (0x3ULL << IRDMA_CQPSQ_MPP_PPTYPE_S)
+#define IRDMA_CQPSQ_MPP_PPTYPE GENMASK_ULL(61, 60)
#define IRDMA_CQPSQ_MPP_FREE_PAGE_S 62
-#define IRDMA_CQPSQ_MPP_FREE_PAGE_M BIT_ULL(IRDMA_CQPSQ_MPP_FREE_PAGE_S)
+#define IRDMA_CQPSQ_MPP_FREE_PAGE BIT_ULL(62)
/* Upload Context - UCTX */
#define IRDMA_CQPSQ_UCTX_QPCTXADDR_S IRDMA_CQPHC_QPCTX_S
-#define IRDMA_CQPSQ_UCTX_QPCTXADDR_M IRDMA_CQPHC_QPCTX_M
-
+#define IRDMA_CQPSQ_UCTX_QPCTXADDR IRDMA_CQPHC_QPCTX
#define IRDMA_CQPSQ_UCTX_QPID_S 0
-#define IRDMA_CQPSQ_UCTX_QPID_M (0xffffffULL << IRDMA_CQPSQ_UCTX_QPID_S)
-
+#define IRDMA_CQPSQ_UCTX_QPID GENMASK_ULL(23, 0)
#define IRDMA_CQPSQ_UCTX_QPTYPE_S 48
-#define IRDMA_CQPSQ_UCTX_QPTYPE_M (0xfULL << IRDMA_CQPSQ_UCTX_QPTYPE_S)
+#define IRDMA_CQPSQ_UCTX_QPTYPE GENMASK_ULL(51, 48)
#define IRDMA_CQPSQ_UCTX_RAWFORMAT_S 61
-#define IRDMA_CQPSQ_UCTX_RAWFORMAT_M \
- BIT_ULL(IRDMA_CQPSQ_UCTX_RAWFORMAT_S)
-
+#define IRDMA_CQPSQ_UCTX_RAWFORMAT BIT_ULL(61)
#define IRDMA_CQPSQ_UCTX_FREEZEQP_S 62
-#define IRDMA_CQPSQ_UCTX_FREEZEQP_M \
- BIT_ULL(IRDMA_CQPSQ_UCTX_FREEZEQP_S)
+#define IRDMA_CQPSQ_UCTX_FREEZEQP BIT_ULL(62)
-/* Manage HMC PM Function Table - MHMC */
#define IRDMA_CQPSQ_MHMC_VFIDX_S 0
-#define IRDMA_CQPSQ_MHMC_VFIDX_M (0xffffULL << IRDMA_CQPSQ_MHMC_VFIDX_S)
-
+#define IRDMA_CQPSQ_MHMC_VFIDX GENMASK_ULL(15, 0)
#define IRDMA_CQPSQ_MHMC_FREEPMFN_S 62
-#define IRDMA_CQPSQ_MHMC_FREEPMFN_M \
- BIT_ULL(IRDMA_CQPSQ_MHMC_FREEPMFN_S)
+#define IRDMA_CQPSQ_MHMC_FREEPMFN BIT_ULL(62)
-/* Set HMC Resource Profile - SHMCRP */
#define IRDMA_CQPSQ_SHMCRP_HMC_PROFILE_S 0
-#define IRDMA_CQPSQ_SHMCRP_HMC_PROFILE_M \
- (0x7ULL << IRDMA_CQPSQ_SHMCRP_HMC_PROFILE_S)
+#define IRDMA_CQPSQ_SHMCRP_HMC_PROFILE GENMASK_ULL(2, 0)
#define IRDMA_CQPSQ_SHMCRP_VFNUM_S 32
-#define IRDMA_CQPSQ_SHMCRP_VFNUM_M (0x3fULL << IRDMA_CQPSQ_SHMCRP_VFNUM_S)
-
-/* Create/Destroy CEQ */
+#define IRDMA_CQPSQ_SHMCRP_VFNUM GENMASK_ULL(37, 32)
#define IRDMA_CQPSQ_CEQ_CEQSIZE_S 0
-#define IRDMA_CQPSQ_CEQ_CEQSIZE_M \
- (0x3fffffULL << IRDMA_CQPSQ_CEQ_CEQSIZE_S)
-
+#define IRDMA_CQPSQ_CEQ_CEQSIZE GENMASK_ULL(21, 0)
#define IRDMA_CQPSQ_CEQ_CEQID_S 0
-#define IRDMA_CQPSQ_CEQ_CEQID_M (0x3ffULL << IRDMA_CQPSQ_CEQ_CEQID_S)
+#define IRDMA_CQPSQ_CEQ_CEQID GENMASK_ULL(9, 0)
#define IRDMA_CQPSQ_CEQ_LPBLSIZE_S IRDMA_CQPSQ_CQ_LPBLSIZE_S
#define IRDMA_CQPSQ_CEQ_LPBLSIZE_M IRDMA_CQPSQ_CQ_LPBLSIZE_M
-
+#define IRDMA_CQPSQ_CEQ_LPBLSIZE IRDMA_CQPSQ_CQ_LPBLSIZE
#define IRDMA_CQPSQ_CEQ_VMAP_S 47
-#define IRDMA_CQPSQ_CEQ_VMAP_M BIT_ULL(IRDMA_CQPSQ_CEQ_VMAP_S)
-
+#define IRDMA_CQPSQ_CEQ_VMAP BIT_ULL(47)
#define IRDMA_CQPSQ_CEQ_ITRNOEXPIRE_S 46
-#define IRDMA_CQPSQ_CEQ_ITRNOEXPIRE_M BIT_ULL(IRDMA_CQPSQ_CEQ_ITRNOEXPIRE_S)
-
+#define IRDMA_CQPSQ_CEQ_ITRNOEXPIRE BIT_ULL(46)
#define IRDMA_CQPSQ_CEQ_FIRSTPMPBLIDX_S 0
-#define IRDMA_CQPSQ_CEQ_FIRSTPMPBLIDX_M \
- (0xfffffffULL << IRDMA_CQPSQ_CEQ_FIRSTPMPBLIDX_S)
-
-/* Create/Destroy AEQ */
+#define IRDMA_CQPSQ_CEQ_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
#define IRDMA_CQPSQ_AEQ_AEQECNT_S 0
-#define IRDMA_CQPSQ_AEQ_AEQECNT_M \
- (0x7ffffULL << IRDMA_CQPSQ_AEQ_AEQECNT_S)
+#define IRDMA_CQPSQ_AEQ_AEQECNT GENMASK_ULL(18, 0)
#define IRDMA_CQPSQ_AEQ_LPBLSIZE_S IRDMA_CQPSQ_CQ_LPBLSIZE_S
#define IRDMA_CQPSQ_AEQ_LPBLSIZE_M IRDMA_CQPSQ_CQ_LPBLSIZE_M
-
+#define IRDMA_CQPSQ_AEQ_LPBLSIZE IRDMA_CQPSQ_CQ_LPBLSIZE
#define IRDMA_CQPSQ_AEQ_VMAP_S 47
-#define IRDMA_CQPSQ_AEQ_VMAP_M BIT_ULL(IRDMA_CQPSQ_AEQ_VMAP_S)
-
+#define IRDMA_CQPSQ_AEQ_VMAP BIT_ULL(47)
#define IRDMA_CQPSQ_AEQ_FIRSTPMPBLIDX_S 0
-#define IRDMA_CQPSQ_AEQ_FIRSTPMPBLIDX_M \
- (0xfffffffULL << IRDMA_CQPSQ_AEQ_FIRSTPMPBLIDX_S)
+#define IRDMA_CQPSQ_AEQ_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
-/* Commit FPM Values - CFPM */
#define IRDMA_COMMIT_FPM_QPCNT_S 0
-#define IRDMA_COMMIT_FPM_QPCNT_M (0x7ffffULL << IRDMA_COMMIT_FPM_QPCNT_S)
+#define IRDMA_COMMIT_FPM_QPCNT GENMASK_ULL(18, 0)
#define IRDMA_COMMIT_FPM_BASE_S 32
-
#define IRDMA_CQPSQ_CFPM_HMCFNID_S 0
-#define IRDMA_CQPSQ_CFPM_HMCFNID_M (0x3fULL << IRDMA_CQPSQ_CFPM_HMCFNID_S)
+#define IRDMA_CQPSQ_CFPM_HMCFNID GENMASK_ULL(5, 0)
-/* Flush WQEs - FWQE */
#define IRDMA_CQPSQ_FWQE_AECODE_S 0
-#define IRDMA_CQPSQ_FWQE_AECODE_M (0xffffULL << IRDMA_CQPSQ_FWQE_AECODE_S)
-
+#define IRDMA_CQPSQ_FWQE_AECODE GENMASK_ULL(15, 0)
#define IRDMA_CQPSQ_FWQE_AESOURCE_S 16
-#define IRDMA_CQPSQ_FWQE_AESOURCE_M \
- (0xfULL << IRDMA_CQPSQ_FWQE_AESOURCE_S)
-
+#define IRDMA_CQPSQ_FWQE_AESOURCE GENMASK_ULL(19, 16)
#define IRDMA_CQPSQ_FWQE_RQMNERR_S 0
-#define IRDMA_CQPSQ_FWQE_RQMNERR_M \
- (0xffffULL << IRDMA_CQPSQ_FWQE_RQMNERR_S)
-
+#define IRDMA_CQPSQ_FWQE_RQMNERR GENMASK_ULL(15, 0)
#define IRDMA_CQPSQ_FWQE_RQMJERR_S 16
-#define IRDMA_CQPSQ_FWQE_RQMJERR_M \
- (0xffffULL << IRDMA_CQPSQ_FWQE_RQMJERR_S)
-
+#define IRDMA_CQPSQ_FWQE_RQMJERR GENMASK_ULL(31, 16)
#define IRDMA_CQPSQ_FWQE_SQMNERR_S 32
-#define IRDMA_CQPSQ_FWQE_SQMNERR_M \
- (0xffffULL << IRDMA_CQPSQ_FWQE_SQMNERR_S)
-
+#define IRDMA_CQPSQ_FWQE_SQMNERR GENMASK_ULL(47, 32)
#define IRDMA_CQPSQ_FWQE_SQMJERR_S 48
-#define IRDMA_CQPSQ_FWQE_SQMJERR_M \
- (0xffffULL << IRDMA_CQPSQ_FWQE_SQMJERR_S)
-
+#define IRDMA_CQPSQ_FWQE_SQMJERR GENMASK_ULL(63, 48)
#define IRDMA_CQPSQ_FWQE_QPID_S 0
-#define IRDMA_CQPSQ_FWQE_QPID_M (0xffffffULL << IRDMA_CQPSQ_FWQE_QPID_S)
-
+#define IRDMA_CQPSQ_FWQE_QPID GENMASK_ULL(23, 0)
#define IRDMA_CQPSQ_FWQE_GENERATE_AE_S 59
-#define IRDMA_CQPSQ_FWQE_GENERATE_AE_M \
- BIT_ULL(IRDMA_CQPSQ_FWQE_GENERATE_AE_S)
-
+#define IRDMA_CQPSQ_FWQE_GENERATE_AE BIT_ULL(59)
#define IRDMA_CQPSQ_FWQE_USERFLCODE_S 60
-#define IRDMA_CQPSQ_FWQE_USERFLCODE_M \
- BIT_ULL(IRDMA_CQPSQ_FWQE_USERFLCODE_S)
-
+#define IRDMA_CQPSQ_FWQE_USERFLCODE BIT_ULL(60)
#define IRDMA_CQPSQ_FWQE_FLUSHSQ_S 61
-#define IRDMA_CQPSQ_FWQE_FLUSHSQ_M BIT_ULL(IRDMA_CQPSQ_FWQE_FLUSHSQ_S)
-
+#define IRDMA_CQPSQ_FWQE_FLUSHSQ BIT_ULL(61)
#define IRDMA_CQPSQ_FWQE_FLUSHRQ_S 62
-#define IRDMA_CQPSQ_FWQE_FLUSHRQ_M BIT_ULL(IRDMA_CQPSQ_FWQE_FLUSHRQ_S)
-
-/* Manage Accelerated Port Table - MAPT */
+#define IRDMA_CQPSQ_FWQE_FLUSHRQ BIT_ULL(62)
#define IRDMA_CQPSQ_MAPT_PORT_S 0
-#define IRDMA_CQPSQ_MAPT_PORT_M (0xffffULL << IRDMA_CQPSQ_MAPT_PORT_S)
-
+#define IRDMA_CQPSQ_MAPT_PORT GENMASK_ULL(15, 0)
#define IRDMA_CQPSQ_MAPT_ADDPORT_S 62
-#define IRDMA_CQPSQ_MAPT_ADDPORT_M BIT_ULL(IRDMA_CQPSQ_MAPT_ADDPORT_S)
-
-/* Update Protocol Engine SDs */
+#define IRDMA_CQPSQ_MAPT_ADDPORT BIT_ULL(62)
#define IRDMA_CQPSQ_UPESD_SDCMD_S 0
-#define IRDMA_CQPSQ_UPESD_SDCMD_M (0xffffffffULL << IRDMA_CQPSQ_UPESD_SDCMD_S)
-
+#define IRDMA_CQPSQ_UPESD_SDCMD GENMASK_ULL(31, 0)
#define IRDMA_CQPSQ_UPESD_SDDATALOW_S 0
-#define IRDMA_CQPSQ_UPESD_SDDATALOW_M \
- (0xffffffffULL << IRDMA_CQPSQ_UPESD_SDDATALOW_S)
-
+#define IRDMA_CQPSQ_UPESD_SDDATALOW GENMASK_ULL(31, 0)
#define IRDMA_CQPSQ_UPESD_SDDATAHI_S 32
-#define IRDMA_CQPSQ_UPESD_SDDATAHI_M \
- (0xffffffffULL << IRDMA_CQPSQ_UPESD_SDDATAHI_S)
-#define IRDMA_CQPSQ_UPESD_HMCFNID_S 0
-#define IRDMA_CQPSQ_UPESD_HMCFNID_M \
- (0x3fULL << IRDMA_CQPSQ_UPESD_HMCFNID_S)
-
+#define IRDMA_CQPSQ_UPESD_SDDATAHI GENMASK_ULL(63, 32)
#define IRDMA_CQPSQ_UPESD_ENTRY_VALID_S 63
-#define IRDMA_CQPSQ_UPESD_ENTRY_VALID_M \
- BIT_ULL(IRDMA_CQPSQ_UPESD_ENTRY_VALID_S)
+#define IRDMA_CQPSQ_UPESD_ENTRY_VALID BIT_ULL(63)
#define IRDMA_CQPSQ_UPESD_BM_PF 0
#define IRDMA_CQPSQ_UPESD_BM_CP_LM 1
#define IRDMA_CQPSQ_UPESD_BM_AXF 2
#define IRDMA_CQPSQ_UPESD_BM_LM 4
-
#define IRDMA_CQPSQ_UPESD_BM_S 32
-#define IRDMA_CQPSQ_UPESD_BM_M \
- (0x7ULL << IRDMA_CQPSQ_UPESD_BM_S)
-
+#define IRDMA_CQPSQ_UPESD_BM GENMASK_ULL(34, 32)
#define IRDMA_CQPSQ_UPESD_ENTRY_COUNT_S 0
-#define IRDMA_CQPSQ_UPESD_ENTRY_COUNT_M \
- (0xfULL << IRDMA_CQPSQ_UPESD_ENTRY_COUNT_S)
-
+#define IRDMA_CQPSQ_UPESD_ENTRY_COUNT GENMASK_ULL(3, 0)
#define IRDMA_CQPSQ_UPESD_SKIP_ENTRY_S 7
-#define IRDMA_CQPSQ_UPESD_SKIP_ENTRY_M \
- BIT_ULL(IRDMA_CQPSQ_UPESD_SKIP_ENTRY_S)
+#define IRDMA_CQPSQ_UPESD_SKIP_ENTRY BIT_ULL(7)
/* Suspend QP */
#define IRDMA_CQPSQ_SUSPENDQP_QPID_S 0
-#define IRDMA_CQPSQ_SUSPENDQP_QPID_M (0xFFFFFFULL)
-
-/* Resume QP */
+#define IRDMA_CQPSQ_SUSPENDQP_QPID GENMASK_ULL(23, 0)
#define IRDMA_CQPSQ_RESUMEQP_QSHANDLE_S 0
-#define IRDMA_CQPSQ_RESUMEQP_QSHANDLE_M \
- (0xffffffffULL << IRDMA_CQPSQ_RESUMEQP_QSHANDLE_S)
+#define IRDMA_CQPSQ_RESUMEQP_QSHANDLE GENMASK_ULL(31, 0)
-#define IRDMA_CQPSQ_RESUMEQP_QPID_S 0
-#define IRDMA_CQPSQ_RESUMEQP_QPID_M (0xFFFFFFUL)
+#define IRDMA_CQPSQ_RESUMEQP_QPID_S IRDMA_CQPSQ_SUSPENDQP_QPID_S
+#define IRDMA_CQPSQ_RESUMEQP_QPID_M IRDMA_CQPSQ_SUSPENDQP_QPID_M
+#define IRDMA_CQPSQ_RESUMEQP_QPID IRDMA_CQPSQ_SUSPENDQP_QPID
#define IRDMA_CQPSQ_MIN_STAG_INVALID 0x0001
#define IRDMA_CQPSQ_MIN_SUSPEND_PND 0x0005
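The run of changes above and below is the headline "refactor defines for hardware registers": every legacy (value << SHIFT) mask is collapsed into a single GENMASK_ULL()/BIT_ULL() macro, so the bit span is readable at the definition site. A standalone sketch, assuming GENMASK_ULL() is defined as in LinuxKPI, checking that the two spellings of one field from this hunk agree:

/*
 * Standalone sketch, not driver code: the macro definitions below
 * mirror the LinuxKPI ones, and the sample field (VSIIDX, bits 17:8)
 * matches the conversion at the top of this hunk.
 */
#include <assert.h>

#define BIT_ULL(nr)       (1ULL << (nr))
#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

#define OLD_VSIIDX_S 8
#define OLD_VSIIDX_M (0x3ffULL << OLD_VSIIDX_S)  /* legacy _S/_M pair */
#define NEW_VSIIDX   GENMASK_ULL(17, 8)          /* single-macro form */

int main(void)
{
	assert(OLD_VSIIDX_M == NEW_VSIIDX);      /* both are 0x3ff00 */
	return 0;
}

The single-macro form also feeds directly into FIELD_PREP()/FIELD_GET(), which the irdma_hmc.c hunks further down switch to.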
@@ -1467,636 +955,422 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_MAJ_OBJCACHE_ERROR 0xF000
#define IRDMA_CQPSQ_MAJ_CNTXTCACHE_ERROR 0xF001
#define IRDMA_CQPSQ_MAJ_ERROR 0xFFFF
-
-/* IW QP Context */
#define IRDMAQPC_DDP_VER_S 0
-#define IRDMAQPC_DDP_VER_M (3ULL << IRDMAQPC_DDP_VER_S)
-
+#define IRDMAQPC_DDP_VER GENMASK_ULL(1, 0)
#define IRDMAQPC_IBRDENABLE_S 2
-#define IRDMAQPC_IBRDENABLE_M BIT_ULL(IRDMAQPC_IBRDENABLE_S)
-
+#define IRDMAQPC_IBRDENABLE BIT_ULL(2)
#define IRDMAQPC_IPV4_S 3
-#define IRDMAQPC_IPV4_M BIT_ULL(IRDMAQPC_IPV4_S)
-
+#define IRDMAQPC_IPV4 BIT_ULL(3)
#define IRDMAQPC_NONAGLE_S 4
-#define IRDMAQPC_NONAGLE_M BIT_ULL(IRDMAQPC_NONAGLE_S)
-
+#define IRDMAQPC_NONAGLE BIT_ULL(4)
#define IRDMAQPC_INSERTVLANTAG_S 5
-#define IRDMAQPC_INSERTVLANTAG_M BIT_ULL(IRDMAQPC_INSERTVLANTAG_S)
-
+#define IRDMAQPC_INSERTVLANTAG BIT_ULL(5)
#define IRDMAQPC_ISQP1_S 6
-#define IRDMAQPC_ISQP1_M BIT_ULL(IRDMAQPC_ISQP1_S)
-
+#define IRDMAQPC_ISQP1 BIT_ULL(6)
#define IRDMAQPC_TIMESTAMP_S 7
-#define IRDMAQPC_TIMESTAMP_M BIT_ULL(IRDMAQPC_TIMESTAMP_S)
-
+#define IRDMAQPC_TIMESTAMP BIT_ULL(7)
#define IRDMAQPC_RQWQESIZE_S 8
-#define IRDMAQPC_RQWQESIZE_M (3ULL << IRDMAQPC_RQWQESIZE_S)
-
+#define IRDMAQPC_RQWQESIZE GENMASK_ULL(9, 8)
#define IRDMAQPC_INSERTL2TAG2_S 11
-#define IRDMAQPC_INSERTL2TAG2_M BIT_ULL(IRDMAQPC_INSERTL2TAG2_S)
-
+#define IRDMAQPC_INSERTL2TAG2 BIT_ULL(11)
#define IRDMAQPC_LIMIT_S 12
-#define IRDMAQPC_LIMIT_M (3ULL << IRDMAQPC_LIMIT_S)
+#define IRDMAQPC_LIMIT GENMASK_ULL(13, 12)
#define IRDMAQPC_ECN_EN_S 14
-#define IRDMAQPC_ECN_EN_M BIT_ULL(IRDMAQPC_ECN_EN_S)
-
+#define IRDMAQPC_ECN_EN BIT_ULL(14)
#define IRDMAQPC_DROPOOOSEG_S 15
-#define IRDMAQPC_DROPOOOSEG_M BIT_ULL(IRDMAQPC_DROPOOOSEG_S)
-
+#define IRDMAQPC_DROPOOOSEG BIT_ULL(15)
#define IRDMAQPC_DUPACK_THRESH_S 16
-#define IRDMAQPC_DUPACK_THRESH_M (7ULL << IRDMAQPC_DUPACK_THRESH_S)
-
+#define IRDMAQPC_DUPACK_THRESH GENMASK_ULL(18, 16)
#define IRDMAQPC_ERR_RQ_IDX_VALID_S 19
-#define IRDMAQPC_ERR_RQ_IDX_VALID_M BIT_ULL(IRDMAQPC_ERR_RQ_IDX_VALID_S)
-
+#define IRDMAQPC_ERR_RQ_IDX_VALID BIT_ULL(19)
#define IRDMAQPC_DIS_VLAN_CHECKS_S 19
-#define IRDMAQPC_DIS_VLAN_CHECKS_M (7ULL << IRDMAQPC_DIS_VLAN_CHECKS_S)
-
+#define IRDMAQPC_DIS_VLAN_CHECKS GENMASK_ULL(21, 19)
#define IRDMAQPC_DC_TCP_EN_S 25
-#define IRDMAQPC_DC_TCP_EN_M BIT_ULL(IRDMAQPC_DC_TCP_EN_S)
-
+#define IRDMAQPC_DC_TCP_EN BIT_ULL(25)
#define IRDMAQPC_RCVTPHEN_S 28
-#define IRDMAQPC_RCVTPHEN_M BIT_ULL(IRDMAQPC_RCVTPHEN_S)
-
+#define IRDMAQPC_RCVTPHEN BIT_ULL(28)
#define IRDMAQPC_XMITTPHEN_S 29
-#define IRDMAQPC_XMITTPHEN_M BIT_ULL(IRDMAQPC_XMITTPHEN_S)
-
+#define IRDMAQPC_XMITTPHEN BIT_ULL(29)
#define IRDMAQPC_RQTPHEN_S 30
-#define IRDMAQPC_RQTPHEN_M BIT_ULL(IRDMAQPC_RQTPHEN_S)
-
+#define IRDMAQPC_RQTPHEN BIT_ULL(30)
#define IRDMAQPC_SQTPHEN_S 31
-#define IRDMAQPC_SQTPHEN_M BIT_ULL(IRDMAQPC_SQTPHEN_S)
-
+#define IRDMAQPC_SQTPHEN BIT_ULL(31)
#define IRDMAQPC_PPIDX_S 32
-#define IRDMAQPC_PPIDX_M (0x3ffULL << IRDMAQPC_PPIDX_S)
-
+#define IRDMAQPC_PPIDX GENMASK_ULL(41, 32)
#define IRDMAQPC_PMENA_S 47
-#define IRDMAQPC_PMENA_M BIT_ULL(IRDMAQPC_PMENA_S)
-
+#define IRDMAQPC_PMENA BIT_ULL(47)
#define IRDMAQPC_RDMAP_VER_S 62
-#define IRDMAQPC_RDMAP_VER_M (3ULL << IRDMAQPC_RDMAP_VER_S)
-
+#define IRDMAQPC_RDMAP_VER GENMASK_ULL(63, 62)
#define IRDMAQPC_ROCE_TVER_S 60
-#define IRDMAQPC_ROCE_TVER_M (0x0fULL << IRDMAQPC_ROCE_TVER_S)
+#define IRDMAQPC_ROCE_TVER GENMASK_ULL(63, 60)
#define IRDMAQPC_SQADDR_S IRDMA_CQPHC_QPCTX_S
-#define IRDMAQPC_SQADDR_M IRDMA_CQPHC_QPCTX_M
+#define IRDMAQPC_SQADDR IRDMA_CQPHC_QPCTX
#define IRDMAQPC_RQADDR_S IRDMA_CQPHC_QPCTX_S
-#define IRDMAQPC_RQADDR_M IRDMA_CQPHC_QPCTX_M
-
+#define IRDMAQPC_RQADDR IRDMA_CQPHC_QPCTX
#define IRDMAQPC_TTL_S 0
-#define IRDMAQPC_TTL_M (0xffULL << IRDMAQPC_TTL_S)
-
+#define IRDMAQPC_TTL GENMASK_ULL(7, 0)
#define IRDMAQPC_RQSIZE_S 8
-#define IRDMAQPC_RQSIZE_M (0xfULL << IRDMAQPC_RQSIZE_S)
-
+#define IRDMAQPC_RQSIZE GENMASK_ULL(11, 8)
#define IRDMAQPC_SQSIZE_S 12
-#define IRDMAQPC_SQSIZE_M (0xfULL << IRDMAQPC_SQSIZE_S)
-
+#define IRDMAQPC_SQSIZE GENMASK_ULL(15, 12)
#define IRDMAQPC_GEN1_SRCMACADDRIDX_S 16
-#define IRDMAQPC_GEN1_SRCMACADDRIDX_M (0x3fUL << IRDMAQPC_GEN1_SRCMACADDRIDX_S)
-
+#define IRDMAQPC_GEN1_SRCMACADDRIDX GENMASK(21, 16)
#define IRDMAQPC_AVOIDSTRETCHACK_S 23
-#define IRDMAQPC_AVOIDSTRETCHACK_M BIT_ULL(IRDMAQPC_AVOIDSTRETCHACK_S)
-
+#define IRDMAQPC_AVOIDSTRETCHACK BIT_ULL(23)
#define IRDMAQPC_TOS_S 24
-#define IRDMAQPC_TOS_M (0xffULL << IRDMAQPC_TOS_S)
-
+#define IRDMAQPC_TOS GENMASK_ULL(31, 24)
#define IRDMAQPC_SRCPORTNUM_S 32
-#define IRDMAQPC_SRCPORTNUM_M (0xffffULL << IRDMAQPC_SRCPORTNUM_S)
-
+#define IRDMAQPC_SRCPORTNUM GENMASK_ULL(47, 32)
#define IRDMAQPC_DESTPORTNUM_S 48
-#define IRDMAQPC_DESTPORTNUM_M (0xffffULL << IRDMAQPC_DESTPORTNUM_S)
-
+#define IRDMAQPC_DESTPORTNUM GENMASK_ULL(63, 48)
#define IRDMAQPC_DESTIPADDR0_S 32
-#define IRDMAQPC_DESTIPADDR0_M \
- (0xffffffffULL << IRDMAQPC_DESTIPADDR0_S)
-
+#define IRDMAQPC_DESTIPADDR0 GENMASK_ULL(63, 32)
#define IRDMAQPC_DESTIPADDR1_S 0
-#define IRDMAQPC_DESTIPADDR1_M \
- (0xffffffffULL << IRDMAQPC_DESTIPADDR1_S)
-
+#define IRDMAQPC_DESTIPADDR1 GENMASK_ULL(31, 0)
#define IRDMAQPC_DESTIPADDR2_S 32
-#define IRDMAQPC_DESTIPADDR2_M \
- (0xffffffffULL << IRDMAQPC_DESTIPADDR2_S)
-
+#define IRDMAQPC_DESTIPADDR2 GENMASK_ULL(63, 32)
#define IRDMAQPC_DESTIPADDR3_S 0
-#define IRDMAQPC_DESTIPADDR3_M \
- (0xffffffffULL << IRDMAQPC_DESTIPADDR3_S)
-
+#define IRDMAQPC_DESTIPADDR3 GENMASK_ULL(31, 0)
#define IRDMAQPC_SNDMSS_S 16
-#define IRDMAQPC_SNDMSS_M (0x3fffULL << IRDMAQPC_SNDMSS_S)
-
+#define IRDMAQPC_SNDMSS GENMASK_ULL(29, 16)
#define IRDMAQPC_SYN_RST_HANDLING_S 30
-#define IRDMAQPC_SYN_RST_HANDLING_M (0x3ULL << IRDMAQPC_SYN_RST_HANDLING_S)
-
+#define IRDMAQPC_SYN_RST_HANDLING GENMASK_ULL(31, 30)
#define IRDMAQPC_VLANTAG_S 32
-#define IRDMAQPC_VLANTAG_M (0xffffULL << IRDMAQPC_VLANTAG_S)
-
+#define IRDMAQPC_VLANTAG GENMASK_ULL(47, 32)
#define IRDMAQPC_ARPIDX_S 48
-#define IRDMAQPC_ARPIDX_M (0xffffULL << IRDMAQPC_ARPIDX_S)
-
+#define IRDMAQPC_ARPIDX GENMASK_ULL(63, 48)
#define IRDMAQPC_FLOWLABEL_S 0
-#define IRDMAQPC_FLOWLABEL_M (0xfffffULL << IRDMAQPC_FLOWLABEL_S)
-
+#define IRDMAQPC_FLOWLABEL GENMASK_ULL(19, 0)
#define IRDMAQPC_WSCALE_S 20
-#define IRDMAQPC_WSCALE_M BIT_ULL(IRDMAQPC_WSCALE_S)
-
+#define IRDMAQPC_WSCALE BIT_ULL(20)
#define IRDMAQPC_KEEPALIVE_S 21
-#define IRDMAQPC_KEEPALIVE_M BIT_ULL(IRDMAQPC_KEEPALIVE_S)
-
+#define IRDMAQPC_KEEPALIVE BIT_ULL(21)
#define IRDMAQPC_IGNORE_TCP_OPT_S 22
-#define IRDMAQPC_IGNORE_TCP_OPT_M BIT_ULL(IRDMAQPC_IGNORE_TCP_OPT_S)
-
+#define IRDMAQPC_IGNORE_TCP_OPT BIT_ULL(22)
#define IRDMAQPC_IGNORE_TCP_UNS_OPT_S 23
-#define IRDMAQPC_IGNORE_TCP_UNS_OPT_M \
- BIT_ULL(IRDMAQPC_IGNORE_TCP_UNS_OPT_S)
-
+#define IRDMAQPC_IGNORE_TCP_UNS_OPT BIT_ULL(23)
#define IRDMAQPC_TCPSTATE_S 28
-#define IRDMAQPC_TCPSTATE_M (0xfULL << IRDMAQPC_TCPSTATE_S)
-
+#define IRDMAQPC_TCPSTATE GENMASK_ULL(31, 28)
#define IRDMAQPC_RCVSCALE_S 32
-#define IRDMAQPC_RCVSCALE_M (0xfULL << IRDMAQPC_RCVSCALE_S)
-
+#define IRDMAQPC_RCVSCALE GENMASK_ULL(35, 32)
#define IRDMAQPC_SNDSCALE_S 40
-#define IRDMAQPC_SNDSCALE_M (0xfULL << IRDMAQPC_SNDSCALE_S)
-
+#define IRDMAQPC_SNDSCALE GENMASK_ULL(43, 40)
#define IRDMAQPC_PDIDX_S 48
-#define IRDMAQPC_PDIDX_M (0xffffULL << IRDMAQPC_PDIDX_S)
-
+#define IRDMAQPC_PDIDX GENMASK_ULL(63, 48)
#define IRDMAQPC_PDIDXHI_S 20
-#define IRDMAQPC_PDIDXHI_M (0x3ULL << IRDMAQPC_PDIDXHI_S)
-
+#define IRDMAQPC_PDIDXHI GENMASK_ULL(21, 20)
#define IRDMAQPC_PKEY_S 32
-#define IRDMAQPC_PKEY_M (0xffffULL << IRDMAQPC_PKEY_S)
-
+#define IRDMAQPC_PKEY GENMASK_ULL(47, 32)
#define IRDMAQPC_ACKCREDITS_S 20
-#define IRDMAQPC_ACKCREDITS_M (0x1fULL << IRDMAQPC_ACKCREDITS_S)
-
+#define IRDMAQPC_ACKCREDITS GENMASK_ULL(24, 20)
#define IRDMAQPC_QKEY_S 32
-#define IRDMAQPC_QKEY_M (0xffffffffULL << IRDMAQPC_QKEY_S)
-
+#define IRDMAQPC_QKEY GENMASK_ULL(63, 32)
#define IRDMAQPC_DESTQP_S 0
-#define IRDMAQPC_DESTQP_M (0xffffffULL << IRDMAQPC_DESTQP_S)
-
+#define IRDMAQPC_DESTQP GENMASK_ULL(23, 0)
#define IRDMAQPC_KALIVE_TIMER_MAX_PROBES_S 16
-#define IRDMAQPC_KALIVE_TIMER_MAX_PROBES_M \
- (0xffULL << IRDMAQPC_KALIVE_TIMER_MAX_PROBES_S)
-
+#define IRDMAQPC_KALIVE_TIMER_MAX_PROBES GENMASK_ULL(23, 16)
#define IRDMAQPC_KEEPALIVE_INTERVAL_S 24
-#define IRDMAQPC_KEEPALIVE_INTERVAL_M \
- (0xffULL << IRDMAQPC_KEEPALIVE_INTERVAL_S)
-
+#define IRDMAQPC_KEEPALIVE_INTERVAL GENMASK_ULL(31, 24)
#define IRDMAQPC_TIMESTAMP_RECENT_S 0
-#define IRDMAQPC_TIMESTAMP_RECENT_M \
- (0xffffffffULL << IRDMAQPC_TIMESTAMP_RECENT_S)
-
+#define IRDMAQPC_TIMESTAMP_RECENT GENMASK_ULL(31, 0)
#define IRDMAQPC_TIMESTAMP_AGE_S 32
-#define IRDMAQPC_TIMESTAMP_AGE_M \
- (0xffffffffULL << IRDMAQPC_TIMESTAMP_AGE_S)
-
+#define IRDMAQPC_TIMESTAMP_AGE GENMASK_ULL(63, 32)
#define IRDMAQPC_SNDNXT_S 0
-#define IRDMAQPC_SNDNXT_M (0xffffffffULL << IRDMAQPC_SNDNXT_S)
-
+#define IRDMAQPC_SNDNXT GENMASK_ULL(31, 0)
#define IRDMAQPC_ISN_S 32
-#define IRDMAQPC_ISN_M (0x00ffffffULL << IRDMAQPC_ISN_S)
-
+#define IRDMAQPC_ISN GENMASK_ULL(55, 32)
#define IRDMAQPC_PSNNXT_S 0
-#define IRDMAQPC_PSNNXT_M (0x00ffffffULL << IRDMAQPC_PSNNXT_S)
-
+#define IRDMAQPC_PSNNXT GENMASK_ULL(23, 0)
#define IRDMAQPC_LSN_S 32
-#define IRDMAQPC_LSN_M (0x00ffffffULL << IRDMAQPC_LSN_S)
-
+#define IRDMAQPC_LSN GENMASK_ULL(55, 32)
#define IRDMAQPC_SNDWND_S 32
-#define IRDMAQPC_SNDWND_M (0xffffffffULL << IRDMAQPC_SNDWND_S)
-
+#define IRDMAQPC_SNDWND GENMASK_ULL(63, 32)
#define IRDMAQPC_RCVNXT_S 0
-#define IRDMAQPC_RCVNXT_M (0xffffffffULL << IRDMAQPC_RCVNXT_S)
-
+#define IRDMAQPC_RCVNXT GENMASK_ULL(31, 0)
#define IRDMAQPC_EPSN_S 0
-#define IRDMAQPC_EPSN_M (0x00ffffffULL << IRDMAQPC_EPSN_S)
-
+#define IRDMAQPC_EPSN GENMASK_ULL(23, 0)
#define IRDMAQPC_RCVWND_S 32
-#define IRDMAQPC_RCVWND_M (0xffffffffULL << IRDMAQPC_RCVWND_S)
-
+#define IRDMAQPC_RCVWND GENMASK_ULL(63, 32)
#define IRDMAQPC_SNDMAX_S 0
-#define IRDMAQPC_SNDMAX_M (0xffffffffULL << IRDMAQPC_SNDMAX_S)
-
+#define IRDMAQPC_SNDMAX GENMASK_ULL(31, 0)
#define IRDMAQPC_SNDUNA_S 32
-#define IRDMAQPC_SNDUNA_M (0xffffffffULL << IRDMAQPC_SNDUNA_S)
-
+#define IRDMAQPC_SNDUNA GENMASK_ULL(63, 32)
#define IRDMAQPC_PSNMAX_S 0
-#define IRDMAQPC_PSNMAX_M (0x00ffffffULL << IRDMAQPC_PSNMAX_S)
+#define IRDMAQPC_PSNMAX GENMASK_ULL(23, 0)
#define IRDMAQPC_PSNUNA_S 32
-#define IRDMAQPC_PSNUNA_M (0xffffffULL << IRDMAQPC_PSNUNA_S)
-
+#define IRDMAQPC_PSNUNA GENMASK_ULL(55, 32)
#define IRDMAQPC_SRTT_S 0
-#define IRDMAQPC_SRTT_M (0xffffffffULL << IRDMAQPC_SRTT_S)
-
+#define IRDMAQPC_SRTT GENMASK_ULL(31, 0)
#define IRDMAQPC_RTTVAR_S 32
-#define IRDMAQPC_RTTVAR_M (0xffffffffULL << IRDMAQPC_RTTVAR_S)
-
+#define IRDMAQPC_RTTVAR GENMASK_ULL(63, 32)
#define IRDMAQPC_SSTHRESH_S 0
-#define IRDMAQPC_SSTHRESH_M (0xffffffffULL << IRDMAQPC_SSTHRESH_S)
-
+#define IRDMAQPC_SSTHRESH GENMASK_ULL(31, 0)
#define IRDMAQPC_CWND_S 32
-#define IRDMAQPC_CWND_M (0xffffffffULL << IRDMAQPC_CWND_S)
-
+#define IRDMAQPC_CWND GENMASK_ULL(63, 32)
#define IRDMAQPC_CWNDROCE_S 32
-#define IRDMAQPC_CWNDROCE_M (0xffffffULL << IRDMAQPC_CWNDROCE_S)
-
+#define IRDMAQPC_CWNDROCE GENMASK_ULL(55, 32)
#define IRDMAQPC_SNDWL1_S 0
-#define IRDMAQPC_SNDWL1_M (0xffffffffULL << IRDMAQPC_SNDWL1_S)
-
+#define IRDMAQPC_SNDWL1 GENMASK_ULL(31, 0)
#define IRDMAQPC_SNDWL2_S 32
-#define IRDMAQPC_SNDWL2_M (0xffffffffULL << IRDMAQPC_SNDWL2_S)
-
+#define IRDMAQPC_SNDWL2 GENMASK_ULL(63, 32)
#define IRDMAQPC_ERR_RQ_IDX_S 32
-#define IRDMAQPC_ERR_RQ_IDX_M (0x3fffULL << IRDMAQPC_ERR_RQ_IDX_S)
-
+#define IRDMAQPC_ERR_RQ_IDX GENMASK_ULL(46, 32)
#define IRDMAQPC_RTOMIN_S 57
-#define IRDMAQPC_RTOMIN_M (0x7fULL << IRDMAQPC_RTOMIN_S)
-
+#define IRDMAQPC_RTOMIN GENMASK_ULL(63, 57)
#define IRDMAQPC_MAXSNDWND_S 0
-#define IRDMAQPC_MAXSNDWND_M (0xffffffffULL << IRDMAQPC_MAXSNDWND_S)
-
+#define IRDMAQPC_MAXSNDWND GENMASK_ULL(31, 0)
#define IRDMAQPC_REXMIT_THRESH_S 48
-#define IRDMAQPC_REXMIT_THRESH_M (0x3fULL << IRDMAQPC_REXMIT_THRESH_S)
-
+#define IRDMAQPC_REXMIT_THRESH GENMASK_ULL(53, 48)
#define IRDMAQPC_RNRNAK_THRESH_S 54
-#define IRDMAQPC_RNRNAK_THRESH_M (0x7ULL << IRDMAQPC_RNRNAK_THRESH_S)
-
+#define IRDMAQPC_RNRNAK_THRESH GENMASK_ULL(56, 54)
#define IRDMAQPC_TXCQNUM_S 0
-#define IRDMAQPC_TXCQNUM_M (0x7ffffULL << IRDMAQPC_TXCQNUM_S)
-
+#define IRDMAQPC_TXCQNUM GENMASK_ULL(18, 0)
#define IRDMAQPC_RXCQNUM_S 32
-#define IRDMAQPC_RXCQNUM_M (0x7ffffULL << IRDMAQPC_RXCQNUM_S)
-
+#define IRDMAQPC_RXCQNUM GENMASK_ULL(50, 32)
#define IRDMAQPC_STAT_INDEX_S 0
-#define IRDMAQPC_STAT_INDEX_M (0x7fULL << IRDMAQPC_STAT_INDEX_S)
-
+#define IRDMAQPC_STAT_INDEX GENMASK_ULL(6, 0)
#define IRDMAQPC_Q2ADDR_S 8
-#define IRDMAQPC_Q2ADDR_M (0xffffffffffffffULL << IRDMAQPC_Q2ADDR_S)
-
+#define IRDMAQPC_Q2ADDR GENMASK_ULL(63, 8)
#define IRDMAQPC_LASTBYTESENT_S 0
-#define IRDMAQPC_LASTBYTESENT_M (0xffULL << IRDMAQPC_LASTBYTESENT_S)
-
+#define IRDMAQPC_LASTBYTESENT GENMASK_ULL(7, 0)
#define IRDMAQPC_MACADDRESS_S 16
-#define IRDMAQPC_MACADDRESS_M (0xffffffffffffULL << IRDMAQPC_MACADDRESS_S)
-
+#define IRDMAQPC_MACADDRESS GENMASK_ULL(63, 16)
#define IRDMAQPC_ORDSIZE_S 0
-#define IRDMAQPC_ORDSIZE_M (0xffULL << IRDMAQPC_ORDSIZE_S)
+#define IRDMAQPC_ORDSIZE GENMASK_ULL(7, 0)
#define IRDMAQPC_IRDSIZE_S 16
-#define IRDMAQPC_IRDSIZE_M (0x7ULL << IRDMAQPC_IRDSIZE_S)
+#define IRDMAQPC_IRDSIZE GENMASK_ULL(18, 16)
#define IRDMAQPC_UDPRIVCQENABLE_S 19
-#define IRDMAQPC_UDPRIVCQENABLE_M BIT_ULL(IRDMAQPC_UDPRIVCQENABLE_S)
-
+#define IRDMAQPC_UDPRIVCQENABLE BIT_ULL(19)
#define IRDMAQPC_WRRDRSPOK_S 20
-#define IRDMAQPC_WRRDRSPOK_M BIT_ULL(IRDMAQPC_WRRDRSPOK_S)
-
+#define IRDMAQPC_WRRDRSPOK BIT_ULL(20)
#define IRDMAQPC_RDOK_S 21
-#define IRDMAQPC_RDOK_M BIT_ULL(IRDMAQPC_RDOK_S)
-
+#define IRDMAQPC_RDOK BIT_ULL(21)
#define IRDMAQPC_SNDMARKERS_S 22
-#define IRDMAQPC_SNDMARKERS_M BIT_ULL(IRDMAQPC_SNDMARKERS_S)
-
+#define IRDMAQPC_SNDMARKERS BIT_ULL(22)
#define IRDMAQPC_DCQCNENABLE_S 22
-#define IRDMAQPC_DCQCNENABLE_M BIT_ULL(IRDMAQPC_DCQCNENABLE_S)
-
+#define IRDMAQPC_DCQCNENABLE BIT_ULL(22)
#define IRDMAQPC_FW_CC_ENABLE_S 28
-#define IRDMAQPC_FW_CC_ENABLE_M BIT_ULL(IRDMAQPC_FW_CC_ENABLE_S)
-
+#define IRDMAQPC_FW_CC_ENABLE BIT_ULL(28)
#define IRDMAQPC_RCVNOICRC_S 31
-#define IRDMAQPC_RCVNOICRC_M BIT_ULL(IRDMAQPC_RCVNOICRC_S)
-
+#define IRDMAQPC_RCVNOICRC BIT_ULL(31)
#define IRDMAQPC_BINDEN_S 23
-#define IRDMAQPC_BINDEN_M BIT_ULL(IRDMAQPC_BINDEN_S)
-
+#define IRDMAQPC_BINDEN BIT_ULL(23)
#define IRDMAQPC_FASTREGEN_S 24
-#define IRDMAQPC_FASTREGEN_M BIT_ULL(IRDMAQPC_FASTREGEN_S)
-
+#define IRDMAQPC_FASTREGEN BIT_ULL(24)
#define IRDMAQPC_PRIVEN_S 25
-#define IRDMAQPC_PRIVEN_M BIT_ULL(IRDMAQPC_PRIVEN_S)
-
+#define IRDMAQPC_PRIVEN BIT_ULL(25)
#define IRDMAQPC_TIMELYENABLE_S 27
-#define IRDMAQPC_TIMELYENABLE_M BIT_ULL(IRDMAQPC_TIMELYENABLE_S)
-
+#define IRDMAQPC_TIMELYENABLE BIT_ULL(27)
#define IRDMAQPC_THIGH_S 52
-#define IRDMAQPC_THIGH_M ((u64)0xfff << IRDMAQPC_THIGH_S)
-
+#define IRDMAQPC_THIGH GENMASK_ULL(63, 52)
#define IRDMAQPC_TLOW_S 32
-#define IRDMAQPC_TLOW_M ((u64)0xFF << IRDMAQPC_TLOW_S)
-
+#define IRDMAQPC_TLOW GENMASK_ULL(39, 32)
#define IRDMAQPC_REMENDPOINTIDX_S 0
-#define IRDMAQPC_REMENDPOINTIDX_M ((u64)0x1FFFF << IRDMAQPC_REMENDPOINTIDX_S)
-
+#define IRDMAQPC_REMENDPOINTIDX GENMASK_ULL(16, 0)
#define IRDMAQPC_USESTATSINSTANCE_S 26
-#define IRDMAQPC_USESTATSINSTANCE_M BIT_ULL(IRDMAQPC_USESTATSINSTANCE_S)
-
+#define IRDMAQPC_USESTATSINSTANCE BIT_ULL(26)
#define IRDMAQPC_IWARPMODE_S 28
-#define IRDMAQPC_IWARPMODE_M BIT_ULL(IRDMAQPC_IWARPMODE_S)
-
+#define IRDMAQPC_IWARPMODE BIT_ULL(28)
#define IRDMAQPC_RCVMARKERS_S 29
-#define IRDMAQPC_RCVMARKERS_M BIT_ULL(IRDMAQPC_RCVMARKERS_S)
-
+#define IRDMAQPC_RCVMARKERS BIT_ULL(29)
#define IRDMAQPC_ALIGNHDRS_S 30
-#define IRDMAQPC_ALIGNHDRS_M BIT_ULL(IRDMAQPC_ALIGNHDRS_S)
-
+#define IRDMAQPC_ALIGNHDRS BIT_ULL(30)
#define IRDMAQPC_RCVNOMPACRC_S 31
-#define IRDMAQPC_RCVNOMPACRC_M BIT_ULL(IRDMAQPC_RCVNOMPACRC_S)
-
+#define IRDMAQPC_RCVNOMPACRC BIT_ULL(31)
#define IRDMAQPC_RCVMARKOFFSET_S 32
-#define IRDMAQPC_RCVMARKOFFSET_M (0x1ffULL << IRDMAQPC_RCVMARKOFFSET_S)
-
+#define IRDMAQPC_RCVMARKOFFSET GENMASK_ULL(40, 32)
#define IRDMAQPC_SNDMARKOFFSET_S 48
-#define IRDMAQPC_SNDMARKOFFSET_M (0x1ffULL << IRDMAQPC_SNDMARKOFFSET_S)
+#define IRDMAQPC_SNDMARKOFFSET GENMASK_ULL(56, 48)
#define IRDMAQPC_QPCOMPCTX_S IRDMA_CQPHC_QPCTX_S
-#define IRDMAQPC_QPCOMPCTX_M IRDMA_CQPHC_QPCTX_M
-
+#define IRDMAQPC_QPCOMPCTX IRDMA_CQPHC_QPCTX
#define IRDMAQPC_SQTPHVAL_S 0
-#define IRDMAQPC_SQTPHVAL_M (0xffULL << IRDMAQPC_SQTPHVAL_S)
-
+#define IRDMAQPC_SQTPHVAL GENMASK_ULL(7, 0)
#define IRDMAQPC_RQTPHVAL_S 8
-#define IRDMAQPC_RQTPHVAL_M (0xffULL << IRDMAQPC_RQTPHVAL_S)
-
+#define IRDMAQPC_RQTPHVAL GENMASK_ULL(15, 8)
#define IRDMAQPC_QSHANDLE_S 16
-#define IRDMAQPC_QSHANDLE_M (0x3ffULL << IRDMAQPC_QSHANDLE_S)
-
+#define IRDMAQPC_QSHANDLE GENMASK_ULL(25, 16)
#define IRDMAQPC_EXCEPTION_LAN_QUEUE_S 32
-#define IRDMAQPC_EXCEPTION_LAN_QUEUE_M \
- (0xfffULL << IRDMAQPC_EXCEPTION_LAN_QUEUE_S)
-
+#define IRDMAQPC_EXCEPTION_LAN_QUEUE GENMASK_ULL(43, 32)
#define IRDMAQPC_LOCAL_IPADDR3_S 0
-#define IRDMAQPC_LOCAL_IPADDR3_M \
- (0xffffffffULL << IRDMAQPC_LOCAL_IPADDR3_S)
-
+#define IRDMAQPC_LOCAL_IPADDR3 GENMASK_ULL(31, 0)
#define IRDMAQPC_LOCAL_IPADDR2_S 32
-#define IRDMAQPC_LOCAL_IPADDR2_M \
- (0xffffffffULL << IRDMAQPC_LOCAL_IPADDR2_S)
-
+#define IRDMAQPC_LOCAL_IPADDR2 GENMASK_ULL(63, 32)
#define IRDMAQPC_LOCAL_IPADDR1_S 0
-#define IRDMAQPC_LOCAL_IPADDR1_M \
- (0xffffffffULL << IRDMAQPC_LOCAL_IPADDR1_S)
-
+#define IRDMAQPC_LOCAL_IPADDR1 GENMASK_ULL(31, 0)
#define IRDMAQPC_LOCAL_IPADDR0_S 32
-#define IRDMAQPC_LOCAL_IPADDR0_M \
- (0xffffffffULL << IRDMAQPC_LOCAL_IPADDR0_S)
-
+#define IRDMAQPC_LOCAL_IPADDR0 GENMASK_ULL(63, 32)
#define IRDMA_FW_VER_MINOR_S 0
-#define IRDMA_FW_VER_MINOR_M \
- (0xffffULL << IRDMA_FW_VER_MINOR_S)
-
+#define IRDMA_FW_VER_MINOR GENMASK_ULL(15, 0)
#define IRDMA_FW_VER_MAJOR_S 16
-#define IRDMA_FW_VER_MAJOR_M \
- (0xffffULL << IRDMA_FW_VER_MAJOR_S)
-
+#define IRDMA_FW_VER_MAJOR GENMASK_ULL(31, 16)
#define IRDMA_FEATURE_INFO_S 0
-#define IRDMA_FEATURE_INFO_M \
- (0xffffffffffffULL << IRDMA_FEATURE_INFO_S)
-
+#define IRDMA_FEATURE_INFO GENMASK_ULL(47, 0)
#define IRDMA_FEATURE_CNT_S 32
-#define IRDMA_FEATURE_CNT_M \
- (0xffffULL << IRDMA_FEATURE_CNT_S)
-
+#define IRDMA_FEATURE_CNT GENMASK_ULL(47, 32)
#define IRDMA_FEATURE_TYPE_S 48
-#define IRDMA_FEATURE_TYPE_M \
- (0xffffULL << IRDMA_FEATURE_TYPE_S)
-
+#define IRDMA_FEATURE_TYPE GENMASK_ULL(63, 48)
#define IRDMA_RSVD_S 41
-#define IRDMA_RSVD_M (0x7fffULL << IRDMA_RSVD_S)
+#define IRDMA_RSVD GENMASK_ULL(55, 41)
-/* iwarp QP SQ WQE common fields */
#define IRDMAQPSQ_OPCODE_S 32
-#define IRDMAQPSQ_OPCODE_M (0x3fULL << IRDMAQPSQ_OPCODE_S)
-
+#define IRDMAQPSQ_OPCODE GENMASK_ULL(37, 32)
#define IRDMAQPSQ_COPY_HOST_PBL_S 43
-#define IRDMAQPSQ_COPY_HOST_PBL_M BIT_ULL(IRDMAQPSQ_COPY_HOST_PBL_S)
-
+#define IRDMAQPSQ_COPY_HOST_PBL BIT_ULL(43)
#define IRDMAQPSQ_ADDFRAGCNT_S 38
-#define IRDMAQPSQ_ADDFRAGCNT_M (0xfULL << IRDMAQPSQ_ADDFRAGCNT_S)
-
+#define IRDMAQPSQ_ADDFRAGCNT GENMASK_ULL(41, 38)
#define IRDMAQPSQ_PUSHWQE_S 56
-#define IRDMAQPSQ_PUSHWQE_M BIT_ULL(IRDMAQPSQ_PUSHWQE_S)
-
+#define IRDMAQPSQ_PUSHWQE BIT_ULL(56)
#define IRDMAQPSQ_STREAMMODE_S 58
-#define IRDMAQPSQ_STREAMMODE_M BIT_ULL(IRDMAQPSQ_STREAMMODE_S)
-
+#define IRDMAQPSQ_STREAMMODE BIT_ULL(58)
#define IRDMAQPSQ_WAITFORRCVPDU_S 59
-#define IRDMAQPSQ_WAITFORRCVPDU_M BIT_ULL(IRDMAQPSQ_WAITFORRCVPDU_S)
-
+#define IRDMAQPSQ_WAITFORRCVPDU BIT_ULL(59)
#define IRDMAQPSQ_READFENCE_S 60
-#define IRDMAQPSQ_READFENCE_M BIT_ULL(IRDMAQPSQ_READFENCE_S)
-
+#define IRDMAQPSQ_READFENCE BIT_ULL(60)
#define IRDMAQPSQ_LOCALFENCE_S 61
-#define IRDMAQPSQ_LOCALFENCE_M BIT_ULL(IRDMAQPSQ_LOCALFENCE_S)
-
+#define IRDMAQPSQ_LOCALFENCE BIT_ULL(61)
#define IRDMAQPSQ_UDPHEADER_S 61
-#define IRDMAQPSQ_UDPHEADER_M BIT_ULL(IRDMAQPSQ_UDPHEADER_S)
-
+#define IRDMAQPSQ_UDPHEADER BIT_ULL(61)
#define IRDMAQPSQ_L4LEN_S 42
-#define IRDMAQPSQ_L4LEN_M ((u64)0xF << IRDMAQPSQ_L4LEN_S)
-
+#define IRDMAQPSQ_L4LEN GENMASK_ULL(45, 42)
#define IRDMAQPSQ_SIGCOMPL_S 62
-#define IRDMAQPSQ_SIGCOMPL_M BIT_ULL(IRDMAQPSQ_SIGCOMPL_S)
-
+#define IRDMAQPSQ_SIGCOMPL BIT_ULL(62)
#define IRDMAQPSQ_VALID_S 63
-#define IRDMAQPSQ_VALID_M BIT_ULL(IRDMAQPSQ_VALID_S)
+#define IRDMAQPSQ_VALID BIT_ULL(63)
#define IRDMAQPSQ_FRAG_TO_S IRDMA_CQPHC_QPCTX_S
-#define IRDMAQPSQ_FRAG_TO_M IRDMA_CQPHC_QPCTX_M
-
+#define IRDMAQPSQ_FRAG_TO IRDMA_CQPHC_QPCTX
#define IRDMAQPSQ_FRAG_VALID_S 63
-#define IRDMAQPSQ_FRAG_VALID_M BIT_ULL(IRDMAQPSQ_FRAG_VALID_S)
-
+#define IRDMAQPSQ_FRAG_VALID BIT_ULL(63)
#define IRDMAQPSQ_FRAG_LEN_S 32
-#define IRDMAQPSQ_FRAG_LEN_M (0x7fffffffULL << IRDMAQPSQ_FRAG_LEN_S)
-
+#define IRDMAQPSQ_FRAG_LEN GENMASK_ULL(62, 32)
#define IRDMAQPSQ_FRAG_STAG_S 0
-#define IRDMAQPSQ_FRAG_STAG_M (0xffffffffULL << IRDMAQPSQ_FRAG_STAG_S)
-
+#define IRDMAQPSQ_FRAG_STAG GENMASK_ULL(31, 0)
#define IRDMAQPSQ_GEN1_FRAG_LEN_S 0
-#define IRDMAQPSQ_GEN1_FRAG_LEN_M (0xffffffffULL << IRDMAQPSQ_GEN1_FRAG_LEN_S)
-
+#define IRDMAQPSQ_GEN1_FRAG_LEN GENMASK_ULL(31, 0)
#define IRDMAQPSQ_GEN1_FRAG_STAG_S 32
-#define IRDMAQPSQ_GEN1_FRAG_STAG_M (0xffffffffULL << IRDMAQPSQ_GEN1_FRAG_STAG_S)
-
+#define IRDMAQPSQ_GEN1_FRAG_STAG GENMASK_ULL(63, 32)
#define IRDMAQPSQ_REMSTAGINV_S 0
-#define IRDMAQPSQ_REMSTAGINV_M (0xffffffffULL << IRDMAQPSQ_REMSTAGINV_S)
-
+#define IRDMAQPSQ_REMSTAGINV GENMASK_ULL(31, 0)
#define IRDMAQPSQ_DESTQKEY_S 0
-#define IRDMAQPSQ_DESTQKEY_M (0xffffffffULL << IRDMAQPSQ_DESTQKEY_S)
-
+#define IRDMAQPSQ_DESTQKEY GENMASK_ULL(31, 0)
#define IRDMAQPSQ_DESTQPN_S 32
-#define IRDMAQPSQ_DESTQPN_M (0x00ffffffULL << IRDMAQPSQ_DESTQPN_S)
-
+#define IRDMAQPSQ_DESTQPN GENMASK_ULL(55, 32)
#define IRDMAQPSQ_AHID_S 0
-#define IRDMAQPSQ_AHID_M (0x0001ffffULL << IRDMAQPSQ_AHID_S)
-
+#define IRDMAQPSQ_AHID GENMASK_ULL(16, 0)
#define IRDMAQPSQ_INLINEDATAFLAG_S 57
-#define IRDMAQPSQ_INLINEDATAFLAG_M BIT_ULL(IRDMAQPSQ_INLINEDATAFLAG_S)
+#define IRDMAQPSQ_INLINEDATAFLAG BIT_ULL(57)
#define IRDMA_INLINE_VALID_S 7
-
#define IRDMAQPSQ_INLINEDATALEN_S 48
-#define IRDMAQPSQ_INLINEDATALEN_M \
- (0xffULL << IRDMAQPSQ_INLINEDATALEN_S)
+#define IRDMAQPSQ_INLINEDATALEN GENMASK_ULL(55, 48)
#define IRDMAQPSQ_IMMDATAFLAG_S 47
-#define IRDMAQPSQ_IMMDATAFLAG_M \
- BIT_ULL(IRDMAQPSQ_IMMDATAFLAG_S)
+#define IRDMAQPSQ_IMMDATAFLAG BIT_ULL(47)
#define IRDMAQPSQ_REPORTRTT_S 46
-#define IRDMAQPSQ_REPORTRTT_M \
- BIT_ULL(IRDMAQPSQ_REPORTRTT_S)
+#define IRDMAQPSQ_REPORTRTT BIT_ULL(46)
#define IRDMAQPSQ_IMMDATA_S 0
-#define IRDMAQPSQ_IMMDATA_M \
- (0xffffffffffffffffULL << IRDMAQPSQ_IMMDATA_S)
-
-/* rdma write */
+#define IRDMAQPSQ_IMMDATA GENMASK_ULL(63, 0)
#define IRDMAQPSQ_REMSTAG_S 0
-#define IRDMAQPSQ_REMSTAG_M (0xffffffffULL << IRDMAQPSQ_REMSTAG_S)
+#define IRDMAQPSQ_REMSTAG GENMASK_ULL(31, 0)
#define IRDMAQPSQ_REMTO_S IRDMA_CQPHC_QPCTX_S
-#define IRDMAQPSQ_REMTO_M IRDMA_CQPHC_QPCTX_M
+#define IRDMAQPSQ_REMTO IRDMA_CQPHC_QPCTX
-/* memory window */
#define IRDMAQPSQ_STAGRIGHTS_S 48
-#define IRDMAQPSQ_STAGRIGHTS_M (0x1fULL << IRDMAQPSQ_STAGRIGHTS_S)
-
+#define IRDMAQPSQ_STAGRIGHTS GENMASK_ULL(52, 48)
#define IRDMAQPSQ_VABASEDTO_S 53
-#define IRDMAQPSQ_VABASEDTO_M BIT_ULL(IRDMAQPSQ_VABASEDTO_S)
-
+#define IRDMAQPSQ_VABASEDTO BIT_ULL(53)
#define IRDMAQPSQ_MEMWINDOWTYPE_S 54
-#define IRDMAQPSQ_MEMWINDOWTYPE_M BIT_ULL(IRDMAQPSQ_MEMWINDOWTYPE_S)
+#define IRDMAQPSQ_MEMWINDOWTYPE BIT_ULL(54)
#define IRDMAQPSQ_MWLEN_S IRDMA_CQPHC_QPCTX_S
-#define IRDMAQPSQ_MWLEN_M IRDMA_CQPHC_QPCTX_M
-
+#define IRDMAQPSQ_MWLEN IRDMA_CQPHC_QPCTX
#define IRDMAQPSQ_PARENTMRSTAG_S 32
-#define IRDMAQPSQ_PARENTMRSTAG_M \
- (0xffffffffULL << IRDMAQPSQ_PARENTMRSTAG_S)
-
+#define IRDMAQPSQ_PARENTMRSTAG GENMASK_ULL(63, 32)
#define IRDMAQPSQ_MWSTAG_S 0
-#define IRDMAQPSQ_MWSTAG_M (0xffffffffULL << IRDMAQPSQ_MWSTAG_S)
+#define IRDMAQPSQ_MWSTAG GENMASK_ULL(31, 0)
#define IRDMAQPSQ_BASEVA_TO_FBO_S IRDMA_CQPHC_QPCTX_S
-#define IRDMAQPSQ_BASEVA_TO_FBO_M IRDMA_CQPHC_QPCTX_M
+#define IRDMAQPSQ_BASEVA_TO_FBO IRDMA_CQPHC_QPCTX
-/* Local Invalidate */
#define IRDMAQPSQ_LOCSTAG_S 0
-#define IRDMAQPSQ_LOCSTAG_M (0xffffffffULL << IRDMAQPSQ_LOCSTAG_S)
+#define IRDMAQPSQ_LOCSTAG GENMASK_ULL(31, 0)
-/* Fast Register */
#define IRDMAQPSQ_STAGKEY_S 0
-#define IRDMAQPSQ_STAGKEY_M (0xffULL << IRDMAQPSQ_STAGKEY_S)
-
+#define IRDMAQPSQ_STAGKEY GENMASK_ULL(7, 0)
#define IRDMAQPSQ_STAGINDEX_S 8
-#define IRDMAQPSQ_STAGINDEX_M (0xffffffULL << IRDMAQPSQ_STAGINDEX_S)
-
+#define IRDMAQPSQ_STAGINDEX GENMASK_ULL(31, 8)
#define IRDMAQPSQ_COPYHOSTPBLS_S 43
-#define IRDMAQPSQ_COPYHOSTPBLS_M BIT_ULL(IRDMAQPSQ_COPYHOSTPBLS_S)
-
+#define IRDMAQPSQ_COPYHOSTPBLS BIT_ULL(43)
#define IRDMAQPSQ_LPBLSIZE_S 44
-#define IRDMAQPSQ_LPBLSIZE_M (3ULL << IRDMAQPSQ_LPBLSIZE_S)
-
+#define IRDMAQPSQ_LPBLSIZE GENMASK_ULL(45, 44)
#define IRDMAQPSQ_HPAGESIZE_S 46
-#define IRDMAQPSQ_HPAGESIZE_M (3ULL << IRDMAQPSQ_HPAGESIZE_S)
-
+#define IRDMAQPSQ_HPAGESIZE GENMASK_ULL(47, 46)
#define IRDMAQPSQ_STAGLEN_S 0
-#define IRDMAQPSQ_STAGLEN_M (0x1ffffffffffULL << IRDMAQPSQ_STAGLEN_S)
-
+#define IRDMAQPSQ_STAGLEN GENMASK_ULL(40, 0)
#define IRDMAQPSQ_FIRSTPMPBLIDXLO_S 48
-#define IRDMAQPSQ_FIRSTPMPBLIDXLO_M \
- (0xffffULL << IRDMAQPSQ_FIRSTPMPBLIDXLO_S)
-
+#define IRDMAQPSQ_FIRSTPMPBLIDXLO GENMASK_ULL(63, 48)
#define IRDMAQPSQ_FIRSTPMPBLIDXHI_S 0
-#define IRDMAQPSQ_FIRSTPMPBLIDXHI_M \
- (0xfffULL << IRDMAQPSQ_FIRSTPMPBLIDXHI_S)
-
+#define IRDMAQPSQ_FIRSTPMPBLIDXHI GENMASK_ULL(11, 0)
#define IRDMAQPSQ_PBLADDR_S 12
-#define IRDMAQPSQ_PBLADDR_M (0xfffffffffffffULL << IRDMAQPSQ_PBLADDR_S)
+#define IRDMAQPSQ_PBLADDR GENMASK_ULL(63, 12)
/* iwarp QP RQ WQE common fields */
#define IRDMAQPRQ_ADDFRAGCNT_S IRDMAQPSQ_ADDFRAGCNT_S
-#define IRDMAQPRQ_ADDFRAGCNT_M IRDMAQPSQ_ADDFRAGCNT_M
+#define IRDMAQPRQ_ADDFRAGCNT IRDMAQPSQ_ADDFRAGCNT
#define IRDMAQPRQ_VALID_S IRDMAQPSQ_VALID_S
-#define IRDMAQPRQ_VALID_M IRDMAQPSQ_VALID_M
+#define IRDMAQPRQ_VALID IRDMAQPSQ_VALID
#define IRDMAQPRQ_COMPLCTX_S IRDMA_CQPHC_QPCTX_S
-#define IRDMAQPRQ_COMPLCTX_M IRDMA_CQPHC_QPCTX_M
+#define IRDMAQPRQ_COMPLCTX IRDMA_CQPHC_QPCTX
#define IRDMAQPRQ_FRAG_LEN_S IRDMAQPSQ_FRAG_LEN_S
-#define IRDMAQPRQ_FRAG_LEN_M IRDMAQPSQ_FRAG_LEN_M
+#define IRDMAQPRQ_FRAG_LEN IRDMAQPSQ_FRAG_LEN
#define IRDMAQPRQ_STAG_S IRDMAQPSQ_FRAG_STAG_S
-#define IRDMAQPRQ_STAG_M IRDMAQPSQ_FRAG_STAG_M
+#define IRDMAQPRQ_STAG IRDMAQPSQ_FRAG_STAG
#define IRDMAQPRQ_TO_S IRDMAQPSQ_FRAG_TO_S
-#define IRDMAQPRQ_TO_M IRDMAQPSQ_FRAG_TO_M
+#define IRDMAQPRQ_TO IRDMAQPSQ_FRAG_TO
#define IRDMAPFINT_OICR_HMC_ERR_M BIT(26)
#define IRDMAPFINT_OICR_PE_PUSH_M BIT(27)
#define IRDMAPFINT_OICR_PE_CRITERR_M BIT(28)
-/* Query FPM CQP buf */
#define IRDMA_QUERY_FPM_MAX_QPS_S 0
-#define IRDMA_QUERY_FPM_MAX_QPS_M \
- (0x7ffffULL << IRDMA_QUERY_FPM_MAX_QPS_S)
-
+#define IRDMA_QUERY_FPM_MAX_QPS GENMASK_ULL(18, 0)
#define IRDMA_QUERY_FPM_MAX_CQS_S 0
-#define IRDMA_QUERY_FPM_MAX_CQS_M \
- (0xfffffULL << IRDMA_QUERY_FPM_MAX_CQS_S)
-
+#define IRDMA_QUERY_FPM_MAX_CQS GENMASK_ULL(19, 0)
#define IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX_S 0
-#define IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX_M \
- (0x3fffULL << IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX_S)
-
+#define IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX GENMASK_ULL(13, 0)
#define IRDMA_QUERY_FPM_MAX_PE_SDS_S 32
-#define IRDMA_QUERY_FPM_MAX_PE_SDS_M \
- (0x3fffULL << IRDMA_QUERY_FPM_MAX_PE_SDS_S)
+#define IRDMA_QUERY_FPM_MAX_PE_SDS GENMASK_ULL(45, 32)
#define IRDMA_QUERY_FPM_MAX_CEQS_S 0
-#define IRDMA_QUERY_FPM_MAX_CEQS_M \
- (0x3ffULL << IRDMA_QUERY_FPM_MAX_CEQS_S)
-
+#define IRDMA_QUERY_FPM_MAX_CEQS GENMASK_ULL(9, 0)
#define IRDMA_QUERY_FPM_XFBLOCKSIZE_S 32
-#define IRDMA_QUERY_FPM_XFBLOCKSIZE_M \
- (0xffffffffULL << IRDMA_QUERY_FPM_XFBLOCKSIZE_S)
-
+#define IRDMA_QUERY_FPM_XFBLOCKSIZE GENMASK_ULL(63, 32)
#define IRDMA_QUERY_FPM_Q1BLOCKSIZE_S 32
-#define IRDMA_QUERY_FPM_Q1BLOCKSIZE_M \
- (0xffffffffULL << IRDMA_QUERY_FPM_Q1BLOCKSIZE_S)
-
+#define IRDMA_QUERY_FPM_Q1BLOCKSIZE GENMASK_ULL(63, 32)
#define IRDMA_QUERY_FPM_HTMULTIPLIER_S 16
-#define IRDMA_QUERY_FPM_HTMULTIPLIER_M \
- (0xfULL << IRDMA_QUERY_FPM_HTMULTIPLIER_S)
-
+#define IRDMA_QUERY_FPM_HTMULTIPLIER GENMASK_ULL(19, 16)
#define IRDMA_QUERY_FPM_TIMERBUCKET_S 32
-#define IRDMA_QUERY_FPM_TIMERBUCKET_M \
- (0xffFFULL << IRDMA_QUERY_FPM_TIMERBUCKET_S)
-
+#define IRDMA_QUERY_FPM_TIMERBUCKET GENMASK_ULL(47, 32)
#define IRDMA_QUERY_FPM_RRFBLOCKSIZE_S 32
-#define IRDMA_QUERY_FPM_RRFBLOCKSIZE_M \
- (0xffffffffULL << IRDMA_QUERY_FPM_RRFBLOCKSIZE_S)
-
+#define IRDMA_QUERY_FPM_RRFBLOCKSIZE GENMASK_ULL(63, 32)
#define IRDMA_QUERY_FPM_RRFFLBLOCKSIZE_S 32
-#define IRDMA_QUERY_FPM_RRFFLBLOCKSIZE_M \
- (0xffffffffULL << IRDMA_QUERY_FPM_RRFFLBLOCKSIZE_S)
-
+#define IRDMA_QUERY_FPM_RRFFLBLOCKSIZE GENMASK_ULL(63, 32)
#define IRDMA_QUERY_FPM_OOISCFBLOCKSIZE_S 32
-#define IRDMA_QUERY_FPM_OOISCFBLOCKSIZE_M \
- (0xffffffffULL << IRDMA_QUERY_FPM_OOISCFBLOCKSIZE_S)
-
-/* Static HMC pages allocated buf */
+#define IRDMA_QUERY_FPM_OOISCFBLOCKSIZE GENMASK_ULL(63, 32)
#define IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID_S 0
-#define IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID_M \
- (0x3fULL << IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID_S)
+#define IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID GENMASK_ULL(15, 0)
#define IRDMA_GET_CURRENT_AEQ_ELEM(_aeq) \
( \
@@ -2251,6 +1525,12 @@ enum irdma_cqp_op_type {
IRDMA_RING_MOVE_HEAD(_ring, _retcode); \
}
+enum irdma_protocol_used {
+ IRDMA_ANY_PROTOCOL = 0,
+ IRDMA_IWARP_PROTOCOL_ONLY = 1,
+ IRDMA_ROCE_PROTOCOL_ONLY = 2,
+};
+
enum irdma_qp_wqe_size {
IRDMA_WQE_SIZE_32 = 32,
IRDMA_WQE_SIZE_64 = 64,
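This file's diff ends by adding the irdma_protocol_used enum (any / iWARP-only / RoCE-only) next to the existing WQE-size enum. The converted masks above are meant to be consumed through FIELD_PREP()/FIELD_GET() rather than hand-written shifts, as the irdma_hmc.c hunks below do. A hedged userspace sketch of those helpers; the driver takes the real ones from the kernel's bitfield machinery, and __builtin_ctzll() here stands in for its compile-time shift derivation (it assumes a non-zero, contiguous mask):

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

/* pack a value into a contiguous field; out-of-range bits are masked */
#define FIELD_PREP(mask, val) \
	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))
/* extract a field back out of a register word */
#define FIELD_GET(mask, reg) \
	(((reg) & (mask)) >> __builtin_ctzll(mask))

#define IRDMAQPC_SNDMSS GENMASK_ULL(29, 16)   /* as converted above */

int main(void)
{
	uint64_t qw = FIELD_PREP(IRDMAQPC_SNDMSS, 1460);

	printf("mss=%llu\n",
	       (unsigned long long)FIELD_GET(IRDMAQPC_SNDMSS, qw));
	return 0;
}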
diff --git a/sys/dev/irdma/irdma_hmc.c b/sys/dev/irdma/irdma_hmc.c
index 82029847e5f5..040e11162e19 100644
--- a/sys/dev/irdma/irdma_hmc.c
+++ b/sys/dev/irdma/irdma_hmc.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2021 Intel Corporation
+ * Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -106,10 +106,14 @@ static void
irdma_set_sd_entry(u64 pa, u32 idx, enum irdma_sd_entry_type type,
struct irdma_update_sd_entry *entry)
{
- entry->data = pa | (IRDMA_HMC_MAX_BP_COUNT << IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT_S) |
- (((type == IRDMA_SD_TYPE_PAGED) ? 0 : 1) << IRDMA_PFHMC_SDDATALOW_PMSDTYPE_S) |
- (1 << IRDMA_PFHMC_SDDATALOW_PMSDVALID_S);
- entry->cmd = (idx | (1 << IRDMA_PFHMC_SDCMD_PMSDWR_S) | (1 << 15));
+ entry->data = pa |
+ FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT, IRDMA_HMC_MAX_BP_COUNT) |
+ FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE,
+ type == IRDMA_SD_TYPE_PAGED ? 0 : 1) |
+ FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDVALID, 1);
+
+ entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) |
+ IRDMA_PFHMC_SDCMD_PMSDPARTSEL;
}
/**
@@ -122,9 +126,12 @@ static void
irdma_clr_sd_entry(u32 idx, enum irdma_sd_entry_type type,
struct irdma_update_sd_entry *entry)
{
- entry->data = (IRDMA_HMC_MAX_BP_COUNT << IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT_S) |
- (((type == IRDMA_SD_TYPE_PAGED) ? 0 : 1) << IRDMA_PFHMC_SDDATALOW_PMSDTYPE_S);
- entry->cmd = (idx | (1 << IRDMA_PFHMC_SDCMD_PMSDWR_S) | (1 << 15));
+ entry->data = FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT, IRDMA_HMC_MAX_BP_COUNT) |
+ FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE,
+ type == IRDMA_SD_TYPE_PAGED ? 0 : 1);
+
+ entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) |
+ IRDMA_PFHMC_SDCMD_PMSDPARTSEL;
}
/**
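Both SD-entry builders now compose entry->cmd from FIELD_PREP() plus a named IRDMA_PFHMC_SDCMD_PMSDPARTSEL instead of a bare (1 << 15); bit 15 is grounded by the old literal, while the PMSDWR position in the sketch below is an assumption for illustration:

/*
 * Sketch only: PMSDWR at bit 31 is assumed; PMSDPARTSEL at bit 15
 * follows from the "(1 << 15)" this hunk replaces. The point is that
 * the named macro and the old magic number encode the same bit.
 */
#include <assert.h>
#include <stdint.h>

#define BIT(nr) (1U << (nr))

#define IRDMA_PFHMC_SDCMD_PMSDWR_S    31
#define IRDMA_PFHMC_SDCMD_PMSDWR      BIT(31)
#define IRDMA_PFHMC_SDCMD_PMSDPARTSEL BIT(15)

int main(void)
{
	uint32_t idx = 7;
	uint32_t old_cmd = idx | (1U << IRDMA_PFHMC_SDCMD_PMSDWR_S) |
	    (1 << 15);
	uint32_t new_cmd = idx | IRDMA_PFHMC_SDCMD_PMSDWR |
	    IRDMA_PFHMC_SDCMD_PMSDPARTSEL;

	assert(old_cmd == new_cmd);
	return 0;
}

Naming the partition-select bit is the whole change here: the encoded value is identical, but the magic number no longer needs a datasheet to decode.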
@@ -137,9 +144,9 @@ static inline void
irdma_invalidate_pf_hmc_pd(struct irdma_sc_dev *dev, u32 sd_idx,
u32 pd_idx)
{
- u32 val = LS_32(sd_idx, IRDMA_PFHMC_PDINV_PMSDIDX) |
- LS_32(1, IRDMA_PFHMC_PDINV_PMSDPARTSEL) |
- LS_32(pd_idx, IRDMA_PFHMC_PDINV_PMPDIDX);
+ u32 val = FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDIDX, sd_idx) |
+ FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDPARTSEL, 1) |
+ FIELD_PREP(IRDMA_PFHMC_PDINV_PMPDIDX, pd_idx);
writel(val, dev->hw_regs[IRDMA_PFHMC_PDINV]);
}
@@ -154,7 +161,7 @@ irdma_invalidate_pf_hmc_pd(struct irdma_sc_dev *dev, u32 sd_idx,
* @setsd: flag to set or clear sd
*/
int
-irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, u64 pa, u32 sd_idx,
+irdma_hmc_sd_one(struct irdma_sc_dev *dev, u16 hmc_fn_id, u64 pa, u32 sd_idx,
enum irdma_sd_entry_type type, bool setsd)
{
struct irdma_update_sds_info sdinfo;
@@ -534,7 +541,7 @@ irdma_add_sd_table_entry(struct irdma_hw *hw,
&sd_entry->u.pd_table.pd_entry_virt_mem;
vmem->size = sizeof(struct irdma_hmc_pd_entry) * 512;
- vmem->va = kzalloc(vmem->size, GFP_ATOMIC);
+ vmem->va = kzalloc(vmem->size, GFP_KERNEL);
if (!vmem->va) {
irdma_free_dma_mem(hw, &dma_mem);
return -ENOMEM;
diff --git a/sys/dev/irdma/irdma_hmc.h b/sys/dev/irdma/irdma_hmc.h
index 44f39adedcf6..25341ee49759 100644
--- a/sys/dev/irdma/irdma_hmc.h
+++ b/sys/dev/irdma/irdma_hmc.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2021 Intel Corporation
+ * Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -128,7 +128,7 @@ struct irdma_hmc_sd_table {
struct irdma_hmc_info {
u32 signature;
- u8 hmc_fn_id;
+ u16 hmc_fn_id;
u16 first_sd_index;
struct irdma_hmc_obj_info *hmc_obj;
struct irdma_virt_mem hmc_obj_virt_mem;
@@ -143,7 +143,7 @@ struct irdma_update_sd_entry {
struct irdma_update_sds_info {
u32 cnt;
- u8 hmc_fn_id;
+ u16 hmc_fn_id;
struct irdma_update_sd_entry entry[IRDMA_MAX_SD_ENTRIES];
};
@@ -180,15 +180,15 @@ int irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
struct irdma_hmc_create_obj_info *info);
int irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
struct irdma_hmc_del_obj_info *info, bool reset);
-int irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, u64 pa, u32 sd_idx,
+int irdma_hmc_sd_one(struct irdma_sc_dev *dev, u16 hmc_fn_id, u64 pa, u32 sd_idx,
enum irdma_sd_entry_type type,
bool setsd);
int irdma_update_sds_noccq(struct irdma_sc_dev *dev,
struct irdma_update_sds_info *info);
struct irdma_vfdev *irdma_vfdev_from_fpm(struct irdma_sc_dev *dev,
- u8 hmc_fn_id);
+ u16 hmc_fn_id);
struct irdma_hmc_info *irdma_vf_hmcinfo_from_fpm(struct irdma_sc_dev *dev,
- u8 hmc_fn_id);
+ u16 hmc_fn_id);
int irdma_add_sd_table_entry(struct irdma_hw *hw,
struct irdma_hmc_info *hmc_info, u32 sd_index,
enum irdma_sd_entry_type type, u64 direct_mode_sz);
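The irdma_hmc.h side of this patch is a type widening: hmc_fn_id grows from u8 to u16 in the structs and in every prototype that carries it, matching the IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID mask growing from 6 to 16 bits earlier in the diff. A two-function demonstration of the truncation the widening removes (whether function ids above 255 actually occur on this hardware is not stated in the diff):

#include <stdint.h>
#include <stdio.h>

static void take_u8(uint8_t id)   { printf("u8  sees %u\n", id); }
static void take_u16(uint16_t id) { printf("u16 sees %u\n", id); }

int main(void)
{
	take_u8(300);    /* silently truncated to 44 (300 & 0xff) */
	take_u16(300);   /* 300 survives */
	return 0;
}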
diff --git a/sys/dev/irdma/irdma_hw.c b/sys/dev/irdma/irdma_hw.c
index 43c0c3a66152..ff073625ebea 100644
--- a/sys/dev/irdma/irdma_hw.c
+++ b/sys/dev/irdma/irdma_hw.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2021 Intel Corporation
+ * Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -74,6 +74,7 @@ static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
IRDMA_HMC_IW_XFFL,
IRDMA_HMC_IW_Q1,
IRDMA_HMC_IW_Q1FL,
+ IRDMA_HMC_IW_PBLE,
IRDMA_HMC_IW_TIMER,
IRDMA_HMC_IW_FSIMC,
IRDMA_HMC_IW_FSIAV,
@@ -95,7 +96,7 @@ irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq)
struct irdma_cq *cq = iwcq->back_cq;
if (!cq->user_mode)
- cq->armed = false;
+ atomic_set(&cq->armed, 0);
if (cq->ibcq.comp_handler)
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
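cq->armed stops being a plain bool write: the CE handler clears it with atomic_set(). The matching arm-side read-modify-write is outside this hunk, but the usual shape is an atomic exchange so that exactly one caller rings the rearm doorbell. A C11 userspace analogue of that shape (the pairing is an assumption, not quoted from the driver):

/*
 * Userspace C11 sketch: atomic_exchange() makes "arm once" race-free
 * where a plain bool read followed by a write is not.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int cq_armed;

static void ce_handler(void)         /* completion event fired */
{
	atomic_store(&cq_armed, 0);   /* mirrors atomic_set(&cq->armed, 0) */
}

static bool arm_cq_once(void)
{
	/* only the caller that flips 0 -> 1 rings the doorbell */
	return atomic_exchange(&cq_armed, 1) == 0;
}

int main(void)
{
	printf("armed doorbell: %d\n", arm_cq_once());  /* 1 */
	printf("armed doorbell: %d\n", arm_cq_once());  /* 0: already armed */
	ce_handler();
	printf("armed doorbell: %d\n", arm_cq_once());  /* 1 again */
	return 0;
}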
@@ -171,68 +172,36 @@ static void
irdma_set_flush_fields(struct irdma_sc_qp *qp,
struct irdma_aeqe_info *info)
{
+ struct qp_err_code qp_err;
+
qp->sq_flush_code = info->sq;
qp->rq_flush_code = info->rq;
- qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- switch (info->ae_id) {
- case IRDMA_AE_AMP_BOUNDS_VIOLATION:
- case IRDMA_AE_AMP_INVALID_STAG:
- qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
- /* fallthrough */
- case IRDMA_AE_UDA_XMIT_BAD_PD:
- qp->flush_code = FLUSH_PROT_ERR;
- break;
- case IRDMA_AE_AMP_UNALLOCATED_STAG:
- case IRDMA_AE_AMP_BAD_PD:
- qp->flush_code = FLUSH_PROT_ERR;
- break;
- case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
- case IRDMA_AE_AMP_BAD_QP:
- case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
- qp->flush_code = FLUSH_LOC_QP_OP_ERR;
- break;
- case IRDMA_AE_AMP_BAD_STAG_KEY:
- case IRDMA_AE_AMP_BAD_STAG_INDEX:
- case IRDMA_AE_AMP_TO_WRAP:
- case IRDMA_AE_AMP_RIGHTS_VIOLATION:
- case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
- case IRDMA_AE_PRIV_OPERATION_DENIED:
- case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
- case IRDMA_AE_IB_REMOTE_OP_ERROR:
- qp->flush_code = FLUSH_REM_ACCESS_ERR;
- qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
- break;
- case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
- case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
- case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
- case IRDMA_AE_UDA_L4LEN_INVALID:
- case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
- qp->flush_code = FLUSH_LOC_LEN_ERR;
- break;
- case IRDMA_AE_LCE_QP_CATASTROPHIC:
- qp->flush_code = FLUSH_FATAL_ERR;
- break;
- case IRDMA_AE_DDP_UBE_INVALID_MO:
- case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
- case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
- qp->flush_code = FLUSH_GENERAL_ERR;
- break;
- case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
- case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
- case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
- qp->flush_code = FLUSH_MW_BIND_ERR;
- break;
- case IRDMA_AE_LLP_TOO_MANY_RETRIES:
- qp->flush_code = FLUSH_RETRY_EXC_ERR;
- break;
- case IRDMA_AE_IB_INVALID_REQUEST:
- qp->flush_code = FLUSH_REM_INV_REQ_ERR;
- break;
- default:
- qp->flush_code = FLUSH_FATAL_ERR;
- break;
+ qp_err = irdma_ae_to_qp_err_code(info->ae_id);
+
+ qp->flush_code = qp_err.flush_code;
+ qp->event_type = qp_err.event_type;
+}
+
+/**
+ * irdma_complete_cqp_request - perform post-completion cleanup
+ * @cqp: device CQP
+ * @cqp_request: CQP request
+ *
+ * Mark CQP request as done, wake up waiting thread or invoke
+ * callback function and release/free CQP request.
+ */
+static void
+irdma_complete_cqp_request(struct irdma_cqp *cqp,
+ struct irdma_cqp_request *cqp_request)
+{
+ if (cqp_request->waiting) {
+ cqp_request->request_done = true;
+ wake_up(&cqp_request->waitq);
+ } else if (cqp_request->callback_fcn) {
+ cqp_request->callback_fcn(cqp_request);
}
+ irdma_put_cqp_request(cqp, cqp_request);
}
/**
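Two consolidations land in this hunk: the long per-AE switch in irdma_set_flush_fields() becomes one irdma_ae_to_qp_err_code() call returning a struct qp_err_code, and the wake-or-callback epilogue shared by CQP completion paths is hoisted into irdma_complete_cqp_request(). A sketch of the lookup idea; the enum values, AE id, and table entry are illustrative, and the real helper may equally well be a switch:

#include <stdio.h>

enum flush_code { FLUSH_NONE, FLUSH_PROT_ERR, FLUSH_FATAL_ERR };
enum event_type { EV_CATASTROPHIC, EV_ACCESS_ERR };

struct qp_err_code {
	enum flush_code flush_code;
	enum event_type event_type;
};

#define AE_AMP_INVALID_STAG 0x0103   /* illustrative AE id */
#define AE_MAX              0x0400

/* zero-initialized slots read as FLUSH_NONE, i.e. "no entry" */
static const struct qp_err_code ae_map[AE_MAX] = {
	[AE_AMP_INVALID_STAG] = { FLUSH_PROT_ERR, EV_ACCESS_ERR },
};

static struct qp_err_code ae_to_qp_err_code(unsigned int ae_id)
{
	if (ae_id < AE_MAX && ae_map[ae_id].flush_code != FLUSH_NONE)
		return ae_map[ae_id];
	/* unknown AEs collapse to a catastrophic fatal flush */
	return (struct qp_err_code){ FLUSH_FATAL_ERR, EV_CATASTROPHIC };
}

int main(void)
{
	struct qp_err_code e = ae_to_qp_err_code(AE_AMP_INVALID_STAG);

	printf("flush=%d event=%d\n", e.flush_code, e.event_type);
	return 0;
}

The data-driven form keeps the AE-to-error policy in one place instead of duplicating it across every switch that inspects ae_id.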
@@ -315,13 +284,11 @@ irdma_process_aeq(struct irdma_pci_f *rf)
wake_up_interruptible(&iwqp->waitq);
break;
case IRDMA_AE_LLP_FIN_RECEIVED:
- case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
if (qp->term_flags)
break;
if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSE_WAIT;
- if (iwqp->hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT &&
- iwqp->ibqp_state == IB_QPS_RTS) {
+ if (iwqp->ibqp_state == IB_QPS_RTS) {
irdma_next_iw_state(iwqp,
IRDMA_QP_STATE_CLOSING,
0, 0, 0);
@@ -394,6 +361,7 @@ irdma_process_aeq(struct irdma_pci_f *rf)
case IRDMA_AE_RESOURCE_EXHAUSTION:
break;
case IRDMA_AE_PRIV_OPERATION_DENIED:
+ case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
case IRDMA_AE_STAG_ZERO_INVALID:
case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
@@ -442,10 +410,7 @@ irdma_process_aeq(struct irdma_pci_f *rf)
irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0);
irdma_cm_disconn(iwqp);
} else {
- iwqp->sc_qp.term_flags = 1;
- irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0,
- 0);
- irdma_cm_disconn(iwqp);
+ irdma_terminate_connection(qp, info);
}
break;
}
@@ -513,8 +478,10 @@ irdma_save_msix_info(struct irdma_pci_f *rf)
u32 i;
u32 size;
- if (!rf->msix_count)
+ if (!rf->msix_count) {
+ irdma_dev_err(&rf->sc_dev, "No MSI-X vectors reserved for RDMA.\n");
return -EINVAL;
+ }
size = sizeof(struct irdma_msix_vector) * rf->msix_count;
size += sizeof(struct irdma_qvlist_info);
@@ -546,7 +513,7 @@ irdma_save_msix_info(struct irdma_pci_f *rf)
iw_qvinfo->aeq_idx = IRDMA_Q_INVALID_IDX;
iw_qvinfo->ceq_idx = ceq_idx++;
}
- iw_qvinfo->itr_idx = 3;
+ iw_qvinfo->itr_idx = IRDMA_IDX_NOITR;
iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx;
}
@@ -636,8 +603,7 @@ irdma_destroy_cqp(struct irdma_pci_f *rf, bool free_hwcqp)
if (rf->cqp_cmpl_wq)
destroy_workqueue(rf->cqp_cmpl_wq);
- if (free_hwcqp)
- status = irdma_sc_cqp_destroy(dev->cqp);
+ status = irdma_sc_cqp_destroy(dev->cqp, free_hwcqp);
if (status)
irdma_debug(dev, IRDMA_DEBUG_ERR, "Destroy CQP failed %d\n",
status);
@@ -898,6 +864,8 @@ irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged,
info.entry_type = rf->sd_type;
for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
+ if (iw_hmc_obj_types[i] == IRDMA_HMC_IW_PBLE)
+ continue;
if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) {
info.rsrc_type = iw_hmc_obj_types[i];
info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
@@ -992,8 +960,8 @@ irdma_create_cqp(struct irdma_pci_f *rf)
cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
memset(cqp->scratch_array, 0, sqsize * sizeof(*cqp->scratch_array));
if (!cqp->scratch_array) {
- kfree(cqp->cqp_requests);
- return -ENOMEM;
+ status = -ENOMEM;
+ goto err_scratch;
}
dev->cqp = &cqp->sc_cqp;
@@ -1002,15 +970,14 @@ irdma_create_cqp(struct irdma_pci_f *rf)
cqp->sq.va = irdma_allocate_dma_mem(dev->hw, &cqp->sq, cqp->sq.size,
IRDMA_CQP_ALIGNMENT);
if (!cqp->sq.va) {
- kfree(cqp->scratch_array);
- kfree(cqp->cqp_requests);
- return -ENOMEM;
+ status = -ENOMEM;
+ goto err_sq;
}
status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx),
IRDMA_HOST_CTX_ALIGNMENT_M);
if (status)
- goto exit;
+ goto err_ctx;
dev->cqp->host_ctx_pa = mem.pa;
dev->cqp->host_ctx = mem.va;
@@ -1040,7 +1007,7 @@ irdma_create_cqp(struct irdma_pci_f *rf)
if (status) {
irdma_debug(dev, IRDMA_DEBUG_ERR, "cqp init status %d\n",
status);
- goto exit;
+ goto err_ctx;
}
spin_lock_init(&cqp->req_lock);
@@ -1051,7 +1018,7 @@ irdma_create_cqp(struct irdma_pci_f *rf)
irdma_debug(dev, IRDMA_DEBUG_ERR,
"cqp create failed - status %d maj_err %d min_err %d\n",
status, maj_err, min_err);
- goto exit;
+ goto err_create;
}
INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
@@ -1065,8 +1032,15 @@ irdma_create_cqp(struct irdma_pci_f *rf)
init_waitqueue_head(&cqp->remove_wq);
return 0;
-exit:
- irdma_destroy_cqp(rf, false);
+err_create:
+err_ctx:
+ irdma_free_dma_mem(dev->hw, &cqp->sq);
+err_sq:
+ kfree(cqp->scratch_array);
+ cqp->scratch_array = NULL;
+err_scratch:
+ kfree(cqp->cqp_requests);
+ cqp->cqp_requests = NULL;
return status;
}
@@ -1224,12 +1198,6 @@ irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
return status;
bus_describe_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->tag, "CEQ");
}
- status = bus_bind_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->cpu_affinity);
- if (status) {
- irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR,
- "ceq irq config fail\n");
- return status;
- }
msix_vec->ceq_id = ceq_id;
rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true);
@@ -1655,10 +1623,7 @@ irdma_hmc_setup(struct irdma_pci_f *rf)
struct irdma_sc_dev *dev = &rf->sc_dev;
u32 qpcnt;
- if (rf->rdma_ver == IRDMA_GEN_1)
- qpcnt = rsrc_limits_table[rf->limits_sel].qplimit * 2;
- else
- qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;
+ qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;
rf->sd_type = IRDMA_SD_TYPE_DIRECT;
status = irdma_cfg_fpm_val(dev, qpcnt);
@@ -1687,8 +1652,8 @@ irdma_del_init_mem(struct irdma_pci_f *rf)
if (rf->rdma_ver != IRDMA_GEN_1) {
kfree(rf->allocated_ws_nodes);
rf->allocated_ws_nodes = NULL;
- mutex_destroy(&dev->ws_mutex);
}
+ mutex_destroy(&dev->ws_mutex);
kfree(rf->ceqlist);
rf->ceqlist = NULL;
kfree(rf->iw_msixtbl);
@@ -1696,7 +1661,6 @@ irdma_del_init_mem(struct irdma_pci_f *rf)
kfree(rf->hmc_info_mem);
rf->hmc_info_mem = NULL;
}
-
/**
* irdma_initialize_dev - initialize device
* @rf: RDMA PCI function
@@ -1746,7 +1710,7 @@ irdma_initialize_dev(struct irdma_pci_f *rf)
info.bar0 = rf->hw.hw_addr;
info.hmc_fn_id = rf->peer_info->pf_id;
info.hw = &rf->hw;
- status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info);
+ status = irdma_sc_dev_init(&rf->sc_dev, &info);
if (status)
goto error;
@@ -1996,10 +1960,6 @@ irdma_rt_init_hw(struct irdma_device *iwdev,
rf->rsrc_created = true;
}
- iwdev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
- IB_DEVICE_MEM_WINDOW |
- IB_DEVICE_MEM_MGT_EXTENSIONS;
-
if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
irdma_alloc_set_mac(iwdev);
irdma_add_ip(iwdev);
@@ -2233,28 +2193,20 @@ irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
break;
cqp_request = (struct irdma_cqp_request *)
- (unsigned long)info.scratch;
+ (uintptr_t)info.scratch;
if (info.error && irdma_cqp_crit_err(dev, cqp_request->info.cqp_cmd,
info.maj_err_code,
info.min_err_code))
irdma_dev_err(dev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
info.op_code, info.maj_err_code,
info.min_err_code);
+
if (cqp_request) {
cqp_request->compl_info.maj_err_code = info.maj_err_code;
cqp_request->compl_info.min_err_code = info.min_err_code;
cqp_request->compl_info.op_ret_val = info.op_ret_val;
cqp_request->compl_info.error = info.error;
-
- if (cqp_request->waiting) {
- cqp_request->request_done = true;
- wake_up(&cqp_request->waitq);
- irdma_put_cqp_request(&rf->cqp, cqp_request);
- } else {
- if (cqp_request->callback_fcn)
- cqp_request->callback_fcn(cqp_request);
- irdma_put_cqp_request(&rf->cqp, cqp_request);
- }
+ irdma_complete_cqp_request(&rf->cqp, cqp_request);
}
cqe_count++;
@@ -2545,7 +2497,7 @@ irdma_del_apbvt(struct irdma_device *iwdev,
* @action: add, delete or modify
*/
void
-irdma_manage_arp_cache(struct irdma_pci_f *rf, unsigned char *mac_addr,
+irdma_manage_arp_cache(struct irdma_pci_f *rf, const unsigned char *mac_addr,
u32 *ip_addr, u32 action)
{
struct irdma_add_arp_cache_entry_info *info;
@@ -2798,29 +2750,30 @@ irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
info.sq = flush_mask & IRDMA_FLUSH_SQ;
info.rq = flush_mask & IRDMA_FLUSH_RQ;
- if (flush_mask & IRDMA_REFLUSH) {
- if (info.sq)
- iwqp->sc_qp.flush_sq = false;
- if (info.rq)
- iwqp->sc_qp.flush_rq = false;
- }
-
/* Generate userflush errors in CQE */
info.sq_major_code = IRDMA_FLUSH_MAJOR_ERR;
info.sq_minor_code = FLUSH_GENERAL_ERR;
info.rq_major_code = IRDMA_FLUSH_MAJOR_ERR;
info.rq_minor_code = FLUSH_GENERAL_ERR;
info.userflushcode = true;
- if (flush_code) {
- if (info.sq && iwqp->sc_qp.sq_flush_code)
- info.sq_minor_code = flush_code;
- if (info.rq && iwqp->sc_qp.rq_flush_code)
- info.rq_minor_code = flush_code;
- }
- if (irdma_upload_context && !(flush_mask & IRDMA_REFLUSH) &&
- irdma_upload_qp_context(iwqp, 0, 1))
- irdma_print("failed to upload QP context\n");
+ if (flush_mask & IRDMA_REFLUSH) {
+ if (info.sq)
+ iwqp->sc_qp.flush_sq = false;
+ if (info.rq)
+ iwqp->sc_qp.flush_rq = false;
+ } else {
+ if (flush_code) {
+ if (info.sq && iwqp->sc_qp.sq_flush_code)
+ info.sq_minor_code = flush_code;
+ if (info.rq && iwqp->sc_qp.rq_flush_code)
+ info.rq_minor_code = flush_code;
+ }
+ if (irdma_upload_context && irdma_upload_qp_context(iwqp, 0, 1))
+ irdma_print("failed to upload QP context\n");
+ if (!iwqp->user_mode)
+ irdma_sched_qp_flush_work(iwqp);
+ }
/* Issue flush */
(void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info,
diff --git a/sys/dev/irdma/irdma_kcompat.c b/sys/dev/irdma/irdma_kcompat.c
index 3d88187cb9ae..d0cf960b5147 100644
--- a/sys/dev/irdma/irdma_kcompat.c
+++ b/sys/dev/irdma/irdma_kcompat.c
@@ -35,6 +35,36 @@
#include "irdma_main.h"
+#define IRDMA_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
+
+static u16
+kc_rdma_flow_label_to_udp_sport(u32 fl)
+{
+ u32 fl_low = fl & 0x03FFF;
+ u32 fl_high = fl & 0xFC000;
+
+ fl_low ^= fl_high >> 14;
+
+ return (u16)(fl_low | IRDMA_ROCE_UDP_ENCAP_VALID_PORT_MIN);
+}
+
+#define IRDMA_GRH_FLOWLABEL_MASK (0x000FFFFF)
+
+static u32
+kc_rdma_calc_flow_label(u32 lqpn, u32 rqpn)
+{
+ u64 fl = (u64)lqpn * rqpn;
+
+ fl ^= fl >> 20;
+ fl ^= fl >> 40;
+
+ return (u32)(fl & IRDMA_GRH_FLOWLABEL_MASK);
+}
+
+u16
+kc_rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
+{
+ if (!fl)
+ fl = kc_rdma_calc_flow_label(lqpn, rqpn);
+ return kc_rdma_flow_label_to_udp_sport(fl);
+}
+
void
irdma_get_dev_fw_str(struct ib_device *dev,
char *str,
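The helpers above reproduce the rdma-core RoCEv2 source-port selection: a 20-bit flow label (derived from the QP numbers when the caller passes none) is folded to 14 bits and offset into the 0xC000-0xFFFF UDP port range reserved for RoCEv2 encapsulation. A worked example with an arbitrary label:

    /*
     * fl      = 0x12345
     * fl_low  = 0x12345 & 0x03FFF          = 0x2345
     * fl_high = 0x12345 & 0xFC000          = 0x10000
     * fl_low ^= 0x10000 >> 14 (= 0x4)      -> 0x2341
     * sport   = 0x2341 | 0xC000            = 0xE341
     */
    u16 sport = kc_rdma_get_udp_sport(0x12345, 0, 0); /* QPNs unused when fl != 0 */

With fl == 0 the label is first computed as ((u64)lqpn * rqpn) folded by the two XOR shifts, so a given QP pair always hashes to the same source port.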
@@ -106,8 +136,10 @@ irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
iwmr->type = IRDMA_MEMREG_TYPE_MEM;
palloc = &iwpbl->pble_alloc;
iwmr->page_cnt = max_num_sg;
+ /* Assume system PAGE_SIZE as the sg page sizes are unknown. */
+ iwmr->len = max_num_sg * PAGE_SIZE;
status = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
- true);
+ false);
if (status)
goto err_get_pble;
@@ -128,6 +160,8 @@ err:
return ERR_PTR(err_code);
}
+#define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
+#define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)
/**
* irdma_alloc_ucontext - Allocate the user context data structure
* @uctx: context
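Verbs that take user buffers now reject anything shorter than the first published ABI revision; offsetofend() on the last mandatory member gives that floor, while longer (newer) buffers remain accepted via the min() copy. A sketch of the pattern, with hypothetical names:

    struct demo_req {                   /* hypothetical ABI struct */
        __aligned_u64 user_buf;
        __u32 rsvd;                     /* new fields append after this */
    };
    #define DEMO_MIN_REQ_LEN offsetofend(struct demo_req, rsvd)

    if (udata->inlen < DEMO_MIN_REQ_LEN)
        return -EINVAL;                 /* shorter than any valid ABI rev */
    if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
        return -EINVAL;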
@@ -141,11 +175,15 @@ irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
struct ib_device *ibdev = uctx->device;
struct irdma_device *iwdev = to_iwdev(ibdev);
- struct irdma_alloc_ucontext_req req;
+ struct irdma_alloc_ucontext_req req = {0};
struct irdma_alloc_ucontext_resp uresp = {0};
struct irdma_ucontext *ucontext = to_ucontext(uctx);
struct irdma_uk_attrs *uk_attrs;
+ if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
+ udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
+ return -EINVAL;
+
if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
return -EINVAL;
@@ -157,7 +195,7 @@ irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
/* GEN_1 support for libi40iw */
- if (udata->outlen < sizeof(uresp)) {
+ if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
if (uk_attrs->hw_rev != IRDMA_GEN_1)
return -EOPNOTSUPP;
@@ -169,15 +207,8 @@ irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
if (ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen)))
return -EFAULT;
} else {
- u64 bar_off =
- (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
- ucontext->db_mmap_entry =
- irdma_user_mmap_entry_insert(ucontext, bar_off,
- IRDMA_MMAP_IO_NC,
- &uresp.db_mmap_key);
- if (!ucontext->db_mmap_entry) {
- return -ENOMEM;
- }
+ u64 bar_off;
+
uresp.kernel_ver = IRDMA_ABI_VER;
uresp.feature_flags = uk_attrs->feature_flags;
uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
@@ -189,6 +220,17 @@ irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
uresp.hw_rev = uk_attrs->hw_rev;
+
+ bar_off =
+ (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
+ ucontext->db_mmap_entry =
+ irdma_user_mmap_entry_insert(ucontext, bar_off,
+ IRDMA_MMAP_IO_NC,
+ &uresp.db_mmap_key);
+ if (!ucontext->db_mmap_entry) {
+ return -ENOMEM;
+ }
+
if (ib_copy_to_udata(udata, &uresp,
min(sizeof(uresp), udata->outlen))) {
rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
@@ -226,6 +268,7 @@ irdma_dealloc_ucontext(struct ib_ucontext *context)
return;
}
+#define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
/**
* irdma_alloc_pd - allocate protection domain
* @pd: protection domain
@@ -243,6 +286,9 @@ irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
u32 pd_id = 0;
int err;
+ if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN)
+ return -EINVAL;
+
err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
&rf->next_pd);
if (err)
@@ -284,8 +330,7 @@ irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
}
static void
-irdma_fill_ah_info(struct vnet *vnet,
- struct irdma_ah_info *ah_info,
+irdma_fill_ah_info(struct vnet *vnet, struct irdma_ah_info *ah_info,
const struct ib_gid_attr *sgid_attr,
struct sockaddr *sgid_addr, struct sockaddr *dgid_addr,
u8 *dmac, u8 net_type)
@@ -296,9 +341,12 @@ irdma_fill_ah_info(struct vnet *vnet,
ntohl(((struct sockaddr_in *)dgid_addr)->sin_addr.s_addr);
ah_info->src_ip_addr[0] =
ntohl(((struct sockaddr_in *)sgid_addr)->sin_addr.s_addr);
- ah_info->do_lpbk = irdma_ipv4_is_lpb(vnet,
- ah_info->src_ip_addr[0],
+#ifdef VIMAGE
+ CURVNET_SET_QUIET(vnet);
+ ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0],
ah_info->dest_ip_addr[0]);
+ CURVNET_RESTORE();
+#endif
if (ipv4_is_multicast(((struct sockaddr_in *)dgid_addr)->sin_addr.s_addr)) {
irdma_mcast_mac_v4(ah_info->dest_ip_addr, dmac);
}
@@ -337,8 +385,13 @@ irdma_create_ah_vlan_tag(struct irdma_device *iwdev,
if (ah_info->vlan_tag < VLAN_N_VID) {
ah_info->insert_vlan_tag = true;
ah_info->vlan_tag |=
- rt_tos2priority(ah_info->tc_tos) << VLAN_PRIO_SHIFT;
+ (u16)rt_tos2priority(ah_info->tc_tos) << VLAN_PRIO_SHIFT;
}
+ if (iwdev->roce_dcqcn_en) {
+ ah_info->tc_tos &= ~ECN_CODE_PT_MASK;
+ ah_info->tc_tos |= ECN_CODE_PT_VAL;
+ }
+
return 0;
}
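When DCQCN is enabled, the AH's traffic class is rewritten so every packet carries an ECN-capable codepoint and congested switches can mark instead of drop. A sketch of the bit manipulation, assuming the conventional RFC 3168 layout (DSCP in bits 7:2, ECN in bits 1:0) and that ECN_CODE_PT_MASK/ECN_CODE_PT_VAL are 0x3 and 0x2, i.e. ECT(0) — values not shown in this hunk:

    u8 tc_tos = 0x68;   /* DSCP 26 (AF31), ECN 00 = not-ECT      */
    tc_tos &= ~0x3;     /* clear the 2-bit ECN field             */
    tc_tos |= 0x2;      /* set ECT(0); DSCP bits untouched       */
    /* tc_tos == 0x6a: same class of service, now ECN-capable    */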
@@ -347,7 +400,8 @@ irdma_create_ah_wait(struct irdma_pci_f *rf,
struct irdma_sc_ah *sc_ah, bool sleep)
{
if (!sleep) {
- int cnt = CQP_COMPL_WAIT_TIME_MS * CQP_TIMEOUT_THRESHOLD;
+ int cnt = rf->sc_dev.hw_attrs.max_cqp_compl_wait_time_ms *
+ CQP_TIMEOUT_THRESHOLD;
do {
irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
@@ -360,6 +414,8 @@ irdma_create_ah_wait(struct irdma_pci_f *rf,
return 0;
}
+#define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)
+
/**
* irdma_create_ah - create address handle
* @ib_ah: ptr to AH
@@ -391,7 +447,10 @@ irdma_create_ah(struct ib_ah *ib_ah,
} sgid_addr, dgid_addr;
int err;
u8 dmac[ETH_ALEN];
- bool sleep;
+ bool sleep = (flags & RDMA_CREATE_AH_SLEEPABLE) != 0;
+
+ if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
+ return -EINVAL;
err = irdma_alloc_rsrc(rf, rf->allocated_ahs,
rf->max_ah, &ah_id, &rf->next_ah);
@@ -415,7 +474,7 @@ irdma_create_ah(struct ib_ah *ib_ah,
"GID lookup at idx=%d with port=%d failed\n",
attr->grh.sgid_index, attr->port_num);
err = -EINVAL;
- goto error;
+ goto err_gid_l2;
}
rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid);
rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid);
@@ -442,38 +501,42 @@ irdma_create_ah(struct ib_ah *ib_ah,
ether_addr_copy(dmac, attr->dmac);
- irdma_fill_ah_info(iwdev->netdev->if_vnet,
- ah_info, &sgid_attr, &sgid_addr.saddr, &dgid_addr.saddr,
+ irdma_fill_ah_info(iwdev->netdev->if_vnet, ah_info, &sgid_attr, &sgid_addr.saddr, &dgid_addr.saddr,
dmac, ah->av.net_type);
err = irdma_create_ah_vlan_tag(iwdev, ah_info, &sgid_attr, dmac);
if (err)
- goto error;
-
- sleep = flags & RDMA_CREATE_AH_SLEEPABLE;
+ goto err_gid_l2;
err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE,
sleep, irdma_gsi_ud_qp_ah_cb, sc_ah);
if (err) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"CQP-OP Create AH fail");
- goto error;
+ goto err_gid_l2;
}
err = irdma_create_ah_wait(rf, sc_ah, sleep);
if (err) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"CQP create AH timed out");
- goto error;
+ goto err_gid_l2;
}
if (udata) {
uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
err = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (err) {
+ irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah,
+ IRDMA_OP_AH_DESTROY, false, NULL, ah);
+ goto err_gid_l2;
+ }
}
+
return 0;
-error:
+err_gid_l2:
irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id);
+
return err;
}
@@ -539,35 +602,34 @@ irdma_create_qp(struct ib_pd *ibpd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
+#define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx)
+#define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd)
struct irdma_pd *iwpd = to_iwpd(ibpd);
struct irdma_device *iwdev = to_iwdev(ibpd->device);
struct irdma_pci_f *rf = iwdev->rf;
struct irdma_qp *iwqp;
- struct irdma_create_qp_req req;
struct irdma_create_qp_resp uresp = {0};
u32 qp_num = 0;
int ret;
int err_code;
- int sq_size;
- int rq_size;
struct irdma_sc_qp *qp;
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
struct irdma_qp_init_info init_info = {{0}};
struct irdma_qp_host_ctx_info *ctx_info;
- unsigned long flags;
err_code = irdma_validate_qp_attrs(init_attr, iwdev);
if (err_code)
return ERR_PTR(err_code);
- sq_size = init_attr->cap.max_send_wr;
- rq_size = init_attr->cap.max_recv_wr;
+ if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN ||
+ udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
+ return ERR_PTR(-EINVAL);
init_info.vsi = &iwdev->vsi;
init_info.qp_uk_init_info.uk_attrs = uk_attrs;
- init_info.qp_uk_init_info.sq_size = sq_size;
- init_info.qp_uk_init_info.rq_size = rq_size;
+ init_info.qp_uk_init_info.sq_size = init_attr->cap.max_send_wr;
+ init_info.qp_uk_init_info.rq_size = init_attr->cap.max_recv_wr;
init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
@@ -630,35 +692,8 @@ irdma_create_qp(struct ib_pd *ibpd,
init_waitqueue_head(&iwqp->mod_qp_waitq);
if (udata) {
- err_code = ib_copy_from_udata(&req, udata,
- min(sizeof(req), udata->inlen));
- if (err_code) {
- irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
- "ib_copy_from_data fail\n");
- goto error;
- }
-
- iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
- iwqp->user_mode = 1;
- if (req.user_wqe_bufs) {
- struct irdma_ucontext *ucontext = to_ucontext(ibpd->uobject->context);
-
- init_info.qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
- spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
- iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
- &ucontext->qp_reg_mem_list);
- spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
-
- if (!iwqp->iwpbl) {
- err_code = -ENODATA;
- irdma_debug(iwdev_to_idev(iwdev),
- IRDMA_DEBUG_VERBS,
- "no pbl info\n");
- goto error;
- }
- }
init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
- irdma_setup_virt_qp(iwdev, iwqp, &init_info);
+ err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info, init_attr);
} else {
INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
@@ -713,8 +748,6 @@ irdma_create_qp(struct ib_pd *ibpd,
spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
rf->qp_table[qp_num] = iwqp;
- iwqp->max_send_wr = sq_size;
- iwqp->max_recv_wr = rq_size;
if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
if (dev->ws_add(&iwdev->vsi, 0)) {
@@ -735,8 +768,8 @@ irdma_create_qp(struct ib_pd *ibpd,
if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
uresp.lsmm = 1;
}
- uresp.actual_sq_size = sq_size;
- uresp.actual_rq_size = rq_size;
+ uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size;
+ uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size;
uresp.qp_id = qp_num;
uresp.qp_caps = qp->qp_uk.qp_caps;
@@ -776,9 +809,6 @@ irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
irdma_modify_qp_to_err(&iwqp->sc_qp);
- if (!iwqp->user_mode)
- cancel_delayed_work_sync(&iwqp->dwork_flush);
-
irdma_qp_rem_ref(&iwqp->ibqp);
wait_for_completion(&iwqp->free_qp);
irdma_free_lsmm_rsrc(iwqp);
@@ -810,6 +840,8 @@ irdma_create_cq(struct ib_cq *ibcq,
const struct ib_cq_init_attr *attr,
struct ib_udata *udata)
{
+#define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
+#define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
struct ib_device *ibdev = ibcq->device;
struct irdma_device *iwdev = to_iwdev(ibdev);
struct irdma_pci_f *rf = iwdev->rf;
@@ -825,10 +857,15 @@ irdma_create_cq(struct ib_cq *ibcq,
unsigned long flags;
int err_code;
int entries = attr->cqe;
+ bool cqe_64byte_ena;
err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
if (err_code)
return err_code;
+
+ if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
+ udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
+ return -EINVAL;
err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
&rf->next_cq);
if (err_code)
@@ -842,6 +879,8 @@ irdma_create_cq(struct ib_cq *ibcq,
info.dev = dev;
ukinfo->cq_size = max(entries, 4);
ukinfo->cq_id = cq_num;
+ cqe_64byte_ena = (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_64_BYTE_CQE) ? true : false;
+ ukinfo->avoid_mem_cflct = cqe_64byte_ena;
iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
if (attr->comp_vector < rf->ceqs_count)
info.ceq_id = attr->comp_vector;
@@ -860,6 +899,7 @@ irdma_create_cq(struct ib_cq *ibcq,
iwcq->user_mode = true;
ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
+
if (ib_copy_from_udata(&req, udata,
min(sizeof(req), udata->inlen))) {
err_code = -EFAULT;
@@ -913,14 +953,17 @@ irdma_create_cq(struct ib_cq *ibcq,
}
entries++;
- if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ if (!cqe_64byte_ena && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
entries *= 2;
ukinfo->cq_size = entries;
- rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
- iwcq->kmem.size = round_up(rsize, 256);
+ if (cqe_64byte_ena)
+ rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_extended_cqe);
+ else
+ rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
+ iwcq->kmem.size = round_up(rsize, IRDMA_HW_PAGE_SIZE);
iwcq->kmem.va = irdma_allocate_dma_mem(dev->hw, &iwcq->kmem,
- iwcq->kmem.size, 256);
+ iwcq->kmem.size, IRDMA_HW_PAGE_SIZE);
if (!iwcq->kmem.va) {
err_code = -ENOMEM;
goto cq_free_rsrc;
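Kernel CQ memory is now sized by the CQE format in use and rounded up to a full hardware page rather than 256 bytes. Illustrative arithmetic, assuming 32-byte irdma_cqe, 64-byte irdma_extended_cqe, and a 4 KB IRDMA_HW_PAGE_SIZE (all three sizes are assumptions, not stated in the hunk):

    /* attr->cqe = 500 on GEN_2 hardware; entries++ gives 501 */
    rsize = 501 * 64;                   /* 64-byte CQEs, no doubling: 32064 */
    rsize = (501 * 2) * 32;             /* 32-byte CQEs, entries *= 2: 32064 */

    iwcq->kmem.size = round_up(rsize, 4096);    /* 32768: whole pages */

Either way the footprint comes out the same; the flag changes the stride the hardware writes, not the total allocation.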
@@ -1058,61 +1101,97 @@ irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
int
irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
- struct ib_pd *ibpd = ib_mr->pd;
- struct irdma_pd *iwpd = to_iwpd(ibpd);
struct irdma_mr *iwmr = to_iwmr(ib_mr);
struct irdma_device *iwdev = to_iwdev(ib_mr->device);
- struct irdma_dealloc_stag_info *info;
struct irdma_pbl *iwpbl = &iwmr->iwpbl;
- struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
- struct irdma_cqp_request *cqp_request;
- struct cqp_cmds_info *cqp_info;
- int status;
+ int ret;
if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
if (iwmr->region) {
struct irdma_ucontext *ucontext;
- ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
+ ucontext = rdma_udata_to_drv_context(udata,
+ struct irdma_ucontext,
+ ibucontext);
irdma_del_memlist(iwmr, ucontext);
}
goto done;
}
- cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
- if (!cqp_request)
- return -ENOMEM;
-
- cqp_info = &cqp_request->info;
- info = &cqp_info->in.u.dealloc_stag.info;
- memset(info, 0, sizeof(*info));
- info->pd_id = iwpd->sc_pd.pd_id & 0x00007fff;
- info->stag_idx = RS_64_1(ib_mr->rkey, IRDMA_CQPSQ_STAG_IDX_S);
- info->mr = true;
- if (iwpbl->pbl_allocated)
- info->dealloc_pbl = true;
-
- cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
- cqp_info->post_sq = 1;
- cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
- cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
- status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
- irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
- if (status)
- return status;
+ ret = irdma_hwdereg_mr(ib_mr);
+ if (ret)
+ return ret;
irdma_free_stag(iwdev, iwmr->stag);
done:
if (iwpbl->pbl_allocated)
- irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
+ irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
if (iwmr->region)
ib_umem_release(iwmr->region);
+
kfree(iwmr);
return 0;
}
+/**
+ * irdma_rereg_user_mr - Re-register a user memory region
+ * @ib_mr: ib mem to access iwarp mr pointer
+ * @flags: bit mask to indicate which of the attrs of MR modified
+ * @start: virtual start address
+ * @len: length of mr
+ * @virt: virtual address
+ * @new_access: bit mask of access flags
+ * @new_pd: ptr of pd
+ * @udata: user data
+ */
+int
+irdma_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 len,
+ u64 virt, int new_access, struct ib_pd *new_pd,
+ struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(ib_mr->device);
+ struct irdma_mr *iwmr = to_iwmr(ib_mr);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ int ret;
+
+ if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
+ return -EINVAL;
+
+ if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS))
+ return -EOPNOTSUPP;
+
+ ret = irdma_hwdereg_mr(ib_mr);
+ if (ret)
+ return ret;
+
+ if (flags & IB_MR_REREG_ACCESS)
+ iwmr->access = new_access;
+
+ if (flags & IB_MR_REREG_PD) {
+ iwmr->ibmr.pd = new_pd;
+ iwmr->ibmr.device = new_pd->device;
+ }
+
+ if (flags & IB_MR_REREG_TRANS) {
+ if (iwpbl->pbl_allocated) {
+ irdma_free_pble(iwdev->rf->pble_rsrc,
+ &iwpbl->pble_alloc);
+ iwpbl->pbl_allocated = false;
+ }
+ if (iwmr->region) {
+ ib_umem_release(iwmr->region);
+ iwmr->region = NULL;
+ }
+
+ ib_mr = irdma_rereg_mr_trans(iwmr, start, len, virt, udata);
+ if (IS_ERR(ib_mr))
+ return PTR_ERR(ib_mr);
+
+ } else {
+ ret = irdma_hwreg_mr(iwdev, iwmr, iwmr->access);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
int
kc_irdma_set_roce_cm_info(struct irdma_qp *iwqp, struct ib_qp_attr *attr,
u16 *vlan_id)
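irdma_rereg_user_mr() is the kernel side of the newly wired-up rereg_mr verb (note the IB_USER_VERBS_CMD_REREG_MR bit added further below); from userspace it is reached through the standard libibverbs entry point. A hedged usage sketch — buffer and length names are illustrative:

    #include <infiniband/verbs.h>

    /* Re-point an existing MR at a new buffer and refresh its rights. */
    int
    move_mr(struct ibv_mr *mr, struct ibv_pd *pd, void *buf, size_t len)
    {
        /* pd is ignored here since IBV_REREG_MR_CHANGE_PD is not set */
        return ibv_rereg_mr(mr,
            IBV_REREG_MR_CHANGE_TRANSLATION | IBV_REREG_MR_CHANGE_ACCESS,
            pd, buf, len,
            IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
    }

On success the MR keeps its handle but maps the new range; the lkey/rkey may change, so callers must re-read them from the ibv_mr.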
@@ -1134,8 +1213,8 @@ kc_irdma_set_roce_cm_info(struct irdma_qp *iwqp, struct ib_qp_attr *attr,
}
rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid);
-
dev_put(sgid_attr.ndev);
+ iwqp->sc_qp.user_pri = iwqp->ctx_info.user_pri;
return 0;
}
@@ -1167,11 +1246,11 @@ irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
wait_for_completion(&iwcq->free_cq);
irdma_cq_wq_destroy(iwdev->rf, cq);
- irdma_cq_free_rsrc(iwdev->rf, iwcq);
spin_lock_irqsave(&iwceq->ce_lock, flags);
irdma_sc_cleanup_ceqes(cq, ceq);
spin_unlock_irqrestore(&iwceq->ce_lock, flags);
+ irdma_cq_free_rsrc(iwdev->rf, iwcq);
}
/**
@@ -1441,7 +1520,59 @@ irdma_query_port(struct ib_device *ibdev, u8 port,
return 0;
}
-extern const char *const irdma_hw_stat_names[];
+static const char *const irdma_hw_stat_names[] = {
+ /* gen1 - 32-bit */
+ [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
+ [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
+ [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
+ [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
+ [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
+ [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
+ [IRDMA_HW_STAT_INDEX_RXVLANERR] = "rxVlanErrors",
+ /* gen1 - 64-bit */
+ [IRDMA_HW_STAT_INDEX_IP4RXOCTS] = "ip4InOctets",
+ [IRDMA_HW_STAT_INDEX_IP4RXPKTS] = "ip4InPkts",
+ [IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = "ip4InReasmRqd",
+ [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = "ip4InMcastPkts",
+ [IRDMA_HW_STAT_INDEX_IP4TXOCTS] = "ip4OutOctets",
+ [IRDMA_HW_STAT_INDEX_IP4TXPKTS] = "ip4OutPkts",
+ [IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = "ip4OutSegRqd",
+ [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = "ip4OutMcastPkts",
+ [IRDMA_HW_STAT_INDEX_IP6RXOCTS] = "ip6InOctets",
+ [IRDMA_HW_STAT_INDEX_IP6RXPKTS] = "ip6InPkts",
+ [IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = "ip6InReasmRqd",
+ [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = "ip6InMcastPkts",
+ [IRDMA_HW_STAT_INDEX_IP6TXOCTS] = "ip6OutOctets",
+ [IRDMA_HW_STAT_INDEX_IP6TXPKTS] = "ip6OutPkts",
+ [IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = "ip6OutSegRqd",
+ [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = "ip6OutMcastPkts",
+ [IRDMA_HW_STAT_INDEX_RDMARXRDS] = "InRdmaReads",
+ [IRDMA_HW_STAT_INDEX_RDMARXSNDS] = "InRdmaSends",
+ [IRDMA_HW_STAT_INDEX_RDMARXWRS] = "InRdmaWrites",
+ [IRDMA_HW_STAT_INDEX_RDMATXRDS] = "OutRdmaReads",
+ [IRDMA_HW_STAT_INDEX_RDMATXSNDS] = "OutRdmaSends",
+ [IRDMA_HW_STAT_INDEX_RDMATXWRS] = "OutRdmaWrites",
+ [IRDMA_HW_STAT_INDEX_RDMAVBND] = "RdmaBnd",
+ [IRDMA_HW_STAT_INDEX_RDMAVINV] = "RdmaInv",
+
+ /* gen2 - 32-bit */
+ [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = "cnpHandled",
+ [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = "cnpIgnored",
+ [IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = "cnpSent",
+ /* gen2 - 64-bit */
+ [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = "ip4InMcastOctets",
+ [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = "ip4OutMcastOctets",
+ [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = "ip6InMcastOctets",
+ [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = "ip6OutMcastOctets",
+ [IRDMA_HW_STAT_INDEX_UDPRXPKTS] = "RxUDP",
+ [IRDMA_HW_STAT_INDEX_UDPTXPKTS] = "TxUDP",
+ [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = "RxECNMrkd",
+ [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = "RetransSegs",
+ [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = "InOptErrors",
+ [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = "InProtoErrors",
+ [IRDMA_HW_STAT_INDEX_TCPRXSEGS] = "InSegs",
+ [IRDMA_HW_STAT_INDEX_TCPTXSEG] = "OutSegs",
+};
/**
* irdma_alloc_hw_stats - Allocate a hw stats structure
@@ -1546,6 +1677,7 @@ kc_set_rdma_uverbs_cmd_mask(struct irdma_device *iwdev)
BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD) |
BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD) |
BIT_ULL(IB_USER_VERBS_CMD_REG_MR) |
+ BIT_ULL(IB_USER_VERBS_CMD_REREG_MR) |
BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR) |
BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ) |
diff --git a/sys/dev/irdma/irdma_main.h b/sys/dev/irdma/irdma_main.h
index bfee3bef2e88..1eabf0497d93 100644
--- a/sys/dev/irdma/irdma_main.h
+++ b/sys/dev/irdma/irdma_main.h
@@ -44,6 +44,7 @@
#include <netinet/if_ether.h>
#include <linux/slab.h>
#include <linux/rculist.h>
+#include <rdma/uverbs_ioctl.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
@@ -52,7 +53,6 @@
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
-#include <rdma/uverbs_ioctl.h>
#include "osdep.h"
#include "irdma_defs.h"
#include "irdma_hmc.h"
@@ -101,7 +101,7 @@ extern bool irdma_upload_context;
#define IRDMA_NO_QSET 0xffff
#define IW_CFG_FPM_QP_COUNT 32768
-#define IRDMA_MAX_PAGES_PER_FMR 512
+#define IRDMA_MAX_PAGES_PER_FMR 262144
#define IRDMA_MIN_PAGES_PER_FMR 1
#define IRDMA_CQP_COMPL_RQ_WQE_FLUSHED 2
#define IRDMA_CQP_COMPL_SQ_WQE_FLUSHED 3
@@ -123,9 +123,6 @@ extern bool irdma_upload_context;
#define IRDMA_DRV_OPT_MCAST_LOGPORT_MAP 0x00000800
#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
-#define VSI_RXSWCTRL(_VSI) (0x00205000 + ((_VSI) * 4))
-#define VSI_RXSWCTRL_MACVSIPRUNEENABLE_M BIT(8)
-#define VSI_RXSWCTRL_SRCPRUNEENABLE_M BIT(13)
#define IRDMA_ROCE_CWND_DEFAULT 0x400
#define IRDMA_ROCE_ACKCREDS_DEFAULT 0x1E
@@ -278,6 +275,8 @@ struct irdma_pci_f {
u8 *mem_rsrc;
u8 rdma_ver;
u8 rst_to;
+ /* Not used in SRIOV VF mode */
+ u8 pf_id;
enum irdma_protocol_used protocol_used;
bool en_rem_endpoint_trk:1;
bool dcqcn_ena:1;
@@ -360,6 +359,7 @@ struct irdma_device {
struct ib_device ibdev;
struct irdma_pci_f *rf;
struct ifnet *netdev;
+ struct notifier_block nb_netdevice_event;
struct irdma_handler *hdl;
struct workqueue_struct *cleanup_wq;
struct irdma_sc_vsi vsi;
@@ -368,7 +368,6 @@ struct irdma_device {
u32 roce_ackcreds;
u32 vendor_id;
u32 vendor_part_id;
- u32 device_cap_flags;
u32 push_mode;
u32 rcv_wnd;
u16 mac_ip_table_idx;
@@ -376,6 +375,12 @@ struct irdma_device {
u8 rcv_wscale;
u8 iw_status;
u8 rd_fence_rate;
+ bool override_rcv_wnd:1;
+ bool override_cwnd:1;
+ bool override_ackcreds:1;
+ bool override_ooo:1;
+ bool override_rd_fence_rate:1;
+ bool override_rtomin:1;
bool roce_mode:1;
bool roce_dcqcn_en:1;
bool dcb_vlan_mode:1;
@@ -508,7 +513,7 @@ void irdma_qp_rem_ref(struct ib_qp *ibqp);
void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp);
struct ib_qp *irdma_get_qp(struct ib_device *ibdev, int qpn);
void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask);
-void irdma_manage_arp_cache(struct irdma_pci_f *rf, unsigned char *mac_addr,
+void irdma_manage_arp_cache(struct irdma_pci_f *rf, const unsigned char *mac_addr,
u32 *ip_addr, u32 action);
struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port);
void irdma_del_apbvt(struct irdma_device *iwdev,
@@ -581,6 +586,10 @@ int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
void *cb_param);
void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request);
bool irdma_cq_empty(struct irdma_cq *iwcq);
+int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr);
+void irdma_unregister_notifiers(struct irdma_device *iwdev);
+int irdma_register_notifiers(struct irdma_device *iwdev);
void irdma_set_rf_user_cfg_params(struct irdma_pci_f *rf);
void irdma_add_ip(struct irdma_device *iwdev);
void irdma_add_handler(struct irdma_handler *hdl);
diff --git a/sys/dev/irdma/irdma_pble.c b/sys/dev/irdma/irdma_pble.c
index 873399c4299c..c32619b117fe 100644
--- a/sys/dev/irdma/irdma_pble.c
+++ b/sys/dev/irdma/irdma_pble.c
@@ -265,7 +265,7 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
return -EINVAL;
chunkmem.size = sizeof(*chunk);
- chunkmem.va = kzalloc(chunkmem.size, GFP_ATOMIC);
+ chunkmem.va = kzalloc(chunkmem.size, GFP_KERNEL);
if (!chunkmem.va)
return -ENOMEM;
@@ -394,7 +394,7 @@ get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
lvl2->leaf_cnt = total;
lvl2->leafmem.size = (sizeof(*leaf) * total);
- lvl2->leafmem.va = kzalloc(lvl2->leafmem.size, GFP_ATOMIC);
+ lvl2->leafmem.va = kzalloc(lvl2->leafmem.size, GFP_KERNEL);
if (!lvl2->leafmem.va)
return -ENOMEM;
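These PBLE allocations run in process context during resource setup, so GFP_KERNEL is the right strength: the allocator may sleep and reclaim rather than fail outright. GFP_ATOMIC stays reserved for paths that cannot sleep:

    buf = kzalloc(size, GFP_KERNEL);    /* setup path: sleeping allowed */

    spin_lock_irqsave(&lock, flags);    /* atomic context: must not sleep */
    ent = kzalloc(sizeof(*ent), GFP_ATOMIC);
    spin_unlock_irqrestore(&lock, flags);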
diff --git a/sys/dev/irdma/irdma_protos.h b/sys/dev/irdma/irdma_protos.h
index da7f6110b9e5..24e5d42a26ed 100644
--- a/sys/dev/irdma/irdma_protos.h
+++ b/sys/dev/irdma/irdma_protos.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2016 - 2021 Intel Corporation
+ * Copyright (c) 2016 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -45,8 +45,7 @@
#define CQP_TIMEOUT_THRESHOLD 500
/* init operations */
-int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
- struct irdma_device_init_info *info);
+int irdma_sc_dev_init(struct irdma_sc_dev *dev, struct irdma_device_init_info *info);
void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
__le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch);
int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
@@ -56,7 +55,7 @@ void irdma_init_config_check(struct irdma_config_check *cc,
u8 traffic_class,
u16 qs_handle);
/* HMC/FPM functions */
-int irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id);
+int irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u16 hmc_fn_id);
/* stats misc */
int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
struct irdma_vsi_pestat *pestat, bool wait);
@@ -114,7 +113,7 @@ int irdma_free_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem);
u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type);
void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp);
int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
- u8 hmc_fn_id, bool post_sq,
+ u16 hmc_fn_id, bool post_sq,
bool poll_registers);
int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count);
int irdma_get_rdma_features(struct irdma_sc_dev *dev);
@@ -129,9 +128,9 @@ void dumpcls(struct irdma_sc_dev *dev);
int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
struct irdma_update_sds_info *info);
int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
+ struct irdma_dma_mem *val_mem, u16 hmc_fn_id);
int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
+ struct irdma_dma_mem *val_mem, u16 hmc_fn_id);
int irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
struct irdma_dma_mem *mem);
int irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
diff --git a/sys/dev/irdma/irdma_puda.c b/sys/dev/irdma/irdma_puda.c
index 909284ac0eb0..ea1ae8e28637 100644
--- a/sys/dev/irdma/irdma_puda.c
+++ b/sys/dev/irdma/irdma_puda.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2021 Intel Corporation
+ * Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -129,17 +129,17 @@ irdma_puda_post_recvbuf(struct irdma_puda_rsrc *rsrc, u32 wqe_idx,
if (!initial)
get_64bit_val(wqe, IRDMA_BYTE_24, &offset24);
- offset24 = (offset24) ? 0 : LS_64(1, IRDMAQPSQ_VALID);
+ offset24 = (offset24) ? 0 : FIELD_PREP(IRDMAQPSQ_VALID, 1);
set_64bit_val(wqe, IRDMA_BYTE_16, 0);
set_64bit_val(wqe, 0, buf->mem.pa);
if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
set_64bit_val(wqe, IRDMA_BYTE_8,
- LS_64(buf->mem.size, IRDMAQPSQ_GEN1_FRAG_LEN));
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, buf->mem.size));
} else {
set_64bit_val(wqe, IRDMA_BYTE_8,
- LS_64(buf->mem.size,
- IRDMAQPSQ_FRAG_LEN) | (offset24 & IRDMAQPSQ_VALID_M));
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, buf->mem.size) |
+ offset24);
}
irdma_wmb(); /* make sure WQE is written before valid bit is set */
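Throughout this commit the driver-private LS_64/RS_64 shift macros give way to FIELD_PREP()/FIELD_GET(), which derive the shift from the mask so each hardware field is described exactly once. A small sketch of the semantics with a made-up mask (on FreeBSD these macros are expected to come from the driver's compat headers, as the diff implies):

    #define DEMO_FIELD GENMASK_ULL(23, 16)      /* hypothetical 8-bit field */

    u64 word = FIELD_PREP(DEMO_FIELD, 0xAB);    /* 0x0000000000AB0000 */
    u8  val  = FIELD_GET(DEMO_FIELD, word);     /* 0xAB               */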
@@ -183,7 +183,7 @@ irdma_puda_alloc_buf(struct irdma_sc_dev *dev,
struct irdma_virt_mem buf_mem;
buf_mem.size = sizeof(struct irdma_puda_buf);
- buf_mem.va = kzalloc(buf_mem.size, GFP_ATOMIC);
+ buf_mem.va = kzalloc(buf_mem.size, GFP_KERNEL);
if (!buf_mem.va)
return NULL;
@@ -269,18 +269,18 @@ irdma_puda_poll_info(struct irdma_sc_cq *cq,
cqe = IRDMA_GET_CURRENT_CQ_ELEM(&cq->cq_uk);
get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
- valid_bit = (bool)RS_64(qword3, IRDMA_CQ_VALID);
+ valid_bit = (bool)FIELD_GET(IRDMA_CQ_VALID, qword3);
if (valid_bit != cq_uk->polarity)
return -ENOENT;
if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
- ext_valid = (bool)RS_64(qword3, IRDMA_CQ_EXTCQE);
+ ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
if (ext_valid) {
peek_head = (cq_uk->cq_ring.head + 1) % cq_uk->cq_ring.size;
ext_cqe = cq_uk->cq_base[peek_head].buf;
get_64bit_val(ext_cqe, IRDMA_BYTE_24, &qword7);
- polarity = (u8)RS_64(qword7, IRDMA_CQ_VALID);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
if (!peek_head)
polarity ^= 1;
if (polarity != cq_uk->polarity)
@@ -298,11 +298,11 @@ irdma_puda_poll_info(struct irdma_sc_cq *cq,
irdma_debug_buf(cq->dev, IRDMA_DEBUG_PUDA, "PUDA EXT-CQE",
ext_cqe, 32);
- error = (bool)RS_64(qword3, IRDMA_CQ_ERROR);
+ error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
if (error) {
irdma_debug(cq->dev, IRDMA_DEBUG_PUDA, "receive error\n");
- major_err = (u32)(RS_64(qword3, IRDMA_CQ_MAJERR));
- minor_err = (u32)(RS_64(qword3, IRDMA_CQ_MINERR));
+ major_err = (u32)(FIELD_GET(IRDMA_CQ_MAJERR, qword3));
+ minor_err = (u32)(FIELD_GET(IRDMA_CQ_MINERR, qword3));
info->compl_error = major_err << 16 | minor_err;
return -EIO;
}
@@ -310,23 +310,23 @@ irdma_puda_poll_info(struct irdma_sc_cq *cq,
get_64bit_val(cqe, IRDMA_BYTE_0, &qword0);
get_64bit_val(cqe, IRDMA_BYTE_16, &qword2);
- info->q_type = (u8)RS_64(qword3, IRDMA_CQ_SQ);
- info->qp_id = (u32)RS_64(qword2, IRDMACQ_QPID);
+ info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
+ info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
- info->ipv4 = (bool)RS_64(qword3, IRDMACQ_IPV4);
+ info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
info->qp = (struct irdma_qp_uk *)(irdma_uintptr) comp_ctx;
- info->wqe_idx = (u32)RS_64(qword3, IRDMA_CQ_WQEIDX);
+ info->wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
if (ext_valid) {
- info->vlan_valid = (bool)RS_64(qword7, IRDMA_CQ_UDVLANVALID);
+ info->vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
if (info->vlan_valid) {
get_64bit_val(ext_cqe, IRDMA_BYTE_16, &qword6);
- info->vlan = (u16)RS_64(qword6, IRDMA_CQ_UDVLAN);
+ info->vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
}
- info->smac_valid = (bool)RS_64(qword7, IRDMA_CQ_UDSMACVALID);
+ info->smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
if (info->smac_valid) {
get_64bit_val(ext_cqe, IRDMA_BYTE_16, &qword6);
info->smac[0] = (u8)((qword6 >> 40) & 0xFF);
@@ -339,12 +339,12 @@ irdma_puda_poll_info(struct irdma_sc_cq *cq,
}
if (cq->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
- info->vlan_valid = (bool)RS_64(qword3, IRDMA_VLAN_TAG_VALID);
- info->l4proto = (u8)RS_64(qword2, IRDMA_UDA_L4PROTO);
- info->l3proto = (u8)RS_64(qword2, IRDMA_UDA_L3PROTO);
+ info->vlan_valid = (bool)FIELD_GET(IRDMA_VLAN_TAG_VALID, qword3);
+ info->l4proto = (u8)FIELD_GET(IRDMA_UDA_L4PROTO, qword2);
+ info->l3proto = (u8)FIELD_GET(IRDMA_UDA_L3PROTO, qword2);
}
- info->payload_len = (u32)RS_64(qword0, IRDMACQ_PAYLDLEN);
+ info->payload_len = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
}
return 0;
@@ -486,35 +486,36 @@ irdma_puda_send(struct irdma_sc_qp *qp, struct irdma_puda_send_info *info)
if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
hdr[0] = 0; /* Dest_QPN and Dest_QKey only for UD */
- hdr[1] = LS_64(IRDMA_OP_TYPE_SEND, IRDMA_UDA_QPSQ_OPCODE) |
- LS_64(l4len, IRDMA_UDA_QPSQ_L4LEN) |
- LS_64(info->ah_id, IRDMAQPSQ_AHID) |
- LS_64(1, IRDMA_UDA_QPSQ_SIGCOMPL) |
- LS_64(qp->qp_uk.swqe_polarity, IRDMA_UDA_QPSQ_VALID);
+ hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_L4LEN, l4len) |
+ FIELD_PREP(IRDMAQPSQ_AHID, info->ah_id) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_VALID,
+ qp->qp_uk.swqe_polarity);
/* Fourth line of WQE descriptor */
set_64bit_val(wqe, IRDMA_BYTE_0, info->paddr);
set_64bit_val(wqe, IRDMA_BYTE_8,
- LS_64(info->len, IRDMAQPSQ_FRAG_LEN) |
- LS_64(qp->qp_uk.swqe_polarity, IRDMA_UDA_QPSQ_VALID));
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, info->len) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity));
} else {
- hdr[0] = LS_64((info->maclen >> 1), IRDMA_UDA_QPSQ_MACLEN) |
- LS_64(iplen, IRDMA_UDA_QPSQ_IPLEN) |
- LS_64(1, IRDMA_UDA_QPSQ_L4T) |
- LS_64(iipt, IRDMA_UDA_QPSQ_IIPT) |
- LS_64(l4len, IRDMA_GEN1_UDA_QPSQ_L4LEN);
+ hdr[0] = FIELD_PREP(IRDMA_UDA_QPSQ_MACLEN, info->maclen >> 1) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_IPLEN, iplen) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_L4T, 1) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_IIPT, iipt) |
+ FIELD_PREP(IRDMA_GEN1_UDA_QPSQ_L4LEN, l4len);
- hdr[1] = LS_64(IRDMA_OP_TYPE_SEND, IRDMA_UDA_QPSQ_OPCODE) |
- LS_64(1, IRDMA_UDA_QPSQ_SIGCOMPL) |
- LS_64(info->do_lpb, IRDMA_UDA_QPSQ_DOLOOPBACK) |
- LS_64(qp->qp_uk.swqe_polarity, IRDMA_UDA_QPSQ_VALID);
+ hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_DOLOOPBACK, info->do_lpb) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity);
/* Fourth line of WQE descriptor */
set_64bit_val(wqe, IRDMA_BYTE_0, info->paddr);
set_64bit_val(wqe, IRDMA_BYTE_8,
- LS_64(info->len, IRDMAQPSQ_GEN1_FRAG_LEN));
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, info->len));
}
set_64bit_val(wqe, IRDMA_BYTE_16, hdr[0]);
@@ -606,27 +607,27 @@ irdma_puda_qp_setctx(struct irdma_puda_rsrc *rsrc)
set_64bit_val(qp_ctx, IRDMA_BYTE_8, qp->sq_pa);
set_64bit_val(qp_ctx, IRDMA_BYTE_16, qp->rq_pa);
set_64bit_val(qp_ctx, IRDMA_BYTE_24,
- LS_64(qp->hw_rq_size, IRDMAQPC_RQSIZE) |
- LS_64(qp->hw_sq_size, IRDMAQPC_SQSIZE));
+ FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
+ FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size));
set_64bit_val(qp_ctx, IRDMA_BYTE_48,
- LS_64(rsrc->buf_size, IRDMAQPC_SNDMSS));
+ FIELD_PREP(IRDMAQPC_SNDMSS, rsrc->buf_size));
set_64bit_val(qp_ctx, IRDMA_BYTE_56, 0);
if (qp->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
set_64bit_val(qp_ctx, IRDMA_BYTE_64, 1);
set_64bit_val(qp_ctx, IRDMA_BYTE_136,
- LS_64(rsrc->cq_id, IRDMAQPC_TXCQNUM) |
- LS_64(rsrc->cq_id, IRDMAQPC_RXCQNUM));
+ FIELD_PREP(IRDMAQPC_TXCQNUM, rsrc->cq_id) |
+ FIELD_PREP(IRDMAQPC_RXCQNUM, rsrc->cq_id));
set_64bit_val(qp_ctx, IRDMA_BYTE_144,
- LS_64(rsrc->stats_idx, IRDMAQPC_STAT_INDEX));
+ FIELD_PREP(IRDMAQPC_STAT_INDEX, rsrc->stats_idx));
set_64bit_val(qp_ctx, IRDMA_BYTE_160,
- LS_64(1, IRDMAQPC_PRIVEN) |
- LS_64(rsrc->stats_idx_valid, IRDMAQPC_USESTATSINSTANCE));
+ FIELD_PREP(IRDMAQPC_PRIVEN, 1) |
+ FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, rsrc->stats_idx_valid));
set_64bit_val(qp_ctx, IRDMA_BYTE_168,
- LS_64((uintptr_t)qp, IRDMAQPC_QPCOMPCTX));
+ FIELD_PREP(IRDMAQPC_QPCOMPCTX, (uintptr_t)qp));
set_64bit_val(qp_ctx, IRDMA_BYTE_176,
- LS_64(qp->sq_tph_val, IRDMAQPC_SQTPHVAL) |
- LS_64(qp->rq_tph_val, IRDMAQPC_RQTPHVAL) |
- LS_64(qp->qs_handle, IRDMAQPC_QSHANDLE));
+ FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
+ FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
+ FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
irdma_debug_buf(rsrc->dev, IRDMA_DEBUG_PUDA, "PUDA QP CONTEXT", qp_ctx,
IRDMA_QP_CTX_SIZE);
@@ -655,11 +656,11 @@ irdma_puda_qp_wqe(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa);
hdr = qp->qp_uk.qp_id |
- LS_64(IRDMA_CQP_OP_CREATE_QP, IRDMA_CQPSQ_OPCODE) |
- LS_64(IRDMA_QP_TYPE_UDA, IRDMA_CQPSQ_QP_QPTYPE) |
- LS_64(1, IRDMA_CQPSQ_QP_CQNUMVALID) |
- LS_64(2, IRDMA_CQPSQ_QP_NEXTIWSTATE) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, IRDMA_QP_TYPE_UDA) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, 2) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -768,20 +769,19 @@ irdma_puda_cq_wqe(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
set_64bit_val(wqe, IRDMA_BYTE_0, cq->cq_uk.cq_size);
set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1));
set_64bit_val(wqe, IRDMA_BYTE_16,
- LS_64(cq->shadow_read_threshold,
- IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
+ FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
set_64bit_val(wqe, IRDMA_BYTE_32, cq->cq_pa);
set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa);
set_64bit_val(wqe, IRDMA_BYTE_56,
- LS_64(cq->tph_val, IRDMA_CQPSQ_TPHVAL) |
- LS_64(cq->vsi->vsi_idx, IRDMA_CQPSQ_VSIIDX));
+ FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
+ FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
hdr = cq->cq_uk.cq_id |
- LS_64(IRDMA_CQP_OP_CREATE_CQ, IRDMA_CQPSQ_OPCODE) |
- LS_64(1, IRDMA_CQPSQ_CQ_CHKOVERFLOW) |
- LS_64(1, IRDMA_CQPSQ_CQ_ENCEQEMASK) |
- LS_64(1, IRDMA_CQPSQ_CQ_CEQIDVALID) |
- LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@@ -962,13 +962,13 @@ irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
case PUDA_QP_CREATED:
irdma_qp_rem_qos(&rsrc->qp);
- if (!(reset || dev->no_cqp))
+ if (!reset)
irdma_puda_free_qp(rsrc);
irdma_free_dma_mem(dev->hw, &rsrc->qpmem);
/* fallthrough */
case PUDA_CQ_CREATED:
- if (!(reset || dev->no_cqp))
+ if (!reset)
irdma_puda_free_cq(rsrc);
irdma_free_dma_mem(dev->hw, &rsrc->cqmem);
@@ -1007,7 +1007,7 @@ irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc, u32 count)
unsigned long flags;
buf_mem.size = count * sizeof(struct irdma_puda_buf);
- buf_mem.va = kzalloc(buf_mem.size, GFP_ATOMIC);
+ buf_mem.va = kzalloc(buf_mem.size, GFP_KERNEL);
if (!buf_mem.va) {
irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
"error virt_mem for buf\n");
@@ -1115,7 +1115,7 @@ irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
return -EOPNOTSUPP;
}
vmem->size = pudasize + sqwridsize + rqwridsize;
- vmem->va = kzalloc(vmem->size, GFP_ATOMIC);
+ vmem->va = kzalloc(vmem->size, GFP_KERNEL);
if (!vmem->va)
return -ENOMEM;
@@ -1224,16 +1224,16 @@ irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
get_64bit_val(wqe, IRDMA_BYTE_8, &offset8);
if (offset24)
- offset8 &= ~LS_64(1, IRDMAQPSQ_VALID);
+ offset8 &= ~FIELD_PREP(IRDMAQPSQ_VALID, 1);
else
- offset8 |= LS_64(1, IRDMAQPSQ_VALID);
+ offset8 |= FIELD_PREP(IRDMAQPSQ_VALID, 1);
set_64bit_val(wqe, IRDMA_BYTE_8, offset8);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
}
if (offset24)
offset24 = 0;
else
- offset24 = LS_64(1, IRDMAQPSQ_VALID);
+ offset24 = FIELD_PREP(IRDMAQPSQ_VALID, 1);
set_64bit_val(wqe, IRDMA_BYTE_24, offset24);
}
diff --git a/sys/dev/irdma/irdma_type.h b/sys/dev/irdma/irdma_type.h
index 0ba93ae73339..e3079c1f93fd 100644
--- a/sys/dev/irdma/irdma_type.h
+++ b/sys/dev/irdma/irdma_type.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2021 Intel Corporation
+ * Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -35,7 +35,9 @@
#ifndef IRDMA_TYPE_H
#define IRDMA_TYPE_H
+
#include "osdep.h"
+
#include "irdma.h"
#include "irdma_user.h"
#include "irdma_hmc.h"
@@ -132,11 +134,6 @@ enum irdma_term_mpa_errors {
MPA_REQ_RSP = 0x04,
};
-enum irdma_qp_event_type {
- IRDMA_QP_EVENT_CATASTROPHIC,
- IRDMA_QP_EVENT_ACCESS_ERR,
-};
-
enum irdma_hw_stats_index {
/* gen1 - 32-bit */
IRDMA_HW_STAT_INDEX_IP4RXDISCARD = 0,
@@ -176,22 +173,21 @@ enum irdma_hw_stats_index {
IRDMA_HW_STAT_INDEX_RDMATXWRS = 33,
IRDMA_HW_STAT_INDEX_RDMAVBND = 34,
IRDMA_HW_STAT_INDEX_RDMAVINV = 35,
+ IRDMA_HW_STAT_INDEX_IP4RXMCOCTS = 36,
+ IRDMA_HW_STAT_INDEX_IP4TXMCOCTS = 37,
+ IRDMA_HW_STAT_INDEX_IP6RXMCOCTS = 38,
+ IRDMA_HW_STAT_INDEX_IP6TXMCOCTS = 39,
+ IRDMA_HW_STAT_INDEX_UDPRXPKTS = 40,
+ IRDMA_HW_STAT_INDEX_UDPTXPKTS = 41,
+ IRDMA_HW_STAT_INDEX_MAX_GEN_1 = 42, /* Must be same value as next entry */
- IRDMA_HW_STAT_INDEX_MAX_GEN_1 = 36, /* Must be same value as next entry */
-
- /* gen2 - 32-bit */
- IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED = 36,
- IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED = 37,
- IRDMA_HW_STAT_INDEX_TXNPCNPSENT = 38,
/* gen2 - 64-bit */
- IRDMA_HW_STAT_INDEX_IP4RXMCOCTS = 39,
- IRDMA_HW_STAT_INDEX_IP4TXMCOCTS = 40,
- IRDMA_HW_STAT_INDEX_IP6RXMCOCTS = 41,
- IRDMA_HW_STAT_INDEX_IP6TXMCOCTS = 42,
- IRDMA_HW_STAT_INDEX_UDPRXPKTS = 43,
- IRDMA_HW_STAT_INDEX_UDPTXPKTS = 44,
- IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS = 45,
+ IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS = 42,
+ /* gen2 - 32-bit */
+ IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED = 43,
+ IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED = 44,
+ IRDMA_HW_STAT_INDEX_TXNPCNPSENT = 45,
IRDMA_HW_STAT_INDEX_MAX_GEN_2 = 46,
};
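The gen1 block absorbs the multicast-octet and UDP counters (so MAX_GEN_1 moves from 36 to 42), and the remaining gen2 64-bit counter (RxECNMrkd) now precedes the 32-bit CNP counters. These indices must stay in lockstep with the designated-initializer name table added in irdma_kcompat.c above; a hypothetical build-time guard one could place beside that table:

    /* not in the commit; shown only as a possible safety net */
    CTASSERT(nitems(irdma_hw_stat_names) == IRDMA_HW_STAT_INDEX_MAX_GEN_2);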
@@ -331,8 +327,8 @@ struct irdma_hw_stat_map {
struct irdma_stats_gather_info {
bool use_hmc_fcn_index:1;
bool use_stats_inst:1;
- u8 hmc_fcn_index;
- u8 stats_inst_index;
+ u16 hmc_fcn_index;
+ u16 stats_inst_index;
struct irdma_dma_mem stats_buff_mem;
void *gather_stats_va;
void *last_gather_stats_va;
@@ -524,14 +520,14 @@ struct irdma_sc_qp {
struct irdma_stats_inst_info {
bool use_hmc_fcn_index;
- u8 hmc_fn_id;
- u8 stats_idx;
+ u16 hmc_fn_id;
+ u16 stats_idx;
};
struct irdma_up_info {
u8 map[8];
u8 cnp_up_override;
- u8 hmc_fcn_idx;
+ u16 hmc_fcn_idx;
bool use_vlan:1;
bool use_cnp_up_override:1;
};
@@ -585,7 +581,7 @@ struct irdma_config_check {
u16 qs_handle;
};
-#define IRDMA_INVALID_FCN_ID 0xff
+#define IRDMA_INVALID_STATS_IDX 0xff
struct irdma_sc_vsi {
u16 vsi_idx;
struct irdma_sc_dev *dev;
@@ -598,11 +594,10 @@ struct irdma_sc_vsi {
struct irdma_puda_rsrc *ieq;
u32 exception_lan_q;
u16 mtu;
- u16 vm_id;
- u8 fcn_id;
enum irdma_vm_vf_type vm_vf_type;
- bool stats_fcn_id_alloc:1;
+ bool stats_inst_alloc:1;
bool tc_change_pending:1;
+ bool mtu_change_pending:1;
struct irdma_vsi_pestat *pestat;
ATOMIC qp_suspend_reqs;
int (*register_qset)(struct irdma_sc_vsi *vsi,
@@ -611,18 +606,17 @@ struct irdma_sc_vsi {
struct irdma_ws_node *tc_node);
struct irdma_config_check cfg_check[IRDMA_MAX_USER_PRIORITY];
bool tc_print_warning[IRDMA_MAX_TRAFFIC_CLASS];
- struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
u8 qos_rel_bw;
u8 qos_prio_type;
+ u16 stats_idx;
u8 dscp_map[IRDMA_DSCP_NUM_VAL];
+ struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
bool dscp_mode:1;
};
struct irdma_sc_dev {
struct list_head cqp_cmd_head; /* head of the CQP command list */
- bool volatile no_cqp;
spinlock_t cqp_lock; /* protect CQP list access */
- bool fcn_id_array[IRDMA_MAX_STATS_COUNT];
struct irdma_dma_mem vf_fpm_query_buf[IRDMA_MAX_PE_ENA_VF_COUNT];
u64 fpm_query_buf_pa;
u64 fpm_commit_buf_pa;
@@ -640,7 +634,7 @@ struct irdma_sc_dev {
u32 IOMEM *hw_regs[IRDMA_MAX_REGS];
u32 ceq_itr; /* Interrupt throttle, usecs between interrupts: 0 disabled. 2 - 8160 */
u64 hw_masks[IRDMA_MAX_MASKS];
- u64 hw_shifts[IRDMA_MAX_SHIFTS];
+ u8 hw_shifts[IRDMA_MAX_SHIFTS];
const struct irdma_hw_stat_map *hw_stats_map;
u64 feature_info[IRDMA_MAX_FEATURES];
u64 cqp_cmd_stats[IRDMA_MAX_CQP_OPS];
@@ -656,7 +650,7 @@ struct irdma_sc_dev {
struct mutex ws_mutex; /* ws tree mutex */
u32 debug_mask;
u16 num_vfs;
- u8 hmc_fn_id;
+ u16 hmc_fn_id;
u8 vf_id;
bool vchnl_up:1;
bool ceq_valid:1;
@@ -750,7 +744,6 @@ struct irdma_vsi_init_info {
u16 exception_lan_q;
u16 pf_data_vsi_num;
enum irdma_vm_vf_type vm_vf_type;
- u16 vm_id;
int (*register_qset)(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node);
void (*unregister_qset)(struct irdma_sc_vsi *vsi,
@@ -760,7 +753,7 @@ struct irdma_vsi_init_info {
struct irdma_vsi_stats_info {
struct irdma_vsi_pestat *pestat;
u8 fcn_id;
- bool alloc_fcn_id;
+ bool alloc_stats_inst;
};
struct irdma_device_init_info {
@@ -771,7 +764,7 @@ struct irdma_device_init_info {
struct irdma_hw *hw;
void IOMEM *bar0;
u16 max_vfs;
- u8 hmc_fn_id;
+ u16 hmc_fn_id;
u32 debug_mask;
};
@@ -852,10 +845,9 @@ struct irdma_udp_offload_info {
struct irdma_roce_offload_info {
u16 p_key;
- u16 err_rq_idx;
+ u32 err_rq_idx;
u32 qkey;
u32 dest_qp;
- u32 local_qp;
u8 roce_tver;
u8 ack_credits;
u8 err_rq_idx_valid;
@@ -888,7 +880,7 @@ struct irdma_iwarp_offload_info {
u8 ddp_ver;
u8 rdmap_ver;
u8 iwarp_mode;
- u16 err_rq_idx;
+ u32 err_rq_idx;
u32 pd_id;
u16 ord_size;
u16 ird_size;
@@ -976,7 +968,7 @@ struct irdma_qp_host_ctx_info {
u32 send_cq_num;
u32 rcv_cq_num;
u32 rem_endpoint_idx;
- u8 stats_idx;
+ u16 stats_idx;
bool srq_valid:1;
bool tcp_info_valid:1;
bool iwarp_info_valid:1;
@@ -987,8 +979,8 @@ struct irdma_qp_host_ctx_info {
struct irdma_aeqe_info {
u64 compl_ctx;
u32 qp_cq_id;
+ u32 wqe_idx;
u16 ae_id;
- u16 wqe_idx;
u8 tcp_state;
u8 iwarp_state;
bool qp:1;
@@ -1013,7 +1005,8 @@ struct irdma_allocate_stag_info {
bool remote_access:1;
bool use_hmc_fcn_index:1;
bool use_pf_rid:1;
- u8 hmc_fcn_index;
+ bool all_memory:1;
+ u16 hmc_fcn_index;
};
struct irdma_mw_alloc_info {
@@ -1038,8 +1031,9 @@ struct irdma_reg_ns_stag_info {
u32 pd_id;
irdma_stag_key stag_key;
bool use_hmc_fcn_index:1;
- u8 hmc_fcn_index;
+ u16 hmc_fcn_index;
bool use_pf_rid:1;
+ bool all_memory:1;
};
struct irdma_fast_reg_stag_info {
@@ -1061,7 +1055,7 @@ struct irdma_fast_reg_stag_info {
bool signaled:1;
bool push_wqe:1;
bool use_hmc_fcn_index:1;
- u8 hmc_fcn_index;
+ u16 hmc_fcn_index;
bool use_pf_rid:1;
bool defer_flag:1;
};
@@ -1231,7 +1225,7 @@ void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable);
void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout,
struct irdma_sc_dev *dev);
int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err);
-int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp);
+int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp, bool free_hwcqp);
int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
struct irdma_cqp_init_info *info);
void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
@@ -1249,10 +1243,10 @@ int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info);
int irdma_sc_qp_modify(struct irdma_sc_qp *qp,
struct irdma_modify_qp_info *info, u64 scratch,
bool post_sq);
-int irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
- irdma_stag);
-int irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size);
-int irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read);
+void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
+ irdma_stag stag);
+void irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size);
+void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read);
void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
struct irdma_qp_host_ctx_info *info);
void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
@@ -1260,8 +1254,9 @@ void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq);
int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info);
void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info);
+int irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch, bool post_sq);
int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
- u8 hmc_fn_id, bool post_sq,
+ u16 hmc_fn_id, bool post_sq,
bool poll_registers);
void sc_vsi_update_stats(struct irdma_sc_vsi *vsi);
@@ -1411,7 +1406,7 @@ struct cqp_info {
struct irdma_sc_cqp *cqp;
void *fpm_val_va;
u64 fpm_val_pa;
- u8 hmc_fn_id;
+ u16 hmc_fn_id;
u64 scratch;
} query_fpm_val;
@@ -1419,7 +1414,7 @@ struct cqp_info {
struct irdma_sc_cqp *cqp;
void *fpm_val_va;
u64 fpm_val_pa;
- u8 hmc_fn_id;
+ u16 hmc_fn_id;
u64 scratch;
} commit_fpm_val;
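The recurring u8 → u16 widening of hmc_fn_id across these structures is the ABI-visible part of this hunk, presumably because HMC function IDs can exceed 255 on multi-function devices; an 8-bit field would truncate them silently. A minimal standalone sketch of the truncation the wider type avoids (the value 300 is hypothetical, for illustration only):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t fn_id = 300;               /* hypothetical function id > 255 */
	uint8_t old_field = (uint8_t)fn_id; /* old u8 struct member */
	assert(old_field == 44);            /* 300 % 256: silent truncation */
	return 0;
}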
diff --git a/sys/dev/irdma/irdma_uda.c b/sys/dev/irdma/irdma_uda.c
index 3a448467ae48..a71b84d1a4c3 100644
--- a/sys/dev/irdma/irdma_uda.c
+++ b/sys/dev/irdma/irdma_uda.c
@@ -66,35 +66,35 @@ irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
LS_64_1(info->mac_addr[1], 48) |
LS_64_1(info->mac_addr[0], 56));
- qw1 = LS_64(info->pd_idx, IRDMA_UDA_CQPSQ_MAV_PDINDEXLO) |
- LS_64(info->tc_tos, IRDMA_UDA_CQPSQ_MAV_TC) |
- LS_64(info->vlan_tag, IRDMA_UDAQPC_VLANTAG);
+ qw1 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXLO, info->pd_idx) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_TC, info->tc_tos) |
+ FIELD_PREP(IRDMA_UDAQPC_VLANTAG, info->vlan_tag);
- qw2 = LS_64(info->dst_arpindex, IRDMA_UDA_CQPSQ_MAV_ARPINDEX) |
- LS_64(info->flow_label, IRDMA_UDA_CQPSQ_MAV_FLOWLABEL) |
- LS_64(info->hop_ttl, IRDMA_UDA_CQPSQ_MAV_HOPLIMIT) |
- LS_64(info->pd_idx >> 16, IRDMA_UDA_CQPSQ_MAV_PDINDEXHI);
+ qw2 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ARPINDEX, info->dst_arpindex) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_FLOWLABEL, info->flow_label) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_HOPLIMIT, info->hop_ttl) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXHI, info->pd_idx >> 16);
if (!info->ipv4_valid) {
set_64bit_val(wqe, IRDMA_BYTE_40,
- LS_64(info->dest_ip_addr[0], IRDMA_UDA_CQPSQ_MAV_ADDR0) |
- LS_64(info->dest_ip_addr[1], IRDMA_UDA_CQPSQ_MAV_ADDR1));
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
set_64bit_val(wqe, IRDMA_BYTE_32,
- LS_64(info->dest_ip_addr[2], IRDMA_UDA_CQPSQ_MAV_ADDR2) |
- LS_64(info->dest_ip_addr[3], IRDMA_UDA_CQPSQ_MAV_ADDR3));
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));
set_64bit_val(wqe, IRDMA_BYTE_56,
- LS_64(info->src_ip_addr[0], IRDMA_UDA_CQPSQ_MAV_ADDR0) |
- LS_64(info->src_ip_addr[1], IRDMA_UDA_CQPSQ_MAV_ADDR1));
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->src_ip_addr[0]) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->src_ip_addr[1]));
set_64bit_val(wqe, IRDMA_BYTE_48,
- LS_64(info->src_ip_addr[2], IRDMA_UDA_CQPSQ_MAV_ADDR2) |
- LS_64(info->src_ip_addr[3], IRDMA_UDA_CQPSQ_MAV_ADDR3));
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->src_ip_addr[2]) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[3]));
} else {
set_64bit_val(wqe, IRDMA_BYTE_32,
- LS_64(info->dest_ip_addr[0], IRDMA_UDA_CQPSQ_MAV_ADDR3));
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));
set_64bit_val(wqe, IRDMA_BYTE_48,
- LS_64(info->src_ip_addr[0], IRDMA_UDA_CQPSQ_MAV_ADDR3));
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[0]));
}
set_64bit_val(wqe, IRDMA_BYTE_8, qw1);
@@ -104,13 +104,12 @@ irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
set_64bit_val(
wqe, IRDMA_BYTE_24,
- LS_64(cqp->polarity, IRDMA_UDA_CQPSQ_MAV_WQEVALID) |
- LS_64(op, IRDMA_UDA_CQPSQ_MAV_OPCODE) |
- LS_64(info->do_lpbk, IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK) |
- LS_64(info->ipv4_valid, IRDMA_UDA_CQPSQ_MAV_IPV4VALID) |
- LS_64(info->ah_idx, IRDMA_UDA_CQPSQ_MAV_AVIDX) |
- LS_64(info->insert_vlan_tag,
- IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG));
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_OPCODE, op) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK, info->do_lpbk) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_IPV4VALID, info->ipv4_valid) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_AVIDX, info->ah_idx) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG, info->insert_vlan_tag));
irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_AH WQE", wqe,
IRDMA_CQP_WQE_SIZE * 8);
@@ -137,9 +136,9 @@ irdma_create_mg_ctx(struct irdma_mcast_grp_info *info)
if (entry_info->valid_entry) {
set_64bit_val((__le64 *) info->dma_mem_mc.va,
ctx_idx * sizeof(u64),
- LS_64(entry_info->dest_port, IRDMA_UDA_MGCTX_DESTPORT) |
- LS_64(entry_info->valid_entry, IRDMA_UDA_MGCTX_VALIDENT) |
- LS_64(entry_info->qp_id, IRDMA_UDA_MGCTX_QPID));
+ FIELD_PREP(IRDMA_UDA_MGCTX_DESTPORT, entry_info->dest_port) |
+ FIELD_PREP(IRDMA_UDA_MGCTX_VALIDENT, entry_info->valid_entry) |
+ FIELD_PREP(IRDMA_UDA_MGCTX_QPID, entry_info->qp_id));
ctx_idx++;
}
}
@@ -179,8 +178,8 @@ irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
set_64bit_val(wqe, IRDMA_BYTE_32, info->dma_mem_mc.pa);
set_64bit_val(wqe, IRDMA_BYTE_16,
- LS_64(info->vlan_id, IRDMA_UDA_CQPSQ_MG_VLANID) |
- LS_64(info->qs_handle, IRDMA_UDA_CQPSQ_QS_HANDLE));
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANID, info->vlan_id) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_QS_HANDLE, info->qs_handle));
set_64bit_val(wqe, IRDMA_BYTE_0, LS_64_1(info->dest_mac_addr[5], 0) |
LS_64_1(info->dest_mac_addr[4], 8) |
LS_64_1(info->dest_mac_addr[3], 16) |
@@ -188,28 +187,28 @@ irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
LS_64_1(info->dest_mac_addr[1], 32) |
LS_64_1(info->dest_mac_addr[0], 40));
set_64bit_val(wqe, IRDMA_BYTE_8,
- LS_64(info->hmc_fcn_id, IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID));
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID, info->hmc_fcn_id));
if (!info->ipv4_valid) {
set_64bit_val(wqe, IRDMA_BYTE_56,
- LS_64(info->dest_ip_addr[0], IRDMA_UDA_CQPSQ_MAV_ADDR0) |
- LS_64(info->dest_ip_addr[1], IRDMA_UDA_CQPSQ_MAV_ADDR1));
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
set_64bit_val(wqe, IRDMA_BYTE_48,
- LS_64(info->dest_ip_addr[2], IRDMA_UDA_CQPSQ_MAV_ADDR2) |
- LS_64(info->dest_ip_addr[3], IRDMA_UDA_CQPSQ_MAV_ADDR3));
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));
} else {
set_64bit_val(wqe, IRDMA_BYTE_48,
- LS_64(info->dest_ip_addr[0], IRDMA_UDA_CQPSQ_MAV_ADDR3));
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));
}
irdma_wmb(); /* need write memory barrier before writing the WQE header. */
set_64bit_val(wqe, IRDMA_BYTE_24,
- LS_64(cqp->polarity, IRDMA_UDA_CQPSQ_MG_WQEVALID) |
- LS_64(op, IRDMA_UDA_CQPSQ_MG_OPCODE) |
- LS_64(info->mg_id, IRDMA_UDA_CQPSQ_MG_MGIDX) |
- LS_64(info->vlan_valid, IRDMA_UDA_CQPSQ_MG_VLANVALID) |
- LS_64(info->ipv4_valid, IRDMA_UDA_CQPSQ_MG_IPV4VALID));
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_OPCODE, op) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_MGIDX, info->mg_id) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANVALID, info->vlan_valid) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_IPV4VALID, info->ipv4_valid));
irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_MCG WQE", wqe,
IRDMA_CQP_WQE_SIZE * 8);
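The LS_64()/RS_64() → FIELD_PREP()/FIELD_GET() conversion running through this file replaces shift-count macros with mask-based helpers: the shift is derived from the mask itself, so each field definition carries a single name instead of a _S/_M pair. A self-contained model of the semantics (not the kernel implementation; the macro bodies below are illustrative):

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define BF_SHF(mask)          __builtin_ctzll(mask)  /* lowest set bit */
#define FIELD_PREP(mask, val) (((uint64_t)(val) << BF_SHF(mask)) & (mask))
#define FIELD_GET(mask, reg)  (((uint64_t)(reg) & (mask)) >> BF_SHF(mask))

int main(void)
{
	/* IRDMA_UDA_CQPSQ_MAV_TC occupies bits 39:32 of qw1 */
	uint64_t qw1 = FIELD_PREP(GENMASK_ULL(39, 32), 0x2e);

	printf("qw1 = %#llx, tc = %#llx\n",
	       (unsigned long long)qw1,
	       (unsigned long long)FIELD_GET(GENMASK_ULL(39, 32), qw1));
	return 0;
}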
diff --git a/sys/dev/irdma/irdma_uda_d.h b/sys/dev/irdma/irdma_uda_d.h
index 56f467a1522b..ef8751d21c22 100644
--- a/sys/dev/irdma/irdma_uda_d.h
+++ b/sys/dev/irdma/irdma_uda_d.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2019 Intel Corporation
+ * Copyright (c) 2016 - 2021 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -35,381 +35,220 @@
#ifndef IRDMA_UDA_D_H
#define IRDMA_UDA_D_H
-
/* L4 packet type */
#define IRDMA_E_UDA_SQ_L4T_UNKNOWN 0
#define IRDMA_E_UDA_SQ_L4T_TCP 1
#define IRDMA_E_UDA_SQ_L4T_SCTP 2
#define IRDMA_E_UDA_SQ_L4T_UDP 3
-
/* Inner IP header type */
#define IRDMA_E_UDA_SQ_IIPT_UNKNOWN 0
#define IRDMA_E_UDA_SQ_IIPT_IPV6 1
#define IRDMA_E_UDA_SQ_IIPT_IPV4_NO_CSUM 2
#define IRDMA_E_UDA_SQ_IIPT_IPV4_CSUM 3
-
-/* UDA defined fields for transmit descriptors */
#define IRDMA_UDA_QPSQ_PUSHWQE_S 56
-#define IRDMA_UDA_QPSQ_PUSHWQE_M BIT_ULL(IRDMA_UDA_QPSQ_PUSHWQE_S)
-
+#define IRDMA_UDA_QPSQ_PUSHWQE BIT_ULL(56)
#define IRDMA_UDA_QPSQ_INLINEDATAFLAG_S 57
-#define IRDMA_UDA_QPSQ_INLINEDATAFLAG_M \
- BIT_ULL(IRDMA_UDA_QPSQ_INLINEDATAFLAG_S)
-
+#define IRDMA_UDA_QPSQ_INLINEDATAFLAG BIT_ULL(57)
#define IRDMA_UDA_QPSQ_INLINEDATALEN_S 48
-#define IRDMA_UDA_QPSQ_INLINEDATALEN_M \
- ((u64)0xff << IRDMA_UDA_QPSQ_INLINEDATALEN_S)
-
+#define IRDMA_UDA_QPSQ_INLINEDATALEN GENMASK_ULL(55, 48)
#define IRDMA_UDA_QPSQ_ADDFRAGCNT_S 38
-#define IRDMA_UDA_QPSQ_ADDFRAGCNT_M \
- ((u64)0x0F << IRDMA_UDA_QPSQ_ADDFRAGCNT_S)
-
+#define IRDMA_UDA_QPSQ_ADDFRAGCNT GENMASK_ULL(41, 38)
#define IRDMA_UDA_QPSQ_IPFRAGFLAGS_S 42
-#define IRDMA_UDA_QPSQ_IPFRAGFLAGS_M \
- ((u64)0x3 << IRDMA_UDA_QPSQ_IPFRAGFLAGS_S)
-
+#define IRDMA_UDA_QPSQ_IPFRAGFLAGS GENMASK_ULL(43, 42)
#define IRDMA_UDA_QPSQ_NOCHECKSUM_S 45
-#define IRDMA_UDA_QPSQ_NOCHECKSUM_M \
- BIT_ULL(IRDMA_UDA_QPSQ_NOCHECKSUM_S)
-
+#define IRDMA_UDA_QPSQ_NOCHECKSUM BIT_ULL(45)
#define IRDMA_UDA_QPSQ_AHIDXVALID_S 46
-#define IRDMA_UDA_QPSQ_AHIDXVALID_M \
- BIT_ULL(IRDMA_UDA_QPSQ_AHIDXVALID_S)
-
+#define IRDMA_UDA_QPSQ_AHIDXVALID BIT_ULL(46)
#define IRDMA_UDA_QPSQ_LOCAL_FENCE_S 61
-#define IRDMA_UDA_QPSQ_LOCAL_FENCE_M \
- BIT_ULL(IRDMA_UDA_QPSQ_LOCAL_FENCE_S)
-
+#define IRDMA_UDA_QPSQ_LOCAL_FENCE BIT_ULL(61)
#define IRDMA_UDA_QPSQ_AHIDX_S 0
-#define IRDMA_UDA_QPSQ_AHIDX_M ((u64)0x1ffff << IRDMA_UDA_QPSQ_AHIDX_S)
-
+#define IRDMA_UDA_QPSQ_AHIDX GENMASK_ULL(16, 0)
#define IRDMA_UDA_QPSQ_PROTOCOL_S 16
-#define IRDMA_UDA_QPSQ_PROTOCOL_M \
- ((u64)0xff << IRDMA_UDA_QPSQ_PROTOCOL_S)
-
+#define IRDMA_UDA_QPSQ_PROTOCOL GENMASK_ULL(23, 16)
#define IRDMA_UDA_QPSQ_EXTHDRLEN_S 32
-#define IRDMA_UDA_QPSQ_EXTHDRLEN_M \
- ((u64)0x1ff << IRDMA_UDA_QPSQ_EXTHDRLEN_S)
-
+#define IRDMA_UDA_QPSQ_EXTHDRLEN GENMASK_ULL(40, 32)
#define IRDMA_UDA_QPSQ_MULTICAST_S 63
-#define IRDMA_UDA_QPSQ_MULTICAST_M \
- BIT_ULL(IRDMA_UDA_QPSQ_MULTICAST_S)
-
+#define IRDMA_UDA_QPSQ_MULTICAST BIT_ULL(63)
#define IRDMA_UDA_QPSQ_MACLEN_S 56
-#define IRDMA_UDA_QPSQ_MACLEN_M \
- ((u64)0x7f << IRDMA_UDA_QPSQ_MACLEN_S)
+#define IRDMA_UDA_QPSQ_MACLEN GENMASK_ULL(62, 56)
#define IRDMA_UDA_QPSQ_MACLEN_LINE 2
-
#define IRDMA_UDA_QPSQ_IPLEN_S 48
-#define IRDMA_UDA_QPSQ_IPLEN_M \
- ((u64)0x7f << IRDMA_UDA_QPSQ_IPLEN_S)
+#define IRDMA_UDA_QPSQ_IPLEN GENMASK_ULL(54, 48)
#define IRDMA_UDA_QPSQ_IPLEN_LINE 2
-
#define IRDMA_UDA_QPSQ_L4T_S 30
-#define IRDMA_UDA_QPSQ_L4T_M ((u64)0x3 << IRDMA_UDA_QPSQ_L4T_S)
+#define IRDMA_UDA_QPSQ_L4T GENMASK_ULL(31, 30)
#define IRDMA_UDA_QPSQ_L4T_LINE 2
-
#define IRDMA_UDA_QPSQ_IIPT_S 28
-#define IRDMA_UDA_QPSQ_IIPT_M ((u64)0x3 << IRDMA_UDA_QPSQ_IIPT_S)
+#define IRDMA_UDA_QPSQ_IIPT GENMASK_ULL(29, 28)
#define IRDMA_UDA_QPSQ_IIPT_LINE 2
-
#define IRDMA_UDA_QPSQ_DO_LPB_LINE 3
-
#define IRDMA_UDA_QPSQ_FWD_PROG_CONFIRM_S 45
-#define IRDMA_UDA_QPSQ_FWD_PROG_CONFIRM_M \
- BIT_ULL(IRDMA_UDA_QPSQ_FWD_PROG_CONFIRM_S)
+#define IRDMA_UDA_QPSQ_FWD_PROG_CONFIRM BIT_ULL(45)
#define IRDMA_UDA_QPSQ_FWD_PROG_CONFIRM_LINE 3
-
#define IRDMA_UDA_QPSQ_IMMDATA_S 0
-#define IRDMA_UDA_QPSQ_IMMDATA_M \
- ((u64)0xffffffffffffffff << IRDMA_UDA_QPSQ_IMMDATA_S)
-
+#define IRDMA_UDA_QPSQ_IMMDATA GENMASK_ULL(63, 0)
/* Byte Offset 0 */
#define IRDMA_UDAQPC_IPV4_S 3
-#define IRDMA_UDAQPC_IPV4_M BIT_ULL(IRDMAQPC_IPV4_S)
-
+#define IRDMA_UDAQPC_IPV4 BIT_ULL(3)
#define IRDMA_UDAQPC_INSERTVLANTAG_S 5
-#define IRDMA_UDAQPC_INSERTVLANTAG_M BIT_ULL(IRDMA_UDAQPC_INSERTVLANTAG_S)
-
+#define IRDMA_UDAQPC_INSERTVLANTAG BIT_ULL(5)
#define IRDMA_UDAQPC_ISQP1_S 6
-#define IRDMA_UDAQPC_ISQP1_M BIT_ULL(IRDMA_UDAQPC_ISQP1_S)
-
+#define IRDMA_UDAQPC_ISQP1 BIT_ULL(6)
#define IRDMA_UDAQPC_RQWQESIZE_S IRDMAQPC_RQWQESIZE_S
-#define IRDMA_UDAQPC_RQWQESIZE_M IRDMAQPC_RQWQESIZE_M
-
+#define IRDMA_UDAQPC_RQWQESIZE IRDMAQPC_RQWQESIZE
#define IRDMA_UDAQPC_ECNENABLE_S 14
-#define IRDMA_UDAQPC_ECNENABLE_M BIT_ULL(IRDMA_UDAQPC_ECNENABLE_S)
-
+#define IRDMA_UDAQPC_ECNENABLE BIT_ULL(14)
#define IRDMA_UDAQPC_PDINDEXHI_S 20
-#define IRDMA_UDAQPC_PDINDEXHI_M ((u64)3 << IRDMA_UDAQPC_PDINDEXHI_S)
-
+#define IRDMA_UDAQPC_PDINDEXHI GENMASK_ULL(21, 20)
#define IRDMA_UDAQPC_DCTCPENABLE_S 25
-#define IRDMA_UDAQPC_DCTCPENABLE_M BIT_ULL(IRDMA_UDAQPC_DCTCPENABLE_S)
-
+#define IRDMA_UDAQPC_DCTCPENABLE BIT_ULL(25)
#define IRDMA_UDAQPC_RCVTPHEN_S IRDMAQPC_RCVTPHEN_S
-#define IRDMA_UDAQPC_RCVTPHEN_M IRDMAQPC_RCVTPHEN_M
-
+#define IRDMA_UDAQPC_RCVTPHEN IRDMAQPC_RCVTPHEN
#define IRDMA_UDAQPC_XMITTPHEN_S IRDMAQPC_XMITTPHEN_S
-#define IRDMA_UDAQPC_XMITTPHEN_M IRDMAQPC_XMITTPHEN_M
-
+#define IRDMA_UDAQPC_XMITTPHEN IRDMAQPC_XMITTPHEN
#define IRDMA_UDAQPC_RQTPHEN_S IRDMAQPC_RQTPHEN_S
-#define IRDMA_UDAQPC_RQTPHEN_M IRDMAQPC_RQTPHEN_M
-
+#define IRDMA_UDAQPC_RQTPHEN IRDMAQPC_RQTPHEN
#define IRDMA_UDAQPC_SQTPHEN_S IRDMAQPC_SQTPHEN_S
-#define IRDMA_UDAQPC_SQTPHEN_M IRDMAQPC_SQTPHEN_M
-
+#define IRDMA_UDAQPC_SQTPHEN IRDMAQPC_SQTPHEN
#define IRDMA_UDAQPC_PPIDX_S IRDMAQPC_PPIDX_S
-#define IRDMA_UDAQPC_PPIDX_M IRDMAQPC_PPIDX_M
-
+#define IRDMA_UDAQPC_PPIDX IRDMAQPC_PPIDX
#define IRDMA_UDAQPC_PMENA_S IRDMAQPC_PMENA_S
-#define IRDMA_UDAQPC_PMENA_M IRDMAQPC_PMENA_M
-
+#define IRDMA_UDAQPC_PMENA IRDMAQPC_PMENA
#define IRDMA_UDAQPC_INSERTTAG2_S 11
-#define IRDMA_UDAQPC_INSERTTAG2_M BIT_ULL(IRDMA_UDAQPC_INSERTTAG2_S)
-
+#define IRDMA_UDAQPC_INSERTTAG2 BIT_ULL(11)
#define IRDMA_UDAQPC_INSERTTAG3_S 14
-#define IRDMA_UDAQPC_INSERTTAG3_M BIT_ULL(IRDMA_UDAQPC_INSERTTAG3_S)
-
+#define IRDMA_UDAQPC_INSERTTAG3 BIT_ULL(14)
#define IRDMA_UDAQPC_RQSIZE_S IRDMAQPC_RQSIZE_S
-#define IRDMA_UDAQPC_RQSIZE_M IRDMAQPC_RQSIZE_M
-
+#define IRDMA_UDAQPC_RQSIZE IRDMAQPC_RQSIZE
#define IRDMA_UDAQPC_SQSIZE_S IRDMAQPC_SQSIZE_S
-#define IRDMA_UDAQPC_SQSIZE_M IRDMAQPC_SQSIZE_M
-
+#define IRDMA_UDAQPC_SQSIZE IRDMAQPC_SQSIZE
#define IRDMA_UDAQPC_TXCQNUM_S IRDMAQPC_TXCQNUM_S
-#define IRDMA_UDAQPC_TXCQNUM_M IRDMAQPC_TXCQNUM_M
-
+#define IRDMA_UDAQPC_TXCQNUM IRDMAQPC_TXCQNUM
#define IRDMA_UDAQPC_RXCQNUM_S IRDMAQPC_RXCQNUM_S
-#define IRDMA_UDAQPC_RXCQNUM_M IRDMAQPC_RXCQNUM_M
-
+#define IRDMA_UDAQPC_RXCQNUM IRDMAQPC_RXCQNUM
#define IRDMA_UDAQPC_QPCOMPCTX_S IRDMAQPC_QPCOMPCTX_S
-#define IRDMA_UDAQPC_QPCOMPCTX_M IRDMAQPC_QPCOMPCTX_M
-
+#define IRDMA_UDAQPC_QPCOMPCTX IRDMAQPC_QPCOMPCTX
#define IRDMA_UDAQPC_SQTPHVAL_S IRDMAQPC_SQTPHVAL_S
-#define IRDMA_UDAQPC_SQTPHVAL_M IRDMAQPC_SQTPHVAL_M
-
+#define IRDMA_UDAQPC_SQTPHVAL IRDMAQPC_SQTPHVAL
#define IRDMA_UDAQPC_RQTPHVAL_S IRDMAQPC_RQTPHVAL_S
-#define IRDMA_UDAQPC_RQTPHVAL_M IRDMAQPC_RQTPHVAL_M
-
+#define IRDMA_UDAQPC_RQTPHVAL IRDMAQPC_RQTPHVAL
#define IRDMA_UDAQPC_QSHANDLE_S IRDMAQPC_QSHANDLE_S
-#define IRDMA_UDAQPC_QSHANDLE_M IRDMAQPC_QSHANDLE_M
-
+#define IRDMA_UDAQPC_QSHANDLE IRDMAQPC_QSHANDLE
#define IRDMA_UDAQPC_RQHDRRINGBUFSIZE_S 48
-#define IRDMA_UDAQPC_RQHDRRINGBUFSIZE_M \
- ((u64)0x3 << IRDMA_UDAQPC_RQHDRRINGBUFSIZE_S)
-
+#define IRDMA_UDAQPC_RQHDRRINGBUFSIZE GENMASK_ULL(49, 48)
#define IRDMA_UDAQPC_SQHDRRINGBUFSIZE_S 32
-#define IRDMA_UDAQPC_SQHDRRINGBUFSIZE_M \
- ((u64)0x3 << IRDMA_UDAQPC_SQHDRRINGBUFSIZE_S)
-
+#define IRDMA_UDAQPC_SQHDRRINGBUFSIZE GENMASK_ULL(33, 32)
#define IRDMA_UDAQPC_PRIVILEGEENABLE_S 25
-#define IRDMA_UDAQPC_PRIVILEGEENABLE_M \
- BIT_ULL(IRDMA_UDAQPC_PRIVILEGEENABLE_S)
-
+#define IRDMA_UDAQPC_PRIVILEGEENABLE BIT_ULL(25)
#define IRDMA_UDAQPC_USE_STATISTICS_INSTANCE_S 26
-#define IRDMA_UDAQPC_USE_STATISTICS_INSTANCE_M \
- BIT_ULL(IRDMA_UDAQPC_USE_STATISTICS_INSTANCE_S)
-
+#define IRDMA_UDAQPC_USE_STATISTICS_INSTANCE BIT_ULL(26)
#define IRDMA_UDAQPC_STATISTICS_INSTANCE_INDEX_S 0
-#define IRDMA_UDAQPC_STATISTICS_INSTANCE_INDEX_M \
- ((u64)0x7F << IRDMA_UDAQPC_STATISTICS_INSTANCE_INDEX_S)
-
+#define IRDMA_UDAQPC_STATISTICS_INSTANCE_INDEX GENMASK_ULL(6, 0)
#define IRDMA_UDAQPC_PRIVHDRGENENABLE_S 0
-#define IRDMA_UDAQPC_PRIVHDRGENENABLE_M \
- BIT_ULL(IRDMA_UDAQPC_PRIVHDRGENENABLE_S)
-
+#define IRDMA_UDAQPC_PRIVHDRGENENABLE BIT_ULL(0)
#define IRDMA_UDAQPC_RQHDRSPLITENABLE_S 3
-#define IRDMA_UDAQPC_RQHDRSPLITENABLE_M \
- BIT_ULL(IRDMA_UDAQPC_RQHDRSPLITENABLE_S)
-
+#define IRDMA_UDAQPC_RQHDRSPLITENABLE BIT_ULL(3)
#define IRDMA_UDAQPC_RQHDRRINGBUFENABLE_S 2
-#define IRDMA_UDAQPC_RQHDRRINGBUFENABLE_M \
- BIT_ULL(IRDMA_UDAQPC_RQHDRRINGBUFENABLE_S)
-
+#define IRDMA_UDAQPC_RQHDRRINGBUFENABLE BIT_ULL(2)
#define IRDMA_UDAQPC_SQHDRRINGBUFENABLE_S 1
-#define IRDMA_UDAQPC_SQHDRRINGBUFENABLE_M \
- BIT_ULL(IRDMA_UDAQPC_SQHDRRINGBUFENABLE_S)
-
+#define IRDMA_UDAQPC_SQHDRRINGBUFENABLE BIT_ULL(1)
#define IRDMA_UDAQPC_IPID_S 32
-#define IRDMA_UDAQPC_IPID_M ((u64)0xffff << IRDMA_UDAQPC_IPID_S)
-
+#define IRDMA_UDAQPC_IPID GENMASK_ULL(47, 32)
#define IRDMA_UDAQPC_SNDMSS_S 16
-#define IRDMA_UDAQPC_SNDMSS_M ((u64)0x3fff << IRDMA_UDAQPC_SNDMSS_S)
-
+#define IRDMA_UDAQPC_SNDMSS GENMASK_ULL(29, 16)
#define IRDMA_UDAQPC_VLANTAG_S 0
-#define IRDMA_UDAQPC_VLANTAG_M ((u64)0xffff << IRDMA_UDAQPC_VLANTAG_S)
-
-/* Address Handle */
+#define IRDMA_UDAQPC_VLANTAG GENMASK_ULL(15, 0)
#define IRDMA_UDA_CQPSQ_MAV_PDINDEXHI_S 20
-#define IRDMA_UDA_CQPSQ_MAV_PDINDEXHI_M \
- ((u64)0x3 << IRDMA_UDA_CQPSQ_MAV_PDINDEXHI_S)
-
+#define IRDMA_UDA_CQPSQ_MAV_PDINDEXHI GENMASK_ULL(21, 20)
#define IRDMA_UDA_CQPSQ_MAV_PDINDEXLO_S 48
-#define IRDMA_UDA_CQPSQ_MAV_PDINDEXLO_M \
- ((u64)0xffff << IRDMA_UDA_CQPSQ_MAV_PDINDEXLO_S)
-
+#define IRDMA_UDA_CQPSQ_MAV_PDINDEXLO GENMASK_ULL(63, 48)
#define IRDMA_UDA_CQPSQ_MAV_SRCMACADDRINDEX_S 24
-#define IRDMA_UDA_CQPSQ_MAV_SRCMACADDRINDEX_M \
- ((u64)0x3f << IRDMA_UDA_CQPSQ_MAV_SRCMACADDRINDEX_S)
-
+#define IRDMA_UDA_CQPSQ_MAV_SRCMACADDRINDEX GENMASK_ULL(29, 24)
#define IRDMA_UDA_CQPSQ_MAV_ARPINDEX_S 48
-#define IRDMA_UDA_CQPSQ_MAV_ARPINDEX_M \
- ((u64)0xffff << IRDMA_UDA_CQPSQ_MAV_ARPINDEX_S)
-
+#define IRDMA_UDA_CQPSQ_MAV_ARPINDEX GENMASK_ULL(63, 48)
#define IRDMA_UDA_CQPSQ_MAV_TC_S 32
-#define IRDMA_UDA_CQPSQ_MAV_TC_M ((u64)0xff << IRDMA_UDA_CQPSQ_MAV_TC_S)
-
+#define IRDMA_UDA_CQPSQ_MAV_TC GENMASK_ULL(39, 32)
#define IRDMA_UDA_CQPSQ_MAV_HOPLIMIT_S 32
-#define IRDMA_UDA_CQPSQ_MAV_HOPLIMIT_M \
- ((u64)0xff << IRDMA_UDA_CQPSQ_MAV_HOPLIMIT_S)
-
+#define IRDMA_UDA_CQPSQ_MAV_HOPLIMIT GENMASK_ULL(39, 32)
#define IRDMA_UDA_CQPSQ_MAV_FLOWLABEL_S 0
-#define IRDMA_UDA_CQPSQ_MAV_FLOWLABEL_M \
- ((u64)0xfffff << IRDMA_UDA_CQPSQ_MAV_FLOWLABEL_S)
-
+#define IRDMA_UDA_CQPSQ_MAV_FLOWLABEL GENMASK_ULL(19, 0)
#define IRDMA_UDA_CQPSQ_MAV_ADDR0_S 32
-#define IRDMA_UDA_CQPSQ_MAV_ADDR0_M \
- ((u64)0xffffffff << IRDMA_UDA_CQPSQ_MAV_ADDR0_S)
-
+#define IRDMA_UDA_CQPSQ_MAV_ADDR0 GENMASK_ULL(63, 32)
#define IRDMA_UDA_CQPSQ_MAV_ADDR1_S 0
-#define IRDMA_UDA_CQPSQ_MAV_ADDR1_M \
- ((u64)0xffffffff << IRDMA_UDA_CQPSQ_MAV_ADDR1_S)
-
+#define IRDMA_UDA_CQPSQ_MAV_ADDR1 GENMASK_ULL(31, 0)
#define IRDMA_UDA_CQPSQ_MAV_ADDR2_S 32
-#define IRDMA_UDA_CQPSQ_MAV_ADDR2_M \
- ((u64)0xffffffff << IRDMA_UDA_CQPSQ_MAV_ADDR2_S)
-
+#define IRDMA_UDA_CQPSQ_MAV_ADDR2 GENMASK_ULL(63, 32)
#define IRDMA_UDA_CQPSQ_MAV_ADDR3_S 0
-#define IRDMA_UDA_CQPSQ_MAV_ADDR3_M \
- ((u64)0xffffffff << IRDMA_UDA_CQPSQ_MAV_ADDR3_S)
-
+#define IRDMA_UDA_CQPSQ_MAV_ADDR3 GENMASK_ULL(31, 0)
#define IRDMA_UDA_CQPSQ_MAV_WQEVALID_S 63
-#define IRDMA_UDA_CQPSQ_MAV_WQEVALID_M \
- BIT_ULL(IRDMA_UDA_CQPSQ_MAV_WQEVALID_S)
-
+#define IRDMA_UDA_CQPSQ_MAV_WQEVALID BIT_ULL(63)
#define IRDMA_UDA_CQPSQ_MAV_OPCODE_S 32
-#define IRDMA_UDA_CQPSQ_MAV_OPCODE_M \
- ((u64)0x3f << IRDMA_UDA_CQPSQ_MAV_OPCODE_S)
-
+#define IRDMA_UDA_CQPSQ_MAV_OPCODE GENMASK_ULL(37, 32)
#define IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK_S 62
-#define IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK_M \
- BIT_ULL(IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK_S)
-
+#define IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK BIT_ULL(62)
#define IRDMA_UDA_CQPSQ_MAV_IPV4VALID_S 59
-#define IRDMA_UDA_CQPSQ_MAV_IPV4VALID_M \
- BIT_ULL(IRDMA_UDA_CQPSQ_MAV_IPV4VALID_S)
+#define IRDMA_UDA_CQPSQ_MAV_IPV4VALID BIT_ULL(59)
#define IRDMA_UDA_CQPSQ_MAV_AVIDX_S 0
-#define IRDMA_UDA_CQPSQ_MAV_AVIDX_M \
- ((u64)0x1ffff << IRDMA_UDA_CQPSQ_MAV_AVIDX_S)
-
+#define IRDMA_UDA_CQPSQ_MAV_AVIDX GENMASK_ULL(16, 0)
#define IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG_S 60
-#define IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG_M BIT_ULL(IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG_S)
-
-/* UDA multicast group */
-
+#define IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG BIT_ULL(60)
#define IRDMA_UDA_MGCTX_VFFLAG_S 29
-#define IRDMA_UDA_MGCTX_VFFLAG_M BIT_ULL(IRDMA_UDA_MGCTX_VFFLAG_S)
-
+#define IRDMA_UDA_MGCTX_VFFLAG BIT_ULL(29)
#define IRDMA_UDA_MGCTX_DESTPORT_S 32
-#define IRDMA_UDA_MGCTX_DESTPORT_M ((u64)0xffff << IRDMA_UDA_MGCTX_DESTPORT_S)
-
+#define IRDMA_UDA_MGCTX_DESTPORT GENMASK_ULL(47, 32)
#define IRDMA_UDA_MGCTX_VFID_S 22
-#define IRDMA_UDA_MGCTX_VFID_M ((u64)0x7f << IRDMA_UDA_MGCTX_VFID_S)
-
+#define IRDMA_UDA_MGCTX_VFID GENMASK_ULL(28, 22)
#define IRDMA_UDA_MGCTX_VALIDENT_S 31
-#define IRDMA_UDA_MGCTX_VALIDENT_M BIT_ULL(IRDMA_UDA_MGCTX_VALIDENT_S)
-
+#define IRDMA_UDA_MGCTX_VALIDENT BIT_ULL(31)
#define IRDMA_UDA_MGCTX_PFID_S 18
-#define IRDMA_UDA_MGCTX_PFID_M ((u64)0xf << IRDMA_UDA_MGCTX_PFID_S)
-
+#define IRDMA_UDA_MGCTX_PFID GENMASK_ULL(21, 18)
#define IRDMA_UDA_MGCTX_FLAGIGNOREDPORT_S 30
-#define IRDMA_UDA_MGCTX_FLAGIGNOREDPORT_M \
- BIT_ULL(IRDMA_UDA_MGCTX_FLAGIGNOREDPORT_S)
-
+#define IRDMA_UDA_MGCTX_FLAGIGNOREDPORT BIT_ULL(30)
#define IRDMA_UDA_MGCTX_QPID_S 0
-#define IRDMA_UDA_MGCTX_QPID_M ((u64)0x3ffff << IRDMA_UDA_MGCTX_QPID_S)
-
-/* multicast group create CQP command */
-
+#define IRDMA_UDA_MGCTX_QPID GENMASK_ULL(17, 0)
#define IRDMA_UDA_CQPSQ_MG_WQEVALID_S 63
-#define IRDMA_UDA_CQPSQ_MG_WQEVALID_M \
- BIT_ULL(IRDMA_UDA_CQPSQ_MG_WQEVALID_S)
-
+#define IRDMA_UDA_CQPSQ_MG_WQEVALID BIT_ULL(63)
#define IRDMA_UDA_CQPSQ_MG_OPCODE_S 32
-#define IRDMA_UDA_CQPSQ_MG_OPCODE_M ((u64)0x3f << IRDMA_UDA_CQPSQ_MG_OPCODE_S)
-
+#define IRDMA_UDA_CQPSQ_MG_OPCODE GENMASK_ULL(37, 32)
#define IRDMA_UDA_CQPSQ_MG_MGIDX_S 0
-#define IRDMA_UDA_CQPSQ_MG_MGIDX_M ((u64)0x1fff << IRDMA_UDA_CQPSQ_MG_MGIDX_S)
-
+#define IRDMA_UDA_CQPSQ_MG_MGIDX GENMASK_ULL(12, 0)
#define IRDMA_UDA_CQPSQ_MG_IPV4VALID_S 60
-#define IRDMA_UDA_CQPSQ_MG_IPV4VALID_M BIT_ULL(IRDMA_UDA_CQPSQ_MG_IPV4VALID_S)
-
+#define IRDMA_UDA_CQPSQ_MG_IPV4VALID BIT_ULL(60)
#define IRDMA_UDA_CQPSQ_MG_VLANVALID_S 59
-#define IRDMA_UDA_CQPSQ_MG_VLANVALID_M BIT_ULL(IRDMA_UDA_CQPSQ_MG_VLANVALID_S)
-
+#define IRDMA_UDA_CQPSQ_MG_VLANVALID BIT_ULL(59)
#define IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID_S 0
-#define IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID_M ((u64)0x3F << IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID_S)
-
+#define IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID GENMASK_ULL(5, 0)
#define IRDMA_UDA_CQPSQ_MG_VLANID_S 32
-#define IRDMA_UDA_CQPSQ_MG_VLANID_M ((u64)0xFFF << IRDMA_UDA_CQPSQ_MG_VLANID_S)
-
+#define IRDMA_UDA_CQPSQ_MG_VLANID GENMASK_ULL(43, 32)
#define IRDMA_UDA_CQPSQ_QS_HANDLE_S 0
-#define IRDMA_UDA_CQPSQ_QS_HANDLE_M ((u64)0x3FF << IRDMA_UDA_CQPSQ_QS_HANDLE_S)
-
-/* Quad hash table */
+#define IRDMA_UDA_CQPSQ_QS_HANDLE GENMASK_ULL(9, 0)
#define IRDMA_UDA_CQPSQ_QHASH_QPN_S 32
-#define IRDMA_UDA_CQPSQ_QHASH_QPN_M \
- ((u64)0x3ffff << IRDMA_UDA_CQPSQ_QHASH_QPN_S)
-
+#define IRDMA_UDA_CQPSQ_QHASH_QPN GENMASK_ULL(49, 32)
#define IRDMA_UDA_CQPSQ_QHASH__S 0
-#define IRDMA_UDA_CQPSQ_QHASH__M BIT_ULL(IRDMA_UDA_CQPSQ_QHASH__S)
-
+#define IRDMA_UDA_CQPSQ_QHASH_ BIT_ULL(0)
#define IRDMA_UDA_CQPSQ_QHASH_SRC_PORT_S 16
-#define IRDMA_UDA_CQPSQ_QHASH_SRC_PORT_M \
- ((u64)0xffff << IRDMA_UDA_CQPSQ_QHASH_SRC_PORT_S)
-
+#define IRDMA_UDA_CQPSQ_QHASH_SRC_PORT GENMASK_ULL(31, 16)
#define IRDMA_UDA_CQPSQ_QHASH_DEST_PORT_S 0
-#define IRDMA_UDA_CQPSQ_QHASH_DEST_PORT_M \
- ((u64)0xffff << IRDMA_UDA_CQPSQ_QHASH_DEST_PORT_S)
-
+#define IRDMA_UDA_CQPSQ_QHASH_DEST_PORT GENMASK_ULL(15, 0)
#define IRDMA_UDA_CQPSQ_QHASH_ADDR0_S 32
-#define IRDMA_UDA_CQPSQ_QHASH_ADDR0_M \
- ((u64)0xffffffff << IRDMA_UDA_CQPSQ_QHASH_ADDR0_S)
-
+#define IRDMA_UDA_CQPSQ_QHASH_ADDR0 GENMASK_ULL(63, 32)
#define IRDMA_UDA_CQPSQ_QHASH_ADDR1_S 0
-#define IRDMA_UDA_CQPSQ_QHASH_ADDR1_M \
- ((u64)0xffffffff << IRDMA_UDA_CQPSQ_QHASH_ADDR1_S)
-
+#define IRDMA_UDA_CQPSQ_QHASH_ADDR1 GENMASK_ULL(31, 0)
#define IRDMA_UDA_CQPSQ_QHASH_ADDR2_S 32
-#define IRDMA_UDA_CQPSQ_QHASH_ADDR2_M \
- ((u64)0xffffffff << IRDMA_UDA_CQPSQ_QHASH_ADDR2_S)
-
+#define IRDMA_UDA_CQPSQ_QHASH_ADDR2 GENMASK_ULL(63, 32)
#define IRDMA_UDA_CQPSQ_QHASH_ADDR3_S 0
-#define IRDMA_UDA_CQPSQ_QHASH_ADDR3_M \
- ((u64)0xffffffff << IRDMA_UDA_CQPSQ_QHASH_ADDR3_S)
-
+#define IRDMA_UDA_CQPSQ_QHASH_ADDR3 GENMASK_ULL(31, 0)
#define IRDMA_UDA_CQPSQ_QHASH_WQEVALID_S 63
-#define IRDMA_UDA_CQPSQ_QHASH_WQEVALID_M \
- BIT_ULL(IRDMA_UDA_CQPSQ_QHASH_WQEVALID_S)
-
+#define IRDMA_UDA_CQPSQ_QHASH_WQEVALID BIT_ULL(63)
#define IRDMA_UDA_CQPSQ_QHASH_OPCODE_S 32
-#define IRDMA_UDA_CQPSQ_QHASH_OPCODE_M \
- ((u64)0x3f << IRDMA_UDA_CQPSQ_QHASH_OPCODE_S)
-
+#define IRDMA_UDA_CQPSQ_QHASH_OPCODE GENMASK_ULL(37, 32)
#define IRDMA_UDA_CQPSQ_QHASH_MANAGE_S 61
-#define IRDMA_UDA_CQPSQ_QHASH_MANAGE_M \
- ((u64)0x3 << IRDMA_UDA_CQPSQ_QHASH_MANAGE_S)
-
+#define IRDMA_UDA_CQPSQ_QHASH_MANAGE GENMASK_ULL(62, 61)
#define IRDMA_UDA_CQPSQ_QHASH_IPV4VALID_S 60
-#define IRDMA_UDA_CQPSQ_QHASH_IPV4VALID_M \
- ((u64)0x1 << IRDMA_UDA_CQPSQ_QHASH_IPV4VALID_S)
-
+#define IRDMA_UDA_CQPSQ_QHASH_IPV4VALID BIT_ULL(60)
#define IRDMA_UDA_CQPSQ_QHASH_LANFWD_S 59
-#define IRDMA_UDA_CQPSQ_QHASH_LANFWD_M \
- ((u64)0x1 << IRDMA_UDA_CQPSQ_QHASH_LANFWD_S)
-
+#define IRDMA_UDA_CQPSQ_QHASH_LANFWD BIT_ULL(59)
#define IRDMA_UDA_CQPSQ_QHASH_ENTRYTYPE_S 42
-#define IRDMA_UDA_CQPSQ_QHASH_ENTRYTYPE_M \
- ((u64)0x7 << IRDMA_UDA_CQPSQ_QHASH_ENTRYTYPE_S)
+#define IRDMA_UDA_CQPSQ_QHASH_ENTRYTYPE GENMASK_ULL(44, 42)
#endif /* IRDMA_UDA_D_H */
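Every _M macro above was rewritten as a GENMASK_ULL()/BIT_ULL() expression; the two notations must be bit-identical, or every FIELD_PREP() in the driver would shift into the wrong position. A compile-time spot check under that assumption (GENMASK_ULL body reproduced for self-containment, field values taken from the header above):

#include <stdint.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

/* old: ((u64)0xFFF << 32)   new: GENMASK_ULL(43, 32) */
_Static_assert(GENMASK_ULL(43, 32) == ((uint64_t)0xFFF << 32),
	       "IRDMA_UDA_CQPSQ_MG_VLANID unchanged");
/* old: ((u64)0x1ffff << 0)  new: GENMASK_ULL(16, 0) */
_Static_assert(GENMASK_ULL(16, 0) == (uint64_t)0x1ffff,
	       "IRDMA_UDA_QPSQ_AHIDX unchanged");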
diff --git a/sys/dev/irdma/irdma_uk.c b/sys/dev/irdma/irdma_uk.c
index 496846b7870b..e8bab2e6ffc5 100644
--- a/sys/dev/irdma/irdma_uk.c
+++ b/sys/dev/irdma/irdma_uk.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2021 Intel Corporation
+ * Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -51,15 +51,15 @@ irdma_set_fragment(__le64 * wqe, u32 offset, struct irdma_sge *sge,
{
if (sge) {
set_64bit_val(wqe, offset,
- LS_64(sge->tag_off, IRDMAQPSQ_FRAG_TO));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
set_64bit_val(wqe, offset + IRDMA_BYTE_8,
- LS_64(valid, IRDMAQPSQ_VALID) |
- LS_64(sge->len, IRDMAQPSQ_FRAG_LEN) |
- LS_64(sge->stag, IRDMAQPSQ_FRAG_STAG));
+ FIELD_PREP(IRDMAQPSQ_VALID, valid) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->len) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->stag));
} else {
set_64bit_val(wqe, offset, 0);
set_64bit_val(wqe, offset + IRDMA_BYTE_8,
- LS_64(valid, IRDMAQPSQ_VALID));
+ FIELD_PREP(IRDMAQPSQ_VALID, valid));
}
}
@@ -76,10 +76,10 @@ irdma_set_fragment_gen_1(__le64 * wqe, u32 offset,
{
if (sge) {
set_64bit_val(wqe, offset,
- LS_64(sge->tag_off, IRDMAQPSQ_FRAG_TO));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
set_64bit_val(wqe, offset + IRDMA_BYTE_8,
- LS_64(sge->len, IRDMAQPSQ_GEN1_FRAG_LEN) |
- LS_64(sge->stag, IRDMAQPSQ_GEN1_FRAG_STAG));
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->len) |
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->stag));
} else {
set_64bit_val(wqe, offset, 0);
set_64bit_val(wqe, offset + IRDMA_BYTE_8, 0);
@@ -87,16 +87,24 @@ irdma_set_fragment_gen_1(__le64 * wqe, u32 offset,
}
/**
+ * irdma_nop_hdr - Format header section of a NOP WQE
+ * @qp: hw qp ptr
+ */
+static inline u64 irdma_nop_hdr(struct irdma_qp_uk *qp)
+{
+ return FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, false) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+}
+
+/**
* irdma_nop_1 - insert a NOP wqe
* @qp: hw qp ptr
*/
static int
irdma_nop_1(struct irdma_qp_uk *qp)
{
- u64 hdr;
__le64 *wqe;
u32 wqe_idx;
- bool signaled = false;
if (!qp->sq_ring.head)
return -EINVAL;
@@ -110,14 +118,10 @@ irdma_nop_1(struct irdma_qp_uk *qp)
set_64bit_val(wqe, IRDMA_BYTE_8, 0);
set_64bit_val(wqe, IRDMA_BYTE_16, 0);
- hdr = LS_64(IRDMAQP_OP_NOP, IRDMAQPSQ_OPCODE) |
- LS_64(signaled, IRDMAQPSQ_SIGCOMPL) |
- LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
-
/* make sure WQE is written before valid bit is set */
irdma_wmb();
- set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+ set_64bit_val(wqe, IRDMA_BYTE_24, irdma_nop_hdr(qp));
return 0;
}
@@ -160,7 +164,7 @@ irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
/* read the doorbell shadow area */
get_64bit_val(qp->shadow_area, IRDMA_BYTE_0, &temp);
- hw_sq_tail = (u32)RS_64(temp, IRDMA_QP_DBSA_HW_SQ_TAIL);
+ hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
if (sw_sq_head != qp->initial_ring.head) {
if (qp->push_dropped) {
@@ -191,7 +195,7 @@ static void
irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
{
set_32bit_val(qp->push_db, 0,
- LS_32(wqe_idx >> 3, IRDMA_WQEALLOC_WQE_DESC_INDEX) | qp->qp_id);
+ FIELD_PREP(IRDMA_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | qp->qp_id);
qp->initial_ring.head = qp->sq_ring.head;
qp->push_mode = true;
qp->push_dropped = false;
@@ -220,31 +224,32 @@ irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 * wqe, u16 quanta,
* irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
* @qp: hw qp ptr
* @wqe_idx: return wqe index
- * @quanta: size of WR in quanta
+ * @quanta: (in/out) ptr to size of WR in quanta; modified when NOP padding is needed
* @total_size: size of WR in bytes
* @info: info on WR
*/
__le64 *
irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
- u16 quanta, u32 total_size,
+ u16 *quanta, u32 total_size,
struct irdma_post_sq_info *info)
{
__le64 *wqe;
__le64 *wqe_0 = NULL;
u32 nop_wqe_idx;
- u16 avail_quanta;
+ u16 avail_quanta, wqe_quanta = *quanta;
u16 i;
avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
(IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
qp->uk_attrs->max_hw_sq_chunk);
- if (quanta <= avail_quanta) {
+
+ if (*quanta <= avail_quanta) {
/* WR fits in current chunk */
- if (quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
+ if (*quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
return NULL;
} else {
/* Need to pad with NOP */
- if (quanta + avail_quanta >
+ if (*quanta + avail_quanta >
IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
return NULL;
@@ -262,17 +267,19 @@ irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
if (!*wqe_idx)
qp->swqe_polarity = !qp->swqe_polarity;
- IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta);
+ IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, *quanta);
+
+ irdma_clr_wqes(qp, *wqe_idx);
wqe = qp->sq_base[*wqe_idx].elem;
- if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && quanta == 1 &&
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && wqe_quanta == 1 &&
(IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
- wqe_0[3] = cpu_to_le64(LS_64(!qp->swqe_polarity, IRDMAQPSQ_VALID));
+ wqe_0[3] = cpu_to_le64(FIELD_PREP(IRDMAQPSQ_VALID, !qp->swqe_polarity));
}
qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
- qp->sq_wrtrk_array[*wqe_idx].quanta = quanta;
+ qp->sq_wrtrk_array[*wqe_idx].quanta = wqe_quanta;
qp->sq_wrtrk_array[*wqe_idx].signaled = info->signaled;
return wqe;
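quanta becoming an in/out pointer matters because the function may spend quanta on NOP padding: a WR must not straddle a hardware SQ chunk, so the remainder of the current chunk is filled with NOPs first and the caller has to record the padded size. A standalone model of that decision (chunk size and ring position are hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned max_hw_sq_chunk = 8;  /* quanta per hardware chunk */
	unsigned sq_head = 6;          /* current SQ ring head */
	unsigned wr_quanta = 4;        /* size of this WR in quanta */
	unsigned avail = max_hw_sq_chunk - (sq_head % max_hw_sq_chunk);

	if (wr_quanta <= avail)
		printf("WR fits in the current chunk at index %u\n", sq_head);
	else
		printf("pad %u NOP quanta; WR starts at index %u\n",
		       avail, sq_head + avail);
	return 0;
}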
@@ -344,20 +351,17 @@ irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
if (ret_code)
return ret_code;
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
- info);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
if (!wqe)
return -ENOSPC;
- irdma_clr_wqes(qp, wqe_idx);
-
qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
set_64bit_val(wqe, IRDMA_BYTE_16,
- LS_64(op_info->rem_addr.tag_off, IRDMAQPSQ_FRAG_TO));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
if (info->imm_data_valid) {
set_64bit_val(wqe, IRDMA_BYTE_0,
- LS_64(info->imm_data, IRDMAQPSQ_IMMDATA));
+ FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
i = 0;
} else {
qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0,
@@ -382,28 +386,24 @@ irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
++addl_frag_cnt;
}
- if (!op_info->rem_addr.stag && !total_size)
- op_info->rem_addr.stag = 0x1234;
- hdr = LS_64(op_info->rem_addr.stag, IRDMAQPSQ_REMSTAG) |
- LS_64(info->op_type, IRDMAQPSQ_OPCODE) |
- LS_64((info->imm_data_valid ? 1 : 0), IRDMAQPSQ_IMMDATAFLAG) |
- LS_64((info->report_rtt ? 1 : 0), IRDMAQPSQ_REPORTRTT) |
- LS_64(addl_frag_cnt, IRDMAQPSQ_ADDFRAGCNT) |
- LS_64((info->push_wqe ? 1 : 0), IRDMAQPSQ_PUSHWQE) |
- LS_64(read_fence, IRDMAQPSQ_READFENCE) |
- LS_64(info->local_fence, IRDMAQPSQ_LOCALFENCE) |
- LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |
- LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
+ FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
+ FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
irdma_wmb(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe) {
+ if (info->push_wqe)
irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+ else if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -443,8 +443,7 @@ irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
if (ret_code)
return ret_code;
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
- info);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
if (!wqe)
return -ENOSPC;
@@ -453,8 +452,6 @@ irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
qp->ord_cnt = 0;
}
- irdma_clr_wqes(qp, wqe_idx);
-
qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
addl_frag_cnt = op_info->num_lo_sges > 1 ?
(op_info->num_lo_sges - 1) : 0;
@@ -478,28 +475,26 @@ irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
++addl_frag_cnt;
}
set_64bit_val(wqe, IRDMA_BYTE_16,
- LS_64(op_info->rem_addr.tag_off, IRDMAQPSQ_FRAG_TO));
- hdr = LS_64(op_info->rem_addr.stag, IRDMAQPSQ_REMSTAG) |
- LS_64((info->report_rtt ? 1 : 0), IRDMAQPSQ_REPORTRTT) |
- LS_64(addl_frag_cnt, IRDMAQPSQ_ADDFRAGCNT) |
- LS_64((inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ),
- IRDMAQPSQ_OPCODE) |
- LS_64((info->push_wqe ? 1 : 0), IRDMAQPSQ_PUSHWQE) |
- LS_64(info->read_fence || qp->force_fence || ord_fence ? 1 : 0,
- IRDMAQPSQ_READFENCE) |
- LS_64(local_fence, IRDMAQPSQ_LOCALFENCE) |
- LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |
- LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE,
+ (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
+ FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE,
+ info->read_fence || ord_fence ? 1 : 0) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
irdma_wmb(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe) {
+ if (info->push_wqe)
irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+ else if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -540,21 +535,19 @@ irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
if (ret_code)
return ret_code;
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
- info);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
if (!wqe)
return -ENOSPC;
- irdma_clr_wqes(qp, wqe_idx);
-
read_fence |= info->read_fence;
addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
if (info->imm_data_valid) {
set_64bit_val(wqe, IRDMA_BYTE_0,
- LS_64(info->imm_data, IRDMAQPSQ_IMMDATA));
+ FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
i = 0;
} else {
- qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, op_info->sg_list,
+ qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0,
+ frag_cnt ? op_info->sg_list : NULL,
qp->swqe_polarity);
i = 1;
}
@@ -575,31 +568,30 @@ irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
}
set_64bit_val(wqe, IRDMA_BYTE_16,
- LS_64(op_info->qkey, IRDMAQPSQ_DESTQKEY) |
- LS_64(op_info->dest_qp, IRDMAQPSQ_DESTQPN));
- hdr = LS_64(info->stag_to_inv, IRDMAQPSQ_REMSTAG) |
- LS_64(op_info->ah_id, IRDMAQPSQ_AHID) |
- LS_64((info->imm_data_valid ? 1 : 0), IRDMAQPSQ_IMMDATAFLAG) |
- LS_64((info->report_rtt ? 1 : 0), IRDMAQPSQ_REPORTRTT) |
- LS_64(info->op_type, IRDMAQPSQ_OPCODE) |
- LS_64(addl_frag_cnt, IRDMAQPSQ_ADDFRAGCNT) |
- LS_64((info->push_wqe ? 1 : 0), IRDMAQPSQ_PUSHWQE) |
- LS_64(read_fence, IRDMAQPSQ_READFENCE) |
- LS_64(info->local_fence, IRDMAQPSQ_LOCALFENCE) |
- LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |
- LS_64(info->udp_hdr, IRDMAQPSQ_UDPHEADER) |
- LS_64(info->l4len, IRDMAQPSQ_L4LEN) |
- LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+ FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
+ FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
+ FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
+ FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
+ (info->imm_data_valid ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
+ FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
+ FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
irdma_wmb(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe) {
+ if (info->push_wqe)
irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+ else if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -615,29 +607,45 @@ irdma_set_mw_bind_wqe_gen_1(__le64 * wqe,
{
set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)op_info->va);
set_64bit_val(wqe, IRDMA_BYTE_8,
- LS_64(op_info->mw_stag, IRDMAQPSQ_PARENTMRSTAG) |
- LS_64(op_info->mr_stag, IRDMAQPSQ_MWSTAG));
+ FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) |
+ FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag));
set_64bit_val(wqe, IRDMA_BYTE_16, op_info->bind_len);
}
/**
* irdma_copy_inline_data_gen_1 - Copy inline data to wqe
- * @dest: pointer to wqe
- * @src: pointer to inline data
- * @len: length of inline data to copy
+ * @wqe: pointer to wqe
+ * @sge_list: table of pointers to inline data
+ * @num_sges: number of SGEs
* @polarity: compatibility parameter
*/
static void
-irdma_copy_inline_data_gen_1(u8 *dest, u8 *src, u32 len,
- u8 polarity)
+irdma_copy_inline_data_gen_1(u8 *wqe, struct irdma_sge *sge_list,
+ u32 num_sges, u8 polarity)
{
- if (len <= IRDMA_BYTE_16) {
- irdma_memcpy(dest, src, len);
- } else {
- irdma_memcpy(dest, src, IRDMA_BYTE_16);
- src += IRDMA_BYTE_16;
- dest = dest + IRDMA_BYTE_32;
- irdma_memcpy(dest, src, len - IRDMA_BYTE_16);
+ u32 quanta_bytes_remaining = 16;
+ u32 i;
+
+ for (i = 0; i < num_sges; i++) {
+ u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].tag_off;
+ u32 sge_len = sge_list[i].len;
+
+ while (sge_len) {
+ u32 bytes_copied;
+
+ bytes_copied = min(sge_len, quanta_bytes_remaining);
+ irdma_memcpy(wqe, cur_sge, bytes_copied);
+ wqe += bytes_copied;
+ cur_sge += bytes_copied;
+ quanta_bytes_remaining -= bytes_copied;
+ sge_len -= bytes_copied;
+
+ if (!quanta_bytes_remaining) {
+ /* Remaining inline bytes reside after the hdr */
+ wqe += 16;
+ quanta_bytes_remaining = 32;
+ }
+ }
}
}
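The gen-1 inline copy now walks an SGE list instead of a single flat buffer, tracking the bytes remaining in the current quantum: 16 data bytes share the first quantum with the header, and when they are exhausted the copy resumes 16 bytes further on. A standalone model of that walk (buffers and lengths hypothetical):

#include <stdio.h>
#include <string.h>

struct sge { const char *addr; unsigned len; };

static void copy_inline_gen1(char *wqe, const struct sge *sg, unsigned n)
{
	unsigned rem = 16;  /* bytes left in current quantum */
	unsigned i;

	for (i = 0; i < n; i++) {
		const char *src = sg[i].addr;
		unsigned len = sg[i].len;

		while (len) {
			unsigned c = len < rem ? len : rem;

			memcpy(wqe, src, c);
			wqe += c; src += c; rem -= c; len -= c;
			if (!rem) {  /* data continues past the header */
				wqe += 16;
				rem = 32;
			}
		}
	}
}

int main(void)
{
	char buf[128] = {0};
	struct sge sg[] = {{"0123456789abcdef", 16}, {"XYZ", 3}};

	copy_inline_gen1(buf, sg, 2);
	printf("%.16s | %.3s\n", buf, buf + 32); /* resumes at byte 32 */
	return 0;
}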
@@ -662,43 +670,59 @@ irdma_set_mw_bind_wqe(__le64 * wqe,
{
set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)op_info->va);
set_64bit_val(wqe, IRDMA_BYTE_8,
- LS_64(op_info->mr_stag, IRDMAQPSQ_PARENTMRSTAG) |
- LS_64(op_info->mw_stag, IRDMAQPSQ_MWSTAG));
+ FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
+ FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
set_64bit_val(wqe, IRDMA_BYTE_16, op_info->bind_len);
}
/**
* irdma_copy_inline_data - Copy inline data to wqe
- * @dest: pointer to wqe
- * @src: pointer to inline data
- * @len: length of inline data to copy
+ * @wqe: pointer to wqe
+ * @sge_list: table of pointers to inline data
+ * @num_sges: number of SGEs
* @polarity: polarity of wqe valid bit
*/
static void
-irdma_copy_inline_data(u8 *dest, u8 *src, u32 len, u8 polarity)
+irdma_copy_inline_data(u8 *wqe, struct irdma_sge *sge_list, u32 num_sges,
+ u8 polarity)
{
u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
- u32 copy_size;
-
- dest += IRDMA_BYTE_8;
- if (len <= IRDMA_BYTE_8) {
- irdma_memcpy(dest, src, len);
- return;
- }
-
- *((u64 *)dest) = *((u64 *)src);
- len -= IRDMA_BYTE_8;
- src += IRDMA_BYTE_8;
- dest += IRDMA_BYTE_24; /* point to additional 32 byte quanta */
-
- while (len) {
- copy_size = len < 31 ? len : 31;
- irdma_memcpy(dest, src, copy_size);
- *(dest + 31) = inline_valid;
- len -= copy_size;
- dest += IRDMA_BYTE_32;
- src += copy_size;
+ u32 quanta_bytes_remaining = 8;
+ u32 i;
+ bool first_quanta = true;
+
+ wqe += 8;
+
+ for (i = 0; i < num_sges; i++) {
+ u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].tag_off;
+ u32 sge_len = sge_list[i].len;
+
+ while (sge_len) {
+ u32 bytes_copied;
+
+ bytes_copied = min(sge_len, quanta_bytes_remaining);
+ irdma_memcpy(wqe, cur_sge, bytes_copied);
+ wqe += bytes_copied;
+ cur_sge += bytes_copied;
+ quanta_bytes_remaining -= bytes_copied;
+ sge_len -= bytes_copied;
+
+ if (!quanta_bytes_remaining) {
+ quanta_bytes_remaining = 31;
+
+ /* Remaining inline bytes reside after the hdr */
+ if (first_quanta) {
+ first_quanta = false;
+ wqe += 16;
+ } else {
+ *wqe = inline_valid;
+ wqe++;
+ }
+ }
+ }
}
+ if (!first_quanta && quanta_bytes_remaining < 31)
+ *(wqe + quanta_bytes_remaining) = inline_valid;
}
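The gen-2 variant differs in two ways: only 8 data bytes share the first quantum with the header, and every later 32-byte quantum reserves its final byte for the WQE polarity so hardware can detect a torn write; the closing statement patches that byte into a partially filled last quantum. A standalone sketch of where those valid bytes land (payload length hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned len = 50;                  /* hypothetical inline payload */
	unsigned done = len > 8 ? 8 : len;  /* quantum 0: data at bytes 8..15 */
	unsigned q = 1;

	while (done < len) {
		unsigned c = len - done > 31 ? 31 : len - done;

		printf("quantum %u: %u data bytes, valid byte at offset %u\n",
		       q, c, q * 32 + 31);
		done += c;
		q++;
	}
	return 0;
}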
/**
@@ -737,59 +761,62 @@ irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info, bool post_sq)
{
__le64 *wqe;
- struct irdma_inline_rdma_write *op_info;
+ struct irdma_rdma_write *op_info;
u64 hdr = 0;
u32 wqe_idx;
bool read_fence = false;
u16 quanta;
+ u32 i, total_size = 0;
info->push_wqe = qp->push_db ? true : false;
- op_info = &info->op.inline_rdma_write;
+ op_info = &info->op.rdma_write;
- if (op_info->len > qp->max_inline_data)
+ if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
return -EINVAL;
- quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
- info);
+ for (i = 0; i < op_info->num_lo_sges; i++)
+ total_size += op_info->lo_sg_list[i].len;
+
+ if (unlikely(total_size > qp->max_inline_data))
+ return -EINVAL;
+
+ quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
if (!wqe)
return -ENOSPC;
- irdma_clr_wqes(qp, wqe_idx);
-
qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
read_fence |= info->read_fence;
set_64bit_val(wqe, IRDMA_BYTE_16,
- LS_64(op_info->rem_addr.tag_off, IRDMAQPSQ_FRAG_TO));
-
- hdr = LS_64(op_info->rem_addr.stag, IRDMAQPSQ_REMSTAG) |
- LS_64(info->op_type, IRDMAQPSQ_OPCODE) |
- LS_64(op_info->len, IRDMAQPSQ_INLINEDATALEN) |
- LS_64(info->report_rtt ? 1 : 0, IRDMAQPSQ_REPORTRTT) |
- LS_64(1, IRDMAQPSQ_INLINEDATAFLAG) |
- LS_64(info->imm_data_valid ? 1 : 0, IRDMAQPSQ_IMMDATAFLAG) |
- LS_64(info->push_wqe ? 1 : 0, IRDMAQPSQ_PUSHWQE) |
- LS_64(read_fence, IRDMAQPSQ_READFENCE) |
- LS_64(info->local_fence, IRDMAQPSQ_LOCALFENCE) |
- LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |
- LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
+
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
+ FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
+ FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe ? 1 : 0) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
if (info->imm_data_valid)
set_64bit_val(wqe, IRDMA_BYTE_0,
- LS_64(info->imm_data, IRDMAQPSQ_IMMDATA));
+ FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
+
+ qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list,
+ op_info->num_lo_sges, qp->swqe_polarity);
- qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
- qp->swqe_polarity);
irdma_wmb(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe) {
+ if (info->push_wqe)
irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+ else if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -805,62 +832,65 @@ irdma_uk_inline_send(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info, bool post_sq)
{
__le64 *wqe;
- struct irdma_post_inline_send *op_info;
+ struct irdma_post_send *op_info;
u64 hdr;
u32 wqe_idx;
bool read_fence = false;
u16 quanta;
+ u32 i, total_size = 0;
info->push_wqe = qp->push_db ? true : false;
- op_info = &info->op.inline_send;
+ op_info = &info->op.send;
- if (op_info->len > qp->max_inline_data)
+ if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
return -EINVAL;
- quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
- info);
+ for (i = 0; i < op_info->num_sges; i++)
+ total_size += op_info->sg_list[i].len;
+
+ if (unlikely(total_size > qp->max_inline_data))
+ return -EINVAL;
+
+ quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
if (!wqe)
return -ENOSPC;
- irdma_clr_wqes(qp, wqe_idx);
-
set_64bit_val(wqe, IRDMA_BYTE_16,
- LS_64(op_info->qkey, IRDMAQPSQ_DESTQKEY) |
- LS_64(op_info->dest_qp, IRDMAQPSQ_DESTQPN));
+ FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
+ FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
read_fence |= info->read_fence;
- hdr = LS_64(info->stag_to_inv, IRDMAQPSQ_REMSTAG) |
- LS_64(op_info->ah_id, IRDMAQPSQ_AHID) |
- LS_64(info->op_type, IRDMAQPSQ_OPCODE) |
- LS_64(op_info->len, IRDMAQPSQ_INLINEDATALEN) |
- LS_64((info->imm_data_valid ? 1 : 0), IRDMAQPSQ_IMMDATAFLAG) |
- LS_64((info->report_rtt ? 1 : 0), IRDMAQPSQ_REPORTRTT) |
- LS_64(1, IRDMAQPSQ_INLINEDATAFLAG) |
- LS_64((info->push_wqe ? 1 : 0), IRDMAQPSQ_PUSHWQE) |
- LS_64(read_fence, IRDMAQPSQ_READFENCE) |
- LS_64(info->local_fence, IRDMAQPSQ_LOCALFENCE) |
- LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |
- LS_64(info->udp_hdr, IRDMAQPSQ_UDPHEADER) |
- LS_64(info->l4len, IRDMAQPSQ_L4LEN) |
- LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
+ FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
+ FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
+ (info->imm_data_valid ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
+ FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
+ FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
if (info->imm_data_valid)
set_64bit_val(wqe, IRDMA_BYTE_0,
- LS_64(info->imm_data, IRDMAQPSQ_IMMDATA));
- qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
- qp->swqe_polarity);
+ FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
+ qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list,
+ op_info->num_sges, qp->swqe_polarity);
irdma_wmb(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe) {
+ if (info->push_wqe)
irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+ else if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
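Dropping the separate inline_send/inline_rdma_write op structures means an inline post now reuses the scatter list of the regular path; the byte length is summed on the fly and both limits are checked, SGE count against max_sq_frag_cnt and summed bytes against max_inline_data. A standalone sketch of that validation (struct layout and limits hypothetical):

#include <errno.h>
#include <stdio.h>

struct sge { unsigned long addr; unsigned len; };

static int check_inline(const struct sge *sg, unsigned num,
			unsigned max_frags, unsigned max_inline)
{
	unsigned i, total = 0;

	if (num > max_frags)
		return -EINVAL;    /* too many fragments */
	for (i = 0; i < num; i++)
		total += sg[i].len;
	if (total > max_inline)
		return -EINVAL;    /* exceeds inline capacity */
	return (int)total;         /* becomes IRDMAQPSQ_INLINEDATALEN */
}

int main(void)
{
	struct sge sg[] = {{0x1000, 40}, {0x2000, 80}};

	printf("total = %d\n", check_inline(sg, 2, 3, 128)); /* 40+80 fits */
	return 0;
}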
@@ -882,41 +912,36 @@ irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
u32 wqe_idx;
bool local_fence = false;
struct irdma_sge sge = {0};
+ u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;
info->push_wqe = qp->push_db ? true : false;
op_info = &info->op.inv_local_stag;
local_fence = info->local_fence;
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
- 0, info);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, 0, info);
if (!wqe)
return -ENOSPC;
- irdma_clr_wqes(qp, wqe_idx);
-
sge.stag = op_info->target_stag;
qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, &sge, 0);
set_64bit_val(wqe, IRDMA_BYTE_16, 0);
- hdr = LS_64(IRDMA_OP_TYPE_INV_STAG, IRDMAQPSQ_OPCODE) |
- LS_64((info->push_wqe ? 1 : 0), IRDMAQPSQ_PUSHWQE) |
- LS_64(info->read_fence, IRDMAQPSQ_READFENCE) |
- LS_64(local_fence, IRDMAQPSQ_LOCALFENCE) |
- LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |
- LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
+ FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
irdma_wmb(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
- post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+ if (info->push_wqe)
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ else if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -936,44 +961,39 @@ irdma_uk_mw_bind(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
u64 hdr;
u32 wqe_idx;
bool local_fence;
+ u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;
info->push_wqe = qp->push_db ? true : false;
op_info = &info->op.bind_window;
local_fence = info->local_fence;
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
- 0, info);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, 0, info);
if (!wqe)
return -ENOSPC;
- irdma_clr_wqes(qp, wqe_idx);
-
qp->wqe_ops.iw_set_mw_bind_wqe(wqe, op_info);
- hdr = LS_64(IRDMA_OP_TYPE_BIND_MW, IRDMAQPSQ_OPCODE) |
- LS_64(((op_info->ena_reads << 2) | (op_info->ena_writes << 3)),
- IRDMAQPSQ_STAGRIGHTS) |
- LS_64((op_info->addressing_type == IRDMA_ADDR_TYPE_VA_BASED ? 1 : 0),
- IRDMAQPSQ_VABASEDTO) |
- LS_64((op_info->mem_window_type_1 ? 1 : 0),
- IRDMAQPSQ_MEMWINDOWTYPE) |
- LS_64((info->push_wqe ? 1 : 0), IRDMAQPSQ_PUSHWQE) |
- LS_64(info->read_fence, IRDMAQPSQ_READFENCE) |
- LS_64(local_fence, IRDMAQPSQ_LOCALFENCE) |
- LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |
- LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_BIND_MW) |
+ FIELD_PREP(IRDMAQPSQ_STAGRIGHTS,
+ ((op_info->ena_reads << 2) | (op_info->ena_writes << 3))) |
+ FIELD_PREP(IRDMAQPSQ_VABASEDTO,
+ (op_info->addressing_type == IRDMA_ADDR_TYPE_VA_BASED ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_MEMWINDOWTYPE,
+ (op_info->mem_window_type_1 ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
irdma_wmb(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
- post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+ if (info->push_wqe)
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ else if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -1020,8 +1040,8 @@ irdma_uk_post_receive(struct irdma_qp_uk *qp,
}
set_64bit_val(wqe, IRDMA_BYTE_16, 0);
- hdr = LS_64(addl_frag_cnt, IRDMAQPSQ_ADDFRAGCNT) |
- LS_64(qp->rwqe_polarity, IRDMAQPSQ_VALID);
+ hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity);
irdma_wmb(); /* make sure WQE is populated before valid bit is set */
@@ -1061,17 +1081,17 @@ irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
get_64bit_val(cq->shadow_area, 32, &temp_val);
- sw_cq_sel = (u16)RS_64(temp_val, IRDMA_CQ_DBSA_SW_CQ_SELECT);
+ sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
sw_cq_sel += cq_cnt;
- arm_seq_num = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_SEQ_NUM);
- arm_next_se = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_NEXT_SE);
- arm_next = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_NEXT);
+ arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
+ arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
+ arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val);
- temp_val = LS_64(arm_seq_num, IRDMA_CQ_DBSA_ARM_SEQ_NUM) |
- LS_64(sw_cq_sel, IRDMA_CQ_DBSA_SW_CQ_SELECT) |
- LS_64(arm_next_se, IRDMA_CQ_DBSA_ARM_NEXT_SE) |
- LS_64(arm_next, IRDMA_CQ_DBSA_ARM_NEXT);
+ temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
+ FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);
set_64bit_val(cq->shadow_area, 32, temp_val);
}
@@ -1093,17 +1113,17 @@ irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
cq->armed = true;
get_64bit_val(cq->shadow_area, IRDMA_BYTE_32, &temp_val);
- arm_seq_num = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_SEQ_NUM);
+ arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
arm_seq_num++;
- sw_cq_sel = (u16)RS_64(temp_val, IRDMA_CQ_DBSA_SW_CQ_SELECT);
- arm_next_se = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_NEXT_SE);
+ sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
+ arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
arm_next_se |= 1;
if (cq_notify == IRDMA_CQ_COMPL_EVENT)
arm_next = 1;
- temp_val = LS_64(arm_seq_num, IRDMA_CQ_DBSA_ARM_SEQ_NUM) |
- LS_64(sw_cq_sel, IRDMA_CQ_DBSA_SW_CQ_SELECT) |
- LS_64(arm_next_se, IRDMA_CQ_DBSA_ARM_NEXT_SE) |
- LS_64(arm_next, IRDMA_CQ_DBSA_ARM_NEXT);
+ temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
+ FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);
set_64bit_val(cq->shadow_area, IRDMA_BYTE_32, temp_val);
@@ -1166,7 +1186,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
__le64 *cqe;
struct irdma_qp_uk *qp;
struct irdma_ring *pring = NULL;
- u32 wqe_idx, q_type;
+ u32 wqe_idx;
int ret_code;
bool move_cq_head = true;
u8 polarity;
@@ -1179,14 +1199,14 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);
get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
- polarity = (u8)RS_64(qword3, IRDMA_CQ_VALID);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
if (polarity != cq->polarity)
return -ENOENT;
/* Ensure CQE contents are read after valid bit is checked */
rmb();
- ext_valid = (bool)RS_64(qword3, IRDMA_CQ_EXTCQE);
+ ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
if (ext_valid) {
u64 qword6, qword7;
u32 peek_head;
@@ -1194,12 +1214,12 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
if (cq->avoid_mem_cflct) {
ext_cqe = (__le64 *) ((u8 *)cqe + 32);
get_64bit_val(ext_cqe, IRDMA_BYTE_24, &qword7);
- polarity = (u8)RS_64(qword7, IRDMA_CQ_VALID);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
} else {
peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
ext_cqe = cq->cq_base[peek_head].buf;
get_64bit_val(ext_cqe, IRDMA_BYTE_24, &qword7);
- polarity = (u8)RS_64(qword7, IRDMA_CQ_VALID);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
if (!peek_head)
polarity ^= 1;
}
@@ -1209,19 +1229,19 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
/* Ensure ext CQE contents are read after ext valid bit is checked */
rmb();
- info->imm_valid = (bool)RS_64(qword7, IRDMA_CQ_IMMVALID);
+ info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7);
if (info->imm_valid) {
u64 qword4;
get_64bit_val(ext_cqe, IRDMA_BYTE_0, &qword4);
- info->imm_data = (u32)RS_64(qword4, IRDMA_CQ_IMMDATALOW32);
+ info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4);
}
- info->ud_smac_valid = (bool)RS_64(qword7, IRDMA_CQ_UDSMACVALID);
- info->ud_vlan_valid = (bool)RS_64(qword7, IRDMA_CQ_UDVLANVALID);
+ info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
+ info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
if (info->ud_smac_valid || info->ud_vlan_valid) {
get_64bit_val(ext_cqe, IRDMA_BYTE_16, &qword6);
if (info->ud_vlan_valid)
- info->ud_vlan = (u16)RS_64(qword6, IRDMA_CQ_UDVLAN);
+ info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
if (info->ud_smac_valid) {
info->ud_smac[5] = qword6 & 0xFF;
info->ud_smac[4] = (qword6 >> 8) & 0xFF;
@@ -1237,23 +1257,26 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
info->ud_vlan_valid = false;
}
- q_type = (u8)RS_64(qword3, IRDMA_CQ_SQ);
- info->error = (bool)RS_64(qword3, IRDMA_CQ_ERROR);
- info->push_dropped = (bool)RS_64(qword3, IRDMACQ_PSHDROP);
- info->ipv4 = (bool)RS_64(qword3, IRDMACQ_IPV4);
+ info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
+ info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
+ info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
+ info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
if (info->error) {
- info->major_err = RS_64(qword3, IRDMA_CQ_MAJERR);
- info->minor_err = RS_64(qword3, IRDMA_CQ_MINERR);
- if (info->major_err == IRDMA_FLUSH_MAJOR_ERR) {
- info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
+ info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
+ info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
+ switch (info->major_err) {
+ case IRDMA_FLUSH_MAJOR_ERR:
/* Set the min error to standard flush error code for remaining cqes */
if (info->minor_err != FLUSH_GENERAL_ERR) {
- qword3 &= ~IRDMA_CQ_MINERR_M;
- qword3 |= LS_64(FLUSH_GENERAL_ERR, IRDMA_CQ_MINERR);
+ qword3 &= ~IRDMA_CQ_MINERR;
+ qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
set_64bit_val(cqe, IRDMA_BYTE_24, qword3);
}
- } else {
+ info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
+ break;
+ default:
info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
+ break;
}
} else {
info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
@@ -1262,22 +1285,23 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
get_64bit_val(cqe, IRDMA_BYTE_0, &qword0);
get_64bit_val(cqe, IRDMA_BYTE_16, &qword2);
- info->tcp_seq_num_rtt = (u32)RS_64(qword0, IRDMACQ_TCPSEQNUMRTT);
- info->qp_id = (u32)RS_64(qword2, IRDMACQ_QPID);
- info->ud_src_qpn = (u32)RS_64(qword2, IRDMACQ_UDSRCQPN);
+ info->stat.raw = (u32)FIELD_GET(IRDMACQ_TCPSQN_ROCEPSN_RTT_TS, qword0);
+ info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
+ info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);
get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
- info->solicited_event = (bool)RS_64(qword3, IRDMACQ_SOEVENT);
+ info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
qp = (struct irdma_qp_uk *)(irdma_uintptr) comp_ctx;
if (!qp || qp->destroy_pending) {
ret_code = -EFAULT;
goto exit;
}
- wqe_idx = (u32)RS_64(qword3, IRDMA_CQ_WQEIDX);
+ wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
info->qp_handle = (irdma_qp_handle) (irdma_uintptr) qp;
+ info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
- if (q_type == IRDMA_CQE_QTYPE_RQ) {
+ if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
u32 array_idx;
ret_code = irdma_skip_duplicate_flush_cmpl(qp->rq_ring,
@@ -1310,16 +1334,11 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
}
}
- info->bytes_xfered = (u32)RS_64(qword0, IRDMACQ_PAYLDLEN);
+ info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
- if (info->imm_valid)
- info->op_type = IRDMA_OP_TYPE_REC_IMM;
- else
- info->op_type = IRDMA_OP_TYPE_REC;
-
- if (qword3 & IRDMACQ_STAG_M) {
+ if (qword3 & IRDMACQ_STAG) {
info->stag_invalid_set = true;
- info->inv_stag = (u32)RS_64(qword2, IRDMACQ_INVSTAG);
+ info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
} else {
info->stag_invalid_set = false;
}
@@ -1363,11 +1382,15 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
info->signaled = qp->sq_wrtrk_array[wqe_idx].signaled;
if (!info->comp_status)
info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
- info->op_type = (u8)RS_64(qword3, IRDMACQ_OP);
+ info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
IRDMA_RING_SET_TAIL(qp->sq_ring,
wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
} else {
+ unsigned long flags;
+
+ spin_lock_irqsave(qp->lock, flags);
if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
+ spin_unlock_irqrestore(qp->lock, flags);
ret_code = -ENOENT;
goto exit;
}
@@ -1375,27 +1398,29 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
do {
__le64 *sw_wqe;
u64 wqe_qword;
- u8 op_type;
u32 tail;
tail = qp->sq_ring.tail;
sw_wqe = qp->sq_base[tail].elem;
get_64bit_val(sw_wqe, IRDMA_BYTE_24,
&wqe_qword);
- op_type = (u8)RS_64(wqe_qword, IRDMAQPSQ_OPCODE);
- info->op_type = op_type;
+ info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
IRDMA_RING_SET_TAIL(qp->sq_ring,
tail + qp->sq_wrtrk_array[tail].quanta);
- if (op_type != IRDMAQP_OP_NOP) {
+ if (info->op_type != IRDMAQP_OP_NOP) {
info->wr_id = qp->sq_wrtrk_array[tail].wrid;
info->signaled = qp->sq_wrtrk_array[tail].signaled;
info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
break;
}
} while (1);
+
+ if (info->op_type == IRDMA_OP_TYPE_BIND_MW && info->minor_err == FLUSH_PROT_ERR)
+ info->minor_err = FLUSH_MW_BIND_ERR;
qp->sq_flush_seen = true;
if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
qp->sq_flush_complete = true;
+ spin_unlock_irqrestore(qp->lock, flags);
}
pring = &qp->sq_ring;
}
@@ -1403,9 +1428,10 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
ret_code = 0;
exit:
- if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED)
+ if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
if (pring && IRDMA_RING_MORE_WORK(*pring))
move_cq_head = false;
+ }
if (move_cq_head) {
IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
@@ -1424,8 +1450,8 @@ exit:
set_64bit_val(cq->shadow_area, IRDMA_BYTE_0,
IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
} else {
- qword3 &= ~IRDMA_CQ_WQEIDX_M;
- qword3 |= LS_64(pring->tail, IRDMA_CQ_WQEIDX);
+ qword3 &= ~IRDMA_CQ_WQEIDX;
+ qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
set_64bit_val(cqe, IRDMA_BYTE_24, qword3);
}
@@ -1433,11 +1459,11 @@ exit:
}
/**
- * irdma_qp_round_up - return round up qp wq depth
+ * irdma_round_up_wq - return round up qp wq depth
* @wqdepth: wq depth in quanta to round up
*/
static int
-irdma_qp_round_up(u32 wqdepth)
+irdma_round_up_wq(u32 wqdepth)
{
int scount = 1;
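/*
 * Illustration only, not part of this patch: the body of the renamed
 * helper is elided by the diff context above. A minimal sketch of the
 * classic bit-smearing round-up to the next power of two that the depth
 * helpers below rely on; the exact loop bound used by the driver is an
 * assumption here.
 */
static int demo_round_up_pow2(u32 wqdepth)
{
	int scount = 1;

	for (wqdepth--; scount <= 16; scount *= 2)
		wqdepth |= wqdepth >> scount;	/* smear the top set bit down */

	return ++wqdepth;	/* e.g. 100 -> 128, 128 stays 128 */
}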
@@ -1481,34 +1507,34 @@ irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
}
/*
- * irdma_get_sqdepth - get SQ depth (quanta) @max_hw_wq_quanta: HW SQ size limit @sq_size: SQ size @shift: shift which
+ * irdma_get_sqdepth - get SQ depth (quanta) @uk_attrs: qp HW attributes @sq_size: SQ size @shift: shift which
* determines size of WQE @sqdepth: depth of SQ
*/
int
-irdma_get_sqdepth(u32 max_hw_wq_quanta, u32 sq_size, u8 shift, u32 *sqdepth)
+irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth)
{
- *sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);
+ *sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);
- if (*sqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
- *sqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
- else if (*sqdepth > max_hw_wq_quanta)
+ if (*sqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
+ *sqdepth = uk_attrs->min_hw_wq_size << shift;
+ else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
return -EINVAL;
return 0;
}
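/*
 * Worked example (illustrative, not part of this patch): with sq_size = 25
 * and shift = 2, irdma_get_sqdepth() rounds (25 << 2) + IRDMA_SQ_RSVD up to
 * the next power of two (see the round-up sketch above), then clamps the
 * result between uk_attrs->min_hw_wq_size << shift and
 * uk_attrs->max_hw_wq_quanta, failing with -EINVAL only when the upper
 * bound is exceeded.
 */
static int demo_sqdepth(struct irdma_uk_attrs *uk_attrs)
{
	u32 sqdepth;

	/* 25 WRs of up to (1 << 2) quanta each, plus reserved slots */
	return irdma_get_sqdepth(uk_attrs, 25, 2, &sqdepth);
}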
/*
- * irdma_get_rqdepth - get RQ/SRQ depth (quanta) @max_hw_rq_quanta: HW RQ/SRQ size limit @rq_size: RQ/SRQ size @shift:
- * shift which determines size of WQE @rqdepth: depth of RQ/SRQ
+ * irdma_get_rqdepth - get RQ depth (quanta) @uk_attrs: qp HW attributes @rq_size: RQ size @shift: shift which
+ * determines size of WQE @rqdepth: depth of RQ
*/
int
-irdma_get_rqdepth(u32 max_hw_rq_quanta, u32 rq_size, u8 shift, u32 *rqdepth)
+irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth)
{
- *rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);
+ *rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);
- if (*rqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
- *rqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
- else if (*rqdepth > max_hw_rq_quanta)
+ if (*rqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
+ *rqdepth = uk_attrs->min_hw_wq_size << shift;
+ else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
return -EINVAL;
return 0;
@@ -1550,6 +1576,80 @@ irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
}
/**
+ * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
+ * @ukinfo: qp initialization info
+ * @sq_shift: Returns shift of SQ
+ * @rq_shift: Returns shift of RQ
+ */
+void
+irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
+ u8 *rq_shift)
+{
+ bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2 ? true : false;
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs,
+ imm_support ? ukinfo->max_sq_frag_cnt + 1 :
+ ukinfo->max_sq_frag_cnt,
+ ukinfo->max_inline_data, sq_shift);
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
+ rq_shift);
+
+ if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
+ if (ukinfo->abi_ver > 4)
+ *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
+ }
+}
+
+/**
+ * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size.
+ * @ukinfo: qp initialization info
+ * @sq_depth: Returns depth of SQ
+ * @sq_shift: Returns shift of SQ
+ */
+int
+irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *sq_depth, u8 *sq_shift)
+{
+ bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2 ? true : false;
+ int status;
+ irdma_get_wqe_shift(ukinfo->uk_attrs,
+ imm_support ? ukinfo->max_sq_frag_cnt + 1 :
+ ukinfo->max_sq_frag_cnt,
+ ukinfo->max_inline_data, sq_shift);
+ status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size,
+ *sq_shift, sq_depth);
+
+ return status;
+}
+
+/**
+ * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size.
+ * @ukinfo: qp initialization info
+ * @rq_depth: Returns depth of RQ
+ * @rq_shift: Returns shift of RQ
+ */
+int
+irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *rq_depth, u8 *rq_shift)
+{
+ int status;
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
+ rq_shift);
+
+ if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
+ if (ukinfo->abi_ver > 4)
+ *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
+ }
+
+ status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size,
+ *rq_shift, rq_depth);
+
+ return status;
+}
+
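/*
 * Usage sketch (illustrative, not part of this patch): callers such as
 * irdma_setup_kmode_qp() in irdma_verbs.c can now derive depth and shift
 * for both queues from the init info alone, instead of open-coding
 * irdma_get_wqe_shift() plus the depth helpers.
 */
static int demo_calc_qp_sizes(struct irdma_qp_uk_init_info *ukinfo)
{
	int ret;

	ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
					   &ukinfo->sq_shift);
	if (ret)
		return ret;

	return irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
					    &ukinfo->rq_shift);
}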
+/**
* irdma_uk_qp_init - initialize shared qp
* @qp: hw qp (user and kernel)
* @info: qp initialization info
@@ -1564,23 +1664,12 @@ irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
{
int ret_code = 0;
u32 sq_ring_size;
- u8 sqshift, rqshift;
qp->uk_attrs = info->uk_attrs;
if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
return -EINVAL;
- irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift);
- if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) {
- irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt,
- info->max_inline_data, &sqshift);
- if (info->abi_ver > 4)
- rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
- } else {
- irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt + 1,
- info->max_inline_data, &sqshift);
- }
qp->qp_caps = info->qp_caps;
qp->sq_base = info->sq;
qp->rq_base = info->rq;
@@ -1596,7 +1685,7 @@ irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
qp->sq_size = info->sq_size;
qp->push_mode = false;
qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
- sq_ring_size = qp->sq_size << sqshift;
+ sq_ring_size = qp->sq_size << info->sq_shift;
IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
if (info->first_sq_wq) {
@@ -1611,9 +1700,9 @@ irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
qp->rq_size = info->rq_size;
qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
qp->max_inline_data = info->max_inline_data;
- qp->rq_wqe_size = rqshift;
+ qp->rq_wqe_size = info->rq_shift;
IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
- qp->rq_wqe_size_multiplier = 1 << rqshift;
+ qp->rq_wqe_size_multiplier = 1 << info->rq_shift;
if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
qp->wqe_ops = iw_wqe_uk_ops_gen_1;
else
@@ -1663,7 +1752,7 @@ irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
else
cqe = cq->cq_base[cq_head].buf;
get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
- polarity = (u8)RS_64(qword3, IRDMA_CQ_VALID);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
if (polarity != temp)
break;
@@ -1693,28 +1782,29 @@ irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
u64 hdr;
u32 wqe_idx;
struct irdma_post_sq_info info = {0};
+ u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;
- info.push_wqe = false;
+ info.push_wqe = qp->push_db ? true : false;
info.wr_id = wr_id;
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
- 0, &info);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, 0, &info);
if (!wqe)
return -ENOSPC;
- irdma_clr_wqes(qp, wqe_idx);
-
set_64bit_val(wqe, IRDMA_BYTE_0, 0);
set_64bit_val(wqe, IRDMA_BYTE_8, 0);
set_64bit_val(wqe, IRDMA_BYTE_16, 0);
- hdr = LS_64(IRDMAQP_OP_NOP, IRDMAQPSQ_OPCODE) |
- LS_64(signaled, IRDMAQPSQ_SIGCOMPL) |
- LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
irdma_wmb(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (post_sq)
+
+ if (info.push_wqe)
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ else if (post_sq)
irdma_uk_qp_post_wr(qp);
return 0;
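/*
 * Usage sketch (illustrative, not part of this patch): the reworked NOP
 * selects the push-WQE path on its own whenever qp->push_db is mapped, so
 * a caller only chooses whether the WQE is signaled and posted at once.
 */
static int demo_post_nop(struct irdma_qp_uk *qp, u64 wr_id)
{
	return irdma_nop(qp, wr_id, true /* signaled */, true /* post_sq */);
}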
diff --git a/sys/dev/irdma/irdma_user.h b/sys/dev/irdma/irdma_user.h
index 535962fa2cf7..b83f431e5521 100644
--- a/sys/dev/irdma/irdma_user.h
+++ b/sys/dev/irdma/irdma_user.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2021 Intel Corporation
+ * Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -51,7 +51,7 @@
#define irdma_address_list u64 *
#define irdma_sgl struct irdma_sge *
-#define IRDMA_MAX_MR_SIZE 0x200000000000ULL
+#define IRDMA_MAX_MR_SIZE 0x200000000000ULL
#define IRDMA_ACCESS_FLAGS_LOCALREAD 0x01
#define IRDMA_ACCESS_FLAGS_LOCALWRITE 0x02
@@ -78,7 +78,97 @@
#define IRDMA_OP_TYPE_REC 0x3e
#define IRDMA_OP_TYPE_REC_IMM 0x3f
-#define IRDMA_FLUSH_MAJOR_ERR 1
+#define IRDMA_FLUSH_MAJOR_ERR 1
+#define IRDMA_SRQFLUSH_RSVD_MAJOR_ERR 0xfffe
+
+/* Async Events codes */
+#define IRDMA_AE_AMP_UNALLOCATED_STAG 0x0102
+#define IRDMA_AE_AMP_INVALID_STAG 0x0103
+#define IRDMA_AE_AMP_BAD_QP 0x0104
+#define IRDMA_AE_AMP_BAD_PD 0x0105
+#define IRDMA_AE_AMP_BAD_STAG_KEY 0x0106
+#define IRDMA_AE_AMP_BAD_STAG_INDEX 0x0107
+#define IRDMA_AE_AMP_BOUNDS_VIOLATION 0x0108
+#define IRDMA_AE_AMP_RIGHTS_VIOLATION 0x0109
+#define IRDMA_AE_AMP_TO_WRAP 0x010a
+#define IRDMA_AE_AMP_FASTREG_VALID_STAG 0x010c
+#define IRDMA_AE_AMP_FASTREG_MW_STAG 0x010d
+#define IRDMA_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e
+#define IRDMA_AE_AMP_FASTREG_INVALID_LENGTH 0x0110
+#define IRDMA_AE_AMP_INVALIDATE_SHARED 0x0111
+#define IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112
+#define IRDMA_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113
+#define IRDMA_AE_AMP_MWBIND_VALID_STAG 0x0114
+#define IRDMA_AE_AMP_MWBIND_OF_MR_STAG 0x0115
+#define IRDMA_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116
+#define IRDMA_AE_AMP_MWBIND_TO_MW_STAG 0x0117
+#define IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118
+#define IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119
+#define IRDMA_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a
+#define IRDMA_AE_AMP_MWBIND_BIND_DISABLED 0x011b
+#define IRDMA_AE_PRIV_OPERATION_DENIED 0x011c
+#define IRDMA_AE_AMP_INVALIDATE_TYPE1_MW 0x011d
+#define IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW 0x011e
+#define IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG 0x011f
+#define IRDMA_AE_AMP_MWBIND_WRONG_TYPE 0x0120
+#define IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH 0x0121
+#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132
+#define IRDMA_AE_UDA_XMIT_BAD_PD 0x0133
+#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134
+#define IRDMA_AE_UDA_L4LEN_INVALID 0x0135
+#define IRDMA_AE_BAD_CLOSE 0x0201
+#define IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202
+#define IRDMA_AE_CQ_OPERATION_ERROR 0x0203
+#define IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205
+#define IRDMA_AE_STAG_ZERO_INVALID 0x0206
+#define IRDMA_AE_IB_RREQ_AND_Q1_FULL 0x0207
+#define IRDMA_AE_IB_INVALID_REQUEST 0x0208
+#define IRDMA_AE_WQE_UNEXPECTED_OPCODE 0x020a
+#define IRDMA_AE_WQE_INVALID_PARAMETER 0x020b
+#define IRDMA_AE_WQE_INVALID_FRAG_DATA 0x020c
+#define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d
+#define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e
+#define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220
+#define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
+#define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
+#define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
+#define IRDMA_AE_DDP_UBE_INVALID_MO 0x0305
+#define IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306
+#define IRDMA_AE_DDP_UBE_INVALID_QN 0x0307
+#define IRDMA_AE_DDP_NO_L_BIT 0x0308
+#define IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311
+#define IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312
+#define IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313
+#define IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314
+#define IRDMA_AE_ROCE_RSP_LENGTH_ERROR 0x0316
+#define IRDMA_AE_ROCE_EMPTY_MCG 0x0380
+#define IRDMA_AE_ROCE_BAD_MC_IP_ADDR 0x0381
+#define IRDMA_AE_ROCE_BAD_MC_QPID 0x0382
+#define IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH 0x0383
+#define IRDMA_AE_INVALID_ARP_ENTRY 0x0401
+#define IRDMA_AE_INVALID_TCP_OPTION_RCVD 0x0402
+#define IRDMA_AE_STALE_ARP_ENTRY 0x0403
+#define IRDMA_AE_INVALID_AH_ENTRY 0x0406
+#define IRDMA_AE_LLP_CLOSE_COMPLETE 0x0501
+#define IRDMA_AE_LLP_CONNECTION_RESET 0x0502
+#define IRDMA_AE_LLP_FIN_RECEIVED 0x0503
+#define IRDMA_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504
+#define IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505
+#define IRDMA_AE_LLP_SEGMENT_TOO_SMALL 0x0507
+#define IRDMA_AE_LLP_SYN_RECEIVED 0x0508
+#define IRDMA_AE_LLP_TERMINATE_RECEIVED 0x0509
+#define IRDMA_AE_LLP_TOO_MANY_RETRIES 0x050a
+#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
+#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
+#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
+#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
+#define IRDMA_AE_RESET_SENT 0x0601
+#define IRDMA_AE_TERMINATE_SENT 0x0602
+#define IRDMA_AE_RESET_NOT_SENT 0x0603
+#define IRDMA_AE_LCE_QP_CATASTROPHIC 0x0700
+#define IRDMA_AE_LCE_FUNCTION_CATASTROPHIC 0x0701
+#define IRDMA_AE_LCE_CQ_CATASTROPHIC 0x0702
+#define IRDMA_AE_QP_SUSPEND_COMPLETE 0x0900
enum irdma_device_caps_const {
IRDMA_WQE_SIZE = 4,
@@ -119,6 +209,7 @@ enum irdma_device_caps_const {
IRDMA_Q2_BUF_SIZE = 256,
IRDMA_QP_CTX_SIZE = 256,
IRDMA_MAX_PDS = 262144,
+ IRDMA_MIN_WQ_SIZE_GEN2 = 8,
};
enum irdma_addressing_type {
@@ -135,9 +226,15 @@ enum irdma_flush_opcode {
FLUSH_REM_OP_ERR,
FLUSH_LOC_LEN_ERR,
FLUSH_FATAL_ERR,
+ FLUSH_RETRY_EXC_ERR,
FLUSH_MW_BIND_ERR,
FLUSH_REM_INV_REQ_ERR,
- FLUSH_RETRY_EXC_ERR,
+};
+
+enum irdma_qp_event_type {
+ IRDMA_QP_EVENT_CATASTROPHIC,
+ IRDMA_QP_EVENT_ACCESS_ERR,
+ IRDMA_QP_EVENT_REQ_ERR,
};
enum irdma_cmpl_status {
@@ -193,7 +290,7 @@ struct irdma_sge {
struct irdma_ring {
volatile u32 head;
- volatile u32 tail;
+ volatile u32 tail; /* effective tail */
u32 size;
};
@@ -213,14 +310,6 @@ struct irdma_post_send {
u32 ah_id;
};
-struct irdma_post_inline_send {
- void *data;
- u32 len;
- u32 qkey;
- u32 dest_qp;
- u32 ah_id;
-};
-
struct irdma_post_rq_info {
u64 wr_id;
irdma_sgl sg_list;
@@ -233,12 +322,6 @@ struct irdma_rdma_write {
struct irdma_sge rem_addr;
};
-struct irdma_inline_rdma_write {
- void *data;
- u32 len;
- struct irdma_sge rem_addr;
-};
-
struct irdma_rdma_read {
irdma_sgl lo_sg_list;
u32 num_lo_sges;
@@ -281,8 +364,6 @@ struct irdma_post_sq_info {
struct irdma_rdma_read rdma_read;
struct irdma_bind_window bind_window;
struct irdma_inv_local_stag inv_local_stag;
- struct irdma_inline_rdma_write inline_rdma_write;
- struct irdma_post_inline_send inline_send;
} op;
};
@@ -290,7 +371,6 @@ struct irdma_cq_poll_info {
u64 wr_id;
irdma_qp_handle qp_handle;
u32 bytes_xfered;
- u32 tcp_seq_num_rtt;
u32 qp_id;
u32 ud_src_qpn;
u32 imm_data;
@@ -301,6 +381,7 @@ struct irdma_cq_poll_info {
u16 ud_vlan;
u8 ud_smac[6];
u8 op_type;
+ u8 q_type;
bool stag_invalid_set:1; /* or L_R_Key set */
bool push_dropped:1;
bool error:1;
@@ -310,6 +391,17 @@ struct irdma_cq_poll_info {
bool ud_smac_valid:1;
bool imm_valid:1;
bool signaled:1;
+ union {
+ u32 tcp_sqn;
+ u32 roce_psn;
+ u32 rtt;
+ u32 raw;
+ } stat;
+};
+
+struct qp_err_code {
+ enum irdma_flush_opcode flush_code;
+ enum irdma_qp_event_type event_type;
};
int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
@@ -334,7 +426,7 @@ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
bool post_sq);
struct irdma_wqe_uk_ops {
- void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity);
+ void (*iw_copy_inline_data)(u8 *dest, struct irdma_sge *sge_list, u32 num_sges, u8 polarity);
u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct irdma_sge *sge,
u8 valid);
@@ -352,6 +444,12 @@ int irdma_uk_cq_init(struct irdma_cq_uk *cq,
struct irdma_cq_uk_init_info *info);
int irdma_uk_qp_init(struct irdma_qp_uk *qp,
struct irdma_qp_uk_init_info *info);
+void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
+ u8 *rq_shift);
+int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *sq_depth, u8 *sq_shift);
+int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *rq_depth, u8 *rq_shift);
struct irdma_sq_uk_wr_trk_info {
u64 wrid;
u32 wr_len;
@@ -405,7 +503,6 @@ struct irdma_qp_uk {
bool destroy_pending:1; /* Indicates the QP is being destroyed */
void *back_qp;
spinlock_t *lock;
- bool force_fence;
u8 dbg_rq_flushed;
u16 ord_cnt;
u8 sq_flush_seen;
@@ -442,8 +539,12 @@ struct irdma_qp_uk_init_info {
u32 max_sq_frag_cnt;
u32 max_rq_frag_cnt;
u32 max_inline_data;
+ u32 sq_depth;
+ u32 rq_depth;
u8 first_sq_wq;
u8 type;
+ u8 sq_shift;
+ u8 rq_shift;
u8 rd_fence_rate;
int abi_ver;
bool legacy_mode;
@@ -460,7 +561,7 @@ struct irdma_cq_uk_init_info {
};
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
- u16 quanta, u32 total_size,
+ u16 *quanta, u32 total_size,
struct irdma_post_sq_info *info);
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
int irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
@@ -469,9 +570,81 @@ int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta);
int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
u32 inline_data, u8 *shift);
-int irdma_get_sqdepth(u32 max_hw_wq_quanta, u32 sq_size, u8 shift, u32 *wqdepth);
-int irdma_get_rqdepth(u32 max_hw_rq_quanta, u32 rq_size, u8 shift, u32 *wqdepth);
+int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth);
+int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth);
+int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift, u32 *srqdepth);
void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
u32 wqe_idx, bool post_sq);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
+
+static inline struct qp_err_code irdma_ae_to_qp_err_code(u16 ae_id)
+{
+ struct qp_err_code qp_err = { 0 };
+
+ switch (ae_id) {
+ case IRDMA_AE_AMP_BOUNDS_VIOLATION:
+ case IRDMA_AE_AMP_INVALID_STAG:
+ case IRDMA_AE_AMP_RIGHTS_VIOLATION:
+ case IRDMA_AE_AMP_UNALLOCATED_STAG:
+ case IRDMA_AE_AMP_BAD_PD:
+ case IRDMA_AE_AMP_BAD_QP:
+ case IRDMA_AE_AMP_BAD_STAG_KEY:
+ case IRDMA_AE_AMP_BAD_STAG_INDEX:
+ case IRDMA_AE_AMP_TO_WRAP:
+ case IRDMA_AE_PRIV_OPERATION_DENIED:
+ qp_err.flush_code = FLUSH_PROT_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ break;
+ case IRDMA_AE_UDA_XMIT_BAD_PD:
+ case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
+ qp_err.flush_code = FLUSH_LOC_QP_OP_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ case IRDMA_AE_UDA_L4LEN_INVALID:
+ case IRDMA_AE_DDP_UBE_INVALID_MO:
+ case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+ qp_err.flush_code = FLUSH_LOC_LEN_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
+ case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
+ qp_err.flush_code = FLUSH_REM_ACCESS_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ break;
+ case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
+ case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
+ case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
+ case IRDMA_AE_AMP_MWBIND_VALID_STAG:
+ qp_err.flush_code = FLUSH_MW_BIND_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ break;
+ case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+ qp_err.flush_code = FLUSH_RETRY_EXC_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_IB_INVALID_REQUEST:
+ qp_err.flush_code = FLUSH_REM_INV_REQ_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_REQ_ERR;
+ break;
+ case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
+ case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
+ case IRDMA_AE_IB_REMOTE_OP_ERROR:
+ qp_err.flush_code = FLUSH_REM_OP_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_LCE_QP_CATASTROPHIC:
+ qp_err.flush_code = FLUSH_FATAL_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ default:
+ qp_err.flush_code = FLUSH_GENERAL_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ }
+
+ return qp_err;
+}
#endif /* IRDMA_USER_H */
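/*
 * Usage sketch (illustrative, not part of this header): an async-event
 * handler can classify a hardware AE with a single call; unrecognized
 * codes fall back to FLUSH_GENERAL_ERR with a catastrophic event type.
 */
static inline void demo_classify_ae(u16 ae_id)
{
	struct qp_err_code err = irdma_ae_to_qp_err_code(ae_id);

	/* e.g. IRDMA_AE_LLP_TOO_MANY_RETRIES yields FLUSH_RETRY_EXC_ERR
	 * and IRDMA_QP_EVENT_CATASTROPHIC
	 */
	(void)err;
}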
diff --git a/sys/dev/irdma/irdma_utils.c b/sys/dev/irdma/irdma_utils.c
index 26b92bf2d454..29ca1dd3b54e 100644
--- a/sys/dev/irdma/irdma_utils.c
+++ b/sys/dev/irdma/irdma_utils.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2021 Intel Corporation
+ * Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -46,7 +46,7 @@ DEFINE_SPINLOCK(irdma_handler_lock);
* @action: modify, delete or add
*/
int
-irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, u8 *mac_addr,
+irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, const u8 *mac_addr,
u32 action)
{
unsigned long flags;
@@ -110,7 +110,7 @@ irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, u8 *mac_addr,
* @mac: MAC address
*/
int
-irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, u8 *mac)
+irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, const u8 *mac)
{
int arpidx;
@@ -129,6 +129,57 @@ irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, u8 *mac)
}
/**
+ * irdma_netdevice_event - system notifier for netdev events
+ * @notifier: not used
+ * @event: event for notifier
+ * @ptr: netdev
+ */
+int
+irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr)
+{
+ struct irdma_device *iwdev;
+ struct ifnet *netdev = netdev_notifier_info_to_ifp(ptr);
+
+ iwdev = container_of(notifier, struct irdma_device, nb_netdevice_event);
+ if (iwdev->netdev != netdev)
+ return NOTIFY_DONE;
+
+ iwdev->iw_status = 1;
+ switch (event) {
+ case NETDEV_DOWN:
+ iwdev->iw_status = 0;
+ /* fallthrough */
+ case NETDEV_UP:
+ irdma_port_ibevent(iwdev);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+void
+irdma_unregister_notifiers(struct irdma_device *iwdev)
+{
+ unregister_netdevice_notifier(&iwdev->nb_netdevice_event);
+}
+
+int
+irdma_register_notifiers(struct irdma_device *iwdev)
+{
+ int ret;
+
+ iwdev->nb_netdevice_event.notifier_call = irdma_netdevice_event;
+ ret = register_netdevice_notifier(&iwdev->nb_netdevice_event);
+ if (ret) {
+ ibdev_err(&iwdev->ibdev, "register_netdevice_notifier failed\n");
+ return ret;
+ }
+ return ret;
+}
+/**
* irdma_alloc_and_get_cqp_request - get cqp struct
* @cqp: device cqp ptr
* @wait: cqp to be used in wait mode
@@ -252,7 +303,7 @@ irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
pending_work = IRDMA_RING_USED_QUANTA(cqp->sc_cqp.sq_ring);
wqe_idx = IRDMA_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring);
for (i = 0; i < pending_work; i++) {
- cqp_request = (struct irdma_cqp_request *)(unsigned long)
+ cqp_request = (struct irdma_cqp_request *)(uintptr_t)
cqp->scratch_array[wqe_idx];
if (cqp_request)
irdma_free_pending_cqp_request(cqp, cqp_request);
@@ -278,20 +329,23 @@ irdma_wait_event(struct irdma_pci_f *rf,
struct irdma_cqp_request *cqp_request)
{
struct irdma_cqp_timeout cqp_timeout = {0};
+ int timeout_threshold = CQP_TIMEOUT_THRESHOLD;
bool cqp_error = false;
int err_code = 0;
cqp_timeout.compl_cqp_cmds = rf->sc_dev.cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
do {
+ int wait_time_ms = rf->sc_dev.hw_attrs.max_cqp_compl_wait_time_ms;
+
irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
if (wait_event_timeout(cqp_request->waitq,
cqp_request->request_done,
- msecs_to_jiffies(CQP_COMPL_WAIT_TIME_MS)))
+ msecs_to_jiffies(wait_time_ms)))
break;
irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev);
- if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)
+ if (cqp_timeout.count < timeout_threshold)
continue;
if (!rf->reset) {
@@ -304,11 +358,14 @@ irdma_wait_event(struct irdma_pci_f *rf,
cqp_error = cqp_request->compl_info.error;
if (cqp_error) {
err_code = -EIO;
- if (cqp_request->compl_info.maj_err_code == 0xFFFF &&
- cqp_request->compl_info.min_err_code == 0x8029) {
- if (!rf->reset) {
- rf->reset = true;
- rf->gen_ops.request_reset(rf);
+ if (cqp_request->compl_info.maj_err_code == 0xFFFF) {
+ if (cqp_request->compl_info.min_err_code == 0x8002) {
+ err_code = -EBUSY;
+ } else if (cqp_request->compl_info.min_err_code == 0x8029) {
+ if (!rf->reset) {
+ rf->reset = true;
+ rf->gen_ops.request_reset(rf);
+ }
}
}
}
@@ -366,10 +423,12 @@ static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
};
static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = {
+ {0xffff, 0x8002, "Invalid State"},
{0xffff, 0x8006, "Flush No Wqe Pending"},
{0xffff, 0x8007, "Modify QP Bad Close"},
{0xffff, 0x8009, "LLP Closed"},
- {0xffff, 0x800a, "Reset Not Sent"}
+ {0xffff, 0x800a, "Reset Not Sent"},
+ {0xffff, 0x200, "Failover Pending"}
};
/**
@@ -522,16 +581,6 @@ irdma_get_qp(struct ib_device *device, int qpn)
}
/**
- * irdma_get_hw_addr - return hw addr
- * @par: points to shared dev
- */
-u8 __iomem * irdma_get_hw_addr(void *par){
- struct irdma_sc_dev *dev = par;
-
- return dev->hw->hw_addr;
-}
-
-/**
* irdma_remove_cqp_head - return head entry and remove
* @dev: device
*/
@@ -713,7 +762,7 @@ irdma_terminate_del_timer(struct irdma_sc_qp *qp)
*/
int
irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
+ struct irdma_dma_mem *val_mem, u16 hmc_fn_id)
{
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
@@ -748,7 +797,7 @@ irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
*/
int
irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
+ struct irdma_dma_mem *val_mem, u16 hmc_fn_id)
{
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
@@ -1803,7 +1852,7 @@ irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift;
pchunk->bitmapmem.size = sizeofbitmap >> 3;
- pchunk->bitmapmem.va = kzalloc(pchunk->bitmapmem.size, GFP_ATOMIC);
+ pchunk->bitmapmem.va = kzalloc(pchunk->bitmapmem.size, GFP_KERNEL);
if (!pchunk->bitmapmem.va)
return -ENOMEM;
@@ -2056,6 +2105,9 @@ irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
case IRDMA_QP_EVENT_ACCESS_ERR:
ibevent.event = IB_EVENT_QP_ACCESS_ERR;
break;
+ case IRDMA_QP_EVENT_REQ_ERR:
+ ibevent.event = IB_EVENT_QP_REQ_ERR;
+ break;
}
ibevent.device = iwqp->ibqp.device;
ibevent.element.qp = &iwqp->ibqp;
@@ -2168,7 +2220,7 @@ irdma_cq_empty(struct irdma_cq *iwcq)
ukcq = &iwcq->sc_cq.cq_uk;
cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
get_64bit_val(cqe, 24, &qword3);
- polarity = (u8)RS_64(qword3, IRDMA_CQ_VALID);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
return polarity != ukcq->polarity;
}
@@ -2191,7 +2243,7 @@ irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_
{
struct irdma_cmpl_gen *cmpl;
- if (!iwcq || list_empty(&iwcq->cmpl_generated))
+ if (list_empty(&iwcq->cmpl_generated))
return -ENOENT;
cmpl = list_first_entry_or_null(&iwcq->cmpl_generated, struct irdma_cmpl_gen, list);
list_del(&cmpl->list);
@@ -2226,7 +2278,10 @@ irdma_set_cpi_common_values(struct irdma_cq_poll_info *cpi,
static inline void
irdma_comp_handler(struct irdma_cq *cq)
{
- if (cq->sc_cq.cq_uk.armed && cq->ibcq.comp_handler)
+ if (!cq->ibcq.comp_handler)
+ return;
+
+ if (atomic_cmpxchg(&cq->armed, 1, 0))
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
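/*
 * Illustration (not part of this patch): atomic_cmpxchg(&cq->armed, 1, 0)
 * lets exactly one caller per arm/notify cycle observe armed == 1, so
 * racing completion paths cannot fire comp_handler twice for a single
 * request-notification the way the old unsynchronized cq_uk.armed check
 * could. How the arm side sets the flag is assumed here for brevity.
 */
static void demo_fire_once(struct irdma_cq *cq)
{
	atomic_set(&cq->armed, 1);	/* request-notification arms the CQ */
	irdma_comp_handler(cq);		/* delivers the event, clears armed */
	irdma_comp_handler(cq);		/* no-op until the CQ is re-armed */
}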
@@ -2246,15 +2301,20 @@ irdma_generate_flush_completions(struct irdma_qp *iwqp)
u32 wqe_idx;
u8 compl_generated = 0;
unsigned long flags;
+ bool reschedule = false;
#define SQ_COMPL_GENERATED (0x01)
#define RQ_COMPL_GENERATED (0x02)
spin_lock_irqsave(&iwqp->iwscq->lock, flags);
if (irdma_cq_empty(iwqp->iwscq)) {
+ unsigned long flags2;
+
+ spin_lock_irqsave(&iwqp->lock, flags2);
while (IRDMA_RING_MORE_WORK(*sq_ring)) {
- cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL);
+ cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
if (!cmpl) {
+ spin_unlock_irqrestore(&iwqp->lock, flags2);
spin_unlock_irqrestore(&iwqp->iwscq->lock, flags);
return;
}
@@ -2266,7 +2326,8 @@ irdma_generate_flush_completions(struct irdma_qp *iwqp)
cmpl->cpi.signaled = qp->sq_wrtrk_array[wqe_idx].signaled;
sw_wqe = qp->sq_base[wqe_idx].elem;
get_64bit_val(sw_wqe, IRDMA_BYTE_24, &wqe_qword);
- cmpl->cpi.op_type = (u8)RS_64(wqe_qword, IRDMAQPSQ_OPCODE);
+ cmpl->cpi.op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
+ cmpl->cpi.q_type = IRDMA_CQE_QTYPE_SQ;
/* remove the SQ WR by moving SQ tail */
IRDMA_RING_SET_TAIL(*sq_ring, sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
@@ -2275,16 +2336,22 @@ irdma_generate_flush_completions(struct irdma_qp *iwqp)
list_add_tail(&cmpl->list, &iwqp->iwscq->cmpl_generated);
compl_generated |= SQ_COMPL_GENERATED;
}
+ spin_unlock_irqrestore(&iwqp->lock, flags2);
+ spin_unlock_irqrestore(&iwqp->iwscq->lock, flags);
} else {
- mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, IRDMA_FLUSH_DELAY_MS / 2);
+ spin_unlock_irqrestore(&iwqp->iwscq->lock, flags);
+ reschedule = true;
}
- spin_unlock_irqrestore(&iwqp->iwscq->lock, flags);
spin_lock_irqsave(&iwqp->iwrcq->lock, flags);
if (irdma_cq_empty(iwqp->iwrcq)) {
+ unsigned long flags2;
+
+ spin_lock_irqsave(&iwqp->lock, flags2);
while (IRDMA_RING_MORE_WORK(*rq_ring)) {
- cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL);
+ cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
if (!cmpl) {
+ spin_unlock_irqrestore(&iwqp->lock, flags2);
spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags);
return;
}
@@ -2295,6 +2362,7 @@ irdma_generate_flush_completions(struct irdma_qp *iwqp)
cmpl->cpi.wr_id = qp->rq_wrid_array[wqe_idx];
cmpl->cpi.signaled = 1;
cmpl->cpi.op_type = IRDMA_OP_TYPE_REC;
+ cmpl->cpi.q_type = IRDMA_CQE_QTYPE_RQ;
/* remove the RQ WR by moving RQ tail */
IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1);
irdma_debug(iwqp->sc_qp.dev, IRDMA_DEBUG_DEV,
@@ -2304,22 +2372,26 @@ irdma_generate_flush_completions(struct irdma_qp *iwqp)
compl_generated |= RQ_COMPL_GENERATED;
}
+ spin_unlock_irqrestore(&iwqp->lock, flags2);
+ spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags);
} else {
- mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, IRDMA_FLUSH_DELAY_MS / 2);
+ spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags);
+ reschedule = true;
}
- spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags);
- if (iwqp->iwscq == iwqp->iwrcq) {
- if (compl_generated)
+ if (reschedule)
+ irdma_sched_qp_flush_work(iwqp);
+ if (compl_generated) {
+ if (iwqp->iwscq == iwqp->iwrcq) {
irdma_comp_handler(iwqp->iwscq);
- return;
- }
- if (compl_generated & SQ_COMPL_GENERATED)
- irdma_comp_handler(iwqp->iwscq);
- if (compl_generated & RQ_COMPL_GENERATED)
- irdma_comp_handler(iwqp->iwrcq);
- if (compl_generated)
+ } else {
+ if (compl_generated & SQ_COMPL_GENERATED)
+ irdma_comp_handler(iwqp->iwscq);
+ if (compl_generated & RQ_COMPL_GENERATED)
+ irdma_comp_handler(iwqp->iwrcq);
+ }
irdma_debug(iwqp->sc_qp.dev, IRDMA_DEBUG_VERBS,
"0x%X (SQ 0x1, RQ 0x2, both 0x3) completions generated for QP %d\n",
compl_generated, iwqp->ibqp.qp_num);
+ }
}
diff --git a/sys/dev/irdma/irdma_verbs.c b/sys/dev/irdma/irdma_verbs.c
index ad9d7054e09f..ef48f5f75fda 100644
--- a/sys/dev/irdma/irdma_verbs.c
+++ b/sys/dev/irdma/irdma_verbs.c
@@ -58,33 +58,38 @@ irdma_query_device(struct ib_device *ibdev,
ether_addr_copy((u8 *)&props->sys_image_guid, IF_LLADDR(iwdev->netdev));
props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
irdma_fw_minor_ver(&rf->sc_dev);
- props->device_cap_flags = iwdev->device_cap_flags;
+ props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
+ IB_DEVICE_MEM_MGT_EXTENSIONS;
+ props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
props->vendor_id = pcidev->vendor;
props->vendor_part_id = pcidev->device;
props->hw_ver = pcidev->revision;
- props->page_size_cap = SZ_4K | SZ_2M | SZ_1G;
+ props->page_size_cap = hw_attrs->page_size_cap;
props->max_mr_size = hw_attrs->max_mr_size;
props->max_qp = rf->max_qp - rf->used_qps;
props->max_qp_wr = hw_attrs->max_qp_wr;
set_max_sge(props, rf);
props->max_cq = rf->max_cq - rf->used_cqs;
- props->max_cqe = rf->max_cqe;
+ props->max_cqe = rf->max_cqe - 1;
props->max_mr = rf->max_mr - rf->used_mrs;
props->max_mw = props->max_mr;
props->max_pd = rf->max_pd - rf->used_pds;
props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
props->max_qp_rd_atom = hw_attrs->max_hw_ird;
props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
- if (rdma_protocol_roce(ibdev, 1))
+ if (rdma_protocol_roce(ibdev, 1)) {
+ props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN;
props->max_pkeys = IRDMA_PKEY_TBL_SZ;
- props->max_ah = rf->max_ah;
- props->max_mcast_grp = rf->max_mcg;
- props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
- props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
+ props->max_ah = rf->max_ah;
+ if (hw_attrs->uk_attrs.hw_rev == IRDMA_GEN_2) {
+ props->max_mcast_grp = rf->max_mcg;
+ props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
+ props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
+ }
+ }
props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;
-#define HCA_CLOCK_TIMESTAMP_MASK 0x1ffff
if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_2)
- props->timestamp_mask = HCA_CLOCK_TIMESTAMP_MASK;
+ props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
return 0;
}
@@ -275,6 +280,22 @@ irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
spin_unlock_irqrestore(&iwcq->lock, flags);
}
+static u64 irdma_compute_push_wqe_offset(struct irdma_device *iwdev, u32 page_idx){
+ u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
+
+ if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2) {
+ /* skip over db page */
+ bar_off += IRDMA_HW_PAGE_SIZE;
+ /* skip over reserved space */
+ bar_off += IRDMA_PF_BAR_RSVD;
+ }
+
+ /* push wqe page */
+ bar_off += (u64)page_idx * IRDMA_HW_PAGE_SIZE;
+
+ return bar_off;
+}
+
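/*
 * Worked example (illustrative, not part of this patch): on GEN-2 with
 * 4 KB hardware pages, push page index 2 resolves to
 *
 *	hw_regs[IRDMA_DB_ADDR_OFFSET]	doorbell BAR base
 *	+ IRDMA_HW_PAGE_SIZE		skip the doorbell page
 *	+ IRDMA_PF_BAR_RSVD		skip the reserved hole
 *	+ 2 * IRDMA_HW_PAGE_SIZE	the push page itself
 *
 * The u64 cast on page_idx also keeps the multiply from overflowing
 * 32 bits for large indices, which the open-coded version below lacked.
 */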
void
irdma_remove_push_mmap_entries(struct irdma_qp *iwqp)
{
@@ -299,17 +320,8 @@ irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext,
WARN_ON_ONCE(iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev < IRDMA_GEN_2);
- bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
+ bar_off = irdma_compute_push_wqe_offset(iwdev, iwqp->sc_qp.push_idx);
- if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2) {
- /* skip over db page */
- bar_off += IRDMA_HW_PAGE_SIZE;
- /* skip over reserved space */
- bar_off += IRDMA_PF_BAR_RSVD;
- }
-
- /* push wqe page */
- bar_off += iwqp->sc_qp.push_idx * IRDMA_HW_PAGE_SIZE;
iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
bar_off, IRDMA_MMAP_IO_WC,
push_wqe_mmap_key);
@@ -356,6 +368,84 @@ irdma_setup_virt_qp(struct irdma_device *iwdev,
}
/**
+ * irdma_setup_umode_qp - setup sq and rq size in user mode qp
+ * @udata: user data
+ * @iwdev: iwarp device
+ * @iwqp: qp ptr (user or kernel)
+ * @info: initialize info to return
+ * @init_attr: Initial QP create attributes
+ */
+int
+irdma_setup_umode_qp(struct ib_udata *udata,
+ struct irdma_device *iwdev,
+ struct irdma_qp *iwqp,
+ struct irdma_qp_init_info *info,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
+ struct irdma_create_qp_req req = {0};
+ unsigned long flags;
+ int ret;
+
+ ret = ib_copy_from_udata(&req, udata,
+ min(sizeof(req), udata->inlen));
+ if (ret) {
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
+ "ib_copy_from_data fail\n");
+ return ret;
+ }
+
+ iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
+ iwqp->user_mode = 1;
+ if (req.user_wqe_bufs) {
+ struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
+
+ info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
+ spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+ iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
+ &ucontext->qp_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+
+ if (!iwqp->iwpbl) {
+ ret = -ENODATA;
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
+ "no pbl info\n");
+ return ret;
+ }
+ }
+
+ if (ukinfo->abi_ver <= 5) {
+ /**
+ * For ABI versions less than 6, raw SQ and RQ quanta
+ * are passed in cap.max_send_wr and cap.max_recv_wr.
+ */
+ iwqp->max_send_wr = init_attr->cap.max_send_wr;
+ iwqp->max_recv_wr = init_attr->cap.max_recv_wr;
+ ukinfo->sq_size = init_attr->cap.max_send_wr;
+ ukinfo->rq_size = init_attr->cap.max_recv_wr;
+ irdma_uk_calc_shift_wq(ukinfo, &ukinfo->sq_shift, &ukinfo->rq_shift);
+ } else {
+ ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
+ &ukinfo->sq_shift);
+ if (ret)
+ return ret;
+
+ ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
+ &ukinfo->rq_shift);
+ if (ret)
+ return ret;
+
+ iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
+ iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
+ ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
+ ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
+ }
+ irdma_setup_virt_qp(iwdev, iwqp, info);
+
+ return 0;
+}
+
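/*
 * Worked example (illustrative, not part of this patch): for ABI >= 6,
 * if irdma_uk_calc_depth_shift_sq() yields sq_depth = 128 quanta with
 * sq_shift = 2 (one WR spans up to 1 << 2 quanta), then
 *
 *	iwqp->max_send_wr = (128 - IRDMA_SQ_RSVD) >> 2
 *	ukinfo->sq_size   = 128 >> 2 = 32
 *
 * i.e. userspace is told a limit in work requests while the hardware
 * ring stays sized in quanta.
 */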
+/**
* irdma_setup_kmode_qp - setup initialization for kernel mode qp
* @iwdev: iwarp device
* @iwqp: qp ptr (user or kernel)
@@ -369,48 +459,35 @@ irdma_setup_kmode_qp(struct irdma_device *iwdev,
struct ib_qp_init_attr *init_attr)
{
struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
- u32 sqdepth, rqdepth;
- u8 sqshift, rqshift;
u32 size;
int status;
struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
- struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
-
- irdma_get_wqe_shift(uk_attrs,
- uk_attrs->hw_rev >= IRDMA_GEN_2 ? ukinfo->max_sq_frag_cnt + 1 :
- ukinfo->max_sq_frag_cnt,
- ukinfo->max_inline_data, &sqshift);
- status = irdma_get_sqdepth(uk_attrs->max_hw_wq_quanta, ukinfo->sq_size,
- sqshift, &sqdepth);
+
+ status = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
+ &ukinfo->sq_shift);
if (status)
return status;
- if (uk_attrs->hw_rev == IRDMA_GEN_1)
- rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
- else
- irdma_get_wqe_shift(uk_attrs, ukinfo->max_rq_frag_cnt, 0,
- &rqshift);
-
- status = irdma_get_rqdepth(uk_attrs->max_hw_rq_quanta, ukinfo->rq_size,
- rqshift, &rqdepth);
+ status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
+ &ukinfo->rq_shift);
if (status)
return status;
iwqp->kqp.sq_wrid_mem =
- kcalloc(sqdepth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
+ kcalloc(ukinfo->sq_depth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
if (!iwqp->kqp.sq_wrid_mem)
return -ENOMEM;
iwqp->kqp.rq_wrid_mem =
- kcalloc(rqdepth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
+ kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
if (!iwqp->kqp.rq_wrid_mem) {
kfree(iwqp->kqp.sq_wrid_mem);
iwqp->kqp.sq_wrid_mem = NULL;
return -ENOMEM;
}
- iwqp->kqp.sig_trk_mem = kcalloc(sqdepth, sizeof(u32), GFP_KERNEL);
- memset(iwqp->kqp.sig_trk_mem, 0, sqdepth * sizeof(u32));
+ iwqp->kqp.sig_trk_mem = kcalloc(ukinfo->sq_depth, sizeof(u32), GFP_KERNEL);
+ memset(iwqp->kqp.sig_trk_mem, 0, ukinfo->sq_depth * sizeof(u32));
if (!iwqp->kqp.sig_trk_mem) {
kfree(iwqp->kqp.sq_wrid_mem);
iwqp->kqp.sq_wrid_mem = NULL;
@@ -422,7 +499,7 @@ irdma_setup_kmode_qp(struct irdma_device *iwdev,
ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;
- size = (sqdepth + rqdepth) * IRDMA_QP_WQE_MIN_SIZE;
+ size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE;
size += (IRDMA_SHADOW_AREA_SIZE << 3);
mem->size = size;
@@ -438,16 +515,18 @@ irdma_setup_kmode_qp(struct irdma_device *iwdev,
ukinfo->sq = mem->va;
info->sq_pa = mem->pa;
- ukinfo->rq = &ukinfo->sq[sqdepth];
- info->rq_pa = info->sq_pa + (sqdepth * IRDMA_QP_WQE_MIN_SIZE);
- ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
- info->shadow_area_pa = info->rq_pa + (rqdepth * IRDMA_QP_WQE_MIN_SIZE);
- ukinfo->sq_size = sqdepth >> sqshift;
- ukinfo->rq_size = rqdepth >> rqshift;
+ ukinfo->rq = &ukinfo->sq[ukinfo->sq_depth];
+ info->rq_pa = info->sq_pa + (ukinfo->sq_depth * IRDMA_QP_WQE_MIN_SIZE);
+ ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem;
+ info->shadow_area_pa = info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
+ ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
+ ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
ukinfo->qp_id = iwqp->ibqp.qp_num;
- init_attr->cap.max_send_wr = (sqdepth - IRDMA_SQ_RSVD) >> sqshift;
- init_attr->cap.max_recv_wr = (rqdepth - IRDMA_RQ_RSVD) >> rqshift;
+ iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
+ iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
+ init_attr->cap.max_send_wr = iwqp->max_send_wr;
+ init_attr->cap.max_recv_wr = iwqp->max_recv_wr;
return 0;
}
@@ -583,15 +662,23 @@ irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
}
void
+irdma_sched_qp_flush_work(struct irdma_qp *iwqp)
+{
+ irdma_qp_add_ref(&iwqp->ibqp);
+ if (mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
+ msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)))
+ irdma_qp_rem_ref(&iwqp->ibqp);
+}
+
+void
irdma_flush_worker(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush);
- unsigned long flags;
- spin_lock_irqsave(&iwqp->lock, flags); /* Don't allow more posting while generating completions */
irdma_generate_flush_completions(iwqp);
- spin_unlock_irqrestore(&iwqp->lock, flags);
+ /* For the add in irdma_sched_qp_flush_work */
+ irdma_qp_rem_ref(&iwqp->ibqp);
}
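/*
 * Lifecycle sketch (illustrative, not part of this patch):
 *
 *	irdma_sched_qp_flush_work(iwqp);  // +1 QP ref, (re)arm dwork_flush;
 *	                                  // if the work was already pending,
 *	                                  // drop the extra ref immediately
 *	irdma_flush_worker(work);         // generate completions, -1 ref
 *
 * The reference pinned by the scheduler keeps the QP alive while a flush
 * work item is queued, and the worker no longer takes iwqp->lock itself:
 * irdma_generate_flush_completions() now acquires it under the CQ locks.
 */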
static int
@@ -680,6 +767,8 @@ int
irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
+#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
+#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
struct irdma_qp *iwqp = to_iwqp(ibqp);
struct irdma_device *iwdev = iwqp->iwdev;
@@ -689,7 +778,7 @@ irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct irdma_udp_offload_info *udp_info;
struct irdma_modify_qp_info info = {0};
struct irdma_modify_qp_resp uresp = {};
- struct irdma_modify_qp_req ureq = {};
+ struct irdma_modify_qp_req ureq;
unsigned long flags;
u8 issue_modify_qp = 0;
int ret = 0;
@@ -698,6 +787,12 @@ irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
roce_info = &iwqp->roce_info;
udp_info = &iwqp->udp_info;
+ if (udata) {
+ if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
+ (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
+ return -EINVAL;
+ }
+
if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
return -EOPNOTSUPP;
@@ -745,6 +840,11 @@ irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
udp_info->ttl = attr->ah_attr.grh.hop_limit;
udp_info->flow_label = attr->ah_attr.grh.flow_label;
udp_info->tos = attr->ah_attr.grh.traffic_class;
+
+ udp_info->src_port = kc_rdma_get_udp_sport(udp_info->flow_label,
+ ibqp->qp_num,
+ roce_info->dest_qp);
+
irdma_qp_rem_qos(&iwqp->sc_qp);
dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
if (iwqp->sc_qp.vsi->dscp_mode)
@@ -752,14 +852,14 @@ irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(udp_info->tos)];
else
ctx_info->user_pri = rt_tos2priority(udp_info->tos);
- iwqp->sc_qp.user_pri = ctx_info->user_pri;
- if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
- return -ENOMEM;
- irdma_qp_add_qos(&iwqp->sc_qp);
}
ret = kc_irdma_set_roce_cm_info(iwqp, attr, &vlan_id);
if (ret)
return ret;
+ if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
+ return -ENOMEM;
+ iwqp->sc_qp.user_pri = ctx_info->user_pri;
+ irdma_qp_add_qos(&iwqp->sc_qp);
if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
vlan_id = 0;
@@ -773,7 +873,6 @@ irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
av->attrs = attr->ah_attr;
rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
- roce_info->local_qp = ibqp->qp_num;
if (av->sgid_addr.saddr.sa_family == AF_INET6) {
__be32 *daddr =
av->dgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32;
@@ -853,7 +952,9 @@ irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
spin_lock_irqsave(&iwqp->lock, flags);
if (attr_mask & IB_QP_STATE) {
- if (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state, iwqp->ibqp.qp_type, attr_mask)) {
+ if (!kc_ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
+ iwqp->ibqp.qp_type, attr_mask,
+ IB_LINK_LAYER_ETHERNET)) {
irdma_print("modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
iwqp->ibqp.qp_num, iwqp->ibqp_state,
attr->qp_state);
@@ -932,7 +1033,7 @@ irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
spin_unlock_irqrestore(&iwqp->lock, flags);
- if (udata) {
+ if (udata && udata->inlen) {
if (ib_copy_from_udata(&ureq, udata,
min(sizeof(ureq), udata->inlen)))
return -EINVAL;
@@ -973,22 +1074,19 @@ irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
if (iwqp->ibqp_state > IB_QPS_RTS &&
!iwqp->flush_issued) {
- iwqp->flush_issued = 1;
- if (!iwqp->user_mode)
- queue_delayed_work(iwqp->iwdev->cleanup_wq,
- &iwqp->dwork_flush,
- msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
spin_unlock_irqrestore(&iwqp->lock, flags);
irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
IRDMA_FLUSH_RQ |
IRDMA_FLUSH_WAIT);
+ iwqp->flush_issued = 1;
+
} else {
spin_unlock_irqrestore(&iwqp->lock, flags);
}
} else {
iwqp->ibqp_state = attr->qp_state;
}
- if (udata && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
struct irdma_ucontext *ucontext;
ucontext = rdma_udata_to_drv_context(udata,
@@ -1031,6 +1129,8 @@ int
irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
struct ib_udata *udata)
{
+#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
+#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
struct irdma_qp *iwqp = to_iwqp(ibqp);
struct irdma_device *iwdev = iwqp->iwdev;
struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
@@ -1045,6 +1145,12 @@ irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
int err;
unsigned long flags;
+ if (udata) {
+ if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
+ (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
+ return -EINVAL;
+ }
+
if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
return -EOPNOTSUPP;
@@ -1122,12 +1228,14 @@ irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
goto exit;
}
- /* fallthrough */
+ info.next_iwarp_state = IRDMA_QP_STATE_TERMINATE;
+ issue_modify_qp = 1;
+ break;
case IB_QPS_ERR:
case IB_QPS_RESET:
if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
spin_unlock_irqrestore(&iwqp->lock, flags);
- if (udata) {
+ if (udata && udata->inlen) {
if (ib_copy_from_udata(&ureq, udata,
min(sizeof(ureq), udata->inlen)))
return -EINVAL;
@@ -1197,13 +1305,13 @@ irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
if (dont_wait) {
- if (iwqp->cm_id && iwqp->hw_tcp_state) {
+ if (iwqp->hw_tcp_state) {
spin_lock_irqsave(&iwqp->lock, flags);
iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
iwqp->last_aeq = IRDMA_AE_RESET_SENT;
spin_unlock_irqrestore(&iwqp->lock, flags);
- irdma_cm_disconn(iwqp);
}
+ irdma_cm_disconn(iwqp);
} else {
int close_timer_started;
@@ -1224,7 +1332,7 @@ irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
}
}
}
- if (attr_mask & IB_QP_STATE && udata &&
+ if (attr_mask & IB_QP_STATE && udata && udata->outlen &&
dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
struct irdma_ucontext *ucontext;
@@ -1325,6 +1433,7 @@ static int
irdma_resize_cq(struct ib_cq *ibcq, int entries,
struct ib_udata *udata)
{
+#define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
struct irdma_cq *iwcq = to_iwcq(ibcq);
struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
struct irdma_cqp_request *cqp_request;
@@ -1347,6 +1456,9 @@ irdma_resize_cq(struct ib_cq *ibcq, int entries,
IRDMA_FEATURE_CQ_RESIZE))
return -EOPNOTSUPP;
+ if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN)
+ return -EINVAL;
+
if (entries > rf->max_cqe)
return -EINVAL;
@@ -1362,7 +1474,7 @@ irdma_resize_cq(struct ib_cq *ibcq, int entries,
return 0;
if (udata) {
- struct irdma_resize_cq_req req = {0};
+ struct irdma_resize_cq_req req = {};
struct irdma_ucontext *ucontext =
rdma_udata_to_drv_context(udata, struct irdma_ucontext,
ibucontext);
@@ -1499,7 +1611,7 @@ irdma_free_stag(struct irdma_device *iwdev, u32 stag)
u32
irdma_create_stag(struct irdma_device *iwdev)
{
- u32 stag = 0;
+ u32 stag;
u32 stag_index = 0;
u32 next_stag_index;
u32 driver_key;
@@ -1518,7 +1630,7 @@ irdma_create_stag(struct irdma_device *iwdev)
iwdev->rf->max_mr, &stag_index,
&next_stag_index);
if (ret)
- return stag;
+ return 0;
stag = stag_index << IRDMA_CQPSQ_STAG_IDX_S;
stag |= driver_key;
stag += (u32)consumer_key;
@@ -1588,10 +1700,11 @@ irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
* @rf: RDMA PCI function
* @iwmr: mr pointer for this memory registration
* @use_pbles: flag if to use pble's
+ * @lvl_1_only: request only level 1 pble if true
*/
static int
irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
- bool use_pbles)
+ bool use_pbles, bool lvl_1_only)
{
struct irdma_pbl *iwpbl = &iwmr->iwpbl;
struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
@@ -1602,7 +1715,7 @@ irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
if (use_pbles) {
status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
- false);
+ lvl_1_only);
if (status)
return status;
@@ -1646,16 +1759,10 @@ irdma_handle_q_mem(struct irdma_device *iwdev,
bool ret = true;
pg_size = iwmr->page_size;
- err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles);
+ err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, true);
if (err)
return err;
- if (use_pbles && palloc->level != PBLE_LEVEL_1) {
- irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
- iwpbl->pbl_allocated = false;
- return -ENOMEM;
- }
-
if (use_pbles)
arr = palloc->level1.addr;
@@ -1772,7 +1879,7 @@ irdma_dealloc_mw(struct ib_mw *ibmw)
cqp_info = &cqp_request->info;
info = &cqp_info->in.u.dealloc_stag.info;
memset(info, 0, sizeof(*info));
- info->pd_id = iwpd->sc_pd.pd_id & 0x00007fff;
+ info->pd_id = iwpd->sc_pd.pd_id;
info->stag_idx = RS_64_1(ibmw->rkey, IRDMA_CQPSQ_STAG_IDX_S);
info->mr = false;
cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
@@ -1797,10 +1904,11 @@ irdma_hw_alloc_stag(struct irdma_device *iwdev,
struct irdma_mr *iwmr)
{
struct irdma_allocate_stag_info *info;
- struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
- int status;
+ struct ib_pd *pd = iwmr->ibmr.pd;
+ struct irdma_pd *iwpd = to_iwpd(pd);
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
+ int status;
cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
if (!cqp_request)
@@ -1813,6 +1921,7 @@ irdma_hw_alloc_stag(struct irdma_device *iwdev,
info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
info->pd_id = iwpd->sc_pd.pd_id;
info->total_len = iwmr->len;
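+ /* a PD created with an unsafe global rkey may access all memory */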
+ info->all_memory = (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) ? true : false;
info->remote_access = true;
cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
cqp_info->post_sq = 1;
@@ -1820,6 +1929,8 @@ irdma_hw_alloc_stag(struct irdma_device *iwdev,
cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+ if (!status)
+ iwmr->is_hwreg = 1;
return status;
}
@@ -1840,9 +1951,17 @@ irdma_set_page(struct ib_mr *ibmr, u64 addr)
if (unlikely(iwmr->npages == iwmr->page_cnt))
return -ENOMEM;
- pbl = palloc->level1.addr;
- pbl[iwmr->npages++] = addr;
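+ /* for a level-2 PBLE, locate the leaf page holding this entry (512 entries per leaf) */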
+ if (palloc->level == PBLE_LEVEL_2) {
+ struct irdma_pble_info *palloc_info =
+ palloc->level2.leaf + (iwmr->npages >> PBLE_512_SHIFT);
+
+ palloc_info->addr[iwmr->npages & (PBLE_PER_PAGE - 1)] = addr;
+ } else {
+ pbl = palloc->level1.addr;
+ pbl[iwmr->npages] = addr;
+ }
+ iwmr->npages++;
return 0;
}
@@ -1870,13 +1989,14 @@ irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
* @iwmr: irdma mr pointer
* @access: access for MR
*/
-static int
+int
irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
u16 access)
{
struct irdma_pbl *iwpbl = &iwmr->iwpbl;
struct irdma_reg_ns_stag_info *stag_info;
- struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
+ struct ib_pd *pd = iwmr->ibmr.pd;
+ struct irdma_pd *iwpd = to_iwpd(pd);
struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
@@ -1893,6 +2013,7 @@ irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
stag_info->stag_key = (u8)iwmr->stag;
stag_info->total_len = iwmr->len;
+ stag_info->all_memory = (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) ? true : false;
stag_info->access_rights = irdma_get_mr_access(access);
stag_info->pd_id = iwpd->sc_pd.pd_id;
if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
@@ -1920,6 +2041,9 @@ irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
ret = irdma_handle_cqp_op(iwdev->rf, cqp_request);
irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+ if (!ret)
+ iwmr->is_hwreg = 1;
+
return ret;
}
@@ -1937,13 +2061,14 @@ irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
u64 virt, int access,
struct ib_udata *udata)
{
+#define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
struct irdma_device *iwdev = to_iwdev(pd->device);
struct irdma_ucontext *ucontext;
struct irdma_pble_alloc *palloc;
struct irdma_pbl *iwpbl;
struct irdma_mr *iwmr;
struct ib_umem *region;
- struct irdma_mem_reg_req req;
+ struct irdma_mem_reg_req req = {};
u32 total, stag = 0;
u8 shadow_pgcnt = 1;
bool use_pbles = false;
@@ -1951,7 +2076,10 @@ irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
int err = -EINVAL;
int ret;
- if (!len || len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
+ if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
+ return ERR_PTR(-EINVAL);
+
+ if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN)
return ERR_PTR(-EINVAL);
region = ib_umem_get(pd->uobject->context, start, len, access, 0);
@@ -1979,9 +2107,9 @@ irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
iwmr->ibmr.pd = pd;
iwmr->ibmr.device = pd->device;
iwmr->ibmr.iova = virt;
- iwmr->page_size = PAGE_SIZE;
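+ /* register with the fixed HW page size rather than the OS PAGE_SIZE */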
+ iwmr->page_size = IRDMA_HW_PAGE_SIZE;
+ iwmr->page_msk = ~(IRDMA_HW_PAGE_SIZE - 1);
- iwmr->page_msk = PAGE_MASK;
iwmr->len = region->length;
iwpbl->user_base = virt;
palloc = &iwpbl->pble_alloc;
@@ -2032,7 +2160,7 @@ irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
case IRDMA_MEMREG_TYPE_MEM:
use_pbles = (iwmr->page_cnt != 1);
- err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles);
+ err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
if (err)
goto error;
@@ -2054,6 +2182,7 @@ irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
iwmr->stag = stag;
iwmr->ibmr.rkey = stag;
iwmr->ibmr.lkey = stag;
+ iwmr->access = access;
err = irdma_hwreg_mr(iwdev, iwmr, access);
if (err) {
irdma_free_stag(iwdev, stag);
@@ -2078,6 +2207,120 @@ error:
return ERR_PTR(err);
}
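+/**
+ * irdma_hwdereg_mr - destroy the HW stag backing a memory region
+ * @ib_mr: ib mr whose HW registration is torn down
+ */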
+int
+irdma_hwdereg_mr(struct ib_mr *ib_mr)
+{
+ struct irdma_device *iwdev = to_iwdev(ib_mr->device);
+ struct irdma_mr *iwmr = to_iwmr(ib_mr);
+ struct irdma_pd *iwpd = to_iwpd(ib_mr->pd);
+ struct irdma_dealloc_stag_info *info;
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+
+ /*
+ * Skip the HW MR de-register when it was already de-registered
+ * during an MR re-register attempt and the re-registration failed.
+ */
+ if (!iwmr->is_hwreg)
+ return 0;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.dealloc_stag.info;
+ memset(info, 0, sizeof(*info));
+ info->pd_id = iwpd->sc_pd.pd_id;
+ info->stag_idx = RS_64_1(ib_mr->rkey, IRDMA_CQPSQ_STAG_IDX_S);
+ info->mr = true;
+ if (iwpbl->pbl_allocated)
+ info->dealloc_pbl = true;
+
+ cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
+ cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+
+ if (!status)
+ iwmr->is_hwreg = 0;
+
+ return status;
+}
+
+/**
+ * irdma_rereg_mr_trans - re-register a user MR with a translation change
+ * @iwmr: ptr of iwmr
+ * @start: virtual start address
+ * @len: length of mr
+ * @virt: virtual address
+ * @udata: user data
+ *
+ * Re-register a user memory region when a change of translation is
+ * requested. A new region is registered while the stag from the
+ * original registration is reused.
+ */
+struct ib_mr *
+irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len,
+ u64 virt, struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
+ struct ib_pd *pd = iwmr->ibmr.pd;
+ struct ib_umem *region;
+ bool use_pbles;
+ int err;
+
+ region = ib_umem_get(pd->uobject->context, start, len, iwmr->access, 0);
+
+ if (IS_ERR(region)) {
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
+ "Failed to create ib_umem region\n");
+ return (struct ib_mr *)region;
+ }
+
+ iwmr->region = region;
+ iwmr->ibmr.iova = virt;
+ iwmr->ibmr.pd = pd;
+ iwmr->page_size = PAGE_SIZE;
+
+ iwmr->len = region->length;
+ iwpbl->user_base = virt;
+ iwmr->page_cnt = irdma_ib_umem_num_dma_blocks(region, iwmr->page_size,
+ virt);
+
+ use_pbles = (iwmr->page_cnt != 1);
+
+ err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
+ if (err)
+ goto error;
+
+ if (use_pbles) {
+ err = irdma_check_mr_contiguous(palloc,
+ iwmr->page_size);
+ if (err) {
+ irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
+ iwpbl->pbl_allocated = false;
+ }
+ }
+
+ err = irdma_hwreg_mr(iwdev, iwmr, iwmr->access);
+ if (err)
+ goto error;
+
+ return &iwmr->ibmr;
+
+error:
+ if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated) {
+ irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
+ iwpbl->pbl_allocated = false;
+ }
+ ib_umem_release(region);
+ iwmr->region = NULL;
+
+ return ERR_PTR(err);
+}
+
/**
* irdma_reg_phys_mr - register kernel physical memory
* @pd: ibpd pointer
@@ -2259,31 +2502,20 @@ irdma_post_send(struct ib_qp *ibqp,
info.stag_to_inv = ib_wr->ex.invalidate_rkey;
}
- if (ib_wr->send_flags & IB_SEND_INLINE) {
- info.op.inline_send.data = (void *)(unsigned long)
- ib_wr->sg_list[0].addr;
- info.op.inline_send.len = ib_wr->sg_list[0].length;
- if (iwqp->ibqp.qp_type == IB_QPT_UD ||
- iwqp->ibqp.qp_type == IB_QPT_GSI) {
- ah = to_iwah(ud_wr(ib_wr)->ah);
- info.op.inline_send.ah_id = ah->sc_ah.ah_info.ah_idx;
- info.op.inline_send.qkey = ud_wr(ib_wr)->remote_qkey;
- info.op.inline_send.dest_qp = ud_wr(ib_wr)->remote_qpn;
- }
+ info.op.send.num_sges = ib_wr->num_sge;
+ info.op.send.sg_list = (struct irdma_sge *)ib_wr->sg_list;
+ if (iwqp->ibqp.qp_type == IB_QPT_UD ||
+ iwqp->ibqp.qp_type == IB_QPT_GSI) {
+ ah = to_iwah(ud_wr(ib_wr)->ah);
+ info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
+ info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
+ info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
+ }
+
+ if (ib_wr->send_flags & IB_SEND_INLINE)
err = irdma_uk_inline_send(ukqp, &info, false);
- } else {
- info.op.send.num_sges = ib_wr->num_sge;
- info.op.send.sg_list = (struct irdma_sge *)
- ib_wr->sg_list;
- if (iwqp->ibqp.qp_type == IB_QPT_UD ||
- iwqp->ibqp.qp_type == IB_QPT_GSI) {
- ah = to_iwah(ud_wr(ib_wr)->ah);
- info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
- info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
- info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
- }
+ else
err = irdma_uk_send(ukqp, &info, false);
- }
break;
case IB_WR_RDMA_WRITE_WITH_IMM:
if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) {
@@ -2300,19 +2532,14 @@ irdma_post_send(struct ib_qp *ibqp,
else
info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
- if (ib_wr->send_flags & IB_SEND_INLINE) {
- info.op.inline_rdma_write.data = (void *)(uintptr_t)ib_wr->sg_list[0].addr;
- info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
- info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
- info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+ info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
+ info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
+ info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
+ info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+ if (ib_wr->send_flags & IB_SEND_INLINE)
err = irdma_uk_inline_rdma_write(ukqp, &info, false);
- } else {
- info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
- info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
- info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
- info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+ else
err = irdma_uk_rdma_write(ukqp, &info, false);
- }
break;
case IB_WR_RDMA_READ_WITH_INV:
inv_stag = true;
@@ -2350,11 +2577,14 @@ irdma_post_send(struct ib_qp *ibqp,
stag_info.addr_type = IRDMA_ADDR_TYPE_VA_BASED;
stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
stag_info.total_len = iwmr->ibmr.length;
- stag_info.reg_addr_pa = *palloc->level1.addr;
- stag_info.first_pm_pbl_index = palloc->level1.idx;
- stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
- if (iwmr->npages > IRDMA_MIN_PAGES_PER_FMR)
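+ /* pick the starting PBL index from the root (level 2) or the first leaf (level 1) */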
+ if (palloc->level == PBLE_LEVEL_2) {
+ stag_info.chunk_size = 3;
+ stag_info.first_pm_pbl_index = palloc->level2.root.idx;
+ } else {
stag_info.chunk_size = 1;
+ stag_info.first_pm_pbl_index = palloc->level1.idx;
+ }
+ stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
err = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info,
true);
break;
@@ -2373,11 +2603,14 @@ irdma_post_send(struct ib_qp *ibqp,
ib_wr = ib_wr->next;
}
- if (!iwqp->flush_issued && iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS)
- irdma_uk_qp_post_wr(ukqp);
- else if (iwqp->flush_issued)
- mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, IRDMA_FLUSH_DELAY_MS);
- spin_unlock_irqrestore(&iwqp->lock, flags);
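+ /* ring the doorbell under the lock; defer flush scheduling until after unlock */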
+ if (!iwqp->flush_issued) {
+ if (iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS)
+ irdma_uk_qp_post_wr(ukqp);
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ } else {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ irdma_sched_qp_flush_work(iwqp);
+ }
if (err)
*bad_wr = ib_wr;
@@ -2425,9 +2658,9 @@ irdma_post_recv(struct ib_qp *ibqp,
}
out:
- if (iwqp->flush_issued)
- mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, IRDMA_FLUSH_DELAY_MS);
spin_unlock_irqrestore(&iwqp->lock, flags);
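+ /* schedule the flush work without holding the qp lock */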
+ if (iwqp->flush_issued)
+ irdma_sched_qp_flush_work(iwqp);
if (err)
*bad_wr = ib_wr;
@@ -2456,6 +2689,8 @@ irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode)
return IB_WC_WR_FLUSH_ERR;
case FLUSH_MW_BIND_ERR:
return IB_WC_MW_BIND_ERR;
+ case FLUSH_REM_INV_REQ_ERR:
+ return IB_WC_REM_INV_REQ_ERR;
case FLUSH_RETRY_EXC_ERR:
return IB_WC_RETRY_EXC_ERR;
case FLUSH_FATAL_ERR:
@@ -2464,6 +2699,64 @@ irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode)
}
}
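+/**
+ * set_ib_wc_op_sq - set the opcode for a send queue completion
+ * @cq_poll_info: completion info from the CQE
+ * @entry: ib work completion to fill in
+ */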
+static inline void
+set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
+ struct ib_wc *entry)
+{
+ struct irdma_sc_qp *qp;
+
+ switch (cq_poll_info->op_type) {
+ case IRDMA_OP_TYPE_RDMA_WRITE:
+ case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
+ entry->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
+ case IRDMA_OP_TYPE_RDMA_READ:
+ entry->opcode = IB_WC_RDMA_READ;
+ break;
+ case IRDMA_OP_TYPE_SEND_SOL:
+ case IRDMA_OP_TYPE_SEND_SOL_INV:
+ case IRDMA_OP_TYPE_SEND_INV:
+ case IRDMA_OP_TYPE_SEND:
+ entry->opcode = IB_WC_SEND;
+ break;
+ case IRDMA_OP_TYPE_FAST_REG_NSMR:
+ entry->opcode = IB_WC_REG_MR;
+ break;
+ case IRDMA_OP_TYPE_INV_STAG:
+ entry->opcode = IB_WC_LOCAL_INV;
+ break;
+ default:
+ qp = cq_poll_info->qp_handle;
+ ibdev_err(irdma_get_ibdev(qp->dev), "Invalid opcode = %d in CQE\n",
+ cq_poll_info->op_type);
+ entry->status = IB_WC_GENERAL_ERR;
+ }
+}
+
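+/**
+ * set_ib_wc_op_rq - set the opcode for a receive queue completion
+ * @cq_poll_info: completion info from the CQE
+ * @entry: ib work completion to fill in
+ * @send_imm_support: true when the QP supports send with immediate
+ */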
+static inline void
+set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
+ struct ib_wc *entry, bool send_imm_support)
+{
+ /*
+ * iWARP does not support send with immediate, so any immediate
+ * data present must have arrived via RDMA write with immediate.
+ */
+ if (!send_imm_support) {
+ entry->opcode = cq_poll_info->imm_valid ?
+ IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
+ return;
+ }
+ switch (cq_poll_info->op_type) {
+ case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
+ case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
+ entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ break;
+ default:
+ entry->opcode = IB_WC_RECV;
+ }
+}
+
/**
* irdma_process_cqe - process cqe info
* @entry: processed cqe
@@ -2512,42 +2805,17 @@ irdma_process_cqe(struct ib_wc *entry,
}
}
- switch (cq_poll_info->op_type) {
- case IRDMA_OP_TYPE_RDMA_WRITE:
- case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
- entry->opcode = IB_WC_RDMA_WRITE;
- break;
- case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
- case IRDMA_OP_TYPE_RDMA_READ:
- entry->opcode = IB_WC_RDMA_READ;
- break;
- case IRDMA_OP_TYPE_SEND_INV:
- case IRDMA_OP_TYPE_SEND_SOL:
- case IRDMA_OP_TYPE_SEND_SOL_INV:
- case IRDMA_OP_TYPE_SEND:
- entry->opcode = IB_WC_SEND;
- break;
- case IRDMA_OP_TYPE_FAST_REG_NSMR:
- entry->opcode = IB_WC_REG_MR;
- break;
- case IRDMA_OP_TYPE_INV_STAG:
- entry->opcode = IB_WC_LOCAL_INV;
- break;
- case IRDMA_OP_TYPE_REC_IMM:
- case IRDMA_OP_TYPE_REC:
- entry->opcode = cq_poll_info->op_type == IRDMA_OP_TYPE_REC_IMM ?
- IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
+ if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) {
+ set_ib_wc_op_sq(cq_poll_info, entry);
+ } else {
+ set_ib_wc_op_rq(cq_poll_info, entry,
+ qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM ?
+ true : false);
if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
cq_poll_info->stag_invalid_set) {
entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
entry->wc_flags |= IB_WC_WITH_INVALIDATE;
}
- break;
- default:
- ibdev_err(&iwqp->iwdev->ibdev,
- "Invalid opcode = %d in CQE\n", cq_poll_info->op_type);
- entry->status = IB_WC_GENERAL_ERR;
- return;
}
if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) {
@@ -2729,73 +2997,19 @@ irdma_req_notify_cq(struct ib_cq *ibcq,
promo_event = true;
}
- if (!iwcq->armed || promo_event) {
- iwcq->armed = true;
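+ /* atomically flip armed 0 -> 1 so the CQ is armed exactly once */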
+ if (!atomic_cmpxchg(&iwcq->armed, 0, 1) || promo_event) {
iwcq->last_notify = cq_notify;
irdma_uk_cq_request_notification(ukcq, cq_notify);
}
- if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && !irdma_cq_empty(iwcq))
+ if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+ (!irdma_cq_empty(iwcq) || !list_empty(&iwcq->cmpl_generated)))
ret = 1;
spin_unlock_irqrestore(&iwcq->lock, flags);
return ret;
}
-const char *const irdma_hw_stat_names[] = {
- /* gen1 - 32-bit */
- [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
- [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
- [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
- [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
- [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
- [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
- [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
- [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
- [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
- [IRDMA_HW_STAT_INDEX_RXVLANERR] = "rxVlanErrors",
- /* gen1 - 64-bit */
- [IRDMA_HW_STAT_INDEX_IP4RXOCTS] = "ip4InOctets",
- [IRDMA_HW_STAT_INDEX_IP4RXPKTS] = "ip4InPkts",
- [IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = "ip4InReasmRqd",
- [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = "ip4InMcastPkts",
- [IRDMA_HW_STAT_INDEX_IP4TXOCTS] = "ip4OutOctets",
- [IRDMA_HW_STAT_INDEX_IP4TXPKTS] = "ip4OutPkts",
- [IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = "ip4OutSegRqd",
- [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = "ip4OutMcastPkts",
- [IRDMA_HW_STAT_INDEX_IP6RXOCTS] = "ip6InOctets",
- [IRDMA_HW_STAT_INDEX_IP6RXPKTS] = "ip6InPkts",
- [IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = "ip6InReasmRqd",
- [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = "ip6InMcastPkts",
- [IRDMA_HW_STAT_INDEX_IP6TXOCTS] = "ip6OutOctets",
- [IRDMA_HW_STAT_INDEX_IP6TXPKTS] = "ip6OutPkts",
- [IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = "ip6OutSegRqd",
- [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = "ip6OutMcastPkts",
- [IRDMA_HW_STAT_INDEX_TCPRXSEGS] = "tcpInSegs",
- [IRDMA_HW_STAT_INDEX_TCPTXSEG] = "tcpOutSegs",
- [IRDMA_HW_STAT_INDEX_RDMARXRDS] = "iwInRdmaReads",
- [IRDMA_HW_STAT_INDEX_RDMARXSNDS] = "iwInRdmaSends",
- [IRDMA_HW_STAT_INDEX_RDMARXWRS] = "iwInRdmaWrites",
- [IRDMA_HW_STAT_INDEX_RDMATXRDS] = "iwOutRdmaReads",
- [IRDMA_HW_STAT_INDEX_RDMATXSNDS] = "iwOutRdmaSends",
- [IRDMA_HW_STAT_INDEX_RDMATXWRS] = "iwOutRdmaWrites",
- [IRDMA_HW_STAT_INDEX_RDMAVBND] = "iwRdmaBnd",
- [IRDMA_HW_STAT_INDEX_RDMAVINV] = "iwRdmaInv",
-
- /* gen2 - 32-bit */
- [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = "cnpHandled",
- [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = "cnpIgnored",
- [IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = "cnpSent",
- /* gen2 - 64-bit */
- [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = "ip4InMcastOctets",
- [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = "ip4OutMcastOctets",
- [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = "ip6InMcastOctets",
- [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = "ip6OutMcastOctets",
- [IRDMA_HW_STAT_INDEX_UDPRXPKTS] = "RxUDP",
- [IRDMA_HW_STAT_INDEX_UDPTXPKTS] = "TxUDP",
- [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = "RxECNMrkd",
-};
-
/**
* mcast_list_add - Add a new mcast item to list
* @rf: RDMA PCI function
@@ -2963,7 +3177,7 @@ irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id;
if (vlan_id < VLAN_N_VID)
mc_qht_elem->mc_grp_ctx.vlan_valid = true;
- mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->vsi.fcn_id;
+ mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->rf->sc_dev.hmc_fn_id;
mc_qht_elem->mc_grp_ctx.qs_handle =
iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle;
ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac);
@@ -3115,7 +3329,7 @@ irdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
}
static __be64 irdma_mac_to_guid(struct ifnet *ndev){
- unsigned char *mac = IF_LLADDR(ndev);
+ const unsigned char *mac = IF_LLADDR(ndev);
__be64 guid;
unsigned char *dst = (unsigned char *)&guid;
@@ -3186,19 +3400,25 @@ irdma_set_device_ops(struct ib_device *ibdev)
dev_ops->modify_port = irdma_modify_port;
dev_ops->query_qp = irdma_query_qp;
dev_ops->reg_user_mr = irdma_reg_user_mr;
+ dev_ops->rereg_user_mr = irdma_rereg_user_mr;
dev_ops->req_notify_cq = irdma_req_notify_cq;
dev_ops->resize_cq = irdma_resize_cq;
}
static void
-irdma_set_device_roce_ops(struct ib_device *ibdev)
+irdma_set_device_mcast_ops(struct ib_device *ibdev)
{
struct ib_device *dev_ops = ibdev;
-
dev_ops->attach_mcast = irdma_attach_mcast;
+ dev_ops->detach_mcast = irdma_detach_mcast;
+}
+
+static void
+irdma_set_device_roce_ops(struct ib_device *ibdev)
+{
+ struct ib_device *dev_ops = ibdev;
dev_ops->create_ah = irdma_create_ah;
dev_ops->destroy_ah = irdma_destroy_ah;
- dev_ops->detach_mcast = irdma_detach_mcast;
dev_ops->get_link_layer = irdma_get_link_layer;
dev_ops->get_port_immutable = irdma_roce_port_immutable;
dev_ops->modify_qp = irdma_modify_qp_roce;
@@ -3226,6 +3446,11 @@ irdma_set_device_iw_ops(struct ib_device *ibdev)
dev_ops->query_pkey = irdma_iw_query_pkey;
}
+static inline void
+irdma_set_device_gen1_ops(struct ib_device *ibdev)
+{
+}
+
/**
* irdma_init_roce_device - initialization of roce rdma device
* @iwdev: irdma device
@@ -3237,6 +3462,8 @@ irdma_init_roce_device(struct irdma_device *iwdev)
iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
iwdev->ibdev.node_guid = irdma_mac_to_guid(iwdev->netdev);
irdma_set_device_roce_ops(&iwdev->ibdev);
+ if (iwdev->rf->rdma_ver == IRDMA_GEN_2)
+ irdma_set_device_mcast_ops(&iwdev->ibdev);
}
/**
@@ -3290,11 +3517,14 @@ irdma_init_rdma_device(struct irdma_device *iwdev)
if (ret)
return ret;
}
+
iwdev->ibdev.phys_port_cnt = 1;
iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;
iwdev->ibdev.dev.parent = iwdev->rf->dev_ctx.dev;
set_ibdev_dma_device(iwdev->ibdev, &pcidev->dev);
irdma_set_device_ops(&iwdev->ibdev);
+ if (iwdev->rf->rdma_ver == IRDMA_GEN_1)
+ irdma_set_device_gen1_ops(&iwdev->ibdev);
return 0;
}
@@ -3326,6 +3556,7 @@ irdma_ib_unregister_device(struct irdma_device *iwdev)
iwdev->iw_status = 0;
irdma_port_ibevent(iwdev);
ib_unregister_device(&iwdev->ibdev);
+ dev_put(iwdev->netdev);
kfree(iwdev->ibdev.iwcm);
iwdev->ibdev.iwcm = NULL;
}
@@ -3343,6 +3574,7 @@ irdma_ib_register_device(struct irdma_device *iwdev)
if (ret)
return ret;
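+ /* hold a reference on the ifnet for as long as the ibdev is registered */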
+ dev_hold(iwdev->netdev);
sprintf(iwdev->ibdev.name, "irdma-%s", if_name(iwdev->netdev));
ret = ib_register_device(&iwdev->ibdev, NULL);
if (ret)
@@ -3356,9 +3588,7 @@ irdma_ib_register_device(struct irdma_device *iwdev)
error:
kfree(iwdev->ibdev.iwcm);
iwdev->ibdev.iwcm = NULL;
- if (ret)
- irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
- "Register RDMA device fail\n");
+ irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "Register RDMA device fail\n");
return ret;
}
diff --git a/sys/dev/irdma/irdma_verbs.h b/sys/dev/irdma/irdma_verbs.h
index 58cc4b58cd49..15cd2f9a8da4 100644
--- a/sys/dev/irdma/irdma_verbs.h
+++ b/sys/dev/irdma/irdma_verbs.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2021 Intel Corporation
+ * Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -37,7 +37,7 @@
#define IRDMA_VERBS_H
#define IRDMA_MAX_SAVED_PHY_PGADDR 4
-#define IRDMA_FLUSH_DELAY_MS 1500
+#define IRDMA_FLUSH_DELAY_MS 20
#define IRDMA_PKEY_TBL_SZ 1
#define IRDMA_DEFAULT_PKEY 0xFFFF
@@ -132,6 +132,8 @@ struct irdma_mr {
struct ib_mw ibmw;
};
struct ib_umem *region;
+ int access;
+ u8 is_hwreg;
u16 type;
u32 page_cnt;
u64 page_size;
@@ -150,7 +152,7 @@ struct irdma_cq {
u16 cq_size;
u16 cq_num;
bool user_mode;
- bool armed;
+ atomic_t armed;
enum irdma_cmpl_notify last_notify;
u32 polled_cmpls;
u32 cq_mem_size;
@@ -224,13 +226,6 @@ struct irdma_qp {
void *pbl_vbase;
dma_addr_t pbl_pbase;
struct page *page;
- u8 active_conn : 1;
- u8 user_mode : 1;
- u8 hte_added : 1;
- u8 flush_issued : 1;
- u8 sig_all : 1;
- u8 pau_mode : 1;
- u8 rsvd : 1;
u8 iwarp_state;
u16 term_sq_flush_code;
u16 term_rq_flush_code;
@@ -247,6 +242,12 @@ struct irdma_qp {
wait_queue_head_t waitq;
wait_queue_head_t mod_qp_waitq;
u8 rts_ae_rcvd;
+ u8 active_conn : 1;
+ u8 user_mode : 1;
+ u8 hte_added : 1;
+ u8 flush_issued : 1;
+ u8 sig_all : 1;
+ u8 pau_mode : 1;
};
enum irdma_mmap_flag {
@@ -262,12 +263,12 @@ struct irdma_user_mmap_entry {
static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
{
- return (u16)RS_64(dev->feature_info[IRDMA_FEATURE_FW_INFO], IRDMA_FW_VER_MAJOR);
+ return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}
static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
{
- return (u16)RS_64(dev->feature_info[IRDMA_FEATURE_FW_INFO], IRDMA_FW_VER_MINOR);
+ return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}
/**
@@ -304,10 +305,10 @@ irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
enum irdma_mmap_flag mmap_flag, u64 *mmap_offset);
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
-void irdma_ib_dealloc_device(struct ib_device *ibdev);
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
void irdma_generate_flush_completions(struct irdma_qp *iwqp);
void irdma_remove_cmpls_list(struct irdma_cq *iwcq);
int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info);
+void irdma_sched_qp_flush_work(struct irdma_qp *iwqp);
void irdma_flush_worker(struct work_struct *work);
#endif /* IRDMA_VERBS_H */
diff --git a/sys/dev/irdma/irdma_ws.c b/sys/dev/irdma/irdma_ws.c
index c6c24b4ea032..a7920ec6dcdc 100644
--- a/sys/dev/irdma/irdma_ws.c
+++ b/sys/dev/irdma/irdma_ws.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2017 - 2021 Intel Corporation
+ * Copyright (c) 2017 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -59,7 +59,7 @@ irdma_alloc_node(struct irdma_sc_vsi *vsi,
u16 node_index = 0;
ws_mem.size = sizeof(struct irdma_ws_node);
- ws_mem.va = kzalloc(ws_mem.size, GFP_ATOMIC);
+ ws_mem.va = kzalloc(ws_mem.size, GFP_KERNEL);
if (!ws_mem.va)
return NULL;
diff --git a/sys/dev/irdma/osdep.h b/sys/dev/irdma/osdep.h
index 9f8025a46a02..0bb062745ffe 100644
--- a/sys/dev/irdma/osdep.h
+++ b/sys/dev/irdma/osdep.h
@@ -133,7 +133,7 @@ do { \
#define irdma_dev_warn(a, b, ...) printf(b, ##__VA_ARGS__) /*dev_warn(a, b)*/
#define irdma_dev_info(a, b, ...) printf(b, ##__VA_ARGS__)
#define irdma_pr_warn printf
-#define ibdev_err(ibdev, fmt, ...) irdma_dev_err(&((ibdev)->dev), fmt, ##__VA_ARGS__)
+#define ibdev_err(ibdev, fmt, ...) printf("%s:"fmt, (ibdev)->name, ##__VA_ARGS__)
#define dump_struct(s, sz, name) \
do { \
@@ -183,8 +183,10 @@ struct irdma_dev_ctx {
#define irdma_usec_delay(x) DELAY(x)
#define mdelay(x) DELAY((x) * 1000)
-#define rt_tos2priority(tos) (((tos >> 1) & 0x8 >> 1) | ((tos >> 2) ^ ((tos >> 3) << 1)))
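+/* map the ToS byte to a priority using its upper 3 (IP precedence) bits */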
+#define rt_tos2priority(tos) (tos >> 5)
#define ah_attr_to_dmac(attr) ((attr).dmac)
+#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \
+ ib_modify_qp_is_ok(cur_state, next_state, type, mask)
#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \
ib_gid_to_network_type(gid_type, gid)
#define irdma_del_timer_compat(tt) del_timer((tt))