author     Bartosz Sobczak <bartosz.sobczak@intel.com>   2022-05-23 23:39:27 +0000
committer  Eric Joyner <erj@FreeBSD.org>                 2023-02-08 00:23:44 +0000
commit     42bad04a21560f3a82ac3bdbd656d69634512cfc (patch)
tree       2753301e647ca1247631ac25a5e97fa62dea7938
parent     4f0087aa7c0d7a57c39f7ca8f6c6784ebd5e0b85 (diff)
irdma: Add RDMA driver for Intel(R) Ethernet Controller E810
This is an initial commit of the RDMA FreeBSD driver for the Intel(R) Ethernet
Controller E810, called irdma. It supports both the RoCEv2 and iWARP protocols
in a per-PF manner, with RoCEv2 being the default. Testing has been done using
the krping tool, perftest, ucmatose, rping, ud_pingpong, rc_pingpong and others.

Signed-off-by:  Eric Joyner <erj@FreeBSD.org>
Reviewed by:    #manpages (pauamma_gundo.com) [documentation]
Relnotes:       yes
Sponsored by:   Intel Corporation
Differential Revision:  https://reviews.freebsd.org/D34690

(cherry picked from commit cdcd52d41e246ba1c0fcfad0769bd691487355ef)
(cherry picked from commit e602a30bb9fc7ee041a0e629d0fd2db7933ffa32)
(cherry picked from commit a6e275af46644af1de365a1edf19742bfa28bb69)
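All of the userspace tools named above are libibverbs consumers. The minimal
sketch below is not part of this commit; it assumes only the standard verbs API
(ibv_get_device_list(), ibv_open_device(), ibv_query_device()) and simply lists
whatever RDMA devices, irdma or otherwise, are registered on the system.

#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
	int i, num;
	struct ibv_device **list = ibv_get_device_list(&num);

	if (!list)
		return 1;
	for (i = 0; i < num; i++) {
		struct ibv_context *ctx = ibv_open_device(list[i]);
		struct ibv_device_attr attr;

		if (!ctx)
			continue;
		if (!ibv_query_device(ctx, &attr))
			printf("%s: max_qp=%d max_cqe=%d\n",
			    ibv_get_device_name(list[i]),
			    attr.max_qp, attr.max_cqe);
		ibv_close_device(ctx);
	}
	ibv_free_device_list(list);
	return 0;
}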
-rw-r--r--  contrib/ofed/libirdma/abi.h  180
-rw-r--r--  contrib/ofed/libirdma/i40e_devids.h  69
-rw-r--r--  contrib/ofed/libirdma/i40iw_hw.h  64
-rw-r--r--  contrib/ofed/libirdma/ice_devids.h  92
-rw-r--r--  contrib/ofed/libirdma/irdma-abi.h  143
-rw-r--r--  contrib/ofed/libirdma/irdma.h  93
-rw-r--r--  contrib/ofed/libirdma/irdma_defs.h  554
-rw-r--r--  contrib/ofed/libirdma/irdma_uk.c  1884
-rw-r--r--  contrib/ofed/libirdma/irdma_umain.c  255
-rw-r--r--  contrib/ofed/libirdma/irdma_umain.h  218
-rw-r--r--  contrib/ofed/libirdma/irdma_user.h  479
-rw-r--r--  contrib/ofed/libirdma/irdma_uverbs.c  2262
-rw-r--r--  contrib/ofed/libirdma/libirdma.map  10
-rw-r--r--  contrib/ofed/libirdma/osdep.h  213
-rw-r--r--  lib/ofed/Makefile  2
-rw-r--r--  lib/ofed/libirdma/Makefile  23
-rw-r--r--  share/man/man4/irdma.4  233
-rw-r--r--  share/mk/bsd.libnames.mk  1
-rw-r--r--  share/mk/src.libnames.mk  3
-rw-r--r--  sys/amd64/conf/NOTES  3
-rw-r--r--  sys/conf/files  31
-rw-r--r--  sys/dev/irdma/fbsd_kcompat.c  736
-rw-r--r--  sys/dev/irdma/fbsd_kcompat.h  251
-rw-r--r--  sys/dev/irdma/ice_devids.h  92
-rw-r--r--  sys/dev/irdma/icrdma.c  704
-rw-r--r--  sys/dev/irdma/icrdma_hw.c  418
-rw-r--r--  sys/dev/irdma/icrdma_hw.h  137
-rw-r--r--  sys/dev/irdma/irdma-abi.h  143
-rw-r--r--  sys/dev/irdma/irdma.h  238
-rw-r--r--  sys/dev/irdma/irdma_cm.c  4253
-rw-r--r--  sys/dev/irdma/irdma_cm.h  453
-rw-r--r--  sys/dev/irdma/irdma_ctrl.c  5644
-rw-r--r--  sys/dev/irdma/irdma_defs.h  2337
-rw-r--r--  sys/dev/irdma/irdma_hmc.c  734
-rw-r--r--  sys/dev/irdma/irdma_hmc.h  202
-rw-r--r--  sys/dev/irdma/irdma_hw.c  2829
-rw-r--r--  sys/dev/irdma/irdma_kcompat.c  1568
-rw-r--r--  sys/dev/irdma/irdma_main.h  589
-rw-r--r--  sys/dev/irdma/irdma_pble.c  557
-rw-r--r--  sys/dev/irdma/irdma_pble.h  166
-rw-r--r--  sys/dev/irdma/irdma_protos.h  141
-rw-r--r--  sys/dev/irdma/irdma_puda.c  1856
-rw-r--r--  sys/dev/irdma/irdma_puda.h  221
-rw-r--r--  sys/dev/irdma/irdma_type.h  1533
-rw-r--r--  sys/dev/irdma/irdma_uda.c  318
-rw-r--r--  sys/dev/irdma/irdma_uda.h  120
-rw-r--r--  sys/dev/irdma/irdma_uda_d.h  415
-rw-r--r--  sys/dev/irdma/irdma_uk.c  1808
-rw-r--r--  sys/dev/irdma/irdma_user.h  477
-rw-r--r--  sys/dev/irdma/irdma_utils.c  2325
-rw-r--r--  sys/dev/irdma/irdma_verbs.c  3364
-rw-r--r--  sys/dev/irdma/irdma_verbs.h  313
-rw-r--r--  sys/dev/irdma/irdma_ws.c  447
-rw-r--r--  sys/dev/irdma/irdma_ws.h  74
-rw-r--r--  sys/dev/irdma/osdep.h  247
-rw-r--r--  sys/modules/Makefile  4
-rw-r--r--  sys/modules/irdma/Makefile  23
-rw-r--r--  usr.bin/ofed/libibverbs/Makefile.inc  2
-rw-r--r--  usr.bin/ofed/librdmacm/Makefile.inc  2
59 files changed, 42551 insertions, 2 deletions
diff --git a/contrib/ofed/libirdma/abi.h b/contrib/ofed/libirdma/abi.h
new file mode 100644
index 000000000000..ff7a2828efe0
--- /dev/null
+++ b/contrib/ofed/libirdma/abi.h
@@ -0,0 +1,180 @@
+/*-
+ * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+ *
+ * Copyright (C) 2019 - 2020 Intel Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+#ifndef PROVIDER_IRDMA_ABI_H
+#define PROVIDER_IRDMA_ABI_H
+
+#include "irdma.h"
+#include <infiniband/kern-abi.h>
+#include "irdma-abi.h"
+
+#define IRDMA_MIN_ABI_VERSION 0
+#define IRDMA_MAX_ABI_VERSION 5
+
+struct irdma_ualloc_pd_resp {
+ struct ibv_alloc_pd_resp ibv_resp;
+ __u32 pd_id;
+ __u8 rsvd[4];
+
+};
+struct irdma_ucreate_cq {
+ struct ibv_create_cq ibv_cmd;
+ __aligned_u64 user_cq_buf;
+ __aligned_u64 user_shadow_area;
+
+};
+struct irdma_ucreate_cq_resp {
+ struct ibv_create_cq_resp ibv_resp;
+ __u32 cq_id;
+ __u32 cq_size;
+
+};
+struct irdma_ucreate_cq_ex {
+ struct ibv_create_cq_ex ibv_cmd;
+ __aligned_u64 user_cq_buf;
+ __aligned_u64 user_shadow_area;
+
+};
+struct irdma_ucreate_cq_ex_resp {
+ struct ibv_create_cq_resp_ex ibv_resp;
+ __u32 cq_id;
+ __u32 cq_size;
+
+};
+struct irdma_uresize_cq {
+ struct ibv_resize_cq ibv_cmd;
+ __aligned_u64 user_cq_buffer;
+
+};
+struct irdma_uresize_cq_resp {
+ struct ibv_resize_cq_resp ibv_resp;
+
+};
+struct irdma_ucreate_qp {
+ struct ibv_create_qp ibv_cmd;
+ __aligned_u64 user_wqe_bufs;
+ __aligned_u64 user_compl_ctx;
+
+};
+struct irdma_ucreate_qp_resp {
+ struct ibv_create_qp_resp ibv_resp;
+ __u32 qp_id;
+ __u32 actual_sq_size;
+ __u32 actual_rq_size;
+ __u32 irdma_drv_opt;
+ __u16 push_idx;
+ __u8 lsmm;
+ __u8 rsvd;
+ __u32 qp_caps;
+
+};
+struct irdma_umodify_qp_resp {
+ struct ibv_modify_qp_resp_ex ibv_resp;
+ __aligned_u64 push_wqe_mmap_key;
+ __aligned_u64 push_db_mmap_key;
+ __u16 push_offset;
+ __u8 push_valid;
+ __u8 rd_fence_rate;
+ __u8 rsvd[4];
+
+};
+struct irdma_get_context {
+ struct ibv_get_context ibv_cmd;
+ __u32 rsvd32;
+ __u8 userspace_ver;
+ __u8 rsvd8[3];
+
+};
+struct irdma_get_context_resp {
+ struct ibv_get_context_resp ibv_resp;
+ __u32 max_pds;
+ __u32 max_qps;
+ __u32 wq_size; /* size of the WQs (SQ+RQ) in the mmaped area */
+ __u8 kernel_ver;
+ __u8 rsvd[3];
+ __aligned_u64 feature_flags;
+ __aligned_u64 db_mmap_key;
+ __u32 max_hw_wq_frags;
+ __u32 max_hw_read_sges;
+ __u32 max_hw_inline;
+ __u32 max_hw_rq_quanta;
+ __u32 max_hw_wq_quanta;
+ __u32 min_hw_cq_size;
+ __u32 max_hw_cq_size;
+ __u16 max_hw_sq_chunk;
+ __u8 hw_rev;
+ __u8 rsvd2;
+
+};
+struct irdma_ureg_mr {
+ struct ibv_reg_mr ibv_cmd;
+ __u16 reg_type; /* enum irdma_memreg_type */
+ __u16 cq_pages;
+ __u16 rq_pages;
+ __u16 sq_pages;
+
+};
+struct irdma_ucreate_ah_resp {
+ struct ibv_create_ah_resp ibv_resp;
+ __u32 ah_id;
+ __u8 rsvd[4];
+
+};
+
+struct irdma_modify_qp_cmd {
+ struct ibv_modify_qp_ex ibv_cmd;
+ __u8 sq_flush;
+ __u8 rq_flush;
+ __u8 rsvd[6];
+};
+
+struct irdma_query_device_ex {
+ struct ibv_query_device_ex ibv_cmd;
+};
+
+struct irdma_query_device_ex_resp {
+ struct ibv_query_device_resp_ex ibv_resp;
+ __u32 comp_mask;
+ __u32 response_length;
+ struct ibv_odp_caps_resp odp_caps;
+ __u64 timestamp_mask;
+ __u64 hca_core_clock;
+ __u64 device_cap_flags_ex;
+ struct ibv_rss_caps_resp rss_caps;
+ __u32 max_wq_type_rq;
+ __u32 raw_packet_caps;
+ struct ibv_tso_caps tso_caps;
+};
+#endif /* PROVIDER_IRDMA_ABI_H */
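Each struct above pairs a generic libibverbs command or response with irdma-private
fields. The sketch below is not part of this commit; the provider's real code is in
contrib/ofed/libirdma/irdma_uverbs.c (listed in the diffstat but not shown here). It
assumes only the standard ibv_cmd_create_cq() helper from libibverbs, and the name
example_create_cq and its parameters are illustrative.

#include <string.h>
#include <stdint.h>
#include <infiniband/verbs.h>
#include <infiniband/driver.h>
#include "abi.h"

/* Illustrative only: push the irdma-private CQ fields from abi.h through
 * the generic libibverbs command path. */
static int example_create_cq(struct ibv_context *ctx, int ncqe,
			     void *cq_buf, void *shadow_area,
			     struct ibv_cq *cq)
{
	struct irdma_ucreate_cq cmd;
	struct irdma_ucreate_cq_resp resp;
	int err;

	memset(&cmd, 0, sizeof(cmd));
	memset(&resp, 0, sizeof(resp));
	cmd.user_cq_buf = (uintptr_t)cq_buf;		/* CQE ring memory */
	cmd.user_shadow_area = (uintptr_t)shadow_area;	/* doorbell shadow area */

	err = ibv_cmd_create_cq(ctx, ncqe, NULL, 0, cq,
	    &cmd.ibv_cmd, sizeof(cmd), &resp.ibv_resp, sizeof(resp));
	if (err)
		return err;

	/* resp.cq_id and resp.cq_size are filled in by the kernel driver. */
	return 0;
}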
diff --git a/contrib/ofed/libirdma/i40e_devids.h b/contrib/ofed/libirdma/i40e_devids.h
new file mode 100644
index 000000000000..1b0eaae95b82
--- /dev/null
+++ b/contrib/ofed/libirdma/i40e_devids.h
@@ -0,0 +1,69 @@
+/*-
+ * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+ *
+ * Copyright (c) 2015 - 2019 Intel Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+#ifndef I40E_DEVIDS_H
+#define I40E_DEVIDS_H
+
+/* Vendor ID */
+#define I40E_INTEL_VENDOR_ID 0x8086
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710 0x1572
+#define I40E_DEV_ID_QEMU 0x1574
+#define I40E_DEV_ID_KX_B 0x1580
+#define I40E_DEV_ID_KX_C 0x1581
+#define I40E_DEV_ID_QSFP_A 0x1583
+#define I40E_DEV_ID_QSFP_B 0x1584
+#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_10G_BASE_T 0x1586
+#define I40E_DEV_ID_20G_KR2 0x1587
+#define I40E_DEV_ID_20G_KR2_A 0x1588
+#define I40E_DEV_ID_10G_BASE_T4 0x1589
+#define I40E_DEV_ID_25G_B 0x158A
+#define I40E_DEV_ID_25G_SFP28 0x158B
+#define I40E_DEV_ID_VF 0x154C
+#define I40E_DEV_ID_VF_HV 0x1571
+#define I40E_DEV_ID_X722_A0 0x374C
+#define I40E_DEV_ID_X722_A0_VF 0x374D
+#define I40E_DEV_ID_KX_X722 0x37CE
+#define I40E_DEV_ID_QSFP_X722 0x37CF
+#define I40E_DEV_ID_SFP_X722 0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_SFP_I_X722 0x37D3
+#define I40E_DEV_ID_X722_VF 0x37CD
+#define I40E_DEV_ID_X722_VF_HV 0x37D9
+
+#endif /* I40E_DEVIDS_H */
diff --git a/contrib/ofed/libirdma/i40iw_hw.h b/contrib/ofed/libirdma/i40iw_hw.h
new file mode 100644
index 000000000000..38c7e37c35c9
--- /dev/null
+++ b/contrib/ofed/libirdma/i40iw_hw.h
@@ -0,0 +1,64 @@
+/*-
+ * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+ *
+ * Copyright (c) 2015 - 2020 Intel Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+#ifndef I40IW_HW_H
+#define I40IW_HW_H
+
+enum i40iw_device_caps_const {
+ I40IW_MAX_WQ_FRAGMENT_COUNT = 3,
+ I40IW_MAX_SGE_RD = 1,
+ I40IW_MAX_PUSH_PAGE_COUNT = 0,
+ I40IW_MAX_INLINE_DATA_SIZE = 48,
+ I40IW_MAX_IRD_SIZE = 63,
+ I40IW_MAX_ORD_SIZE = 127,
+ I40IW_MAX_WQ_ENTRIES = 2048,
+ I40IW_MAX_WQE_SIZE_RQ = 128,
+ I40IW_MAX_PDS = 32768,
+ I40IW_MAX_STATS_COUNT = 16,
+ I40IW_MAX_CQ_SIZE = 1048575,
+ I40IW_MAX_OUTBOUND_MSG_SIZE = 2147483647,
+ I40IW_MAX_INBOUND_MSG_SIZE = 2147483647,
+};
+
+#define I40IW_QP_WQE_MIN_SIZE 32
+#define I40IW_QP_WQE_MAX_SIZE 128
+#define I40IW_QP_SW_MIN_WQSIZE 4
+#define I40IW_MAX_RQ_WQE_SHIFT 2
+#define I40IW_MAX_QUANTA_PER_WR 2
+
+#define I40IW_QP_SW_MAX_SQ_QUANTA 2048
+#define I40IW_QP_SW_MAX_RQ_QUANTA 16384
+#define I40IW_QP_SW_MAX_WQ_QUANTA 2048
+#endif /* I40IW_HW_H */
diff --git a/contrib/ofed/libirdma/ice_devids.h b/contrib/ofed/libirdma/ice_devids.h
new file mode 100644
index 000000000000..57f26bc33260
--- /dev/null
+++ b/contrib/ofed/libirdma/ice_devids.h
@@ -0,0 +1,92 @@
+/*-
+ * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+ *
+ * Copyright (c) 2019 - 2020 Intel Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+#ifndef ICE_DEVIDS_H
+#define ICE_DEVIDS_H
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+
+/* Device IDs */
+/* Intel(R) Ethernet Connection E823-L for backplane */
+#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
+/* Intel(R) Ethernet Connection E823-L for SFP */
+#define ICE_DEV_ID_E823L_SFP 0x124D
+/* Intel(R) Ethernet Connection E823-L/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E823L_10G_BASE_T 0x124E
+/* Intel(R) Ethernet Connection E823-L 1GbE */
+#define ICE_DEV_ID_E823L_1GBE 0x124F
+/* Intel(R) Ethernet Connection E823-L for QSFP */
+#define ICE_DEV_ID_E823L_QSFP 0x151D
+/* Intel(R) Ethernet Controller E810-C for backplane */
+#define ICE_DEV_ID_E810C_BACKPLANE 0x1591
+/* Intel(R) Ethernet Controller E810-C for QSFP */
+#define ICE_DEV_ID_E810C_QSFP 0x1592
+/* Intel(R) Ethernet Controller E810-C for SFP */
+#define ICE_DEV_ID_E810C_SFP 0x1593
+/* Intel(R) Ethernet Controller E810-XXV for backplane */
+#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
+/* Intel(R) Ethernet Controller E810-XXV for QSFP */
+#define ICE_DEV_ID_E810_XXV_QSFP 0x159A
+/* Intel(R) Ethernet Controller E810-XXV for SFP */
+#define ICE_DEV_ID_E810_XXV_SFP 0x159B
+/* Intel(R) Ethernet Connection E823-C for backplane */
+#define ICE_DEV_ID_E823C_BACKPLANE 0x188A
+/* Intel(R) Ethernet Connection E823-C for QSFP */
+#define ICE_DEV_ID_E823C_QSFP 0x188B
+/* Intel(R) Ethernet Connection E823-C for SFP */
+#define ICE_DEV_ID_E823C_SFP 0x188C
+/* Intel(R) Ethernet Connection E823-C/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E823C_10G_BASE_T 0x188D
+/* Intel(R) Ethernet Connection E823-C 1GbE */
+#define ICE_DEV_ID_E823C_SGMII 0x188E
+/* Intel(R) Ethernet Connection C822N for backplane */
+#define ICE_DEV_ID_C822N_BACKPLANE 0x1890
+/* Intel(R) Ethernet Connection C822N for QSFP */
+#define ICE_DEV_ID_C822N_QSFP 0x1891
+/* Intel(R) Ethernet Connection C822N for SFP */
+#define ICE_DEV_ID_C822N_SFP 0x1892
+/* Intel(R) Ethernet Connection E822-C/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E822C_10G_BASE_T 0x1893
+/* Intel(R) Ethernet Connection E822-C 1GbE */
+#define ICE_DEV_ID_E822C_SGMII 0x1894
+/* Intel(R) Ethernet Connection E822-L for backplane */
+#define ICE_DEV_ID_E822L_BACKPLANE 0x1897
+/* Intel(R) Ethernet Connection E822-L for SFP */
+#define ICE_DEV_ID_E822L_SFP 0x1898
+/* Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E822L_10G_BASE_T 0x1899
+/* Intel(R) Ethernet Connection E822-L 1GbE */
+#define ICE_DEV_ID_E822L_SGMII 0x189A
+#endif /* ICE_DEVIDS_H */
diff --git a/contrib/ofed/libirdma/irdma-abi.h b/contrib/ofed/libirdma/irdma-abi.h
new file mode 100644
index 000000000000..8a06198608e2
--- /dev/null
+++ b/contrib/ofed/libirdma/irdma-abi.h
@@ -0,0 +1,143 @@
+/*-
+ * SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB)
+ *
+ *
+ * Copyright (c) 2006 - 2021 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+#ifndef IRDMA_ABI_H
+#define IRDMA_ABI_H
+
+#include <infiniband/types.h>
+
+/* irdma must support legacy GEN_1 i40iw kernel
+ * and user-space whose last ABI ver is 5
+ */
+#define IRDMA_ABI_VER 5
+
+enum irdma_memreg_type {
+ IRDMA_MEMREG_TYPE_MEM = 0,
+ IRDMA_MEMREG_TYPE_QP = 1,
+ IRDMA_MEMREG_TYPE_CQ = 2,
+};
+
+struct irdma_alloc_ucontext_req {
+ __u32 rsvd32;
+ __u8 userspace_ver;
+ __u8 rsvd8[3];
+};
+
+struct irdma_alloc_ucontext_resp {
+ __u32 max_pds;
+ __u32 max_qps;
+ __u32 wq_size; /* size of the WQs (SQ+RQ) in the mmaped area */
+ __u8 kernel_ver;
+ __u8 rsvd[3];
+ __aligned_u64 feature_flags;
+ __aligned_u64 db_mmap_key;
+ __u32 max_hw_wq_frags;
+ __u32 max_hw_read_sges;
+ __u32 max_hw_inline;
+ __u32 max_hw_rq_quanta;
+ __u32 max_hw_wq_quanta;
+ __u32 min_hw_cq_size;
+ __u32 max_hw_cq_size;
+ __u16 max_hw_sq_chunk;
+ __u8 hw_rev;
+ __u8 rsvd2;
+};
+
+struct irdma_alloc_pd_resp {
+ __u32 pd_id;
+ __u8 rsvd[4];
+};
+
+struct irdma_resize_cq_req {
+ __aligned_u64 user_cq_buffer;
+};
+
+struct irdma_create_cq_req {
+ __aligned_u64 user_cq_buf;
+ __aligned_u64 user_shadow_area;
+};
+
+struct irdma_create_qp_req {
+ __aligned_u64 user_wqe_bufs;
+ __aligned_u64 user_compl_ctx;
+};
+
+struct irdma_mem_reg_req {
+ __u16 reg_type; /* enum irdma_memreg_type */
+ __u16 cq_pages;
+ __u16 rq_pages;
+ __u16 sq_pages;
+};
+
+struct irdma_modify_qp_req {
+ __u8 sq_flush;
+ __u8 rq_flush;
+ __u8 rsvd[6];
+};
+
+struct irdma_create_cq_resp {
+ __u32 cq_id;
+ __u32 cq_size;
+};
+
+struct irdma_create_qp_resp {
+ __u32 qp_id;
+ __u32 actual_sq_size;
+ __u32 actual_rq_size;
+ __u32 irdma_drv_opt;
+ __u16 push_idx;
+ __u8 lsmm;
+ __u8 rsvd;
+ __u32 qp_caps;
+};
+
+struct irdma_modify_qp_resp {
+ __aligned_u64 push_wqe_mmap_key;
+ __aligned_u64 push_db_mmap_key;
+ __u16 push_offset;
+ __u8 push_valid;
+ __u8 rd_fence_rate;
+ __u8 rsvd[4];
+};
+
+struct irdma_create_ah_resp {
+ __u32 ah_id;
+ __u8 rsvd[4];
+};
+#endif /* IRDMA_ABI_H */
diff --git a/contrib/ofed/libirdma/irdma.h b/contrib/ofed/libirdma/irdma.h
new file mode 100644
index 000000000000..27fa3d53d3e8
--- /dev/null
+++ b/contrib/ofed/libirdma/irdma.h
@@ -0,0 +1,93 @@
+/*-
+ * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+ *
+ * Copyright (c) 2017 - 2021 Intel Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+#ifndef IRDMA_H
+#define IRDMA_H
+
+#define RDMA_BIT2(type, a) ((u##type) 1UL << a)
+#define RDMA_MASK3(type, mask, shift) ((u##type) mask << shift)
+#define MAKEMASK(m, s) ((m) << (s))
+#define IRDMA_WQEALLOC_WQE_DESC_INDEX_S 20
+#define IRDMA_WQEALLOC_WQE_DESC_INDEX_M (0xfff << IRDMA_WQEALLOC_WQE_DESC_INDEX_S)
+
+enum irdma_vers {
+ IRDMA_GEN_RSVD,
+ IRDMA_GEN_1,
+ IRDMA_GEN_2,
+};
+
+struct irdma_uk_attrs {
+ u64 feature_flags;
+ u32 max_hw_wq_frags;
+ u32 max_hw_read_sges;
+ u32 max_hw_inline;
+ u32 max_hw_rq_quanta;
+ u32 max_hw_wq_quanta;
+ u32 min_hw_cq_size;
+ u32 max_hw_cq_size;
+ u16 max_hw_sq_chunk;
+ u16 max_hw_wq_size;
+ u16 min_sw_wq_size;
+ u8 hw_rev;
+};
+
+struct irdma_hw_attrs {
+ struct irdma_uk_attrs uk_attrs;
+ u64 max_hw_outbound_msg_size;
+ u64 max_hw_inbound_msg_size;
+ u64 max_mr_size;
+ u32 min_hw_qp_id;
+ u32 min_hw_aeq_size;
+ u32 max_hw_aeq_size;
+ u32 min_hw_ceq_size;
+ u32 max_hw_ceq_size;
+ u32 max_hw_device_pages;
+ u32 max_hw_vf_fpm_id;
+ u32 first_hw_vf_fpm_id;
+ u32 max_hw_ird;
+ u32 max_hw_ord;
+ u32 max_hw_wqes;
+ u32 max_hw_pds;
+ u32 max_hw_ena_vf_count;
+ u32 max_qp_wr;
+ u32 max_pe_ready_count;
+ u32 max_done_count;
+ u32 max_sleep_count;
+ u32 max_cqp_compl_wait_time_ms;
+ u16 max_stat_inst;
+ u16 max_stat_idx;
+};
+
+#endif /* IRDMA_H*/
diff --git a/contrib/ofed/libirdma/irdma_defs.h b/contrib/ofed/libirdma/irdma_defs.h
new file mode 100644
index 000000000000..8fb9f1e2b622
--- /dev/null
+++ b/contrib/ofed/libirdma/irdma_defs.h
@@ -0,0 +1,554 @@
+/*-
+ * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+ *
+ * Copyright (c) 2015 - 2021 Intel Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+#ifndef IRDMA_DEFS_H
+#define IRDMA_DEFS_H
+
+#define IRDMA_BYTE_0 0
+#define IRDMA_BYTE_8 8
+#define IRDMA_BYTE_16 16
+#define IRDMA_BYTE_24 24
+#define IRDMA_BYTE_32 32
+#define IRDMA_BYTE_40 40
+#define IRDMA_BYTE_48 48
+#define IRDMA_BYTE_56 56
+#define IRDMA_BYTE_64 64
+#define IRDMA_BYTE_72 72
+#define IRDMA_BYTE_80 80
+#define IRDMA_BYTE_88 88
+#define IRDMA_BYTE_96 96
+#define IRDMA_BYTE_104 104
+#define IRDMA_BYTE_112 112
+#define IRDMA_BYTE_120 120
+#define IRDMA_BYTE_128 128
+#define IRDMA_BYTE_136 136
+#define IRDMA_BYTE_144 144
+#define IRDMA_BYTE_152 152
+#define IRDMA_BYTE_160 160
+#define IRDMA_BYTE_168 168
+#define IRDMA_BYTE_176 176
+#define IRDMA_BYTE_184 184
+#define IRDMA_BYTE_192 192
+#define IRDMA_BYTE_200 200
+#define IRDMA_BYTE_208 208
+#define IRDMA_BYTE_216 216
+
+#define IRDMA_QP_TYPE_IWARP 1
+#define IRDMA_QP_TYPE_UDA 2
+#define IRDMA_QP_TYPE_ROCE_RC 3
+#define IRDMA_QP_TYPE_ROCE_UD 4
+
+#define IRDMA_HW_PAGE_SIZE 4096
+#define IRDMA_HW_PAGE_SHIFT 12
+#define IRDMA_CQE_QTYPE_RQ 0
+#define IRDMA_CQE_QTYPE_SQ 1
+
+#define IRDMA_QP_SW_MIN_WQSIZE 8u /* in WRs*/
+#define IRDMA_QP_WQE_MIN_SIZE 32
+#define IRDMA_QP_WQE_MAX_SIZE 256
+#define IRDMA_QP_WQE_MIN_QUANTA 1
+#define IRDMA_MAX_RQ_WQE_SHIFT_GEN1 2
+#define IRDMA_MAX_RQ_WQE_SHIFT_GEN2 3
+
+#define IRDMA_SQ_RSVD 258
+#define IRDMA_RQ_RSVD 1
+
+#define IRDMA_FEATURE_RTS_AE 1ULL
+#define IRDMA_FEATURE_CQ_RESIZE 2ULL
+#define IRDMA_FEATURE_RELAX_RQ_ORDER 4ULL
+#define IRDMAQP_OP_RDMA_WRITE 0x00
+#define IRDMAQP_OP_RDMA_READ 0x01
+#define IRDMAQP_OP_RDMA_SEND 0x03
+#define IRDMAQP_OP_RDMA_SEND_INV 0x04
+#define IRDMAQP_OP_RDMA_SEND_SOL_EVENT 0x05
+#define IRDMAQP_OP_RDMA_SEND_SOL_EVENT_INV 0x06
+#define IRDMAQP_OP_BIND_MW 0x08
+#define IRDMAQP_OP_FAST_REGISTER 0x09
+#define IRDMAQP_OP_LOCAL_INVALIDATE 0x0a
+#define IRDMAQP_OP_RDMA_READ_LOC_INV 0x0b
+#define IRDMAQP_OP_NOP 0x0c
+
+#ifndef LS_64_1
+#define LS_64_1(val, bits) ((u64)(uintptr_t)(val) << (bits))
+#define RS_64_1(val, bits) ((u64)(uintptr_t)(val) >> (bits))
+#define LS_32_1(val, bits) ((u32)((val) << (bits)))
+#define RS_32_1(val, bits) ((u32)((val) >> (bits)))
+#endif
+#define LS_64(val, field) (((u64)(val) << field ## _S) & (field ## _M))
+#define RS_64(val, field) ((u64)((val) & field ## _M) >> field ## _S)
+#define LS_32(val, field) (((val) << field ## _S) & (field ## _M))
+#define RS_32(val, field) (((val) & field ## _M) >> field ## _S)
+
+#define IRDMA_CQPHC_QPCTX_S 0
+#define IRDMA_CQPHC_QPCTX_M \
+ (0xffffffffffffffffULL << IRDMA_CQPHC_QPCTX_S)
+
+/* iWARP QP Doorbell shadow area */
+#define IRDMA_QP_DBSA_HW_SQ_TAIL_S 0
+#define IRDMA_QP_DBSA_HW_SQ_TAIL_M \
+ (0x7fffULL << IRDMA_QP_DBSA_HW_SQ_TAIL_S)
+
+/* Completion Queue Doorbell shadow area */
+#define IRDMA_CQ_DBSA_CQEIDX_S 0
+#define IRDMA_CQ_DBSA_CQEIDX_M (0xfffffULL << IRDMA_CQ_DBSA_CQEIDX_S)
+
+#define IRDMA_CQ_DBSA_SW_CQ_SELECT_S 0
+#define IRDMA_CQ_DBSA_SW_CQ_SELECT_M \
+ (0x3fffULL << IRDMA_CQ_DBSA_SW_CQ_SELECT_S)
+
+#define IRDMA_CQ_DBSA_ARM_NEXT_S 14
+#define IRDMA_CQ_DBSA_ARM_NEXT_M BIT_ULL(IRDMA_CQ_DBSA_ARM_NEXT_S)
+
+#define IRDMA_CQ_DBSA_ARM_NEXT_SE_S 15
+#define IRDMA_CQ_DBSA_ARM_NEXT_SE_M BIT_ULL(IRDMA_CQ_DBSA_ARM_NEXT_SE_S)
+
+#define IRDMA_CQ_DBSA_ARM_SEQ_NUM_S 16
+#define IRDMA_CQ_DBSA_ARM_SEQ_NUM_M \
+ (0x3ULL << IRDMA_CQ_DBSA_ARM_SEQ_NUM_S)
+
+/* CQP and iWARP Completion Queue */
+#define IRDMA_CQ_QPCTX_S IRDMA_CQPHC_QPCTX_S
+#define IRDMA_CQ_QPCTX_M IRDMA_CQPHC_QPCTX_M
+
+#define IRDMA_CQ_MINERR_S 0
+#define IRDMA_CQ_MINERR_M (0xffffULL << IRDMA_CQ_MINERR_S)
+
+#define IRDMA_CQ_MAJERR_S 16
+#define IRDMA_CQ_MAJERR_M (0xffffULL << IRDMA_CQ_MAJERR_S)
+
+#define IRDMA_CQ_WQEIDX_S 32
+#define IRDMA_CQ_WQEIDX_M (0x7fffULL << IRDMA_CQ_WQEIDX_S)
+
+#define IRDMA_CQ_EXTCQE_S 50
+#define IRDMA_CQ_EXTCQE_M BIT_ULL(IRDMA_CQ_EXTCQE_S)
+
+#define IRDMA_OOO_CMPL_S 54
+#define IRDMA_OOO_CMPL_M BIT_ULL(IRDMA_OOO_CMPL_S)
+
+#define IRDMA_CQ_ERROR_S 55
+#define IRDMA_CQ_ERROR_M BIT_ULL(IRDMA_CQ_ERROR_S)
+
+#define IRDMA_CQ_SQ_S 62
+#define IRDMA_CQ_SQ_M BIT_ULL(IRDMA_CQ_SQ_S)
+
+#define IRDMA_CQ_VALID_S 63
+#define IRDMA_CQ_VALID_M BIT_ULL(IRDMA_CQ_VALID_S)
+
+#define IRDMA_CQ_IMMVALID_S 62
+#define IRDMA_CQ_IMMVALID_M BIT_ULL(IRDMA_CQ_IMMVALID_S)
+
+#define IRDMA_CQ_UDSMACVALID_S 61
+#define IRDMA_CQ_UDSMACVALID_M BIT_ULL(IRDMA_CQ_UDSMACVALID_S)
+
+#define IRDMA_CQ_UDVLANVALID_S 60
+#define IRDMA_CQ_UDVLANVALID_M BIT_ULL(IRDMA_CQ_UDVLANVALID_S)
+
+#define IRDMA_CQ_UDSMAC_S 0
+#define IRDMA_CQ_UDSMAC_M (0xffffffffffffULL << IRDMA_CQ_UDSMAC_S)
+
+#define IRDMA_CQ_UDVLAN_S 48
+#define IRDMA_CQ_UDVLAN_M (0xffffULL << IRDMA_CQ_UDVLAN_S)
+
+#define IRDMA_CQ_IMMDATA_S 0
+#define IRDMA_CQ_IMMDATA_M (0xffffffffffffffffULL << IRDMA_CQ_IMMVALID_S)
+
+#define IRDMA_CQ_IMMDATALOW32_S 0
+#define IRDMA_CQ_IMMDATALOW32_M (0xffffffffULL << IRDMA_CQ_IMMDATALOW32_S)
+
+#define IRDMA_CQ_IMMDATAUP32_S 32
+#define IRDMA_CQ_IMMDATAUP32_M (0xffffffffULL << IRDMA_CQ_IMMDATAUP32_S)
+
+#define IRDMACQ_PAYLDLEN_S 0
+#define IRDMACQ_PAYLDLEN_M (0xffffffffULL << IRDMACQ_PAYLDLEN_S)
+
+#define IRDMACQ_TCPSEQNUMRTT_S 32
+#define IRDMACQ_TCPSEQNUMRTT_M (0xffffffffULL << IRDMACQ_TCPSEQNUMRTT_S)
+
+#define IRDMACQ_INVSTAG_S 0
+#define IRDMACQ_INVSTAG_M (0xffffffffULL << IRDMACQ_INVSTAG_S)
+
+#define IRDMACQ_QPID_S 32
+#define IRDMACQ_QPID_M (0xffffffULL << IRDMACQ_QPID_S)
+
+#define IRDMACQ_UDSRCQPN_S 0
+#define IRDMACQ_UDSRCQPN_M (0xffffffffULL << IRDMACQ_UDSRCQPN_S)
+
+#define IRDMACQ_PSHDROP_S 51
+#define IRDMACQ_PSHDROP_M BIT_ULL(IRDMACQ_PSHDROP_S)
+
+#define IRDMACQ_STAG_S 53
+#define IRDMACQ_STAG_M BIT_ULL(IRDMACQ_STAG_S)
+
+#define IRDMACQ_IPV4_S 53
+#define IRDMACQ_IPV4_M BIT_ULL(IRDMACQ_IPV4_S)
+
+#define IRDMACQ_SOEVENT_S 54
+#define IRDMACQ_SOEVENT_M BIT_ULL(IRDMACQ_SOEVENT_S)
+
+#define IRDMACQ_OP_S 56
+#define IRDMACQ_OP_M (0x3fULL << IRDMACQ_OP_S)
+
+/* Manage Push Page - MPP */
+#define IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1 0xffff
+#define IRDMA_INVALID_PUSH_PAGE_INDEX 0xffffffff
+
+/* iwarp QP SQ WQE common fields */
+#define IRDMAQPSQ_OPCODE_S 32
+#define IRDMAQPSQ_OPCODE_M (0x3fULL << IRDMAQPSQ_OPCODE_S)
+
+#define IRDMAQPSQ_COPY_HOST_PBL_S 43
+#define IRDMAQPSQ_COPY_HOST_PBL_M BIT_ULL(IRDMAQPSQ_COPY_HOST_PBL_S)
+
+#define IRDMAQPSQ_ADDFRAGCNT_S 38
+#define IRDMAQPSQ_ADDFRAGCNT_M (0xfULL << IRDMAQPSQ_ADDFRAGCNT_S)
+
+#define IRDMAQPSQ_PUSHWQE_S 56
+#define IRDMAQPSQ_PUSHWQE_M BIT_ULL(IRDMAQPSQ_PUSHWQE_S)
+
+#define IRDMAQPSQ_STREAMMODE_S 58
+#define IRDMAQPSQ_STREAMMODE_M BIT_ULL(IRDMAQPSQ_STREAMMODE_S)
+
+#define IRDMAQPSQ_WAITFORRCVPDU_S 59
+#define IRDMAQPSQ_WAITFORRCVPDU_M BIT_ULL(IRDMAQPSQ_WAITFORRCVPDU_S)
+
+#define IRDMAQPSQ_READFENCE_S 60
+#define IRDMAQPSQ_READFENCE_M BIT_ULL(IRDMAQPSQ_READFENCE_S)
+
+#define IRDMAQPSQ_LOCALFENCE_S 61
+#define IRDMAQPSQ_LOCALFENCE_M BIT_ULL(IRDMAQPSQ_LOCALFENCE_S)
+
+#define IRDMAQPSQ_UDPHEADER_S 61
+#define IRDMAQPSQ_UDPHEADER_M BIT_ULL(IRDMAQPSQ_UDPHEADER_S)
+
+#define IRDMAQPSQ_L4LEN_S 42
+#define IRDMAQPSQ_L4LEN_M ((u64)0xF << IRDMAQPSQ_L4LEN_S)
+
+#define IRDMAQPSQ_SIGCOMPL_S 62
+#define IRDMAQPSQ_SIGCOMPL_M BIT_ULL(IRDMAQPSQ_SIGCOMPL_S)
+
+#define IRDMAQPSQ_VALID_S 63
+#define IRDMAQPSQ_VALID_M BIT_ULL(IRDMAQPSQ_VALID_S)
+
+#define IRDMAQPSQ_FRAG_TO_S IRDMA_CQPHC_QPCTX_S
+#define IRDMAQPSQ_FRAG_TO_M IRDMA_CQPHC_QPCTX_M
+
+#define IRDMAQPSQ_FRAG_VALID_S 63
+#define IRDMAQPSQ_FRAG_VALID_M BIT_ULL(IRDMAQPSQ_FRAG_VALID_S)
+
+#define IRDMAQPSQ_FRAG_LEN_S 32
+#define IRDMAQPSQ_FRAG_LEN_M (0x7fffffffULL << IRDMAQPSQ_FRAG_LEN_S)
+
+#define IRDMAQPSQ_FRAG_STAG_S 0
+#define IRDMAQPSQ_FRAG_STAG_M (0xffffffffULL << IRDMAQPSQ_FRAG_STAG_S)
+
+#define IRDMAQPSQ_GEN1_FRAG_LEN_S 0
+#define IRDMAQPSQ_GEN1_FRAG_LEN_M (0xffffffffULL << IRDMAQPSQ_GEN1_FRAG_LEN_S)
+
+#define IRDMAQPSQ_GEN1_FRAG_STAG_S 32
+#define IRDMAQPSQ_GEN1_FRAG_STAG_M (0xffffffffULL << IRDMAQPSQ_GEN1_FRAG_STAG_S)
+
+#define IRDMAQPSQ_REMSTAGINV_S 0
+#define IRDMAQPSQ_REMSTAGINV_M (0xffffffffULL << IRDMAQPSQ_REMSTAGINV_S)
+
+#define IRDMAQPSQ_DESTQKEY_S 0
+#define IRDMAQPSQ_DESTQKEY_M (0xffffffffULL << IRDMAQPSQ_DESTQKEY_S)
+
+#define IRDMAQPSQ_DESTQPN_S 32
+#define IRDMAQPSQ_DESTQPN_M (0x00ffffffULL << IRDMAQPSQ_DESTQPN_S)
+
+#define IRDMAQPSQ_AHID_S 0
+#define IRDMAQPSQ_AHID_M (0x0001ffffULL << IRDMAQPSQ_AHID_S)
+
+#define IRDMAQPSQ_INLINEDATAFLAG_S 57
+#define IRDMAQPSQ_INLINEDATAFLAG_M BIT_ULL(IRDMAQPSQ_INLINEDATAFLAG_S)
+
+#define IRDMA_INLINE_VALID_S 7
+
+#define IRDMAQPSQ_INLINEDATALEN_S 48
+#define IRDMAQPSQ_INLINEDATALEN_M \
+ (0xffULL << IRDMAQPSQ_INLINEDATALEN_S)
+#define IRDMAQPSQ_IMMDATAFLAG_S 47
+#define IRDMAQPSQ_IMMDATAFLAG_M \
+ BIT_ULL(IRDMAQPSQ_IMMDATAFLAG_S)
+#define IRDMAQPSQ_REPORTRTT_S 46
+#define IRDMAQPSQ_REPORTRTT_M \
+ BIT_ULL(IRDMAQPSQ_REPORTRTT_S)
+
+#define IRDMAQPSQ_IMMDATA_S 0
+#define IRDMAQPSQ_IMMDATA_M \
+ (0xffffffffffffffffULL << IRDMAQPSQ_IMMDATA_S)
+
+/* rdma write */
+#define IRDMAQPSQ_REMSTAG_S 0
+#define IRDMAQPSQ_REMSTAG_M (0xffffffffULL << IRDMAQPSQ_REMSTAG_S)
+
+#define IRDMAQPSQ_REMTO_S IRDMA_CQPHC_QPCTX_S
+#define IRDMAQPSQ_REMTO_M IRDMA_CQPHC_QPCTX_M
+
+/* memory window */
+#define IRDMAQPSQ_STAGRIGHTS_S 48
+#define IRDMAQPSQ_STAGRIGHTS_M (0x1fULL << IRDMAQPSQ_STAGRIGHTS_S)
+
+#define IRDMAQPSQ_VABASEDTO_S 53
+#define IRDMAQPSQ_VABASEDTO_M BIT_ULL(IRDMAQPSQ_VABASEDTO_S)
+
+#define IRDMAQPSQ_MEMWINDOWTYPE_S 54
+#define IRDMAQPSQ_MEMWINDOWTYPE_M BIT_ULL(IRDMAQPSQ_MEMWINDOWTYPE_S)
+
+#define IRDMAQPSQ_MWLEN_S IRDMA_CQPHC_QPCTX_S
+#define IRDMAQPSQ_MWLEN_M IRDMA_CQPHC_QPCTX_M
+
+#define IRDMAQPSQ_PARENTMRSTAG_S 32
+#define IRDMAQPSQ_PARENTMRSTAG_M \
+ (0xffffffffULL << IRDMAQPSQ_PARENTMRSTAG_S)
+
+#define IRDMAQPSQ_MWSTAG_S 0
+#define IRDMAQPSQ_MWSTAG_M (0xffffffffULL << IRDMAQPSQ_MWSTAG_S)
+
+#define IRDMAQPSQ_BASEVA_TO_FBO_S IRDMA_CQPHC_QPCTX_S
+#define IRDMAQPSQ_BASEVA_TO_FBO_M IRDMA_CQPHC_QPCTX_M
+
+/* Local Invalidate */
+#define IRDMAQPSQ_LOCSTAG_S 0
+#define IRDMAQPSQ_LOCSTAG_M (0xffffffffULL << IRDMAQPSQ_LOCSTAG_S)
+
+/* iwarp QP RQ WQE common fields */
+#define IRDMAQPRQ_ADDFRAGCNT_S IRDMAQPSQ_ADDFRAGCNT_S
+#define IRDMAQPRQ_ADDFRAGCNT_M IRDMAQPSQ_ADDFRAGCNT_M
+
+#define IRDMAQPRQ_VALID_S IRDMAQPSQ_VALID_S
+#define IRDMAQPRQ_VALID_M IRDMAQPSQ_VALID_M
+
+#define IRDMAQPRQ_COMPLCTX_S IRDMA_CQPHC_QPCTX_S
+#define IRDMAQPRQ_COMPLCTX_M IRDMA_CQPHC_QPCTX_M
+
+#define IRDMAQPRQ_FRAG_LEN_S IRDMAQPSQ_FRAG_LEN_S
+#define IRDMAQPRQ_FRAG_LEN_M IRDMAQPSQ_FRAG_LEN_M
+
+#define IRDMAQPRQ_STAG_S IRDMAQPSQ_FRAG_STAG_S
+#define IRDMAQPRQ_STAG_M IRDMAQPSQ_FRAG_STAG_M
+
+#define IRDMAQPRQ_TO_S IRDMAQPSQ_FRAG_TO_S
+#define IRDMAQPRQ_TO_M IRDMAQPSQ_FRAG_TO_M
+
+#define IRDMAPFINT_OICR_HMC_ERR_M BIT(26)
+#define IRDMAPFINT_OICR_PE_PUSH_M BIT(27)
+#define IRDMAPFINT_OICR_PE_CRITERR_M BIT(28)
+
+#define IRDMA_GET_CURRENT_CQ_ELEM(_cq) \
+ ( \
+ (_cq)->cq_base[IRDMA_RING_CURRENT_HEAD((_cq)->cq_ring)].buf \
+ )
+#define IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(_cq) \
+ ( \
+ ((struct irdma_extended_cqe *) \
+ ((_cq)->cq_base))[IRDMA_RING_CURRENT_HEAD((_cq)->cq_ring)].buf \
+ )
+
+#define IRDMA_RING_INIT(_ring, _size) \
+ { \
+ (_ring).head = 0; \
+ (_ring).tail = 0; \
+ (_ring).size = (_size); \
+ }
+#define IRDMA_RING_SIZE(_ring) ((_ring).size)
+#define IRDMA_RING_CURRENT_HEAD(_ring) ((_ring).head)
+#define IRDMA_RING_CURRENT_TAIL(_ring) ((_ring).tail)
+
+#define IRDMA_RING_MOVE_HEAD(_ring, _retcode) \
+ { \
+ register u32 size; \
+ size = (_ring).size; \
+ if (!IRDMA_RING_FULL_ERR(_ring)) { \
+ (_ring).head = ((_ring).head + 1) % size; \
+ (_retcode) = 0; \
+ } else { \
+ (_retcode) = ENOSPC; \
+ } \
+ }
+#define IRDMA_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
+ { \
+ register u32 size; \
+ size = (_ring).size; \
+ if ((IRDMA_RING_USED_QUANTA(_ring) + (_count)) < size) { \
+ (_ring).head = ((_ring).head + (_count)) % size; \
+ (_retcode) = 0; \
+ } else { \
+ (_retcode) = ENOSPC; \
+ } \
+ }
+#define IRDMA_SQ_RING_MOVE_HEAD(_ring, _retcode) \
+ { \
+ register u32 size; \
+ size = (_ring).size; \
+ if (!IRDMA_SQ_RING_FULL_ERR(_ring)) { \
+ (_ring).head = ((_ring).head + 1) % size; \
+ (_retcode) = 0; \
+ } else { \
+ (_retcode) = ENOSPC; \
+ } \
+ }
+#define IRDMA_SQ_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
+ { \
+ register u32 size; \
+ size = (_ring).size; \
+ if ((IRDMA_RING_USED_QUANTA(_ring) + (_count)) < (size - 256)) { \
+ (_ring).head = ((_ring).head + (_count)) % size; \
+ (_retcode) = 0; \
+ } else { \
+ (_retcode) = ENOSPC; \
+ } \
+ }
+#define IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(_ring, _count) \
+ (_ring).head = ((_ring).head + (_count)) % (_ring).size
+
+#define IRDMA_RING_MOVE_TAIL(_ring) \
+ (_ring).tail = ((_ring).tail + 1) % (_ring).size
+
+#define IRDMA_RING_MOVE_HEAD_NOCHECK(_ring) \
+ (_ring).head = ((_ring).head + 1) % (_ring).size
+
+#define IRDMA_RING_MOVE_TAIL_BY_COUNT(_ring, _count) \
+ (_ring).tail = ((_ring).tail + (_count)) % (_ring).size
+
+#define IRDMA_RING_SET_TAIL(_ring, _pos) \
+ (_ring).tail = (_pos) % (_ring).size
+
+#define IRDMA_RING_FULL_ERR(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 1)) \
+ )
+
+#define IRDMA_ERR_RING_FULL2(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 2)) \
+ )
+
+#define IRDMA_ERR_RING_FULL3(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 3)) \
+ )
+
+#define IRDMA_SQ_RING_FULL_ERR(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 257)) \
+ )
+
+#define IRDMA_ERR_SQ_RING_FULL2(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 258)) \
+ )
+#define IRDMA_ERR_SQ_RING_FULL3(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 259)) \
+ )
+#define IRDMA_RING_MORE_WORK(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) != 0) \
+ )
+
+#define IRDMA_RING_USED_QUANTA(_ring) \
+ ( \
+ (((_ring).head + (_ring).size - (_ring).tail) % (_ring).size) \
+ )
+
+#define IRDMA_RING_FREE_QUANTA(_ring) \
+ ( \
+ ((_ring).size - IRDMA_RING_USED_QUANTA(_ring) - 1) \
+ )
+
+#define IRDMA_SQ_RING_FREE_QUANTA(_ring) \
+ ( \
+ ((_ring).size - IRDMA_RING_USED_QUANTA(_ring) - 257) \
+ )
+
+#define IRDMA_ATOMIC_RING_MOVE_HEAD(_ring, index, _retcode) \
+ { \
+ index = IRDMA_RING_CURRENT_HEAD(_ring); \
+ IRDMA_RING_MOVE_HEAD(_ring, _retcode); \
+ }
+
+enum irdma_qp_wqe_size {
+ IRDMA_WQE_SIZE_32 = 32,
+ IRDMA_WQE_SIZE_64 = 64,
+ IRDMA_WQE_SIZE_96 = 96,
+ IRDMA_WQE_SIZE_128 = 128,
+ IRDMA_WQE_SIZE_256 = 256,
+};
+
+/**
+ * set_64bit_val - set 64 bit value to hw wqe
+ * @wqe_words: wqe addr to write
+ * @byte_index: index in wqe
+ * @val: value to write
+ **/
+static inline void set_64bit_val(__le64 *wqe_words, u32 byte_index, u64 val)
+{
+ wqe_words[byte_index >> 3] = htole64(val);
+}
+
+/**
+ * set_32bit_val - set 32 bit value to hw wqe
+ * @wqe_words: wqe addr to write
+ * @byte_index: index in wqe
+ * @val: value to write
+ **/
+static inline void set_32bit_val(__le32 *wqe_words, u32 byte_index, u32 val)
+{
+ wqe_words[byte_index >> 2] = htole32(val);
+}
+
+/**
+ * get_64bit_val - read 64 bit value from wqe
+ * @wqe_words: wqe addr
+ * @byte_index: index to read from
+ * @val: read value
+ **/
+static inline void get_64bit_val(__le64 *wqe_words, u32 byte_index, u64 *val)
+{
+ *val = le64toh(wqe_words[byte_index >> 3]);
+}
+
+/**
+ * get_32bit_val - read 32 bit value from wqe
+ * @wqe_words: wqe addr
+ * @byte_index: index to read from
+ * @val: return 32 bit value
+ **/
+static inline void get_32bit_val(__le32 *wqe_words, u32 byte_index, u32 *val)
+{
+ *val = le32toh(wqe_words[byte_index >> 2]);
+}
+#endif /* IRDMA_DEFS_H */
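The IRDMA_RING_* macros above implement plain producer/consumer accounting over a
head, tail and size triple. The fragment below is not part of this commit; it is a
worked example that assumes only the u32 typedef from osdep.h, and it substitutes
struct example_ring for the driver's real ring type (defined in irdma_user.h, not
shown in this excerpt), since the macros touch nothing beyond those three members.

#include <errno.h>
#include "osdep.h"
#include "irdma_defs.h"

/* Stand-in for the driver's ring type; only head, tail and size are used. */
struct example_ring {
	volatile u32 head;
	volatile u32 tail;
	u32 size;
};

static void example_ring_accounting(void)
{
	struct example_ring sq;
	int ret;

	IRDMA_RING_INIT(sq, 32);	/* head = tail = 0, size = 32 */
	IRDMA_RING_MOVE_HEAD(sq, ret);	/* post one quantum: head = 1, ret = 0 */
	IRDMA_RING_MOVE_HEAD(sq, ret);	/* post another: head = 2 */
	IRDMA_RING_MOVE_TAIL(sq);	/* retire one completion: tail = 1 */

	/* used = (head + size - tail) % size = (2 + 32 - 1) % 32 = 1 */
	(void)IRDMA_RING_USED_QUANTA(sq);
	/* free = size - used - 1 = 30; one slot stays empty to tell full from empty */
	(void)IRDMA_RING_FREE_QUANTA(sq);
	(void)ret;
}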
diff --git a/contrib/ofed/libirdma/irdma_uk.c b/contrib/ofed/libirdma/irdma_uk.c
new file mode 100644
index 000000000000..2f77c132d296
--- /dev/null
+++ b/contrib/ofed/libirdma/irdma_uk.c
@@ -0,0 +1,1884 @@
+/*-
+ * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+ *
+ * Copyright (c) 2015 - 2022 Intel Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+#include "osdep.h"
+#include "irdma_defs.h"
+#include "irdma_user.h"
+#include "irdma.h"
+
+/**
+ * irdma_set_fragment - set fragment in wqe
+ * @wqe: wqe for setting fragment
+ * @offset: offset value
+ * @sge: sge length and stag
+ * @valid: The wqe valid
+ */
+static void
+irdma_set_fragment(__le64 * wqe, u32 offset, struct irdma_sge *sge,
+ u8 valid)
+{
+ if (sge) {
+ set_64bit_val(wqe, offset,
+ LS_64(sge->tag_off, IRDMAQPSQ_FRAG_TO));
+ set_64bit_val(wqe, offset + IRDMA_BYTE_8,
+ LS_64(valid, IRDMAQPSQ_VALID) |
+ LS_64(sge->len, IRDMAQPSQ_FRAG_LEN) |
+ LS_64(sge->stag, IRDMAQPSQ_FRAG_STAG));
+ } else {
+ set_64bit_val(wqe, offset, 0);
+ set_64bit_val(wqe, offset + IRDMA_BYTE_8,
+ LS_64(valid, IRDMAQPSQ_VALID));
+ }
+}
+
+/**
+ * irdma_set_fragment_gen_1 - set fragment in wqe
+ * @wqe: wqe for setting fragment
+ * @offset: offset value
+ * @sge: sge length and stag
+ * @valid: wqe valid flag
+ */
+static void
+irdma_set_fragment_gen_1(__le64 * wqe, u32 offset,
+ struct irdma_sge *sge, u8 valid)
+{
+ if (sge) {
+ set_64bit_val(wqe, offset,
+ LS_64(sge->tag_off, IRDMAQPSQ_FRAG_TO));
+ set_64bit_val(wqe, offset + IRDMA_BYTE_8,
+ LS_64(sge->len, IRDMAQPSQ_GEN1_FRAG_LEN) |
+ LS_64(sge->stag, IRDMAQPSQ_GEN1_FRAG_STAG));
+ } else {
+ set_64bit_val(wqe, offset, 0);
+ set_64bit_val(wqe, offset + IRDMA_BYTE_8, 0);
+ }
+}
+
+/**
+ * irdma_nop_1 - insert a NOP wqe
+ * @qp: hw qp ptr
+ */
+static int
+irdma_nop_1(struct irdma_qp_uk *qp)
+{
+ u64 hdr;
+ __le64 *wqe;
+ u32 wqe_idx;
+ bool signaled = false;
+
+ if (!qp->sq_ring.head)
+ return EINVAL;
+
+ wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
+ wqe = qp->sq_base[wqe_idx].elem;
+
+ qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;
+
+ set_64bit_val(wqe, IRDMA_BYTE_0, 0);
+ set_64bit_val(wqe, IRDMA_BYTE_8, 0);
+ set_64bit_val(wqe, IRDMA_BYTE_16, 0);
+
+ hdr = LS_64(IRDMAQP_OP_NOP, IRDMAQPSQ_OPCODE) |
+ LS_64(signaled, IRDMAQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+
+ /* make sure WQE is written before valid bit is set */
+ udma_to_device_barrier();
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ return 0;
+}
+
+/**
+ * irdma_clr_wqes - clear next 128 sq entries
+ * @qp: hw qp ptr
+ * @qp_wqe_idx: wqe_idx
+ */
+void
+irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
+{
+ __le64 *wqe;
+ u32 wqe_idx;
+
+ if (!(qp_wqe_idx & 0x7F)) {
+ wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
+ wqe = qp->sq_base[wqe_idx].elem;
+ if (wqe_idx)
+ memset(wqe, qp->swqe_polarity ? 0 : 0xFF, 0x1000);
+ else
+ memset(wqe, qp->swqe_polarity ? 0xFF : 0, 0x1000);
+ }
+}
+
+/**
+ * irdma_uk_qp_post_wr - ring doorbell
+ * @qp: hw qp ptr
+ */
+void
+irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
+{
+ u64 temp;
+ u32 hw_sq_tail;
+ u32 sw_sq_head;
+
+ /* valid bit is written and loads completed before reading shadow */
+ atomic_thread_fence(memory_order_seq_cst);
+
+ /* read the doorbell shadow area */
+ get_64bit_val(qp->shadow_area, IRDMA_BYTE_0, &temp);
+
+ hw_sq_tail = (u32)RS_64(temp, IRDMA_QP_DBSA_HW_SQ_TAIL);
+ sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
+ if (sw_sq_head != qp->initial_ring.head) {
+ if (qp->push_dropped) {
+ db_wr32(qp->qp_id, qp->wqe_alloc_db);
+ qp->push_dropped = false;
+ } else if (sw_sq_head != hw_sq_tail) {
+ if (sw_sq_head > qp->initial_ring.head) {
+ if (hw_sq_tail >= qp->initial_ring.head &&
+ hw_sq_tail < sw_sq_head)
+ db_wr32(qp->qp_id, qp->wqe_alloc_db);
+ } else {
+ if (hw_sq_tail >= qp->initial_ring.head ||
+ hw_sq_tail < sw_sq_head)
+ db_wr32(qp->qp_id, qp->wqe_alloc_db);
+ }
+ }
+ }
+
+ qp->initial_ring.head = qp->sq_ring.head;
+}
+
+/**
+ * irdma_qp_ring_push_db - ring qp doorbell
+ * @qp: hw qp ptr
+ * @wqe_idx: wqe index
+ */
+static void
+irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
+{
+ set_32bit_val(qp->push_db, 0,
+ LS_32(wqe_idx >> 3, IRDMA_WQEALLOC_WQE_DESC_INDEX) | qp->qp_id);
+ qp->initial_ring.head = qp->sq_ring.head;
+ qp->push_mode = true;
+ qp->push_dropped = false;
+}
+
+void
+irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 * wqe, u16 quanta,
+ u32 wqe_idx, bool post_sq)
+{
+ __le64 *push;
+
+ if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
+ IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
+ !qp->push_mode) {
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+ } else {
+ push = (__le64 *) ((uintptr_t)qp->push_wqe +
+ (wqe_idx & 0x7) * 0x20);
+ irdma_memcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE);
+ irdma_qp_ring_push_db(qp, wqe_idx);
+ }
+}
+
+/**
+ * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
+ * @qp: hw qp ptr
+ * @wqe_idx: return wqe index
+ * @quanta: size of WR in quanta
+ * @total_size: size of WR in bytes
+ * @info: info on WR
+ */
+__le64 *
+irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
+ u16 quanta, u32 total_size,
+ struct irdma_post_sq_info *info)
+{
+ __le64 *wqe;
+ __le64 *wqe_0 = NULL;
+ u32 nop_wqe_idx;
+ u16 avail_quanta;
+ u16 i;
+
+ avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
+ (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
+ qp->uk_attrs->max_hw_sq_chunk);
+ if (quanta <= avail_quanta) {
+ /* WR fits in current chunk */
+ if (quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
+ return NULL;
+ } else {
+ /* Need to pad with NOP */
+ if (quanta + avail_quanta >
+ IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
+ return NULL;
+
+ nop_wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
+ for (i = 0; i < avail_quanta; i++) {
+ irdma_nop_1(qp);
+ IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
+ }
+ if (qp->push_db && info->push_wqe)
+ irdma_qp_push_wqe(qp, qp->sq_base[nop_wqe_idx].elem,
+ avail_quanta, nop_wqe_idx, true);
+ }
+
+ *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
+ if (!*wqe_idx)
+ qp->swqe_polarity = !qp->swqe_polarity;
+
+ IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta);
+
+ wqe = qp->sq_base[*wqe_idx].elem;
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && quanta == 1 &&
+ (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
+ wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
+ wqe_0[3] = htole64(LS_64(!qp->swqe_polarity, IRDMAQPSQ_VALID));
+ }
+ qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
+ qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
+ qp->sq_wrtrk_array[*wqe_idx].quanta = quanta;
+ qp->sq_wrtrk_array[*wqe_idx].signaled = info->signaled;
+
+ return wqe;
+}
+
+/**
+ * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
+ * @qp: hw qp ptr
+ * @wqe_idx: return wqe index
+ */
+__le64 *
+irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
+{
+ __le64 *wqe;
+ int ret_code;
+
+ if (IRDMA_RING_FULL_ERR(qp->rq_ring))
+ return NULL;
+
+ IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
+ if (ret_code)
+ return NULL;
+
+ if (!*wqe_idx)
+ qp->rwqe_polarity = !qp->rwqe_polarity;
+ /* rq_wqe_size_multiplier is no of 32 byte quanta in one rq wqe */
+ wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;
+
+ return wqe;
+}
+
+/**
+ * irdma_uk_rdma_write - rdma write operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int
+irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool post_sq)
+{
+ u64 hdr;
+ __le64 *wqe;
+ struct irdma_rdma_write *op_info;
+ u32 i, wqe_idx;
+ u32 total_size = 0, byte_off;
+ int ret_code;
+ u32 frag_cnt, addl_frag_cnt;
+ bool read_fence = false;
+ u16 quanta;
+
+ info->push_wqe = qp->push_db ? true : false;
+
+ op_info = &info->op.rdma_write;
+ if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
+ return EINVAL;
+
+ for (i = 0; i < op_info->num_lo_sges; i++)
+ total_size += op_info->lo_sg_list[i].len;
+
+ read_fence |= info->read_fence;
+
+ if (info->imm_data_valid)
+ frag_cnt = op_info->num_lo_sges + 1;
+ else
+ frag_cnt = op_info->num_lo_sges;
+ addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
+ ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
+ if (ret_code)
+ return ret_code;
+
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return ENOSPC;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
+ set_64bit_val(wqe, IRDMA_BYTE_16,
+ LS_64(op_info->rem_addr.tag_off, IRDMAQPSQ_FRAG_TO));
+
+ if (info->imm_data_valid) {
+ set_64bit_val(wqe, IRDMA_BYTE_0,
+ LS_64(info->imm_data, IRDMAQPSQ_IMMDATA));
+ i = 0;
+ } else {
+ qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0,
+ op_info->lo_sg_list,
+ qp->swqe_polarity);
+ i = 1;
+ }
+
+ for (byte_off = IRDMA_BYTE_32; i < op_info->num_lo_sges; i++) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off,
+ &op_info->lo_sg_list[i],
+ qp->swqe_polarity);
+ byte_off += 16;
+ }
+
+ /* if not an odd number set valid bit in next fragment */
+ if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
+ frag_cnt) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
+ qp->swqe_polarity);
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
+ ++addl_frag_cnt;
+ }
+
+ if (!op_info->rem_addr.stag && !total_size)
+ op_info->rem_addr.stag = 0x1234;
+ hdr = LS_64(op_info->rem_addr.stag, IRDMAQPSQ_REMSTAG) |
+ LS_64(info->op_type, IRDMAQPSQ_OPCODE) |
+ LS_64((info->imm_data_valid ? 1 : 0), IRDMAQPSQ_IMMDATAFLAG) |
+ LS_64((info->report_rtt ? 1 : 0), IRDMAQPSQ_REPORTRTT) |
+ LS_64(addl_frag_cnt, IRDMAQPSQ_ADDFRAGCNT) |
+ LS_64((info->push_wqe ? 1 : 0), IRDMAQPSQ_PUSHWQE) |
+ LS_64(read_fence, IRDMAQPSQ_READFENCE) |
+ LS_64(info->local_fence, IRDMAQPSQ_LOCALFENCE) |
+ LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+
+ udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+ if (info->push_wqe) {
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ } else {
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_uk_rdma_read - rdma read command
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @inv_stag: flag for inv_stag
+ * @post_sq: flag to post sq
+ */
+int
+irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool inv_stag, bool post_sq)
+{
+ struct irdma_rdma_read *op_info;
+ int ret_code;
+ u32 i, byte_off, total_size = 0;
+ bool local_fence = false;
+ bool ord_fence = false;
+ u32 addl_frag_cnt;
+ __le64 *wqe;
+ u32 wqe_idx;
+ u16 quanta;
+ u64 hdr;
+
+ info->push_wqe = qp->push_db ? true : false;
+
+ op_info = &info->op.rdma_read;
+ if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
+ return EINVAL;
+
+ for (i = 0; i < op_info->num_lo_sges; i++)
+ total_size += op_info->lo_sg_list[i].len;
+
+ ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
+ if (ret_code)
+ return ret_code;
+
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return ENOSPC;
+
+ if (qp->rd_fence_rate && (qp->ord_cnt++ == qp->rd_fence_rate)) {
+ ord_fence = true;
+ qp->ord_cnt = 0;
+ }
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
+ addl_frag_cnt = op_info->num_lo_sges > 1 ?
+ (op_info->num_lo_sges - 1) : 0;
+ local_fence |= info->local_fence;
+
+ qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, op_info->lo_sg_list,
+ qp->swqe_polarity);
+ for (i = 1, byte_off = IRDMA_BYTE_32; i < op_info->num_lo_sges; ++i) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off,
+ &op_info->lo_sg_list[i],
+ qp->swqe_polarity);
+ byte_off += IRDMA_BYTE_16;
+ }
+
+ /* if not an odd number set valid bit in next fragment */
+ if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 &&
+ !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
+ qp->swqe_polarity);
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
+ ++addl_frag_cnt;
+ }
+ set_64bit_val(wqe, IRDMA_BYTE_16,
+ LS_64(op_info->rem_addr.tag_off, IRDMAQPSQ_FRAG_TO));
+ hdr = LS_64(op_info->rem_addr.stag, IRDMAQPSQ_REMSTAG) |
+ LS_64((info->report_rtt ? 1 : 0), IRDMAQPSQ_REPORTRTT) |
+ LS_64(addl_frag_cnt, IRDMAQPSQ_ADDFRAGCNT) |
+ LS_64((inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ),
+ IRDMAQPSQ_OPCODE) |
+ LS_64((info->push_wqe ? 1 : 0), IRDMAQPSQ_PUSHWQE) |
+ LS_64(info->read_fence || qp->force_fence || ord_fence ? 1 : 0,
+ IRDMAQPSQ_READFENCE) |
+ LS_64(local_fence, IRDMAQPSQ_LOCALFENCE) |
+ LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+
+ udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+ if (info->push_wqe) {
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ } else {
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_uk_send - rdma send command
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int
+irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_post_send *op_info;
+ u64 hdr;
+ u32 i, wqe_idx, total_size = 0, byte_off;
+ int ret_code;
+ u32 frag_cnt, addl_frag_cnt;
+ bool read_fence = false;
+ u16 quanta;
+
+ info->push_wqe = qp->push_db ? true : false;
+
+ op_info = &info->op.send;
+ if (qp->max_sq_frag_cnt < op_info->num_sges)
+ return EINVAL;
+
+ for (i = 0; i < op_info->num_sges; i++)
+ total_size += op_info->sg_list[i].len;
+
+ if (info->imm_data_valid)
+ frag_cnt = op_info->num_sges + 1;
+ else
+ frag_cnt = op_info->num_sges;
+ ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
+ if (ret_code)
+ return ret_code;
+
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return ENOSPC;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ read_fence |= info->read_fence;
+ addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
+ if (info->imm_data_valid) {
+ set_64bit_val(wqe, IRDMA_BYTE_0,
+ LS_64(info->imm_data, IRDMAQPSQ_IMMDATA));
+ i = 0;
+ } else {
+ qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, op_info->sg_list,
+ qp->swqe_polarity);
+ i = 1;
+ }
+
+ for (byte_off = IRDMA_BYTE_32; i < op_info->num_sges; i++) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
+ qp->swqe_polarity);
+ byte_off += IRDMA_BYTE_16;
+ }
+
+	/* if the fragment count is even, set the valid bit in the next (unused) fragment */
+ if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
+ frag_cnt) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
+ qp->swqe_polarity);
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
+ ++addl_frag_cnt;
+ }
+
+ set_64bit_val(wqe, IRDMA_BYTE_16,
+ LS_64(op_info->qkey, IRDMAQPSQ_DESTQKEY) |
+ LS_64(op_info->dest_qp, IRDMAQPSQ_DESTQPN));
+ hdr = LS_64(info->stag_to_inv, IRDMAQPSQ_REMSTAG) |
+ LS_64(op_info->ah_id, IRDMAQPSQ_AHID) |
+ LS_64((info->imm_data_valid ? 1 : 0), IRDMAQPSQ_IMMDATAFLAG) |
+ LS_64((info->report_rtt ? 1 : 0), IRDMAQPSQ_REPORTRTT) |
+ LS_64(info->op_type, IRDMAQPSQ_OPCODE) |
+ LS_64(addl_frag_cnt, IRDMAQPSQ_ADDFRAGCNT) |
+ LS_64((info->push_wqe ? 1 : 0), IRDMAQPSQ_PUSHWQE) |
+ LS_64(read_fence, IRDMAQPSQ_READFENCE) |
+ LS_64(info->local_fence, IRDMAQPSQ_LOCALFENCE) |
+ LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |
+ LS_64(info->udp_hdr, IRDMAQPSQ_UDPHEADER) |
+ LS_64(info->l4len, IRDMAQPSQ_L4LEN) |
+ LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+
+ udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+ if (info->push_wqe) {
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ } else {
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
+ * @wqe: wqe for setting fragment
+ * @op_info: info for setting bind wqe values
+ */
+static void
+irdma_set_mw_bind_wqe_gen_1(__le64 * wqe,
+ struct irdma_bind_window *op_info)
+{
+ set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)op_info->va);
+ set_64bit_val(wqe, IRDMA_BYTE_8,
+ LS_64(op_info->mw_stag, IRDMAQPSQ_PARENTMRSTAG) |
+ LS_64(op_info->mr_stag, IRDMAQPSQ_MWSTAG));
+ set_64bit_val(wqe, IRDMA_BYTE_16, op_info->bind_len);
+}
+
+/**
+ * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
+ * @dest: pointer to wqe
+ * @src: pointer to inline data
+ * @len: length of inline data to copy
+ * @polarity: compatibility parameter
+ */
+static void
+irdma_copy_inline_data_gen_1(u8 *dest, u8 *src, u32 len,
+ u8 polarity)
+{
+ if (len <= IRDMA_BYTE_16) {
+ irdma_memcpy(dest, src, len);
+ } else {
+ irdma_memcpy(dest, src, IRDMA_BYTE_16);
+ src += IRDMA_BYTE_16;
+ dest = dest + IRDMA_BYTE_32;
+ irdma_memcpy(dest, src, len - IRDMA_BYTE_16);
+ }
+}
+
+/**
+ * irdma_inline_data_size_to_quanta_gen_1 - calculate quanta from inline data size
+ * @data_size: size of the inline data in bytes
+ *
+ * Returns the number of WQE quanta needed for @data_size bytes of inline data.
+ */
+static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size) {
+ return data_size <= 16 ? IRDMA_QP_WQE_MIN_QUANTA : 2;
+}
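+
+/*
+ * Note: irdma_copy_inline_data_gen_1() above places up to 16 bytes of
+ * inline data in the first quantum, so any larger payload needs a second
+ * quantum, which is what the 16-byte threshold here reflects.
+ */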
+
+/**
+ * irdma_set_mw_bind_wqe - set mw bind in wqe
+ * @wqe: wqe for setting mw bind
+ * @op_info: info for setting wqe values
+ */
+static void
+irdma_set_mw_bind_wqe(__le64 * wqe,
+ struct irdma_bind_window *op_info)
+{
+ set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)op_info->va);
+ set_64bit_val(wqe, IRDMA_BYTE_8,
+ LS_64(op_info->mr_stag, IRDMAQPSQ_PARENTMRSTAG) |
+ LS_64(op_info->mw_stag, IRDMAQPSQ_MWSTAG));
+ set_64bit_val(wqe, IRDMA_BYTE_16, op_info->bind_len);
+}
+
+/**
+ * irdma_copy_inline_data - Copy inline data to wqe
+ * @dest: pointer to wqe
+ * @src: pointer to inline data
+ * @len: length of inline data to copy
+ * @polarity: polarity of wqe valid bit
+ */
+static void
+irdma_copy_inline_data(u8 *dest, u8 *src, u32 len, u8 polarity)
+{
+ u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
+ u32 copy_size;
+
+ dest += IRDMA_BYTE_8;
+ if (len <= IRDMA_BYTE_8) {
+ irdma_memcpy(dest, src, len);
+ return;
+ }
+
+ *((u64 *)dest) = *((u64 *)src);
+ len -= IRDMA_BYTE_8;
+ src += IRDMA_BYTE_8;
+ dest += IRDMA_BYTE_24; /* point to additional 32 byte quanta */
+
+ while (len) {
+ copy_size = len < 31 ? len : 31;
+ irdma_memcpy(dest, src, copy_size);
+ *(dest + 31) = inline_valid;
+ len -= copy_size;
+ dest += IRDMA_BYTE_32;
+ src += copy_size;
+ }
+}
+
+/**
+ * irdma_inline_data_size_to_quanta - calculate quanta from inline data size
+ * @data_size: size of the inline data in bytes
+ *
+ * Returns the number of WQE quanta needed for @data_size bytes of inline data.
+ */
+static u16 irdma_inline_data_size_to_quanta(u32 data_size) {
+ if (data_size <= 8)
+ return IRDMA_QP_WQE_MIN_QUANTA;
+ else if (data_size <= 39)
+ return 2;
+ else if (data_size <= 70)
+ return 3;
+ else if (data_size <= 101)
+ return 4;
+ else if (data_size <= 132)
+ return 5;
+ else if (data_size <= 163)
+ return 6;
+ else if (data_size <= 194)
+ return 7;
+ else
+ return 8;
+}
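+
+/*
+ * Note: the thresholds above follow the layout used by
+ * irdma_copy_inline_data(): the first 32-byte quantum holds up to 8 bytes
+ * of inline data and every additional quantum holds up to 31 bytes (its
+ * last byte carries the valid bit), i.e. for data_size > 8 the result is
+ * 1 + ceil((data_size - 8) / 31) quanta.
+ */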
+
+/**
+ * irdma_uk_inline_rdma_write - inline rdma write operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int
+irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_inline_rdma_write *op_info;
+ u64 hdr = 0;
+ u32 wqe_idx;
+ bool read_fence = false;
+ u16 quanta;
+
+ info->push_wqe = qp->push_db ? true : false;
+ op_info = &info->op.inline_rdma_write;
+
+ if (op_info->len > qp->max_inline_data)
+ return EINVAL;
+
+ quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
+ info);
+ if (!wqe)
+ return ENOSPC;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
+ read_fence |= info->read_fence;
+ set_64bit_val(wqe, IRDMA_BYTE_16,
+ LS_64(op_info->rem_addr.tag_off, IRDMAQPSQ_FRAG_TO));
+
+ hdr = LS_64(op_info->rem_addr.stag, IRDMAQPSQ_REMSTAG) |
+ LS_64(info->op_type, IRDMAQPSQ_OPCODE) |
+ LS_64(op_info->len, IRDMAQPSQ_INLINEDATALEN) |
+ LS_64(info->report_rtt ? 1 : 0, IRDMAQPSQ_REPORTRTT) |
+ LS_64(1, IRDMAQPSQ_INLINEDATAFLAG) |
+ LS_64(info->imm_data_valid ? 1 : 0, IRDMAQPSQ_IMMDATAFLAG) |
+ LS_64(info->push_wqe ? 1 : 0, IRDMAQPSQ_PUSHWQE) |
+ LS_64(read_fence, IRDMAQPSQ_READFENCE) |
+ LS_64(info->local_fence, IRDMAQPSQ_LOCALFENCE) |
+ LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+
+ if (info->imm_data_valid)
+ set_64bit_val(wqe, IRDMA_BYTE_0,
+ LS_64(info->imm_data, IRDMAQPSQ_IMMDATA));
+
+ qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
+ qp->swqe_polarity);
+ udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ if (info->push_wqe) {
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ } else {
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_uk_inline_send - inline send operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int
+irdma_uk_inline_send(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_post_inline_send *op_info;
+ u64 hdr;
+ u32 wqe_idx;
+ bool read_fence = false;
+ u16 quanta;
+
+ info->push_wqe = qp->push_db ? true : false;
+ op_info = &info->op.inline_send;
+
+ if (op_info->len > qp->max_inline_data)
+ return EINVAL;
+
+ quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
+ info);
+ if (!wqe)
+ return ENOSPC;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ set_64bit_val(wqe, IRDMA_BYTE_16,
+ LS_64(op_info->qkey, IRDMAQPSQ_DESTQKEY) |
+ LS_64(op_info->dest_qp, IRDMAQPSQ_DESTQPN));
+
+ read_fence |= info->read_fence;
+ hdr = LS_64(info->stag_to_inv, IRDMAQPSQ_REMSTAG) |
+ LS_64(op_info->ah_id, IRDMAQPSQ_AHID) |
+ LS_64(info->op_type, IRDMAQPSQ_OPCODE) |
+ LS_64(op_info->len, IRDMAQPSQ_INLINEDATALEN) |
+ LS_64((info->imm_data_valid ? 1 : 0), IRDMAQPSQ_IMMDATAFLAG) |
+ LS_64((info->report_rtt ? 1 : 0), IRDMAQPSQ_REPORTRTT) |
+ LS_64(1, IRDMAQPSQ_INLINEDATAFLAG) |
+ LS_64((info->push_wqe ? 1 : 0), IRDMAQPSQ_PUSHWQE) |
+ LS_64(read_fence, IRDMAQPSQ_READFENCE) |
+ LS_64(info->local_fence, IRDMAQPSQ_LOCALFENCE) |
+ LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |
+ LS_64(info->udp_hdr, IRDMAQPSQ_UDPHEADER) |
+ LS_64(info->l4len, IRDMAQPSQ_L4LEN) |
+ LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+
+ if (info->imm_data_valid)
+ set_64bit_val(wqe, IRDMA_BYTE_0,
+ LS_64(info->imm_data, IRDMAQPSQ_IMMDATA));
+ qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
+ qp->swqe_polarity);
+
+ udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ if (info->push_wqe) {
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ } else {
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_uk_stag_local_invalidate - stag invalidate operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int
+irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info,
+ bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_inv_local_stag *op_info;
+ u64 hdr;
+ u32 wqe_idx;
+ bool local_fence = false;
+ struct irdma_sge sge = {0};
+
+ info->push_wqe = qp->push_db ? true : false;
+ op_info = &info->op.inv_local_stag;
+ local_fence = info->local_fence;
+
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
+ 0, info);
+ if (!wqe)
+ return ENOSPC;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ sge.stag = op_info->target_stag;
+ qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, &sge, 0);
+
+ set_64bit_val(wqe, IRDMA_BYTE_16, 0);
+
+ hdr = LS_64(IRDMA_OP_TYPE_INV_STAG, IRDMAQPSQ_OPCODE) |
+ LS_64((info->push_wqe ? 1 : 0), IRDMAQPSQ_PUSHWQE) |
+ LS_64(info->read_fence, IRDMAQPSQ_READFENCE) |
+ LS_64(local_fence, IRDMAQPSQ_LOCALFENCE) |
+ LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+
+ udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ if (info->push_wqe) {
+ irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
+ post_sq);
+ } else {
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_uk_mw_bind - bind Memory Window
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int
+irdma_uk_mw_bind(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_bind_window *op_info;
+ u64 hdr;
+ u32 wqe_idx;
+ bool local_fence;
+
+ info->push_wqe = qp->push_db ? true : false;
+ op_info = &info->op.bind_window;
+ local_fence = info->local_fence;
+
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
+ 0, info);
+ if (!wqe)
+ return ENOSPC;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ qp->wqe_ops.iw_set_mw_bind_wqe(wqe, op_info);
+
+ hdr = LS_64(IRDMA_OP_TYPE_BIND_MW, IRDMAQPSQ_OPCODE) |
+ LS_64(((op_info->ena_reads << 2) | (op_info->ena_writes << 3)),
+ IRDMAQPSQ_STAGRIGHTS) |
+ LS_64((op_info->addressing_type == IRDMA_ADDR_TYPE_VA_BASED ? 1 : 0),
+ IRDMAQPSQ_VABASEDTO) |
+ LS_64((op_info->mem_window_type_1 ? 1 : 0),
+ IRDMAQPSQ_MEMWINDOWTYPE) |
+ LS_64((info->push_wqe ? 1 : 0), IRDMAQPSQ_PUSHWQE) |
+ LS_64(info->read_fence, IRDMAQPSQ_READFENCE) |
+ LS_64(local_fence, IRDMAQPSQ_LOCALFENCE) |
+ LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+
+ udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ if (info->push_wqe) {
+ irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
+ post_sq);
+ } else {
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_uk_post_receive - post receive wqe
+ * @qp: hw qp ptr
+ * @info: post rq information
+ */
+int
+irdma_uk_post_receive(struct irdma_qp_uk *qp,
+ struct irdma_post_rq_info *info)
+{
+ u32 wqe_idx, i, byte_off;
+ u32 addl_frag_cnt;
+ __le64 *wqe;
+ u64 hdr;
+
+ if (qp->max_rq_frag_cnt < info->num_sges)
+ return EINVAL;
+
+ wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
+ if (!wqe)
+ return ENOSPC;
+
+ qp->rq_wrid_array[wqe_idx] = info->wr_id;
+ addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
+ qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, info->sg_list,
+ qp->rwqe_polarity);
+
+ for (i = 1, byte_off = IRDMA_BYTE_32; i < info->num_sges; i++) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
+ qp->rwqe_polarity);
+ byte_off += 16;
+ }
+
+	/* if the fragment count is even, set the valid bit in the next (unused) fragment */
+ if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
+ info->num_sges) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
+ qp->rwqe_polarity);
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
+ ++addl_frag_cnt;
+ }
+
+ set_64bit_val(wqe, IRDMA_BYTE_16, 0);
+ hdr = LS_64(addl_frag_cnt, IRDMAQPSQ_ADDFRAGCNT) |
+ LS_64(qp->rwqe_polarity, IRDMAQPSQ_VALID);
+
+ udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ return 0;
+}
+
+/**
+ * irdma_uk_cq_resize - reset the cq buffer info
+ * @cq: cq to resize
+ * @cq_base: new cq buffer addr
+ * @cq_size: number of cqes
+ */
+void
+irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
+{
+ cq->cq_base = cq_base;
+ cq->cq_size = cq_size;
+ IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
+ cq->polarity = 1;
+}
+
+/**
+ * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers
+ * @cq: cq to resize
+ * @cq_cnt: the count of the resized cq buffers
+ */
+void
+irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
+{
+ u64 temp_val;
+ u16 sw_cq_sel;
+ u8 arm_next_se;
+ u8 arm_next;
+ u8 arm_seq_num;
+
+ get_64bit_val(cq->shadow_area, 32, &temp_val);
+
+ sw_cq_sel = (u16)RS_64(temp_val, IRDMA_CQ_DBSA_SW_CQ_SELECT);
+ sw_cq_sel += cq_cnt;
+
+ arm_seq_num = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_SEQ_NUM);
+ arm_next_se = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_NEXT_SE);
+ arm_next = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_NEXT);
+
+ temp_val = LS_64(arm_seq_num, IRDMA_CQ_DBSA_ARM_SEQ_NUM) |
+ LS_64(sw_cq_sel, IRDMA_CQ_DBSA_SW_CQ_SELECT) |
+ LS_64(arm_next_se, IRDMA_CQ_DBSA_ARM_NEXT_SE) |
+ LS_64(arm_next, IRDMA_CQ_DBSA_ARM_NEXT);
+
+ set_64bit_val(cq->shadow_area, 32, temp_val);
+}
+
+/**
+ * irdma_uk_cq_request_notification - cq notification request (doorbell)
+ * @cq: hw cq
+ * @cq_notify: notification type
+ */
+void
+irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
+ enum irdma_cmpl_notify cq_notify)
+{
+ u64 temp_val;
+ u16 sw_cq_sel;
+ u8 arm_next_se = 0;
+ u8 arm_next = 0;
+ u8 arm_seq_num;
+
+ cq->armed = true;
+ get_64bit_val(cq->shadow_area, IRDMA_BYTE_32, &temp_val);
+ arm_seq_num = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_SEQ_NUM);
+ arm_seq_num++;
+ sw_cq_sel = (u16)RS_64(temp_val, IRDMA_CQ_DBSA_SW_CQ_SELECT);
+ arm_next_se = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_NEXT_SE);
+ arm_next_se |= 1;
+ if (cq_notify == IRDMA_CQ_COMPL_EVENT)
+ arm_next = 1;
+ temp_val = LS_64(arm_seq_num, IRDMA_CQ_DBSA_ARM_SEQ_NUM) |
+ LS_64(sw_cq_sel, IRDMA_CQ_DBSA_SW_CQ_SELECT) |
+ LS_64(arm_next_se, IRDMA_CQ_DBSA_ARM_NEXT_SE) |
+ LS_64(arm_next, IRDMA_CQ_DBSA_ARM_NEXT);
+
+ set_64bit_val(cq->shadow_area, IRDMA_BYTE_32, temp_val);
+
+	udma_to_device_barrier();	/* make sure the shadow area is updated before the doorbell is rung */
+
+ db_wr32(cq->cq_id, cq->cqe_alloc_db);
+}
+
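+/**
+ * irdma_copy_quanta - copy one 32-byte quantum of a WQE
+ * @dst: destination WQE
+ * @src: source WQE
+ * @offset: byte offset of the quantum within the WQE
+ * @flip: flip the polarity of the copied WQE valid bits
+ * @barrier: issue a write barrier before the qword carrying the valid bit
+ *
+ * Copies the qwords at @offset, @offset + 8 and @offset + 24 from @src to @dst.
+ */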
+static void
+irdma_copy_quanta(__le64 * dst, __le64 * src, u32 offset, bool flip,
+ bool barrier)
+{
+ __le64 val;
+
+ get_64bit_val(src, offset, &val);
+ set_64bit_val(dst, offset, val);
+
+ get_64bit_val(src, offset + 8, &val);
+ if (flip)
+ val ^= IRDMAQPSQ_VALID_M;
+ set_64bit_val(dst, offset + 8, val);
+
+ get_64bit_val(src, offset + 24, &val);
+ if (flip)
+ val ^= IRDMAQPSQ_VALID_M;
+ if (barrier)
+ udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
+ set_64bit_val(dst, offset + 24, val);
+}
+
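+/**
+ * irdma_copy_wqe - copy a multi-quantum WQE
+ * @dst: destination WQE
+ * @src: source WQE
+ * @wqe_quanta: number of 32-byte quanta in the WQE
+ * @flip_polarity: flip the WQE valid-bit polarity while copying
+ *
+ * The trailing quanta are copied first; the first quantum, which carries
+ * the WQE valid bit, is copied last behind a write barrier.
+ */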
+static void
+irdma_copy_wqe(__le64 * dst, __le64 * src, u8 wqe_quanta,
+ bool flip_polarity)
+{
+ u32 offset;
+
+ offset = 32;
+ while (--wqe_quanta) {
+ irdma_copy_quanta(dst, src, offset, flip_polarity, false);
+ offset += 32;
+ }
+
+ irdma_copy_quanta(dst, src, 0, flip_polarity, true);
+}
+
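+/**
+ * irdma_repost_rq_wqes - repost RQ WQEs skipped by out-of-order completions
+ * @qp: hw qp ptr
+ * @start_idx: first RQ index to repost
+ * @end_idx: RQ index to stop at (exclusive)
+ */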
+static void
+irdma_repost_rq_wqes(struct irdma_qp_uk *qp, u32 start_idx,
+ u32 end_idx)
+{
+ __le64 *dst_wqe, *src_wqe;
+ u32 wqe_idx;
+ u8 wqe_quanta = qp->rq_wqe_size_multiplier;
+ bool flip_polarity;
+ u64 val;
+
+ libirdma_debug("reposting_wqes: from start_idx=%d to end_idx = %d\n", start_idx, end_idx);
+ pthread_spin_lock(qp->lock);
+ while (start_idx != end_idx) {
+ IRDMA_RING_SET_TAIL(qp->rq_ring, start_idx + 1);
+ src_wqe = qp->rq_base[start_idx * qp->rq_wqe_size_multiplier].elem;
+ dst_wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
+
+ /* Check to see if polarity has changed */
+ get_64bit_val(src_wqe, 24, &val);
+ if (RS_64(val, IRDMAQPSQ_VALID) != qp->rwqe_polarity)
+ flip_polarity = true;
+ else
+ flip_polarity = false;
+
+ qp->rq_wrid_array[wqe_idx] = qp->rq_wrid_array[start_idx];
+ irdma_copy_wqe(dst_wqe, src_wqe, wqe_quanta, flip_polarity);
+
+ start_idx = (start_idx + 1) % qp->rq_size;
+ }
+
+ pthread_spin_unlock(qp->lock);
+}
+
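+/**
+ * irdma_check_rq_cqe - validate the RQ index reported by a completion
+ * @qp: hw qp ptr
+ * @array_idx: RQ wrid array index from the CQE; rewritten to the expected
+ * index when IRDMA_FEATURE_RELAX_RQ_ORDER is not supported
+ *
+ * Returns 0 on success, -1 on an unexpected (out-of-order) index that
+ * cannot be relaxed.
+ */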
+static int
+irdma_check_rq_cqe(struct irdma_qp_uk *qp, u32 *array_idx)
+{
+ u32 exp_idx = (qp->last_rx_cmpl_idx + 1) % qp->rq_size;
+
+ if (*array_idx != exp_idx) {
+ if (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RELAX_RQ_ORDER) {
+ irdma_repost_rq_wqes(qp, exp_idx, *array_idx);
+ qp->last_rx_cmpl_idx = *array_idx;
+
+ return 0;
+ }
+
+ *array_idx = exp_idx;
+ qp->last_rx_cmpl_idx = exp_idx;
+
+ return -1;
+ }
+
+ qp->last_rx_cmpl_idx = *array_idx;
+
+ return 0;
+}
+
+/**
+ * irdma_skip_duplicate_flush_cmpl - check last cmpl and update wqe if needed
+ *
+ * @ring: sq/rq ring
+ * @flush_seen: whether a flush completion has already been seen for this ring
+ * @comp_status: completion status
+ * @wqe_idx: new value of WQE index returned if there is more work on ring
+ */
+static inline int
+irdma_skip_duplicate_flush_cmpl(struct irdma_ring ring, u8 flush_seen,
+ enum irdma_cmpl_status comp_status,
+ u32 *wqe_idx)
+{
+ if (flush_seen) {
+ if (IRDMA_RING_MORE_WORK(ring))
+ *wqe_idx = ring.tail;
+ else
+ return ENOENT;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_uk_cq_poll_cmpl - get cq completion info
+ * @cq: hw cq
+ * @info: cq poll information returned
+ */
+int
+irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
+ struct irdma_cq_poll_info *info)
+{
+ u64 comp_ctx, qword0, qword2, qword3;
+ __le64 *cqe;
+ struct irdma_qp_uk *qp;
+ struct irdma_ring *pring = NULL;
+ u32 wqe_idx, q_type;
+ int ret_code;
+ bool move_cq_head = true;
+ u8 polarity;
+ bool ext_valid;
+ __le64 *ext_cqe;
+
+ if (cq->avoid_mem_cflct)
+ cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
+ else
+ cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);
+
+ get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
+ polarity = (u8)RS_64(qword3, IRDMA_CQ_VALID);
+ if (polarity != cq->polarity)
+ return ENOENT;
+
+ /* Ensure CQE contents are read after valid bit is checked */
+ udma_from_device_barrier();
+
+ ext_valid = (bool)RS_64(qword3, IRDMA_CQ_EXTCQE);
+ if (ext_valid) {
+ u64 qword6, qword7;
+ u32 peek_head;
+
+ if (cq->avoid_mem_cflct) {
+ ext_cqe = (__le64 *) ((u8 *)cqe + 32);
+ get_64bit_val(ext_cqe, IRDMA_BYTE_24, &qword7);
+ polarity = (u8)RS_64(qword7, IRDMA_CQ_VALID);
+ } else {
+ peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
+ ext_cqe = cq->cq_base[peek_head].buf;
+ get_64bit_val(ext_cqe, IRDMA_BYTE_24, &qword7);
+ polarity = (u8)RS_64(qword7, IRDMA_CQ_VALID);
+ if (!peek_head)
+ polarity ^= 1;
+ }
+ if (polarity != cq->polarity)
+ return ENOENT;
+
+ /* Ensure ext CQE contents are read after ext valid bit is checked */
+ udma_from_device_barrier();
+
+ info->imm_valid = (bool)RS_64(qword7, IRDMA_CQ_IMMVALID);
+ if (info->imm_valid) {
+ u64 qword4;
+
+ get_64bit_val(ext_cqe, IRDMA_BYTE_0, &qword4);
+ info->imm_data = (u32)RS_64(qword4, IRDMA_CQ_IMMDATALOW32);
+ }
+ info->ud_smac_valid = (bool)RS_64(qword7, IRDMA_CQ_UDSMACVALID);
+ info->ud_vlan_valid = (bool)RS_64(qword7, IRDMA_CQ_UDVLANVALID);
+ if (info->ud_smac_valid || info->ud_vlan_valid) {
+ get_64bit_val(ext_cqe, IRDMA_BYTE_16, &qword6);
+ if (info->ud_vlan_valid)
+ info->ud_vlan = (u16)RS_64(qword6, IRDMA_CQ_UDVLAN);
+ if (info->ud_smac_valid) {
+ info->ud_smac[5] = qword6 & 0xFF;
+ info->ud_smac[4] = (qword6 >> 8) & 0xFF;
+ info->ud_smac[3] = (qword6 >> 16) & 0xFF;
+ info->ud_smac[2] = (qword6 >> 24) & 0xFF;
+ info->ud_smac[1] = (qword6 >> 32) & 0xFF;
+ info->ud_smac[0] = (qword6 >> 40) & 0xFF;
+ }
+ }
+ } else {
+ info->imm_valid = false;
+ info->ud_smac_valid = false;
+ info->ud_vlan_valid = false;
+ }
+
+ q_type = (u8)RS_64(qword3, IRDMA_CQ_SQ);
+ info->error = (bool)RS_64(qword3, IRDMA_CQ_ERROR);
+ info->push_dropped = (bool)RS_64(qword3, IRDMACQ_PSHDROP);
+ info->ipv4 = (bool)RS_64(qword3, IRDMACQ_IPV4);
+ if (info->error) {
+ info->major_err = RS_64(qword3, IRDMA_CQ_MAJERR);
+ info->minor_err = RS_64(qword3, IRDMA_CQ_MINERR);
+ if (info->major_err == IRDMA_FLUSH_MAJOR_ERR) {
+ info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
+ /* Set the min error to standard flush error code for remaining cqes */
+ if (info->minor_err != FLUSH_GENERAL_ERR) {
+ qword3 &= ~IRDMA_CQ_MINERR_M;
+ qword3 |= LS_64(FLUSH_GENERAL_ERR, IRDMA_CQ_MINERR);
+ set_64bit_val(cqe, IRDMA_BYTE_24, qword3);
+ }
+ } else {
+ info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
+ }
+ } else {
+ info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
+ }
+
+ get_64bit_val(cqe, IRDMA_BYTE_0, &qword0);
+ get_64bit_val(cqe, IRDMA_BYTE_16, &qword2);
+
+ info->tcp_seq_num_rtt = (u32)RS_64(qword0, IRDMACQ_TCPSEQNUMRTT);
+ info->qp_id = (u32)RS_64(qword2, IRDMACQ_QPID);
+ info->ud_src_qpn = (u32)RS_64(qword2, IRDMACQ_UDSRCQPN);
+
+ get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
+
+ info->solicited_event = (bool)RS_64(qword3, IRDMACQ_SOEVENT);
+ qp = (struct irdma_qp_uk *)(irdma_uintptr) comp_ctx;
+ if (!qp || qp->destroy_pending) {
+ ret_code = EFAULT;
+ goto exit;
+ }
+ wqe_idx = (u32)RS_64(qword3, IRDMA_CQ_WQEIDX);
+ info->qp_handle = (irdma_qp_handle) (irdma_uintptr) qp;
+
+ if (q_type == IRDMA_CQE_QTYPE_RQ) {
+ u32 array_idx;
+
+ ret_code = irdma_skip_duplicate_flush_cmpl(qp->rq_ring,
+ qp->rq_flush_seen,
+ info->comp_status,
+ &wqe_idx);
+ if (ret_code != 0)
+ goto exit;
+
+ array_idx = wqe_idx / qp->rq_wqe_size_multiplier;
+
+ if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
+ info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
+ if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
+ ret_code = ENOENT;
+ goto exit;
+ }
+
+ info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
+ info->signaled = 1;
+ array_idx = qp->rq_ring.tail;
+ } else {
+ info->wr_id = qp->rq_wrid_array[array_idx];
+ info->signaled = 1;
+ if (irdma_check_rq_cqe(qp, &array_idx)) {
+ info->wr_id = qp->rq_wrid_array[array_idx];
+ info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
+ IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
+ return 0;
+ }
+ }
+
+ info->bytes_xfered = (u32)RS_64(qword0, IRDMACQ_PAYLDLEN);
+
+ if (info->imm_valid)
+ info->op_type = IRDMA_OP_TYPE_REC_IMM;
+ else
+ info->op_type = IRDMA_OP_TYPE_REC;
+
+ if (qword3 & IRDMACQ_STAG_M) {
+ info->stag_invalid_set = true;
+ info->inv_stag = (u32)RS_64(qword2, IRDMACQ_INVSTAG);
+ } else {
+ info->stag_invalid_set = false;
+ }
+ IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
+ if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
+ qp->rq_flush_seen = true;
+ if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
+ qp->rq_flush_complete = true;
+ else
+ move_cq_head = false;
+ }
+ pring = &qp->rq_ring;
+ } else { /* q_type is IRDMA_CQE_QTYPE_SQ */
+ if (qp->first_sq_wq) {
+ if (wqe_idx + 1 >= qp->conn_wqes)
+ qp->first_sq_wq = false;
+
+ if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) {
+ IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
+ IRDMA_RING_MOVE_TAIL(cq->cq_ring);
+ set_64bit_val(cq->shadow_area, IRDMA_BYTE_0,
+ IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
+ memset(info, 0,
+ sizeof(struct irdma_cq_poll_info));
+ return irdma_uk_cq_poll_cmpl(cq, info);
+ }
+ }
+ /* cease posting push mode on push drop */
+ if (info->push_dropped) {
+ qp->push_mode = false;
+ qp->push_dropped = true;
+ }
+ ret_code = irdma_skip_duplicate_flush_cmpl(qp->sq_ring,
+ qp->sq_flush_seen,
+ info->comp_status,
+ &wqe_idx);
+ if (ret_code != 0)
+ goto exit;
+ if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
+ info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
+ info->signaled = qp->sq_wrtrk_array[wqe_idx].signaled;
+ if (!info->comp_status)
+ info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
+ info->op_type = (u8)RS_64(qword3, IRDMACQ_OP);
+ IRDMA_RING_SET_TAIL(qp->sq_ring,
+ wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
+ } else {
+ if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
+ ret_code = ENOENT;
+ goto exit;
+ }
+
+ do {
+ __le64 *sw_wqe;
+ u64 wqe_qword;
+ u8 op_type;
+ u32 tail;
+
+ tail = qp->sq_ring.tail;
+ sw_wqe = qp->sq_base[tail].elem;
+ get_64bit_val(sw_wqe, IRDMA_BYTE_24,
+ &wqe_qword);
+ op_type = (u8)RS_64(wqe_qword, IRDMAQPSQ_OPCODE);
+ info->op_type = op_type;
+ IRDMA_RING_SET_TAIL(qp->sq_ring,
+ tail + qp->sq_wrtrk_array[tail].quanta);
+ if (op_type != IRDMAQP_OP_NOP) {
+ info->wr_id = qp->sq_wrtrk_array[tail].wrid;
+ info->signaled = qp->sq_wrtrk_array[tail].signaled;
+ info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
+ break;
+ }
+ } while (1);
+ qp->sq_flush_seen = true;
+ if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
+ qp->sq_flush_complete = true;
+ }
+ pring = &qp->sq_ring;
+ }
+
+ ret_code = 0;
+
+exit:
+ if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED)
+ if (pring && IRDMA_RING_MORE_WORK(*pring))
+ move_cq_head = false;
+
+ if (move_cq_head) {
+ IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
+ if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
+ cq->polarity ^= 1;
+
+ if (ext_valid && !cq->avoid_mem_cflct) {
+ IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
+ if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
+ cq->polarity ^= 1;
+ }
+
+ IRDMA_RING_MOVE_TAIL(cq->cq_ring);
+ if (!cq->avoid_mem_cflct && ext_valid)
+ IRDMA_RING_MOVE_TAIL(cq->cq_ring);
+ set_64bit_val(cq->shadow_area, IRDMA_BYTE_0,
+ IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
+ } else {
+ qword3 &= ~IRDMA_CQ_WQEIDX_M;
+ qword3 |= LS_64(pring->tail, IRDMA_CQ_WQEIDX);
+ set_64bit_val(cqe, IRDMA_BYTE_24, qword3);
+ }
+
+ return ret_code;
+}
+
+/**
+ * irdma_qp_round_up - return round up qp wq depth
+ * @wqdepth: wq depth in quanta to round up
+ */
+static int
+irdma_qp_round_up(u32 wqdepth)
+{
+ int scount = 1;
+
+ for (wqdepth--; scount <= 16; scount *= 2)
+ wqdepth |= wqdepth >> scount;
+
+ return ++wqdepth;
+}
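+
+/*
+ * Note: irdma_qp_round_up() returns the smallest power of two that is
+ * greater than or equal to @wqdepth, e.g. a requested depth of 20 quanta
+ * is rounded up to 32 while 16 is returned unchanged.
+ */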
+
+/**
+ * irdma_get_wqe_shift - get shift count for maximum wqe size
+ * @uk_attrs: qp HW attributes
+ * @sge: Maximum Scatter Gather Elements per wqe
+ * @inline_data: Maximum inline data size
+ * @shift: Returns the shift needed based on sge
+ *
+ * Shift is used to left-shift the base WQE size based on the number of SGEs
+ * and the inline data size.
+ * For 1 SGE or inline data <= 8, shift = 0 (WQE size of 32 bytes).
+ * For 2 or 3 SGEs or inline data <= 39, shift = 1 (WQE size of 64 bytes).
+ * For 4-7 SGEs or inline data <= 101, shift = 2 (WQE size of 128 bytes).
+ * Otherwise, shift = 3 (WQE size of 256 bytes).
+ * (GEN_1 hardware uses inline thresholds of 16/48 bytes and a maximum shift of 2.)
+ */
+void
+irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
+ u32 inline_data, u8 *shift)
+{
+ *shift = 0;
+ if (uk_attrs->hw_rev >= IRDMA_GEN_2) {
+ if (sge > 1 || inline_data > 8) {
+ if (sge < 4 && inline_data <= 39)
+ *shift = 1;
+ else if (sge < 8 && inline_data <= 101)
+ *shift = 2;
+ else
+ *shift = 3;
+ }
+ } else if (sge > 1 || inline_data > 16) {
+ *shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
+ }
+}
+
+/**
+ * irdma_get_sqdepth - get SQ depth (quanta)
+ * @max_hw_wq_quanta: HW SQ size limit
+ * @sq_size: SQ size
+ * @shift: shift which determines size of WQE
+ * @sqdepth: depth of SQ
+ */
+int
+irdma_get_sqdepth(u32 max_hw_wq_quanta, u32 sq_size, u8 shift, u32 *sqdepth)
+{
+ *sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);
+
+ if (*sqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
+ *sqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
+ else if (*sqdepth > max_hw_wq_quanta)
+ return EINVAL;
+
+ return 0;
+}
+
+/**
+ * irdma_get_rqdepth - get RQ/SRQ depth (quanta)
+ * @max_hw_rq_quanta: HW RQ/SRQ size limit
+ * @rq_size: RQ/SRQ size
+ * @shift: shift which determines size of WQE
+ * @rqdepth: depth of RQ/SRQ
+ */
+int
+irdma_get_rqdepth(u32 max_hw_rq_quanta, u32 rq_size, u8 shift, u32 *rqdepth)
+{
+ *rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);
+
+ if (*rqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
+ *rqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
+ else if (*rqdepth > max_hw_rq_quanta)
+ return EINVAL;
+
+ return 0;
+}
+
+static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
+ .iw_copy_inline_data = irdma_copy_inline_data,
+ .iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
+ .iw_set_fragment = irdma_set_fragment,
+ .iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
+};
+
+static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
+ .iw_copy_inline_data = irdma_copy_inline_data_gen_1,
+ .iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
+ .iw_set_fragment = irdma_set_fragment_gen_1,
+ .iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
+};
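+
+/*
+ * irdma_uk_qp_init() selects one of the two op tables above based on
+ * uk_attrs->hw_rev: IRDMA_GEN_1 hardware uses the *_gen_1 variants, newer
+ * hardware uses the default table.
+ */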
+
+/**
+ * irdma_setup_connection_wqes - setup WQEs necessary to complete
+ * connection.
+ * @qp: hw qp (user and kernel)
+ * @info: qp initialization info
+ */
+static void
+irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
+ struct irdma_qp_uk_init_info *info)
+{
+ u16 move_cnt = 1;
+
+ if (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE)
+ move_cnt = 3;
+
+ qp->conn_wqes = move_cnt;
+ IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
+ IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
+ IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
+}
+
+/**
+ * irdma_uk_qp_init - initialize shared qp
+ * @qp: hw qp (user and kernel)
+ * @info: qp initialization info
+ *
+ * Initializes the variables used in both user and kernel mode.
+ * The WQE size depends on the maximum number of fragments
+ * allowed, and the WQE size multiplied by the number of WQEs
+ * must equal the amount of memory allocated for the SQ and RQ.
+ */
+int
+irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
+{
+ int ret_code = 0;
+ u32 sq_ring_size;
+ u8 sqshift, rqshift;
+
+ qp->uk_attrs = info->uk_attrs;
+ if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
+ info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
+ return EINVAL;
+
+ irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift);
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) {
+ irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt,
+ info->max_inline_data, &sqshift);
+ if (info->abi_ver > 4)
+ rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
+ } else {
+ irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt + 1,
+ info->max_inline_data, &sqshift);
+ }
+ qp->qp_caps = info->qp_caps;
+ qp->sq_base = info->sq;
+ qp->rq_base = info->rq;
+ qp->qp_type = info->type ? info->type : IRDMA_QP_TYPE_IWARP;
+ qp->shadow_area = info->shadow_area;
+ qp->sq_wrtrk_array = info->sq_wrtrk_array;
+
+ qp->rq_wrid_array = info->rq_wrid_array;
+ qp->wqe_alloc_db = info->wqe_alloc_db;
+ qp->last_rx_cmpl_idx = 0xffffffff;
+ qp->rd_fence_rate = info->rd_fence_rate;
+ qp->qp_id = info->qp_id;
+ qp->sq_size = info->sq_size;
+ qp->push_mode = false;
+ qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
+ sq_ring_size = qp->sq_size << sqshift;
+ IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
+ IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
+ if (info->first_sq_wq) {
+ irdma_setup_connection_wqes(qp, info);
+ qp->swqe_polarity = 1;
+ qp->first_sq_wq = true;
+ } else {
+ qp->swqe_polarity = 0;
+ }
+ qp->swqe_polarity_deferred = 1;
+ qp->rwqe_polarity = 0;
+ qp->rq_size = info->rq_size;
+ qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
+ qp->max_inline_data = info->max_inline_data;
+ qp->rq_wqe_size = rqshift;
+ IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
+ qp->rq_wqe_size_multiplier = 1 << rqshift;
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
+ qp->wqe_ops = iw_wqe_uk_ops_gen_1;
+ else
+ qp->wqe_ops = iw_wqe_uk_ops;
+ return ret_code;
+}
+
+/**
+ * irdma_uk_cq_init - initialize shared cq (user and kernel)
+ * @cq: hw cq
+ * @info: hw cq initialization info
+ */
+int
+irdma_uk_cq_init(struct irdma_cq_uk *cq, struct irdma_cq_uk_init_info *info)
+{
+ cq->cq_base = info->cq_base;
+ cq->cq_id = info->cq_id;
+ cq->cq_size = info->cq_size;
+ cq->cqe_alloc_db = info->cqe_alloc_db;
+ cq->cq_ack_db = info->cq_ack_db;
+ cq->shadow_area = info->shadow_area;
+ cq->avoid_mem_cflct = info->avoid_mem_cflct;
+ IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
+ cq->polarity = 1;
+
+ return 0;
+}
+
+/**
+ * irdma_uk_clean_cq - clean cq entries
+ * @q: completion context
+ * @cq: cq to clean
+ */
+int
+irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
+{
+ __le64 *cqe;
+ u64 qword3, comp_ctx;
+ u32 cq_head;
+ u8 polarity, temp;
+
+ cq_head = cq->cq_ring.head;
+ temp = cq->polarity;
+ do {
+ if (cq->avoid_mem_cflct)
+ cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
+ else
+ cqe = cq->cq_base[cq_head].buf;
+ get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
+ polarity = (u8)RS_64(qword3, IRDMA_CQ_VALID);
+
+ if (polarity != temp)
+ break;
+
+ get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
+ if ((void *)(irdma_uintptr) comp_ctx == q)
+ set_64bit_val(cqe, IRDMA_BYTE_8, 0);
+
+ cq_head = (cq_head + 1) % cq->cq_ring.size;
+ if (!cq_head)
+ temp ^= 1;
+ } while (true);
+ return 0;
+}
+
+/**
+ * irdma_nop - post a nop
+ * @qp: hw qp ptr
+ * @wr_id: work request id
+ * @signaled: signaled for completion
+ * @post_sq: ring doorbell
+ */
+int
+irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+ u32 wqe_idx;
+ struct irdma_post_sq_info info = {0};
+
+ info.push_wqe = false;
+ info.wr_id = wr_id;
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
+ 0, &info);
+ if (!wqe)
+ return ENOSPC;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ set_64bit_val(wqe, IRDMA_BYTE_0, 0);
+ set_64bit_val(wqe, IRDMA_BYTE_8, 0);
+ set_64bit_val(wqe, IRDMA_BYTE_16, 0);
+
+ hdr = LS_64(IRDMAQP_OP_NOP, IRDMAQPSQ_OPCODE) |
+ LS_64(signaled, IRDMAQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);
+
+ udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
+ * @frag_cnt: number of fragments
+ * @quanta: quanta for frag_cnt
+ */
+int
+irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
+{
+ switch (frag_cnt) {
+ case 0:
+ case 1:
+ *quanta = IRDMA_QP_WQE_MIN_QUANTA;
+ break;
+ case 2:
+ case 3:
+ *quanta = 2;
+ break;
+ case 4:
+ case 5:
+ *quanta = 3;
+ break;
+ case 6:
+ case 7:
+ *quanta = 4;
+ break;
+ case 8:
+ case 9:
+ *quanta = 5;
+ break;
+ case 10:
+ case 11:
+ *quanta = 6;
+ break;
+ case 12:
+ case 13:
+ *quanta = 7;
+ break;
+ case 14:
+ case 15: /* when immediate data is present */
+ *quanta = 8;
+ break;
+ default:
+ return EINVAL;
+ }
+
+ return 0;
+}
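+
+/*
+ * Note: the mapping above follows the SQ WQE layout used in this file: the
+ * first 32-byte quantum carries one 16-byte fragment plus the WQE header,
+ * and every additional quantum carries two more fragments, i.e.
+ * quanta = 1 + ceil((frag_cnt - 1) / 2) for frag_cnt >= 1.
+ */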
+
+/**
+ * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
+ * @frag_cnt: number of fragments
+ * @wqe_size: size in bytes given frag_cnt
+ */
+int
+irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
+{
+ switch (frag_cnt) {
+ case 0:
+ case 1:
+ *wqe_size = 32;
+ break;
+ case 2:
+ case 3:
+ *wqe_size = 64;
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ *wqe_size = 128;
+ break;
+ case 8:
+ case 9:
+ case 10:
+ case 11:
+ case 12:
+ case 13:
+ case 14:
+ *wqe_size = 256;
+ break;
+ default:
+ return EINVAL;
+ }
+
+ return 0;
+}
diff --git a/contrib/ofed/libirdma/irdma_umain.c b/contrib/ofed/libirdma/irdma_umain.c
new file mode 100644
index 000000000000..c26ac69d9014
--- /dev/null
+++ b/contrib/ofed/libirdma/irdma_umain.c
@@ -0,0 +1,255 @@
+/*-
+ * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+ *
+ * Copyright (c) 2021 - 2022 Intel Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+
+#include <sys/mman.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include "irdma_umain.h"
+#include "irdma-abi.h"
+
+#include "ice_devids.h"
+#include "i40e_devids.h"
+
+#include "abi.h"
+
+/**
+ * Driver version
+ */
+char libirdma_version[] = "0.0.51-k";
+
+unsigned int irdma_dbg;
+
+#define INTEL_HCA(d) \
+ { .vendor = PCI_VENDOR_ID_INTEL, \
+ .device = d }
+
+struct hca_info {
+ unsigned vendor;
+ unsigned device;
+};
+
+static const struct hca_info hca_table[] = {
+ INTEL_HCA(ICE_DEV_ID_E823L_BACKPLANE),
+ INTEL_HCA(ICE_DEV_ID_E823L_SFP),
+ INTEL_HCA(ICE_DEV_ID_E823L_10G_BASE_T),
+ INTEL_HCA(ICE_DEV_ID_E823L_1GBE),
+ INTEL_HCA(ICE_DEV_ID_E823L_QSFP),
+ INTEL_HCA(ICE_DEV_ID_E810C_BACKPLANE),
+ INTEL_HCA(ICE_DEV_ID_E810C_QSFP),
+ INTEL_HCA(ICE_DEV_ID_E810C_SFP),
+ INTEL_HCA(ICE_DEV_ID_E810_XXV_BACKPLANE),
+ INTEL_HCA(ICE_DEV_ID_E810_XXV_QSFP),
+ INTEL_HCA(ICE_DEV_ID_E810_XXV_SFP),
+ INTEL_HCA(ICE_DEV_ID_E823C_BACKPLANE),
+ INTEL_HCA(ICE_DEV_ID_E823C_QSFP),
+ INTEL_HCA(ICE_DEV_ID_E823C_SFP),
+ INTEL_HCA(ICE_DEV_ID_E823C_10G_BASE_T),
+ INTEL_HCA(ICE_DEV_ID_E823C_SGMII),
+ INTEL_HCA(ICE_DEV_ID_C822N_BACKPLANE),
+ INTEL_HCA(ICE_DEV_ID_C822N_QSFP),
+ INTEL_HCA(ICE_DEV_ID_C822N_SFP),
+ INTEL_HCA(ICE_DEV_ID_E822C_10G_BASE_T),
+ INTEL_HCA(ICE_DEV_ID_E822C_SGMII),
+ INTEL_HCA(ICE_DEV_ID_E822L_BACKPLANE),
+ INTEL_HCA(ICE_DEV_ID_E822L_SFP),
+ INTEL_HCA(ICE_DEV_ID_E822L_10G_BASE_T),
+ INTEL_HCA(ICE_DEV_ID_E822L_SGMII),
+};
+
+static struct ibv_context_ops irdma_ctx_ops = {
+ .query_device = irdma_uquery_device,
+ .query_port = irdma_uquery_port,
+ .alloc_pd = irdma_ualloc_pd,
+ .dealloc_pd = irdma_ufree_pd,
+ .reg_mr = irdma_ureg_mr,
+ .rereg_mr = NULL,
+ .dereg_mr = irdma_udereg_mr,
+ .alloc_mw = irdma_ualloc_mw,
+ .dealloc_mw = irdma_udealloc_mw,
+ .bind_mw = irdma_ubind_mw,
+ .create_cq = irdma_ucreate_cq,
+ .poll_cq = irdma_upoll_cq,
+ .req_notify_cq = irdma_uarm_cq,
+ .cq_event = irdma_cq_event,
+ .resize_cq = irdma_uresize_cq,
+ .destroy_cq = irdma_udestroy_cq,
+ .create_qp = irdma_ucreate_qp,
+ .query_qp = irdma_uquery_qp,
+ .modify_qp = irdma_umodify_qp,
+ .destroy_qp = irdma_udestroy_qp,
+ .post_send = irdma_upost_send,
+ .post_recv = irdma_upost_recv,
+ .create_ah = irdma_ucreate_ah,
+ .destroy_ah = irdma_udestroy_ah,
+ .attach_mcast = irdma_uattach_mcast,
+ .detach_mcast = irdma_udetach_mcast,
+};
+
+static int
+irdma_init_context(struct verbs_device *vdev,
+ struct ibv_context *ctx, int cmd_fd)
+{
+ struct irdma_uvcontext *iwvctx;
+ struct irdma_get_context cmd = {};
+ struct irdma_get_context_resp resp = {};
+ struct ibv_pd *ibv_pd;
+ u64 mmap_key;
+
+ iwvctx = container_of(ctx, struct irdma_uvcontext, ibv_ctx);
+ iwvctx->ibv_ctx.cmd_fd = cmd_fd;
+ cmd.userspace_ver = IRDMA_ABI_VER;
+ if (ibv_cmd_get_context(&iwvctx->ibv_ctx, &cmd.ibv_cmd, sizeof(cmd),
+ &resp.ibv_resp, sizeof(resp))) {
+ /* failed first attempt */
+ printf("%s %s get context failure\n", __FILE__, __func__);
+ return -1;
+ }
+ iwvctx->uk_attrs.feature_flags = resp.feature_flags;
+ iwvctx->uk_attrs.hw_rev = resp.hw_rev;
+ iwvctx->uk_attrs.max_hw_wq_frags = resp.max_hw_wq_frags;
+ iwvctx->uk_attrs.max_hw_read_sges = resp.max_hw_read_sges;
+ iwvctx->uk_attrs.max_hw_inline = resp.max_hw_inline;
+ iwvctx->uk_attrs.max_hw_rq_quanta = resp.max_hw_rq_quanta;
+ iwvctx->uk_attrs.max_hw_wq_quanta = resp.max_hw_wq_quanta;
+ iwvctx->uk_attrs.max_hw_sq_chunk = resp.max_hw_sq_chunk;
+ iwvctx->uk_attrs.max_hw_cq_size = resp.max_hw_cq_size;
+ iwvctx->uk_attrs.min_hw_cq_size = resp.min_hw_cq_size;
+ iwvctx->abi_ver = IRDMA_ABI_VER;
+ mmap_key = resp.db_mmap_key;
+
+ iwvctx->db = mmap(NULL, IRDMA_HW_PAGE_SIZE, PROT_WRITE | PROT_READ,
+ MAP_SHARED, cmd_fd, mmap_key);
+ if (iwvctx->db == MAP_FAILED)
+ goto err_free;
+
+ iwvctx->ibv_ctx.ops = irdma_ctx_ops;
+
+ ibv_pd = irdma_ualloc_pd(&iwvctx->ibv_ctx);
+ if (!ibv_pd) {
+ munmap(iwvctx->db, IRDMA_HW_PAGE_SIZE);
+ goto err_free;
+ }
+
+ ibv_pd->context = &iwvctx->ibv_ctx;
+ iwvctx->iwupd = container_of(ibv_pd, struct irdma_upd, ibv_pd);
+
+ return 0;
+
+err_free:
+
+ printf("%s %s failure\n", __FILE__, __func__);
+ return -1;
+}
+
+static void
+irdma_cleanup_context(struct verbs_device *device,
+ struct ibv_context *ibctx)
+{
+ struct irdma_uvcontext *iwvctx;
+
+ printf("%s %s CALL\n", __FILE__, __func__);
+
+ iwvctx = container_of(ibctx, struct irdma_uvcontext, ibv_ctx);
+ irdma_ufree_pd(&iwvctx->iwupd->ibv_pd);
+ munmap(iwvctx->db, IRDMA_HW_PAGE_SIZE);
+
+}
+
+static struct verbs_device_ops irdma_dev_ops = {
+ .init_context = irdma_init_context,
+ .uninit_context = irdma_cleanup_context,
+};
+
+static struct verbs_device *
+irdma_driver_init(const char *uverbs_sys_path,
+ int abi_version)
+{
+ struct irdma_udevice *dev;
+ int i = 0;
+ unsigned int device_found = 0;
+ unsigned vendor_id, device_id;
+ unsigned hca_size;
+ char buf[8];
+
+ if (ibv_read_sysfs_file(uverbs_sys_path, "device/vendor",
+ buf, sizeof(buf)) < 0)
+ return NULL;
+ sscanf(buf, "%i", &vendor_id);
+ if (vendor_id != PCI_VENDOR_ID_INTEL)
+ return NULL;
+
+ if (ibv_read_sysfs_file(uverbs_sys_path, "device/device",
+ buf, sizeof(buf)) < 0)
+ return NULL;
+ sscanf(buf, "%i", &device_id);
+
+ hca_size = sizeof(hca_table) / sizeof(struct hca_info);
+ while (i < hca_size && !device_found) {
+		if (device_id == hca_table[i].device)
+ device_found = 1;
+ ++i;
+ }
+
+ if (!device_found)
+ return NULL;
+
+ if (abi_version < IRDMA_MIN_ABI_VERSION ||
+ abi_version > IRDMA_MAX_ABI_VERSION) {
+ printf("Invalid ABI version: %d of %s\n",
+ abi_version, uverbs_sys_path);
+ return NULL;
+ }
+
+ dev = calloc(1, sizeof(struct irdma_udevice));
+ if (!dev) {
+ printf("Device creation for %s failed\n", uverbs_sys_path);
+ return NULL;
+ }
+
+ dev->ibv_dev.ops = &irdma_dev_ops;
+ dev->ibv_dev.sz = sizeof(*dev);
+ dev->ibv_dev.size_of_context = sizeof(struct irdma_uvcontext) -
+ sizeof(struct ibv_context);
+
+ return &dev->ibv_dev;
+}
+
+static __attribute__((constructor))
+void
+irdma_register_driver(void)
+{
+ verbs_register_driver("irdma", irdma_driver_init);
+}
diff --git a/contrib/ofed/libirdma/irdma_umain.h b/contrib/ofed/libirdma/irdma_umain.h
new file mode 100644
index 000000000000..a040c92728b3
--- /dev/null
+++ b/contrib/ofed/libirdma/irdma_umain.h
@@ -0,0 +1,218 @@
+/*-
+ * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+ *
+ * Copyright (C) 2019 - 2020 Intel Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+#ifndef IRDMA_UMAIN_H
+#define IRDMA_UMAIN_H
+
+#include <sys/queue.h>
+#include <infiniband/verbs.h>
+#include <infiniband/driver.h>
+
+#include "osdep.h"
+#include "irdma.h"
+#include "irdma_defs.h"
+#include "i40iw_hw.h"
+#include "irdma_user.h"
+
+#ifndef likely
+#define likely(x) __builtin_expect((x), 1)
+#endif
+#ifndef unlikely
+#define unlikely(x) __builtin_expect((x), 0)
+#endif
+#define PFX "libirdma-"
+
+#define IRDMA_BASE_PUSH_PAGE 1
+#define IRDMA_U_MINCQ_SIZE 4
+#define IRDMA_DB_SHADOW_AREA_SIZE 64
+#define IRDMA_DB_CQ_OFFSET 64
+
+LIST_HEAD(list_head, irdma_cq_buf);
+LIST_HEAD(list_head_cmpl, irdma_cmpl_gen);
+
+enum irdma_supported_wc_flags {
+ IRDMA_CQ_SUPPORTED_WC_FLAGS = IBV_WC_EX_WITH_BYTE_LEN
+ | IBV_WC_EX_WITH_IMM
+ | IBV_WC_EX_WITH_QP_NUM
+ | IBV_WC_EX_WITH_SRC_QP
+ | IBV_WC_EX_WITH_SL
+ | IBV_WC_EX_WITH_COMPLETION_TIMESTAMP,
+};
+
+struct irdma_udevice {
+ struct verbs_device ibv_dev;
+};
+
+struct irdma_uah {
+ struct ibv_ah ibv_ah;
+ uint32_t ah_id;
+ struct ibv_global_route grh;
+};
+
+struct irdma_upd {
+ struct ibv_pd ibv_pd;
+ void *arm_cq_page;
+ void *arm_cq;
+ uint32_t pd_id;
+};
+
+struct irdma_uvcontext {
+ struct ibv_context ibv_ctx;
+ struct irdma_upd *iwupd;
+ struct irdma_uk_attrs uk_attrs;
+ void *db;
+ int abi_ver;
+ bool legacy_mode;
+};
+
+struct irdma_uqp;
+
+struct irdma_cq_buf {
+ LIST_ENTRY(irdma_cq_buf) list;
+ struct irdma_cq_uk cq;
+ struct verbs_mr vmr;
+};
+
+struct verbs_cq {
+ union {
+ struct ibv_cq cq;
+ struct ibv_cq_ex cq_ex;
+ };
+};
+
+struct irdma_cmpl_gen {
+ LIST_ENTRY(irdma_cmpl_gen) list;
+ struct irdma_cq_poll_info cpi;
+};
+
+struct irdma_ucq {
+ struct verbs_cq verbs_cq;
+ struct verbs_mr vmr;
+ struct verbs_mr vmr_shadow_area;
+ pthread_spinlock_t lock;
+ size_t buf_size;
+ bool is_armed;
+ bool skip_arm;
+ bool arm_sol;
+ bool skip_sol;
+ int comp_vector;
+ uint32_t report_rtt;
+ struct irdma_uqp *uqp;
+ struct irdma_cq_uk cq;
+ struct list_head resize_list;
+ /* for extended CQ completion fields */
+ struct irdma_cq_poll_info cur_cqe;
+ struct list_head_cmpl cmpl_generated;
+};
+
+struct irdma_uqp {
+ struct ibv_qp ibv_qp;
+ struct ibv_qp_attr attr;
+ struct irdma_ucq *send_cq;
+ struct irdma_ucq *recv_cq;
+ struct verbs_mr vmr;
+ size_t buf_size;
+ uint32_t irdma_drv_opt;
+ pthread_spinlock_t lock;
+ uint16_t sq_sig_all;
+ uint16_t qperr;
+ uint16_t rsvd;
+ uint32_t pending_rcvs;
+ uint32_t wq_size;
+ struct ibv_recv_wr *pend_rx_wr;
+ struct irdma_qp_uk qp;
+ enum ibv_qp_type qp_type;
+ enum ibv_qp_attr_mask attr_mask;
+ struct irdma_sge *recv_sges;
+ pthread_t flush_thread;
+};
+
+struct irdma_umr {
+ struct verbs_mr vmr;
+ uint32_t acc_flags;
+};
+
+/* irdma_uverbs.c */
+int irdma_uquery_device_ex(struct ibv_context *context,
+ const struct ibv_query_device_ex_input *input,
+ struct ibv_device_attr_ex *attr, size_t attr_size);
+int irdma_uquery_port(struct ibv_context *context, uint8_t port,
+ struct ibv_port_attr *attr);
+struct ibv_pd *irdma_ualloc_pd(struct ibv_context *context);
+int irdma_ufree_pd(struct ibv_pd *pd);
+int irdma_uquery_device(struct ibv_context *, struct ibv_device_attr *);
+struct ibv_mr *irdma_ureg_mr(struct ibv_pd *pd, void *addr, size_t length,
+ int access);
+int irdma_udereg_mr(struct ibv_mr *mr);
+struct ibv_mw *irdma_ualloc_mw(struct ibv_pd *pd, enum ibv_mw_type type);
+int irdma_ubind_mw(struct ibv_qp *qp, struct ibv_mw *mw,
+ struct ibv_mw_bind *mw_bind);
+int irdma_udealloc_mw(struct ibv_mw *mw);
+struct ibv_cq *irdma_ucreate_cq(struct ibv_context *context, int cqe,
+ struct ibv_comp_channel *channel,
+ int comp_vector);
+struct ibv_cq_ex *irdma_ucreate_cq_ex(struct ibv_context *context,
+ struct ibv_cq_init_attr_ex *attr_ex);
+void irdma_ibvcq_ex_fill_priv_funcs(struct irdma_ucq *iwucq,
+ struct ibv_cq_init_attr_ex *attr_ex);
+int irdma_uresize_cq(struct ibv_cq *cq, int cqe);
+int irdma_udestroy_cq(struct ibv_cq *cq);
+int irdma_upoll_cq(struct ibv_cq *cq, int entries, struct ibv_wc *entry);
+int irdma_uarm_cq(struct ibv_cq *cq, int solicited);
+void irdma_cq_event(struct ibv_cq *cq);
+struct ibv_qp *irdma_ucreate_qp(struct ibv_pd *pd,
+ struct ibv_qp_init_attr *attr);
+int irdma_uquery_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask,
+ struct ibv_qp_init_attr *init_attr);
+int irdma_umodify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+ int attr_mask);
+int irdma_udestroy_qp(struct ibv_qp *qp);
+int irdma_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
+ struct ibv_send_wr **bad_wr);
+int irdma_upost_recv(struct ibv_qp *ib_qp, struct ibv_recv_wr *ib_wr,
+ struct ibv_recv_wr **bad_wr);
+struct ibv_ah *irdma_ucreate_ah(struct ibv_pd *ibpd, struct ibv_ah_attr *attr);
+int irdma_udestroy_ah(struct ibv_ah *ibah);
+int irdma_uattach_mcast(struct ibv_qp *qp, const union ibv_gid *gid,
+ uint16_t lid);
+int irdma_udetach_mcast(struct ibv_qp *qp, const union ibv_gid *gid,
+ uint16_t lid);
+void irdma_async_event(struct ibv_context *context,
+ struct ibv_async_event *event);
+void irdma_set_hw_attrs(struct irdma_hw_attrs *attrs);
+void *irdma_mmap(int fd, off_t offset);
+void irdma_munmap(void *map);
+void *irdma_flush_thread(void *arg);
+#endif /* IRDMA_UMAIN_H */
diff --git a/contrib/ofed/libirdma/irdma_user.h b/contrib/ofed/libirdma/irdma_user.h
new file mode 100644
index 000000000000..7fe83b8c399f
--- /dev/null
+++ b/contrib/ofed/libirdma/irdma_user.h
@@ -0,0 +1,479 @@
+/*-
+ * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+ *
+ * Copyright (c) 2015 - 2021 Intel Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+#ifndef IRDMA_USER_H
+#define IRDMA_USER_H
+
+#include "osdep.h"
+
+#define irdma_handle void *
+#define irdma_adapter_handle irdma_handle
+#define irdma_qp_handle irdma_handle
+#define irdma_cq_handle irdma_handle
+#define irdma_pd_id irdma_handle
+#define irdma_stag_handle irdma_handle
+#define irdma_stag_index u32
+#define irdma_stag u32
+#define irdma_stag_key u8
+#define irdma_tagged_offset u64
+#define irdma_access_privileges u32
+#define irdma_physical_fragment u64
+#define irdma_address_list u64 *
+#define irdma_sgl struct irdma_sge *
+
+#define IRDMA_MAX_MR_SIZE 0x200000000000ULL
+
+#define IRDMA_ACCESS_FLAGS_LOCALREAD 0x01
+#define IRDMA_ACCESS_FLAGS_LOCALWRITE 0x02
+#define IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY 0x04
+#define IRDMA_ACCESS_FLAGS_REMOTEREAD 0x05
+#define IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY 0x08
+#define IRDMA_ACCESS_FLAGS_REMOTEWRITE 0x0a
+#define IRDMA_ACCESS_FLAGS_BIND_WINDOW 0x10
+#define IRDMA_ACCESS_FLAGS_ZERO_BASED 0x20
+#define IRDMA_ACCESS_FLAGS_ALL 0x3f
+
+#define IRDMA_OP_TYPE_RDMA_WRITE 0x00
+#define IRDMA_OP_TYPE_RDMA_READ 0x01
+#define IRDMA_OP_TYPE_SEND 0x03
+#define IRDMA_OP_TYPE_SEND_INV 0x04
+#define IRDMA_OP_TYPE_SEND_SOL 0x05
+#define IRDMA_OP_TYPE_SEND_SOL_INV 0x06
+#define IRDMA_OP_TYPE_RDMA_WRITE_SOL 0x0d
+#define IRDMA_OP_TYPE_BIND_MW 0x08
+#define IRDMA_OP_TYPE_FAST_REG_NSMR 0x09
+#define IRDMA_OP_TYPE_INV_STAG 0x0a
+#define IRDMA_OP_TYPE_RDMA_READ_INV_STAG 0x0b
+#define IRDMA_OP_TYPE_NOP 0x0c
+#define IRDMA_OP_TYPE_REC 0x3e
+#define IRDMA_OP_TYPE_REC_IMM 0x3f
+
+#define IRDMA_FLUSH_MAJOR_ERR 1
+
+enum irdma_device_caps_const {
+ IRDMA_WQE_SIZE = 4,
+ IRDMA_CQP_WQE_SIZE = 8,
+ IRDMA_CQE_SIZE = 4,
+ IRDMA_EXTENDED_CQE_SIZE = 8,
+ IRDMA_AEQE_SIZE = 2,
+ IRDMA_CEQE_SIZE = 1,
+ IRDMA_CQP_CTX_SIZE = 8,
+ IRDMA_SHADOW_AREA_SIZE = 8,
+ IRDMA_GATHER_STATS_BUF_SIZE = 1024,
+ IRDMA_MIN_IW_QP_ID = 0,
+ IRDMA_QUERY_FPM_BUF_SIZE = 176,
+ IRDMA_COMMIT_FPM_BUF_SIZE = 176,
+ IRDMA_MAX_IW_QP_ID = 262143,
+ IRDMA_MIN_CEQID = 0,
+ IRDMA_MAX_CEQID = 1023,
+ IRDMA_CEQ_MAX_COUNT = IRDMA_MAX_CEQID + 1,
+ IRDMA_MIN_CQID = 0,
+ IRDMA_MAX_CQID = 524287,
+ IRDMA_MIN_AEQ_ENTRIES = 1,
+ IRDMA_MAX_AEQ_ENTRIES = 524287,
+ IRDMA_MIN_CEQ_ENTRIES = 1,
+ IRDMA_MAX_CEQ_ENTRIES = 262143,
+ IRDMA_MIN_CQ_SIZE = 1,
+ IRDMA_MAX_CQ_SIZE = 1048575,
+ IRDMA_DB_ID_ZERO = 0,
+ /* 64K + 1 */
+ IRDMA_MAX_OUTBOUND_MSG_SIZE = 65537,
+	/* 64K + 1 */
+ IRDMA_MAX_INBOUND_MSG_SIZE = 65537,
+ IRDMA_MAX_PUSH_PAGE_COUNT = 1024,
+ IRDMA_MAX_PE_ENA_VF_COUNT = 32,
+ IRDMA_MAX_VF_FPM_ID = 47,
+ IRDMA_MAX_SQ_PAYLOAD_SIZE = 2145386496,
+ IRDMA_MAX_INLINE_DATA_SIZE = 101,
+ IRDMA_MAX_WQ_ENTRIES = 32768,
+ IRDMA_Q2_BUF_SIZE = 256,
+ IRDMA_QP_CTX_SIZE = 256,
+ IRDMA_MAX_PDS = 262144,
+};
+
+enum irdma_addressing_type {
+ IRDMA_ADDR_TYPE_ZERO_BASED = 0,
+ IRDMA_ADDR_TYPE_VA_BASED = 1,
+};
+
+enum irdma_flush_opcode {
+ FLUSH_INVALID = 0,
+ FLUSH_GENERAL_ERR,
+ FLUSH_PROT_ERR,
+ FLUSH_REM_ACCESS_ERR,
+ FLUSH_LOC_QP_OP_ERR,
+ FLUSH_REM_OP_ERR,
+ FLUSH_LOC_LEN_ERR,
+ FLUSH_FATAL_ERR,
+ FLUSH_MW_BIND_ERR,
+ FLUSH_REM_INV_REQ_ERR,
+ FLUSH_RETRY_EXC_ERR,
+};
+
+enum irdma_cmpl_status {
+ IRDMA_COMPL_STATUS_SUCCESS = 0,
+ IRDMA_COMPL_STATUS_FLUSHED,
+ IRDMA_COMPL_STATUS_INVALID_WQE,
+ IRDMA_COMPL_STATUS_QP_CATASTROPHIC,
+ IRDMA_COMPL_STATUS_REMOTE_TERMINATION,
+ IRDMA_COMPL_STATUS_INVALID_STAG,
+ IRDMA_COMPL_STATUS_BASE_BOUND_VIOLATION,
+ IRDMA_COMPL_STATUS_ACCESS_VIOLATION,
+ IRDMA_COMPL_STATUS_INVALID_PD_ID,
+ IRDMA_COMPL_STATUS_WRAP_ERROR,
+ IRDMA_COMPL_STATUS_STAG_INVALID_PDID,
+ IRDMA_COMPL_STATUS_RDMA_READ_ZERO_ORD,
+ IRDMA_COMPL_STATUS_QP_NOT_PRIVLEDGED,
+ IRDMA_COMPL_STATUS_STAG_NOT_INVALID,
+ IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_SIZE,
+ IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_ENTRY,
+ IRDMA_COMPL_STATUS_INVALID_FBO,
+ IRDMA_COMPL_STATUS_INVALID_LEN,
+ IRDMA_COMPL_STATUS_INVALID_ACCESS,
+ IRDMA_COMPL_STATUS_PHYS_BUF_LIST_TOO_LONG,
+ IRDMA_COMPL_STATUS_INVALID_VIRT_ADDRESS,
+ IRDMA_COMPL_STATUS_INVALID_REGION,
+ IRDMA_COMPL_STATUS_INVALID_WINDOW,
+ IRDMA_COMPL_STATUS_INVALID_TOTAL_LEN,
+ IRDMA_COMPL_STATUS_UNKNOWN,
+};
+
+enum irdma_cmpl_notify {
+ IRDMA_CQ_COMPL_EVENT = 0,
+ IRDMA_CQ_COMPL_SOLICITED = 1,
+};
+
+enum irdma_qp_caps {
+ IRDMA_WRITE_WITH_IMM = 1,
+ IRDMA_SEND_WITH_IMM = 2,
+ IRDMA_ROCE = 4,
+ IRDMA_PUSH_MODE = 8,
+};
+
+struct irdma_qp_uk;
+struct irdma_cq_uk;
+struct irdma_qp_uk_init_info;
+struct irdma_cq_uk_init_info;
+
+struct irdma_sge {
+ irdma_tagged_offset tag_off;
+ u32 len;
+ irdma_stag stag;
+};
+
+struct irdma_ring {
+ volatile u32 head;
+ volatile u32 tail;
+ u32 size;
+};
+
+struct irdma_cqe {
+ __le64 buf[IRDMA_CQE_SIZE];
+};
+
+struct irdma_extended_cqe {
+ __le64 buf[IRDMA_EXTENDED_CQE_SIZE];
+};
+
+struct irdma_post_send {
+ irdma_sgl sg_list;
+ u32 num_sges;
+ u32 qkey;
+ u32 dest_qp;
+ u32 ah_id;
+};
+
+struct irdma_post_inline_send {
+ void *data;
+ u32 len;
+ u32 qkey;
+ u32 dest_qp;
+ u32 ah_id;
+};
+
+struct irdma_post_rq_info {
+ u64 wr_id;
+ irdma_sgl sg_list;
+ u32 num_sges;
+};
+
+struct irdma_rdma_write {
+ irdma_sgl lo_sg_list;
+ u32 num_lo_sges;
+ struct irdma_sge rem_addr;
+};
+
+struct irdma_inline_rdma_write {
+ void *data;
+ u32 len;
+ struct irdma_sge rem_addr;
+};
+
+struct irdma_rdma_read {
+ irdma_sgl lo_sg_list;
+ u32 num_lo_sges;
+ struct irdma_sge rem_addr;
+};
+
+struct irdma_bind_window {
+ irdma_stag mr_stag;
+ u64 bind_len;
+ void *va;
+ enum irdma_addressing_type addressing_type;
+ bool ena_reads:1;
+ bool ena_writes:1;
+ irdma_stag mw_stag;
+ bool mem_window_type_1:1;
+};
+
+struct irdma_inv_local_stag {
+ irdma_stag target_stag;
+};
+
+struct irdma_post_sq_info {
+ u64 wr_id;
+ u8 op_type;
+ u8 l4len;
+ bool signaled:1;
+ bool read_fence:1;
+ bool local_fence:1;
+ bool inline_data:1;
+ bool imm_data_valid:1;
+ bool push_wqe:1;
+ bool report_rtt:1;
+ bool udp_hdr:1;
+ bool defer_flag:1;
+ u32 imm_data;
+ u32 stag_to_inv;
+ union {
+ struct irdma_post_send send;
+ struct irdma_rdma_write rdma_write;
+ struct irdma_rdma_read rdma_read;
+ struct irdma_bind_window bind_window;
+ struct irdma_inv_local_stag inv_local_stag;
+ struct irdma_inline_rdma_write inline_rdma_write;
+ struct irdma_post_inline_send inline_send;
+ } op;
+};
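+
+/*
+ * Illustrative sketch only (not part of the ABI): a caller such as
+ * irdma_upost_send() fills this descriptor before handing it to the
+ * uk layer, e.g. for an inline RDMA write:
+ *
+ *	struct irdma_post_sq_info info = {};
+ *
+ *	info.wr_id = wr_id;
+ *	info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
+ *	info.signaled = true;
+ *	info.op.inline_rdma_write.data = buf;
+ *	info.op.inline_rdma_write.len = len;
+ *	info.op.inline_rdma_write.rem_addr.tag_off = remote_addr;
+ *	info.op.inline_rdma_write.rem_addr.stag = rkey;
+ *	irdma_uk_inline_rdma_write(qp, &info, false);
+ *
+ * buf, len, remote_addr, rkey and qp are placeholders here.
+ */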
+
+struct irdma_cq_poll_info {
+ u64 wr_id;
+ irdma_qp_handle qp_handle;
+ u32 bytes_xfered;
+ u32 tcp_seq_num_rtt;
+ u32 qp_id;
+ u32 ud_src_qpn;
+ u32 imm_data;
+ irdma_stag inv_stag; /* or L_R_Key */
+ enum irdma_cmpl_status comp_status;
+ u16 major_err;
+ u16 minor_err;
+ u16 ud_vlan;
+ u8 ud_smac[6];
+ u8 op_type;
+ bool stag_invalid_set:1; /* or L_R_Key set */
+ bool push_dropped:1;
+ bool error:1;
+ bool solicited_event:1;
+ bool ipv4:1;
+ bool ud_vlan_valid:1;
+ bool ud_smac_valid:1;
+ bool imm_valid:1;
+ bool signaled:1;
+};
+
+int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq);
+int irdma_uk_inline_send(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq);
+int irdma_uk_mw_bind(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool post_sq);
+int irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled,
+ bool post_sq);
+int irdma_uk_post_receive(struct irdma_qp_uk *qp,
+ struct irdma_post_rq_info *info);
+void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp);
+int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool inv_stag, bool post_sq);
+int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool post_sq);
+int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool post_sq);
+int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info,
+ bool post_sq);
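+
+/*
+ * Editorial note (inferred from irdma_uverbs.c): the post_sq argument of the
+ * WQE builders above controls whether the doorbell is rung immediately.
+ * irdma_upost_send() typically passes false for each WR and rings the
+ * doorbell once via irdma_uk_qp_post_wr() after the whole chain is queued.
+ */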
+
+struct irdma_wqe_uk_ops {
+ void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity);
+ u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
+ void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct irdma_sge *sge,
+ u8 valid);
+ void (*iw_set_mw_bind_wqe)(__le64 *wqe,
+ struct irdma_bind_window *op_info);
+};
+
+int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
+ struct irdma_cq_poll_info *info);
+void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
+ enum irdma_cmpl_notify cq_notify);
+void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int size);
+void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *qp, u16 cnt);
+int irdma_uk_cq_init(struct irdma_cq_uk *cq,
+ struct irdma_cq_uk_init_info *info);
+int irdma_uk_qp_init(struct irdma_qp_uk *qp,
+ struct irdma_qp_uk_init_info *info);
+struct irdma_sq_uk_wr_trk_info {
+ u64 wrid;
+ u32 wr_len;
+ u16 quanta;
+ u8 signaled;
+ u8 reserved[1];
+};
+
+struct irdma_qp_quanta {
+ __le64 elem[IRDMA_WQE_SIZE];
+};
+
+struct irdma_qp_uk {
+ struct irdma_qp_quanta *sq_base;
+ struct irdma_qp_quanta *rq_base;
+ struct irdma_uk_attrs *uk_attrs;
+ u32 IOMEM *wqe_alloc_db;
+ struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
+ struct irdma_sig_wr_trk_info *sq_sigwrtrk_array;
+ u64 *rq_wrid_array;
+ __le64 *shadow_area;
+ __le32 *push_db;
+ __le64 *push_wqe;
+ struct irdma_ring sq_ring;
+ struct irdma_ring sq_sig_ring;
+ struct irdma_ring rq_ring;
+ struct irdma_ring initial_ring;
+ u32 qp_id;
+ u32 qp_caps;
+ u32 sq_size;
+ u32 rq_size;
+ u32 max_sq_frag_cnt;
+ u32 max_rq_frag_cnt;
+ u32 max_inline_data;
+ u32 last_rx_cmpl_idx;
+ u32 last_tx_cmpl_idx;
+ struct irdma_wqe_uk_ops wqe_ops;
+ u16 conn_wqes;
+ u8 qp_type;
+ u8 swqe_polarity;
+ u8 swqe_polarity_deferred;
+ u8 rwqe_polarity;
+ u8 rq_wqe_size;
+ u8 rq_wqe_size_multiplier;
+ bool deferred_flag:1;
+ bool push_mode:1; /* whether the last post wqe was pushed */
+ bool push_dropped:1;
+ bool first_sq_wq:1;
+ bool sq_flush_complete:1; /* Indicates flush was seen and SQ was empty after the flush */
+ bool rq_flush_complete:1; /* Indicates flush was seen and RQ was empty after the flush */
+ bool destroy_pending:1; /* Indicates the QP is being destroyed */
+ void *back_qp;
+ pthread_spinlock_t *lock;
+ bool force_fence;
+ u8 dbg_rq_flushed;
+ u16 ord_cnt;
+ u8 sq_flush_seen;
+ u8 rq_flush_seen;
+ u8 rd_fence_rate;
+};
+
+struct irdma_cq_uk {
+ struct irdma_cqe *cq_base;
+ u32 IOMEM *cqe_alloc_db;
+ u32 IOMEM *cq_ack_db;
+ __le64 *shadow_area;
+ u32 cq_id;
+ u32 cq_size;
+ struct irdma_ring cq_ring;
+ u8 polarity;
+ bool armed:1;
+ bool avoid_mem_cflct:1;
+};
+
+struct irdma_qp_uk_init_info {
+ struct irdma_qp_quanta *sq;
+ struct irdma_qp_quanta *rq;
+ struct irdma_uk_attrs *uk_attrs;
+ u32 IOMEM *wqe_alloc_db;
+ __le64 *shadow_area;
+ struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
+ struct irdma_sig_wr_trk_info *sq_sigwrtrk_array;
+ u64 *rq_wrid_array;
+ u32 qp_id;
+ u32 qp_caps;
+ u32 sq_size;
+ u32 rq_size;
+ u32 max_sq_frag_cnt;
+ u32 max_rq_frag_cnt;
+ u32 max_inline_data;
+ u8 first_sq_wq;
+ u8 type;
+ u8 rd_fence_rate;
+ int abi_ver;
+ bool legacy_mode;
+};
+
+struct irdma_cq_uk_init_info {
+ u32 IOMEM *cqe_alloc_db;
+ u32 IOMEM *cq_ack_db;
+ struct irdma_cqe *cq_base;
+ __le64 *shadow_area;
+ u32 cq_size;
+ u32 cq_id;
+ bool avoid_mem_cflct;
+};
+
+__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
+ u16 quanta, u32 total_size,
+ struct irdma_post_sq_info *info);
+__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
+int irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
+int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq);
+int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta);
+int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);
+void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
+ u32 inline_data, u8 *shift);
+int irdma_get_sqdepth(u32 max_hw_wq_quanta, u32 sq_size, u8 shift, u32 *wqdepth);
+int irdma_get_rqdepth(u32 max_hw_rq_quanta, u32 rq_size, u8 shift, u32 *wqdepth);
+void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
+ u32 wqe_idx, bool post_sq);
+void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
+#endif /* IRDMA_USER_H */
diff --git a/contrib/ofed/libirdma/irdma_uverbs.c b/contrib/ofed/libirdma/irdma_uverbs.c
new file mode 100644
index 000000000000..64759b2965ce
--- /dev/null
+++ b/contrib/ofed/libirdma/irdma_uverbs.c
@@ -0,0 +1,2262 @@
+/*-
+ * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+ *
+ * Copyright (C) 2019 - 2021 Intel Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+#include <config.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <signal.h>
+#include <errno.h>
+#include <sys/param.h>
+#include <sys/mman.h>
+#include <netinet/in.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdbool.h>
+
+#include "irdma_umain.h"
+#include "abi.h"
+
+static inline void
+print_fw_ver(uint64_t fw_ver, char *str, size_t len)
+{
+ uint16_t major, minor;
+
+ major = fw_ver >> 32 & 0xffff;
+ minor = fw_ver & 0xffff;
+
+ snprintf(str, len, "%d.%d", major, minor);
+}
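+
+/*
+ * Example (illustrative): fw_ver = 0x0000000200000003 yields major = 2 and
+ * minor = 3, so the string written to str is "2.3".
+ */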
+
+/**
+ * irdma_uquery_device_ex - query device attributes including extended properties
+ * @context: user context for the device
+ * @input: extensible input struct for ibv_query_device_ex verb
+ * @attr: extended device attribute struct
+ * @attr_size: size of extended device attribute struct
+ **/
+int
+irdma_uquery_device_ex(struct ibv_context *context,
+ const struct ibv_query_device_ex_input *input,
+ struct ibv_device_attr_ex *attr, size_t attr_size)
+{
+ struct irdma_query_device_ex cmd = {};
+ struct irdma_query_device_ex_resp resp = {};
+ uint64_t fw_ver;
+ int ret;
+
+ ret = ibv_cmd_query_device_ex(context, input, attr, attr_size, &fw_ver,
+ &cmd.ibv_cmd, sizeof(cmd.ibv_cmd), sizeof(cmd),
+ &resp.ibv_resp, sizeof(resp.ibv_resp), sizeof(resp));
+ if (ret)
+ return ret;
+
+ print_fw_ver(fw_ver, attr->orig_attr.fw_ver, sizeof(attr->orig_attr.fw_ver));
+
+ return 0;
+}
+
+/**
+ * irdma_uquery_device - call driver to query device for max resources
+ * @context: user context for the device
+ * @attr: where to save all the max resources from the driver
+ **/
+int
+irdma_uquery_device(struct ibv_context *context, struct ibv_device_attr *attr)
+{
+ struct ibv_query_device cmd;
+ uint64_t fw_ver;
+ int ret;
+
+ ret = ibv_cmd_query_device(context, attr, &fw_ver, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
+
+ print_fw_ver(fw_ver, attr->fw_ver, sizeof(attr->fw_ver));
+
+ return 0;
+}
+
+/**
+ * irdma_uquery_port - get port attributes (msg size, link, mtu...)
+ * @context: user context of the device
+ * @port: port for the attributes
+ * @attr: to return port attributes
+ **/
+int
+irdma_uquery_port(struct ibv_context *context, uint8_t port,
+ struct ibv_port_attr *attr)
+{
+ struct ibv_query_port cmd;
+
+ return ibv_cmd_query_port(context, port, attr, &cmd, sizeof(cmd));
+}
+
+/**
+ * irdma_ualloc_pd - allocate a protection domain and return the pd pointer
+ * @context: user context of the device
+ **/
+struct ibv_pd *
+irdma_ualloc_pd(struct ibv_context *context)
+{
+ struct ibv_alloc_pd cmd;
+ struct irdma_ualloc_pd_resp resp = {};
+ struct irdma_upd *iwupd;
+ int err;
+
+ iwupd = malloc(sizeof(*iwupd));
+ if (!iwupd)
+ return NULL;
+
+ err = ibv_cmd_alloc_pd(context, &iwupd->ibv_pd, &cmd, sizeof(cmd),
+ &resp.ibv_resp, sizeof(resp));
+ if (err)
+ goto err_free;
+
+ iwupd->pd_id = resp.pd_id;
+
+ return &iwupd->ibv_pd;
+
+err_free:
+ free(iwupd);
+ errno = err;
+ return NULL;
+}
+
+/**
+ * irdma_ufree_pd - free pd resources
+ * @pd: pd to free resources
+ */
+int
+irdma_ufree_pd(struct ibv_pd *pd)
+{
+ struct irdma_upd *iwupd;
+ int ret;
+
+ iwupd = container_of(pd, struct irdma_upd, ibv_pd);
+ ret = ibv_cmd_dealloc_pd(pd);
+ if (ret)
+ return ret;
+
+ free(iwupd);
+
+ return 0;
+}
+
+/**
+ * irdma_ureg_mr - register user memory region
+ * @pd: pd for the mr
+ * @addr: user address of the memory region
+ * @length: length of the memory
+ * @access: access allowed on this mr
+ */
+struct ibv_mr *
+irdma_ureg_mr(struct ibv_pd *pd, void *addr, size_t length,
+ int access)
+{
+ struct irdma_umr *umr;
+ struct irdma_ureg_mr cmd;
+ struct ibv_reg_mr_resp resp;
+ int err;
+
+ umr = malloc(sizeof(*umr));
+ if (!umr)
+ return NULL;
+
+ cmd.reg_type = IRDMA_MEMREG_TYPE_MEM;
+ err = ibv_cmd_reg_mr(pd, addr, length,
+ (uintptr_t)addr, access, &umr->vmr.ibv_mr, &cmd.ibv_cmd,
+ sizeof(cmd), &resp, sizeof(resp));
+ if (err) {
+ free(umr);
+ errno = err;
+ return NULL;
+ }
+ umr->acc_flags = access;
+
+ return &umr->vmr.ibv_mr;
+}
+
+/**
+ * irdma_udereg_mr - deregister memory region
+ * @mr: mr that was allocated
+ */
+int
+irdma_udereg_mr(struct ibv_mr *mr)
+{
+ struct irdma_umr *umr;
+ struct verbs_mr *vmr;
+ int ret;
+
+ vmr = container_of(mr, struct verbs_mr, ibv_mr);
+ umr = container_of(vmr, struct irdma_umr, vmr);
+
+ ret = ibv_cmd_dereg_mr(mr);
+ if (ret)
+ return ret;
+
+ free(umr);
+
+ return 0;
+}
+
+/**
+ * irdma_ualloc_mw - allocate memory window
+ * @pd: protection domain
+ * @type: memory window type
+ */
+struct ibv_mw *
+irdma_ualloc_mw(struct ibv_pd *pd, enum ibv_mw_type type)
+{
+ struct ibv_mw *mw;
+ struct ibv_alloc_mw cmd;
+ struct ibv_alloc_mw_resp resp;
+
+ mw = calloc(1, sizeof(*mw));
+ if (!mw)
+ return NULL;
+
+ if (ibv_cmd_alloc_mw(pd, type, mw, &cmd, sizeof(cmd), &resp,
+ sizeof(resp))) {
+ printf("%s: Failed to alloc memory window\n",
+ __func__);
+ free(mw);
+ return NULL;
+ }
+
+ return mw;
+}
+
+/**
+ * irdma_ubind_mw - bind a memory window
+ * @qp: qp to post WR
+ * @mw: memory window to bind
+ * @mw_bind: bind info
+ */
+int
+irdma_ubind_mw(struct ibv_qp *qp, struct ibv_mw *mw,
+ struct ibv_mw_bind *mw_bind)
+{
+ struct ibv_mw_bind_info *bind_info = &mw_bind->bind_info;
+ struct verbs_mr *vmr;
+ struct irdma_umr *umr;
+
+ struct ibv_send_wr wr = {};
+ struct ibv_send_wr *bad_wr;
+ int err;
+
+ if (!bind_info->mr && (bind_info->addr || bind_info->length))
+ return EINVAL;
+
+ if (bind_info->mr) {
+ vmr = verbs_get_mr(bind_info->mr);
+ umr = container_of(vmr, struct irdma_umr, vmr);
+ if (vmr->mr_type != IBV_MR_TYPE_MR)
+ return ENOTSUP;
+
+ if (umr->acc_flags & IBV_ACCESS_ZERO_BASED)
+ return EINVAL;
+
+ if (mw->pd != bind_info->mr->pd)
+ return EPERM;
+ }
+
+ wr.opcode = IBV_WR_BIND_MW;
+ wr.bind_mw.bind_info = mw_bind->bind_info;
+ wr.bind_mw.mw = mw;
+ wr.bind_mw.rkey = ibv_inc_rkey(mw->rkey);
+
+ wr.wr_id = mw_bind->wr_id;
+ wr.send_flags = mw_bind->send_flags;
+
+ err = irdma_upost_send(qp, &wr, &bad_wr);
+ if (!err)
+ mw->rkey = wr.bind_mw.rkey;
+
+ return err;
+}
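+
+/*
+ * Illustrative libibverbs usage (not part of this file): an application
+ * reaches this callback through ibv_bind_mw(), e.g.
+ *
+ *	struct ibv_mw_bind mw_bind = {
+ *		.wr_id = 1,
+ *		.send_flags = IBV_SEND_SIGNALED,
+ *		.bind_info = {
+ *			.mr = mr,
+ *			.addr = (uintptr_t)buf,
+ *			.length = len,
+ *			.mw_access_flags = IBV_ACCESS_REMOTE_READ |
+ *					   IBV_ACCESS_REMOTE_WRITE,
+ *		},
+ *	};
+ *	ret = ibv_bind_mw(qp, mw, &mw_bind);
+ *
+ * mr, buf, len, qp and mw are placeholders.
+ */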
+
+/**
+ * irdma_udealloc_mw - deallocate memory window
+ * @mw: memory window to dealloc
+ */
+int
+irdma_udealloc_mw(struct ibv_mw *mw)
+{
+ int ret;
+ struct ibv_dealloc_mw cmd;
+
+ ret = ibv_cmd_dealloc_mw(mw, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
+ free(mw);
+
+ return 0;
+}
+
+static void *
+irdma_alloc_hw_buf(size_t size)
+{
+ void *buf;
+
+ buf = memalign(IRDMA_HW_PAGE_SIZE, size);
+
+ if (!buf)
+ return NULL;
+ if (ibv_dontfork_range(buf, size)) {
+ free(buf);
+ return NULL;
+ }
+
+ return buf;
+}
+
+static void
+irdma_free_hw_buf(void *buf, size_t size)
+{
+ ibv_dofork_range(buf, size);
+ free(buf);
+}
+
+/**
+ * get_cq_size - return the number of cqes needed by HW
+ * @ncqe: minimum cqes requested by application
+ * @hw_rev: HW generation
+ */
+static inline int
+get_cq_size(int ncqe, u8 hw_rev)
+{
+ ncqe++;
+
+ /* Completions with immediate require 1 extra entry */
+ if (hw_rev > IRDMA_GEN_1)
+ ncqe *= 2;
+
+ if (ncqe < IRDMA_U_MINCQ_SIZE)
+ ncqe = IRDMA_U_MINCQ_SIZE;
+
+ return ncqe;
+}
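+
+/*
+ * Example (illustrative): for ncqe = 64 on hw_rev > IRDMA_GEN_1 this returns
+ * (64 + 1) * 2 = 130 entries; on GEN_1 it returns 65, or IRDMA_U_MINCQ_SIZE
+ * if that is larger.
+ */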
+
+static inline size_t
+get_cq_total_bytes(u32 cq_size)
+{
+ return roundup(cq_size * sizeof(struct irdma_cqe), IRDMA_HW_PAGE_SIZE);
+}
+
+/**
+ * ucreate_cq - irdma util function to create a CQ
+ * @context: ibv context
+ * @attr_ex: CQ init attributes
+ * @ext_cq: flag to create an extended or normal CQ
+ */
+static struct ibv_cq_ex *
+ucreate_cq(struct ibv_context *context,
+ struct ibv_cq_init_attr_ex *attr_ex,
+ bool ext_cq)
+{
+ struct irdma_cq_uk_init_info info = {};
+ struct irdma_ureg_mr reg_mr_cmd = {};
+ struct irdma_ucreate_cq_ex cmd = {};
+ struct irdma_ucreate_cq_ex_resp resp = {};
+ struct ibv_reg_mr_resp reg_mr_resp = {};
+ struct irdma_ureg_mr reg_mr_shadow_cmd = {};
+ struct ibv_reg_mr_resp reg_mr_shadow_resp = {};
+ struct irdma_uk_attrs *uk_attrs;
+ struct irdma_uvcontext *iwvctx;
+ struct irdma_ucq *iwucq;
+ size_t total_size;
+ u32 cq_pages;
+ int ret, ncqe;
+ u8 hw_rev;
+
+ iwvctx = container_of(context, struct irdma_uvcontext, ibv_ctx);
+ uk_attrs = &iwvctx->uk_attrs;
+ hw_rev = uk_attrs->hw_rev;
+
+ if (ext_cq && hw_rev == IRDMA_GEN_1) {
+ errno = EOPNOTSUPP;
+ return NULL;
+ }
+
+ if (attr_ex->cqe < IRDMA_MIN_CQ_SIZE || attr_ex->cqe > uk_attrs->max_hw_cq_size) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ /* save the cqe requested by application */
+ ncqe = attr_ex->cqe;
+
+ iwucq = calloc(1, sizeof(*iwucq));
+ if (!iwucq)
+ return NULL;
+
+ if (pthread_spin_init(&iwucq->lock, PTHREAD_PROCESS_PRIVATE)) {
+ free(iwucq);
+ return NULL;
+ }
+
+ info.cq_size = get_cq_size(attr_ex->cqe, hw_rev);
+ iwucq->comp_vector = attr_ex->comp_vector;
+ LIST_INIT(&iwucq->resize_list);
+ LIST_INIT(&iwucq->cmpl_generated);
+ total_size = get_cq_total_bytes(info.cq_size);
+ cq_pages = total_size >> IRDMA_HW_PAGE_SHIFT;
+
+ if (!(uk_attrs->feature_flags & IRDMA_FEATURE_CQ_RESIZE))
+ total_size = (cq_pages << IRDMA_HW_PAGE_SHIFT) + IRDMA_DB_SHADOW_AREA_SIZE;
+
+ iwucq->buf_size = total_size;
+ info.cq_base = irdma_alloc_hw_buf(total_size);
+ if (!info.cq_base)
+ goto err_cq_base;
+
+ memset(info.cq_base, 0, total_size);
+ reg_mr_cmd.reg_type = IRDMA_MEMREG_TYPE_CQ;
+ reg_mr_cmd.cq_pages = cq_pages;
+
+ ret = ibv_cmd_reg_mr(&iwvctx->iwupd->ibv_pd, info.cq_base,
+ total_size, (uintptr_t)info.cq_base,
+ IBV_ACCESS_LOCAL_WRITE, &iwucq->vmr.ibv_mr,
+ &reg_mr_cmd.ibv_cmd, sizeof(reg_mr_cmd),
+ &reg_mr_resp, sizeof(reg_mr_resp));
+ if (ret) {
+ errno = ret;
+ goto err_dereg_mr;
+ }
+
+ iwucq->vmr.ibv_mr.pd = &iwvctx->iwupd->ibv_pd;
+
+ if (uk_attrs->feature_flags & IRDMA_FEATURE_CQ_RESIZE) {
+ info.shadow_area = irdma_alloc_hw_buf(IRDMA_DB_SHADOW_AREA_SIZE);
+ if (!info.shadow_area)
+ goto err_dereg_mr;
+
+ memset(info.shadow_area, 0, IRDMA_DB_SHADOW_AREA_SIZE);
+ reg_mr_shadow_cmd.reg_type = IRDMA_MEMREG_TYPE_CQ;
+ reg_mr_shadow_cmd.cq_pages = 1;
+
+ ret = ibv_cmd_reg_mr(&iwvctx->iwupd->ibv_pd, info.shadow_area,
+ IRDMA_DB_SHADOW_AREA_SIZE, (uintptr_t)info.shadow_area,
+ IBV_ACCESS_LOCAL_WRITE, &iwucq->vmr_shadow_area.ibv_mr,
+ &reg_mr_shadow_cmd.ibv_cmd, sizeof(reg_mr_shadow_cmd),
+ &reg_mr_shadow_resp, sizeof(reg_mr_shadow_resp));
+ if (ret) {
+ errno = ret;
+ goto err_dereg_shadow;
+ }
+
+ iwucq->vmr_shadow_area.ibv_mr.pd = &iwvctx->iwupd->ibv_pd;
+
+ } else {
+ info.shadow_area = (__le64 *) ((u8 *)info.cq_base + (cq_pages << IRDMA_HW_PAGE_SHIFT));
+ }
+
+ attr_ex->cqe = info.cq_size;
+ cmd.user_cq_buf = (__u64) ((uintptr_t)info.cq_base);
+ cmd.user_shadow_area = (__u64) ((uintptr_t)info.shadow_area);
+
+ ret = ibv_cmd_create_cq_ex(context, attr_ex, &iwucq->verbs_cq.cq_ex,
+ &cmd.ibv_cmd, sizeof(cmd.ibv_cmd), sizeof(cmd), &resp.ibv_resp,
+ sizeof(resp.ibv_resp), sizeof(resp));
+ if (ret) {
+ errno = ret;
+ goto err_dereg_shadow;
+ }
+
+ if (ext_cq)
+ irdma_ibvcq_ex_fill_priv_funcs(iwucq, attr_ex);
+ info.cq_id = resp.cq_id;
+ /* Do not report the cqe's burned by HW */
+ iwucq->verbs_cq.cq.cqe = ncqe;
+
+ info.cqe_alloc_db = (u32 *)((u8 *)iwvctx->db + IRDMA_DB_CQ_OFFSET);
+ irdma_uk_cq_init(&iwucq->cq, &info);
+
+ return &iwucq->verbs_cq.cq_ex;
+
+err_dereg_shadow:
+ ibv_cmd_dereg_mr(&iwucq->vmr.ibv_mr);
+ if (iwucq->vmr_shadow_area.ibv_mr.handle) {
+ ibv_cmd_dereg_mr(&iwucq->vmr_shadow_area.ibv_mr);
+ irdma_free_hw_buf(info.shadow_area, IRDMA_HW_PAGE_SIZE);
+ }
+err_dereg_mr:
+ irdma_free_hw_buf(info.cq_base, total_size);
+err_cq_base:
+ printf("%s: failed to initialize CQ\n", __func__);
+ pthread_spin_destroy(&iwucq->lock);
+
+ free(iwucq);
+
+ return NULL;
+}
+
+struct ibv_cq *
+irdma_ucreate_cq(struct ibv_context *context, int cqe,
+ struct ibv_comp_channel *channel,
+ int comp_vector)
+{
+ struct ibv_cq_init_attr_ex attr_ex = {
+ .cqe = cqe,
+ .channel = channel,
+ .comp_vector = comp_vector,
+ };
+ struct ibv_cq_ex *ibvcq_ex;
+
+ ibvcq_ex = ucreate_cq(context, &attr_ex, false);
+
+ return ibvcq_ex ? ibv_cq_ex_to_cq(ibvcq_ex) : NULL;
+}
+
+struct ibv_cq_ex *
+irdma_ucreate_cq_ex(struct ibv_context *context,
+ struct ibv_cq_init_attr_ex *attr_ex)
+{
+ if (attr_ex->wc_flags & ~IRDMA_CQ_SUPPORTED_WC_FLAGS) {
+ errno = EOPNOTSUPP;
+ return NULL;
+ }
+
+ return ucreate_cq(context, attr_ex, true);
+}
+
+/**
+ * irdma_free_cq_buf - free memory for cq buffer
+ * @cq_buf: cq buf to free
+ */
+static void
+irdma_free_cq_buf(struct irdma_cq_buf *cq_buf)
+{
+ ibv_cmd_dereg_mr(&cq_buf->vmr.ibv_mr);
+ irdma_free_hw_buf(cq_buf->cq.cq_base, get_cq_total_bytes(cq_buf->cq.cq_size));
+ free(cq_buf);
+}
+
+/**
+ * irdma_process_resize_list - process the cq list to remove buffers
+ * @iwucq: cq which owns the list
+ * @lcqe_buf: cq buf where the last cqe is found
+ */
+static int
+irdma_process_resize_list(struct irdma_ucq *iwucq,
+ struct irdma_cq_buf *lcqe_buf)
+{
+ struct irdma_cq_buf *cq_buf, *next;
+ int cq_cnt = 0;
+
+ LIST_FOREACH_SAFE(cq_buf, &iwucq->resize_list, list, next) {
+ if (cq_buf == lcqe_buf)
+ return cq_cnt;
+
+ LIST_REMOVE(cq_buf, list);
+ irdma_free_cq_buf(cq_buf);
+ cq_cnt++;
+ }
+
+ return cq_cnt;
+}
+
+static void
+irdma_remove_cmpls_list(struct irdma_ucq *iwucq)
+{
+ struct irdma_cmpl_gen *cmpl_node, *next;
+
+ LIST_FOREACH_SAFE(cmpl_node, &iwucq->cmpl_generated, list, next) {
+ LIST_REMOVE(cmpl_node, list);
+ free(cmpl_node);
+ }
+}
+
+static int
+irdma_generated_cmpls(struct irdma_ucq *iwucq, struct irdma_cq_poll_info *cq_poll_info)
+{
+ struct irdma_cmpl_gen *cmpl;
+
+ if (!iwucq || LIST_EMPTY(&iwucq->cmpl_generated))
+ return ENOENT;
+ cmpl = LIST_FIRST(&iwucq->cmpl_generated);
+ LIST_REMOVE(cmpl, list);
+ memcpy(cq_poll_info, &cmpl->cpi, sizeof(*cq_poll_info));
+
+ free(cmpl);
+
+ return 0;
+}
+
+/**
+ * irdma_set_cpi_common_values - fill in values for polling info struct
+ * @cpi: resulting structure of cq_poll_info type
+ * @qp: QPair
+ * @qp_num: id of the QP
+ */
+static void
+irdma_set_cpi_common_values(struct irdma_cq_poll_info *cpi,
+ struct irdma_qp_uk *qp, __u32 qp_num)
+{
+ cpi->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
+ cpi->error = 1;
+ cpi->major_err = IRDMA_FLUSH_MAJOR_ERR;
+ cpi->minor_err = FLUSH_GENERAL_ERR;
+ cpi->qp_handle = (irdma_qp_handle) (uintptr_t)qp;
+ cpi->qp_id = qp_num;
+}
+
+static bool
+irdma_cq_empty(struct irdma_ucq *iwucq)
+{
+ struct irdma_cq_uk *ukcq;
+ __u64 qword3;
+ __le64 *cqe;
+ __u8 polarity;
+
+ ukcq = &iwucq->cq;
+ cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
+ get_64bit_val(cqe, 24, &qword3);
+ polarity = (__u8) RS_64(qword3, IRDMA_CQ_VALID);
+
+ return polarity != ukcq->polarity;
+}
+
+/**
+ * irdma_generate_flush_completions - generate completion from WRs
+ * @iwuqp: pointer to QP
+ */
+static void
+irdma_generate_flush_completions(struct irdma_uqp *iwuqp)
+{
+ struct irdma_qp_uk *qp = &iwuqp->qp;
+ struct irdma_ring *sq_ring = &qp->sq_ring;
+ struct irdma_ring *rq_ring = &qp->rq_ring;
+ struct irdma_cmpl_gen *cmpl;
+ __le64 *sw_wqe;
+ __u64 wqe_qword;
+ __u32 wqe_idx;
+
+ if (pthread_spin_lock(&iwuqp->send_cq->lock))
+ return;
+ if (irdma_cq_empty(iwuqp->send_cq)) {
+ while (IRDMA_RING_MORE_WORK(*sq_ring)) {
+ cmpl = malloc(sizeof(*cmpl));
+ if (!cmpl) {
+ pthread_spin_unlock(&iwuqp->send_cq->lock);
+ return;
+ }
+
+ wqe_idx = sq_ring->tail;
+ irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id);
+ cmpl->cpi.wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
+ sw_wqe = qp->sq_base[wqe_idx].elem;
+ get_64bit_val(sw_wqe, 24, &wqe_qword);
+ cmpl->cpi.op_type = (__u8) RS_64(wqe_qword, IRDMAQPSQ_OPCODE);
+ /* remove the SQ WR by moving SQ tail */
+ IRDMA_RING_SET_TAIL(*sq_ring, sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
+ LIST_INSERT_HEAD(&iwuqp->send_cq->cmpl_generated, cmpl, list);
+ }
+ }
+ pthread_spin_unlock(&iwuqp->send_cq->lock);
+ if (pthread_spin_lock(&iwuqp->recv_cq->lock))
+ return;
+ if (irdma_cq_empty(iwuqp->recv_cq)) {
+ while (IRDMA_RING_MORE_WORK(*rq_ring)) {
+ cmpl = malloc(sizeof(*cmpl));
+ if (!cmpl) {
+ pthread_spin_unlock(&iwuqp->recv_cq->lock);
+ return;
+ }
+
+ wqe_idx = rq_ring->tail;
+ irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id);
+ cmpl->cpi.wr_id = qp->rq_wrid_array[wqe_idx];
+ cmpl->cpi.op_type = IRDMA_OP_TYPE_REC;
+ /* remove the RQ WR by moving RQ tail */
+ IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1);
+ LIST_INSERT_HEAD(&iwuqp->recv_cq->cmpl_generated, cmpl, list);
+ }
+ }
+ pthread_spin_unlock(&iwuqp->recv_cq->lock);
+}
+
+void *
+irdma_flush_thread(void *arg)
+{
+ __u8 i = 5;
+ struct irdma_uqp *iwuqp = arg;
+
+ while (--i) {
+ if (pthread_spin_lock(&iwuqp->lock))
+ break;
+ irdma_generate_flush_completions(arg);
+ pthread_spin_unlock(&iwuqp->lock);
+ sleep(1);
+ }
+ pthread_exit(NULL);
+}
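+
+/*
+ * Editorial note: with i starting at 5 the loop above runs four times, so the
+ * flush thread regenerates flush completions roughly once per second for
+ * about four seconds before exiting.
+ */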
+
+/**
+ * irdma_udestroy_cq - destroys cq
+ * @cq: ptr to cq to be destroyed
+ */
+int
+irdma_udestroy_cq(struct ibv_cq *cq)
+{
+ struct irdma_uk_attrs *uk_attrs;
+ struct irdma_uvcontext *iwvctx;
+ struct irdma_ucq *iwucq;
+ int ret;
+
+ iwucq = container_of(cq, struct irdma_ucq, verbs_cq.cq);
+ iwvctx = container_of(cq->context, struct irdma_uvcontext, ibv_ctx);
+ uk_attrs = &iwvctx->uk_attrs;
+
+ ret = pthread_spin_destroy(&iwucq->lock);
+ if (ret)
+ goto err;
+
+ if (!LIST_EMPTY(&iwucq->cmpl_generated))
+ irdma_remove_cmpls_list(iwucq);
+ irdma_process_resize_list(iwucq, NULL);
+ ret = ibv_cmd_destroy_cq(cq);
+ if (ret)
+ goto err;
+
+ ibv_cmd_dereg_mr(&iwucq->vmr.ibv_mr);
+ irdma_free_hw_buf(iwucq->cq.cq_base, iwucq->buf_size);
+
+ if (uk_attrs->feature_flags & IRDMA_FEATURE_CQ_RESIZE) {
+ ibv_cmd_dereg_mr(&iwucq->vmr_shadow_area.ibv_mr);
+ irdma_free_hw_buf(iwucq->cq.shadow_area, IRDMA_DB_SHADOW_AREA_SIZE);
+ }
+ free(iwucq);
+ return 0;
+
+err:
+ return ret;
+}
+
+static enum ibv_wc_status
+irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode)
+{
+ switch (opcode) {
+ case FLUSH_PROT_ERR:
+ return IBV_WC_LOC_PROT_ERR;
+ case FLUSH_REM_ACCESS_ERR:
+ return IBV_WC_REM_ACCESS_ERR;
+ case FLUSH_LOC_QP_OP_ERR:
+ return IBV_WC_LOC_QP_OP_ERR;
+ case FLUSH_REM_OP_ERR:
+ return IBV_WC_REM_OP_ERR;
+ case FLUSH_LOC_LEN_ERR:
+ return IBV_WC_LOC_LEN_ERR;
+ case FLUSH_GENERAL_ERR:
+ return IBV_WC_WR_FLUSH_ERR;
+ case FLUSH_MW_BIND_ERR:
+ return IBV_WC_MW_BIND_ERR;
+ case FLUSH_REM_INV_REQ_ERR:
+ return IBV_WC_REM_INV_REQ_ERR;
+ case FLUSH_RETRY_EXC_ERR:
+ return IBV_WC_RETRY_EXC_ERR;
+ case FLUSH_FATAL_ERR:
+ default:
+ return IBV_WC_FATAL_ERR;
+ }
+}
+
+/**
+ * irdma_process_cqe_ext - process current cqe for extended CQ
+ * @cur_cqe: current cqe info
+ */
+static void
+irdma_process_cqe_ext(struct irdma_cq_poll_info *cur_cqe)
+{
+ struct irdma_ucq *iwucq = container_of(cur_cqe, struct irdma_ucq, cur_cqe);
+ struct ibv_cq_ex *ibvcq_ex = &iwucq->verbs_cq.cq_ex;
+
+ ibvcq_ex->wr_id = cur_cqe->wr_id;
+ if (cur_cqe->error)
+ ibvcq_ex->status = (cur_cqe->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ?
+ irdma_flush_err_to_ib_wc_status(cur_cqe->minor_err) : IBV_WC_GENERAL_ERR;
+ else
+ ibvcq_ex->status = IBV_WC_SUCCESS;
+}
+
+/**
+ * irdma_process_cqe - process current cqe info
+ * @entry: ibv_wc object to fill in for non-extended CQ
+ * @cur_cqe: current cqe info
+ */
+static void
+irdma_process_cqe(struct ibv_wc *entry, struct irdma_cq_poll_info *cur_cqe)
+{
+ struct irdma_qp_uk *qp;
+ struct ibv_qp *ib_qp;
+
+ entry->wc_flags = 0;
+ entry->wr_id = cur_cqe->wr_id;
+ entry->qp_num = cur_cqe->qp_id;
+ qp = cur_cqe->qp_handle;
+ ib_qp = qp->back_qp;
+
+ if (cur_cqe->error) {
+ entry->status = (cur_cqe->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ?
+ irdma_flush_err_to_ib_wc_status(cur_cqe->minor_err) : IBV_WC_GENERAL_ERR;
+ entry->vendor_err = cur_cqe->major_err << 16 |
+ cur_cqe->minor_err;
+ } else {
+ entry->status = IBV_WC_SUCCESS;
+ }
+
+ if (cur_cqe->imm_valid) {
+ entry->imm_data = htonl(cur_cqe->imm_data);
+ entry->wc_flags |= IBV_WC_WITH_IMM;
+ }
+
+ switch (cur_cqe->op_type) {
+ case IRDMA_OP_TYPE_RDMA_WRITE:
+ case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
+ entry->opcode = IBV_WC_RDMA_WRITE;
+ break;
+ case IRDMA_OP_TYPE_RDMA_READ:
+ entry->opcode = IBV_WC_RDMA_READ;
+ break;
+ case IRDMA_OP_TYPE_SEND_SOL:
+ case IRDMA_OP_TYPE_SEND_SOL_INV:
+ case IRDMA_OP_TYPE_SEND_INV:
+ case IRDMA_OP_TYPE_SEND:
+ entry->opcode = IBV_WC_SEND;
+ break;
+ case IRDMA_OP_TYPE_BIND_MW:
+ entry->opcode = IBV_WC_BIND_MW;
+ break;
+ case IRDMA_OP_TYPE_REC:
+ entry->opcode = IBV_WC_RECV;
+ if (ib_qp->qp_type != IBV_QPT_UD &&
+ cur_cqe->stag_invalid_set) {
+ entry->invalidated_rkey = cur_cqe->inv_stag;
+ entry->wc_flags |= IBV_WC_WITH_INV;
+ }
+ break;
+ case IRDMA_OP_TYPE_REC_IMM:
+ entry->opcode = IBV_WC_RECV_RDMA_WITH_IMM;
+ if (ib_qp->qp_type != IBV_QPT_UD &&
+ cur_cqe->stag_invalid_set) {
+ entry->invalidated_rkey = cur_cqe->inv_stag;
+ entry->wc_flags |= IBV_WC_WITH_INV;
+ }
+ break;
+ case IRDMA_OP_TYPE_INV_STAG:
+ entry->opcode = IBV_WC_LOCAL_INV;
+ break;
+ default:
+ entry->status = IBV_WC_GENERAL_ERR;
+ printf("%s: Invalid opcode = %d in CQE\n",
+ __func__, cur_cqe->op_type);
+ return;
+ }
+
+ if (ib_qp->qp_type == IBV_QPT_UD) {
+ entry->src_qp = cur_cqe->ud_src_qpn;
+ entry->wc_flags |= IBV_WC_GRH;
+ } else {
+ entry->src_qp = cur_cqe->qp_id;
+ }
+ entry->byte_len = cur_cqe->bytes_xfered;
+}
+
+/**
+ * irdma_poll_one - poll one entry of the CQ
+ * @ukcq: ukcq to poll
+ * @cur_cqe: current CQE info to be filled in
+ * @entry: ibv_wc object to be filled for non-extended CQ or NULL for extended CQ
+ *
+ * Returns the internal irdma device error code or 0 on success
+ */
+static int
+irdma_poll_one(struct irdma_cq_uk *ukcq, struct irdma_cq_poll_info *cur_cqe,
+ struct ibv_wc *entry)
+{
+ int ret = irdma_uk_cq_poll_cmpl(ukcq, cur_cqe);
+
+ if (ret)
+ return ret;
+
+ if (!entry)
+ irdma_process_cqe_ext(cur_cqe);
+ else
+ irdma_process_cqe(entry, cur_cqe);
+
+ return 0;
+}
+
+/**
+ * __irdma_upoll_cq - irdma util function to poll device CQ
+ * @iwucq: irdma cq to poll
+ * @num_entries: max cq entries to poll
+ * @entry: pointer to array of ibv_wc objects to be filled in for each completion or NULL if ext CQ
+ *
+ * Returns the number of completions found on success, or EINVAL on failure
+ */
+static int
+__irdma_upoll_cq(struct irdma_ucq *iwucq, int num_entries,
+ struct ibv_wc *entry)
+{
+ struct irdma_cq_buf *cq_buf, *next;
+ struct irdma_cq_buf *last_buf = NULL;
+ struct irdma_cq_poll_info *cur_cqe = &iwucq->cur_cqe;
+ bool cq_new_cqe = false;
+ int resized_bufs = 0;
+ int npolled = 0;
+ int ret;
+
+ /* go through the list of previously resized CQ buffers */
+ LIST_FOREACH_SAFE(cq_buf, &iwucq->resize_list, list, next) {
+ while (npolled < num_entries) {
+ ret = irdma_poll_one(&cq_buf->cq, cur_cqe,
+ entry ? entry + npolled : NULL);
+ if (!ret) {
+ ++npolled;
+ cq_new_cqe = true;
+ continue;
+ }
+ if (ret == ENOENT)
+ break;
+ /* QP using the CQ is destroyed. Skip reporting this CQE */
+ if (ret == EFAULT) {
+ cq_new_cqe = true;
+ continue;
+ }
+ goto error;
+ }
+
+ /* save the resized CQ buffer which received the last cqe */
+ if (cq_new_cqe)
+ last_buf = cq_buf;
+ cq_new_cqe = false;
+ }
+
+ /* check the current CQ for new cqes */
+ while (npolled < num_entries) {
+ ret = irdma_poll_one(&iwucq->cq, cur_cqe,
+ entry ? entry + npolled : NULL);
+ if (ret == ENOENT) {
+ ret = irdma_generated_cmpls(iwucq, cur_cqe);
+ if (!ret) {
+ if (entry)
+ irdma_process_cqe(entry + npolled, cur_cqe);
+ else
+ irdma_process_cqe_ext(cur_cqe);
+ }
+ }
+ if (!ret) {
+ ++npolled;
+ cq_new_cqe = true;
+ continue;
+ }
+ if (ret == ENOENT)
+ break;
+ /* QP using the CQ is destroyed. Skip reporting this CQE */
+ if (ret == EFAULT) {
+ cq_new_cqe = true;
+ continue;
+ }
+ goto error;
+ }
+
+ if (cq_new_cqe)
+ /* all previous CQ resizes are complete */
+ resized_bufs = irdma_process_resize_list(iwucq, NULL);
+ else if (last_buf)
+ /* only CQ resizes up to the last_buf are complete */
+ resized_bufs = irdma_process_resize_list(iwucq, last_buf);
+ if (resized_bufs)
+ /* report to the HW the number of complete CQ resizes */
+ irdma_uk_cq_set_resized_cnt(&iwucq->cq, resized_bufs);
+
+ return npolled;
+
+error:
+ printf("%s: Error polling CQ, irdma_err: %d\n", __func__, ret);
+
+ return EINVAL;
+}
+
+/**
+ * irdma_upoll_cq - verb API callback to poll device CQ
+ * @cq: ibv_cq to poll
+ * @num_entries: max cq entries to poll
+ * @entry: pointer to array of ibv_wc objects to be filled in for each completion
+ *
+ * Returns a non-negative value equal to the number of completions found,
+ * or a negative error code on failure
+ */
+int
+irdma_upoll_cq(struct ibv_cq *cq, int num_entries, struct ibv_wc *entry)
+{
+ struct irdma_ucq *iwucq;
+ int ret;
+
+ iwucq = container_of(cq, struct irdma_ucq, verbs_cq.cq);
+ ret = pthread_spin_lock(&iwucq->lock);
+ if (ret)
+ return -ret;
+
+ ret = __irdma_upoll_cq(iwucq, num_entries, entry);
+
+ pthread_spin_unlock(&iwucq->lock);
+
+ return ret;
+}
+
+/**
+ * irdma_start_poll - verb_ex API callback to poll batch of WC's
+ * @ibvcq_ex: ibv extended CQ
+ * @attr: attributes (not used)
+ *
+ * Start polling a batch of work completions. Returns 0 on success, ENOENT when
+ * no completions are available on the CQ, or another error code on failure
+ */
+static int
+irdma_start_poll(struct ibv_cq_ex *ibvcq_ex, struct ibv_poll_cq_attr *attr)
+{
+ struct irdma_ucq *iwucq;
+ int ret;
+
+ iwucq = container_of(ibvcq_ex, struct irdma_ucq, verbs_cq.cq_ex);
+ ret = pthread_spin_lock(&iwucq->lock);
+ if (ret)
+ return ret;
+
+ ret = __irdma_upoll_cq(iwucq, 1, NULL);
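+ /*
+ * Editorial note: if a completion was found, the CQ lock is deliberately
+ * left held across the return below; irdma_end_poll() releases it.
+ */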
+ if (ret == 1)
+ return 0;
+
+ /* No Completions on CQ */
+ if (!ret)
+ ret = ENOENT;
+
+ pthread_spin_unlock(&iwucq->lock);
+
+ return ret;
+}
+
+/**
+ * irdma_next_poll - verb_ex API callback to get next WC
+ * @ibvcq_ex: ibv extended CQ
+ *
+ * Returns 0 on success, ENOENT when no completions are available on the CQ,
+ * or another error code on failure
+ */
+static int
+irdma_next_poll(struct ibv_cq_ex *ibvcq_ex)
+{
+ struct irdma_ucq *iwucq;
+ int ret;
+
+ iwucq = container_of(ibvcq_ex, struct irdma_ucq, verbs_cq.cq_ex);
+ ret = __irdma_upoll_cq(iwucq, 1, NULL);
+ if (ret == 1)
+ return 0;
+
+ /* No Completions on CQ */
+ if (!ret)
+ ret = ENOENT;
+
+ return ret;
+}
+
+/**
+ * irdma_end_poll - verb_ex API callback to end polling of WC's
+ * @ibvcq_ex: ibv extended CQ
+ */
+static void
+irdma_end_poll(struct ibv_cq_ex *ibvcq_ex)
+{
+ struct irdma_ucq *iwucq = container_of(ibvcq_ex, struct irdma_ucq,
+ verbs_cq.cq_ex);
+
+ pthread_spin_unlock(&iwucq->lock);
+}
+
+/**
+ * irdma_wc_read_completion_ts - Get completion timestamp
+ * @ibvcq_ex: ibv extended CQ
+ *
+ * Get completion timestamp in HCA clock units
+ */
+static uint64_t
+irdma_wc_read_completion_ts(struct ibv_cq_ex *ibvcq_ex)
+{
+ struct irdma_ucq *iwucq = container_of(ibvcq_ex, struct irdma_ucq,
+ verbs_cq.cq_ex);
+#define HCA_CORE_CLOCK_800_MHZ 800
+
+ return iwucq->cur_cqe.tcp_seq_num_rtt / HCA_CORE_CLOCK_800_MHZ;
+}
+
+static enum ibv_wc_opcode
+irdma_wc_read_opcode(struct ibv_cq_ex *ibvcq_ex)
+{
+ struct irdma_ucq *iwucq = container_of(ibvcq_ex, struct irdma_ucq,
+ verbs_cq.cq_ex);
+
+ switch (iwucq->cur_cqe.op_type) {
+ case IRDMA_OP_TYPE_RDMA_WRITE:
+ case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
+ return IBV_WC_RDMA_WRITE;
+ case IRDMA_OP_TYPE_RDMA_READ:
+ return IBV_WC_RDMA_READ;
+ case IRDMA_OP_TYPE_SEND_SOL:
+ case IRDMA_OP_TYPE_SEND_SOL_INV:
+ case IRDMA_OP_TYPE_SEND_INV:
+ case IRDMA_OP_TYPE_SEND:
+ return IBV_WC_SEND;
+ case IRDMA_OP_TYPE_BIND_MW:
+ return IBV_WC_BIND_MW;
+ case IRDMA_OP_TYPE_REC:
+ return IBV_WC_RECV;
+ case IRDMA_OP_TYPE_REC_IMM:
+ return IBV_WC_RECV_RDMA_WITH_IMM;
+ case IRDMA_OP_TYPE_INV_STAG:
+ return IBV_WC_LOCAL_INV;
+ }
+
+ printf("%s: Invalid opcode = %d in CQE\n", __func__,
+ iwucq->cur_cqe.op_type);
+
+ return 0;
+}
+
+static uint32_t
+irdma_wc_read_vendor_err(struct ibv_cq_ex *ibvcq_ex)
+{
+ struct irdma_cq_poll_info *cur_cqe;
+ struct irdma_ucq *iwucq;
+
+ iwucq = container_of(ibvcq_ex, struct irdma_ucq, verbs_cq.cq_ex);
+ cur_cqe = &iwucq->cur_cqe;
+
+ return cur_cqe->error ? cur_cqe->major_err << 16 | cur_cqe->minor_err : 0;
+}
+
+static int
+irdma_wc_read_wc_flags(struct ibv_cq_ex *ibvcq_ex)
+{
+ struct irdma_cq_poll_info *cur_cqe;
+ struct irdma_ucq *iwucq;
+ struct irdma_qp_uk *qp;
+ struct ibv_qp *ib_qp;
+ int wc_flags = 0;
+
+ iwucq = container_of(ibvcq_ex, struct irdma_ucq, verbs_cq.cq_ex);
+ cur_cqe = &iwucq->cur_cqe;
+ qp = cur_cqe->qp_handle;
+ ib_qp = qp->back_qp;
+
+ if (cur_cqe->imm_valid)
+ wc_flags |= IBV_WC_WITH_IMM;
+
+ if (ib_qp->qp_type == IBV_QPT_UD) {
+ wc_flags |= IBV_WC_GRH;
+ } else {
+ if (cur_cqe->stag_invalid_set) {
+ switch (cur_cqe->op_type) {
+ case IRDMA_OP_TYPE_REC:
+ wc_flags |= IBV_WC_WITH_INV;
+ break;
+ case IRDMA_OP_TYPE_REC_IMM:
+ wc_flags |= IBV_WC_WITH_INV;
+ break;
+ }
+ }
+ }
+
+ return wc_flags;
+}
+
+static uint32_t
+irdma_wc_read_byte_len(struct ibv_cq_ex *ibvcq_ex)
+{
+ struct irdma_ucq *iwucq = container_of(ibvcq_ex, struct irdma_ucq,
+ verbs_cq.cq_ex);
+
+ return iwucq->cur_cqe.bytes_xfered;
+}
+
+static __be32
+irdma_wc_read_imm_data(struct ibv_cq_ex *ibvcq_ex)
+{
+ struct irdma_cq_poll_info *cur_cqe;
+ struct irdma_ucq *iwucq;
+
+ iwucq = container_of(ibvcq_ex, struct irdma_ucq, verbs_cq.cq_ex);
+ cur_cqe = &iwucq->cur_cqe;
+
+ return cur_cqe->imm_valid ? htonl(cur_cqe->imm_data) : 0;
+}
+
+static uint32_t
+irdma_wc_read_qp_num(struct ibv_cq_ex *ibvcq_ex)
+{
+ struct irdma_ucq *iwucq = container_of(ibvcq_ex, struct irdma_ucq,
+ verbs_cq.cq_ex);
+
+ return iwucq->cur_cqe.qp_id;
+}
+
+static uint32_t
+irdma_wc_read_src_qp(struct ibv_cq_ex *ibvcq_ex)
+{
+ struct irdma_cq_poll_info *cur_cqe;
+ struct irdma_ucq *iwucq;
+ struct irdma_qp_uk *qp;
+ struct ibv_qp *ib_qp;
+
+ iwucq = container_of(ibvcq_ex, struct irdma_ucq, verbs_cq.cq_ex);
+ cur_cqe = &iwucq->cur_cqe;
+ qp = cur_cqe->qp_handle;
+ ib_qp = qp->back_qp;
+
+ return ib_qp->qp_type == IBV_QPT_UD ? cur_cqe->ud_src_qpn : cur_cqe->qp_id;
+}
+
+static uint8_t
+irdma_wc_read_sl(struct ibv_cq_ex *ibvcq_ex)
+{
+ return 0;
+}
+
+void
+irdma_ibvcq_ex_fill_priv_funcs(struct irdma_ucq *iwucq,
+ struct ibv_cq_init_attr_ex *attr_ex)
+{
+ struct ibv_cq_ex *ibvcq_ex = &iwucq->verbs_cq.cq_ex;
+
+ ibvcq_ex->start_poll = irdma_start_poll;
+ ibvcq_ex->end_poll = irdma_end_poll;
+ ibvcq_ex->next_poll = irdma_next_poll;
+
+ if (attr_ex->wc_flags & IBV_WC_EX_WITH_COMPLETION_TIMESTAMP) {
+ ibvcq_ex->read_completion_ts = irdma_wc_read_completion_ts;
+ iwucq->report_rtt = true;
+ }
+
+ ibvcq_ex->read_opcode = irdma_wc_read_opcode;
+ ibvcq_ex->read_vendor_err = irdma_wc_read_vendor_err;
+ ibvcq_ex->read_wc_flags = irdma_wc_read_wc_flags;
+
+ if (attr_ex->wc_flags & IBV_WC_EX_WITH_BYTE_LEN)
+ ibvcq_ex->read_byte_len = irdma_wc_read_byte_len;
+ if (attr_ex->wc_flags & IBV_WC_EX_WITH_IMM)
+ ibvcq_ex->read_imm_data = irdma_wc_read_imm_data;
+ if (attr_ex->wc_flags & IBV_WC_EX_WITH_QP_NUM)
+ ibvcq_ex->read_qp_num = irdma_wc_read_qp_num;
+ if (attr_ex->wc_flags & IBV_WC_EX_WITH_SRC_QP)
+ ibvcq_ex->read_src_qp = irdma_wc_read_src_qp;
+ if (attr_ex->wc_flags & IBV_WC_EX_WITH_SL)
+ ibvcq_ex->read_sl = irdma_wc_read_sl;
+}
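+
+/*
+ * Illustrative usage (not part of this file): these callbacks back the
+ * libibverbs extended CQ poll API, e.g.
+ *
+ *	struct ibv_cq_init_attr_ex attr_ex = {
+ *		.cqe = 256,
+ *		.wc_flags = IBV_WC_EX_WITH_BYTE_LEN | IBV_WC_EX_WITH_QP_NUM,
+ *	};
+ *	struct ibv_cq_ex *cq_ex = ibv_create_cq_ex(ctx, &attr_ex);
+ *	struct ibv_poll_cq_attr poll_attr = {};
+ *
+ *	if (!ibv_start_poll(cq_ex, &poll_attr)) {
+ *		do {
+ *			process(cq_ex->wr_id, cq_ex->status);
+ *		} while (!ibv_next_poll(cq_ex));
+ *		ibv_end_poll(cq_ex);
+ *	}
+ *
+ * ctx and process() are placeholders.
+ */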
+
+/**
+ * irdma_arm_cq - request completion notifications on the cq
+ * @iwucq: cq to arm
+ * @cq_notify: notification type
+ */
+static void
+irdma_arm_cq(struct irdma_ucq *iwucq,
+ enum irdma_cmpl_notify cq_notify)
+{
+ iwucq->is_armed = true;
+ iwucq->arm_sol = true;
+ iwucq->skip_arm = false;
+ iwucq->skip_sol = true;
+ irdma_uk_cq_request_notification(&iwucq->cq, cq_notify);
+}
+
+/**
+ * irdma_uarm_cq - verb API callback to arm the cq
+ * @cq: cq to arm
+ * @solicited: non-zero to arm for solicited completions only
+ */
+int
+irdma_uarm_cq(struct ibv_cq *cq, int solicited)
+{
+ struct irdma_ucq *iwucq;
+ enum irdma_cmpl_notify cq_notify = IRDMA_CQ_COMPL_EVENT;
+ int ret;
+
+ iwucq = container_of(cq, struct irdma_ucq, verbs_cq.cq);
+ if (solicited)
+ cq_notify = IRDMA_CQ_COMPL_SOLICITED;
+
+ ret = pthread_spin_lock(&iwucq->lock);
+ if (ret)
+ return ret;
+
+ if (iwucq->is_armed) {
+ if (iwucq->arm_sol && !solicited) {
+ irdma_arm_cq(iwucq, cq_notify);
+ } else {
+ iwucq->skip_arm = true;
+ iwucq->skip_sol = solicited ? true : false;
+ }
+ } else {
+ irdma_arm_cq(iwucq, cq_notify);
+ }
+
+ pthread_spin_unlock(&iwucq->lock);
+
+ return 0;
+}
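+
+/*
+ * Editorial note: this matches the provider req_notify_cq hook, i.e. the
+ * path taken by ibv_req_notify_cq(); the solicited argument mirrors the
+ * solicited_only flag of that verb.
+ */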
+
+/**
+ * irdma_cq_event - handle a completion event on the cq
+ * @cq: cq that received the event
+ */
+void
+irdma_cq_event(struct ibv_cq *cq)
+{
+ struct irdma_ucq *iwucq;
+
+ iwucq = container_of(cq, struct irdma_ucq, verbs_cq.cq);
+ if (pthread_spin_lock(&iwucq->lock))
+ return;
+
+ if (iwucq->skip_arm)
+ irdma_arm_cq(iwucq, IRDMA_CQ_COMPL_EVENT);
+ else
+ iwucq->is_armed = false;
+
+ pthread_spin_unlock(&iwucq->lock);
+}
+
+void *
+irdma_mmap(int fd, off_t offset)
+{
+ void *map;
+
+ map = mmap(NULL, IRDMA_HW_PAGE_SIZE, PROT_WRITE | PROT_READ, MAP_SHARED,
+ fd, offset);
+ if (map == MAP_FAILED)
+ return map;
+
+ if (ibv_dontfork_range(map, IRDMA_HW_PAGE_SIZE)) {
+ munmap(map, IRDMA_HW_PAGE_SIZE);
+ return MAP_FAILED;
+ }
+
+ return map;
+}
+
+void
+irdma_munmap(void *map)
+{
+ ibv_dofork_range(map, IRDMA_HW_PAGE_SIZE);
+ munmap(map, IRDMA_HW_PAGE_SIZE);
+}
+
+/**
+ * irdma_destroy_vmapped_qp - destroy resources for qp
+ * @iwuqp: qp struct for resources
+ */
+static int
+irdma_destroy_vmapped_qp(struct irdma_uqp *iwuqp)
+{
+ int ret;
+
+ ret = ibv_cmd_destroy_qp(&iwuqp->ibv_qp);
+ if (ret)
+ return ret;
+
+ if (iwuqp->qp.push_db)
+ irdma_munmap(iwuqp->qp.push_db);
+ if (iwuqp->qp.push_wqe)
+ irdma_munmap(iwuqp->qp.push_wqe);
+
+ ibv_cmd_dereg_mr(&iwuqp->vmr.ibv_mr);
+
+ return 0;
+}
+
+/**
+ * irdma_vmapped_qp - create resources for qp
+ * @iwuqp: qp struct for resources
+ * @pd: pd for the qp
+ * @attr: attributes of qp passed
+ * @sqdepth: depth of sq
+ * @rqdepth: depth of rq
+ * @info: info for initializing user level qp
+ * @legacy_mode: indicates legacy ABI mode is in use
+ */
+static int
+irdma_vmapped_qp(struct irdma_uqp *iwuqp, struct ibv_pd *pd,
+ struct ibv_qp_init_attr *attr, int sqdepth,
+ int rqdepth, struct irdma_qp_uk_init_info *info,
+ bool legacy_mode)
+{
+ struct irdma_ucreate_qp cmd = {};
+ size_t sqsize, rqsize, totalqpsize;
+ struct irdma_ucreate_qp_resp resp = {};
+ struct irdma_ureg_mr reg_mr_cmd = {};
+ struct ibv_reg_mr_resp reg_mr_resp = {};
+ int ret;
+
+ sqsize = roundup(sqdepth * IRDMA_QP_WQE_MIN_SIZE, IRDMA_HW_PAGE_SIZE);
+ rqsize = roundup(rqdepth * IRDMA_QP_WQE_MIN_SIZE, IRDMA_HW_PAGE_SIZE);
+ totalqpsize = rqsize + sqsize + IRDMA_DB_SHADOW_AREA_SIZE;
+ info->sq = irdma_alloc_hw_buf(totalqpsize);
+ iwuqp->buf_size = totalqpsize;
+
+ if (!info->sq)
+ return ENOMEM;
+
+ memset(info->sq, 0, totalqpsize);
+ info->rq = &info->sq[sqsize / IRDMA_QP_WQE_MIN_SIZE];
+ info->shadow_area = info->rq[rqsize / IRDMA_QP_WQE_MIN_SIZE].elem;
+
+ reg_mr_cmd.reg_type = IRDMA_MEMREG_TYPE_QP;
+ reg_mr_cmd.sq_pages = sqsize >> IRDMA_HW_PAGE_SHIFT;
+ reg_mr_cmd.rq_pages = rqsize >> IRDMA_HW_PAGE_SHIFT;
+
+ ret = ibv_cmd_reg_mr(pd, info->sq, totalqpsize,
+ (uintptr_t)info->sq, IBV_ACCESS_LOCAL_WRITE,
+ &iwuqp->vmr.ibv_mr, &reg_mr_cmd.ibv_cmd,
+ sizeof(reg_mr_cmd), &reg_mr_resp,
+ sizeof(reg_mr_resp));
+ if (ret)
+ goto err_dereg_mr;
+
+ cmd.user_wqe_bufs = (__u64) ((uintptr_t)info->sq);
+ cmd.user_compl_ctx = (__u64) (uintptr_t)&iwuqp->qp;
+ ret = ibv_cmd_create_qp(pd, &iwuqp->ibv_qp, attr, &cmd.ibv_cmd,
+ sizeof(cmd), &resp.ibv_resp,
+ sizeof(struct irdma_ucreate_qp_resp));
+ if (ret)
+ goto err_qp;
+
+ info->sq_size = resp.actual_sq_size;
+ info->rq_size = resp.actual_rq_size;
+ info->first_sq_wq = legacy_mode ? 1 : resp.lsmm;
+ info->qp_caps = resp.qp_caps;
+ info->qp_id = resp.qp_id;
+ iwuqp->irdma_drv_opt = resp.irdma_drv_opt;
+ iwuqp->ibv_qp.qp_num = resp.qp_id;
+
+ iwuqp->send_cq = container_of(attr->send_cq, struct irdma_ucq,
+ verbs_cq.cq);
+ iwuqp->recv_cq = container_of(attr->recv_cq, struct irdma_ucq,
+ verbs_cq.cq);
+ iwuqp->send_cq->uqp = iwuqp;
+ iwuqp->recv_cq->uqp = iwuqp;
+
+ return 0;
+err_qp:
+ ibv_cmd_dereg_mr(&iwuqp->vmr.ibv_mr);
+err_dereg_mr:
+ printf("%s: failed to create QP, status %d\n", __func__, ret);
+ irdma_free_hw_buf(info->sq, iwuqp->buf_size);
+ return ret;
+}
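+
+/*
+ * Editorial note on the buffer layout set up above: a single HW buffer holds,
+ * in order, the SQ quanta (sqsize bytes), the RQ quanta (rqsize bytes) and
+ * the shadow area (IRDMA_DB_SHADOW_AREA_SIZE bytes), and the whole region is
+ * registered as one MR of type IRDMA_MEMREG_TYPE_QP.
+ */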
+
+/**
+ * irdma_ucreate_qp - create a qp for the user application
+ * @pd: pd for the qp
+ * @attr: attributes of the qp to be created (sizes, sge, cq)
+ */
+struct ibv_qp *
+irdma_ucreate_qp(struct ibv_pd *pd,
+ struct ibv_qp_init_attr *attr)
+{
+ struct irdma_qp_uk_init_info info = {};
+ struct irdma_uk_attrs *uk_attrs;
+ struct irdma_uvcontext *iwvctx;
+ struct irdma_uqp *iwuqp;
+ u32 sqdepth, rqdepth;
+ u8 sqshift, rqshift;
+ int status;
+
+ if (attr->qp_type != IBV_QPT_RC && attr->qp_type != IBV_QPT_UD) {
+ printf("%s: failed to create QP, unsupported QP type: 0x%x\n",
+ __func__, attr->qp_type);
+ errno = EOPNOTSUPP;
+ return NULL;
+ }
+
+ iwvctx = container_of(pd->context, struct irdma_uvcontext, ibv_ctx);
+ uk_attrs = &iwvctx->uk_attrs;
+
+ if (attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
+ attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags ||
+ attr->cap.max_inline_data > uk_attrs->max_hw_inline) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ irdma_get_wqe_shift(uk_attrs,
+ uk_attrs->hw_rev > IRDMA_GEN_1 ? attr->cap.max_send_sge + 1 :
+ attr->cap.max_send_sge,
+ attr->cap.max_inline_data, &sqshift);
+ status = irdma_get_sqdepth(uk_attrs->max_hw_wq_quanta,
+ attr->cap.max_send_wr, sqshift, &sqdepth);
+ if (status) {
+ printf("%s: invalid SQ attributes, max_send_wr=%d max_send_sge=%d max_inline=%d\n",
+ __func__, attr->cap.max_send_wr, attr->cap.max_send_sge,
+ attr->cap.max_inline_data);
+ errno = status;
+ return NULL;
+ }
+
+ if (uk_attrs->hw_rev == IRDMA_GEN_1 && iwvctx->abi_ver > 4)
+ rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
+ else
+ irdma_get_wqe_shift(uk_attrs, attr->cap.max_recv_sge, 0,
+ &rqshift);
+
+ status = irdma_get_rqdepth(uk_attrs->max_hw_rq_quanta,
+ attr->cap.max_recv_wr, rqshift, &rqdepth);
+ if (status) {
+ printf("%s: invalid RQ attributes, recv_wr=%d recv_sge=%d\n",
+ __func__, attr->cap.max_recv_wr, attr->cap.max_recv_sge);
+ errno = status;
+ return NULL;
+ }
+
+ iwuqp = memalign(1024, sizeof(*iwuqp));
+ if (!iwuqp)
+ return NULL;
+
+ memset(iwuqp, 0, sizeof(*iwuqp));
+
+ if (pthread_spin_init(&iwuqp->lock, PTHREAD_PROCESS_PRIVATE))
+ goto err_free_qp;
+
+ info.sq_size = sqdepth >> sqshift;
+ info.rq_size = rqdepth >> rqshift;
+ attr->cap.max_send_wr = info.sq_size;
+ attr->cap.max_recv_wr = info.rq_size;
+
+ info.uk_attrs = uk_attrs;
+ info.max_sq_frag_cnt = attr->cap.max_send_sge;
+ info.max_rq_frag_cnt = attr->cap.max_recv_sge;
+ iwuqp->recv_sges = calloc(attr->cap.max_recv_sge, sizeof(*iwuqp->recv_sges));
+ if (!iwuqp->recv_sges)
+ goto err_destroy_lock;
+
+ info.wqe_alloc_db = (u32 *)iwvctx->db;
+ info.legacy_mode = iwvctx->legacy_mode;
+ info.sq_wrtrk_array = calloc(sqdepth, sizeof(*info.sq_wrtrk_array));
+ if (!info.sq_wrtrk_array)
+ goto err_free_rsges;
+
+ info.rq_wrid_array = calloc(rqdepth, sizeof(*info.rq_wrid_array));
+ if (!info.rq_wrid_array)
+ goto err_free_sq_wrtrk;
+
+ iwuqp->sq_sig_all = attr->sq_sig_all;
+ iwuqp->qp_type = attr->qp_type;
+ status = irdma_vmapped_qp(iwuqp, pd, attr, sqdepth, rqdepth, &info, iwvctx->legacy_mode);
+ if (status) {
+ errno = status;
+ goto err_free_rq_wrid;
+ }
+
+ iwuqp->qp.back_qp = iwuqp;
+ iwuqp->qp.lock = &iwuqp->lock;
+
+ info.max_sq_frag_cnt = attr->cap.max_send_sge;
+ info.max_rq_frag_cnt = attr->cap.max_recv_sge;
+ info.max_inline_data = attr->cap.max_inline_data;
+ iwuqp->qp.force_fence = true;
+ status = irdma_uk_qp_init(&iwuqp->qp, &info);
+ if (status) {
+ errno = status;
+ goto err_free_vmap_qp;
+ }
+
+ attr->cap.max_send_wr = (sqdepth - IRDMA_SQ_RSVD) >> sqshift;
+ attr->cap.max_recv_wr = (rqdepth - IRDMA_RQ_RSVD) >> rqshift;
+ return &iwuqp->ibv_qp;
+
+err_free_vmap_qp:
+ irdma_destroy_vmapped_qp(iwuqp);
+ irdma_free_hw_buf(info.sq, iwuqp->buf_size);
+err_free_rq_wrid:
+ free(info.rq_wrid_array);
+err_free_sq_wrtrk:
+ free(info.sq_wrtrk_array);
+err_free_rsges:
+ free(iwuqp->recv_sges);
+err_destroy_lock:
+ pthread_spin_destroy(&iwuqp->lock);
+err_free_qp:
+ printf("%s: failed to create QP\n", __func__);
+ free(iwuqp);
+
+ return NULL;
+}
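+
+/*
+ * Illustrative usage (not part of this file): applications reach this
+ * through ibv_create_qp(), e.g.
+ *
+ *	struct ibv_qp_init_attr attr = {
+ *		.send_cq = cq,
+ *		.recv_cq = cq,
+ *		.qp_type = IBV_QPT_RC,
+ *		.cap = {
+ *			.max_send_wr = 64,
+ *			.max_recv_wr = 64,
+ *			.max_send_sge = 1,
+ *			.max_recv_sge = 1,
+ *		},
+ *	};
+ *	struct ibv_qp *qp = ibv_create_qp(pd, &attr);
+ *
+ * cq and pd are placeholders; the cap values are adjusted on return as above.
+ */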
+
+/**
+ * irdma_uquery_qp - query qp for some attribute
+ * @qp: qp for the attributes query
+ * @attr: to return the attributes
+ * @attr_mask: mask of the attributes to query
+ * @init_attr: initial attributes during create_qp
+ */
+int
+irdma_uquery_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask,
+ struct ibv_qp_init_attr *init_attr)
+{
+ struct ibv_query_qp cmd;
+
+ return ibv_cmd_query_qp(qp, attr, attr_mask, init_attr, &cmd,
+ sizeof(cmd));
+}
+
+/**
+ * irdma_umodify_qp - send qp modify to driver
+ * @qp: qp to modify
+ * @attr: attribute to modify
+ * @attr_mask: mask of the attribute
+ */
+int
+irdma_umodify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask)
+{
+ struct irdma_umodify_qp_resp resp = {};
+ struct ibv_modify_qp cmd = {};
+ struct irdma_modify_qp_cmd cmd_ex = {};
+ struct irdma_uvcontext *iwvctx;
+ struct irdma_uqp *iwuqp;
+
+ iwuqp = container_of(qp, struct irdma_uqp, ibv_qp);
+ iwvctx = container_of(qp->context, struct irdma_uvcontext, ibv_ctx);
+ iwuqp->attr_mask = attr_mask;
+ memcpy(&iwuqp->attr, attr, sizeof(iwuqp->attr));
+
+ if (iwuqp->qp.qp_caps & IRDMA_PUSH_MODE && attr_mask & IBV_QP_STATE &&
+ iwvctx->uk_attrs.hw_rev > IRDMA_GEN_1) {
+ u64 offset;
+ void *map;
+ int ret;
+
+ ret = ibv_cmd_modify_qp_ex(qp, attr, attr_mask, &cmd_ex.ibv_cmd,
+ sizeof(cmd_ex.ibv_cmd),
+ sizeof(cmd_ex), &resp.ibv_resp,
+ sizeof(resp.ibv_resp),
+ sizeof(resp));
+ if (!ret)
+ iwuqp->qp.rd_fence_rate = resp.rd_fence_rate;
+ if (ret || !resp.push_valid)
+ return ret;
+
+ if (iwuqp->qp.push_wqe)
+ return ret;
+
+ offset = resp.push_wqe_mmap_key;
+ map = irdma_mmap(qp->context->cmd_fd, offset);
+ if (map == MAP_FAILED)
+ return ret;
+
+ iwuqp->qp.push_wqe = map;
+
+ offset = resp.push_db_mmap_key;
+ map = irdma_mmap(qp->context->cmd_fd, offset);
+ if (map == MAP_FAILED) {
+ irdma_munmap(iwuqp->qp.push_wqe);
+ iwuqp->qp.push_wqe = NULL;
+ printf("failed to map push page, errno %d\n", errno);
+ return ret;
+ }
+ iwuqp->qp.push_wqe += resp.push_offset;
+ iwuqp->qp.push_db = map + resp.push_offset;
+
+ return ret;
+ } else {
+ int ret;
+
+ ret = ibv_cmd_modify_qp(qp, attr, attr_mask, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
+ if (attr_mask & IBV_QP_STATE && attr->qp_state == IBV_QPS_ERR)
+ pthread_create(&iwuqp->flush_thread, NULL, irdma_flush_thread, iwuqp);
+ return 0;
+ }
+}
+
+static void
+irdma_issue_flush(struct ibv_qp *qp, bool sq_flush, bool rq_flush)
+{
+ struct irdma_umodify_qp_resp resp = {};
+ struct irdma_modify_qp_cmd cmd_ex = {};
+ struct irdma_uqp *iwuqp;
+
+ cmd_ex.sq_flush = sq_flush;
+ cmd_ex.rq_flush = rq_flush;
+ iwuqp = container_of(qp, struct irdma_uqp, ibv_qp);
+
+ ibv_cmd_modify_qp_ex(qp, &iwuqp->attr, iwuqp->attr_mask,
+ &cmd_ex.ibv_cmd,
+ sizeof(cmd_ex.ibv_cmd),
+ sizeof(cmd_ex), &resp.ibv_resp,
+ sizeof(resp.ibv_resp),
+ sizeof(resp));
+}
+
+/**
+ * irdma_clean_cqes - clean cq entries for qp
+ * @qp: qp for which completions are cleaned
+ * @iwucq: cq to be cleaned
+ */
+static void
+irdma_clean_cqes(struct irdma_qp_uk *qp, struct irdma_ucq *iwucq)
+{
+ struct irdma_cq_uk *ukcq = &iwucq->cq;
+ int ret;
+
+ ret = pthread_spin_lock(&iwucq->lock);
+ if (ret)
+ return;
+
+ irdma_uk_clean_cq(qp, ukcq);
+ pthread_spin_unlock(&iwucq->lock);
+}
+
+/**
+ * irdma_udestroy_qp - destroy qp
+ * @qp: qp to destroy
+ */
+int
+irdma_udestroy_qp(struct ibv_qp *qp)
+{
+ struct irdma_uqp *iwuqp;
+ int ret;
+
+ iwuqp = container_of(qp, struct irdma_uqp, ibv_qp);
+ if (iwuqp->flush_thread) {
+ pthread_cancel(iwuqp->flush_thread);
+ pthread_join(iwuqp->flush_thread, NULL);
+ }
+ ret = pthread_spin_destroy(&iwuqp->lock);
+ if (ret)
+ goto err;
+
+ ret = irdma_destroy_vmapped_qp(iwuqp);
+ if (ret)
+ goto err;
+
+ /* Clean any pending completions from the cq(s) */
+ if (iwuqp->send_cq)
+ irdma_clean_cqes(&iwuqp->qp, iwuqp->send_cq);
+
+ if (iwuqp->recv_cq && iwuqp->recv_cq != iwuqp->send_cq)
+ irdma_clean_cqes(&iwuqp->qp, iwuqp->recv_cq);
+
+ if (iwuqp->qp.sq_wrtrk_array)
+ free(iwuqp->qp.sq_wrtrk_array);
+ if (iwuqp->qp.rq_wrid_array)
+ free(iwuqp->qp.rq_wrid_array);
+
+ irdma_free_hw_buf(iwuqp->qp.sq_base, iwuqp->buf_size);
+ free(iwuqp->recv_sges);
+ free(iwuqp);
+ return 0;
+
+err:
+ printf("%s: failed to destroy QP, status %d\n",
+ __func__, ret);
+ return ret;
+}
+
+/**
+ * irdma_copy_sg_list - copy sg list for qp
+ * @sg_list: destination sg list
+ * @sgl: source sgl to copy from
+ * @num_sges: count of sg entries
+ */
+static void
+irdma_copy_sg_list(struct irdma_sge *sg_list, struct ibv_sge *sgl,
+ int num_sges)
+{
+ int i;
+
+ for (i = 0; i < num_sges; i++) {
+ sg_list[i].tag_off = sgl[i].addr;
+ sg_list[i].len = sgl[i].length;
+ sg_list[i].stag = sgl[i].lkey;
+ }
+}
+
+/**
+ * calc_type2_mw_stag - calculate type 2 MW stag
+ * @rkey: desired rkey of the MW
+ * @mw_rkey: type2 memory window rkey
+ *
+ * compute the type2 memory window stag by taking the lower 8 bits
+ * of the desired rkey and leaving the upper 24 bits of mw_rkey unchanged
+ */
+static inline u32
+calc_type2_mw_stag(u32 rkey, u32 mw_rkey)
+{
+ const u32 mask = 0xff;
+
+ return (rkey & mask) | (mw_rkey & ~mask);
+}
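+
+/*
+ * Example (illustrative): rkey = 0x12345678 and mw_rkey = 0xaabbcc00 give
+ * (0x78) | (0xaabbcc00) = 0xaabbcc78.
+ */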
+
+/**
+ * irdma_upost_send - post send wr for user application
+ * @ib_qp: qp to post wr
+ * @ib_wr: work request ptr
+ * @bad_wr: return of bad wr if err
+ */
+int
+irdma_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
+ struct ibv_send_wr **bad_wr)
+{
+ struct irdma_post_sq_info info;
+ struct irdma_uvcontext *iwvctx;
+ struct irdma_uk_attrs *uk_attrs;
+ struct irdma_uqp *iwuqp;
+ bool reflush = false;
+ int err = 0;
+
+ iwuqp = container_of(ib_qp, struct irdma_uqp, ibv_qp);
+ iwvctx = container_of(ib_qp->context, struct irdma_uvcontext, ibv_ctx);
+ uk_attrs = &iwvctx->uk_attrs;
+
+ err = pthread_spin_lock(&iwuqp->lock);
+ if (err)
+ return err;
+
+ if (!IRDMA_RING_MORE_WORK(iwuqp->qp.sq_ring) &&
+ ib_qp->state == IBV_QPS_ERR)
+ reflush = true;
+
+ while (ib_wr) {
+ memset(&info, 0, sizeof(info));
+ info.wr_id = (u64)(ib_wr->wr_id);
+ if ((ib_wr->send_flags & IBV_SEND_SIGNALED) ||
+ iwuqp->sq_sig_all)
+ info.signaled = true;
+ if (ib_wr->send_flags & IBV_SEND_FENCE)
+ info.read_fence = true;
+ if (iwuqp->send_cq->report_rtt)
+ info.report_rtt = true;
+
+ switch (ib_wr->opcode) {
+ case IBV_WR_SEND_WITH_IMM:
+ if (iwuqp->qp.qp_caps & IRDMA_SEND_WITH_IMM) {
+ info.imm_data_valid = true;
+ info.imm_data = ntohl(ib_wr->imm_data);
+ } else {
+ err = EINVAL;
+ break;
+ }
+ /* fallthrough */
+ case IBV_WR_SEND:
+ case IBV_WR_SEND_WITH_INV:
+ if (ib_wr->opcode == IBV_WR_SEND ||
+ ib_wr->opcode == IBV_WR_SEND_WITH_IMM) {
+ if (ib_wr->send_flags & IBV_SEND_SOLICITED)
+ info.op_type = IRDMA_OP_TYPE_SEND_SOL;
+ else
+ info.op_type = IRDMA_OP_TYPE_SEND;
+ } else {
+ if (ib_wr->send_flags & IBV_SEND_SOLICITED)
+ info.op_type = IRDMA_OP_TYPE_SEND_SOL_INV;
+ else
+ info.op_type = IRDMA_OP_TYPE_SEND_INV;
+ info.stag_to_inv = ib_wr->imm_data;
+ }
+ if (ib_wr->send_flags & IBV_SEND_INLINE) {
+ info.op.inline_send.data = (void *)(uintptr_t)ib_wr->sg_list[0].addr;
+ info.op.inline_send.len = ib_wr->sg_list[0].length;
+ if (ib_qp->qp_type == IBV_QPT_UD) {
+ struct irdma_uah *ah = container_of(ib_wr->wr.ud.ah,
+ struct irdma_uah, ibv_ah);
+
+ info.op.inline_send.ah_id = ah->ah_id;
+ info.op.inline_send.qkey = ib_wr->wr.ud.remote_qkey;
+ info.op.inline_send.dest_qp = ib_wr->wr.ud.remote_qpn;
+ }
+ err = irdma_uk_inline_send(&iwuqp->qp, &info, false);
+ } else {
+ info.op.send.num_sges = ib_wr->num_sge;
+ info.op.send.sg_list = (struct irdma_sge *)ib_wr->sg_list;
+ if (ib_qp->qp_type == IBV_QPT_UD) {
+ struct irdma_uah *ah = container_of(ib_wr->wr.ud.ah,
+ struct irdma_uah, ibv_ah);
+
+ info.op.inline_send.ah_id = ah->ah_id;
+ info.op.inline_send.qkey = ib_wr->wr.ud.remote_qkey;
+ info.op.inline_send.dest_qp = ib_wr->wr.ud.remote_qpn;
+ }
+ err = irdma_uk_send(&iwuqp->qp, &info, false);
+ }
+ break;
+ case IBV_WR_RDMA_WRITE_WITH_IMM:
+ if (iwuqp->qp.qp_caps & IRDMA_WRITE_WITH_IMM) {
+ info.imm_data_valid = true;
+ info.imm_data = ntohl(ib_wr->imm_data);
+ } else {
+ err = EINVAL;
+ break;
+ }
+ /* fallthrough */
+ case IBV_WR_RDMA_WRITE:
+ if (ib_wr->send_flags & IBV_SEND_SOLICITED)
+ info.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL;
+ else
+ info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
+
+ if (ib_wr->send_flags & IBV_SEND_INLINE) {
+ info.op.inline_rdma_write.data = (void *)(uintptr_t)ib_wr->sg_list[0].addr;
+ info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
+ info.op.inline_rdma_write.rem_addr.tag_off = ib_wr->wr.rdma.remote_addr;
+ info.op.inline_rdma_write.rem_addr.stag = ib_wr->wr.rdma.rkey;
+ err = irdma_uk_inline_rdma_write(&iwuqp->qp, &info, false);
+ } else {
+ info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
+ info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
+ info.op.rdma_write.rem_addr.tag_off = ib_wr->wr.rdma.remote_addr;
+ info.op.rdma_write.rem_addr.stag = ib_wr->wr.rdma.rkey;
+ err = irdma_uk_rdma_write(&iwuqp->qp, &info, false);
+ }
+ break;
+ case IBV_WR_RDMA_READ:
+ if (ib_wr->num_sge > uk_attrs->max_hw_read_sges) {
+ err = EINVAL;
+ break;
+ }
+ info.op_type = IRDMA_OP_TYPE_RDMA_READ;
+ info.op.rdma_read.rem_addr.tag_off = ib_wr->wr.rdma.remote_addr;
+ info.op.rdma_read.rem_addr.stag = ib_wr->wr.rdma.rkey;
+
+ info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
+ info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
+ err = irdma_uk_rdma_read(&iwuqp->qp, &info, false, false);
+ break;
+ case IBV_WR_BIND_MW:
+ if (ib_qp->qp_type != IBV_QPT_RC) {
+ err = EINVAL;
+ break;
+ }
+ info.op_type = IRDMA_OP_TYPE_BIND_MW;
+ info.op.bind_window.mr_stag = ib_wr->bind_mw.bind_info.mr->rkey;
+ if (ib_wr->bind_mw.mw->type == IBV_MW_TYPE_1) {
+ info.op.bind_window.mem_window_type_1 = true;
+ info.op.bind_window.mw_stag = ib_wr->bind_mw.rkey;
+ } else {
+ struct verbs_mr *vmr = verbs_get_mr(ib_wr->bind_mw.bind_info.mr);
+ struct irdma_umr *umr = container_of(vmr, struct irdma_umr, vmr);
+
+ if (umr->acc_flags & IBV_ACCESS_ZERO_BASED) {
+ err = EINVAL;
+ break;
+ }
+ info.op.bind_window.mw_stag =
+ calc_type2_mw_stag(ib_wr->bind_mw.rkey, ib_wr->bind_mw.mw->rkey);
+ ib_wr->bind_mw.mw->rkey = info.op.bind_window.mw_stag;
+
+ }
+
+ if (ib_wr->bind_mw.bind_info.mw_access_flags & IBV_ACCESS_ZERO_BASED) {
+ info.op.bind_window.addressing_type = IRDMA_ADDR_TYPE_ZERO_BASED;
+ info.op.bind_window.va = NULL;
+ } else {
+ info.op.bind_window.addressing_type = IRDMA_ADDR_TYPE_VA_BASED;
+ info.op.bind_window.va = (void *)(uintptr_t)ib_wr->bind_mw.bind_info.addr;
+ }
+ info.op.bind_window.bind_len = ib_wr->bind_mw.bind_info.length;
+ info.op.bind_window.ena_reads =
+ (ib_wr->bind_mw.bind_info.mw_access_flags & IBV_ACCESS_REMOTE_READ) ? 1 : 0;
+ info.op.bind_window.ena_writes =
+ (ib_wr->bind_mw.bind_info.mw_access_flags & IBV_ACCESS_REMOTE_WRITE) ? 1 : 0;
+
+ err = irdma_uk_mw_bind(&iwuqp->qp, &info, false);
+ break;
+ case IBV_WR_LOCAL_INV:
+ info.op_type = IRDMA_OP_TYPE_INV_STAG;
+ info.op.inv_local_stag.target_stag = ib_wr->imm_data;
+ err = irdma_uk_stag_local_invalidate(&iwuqp->qp, &info, true);
+ break;
+ default:
+ /* error */
+ err = EINVAL;
+ printf("%s: post work request failed, invalid opcode: 0x%x\n",
+ __func__, ib_wr->opcode);
+ break;
+ }
+ if (err)
+ break;
+
+ ib_wr = ib_wr->next;
+ }
+
+ if (err)
+ *bad_wr = ib_wr;
+
+ irdma_uk_qp_post_wr(&iwuqp->qp);
+ if (reflush)
+ irdma_issue_flush(ib_qp, 1, 0);
+
+ pthread_spin_unlock(&iwuqp->lock);
+
+ return err;
+}
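+
+/*
+ * Illustrative caller sketch (not part of the library): an application
+ * reaches irdma_upost_send() through the standard libibverbs entry point;
+ * assuming qp, mr, buf and len were set up beforehand:
+ *
+ *	struct ibv_sge sge = { .addr = (uintptr_t)buf, .length = len,
+ *			       .lkey = mr->lkey };
+ *	struct ibv_send_wr wr = { .wr_id = 1, .sg_list = &sge, .num_sge = 1,
+ *				  .opcode = IBV_WR_SEND,
+ *				  .send_flags = IBV_SEND_SIGNALED };
+ *	struct ibv_send_wr *bad_wr;
+ *	int ret = ibv_post_send(qp, &wr, &bad_wr);
+ */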
+
+/**
+ * irdma_upost_recv - post receive wr for user application
+ * @ib_qp: qp to post wr
+ * @ib_wr: work request for receive
+ * @bad_wr: return of bad wr if err
+ */
+int
+irdma_upost_recv(struct ibv_qp *ib_qp, struct ibv_recv_wr *ib_wr,
+ struct ibv_recv_wr **bad_wr)
+{
+ struct irdma_post_rq_info post_recv = {};
+ struct irdma_sge *sg_list;
+ struct irdma_uqp *iwuqp;
+ bool reflush = false;
+ int err = 0;
+
+ iwuqp = container_of(ib_qp, struct irdma_uqp, ibv_qp);
+ sg_list = iwuqp->recv_sges;
+
+ err = pthread_spin_lock(&iwuqp->lock);
+ if (err)
+ return err;
+
+ if (!IRDMA_RING_MORE_WORK(iwuqp->qp.rq_ring) &&
+ ib_qp->state == IBV_QPS_ERR)
+ reflush = true;
+
+ while (ib_wr) {
+ if (ib_wr->num_sge > iwuqp->qp.max_rq_frag_cnt) {
+ *bad_wr = ib_wr;
+ err = EINVAL;
+ goto error;
+ }
+ post_recv.num_sges = ib_wr->num_sge;
+ post_recv.wr_id = ib_wr->wr_id;
+ irdma_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
+ post_recv.sg_list = sg_list;
+ err = irdma_uk_post_receive(&iwuqp->qp, &post_recv);
+ if (err) {
+ *bad_wr = ib_wr;
+ goto error;
+ }
+
+ if (reflush)
+ irdma_issue_flush(ib_qp, 0, 1);
+
+ ib_wr = ib_wr->next;
+ }
+error:
+ pthread_spin_unlock(&iwuqp->lock);
+
+ return err;
+}
+
+/**
+ * irdma_ucreate_ah - create address handle associated with a pd
+ * @ibpd: pd for the address handle
+ * @attr: attributes of address handle
+ */
+struct ibv_ah *
+irdma_ucreate_ah(struct ibv_pd *ibpd, struct ibv_ah_attr *attr)
+{
+ struct irdma_uah *ah;
+ union ibv_gid sgid;
+ struct irdma_ucreate_ah_resp resp;
+ int err;
+
+ err = ibv_query_gid(ibpd->context, attr->port_num, attr->grh.sgid_index,
+ &sgid);
+ if (err) {
+ fprintf(stderr, "irdma: Error from ibv_query_gid.\n");
+ errno = err;
+ return NULL;
+ }
+
+ ah = calloc(1, sizeof(*ah));
+ if (!ah)
+ return NULL;
+
+ err = ibv_cmd_create_ah(ibpd, &ah->ibv_ah, attr, &resp.ibv_resp,
+ sizeof(resp));
+ if (err) {
+ free(ah);
+ errno = err;
+ return NULL;
+ }
+
+ ah->ah_id = resp.ah_id;
+
+ return &ah->ibv_ah;
+}
+
+/**
+ * irdma_udestroy_ah - destroy the address handle
+ * @ibah: address handle
+ */
+int
+irdma_udestroy_ah(struct ibv_ah *ibah)
+{
+ struct irdma_uah *ah;
+ int ret;
+
+ ah = container_of(ibah, struct irdma_uah, ibv_ah);
+
+ ret = ibv_cmd_destroy_ah(ibah);
+ if (ret)
+ return ret;
+
+ free(ah);
+
+ return 0;
+}
+
+/**
+ * irdma_uattach_mcast - attach qp to multicast group
+ * @qp: The queue pair
+ * @gid: The Global ID for multicast group
+ * @lid: The Local ID
+ */
+int
+irdma_uattach_mcast(struct ibv_qp *qp, const union ibv_gid *gid,
+ uint16_t lid)
+{
+ return ibv_cmd_attach_mcast(qp, gid, lid);
+}
+
+/**
+ * irdma_udetach_mcast - Detach qp from multicast group
+ * @qp: The queue pair
+ * @gid: The Global ID for multicast group
+ * @lid: The Local ID
+ */
+int
+irdma_udetach_mcast(struct ibv_qp *qp, const union ibv_gid *gid,
+ uint16_t lid)
+{
+ return ibv_cmd_detach_mcast(qp, gid, lid);
+}
+
+/**
+ * irdma_uresize_cq - resizes a cq
+ * @cq: cq to resize
+ * @cqe: the number of cqes of the new cq
+ */
+int
+irdma_uresize_cq(struct ibv_cq *cq, int cqe)
+{
+ struct irdma_uvcontext *iwvctx;
+ struct irdma_uk_attrs *uk_attrs;
+ struct irdma_uresize_cq cmd = {};
+ struct ibv_resize_cq_resp resp = {};
+ struct irdma_ureg_mr reg_mr_cmd = {};
+ struct ibv_reg_mr_resp reg_mr_resp = {};
+ struct irdma_cq_buf *cq_buf = NULL;
+ struct irdma_cqe *cq_base = NULL;
+ struct verbs_mr new_mr = {};
+ struct irdma_ucq *iwucq;
+ size_t cq_size;
+ u32 cq_pages;
+ int cqe_needed;
+ int ret = 0;
+
+ iwucq = container_of(cq, struct irdma_ucq, verbs_cq.cq);
+ iwvctx = container_of(cq->context, struct irdma_uvcontext, ibv_ctx);
+ uk_attrs = &iwvctx->uk_attrs;
+
+ if (!(uk_attrs->feature_flags & IRDMA_FEATURE_CQ_RESIZE))
+ return EOPNOTSUPP;
+
+ if (cqe > IRDMA_MAX_CQ_SIZE)
+ return EINVAL;
+
+ cqe_needed = cqe + 1;
+ if (uk_attrs->hw_rev > IRDMA_GEN_1)
+ cqe_needed *= 2;
+
+ if (cqe_needed < IRDMA_U_MINCQ_SIZE)
+ cqe_needed = IRDMA_U_MINCQ_SIZE;
+
+ if (cqe_needed == iwucq->cq.cq_size)
+ return 0;
+
+ cq_size = get_cq_total_bytes(cqe_needed);
+ cq_pages = cq_size >> IRDMA_HW_PAGE_SHIFT;
+ cq_base = irdma_alloc_hw_buf(cq_size);
+ if (!cq_base)
+ return ENOMEM;
+
+ memset(cq_base, 0, cq_size);
+
+ cq_buf = malloc(sizeof(*cq_buf));
+ if (!cq_buf) {
+ ret = ENOMEM;
+ goto err_buf;
+ }
+
+ new_mr.ibv_mr.pd = iwucq->vmr.ibv_mr.pd;
+ reg_mr_cmd.reg_type = IRDMA_MEMREG_TYPE_CQ;
+ reg_mr_cmd.cq_pages = cq_pages;
+
+ ret = ibv_cmd_reg_mr(new_mr.ibv_mr.pd, cq_base, cq_size,
+ (uintptr_t)cq_base, IBV_ACCESS_LOCAL_WRITE,
+ &new_mr.ibv_mr, &reg_mr_cmd.ibv_cmd, sizeof(reg_mr_cmd),
+ &reg_mr_resp, sizeof(reg_mr_resp));
+ if (ret)
+ goto err_dereg_mr;
+
+ ret = pthread_spin_lock(&iwucq->lock);
+ if (ret)
+ goto err_lock;
+
+ cmd.user_cq_buffer = (__u64) ((uintptr_t)cq_base);
+ ret = ibv_cmd_resize_cq(&iwucq->verbs_cq.cq, cqe_needed, &cmd.ibv_cmd,
+ sizeof(cmd), &resp, sizeof(resp));
+ if (ret)
+ goto err_resize;
+
+ memcpy(&cq_buf->cq, &iwucq->cq, sizeof(cq_buf->cq));
+ cq_buf->vmr = iwucq->vmr;
+ iwucq->vmr = new_mr;
+ irdma_uk_cq_resize(&iwucq->cq, cq_base, cqe_needed);
+ iwucq->verbs_cq.cq.cqe = cqe;
+ LIST_INSERT_HEAD(&iwucq->resize_list, cq_buf, list);
+
+ pthread_spin_unlock(&iwucq->lock);
+
+ return ret;
+
+err_resize:
+ pthread_spin_unlock(&iwucq->lock);
+err_lock:
+ ibv_cmd_dereg_mr(&new_mr.ibv_mr);
+err_dereg_mr:
+ free(cq_buf);
+err_buf:
+ fprintf(stderr, "failed to resize CQ cq_id=%d ret=%d\n", iwucq->cq.cq_id, ret);
+ irdma_free_hw_buf(cq_base, cq_size);
+ return ret;
+}
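+
+/*
+ * Illustrative caller sketch (not part of the library): applications reach
+ * irdma_uresize_cq() through the standard verbs call, e.g.
+ *
+ *	int ret = ibv_resize_cq(cq, new_cqe);
+ *
+ * which succeeds only when the device reports IRDMA_FEATURE_CQ_RESIZE and
+ * new_cqe does not exceed IRDMA_MAX_CQ_SIZE.
+ */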
diff --git a/contrib/ofed/libirdma/libirdma.map b/contrib/ofed/libirdma/libirdma.map
new file mode 100644
index 000000000000..0db347c48281
--- /dev/null
+++ b/contrib/ofed/libirdma/libirdma.map
@@ -0,0 +1,10 @@
+/* Exported symbols should be added below according to the
+ Documentation/versioning.md document. */
+IRDMA_1.0 {
+ global: *;
+ local: *;
+};
+
+IRDMA_1.1 {
+ global: *;
+} IRDMA_1.0;
diff --git a/contrib/ofed/libirdma/osdep.h b/contrib/ofed/libirdma/osdep.h
new file mode 100644
index 000000000000..70daf4d9d3e7
--- /dev/null
+++ b/contrib/ofed/libirdma/osdep.h
@@ -0,0 +1,213 @@
+/*-
+ * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+ *
+ * Copyright (c) 2021 - 2022 Intel Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICRDMA_OSDEP_H_
+#define _ICRDMA_OSDEP_H_
+
+#include <stdatomic.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <infiniband/types.h>
+#include <infiniband/verbs.h>
+#include <infiniband/udma_barrier.h>
+#include <sys/bus.h>
+#include <sys/bus_dma.h>
+#include <sys/endian.h>
+
+#define ATOMIC atomic_t
+#define IOMEM
+#define IRDMA_NTOHL(a) ntohl(a)
+#define IRDMA_NTOHS(a) ntohs(a)
+#define MAKEMASK(m, s) ((m) << (s))
+#define OS_TIMER timer_list
+#define OS_LIST_HEAD list_head
+#define OS_LIST_ENTRY list_head
+#define DECLARE_HASHTABLE(n, b) struct hlist_head (n)[1 << (b)]
+#define HASH_MIN(v, b) (sizeof(v) <= 4 ? hash_32(v, b) : hash_long(v, b))
+#define HASH_FOR_EACH_RCU(n, b, o, m) for ((b) = 0, o = NULL; o == NULL && (b) < ARRAY_SIZE(n);\
+ (b)++)\
+ hlist_for_each_entry_rcu(o, &n[(b)], m)
+#define HASH_FOR_EACH_POSSIBLE_RCU(n, o, m, k) \
+ hlist_for_each_entry_rcu(o, &n[jhash(&k, sizeof(k), 0) >> (32 - ilog2(ARRAY_SIZE(n)))],\
+ m)
+#define HASH_FOR_EACH_POSSIBLE(n, o, m, k) \
+ hlist_for_each_entry(o, &n[jhash(&k, sizeof(k), 0) >> (32 - ilog2(ARRAY_SIZE(n)))],\
+ m)
+#define HASH_ADD_RCU(h, n, k) \
+ hlist_add_head_rcu(n, &h[jhash(&k, sizeof(k), 0) >> (32 - ilog2(ARRAY_SIZE(h)))])
+#define HASH_DEL_RCU(tbl, node) hlist_del_rcu(node)
+#define HASH_ADD(h, n, k) \
+ hlist_add_head(n, &h[jhash(&k, sizeof(k), 0) >> (32 - ilog2(ARRAY_SIZE(h)))])
+#define HASH_DEL(tbl, node) hlist_del(node)
+
+#define WQ_UNBOUND_MAX_ACTIVE max_t(int, 512, num_possible_cpus() * 4)
+#define if_addr_rlock(x)
+#define if_addr_runlock(x)
+
+/* constants */
+#define STATS_TIMER_DELAY 60000
+
+/* a couple of linux size defines */
+#define SZ_128 128
+#define SZ_2K (SZ_128 * 16)
+#define SZ_1G (SZ_1K * SZ_1K * SZ_1K)
+#define SPEED_1000 1000
+#define SPEED_10000 10000
+#define SPEED_20000 20000
+#define SPEED_25000 25000
+#define SPEED_40000 40000
+#define SPEED_100000 100000
+
+#define BIT_ULL(a) (1ULL << (a))
+
+#define __aligned_u64 uint64_t __aligned(8)
+
+#define VLAN_PRIO_SHIFT 13
+
+/*
+ * debug definition section
+ */
+#define irdma_print(S, ...) printf("%s:%d "S, __FUNCTION__, __LINE__, ##__VA_ARGS__)
+#define irdma_debug_buf(dev, mask, desc, buf, size) \
+do { \
+ u32 i; \
+ if (!((mask) & (dev)->debug_mask)) { \
+ break; \
+ } \
+ irdma_debug(dev, mask, "%s\n", desc); \
+ irdma_debug(dev, mask, "starting address virt=%p phy=%lxh\n", buf, irdma_get_virt_to_phy(buf)); \
+ for (i = 0; i < size ; i += 8) \
+ irdma_debug(dev, mask, "index %03d val: %016lx\n", i, ((unsigned long *)buf)[i / 8]); \
+} while(0)
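+
+/*
+ * Illustrative call site (names hypothetical): a driver path holding a
+ * device with a debug_mask field could dump a buffer with
+ *	irdma_debug_buf(dev, IRDMA_DEBUG_INIT, "qp host ctx", buf, len);
+ * which prints the buffer as 64-bit words when IRDMA_DEBUG_INIT is set
+ * in dev->debug_mask.
+ */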
+
+#define irdma_debug(h, m, s, ...) \
+do { \
+ if (!(h)) { \
+ if ((m) == IRDMA_DEBUG_INIT) \
+ printf("irdma INIT " s, ##__VA_ARGS__); \
+ } else if (((m) & (h)->debug_mask)) { \
+ printf("irdma " s, ##__VA_ARGS__); \
+ } \
+} while (0)
+extern unsigned int irdma_dbg;
+#define libirdma_debug(fmt, args...) \
+do { \
+ if (irdma_dbg) \
+ printf("libirdma-%s: " fmt, __func__, ##args); \
+} while (0)
+#define irdma_dev_err(a, b, ...) printf(b, ##__VA_ARGS__)
+#define irdma_dev_warn(a, b, ...) printf(b, ##__VA_ARGS__) /*dev_warn(a, b)*/
+#define irdma_dev_info(a, b, ...) printf(b, ##__VA_ARGS__)
+#define irdma_pr_warn printf
+#define ibdev_err(ibdev, fmt, ...) dev_err(&((ibdev)->dev), fmt, ##__VA_ARGS__)
+
+#define dump_struct(s, sz, name) \
+do { \
+ unsigned char *a; \
+ printf("%s %u", (name), (unsigned int)(sz)); \
+ for (a = (unsigned char*)(s); a < (unsigned char *)(s) + (sz) ; a ++) { \
+ if ((u64)a % 8 == 0) \
+ printf("\n%p ", a); \
+ printf("%2x ", *a); \
+ } \
+ printf("\n"); \
+} while (0)
+
+/*
+ * debug definition end
+ */
+
+typedef __be16 BE16;
+typedef __be32 BE32;
+typedef uintptr_t irdma_uintptr;
+
+struct irdma_hw;
+struct irdma_pci_f;
+struct irdma_sc_dev;
+struct irdma_sc_qp;
+struct irdma_sc_vsi;
+
+#define irdma_pr_info(fmt, args ...) printf("%s: WARN "fmt, __func__, ## args)
+#define irdma_pr_err(fmt, args ...) printf("%s: ERR "fmt, __func__, ## args)
+#define irdma_memcpy(a, b, c) memcpy((a), (b), (c))
+#define irdma_memset(a, b, c) memset((a), (b), (c))
+#define irdma_usec_delay(x) DELAY(x)
+#define mdelay(x) DELAY((x) * 1000)
+
+#define rt_tos2priority(tos) (((tos >> 1) & 0x8 >> 1) | ((tos >> 2) ^ ((tos >> 3) << 1)))
+#define ah_attr_to_dmac(attr) ((attr).dmac)
+#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \
+ ib_gid_to_network_type(gid_type, gid)
+#define irdma_del_timer_compat(tt) del_timer((tt))
+#define IRDMA_TAILQ_FOREACH CK_STAILQ_FOREACH
+#define IRDMA_TAILQ_FOREACH_SAFE CK_STAILQ_FOREACH_SAFE
+#define between(a, b, c) (bool)(c-a >= b-a)
+
+static inline void db_wr32(__u32 val, __u32 *wqe_word)
+{
+ *wqe_word = val;
+}
+
+void *hw_to_dev(struct irdma_hw *hw);
+
+struct irdma_dma_mem {
+ void *va;
+ u64 pa;
+ bus_dma_tag_t tag;
+ bus_dmamap_t map;
+ bus_dma_segment_t seg;
+ bus_size_t size;
+ int nseg;
+ int flags;
+};
+
+struct irdma_virt_mem {
+ void *va;
+ u32 size;
+};
+
+#ifndef verbs_mr
+enum ibv_mr_type {
+ IBV_MR_TYPE_MR,
+ IBV_MR_TYPE_NULL_MR,
+};
+
+struct verbs_mr {
+ struct ibv_mr ibv_mr;
+ enum ibv_mr_type mr_type;
+};
+#define verbs_get_mr(mr) container_of((mr), struct verbs_mr, ibv_mr)
+#endif
+#endif /* _ICRDMA_OSDEP_H_ */
diff --git a/lib/ofed/Makefile b/lib/ofed/Makefile
index 224222b55e24..4dac5c675a49 100644
--- a/lib/ofed/Makefile
+++ b/lib/ofed/Makefile
@@ -12,6 +12,7 @@ SUBDIR= \
libcxgb4 \
libmlx4 \
libmlx5 \
+ libirdma \
libibnetdisc \
libopensm
@@ -22,6 +23,7 @@ SUBDIR_DEPEND_libvendor= libibumad
SUBDIR_DEPEND_libcxgb4= libibverbs
SUBDIR_DEPEND_libmlx4= libibverbs
SUBDIR_DEPEND_libmlx5= libibverbs
+SUBDIR_DEPEND_libirdma= libibverbs
SUBDIR_DEPEND_libibnetdisc= libibmad libibumad complib
SUBDIR_PARALLEL=
diff --git a/lib/ofed/libirdma/Makefile b/lib/ofed/libirdma/Makefile
new file mode 100644
index 000000000000..1280b8a6d7bc
--- /dev/null
+++ b/lib/ofed/libirdma/Makefile
@@ -0,0 +1,23 @@
+# $FreeBSD$
+
+_spath= ${SRCTOP}/contrib/ofed/libirdma
+_ipath= ${SRCTOP}/contrib/ofed/include
+lin_inc=/usr/src/sys/compat/linuxkpi/common/include
+.PATH: ${_spath}
+
+SHLIBDIR?= /lib
+LIB= irdma
+SHLIB_MAJOR= 1
+MK_PROFILE= no
+
+SRCS= \
+irdma_umain.c \
+irdma_uverbs.c \
+irdma_uk.c \
+
+LIBADD= ibverbs pthread
+CFLAGS+= -I${_spath} -I${SRCTOP}/contrib/ofed/libibverbs
+VERSION_MAP= ${_spath}/libirdma.map
+CFLAGS+= -ferror-limit=1000
+
+.include <bsd.lib.mk>
diff --git a/share/man/man4/irdma.4 b/share/man/man4/irdma.4
new file mode 100644
index 000000000000..c8da2ad795dd
--- /dev/null
+++ b/share/man/man4/irdma.4
@@ -0,0 +1,233 @@
+.\" Copyright(c) 2016 - 2022 Intel Corporation
+.\" All rights reserved.
+.\"
+.\" This software is available to you under a choice of one of two
+.\" licenses. You may choose to be licensed under the terms of the GNU
+.\" General Public License (GPL) Version 2, available from the file
+.\" COPYING in the main directory of this source tree, or the
+.\" OpenFabrics.org BSD license below:
+.\"
+.\" Redistribution and use in source and binary forms, with or
+.\" without modification, are permitted provided that the following
+.\" conditions are met:
+.\"
+.\" - Redistributions of source code must retain the above
+.\" copyright notice, this list of conditions and the following
+.\" disclaimer.
+.\"
+.\" - Redistributions in binary form must reproduce the above
+.\" copyright notice, this list of conditions and the following
+.\" disclaimer in the documentation and/or other materials
+.\" provided with the distribution.
+.\"
+.\" THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+.\" EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+.\" MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+.\" NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+.\" BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+.\" ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+.\" CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+.\" SOFTWARE.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd March 30, 2022
+.Dt IRDMA 4
+.Os
+.Sh NAME
+.Nm irdma
+.Nd RDMA FreeBSD driver for Intel(R) Ethernet Controller E810
+.Sh SYNOPSIS
+This module relies on
+.Xr if_ice 4
+.Bl -tag -width indent
+.It The following kernel options should be included in the configuration:
+.Cd options OFED
+.Cd options OFED_DEBUG_INIT
+.Cd options COMPAT_LINUXKPI
+.Cd options SDP
+.Cd options IPOIB_CM
+.El
+.Sh DESCRIPTION
+.Ss Features
+The
+.Nm
+driver provides RDMA protocol support on RDMA-capable Intel Ethernet 800 Series NICs, which are supported by
+.Xr if_ice 4 .
+.Pp
+The driver supports both iWARP and RoCEv2 protocols.
+.Sh CONFIGURATION
+.Ss TUNABLES
+Tunables can be set at the
+.Xr loader 8
+prompt before booting the kernel or stored in
+.Xr loader.conf 5 .
+.Bl -tag -width indent
+.It Va dev.irdma<interface_number>.roce_enable
+enables RoCEv2 protocol usage on the <interface_number> interface.
+.Pp
+By default, the RoCEv2 protocol is used.
+.It Va dev.irdma<interface_number>.dcqcn_cc_cfg_valid
+indicates that all DCQCN parameters are valid and should be updated in registers or QP context.
+.Pp
+Setting this parameter to 1 means that settings in
+.Em dcqcn_min_dec_factor, dcqcn_min_rate_MBps, dcqcn_F, dcqcn_T,
+.Em dcqcn_B, dcqcn_rai_factor, dcqcn_hai_factor, dcqcn_rreduce_mperiod
+are taken into account. Otherwise default values are used.
+.Pp
+Note: "roce_enable" must also be set for this tunable to take effect.
+.It Va dev.irdma<interface_number>.dcqcn_min_dec_factor
+The minimum factor by which the current transmit rate can be changed when processing a CNP. Value is given as a percentage (1-100).
+.Pp
+Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
+.It Va dev.irdma<interface_number>.dcqcn_min_rate_MBps
+The minimum rate limit value, in Mbits per second.
+.Pp
+Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
+.It Va dev.irdma<interface_number>.dcqcn_F
+The number of times to stay in each stage of bandwidth recovery.
+.Pp
+Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
+.It Va dev.irdma<interface_number>.dcqcn_T
+The number of microseconds that should elapse before increasing the CWND in DCQCN mode.
+.Pp
+Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
+.It Va dev.irdma<interface_number>.dcqcn_B
+The number of bytes to transmit before updating CWND in DCQCN mode.
+.Pp
+Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
+.It Va dev.irdma<interface_number>.dcqcn_rai_factor
+The number of MSS to add to the congestion window in additive increase mode.
+.Pp
+Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
+.It Va dev.irdma<interface_number>.dcqcn_hai_factor
+The number of MSS to add to the congestion window in hyperactive increase mode.
+.Pp
+Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
+.It Va dev.irdma<interface_number>.dcqcn_rreduce_mperiod
+The minimum time between 2 consecutive rate reductions for a single flow. Rate reduction will occur only if a CNP is received during the relevant time interval.
+.Pp
+Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
+.Ss SYSCTL PROCEDURES
+Sysctl controls are available for runtime adjustments.
+.Bl -tag -width indent
+.It Va dev.irdma<interface_number>.debug
+defines level of debug messages.
+.Pp
+Typical value: 1 for errors only, 0x7fffffff for full debug.
+.It Va dev.irdma<interface_number>.dcqcn_enable
+enables the DCQCN algorithm for RoCEv2.
+.Pp
+Note: "roce_enable" must also be set for this sysctl to take effect.
+.Pp
+Note: The value may be changed at any time, but it will be applied only to newly created QPs.
+.El
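+.Pp
+For example (illustrative interface number), DCQCN can be toggled at runtime with:
+.Bl -tag -width indent
+.It
+sysctl dev.irdma0.dcqcn_enable=1
+.El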
+.Ss TESTING
+.Bl -enum
+.It
+To load the irdma driver, run:
+.Bl -tag -width indent
+.It
+kldload irdma
+.El
+If if_ice is not already loaded, the system will load it automatically.
+If the irdma driver does not load, check that the value of the sysctl
+.Va hw.ice.irdma
+is 1.
+To change the value, add:
+.Bl -tag -width indent
+.It
+hw.ice.irdma=1
+.El
+to
+.Pa /boot/loader.conf
+and reboot.
+.It
+To check that the driver was loaded, run:
+.Bl -tag -width indent
+.It
+sysctl -a | grep infiniband
+.El
+Typically, if everything goes well, around 190 entries per PF will appear.
+.It
+Each interface of the card may work in either iWARP or RoCEv2 mode. To enable RoCEv2 compatibility, add:
+.Bl -tag -width indent
+.It
+dev.irdma<interface_number>.roce_enable=1
+.El
+where <interface_number> is the ice interface number on which the
+RoCEv2 protocol is to be enabled, to:
+.Bl -tag -width indent
+.It
+.Pa /boot/loader.conf
+.El
+for instance:
+.Bl -tag -width indent
+.It
+dev.irdma0.roce_enable=0
+.It
+dev.irdma1.roce_enable=1
+.El
+will keep iWARP mode on interface ice0 and enable RoCEv2 mode on interface ice1.
+RoCEv2 is the default mode.
+.Pp
+To check the irdma roce_enable status, run:
+.Bl -tag -width indent
+.It
+sysctl dev.irdma<interface_number>.roce_enable
+.El
+for instance:
+.Bl -tag -width indent
+.It
+sysctl dev.irdma2.roce_enable
+.El
+where a returned value of '0' indicates iWARP mode and a value of '1' indicates RoCEv2 mode.
+.Pp
+Note: An interface configured in one mode will not be able to connect
+to a node configured in another mode.
+.Pp
+Note: RoCEv2 support is currently limited and intended for functional testing only.
+DCB and Priority Flow Control (PFC) are not currently supported, which
+may lead to significant performance loss or connectivity issues.
+.It
+Enable flow control in the ice driver:
+.Bl -tag -width indent
+.It
+sysctl dev.ice.<interface_number>.fc=3
+.El
+Enable flow control on the switch your system is connected to. See your
+switch documentation for details.
+.It
+The source code for krping software is provided with the kernel in
+/usr/src/sys/contrib/rdma/krping/. To compile the software, change
+directory to /usr/src/sys/modules/rdma/krping/ and invoke the following:
+.Bl -tag -width indent
+.It
+make clean
+.It
+make
+.It
+make install
+.El
+.It
+Start a krping server on one machine:
+.Bl -tag -width indent
+.It
+ echo size=64,count=1,port=6601,addr=100.0.0.189,server > /dev/krping
+.El
+.It
+Connect a client from another machine:
+.Bl -tag -width indent
+.It
+ echo size=64,count=1,port=6601,addr=100.0.0.189,client > /dev/krping
+.El
+.El
+.Sh SUPPORT
+For general information and support, go to the Intel support website at:
+.Lk http://support.intel.com/ .
+.Pp
+If an issue is identified with this driver with a supported adapter, email all the specific information related to the issue to
+.Mt freebsd@intel.com .
+.Sh SEE ALSO
+.Xr if_ice 4
+.Sh AUTHORS
+.An -nosplit
+The
+.Nm
+driver was prepared by
+.An Bartosz Sobczak Aq Mt bartosz.sobczak@intel.com .
diff --git a/share/mk/bsd.libnames.mk b/share/mk/bsd.libnames.mk
index a45ed27170a0..499bdeb66805 100644
--- a/share/mk/bsd.libnames.mk
+++ b/share/mk/bsd.libnames.mk
@@ -84,6 +84,7 @@ LIBIBVERBS?= ${LIBDESTDIR}${LIBDIR_BASE}/libibverbs.a
LIBICP?= ${LIBDESTDIR}${LIBDIR_BASE}/libicp.a
LIBIPSEC?= ${LIBDESTDIR}${LIBDIR_BASE}/libipsec.a
LIBIPT?= ${LIBDESTDIR}${LIBDIR_BASE}/libipt.a
+LIBIRDMA?= ${LIBDESTDIR}${LIBDIR_BASE}/libirdma.a
LIBISCSIUTIL?= ${LIBDESTDIR}${LIBDIR_BASE}/libiscsiutil.a
LIBJAIL?= ${LIBDESTDIR}${LIBDIR_BASE}/libjail.a
LIBKADM5CLNT?= ${LIBDESTDIR}${LIBDIR_BASE}/libkadm5clnt.a
diff --git a/share/mk/src.libnames.mk b/share/mk/src.libnames.mk
index eaf0fd3e20ee..d74926c7aca7 100644
--- a/share/mk/src.libnames.mk
+++ b/share/mk/src.libnames.mk
@@ -240,6 +240,7 @@ _LIBRARIES+= \
ibnetdisc \
ibumad \
ibverbs \
+ irdma \
mlx4 \
mlx5 \
rdmacm \
@@ -429,6 +430,7 @@ _DP_ibmad= ibumad
_DP_ibnetdisc= osmcomp ibmad ibumad
_DP_ibumad=
_DP_ibverbs=
+_DP_irdma= ibverbs pthread
_DP_mlx4= ibverbs pthread
_DP_mlx5= ibverbs pthread
_DP_rdmacm= ibverbs
@@ -678,6 +680,7 @@ LIBIBMADDIR= ${OBJTOP}/lib/ofed/libibmad
LIBIBNETDISCDIR=${OBJTOP}/lib/ofed/libibnetdisc
LIBIBUMADDIR= ${OBJTOP}/lib/ofed/libibumad
LIBIBVERBSDIR= ${OBJTOP}/lib/ofed/libibverbs
+LIBIRDMADIR= ${OBJTOP}/lib/ofed/libirdma
LIBMLX4DIR= ${OBJTOP}/lib/ofed/libmlx4
LIBMLX5DIR= ${OBJTOP}/lib/ofed/libmlx5
LIBRDMACMDIR= ${OBJTOP}/lib/ofed/librdmacm
diff --git a/sys/amd64/conf/NOTES b/sys/amd64/conf/NOTES
index 2bc84675f7ce..300a8a48bab7 100644
--- a/sys/amd64/conf/NOTES
+++ b/sys/amd64/conf/NOTES
@@ -292,6 +292,8 @@ device cpufreq
# igc: Intel I225 2.5Gb Ethernet adapter
# ipw: Intel PRO/Wireless 2100 IEEE 802.11 adapter
# Requires the ipw firmware module
+# irdma: Intel 800 Series RDMA driver
+# Requires the ice module
# iwi: Intel PRO/Wireless 2200BG/2225BG/2915ABG IEEE 802.11 adapters
# Requires the iwi firmware module
# iwn: Intel Wireless WiFi Link 1000/105/135/2000/4965/5000/6000/6050 abgn
@@ -319,6 +321,7 @@ device ixl # Intel 700 Series Physical Function
device iavf # Intel Adaptive Virtual Function
device ice # Intel 800 Series Physical Function
device ice_ddp # Intel 800 Series DDP Package
+device irdma # Intel 800 Series RDMA driver
device mthca # Mellanox HCA InfiniBand
device mlx4 # Shared code module between IB and Ethernet
device mlx4ib # Mellanox ConnectX HCA InfiniBand
diff --git a/sys/conf/files b/sys/conf/files
index aaf9d4223bbe..c15f87d9f830 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -4764,6 +4764,37 @@ ofed/drivers/infiniband/ulp/sdp/sdp_cma.c optional sdp inet \
ofed/drivers/infiniband/ulp/sdp/sdp_tx.c optional sdp inet \
compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/"
+dev/irdma/icrdma.c optional irdma ice pci ofed \
+ compile-with "${OFED_C} -I$S/dev/ice/"
+dev/irdma/irdma_cm.c optional irdma ice pci ofed \
+ compile-with "${OFED_C} -I$S/dev/ice/"
+dev/irdma/irdma_ctrl.c optional irdma ice pci ofed \
+ compile-with "${OFED_C} -I$S/dev/ice/"
+dev/irdma/irdma_hmc.c optional irdma ice pci ofed \
+ compile-with "${OFED_C} -I$S/dev/ice/"
+dev/irdma/irdma_hw.c optional irdma ice pci ofed \
+ compile-with "${OFED_C} -I$S/dev/ice/"
+dev/irdma/icrdma_hw.c optional irdma ice pci ofed \
+ compile-with "${OFED_C} -I$S/dev/ice/"
+dev/irdma/fbsd_kcompat.c optional irdma ice pci ofed \
+ compile-with "${OFED_C} -I$S/dev/ice/"
+dev/irdma/irdma_kcompat.c optional irdma ice pci ofed \
+ compile-with "${OFED_C} -I$S/dev/ice/"
+dev/irdma/irdma_pble.c optional irdma ice pci ofed \
+ compile-with "${OFED_C} -I$S/dev/ice/"
+dev/irdma/irdma_puda.c optional irdma ice pci ofed \
+ compile-with "${OFED_C} -I$S/dev/ice/"
+dev/irdma/irdma_uda.c optional irdma ice pci ofed \
+ compile-with "${OFED_C} -I$S/dev/ice/"
+dev/irdma/irdma_uk.c optional irdma ice pci ofed \
+ compile-with "${OFED_C} -I$S/dev/ice/"
+dev/irdma/irdma_utils.c optional irdma ice pci ofed \
+ compile-with "${OFED_C} -I$S/dev/ice/"
+dev/irdma/irdma_verbs.c optional irdma ice pci ofed \
+ compile-with "${OFED_C} -I$S/dev/ice/"
+dev/irdma/irdma_ws.c optional irdma ice pci ofed \
+ compile-with "${OFED_C} -I$S/dev/ice/"
+
dev/mthca/mthca_allocator.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_av.c optional mthca pci ofed \
diff --git a/sys/dev/irdma/fbsd_kcompat.c b/sys/dev/irdma/fbsd_kcompat.c
new file mode 100644
index 000000000000..2f6b350ac5b0
--- /dev/null
+++ b/sys/dev/irdma/fbsd_kcompat.c
@@ -0,0 +1,736 @@
+/*-
+ * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+ *
+ * Copyright (c) 2021 - 2022 Intel Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+#include "osdep.h"
+#include "ice_rdma.h"
+#include "irdma_di_if.h"
+#include "irdma_main.h"
+#include <sys/gsb_crc32.h>
+#include <netinet/in_fib.h>
+#include <netinet6/in6_fib.h>
+#include <net/route/nhop.h>
+
+/* Additional QP debugging option. Keep false unless needed. */
+bool irdma_upload_context = false;
+
+inline u32
+irdma_rd32(struct irdma_dev_ctx *dev_ctx, u32 reg)
+{
+
+ KASSERT(reg < dev_ctx->mem_bus_space_size,
+ ("irdma: register offset %#jx too large (max is %#jx)",
+ (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));
+
+ return (bus_space_read_4(dev_ctx->mem_bus_space_tag,
+ dev_ctx->mem_bus_space_handle, reg));
+}
+
+inline void
+irdma_wr32(struct irdma_dev_ctx *dev_ctx, u32 reg, u32 value)
+{
+
+ KASSERT(reg < dev_ctx->mem_bus_space_size,
+ ("irdma: register offset %#jx too large (max is %#jx)",
+ (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));
+
+ bus_space_write_4(dev_ctx->mem_bus_space_tag,
+ dev_ctx->mem_bus_space_handle, reg, value);
+}
+
+inline u64
+irdma_rd64(struct irdma_dev_ctx *dev_ctx, u32 reg)
+{
+
+ KASSERT(reg < dev_ctx->mem_bus_space_size,
+ ("irdma: register offset %#jx too large (max is %#jx)",
+ (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));
+
+ return (bus_space_read_8(dev_ctx->mem_bus_space_tag,
+ dev_ctx->mem_bus_space_handle, reg));
+}
+
+inline void
+irdma_wr64(struct irdma_dev_ctx *dev_ctx, u32 reg, u64 value)
+{
+
+ KASSERT(reg < dev_ctx->mem_bus_space_size,
+ ("irdma: register offset %#jx too large (max is %#jx)",
+ (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));
+
+ bus_space_write_8(dev_ctx->mem_bus_space_tag,
+ dev_ctx->mem_bus_space_handle, reg, value);
+
+}
+
+int
+irdma_register_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node)
+{
+ struct irdma_device *iwdev = vsi->back_vsi;
+ struct ice_rdma_peer *peer = iwdev->rf->peer_info;
+ struct ice_rdma_request req = {0};
+ struct ice_rdma_qset_update *res = &req.res;
+
+ req.type = ICE_RDMA_EVENT_QSET_REGISTER;
+ res->cnt_req = 1;
+ res->res_type = ICE_RDMA_QSET_ALLOC;
+ res->qsets.qs_handle = tc_node->qs_handle;
+ res->qsets.tc = tc_node->traffic_class;
+ res->qsets.vsi_id = vsi->vsi_idx;
+
+ IRDMA_DI_REQ_HANDLER(peer, &req);
+
+ tc_node->l2_sched_node_id = res->qsets.teid;
+ vsi->qos[tc_node->user_pri].l2_sched_node_id =
+ res->qsets.teid;
+
+ return 0;
+}
+
+void
+irdma_unregister_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node)
+{
+ struct irdma_device *iwdev = vsi->back_vsi;
+ struct ice_rdma_peer *peer = iwdev->rf->peer_info;
+ struct ice_rdma_request req = {0};
+ struct ice_rdma_qset_update *res = &req.res;
+
+ req.type = ICE_RDMA_EVENT_QSET_REGISTER;
+ res->res_allocated = 1;
+ res->res_type = ICE_RDMA_QSET_FREE;
+ res->qsets.vsi_id = vsi->vsi_idx;
+ res->qsets.teid = tc_node->l2_sched_node_id;
+ res->qsets.qs_handle = tc_node->qs_handle;
+
+ IRDMA_DI_REQ_HANDLER(peer, &req);
+}
+
+void *
+hw_to_dev(struct irdma_hw *hw)
+{
+ struct irdma_pci_f *rf;
+
+ rf = container_of(hw, struct irdma_pci_f, hw);
+ return rf->pcidev;
+}
+
+void
+irdma_free_hash_desc(void *desc)
+{
+ return;
+}
+
+int
+irdma_init_hash_desc(void **desc)
+{
+ return 0;
+}
+
+int
+irdma_ieq_check_mpacrc(void *desc,
+ void *addr, u32 len, u32 val)
+{
+ u32 crc = calculate_crc32c(0xffffffff, addr, len) ^ 0xffffffff;
+ int ret_code = 0;
+
+ if (crc != val) {
+ irdma_pr_err("mpa crc check fail %x %x\n", crc, val);
+ ret_code = -EINVAL;
+ }
+ printf("%s: result crc=%x value=%x\n", __func__, crc, val);
+ return ret_code;
+}
+
+/**
+ * irdma_add_ipv6_addr - add ipv6 address to the hw arp table
+ * @iwdev: irdma device
+ * @ifp: interface network device pointer
+ */
+static void
+irdma_add_ipv6_addr(struct irdma_device *iwdev, struct ifnet *ifp)
+{
+ struct ifaddr *ifa, *tmp;
+ struct sockaddr_in6 *sin6;
+ u32 local_ipaddr6[4];
+ u8 *mac_addr;
+ char ip6buf[INET6_ADDRSTRLEN];
+
+ if_addr_rlock(ifp);
+ IRDMA_TAILQ_FOREACH_SAFE(ifa, &ifp->if_addrhead, ifa_link, tmp) {
+ sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
+ if (sin6->sin6_family != AF_INET6)
+ continue;
+
+ irdma_copy_ip_ntohl(local_ipaddr6, (u32 *)&sin6->sin6_addr);
+ mac_addr = IF_LLADDR(ifp);
+
+ printf("%s:%d IP=%s, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
+ __func__, __LINE__,
+ ip6_sprintf(ip6buf, &sin6->sin6_addr),
+ mac_addr[0], mac_addr[1], mac_addr[2],
+ mac_addr[3], mac_addr[4], mac_addr[5]);
+
+ irdma_manage_arp_cache(iwdev->rf, mac_addr, local_ipaddr6,
+ IRDMA_ARP_ADD);
+
+ }
+ if_addr_runlock(ifp);
+}
+
+/**
+ * irdma_add_ipv4_addr - add ipv4 address to the hw arp table
+ * @iwdev: irdma device
+ * @ifp: interface network device pointer
+ */
+static void
+irdma_add_ipv4_addr(struct irdma_device *iwdev, struct ifnet *ifp)
+{
+ struct ifaddr *ifa;
+ struct sockaddr_in *sin;
+ u32 ip_addr[4] = {};
+ u8 *mac_addr;
+
+ if_addr_rlock(ifp);
+ IRDMA_TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ sin = (struct sockaddr_in *)ifa->ifa_addr;
+ if (sin->sin_family != AF_INET)
+ continue;
+
+ ip_addr[0] = ntohl(sin->sin_addr.s_addr);
+ mac_addr = IF_LLADDR(ifp);
+
+ printf("%s:%d IP=%d.%d.%d.%d, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
+ __func__, __LINE__,
+ ip_addr[0] >> 24,
+ (ip_addr[0] >> 16) & 0xFF,
+ (ip_addr[0] >> 8) & 0xFF,
+ ip_addr[0] & 0xFF,
+ mac_addr[0], mac_addr[1], mac_addr[2],
+ mac_addr[3], mac_addr[4], mac_addr[5]);
+
+ irdma_manage_arp_cache(iwdev->rf, mac_addr, ip_addr,
+ IRDMA_ARP_ADD);
+ }
+ if_addr_runlock(ifp);
+}
+
+/**
+ * irdma_add_ip - add ip addresses
+ * @iwdev: irdma device
+ *
+ * Add ipv4/ipv6 addresses to the arp cache
+ */
+void
+irdma_add_ip(struct irdma_device *iwdev)
+{
+ struct ifnet *ifp = iwdev->netdev;
+ struct ifnet *ifv;
+ int i;
+
+ irdma_add_ipv4_addr(iwdev, ifp);
+ irdma_add_ipv6_addr(iwdev, ifp);
+ for (i = 0; ifp->if_vlantrunk != NULL && i < VLAN_N_VID; ++i) {
+ ifv = VLAN_DEVAT(ifp, i);
+ if (!ifv)
+ continue;
+ irdma_add_ipv4_addr(iwdev, ifv);
+ irdma_add_ipv6_addr(iwdev, ifv);
+ }
+}
+
+static void
+irdma_ifaddrevent_handler(void *arg, struct ifnet *ifp, struct ifaddr *ifa, int event)
+{
+ struct irdma_pci_f *rf = arg;
+ struct ifnet *ifv = NULL;
+ struct sockaddr_in *sin;
+ struct epoch_tracker et;
+ int arp_index = 0, i = 0;
+ u32 ip[4] = {};
+
+ if (!ifa || !ifa->ifa_addr || !ifp)
+ return;
+ if (rf->iwdev->netdev != ifp) {
+ for (i = 0; rf->iwdev->netdev->if_vlantrunk != NULL && i < VLAN_N_VID; ++i) {
+ NET_EPOCH_ENTER(et);
+ ifv = VLAN_DEVAT(rf->iwdev->netdev, i);
+ NET_EPOCH_EXIT(et);
+ if (ifv == ifp)
+ break;
+ }
+ if (ifv != ifp)
+ return;
+ }
+ sin = (struct sockaddr_in *)ifa->ifa_addr;
+
+ switch (event) {
+ case IFADDR_EVENT_ADD:
+ if (sin->sin_family == AF_INET)
+ irdma_add_ipv4_addr(rf->iwdev, ifp);
+ else if (sin->sin_family == AF_INET6)
+ irdma_add_ipv6_addr(rf->iwdev, ifp);
+ break;
+ case IFADDR_EVENT_DEL:
+ if (sin->sin_family == AF_INET) {
+ ip[0] = ntohl(sin->sin_addr.s_addr);
+ } else if (sin->sin_family == AF_INET6) {
+ irdma_copy_ip_ntohl(ip, (u32 *)&((struct sockaddr_in6 *)sin)->sin6_addr);
+ } else {
+ break;
+ }
+ for_each_set_bit(arp_index, rf->allocated_arps, rf->arp_table_size) {
+ if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip))) {
+ irdma_manage_arp_cache(rf, rf->arp_table[arp_index].mac_addr,
+ rf->arp_table[arp_index].ip_addr,
+ IRDMA_ARP_DELETE);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+void
+irdma_reg_ipaddr_event_cb(struct irdma_pci_f *rf)
+{
+ rf->irdma_ifaddr_event = EVENTHANDLER_REGISTER(ifaddr_event_ext,
+ irdma_ifaddrevent_handler,
+ rf,
+ EVENTHANDLER_PRI_ANY);
+}
+
+void
+irdma_dereg_ipaddr_event_cb(struct irdma_pci_f *rf)
+{
+ EVENTHANDLER_DEREGISTER(ifaddr_event_ext, rf->irdma_ifaddr_event);
+}
+
+static int
+irdma_get_route_ifp(struct sockaddr *dst_sin, struct ifnet *netdev,
+ struct ifnet **ifp, struct sockaddr **nexthop, bool *gateway)
+{
+ struct nhop_object *nh;
+
+ if (dst_sin->sa_family == AF_INET6)
+ nh = fib6_lookup(RT_DEFAULT_FIB, &((struct sockaddr_in6 *)dst_sin)->sin6_addr, 0, NHR_NONE, 0);
+ else
+ nh = fib4_lookup(RT_DEFAULT_FIB, ((struct sockaddr_in *)dst_sin)->sin_addr, 0, NHR_NONE, 0);
+ if (!nh || (nh->nh_ifp != netdev &&
+ rdma_vlan_dev_real_dev(nh->nh_ifp) != netdev))
+ goto rt_not_found;
+ *gateway = (nh->nh_flags & NHF_GATEWAY) ? true : false;
+ *nexthop = (*gateway) ? &nh->gw_sa : dst_sin;
+ *ifp = nh->nh_ifp;
+
+ return 0;
+
+rt_not_found:
+ pr_err("irdma: route not found\n");
+ return -ENETUNREACH;
+}
+
+/**
+ * irdma_get_dst_mac - get destination mac address
+ * @cm_node: connection's node
+ * @dst_sin: destination address information
+ * @dst_mac: mac address array to return
+ */
+int
+irdma_get_dst_mac(struct irdma_cm_node *cm_node, struct sockaddr *dst_sin, u8 *dst_mac)
+{
+ struct ifnet *netdev = cm_node->iwdev->netdev;
+#ifdef VIMAGE
+ struct rdma_cm_id *rdma_id = (struct rdma_cm_id *)cm_node->cm_id->context;
+ struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
+#endif
+ struct ifnet *ifp;
+ struct llentry *lle;
+ struct sockaddr *nexthop;
+ struct epoch_tracker et;
+ int err;
+ bool gateway;
+
+ NET_EPOCH_ENTER(et);
+ CURVNET_SET_QUIET(vnet);
+ err = irdma_get_route_ifp(dst_sin, netdev, &ifp, &nexthop, &gateway);
+ if (err)
+ goto get_route_fail;
+
+ if (dst_sin->sa_family == AF_INET) {
+ err = arpresolve(ifp, gateway, NULL, nexthop, dst_mac, NULL, &lle);
+ } else if (dst_sin->sa_family == AF_INET6) {
+ err = nd6_resolve(ifp, gateway, NULL, nexthop, dst_mac, NULL, &lle);
+ } else {
+ err = -EPROTONOSUPPORT;
+ }
+
+get_route_fail:
+ CURVNET_RESTORE();
+ NET_EPOCH_EXIT(et);
+ if (err) {
+ pr_err("failed to resolve neighbor address (err=%d)\n",
+ err);
+ return -ENETUNREACH;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_addr_resolve_neigh - resolve neighbor address
+ * @cm_node: connection's node
+ * @dst_ip: remote ip address
+ * @arpindex: if there is an arp entry
+ */
+int
+irdma_addr_resolve_neigh(struct irdma_cm_node *cm_node,
+ u32 dst_ip, int arpindex)
+{
+ struct irdma_device *iwdev = cm_node->iwdev;
+ struct sockaddr_in dst_sin = {};
+ int err;
+ u32 ip[4] = {};
+ u8 dst_mac[MAX_ADDR_LEN];
+
+ dst_sin.sin_len = sizeof(dst_sin);
+ dst_sin.sin_family = AF_INET;
+ dst_sin.sin_port = 0;
+ dst_sin.sin_addr.s_addr = htonl(dst_ip);
+
+ err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_sin, dst_mac);
+ if (err)
+ return arpindex;
+
+ ip[0] = dst_ip;
+
+ return irdma_add_arp(iwdev->rf, ip, dst_mac);
+}
+
+/**
+ * irdma_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
+ * @cm_node: connection's node
+ * @dest: remote ip address
+ * @arpindex: if there is an arp entry
+ */
+int
+irdma_addr_resolve_neigh_ipv6(struct irdma_cm_node *cm_node,
+ u32 *dest, int arpindex)
+{
+ struct irdma_device *iwdev = cm_node->iwdev;
+ struct sockaddr_in6 dst_addr = {};
+ int err;
+ u8 dst_mac[MAX_ADDR_LEN];
+
+ dst_addr.sin6_family = AF_INET6;
+ dst_addr.sin6_len = sizeof(dst_addr);
+ dst_addr.sin6_scope_id = iwdev->netdev->if_index;
+
+ irdma_copy_ip_htonl(dst_addr.sin6_addr.__u6_addr.__u6_addr32, dest);
+ err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_addr, dst_mac);
+ if (err)
+ return arpindex;
+
+ return irdma_add_arp(iwdev->rf, dest, dst_mac);
+}
+
+int
+irdma_resolve_neigh_lpb_chk(struct irdma_device *iwdev, struct irdma_cm_node *cm_node,
+ struct irdma_cm_info *cm_info)
+{
+ int arpindex;
+ int oldarpindex;
+
+ if ((cm_node->ipv4 &&
+ irdma_ipv4_is_lpb(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
+ (!cm_node->ipv4 &&
+ irdma_ipv6_is_lpb(cm_node->loc_addr, cm_node->rem_addr))) {
+ cm_node->do_lpb = true;
+ arpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
+ NULL,
+ IRDMA_ARP_RESOLVE);
+ } else {
+ oldarpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
+ NULL,
+ IRDMA_ARP_RESOLVE);
+ if (cm_node->ipv4)
+ arpindex = irdma_addr_resolve_neigh(cm_node,
+ cm_info->rem_addr[0],
+ oldarpindex);
+ else
+ arpindex = irdma_addr_resolve_neigh_ipv6(cm_node,
+ cm_info->rem_addr,
+ oldarpindex);
+ }
+ return arpindex;
+}
+
+/**
+ * irdma_add_handler - add a handler to the list
+ * @hdl: handler to be added to the handler list
+ */
+void
+irdma_add_handler(struct irdma_handler *hdl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&irdma_handler_lock, flags);
+ list_add(&hdl->list, &irdma_handlers);
+ spin_unlock_irqrestore(&irdma_handler_lock, flags);
+}
+
+/**
+ * irdma_del_handler - delete a handler from the list
+ * @hdl: handler to be deleted from the handler list
+ */
+void
+irdma_del_handler(struct irdma_handler *hdl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&irdma_handler_lock, flags);
+ list_del(&hdl->list);
+ spin_unlock_irqrestore(&irdma_handler_lock, flags);
+}
+
+/**
+ * irdma_set_rf_user_cfg_params - apply user configurable settings
+ * @rf: RDMA PCI function
+ */
+void
+irdma_set_rf_user_cfg_params(struct irdma_pci_f *rf)
+{
+ int en_rem_endpoint_trk = 0;
+ int limits_sel = 4;
+
+ rf->en_rem_endpoint_trk = en_rem_endpoint_trk;
+ rf->limits_sel = limits_sel;
+ rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
+ /* Enable DCQCN algorithm by default */
+ rf->dcqcn_ena = true;
+}
+
+/**
+ * irdma_sysctl_dcqcn_update - handle dcqcn_ena sysctl update
+ * @arg1: pointer to rf
+ * @arg2: unused
+ * @oidp: sysctl oid structure
+ * @req: sysctl request pointer
+ */
+static int
+irdma_sysctl_dcqcn_update(SYSCTL_HANDLER_ARGS)
+{
+ struct irdma_pci_f *rf = (struct irdma_pci_f *)arg1;
+ int ret;
+ u8 dcqcn_ena = rf->dcqcn_ena;
+
+ ret = sysctl_handle_8(oidp, &dcqcn_ena, 0, req);
+ if ((ret) || (req->newptr == NULL))
+ return ret;
+ if (dcqcn_ena == 0)
+ rf->dcqcn_ena = false;
+ else
+ rf->dcqcn_ena = true;
+
+ return 0;
+}
+
+/**
+ * irdma_dcqcn_tunables_init - create tunables for dcqcn settings
+ * @rf: RDMA PCI function
+ *
+ * Create DCQCN related sysctls for the driver.
+ * dcqcn_ena is a writeable setting, applicable to the next QP creation or
+ * context setting.
+ * All other settings are of RDTUN type (read on driver load) and are
+ * applicable only to CQP creation.
+ */
+void
+irdma_dcqcn_tunables_init(struct irdma_pci_f *rf)
+{
+ struct sysctl_oid_list *irdma_sysctl_oid_list;
+
+ irdma_sysctl_oid_list = SYSCTL_CHILDREN(rf->tun_info.irdma_sysctl_tree);
+
+ SYSCTL_ADD_PROC(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
+ OID_AUTO, "dcqcn_enable", CTLFLAG_RW | CTLTYPE_U8, rf, 0,
+ irdma_sysctl_dcqcn_update, "A",
+ "enables DCQCN algorithm for RoCEv2 on all ports, default=true");
+
+ SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
+ OID_AUTO, "dcqcn_cc_cfg_valid", CTLFLAG_RDTUN,
+ &rf->dcqcn_params.cc_cfg_valid, 0,
+ "set DCQCN parameters to be valid, default=false");
+
+ rf->dcqcn_params.min_dec_factor = 1;
+ SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
+ OID_AUTO, "dcqcn_min_dec_factor", CTLFLAG_RDTUN,
+ &rf->dcqcn_params.min_dec_factor, 0,
+ "set minimum percentage factor by which tx rate can be changed for CNP, Range: 1-100, default=1");
+
+ SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
+ OID_AUTO, "dcqcn_min_rate_MBps", CTLFLAG_RDTUN,
+ &rf->dcqcn_params.min_rate, 0,
+ "set minimum rate limit value, in MBits per second, default=0");
+
+ SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
+ OID_AUTO, "dcqcn_F", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_f, 0,
+ "set number of times to stay in each stage of bandwidth recovery, default=0");
+
+ SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
+ OID_AUTO, "dcqcn_T", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_t, 0,
+ "set number of usecs that should elapse before increasing the CWND in DCQCN mode, default=0");
+
+ SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
+ OID_AUTO, "dcqcn_B", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_b, 0,
+ "set number of MSS to add to the congestion window in additive increase mode, default=0");
+
+ SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
+ OID_AUTO, "dcqcn_rai_factor", CTLFLAG_RDTUN,
+ &rf->dcqcn_params.rai_factor, 0,
+ "set number of MSS to add to the congestion window in additive increase mode, default=0");
+
+ SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
+ OID_AUTO, "dcqcn_hai_factor", CTLFLAG_RDTUN,
+ &rf->dcqcn_params.hai_factor, 0,
+ "set number of MSS to add to the congestion window in hyperactive increase mode, default=0");
+
+ SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
+ OID_AUTO, "dcqcn_rreduce_mperiod", CTLFLAG_RDTUN,
+ &rf->dcqcn_params.rreduce_mperiod, 0,
+ "set minimum time between 2 consecutive rate reductions for a single flow, default=0");
+}
+
+/**
+ * irdma_dmamap_cb - callback for bus_dmamap_load
+ */
+static void
+irdma_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
+{
+ if (error)
+ return;
+ *(bus_addr_t *) arg = segs->ds_addr;
+ return;
+}
+
+/**
+ * irdma_allocate_dma_mem - allocate dma memory
+ * @hw: pointer to hw structure
+ * @mem: structure holding memory information
+ * @size: requested size
+ * @alignment: requested alignment
+ */
+void *
+irdma_allocate_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem,
+ u64 size, u32 alignment)
+{
+ struct irdma_dev_ctx *dev_ctx = (struct irdma_dev_ctx *)hw->dev_context;
+ device_t dev = dev_ctx->dev;
+ void *va;
+ int ret;
+
+ ret = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
+ alignment, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ size, /* maxsize */
+ 1, /* nsegments */
+ size, /* maxsegsize */
+ BUS_DMA_ALLOCNOW, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &mem->tag);
+ if (ret != 0) {
+ device_printf(dev, "%s: bus_dma_tag_create failed, error %u\n",
+ __func__, ret);
+ goto fail_0;
+ }
+ ret = bus_dmamem_alloc(mem->tag, (void **)&va,
+ BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map);
+ if (ret != 0) {
+ device_printf(dev, "%s: bus_dmamem_alloc failed, error %u\n",
+ __func__, ret);
+ goto fail_1;
+ }
+ ret = bus_dmamap_load(mem->tag, mem->map, va, size,
+ irdma_dmamap_cb, &mem->pa, BUS_DMA_NOWAIT);
+ if (ret != 0) {
+ device_printf(dev, "%s: bus_dmamap_load failed, error %u\n",
+ __func__, ret);
+ goto fail_2;
+ }
+ mem->nseg = 1;
+ mem->size = size;
+ bus_dmamap_sync(mem->tag, mem->map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ return va;
+fail_2:
+ bus_dmamem_free(mem->tag, va, mem->map);
+fail_1:
+ bus_dma_tag_destroy(mem->tag);
+fail_0:
+ mem->map = NULL;
+ mem->tag = NULL;
+
+ return NULL;
+}
+
+/**
+ * irdma_free_dma_mem - Memory free helper fn
+ * @hw: pointer to hw structure
+ * @mem: ptr to mem struct to free
+ */
+int
+irdma_free_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem)
+{
+ if (!mem)
+ return -EINVAL;
+ bus_dmamap_sync(mem->tag, mem->map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(mem->tag, mem->map);
+ if (!mem->va)
+ return -ENOMEM;
+ bus_dmamem_free(mem->tag, mem->va, mem->map);
+ bus_dma_tag_destroy(mem->tag);
+
+ mem->va = NULL;
+
+ return 0;
+}
+
+inline void
+irdma_prm_rem_bitmapmem(struct irdma_hw *hw, struct irdma_chunk *chunk)
+{
+ kfree(chunk->bitmapmem.va);
+}
diff --git a/sys/dev/irdma/fbsd_kcompat.h b/sys/dev/irdma/fbsd_kcompat.h
new file mode 100644
index 000000000000..179229d6be03
--- /dev/null
+++ b/sys/dev/irdma/fbsd_kcompat.h
@@ -0,0 +1,251 @@
+/*-
+ * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+ *
+ * Copyright (c) 2021 - 2022 Intel Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+#ifndef FBSD_KCOMPAT_H
+#define FBSD_KCOMPAT_H
+#include "ice_rdma.h"
+
+#define TASKLET_DATA_TYPE unsigned long
+#define TASKLET_FUNC_TYPE void (*)(TASKLET_DATA_TYPE)
+
+#define tasklet_setup(tasklet, callback) \
+ tasklet_init((tasklet), (TASKLET_FUNC_TYPE)(callback), \
+ (TASKLET_DATA_TYPE)(tasklet))
+
+#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \
+ container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
+
+#define IRDMA_SET_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \
+ (sizeof(struct drv_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) + \
+ BUILD_BUG_ON_ZERO( \
+ !__same_type(((struct drv_struct *)NULL)->member, \
+ struct ib_struct)))
+#define set_ibdev_dma_device(ibdev, dev) \
+ ibdev.dma_device = (dev)
+#define set_max_sge(props, rf) \
+ ((props)->max_sge = (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags)
+#define kc_set_props_ip_gid_caps(props) \
+ ((props)->port_cap_flags |= IB_PORT_IP_BASED_GIDS)
+#define rdma_query_gid(ibdev, port, index, gid) \
+ ib_get_cached_gid(ibdev, port, index, gid, NULL)
+#define kmap(pg) page_address(pg)
+#define kmap_local_page(pg) page_address(pg)
+#define kunmap(pg)
+#define kunmap_local(pg)
+#define kc_free_lsmm_dereg_mr(iwdev, iwqp) \
+ ((iwdev)->ibdev.dereg_mr((iwqp)->lsmm_mr, NULL))
+
+#define IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION IB_CQ_FLAGS_TIMESTAMP_COMPLETION
+#define kc_irdma_destroy_qp(ibqp, udata) irdma_destroy_qp(ibqp, udata)
+
+#ifndef IB_QP_ATTR_STANDARD_BITS
+#define IB_QP_ATTR_STANDARD_BITS GENMASK(20, 0)
+#endif
+
+#define IRDMA_QOS_MODE_VLAN 0x0
+#define IRDMA_QOS_MODE_DSCP 0x1
+
+void kc_set_roce_uverbs_cmd_mask(struct irdma_device *iwdev);
+void kc_set_rdma_uverbs_cmd_mask(struct irdma_device *iwdev);
+
+struct irdma_tunable_info {
+ struct sysctl_ctx_list irdma_sysctl_ctx;
+ struct sysctl_oid *irdma_sysctl_tree;
+ u8 roce_ena;
+};
+
+static inline int irdma_iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
+ *pkey = 0;
+ return 0;
+}
+
+static inline int cq_validate_flags(u32 flags, u8 hw_rev)
+{
+ /* GEN1 does not support CQ create flags */
+ if (hw_rev == IRDMA_GEN_1)
+ return flags ? -EOPNOTSUPP : 0;
+
+ return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : 0;
+}
+static inline u64 *irdma_next_pbl_addr(u64 *pbl, struct irdma_pble_info **pinfo,
+ u32 *idx)
+{
+ *idx += 1;
+ if (!(*pinfo) || *idx != (*pinfo)->cnt)
+ return ++pbl;
+ *idx = 0;
+ (*pinfo)++;
+
+ return (*pinfo)->addr;
+}
+int irdma_create_cq(struct ib_cq *ibcq,
+ const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata);
+struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+int irdma_create_ah(struct ib_ah *ib_ah,
+ struct ib_ah_attr *attr, u32 flags,
+ struct ib_udata *udata);
+int irdma_create_ah_stub(struct ib_ah *ib_ah,
+ struct ib_ah_attr *attr, u32 flags,
+ struct ib_udata *udata);
+void irdma_ether_copy(u8 *dmac, struct ib_ah_attr *attr);
+
+void irdma_destroy_ah(struct ib_ah *ibah, u32 flags);
+void irdma_destroy_ah_stub(struct ib_ah *ibah, u32 flags);
+int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
+int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
+void irdma_get_eth_speed_and_width(u32 link_speed, u8 *active_speed,
+ u8 *active_width);
+enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
+ u8 port_num);
+int irdma_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable);
+int irdma_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable);
+int irdma_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid);
+int irdma_query_gid_roce(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid);
+int irdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey);
+int irdma_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props);
+struct rdma_hw_stats *irdma_alloc_hw_stats(struct ib_device *ibdev, u8 port_num);
+int irdma_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats, u8 port_num,
+ int index);
+
+int irdma_register_qset(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node);
+void irdma_unregister_qset(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node);
+void ib_unregister_device(struct ib_device *ibdev);
+void irdma_disassociate_ucontext(struct ib_ucontext *context);
+int kc_irdma_set_roce_cm_info(struct irdma_qp *iwqp,
+ struct ib_qp_attr *attr,
+ u16 *vlan_id);
+struct irdma_device *kc_irdma_get_device(struct ifnet *netdev);
+void kc_irdma_put_device(struct irdma_device *iwdev);
+
+void kc_set_loc_seq_num_mss(struct irdma_cm_node *cm_node);
+
+void irdma_get_dev_fw_str(struct ib_device *dev, char *str, size_t str_len);
+
+int irdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
+ struct ib_port_modify *props);
+int irdma_get_dst_mac(struct irdma_cm_node *cm_node, struct sockaddr *dst_sin,
+ u8 *dst_mac);
+int irdma_resolve_neigh_lpb_chk(struct irdma_device *iwdev, struct irdma_cm_node *cm_node,
+ struct irdma_cm_info *cm_info);
+int irdma_addr_resolve_neigh(struct irdma_cm_node *cm_node, u32 dst_ip,
+ int arpindex);
+int irdma_addr_resolve_neigh_ipv6(struct irdma_cm_node *cm_node, u32 *dest,
+ int arpindex);
+void irdma_dcqcn_tunables_init(struct irdma_pci_f *rf);
+u32 irdma_create_stag(struct irdma_device *iwdev);
+void irdma_free_stag(struct irdma_device *iwdev, u32 stag);
+
+struct irdma_mr;
+struct irdma_cq;
+struct irdma_cq_buf;
+struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg, struct ib_udata *udata);
+int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr);
+struct ib_mw *irdma_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+ struct ib_udata *udata);
+int irdma_hw_alloc_stag(struct irdma_device *iwdev, struct irdma_mr *iwmr);
+void irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq);
+int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
+ struct irdma_device *iwdev);
+void irdma_setup_virt_qp(struct irdma_device *iwdev,
+ struct irdma_qp *iwqp,
+ struct irdma_qp_init_info *init_info);
+int irdma_setup_kmode_qp(struct irdma_device *iwdev,
+ struct irdma_qp *iwqp,
+ struct irdma_qp_init_info *info,
+ struct ib_qp_init_attr *init_attr);
+void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
+ struct irdma_qp_host_ctx_info *ctx_info);
+void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
+ struct irdma_qp_host_ctx_info *ctx_info);
+int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp);
+void irdma_dealloc_push_page(struct irdma_pci_f *rf,
+ struct irdma_sc_qp *qp);
+int irdma_process_resize_list(struct irdma_cq *iwcq, struct irdma_device *iwdev,
+ struct irdma_cq_buf *lcqe_buf);
+void irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+int irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
+void irdma_dealloc_ucontext(struct ib_ucontext *context);
+int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+void irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+int irdma_add_gid(struct ib_device *, u8, unsigned int, const union ib_gid *,
+ const struct ib_gid_attr *, void **);
+int irdma_del_gid(struct ib_device *, u8, unsigned int, void **);
+struct ib_device *ib_device_get_by_netdev(struct ifnet *ndev, int driver_id);
+void ib_device_put(struct ib_device *device);
+void ib_unregister_device_put(struct ib_device *device);
+enum ib_mtu ib_mtu_int_to_enum(int mtu);
+struct irdma_pbl *irdma_get_pbl(unsigned long va, struct list_head *pbl_list);
+void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq);
+void irdma_remove_push_mmap_entries(struct irdma_qp *iwqp);
+
+struct irdma_ucontext;
+void irdma_del_memlist(struct irdma_mr *iwmr, struct irdma_ucontext *ucontext);
+void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
+ enum irdma_pble_level level);
+void irdma_reg_ipaddr_event_cb(struct irdma_pci_f *rf);
+void irdma_dereg_ipaddr_event_cb(struct irdma_pci_f *rf);
+
+/* Introduced in this series https://lore.kernel.org/linux-rdma/0-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com/
+ * An irdma version of that helper for older code paths; the difference is
+ * that the iova is passed in explicitly rather than derived from umem->iova.
+ */
+static inline size_t irdma_ib_umem_num_dma_blocks(struct ib_umem *umem, unsigned long pgsz, u64 iova)
+{
+ /* some older OFED distros do not have ALIGN_DOWN */
+#ifndef ALIGN_DOWN
+#define ALIGN_DOWN(x, a) ALIGN((x) - ((a) - 1), (a))
+#endif
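+
+ /*
+ * Worked example (hypothetical values): iova = 0x11080, length = 0x2100 and
+ * pgsz = 0x1000 give ALIGN(0x13180, 0x1000) = 0x14000 and
+ * ALIGN_DOWN(0x11080, 0x1000) = 0x11000, i.e. (0x14000 - 0x11000) / 0x1000
+ * = 3 DMA blocks.
+ */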
+
+ return (size_t)((ALIGN(iova + umem->length, pgsz) -
+ ALIGN_DOWN(iova, pgsz))) / pgsz;
+}
+
+#endif /* FBSD_KCOMPAT_H */
diff --git a/sys/dev/irdma/ice_devids.h b/sys/dev/irdma/ice_devids.h
new file mode 100644
index 000000000000..57f26bc33260
--- /dev/null
+++ b/sys/dev/irdma/ice_devids.h
@@ -0,0 +1,92 @@
+/*-
+ * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+ *
+ * Copyright (c) 2019 - 2020 Intel Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+#ifndef ICE_DEVIDS_H
+#define ICE_DEVIDS_H
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+
+/* Device IDs */
+/* Intel(R) Ethernet Connection E823-L for backplane */
+#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
+/* Intel(R) Ethernet Connection E823-L for SFP */
+#define ICE_DEV_ID_E823L_SFP 0x124D
+/* Intel(R) Ethernet Connection E823-L/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E823L_10G_BASE_T 0x124E
+/* Intel(R) Ethernet Connection E823-L 1GbE */
+#define ICE_DEV_ID_E823L_1GBE 0x124F
+/* Intel(R) Ethernet Connection E823-L for QSFP */
+#define ICE_DEV_ID_E823L_QSFP 0x151D
+/* Intel(R) Ethernet Controller E810-C for backplane */
+#define ICE_DEV_ID_E810C_BACKPLANE 0x1591
+/* Intel(R) Ethernet Controller E810-C for QSFP */
+#define ICE_DEV_ID_E810C_QSFP 0x1592
+/* Intel(R) Ethernet Controller E810-C for SFP */
+#define ICE_DEV_ID_E810C_SFP 0x1593
+/* Intel(R) Ethernet Controller E810-XXV for backplane */
+#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
+/* Intel(R) Ethernet Controller E810-XXV for QSFP */
+#define ICE_DEV_ID_E810_XXV_QSFP 0x159A
+/* Intel(R) Ethernet Controller E810-XXV for SFP */
+#define ICE_DEV_ID_E810_XXV_SFP 0x159B
+/* Intel(R) Ethernet Connection E823-C for backplane */
+#define ICE_DEV_ID_E823C_BACKPLANE 0x188A
+/* Intel(R) Ethernet Connection E823-C for QSFP */
+#define ICE_DEV_ID_E823C_QSFP 0x188B
+/* Intel(R) Ethernet Connection E823-C for SFP */
+#define ICE_DEV_ID_E823C_SFP 0x188C
+/* Intel(R) Ethernet Connection E823-C/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E823C_10G_BASE_T 0x188D
+/* Intel(R) Ethernet Connection E823-C 1GbE */
+#define ICE_DEV_ID_E823C_SGMII 0x188E
+/* Intel(R) Ethernet Connection C822N for backplane */
+#define ICE_DEV_ID_C822N_BACKPLANE 0x1890
+/* Intel(R) Ethernet Connection C822N for QSFP */
+#define ICE_DEV_ID_C822N_QSFP 0x1891
+/* Intel(R) Ethernet Connection C822N for SFP */
+#define ICE_DEV_ID_C822N_SFP 0x1892
+/* Intel(R) Ethernet Connection E822-C/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E822C_10G_BASE_T 0x1893
+/* Intel(R) Ethernet Connection E822-C 1GbE */
+#define ICE_DEV_ID_E822C_SGMII 0x1894
+/* Intel(R) Ethernet Connection E822-L for backplane */
+#define ICE_DEV_ID_E822L_BACKPLANE 0x1897
+/* Intel(R) Ethernet Connection E822-L for SFP */
+#define ICE_DEV_ID_E822L_SFP 0x1898
+/* Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E822L_10G_BASE_T 0x1899
+/* Intel(R) Ethernet Connection E822-L 1GbE */
+#define ICE_DEV_ID_E822L_SGMII 0x189A
+#endif /* ICE_DEVIDS_H */
diff --git a/sys/dev/irdma/icrdma.c b/sys/dev/irdma/icrdma.c
new file mode 100644
index 000000000000..1bc78dce90aa
--- /dev/null
+++ b/sys/dev/irdma/icrdma.c
@@ -0,0 +1,704 @@
+/*-
+ * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+ *
+ * Copyright (c) 2021 - 2022 Intel Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/sysctl.h>
+#include <machine/bus.h>
+#include <linux/device.h>
+#include <sys/rman.h>
+
+#include "ice_rdma.h"
+#include "irdma_main.h"
+#include "icrdma_hw.h"
+
+#include "irdma_if.h"
+#include "irdma_di_if.h"
+
+/**
+ * Driver version
+ */
+char irdma_driver_version[] = "0.0.51-k";
+
+#define pf_if_d(peer) peer->ifp->if_dunit
+
+/**
+ * irdma_init_tunable - prepare tunables
+ * @rf: RDMA PCI function
+ * @pf_id: id of the pf
+ */
+static void
+irdma_init_tunable(struct irdma_pci_f *rf, uint8_t pf_id)
+{
+ struct sysctl_oid_list *irdma_sysctl_oid_list;
+ char pf_name[16];
+
+ snprintf(pf_name, 15, "irdma%d", pf_id);
+ sysctl_ctx_init(&rf->tun_info.irdma_sysctl_ctx);
+
+ rf->tun_info.irdma_sysctl_tree = SYSCTL_ADD_NODE(&rf->tun_info.irdma_sysctl_ctx,
+ SYSCTL_STATIC_CHILDREN(_dev),
+ OID_AUTO, pf_name, CTLFLAG_RD,
+ NULL, "");
+
+ irdma_sysctl_oid_list = SYSCTL_CHILDREN(rf->tun_info.irdma_sysctl_tree);
+
+ /*
+ * debug mask setting
+ */
+ SYSCTL_ADD_S32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
+ OID_AUTO, "debug", CTLFLAG_RWTUN, &rf->sc_dev.debug_mask,
+ 0, "irdma debug");
+
+ /*
+ * RoCEv2/iWARP setting RoCEv2 the default mode
+ */
+ rf->tun_info.roce_ena = 1;
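+ /*
+ * With CTLFLAG_RDTUN this is exposed as dev.irdma<pf_id>.roce_enable and can
+ * only be changed as a boot-time tunable, e.g. dev.irdma0.roce_enable=0 in
+ * /boot/loader.conf to force iWARP.
+ */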
+ SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list, OID_AUTO,
+ "roce_enable", CTLFLAG_RDTUN, &rf->tun_info.roce_ena, 0,
+ "RoCEv2 mode enable");
+
+ rf->protocol_used = IRDMA_IWARP_PROTOCOL_ONLY;
+ if (rf->tun_info.roce_ena == 1)
+ rf->protocol_used = IRDMA_ROCE_PROTOCOL_ONLY;
+ else if (rf->tun_info.roce_ena != 0)
+ printf("%s:%d wrong roce_enable value (%d), using iWARP\n",
+ __func__, __LINE__, rf->tun_info.roce_ena);
+ printf("%s:%d protocol: %s, roce_enable value: %d\n", __func__, __LINE__,
+ (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? "iWARP" : "RoCEv2",
+ rf->tun_info.roce_ena);
+
+ irdma_dcqcn_tunables_init(rf);
+}
+
+/**
+ * irdma_find_handler - obtain hdl object to identify pf
+ * @p_dev: the peer interface structure
+ */
+static struct irdma_handler *
+irdma_find_handler(struct ice_rdma_peer *p_dev)
+{
+ struct irdma_handler *hdl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&irdma_handler_lock, flags);
+ list_for_each_entry(hdl, &irdma_handlers, list) {
+ if (!hdl)
+ continue;
+ if (!hdl->iwdev->rf->peer_info)
+ continue;
+ if (hdl->iwdev->rf->peer_info->dev == p_dev->dev) {
+ spin_unlock_irqrestore(&irdma_handler_lock, flags);
+ return hdl;
+ }
+ }
+ spin_unlock_irqrestore(&irdma_handler_lock, flags);
+
+ return NULL;
+}
+
+/**
+ * peer_to_iwdev - return iwdev based on peer
+ * @peer: the peer interface structure
+ */
+static struct irdma_device *
+peer_to_iwdev(struct ice_rdma_peer *peer)
+{
+ struct irdma_handler *hdl;
+
+ hdl = irdma_find_handler(peer);
+ if (!hdl) {
+ printf("%s:%d rdma handler not found\n", __func__, __LINE__);
+ return NULL;
+ }
+
+ return hdl->iwdev;
+}
+
+/**
+ * irdma_get_qos_info - save qos info from parameters to internal struct
+ * @l2params: destination, qos, tc, mtu info structure
+ * @qos_info: source, DCB settings structure
+ */
+static void
+irdma_get_qos_info(struct irdma_l2params *l2params, struct ice_qos_params *qos_info)
+{
+ int i;
+
+ l2params->num_tc = qos_info->num_tc;
+ l2params->num_apps = qos_info->num_apps;
+ l2params->vsi_prio_type = qos_info->vsi_priority_type;
+ l2params->vsi_rel_bw = qos_info->vsi_relative_bw;
+ for (i = 0; i < l2params->num_tc; i++) {
+ l2params->tc_info[i].egress_virt_up =
+ qos_info->tc_info[i].egress_virt_up;
+ l2params->tc_info[i].ingress_virt_up =
+ qos_info->tc_info[i].ingress_virt_up;
+ l2params->tc_info[i].prio_type = qos_info->tc_info[i].prio_type;
+ l2params->tc_info[i].rel_bw = qos_info->tc_info[i].rel_bw;
+ l2params->tc_info[i].tc_ctx = qos_info->tc_info[i].tc_ctx;
+ }
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
+ l2params->up2tc[i] = qos_info->up2tc[i];
+
+ if (qos_info->pfc_mode == IRDMA_QOS_MODE_DSCP) {
+ l2params->dscp_mode = true;
+ memcpy(l2params->dscp_map, qos_info->dscp_map, sizeof(l2params->dscp_map));
+ }
+ printf("%s:%d: l2params settings:\n num_tc %d,\n num_apps %d,\n",
+ __func__, __LINE__, l2params->num_tc, l2params->num_apps);
+ printf(" vsi_prio_type %d,\n vsi_rel_bw %d,\n egress_virt_up:",
+ l2params->vsi_prio_type, l2params->vsi_rel_bw);
+ for (i = 0; i < l2params->num_tc; i++)
+ printf(" %d", l2params->tc_info[i].egress_virt_up);
+ printf("\n ingress_virt_up:");
+ for (i = 0; i < l2params->num_tc; i++)
+ printf(" %d", l2params->tc_info[i].ingress_virt_up);
+ printf("\n prio_type:");
+ for (i = 0; i < l2params->num_tc; i++)
+ printf(" %d", l2params->tc_info[i].prio_type);
+ printf("\n rel_bw:");
+ for (i = 0; i < l2params->num_tc; i++)
+ printf(" %d", l2params->tc_info[i].rel_bw);
+ printf("\n tc_ctx:");
+ for (i = 0; i < l2params->num_tc; i++)
+ printf(" %lu", l2params->tc_info[i].tc_ctx);
+ printf("\n up2tc:");
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
+ printf(" %d", l2params->up2tc[i]);
+ printf(" dscp_mode: %d,\n", l2params->dscp_mode);
+ for (i = 0; i < IRDMA_DSCP_NUM_VAL; i++)
+ printf(" %d", l2params->dscp_map[i]);
+ printf("\n");
+
+ dump_struct(l2params, sizeof(*l2params), "l2params");
+}
+
+/**
+ * irdma_log_invalid_mtu - check mtu setting validity
+ * @mtu: mtu value
+ * @dev: hardware control device structure
+ */
+static void
+irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
+{
+ if (mtu < IRDMA_MIN_MTU_IPV4)
+ irdma_dev_warn(dev, "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 576 for IPv4\n", mtu);
+ else if (mtu < IRDMA_MIN_MTU_IPV6)
+ irdma_dev_warn(dev, "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\n", mtu);
+}
+
+/**
+ * irdma_event_handler - handle events from the LAN driver
+ * @peer: the peer interface structure
+ * @event: event info structure
+ */
+static void
+irdma_event_handler(struct ice_rdma_peer *peer, struct ice_rdma_event *event)
+{
+ struct irdma_device *iwdev;
+ struct irdma_l2params l2params = {};
+
+ printf("%s:%d event_handler %s (%x) on pf %d (%d)\n", __func__, __LINE__,
+ (event->type == 1) ? "LINK CHANGE" :
+ (event->type == 2) ? "MTU CHANGE" :
+ (event->type == 3) ? "TC CHANGE" : "UNKNOWN",
+ event->type, peer->pf_id, pf_if_d(peer));
+ iwdev = peer_to_iwdev(peer);
+ if (!iwdev) {
+ printf("%s:%d rdma device not found\n", __func__, __LINE__);
+ return;
+ }
+
+ switch (event->type) {
+ case ICE_RDMA_EVENT_LINK_CHANGE:
+ printf("%s:%d PF: %x (%x), state: %d, speed: %lu\n", __func__, __LINE__,
+ peer->pf_id, pf_if_d(peer), event->linkstate, event->baudrate);
+ break;
+ case ICE_RDMA_EVENT_MTU_CHANGE:
+ if (iwdev->vsi.mtu != event->mtu) {
+ l2params.mtu = event->mtu;
+ l2params.mtu_changed = true;
+ irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev);
+ irdma_change_l2params(&iwdev->vsi, &l2params);
+ }
+ break;
+ case ICE_RDMA_EVENT_TC_CHANGE:
+ /*
+ * 1. Check whether this is the pre- or post-change notification.
+ * 2. Check whether a TC change is already in progress.
+ */
+ if (event->prep == iwdev->vsi.tc_change_pending) {
+ printf("%s:%d can't process %s TC change if TC change is %spending\n",
+ __func__, __LINE__,
+ event->prep ? "pre" : "post",
+ event->prep ? " " : "not ");
+ goto done;
+ }
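+ /*
+ * Pre-change: suspend the QPs, wait for the outstanding suspend
+ * requests to drain, then reset the work scheduler tree.
+ * Post-change: pull the new QoS settings and re-apply the L2
+ * parameters.
+ */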
+ if (event->prep) {
+ iwdev->vsi.tc_change_pending = true;
+ irdma_sc_suspend_resume_qps(&iwdev->vsi, IRDMA_OP_SUSPEND);
+ wait_event_timeout(iwdev->suspend_wq,
+ !atomic_read(&iwdev->vsi.qp_suspend_reqs),
+ IRDMA_EVENT_TIMEOUT_MS * 10);
+ irdma_ws_reset(&iwdev->vsi);
+ printf("%s:%d TC change preparation done\n", __func__, __LINE__);
+ } else {
+ l2params.tc_changed = true;
+ irdma_get_qos_info(&l2params, &event->port_qos);
+ if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
+ iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;
+
+ irdma_check_fc_for_tc_update(&iwdev->vsi, &l2params);
+ irdma_change_l2params(&iwdev->vsi, &l2params);
+ printf("%s:%d TC change done\n", __func__, __LINE__);
+ }
+ break;
+ case ICE_RDMA_EVENT_CRIT_ERR:
+ printf("%s:%d event type received: %d\n", __func__, __LINE__, event->type);
+ break;
+ default:
+ printf("%s:%d event type unsupported: %d\n", __func__, __LINE__, event->type);
+ }
+done:
+ return;
+}
+
+/**
+ * irdma_link_change - Callback for link state change
+ * @peer: the peer interface structure
+ * @linkstate: state of the link
+ * @baudrate: speed of the link
+ */
+static void
+irdma_link_change(struct ice_rdma_peer *peer, int linkstate, uint64_t baudrate)
+{
+ printf("%s:%d PF: %x (%x), state: %d, speed: %lu\n", __func__, __LINE__,
+ peer->pf_id, pf_if_d(peer), linkstate, baudrate);
+}
+
+/**
+ * irdma_finalize_task - Finish open or close phase in a separate thread
+ * @context: instance holding peer and iwdev information
+ *
+ * Enqueued on the handler's deferred taskqueue to perform rt_init_hw or
+ * rt_deinit_hw respectively.  Also registers and unregisters the ibcore
+ * device.
+ */
+static void
+irdma_finalize_task(void *context, int pending)
+{
+ struct irdma_task_arg *task_arg = (struct irdma_task_arg *)context;
+ struct irdma_device *iwdev = task_arg->iwdev;
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct ice_rdma_peer *peer = task_arg->peer;
+ struct irdma_l2params l2params = {{{0}}};
+ struct ice_rdma_request req = {0};
+ int status = 0;
+
+ if (iwdev->iw_status) {
+ irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT, "Starting deferred closing %d (%d)\n",
+ rf->peer_info->pf_id, pf_if_d(peer));
+ irdma_dereg_ipaddr_event_cb(rf);
+ irdma_ib_unregister_device(iwdev);
+ req.type = ICE_RDMA_EVENT_VSI_FILTER_UPDATE;
+ req.enable_filter = false;
+ IRDMA_DI_REQ_HANDLER(peer, &req);
+ irdma_rt_deinit_hw(iwdev);
+ } else {
+ irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT, "Starting deferred opening %d (%d)\n",
+ rf->peer_info->pf_id, pf_if_d(peer));
+ l2params.mtu = peer->mtu;
+ irdma_get_qos_info(&l2params, &peer->initial_qos_info);
+ if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
+ iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;
+
+ status = irdma_rt_init_hw(iwdev, &l2params);
+ if (status) {
+ irdma_pr_err("RT init failed %d\n", status);
+ ib_dealloc_device(&iwdev->ibdev);
+ return;
+ }
+ status = irdma_ib_register_device(iwdev);
+ if (status) {
+ irdma_pr_err("Registration failed %d\n", status);
+ irdma_rt_deinit_hw(iwdev);
+ ib_dealloc_device(&iwdev->ibdev);
+ }
+ req.type = ICE_RDMA_EVENT_VSI_FILTER_UPDATE;
+ req.enable_filter = true;
+ IRDMA_DI_REQ_HANDLER(peer, &req);
+ irdma_reg_ipaddr_event_cb(rf);
+ irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT, "Deferred opening finished %d (%d)\n",
+ rf->peer_info->pf_id, pf_if_d(peer));
+ }
+}
+
+/**
+ * irdma_open - Callback for operation open for RDMA device
+ * @peer: the new peer interface structure
+ *
+ * Callback implementing the RDMA_OPEN function. Called by the ice driver to
+ * notify the RDMA client driver that a new device has been initialized.
+ */
+static int
+irdma_open(struct ice_rdma_peer *peer)
+{
+ struct ice_rdma_event event = {0};
+
+ event.type = ICE_RDMA_EVENT_MTU_CHANGE;
+ event.mtu = peer->mtu;
+
+ irdma_event_handler(peer, &event);
+
+ return 0;
+}
+
+/**
+ * irdma_close - Callback to notify that a peer device is down
+ * @peer: the RDMA peer device being stopped
+ *
+ * Callback implementing the RDMA_CLOSE function. Called by the ice driver to
+ * notify the RDMA client driver that a peer device is being stopped.
+ */
+static int
+irdma_close(struct ice_rdma_peer *peer)
+{
+ /*
+ * Called on "ifconfig down".  Kept for compatibility with the ice driver;
+ * this event may become useful in the future.
+ */
+ return 0;
+}
+
+/**
+ * irdma_alloc_pcidev - allocate memory for pcidev and populate data
+ * @peer: the new peer interface structure
+ * @rf: RDMA PCI function
+ */
+static int
+irdma_alloc_pcidev(struct ice_rdma_peer *peer, struct irdma_pci_f *rf)
+{
+ rf->pcidev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
+ if (!rf->pcidev) {
+ return -ENOMEM;
+ }
+ if (linux_pci_attach_device(rf->dev_ctx.dev, NULL, NULL, rf->pcidev))
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * irdma_dealloc_pcidev - deallocate memory for pcidev
+ * @rf: RDMA PCI function
+ */
+static void
+irdma_dealloc_pcidev(struct irdma_pci_f *rf)
+{
+ linux_pci_detach_device(rf->pcidev);
+ kfree(rf->pcidev);
+}
+
+/**
+ * irdma_fill_device_info - assign initial values to rf variables
+ * @iwdev: irdma device
+ * @peer: the peer interface structure
+ */
+static void
+irdma_fill_device_info(struct irdma_device *iwdev,
+ struct ice_rdma_peer *peer)
+{
+ struct irdma_pci_f *rf = iwdev->rf;
+
+ rf->peer_info = peer;
+ rf->gen_ops.register_qset = irdma_register_qset;
+ rf->gen_ops.unregister_qset = irdma_unregister_qset;
+
+ rf->rdma_ver = IRDMA_GEN_2;
+ rf->sc_dev.hw_attrs.uk_attrs.hw_rev = IRDMA_GEN_2;
+ rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
+ rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
+ rf->check_fc = irdma_check_fc_for_qp;
+ irdma_set_rf_user_cfg_params(rf);
+
+ rf->default_vsi.vsi_idx = peer->pf_vsi_num;
+ rf->dev_ctx.dev = peer->dev;
+ rf->dev_ctx.mem_bus_space_tag = rman_get_bustag(peer->pci_mem);
+ rf->dev_ctx.mem_bus_space_handle = rman_get_bushandle(peer->pci_mem);
+ rf->dev_ctx.mem_bus_space_size = rman_get_size(peer->pci_mem);
+
+ rf->hw.dev_context = &rf->dev_ctx;
+ rf->hw.hw_addr = (u8 *)rman_get_virtual(peer->pci_mem);
+ rf->msix_count = peer->msix.count;
+ rf->msix_info.entry = peer->msix.base;
+ rf->msix_info.vector = peer->msix.count;
+ printf("%s:%d msix_info: %d %d %d\n", __func__, __LINE__,
+ rf->msix_count, rf->msix_info.entry, rf->msix_info.vector);
+
+ rf->iwdev = iwdev;
+ iwdev->netdev = peer->ifp;
+ iwdev->init_state = INITIAL_STATE;
+ iwdev->vsi_num = peer->pf_vsi_num;
+ iwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED;
+ iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
+ iwdev->roce_cwnd = IRDMA_ROCE_CWND_DEFAULT;
+ iwdev->roce_ackcreds = IRDMA_ROCE_ACKCREDS_DEFAULT;
+
+ if (rf->protocol_used == IRDMA_ROCE_PROTOCOL_ONLY) {
+ iwdev->roce_mode = true;
+ }
+}
+
+/**
+ * irdma_probe - Callback to probe a new RDMA peer device
+ * @peer: the new peer interface structure
+ *
+ * Callback implementing the RDMA_PROBE function. Called by the ice driver to
+ * notify the RDMA client driver that a new device has been created
+ */
+static int
+irdma_probe(struct ice_rdma_peer *peer)
+{
+ struct irdma_device *iwdev;
+ struct irdma_pci_f *rf;
+ struct irdma_handler *hdl;
+ int err = 0;
+
+ irdma_pr_info("probe: irdma-%s peer=%p, peer->pf_id=%d, peer->ifp=%p, peer->ifp->if_dunit=%d, peer->pci_mem->r_bustag=%p\n",
+ irdma_driver_version, peer, peer->pf_id, peer->ifp,
+ pf_if_d(peer), (void *)(uintptr_t)peer->pci_mem->r_bustag);
+
+ hdl = irdma_find_handler(peer);
+ if (hdl)
+ return -EBUSY;
+
+ hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
+ if (!hdl)
+ return -ENOMEM;
+
+ iwdev = (struct irdma_device *)ib_alloc_device(sizeof(*iwdev));
+ if (!iwdev) {
+ kfree(hdl);
+ return -ENOMEM;
+ }
+
+ iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL);
+ if (!iwdev->rf) {
+ ib_dealloc_device(&iwdev->ibdev);
+ kfree(hdl);
+ return -ENOMEM;
+ }
+ hdl->iwdev = iwdev;
+ iwdev->hdl = hdl;
+
+ irdma_init_tunable(iwdev->rf, pf_if_d(peer));
+ irdma_fill_device_info(iwdev, peer);
+ rf = iwdev->rf;
+
+ if (irdma_alloc_pcidev(peer, rf))
+ goto err_pcidev;
+
+ irdma_add_handler(hdl);
+
+ if (irdma_ctrl_init_hw(rf)) {
+ err = -EIO;
+ goto err_ctrl_init;
+ }
+
+ rf->dev_ctx.task_arg.peer = peer;
+ rf->dev_ctx.task_arg.iwdev = iwdev;
+
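+ /*
+ * The heavy-weight open/close work is completed asynchronously by
+ * irdma_finalize_task() on a dedicated per-handler taskqueue.
+ */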
+ TASK_INIT(&hdl->deferred_task, 0, irdma_finalize_task, &rf->dev_ctx.task_arg);
+ hdl->deferred_tq = taskqueue_create_fast("irdma_defer",
+ M_NOWAIT, taskqueue_thread_enqueue,
+ &hdl->deferred_tq);
+ taskqueue_start_threads(&hdl->deferred_tq, 1, PI_NET, "irdma_defer_t");
+
+ taskqueue_enqueue(hdl->deferred_tq, &hdl->deferred_task);
+
+ return 0;
+
+err_ctrl_init:
+ irdma_del_handler(hdl);
+ irdma_dealloc_pcidev(rf);
+err_pcidev:
+ kfree(iwdev->rf);
+ ib_dealloc_device(&iwdev->ibdev);
+ kfree(hdl);
+
+ return err;
+}
+
+/**
+ * irdma_remove - Callback to remove an RDMA peer device
+ * @peer: the new peer interface structure
+ *
+ * Callback implementing the RDMA_REMOVE function. Called by the ice driver to
+ * notify the RDMA client driver that the device will be deleted.
+ */
+static int
+irdma_remove(struct ice_rdma_peer *peer)
+{
+ struct irdma_handler *hdl;
+ struct irdma_device *iwdev;
+
+ irdma_debug((struct irdma_sc_dev *)NULL, IRDMA_DEBUG_INIT, "removing %s\n", __FUNCTION__);
+
+ hdl = irdma_find_handler(peer);
+ if (!hdl)
+ return 0;
+
+ iwdev = hdl->iwdev;
+
+ if (iwdev->vsi.tc_change_pending) {
+ iwdev->vsi.tc_change_pending = false;
+ irdma_sc_suspend_resume_qps(&iwdev->vsi, IRDMA_OP_RESUME);
+ }
+
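+ /*
+ * Queue the deferred close and wait for it to finish before tearing
+ * down the taskqueue and the remaining per-PF resources.
+ */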
+ taskqueue_enqueue(hdl->deferred_tq, &hdl->deferred_task);
+
+ taskqueue_drain(hdl->deferred_tq, &hdl->deferred_task);
+ taskqueue_free(hdl->deferred_tq);
+ hdl->iwdev->rf->dev_ctx.task_arg.iwdev = NULL;
+ hdl->iwdev->rf->dev_ctx.task_arg.peer = NULL;
+
+ sysctl_ctx_free(&iwdev->rf->tun_info.irdma_sysctl_ctx);
+ hdl->iwdev->rf->tun_info.irdma_sysctl_tree = NULL;
+
+ irdma_ctrl_deinit_hw(iwdev->rf);
+
+ irdma_dealloc_pcidev(iwdev->rf);
+
+ irdma_del_handler(iwdev->hdl);
+ kfree(iwdev->hdl);
+ kfree(iwdev->rf);
+ ib_dealloc_device(&iwdev->ibdev);
+ irdma_pr_info("IRDMA hardware deinitialization complete\n");
+
+ return 0;
+}
+
+/**
+ * irdma_prep_for_unregister - ensure the driver is ready to unregister
+ */
+static void
+irdma_prep_for_unregister(void)
+{
+ struct irdma_handler *hdl;
+ unsigned long flags;
+ bool hdl_valid;
+
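+ /*
+ * Rescan the handler list from the head on every pass: IRDMA_REMOVE()
+ * drops entries from irdma_handlers, so a cached iterator would go
+ * stale.  The peer callbacks are invoked with the spinlock released.
+ */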
+ do {
+ hdl_valid = false;
+ spin_lock_irqsave(&irdma_handler_lock, flags);
+ list_for_each_entry(hdl, &irdma_handlers, list) {
+ if (!hdl)
+ continue;
+ if (!hdl->iwdev->rf->peer_info)
+ continue;
+ hdl_valid = true;
+ break;
+ }
+ spin_unlock_irqrestore(&irdma_handler_lock, flags);
+ if (!hdl || !hdl_valid)
+ break;
+ IRDMA_CLOSE(hdl->iwdev->rf->peer_info);
+ IRDMA_REMOVE(hdl->iwdev->rf->peer_info);
+ } while (1);
+}
+
+static kobj_method_t irdma_methods[] = {
+ KOBJMETHOD(irdma_probe, irdma_probe),
+ KOBJMETHOD(irdma_open, irdma_open),
+ KOBJMETHOD(irdma_close, irdma_close),
+ KOBJMETHOD(irdma_remove, irdma_remove),
+ KOBJMETHOD(irdma_link_change, irdma_link_change),
+ KOBJMETHOD(irdma_event_handler, irdma_event_handler),
+ KOBJMETHOD_END
+};
+
+/* declare irdma_class which extends the ice_rdma_di class */
+DEFINE_CLASS_1(irdma, irdma_class, irdma_methods, sizeof(struct ice_rdma_peer), ice_rdma_di_class);
+
+static struct ice_rdma_info irdma_info = {
+ .major_version = ICE_RDMA_MAJOR_VERSION,
+ .minor_version = ICE_RDMA_MINOR_VERSION,
+ .patch_version = ICE_RDMA_PATCH_VERSION,
+ .rdma_class = &irdma_class,
+};
+
+/**
+ * irdma_module_event_handler - Module event handler callback
+ * @mod: unused mod argument
+ * @what: the module event to handle
+ * @arg: unused module event argument
+ *
+ * Callback used by the FreeBSD module stack to notify the driver of module
+ * events. Used to implement custom handling for certain module events such as
+ * load and unload.
+ */
+static int
+irdma_module_event_handler(module_t __unused mod, int what, void __unused * arg)
+{
+ switch (what) {
+ case MOD_LOAD:
+ printf("Loading irdma module\n");
+ return ice_rdma_register(&irdma_info);
+ case MOD_UNLOAD:
+ printf("Unloading irdma module\n");
+ irdma_prep_for_unregister();
+ ice_rdma_unregister();
+ return (0);
+ default:
+ return (EOPNOTSUPP);
+ }
+
+ return (0);
+}
+
+static moduledata_t irdma_moduledata = {
+ "irdma",
+ irdma_module_event_handler,
+ NULL
+};
+
+DECLARE_MODULE(irdma, irdma_moduledata, SI_SUB_LAST, SI_ORDER_ANY);
+MODULE_VERSION(irdma, 1);
+MODULE_DEPEND(irdma, ice, 1, 1, 1);
+MODULE_DEPEND(irdma, ibcore, 1, 1, 1);
diff --git a/sys/dev/irdma/icrdma_hw.c b/sys/dev/irdma/icrdma_hw.c
new file mode 100644
index 000000000000..4a1d2a17269e
--- /dev/null
+++ b/sys/dev/irdma/icrdma_hw.c
@@ -0,0 +1,418 @@
+/*-
+ * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+ *
+ * Copyright (c) 2017 - 2021 Intel Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+#include "osdep.h"
+#include "irdma_type.h"
+#include "icrdma_hw.h"
+
+void disable_prefetch(struct irdma_hw *hw);
+
+void disable_tx_spad(struct irdma_hw *hw);
+
+void rdpu_ackreqpmthresh(struct irdma_hw *hw);
+
+static u32 icrdma_regs[IRDMA_MAX_REGS] = {
+ PFPE_CQPTAIL,
+ PFPE_CQPDB,
+ PFPE_CCQPSTATUS,
+ PFPE_CCQPHIGH,
+ PFPE_CCQPLOW,
+ PFPE_CQARM,
+ PFPE_CQACK,
+ PFPE_AEQALLOC,
+ PFPE_CQPERRCODES,
+ PFPE_WQEALLOC,
+ GLINT_DYN_CTL(0),
+ ICRDMA_DB_ADDR_OFFSET,
+
+ GLPCI_LBARCTRL,
+ GLPE_CPUSTATUS0,
+ GLPE_CPUSTATUS1,
+ GLPE_CPUSTATUS2,
+ PFINT_AEQCTL,
+ GLINT_CEQCTL(0),
+ VSIQF_PE_CTL1(0),
+ PFHMC_PDINV,
+ GLHMC_VFPDINV(0),
+ GLPE_CRITERR,
+ GLINT_RATE(0),
+};
+
+static u64 icrdma_masks[IRDMA_MAX_MASKS] = {
+ ICRDMA_CCQPSTATUS_CCQP_DONE_M,
+ ICRDMA_CCQPSTATUS_CCQP_ERR_M,
+ ICRDMA_CQPSQ_STAG_PDID_M,
+ ICRDMA_CQPSQ_CQ_CEQID_M,
+ ICRDMA_CQPSQ_CQ_CQID_M,
+ ICRDMA_COMMIT_FPM_CQCNT_M,
+};
+
+static u64 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
+ ICRDMA_CCQPSTATUS_CCQP_DONE_S,
+ ICRDMA_CCQPSTATUS_CCQP_ERR_S,
+ ICRDMA_CQPSQ_STAG_PDID_S,
+ ICRDMA_CQPSQ_CQ_CEQID_S,
+ ICRDMA_CQPSQ_CQ_CQID_S,
+ ICRDMA_COMMIT_FPM_CQCNT_S,
+};
+
+/**
+ * icrdma_ena_irq - Enable interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ */
+static void
+icrdma_ena_irq(struct irdma_sc_dev *dev, u32 idx)
+{
+ u32 val;
+ u32 interval = 0;
+
+ if (dev->ceq_itr && dev->aeq->msix_idx != idx)
+ interval = dev->ceq_itr >> 1; /* 2 usec units */
+ val = LS_64(0, IRDMA_GLINT_DYN_CTL_ITR_INDX) |
+ LS_64(interval, IRDMA_GLINT_DYN_CTL_INTERVAL) |
+ IRDMA_GLINT_DYN_CTL_INTENA_M | IRDMA_GLINT_DYN_CTL_CLEARPBA_M;
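+ /* Enable the vector, clear its PBA bit and program the ITR interval. */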
+ writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
+}
+
+/**
+ * icrdma_disable_irq - Disable interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ */
+static void
+icrdma_disable_irq(struct irdma_sc_dev *dev, u32 idx)
+{
+ writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
+}
+
+/**
+ * icrdma_cfg_ceq - Configure CEQ interrupt
+ * @dev: pointer to the device structure
+ * @ceq_id: Completion Event Queue ID
+ * @idx: vector index
+ * @enable: true to enable, false to disable
+ */
+static void
+icrdma_cfg_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
+ bool enable)
+{
+ u32 reg_val;
+
+ reg_val = enable ? IRDMA_GLINT_CEQCTL_CAUSE_ENA_M : 0;
+ reg_val |= (idx << IRDMA_GLINT_CEQCTL_MSIX_INDX_S) |
+ IRDMA_GLINT_CEQCTL_ITR_INDX_M;
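+ /* The ITR index field is written as all ones, presumably selecting the
+ * "no ITR" (unthrottled) profile on this hardware family.
+ */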
+
+ writel(reg_val, dev->hw_regs[IRDMA_GLINT_CEQCTL] + ceq_id);
+}
+
+static const struct irdma_irq_ops icrdma_irq_ops = {
+ .irdma_cfg_aeq = irdma_cfg_aeq,
+ .irdma_cfg_ceq = icrdma_cfg_ceq,
+ .irdma_dis_irq = icrdma_disable_irq,
+ .irdma_en_irq = icrdma_ena_irq,
+};
+
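+/*
+ * GEN_2 statistics gather layout: each entry appears to describe a counter's
+ * byte offset, bit offset and width class within the statistics buffer.
+ */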
+static const struct irdma_hw_stat_map icrdma_hw_stat_map[] = {
+ [IRDMA_HW_STAT_INDEX_RXVLANERR] = {0, 32, IRDMA_MAX_STATS_24},
+ [IRDMA_HW_STAT_INDEX_IP4RXOCTS] = {8, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_IP4RXPKTS] = {16, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = {24, 32, IRDMA_MAX_STATS_32},
+ [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = {24, 0, IRDMA_MAX_STATS_32},
+ [IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = {32, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = {40, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = {48, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_IP6RXOCTS] = {56, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_IP6RXPKTS] = {64, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = {72, 32, IRDMA_MAX_STATS_32},
+ [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = {72, 0, IRDMA_MAX_STATS_32},
+ [IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = {80, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = {88, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = {96, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_IP4TXOCTS] = {104, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_IP4TXPKTS] = {112, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = {120, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = {128, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = {136, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_IP6TXOCTS] = {144, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_IP6TXPKTS] = {152, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = {160, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = {168, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = {176, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = {184, 32, IRDMA_MAX_STATS_24},
+ [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = {184, 0, IRDMA_MAX_STATS_24},
+ [IRDMA_HW_STAT_INDEX_TCPRXSEGS] = {192, 32, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = {200, 32, IRDMA_MAX_STATS_24},
+ [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = {200, 0, IRDMA_MAX_STATS_24},
+ [IRDMA_HW_STAT_INDEX_TCPTXSEG] = {208, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = {216, 32, IRDMA_MAX_STATS_32},
+ [IRDMA_HW_STAT_INDEX_UDPRXPKTS] = {224, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_UDPTXPKTS] = {232, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_RDMARXWRS] = {240, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_RDMARXRDS] = {248, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_RDMARXSNDS] = {256, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_RDMATXWRS] = {264, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_RDMATXRDS] = {272, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_RDMATXSNDS] = {280, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_RDMAVBND] = {288, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_RDMAVINV] = {296, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = {304, 0, IRDMA_MAX_STATS_48},
+ [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = {312, 32, IRDMA_MAX_STATS_16},
+ [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = {312, 0, IRDMA_MAX_STATS_32},
+ [IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = {320, 0, IRDMA_MAX_STATS_32},
+};
+
+void
+icrdma_init_hw(struct irdma_sc_dev *dev)
+{
+ int i;
+ u8 IOMEM *hw_addr;
+
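+ /*
+ * Map register IDs to BAR addresses.  The IRDMA_DB_ADDR_OFFSET entry is
+ * deliberately computed against a NULL base, so it holds a bare offset
+ * rather than a mapped address.
+ */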
+ for (i = 0; i < IRDMA_MAX_REGS; ++i) {
+ hw_addr = dev->hw->hw_addr;
+
+ if (i == IRDMA_DB_ADDR_OFFSET)
+ hw_addr = NULL;
+
+ dev->hw_regs[i] = (u32 IOMEM *) (hw_addr + icrdma_regs[i]);
+ }
+ dev->hw_attrs.max_hw_vf_fpm_id = IRDMA_MAX_VF_FPM_ID;
+ dev->hw_attrs.first_hw_vf_fpm_id = IRDMA_FIRST_VF_FPM_ID;
+
+ for (i = 0; i < IRDMA_MAX_SHIFTS; ++i)
+ dev->hw_shifts[i] = icrdma_shifts[i];
+
+ for (i = 0; i < IRDMA_MAX_MASKS; ++i)
+ dev->hw_masks[i] = icrdma_masks[i];
+
+ dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
+ dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
+ dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
+ dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
+ dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
+ dev->irq_ops = &icrdma_irq_ops;
+ dev->hw_stats_map = icrdma_hw_stat_map;
+
+ dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE;
+ dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
+ dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
+ dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_2;
+
+ dev->hw_attrs.uk_attrs.max_hw_wq_frags = ICRDMA_MAX_WQ_FRAGMENT_COUNT;
+ dev->hw_attrs.uk_attrs.max_hw_read_sges = ICRDMA_MAX_SGE_RD;
+ dev->hw_attrs.uk_attrs.max_hw_wq_size = IRDMA_QP_WQE_MAX_SIZE;
+ dev->hw_attrs.uk_attrs.min_sw_wq_size = IRDMA_QP_SW_MIN_WQSIZE;
+ dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
+ disable_tx_spad(dev->hw);
+ disable_prefetch(dev->hw);
+ rdpu_ackreqpmthresh(dev->hw);
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RELAX_RQ_ORDER;
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE |
+ IRDMA_FEATURE_CQ_RESIZE;
+}
+
+void
+irdma_init_config_check(struct irdma_config_check *cc, u8 traffic_class, u16 qs_handle)
+{
+ cc->config_ok = false;
+ cc->traffic_class = traffic_class;
+ cc->qs_handle = qs_handle;
+ cc->lfc_set = 0;
+ cc->pfc_set = 0;
+}
+
+static bool
+irdma_is_lfc_set(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi)
+{
+ u32 lfc = 1;
+ u8 fn_id = vsi->dev->hmc_fn_id;
+
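+ /*
+ * Link-level flow control is reported as set only when RX pause, TX
+ * pause and the RX GPP gate are all enabled for this PF.
+ */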
+ lfc &= (rd32(vsi->dev->hw,
+ PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0 + 4 * fn_id) >> 8);
+ lfc &= (rd32(vsi->dev->hw,
+ PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0 + 4 * fn_id) >> 8);
+ lfc &= rd32(vsi->dev->hw,
+ PRTMAC_HSEC_CTL_RX_ENABLE_GPP_0 + 4 * vsi->dev->hmc_fn_id);
+
+ if (lfc)
+ return true;
+ return false;
+}
+
+static bool
+irdma_check_tc_has_pfc(struct irdma_sc_vsi *vsi, u64 reg_offset, u16 traffic_class)
+{
+ u32 value, pfc = 0;
+ u32 i;
+
+ value = rd32(vsi->dev->hw, reg_offset);
+ for (i = 0; i < 4; i++)
+ pfc |= (value >> (8 * i + traffic_class)) & 0x1;
+
+ if (pfc)
+ return true;
+ return false;
+}
+
+static bool
+irdma_is_pfc_set(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi)
+{
+ u32 pause;
+ u8 fn_id = vsi->dev->hmc_fn_id;
+
+ pause = (rd32(vsi->dev->hw,
+ PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0 + 4 * fn_id) >>
+ cc->traffic_class) & BIT(0);
+ pause &= (rd32(vsi->dev->hw,
+ PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0 + 4 * fn_id) >>
+ cc->traffic_class) & BIT(0);
+
+ return irdma_check_tc_has_pfc(vsi, GLDCB_TC2PFC, cc->traffic_class) &&
+ pause;
+}
+
+bool
+irdma_is_config_ok(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi)
+{
+ cc->lfc_set = irdma_is_lfc_set(cc, vsi);
+ cc->pfc_set = irdma_is_pfc_set(cc, vsi);
+
+ cc->config_ok = cc->lfc_set || cc->pfc_set;
+
+ return cc->config_ok;
+}
+
+#define IRDMA_RCV_WND_NO_FC 65536
+#define IRDMA_RCV_WND_FC 65536
+
+#define IRDMA_CWND_NO_FC 0x1
+#define IRDMA_CWND_FC 0x18
+
+#define IRDMA_ACKCREDS_NO_FC 0x02
+#define IRDMA_ACKCREDS_FC 0x06
+
+static void
+irdma_check_flow_ctrl(struct irdma_sc_vsi *vsi, u8 user_prio, u8 traffic_class)
+{
+ struct irdma_config_check *cfg_chk = &vsi->cfg_check[user_prio];
+
+ if (!irdma_is_config_ok(cfg_chk, vsi)) {
+ if (vsi->tc_print_warning[traffic_class]) {
+ irdma_pr_info("INFO: Flow control is disabled for this traffic class (%d) on this vsi.\n", traffic_class);
+ vsi->tc_print_warning[traffic_class] = false;
+ }
+ } else {
+ if (vsi->tc_print_warning[traffic_class]) {
+ irdma_pr_info("INFO: Flow control is enabled for this traffic class (%d) on this vsi.\n", traffic_class);
+ vsi->tc_print_warning[traffic_class] = false;
+ }
+ }
+}
+
+void
+irdma_check_fc_for_tc_update(struct irdma_sc_vsi *vsi,
+ struct irdma_l2params *l2params)
+{
+ u8 i;
+
+ for (i = 0; i < IRDMA_MAX_TRAFFIC_CLASS; i++)
+ vsi->tc_print_warning[i] = true;
+
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ struct irdma_config_check *cfg_chk = &vsi->cfg_check[i];
+ u8 tc = l2params->up2tc[i];
+
+ cfg_chk->traffic_class = tc;
+ cfg_chk->qs_handle = vsi->qos[i].qs_handle;
+ irdma_check_flow_ctrl(vsi, i, tc);
+ }
+}
+
+void
+irdma_check_fc_for_qp(struct irdma_sc_vsi *vsi, struct irdma_sc_qp *sc_qp)
+{
+ u8 i;
+
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ struct irdma_config_check *cfg_chk = &vsi->cfg_check[i];
+
+ irdma_init_config_check(cfg_chk,
+ vsi->qos[i].traffic_class,
+ vsi->qos[i].qs_handle);
+ if (sc_qp->qs_handle == cfg_chk->qs_handle)
+ irdma_check_flow_ctrl(vsi, i, cfg_chk->traffic_class);
+ }
+}
+
+#define GLPE_WQMTXIDXADDR 0x50E000
+#define GLPE_WQMTXIDXDATA 0x50E004
+
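+/*
+ * GLPE_WQMTXIDXADDR/GLPE_WQMTXIDXDATA are used below as an indirect
+ * index/data pair: index 0x12 is written to the address register and the
+ * selected entry is then read-modified-written through the data register
+ * (disable_prefetch() clears bit 0, disable_tx_spad() clears bit 3).
+ */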
+void
+disable_prefetch(struct irdma_hw *hw)
+{
+ u32 wqm_data;
+
+ wr32(hw, GLPE_WQMTXIDXADDR, 0x12);
+ irdma_mb();
+
+ wqm_data = rd32(hw, GLPE_WQMTXIDXDATA);
+ wqm_data &= ~(1);
+ wr32(hw, GLPE_WQMTXIDXDATA, wqm_data);
+}
+
+void
+disable_tx_spad(struct irdma_hw *hw)
+{
+ u32 wqm_data;
+
+ wr32(hw, GLPE_WQMTXIDXADDR, 0x12);
+ irdma_mb();
+
+ wqm_data = rd32(hw, GLPE_WQMTXIDXDATA);
+ wqm_data &= ~(1 << 3);
+ wr32(hw, GLPE_WQMTXIDXDATA, wqm_data);
+}
+
+#define GL_RDPU_CNTRL 0x52054
+void
+rdpu_ackreqpmthresh(struct irdma_hw *hw)
+{
+ u32 val;
+
+ val = rd32(hw, GL_RDPU_CNTRL);
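+ /* Replace the 6-bit field at bits 15:10 with 3, the ack-req PM threshold. */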
+ val &= ~(0x3f << 10);
+ val |= (3 << 10);
+ wr32(hw, GL_RDPU_CNTRL, val);
+}
diff --git a/sys/dev/irdma/icrdma_hw.h b/sys/dev/irdma/icrdma_hw.h
new file mode 100644
index 000000000000..4da0d7fcc9e8
--- /dev/null
+++ b/sys/dev/irdma/icrdma_hw.h
@@ -0,0 +1,137 @@
+/*-
+ * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+ *
+ * Copyright (c) 2019 - 2020 Intel Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+#ifndef ICRDMA_HW_H
+#define ICRDMA_HW_H
+
+#include "irdma.h"
+
+#define VFPE_CQPTAIL1 0x0000a000
+#define VFPE_CQPDB1 0x0000bc00
+#define VFPE_CCQPSTATUS1 0x0000b800
+#define VFPE_CCQPHIGH1 0x00009800
+#define VFPE_CCQPLOW1 0x0000ac00
+#define VFPE_CQARM1 0x0000b400
+#define VFPE_CQACK1 0x0000b000
+#define VFPE_AEQALLOC1 0x0000a400
+#define VFPE_CQPERRCODES1 0x00009c00
+#define VFPE_WQEALLOC1 0x0000c000
+#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) /* _i=0...63 */
+
+#define PFPE_CQPTAIL 0x00500880
+#define PFPE_CQPDB 0x00500800
+#define PFPE_CCQPSTATUS 0x0050a000
+#define PFPE_CCQPHIGH 0x0050a100
+#define PFPE_CCQPLOW 0x0050a080
+#define PFPE_CQARM 0x00502c00
+#define PFPE_CQACK 0x00502c80
+#define PFPE_AEQALLOC 0x00502d00
+#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4)) /* _i=0...2047 */
+#define GLPCI_LBARCTRL 0x0009de74
+#define GLPE_CPUSTATUS0 0x0050ba5c
+#define GLPE_CPUSTATUS1 0x0050ba60
+#define GLPE_CPUSTATUS2 0x0050ba64
+#define PFINT_AEQCTL 0x0016cb00
+#define PFPE_CQPERRCODES 0x0050a200
+#define PFPE_WQEALLOC 0x00504400
+#define GLINT_CEQCTL(_INT) (0x0015c000 + ((_INT) * 4)) /* _i=0...2047 */
+#define VSIQF_PE_CTL1(_VSI) (0x00414000 + ((_VSI) * 4)) /* _i=0...767 */
+#define PFHMC_PDINV 0x00520300
+#define GLHMC_VFPDINV(_i) (0x00528300 + ((_i) * 4)) /* _i=0...31 */
+#define GLPE_CRITERR 0x00534000
+#define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
+
+#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0 0x001e3180
+#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_1 0x001e3184
+#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_2 0x001e3188
+#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_3 0x001e318c
+
+#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0 0x001e31a0
+#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_1 0x001e31a4
+#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_2 0x001e31a8
+#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_3 0x001e31aC
+
+#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_0 0x001e34c0
+#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_1 0x001e34c4
+#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_2 0x001e34c8
+#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_3 0x001e34cC
+
+#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP_0 0x001e35c0
+#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP_1 0x001e35c4
+#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP_2 0x001e35c8
+#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP_3 0x001e35cC
+
+#define GLDCB_TC2PFC 0x001d2694
+#define PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001e31c0
+
+#define ICRDMA_DB_ADDR_OFFSET (8 * 1024 * 1024 - 64 * 1024)
+
+#define ICRDMA_VF_DB_ADDR_OFFSET (64 * 1024)
+
+/* CCQSTATUS */
+#define ICRDMA_CCQPSTATUS_CCQP_DONE_S 0
+#define ICRDMA_CCQPSTATUS_CCQP_DONE_M (0x1ULL << ICRDMA_CCQPSTATUS_CCQP_DONE_S)
+#define ICRDMA_CCQPSTATUS_CCQP_ERR_S 31
+#define ICRDMA_CCQPSTATUS_CCQP_ERR_M (0x1ULL << ICRDMA_CCQPSTATUS_CCQP_ERR_S)
+#define ICRDMA_CQPSQ_STAG_PDID_S 46
+#define ICRDMA_CQPSQ_STAG_PDID_M (0x3ffffULL << ICRDMA_CQPSQ_STAG_PDID_S)
+#define ICRDMA_CQPSQ_CQ_CEQID_S 22
+#define ICRDMA_CQPSQ_CQ_CEQID_M (0x3ffULL << ICRDMA_CQPSQ_CQ_CEQID_S)
+#define ICRDMA_CQPSQ_CQ_CQID_S 0
+#define ICRDMA_CQPSQ_CQ_CQID_M (0x7ffffULL << ICRDMA_CQPSQ_CQ_CQID_S)
+#define ICRDMA_COMMIT_FPM_CQCNT_S 0
+#define ICRDMA_COMMIT_FPM_CQCNT_M (0xfffffULL << ICRDMA_COMMIT_FPM_CQCNT_S)
+
+enum icrdma_device_caps_const {
+ ICRDMA_MAX_WQ_FRAGMENT_COUNT = 13,
+ ICRDMA_MAX_SGE_RD = 13,
+
+ ICRDMA_MAX_STATS_COUNT = 128,
+
+ ICRDMA_MAX_IRD_SIZE = 127,
+ ICRDMA_MAX_ORD_SIZE = 255,
+
+};
+
+void icrdma_init_hw(struct irdma_sc_dev *dev);
+void irdma_init_config_check(struct irdma_config_check *cc,
+ u8 traffic_class,
+ u16 qs_handle);
+bool irdma_is_config_ok(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi);
+void irdma_check_fc_for_tc_update(struct irdma_sc_vsi *vsi,
+ struct irdma_l2params *l2params);
+void irdma_check_fc_for_qp(struct irdma_sc_vsi *vsi, struct irdma_sc_qp *sc_qp);
+#endif /* ICRDMA_HW_H*/
diff --git a/sys/dev/irdma/irdma-abi.h b/sys/dev/irdma/irdma-abi.h
new file mode 100644
index 000000000000..779c14fa30ac
--- /dev/null
+++ b/sys/dev/irdma/irdma-abi.h
@@ -0,0 +1,143 @@
+/*-
+ * SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB
+ *
+ *
+ * Copyright (c) 2006 - 2021 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+#ifndef IRDMA_ABI_H
+#define IRDMA_ABI_H
+
+#include <linux/types.h>
+
+/* irdma must support legacy GEN_1 i40iw kernel
+ * and user-space whose last ABI ver is 5
+ */
+#define IRDMA_ABI_VER 5
+
+enum irdma_memreg_type {
+ IRDMA_MEMREG_TYPE_MEM = 0,
+ IRDMA_MEMREG_TYPE_QP = 1,
+ IRDMA_MEMREG_TYPE_CQ = 2,
+};
+
+struct irdma_alloc_ucontext_req {
+ __u32 rsvd32;
+ __u8 userspace_ver;
+ __u8 rsvd8[3];
+};
+
+struct irdma_alloc_ucontext_resp {
+ __u32 max_pds;
+ __u32 max_qps;
+ __u32 wq_size; /* size of the WQs (SQ+RQ) in the mmapped area */
+ __u8 kernel_ver;
+ __u8 rsvd[3];
+ __aligned_u64 feature_flags;
+ __aligned_u64 db_mmap_key;
+ __u32 max_hw_wq_frags;
+ __u32 max_hw_read_sges;
+ __u32 max_hw_inline;
+ __u32 max_hw_rq_quanta;
+ __u32 max_hw_wq_quanta;
+ __u32 min_hw_cq_size;
+ __u32 max_hw_cq_size;
+ __u16 max_hw_sq_chunk;
+ __u8 hw_rev;
+ __u8 rsvd2;
+};
+
+struct irdma_alloc_pd_resp {
+ __u32 pd_id;
+ __u8 rsvd[4];
+};
+
+struct irdma_resize_cq_req {
+ __aligned_u64 user_cq_buffer;
+};
+
+struct irdma_create_cq_req {
+ __aligned_u64 user_cq_buf;
+ __aligned_u64 user_shadow_area;
+};
+
+struct irdma_create_qp_req {
+ __aligned_u64 user_wqe_bufs;
+ __aligned_u64 user_compl_ctx;
+};
+
+struct irdma_mem_reg_req {
+ __u16 reg_type; /* enum irdma_memreg_type */
+ __u16 cq_pages;
+ __u16 rq_pages;
+ __u16 sq_pages;
+};
+
+struct irdma_modify_qp_req {
+ __u8 sq_flush;
+ __u8 rq_flush;
+ __u8 rsvd[6];
+};
+
+struct irdma_create_cq_resp {
+ __u32 cq_id;
+ __u32 cq_size;
+};
+
+struct irdma_create_qp_resp {
+ __u32 qp_id;
+ __u32 actual_sq_size;
+ __u32 actual_rq_size;
+ __u32 irdma_drv_opt;
+ __u16 push_idx;
+ __u8 lsmm;
+ __u8 rsvd;
+ __u32 qp_caps;
+};
+
+struct irdma_modify_qp_resp {
+ __aligned_u64 push_wqe_mmap_key;
+ __aligned_u64 push_db_mmap_key;
+ __u16 push_offset;
+ __u8 push_valid;
+ __u8 rd_fence_rate;
+ __u8 rsvd[4];
+};
+
+struct irdma_create_ah_resp {
+ __u32 ah_id;
+ __u8 rsvd[4];
+};
+#endif /* IRDMA_ABI_H */
diff --git a/sys/dev/irdma/irdma.h b/sys/dev/irdma/irdma.h
new file mode 100644
index 000000000000..793ba3c2ae39
--- /dev/null
+++ b/sys/dev/irdma/irdma.h
@@ -0,0 +1,238 @@
+/*-
+ * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+ *
+ * Copyright (c) 2017 - 2021 Intel Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+#ifndef IRDMA_H
+#define IRDMA_H
+
+#define RDMA_BIT2(type, a) ((u##type) 1UL << a)
+#define RDMA_MASK3(type, mask, shift) ((u##type) mask << shift)
+#define MAKEMASK(m, s) ((m) << (s))
+#define IRDMA_WQEALLOC_WQE_DESC_INDEX_S 20
+#define IRDMA_WQEALLOC_WQE_DESC_INDEX_M (0xfff << IRDMA_WQEALLOC_WQE_DESC_INDEX_S)
+
+#define IRDMA_CQPTAIL_WQTAIL_S 0
+#define IRDMA_CQPTAIL_WQTAIL_M (0x7ff << IRDMA_CQPTAIL_WQTAIL_S)
+
+#define IRDMA_CQPTAIL_CQP_OP_ERR_S 31
+#define IRDMA_CQPTAIL_CQP_OP_ERR_M (0x1 << IRDMA_CQPTAIL_CQP_OP_ERR_S)
+
+#define IRDMA_CQPERRCODES_CQP_MINOR_CODE_S 0
+#define IRDMA_CQPERRCODES_CQP_MINOR_CODE_M (0xffff << IRDMA_CQPERRCODES_CQP_MINOR_CODE_S)
+#define IRDMA_CQPERRCODES_CQP_MAJOR_CODE_S 16
+#define IRDMA_CQPERRCODES_CQP_MAJOR_CODE_M (0xffff << IRDMA_CQPERRCODES_CQP_MAJOR_CODE_S)
+
+#define IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE_S 4
+#define IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE_M (0x3 << IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE_S)
+
+#define IRDMA_GLINT_RATE_INTERVAL_S 0
+#define IRDMA_GLINT_RATE_INTERVAL_M (0x3c << IRDMA_GLINT_RATE_INTERVAL_S)
+
+#define IRDMA_GLINT_RATE_INTRL_ENA_S 6
+#define IRDMA_GLINT_RATE_INTRL_ENA_M BIT(6)
+
+#define IRDMA_GLINT_DYN_CTL_INTENA_S 0
+#define IRDMA_GLINT_DYN_CTL_INTENA_M (0x1 << IRDMA_GLINT_DYN_CTL_INTENA_S)
+
+#define IRDMA_GLINT_DYN_CTL_CLEARPBA_S 1
+#define IRDMA_GLINT_DYN_CTL_CLEARPBA_M (0x1 << IRDMA_GLINT_DYN_CTL_CLEARPBA_S)
+
+#define IRDMA_GLINT_DYN_CTL_ITR_INDX_S 3
+#define IRDMA_GLINT_DYN_CTL_ITR_INDX_M (0x3 << IRDMA_GLINT_DYN_CTL_ITR_INDX_S)
+
+#define IRDMA_GLINT_DYN_CTL_INTERVAL_S 5
+#define IRDMA_GLINT_DYN_CTL_INTERVAL_M (0xfff << IRDMA_GLINT_DYN_CTL_INTERVAL_S)
+
+#define IRDMA_GLINT_CEQCTL_ITR_INDX_S 11
+#define IRDMA_GLINT_CEQCTL_ITR_INDX_M (0x3 << IRDMA_GLINT_CEQCTL_ITR_INDX_S)
+
+#define IRDMA_GLINT_CEQCTL_CAUSE_ENA_S 30
+#define IRDMA_GLINT_CEQCTL_CAUSE_ENA_M (0x1 << IRDMA_GLINT_CEQCTL_CAUSE_ENA_S)
+
+#define IRDMA_GLINT_CEQCTL_MSIX_INDX_S 0
+#define IRDMA_GLINT_CEQCTL_MSIX_INDX_M (0x7ff << IRDMA_GLINT_CEQCTL_MSIX_INDX_S)
+
+#define IRDMA_PFINT_AEQCTL_MSIX_INDX_S 0
+#define IRDMA_PFINT_AEQCTL_MSIX_INDX_M (0x7ff << IRDMA_PFINT_AEQCTL_MSIX_INDX_S)
+
+#define IRDMA_PFINT_AEQCTL_ITR_INDX_S 11
+#define IRDMA_PFINT_AEQCTL_ITR_INDX_M (0x3 << IRDMA_PFINT_AEQCTL_ITR_INDX_S)
+
+#define IRDMA_PFINT_AEQCTL_CAUSE_ENA_S 30
+#define IRDMA_PFINT_AEQCTL_CAUSE_ENA_M (0x1 << IRDMA_PFINT_AEQCTL_CAUSE_ENA_S)
+
+#define IRDMA_PFHMC_PDINV_PMSDIDX_S 0
+#define IRDMA_PFHMC_PDINV_PMSDIDX_M (0xfff << IRDMA_PFHMC_PDINV_PMSDIDX_S)
+
+#define IRDMA_PFHMC_PDINV_PMSDPARTSEL_S 15
+#define IRDMA_PFHMC_PDINV_PMSDPARTSEL_M (0x1 << IRDMA_PFHMC_PDINV_PMSDPARTSEL_S)
+
+#define IRDMA_PFHMC_PDINV_PMPDIDX_S 16
+#define IRDMA_PFHMC_PDINV_PMPDIDX_M (0x1ff << IRDMA_PFHMC_PDINV_PMPDIDX_S)
+
+#define IRDMA_PFHMC_SDDATALOW_PMSDVALID_S 0
+#define IRDMA_PFHMC_SDDATALOW_PMSDVALID_M (0x1 << IRDMA_PFHMC_SDDATALOW_PMSDVALID_S)
+#define IRDMA_PFHMC_SDDATALOW_PMSDTYPE_S 1
+#define IRDMA_PFHMC_SDDATALOW_PMSDTYPE_M (0x1 << IRDMA_PFHMC_SDDATALOW_PMSDTYPE_S)
+#define IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT_S 2
+#define IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT_M (0x3ff << IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT_S)
+#define IRDMA_PFHMC_SDDATALOW_PMSDDATALOW_S 12
+#define IRDMA_PFHMC_SDDATALOW_PMSDDATALOW_M (0xfffff << IRDMA_PFHMC_SDDATALOW_PMSDDATALOW_S)
+
+#define IRDMA_PFHMC_SDCMD_PMSDWR_S 31
+#define IRDMA_PFHMC_SDCMD_PMSDWR_M (0x1 << IRDMA_PFHMC_SDCMD_PMSDWR_S)
+
+#define IRDMA_INVALID_CQ_IDX 0xffffffff
+
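+/*
+ * The enums below index per-HW-generation lookup tables of register
+ * offsets, field shifts and field masks that icrdma_init_hw() fills in
+ * for the device.
+ */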
+enum irdma_registers {
+ IRDMA_CQPTAIL,
+ IRDMA_CQPDB,
+ IRDMA_CCQPSTATUS,
+ IRDMA_CCQPHIGH,
+ IRDMA_CCQPLOW,
+ IRDMA_CQARM,
+ IRDMA_CQACK,
+ IRDMA_AEQALLOC,
+ IRDMA_CQPERRCODES,
+ IRDMA_WQEALLOC,
+ IRDMA_GLINT_DYN_CTL,
+ IRDMA_DB_ADDR_OFFSET,
+ IRDMA_GLPCI_LBARCTRL,
+ IRDMA_GLPE_CPUSTATUS0,
+ IRDMA_GLPE_CPUSTATUS1,
+ IRDMA_GLPE_CPUSTATUS2,
+ IRDMA_PFINT_AEQCTL,
+ IRDMA_GLINT_CEQCTL,
+ IRDMA_VSIQF_PE_CTL1,
+ IRDMA_PFHMC_PDINV,
+ IRDMA_GLHMC_VFPDINV,
+ IRDMA_GLPE_CRITERR,
+ IRDMA_GLINT_RATE,
+ IRDMA_MAX_REGS, /* Must be last entry */
+};
+
+enum irdma_shifts {
+ IRDMA_CCQPSTATUS_CCQP_DONE_S,
+ IRDMA_CCQPSTATUS_CCQP_ERR_S,
+ IRDMA_CQPSQ_STAG_PDID_S,
+ IRDMA_CQPSQ_CQ_CEQID_S,
+ IRDMA_CQPSQ_CQ_CQID_S,
+ IRDMA_COMMIT_FPM_CQCNT_S,
+ IRDMA_MAX_SHIFTS,
+};
+
+enum irdma_masks {
+ IRDMA_CCQPSTATUS_CCQP_DONE_M,
+ IRDMA_CCQPSTATUS_CCQP_ERR_M,
+ IRDMA_CQPSQ_STAG_PDID_M,
+ IRDMA_CQPSQ_CQ_CEQID_M,
+ IRDMA_CQPSQ_CQ_CQID_M,
+ IRDMA_COMMIT_FPM_CQCNT_M,
+ IRDMA_MAX_MASKS, /* Must be last entry */
+};
+
+#define IRDMA_MAX_MGS_PER_CTX 8
+
+struct irdma_mcast_grp_ctx_entry_info {
+ u32 qp_id;
+ bool valid_entry;
+ u16 dest_port;
+ u32 use_cnt;
+};
+
+struct irdma_mcast_grp_info {
+ u8 dest_mac_addr[ETH_ALEN];
+ u16 vlan_id;
+ u8 hmc_fcn_id;
+ bool ipv4_valid:1;
+ bool vlan_valid:1;
+ u16 mg_id;
+ u32 no_of_mgs;
+ u32 dest_ip_addr[4];
+ u16 qs_handle;
+ struct irdma_dma_mem dma_mem_mc;
+ struct irdma_mcast_grp_ctx_entry_info mg_ctx_info[IRDMA_MAX_MGS_PER_CTX];
+};
+
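+/* IRDMA_GEN_1 is the X722 (i40e) generation; IRDMA_GEN_2 is E810 (ice). */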
+enum irdma_vers {
+ IRDMA_GEN_RSVD,
+ IRDMA_GEN_1,
+ IRDMA_GEN_2,
+};
+
+struct irdma_uk_attrs {
+ u64 feature_flags;
+ u32 max_hw_wq_frags;
+ u32 max_hw_read_sges;
+ u32 max_hw_inline;
+ u32 max_hw_rq_quanta;
+ u32 max_hw_wq_quanta;
+ u32 min_hw_cq_size;
+ u32 max_hw_cq_size;
+ u16 max_hw_sq_chunk;
+ u16 max_hw_wq_size;
+ u16 min_sw_wq_size;
+ u8 hw_rev;
+};
+
+struct irdma_hw_attrs {
+ struct irdma_uk_attrs uk_attrs;
+ u64 max_hw_outbound_msg_size;
+ u64 max_hw_inbound_msg_size;
+ u64 max_mr_size;
+ u32 min_hw_qp_id;
+ u32 min_hw_aeq_size;
+ u32 max_hw_aeq_size;
+ u32 min_hw_ceq_size;
+ u32 max_hw_ceq_size;
+ u32 max_hw_device_pages;
+ u32 max_hw_vf_fpm_id;
+ u32 first_hw_vf_fpm_id;
+ u32 max_hw_ird;
+ u32 max_hw_ord;
+ u32 max_hw_wqes;
+ u32 max_hw_pds;
+ u32 max_hw_ena_vf_count;
+ u32 max_qp_wr;
+ u32 max_pe_ready_count;
+ u32 max_done_count;
+ u32 max_sleep_count;
+ u32 max_cqp_compl_wait_time_ms;
+ u16 max_stat_inst;
+ u16 max_stat_idx;
+};
+
+void icrdma_init_hw(struct irdma_sc_dev *dev);
+void irdma_check_fc_for_qp(struct irdma_sc_vsi *vsi, struct irdma_sc_qp *sc_qp);
+#endif /* IRDMA_H*/
diff --git a/sys/dev/irdma/irdma_cm.c b/sys/dev/irdma/irdma_cm.c
new file mode 100644
index 000000000000..e107903efe4d
--- /dev/null
+++ b/sys/dev/irdma/irdma_cm.c
@@ -0,0 +1,4253 @@
+/*-
+ * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+ *
+ * Copyright (c) 2015 - 2022 Intel Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+#include "irdma_main.h"
+
+static void irdma_cm_post_event(struct irdma_cm_event *event);
+static void irdma_disconnect_worker(struct work_struct *work);
+
+/**
+ * irdma_free_sqbuf - put back puda buffer if refcount is 0
+ * @vsi: The VSI structure of the device
+ * @bufp: puda buffer to free
+ */
+void
+irdma_free_sqbuf(struct irdma_sc_vsi *vsi, void *bufp)
+{
+ struct irdma_puda_buf *buf = bufp;
+ struct irdma_puda_rsrc *ilq = vsi->ilq;
+
+ if (atomic_dec_and_test(&buf->refcount))
+ irdma_puda_ret_bufpool(ilq, buf);
+}
+
+/**
+ * irdma_record_ird_ord - Record IRD/ORD passed in
+ * @cm_node: connection's node
+ * @conn_ird: connection IRD
+ * @conn_ord: connection ORD
+ */
+static void
+irdma_record_ird_ord(struct irdma_cm_node *cm_node, u32 conn_ird,
+ u32 conn_ord)
+{
+ if (conn_ird > cm_node->dev->hw_attrs.max_hw_ird)
+ conn_ird = cm_node->dev->hw_attrs.max_hw_ird;
+
+ if (conn_ord > cm_node->dev->hw_attrs.max_hw_ord)
+ conn_ord = cm_node->dev->hw_attrs.max_hw_ord;
+ else if (!conn_ord && cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO)
+ conn_ord = 1;
+ cm_node->ird_size = conn_ird;
+ cm_node->ord_size = conn_ord;
+}
+
+/**
+ * irdma_copy_ip_ntohl - copy IP address from network to host
+ * @dst: IP address in host order
+ * @src: IP address in network order (big endian)
+ */
+void
+irdma_copy_ip_ntohl(u32 *dst, __be32 *src)
+{
+ *dst++ = ntohl(*src++);
+ *dst++ = ntohl(*src++);
+ *dst++ = ntohl(*src++);
+ *dst = ntohl(*src);
+}
+
+/**
+ * irdma_copy_ip_htonl - copy IP address from host to network order
+ * @dst: IP address in network order (big endian)
+ * @src: IP address in host order
+ */
+void
+irdma_copy_ip_htonl(__be32 *dst, u32 *src)
+{
+ *dst++ = htonl(*src++);
+ *dst++ = htonl(*src++);
+ *dst++ = htonl(*src++);
+ *dst = htonl(*src);
+}
+
+/**
+ * irdma_get_addr_info
+ * @cm_node: contains ip/tcp info
+ * @cm_info: to get a copy of the cm_node ip/tcp info
+ */
+static void
+irdma_get_addr_info(struct irdma_cm_node *cm_node,
+ struct irdma_cm_info *cm_info)
+{
+ memset(cm_info, 0, sizeof(*cm_info));
+ cm_info->ipv4 = cm_node->ipv4;
+ cm_info->vlan_id = cm_node->vlan_id;
+ memcpy(cm_info->loc_addr, cm_node->loc_addr, sizeof(cm_info->loc_addr));
+ memcpy(cm_info->rem_addr, cm_node->rem_addr, sizeof(cm_info->rem_addr));
+ cm_info->loc_port = cm_node->loc_port;
+ cm_info->rem_port = cm_node->rem_port;
+}
+
+/**
+ * irdma_fill_sockaddr4 - fill in addr info for IPv4 connection
+ * @cm_node: connection's node
+ * @event: upper layer's cm event
+ */
+static inline void
+irdma_fill_sockaddr4(struct irdma_cm_node *cm_node,
+ struct iw_cm_event *event)
+{
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr;
+
+ laddr->sin_family = AF_INET;
+ raddr->sin_family = AF_INET;
+
+ laddr->sin_port = htons(cm_node->loc_port);
+ raddr->sin_port = htons(cm_node->rem_port);
+
+ laddr->sin_addr.s_addr = htonl(cm_node->loc_addr[0]);
+ raddr->sin_addr.s_addr = htonl(cm_node->rem_addr[0]);
+}
+
+/**
+ * irdma_fill_sockaddr6 - fill in addr info for IPv6 connection
+ * @cm_node: connection's node
+ * @event: upper layer's cm event
+ */
+static inline void
+irdma_fill_sockaddr6(struct irdma_cm_node *cm_node,
+ struct iw_cm_event *event)
+{
+ struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr;
+ struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)&event->remote_addr;
+
+ laddr6->sin6_family = AF_INET6;
+ raddr6->sin6_family = AF_INET6;
+
+ laddr6->sin6_port = htons(cm_node->loc_port);
+ raddr6->sin6_port = htons(cm_node->rem_port);
+
+ irdma_copy_ip_htonl(laddr6->sin6_addr.__u6_addr.__u6_addr32,
+ cm_node->loc_addr);
+ irdma_copy_ip_htonl(raddr6->sin6_addr.__u6_addr.__u6_addr32,
+ cm_node->rem_addr);
+}
+
+/**
+ * irdma_get_cmevent_info - for cm event upcall
+ * @cm_node: connection's node
+ * @cm_id: upper layers cm struct for the event
+ * @event: upper layer's cm event
+ */
+static inline void
+irdma_get_cmevent_info(struct irdma_cm_node *cm_node,
+ struct iw_cm_id *cm_id,
+ struct iw_cm_event *event)
+{
+ memcpy(&event->local_addr, &cm_id->m_local_addr,
+ sizeof(event->local_addr));
+ memcpy(&event->remote_addr, &cm_id->m_remote_addr,
+ sizeof(event->remote_addr));
+ if (cm_node) {
+ event->private_data = cm_node->pdata_buf;
+ event->private_data_len = (u8)cm_node->pdata.size;
+ event->ird = cm_node->ird_size;
+ event->ord = cm_node->ord_size;
+ }
+}
+
+/**
+ * irdma_send_cm_event - upcall cm's event handler
+ * @cm_node: connection's node
+ * @cm_id: upper layer's cm info struct
+ * @type: Event type to indicate
+ * @status: status for the event type
+ */
+static int
+irdma_send_cm_event(struct irdma_cm_node *cm_node,
+ struct iw_cm_id *cm_id,
+ enum iw_cm_event_type type, int status)
+{
+ struct iw_cm_event event = {0};
+
+ event.event = type;
+ event.status = status;
+
+ irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+		    "cm_node %p cm_id=%p accel=%d state=%d event_type=%d status=%d\n",
+ cm_node, cm_id, cm_node->accelerated, cm_node->state, type, status);
+
+ switch (type) {
+ case IW_CM_EVENT_CONNECT_REQUEST:
+ if (cm_node->ipv4)
+ irdma_fill_sockaddr4(cm_node, &event);
+ else
+ irdma_fill_sockaddr6(cm_node, &event);
+ event.provider_data = cm_node;
+ event.private_data = cm_node->pdata_buf;
+ event.private_data_len = (u8)cm_node->pdata.size;
+ event.ird = cm_node->ird_size;
+ break;
+ case IW_CM_EVENT_CONNECT_REPLY:
+ irdma_get_cmevent_info(cm_node, cm_id, &event);
+ break;
+ case IW_CM_EVENT_ESTABLISHED:
+ event.ird = cm_node->ird_size;
+ event.ord = cm_node->ord_size;
+ break;
+ case IW_CM_EVENT_DISCONNECT:
+ case IW_CM_EVENT_CLOSE:
+		/* Wait if we are in RTS but haven't issued the iwcm event upcall */
+ if (!cm_node->accelerated)
+ wait_for_completion(&cm_node->establish_comp);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return cm_id->event_handler(cm_id, &event);
+}
+
+/**
+ * irdma_timer_list_prep - add connection nodes to a list to perform timer tasks
+ * @cm_core: cm's core
+ * @timer_list: a list to which matching cm_nodes will be added
+ */
+static void
+irdma_timer_list_prep(struct irdma_cm_core *cm_core,
+ struct list_head *timer_list)
+{
+ struct irdma_cm_node *cm_node;
+ int bkt;
+
+ HASH_FOR_EACH_RCU(cm_core->cm_hash_tbl, bkt, cm_node, list) {
+ if ((cm_node->close_entry || cm_node->send_entry) &&
+ atomic_inc_not_zero(&cm_node->refcnt))
+ list_add(&cm_node->timer_entry, timer_list);
+ }
+}
+
+/**
+ * irdma_create_event - create cm event
+ * @cm_node: connection's node
+ * @type: Event type to generate
+ */
+static struct irdma_cm_event *
+irdma_create_event(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type)
+{
+ struct irdma_cm_event *event;
+
+ if (!cm_node->cm_id)
+ return NULL;
+
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+
+ if (!event)
+ return NULL;
+
+ event->type = type;
+ event->cm_node = cm_node;
+ memcpy(event->cm_info.rem_addr, cm_node->rem_addr,
+ sizeof(event->cm_info.rem_addr));
+ memcpy(event->cm_info.loc_addr, cm_node->loc_addr,
+ sizeof(event->cm_info.loc_addr));
+ event->cm_info.rem_port = cm_node->rem_port;
+ event->cm_info.loc_port = cm_node->loc_port;
+ event->cm_info.cm_id = cm_node->cm_id;
+ irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+ "node=%p event=%p type=%u dst=%pI4 src=%pI4\n", cm_node,
+ event, type, event->cm_info.loc_addr,
+ event->cm_info.rem_addr);
+ irdma_cm_post_event(event);
+
+ return event;
+}
+
+/**
+ * irdma_free_retrans_entry - free send entry
+ * @cm_node: connection's node
+ */
+static void
+irdma_free_retrans_entry(struct irdma_cm_node *cm_node)
+{
+ struct irdma_device *iwdev = cm_node->iwdev;
+ struct irdma_timer_entry *send_entry;
+
+ send_entry = cm_node->send_entry;
+ if (!send_entry)
+ return;
+
+ cm_node->send_entry = NULL;
+ irdma_free_sqbuf(&iwdev->vsi, send_entry->sqbuf);
+ kfree(send_entry);
+ atomic_dec(&cm_node->refcnt);
+}
+
+/**
+ * irdma_cleanup_retrans_entry - free send entry with lock
+ * @cm_node: connection's node
+ */
+static void
+irdma_cleanup_retrans_entry(struct irdma_cm_node *cm_node)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ irdma_free_retrans_entry(cm_node);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+}
+
+/**
+ * irdma_form_ah_cm_frame - get a free packet and build frame with address handle
+ * @cm_node: connection's node info to use in frame
+ * @options: pointer to options info
+ * @hdr: pointer to mpa header
+ * @pdata: pointer to private data
+ * @flags: indicates FIN or ACK
+ */
+static struct irdma_puda_buf *
+irdma_form_ah_cm_frame(struct irdma_cm_node *cm_node,
+ struct irdma_kmem_info *options,
+ struct irdma_kmem_info *hdr,
+ struct irdma_mpa_priv_info *pdata,
+ u8 flags)
+{
+ struct irdma_puda_buf *sqbuf;
+ struct irdma_sc_vsi *vsi = &cm_node->iwdev->vsi;
+ u8 *buf;
+ struct tcphdr *tcph;
+ u16 pktsize;
+ u32 opts_len = 0;
+ u32 pd_len = 0;
+ u32 hdr_len = 0;
+
+ if (!cm_node->ah || !cm_node->ah->ah_info.ah_valid) {
+ irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+ "AH invalid\n");
+ return NULL;
+ }
+
+ sqbuf = irdma_puda_get_bufpool(vsi->ilq);
+ if (!sqbuf) {
+ irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+ "SQ buf NULL\n");
+ return NULL;
+ }
+
+ sqbuf->ah_id = cm_node->ah->ah_info.ah_idx;
+ buf = sqbuf->mem.va;
+ if (options)
+ opts_len = (u32)options->size;
+
+ if (hdr)
+ hdr_len = hdr->size;
+
+ if (pdata)
+ pd_len = pdata->size;
+
+ pktsize = sizeof(*tcph) + opts_len + hdr_len + pd_len;
+
+ memset(buf, 0, pktsize);
+
+ sqbuf->totallen = pktsize;
+ sqbuf->tcphlen = sizeof(*tcph) + opts_len;
+ sqbuf->scratch = cm_node;
+
+ tcph = (struct tcphdr *)buf;
+ buf += sizeof(*tcph);
+
+ tcph->th_sport = htons(cm_node->loc_port);
+ tcph->th_dport = htons(cm_node->rem_port);
+ tcph->th_seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
+
+ if (flags & SET_ACK) {
+ cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
+ tcph->th_ack = htonl(cm_node->tcp_cntxt.loc_ack_num);
+ tcph->th_flags |= TH_ACK;
+ } else {
+ tcph->th_ack = 0;
+ }
+
+ if (flags & SET_SYN) {
+ cm_node->tcp_cntxt.loc_seq_num++;
+ tcph->th_flags |= TH_SYN;
+ } else {
+ cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len;
+ }
+
+ if (flags & SET_FIN) {
+ cm_node->tcp_cntxt.loc_seq_num++;
+ tcph->th_flags |= TH_FIN;
+ }
+
+ if (flags & SET_RST)
+ tcph->th_flags |= TH_RST;
+
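+	/* TCP data offset is in 32-bit words; round header + options up */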
+ tcph->th_off = (u16)((sizeof(*tcph) + opts_len + 3) >> 2);
+ sqbuf->tcphlen = tcph->th_off << 2;
+ tcph->th_win = htons(cm_node->tcp_cntxt.rcv_wnd);
+ tcph->th_urp = 0;
+
+ if (opts_len) {
+ memcpy(buf, options->addr, opts_len);
+ buf += opts_len;
+ }
+
+ if (hdr_len) {
+ memcpy(buf, hdr->addr, hdr_len);
+ buf += hdr_len;
+ }
+
+ if (pdata && pdata->addr)
+ memcpy(buf, pdata->addr, pdata->size);
+
+ atomic_set(&sqbuf->refcount, 1);
+
+ irdma_debug_buf(vsi->dev, IRDMA_DEBUG_ILQ, "TRANSMIT ILQ BUFFER",
+ sqbuf->mem.va, sqbuf->totallen);
+
+ return sqbuf;
+}
+
+/**
+ * irdma_form_uda_cm_frame - get a free packet and build a full TCP/IP frame
+ * @cm_node: connection's node info to use in frame
+ * @options: pointer to options info
+ * @hdr: pointer to mpa header
+ * @pdata: pointer to private data
+ * @flags: indicates FIN or ACK
+ */
+static struct irdma_puda_buf *
+irdma_form_uda_cm_frame(struct irdma_cm_node *cm_node,
+ struct irdma_kmem_info *options,
+ struct irdma_kmem_info *hdr,
+ struct irdma_mpa_priv_info *pdata,
+ u8 flags)
+{
+ struct irdma_puda_buf *sqbuf;
+ struct irdma_sc_vsi *vsi = &cm_node->iwdev->vsi;
+ u8 *buf;
+
+ struct tcphdr *tcph;
+ struct ip *iph;
+ struct ip6_hdr *ip6h;
+ struct ether_header *ethh;
+ u16 pktsize;
+ u16 eth_hlen = ETH_HLEN;
+ u32 opts_len = 0;
+ u32 pd_len = 0;
+ u32 hdr_len = 0;
+
+ u16 vtag;
+
+ sqbuf = irdma_puda_get_bufpool(vsi->ilq);
+ if (!sqbuf)
+ return NULL;
+
+ buf = sqbuf->mem.va;
+
+ if (options)
+ opts_len = (u32)options->size;
+
+ if (hdr)
+ hdr_len = hdr->size;
+
+ if (pdata)
+ pd_len = pdata->size;
+
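+	/* an 802.1Q VLAN tag adds 4 bytes to the Ethernet header */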
+ if (cm_node->vlan_id < VLAN_N_VID)
+ eth_hlen += 4;
+
+ if (cm_node->ipv4)
+ pktsize = sizeof(*iph) + sizeof(*tcph);
+ else
+ pktsize = sizeof(*ip6h) + sizeof(*tcph);
+ pktsize += opts_len + hdr_len + pd_len;
+
+ memset(buf, 0, eth_hlen + pktsize);
+
+ sqbuf->totallen = pktsize + eth_hlen;
+ sqbuf->maclen = eth_hlen;
+ sqbuf->tcphlen = sizeof(*tcph) + opts_len;
+ sqbuf->scratch = cm_node;
+
+ ethh = (struct ether_header *)buf;
+ buf += eth_hlen;
+
+ if (cm_node->do_lpb)
+ sqbuf->do_lpb = true;
+
+ if (cm_node->ipv4) {
+ sqbuf->ipv4 = true;
+
+ iph = (struct ip *)buf;
+ buf += sizeof(*iph);
+ tcph = (struct tcphdr *)buf;
+ buf += sizeof(*tcph);
+
+ ether_addr_copy(ethh->ether_dhost, cm_node->rem_mac);
+ ether_addr_copy(ethh->ether_shost, cm_node->loc_mac);
+ if (cm_node->vlan_id < VLAN_N_VID) {
+ ((struct ether_vlan_header *)ethh)->evl_proto =
+ htons(ETH_P_8021Q);
+ vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) |
+ cm_node->vlan_id;
+ ((struct ether_vlan_header *)ethh)->evl_tag = htons(vtag);
+
+ ((struct ether_vlan_header *)ethh)->evl_encap_proto =
+ htons(ETH_P_IP);
+ } else {
+ ethh->ether_type = htons(ETH_P_IP);
+ }
+
+ iph->ip_v = IPVERSION;
+		iph->ip_hl = 5;	/* 5 * 4-byte words, IP header len */
+ iph->ip_tos = cm_node->tos;
+ iph->ip_len = htons(pktsize);
+ iph->ip_id = htons(++cm_node->tcp_cntxt.loc_id);
+
+ iph->ip_off = htons(0x4000);
+ iph->ip_ttl = 0x40;
+ iph->ip_p = IPPROTO_TCP;
+ iph->ip_src.s_addr = htonl(cm_node->loc_addr[0]);
+ iph->ip_dst.s_addr = htonl(cm_node->rem_addr[0]);
+ } else {
+ sqbuf->ipv4 = false;
+ ip6h = (struct ip6_hdr *)buf;
+ buf += sizeof(*ip6h);
+ tcph = (struct tcphdr *)buf;
+ buf += sizeof(*tcph);
+
+ ether_addr_copy(ethh->ether_dhost, cm_node->rem_mac);
+ ether_addr_copy(ethh->ether_shost, cm_node->loc_mac);
+ if (cm_node->vlan_id < VLAN_N_VID) {
+ ((struct ether_vlan_header *)ethh)->evl_proto =
+ htons(ETH_P_8021Q);
+ vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) |
+ cm_node->vlan_id;
+ ((struct ether_vlan_header *)ethh)->evl_tag = htons(vtag);
+ ((struct ether_vlan_header *)ethh)->evl_encap_proto =
+ htons(ETH_P_IPV6);
+ } else {
+ ethh->ether_type = htons(ETH_P_IPV6);
+ }
+ ip6h->ip6_vfc = 6 << 4;
+ ip6h->ip6_vfc |= cm_node->tos >> 4;
+ ip6h->ip6_flow = cm_node->tos << 20;
+ ip6h->ip6_plen = htons(pktsize - sizeof(*ip6h));
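+		/* next header: TCP (protocol number 6) */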
+ ip6h->ip6_nxt = 6;
+ ip6h->ip6_hops = 128;
+ irdma_copy_ip_htonl(ip6h->ip6_src.__u6_addr.__u6_addr32,
+ cm_node->loc_addr);
+ irdma_copy_ip_htonl(ip6h->ip6_dst.__u6_addr.__u6_addr32,
+ cm_node->rem_addr);
+ }
+
+ tcph->th_sport = htons(cm_node->loc_port);
+ tcph->th_dport = htons(cm_node->rem_port);
+ tcph->th_seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
+
+ if (flags & SET_ACK) {
+ cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
+ tcph->th_ack = htonl(cm_node->tcp_cntxt.loc_ack_num);
+ tcph->th_flags |= TH_ACK;
+ } else {
+ tcph->th_ack = 0;
+ }
+
+ if (flags & SET_SYN) {
+ cm_node->tcp_cntxt.loc_seq_num++;
+ tcph->th_flags |= TH_SYN;
+ } else {
+ cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len;
+ }
+
+ if (flags & SET_FIN) {
+ cm_node->tcp_cntxt.loc_seq_num++;
+ tcph->th_flags |= TH_FIN;
+ }
+
+ if (flags & SET_RST)
+ tcph->th_flags |= TH_RST;
+
+ tcph->th_off = (u16)((sizeof(*tcph) + opts_len + 3) >> 2);
+ sqbuf->tcphlen = tcph->th_off << 2;
+ tcph->th_win = htons(cm_node->tcp_cntxt.rcv_wnd);
+ tcph->th_urp = 0;
+
+ if (opts_len) {
+ memcpy(buf, options->addr, opts_len);
+ buf += opts_len;
+ }
+
+ if (hdr_len) {
+ memcpy(buf, hdr->addr, hdr_len);
+ buf += hdr_len;
+ }
+
+ if (pdata && pdata->addr)
+ memcpy(buf, pdata->addr, pdata->size);
+
+ atomic_set(&sqbuf->refcount, 1);
+
+ irdma_debug_buf(vsi->dev, IRDMA_DEBUG_ILQ, "TRANSMIT ILQ BUFFER",
+ sqbuf->mem.va, sqbuf->totallen);
+
+ return sqbuf;
+}
+
+/**
+ * irdma_send_reset - Send RST packet
+ * @cm_node: connection's node
+ */
+int
+irdma_send_reset(struct irdma_cm_node *cm_node)
+{
+ struct irdma_puda_buf *sqbuf;
+ int flags = SET_RST | SET_ACK;
+
+ sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL, NULL, NULL,
+ flags);
+ if (!sqbuf)
+ return -ENOMEM;
+
+ irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+ "caller: %pS cm_node %p cm_id=%p accel=%d state=%d rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4\n",
+ __builtin_return_address(0), cm_node, cm_node->cm_id,
+ cm_node->accelerated, cm_node->state, cm_node->rem_port,
+ cm_node->loc_port, cm_node->rem_addr, cm_node->loc_addr);
+
+ return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 0,
+ 1);
+}
+
+/**
+ * irdma_active_open_err - send event for active side cm error
+ * @cm_node: connection's node
+ * @reset: Flag to send reset or not
+ */
+static void
+irdma_active_open_err(struct irdma_cm_node *cm_node, bool reset)
+{
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->cm_core->stats_connect_errs++;
+ if (reset) {
+ irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+ "cm_node=%p state=%d\n", cm_node, cm_node->state);
+ atomic_inc(&cm_node->refcnt);
+ irdma_send_reset(cm_node);
+ }
+
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_create_event(cm_node, IRDMA_CM_EVENT_ABORTED);
+}
+
+/**
+ * irdma_passive_open_err - handle passive side cm error
+ * @cm_node: connection's node
+ * @reset: send reset or just free cm_node
+ */
+static void
+irdma_passive_open_err(struct irdma_cm_node *cm_node, bool reset)
+{
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->cm_core->stats_passive_errs++;
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+ "cm_node=%p state =%d\n", cm_node, cm_node->state);
+ if (reset)
+ irdma_send_reset(cm_node);
+ else
+ irdma_rem_ref_cm_node(cm_node);
+}
+
+/**
+ * irdma_event_connect_error - to create connect error event
+ * @event: cm information for connect event
+ */
+static void
+irdma_event_connect_error(struct irdma_cm_event *event)
+{
+ struct irdma_qp *iwqp;
+ struct iw_cm_id *cm_id;
+
+ cm_id = event->cm_node->cm_id;
+ if (!cm_id)
+ return;
+
+ iwqp = cm_id->provider_data;
+
+ if (!iwqp || !iwqp->iwdev)
+ return;
+
+ iwqp->cm_id = NULL;
+ cm_id->provider_data = NULL;
+ irdma_send_cm_event(event->cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
+ -ECONNRESET);
+ irdma_rem_ref_cm_node(event->cm_node);
+}
+
+/**
+ * irdma_process_options - process options from TCP header
+ * @cm_node: connection's node
+ * @optionsloc: pointer to start of options
+ * @optionsize: size of all options
+ * @syn_pkt: flag if syn packet
+ */
+static int
+irdma_process_options(struct irdma_cm_node *cm_node, u8 *optionsloc,
+ u32 optionsize, u32 syn_pkt)
+{
+ u32 tmp;
+ u32 offset = 0;
+ union all_known_options *all_options;
+ char got_mss_option = 0;
+
+ while (offset < optionsize) {
+ all_options = (union all_known_options *)(optionsloc + offset);
+ switch (all_options->base.optionnum) {
+ case OPTION_NUM_EOL:
+ offset = optionsize;
+ break;
+ case OPTION_NUM_NONE:
+ offset += 1;
+ continue;
+ case OPTION_NUM_MSS:
+ irdma_debug(iwdev_to_idev(cm_node->iwdev),
+ IRDMA_DEBUG_CM,
+ "MSS Length: %d Offset: %d Size: %d\n",
+ all_options->mss.len, offset, optionsize);
+ got_mss_option = 1;
+ if (all_options->mss.len != 4)
+ return -EINVAL;
+ tmp = ntohs(all_options->mss.mss);
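+			/* reject an MSS that implies an MTU below the minimum */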
+ if ((cm_node->ipv4 &&
+ (tmp + IRDMA_MTU_TO_MSS_IPV4) < IRDMA_MIN_MTU_IPV4) ||
+ (!cm_node->ipv4 &&
+ (tmp + IRDMA_MTU_TO_MSS_IPV6) < IRDMA_MIN_MTU_IPV6))
+ return -EINVAL;
+ if (tmp < cm_node->tcp_cntxt.mss)
+ cm_node->tcp_cntxt.mss = tmp;
+ break;
+ case OPTION_NUM_WINDOW_SCALE:
+ cm_node->tcp_cntxt.snd_wscale =
+ all_options->windowscale.shiftcount;
+ break;
+ default:
+ irdma_debug(iwdev_to_idev(cm_node->iwdev),
+ IRDMA_DEBUG_CM,
+ "Unsupported TCP Option: %x\n",
+ all_options->base.optionnum);
+ break;
+ }
+ offset += all_options->base.len;
+ }
+ if (!got_mss_option && syn_pkt)
+ cm_node->tcp_cntxt.mss = IRDMA_CM_DEFAULT_MSS;
+
+ return 0;
+}
+
+/**
+ * irdma_handle_tcp_options - setup TCP context info after parsing TCP options
+ * @cm_node: connection's node
+ * @tcph: pointer tcp header
+ * @optionsize: size of options rcvd
+ * @passive: active or passive flag
+ */
+static int
+irdma_handle_tcp_options(struct irdma_cm_node *cm_node,
+ struct tcphdr *tcph, int optionsize,
+ int passive)
+{
+ u8 *optionsloc = (u8 *)&tcph[1];
+ int ret;
+
+ if (optionsize) {
+ ret = irdma_process_options(cm_node, optionsloc, optionsize,
+ (u32)tcph->th_flags & TH_SYN);
+ if (ret) {
+ irdma_debug(iwdev_to_idev(cm_node->iwdev),
+ IRDMA_DEBUG_CM,
+ "Node %p, Sending Reset\n", cm_node);
+ if (passive)
+ irdma_passive_open_err(cm_node, true);
+ else
+ irdma_active_open_err(cm_node, true);
+ return ret;
+ }
+ }
+
+ cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->th_win)
+ << cm_node->tcp_cntxt.snd_wscale;
+
+ if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd)
+ cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;
+
+ return 0;
+}
+
+/**
+ * irdma_build_mpa_v1 - build a MPA V1 frame
+ * @cm_node: connection's node
+ * @start_addr: address where to build frame
+ * @mpa_key: to do read0 or write0
+ */
+static void
+irdma_build_mpa_v1(struct irdma_cm_node *cm_node, void *start_addr,
+ u8 mpa_key)
+{
+ struct ietf_mpa_v1 *mpa_frame = start_addr;
+
+ switch (mpa_key) {
+ case MPA_KEY_REQUEST:
+ memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
+ break;
+ case MPA_KEY_REPLY:
+ memcpy(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
+ break;
+ default:
+ break;
+ }
+ mpa_frame->flags = IETF_MPA_FLAGS_CRC;
+ mpa_frame->rev = cm_node->mpa_frame_rev;
+ mpa_frame->priv_data_len = htons(cm_node->pdata.size);
+}
+
+/**
+ * irdma_build_mpa_v2 - build a MPA V2 frame
+ * @cm_node: connection's node
+ * @start_addr: buffer start address
+ * @mpa_key: to do read0 or write0
+ */
+static void
+irdma_build_mpa_v2(struct irdma_cm_node *cm_node, void *start_addr,
+ u8 mpa_key)
+{
+ struct ietf_mpa_v2 *mpa_frame = start_addr;
+ struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
+ u16 ctrl_ird, ctrl_ord;
+
+ /* initialize the upper 5 bytes of the frame */
+ irdma_build_mpa_v1(cm_node, start_addr, mpa_key);
+ mpa_frame->flags |= IETF_MPA_V2_FLAG;
+ if (cm_node->iwdev->iw_ooo) {
+ mpa_frame->flags |= IETF_MPA_FLAGS_MARKERS;
+ cm_node->rcv_mark_en = true;
+ }
+ mpa_frame->priv_data_len = cpu_to_be16(be16_to_cpu(mpa_frame->priv_data_len) +
+ IETF_RTR_MSG_SIZE);
+
+ /* initialize RTR msg */
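+	/* IETF_NO_IRD_ORD doubles as the largest encodable IRD/ORD value, so
+	 * larger local values are clamped to it
+	 */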
+ if (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) {
+ ctrl_ird = IETF_NO_IRD_ORD;
+ ctrl_ord = IETF_NO_IRD_ORD;
+ } else {
+ ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
+ IETF_NO_IRD_ORD :
+ cm_node->ird_size;
+ ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
+ IETF_NO_IRD_ORD :
+ cm_node->ord_size;
+ }
+ ctrl_ird |= IETF_PEER_TO_PEER;
+
+ switch (mpa_key) {
+ case MPA_KEY_REQUEST:
+ ctrl_ord |= IETF_RDMA0_WRITE;
+ ctrl_ord |= IETF_RDMA0_READ;
+ break;
+ case MPA_KEY_REPLY:
+ switch (cm_node->send_rdma0_op) {
+ case SEND_RDMA_WRITE_ZERO:
+ ctrl_ord |= IETF_RDMA0_WRITE;
+ break;
+ case SEND_RDMA_READ_ZERO:
+ ctrl_ord |= IETF_RDMA0_READ;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ rtr_msg->ctrl_ird = htons(ctrl_ird);
+ rtr_msg->ctrl_ord = htons(ctrl_ord);
+}
+
+/**
+ * irdma_cm_build_mpa_frame - build mpa frame for mpa version 1 or version 2
+ * @cm_node: connection's node
+ * @mpa: mpa data buffer
+ * @mpa_key: to do read0 or write0
+ */
+static int
+irdma_cm_build_mpa_frame(struct irdma_cm_node *cm_node,
+ struct irdma_kmem_info *mpa, u8 mpa_key)
+{
+ int hdr_len = 0;
+
+ switch (cm_node->mpa_frame_rev) {
+ case IETF_MPA_V1:
+ hdr_len = sizeof(struct ietf_mpa_v1);
+ irdma_build_mpa_v1(cm_node, mpa->addr, mpa_key);
+ break;
+ case IETF_MPA_V2:
+ hdr_len = sizeof(struct ietf_mpa_v2);
+ irdma_build_mpa_v2(cm_node, mpa->addr, mpa_key);
+ break;
+ default:
+ break;
+ }
+
+ return hdr_len;
+}
+
+/**
+ * irdma_send_mpa_request - active node send mpa request to passive node
+ * @cm_node: connection's node
+ */
+static int
+irdma_send_mpa_request(struct irdma_cm_node *cm_node)
+{
+ struct irdma_puda_buf *sqbuf;
+
+ cm_node->mpa_hdr.addr = &cm_node->mpa_v2_frame;
+ cm_node->mpa_hdr.size = irdma_cm_build_mpa_frame(cm_node,
+ &cm_node->mpa_hdr,
+ MPA_KEY_REQUEST);
+ if (!cm_node->mpa_hdr.size) {
+ irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+ "mpa size = %d\n", cm_node->mpa_hdr.size);
+ return -EINVAL;
+ }
+
+ sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL,
+ &cm_node->mpa_hdr,
+ &cm_node->pdata, SET_ACK);
+ if (!sqbuf)
+ return -ENOMEM;
+
+ return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1,
+ 0);
+}
+
+/**
+ * irdma_send_mpa_reject - send an MPA reject frame
+ * @cm_node: connection's node
+ * @pdata: reject data for connection
+ * @plen: length of reject data
+ */
+static int
+irdma_send_mpa_reject(struct irdma_cm_node *cm_node,
+ const void *pdata, u8 plen)
+{
+ struct irdma_puda_buf *sqbuf;
+ struct irdma_mpa_priv_info priv_info;
+
+ cm_node->mpa_hdr.addr = &cm_node->mpa_v2_frame;
+ cm_node->mpa_hdr.size = irdma_cm_build_mpa_frame(cm_node,
+ &cm_node->mpa_hdr,
+ MPA_KEY_REPLY);
+
+ cm_node->mpa_v2_frame.flags |= IETF_MPA_FLAGS_REJECT;
+ priv_info.addr = pdata;
+ priv_info.size = plen;
+
+ sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL,
+ &cm_node->mpa_hdr, &priv_info,
+ SET_ACK | SET_FIN);
+ if (!sqbuf)
+ return -ENOMEM;
+
+ cm_node->state = IRDMA_CM_STATE_FIN_WAIT1;
+
+ return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1,
+ 0);
+}
+
+/**
+ * irdma_negotiate_mpa_v2_ird_ord - negotiate MPAv2 IRD/ORD
+ * @cm_node: connection's node
+ * @buf: Data pointer
+ */
+static int
+irdma_negotiate_mpa_v2_ird_ord(struct irdma_cm_node *cm_node,
+ u8 *buf)
+{
+ struct ietf_mpa_v2 *mpa_v2_frame;
+ struct ietf_rtr_msg *rtr_msg;
+ u16 ird_size;
+ u16 ord_size;
+ u16 ctrl_ord;
+ u16 ctrl_ird;
+
+ mpa_v2_frame = (struct ietf_mpa_v2 *)buf;
+ rtr_msg = &mpa_v2_frame->rtr_msg;
+
+ /* parse rtr message */
+ ctrl_ord = ntohs(rtr_msg->ctrl_ord);
+ ctrl_ird = ntohs(rtr_msg->ctrl_ird);
+ ird_size = ctrl_ird & IETF_NO_IRD_ORD;
+ ord_size = ctrl_ord & IETF_NO_IRD_ORD;
+
+ if (!(ctrl_ird & IETF_PEER_TO_PEER))
+ return -EOPNOTSUPP;
+
+ if (ird_size == IETF_NO_IRD_ORD || ord_size == IETF_NO_IRD_ORD) {
+ cm_node->mpav2_ird_ord = IETF_NO_IRD_ORD;
+ goto negotiate_done;
+ }
+
+ if (cm_node->state != IRDMA_CM_STATE_MPAREQ_SENT) {
+ /* responder */
+ if (!ord_size && (ctrl_ord & IETF_RDMA0_READ))
+ cm_node->ird_size = 1;
+ if (cm_node->ord_size > ird_size)
+ cm_node->ord_size = ird_size;
+ } else {
+ /* initiator */
+ if (!ird_size && (ctrl_ord & IETF_RDMA0_READ))
+ /* Remote peer doesn't support RDMA0_READ */
+ return -EOPNOTSUPP;
+
+ if (cm_node->ord_size > ird_size)
+ cm_node->ord_size = ird_size;
+
+ if (cm_node->ird_size < ord_size)
+ /* no resources available */
+ return -EINVAL;
+ }
+
+negotiate_done:
+ if (ctrl_ord & IETF_RDMA0_READ)
+ cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
+ else if (ctrl_ord & IETF_RDMA0_WRITE)
+ cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
+ else
+ /* Not supported RDMA0 operation */
+ return -EOPNOTSUPP;
+
+ irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+ "MPAV2 Negotiated ORD: %d, IRD: %d\n", cm_node->ord_size,
+ cm_node->ird_size);
+ return 0;
+}
+
+/**
+ * irdma_parse_mpa - process an IETF MPA frame
+ * @cm_node: connection's node
+ * @buf: Data pointer
+ * @type: to return accept or reject
+ * @len: Len of mpa buffer
+ */
+static int
+irdma_parse_mpa(struct irdma_cm_node *cm_node, u8 *buf, u32 *type,
+ u32 len)
+{
+ struct ietf_mpa_v1 *mpa_frame;
+ int mpa_hdr_len, priv_data_len, ret;
+
+ *type = IRDMA_MPA_REQUEST_ACCEPT;
+
+ if (len < sizeof(struct ietf_mpa_v1)) {
+ irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+ "ietf buffer small (%x)\n", len);
+ return -EINVAL;
+ }
+
+ mpa_frame = (struct ietf_mpa_v1 *)buf;
+ mpa_hdr_len = sizeof(struct ietf_mpa_v1);
+ priv_data_len = ntohs(mpa_frame->priv_data_len);
+
+ if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) {
+ irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+ "private_data too big %d\n", priv_data_len);
+ return -EOVERFLOW;
+ }
+
+ if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) {
+ irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+ "unsupported mpa rev = %d\n", mpa_frame->rev);
+ return -EINVAL;
+ }
+
+ if (mpa_frame->rev > cm_node->mpa_frame_rev) {
+ irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+ "rev %d\n", mpa_frame->rev);
+ return -EINVAL;
+ }
+
+ cm_node->mpa_frame_rev = mpa_frame->rev;
+ if (cm_node->state != IRDMA_CM_STATE_MPAREQ_SENT) {
+ if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ,
+ IETF_MPA_KEY_SIZE)) {
+ irdma_debug(iwdev_to_idev(cm_node->iwdev),
+ IRDMA_DEBUG_CM,
+ "Unexpected MPA Key received\n");
+ return -EINVAL;
+ }
+ } else {
+ if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP,
+ IETF_MPA_KEY_SIZE)) {
+ irdma_debug(iwdev_to_idev(cm_node->iwdev),
+ IRDMA_DEBUG_CM,
+ "Unexpected MPA Key received\n");
+ return -EINVAL;
+ }
+ }
+
+ if (priv_data_len + mpa_hdr_len > len) {
+ irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+ "ietf buffer len(%x + %x != %x)\n", priv_data_len,
+ mpa_hdr_len, len);
+ return -EOVERFLOW;
+ }
+
+ if (len > IRDMA_MAX_CM_BUF) {
+ irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+ "ietf buffer large len = %d\n", len);
+ return -EOVERFLOW;
+ }
+
+ switch (mpa_frame->rev) {
+ case IETF_MPA_V2:
+ mpa_hdr_len += IETF_RTR_MSG_SIZE;
+ ret = irdma_negotiate_mpa_v2_ird_ord(cm_node, buf);
+ if (ret)
+ return ret;
+ break;
+ case IETF_MPA_V1:
+ default:
+ break;
+ }
+
+ memcpy(cm_node->pdata_buf, buf + mpa_hdr_len, priv_data_len);
+ cm_node->pdata.size = priv_data_len;
+
+ if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT)
+ *type = IRDMA_MPA_REQUEST_REJECT;
+
+ if (mpa_frame->flags & IETF_MPA_FLAGS_MARKERS)
+ cm_node->snd_mark_en = true;
+
+ return 0;
+}
+
+/**
+ * irdma_schedule_cm_timer
+ * @cm_node: connection's node
+ * @sqbuf: buffer to send
+ * @type: if it is send or close
+ * @send_retrans: if rexmits to be done
+ * @close_when_complete: is cm_node to be removed
+ *
+ * note - cm_node needs to be protected before calling this, i.e. the caller
+ * must hold a reference across the call:
+ *		atomic_inc(&cm_node->refcnt);
+ *		irdma_schedule_cm_timer(...);
+ *		irdma_rem_ref_cm_node(cm_node);
+ */
+int
+irdma_schedule_cm_timer(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *sqbuf,
+ enum irdma_timer_type type, int send_retrans,
+ int close_when_complete)
+{
+ struct irdma_sc_vsi *vsi = &cm_node->iwdev->vsi;
+ struct irdma_cm_core *cm_core = cm_node->cm_core;
+ struct irdma_timer_entry *new_send;
+ u32 was_timer_set;
+ unsigned long flags;
+
+ new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
+ if (!new_send) {
+ if (type != IRDMA_TIMER_TYPE_CLOSE)
+ irdma_free_sqbuf(vsi, sqbuf);
+ return -ENOMEM;
+ }
+
+ new_send->retrycount = IRDMA_DEFAULT_RETRYS;
+ new_send->retranscount = IRDMA_DEFAULT_RETRANS;
+ new_send->sqbuf = sqbuf;
+ new_send->timetosend = jiffies;
+ new_send->type = type;
+ new_send->send_retrans = send_retrans;
+ new_send->close_when_complete = close_when_complete;
+
+ if (type == IRDMA_TIMER_TYPE_CLOSE) {
+ new_send->timetosend += (HZ / 10);
+ if (cm_node->close_entry) {
+ kfree(new_send);
+ irdma_debug(iwdev_to_idev(cm_node->iwdev),
+ IRDMA_DEBUG_CM, "already close entry\n");
+ return -EINVAL;
+ }
+
+ cm_node->close_entry = new_send;
+ } else { /* type == IRDMA_TIMER_TYPE_SEND */
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ cm_node->send_entry = new_send;
+ atomic_inc(&cm_node->refcnt);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ new_send->timetosend = jiffies + IRDMA_RETRY_TIMEOUT;
+
+ atomic_inc(&sqbuf->refcount);
+ irdma_puda_send_buf(vsi->ilq, sqbuf);
+ if (!send_retrans) {
+ irdma_cleanup_retrans_entry(cm_node);
+ if (close_when_complete)
+ irdma_rem_ref_cm_node(cm_node);
+ return 0;
+ }
+ }
+
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ was_timer_set = timer_pending(&cm_core->tcp_timer);
+
+ if (!was_timer_set) {
+ cm_core->tcp_timer.expires = new_send->timetosend;
+ add_timer(&cm_core->tcp_timer);
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ return 0;
+}
+
+/**
+ * irdma_retrans_expired - Could not rexmit the packet
+ * @cm_node: connection's node
+ */
+static void
+irdma_retrans_expired(struct irdma_cm_node *cm_node)
+{
+ enum irdma_cm_node_state state = cm_node->state;
+
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ switch (state) {
+ case IRDMA_CM_STATE_SYN_RCVD:
+ case IRDMA_CM_STATE_CLOSING:
+ irdma_rem_ref_cm_node(cm_node);
+ break;
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ case IRDMA_CM_STATE_LAST_ACK:
+ irdma_send_reset(cm_node);
+ break;
+ default:
+ atomic_inc(&cm_node->refcnt);
+ irdma_send_reset(cm_node);
+ irdma_create_event(cm_node, IRDMA_CM_EVENT_ABORTED);
+ break;
+ }
+}
+
+/**
+ * irdma_handle_close_entry - for handling retry/timeouts
+ * @cm_node: connection's node
+ * @rem_node: flag to remove the cm_node
+ */
+static void
+irdma_handle_close_entry(struct irdma_cm_node *cm_node,
+ u32 rem_node)
+{
+ struct irdma_timer_entry *close_entry = cm_node->close_entry;
+ struct irdma_qp *iwqp;
+ unsigned long flags;
+
+ if (!close_entry)
+ return;
+ iwqp = (struct irdma_qp *)close_entry->sqbuf;
+ if (iwqp) {
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->cm_id) {
+ iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
+ iwqp->hw_iwarp_state = IRDMA_QP_STATE_ERROR;
+ iwqp->last_aeq = IRDMA_AE_RESET_SENT;
+ iwqp->ibqp_state = IB_QPS_ERR;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ irdma_cm_disconn(iwqp);
+ } else {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ }
+ } else if (rem_node) {
+ /* TIME_WAIT state */
+ irdma_rem_ref_cm_node(cm_node);
+ }
+
+ kfree(close_entry);
+ cm_node->close_entry = NULL;
+}
+
+/**
+ * irdma_cm_timer_tick - system's timer expired callback
+ * @t: Pointer to timer_list
+ */
+static void
+irdma_cm_timer_tick(struct timer_list *t)
+{
+ unsigned long nexttimeout = jiffies + IRDMA_LONG_TIME;
+ struct irdma_cm_node *cm_node;
+ struct irdma_timer_entry *send_entry, *close_entry;
+ struct list_head *list_core_temp;
+ struct list_head *list_node;
+ struct irdma_cm_core *cm_core = from_timer(cm_core, t, tcp_timer);
+ struct irdma_sc_vsi *vsi;
+ u32 settimer = 0;
+ unsigned long timetosend;
+ unsigned long flags;
+ struct list_head timer_list;
+
+ INIT_LIST_HEAD(&timer_list);
+
+ rcu_read_lock();
+ irdma_timer_list_prep(cm_core, &timer_list);
+ rcu_read_unlock();
+
+ list_for_each_safe(list_node, list_core_temp, &timer_list) {
+ cm_node = container_of(list_node, struct irdma_cm_node,
+ timer_entry);
+ close_entry = cm_node->close_entry;
+
+ if (close_entry) {
+ if (time_after(close_entry->timetosend, jiffies)) {
+ if (nexttimeout > close_entry->timetosend ||
+ !settimer) {
+ nexttimeout = close_entry->timetosend;
+ settimer = 1;
+ }
+ } else {
+ irdma_handle_close_entry(cm_node, 1);
+ }
+ }
+
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+
+ send_entry = cm_node->send_entry;
+ if (!send_entry)
+ goto done;
+ if (time_after(send_entry->timetosend, jiffies)) {
+ if (cm_node->state != IRDMA_CM_STATE_OFFLOADED) {
+ if (nexttimeout > send_entry->timetosend ||
+ !settimer) {
+ nexttimeout = send_entry->timetosend;
+ settimer = 1;
+ }
+ } else {
+ irdma_free_retrans_entry(cm_node);
+ }
+ goto done;
+ }
+
+ if (cm_node->state == IRDMA_CM_STATE_OFFLOADED ||
+ cm_node->state == IRDMA_CM_STATE_CLOSED) {
+ irdma_free_retrans_entry(cm_node);
+ goto done;
+ }
+
+ if (!send_entry->retranscount || !send_entry->retrycount) {
+ irdma_free_retrans_entry(cm_node);
+
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock,
+ flags);
+ irdma_retrans_expired(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ goto done;
+ }
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+
+ vsi = &cm_node->iwdev->vsi;
+ if (!cm_node->ack_rcvd) {
+ atomic_inc(&send_entry->sqbuf->refcount);
+ irdma_puda_send_buf(vsi->ilq, send_entry->sqbuf);
+ cm_node->cm_core->stats_pkt_retrans++;
+ }
+
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ if (send_entry->send_retrans) {
+ send_entry->retranscount--;
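+			/* exponential backoff: the retransmit timeout doubles
+			 * on each attempt, capped at IRDMA_MAX_TIMEOUT
+			 */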
+ timetosend = (IRDMA_RETRY_TIMEOUT <<
+ (IRDMA_DEFAULT_RETRANS -
+ send_entry->retranscount));
+
+ send_entry->timetosend = jiffies +
+ min(timetosend, IRDMA_MAX_TIMEOUT);
+ if (nexttimeout > send_entry->timetosend || !settimer) {
+ nexttimeout = send_entry->timetosend;
+ settimer = 1;
+ }
+ } else {
+ int close_when_complete;
+
+ close_when_complete = send_entry->close_when_complete;
+ irdma_free_retrans_entry(cm_node);
+ if (close_when_complete)
+ irdma_rem_ref_cm_node(cm_node);
+ }
+done:
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ irdma_rem_ref_cm_node(cm_node);
+ }
+
+ if (settimer) {
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ if (!timer_pending(&cm_core->tcp_timer)) {
+ cm_core->tcp_timer.expires = nexttimeout;
+ add_timer(&cm_core->tcp_timer);
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+ }
+}
+
+/**
+ * irdma_send_syn - send SYN packet
+ * @cm_node: connection's node
+ * @sendack: flag to set ACK bit or not
+ */
+int
+irdma_send_syn(struct irdma_cm_node *cm_node, u32 sendack)
+{
+ struct irdma_puda_buf *sqbuf;
+ int flags = SET_SYN;
+ char optionsbuf[sizeof(struct option_mss) +
+ sizeof(struct option_windowscale) +
+ sizeof(struct option_base) + TCP_OPTIONS_PADDING];
+ struct irdma_kmem_info opts;
+ int optionssize = 0;
+ /* Sending MSS option */
+ union all_known_options *options;
+
+ opts.addr = optionsbuf;
+ if (!cm_node)
+ return -EINVAL;
+
+ options = (union all_known_options *)&optionsbuf[optionssize];
+ options->mss.optionnum = OPTION_NUM_MSS;
+ options->mss.len = sizeof(struct option_mss);
+ options->mss.mss = htons(cm_node->tcp_cntxt.mss);
+ optionssize += sizeof(struct option_mss);
+
+ options = (union all_known_options *)&optionsbuf[optionssize];
+ options->windowscale.optionnum = OPTION_NUM_WINDOW_SCALE;
+ options->windowscale.len = sizeof(struct option_windowscale);
+ options->windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale;
+ optionssize += sizeof(struct option_windowscale);
+ options = (union all_known_options *)&optionsbuf[optionssize];
+ options->eol = OPTION_NUM_EOL;
+ optionssize += 1;
+
+ if (sendack)
+ flags |= SET_ACK;
+
+ opts.size = optionssize;
+
+ sqbuf = cm_node->cm_core->form_cm_frame(cm_node, &opts, NULL, NULL,
+ flags);
+ if (!sqbuf)
+ return -ENOMEM;
+
+ return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1,
+ 0);
+}
+
+/**
+ * irdma_send_ack - Send ACK packet
+ * @cm_node: connection's node
+ */
+void
+irdma_send_ack(struct irdma_cm_node *cm_node)
+{
+ struct irdma_puda_buf *sqbuf;
+ struct irdma_sc_vsi *vsi = &cm_node->iwdev->vsi;
+
+ sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL, NULL, NULL,
+ SET_ACK);
+ if (sqbuf)
+ irdma_puda_send_buf(vsi->ilq, sqbuf);
+}
+
+/**
+ * irdma_send_fin - Send FIN pkt
+ * @cm_node: connection's node
+ */
+static int
+irdma_send_fin(struct irdma_cm_node *cm_node)
+{
+ struct irdma_puda_buf *sqbuf;
+
+ sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL, NULL, NULL,
+ SET_ACK | SET_FIN);
+ if (!sqbuf)
+ return -ENOMEM;
+
+ return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1,
+ 0);
+}
+
+/**
+ * irdma_find_listener - find a cm node listening on this addr-port pair
+ * @cm_core: cm's core
+ * @dst_addr: listener ip addr
+ * @dst_port: listener tcp port num
+ * @vlan_id: virtual LAN ID
+ * @listener_state: state to match with listen node's
+ */
+static struct irdma_cm_listener *
+irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
+ u16 vlan_id, enum irdma_cm_listener_state listener_state)
+{
+ struct irdma_cm_listener *listen_node;
+ static const u32 ip_zero[4] = {0, 0, 0, 0};
+ u32 listen_addr[4];
+ u16 listen_port;
+ unsigned long flags;
+
+ /* walk list and find cm_node associated with this session ID */
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_for_each_entry(listen_node, &cm_core->listen_list, list) {
+ memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
+ listen_port = listen_node->loc_port;
+ if (listen_port != dst_port ||
+ !(listener_state & listen_node->listener_state))
+ continue;
+ /* compare node pair, return node handle if a match */
+ if (!memcmp(listen_addr, ip_zero, sizeof(listen_addr)) ||
+ (!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) &&
+ vlan_id == listen_node->vlan_id)) {
+ atomic_inc(&listen_node->refcnt);
+ spin_unlock_irqrestore(&cm_core->listen_list_lock,
+ flags);
+ return listen_node;
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
+ return NULL;
+}
+
+/**
+ * irdma_del_multiple_qhash - Remove qhash and child listens
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_parent_listen_node: The parent listen node
+ */
+static int
+irdma_del_multiple_qhash(struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_listener *cm_parent_listen_node)
+{
+ struct irdma_cm_listener *child_listen_node;
+ struct list_head *pos, *tpos;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
+ list_for_each_safe(pos, tpos,
+ &cm_parent_listen_node->child_listen_list) {
+ child_listen_node = list_entry(pos, struct irdma_cm_listener,
+ child_listen_list);
+ if (child_listen_node->ipv4)
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+ "removing child listen for IP=%pI4, port=%d, vlan=%d\n",
+ child_listen_node->loc_addr,
+ child_listen_node->loc_port,
+ child_listen_node->vlan_id);
+ else
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+ "removing child listen for IP=%pI6, port=%d, vlan=%d\n",
+ child_listen_node->loc_addr,
+ child_listen_node->loc_port,
+ child_listen_node->vlan_id);
+ list_del(pos);
+ memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
+ sizeof(cm_info->loc_addr));
+ cm_info->vlan_id = child_listen_node->vlan_id;
+ if (child_listen_node->qhash_set) {
+ ret = irdma_manage_qhash(iwdev, cm_info,
+ IRDMA_QHASH_TYPE_TCP_SYN,
+ IRDMA_QHASH_MANAGE_TYPE_DELETE,
+ NULL, false);
+ child_listen_node->qhash_set = false;
+ } else {
+ ret = 0;
+ }
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+ "Child listen node freed = %p\n",
+ child_listen_node);
+ kfree(child_listen_node);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
+ }
+ spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
+
+ return ret;
+}
+
+/**
+ * irdma_netdev_vlan_ipv6 - Gets the netdev and mac
+ * @addr: local IPv6 address
+ * @vlan_id: vlan id for the given IPv6 address
+ * @mac: mac address for the given IPv6 address
+ *
+ * Returns the ifnet of the IPv6 address and also sets the
+ * vlan id and mac for that address.
+ */
+struct ifnet *
+irdma_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
+{
+ struct ifnet *ip_dev = NULL;
+ struct in6_addr laddr6;
+
+ irdma_copy_ip_htonl(laddr6.__u6_addr.__u6_addr32, addr);
+ if (vlan_id)
+ *vlan_id = 0xFFFF; /* Match rdma_vlan_dev_vlan_id() */
+ if (mac)
+ eth_zero_addr(mac);
+
+ ip_dev = ip6_ifp_find(&init_net, laddr6, 0);
+ if (ip_dev) {
+ if (vlan_id)
+ *vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
+ if (ip_dev->if_addr && ip_dev->if_addr->ifa_addr && mac)
+ ether_addr_copy(mac, IF_LLADDR(ip_dev));
+ }
+
+ return ip_dev;
+}
+
+/**
+ * irdma_get_vlan_ipv4 - Returns the vlan_id for IPv4 address
+ * @addr: local IPv4 address
+ */
+u16
+irdma_get_vlan_ipv4(u32 *addr)
+{
+ struct ifnet *netdev;
+ u16 vlan_id = 0xFFFF;
+
+ netdev = ip_ifp_find(&init_net, htonl(addr[0]));
+ if (netdev) {
+ vlan_id = rdma_vlan_dev_vlan_id(netdev);
+ dev_put(netdev);
+ }
+
+ return vlan_id;
+}
+
+/**
+ * irdma_add_mqh_6 - Adds multiple qhashes for IPv6
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_parent_listen_node: The parent listen node
+ *
+ * Adds a qhash and a child listen node for every IPv6 address
+ * on the adapter and adds the associated qhash filter
+ */
+static int
+irdma_add_mqh_6(struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_listener *cm_parent_listen_node)
+{
+ struct ifnet *ip_dev;
+ struct ifaddr *ifp;
+ struct irdma_cm_listener *child_listen_node;
+ unsigned long flags;
+ int ret = 0;
+
+ IFNET_RLOCK();
+ IRDMA_TAILQ_FOREACH((ip_dev), &V_ifnet, if_link) {
+ if (!(ip_dev->if_flags & IFF_UP))
+ continue;
+
+ if (((rdma_vlan_dev_vlan_id(ip_dev) >= VLAN_N_VID) ||
+ (rdma_vlan_dev_real_dev(ip_dev) != iwdev->netdev)) &&
+ ip_dev != iwdev->netdev)
+ continue;
+
+ if_addr_rlock(ip_dev);
+ IRDMA_TAILQ_FOREACH(ifp, &ip_dev->if_addrhead, ifa_link) {
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+ "IP=%pI6, vlan_id=%d, MAC=%pM\n",
+ &((struct sockaddr_in6 *)ifp->ifa_addr)->sin6_addr, rdma_vlan_dev_vlan_id(ip_dev),
+ IF_LLADDR(ip_dev));
+ if (((struct sockaddr_in6 *)ifp->ifa_addr)->sin6_family != AF_INET6)
+ continue;
+ child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL);
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+ "Allocating child listener %p\n",
+ child_listen_node);
+ if (!child_listen_node) {
+ irdma_debug(iwdev_to_idev(iwdev),
+ IRDMA_DEBUG_CM,
+ "listener memory allocation\n");
+ ret = -ENOMEM;
+ if_addr_runlock(ip_dev);
+ goto exit;
+ }
+
+ memcpy(child_listen_node, cm_parent_listen_node,
+ sizeof(*child_listen_node));
+ cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
+ child_listen_node->vlan_id = cm_info->vlan_id;
+ irdma_copy_ip_ntohl(child_listen_node->loc_addr,
+ ((struct sockaddr_in6 *)ifp->ifa_addr)->sin6_addr.__u6_addr.__u6_addr32);
+ memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
+ sizeof(cm_info->loc_addr));
+ ret = irdma_manage_qhash(iwdev, cm_info,
+ IRDMA_QHASH_TYPE_TCP_SYN,
+ IRDMA_QHASH_MANAGE_TYPE_ADD,
+ NULL, true);
+ if (ret) {
+ kfree(child_listen_node);
+ continue;
+ }
+
+ child_listen_node->qhash_set = true;
+ spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
+ list_add(&child_listen_node->child_listen_list,
+ &cm_parent_listen_node->child_listen_list);
+ spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
+ }
+ if_addr_runlock(ip_dev);
+ }
+exit:
+ IFNET_RUNLOCK();
+
+ return ret;
+}
+
+/**
+ * irdma_add_mqh_4 - Adds multiple qhashes for IPv4
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_parent_listen_node: The parent listen node
+ *
+ * Adds a qhash and a child listen node for every IPv4 address
+ * on the adapter and adds the associated qhash filter
+ */
+static int
+irdma_add_mqh_4(struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_listener *cm_parent_listen_node)
+{
+ struct ifnet *ip_dev;
+ struct irdma_cm_listener *child_listen_node;
+ unsigned long flags;
+ struct ifaddr *ifa;
+ int ret = 0;
+
+ IFNET_RLOCK();
+ IRDMA_TAILQ_FOREACH((ip_dev), &V_ifnet, if_link) {
+ if (!(ip_dev->if_flags & IFF_UP))
+ continue;
+
+ if (((rdma_vlan_dev_vlan_id(ip_dev) >= VLAN_N_VID) ||
+ (rdma_vlan_dev_real_dev(ip_dev) != iwdev->netdev)) &&
+ ip_dev != iwdev->netdev)
+ continue;
+
+ if_addr_rlock(ip_dev);
+ IRDMA_TAILQ_FOREACH(ifa, &ip_dev->if_addrhead, ifa_link) {
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+			    "Allocating child CM Listener for IP=%pI4, vlan_id=%d, MAC=%pM\n",
+ &ifa->ifa_addr,
+ rdma_vlan_dev_vlan_id(ip_dev), IF_LLADDR(ip_dev));
+ if (((struct sockaddr_in *)ifa->ifa_addr)->sin_family != AF_INET)
+ continue;
+ child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+ "Allocating child listener %p\n",
+ child_listen_node);
+ if (!child_listen_node) {
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+ "listener memory allocation\n");
+ if_addr_runlock(ip_dev);
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ memcpy(child_listen_node, cm_parent_listen_node,
+ sizeof(*child_listen_node));
+ child_listen_node->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
+ cm_info->vlan_id = child_listen_node->vlan_id;
+ child_listen_node->loc_addr[0] =
+ ntohl(((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr);
+ memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
+ sizeof(cm_info->loc_addr));
+ ret = irdma_manage_qhash(iwdev, cm_info,
+ IRDMA_QHASH_TYPE_TCP_SYN,
+ IRDMA_QHASH_MANAGE_TYPE_ADD,
+ NULL, true);
+ if (ret) {
+ kfree(child_listen_node);
+ cm_parent_listen_node->cm_core
+ ->stats_listen_nodes_created--;
+ continue;
+ }
+
+ child_listen_node->qhash_set = true;
+ spin_lock_irqsave(&iwdev->cm_core.listen_list_lock,
+ flags);
+ list_add(&child_listen_node->child_listen_list,
+ &cm_parent_listen_node->child_listen_list);
+ spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
+ }
+ if_addr_runlock(ip_dev);
+ }
+exit:
+ IFNET_RUNLOCK();
+
+ return ret;
+}
+
+/**
+ * irdma_add_mqh - Adds multiple qhashes
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_listen_node: The parent listen node
+ */
+static int
+irdma_add_mqh(struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_listener *cm_listen_node)
+{
+ int err;
+ VNET_ITERATOR_DECL(vnet_iter);
+
+ VNET_LIST_RLOCK();
+ VNET_FOREACH(vnet_iter) {
+ IFNET_RLOCK();
+ CURVNET_SET_QUIET(vnet_iter);
+
+ if (cm_info->ipv4)
+ err = irdma_add_mqh_4(iwdev, cm_info, cm_listen_node);
+ else
+ err = irdma_add_mqh_6(iwdev, cm_info, cm_listen_node);
+ CURVNET_RESTORE();
+ IFNET_RUNLOCK();
+ }
+ VNET_LIST_RUNLOCK();
+
+ return err;
+}
+
+/**
+ * irdma_reset_list_prep - add connection nodes slated for reset to list
+ * @cm_core: cm's core
+ * @listener: pointer to listener node
+ * @reset_list: a list to which matching cm_nodes will be added
+ */
+static void
+irdma_reset_list_prep(struct irdma_cm_core *cm_core,
+ struct irdma_cm_listener *listener,
+ struct list_head *reset_list)
+{
+ struct irdma_cm_node *cm_node;
+ int bkt;
+
+ HASH_FOR_EACH_RCU(cm_core->cm_hash_tbl, bkt, cm_node, list) {
+ if (cm_node->listener == listener &&
+ !cm_node->accelerated &&
+ atomic_inc_not_zero(&cm_node->refcnt))
+ list_add(&cm_node->reset_entry, reset_list);
+ }
+}
+
+/**
+ * irdma_dec_refcnt_listen - delete listener and associated cm nodes
+ * @cm_core: cm's core
+ * @listener: pointer to listener node
+ * @free_hanging_nodes: to free associated cm_nodes
+ * @apbvt_del: flag to delete the apbvt
+ */
+static int
+irdma_dec_refcnt_listen(struct irdma_cm_core *cm_core,
+ struct irdma_cm_listener *listener,
+ int free_hanging_nodes, bool apbvt_del)
+{
+ struct list_head *list_pos;
+ struct list_head *list_temp;
+ struct irdma_cm_node *cm_node;
+ struct list_head reset_list;
+ struct irdma_cm_info nfo;
+ enum irdma_cm_node_state old_state;
+ unsigned long flags;
+ int err;
+
+ /* free non-accelerated child nodes for this listener */
+ INIT_LIST_HEAD(&reset_list);
+ if (free_hanging_nodes) {
+ rcu_read_lock();
+ irdma_reset_list_prep(cm_core, listener, &reset_list);
+ rcu_read_unlock();
+ }
+
+ list_for_each_safe(list_pos, list_temp, &reset_list) {
+ cm_node = container_of(list_pos, struct irdma_cm_node,
+ reset_entry);
+ if (cm_node->state >= IRDMA_CM_STATE_FIN_WAIT1) {
+ irdma_rem_ref_cm_node(cm_node);
+ continue;
+ }
+
+ irdma_cleanup_retrans_entry(cm_node);
+ err = irdma_send_reset(cm_node);
+ if (err) {
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_debug(iwdev_to_idev(cm_node->iwdev),
+ IRDMA_DEBUG_CM, "send reset failed\n");
+ } else {
+ old_state = cm_node->state;
+ cm_node->state = IRDMA_CM_STATE_LISTENER_DESTROYED;
+ if (old_state != IRDMA_CM_STATE_MPAREQ_RCVD)
+ irdma_rem_ref_cm_node(cm_node);
+ }
+ }
+
+ if (atomic_dec_and_test(&listener->refcnt)) {
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_del(&listener->list);
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
+ if (apbvt_del)
+ irdma_del_apbvt(listener->iwdev,
+ listener->apbvt_entry);
+ memcpy(nfo.loc_addr, listener->loc_addr, sizeof(nfo.loc_addr));
+ nfo.loc_port = listener->loc_port;
+ nfo.ipv4 = listener->ipv4;
+ nfo.vlan_id = listener->vlan_id;
+ nfo.user_pri = listener->user_pri;
+ nfo.qh_qpid = listener->iwdev->vsi.ilq->qp_id;
+
+ if (!list_empty(&listener->child_listen_list)) {
+ irdma_del_multiple_qhash(listener->iwdev, &nfo,
+ listener);
+ } else {
+ if (listener->qhash_set)
+ irdma_manage_qhash(listener->iwdev,
+ &nfo,
+ IRDMA_QHASH_TYPE_TCP_SYN,
+ IRDMA_QHASH_MANAGE_TYPE_DELETE,
+ NULL, false);
+ }
+
+ cm_core->stats_listen_destroyed++;
+ cm_core->stats_listen_nodes_destroyed++;
+ irdma_debug(iwdev_to_idev(listener->iwdev), IRDMA_DEBUG_CM,
+ "loc_port=0x%04x loc_addr=%pI4 cm_listen_node=%p cm_id=%p qhash_set=%d vlan_id=%d apbvt_del=%d\n",
+ listener->loc_port, listener->loc_addr, listener, listener->cm_id, listener->qhash_set,
+ listener->vlan_id, apbvt_del);
+ kfree(listener);
+ listener = NULL;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * irdma_cm_del_listen - delete a listener
+ * @cm_core: cm's core
+ * @listener: passive connection's listener
+ * @apbvt_del: flag to delete apbvt
+ */
+static int
+irdma_cm_del_listen(struct irdma_cm_core *cm_core,
+ struct irdma_cm_listener *listener,
+ bool apbvt_del)
+{
+ listener->listener_state = IRDMA_CM_LISTENER_PASSIVE_STATE;
+ listener->cm_id = NULL;
+
+ return irdma_dec_refcnt_listen(cm_core, listener, 1, apbvt_del);
+}
+
+/**
+ * irdma_find_node - find a cm node that matches the reference cm node
+ * @cm_core: cm's core
+ * @rem_port: remote tcp port num
+ * @rem_addr: remote ip addr
+ * @loc_port: local tcp port num
+ * @loc_addr: local ip addr
+ * @vlan_id: local VLAN ID
+ */
+struct irdma_cm_node *
+irdma_find_node(struct irdma_cm_core *cm_core,
+ u16 rem_port, u32 *rem_addr, u16 loc_port,
+ u32 *loc_addr, u16 vlan_id)
+{
+ struct irdma_cm_node *cm_node;
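+ /* the hash key packs the remote port into the upper 16 bits and the local port into the lower 16 */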
+ u32 key = (rem_port << 16) | loc_port;
+
+ rcu_read_lock();
+ HASH_FOR_EACH_POSSIBLE_RCU(cm_core->cm_hash_tbl, cm_node, list, key) {
+ if (cm_node->vlan_id == vlan_id &&
+ cm_node->loc_port == loc_port && cm_node->rem_port == rem_port &&
+ !memcmp(cm_node->loc_addr, loc_addr, sizeof(cm_node->loc_addr)) &&
+ !memcmp(cm_node->rem_addr, rem_addr, sizeof(cm_node->rem_addr))) {
+ if (!atomic_inc_not_zero(&cm_node->refcnt))
+ goto exit;
+ rcu_read_unlock();
+ return cm_node;
+ }
+ }
+
+exit:
+ rcu_read_unlock();
+
+ /* no owner node */
+ return NULL;
+}
+
+/**
+ * irdma_add_hte_node - add a cm node to the hash table
+ * @cm_core: cm's core
+ * @cm_node: connection's node
+ */
+static void
+irdma_add_hte_node(struct irdma_cm_core *cm_core,
+ struct irdma_cm_node *cm_node)
+{
+ unsigned long flags;
+ u32 key = (cm_node->rem_port << 16) | cm_node->loc_port;
+
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ HASH_ADD_RCU(cm_core->cm_hash_tbl, &cm_node->list, key);
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+}
+
+/**
+ * irdma_ipv4_is_lpb - check if loopback
+ * @loc_addr: local addr to compare
+ * @rem_addr: remote address
+ */
+bool
+irdma_ipv4_is_lpb(u32 loc_addr, u32 rem_addr)
+{
+ return ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr);
+}
+
+/**
+ * irdma_ipv6_is_lpb - check if loopback
+ * @loc_addr: local addr to compare
+ * @rem_addr: remote address
+ */
+bool
+irdma_ipv6_is_lpb(u32 *loc_addr, u32 *rem_addr)
+{
+ struct in6_addr raddr6;
+
+ irdma_copy_ip_htonl(raddr6.__u6_addr.__u6_addr32, rem_addr);
+
+ return !memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6);
+}
+
+/**
+ * irdma_cm_create_ah - create a cm address handle
+ * @cm_node: The connection manager node to create AH for
+ * @wait: Provides option to wait for ah creation or not
+ */
+static int
+irdma_cm_create_ah(struct irdma_cm_node *cm_node, bool wait)
+{
+ struct irdma_ah_info ah_info = {0};
+ struct irdma_device *iwdev = cm_node->iwdev;
+
+ ether_addr_copy(ah_info.mac_addr, IF_LLADDR(iwdev->netdev));
+
+ ah_info.hop_ttl = 0x40;
+ ah_info.tc_tos = cm_node->tos;
+ ah_info.vsi = &iwdev->vsi;
+
+ if (cm_node->ipv4) {
+ ah_info.ipv4_valid = true;
+ ah_info.dest_ip_addr[0] = cm_node->rem_addr[0];
+ ah_info.src_ip_addr[0] = cm_node->loc_addr[0];
+ ah_info.do_lpbk = irdma_ipv4_is_lpb(ah_info.src_ip_addr[0],
+ ah_info.dest_ip_addr[0]);
+ } else {
+ memcpy(ah_info.dest_ip_addr, cm_node->rem_addr,
+ sizeof(ah_info.dest_ip_addr));
+ memcpy(ah_info.src_ip_addr, cm_node->loc_addr,
+ sizeof(ah_info.src_ip_addr));
+ ah_info.do_lpbk = irdma_ipv6_is_lpb(ah_info.src_ip_addr,
+ ah_info.dest_ip_addr);
+ }
+
+ ah_info.vlan_tag = cm_node->vlan_id;
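+ /* if the connection has a VLAN, insert the tag and encode the user priority in the PCP bits */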
+ if (cm_node->vlan_id < VLAN_N_VID) {
+ ah_info.insert_vlan_tag = 1;
+ ah_info.vlan_tag |= cm_node->user_pri << VLAN_PRIO_SHIFT;
+ }
+
+ ah_info.dst_arpindex =
+ irdma_arp_table(iwdev->rf, ah_info.dest_ip_addr,
+ NULL, IRDMA_ARP_RESOLVE);
+
+ if (irdma_puda_create_ah(&iwdev->rf->sc_dev, &ah_info, wait,
+ IRDMA_PUDA_RSRC_TYPE_ILQ, cm_node,
+ &cm_node->ah))
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * irdma_cm_free_ah - free a cm address handle
+ * @cm_node: The connection manager node to free the AH for
+ */
+static void
+irdma_cm_free_ah(struct irdma_cm_node *cm_node)
+{
+ struct irdma_device *iwdev = cm_node->iwdev;
+
+ irdma_puda_free_ah(&iwdev->rf->sc_dev, cm_node->ah);
+ cm_node->ah = NULL;
+}
+
+/**
+ * irdma_make_cm_node - create a new instance of a cm node
+ * @cm_core: cm's core
+ * @iwdev: iwarp device structure
+ * @cm_info: quad info for connection
+ * @listener: passive connection's listener
+ */
+static struct irdma_cm_node *
+irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_listener *listener)
+{
+ struct irdma_cm_node *cm_node;
+ int arpindex;
+ struct ifnet *netdev = iwdev->netdev;
+
+ /* create an hte and cm_node for this instance */
+ cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
+ if (!cm_node)
+ return NULL;
+
+ /* set our node specific transport info */
+ cm_node->ipv4 = cm_info->ipv4;
+ cm_node->vlan_id = cm_info->vlan_id;
+ if (cm_node->vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
+ cm_node->vlan_id = 0;
+ cm_node->tos = cm_info->tos;
+ cm_node->user_pri = cm_info->user_pri;
+ if (listener) {
+ if (listener->tos != cm_info->tos)
+ irdma_dev_warn(
+ &iwdev->rf->sc_dev,
+ "application TOS[%d] and remote client TOS[%d] mismatch\n",
+ listener->tos, cm_info->tos);
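+ /* DSCP mode inherits the listener's user priority; otherwise derive it from the larger TOS value */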
+ if (iwdev->vsi.dscp_mode) {
+ cm_node->user_pri = listener->user_pri;
+ } else {
+ cm_node->tos = max(listener->tos, cm_info->tos);
+ cm_node->user_pri = rt_tos2priority(cm_node->tos);
+ }
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_DCB,
+ "listener: TOS:[%d] UP:[%d]\n", cm_node->tos,
+ cm_node->user_pri);
+ }
+ memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));
+ memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr));
+ cm_node->loc_port = cm_info->loc_port;
+ cm_node->rem_port = cm_info->rem_port;
+
+ cm_node->mpa_frame_rev = IRDMA_CM_DEFAULT_MPA_VER;
+ cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
+ cm_node->iwdev = iwdev;
+ cm_node->dev = &iwdev->rf->sc_dev;
+
+ cm_node->ird_size = cm_node->dev->hw_attrs.max_hw_ird;
+ cm_node->ord_size = cm_node->dev->hw_attrs.max_hw_ord;
+
+ cm_node->listener = listener;
+ cm_node->cm_id = cm_info->cm_id;
+ ether_addr_copy(cm_node->loc_mac, IF_LLADDR(netdev));
+ spin_lock_init(&cm_node->retrans_list_lock);
+ cm_node->ack_rcvd = false;
+
+ init_completion(&cm_node->establish_comp);
+ atomic_set(&cm_node->refcnt, 1);
+ /* associate our parent CM core */
+ cm_node->cm_core = cm_core;
+ cm_node->tcp_cntxt.loc_id = IRDMA_CM_DEFAULT_LOCAL_ID;
+ cm_node->tcp_cntxt.rcv_wscale = iwdev->rcv_wscale;
+ cm_node->tcp_cntxt.rcv_wnd = iwdev->rcv_wnd >> cm_node->tcp_cntxt.rcv_wscale;
+ kc_set_loc_seq_num_mss(cm_node);
+
+ arpindex = irdma_resolve_neigh_lpb_chk(iwdev, cm_node, cm_info);
+ if (arpindex < 0)
+ goto err;
+
+ ether_addr_copy(cm_node->rem_mac, iwdev->rf->arp_table[arpindex].mac_addr);
+ irdma_add_hte_node(cm_core, cm_node);
+ cm_core->stats_nodes_created++;
+ return cm_node;
+
+err:
+ kfree(cm_node);
+
+ return NULL;
+}
+
+static void
+irdma_cm_node_free_cb(struct rcu_head *rcu_head)
+{
+ struct irdma_cm_node *cm_node =
+ container_of(rcu_head, struct irdma_cm_node, rcu_head);
+ struct irdma_cm_core *cm_core = cm_node->cm_core;
+ struct irdma_qp *iwqp;
+ struct irdma_cm_info nfo;
+
+ /* if the node is destroyed before the connection was accelerated */
+ if (!cm_node->accelerated && cm_node->accept_pend) {
+ irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+ "node destroyed before established\n");
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ }
+ if (cm_node->close_entry)
+ irdma_handle_close_entry(cm_node, 0);
+ if (cm_node->listener) {
+ irdma_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
+ } else {
+ if (cm_node->apbvt_set) {
+ irdma_del_apbvt(cm_node->iwdev, cm_node->apbvt_entry);
+ cm_node->apbvt_set = 0;
+ }
+ irdma_get_addr_info(cm_node, &nfo);
+ if (cm_node->qhash_set) {
+ nfo.qh_qpid = cm_node->iwdev->vsi.ilq->qp_id;
+ irdma_manage_qhash(cm_node->iwdev, &nfo,
+ IRDMA_QHASH_TYPE_TCP_ESTABLISHED,
+ IRDMA_QHASH_MANAGE_TYPE_DELETE, NULL,
+ false);
+ cm_node->qhash_set = 0;
+ }
+ }
+
+ iwqp = cm_node->iwqp;
+ if (iwqp) {
+ cm_node->cm_id->rem_ref(cm_node->cm_id);
+ cm_node->cm_id = NULL;
+ iwqp->cm_id = NULL;
+ irdma_qp_rem_ref(&iwqp->ibqp);
+ cm_node->iwqp = NULL;
+ } else if (cm_node->qhash_set) {
+ irdma_get_addr_info(cm_node, &nfo);
+ nfo.qh_qpid = cm_node->iwdev->vsi.ilq->qp_id;
+ irdma_manage_qhash(cm_node->iwdev, &nfo,
+ IRDMA_QHASH_TYPE_TCP_ESTABLISHED,
+ IRDMA_QHASH_MANAGE_TYPE_DELETE, NULL, false);
+ cm_node->qhash_set = 0;
+ }
+
+ cm_core->cm_free_ah(cm_node);
+ kfree(cm_node);
+}
+
+/**
+ * irdma_rem_ref_cm_node - destroy an instance of a cm node
+ * @cm_node: connection's node
+ */
+void
+irdma_rem_ref_cm_node(struct irdma_cm_node *cm_node)
+{
+ struct irdma_cm_core *cm_core = cm_node->cm_core;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+
+ if (!atomic_dec_and_test(&cm_node->refcnt)) {
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+ return;
+ }
+ if (cm_node->iwqp) {
+ cm_node->iwqp->cm_node = NULL;
+ cm_node->iwqp->cm_id = NULL;
+ }
+ HASH_DEL_RCU(cm_core->cm_hash_tbl, &cm_node->list);
+ cm_node->cm_core->stats_nodes_destroyed++;
+
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ /* wait for all list walkers to exit their grace period */
+ call_rcu(&cm_node->rcu_head, irdma_cm_node_free_cb);
+}
+
+/**
+ * irdma_handle_fin_pkt - FIN packet received
+ * @cm_node: connection's node
+ */
+static void
+irdma_handle_fin_pkt(struct irdma_cm_node *cm_node)
+{
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_SYN_RCVD:
+ case IRDMA_CM_STATE_SYN_SENT:
+ case IRDMA_CM_STATE_ESTABLISHED:
+ case IRDMA_CM_STATE_MPAREJ_RCVD:
+ cm_node->tcp_cntxt.rcv_nxt++;
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_LAST_ACK;
+ irdma_send_fin(cm_node);
+ break;
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ irdma_create_event(cm_node, IRDMA_CM_EVENT_ABORTED);
+ cm_node->tcp_cntxt.rcv_nxt++;
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ atomic_inc(&cm_node->refcnt);
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ cm_node->tcp_cntxt.rcv_nxt++;
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSING;
+ irdma_send_ack(cm_node);
+ /*
+ * Wait for the ACK as this is a simultaneous close. After we
+ * receive the ACK, do not send anything; just remove the node.
+ */
+ break;
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ cm_node->tcp_cntxt.rcv_nxt++;
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_TIME_WAIT;
+ irdma_send_ack(cm_node);
+ irdma_schedule_cm_timer(cm_node, NULL, IRDMA_TIMER_TYPE_CLOSE,
+ 1, 0);
+ break;
+ case IRDMA_CM_STATE_TIME_WAIT:
+ cm_node->tcp_cntxt.rcv_nxt++;
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_rem_ref_cm_node(cm_node);
+ break;
+ case IRDMA_CM_STATE_OFFLOADED:
+ default:
+ irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+ "bad state node state = %d\n", cm_node->state);
+ break;
+ }
+}
+
+/**
+ * irdma_handle_rst_pkt - process received RST packet
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void
+irdma_handle_rst_pkt(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *rbuf)
+{
+ irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+ "caller: %pS cm_node=%p state=%d rem_port=0x%04x loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4\n",
+ __builtin_return_address(0), cm_node, cm_node->state,
+ cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr, cm_node->loc_addr);
+
+ irdma_cleanup_retrans_entry(cm_node);
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_SYN_SENT:
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ switch (cm_node->mpa_frame_rev) {
+ case IETF_MPA_V2:
+ /* Drop down to MPA_V1 */
+ cm_node->mpa_frame_rev = IETF_MPA_V1;
+ /* send a syn and goto syn sent state */
+ cm_node->state = IRDMA_CM_STATE_SYN_SENT;
+ if (irdma_send_syn(cm_node, 0))
+ irdma_active_open_err(cm_node, false);
+ break;
+ case IETF_MPA_V1:
+ default:
+ irdma_active_open_err(cm_node, false);
+ break;
+ }
+ break;
+ case IRDMA_CM_STATE_MPAREQ_RCVD:
+ atomic_inc(&cm_node->passive_state);
+ break;
+ case IRDMA_CM_STATE_ESTABLISHED:
+ case IRDMA_CM_STATE_SYN_RCVD:
+ case IRDMA_CM_STATE_LISTENING:
+ irdma_passive_open_err(cm_node, false);
+ break;
+ case IRDMA_CM_STATE_OFFLOADED:
+ irdma_active_open_err(cm_node, false);
+ break;
+ case IRDMA_CM_STATE_CLOSED:
+ break;
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ case IRDMA_CM_STATE_LAST_ACK:
+ case IRDMA_CM_STATE_TIME_WAIT:
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_rem_ref_cm_node(cm_node);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * irdma_handle_rcv_mpa - Process a recv'd mpa buffer
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void
+irdma_handle_rcv_mpa(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *rbuf)
+{
+ int err;
+ int datasize = rbuf->datalen;
+ u8 *dataloc = rbuf->data;
+
+ enum irdma_cm_event_type type = IRDMA_CM_EVENT_UNKNOWN;
+ u32 res_type;
+
+ err = irdma_parse_mpa(cm_node, dataloc, &res_type, datasize);
+ if (err) {
+ if (cm_node->state == IRDMA_CM_STATE_MPAREQ_SENT)
+ irdma_active_open_err(cm_node, true);
+ else
+ irdma_passive_open_err(cm_node, true);
+ return;
+ }
+
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_ESTABLISHED:
+ if (res_type == IRDMA_MPA_REQUEST_REJECT)
+ irdma_debug(iwdev_to_idev(cm_node->iwdev),
+ IRDMA_DEBUG_CM, "state for reject\n");
+ cm_node->state = IRDMA_CM_STATE_MPAREQ_RCVD;
+ type = IRDMA_CM_EVENT_MPA_REQ;
+ irdma_send_ack(cm_node); /* ACK received MPA request */
+ atomic_set(&cm_node->passive_state,
+ IRDMA_PASSIVE_STATE_INDICATED);
+ break;
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ irdma_cleanup_retrans_entry(cm_node);
+ if (res_type == IRDMA_MPA_REQUEST_REJECT) {
+ type = IRDMA_CM_EVENT_MPA_REJECT;
+ cm_node->state = IRDMA_CM_STATE_MPAREJ_RCVD;
+ } else {
+ type = IRDMA_CM_EVENT_CONNECTED;
+ cm_node->state = IRDMA_CM_STATE_OFFLOADED;
+ }
+ irdma_send_ack(cm_node);
+ break;
+ default:
+ irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+ "wrong cm_node state =%d\n", cm_node->state);
+ break;
+ }
+ irdma_create_event(cm_node, type);
+}
+
+/**
+ * irdma_check_syn - Check for error on received syn ack
+ * @cm_node: connection's node
+ * @tcph: pointer to the TCP header
+ */
+static int
+irdma_check_syn(struct irdma_cm_node *cm_node, struct tcphdr *tcph)
+{
+ if (ntohl(tcph->th_ack) != cm_node->tcp_cntxt.loc_seq_num) {
+ irdma_active_open_err(cm_node, true);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_check_seq - check that sequence numbers are valid
+ * @cm_node: connection's node
+ * @tcph: pointer to the TCP header
+ */
+static int
+irdma_check_seq(struct irdma_cm_node *cm_node, struct tcphdr *tcph)
+{
+ u32 seq;
+ u32 ack_seq;
+ u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num;
+ u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
+ u32 rcv_wnd;
+ int err = 0;
+
+ seq = ntohl(tcph->th_seq);
+ ack_seq = ntohl(tcph->th_ack);
+ rcv_wnd = cm_node->tcp_cntxt.rcv_wnd;
+ if (ack_seq != loc_seq_num ||
+ !between(seq, rcv_nxt, (rcv_nxt + rcv_wnd)))
+ err = -1;
+ if (err)
+ irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+ "seq number err\n");
+
+ return err;
+}
+
+void
+irdma_add_conn_est_qh(struct irdma_cm_node *cm_node)
+{
+ struct irdma_cm_info nfo;
+
+ irdma_get_addr_info(cm_node, &nfo);
+ nfo.qh_qpid = cm_node->iwdev->vsi.ilq->qp_id;
+ irdma_manage_qhash(cm_node->iwdev, &nfo,
+ IRDMA_QHASH_TYPE_TCP_ESTABLISHED,
+ IRDMA_QHASH_MANAGE_TYPE_ADD,
+ cm_node, false);
+ cm_node->qhash_set = true;
+}
+
+/**
+ * irdma_handle_syn_pkt - handle a SYN packet (passive side)
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void
+irdma_handle_syn_pkt(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *rbuf)
+{
+ struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+ int err;
+ u32 inc_sequence;
+ int optionsize;
+
+ optionsize = (tcph->th_off << 2) - sizeof(struct tcphdr);
+ inc_sequence = ntohl(tcph->th_seq);
+
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_SYN_SENT:
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ /* Rcvd syn on active open connection */
+ irdma_active_open_err(cm_node, true);
+ break;
+ case IRDMA_CM_STATE_LISTENING:
+ /* Passive OPEN */
+ if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
+ cm_node->listener->backlog) {
+ cm_node->cm_core->stats_backlog_drops++;
+ irdma_passive_open_err(cm_node, false);
+ break;
+ }
+ err = irdma_handle_tcp_options(cm_node, tcph, optionsize, 1);
+ if (err) {
+ irdma_passive_open_err(cm_node, false);
+ /* drop pkt */
+ break;
+ }
+ err = cm_node->cm_core->cm_create_ah(cm_node, false);
+ if (err) {
+ irdma_passive_open_err(cm_node, false);
+ /* drop pkt */
+ break;
+ }
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
+ cm_node->accept_pend = 1;
+ atomic_inc(&cm_node->listener->pend_accepts_cnt);
+
+ cm_node->state = IRDMA_CM_STATE_SYN_RCVD;
+ break;
+ case IRDMA_CM_STATE_CLOSED:
+ irdma_cleanup_retrans_entry(cm_node);
+ atomic_inc(&cm_node->refcnt);
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_OFFLOADED:
+ case IRDMA_CM_STATE_ESTABLISHED:
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ case IRDMA_CM_STATE_MPAREQ_RCVD:
+ case IRDMA_CM_STATE_LAST_ACK:
+ case IRDMA_CM_STATE_CLOSING:
+ case IRDMA_CM_STATE_UNKNOWN:
+ default:
+ break;
+ }
+}
+
+/**
+ * irdma_handle_synack_pkt - Process SYN+ACK packet (active side)
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void
+irdma_handle_synack_pkt(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *rbuf)
+{
+ struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+ int err;
+ u32 inc_sequence;
+ int optionsize;
+
+ optionsize = (tcph->th_off << 2) - sizeof(struct tcphdr);
+ inc_sequence = ntohl(tcph->th_seq);
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_SYN_SENT:
+ irdma_cleanup_retrans_entry(cm_node);
+ /* active open */
+ if (irdma_check_syn(cm_node, tcph)) {
+ irdma_debug(iwdev_to_idev(cm_node->iwdev),
+ IRDMA_DEBUG_CM, "check syn fail\n");
+ return;
+ }
+ cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->th_ack);
+ /* setup options */
+ err = irdma_handle_tcp_options(cm_node, tcph, optionsize, 0);
+ if (err) {
+ irdma_debug(iwdev_to_idev(cm_node->iwdev),
+ IRDMA_DEBUG_CM,
+ "cm_node=%p tcp_options failed\n",
+ cm_node);
+ break;
+ }
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
+ irdma_send_ack(cm_node); /* ACK for the syn_ack */
+ err = irdma_send_mpa_request(cm_node);
+ if (err) {
+ irdma_debug(iwdev_to_idev(cm_node->iwdev),
+ IRDMA_DEBUG_CM,
+ "cm_node=%p irdma_send_mpa_request failed\n",
+ cm_node);
+ break;
+ }
+ cm_node->state = IRDMA_CM_STATE_MPAREQ_SENT;
+ break;
+ case IRDMA_CM_STATE_MPAREQ_RCVD:
+ irdma_passive_open_err(cm_node, true);
+ break;
+ case IRDMA_CM_STATE_LISTENING:
+ cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->th_ack);
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_CLOSED:
+ cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->th_ack);
+ irdma_cleanup_retrans_entry(cm_node);
+ atomic_inc(&cm_node->refcnt);
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_ESTABLISHED:
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ case IRDMA_CM_STATE_LAST_ACK:
+ case IRDMA_CM_STATE_OFFLOADED:
+ case IRDMA_CM_STATE_CLOSING:
+ case IRDMA_CM_STATE_UNKNOWN:
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ default:
+ break;
+ }
+}
+
+/**
+ * irdma_handle_ack_pkt - process packet with ACK
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static int
+irdma_handle_ack_pkt(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *rbuf)
+{
+ struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+ u32 inc_sequence;
+ int ret;
+ int optionsize;
+ u32 datasize = rbuf->datalen;
+
+ optionsize = (tcph->th_off << 2) - sizeof(struct tcphdr);
+
+ if (irdma_check_seq(cm_node, tcph))
+ return -EINVAL;
+
+ inc_sequence = ntohl(tcph->th_seq);
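+ /* drive the connection state machine; any piggybacked payload is handed to the MPA parser */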
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_SYN_RCVD:
+ irdma_cleanup_retrans_entry(cm_node);
+ ret = irdma_handle_tcp_options(cm_node, tcph, optionsize, 1);
+ if (ret)
+ return ret;
+ cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->th_ack);
+ cm_node->state = IRDMA_CM_STATE_ESTABLISHED;
+ if (datasize) {
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ irdma_handle_rcv_mpa(cm_node, rbuf);
+ }
+ break;
+ case IRDMA_CM_STATE_ESTABLISHED:
+ irdma_cleanup_retrans_entry(cm_node);
+ if (datasize) {
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ irdma_handle_rcv_mpa(cm_node, rbuf);
+ }
+ break;
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->th_ack);
+ if (datasize) {
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ cm_node->ack_rcvd = false;
+ irdma_handle_rcv_mpa(cm_node, rbuf);
+ } else {
+ cm_node->ack_rcvd = true;
+ }
+ break;
+ case IRDMA_CM_STATE_LISTENING:
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_CLOSED:
+ irdma_cleanup_retrans_entry(cm_node);
+ atomic_inc(&cm_node->refcnt);
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_LAST_ACK:
+ case IRDMA_CM_STATE_CLOSING:
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_rem_ref_cm_node(cm_node);
+ break;
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_FIN_WAIT2;
+ break;
+ case IRDMA_CM_STATE_SYN_SENT:
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ case IRDMA_CM_STATE_OFFLOADED:
+ case IRDMA_CM_STATE_MPAREQ_RCVD:
+ case IRDMA_CM_STATE_UNKNOWN:
+ default:
+ irdma_cleanup_retrans_entry(cm_node);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_process_pkt - process cm packet
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void
+irdma_process_pkt(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *rbuf)
+{
+ enum irdma_tcpip_pkt_type pkt_type = IRDMA_PKT_TYPE_UNKNOWN;
+ struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+ u32 fin_set = 0;
+ int err;
+
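+ /* classify the segment by TCP flags: RST first, then SYN/SYN+ACK, then plain ACK; FIN is tracked separately */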
+ if (tcph->th_flags & TH_RST) {
+ pkt_type = IRDMA_PKT_TYPE_RST;
+ } else if (tcph->th_flags & TH_SYN) {
+ pkt_type = IRDMA_PKT_TYPE_SYN;
+ if (tcph->th_flags & TH_ACK)
+ pkt_type = IRDMA_PKT_TYPE_SYNACK;
+ } else if (tcph->th_flags & TH_ACK) {
+ pkt_type = IRDMA_PKT_TYPE_ACK;
+ }
+ if (tcph->th_flags & TH_FIN)
+ fin_set = 1;
+
+ switch (pkt_type) {
+ case IRDMA_PKT_TYPE_SYN:
+ irdma_handle_syn_pkt(cm_node, rbuf);
+ break;
+ case IRDMA_PKT_TYPE_SYNACK:
+ irdma_handle_synack_pkt(cm_node, rbuf);
+ break;
+ case IRDMA_PKT_TYPE_ACK:
+ err = irdma_handle_ack_pkt(cm_node, rbuf);
+ if (fin_set && !err)
+ irdma_handle_fin_pkt(cm_node);
+ break;
+ case IRDMA_PKT_TYPE_RST:
+ irdma_handle_rst_pkt(cm_node, rbuf);
+ break;
+ default:
+ if (fin_set &&
+ (!irdma_check_seq(cm_node, (struct tcphdr *)rbuf->tcph)))
+ irdma_handle_fin_pkt(cm_node);
+ break;
+ }
+}
+
+/**
+ * irdma_make_listen_node - create a listen node with params
+ * @cm_core: cm's core
+ * @iwdev: iwarp device structure
+ * @cm_info: quad info for connection
+ */
+static struct irdma_cm_listener *
+irdma_make_listen_node(struct irdma_cm_core *cm_core,
+ struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info)
+{
+ struct irdma_cm_listener *listener;
+ unsigned long flags;
+
+ /* cannot have multiple matching listeners */
+ listener = irdma_find_listener(cm_core, cm_info->loc_addr,
+ cm_info->loc_port, cm_info->vlan_id,
+ IRDMA_CM_LISTENER_EITHER_STATE);
+ if (listener &&
+ listener->listener_state == IRDMA_CM_LISTENER_ACTIVE_STATE) {
+ atomic_dec(&listener->refcnt);
+ return NULL;
+ }
+
+ if (!listener) {
+ /*
+ * create a CM listen node (1/2 node) to compare incoming
+ * traffic to
+ */
+ listener = kzalloc(sizeof(*listener), GFP_KERNEL);
+ if (!listener)
+ return NULL;
+ cm_core->stats_listen_nodes_created++;
+ memcpy(listener->loc_addr, cm_info->loc_addr,
+ sizeof(listener->loc_addr));
+ listener->loc_port = cm_info->loc_port;
+
+ INIT_LIST_HEAD(&listener->child_listen_list);
+
+ atomic_set(&listener->refcnt, 1);
+ } else {
+ listener->reused_node = 1;
+ }
+
+ listener->cm_id = cm_info->cm_id;
+ listener->ipv4 = cm_info->ipv4;
+ listener->vlan_id = cm_info->vlan_id;
+ atomic_set(&listener->pend_accepts_cnt, 0);
+ listener->cm_core = cm_core;
+ listener->iwdev = iwdev;
+
+ listener->backlog = cm_info->backlog;
+ listener->listener_state = IRDMA_CM_LISTENER_ACTIVE_STATE;
+
+ if (!listener->reused_node) {
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_add(&listener->list, &cm_core->listen_list);
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+ }
+
+ return listener;
+}
+
+/**
+ * irdma_create_cm_node - make a connection node with params
+ * @cm_core: cm's core
+ * @iwdev: iwarp device structure
+ * @conn_param: connection parameters
+ * @cm_info: quad info for connection
+ * @caller_cm_node: pointer to cm_node structure to return
+ */
+static int
+irdma_create_cm_node(struct irdma_cm_core *cm_core,
+ struct irdma_device *iwdev,
+ struct iw_cm_conn_param *conn_param,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_node **caller_cm_node)
+{
+ struct irdma_cm_node *cm_node;
+ u16 private_data_len = conn_param->private_data_len;
+ const void *private_data = conn_param->private_data;
+
+ /* create a CM connection node */
+ cm_node = irdma_make_cm_node(cm_core, iwdev, cm_info, NULL);
+ if (!cm_node)
+ return -ENOMEM;
+
+ /* set our node side to client (active) side */
+ cm_node->tcp_cntxt.client = 1;
+ cm_node->tcp_cntxt.rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
+
+ irdma_record_ird_ord(cm_node, conn_param->ird, conn_param->ord);
+
+ cm_node->pdata.size = private_data_len;
+ cm_node->pdata.addr = cm_node->pdata_buf;
+
+ memcpy(cm_node->pdata_buf, private_data, private_data_len);
+ *caller_cm_node = cm_node;
+
+ return 0;
+}
+
+/**
+ * irdma_cm_reject - reject and teardown a connection
+ * @cm_node: connection's node
+ * @pdata: ptr to private data for reject
+ * @plen: size of private data
+ */
+static int
+irdma_cm_reject(struct irdma_cm_node *cm_node, const void *pdata,
+ u8 plen)
+{
+ int ret;
+ int passive_state;
+
+ if (cm_node->tcp_cntxt.client)
+ return 0;
+
+ irdma_cleanup_retrans_entry(cm_node);
+
+ passive_state = atomic_add_return(1, &cm_node->passive_state);
+ if (passive_state == IRDMA_SEND_RESET_EVENT) {
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_rem_ref_cm_node(cm_node);
+ return 0;
+ }
+
+ if (cm_node->state == IRDMA_CM_STATE_LISTENER_DESTROYED) {
+ irdma_rem_ref_cm_node(cm_node);
+ return 0;
+ }
+
+ ret = irdma_send_mpa_reject(cm_node, pdata, plen);
+ if (!ret)
+ return 0;
+
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ if (irdma_send_reset(cm_node))
+ irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+ "send reset failed\n");
+
+ return ret;
+}
+
+/**
+ * irdma_cm_close - close a cm connection
+ * @cm_node: connection's node
+ */
+static int
+irdma_cm_close(struct irdma_cm_node *cm_node)
+{
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_SYN_RCVD:
+ case IRDMA_CM_STATE_SYN_SENT:
+ case IRDMA_CM_STATE_ONE_SIDE_ESTABLISHED:
+ case IRDMA_CM_STATE_ESTABLISHED:
+ case IRDMA_CM_STATE_ACCEPTING:
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ case IRDMA_CM_STATE_MPAREQ_RCVD:
+ irdma_cleanup_retrans_entry(cm_node);
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_CLOSE_WAIT:
+ cm_node->state = IRDMA_CM_STATE_LAST_ACK;
+ irdma_send_fin(cm_node);
+ break;
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ case IRDMA_CM_STATE_LAST_ACK:
+ case IRDMA_CM_STATE_TIME_WAIT:
+ case IRDMA_CM_STATE_CLOSING:
+ return -EINVAL;
+ case IRDMA_CM_STATE_LISTENING:
+ irdma_cleanup_retrans_entry(cm_node);
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_MPAREJ_RCVD:
+ case IRDMA_CM_STATE_UNKNOWN:
+ case IRDMA_CM_STATE_INITED:
+ case IRDMA_CM_STATE_CLOSED:
+ case IRDMA_CM_STATE_LISTENER_DESTROYED:
+ irdma_rem_ref_cm_node(cm_node);
+ break;
+ case IRDMA_CM_STATE_OFFLOADED:
+ if (cm_node->send_entry)
+ irdma_debug(iwdev_to_idev(cm_node->iwdev),
+ IRDMA_DEBUG_CM,
+ "CM send_entry in OFFLOADED state\n");
+ irdma_rem_ref_cm_node(cm_node);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_receive_ilq - receive an Ethernet packet and process it
+ * through the CM
+ * @vsi: VSI structure of dev
+ * @rbuf: receive buffer
+ */
+void
+irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf)
+{
+ struct irdma_cm_node *cm_node;
+ struct irdma_cm_listener *listener;
+ struct ip *iph;
+ struct ip6_hdr *ip6h;
+ struct tcphdr *tcph;
+ struct irdma_cm_info cm_info = {0};
+ struct irdma_device *iwdev = vsi->back_vsi;
+ struct irdma_cm_core *cm_core = &iwdev->cm_core;
+ struct ether_vlan_header *ethh;
+ u16 vtag;
+
+ /* if vlan, then maclen = 18 else 14 */
+ iph = (struct ip *)rbuf->iph;
+ irdma_debug_buf(vsi->dev, IRDMA_DEBUG_ILQ, "RECEIVE ILQ BUFFER",
+ rbuf->mem.va, rbuf->totallen);
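+ /* GEN_2 and later report any stripped VLAN tag in the puda buffer; older hardware requires parsing the Ethernet header */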
+ if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ if (rbuf->vlan_valid) {
+ vtag = rbuf->vlan_id;
+ cm_info.user_pri = (vtag & EVL_PRI_MASK) >>
+ VLAN_PRIO_SHIFT;
+ cm_info.vlan_id = vtag & EVL_VLID_MASK;
+ } else {
+ cm_info.vlan_id = 0xFFFF;
+ }
+ } else {
+ ethh = rbuf->mem.va;
+
+ if (ethh->evl_proto == htons(ETH_P_8021Q)) {
+ vtag = ntohs(ethh->evl_tag);
+ cm_info.user_pri = (vtag & EVL_PRI_MASK) >>
+ VLAN_PRIO_SHIFT;
+ cm_info.vlan_id = vtag & EVL_VLID_MASK;
+ irdma_debug(iwdev_to_idev(cm_core->iwdev),
+ IRDMA_DEBUG_CM, "vlan_id=%d\n",
+ cm_info.vlan_id);
+ } else {
+ cm_info.vlan_id = 0xFFFF;
+ }
+ }
+ tcph = (struct tcphdr *)rbuf->tcph;
+
+ if (rbuf->ipv4) {
+ cm_info.loc_addr[0] = ntohl(iph->ip_dst.s_addr);
+ cm_info.rem_addr[0] = ntohl(iph->ip_src.s_addr);
+ cm_info.ipv4 = true;
+ cm_info.tos = iph->ip_tos;
+ } else {
+ ip6h = (struct ip6_hdr *)rbuf->iph;
+ irdma_copy_ip_ntohl(cm_info.loc_addr,
+ ip6h->ip6_dst.__u6_addr.__u6_addr32);
+ irdma_copy_ip_ntohl(cm_info.rem_addr,
+ ip6h->ip6_src.__u6_addr.__u6_addr32);
+ cm_info.ipv4 = false;
+ cm_info.tos = (ip6h->ip6_vfc << 4) | ip6h->ip6_flow;
+ }
+ cm_info.loc_port = ntohs(tcph->th_dport);
+ cm_info.rem_port = ntohs(tcph->th_sport);
+ cm_node = irdma_find_node(cm_core, cm_info.rem_port, cm_info.rem_addr,
+ cm_info.loc_port, cm_info.loc_addr, cm_info.vlan_id);
+
+ if (!cm_node) {
+ /*
+ * The only packets accepted here are for a PASSIVE open (SYN only)
+ */
+ if (!(tcph->th_flags & TH_SYN) || tcph->th_flags & TH_ACK)
+ return;
+
+ listener = irdma_find_listener(cm_core,
+ cm_info.loc_addr,
+ cm_info.loc_port,
+ cm_info.vlan_id,
+ IRDMA_CM_LISTENER_ACTIVE_STATE);
+ if (!listener) {
+ cm_info.cm_id = NULL;
+ irdma_debug(iwdev_to_idev(cm_core->iwdev),
+ IRDMA_DEBUG_CM, "no listener found\n");
+ return;
+ }
+
+ cm_info.cm_id = listener->cm_id;
+ cm_node = irdma_make_cm_node(cm_core, iwdev, &cm_info,
+ listener);
+ if (!cm_node) {
+ irdma_debug(iwdev_to_idev(cm_core->iwdev),
+ IRDMA_DEBUG_CM, "allocate node failed\n");
+ atomic_dec(&listener->refcnt);
+ return;
+ }
+
+ if (!(tcph->th_flags & (TH_RST | TH_FIN))) {
+ cm_node->state = IRDMA_CM_STATE_LISTENING;
+ } else {
+ irdma_rem_ref_cm_node(cm_node);
+ return;
+ }
+
+ atomic_inc(&cm_node->refcnt);
+ } else if (cm_node->state == IRDMA_CM_STATE_OFFLOADED) {
+ irdma_rem_ref_cm_node(cm_node);
+ return;
+ }
+
+ irdma_process_pkt(cm_node, rbuf);
+ irdma_rem_ref_cm_node(cm_node);
+}
+
+static int
+irdma_add_qh(struct irdma_cm_node *cm_node, bool active)
+{
+ if (!active)
+ irdma_add_conn_est_qh(cm_node);
+ return 0;
+}
+
+static void
+irdma_cm_free_ah_nop(struct irdma_cm_node *cm_node)
+{
+}
+
+/**
+ * irdma_setup_cm_core - setup top level instance of a cm core
+ * @iwdev: iwarp device structure
+ * @rdma_ver: HW version
+ */
+int
+irdma_setup_cm_core(struct irdma_device *iwdev, u8 rdma_ver)
+{
+ struct irdma_cm_core *cm_core = &iwdev->cm_core;
+
+ cm_core->iwdev = iwdev;
+ cm_core->dev = &iwdev->rf->sc_dev;
+
+ /* Handles CM event work items sent to the iWARP core */
+ cm_core->event_wq = alloc_ordered_workqueue("iwarp-event-wq", 0);
+ if (!cm_core->event_wq)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cm_core->listen_list);
+
+ timer_setup(&cm_core->tcp_timer, irdma_cm_timer_tick, 0);
+
+ spin_lock_init(&cm_core->ht_lock);
+ spin_lock_init(&cm_core->listen_list_lock);
+ spin_lock_init(&cm_core->apbvt_lock);
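+ /* GEN_1 builds full UDA CM frames and only manages qhashes; GEN_2 and later use address handles for CM traffic */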
+ switch (rdma_ver) {
+ case IRDMA_GEN_1:
+ cm_core->form_cm_frame = irdma_form_uda_cm_frame;
+ cm_core->cm_create_ah = irdma_add_qh;
+ cm_core->cm_free_ah = irdma_cm_free_ah_nop;
+ break;
+ case IRDMA_GEN_2:
+ default:
+ cm_core->form_cm_frame = irdma_form_ah_cm_frame;
+ cm_core->cm_create_ah = irdma_cm_create_ah;
+ cm_core->cm_free_ah = irdma_cm_free_ah;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_cleanup_cm_core - deallocate a top level instance of a
+ * cm core
+ * @cm_core: cm's core
+ */
+void
+irdma_cleanup_cm_core(struct irdma_cm_core *cm_core)
+{
+ unsigned long flags;
+
+ if (!cm_core)
+ return;
+
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ if (timer_pending(&cm_core->tcp_timer))
+ del_timer_sync(&cm_core->tcp_timer);
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ destroy_workqueue(cm_core->event_wq);
+ cm_core->dev->ws_reset(&cm_core->iwdev->vsi);
+}
+
+/**
+ * irdma_init_tcp_ctx - setup qp context
+ * @cm_node: connection's node
+ * @tcp_info: offload info for tcp
+ * @iwqp: associated qp for the connection
+ */
+static void
+irdma_init_tcp_ctx(struct irdma_cm_node *cm_node,
+ struct irdma_tcp_offload_info *tcp_info,
+ struct irdma_qp *iwqp)
+{
+ tcp_info->ipv4 = cm_node->ipv4;
+ tcp_info->drop_ooo_seg = !iwqp->iwdev->iw_ooo;
+ tcp_info->wscale = true;
+ tcp_info->ignore_tcp_opt = true;
+ tcp_info->ignore_tcp_uns_opt = true;
+ tcp_info->no_nagle = false;
+
+ tcp_info->ttl = IRDMA_DEFAULT_TTL;
+ tcp_info->rtt_var = IRDMA_DEFAULT_RTT_VAR;
+ tcp_info->ss_thresh = IRDMA_DEFAULT_SS_THRESH;
+ tcp_info->rexmit_thresh = IRDMA_DEFAULT_REXMIT_THRESH;
+
+ tcp_info->tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
+ tcp_info->snd_wscale = cm_node->tcp_cntxt.snd_wscale;
+ tcp_info->rcv_wscale = cm_node->tcp_cntxt.rcv_wscale;
+
+ tcp_info->snd_nxt = cm_node->tcp_cntxt.loc_seq_num;
+ tcp_info->snd_wnd = cm_node->tcp_cntxt.snd_wnd;
+ tcp_info->rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
+ tcp_info->snd_max = cm_node->tcp_cntxt.loc_seq_num;
+
+ tcp_info->snd_una = cm_node->tcp_cntxt.loc_seq_num;
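+ /* start with an initial congestion window of two segments */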
+ tcp_info->cwnd = 2 * cm_node->tcp_cntxt.mss;
+ tcp_info->snd_wl1 = cm_node->tcp_cntxt.rcv_nxt;
+ tcp_info->snd_wl2 = cm_node->tcp_cntxt.loc_seq_num;
+ tcp_info->max_snd_window = cm_node->tcp_cntxt.max_snd_wnd;
+ tcp_info->rcv_wnd = cm_node->tcp_cntxt.rcv_wnd
+ << cm_node->tcp_cntxt.rcv_wscale;
+
+ tcp_info->flow_label = 0;
+ tcp_info->snd_mss = (u32)cm_node->tcp_cntxt.mss;
+ tcp_info->tos = cm_node->tos;
+ if (cm_node->vlan_id < VLAN_N_VID) {
+ tcp_info->insert_vlan_tag = true;
+ tcp_info->vlan_tag = cm_node->vlan_id;
+ tcp_info->vlan_tag |= cm_node->user_pri << VLAN_PRIO_SHIFT;
+ }
+ tcp_info->src_port = cm_node->loc_port;
+ tcp_info->dst_port = cm_node->rem_port;
+ tcp_info->arp_idx = (u16)irdma_arp_table(iwqp->iwdev->rf,
+ cm_node->rem_addr, NULL,
+ IRDMA_ARP_RESOLVE);
+ if (cm_node->ipv4) {
+ tcp_info->dest_ip_addr[3] = cm_node->rem_addr[0];
+ tcp_info->local_ipaddr[3] = cm_node->loc_addr[0];
+ } else {
+ memcpy(tcp_info->dest_ip_addr, cm_node->rem_addr,
+ sizeof(tcp_info->dest_ip_addr));
+ memcpy(tcp_info->local_ipaddr, cm_node->loc_addr,
+ sizeof(tcp_info->local_ipaddr));
+ }
+}
+
+/**
+ * irdma_cm_init_tsa_conn - setup qp for RTS
+ * @iwqp: associated qp for the connection
+ * @cm_node: connection's node
+ */
+static void
+irdma_cm_init_tsa_conn(struct irdma_qp *iwqp,
+ struct irdma_cm_node *cm_node)
+{
+ struct irdma_iwarp_offload_info *iwarp_info;
+ struct irdma_qp_host_ctx_info *ctx_info;
+
+ iwarp_info = &iwqp->iwarp_info;
+ ctx_info = &iwqp->ctx_info;
+
+ ctx_info->tcp_info = &iwqp->tcp_info;
+ ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
+ ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
+
+ iwarp_info->ord_size = cm_node->ord_size;
+ iwarp_info->ird_size = cm_node->ird_size;
+ iwarp_info->rd_en = true;
+ iwarp_info->rdmap_ver = 1;
+ iwarp_info->ddp_ver = 1;
+ iwarp_info->pd_id = iwqp->iwpd->sc_pd.pd_id;
+
+ ctx_info->tcp_info_valid = true;
+ ctx_info->iwarp_info_valid = true;
+ ctx_info->user_pri = cm_node->user_pri;
+
+ irdma_init_tcp_ctx(cm_node, &iwqp->tcp_info, iwqp);
+ if (cm_node->snd_mark_en) {
+ iwarp_info->snd_mark_en = true;
+ iwarp_info->snd_mark_offset = (iwqp->tcp_info.snd_nxt & SNDMARKER_SEQNMASK) +
+ cm_node->lsmm_size;
+ }
+
+ cm_node->state = IRDMA_CM_STATE_OFFLOADED;
+ iwqp->tcp_info.tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
+ iwqp->tcp_info.src_mac_addr_idx = iwqp->iwdev->mac_ip_table_idx;
+
+ if (cm_node->rcv_mark_en) {
+ iwarp_info->rcv_mark_en = true;
+ iwarp_info->align_hdrs = true;
+ }
+
+ irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
+
+ /* once tcp_info is set, no need to do it again */
+ ctx_info->tcp_info_valid = false;
+ ctx_info->iwarp_info_valid = false;
+}
+
+/**
+ * irdma_cm_disconn - when a connection is being closed
+ * @iwqp: associated qp for the connection
+ */
+void
+irdma_cm_disconn(struct irdma_qp *iwqp)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct disconn_work *work;
+ unsigned long flags;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
+ if (!iwdev->rf->qp_table[iwqp->ibqp.qp_num]) {
+ spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+ "qp_id %d is already freed\n", iwqp->ibqp.qp_num);
+ kfree(work);
+ return;
+ }
+ irdma_qp_add_ref(&iwqp->ibqp);
+ spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
+
+ work->iwqp = iwqp;
+ INIT_WORK(&work->work, irdma_disconnect_worker);
+ queue_work(iwdev->cleanup_wq, &work->work);
+}
+
+/**
+ * irdma_qp_disconnect - free qp and close cm
+ * @iwqp: associated qp for the connection
+ */
+static void
+irdma_qp_disconnect(struct irdma_qp *iwqp)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+
+ iwqp->active_conn = 0;
+ /* close the CM node down if it is still active */
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+ "Call close API\n");
+ irdma_cm_close(iwqp->cm_node);
+}
+
+/**
+ * irdma_cm_disconn_true - called by worker thread to disconnect qp
+ * @iwqp: associated qp for the connection
+ */
+static void
+irdma_cm_disconn_true(struct irdma_qp *iwqp)
+{
+ struct iw_cm_id *cm_id;
+ struct irdma_device *iwdev;
+ struct irdma_sc_qp *qp = &iwqp->sc_qp;
+ u16 last_ae;
+ u8 original_hw_tcp_state;
+ u8 original_ibqp_state;
+ int disconn_status = 0;
+ int issue_disconn = 0;
+ int issue_close = 0;
+ int issue_flush = 0;
+ unsigned long flags;
+ int err;
+
+ iwdev = iwqp->iwdev;
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
+ struct ib_qp_attr attr;
+
+ if (iwqp->flush_issued || iwqp->sc_qp.qp_uk.destroy_pending) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+
+ attr.qp_state = IB_QPS_ERR;
+ irdma_modify_qp_roce(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+ irdma_ib_qp_event(iwqp, qp->event_type);
+ return;
+ }
+
+ cm_id = iwqp->cm_id;
+ /* make sure we haven't already closed this connection */
+ if (!cm_id) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ return;
+ }
+
+ original_hw_tcp_state = iwqp->hw_tcp_state;
+ original_ibqp_state = iwqp->ibqp_state;
+ last_ae = iwqp->last_aeq;
+
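+ /* decide whether to issue disconnect/close events and flush WQEs based on terminate flags, the HW TCP state and the last AE */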
+ if (qp->term_flags) {
+ issue_disconn = 1;
+ issue_close = 1;
+ iwqp->cm_id = NULL;
+ irdma_terminate_del_timer(qp);
+ if (!iwqp->flush_issued) {
+ iwqp->flush_issued = 1;
+ issue_flush = 1;
+ }
+ } else if ((original_hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT) ||
+ ((original_ibqp_state == IB_QPS_RTS) &&
+ (last_ae == IRDMA_AE_LLP_CONNECTION_RESET))) {
+ issue_disconn = 1;
+ if (last_ae == IRDMA_AE_LLP_CONNECTION_RESET)
+ disconn_status = -ECONNRESET;
+ }
+
+ if ((original_hw_tcp_state == IRDMA_TCP_STATE_CLOSED ||
+ original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
+ last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
+ last_ae == IRDMA_AE_BAD_CLOSE ||
+ last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset)) {
+ issue_close = 1;
+ iwqp->cm_id = NULL;
+ qp->term_flags = 0;
+ if (!iwqp->flush_issued) {
+ iwqp->flush_issued = 1;
+ issue_flush = 1;
+ }
+ }
+
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (issue_flush && !iwqp->sc_qp.qp_uk.destroy_pending) {
+ if (!iwqp->user_mode)
+ queue_delayed_work(iwqp->iwdev->cleanup_wq,
+ &iwqp->dwork_flush,
+ msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
+ irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_FLUSH_RQ |
+ IRDMA_FLUSH_WAIT);
+
+ if (qp->term_flags)
+ irdma_ib_qp_event(iwqp, qp->event_type);
+ }
+
+ if (!cm_id || !cm_id->event_handler)
+ return;
+
+ spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
+ if (!iwqp->cm_node) {
+ spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
+ return;
+ }
+ atomic_inc(&iwqp->cm_node->refcnt);
+
+ spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
+
+ if (issue_disconn) {
+ err = irdma_send_cm_event(iwqp->cm_node, cm_id,
+ IW_CM_EVENT_DISCONNECT,
+ disconn_status);
+ if (err)
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+ "disconnect event failed: - cm_id = %p\n",
+ cm_id);
+ }
+ if (issue_close) {
+ cm_id->provider_data = iwqp;
+ err = irdma_send_cm_event(iwqp->cm_node, cm_id,
+ IW_CM_EVENT_CLOSE, 0);
+ if (err)
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+ "close event failed: - cm_id = %p\n",
+ cm_id);
+ irdma_qp_disconnect(iwqp);
+ }
+ irdma_rem_ref_cm_node(iwqp->cm_node);
+}
+
+/**
+ * irdma_disconnect_worker - worker for connection close
+ * @work: pointer to the disconn work structure
+ */
+static void
+irdma_disconnect_worker(struct work_struct *work)
+{
+ struct disconn_work *dwork = container_of(work, struct disconn_work, work);
+ struct irdma_qp *iwqp = dwork->iwqp;
+
+ kfree(dwork);
+ irdma_cm_disconn_true(iwqp);
+ irdma_qp_rem_ref(&iwqp->ibqp);
+}
+
+/**
+ * irdma_free_lsmm_rsrc - free lsmm memory and deregister
+ * @iwqp: associated qp for the connection
+ */
+void
+irdma_free_lsmm_rsrc(struct irdma_qp *iwqp)
+{
+ struct irdma_device *iwdev;
+
+ iwdev = iwqp->iwdev;
+
+ if (iwqp->ietf_mem.va) {
+ if (iwqp->lsmm_mr)
+ kc_free_lsmm_dereg_mr(iwdev, iwqp);
+ irdma_free_dma_mem(iwdev->rf->sc_dev.hw,
+ &iwqp->ietf_mem);
+ iwqp->ietf_mem.va = NULL;
+ }
+}
+
+/**
+ * irdma_accept - registered call for connection to be accepted
+ * @cm_id: cm information for passive connection
+ * @conn_param: accept parameters
+ */
+int
+irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ struct ib_qp *ibqp;
+ struct irdma_qp *iwqp;
+ struct irdma_device *iwdev;
+ struct irdma_sc_dev *dev;
+ struct irdma_cm_node *cm_node;
+ struct ib_qp_attr attr = {0};
+ int passive_state;
+ struct ib_mr *ibmr;
+ struct irdma_pd *iwpd;
+ u16 buf_len = 0;
+ struct irdma_kmem_info accept;
+ u64 tagged_offset;
+ int wait_ret;
+ int ret = 0;
+
+ ibqp = irdma_get_qp(cm_id->device, conn_param->qpn);
+ if (!ibqp)
+ return -EINVAL;
+
+ iwqp = to_iwqp(ibqp);
+ iwdev = iwqp->iwdev;
+ dev = &iwdev->rf->sc_dev;
+ cm_node = cm_id->provider_data;
+
+ if (((struct sockaddr_in *)&cm_id->local_addr)->sin_family == AF_INET) {
+ cm_node->ipv4 = true;
+ cm_node->vlan_id = irdma_get_vlan_ipv4(cm_node->loc_addr);
+ } else {
+ cm_node->ipv4 = false;
+ irdma_netdev_vlan_ipv6(cm_node->loc_addr, &cm_node->vlan_id,
+ NULL);
+ }
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+ "Accept vlan_id=%d\n", cm_node->vlan_id);
+
+ if (cm_node->state == IRDMA_CM_STATE_LISTENER_DESTROYED) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ passive_state = atomic_add_return(1, &cm_node->passive_state);
+ if (passive_state == IRDMA_SEND_RESET_EVENT) {
+ ret = -ECONNRESET;
+ goto error;
+ }
+
+ buf_len = conn_param->private_data_len + IRDMA_MAX_IETF_SIZE;
+ iwqp->ietf_mem.size = buf_len;
+ iwqp->ietf_mem.va = irdma_allocate_dma_mem(dev->hw, &iwqp->ietf_mem,
+ iwqp->ietf_mem.size, 1);
+ if (!iwqp->ietf_mem.va) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ cm_node->pdata.size = conn_param->private_data_len;
+ accept.addr = iwqp->ietf_mem.va;
+ accept.size = irdma_cm_build_mpa_frame(cm_node, &accept, MPA_KEY_REPLY);
+ memcpy((u8 *)accept.addr + accept.size, conn_param->private_data,
+ conn_param->private_data_len);
+
+ if (cm_node->dev->ws_add(iwqp->sc_qp.vsi, cm_node->user_pri)) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ iwqp->sc_qp.user_pri = cm_node->user_pri;
+ irdma_qp_add_qos(&iwqp->sc_qp);
+ if (cm_node->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2)
+ iwdev->rf->check_fc(&iwdev->vsi, &iwqp->sc_qp);
+ /* setup our first outgoing iWarp send WQE (the IETF frame response) */
+ iwpd = iwqp->iwpd;
+ tagged_offset = (uintptr_t)iwqp->ietf_mem.va;
+ ibmr = irdma_reg_phys_mr(&iwpd->ibpd, iwqp->ietf_mem.pa, buf_len,
+ IB_ACCESS_LOCAL_WRITE, &tagged_offset);
+ if (IS_ERR(ibmr)) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ibmr->pd = &iwpd->ibpd;
+ ibmr->device = iwpd->ibpd.device;
+ iwqp->lsmm_mr = ibmr;
+ if (iwqp->page)
+ iwqp->sc_qp.qp_uk.sq_base = kmap_local_page(iwqp->page);
+
+ cm_node->lsmm_size = accept.size + conn_param->private_data_len;
+ irdma_sc_send_lsmm(&iwqp->sc_qp, iwqp->ietf_mem.va, cm_node->lsmm_size,
+ ibmr->lkey);
+
+ if (iwqp->page)
+ kunmap_local(iwqp->sc_qp.qp_uk.sq_base);
+
+ iwqp->cm_id = cm_id;
+ cm_node->cm_id = cm_id;
+
+ cm_id->provider_data = iwqp;
+ iwqp->active_conn = 0;
+ iwqp->cm_node = cm_node;
+ cm_node->iwqp = iwqp;
+ irdma_cm_init_tsa_conn(iwqp, cm_node);
+ irdma_qp_add_ref(&iwqp->ibqp);
+ cm_id->add_ref(cm_id);
+
+ attr.qp_state = IB_QPS_RTS;
+ cm_node->qhash_set = false;
+ cm_node->cm_core->cm_free_ah(cm_node);
+
+ irdma_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
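+ /* when the HW supports the RTS AE, wait for it before reporting the connection as established */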
+ if (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE) {
+ wait_ret = wait_event_interruptible_timeout(iwqp->waitq,
+ iwqp->rts_ae_rcvd,
+ IRDMA_MAX_TIMEOUT);
+ if (!wait_ret) {
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+ "Slow Connection: cm_node=%p, loc_port=%d, rem_port=%d, cm_id=%p\n",
+ cm_node, cm_node->loc_port,
+ cm_node->rem_port, cm_node->cm_id);
+ ret = -ECONNRESET;
+ goto error;
+ }
+ }
+
+ irdma_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
+ cm_node->accelerated = true;
+ complete(&cm_node->establish_comp);
+
+ if (cm_node->accept_pend) {
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ cm_node->accept_pend = 0;
+ }
+
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+ "rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4 cm_node=%p cm_id=%p qp_id = %d\n\n",
+ cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr,
+ cm_node->loc_addr, cm_node, cm_id, ibqp->qp_num);
+ cm_node->cm_core->stats_accepts++;
+
+ return 0;
+error:
+ irdma_free_lsmm_rsrc(iwqp);
+ irdma_rem_ref_cm_node(cm_node);
+
+ return ret;
+}
+
+/**
+ * irdma_reject - registered call for connection to be rejected
+ * @cm_id: cm information for passive connection
+ * @pdata: private data to be sent
+ * @pdata_len: private data length
+ */
+int
+irdma_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+{
+ struct irdma_device *iwdev;
+ struct irdma_cm_node *cm_node;
+
+ cm_node = cm_id->provider_data;
+ cm_node->pdata.size = pdata_len;
+
+ iwdev = to_iwdev(cm_id->device);
+ if (!iwdev)
+ return -EINVAL;
+
+ cm_node->cm_core->stats_rejects++;
+
+ if (pdata_len + sizeof(struct ietf_mpa_v2) > IRDMA_MAX_CM_BUF)
+ return -EINVAL;
+
+ return irdma_cm_reject(cm_node, pdata, pdata_len);
+}
+
+/**
+ * irdma_connect - registered call for connection to be established
+ * @cm_id: cm information for the active connection
+ * @conn_param: Information about the connection
+ */
+int
+irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ struct ib_qp *ibqp;
+ struct irdma_qp *iwqp;
+ struct irdma_device *iwdev;
+ struct irdma_cm_node *cm_node;
+ struct irdma_cm_info cm_info;
+ struct sockaddr_in *laddr;
+ struct sockaddr_in *raddr;
+ struct sockaddr_in6 *laddr6;
+ struct sockaddr_in6 *raddr6;
+ int ret = 0;
+
+ ibqp = irdma_get_qp(cm_id->device, conn_param->qpn);
+ if (!ibqp)
+ return -EINVAL;
+ iwqp = to_iwqp(ibqp);
+ if (!iwqp)
+ return -EINVAL;
+ iwdev = iwqp->iwdev;
+ if (!iwdev)
+ return -EINVAL;
+
+ laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
+ laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+ raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
+
+ if (!(laddr->sin_port) || !(raddr->sin_port))
+ return -EINVAL;
+
+ iwqp->active_conn = 1;
+ iwqp->cm_id = NULL;
+ cm_id->provider_data = iwqp;
+
+ /* set up the connection params for the node */
+ if (cm_id->remote_addr.ss_family == AF_INET) {
+ if (iwdev->vsi.mtu < IRDMA_MIN_MTU_IPV4)
+ return -EINVAL;
+
+ cm_info.ipv4 = true;
+ memset(cm_info.loc_addr, 0, sizeof(cm_info.loc_addr));
+ memset(cm_info.rem_addr, 0, sizeof(cm_info.rem_addr));
+ cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
+ cm_info.rem_addr[0] = ntohl(raddr->sin_addr.s_addr);
+ cm_info.loc_port = ntohs(laddr->sin_port);
+ cm_info.rem_port = ntohs(raddr->sin_port);
+ cm_info.vlan_id = irdma_get_vlan_ipv4(cm_info.loc_addr);
+ } else {
+ if (iwdev->vsi.mtu < IRDMA_MIN_MTU_IPV6)
+ return -EINVAL;
+
+ cm_info.ipv4 = false;
+ irdma_copy_ip_ntohl(cm_info.loc_addr,
+ laddr6->sin6_addr.__u6_addr.__u6_addr32);
+ irdma_copy_ip_ntohl(cm_info.rem_addr,
+ raddr6->sin6_addr.__u6_addr.__u6_addr32);
+ cm_info.loc_port = ntohs(laddr6->sin6_port);
+ cm_info.rem_port = ntohs(raddr6->sin6_port);
+ irdma_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id,
+ NULL);
+ }
+ cm_info.cm_id = cm_id;
+ cm_info.qh_qpid = iwdev->vsi.ilq->qp_id;
+ cm_info.tos = cm_id->tos;
+ if (iwdev->vsi.dscp_mode)
+ cm_info.user_pri =
+ iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(cm_info.tos)];
+ else
+ cm_info.user_pri = rt_tos2priority(cm_id->tos);
+
+ if (iwqp->sc_qp.dev->ws_add(iwqp->sc_qp.vsi, cm_info.user_pri))
+ return -ENOMEM;
+ iwqp->sc_qp.user_pri = cm_info.user_pri;
+ irdma_qp_add_qos(&iwqp->sc_qp);
+ if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2)
+ iwdev->rf->check_fc(&iwdev->vsi, &iwqp->sc_qp);
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_DCB,
+ "TOS:[%d] UP:[%d]\n", cm_id->tos, cm_info.user_pri);
+
+ ret = irdma_create_cm_node(&iwdev->cm_core, iwdev, conn_param, &cm_info,
+ &cm_node);
+ if (ret)
+ return ret;
+ ret = cm_node->cm_core->cm_create_ah(cm_node, true);
+ if (ret)
+ goto err;
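+ /* register the established-quad qhash and an APBVT entry for the local port before sending the SYN */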
+ if (irdma_manage_qhash(iwdev, &cm_info,
+ IRDMA_QHASH_TYPE_TCP_ESTABLISHED,
+ IRDMA_QHASH_MANAGE_TYPE_ADD, NULL, true)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ cm_node->qhash_set = true;
+
+ cm_node->apbvt_entry = irdma_add_apbvt(iwdev, cm_info.loc_port);
+ if (!cm_node->apbvt_entry) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ cm_node->apbvt_set = true;
+ iwqp->cm_node = cm_node;
+ cm_node->iwqp = iwqp;
+ iwqp->cm_id = cm_id;
+ irdma_qp_add_ref(&iwqp->ibqp);
+ cm_id->add_ref(cm_id);
+
+ if (cm_node->state != IRDMA_CM_STATE_OFFLOADED) {
+ cm_node->state = IRDMA_CM_STATE_SYN_SENT;
+ ret = irdma_send_syn(cm_node, 0);
+ if (ret)
+ goto err;
+ }
+
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+ "rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4 cm_node=%p cm_id=%p qp_id = %d\n\n",
+ cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr,
+ cm_node->loc_addr, cm_node, cm_id, ibqp->qp_num);
+
+ return 0;
+
+err:
+ if (cm_info.ipv4)
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+ "connect() FAILED: dest addr=%pI4",
+ cm_info.rem_addr);
+ else
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+ "connect() FAILED: dest addr=%pI6",
+ cm_info.rem_addr);
+ irdma_rem_ref_cm_node(cm_node);
+ iwdev->cm_core.stats_connect_errs++;
+
+ return ret;
+}
+
+/**
+ * irdma_create_listen - registered call creating listener
+ * @cm_id: cm information for passive connection
+ * @backlog: maximum number of pending accepts
+ */
+int
+irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
+{
+ struct irdma_device *iwdev;
+ struct irdma_cm_listener *cm_listen_node;
+ struct irdma_cm_info cm_info = {0};
+ struct sockaddr_in *laddr;
+ struct sockaddr_in6 *laddr6;
+ bool wildcard = false;
+ int err;
+
+ iwdev = to_iwdev(cm_id->device);
+ if (!iwdev)
+ return -EINVAL;
+
+ laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+ cm_info.qh_qpid = iwdev->vsi.ilq->qp_id;
+
+ if (laddr->sin_family == AF_INET) {
+ if (iwdev->vsi.mtu < IRDMA_MIN_MTU_IPV4)
+ return -EINVAL;
+
+ cm_info.ipv4 = true;
+ cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
+ cm_info.loc_port = ntohs(laddr->sin_port);
+
+ if (laddr->sin_addr.s_addr != htonl(INADDR_ANY)) {
+ cm_info.vlan_id = irdma_get_vlan_ipv4(cm_info.loc_addr);
+ } else {
+ cm_info.vlan_id = 0xFFFF;
+ wildcard = true;
+ }
+ } else {
+ if (iwdev->vsi.mtu < IRDMA_MIN_MTU_IPV6)
+ return -EINVAL;
+
+ cm_info.ipv4 = false;
+ irdma_copy_ip_ntohl(cm_info.loc_addr,
+ laddr6->sin6_addr.__u6_addr.__u6_addr32);
+ cm_info.loc_port = ntohs(laddr6->sin6_port);
+ if (!IN6_IS_ADDR_UNSPECIFIED(&laddr6->sin6_addr)) {
+ irdma_netdev_vlan_ipv6(cm_info.loc_addr,
+ &cm_info.vlan_id, NULL);
+ } else {
+ cm_info.vlan_id = 0xFFFF;
+ wildcard = true;
+ }
+ }
+
+ if (cm_info.vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
+ cm_info.vlan_id = 0;
+ cm_info.backlog = backlog;
+ cm_info.cm_id = cm_id;
+
+ cm_listen_node = irdma_make_listen_node(&iwdev->cm_core, iwdev,
+ &cm_info);
+ if (!cm_listen_node) {
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+ "cm_listen_node == NULL\n");
+ return -ENOMEM;
+ }
+
+ cm_id->provider_data = cm_listen_node;
+
+ cm_listen_node->tos = cm_id->tos;
+ if (iwdev->vsi.dscp_mode)
+ cm_listen_node->user_pri =
+ iwdev->vsi.dscp_map[irdma_tos2dscp(cm_id->tos)];
+ else
+ cm_listen_node->user_pri = rt_tos2priority(cm_id->tos);
+ cm_info.user_pri = cm_listen_node->user_pri;
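+ /* wildcard listens need per-address qhash entries (MQH); a specific address gets a single SYN qhash */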
+ if (!cm_listen_node->reused_node) {
+ if (wildcard) {
+ err = irdma_add_mqh(iwdev, &cm_info, cm_listen_node);
+ if (err)
+ goto error;
+ } else {
+ err = irdma_manage_qhash(iwdev, &cm_info,
+ IRDMA_QHASH_TYPE_TCP_SYN,
+ IRDMA_QHASH_MANAGE_TYPE_ADD,
+ NULL, true);
+ if (err)
+ goto error;
+
+ cm_listen_node->qhash_set = true;
+ }
+
+ cm_listen_node->apbvt_entry = irdma_add_apbvt(iwdev,
+ cm_info.loc_port);
+ if (!cm_listen_node->apbvt_entry)
+ goto error;
+ }
+ cm_id->add_ref(cm_id);
+ cm_listen_node->cm_core->stats_listen_created++;
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+ "loc_port=0x%04x loc_addr=%pI4 cm_listen_node=%p cm_id=%p qhash_set=%d vlan_id=%d\n",
+ cm_listen_node->loc_port, cm_listen_node->loc_addr, cm_listen_node, cm_listen_node->cm_id,
+ cm_listen_node->qhash_set, cm_listen_node->vlan_id);
+
+ return 0;
+
+error:
+
+ irdma_cm_del_listen(&iwdev->cm_core, cm_listen_node, false);
+
+ return -EINVAL;
+}
+
+/**
+ * irdma_destroy_listen - registered call to destroy listener
+ * @cm_id: cm information for passive connection
+ */
+int
+irdma_destroy_listen(struct iw_cm_id *cm_id)
+{
+ struct irdma_device *iwdev;
+
+ iwdev = to_iwdev(cm_id->device);
+ if (cm_id->provider_data)
+ irdma_cm_del_listen(&iwdev->cm_core, cm_id->provider_data,
+ true);
+ else
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+ "cm_id->provider_data was NULL\n");
+
+ cm_id->rem_ref(cm_id);
+
+ return 0;
+}
+
+/**
+ * irdma_teardown_list_prep - add conn nodes slated for tear down to list
+ * @cm_core: cm's core
+ * @teardown_list: list to which matching cm_nodes will be added
+ * @ipaddr: pointer to ip address
+ * @nfo: pointer to cm_info structure instance
+ * @disconnect_all: flag indicating disconnect all QPs
+ */
+static void
+irdma_teardown_list_prep(struct irdma_cm_core *cm_core,
+ struct list_head *teardown_list,
+ u32 *ipaddr,
+ struct irdma_cm_info *nfo,
+ bool disconnect_all)
+{
+ struct irdma_cm_node *cm_node;
+ int bkt;
+
+ HASH_FOR_EACH_RCU(cm_core->cm_hash_tbl, bkt, cm_node, list) {
+ if ((disconnect_all ||
+ (nfo->vlan_id == cm_node->vlan_id &&
+ !memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 4 : 16))) &&
+ atomic_inc_not_zero(&cm_node->refcnt))
+ list_add(&cm_node->teardown_entry, teardown_list);
+ }
+}
+
+/**
+ * irdma_cm_event_connected - handle connected active node
+ * @event: the info for cm_node of connection
+ */
+static void
+irdma_cm_event_connected(struct irdma_cm_event *event)
+{
+ struct irdma_qp *iwqp;
+ struct irdma_device *iwdev;
+ struct irdma_cm_node *cm_node;
+ struct irdma_sc_dev *dev;
+ struct ib_qp_attr attr = {0};
+ struct iw_cm_id *cm_id;
+ int status;
+ bool read0;
+ int wait_ret = 0;
+
+ cm_node = event->cm_node;
+ cm_id = cm_node->cm_id;
+ iwqp = cm_id->provider_data;
+ iwdev = iwqp->iwdev;
+ dev = &iwdev->rf->sc_dev;
+ if (iwqp->sc_qp.qp_uk.destroy_pending) {
+ status = -ETIMEDOUT;
+ goto error;
+ }
+
+ irdma_cm_init_tsa_conn(iwqp, cm_node);
+ read0 = (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO);
+ if (iwqp->page)
+ iwqp->sc_qp.qp_uk.sq_base = kmap_local_page(iwqp->page);
+ irdma_sc_send_rtt(&iwqp->sc_qp, read0);
+ if (iwqp->page)
+ kunmap_local(iwqp->sc_qp.qp_uk.sq_base);
+
+ attr.qp_state = IB_QPS_RTS;
+ cm_node->qhash_set = false;
+ irdma_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+ if (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE) {
+ wait_ret = wait_event_interruptible_timeout(iwqp->waitq,
+ iwqp->rts_ae_rcvd,
+ IRDMA_MAX_TIMEOUT);
+ if (!wait_ret)
+ irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
+ "Slow Connection: cm_node=%p, loc_port=%d, rem_port=%d, cm_id=%p\n",
+ cm_node, cm_node->loc_port,
+ cm_node->rem_port, cm_node->cm_id);
+ }
+
+ irdma_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, 0);
+ cm_node->accelerated = true;
+ complete(&cm_node->establish_comp);
+ cm_node->cm_core->cm_free_ah(cm_node);
+ return;
+
+error:
+ iwqp->cm_id = NULL;
+ cm_id->provider_data = NULL;
+ irdma_send_cm_event(event->cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
+ status);
+ irdma_rem_ref_cm_node(event->cm_node);
+}
+
+/**
+ * irdma_cm_event_reset - handle reset
+ * @event: the info for cm_node of connection
+ */
+static void
+irdma_cm_event_reset(struct irdma_cm_event *event)
+{
+ struct irdma_cm_node *cm_node = event->cm_node;
+ struct iw_cm_id *cm_id = cm_node->cm_id;
+ struct irdma_qp *iwqp;
+
+ if (!cm_id)
+ return;
+
+ iwqp = cm_id->provider_data;
+ if (!iwqp)
+ return;
+
+ irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+ "reset event %p - cm_id = %p\n", event->cm_node, cm_id);
+ iwqp->cm_id = NULL;
+
+ irdma_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_DISCONNECT,
+ -ECONNRESET);
+ irdma_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_CLOSE, 0);
+}
+
+/**
+ * irdma_cm_event_handler - send event to cm upper layer
+ * @work: pointer to cm event info
+ */
+static void
+irdma_cm_event_handler(struct work_struct *work)
+{
+ struct irdma_cm_event *event = container_of(work, struct irdma_cm_event, event_work);
+ struct irdma_cm_node *cm_node;
+
+ if (!event || !event->cm_node || !event->cm_node->cm_core)
+ return;
+
+ cm_node = event->cm_node;
+
+ switch (event->type) {
+ case IRDMA_CM_EVENT_MPA_REQ:
+ irdma_send_cm_event(cm_node, cm_node->cm_id,
+ IW_CM_EVENT_CONNECT_REQUEST, 0);
+ break;
+ case IRDMA_CM_EVENT_RESET:
+ irdma_cm_event_reset(event);
+ break;
+ case IRDMA_CM_EVENT_CONNECTED:
+ if (!event->cm_node->cm_id ||
+ event->cm_node->state != IRDMA_CM_STATE_OFFLOADED)
+ break;
+ irdma_cm_event_connected(event);
+ break;
+ case IRDMA_CM_EVENT_MPA_REJECT:
+ if (!event->cm_node->cm_id ||
+ cm_node->state == IRDMA_CM_STATE_OFFLOADED)
+ break;
+ irdma_send_cm_event(cm_node, cm_node->cm_id,
+ IW_CM_EVENT_CONNECT_REPLY, -ECONNREFUSED);
+ break;
+ case IRDMA_CM_EVENT_ABORTED:
+ if (!event->cm_node->cm_id ||
+ event->cm_node->state == IRDMA_CM_STATE_OFFLOADED)
+ break;
+ irdma_event_connect_error(event);
+ break;
+ default:
+ irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
+ "bad event type = %d\n", event->type);
+ break;
+ }
+
+ irdma_rem_ref_cm_node(event->cm_node);
+ kfree(event);
+}
+
+/**
+ * irdma_cm_post_event - queue event request for worker thread
+ * @event: cm event to be queued for the worker thread
+ */
+static void
+irdma_cm_post_event(struct irdma_cm_event *event)
+{
+ atomic_inc(&event->cm_node->refcnt);
+ INIT_WORK(&event->event_work, irdma_cm_event_handler);
+ queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
+}
+
+/**
+ * irdma_cm_teardown_connections - teardown QPs
+ * @iwdev: device pointer
+ * @ipaddr: Pointer to IPv4 or IPv6 address
+ * @nfo: Connection info
+ * @disconnect_all: flag indicating disconnect all QPs
+ *
+ * teardown QPs where source or destination addr matches ip addr
+ */
+void
+irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
+ struct irdma_cm_info *nfo,
+ bool disconnect_all)
+{
+ struct irdma_cm_core *cm_core = &iwdev->cm_core;
+ struct list_head *list_core_temp;
+ struct list_head *list_node;
+ struct irdma_cm_node *cm_node;
+ struct list_head teardown_list;
+ struct ib_qp_attr attr;
+ struct irdma_sc_vsi *vsi = &iwdev->vsi;
+ struct irdma_sc_qp *sc_qp;
+ struct irdma_qp *qp;
+ int i;
+
+ INIT_LIST_HEAD(&teardown_list);
+
+ rcu_read_lock();
+ irdma_teardown_list_prep(cm_core, &teardown_list, ipaddr, nfo, disconnect_all);
+ rcu_read_unlock();
+
+ list_for_each_safe(list_node, list_core_temp, &teardown_list) {
+ cm_node = container_of(list_node, struct irdma_cm_node,
+ teardown_entry);
+ attr.qp_state = IB_QPS_ERR;
+ irdma_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+ if (iwdev->rf->reset)
+ irdma_cm_disconn(cm_node->iwqp);
+ irdma_rem_ref_cm_node(cm_node);
+ }
+ if (!iwdev->roce_mode)
+ return;
+
+ INIT_LIST_HEAD(&teardown_list);
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ mutex_lock(&vsi->qos[i].qos_mutex);
+ list_for_each_safe(list_node, list_core_temp,
+ &vsi->qos[i].qplist) {
+ u32 qp_ip[4];
+
+ sc_qp = container_of(list_node, struct irdma_sc_qp,
+ list);
+ if (sc_qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_RC)
+ continue;
+
+ qp = sc_qp->qp_uk.back_qp;
+ if (!disconnect_all) {
+ if (nfo->ipv4)
+ qp_ip[0] = qp->udp_info.local_ipaddr[3];
+ else
+ memcpy(qp_ip,
+ &qp->udp_info.local_ipaddr[0],
+ sizeof(qp_ip));
+ }
+
+ if (disconnect_all ||
+ (nfo->vlan_id == (qp->udp_info.vlan_tag & EVL_VLID_MASK) &&
+ !memcmp(qp_ip, ipaddr, nfo->ipv4 ? 4 : 16))) {
+ spin_lock(&iwdev->rf->qptable_lock);
+ if (iwdev->rf->qp_table[sc_qp->qp_uk.qp_id]) {
+ irdma_qp_add_ref(&qp->ibqp);
+ list_add(&qp->teardown_entry,
+ &teardown_list);
+ }
+ spin_unlock(&iwdev->rf->qptable_lock);
+ }
+ }
+ mutex_unlock(&vsi->qos[i].qos_mutex);
+ }
+
+ list_for_each_safe(list_node, list_core_temp, &teardown_list) {
+ qp = container_of(list_node, struct irdma_qp, teardown_entry);
+ attr.qp_state = IB_QPS_ERR;
+ irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
+ irdma_qp_rem_ref(&qp->ibqp);
+ }
+}
diff --git a/sys/dev/irdma/irdma_cm.h b/sys/dev/irdma/irdma_cm.h
new file mode 100644
index 000000000000..e66db15a6fff
--- /dev/null
+++ b/sys/dev/irdma/irdma_cm.h
@@ -0,0 +1,453 @@
+/*-
+ * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+ *
+ * Copyright (c) 2015 - 2021 Intel Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+#ifndef IRDMA_CM_H
+#define IRDMA_CM_H
+
+#define IRDMA_MPA_REQUEST_ACCEPT 1
+#define IRDMA_MPA_REQUEST_REJECT 2
+
+/* IETF MPA -- defines */
+#define IEFT_MPA_KEY_REQ "MPA ID Req Frame"
+#define IEFT_MPA_KEY_REP "MPA ID Rep Frame"
+#define IETF_MPA_KEY_SIZE 16
+#define IETF_MPA_VER 1
+#define IETF_MAX_PRIV_DATA_LEN 512
+#define IETF_MPA_FRAME_SIZE 20
+#define IETF_RTR_MSG_SIZE 4
+#define IETF_MPA_V2_FLAG 0x10
+#define SNDMARKER_SEQNMASK 0x000001ff
+#define IRDMA_MAX_IETF_SIZE 32
+
+/* IETF RTR MSG Fields */
+#define IETF_PEER_TO_PEER 0x8000
+#define IETF_FLPDU_ZERO_LEN 0x4000
+#define IETF_RDMA0_WRITE 0x8000
+#define IETF_RDMA0_READ 0x4000
+#define IETF_NO_IRD_ORD 0x3fff
+
+#define MAX_PORTS 65536
+
+#define IRDMA_PASSIVE_STATE_INDICATED 0
+#define IRDMA_DO_NOT_SEND_RESET_EVENT 1
+#define IRDMA_SEND_RESET_EVENT 2
+
+#define MAX_IRDMA_IFS 4
+
+#define SET_ACK 1
+#define SET_SYN 2
+#define SET_FIN 4
+#define SET_RST 8
+
+#define TCP_OPTIONS_PADDING 3
+
+#define IRDMA_DEFAULT_RETRYS 64
+#define IRDMA_DEFAULT_RETRANS 8
+#define IRDMA_DEFAULT_TTL 0x40
+#define IRDMA_DEFAULT_RTT_VAR 6
+#define IRDMA_DEFAULT_SS_THRESH 0x3fffffff
+#define IRDMA_DEFAULT_REXMIT_THRESH 8
+
+#define IRDMA_RETRY_TIMEOUT HZ
+#define IRDMA_SHORT_TIME 10
+#define IRDMA_LONG_TIME (2 * HZ)
+#define IRDMA_MAX_TIMEOUT ((unsigned long)(12 * HZ))
+
+#define IRDMA_CM_HASHTABLE_SIZE 1024
+#define IRDMA_CM_TCP_TIMER_INTERVAL 3000
+#define IRDMA_CM_DEFAULT_MTU 1540
+#define IRDMA_CM_DEFAULT_FRAME_CNT 10
+#define IRDMA_CM_THREAD_STACK_SIZE 256
+#define IRDMA_CM_DEFAULT_RCV_WND 64240
+#define IRDMA_CM_DEFAULT_RCV_WND_SCALED 0x3FFFC
+#define IRDMA_CM_DEFAULT_RCV_WND_SCALE 2
+#define IRDMA_CM_DEFAULT_FREE_PKTS 10
+#define IRDMA_CM_FREE_PKT_LO_WATERMARK 2
+#define IRDMA_CM_DEFAULT_MSS 536
+#define IRDMA_CM_DEFAULT_MPA_VER 2
+#define IRDMA_CM_DEFAULT_SEQ 0x159bf75f
+#define IRDMA_CM_DEFAULT_LOCAL_ID 0x3b47
+#define IRDMA_CM_DEFAULT_SEQ2 0x18ed5740
+#define IRDMA_CM_DEFAULT_LOCAL_ID2 0xb807
+#define IRDMA_MAX_CM_BUF (IRDMA_MAX_IETF_SIZE + IETF_MAX_PRIV_DATA_LEN)
+
+enum ietf_mpa_flags {
+ IETF_MPA_FLAGS_REJECT = 0x20,
+ IETF_MPA_FLAGS_CRC = 0x40,
+ IETF_MPA_FLAGS_MARKERS = 0x80,
+};
+
+enum irdma_timer_type {
+ IRDMA_TIMER_TYPE_SEND,
+ IRDMA_TIMER_TYPE_CLOSE,
+};
+
+enum option_nums {
+ OPTION_NUM_EOL,
+ OPTION_NUM_NONE,
+ OPTION_NUM_MSS,
+ OPTION_NUM_WINDOW_SCALE,
+ OPTION_NUM_SACK_PERM,
+ OPTION_NUM_SACK,
+ OPTION_NUM_WRITE0 = 0xbc,
+};
+
+/* cm node transition states */
+enum irdma_cm_node_state {
+ IRDMA_CM_STATE_UNKNOWN,
+ IRDMA_CM_STATE_INITED,
+ IRDMA_CM_STATE_LISTENING,
+ IRDMA_CM_STATE_SYN_RCVD,
+ IRDMA_CM_STATE_SYN_SENT,
+ IRDMA_CM_STATE_ONE_SIDE_ESTABLISHED,
+ IRDMA_CM_STATE_ESTABLISHED,
+ IRDMA_CM_STATE_ACCEPTING,
+ IRDMA_CM_STATE_MPAREQ_SENT,
+ IRDMA_CM_STATE_MPAREQ_RCVD,
+ IRDMA_CM_STATE_MPAREJ_RCVD,
+ IRDMA_CM_STATE_OFFLOADED,
+ IRDMA_CM_STATE_FIN_WAIT1,
+ IRDMA_CM_STATE_FIN_WAIT2,
+ IRDMA_CM_STATE_CLOSE_WAIT,
+ IRDMA_CM_STATE_TIME_WAIT,
+ IRDMA_CM_STATE_LAST_ACK,
+ IRDMA_CM_STATE_CLOSING,
+ IRDMA_CM_STATE_LISTENER_DESTROYED,
+ IRDMA_CM_STATE_CLOSED,
+};
+
+enum mpa_frame_ver {
+ IETF_MPA_V1 = 1,
+ IETF_MPA_V2 = 2,
+};
+
+enum mpa_frame_key {
+ MPA_KEY_REQUEST,
+ MPA_KEY_REPLY,
+};
+
+enum send_rdma0 {
+ SEND_RDMA_READ_ZERO = 1,
+ SEND_RDMA_WRITE_ZERO = 2,
+};
+
+enum irdma_tcpip_pkt_type {
+ IRDMA_PKT_TYPE_UNKNOWN,
+ IRDMA_PKT_TYPE_SYN,
+ IRDMA_PKT_TYPE_SYNACK,
+ IRDMA_PKT_TYPE_ACK,
+ IRDMA_PKT_TYPE_FIN,
+ IRDMA_PKT_TYPE_RST,
+};
+
+enum irdma_cm_listener_state {
+ IRDMA_CM_LISTENER_PASSIVE_STATE = 1,
+ IRDMA_CM_LISTENER_ACTIVE_STATE = 2,
+ IRDMA_CM_LISTENER_EITHER_STATE = 3,
+};
+
+/* CM event codes */
+enum irdma_cm_event_type {
+ IRDMA_CM_EVENT_UNKNOWN,
+ IRDMA_CM_EVENT_ESTABLISHED,
+ IRDMA_CM_EVENT_MPA_REQ,
+ IRDMA_CM_EVENT_MPA_CONNECT,
+ IRDMA_CM_EVENT_MPA_ACCEPT,
+ IRDMA_CM_EVENT_MPA_REJECT,
+ IRDMA_CM_EVENT_MPA_ESTABLISHED,
+ IRDMA_CM_EVENT_CONNECTED,
+ IRDMA_CM_EVENT_RESET,
+ IRDMA_CM_EVENT_ABORTED,
+};
+
+struct irdma_bth { /* Base Transport Header */
+ u8 opcode;
+ u8 flags;
+ __be16 pkey;
+ __be32 qpn;
+ __be32 apsn;
+};
+
+struct ietf_mpa_v1 {
+ u8 key[IETF_MPA_KEY_SIZE];
+ u8 flags;
+ u8 rev;
+ __be16 priv_data_len;
+ u8 priv_data[];
+};
+
+struct ietf_rtr_msg {
+ __be16 ctrl_ird;
+ __be16 ctrl_ord;
+};
+
+struct ietf_mpa_v2 {
+ u8 key[IETF_MPA_KEY_SIZE];
+ u8 flags;
+ u8 rev;
+ __be16 priv_data_len;
+ struct ietf_rtr_msg rtr_msg;
+ u8 priv_data[];
+};
+
+struct option_base {
+ u8 optionnum;
+ u8 len;
+};
+
+struct option_mss {
+ u8 optionnum;
+ u8 len;
+ __be16 mss;
+};
+
+struct option_windowscale {
+ u8 optionnum;
+ u8 len;
+ u8 shiftcount;
+};
+
+union all_known_options {
+ char eol;
+ struct option_base base;
+ struct option_mss mss;
+ struct option_windowscale windowscale;
+};
+
+struct irdma_timer_entry {
+ struct list_head list;
+ unsigned long timetosend; /* jiffies */
+ struct irdma_puda_buf *sqbuf;
+ u32 type;
+ u32 retrycount;
+ u32 retranscount;
+ u32 context;
+ u32 send_retrans;
+ int close_when_complete;
+};
+
+/* CM context params */
+struct irdma_cm_tcp_context {
+ u8 client;
+ u32 loc_seq_num;
+ u32 loc_ack_num;
+ u32 rem_ack_num;
+ u32 rcv_nxt;
+ u32 loc_id;
+ u32 rem_id;
+ u32 snd_wnd;
+ u32 max_snd_wnd;
+ u32 rcv_wnd;
+ u32 mss;
+ u8 snd_wscale;
+ u8 rcv_wscale;
+};
+
+struct irdma_apbvt_entry {
+ struct hlist_node hlist;
+ u32 use_cnt;
+ u16 port;
+};
+
+struct irdma_cm_listener {
+ struct list_head list;
+ struct iw_cm_id *cm_id;
+ struct irdma_cm_core *cm_core;
+ struct irdma_device *iwdev;
+ struct list_head child_listen_list;
+ struct irdma_apbvt_entry *apbvt_entry;
+ enum irdma_cm_listener_state listener_state;
+ atomic_t refcnt;
+ atomic_t pend_accepts_cnt;
+ u32 loc_addr[4];
+ u32 reused_node;
+ int backlog;
+ u16 loc_port;
+ u16 vlan_id;
+ u8 loc_mac[ETH_ALEN];
+ u8 user_pri;
+ u8 tos;
+ bool qhash_set:1;
+ bool ipv4:1;
+};
+
+struct irdma_kmem_info {
+ void *addr;
+ u32 size;
+};
+
+struct irdma_mpa_priv_info {
+ const void *addr;
+ u32 size;
+};
+
+struct irdma_cm_node {
+ struct irdma_qp *iwqp;
+ struct irdma_device *iwdev;
+ struct irdma_sc_dev *dev;
+ struct irdma_cm_tcp_context tcp_cntxt;
+ struct irdma_cm_core *cm_core;
+ struct irdma_timer_entry *send_entry;
+ struct irdma_timer_entry *close_entry;
+ struct irdma_cm_listener *listener;
+ struct list_head timer_entry;
+ struct list_head reset_entry;
+ struct list_head teardown_entry;
+ struct irdma_apbvt_entry *apbvt_entry;
+ struct rcu_head rcu_head;
+ struct irdma_mpa_priv_info pdata;
+ struct irdma_sc_ah *ah;
+ struct irdma_kmem_info mpa_hdr;
+ struct iw_cm_id *cm_id;
+ struct hlist_node list;
+ struct completion establish_comp;
+	spinlock_t retrans_list_lock; /* protect CM node rexmit updates */
+ atomic_t passive_state;
+ atomic_t refcnt;
+ enum irdma_cm_node_state state;
+ enum send_rdma0 send_rdma0_op;
+ enum mpa_frame_ver mpa_frame_rev;
+ u32 loc_addr[4], rem_addr[4];
+ u16 loc_port, rem_port;
+ int apbvt_set;
+ int accept_pend;
+ u16 vlan_id;
+ u16 ird_size;
+ u16 ord_size;
+ u16 mpav2_ird_ord;
+ u16 lsmm_size;
+ u8 pdata_buf[IETF_MAX_PRIV_DATA_LEN];
+ u8 loc_mac[ETH_ALEN];
+ u8 rem_mac[ETH_ALEN];
+ u8 user_pri;
+ u8 tos;
+ bool ack_rcvd:1;
+ bool qhash_set:1;
+ bool ipv4:1;
+ bool snd_mark_en:1;
+ bool rcv_mark_en:1;
+ bool do_lpb:1;
+ bool accelerated:1;
+ struct ietf_mpa_v2 mpa_v2_frame;
+};
+
+/* Used by internal CM APIs to pass CM information*/
+struct irdma_cm_info {
+ struct iw_cm_id *cm_id;
+ u16 loc_port;
+ u16 rem_port;
+ u32 loc_addr[4];
+ u32 rem_addr[4];
+ u32 qh_qpid;
+ u16 vlan_id;
+ int backlog;
+ u8 user_pri;
+ u8 tos;
+ bool ipv4;
+};
+
+struct irdma_cm_event {
+ enum irdma_cm_event_type type;
+ struct irdma_cm_info cm_info;
+ struct work_struct event_work;
+ struct irdma_cm_node *cm_node;
+};
+
+struct irdma_cm_core {
+ struct irdma_device *iwdev;
+ struct irdma_sc_dev *dev;
+ struct list_head listen_list;
+ DECLARE_HASHTABLE(cm_hash_tbl, 8);
+ DECLARE_HASHTABLE(apbvt_hash_tbl, 8);
+ struct timer_list tcp_timer;
+ struct workqueue_struct *event_wq;
+ spinlock_t ht_lock; /* protect CM node (active side) list */
+ spinlock_t listen_list_lock; /* protect listener list */
+	spinlock_t apbvt_lock; /* serialize apbvt add/del entries */
+ u64 stats_nodes_created;
+ u64 stats_nodes_destroyed;
+ u64 stats_listen_created;
+ u64 stats_listen_destroyed;
+ u64 stats_listen_nodes_created;
+ u64 stats_listen_nodes_destroyed;
+ u64 stats_lpbs;
+ u64 stats_accepts;
+ u64 stats_rejects;
+ u64 stats_connect_errs;
+ u64 stats_passive_errs;
+ u64 stats_pkt_retrans;
+ u64 stats_backlog_drops;
+ struct irdma_puda_buf *(*form_cm_frame)(struct irdma_cm_node *cm_node,
+ struct irdma_kmem_info *options,
+ struct irdma_kmem_info *hdr,
+ struct irdma_mpa_priv_info *pdata,
+ u8 flags);
+ int (*cm_create_ah)(struct irdma_cm_node *cm_node, bool wait);
+ void (*cm_free_ah)(struct irdma_cm_node *cm_node);
+};
+
+int irdma_schedule_cm_timer(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *sqbuf,
+ enum irdma_timer_type type, int send_retrans,
+ int close_when_complete);
+
+static inline u8 irdma_tos2dscp(u8 tos)
+{
+#define IRDMA_DSCP_S 2
+#define IRDMA_DSCP_M (0x3f << IRDMA_DSCP_S)
+ return RS_32(tos, IRDMA_DSCP);
+}
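+
+/*
+ * Worked example (assuming RS_32() extracts the masked field and shifts it
+ * right by IRDMA_DSCP_S): tos = 0xb8 (DSCP 46/EF, ECN 0) yields
+ * (0xb8 & 0xfc) >> 2 = 46, i.e. the upper six TOS bits are returned as DSCP.
+ */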
+
+int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
+int irdma_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
+int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
+int irdma_create_listen(struct iw_cm_id *cm_id, int backlog);
+int irdma_destroy_listen(struct iw_cm_id *cm_id);
+int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, u8 *mac);
+void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
+ struct irdma_cm_info *nfo,
+ bool disconnect_all);
+int irdma_cm_start(struct irdma_device *dev);
+int irdma_cm_stop(struct irdma_device *dev);
+bool irdma_ipv4_is_lpb(u32 loc_addr, u32 rem_addr);
+bool irdma_ipv6_is_lpb(u32 *loc_addr, u32 *rem_addr);
+int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr,
+ u8 *mac_addr, u32 action);
+bool irdma_port_in_use(struct irdma_cm_core *cm_core, u16 port);
+void irdma_send_ack(struct irdma_cm_node *cm_node);
+void irdma_lpb_nop(struct irdma_sc_qp *qp);
+void irdma_rem_ref_cm_node(struct irdma_cm_node *cm_node);
+void irdma_add_conn_est_qh(struct irdma_cm_node *cm_node);
+#endif /* IRDMA_CM_H */
diff --git a/sys/dev/irdma/irdma_ctrl.c b/sys/dev/irdma/irdma_ctrl.c
new file mode 100644
index 000000000000..cfc9b161d643
--- /dev/null
+++ b/sys/dev/irdma/irdma_ctrl.c
@@ -0,0 +1,5644 @@
+/*-
+ * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+ *
+ * Copyright (c) 2015 - 2022 Intel Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenFabrics.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*$FreeBSD$*/
+
+#include "osdep.h"
+#include "irdma_hmc.h"
+#include "irdma_defs.h"
+#include "irdma_type.h"
+#include "irdma_ws.h"
+#include "irdma_protos.h"
+
+/**
+ * irdma_qp_from_entry - given a list entry, get the containing qp structure
+ * @entry: list entry embedded in the qp structure
+ */
+static struct irdma_sc_qp *
+irdma_qp_from_entry(struct list_head *entry)
+{
+ if (!entry)
+ return NULL;
+
+ return (struct irdma_sc_qp *)((char *)entry -
+ offsetof(struct irdma_sc_qp, list));
+}
+
+/**
+ * irdma_get_qp_from_list - get next qp from a list
+ * @head: list head of QPs
+ * @qp: current qp
+ */
+struct irdma_sc_qp *
+irdma_get_qp_from_list(struct list_head *head,
+ struct irdma_sc_qp *qp)
+{
+ struct list_head *lastentry;
+ struct list_head *entry = NULL;
+
+ if (list_empty(head))
+ return NULL;
+
+ if (!qp) {
+ entry = (head)->next;
+ } else {
+ lastentry = &qp->list;
+ entry = (lastentry)->next;
+ if (entry == head)
+ return NULL;
+ }
+
+ return irdma_qp_from_entry(entry);
+}
+
+/**
+ * irdma_sc_suspend_resume_qps - suspend/resume all QPs on the VSI
+ * @vsi: the VSI struct pointer
+ * @op: Set to IRDMA_OP_RESUME or IRDMA_OP_SUSPEND
+ */
+void
+irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 op)
+{
+ struct irdma_sc_qp *qp = NULL;
+ u8 i;
+
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ mutex_lock(&vsi->qos[i].qos_mutex);
+ qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
+ while (qp) {
+ if (op == IRDMA_OP_RESUME) {
+ if (!qp->dev->ws_add(vsi, i)) {
+ qp->qs_handle =
+ vsi->qos[qp->user_pri].qs_handle;
+ irdma_cqp_qp_suspend_resume(qp, op);
+ } else {
+ irdma_cqp_qp_suspend_resume(qp, op);
+ irdma_modify_qp_to_err(qp);
+ }
+ } else if (op == IRDMA_OP_SUSPEND) {
+ /* issue cqp suspend command */
+ if (!irdma_cqp_qp_suspend_resume(qp, op))
+ atomic_inc(&vsi->qp_suspend_reqs);
+ }
+ qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
+ }
+ mutex_unlock(&vsi->qos[i].qos_mutex);
+ }
+}
+
+static void
+irdma_set_qos_info(struct irdma_sc_vsi *vsi, struct irdma_l2params *l2p)
+{
+ u8 i;
+
+ vsi->qos_rel_bw = l2p->vsi_rel_bw;
+ vsi->qos_prio_type = l2p->vsi_prio_type;
+ vsi->dscp_mode = l2p->dscp_mode;
+ if (l2p->dscp_mode) {
+ irdma_memcpy(vsi->dscp_map, l2p->dscp_map, sizeof(vsi->dscp_map));
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
+ l2p->up2tc[i] = i;
+ }
+ for (i = 0; i < IRDMA_MAX_TRAFFIC_CLASS; i++)
+ vsi->tc_print_warning[i] = true;
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ vsi->qos[i].qs_handle = l2p->qs_handle_list[i];
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2)
+ irdma_init_config_check(&vsi->cfg_check[i],
+ l2p->up2tc[i],
+ l2p->qs_handle_list[i]);
+ vsi->qos[i].traffic_class = l2p->up2tc[i];
+ vsi->qos[i].rel_bw =
+ l2p->tc_info[vsi->qos[i].traffic_class].rel_bw;
+ vsi->qos[i].prio_type =
+ l2p->tc_info[vsi->qos[i].traffic_class].prio_type;
+ vsi->qos[i].valid = false;
+ }
+}
+
+/**
+ * irdma_change_l2params - given the new l2 parameters, update the VSI and its qps
+ * @vsi: RDMA VSI pointer
+ * @l2params: New parameters from l2
+ */
+void
+irdma_change_l2params(struct irdma_sc_vsi *vsi,
+ struct irdma_l2params *l2params)
+{
+ if (l2params->mtu_changed) {
+ vsi->mtu = l2params->mtu;
+ if (vsi->ieq)
+ irdma_reinitialize_ieq(vsi);
+ }
+
+ if (!l2params->tc_changed)
+ return;
+
+ vsi->tc_change_pending = false;
+ irdma_set_qos_info(vsi, l2params);
+ irdma_sc_suspend_resume_qps(vsi, IRDMA_OP_RESUME);
+}
+
+/**
+ * irdma_qp_rem_qos - remove qp from qos lists during destroy qp
+ * @qp: qp to be removed from qos
+ */
+void
+irdma_qp_rem_qos(struct irdma_sc_qp *qp)
+{
+ struct irdma_sc_vsi *vsi = qp->vsi;
+
+ irdma_debug(qp->dev, IRDMA_DEBUG_DCB,
+ "DCB: Remove qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
+ qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle, qp->on_qoslist);
+ mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
+ if (qp->on_qoslist) {
+ qp->on_qoslist = false;
+ list_del(&qp->list);
+ }
+ mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
+}
+
+/**
+ * irdma_qp_add_qos - called during setctx for qp to be added to qos
+ * @qp: qp to be added to qos
+ */
+void
+irdma_qp_add_qos(struct irdma_sc_qp *qp)
+{
+ struct irdma_sc_vsi *vsi = qp->vsi;
+
+ irdma_debug(qp->dev, IRDMA_DEBUG_DCB,
+ "DCB: Add qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
+ qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle, qp->on_qoslist);
+ mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
+ if (!qp->on_qoslist) {
+ list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
+ qp->on_qoslist = true;
+ qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
+ }
+ mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
+}
+
+/**
+ * irdma_sc_pd_init - initialize sc pd struct
+ * @dev: sc device struct
+ * @pd: sc pd ptr
+ * @pd_id: pd_id for allocated pd
+ * @abi_ver: User/Kernel ABI version
+ */
+void
+irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
+ int abi_ver)
+{
+ pd->pd_id = pd_id;
+ pd->abi_ver = abi_ver;
+ pd->dev = dev;
+}
+
+/**
+ * irdma_sc_add_arp_cache_entry - cqp wqe to add arp cache entry
+ * @cqp: struct for cqp hw
+ * @info: arp entry information
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int
+irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_add_arp_cache_entry_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 temp, hdr;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+ set_64bit_val(wqe, IRDMA_BYTE_8, info->reach_max);
+
+ temp = info->mac_addr[5] | LS_64_1(info->mac_addr[4], 8) |
+ LS_64_1(info->mac_addr[3], 16) | LS_64_1(info->mac_addr[2], 24) |
+ LS_64_1(info->mac_addr[1], 32) | LS_64_1(info->mac_addr[0], 40);
+ set_64bit_val(wqe, IRDMA_BYTE_16, temp);
+
+ hdr = info->arp_index |
+ LS_64(IRDMA_CQP_OP_MANAGE_ARP, IRDMA_CQPSQ_OPCODE) |
+ LS_64((info->permanent ? 1 : 0), IRDMA_CQPSQ_MAT_PERMANENT) |
+ LS_64(1, IRDMA_CQPSQ_MAT_ENTRYVALID) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ARP_CACHE_ENTRY WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_del_arp_cache_entry - delete arp cache entry
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @arp_index: arp index to delete arp entry
+ * @post_sq: flag for cqp db to ring
+ */
+static int
+irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
+ u16 arp_index, bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ hdr = arp_index | LS_64(IRDMA_CQP_OP_MANAGE_ARP, IRDMA_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ARP_CACHE_DEL_ENTRY WQE",
+ wqe, IRDMA_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_manage_apbvt_entry - for adding and deleting apbvt entries
+ * @cqp: struct for cqp hw
+ * @info: info for apbvt entry to add or delete
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int
+irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_apbvt_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ set_64bit_val(wqe, IRDMA_BYTE_16, info->port);
+
+ hdr = LS_64(IRDMA_CQP_OP_MANAGE_APBVT, IRDMA_CQPSQ_OPCODE) |
+ LS_64(info->add, IRDMA_CQPSQ_MAPT_ADDPORT) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_APBVT WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_manage_qhash_table_entry - manage quad hash entries
+ * @cqp: struct for cqp hw
+ * @info: info for quad hash to manage
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ *
+ * This is called before connection establishment is started.
+ * For passive connections, when a listener is created, it is
+ * called with an entry type of IRDMA_QHASH_TYPE_TCP_SYN and the
+ * local ip address and tcp port. When a SYN is received (passive
+ * connections) or sent (active connections), this routine is
+ * called with an entry type of IRDMA_QHASH_TYPE_TCP_ESTABLISHED
+ * and the quad is passed in info.
+ *
+ * Once the iwarp connection is established and its state moves to
+ * RTS, the quad hash entry in the hardware points to the iwarp qp
+ * number and requires no further calls from the driver.
+ */
+static int
+irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_qhash_table_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 qw1 = 0;
+ u64 qw2 = 0;
+ u64 temp;
+ struct irdma_sc_vsi *vsi = info->vsi;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+ temp = info->mac_addr[5] | LS_64_1(info->mac_addr[4], 8) |
+ LS_64_1(info->mac_addr[3], 16) | LS_64_1(info->mac_addr[2], 24) |
+ LS_64_1(info->mac_addr[1], 32) | LS_64_1(info->mac_addr[0], 40);
+ set_64bit_val(wqe, IRDMA_BYTE_0, temp);
+
+ qw1 = LS_64(info->qp_num, IRDMA_CQPSQ_QHASH_QPN) |
+ LS_64(info->dest_port, IRDMA_CQPSQ_QHASH_DEST_PORT);
+ if (info->ipv4_valid) {
+ set_64bit_val(wqe, IRDMA_BYTE_48,
+ LS_64(info->dest_ip[0], IRDMA_CQPSQ_QHASH_ADDR3));
+ } else {
+ set_64bit_val(wqe, IRDMA_BYTE_56,
+ LS_64(info->dest_ip[0], IRDMA_CQPSQ_QHASH_ADDR0) |
+ LS_64(info->dest_ip[1], IRDMA_CQPSQ_QHASH_ADDR1));
+
+ set_64bit_val(wqe, IRDMA_BYTE_48,
+ LS_64(info->dest_ip[2], IRDMA_CQPSQ_QHASH_ADDR2) |
+ LS_64(info->dest_ip[3], IRDMA_CQPSQ_QHASH_ADDR3));
+ }
+ qw2 = LS_64(vsi->qos[info->user_pri].qs_handle,
+ IRDMA_CQPSQ_QHASH_QS_HANDLE);
+ if (info->vlan_valid)
+ qw2 |= LS_64(info->vlan_id, IRDMA_CQPSQ_QHASH_VLANID);
+ set_64bit_val(wqe, IRDMA_BYTE_16, qw2);
+ if (info->entry_type == IRDMA_QHASH_TYPE_TCP_ESTABLISHED) {
+ qw1 |= LS_64(info->src_port, IRDMA_CQPSQ_QHASH_SRC_PORT);
+ if (!info->ipv4_valid) {
+ set_64bit_val(wqe, IRDMA_BYTE_40,
+ LS_64(info->src_ip[0], IRDMA_CQPSQ_QHASH_ADDR0) |
+ LS_64(info->src_ip[1], IRDMA_CQPSQ_QHASH_ADDR1));
+ set_64bit_val(wqe, IRDMA_BYTE_32,
+ LS_64(info->src_ip[2], IRDMA_CQPSQ_QHASH_ADDR2) |
+ LS_64(info->src_ip[3], IRDMA_CQPSQ_QHASH_ADDR3));
+ } else {
+ set_64bit_val(wqe, IRDMA_BYTE_32,
+ LS_64(info->src_ip[0], IRDMA_CQPSQ_QHASH_ADDR3));
+ }
+ }
+
+ set_64bit_val(wqe, IRDMA_BYTE_8, qw1);
+ temp = LS_64(cqp->polarity, IRDMA_CQPSQ_QHASH_WQEVALID) |
+ LS_64(IRDMA_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY,
+ IRDMA_CQPSQ_QHASH_OPCODE) |
+ LS_64(info->manage, IRDMA_CQPSQ_QHASH_MANAGE) |
+ LS_64(info->ipv4_valid, IRDMA_CQPSQ_QHASH_IPV4VALID) |
+ LS_64(info->vlan_valid, IRDMA_CQPSQ_QHASH_VLANVALID) |
+ LS_64(info->entry_type, IRDMA_CQPSQ_QHASH_ENTRYTYPE);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, temp);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_QHASH WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
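+
+/*
+ * Illustrative sketch only (not part of this driver's flow): a listener-side
+ * caller might populate struct irdma_qhash_table_info roughly as below before
+ * posting the WQE. Field names are taken from this routine; the values and the
+ * surrounding variables are hypothetical.
+ *
+ *	struct irdma_qhash_table_info qinfo = {0};
+ *
+ *	qinfo.vsi = vsi;
+ *	qinfo.manage = IRDMA_QHASH_MANAGE_TYPE_ADD;
+ *	qinfo.entry_type = IRDMA_QHASH_TYPE_TCP_SYN;
+ *	qinfo.ipv4_valid = 1;
+ *	qinfo.vlan_valid = 0;
+ *	qinfo.user_pri = 0;
+ *	qinfo.qp_num = qp_id;
+ *	qinfo.dest_port = listen_port;
+ *	qinfo.dest_ip[0] = listen_ipv4_addr;
+ *	irdma_sc_manage_qhash_table_entry(cqp, &qinfo, scratch, true);
+ */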
+
+/**
+ * irdma_sc_qp_init - initialize qp
+ * @qp: sc qp
+ * @info: initialization qp info
+ */
+int
+irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
+{
+ int ret_code;
+ u32 pble_obj_cnt;
+ u16 wqe_size;
+
+ if (info->qp_uk_init_info.max_sq_frag_cnt >
+ info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags ||
+ info->qp_uk_init_info.max_rq_frag_cnt >
+ info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags)
+ return -EINVAL;
+
+ qp->dev = info->pd->dev;
+ qp->vsi = info->vsi;
+ qp->ieq_qp = info->vsi->exception_lan_q;
+ qp->sq_pa = info->sq_pa;
+ qp->rq_pa = info->rq_pa;
+ qp->hw_host_ctx_pa = info->host_ctx_pa;
+ qp->q2_pa = info->q2_pa;
+ qp->shadow_area_pa = info->shadow_area_pa;
+ qp->q2_buf = info->q2;
+ qp->pd = info->pd;
+ qp->hw_host_ctx = info->host_ctx;
+ info->qp_uk_init_info.wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
+ ret_code = irdma_uk_qp_init(&qp->qp_uk, &info->qp_uk_init_info);
+ if (ret_code)
+ return ret_code;
+
+ qp->virtual_map = info->virtual_map;
+ pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+
+ if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) ||
+ (info->virtual_map && info->rq_pa >= pble_obj_cnt))
+ return -EINVAL;
+
+ qp->llp_stream_handle = (void *)(-1);
+ qp->qp_uk.force_fence = true;
+ qp->hw_sq_size = irdma_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
+ IRDMA_QUEUE_TYPE_SQ_RQ);
+ irdma_debug(qp->dev, IRDMA_DEBUG_WQE,
+ "hw_sq_size[%04d] sq_ring.size[%04d]\n", qp->hw_sq_size,
+ qp->qp_uk.sq_ring.size);
+ if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1)
+ wqe_size = IRDMA_WQE_SIZE_128;
+ else
+ ret_code = irdma_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
+ &wqe_size);
+ if (ret_code)
+ return ret_code;
+
+ qp->hw_rq_size =
+ irdma_get_encoded_wqe_size(qp->qp_uk.rq_size *
+ (wqe_size / IRDMA_QP_WQE_MIN_SIZE),
+ IRDMA_QUEUE_TYPE_SQ_RQ);
+ irdma_debug(qp->dev, IRDMA_DEBUG_WQE,
+ "hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
+ qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
+
+ qp->sq_tph_val = info->sq_tph_val;
+ qp->rq_tph_val = info->rq_tph_val;
+ qp->sq_tph_en = info->sq_tph_en;
+ qp->rq_tph_en = info->rq_tph_en;
+ qp->rcv_tph_en = info->rcv_tph_en;
+ qp->xmit_tph_en = info->xmit_tph_en;
+ qp->qp_uk.first_sq_wq = info->qp_uk_init_info.first_sq_wq;
+ qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_create - create qp
+ * @qp: sc qp
+ * @info: qp create info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+int
+irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info,
+ u64 scratch, bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+
+ cqp = qp->dev->cqp;
+ if (qp->qp_uk.qp_id < cqp->dev->hw_attrs.min_hw_qp_id ||
+ qp->qp_uk.qp_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt - 1))
+ return -EINVAL;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ set_64bit_val(wqe, IRDMA_BYTE_16, qp->hw_host_ctx_pa);
+ set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa);
+
+ hdr = qp->qp_uk.qp_id |
+ LS_64(IRDMA_CQP_OP_CREATE_QP, IRDMA_CQPSQ_OPCODE) |
+ LS_64((info->ord_valid ? 1 : 0), IRDMA_CQPSQ_QP_ORDVALID) |
+ LS_64(info->tcp_ctx_valid, IRDMA_CQPSQ_QP_TOECTXVALID) |
+ LS_64(info->mac_valid, IRDMA_CQPSQ_QP_MACVALID) |
+ LS_64(qp->qp_uk.qp_type, IRDMA_CQPSQ_QP_QPTYPE) |
+ LS_64(qp->virtual_map, IRDMA_CQPSQ_QP_VQ) |
+ LS_64(info->force_lpb, IRDMA_CQPSQ_QP_FORCELOOPBACK) |
+ LS_64(info->cq_num_valid, IRDMA_CQPSQ_QP_CQNUMVALID) |
+ LS_64(info->arp_cache_idx_valid, IRDMA_CQPSQ_QP_ARPTABIDXVALID) |
+ LS_64(info->next_iwarp_state, IRDMA_CQPSQ_QP_NEXTIWSTATE) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QP_CREATE WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_modify - modify qp cqp wqe
+ * @qp: sc qp
+ * @info: modify qp info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+int
+irdma_sc_qp_modify(struct irdma_sc_qp *qp, struct irdma_modify_qp_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+ u8 term_actions = 0;
+ u8 term_len = 0;
+
+ cqp = qp->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ if (info->next_iwarp_state == IRDMA_QP_STATE_TERMINATE) {
+ if (info->dont_send_fin)
+ term_actions += IRDMAQP_TERM_SEND_TERM_ONLY;
+ if (info->dont_send_term)
+ term_actions += IRDMAQP_TERM_SEND_FIN_ONLY;
+ if (term_actions == IRDMAQP_TERM_SEND_TERM_AND_FIN ||
+ term_actions == IRDMAQP_TERM_SEND_TERM_ONLY)
+ term_len = info->termlen;
+ }
+
+ set_64bit_val(wqe, IRDMA_BYTE_8,
+ LS_64(info->new_mss, IRDMA_CQPSQ_QP_NEWMSS) |
+ LS_64(term_len, IRDMA_CQPSQ_QP_TERMLEN));
+ set_64bit_val(wqe, IRDMA_BYTE_16, qp->hw_host_ctx_pa);
+ set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa);
+
+ hdr = qp->qp_uk.qp_id |
+ LS_64(IRDMA_CQP_OP_MODIFY_QP, IRDMA_CQPSQ_OPCODE) |
+ LS_64(info->ord_valid, IRDMA_CQPSQ_QP_ORDVALID) |
+ LS_64(info->tcp_ctx_valid, IRDMA_CQPSQ_QP_TOECTXVALID) |
+ LS_64(info->cached_var_valid, IRDMA_CQPSQ_QP_CACHEDVARVALID) |
+ LS_64(qp->virtual_map, IRDMA_CQPSQ_QP_VQ) |
+ LS_64(info->force_lpb, IRDMA_CQPSQ_QP_FORCELOOPBACK) |
+ LS_64(info->cq_num_valid, IRDMA_CQPSQ_QP_CQNUMVALID) |
+ LS_64(info->mac_valid, IRDMA_CQPSQ_QP_MACVALID) |
+ LS_64(qp->qp_uk.qp_type, IRDMA_CQPSQ_QP_QPTYPE) |
+ LS_64(info->mss_change, IRDMA_CQPSQ_QP_MSSCHANGE) |
+ LS_64(info->remove_hash_idx, IRDMA_CQPSQ_QP_REMOVEHASHENTRY) |
+ LS_64(term_actions, IRDMA_CQPSQ_QP_TERMACT) |
+ LS_64(info->reset_tcp_conn, IRDMA_CQPSQ_QP_RESETCON) |
+ LS_64(info->arp_cache_idx_valid, IRDMA_CQPSQ_QP_ARPTABIDXVALID) |
+ LS_64(info->next_iwarp_state, IRDMA_CQPSQ_QP_NEXTIWSTATE) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QP_MODIFY WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_destroy - cqp destroy qp
+ * @qp: sc qp
+ * @scratch: u64 saved to be used during cqp completion
+ * @remove_hash_idx: flag indicating whether to remove the hash idx
+ * @ignore_mw_bnd: memory window bind flag
+ * @post_sq: flag for cqp db to ring
+ */
+int
+irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
+ bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+
+ cqp = qp->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ set_64bit_val(wqe, IRDMA_BYTE_16, qp->hw_host_ctx_pa);
+ set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa);
+
+ hdr = qp->qp_uk.qp_id |
+ LS_64(IRDMA_CQP_OP_DESTROY_QP, IRDMA_CQPSQ_OPCODE) |
+ LS_64(qp->qp_uk.qp_type, IRDMA_CQPSQ_QP_QPTYPE) |
+ LS_64(ignore_mw_bnd, IRDMA_CQPSQ_QP_IGNOREMWBOUND) |
+ LS_64(remove_hash_idx, IRDMA_CQPSQ_QP_REMOVEHASHENTRY) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QP_DESTROY WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_get_encoded_ird_size - get the encoded IRD size
+ * @ird_size: IRD size
+ * The ird from the connection is rounded to a supported HW setting and then encoded
+ * for the ird_size field of qp_ctx. Consumers are expected to provide a valid ird size
+ * based on hardware attributes. IRD size defaults to a value of 4 in case of invalid input.
+ */
+static u8
+irdma_sc_get_encoded_ird_size(u16 ird_size)
+{
+ switch (ird_size ?
+ roundup_pow_of_two(2 * ird_size) : 4) {
+ case 256:
+ return IRDMA_IRD_HW_SIZE_256;
+ case 128:
+ return IRDMA_IRD_HW_SIZE_128;
+ case 64:
+ case 32:
+ return IRDMA_IRD_HW_SIZE_64;
+ case 16:
+ case 8:
+ return IRDMA_IRD_HW_SIZE_16;
+ case 4:
+ default:
+ break;
+ }
+
+ return IRDMA_IRD_HW_SIZE_4;
+}
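+
+/*
+ * Worked example: an ird_size of 10 is doubled to 20 and rounded up to the
+ * next power of two, 32, which maps to IRDMA_IRD_HW_SIZE_64; an ird_size of
+ * 0 falls back to 4 and maps to IRDMA_IRD_HW_SIZE_4.
+ */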
+
+/**
+ * irdma_sc_qp_setctx_roce - set qp's context
+ * @qp: sc qp
+ * @qp_ctx: context ptr
+ * @info: ctx info
+ */
+void
+irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 * qp_ctx,
+ struct irdma_qp_host_ctx_info *info)
+{
+ struct irdma_roce_offload_info *roce_info;
+ struct irdma_udp_offload_info *udp;
+ u8 push_mode_en;
+ u32 push_idx;
+ u64 mac;
+
+ roce_info = info->roce_info;
+ udp = info->udp_info;
+
+ mac = LS_64_1(roce_info->mac_addr[5], 16) |
+ LS_64_1(roce_info->mac_addr[4], 24) |
+ LS_64_1(roce_info->mac_addr[3], 32) |
+ LS_64_1(roce_info->mac_addr[2], 40) |
+ LS_64_1(roce_info->mac_addr[1], 48) |
+ LS_64_1(roce_info->mac_addr[0], 56);
+
+ qp->user_pri = info->user_pri;
+ if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
+ push_mode_en = 0;
+ push_idx = 0;
+ } else {
+ push_mode_en = 1;
+ push_idx = qp->push_idx;
+ }
+ set_64bit_val(qp_ctx, IRDMA_BYTE_0,
+ LS_64(qp->qp_uk.rq_wqe_size, IRDMAQPC_RQWQESIZE) |
+ LS_64(qp->rcv_tph_en, IRDMAQPC_RCVTPHEN) |
+ LS_64(qp->xmit_tph_en, IRDMAQPC_XMITTPHEN) |
+ LS_64(qp->rq_tph_en, IRDMAQPC_RQTPHEN) |
+ LS_64(qp->sq_tph_en, IRDMAQPC_SQTPHEN) |
+ LS_64(push_idx, IRDMAQPC_PPIDX) |
+ LS_64(push_mode_en, IRDMAQPC_PMENA) |
+ LS_64(roce_info->pd_id >> 16, IRDMAQPC_PDIDXHI) |
+ LS_64(roce_info->dctcp_en, IRDMAQPC_DC_TCP_EN) |
+ LS_64(roce_info->err_rq_idx_valid, IRDMAQPC_ERR_RQ_IDX_VALID) |
+ LS_64(roce_info->is_qp1, IRDMAQPC_ISQP1) |
+ LS_64(roce_info->roce_tver, IRDMAQPC_ROCE_TVER) |
+ LS_64(udp->ipv4, IRDMAQPC_IPV4) |
+ LS_64(udp->insert_vlan_tag, IRDMAQPC_INSERTVLANTAG));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_8, qp->sq_pa);
+ set_64bit_val(qp_ctx, IRDMA_BYTE_16, qp->rq_pa);
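+	/*
+	 * DCQCN and DCTCP depend on ECN marking; when either is enabled,
+	 * overwrite the ECN bits of the TOS byte with the driver's ECN
+	 * codepoint (ECN_CODE_PT_VAL).
+	 */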
+ if (roce_info->dcqcn_en || roce_info->dctcp_en) {
+ udp->tos &= ~ECN_CODE_PT_MASK;
+ udp->tos |= ECN_CODE_PT_VAL;
+ }
+
+ set_64bit_val(qp_ctx, IRDMA_BYTE_24,
+ LS_64(qp->hw_rq_size, IRDMAQPC_RQSIZE) |
+ LS_64(qp->hw_sq_size, IRDMAQPC_SQSIZE) |
+ LS_64(udp->ttl, IRDMAQPC_TTL) | LS_64(udp->tos, IRDMAQPC_TOS) |
+ LS_64(udp->src_port, IRDMAQPC_SRCPORTNUM) |
+ LS_64(udp->dst_port, IRDMAQPC_DESTPORTNUM));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_32,
+ LS_64(udp->dest_ip_addr[2], IRDMAQPC_DESTIPADDR2) |
+ LS_64(udp->dest_ip_addr[3], IRDMAQPC_DESTIPADDR3));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_40,
+ LS_64(udp->dest_ip_addr[0], IRDMAQPC_DESTIPADDR0) |
+ LS_64(udp->dest_ip_addr[1], IRDMAQPC_DESTIPADDR1));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_48,
+ LS_64(udp->snd_mss, IRDMAQPC_SNDMSS) |
+ LS_64(udp->vlan_tag, IRDMAQPC_VLANTAG) |
+ LS_64(udp->arp_idx, IRDMAQPC_ARPIDX));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_56,
+ LS_64(roce_info->p_key, IRDMAQPC_PKEY) |
+ LS_64(roce_info->pd_id, IRDMAQPC_PDIDX) |
+ LS_64(roce_info->ack_credits, IRDMAQPC_ACKCREDITS) |
+ LS_64(udp->flow_label, IRDMAQPC_FLOWLABEL));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_64,
+ LS_64(roce_info->qkey, IRDMAQPC_QKEY) |
+ LS_64(roce_info->dest_qp, IRDMAQPC_DESTQP));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_80,
+ LS_64(udp->psn_nxt, IRDMAQPC_PSNNXT) |
+ LS_64(udp->lsn, IRDMAQPC_LSN));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_88, LS_64(udp->epsn, IRDMAQPC_EPSN));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_96,
+ LS_64(udp->psn_max, IRDMAQPC_PSNMAX) |
+ LS_64(udp->psn_una, IRDMAQPC_PSNUNA));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_112,
+ LS_64(udp->cwnd, IRDMAQPC_CWNDROCE));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_128,
+ LS_64(roce_info->err_rq_idx, IRDMAQPC_ERR_RQ_IDX) |
+ LS_64(udp->rnr_nak_thresh, IRDMAQPC_RNRNAK_THRESH) |
+ LS_64(udp->rexmit_thresh, IRDMAQPC_REXMIT_THRESH) |
+ LS_64(roce_info->rtomin, IRDMAQPC_RTOMIN));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_136,
+ LS_64(info->send_cq_num, IRDMAQPC_TXCQNUM) |
+ LS_64(info->rcv_cq_num, IRDMAQPC_RXCQNUM));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_144,
+ LS_64(info->stats_idx, IRDMAQPC_STAT_INDEX));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_152, mac);
+ set_64bit_val(qp_ctx, IRDMA_BYTE_160,
+ LS_64(roce_info->ord_size, IRDMAQPC_ORDSIZE) |
+ LS_64(irdma_sc_get_encoded_ird_size(roce_info->ird_size), IRDMAQPC_IRDSIZE) |
+ LS_64(roce_info->wr_rdresp_en, IRDMAQPC_WRRDRSPOK) |
+ LS_64(roce_info->rd_en, IRDMAQPC_RDOK) |
+ LS_64(info->stats_idx_valid, IRDMAQPC_USESTATSINSTANCE) |
+ LS_64(roce_info->bind_en, IRDMAQPC_BINDEN) |
+ LS_64(roce_info->fast_reg_en, IRDMAQPC_FASTREGEN) |
+ LS_64(roce_info->dcqcn_en, IRDMAQPC_DCQCNENABLE) |
+ LS_64(roce_info->rcv_no_icrc, IRDMAQPC_RCVNOICRC) |
+ LS_64(roce_info->fw_cc_enable, IRDMAQPC_FW_CC_ENABLE) |
+ LS_64(roce_info->udprivcq_en, IRDMAQPC_UDPRIVCQENABLE) |
+ LS_64(roce_info->priv_mode_en, IRDMAQPC_PRIVEN) |
+ LS_64(roce_info->timely_en, IRDMAQPC_TIMELYENABLE));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_168,
+ LS_64(info->qp_compl_ctx, IRDMAQPC_QPCOMPCTX));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_176,
+ LS_64(qp->sq_tph_val, IRDMAQPC_SQTPHVAL) |
+ LS_64(qp->rq_tph_val, IRDMAQPC_RQTPHVAL) |
+ LS_64(qp->qs_handle, IRDMAQPC_QSHANDLE));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_184,
+ LS_64(udp->local_ipaddr[3], IRDMAQPC_LOCAL_IPADDR3) |
+ LS_64(udp->local_ipaddr[2], IRDMAQPC_LOCAL_IPADDR2));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_192,
+ LS_64(udp->local_ipaddr[1], IRDMAQPC_LOCAL_IPADDR1) |
+ LS_64(udp->local_ipaddr[0], IRDMAQPC_LOCAL_IPADDR0));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_200,
+ LS_64(roce_info->t_high, IRDMAQPC_THIGH) |
+ LS_64(roce_info->t_low, IRDMAQPC_TLOW));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_208,
+ LS_64(info->rem_endpoint_idx, IRDMAQPC_REMENDPOINTIDX));
+
+ irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "QP_HOST CTX WQE", qp_ctx,
+ IRDMA_QP_CTX_SIZE);
+}
+
+/**
+ * irdma_sc_alloc_local_mac_entry - allocate a mac entry
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int
+irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
+ bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ hdr = LS_64(IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY,
+ IRDMA_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ALLOCATE_LOCAL_MAC WQE",
+ wqe, IRDMA_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * irdma_sc_add_local_mac_entry - add mac entry
+ * @cqp: struct for cqp hw
+ * @info: mac addr info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int
+irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_local_mac_entry_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 temp, header;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+ temp = info->mac_addr[5] | LS_64_1(info->mac_addr[4], 8) |
+ LS_64_1(info->mac_addr[3], 16) | LS_64_1(info->mac_addr[2], 24) |
+ LS_64_1(info->mac_addr[1], 32) | LS_64_1(info->mac_addr[0], 40);
+
+ set_64bit_val(wqe, IRDMA_BYTE_32, temp);
+
+ header = LS_64(info->entry_idx, IRDMA_CQPSQ_MLM_TABLEIDX) |
+ LS_64(IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE, IRDMA_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, header);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ADD_LOCAL_MAC WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * irdma_sc_del_local_mac_entry - cqp wqe to delete local mac entry
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @entry_idx: index of mac entry
+ * @ignore_ref_count: to force mac addr delete
+ * @post_sq: flag for cqp db to ring
+ */
+static int
+irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
+ u16 entry_idx, u8 ignore_ref_count,
+ bool post_sq)
+{
+ __le64 *wqe;
+ u64 header;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+ header = LS_64(entry_idx, IRDMA_CQPSQ_MLM_TABLEIDX) |
+ LS_64(IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE, IRDMA_CQPSQ_OPCODE) |
+ LS_64(1, IRDMA_CQPSQ_MLM_FREEENTRY) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID) |
+ LS_64(ignore_ref_count, IRDMA_CQPSQ_MLM_IGNORE_REF_CNT);
+
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, header);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "DEL_LOCAL_MAC_IPADDR WQE",
+ wqe, IRDMA_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_setctx - set qp's context
+ * @qp: sc qp
+ * @qp_ctx: context ptr
+ * @info: ctx info
+ */
+void
+irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 * qp_ctx,
+ struct irdma_qp_host_ctx_info *info)
+{
+ struct irdma_iwarp_offload_info *iw;
+ struct irdma_tcp_offload_info *tcp;
+ struct irdma_sc_dev *dev;
+ u8 push_mode_en;
+ u32 push_idx;
+ u64 qw0, qw3, qw7 = 0, qw16 = 0;
+ u64 mac = 0;
+
+ iw = info->iwarp_info;
+ tcp = info->tcp_info;
+ dev = qp->dev;
+ if (iw->rcv_mark_en) {
+ qp->pfpdu.marker_len = 4;
+ qp->pfpdu.rcv_start_seq = tcp->rcv_nxt;
+ }
+ qp->user_pri = info->user_pri;
+ if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
+ push_mode_en = 0;
+ push_idx = 0;
+ } else {
+ push_mode_en = 1;
+ push_idx = qp->push_idx;
+ }
+ qw0 = LS_64(qp->qp_uk.rq_wqe_size, IRDMAQPC_RQWQESIZE) |
+ LS_64(qp->rcv_tph_en, IRDMAQPC_RCVTPHEN) |
+ LS_64(qp->xmit_tph_en, IRDMAQPC_XMITTPHEN) |
+ LS_64(qp->rq_tph_en, IRDMAQPC_RQTPHEN) |
+ LS_64(qp->sq_tph_en, IRDMAQPC_SQTPHEN) |
+ LS_64(push_idx, IRDMAQPC_PPIDX) |
+ LS_64(push_mode_en, IRDMAQPC_PMENA);
+
+ set_64bit_val(qp_ctx, IRDMA_BYTE_8, qp->sq_pa);
+ set_64bit_val(qp_ctx, IRDMA_BYTE_16, qp->rq_pa);
+
+ qw3 = LS_64(qp->hw_rq_size, IRDMAQPC_RQSIZE) |
+ LS_64(qp->hw_sq_size, IRDMAQPC_SQSIZE);
+ if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ qw3 |= LS_64(qp->src_mac_addr_idx, IRDMAQPC_GEN1_SRCMACADDRIDX);
+ set_64bit_val(qp_ctx, IRDMA_BYTE_136,
+ LS_64(info->send_cq_num, IRDMAQPC_TXCQNUM) |
+ LS_64(info->rcv_cq_num, IRDMAQPC_RXCQNUM));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_168,
+ LS_64(info->qp_compl_ctx, IRDMAQPC_QPCOMPCTX));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_176,
+ LS_64(qp->sq_tph_val, IRDMAQPC_SQTPHVAL) |
+ LS_64(qp->rq_tph_val, IRDMAQPC_RQTPHVAL) |
+ LS_64(qp->qs_handle, IRDMAQPC_QSHANDLE) |
+ LS_64(qp->ieq_qp, IRDMAQPC_EXCEPTION_LAN_QUEUE));
+ if (info->iwarp_info_valid) {
+ qw0 |= LS_64(iw->ddp_ver, IRDMAQPC_DDP_VER) |
+ LS_64(iw->rdmap_ver, IRDMAQPC_RDMAP_VER) |
+ LS_64(iw->dctcp_en, IRDMAQPC_DC_TCP_EN) |
+ LS_64(iw->ecn_en, IRDMAQPC_ECN_EN) |
+ LS_64(iw->ib_rd_en, IRDMAQPC_IBRDENABLE) |
+ LS_64(iw->pd_id >> 16, IRDMAQPC_PDIDXHI) |
+ LS_64(iw->err_rq_idx_valid, IRDMAQPC_ERR_RQ_IDX_VALID);
+ qw7 |= LS_64(iw->pd_id, IRDMAQPC_PDIDX);
+ qw16 |= LS_64(iw->err_rq_idx, IRDMAQPC_ERR_RQ_IDX) |
+ LS_64(iw->rtomin, IRDMAQPC_RTOMIN);
+ set_64bit_val(qp_ctx, IRDMA_BYTE_144,
+ LS_64(qp->q2_pa >> 8, IRDMAQPC_Q2ADDR) |
+ LS_64(info->stats_idx, IRDMAQPC_STAT_INDEX));
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ mac = LS_64_1(iw->mac_addr[5], 16) |
+ LS_64_1(iw->mac_addr[4], 24) |
+ LS_64_1(iw->mac_addr[3], 32) |
+ LS_64_1(iw->mac_addr[2], 40) |
+ LS_64_1(iw->mac_addr[1], 48) |
+ LS_64_1(iw->mac_addr[0], 56);
+ }
+
+ set_64bit_val(qp_ctx, IRDMA_BYTE_152,
+ mac | LS_64(iw->last_byte_sent, IRDMAQPC_LASTBYTESENT));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_160,
+ LS_64(iw->ord_size, IRDMAQPC_ORDSIZE) |
+ LS_64(irdma_sc_get_encoded_ird_size(iw->ird_size), IRDMAQPC_IRDSIZE) |
+ LS_64(iw->wr_rdresp_en, IRDMAQPC_WRRDRSPOK) |
+ LS_64(iw->rd_en, IRDMAQPC_RDOK) |
+ LS_64(iw->snd_mark_en, IRDMAQPC_SNDMARKERS) |
+ LS_64(iw->bind_en, IRDMAQPC_BINDEN) |
+ LS_64(iw->fast_reg_en, IRDMAQPC_FASTREGEN) |
+ LS_64(iw->priv_mode_en, IRDMAQPC_PRIVEN) |
+ LS_64(info->stats_idx_valid, IRDMAQPC_USESTATSINSTANCE) |
+ LS_64(1, IRDMAQPC_IWARPMODE) |
+ LS_64(iw->rcv_mark_en, IRDMAQPC_RCVMARKERS) |
+ LS_64(iw->align_hdrs, IRDMAQPC_ALIGNHDRS) |
+ LS_64(iw->rcv_no_mpa_crc, IRDMAQPC_RCVNOMPACRC) |
+ LS_64(iw->rcv_mark_offset, IRDMAQPC_RCVMARKOFFSET) |
+ LS_64(iw->snd_mark_offset, IRDMAQPC_SNDMARKOFFSET) |
+ LS_64(iw->timely_en, IRDMAQPC_TIMELYENABLE));
+ }
+ if (info->tcp_info_valid) {
+ qw0 |= LS_64(tcp->ipv4, IRDMAQPC_IPV4) |
+ LS_64(tcp->no_nagle, IRDMAQPC_NONAGLE) |
+ LS_64(tcp->insert_vlan_tag, IRDMAQPC_INSERTVLANTAG) |
+ LS_64(tcp->time_stamp, IRDMAQPC_TIMESTAMP) |
+ LS_64(tcp->cwnd_inc_limit, IRDMAQPC_LIMIT) |
+ LS_64(tcp->drop_ooo_seg, IRDMAQPC_DROPOOOSEG) |
+ LS_64(tcp->dup_ack_thresh, IRDMAQPC_DUPACK_THRESH);
+
+ if (iw->ecn_en || iw->dctcp_en) {
+ tcp->tos &= ~ECN_CODE_PT_MASK;
+ tcp->tos |= ECN_CODE_PT_VAL;
+ }
+
+ qw3 |= LS_64(tcp->ttl, IRDMAQPC_TTL) |
+ LS_64(tcp->avoid_stretch_ack, IRDMAQPC_AVOIDSTRETCHACK) |
+ LS_64(tcp->tos, IRDMAQPC_TOS) |
+ LS_64(tcp->src_port, IRDMAQPC_SRCPORTNUM) |
+ LS_64(tcp->dst_port, IRDMAQPC_DESTPORTNUM);
+ if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
+ qw3 |= LS_64(tcp->src_mac_addr_idx,
+ IRDMAQPC_GEN1_SRCMACADDRIDX);
+
+ qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
+ }
+ set_64bit_val(qp_ctx, IRDMA_BYTE_32,
+ LS_64(tcp->dest_ip_addr[2], IRDMAQPC_DESTIPADDR2) |
+ LS_64(tcp->dest_ip_addr[3], IRDMAQPC_DESTIPADDR3));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_40,
+ LS_64(tcp->dest_ip_addr[0], IRDMAQPC_DESTIPADDR0) |
+ LS_64(tcp->dest_ip_addr[1], IRDMAQPC_DESTIPADDR1));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_48,
+ LS_64(tcp->snd_mss, IRDMAQPC_SNDMSS) |
+ LS_64(tcp->syn_rst_handling, IRDMAQPC_SYN_RST_HANDLING) |
+ LS_64(tcp->vlan_tag, IRDMAQPC_VLANTAG) |
+ LS_64(tcp->arp_idx, IRDMAQPC_ARPIDX));
+ qw7 |= LS_64(tcp->flow_label, IRDMAQPC_FLOWLABEL) |
+ LS_64(tcp->wscale, IRDMAQPC_WSCALE) |
+ LS_64(tcp->ignore_tcp_opt, IRDMAQPC_IGNORE_TCP_OPT) |
+ LS_64(tcp->ignore_tcp_uns_opt,
+ IRDMAQPC_IGNORE_TCP_UNS_OPT) |
+ LS_64(tcp->tcp_state, IRDMAQPC_TCPSTATE) |
+ LS_64(tcp->rcv_wscale, IRDMAQPC_RCVSCALE) |
+ LS_64(tcp->snd_wscale, IRDMAQPC_SNDSCALE);
+ set_64bit_val(qp_ctx, IRDMA_BYTE_72,
+ LS_64(tcp->time_stamp_recent, IRDMAQPC_TIMESTAMP_RECENT) |
+ LS_64(tcp->time_stamp_age, IRDMAQPC_TIMESTAMP_AGE));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_80,
+ LS_64(tcp->snd_nxt, IRDMAQPC_SNDNXT) |
+ LS_64(tcp->snd_wnd, IRDMAQPC_SNDWND));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_88,
+ LS_64(tcp->rcv_nxt, IRDMAQPC_RCVNXT) |
+ LS_64(tcp->rcv_wnd, IRDMAQPC_RCVWND));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_96,
+ LS_64(tcp->snd_max, IRDMAQPC_SNDMAX) |
+ LS_64(tcp->snd_una, IRDMAQPC_SNDUNA));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_104,
+ LS_64(tcp->srtt, IRDMAQPC_SRTT) |
+ LS_64(tcp->rtt_var, IRDMAQPC_RTTVAR));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_112,
+ LS_64(tcp->ss_thresh, IRDMAQPC_SSTHRESH) |
+ LS_64(tcp->cwnd, IRDMAQPC_CWND));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_120,
+ LS_64(tcp->snd_wl1, IRDMAQPC_SNDWL1) |
+ LS_64(tcp->snd_wl2, IRDMAQPC_SNDWL2));
+ qw16 |= LS_64(tcp->max_snd_window, IRDMAQPC_MAXSNDWND) |
+ LS_64(tcp->rexmit_thresh, IRDMAQPC_REXMIT_THRESH);
+ set_64bit_val(qp_ctx, IRDMA_BYTE_184,
+ LS_64(tcp->local_ipaddr[3], IRDMAQPC_LOCAL_IPADDR3) |
+ LS_64(tcp->local_ipaddr[2], IRDMAQPC_LOCAL_IPADDR2));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_192,
+ LS_64(tcp->local_ipaddr[1], IRDMAQPC_LOCAL_IPADDR1) |
+ LS_64(tcp->local_ipaddr[0], IRDMAQPC_LOCAL_IPADDR0));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_200,
+ LS_64(iw->t_high, IRDMAQPC_THIGH) |
+ LS_64(iw->t_low, IRDMAQPC_TLOW));
+ set_64bit_val(qp_ctx, IRDMA_BYTE_208,
+ LS_64(info->rem_endpoint_idx, IRDMAQPC_REMENDPOINTIDX));
+ }
+
+ set_64bit_val(qp_ctx, IRDMA_BYTE_0, qw0);
+ set_64bit_val(qp_ctx, IRDMA_BYTE_24, qw3);
+ set_64bit_val(qp_ctx, IRDMA_BYTE_56, qw7);
+ set_64bit_val(qp_ctx, IRDMA_BYTE_128, qw16);
+
+ irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "QP_HOST CTX", qp_ctx,
+ IRDMA_QP_CTX_SIZE);
+}
+
+/**
+ * irdma_sc_alloc_stag - mr stag alloc
+ * @dev: sc device struct
+ * @info: stag info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int
+irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
+ struct irdma_allocate_stag_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+ enum irdma_page_size page_size;
+
+ if (info->page_size == 0x40000000)
+ page_size = IRDMA_PAGE_SIZE_1G;
+ else if (info->page_size == 0x200000)
+ page_size = IRDMA_PAGE_SIZE_2M;
+ else
+ page_size = IRDMA_PAGE_SIZE_4K;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ set_64bit_val(wqe, IRDMA_BYTE_8,
+ FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) |
+ LS_64(info->total_len, IRDMA_CQPSQ_STAG_STAGLEN));
+ set_64bit_val(wqe, IRDMA_BYTE_16,
+ LS_64(info->stag_idx, IRDMA_CQPSQ_STAG_IDX));
+ set_64bit_val(wqe, IRDMA_BYTE_40,
+ LS_64(info->hmc_fcn_index, IRDMA_CQPSQ_STAG_HMCFNIDX));
+
+ if (info->chunk_size)
+ set_64bit_val(wqe, IRDMA_BYTE_48,
+ LS_64(info->first_pm_pbl_idx, IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX));
+
+ hdr = LS_64(IRDMA_CQP_OP_ALLOC_STAG, IRDMA_CQPSQ_OPCODE) |
+ LS_64(1, IRDMA_CQPSQ_STAG_MR) |
+ LS_64(info->access_rights, IRDMA_CQPSQ_STAG_ARIGHTS) |
+ LS_64(info->chunk_size, IRDMA_CQPSQ_STAG_LPBLSIZE) |
+ LS_64(page_size, IRDMA_CQPSQ_STAG_HPAGESIZE) |
+ LS_64(info->remote_access, IRDMA_CQPSQ_STAG_REMACCENABLED) |
+ LS_64(info->use_hmc_fcn_index, IRDMA_CQPSQ_STAG_USEHMCFNIDX) |
+ LS_64(info->use_pf_rid, IRDMA_CQPSQ_STAG_USEPFRID) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "ALLOC_STAG WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
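+
+/*
+ * Usage sketch for irdma_sc_alloc_stag() (illustrative only; dev, scratch,
+ * stag_idx, pd_id and len are assumed to come from the caller's CQP
+ * request path and resource pools):
+ *
+ *	struct irdma_allocate_stag_info info = {0};
+ *
+ *	info.stag_idx = stag_idx;
+ *	info.pd_id = pd_id;
+ *	info.total_len = len;
+ *	info.page_size = 0x200000;	(maps to IRDMA_PAGE_SIZE_2M above)
+ *	info.remote_access = true;
+ *	ret = irdma_sc_alloc_stag(dev, &info, scratch, true);
+ *
+ * On success the WQE is posted to the CQP SQ and the scratch value is
+ * returned with the corresponding CQP completion.
+ */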
+
+/**
+ * irdma_sc_mr_reg_non_shared - non-shared mr registration
+ * @dev: sc device struct
+ * @info: mr info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int
+irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
+ struct irdma_reg_ns_stag_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 fbo;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+ u32 pble_obj_cnt;
+ bool remote_access;
+ u8 addr_type;
+ enum irdma_page_size page_size;
+
+ if (info->page_size == 0x40000000)
+ page_size = IRDMA_PAGE_SIZE_1G;
+ else if (info->page_size == 0x200000)
+ page_size = IRDMA_PAGE_SIZE_2M;
+ else if (info->page_size == 0x1000)
+ page_size = IRDMA_PAGE_SIZE_4K;
+ else
+ return -EINVAL;
+
+ if (info->access_rights & (IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY |
+ IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY))
+ remote_access = true;
+ else
+ remote_access = false;
+
+ pble_obj_cnt = dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+ if (info->chunk_size && info->first_pm_pbl_index >= pble_obj_cnt)
+ return -EINVAL;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+ fbo = info->va & (info->page_size - 1);
+
+ set_64bit_val(wqe, IRDMA_BYTE_0,
+ (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED ?
+ info->va : fbo));
+ set_64bit_val(wqe, IRDMA_BYTE_8,
+ LS_64(info->total_len, IRDMA_CQPSQ_STAG_STAGLEN) |
+ FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
+ set_64bit_val(wqe, IRDMA_BYTE_16,
+ LS_64(info->stag_key, IRDMA_CQPSQ_STAG_KEY) |
+ LS_64(info->stag_idx, IRDMA_CQPSQ_STAG_IDX));
+ if (!info->chunk_size)
+ set_64bit_val(wqe, IRDMA_BYTE_32, info->reg_addr_pa);
+ else
+ set_64bit_val(wqe, IRDMA_BYTE_48,
+ LS_64(info->first_pm_pbl_index, IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX));
+
+ set_64bit_val(wqe, IRDMA_BYTE_40, info->hmc_fcn_index);
+
+ addr_type = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ? 1 : 0;
+ hdr = LS_64(IRDMA_CQP_OP_REG_MR, IRDMA_CQPSQ_OPCODE) |
+ LS_64(1, IRDMA_CQPSQ_STAG_MR) |
+ LS_64(info->chunk_size, IRDMA_CQPSQ_STAG_LPBLSIZE) |
+ LS_64(page_size, IRDMA_CQPSQ_STAG_HPAGESIZE) |
+ LS_64(info->access_rights, IRDMA_CQPSQ_STAG_ARIGHTS) |
+ LS_64(remote_access, IRDMA_CQPSQ_STAG_REMACCENABLED) |
+ LS_64(addr_type, IRDMA_CQPSQ_STAG_VABASEDTO) |
+ LS_64(info->use_hmc_fcn_index, IRDMA_CQPSQ_STAG_USEHMCFNIDX) |
+ LS_64(info->use_pf_rid, IRDMA_CQPSQ_STAG_USEPFRID) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "MR_REG_NS WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
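+
+/*
+ * Note on the fbo computation above (illustrative numbers): fbo is the
+ * first-byte offset of the VA within its page, info->va & (page_size - 1).
+ * For a 2MB page (mask 0x1FFFFF) and va 0x12345678 the fbo is 0x145678.
+ * Word 0 of the WQE carries the VA itself for IRDMA_ADDR_TYPE_VA_BASED
+ * registrations and the fbo for the non-VA-based (zero-based) case.
+ */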
+
+/**
+ * irdma_sc_dealloc_stag - deallocate stag
+ * @dev: sc device struct
+ * @info: dealloc stag info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int
+irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
+ struct irdma_dealloc_stag_info *info,
+ u64 scratch, bool post_sq)
+{
+ u64 hdr;
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ set_64bit_val(wqe, IRDMA_BYTE_8,
+ FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
+ set_64bit_val(wqe, IRDMA_BYTE_16,
+ LS_64(info->stag_idx, IRDMA_CQPSQ_STAG_IDX));
+
+ hdr = LS_64(IRDMA_CQP_OP_DEALLOC_STAG, IRDMA_CQPSQ_OPCODE) |
+ LS_64(info->mr, IRDMA_CQPSQ_STAG_MR) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "DEALLOC_STAG WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_mw_alloc - mw allocate
+ * @dev: sc device struct
+ * @info: memory window allocation information
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int
+irdma_sc_mw_alloc(struct irdma_sc_dev *dev,
+ struct irdma_mw_alloc_info *info, u64 scratch,
+ bool post_sq)
+{
+ u64 hdr;
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ set_64bit_val(wqe, IRDMA_BYTE_8,
+ FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
+ set_64bit_val(wqe, IRDMA_BYTE_16,
+ LS_64(info->mw_stag_index, IRDMA_CQPSQ_STAG_IDX));
+
+ hdr = LS_64(IRDMA_CQP_OP_ALLOC_STAG, IRDMA_CQPSQ_OPCODE) |
+ LS_64(info->mw_wide, IRDMA_CQPSQ_STAG_MWTYPE) |
+ LS_64(info->mw1_bind_dont_vldt_key,
+ IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "MW_ALLOC WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp
+ * @qp: sc qp struct
+ * @info: fast mr info
+ * @post_sq: flag for cqp db to ring
+ */
+int
+irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
+ struct irdma_fast_reg_stag_info *info,
+ bool post_sq)
+{
+ u64 temp, hdr;
+ __le64 *wqe;
+ u32 wqe_idx;
+ enum irdma_page_size page_size;
+ struct irdma_post_sq_info sq_info = {0};
+
+ if (info->page_size == 0x40000000)
+ page_size = IRDMA_PAGE_SIZE_1G;
+ else if (info->page_size == 0x200000)
+ page_size = IRDMA_PAGE_SIZE_2M;
+ else
+ page_size = IRDMA_PAGE_SIZE_4K;
+
+ sq_info.wr_id = info->wr_id;
+ sq_info.signaled = info->signaled;
+ sq_info.push_wqe = info->push_wqe;
+
+ wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx,
+ IRDMA_QP_WQE_MIN_QUANTA, 0, &sq_info);
+ if (!wqe)
+ return -ENOSPC;
+
+ irdma_clr_wqes(&qp->qp_uk, wqe_idx);
+
+ qp->qp_uk.sq_wrtrk_array[wqe_idx].signaled = info->signaled;
+ irdma_debug(qp->dev, IRDMA_DEBUG_MR,
+ "wr_id[%llxh] wqe_idx[%04d] location[%p]\n", (unsigned long long)info->wr_id,
+ wqe_idx, &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
+
+ temp = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ?
+ (uintptr_t)info->va : info->fbo;
+ set_64bit_val(wqe, IRDMA_BYTE_0, temp);
+
+ temp = RS_64(info->first_pm_pbl_index >> 16, IRDMAQPSQ_FIRSTPMPBLIDXHI);
+ set_64bit_val(wqe, IRDMA_BYTE_8,
+ LS_64(temp, IRDMAQPSQ_FIRSTPMPBLIDXHI) |
+ LS_64(info->reg_addr_pa >> IRDMAQPSQ_PBLADDR_S, IRDMAQPSQ_PBLADDR));
+ set_64bit_val(wqe, IRDMA_BYTE_16,
+ info->total_len |
+ LS_64(info->first_pm_pbl_index, IRDMAQPSQ_FIRSTPMPBLIDXLO));
+
+ hdr = LS_64(info->stag_key, IRDMAQPSQ_STAGKEY) |
+ LS_64(info->stag_idx, IRDMAQPSQ_STAGINDEX) |
+ LS_64(IRDMAQP_OP_FAST_REGISTER, IRDMAQPSQ_OPCODE) |
+ LS_64(info->chunk_size, IRDMAQPSQ_LPBLSIZE) |
+ LS_64(page_size, IRDMAQPSQ_HPAGESIZE) |
+ LS_64(info->access_rights, IRDMAQPSQ_STAGRIGHTS) |
+ LS_64(info->addr_type, IRDMAQPSQ_VABASEDTO) |
+ LS_64((sq_info.push_wqe ? 1 : 0), IRDMAQPSQ_PUSHWQE) |
+ LS_64(info->read_fence, IRDMAQPSQ_READFENCE) |
+ LS_64(info->local_fence, IRDMAQPSQ_LOCALFENCE) |
+ LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |
+ LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "FAST_REG WQE", wqe,
+ IRDMA_QP_WQE_MIN_SIZE);
+ if (sq_info.push_wqe) {
+ irdma_qp_push_wqe(&qp->qp_uk, wqe, IRDMA_QP_WQE_MIN_QUANTA,
+ wqe_idx, post_sq);
+ } else {
+ if (post_sq)
+ irdma_uk_qp_post_wr(&qp->qp_uk);
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_sc_gen_rts_ae - request AE generated after RTS
+ * @qp: sc qp struct
+ */
+static void
+irdma_sc_gen_rts_ae(struct irdma_sc_qp *qp)
+{
+ __le64 *wqe;
+ u64 hdr;
+ struct irdma_qp_uk *qp_uk;
+
+ qp_uk = &qp->qp_uk;
+
+ wqe = qp_uk->sq_base[1].elem;
+
+ hdr = LS_64(IRDMAQP_OP_NOP, IRDMAQPSQ_OPCODE) |
+ LS_64(1, IRDMAQPSQ_LOCALFENCE) |
+ LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+ irdma_debug_buf(qp->dev, IRDMA_DEBUG_QP, "NOP W/LOCAL FENCE WQE", wqe,
+ IRDMA_QP_WQE_MIN_SIZE);
+
+ wqe = qp_uk->sq_base[2].elem;
+ hdr = LS_64(IRDMAQP_OP_GEN_RTS_AE, IRDMAQPSQ_OPCODE) |
+ LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+ irdma_debug_buf(qp->dev, IRDMA_DEBUG_QP, "CONN EST WQE", wqe,
+ IRDMA_QP_WQE_MIN_SIZE);
+}
+
+/**
+ * irdma_sc_send_lsmm - send last streaming mode message
+ * @qp: sc qp struct
+ * @lsmm_buf: buffer with lsmm message
+ * @size: size of lsmm buffer
+ * @stag: stag of lsmm buffer
+ */
+int
+irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
+ irdma_stag stag)
+{
+ __le64 *wqe;
+ u64 hdr;
+ struct irdma_qp_uk *qp_uk;
+
+ qp_uk = &qp->qp_uk;
+ wqe = qp_uk->sq_base->elem;
+
+ set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)lsmm_buf);
+ if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
+ set_64bit_val(wqe, IRDMA_BYTE_8,
+ LS_64(size, IRDMAQPSQ_GEN1_FRAG_LEN) |
+ LS_64(stag, IRDMAQPSQ_GEN1_FRAG_STAG));
+ } else {
+ set_64bit_val(wqe, IRDMA_BYTE_8,
+ LS_64(size, IRDMAQPSQ_FRAG_LEN) |
+ LS_64(stag, IRDMAQPSQ_FRAG_STAG) |
+ LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID));
+ }
+ set_64bit_val(wqe, IRDMA_BYTE_16, 0);
+
+ hdr = LS_64(IRDMAQP_OP_RDMA_SEND, IRDMAQPSQ_OPCODE) |
+ LS_64(1, IRDMAQPSQ_STREAMMODE) |
+ LS_64(1, IRDMAQPSQ_WAITFORRCVPDU) |
+ LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "SEND_LSMM WQE", wqe,
+ IRDMA_QP_WQE_MIN_SIZE);
+
+ if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
+ irdma_sc_gen_rts_ae(qp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_send_lsmm_nostag - send lsmm for a privileged qp
+ * @qp: sc qp struct
+ * @lsmm_buf: buffer with lsmm message
+ * @size: size of lsmm buffer
+ */
+int
+irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size)
+{
+ __le64 *wqe;
+ u64 hdr;
+ struct irdma_qp_uk *qp_uk;
+
+ qp_uk = &qp->qp_uk;
+ wqe = qp_uk->sq_base->elem;
+
+ set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)lsmm_buf);
+
+ if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1)
+ set_64bit_val(wqe, IRDMA_BYTE_8,
+ LS_64(size, IRDMAQPSQ_GEN1_FRAG_LEN));
+ else
+ set_64bit_val(wqe, IRDMA_BYTE_8,
+ LS_64(size, IRDMAQPSQ_FRAG_LEN) |
+ LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID));
+ set_64bit_val(wqe, IRDMA_BYTE_16, 0);
+
+ hdr = LS_64(IRDMAQP_OP_RDMA_SEND, IRDMAQPSQ_OPCODE) |
+ LS_64(1, IRDMAQPSQ_STREAMMODE) |
+ LS_64(1, IRDMAQPSQ_WAITFORRCVPDU) |
+ LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "SEND_LSMM_NOSTAG WQE", wqe,
+ IRDMA_QP_WQE_MIN_SIZE);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_send_rtt - send last read0 or write0
+ * @qp: sc qp struct
+ * @read: Do read0 or write0
+ */
+int
+irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read)
+{
+ __le64 *wqe;
+ u64 hdr;
+ struct irdma_qp_uk *qp_uk;
+
+ qp_uk = &qp->qp_uk;
+ wqe = qp_uk->sq_base->elem;
+
+ set_64bit_val(wqe, IRDMA_BYTE_0, 0);
+ set_64bit_val(wqe, IRDMA_BYTE_16, 0);
+ if (read) {
+ if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
+ set_64bit_val(wqe, IRDMA_BYTE_8,
+ LS_64(0xabcd, IRDMAQPSQ_GEN1_FRAG_STAG));
+ } else {
+ set_64bit_val(wqe, IRDMA_BYTE_8,
+ (u64)0xabcd | LS_64(qp->qp_uk.swqe_polarity,
+ IRDMAQPSQ_VALID));
+ }
+ hdr = LS_64(0x1234, IRDMAQPSQ_REMSTAG) |
+ LS_64(IRDMAQP_OP_RDMA_READ, IRDMAQPSQ_OPCODE) |
+ LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID);
+
+ } else {
+ if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
+ set_64bit_val(wqe, IRDMA_BYTE_8, 0);
+ } else {
+ set_64bit_val(wqe, IRDMA_BYTE_8,
+ LS_64(qp->qp_uk.swqe_polarity,
+ IRDMAQPSQ_VALID));
+ }
+ hdr = LS_64(IRDMAQP_OP_RDMA_WRITE, IRDMAQPSQ_OPCODE) |
+ LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID);
+ }
+
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "RTR WQE", wqe,
+ IRDMA_QP_WQE_MIN_SIZE);
+
+ if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
+ irdma_sc_gen_rts_ae(qp);
+
+ return 0;
+}
+
+/**
+ * irdma_iwarp_opcode - determine the iwarp opcode of an incoming packet
+ * @info: aeq info for the packet
+ * @pkt: packet for error
+ */
+static u32 irdma_iwarp_opcode(struct irdma_aeqe_info *info, u8 *pkt){
+ BE16 *mpa;
+ u32 opcode = 0xffffffff;
+
+ if (info->q2_data_written) {
+ mpa = (BE16 *) pkt;
+ opcode = IRDMA_NTOHS(mpa[1]) & 0xf;
+ }
+
+ return opcode;
+}
+
+/**
+ * irdma_locate_mpa - return pointer to mpa in the pkt
+ * @pkt: packet with data
+ */
+static u8 *irdma_locate_mpa(u8 *pkt) {
+ /* skip over ethernet header */
+ pkt += IRDMA_MAC_HLEN;
+
+ /* Skip over IP and TCP headers */
+ pkt += 4 * (pkt[0] & 0x0f);
+ pkt += 4 * ((pkt[12] >> 4) & 0x0f);
+
+ return pkt;
+}
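+
+/*
+ * Worked example for the header arithmetic above: once the MAC header is
+ * skipped, pkt[0] is the IPv4 version/IHL byte (0x45 with no options), so
+ * the IP header is 4 * 5 = 20 bytes; the TCP data offset nibble
+ * (pkt[12] >> 4) is 5, another 20 bytes.  The MPA header therefore starts
+ * IRDMA_MAC_HLEN + 40 bytes into an option-less frame.
+ */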
+
+/**
+ * irdma_bld_termhdr_ctrl - setup terminate hdr control fields
+ * @qp: sc qp ptr for pkt
+ * @hdr: term hdr
+ * @opcode: flush opcode for termhdr
+ * @layer_etype: error layer + error type
+ * @err: error code in the header
+ */
+static void
+irdma_bld_termhdr_ctrl(struct irdma_sc_qp *qp,
+ struct irdma_terminate_hdr *hdr,
+ enum irdma_flush_opcode opcode,
+ u8 layer_etype, u8 err)
+{
+ qp->flush_code = opcode;
+ hdr->layer_etype = layer_etype;
+ hdr->error_code = err;
+}
+
+/**
+ * irdma_bld_termhdr_ddp_rdma - setup ddp and rdma hdrs in terminate hdr
+ * @pkt: ptr to mpa in offending pkt
+ * @hdr: term hdr
+ * @copy_len: offending pkt length to be copied to term hdr
+ * @is_tagged: DDP tagged or untagged
+ */
+static void
+irdma_bld_termhdr_ddp_rdma(u8 *pkt, struct irdma_terminate_hdr *hdr,
+ int *copy_len, u8 *is_tagged)
+{
+ u16 ddp_seg_len;
+
+ ddp_seg_len = IRDMA_NTOHS(*(BE16 *) pkt);
+ if (ddp_seg_len) {
+ *copy_len = 2;
+ hdr->hdrct = DDP_LEN_FLAG;
+ if (pkt[2] & 0x80) {
+ *is_tagged = 1;
+ if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
+ *copy_len += TERM_DDP_LEN_TAGGED;
+ hdr->hdrct |= DDP_HDR_FLAG;
+ }
+ } else {
+ if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
+ *copy_len += TERM_DDP_LEN_UNTAGGED;
+ hdr->hdrct |= DDP_HDR_FLAG;
+ }
+ if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN) &&
+ ((pkt[3] & RDMA_OPCODE_M) == RDMA_READ_REQ_OPCODE)) {
+ *copy_len += TERM_RDMA_LEN;
+ hdr->hdrct |= RDMA_HDR_FLAG;
+ }
+ }
+ }
+}
+
+/**
+ * irdma_bld_terminate_hdr - build terminate message header
+ * @qp: qp associated with received terminate AE
+ * @info: the struct containing AE information
+ */
+static int
+irdma_bld_terminate_hdr(struct irdma_sc_qp *qp,
+ struct irdma_aeqe_info *info)
+{
+ u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
+ int copy_len = 0;
+ u8 is_tagged = 0;
+ u32 opcode;
+ struct irdma_terminate_hdr *termhdr;
+
+ termhdr = (struct irdma_terminate_hdr *)qp->q2_buf;
+ memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);
+
+ if (info->q2_data_written) {
+ pkt = irdma_locate_mpa(pkt);
+ irdma_bld_termhdr_ddp_rdma(pkt, termhdr, &copy_len, &is_tagged);
+ }
+
+ opcode = irdma_iwarp_opcode(info, pkt);
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ qp->sq_flush_code = info->sq;
+ qp->rq_flush_code = info->rq;
+
+ switch (info->ae_id) {
+ case IRDMA_AE_AMP_UNALLOCATED_STAG:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ if (opcode == IRDMA_OP_TYPE_RDMA_WRITE)
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
+ (LAYER_DDP << 4) | DDP_TAGGED_BUF,
+ DDP_TAGGED_INV_STAG);
+ else
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_INV_STAG);
+ break;
+ case IRDMA_AE_AMP_BOUNDS_VIOLATION:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ if (info->q2_data_written)
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
+ (LAYER_DDP << 4) | DDP_TAGGED_BUF,
+ DDP_TAGGED_BOUNDS);
+ else
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_INV_BOUNDS);
+ break;
+ case IRDMA_AE_AMP_BAD_PD:
+ switch (opcode) {
+ case IRDMA_OP_TYPE_RDMA_WRITE:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
+ (LAYER_DDP << 4) | DDP_TAGGED_BUF,
+ DDP_TAGGED_UNASSOC_STAG);
+ break;
+ case IRDMA_OP_TYPE_SEND_INV:
+ case IRDMA_OP_TYPE_SEND_SOL_INV:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_CANT_INV_STAG);
+ break;
+ default:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_UNASSOC_STAG);
+ }
+ break;
+ case IRDMA_AE_AMP_INVALID_STAG:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_INV_STAG);
+ break;
+ case IRDMA_AE_AMP_BAD_QP:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_QN);
+ break;
+ case IRDMA_AE_AMP_BAD_STAG_KEY:
+ case IRDMA_AE_AMP_BAD_STAG_INDEX:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ switch (opcode) {
+ case IRDMA_OP_TYPE_SEND_INV:
+ case IRDMA_OP_TYPE_SEND_SOL_INV:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
+ RDMAP_CANT_INV_STAG);
+ break;
+ default:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
+ RDMAP_INV_STAG);
+ }
+ break;
+ case IRDMA_AE_AMP_RIGHTS_VIOLATION:
+ case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
+ case IRDMA_AE_PRIV_OPERATION_DENIED:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_ACCESS);
+ break;
+ case IRDMA_AE_AMP_TO_WRAP:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_TO_WRAP);
+ break;
+ case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
+ break;
+ case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR,
+ (LAYER_DDP << 4) | DDP_CATASTROPHIC,
+ DDP_CATASTROPHIC_LOCAL);
+ break;
+ case IRDMA_AE_LCE_QP_CATASTROPHIC:
+ case IRDMA_AE_DDP_NO_L_BIT:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR,
+ (LAYER_DDP << 4) | DDP_CATASTROPHIC,
+ DDP_CATASTROPHIC_LOCAL);
+ break;
+ case IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_MSN_RANGE);
+ break;
+ case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_TOO_LONG);
+ break;
+ case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
+ if (is_tagged)
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_TAGGED_BUF,
+ DDP_TAGGED_INV_DDP_VER);
+ else
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_DDP_VER);
+ break;
+ case IRDMA_AE_DDP_UBE_INVALID_MO:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_MO);
+ break;
+ case IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_MSN_NO_BUF);
+ break;
+ case IRDMA_AE_DDP_UBE_INVALID_QN:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_QN);
+ break;
+ case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
+ RDMAP_INV_RDMAP_VER);
+ break;
+ default:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
+ RDMAP_UNSPECIFIED);
+ break;
+ }
+
+ if (copy_len)
+ irdma_memcpy(termhdr + 1, pkt, copy_len);
+
+ return sizeof(struct irdma_terminate_hdr) + copy_len;
+}
+
+/**
+ * irdma_terminate_send_fin() - Send fin for terminate message
+ * @qp: qp associated with received terminate AE
+ */
+void
+irdma_terminate_send_fin(struct irdma_sc_qp *qp)
+{
+ irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE,
+ IRDMAQP_TERM_SEND_FIN_ONLY, 0);
+}
+
+/**
+ * irdma_terminate_connection() - handle bad AE and send terminate to remote QP
+ * @qp: qp associated with received terminate AE
+ * @info: the struct containing AE information
+ */
+void
+irdma_terminate_connection(struct irdma_sc_qp *qp,
+ struct irdma_aeqe_info *info)
+{
+ u8 termlen = 0;
+
+ if (qp->term_flags & IRDMA_TERM_SENT)
+ return;
+
+ termlen = irdma_bld_terminate_hdr(qp, info);
+ irdma_terminate_start_timer(qp);
+ qp->term_flags |= IRDMA_TERM_SENT;
+ irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE,
+ IRDMAQP_TERM_SEND_TERM_ONLY, termlen);
+}
+
+/**
+ * irdma_terminate_received - handle terminate received AE
+ * @qp: qp associated with received terminate AE
+ * @info: the struct containing AE information
+ */
+void
+irdma_terminate_received(struct irdma_sc_qp *qp,
+ struct irdma_aeqe_info *info)
+{
+ u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
+ BE32 *mpa;
+ u8 ddp_ctl;
+ u8 rdma_ctl;
+ u16 aeq_id = 0;
+ struct irdma_terminate_hdr *termhdr;
+
+ mpa = (BE32 *) irdma_locate_mpa(pkt);
+ if (info->q2_data_written) {
+ /* did not validate the frame - do it now */
+ ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
+ rdma_ctl = ntohl(mpa[0]) & 0xff;
+ if ((ddp_ctl & 0xc0) != 0x40)
+ aeq_id = IRDMA_AE_LCE_QP_CATASTROPHIC;
+ else if ((ddp_ctl & 0x03) != 1)
+ aeq_id = IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION;
+ else if (ntohl(mpa[2]) != 2)
+ aeq_id = IRDMA_AE_DDP_UBE_INVALID_QN;
+ else if (ntohl(mpa[3]) != 1)
+ aeq_id = IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN;
+ else if (ntohl(mpa[4]) != 0)
+ aeq_id = IRDMA_AE_DDP_UBE_INVALID_MO;
+ else if ((rdma_ctl & 0xc0) != 0x40)
+ aeq_id = IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;
+
+ info->ae_id = aeq_id;
+ if (info->ae_id) {
+ /* Bad terminate recvd - send back a terminate */
+ irdma_terminate_connection(qp, info);
+ return;
+ }
+ }
+
+ qp->term_flags |= IRDMA_TERM_RCVD;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ termhdr = (struct irdma_terminate_hdr *)&mpa[5];
+ if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
+ termhdr->layer_etype == RDMAP_REMOTE_OP) {
+ irdma_terminate_done(qp, 0);
+ } else {
+ irdma_terminate_start_timer(qp);
+ irdma_terminate_send_fin(qp);
+ }
+}
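+
+/*
+ * The checks above validate the control fields of a received terminate
+ * before trusting it, as the AE ids they map to indicate: the DDP header
+ * must be untagged with the last flag set ((ddp_ctl & 0xc0) == 0x40),
+ * carry DDP version 1, queue number 2, MSN 1 and MO 0, and the RDMAP
+ * version bits in rdma_ctl must indicate version 1.  Any mismatch sends
+ * a terminate back via irdma_terminate_connection() instead.
+ */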
+
+static int
+irdma_null_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
+{
+ return 0;
+}
+
+static void
+irdma_null_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri)
+{
+ /* do nothing */
+}
+
+static void
+irdma_null_ws_reset(struct irdma_sc_vsi *vsi)
+{
+ /* do nothing */
+}
+
+/**
+ * irdma_sc_vsi_init - Init the vsi structure
+ * @vsi: pointer to vsi structure to initialize
+ * @info: the info used to initialize the vsi struct
+ */
+void
+irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
+ struct irdma_vsi_init_info *info)
+{
+ u8 i;
+
+ vsi->dev = info->dev;
+ vsi->back_vsi = info->back_vsi;
+ vsi->register_qset = info->register_qset;
+ vsi->unregister_qset = info->unregister_qset;
+ vsi->mtu = info->params->mtu;
+ vsi->exception_lan_q = info->exception_lan_q;
+ vsi->vsi_idx = info->pf_data_vsi_num;
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ vsi->fcn_id = info->dev->hmc_fn_id;
+
+ irdma_set_qos_info(vsi, info->params);
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ mutex_init(&vsi->qos[i].qos_mutex);
+ INIT_LIST_HEAD(&vsi->qos[i].qplist);
+ }
+ if (vsi->register_qset) {
+ vsi->dev->ws_add = irdma_ws_add;
+ vsi->dev->ws_remove = irdma_ws_remove;
+ vsi->dev->ws_reset = irdma_ws_reset;
+ } else {
+ vsi->dev->ws_add = irdma_null_ws_add;
+ vsi->dev->ws_remove = irdma_null_ws_remove;
+ vsi->dev->ws_reset = irdma_null_ws_reset;
+ }
+}
+
+/**
+ * irdma_get_fcn_id - Return the function id
+ * @vsi: pointer to the vsi
+ */
+static u8 irdma_get_fcn_id(struct irdma_sc_vsi *vsi){
+ struct irdma_stats_inst_info stats_info = {0};
+ struct irdma_sc_dev *dev = vsi->dev;
+ u8 fcn_id = IRDMA_INVALID_FCN_ID;
+ u8 start_idx, max_stats, i;
+
+ if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
+ if (!irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_ALLOCATE,
+ &stats_info))
+ return stats_info.stats_idx;
+ }
+
+ start_idx = 1;
+ max_stats = 16;
+ for (i = start_idx; i < max_stats; i++)
+ if (!dev->fcn_id_array[i]) {
+ fcn_id = i;
+ dev->fcn_id_array[i] = true;
+ break;
+ }
+
+ return fcn_id;
+}
+
+/**
+ * irdma_vsi_stats_init - Initialize the vsi statistics
+ * @vsi: pointer to the vsi structure
+ * @info: The info structure used for initialization
+ */
+int
+irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
+ struct irdma_vsi_stats_info *info)
+{
+ u8 fcn_id = info->fcn_id;
+ struct irdma_dma_mem *stats_buff_mem;
+
+ vsi->pestat = info->pestat;
+ vsi->pestat->hw = vsi->dev->hw;
+ vsi->pestat->vsi = vsi;
+ stats_buff_mem = &vsi->pestat->gather_info.stats_buff_mem;
+ stats_buff_mem->size = IRDMA_GATHER_STATS_BUF_SIZE * 2;
+ stats_buff_mem->va = irdma_allocate_dma_mem(vsi->pestat->hw,
+ stats_buff_mem,
+ stats_buff_mem->size, 1);
+ if (!stats_buff_mem->va)
+ return -ENOMEM;
+
+ vsi->pestat->gather_info.gather_stats_va = stats_buff_mem->va;
+ vsi->pestat->gather_info.last_gather_stats_va =
+ (void *)((uintptr_t)stats_buff_mem->va +
+ IRDMA_GATHER_STATS_BUF_SIZE);
+
+ irdma_hw_stats_start_timer(vsi);
+ if (info->alloc_fcn_id)
+ fcn_id = irdma_get_fcn_id(vsi);
+ if (fcn_id == IRDMA_INVALID_FCN_ID)
+ goto stats_error;
+
+ vsi->stats_fcn_id_alloc = info->alloc_fcn_id;
+ vsi->fcn_id = fcn_id;
+ if (info->alloc_fcn_id) {
+ vsi->pestat->gather_info.use_stats_inst = true;
+ vsi->pestat->gather_info.stats_inst_index = fcn_id;
+ }
+
+ return 0;
+
+stats_error:
+ irdma_free_dma_mem(vsi->pestat->hw, stats_buff_mem);
+
+ return -EIO;
+}
+
+/**
+ * irdma_vsi_stats_free - Free the vsi stats
+ * @vsi: pointer to the vsi structure
+ */
+void
+irdma_vsi_stats_free(struct irdma_sc_vsi *vsi)
+{
+ struct irdma_stats_inst_info stats_info = {0};
+ u8 fcn_id = vsi->fcn_id;
+ struct irdma_sc_dev *dev = vsi->dev;
+
+ if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
+ if (vsi->stats_fcn_id_alloc) {
+ stats_info.stats_idx = vsi->fcn_id;
+ irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_FREE,
+ &stats_info);
+ }
+ } else {
+ if (vsi->stats_fcn_id_alloc &&
+ fcn_id < vsi->dev->hw_attrs.max_stat_inst)
+ vsi->dev->fcn_id_array[fcn_id] = false;
+ }
+
+ if (!vsi->pestat)
+ return;
+ irdma_hw_stats_stop_timer(vsi);
+ irdma_free_dma_mem(vsi->pestat->hw,
+ &vsi->pestat->gather_info.stats_buff_mem);
+}
+
+/**
+ * irdma_get_encoded_wqe_size - given wq size, returns hardware encoded size
+ * @wqsize: size of the wq (sq, rq) to be encoded
+ * @queue_type: queue type selected for the calculation algorithm
+ */
+u8
+irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type)
+{
+ u8 encoded_size = 0;
+
+ /*
+ * cqp sq's hw coded value starts from 1 for size of 4 while it starts from 0 for qp's wqs.
+ */
+ if (queue_type == IRDMA_QUEUE_TYPE_CQP)
+ encoded_size = 1;
+ wqsize >>= 2;
+ while (wqsize >>= 1)
+ encoded_size++;
+
+ return encoded_size;
+}
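+
+/*
+ * For power-of-two sizes the encoding above is log2(wqsize) - 2, plus 1
+ * for CQP SQs.  A few worked values: wqsize 4 for a CQP SQ encodes to 1,
+ * wqsize 2048 for a CQP SQ encodes to 10, and wqsize 256 for a QP WQ
+ * encodes to 6.
+ */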
+
+/**
+ * irdma_sc_gather_stats - collect the statistics
+ * @cqp: struct for cqp hw
+ * @info: gather stats info structure
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static int
+irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
+ struct irdma_stats_gather_info *info,
+ u64 scratch)
+{
+ __le64 *wqe;
+ u64 temp;
+
+ if (info->stats_buff_mem.size < IRDMA_GATHER_STATS_BUF_SIZE)
+ return -ENOSPC;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ set_64bit_val(wqe, IRDMA_BYTE_40,
+ LS_64(info->hmc_fcn_index, IRDMA_CQPSQ_STATS_HMC_FCN_INDEX));
+ set_64bit_val(wqe, IRDMA_BYTE_32, info->stats_buff_mem.pa);
+
+ temp = LS_64(cqp->polarity, IRDMA_CQPSQ_STATS_WQEVALID) |
+ LS_64(info->use_stats_inst, IRDMA_CQPSQ_STATS_USE_INST) |
+ LS_64(info->stats_inst_index, IRDMA_CQPSQ_STATS_INST_INDEX) |
+ LS_64(info->use_hmc_fcn_index,
+ IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX) |
+ LS_64(IRDMA_CQP_OP_GATHER_STATS, IRDMA_CQPSQ_STATS_OP);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, temp);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_STATS, "GATHER_STATS WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+
+ irdma_sc_cqp_post_sq(cqp);
+ irdma_debug(cqp->dev, IRDMA_DEBUG_STATS,
+ "CQP SQ head 0x%x tail 0x%x size 0x%x\n", cqp->sq_ring.head,
+ cqp->sq_ring.tail, cqp->sq_ring.size);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_manage_stats_inst - allocate or free stats instance
+ * @cqp: struct for cqp hw
+ * @info: stats info structure
+ * @alloc: alloc vs. free flag
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static int
+irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
+ struct irdma_stats_inst_info *info,
+ bool alloc, u64 scratch)
+{
+ __le64 *wqe;
+ u64 temp;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ set_64bit_val(wqe, IRDMA_BYTE_40,
+ LS_64(info->hmc_fn_id, IRDMA_CQPSQ_STATS_HMC_FCN_INDEX));
+ temp = LS_64(cqp->polarity, IRDMA_CQPSQ_STATS_WQEVALID) |
+ LS_64(alloc, IRDMA_CQPSQ_STATS_ALLOC_INST) |
+ LS_64(info->use_hmc_fcn_index, IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX) |
+ LS_64(info->stats_idx, IRDMA_CQPSQ_STATS_INST_INDEX) |
+ LS_64(IRDMA_CQP_OP_MANAGE_STATS, IRDMA_CQPSQ_STATS_OP);
+
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, temp);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_STATS WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+
+ irdma_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * irdma_sc_set_up_map - set the up map table
+ * @cqp: struct for cqp hw
+ * @info: User priority map info
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static int
+irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
+ struct irdma_up_info *info, u64 scratch)
+{
+ __le64 *wqe;
+ u64 temp;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ temp = info->map[0] | LS_64_1(info->map[1], 8) |
+ LS_64_1(info->map[2], 16) | LS_64_1(info->map[3], 24) |
+ LS_64_1(info->map[4], 32) | LS_64_1(info->map[5], 40) |
+ LS_64_1(info->map[6], 48) | LS_64_1(info->map[7], 56);
+
+ set_64bit_val(wqe, IRDMA_BYTE_0, temp);
+ set_64bit_val(wqe, IRDMA_BYTE_40,
+ LS_64(info->cnp_up_override, IRDMA_CQPSQ_UP_CNPOVERRIDE) |
+ LS_64(info->hmc_fcn_idx, IRDMA_CQPSQ_UP_HMCFCNIDX));
+
+ temp = LS_64(cqp->polarity, IRDMA_CQPSQ_UP_WQEVALID) |
+ LS_64(info->use_vlan, IRDMA_CQPSQ_UP_USEVLAN) |
+ LS_64(info->use_cnp_up_override, IRDMA_CQPSQ_UP_USEOVERRIDE) |
+ LS_64(IRDMA_CQP_OP_UP_MAP, IRDMA_CQPSQ_UP_OP);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, temp);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "UPMAP WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_manage_ws_node - create/modify/destroy WS node
+ * @cqp: struct for cqp hw
+ * @info: node info structure
+ * @node_op: 0 for add, 1 for modify, 2 for delete
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static int
+irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
+ struct irdma_ws_node_info *info,
+ enum irdma_ws_node_op node_op, u64 scratch)
+{
+ __le64 *wqe;
+ u64 temp = 0;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ set_64bit_val(wqe, IRDMA_BYTE_32,
+ LS_64(info->vsi, IRDMA_CQPSQ_WS_VSI) |
+ LS_64(info->weight, IRDMA_CQPSQ_WS_WEIGHT));
+
+ temp = LS_64(cqp->polarity, IRDMA_CQPSQ_WS_WQEVALID) |
+ LS_64(node_op, IRDMA_CQPSQ_WS_NODEOP) |
+ LS_64(info->enable, IRDMA_CQPSQ_WS_ENABLENODE) |
+ LS_64(info->type_leaf, IRDMA_CQPSQ_WS_NODETYPE) |
+ LS_64(info->prio_type, IRDMA_CQPSQ_WS_PRIOTYPE) |
+ LS_64(info->tc, IRDMA_CQPSQ_WS_TC) |
+ LS_64(IRDMA_CQP_OP_WORK_SCHED_NODE, IRDMA_CQPSQ_WS_OP) |
+ LS_64(info->parent_id, IRDMA_CQPSQ_WS_PARENTID) |
+ LS_64(info->id, IRDMA_CQPSQ_WS_NODEID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, temp);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_WS WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_flush_wqes - flush qp's wqe
+ * @qp: sc qp
+ * @info: flush information
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+int
+irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
+ struct irdma_qp_flush_info *info, u64 scratch,
+ bool post_sq)
+{
+ u64 temp = 0;
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+ bool flush_sq = false, flush_rq = false;
+
+ if (info->rq && !qp->flush_rq)
+ flush_rq = true;
+ if (info->sq && !qp->flush_sq)
+ flush_sq = true;
+ qp->flush_sq |= flush_sq;
+ qp->flush_rq |= flush_rq;
+
+ if (!flush_sq && !flush_rq) {
+ irdma_debug(qp->dev, IRDMA_DEBUG_CQP,
+ "Additional flush request ignored for qp %x\n", qp->qp_uk.qp_id);
+ return -EALREADY;
+ }
+
+ cqp = qp->pd->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ if (info->userflushcode) {
+ if (flush_rq)
+ temp |= LS_64(info->rq_minor_code, IRDMA_CQPSQ_FWQE_RQMNERR) |
+ LS_64(info->rq_major_code, IRDMA_CQPSQ_FWQE_RQMJERR);
+ if (flush_sq)
+ temp |= LS_64(info->sq_minor_code, IRDMA_CQPSQ_FWQE_SQMNERR) |
+ LS_64(info->sq_major_code, IRDMA_CQPSQ_FWQE_SQMJERR);
+ }
+ set_64bit_val(wqe, IRDMA_BYTE_16, temp);
+
+ temp = (info->generate_ae) ?
+ info->ae_code | LS_64(info->ae_src, IRDMA_CQPSQ_FWQE_AESOURCE) : 0;
+ set_64bit_val(wqe, IRDMA_BYTE_8, temp);
+
+ hdr = qp->qp_uk.qp_id |
+ LS_64(IRDMA_CQP_OP_FLUSH_WQES, IRDMA_CQPSQ_OPCODE) |
+ LS_64(info->generate_ae, IRDMA_CQPSQ_FWQE_GENERATE_AE) |
+ LS_64(info->userflushcode, IRDMA_CQPSQ_FWQE_USERFLCODE) |
+ LS_64(flush_sq, IRDMA_CQPSQ_FWQE_FLUSHSQ) |
+ LS_64(flush_rq, IRDMA_CQPSQ_FWQE_FLUSHRQ) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QP_FLUSH WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
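+
+/*
+ * Usage sketch for irdma_sc_qp_flush_wqes() (illustrative only; qp,
+ * scratch and the chosen error codes are assumed to come from the
+ * caller's flush path):
+ *
+ *	struct irdma_qp_flush_info info = {0};
+ *
+ *	info.sq = true;
+ *	info.rq = true;
+ *	info.userflushcode = true;
+ *	info.sq_minor_code = FLUSH_GENERAL_ERR;
+ *	info.rq_minor_code = FLUSH_GENERAL_ERR;
+ *	ret = irdma_sc_qp_flush_wqes(qp, &info, scratch, true);
+ *
+ * A repeat request for a queue that is already being flushed is ignored
+ * and returns -EALREADY, as handled at the top of the function.
+ */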
+
+/**
+ * irdma_sc_gen_ae - generate AE, uses flush WQE CQP OP
+ * @qp: sc qp
+ * @info: gen ae information
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int
+irdma_sc_gen_ae(struct irdma_sc_qp *qp,
+ struct irdma_gen_ae_info *info, u64 scratch,
+ bool post_sq)
+{
+ u64 temp;
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+
+ cqp = qp->pd->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ temp = info->ae_code | LS_64(info->ae_src, IRDMA_CQPSQ_FWQE_AESOURCE);
+ set_64bit_val(wqe, IRDMA_BYTE_8, temp);
+
+ hdr = qp->qp_uk.qp_id | LS_64(IRDMA_CQP_OP_GEN_AE, IRDMA_CQPSQ_OPCODE) |
+ LS_64(1, IRDMA_CQPSQ_FWQE_GENERATE_AE) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "GEN_AE WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_upload_context - upload qp's context
+ * @dev: sc device struct
+ * @info: upload context info ptr for return
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int
+irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
+ struct irdma_upload_context_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ set_64bit_val(wqe, IRDMA_BYTE_16, info->buf_pa);
+
+ hdr = LS_64(info->qp_id, IRDMA_CQPSQ_UCTX_QPID) |
+ LS_64(IRDMA_CQP_OP_UPLOAD_CONTEXT, IRDMA_CQPSQ_OPCODE) |
+ LS_64(info->qp_type, IRDMA_CQPSQ_UCTX_QPTYPE) |
+ LS_64(info->raw_format, IRDMA_CQPSQ_UCTX_RAWFORMAT) |
+ LS_64(info->freeze_qp, IRDMA_CQPSQ_UCTX_FREEZEQP) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "QP_UPLOAD_CTX WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_manage_push_page - Handle push page
+ * @cqp: struct for cqp hw
+ * @info: push page info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int
+irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
+ struct irdma_cqp_manage_push_page_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+
+ if (info->free_page &&
+ info->push_idx >= cqp->dev->hw_attrs.max_hw_device_pages)
+ return -EINVAL;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ set_64bit_val(wqe, IRDMA_BYTE_16, info->qs_handle);
+ hdr = LS_64(info->push_idx, IRDMA_CQPSQ_MPP_PPIDX) |
+ LS_64(info->push_page_type, IRDMA_CQPSQ_MPP_PPTYPE) |
+ LS_64(IRDMA_CQP_OP_MANAGE_PUSH_PAGES, IRDMA_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID) |
+ LS_64(info->free_page, IRDMA_CQPSQ_MPP_FREE_PAGE);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_PUSH_PAGES WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_suspend_qp - suspend qp for param change
+ * @cqp: struct for cqp hw
+ * @qp: sc qp struct
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static int
+irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
+ u64 scratch)
+{
+ u64 hdr;
+ __le64 *wqe;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ hdr = LS_64(qp->qp_uk.qp_id, IRDMA_CQPSQ_SUSPENDQP_QPID) |
+ LS_64(IRDMA_CQP_OP_SUSPEND_QP, IRDMA_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "SUSPEND_QP WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_resume_qp - resume qp after suspend
+ * @cqp: struct for cqp hw
+ * @qp: sc qp struct
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static int
+irdma_sc_resume_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
+ u64 scratch)
+{
+ u64 hdr;
+ __le64 *wqe;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ set_64bit_val(wqe, IRDMA_BYTE_16,
+ LS_64(qp->qs_handle, IRDMA_CQPSQ_RESUMEQP_QSHANDLE));
+
+ hdr = LS_64(qp->qp_uk.qp_id, IRDMA_CQPSQ_RESUMEQP_QPID) |
+ LS_64(IRDMA_CQP_OP_RESUME_QP, IRDMA_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "RESUME_QP WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_cq_ack - acknowledge completion q
+ * @cq: cq struct
+ */
+static inline void
+irdma_sc_cq_ack(struct irdma_sc_cq *cq)
+{
+ db_wr32(cq->cq_uk.cq_id, cq->cq_uk.cq_ack_db);
+}
+
+/**
+ * irdma_sc_cq_init - initialize completion q
+ * @cq: cq struct
+ * @info: cq initialization info
+ */
+int
+irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info)
+{
+ int ret_code;
+ u32 pble_obj_cnt;
+
+ pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+ if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
+ return -EINVAL;
+
+ cq->cq_pa = info->cq_base_pa;
+ cq->dev = info->dev;
+ cq->ceq_id = info->ceq_id;
+ info->cq_uk_init_info.cqe_alloc_db = cq->dev->cq_arm_db;
+ info->cq_uk_init_info.cq_ack_db = cq->dev->cq_ack_db;
+ ret_code = irdma_uk_cq_init(&cq->cq_uk, &info->cq_uk_init_info);
+ if (ret_code)
+ return ret_code;
+
+ cq->virtual_map = info->virtual_map;
+ cq->pbl_chunk_size = info->pbl_chunk_size;
+ cq->ceqe_mask = info->ceqe_mask;
+ cq->cq_type = (info->type) ? info->type : IRDMA_CQ_TYPE_IWARP;
+ cq->shadow_area_pa = info->shadow_area_pa;
+ cq->shadow_read_threshold = info->shadow_read_threshold;
+ cq->ceq_id_valid = info->ceq_id_valid;
+ cq->tph_en = info->tph_en;
+ cq->tph_val = info->tph_val;
+ cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+ cq->vsi = info->vsi;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_cq_create - create completion q
+ * @cq: cq struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @check_overflow: flag for overflow check
+ * @post_sq: flag for cqp db to ring
+ */
+static int
+irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch,
+ bool check_overflow, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+ struct irdma_sc_ceq *ceq;
+ int ret_code = 0;
+
+ cqp = cq->dev->cqp;
+ if (cq->cq_uk.cq_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt - 1))
+ return -EINVAL;
+
+ if (cq->ceq_id > (cq->dev->hmc_fpm_misc.max_ceqs - 1))
+ return -EINVAL;
+
+ ceq = cq->dev->ceq[cq->ceq_id];
+ if (ceq && ceq->reg_cq)
+ ret_code = irdma_sc_add_cq_ctx(ceq, cq);
+
+ if (ret_code)
+ return ret_code;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe) {
+ if (ceq && ceq->reg_cq)
+ irdma_sc_remove_cq_ctx(ceq, cq);
+ return -ENOSPC;
+ }
+
+ set_64bit_val(wqe, IRDMA_BYTE_0, cq->cq_uk.cq_size);
+ set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1));
+ set_64bit_val(wqe, IRDMA_BYTE_16,
+ LS_64(cq->shadow_read_threshold,
+ IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
+ set_64bit_val(wqe, IRDMA_BYTE_32, (cq->virtual_map ? 0 : cq->cq_pa));
+ set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa);
+ set_64bit_val(wqe, IRDMA_BYTE_48,
+ LS_64((cq->virtual_map ? cq->first_pm_pbl_idx : 0),
+ IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX));
+ set_64bit_val(wqe, IRDMA_BYTE_56,
+ LS_64(cq->tph_val, IRDMA_CQPSQ_TPHVAL) |
+ LS_64(cq->vsi->vsi_idx, IRDMA_CQPSQ_VSIIDX));
+
+ hdr = FLD_LS_64(cq->dev, cq->cq_uk.cq_id, IRDMA_CQPSQ_CQ_CQID) |
+ FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
+ IRDMA_CQPSQ_CQ_CEQID) |
+ LS_64(IRDMA_CQP_OP_CREATE_CQ, IRDMA_CQPSQ_OPCODE) |
+ LS_64(cq->pbl_chunk_size, IRDMA_CQPSQ_CQ_LPBLSIZE) |
+ LS_64(check_overflow, IRDMA_CQPSQ_CQ_CHKOVERFLOW) |
+ LS_64(cq->virtual_map, IRDMA_CQPSQ_CQ_VIRTMAP) |
+ LS_64(cq->ceqe_mask, IRDMA_CQPSQ_CQ_ENCEQEMASK) |
+ LS_64(cq->ceq_id_valid, IRDMA_CQPSQ_CQ_CEQIDVALID) |
+ LS_64(cq->tph_en, IRDMA_CQPSQ_TPHEN) |
+ LS_64(cq->cq_uk.avoid_mem_cflct, IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQ_CREATE WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_cq_destroy - destroy completion q
+ * @cq: cq struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+int
+irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+ struct irdma_sc_ceq *ceq;
+
+ cqp = cq->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ ceq = cq->dev->ceq[cq->ceq_id];
+ if (ceq && ceq->reg_cq)
+ irdma_sc_remove_cq_ctx(ceq, cq);
+
+ set_64bit_val(wqe, IRDMA_BYTE_0, cq->cq_uk.cq_size);
+ set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1));
+ set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa);
+ set_64bit_val(wqe, IRDMA_BYTE_48,
+ (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
+
+ hdr = cq->cq_uk.cq_id |
+ FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
+ IRDMA_CQPSQ_CQ_CEQID) |
+ LS_64(IRDMA_CQP_OP_DESTROY_CQ, IRDMA_CQPSQ_OPCODE) |
+ LS_64(cq->pbl_chunk_size, IRDMA_CQPSQ_CQ_LPBLSIZE) |
+ LS_64(cq->virtual_map, IRDMA_CQPSQ_CQ_VIRTMAP) |
+ LS_64(cq->ceqe_mask, IRDMA_CQPSQ_CQ_ENCEQEMASK) |
+ LS_64(cq->ceq_id_valid, IRDMA_CQPSQ_CQ_CEQIDVALID) |
+ LS_64(cq->tph_en, IRDMA_CQPSQ_TPHEN) |
+ LS_64(cq->cq_uk.avoid_mem_cflct, IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQ_DESTROY WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_cq_resize - set resized cq buffer info
+ * @cq: resized cq
+ * @info: resized cq buffer info
+ */
+void
+irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info)
+{
+ cq->virtual_map = info->virtual_map;
+ cq->cq_pa = info->cq_pa;
+ cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+ cq->pbl_chunk_size = info->pbl_chunk_size;
+ irdma_uk_cq_resize(&cq->cq_uk, info->cq_base, info->cq_size);
+}
+
+/**
+ * irdma_sc_cq_modify - modify a Completion Queue
+ * @cq: cq struct
+ * @info: modification info struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag to post to sq
+ */
+static int
+irdma_sc_cq_modify(struct irdma_sc_cq *cq,
+ struct irdma_modify_cq_info *info, u64 scratch,
+ bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+ u32 pble_obj_cnt;
+
+ pble_obj_cnt = cq->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+ if (info->cq_resize && info->virtual_map &&
+ info->first_pm_pbl_idx >= pble_obj_cnt)
+ return -EINVAL;
+
+ cqp = cq->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ set_64bit_val(wqe, IRDMA_BYTE_0, info->cq_size);
+ set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1));
+ set_64bit_val(wqe, IRDMA_BYTE_16,
+ LS_64(info->shadow_read_threshold,
+ IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
+ set_64bit_val(wqe, IRDMA_BYTE_32, info->cq_pa);
+ set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa);
+ set_64bit_val(wqe, IRDMA_BYTE_48, info->first_pm_pbl_idx);
+ set_64bit_val(wqe, IRDMA_BYTE_56,
+ LS_64(cq->tph_val, IRDMA_CQPSQ_TPHVAL) |
+ LS_64(cq->vsi->vsi_idx, IRDMA_CQPSQ_VSIIDX));
+
+ hdr = cq->cq_uk.cq_id |
+ LS_64(IRDMA_CQP_OP_MODIFY_CQ, IRDMA_CQPSQ_OPCODE) |
+ LS_64(info->cq_resize, IRDMA_CQPSQ_CQ_CQRESIZE) |
+ LS_64(info->pbl_chunk_size, IRDMA_CQPSQ_CQ_LPBLSIZE) |
+ LS_64(info->check_overflow, IRDMA_CQPSQ_CQ_CHKOVERFLOW) |
+ LS_64(info->virtual_map, IRDMA_CQPSQ_CQ_VIRTMAP) |
+ LS_64(cq->ceqe_mask, IRDMA_CQPSQ_CQ_ENCEQEMASK) |
+ LS_64(cq->tph_en, IRDMA_CQPSQ_TPHEN) |
+ LS_64(cq->cq_uk.avoid_mem_cflct, IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQ_MODIFY WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_check_cqp_progress - check cqp processing progress
+ * @timeout: timeout info struct
+ * @dev: sc device struct
+ */
+void
+irdma_check_cqp_progress(struct irdma_cqp_timeout *timeout, struct irdma_sc_dev *dev)
+{
+ if (timeout->compl_cqp_cmds != dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]) {
+ timeout->compl_cqp_cmds = dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
+ timeout->count = 0;
+ } else {
+ if (dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] !=
+ timeout->compl_cqp_cmds)
+ timeout->count++;
+ }
+}
+
+/**
+ * irdma_get_cqp_reg_info - get head and tail for cqp using registers
+ * @cqp: struct for cqp hw
+ * @val: cqp tail register value
+ * @tail: wqtail register value
+ * @error: cqp processing err
+ */
+static inline void
+irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val,
+ u32 *tail, u32 *error)
+{
+ *val = readl(cqp->dev->hw_regs[IRDMA_CQPTAIL]);
+ *tail = RS_32(*val, IRDMA_CQPTAIL_WQTAIL);
+ *error = RS_32(*val, IRDMA_CQPTAIL_CQP_OP_ERR);
+}
+
+/**
+ * irdma_cqp_poll_registers - poll cqp registers
+ * @cqp: struct for cqp hw
+ * @tail: wqtail register value
+ * @count: how many times to try for completion
+ */
+static int
+irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp, u32 tail,
+ u32 count)
+{
+ u32 i = 0;
+ u32 newtail, error, val;
+
+ while (i++ < count) {
+ irdma_get_cqp_reg_info(cqp, &val, &newtail, &error);
+ if (error) {
+ error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
+ irdma_debug(cqp->dev, IRDMA_DEBUG_CQP,
+ "CQPERRCODES error_code[x%08X]\n", error);
+ return -EIO;
+ }
+ if (newtail != tail) {
+ /* SUCCESS */
+ IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
+ cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++;
+ return 0;
+ }
+ irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * irdma_sc_decode_fpm_commit - decode a 64 bit value into count and base
+ * @dev: sc device struct
+ * @buf: pointer to commit buffer
+ * @buf_idx: buffer index
+ * @obj_info: object info pointer
+ * @rsrc_idx: index of the memory resource
+ */
+static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 * buf,
+ u32 buf_idx, struct irdma_hmc_obj_info *obj_info,
+ u32 rsrc_idx){
+ u64 temp;
+
+ get_64bit_val(buf, buf_idx, &temp);
+
+ switch (rsrc_idx) {
+ case IRDMA_HMC_IW_QP:
+ obj_info[rsrc_idx].cnt = (u32)RS_64(temp, IRDMA_COMMIT_FPM_QPCNT);
+ break;
+ case IRDMA_HMC_IW_CQ:
+ obj_info[rsrc_idx].cnt = (u32)FLD_RS_64(dev, temp, IRDMA_COMMIT_FPM_CQCNT);
+ break;
+ case IRDMA_HMC_IW_APBVT_ENTRY:
+ obj_info[rsrc_idx].cnt = 1;
+ break;
+ default:
+ obj_info[rsrc_idx].cnt = (u32)temp;
+ break;
+ }
+
+ obj_info[rsrc_idx].base = (u64)RS_64_1(temp, IRDMA_COMMIT_FPM_BASE_S) * 512;
+
+ return temp;
+}
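+
+/*
+ * The base extracted above is stored in the commit buffer in 512-byte
+ * units, hence the multiply by 512; e.g. an encoded base of 0x10 places
+ * the object at byte offset 0x2000 of the HMC area.
+ */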
+
+/**
+ * irdma_sc_parse_fpm_commit_buf - parse fpm commit buffer
+ * @dev: pointer to dev struct
+ * @buf: ptr to fpm commit buffer
+ * @info: ptr to irdma_hmc_obj_info struct
+ * @sd: number of SDs for HMC objects
+ *
+ * parses fpm commit info and copies the base value
+ * of hmc objects into hmc_info
+ */
+static int
+irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 * buf,
+ struct irdma_hmc_obj_info *info,
+ u32 *sd)
+{
+ u64 size;
+ u32 i;
+ u64 max_base = 0;
+ u32 last_hmc_obj = 0;
+
+ irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_0, info,
+ IRDMA_HMC_IW_QP);
+ irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_8, info,
+ IRDMA_HMC_IW_CQ);
+ /* skipping RSRVD */
+ irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_24, info,
+ IRDMA_HMC_IW_HTE);
+ irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_32, info,
+ IRDMA_HMC_IW_ARP);
+ irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_40, info,
+ IRDMA_HMC_IW_APBVT_ENTRY);
+ irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_48, info,
+ IRDMA_HMC_IW_MR);
+ irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_56, info,
+ IRDMA_HMC_IW_XF);
+ irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_64, info,
+ IRDMA_HMC_IW_XFFL);
+ irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_72, info,
+ IRDMA_HMC_IW_Q1);
+ irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_80, info,
+ IRDMA_HMC_IW_Q1FL);
+ irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_88, info,
+ IRDMA_HMC_IW_TIMER);
+ irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_112, info,
+ IRDMA_HMC_IW_PBLE);
+ /* skipping RSVD. */
+ if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
+ irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_96, info,
+ IRDMA_HMC_IW_FSIMC);
+ irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_104, info,
+ IRDMA_HMC_IW_FSIAV);
+ irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_128, info,
+ IRDMA_HMC_IW_RRF);
+ irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_136, info,
+ IRDMA_HMC_IW_RRFFL);
+ irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_144, info,
+ IRDMA_HMC_IW_HDR);
+ irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_152, info,
+ IRDMA_HMC_IW_MD);
+ irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_160, info,
+ IRDMA_HMC_IW_OOISC);
+ irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_168, info,
+ IRDMA_HMC_IW_OOISCFFL);
+ }
+
+ /* searching for the last object in HMC to find the size of the HMC area. */
+ for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) {
+ if (info[i].base > max_base) {
+ max_base = info[i].base;
+ last_hmc_obj = i;
+ }
+ }
+
+ size = info[last_hmc_obj].cnt * info[last_hmc_obj].size +
+ info[last_hmc_obj].base;
+
+ if (size & 0x1FFFFF)
+ *sd = (u32)((size >> 21) + 1); /* add 1 for remainder */
+ else
+ *sd = (u32)(size >> 21);
+
+ return 0;
+}
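+
+/*
+ * The SD count above is the HMC area size rounded up to 2MB SDs
+ * (segment descriptors), which is what the >> 21 / 0x1FFFFF arithmetic
+ * computes.  For example, a computed size of 5MB yields *sd = 3.
+ */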
+
+/**
+ * irdma_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
+ * @buf: ptr to fpm query buffer
+ * @buf_idx: index into buf
+ * @obj_info: ptr to irdma_hmc_obj_info struct
+ * @rsrc_idx: resource index into info
+ *
+ * Decode a 64 bit value from fpm query buffer into max count and size
+ */
+static u64 irdma_sc_decode_fpm_query(__le64 * buf, u32 buf_idx,
+ struct irdma_hmc_obj_info *obj_info,
+ u32 rsrc_idx){
+ u64 temp;
+ u32 size;
+
+ get_64bit_val(buf, buf_idx, &temp);
+ obj_info[rsrc_idx].max_cnt = (u32)temp;
+ size = (u32)RS_64_1(temp, 32);
+ obj_info[rsrc_idx].size = LS_64_1(1, size);
+
+ return temp;
+}
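+
+/*
+ * In the query buffer each 64-bit word packs max_cnt in the low 32 bits
+ * and log2(object size) in the high 32 bits, so size = 1 << (temp >> 32);
+ * e.g. a high word of 8 decodes to a 256-byte object size.
+ */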
+
+/**
+ * irdma_sc_parse_fpm_query_buf() - parses fpm query buffer
+ * @dev: ptr to shared code device
+ * @buf: ptr to fpm query buffer
+ * @hmc_info: ptr to irdma_hmc_obj_info struct
+ * @hmc_fpm_misc: ptr to fpm data
+ *
+ * parses fpm query buffer and copies the max_cnt and
+ * size values of hmc objects into hmc_info
+ */
+static int
+irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 * buf,
+ struct irdma_hmc_info *hmc_info,
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc)
+{
+ struct irdma_hmc_obj_info *obj_info;
+ u64 temp;
+ u32 size;
+ u16 max_pe_sds;
+
+ obj_info = hmc_info->hmc_obj;
+
+ get_64bit_val(buf, IRDMA_BYTE_0, &temp);
+ hmc_info->first_sd_index = (u16)RS_64(temp, IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX);
+ max_pe_sds = (u16)RS_64(temp, IRDMA_QUERY_FPM_MAX_PE_SDS);
+
+ hmc_fpm_misc->max_sds = max_pe_sds;
+ hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
+ get_64bit_val(buf, 8, &temp);
+ obj_info[IRDMA_HMC_IW_QP].max_cnt = (u32)RS_64(temp, IRDMA_QUERY_FPM_MAX_QPS);
+ size = (u32)RS_64_1(temp, 32);
+ obj_info[IRDMA_HMC_IW_QP].size = LS_64_1(1, size);
+
+ get_64bit_val(buf, 16, &temp);
+ obj_info[IRDMA_HMC_IW_CQ].max_cnt = (u32)RS_64(temp, IRDMA_QUERY_FPM_MAX_CQS);
+ size = (u32)RS_64_1(temp, 32);
+ obj_info[IRDMA_HMC_IW_CQ].size = LS_64_1(1, size);
+
+ irdma_sc_decode_fpm_query(buf, 32, obj_info, IRDMA_HMC_IW_HTE);
+ irdma_sc_decode_fpm_query(buf, 40, obj_info, IRDMA_HMC_IW_ARP);
+
+ obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 8192;
+ obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 1;
+
+ irdma_sc_decode_fpm_query(buf, 48, obj_info, IRDMA_HMC_IW_MR);
+ irdma_sc_decode_fpm_query(buf, 56, obj_info, IRDMA_HMC_IW_XF);
+
+ get_64bit_val(buf, 64, &temp);
+ obj_info[IRDMA_HMC_IW_XFFL].max_cnt = (u32)temp;
+ obj_info[IRDMA_HMC_IW_XFFL].size = 4;
+ hmc_fpm_misc->xf_block_size = RS_64(temp, IRDMA_QUERY_FPM_XFBLOCKSIZE);
+ if (!hmc_fpm_misc->xf_block_size)
+ return -EINVAL;
+
+ irdma_sc_decode_fpm_query(buf, 72, obj_info, IRDMA_HMC_IW_Q1);
+ get_64bit_val(buf, 80, &temp);
+ obj_info[IRDMA_HMC_IW_Q1FL].max_cnt = (u32)temp;
+ obj_info[IRDMA_HMC_IW_Q1FL].size = 4;
+
+ hmc_fpm_misc->q1_block_size = RS_64(temp, IRDMA_QUERY_FPM_Q1BLOCKSIZE);
+ if (!hmc_fpm_misc->q1_block_size)
+ return -EINVAL;
+
+ irdma_sc_decode_fpm_query(buf, 88, obj_info, IRDMA_HMC_IW_TIMER);
+
+ get_64bit_val(buf, 112, &temp);
+ obj_info[IRDMA_HMC_IW_PBLE].max_cnt = (u32)temp;
+ obj_info[IRDMA_HMC_IW_PBLE].size = 8;
+
+ get_64bit_val(buf, 120, &temp);
+ hmc_fpm_misc->max_ceqs = RS_64(temp, IRDMA_QUERY_FPM_MAX_CEQS);
+ hmc_fpm_misc->ht_multiplier = RS_64(temp, IRDMA_QUERY_FPM_HTMULTIPLIER);
+ hmc_fpm_misc->timer_bucket = RS_64(temp, IRDMA_QUERY_FPM_TIMERBUCKET);
+ if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ return 0;
+ irdma_sc_decode_fpm_query(buf, 96, obj_info, IRDMA_HMC_IW_FSIMC);
+ irdma_sc_decode_fpm_query(buf, 104, obj_info, IRDMA_HMC_IW_FSIAV);
+ irdma_sc_decode_fpm_query(buf, 128, obj_info, IRDMA_HMC_IW_RRF);
+
+ get_64bit_val(buf, IRDMA_BYTE_136, &temp);
+ obj_info[IRDMA_HMC_IW_RRFFL].max_cnt = (u32)temp;
+ obj_info[IRDMA_HMC_IW_RRFFL].size = 4;
+ hmc_fpm_misc->rrf_block_size = RS_64(temp, IRDMA_QUERY_FPM_RRFBLOCKSIZE);
+ if (!hmc_fpm_misc->rrf_block_size &&
+ obj_info[IRDMA_HMC_IW_RRFFL].max_cnt)
+ return -EINVAL;
+
+ irdma_sc_decode_fpm_query(buf, 144, obj_info, IRDMA_HMC_IW_HDR);
+ irdma_sc_decode_fpm_query(buf, 152, obj_info, IRDMA_HMC_IW_MD);
+ irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC);
+
+ get_64bit_val(buf, IRDMA_BYTE_168, &temp);
+ obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp;
+ obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4;
+ hmc_fpm_misc->ooiscf_block_size = RS_64(temp, IRDMA_QUERY_FPM_OOISCFBLOCKSIZE);
+ if (!hmc_fpm_misc->ooiscf_block_size &&
+ obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_find_reg_cq - find cq ctx index
+ * @ceq: ceq sc structure
+ * @cq: cq sc structure
+ */
+static u32 irdma_sc_find_reg_cq(struct irdma_sc_ceq *ceq,
+ struct irdma_sc_cq *cq){
+ u32 i;
+
+ for (i = 0; i < ceq->reg_cq_size; i++) {
+ if (cq == ceq->reg_cq[i])
+ return i;
+ }
+
+ return IRDMA_INVALID_CQ_IDX;
+}
+
+/**
+ * irdma_sc_add_cq_ctx - add cq ctx tracking for ceq
+ * @ceq: ceq sc structure
+ * @cq: cq sc structure
+ */
+int
+irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ceq->req_cq_lock, flags);
+
+ if (ceq->reg_cq_size == ceq->elem_cnt) {
+ spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
+ return -ENOSPC;
+ }
+
+ ceq->reg_cq[ceq->reg_cq_size++] = cq;
+
+ spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_remove_cq_ctx - remove cq ctx tracking for ceq
+ * @ceq: ceq sc structure
+ * @cq: cq sc structure
+ */
+void
+irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
+{
+ unsigned long flags;
+ u32 cq_ctx_idx;
+
+ spin_lock_irqsave(&ceq->req_cq_lock, flags);
+ cq_ctx_idx = irdma_sc_find_reg_cq(ceq, cq);
+ if (cq_ctx_idx == IRDMA_INVALID_CQ_IDX)
+ goto exit;
+
+ ceq->reg_cq_size--;
+ if (cq_ctx_idx != ceq->reg_cq_size)
+ ceq->reg_cq[cq_ctx_idx] = ceq->reg_cq[ceq->reg_cq_size];
+ ceq->reg_cq[ceq->reg_cq_size] = NULL;
+
+exit:
+ spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
+}
+
+/**
+ * irdma_sc_cqp_init - Initialize buffers for a control Queue Pair
+ * @cqp: IWARP control queue pair pointer
+ * @info: IWARP control queue pair init info pointer
+ *
+ * Initializes the object and context buffers for a control Queue Pair.
+ */
+int
+irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
+ struct irdma_cqp_init_info *info)
+{
+ u8 hw_sq_size;
+
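+ /* the CQP SQ size must be a power of two within the supported range */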
+ if (info->sq_size > IRDMA_CQP_SW_SQSIZE_2048 ||
+ info->sq_size < IRDMA_CQP_SW_SQSIZE_4 ||
+ ((info->sq_size & (info->sq_size - 1))))
+ return -EINVAL;
+
+ hw_sq_size = irdma_get_encoded_wqe_size(info->sq_size,
+ IRDMA_QUEUE_TYPE_CQP);
+ cqp->size = sizeof(*cqp);
+ cqp->sq_size = info->sq_size;
+ cqp->hw_sq_size = hw_sq_size;
+ cqp->sq_base = info->sq;
+ cqp->host_ctx = info->host_ctx;
+ cqp->sq_pa = info->sq_pa;
+ cqp->host_ctx_pa = info->host_ctx_pa;
+ cqp->dev = info->dev;
+ cqp->struct_ver = info->struct_ver;
+ cqp->hw_maj_ver = info->hw_maj_ver;
+ cqp->hw_min_ver = info->hw_min_ver;
+ cqp->scratch_array = info->scratch_array;
+ cqp->polarity = 0;
+ cqp->en_datacenter_tcp = info->en_datacenter_tcp;
+ cqp->ena_vf_count = info->ena_vf_count;
+ cqp->hmc_profile = info->hmc_profile;
+ cqp->ceqs_per_vf = info->ceqs_per_vf;
+ cqp->disable_packed = info->disable_packed;
+ cqp->rocev2_rto_policy = info->rocev2_rto_policy;
+ cqp->protocol_used = info->protocol_used;
+ irdma_memcpy(&cqp->dcqcn_params, &info->dcqcn_params, sizeof(cqp->dcqcn_params));
+ cqp->en_rem_endpoint_trk = info->en_rem_endpoint_trk;
+ info->dev->cqp = cqp;
+
+ IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size);
+ cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] = 0;
+ cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS] = 0;
+ /* for the cqp commands backlog. */
+ INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head);
+
+ writel(0, cqp->dev->hw_regs[IRDMA_CQPTAIL]);
+ writel(0, cqp->dev->hw_regs[IRDMA_CQPDB]);
+ writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
+
+ irdma_debug(cqp->dev, IRDMA_DEBUG_WQE,
+ "sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04x]\n",
+ cqp->sq_size, cqp->hw_sq_size, cqp->sq_base, (unsigned long long)cqp->sq_pa, cqp,
+ cqp->polarity);
+ return 0;
+}
+
+/**
+ * irdma_sc_cqp_create - create cqp during bringup
+ * @cqp: struct for cqp hw
+ * @maj_err: If error, major err number
+ * @min_err: If error, minor err number
+ */
+int
+irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err)
+{
+ u64 temp;
+ u8 hw_rev;
+ u32 cnt = 0, p1, p2, val = 0, err_code;
+ int ret_code;
+
+ hw_rev = cqp->dev->hw_attrs.uk_attrs.hw_rev;
+ cqp->sdbuf.size = IRDMA_UPDATE_SD_BUFF_SIZE * cqp->sq_size;
+ cqp->sdbuf.va = irdma_allocate_dma_mem(cqp->dev->hw, &cqp->sdbuf,
+ cqp->sdbuf.size,
+ IRDMA_SD_BUF_ALIGNMENT);
+ if (!cqp->sdbuf.va)
+ return -ENOMEM;
+
+ spin_lock_init(&cqp->dev->cqp_lock);
+
+ temp = LS_64(cqp->hw_sq_size, IRDMA_CQPHC_SQSIZE) |
+ LS_64(cqp->struct_ver, IRDMA_CQPHC_SVER) |
+ LS_64(cqp->disable_packed, IRDMA_CQPHC_DISABLE_PFPDUS) |
+ LS_64(cqp->ceqs_per_vf, IRDMA_CQPHC_CEQPERVF);
+ if (hw_rev >= IRDMA_GEN_2) {
+ temp |= LS_64(cqp->rocev2_rto_policy, IRDMA_CQPHC_ROCEV2_RTO_POLICY) |
+ LS_64(cqp->protocol_used, IRDMA_CQPHC_PROTOCOL_USED);
+ }
+
+ set_64bit_val(cqp->host_ctx, IRDMA_BYTE_0, temp);
+ set_64bit_val(cqp->host_ctx, IRDMA_BYTE_8, cqp->sq_pa);
+
+ temp = LS_64(cqp->ena_vf_count, IRDMA_CQPHC_ENABLED_VFS) |
+ LS_64(cqp->hmc_profile, IRDMA_CQPHC_HMC_PROFILE);
+ if (hw_rev >= IRDMA_GEN_2)
+ temp |= LS_64(cqp->en_rem_endpoint_trk, IRDMA_CQPHC_EN_REM_ENDPOINT_TRK);
+ set_64bit_val(cqp->host_ctx, IRDMA_BYTE_16, temp);
+ set_64bit_val(cqp->host_ctx, IRDMA_BYTE_24, (uintptr_t)cqp);
+ temp = LS_64(cqp->hw_maj_ver, IRDMA_CQPHC_HW_MAJVER) |
+ LS_64(cqp->hw_min_ver, IRDMA_CQPHC_HW_MINVER);
+ if (hw_rev >= IRDMA_GEN_2) {
+ temp |= LS_64(cqp->dcqcn_params.min_rate, IRDMA_CQPHC_MIN_RATE) |
+ LS_64(cqp->dcqcn_params.min_dec_factor, IRDMA_CQPHC_MIN_DEC_FACTOR);
+ }
+ set_64bit_val(cqp->host_ctx, IRDMA_BYTE_32, temp);
+ set_64bit_val(cqp->host_ctx, IRDMA_BYTE_40, 0);
+ temp = 0;
+ if (hw_rev >= IRDMA_GEN_2) {
+ temp |= LS_64(cqp->dcqcn_params.dcqcn_t, IRDMA_CQPHC_DCQCN_T) |
+ LS_64(cqp->dcqcn_params.rai_factor, IRDMA_CQPHC_RAI_FACTOR) |
+ LS_64(cqp->dcqcn_params.hai_factor, IRDMA_CQPHC_HAI_FACTOR);
+ }
+ set_64bit_val(cqp->host_ctx, IRDMA_BYTE_48, temp);
+ temp = 0;
+ if (hw_rev >= IRDMA_GEN_2) {
+ temp |= LS_64(cqp->dcqcn_params.dcqcn_b, IRDMA_CQPHC_DCQCN_B) |
+ LS_64(cqp->dcqcn_params.dcqcn_f, IRDMA_CQPHC_DCQCN_F) |
+ LS_64(cqp->dcqcn_params.cc_cfg_valid, IRDMA_CQPHC_CC_CFG_VALID) |
+ LS_64(cqp->dcqcn_params.rreduce_mperiod, IRDMA_CQPHC_RREDUCE_MPERIOD);
+ }
+ set_64bit_val(cqp->host_ctx, IRDMA_BYTE_56, temp);
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQP_HOST_CTX WQE",
+ cqp->host_ctx, IRDMA_CQP_CTX_SIZE * 8);
+ p1 = RS_32_1(cqp->host_ctx_pa, 32);
+ p2 = (u32)cqp->host_ctx_pa;
+
+ writel(p1, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
+ writel(p2, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
+
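+ /* poll CCQPSTATUS until the hardware reports the cqp is ready, bounded by max_done_count */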
+ do {
+ if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
+ ret_code = -ETIMEDOUT;
+ goto err;
+ }
+ irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count);
+ val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
+ } while (!val);
+
+ if (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_ERR)) {
+ ret_code = -EOPNOTSUPP;
+ goto err;
+ }
+
+ cqp->process_cqp_sds = irdma_update_sds_noccq;
+ return 0;
+
+err:
+ spin_lock_destroy(&cqp->dev->cqp_lock);
+ irdma_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
+ err_code = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
+ *min_err = RS_32(err_code, IRDMA_CQPERRCODES_CQP_MINOR_CODE);
+ *maj_err = RS_32(err_code, IRDMA_CQPERRCODES_CQP_MAJOR_CODE);
+ return ret_code;
+}
+
+/**
+ * irdma_sc_cqp_post_sq - ring the doorbell for the cqp sq
+ * @cqp: struct for cqp hw
+ */
+void
+irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp)
+{
+ db_wr32(IRDMA_RING_CURRENT_HEAD(cqp->sq_ring), cqp->dev->cqp_db);
+
+ irdma_debug(cqp->dev, IRDMA_DEBUG_WQE,
+ "CQP SQ head 0x%x tail 0x%x size 0x%x\n", cqp->sq_ring.head,
+ cqp->sq_ring.tail, cqp->sq_ring.size);
+}
+
+/**
+ * irdma_sc_cqp_get_next_send_wqe_idx - get next wqe on cqp sq
+ * and pass back index
+ * @cqp: CQP HW structure
+ * @scratch: private data for CQP WQE
+ * @wqe_idx: WQE index of CQP SQ
+ */
+__le64 *
+irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
+ u32 *wqe_idx)
+{
+ __le64 *wqe = NULL;
+ int ret_code;
+
+ if (IRDMA_RING_FULL_ERR(cqp->sq_ring)) {
+ irdma_debug(cqp->dev, IRDMA_DEBUG_WQE,
+ "CQP SQ is full, head 0x%x tail 0x%x size 0x%x\n",
+ cqp->sq_ring.head, cqp->sq_ring.tail,
+ cqp->sq_ring.size);
+ return NULL;
+ }
+ IRDMA_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
+ if (ret_code)
+ return NULL;
+
+ cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS]++;
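+ /* the WQE valid-bit polarity flips each time the SQ ring wraps back to index 0 */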
+ if (!*wqe_idx)
+ cqp->polarity = !cqp->polarity;
+ wqe = cqp->sq_base[*wqe_idx].elem;
+ cqp->scratch_array[*wqe_idx] = scratch;
+
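+ /* zero the wqe except bytes 24-31, where the caller writes the header with the valid bit last */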
+ memset(&wqe[0], 0, 24);
+ memset(&wqe[4], 0, 32);
+
+ return wqe;
+}
+
+/**
+ * irdma_sc_cqp_destroy - destroy cqp during close
+ * @cqp: struct for cqp hw
+ */
+int
+irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp)
+{
+ u32 cnt = 0, val;
+ int ret_code = 0;
+
+ writel(0, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
+ writel(0, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
+ do {
+ if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
+ ret_code = -ETIMEDOUT;
+ break;
+ }
+ irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count);
+ val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
+ } while (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_DONE));
+
+ irdma_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
+ spin_lock_destroy(&cqp->dev->cqp_lock);
+ return ret_code;
+}
+
+/**
+ * irdma_sc_ccq_arm - enable intr for control cq
+ * @ccq: ccq sc struct
+ */
+void
+irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
+{
+ u64 temp_val;
+ u16 sw_cq_sel;
+ u8 arm_next_se;
+ u8 arm_seq_num;
+
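+ /* read the doorbell shadow fields, bump the arm sequence number, and set arm-next to request the next completion interrupt */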
+ get_64bit_val(ccq->cq_uk.shadow_area, IRDMA_BYTE_32, &temp_val);
+ sw_cq_sel = (u16)RS_64(temp_val, IRDMA_CQ_DBSA_SW_CQ_SELECT);
+ arm_next_se = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_NEXT_SE);
+ arm_seq_num = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_SEQ_NUM);
+ arm_seq_num++;
+ temp_val = LS_64(arm_seq_num, IRDMA_CQ_DBSA_ARM_SEQ_NUM) |
+ LS_64(sw_cq_sel, IRDMA_CQ_DBSA_SW_CQ_SELECT) |
+ LS_64(arm_next_se, IRDMA_CQ_DBSA_ARM_NEXT_SE) |
+ LS_64(1, IRDMA_CQ_DBSA_ARM_NEXT);
+ set_64bit_val(ccq->cq_uk.shadow_area, IRDMA_BYTE_32, temp_val);
+
+ irdma_wmb(); /* make sure shadow area is updated before arming */
+
+ db_wr32(ccq->cq_uk.cq_id, ccq->dev->cq_arm_db);
+}
+
+/**
+ * irdma_sc_ccq_get_cqe_info - get ccq's cq entry
+ * @ccq: ccq sc struct
+ * @info: completion q entry to return
+ */
+int
+irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
+ struct irdma_ccq_cqe_info *info)
+{
+ u64 qp_ctx, temp, temp1;
+ __le64 *cqe;
+ struct irdma_sc_cqp *cqp;
+ u32 wqe_idx;
+ u32 error;
+ u8 polarity;
+ int ret_code = 0;
+
+ if (ccq->cq_uk.avoid_mem_cflct)
+ cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(&ccq->cq_uk);
+ else
+ cqe = IRDMA_GET_CURRENT_CQ_ELEM(&ccq->cq_uk);
+
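+ /* the valid bit must match our polarity; otherwise no new completion is available */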
+ get_64bit_val(cqe, IRDMA_BYTE_24, &temp);
+ polarity = (u8)RS_64(temp, IRDMA_CQ_VALID);
+ if (polarity != ccq->cq_uk.polarity)
+ return -ENOENT;
+
+ get_64bit_val(cqe, IRDMA_BYTE_8, &qp_ctx);
+ cqp = (struct irdma_sc_cqp *)(irdma_uintptr) qp_ctx;
+ info->error = (bool)RS_64(temp, IRDMA_CQ_ERROR);
+ info->maj_err_code = IRDMA_CQPSQ_MAJ_NO_ERROR;
+ info->min_err_code = (u16)RS_64(temp, IRDMA_CQ_MINERR);
+ if (info->error) {
+ info->maj_err_code = (u16)RS_64(temp, IRDMA_CQ_MAJERR);
+ error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
+ irdma_debug(cqp->dev, IRDMA_DEBUG_CQP,
+ "CQPERRCODES error_code[x%08X]\n", error);
+ }
+
+ wqe_idx = (u32)RS_64(temp, IRDMA_CQ_WQEIDX);
+ info->scratch = cqp->scratch_array[wqe_idx];
+
+ get_64bit_val(cqe, IRDMA_BYTE_16, &temp1);
+ info->op_ret_val = (u32)RS_64(temp1, IRDMA_CCQ_OPRETVAL);
+ get_64bit_val(cqp->sq_base[wqe_idx].elem, IRDMA_BYTE_24, &temp1);
+ info->op_code = (u8)RS_64(temp1, IRDMA_CQPSQ_OPCODE);
+ info->cqp = cqp;
+
+ /* move the head for cq */
+ IRDMA_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
+ if (!IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring))
+ ccq->cq_uk.polarity ^= 1;
+
+ /* update cq tail in cq shadow memory also */
+ IRDMA_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
+ set_64bit_val(ccq->cq_uk.shadow_area, IRDMA_BYTE_0,
+ IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring));
+
+ irdma_wmb(); /* make sure shadow area is updated before moving tail */
+
+ IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
+ ccq->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++;
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_poll_for_cqp_op_done - poll the ccq until the requested CQP SQ operation completes
+ * @cqp: struct for cqp hw
+ * @op_code: cqp opcode for completion
+ * @compl_info: completion q entry to return
+ */
+int
+irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code,
+ struct irdma_ccq_cqe_info *compl_info)
+{
+ struct irdma_ccq_cqe_info info = {0};
+ struct irdma_sc_cq *ccq;
+ int ret_code = 0;
+ u32 cnt = 0;
+
+ ccq = cqp->dev->ccq;
+ while (1) {
+ if (cnt++ > 100 * cqp->dev->hw_attrs.max_done_count)
+ return -ETIMEDOUT;
+
+ if (cqp->dev->no_cqp)
+ return -ETIMEDOUT;
+
+ if (irdma_sc_ccq_get_cqe_info(ccq, &info)) {
+ irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count);
+ continue;
+ }
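+ /* an errored completion is fatal unless it came from a query-stag operation */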
+ if (info.error && info.op_code != IRDMA_CQP_OP_QUERY_STAG) {
+ ret_code = -EIO;
+ break;
+ }
+ /* make sure op code matches */
+ if (op_code == info.op_code)
+ break;
+ irdma_debug(cqp->dev, IRDMA_DEBUG_WQE,
+ "opcode mismatch for my op code 0x%x, returned opcode %x\n",
+ op_code, info.op_code);
+ }
+
+ if (compl_info)
+ irdma_memcpy(compl_info, &info, sizeof(*compl_info));
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_manage_hmc_pm_func_table - manage the HMC PM function table
+ * @cqp: struct for cqp hw
+ * @info: info for the manage function table operation
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int
+irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp,
+ struct irdma_hmc_fcn_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ hdr = LS_64(info->vf_id, IRDMA_CQPSQ_MHMC_VFIDX) |
+ LS_64(IRDMA_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE,
+ IRDMA_CQPSQ_OPCODE) |
+ LS_64(info->free_fcn, IRDMA_CQPSQ_MHMC_FREEPMFN) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE,
+ "MANAGE_HMC_PM_FUNC_TABLE WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_commit_fpm_val_done - wait for cqp cqe completion
+ * for fpm commit
+ * @cqp: struct for cqp hw
+ */
+static int
+irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp)
+{
+ return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_COMMIT_FPM_VAL,
+ NULL);
+}
+
+/**
+ * irdma_sc_commit_fpm_val - cqp wqe for commit fpm values
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @hmc_fn_id: hmc function id
+ * @commit_fpm_mem: Memory for fpm values
+ * @post_sq: flag for cqp db to ring
+ * @wait_type: poll ccq or cqp registers for cqp completion
+ */
+static int
+irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
+ u8 hmc_fn_id,
+ struct irdma_dma_mem *commit_fpm_mem,
+ bool post_sq, u8 wait_type)
+{
+ __le64 *wqe;
+ u64 hdr;
+ u32 tail, val, error;
+ int ret_code = 0;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ set_64bit_val(wqe, IRDMA_BYTE_16, hmc_fn_id);
+ set_64bit_val(wqe, IRDMA_BYTE_32, commit_fpm_mem->pa);
+
+ hdr = LS_64(IRDMA_COMMIT_FPM_BUF_SIZE, IRDMA_CQPSQ_BUFSIZE) |
+ LS_64(IRDMA_CQP_OP_COMMIT_FPM_VAL, IRDMA_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "COMMIT_FPM_VAL WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
+
+ if (post_sq) {
+ irdma_sc_cqp_post_sq(cqp);
+ if (wait_type == IRDMA_CQP_WAIT_POLL_REGS)
+ ret_code = irdma_cqp_poll_registers(cqp, tail,
+ cqp->dev->hw_attrs.max_done_count);
+ else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ)
+ ret_code = irdma_sc_commit_fpm_val_done(cqp);
+ }
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_query_fpm_val_done - poll for cqp wqe completion for
+ * query fpm
+ * @cqp: struct for cqp hw
+ */
+static int
+irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp)
+{
+ return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_QUERY_FPM_VAL,
+ NULL);
+}
+
+/**
+ * irdma_sc_query_fpm_val - cqp wqe query fpm values
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @hmc_fn_id: hmc function id
+ * @query_fpm_mem: memory for return fpm values
+ * @post_sq: flag for cqp db to ring
+ * @wait_type: poll ccq or cqp registers for cqp completion
+ */
+static int
+irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
+ u8 hmc_fn_id,
+ struct irdma_dma_mem *query_fpm_mem,
+ bool post_sq, u8 wait_type)
+{
+ __le64 *wqe;
+ u64 hdr;
+ u32 tail, val, error;
+ int ret_code = 0;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ set_64bit_val(wqe, IRDMA_BYTE_16, hmc_fn_id);
+ set_64bit_val(wqe, IRDMA_BYTE_32, query_fpm_mem->pa);
+
+ hdr = LS_64(IRDMA_CQP_OP_QUERY_FPM_VAL, IRDMA_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QUERY_FPM WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
+
+ if (post_sq) {
+ irdma_sc_cqp_post_sq(cqp);
+ if (wait_type == IRDMA_CQP_WAIT_POLL_REGS)
+ ret_code = irdma_cqp_poll_registers(cqp, tail,
+ cqp->dev->hw_attrs.max_done_count);
+ else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ)
+ ret_code = irdma_sc_query_fpm_val_done(cqp);
+ }
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_ceq_init - initialize ceq
+ * @ceq: ceq sc structure
+ * @info: ceq initialization info
+ */
+int
+irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
+ struct irdma_ceq_init_info *info)
+{
+ u32 pble_obj_cnt;
+
+ if (info->elem_cnt < info->dev->hw_attrs.min_hw_ceq_size ||
+ info->elem_cnt > info->dev->hw_attrs.max_hw_ceq_size)
+ return -EINVAL;
+
+ if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1))
+ return -EINVAL;
+ pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+
+ if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
+ return -EINVAL;
+
+ ceq->size = sizeof(*ceq);
+ ceq->ceqe_base = (struct irdma_ceqe *)info->ceqe_base;
+ ceq->ceq_id = info->ceq_id;
+ ceq->dev = info->dev;
+ ceq->elem_cnt = info->elem_cnt;
+ ceq->ceq_elem_pa = info->ceqe_pa;
+ ceq->virtual_map = info->virtual_map;
+ ceq->itr_no_expire = info->itr_no_expire;
+ ceq->reg_cq = info->reg_cq;
+ ceq->reg_cq_size = 0;
+ spin_lock_init(&ceq->req_cq_lock);
+ ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
+ ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
+ ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
+ ceq->tph_en = info->tph_en;
+ ceq->tph_val = info->tph_val;
+ ceq->vsi = info->vsi;
+ ceq->polarity = 1;
+ IRDMA_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
+ ceq->dev->ceq[info->ceq_id] = ceq;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_ceq_create - create ceq wqe
+ * @ceq: ceq sc structure
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int
+irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch,
+ bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+
+ cqp = ceq->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+ set_64bit_val(wqe, IRDMA_BYTE_16, ceq->elem_cnt);
+ set_64bit_val(wqe, IRDMA_BYTE_32,
+ (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
+ set_64bit_val(wqe, IRDMA_BYTE_48,
+ (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
+ set_64bit_val(wqe, IRDMA_BYTE_56,
+ LS_64(ceq->tph_val, IRDMA_CQPSQ_TPHVAL) |
+ LS_64(ceq->vsi->vsi_idx, IRDMA_CQPSQ_VSIIDX));
+ hdr = LS_64(ceq->ceq_id, IRDMA_CQPSQ_CEQ_CEQID) |
+ LS_64(IRDMA_CQP_OP_CREATE_CEQ, IRDMA_CQPSQ_OPCODE) |
+ LS_64(ceq->pbl_chunk_size, IRDMA_CQPSQ_CEQ_LPBLSIZE) |
+ LS_64(ceq->virtual_map, IRDMA_CQPSQ_CEQ_VMAP) |
+ LS_64(ceq->itr_no_expire, IRDMA_CQPSQ_CEQ_ITRNOEXPIRE) |
+ LS_64(ceq->tph_en, IRDMA_CQPSQ_TPHEN) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CEQ_CREATE WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_cceq_create_done - poll for control ceq wqe to complete
+ * @ceq: ceq sc structure
+ */
+static int
+irdma_sc_cceq_create_done(struct irdma_sc_ceq *ceq)
+{
+ struct irdma_sc_cqp *cqp;
+
+ cqp = ceq->dev->cqp;
+ return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CEQ,
+ NULL);
+}
+
+/**
+ * irdma_sc_cceq_destroy_done - poll for cceq destroy to complete
+ * @ceq: ceq sc structure
+ */
+int
+irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
+{
+ struct irdma_sc_cqp *cqp;
+
+ if (ceq->reg_cq)
+ irdma_sc_remove_cq_ctx(ceq, ceq->dev->ccq);
+
+ cqp = ceq->dev->cqp;
+ cqp->process_cqp_sds = irdma_update_sds_noccq;
+
+ return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_DESTROY_CEQ,
+ NULL);
+}
+
+/**
+ * irdma_sc_cceq_create - create cceq
+ * @ceq: ceq sc structure
+ * @scratch: u64 saved to be used during cqp completion
+ */
+int
+irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
+{
+ int ret_code;
+ struct irdma_sc_dev *dev = ceq->dev;
+
+ dev->ccq->vsi = ceq->vsi;
+ if (ceq->reg_cq) {
+ ret_code = irdma_sc_add_cq_ctx(ceq, ceq->dev->ccq);
+ if (ret_code)
+ return ret_code;
+ }
+
+ ret_code = irdma_sc_ceq_create(ceq, scratch, true);
+ if (!ret_code)
+ return irdma_sc_cceq_create_done(ceq);
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_ceq_destroy - destroy ceq
+ * @ceq: ceq sc structure
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+int
+irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+
+ cqp = ceq->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ set_64bit_val(wqe, IRDMA_BYTE_16, ceq->elem_cnt);
+ set_64bit_val(wqe, IRDMA_BYTE_48, ceq->first_pm_pbl_idx);
+ hdr = ceq->ceq_id |
+ LS_64(IRDMA_CQP_OP_DESTROY_CEQ, IRDMA_CQPSQ_OPCODE) |
+ LS_64(ceq->pbl_chunk_size, IRDMA_CQPSQ_CEQ_LPBLSIZE) |
+ LS_64(ceq->virtual_map, IRDMA_CQPSQ_CEQ_VMAP) |
+ LS_64(ceq->tph_en, IRDMA_CQPSQ_TPHEN) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CEQ_DESTROY WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ ceq->dev->ceq[ceq->ceq_id] = NULL;
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_process_ceq - process ceq
+ * @dev: sc device struct
+ * @ceq: ceq sc structure
+ *
+ * It is expected that the caller serializes this function with cleanup_ceqes()
+ * because these functions manipulate the same ceq
+ */
+void *
+irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq)
+{
+ u64 temp;
+ __le64 *ceqe;
+ struct irdma_sc_cq *cq = NULL;
+ struct irdma_sc_cq *temp_cq;
+ u8 polarity;
+ u32 cq_idx;
+ unsigned long flags;
+
+ do {
+ cq_idx = 0;
+ ceqe = IRDMA_GET_CURRENT_CEQ_ELEM(ceq);
+ get_64bit_val(ceqe, IRDMA_BYTE_0, &temp);
+ polarity = (u8)RS_64(temp, IRDMA_CEQE_VALID);
+ if (polarity != ceq->polarity)
+ return NULL;
+
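+ /* the cq context pointer is stored shifted right by one bit in the ceqe; shift it back to recover it */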
+ temp_cq = (struct irdma_sc_cq *)(irdma_uintptr) LS_64_1(temp, 1);
+ if (!temp_cq) {
+ cq_idx = IRDMA_INVALID_CQ_IDX;
+ IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
+
+ if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
+ ceq->polarity ^= 1;
+ continue;
+ }
+
+ cq = temp_cq;
+ if (ceq->reg_cq) {
+ spin_lock_irqsave(&ceq->req_cq_lock, flags);
+ cq_idx = irdma_sc_find_reg_cq(ceq, cq);
+ spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
+ }
+
+ IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
+ if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
+ ceq->polarity ^= 1;
+ } while (cq_idx == IRDMA_INVALID_CQ_IDX);
+
+ if (cq) {
+ cq->cq_uk.armed = false;
+ irdma_sc_cq_ack(cq);
+ }
+ return cq;
+}
+
+/**
+ * irdma_sc_cleanup_ceqes - clear the valid ceqes ctx matching the cq
+ * @cq: cq for which the ceqes need to be cleaned up
+ * @ceq: ceq ptr
+ *
+ * The function is called after the cq is destroyed to clean up
+ * its pending ceqe entries. It is expected that the caller serializes
+ * this function with process_ceq() in interrupt context.
+ */
+void
+irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq)
+{
+ struct irdma_sc_cq *next_cq;
+ u8 ceq_polarity = ceq->polarity;
+ __le64 *ceqe;
+ u8 polarity;
+ u64 temp;
+ int next;
+ u32 i;
+
+ next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, 0);
+
+ for (i = 1; i <= IRDMA_RING_SIZE(*ceq); i++) {
+ ceqe = IRDMA_GET_CEQ_ELEM_AT_POS(ceq, next);
+
+ get_64bit_val(ceqe, IRDMA_BYTE_0, &temp);
+ polarity = (u8)RS_64(temp, IRDMA_CEQE_VALID);
+ if (polarity != ceq_polarity)
+ return;
+
+ next_cq = (struct irdma_sc_cq *)(irdma_uintptr) LS_64_1(temp, 1);
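+ /* clear the cq context out of the entry (keeping the valid bit) so process_ceq skips it */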
+ if (cq == next_cq)
+ set_64bit_val(ceqe, IRDMA_BYTE_0, temp & IRDMA_CEQE_VALID_M);
+
+ next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, i);
+ if (!next)
+ ceq_polarity ^= 1;
+ }
+}
+
+/**
+ * irdma_sc_aeq_init - initialize aeq
+ * @aeq: aeq structure ptr
+ * @info: aeq initialization info
+ */
+int
+irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
+ struct irdma_aeq_init_info *info)
+{
+ u32 pble_obj_cnt;
+
+ if (info->elem_cnt < info->dev->hw_attrs.min_hw_aeq_size ||
+ info->elem_cnt > info->dev->hw_attrs.max_hw_aeq_size)
+ return -EINVAL;
+
+ pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+
+ if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
+ return -EINVAL;
+
+ aeq->size = sizeof(*aeq);
+ aeq->polarity = 1;
+ aeq->aeqe_base = (struct irdma_sc_aeqe *)info->aeqe_base;
+ aeq->dev = info->dev;
+ aeq->elem_cnt = info->elem_cnt;
+ aeq->aeq_elem_pa = info->aeq_elem_pa;
+ IRDMA_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);
+ aeq->virtual_map = info->virtual_map;
+ aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
+ aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
+ aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
+ aeq->msix_idx = info->msix_idx;
+ info->dev->aeq = aeq;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_aeq_create - create aeq
+ * @aeq: aeq structure ptr
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int
+irdma_sc_aeq_create(struct irdma_sc_aeq *aeq, u64 scratch,
+ bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+
+ cqp = aeq->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+ set_64bit_val(wqe, IRDMA_BYTE_16, aeq->elem_cnt);
+ set_64bit_val(wqe, IRDMA_BYTE_32,
+ (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
+ set_64bit_val(wqe, IRDMA_BYTE_48,
+ (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));
+
+ hdr = LS_64(IRDMA_CQP_OP_CREATE_AEQ, IRDMA_CQPSQ_OPCODE) |
+ LS_64(aeq->pbl_chunk_size, IRDMA_CQPSQ_AEQ_LPBLSIZE) |
+ LS_64(aeq->virtual_map, IRDMA_CQPSQ_AEQ_VMAP) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "AEQ_CREATE WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_aeq_destroy - destroy aeq during close
+ * @aeq: aeq structure ptr
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int
+irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch,
+ bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ struct irdma_sc_dev *dev;
+ u64 hdr;
+
+ dev = aeq->dev;
+ writel(0, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+ set_64bit_val(wqe, IRDMA_BYTE_16, aeq->elem_cnt);
+ set_64bit_val(wqe, IRDMA_BYTE_48, aeq->first_pm_pbl_idx);
+ hdr = LS_64(IRDMA_CQP_OP_DESTROY_AEQ, IRDMA_CQPSQ_OPCODE) |
+ LS_64(aeq->pbl_chunk_size, IRDMA_CQPSQ_AEQ_LPBLSIZE) |
+ LS_64(aeq->virtual_map, IRDMA_CQPSQ_AEQ_VMAP) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "AEQ_DESTROY WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * irdma_sc_get_next_aeqe - get next aeq entry
+ * @aeq: aeq structure ptr
+ * @info: aeqe info to be returned
+ */
+int
+irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
+ struct irdma_aeqe_info *info)
+{
+ u64 temp, compl_ctx;
+ __le64 *aeqe;
+ u16 wqe_idx;
+ u8 ae_src;
+ u8 polarity;
+
+ aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq);
+ get_64bit_val(aeqe, IRDMA_BYTE_0, &compl_ctx);
+ get_64bit_val(aeqe, IRDMA_BYTE_8, &temp);
+ polarity = (u8)RS_64(temp, IRDMA_AEQE_VALID);
+
+ if (aeq->polarity != polarity)
+ return -ENOENT;
+
+ irdma_debug_buf(aeq->dev, IRDMA_DEBUG_WQE, "AEQ_ENTRY WQE", aeqe, 16);
+
+ ae_src = (u8)RS_64(temp, IRDMA_AEQE_AESRC);
+ wqe_idx = (u16)RS_64(temp, IRDMA_AEQE_WQDESCIDX);
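+ /* the qp/cq id is split across low and high aeqe fields; the high field supplies bits 18 and up */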
+ info->qp_cq_id = (u32)RS_64(temp, IRDMA_AEQE_QPCQID_LOW) |
+ ((u32)RS_64(temp, IRDMA_AEQE_QPCQID_HI) << 18);
+ info->ae_id = (u16)RS_64(temp, IRDMA_AEQE_AECODE);
+ info->tcp_state = (u8)RS_64(temp, IRDMA_AEQE_TCPSTATE);
+ info->iwarp_state = (u8)RS_64(temp, IRDMA_AEQE_IWSTATE);
+ info->q2_data_written = (u8)RS_64(temp, IRDMA_AEQE_Q2DATA);
+ info->aeqe_overflow = (bool)RS_64(temp, IRDMA_AEQE_OVERFLOW);
+
+ info->ae_src = ae_src;
+ switch (info->ae_id) {
+ case IRDMA_AE_PRIV_OPERATION_DENIED:
+ case IRDMA_AE_AMP_INVALIDATE_TYPE1_MW:
+ case IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW:
+ case IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG:
+ case IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ case IRDMA_AE_UDA_XMIT_BAD_PD:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
+ case IRDMA_AE_BAD_CLOSE:
+ case IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO:
+ case IRDMA_AE_STAG_ZERO_INVALID:
+ case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
+ case IRDMA_AE_IB_INVALID_REQUEST:
+ case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
+ case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
+ case IRDMA_AE_IB_REMOTE_OP_ERROR:
+ case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
+ case IRDMA_AE_DDP_UBE_INVALID_MO:
+ case IRDMA_AE_DDP_UBE_INVALID_QN:
+ case IRDMA_AE_DDP_NO_L_BIT:
+ case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
+ case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
+ case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
+ case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
+ case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
+ case IRDMA_AE_INVALID_ARP_ENTRY:
+ case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
+ case IRDMA_AE_STALE_ARP_ENTRY:
+ case IRDMA_AE_INVALID_AH_ENTRY:
+ case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
+ case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+ case IRDMA_AE_LLP_DOUBT_REACHABILITY:
+ case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
+ case IRDMA_AE_RESET_SENT:
+ case IRDMA_AE_TERMINATE_SENT:
+ case IRDMA_AE_RESET_NOT_SENT:
+ case IRDMA_AE_LCE_QP_CATASTROPHIC:
+ case IRDMA_AE_QP_SUSPEND_COMPLETE:
+ case IRDMA_AE_UDA_L4LEN_INVALID:
+ info->qp = true;
+ info->compl_ctx = compl_ctx;
+ break;
+ case IRDMA_AE_LCE_CQ_CATASTROPHIC:
+ info->cq = true;
+ info->compl_ctx = LS_64_1(compl_ctx, 1);
+ ae_src = IRDMA_AE_SOURCE_RSVD;
+ break;
+ case IRDMA_AE_ROCE_EMPTY_MCG:
+ case IRDMA_AE_ROCE_BAD_MC_IP_ADDR:
+ case IRDMA_AE_ROCE_BAD_MC_QPID:
+ case IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH:
+ /* fallthrough */
+ case IRDMA_AE_LLP_CONNECTION_RESET:
+ case IRDMA_AE_LLP_SYN_RECEIVED:
+ case IRDMA_AE_LLP_FIN_RECEIVED:
+ case IRDMA_AE_LLP_CLOSE_COMPLETE:
+ case IRDMA_AE_LLP_TERMINATE_RECEIVED:
+ case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
+ ae_src = IRDMA_AE_SOURCE_RSVD;
+ info->qp = true;
+ info->compl_ctx = compl_ctx;
+ break;
+ default:
+ break;
+ }
+
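+ /* classify by event source to decide whether compl_ctx refers to a qp or a cq and which wqe it maps to */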
+ switch (ae_src) {
+ case IRDMA_AE_SOURCE_RQ:
+ case IRDMA_AE_SOURCE_RQ_0011:
+ info->qp = true;
+ info->rq = true;
+ info->wqe_idx = wqe_idx;
+ info->compl_ctx = compl_ctx;
+ break;
+ case IRDMA_AE_SOURCE_CQ:
+ case IRDMA_AE_SOURCE_CQ_0110:
+ case IRDMA_AE_SOURCE_CQ_1010:
+ case IRDMA_AE_SOURCE_CQ_1110:
+ info->cq = true;
+ info->compl_ctx = LS_64_1(compl_ctx, 1);
+ break;
+ case IRDMA_AE_SOURCE_SQ:
+ case IRDMA_AE_SOURCE_SQ_0111:
+ info->qp = true;
+ info->sq = true;
+ info->wqe_idx = wqe_idx;
+ info->compl_ctx = compl_ctx;
+ break;
+ case IRDMA_AE_SOURCE_IN_WR:
+ case IRDMA_AE_SOURCE_IN_RR:
+ info->qp = true;
+ info->compl_ctx = compl_ctx;
+ info->in_rdrsp_wr = true;
+ break;
+ case IRDMA_AE_SOURCE_OUT_RR:
+ case IRDMA_AE_SOURCE_OUT_RR_1111:
+ info->qp = true;
+ info->compl_ctx = compl_ctx;
+ info->out_rdrsp = true;
+ break;
+ case IRDMA_AE_SOURCE_RSVD:
+ default:
+ break;
+ }
+
+ IRDMA_RING_MOVE_TAIL(aeq->aeq_ring);
+ if (!IRDMA_RING_CURRENT_TAIL(aeq->aeq_ring))
+ aeq->polarity ^= 1;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_repost_aeq_entries - repost completed aeq entries
+ * @dev: sc device struct
+ * @count: allocate count
+ */
+int
+irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
+{
+ writel(count, dev->hw_regs[IRDMA_AEQALLOC]);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_ccq_init - initialize control cq
+ * @cq: sc's cq struct
+ * @info: info for control cq initialization
+ */
+int
+irdma_sc_ccq_init(struct irdma_sc_cq *cq, struct irdma_ccq_init_info *info)
+{
+ u32 pble_obj_cnt;
+
+ if (info->num_elem < info->dev->hw_attrs.uk_attrs.min_hw_cq_size ||
+ info->num_elem > info->dev->hw_attrs.uk_attrs.max_hw_cq_size)
+ return -EINVAL;
+
+ if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1))
+ return -EINVAL;
+
+ pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+
+ if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
+ return -EINVAL;
+
+ cq->cq_pa = info->cq_pa;
+ cq->cq_uk.cq_base = info->cq_base;
+ cq->shadow_area_pa = info->shadow_area_pa;
+ cq->cq_uk.shadow_area = info->shadow_area;
+ cq->shadow_read_threshold = info->shadow_read_threshold;
+ cq->dev = info->dev;
+ cq->ceq_id = info->ceq_id;
+ cq->cq_uk.cq_size = info->num_elem;
+ cq->cq_type = IRDMA_CQ_TYPE_CQP;
+ cq->ceqe_mask = info->ceqe_mask;
+ IRDMA_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);
+ cq->cq_uk.cq_id = 0; /* control cq is id 0 always */
+ cq->ceq_id_valid = info->ceq_id_valid;
+ cq->tph_en = info->tph_en;
+ cq->tph_val = info->tph_val;
+ cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;
+ cq->pbl_list = info->pbl_list;
+ cq->virtual_map = info->virtual_map;
+ cq->pbl_chunk_size = info->pbl_chunk_size;
+ cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+ cq->cq_uk.polarity = true;
+ cq->vsi = info->vsi;
+ cq->cq_uk.cq_ack_db = cq->dev->cq_ack_db;
+
+ /* Only applicable to CQs other than CCQ so initialize to zero */
+ cq->cq_uk.cqe_alloc_db = NULL;
+
+ info->dev->ccq = cq;
+ return 0;
+}
+
+/**
+ * irdma_sc_ccq_create_done - poll cqp for ccq create
+ * @ccq: ccq sc struct
+ */
+static inline int
+irdma_sc_ccq_create_done(struct irdma_sc_cq *ccq)
+{
+ struct irdma_sc_cqp *cqp;
+
+ cqp = ccq->dev->cqp;
+
+ return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CQ, NULL);
+}
+
+/**
+ * irdma_sc_ccq_create - create control cq
+ * @ccq: ccq sc struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @check_overflow: overflow flag for ccq
+ * @post_sq: flag for cqp db to ring
+ */
+int
+irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
+ bool check_overflow, bool post_sq)
+{
+ int ret_code;
+
+ ret_code = irdma_sc_cq_create(ccq, scratch, check_overflow, post_sq);
+ if (ret_code)
+ return ret_code;
+
+ if (post_sq) {
+ ret_code = irdma_sc_ccq_create_done(ccq);
+ if (ret_code)
+ return ret_code;
+ }
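+ /* with the ccq in place, route SD updates through the regular cqp command path */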
+ ccq->dev->cqp->process_cqp_sds = irdma_cqp_sds_cmd;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_ccq_destroy - destroy ccq during close
+ * @ccq: ccq sc struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+int
+irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+ int ret_code = 0;
+ u32 tail, val, error;
+
+ cqp = ccq->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOSPC;
+
+ set_64bit_val(wqe, IRDMA_BYTE_0, ccq->cq_uk.cq_size);
+ set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(ccq, 1));
+ set_64bit_val(wqe, IRDMA_BYTE_40, ccq->shadow_area_pa);
+
+ hdr = ccq->cq_uk.cq_id |
+ FLD_LS_64(ccq->dev, (ccq->ceq_id_valid ? ccq->ceq_id : 0),
+ IRDMA_CQPSQ_CQ_CEQID) |
+ LS_64(IRDMA_CQP_OP_DESTROY_CQ, IRDMA_CQPSQ_OPCODE) |
+ LS_64(ccq->ceqe_mask, IRDMA_CQPSQ_CQ_ENCEQEMASK) |
+ LS_64(ccq->ceq_id_valid, IRDMA_CQPSQ_CQ_CEQIDVALID) |
+ LS_64(ccq->tph_en, IRDMA_CQPSQ_TPHEN) |
+ LS_64(ccq->cq_uk.avoid_mem_cflct, IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT) |
+ LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+
+ irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CCQ_DESTROY WQE", wqe,
+ IRDMA_CQP_WQE_SIZE * 8);
+ irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
+
+ if (post_sq) {
+ irdma_sc_cqp_post_sq(cqp);
+ ret_code = irdma_cqp_poll_registers(cqp, tail,
+ cqp->dev->hw_attrs.max_done_count);
+ }
+
+ cqp->process_cqp_sds = irdma_update_sds_noccq;
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
+ * @dev: ptr to irdma_sc_dev struct
+ * @hmc_fn_id: hmc function id
+ */
+int
+irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id)
+{
+ struct irdma_hmc_info *hmc_info;
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc;
+ struct irdma_dma_mem query_fpm_mem;
+ int ret_code = 0;
+ u8 wait_type;
+
+ hmc_info = dev->hmc_info;
+ hmc_fpm_misc = &dev->hmc_fpm_misc;
+ query_fpm_mem.pa = dev->fpm_query_buf_pa;
+ query_fpm_mem.va = d