author		Bartosz Sobczak <bartosz.sobczak@intel.com>	2023-07-05 14:23:06 +0000
committer	Eric Joyner <erj@FreeBSD.org>			2023-09-22 21:54:37 +0000
commit		f5f2cc9ee688b0f6fc372f784ae8642de2e6ef7e (patch)
tree		346bf24028dc99162023014257f8964524e70302
parent		dfdf0d7cb41dd00aceace8d4e2c46205ded35fc4 (diff)
download	src-f5f2cc9ee688b0f6fc372f784ae8642de2e6ef7e.tar.gz
		src-f5f2cc9ee688b0f6fc372f784ae8642de2e6ef7e.zip
irdma(4): upgrade to 1.2.17-k
Update the Intel irdma driver to version 1.2.17-k.

Notable changes:
- pf-reset handling improvements, including fixes in communication with if_ice(4)
- avoid races when handling various events
- add sw stats sysctls
- hand over pe_criterr handling from ice(4) to irdma(4)
- debug print adjustments
- fix crash after changes in irdma_add_mqh_ifa_cb

Additional fixes in stable/13 but not in the original cherry-picked commit:
- code appearance improvements
- bug fixes in fbsd_kcompat.c
- avoid waiting procedures under lock in irdma_add_mqh_ifa_cb
- busy wait for manage_qhash
- stats don't need to be RDTUN; RD is enough

Signed-off-by: Bartosz Sobczak <bartosz.sobczak@intel.com>
Signed-off-by: Eric Joyner <erj@FreeBSD.org>
Reviewed by:	erj@
Sponsored by:	Intel Corporation
Differential Revision:	https://reviews.freebsd.org/D41425

(cherry picked from commit 01fbb86991d524288a785b544c29d9c5ea1b61fb)
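A note on the new sw stats sysctls: they are created per PF under dev.irdma<N>.sw_stats (see irdma_init_tunable() and irdma_sw_stats_tunables_init() in the diff below). A minimal userland sketch for reading one of the new leaves, assuming unit irdma0; the leaf name cm_accepts is taken from the diff:

/*
 * Illustrative sketch, not part of this commit: read one of the new
 * software-statistics sysctls. Unit number irdma0 is an assumption.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t accepts;
	size_t len = sizeof(accepts);

	/* cm_accepts is exported with SYSCTL_ADD_U64 by this change */
	if (sysctlbyname("dev.irdma0.sw_stats.cm_accepts", &accepts,
	    &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("cm_accepts: %ju\n", (uintmax_t)accepts);
	return (0);
}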
-rw-r--r--  contrib/ofed/libirdma/abi.h | 1
-rw-r--r--  contrib/ofed/libirdma/i40e_devids.h | 1
-rw-r--r--  contrib/ofed/libirdma/i40iw_hw.h | 3
-rw-r--r--  contrib/ofed/libirdma/ice_devids.h | 1
-rw-r--r--  contrib/ofed/libirdma/irdma-abi.h | 1
-rw-r--r--  contrib/ofed/libirdma/irdma.h | 1
-rw-r--r--  contrib/ofed/libirdma/irdma_defs.h | 26
-rw-r--r--  contrib/ofed/libirdma/irdma_uk.c | 61
-rw-r--r--  contrib/ofed/libirdma/irdma_umain.c | 3
-rw-r--r--  contrib/ofed/libirdma/irdma_umain.h | 1
-rw-r--r--  contrib/ofed/libirdma/irdma_uquery.h | 1
-rw-r--r--  contrib/ofed/libirdma/irdma_user.h | 2
-rw-r--r--  contrib/ofed/libirdma/irdma_uverbs.c | 62
-rw-r--r--  contrib/ofed/libirdma/osdep.h | 80
-rw-r--r--  sys/dev/irdma/fbsd_kcompat.c | 444
-rw-r--r--  sys/dev/irdma/fbsd_kcompat.h | 37
-rw-r--r--  sys/dev/irdma/icrdma.c | 310
-rw-r--r--  sys/dev/irdma/icrdma_hw.c | 2
-rw-r--r--  sys/dev/irdma/irdma.h | 2
-rw-r--r--  sys/dev/irdma/irdma_cm.c | 522
-rw-r--r--  sys/dev/irdma/irdma_cm.h | 13
-rw-r--r--  sys/dev/irdma/irdma_ctrl.c | 164
-rw-r--r--  sys/dev/irdma/irdma_defs.h | 87
-rw-r--r--  sys/dev/irdma/irdma_hmc.c | 6
-rw-r--r--  sys/dev/irdma/irdma_hmc.h | 6
-rw-r--r--  sys/dev/irdma/irdma_hw.c | 106
-rw-r--r--  sys/dev/irdma/irdma_kcompat.c | 118
-rw-r--r--  sys/dev/irdma/irdma_main.h | 14
-rw-r--r--  sys/dev/irdma/irdma_pble.c | 16
-rw-r--r--  sys/dev/irdma/irdma_pble.h | 4
-rw-r--r--  sys/dev/irdma/irdma_protos.h | 6
-rw-r--r--  sys/dev/irdma/irdma_puda.c | 9
-rw-r--r--  sys/dev/irdma/irdma_puda.h | 4
-rw-r--r--  sys/dev/irdma/irdma_type.h | 25
-rw-r--r--  sys/dev/irdma/irdma_uda.c | 15
-rw-r--r--  sys/dev/irdma/irdma_uda.h | 2
-rw-r--r--  sys/dev/irdma/irdma_uk.c | 85
-rw-r--r--  sys/dev/irdma/irdma_user.h | 1
-rw-r--r--  sys/dev/irdma/irdma_utils.c | 229
-rw-r--r--  sys/dev/irdma/irdma_verbs.c | 438
-rw-r--r--  sys/dev/irdma/irdma_verbs.h | 92
-rw-r--r--  sys/dev/irdma/irdma_ws.c | 4
-rw-r--r--  sys/modules/irdma/Makefile | 2
44 files changed, 1761 insertions, 1306 deletions
diff --git a/contrib/ofed/libirdma/abi.h b/contrib/ofed/libirdma/abi.h
index 6553ebcbcaca..1d84fbc747f6 100644
--- a/contrib/ofed/libirdma/abi.h
+++ b/contrib/ofed/libirdma/abi.h
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#ifndef PROVIDER_IRDMA_ABI_H
#define PROVIDER_IRDMA_ABI_H
diff --git a/contrib/ofed/libirdma/i40e_devids.h b/contrib/ofed/libirdma/i40e_devids.h
index 1b0eaae95b82..e775a75bade5 100644
--- a/contrib/ofed/libirdma/i40e_devids.h
+++ b/contrib/ofed/libirdma/i40e_devids.h
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#ifndef I40E_DEVIDS_H
#define I40E_DEVIDS_H
diff --git a/contrib/ofed/libirdma/i40iw_hw.h b/contrib/ofed/libirdma/i40iw_hw.h
index d04c37d689cb..fcbfea8dfe09 100644
--- a/contrib/ofed/libirdma/i40iw_hw.h
+++ b/contrib/ofed/libirdma/i40iw_hw.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2022 Intel Corporation
+ * Copyright (c) 2015 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#ifndef I40IW_HW_H
#define I40IW_HW_H
diff --git a/contrib/ofed/libirdma/ice_devids.h b/contrib/ofed/libirdma/ice_devids.h
index 57f26bc33260..57a7f2f7c2af 100644
--- a/contrib/ofed/libirdma/ice_devids.h
+++ b/contrib/ofed/libirdma/ice_devids.h
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#ifndef ICE_DEVIDS_H
#define ICE_DEVIDS_H
diff --git a/contrib/ofed/libirdma/irdma-abi.h b/contrib/ofed/libirdma/irdma-abi.h
index b7d4b61c162d..8c04dcce2e8b 100644
--- a/contrib/ofed/libirdma/irdma-abi.h
+++ b/contrib/ofed/libirdma/irdma-abi.h
@@ -35,7 +35,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#ifndef IRDMA_ABI_H
#define IRDMA_ABI_H
diff --git a/contrib/ofed/libirdma/irdma.h b/contrib/ofed/libirdma/irdma.h
index 1dd09c36c7ea..0e2a5189acd1 100644
--- a/contrib/ofed/libirdma/irdma.h
+++ b/contrib/ofed/libirdma/irdma.h
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#ifndef IRDMA_H
#define IRDMA_H
diff --git a/contrib/ofed/libirdma/irdma_defs.h b/contrib/ofed/libirdma/irdma_defs.h
index 3d8b59c4b78e..da9eea472568 100644
--- a/contrib/ofed/libirdma/irdma_defs.h
+++ b/contrib/ofed/libirdma/irdma_defs.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2022 Intel Corporation
+ * Copyright (c) 2015 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#ifndef IRDMA_DEFS_H
#define IRDMA_DEFS_H
@@ -312,7 +311,7 @@
#define IRDMA_GET_CQ_ELEM_AT_OFFSET(_cq, _i, _cqe) \
{ \
- register __u32 offset; \
+ __u32 offset; \
offset = IRDMA_GET_RING_OFFSET((_cq)->cq_ring, _i); \
(_cqe) = (_cq)->cq_base[offset].buf; \
}
@@ -338,7 +337,7 @@
#define IRDMA_RING_MOVE_HEAD(_ring, _retcode) \
{ \
- register u32 size; \
+ u32 size; \
size = (_ring).size; \
if (!IRDMA_RING_FULL_ERR(_ring)) { \
(_ring).head = ((_ring).head + 1) % size; \
@@ -349,7 +348,7 @@
}
#define IRDMA_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
{ \
- register u32 size; \
+ u32 size; \
size = (_ring).size; \
if ((IRDMA_RING_USED_QUANTA(_ring) + (_count)) < size) { \
(_ring).head = ((_ring).head + (_count)) % size; \
@@ -360,7 +359,7 @@
}
#define IRDMA_SQ_RING_MOVE_HEAD(_ring, _retcode) \
{ \
- register u32 size; \
+ u32 size; \
size = (_ring).size; \
if (!IRDMA_SQ_RING_FULL_ERR(_ring)) { \
(_ring).head = ((_ring).head + 1) % size; \
@@ -371,7 +370,7 @@
}
#define IRDMA_SQ_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
{ \
- register u32 size; \
+ u32 size; \
size = (_ring).size; \
if ((IRDMA_RING_USED_QUANTA(_ring) + (_count)) < (size - 256)) { \
(_ring).head = ((_ring).head + (_count)) % size; \
@@ -457,6 +456,19 @@ enum irdma_qp_wqe_size {
IRDMA_WQE_SIZE_256 = 256,
};
+enum irdma_ws_op_type {
+ IRDMA_WS_OP_TYPE_NODE = 0,
+ IRDMA_WS_OP_TYPE_LEAF_NODE_GROUP,
+};
+
+enum irdma_ws_rate_limit_flags {
+ IRDMA_WS_RATE_LIMIT_FLAGS_VALID = 0x1,
+ IRDMA_WS_NO_RDMA_RATE_LIMIT = 0x2,
+ IRDMA_WS_LEAF_NODE_IS_PART_GROUP = 0x4,
+ IRDMA_WS_TREE_RATE_LIMITING = 0x8,
+ IRDMA_WS_PACING_CONTROL = 0x10,
+};
+
/**
* set_64bit_val - set 64 bit value to hw wqe
* @wqe_words: wqe addr to write
diff --git a/contrib/ofed/libirdma/irdma_uk.c b/contrib/ofed/libirdma/irdma_uk.c
index 97e3ac553c26..5fa9d792745f 100644
--- a/contrib/ofed/libirdma/irdma_uk.c
+++ b/contrib/ofed/libirdma/irdma_uk.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2022 Intel Corporation
+ * Copyright (c) 2015 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#include "osdep.h"
#include "irdma_defs.h"
@@ -275,7 +274,8 @@ irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && wqe_quanta == 1 &&
(IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
- wqe_0[3] = htole64(FIELD_PREP(IRDMAQPSQ_VALID, !qp->swqe_polarity));
+ wqe_0[3] = htole64(FIELD_PREP(IRDMAQPSQ_VALID,
+ qp->swqe_polarity ? 0 : 1));
}
qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
@@ -683,8 +683,8 @@ irdma_set_mw_bind_wqe(__le64 * wqe,
* @polarity: polarity of wqe valid bit
*/
static void
-irdma_copy_inline_data(u8 *wqe, struct irdma_sge *sge_list, u32 num_sges,
- u8 polarity)
+irdma_copy_inline_data(u8 *wqe, struct irdma_sge *sge_list,
+ u32 num_sges, u8 polarity)
{
u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
u32 quanta_bytes_remaining = 8;
@@ -1173,7 +1173,7 @@ irdma_repost_rq_wqes(struct irdma_qp_uk *qp, u32 start_idx,
u32 end_idx)
{
__le64 *dst_wqe, *src_wqe;
- u32 wqe_idx;
+ u32 wqe_idx = 0;
u8 wqe_quanta = qp->rq_wqe_size_multiplier;
bool flip_polarity;
u64 val;
@@ -1480,7 +1480,8 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
sw_wqe = qp->sq_base[tail].elem;
get_64bit_val(sw_wqe, IRDMA_BYTE_24,
&wqe_qword);
- info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
+ info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE,
+ wqe_qword);
IRDMA_RING_SET_TAIL(qp->sq_ring,
tail + qp->sq_wrtrk_array[tail].quanta);
if (info->op_type != IRDMAQP_OP_NOP) {
@@ -1690,6 +1691,7 @@ irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
{
bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2 ? true : false;
int status;
+
irdma_get_wqe_shift(ukinfo->uk_attrs,
imm_support ? ukinfo->max_sq_frag_cnt + 1 :
ukinfo->max_sq_frag_cnt,
@@ -1834,6 +1836,9 @@ irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
if (polarity != temp)
break;
+ /* Ensure CQE contents are read after valid bit is checked */
+ udma_from_device_barrier();
+
get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
if ((void *)(irdma_uintptr) comp_ctx == q)
set_64bit_val(cqe, IRDMA_BYTE_8, 0);
@@ -1846,48 +1851,6 @@ irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
}
/**
- * irdma_nop - post a nop
- * @qp: hw qp ptr
- * @wr_id: work request id
- * @signaled: signaled for completion
- * @post_sq: ring doorbell
- */
-int
-irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
-{
- __le64 *wqe;
- u64 hdr;
- u32 wqe_idx;
- struct irdma_post_sq_info info = {0};
- u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;
-
- info.push_wqe = qp->push_db ? true : false;
- info.wr_id = wr_id;
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, 0, &info);
- if (!wqe)
- return ENOSPC;
-
- set_64bit_val(wqe, IRDMA_BYTE_0, 0);
- set_64bit_val(wqe, IRDMA_BYTE_8, 0);
- set_64bit_val(wqe, IRDMA_BYTE_16, 0);
-
- hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
- FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
- FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
-
- udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
-
- set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
-
- if (info.push_wqe)
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- else if (post_sq)
- irdma_uk_qp_post_wr(qp);
-
- return 0;
-}
-
-/**
* irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
* @frag_cnt: number of fragments
* @quanta: quanta for frag_cnt
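The udma_from_device_barrier() added to irdma_uk_clean_cq() above orders reads of the CQE payload after the valid-bit check, so the CPU cannot speculatively load stale completion data. A standalone sketch of that polling pattern, with a C11 acquire fence standing in for the driver's barrier; the bit layout and function name are illustrative, not the real IRDMA CQE format:

#include <stdatomic.h>
#include <stdint.h>

static int
poll_one_cqe(volatile uint64_t *cqe, unsigned expected_polarity,
    uint64_t *comp_ctx)
{
	/* Illustrative layout: valid/polarity bit in the top of qword 3 */
	unsigned polarity = (unsigned)(cqe[3] >> 63);

	if (polarity != expected_polarity)
		return (0);		/* CQE not valid yet */

	/*
	 * Ensure CQE contents are read only after the valid bit is
	 * checked, mirroring udma_from_device_barrier() above.
	 */
	atomic_thread_fence(memory_order_acquire);

	*comp_ctx = cqe[1];		/* payload read is now ordered */
	return (1);
}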
diff --git a/contrib/ofed/libirdma/irdma_umain.c b/contrib/ofed/libirdma/irdma_umain.c
index 6c823646b375..2902dd963e18 100644
--- a/contrib/ofed/libirdma/irdma_umain.c
+++ b/contrib/ofed/libirdma/irdma_umain.c
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#include <sys/mman.h>
@@ -49,7 +48,7 @@
/**
* Driver version
*/
-char libirdma_version[] = "1.1.11-k";
+char libirdma_version[] = "1.2.17-k";
unsigned int irdma_dbg;
diff --git a/contrib/ofed/libirdma/irdma_umain.h b/contrib/ofed/libirdma/irdma_umain.h
index c67c5d7076f9..312310dcb754 100644
--- a/contrib/ofed/libirdma/irdma_umain.h
+++ b/contrib/ofed/libirdma/irdma_umain.h
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#ifndef IRDMA_UMAIN_H
#define IRDMA_UMAIN_H
diff --git a/contrib/ofed/libirdma/irdma_uquery.h b/contrib/ofed/libirdma/irdma_uquery.h
index cf56818e4d51..4660c05f0a91 100644
--- a/contrib/ofed/libirdma/irdma_uquery.h
+++ b/contrib/ofed/libirdma/irdma_uquery.h
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#ifndef IRDMA_UQUERY_H
diff --git a/contrib/ofed/libirdma/irdma_user.h b/contrib/ofed/libirdma/irdma_user.h
index 8eb9ae7990e7..0f0aa30bec91 100644
--- a/contrib/ofed/libirdma/irdma_user.h
+++ b/contrib/ofed/libirdma/irdma_user.h
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#ifndef IRDMA_USER_H
#define IRDMA_USER_H
@@ -572,7 +571,6 @@ void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
u32 inline_data, u8 *shift);
int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth);
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth);
-int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift, u32 *srqdepth);
void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
u32 wqe_idx, bool post_sq);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
diff --git a/contrib/ofed/libirdma/irdma_uverbs.c b/contrib/ofed/libirdma/irdma_uverbs.c
index 14efab96a107..50fd2f23476d 100644
--- a/contrib/ofed/libirdma/irdma_uverbs.c
+++ b/contrib/ofed/libirdma/irdma_uverbs.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (C) 2019 - 2022 Intel Corporation
+ * Copyright (C) 2019 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#include <config.h>
#include <stdlib.h>
@@ -266,11 +265,13 @@ irdma_ualloc_mw(struct ibv_pd *pd, enum ibv_mw_type type)
if (!mw)
return NULL;
- if (ibv_cmd_alloc_mw(pd, type, mw, &cmd, sizeof(cmd), &resp,
- sizeof(resp))) {
+ err = ibv_cmd_alloc_mw(pd, type, mw, &cmd, sizeof(cmd), &resp,
+ sizeof(resp));
+ if (err) {
printf("%s: Failed to alloc memory window\n",
__func__);
free(mw);
+ errno = err;
return NULL;
}
@@ -446,8 +447,10 @@ ucreate_cq(struct ibv_context *context,
if (!iwucq)
return NULL;
- if (pthread_spin_init(&iwucq->lock, PTHREAD_PROCESS_PRIVATE)) {
+ ret = pthread_spin_init(&iwucq->lock, PTHREAD_PROCESS_PRIVATE);
+ if (ret) {
free(iwucq);
+ errno = ret;
return NULL;
}
@@ -464,8 +467,10 @@ ucreate_cq(struct ibv_context *context,
iwucq->buf_size = total_size;
info.cq_base = irdma_alloc_hw_buf(total_size);
- if (!info.cq_base)
+ if (!info.cq_base) {
+ ret = ENOMEM;
goto err_cq_base;
+ }
memset(info.cq_base, 0, total_size);
reg_mr_cmd.reg_type = IRDMA_MEMREG_TYPE_CQ;
@@ -476,17 +481,17 @@ ucreate_cq(struct ibv_context *context,
IBV_ACCESS_LOCAL_WRITE, &iwucq->vmr.ibv_mr,
&reg_mr_cmd.ibv_cmd, sizeof(reg_mr_cmd),
&reg_mr_resp, sizeof(reg_mr_resp));
- if (ret) {
- errno = ret;
+ if (ret)
goto err_dereg_mr;
- }
iwucq->vmr.ibv_mr.pd = &iwvctx->iwupd->ibv_pd;
if (uk_attrs->feature_flags & IRDMA_FEATURE_CQ_RESIZE) {
info.shadow_area = irdma_alloc_hw_buf(IRDMA_DB_SHADOW_AREA_SIZE);
- if (!info.shadow_area)
+ if (!info.shadow_area) {
+ ret = ENOMEM;
goto err_alloc_shadow;
+ }
memset(info.shadow_area, 0, IRDMA_DB_SHADOW_AREA_SIZE);
reg_mr_shadow_cmd.reg_type = IRDMA_MEMREG_TYPE_CQ;
@@ -499,7 +504,6 @@ ucreate_cq(struct ibv_context *context,
&reg_mr_shadow_resp, sizeof(reg_mr_shadow_resp));
if (ret) {
irdma_free_hw_buf(info.shadow_area, IRDMA_DB_SHADOW_AREA_SIZE);
- errno = ret;
goto err_alloc_shadow;
}
@@ -517,10 +521,8 @@ ucreate_cq(struct ibv_context *context,
&cmd.ibv_cmd, sizeof(cmd.ibv_cmd), sizeof(cmd), &resp.ibv_resp,
sizeof(resp.ibv_resp), sizeof(resp));
attr_ex->cqe = ncqe;
- if (ret) {
- errno = ret;
+ if (ret)
goto err_create_cq;
- }
if (ext_cq)
irdma_ibvcq_ex_fill_priv_funcs(iwucq, attr_ex);
@@ -548,6 +550,7 @@ err_cq_base:
free(iwucq);
+ errno = ret;
return NULL;
}
@@ -1560,7 +1563,8 @@ irdma_ucreate_qp(struct ibv_pd *pd,
memset(iwuqp, 0, sizeof(*iwuqp));
- if (pthread_spin_init(&iwuqp->lock, PTHREAD_PROCESS_PRIVATE))
+ status = pthread_spin_init(&iwuqp->lock, PTHREAD_PROCESS_PRIVATE);
+ if (status)
goto err_free_qp;
info.sq_size = info.sq_depth >> info.sq_shift;
@@ -1575,35 +1579,37 @@ irdma_ucreate_qp(struct ibv_pd *pd,
}
iwuqp->recv_sges = calloc(attr->cap.max_recv_sge, sizeof(*iwuqp->recv_sges));
- if (!iwuqp->recv_sges)
+ if (!iwuqp->recv_sges) {
+ status = errno; /* preserve errno */
goto err_destroy_lock;
+ }
info.wqe_alloc_db = (u32 *)iwvctx->db;
info.legacy_mode = iwvctx->legacy_mode;
info.sq_wrtrk_array = calloc(info.sq_depth, sizeof(*info.sq_wrtrk_array));
- if (!info.sq_wrtrk_array)
+ if (!info.sq_wrtrk_array) {
+ status = errno; /* preserve errno */
goto err_free_rsges;
+ }
info.rq_wrid_array = calloc(info.rq_depth, sizeof(*info.rq_wrid_array));
- if (!info.rq_wrid_array)
+ if (!info.rq_wrid_array) {
+ status = errno; /* preserve errno */
goto err_free_sq_wrtrk;
+ }
iwuqp->sq_sig_all = attr->sq_sig_all;
iwuqp->qp_type = attr->qp_type;
status = irdma_vmapped_qp(iwuqp, pd, attr, &info, iwvctx->legacy_mode);
- if (status) {
- errno = status;
+ if (status)
goto err_free_rq_wrid;
- }
iwuqp->qp.back_qp = iwuqp;
iwuqp->qp.lock = &iwuqp->lock;
status = irdma_uk_qp_init(&iwuqp->qp, &info);
- if (status) {
- errno = status;
+ if (status)
goto err_free_vmap_qp;
- }
attr->cap.max_send_wr = (info.sq_depth - IRDMA_SQ_RSVD) >> info.sq_shift;
attr->cap.max_recv_wr = (info.rq_depth - IRDMA_RQ_RSVD) >> info.rq_shift;
@@ -1625,6 +1631,7 @@ err_free_qp:
printf("%s: failed to create QP\n", __func__);
free(iwuqp);
+ errno = status;
return NULL;
}
@@ -2081,11 +2088,10 @@ irdma_ucreate_ah(struct ibv_pd *ibpd, struct ibv_ah_attr *attr)
struct irdma_ucreate_ah_resp resp = {};
int err;
- err = ibv_query_gid(ibpd->context, attr->port_num, attr->grh.sgid_index,
- &sgid);
- if (err) {
+ if (ibv_query_gid(ibpd->context, attr->port_num, attr->grh.sgid_index,
+ &sgid)) {
fprintf(stderr, "irdma: Error from ibv_query_gid.\n");
- errno = err;
+ errno = ENOENT;
return NULL;
}
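The hunks above tighten the libibverbs convention that object creators return NULL and report the cause via errno on every failure path. A caller-side sketch using standard libibverbs calls; the CQE count is a placeholder:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <infiniband/verbs.h>

static struct ibv_cq *
create_cq_checked(struct ibv_context *ctx)
{
	struct ibv_cq *cq;

	cq = ibv_create_cq(ctx, 256, NULL, NULL, 0);
	if (cq == NULL)	/* errno now reliably carries the failure reason */
		fprintf(stderr, "ibv_create_cq: %s\n", strerror(errno));
	return (cq);
}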
diff --git a/contrib/ofed/libirdma/osdep.h b/contrib/ofed/libirdma/osdep.h
index fc39c139b6ca..961e9cc99cf6 100644
--- a/contrib/ofed/libirdma/osdep.h
+++ b/contrib/ofed/libirdma/osdep.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2021 - 2022 Intel Corporation
+ * Copyright (c) 2021 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#ifndef _ICRDMA_OSDEP_H_
#define _ICRDMA_OSDEP_H_
@@ -46,7 +45,6 @@
#include <sys/bus_dma.h>
#include <sys/endian.h>
-#define ATOMIC atomic_t
#define IOMEM
#define IRDMA_NTOHL(a) ntohl(a)
#define IRDMA_NTOHS(a) ntohs(a)
@@ -80,23 +78,13 @@
#define STATS_TIMER_DELAY 60000
/* a couple of linux size defines */
-#define SZ_128 128
-#define SZ_2K SZ_128 * 16
-#define SZ_1G (SZ_1K * SZ_1K * SZ_1K)
-#define SPEED_1000 1000
-#define SPEED_10000 10000
-#define SPEED_20000 20000
-#define SPEED_25000 25000
-#define SPEED_40000 40000
-#define SPEED_100000 100000
-
#define BIT_ULL(a) (1ULL << (a))
#define min(a, b) ((a) > (b) ? (b) : (a))
#ifndef likely
-#define likely(x) __builtin_expect((x), 1)
+#define likely(x) __builtin_expect((x), 1)
#endif
#ifndef unlikely
-#define unlikely(x) __builtin_expect((x), 0)
+#define unlikely(x) __builtin_expect((x), 0)
#endif
#define __aligned_u64 uint64_t __aligned(8)
@@ -110,7 +98,7 @@
#define irdma_print(S, ...) printf("%s:%d "S, __FUNCTION__, __LINE__, ##__VA_ARGS__)
#define irdma_debug_buf(dev, mask, desc, buf, size) \
do { \
- u32 i; \
+ u32 i; \
if (!((mask) & (dev)->debug_mask)) { \
break; \
} \
@@ -120,20 +108,20 @@ do { \
irdma_debug(dev, mask, "index %03d val: %016lx\n", i, ((unsigned long *)(buf))[i / 8]); \
} while(0)
-#define irdma_debug(h, m, s, ...) \
-do { \
- if (!(h)) { \
- if ((m) == IRDMA_DEBUG_INIT) \
+#define irdma_debug(h, m, s, ...) \
+do { \
+ if (!(h)) { \
+ if ((m) == IRDMA_DEBUG_INIT) \
printf("irdma INIT " s, ##__VA_ARGS__); \
- } else if (((m) & (h)->debug_mask)) { \
- printf("irdma " s, ##__VA_ARGS__); \
- } \
+ } else if (((m) & (h)->debug_mask)) { \
+ printf("irdma " s, ##__VA_ARGS__); \
+ } \
} while (0)
extern unsigned int irdma_dbg;
-#define libirdma_debug(fmt, args...) \
-do { \
- if (irdma_dbg) \
- printf("libirdma-%s: " fmt, __func__, ##args); \
+#define libirdma_debug(fmt, args...) \
+do { \
+ if (irdma_dbg) \
+ printf("libirdma-%s: " fmt, __func__, ##args); \
} while (0)
#define irdma_dev_err(ibdev, fmt, ...) \
pr_err("%s:%s:%d ERR "fmt, (ibdev)->name, __func__, __LINE__, ##__VA_ARGS__)
@@ -142,18 +130,6 @@ do { \
#define irdma_dev_info(a, b, ...) printf(b, ##__VA_ARGS__)
#define irdma_pr_warn printf
-#define dump_struct(s, sz, name) \
-do { \
- unsigned char *a; \
- printf("%s %u", (name), (unsigned int)(sz)); \
- for (a = (unsigned char*)(s); a < (unsigned char *)(s) + (sz) ; a ++) { \
- if ((u64)a % 8 == 0) \
- printf("\n%p ", a); \
- printf("%2x ", *a); \
- } \
- printf("\n"); \
-}while(0)
-
/*
* debug definition end
*/
@@ -177,13 +153,11 @@ struct irdma_sc_vsi;
#define rt_tos2priority(tos) (tos >> 5)
#define ah_attr_to_dmac(attr) ((attr).dmac)
-#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \
- ib_modify_qp_is_ok(cur_state, next_state, type, mask)
#define kc_typeq_ib_wr const
#define kc_ifp_find ip_ifp_find
#define kc_ifp6_find ip6_ifp_find
#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \
- ib_gid_to_network_type(gid_type, gid)
+ ib_gid_to_network_type(gid_type, gid)
#define irdma_del_timer_compat(tt) del_timer((tt))
#define IRDMA_TAILQ_FOREACH CK_STAILQ_FOREACH
#define IRDMA_TAILQ_FOREACH_SAFE CK_STAILQ_FOREACH_SAFE
@@ -191,36 +165,36 @@ struct irdma_sc_vsi;
static inline void db_wr32(__u32 val, __u32 *wqe_word)
{
- *wqe_word = val;
+ *wqe_word = val;
}
void *hw_to_dev(struct irdma_hw *hw);
struct irdma_dma_mem {
- void *va;
- u64 pa;
+ void *va;
+ u64 pa;
bus_dma_tag_t tag;
bus_dmamap_t map;
bus_dma_segment_t seg;
bus_size_t size;
- int nseg;
- int flags;
+ int nseg;
+ int flags;
};
struct irdma_virt_mem {
- void *va;
- u32 size;
+ void *va;
+ u32 size;
};
#ifndef verbs_mr
enum ibv_mr_type {
- IBV_MR_TYPE_MR,
- IBV_MR_TYPE_NULL_MR,
+ IBV_MR_TYPE_MR,
+ IBV_MR_TYPE_NULL_MR,
};
struct verbs_mr {
- struct ibv_mr ibv_mr;
- enum ibv_mr_type mr_type;
+ struct ibv_mr ibv_mr;
+ enum ibv_mr_type mr_type;
int access;
};
#define verbs_get_mr(mr) container_of((mr), struct verbs_mr, ibv_mr)
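For illustration, the verbs_get_mr() fallback defined just above uses container_of() to recover the wrapping verbs_mr from the embedded ibv_mr member; mr_access_flags() is a made-up helper, not driver code:

static int
mr_access_flags(struct ibv_mr *mr)
{
	struct verbs_mr *vmr = verbs_get_mr(mr);

	return (vmr->access);
}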
diff --git a/sys/dev/irdma/fbsd_kcompat.c b/sys/dev/irdma/fbsd_kcompat.c
index 4928b4189cca..5c11f7a35544 100644
--- a/sys/dev/irdma/fbsd_kcompat.c
+++ b/sys/dev/irdma/fbsd_kcompat.c
@@ -182,6 +182,31 @@ irdma_ieq_check_mpacrc(void *desc,
return ret_code;
}
+static u_int
+irdma_add_ipv6_cb(void *arg, struct ifaddr *addr, u_int count __unused){
+ struct irdma_device *iwdev = arg;
+ struct sockaddr_in6 *sin6;
+ u32 local_ipaddr6[4] = {};
+ char ip6buf[INET6_ADDRSTRLEN];
+ u8 *mac_addr;
+
+ sin6 = (struct sockaddr_in6 *)addr->ifa_addr;
+
+ irdma_copy_ip_ntohl(local_ipaddr6, (u32 *)&sin6->sin6_addr);
+
+ mac_addr = if_getlladdr(addr->ifa_ifp);
+
+ printf("%s:%d IP=%s, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
+ __func__, __LINE__,
+ ip6_sprintf(ip6buf, &sin6->sin6_addr),
+ mac_addr[0], mac_addr[1], mac_addr[2],
+ mac_addr[3], mac_addr[4], mac_addr[5]);
+
+ irdma_manage_arp_cache(iwdev->rf, mac_addr, local_ipaddr6,
+ IRDMA_ARP_ADD);
+ return (0);
+}
+
/**
* irdma_add_ipv6_addr - add ipv6 address to the hw arp table
* @iwdev: irdma device
@@ -190,32 +215,36 @@ irdma_ieq_check_mpacrc(void *desc,
static void
irdma_add_ipv6_addr(struct irdma_device *iwdev, struct ifnet *ifp)
{
- struct ifaddr *ifa, *tmp;
- struct sockaddr_in6 *sin6;
- u32 local_ipaddr6[4];
- u8 *mac_addr;
- char ip6buf[INET6_ADDRSTRLEN];
-
if_addr_rlock(ifp);
- IRDMA_TAILQ_FOREACH_SAFE(ifa, &ifp->if_addrhead, ifa_link, tmp) {
- sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
- if (sin6->sin6_family != AF_INET6)
- continue;
+ if_foreach_addr_type(ifp, AF_INET6, irdma_add_ipv6_cb, iwdev);
+ if_addr_runlock(ifp);
+}
- irdma_copy_ip_ntohl(local_ipaddr6, (u32 *)&sin6->sin6_addr);
- mac_addr = IF_LLADDR(ifp);
+static u_int
+irdma_add_ipv4_cb(void *arg, struct ifaddr *addr, u_int count __unused){
+ struct irdma_device *iwdev = arg;
+ struct sockaddr_in *sin;
+ u32 ip_addr[4] = {};
+ uint8_t *mac_addr;
- printf("%s:%d IP=%s, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
- __func__, __LINE__,
- ip6_sprintf(ip6buf, &sin6->sin6_addr),
- mac_addr[0], mac_addr[1], mac_addr[2],
- mac_addr[3], mac_addr[4], mac_addr[5]);
+ sin = (struct sockaddr_in *)addr->ifa_addr;
- irdma_manage_arp_cache(iwdev->rf, mac_addr, local_ipaddr6,
- IRDMA_ARP_ADD);
+ ip_addr[0] = ntohl(sin->sin_addr.s_addr);
- }
- if_addr_runlock(ifp);
+ mac_addr = if_getlladdr(addr->ifa_ifp);
+
+ printf("%s:%d IP=%d.%d.%d.%d, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
+ __func__, __LINE__,
+ ip_addr[0] >> 24,
+ (ip_addr[0] >> 16) & 0xFF,
+ (ip_addr[0] >> 8) & 0xFF,
+ ip_addr[0] & 0xFF,
+ mac_addr[0], mac_addr[1], mac_addr[2],
+ mac_addr[3], mac_addr[4], mac_addr[5]);
+
+ irdma_manage_arp_cache(iwdev->rf, mac_addr, ip_addr,
+ IRDMA_ARP_ADD);
+ return (0);
}
/**
@@ -226,32 +255,8 @@ irdma_add_ipv6_addr(struct irdma_device *iwdev, struct ifnet *ifp)
static void
irdma_add_ipv4_addr(struct irdma_device *iwdev, struct ifnet *ifp)
{
- struct ifaddr *ifa;
- struct sockaddr_in *sin;
- u32 ip_addr[4] = {};
- u8 *mac_addr;
-
if_addr_rlock(ifp);
- IRDMA_TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
- sin = (struct sockaddr_in *)ifa->ifa_addr;
- if (sin->sin_family != AF_INET)
- continue;
-
- ip_addr[0] = ntohl(sin->sin_addr.s_addr);
- mac_addr = IF_LLADDR(ifp);
-
- printf("%s:%d IP=%d.%d.%d.%d, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
- __func__, __LINE__,
- ip_addr[0] >> 24,
- (ip_addr[0] >> 16) & 0xFF,
- (ip_addr[0] >> 8) & 0xFF,
- ip_addr[0] & 0xFF,
- mac_addr[0], mac_addr[1], mac_addr[2],
- mac_addr[3], mac_addr[4], mac_addr[5]);
-
- irdma_manage_arp_cache(iwdev->rf, mac_addr, ip_addr,
- IRDMA_ARP_ADD);
- }
+ if_foreach_addr_type(ifp, AF_INET, irdma_add_ipv4_cb, iwdev);
if_addr_runlock(ifp);
}
@@ -266,12 +271,15 @@ irdma_add_ip(struct irdma_device *iwdev)
{
struct ifnet *ifp = iwdev->netdev;
struct ifnet *ifv;
+ struct epoch_tracker et;
int i;
irdma_add_ipv4_addr(iwdev, ifp);
irdma_add_ipv6_addr(iwdev, ifp);
- for (i = 0; ifp->if_vlantrunk != NULL && i < VLAN_N_VID; ++i) {
+ for (i = 0; if_getvlantrunk(ifp) != NULL && i < VLAN_N_VID; ++i) {
+ NET_EPOCH_ENTER(et);
ifv = VLAN_DEVAT(ifp, i);
+ NET_EPOCH_EXIT(et);
if (!ifv)
continue;
irdma_add_ipv4_addr(iwdev, ifv);
@@ -292,7 +300,7 @@ irdma_ifaddrevent_handler(void *arg, struct ifnet *ifp, struct ifaddr *ifa, int
if (!ifa || !ifa->ifa_addr || !ifp)
return;
if (rf->iwdev->netdev != ifp) {
- for (i = 0; rf->iwdev->netdev->if_vlantrunk != NULL && i < VLAN_N_VID; ++i) {
+ for (i = 0; if_getvlantrunk(rf->iwdev->netdev) != NULL && i < VLAN_N_VID; ++i) {
NET_EPOCH_ENTER(et);
ifv = VLAN_DEVAT(rf->iwdev->netdev, i);
NET_EPOCH_EXIT(et);
@@ -354,7 +362,8 @@ irdma_get_route_ifp(struct sockaddr *dst_sin, struct ifnet *netdev,
struct nhop_object *nh;
if (dst_sin->sa_family == AF_INET6)
- nh = fib6_lookup(RT_DEFAULT_FIB, &((struct sockaddr_in6 *)dst_sin)->sin6_addr, 0, NHR_NONE, 0);
+ nh = fib6_lookup(RT_DEFAULT_FIB, &((struct sockaddr_in6 *)dst_sin)->sin6_addr,
+ ((struct sockaddr_in6 *)dst_sin)->sin6_scope_id, NHR_NONE, 0);
else
nh = fib4_lookup(RT_DEFAULT_FIB, ((struct sockaddr_in *)dst_sin)->sin_addr, 0, NHR_NONE, 0);
if (!nh || (nh->nh_ifp != netdev &&
@@ -466,7 +475,7 @@ irdma_addr_resolve_neigh_ipv6(struct irdma_cm_node *cm_node,
dst_addr.sin6_family = AF_INET6;
dst_addr.sin6_len = sizeof(dst_addr);
- dst_addr.sin6_scope_id = iwdev->netdev->if_index;
+ dst_addr.sin6_scope_id = if_getindex(iwdev->netdev);
irdma_copy_ip_htonl(dst_addr.sin6_addr.__u6_addr.__u6_addr32, dest);
err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_addr, dst_mac);
@@ -584,6 +593,186 @@ irdma_sysctl_dcqcn_update(SYSCTL_HANDLER_ARGS)
return 0;
}
+enum irdma_cqp_stats_info {
+ IRDMA_CQP_REQ_CMDS = 28,
+ IRDMA_CQP_CMPL_CMDS = 29
+};
+
+static int
+irdma_sysctl_cqp_stats(SYSCTL_HANDLER_ARGS)
+{
+ struct irdma_sc_cqp *cqp = (struct irdma_sc_cqp *)arg1;
+ char rslt[192] = "no cqp available yet";
+ int rslt_size = sizeof(rslt) - 1;
+ int option = (int)arg2;
+
+ if (!cqp) {
+ return sysctl_handle_string(oidp, rslt, sizeof(rslt), req);
+ }
+
+ snprintf(rslt, sizeof(rslt), "");
+ switch (option) {
+ case IRDMA_CQP_REQ_CMDS:
+ snprintf(rslt, rslt_size, "%lu", cqp->requested_ops);
+ break;
+ case IRDMA_CQP_CMPL_CMDS:
+ snprintf(rslt, rslt_size, "%lu", atomic64_read(&cqp->completed_ops));
+ break;
+ }
+
+ return sysctl_handle_string(oidp, rslt, sizeof(rslt), req);
+}
+
+struct irdma_sw_stats_tunable_info {
+ u8 op_type;
+ const char name[32];
+ const char desc[32];
+ uintptr_t value;
+};
+
+static const struct irdma_sw_stats_tunable_info irdma_sws_list[] = {
+ {IRDMA_OP_CEQ_DESTROY, "ceq_destroy", "ceq_destroy", 0},
+ {IRDMA_OP_AEQ_DESTROY, "aeq_destroy", "aeq_destroy", 0},
+ {IRDMA_OP_DELETE_ARP_CACHE_ENTRY, "delete_arp_cache_entry",
+ "delete_arp_cache_entry", 0},
+ {IRDMA_OP_MANAGE_APBVT_ENTRY, "manage_apbvt_entry",
+ "manage_apbvt_entry", 0},
+ {IRDMA_OP_CEQ_CREATE, "ceq_create", "ceq_create", 0},
+ {IRDMA_OP_AEQ_CREATE, "aeq_create", "aeq_create", 0},
+ {IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY, "manage_qhash_table_entry",
+ "manage_qhash_table_entry", 0},
+ {IRDMA_OP_QP_MODIFY, "qp_modify", "qp_modify", 0},
+ {IRDMA_OP_QP_UPLOAD_CONTEXT, "qp_upload_context", "qp_upload_context",
+ 0},
+ {IRDMA_OP_CQ_CREATE, "cq_create", "cq_create", 0},
+ {IRDMA_OP_CQ_DESTROY, "cq_destroy", "cq_destroy", 0},
+ {IRDMA_OP_QP_CREATE, "qp_create", "qp_create", 0},
+ {IRDMA_OP_QP_DESTROY, "qp_destroy", "qp_destroy", 0},
+ {IRDMA_OP_ALLOC_STAG, "alloc_stag", "alloc_stag", 0},
+ {IRDMA_OP_MR_REG_NON_SHARED, "mr_reg_non_shared", "mr_reg_non_shared",
+ 0},
+ {IRDMA_OP_DEALLOC_STAG, "dealloc_stag", "dealloc_stag", 0},
+ {IRDMA_OP_MW_ALLOC, "mw_alloc", "mw_alloc", 0},
+ {IRDMA_OP_QP_FLUSH_WQES, "qp_flush_wqes", "qp_flush_wqes", 0},
+ {IRDMA_OP_ADD_ARP_CACHE_ENTRY, "add_arp_cache_entry",
+ "add_arp_cache_entry", 0},
+ {IRDMA_OP_MANAGE_PUSH_PAGE, "manage_push_page", "manage_push_page", 0},
+ {IRDMA_OP_UPDATE_PE_SDS, "update_pe_sds", "update_pe_sds", 0},
+ {IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE, "manage_hmc_pm_func_table",
+ "manage_hmc_pm_func_table", 0},
+ {IRDMA_OP_SUSPEND, "suspend", "suspend", 0},
+ {IRDMA_OP_RESUME, "resume", "resume", 0},
+ {IRDMA_OP_MANAGE_VCHNL_REQ_PBLE_BP, "manage_vchnl_req_pble_bp",
+ "manage_vchnl_req_pble_bp", 0},
+ {IRDMA_OP_QUERY_FPM_VAL, "query_fpm_val", "query_fpm_val", 0},
+ {IRDMA_OP_COMMIT_FPM_VAL, "commit_fpm_val", "commit_fpm_val", 0},
+ {IRDMA_OP_AH_CREATE, "ah_create", "ah_create", 0},
+ {IRDMA_OP_AH_MODIFY, "ah_modify", "ah_modify", 0},
+ {IRDMA_OP_AH_DESTROY, "ah_destroy", "ah_destroy", 0},
+ {IRDMA_OP_MC_CREATE, "mc_create", "mc_create", 0},
+ {IRDMA_OP_MC_DESTROY, "mc_destroy", "mc_destroy", 0},
+ {IRDMA_OP_MC_MODIFY, "mc_modify", "mc_modify", 0},
+ {IRDMA_OP_STATS_ALLOCATE, "stats_allocate", "stats_allocate", 0},
+ {IRDMA_OP_STATS_FREE, "stats_free", "stats_free", 0},
+ {IRDMA_OP_STATS_GATHER, "stats_gather", "stats_gather", 0},
+ {IRDMA_OP_WS_ADD_NODE, "ws_add_node", "ws_add_node", 0},
+ {IRDMA_OP_WS_MODIFY_NODE, "ws_modify_node", "ws_modify_node", 0},
+ {IRDMA_OP_WS_DELETE_NODE, "ws_delete_node", "ws_delete_node", 0},
+ {IRDMA_OP_WS_FAILOVER_START, "ws_failover_start", "ws_failover_start",
+ 0},
+ {IRDMA_OP_WS_FAILOVER_COMPLETE, "ws_failover_complete",
+ "ws_failover_complete", 0},
+ {IRDMA_OP_SET_UP_MAP, "set_up_map", "set_up_map", 0},
+ {IRDMA_OP_GEN_AE, "gen_ae", "gen_ae", 0},
+ {IRDMA_OP_QUERY_RDMA_FEATURES, "query_rdma_features",
+ "query_rdma_features", 0},
+ {IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY, "alloc_local_mac_entry",
+ "alloc_local_mac_entry", 0},
+ {IRDMA_OP_ADD_LOCAL_MAC_ENTRY, "add_local_mac_entry",
+ "add_local_mac_entry", 0},
+ {IRDMA_OP_DELETE_LOCAL_MAC_ENTRY, "delete_local_mac_entry",
+ "delete_local_mac_entry", 0},
+ {IRDMA_OP_CQ_MODIFY, "cq_modify", "cq_modify", 0}
+};
+
+static const struct irdma_sw_stats_tunable_info irdma_cmcs_list[] = {
+ {0, "cm_nodes_created", "cm_nodes_created",
+ offsetof(struct irdma_cm_core, stats_nodes_created)},
+ {0, "cm_nodes_destroyed", "cm_nodes_destroyed",
+ offsetof(struct irdma_cm_core, stats_nodes_destroyed)},
+ {0, "cm_listen_created", "cm_listen_created",
+ offsetof(struct irdma_cm_core, stats_listen_created)},
+ {0, "cm_listen_destroyed", "cm_listen_destroyed",
+ offsetof(struct irdma_cm_core, stats_listen_destroyed)},
+ {0, "cm_listen_nodes_created", "cm_listen_nodes_created",
+ offsetof(struct irdma_cm_core, stats_listen_nodes_created)},
+ {0, "cm_listen_nodes_destroyed", "cm_listen_nodes_destroyed",
+ offsetof(struct irdma_cm_core, stats_listen_nodes_destroyed)},
+ {0, "cm_lpbs", "cm_lpbs", offsetof(struct irdma_cm_core, stats_lpbs)},
+ {0, "cm_accepts", "cm_accepts", offsetof(struct irdma_cm_core,
+ stats_accepts)},
+ {0, "cm_rejects", "cm_rejects", offsetof(struct irdma_cm_core,
+ stats_rejects)},
+ {0, "cm_connect_errs", "cm_connect_errs",
+ offsetof(struct irdma_cm_core, stats_connect_errs)},
+ {0, "cm_passive_errs", "cm_passive_errs",
+ offsetof(struct irdma_cm_core, stats_passive_errs)},
+ {0, "cm_pkt_retrans", "cm_pkt_retrans", offsetof(struct irdma_cm_core,
+ stats_pkt_retrans)},
+ {0, "cm_backlog_drops", "cm_backlog_drops",
+ offsetof(struct irdma_cm_core, stats_backlog_drops)},
+};
+
+static const struct irdma_sw_stats_tunable_info irdma_ilqs32_list[] = {
+ {0, "ilq_avail_buf_count", "ilq_avail_buf_count",
+ offsetof(struct irdma_puda_rsrc, avail_buf_count)},
+ {0, "ilq_alloc_buf_count", "ilq_alloc_buf_count",
+ offsetof(struct irdma_puda_rsrc, alloc_buf_count)}
+};
+static const struct irdma_sw_stats_tunable_info irdma_ilqs_list[] = {
+ {0, "ilq_stats_buf_alloc_fail", "ilq_stats_buf_alloc_fail",
+ offsetof(struct irdma_puda_rsrc, stats_buf_alloc_fail)},
+ {0, "ilq_stats_pkt_rcvd", "ilq_stats_pkt_rcvd",
+ offsetof(struct irdma_puda_rsrc, stats_pkt_rcvd)},
+ {0, "ilq_stats_pkt_sent", "ilq_stats_pkt_sent",
+ offsetof(struct irdma_puda_rsrc, stats_pkt_sent)},
+ {0, "ilq_stats_rcvd_pkt_err", "ilq_stats_rcvd_pkt_err",
+ offsetof(struct irdma_puda_rsrc, stats_rcvd_pkt_err)},
+ {0, "ilq_stats_sent_pkt_q", "ilq_stats_sent_pkt_q",
+ offsetof(struct irdma_puda_rsrc, stats_sent_pkt_q)}
+};
+
+static const struct irdma_sw_stats_tunable_info irdma_ieqs32_list[] = {
+ {0, "ieq_avail_buf_count", "ieq_avail_buf_count",
+ offsetof(struct irdma_puda_rsrc, avail_buf_count)},
+ {0, "ieq_alloc_buf_count", "ieq_alloc_buf_count",
+ offsetof(struct irdma_puda_rsrc, alloc_buf_count)}
+};
+static const struct irdma_sw_stats_tunable_info irdma_ieqs_list[] = {
+ {0, "ieq_stats_buf_alloc_fail", "ieq_stats_buf_alloc_fail",
+ offsetof(struct irdma_puda_rsrc, stats_buf_alloc_fail)},
+ {0, "ieq_stats_pkt_rcvd", "ieq_stats_pkt_rcvd",
+ offsetof(struct irdma_puda_rsrc, stats_pkt_rcvd)},
+ {0, "ieq_stats_pkt_sent", "ieq_stats_pkt_sent",
+ offsetof(struct irdma_puda_rsrc, stats_pkt_sent)},
+ {0, "ieq_stats_rcvd_pkt_err", "ieq_stats_rcvd_pkt_err",
+ offsetof(struct irdma_puda_rsrc, stats_rcvd_pkt_err)},
+ {0, "ieq_stats_sent_pkt_q", "ieq_stats_sent_pkt_q",
+ offsetof(struct irdma_puda_rsrc, stats_sent_pkt_q)},
+ {0, "ieq_stats_bad_qp_id", "ieq_stats_bad_qp_id",
+ offsetof(struct irdma_puda_rsrc, stats_bad_qp_id)},
+ {0, "ieq_fpdu_processed", "ieq_fpdu_processed",
+ offsetof(struct irdma_puda_rsrc, fpdu_processed)},
+ {0, "ieq_bad_seq_num", "ieq_bad_seq_num",
+ offsetof(struct irdma_puda_rsrc, bad_seq_num)},
+ {0, "ieq_crc_err", "ieq_crc_err", offsetof(struct irdma_puda_rsrc,
+ crc_err)},
+ {0, "ieq_pmode_count", "ieq_pmode_count",
+ offsetof(struct irdma_puda_rsrc, pmode_count)},
+ {0, "ieq_partials_handled", "ieq_partials_handled",
+ offsetof(struct irdma_puda_rsrc, partials_handled)},
+};
+
/**
* irdma_dcqcn_tunables_init - create tunables for dcqcn settings
* @rf: RDMA PCI function
@@ -630,7 +819,7 @@ irdma_dcqcn_tunables_init(struct irdma_pci_f *rf)
rf->dcqcn_params.dcqcn_t = 0x37;
SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
OID_AUTO, "dcqcn_T", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_t, 0,
- "set number of usecs that should elapse before increasing the CWND in DCQCN mode, default=0x37");
+ "number of us to elapse before increasing the CWND in DCQCN mode, default=0x37");
rf->dcqcn_params.dcqcn_b = 0x249f0;
SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
@@ -657,6 +846,90 @@ irdma_dcqcn_tunables_init(struct irdma_pci_f *rf)
}
/**
+ * irdma_sysctl_settings - sysctl runtime settings init
+ * @rf: RDMA PCI function
+ */
+void
+irdma_sysctl_settings(struct irdma_pci_f *rf)
+{
+ struct sysctl_oid_list *irdma_sysctl_oid_list;
+
+ irdma_sysctl_oid_list = SYSCTL_CHILDREN(rf->tun_info.irdma_sysctl_tree);
+
+ SYSCTL_ADD_BOOL(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
+ OID_AUTO, "upload_context", CTLFLAG_RWTUN,
+ &irdma_upload_context, 0,
+ "allow for generating QP's upload context, default=0");
+}
+
+void
+irdma_sw_stats_tunables_init(struct irdma_pci_f *rf)
+{
+ struct sysctl_oid_list *sws_oid_list;
+ struct sysctl_ctx_list *irdma_ctx = &rf->tun_info.irdma_sysctl_ctx;
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_cm_core *cm_core = &rf->iwdev->cm_core;
+ struct irdma_puda_rsrc *ilq = rf->iwdev->vsi.ilq;
+ struct irdma_puda_rsrc *ieq = rf->iwdev->vsi.ieq;
+ u64 *ll_ptr;
+ u32 *l_ptr;
+ int cqp_stat_cnt = sizeof(irdma_sws_list) / sizeof(struct irdma_sw_stats_tunable_info);
+ int cmcore_stat_cnt = sizeof(irdma_cmcs_list) / sizeof(struct irdma_sw_stats_tunable_info);
+ int ilqs_stat_cnt = sizeof(irdma_ilqs_list) / sizeof(struct irdma_sw_stats_tunable_info);
+ int ilqs32_stat_cnt = sizeof(irdma_ilqs32_list) / sizeof(struct irdma_sw_stats_tunable_info);
+ int ieqs_stat_cnt = sizeof(irdma_ieqs_list) / sizeof(struct irdma_sw_stats_tunable_info);
+ int ieqs32_stat_cnt = sizeof(irdma_ieqs32_list) / sizeof(struct irdma_sw_stats_tunable_info);
+ int i;
+
+ sws_oid_list = SYSCTL_CHILDREN(rf->tun_info.sws_sysctl_tree);
+
+ for (i = 0; i < cqp_stat_cnt; ++i) {
+ SYSCTL_ADD_U64(irdma_ctx, sws_oid_list, OID_AUTO,
+ irdma_sws_list[i].name, CTLFLAG_RD,
+ &dev->cqp_cmd_stats[irdma_sws_list[i].op_type],
+ 0, irdma_sws_list[i].desc);
+ }
+ SYSCTL_ADD_PROC(irdma_ctx, sws_oid_list, OID_AUTO,
+ "req_cmds", CTLFLAG_RD | CTLTYPE_STRING,
+ dev->cqp, IRDMA_CQP_REQ_CMDS, irdma_sysctl_cqp_stats, "A",
+ "req_cmds");
+ SYSCTL_ADD_PROC(irdma_ctx, sws_oid_list, OID_AUTO,
+ "cmpl_cmds", CTLFLAG_RD | CTLTYPE_STRING,
+ dev->cqp, IRDMA_CQP_CMPL_CMDS, irdma_sysctl_cqp_stats, "A",
+ "cmpl_cmds");
+ for (i = 0; i < cmcore_stat_cnt; ++i) {
+ ll_ptr = (u64 *)((uintptr_t)cm_core + irdma_cmcs_list[i].value);
+ SYSCTL_ADD_U64(irdma_ctx, sws_oid_list, OID_AUTO,
+ irdma_cmcs_list[i].name, CTLFLAG_RD, ll_ptr,
+ 0, irdma_cmcs_list[i].desc);
+ }
+ for (i = 0; ilq && i < ilqs_stat_cnt; ++i) {
+ ll_ptr = (u64 *)((uintptr_t)ilq + irdma_ilqs_list[i].value);
+ SYSCTL_ADD_U64(irdma_ctx, sws_oid_list, OID_AUTO,
+ irdma_ilqs_list[i].name, CTLFLAG_RD, ll_ptr,
+ 0, irdma_ilqs_list[i].desc);
+ }
+ for (i = 0; ilq && i < ilqs32_stat_cnt; ++i) {
+ l_ptr = (u32 *)((uintptr_t)ilq + irdma_ilqs32_list[i].value);
+ SYSCTL_ADD_U32(irdma_ctx, sws_oid_list, OID_AUTO,
+ irdma_ilqs32_list[i].name, CTLFLAG_RD, l_ptr,
+ 0, irdma_ilqs32_list[i].desc);
+ }
+ for (i = 0; ieq && i < ieqs_stat_cnt; ++i) {
+ ll_ptr = (u64 *)((uintptr_t)ieq + irdma_ieqs_list[i].value);
+ SYSCTL_ADD_U64(irdma_ctx, sws_oid_list, OID_AUTO,
+ irdma_ieqs_list[i].name, CTLFLAG_RD, ll_ptr,
+ 0, irdma_ieqs_list[i].desc);
+ }
+ for (i = 0; ieq && i < ieqs32_stat_cnt; ++i) {
+ l_ptr = (u32 *)((uintptr_t)ieq + irdma_ieqs32_list[i].value);
+ SYSCTL_ADD_U32(irdma_ctx, sws_oid_list, OID_AUTO,
+ irdma_ieqs32_list[i].name, CTLFLAG_RD, l_ptr,
+ 0, irdma_ieqs32_list[i].desc);
+ }
+}
+
+/**
* irdma_dmamap_cb - callback for bus_dmamap_load
*/
static void
@@ -755,12 +1028,75 @@ irdma_free_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem)
return 0;
}
-inline void
-irdma_prm_rem_bitmapmem(struct irdma_hw *hw, struct irdma_chunk *chunk)
+u_int
+if_foreach_addr_type(if_t ifp, int type, if_addr_cb_t cb, void *cb_arg){
+ struct epoch_tracker et;
+ struct ifaddr *ifa;
+ u_int count;
+
+ MPASS(cb);
+
+ count = 0;
+ NET_EPOCH_ENTER(et);
+ CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != type)
+ continue;
+ count += (*cb) (cb_arg, ifa, count);
+ }
+ NET_EPOCH_EXIT(et);
+
+ return (count);
+}
+
+int
+if_foreach(if_foreach_cb_t cb, void *cb_arg)
{
- kfree(chunk->bitmapmem.va);
+ if_t ifp;
+ int error;
+
+ NET_EPOCH_ASSERT();
+ MPASS(cb);
+
+ error = 0;
+ CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) {
+ error = cb(ifp, cb_arg);
+ if (error != 0)
+ break;
+ }
+
+ return (error);
}
+if_t
+if_iter_start(struct if_iter *iter){
+ if_t ifp;
+
+ NET_EPOCH_ASSERT();
+ bzero(iter, sizeof(*iter));
+ ifp = CK_STAILQ_FIRST(&V_ifnet);
+ if (ifp != NULL)
+ iter->context[0] = CK_STAILQ_NEXT(ifp, if_link);
+ else
+ iter->context[0] = NULL;
+ return (ifp);
+}
+
+if_t
+if_iter_next(struct if_iter *iter){
+ if_t cur_ifp = iter->context[0];
+
+ if (cur_ifp != NULL)
+ iter->context[0] = CK_STAILQ_NEXT(cur_ifp, if_link);
+ return (cur_ifp);
+}
+
+void
+if_iter_finish(struct if_iter *iter)
+{
+ /* NOP */
+}
+
+
void
irdma_cleanup_dead_qps(struct irdma_sc_vsi *vsi)
{
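The compat if_foreach_addr_type() implemented above invokes its callback once per address of the requested family, inside a network epoch, accumulating the callback's return values. A minimal sketch of the callback shape, modeled on irdma_add_ipv4_cb(); count_ipv4() and count_ipv4_cb() are made-up names:

#include <sys/param.h>
#include <net/if.h>
#include <net/if_var.h>
#include <netinet/in.h>

static u_int
count_ipv4_cb(void *arg, struct ifaddr *ifa __unused, u_int count __unused)
{
	u_int *total = arg;

	(*total)++;	/* ifa is already filtered to AF_INET */
	return (1);	/* added to if_foreach_addr_type()'s running count */
}

static u_int
count_ipv4(if_t ifp)
{
	u_int total = 0;

	if_foreach_addr_type(ifp, AF_INET, count_ipv4_cb, &total);
	return (total);
}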
diff --git a/sys/dev/irdma/fbsd_kcompat.h b/sys/dev/irdma/fbsd_kcompat.h
index 903e33614d9e..630e49068a74 100644
--- a/sys/dev/irdma/fbsd_kcompat.h
+++ b/sys/dev/irdma/fbsd_kcompat.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2021 - 2022 Intel Corporation
+ * Copyright (c) 2021 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -53,16 +53,12 @@
ibdev.dma_device = (dev)
#define set_max_sge(props, rf) \
((props)->max_sge = (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags)
-#define kc_set_props_ip_gid_caps(props) \
- ((props)->port_cap_flags |= IB_PORT_IP_BASED_GIDS)
#define rdma_query_gid(ibdev, port, index, gid) \
ib_get_cached_gid(ibdev, port, index, gid, NULL)
#define kmap(pg) page_address(pg)
#define kmap_local_page(pg) page_address(pg)
#define kunmap(pg)
#define kunmap_local(pg)
-#define kc_free_lsmm_dereg_mr(iwdev, iwqp) \
- ((iwdev)->ibdev.dereg_mr((iwqp)->lsmm_mr))
#define IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION IB_CQ_FLAGS_TIMESTAMP_COMPLETION
#define kc_irdma_destroy_qp(ibqp, udata) irdma_destroy_qp(ibqp)
@@ -75,16 +71,42 @@
#define IRDMA_VER_LEN 24
+#ifndef EVNT_HNDLR_CRITERR
+#if ICE_RDMA_MAJOR_VERSION == 1 && ICE_RDMA_MINOR_VERSION == 1
+#define EVNT_HNDLR_CRITERR
+#else
+#undef EVNT_HNDLR_CRITERR
+#endif
+
+#endif
void kc_set_roce_uverbs_cmd_mask(struct irdma_device *iwdev);
void kc_set_rdma_uverbs_cmd_mask(struct irdma_device *iwdev);
struct irdma_tunable_info {
struct sysctl_ctx_list irdma_sysctl_ctx;
struct sysctl_oid *irdma_sysctl_tree;
+ struct sysctl_oid *sws_sysctl_tree;
char drv_ver[IRDMA_VER_LEN];
u8 roce_ena;
};
+typedef u_int if_addr_cb_t(void *, struct ifaddr *, u_int);
+u_int if_foreach_addr_type(if_t ifp, int type, if_addr_cb_t cb, void *cb_arg);
+typedef int (*if_foreach_cb_t)(if_t, void *);
+int if_foreach(if_foreach_cb_t cb, void *cb_arg);
+#ifndef if_iter
+struct if_iter {
+ void *context[4];
+};
+#endif
+if_t if_iter_start(struct if_iter *iter);
+if_t if_iter_next(struct if_iter *iter);
+void if_iter_finish(struct if_iter *iter);
+#define if_getdunit(ifp) ifp->if_dunit
+#define if_getindex(ifp) ifp->if_index
+#define if_getlinkstate(ndev) ndev->if_link_state
+#define if_getvlantrunk(ifp) ifp->if_vlantrunk
+#define if_getvnet(ndev) ndev->if_vnet
static inline int irdma_iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey)
{
@@ -125,7 +147,6 @@ struct ib_ah *irdma_create_ah_stub(struct ib_pd *ibpd,
struct ib_ah_attr *attr,
struct ib_udata *udata);
void irdma_ether_copy(u8 *dmac, struct ib_ah_attr *attr);
-
int irdma_destroy_ah(struct ib_ah *ibah);
int irdma_destroy_ah_stub(struct ib_ah *ibah);
int irdma_destroy_qp(struct ib_qp *ibqp);
@@ -162,8 +183,6 @@ void irdma_disassociate_ucontext(struct ib_ucontext *context);
int kc_irdma_set_roce_cm_info(struct irdma_qp *iwqp,
struct ib_qp_attr *attr,
u16 *vlan_id);
-struct irdma_device *kc_irdma_get_device(struct ifnet *netdev);
-void kc_irdma_put_device(struct irdma_device *iwdev);
void kc_set_loc_seq_num_mss(struct irdma_cm_node *cm_node);
u16 kc_rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn);
@@ -181,6 +200,8 @@ int irdma_addr_resolve_neigh(struct irdma_cm_node *cm_node, u32 dst_ip,
int irdma_addr_resolve_neigh_ipv6(struct irdma_cm_node *cm_node, u32 *dest,
int arpindex);
void irdma_dcqcn_tunables_init(struct irdma_pci_f *rf);
+void irdma_sysctl_settings(struct irdma_pci_f *rf);
+void irdma_sw_stats_tunables_init(struct irdma_pci_f *rf);
u32 irdma_create_stag(struct irdma_device *iwdev);
void irdma_free_stag(struct irdma_device *iwdev, u32 stag);
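A sketch of driving the if_iter compat API declared above; the walk must run inside a network epoch section, matching the NET_EPOCH_ASSERT() in if_iter_start(). walk_all_ifnets() is a made-up name:

#include <sys/param.h>
#include <sys/systm.h>
#include <net/if.h>
#include <net/if_var.h>

static void
walk_all_ifnets(void)
{
	struct epoch_tracker et;
	struct if_iter iter;
	if_t ifp;

	NET_EPOCH_ENTER(et);
	for (ifp = if_iter_start(&iter); ifp != NULL;
	    ifp = if_iter_next(&iter))
		printf("ifnet: %s\n", if_name(ifp));
	if_iter_finish(&iter);
	NET_EPOCH_EXIT(et);
}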
diff --git a/sys/dev/irdma/icrdma.c b/sys/dev/irdma/icrdma.c
index 38a970700558..848f8126e57c 100644
--- a/sys/dev/irdma/icrdma.c
+++ b/sys/dev/irdma/icrdma.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2021 - 2022 Intel Corporation
+ * Copyright (c) 2021 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -52,9 +52,7 @@
/**
* Driver version
*/
-char irdma_driver_version[] = "1.1.11-k";
-
-#define pf_if_d(peer) peer->ifp->if_dunit
+char irdma_driver_version[] = "1.2.17-k";
/**
* irdma_init_tunable - prepare tunables
@@ -64,50 +62,56 @@ char irdma_driver_version[] = "1.1.11-k";
static void
irdma_init_tunable(struct irdma_pci_f *rf, uint8_t pf_id)
{
- struct sysctl_oid_list *irdma_sysctl_oid_list;
+ struct sysctl_oid_list *irdma_oid_list;
+ struct irdma_tunable_info *t_info = &rf->tun_info;
char pf_name[16];
snprintf(pf_name, 15, "irdma%d", pf_id);
- sysctl_ctx_init(&rf->tun_info.irdma_sysctl_ctx);
+ sysctl_ctx_init(&t_info->irdma_sysctl_ctx);
- rf->tun_info.irdma_sysctl_tree = SYSCTL_ADD_NODE(&rf->tun_info.irdma_sysctl_ctx,
- SYSCTL_STATIC_CHILDREN(_dev),
- OID_AUTO, pf_name, CTLFLAG_RD,
- NULL, "");
+ t_info->irdma_sysctl_tree = SYSCTL_ADD_NODE(&t_info->irdma_sysctl_ctx,
+ SYSCTL_STATIC_CHILDREN(_dev),
+ OID_AUTO, pf_name,
+ CTLFLAG_RD, NULL, "");
- irdma_sysctl_oid_list = SYSCTL_CHILDREN(rf->tun_info.irdma_sysctl_tree);
+ irdma_oid_list = SYSCTL_CHILDREN(t_info->irdma_sysctl_tree);
+ t_info->sws_sysctl_tree = SYSCTL_ADD_NODE(&t_info->irdma_sysctl_ctx,
+ irdma_oid_list, OID_AUTO,
+ "sw_stats", CTLFLAG_RD,
+ NULL, "");
/*
* debug mask setting
*/
- SYSCTL_ADD_S32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
+ SYSCTL_ADD_S32(&t_info->irdma_sysctl_ctx, irdma_oid_list,
OID_AUTO, "debug", CTLFLAG_RWTUN, &rf->sc_dev.debug_mask,
0, "irdma debug");
/*
* RoCEv2/iWARP setting RoCEv2 the default mode
*/
- rf->tun_info.roce_ena = 1;
- SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list, OID_AUTO,
- "roce_enable", CTLFLAG_RDTUN, &rf->tun_info.roce_ena, 0,
+ t_info->roce_ena = 1;
+ SYSCTL_ADD_U8(&t_info->irdma_sysctl_ctx, irdma_oid_list, OID_AUTO,
+ "roce_enable", CTLFLAG_RDTUN, &t_info->roce_ena, 0,
"RoCEv2 mode enable");
rf->protocol_used = IRDMA_IWARP_PROTOCOL_ONLY;
- if (rf->tun_info.roce_ena == 1)
+ if (t_info->roce_ena == 1)
rf->protocol_used = IRDMA_ROCE_PROTOCOL_ONLY;
- else if (rf->tun_info.roce_ena != 0)
+ else if (t_info->roce_ena != 0)
printf("%s:%d wrong roce_enable value (%d), using iWARP\n",
- __func__, __LINE__, rf->tun_info.roce_ena);
+ __func__, __LINE__, t_info->roce_ena);
printf("%s:%d protocol: %s, roce_enable value: %d\n", __func__, __LINE__,
(rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? "iWARP" : "RoCEv2",
- rf->tun_info.roce_ena);
+ t_info->roce_ena);
- snprintf(rf->tun_info.drv_ver, IRDMA_VER_LEN, "%s", irdma_driver_version);
- SYSCTL_ADD_STRING(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
- OID_AUTO, "drv_ver", CTLFLAG_RDTUN, rf->tun_info.drv_ver,
+ snprintf(t_info->drv_ver, IRDMA_VER_LEN, "%s", irdma_driver_version);
+ SYSCTL_ADD_STRING(&t_info->irdma_sysctl_ctx, irdma_oid_list,
+ OID_AUTO, "drv_ver", CTLFLAG_RDTUN, t_info->drv_ver,
IRDMA_VER_LEN, "driver version");
irdma_dcqcn_tunables_init(rf);
+ irdma_sysctl_settings(rf);
}
/**
@@ -122,8 +126,6 @@ irdma_find_handler(struct ice_rdma_peer *p_dev)
spin_lock_irqsave(&irdma_handler_lock, flags);
list_for_each_entry(hdl, &irdma_handlers, list) {
- if (!hdl)
- continue;
if (!hdl->iwdev->rf->peer_info)
continue;
if (hdl->iwdev->rf->peer_info->dev == p_dev->dev) {
@@ -160,9 +162,12 @@ peer_to_iwdev(struct ice_rdma_peer *peer)
* @qos_info: source, DCB settings structure
*/
static void
-irdma_get_qos_info(struct irdma_l2params *l2params, struct ice_qos_params *qos_info)
+irdma_get_qos_info(struct irdma_pci_f *rf, struct irdma_l2params *l2params,
+ struct ice_qos_params *qos_info)
{
int i;
+ char txt[7][128] = {"", "", "", "", "", "", ""};
+ u8 len;
l2params->num_tc = qos_info->num_tc;
l2params->num_apps = qos_info->num_apps;
@@ -184,33 +189,46 @@ irdma_get_qos_info(struct irdma_l2params *l2params, struct ice_qos_params *qos_i
l2params->dscp_mode = true;
memcpy(l2params->dscp_map, qos_info->dscp_map, sizeof(l2params->dscp_map));
}
- printf("%s:%d: l2params settings:\n num_tc %d,\n num_apps %d,\n",
- __func__, __LINE__, l2params->num_tc, l2params->num_apps);
- printf(" vsi_prio_type %d,\n vsi_rel_bw %d,\n egress_virt_up:",
- l2params->vsi_prio_type, l2params->vsi_rel_bw);
- for (i = 0; i < l2params->num_tc; i++)
- printf(" %d", l2params->tc_info[i].egress_virt_up);
- printf("\n ingress_virt_up:");
- for (i = 0; i < l2params->num_tc; i++)
- printf(" %d", l2params->tc_info[i].ingress_virt_up);
- printf("\n prio_type:");
- for (i = 0; i < l2params->num_tc; i++)
- printf(" %d", l2params->tc_info[i].prio_type);
- printf("\n rel_bw:");
- for (i = 0; i < l2params->num_tc; i++)
- printf(" %d", l2params->tc_info[i].rel_bw);
- printf("\n tc_ctx:");
- for (i = 0; i < l2params->num_tc; i++)
- printf(" %lu", l2params->tc_info[i].tc_ctx);
- printf("\n up2tc:");
+ if (!(rf->sc_dev.debug_mask & IRDMA_DEBUG_DCB))
+ return;
+ for (i = 0; i < l2params->num_tc; i++) {
+ len = strlen(txt[0]);
+ snprintf(txt[0] + len, sizeof(txt[0]) - 5, " %d",
+ l2params->tc_info[i].egress_virt_up);
+ len = strlen(txt[1]);
+ snprintf(txt[1] + len, sizeof(txt[1]) - 5, " %d",
+ l2params->tc_info[i].ingress_virt_up);
+ len = strlen(txt[2]);
+ snprintf(txt[2] + len, sizeof(txt[2]) - 5, " %d",
+ l2params->tc_info[i].prio_type);
+ len = strlen(txt[3]);
+ snprintf(txt[3] + len, sizeof(txt[3]) - 5, " %d",
+ l2params->tc_info[i].rel_bw);
+ len = strlen(txt[4]);
+ snprintf(txt[4] + len, sizeof(txt[4]) - 5, " %lu",
+ l2params->tc_info[i].tc_ctx);
+ }
+ len = strlen(txt[5]);
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
- printf(" %d", l2params->up2tc[i]);
- printf(" dscp_mode: %d,\n", l2params->dscp_mode);
+ len += snprintf(txt[5] + len, sizeof(txt[5]) - 5, " %d",
+ l2params->up2tc[i]);
+ len = strlen(txt[6]);
for (i = 0; i < IRDMA_DSCP_NUM_VAL; i++)
- printf(" %d", l2params->dscp_map[i]);
- printf("\n");
-
- dump_struct(l2params, sizeof(*l2params), "l2params");
+ len += snprintf(txt[6] + len, sizeof(txt[6]) - 5, " %d",
+ l2params->dscp_map[i]);
+ irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB, "num_tc: %d\n", l2params->num_tc);
+ irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB, "num_apps: %d\n", l2params->num_apps);
+ irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB, "vsi_prio_type: %d\n", l2params->vsi_prio_type);
+ irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB, "vsi_rel_bw: %d\n", l2params->vsi_rel_bw);
+ irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB, "egress_virt_up: %s\n", txt[0]);
+ irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB, "ingress_virt_up:%s\n", txt[1]);
+ irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB, "prio_type: %s\n", txt[2]);
+ irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB, "rel_bw: %s\n", txt[3]);
+ irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB, "tc_ctx: %s\n", txt[4]);
+ irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB, "up2tc: %s\n", txt[5]);
+ irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB, "dscp_mode: %s\n", txt[6]);
+
+ irdma_debug_buf(&rf->sc_dev, IRDMA_DEBUG_DCB, "l2params", l2params, sizeof(*l2params));
}
/**
@@ -227,11 +245,40 @@ irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
mtu);
else if (mtu < IRDMA_MIN_MTU_IPV6)
irdma_dev_warn(to_ibdev(dev),
- "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\\n",
+ "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\n",
mtu);
}
/**
+ * irdma_get_event_name - convert type enum to string
+ * @type: event type enum
+ */
+static const char *
+irdma_get_event_name(enum ice_rdma_event_type type)
+{
+ switch (type) {
+ case ICE_RDMA_EVENT_LINK_CHANGE:
+ return "LINK CHANGE";
+ case ICE_RDMA_EVENT_MTU_CHANGE:
+ return "MTU CHANGE";
+ case ICE_RDMA_EVENT_TC_CHANGE:
+ return "TC CHANGE";
+ case ICE_RDMA_EVENT_API_CHANGE:
+ return "API CHANGE";
+ case ICE_RDMA_EVENT_CRIT_ERR:
+ return "CRITICAL ERROR";
+ case ICE_RDMA_EVENT_RESET:
+ return "RESET";
+ case ICE_RDMA_EVENT_QSET_REGISTER:
+ return "QSET REGISTER";
+ case ICE_RDMA_EVENT_VSI_FILTER_UPDATE:
+ return "VSI FILTER UPDATE";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+/**
* irdma_event_handler - handling events from lan driver
* @peer: the peer interface structure
* @event: event info structure
@@ -243,10 +290,8 @@ irdma_event_handler(struct ice_rdma_peer *peer, struct ice_rdma_event *event)
struct irdma_l2params l2params = {};
printf("%s:%d event_handler %s (%x) on pf %d (%d)\n", __func__, __LINE__,
- (event->type == 1) ? "LINK CHANGE" :
- (event->type == 2) ? "MTU CHANGE" :
- (event->type == 3) ? "TC CHANGE" : "UNKNOWN",
- event->type, peer->pf_id, pf_if_d(peer));
+ irdma_get_event_name(event->type),
+ event->type, peer->pf_id, if_getdunit(peer->ifp));
iwdev = peer_to_iwdev(peer);
if (!iwdev) {
printf("%s:%d rdma device not found\n", __func__, __LINE__);
@@ -256,7 +301,8 @@ irdma_event_handler(struct ice_rdma_peer *peer, struct ice_rdma_event *event)
switch (event->type) {
case ICE_RDMA_EVENT_LINK_CHANGE:
printf("%s:%d PF: %x (%x), state: %d, speed: %lu\n", __func__, __LINE__,
- peer->pf_id, pf_if_d(peer), event->linkstate, event->baudrate);
+ peer->pf_id, if_getdunit(peer->ifp), event->linkstate,
+ event->baudrate);
break;
case ICE_RDMA_EVENT_MTU_CHANGE:
if (iwdev->vsi.mtu != event->mtu) {
@@ -277,6 +323,12 @@ irdma_event_handler(struct ice_rdma_peer *peer, struct ice_rdma_event *event)
event->prep ? " " : "not ");
goto done;
}
+ if (!atomic_inc_not_zero(&iwdev->rf->dev_ctx.event_rfcnt)) {
+ printf("%s:%d (%d) EVENT_TC_CHANGE received, but not processed %d\n",
+ __func__, __LINE__, if_getdunit(peer->ifp),
+ atomic_read(&iwdev->rf->dev_ctx.event_rfcnt));
+ break;
+ }
if (event->prep) {
iwdev->vsi.tc_change_pending = true;
irdma_sc_suspend_resume_qps(&iwdev->vsi, IRDMA_OP_SUSPEND);
@@ -287,7 +339,7 @@ irdma_event_handler(struct ice_rdma_peer *peer, struct ice_rdma_event *event)
printf("%s:%d TC change preparation done\n", __func__, __LINE__);
} else {
l2params.tc_changed = true;
- irdma_get_qos_info(&l2params, &event->port_qos);
+ irdma_get_qos_info(iwdev->rf, &l2params, &event->port_qos);
if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;
@@ -295,9 +347,36 @@ irdma_event_handler(struct ice_rdma_peer *peer, struct ice_rdma_event *event)
irdma_change_l2params(&iwdev->vsi, &l2params);
printf("%s:%d TC change done\n", __func__, __LINE__);
}
+ atomic_dec(&iwdev->rf->dev_ctx.event_rfcnt);
break;
case ICE_RDMA_EVENT_CRIT_ERR:
+#ifdef EVNT_HNDLR_CRITERR
+ if (event->oicr_reg & IRDMAPFINT_OICR_PE_CRITERR_M) {
+ u32 pe_criterr;
+
+#define IRDMA_Q1_RESOURCE_ERR 0x0001024d
+ pe_criterr = readl(iwdev->rf->sc_dev.hw_regs[IRDMA_GLPE_CRITERR]);
+ if (pe_criterr != IRDMA_Q1_RESOURCE_ERR) {
+ irdma_pr_err("critical PE Error, GLPE_CRITERR=0x%08x\n",
+ pe_criterr);
+ iwdev->rf->reset = true;
+ } else {
+ irdma_dev_warn(to_ibdev(&iwdev->rf->sc_dev),
+ "Q1 Resource Check\n");
+ }
+ }
+ if (event->oicr_reg & IRDMAPFINT_OICR_HMC_ERR_M) {
+ irdma_pr_err("HMC Error\n");
+ iwdev->rf->reset = true;
+ }
+ if (iwdev->rf->reset)
+ iwdev->rf->gen_ops.request_reset(iwdev->rf);
+#else
printf("%s:%d event type received: %d\n", __func__, __LINE__, event->type);
+#endif
+ break;
+ case ICE_RDMA_EVENT_RESET:
+ iwdev->rf->reset = true;
break;
default:
printf("%s:%d event type unsupported: %d\n", __func__, __LINE__, event->type);
@@ -316,7 +395,7 @@ static void
irdma_link_change(struct ice_rdma_peer *peer, int linkstate, uint64_t baudrate)
{
printf("%s:%d PF: %x (%x), state: %d, speed: %lu\n", __func__, __LINE__,
- peer->pf_id, pf_if_d(peer), linkstate, baudrate);
+ peer->pf_id, if_getdunit(peer->ifp), linkstate, baudrate);
}
/**
@@ -341,7 +420,16 @@ irdma_finalize_task(void *context, int pending)
if (iwdev->iw_status) {
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT,
"Starting deferred closing %d (%d)\n",
- rf->peer_info->pf_id, pf_if_d(peer));
+ rf->peer_info->pf_id, if_getdunit(peer->ifp));
+ atomic_dec(&rf->dev_ctx.event_rfcnt);
+ wait_event_timeout(iwdev->suspend_wq,
+ !atomic_read(&rf->dev_ctx.event_rfcnt),
+ IRDMA_MAX_TIMEOUT);
+ if (atomic_read(&rf->dev_ctx.event_rfcnt) != 0) {
+				printf("%s:%d (%d) timed out waiting for event_rfcnt (%d), proceeding with unload\n",
+ __func__, __LINE__, if_getdunit(peer->ifp),
+ atomic_read(&rf->dev_ctx.event_rfcnt));
+ }
irdma_dereg_ipaddr_event_cb(rf);
irdma_ib_unregister_device(iwdev);
req.type = ICE_RDMA_EVENT_VSI_FILTER_UPDATE;
@@ -352,8 +440,8 @@ irdma_finalize_task(void *context, int pending)
} else {
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT,
"Starting deferred opening %d (%d)\n",
- rf->peer_info->pf_id, pf_if_d(peer));
- irdma_get_qos_info(&l2params, &peer->initial_qos_info);
+ rf->peer_info->pf_id, if_getdunit(peer->ifp));
+ irdma_get_qos_info(iwdev->rf, &l2params, &peer->initial_qos_info);
if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;
@@ -370,54 +458,19 @@ irdma_finalize_task(void *context, int pending)
irdma_rt_deinit_hw(iwdev);
ib_dealloc_device(&iwdev->ibdev);
}
+ irdma_sw_stats_tunables_init(rf);
req.type = ICE_RDMA_EVENT_VSI_FILTER_UPDATE;
req.enable_filter = true;
IRDMA_DI_REQ_HANDLER(peer, &req);
irdma_reg_ipaddr_event_cb(rf);
+ atomic_inc(&rf->dev_ctx.event_rfcnt);
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT,
"Deferred opening finished %d (%d)\n",
- rf->peer_info->pf_id, pf_if_d(peer));
+ rf->peer_info->pf_id, if_getdunit(peer->ifp));
}
}
/**
- * irdma_open - Callback for operation open for RDMA device
- * @peer: the new peer interface structure
- *
- * Callback implementing the RDMA_OPEN function. Called by the ice driver to
- * notify the RDMA client driver that a new device has been initialized.
- */
-static int
-irdma_open(struct ice_rdma_peer *peer)
-{
- struct ice_rdma_event event = {0};
-
- event.type = ICE_RDMA_EVENT_MTU_CHANGE;
- event.mtu = peer->mtu;
-
- irdma_event_handler(peer, &event);
-
- return 0;
-}
-
-/**
- * irdma_close - Callback to notify that a peer device is down
- * @peer: the RDMA peer device being stopped
- *
- * Callback implementing the RDMA_CLOSE function. Called by the ice driver to
- * notify the RDMA client driver that a peer device is being stopped.
- */
-static int
-irdma_close(struct ice_rdma_peer *peer)
-{
- /*
- * This is called when ifconfig down. Keeping it for compatibility with ice. This event might be usefull for
- * future.
- */
- return 0;
-}
-
-/**
* irdma_alloc_pcidev - allocate memory for pcidev and populate data
* @peer: the new peer interface structure
* @rf: RDMA PCI function
@@ -515,7 +568,7 @@ irdma_probe(struct ice_rdma_peer *peer)
irdma_pr_info("probe: irdma-%s peer=%p, peer->pf_id=%d, peer->ifp=%p, peer->ifp->if_dunit=%d, peer->pci_mem->r_bustag=%p\n",
irdma_driver_version, peer, peer->pf_id, peer->ifp,
- pf_if_d(peer), (void *)(uintptr_t)peer->pci_mem->r_bustag);
+ if_getdunit(peer->ifp), (void *)(uintptr_t)peer->pci_mem->r_bustag);
hdl = irdma_find_handler(peer);
if (hdl)
@@ -540,7 +593,7 @@ irdma_probe(struct ice_rdma_peer *peer)
hdl->iwdev = iwdev;
iwdev->hdl = hdl;
- irdma_init_tunable(iwdev->rf, pf_if_d(peer));
+ irdma_init_tunable(iwdev->rf, if_getdunit(peer->ifp));
irdma_fill_device_info(iwdev, peer);
rf = iwdev->rf;
@@ -593,7 +646,7 @@ irdma_remove(struct ice_rdma_peer *peer)
struct irdma_device *iwdev;
irdma_debug((struct irdma_sc_dev *)NULL, IRDMA_DEBUG_INIT,
- "removing %s irdma%d\n", __func__, pf_if_d(peer));
+ "removing %s irdma%d\n", __func__, if_getdunit(peer->ifp));
hdl = irdma_find_handler(peer);
if (!hdl)
@@ -615,6 +668,7 @@ irdma_remove(struct ice_rdma_peer *peer)
sysctl_ctx_free(&iwdev->rf->tun_info.irdma_sysctl_ctx);
hdl->iwdev->rf->tun_info.irdma_sysctl_tree = NULL;
+ hdl->iwdev->rf->tun_info.sws_sysctl_tree = NULL;
irdma_ctrl_deinit_hw(iwdev->rf);
@@ -625,7 +679,55 @@ irdma_remove(struct ice_rdma_peer *peer)
kfree(iwdev->rf);
ib_dealloc_device(&iwdev->ibdev);
irdma_pr_info("IRDMA hardware deinitialization complete irdma%d\n",
- pf_if_d(peer));
+ if_getdunit(peer->ifp));
+
+ return 0;
+}
+
+/**
+ * irdma_open - Callback for operation open for RDMA device
+ * @peer: the new peer interface structure
+ *
+ * Callback implementing the RDMA_OPEN function. Called by the ice driver to
+ * notify the RDMA client driver that a new device has been initialized.
+ */
+static int
+irdma_open(struct ice_rdma_peer *peer)
+{
+ struct irdma_device *iwdev;
+ struct ice_rdma_event event = {0};
+
+ iwdev = peer_to_iwdev(peer);
+ if (iwdev) {
+ event.type = ICE_RDMA_EVENT_MTU_CHANGE;
+ event.mtu = peer->mtu;
+
+ irdma_event_handler(peer, &event);
+ } else {
+ irdma_probe(peer);
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_close - Callback to notify that a peer device is down
+ * @peer: the RDMA peer device being stopped
+ *
+ * Callback implementing the RDMA_CLOSE function. Called by the ice driver to
+ * notify the RDMA client driver that a peer device is being stopped.
+ */
+static int
+irdma_close(struct ice_rdma_peer *peer)
+{
+ /*
+	 * This is called when the interface is brought down (ifconfig down) or a pf-reset is about to happen.
+ */
+ struct irdma_device *iwdev;
+
+ iwdev = peer_to_iwdev(peer);
+ if (iwdev && iwdev->rf->reset)
+ irdma_remove(peer);
return 0;
}
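Moving irdma_open()/irdma_close() below irdma_probe()/irdma_remove() lets them pair up across a pf-reset: close() now performs a full remove when rf->reset is set, and the next open() re-probes because no iwdev is bound to the peer any more. A simplified standalone model of that dispatch (types and names are stand-ins):

#include <stdbool.h>
#include <stdio.h>

struct dev { bool reset; };
static struct dev instance;
static struct dev *bound;          /* NULL once torn down */

static void probe(void)    { bound = &instance; puts("probe"); }
static void teardown(void) { bound = NULL;      puts("remove"); }

static void rdma_close(void)
{
	/* reset pending: close() performs the full teardown */
	if (bound && bound->reset)
		teardown();
}

static void rdma_open(void)
{
	if (bound)
		puts("replay MTU event"); /* ordinary ifconfig up */
	else
		probe();                  /* post-reset: re-initialize */
}

int main(void)
{
	probe();
	instance.reset = true;            /* pf-reset announced */
	rdma_close();                     /* removes the device */
	rdma_open();                      /* re-probes it */
	return 0;
}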
@@ -644,8 +746,6 @@ irdma_prep_for_unregister(void)
hdl_valid = false;
spin_lock_irqsave(&irdma_handler_lock, flags);
list_for_each_entry(hdl, &irdma_handlers, list) {
- if (!hdl)
- continue;
if (!hdl->iwdev->rf->peer_info)
continue;
hdl_valid = true;
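The deleted `if (!hdl) continue;` was dead code: a list_for_each_entry()-style cursor is derived from a live list node with container_of(), so it can never be NULL inside the loop body. A compact standalone illustration of why:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next; };
struct handler { int id; struct list_head list; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct handler a = { .id = 1 }, b = { .id = 2 };
	struct list_head head = { &a.list };

	a.list.next = &b.list;
	b.list.next = &head;

	for (struct list_head *p = head.next; p != &head; p = p->next) {
		struct handler *h = container_of(p, struct handler, list);

		/* h points inside a live struct handler; NULL is impossible */
		printf("handler %d\n", h->id);
	}
	return 0;
}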
diff --git a/sys/dev/irdma/icrdma_hw.c b/sys/dev/irdma/icrdma_hw.c
index 7f593a6bf1b3..d1da9b81e5b7 100644
--- a/sys/dev/irdma/icrdma_hw.c
+++ b/sys/dev/irdma/icrdma_hw.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2017 - 2022 Intel Corporation
+ * Copyright (c) 2017 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/sys/dev/irdma/irdma.h b/sys/dev/irdma/irdma.h
index d30bf5f20955..0b8fdf1440a9 100644
--- a/sys/dev/irdma/irdma.h
+++ b/sys/dev/irdma/irdma.h
@@ -166,7 +166,7 @@ struct irdma_mcast_grp_ctx_entry_info {
};
struct irdma_mcast_grp_info {
- u8 dest_mac_addr[ETH_ALEN];
+ u8 dest_mac_addr[ETHER_ADDR_LEN];
u16 vlan_id;
u16 hmc_fcn_id;
bool ipv4_valid:1;
diff --git a/sys/dev/irdma/irdma_cm.c b/sys/dev/irdma/irdma_cm.c
index 6322a34acc13..cb508bf52cb8 100644
--- a/sys/dev/irdma/irdma_cm.c
+++ b/sys/dev/irdma/irdma_cm.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2022 Intel Corporation
+ * Copyright (c) 2015 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -289,10 +289,9 @@ irdma_create_event(struct irdma_cm_node *cm_node,
event->cm_info.loc_port = cm_node->loc_port;
event->cm_info.cm_id = cm_node->cm_id;
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "node=%p event=%p type=%u dst=%pI4 src=%pI4\n",
- cm_node,
- event, type, event->cm_info.loc_addr,
- event->cm_info.rem_addr);
+ "node=%p event=%p type=%u dst=%x src=%x\n", cm_node, event,
+ type, event->cm_info.loc_addr[0],
+ event->cm_info.rem_addr[0]);
irdma_cm_post_event(event);
return event;
@@ -645,10 +644,11 @@ irdma_send_reset(struct irdma_cm_node *cm_node)
return -ENOMEM;
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "caller: %pS cm_node %p cm_id=%p accel=%d state=%d rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4\n",
+ "caller: %pS cm_node %p cm_id=%p accel=%d state=%d rem_port=0x%04x, loc_port=0x%04x rem_addr=%x loc_addr=%x\n",
__builtin_return_address(0), cm_node, cm_node->cm_id,
cm_node->accelerated, cm_node->state, cm_node->rem_port,
- cm_node->loc_port, cm_node->rem_addr, cm_node->loc_addr);
+ cm_node->loc_port, cm_node->rem_addr[0],
+ cm_node->loc_addr[0]);
return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 0,
1);
@@ -665,9 +665,8 @@ irdma_active_open_err(struct irdma_cm_node *cm_node, bool reset)
irdma_cleanup_retrans_entry(cm_node);
cm_node->cm_core->stats_connect_errs++;
if (reset) {
- irdma_debug(&cm_node->iwdev->rf->sc_dev,
- IRDMA_DEBUG_CM, "cm_node=%p state=%d\n", cm_node,
- cm_node->state);
+ irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "cm_node=%p state=%d\n", cm_node, cm_node->state);
atomic_inc(&cm_node->refcnt);
irdma_send_reset(cm_node);
}
@@ -688,8 +687,7 @@ irdma_passive_open_err(struct irdma_cm_node *cm_node, bool reset)
cm_node->cm_core->stats_passive_errs++;
cm_node->state = IRDMA_CM_STATE_CLOSED;
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "cm_node=%p state =%d\n",
- cm_node, cm_node->state);
+ "cm_node=%p state=%d\n", cm_node, cm_node->state);
if (reset)
irdma_send_reset(cm_node);
else
@@ -801,8 +799,7 @@ irdma_handle_tcp_options(struct irdma_cm_node *cm_node,
(u32)tcph->th_flags & TH_SYN);
if (ret) {
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "Node %p, Sending Reset\n",
- cm_node);
+ "Node %p, Sending Reset\n", cm_node);
if (passive)
irdma_passive_open_err(cm_node, true);
else
@@ -950,8 +947,7 @@ irdma_send_mpa_request(struct irdma_cm_node *cm_node)
MPA_KEY_REQUEST);
if (!cm_node->mpa_hdr.size) {
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "mpa size = %d\n",
- cm_node->mpa_hdr.size);
+ "mpa size = %d\n", cm_node->mpa_hdr.size);
return -EINVAL;
}
@@ -1061,9 +1057,9 @@ negotiate_done:
/* Not supported RDMA0 operation */
return -EOPNOTSUPP;
- irdma_debug(&cm_node->iwdev->rf->sc_dev,
- IRDMA_DEBUG_CM, "MPAV2 Negotiated ORD: %d, IRD: %d\n",
- cm_node->ord_size, cm_node->ird_size);
+ irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "MPAV2 Negotiated ORD: %d, IRD: %d\n", cm_node->ord_size,
+ cm_node->ird_size);
return 0;
}
@@ -1084,8 +1080,8 @@ irdma_parse_mpa(struct irdma_cm_node *cm_node, u8 *buf, u32 *type,
*type = IRDMA_MPA_REQUEST_ACCEPT;
if (len < sizeof(struct ietf_mpa_v1)) {
- irdma_debug(&cm_node->iwdev->rf->sc_dev,
- IRDMA_DEBUG_CM, "ietf buffer small (%x)\n", len);
+ irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "ietf buffer small (%x)\n", len);
return -EINVAL;
}
@@ -1095,22 +1091,19 @@ irdma_parse_mpa(struct irdma_cm_node *cm_node, u8 *buf, u32 *type,
if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) {
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "private_data too big %d\n",
- priv_data_len);
+ "private_data too big %d\n", priv_data_len);
return -EOVERFLOW;
}
if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) {
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "unsupported mpa rev = %d\n",
- mpa_frame->rev);
+ "unsupported mpa rev = %d\n", mpa_frame->rev);
return -EINVAL;
}
if (mpa_frame->rev > cm_node->mpa_frame_rev) {
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "rev %d\n",
- mpa_frame->rev);
+ "rev %d\n", mpa_frame->rev);
return -EINVAL;
}
@@ -1118,29 +1111,29 @@ irdma_parse_mpa(struct irdma_cm_node *cm_node, u8 *buf, u32 *type,
if (cm_node->state != IRDMA_CM_STATE_MPAREQ_SENT) {
if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ,
IETF_MPA_KEY_SIZE)) {
- irdma_debug(&cm_node->iwdev->rf->sc_dev,
- IRDMA_DEBUG_CM, "Unexpected MPA Key received\n");
+ irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "Unexpected MPA Key received\n");
return -EINVAL;
}
} else {
if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP,
IETF_MPA_KEY_SIZE)) {
- irdma_debug(&cm_node->iwdev->rf->sc_dev,
- IRDMA_DEBUG_CM, "Unexpected MPA Key received\n");
+ irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "Unexpected MPA Key received\n");
return -EINVAL;
}
}
if (priv_data_len + mpa_hdr_len > len) {
- irdma_debug(&cm_node->iwdev->rf->sc_dev,
- IRDMA_DEBUG_CM, "ietf buffer len(%x + %x != %x)\n",
- priv_data_len, mpa_hdr_len, len);
+ irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "ietf buffer len(%x + %x != %x)\n", priv_data_len,
+ mpa_hdr_len, len);
return -EOVERFLOW;
}
if (len > IRDMA_MAX_CM_BUF) {
- irdma_debug(&cm_node->iwdev->rf->sc_dev,
- IRDMA_DEBUG_CM, "ietf buffer large len = %d\n", len);
+ irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "ietf buffer large len = %d\n", len);
return -EOVERFLOW;
}
@@ -1212,8 +1205,8 @@ irdma_schedule_cm_timer(struct irdma_cm_node *cm_node,
new_send->timetosend += (HZ / 10);
if (cm_node->close_entry) {
kfree(new_send);
- irdma_debug(&cm_node->iwdev->rf->sc_dev,
- IRDMA_DEBUG_CM, "already close entry\n");
+ irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "already close entry\n");
return -EINVAL;
}
@@ -1447,6 +1440,7 @@ irdma_send_syn(struct irdma_cm_node *cm_node, u32 sendack)
sizeof(struct option_base) + TCP_OPTIONS_PADDING];
struct irdma_kmem_info opts;
int optionssize = 0;
+
/* Sending MSS option */
union all_known_options *options;
@@ -1582,14 +1576,14 @@ irdma_del_multiple_qhash(struct irdma_device *iwdev,
child_listen_list);
if (child_listen_node->ipv4)
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "removing child listen for IP=%pI4, port=%d, vlan=%d\n",
- child_listen_node->loc_addr,
+ "removing child listen for IP=%x, port=%d, vlan=%d\n",
+ child_listen_node->loc_addr[0],
child_listen_node->loc_port,
child_listen_node->vlan_id);
else
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "removing child listen for IP=%pI6, port=%d, vlan=%d\n",
- child_listen_node->loc_addr,
+ "removing child listen for IP=%x:%x:%x:%x, port=%d, vlan=%d\n",
+ IRDMA_PRINT_IP6(child_listen_node->loc_addr),
child_listen_node->loc_port,
child_listen_node->vlan_id);
list_del(pos);
@@ -1605,8 +1599,8 @@ irdma_del_multiple_qhash(struct irdma_device *iwdev,
} else {
ret = 0;
}
- irdma_debug(&iwdev->rf->sc_dev,
- IRDMA_DEBUG_CM, "Child listen node freed = %p\n",
+ irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "Child listen node freed = %p\n",
child_listen_node);
kfree(child_listen_node);
cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
@@ -1616,7 +1610,8 @@ irdma_del_multiple_qhash(struct irdma_device *iwdev,
return ret;
}
-static u8 irdma_get_egress_vlan_prio(u32 *loc_addr, u8 prio, bool ipv4){
+static u8 irdma_iw_get_vlan_prio(u32 *loc_addr, u8 prio, bool ipv4)
+{
return prio;
}
@@ -1629,11 +1624,12 @@ static u8 irdma_get_egress_vlan_prio(u32 *loc_addr, u8 prio, bool ipv4){
* Returns the net_device of the IPv6 address and also sets the
* vlan id and mac for that address.
*/
-struct ifnet *
+if_t
irdma_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
{
- struct ifnet *ip_dev = NULL;
+ if_t ip_dev = NULL;
struct in6_addr laddr6;
+ struct ifaddr *ifa;
u16 scope_id = 0;
irdma_copy_ip_htonl(laddr6.__u6_addr.__u6_addr32, addr);
@@ -1650,8 +1646,9 @@ irdma_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
if (ip_dev) {
if (vlan_id)
*vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
- if (ip_dev->if_addr && ip_dev->if_addr->ifa_addr && mac)
- ether_addr_copy(mac, IF_LLADDR(ip_dev));
+ ifa = if_getifaddr(ip_dev);
+ if (ifa && ifa->ifa_addr && mac)
+ ether_addr_copy(mac, if_getlladdr(ip_dev));
}
return ip_dev;
@@ -1664,7 +1661,7 @@ irdma_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
u16
irdma_get_vlan_ipv4(u32 *addr)
{
- struct ifnet *netdev;
+ if_t netdev;
u16 vlan_id = 0xFFFF;
netdev = ip_ifp_find(&init_net, htonl(addr[0]));
@@ -1676,179 +1673,116 @@ irdma_get_vlan_ipv4(u32 *addr)
return vlan_id;
}
-/**
- * irdma_add_mqh_6 - Adds multiple qhashes for IPv6
- * @iwdev: iWarp device
- * @cm_info: CM info for parent listen node
- * @cm_parent_listen_node: The parent listen node
- *
- * Adds a qhash and a child listen node for every IPv6 address
- * on the adapter and adds the associated qhash filter
- */
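+/**
+ * irdma_manage_qhash_wait - wait for a posted qhash CQP op to complete
+ * @rf: RDMA PCI function
+ * @cm_info: CM info which carries the pending cqp_request
+ */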
static int
-irdma_add_mqh_6(struct irdma_device *iwdev,
- struct irdma_cm_info *cm_info,
- struct irdma_cm_listener *cm_parent_listen_node)
+irdma_manage_qhash_wait(struct irdma_pci_f *rf, struct irdma_cm_info *cm_info)
{
- struct ifnet *ip_dev;
- struct ifaddr *ifp;
- struct irdma_cm_listener *child_listen_node;
- unsigned long flags;
- int ret = 0;
-
- IFNET_RLOCK();
- IRDMA_TAILQ_FOREACH((ip_dev), &V_ifnet, if_link) {
- if (!(ip_dev->if_flags & IFF_UP))
- continue;
+ struct irdma_cqp_request *cqp_request = cm_info->cqp_request;
+ int cnt = rf->sc_dev.hw_attrs.max_cqp_compl_wait_time_ms * CQP_TIMEOUT_THRESHOLD;
+ u32 ret_val;
- if (((rdma_vlan_dev_vlan_id(ip_dev) >= VLAN_N_VID) ||
- (rdma_vlan_dev_real_dev(ip_dev) != iwdev->netdev)) &&
- ip_dev != iwdev->netdev)
- continue;
-
- if_addr_rlock(ip_dev);
- IRDMA_TAILQ_FOREACH(ifp, &ip_dev->if_addrhead, ifa_link) {
- irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "IP=%pI6, vlan_id=%d, MAC=%pM\n",
- &((struct sockaddr_in6 *)ifp->ifa_addr)->sin6_addr, rdma_vlan_dev_vlan_id(ip_dev),
- IF_LLADDR(ip_dev));
- if (((struct sockaddr_in6 *)ifp->ifa_addr)->sin6_family != AF_INET6)
- continue;
- child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL);
- irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "Allocating child listener %p\n",
- child_listen_node);
- if (!child_listen_node) {
- irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM, "listener memory allocation\n");
- ret = -ENOMEM;
- if_addr_runlock(ip_dev);
- goto exit;
- }
-
- memcpy(child_listen_node, cm_parent_listen_node,
- sizeof(*child_listen_node));
- cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
- child_listen_node->vlan_id = cm_info->vlan_id;
- irdma_copy_ip_ntohl(child_listen_node->loc_addr,
- ((struct sockaddr_in6 *)ifp->ifa_addr)->sin6_addr.__u6_addr.__u6_addr32);
- memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
- sizeof(cm_info->loc_addr));
- if (!iwdev->vsi.dscp_mode)
- cm_info->user_pri =
- irdma_get_egress_vlan_prio(child_listen_node->loc_addr,
- cm_info->user_pri,
- false);
- ret = irdma_manage_qhash(iwdev, cm_info,
- IRDMA_QHASH_TYPE_TCP_SYN,
- IRDMA_QHASH_MANAGE_TYPE_ADD,
- NULL, true);
- if (ret) {
- kfree(child_listen_node);
- continue;
- }
-
- child_listen_node->qhash_set = true;
- spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
- list_add(&child_listen_node->child_listen_list,
- &cm_parent_listen_node->child_listen_list);
- spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
- cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
- }
- if_addr_runlock(ip_dev);
+ if (!cqp_request)
+ return -ENOMEM;
+ do {
+ irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
+ mdelay(1);
+ } while (!READ_ONCE(cqp_request->request_done) && --cnt);
+
+ ret_val = cqp_request->compl_info.op_ret_val;
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ if (cnt) {
+ if (!ret_val)
+ return 0;
+ return -EINVAL;
}
-exit:
- IFNET_RUNLOCK();
- return ret;
+ return -ETIMEDOUT;
}
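irdma_manage_qhash_wait() is the "busy wait for manage_qhash" noted in the commit message: instead of sleeping under a lock, it polls the CCQ with a bounded budget and distinguishes an op-level failure from a timeout. A standalone sketch of the same pattern; poll_once() and op_failed() are illustrative stand-ins for the CCQ handler and completion status, not driver calls:

#include <stdbool.h>
#include <errno.h>
#include <stdio.h>

static int fake_done_after = 3;    /* completes on the third poll */

static bool poll_once(void) { return --fake_done_after <= 0; }
static bool op_failed(void) { return false; }

static int wait_bounded(int budget)
{
	do {
		if (poll_once())
			return op_failed() ? -EINVAL : 0;
		/* the real code sleeps ~1 ms per iteration (mdelay(1)) */
	} while (--budget);
	return -ETIMEDOUT;
}

int main(void)
{
	printf("result: %d\n", wait_bounded(100)); /* 0 on success */
	return 0;
}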
/**
- * irdma_add_mqh_4 - Adds multiple qhashes for IPv4
- * @iwdev: iWarp device
- * @cm_info: CM info for parent listen node
- * @cm_parent_listen_node: The parent listen node
+ * irdma_add_mqh_ifa_cb - Adds multiple qhashes for IPv4/IPv6
+ * @arg: Callback argument structure from irdma_add_mqh
+ * @ifa: Current address to compute against
+ * @count: Current cumulative output of all callbacks in this iteration
*
- * Adds a qhash and a child listen node for every IPv4 address
+ * Adds a qhash and a child listen node for a single IPv4/IPv6 address
* on the adapter and adds the associated qhash filter
*/
-static int
-irdma_add_mqh_4(struct irdma_device *iwdev,
- struct irdma_cm_info *cm_info,
- struct irdma_cm_listener *cm_parent_listen_node)
-{
- struct ifnet *ip_dev;
+static u_int
+irdma_add_mqh_ifa_cb(void *arg, struct ifaddr *ifa, u_int count)
+{
+ struct irdma_add_mqh_cbs *cbs = arg;
struct irdma_cm_listener *child_listen_node;
+ struct irdma_cm_info *cm_info = cbs->cm_info;
+ struct irdma_device *iwdev = cbs->iwdev;
+ struct irdma_cm_listener *cm_parent_listen_node = cbs->cm_listen_node;
+ if_t ip_dev = ifa->ifa_ifp;
unsigned long flags;
- struct ifaddr *ifa;
- int ret = 0;
-
- IFNET_RLOCK();
- IRDMA_TAILQ_FOREACH((ip_dev), &V_ifnet, if_link) {
- if (!(ip_dev->if_flags & IFF_UP))
- continue;
-
- if (((rdma_vlan_dev_vlan_id(ip_dev) >= VLAN_N_VID) ||
- (rdma_vlan_dev_real_dev(ip_dev) != iwdev->netdev)) &&
- ip_dev != iwdev->netdev)
- continue;
+ int ret;
- if_addr_rlock(ip_dev);
- IRDMA_TAILQ_FOREACH(ifa, &ip_dev->if_addrhead, ifa_link) {
- irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n",
- &ifa->ifa_addr, rdma_vlan_dev_vlan_id(ip_dev),
- IF_LLADDR(ip_dev));
- if (((struct sockaddr_in *)ifa->ifa_addr)->sin_family != AF_INET)
- continue;
- child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL);
- cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
- irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "Allocating child listener %p\n",
- child_listen_node);
- if (!child_listen_node) {
- irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM, "listener memory allocation\n");
- if_addr_runlock(ip_dev);
- ret = -ENOMEM;
- goto exit;
- }
+ if (count)
+ return 0;
- memcpy(child_listen_node, cm_parent_listen_node,
- sizeof(*child_listen_node));
- child_listen_node->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
- cm_info->vlan_id = child_listen_node->vlan_id;
- child_listen_node->loc_addr[0] =
- ntohl(((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr);
- memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
- sizeof(cm_info->loc_addr));
- if (!iwdev->vsi.dscp_mode)
- cm_info->user_pri =
- irdma_get_egress_vlan_prio(child_listen_node->loc_addr,
- cm_info->user_pri,
- true);
- ret = irdma_manage_qhash(iwdev, cm_info,
- IRDMA_QHASH_TYPE_TCP_SYN,
- IRDMA_QHASH_MANAGE_TYPE_ADD,
- NULL, true);
- if (ret) {
- kfree(child_listen_node);
- cm_parent_listen_node->cm_core
- ->stats_listen_nodes_created--;
- continue;
- }
+ child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
+ if (!child_listen_node) {
+ irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "listener memory allocation\n");
+ return -ENOMEM;
+ }
- child_listen_node->qhash_set = true;
- spin_lock_irqsave(&iwdev->cm_core.listen_list_lock,
- flags);
- list_add(&child_listen_node->child_listen_list,
- &cm_parent_listen_node->child_listen_list);
- spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
- }
- if_addr_runlock(ip_dev);
+ memcpy(child_listen_node, cm_parent_listen_node,
+ sizeof(*child_listen_node));
+ cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
+ child_listen_node->vlan_id = cm_info->vlan_id;
+ if (cm_info->ipv4) {
+ irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+			    "Allocating child CM Listener for IP=%x, vlan_id=%d, MAC=%x:%x:%x:%x:%x:%x\n",
+			    ((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr,
+ rdma_vlan_dev_vlan_id(ip_dev),
+ if_getlladdr(ip_dev)[0], if_getlladdr(ip_dev)[1],
+ if_getlladdr(ip_dev)[2], if_getlladdr(ip_dev)[3],
+ if_getlladdr(ip_dev)[4], if_getlladdr(ip_dev)[5]);
+ child_listen_node->loc_addr[0] =
+ ntohl(((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr);
+ } else {
+ irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "IP=%x:%x:%x:%x, vlan_id=%d, MAC=%x:%x:%x:%x:%x:%x\n",
+ IRDMA_PRINT_IP6(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr),
+ rdma_vlan_dev_vlan_id(ip_dev),
+ if_getlladdr(ip_dev)[0], if_getlladdr(ip_dev)[1],
+ if_getlladdr(ip_dev)[2], if_getlladdr(ip_dev)[3],
+ if_getlladdr(ip_dev)[4], if_getlladdr(ip_dev)[5]);
+ irdma_copy_ip_ntohl(child_listen_node->loc_addr,
+ ((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr.__u6_addr.__u6_addr32);
+ }
+ memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
+ sizeof(cm_info->loc_addr));
+ if (!iwdev->vsi.dscp_mode)
+ cm_info->user_pri =
+ irdma_iw_get_vlan_prio(child_listen_node->loc_addr,
+ cm_info->user_pri,
+ cm_info->ipv4);
+ ret = irdma_manage_qhash(iwdev, cm_info,
+ IRDMA_QHASH_TYPE_TCP_SYN,
+ IRDMA_QHASH_MANAGE_TYPE_ADD,
+ NULL, false);
+ if (ret) {
+ kfree(child_listen_node);
+ return ret;
+ }
+ /* wait for qhash finish */
+ ret = irdma_manage_qhash_wait(iwdev->rf, cm_info);
+ if (ret) {
+ kfree(child_listen_node);
+ return ret;
}
-exit:
- IFNET_RUNLOCK();
- return ret;
+ child_listen_node->qhash_set = true;
+ spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
+ list_add(&child_listen_node->child_listen_list,
+ &cm_parent_listen_node->child_listen_list);
+ spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
+
+ return 0;
}
/**
@@ -1862,21 +1796,42 @@ irdma_add_mqh(struct irdma_device *iwdev,
struct irdma_cm_info *cm_info,
struct irdma_cm_listener *cm_listen_node)
{
- int err;
+ struct epoch_tracker et;
+ struct irdma_add_mqh_cbs cbs;
+ struct if_iter iter;
+ if_t ifp;
+ int err = -ENOENT;
+
+ cbs.iwdev = iwdev;
+ cbs.cm_info = cm_info;
+ cbs.cm_listen_node = cm_listen_node;
+
VNET_ITERATOR_DECL(vnet_iter);
VNET_LIST_RLOCK();
+ NET_EPOCH_ENTER(et);
VNET_FOREACH(vnet_iter) {
- IFNET_RLOCK();
CURVNET_SET_QUIET(vnet_iter);
+ for (ifp = if_iter_start(&iter); ifp != NULL; ifp = if_iter_next(&iter)) {
+ if (!(if_getflags(ifp) & IFF_UP))
+ continue;
- if (cm_info->ipv4)
- err = irdma_add_mqh_4(iwdev, cm_info, cm_listen_node);
- else
- err = irdma_add_mqh_6(iwdev, cm_info, cm_listen_node);
+ if (((rdma_vlan_dev_vlan_id(ifp) >= VLAN_N_VID) ||
+ (rdma_vlan_dev_real_dev(ifp) != iwdev->netdev)) &&
+ ifp != iwdev->netdev)
+ continue;
+
+ if_addr_rlock(ifp);
+ if (cm_info->ipv4)
+ err = if_foreach_addr_type(ifp, AF_INET, irdma_add_mqh_ifa_cb, &cbs);
+ else
+ err = if_foreach_addr_type(ifp, AF_INET6, irdma_add_mqh_ifa_cb, &cbs);
+ if_addr_runlock(ifp);
+ }
+ if_iter_finish(&iter);
CURVNET_RESTORE();
- IFNET_RUNLOCK();
}
+ NET_EPOCH_EXIT(et);
VNET_LIST_RUNLOCK();
return err;
@@ -1945,8 +1900,8 @@ irdma_dec_refcnt_listen(struct irdma_cm_core *cm_core,
err = irdma_send_reset(cm_node);
if (err) {
cm_node->state = IRDMA_CM_STATE_CLOSED;
- irdma_debug(&cm_node->iwdev->rf->sc_dev,
- IRDMA_DEBUG_CM, "send reset failed\n");
+ irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "send reset failed\n");
} else {
old_state = cm_node->state;
cm_node->state = IRDMA_CM_STATE_LISTENER_DESTROYED;
@@ -1985,8 +1940,8 @@ irdma_dec_refcnt_listen(struct irdma_cm_core *cm_core,
cm_core->stats_listen_destroyed++;
cm_core->stats_listen_nodes_destroyed++;
irdma_debug(&listener->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "loc_port=0x%04x loc_addr=%pI4 cm_listen_node=%p cm_id=%p qhash_set=%d vlan_id=%d apbvt_del=%d\n",
- listener->loc_port, listener->loc_addr, listener,
+ "loc_port=0x%04x loc_addr=%x cm_listen_node=%p cm_id=%p qhash_set=%d vlan_id=%d apbvt_del=%d\n",
+ listener->loc_port, listener->loc_addr[0], listener,
listener->cm_id, listener->qhash_set,
listener->vlan_id, apbvt_del);
kfree(listener);
@@ -2109,7 +2064,7 @@ irdma_cm_create_ah(struct irdma_cm_node *cm_node, bool wait)
struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
#endif
- ether_addr_copy(ah_info.mac_addr, IF_LLADDR(iwdev->netdev));
+ ether_addr_copy(ah_info.mac_addr, if_getlladdr(iwdev->netdev));
ah_info.hop_ttl = 0x40;
ah_info.tc_tos = cm_node->tos;
@@ -2177,7 +2132,7 @@ irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev,
{
struct irdma_cm_node *cm_node;
int arpindex;
- struct ifnet *netdev = iwdev->netdev;
+ if_t netdev = iwdev->netdev;
/* create an hte and cm_node for this instance */
cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
@@ -2202,13 +2157,12 @@ irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev,
cm_node->tos = max(listener->tos, cm_info->tos);
cm_node->user_pri = rt_tos2priority(cm_node->tos);
cm_node->user_pri =
- irdma_get_egress_vlan_prio(cm_info->loc_addr,
- cm_node->user_pri,
- cm_info->ipv4);
+ irdma_iw_get_vlan_prio(cm_info->loc_addr,
+ cm_node->user_pri,
+ cm_info->ipv4);
}
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DCB,
- "listener: TOS:[%d] UP:[%d]\n",
- cm_node->tos,
+ "listener: TOS:[%d] UP:[%d]\n", cm_node->tos,
cm_node->user_pri);
}
memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));
@@ -2226,7 +2180,7 @@ irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev,
cm_node->listener = listener;
cm_node->cm_id = cm_info->cm_id;
- ether_addr_copy(cm_node->loc_mac, IF_LLADDR(netdev));
+ ether_addr_copy(cm_node->loc_mac, if_getlladdr(netdev));
spin_lock_init(&cm_node->retrans_list_lock);
cm_node->ack_rcvd = false;
@@ -2263,8 +2217,8 @@ irdma_destroy_connection(struct irdma_cm_node *cm_node)
/* if the node is destroyed before connection was accelerated */
if (!cm_node->accelerated && cm_node->accept_pend) {
- irdma_debug(&cm_node->iwdev->rf->sc_dev,
- IRDMA_DEBUG_CM, "node destroyed before established\n");
+ irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "node destroyed before established\n");
atomic_dec(&cm_node->listener->pend_accepts_cnt);
}
if (cm_node->close_entry)
@@ -2388,8 +2342,7 @@ irdma_handle_fin_pkt(struct irdma_cm_node *cm_node)
case IRDMA_CM_STATE_OFFLOADED:
default:
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "bad state node state = %d\n",
- cm_node->state);
+ "bad state node state = %d\n", cm_node->state);
break;
}
}
@@ -2404,10 +2357,10 @@ irdma_handle_rst_pkt(struct irdma_cm_node *cm_node,
struct irdma_puda_buf *rbuf)
{
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "caller: %pS cm_node=%p state=%d rem_port=0x%04x loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4\n",
+ "caller: %pS cm_node=%p state=%d rem_port=0x%04x loc_port=0x%04x rem_addr=%x loc_addr=%x\n",
__builtin_return_address(0), cm_node, cm_node->state,
- cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr,
- cm_node->loc_addr);
+ cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr[0],
+ cm_node->loc_addr[0]);
irdma_cleanup_retrans_entry(cm_node);
switch (cm_node->state) {
@@ -2481,8 +2434,8 @@ irdma_handle_rcv_mpa(struct irdma_cm_node *cm_node,
switch (cm_node->state) {
case IRDMA_CM_STATE_ESTABLISHED:
if (res_type == IRDMA_MPA_REQUEST_REJECT)
- irdma_debug(&cm_node->iwdev->rf->sc_dev,
- IRDMA_DEBUG_CM, "state for reject\n");
+ irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "state for reject\n");
cm_node->state = IRDMA_CM_STATE_MPAREQ_RCVD;
type = IRDMA_CM_EVENT_MPA_REQ;
irdma_send_ack(cm_node); /* ACK received MPA request */
@@ -2502,8 +2455,7 @@ irdma_handle_rcv_mpa(struct irdma_cm_node *cm_node,
break;
default:
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "wrong cm_node state =%d\n",
- cm_node->state);
+ "wrong cm_node state=%d\n", cm_node->state);
break;
}
irdma_create_event(cm_node, type);
@@ -2547,8 +2499,8 @@ irdma_check_seq(struct irdma_cm_node *cm_node, struct tcphdr *tcph)
!between(seq, rcv_nxt, (rcv_nxt + rcv_wnd)))
err = -1;
if (err)
- irdma_debug(&cm_node->iwdev->rf->sc_dev,
- IRDMA_DEBUG_CM, "seq number err\n");
+ irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "seq number err\n");
return err;
}
@@ -2655,8 +2607,8 @@ irdma_handle_synack_pkt(struct irdma_cm_node *cm_node,
irdma_cleanup_retrans_entry(cm_node);
/* active open */
if (irdma_check_syn(cm_node, tcph)) {
- irdma_debug(&cm_node->iwdev->rf->sc_dev,
- IRDMA_DEBUG_CM, "check syn fail\n");
+ irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "check syn fail\n");
return;
}
cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->th_ack);
@@ -2664,8 +2616,7 @@ irdma_handle_synack_pkt(struct irdma_cm_node *cm_node,
err = irdma_handle_tcp_options(cm_node, tcph, optionsize, 0);
if (err) {
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "cm_node=%p tcp_options failed\n",
- cm_node);
+ "cm_node=%p tcp_options failed\n", cm_node);
break;
}
irdma_cleanup_retrans_entry(cm_node);
@@ -2978,8 +2929,8 @@ irdma_cm_reject(struct irdma_cm_node *cm_node, const void *pdata,
cm_node->state = IRDMA_CM_STATE_CLOSED;
if (irdma_send_reset(cm_node))
- irdma_debug(&cm_node->iwdev->rf->sc_dev,
- IRDMA_DEBUG_CM, "send reset failed\n");
+ irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "send reset failed\n");
return ret;
}
@@ -3025,8 +2976,8 @@ irdma_cm_close(struct irdma_cm_node *cm_node)
break;
case IRDMA_CM_STATE_OFFLOADED:
if (cm_node->send_entry)
- irdma_debug(&cm_node->iwdev->rf->sc_dev,
- IRDMA_DEBUG_CM, "CM send_entry in OFFLOADED state\n");
+ irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "CM send_entry in OFFLOADED state\n");
irdma_rem_ref_cm_node(cm_node);
break;
}
@@ -3076,8 +3027,7 @@ irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf)
VLAN_PRIO_SHIFT;
cm_info.vlan_id = vtag & EVL_VLID_MASK;
irdma_debug(&cm_core->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "vlan_id=%d\n",
- cm_info.vlan_id);
+ "vlan_id=%d\n", cm_info.vlan_id);
} else {
cm_info.vlan_id = 0xFFFF;
}
@@ -3118,8 +3068,8 @@ irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf)
IRDMA_CM_LISTENER_ACTIVE_STATE);
if (!listener) {
cm_info.cm_id = NULL;
- irdma_debug(&cm_core->iwdev->rf->sc_dev,
- IRDMA_DEBUG_CM, "no listener found\n");
+ irdma_debug(&cm_core->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "no listener found\n");
return;
}
@@ -3127,8 +3077,8 @@ irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf)
cm_node = irdma_make_cm_node(cm_core, iwdev, &cm_info,
listener);
if (!cm_node) {
- irdma_debug(&cm_core->iwdev->rf->sc_dev,
- IRDMA_DEBUG_CM, "allocate node failed\n");
+ irdma_debug(&cm_core->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "allocate node failed\n");
atomic_dec(&listener->refcnt);
return;
}
@@ -3356,9 +3306,8 @@ irdma_cm_disconn(struct irdma_qp *iwqp)
spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
if (!iwdev->rf->qp_table[iwqp->ibqp.qp_num]) {
spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
- irdma_debug(&iwdev->rf->sc_dev,
- IRDMA_DEBUG_CM, "qp_id %d is already freed\n",
- iwqp->ibqp.qp_num);
+ irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "qp_id %d is already freed\n", iwqp->ibqp.qp_num);
kfree(work);
return;
}
@@ -3530,7 +3479,8 @@ irdma_free_lsmm_rsrc(struct irdma_qp *iwqp)
if (iwqp->ietf_mem.va) {
if (iwqp->lsmm_mr)
- kc_free_lsmm_dereg_mr(iwdev, iwqp);
+ iwdev->ibdev.dereg_mr(iwqp->lsmm_mr);
+
irdma_free_dma_mem(iwdev->rf->sc_dev.hw,
&iwqp->ietf_mem);
iwqp->ietf_mem.va = NULL;
@@ -3677,9 +3627,9 @@ irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4 cm_node=%p cm_id=%p qp_id = %d\n\n",
- cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr,
- cm_node->loc_addr, cm_node, cm_id, ibqp->qp_num);
+ "rem_port=0x%04x, loc_port=0x%04x rem_addr=%x loc_addr=%x cm_node=%p cm_id=%p qp_id=%d\n\n",
+ cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr[0],
+ cm_node->loc_addr[0], cm_node, cm_id, ibqp->qp_num);
cm_node->cm_core->stats_accepts++;
return 0;
@@ -3792,9 +3742,10 @@ irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(cm_info.tos)];
} else {
cm_info.user_pri = rt_tos2priority(cm_id->tos);
- cm_info.user_pri = irdma_get_egress_vlan_prio(cm_info.loc_addr,
- cm_info.user_pri,
- cm_info.ipv4);
+ cm_info.user_pri =
+ irdma_iw_get_vlan_prio(cm_info.loc_addr,
+ cm_info.user_pri,
+ cm_info.ipv4);
}
if (iwqp->sc_qp.dev->ws_add(iwqp->sc_qp.vsi, cm_info.user_pri))
@@ -3803,9 +3754,8 @@ irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
irdma_qp_add_qos(&iwqp->sc_qp);
if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2)
iwdev->rf->check_fc(&iwdev->vsi, &iwqp->sc_qp);
- irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DCB,
- "TOS:[%d] UP:[%d]\n", cm_id->tos,
- cm_info.user_pri);
+ irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DCB, "TOS:[%d] UP:[%d]\n",
+ cm_id->tos, cm_info.user_pri);
ret = irdma_create_cm_node(&iwdev->cm_core, iwdev, conn_param, &cm_info,
&cm_node);
@@ -3843,21 +3793,21 @@ irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4 cm_node=%p cm_id=%p qp_id = %d\n\n",
- cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr,
- cm_node->loc_addr, cm_node, cm_id, ibqp->qp_num);
+ "rem_port=0x%04x, loc_port=0x%04x rem_addr=%x loc_addr=%x cm_node=%p cm_id=%p qp_id = %d\n\n",
+ cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr[0],
+ cm_node->loc_addr[0], cm_node, cm_id, ibqp->qp_num);
return 0;
err:
if (cm_info.ipv4)
- irdma_debug(&iwdev->rf->sc_dev,
- IRDMA_DEBUG_CM, "connect() FAILED: dest addr=%pI4",
- cm_info.rem_addr);
+ irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "connect() FAILED: dest addr=%x",
+ cm_info.rem_addr[0]);
else
- irdma_debug(&iwdev->rf->sc_dev,
- IRDMA_DEBUG_CM, "connect() FAILED: dest addr=%pI6",
- cm_info.rem_addr);
+ irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "connect() FAILED: dest addr=%x:%x:%x:%x",
+ IRDMA_PRINT_IP6(cm_info.rem_addr));
irdma_rem_ref_cm_node(cm_node);
iwdev->cm_core.stats_connect_errs++;
@@ -3927,8 +3877,8 @@ irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_listen_node = irdma_make_listen_node(&iwdev->cm_core, iwdev,
&cm_info);
if (!cm_listen_node) {
- irdma_debug(&iwdev->rf->sc_dev,
- IRDMA_DEBUG_CM, "cm_listen_node == NULL\n");
+ irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "cm_listen_node == NULL\n");
return -ENOMEM;
}
@@ -3949,9 +3899,9 @@ irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
} else {
if (!iwdev->vsi.dscp_mode)
cm_info.user_pri = cm_listen_node->user_pri =
- irdma_get_egress_vlan_prio(cm_info.loc_addr,
- cm_info.user_pri,
- cm_info.ipv4);
+ irdma_iw_get_vlan_prio(cm_info.loc_addr,
+ cm_info.user_pri,
+ cm_info.ipv4);
err = irdma_manage_qhash(iwdev, &cm_info,
IRDMA_QHASH_TYPE_TCP_SYN,
IRDMA_QHASH_MANAGE_TYPE_ADD,
@@ -3970,8 +3920,8 @@ irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_id->add_ref(cm_id);
cm_listen_node->cm_core->stats_listen_created++;
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "loc_port=0x%04x loc_addr=%pI4 cm_listen_node=%p cm_id=%p qhash_set=%d vlan_id=%d\n",
- cm_listen_node->loc_port, cm_listen_node->loc_addr,
+ "loc_port=0x%04x loc_addr=%x cm_listen_node=%p cm_id=%p qhash_set=%d vlan_id=%d\n",
+ cm_listen_node->loc_port, cm_listen_node->loc_addr[0],
cm_listen_node, cm_listen_node->cm_id,
cm_listen_node->qhash_set, cm_listen_node->vlan_id);
@@ -3998,8 +3948,8 @@ irdma_destroy_listen(struct iw_cm_id *cm_id)
irdma_cm_del_listen(&iwdev->cm_core, cm_id->provider_data,
true);
else
- irdma_debug(&iwdev->rf->sc_dev,
- IRDMA_DEBUG_CM, "cm_id->provider_data was NULL\n");
+ irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
+ "cm_id->provider_data was NULL\n");
cm_id->rem_ref(cm_id);
@@ -4185,8 +4135,7 @@ irdma_cm_event_reset(struct irdma_cm_event *event)
return;
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "reset event %p - cm_id = %p\n",
- event->cm_node, cm_id);
+ "reset event %p - cm_id = %p\n", event->cm_node, cm_id);
iwqp->cm_id = NULL;
irdma_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_DISCONNECT,
@@ -4238,8 +4187,7 @@ irdma_cm_event_handler(struct work_struct *work)
break;
default:
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "bad event type = %d\n",
- event->type);
+ "bad event type = %d\n", event->type);
break;
}
@@ -4268,7 +4216,7 @@ irdma_cm_post_event(struct irdma_cm_event *event)
*
* teardown QPs where source or destination addr matches ip addr
*/
-static void
+static void __unused
irdma_cm_teardown_connections(struct irdma_device *iwdev,
u32 *ipaddr,
struct irdma_cm_info *nfo,
diff --git a/sys/dev/irdma/irdma_cm.h b/sys/dev/irdma/irdma_cm.h
index 96c4f4c6e29d..9f71f2450147 100644
--- a/sys/dev/irdma/irdma_cm.h
+++ b/sys/dev/irdma/irdma_cm.h
@@ -287,7 +287,7 @@ struct irdma_cm_listener {
int backlog;
u16 loc_port;
u16 vlan_id;
- u8 loc_mac[ETH_ALEN];
+ u8 loc_mac[ETHER_ADDR_LEN];
u8 user_pri;
u8 tos;
bool qhash_set:1;
@@ -340,8 +340,8 @@ struct irdma_cm_node {
u16 mpav2_ird_ord;
u16 lsmm_size;
u8 pdata_buf[IETF_MAX_PRIV_DATA_LEN];
- u8 loc_mac[ETH_ALEN];
- u8 rem_mac[ETH_ALEN];
+ u8 loc_mac[ETHER_ADDR_LEN];
+ u8 rem_mac[ETHER_ADDR_LEN];
u8 user_pri;
u8 tos;
bool ack_rcvd:1;
@@ -357,6 +357,7 @@ struct irdma_cm_node {
/* Used by internal CM APIs to pass CM information*/
struct irdma_cm_info {
struct iw_cm_id *cm_id;
+ struct irdma_cqp_request *cqp_request;
u16 loc_port;
u16 rem_port;
u32 loc_addr[4];
@@ -409,6 +410,12 @@ struct irdma_cm_core {
void (*cm_free_ah)(struct irdma_cm_node *cm_node);
};
+struct irdma_add_mqh_cbs {
+ struct irdma_device *iwdev;
+ struct irdma_cm_info *cm_info;
+ struct irdma_cm_listener *cm_listen_node;
+};
+
int irdma_schedule_cm_timer(struct irdma_cm_node *cm_node,
struct irdma_puda_buf *sqbuf,
enum irdma_timer_type type, int send_retrans,
diff --git a/sys/dev/irdma/irdma_ctrl.c b/sys/dev/irdma/irdma_ctrl.c
index 6bd0520e9bb8..b0f5357a21e5 100644
--- a/sys/dev/irdma/irdma_ctrl.c
+++ b/sys/dev/irdma/irdma_ctrl.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2022 Intel Corporation
+ * Copyright (c) 2015 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -178,7 +178,8 @@ irdma_qp_rem_qos(struct irdma_sc_qp *qp)
irdma_debug(qp->dev, IRDMA_DEBUG_DCB,
"DCB: Remove qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
- qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle, qp->on_qoslist);
+ qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
+ qp->on_qoslist);
mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
if (qp->on_qoslist) {
qp->on_qoslist = false;
@@ -198,7 +199,8 @@ irdma_qp_add_qos(struct irdma_sc_qp *qp)
irdma_debug(qp->dev, IRDMA_DEBUG_DCB,
"DCB: Add qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
- qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle, qp->on_qoslist);
+ qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
+ qp->on_qoslist);
mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
if (!qp->on_qoslist) {
list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
@@ -237,17 +239,14 @@ irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
u64 scratch, bool post_sq)
{
__le64 *wqe;
- u64 temp, hdr;
+ u64 hdr;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
return -ENOSPC;
set_64bit_val(wqe, IRDMA_BYTE_8, info->reach_max);
- temp = info->mac_addr[5] | LS_64_1(info->mac_addr[4], 8) |
- LS_64_1(info->mac_addr[3], 16) | LS_64_1(info->mac_addr[2], 24) |
- LS_64_1(info->mac_addr[1], 32) | LS_64_1(info->mac_addr[0], 40);
- set_64bit_val(wqe, IRDMA_BYTE_16, temp);
+ set_64bit_val(wqe, IRDMA_BYTE_16, irdma_mac_to_u64(info->mac_addr));
hdr = info->arp_index |
FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
@@ -368,10 +367,7 @@ irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
return -ENOSPC;
- temp = info->mac_addr[5] | LS_64_1(info->mac_addr[4], 8) |
- LS_64_1(info->mac_addr[3], 16) | LS_64_1(info->mac_addr[2], 24) |
- LS_64_1(info->mac_addr[1], 32) | LS_64_1(info->mac_addr[0], 40);
- set_64bit_val(wqe, IRDMA_BYTE_0, temp);
+ set_64bit_val(wqe, IRDMA_BYTE_0, irdma_mac_to_u64(info->mac_addr));
qw1 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QPN, info->qp_num) |
FIELD_PREP(IRDMA_CQPSQ_QHASH_DEST_PORT, info->dest_port);
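Both WQE writes above now go through irdma_mac_to_u64(), whose definition is outside this hunk. A plausible sketch, consistent with the LS_64_1() chains it replaces (mac[0] lands in the most significant used byte, mac[5] in the least):

#include <stdint.h>
#include <stdio.h>

/* Plausible helper definition; the real one lives elsewhere in
 * this patch.  Packs six MAC bytes, mac[0] most significant. */
static inline uint64_t irdma_mac_to_u64(const uint8_t *mac)
{
	uint64_t v = 0;

	for (int i = 0; i < 6; i++)
		v = (v << 8) | mac[i];
	return v;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

	/* prints 1b21aabbcc: mac[5] in bits 0-7, mac[0] in bits 40-47 */
	printf("%llx\n", (unsigned long long)irdma_mac_to_u64(mac));
	return 0;
}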
@@ -710,18 +706,10 @@ irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 * qp_ctx,
struct irdma_udp_offload_info *udp;
u8 push_mode_en;
u32 push_idx;
- u64 mac;
roce_info = info->roce_info;
udp = info->udp_info;
- mac = LS_64_1(roce_info->mac_addr[5], 16) |
- LS_64_1(roce_info->mac_addr[4], 24) |
- LS_64_1(roce_info->mac_addr[3], 32) |
- LS_64_1(roce_info->mac_addr[2], 40) |
- LS_64_1(roce_info->mac_addr[1], 48) |
- LS_64_1(roce_info->mac_addr[0], 56);
-
qp->user_pri = info->user_pri;
if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
push_mode_en = 0;
@@ -796,7 +784,8 @@ irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 * qp_ctx,
FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
set_64bit_val(qp_ctx, IRDMA_BYTE_144,
FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
- set_64bit_val(qp_ctx, IRDMA_BYTE_152, mac);
+ set_64bit_val(qp_ctx, IRDMA_BYTE_152,
+ FIELD_PREP(IRDMAQPC_MACADDRESS, irdma_mac_to_u64(roce_info->mac_addr)));
set_64bit_val(qp_ctx, IRDMA_BYTE_160,
FIELD_PREP(IRDMAQPC_ORDSIZE, roce_info->ord_size) |
FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(roce_info->ird_size)) |
@@ -877,16 +866,13 @@ irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
u64 scratch, bool post_sq)
{
__le64 *wqe;
- u64 temp, header;
+ u64 header;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
return -ENOSPC;
- temp = info->mac_addr[5] | LS_64_1(info->mac_addr[4], 8) |
- LS_64_1(info->mac_addr[3], 16) | LS_64_1(info->mac_addr[2], 24) |
- LS_64_1(info->mac_addr[1], 32) | LS_64_1(info->mac_addr[0], 40);
- set_64bit_val(wqe, IRDMA_BYTE_32, temp);
+ set_64bit_val(wqe, IRDMA_BYTE_32, irdma_mac_to_u64(info->mac_addr));
header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, info->entry_idx) |
FIELD_PREP(IRDMA_CQPSQ_OPCODE,
@@ -1018,14 +1004,9 @@ irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 * qp_ctx,
FIELD_PREP(IRDMAQPC_Q2ADDR, qp->q2_pa >> 8) |
FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
- if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
- mac = LS_64_1(iw->mac_addr[5], 16) |
- LS_64_1(iw->mac_addr[4], 24) |
- LS_64_1(iw->mac_addr[3], 32) |
- LS_64_1(iw->mac_addr[2], 40) |
- LS_64_1(iw->mac_addr[1], 48) |
- LS_64_1(iw->mac_addr[0], 56);
- }
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ mac = FIELD_PREP(IRDMAQPC_MACADDRESS,
+ irdma_mac_to_u64(iw->mac_addr));
set_64bit_val(qp_ctx, IRDMA_BYTE_152,
mac | FIELD_PREP(IRDMAQPC_LASTBYTESENT, iw->last_byte_sent));
@@ -1410,8 +1391,9 @@ irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
qp->qp_uk.sq_wrtrk_array[wqe_idx].signaled = info->signaled;
irdma_debug(qp->dev, IRDMA_DEBUG_MR,
- "wr_id[%llxh] wqe_idx[%04d] location[%p]\n", (unsigned long long)info->wr_id,
- wqe_idx, &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
+ "wr_id[%llxh] wqe_idx[%04d] location[%p]\n",
+ (unsigned long long)info->wr_id, wqe_idx,
+ &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
temp = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ?
(uintptr_t)info->va : info->fbo;
@@ -1533,45 +1515,6 @@ irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
}
/**
- * irdma_sc_send_lsmm_nostag - for privilege qp
- * @qp: sc qp struct
- * @lsmm_buf: buffer with lsmm message
- * @size: size of lsmm buffer
- */
-void
-irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size)
-{
- __le64 *wqe;
- u64 hdr;
- struct irdma_qp_uk *qp_uk;
-
- qp_uk = &qp->qp_uk;
- wqe = qp_uk->sq_base->elem;
-
- set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)lsmm_buf);
-
- if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1)
- set_64bit_val(wqe, IRDMA_BYTE_8,
- FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size));
- else
- set_64bit_val(wqe, IRDMA_BYTE_8,
- FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) |
- FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
- set_64bit_val(wqe, IRDMA_BYTE_16, 0);
-
- hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) |
- FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) |
- FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) |
- FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
- irdma_wmb(); /* make sure WQE is written before valid bit is set */
-
- set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
-
- irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "SEND_LSMM_NOSTAG WQE", wqe,
- IRDMA_QP_WQE_MIN_SIZE);
-}
-
-/**
* irdma_sc_send_rtt - send last read0 or write0
* @qp: sc qp struct
* @read: Do read0 or write0
@@ -2238,16 +2181,15 @@ irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
struct irdma_up_info *info, u64 scratch)
{
__le64 *wqe;
- u64 temp;
+ u64 temp = 0;
+ int i;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
return -ENOSPC;
- temp = info->map[0] | LS_64_1(info->map[1], 8) |
- LS_64_1(info->map[2], 16) | LS_64_1(info->map[3], 24) |
- LS_64_1(info->map[4], 32) | LS_64_1(info->map[5], 40) |
- LS_64_1(info->map[6], 48) | LS_64_1(info->map[7], 56);
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
+ temp |= (u64)info->map[i] << (i * 8);
set_64bit_val(wqe, IRDMA_BYTE_0, temp);
set_64bit_val(wqe, IRDMA_BYTE_40,
@@ -2340,7 +2282,8 @@ irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
if (!flush_sq && !flush_rq) {
irdma_debug(qp->dev, IRDMA_DEBUG_CQP,
- "Additional flush request ignored for qp %x\n", qp->qp_uk.qp_id);
+ "Additional flush request ignored for qp %x\n",
+ qp->qp_uk.qp_id);
return -EALREADY;
}
@@ -2648,11 +2591,11 @@ irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch,
return -EINVAL;
ceq = cq->dev->ceq[cq->ceq_id];
- if (ceq && ceq->reg_cq)
+ if (ceq && ceq->reg_cq) {
ret_code = irdma_sc_add_cq_ctx(ceq, cq);
-
- if (ret_code)
- return ret_code;
+ if (ret_code)
+ return ret_code;
+ }
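The re-braced block ties the error check to the call that actually ran; in the old layout, ret_code was tested even when no CEQ context registration happened, so a stale or never-assigned value could abort CQ creation. A minimal standalone illustration of the hazard:

#include <stdio.h>

static int add_ctx(void) { return 0; }  /* registration succeeds */

static int create(int have_ceq, int ret /* stale from earlier */)
{
	/* old layout (buggy):
	 *	if (have_ceq)
	 *		ret = add_ctx();
	 *	if (ret)
	 *		return ret;    <- fires on the stale value
	 */
	if (have_ceq) {                 /* new layout */
		ret = add_ctx();
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	/* no CEQ registered, stale ret = -22: old layout would fail */
	printf("create() = %d\n", create(0, -22));
	return 0;
}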
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe) {
@@ -2836,11 +2779,12 @@ void
irdma_check_cqp_progress(struct irdma_cqp_timeout *timeout,
struct irdma_sc_dev *dev)
{
- if (timeout->compl_cqp_cmds != dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]) {
- timeout->compl_cqp_cmds = dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
+ u64 completed_ops = atomic64_read(&dev->cqp->completed_ops);
+
+ if (timeout->compl_cqp_cmds != completed_ops) {
+ timeout->compl_cqp_cmds = completed_ops;
timeout->count = 0;
- } else if (timeout->compl_cqp_cmds !=
- dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS]) {
+ } else if (timeout->compl_cqp_cmds != dev->cqp->requested_ops) {
timeout->count++;
}
}
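CQP progress is now tracked with a per-CQP counter pair, requested_ops and an atomic completed_ops, rather than slots in the shared cqp_cmd_stats array. The stall heuristic ticks only when commands are outstanding and no completion has landed since the previous check; a standalone model:

#include <stdint.h>
#include <stdio.h>

struct watchdog {
	uint64_t last_completed;
	unsigned count;
};

static void
tick(struct watchdog *w, uint64_t completed, uint64_t requested)
{
	if (w->last_completed != completed) {
		w->last_completed = completed;  /* progress: reset */
		w->count = 0;
	} else if (completed != requested) {
		w->count++;                     /* outstanding and stalled */
	}
}

int main(void)
{
	struct watchdog w = { 0, 0 };

	tick(&w, 0, 3);  /* 3 requests, none done: count = 1 */
	tick(&w, 2, 3);  /* progress: count resets to 0 */
	tick(&w, 2, 2);  /* idle, nothing outstanding: stays 0 */
	printf("stall ticks: %u\n", w.count);
	return 0;
}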
@@ -2885,7 +2829,7 @@ irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp, u32 tail,
if (newtail != tail) {
/* SUCCESS */
IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
- cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++;
+ atomic64_inc(&cqp->completed_ops);
return 0;
}
irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count);
@@ -3251,8 +3195,8 @@ irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
info->dev->cqp = cqp;
IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size);
- cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] = 0;
- cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS] = 0;
+ cqp->requested_ops = 0;
+ atomic64_set(&cqp->completed_ops, 0);
/* for the cqp commands backlog. */
INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head);
@@ -3262,8 +3206,8 @@ irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
irdma_debug(cqp->dev, IRDMA_DEBUG_WQE,
"sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04x]\n",
- cqp->sq_size, cqp->hw_sq_size, cqp->sq_base, (unsigned long long)cqp->sq_pa, cqp,
- cqp->polarity);
+ cqp->sq_size, cqp->hw_sq_size, cqp->sq_base,
+ (unsigned long long)cqp->sq_pa, cqp, cqp->polarity);
return 0;
}
@@ -3408,7 +3352,7 @@ irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
if (ret_code)
return NULL;
- cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS]++;
+ cqp->requested_ops++;
if (!*wqe_idx)
cqp->polarity = !cqp->polarity;
wqe = cqp->sq_base[*wqe_idx].elem;
@@ -3503,6 +3447,9 @@ irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
if (polarity != ccq->cq_uk.polarity)
return -ENOENT;
+ /* Ensure CEQE contents are read after valid bit is checked */
+ rmb();
+
get_64bit_val(cqe, IRDMA_BYTE_8, &qp_ctx);
cqp = (struct irdma_sc_cqp *)(irdma_uintptr) qp_ctx;
info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, temp);
@@ -3538,7 +3485,7 @@ irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
irdma_wmb(); /* make sure shadow area is updated before moving tail */
IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
- ccq->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++;
+ atomic64_inc(&cqp->completed_ops);
return ret_code;
}
@@ -3862,7 +3809,6 @@ irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
if (ceq->reg_cq)
irdma_sc_remove_cq_ctx(ceq, ceq->dev->ccq);
-
cqp = ceq->dev->cqp;
cqp->process_cqp_sds = irdma_update_sds_noccq;
@@ -3887,7 +3833,6 @@ irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
if (ret_code)
return ret_code;
}
-
ret_code = irdma_sc_ceq_create(ceq, scratch, true);
if (!ret_code)
return irdma_sc_cceq_create_done(ceq);
@@ -3977,7 +3922,6 @@ irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq)
cq_idx = irdma_sc_find_reg_cq(ceq, cq);
spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
}
-
IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
ceq->polarity ^= 1;
@@ -4158,13 +4102,17 @@ irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
u8 polarity;
aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq);
- get_64bit_val(aeqe, IRDMA_BYTE_0, &compl_ctx);
get_64bit_val(aeqe, IRDMA_BYTE_8, &temp);
polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp);
if (aeq->polarity != polarity)
return -ENOENT;
+ /* Ensure AEQE contents are read after valid bit is checked */
+ rmb();
+
+ get_64bit_val(aeqe, IRDMA_BYTE_0, &compl_ctx);
+
irdma_debug_buf(aeq->dev, IRDMA_DEBUG_WQE, "AEQ_ENTRY WQE", aeqe, 16);
ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC, temp);
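The new rmb(), together with moving the compl_ctx read below the polarity check, guarantees the AEQE payload is loaded only after the valid bit has been observed; without the barrier the CPU may hoist the payload reads and see a half-written entry. A userland C11 analogue using an acquire fence:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct elem {
	uint64_t payload;
	atomic_uint valid;
};

static int consume(struct elem *e, unsigned polarity, uint64_t *out)
{
	if (atomic_load_explicit(&e->valid, memory_order_relaxed) != polarity)
		return -1;                  /* not ours yet (-ENOENT) */

	/* matches rmb(): payload reads cannot be hoisted above here */
	atomic_thread_fence(memory_order_acquire);

	*out = e->payload;
	return 0;
}

int main(void)
{
	struct elem e = { .payload = 42 };
	uint64_t v;

	atomic_store(&e.valid, 1);          /* producer publishes */
	if (consume(&e, 1, &v) == 0)
		printf("payload %llu\n", (unsigned long long)v);
	return 0;
}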
@@ -4751,16 +4699,18 @@ irdma_cqp_ring_full(struct irdma_sc_cqp *cqp)
*/
static u32 irdma_est_sd(struct irdma_sc_dev *dev,
struct irdma_hmc_info *hmc_info){
- int i;
+ struct irdma_hmc_obj_info *pble_info;
u64 size = 0;
u64 sd;
+ int i;
for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
if (i != IRDMA_HMC_IW_PBLE)
size += round_up(hmc_info->hmc_obj[i].cnt *
hmc_info->hmc_obj[i].size, 512);
- size += round_up(hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt *
- hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].size, 512);
+
+ pble_info = &hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE];
+ size += round_up(pble_info->cnt * pble_info->size, 512);
if (size & 0x1FFFFF)
sd = (size >> 21) + 1; /* add 1 for remainder */
else
@@ -4845,8 +4795,7 @@ irdma_get_rdma_features(struct irdma_sc_dev *dev)
goto exit;
} else if (feat_cnt > IRDMA_MAX_FEATURES) {
irdma_debug(dev, IRDMA_DEBUG_DEV,
- "feature buf size insufficient,"
- "retrying with larger buffer\n");
+ "feature buf size insufficient, retrying with larger buffer\n");
irdma_free_dma_mem(dev->hw, &feat_buf);
feat_buf.size = 8 * feat_cnt;
feat_buf.va = irdma_allocate_dma_mem(dev->hw, &feat_buf,
@@ -5061,9 +5010,9 @@ irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
pblewanted > (512 * FPM_MULTIPLIER * sd_diff)) {
pblewanted -= 256 * FPM_MULTIPLIER * sd_diff;
continue;
- } else if (pblewanted > (100 * FPM_MULTIPLIER)) {
+ } else if (pblewanted > 100 * FPM_MULTIPLIER) {
pblewanted -= 10 * FPM_MULTIPLIER;
- } else if (pblewanted > FPM_MULTIPLIER) {
+ } else if (pblewanted > 16 * FPM_MULTIPLIER) {
pblewanted -= FPM_MULTIPLIER;
} else if (qpwanted <= 128) {
if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt > 256)
@@ -5457,6 +5406,7 @@ void
irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable)
{
u32 reg_val;
+
reg_val = FIELD_PREP(IRDMA_PFINT_AEQCTL_CAUSE_ENA, enable) |
FIELD_PREP(IRDMA_PFINT_AEQCTL_MSIX_INDX, idx) |
FIELD_PREP(IRDMA_PFINT_AEQCTL_ITR_INDX, IRDMA_IDX_NOITR);
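For reference, the arithmetic behind the irdma_est_sd() rework above: each object's footprint is rounded up to 512 bytes and the total is carved into 2 MB segment descriptors, the 0x1FFFFF mask testing for a partial final SD. A small standalone version with a worked example:

#include <stdint.h>
#include <stdio.h>

static uint64_t round_up_512(uint64_t x)
{
	return (x + 511) & ~(uint64_t)511;
}

static uint64_t est_sd(const uint64_t *sizes, int n)
{
	uint64_t total = 0;

	for (int i = 0; i < n; i++)
		total += round_up_512(sizes[i]);
	/* 2 MB SDs: size >> 21, plus one if a remainder is left */
	return (total & 0x1FFFFF) ? (total >> 21) + 1 : total >> 21;
}

int main(void)
{
	uint64_t sizes[] = { 3u << 20, 100 };  /* 3 MB plus a sliver */

	/* 3 MB spans two 2 MB SDs; the extra rounded-up 512 B still
	 * fits in the second descriptor's slack, so the answer is 2 */
	printf("SDs: %llu\n", (unsigned long long)est_sd(sizes, 2));
	return 0;
}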
diff --git a/sys/dev/irdma/irdma_defs.h b/sys/dev/irdma/irdma_defs.h
index 37e664ba69b3..dcd6a0b5956b 100644
--- a/sys/dev/irdma/irdma_defs.h
+++ b/sys/dev/irdma/irdma_defs.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2022 Intel Corporation
+ * Copyright (c) 2015 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -249,40 +249,38 @@ enum irdma_cqp_op_type {
IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE = 22,
IRDMA_OP_SUSPEND = 23,
IRDMA_OP_RESUME = 24,
- IRDMA_OP_MANAGE_VF_PBLE_BP = 25,
+ IRDMA_OP_MANAGE_VCHNL_REQ_PBLE_BP = 25,
IRDMA_OP_QUERY_FPM_VAL = 26,
IRDMA_OP_COMMIT_FPM_VAL = 27,
- IRDMA_OP_REQ_CMDS = 28,
- IRDMA_OP_CMPL_CMDS = 29,
- IRDMA_OP_AH_CREATE = 30,
- IRDMA_OP_AH_MODIFY = 31,
- IRDMA_OP_AH_DESTROY = 32,
- IRDMA_OP_MC_CREATE = 33,
- IRDMA_OP_MC_DESTROY = 34,
- IRDMA_OP_MC_MODIFY = 35,
- IRDMA_OP_STATS_ALLOCATE = 36,
- IRDMA_OP_STATS_FREE = 37,
- IRDMA_OP_STATS_GATHER = 38,
- IRDMA_OP_WS_ADD_NODE = 39,
- IRDMA_OP_WS_MODIFY_NODE = 40,
- IRDMA_OP_WS_DELETE_NODE = 41,
- IRDMA_OP_WS_FAILOVER_START = 42,
- IRDMA_OP_WS_FAILOVER_COMPLETE = 43,
- IRDMA_OP_SET_UP_MAP = 44,
- IRDMA_OP_GEN_AE = 45,
- IRDMA_OP_QUERY_RDMA_FEATURES = 46,
- IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY = 47,
- IRDMA_OP_ADD_LOCAL_MAC_ENTRY = 48,
- IRDMA_OP_DELETE_LOCAL_MAC_ENTRY = 49,
- IRDMA_OP_CQ_MODIFY = 50,
+ IRDMA_OP_AH_CREATE = 28,
+ IRDMA_OP_AH_MODIFY = 29,
+ IRDMA_OP_AH_DESTROY = 30,
+ IRDMA_OP_MC_CREATE = 31,
+ IRDMA_OP_MC_DESTROY = 32,
+ IRDMA_OP_MC_MODIFY = 33,
+ IRDMA_OP_STATS_ALLOCATE = 34,
+ IRDMA_OP_STATS_FREE = 35,
+ IRDMA_OP_STATS_GATHER = 36,
+ IRDMA_OP_WS_ADD_NODE = 37,
+ IRDMA_OP_WS_MODIFY_NODE = 38,
+ IRDMA_OP_WS_DELETE_NODE = 39,
+ IRDMA_OP_WS_FAILOVER_START = 40,
+ IRDMA_OP_WS_FAILOVER_COMPLETE = 41,
+ IRDMA_OP_SET_UP_MAP = 42,
+ IRDMA_OP_GEN_AE = 43,
+ IRDMA_OP_QUERY_RDMA_FEATURES = 44,
+ IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY = 45,
+ IRDMA_OP_ADD_LOCAL_MAC_ENTRY = 46,
+ IRDMA_OP_DELETE_LOCAL_MAC_ENTRY = 47,
+ IRDMA_OP_CQ_MODIFY = 48,
/* Must be last entry */
- IRDMA_MAX_CQP_OPS = 51,
+ IRDMA_MAX_CQP_OPS = 49,
};
/* CQP SQ WQES */
-#define IRDMA_CQP_OP_CREATE_QP 0
-#define IRDMA_CQP_OP_MODIFY_QP 0x1
+#define IRDMA_CQP_OP_CREATE_QP 0x00
+#define IRDMA_CQP_OP_MODIFY_QP 0x01
#define IRDMA_CQP_OP_DESTROY_QP 0x02
#define IRDMA_CQP_OP_CREATE_CQ 0x03
#define IRDMA_CQP_OP_MODIFY_CQ 0x04
@@ -294,12 +292,11 @@ enum irdma_cqp_op_type {
#define IRDMA_CQP_OP_DEALLOC_STAG 0x0d
#define IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE 0x0e
#define IRDMA_CQP_OP_MANAGE_ARP 0x0f
-#define IRDMA_CQP_OP_MANAGE_VF_PBLE_BP 0x10
+#define IRDMA_CQP_OP_MANAGE_VCHNL_REQ_PBLE_BP 0x10
#define IRDMA_CQP_OP_MANAGE_PUSH_PAGES 0x11
#define IRDMA_CQP_OP_QUERY_RDMA_FEATURES 0x12
#define IRDMA_CQP_OP_UPLOAD_CONTEXT 0x13
#define IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY 0x14
-#define IRDMA_CQP_OP_UPLOAD_CONTEXT 0x13
#define IRDMA_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE 0x15
#define IRDMA_CQP_OP_CREATE_CEQ 0x16
#define IRDMA_CQP_OP_DESTROY_CEQ 0x18
@@ -699,7 +696,6 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_QP_MACVALID BIT_ULL(51)
#define IRDMA_CQPSQ_QP_MSSCHANGE_S 52
#define IRDMA_CQPSQ_QP_MSSCHANGE BIT_ULL(52)
-
#define IRDMA_CQPSQ_QP_IGNOREMWBOUND_S 54
#define IRDMA_CQPSQ_QP_IGNOREMWBOUND BIT_ULL(54)
#define IRDMA_CQPSQ_QP_REMOVEHASHENTRY_S 55
@@ -1402,7 +1398,7 @@ enum irdma_cqp_op_type {
#define IRDMA_GET_CQ_ELEM_AT_OFFSET(_cq, _i, _cqe) \
{ \
- register __u32 offset; \
+ __u32 offset; \
offset = IRDMA_GET_RING_OFFSET((_cq)->cq_ring, _i); \
(_cqe) = (_cq)->cq_base[offset].buf; \
}
@@ -1428,7 +1424,7 @@ enum irdma_cqp_op_type {
#define IRDMA_RING_MOVE_HEAD(_ring, _retcode) \
{ \
- register u32 size; \
+ u32 size; \
size = (_ring).size; \
if (!IRDMA_RING_FULL_ERR(_ring)) { \
(_ring).head = ((_ring).head + 1) % size; \
@@ -1439,7 +1435,7 @@ enum irdma_cqp_op_type {
}
#define IRDMA_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
{ \
- register u32 size; \
+ u32 size; \
size = (_ring).size; \
if ((IRDMA_RING_USED_QUANTA(_ring) + (_count)) < size) { \
(_ring).head = ((_ring).head + (_count)) % size; \
@@ -1450,7 +1446,7 @@ enum irdma_cqp_op_type {
}
#define IRDMA_SQ_RING_MOVE_HEAD(_ring, _retcode) \
{ \
- register u32 size; \
+ u32 size; \
size = (_ring).size; \
if (!IRDMA_SQ_RING_FULL_ERR(_ring)) { \
(_ring).head = ((_ring).head + 1) % size; \
@@ -1461,7 +1457,7 @@ enum irdma_cqp_op_type {
}
#define IRDMA_SQ_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
{ \
- register u32 size; \
+ u32 size; \
size = (_ring).size; \
if ((IRDMA_RING_USED_QUANTA(_ring) + (_count)) < (size - 256)) { \
(_ring).head = ((_ring).head + (_count)) % size; \
@@ -1553,6 +1549,19 @@ enum irdma_qp_wqe_size {
IRDMA_WQE_SIZE_256 = 256,
};
+enum irdma_ws_op_type {
+ IRDMA_WS_OP_TYPE_NODE = 0,
+ IRDMA_WS_OP_TYPE_LEAF_NODE_GROUP,
+};
+
+enum irdma_ws_rate_limit_flags {
+ IRDMA_WS_RATE_LIMIT_FLAGS_VALID = 0x1,
+ IRDMA_WS_NO_RDMA_RATE_LIMIT = 0x2,
+ IRDMA_WS_LEAF_NODE_IS_PART_GROUP = 0x4,
+ IRDMA_WS_TREE_RATE_LIMITING = 0x8,
+ IRDMA_WS_PACING_CONTROL = 0x10,
+};
+
enum irdma_ws_node_op {
IRDMA_ADD_NODE = 0,
IRDMA_MODIFY_NODE,
@@ -1579,12 +1588,6 @@ enum irdma_alignment {
IRDMA_FEATURE_BUF_ALIGNMENT = 0x10,
};
-enum icrdma_protocol_used {
- ICRDMA_ANY_PROTOCOL = 0,
- ICRDMA_IWARP_PROTOCOL_ONLY = 1,
- ICRDMA_ROCE_PROTOCOL_ONLY = 2,
-};
-
/**
* set_64bit_val - set 64 bit value to hw wqe
* @wqe_words: wqe addr to write
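The compacted op numbering above stays safe because consumers never hard-code op values: lookup tables are sized by the IRDMA_MAX_CQP_OPS sentinel (which must remain the last entry) and filled with designated initializers keyed by the enum. A sketch of that convention, mirroring the irdma_cqp_cmd_names table updated in irdma_utils.c below:

static const char *const op_names[IRDMA_MAX_CQP_OPS] = {
	[IRDMA_OP_SUSPEND] = "Suspend QP Cmd",
	[IRDMA_OP_RESUME] = "Resume QP Cmd",
	[IRDMA_OP_QUERY_FPM_VAL] = "Query FPM Values Cmd",
	/* ... one entry per op; unnamed slots default to NULL ... */
};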
diff --git a/sys/dev/irdma/irdma_hmc.c b/sys/dev/irdma/irdma_hmc.c
index 1da0c9da9746..a3c47c8b1434 100644
--- a/sys/dev/irdma/irdma_hmc.c
+++ b/sys/dev/irdma/irdma_hmc.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2022 Intel Corporation
+ * Copyright (c) 2015 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -421,7 +421,7 @@ irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
irdma_debug(dev, IRDMA_DEBUG_HMC,
- "error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
+ "error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
info->start_idx, info->rsrc_type,
info->hmc_info->hmc_obj[info->rsrc_type].cnt);
return -EINVAL;
@@ -430,7 +430,7 @@ irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
if ((info->start_idx + info->count) >
info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
irdma_debug(dev, IRDMA_DEBUG_HMC,
- "error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\n",
+ "error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\n",
info->start_idx, info->count, info->rsrc_type,
info->hmc_info->hmc_obj[info->rsrc_type].cnt);
return -EINVAL;
diff --git a/sys/dev/irdma/irdma_hmc.h b/sys/dev/irdma/irdma_hmc.h
index 7babecfcaf59..f0b0eff5d127 100644
--- a/sys/dev/irdma/irdma_hmc.h
+++ b/sys/dev/irdma/irdma_hmc.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2022 Intel Corporation
+ * Copyright (c) 2015 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -184,10 +184,6 @@ int irdma_hmc_sd_one(struct irdma_sc_dev *dev, u16 hmc_fn_id, u64 pa, u32 sd_idx
bool setsd);
int irdma_update_sds_noccq(struct irdma_sc_dev *dev,
struct irdma_update_sds_info *info);
-struct irdma_vfdev *irdma_vfdev_from_fpm(struct irdma_sc_dev *dev,
- u16 hmc_fn_id);
-struct irdma_hmc_info *irdma_vf_hmcinfo_from_fpm(struct irdma_sc_dev *dev,
- u16 hmc_fn_id);
int irdma_add_sd_table_entry(struct irdma_hw *hw,
struct irdma_hmc_info *hmc_info, u32 sd_index,
enum irdma_sd_entry_type type, u64 direct_mode_sz);
diff --git a/sys/dev/irdma/irdma_hw.c b/sys/dev/irdma/irdma_hw.c
index e00578b16dc3..e80ffc1d2fd7 100644
--- a/sys/dev/irdma/irdma_hw.c
+++ b/sys/dev/irdma/irdma_hw.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2022 Intel Corporation
+ * Copyright (c) 2015 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -123,8 +123,7 @@ irdma_puda_ce_handler(struct irdma_pci_f *rf,
}
if (compl_error) {
irdma_debug(dev, IRDMA_DEBUG_ERR,
- "puda compl_err =0x%x\n",
- compl_error);
+ "puda compl_err = 0x%x\n", compl_error);
break;
}
} while (1);
@@ -193,12 +192,11 @@ static void
irdma_complete_cqp_request(struct irdma_cqp *cqp,
struct irdma_cqp_request *cqp_request)
{
- if (cqp_request->waiting) {
- cqp_request->request_done = true;
+ WRITE_ONCE(cqp_request->request_done, true);
+ if (cqp_request->waiting)
wake_up(&cqp_request->waitq);
- } else if (cqp_request->callback_fcn) {
+ else if (cqp_request->callback_fcn)
cqp_request->callback_fcn(cqp_request);
- }
irdma_put_cqp_request(cqp, cqp_request);
}
@@ -235,8 +233,9 @@ irdma_process_aeq(struct irdma_pci_f *rf)
aeqcnt++;
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_AEQ,
- "ae_id = 0x%x bool qp=%d qp_id = %d tcp_state=%d iwarp_state=%d ae_src=%d\n",
- info->ae_id, info->qp, info->qp_cq_id, info->tcp_state,
+ "ae_id = 0x%x (%s), is_qp = %d, qp_id = %d, tcp_state = %d, iwarp_state = %d, ae_src = %d\n",
+ info->ae_id, irdma_get_ae_desc(info->ae_id),
+ info->qp, info->qp_cq_id, info->tcp_state,
info->iwarp_state, info->ae_src);
if (info->qp) {
@@ -277,6 +276,7 @@ irdma_process_aeq(struct irdma_pci_f *rf)
switch (info->ae_id) {
struct irdma_cm_node *cm_node;
+
case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
cm_node = iwqp->cm_node;
if (cm_node->accept_pend) {
@@ -322,7 +322,11 @@ irdma_process_aeq(struct irdma_pci_f *rf)
break;
case IRDMA_AE_QP_SUSPEND_COMPLETE:
if (iwqp->iwdev->vsi.tc_change_pending) {
- atomic_dec(&iwqp->sc_qp.vsi->qp_suspend_reqs);
+ if (!atomic_dec_return(&iwqp->sc_qp.vsi->qp_suspend_reqs))
+ wake_up(&iwqp->iwdev->suspend_wq);
+ }
+ if (iwqp->suspend_pending) {
+ iwqp->suspend_pending = false;
wake_up(&iwqp->iwdev->suspend_wq);
}
break;
@@ -392,8 +396,10 @@ irdma_process_aeq(struct irdma_pci_f *rf)
case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
default:
- irdma_dev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d ae_source=%d\n",
- info->ae_id, info->qp, info->qp_cq_id, info->ae_src);
+ irdma_dev_err(&iwdev->ibdev,
+ "AEQ: abnormal ae_id = 0x%x (%s), is_qp = %d, qp_id = %d, ae_source = %d\n",
+ info->ae_id, irdma_get_ae_desc(info->ae_id),
+ info->qp, info->qp_cq_id, info->ae_src);
if (rdma_protocol_roce(&iwqp->iwdev->ibdev, 1)) {
ctx_info->roce_info->err_rq_idx_valid = info->err_rq_idx_valid;
if (info->rq) {
@@ -697,8 +703,7 @@ irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq)
status = irdma_sc_cceq_destroy_done(&iwceq->sc_ceq);
if (status)
irdma_debug(dev, IRDMA_DEBUG_ERR,
- "CEQ destroy completion failed %d\n",
- status);
+ "CEQ destroy completion failed %d\n", status);
exit:
spin_lock_destroy(&iwceq->ce_lock);
spin_lock_destroy(&iwceq->sc_ceq.req_cq_lock);
@@ -810,8 +815,7 @@ irdma_close_hmc_objects_type(struct irdma_sc_dev *dev,
info.privileged = privileged;
if (irdma_sc_del_hmc_obj(dev, &info, reset))
irdma_debug(dev, IRDMA_DEBUG_ERR,
- "del HMC obj of type %d failed\n",
- obj_type);
+ "del HMC obj of type %d failed\n", obj_type);
}
/**
@@ -1025,7 +1029,7 @@ irdma_create_cqp(struct irdma_pci_f *rf)
irdma_debug(dev, IRDMA_DEBUG_ERR,
"cqp create failed - status %d maj_err %d min_err %d\n",
status, maj_err, min_err);
- goto err_create;
+ goto err_ctx;
}
INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
@@ -1039,7 +1043,6 @@ irdma_create_cqp(struct irdma_pci_f *rf)
init_waitqueue_head(&cqp->remove_wq);
return 0;
-err_create:
err_ctx:
irdma_free_dma_mem(dev->hw, &cqp->sq);
err_sq:
@@ -1121,7 +1124,7 @@ irdma_alloc_set_mac(struct irdma_device *iwdev)
&iwdev->mac_ip_table_idx);
if (!status) {
status = irdma_add_local_mac_entry(iwdev->rf,
- (const u8 *)IF_LLADDR(iwdev->netdev),
+ (const u8 *)if_getlladdr(iwdev->netdev),
(u8)iwdev->mac_ip_table_idx);
if (status)
irdma_del_local_mac_entry(iwdev->rf,
@@ -1154,16 +1157,14 @@ irdma_irq_request(struct irdma_pci_f *rf,
msix_vec->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
if (!msix_vec->res) {
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR,
- "Unable to allocate bus resource int[%d]\n",
- rid);
+ "Unable to allocate bus resource int[%d]\n", rid);
return -EINVAL;
}
err = bus_setup_intr(dev, msix_vec->res, INTR_TYPE_NET | INTR_MPSAFE,
NULL, handler, argument, &msix_vec->tag);
if (err) {
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR,
- "Unable to register handler with %x status\n",
- err);
+ "Unable to register handler with %x status\n", err);
status = -EINVAL;
goto fail_intr;
}
@@ -1229,20 +1230,21 @@ static int
irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
{
struct irdma_msix_vector *msix_vec = rf->iw_msixtbl;
- u32 ret = 0;
+ int status = 0;
if (!rf->msix_shared) {
snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
"irdma-%s-AEQ", dev_name(&rf->pcidev->dev));
tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
- ret = irdma_irq_request(rf, msix_vec, irdma_irq_handler, rf);
- if (ret)
- return ret;
+ status = irdma_irq_request(rf, msix_vec, irdma_irq_handler, rf);
+ if (status)
+ return status;
bus_describe_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->tag, "%s", msix_vec->name);
}
- if (ret) {
+
+ if (status) {
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR, "aeq irq config fail\n");
- return -EINVAL;
+ return status;
}
rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true);
@@ -1337,8 +1339,7 @@ irdma_setup_ceq_0(struct irdma_pci_f *rf)
status = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi);
if (status) {
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR,
- "create ceq status = %d\n",
- status);
+ "create ceq status = %d\n", status);
goto exit;
}
@@ -1393,8 +1394,7 @@ irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi)
status = irdma_create_ceq(rf, iwceq, ceq_id, vsi);
if (status) {
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR,
- "create ceq status = %d\n",
- status);
+ "create ceq status = %d\n", status);
goto del_ceqs;
}
spin_lock_init(&iwceq->ce_lock);
@@ -1633,8 +1633,8 @@ irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi)
static int
irdma_hmc_setup(struct irdma_pci_f *rf)
{
- int status;
struct irdma_sc_dev *dev = &rf->sc_dev;
+ int status;
u32 qpcnt;
qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;
@@ -1751,6 +1751,7 @@ void
irdma_rt_deinit_hw(struct irdma_device *iwdev)
{
struct irdma_sc_qp qp = {{0}};
+
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_INIT, "state = %d\n", iwdev->init_state);
switch (iwdev->init_state) {
@@ -2012,6 +2013,7 @@ irdma_ctrl_init_hw(struct irdma_pci_f *rf)
{
struct irdma_sc_dev *dev = &rf->sc_dev;
int status;
+
do {
status = irdma_setup_init_state(rf);
if (status)
@@ -2428,8 +2430,9 @@ irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev,
cqp_info->post_sq = 1;
cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp;
cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
- irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DEV, "%s: port=0x%04x\n",
- (!add_port) ? "DELETE" : "ADD", accel_local_port);
+ irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DEV,
+ "%s: port=0x%04x\n", (!add_port) ? "DELETE" : "ADD",
+ accel_local_port);
status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
@@ -2590,6 +2593,9 @@ irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
if (!cqp_request)
return -ENOMEM;
+ cminfo->cqp_request = cqp_request;
+ if (!wait)
+ atomic_inc(&cqp_request->refcnt);
cqp_info = &cqp_request->info;
info = &cqp_info->in.u.manage_qhash_table_entry.info;
memset(info, 0, sizeof(*info));
@@ -2604,7 +2610,7 @@ irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
}
info->ipv4_valid = cminfo->ipv4;
info->user_pri = cminfo->user_pri;
- ether_addr_copy(info->mac_addr, IF_LLADDR(iwdev->netdev));
+ ether_addr_copy(info->mac_addr, if_getlladdr(iwdev->netdev));
info->qp_num = cminfo->qh_qpid;
info->dest_port = cminfo->loc_port;
info->dest_ip[0] = cminfo->loc_addr[0];
@@ -2630,19 +2636,24 @@ irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
}
if (info->ipv4_valid)
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "%s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI4 rem_addr=%pI4 mac=%pM, vlan_id=%d cm_node=%p\n",
+ "%s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%x rem_addr=%x mac=%x:%x:%x:%x:%x:%x, vlan_id=%d cm_node=%p\n",
(!mtype) ? "DELETE" : "ADD",
- __builtin_return_address(0), info->dest_port,
- info->src_port, info->dest_ip, info->src_ip,
- info->mac_addr, cminfo->vlan_id,
- cmnode ? cmnode : NULL);
+ __builtin_return_address(0), info->src_port,
+ info->dest_port, info->src_ip[0], info->dest_ip[0],
+ info->mac_addr[0], info->mac_addr[1],
+ info->mac_addr[2], info->mac_addr[3],
+ info->mac_addr[4], info->mac_addr[5],
+ cminfo->vlan_id, cmnode ? cmnode : NULL);
else
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
- "%s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI6 rem_addr=%pI6 mac=%pM, vlan_id=%d cm_node=%p\n",
+ "%s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%x:%x:%x:%x rem_addr=%x:%x:%x:%x mac=%x:%x:%x:%x:%x:%x, vlan_id=%d cm_node=%p\n",
(!mtype) ? "DELETE" : "ADD",
- __builtin_return_address(0), info->dest_port,
- info->src_port, info->dest_ip, info->src_ip,
- info->mac_addr, cminfo->vlan_id,
+ __builtin_return_address(0), info->src_port,
+ info->dest_port, IRDMA_PRINT_IP6(info->src_ip),
+ IRDMA_PRINT_IP6(info->dest_ip), info->mac_addr[0],
+ info->mac_addr[1], info->mac_addr[2],
+ info->mac_addr[3], info->mac_addr[4],
+ info->mac_addr[5], cminfo->vlan_id,
cmnode ? cmnode : NULL);
cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp;
@@ -2716,7 +2727,8 @@ irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
"qp_id=%d qp_type=%d qpstate=%d ibqpstate=%d last_aeq=%d hw_iw_state=%d maj_err_code=%d min_err_code=%d\n",
iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state,
iwqp->ibqp_state, iwqp->last_aeq, iwqp->hw_iwarp_state,
- cqp_request->compl_info.maj_err_code, cqp_request->compl_info.min_err_code);
+ cqp_request->compl_info.maj_err_code,
+ cqp_request->compl_info.min_err_code);
put_cqp:
irdma_put_cqp_request(&rf->cqp, cqp_request);
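irdma_complete_cqp_request() above now publishes completion through WRITE_ONCE() before checking the waiting flag; it pairs with a READ_ONCE() poll in irdma_wait_event() (irdma_utils.c, below), so the compiler can neither cache nor tear either access. The handshake, condensed from both sides of this patch:

/* completer, interrupt path */
WRITE_ONCE(cqp_request->request_done, true);
if (cqp_request->waiting)
	wake_up(&cqp_request->waitq);

/* waiter, process context */
wait_event_timeout(cqp_request->waitq,
		   READ_ONCE(cqp_request->request_done),
		   msecs_to_jiffies(wait_time_ms));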
diff --git a/sys/dev/irdma/irdma_kcompat.c b/sys/dev/irdma/irdma_kcompat.c
index e6835285fefb..1ddfd6791fc0 100644
--- a/sys/dev/irdma/irdma_kcompat.c
+++ b/sys/dev/irdma/irdma_kcompat.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2018 - 2022 Intel Corporation
+ * Copyright (c) 2018 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -405,36 +405,38 @@ irdma_find_qp_update_qs(struct irdma_pci_f *rf,
static void
irdma_fill_ah_info(struct vnet *vnet, struct irdma_ah_info *ah_info,
const struct ib_gid_attr *sgid_attr,
- struct sockaddr *sgid_addr, struct sockaddr *dgid_addr,
+ union irdma_sockaddr *sgid_addr,
+ union irdma_sockaddr *dgid_addr,
u8 *dmac, u8 net_type)
{
if (net_type == RDMA_NETWORK_IPV4) {
ah_info->ipv4_valid = true;
ah_info->dest_ip_addr[0] =
- ntohl(((struct sockaddr_in *)dgid_addr)->sin_addr.s_addr);
+ ntohl(dgid_addr->saddr_in.sin_addr.s_addr);
ah_info->src_ip_addr[0] =
- ntohl(((struct sockaddr_in *)sgid_addr)->sin_addr.s_addr);
+ ntohl(sgid_addr->saddr_in.sin_addr.s_addr);
CURVNET_SET_QUIET(vnet);
ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0],
ah_info->dest_ip_addr[0]);
CURVNET_RESTORE();
- if (ipv4_is_multicast(((struct sockaddr_in *)dgid_addr)->sin_addr.s_addr)) {
+ if (ipv4_is_multicast(dgid_addr->saddr_in.sin_addr.s_addr)) {
irdma_mcast_mac_v4(ah_info->dest_ip_addr, dmac);
}
} else {
irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
- ((struct sockaddr_in6 *)dgid_addr)->sin6_addr.__u6_addr.__u6_addr32);
+ dgid_addr->saddr_in6.sin6_addr.__u6_addr.__u6_addr32);
irdma_copy_ip_ntohl(ah_info->src_ip_addr,
- ((struct sockaddr_in6 *)sgid_addr)->sin6_addr.__u6_addr.__u6_addr32);
+ sgid_addr->saddr_in6.sin6_addr.__u6_addr.__u6_addr32);
ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr,
ah_info->dest_ip_addr);
- if (rdma_is_multicast_addr(&((struct sockaddr_in6 *)dgid_addr)->sin6_addr)) {
+ if (rdma_is_multicast_addr(&dgid_addr->saddr_in6.sin6_addr)) {
irdma_mcast_mac_v6(ah_info->dest_ip_addr, dmac);
}
}
}
-static inline u8 irdma_get_vlan_ndev_prio(struct ifnet *ndev, u8 prio){
+static inline u8 irdma_roce_get_vlan_prio(if_t ndev, u8 prio)
+{
return prio;
}
@@ -461,10 +463,9 @@ irdma_create_ah_vlan_tag(struct irdma_device *iwdev,
ah_info->vlan_tag = 0;
if (ah_info->vlan_tag < VLAN_N_VID) {
- struct ifnet *ndev = sgid_attr->ndev;
-
ah_info->insert_vlan_tag = true;
- vlan_prio = (u16)irdma_get_vlan_ndev_prio(ndev, rt_tos2priority(ah_info->tc_tos));
+ vlan_prio = (u16)irdma_roce_get_vlan_prio(sgid_attr->ndev,
+ rt_tos2priority(ah_info->tc_tos));
ah_info->vlan_tag |= vlan_prio << VLAN_PRIO_SHIFT;
irdma_find_qp_update_qs(iwdev->rf, pd, vlan_prio);
}
@@ -539,14 +540,10 @@ irdma_create_ah(struct ib_pd *ibpd,
struct irdma_sc_ah *sc_ah;
u32 ah_id = 0;
struct irdma_ah_info *ah_info;
- struct irdma_create_ah_resp uresp;
- union {
- struct sockaddr saddr;
- struct sockaddr_in saddr_in;
- struct sockaddr_in6 saddr_in6;
- } sgid_addr, dgid_addr;
+ struct irdma_create_ah_resp uresp = {};
+ union irdma_sockaddr sgid_addr, dgid_addr;
int err;
- u8 dmac[ETH_ALEN];
+ u8 dmac[ETHER_ADDR_LEN];
bool sleep = udata ? true : false;
if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
@@ -585,20 +582,16 @@ irdma_create_ah(struct ib_pd *ibpd,
rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid);
rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid);
ah->av.attrs = *attr;
- ah->av.net_type = kc_rdma_gid_attr_network_type(sgid_attr,
- sgid_attr.gid_type,
- &sgid);
+ ah->av.net_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
if (sgid_attr.ndev)
dev_put(sgid_attr.ndev);
- ah->av.sgid_addr.saddr = sgid_addr.saddr;
- ah->av.dgid_addr.saddr = dgid_addr.saddr;
ah_info = &sc_ah->ah_info;
ah_info->ah_idx = ah_id;
ah_info->pd_idx = pd->sc_pd.pd_id;
- ether_addr_copy(ah_info->mac_addr, IF_LLADDR(iwdev->netdev));
+ ether_addr_copy(ah_info->mac_addr, if_getlladdr(iwdev->netdev));
if (attr->ah_flags & IB_AH_GRH) {
ah_info->flow_label = attr->grh.flow_label;
ah_info->hop_ttl = attr->grh.hop_limit;
@@ -609,7 +602,7 @@ irdma_create_ah(struct ib_pd *ibpd,
ib_resolve_eth_dmac(ibpd->device, attr);
irdma_ether_copy(dmac, attr);
- irdma_fill_ah_info(iwdev->netdev->if_vnet, ah_info, &sgid_attr, &sgid_addr.saddr, &dgid_addr.saddr,
+ irdma_fill_ah_info(if_getvnet(iwdev->netdev), ah_info, &sgid_attr, &sgid_addr, &dgid_addr,
dmac, ah->av.net_type);
err = irdma_create_ah_vlan_tag(iwdev, pd, ah_info, &sgid_attr, dmac);
@@ -631,7 +624,7 @@ irdma_create_ah(struct ib_pd *ibpd,
if (udata) {
uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
- err = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen));
if (err) {
irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah,
IRDMA_OP_AH_DESTROY, false, NULL, ah);
@@ -779,6 +772,8 @@ irdma_create_qp(struct ib_pd *ibpd,
init_waitqueue_head(&iwqp->waitq);
init_waitqueue_head(&iwqp->mod_qp_waitq);
+ spin_lock_init(&iwqp->dwork_flush_lock);
+
if (udata) {
init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info, init_attr);
@@ -902,15 +897,9 @@ irdma_destroy_qp(struct ib_qp *ibqp)
list_del(&iwqp->ud_list_elem);
spin_unlock_irqrestore(&iwqp->iwpd->udqp_list_lock, flags);
- if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
+ if (iwqp->iwarp_state >= IRDMA_QP_STATE_IDLE)
irdma_modify_qp_to_err(&iwqp->sc_qp);
- irdma_qp_rem_ref(&iwqp->ibqp);
- wait_for_completion(&iwqp->free_qp);
- irdma_free_lsmm_rsrc(iwqp);
- if (!iwdev->rf->reset && irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp))
- return (iwdev->rf->rdma_ver <= IRDMA_GEN_2 && !iwqp->user_mode) ? 0 : -ENOTRECOVERABLE;
-free_rsrc:
if (!iwqp->user_mode) {
if (iwqp->iwscq) {
irdma_clean_cqes(iwqp, iwqp->iwscq);
@@ -918,6 +907,12 @@ free_rsrc:
irdma_clean_cqes(iwqp, iwqp->iwrcq);
}
}
+ irdma_qp_rem_ref(&iwqp->ibqp);
+ wait_for_completion(&iwqp->free_qp);
+ irdma_free_lsmm_rsrc(iwqp);
+ if (!iwdev->rf->reset && irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp))
+ return (iwdev->rf->rdma_ver <= IRDMA_GEN_2 && !iwqp->user_mode) ? 0 : -ENOTRECOVERABLE;
+free_rsrc:
irdma_remove_push_mmap_entries(iwqp);
irdma_free_qp_rsrc(iwqp);
@@ -1077,7 +1072,7 @@ irdma_create_cq(struct ib_device *ibdev,
if (!iwcq->kmem_shadow.va) {
err_code = -ENOMEM;
- goto cq_free_rsrc;
+ goto cq_kmem_free;
}
info.shadow_area_pa = iwcq->kmem_shadow.pa;
ukinfo->shadow_area = iwcq->kmem_shadow.va;
@@ -1085,19 +1080,18 @@ irdma_create_cq(struct ib_device *ibdev,
info.cq_base_pa = iwcq->kmem.pa;
}
- if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
- info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
- (u32)IRDMA_MAX_CQ_READ_THRESH);
+ info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
+ (u32)IRDMA_MAX_CQ_READ_THRESH);
if (irdma_sc_cq_init(cq, &info)) {
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "init cq fail\n");
err_code = -EPROTO;
- goto cq_free_rsrc;
+ goto cq_kmem_free;
}
cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
if (!cqp_request) {
err_code = -ENOMEM;
- goto cq_free_rsrc;
+ goto cq_kmem_free;
}
cqp_info = &cqp_request->info;
cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
@@ -1109,7 +1103,7 @@ irdma_create_cq(struct ib_device *ibdev,
irdma_put_cqp_request(&rf->cqp, cqp_request);
if (status) {
err_code = -ENOMEM;
- goto cq_free_rsrc;
+ goto cq_kmem_free;
}
if (udata) {
@@ -1131,8 +1125,13 @@ irdma_create_cq(struct ib_device *ibdev,
return &iwcq->ibcq;
cq_destroy:
irdma_cq_wq_destroy(rf, cq);
+cq_kmem_free:
+ if (!iwcq->user_mode) {
+ irdma_free_dma_mem(dev->hw, &iwcq->kmem);
+ irdma_free_dma_mem(dev->hw, &iwcq->kmem_shadow);
+ }
cq_free_rsrc:
- irdma_cq_free_rsrc(rf, iwcq);
+ irdma_free_rsrc(rf, rf->allocated_cqs, cq_num);
error:
kfree(iwcq);
return ERR_PTR(err_code);
@@ -1310,12 +1309,10 @@ kc_irdma_set_roce_cm_info(struct irdma_qp *iwqp, struct ib_qp_attr *attr,
if (sgid_attr.ndev) {
*vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
- ether_addr_copy(iwqp->ctx_info.roce_info->mac_addr, IF_LLADDR(sgid_attr.ndev));
+ ether_addr_copy(iwqp->ctx_info.roce_info->mac_addr, if_getlladdr(sgid_attr.ndev));
}
- av->net_type = kc_rdma_gid_attr_network_type(sgid_attr,
- sgid_attr.gid_type,
- &sgid);
+ av->net_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid);
dev_put(sgid_attr.ndev);
iwqp->sc_qp.user_pri = iwqp->ctx_info.user_pri;
@@ -1375,6 +1372,9 @@ irdma_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
int err_code;
u32 stag;
+ if (type != IB_MW_TYPE_1 && type != IB_MW_TYPE_2)
+ return ERR_PTR(-EINVAL);
+
iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
if (!iwmr)
return ERR_PTR(-ENOMEM);
@@ -1530,7 +1530,7 @@ rdma_user_mmap_io(struct ib_ucontext *context, struct vm_area_struct *vma,
}
struct ib_device *
-ib_device_get_by_netdev(struct ifnet *netdev, int driver_id)
+ib_device_get_by_netdev(if_t netdev, int driver_id)
{
struct irdma_device *iwdev;
struct irdma_handler *hdl;
@@ -1659,17 +1659,17 @@ irdma_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props)
{
struct irdma_device *iwdev = to_iwdev(ibdev);
- struct ifnet *netdev = iwdev->netdev;
+ if_t netdev = iwdev->netdev;
/* no need to zero out pros here. done by caller */
props->max_mtu = IB_MTU_4096;
- props->active_mtu = ib_mtu_int_to_enum(netdev->if_mtu);
+ props->active_mtu = ib_mtu_int_to_enum(if_getmtu(netdev));
props->lid = 1;
props->lmc = 0;
props->sm_lid = 0;
props->sm_sl = 0;
- if ((netdev->if_link_state == LINK_STATE_UP) && (netdev->if_drv_flags & IFF_DRV_RUNNING)) {
+ if ((if_getlinkstate(netdev) == LINK_STATE_UP) && (if_getdrvflags(netdev) & IFF_DRV_RUNNING)) {
props->state = IB_PORT_ACTIVE;
props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
} else {
@@ -1680,7 +1680,7 @@ irdma_query_port(struct ib_device *ibdev, u8 port,
if (rdma_protocol_roce(ibdev, 1)) {
props->gid_tbl_len = 32;
- kc_set_props_ip_gid_caps(props);
+ props->port_cap_flags |= IB_PORT_IP_BASED_GIDS;
props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ;
} else {
props->gid_tbl_len = 1;
@@ -1802,7 +1802,7 @@ irdma_query_gid(struct ib_device *ibdev, u8 port, int index,
struct irdma_device *iwdev = to_iwdev(ibdev);
memset(gid->raw, 0, sizeof(gid->raw));
- ether_addr_copy(gid->raw, IF_LLADDR(iwdev->netdev));
+ ether_addr_copy(gid->raw, if_getlladdr(iwdev->netdev));
return 0;
}
@@ -1877,7 +1877,7 @@ kc_set_rdma_uverbs_cmd_mask(struct irdma_device *iwdev)
int
ib_get_eth_speed(struct ib_device *ibdev, u32 port_num, u8 *speed, u8 *width)
{
- struct ifnet *netdev = ibdev->get_netdev(ibdev, port_num);
+ if_t netdev = ibdev->get_netdev(ibdev, port_num);
u32 netdev_speed;
if (!netdev)
@@ -1907,3 +1907,15 @@ ib_get_eth_speed(struct ib_device *ibdev, u32 port_num, u8 *speed, u8 *width)
return 0;
}
+
+u64
+irdma_mac_to_u64(const u8 *eth_add)
+{
+ int idx;
+ u64 u64_eth_add;
+
+ for (idx = 0, u64_eth_add = 0; idx < ETHER_ADDR_LEN; idx++)
+ u64_eth_add = u64_eth_add << 8 | eth_add[idx];
+
+ return u64_eth_add;
+}
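irdma_mac_to_u64() folds the six octets most-significant-first into the low 48 bits of a u64; it replaces the open-coded LS_64_1() shift chains deleted from irdma_uda.c below. A worked example with an illustrative address:

const u8 mac[ETHER_ADDR_LEN] = {0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc};
u64 val = irdma_mac_to_u64(mac);	/* 0x00001b21aabbccULL */
/* irdma_sc_access_ah() then places it in the WQE via
 * FIELD_PREP(IRDMAQPC_MACADDRESS, val). */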
diff --git a/sys/dev/irdma/irdma_main.h b/sys/dev/irdma/irdma_main.h
index 31af05b512c3..8e304cae551f 100644
--- a/sys/dev/irdma/irdma_main.h
+++ b/sys/dev/irdma/irdma_main.h
@@ -148,6 +148,11 @@ enum init_completion_state {
IP_ADDR_REGISTERED, /* Last state of open */
};
+struct ae_desc {
+ u16 id;
+ const char *desc;
+};
+
struct irdma_rsrc_limits {
u32 qplimit;
u32 mrlimit;
@@ -176,8 +181,8 @@ struct irdma_cqp_request {
void (*callback_fcn)(struct irdma_cqp_request *cqp_request);
void *param;
struct irdma_cqp_compl_info compl_info;
+ bool request_done; /* READ/WRITE_ONCE macros operate on it */
bool waiting:1;
- bool request_done:1;
bool dynamic:1;
};
@@ -220,7 +225,7 @@ struct irdma_aeq {
struct irdma_arp_entry {
u32 ip_addr[4];
- u8 mac_addr[ETH_ALEN];
+ u8 mac_addr[ETHER_ADDR_LEN];
};
struct irdma_msix_vector {
@@ -359,7 +364,7 @@ struct irdma_pci_f {
struct irdma_device {
struct ib_device ibdev;
struct irdma_pci_f *rf;
- struct ifnet *netdev;
+ if_t netdev;
struct notifier_block nb_netdevice_event;
struct irdma_handler *hdl;
struct workqueue_struct *cleanup_wq;
@@ -522,6 +527,7 @@ void irdma_put_cqp_request(struct irdma_cqp *cqp,
int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx);
int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx);
void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx);
+const char *irdma_get_ae_desc(u16 ae_id);
u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf);
void irdma_port_ibevent(struct irdma_device *iwdev);
@@ -567,7 +573,7 @@ void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
void irdma_copy_ip_ntohl(u32 *dst, __be32 *src);
void irdma_copy_ip_htonl(__be32 *dst, u32 *src);
u16 irdma_get_vlan_ipv4(u32 *addr);
-struct ifnet *irdma_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac);
+if_t irdma_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac);
struct ib_mr *irdma_reg_phys_mr(struct ib_pd *ib_pd, u64 addr, u64 size,
int acc, u64 *iova_start);
int irdma_upload_qp_context(struct irdma_qp *iwqp, bool freeze, bool raw);
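The request_done change above (a plain bool replacing a :1 bitfield) is what makes the new READ_ONCE()/WRITE_ONCE() accessors legal: both macros take the address of their operand, and a bit-field has no address. A minimal sketch of the constraint, with an illustrative struct:

struct cqp_req_sketch {
	bool request_done;	/* full-width: &request_done is valid */
	bool waiting:1;		/* bit-field: &waiting does not compile */
};
/* WRITE_ONCE(req->request_done, true);   fine
 * WRITE_ONCE(req->waiting, true);        compile error: cannot take
 *                                        the address of a bit-field */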
diff --git a/sys/dev/irdma/irdma_pble.c b/sys/dev/irdma/irdma_pble.c
index 4ab998bb3e90..aaf9d8917622 100644
--- a/sys/dev/irdma/irdma_pble.c
+++ b/sys/dev/irdma/irdma_pble.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2022 Intel Corporation
+ * Copyright (c) 2015 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -56,8 +56,7 @@ irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
list_del(&chunk->list);
if (chunk->type == PBLE_SD_PAGED)
irdma_pble_free_paged_mem(chunk);
- if (chunk->bitmapbuf)
- irdma_prm_rem_bitmapmem(pble_rsrc->dev->hw, chunk);
+ bitmap_free(chunk->bitmapbuf);
kfree(chunk->chunkmem.va);
}
spin_lock_destroy(&pinfo->prm_lock);
@@ -289,7 +288,8 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
irdma_debug(dev, IRDMA_DEBUG_PBLE,
"pages = %d, unallocated_pble[%d] current_fpm_addr = %lx\n",
- pages, pble_rsrc->unallocated_pble, pble_rsrc->next_fpm_addr);
+ pages, pble_rsrc->unallocated_pble,
+ pble_rsrc->next_fpm_addr);
irdma_debug(dev, IRDMA_DEBUG_PBLE, "sd_entry_type = %d\n",
sd_entry_type);
if (sd_entry_type == IRDMA_SD_TYPE_DIRECT)
@@ -303,14 +303,14 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
if (sd_entry_type == IRDMA_SD_TYPE_PAGED) {
ret_code = add_bp_pages(pble_rsrc, &info);
if (ret_code)
- goto error;
+ goto err_bp_pages;
else
pble_rsrc->stats_paged_sds++;
}
ret_code = irdma_prm_add_pble_mem(&pble_rsrc->pinfo, chunk);
if (ret_code)
- goto error;
+ goto err_bp_pages;
pble_rsrc->next_fpm_addr += chunk->size;
irdma_debug(dev, IRDMA_DEBUG_PBLE,
@@ -332,8 +332,8 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
return 0;
error:
- if (chunk->bitmapbuf)
- irdma_prm_rem_bitmapmem(pble_rsrc->dev->hw, chunk);
+ bitmap_free(chunk->bitmapbuf);
+err_bp_pages:
kfree(chunk->chunkmem.va);
return ret_code;
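The bitmapmem wrapper disappears because bitmap_zalloc() allocates and zeroes the bitmap in one call and bitmap_free() is a NULL-safe kfree() wrapper, so the old `if (chunk->bitmapbuf)` guards are redundant. The resulting pattern, condensed from the teardown above and irdma_prm_add_pble_mem() in irdma_utils.c below:

pchunk->bitmapbuf = bitmap_zalloc(sizeofbitmap, GFP_KERNEL);
if (!pchunk->bitmapbuf)
	return -ENOMEM;
/* ... use the bitmap ... */
bitmap_free(chunk->bitmapbuf);	/* NULL-safe; no guard required */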
diff --git a/sys/dev/irdma/irdma_pble.h b/sys/dev/irdma/irdma_pble.h
index 6f6ac2341e1c..213be2018457 100644
--- a/sys/dev/irdma/irdma_pble.h
+++ b/sys/dev/irdma/irdma_pble.h
@@ -101,7 +101,7 @@ struct irdma_add_page_info {
struct irdma_chunk {
struct list_head list;
struct irdma_dma_info dmainfo;
- void *bitmapbuf;
+ unsigned long *bitmapbuf;
u32 sizeofbitmap;
u64 size;
@@ -110,7 +110,6 @@ struct irdma_chunk {
u32 pg_cnt;
enum irdma_alloc_type type;
struct irdma_sc_dev *dev;
- struct irdma_virt_mem bitmapmem;
struct irdma_virt_mem chunkmem;
};
@@ -161,5 +160,4 @@ void irdma_pble_release_lock(struct irdma_hmc_pble_rsrc *pble_rsrc,
unsigned long *flags);
void irdma_pble_free_paged_mem(struct irdma_chunk *chunk);
int irdma_pble_get_paged_mem(struct irdma_chunk *chunk, u32 pg_cnt);
-void irdma_prm_rem_bitmapmem(struct irdma_hw *hw, struct irdma_chunk *chunk);
#endif /* IRDMA_PBLE_H */
diff --git a/sys/dev/irdma/irdma_protos.h b/sys/dev/irdma/irdma_protos.h
index 6f248c9da5e9..4f9a97d54088 100644
--- a/sys/dev/irdma/irdma_protos.h
+++ b/sys/dev/irdma/irdma_protos.h
@@ -60,8 +60,6 @@ int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
struct irdma_vsi_pestat *pestat, bool wait);
int irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
struct irdma_ws_node_info *node_info);
-int irdma_cqp_up_map_cmd(struct irdma_sc_dev *dev, u8 cmd,
- struct irdma_up_info *map_info);
int irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq,
u8 op);
int irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_aeq *sc_aeq,
@@ -130,8 +128,6 @@ int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
struct irdma_dma_mem *val_mem, u16 hmc_fn_id);
int irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
struct irdma_dma_mem *mem);
-int irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
- struct irdma_hmc_fcn_info *hmcfcninfo,
- u16 *pmf_idx);
void *irdma_remove_cqp_head(struct irdma_sc_dev *dev);
+u64 irdma_mac_to_u64(const u8 *eth_add);
#endif /* IRDMA_PROTOS_H */
diff --git a/sys/dev/irdma/irdma_puda.c b/sys/dev/irdma/irdma_puda.c
index 45f37f4a750c..86bdb334e658 100644
--- a/sys/dev/irdma/irdma_puda.c
+++ b/sys/dev/irdma/irdma_puda.c
@@ -47,6 +47,7 @@ static void irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid);
static void
irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
struct irdma_puda_buf *buf, u32 wqe_idx);
+
/**
* irdma_puda_get_listbuf - get buffer from puda list
* @list: list to use for buffers (ILQ or IEQ)
@@ -269,6 +270,9 @@ irdma_puda_poll_info(struct irdma_sc_cq *cq,
if (valid_bit != cq_uk->polarity)
return -ENOENT;
+ /* Ensure CQE contents are read after valid bit is checked */
+ rmb();
+
if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
@@ -282,6 +286,9 @@ irdma_puda_poll_info(struct irdma_sc_cq *cq,
if (polarity != cq_uk->polarity)
return -ENOENT;
+ /* Ensure ext CQE contents are read after ext valid bit is checked */
+ rmb();
+
IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
cq_uk->polarity = !cq_uk->polarity;
@@ -929,6 +936,7 @@ irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
struct irdma_sc_ceq *ceq;
ceq = vsi->dev->ceq[0];
+
switch (type) {
case IRDMA_PUDA_RSRC_TYPE_ILQ:
rsrc = vsi->ilq;
@@ -1697,6 +1705,7 @@ irdma_ieq_handle_exception(struct irdma_puda_rsrc *ieq,
struct irdma_pfpdu *pfpdu = &qp->pfpdu;
u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
u32 rcv_wnd = hw_host_ctx[23];
+
/* first partial seq # in q2 */
u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
struct list_head *rxlist = &pfpdu->rxlist;
diff --git a/sys/dev/irdma/irdma_puda.h b/sys/dev/irdma/irdma_puda.h
index 20fb9df9a25e..aff435a90ecd 100644
--- a/sys/dev/irdma/irdma_puda.h
+++ b/sys/dev/irdma/irdma_puda.h
@@ -71,7 +71,7 @@ struct irdma_puda_cmpl_info {
bool ipv4:1;
bool smac_valid:1;
bool vlan_valid:1;
- u8 smac[ETH_ALEN];
+ u8 smac[ETHER_ADDR_LEN];
};
struct irdma_puda_send_info {
@@ -108,7 +108,7 @@ struct irdma_puda_buf {
bool smac_valid:1;
u32 seqnum;
u32 ah_id;
- u8 smac[ETH_ALEN];
+ u8 smac[ETHER_ADDR_LEN];
struct irdma_sc_vsi *vsi;
};
diff --git a/sys/dev/irdma/irdma_type.h b/sys/dev/irdma/irdma_type.h
index ac9860c956d4..2f3ab8c471d5 100644
--- a/sys/dev/irdma/irdma_type.h
+++ b/sys/dev/irdma/irdma_type.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2022 Intel Corporation
+ * Copyright (c) 2015 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -397,6 +397,8 @@ struct irdma_sc_cqp {
struct irdma_dcqcn_cc_params dcqcn_params;
__le64 *host_ctx;
u64 *scratch_array;
+ u64 requested_ops;
+ atomic64_t completed_ops;
u32 cqp_id;
u32 sq_size;
u32 hw_sq_size;
@@ -531,7 +533,7 @@ struct irdma_up_info {
bool use_cnp_up_override:1;
};
-#define IRDMA_MAX_WS_NODES 0x3FF
+#define IRDMA_MAX_WS_NODES 0x3FF
#define IRDMA_WS_NODE_INVALID 0xFFFF
struct irdma_ws_node_info {
@@ -597,7 +599,7 @@ struct irdma_sc_vsi {
bool tc_change_pending:1;
bool mtu_change_pending:1;
struct irdma_vsi_pestat *pestat;
- ATOMIC qp_suspend_reqs;
+ atomic_t qp_suspend_reqs;
int (*register_qset)(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node);
void (*unregister_qset)(struct irdma_sc_vsi *vsi,
@@ -627,8 +629,6 @@ struct irdma_sc_dev {
u32 IOMEM *aeq_alloc_db;
u32 IOMEM *cqp_db;
u32 IOMEM *cq_ack_db;
- u32 IOMEM *ceq_itr_mask_db;
- u32 IOMEM *aeq_itr_mask_db;
u32 IOMEM *hw_regs[IRDMA_MAX_REGS];
u32 ceq_itr; /* Interrupt throttle, usecs between interrupts: 0 disabled. 2 - 8160 */
u64 hw_masks[IRDMA_MAX_MASKS];
@@ -781,7 +781,6 @@ struct irdma_ceq_init_info {
u32 first_pm_pbl_idx;
struct irdma_sc_vsi *vsi;
struct irdma_sc_cq **reg_cq;
- u32 reg_cq_idx;
};
struct irdma_aeq_init_info {
@@ -868,7 +867,7 @@ struct irdma_roce_offload_info {
u16 t_high;
u16 t_low;
u8 last_byte_sent;
- u8 mac_addr[ETH_ALEN];
+ u8 mac_addr[ETHER_ADDR_LEN];
u8 rtomin;
};
@@ -900,7 +899,7 @@ struct irdma_iwarp_offload_info {
u16 t_high;
u16 t_low;
u8 last_byte_sent;
- u8 mac_addr[ETH_ALEN];
+ u8 mac_addr[ETHER_ADDR_LEN];
u8 rtomin;
};
@@ -967,7 +966,6 @@ struct irdma_qp_host_ctx_info {
u32 rcv_cq_num;
u32 rem_endpoint_idx;
u16 stats_idx;
- bool srq_valid:1;
bool tcp_info_valid:1;
bool iwarp_info_valid:1;
bool stats_idx_valid:1;
@@ -988,10 +986,6 @@ struct irdma_aeqe_info {
bool in_rdrsp_wr:1;
bool out_rdrsp:1;
bool aeqe_overflow:1;
- /* This flag is used to determine if we should pass the rq tail
- * in the QP context for FW/HW. It is set when ae_src is rq for GEN1/GEN2
- * And additionally set for inbound atomic, read and write for GEN3
- */
bool err_rq_idx_valid:1;
u8 q2_data_written;
u8 ae_src;
@@ -1133,7 +1127,7 @@ struct irdma_local_mac_entry_info {
};
struct irdma_add_arp_cache_entry_info {
- u8 mac_addr[ETH_ALEN];
+ u8 mac_addr[ETHER_ADDR_LEN];
u32 reach_max;
u16 arp_index;
bool permanent;
@@ -1150,7 +1144,7 @@ struct irdma_qhash_table_info {
enum irdma_quad_entry_type entry_type;
bool vlan_valid:1;
bool ipv4_valid:1;
- u8 mac_addr[ETH_ALEN];
+ u8 mac_addr[ETHER_ADDR_LEN];
u16 vlan_id;
u8 user_pri;
u32 qp_num;
@@ -1248,7 +1242,6 @@ int irdma_sc_qp_modify(struct irdma_sc_qp *qp,
bool post_sq);
void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
irdma_stag stag);
-void irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size);
void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read);
void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
struct irdma_qp_host_ctx_info *info);
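The requested_ops/completed_ops pair added above replaces the IRDMA_OP_REQ_CMDS/IRDMA_OP_CMPL_CMDS statistics slots dropped from the enum in irdma_defs.h; irdma_wait_event() in irdma_utils.c below snapshots the atomic counter to detect a stalled CQP. A sketch of that progress check, assuming the watchdog state (a compl_cqp_cmds snapshot plus a stall count) is passed by pointer:

u64 done = atomic64_read(&dev->cqp->completed_ops);

if (done > timeout->compl_cqp_cmds) {
	timeout->compl_cqp_cmds = done;	/* progress: restart the clock */
	timeout->count = 0;
} else if (++timeout->count >= CQP_TIMEOUT_THRESHOLD) {
	/* no completions across the threshold: treat the CQP as hung */
}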
diff --git a/sys/dev/irdma/irdma_uda.c b/sys/dev/irdma/irdma_uda.c
index 85850a726e74..850bfa4ae6d6 100644
--- a/sys/dev/irdma/irdma_uda.c
+++ b/sys/dev/irdma/irdma_uda.c
@@ -58,12 +58,8 @@ irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
if (!wqe)
return -ENOSPC;
- set_64bit_val(wqe, IRDMA_BYTE_0, LS_64_1(info->mac_addr[5], 16) |
- LS_64_1(info->mac_addr[4], 24) |
- LS_64_1(info->mac_addr[3], 32) |
- LS_64_1(info->mac_addr[2], 40) |
- LS_64_1(info->mac_addr[1], 48) |
- LS_64_1(info->mac_addr[0], 56));
+ set_64bit_val(wqe, IRDMA_BYTE_0,
+ FIELD_PREP(IRDMAQPC_MACADDRESS, irdma_mac_to_u64(info->mac_addr)));
qw1 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXLO, info->pd_idx) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_TC, info->tc_tos) |
@@ -174,12 +170,7 @@ irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
set_64bit_val(wqe, IRDMA_BYTE_16,
FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANID, info->vlan_id) |
FIELD_PREP(IRDMA_UDA_CQPSQ_QS_HANDLE, info->qs_handle));
- set_64bit_val(wqe, IRDMA_BYTE_0, LS_64_1(info->dest_mac_addr[5], 0) |
- LS_64_1(info->dest_mac_addr[4], 8) |
- LS_64_1(info->dest_mac_addr[3], 16) |
- LS_64_1(info->dest_mac_addr[2], 24) |
- LS_64_1(info->dest_mac_addr[1], 32) |
- LS_64_1(info->dest_mac_addr[0], 40));
+ set_64bit_val(wqe, IRDMA_BYTE_0, irdma_mac_to_u64(info->dest_mac_addr));
set_64bit_val(wqe, IRDMA_BYTE_8,
FIELD_PREP(IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID, info->hmc_fcn_id));
diff --git a/sys/dev/irdma/irdma_uda.h b/sys/dev/irdma/irdma_uda.h
index fcf6c875ea45..4be9a144d18b 100644
--- a/sys/dev/irdma/irdma_uda.h
+++ b/sys/dev/irdma/irdma_uda.h
@@ -53,7 +53,7 @@ struct irdma_ah_info {
u8 insert_vlan_tag;
u8 tc_tos;
u8 hop_ttl;
- u8 mac_addr[ETH_ALEN];
+ u8 mac_addr[ETHER_ADDR_LEN];
bool ah_valid:1;
bool ipv4_valid:1;
bool do_lpbk:1;
diff --git a/sys/dev/irdma/irdma_uk.c b/sys/dev/irdma/irdma_uk.c
index 76648af33488..1333b1ca4845 100644
--- a/sys/dev/irdma/irdma_uk.c
+++ b/sys/dev/irdma/irdma_uk.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2022 Intel Corporation
+ * Copyright (c) 2015 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -274,7 +274,8 @@ irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && wqe_quanta == 1 &&
(IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
- wqe_0[3] = cpu_to_le64(FIELD_PREP(IRDMAQPSQ_VALID, !qp->swqe_polarity));
+ wqe_0[3] = cpu_to_le64(FIELD_PREP(IRDMAQPSQ_VALID,
+ qp->swqe_polarity ? 0 : 1));
}
qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
@@ -596,22 +597,6 @@ irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
}
/**
- * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
- * @wqe: wqe for setting fragment
- * @op_info: info for setting bind wqe values
- */
-static void
-irdma_set_mw_bind_wqe_gen_1(__le64 * wqe,
- struct irdma_bind_window *op_info)
-{
- set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)op_info->va);
- set_64bit_val(wqe, IRDMA_BYTE_8,
- FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) |
- FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag));
- set_64bit_val(wqe, IRDMA_BYTE_16, op_info->bind_len);
-}
-
-/**
* irdma_copy_inline_data_gen_1 - Copy inline data to wqe
* @wqe: pointer to wqe
* @sge_list: table of pointers to inline data
@@ -659,22 +644,6 @@ static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size) {
}
/**
- * irdma_set_mw_bind_wqe - set mw bind in wqe
- * @wqe: wqe for setting mw bind
- * @op_info: info for setting wqe values
- */
-static void
-irdma_set_mw_bind_wqe(__le64 * wqe,
- struct irdma_bind_window *op_info)
-{
- set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)op_info->va);
- set_64bit_val(wqe, IRDMA_BYTE_8,
- FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
- FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
- set_64bit_val(wqe, IRDMA_BYTE_16, op_info->bind_len);
-}
-
-/**
* irdma_copy_inline_data - Copy inline data to wqe
* @wqe: pointer to wqe
* @sge_list: table of pointers to inline data
@@ -1546,14 +1515,12 @@ static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
.iw_copy_inline_data = irdma_copy_inline_data,
.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
.iw_set_fragment = irdma_set_fragment,
- .iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
};
static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
.iw_copy_inline_data = irdma_copy_inline_data_gen_1,
.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
.iw_set_fragment = irdma_set_fragment_gen_1,
- .iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
};
/**
@@ -1615,6 +1582,7 @@ irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
{
bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2 ? true : false;
int status;
+
irdma_get_wqe_shift(ukinfo->uk_attrs,
imm_support ? ukinfo->max_sq_frag_cnt + 1 :
ukinfo->max_sq_frag_cnt,
@@ -1759,6 +1727,9 @@ irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
if (polarity != temp)
break;
+ /* Ensure CQE contents are read after valid bit is checked */
+ rmb();
+
get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
if ((void *)(irdma_uintptr) comp_ctx == q)
set_64bit_val(cqe, IRDMA_BYTE_8, 0);
@@ -1771,48 +1742,6 @@ irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
}
/**
- * irdma_nop - post a nop
- * @qp: hw qp ptr
- * @wr_id: work request id
- * @signaled: signaled for completion
- * @post_sq: ring doorbell
- */
-int
-irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
-{
- __le64 *wqe;
- u64 hdr;
- u32 wqe_idx;
- struct irdma_post_sq_info info = {0};
- u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;
-
- info.push_wqe = qp->push_db ? true : false;
- info.wr_id = wr_id;
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, 0, &info);
- if (!wqe)
- return -ENOSPC;
-
- set_64bit_val(wqe, IRDMA_BYTE_0, 0);
- set_64bit_val(wqe, IRDMA_BYTE_8, 0);
- set_64bit_val(wqe, IRDMA_BYTE_16, 0);
-
- hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
- FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
- FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
-
- irdma_wmb(); /* make sure WQE is populated before valid bit is set */
-
- set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
-
- if (info.push_wqe)
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- else if (post_sq)
- irdma_uk_qp_post_wr(qp);
-
- return 0;
-}
-
-/**
* irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
* @frag_cnt: number of fragments
* @quanta: quanta for frag_cnt
diff --git a/sys/dev/irdma/irdma_user.h b/sys/dev/irdma/irdma_user.h
index 5fe9d5cfdfbe..6793aa8eb706 100644
--- a/sys/dev/irdma/irdma_user.h
+++ b/sys/dev/irdma/irdma_user.h
@@ -567,7 +567,6 @@ void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
u32 inline_data, u8 *shift);
int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth);
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth);
-int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift, u32 *srqdepth);
void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
u32 wqe_idx, bool post_sq);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
diff --git a/sys/dev/irdma/irdma_utils.c b/sys/dev/irdma/irdma_utils.c
index 83c0dfcdbce4..acffd36fd445 100644
--- a/sys/dev/irdma/irdma_utils.c
+++ b/sys/dev/irdma/irdma_utils.c
@@ -37,6 +37,169 @@
LIST_HEAD(irdma_handlers);
DEFINE_SPINLOCK(irdma_handler_lock);
+static const struct ae_desc ae_desc_list[] = {
+ {IRDMA_AE_AMP_UNALLOCATED_STAG, "Unallocated memory key (L-Key/R-Key)"},
+ {IRDMA_AE_AMP_INVALID_STAG, "Invalid memory key (L-Key/R-Key)"},
+ {IRDMA_AE_AMP_BAD_QP,
+ "Memory protection error: Accessing Memory Window (MW) which belongs to a different QP"},
+ {IRDMA_AE_AMP_BAD_PD,
+ "Memory protection error: Accessing Memory Window (MW)/Memory Region (MR) which belongs to a different PD"},
+ {IRDMA_AE_AMP_BAD_STAG_KEY, "Bad memory key (L-Key/R-Key)"},
+ {IRDMA_AE_AMP_BAD_STAG_INDEX, "Bad memory key (L-Key/R-Key): Too large memory key index"},
+ {IRDMA_AE_AMP_BOUNDS_VIOLATION, "Memory Window (MW)/Memory Region (MR) bounds violation"},
+ {IRDMA_AE_AMP_RIGHTS_VIOLATION, "Memory Window (MW)/Memory Region (MR) rights violation"},
+ {IRDMA_AE_AMP_TO_WRAP,
+ "Memory protection error: The address within Memory Window (MW)/Memory Region (MR) wraps"},
+ {IRDMA_AE_AMP_FASTREG_VALID_STAG,
+ "Fastreg error: Registration to a valid MR"},
+ {IRDMA_AE_AMP_FASTREG_MW_STAG,
+ "Fastreg error: Registration to a valid Memory Window (MW)"},
+ {IRDMA_AE_AMP_FASTREG_INVALID_RIGHTS, "Fastreg error: Invalid rights"},
+ {IRDMA_AE_AMP_FASTREG_INVALID_LENGTH, "Fastreg error: Invalid length"},
+ {IRDMA_AE_AMP_INVALIDATE_SHARED, "Attempt to invalidate a shared MR"},
+ {IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS,
+ "Attempt to remotely invalidate Memory Window (MW)/Memory Region (MR) without rights"},
+ {IRDMA_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS,
+ "Attempt to invalidate MR with a bound Memory Window (MW)"},
+ {IRDMA_AE_AMP_MWBIND_VALID_STAG,
+ "Attempt to bind an Memory Window (MW) with a valid MW memory key (L-Key/R-Key)"},
+ {IRDMA_AE_AMP_MWBIND_OF_MR_STAG,
+ "Attempt to bind an Memory Window (MW) with an MR memory key (L-Key/R-Key)"},
+ {IRDMA_AE_AMP_MWBIND_TO_ZERO_BASED_STAG,
+ "Attempt to bind an Memory Window (MW) to a zero based MR"},
+ {IRDMA_AE_AMP_MWBIND_TO_MW_STAG,
+ "Attempt to bind an Memory Window (MW) using MW memory key (L-Key/R-Key) instead of MR memory key (L-Key/R-Key)"},
+ {IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS, "Memory Window (MW) bind error: Invalid rights"},
+ {IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS, "Memory Window (MW) bind error: Invalid bounds"},
+ {IRDMA_AE_AMP_MWBIND_TO_INVALID_PARENT,
+ "Memory Window (MW) bind error: Invalid parent MR"},
+ {IRDMA_AE_AMP_MWBIND_BIND_DISABLED,
+ "Memory Window (MW) bind error: Disabled bind support"},
+ {IRDMA_AE_PRIV_OPERATION_DENIED,
+ "Denying a privileged operation on a non-privileged QP"},
+ {IRDMA_AE_AMP_INVALIDATE_TYPE1_MW, "Memory Window (MW) error: Invalidate type 1 MW"},
+ {IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW,
+ "Memory Window (MW) bind error: Zero-based addressing for type 1 MW"},
+ {IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG,
+ "Fastreg error: Invalid host page size config"},
+ {IRDMA_AE_AMP_MWBIND_WRONG_TYPE, "MW bind error: Wrong Memory Window (MW) type"},
+ {IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH,
+ "Fastreg error: Invalid request to change physical MR to virtual or vice versa"},
+ {IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG,
+ "Userspace Direct Access (UDA) QP xmit error: Packet length exceeds the QP MTU"},
+ {IRDMA_AE_UDA_XMIT_BAD_PD,
+ "Userspace Direct Access (UDA) QP xmit error: Attempt to access a different PD"},
+ {IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT,
+ "Userspace Direct Access (UDA) QP xmit error: Too short packet length"},
+ {IRDMA_AE_UDA_L4LEN_INVALID,
+ "Userspace Direct Access (UDA) error: Invalid packet length field"},
+ {IRDMA_AE_BAD_CLOSE,
+ "iWARP error: Data is received when QP state is closing"},
+ {IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE,
+ "iWARP error: FIN is received when xmit data is pending"},
+ {IRDMA_AE_CQ_OPERATION_ERROR, "CQ overflow"},
+ {IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO,
+ "QP error: Attempted RDMA Read when the outbound RDMA Read queue depth is zero"},
+ {IRDMA_AE_STAG_ZERO_INVALID,
+ "Zero invalid memory key (L-Key/R-Key) on inbound RDMA R/W"},
+ {IRDMA_AE_IB_RREQ_AND_Q1_FULL,
+ "QP error: Received RDMA Read request when the inbound RDMA Read queue is full"},
+ {IRDMA_AE_IB_INVALID_REQUEST,
+ "QP error: Invalid operation detected by the remote peer"},
+ {IRDMA_AE_WQE_UNEXPECTED_OPCODE,
+ "QP error: Invalid opcode in SQ WQE"},
+ {IRDMA_AE_WQE_INVALID_PARAMETER,
+ "QP error: Invalid parameter in a WQE"},
+ {IRDMA_AE_WQE_INVALID_FRAG_DATA,
+ "QP error: Invalid fragment in a WQE"},
+ {IRDMA_AE_IB_REMOTE_ACCESS_ERROR,
+ "RoCEv2 error: Remote access error"},
+ {IRDMA_AE_IB_REMOTE_OP_ERROR,
+ "RoCEv2 error: Remote operation error"},
+ {IRDMA_AE_WQE_LSMM_TOO_LONG, "iWARP error: Connection error"},
+ {IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN,
+ "iWARP error: Invalid message sequence number"},
+ {IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER,
+ "iWARP error: Inbound message is too long for the available buffer"},
+ {IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION, "iWARP error: Invalid DDP protocol version"},
+ {IRDMA_AE_DDP_UBE_INVALID_MO, "Received message with too large offset"},
+ {IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE,
+ "iWARP error: Inbound Send message when no receive buffer is available"},
+ {IRDMA_AE_DDP_UBE_INVALID_QN, "iWARP error: Invalid QP number in inbound packet"},
+ {IRDMA_AE_DDP_NO_L_BIT,
+ "iWARP error: Last bit not set in an inbound packet which completes RDMA Read"},
+ {IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION, "iWARP error: Invalid RDMAP protocol version"},
+ {IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE, "QP error: Invalid opcode"},
+ {IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST, "Inbound Read request when QP isn't enabled for RDMA Read"},
+ {IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP,
+ "Inbound RDMA Read response or RDMA Write when QP isn't enabled for RDMA R/W"},
+ {IRDMA_AE_ROCE_RSP_LENGTH_ERROR, "RoCEv2 error: Received packet with incorrect length field"},
+ {IRDMA_AE_ROCE_EMPTY_MCG, "RoCEv2 error: Multicast group has no valid members"},
+ {IRDMA_AE_ROCE_BAD_MC_IP_ADDR, "RoCEv2 error: Multicast IP address doesn't match"},
+ {IRDMA_AE_ROCE_BAD_MC_QPID, "RoCEv2 error: Multicast packet QP number isn't 0xffffff"},
+ {IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH, "RoCEv2 error: Multicast packet protocol mismatch"},
+ {IRDMA_AE_INVALID_ARP_ENTRY, "Invalid ARP entry"},
+ {IRDMA_AE_INVALID_TCP_OPTION_RCVD, "iWARP error: Invalid TCP option"},
+ {IRDMA_AE_STALE_ARP_ENTRY, "Stale ARP entry"},
+ {IRDMA_AE_INVALID_AH_ENTRY, "Invalid AH entry"},
+ {IRDMA_AE_LLP_CLOSE_COMPLETE,
+ "iWARP event: Graceful close complete"},
+ {IRDMA_AE_LLP_CONNECTION_RESET,
+ "iWARP event: Received a TCP packet with a RST bit set"},
+ {IRDMA_AE_LLP_FIN_RECEIVED,
+ "iWARP event: Received a TCP packet with a FIN bit set"},
+ {IRDMA_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH,
+ "iWARP error: Unable to close a gap in the TCP sequence"},
+ {IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR, "Received an ICRC error"},
+ {IRDMA_AE_LLP_SEGMENT_TOO_SMALL,
+ "iWARP error: Received a packet with insufficient space for protocol headers"},
+ {IRDMA_AE_LLP_SYN_RECEIVED,
+ "iWARP event: Received a TCP packet with a SYN bit set"},
+ {IRDMA_AE_LLP_TERMINATE_RECEIVED,
+ "iWARP error: Received a terminate message"},
+ {IRDMA_AE_LLP_TOO_MANY_RETRIES, "Connection error: The max number of retries has been reached"},
+ {IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES,
+ "Connection error: The max number of keepalive retries has been reached"},
+ {IRDMA_AE_LLP_DOUBT_REACHABILITY,
+ "Connection error: Doubt reachability (usually occurs after the max number of retries has been reached)"},
+ {IRDMA_AE_LLP_CONNECTION_ESTABLISHED,
+ "iWARP event: Connection established"},
+ {IRDMA_AE_RESOURCE_EXHAUSTION,
+ "QP error: Resource exhaustion"},
+ {IRDMA_AE_RESET_SENT,
+ "Reset sent (as requested via Modify QP)"},
+ {IRDMA_AE_TERMINATE_SENT,
+ "Terminate sent (as requested via Modify QP)"},
+ {IRDMA_AE_RESET_NOT_SENT,
+ "Reset not sent (but requested via Modify QP)"},
+ {IRDMA_AE_LCE_QP_CATASTROPHIC,
+ "QP error: HW transaction resulted in catastrophic error"},
+ {IRDMA_AE_LCE_FUNCTION_CATASTROPHIC,
+ "PCIe function error: HW transaction resulted in catastrophic error"},
+ {IRDMA_AE_LCE_CQ_CATASTROPHIC,
+ "CQ error: HW transaction resulted in catastrophic error"},
+ {IRDMA_AE_QP_SUSPEND_COMPLETE, "QP event: Suspend complete"},
+};
+
+/**
+ * irdma_get_ae_desc - returns AE description
+ * @ae_id: the AE number
+ */
+const char *
+irdma_get_ae_desc(u16 ae_id)
+{
+ const char *desc = "";
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ae_desc_list); i++) {
+ if (ae_desc_list[i].id == ae_id) {
+ desc = ae_desc_list[i].desc;
+ break;
+ }
+ }
+ return desc;
+}
+
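irdma_get_ae_desc() returns an empty string rather than NULL for unknown IDs, so callers can pass the result straight to a format string with no guard, as the reworked AEQ logging in irdma_hw.c (earlier in this patch) does:

irdma_dev_err(&iwdev->ibdev,
	      "AEQ: abnormal ae_id = 0x%x (%s), is_qp = %d, qp_id = %d, ae_source = %d\n",
	      info->ae_id, irdma_get_ae_desc(info->ae_id),
	      info->qp, info->qp_cq_id, info->ae_src);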
/**
* irdma_arp_table -manage arp table
* @rf: RDMA PCI function
@@ -241,7 +404,7 @@ irdma_free_cqp_request(struct irdma_cqp *cqp,
if (cqp_request->dynamic) {
kfree(cqp_request);
} else {
- cqp_request->request_done = false;
+ WRITE_ONCE(cqp_request->request_done, false);
cqp_request->callback_fcn = NULL;
cqp_request->waiting = false;
@@ -276,7 +439,7 @@ irdma_free_pending_cqp_request(struct irdma_cqp *cqp,
{
if (cqp_request->waiting) {
cqp_request->compl_info.error = true;
- cqp_request->request_done = true;
+ WRITE_ONCE(cqp_request->request_done, true);
wake_up(&cqp_request->waitq);
}
wait_event_timeout(cqp->remove_wq,
@@ -327,23 +490,22 @@ irdma_wait_event(struct irdma_pci_f *rf,
struct irdma_cqp_request *cqp_request)
{
struct irdma_cqp_timeout cqp_timeout = {0};
- int timeout_threshold = CQP_TIMEOUT_THRESHOLD;
bool cqp_error = false;
int err_code = 0;
- cqp_timeout.compl_cqp_cmds = rf->sc_dev.cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
+ cqp_timeout.compl_cqp_cmds = atomic64_read(&rf->sc_dev.cqp->completed_ops);
do {
int wait_time_ms = rf->sc_dev.hw_attrs.max_cqp_compl_wait_time_ms;
irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
if (wait_event_timeout(cqp_request->waitq,
- cqp_request->request_done,
+ READ_ONCE(cqp_request->request_done),
msecs_to_jiffies(wait_time_ms)))
break;
irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev);
- if (cqp_timeout.count < timeout_threshold)
+ if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)
continue;
if (!rf->reset) {
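The request_done flag is now read and written through READ_ONCE()/WRITE_ONCE(), making the completer/waiter handshake explicit to the compiler. Schematically (condensed from the hunks above, not verbatim driver code):

    /* completer side: publish the flag, then wake the waiter */
    WRITE_ONCE(cqp_request->request_done, true);
    wake_up(&cqp_request->waitq);

    /* waiter side: re-read the flag on every wakeup */
    wait_event_timeout(cqp_request->waitq,
                       READ_ONCE(cqp_request->request_done),
                       msecs_to_jiffies(wait_time_ms));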
@@ -396,7 +558,8 @@ static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
[IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE] = "Manage HMC PM Function Table Cmd",
[IRDMA_OP_SUSPEND] = "Suspend QP Cmd",
[IRDMA_OP_RESUME] = "Resume QP Cmd",
- [IRDMA_OP_MANAGE_VF_PBLE_BP] = "Manage VF PBLE Backing Pages Cmd",
+ [IRDMA_OP_MANAGE_VCHNL_REQ_PBLE_BP] =
+ "Manage Virtual Channel Requester Function PBLE Backing Pages Cmd",
[IRDMA_OP_QUERY_FPM_VAL] = "Query FPM Values Cmd",
[IRDMA_OP_COMMIT_FPM_VAL] = "Commit FPM Values Cmd",
[IRDMA_OP_AH_CREATE] = "Create Address Handle Cmd",
@@ -1620,41 +1783,6 @@ exit:
}
/**
- * irdma_cqp_up_map_cmd - Set the up-up mapping
- * @dev: pointer to device structure
- * @cmd: map command
- * @map_info: pointer to up map info
- */
-int
-irdma_cqp_up_map_cmd(struct irdma_sc_dev *dev, u8 cmd,
- struct irdma_up_info *map_info)
-{
- struct irdma_pci_f *rf = dev_to_rf(dev);
- struct irdma_cqp *iwcqp = &rf->cqp;
- struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp;
- struct irdma_cqp_request *cqp_request;
- struct cqp_cmds_info *cqp_info;
- int status;
-
- cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, false);
- if (!cqp_request)
- return -ENOMEM;
-
- cqp_info = &cqp_request->info;
- memset(cqp_info, 0, sizeof(*cqp_info));
- cqp_info->cqp_cmd = cmd;
- cqp_info->post_sq = 1;
- cqp_info->in.u.up_map.info = *map_info;
- cqp_info->in.u.up_map.cqp = cqp;
- cqp_info->in.u.up_map.scratch = (uintptr_t)cqp_request;
-
- status = irdma_handle_cqp_op(rf, cqp_request);
- irdma_put_cqp_request(&rf->cqp, cqp_request);
-
- return status;
-}
-
-/**
* irdma_ah_cqp_op - perform an AH cqp operation
* @rf: RDMA PCI function
* @sc_ah: address handle
@@ -1706,7 +1834,7 @@ irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
return -ENOMEM;
if (wait)
- sc_ah->ah_info.ah_valid = (cmd == IRDMA_OP_AH_CREATE);
+ sc_ah->ah_info.ah_valid = (cmd != IRDMA_OP_AH_DESTROY);
return 0;
}
@@ -1851,15 +1979,10 @@ irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift;
- pchunk->bitmapmem.size = sizeofbitmap >> 3;
- pchunk->bitmapmem.va = kzalloc(pchunk->bitmapmem.size, GFP_KERNEL);
-
- if (!pchunk->bitmapmem.va)
+ pchunk->bitmapbuf = bitmap_zalloc(sizeofbitmap, GFP_KERNEL);
+ if (!pchunk->bitmapbuf)
return -ENOMEM;
- pchunk->bitmapbuf = pchunk->bitmapmem.va;
- bitmap_zero(pchunk->bitmapbuf, sizeofbitmap);
-
pchunk->sizeofbitmap = sizeofbitmap;
/* each pble is 8 bytes hence shift by 3 */
pprm->total_pble_alloc += pchunk->size >> 3;
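bitmap_zalloc() takes a size in bits and returns a zeroed bitmap, replacing the open-coded kzalloc() of sizeofbitmap >> 3 bytes plus bitmap_zero(); the matching release helper is bitmap_free(). A minimal sketch of the pattern:

    unsigned long *map;

    map = bitmap_zalloc(sizeofbitmap, GFP_KERNEL);
    if (!map)
            return -ENOMEM;
    /* ... carve ranges out of map, e.g. with bitmap_find_next_zero_area() ... */
    bitmap_free(map);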
@@ -1887,6 +2010,7 @@ irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
struct list_head *chunk_entry = (&pprm->clist)->next;
u32 offset;
unsigned long flags;
+
*vaddr = NULL;
*fpm_addr = 0;
@@ -2197,9 +2321,8 @@ irdma_upload_qp_context(struct irdma_qp *iwqp, bool freeze, bool raw)
clear_qp_ctx_addr(dma_mem.va);
for (i = 0, j = 0; i < 32; i++, j += 4)
irdma_debug(dev, IRDMA_DEBUG_QP,
- "%d:\t [%08X %08x %08X %08X]\n",
- (j * 4), ctx[j], ctx[j + 1], ctx[j + 2],
- ctx[j + 3]);
+ "%d:\t [%08X %08x %08X %08X]\n", (j * 4),
+ ctx[j], ctx[j + 1], ctx[j + 2], ctx[j + 3]);
}
error:
irdma_put_cqp_request(iwcqp, cqp_request);
diff --git a/sys/dev/irdma/irdma_verbs.c b/sys/dev/irdma/irdma_verbs.c
index e7ef31de026a..288b075ab79f 100644
--- a/sys/dev/irdma/irdma_verbs.c
+++ b/sys/dev/irdma/irdma_verbs.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2022 Intel Corporation
+ * Copyright (c) 2015 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -55,7 +55,7 @@ irdma_query_device(struct ib_device *ibdev,
memset(props, 0, sizeof(*props));
addrconf_addr_eui48((u8 *)&props->sys_image_guid,
- IF_LLADDR(iwdev->netdev));
+ if_getlladdr(iwdev->netdev));
props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
irdma_fw_minor_ver(&rf->sc_dev);
props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
@@ -233,9 +233,9 @@ irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
vma->vm_pgoff);
return -EINVAL;
}
- irdma_debug(&ucontext->iwdev->rf->sc_dev,
- IRDMA_DEBUG_VERBS, "bar_offset [0x%lx] mmap_flag [%d]\n",
- entry->bar_offset, entry->mmap_flag);
+ irdma_debug(&ucontext->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
+ "bar_offset [0x%lx] mmap_flag [%d]\n", entry->bar_offset,
+ entry->mmap_flag);
pfn = (entry->bar_offset +
pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
@@ -441,11 +441,7 @@ irdma_setup_umode_qp(struct ib_udata *udata,
struct irdma_qp_init_info *info,
struct ib_qp_init_attr *init_attr)
{
-#if __FreeBSD_version >= 1400026
- struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
-#else
struct irdma_ucontext *ucontext = to_ucontext(iwqp->iwpd->ibpd.uobject->context);
-#endif
struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
struct irdma_create_qp_req req = {0};
unsigned long flags;
@@ -642,7 +638,7 @@ irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
udp_info->src_port = 0xc000;
udp_info->dst_port = ROCE_V2_UDP_DPORT;
roce_info = &iwqp->roce_info;
- ether_addr_copy(roce_info->mac_addr, IF_LLADDR(iwdev->netdev));
+ ether_addr_copy(roce_info->mac_addr, if_getlladdr(iwdev->netdev));
roce_info->rd_en = true;
roce_info->wr_rdresp_en = true;
@@ -675,7 +671,7 @@ irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
struct irdma_iwarp_offload_info *iwarp_info;
iwarp_info = &iwqp->iwarp_info;
- ether_addr_copy(iwarp_info->mac_addr, IF_LLADDR(iwdev->netdev));
+ ether_addr_copy(iwarp_info->mac_addr, if_getlladdr(iwdev->netdev));
iwarp_info->rd_en = true;
iwarp_info->wr_rdresp_en = true;
iwarp_info->bind_en = true;
@@ -728,12 +724,16 @@ irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
void
irdma_sched_qp_flush_work(struct irdma_qp *iwqp)
{
+ unsigned long flags;
+
if (iwqp->sc_qp.qp_uk.destroy_pending)
return;
irdma_qp_add_ref(&iwqp->ibqp);
+ spin_lock_irqsave(&iwqp->dwork_flush_lock, flags);
if (mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)))
irdma_qp_rem_ref(&iwqp->ibqp);
+ spin_unlock_irqrestore(&iwqp->dwork_flush_lock, flags);
}
void
@@ -822,6 +822,22 @@ irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return 0;
}
+static int
+irdma_wait_for_suspend(struct irdma_qp *iwqp)
+{
+ if (!wait_event_timeout(iwqp->iwdev->suspend_wq,
+ !iwqp->suspend_pending,
+ msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS))) {
+ iwqp->suspend_pending = false;
+ irdma_dev_warn(&iwqp->iwdev->ibdev,
+ "modify_qp timed out waiting for suspend. qp_id = %d, last_ae = 0x%x\n",
+ iwqp->ibqp.qp_num, iwqp->last_aeq);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
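irdma_wait_for_suspend() pairs with the IRDMA_AE_QP_SUSPEND_COMPLETE entry in the AE table above: the SQD path below sets suspend_pending before issuing the modify, and the AEQ handler is expected to clear it and wake suspend_wq. A sketch of that assumed counterpart (it lives in a file not shown in this section):

    /* Assumed AEQ-handler counterpart, not part of the hunks shown here. */
    case IRDMA_AE_QP_SUSPEND_COMPLETE:
            if (iwqp->suspend_pending) {
                    iwqp->suspend_pending = false;
                    wake_up(&iwqp->iwdev->suspend_wq);
            }
            break;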
/**
* irdma_modify_qp_roce - modify qp request
* @ibqp: qp's pointer for modify
@@ -1017,11 +1033,9 @@ irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
spin_lock_irqsave(&iwqp->lock, flags);
if (attr_mask & IB_QP_STATE) {
- if (!kc_ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
- iwqp->ibqp.qp_type, attr_mask,
- IB_LINK_LAYER_ETHERNET)) {
- irdma_dev_warn(&iwdev->ibdev,
- "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
+ if (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
+ iwqp->ibqp.qp_type, attr_mask)) {
+ irdma_dev_warn(&iwdev->ibdev, "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
iwqp->ibqp.qp_num, iwqp->ibqp_state,
attr->qp_state);
ret = -EINVAL;
@@ -1086,19 +1100,11 @@ irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
info.next_iwarp_state = IRDMA_QP_STATE_SQD;
issue_modify_qp = 1;
+ iwqp->suspend_pending = true;
break;
case IB_QPS_SQE:
case IB_QPS_ERR:
case IB_QPS_RESET:
- if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) {
- if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
- irdma_cqp_qp_suspend_resume(&iwqp->sc_qp, IRDMA_OP_SUSPEND);
- spin_unlock_irqrestore(&iwqp->lock, flags);
- info.next_iwarp_state = IRDMA_QP_STATE_SQD;
- irdma_hw_modify_qp(iwdev, iwqp, &info, true);
- spin_lock_irqsave(&iwqp->lock, flags);
- }
-
if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
spin_unlock_irqrestore(&iwqp->lock, flags);
if (udata && udata->inlen) {
@@ -1135,6 +1141,11 @@ irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
ctx_info->rem_endpoint_idx = udp_info->arp_idx;
if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
return -EINVAL;
+ if (info.next_iwarp_state == IRDMA_QP_STATE_SQD) {
+ ret = irdma_wait_for_suspend(iwqp);
+ if (ret)
+ return ret;
+ }
spin_lock_irqsave(&iwqp->lock, flags);
if (iwqp->iwarp_state == info.curr_iwarp_state) {
iwqp->iwarp_state = info.next_iwarp_state;
@@ -1417,8 +1428,8 @@ irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
udata->outlen));
if (err) {
irdma_remove_push_mmap_entries(iwqp);
- irdma_debug(&iwdev->rf->sc_dev,
- IRDMA_DEBUG_VERBS, "copy_to_udata failed\n");
+ irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
+ "copy_to_udata failed\n");
return err;
}
}
@@ -2110,6 +2121,169 @@ irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
return ret;
}
+/**
+ * irdma_alloc_iwmr - allocate an iwmr
+ * @region: memory region
+ * @pd: protection domain
+ * @virt: virtual address
+ * @reg_type: registration type
+ */
+static struct irdma_mr *
+irdma_alloc_iwmr(struct ib_umem *region,
+ struct ib_pd *pd, u64 virt,
+ enum irdma_memreg_type reg_type)
+{
+ struct irdma_pbl *iwpbl;
+ struct irdma_mr *iwmr;
+
+ iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
+ if (!iwmr)
+ return ERR_PTR(-ENOMEM);
+
+ iwpbl = &iwmr->iwpbl;
+ iwpbl->iwmr = iwmr;
+ iwmr->region = region;
+ iwmr->ibmr.pd = pd;
+ iwmr->ibmr.device = pd->device;
+ iwmr->ibmr.iova = virt;
+ iwmr->type = reg_type;
+
+ /* Some OOT versions of irdma_copy_user_pg_addr require the pg mask */
+ iwmr->page_msk = ~(IRDMA_HW_PAGE_SIZE - 1);
+ iwmr->page_size = IRDMA_HW_PAGE_SIZE;
+ iwmr->len = region->length;
+ iwpbl->user_base = virt;
+ iwmr->page_cnt = irdma_ib_umem_num_dma_blocks(region, iwmr->page_size, virt);
+
+ return iwmr;
+}
+
+static void
+irdma_free_iwmr(struct irdma_mr *iwmr)
+{
+ kfree(iwmr);
+}
+
+/**
+ * irdma_reg_user_mr_type_mem - handle memory registration
+ * @iwmr: irdma mr
+ * @access: access rights
+ */
+static int
+irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access)
+{
+ struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ u32 stag;
+ int err;
+ u8 lvl;
+
+ lvl = iwmr->page_cnt != 1 ? PBLE_LEVEL_1 | PBLE_LEVEL_2 : PBLE_LEVEL_0;
+
+ err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
+ if (err)
+ return err;
+
+ if (lvl) {
+ err = irdma_check_mr_contiguous(&iwpbl->pble_alloc,
+ iwmr->page_size);
+ if (err) {
+ irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
+ iwpbl->pbl_allocated = false;
+ }
+ }
+
+ stag = irdma_create_stag(iwdev);
+ if (!stag) {
+ err = -ENOMEM;
+ goto free_pble;
+ }
+
+ iwmr->stag = stag;
+ iwmr->ibmr.rkey = stag;
+ iwmr->ibmr.lkey = stag;
+ iwmr->access = access;
+ err = irdma_hwreg_mr(iwdev, iwmr, access);
+ if (err)
+ goto err_hwreg;
+
+ return 0;
+
+err_hwreg:
+ irdma_free_stag(iwdev, stag);
+
+free_pble:
+ if (iwpbl->pble_alloc.level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
+ irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
+
+ return err;
+}
+
+/**
+ * irdma_reg_user_mr_type_qp - handle QP memory registration
+ * @req: memory reg req
+ * @udata: user info
+ * @iwmr: irdma mr
+ */
+static int
+irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
+ struct ib_udata *udata,
+ struct irdma_mr *iwmr)
+{
+ struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_ucontext *ucontext;
+ unsigned long flags;
+ u32 total;
+ int err;
+ u8 lvl;
+
+ total = req.sq_pages + req.rq_pages + IRDMA_SHADOW_PGCNT;
+ if (total > iwmr->page_cnt)
+ return -EINVAL;
+
+ total = req.sq_pages + req.rq_pages;
+ lvl = total > 2 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
+ err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
+ if (err)
+ return err;
+
+ ucontext = to_ucontext(iwmr->ibmr.pd->uobject->context);
+ spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+ list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
+ iwpbl->on_list = true;
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+
+ return 0;
+}
+
+/**
+ * irdma_reg_user_mr_type_cq - handle CQ memory registration
+ * @req: memory reg req
+ * @udata: user info
+ * @iwmr: irdma mr
+ */
+static int
+irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
+ struct ib_udata *udata,
+ struct irdma_mr *iwmr)
+{
+ struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_ucontext *ucontext;
+ unsigned long flags;
+ u32 total;
+ int err;
+ u8 lvl;
+
+ total = req.cq_pages +
+ ((iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE) ? 0 : IRDMA_SHADOW_PGCNT);
+ if (total > iwmr->page_cnt)
+ return -EINVAL;
+
+ lvl = req.cq_pages > 1 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
+ err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
+ if (err)
+ return err;
+
+ ucontext = to_ucontext(iwmr->ibmr.pd->uobject->context);
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
+ iwpbl->on_list = true;
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+
+ return 0;
+}
+
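Taken together, the helpers encode the page-accounting rules: a QP registration must leave IRDMA_SHADOW_PGCNT page(s) beyond the SQ and RQ pages, while a CQ registration needs the shadow page only when IRDMA_FEATURE_CQ_RESIZE is absent. Worked example with illustrative values:

    /* QP: sq_pages = 2, rq_pages = 2 -> iwmr->page_cnt must be >= 5,
     * and sq_pages + rq_pages = 4 > 2 selects PBLE_LEVEL_1.
     */
    /* CQ on hardware with IRDMA_FEATURE_CQ_RESIZE: cq_pages = 1 -> one
     * page suffices and PBLE_LEVEL_0 is used.
     */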
/**
* irdma_reg_user_mr - Register a user memory region
* @pd: ptr of pd
@@ -2126,18 +2300,10 @@ irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
{
#define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
struct irdma_device *iwdev = to_iwdev(pd->device);
- struct irdma_ucontext *ucontext;
- struct irdma_pble_alloc *palloc;
- struct irdma_pbl *iwpbl;
- struct irdma_mr *iwmr;
- struct ib_umem *region;
struct irdma_mem_reg_req req = {};
- u32 total, stag = 0;
- u8 shadow_pgcnt = 1;
- unsigned long flags;
- int err = -EINVAL;
- u8 lvl;
- int ret;
+ struct ib_umem *region;
+ struct irdma_mr *iwmr;
+ int err;
if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
return ERR_PTR(-EINVAL);
@@ -2158,111 +2324,41 @@ irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
return ERR_PTR(-EFAULT);
}
- iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
- if (!iwmr) {
+ iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type);
+ if (IS_ERR(iwmr)) {
ib_umem_release(region);
- return ERR_PTR(-ENOMEM);
+ return (struct ib_mr *)iwmr;
}
- iwpbl = &iwmr->iwpbl;
- iwpbl->iwmr = iwmr;
- iwmr->region = region;
- iwmr->ibmr.pd = pd;
- iwmr->ibmr.device = pd->device;
- iwmr->ibmr.iova = virt;
- iwmr->page_size = IRDMA_HW_PAGE_SIZE;
- iwmr->page_msk = ~(IRDMA_HW_PAGE_SIZE - 1);
-
- iwmr->len = region->length;
- iwpbl->user_base = virt;
- palloc = &iwpbl->pble_alloc;
- iwmr->type = req.reg_type;
- iwmr->page_cnt = irdma_ib_umem_num_dma_blocks(region, iwmr->page_size, virt);
-
switch (req.reg_type) {
case IRDMA_MEMREG_TYPE_QP:
- total = req.sq_pages + req.rq_pages + shadow_pgcnt;
- if (total > iwmr->page_cnt) {
- err = -EINVAL;
- goto error;
- }
- total = req.sq_pages + req.rq_pages;
- lvl = total > 2 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
- err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
+ err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
if (err)
goto error;
- ucontext = to_ucontext(pd->uobject->context);
- spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
- list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
- iwpbl->on_list = true;
- spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
break;
case IRDMA_MEMREG_TYPE_CQ:
- if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
- shadow_pgcnt = 0;
- total = req.cq_pages + shadow_pgcnt;
- if (total > iwmr->page_cnt) {
- err = -EINVAL;
- goto error;
- }
-
- lvl = req.cq_pages > 1 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
- err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
+ err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
if (err)
goto error;
- ucontext = to_ucontext(pd->uobject->context);
- spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
- list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
- iwpbl->on_list = true;
- spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
break;
case IRDMA_MEMREG_TYPE_MEM:
- lvl = iwmr->page_cnt != 1 ? PBLE_LEVEL_1 | PBLE_LEVEL_2 : PBLE_LEVEL_0;
- err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
+ err = irdma_reg_user_mr_type_mem(iwmr, access);
if (err)
goto error;
- if (lvl) {
- ret = irdma_check_mr_contiguous(palloc,
- iwmr->page_size);
- if (ret) {
- irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
- iwpbl->pbl_allocated = false;
- }
- }
-
- stag = irdma_create_stag(iwdev);
- if (!stag) {
- err = -ENOMEM;
- goto error;
- }
-
- iwmr->stag = stag;
- iwmr->ibmr.rkey = stag;
- iwmr->ibmr.lkey = stag;
- iwmr->access = access;
- err = irdma_hwreg_mr(iwdev, iwmr, access);
- if (err) {
- irdma_free_stag(iwdev, stag);
- goto error;
- }
-
break;
default:
+ err = -EINVAL;
goto error;
}
- iwmr->type = req.reg_type;
-
return &iwmr->ibmr;
error:
- if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
- irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
ib_umem_release(region);
- kfree(iwmr);
+ irdma_free_iwmr(iwmr);
return ERR_PTR(err);
}
@@ -2710,8 +2806,7 @@ irdma_post_recv(struct ib_qp *ibqp,
err = irdma_uk_post_receive(ukqp, &post_recv);
if (err) {
irdma_debug(&iwqp->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
- "post_recv err %d\n",
- err);
+ "post_recv err %d\n", err);
goto out;
}
@@ -2761,64 +2856,6 @@ irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode)
}
}
-static inline void
-set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
- struct ib_wc *entry)
-{
- struct irdma_sc_qp *qp;
-
- switch (cq_poll_info->op_type) {
- case IRDMA_OP_TYPE_RDMA_WRITE:
- case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
- entry->opcode = IB_WC_RDMA_WRITE;
- break;
- case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
- case IRDMA_OP_TYPE_RDMA_READ:
- entry->opcode = IB_WC_RDMA_READ;
- break;
- case IRDMA_OP_TYPE_SEND_SOL:
- case IRDMA_OP_TYPE_SEND_SOL_INV:
- case IRDMA_OP_TYPE_SEND_INV:
- case IRDMA_OP_TYPE_SEND:
- entry->opcode = IB_WC_SEND;
- break;
- case IRDMA_OP_TYPE_FAST_REG_NSMR:
- entry->opcode = IB_WC_REG_MR;
- break;
- case IRDMA_OP_TYPE_INV_STAG:
- entry->opcode = IB_WC_LOCAL_INV;
- break;
- default:
- qp = cq_poll_info->qp_handle;
- irdma_dev_err(to_ibdev(qp->dev), "Invalid opcode = %d in CQE\n",
- cq_poll_info->op_type);
- entry->status = IB_WC_GENERAL_ERR;
- }
-}
-
-static inline void
-set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
- struct ib_wc *entry, bool send_imm_support)
-{
- /**
- * iWARP does not support sendImm, so the presence of Imm data
- * must be WriteImm.
- */
- if (!send_imm_support) {
- entry->opcode = cq_poll_info->imm_valid ?
- IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
- return;
- }
- switch (cq_poll_info->op_type) {
- case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
- case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
- entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
- break;
- default:
- entry->opcode = IB_WC_RECV;
- }
-}
-
/**
* irdma_process_cqe - process cqe info
* @entry: processed cqe
@@ -3004,8 +3041,7 @@ __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry)
return npolled;
error:
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
- "%s: Error polling CQ, irdma_err: %d\n",
- __func__, ret);
+ "%s: Error polling CQ, irdma_err: %d\n", __func__, ret);
return ret;
}
@@ -3173,12 +3209,8 @@ irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
int ret = 0;
bool ipv4;
u16 vlan_id;
- union {
- struct sockaddr saddr;
- struct sockaddr_in saddr_in;
- struct sockaddr_in6 saddr_in6;
- } sgid_addr;
- unsigned char dmac[ETH_ALEN];
+ union irdma_sockaddr sgid_addr;
+ unsigned char dmac[ETHER_ADDR_LEN];
rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
@@ -3188,9 +3220,8 @@ irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
irdma_netdev_vlan_ipv6(ip_addr, &vlan_id, NULL);
ipv4 = false;
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
- "qp_id=%d, IP6address=%pI6\n",
- ibqp->qp_num,
- ip_addr);
+ "qp_id=%d, IP6address=%x:%x:%x:%x\n", ibqp->qp_num,
+ IRDMA_PRINT_IP6(ip_addr));
irdma_mcast_mac_v6(ip_addr, dmac);
} else {
ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
@@ -3198,8 +3229,9 @@ irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
vlan_id = irdma_get_vlan_ipv4(ip_addr);
irdma_mcast_mac_v4(ip_addr, dmac);
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
- "qp_id=%d, IP4address=%pI4, MAC=%pM\n",
- ibqp->qp_num, ip_addr, dmac);
+ "qp_id=%d, IP4address=%x, MAC=%x:%x:%x:%x:%x:%x\n",
+ ibqp->qp_num, ip_addr[0], dmac[0], dmac[1], dmac[2],
+ dmac[3], dmac[4], dmac[5]);
}
spin_lock_irqsave(&rf->qh_list_lock, flags);
@@ -3310,11 +3342,7 @@ irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
struct irdma_mcast_grp_ctx_entry_info mcg_info = {0};
int ret;
unsigned long flags;
- union {
- struct sockaddr saddr;
- struct sockaddr_in saddr_in;
- struct sockaddr_in6 saddr_in6;
- } sgid_addr;
+ union irdma_sockaddr sgid_addr;
rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid))
@@ -3327,8 +3355,8 @@ irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
if (!mc_qht_elem) {
spin_unlock_irqrestore(&rf->qh_list_lock, flags);
- irdma_debug(&iwdev->rf->sc_dev,
- IRDMA_DEBUG_VERBS, "address not found MCG\n");
+ irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
+ "address not found MCG\n");
return 0;
}
@@ -3340,8 +3368,8 @@ irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
IRDMA_OP_MC_DESTROY);
if (ret) {
- irdma_debug(&iwdev->rf->sc_dev,
- IRDMA_DEBUG_VERBS, "failed MC_DESTROY MCG\n");
+ irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
+ "failed MC_DESTROY MCG\n");
spin_lock_irqsave(&rf->qh_list_lock, flags);
mcast_list_add(rf, mc_qht_elem);
spin_unlock_irqrestore(&rf->qh_list_lock, flags);
@@ -3358,8 +3386,8 @@ irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
IRDMA_OP_MC_MODIFY);
if (ret) {
- irdma_debug(&iwdev->rf->sc_dev,
- IRDMA_DEBUG_VERBS, "failed Modify MCG\n");
+ irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
+ "failed Modify MCG\n");
return ret;
}
}
@@ -3392,7 +3420,7 @@ irdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
return 0;
}
-static struct ifnet *
+static if_t
irdma_get_netdev(struct ib_device *ibdev, u8 port_num)
{
struct irdma_device *iwdev = to_iwdev(ibdev);
@@ -3447,6 +3475,7 @@ static void
irdma_set_device_mcast_ops(struct ib_device *ibdev)
{
struct ib_device *dev_ops = ibdev;
+
dev_ops->attach_mcast = irdma_attach_mcast;
dev_ops->detach_mcast = irdma_detach_mcast;
}
@@ -3455,6 +3484,7 @@ static void
irdma_set_device_roce_ops(struct ib_device *ibdev)
{
struct ib_device *dev_ops = ibdev;
+
dev_ops->create_ah = irdma_create_ah;
dev_ops->destroy_ah = irdma_destroy_ah;
dev_ops->get_link_layer = irdma_get_link_layer;
@@ -3499,7 +3529,7 @@ irdma_init_roce_device(struct irdma_device *iwdev)
kc_set_roce_uverbs_cmd_mask(iwdev);
iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
- IF_LLADDR(iwdev->netdev));
+ if_getlladdr(iwdev->netdev));
irdma_set_device_roce_ops(&iwdev->ibdev);
if (iwdev->rf->rdma_ver == IRDMA_GEN_2)
irdma_set_device_mcast_ops(&iwdev->ibdev);
@@ -3512,11 +3542,11 @@ irdma_init_roce_device(struct irdma_device *iwdev)
static int
irdma_init_iw_device(struct irdma_device *iwdev)
{
- struct ifnet *netdev = iwdev->netdev;
+ if_t netdev = iwdev->netdev;
iwdev->ibdev.node_type = RDMA_NODE_RNIC;
addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
- IF_LLADDR(netdev));
+ if_getlladdr(netdev));
iwdev->ibdev.iwcm = kzalloc(sizeof(*iwdev->ibdev.iwcm), GFP_KERNEL);
if (!iwdev->ibdev.iwcm)
return -ENOMEM;
diff --git a/sys/dev/irdma/irdma_verbs.h b/sys/dev/irdma/irdma_verbs.h
index c4553aaa8baf..890840c5d1d9 100644
--- a/sys/dev/irdma/irdma_verbs.h
+++ b/sys/dev/irdma/irdma_verbs.h
@@ -41,6 +41,8 @@
#define IRDMA_PKEY_TBL_SZ 1
#define IRDMA_DEFAULT_PKEY 0xFFFF
+#define IRDMA_SHADOW_PGCNT 1
+
#define iwdev_to_idev(iwdev) (&(iwdev)->rf->sc_dev)
struct irdma_ucontext {
@@ -68,14 +70,16 @@ struct irdma_pd {
spinlock_t udqp_list_lock;
};
+union irdma_sockaddr {
+ struct sockaddr_in saddr_in;
+ struct sockaddr_in6 saddr_in6;
+};
+
struct irdma_av {
u8 macaddr[16];
struct ib_ah_attr attrs;
- union {
- struct sockaddr saddr;
- struct sockaddr_in saddr_in;
- struct sockaddr_in6 saddr_in6;
- } sgid_addr, dgid_addr;
+ union irdma_sockaddr sgid_addr;
+ union irdma_sockaddr dgid_addr;
u8 net_type;
};
@@ -236,6 +240,7 @@ struct irdma_qp {
int max_recv_wr;
atomic_t close_timer_started;
spinlock_t lock; /* serialize posting WRs to SQ/RQ */
+ spinlock_t dwork_flush_lock; /* protect mod_delayed_work */
struct irdma_qp_context *iwqp_context;
void *pbl_vbase;
dma_addr_t pbl_pbase;
@@ -256,12 +261,13 @@ struct irdma_qp {
wait_queue_head_t waitq;
wait_queue_head_t mod_qp_waitq;
u8 rts_ae_rcvd;
- u8 active_conn : 1;
- u8 user_mode : 1;
- u8 hte_added : 1;
- u8 flush_issued : 1;
- u8 sig_all : 1;
- u8 pau_mode : 1;
+ bool active_conn:1;
+ bool user_mode:1;
+ bool hte_added:1;
+ bool flush_issued:1;
+ bool sig_all:1;
+ bool pau_mode:1;
+ bool suspend_pending:1;
};
struct irdma_udqs_work {
@@ -294,6 +300,63 @@ static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}
+static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
+ struct ib_wc *entry)
+{
+ struct irdma_sc_qp *qp;
+
+ switch (cq_poll_info->op_type) {
+ case IRDMA_OP_TYPE_RDMA_WRITE:
+ case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
+ entry->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
+ case IRDMA_OP_TYPE_RDMA_READ:
+ entry->opcode = IB_WC_RDMA_READ;
+ break;
+ case IRDMA_OP_TYPE_SEND_SOL:
+ case IRDMA_OP_TYPE_SEND_SOL_INV:
+ case IRDMA_OP_TYPE_SEND_INV:
+ case IRDMA_OP_TYPE_SEND:
+ entry->opcode = IB_WC_SEND;
+ break;
+ case IRDMA_OP_TYPE_FAST_REG_NSMR:
+ entry->opcode = IB_WC_REG_MR;
+ break;
+ case IRDMA_OP_TYPE_INV_STAG:
+ entry->opcode = IB_WC_LOCAL_INV;
+ break;
+ default:
+ qp = cq_poll_info->qp_handle;
+ irdma_dev_err(to_ibdev(qp->dev), "Invalid opcode = %d in CQE\n",
+ cq_poll_info->op_type);
+ entry->status = IB_WC_GENERAL_ERR;
+ }
+}
+
+static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
+ struct ib_wc *entry, bool send_imm_support)
+{
+	/*
+	 * iWARP does not support send with immediate, so any immediate
+	 * data present must come from an RDMA Write with immediate.
+	 */
+ if (!send_imm_support) {
+ entry->opcode = cq_poll_info->imm_valid ?
+ IB_WC_RECV_RDMA_WITH_IMM :
+ IB_WC_RECV;
+ return;
+ }
+ switch (cq_poll_info->op_type) {
+ case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
+ case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
+ entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ break;
+ default:
+ entry->opcode = IB_WC_RECV;
+ }
+}
+
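A hedged sketch of how the poll path is expected to pick between the two helpers; the q_type field, IRDMA_CQE_QTYPE_SQ, and the IRDMA_SEND_WITH_IMM capability test are assumed names, not shown in this diff:

    if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ)
            set_ib_wc_op_sq(cq_poll_info, entry);
    else
            set_ib_wc_op_rq(cq_poll_info, entry,
                            qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM);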
/**
* irdma_mcast_mac_v4 - Get the multicast MAC for an IP address
* @ip_addr: IPv4 address
@@ -303,8 +366,8 @@ static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
static inline void irdma_mcast_mac_v4(u32 *ip_addr, u8 *mac)
{
u8 *ip = (u8 *)ip_addr;
- unsigned char mac4[ETH_ALEN] = {0x01, 0x00, 0x5E, ip[2] & 0x7F, ip[1],
- ip[0]};
+ unsigned char mac4[ETHER_ADDR_LEN] = {0x01, 0x00, 0x5E, ip[2] & 0x7F,
+ ip[1], ip[0]};
ether_addr_copy(mac, mac4);
}
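This is the RFC 1112 mapping: the low 23 bits of the group address are copied into 01:00:5E:xx:xx:xx. A worked example, assuming the u32 holds the address in host byte order on a little-endian machine (as the ntohl() call sites arrange):

    u32 grp = 0xEF010203;            /* 239.1.2.3 after ntohl() */
    u8 mac[ETHER_ADDR_LEN];

    irdma_mcast_mac_v4(&grp, mac);   /* mac = 01:00:5E:01:02:03 */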
@@ -318,7 +381,8 @@ static inline void irdma_mcast_mac_v4(u32 *ip_addr, u8 *mac)
static inline void irdma_mcast_mac_v6(u32 *ip_addr, u8 *mac)
{
u8 *ip = (u8 *)ip_addr;
- unsigned char mac6[ETH_ALEN] = {0x33, 0x33, ip[3], ip[2], ip[1], ip[0]};
+ unsigned char mac6[ETHER_ADDR_LEN] = {0x33, 0x33, ip[3], ip[2], ip[1],
+ ip[0]};
ether_addr_copy(mac, mac6);
}
diff --git a/sys/dev/irdma/irdma_ws.c b/sys/dev/irdma/irdma_ws.c
index a9a0bd38e1b2..a2afba5c738a 100644
--- a/sys/dev/irdma/irdma_ws.c
+++ b/sys/dev/irdma/irdma_ws.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2017 - 2022 Intel Corporation
+ * Copyright (c) 2017 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -292,13 +292,13 @@ irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
ws_tree_root = vsi->dev->ws_tree_root;
if (!ws_tree_root) {
- irdma_debug(vsi->dev, IRDMA_DEBUG_WS, "Creating root node\n");
ws_tree_root = irdma_alloc_node(vsi, user_pri,
WS_NODE_TYPE_PARENT, NULL);
if (!ws_tree_root) {
ret = -ENOMEM;
goto exit;
}
+ irdma_debug(vsi->dev, IRDMA_DEBUG_WS, "Creating root node = %d\n", ws_tree_root->index);
ret = irdma_ws_cqp_cmd(vsi, ws_tree_root, IRDMA_OP_WS_ADD_NODE);
if (ret) {
diff --git a/sys/dev/irdma/osdep.h b/sys/dev/irdma/osdep.h
index b96aa2b4d00f..a336df4a66b9 100644
--- a/sys/dev/irdma/osdep.h
+++ b/sys/dev/irdma/osdep.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2021 - 2022 Intel Corporation
+ * Copyright (c) 2021 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -50,11 +50,11 @@
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+#include <linux/atomic.h>
#include <sys/bus.h>
#include <machine/bus.h>
-#define ATOMIC atomic_t
#define IOMEM
#define IRDMA_NTOHS(a) ntohs(a)
#define MAKEMASK(m, s) ((m) << (s))
@@ -85,7 +85,7 @@
#define STATS_TIMER_DELAY 60000
/* a couple of linux size defines */
-#define SZ_128 128
+#define SZ_128 128
#define SPEED_1000 1000
#define SPEED_10000 10000
#define SPEED_20000 20000
@@ -108,7 +108,7 @@
#define irdma_print(S, ...) printf("%s:%d "S, __FUNCTION__, __LINE__, ##__VA_ARGS__)
#define irdma_debug_buf(dev, mask, desc, buf, size) \
do { \
- u32 i; \
+ u32 i; \
if (!((mask) & (dev)->debug_mask)) { \
break; \
} \
@@ -118,14 +118,14 @@ do { \
irdma_debug(dev, mask, "index %03d val: %016lx\n", i, ((unsigned long *)(buf))[i / 8]); \
} while(0)
-#define irdma_debug(h, m, s, ...) \
-do { \
- if (!(h)) { \
- if ((m) == IRDMA_DEBUG_INIT) \
+#define irdma_debug(h, m, s, ...) \
+do { \
+ if (!(h)) { \
+ if ((m) == IRDMA_DEBUG_INIT) \
printf("irdma INIT " s, ##__VA_ARGS__); \
- } else if (((m) & (h)->debug_mask)) { \
- printf("irdma " s, ##__VA_ARGS__); \
- } \
+ } else if (((m) & (h)->debug_mask)) { \
+ printf("irdma " s, ##__VA_ARGS__); \
+ } \
} while (0)
#define irdma_dev_err(ibdev, fmt, ...) \
pr_err("%s:%s:%d ERR "fmt, (ibdev)->name, __func__, __LINE__, ##__VA_ARGS__)
@@ -134,17 +134,8 @@ do { \
#define irdma_dev_info(a, b, ...) printf(b, ##__VA_ARGS__)
#define irdma_pr_warn printf
-#define dump_struct(s, sz, name) \
-do { \
- unsigned char *a; \
- printf("%s %u", (name), (unsigned int)(sz)); \
- for (a = (unsigned char*)(s); a < (unsigned char *)(s) + (sz) ; a ++) { \
- if ((u64)a % 8 == 0) \
- printf("\n%p ", a); \
- printf("%2x ", *a); \
- } \
- printf("\n"); \
-}while(0)
+#define IRDMA_PRINT_IP6(ip6) \
+ ((u32*)ip6)[0], ((u32*)ip6)[1], ((u32*)ip6)[2], ((u32*)ip6)[3]
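IRDMA_PRINT_IP6() expands to four comma-separated u32 values, so the format string must supply exactly four conversions, as the irdma_verbs.c hunk above does:

    u32 ip6[4] = {0};
    /* ... fill in the address ... */
    irdma_debug(dev, IRDMA_DEBUG_VERBS, "IP6address=%x:%x:%x:%x\n",
                IRDMA_PRINT_IP6(ip6));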
/*
* debug definition end
@@ -173,6 +164,7 @@ struct irdma_dev_ctx {
bus_size_t mem_bus_space_size;
void *dev;
struct irdma_task_arg task_arg;
+ atomic_t event_rfcnt;
};
#define irdma_pr_info(fmt, args ...) printf("%s: WARN "fmt, __func__, ## args)
@@ -184,38 +176,34 @@ struct irdma_dev_ctx {
#define rt_tos2priority(tos) (tos >> 5)
#define ah_attr_to_dmac(attr) ((attr).dmac)
-#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \
- ib_modify_qp_is_ok(cur_state, next_state, type, mask)
-#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \
- ib_gid_to_network_type(gid_type, gid)
#define irdma_del_timer_compat(tt) del_timer((tt))
#define IRDMA_TAILQ_FOREACH CK_STAILQ_FOREACH
#define IRDMA_TAILQ_FOREACH_SAFE CK_STAILQ_FOREACH_SAFE
#define between(a, b, c) (bool)(c-a >= b-a)
-#define rd32(a, reg) irdma_rd32((a)->dev_context, (reg))
-#define wr32(a, reg, value) irdma_wr32((a)->dev_context, (reg), (value))
+#define rd32(a, reg) irdma_rd32((a)->dev_context, (reg))
+#define wr32(a, reg, value) irdma_wr32((a)->dev_context, (reg), (value))
-#define rd64(a, reg) irdma_rd64((a)->dev_context, (reg))
-#define wr64(a, reg, value) irdma_wr64((a)->dev_context, (reg), (value))
+#define rd64(a, reg) irdma_rd64((a)->dev_context, (reg))
+#define wr64(a, reg, value) irdma_wr64((a)->dev_context, (reg), (value))
#define db_wr32(value, a) writel((value), (a))
void *hw_to_dev(struct irdma_hw *hw);
struct irdma_dma_mem {
- void *va;
- u64 pa;
+ void *va;
+ u64 pa;
bus_dma_tag_t tag;
bus_dmamap_t map;
bus_dma_segment_t seg;
bus_size_t size;
- int nseg;
- int flags;
+ int nseg;
+ int flags;
};
struct irdma_virt_mem {
- void *va;
- u32 size;
+ void *va;
+ u32 size;
};
struct irdma_dma_info {
diff --git a/sys/modules/irdma/Makefile b/sys/modules/irdma/Makefile
index f212a8a1a658..b2ffb67ca66f 100644
--- a/sys/modules/irdma/Makefile
+++ b/sys/modules/irdma/Makefile
@@ -14,7 +14,7 @@ SRCS+= irdma_ctrl.c irdma_hmc.c icrdma_hw.c irdma_pble.c irdma_puda.c irdma_uda.
CFLAGS+= -I${ICE_DIR} -I${OFED_INC_DIR}
CFLAGS+= -I${OFED_INC_DIR}/uapi
-CFLAGS+= -I${.CURDIR}/../../compat/linuxkpi/common/include
+CFLAGS+= ${LINUXKPI_INCLUDES}
CFLAGS+= -DCONFIG_INFINIBAND_USER_MEM
ice_rdma.h: