author     Hans Petter Selasky <hselasky@FreeBSD.org>  2021-06-16 13:01:54 +0000
committer  Hans Petter Selasky <hselasky@FreeBSD.org>  2021-07-12 12:22:33 +0000
commit     c3987b8ea793c11f61fecb14ef93195a23e3522c (patch)
tree       cd1e88ef999b0c43bdcc42c0306e280b6159906a /sys/dev/mlx5
parent     4fb0a74e081772fc6fc2a43222ee072fd089bf5f (diff)
ibcore: Declare ib_post_send() and ib_post_recv() arguments const
Since neither ib_post_send() nor ib_post_recv() modifies the data structure
its second argument points at, declare that argument const. This change makes
it necessary to declare the 'bad_wr' argument const too, and also to modify
all ULPs that call ib_post_send(), ib_post_recv() or ib_post_srq_recv().

This patch does not change any functionality, but it makes it possible for
the compiler to verify that the ib_post_(send|recv|srq_recv) functions really
do not modify the posted work request.

Linux commit:
f696bf6d64b195b83ca1bdb7cd33c999c9dcf514
7bb1fafc2f163ad03a2007295bb2f57cfdbfb630
d34ac5cd3a73aacd11009c4fc3ba15d7ea62c411

MFC after:	1 week
Reviewed by:	kib
Sponsored by:	Mellanox Technologies // NVIDIA Networking
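
For illustration, a minimal sketch of what the constified verbs API looks
like at a ULP call site; the helper post_one_recv() and its arguments are
hypothetical and not part of this patch:

	#include <rdma/ib_verbs.h>

	/* Hypothetical helper posting a single receive work request.
	 * Under this patch only the type of "bad_wr" changes. */
	static int
	post_one_recv(struct ib_qp *qp, struct ib_sge *sge, u64 wr_id)
	{
		struct ib_recv_wr wr = {
			.wr_id   = wr_id,
			.sg_list = sge,
			.num_sge = 1,
		};
		/* Was: struct ib_recv_wr *bad_wr; */
		const struct ib_recv_wr *bad_wr;

		/*
		 * ib_post_recv() now takes "const struct ib_recv_wr *"
		 * and "const struct ib_recv_wr **", so the compiler
		 * rejects any attempt to write through the posted chain.
		 * On failure, *bad_wr points at the first work request
		 * that could not be posted.
		 */
		return (ib_post_recv(qp, &wr, &bad_wr));
	}
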
Diffstat (limited to 'sys/dev/mlx5')
-rw-r--r--  sys/dev/mlx5/mlx5_ib/mlx5_ib.h      22
-rw-r--r--  sys/dev/mlx5/mlx5_ib/mlx5_ib_gsi.c   8
-rw-r--r--  sys/dev/mlx5/mlx5_ib/mlx5_ib_mr.c   43
-rw-r--r--  sys/dev/mlx5/mlx5_ib/mlx5_ib_qp.c   70
-rw-r--r--  sys/dev/mlx5/mlx5_ib/mlx5_ib_srq.c   4
5 files changed, 74 insertions, 73 deletions
diff --git a/sys/dev/mlx5/mlx5_ib/mlx5_ib.h b/sys/dev/mlx5/mlx5_ib/mlx5_ib.h
index 44a9aa307be7..695b1ab14948 100644
--- a/sys/dev/mlx5/mlx5_ib/mlx5_ib.h
+++ b/sys/dev/mlx5/mlx5_ib/mlx5_ib.h
@@ -449,7 +449,7 @@ struct mlx5_umr_wr {
u32 mkey;
};
-static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
+static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
{
return container_of(wr, struct mlx5_umr_wr, wr);
}
@@ -837,8 +837,8 @@ int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
-int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
@@ -847,10 +847,10 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
-int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
void *buffer, u32 length,
@@ -994,10 +994,10 @@ int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr);
-int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
diff --git a/sys/dev/mlx5/mlx5_ib/mlx5_ib_gsi.c b/sys/dev/mlx5/mlx5_ib/mlx5_ib_gsi.c
index 6c0417851665..dc90b1348378 100644
--- a/sys/dev/mlx5/mlx5_ib/mlx5_ib_gsi.c
+++ b/sys/dev/mlx5/mlx5_ib/mlx5_ib_gsi.c
@@ -472,8 +472,8 @@ static struct ib_qp *get_tx_qp(struct mlx5_ib_gsi_qp *gsi, struct ib_ud_wr *wr)
return gsi->tx_qps[qp_index];
}
-int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
struct ib_qp *tx_qp;
@@ -517,8 +517,8 @@ err:
return ret;
}
-int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
diff --git a/sys/dev/mlx5/mlx5_ib/mlx5_ib_mr.c b/sys/dev/mlx5/mlx5_ib/mlx5_ib_mr.c
index 585e52d35d3b..145ec55d6757 100644
--- a/sys/dev/mlx5/mlx5_ib/mlx5_ib_mr.c
+++ b/sys/dev/mlx5/mlx5_ib/mlx5_ib_mr.c
@@ -573,41 +573,38 @@ static int dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
return 0;
}
-static void prep_umr_wqe_common(struct ib_pd *pd, struct ib_send_wr *wr,
+static void prep_umr_wqe_common(struct ib_pd *pd, struct mlx5_umr_wr *umrwr,
struct ib_sge *sg, u64 dma, int n, u32 key,
int page_shift)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_umr_wr *umrwr = umr_wr(wr);
sg->addr = dma;
sg->length = ALIGN(sizeof(u64) * n, 64);
sg->lkey = dev->umrc.pd->local_dma_lkey;
- wr->next = NULL;
- wr->sg_list = sg;
+ umrwr->wr.next = NULL;
+ umrwr->wr.sg_list = sg;
if (n)
- wr->num_sge = 1;
+ umrwr->wr.num_sge = 1;
else
- wr->num_sge = 0;
+ umrwr->wr.num_sge = 0;
- wr->opcode = MLX5_IB_WR_UMR;
+ umrwr->wr.opcode = MLX5_IB_WR_UMR;
umrwr->npages = n;
umrwr->page_shift = page_shift;
umrwr->mkey = key;
}
-static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
+static void prep_umr_reg_wqe(struct ib_pd *pd, struct mlx5_umr_wr *umrwr,
struct ib_sge *sg, u64 dma, int n, u32 key,
int page_shift, u64 virt_addr, u64 len,
int access_flags)
{
- struct mlx5_umr_wr *umrwr = umr_wr(wr);
+ prep_umr_wqe_common(pd, umrwr, sg, dma, n, key, page_shift);
- prep_umr_wqe_common(pd, wr, sg, dma, n, key, page_shift);
-
- wr->send_flags = 0;
+ umrwr->wr.send_flags = 0;
umrwr->target.virt_addr = virt_addr;
umrwr->length = len;
@@ -616,12 +613,10 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
}
static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
- struct ib_send_wr *wr, u32 key)
+ struct mlx5_umr_wr *umrwr, u32 key)
{
- struct mlx5_umr_wr *umrwr = umr_wr(wr);
-
- wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
- wr->opcode = MLX5_IB_WR_UMR;
+ umrwr->wr.send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
+ umrwr->wr.opcode = MLX5_IB_WR_UMR;
umrwr->mkey = key;
}
@@ -675,7 +670,7 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
struct umr_common *umrc = &dev->umrc;
struct mlx5_ib_umr_context umr_context;
struct mlx5_umr_wr umrwr = {};
- struct ib_send_wr *bad;
+ const struct ib_send_wr *bad;
struct mlx5_ib_mr *mr;
struct ib_sge sg;
int size;
@@ -707,7 +702,7 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
mlx5_ib_init_umr_context(&umr_context);
umrwr.wr.wr_cqe = &umr_context.cqe;
- prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
+ prep_umr_reg_wqe(pd, &umrwr, &sg, dma, npages, mr->mmkey.key,
page_shift, virt_addr, len, access_flags);
down(&umrc->sem);
@@ -756,7 +751,7 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
int size;
__be64 *pas;
dma_addr_t dma;
- struct ib_send_wr *bad;
+ const struct ib_send_wr *bad;
struct mlx5_umr_wr wr;
struct ib_sge sg;
int err = 0;
@@ -1026,7 +1021,7 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
struct umr_common *umrc = &dev->umrc;
struct mlx5_ib_umr_context umr_context;
struct mlx5_umr_wr umrwr = {};
- struct ib_send_wr *bad;
+ const struct ib_send_wr *bad;
int err;
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
@@ -1035,7 +1030,7 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
mlx5_ib_init_umr_context(&umr_context);
umrwr.wr.wr_cqe = &umr_context.cqe;
- prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmkey.key);
+ prep_umr_unreg_wqe(dev, &umrwr, mr->mmkey.key);
down(&umrc->sem);
err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
@@ -1065,7 +1060,7 @@ static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct device *ddev = dev->ib_dev.dma_device;
struct mlx5_ib_umr_context umr_context;
- struct ib_send_wr *bad;
+ const struct ib_send_wr *bad;
struct mlx5_umr_wr umrwr = {};
struct ib_sge sg;
struct umr_common *umrc = &dev->umrc;
@@ -1090,7 +1085,7 @@ static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
}
- prep_umr_wqe_common(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
+ prep_umr_wqe_common(pd, &umrwr, &sg, dma, npages, mr->mmkey.key,
page_shift);
if (flags & IB_MR_REREG_PD) {
diff --git a/sys/dev/mlx5/mlx5_ib/mlx5_ib_qp.c b/sys/dev/mlx5/mlx5_ib/mlx5_ib_qp.c
index ec47b3e07b87..90c6d69e30c2 100644
--- a/sys/dev/mlx5/mlx5_ib/mlx5_ib_qp.c
+++ b/sys/dev/mlx5/mlx5_ib/mlx5_ib_qp.c
@@ -3028,7 +3028,7 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
}
static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
- struct ib_send_wr *wr, void *qend,
+ const struct ib_send_wr *wr, void *qend,
struct mlx5_ib_qp *qp, int *size)
{
void *seg = eseg;
@@ -3081,7 +3081,7 @@ static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
}
static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
- struct ib_send_wr *wr)
+ const struct ib_send_wr *wr)
{
memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
@@ -3240,9 +3240,9 @@ static __be64 get_umr_update_pd_mask(void)
}
static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
- struct ib_send_wr *wr)
+ const struct ib_send_wr *wr)
{
- struct mlx5_umr_wr *umrwr = umr_wr(wr);
+ const struct mlx5_umr_wr *umrwr = umr_wr(wr);
memset(umr, 0, sizeof(*umr));
@@ -3311,9 +3311,9 @@ static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
seg->status = MLX5_MKEY_STATUS_FREE;
}
-static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
+static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, const struct ib_send_wr *wr)
{
- struct mlx5_umr_wr *umrwr = umr_wr(wr);
+ const struct mlx5_umr_wr *umrwr = umr_wr(wr);
memset(seg, 0, sizeof(*seg));
if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
@@ -3344,7 +3344,7 @@ static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
}
-static __be32 send_ieth(struct ib_send_wr *wr)
+static __be32 send_ieth(const struct ib_send_wr *wr)
{
switch (wr->opcode) {
case IB_WR_SEND_WITH_IMM:
@@ -3376,7 +3376,7 @@ static u8 wq_sig(void *wqe)
return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
}
-static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
+static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
void *wqe, int *sz)
{
struct mlx5_wqe_inline_seg *seg;
@@ -3522,7 +3522,7 @@ static int mlx5_set_bsf(struct ib_mr *sig_mr,
return 0;
}
-static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
+static int set_sig_data_segment(const struct ib_sig_handover_wr *wr,
struct mlx5_ib_qp *qp, void **seg, int *size)
{
struct ib_sig_attrs *sig_attrs = wr->sig_attrs;
@@ -3624,7 +3624,7 @@ static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
}
static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
- struct ib_sig_handover_wr *wr, u32 nelements,
+ const struct ib_sig_handover_wr *wr, u32 nelements,
u32 length, u32 pdn)
{
struct ib_mr *sig_mr = wr->sig_mr;
@@ -3655,10 +3655,10 @@ static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
}
-static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
+static int set_sig_umr_wr(const struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
void **seg, int *size)
{
- struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
+ const struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
u32 pdn = get_pd(qp)->pdn;
u32 klm_oct_size;
@@ -3732,7 +3732,7 @@ static int set_psv_wr(struct ib_sig_domain *domain,
}
static int set_reg_wr(struct mlx5_ib_qp *qp,
- struct ib_reg_wr *wr,
+ const struct ib_reg_wr *wr,
void **seg, int *size)
{
struct mlx5_ib_mr *mr = to_mmr(wr->mr);
@@ -3797,7 +3797,7 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
}
}
-static u8 get_fence(u8 fence, struct ib_send_wr *wr)
+static u8 get_fence(u8 fence, const struct ib_send_wr *wr)
{
if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
wr->send_flags & IB_SEND_FENCE))
@@ -3815,10 +3815,10 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr)
return 0;
}
-static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
- struct mlx5_wqe_ctrl_seg **ctrl,
- struct ib_send_wr *wr, unsigned *idx,
- int *size, int nreq)
+static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+ struct mlx5_wqe_ctrl_seg **ctrl,
+ const struct ib_send_wr *wr, unsigned *idx,
+ int *size, int nreq, bool send_signaled, bool solicited)
{
if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
return -ENOMEM;
@@ -3829,10 +3829,8 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
*(uint32_t *)(*seg + 8) = 0;
(*ctrl)->imm = send_ieth(wr);
(*ctrl)->fm_ce_se = qp->sq_signal_bits |
- (wr->send_flags & IB_SEND_SIGNALED ?
- MLX5_WQE_CTRL_CQ_UPDATE : 0) |
- (wr->send_flags & IB_SEND_SOLICITED ?
- MLX5_WQE_CTRL_SOLICITED : 0);
+ (send_signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0) |
+ (solicited ? MLX5_WQE_CTRL_SOLICITED : 0);
*seg += sizeof(**ctrl);
*size = sizeof(**ctrl) / 16;
@@ -3840,6 +3838,16 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
return 0;
}
+static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+ struct mlx5_wqe_ctrl_seg **ctrl,
+ const struct ib_send_wr *wr, unsigned *idx,
+ int *size, int nreq)
+{
+ return __begin_wqe(qp, seg, ctrl, wr, idx, size, nreq,
+ wr->send_flags & IB_SEND_SIGNALED,
+ wr->send_flags & IB_SEND_SOLICITED);
+}
+
static void finish_wqe(struct mlx5_ib_qp *qp,
struct mlx5_wqe_ctrl_seg *ctrl,
u8 size, unsigned idx, u64 wr_id,
@@ -3864,8 +3872,8 @@ static void finish_wqe(struct mlx5_ib_qp *qp,
}
-int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
@@ -3993,10 +4001,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
* SET_PSV WQEs are not signaled and solicited
* on error
*/
- wr->send_flags &= ~IB_SEND_SIGNALED;
- wr->send_flags |= IB_SEND_SOLICITED;
- err = begin_wqe(qp, &seg, &ctrl, wr,
- &idx, &size, nreq);
+ err = __begin_wqe(qp, &seg, &ctrl, wr,
+ &idx, &size, nreq, false, true);
if (err) {
mlx5_ib_warn(dev, "\n");
err = -ENOMEM;
@@ -4016,8 +4022,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
finish_wqe(qp, ctrl, size, idx, wr->wr_id,
nreq, get_fence(fence, wr),
next_fence, MLX5_OPCODE_SET_PSV);
- err = begin_wqe(qp, &seg, &ctrl, wr,
- &idx, &size, nreq);
+ err = __begin_wqe(qp, &seg, &ctrl, wr,
+ &idx, &size, nreq, false, true);
if (err) {
mlx5_ib_warn(dev, "\n");
err = -ENOMEM;
@@ -4183,8 +4189,8 @@ static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
sig->signature = calc_sig(sig, size);
}
-int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mlx5_ib_qp *qp = to_mqp(ibqp);
struct mlx5_wqe_data_seg *scat;
diff --git a/sys/dev/mlx5/mlx5_ib/mlx5_ib_srq.c b/sys/dev/mlx5/mlx5_ib/mlx5_ib_srq.c
index 42e10f9a50de..f0d09a2aca8d 100644
--- a/sys/dev/mlx5/mlx5_ib/mlx5_ib_srq.c
+++ b/sys/dev/mlx5/mlx5_ib/mlx5_ib_srq.c
@@ -427,8 +427,8 @@ void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
spin_unlock(&srq->lock);
}
-int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mlx5_ib_srq *srq = to_msrq(ibsrq);
struct mlx5_wqe_srq_next_seg *next;