Diffstat (limited to 'sys/dev/cxgbe/iw_cxgbe')
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/device.c    |  20
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h  |   5
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/mem.c       | 103
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/qp.c        |   2
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/resource.c  |  38
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/t4.h        |   1
6 files changed, 35 insertions, 134 deletions
diff --git a/sys/dev/cxgbe/iw_cxgbe/device.c b/sys/dev/cxgbe/iw_cxgbe/device.c
index 3c4d269f6c69..4610f91e96ac 100644
--- a/sys/dev/cxgbe/iw_cxgbe/device.c
+++ b/sys/dev/cxgbe/iw_cxgbe/device.c
@@ -132,26 +132,21 @@ c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->stats.rqt.total = sc->vres.rq.size;
rdev->stats.qid.total = sc->vres.qp.size;
- rc = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
+ rc = c4iw_init_resource(rdev, T4_MAX_NUM_PD);
if (rc) {
device_printf(sc->dev, "error %d initializing resources\n", rc);
goto err1;
}
- rc = c4iw_pblpool_create(rdev);
- if (rc) {
- device_printf(sc->dev, "error %d initializing pbl pool\n", rc);
- goto err2;
- }
rc = c4iw_rqtpool_create(rdev);
if (rc) {
device_printf(sc->dev, "error %d initializing rqt pool\n", rc);
- goto err3;
+ goto err2;
}
rdev->status_page = (struct t4_dev_status_page *)
__get_free_page(GFP_KERNEL);
if (!rdev->status_page) {
rc = -ENOMEM;
- goto err4;
+ goto err3;
}
rdev->status_page->qp_start = sc->vres.qp.start;
rdev->status_page->qp_size = sc->vres.qp.size;
@@ -168,15 +163,13 @@ c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
if (!rdev->free_workq) {
rc = -ENOMEM;
- goto err5;
+ goto err4;
}
return (0);
-err5:
- free_page((unsigned long)rdev->status_page);
err4:
- c4iw_rqtpool_destroy(rdev);
+ free_page((unsigned long)rdev->status_page);
err3:
- c4iw_pblpool_destroy(rdev);
+ c4iw_rqtpool_destroy(rdev);
err2:
c4iw_destroy_resource(&rdev->resource);
err1:
@@ -186,7 +179,6 @@ err1:
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
free_page((unsigned long)rdev->status_page);
- c4iw_pblpool_destroy(rdev);
c4iw_rqtpool_destroy(rdev);
c4iw_destroy_resource(&rdev->resource);
}
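
For reference, this is how the teardown path reads once the PBL pool moves into the shared t4 code; it is reconstructed from the hunk above (context lines plus additions), so nothing here is new beyond the comment:

/* c4iw_rdev_close() after this change: the PBL arena is no longer owned
 * by iw_cxgbe, so only the status page, the RQT pool and the id tables
 * are released here. */
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	free_page((unsigned long)rdev->status_page);
	c4iw_rqtpool_destroy(rdev);
	c4iw_destroy_resource(&rdev->resource);
}

The open path unwinds in the same order, which is why the err2/err3/err4 labels in c4iw_rdev_open() shift up by one once the pbl pool create step disappears.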
diff --git a/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h b/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
index ca2595b65b02..47ce10562c66 100644
--- a/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
+++ b/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
@@ -99,7 +99,6 @@ struct c4iw_id_table {
};
struct c4iw_resource {
- struct c4iw_id_table tpt_table;
struct c4iw_id_table qid_table;
struct c4iw_id_table pdid_table;
};
@@ -904,11 +903,9 @@ int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
struct l2t_entry *l2t);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
-int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
+int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_pdid);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
-int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
-void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
diff --git a/sys/dev/cxgbe/iw_cxgbe/mem.c b/sys/dev/cxgbe/iw_cxgbe/mem.c
index 4a1adc118b7c..ae0aa0edc17a 100644
--- a/sys/dev/cxgbe/iw_cxgbe/mem.c
+++ b/sys/dev/cxgbe/iw_cxgbe/mem.c
@@ -56,46 +56,23 @@ mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
static int
_c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr, u32 len,
- void *data, int wait)
+ dma_addr_t data, int wait)
{
struct adapter *sc = rdev->adap;
- struct ulp_mem_io *ulpmc;
- struct ulptx_sgl *sgl;
u8 wr_len;
int ret = 0;
struct c4iw_wr_wait wr_wait;
struct wrqe *wr;
- addr &= 0x7FFFFFF;
-
if (wait)
c4iw_init_wr_wait(&wr_wait);
- wr_len = roundup(sizeof *ulpmc + sizeof *sgl, 16);
+ wr_len = T4_WRITE_MEM_DMA_LEN;
wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
if (wr == NULL)
return -ENOMEM;
- ulpmc = wrtod(wr);
-
- memset(ulpmc, 0, wr_len);
- INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);
- ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
- (wait ? F_FW_WR_COMPL : 0));
- ulpmc->wr.wr_lo = wait ? (u64)(unsigned long)&wr_wait : 0;
- ulpmc->wr.wr_mid = cpu_to_be32(V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
- ulpmc->cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
- V_T5_ULP_MEMIO_ORDER(1) |
- V_T5_ULP_MEMIO_FID(sc->sge.ofld_rxq[0].iq.abs_id));
- ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(len>>5));
- ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(ulpmc->wr), 16));
- ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr));
-
- sgl = (struct ulptx_sgl *)(ulpmc + 1);
- sgl->cmd_nsge = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
- V_ULPTX_NSGE(1));
- sgl->len0 = cpu_to_be32(len);
- sgl->addr0 = cpu_to_be64((u64)data);
-
+ t4_write_mem_dma_wr(sc, wrtod(wr), wr_len, 0, addr, len, data,
+ wait ? (u64)(unsigned long)&wr_wait : 0);
t4_wrq_tx(sc, wr);
if (wait)
@@ -108,70 +85,32 @@ static int
_c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
struct adapter *sc = rdev->adap;
- struct ulp_mem_io *ulpmc;
- struct ulptx_idata *ulpsc;
- u8 wr_len, *to_dp, *from_dp;
+ u8 wr_len, *from_dp;
int copy_len, num_wqe, i, ret = 0;
struct c4iw_wr_wait wr_wait;
struct wrqe *wr;
- u32 cmd;
-
- cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
- cmd |= cpu_to_be32(F_T5_ULP_MEMIO_IMM);
-
- addr &= 0x7FFFFFF;
CTR3(KTR_IW_CXGBE, "%s addr 0x%x len %u", __func__, addr, len);
- num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
c4iw_init_wr_wait(&wr_wait);
+ num_wqe = DIV_ROUND_UP(len, T4_MAX_INLINE_SIZE);
+ from_dp = data;
for (i = 0; i < num_wqe; i++) {
-
- copy_len = min(len, C4IW_MAX_INLINE_SIZE);
- wr_len = roundup(sizeof *ulpmc + sizeof *ulpsc +
- roundup(copy_len, T4_ULPTX_MIN_IO), 16);
+ copy_len = min(len, T4_MAX_INLINE_SIZE);
+ wr_len = T4_WRITE_MEM_INLINE_LEN(copy_len);
wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
if (wr == NULL)
return -ENOMEM;
- ulpmc = wrtod(wr);
-
- memset(ulpmc, 0, wr_len);
- INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);
-
- if (i == (num_wqe-1)) {
- ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
- F_FW_WR_COMPL);
- ulpmc->wr.wr_lo =
- (__force __be64)(unsigned long) &wr_wait;
- } else
- ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR));
- ulpmc->wr.wr_mid = cpu_to_be32(
- V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
-
- ulpmc->cmd = cmd;
- ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(
- DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
- ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(ulpmc->wr),
- 16));
- ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr + i * 3));
-
- ulpsc = (struct ulptx_idata *)(ulpmc + 1);
- ulpsc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
- ulpsc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));
-
- to_dp = (u8 *)(ulpsc + 1);
- from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
- if (data)
- memcpy(to_dp, from_dp, copy_len);
- else
- memset(to_dp, 0, copy_len);
- if (copy_len % T4_ULPTX_MIN_IO)
- memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
- (copy_len % T4_ULPTX_MIN_IO));
+ t4_write_mem_inline_wr(sc, wrtod(wr), wr_len, 0, addr, copy_len,
+ from_dp, i == (num_wqe - 1) ?
+ (__force __be64)(unsigned long) &wr_wait : 0);
t4_wrq_tx(sc, wr);
- len -= C4IW_MAX_INLINE_SIZE;
- }
+ if (from_dp != NULL)
+ from_dp += T4_MAX_INLINE_SIZE;
+ addr += T4_MAX_INLINE_SIZE >> 5;
+ len -= T4_MAX_INLINE_SIZE;
+ }
ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, NULL, __func__);
return ret;
}
@@ -201,7 +140,7 @@ _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
dmalen = T4_ULPTX_MAX_DMA;
remain -= dmalen;
ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen,
- (void *)daddr, !remain);
+ daddr, !remain);
if (ret)
goto out;
addr += dmalen >> 5;
@@ -263,8 +202,8 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
stag_idx = (*stag) >> 8;
if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
- stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
- if (!stag_idx) {
+ stag_idx = t4_stag_alloc(rdev->adap, 1);
+ if (stag_idx == T4_STAG_UNSET) {
mutex_lock(&rdev->stats.lock);
rdev->stats.stag.fail++;
mutex_unlock(&rdev->stats.lock);
@@ -309,7 +248,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
sizeof(tpt), &tpt);
if (reset_tpt_entry) {
- c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
+ t4_stag_free(rdev->adap, stag_idx, 1);
mutex_lock(&rdev->stats.lock);
rdev->stats.stag.cur -= 32;
mutex_unlock(&rdev->stats.lock);
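
The mem.c hunks replace the open-coded ULP_TX memory-write work requests with the shared t4_write_mem_dma_wr() and t4_write_mem_inline_wr() helpers, and move STAG index allocation from iw_cxgbe's private tpt_table to t4_stag_alloc()/t4_stag_free(). As a sketch, reconstructed from the second hunk above, the inline-write path now reads roughly like this with the patch applied:

static int
_c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
	struct adapter *sc = rdev->adap;
	u8 wr_len, *from_dp;
	int copy_len, num_wqe, i, ret = 0;
	struct c4iw_wr_wait wr_wait;
	struct wrqe *wr;

	CTR3(KTR_IW_CXGBE, "%s addr 0x%x len %u", __func__, addr, len);
	c4iw_init_wr_wait(&wr_wait);
	num_wqe = DIV_ROUND_UP(len, T4_MAX_INLINE_SIZE);
	from_dp = data;
	for (i = 0; i < num_wqe; i++) {
		copy_len = min(len, T4_MAX_INLINE_SIZE);
		wr_len = T4_WRITE_MEM_INLINE_LEN(copy_len);
		wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
		if (wr == NULL)
			return -ENOMEM;
		/* The helper assembles the ULPTX_WR header, MEM_WRITE
		 * command and immediate data that were previously built by
		 * hand here; the last WR carries the completion cookie. */
		t4_write_mem_inline_wr(sc, wrtod(wr), wr_len, 0, addr,
		    copy_len, from_dp, i == (num_wqe - 1) ?
		    (__force __be64)(unsigned long)&wr_wait : 0);
		t4_wrq_tx(sc, wr);
		if (from_dp != NULL)
			from_dp += T4_MAX_INLINE_SIZE;
		addr += T4_MAX_INLINE_SIZE >> 5;	/* addr is in 32B units */
		len -= T4_MAX_INLINE_SIZE;
	}
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, NULL, __func__);
	return ret;
}

The DMA-aligned path is simplified the same way: it now passes the dma_addr_t straight to t4_write_mem_dma_wr() instead of building the ULPTX SGL itself.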
diff --git a/sys/dev/cxgbe/iw_cxgbe/qp.c b/sys/dev/cxgbe/iw_cxgbe/qp.c
index 0e374bc961c4..cbf4bae00a60 100644
--- a/sys/dev/cxgbe/iw_cxgbe/qp.c
+++ b/sys/dev/cxgbe/iw_cxgbe/qp.c
@@ -1326,6 +1326,8 @@ creds(struct toepcb *toep, struct inpcb *inp, size_t wrsize)
return (EINVAL);
}
txsd = &toep->txsd[toep->txsd_pidx];
+ KASSERT(howmany(wrsize, 16) <= MAX_OFLD_TX_SDESC_CREDITS,
+ ("%s: tx_credits %zu too large", __func__, howmany(wrsize, 16)));
txsd->tx_credits = howmany(wrsize, 16);
txsd->plen = 0;
KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
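
The new assertion in creds() checks that the 16-byte credit count derived from the work-request size does not exceed MAX_OFLD_TX_SDESC_CREDITS, presumably the largest value the tx_credits field of the software descriptor can hold. howmany() is the standard FreeBSD round-up-division macro from <sys/param.h>; a minimal illustration (the wr_credits() helper below is hypothetical, not part of the driver):

#include <sys/param.h>	/* howmany(x, y) == ((x) + ((y) - 1)) / (y) */

/* Hypothetical helper for illustration only: offload work requests are
 * charged in 16-byte credits, so e.g. a 176-byte WR costs
 * howmany(176, 16) == 11 credits. */
static inline u_int
wr_credits(size_t wrsize)
{
	return (howmany(wrsize, 16));
}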
diff --git a/sys/dev/cxgbe/iw_cxgbe/resource.c b/sys/dev/cxgbe/iw_cxgbe/resource.c
index 644ea0c631bf..cd20f1eafdd6 100644
--- a/sys/dev/cxgbe/iw_cxgbe/resource.c
+++ b/sys/dev/cxgbe/iw_cxgbe/resource.c
@@ -59,13 +59,9 @@ static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
}
/* nr_* must be power of 2 */
-int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
+int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_pdid)
{
int err = 0;
- err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
- C4IW_ID_TABLE_F_RANDOM);
- if (err)
- goto tpt_err;
err = c4iw_init_qid_table(rdev);
if (err)
goto qid_err;
@@ -77,8 +73,6 @@ int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
pdid_err:
c4iw_id_table_free(&rdev->resource.qid_table);
qid_err:
- c4iw_id_table_free(&rdev->resource.tpt_table);
- tpt_err:
return -ENOMEM;
}
@@ -243,7 +237,6 @@ void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
- c4iw_id_table_free(&rscp->tpt_table);
c4iw_id_table_free(&rscp->qid_table);
c4iw_id_table_free(&rscp->pdid_table);
}
@@ -254,12 +247,9 @@ void c4iw_destroy_resource(struct c4iw_resource *rscp)
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
- unsigned long addr;
+ u32 addr;
- vmem_xalloc(rdev->pbl_arena, roundup(size, (1 << MIN_PBL_SHIFT)),
- 4, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
- M_FIRSTFIT|M_NOWAIT, &addr);
- CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, (u32)addr, size);
+ addr = t4_pblpool_alloc(rdev->adap, size);
mutex_lock(&rdev->stats.lock);
if (addr) {
rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
@@ -268,33 +258,15 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
} else
rdev->stats.pbl.fail++;
mutex_unlock(&rdev->stats.lock);
- return (u32)addr;
+ return addr;
}
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
- CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, addr, size);
mutex_lock(&rdev->stats.lock);
rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
mutex_unlock(&rdev->stats.lock);
- vmem_xfree(rdev->pbl_arena, addr, roundup(size,(1 << MIN_PBL_SHIFT)));
-}
-
-int c4iw_pblpool_create(struct c4iw_rdev *rdev)
-{
- rdev->pbl_arena = vmem_create("PBL_MEM_POOL",
- rdev->adap->vres.pbl.start,
- rdev->adap->vres.pbl.size,
- 1, 0, M_FIRSTFIT| M_NOWAIT);
- if (!rdev->pbl_arena)
- return -ENOMEM;
-
- return 0;
-}
-
-void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
-{
- vmem_destroy(rdev->pbl_arena);
+ t4_pblpool_free(rdev->adap, addr, size);
}
/* RQT Memory Manager. */
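
With the PBL vmem arena now owned by the common cxgbe code, the iw_cxgbe wrappers shrink to statistics bookkeeping around t4_pblpool_alloc()/t4_pblpool_free(). The free path, reconstructed from the hunk above:

void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	/* Only the per-rdev PBL usage counter is maintained here; the
	 * actual arena management lives in the shared t4 layer. */
	mutex_lock(&rdev->stats.lock);
	rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
	mutex_unlock(&rdev->stats.lock);
	t4_pblpool_free(rdev->adap, addr, size);
}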
diff --git a/sys/dev/cxgbe/iw_cxgbe/t4.h b/sys/dev/cxgbe/iw_cxgbe/t4.h
index 48f85cf7965b..ffb610420640 100644
--- a/sys/dev/cxgbe/iw_cxgbe/t4.h
+++ b/sys/dev/cxgbe/iw_cxgbe/t4.h
@@ -64,7 +64,6 @@
#define T4_MAX_NUM_PD 65536
#define T4_MAX_MR_SIZE (~0ULL)
#define T4_PAGESIZE_MASK 0xffffffff000 /* 4KB-8TB */
-#define T4_STAG_UNSET 0xffffffff
#define T4_FW_MAJ 0
#define A_PCIE_MA_SYNC 0x30b4