author    Marcin Wojtas <mw@FreeBSD.org>    2020-11-25 09:37:17 +0000
committer Marcin Wojtas <mw@FreeBSD.org>    2020-11-25 09:37:17 +0000
commit    f981217427acdd982b5b44c77886e663ef10dbf4 (patch)
tree      6c5a0f938e1d742ad6deacbc2924ae2b05df035a /sys/contrib
parent    d93e744ca8dc351056f5cabff1049dbdb7aa5fdc (diff)
MFC: Merge ENA v2.3.0 driver
r367805 Update ENA driver version to v2.3.0
r367803 Rename descriptions of the supported ENA devices
r367802 Add ENI metrics for the ENA driver
r367801 Add SPDX license tag to the ENA driver files
r367800 Add Rx offsets support for the ENA driver
r367799 Adjust ENA driver files to latest ena-com changes
r367795 Fix completion descriptors alignment for the ENA

Obtained from: Semihalf
Sponsored by:  Amazon, Inc
Notes:
    svn path=/stable/11/; revision=368013
Diffstat (limited to 'sys/contrib')
-rw-r--r--  sys/contrib/ena-com/ena_com.c                  | 471
-rw-r--r--  sys/contrib/ena-com/ena_com.h                  |  37
-rw-r--r--  sys/contrib/ena-com/ena_defs/ena_admin_defs.h  | 152
-rw-r--r--  sys/contrib/ena-com/ena_defs/ena_common_defs.h |   2
-rw-r--r--  sys/contrib/ena-com/ena_defs/ena_eth_io_defs.h |   2
-rw-r--r--  sys/contrib/ena-com/ena_defs/ena_gen_info.h    |   6
-rw-r--r--  sys/contrib/ena-com/ena_defs/ena_regs_defs.h   |   2
-rw-r--r--  sys/contrib/ena-com/ena_eth_com.c              | 129
-rw-r--r--  sys/contrib/ena-com/ena_eth_com.h              |  19
-rw-r--r--  sys/contrib/ena-com/ena_plat.h                 |  77
10 files changed, 536 insertions(+), 361 deletions(-)
diff --git a/sys/contrib/ena-com/ena_com.c b/sys/contrib/ena-com/ena_com.c
index dde0c3357f63..8c63c1a03f76 100644
--- a/sys/contrib/ena-com/ena_com.c
+++ b/sys/contrib/ena-com/ena_com.c
@@ -1,5 +1,5 @@
/*-
- * BSD LICENSE
+ * SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
* All rights reserved.
@@ -70,9 +70,9 @@
#define ENA_REGS_ADMIN_INTR_MASK 1
-#define ENA_MIN_POLL_US 100
+#define ENA_MIN_ADMIN_POLL_US 100
-#define ENA_MAX_POLL_US 5000
+#define ENA_MAX_ADMIN_POLL_US 5000
/*****************************************************************************/
/*****************************************************************************/
@@ -106,7 +106,7 @@ static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
dma_addr_t addr)
{
if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
- ena_trc_err("dma address has more bits that the device supports\n");
+ ena_trc_err(ena_dev, "DMA address has more bits that the device supports\n");
return ENA_COM_INVAL;
}
@@ -116,16 +116,17 @@ static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
return 0;
}
-static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
+static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
{
- struct ena_com_admin_sq *sq = &queue->sq;
- u16 size = ADMIN_SQ_SIZE(queue->q_depth);
+ struct ena_com_dev *ena_dev = admin_queue->ena_dev;
+ struct ena_com_admin_sq *sq = &admin_queue->sq;
+ u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
- ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr,
+ ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, sq->entries, sq->dma_addr,
sq->mem_handle);
if (!sq->entries) {
- ena_trc_err("memory allocation failed\n");
+ ena_trc_err(ena_dev, "Memory allocation failed\n");
return ENA_COM_NO_MEM;
}
@@ -138,16 +139,17 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
return 0;
}
-static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
+static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
{
- struct ena_com_admin_cq *cq = &queue->cq;
- u16 size = ADMIN_CQ_SIZE(queue->q_depth);
+ struct ena_com_dev *ena_dev = admin_queue->ena_dev;
+ struct ena_com_admin_cq *cq = &admin_queue->cq;
+ u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
- ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr,
+ ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, cq->entries, cq->dma_addr,
cq->mem_handle);
if (!cq->entries) {
- ena_trc_err("memory allocation failed\n");
+ ena_trc_err(ena_dev, "Memory allocation failed\n");
return ENA_COM_NO_MEM;
}
@@ -157,22 +159,22 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
return 0;
}
-static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
+static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
struct ena_aenq_handlers *aenq_handlers)
{
- struct ena_com_aenq *aenq = &dev->aenq;
+ struct ena_com_aenq *aenq = &ena_dev->aenq;
u32 addr_low, addr_high, aenq_caps;
u16 size;
- dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
+ ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
- ENA_MEM_ALLOC_COHERENT(dev->dmadev, size,
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, size,
aenq->entries,
aenq->dma_addr,
aenq->mem_handle);
if (!aenq->entries) {
- ena_trc_err("memory allocation failed\n");
+ ena_trc_err(ena_dev, "Memory allocation failed\n");
return ENA_COM_NO_MEM;
}
@@ -182,18 +184,18 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
- ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
- ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
aenq_caps = 0;
- aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
+ aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
- ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
if (unlikely(!aenq_handlers)) {
- ena_trc_err("aenq handlers pointer is NULL\n");
+ ena_trc_err(ena_dev, "AENQ handlers pointer is NULL\n");
return ENA_COM_INVAL;
}
@@ -209,31 +211,34 @@ static void comp_ctxt_release(struct ena_com_admin_queue *queue,
ATOMIC32_DEC(&queue->outstanding_cmds);
}
-static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
+static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue,
u16 command_id, bool capture)
{
- if (unlikely(command_id >= queue->q_depth)) {
- ena_trc_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
- command_id, queue->q_depth);
+ if (unlikely(command_id >= admin_queue->q_depth)) {
+ ena_trc_err(admin_queue->ena_dev,
+ "Command id is larger than the queue size. cmd_id: %u queue size %d\n",
+ command_id, admin_queue->q_depth);
return NULL;
}
- if (unlikely(!queue->comp_ctx)) {
- ena_trc_err("Completion context is NULL\n");
+ if (unlikely(!admin_queue->comp_ctx)) {
+ ena_trc_err(admin_queue->ena_dev,
+ "Completion context is NULL\n");
return NULL;
}
- if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
- ena_trc_err("Completion context is occupied\n");
+ if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
+ ena_trc_err(admin_queue->ena_dev,
+ "Completion context is occupied\n");
return NULL;
}
if (capture) {
- ATOMIC32_INC(&queue->outstanding_cmds);
- queue->comp_ctx[command_id].occupied = true;
+ ATOMIC32_INC(&admin_queue->outstanding_cmds);
+ admin_queue->comp_ctx[command_id].occupied = true;
}
- return &queue->comp_ctx[command_id];
+ return &admin_queue->comp_ctx[command_id];
}
static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
@@ -254,7 +259,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
/* In case of queue FULL */
cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds);
if (cnt >= admin_queue->q_depth) {
- ena_trc_dbg("admin queue is full.\n");
+ ena_trc_dbg(admin_queue->ena_dev, "Admin queue is full.\n");
admin_queue->stats.out_of_space++;
return ERR_PTR(ENA_COM_NO_SPACE);
}
@@ -296,20 +301,21 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
return comp_ctx;
}
-static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
+static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
{
- size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
+ struct ena_com_dev *ena_dev = admin_queue->ena_dev;
+ size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
struct ena_comp_ctx *comp_ctx;
u16 i;
- queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
- if (unlikely(!queue->comp_ctx)) {
- ena_trc_err("memory allocation failed\n");
+ admin_queue->comp_ctx = ENA_MEM_ALLOC(admin_queue->q_dmadev, size);
+ if (unlikely(!admin_queue->comp_ctx)) {
+ ena_trc_err(ena_dev, "Memory allocation failed\n");
return ENA_COM_NO_MEM;
}
- for (i = 0; i < queue->q_depth; i++) {
- comp_ctx = get_comp_ctxt(queue, i, false);
+ for (i = 0; i < admin_queue->q_depth; i++) {
+ comp_ctx = get_comp_ctxt(admin_queue, i, false);
if (comp_ctx)
ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
}
@@ -377,7 +383,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
}
if (!io_sq->desc_addr.virt_addr) {
- ena_trc_err("memory allocation failed\n");
+ ena_trc_err(ena_dev, "Memory allocation failed\n");
return ENA_COM_NO_MEM;
}
}
@@ -402,7 +408,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);
if (!io_sq->bounce_buf_ctrl.base_buffer) {
- ena_trc_err("bounce buffer memory allocation failed\n");
+ ena_trc_err(ena_dev, "Bounce buffer memory allocation failed\n");
return ENA_COM_NO_MEM;
}
@@ -449,23 +455,25 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
io_cq->bus = ena_dev->bus;
- ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
- size,
- io_cq->cdesc_addr.virt_addr,
- io_cq->cdesc_addr.phys_addr,
- io_cq->cdesc_addr.mem_handle,
- ctx->numa_node,
- prev_node);
+ ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED(ena_dev->dmadev,
+ size,
+ io_cq->cdesc_addr.virt_addr,
+ io_cq->cdesc_addr.phys_addr,
+ io_cq->cdesc_addr.mem_handle,
+ ctx->numa_node,
+ prev_node,
+ ENA_CDESC_RING_SIZE_ALIGNMENT);
if (!io_cq->cdesc_addr.virt_addr) {
- ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
- size,
- io_cq->cdesc_addr.virt_addr,
- io_cq->cdesc_addr.phys_addr,
- io_cq->cdesc_addr.mem_handle);
+ ENA_MEM_ALLOC_COHERENT_ALIGNED(ena_dev->dmadev,
+ size,
+ io_cq->cdesc_addr.virt_addr,
+ io_cq->cdesc_addr.phys_addr,
+ io_cq->cdesc_addr.mem_handle,
+ ENA_CDESC_RING_SIZE_ALIGNMENT);
}
if (!io_cq->cdesc_addr.virt_addr) {
- ena_trc_err("memory allocation failed\n");
+ ena_trc_err(ena_dev, "Memory allocation failed\n");
return ENA_COM_NO_MEM;
}
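
The hunk above is the r367795 alignment fix: the completion descriptor ring is now requested with an explicit 4 KiB alignment (ENA_CDESC_RING_SIZE_ALIGNMENT, defined as 1 << 12 in the ena_com.h hunk further down), first from the preferred NUMA node and then, on failure, from any node. A minimal sketch of the same try-aligned-then-fallback shape, using C11 aligned_alloc() as a stand-in for the platform's ENA_MEM_ALLOC_COHERENT_* DMA macros; the node-local allocator here is hypothetical and simply fails, forcing the fallback path:

    #include <stdlib.h>
    #include <string.h>

    #define CDESC_RING_SIZE_ALIGNMENT ((size_t)1 << 12) /* 4 KiB */

    /* Hypothetical stand-in for ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED();
     * always fails here so the fallback branch below is exercised. */
    static void *alloc_on_node_aligned(size_t size, size_t align, int node)
    {
            (void)size; (void)align; (void)node;
            return NULL;
    }

    static void *alloc_cdesc_ring(size_t size, int numa_node)
    {
            /* aligned_alloc() requires size to be a multiple of the
             * alignment, so round it up first. */
            size_t padded = (size + CDESC_RING_SIZE_ALIGNMENT - 1) &
                ~(CDESC_RING_SIZE_ALIGNMENT - 1);
            void *ring = alloc_on_node_aligned(padded,
                CDESC_RING_SIZE_ALIGNMENT, numa_node);

            if (ring == NULL)       /* fallback, as in the hunk above */
                    ring = aligned_alloc(CDESC_RING_SIZE_ALIGNMENT, padded);
            if (ring != NULL)
                    memset(ring, 0, padded);
            return ring;            /* caller errors out on NULL */
    }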
@@ -486,7 +494,8 @@ static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *a
comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
if (unlikely(!comp_ctx)) {
- ena_trc_err("comp_ctx is NULL. Changing the admin queue running state\n");
+ ena_trc_err(admin_queue->ena_dev,
+ "comp_ctx is NULL. Changing the admin queue running state\n");
admin_queue->running_state = false;
return;
}
@@ -538,10 +547,12 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
admin_queue->stats.completed_cmd += comp_num;
}
-static int ena_com_comp_status_to_errno(u8 comp_status)
+static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
+ u8 comp_status)
{
if (unlikely(comp_status != 0))
- ena_trc_err("admin command failed[%u]\n", comp_status);
+ ena_trc_err(admin_queue->ena_dev,
+ "Admin command failed[%u]\n", comp_status);
switch (comp_status) {
case ENA_ADMIN_SUCCESS:
@@ -555,15 +566,17 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
case ENA_ADMIN_ILLEGAL_PARAMETER:
case ENA_ADMIN_UNKNOWN_ERROR:
return ENA_COM_INVAL;
+ case ENA_ADMIN_RESOURCE_BUSY:
+ return ENA_COM_TRY_AGAIN;
}
return ENA_COM_INVAL;
}
-static inline void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
+static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
{
- delay_us = ENA_MAX32(ENA_MIN_POLL_US, delay_us);
- delay_us = ENA_MIN32(delay_us * (1 << exp), ENA_MAX_POLL_US);
+ delay_us = ENA_MAX32(ENA_MIN_ADMIN_POLL_US, delay_us);
+ delay_us = ENA_MIN32(delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
ENA_USLEEP(delay_us);
}
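
The renamed constants above bound the admin-queue polling backoff: the base delay is clamped to at least ENA_MIN_ADMIN_POLL_US, doubled per retry, and capped at ENA_MAX_ADMIN_POLL_US. A standalone sketch of the same arithmetic, with the constants copied from the hunk and ENA_USLEEP() replaced by a printout:

    #include <stdint.h>
    #include <stdio.h>

    #define ENA_MIN_ADMIN_POLL_US 100
    #define ENA_MAX_ADMIN_POLL_US 5000

    /* Mirrors ena_delay_exponential_backoff_us(): clamp, scale by 2^exp, cap. */
    static uint32_t admin_poll_delay_us(uint32_t exp, uint32_t delay_us)
    {
            if (delay_us < ENA_MIN_ADMIN_POLL_US)
                    delay_us = ENA_MIN_ADMIN_POLL_US;
            delay_us *= (uint32_t)1 << exp;  /* the poll loop keeps exp small */
            return delay_us < ENA_MAX_ADMIN_POLL_US ?
                delay_us : ENA_MAX_ADMIN_POLL_US;
    }

    int main(void)
    {
            /* With the 100 us default the sequence is
             * 100, 200, 400, 800, 1600, 3200, 5000, 5000, ... */
            for (uint32_t exp = 0; exp < 8; exp++)
                    printf("retry %u: %u us\n", exp,
                        admin_poll_delay_us(exp, 100));
            return 0;
    }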
@@ -586,7 +599,8 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
break;
if (ENA_TIME_EXPIRE(timeout)) {
- ena_trc_err("Wait for completion (polling) timeout\n");
+ ena_trc_err(admin_queue->ena_dev,
+ "Wait for completion (polling) timeout\n");
/* ENA didn't have any completion */
ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
admin_queue->stats.no_completion++;
@@ -597,11 +611,12 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
goto err;
}
- ena_delay_exponential_backoff_us(exp++, admin_queue->ena_dev->ena_min_poll_delay_us);
+ ena_delay_exponential_backoff_us(exp++,
+ admin_queue->ena_dev->ena_min_poll_delay_us);
}
if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
- ena_trc_err("Command was aborted\n");
+ ena_trc_err(admin_queue->ena_dev, "Command was aborted\n");
ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
admin_queue->stats.aborted_cmd++;
ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
@@ -610,15 +625,16 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
}
ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
- "Invalid comp status %d\n", comp_ctx->status);
+ admin_queue->ena_dev, "Invalid comp status %d\n",
+ comp_ctx->status);
- ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
+ ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
comp_ctxt_release(admin_queue, comp_ctx);
return ret;
}
-/**
+/*
* Set the LLQ configurations of the firmware
*
* The driver provides only the enabled feature values to the device,
@@ -643,13 +659,9 @@ static int ena_com_set_llq(struct ena_com_dev *ena_dev)
cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
- if (llq_info->disable_meta_caching)
- cmd.u.llq.accel_mode.u.set.enabled_flags |=
- BIT(ENA_ADMIN_DISABLE_META_CACHING);
-
- if (llq_info->max_entries_in_tx_burst)
- cmd.u.llq.accel_mode.u.set.enabled_flags |=
- BIT(ENA_ADMIN_LIMIT_TX_BURST);
+ cmd.u.llq.accel_mode.u.set.enabled_flags =
+ BIT(ENA_ADMIN_DISABLE_META_CACHING) |
+ BIT(ENA_ADMIN_LIMIT_TX_BURST);
ret = ena_com_execute_admin_command(admin_queue,
(struct ena_admin_aq_entry *)&cmd,
@@ -658,7 +670,7 @@ static int ena_com_set_llq(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
- ena_trc_err("Failed to set LLQ configurations: %d\n", ret);
+ ena_trc_err(ena_dev, "Failed to set LLQ configurations: %d\n", ret);
return ret;
}
@@ -668,6 +680,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
struct ena_llq_configurations *llq_default_cfg)
{
struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+ struct ena_admin_accel_mode_get llq_accel_mode_get;
u16 supported_feat;
int rc;
@@ -679,7 +692,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
llq_info->header_location_ctrl =
llq_default_cfg->llq_header_location;
} else {
- ena_trc_err("Invalid header location control, supported: 0x%x\n",
+ ena_trc_err(ena_dev, "Invalid header location control, supported: 0x%x\n",
supported_feat);
return -EINVAL;
}
@@ -694,12 +707,12 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
} else {
- ena_trc_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
+ ena_trc_err(ena_dev, "Invalid desc_stride_ctrl, supported: 0x%x\n",
supported_feat);
return -EINVAL;
}
- ena_trc_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+ ena_trc_err(ena_dev, "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
llq_default_cfg->llq_stride_ctrl,
supported_feat,
llq_info->desc_stride_ctrl);
@@ -723,11 +736,12 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
llq_info->desc_list_entry_size = 256;
} else {
- ena_trc_err("Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
+ ena_trc_err(ena_dev, "Invalid entry_size_ctrl, supported: 0x%x\n",
+ supported_feat);
return -EINVAL;
}
- ena_trc_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+ ena_trc_err(ena_dev, "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
llq_default_cfg->llq_ring_entry_size,
supported_feat,
llq_info->desc_list_entry_size);
@@ -736,7 +750,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
/* The desc list entry size should be whole multiply of 8
* This requirement comes from __iowrite64_copy()
*/
- ena_trc_err("illegal entry size %d\n",
+ ena_trc_err(ena_dev, "Illegal entry size %d\n",
llq_info->desc_list_entry_size);
return -EINVAL;
}
@@ -760,29 +774,31 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
} else {
- ena_trc_err("Invalid descs_num_before_header, supported: 0x%x\n",
+ ena_trc_err(ena_dev, "Invalid descs_num_before_header, supported: 0x%x\n",
supported_feat);
return -EINVAL;
}
- ena_trc_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+ ena_trc_err(ena_dev, "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
llq_default_cfg->llq_num_decs_before_header,
supported_feat,
llq_info->descs_num_before_header);
}
/* Check for accelerated queue supported */
+ llq_accel_mode_get = llq_features->accel_mode.u.get;
+
llq_info->disable_meta_caching =
- llq_features->accel_mode.u.get.supported_flags &
- BIT(ENA_ADMIN_DISABLE_META_CACHING);
+ !!(llq_accel_mode_get.supported_flags &
+ BIT(ENA_ADMIN_DISABLE_META_CACHING));
- if (llq_features->accel_mode.u.get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
+ if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
llq_info->max_entries_in_tx_burst =
- llq_features->accel_mode.u.get.max_tx_burst_size /
+ llq_accel_mode_get.max_tx_burst_size /
llq_default_cfg->llq_ring_entry_size_value;
rc = ena_com_set_llq(ena_dev);
if (rc)
- ena_trc_err("Cannot set LLQ configuration: %d\n", rc);
+ ena_trc_err(ena_dev, "Cannot set LLQ configuration: %d\n", rc);
return rc;
}
@@ -808,13 +824,15 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
if (comp_ctx->status == ENA_CMD_COMPLETED) {
- ena_trc_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
+ ena_trc_err(admin_queue->ena_dev,
+ "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
/* Check if fallback to polling is enabled */
if (admin_queue->auto_polling)
admin_queue->polling = true;
} else {
- ena_trc_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
+ ena_trc_err(admin_queue->ena_dev,
+ "The ena device didn't send a completion for the admin cmd %d status %d\n",
comp_ctx->cmd_opcode, comp_ctx->status);
}
/* Check if shifted to polling mode.
@@ -828,7 +846,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
}
}
- ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
+ ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
comp_ctxt_release(admin_queue, comp_ctx);
return ret;
@@ -876,7 +894,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
}
if (unlikely(i == timeout)) {
- ena_trc_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
+ ena_trc_err(ena_dev, "Reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
mmio_read->seq_num,
offset,
read_resp->req_id,
@@ -886,7 +904,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
}
if (read_resp->reg_off != offset) {
- ena_trc_err("Read failure: wrong offset provided\n");
+ ena_trc_err(ena_dev, "Read failure: wrong offset provided\n");
ret = ENA_MMIO_READ_TIMEOUT;
} else {
ret = read_resp->reg_val;
@@ -945,7 +963,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
sizeof(destroy_resp));
if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
- ena_trc_err("failed to destroy io sq error: %d\n", ret);
+ ena_trc_err(ena_dev, "Failed to destroy io sq error: %d\n", ret);
return ret;
}
@@ -1001,7 +1019,7 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
- ena_trc_err("Reg read timeout occurred\n");
+ ena_trc_err(ena_dev, "Reg read timeout occurred\n");
return ENA_COM_TIMER_EXPIRED;
}
@@ -1041,7 +1059,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
int ret;
if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
- ena_trc_dbg("Feature %d isn't supported\n", feature_id);
+ ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", feature_id);
return ENA_COM_UNSUPPORTED;
}
@@ -1060,7 +1078,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
&get_cmd.control_buffer.address,
control_buf_dma_addr);
if (unlikely(ret)) {
- ena_trc_err("memory address set failed\n");
+ ena_trc_err(ena_dev, "Memory address set failed\n");
return ret;
}
@@ -1077,7 +1095,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
sizeof(*get_resp));
if (unlikely(ret))
- ena_trc_err("Failed to submit get_feature command %d error: %d\n",
+ ena_trc_err(ena_dev, "Failed to submit get_feature command %d error: %d\n",
feature_id, ret);
return ret;
@@ -1108,13 +1126,9 @@ static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
ENA_RSS_FILL_KEY(&hash_key->key, sizeof(hash_key->key));
/* The key buffer is stored in the device in an array of
- * uint32 elements. Therefore the number of elements can be derived
- * by dividing the buffer length by the size of each array element.
- * In current implementation each element is sized at uint32_t
- * so it's actually a division by 4 but if the element size changes,
- * there is no need to rewrite this code.
+ * uint32 elements.
*/
- hash_key->keys_num = sizeof(hash_key->key) / sizeof(hash_key->key[0]);
+ hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS;
}
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
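
The keys_num -> key_parts rename ties the hash-key bookkeeping to ENA_ADMIN_RSS_KEY_PARTS (defined as 10 in the ena_admin_defs.h hunk below) instead of re-deriving it from the buffer size. Assuming the standard 40-byte Toeplitz RSS key stored as 32-bit words (the diff shows only the part count, so the key size is an assumption here), the two quantities agree, which a compile-time check can pin down:

    #include <stdint.h>

    #define ENA_ADMIN_RSS_KEY_PARTS 10

    /* hash_key->key: 40-byte RSS key as uint32_t words (assumed size) */
    static uint32_t key[ENA_ADMIN_RSS_KEY_PARTS];

    _Static_assert(sizeof(key) / sizeof(key[0]) == ENA_ADMIN_RSS_KEY_PARTS,
        "key_parts must equal the number of 32-bit words in the key buffer");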
@@ -1187,13 +1201,13 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
int ret;
ret = ena_com_get_feature(ena_dev, &get_resp,
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
+ ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 0);
if (unlikely(ret))
return ret;
if ((get_resp.u.ind_table.min_size > log_size) ||
(get_resp.u.ind_table.max_size < log_size)) {
- ena_trc_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
+ ena_trc_err(ena_dev, "Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
1 << log_size,
1 << get_resp.u.ind_table.min_size,
1 << get_resp.u.ind_table.max_size);
@@ -1297,7 +1311,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
&create_cmd.sq_ba,
io_sq->desc_addr.phys_addr);
if (unlikely(ret)) {
- ena_trc_err("memory address set failed\n");
+ ena_trc_err(ena_dev, "Memory address set failed\n");
return ret;
}
}
@@ -1308,7 +1322,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
(struct ena_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (unlikely(ret)) {
- ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
+ ena_trc_err(ena_dev, "Failed to create IO SQ. error: %d\n", ret);
return ret;
}
@@ -1326,7 +1340,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
cmd_completion.llq_descriptors_offset);
}
- ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
+ ena_trc_dbg(ena_dev, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
return ret;
}
@@ -1360,7 +1374,7 @@ static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;
if (unlikely(!intr_delay_resolution)) {
- ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
+ ena_trc_err(ena_dev, "Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
}
@@ -1396,23 +1410,25 @@ int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
comp, comp_size);
if (IS_ERR(comp_ctx)) {
if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
- ena_trc_dbg("Failed to submit command [%ld]\n",
+ ena_trc_dbg(admin_queue->ena_dev,
+ "Failed to submit command [%ld]\n",
PTR_ERR(comp_ctx));
else
- ena_trc_err("Failed to submit command [%ld]\n",
+ ena_trc_err(admin_queue->ena_dev,
+ "Failed to submit command [%ld]\n",
PTR_ERR(comp_ctx));
- return PTR_ERR(comp_ctx);
+ return (int)PTR_ERR(comp_ctx);
}
ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
if (unlikely(ret)) {
if (admin_queue->running_state)
- ena_trc_err("Failed to process command. ret = %d\n",
- ret);
+ ena_trc_err(admin_queue->ena_dev,
+ "Failed to process command. ret = %d\n", ret);
else
- ena_trc_dbg("Failed to process command. ret = %d\n",
- ret);
+ ena_trc_dbg(admin_queue->ena_dev,
+ "Failed to process command. ret = %d\n", ret);
}
return ret;
}
@@ -1441,7 +1457,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
&create_cmd.cq_ba,
io_cq->cdesc_addr.phys_addr);
if (unlikely(ret)) {
- ena_trc_err("memory address set failed\n");
+ ena_trc_err(ena_dev, "Memory address set failed\n");
return ret;
}
@@ -1451,7 +1467,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
(struct ena_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (unlikely(ret)) {
- ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
+ ena_trc_err(ena_dev, "Failed to create IO CQ. error: %d\n", ret);
return ret;
}
@@ -1470,7 +1486,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
cmd_completion.numa_node_register_offset);
- ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
+ ena_trc_dbg(ena_dev, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
return ret;
}
@@ -1480,7 +1496,7 @@ int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
struct ena_com_io_cq **io_cq)
{
if (qid >= ENA_TOTAL_NUM_QUEUES) {
- ena_trc_err("Invalid queue number %d but the max is %d\n",
+ ena_trc_err(ena_dev, "Invalid queue number %d but the max is %d\n",
qid, ENA_TOTAL_NUM_QUEUES);
return ENA_COM_INVAL;
}
@@ -1546,7 +1562,7 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
sizeof(destroy_resp));
if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
- ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);
+ ena_trc_err(ena_dev, "Failed to destroy IO CQ. error: %d\n", ret);
return ret;
}
@@ -1570,7 +1586,7 @@ void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
u16 depth = ena_dev->aenq.q_depth;
- ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
+ ENA_WARN(ena_dev->aenq.head != depth, ena_dev, "Invalid AENQ state\n");
/* Init head_db to mark that all entries in the queue
* are initially available
@@ -1588,12 +1604,12 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
if (ret) {
- ena_trc_info("Can't get aenq configuration\n");
+ ena_trc_info(ena_dev, "Can't get aenq configuration\n");
return ret;
}
if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
- ena_trc_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
+ ena_trc_warn(ena_dev, "Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
get_resp.u.aenq.supported_groups,
groups_flag);
return ENA_COM_UNSUPPORTED;
@@ -1614,7 +1630,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
sizeof(resp));
if (unlikely(ret))
- ena_trc_err("Failed to config AENQ ret: %d\n", ret);
+ ena_trc_err(ena_dev, "Failed to config AENQ ret: %d\n", ret);
return ret;
}
@@ -1622,20 +1638,20 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
- int width;
+ u32 width;
if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
- ena_trc_err("Reg read timeout occurred\n");
+ ena_trc_err(ena_dev, "Reg read timeout occurred\n");
return ENA_COM_TIMER_EXPIRED;
}
width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
- ena_trc_dbg("ENA dma width: %d\n", width);
+ ena_trc_dbg(ena_dev, "ENA dma width: %d\n", width);
if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
- ena_trc_err("DMA width illegal value: %d\n", width);
+ ena_trc_err(ena_dev, "DMA width illegal value: %d\n", width);
return ENA_COM_INVAL;
}
@@ -1659,16 +1675,16 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
(ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
- ena_trc_err("Reg read timeout occurred\n");
+ ena_trc_err(ena_dev, "Reg read timeout occurred\n");
return ENA_COM_TIMER_EXPIRED;
}
- ena_trc_info("ena device version: %d.%d\n",
+ ena_trc_info(ena_dev, "ENA device version: %d.%d\n",
(ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
- ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
+ ena_trc_info(ena_dev, "ENA controller version: %d.%d.%d implementation version %d\n",
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
>> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
@@ -1684,13 +1700,29 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
/* Validate the ctrl version without the implementation ID */
if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
- ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
+ ena_trc_err(ena_dev, "ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
return -1;
}
return 0;
}
+static void
+ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev,
+ struct ena_com_admin_queue *admin_queue)
+
+{
+ if (!admin_queue->comp_ctx)
+ return;
+
+ ENA_WAIT_EVENTS_DESTROY(admin_queue);
+ ENA_MEM_FREE(ena_dev->dmadev,
+ admin_queue->comp_ctx,
+ (admin_queue->q_depth * sizeof(struct ena_comp_ctx)));
+
+ admin_queue->comp_ctx = NULL;
+}
+
void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
@@ -1699,12 +1731,8 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
struct ena_com_aenq *aenq = &ena_dev->aenq;
u16 size;
- ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
- if (admin_queue->comp_ctx)
- ENA_MEM_FREE(ena_dev->dmadev,
- admin_queue->comp_ctx,
- (admin_queue->q_depth * sizeof(struct ena_comp_ctx)));
- admin_queue->comp_ctx = NULL;
+ ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue);
+
size = ADMIN_SQ_SIZE(admin_queue->q_depth);
if (sq->entries)
ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
@@ -1820,12 +1848,12 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
- ena_trc_err("Reg read timeout occurred\n");
+ ena_trc_err(ena_dev, "Reg read timeout occurred\n");
return ENA_COM_TIMER_EXPIRED;
}
if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
- ena_trc_err("Device isn't ready, abort com init\n");
+ ena_trc_err(ena_dev, "Device isn't ready, abort com init\n");
return ENA_COM_NO_DEVICE;
}
@@ -1903,7 +1931,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
int ret;
if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
- ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
+ ena_trc_err(ena_dev, "Qid (%d) is bigger than max num of queues (%d)\n",
ctx->qid, ENA_TOTAL_NUM_QUEUES);
return ENA_COM_INVAL;
}
@@ -1962,7 +1990,7 @@ void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
struct ena_com_io_cq *io_cq;
if (qid >= ENA_TOTAL_NUM_QUEUES) {
- ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
+ ena_trc_err(ena_dev, "Qid (%d) is bigger than max num of queues (%d)\n",
qid, ENA_TOTAL_NUM_QUEUES);
return;
}
@@ -1995,6 +2023,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
sizeof(get_resp.u.dev_attr));
+
ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
@@ -2061,17 +2090,6 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
else
return rc;
- rc = ena_com_get_feature(ena_dev, &get_resp,
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
- if (!rc)
- memcpy(&get_feat_ctx->ind_table, &get_resp.u.ind_table,
- sizeof(get_resp.u.ind_table));
- else if (rc == ENA_COM_UNSUPPORTED)
- memset(&get_feat_ctx->ind_table, 0x0,
- sizeof(get_feat_ctx->ind_table));
- else
- return rc;
-
return 0;
}
@@ -2083,10 +2101,10 @@ void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
/* ena_handle_specific_aenq_event:
* return the handler that is relevant to the specific event group
*/
-static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
+static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev,
u16 group)
{
- struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
+ struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers;
if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
return aenq_handlers->handlers[group];
@@ -2098,11 +2116,11 @@ static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
* handles the aenq incoming events.
* pop events from the queue and apply the specific handler
*/
-void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
+void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
{
struct ena_admin_aenq_entry *aenq_e;
struct ena_admin_aenq_common_desc *aenq_common;
- struct ena_com_aenq *aenq = &dev->aenq;
+ struct ena_com_aenq *aenq = &ena_dev->aenq;
u64 timestamp;
ena_aenq_handler handler_cb;
u16 masked_head, processed = 0;
@@ -2123,13 +2141,14 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
timestamp = (u64)aenq_common->timestamp_low |
((u64)aenq_common->timestamp_high << 32);
- ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%" ENA_PRIu64 "s]\n",
+
+ ena_trc_dbg(ena_dev, "AENQ! Group[%x] Syndrome[%x] timestamp: [%" ENA_PRIu64 "s]\n",
aenq_common->group,
- aenq_common->syndrom,
+ aenq_common->syndrome,
timestamp);
/* Handle specific event*/
- handler_cb = ena_com_get_specific_aenq_cb(dev,
+ handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
aenq_common->group);
handler_cb(data, aenq_e); /* call the actual event handler*/
@@ -2154,8 +2173,8 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
/* write the aenq doorbell after all AENQ descriptors were read */
mb();
- ENA_REG_WRITE32_RELAXED(dev->bus, (u32)aenq->head,
- dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+ ENA_REG_WRITE32_RELAXED(ena_dev->bus, (u32)aenq->head,
+ ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
mmiowb();
}
#ifdef ENA_EXTENDED_STATS
@@ -2191,19 +2210,19 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
(cap == ENA_MMIO_READ_TIMEOUT))) {
- ena_trc_err("Reg read32 timeout occurred\n");
+ ena_trc_err(ena_dev, "Reg read32 timeout occurred\n");
return ENA_COM_TIMER_EXPIRED;
}
if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
- ena_trc_err("Device isn't ready, can't reset device\n");
+ ena_trc_err(ena_dev, "Device isn't ready, can't reset device\n");
return ENA_COM_INVAL;
}
timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
if (timeout == 0) {
- ena_trc_err("Invalid timeout value\n");
+ ena_trc_err(ena_dev, "Invalid timeout value\n");
return ENA_COM_INVAL;
}
@@ -2219,7 +2238,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
rc = wait_for_reset_state(ena_dev, timeout,
ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
if (rc != 0) {
- ena_trc_err("Reset indication didn't turn on\n");
+ ena_trc_err(ena_dev, "Reset indication didn't turn on\n");
return rc;
}
@@ -2227,7 +2246,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
rc = wait_for_reset_state(ena_dev, timeout, 0);
if (rc != 0) {
- ena_trc_err("Reset indication didn't turn off\n");
+ ena_trc_err(ena_dev, "Reset indication didn't turn off\n");
return rc;
}
@@ -2264,7 +2283,22 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
sizeof(*get_resp));
if (unlikely(ret))
- ena_trc_err("Failed to get stats. error: %d\n", ret);
+ ena_trc_err(ena_dev, "Failed to get stats. error: %d\n", ret);
+
+ return ret;
+}
+
+int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
+ struct ena_admin_eni_stats *stats)
+{
+ struct ena_com_stats_ctx ctx;
+ int ret;
+
+ memset(&ctx, 0x0, sizeof(ctx));
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
+ if (likely(ret == 0))
+ memcpy(stats, &ctx.get_resp.u.eni_stats,
+ sizeof(ctx.get_resp.u.eni_stats));
return ret;
}
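
This is the core of r367802: ENA_ADMIN_GET_STATS_TYPE_ENI goes through the same ena_get_dev_stats() path as the basic stats, and the result is copied out of the new response union. A hedged caller sketch, assuming an already-initialized struct ena_com_dev and the usual ena_com.h/stdio.h includes; the return convention (0 on success, negative otherwise) follows the header comment added in ena_com.h below, and the printf reporting is illustrative only:

    /* Sketch: report ENI counters for an initialized device, if available. */
    static void report_eni_stats(struct ena_com_dev *ena_dev)
    {
            struct ena_admin_eni_stats eni_stats;

            if (ena_com_get_eni_stats(ena_dev, &eni_stats) != 0) {
                    /* Older firmware may reject the ENI stats type; treat
                     * the counters as unavailable, not as a hard error. */
                    return;
            }
            /* Each counter is the number of packets shaped because the
             * corresponding instance allowance was exceeded. */
            printf("bw_in exceeded:     %ju\n",
                (uintmax_t)eni_stats.bw_in_allowance_exceeded);
            printf("bw_out exceeded:    %ju\n",
                (uintmax_t)eni_stats.bw_out_allowance_exceeded);
            printf("pps exceeded:       %ju\n",
                (uintmax_t)eni_stats.pps_allowance_exceeded);
            printf("conntrack exceeded: %ju\n",
                (uintmax_t)eni_stats.conntrack_allowance_exceeded);
    }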
@@ -2278,8 +2312,8 @@ int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
memset(&ctx, 0x0, sizeof(ctx));
ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
if (likely(ret == 0))
- memcpy(stats, &ctx.get_resp.basic_stats,
- sizeof(ctx.get_resp.basic_stats));
+ memcpy(stats, &ctx.get_resp.u.basic_stats,
+ sizeof(ctx.get_resp.u.basic_stats));
return ret;
}
@@ -2306,7 +2340,7 @@ int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
&get_cmd->u.control_buffer.address,
phys_addr);
if (unlikely(ret)) {
- ena_trc_err("memory address set failed\n");
+ ena_trc_err(ena_dev, "Memory address set failed\n");
goto free_ext_stats_mem;
}
get_cmd->u.control_buffer.length = len;
@@ -2337,7 +2371,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
int ret;
if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
- ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
+ ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", ENA_ADMIN_MTU);
return ENA_COM_UNSUPPORTED;
}
@@ -2347,7 +2381,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
cmd.aq_common_descriptor.flags = 0;
cmd.feat_common.feature_id = ENA_ADMIN_MTU;
- cmd.u.mtu.mtu = mtu;
+ cmd.u.mtu.mtu = (u32)mtu;
ret = ena_com_execute_admin_command(admin_queue,
(struct ena_admin_aq_entry *)&cmd,
@@ -2356,7 +2390,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
sizeof(resp));
if (unlikely(ret))
- ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);
+ ena_trc_err(ena_dev, "Failed to set mtu %d. error: %d\n", mtu, ret);
return ret;
}
@@ -2370,7 +2404,7 @@ int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
ret = ena_com_get_feature(ena_dev, &resp,
ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
if (unlikely(ret)) {
- ena_trc_err("Failed to get offload capabilities %d\n", ret);
+ ena_trc_err(ena_dev, "Failed to get offload capabilities %d\n", ret);
return ret;
}
@@ -2390,7 +2424,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
if (!ena_com_check_supported_feature_id(ena_dev,
ENA_ADMIN_RSS_HASH_FUNCTION)) {
- ena_trc_dbg("Feature %d isn't supported\n",
+ ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_FUNCTION);
return ENA_COM_UNSUPPORTED;
}
@@ -2402,7 +2436,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
return ret;
if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
- ena_trc_err("Func hash %d isn't supported by device, abort\n",
+ ena_trc_err(ena_dev, "Func hash %d isn't supported by device, abort\n",
rss->hash_func);
return ENA_COM_UNSUPPORTED;
}
@@ -2420,7 +2454,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
&cmd.control_buffer.address,
rss->hash_key_dma_addr);
if (unlikely(ret)) {
- ena_trc_err("memory address set failed\n");
+ ena_trc_err(ena_dev, "Memory address set failed\n");
return ret;
}
@@ -2432,7 +2466,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
if (unlikely(ret)) {
- ena_trc_err("Failed to set hash function %d. error: %d\n",
+ ena_trc_err(ena_dev, "Failed to set hash function %d. error: %d\n",
rss->hash_func, ret);
return ENA_COM_INVAL;
}
@@ -2464,7 +2498,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
return rc;
if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
- ena_trc_err("Flow hash function %d isn't supported\n", func);
+ ena_trc_err(ena_dev, "Flow hash function %d isn't supported\n", func);
return ENA_COM_UNSUPPORTED;
}
@@ -2472,20 +2506,20 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
case ENA_ADMIN_TOEPLITZ:
if (key) {
if (key_len != sizeof(hash_key->key)) {
- ena_trc_err("key len (%hu) doesn't equal the supported size (%zu)\n",
+ ena_trc_err(ena_dev, "key len (%hu) doesn't equal the supported size (%zu)\n",
key_len, sizeof(hash_key->key));
return ENA_COM_INVAL;
}
memcpy(hash_key->key, key, key_len);
rss->hash_init_val = init_val;
- hash_key->keys_num = key_len / sizeof(hash_key->key[0]);
+ hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
}
break;
case ENA_ADMIN_CRC32:
rss->hash_init_val = init_val;
break;
default:
- ena_trc_err("Invalid hash function (%d)\n", func);
+ ena_trc_err(ena_dev, "Invalid hash function (%d)\n", func);
return ENA_COM_INVAL;
}
@@ -2533,7 +2567,8 @@ int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
ena_dev->rss.hash_key;
if (key)
- memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
+ memcpy(key, hash_key->key,
+ (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0]));
return 0;
}
@@ -2570,7 +2605,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
if (!ena_com_check_supported_feature_id(ena_dev,
ENA_ADMIN_RSS_HASH_INPUT)) {
- ena_trc_dbg("Feature %d isn't supported\n",
+ ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_INPUT);
return ENA_COM_UNSUPPORTED;
}
@@ -2589,7 +2624,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
&cmd.control_buffer.address,
rss->hash_ctrl_dma_addr);
if (unlikely(ret)) {
- ena_trc_err("memory address set failed\n");
+ ena_trc_err(ena_dev, "Memory address set failed\n");
return ret;
}
cmd.control_buffer.length = sizeof(*hash_ctrl);
@@ -2600,7 +2635,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
if (unlikely(ret))
- ena_trc_err("Failed to set hash input. error: %d\n", ret);
+ ena_trc_err(ena_dev, "Failed to set hash input. error: %d\n", ret);
return ret;
}
@@ -2650,7 +2685,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
available_fields = hash_ctrl->selected_fields[i].fields &
hash_ctrl->supported_fields[i].fields;
if (available_fields != hash_ctrl->selected_fields[i].fields) {
- ena_trc_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
+ ena_trc_err(ena_dev, "Hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
i, hash_ctrl->supported_fields[i].fields,
hash_ctrl->selected_fields[i].fields);
return ENA_COM_UNSUPPORTED;
@@ -2676,7 +2711,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
int rc;
if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
- ena_trc_err("Invalid proto num (%u)\n", proto);
+ ena_trc_err(ena_dev, "Invalid proto num (%u)\n", proto);
return ENA_COM_INVAL;
}
@@ -2688,7 +2723,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
/* Make sure all the fields are supported */
supported_fields = hash_ctrl->supported_fields[proto].fields;
if ((hash_fields & supported_fields) != hash_fields) {
- ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
+ ena_trc_err(ena_dev, "Proto %d doesn't support the required fields %x. supports only: %x\n",
proto, hash_fields, supported_fields);
}
@@ -2728,15 +2763,15 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
int ret;
if (!ena_com_check_supported_feature_id(ena_dev,
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
- ena_trc_dbg("Feature %d isn't supported\n",
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+ ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
+ ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
return ENA_COM_UNSUPPORTED;
}
ret = ena_com_ind_tbl_convert_to_device(ena_dev);
if (ret) {
- ena_trc_err("Failed to convert host indirection table to device table\n");
+ ena_trc_err(ena_dev, "Failed to convert host indirection table to device table\n");
return ret;
}
@@ -2745,7 +2780,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
cmd.aq_common_descriptor.flags =
ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
- cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
+ cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG;
cmd.u.ind_table.size = rss->tbl_log_size;
cmd.u.ind_table.inline_index = 0xFFFFFFFF;
@@ -2753,11 +2788,11 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
&cmd.control_buffer.address,
rss->rss_ind_tbl_dma_addr);
if (unlikely(ret)) {
- ena_trc_err("memory address set failed\n");
+ ena_trc_err(ena_dev, "Memory address set failed\n");
return ret;
}
- cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
+ cmd.control_buffer.length = (u32)(1ULL << rss->tbl_log_size) *
sizeof(struct ena_admin_rss_ind_table_entry);
ret = ena_com_execute_admin_command(admin_queue,
@@ -2767,7 +2802,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
- ena_trc_err("Failed to set indirect table. error: %d\n", ret);
+ ena_trc_err(ena_dev, "Failed to set indirect table. error: %d\n", ret);
return ret;
}
@@ -2779,11 +2814,11 @@ int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
u32 tbl_size;
int i, rc;
- tbl_size = (1ULL << rss->tbl_log_size) *
+ tbl_size = (u32)(1ULL << rss->tbl_log_size) *
sizeof(struct ena_admin_rss_ind_table_entry);
rc = ena_com_get_feature_ex(ena_dev, &get_resp,
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
+ ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG,
rss->rss_ind_tbl_dma_addr,
tbl_size, 0);
if (unlikely(rc))
@@ -2932,7 +2967,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
&cmd.u.host_attr.debug_ba,
host_attr->debug_area_dma_addr);
if (unlikely(ret)) {
- ena_trc_err("memory address set failed\n");
+ ena_trc_err(ena_dev, "Memory address set failed\n");
return ret;
}
@@ -2940,7 +2975,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
&cmd.u.host_attr.os_info_ba,
host_attr->host_info_dma_addr);
if (unlikely(ret)) {
- ena_trc_err("memory address set failed\n");
+ ena_trc_err(ena_dev, "Memory address set failed\n");
return ret;
}
@@ -2953,7 +2988,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
- ena_trc_err("Failed to set host attributes: %d\n", ret);
+ ena_trc_err(ena_dev, "Failed to set host attributes: %d\n", ret);
return ret;
}
@@ -2965,12 +3000,13 @@ bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
ENA_ADMIN_INTERRUPT_MODERATION);
}
-static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
+static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev,
+ u32 coalesce_usecs,
u32 intr_delay_resolution,
u32 *intr_moder_interval)
{
if (!intr_delay_resolution) {
- ena_trc_err("Illegal interrupt delay granularity value\n");
+ ena_trc_err(ena_dev, "Illegal interrupt delay granularity value\n");
return ENA_COM_FAULT;
}
@@ -2979,11 +3015,11 @@ static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
return 0;
}
-
int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
u32 tx_coalesce_usecs)
{
- return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs,
+ return ena_com_update_nonadaptive_moderation_interval(ena_dev,
+ tx_coalesce_usecs,
ena_dev->intr_delay_resolution,
&ena_dev->intr_moder_tx_interval);
}
@@ -2991,7 +3027,8 @@ int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_de
int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
u32 rx_coalesce_usecs)
{
- return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs,
+ return ena_com_update_nonadaptive_moderation_interval(ena_dev,
+ rx_coalesce_usecs,
ena_dev->intr_delay_resolution,
&ena_dev->intr_moder_rx_interval);
}
@@ -3007,12 +3044,12 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
if (rc) {
if (rc == ENA_COM_UNSUPPORTED) {
- ena_trc_dbg("Feature %d isn't supported\n",
+ ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
ENA_ADMIN_INTERRUPT_MODERATION);
rc = 0;
} else {
- ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
- rc);
+ ena_trc_err(ena_dev,
+ "Failed to get interrupt moderation admin cmd. rc: %d\n", rc);
}
/* no moderation supported, disable adaptive support */
@@ -3060,7 +3097,7 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
if (unlikely(ena_dev->tx_max_header_size == 0)) {
- ena_trc_err("the size of the LLQ entry is smaller than needed\n");
+ ena_trc_err(ena_dev, "The size of the LLQ entry is smaller than needed\n");
return -EINVAL;
}
diff --git a/sys/contrib/ena-com/ena_com.h b/sys/contrib/ena-com/ena_com.h
index c1b9540edd0b..414301bdaf91 100644
--- a/sys/contrib/ena-com/ena_com.h
+++ b/sys/contrib/ena-com/ena_com.h
@@ -1,5 +1,5 @@
/*-
- * BSD LICENSE
+ * SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
* All rights reserved.
@@ -51,6 +51,8 @@
#define ADMIN_CQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_acq_entry))
#define ADMIN_AENQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aenq_entry))
+#define ENA_CDESC_RING_SIZE_ALIGNMENT (1 << 12) /* 4K */
+
/*****************************************************************************/
/*****************************************************************************/
/* ENA adaptive interrupt moderation settings */
@@ -326,6 +328,7 @@ struct ena_com_dev {
void __iomem *mem_bar;
void *dmadev;
void *bus;
+ ena_netdev *net_device;
enum ena_admin_placement_policy_type tx_mem_queue_type;
u32 tx_max_header_size;
@@ -363,7 +366,6 @@ struct ena_com_dev_get_features_ctx {
struct ena_admin_feature_offload_desc offload;
struct ena_admin_ena_hw_hints hw_hints;
struct ena_admin_feature_llq_desc llq;
- struct ena_admin_feature_rss_ind_table ind_table;
};
struct ena_com_create_io_ctx {
@@ -548,7 +550,7 @@ void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev);
* This method goes over the async event notification queue and calls the proper
* aenq handler.
*/
-void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data);
+void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data);
/* ena_com_abort_admin_commands - Abort all the outstanding admin commands.
* @ena_dev: ENA communication layer struct
@@ -628,6 +630,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
struct ena_admin_basic_stats *stats);
+/* ena_com_get_eni_stats - Get extended network interface statistics
+ * @ena_dev: ENA communication layer struct
+ * @stats: stats return value
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
+ struct ena_admin_eni_stats *stats);
+
/* ena_com_set_dev_mtu - Configure the device mtu.
* @ena_dev: ENA communication layer struct
* @mtu: mtu value
@@ -961,6 +972,26 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
struct ena_admin_feature_llq_desc *llq_features,
struct ena_llq_configurations *llq_default_config);
+/* ena_com_io_sq_to_ena_dev - Extract ena_com_dev using contained field io_sq.
+ * @io_sq: IO submit queue struct
+ *
+ * @return - ena_com_dev struct extracted from io_sq
+ */
+static inline struct ena_com_dev *ena_com_io_sq_to_ena_dev(struct ena_com_io_sq *io_sq)
+{
+ return container_of(io_sq, struct ena_com_dev, io_sq_queues[io_sq->qid]);
+}
+
+/* ena_com_io_cq_to_ena_dev - Extract ena_com_dev using contained field io_cq.
+ * @io_sq: IO submit queue struct
+ *
+ * @return - ena_com_dev struct extracted from io_sq
+ */
+static inline struct ena_com_dev *ena_com_io_cq_to_ena_dev(struct ena_com_io_cq *io_cq)
+{
+ return container_of(io_cq, struct ena_com_dev, io_cq_queues[io_cq->qid]);
+}
+
static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
{
return ena_dev->adaptive_coalescing;
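
The two new inline helpers above rely on the queue structs being embedded directly in struct ena_com_dev (the io_sq_queues[]/io_cq_queues[] arrays), so container_of() can walk back from element qid to the owning device without a stored back-pointer. The header's container_of takes a runtime-indexed member, a common compiler extension; the equivalent pointer arithmetic, spelled out portably on a toy layout:

    #include <stddef.h>
    #include <stdio.h>

    struct queue { unsigned int qid; };

    struct dev {
            int id;
            struct queue queues[4];   /* embedded, like io_sq_queues[] */
    };

    /* Portable equivalent of container_of(q, struct dev, queues[q->qid]) */
    static struct dev *queue_to_dev(struct queue *q)
    {
            return (struct dev *)((char *)q
                - offsetof(struct dev, queues)
                - (size_t)q->qid * sizeof(struct queue));
    }

    int main(void)
    {
            struct dev d = { .id = 7 };

            d.queues[2].qid = 2;
            printf("owner id = %d\n", queue_to_dev(&d.queues[2])->id); /* 7 */
            return 0;
    }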
diff --git a/sys/contrib/ena-com/ena_defs/ena_admin_defs.h b/sys/contrib/ena-com/ena_defs/ena_admin_defs.h
index 52cdb9e5e394..edfdad3473d7 100644
--- a/sys/contrib/ena-com/ena_defs/ena_admin_defs.h
+++ b/sys/contrib/ena-com/ena_defs/ena_admin_defs.h
@@ -1,5 +1,5 @@
/*-
- * BSD LICENSE
+ * SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
* All rights reserved.
@@ -36,6 +36,8 @@
#define ENA_ADMIN_EXTRA_PROPERTIES_STRING_LEN 32
#define ENA_ADMIN_EXTRA_PROPERTIES_COUNT 32
+#define ENA_ADMIN_RSS_KEY_PARTS 10
+
enum ena_admin_aq_opcode {
ENA_ADMIN_CREATE_SQ = 1,
ENA_ADMIN_DESTROY_SQ = 2,
@@ -58,6 +60,7 @@ enum ena_admin_aq_completion_status {
ENA_ADMIN_RESOURCE_BUSY = 7,
};
+/* subcommands for the set/get feature admin commands */
enum ena_admin_aq_feature_id {
ENA_ADMIN_DEVICE_ATTRIBUTES = 1,
ENA_ADMIN_MAX_QUEUES_NUM = 2,
@@ -68,7 +71,7 @@ enum ena_admin_aq_feature_id {
ENA_ADMIN_MAX_QUEUES_EXT = 7,
ENA_ADMIN_RSS_HASH_FUNCTION = 10,
ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
+ ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG = 12,
ENA_ADMIN_MTU = 14,
ENA_ADMIN_RSS_HASH_INPUT = 18,
ENA_ADMIN_INTERRUPT_MODERATION = 20,
@@ -122,6 +125,8 @@ enum ena_admin_completion_policy_type {
enum ena_admin_get_stats_type {
ENA_ADMIN_GET_STATS_TYPE_BASIC = 0,
ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
+ /* extra HW stats for specific network interface */
+ ENA_ADMIN_GET_STATS_TYPE_ENI = 2,
};
enum ena_admin_get_stats_scope {
@@ -198,7 +203,7 @@ struct ena_admin_acq_common_desc {
uint16_t extended_status;
/* indicates to the driver which AQ entry has been consumed by the
- * device and could be reused
+ * device and could be reused
*/
uint16_t sq_head_indx;
};
@@ -243,8 +248,8 @@ struct ena_admin_aq_create_sq_cmd {
*/
uint8_t sq_caps_3;
- /* associated completion queue id. This CQ must be created prior to
- * SQ creation
+ /* associated completion queue id. This CQ must be created prior to SQ
+ * creation
*/
uint16_t cq_idx;
@@ -383,7 +388,7 @@ struct ena_admin_aq_get_stats_cmd {
uint16_t queue_idx;
/* device id, value 0xFFFF means mine. only privileged device can get
- * stats of other device
+ * stats of other device
*/
uint16_t device_id;
};
@@ -415,10 +420,43 @@ struct ena_admin_basic_stats {
uint32_t tx_drops_high;
};
+/* ENI Statistics Command. */
+struct ena_admin_eni_stats {
+ /* The number of packets shaped due to inbound aggregate BW
+ * allowance being exceeded
+ */
+ uint64_t bw_in_allowance_exceeded;
+
+ /* The number of packets shaped due to outbound aggregate BW
+ * allowance being exceeded
+ */
+ uint64_t bw_out_allowance_exceeded;
+
+ /* The number of packets shaped due to PPS allowance being exceeded */
+ uint64_t pps_allowance_exceeded;
+
+ /* The number of packets shaped due to connection tracking
+ * allowance being exceeded and leading to failure in establishment
+ * of new connections
+ */
+ uint64_t conntrack_allowance_exceeded;
+
+ /* The number of packets shaped due to linklocal packet rate
+ * allowance being exceeded
+ */
+ uint64_t linklocal_allowance_exceeded;
+};
+
struct ena_admin_acq_get_stats_resp {
struct ena_admin_acq_common_desc acq_common_desc;
- struct ena_admin_basic_stats basic_stats;
+ union {
+ uint64_t raw[7];
+
+ struct ena_admin_basic_stats basic_stats;
+
+ struct ena_admin_eni_stats eni_stats;
+ } u;
};
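
The stats response above overlays both layouts on a single raw[7] buffer of 64-bit words; which member is valid depends on the ENA_ADMIN_GET_STATS_TYPE_* value the caller requested. A hedged sketch of that dispatch, using trimmed-down illustrative structures (none of these names are the driver's):

    #include <stdint.h>
    #include <stdio.h>

    struct basic_stats { uint64_t rx_pkts, tx_pkts; };
    struct eni_stats   { uint64_t bw_in_exceeded, bw_out_exceeded, pps_exceeded; };

    struct stats_resp {
        union {
            uint64_t raw[7];    /* one buffer, two interpretations */
            struct basic_stats basic;
            struct eni_stats eni;
        } u;
    };

    enum stats_type { STATS_BASIC, STATS_ENI };

    static void print_stats(const struct stats_resp *resp, enum stats_type type)
    {
        /* Only the member matching the requested stats type is valid. */
        if (type == STATS_ENI)
            printf("pps exceeded: %llu\n",
                (unsigned long long)resp->u.eni.pps_exceeded);
        else
            printf("rx packets: %llu\n",
                (unsigned long long)resp->u.basic.rx_pkts);
    }

Because a union member is only meaningful when it matches the command that filled it, the requested stats type has to travel alongside the response rather than be inferred from the data.
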
struct ena_admin_get_set_feature_common_desc {
@@ -432,8 +470,8 @@ struct ena_admin_get_set_feature_common_desc {
uint8_t feature_id;
/* The driver specifies the max feature version it supports and the
- * device responds with the currently supported feature version. The
- * field is zero based
+ * device responds with the currently supported feature version. The
+ * field is zero based
*/
uint8_t feature_version;
@@ -445,7 +483,9 @@ struct ena_admin_device_attr_feature_desc {
uint32_t device_version;
- /* bitmap of ena_admin_aq_feature_id */
+ /* bitmap of ena_admin_aq_feature_id, which represents supported
+ * subcommands for the set/get feature admin commands.
+ */
uint32_t supported_features;
uint32_t reserved3;
@@ -531,32 +571,30 @@ struct ena_admin_feature_llq_desc {
uint32_t max_llq_depth;
- /* specify the header locations the device supports. bitfield of
- * enum ena_admin_llq_header_location.
+ /* specify the header locations the device supports. bitfield of enum
+ * ena_admin_llq_header_location.
*/
uint16_t header_location_ctrl_supported;
/* the header location the driver selected to use. */
uint16_t header_location_ctrl_enabled;
- /* if inline header is specified - this is the size of descriptor
- * list entry. If header in a separate ring is specified - this is
- * the size of header ring entry. bitfield of enum
- * ena_admin_llq_ring_entry_size. specify the entry sizes the device
- * supports
+ /* if inline header is specified - this is the size of descriptor list
+ * entry. If header in a separate ring is specified - this is the size
+ * of header ring entry. bitfield of enum ena_admin_llq_ring_entry_size.
+ * specify the entry sizes the device supports
*/
uint16_t entry_size_ctrl_supported;
/* the entry size the driver selected to use. */
uint16_t entry_size_ctrl_enabled;
- /* valid only if inline header is specified. First entry associated
- * with the packet includes descriptors and header. Rest of the
- * entries occupied by descriptors. This parameter defines the max
- * number of descriptors precedding the header in the first entry.
- * The field is bitfield of enum
- * ena_admin_llq_num_descs_before_header and specify the values the
- * device supports
+ /* valid only if inline header is specified. First entry associated with
+ * the packet includes descriptors and header. Rest of the entries
+ * occupied by descriptors. This parameter defines the max number of
+ * descriptors preceding the header in the first entry. The field is a
+ * bitfield of enum ena_admin_llq_num_descs_before_header and specifies
+ * the values the device supports
*/
uint16_t desc_num_before_header_supported;
@@ -564,7 +602,7 @@ struct ena_admin_feature_llq_desc {
uint16_t desc_num_before_header_enabled;
/* valid only if inline was chosen. bitfield of enum
- * ena_admin_llq_stride_ctrl
+ * ena_admin_llq_stride_ctrl
*/
uint16_t descriptors_stride_ctrl_supported;
@@ -574,8 +612,8 @@ struct ena_admin_feature_llq_desc {
/* reserved */
uint32_t reserved1;
- /* accelerated low latency queues requirment. driver needs to
- * support those requirments in order to use accelerated llq
+ /* accelerated low latency queues requirement. driver needs to
+ * support those requirements in order to use accelerated llq
*/
struct ena_admin_accel_mode_req accel_mode;
};
@@ -599,8 +637,8 @@ struct ena_admin_queue_ext_feature_fields {
uint32_t max_tx_header_size;
- /* Maximum Descriptors number, including meta descriptor, allowed for
- * a single Tx packet
+ /* Maximum Descriptors number, including meta descriptor, allowed for a
+ * single Tx packet
*/
uint16_t max_per_packet_tx_descs;
@@ -623,8 +661,8 @@ struct ena_admin_queue_feature_desc {
uint32_t max_header_size;
- /* Maximum Descriptors number, including meta descriptor, allowed for
- * a single Tx packet
+ /* Maximum Descriptors number, including meta descriptor, allowed for a
+ * single Tx packet
*/
uint16_t max_packet_tx_descs;
@@ -720,11 +758,11 @@ enum ena_admin_hash_functions {
};
struct ena_admin_feature_rss_flow_hash_control {
- uint32_t keys_num;
+ uint32_t key_parts;
uint32_t reserved;
- uint32_t key[10];
+ uint32_t key[ENA_ADMIN_RSS_KEY_PARTS];
};
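
With keys_num renamed to key_parts and the magic 10 replaced by ENA_ADMIN_RSS_KEY_PARTS, the layout reads plainly as a hash key carried in ten 32-bit parts, i.e. a 40-byte key. A small illustrative sketch of packing a byte-oriented key into that shape (rss_set_key and its types are hypothetical helpers, not the driver's):

    #include <stdint.h>
    #include <string.h>

    #define RSS_KEY_PARTS 10    /* 10 x 32 bit = 40-byte key */

    struct rss_hash_control {
        uint32_t key_parts;
        uint32_t reserved;
        uint32_t key[RSS_KEY_PARTS];
    };

    /* Copy a 40-byte key supplied as bytes into the 32-bit-part layout. */
    static void rss_set_key(struct rss_hash_control *ctrl,
        const uint8_t key[RSS_KEY_PARTS * sizeof(uint32_t)])
    {
        memcpy(ctrl->key, key, sizeof(ctrl->key));
        ctrl->key_parts = RSS_KEY_PARTS;
    }
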
struct ena_admin_feature_rss_flow_hash_function {
@@ -859,11 +897,12 @@ struct ena_admin_host_info {
uint16_t reserved;
- /* 0 : mutable_rss_table_size
+ /* 0 : reserved
* 1 : rx_offset
* 2 : interrupt_moderation
- * 3 : map_rx_buf_bidirectional
- * 31:4 : reserved
+ * 3 : rx_buf_mirroring
+ * 4 : rss_configurable_function_key
+ * 31:5 : reserved
*/
uint32_t driver_supported_features;
};
@@ -945,7 +984,7 @@ struct ena_admin_queue_ext_feature_desc {
struct ena_admin_queue_ext_feature_fields max_queue_ext;
uint32_t raw[10];
- } ;
+ };
};
struct ena_admin_get_feat_resp {
@@ -1028,7 +1067,7 @@ struct ena_admin_set_feat_resp {
struct ena_admin_aenq_common_desc {
uint16_t group;
- uint16_t syndrom;
+ uint16_t syndrome;
/* 0 : phase
* 7:1 : reserved - MBZ
@@ -1052,7 +1091,7 @@ enum ena_admin_aenq_group {
ENA_ADMIN_AENQ_GROUPS_NUM = 5,
};
-enum ena_admin_aenq_notification_syndrom {
+enum ena_admin_aenq_notification_syndrome {
ENA_ADMIN_SUSPEND = 0,
ENA_ADMIN_RESUME = 1,
ENA_ADMIN_UPDATE_HINTS = 2,
@@ -1181,13 +1220,14 @@ struct ena_admin_ena_mmio_req_read_less_resp {
#define ENA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3)
#define ENA_ADMIN_HOST_INFO_BUS_SHIFT 8
#define ENA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8)
-#define ENA_ADMIN_HOST_INFO_MUTABLE_RSS_TABLE_SIZE_MASK BIT(0)
#define ENA_ADMIN_HOST_INFO_RX_OFFSET_SHIFT 1
#define ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK BIT(1)
#define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT 2
#define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK BIT(2)
-#define ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_SHIFT 3
-#define ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_MASK BIT(3)
+#define ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_SHIFT 3
+#define ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK BIT(3)
+#define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_SHIFT 4
+#define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK BIT(4)
/* feature_rss_ind_table */
#define ENA_ADMIN_FEATURE_RSS_IND_TABLE_ONE_ENTRY_UPDATE_MASK BIT(0)
@@ -1609,16 +1649,6 @@ static inline void set_ena_admin_host_info_bus(struct ena_admin_host_info *p, ui
p->bdf |= (val << ENA_ADMIN_HOST_INFO_BUS_SHIFT) & ENA_ADMIN_HOST_INFO_BUS_MASK;
}
-static inline uint32_t get_ena_admin_host_info_mutable_rss_table_size(const struct ena_admin_host_info *p)
-{
- return p->driver_supported_features & ENA_ADMIN_HOST_INFO_MUTABLE_RSS_TABLE_SIZE_MASK;
-}
-
-static inline void set_ena_admin_host_info_mutable_rss_table_size(struct ena_admin_host_info *p, uint32_t val)
-{
- p->driver_supported_features |= val & ENA_ADMIN_HOST_INFO_MUTABLE_RSS_TABLE_SIZE_MASK;
-}
-
static inline uint32_t get_ena_admin_host_info_rx_offset(const struct ena_admin_host_info *p)
{
return (p->driver_supported_features & ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK) >> ENA_ADMIN_HOST_INFO_RX_OFFSET_SHIFT;
@@ -1639,14 +1669,24 @@ static inline void set_ena_admin_host_info_interrupt_moderation(struct ena_admin
p->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT) & ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK;
}
-static inline uint32_t get_ena_admin_host_info_map_rx_buf_bidirectional(const struct ena_admin_host_info *p)
+static inline uint32_t get_ena_admin_host_info_rx_buf_mirroring(const struct ena_admin_host_info *p)
+{
+ return (p->driver_supported_features & ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK) >> ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_SHIFT;
+}
+
+static inline void set_ena_admin_host_info_rx_buf_mirroring(struct ena_admin_host_info *p, uint32_t val)
+{
+ p->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_SHIFT) & ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK;
+}
+
+static inline uint32_t get_ena_admin_host_info_rss_configurable_function_key(const struct ena_admin_host_info *p)
{
- return (p->driver_supported_features & ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_MASK) >> ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_SHIFT;
+ return (p->driver_supported_features & ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK) >> ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_SHIFT;
}
-static inline void set_ena_admin_host_info_map_rx_buf_bidirectional(struct ena_admin_host_info *p, uint32_t val)
+static inline void set_ena_admin_host_info_rss_configurable_function_key(struct ena_admin_host_info *p, uint32_t val)
{
- p->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_SHIFT) & ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_MASK;
+ p->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_SHIFT) & ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;
}
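
Each accessor pair above is one instance of a single shift-and-mask idiom over driver_supported_features: the getter isolates the field with its MASK and shifts it down to bit 0, the setter shifts the value up and masks it before OR-ing it in. A generic sketch of that idiom, with illustrative names:

    #include <stdint.h>

    #define BIT(n)      (1u << (n))
    #define FEAT_SHIFT  4
    #define FEAT_MASK   BIT(4)    /* single-bit field at bit 4 */

    static inline uint32_t feat_get(uint32_t features)
    {
        return (features & FEAT_MASK) >> FEAT_SHIFT;
    }

    static inline void feat_set(uint32_t *features, uint32_t val)
    {
        /* Like the generated setters, this only ORs bits in and never
         * clears them, so it assumes a zero-initialized field. */
        *features |= (val << FEAT_SHIFT) & FEAT_MASK;
    }
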
static inline uint8_t get_ena_admin_feature_rss_ind_table_one_entry_update(const struct ena_admin_feature_rss_ind_table *p)
diff --git a/sys/contrib/ena-com/ena_defs/ena_common_defs.h b/sys/contrib/ena-com/ena_defs/ena_common_defs.h
index 88b90d44a79a..ee49ff6e2776 100644
--- a/sys/contrib/ena-com/ena_defs/ena_common_defs.h
+++ b/sys/contrib/ena-com/ena_defs/ena_common_defs.h
@@ -1,5 +1,5 @@
/*-
- * BSD LICENSE
+ * SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
* All rights reserved.
diff --git a/sys/contrib/ena-com/ena_defs/ena_eth_io_defs.h b/sys/contrib/ena-com/ena_defs/ena_eth_io_defs.h
index 14f44d0d9a86..817375a947c5 100644
--- a/sys/contrib/ena-com/ena_defs/ena_eth_io_defs.h
+++ b/sys/contrib/ena-com/ena_defs/ena_eth_io_defs.h
@@ -1,5 +1,5 @@
/*-
- * BSD LICENSE
+ * SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
* All rights reserved.
diff --git a/sys/contrib/ena-com/ena_defs/ena_gen_info.h b/sys/contrib/ena-com/ena_defs/ena_gen_info.h
index 83ed024ae4cc..726750a67d4e 100644
--- a/sys/contrib/ena-com/ena_defs/ena_gen_info.h
+++ b/sys/contrib/ena-com/ena_defs/ena_gen_info.h
@@ -1,5 +1,5 @@
/*-
- * BSD LICENSE
+ * SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
* All rights reserved.
@@ -30,5 +30,5 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#define ENA_GEN_DATE "Mon Apr 20 15:41:59 DST 2020"
-#define ENA_GEN_COMMIT "daa45ac"
+#define ENA_GEN_DATE "Fri Sep 18 17:09:00 IDT 2020"
+#define ENA_GEN_COMMIT "0f80d82"
diff --git a/sys/contrib/ena-com/ena_defs/ena_regs_defs.h b/sys/contrib/ena-com/ena_defs/ena_regs_defs.h
index 53ac662b6189..bdd91ef2e026 100644
--- a/sys/contrib/ena-com/ena_defs/ena_regs_defs.h
+++ b/sys/contrib/ena-com/ena_defs/ena_regs_defs.h
@@ -1,5 +1,5 @@
/*-
- * BSD LICENSE
+ * SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
* All rights reserved.
diff --git a/sys/contrib/ena-com/ena_eth_com.c b/sys/contrib/ena-com/ena_eth_com.c
index 58ddb82246fd..47ca4e4afdb6 100644
--- a/sys/contrib/ena-com/ena_eth_com.c
+++ b/sys/contrib/ena-com/ena_eth_com.c
@@ -1,5 +1,5 @@
/*-
- * BSD LICENSE
+ * SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
* All rights reserved.
@@ -85,12 +85,14 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
if (is_llq_max_tx_burst_exists(io_sq)) {
if (unlikely(!io_sq->entries_in_tx_burst_left)) {
- ena_trc_err("Error: trying to send more packets than tx burst allows\n");
+ ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
+ "Error: trying to send more packets than tx burst allows\n");
return ENA_COM_NO_SPACE;
}
io_sq->entries_in_tx_burst_left--;
- ena_trc_dbg("decreasing entries_in_tx_burst_left of queue %d to %d\n",
+ ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
+ "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
io_sq->qid, io_sq->entries_in_tx_burst_left);
}
@@ -129,12 +131,14 @@ static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
llq_info->descs_num_before_header * io_sq->desc_entry_size;
if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
- ena_trc_err("trying to write header larger than llq entry can accommodate\n");
+ ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
+ "Trying to write header larger than llq entry can accommodate\n");
return ENA_COM_FAULT;
}
if (unlikely(!bounce_buffer)) {
- ena_trc_err("bounce buffer is NULL\n");
+ ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
+ "Bounce buffer is NULL\n");
return ENA_COM_FAULT;
}
@@ -152,7 +156,8 @@ static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
bounce_buffer = pkt_ctrl->curr_bounce_buf;
if (unlikely(!bounce_buffer)) {
- ena_trc_err("bounce buffer is NULL\n");
+ ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
+ "Bounce buffer is NULL\n");
return NULL;
}
@@ -177,7 +182,8 @@ static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
rc = ena_com_write_bounce_buffer_to_dev(io_sq,
pkt_ctrl->curr_bounce_buf);
if (unlikely(rc)) {
- ena_trc_err("failed to write bounce buffer to device\n");
+ ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
+ "Failed to write bounce buffer to device\n");
return rc;
}
@@ -210,7 +216,8 @@ static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
rc = ena_com_write_bounce_buffer_to_dev(io_sq,
pkt_ctrl->curr_bounce_buf);
if (unlikely(rc)) {
- ena_trc_err("failed to write bounce buffer to device\n");
+ ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
+ "Failed to write bounce buffer to device\n");
return rc;
}
@@ -280,7 +287,8 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
io_cq->cur_rx_pkt_cdesc_count = 0;
io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;
- ena_trc_dbg("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
+ ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
+ "ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
io_cq->qid, *first_cdesc_idx, count);
} else {
io_cq->cur_rx_pkt_cdesc_count += count;
@@ -296,6 +304,9 @@ static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
meta_desc = get_sq_desc(io_sq);
+ if (unlikely(!meta_desc))
+ return ENA_COM_FAULT;
+
memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));
meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
@@ -303,7 +314,7 @@ static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
/* bits 0-9 of the mss */
- meta_desc->word2 |= (ena_meta->mss <<
+ meta_desc->word2 |= ((u32)ena_meta->mss <<
ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
/* bits 10-13 of the mss */
@@ -313,7 +324,7 @@ static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
/* Extended meta desc */
meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
- meta_desc->len_ctrl |= (io_sq->phase <<
+ meta_desc->len_ctrl |= ((u32)io_sq->phase <<
ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
@@ -326,7 +337,7 @@ static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
- meta_desc->word2 |= (ena_meta->l4_hdr_len <<
+ meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len <<
ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
@@ -348,20 +359,23 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
*have_meta = true;
return ena_com_create_meta(io_sq, ena_meta);
- } else if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
+ }
+
+ if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
*have_meta = true;
/* Cache the meta desc */
memcpy(&io_sq->cached_tx_meta, ena_meta,
sizeof(struct ena_com_tx_meta));
return ena_com_create_meta(io_sq, ena_meta);
- } else {
- *have_meta = false;
- return ENA_COM_OK;
}
+
+ *have_meta = false;
+ return ENA_COM_OK;
}
-static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
- struct ena_eth_io_rx_cdesc_base *cdesc)
+static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
+ struct ena_com_rx_ctx *ena_rx_ctx,
+ struct ena_eth_io_rx_cdesc_base *cdesc)
{
ena_rx_ctx->l3_proto = cdesc->status &
ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
@@ -382,7 +396,8 @@ static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
- ena_trc_dbg("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash frag %d frag: %d cdesc_status: %x\n",
+ ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
+ "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
ena_rx_ctx->l3_proto,
ena_rx_ctx->l4_proto,
ena_rx_ctx->l3_csum_err,
@@ -411,23 +426,26 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
u64 addr_hi;
ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
- "wrong Q type");
+ ena_com_io_sq_to_ena_dev(io_sq), "wrong Q type");
/* num_bufs +1 for potential meta desc */
if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
- ena_trc_dbg("Not enough space in the tx queue\n");
+ ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
+ "Not enough space in the tx queue\n");
return ENA_COM_NO_MEM;
}
if (unlikely(header_len > io_sq->tx_max_header_size)) {
- ena_trc_err("header size is too large %d max header: %d\n",
+ ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
+ "Header size is too large %d max header: %d\n",
header_len, io_sq->tx_max_header_size);
return ENA_COM_INVAL;
}
if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV
&& !buffer_to_push)) {
- ena_trc_err("push header wasn't provided on LLQ mode\n");
+ ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
+ "Push header wasn't provided on LLQ mode\n");
return ENA_COM_INVAL;
}
@@ -437,7 +455,8 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
if (unlikely(rc)) {
- ena_trc_err("failed to create and store tx meta desc\n");
+ ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
+ "Failed to create and store tx meta desc\n");
return rc;
}
@@ -445,7 +464,8 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
if (unlikely(!num_bufs && !header_len)) {
rc = ena_com_close_bounce_buffer(io_sq);
if (rc)
- ena_trc_err("failed to write buffers to LLQ\n");
+ ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
+ "Failed to write buffers to LLQ\n");
*nb_hw_desc = io_sq->tail - start_tail;
return rc;
}
@@ -459,16 +479,16 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
if (!have_meta)
desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;
- desc->buff_addr_hi_hdr_sz |= (header_len <<
+ desc->buff_addr_hi_hdr_sz |= ((u32)header_len <<
ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
- desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
+ desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
ENA_ETH_IO_TX_DESC_PHASE_MASK;
desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
/* Bits 0-9 */
- desc->meta_ctrl |= (ena_tx_ctx->req_id <<
+ desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id <<
ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
@@ -506,7 +526,8 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
if (likely(i != 0)) {
rc = ena_com_sq_update_tail(io_sq);
if (unlikely(rc)) {
- ena_trc_err("failed to update sq tail\n");
+ ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
+ "Failed to update sq tail\n");
return rc;
}
@@ -516,7 +537,7 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
- desc->len_ctrl |= (io_sq->phase <<
+ desc->len_ctrl |= ((u32)io_sq->phase <<
ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
ENA_ETH_IO_TX_DESC_PHASE_MASK;
}
@@ -538,13 +559,15 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
rc = ena_com_sq_update_tail(io_sq);
if (unlikely(rc)) {
- ena_trc_err("failed to update sq tail of the last descriptor\n");
+ ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
+ "Failed to update sq tail of the last descriptor\n");
return rc;
}
rc = ena_com_close_bounce_buffer(io_sq);
if (rc)
- ena_trc_err("failed when closing bounce buffer\n");
+ ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
+ "Failed when closing bounce buffer\n");
*nb_hw_desc = io_sq->tail - start_tail;
return rc;
@@ -556,12 +579,13 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
{
struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
+ u16 q_depth = io_cq->q_depth;
u16 cdesc_idx = 0;
u16 nb_hw_desc;
u16 i = 0;
ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
- "wrong Q type");
+ ena_com_io_cq_to_ena_dev(io_cq), "wrong Q type");
nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
if (nb_hw_desc == 0) {
@@ -569,11 +593,13 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
return 0;
}
- ena_trc_dbg("fetch rx packet: queue %d completed desc: %d\n",
+ ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
+ "Fetch rx packet: queue %d completed desc: %d\n",
io_cq->qid, nb_hw_desc);
if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
- ena_trc_err("Too many RX cdescs (%d) > MAX(%d)\n",
+ ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
+ "Too many RX cdescs (%d) > MAX(%d)\n",
nb_hw_desc, ena_rx_ctx->max_bufs);
return ENA_COM_NO_SPACE;
}
@@ -582,21 +608,30 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
ena_rx_ctx->pkt_offset = cdesc->offset;
do {
- ena_buf->len = cdesc->length;
- ena_buf->req_id = cdesc->req_id;
- ena_buf++;
- } while ((++i < nb_hw_desc) && (cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i)));
+ ena_buf[i].len = cdesc->length;
+ ena_buf[i].req_id = cdesc->req_id;
+ if (unlikely(ena_buf[i].req_id >= q_depth))
+ return ENA_COM_EIO;
+
+ if (++i >= nb_hw_desc)
+ break;
+
+ cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);
+
+ } while (1);
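
Besides restructuring the loop, the rewrite validates every completion's req_id against the queue depth before it is handed back to the driver, failing with the newly added ENA_COM_EIO instead of trusting a possibly misbehaving device. A reduced sketch of that defensive pattern (the lookup arrays and function name are illustrative):

    #include <errno.h>
    #include <stdint.h>

    struct rx_buf { uint16_t len, req_id; };

    /* Consume nb_desc completions; reject any req_id the queue can't hold.
     * nb_desc is assumed nonzero, as in the driver, where an empty
     * completion returns early. */
    static int consume_rx_cdescs(struct rx_buf *bufs, uint16_t nb_desc,
        const uint16_t *lens, const uint16_t *req_ids, uint16_t q_depth)
    {
        uint16_t i = 0;

        do {
            bufs[i].len = lens[i];
            bufs[i].req_id = req_ids[i];
            if (bufs[i].req_id >= q_depth)
                return -EIO;    /* malformed completion from device */

            if (++i >= nb_desc)
                break;
        } while (1);

        return 0;
    }
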
/* Update SQ head ptr */
io_sq->next_to_comp += nb_hw_desc;
- ena_trc_dbg("[%s][QID#%d] Updating SQ head to: %d\n", __func__,
+ ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
+ "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
io_sq->qid, io_sq->next_to_comp);
/* Get rx flags from the last pkt */
- ena_com_rx_set_flags(ena_rx_ctx, cdesc);
+ ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);
ena_rx_ctx->descs = nb_hw_desc;
+
return 0;
}
@@ -607,7 +642,7 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
struct ena_eth_io_rx_desc *desc;
ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
- "wrong Q type");
+ ena_com_io_sq_to_ena_dev(io_sq), "wrong Q type");
if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
return ENA_COM_NO_SPACE;
@@ -621,12 +656,16 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
desc->length = ena_buf->len;
desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
- ENA_ETH_IO_RX_DESC_LAST_MASK |
- (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) |
- ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+ ENA_ETH_IO_RX_DESC_LAST_MASK |
+ ENA_ETH_IO_RX_DESC_COMP_REQ_MASK |
+ (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK);
desc->req_id = req_id;
+ ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
+ "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
+ __func__, io_sq->qid, req_id);
+
desc->buff_addr_lo = (u32)ena_buf->paddr;
desc->buff_addr_hi =
((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
diff --git a/sys/contrib/ena-com/ena_eth_com.h b/sys/contrib/ena-com/ena_eth_com.h
index 4b91221ea093..85675bb004b9 100644
--- a/sys/contrib/ena-com/ena_eth_com.h
+++ b/sys/contrib/ena-com/ena_eth_com.h
@@ -1,5 +1,5 @@
/*-
- * BSD LICENSE
+ * SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
* All rights reserved.
@@ -171,7 +171,8 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
llq_info->descs_per_entry);
}
- ena_trc_dbg("queue: %d num_descs: %d num_entries_needed: %d\n",
+ ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
+ "Queue: %d num_descs: %d num_entries_needed: %d\n",
io_sq->qid, num_descs, num_entries_needed);
return num_entries_needed > io_sq->entries_in_tx_burst_left;
@@ -182,14 +183,16 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
u16 tail = io_sq->tail;
- ena_trc_dbg("write submission queue doorbell for queue: %d tail: %d\n",
+ ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
+ "Write submission queue doorbell for queue: %d tail: %d\n",
io_sq->qid, tail);
ENA_REG_WRITE32(io_sq->bus, tail, io_sq->db_addr);
if (is_llq_max_tx_burst_exists(io_sq)) {
- ena_trc_dbg("reset available entries in tx burst for queue %d to %d\n",
- io_sq->qid, max_entries_in_tx_burst);
+ ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
+ "Reset available entries in tx burst for queue %d to %d\n",
+ io_sq->qid, max_entries_in_tx_burst);
io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
}
@@ -207,7 +210,8 @@ static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
if (unlikely(need_update)) {
- ena_trc_dbg("Write completion queue doorbell for queue %d: head: %d\n",
+ ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
+ "Write completion queue doorbell for queue %d: head: %d\n",
io_cq->qid, head);
ENA_REG_WRITE32(io_cq->bus, head, io_cq->cq_head_db_reg);
io_cq->last_head_update = head;
@@ -271,7 +275,8 @@ static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
*req_id = READ_ONCE16(cdesc->req_id);
if (unlikely(*req_id >= io_cq->q_depth)) {
- ena_trc_err("Invalid req id %d\n", cdesc->req_id);
+ ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
+ "Invalid req id %d\n", cdesc->req_id);
return ENA_COM_INVAL;
}
diff --git a/sys/contrib/ena-com/ena_plat.h b/sys/contrib/ena-com/ena_plat.h
index e3536cdf3573..8fe1ec9aa731 100644
--- a/sys/contrib/ena-com/ena_plat.h
+++ b/sys/contrib/ena-com/ena_plat.h
@@ -1,5 +1,5 @@
/*-
- * BSD LICENSE
+ * SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
* All rights reserved.
@@ -106,24 +106,37 @@ extern struct ena_bus_space ebs;
#define ENA_ADMQ (1 << 8) /* Detailed info about admin queue. */
#define ENA_NETMAP (1 << 9) /* Detailed info about netmap. */
+#define DEFAULT_ALLOC_ALIGNMENT 8
+
extern int ena_log_level;
-#define ena_trace_raw(level, fmt, args...) \
+#define container_of(ptr, type, member) \
+ ({ \
+ const __typeof(((type *)0)->member) *__p = (ptr); \
+ (type *)((uintptr_t)__p - offsetof(type, member)); \
+ })
+
+#define ena_trace_raw(ctx, level, fmt, args...) \
do { \
+ ((void)(ctx)); \
if (((level) & ena_log_level) != (level)) \
break; \
printf(fmt, ##args); \
} while (0)
-#define ena_trace(level, fmt, args...) \
- ena_trace_raw(level, "%s() [TID:%d]: " \
+#define ena_trace(ctx, level, fmt, args...) \
+ ena_trace_raw(ctx, level, "%s() [TID:%d]: " \
fmt, __func__, curthread->td_tid, ##args)
-#define ena_trc_dbg(format, arg...) ena_trace(ENA_DBG, format, ##arg)
-#define ena_trc_info(format, arg...) ena_trace(ENA_INFO, format, ##arg)
-#define ena_trc_warn(format, arg...) ena_trace(ENA_WARNING, format, ##arg)
-#define ena_trc_err(format, arg...) ena_trace(ENA_ALERT, format, ##arg)
+#define ena_trc_dbg(ctx, format, arg...) \
+ ena_trace(ctx, ENA_DBG, format, ##arg)
+#define ena_trc_info(ctx, format, arg...) \
+ ena_trace(ctx, ENA_INFO, format, ##arg)
+#define ena_trc_warn(ctx, format, arg...) \
+ ena_trace(ctx, ENA_WARNING, format, ##arg)
+#define ena_trc_err(ctx, format, arg...) \
+ ena_trace(ctx, ENA_ALERT, format, ##arg)
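
All trace macros now take a ctx argument; on FreeBSD it is swallowed with ((void)(ctx)) since the printf path never prints the device, but the shared ena-com call sites can pass one uniformly. A compact sketch of the same level-filtered, ctx-accepting macro family (names and levels are illustrative, not the driver's):

    #include <stdio.h>

    #define LOG_DBG  (1 << 0)
    #define LOG_ERR  (1 << 1)

    static int log_level = LOG_DBG | LOG_ERR;    /* bitmask of enabled levels */

    /* ctx is accepted (and voided) so every caller can pass a device
     * pointer, even on platforms whose backend never prints it. */
    #define log_raw(ctx, level, fmt, args...)              \
        do {                                               \
            ((void)(ctx));                                 \
            if (((level) & log_level) != (level))          \
                break;                                     \
            printf(fmt, ##args);                           \
        } while (0)

    #define log_err(ctx, fmt, args...) log_raw(ctx, LOG_ERR, fmt, ##args)
    #define log_dbg(ctx, fmt, args...) log_raw(ctx, LOG_DBG, fmt, ##args)
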
#define unlikely(x) __predict_false(!!(x))
#define likely(x) __predict_true(!!(x))
@@ -134,19 +147,10 @@ extern int ena_log_level;
#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) unlikely((x) <= (unsigned long)MAX_ERRNO)
-#define ENA_ASSERT(cond, format, arg...) \
- do { \
- if (unlikely(!(cond))) { \
- ena_trc_err( \
- "Assert failed on %s:%s:%d:" format, \
- __FILE__, __func__, __LINE__, ##arg); \
- } \
- } while (0)
-
-#define ENA_WARN(cond, format, arg...) \
+#define ENA_WARN(cond, ctx, format, arg...) \
do { \
if (unlikely((cond))) { \
- ena_trc_warn(format, ##arg); \
+ ena_trc_warn(ctx, format, ##arg); \
} \
} while (0)
@@ -185,6 +189,7 @@ static inline long PTR_ERR(const void *ptr)
#define ENA_COM_NO_DEVICE ENODEV
#define ENA_COM_PERMISSION EPERM
#define ENA_COM_TIMER_EXPIRED ETIMEDOUT
+#define ENA_COM_EIO EIO
#define ENA_MSLEEP(x) pause_sbt("ena", SBT_1MS * (x), SBT_1MS, 0)
#define ENA_USLEEP(x) pause_sbt("ena", SBT_1US * (x), SBT_1US, 0)
@@ -233,10 +238,17 @@ static inline long PTR_ERR(const void *ptr)
cv_init(&((waitqueue).wq), "cv"); \
mtx_init(&((waitqueue).mtx), "wq", NULL, MTX_DEF); \
} while (0)
-#define ENA_WAIT_EVENT_DESTROY(waitqueue) \
+#define ENA_WAIT_EVENTS_DESTROY(admin_queue) \
do { \
- cv_destroy(&((waitqueue).wq)); \
- mtx_destroy(&((waitqueue).mtx)); \
+ struct ena_comp_ctx *comp_ctx; \
+ int i; \
+ for (i = 0; i < admin_queue->q_depth; i++) { \
+ comp_ctx = get_comp_ctxt(admin_queue, i, false); \
+ if (comp_ctx != NULL) { \
+ cv_destroy(&((comp_ctx->wait_event).wq)); \
+ mtx_destroy(&((comp_ctx->wait_event).mtx)); \
+ } \
+ } \
} while (0)
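
ENA_WAIT_EVENT_DESTROY, which tore down one wait event, is replaced by ENA_WAIT_EVENTS_DESTROY, which walks every completion context of the admin queue and destroys its condition variable and mutex in one pass. A hedged userspace analogue using pthreads (get_ctx() is a stand-in for the driver's get_comp_ctxt() lookup):

    #include <pthread.h>
    #include <stddef.h>

    struct wait_event { pthread_cond_t wq; pthread_mutex_t mtx; };
    struct comp_ctx { struct wait_event wait_event; };

    /* Destroy the wait event of every live completion context in one pass. */
    static void wait_events_destroy(struct comp_ctx *(*get_ctx)(int), int q_depth)
    {
        struct comp_ctx *ctx;
        int i;

        for (i = 0; i < q_depth; i++) {
            ctx = get_ctx(i);
            if (ctx != NULL) {
                pthread_cond_destroy(&ctx->wait_event.wq);
                pthread_mutex_destroy(&ctx->wait_event.mtx);
            }
        }
    }
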
#define ENA_WAIT_EVENT_CLEAR(waitqueue) \
cv_init(&((waitqueue).wq), (waitqueue).wq.cv_description)
@@ -281,11 +293,12 @@ typedef uint32_t ena_atomic32_t;
#define ENA_PRIu64 PRIu64
typedef uint64_t ena_time_t;
+typedef struct ifnet ena_netdev;
void ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg,
int error);
int ena_dma_alloc(device_t dmadev, bus_size_t size, ena_mem_handle_t *dma,
- int mapflags);
+ int mapflags, bus_size_t alignment);
static inline uint32_t
ena_reg_read32(struct ena_bus *bus, bus_size_t offset)
@@ -313,20 +326,30 @@ ena_reg_read32(struct ena_bus *bus, bus_size_t offset)
(void)(size); \
free(ptr, M_DEVBUF); \
} while (0)
-#define ENA_MEM_ALLOC_COHERENT_NODE(dmadev, size, virt, phys, handle, node, \
- dev_node) \
+#define ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED(dmadev, size, virt, phys, \
+ handle, node, dev_node, alignment) \
do { \
((virt) = NULL); \
(void)(dev_node); \
} while (0)
-#define ENA_MEM_ALLOC_COHERENT(dmadev, size, virt, phys, dma) \
+#define ENA_MEM_ALLOC_COHERENT_NODE(dmadev, size, virt, phys, handle, \
+ node, dev_node) \
+ ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED(dmadev, size, virt, \
+ phys, handle, node, dev_node, DEFAULT_ALLOC_ALIGNMENT)
+
+#define ENA_MEM_ALLOC_COHERENT_ALIGNED(dmadev, size, virt, phys, dma, \
+ alignment) \
do { \
- ena_dma_alloc((dmadev), (size), &(dma), 0); \
+ ena_dma_alloc((dmadev), (size), &(dma), 0, alignment); \
(virt) = (void *)(dma).vaddr; \
(phys) = (dma).paddr; \
} while (0)
+#define ENA_MEM_ALLOC_COHERENT(dmadev, size, virt, phys, dma) \
+ ENA_MEM_ALLOC_COHERENT_ALIGNED(dmadev, size, virt, \
+ phys, dma, DEFAULT_ALLOC_ALIGNMENT)
+
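
The coherent-allocation macros now thread an explicit alignment through to ena_dma_alloc(), and the old names become thin wrappers passing DEFAULT_ALLOC_ALIGNMENT (8 bytes), so existing call sites compile unchanged. A sketch of the same wrapper layering in plain C, with posix_memalign standing in for the real DMA allocator:

    #include <stdlib.h>

    #define DEFAULT_ALLOC_ALIGNMENT 8

    /* Aligned variant: the caller picks the boundary (must be a power of
     * two and a multiple of sizeof(void *) for posix_memalign). */
    static void *mem_alloc_aligned(size_t size, size_t alignment)
    {
        void *p;

        if (posix_memalign(&p, alignment, size) != 0)
            return NULL;
        return p;
    }

    /* Legacy entry point: same call, default alignment, so existing
     * callers keep working unchanged. */
    static void *mem_alloc(size_t size)
    {
        return mem_alloc_aligned(size, DEFAULT_ALLOC_ALIGNMENT);
    }
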
#define ENA_MEM_FREE_COHERENT(dmadev, size, virt, phys, dma) \
do { \
(void)size; \