Diffstat (limited to 'sys/dev')
-rw-r--r--  sys/dev/acpica/acpi_powerres.c | 274
-rw-r--r--  sys/dev/acpica/acpivar.h | 1
-rw-r--r--  sys/dev/amdgpio/amdgpio.c | 3
-rw-r--r--  sys/dev/bhnd/cores/chipc/chipc_gpio.c | 4
-rw-r--r--  sys/dev/bnxt/bnxt_re/qplib_res.c | 4
-rw-r--r--  sys/dev/e1000/e1000_phy.c | 5
-rw-r--r--  sys/dev/e1000/if_em.c | 44
-rw-r--r--  sys/dev/ftgpio/ftgpio.c | 3
-rw-r--r--  sys/dev/gpio/acpi_gpiobus.c | 3
-rw-r--r--  sys/dev/gpio/bytgpio.c | 3
-rw-r--r--  sys/dev/gpio/chvgpio.c | 3
-rw-r--r--  sys/dev/gpio/dwgpio/dwgpio.c | 3
-rw-r--r--  sys/dev/gpio/gpiobus.c | 18
-rw-r--r--  sys/dev/gpio/gpiobusvar.h | 1
-rw-r--r--  sys/dev/gpio/pl061.c | 3
-rw-r--r--  sys/dev/gpio/qoriq_gpio.c | 3
-rw-r--r--  sys/dev/hid/hidbus.c | 41
-rw-r--r--  sys/dev/hid/hidquirk.h | 1
-rw-r--r--  sys/dev/hid/hidraw.c | 12
-rw-r--r--  sys/dev/hid/ietp.c | 31
-rw-r--r--  sys/dev/hid/u2f.c | 590
-rw-r--r--  sys/dev/ice/ice_bitops.h | 4
-rw-r--r--  sys/dev/ice/ice_lan_tx_rx.h | 2
-rw-r--r--  sys/dev/ice/ice_lib.h | 2
-rw-r--r--  sys/dev/ice/ice_protocol_type.h | 2
-rw-r--r--  sys/dev/iicbus/gpio/pcf8574.c | 7
-rw-r--r--  sys/dev/iicbus/gpio/tca64xx.c | 7
-rw-r--r--  sys/dev/iicbus/iichid.c | 3
-rw-r--r--  sys/dev/isci/scil/intel_sata.h | 2
-rw-r--r--  sys/dev/ixgbe/if_ix.c | 231
-rw-r--r--  sys/dev/ixgbe/if_ixv.c | 6
-rw-r--r--  sys/dev/ixgbe/ixgbe.h | 11
-rw-r--r--  sys/dev/ixgbe/ixgbe_api.c | 16
-rw-r--r--  sys/dev/ixgbe/ixgbe_api.h | 1
-rw-r--r--  sys/dev/ixgbe/ixgbe_common.c | 25
-rw-r--r--  sys/dev/ixgbe/ixgbe_e610.c | 5567
-rw-r--r--  sys/dev/ixgbe/ixgbe_e610.h | 224
-rw-r--r--  sys/dev/ixgbe/ixgbe_osdep.c | 26
-rw-r--r--  sys/dev/ixgbe/ixgbe_osdep.h | 31
-rw-r--r--  sys/dev/ixgbe/ixgbe_type.h | 69
-rw-r--r--  sys/dev/ixgbe/ixgbe_type_e610.h | 2278
-rw-r--r--  sys/dev/ixgbe/ixgbe_vf.c | 3
-rw-r--r--  sys/dev/ixl/if_ixl.c | 25
-rw-r--r--  sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c | 4
-rw-r--r--  sys/dev/nctgpio/nctgpio.c | 3
-rw-r--r--  sys/dev/p2sb/lewisburg_gpio.c | 3
-rw-r--r--  sys/dev/psci/smccc_trng.c | 2
-rw-r--r--  sys/dev/qcom_rnd/qcom_rnd.c | 2
-rw-r--r--  sys/dev/qcom_tlmm/qcom_tlmm_ipq4018.c | 3
-rw-r--r--  sys/dev/random/armv8rng.c | 2
-rw-r--r--  sys/dev/random/darn.c | 2
-rw-r--r--  sys/dev/random/ivy.c | 2
-rw-r--r--  sys/dev/random/nehemiah.c | 2
-rw-r--r--  sys/dev/random/random_harvestq.c | 12
-rw-r--r--  sys/dev/random/randomdev.h | 4
-rw-r--r--  sys/dev/rccgpio/rccgpio.c | 3
-rw-r--r--  sys/dev/uart/uart_cpu_acpi.c | 20
-rw-r--r--  sys/dev/uart/uart_cpu_acpi.h | 17
-rw-r--r--  sys/dev/uart/uart_dev_ns8250.c | 42
-rw-r--r--  sys/dev/uart/uart_dev_pl011.c | 16
-rw-r--r--  sys/dev/ufshci/ufshci.h | 104
-rw-r--r--  sys/dev/ufshci/ufshci_ctrlr.c | 33
-rw-r--r--  sys/dev/ufshci/ufshci_ctrlr_cmd.c | 26
-rw-r--r--  sys/dev/ufshci/ufshci_private.h | 36
-rw-r--r--  sys/dev/ufshci/ufshci_req_queue.c | 123
-rw-r--r--  sys/dev/ufshci/ufshci_req_sdb.c | 133
-rw-r--r--  sys/dev/usb/input/uhid.c | 6
-rw-r--r--  sys/dev/usb/input/usbhid.c | 8
-rw-r--r--  sys/dev/usb/misc/cp2112.c | 5
-rw-r--r--  sys/dev/usb/net/if_ipheth.c | 218
-rw-r--r--  sys/dev/usb/net/if_iphethvar.h | 21
-rw-r--r--  sys/dev/usb/usb_device.c | 48
-rw-r--r--  sys/dev/usb/usb_generic.c | 37
-rw-r--r--  sys/dev/usb/usbdi.h | 3
-rw-r--r--  sys/dev/virtio/random/virtio_random.c | 2
-rw-r--r--  sys/dev/watchdog/watchdog.c | 212
76 files changed, 10206 insertions, 540 deletions
diff --git a/sys/dev/acpica/acpi_powerres.c b/sys/dev/acpica/acpi_powerres.c
index 29d1690f1bdd..0a8b67a5fa84 100644
--- a/sys/dev/acpica/acpi_powerres.c
+++ b/sys/dev/acpica/acpi_powerres.c
@@ -76,6 +76,13 @@ struct acpi_powerconsumer {
/* Device which is powered */
ACPI_HANDLE ac_consumer;
int ac_state;
+
+ struct {
+ bool prx_has;
+ size_t prx_count;
+ ACPI_HANDLE *prx_deps;
+ } ac_prx[ACPI_D_STATE_COUNT];
+
TAILQ_ENTRY(acpi_powerconsumer) ac_link;
TAILQ_HEAD(,acpi_powerreference) ac_references;
};
@@ -96,9 +103,7 @@ static TAILQ_HEAD(acpi_powerconsumer_list, acpi_powerconsumer)
ACPI_SERIAL_DECL(powerres, "ACPI power resources");
static ACPI_STATUS acpi_pwr_register_consumer(ACPI_HANDLE consumer);
-#ifdef notyet
static ACPI_STATUS acpi_pwr_deregister_consumer(ACPI_HANDLE consumer);
-#endif /* notyet */
static ACPI_STATUS acpi_pwr_register_resource(ACPI_HANDLE res);
#ifdef notyet
static ACPI_STATUS acpi_pwr_deregister_resource(ACPI_HANDLE res);
@@ -112,6 +117,8 @@ static struct acpi_powerresource
*acpi_pwr_find_resource(ACPI_HANDLE res);
static struct acpi_powerconsumer
*acpi_pwr_find_consumer(ACPI_HANDLE consumer);
+static ACPI_STATUS acpi_pwr_infer_state(struct acpi_powerconsumer *pc);
+static ACPI_STATUS acpi_pwr_get_state_locked(ACPI_HANDLE consumer, int *state);
/*
* Register a power resource.
@@ -222,6 +229,84 @@ acpi_pwr_deregister_resource(ACPI_HANDLE res)
#endif /* notyet */
/*
+ * Evaluate the _PRx (power resources each D-state depends on). This also
+ * populates the acpi_powerresources queue with the power resources discovered
+ * during this step.
+ *
+ * ACPI 7.3.8 - 7.3.11 guarantee that _PRx will return the same data each
+ * time they are evaluated.
+ *
+ * If this function fails, acpi_pwr_deregister_consumer() must be called on the
+ * power consumer to free already allocated memory.
+ */
+static ACPI_STATUS
+acpi_pwr_get_power_resources(ACPI_HANDLE consumer, struct acpi_powerconsumer *pc)
+{
+ ACPI_STATUS status;
+ ACPI_STRING reslist_name;
+ ACPI_HANDLE reslist_handle;
+ ACPI_STRING reslist_names[] = {"_PR0", "_PR1", "_PR2", "_PR3"};
+ ACPI_BUFFER reslist;
+ ACPI_OBJECT *reslist_object;
+ ACPI_OBJECT *dep;
+ ACPI_HANDLE *res;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+ ACPI_SERIAL_ASSERT(powerres);
+
+ MPASS(consumer != NULL);
+
+ for (int state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++) {
+ pc->ac_prx[state].prx_has = false;
+ pc->ac_prx[state].prx_count = 0;
+ pc->ac_prx[state].prx_deps = NULL;
+
+ reslist_name = reslist_names[state - ACPI_STATE_D0];
+ if (ACPI_FAILURE(AcpiGetHandle(consumer, reslist_name, &reslist_handle)))
+ continue;
+
+ reslist.Pointer = NULL;
+ reslist.Length = ACPI_ALLOCATE_BUFFER;
+ status = AcpiEvaluateObjectTyped(reslist_handle, NULL, NULL, &reslist,
+ ACPI_TYPE_PACKAGE);
+ if (ACPI_FAILURE(status) || reslist.Pointer == NULL)
+ /*
+ * With ACPI_ALLOCATE_BUFFER, AcpiEvaluateObjectTyped frees the buffer
+ * itself on error, so there is nothing to clean up here.
+ */
+ continue;
+
+ reslist_object = (ACPI_OBJECT *)reslist.Pointer;
+ pc->ac_prx[state].prx_has = true;
+ pc->ac_prx[state].prx_count = reslist_object->Package.Count;
+
+ if (reslist_object->Package.Count == 0) {
+ AcpiOsFree(reslist_object);
+ continue;
+ }
+
+ pc->ac_prx[state].prx_deps = mallocarray(pc->ac_prx[state].prx_count,
+ sizeof(*pc->ac_prx[state].prx_deps), M_ACPIPWR, M_NOWAIT);
+ if (pc->ac_prx[state].prx_deps == NULL) {
+ AcpiOsFree(reslist_object);
+ return_ACPI_STATUS (AE_NO_MEMORY);
+ }
+
+ for (size_t i = 0; i < reslist_object->Package.Count; i++) {
+ dep = &reslist_object->Package.Elements[i];
+ res = dep->Reference.Handle;
+ pc->ac_prx[state].prx_deps[i] = res;
+
+ /* It's fine to attempt to register the same resource twice. */
+ acpi_pwr_register_resource(res);
+ }
+ AcpiOsFree(reslist_object);
+ }
+
+ return_ACPI_STATUS (AE_OK);
+}
+
+/*
* Register a power consumer.
*
* It's OK to call this if we already know about the consumer.
@@ -229,6 +314,7 @@ acpi_pwr_deregister_resource(ACPI_HANDLE res)
static ACPI_STATUS
acpi_pwr_register_consumer(ACPI_HANDLE consumer)
{
+ ACPI_STATUS status;
struct acpi_powerconsumer *pc;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
@@ -239,14 +325,30 @@ acpi_pwr_register_consumer(ACPI_HANDLE consumer)
return_ACPI_STATUS (AE_OK);
/* Allocate a new power consumer */
- if ((pc = malloc(sizeof(*pc), M_ACPIPWR, M_NOWAIT)) == NULL)
+ if ((pc = malloc(sizeof(*pc), M_ACPIPWR, M_NOWAIT | M_ZERO)) == NULL)
return_ACPI_STATUS (AE_NO_MEMORY);
TAILQ_INSERT_HEAD(&acpi_powerconsumers, pc, ac_link);
TAILQ_INIT(&pc->ac_references);
pc->ac_consumer = consumer;
- /* XXX we should try to find its current state */
- pc->ac_state = ACPI_STATE_UNKNOWN;
+ /*
+ * Get all its power resource dependencies, if it has _PRx. We do this now
+ * as an opportunity to populate the acpi_powerresources queue.
+ *
+ * If this fails, immediately deregister it.
+ */
+ status = acpi_pwr_get_power_resources(consumer, pc);
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
+ "failed to get power resources for %s\n",
+ acpi_name(consumer)));
+ acpi_pwr_deregister_consumer(consumer);
+ return_ACPI_STATUS (status);
+ }
+
+ /* Find its initial state. */
+ if (ACPI_FAILURE(acpi_pwr_get_state_locked(consumer, &pc->ac_state)))
+ pc->ac_state = ACPI_STATE_UNKNOWN;
ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "registered power consumer %s\n",
acpi_name(consumer)));
@@ -254,7 +356,6 @@ acpi_pwr_register_consumer(ACPI_HANDLE consumer)
return_ACPI_STATUS (AE_OK);
}
-#ifdef notyet
/*
* Deregister a power consumer.
*
@@ -279,6 +380,9 @@ acpi_pwr_deregister_consumer(ACPI_HANDLE consumer)
/* Pull the consumer off the list and free it */
TAILQ_REMOVE(&acpi_powerconsumers, pc, ac_link);
+ for (size_t i = 0; i < sizeof(pc->ac_prx) / sizeof(*pc->ac_prx); i++)
+ if (pc->ac_prx[i].prx_deps != NULL)
+ free(pc->ac_prx[i].prx_deps, M_ACPIPWR);
free(pc, M_ACPIPWR);
ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "deregistered power consumer %s\n",
@@ -286,10 +390,139 @@ acpi_pwr_deregister_consumer(ACPI_HANDLE consumer)
return_ACPI_STATUS (AE_OK);
}
-#endif /* notyet */
/*
- * Set a power consumer to a particular power state.
+ * The _PSC control method isn't required if it's possible to infer the D-state
+ * from the _PRx control methods. (See 7.3.6.)
+ * We can infer that a given D-state has been achieved when all the dependencies
+ * are in the ON state.
+ */
+static ACPI_STATUS
+acpi_pwr_infer_state(struct acpi_powerconsumer *pc)
+{
+ ACPI_HANDLE *res;
+ uint32_t on;
+ bool all_on = false;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+ ACPI_SERIAL_ASSERT(powerres);
+
+ /* It is important we go from the hottest to the coldest state. */
+ for (
+ pc->ac_state = ACPI_STATE_D0;
+ pc->ac_state <= ACPI_STATE_D3_HOT && !all_on;
+ pc->ac_state++
+ ) {
+ MPASS(pc->ac_state <= sizeof(pc->ac_prx) / sizeof(*pc->ac_prx));
+
+ if (!pc->ac_prx[pc->ac_state].prx_has)
+ continue;
+
+ all_on = true;
+
+ for (size_t i = 0; i < pc->ac_prx[pc->ac_state].prx_count; i++) {
+ res = pc->ac_prx[pc->ac_state].prx_deps[i];
+ /* On failure, it is safer to assume a hotter D-state than a colder one. */
+ if (ACPI_FAILURE(acpi_GetInteger(res, "_STA", &on)))
+ continue;
+ if (on == 0) {
+ all_on = false;
+ break;
+ }
+ }
+ }
+
+ MPASS(pc->ac_state != ACPI_STATE_D0);
+
+ /*
+ * If none of the power resources required for the shallower D-states are
+ * on, then we can assume it is unpowered (i.e. D3cold). A device is not
+ * required to support D3cold however; in that case, _PR3 is not explicitly
+ * provided. Those devices should default to D3hot instead.
+ *
+ * See comments of first row of table 7.1 in ACPI spec.
+ */
+ if (!all_on)
+ pc->ac_state = pc->ac_prx[ACPI_STATE_D3_HOT].prx_has ?
+ ACPI_STATE_D3_COLD : ACPI_STATE_D3_HOT;
+ else
+ pc->ac_state--;
+
+ return_ACPI_STATUS (AE_OK);
+}
+
+static ACPI_STATUS
+acpi_pwr_get_state_locked(ACPI_HANDLE consumer, int *state)
+{
+ struct acpi_powerconsumer *pc;
+ ACPI_HANDLE method_handle;
+ ACPI_STATUS status;
+ ACPI_BUFFER result;
+ ACPI_OBJECT *object = NULL;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+ ACPI_SERIAL_ASSERT(powerres);
+
+ if (consumer == NULL)
+ return_ACPI_STATUS (AE_NOT_FOUND);
+
+ if ((pc = acpi_pwr_find_consumer(consumer)) == NULL) {
+ if (ACPI_FAILURE(status = acpi_pwr_register_consumer(consumer)))
+ goto out;
+ if ((pc = acpi_pwr_find_consumer(consumer)) == NULL)
+ panic("acpi added power consumer but can't find it");
+ }
+
+ status = AcpiGetHandle(consumer, "_PSC", &method_handle);
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "no _PSC object - %s\n",
+ AcpiFormatException(status)));
+ status = acpi_pwr_infer_state(pc);
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "couldn't infer D-state - %s\n",
+ AcpiFormatException(status)));
+ pc->ac_state = ACPI_STATE_UNKNOWN;
+ }
+ goto out;
+ }
+
+ result.Pointer = NULL;
+ result.Length = ACPI_ALLOCATE_BUFFER;
+ status = AcpiEvaluateObjectTyped(method_handle, NULL, NULL, &result, ACPI_TYPE_INTEGER);
+ if (ACPI_FAILURE(status) || result.Pointer == NULL) {
+ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "failed to get state with _PSC - %s\n",
+ AcpiFormatException(status)));
+ pc->ac_state = ACPI_STATE_UNKNOWN;
+ goto out;
+ }
+
+ object = (ACPI_OBJECT *)result.Pointer;
+ pc->ac_state = ACPI_STATE_D0 + object->Integer.Value;
+ status = AE_OK;
+
+out:
+ if (object != NULL)
+ AcpiOsFree(object);
+ *state = pc->ac_state;
+ return_ACPI_STATUS (status);
+}
+
+/*
+ * Get a power consumer's D-state.
+ */
+ACPI_STATUS
+acpi_pwr_get_state(ACPI_HANDLE consumer, int *state)
+{
+ ACPI_STATUS res;
+
+ ACPI_SERIAL_BEGIN(powerres);
+ res = acpi_pwr_get_state_locked(consumer, state);
+ ACPI_SERIAL_END(powerres);
+ return (res);
+}
+
+/*
+ * Set a power consumer to a particular D-state.
*/
ACPI_STATUS
acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state)
@@ -300,6 +533,7 @@ acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state)
ACPI_OBJECT *reslist_object;
ACPI_STATUS status;
char *method_name, *reslist_name = NULL;
+ int new_state;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
@@ -501,8 +735,28 @@ acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state)
}
}
- /* Transition was successful */
- pc->ac_state = state;
+ /*
+ * Make sure the transition succeeded. If getting the new state fails,
+ * just assume it is the one we requested; this matches the behaviour
+ * before D-state checking was introduced.
+ */
+ if (ACPI_FAILURE(acpi_pwr_get_state_locked(consumer, &new_state))) {
+ printf("%s: failed to get new D-state\n", __func__);
+ pc->ac_state = state;
+ } else {
+ if (new_state != state)
+ printf("%s: new power state %s is not the one requested %s\n",
+ __func__, acpi_d_state_to_str(new_state),
+ acpi_d_state_to_str(state));
+ pc->ac_state = new_state;
+ }
+
+ /*
+ * We consider the transition successful even if the state we got doesn't
+ * reflect what we set it to. We previously did not check the new state
+ * at all, so returning an error here could break suspend on buggy
+ * platforms where it used to work.
+ */
status = AE_OK;
out:
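
(To make the inference above concrete with a hypothetical device: if its _PR0 lists power resource PRA and its _PR1 lists PRB, then _STA reporting PRA on yields D0; PRA off but PRB on yields D1; and no listed resource on yields D3cold when the device declares _PR3, or D3hot when it does not.)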
diff --git a/sys/dev/acpica/acpivar.h b/sys/dev/acpica/acpivar.h
index 6887f080311d..7495a010432b 100644
--- a/sys/dev/acpica/acpivar.h
+++ b/sys/dev/acpica/acpivar.h
@@ -490,6 +490,7 @@ EVENTHANDLER_DECLARE(acpi_video_event, acpi_event_handler_t);
/* Device power control. */
ACPI_STATUS acpi_pwr_wake_enable(ACPI_HANDLE consumer, int enable);
+ACPI_STATUS acpi_pwr_get_state(ACPI_HANDLE consumer, int *state);
ACPI_STATUS acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state);
acpi_pwr_for_sleep_t acpi_device_pwr_for_sleep;
int acpi_set_powerstate(device_t child, int state);
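
A minimal sketch of how a driver could consume the new accessor; foo_resume() and its error handling are illustrative, not part of this change:

static int
foo_resume(device_t dev)
{
	int state;

	/* Ask the power-resource code which D-state the consumer is in. */
	if (ACPI_FAILURE(acpi_pwr_get_state(acpi_get_handle(dev), &state)))
		return (EIO);
	if (state != ACPI_STATE_D0)
		device_printf(dev, "not in D0 after resume (state %d)\n",
		    state);
	return (0);
}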
diff --git a/sys/dev/amdgpio/amdgpio.c b/sys/dev/amdgpio/amdgpio.c
index f39006d95805..2bd455c612b8 100644
--- a/sys/dev/amdgpio/amdgpio.c
+++ b/sys/dev/amdgpio/amdgpio.c
@@ -408,12 +408,13 @@ amdgpio_attach(device_t dev)
GPIO_PIN_OUTPUT : GPIO_PIN_INPUT;
}
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
device_printf(dev, "could not attach gpiobus\n");
goto err_bus;
}
+ bus_attach_children(dev);
return (0);
err_bus:
diff --git a/sys/dev/bhnd/cores/chipc/chipc_gpio.c b/sys/dev/bhnd/cores/chipc/chipc_gpio.c
index a110bdda5fa7..429de0fc1fd8 100644
--- a/sys/dev/bhnd/cores/chipc/chipc_gpio.c
+++ b/sys/dev/bhnd/cores/chipc/chipc_gpio.c
@@ -173,11 +173,13 @@ chipc_gpio_attach(device_t dev)
if (CC_GPIO_QUIRK(sc, NO_GPIOC)) {
sc->gpiobus = NULL;
} else {
- if ((sc->gpiobus = gpiobus_attach_bus(dev)) == NULL) {
+ if ((sc->gpiobus = gpiobus_add_bus(dev)) == NULL) {
device_printf(dev, "failed to attach gpiobus\n");
error = ENXIO;
goto failed;
}
+
+ bus_attach_children(dev);
}
/* Register as the bus GPIO provider */
diff --git a/sys/dev/bnxt/bnxt_re/qplib_res.c b/sys/dev/bnxt/bnxt_re/qplib_res.c
index 69661c67708c..f527af031176 100644
--- a/sys/dev/bnxt/bnxt_re/qplib_res.c
+++ b/sys/dev/bnxt/bnxt_re/qplib_res.c
@@ -875,7 +875,7 @@ int bnxt_qplib_alloc_dpi(struct bnxt_qplib_res *res,
dpi->umdbr = umaddr;
switch (type) {
case BNXT_QPLIB_DPI_TYPE_KERNEL:
- /* priviledged dbr was already mapped just initialize it. */
+ /* privileged dbr was already mapped just initialize it. */
dpi->umdbr = dpit->ucreg.bar_base +
dpit->ucreg.offset + bit_num * PAGE_SIZE;
dpi->dbr = dpit->priv_db;
@@ -1150,7 +1150,7 @@ int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res)
}
ucreg->bar_reg = ioremap(ucreg->bar_base, ucreg->len);
if (!ucreg->bar_reg) {
- dev_err(&res->pdev->dev, "priviledged dpi map failed!\n");
+ dev_err(&res->pdev->dev, "privileged dpi map failed!\n");
return -ENOMEM;
}
diff --git a/sys/dev/e1000/e1000_phy.c b/sys/dev/e1000/e1000_phy.c
index c34897e3b31a..634f48171c3e 100644
--- a/sys/dev/e1000/e1000_phy.c
+++ b/sys/dev/e1000/e1000_phy.c
@@ -1707,10 +1707,9 @@ s32 e1000_setup_copper_link_generic(struct e1000_hw *hw)
* autonegotiation.
*/
ret_val = e1000_copper_link_autoneg(hw);
- if (ret_val && !hw->mac.forced_speed_duplex)
+ if (ret_val)
return ret_val;
- }
- if (!hw->mac.autoneg || (ret_val && hw->mac.forced_speed_duplex)) {
+ } else {
/* PHY will be set to 10H, 10F, 100H or 100F
* depending on user settings.
*/
diff --git a/sys/dev/e1000/if_em.c b/sys/dev/e1000/if_em.c
index f0ef6051fab1..9c5ae2806f75 100644
--- a/sys/dev/e1000/if_em.c
+++ b/sys/dev/e1000/if_em.c
@@ -2000,18 +2000,7 @@ em_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
(sc->hw.phy.media_type == e1000_media_type_internal_serdes)) {
if (sc->hw.mac.type == e1000_82545)
fiber_type = IFM_1000_LX;
- switch (sc->link_speed) {
- case 10:
- ifmr->ifm_active |= IFM_10_FL;
- break;
- case 100:
- ifmr->ifm_active |= IFM_100_FX;
- break;
- case 1000:
- default:
- ifmr->ifm_active |= fiber_type | IFM_FDX;
- break;
- }
+ ifmr->ifm_active |= fiber_type | IFM_FDX;
} else {
switch (sc->link_speed) {
case 10:
@@ -2024,12 +2013,11 @@ em_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
ifmr->ifm_active |= IFM_1000_T;
break;
}
+ if (sc->link_duplex == FULL_DUPLEX)
+ ifmr->ifm_active |= IFM_FDX;
+ else
+ ifmr->ifm_active |= IFM_HDX;
}
-
- if (sc->link_duplex == FULL_DUPLEX)
- ifmr->ifm_active |= IFM_FDX;
- else
- ifmr->ifm_active |= IFM_HDX;
}
/*********************************************************************
@@ -2063,26 +2051,6 @@ em_if_media_change(if_ctx_t ctx)
sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
break;
case IFM_100_TX:
- sc->hw.mac.autoneg = DO_AUTO_NEG;
- if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
- sc->hw.phy.autoneg_advertised = ADVERTISE_100_FULL;
- sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
- } else {
- sc->hw.phy.autoneg_advertised = ADVERTISE_100_HALF;
- sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
- }
- break;
- case IFM_10_T:
- sc->hw.mac.autoneg = DO_AUTO_NEG;
- if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
- sc->hw.phy.autoneg_advertised = ADVERTISE_10_FULL;
- sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
- } else {
- sc->hw.phy.autoneg_advertised = ADVERTISE_10_HALF;
- sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
- }
- break;
- case IFM_100_FX:
sc->hw.mac.autoneg = false;
sc->hw.phy.autoneg_advertised = 0;
if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
@@ -2090,7 +2058,7 @@ em_if_media_change(if_ctx_t ctx)
else
sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
break;
- case IFM_10_FL:
+ case IFM_10_T:
sc->hw.mac.autoneg = false;
sc->hw.phy.autoneg_advertised = 0;
if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
diff --git a/sys/dev/ftgpio/ftgpio.c b/sys/dev/ftgpio/ftgpio.c
index 7acfdd5b900e..68787b54bb16 100644
--- a/sys/dev/ftgpio/ftgpio.c
+++ b/sys/dev/ftgpio/ftgpio.c
@@ -398,12 +398,13 @@ ftgpio_attach(device_t dev)
FTGPIO_VERBOSE_PRINTF(sc->dev, "groups GPIO1..GPIO6 enabled\n");
GPIO_UNLOCK(sc);
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
GPIO_LOCK_DESTROY(sc);
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/gpio/acpi_gpiobus.c b/sys/dev/gpio/acpi_gpiobus.c
index 94f4e5771266..170f23615416 100644
--- a/sys/dev/gpio/acpi_gpiobus.c
+++ b/sys/dev/gpio/acpi_gpiobus.c
@@ -383,7 +383,8 @@ acpi_gpiobus_detach(device_t dev)
}
static int
-acpi_gpiobus_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
+acpi_gpiobus_read_ivar(device_t dev, device_t child, int which,
+ uintptr_t *result)
{
struct acpi_gpiobus_ivar *devi = device_get_ivars(child);
diff --git a/sys/dev/gpio/bytgpio.c b/sys/dev/gpio/bytgpio.c
index f7b2a73ec6cb..5d685c155a03 100644
--- a/sys/dev/gpio/bytgpio.c
+++ b/sys/dev/gpio/bytgpio.c
@@ -608,7 +608,7 @@ bytgpio_attach(device_t dev)
sc->sc_pad_funcs[pin] = val & BYTGPIO_PCONF0_FUNC_MASK;
}
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
BYTGPIO_LOCK_DESTROY(sc);
bus_release_resource(dev, SYS_RES_MEMORY,
@@ -616,6 +616,7 @@ bytgpio_attach(device_t dev)
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
error:
diff --git a/sys/dev/gpio/chvgpio.c b/sys/dev/gpio/chvgpio.c
index 199ad4d6f373..3273aad9242b 100644
--- a/sys/dev/gpio/chvgpio.c
+++ b/sys/dev/gpio/chvgpio.c
@@ -441,7 +441,7 @@ chvgpio_attach(device_t dev)
bus_write_4(sc->sc_mem_res, CHVGPIO_INTERRUPT_MASK, 0);
bus_write_4(sc->sc_mem_res, CHVGPIO_INTERRUPT_STATUS, 0xffff);
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
CHVGPIO_LOCK_DESTROY(sc);
bus_release_resource(dev, SYS_RES_MEMORY,
@@ -451,6 +451,7 @@ chvgpio_attach(device_t dev)
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/gpio/dwgpio/dwgpio.c b/sys/dev/gpio/dwgpio/dwgpio.c
index 5acb99ca591e..3908113d5fd4 100644
--- a/sys/dev/gpio/dwgpio/dwgpio.c
+++ b/sys/dev/gpio/dwgpio/dwgpio.c
@@ -167,12 +167,13 @@ dwgpio_attach(device_t dev)
snprintf(sc->gpio_pins[i].gp_name, GPIOMAXNAME,
"dwgpio%d.%d", device_get_unit(dev), i);
}
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
mtx_destroy(&sc->sc_mtx);
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/gpio/gpiobus.c b/sys/dev/gpio/gpiobus.c
index 764bcb7e6ee8..c25c41f43042 100644
--- a/sys/dev/gpio/gpiobus.c
+++ b/sys/dev/gpio/gpiobus.c
@@ -330,24 +330,6 @@ gpiobus_add_bus(device_t dev)
return (busdev);
}
-/*
- * Attach a gpiobus child.
- * Note that the controller is expected
- * to be fully initialized at this point.
- */
-device_t
-gpiobus_attach_bus(device_t dev)
-{
- device_t busdev;
-
- busdev = gpiobus_add_bus(dev);
- if (busdev == NULL)
- return (NULL);
-
- bus_attach_children(dev);
- return (busdev);
-}
-
int
gpiobus_detach_bus(device_t dev)
{
diff --git a/sys/dev/gpio/gpiobusvar.h b/sys/dev/gpio/gpiobusvar.h
index 7f504236a774..0528efe45525 100644
--- a/sys/dev/gpio/gpiobusvar.h
+++ b/sys/dev/gpio/gpiobusvar.h
@@ -171,7 +171,6 @@ struct resource *gpio_alloc_intr_resource(device_t consumer_dev, int *rid,
int gpio_check_flags(uint32_t, uint32_t);
device_t gpiobus_add_bus(device_t);
-device_t gpiobus_attach_bus(device_t);
int gpiobus_detach_bus(device_t);
#endif /* __GPIOBUS_H__ */
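
Each converted controller now open-codes what gpiobus_attach_bus() used to do. The common shape of the attach path, sketched with a hypothetical softc layout:

	sc->busdev = gpiobus_add_bus(dev);	/* add the gpiobus child */
	if (sc->busdev == NULL)
		return (ENXIO);
	bus_attach_children(dev);	/* attach children explicitly */
	return (0);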
diff --git a/sys/dev/gpio/pl061.c b/sys/dev/gpio/pl061.c
index 87d4310a6396..32109e5982bc 100644
--- a/sys/dev/gpio/pl061.c
+++ b/sys/dev/gpio/pl061.c
@@ -495,13 +495,14 @@ pl061_attach(device_t dev)
goto free_isrc;
}
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
device_printf(dev, "couldn't attach gpio bus\n");
PL061_LOCK_DESTROY(sc);
goto free_isrc;
}
+ bus_attach_children(dev);
return (0);
free_isrc:
diff --git a/sys/dev/gpio/qoriq_gpio.c b/sys/dev/gpio/qoriq_gpio.c
index 8b44cd256c79..d11868a23751 100644
--- a/sys/dev/gpio/qoriq_gpio.c
+++ b/sys/dev/gpio/qoriq_gpio.c
@@ -379,12 +379,13 @@ qoriq_gpio_attach(device_t dev)
OF_device_register_xref(OF_xref_from_node(ofw_bus_get_node(dev)), dev);
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
qoriq_gpio_detach(dev);
return (ENOMEM);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/hid/hidbus.c b/sys/dev/hid/hidbus.c
index 96d36c8d191d..683449fca49c 100644
--- a/sys/dev/hid/hidbus.c
+++ b/sys/dev/hid/hidbus.c
@@ -65,7 +65,7 @@ struct hidbus_ivars {
struct mtx *mtx; /* child intr mtx */
hid_intr_t *intr_handler; /* executed under mtx*/
void *intr_ctx;
- unsigned int refcnt; /* protected by mtx */
+ bool active; /* protected by mtx */
struct epoch_context epoch_ctx;
CK_STAILQ_ENTRY(hidbus_ivars) link;
};
@@ -398,7 +398,7 @@ hidbus_child_detached(device_t bus, device_t child)
struct hidbus_softc *sc = device_get_softc(bus);
struct hidbus_ivars *tlc = device_get_ivars(child);
- KASSERT(tlc->refcnt == 0, ("Child device is running"));
+ KASSERT(!tlc->active, ("Child device is running"));
tlc->mtx = &sc->mtx;
tlc->intr_handler = NULL;
tlc->flags &= ~HIDBUS_FLAG_CAN_POLL;
@@ -423,7 +423,7 @@ hidbus_child_deleted(device_t bus, device_t child)
struct hidbus_ivars *tlc = device_get_ivars(child);
sx_xlock(&sc->sx);
- KASSERT(tlc->refcnt == 0, ("Child device is running"));
+ KASSERT(!tlc->active, ("Child device is running"));
CK_STAILQ_REMOVE(&sc->tlcs, tlc, hidbus_ivars, link);
sx_unlock(&sc->sx);
epoch_call(INPUT_EPOCH, hidbus_ivar_dtor, &tlc->epoch_ctx);
@@ -572,7 +572,7 @@ hidbus_intr(void *context, void *buf, hid_size_t len)
if (!HID_IN_POLLING_MODE())
epoch_enter_preempt(INPUT_EPOCH, &et);
CK_STAILQ_FOREACH(tlc, &sc->tlcs, link) {
- if (tlc->refcnt == 0 || tlc->intr_handler == NULL)
+ if (!tlc->active || tlc->intr_handler == NULL)
continue;
if (HID_IN_POLLING_MODE()) {
if ((tlc->flags & HIDBUS_FLAG_CAN_POLL) != 0)
@@ -602,21 +602,14 @@ hidbus_intr_start(device_t bus, device_t child)
MPASS(bus == device_get_parent(child));
struct hidbus_softc *sc = device_get_softc(bus);
struct hidbus_ivars *ivar = device_get_ivars(child);
- struct hidbus_ivars *tlc;
- bool refcnted = false;
int error;
if (sx_xlock_sig(&sc->sx) != 0)
return (EINTR);
- CK_STAILQ_FOREACH(tlc, &sc->tlcs, link) {
- refcnted |= (tlc->refcnt != 0);
- if (tlc == ivar) {
- mtx_lock(tlc->mtx);
- ++tlc->refcnt;
- mtx_unlock(tlc->mtx);
- }
- }
- error = refcnted ? 0 : hid_intr_start(bus);
+ mtx_lock(ivar->mtx);
+ ivar->active = true;
+ mtx_unlock(ivar->mtx);
+ error = hid_intr_start(bus);
sx_unlock(&sc->sx);
return (error);
@@ -629,21 +622,17 @@ hidbus_intr_stop(device_t bus, device_t child)
struct hidbus_softc *sc = device_get_softc(bus);
struct hidbus_ivars *ivar = device_get_ivars(child);
struct hidbus_ivars *tlc;
- bool refcnted = false;
+ bool active = false;
int error;
if (sx_xlock_sig(&sc->sx) != 0)
return (EINTR);
- CK_STAILQ_FOREACH(tlc, &sc->tlcs, link) {
- if (tlc == ivar) {
- mtx_lock(tlc->mtx);
- MPASS(tlc->refcnt != 0);
- --tlc->refcnt;
- mtx_unlock(tlc->mtx);
- }
- refcnted |= (tlc->refcnt != 0);
- }
- error = refcnted ? 0 : hid_intr_stop(bus);
+ mtx_lock(ivar->mtx);
+ ivar->active = false;
+ mtx_unlock(ivar->mtx);
+ CK_STAILQ_FOREACH(tlc, &sc->tlcs, link)
+ active |= tlc->active;
+ error = active ? 0 : hid_intr_stop(bus);
sx_unlock(&sc->sx);
return (error);
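
With the per-child flag, hid_intr_start() now runs on every child start, restarting the transfer if it was stopped, while hid_intr_stop() only takes effect once no sibling is active. Child drivers keep the same calls as before; a minimal sketch with hypothetical names:

static int
foo_ev_open(struct foo_softc *sc)
{
	/* Marks this child active and (re)starts the bus interrupt. */
	return (hid_intr_start(sc->dev));
}

static int
foo_ev_close(struct foo_softc *sc)
{
	/* The transfer is really stopped only when no child is active. */
	return (hid_intr_stop(sc->dev));
}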
diff --git a/sys/dev/hid/hidquirk.h b/sys/dev/hid/hidquirk.h
index 4f8b8acbe201..f6fa9f88c6c9 100644
--- a/sys/dev/hid/hidquirk.h
+++ b/sys/dev/hid/hidquirk.h
@@ -50,6 +50,7 @@
HQ(IS_XBOX360GP), /* device is XBox 360 GamePad */ \
HQ(NOWRITE), /* device does not support writes */ \
HQ(IICHID_SAMPLING), /* IIC backend runs in sampling mode */ \
+ HQ(NO_READAHEAD), /* Disable interrupt after one report */\
\
/* Various quirks */ \
HQ(HID_IGNORE), /* device should be ignored by hid class */ \
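
A driver opts in at attach time by flipping the quirk on its device info, as the new u2f driver does below with hid_add_dynamic_quirk(hw, HQ_NO_READAHEAD).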
diff --git a/sys/dev/hid/hidraw.c b/sys/dev/hid/hidraw.c
index 06f70070f61b..4855843cd265 100644
--- a/sys/dev/hid/hidraw.c
+++ b/sys/dev/hid/hidraw.c
@@ -85,6 +85,12 @@ SYSCTL_INT(_hw_hid_hidraw, OID_AUTO, debug, CTLFLAG_RWTUN,
free((buf), M_DEVBUF); \
}
+#ifdef HIDRAW_MAKE_UHID_ALIAS
+#define HIDRAW_NAME "uhid"
+#else
+#define HIDRAW_NAME "hidraw"
+#endif
+
struct hidraw_softc {
device_t sc_dev; /* base device */
@@ -183,8 +189,8 @@ hidraw_identify(driver_t *driver, device_t parent)
{
device_t child;
- if (device_find_child(parent, "hidraw", DEVICE_UNIT_ANY) == NULL) {
- child = BUS_ADD_CHILD(parent, 0, "hidraw",
+ if (device_find_child(parent, HIDRAW_NAME, DEVICE_UNIT_ANY) == NULL) {
+ child = BUS_ADD_CHILD(parent, 0, HIDRAW_NAME,
device_get_unit(parent));
if (child != NULL)
hidbus_set_index(child, HIDRAW_INDEX);
@@ -1050,7 +1056,7 @@ static device_method_t hidraw_methods[] = {
};
static driver_t hidraw_driver = {
- "hidraw",
+ HIDRAW_NAME,
hidraw_methods,
sizeof(struct hidraw_softc)
};
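
Assuming the new #ifdef is exposed as a kernel option of the same name (the options-file hookup is not shown in this diff), a kernel config would request the compatibility name with:

options 	HIDRAW_MAKE_UHID_ALIAS	# hidraw instances attach as uhidN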
diff --git a/sys/dev/hid/ietp.c b/sys/dev/hid/ietp.c
index 217585a7948b..73a5cb7414d4 100644
--- a/sys/dev/hid/ietp.c
+++ b/sys/dev/hid/ietp.c
@@ -102,6 +102,7 @@ struct ietp_softc {
device_t dev;
struct evdev_dev *evdev;
+ bool open;
uint8_t report_id;
hid_size_t report_len;
@@ -217,13 +218,25 @@ static const struct evdev_methods ietp_evdev_methods = {
static int
ietp_ev_open(struct evdev_dev *evdev)
{
- return (hid_intr_start(evdev_get_softc(evdev)));
+ struct ietp_softc *sc = evdev_get_softc(evdev);
+ int error;
+
+ error = hid_intr_start(sc->dev);
+ if (error == 0)
+ sc->open = true;
+ return (error);
}
static int
ietp_ev_close(struct evdev_dev *evdev)
{
- return (hid_intr_stop(evdev_get_softc(evdev)));
+ struct ietp_softc *sc = evdev_get_softc(evdev);
+ int error;
+
+ error = hid_intr_stop(sc->dev);
+ if (error == 0)
+ sc->open = false;
+ return (error);
}
static int
@@ -275,7 +288,7 @@ ietp_attach(struct ietp_softc *sc)
evdev_set_id(sc->evdev, hw->idBus, hw->idVendor, hw->idProduct,
hw->idVersion);
evdev_set_serial(sc->evdev, hw->serial);
- evdev_set_methods(sc->evdev, sc->dev, &ietp_evdev_methods);
+ evdev_set_methods(sc->evdev, sc, &ietp_evdev_methods);
evdev_set_flag(sc->evdev, EVDEV_FLAG_MT_STCOMPAT);
evdev_set_flag(sc->evdev, EVDEV_FLAG_EXT_EPOCH); /* hidbus child */
@@ -584,11 +597,13 @@ ietp_iic_set_absolute_mode(device_t dev, bool enable)
* Some ASUS touchpads need to be powered on to enter absolute mode.
*/
require_wakeup = false;
- for (i = 0; i < nitems(special_fw); i++) {
- if (sc->ic_type == special_fw[i].ic_type &&
- sc->product_id == special_fw[i].product_id) {
- require_wakeup = true;
- break;
+ if (!sc->open) {
+ for (i = 0; i < nitems(special_fw); i++) {
+ if (sc->ic_type == special_fw[i].ic_type &&
+ sc->product_id == special_fw[i].product_id) {
+ require_wakeup = true;
+ break;
+ }
}
}
diff --git a/sys/dev/hid/u2f.c b/sys/dev/hid/u2f.c
new file mode 100644
index 000000000000..ac2eba7a499d
--- /dev/null
+++ b/sys/dev/hid/u2f.c
@@ -0,0 +1,590 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022-2023 Vladimir Kondratyev <wulf@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_hid.h"
+
+#include <sys/param.h>
+#ifdef COMPAT_FREEBSD32
+#include <sys/abi_compat.h>
+#endif
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/fcntl.h>
+#include <sys/filio.h>
+#include <sys/ioccom.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/poll.h>
+#include <sys/priv.h>
+#include <sys/proc.h>
+#include <sys/selinfo.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+
+#include <dev/evdev/input.h>
+
+#define HID_DEBUG_VAR u2f_debug
+#include <dev/hid/hid.h>
+#include <dev/hid/hidbus.h>
+#include <dev/hid/hidquirk.h>
+
+#include <dev/usb/usb_ioctl.h>
+
+#ifdef HID_DEBUG
+static int u2f_debug = 0;
+static SYSCTL_NODE(_hw_hid, OID_AUTO, u2f, CTLFLAG_RW, 0,
+ "FIDO/U2F authenticator");
+SYSCTL_INT(_hw_hid_u2f, OID_AUTO, debug, CTLFLAG_RWTUN,
+ &u2f_debug, 0, "Debug level");
+#endif
+
+#define U2F_MAX_REPORT_SIZE 64
+
+/* A match on these entries will load u2f */
+static const struct hid_device_id u2f_devs[] = {
+ { HID_BUS(BUS_USB), HID_TLC(HUP_FIDO, HUF_U2FHID) },
+};
+
+struct u2f_softc {
+ device_t sc_dev; /* base device */
+ struct cdev *dev;
+
+ struct mtx sc_mtx; /* hidbus private mutex */
+ void *sc_rdesc;
+ hid_size_t sc_rdesc_size;
+ hid_size_t sc_isize;
+ hid_size_t sc_osize;
+ struct selinfo sc_rsel;
+ struct { /* driver state */
+ bool open:1; /* device is open */
+ bool aslp:1; /* waiting for device data in read() */
+ bool sel:1; /* waiting for device data in poll() */
+ bool data:1; /* input report is stored in sc_buf */
+ int reserved:28;
+ } sc_state;
+ int sc_fflags; /* access mode for open lifetime */
+
+ uint8_t sc_buf[U2F_MAX_REPORT_SIZE];
+};
+
+static d_open_t u2f_open;
+static d_read_t u2f_read;
+static d_write_t u2f_write;
+static d_ioctl_t u2f_ioctl;
+static d_poll_t u2f_poll;
+static d_kqfilter_t u2f_kqfilter;
+
+static d_priv_dtor_t u2f_dtor;
+
+static struct cdevsw u2f_cdevsw = {
+ .d_version = D_VERSION,
+ .d_open = u2f_open,
+ .d_read = u2f_read,
+ .d_write = u2f_write,
+ .d_ioctl = u2f_ioctl,
+ .d_poll = u2f_poll,
+ .d_kqfilter = u2f_kqfilter,
+ .d_name = "u2f",
+};
+
+static hid_intr_t u2f_intr;
+
+static device_probe_t u2f_probe;
+static device_attach_t u2f_attach;
+static device_detach_t u2f_detach;
+
+static int u2f_kqread(struct knote *, long);
+static void u2f_kqdetach(struct knote *);
+static void u2f_notify(struct u2f_softc *);
+
+static struct filterops u2f_filterops_read = {
+ .f_isfd = 1,
+ .f_detach = u2f_kqdetach,
+ .f_event = u2f_kqread,
+};
+
+static int
+u2f_probe(device_t dev)
+{
+ int error;
+
+ error = HIDBUS_LOOKUP_DRIVER_INFO(dev, u2f_devs);
+ if (error != 0)
+ return (error);
+
+ hidbus_set_desc(dev, "Authenticator");
+
+ return (BUS_PROBE_GENERIC);
+}
+
+static int
+u2f_attach(device_t dev)
+{
+ struct u2f_softc *sc = device_get_softc(dev);
+ struct hid_device_info *hw = __DECONST(struct hid_device_info *,
+ hid_get_device_info(dev));
+ struct make_dev_args mda;
+ int error;
+
+ sc->sc_dev = dev;
+
+ error = hid_get_report_descr(dev, &sc->sc_rdesc, &sc->sc_rdesc_size);
+ if (error != 0)
+ return (ENXIO);
+ sc->sc_isize = hid_report_size_max(sc->sc_rdesc, sc->sc_rdesc_size,
+ hid_input, NULL);
+ if (sc->sc_isize > U2F_MAX_REPORT_SIZE) {
+ device_printf(dev, "Input report size too large. Truncate.\n");
+ sc->sc_isize = U2F_MAX_REPORT_SIZE;
+ }
+ sc->sc_osize = hid_report_size_max(sc->sc_rdesc, sc->sc_rdesc_size,
+ hid_output, NULL);
+ if (sc->sc_osize > U2F_MAX_REPORT_SIZE) {
+ device_printf(dev, "Output report size too large. Truncate.\n");
+ sc->sc_osize = U2F_MAX_REPORT_SIZE;
+ }
+
+ mtx_init(&sc->sc_mtx, "u2f lock", NULL, MTX_DEF);
+ knlist_init_mtx(&sc->sc_rsel.si_note, &sc->sc_mtx);
+
+ make_dev_args_init(&mda);
+ mda.mda_flags = MAKEDEV_WAITOK;
+ mda.mda_devsw = &u2f_cdevsw;
+ mda.mda_uid = UID_ROOT;
+ mda.mda_gid = GID_U2F;
+ mda.mda_mode = 0660;
+ mda.mda_si_drv1 = sc;
+
+ error = make_dev_s(&mda, &sc->dev, "u2f/%d", device_get_unit(dev));
+ if (error) {
+ device_printf(dev, "Can not create character device\n");
+ u2f_detach(dev);
+ return (error);
+ }
+#ifdef U2F_MAKE_UHID_ALIAS
+ (void)make_dev_alias(sc->dev, "uhid%d", device_get_unit(dev));
+#endif
+
+ hid_add_dynamic_quirk(hw, HQ_NO_READAHEAD);
+
+ hidbus_set_lock(dev, &sc->sc_mtx);
+ hidbus_set_intr(dev, u2f_intr, sc);
+
+ return (0);
+}
+
+static int
+u2f_detach(device_t dev)
+{
+ struct u2f_softc *sc = device_get_softc(dev);
+
+ DPRINTF("sc=%p\n", sc);
+
+ if (sc->dev != NULL) {
+ mtx_lock(&sc->sc_mtx);
+ sc->dev->si_drv1 = NULL;
+ /* Wake everyone */
+ u2f_notify(sc);
+ mtx_unlock(&sc->sc_mtx);
+ destroy_dev(sc->dev);
+ }
+
+ hid_intr_stop(sc->sc_dev);
+
+ knlist_clear(&sc->sc_rsel.si_note, 0);
+ knlist_destroy(&sc->sc_rsel.si_note);
+ seldrain(&sc->sc_rsel);
+ mtx_destroy(&sc->sc_mtx);
+
+ return (0);
+}
+
+void
+u2f_intr(void *context, void *buf, hid_size_t len)
+{
+ struct u2f_softc *sc = context;
+
+ mtx_assert(&sc->sc_mtx, MA_OWNED);
+
+ DPRINTFN(5, "len=%d\n", len);
+ DPRINTFN(5, "data = %*D\n", len, buf, " ");
+
+ if (sc->sc_state.data)
+ return;
+
+ if (len > sc->sc_isize)
+ len = sc->sc_isize;
+
+ bcopy(buf, sc->sc_buf, len);
+
+ /* Make sure we don't process old data */
+ if (len < sc->sc_isize)
+ bzero(sc->sc_buf + len, sc->sc_isize - len);
+
+ sc->sc_state.data = true;
+
+ u2f_notify(sc);
+}
+
+static int
+u2f_open(struct cdev *dev, int flag, int mode, struct thread *td)
+{
+ struct u2f_softc *sc = dev->si_drv1;
+ int error;
+
+ if (sc == NULL)
+ return (ENXIO);
+
+ DPRINTF("sc=%p\n", sc);
+
+ mtx_lock(&sc->sc_mtx);
+ if (sc->sc_state.open) {
+ mtx_unlock(&sc->sc_mtx);
+ return (EBUSY);
+ }
+ sc->sc_state.open = true;
+ mtx_unlock(&sc->sc_mtx);
+
+ error = devfs_set_cdevpriv(sc, u2f_dtor);
+ if (error != 0) {
+ mtx_lock(&sc->sc_mtx);
+ sc->sc_state.open = false;
+ mtx_unlock(&sc->sc_mtx);
+ return (error);
+ }
+
+ /* Set up interrupt pipe. */
+ sc->sc_state.data = false;
+ sc->sc_fflags = flag;
+
+ return (0);
+}
+
+static void
+u2f_dtor(void *data)
+{
+ struct u2f_softc *sc = data;
+
+#ifdef NOT_YET
+ /* Disable interrupts. */
+ hid_intr_stop(sc->sc_dev);
+#endif
+
+ mtx_lock(&sc->sc_mtx);
+ sc->sc_state.open = false;
+ mtx_unlock(&sc->sc_mtx);
+}
+
+static int
+u2f_read(struct cdev *dev, struct uio *uio, int flag)
+{
+ uint8_t buf[U2F_MAX_REPORT_SIZE];
+ struct u2f_softc *sc = dev->si_drv1;
+ size_t length = 0;
+ int error = 0;
+
+ DPRINTFN(1, "\n");
+
+ if (sc == NULL)
+ return (EIO);
+
+ if (!sc->sc_state.data)
+ hid_intr_start(sc->sc_dev);
+
+ mtx_lock(&sc->sc_mtx);
+ if (dev->si_drv1 == NULL) {
+ error = EIO;
+ goto exit;
+ }
+
+ while (!sc->sc_state.data) {
+ if (flag & O_NONBLOCK) {
+ error = EWOULDBLOCK;
+ goto exit;
+ }
+ sc->sc_state.aslp = true;
+ DPRINTFN(5, "sleep on %p\n", &sc->sc_buf);
+ error = mtx_sleep(&sc->sc_buf, &sc->sc_mtx, PZERO | PCATCH,
+ "u2frd", 0);
+ DPRINTFN(5, "woke, error=%d\n", error);
+ if (dev->si_drv1 == NULL)
+ error = EIO;
+ if (error) {
+ sc->sc_state.aslp = false;
+ goto exit;
+ }
+ }
+
+ if (sc->sc_state.data && uio->uio_resid > 0) {
+ length = min(uio->uio_resid, sc->sc_isize);
+ memcpy(buf, sc->sc_buf, length);
+ sc->sc_state.data = false;
+ }
+exit:
+ mtx_unlock(&sc->sc_mtx);
+ if (length != 0) {
+ /* Copy the data to the user process. */
+ DPRINTFN(5, "got %lu chars\n", (u_long)length);
+ error = uiomove(buf, length, uio);
+ }
+
+ return (error);
+}
+
+static int
+u2f_write(struct cdev *dev, struct uio *uio, int flag)
+{
+ uint8_t buf[U2F_MAX_REPORT_SIZE];
+ struct u2f_softc *sc = dev->si_drv1;
+ int error;
+
+ DPRINTFN(1, "\n");
+
+ if (sc == NULL)
+ return (EIO);
+
+ if (uio->uio_resid != sc->sc_osize)
+ return (EINVAL);
+ error = uiomove(buf, uio->uio_resid, uio);
+ if (error == 0)
+ error = hid_write(sc->sc_dev, buf, sc->sc_osize);
+
+ return (error);
+}
+
+#ifdef COMPAT_FREEBSD32
+static void
+update_ugd32(const struct usb_gen_descriptor *ugd,
+ struct usb_gen_descriptor32 *ugd32)
+{
+ /* Don't update hgd_data pointer */
+ CP(*ugd, *ugd32, ugd_lang_id);
+ CP(*ugd, *ugd32, ugd_maxlen);
+ CP(*ugd, *ugd32, ugd_actlen);
+ CP(*ugd, *ugd32, ugd_offset);
+ CP(*ugd, *ugd32, ugd_config_index);
+ CP(*ugd, *ugd32, ugd_string_index);
+ CP(*ugd, *ugd32, ugd_iface_index);
+ CP(*ugd, *ugd32, ugd_altif_index);
+ CP(*ugd, *ugd32, ugd_endpt_index);
+ CP(*ugd, *ugd32, ugd_report_type);
+ /* Don't update reserved */
+}
+#endif
+
+static int
+u2f_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
+ struct thread *td)
+{
+#ifdef COMPAT_FREEBSD32
+ struct usb_gen_descriptor local_ugd;
+ struct usb_gen_descriptor32 *ugd32 = NULL;
+#endif
+ struct u2f_softc *sc = dev->si_drv1;
+ struct usb_gen_descriptor *ugd = (struct usb_gen_descriptor *)addr;
+ uint32_t size;
+
+ DPRINTFN(2, "cmd=%lx\n", cmd);
+
+ if (sc == NULL)
+ return (EIO);
+
+#ifdef COMPAT_FREEBSD32
+ switch (cmd) {
+ case USB_GET_REPORT_DESC32:
+ cmd = _IOC_NEWTYPE(cmd, struct usb_gen_descriptor);
+ ugd32 = (struct usb_gen_descriptor32 *)addr;
+ ugd = &local_ugd;
+ PTRIN_CP(*ugd32, *ugd, ugd_data);
+ CP(*ugd32, *ugd, ugd_lang_id);
+ CP(*ugd32, *ugd, ugd_maxlen);
+ CP(*ugd32, *ugd, ugd_actlen);
+ CP(*ugd32, *ugd, ugd_offset);
+ CP(*ugd32, *ugd, ugd_config_index);
+ CP(*ugd32, *ugd, ugd_string_index);
+ CP(*ugd32, *ugd, ugd_iface_index);
+ CP(*ugd32, *ugd, ugd_altif_index);
+ CP(*ugd32, *ugd, ugd_endpt_index);
+ CP(*ugd32, *ugd, ugd_report_type);
+ /* Don't copy reserved */
+ break;
+ }
+#endif
+
+ /* fixed-length ioctls handling */
+ switch (cmd) {
+ case FIONBIO:
+ /* All handled in the upper FS layer. */
+ return (0);
+
+ case USB_GET_REPORT_DESC:
+ size = MIN(sc->sc_rdesc_size, ugd->ugd_maxlen);
+ ugd->ugd_actlen = size;
+#ifdef COMPAT_FREEBSD32
+ if (ugd32 != NULL)
+ update_ugd32(ugd, ugd32);
+#endif
+ if (ugd->ugd_data == NULL)
+ return (0); /* descriptor length only */
+
+ return (copyout(sc->sc_rdesc, ugd->ugd_data, size));
+
+ case USB_GET_DEVICEINFO:
+ return(hid_ioctl(
+ sc->sc_dev, USB_GET_DEVICEINFO, (uintptr_t)addr));
+ }
+
+ return (EINVAL);
+}
+
+static int
+u2f_poll(struct cdev *dev, int events, struct thread *td)
+{
+ struct u2f_softc *sc = dev->si_drv1;
+ int revents = 0;
+ bool start_intr = false;
+
+ if (sc == NULL)
+ return (POLLHUP);
+
+ if (events & (POLLOUT | POLLWRNORM) && (sc->sc_fflags & FWRITE))
+ revents |= events & (POLLOUT | POLLWRNORM);
+ if (events & (POLLIN | POLLRDNORM) && (sc->sc_fflags & FREAD)) {
+ mtx_lock(&sc->sc_mtx);
+ if (sc->sc_state.data)
+ revents |= events & (POLLIN | POLLRDNORM);
+ else {
+ sc->sc_state.sel = true;
+ start_intr = true;
+ selrecord(td, &sc->sc_rsel);
+ }
+ mtx_unlock(&sc->sc_mtx);
+ if (start_intr)
+ hid_intr_start(sc->sc_dev);
+ }
+
+ return (revents);
+}
+
+static int
+u2f_kqfilter(struct cdev *dev, struct knote *kn)
+{
+ struct u2f_softc *sc = dev->si_drv1;
+
+ if (sc == NULL)
+ return (ENXIO);
+
+ switch(kn->kn_filter) {
+ case EVFILT_READ:
+ if (sc->sc_fflags & FREAD) {
+ kn->kn_fop = &u2f_filterops_read;
+ break;
+ }
+ /* FALLTHROUGH */
+ default:
+ return(EINVAL);
+ }
+ kn->kn_hook = sc;
+
+ knlist_add(&sc->sc_rsel.si_note, kn, 0);
+ return (0);
+}
+
+static int
+u2f_kqread(struct knote *kn, long hint)
+{
+ struct u2f_softc *sc = kn->kn_hook;
+ int ret;
+
+ mtx_assert(&sc->sc_mtx, MA_OWNED);
+
+ if (sc->dev->si_drv1 == NULL) {
+ kn->kn_flags |= EV_EOF;
+ ret = 1;
+ } else {
+ ret = sc->sc_state.data ? 1 : 0;
+ if (!sc->sc_state.data)
+ hid_intr_start(sc->sc_dev);
+ }
+
+ return (ret);
+}
+
+static void
+u2f_kqdetach(struct knote *kn)
+{
+ struct u2f_softc *sc = kn->kn_hook;
+
+ knlist_remove(&sc->sc_rsel.si_note, kn, 0);
+}
+
+static void
+u2f_notify(struct u2f_softc *sc)
+{
+ mtx_assert(&sc->sc_mtx, MA_OWNED);
+
+ if (sc->sc_state.aslp) {
+ sc->sc_state.aslp = false;
+ DPRINTFN(5, "waking %p\n", &sc->sc_buf);
+ wakeup(&sc->sc_buf);
+ }
+ if (sc->sc_state.sel) {
+ sc->sc_state.sel = false;
+ selwakeuppri(&sc->sc_rsel, PZERO);
+ }
+ KNOTE_LOCKED(&sc->sc_rsel.si_note, 0);
+}
+
+static device_method_t u2f_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, u2f_probe),
+ DEVMETHOD(device_attach, u2f_attach),
+ DEVMETHOD(device_detach, u2f_detach),
+
+ DEVMETHOD_END
+};
+
+static driver_t u2f_driver = {
+#ifdef U2F_MAKE_UHID_ALIAS
+ "uhid",
+#else
+ "u2f",
+#endif
+ u2f_methods,
+ sizeof(struct u2f_softc)
+};
+
+DRIVER_MODULE(u2f, hidbus, u2f_driver, NULL, NULL);
+MODULE_DEPEND(u2f, hidbus, 1, 1, 1);
+MODULE_DEPEND(u2f, hid, 1, 1, 1);
+MODULE_VERSION(u2f, 1);
+HID_PNP_INFO(u2f_devs);
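
From userland the new node behaves like a fixed-size report pipe. A minimal sketch of a client: the device path follows the make_dev_s() call above, the 64-byte size follows U2F_MAX_REPORT_SIZE, and real FIDO traffic would additionally need CTAPHID framing, omitted here:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	unsigned char report[64];	/* U2F_MAX_REPORT_SIZE */
	int fd;

	fd = open("/dev/u2f/0", O_RDWR);
	if (fd < 0)
		return (1);
	memset(report, 0, sizeof(report));
	/* Writes must be exactly the output report size (see u2f_write()). */
	if (write(fd, report, sizeof(report)) != (ssize_t)sizeof(report) ||
	    read(fd, report, sizeof(report)) < 0)	/* blocks for input */
		return (1);
	close(fd);
	return (0);
}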
diff --git a/sys/dev/ice/ice_bitops.h b/sys/dev/ice/ice_bitops.h
index c480900596f4..a623f810c101 100644
--- a/sys/dev/ice/ice_bitops.h
+++ b/sys/dev/ice/ice_bitops.h
@@ -198,7 +198,7 @@ static inline void ice_zero_bitmap(ice_bitmap_t *bmp, u16 size)
* ice_and_bitmap - bitwise AND 2 bitmaps and store result in dst bitmap
* @dst: Destination bitmap that receive the result of the operation
* @bmp1: The first bitmap to intersect
- * @bmp2: The second bitmap to intersect wit the first
+ * @bmp2: The second bitmap to intersect with the first
* @size: Size of the bitmaps in bits
*
* This function performs a bitwise AND on two "source" bitmaps of the same size
@@ -237,7 +237,7 @@ ice_and_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
* ice_or_bitmap - bitwise OR 2 bitmaps and store result in dst bitmap
* @dst: Destination bitmap that receive the result of the operation
* @bmp1: The first bitmap to intersect
- * @bmp2: The second bitmap to intersect wit the first
+ * @bmp2: The second bitmap to intersect with the first
* @size: Size of the bitmaps in bits
*
* This function performs a bitwise OR on two "source" bitmaps of the same size
diff --git a/sys/dev/ice/ice_lan_tx_rx.h b/sys/dev/ice/ice_lan_tx_rx.h
index 693e0ca5efc6..eedacdab0216 100644
--- a/sys/dev/ice/ice_lan_tx_rx.h
+++ b/sys/dev/ice/ice_lan_tx_rx.h
@@ -630,7 +630,7 @@ enum ice_rxdid {
ICE_RXDID_LAST = 63,
};
-/* Recceive Flex descriptor Dword Index */
+/* Receive Flex descriptor Dword Index */
enum ice_flex_word {
ICE_RX_FLEX_DWORD_0 = 0,
ICE_RX_FLEX_DWORD_1,
diff --git a/sys/dev/ice/ice_lib.h b/sys/dev/ice/ice_lib.h
index 308b2bda2790..640bdf8fed7b 100644
--- a/sys/dev/ice/ice_lib.h
+++ b/sys/dev/ice/ice_lib.h
@@ -313,7 +313,7 @@ enum ice_dyn_idx_t {
ICE_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
};
-/* By convenction ITR0 is used for RX, and ITR1 is used for TX */
+/* By convention ITR0 is used for RX, and ITR1 is used for TX */
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
diff --git a/sys/dev/ice/ice_protocol_type.h b/sys/dev/ice/ice_protocol_type.h
index 300d61bfb5d9..b90c25e6c427 100644
--- a/sys/dev/ice/ice_protocol_type.h
+++ b/sys/dev/ice/ice_protocol_type.h
@@ -143,7 +143,7 @@ enum ice_prot_id {
ICE_PROT_LLDP_OF = 117,
ICE_PROT_ARP_OF = 118,
ICE_PROT_EAPOL_OF = 120,
- ICE_PROT_META_ID = 255, /* when offset == metaddata */
+ ICE_PROT_META_ID = 255, /* when offset == metadata */
ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
};
diff --git a/sys/dev/iicbus/gpio/pcf8574.c b/sys/dev/iicbus/gpio/pcf8574.c
index ab6e2bc07d1f..bf60dec67557 100644
--- a/sys/dev/iicbus/gpio/pcf8574.c
+++ b/sys/dev/iicbus/gpio/pcf8574.c
@@ -142,12 +142,13 @@ pcf8574_attach(device_t dev)
(void)pcf8574_write(sc, 0xff);
sx_init(&sc->lock, "pcf8574");
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
device_printf(dev, "Could not create busdev child\n");
sx_destroy(&sc->lock);
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
@@ -158,9 +159,7 @@ pcf8574_detach(device_t dev)
sc = device_get_softc(dev);
- if (sc->busdev != NULL)
- gpiobus_detach_bus(sc->busdev);
-
+ gpiobus_detach_bus(dev);
sx_destroy(&sc->lock);
return (0);
}
diff --git a/sys/dev/iicbus/gpio/tca64xx.c b/sys/dev/iicbus/gpio/tca64xx.c
index cd011ae9be75..ab8fedd3f8fd 100644
--- a/sys/dev/iicbus/gpio/tca64xx.c
+++ b/sys/dev/iicbus/gpio/tca64xx.c
@@ -262,7 +262,7 @@ tca64xx_attach(device_t dev)
mtx_init(&sc->mtx, "tca64xx gpio", "gpio", MTX_DEF);
OF_device_register_xref(OF_xref_from_node(ofw_bus_get_node(dev)), dev);
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
device_printf(dev, "Could not create busdev child\n");
return (ENXIO);
@@ -281,6 +281,7 @@ tca64xx_attach(device_t dev)
}
#endif
+ bus_attach_children(dev);
return (0);
}
@@ -291,9 +292,7 @@ tca64xx_detach(device_t dev)
sc = device_get_softc(dev);
- if (sc->busdev != NULL)
- gpiobus_detach_bus(sc->busdev);
-
+ gpiobus_detach_bus(dev);
mtx_destroy(&sc->mtx);
return (0);
diff --git a/sys/dev/iicbus/iichid.c b/sys/dev/iicbus/iichid.c
index 3f1d7a0cefba..fdb4816b8bd9 100644
--- a/sys/dev/iicbus/iichid.c
+++ b/sys/dev/iicbus/iichid.c
@@ -861,7 +861,8 @@ iichid_intr_start(device_t dev, device_t child __unused)
sc = device_get_softc(dev);
DPRINTF(sc, "iichid device open\n");
- iichid_set_power_state(sc, IICHID_PS_ON, IICHID_PS_NULL);
+ if (!sc->open)
+ iichid_set_power_state(sc, IICHID_PS_ON, IICHID_PS_NULL);
return (0);
}
diff --git a/sys/dev/isci/scil/intel_sata.h b/sys/dev/isci/scil/intel_sata.h
index 4cf4adf03e07..fdad5be9b083 100644
--- a/sys/dev/isci/scil/intel_sata.h
+++ b/sys/dev/isci/scil/intel_sata.h
@@ -61,7 +61,7 @@
*
* @brief This file defines all of the SATA releated constants, enumerations,
* and types. Please note that this file does not necessarily contain
- * an exhaustive list of all contants and commands.
+ * an exhaustive list of all constants and commands.
*/
/**
diff --git a/sys/dev/ixgbe/if_ix.c b/sys/dev/ixgbe/if_ix.c
index 959afa79e7da..73c0fd1ab16f 100644
--- a/sys/dev/ixgbe/if_ix.c
+++ b/sys/dev/ixgbe/if_ix.c
@@ -45,7 +45,7 @@
/************************************************************************
* Driver version
************************************************************************/
-static const char ixgbe_driver_version[] = "4.0.1-k";
+static const char ixgbe_driver_version[] = "5.0.1-k";
/************************************************************************
* PCI Device ID Table
@@ -144,6 +144,16 @@ static const pci_vendor_info_t ixgbe_vendor_info_array[] =
"Intel(R) X540-T2 (Bypass)"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS,
"Intel(R) X520 82599 (Bypass)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_BACKPLANE,
+ "Intel(R) E610 (Backplane)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_SFP,
+ "Intel(R) E610 (SFP)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_2_5G_T,
+ "Intel(R) E610 (2.5 GbE)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_10G_T,
+ "Intel(R) E610 (10 GbE)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_SGMII,
+ "Intel(R) E610 (SGMII)"),
/* required last entry */
PVID_END
};
@@ -253,6 +263,10 @@ static int ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS);
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);
+static void ixgbe_handle_fw_event(void *);
+
+static int ixgbe_enable_lse(struct ixgbe_softc *sc);
+static int ixgbe_disable_lse(struct ixgbe_softc *sc);
/************************************************************************
* FreeBSD Device Interface Entry Points
@@ -621,6 +635,7 @@ ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
table_size = 512;
break;
default:
@@ -902,6 +917,32 @@ ixgbe_initialize_transmit_units(if_ctx_t ctx)
} /* ixgbe_initialize_transmit_units */
+static int
+ixgbe_check_fw_api_version(struct ixgbe_softc *sc)
+{
+ struct ixgbe_hw *hw = &sc->hw;
+ if (hw->api_maj_ver > IXGBE_FW_API_VER_MAJOR) {
+ device_printf(sc->dev,
+ "The driver for the device stopped because the NVM "
+ "image is newer than expected. You must install the "
+ "most recent version of the network driver.\n");
+ return (EOPNOTSUPP);
+ } else if (hw->api_maj_ver == IXGBE_FW_API_VER_MAJOR &&
+ hw->api_min_ver > (IXGBE_FW_API_VER_MINOR + 2)) {
+ device_printf(sc->dev,
+ "The driver for the device detected a newer version of "
+ "the NVM image than expected. Please install the most "
+ "recent version of the network driver.\n");
+ } else if (hw->api_maj_ver < IXGBE_FW_API_VER_MAJOR ||
+ hw->api_min_ver < IXGBE_FW_API_VER_MINOR - 2) {
+ device_printf(sc->dev,
+ "The driver for the device detected an older version "
+ "of the NVM image than expected. "
+ "Please update the NVM image.\n");
+ }
+ return (0);
+}
+
/************************************************************************
* ixgbe_register
************************************************************************/
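
(To make the version policy above concrete with a hypothetical expected API version of 1.7: an NVM reporting major 2 stops the attach; minor 10 or above, minor 4 or below, or an older major only logs a warning; minors 5 through 9 attach silently.)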
@@ -970,6 +1011,9 @@ ixgbe_if_attach_pre(if_ctx_t ctx)
goto err_pci;
}
+ if (hw->mac.type == ixgbe_mac_E610)
+ ixgbe_init_aci(hw);
+
if (hw->mac.ops.fw_recovery_mode &&
hw->mac.ops.fw_recovery_mode(hw)) {
device_printf(dev,
@@ -1058,6 +1102,12 @@ ixgbe_if_attach_pre(if_ctx_t ctx)
break;
}
+ /* Check the FW API version */
+ if (hw->mac.type == ixgbe_mac_E610 && ixgbe_check_fw_api_version(sc)) {
+ error = EIO;
+ goto err_pci;
+ }
+
/* Most of the iflib initialization... */
iflib_set_mac(ctx, hw->mac.addr);
@@ -1111,6 +1161,9 @@ err_pci:
IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
ixgbe_free_pci_resources(ctx);
+ if (hw->mac.type == ixgbe_mac_E610)
+ ixgbe_shutdown_aci(hw);
+
return (error);
} /* ixgbe_if_attach_pre */
@@ -1358,6 +1411,10 @@ ixgbe_add_media_types(if_ctx_t ctx)
/* Media types with matching FreeBSD media defines */
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
+ if (layer & IXGBE_PHYSICAL_LAYER_5000BASE_T)
+ ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
+ if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T)
+ ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
@@ -1459,6 +1516,7 @@ ixgbe_is_sfp(struct ixgbe_hw *hw)
}
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
return (true);
return (false);
@@ -1525,6 +1583,15 @@ ixgbe_config_link(if_ctx_t ctx)
IXGBE_LINK_SPEED_5GB_FULL);
}
+ if (hw->mac.type == ixgbe_mac_E610) {
+ hw->phy.ops.init(hw);
+ err = ixgbe_enable_lse(sc);
+ if (err)
+ device_printf(sc->dev,
+ "Failed to enable Link Status Event, "
+ "error: %d", err);
+ }
+
if (hw->mac.ops.setup_link)
err = hw->mac.ops.setup_link(hw, autoneg,
sc->link_up);
@@ -2158,14 +2225,15 @@ get_parent_info:
ixgbe_set_pci_config_data_generic(hw, link);
display:
- device_printf(dev, "PCI Express Bus: Speed %s %s\n",
- ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
+ device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
+ ((hw->bus.speed == ixgbe_bus_speed_16000) ? "16.0GT/s" :
+ (hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
(hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
(hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
"Unknown"),
- ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
- (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
- (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
+ ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
+ (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
+ (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
"Unknown"));
if (bus_info_valid) {
@@ -2372,14 +2440,17 @@ ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
ifmr->ifm_status |= IFM_ACTIVE;
layer = sc->phy_layer;
- if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
- layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
- layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
- layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
+ if (layer & IXGBE_PHYSICAL_LAYERS_BASE_T_ALL)
switch (sc->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
break;
+ case IXGBE_LINK_SPEED_5GB_FULL:
+ ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
+ break;
+ case IXGBE_LINK_SPEED_2_5GB_FULL:
+ ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
+ break;
case IXGBE_LINK_SPEED_1GB_FULL:
ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
break;
@@ -2390,15 +2461,6 @@ ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
ifmr->ifm_active |= IFM_10_T | IFM_FDX;
break;
}
- if (hw->mac.type == ixgbe_mac_X550)
- switch (sc->link_speed) {
- case IXGBE_LINK_SPEED_5GB_FULL:
- ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
- break;
- case IXGBE_LINK_SPEED_2_5GB_FULL:
- ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
- break;
- }
if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
switch (sc->link_speed) {
@@ -2676,6 +2738,11 @@ ixgbe_msix_link(void *arg)
sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
}
+ if (eicr & IXGBE_EICR_FW_EVENT) {
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FW_EVENT);
+ sc->task_requests |= IXGBE_REQUEST_TASK_FWEVENT;
+ }
+
if (sc->hw.mac.type != ixgbe_mac_82598EB) {
if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
(eicr & IXGBE_EICR_FLOW_DIR)) {
@@ -2734,11 +2801,16 @@ ixgbe_msix_link(void *arg)
/* Check for VF message */
if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
- (eicr & IXGBE_EICR_MAILBOX))
+ (eicr & IXGBE_EICR_MAILBOX)) {
sc->task_requests |= IXGBE_REQUEST_TASK_MBX;
+ }
}
- if (ixgbe_is_sfp(hw)) {
+ /*
+ * On E610, the firmware handles PHY configuration, so
+ * there is no need to perform any SFP-specific tasks.
+ */
+ if (hw->mac.type != ixgbe_mac_E610 && ixgbe_is_sfp(hw)) {
/* Pluggable optics-related interrupt */
if (hw->mac.type >= ixgbe_mac_X540)
eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
@@ -2985,7 +3057,13 @@ ixgbe_if_detach(if_ctx_t ctx)
callout_drain(&sc->fw_mode_timer);
+ if (sc->hw.mac.type == ixgbe_mac_E610) {
+ ixgbe_disable_lse(sc);
+ ixgbe_shutdown_aci(&sc->hw);
+ }
+
ixgbe_free_pci_resources(ctx);
+
free(sc->mta, M_IXGBE);
return (0);
@@ -3404,6 +3482,7 @@ ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
if (type == -1) { /* MISC IVAR */
index = (entry & 1) * 8;
ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
@@ -3826,6 +3905,96 @@ ixgbe_handle_phy(void *context)
} /* ixgbe_handle_phy */
/************************************************************************
+ * ixgbe_enable_lse - enable link status events
+ *
+ * Sets mask and enables link status events
+ ************************************************************************/
+s32 ixgbe_enable_lse(struct ixgbe_softc *sc)
+{
+ s32 error;
+ u16 mask = ~((u16)(IXGBE_ACI_LINK_EVENT_UPDOWN |
+ IXGBE_ACI_LINK_EVENT_MEDIA_NA |
+ IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL |
+ IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL));
+
+ error = ixgbe_configure_lse(&sc->hw, true, mask);
+ if (error)
+ return (error);
+
+ sc->lse_mask = mask;
+ return (IXGBE_SUCCESS);
+} /* ixgbe_enable_lse */
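A note on the inverted mask above, as a minimal sketch: assuming the Set Event
Mask convention carried over from the ice driver (not restated in this commit),
a cleared bit requests delivery of that link event and a set bit suppresses it,
so complementing the wanted events masks everything else:

	/* Illustrative only: bits cleared in 'mask' stay unmasked. */
	u16 wanted = IXGBE_ACI_LINK_EVENT_UPDOWN |
	    IXGBE_ACI_LINK_EVENT_MEDIA_NA;
	u16 mask = (u16)~wanted;	/* all other event bits set = masked */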
+
+/************************************************************************
+ * ixgbe_disable_lse - disable link status events
+ ************************************************************************/
+s32 ixgbe_disable_lse(struct ixgbe_softc *sc)
+{
+ s32 error;
+
+ error = ixgbe_configure_lse(&sc->hw, false, sc->lse_mask);
+ if (error)
+ return (error);
+
+ sc->lse_mask = 0;
+ return (IXGBE_SUCCESS);
+} /* ixgbe_disable_lse */
+
+/************************************************************************
+ * ixgbe_handle_fw_event - Tasklet for MSI-X Link Status Event interrupts
+ ************************************************************************/
+static void
+ixgbe_handle_fw_event(void *context)
+{
+ if_ctx_t ctx = context;
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_hw *hw = &sc->hw;
+ struct ixgbe_aci_event event;
+ bool pending = false;
+ s32 error;
+
+ event.buf_len = IXGBE_ACI_MAX_BUFFER_SIZE;
+ event.msg_buf = malloc(event.buf_len, M_IXGBE, M_ZERO | M_NOWAIT);
+ if (!event.msg_buf) {
+ device_printf(sc->dev, "Can not allocate buffer for "
+ "event message\n");
+ return;
+ }
+
+ do {
+ error = ixgbe_aci_get_event(hw, &event, &pending);
+ if (error) {
+ device_printf(sc->dev, "Error getting event from "
+ "FW:%d\n", error);
+ break;
+ }
+
+ switch (le16toh(event.desc.opcode)) {
+ case ixgbe_aci_opc_get_link_status:
+ sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
+ break;
+
+ case ixgbe_aci_opc_temp_tca_event:
+ if (hw->adapter_stopped == FALSE)
+ ixgbe_if_stop(ctx);
+ device_printf(sc->dev,
+ "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
+ device_printf(sc->dev, "System shutdown required!\n");
+ break;
+
+ default:
+ device_printf(sc->dev,
+ "Unknown FW event captured, opcode=0x%04X\n",
+ le16toh(event.desc.opcode));
+ break;
+ }
+ } while (pending);
+
+ free(event.msg_buf, M_IXGBE);
+} /* ixgbe_handle_fw_event */
+
+/************************************************************************
* ixgbe_if_stop - Stop the hardware
*
* Disables all traffic on the adapter by issuing a
@@ -3899,6 +4068,8 @@ ixgbe_if_update_admin_status(if_ctx_t ctx)
}
/* Handle task requests from msix_link() */
+ if (sc->task_requests & IXGBE_REQUEST_TASK_FWEVENT)
+ ixgbe_handle_fw_event(ctx);
if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
ixgbe_handle_mod(ctx);
if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
@@ -3986,6 +4157,9 @@ ixgbe_if_enable_intr(if_ctx_t ctx)
mask |= IXGBE_EICR_GPI_SDP0_X540;
mask |= IXGBE_EIMS_ECC;
break;
+ case ixgbe_mac_E610:
+ mask |= IXGBE_EIMS_FW_EVENT;
+ break;
default:
break;
}
@@ -4008,6 +4182,7 @@ ixgbe_if_enable_intr(if_ctx_t ctx)
/* Don't autoclear Link */
mask &= ~IXGBE_EIMS_OTHER;
mask &= ~IXGBE_EIMS_LSC;
+ mask &= ~IXGBE_EIMS_FW_EVENT;
if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
mask &= ~IXGBE_EIMS_MAILBOX;
IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
@@ -4026,7 +4201,7 @@ ixgbe_if_enable_intr(if_ctx_t ctx)
} /* ixgbe_if_enable_intr */
/************************************************************************
- * ixgbe_disable_intr
+ * ixgbe_if_disable_intr
************************************************************************/
static void
ixgbe_if_disable_intr(if_ctx_t ctx)
@@ -4176,8 +4351,9 @@ ixgbe_intr(void *arg)
/* External PHY interrupt */
if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
- (eicr & IXGBE_EICR_GPI_SDP0_X540))
+ (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
+ }
return (FILTER_SCHEDULE_THREAD);
} /* ixgbe_intr */
@@ -4219,7 +4395,7 @@ ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
int error, fc;
sc = (struct ixgbe_softc *)arg1;
- fc = sc->hw.fc.current_mode;
+ fc = sc->hw.fc.requested_mode;
error = sysctl_handle_int(oidp, &fc, 0, req);
if ((error) || (req->newptr == NULL))
@@ -4248,12 +4424,10 @@ ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
case ixgbe_fc_rx_pause:
case ixgbe_fc_tx_pause:
case ixgbe_fc_full:
- sc->hw.fc.requested_mode = fc;
if (sc->num_rx_queues > 1)
ixgbe_disable_rx_drop(sc);
break;
case ixgbe_fc_none:
- sc->hw.fc.requested_mode = ixgbe_fc_none;
if (sc->num_rx_queues > 1)
ixgbe_enable_rx_drop(sc);
break;
@@ -4261,6 +4435,8 @@ ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
return (EINVAL);
}
+ sc->hw.fc.requested_mode = fc;
+
/* Don't autoneg if forcing a value */
sc->hw.fc.disable_fc_autoneg = true;
ixgbe_fc_enable(&sc->hw);
@@ -4978,6 +5154,9 @@ ixgbe_init_device_features(struct ixgbe_softc *sc)
if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
break;
+ case ixgbe_mac_E610:
+ sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
+ break;
default:
break;
}
diff --git a/sys/dev/ixgbe/if_ixv.c b/sys/dev/ixgbe/if_ixv.c
index 54b2c8c1dd68..8a1c1aae041d 100644
--- a/sys/dev/ixgbe/if_ixv.c
+++ b/sys/dev/ixgbe/if_ixv.c
@@ -68,6 +68,8 @@ static const pci_vendor_info_t ixv_vendor_info_array[] =
"Intel(R) X552 Virtual Function"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF,
"Intel(R) X553 Virtual Function"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_VF,
+ "Intel(R) E610 Virtual Function"),
/* required last entry */
PVID_END
};
@@ -1020,6 +1022,9 @@ ixv_identify_hardware(if_ctx_t ctx)
case IXGBE_DEV_ID_X550EM_A_VF:
hw->mac.type = ixgbe_mac_X550EM_a_vf;
break;
+ case IXGBE_DEV_ID_E610_VF:
+ hw->mac.type = ixgbe_mac_E610_vf;
+ break;
default:
device_printf(dev, "unknown mac type\n");
hw->mac.type = ixgbe_mac_unknown;
@@ -1955,6 +1960,7 @@ ixv_init_device_features(struct ixgbe_softc *sc)
case ixgbe_mac_X550_vf:
case ixgbe_mac_X550EM_x_vf:
case ixgbe_mac_X550EM_a_vf:
+ case ixgbe_mac_E610_vf:
sc->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
sc->feat_cap |= IXGBE_FEATURE_RSS;
break;
diff --git a/sys/dev/ixgbe/ixgbe.h b/sys/dev/ixgbe/ixgbe.h
index 341d4ebfcebc..844064bf8543 100644
--- a/sys/dev/ixgbe/ixgbe.h
+++ b/sys/dev/ixgbe/ixgbe.h
@@ -86,6 +86,7 @@
#include "ixgbe_phy.h"
#include "ixgbe_vf.h"
#include "ixgbe_features.h"
+#include "ixgbe_e610.h"
/* Tunables */
@@ -195,6 +196,15 @@
CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
+/* All BASE-T Physical layers */
+#define IXGBE_PHYSICAL_LAYERS_BASE_T_ALL \
+ (IXGBE_PHYSICAL_LAYER_10GBASE_T |\
+ IXGBE_PHYSICAL_LAYER_5000BASE_T |\
+ IXGBE_PHYSICAL_LAYER_2500BASE_T |\
+ IXGBE_PHYSICAL_LAYER_1000BASE_T |\
+ IXGBE_PHYSICAL_LAYER_100BASE_TX |\
+ IXGBE_PHYSICAL_LAYER_10BASE_T)
+
#define IXGBE_CAPS (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_TSO | \
IFCAP_LRO | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | \
IFCAP_VLAN_HWCSUM | IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU | \
@@ -464,6 +474,7 @@ struct ixgbe_softc {
/* Feature capable/enabled flags. See ixgbe_features.h */
u32 feat_cap;
u32 feat_en;
+ u16 lse_mask;
};
/* Precision Time Sync (IEEE 1588) defines */
diff --git a/sys/dev/ixgbe/ixgbe_api.c b/sys/dev/ixgbe/ixgbe_api.c
index 4c50f10ed92e..f11f52a646e4 100644
--- a/sys/dev/ixgbe/ixgbe_api.c
+++ b/sys/dev/ixgbe/ixgbe_api.c
@@ -112,11 +112,15 @@ s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
case ixgbe_mac_X550EM_a:
status = ixgbe_init_ops_X550EM_a(hw);
break;
+ case ixgbe_mac_E610:
+ status = ixgbe_init_ops_E610(hw);
+ break;
case ixgbe_mac_82599_vf:
case ixgbe_mac_X540_vf:
case ixgbe_mac_X550_vf:
case ixgbe_mac_X550EM_x_vf:
case ixgbe_mac_X550EM_a_vf:
+ case ixgbe_mac_E610_vf:
status = ixgbe_init_ops_vf(hw);
break;
default:
@@ -240,6 +244,18 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
hw->mac.type = ixgbe_mac_X550EM_a_vf;
hw->mvals = ixgbe_mvals_X550EM_a;
break;
+ case IXGBE_DEV_ID_E610_BACKPLANE:
+ case IXGBE_DEV_ID_E610_SFP:
+ case IXGBE_DEV_ID_E610_10G_T:
+ case IXGBE_DEV_ID_E610_2_5G_T:
+ case IXGBE_DEV_ID_E610_SGMII:
+ hw->mac.type = ixgbe_mac_E610;
+ hw->mvals = ixgbe_mvals_X550EM_a;
+ break;
+ case IXGBE_DEV_ID_E610_VF:
+ hw->mac.type = ixgbe_mac_E610_vf;
+ hw->mvals = ixgbe_mvals_X550EM_a;
+ break;
default:
ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
diff --git a/sys/dev/ixgbe/ixgbe_api.h b/sys/dev/ixgbe/ixgbe_api.h
index b81510dacb95..2b4cec8d110e 100644
--- a/sys/dev/ixgbe/ixgbe_api.h
+++ b/sys/dev/ixgbe/ixgbe_api.h
@@ -48,6 +48,7 @@ extern s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_E610(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
diff --git a/sys/dev/ixgbe/ixgbe_common.c b/sys/dev/ixgbe/ixgbe_common.c
index df7ab90e72ab..bff022585a03 100644
--- a/sys/dev/ixgbe/ixgbe_common.c
+++ b/sys/dev/ixgbe/ixgbe_common.c
@@ -178,6 +178,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_SFP_N:
case IXGBE_DEV_ID_X550EM_A_QSFP:
case IXGBE_DEV_ID_X550EM_A_QSFP_N:
+ case IXGBE_DEV_ID_E610_SFP:
supported = false;
break;
default:
@@ -210,6 +211,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_10G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ case IXGBE_DEV_ID_E610_10G_T:
+ case IXGBE_DEV_ID_E610_2_5G_T:
supported = true;
break;
default:
@@ -616,7 +619,8 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
}
}
- if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
+ if (hw->mac.type == ixgbe_mac_X540 ||
+ hw->mac.type == ixgbe_mac_X550) {
if (hw->phy.id == 0)
ixgbe_identify_phy(hw);
hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
@@ -1037,6 +1041,9 @@ void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
case IXGBE_PCI_LINK_SPEED_8000:
hw->bus.speed = ixgbe_bus_speed_8000;
break;
+ case IXGBE_PCI_LINK_SPEED_16000:
+ hw->bus.speed = ixgbe_bus_speed_16000;
+ break;
default:
hw->bus.speed = ixgbe_bus_speed_unknown;
break;
@@ -1059,7 +1066,9 @@ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_get_bus_info_generic");
/* Get the negotiated link width and speed from PCI config space */
- link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
+ link_status = IXGBE_READ_PCIE_WORD(hw, hw->mac.type == ixgbe_mac_E610 ?
+ IXGBE_PCI_LINK_STATUS_E610 :
+ IXGBE_PCI_LINK_STATUS);
ixgbe_set_pci_config_data_generic(hw, link_status);
@@ -1878,7 +1887,6 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_get_eeprom_semaphore");
-
/* Get SMBI software semaphore between device drivers first */
for (i = 0; i < timeout; i++) {
/*
@@ -3363,7 +3371,6 @@ s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
-
secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
@@ -3692,6 +3699,10 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
break;
+ case ixgbe_mac_E610:
+ pcie_offset = IXGBE_PCIE_MSIX_E610_CAPS;
+ max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
+ break;
default:
return msix_count;
}
@@ -4139,7 +4150,6 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
return IXGBE_SUCCESS;
}
-
/**
* ixgbe_toggle_txdctl_generic - Toggle VF's queues
* @hw: pointer to hardware structure
@@ -4323,7 +4333,8 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
break;
case IXGBE_LINKS_SPEED_100_82599:
*speed = IXGBE_LINK_SPEED_100_FULL;
- if (hw->mac.type == ixgbe_mac_X550) {
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_E610) {
if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
*speed = IXGBE_LINK_SPEED_5GB_FULL;
}
@@ -5494,6 +5505,7 @@ void ixgbe_get_nvm_version(struct ixgbe_hw *hw,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
/* version of eeprom section */
if (ixgbe_read_eeprom(hw, NVM_EEP_OFFSET_X540, &word))
word = NVM_VER_INVALID;
@@ -5512,6 +5524,7 @@ void ixgbe_get_nvm_version(struct ixgbe_hw *hw,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
/* intel phy firmware version */
if (ixgbe_read_eeprom(hw, NVM_EEP_PHY_OFF_X540, &word))
word = NVM_VER_INVALID;
diff --git a/sys/dev/ixgbe/ixgbe_e610.c b/sys/dev/ixgbe/ixgbe_e610.c
new file mode 100644
index 000000000000..95c6dca416c6
--- /dev/null
+++ b/sys/dev/ixgbe/ixgbe_e610.c
@@ -0,0 +1,5567 @@
+/******************************************************************************
+ SPDX-License-Identifier: BSD-3-Clause
+
+ Copyright (c) 2025, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+#include "ixgbe_type.h"
+#include "ixgbe_e610.h"
+#include "ixgbe_x550.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+#include "ixgbe_api.h"
+
+/**
+ * ixgbe_init_aci - initialization routine for Admin Command Interface
+ * @hw: pointer to the hardware structure
+ *
+ * Initialize the ACI lock.
+ */
+void ixgbe_init_aci(struct ixgbe_hw *hw)
+{
+ ixgbe_init_lock(&hw->aci.lock);
+}
+
+/**
+ * ixgbe_shutdown_aci - shutdown routine for Admin Command Interface
+ * @hw: pointer to the hardware structure
+ *
+ * Destroy the ACI lock.
+ */
+void ixgbe_shutdown_aci(struct ixgbe_hw *hw)
+{
+ ixgbe_destroy_lock(&hw->aci.lock);
+}
+
+/**
+ * ixgbe_should_retry_aci_send_cmd_execute - decide if ACI command should
+ * be resent
+ * @opcode: ACI opcode
+ *
+ * Check if ACI command should be sent again depending on the provided opcode.
+ *
+ * Return: true if the sending command routine should be repeated,
+ * otherwise false.
+ */
+static bool ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)
+{
+ switch (opcode) {
+ case ixgbe_aci_opc_disable_rxen:
+ case ixgbe_aci_opc_get_phy_caps:
+ case ixgbe_aci_opc_get_link_status:
+ case ixgbe_aci_opc_get_link_topo:
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ixgbe_aci_send_cmd_execute - execute sending FW Admin Command to FW Admin
+ * Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * The Admin Command is sent over the CSR interface by writing the descriptor
+ * and buffer into dedicated registers.
+ *
+ * Return: the exit code of the operation.
+ * * - IXGBE_SUCCESS - success.
+ * * - IXGBE_ERR_ACI_DISABLED - CSR mechanism is not enabled.
+ * * - IXGBE_ERR_ACI_BUSY - CSR mechanism is busy.
+ * * - IXGBE_ERR_PARAM - buf_size is too big or
+ * invalid argument buf or buf_size.
+ * * - IXGBE_ERR_ACI_TIMEOUT - Admin Command X command timeout.
+ * * - IXGBE_ERR_ACI_ERROR - Admin Command X invalid state of HICR register,
+ * Admin Command failed because a bad opcode was returned, or
+ * Admin Command failed with error Y.
+ */
+static s32
+ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size)
+{
+ u32 hicr = 0, tmp_buf_size = 0, i = 0;
+ u32 *raw_desc = (u32 *)desc;
+ s32 status = IXGBE_SUCCESS;
+ bool valid_buf = false;
+ u32 *tmp_buf = NULL;
+ u16 opcode = 0;
+
+ do {
+ hw->aci.last_status = IXGBE_ACI_RC_OK;
+
+ /* It's necessary to check if mechanism is enabled */
+ hicr = IXGBE_READ_REG(hw, PF_HICR);
+ if (!(hicr & PF_HICR_EN)) {
+ status = IXGBE_ERR_ACI_DISABLED;
+ break;
+ }
+ if (hicr & PF_HICR_C) {
+ hw->aci.last_status = IXGBE_ACI_RC_EBUSY;
+ status = IXGBE_ERR_ACI_BUSY;
+ break;
+ }
+ opcode = desc->opcode;
+
+ if (buf_size > IXGBE_ACI_MAX_BUFFER_SIZE) {
+ status = IXGBE_ERR_PARAM;
+ break;
+ }
+
+ if (buf)
+ desc->flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF);
+
+ /* Check if buf and buf_size are proper params */
+ if (desc->flags & IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF)) {
+ if ((buf && buf_size == 0) ||
+ (buf == NULL && buf_size)) {
+ status = IXGBE_ERR_PARAM;
+ break;
+ }
+ if (buf && buf_size)
+ valid_buf = true;
+ }
+
+ if (valid_buf) {
+ if (buf_size % 4 == 0)
+ tmp_buf_size = buf_size;
+ else
+ tmp_buf_size = (buf_size & (u16)(~0x03)) + 4;
+
+ tmp_buf = (u32 *)ixgbe_malloc(hw, tmp_buf_size);
+ if (!tmp_buf)
+ return IXGBE_ERR_OUT_OF_MEM;
+
+ /* tmp_buf is first filled with 0xFF and then the
+ * content of buf is copied into it. This lets us
+ * use the valid buf_size while preventing reads
+ * past the end of buf when buf_size is not a
+ * multiple of 4.
+ */
+ memset(tmp_buf, 0xFF, tmp_buf_size);
+ memcpy(tmp_buf, buf, buf_size);
+
+ if (tmp_buf_size > IXGBE_ACI_LG_BUF)
+ desc->flags |=
+ IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_LB);
+
+ desc->datalen = IXGBE_CPU_TO_LE16(buf_size);
+
+ if (desc->flags & IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD)) {
+ for (i = 0; i < tmp_buf_size / 4; i++) {
+ IXGBE_WRITE_REG(hw, PF_HIBA(i),
+ IXGBE_LE32_TO_CPU(tmp_buf[i]));
+ }
+ }
+ }
+
+ /* Descriptor is written to specific registers */
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
+ IXGBE_WRITE_REG(hw, PF_HIDA(i),
+ IXGBE_LE32_TO_CPU(raw_desc[i]));
+
+ /* SW has to set PF_HICR.C bit and clear PF_HICR.SV and
+ * PF_HICR_EV
+ */
+ hicr = IXGBE_READ_REG(hw, PF_HICR);
+ hicr = (hicr | PF_HICR_C) & ~(PF_HICR_SV | PF_HICR_EV);
+ IXGBE_WRITE_REG(hw, PF_HICR, hicr);
+
+ /* Wait for sync Admin Command response */
+ for (i = 0; i < IXGBE_ACI_SYNC_RESPONSE_TIMEOUT; i++) {
+ hicr = IXGBE_READ_REG(hw, PF_HICR);
+ if ((hicr & PF_HICR_SV) || !(hicr & PF_HICR_C))
+ break;
+
+ msec_delay(1);
+ }
+
+ /* Wait for async Admin Command response */
+ if ((hicr & PF_HICR_SV) && (hicr & PF_HICR_C)) {
+ for (i = 0; i < IXGBE_ACI_ASYNC_RESPONSE_TIMEOUT;
+ i++) {
+ hicr = IXGBE_READ_REG(hw, PF_HICR);
+ if ((hicr & PF_HICR_EV) || !(hicr & PF_HICR_C))
+ break;
+
+ msec_delay(1);
+ }
+ }
+
+ /* Read sync Admin Command response */
+ if ((hicr & PF_HICR_SV)) {
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
+ raw_desc[i] = IXGBE_READ_REG(hw, PF_HIDA(i));
+ raw_desc[i] = IXGBE_CPU_TO_LE32(raw_desc[i]);
+ }
+ }
+
+ /* Read async Admin Command response */
+ if ((hicr & PF_HICR_EV) && !(hicr & PF_HICR_C)) {
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
+ raw_desc[i] = IXGBE_READ_REG(hw, PF_HIDA_2(i));
+ raw_desc[i] = IXGBE_CPU_TO_LE32(raw_desc[i]);
+ }
+ }
+
+ /* Handle timeout and invalid state of HICR register */
+ if (hicr & PF_HICR_C) {
+ status = IXGBE_ERR_ACI_TIMEOUT;
+ break;
+ } else if (!(hicr & PF_HICR_SV) && !(hicr & PF_HICR_EV)) {
+ status = IXGBE_ERR_ACI_ERROR;
+ break;
+ }
+
+ /* For every command other than 0x0014 treat opcode mismatch
+ * as an error. Response to 0x0014 command read from HIDA_2
+ * is a descriptor of an event which is expected to contain
+ * different opcode than the command.
+ */
+ if (desc->opcode != opcode &&
+ opcode != IXGBE_CPU_TO_LE16(ixgbe_aci_opc_get_fw_event)) {
+ status = IXGBE_ERR_ACI_ERROR;
+ break;
+ }
+
+ if (desc->retval != IXGBE_ACI_RC_OK) {
+ hw->aci.last_status = (enum ixgbe_aci_err)desc->retval;
+ status = IXGBE_ERR_ACI_ERROR;
+ break;
+ }
+
+ /* Write the response values back to the caller's buf */
+ if (valid_buf && (desc->flags &
+ IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF))) {
+ for (i = 0; i < tmp_buf_size / 4; i++) {
+ tmp_buf[i] = IXGBE_READ_REG(hw, PF_HIBA(i));
+ tmp_buf[i] = IXGBE_CPU_TO_LE32(tmp_buf[i]);
+ }
+ memcpy(buf, tmp_buf, buf_size);
+ }
+ } while (0);
+
+ if (tmp_buf)
+ ixgbe_free(hw, tmp_buf);
+
+ return status;
+}
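The two-branch tmp_buf_size computation above is the usual round-up-to-a-
multiple-of-4 idiom; a compact equivalent, shown standalone for reference
(the helper name is illustrative, not part of this commit):

	static inline u16 ixgbe_roundup4(u16 n)
	{
		return (u16)((n + 3) & ~3);	/* round n up to a multiple of 4 */
	}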
+
+/**
+ * ixgbe_aci_send_cmd - send FW Admin Command to FW Admin Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * Helper function to send FW Admin Commands to the FW Admin Command Interface.
+ *
+ * Retry sending the FW Admin Command multiple times to the FW ACI
+ * if the EBUSY Admin Command error is returned.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size)
+{
+ struct ixgbe_aci_desc desc_cpy;
+ enum ixgbe_aci_err last_status;
+ bool is_cmd_for_retry;
+ u8 *buf_cpy = NULL;
+ s32 status;
+ u16 opcode;
+ u8 idx = 0;
+
+ opcode = IXGBE_LE16_TO_CPU(desc->opcode);
+ is_cmd_for_retry = ixgbe_should_retry_aci_send_cmd_execute(opcode);
+ memset(&desc_cpy, 0, sizeof(desc_cpy));
+
+ if (is_cmd_for_retry) {
+ if (buf) {
+ buf_cpy = (u8 *)ixgbe_malloc(hw, buf_size);
+ if (!buf_cpy)
+ return IXGBE_ERR_OUT_OF_MEM;
+ }
+ memcpy(&desc_cpy, desc, sizeof(desc_cpy));
+ }
+
+ do {
+ ixgbe_acquire_lock(&hw->aci.lock);
+ status = ixgbe_aci_send_cmd_execute(hw, desc, buf, buf_size);
+ last_status = hw->aci.last_status;
+ ixgbe_release_lock(&hw->aci.lock);
+
+ if (!is_cmd_for_retry || status == IXGBE_SUCCESS ||
+ (last_status != IXGBE_ACI_RC_EBUSY && status != IXGBE_ERR_ACI_ERROR))
+ break;
+
+ if (buf)
+ memcpy(buf, buf_cpy, buf_size);
+ memcpy(desc, &desc_cpy, sizeof(desc_cpy));
+
+ msec_delay(IXGBE_ACI_SEND_DELAY_TIME_MS);
+ } while (++idx < IXGBE_ACI_SEND_MAX_EXECUTE);
+
+ if (buf_cpy)
+ ixgbe_free(hw, buf_cpy);
+
+ return status;
+}
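For orientation, a minimal caller sketch (hypothetical, not part of this
commit) showing the intended pattern for a direct, bufferless command built
with ixgbe_fill_dflt_direct_cmd_desc() defined below:

	static s32 example_send_direct_cmd(struct ixgbe_hw *hw, u16 opcode)
	{
		struct ixgbe_aci_desc desc;
		s32 status;

		ixgbe_fill_dflt_direct_cmd_desc(&desc, opcode);
		status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
		if (status)
			DEBUGOUT1("ACI command failed, last_status = %d\n",
				  hw->aci.last_status);
		return status;
	}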
+
+/**
+ * ixgbe_aci_check_event_pending - check if there are any pending events
+ * @hw: pointer to the HW struct
+ *
+ * Determine if there are any pending events.
+ *
+ * Return: true if there are any currently pending events
+ * otherwise false.
+ */
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw)
+{
+ u32 ep_bit_mask;
+ u32 fwsts;
+
+ ep_bit_mask = hw->bus.func ? GL_FWSTS_EP_PF1 : GL_FWSTS_EP_PF0;
+
+ /* Check state of Event Pending (EP) bit */
+ fwsts = IXGBE_READ_REG(hw, GL_FWSTS);
+ return (fwsts & ep_bit_mask) ? true : false;
+}
+
+/**
+ * ixgbe_aci_get_event - get an event from ACI
+ * @hw: pointer to the HW struct
+ * @e: event information structure
+ * @pending: optional flag signaling that there are more pending events
+ *
+ * Obtain an event from ACI and return its content
+ * through 'e' using ACI command (0x0014).
+ * Provide information if there are more events
+ * to retrieve through 'pending'.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
+ bool *pending)
+{
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ if (!e || (!e->msg_buf && e->buf_len) || (e->msg_buf && !e->buf_len))
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_acquire_lock(&hw->aci.lock);
+
+ /* Check if there are any events pending */
+ if (!ixgbe_aci_check_event_pending(hw)) {
+ status = IXGBE_ERR_ACI_NO_EVENTS;
+ goto aci_get_event_exit;
+ }
+
+ /* Obtain pending event */
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_fw_event);
+ status = ixgbe_aci_send_cmd_execute(hw, &desc, e->msg_buf, e->buf_len);
+ if (status)
+ goto aci_get_event_exit;
+
+ /* Returned 0x0014 opcode indicates that no event was obtained */
+ if (desc.opcode == IXGBE_CPU_TO_LE16(ixgbe_aci_opc_get_fw_event)) {
+ status = IXGBE_ERR_ACI_NO_EVENTS;
+ goto aci_get_event_exit;
+ }
+
+ /* Determine size of event data */
+ e->msg_len = MIN_T(u16, IXGBE_LE16_TO_CPU(desc.datalen), e->buf_len);
+ /* Write event descriptor to event info structure */
+ memcpy(&e->desc, &desc, sizeof(e->desc));
+
+ /* Check if there are any further events pending */
+ if (pending)
+ *pending = ixgbe_aci_check_event_pending(hw);
+
+aci_get_event_exit:
+ ixgbe_release_lock(&hw->aci.lock);
+
+ return status;
+}
+
+/**
+ * ixgbe_fill_dflt_direct_cmd_desc - fill ACI descriptor with default values.
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Helper function to fill the descriptor desc with default values
+ * and the provided opcode.
+ */
+void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode)
+{
+ /* zero out the desc */
+ memset(desc, 0, sizeof(*desc));
+ desc->opcode = IXGBE_CPU_TO_LE16(opcode);
+ desc->flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_SI);
+}
+
+/**
+ * ixgbe_aci_get_fw_ver - get the firmware version
+ * @hw: pointer to the HW struct
+ *
+ * Get the firmware version using ACI command (0x0001).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_ver *resp;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ resp = &desc.params.get_ver;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_ver);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ if (!status) {
+ hw->fw_branch = resp->fw_branch;
+ hw->fw_maj_ver = resp->fw_major;
+ hw->fw_min_ver = resp->fw_minor;
+ hw->fw_patch = resp->fw_patch;
+ hw->fw_build = IXGBE_LE32_TO_CPU(resp->fw_build);
+ hw->api_branch = resp->api_branch;
+ hw->api_maj_ver = resp->api_major;
+ hw->api_min_ver = resp->api_minor;
+ hw->api_patch = resp->api_patch;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_send_driver_ver - send the driver version to firmware
+ * @hw: pointer to the HW struct
+ * @dv: driver's major, minor version
+ *
+ * Send the driver version to the firmware
+ * using the ACI command (0x0002).
+ *
+ * Return: the exit code of the operation.
+ * Returns IXGBE_ERR_PARAM if dv is NULL.
+ */
+s32 ixgbe_aci_send_driver_ver(struct ixgbe_hw *hw, struct ixgbe_driver_ver *dv)
+{
+ struct ixgbe_aci_cmd_driver_ver *cmd;
+ struct ixgbe_aci_desc desc;
+ u16 len;
+
+ cmd = &desc.params.driver_ver;
+
+ if (!dv)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_driver_ver);
+
+ desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+ cmd->major_ver = dv->major_ver;
+ cmd->minor_ver = dv->minor_ver;
+ cmd->build_ver = dv->build_ver;
+ cmd->subbuild_ver = dv->subbuild_ver;
+
+ len = 0;
+ while (len < sizeof(dv->driver_string) &&
+ IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
+ len++;
+
+ return ixgbe_aci_send_cmd(hw, &desc, dv->driver_string, len);
+}
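A hypothetical caller sketch; the field names come from the structure accesses
above, while the version numbers and driver string are placeholders:

	static void example_report_driver_ver(struct ixgbe_hw *hw)
	{
		struct ixgbe_driver_ver dv = {
			.major_ver = 1, .minor_ver = 0,
			.build_ver = 0, .subbuild_ver = 0,
		};

		strncpy((char *)dv.driver_string, "ix-example",
		    sizeof(dv.driver_string));
		(void)ixgbe_aci_send_driver_ver(hw, &dv);
	}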
+
+/**
+ * ixgbe_aci_req_res - request a common resource
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ *
+ * Requests a common resource using the ACI command (0x0008).
+ * Specifies the maximum time the driver may hold the resource.
+ * If the requested resource is currently occupied by some other driver,
+ * the command completes with a busy status and the timeout field indicates the
+ * maximum time the current owner has to free it.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32
+ixgbe_aci_req_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access, u8 sdp_number,
+ u32 *timeout)
+{
+ struct ixgbe_aci_cmd_req_res *cmd_resp;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd_resp = &desc.params.res_owner;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_req_res);
+
+ cmd_resp->res_id = IXGBE_CPU_TO_LE16(res);
+ cmd_resp->access_type = IXGBE_CPU_TO_LE16(access);
+ cmd_resp->res_number = IXGBE_CPU_TO_LE32(sdp_number);
+ cmd_resp->timeout = IXGBE_CPU_TO_LE32(*timeout);
+ *timeout = 0;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ /* The completion specifies the maximum time in ms that the driver
+ * may hold the resource in the Timeout field.
+ * If the resource is held by some other driver, the command completes
+ * with a busy return value and the timeout field indicates the maximum
+ * time the current owner of the resource has to free it.
+ */
+ if (!status || hw->aci.last_status == IXGBE_ACI_RC_EBUSY)
+ *timeout = IXGBE_LE32_TO_CPU(cmd_resp->timeout);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_release_res - release a common resource using ACI
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @sdp_number: resource number
+ *
+ * Release a common resource using ACI command (0x0009).
+ *
+ * Return: the exit code of the operation.
+ */
+static s32
+ixgbe_aci_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ u8 sdp_number)
+{
+ struct ixgbe_aci_cmd_req_res *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.res_owner;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_release_res);
+
+ cmd->res_id = IXGBE_CPU_TO_LE16(res);
+ cmd->res_number = IXGBE_CPU_TO_LE32(sdp_number);
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_acquire_res - acquire the ownership of a resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ * @access: access type (read or write)
+ * @timeout: timeout in milliseconds
+ *
+ * Attempt to acquire ownership of a resource through ixgbe_aci_req_res,
+ * which uses the ACI.
+ * If some other driver has previously acquired the resource and performed
+ * any necessary updates, IXGBE_ERR_ACI_NO_WORK is returned; the caller then
+ * does not obtain the resource and has no further work to do.
+ * If needed, the function polls until the current lock owner times out.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access, u32 timeout)
+{
+#define IXGBE_RES_POLLING_DELAY_MS 10
+ u32 delay = IXGBE_RES_POLLING_DELAY_MS;
+ u32 res_timeout = timeout;
+ u32 retry_timeout = 0;
+ s32 status;
+
+ status = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+ /* A return code of IXGBE_ERR_ACI_NO_WORK means that another driver has
+ * previously acquired the resource and performed any necessary updates;
+ * in this case the caller does not obtain the resource and has no
+ * further work to do.
+ */
+ if (status == IXGBE_ERR_ACI_NO_WORK)
+ goto ixgbe_acquire_res_exit;
+
+ /* If necessary, poll until the current lock owner times out.
+ * Set retry_timeout to the timeout value reported by the FW in the
+ * response to the "Request Resource Ownership" (0x0008) Admin Command
+ * as it indicates the maximum time the current owner of the resource
+ * is allowed to hold it.
+ */
+ retry_timeout = res_timeout;
+ while (status && retry_timeout && res_timeout) {
+ msec_delay(delay);
+ retry_timeout = (retry_timeout > delay) ?
+ retry_timeout - delay : 0;
+ status = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+ if (status == IXGBE_ERR_ACI_NO_WORK)
+ /* lock free, but no work to do */
+ break;
+
+ if (!status)
+ /* lock acquired */
+ break;
+ }
+
+ixgbe_acquire_res_exit:
+ return status;
+}
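The expected ownership pattern, sketched with hypothetical resource-ID and
access-type arguments (the real enum values live in ixgbe_type_e610.h):

	static s32 example_with_resource(struct ixgbe_hw *hw,
	    enum ixgbe_aci_res_ids res_id,
	    enum ixgbe_aci_res_access_type access)
	{
		s32 status;

		status = ixgbe_acquire_res(hw, res_id, access, 1000 /* ms */);
		if (status == IXGBE_ERR_ACI_NO_WORK)
			return IXGBE_SUCCESS;	/* work already done elsewhere */
		if (status)
			return status;

		/* ... access the resource protected by the ownership ... */

		ixgbe_release_res(hw, res_id);
		return IXGBE_SUCCESS;
	}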
+
+/**
+ * ixgbe_release_res - release a common resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ *
+ * Release a common resource using ixgbe_aci_release_res.
+ */
+void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res)
+{
+ u32 total_delay = 0;
+ s32 status;
+
+ status = ixgbe_aci_release_res(hw, res, 0);
+
+ /* There are some rare cases when trying to release the resource
+ * results in an admin command timeout, so handle them correctly.
+ */
+ while ((status == IXGBE_ERR_ACI_TIMEOUT) &&
+ (total_delay < IXGBE_ACI_RELEASE_RES_TIMEOUT)) {
+ msec_delay(1);
+ status = ixgbe_aci_release_res(hw, res, 0);
+ total_delay++;
+ }
+}
+
+/**
+ * ixgbe_parse_common_caps - Parse common device/function capabilities
+ * @hw: pointer to the HW struct
+ * @caps: pointer to common capabilities structure
+ * @elem: the capability element to parse
+ * @prefix: message prefix for tracing capabilities
+ *
+ * Given a capability element, extract relevant details into the common
+ * capability structure.
+ *
+ * Return: true if the capability matches one of the common capability ids,
+ * false otherwise.
+ */
+static bool
+ixgbe_parse_common_caps(struct ixgbe_hw *hw, struct ixgbe_hw_common_caps *caps,
+ struct ixgbe_aci_cmd_list_caps_elem *elem,
+ const char *prefix)
+{
+ u32 logical_id = IXGBE_LE32_TO_CPU(elem->logical_id);
+ u32 phys_id = IXGBE_LE32_TO_CPU(elem->phys_id);
+ u32 number = IXGBE_LE32_TO_CPU(elem->number);
+ u16 cap = IXGBE_LE16_TO_CPU(elem->cap);
+ bool found = true;
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
+ caps->valid_functions = number;
+ break;
+ case IXGBE_ACI_CAPS_SRIOV:
+ caps->sr_iov_1_1 = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_VMDQ:
+ caps->vmdq = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_DCB:
+ caps->dcb = (number == 1);
+ caps->active_tc_bitmap = logical_id;
+ caps->maxtc = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_RSS:
+ caps->rss_table_size = number;
+ caps->rss_table_entry_width = logical_id;
+ break;
+ case IXGBE_ACI_CAPS_RXQS:
+ caps->num_rxq = number;
+ caps->rxq_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_TXQS:
+ caps->num_txq = number;
+ caps->txq_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_MSIX:
+ caps->num_msix_vectors = number;
+ caps->msix_vector_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_NVM_VER:
+ break;
+ case IXGBE_ACI_CAPS_NVM_MGMT:
+ caps->sec_rev_disabled =
+ (number & IXGBE_NVM_MGMT_SEC_REV_DISABLED) ?
+ true : false;
+ caps->update_disabled =
+ (number & IXGBE_NVM_MGMT_UPDATE_DISABLED) ?
+ true : false;
+ caps->nvm_unified_update =
+ (number & IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
+ true : false;
+ caps->netlist_auth =
+ (number & IXGBE_NVM_MGMT_NETLIST_AUTH_SUPPORT) ?
+ true : false;
+ break;
+ case IXGBE_ACI_CAPS_MAX_MTU:
+ caps->max_mtu = number;
+ break;
+ case IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE:
+ caps->pcie_reset_avoidance = (number > 0);
+ break;
+ case IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT:
+ caps->reset_restrict_support = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3:
+ {
+ u8 index = cap - IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0;
+
+ caps->ext_topo_dev_img_ver_high[index] = number;
+ caps->ext_topo_dev_img_ver_low[index] = logical_id;
+ caps->ext_topo_dev_img_part_num[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M) >>
+ IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S;
+ caps->ext_topo_dev_img_load_en[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
+ caps->ext_topo_dev_img_prog_en[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
+ break;
+ }
+ case IXGBE_ACI_CAPS_OROM_RECOVERY_UPDATE:
+ caps->orom_recovery_update = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_NEXT_CLUSTER_ID:
+ caps->next_cluster_id_support = (number == 1);
+ DEBUGOUT2("%s: next_cluster_id_support = %d\n",
+ prefix, caps->next_cluster_id_support);
+ break;
+ default:
+ /* Not one of the recognized common capabilities */
+ found = false;
+ }
+
+ return found;
+}
+
+/**
+ * ixgbe_hweight8 - count set bits among the 8 lowest bits
+ * @w: variable storing set bits to count
+ *
+ * Return: the number of set bits among the 8 lowest bits in the provided value.
+ */
+static u8 ixgbe_hweight8(u32 w)
+{
+ u8 hweight = 0, i;
+
+ for (i = 0; i < 8; i++)
+ if (w & (1 << i))
+ hweight++;
+
+ return hweight;
+}
+
+/**
+ * ixgbe_hweight32 - count set bits among the 32 lowest bits
+ * @w: variable storing set bits to count
+ *
+ * Return: the number of set bits among the 32 lowest bits in the
+ * provided value.
+ */
+static u8 ixgbe_hweight32(u32 w)
+{
+ u32 bit_mask = 0x1, i;
+ u8 bit_cnt = 0;
+
+ for (i = 0; i < 32; i++) {
+ if (w & bit_mask)
+ bit_cnt++;
+
+ bit_mask <<= 1;
+ }
+
+ return bit_cnt;
+}
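Both helpers are plain population counts; the loop form is what portable
shared code tends to keep. Where GCC/Clang support can be assumed, the same
result is available from a builtin (illustrative comparison only, not used by
the driver):

	static u8 popcount32(u32 w)
	{
		return (u8)__builtin_popcount(w);	/* GCC/Clang builtin */
	}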
+
+/**
+ * ixgbe_parse_valid_functions_cap - Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS for device capabilities.
+ */
+static void
+ixgbe_parse_valid_functions_cap(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ dev_p->num_funcs = ixgbe_hweight32(number);
+}
+
+/**
+ * ixgbe_parse_vf_dev_caps - Parse IXGBE_ACI_CAPS_VF device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VF for device capabilities.
+ */
+static void ixgbe_parse_vf_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ dev_p->num_vfs_exposed = number;
+}
+
+/**
+ * ixgbe_parse_vsi_dev_caps - Parse IXGBE_ACI_CAPS_VSI device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VSI for device capabilities.
+ */
+static void ixgbe_parse_vsi_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ dev_p->num_vsi_allocd_to_host = number;
+}
+
+/**
+ * ixgbe_parse_fdir_dev_caps - Parse IXGBE_ACI_CAPS_FD device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_FD for device capabilities.
+ */
+static void ixgbe_parse_fdir_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ dev_p->num_flow_director_fltr = number;
+}
+
+/**
+ * ixgbe_parse_dev_caps - Parse device capabilities
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @buf: buffer containing the device capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse the device (0x000B) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_common_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the device capabilities structure.
+ */
+static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ void *buf, u32 cap_count)
+{
+ struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
+
+ memset(dev_p, 0, sizeof(*dev_p));
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = IXGBE_LE16_TO_CPU(cap_resp[i].cap);
+ bool found;
+
+ found = ixgbe_parse_common_caps(hw, &dev_p->common_cap,
+ &cap_resp[i], "dev caps");
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
+ ixgbe_parse_valid_functions_cap(hw, dev_p,
+ &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VF:
+ ixgbe_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VSI:
+ ixgbe_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_FD:
+ ixgbe_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ default:
+ /* Don't list common capabilities as unknown */
+ if (!found)
+ break;
+ }
+ }
+}
+
+/**
+ * ixgbe_parse_vf_func_caps - Parse IXGBE_ACI_CAPS_VF function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for IXGBE_ACI_CAPS_VF.
+ */
+static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 logical_id = IXGBE_LE32_TO_CPU(cap->logical_id);
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ func_p->num_allocd_vfs = number;
+ func_p->vf_base_id = logical_id;
+}
+
+/**
+ * ixgbe_get_num_per_func - determine number of resources per PF
+ * @hw: pointer to the HW structure
+ * @max: value to be evenly split between each PF
+ *
+ * Determine the number of valid functions by going through the bitmap returned
+ * from parsing capabilities and use this to calculate the number of resources
+ * per PF based on the max value passed in.
+ *
+ * Return: the number of resources per PF, or 0 if no PFs are available.
+ */
+static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max)
+{
+ u8 funcs;
+
+#define IXGBE_CAPS_VALID_FUNCS_M 0xFF
+ funcs = ixgbe_hweight8(hw->dev_caps.common_cap.valid_functions &
+ IXGBE_CAPS_VALID_FUNCS_M);
+
+ if (!funcs)
+ return 0;
+
+ return max / funcs;
+}
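For example, if the valid-functions bitmap reports 0x0F (four active PFs) and
max is 768, each PF is granted 768 / 4 = 192 resources; with a bitmap of 0x03
the same max yields 768 / 2 = 384 per PF.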
+
+/**
+ * ixgbe_parse_vsi_func_caps - Parse IXGBE_ACI_CAPS_VSI function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for IXGBE_ACI_CAPS_VSI.
+ */
+static void ixgbe_parse_vsi_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ func_p->guar_num_vsi = ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI);
+}
+
+/**
+ * ixgbe_parse_func_caps - Parse function capabilities
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @buf: buffer containing the function capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse function (0x000A) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_common_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the function capabilities structure.
+ */
+static void ixgbe_parse_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ void *buf, u32 cap_count)
+{
+ struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
+
+ memset(func_p, 0, sizeof(*func_p));
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = IXGBE_LE16_TO_CPU(cap_resp[i].cap);
+
+ ixgbe_parse_common_caps(hw, &func_p->common_cap,
+ &cap_resp[i], "func caps");
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VF:
+ ixgbe_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VSI:
+ ixgbe_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
+ break;
+ default:
+ /* Don't list common capabilities as unknown */
+ break;
+ }
+ }
+}
+
+/**
+ * ixgbe_aci_list_caps - query function/device capabilities
+ * @hw: pointer to the HW struct
+ * @buf: a buffer to hold the capabilities
+ * @buf_size: size of the buffer
+ * @cap_count: if not NULL, set to the number of capabilities reported
+ * @opc: capabilities type to discover, device or function
+ *
+ * Get the function (0x000A) or device (0x000B) capabilities description from
+ * firmware and store it in the buffer.
+ *
+ * If the cap_count pointer is not NULL, then it is set to the number of
+ * capabilities firmware will report. Note that if the buffer size is too
+ * small, it is possible the command will return IXGBE_ERR_OUT_OF_MEM. The
+ * cap_count will still be updated in this case. It is recommended that the
+ * buffer size be set to IXGBE_ACI_MAX_BUFFER_SIZE (the largest possible
+ * buffer that firmware could return) to avoid this.
+ *
+ * Return: the exit code of the operation.
+ * Exit code of IXGBE_ERR_OUT_OF_MEM means the buffer size is too small.
+ */
+s32 ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+ u32 *cap_count, enum ixgbe_aci_opc opc)
+{
+ struct ixgbe_aci_cmd_list_caps *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.get_cap;
+
+ if (opc != ixgbe_aci_opc_list_func_caps &&
+ opc != ixgbe_aci_opc_list_dev_caps)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, opc);
+ status = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
+
+ if (cap_count)
+ *cap_count = IXGBE_LE32_TO_CPU(cmd->count);
+
+ return status;
+}
+
+/**
+ * ixgbe_discover_dev_caps - Read and extract device capabilities
+ * @hw: pointer to the hardware structure
+ * @dev_caps: pointer to device capabilities structure
+ *
+ * Read the device capabilities and extract them into the dev_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_caps)
+{
+ u32 cap_count = 0;
+ u8 *cbuf = NULL;
+ s32 status;
+
+ cbuf = (u8 *)ixgbe_malloc(hw, IXGBE_ACI_MAX_BUFFER_SIZE);
+ if (!cbuf)
+ return IXGBE_ERR_OUT_OF_MEM;
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+ sizeof(struct ixgbe_aci_cmd_list_caps_elem);
+
+ status = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+ &cap_count,
+ ixgbe_aci_opc_list_dev_caps);
+ if (!status)
+ ixgbe_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
+
+ if (cbuf)
+ ixgbe_free(hw, cbuf);
+
+ return status;
+}
+
+/**
+ * ixgbe_discover_func_caps - Read and extract function capabilities
+ * @hw: pointer to the hardware structure
+ * @func_caps: pointer to function capabilities structure
+ *
+ * Read the function capabilities and extract them into the func_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_discover_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_caps)
+{
+ u32 cap_count = 0;
+ u8 *cbuf = NULL;
+ s32 status;
+
+ cbuf = (u8 *)ixgbe_malloc(hw, IXGBE_ACI_MAX_BUFFER_SIZE);
+ if (!cbuf)
+ return IXGBE_ERR_OUT_OF_MEM;
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+ sizeof(struct ixgbe_aci_cmd_list_caps_elem);
+
+ status = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+ &cap_count,
+ ixgbe_aci_opc_list_func_caps);
+ if (!status)
+ ixgbe_parse_func_caps(hw, func_caps, cbuf, cap_count);
+
+ if (cbuf)
+ ixgbe_free(hw, cbuf);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_caps - get info about the HW
+ * @hw: pointer to the hardware structure
+ *
+ * Retrieve both device and function capabilities.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_caps(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ status = ixgbe_discover_dev_caps(hw, &hw->dev_caps);
+ if (status)
+ return status;
+
+ return ixgbe_discover_func_caps(hw, &hw->func_caps);
+}
+
+/**
+ * ixgbe_aci_disable_rxen - disable RX
+ * @hw: pointer to the HW struct
+ *
+ * Request a safe disable of Receive Enable using ACI command (0x000C).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_disable_rxen(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_disable_rxen *cmd;
+ struct ixgbe_aci_desc desc;
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ cmd = &desc.params.disable_rxen;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_disable_rxen);
+
+ cmd->lport_num = (u8)hw->bus.func;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_get_phy_caps - returns PHY capabilities
+ * @hw: pointer to the HW struct
+ * @qual_mods: report qualified modules
+ * @report_mode: report mode capabilities
+ * @pcaps: structure for PHY capabilities to be filled
+ *
+ * Returns the various PHY capabilities supported on the Port
+ * using ACI command (0x0600).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps)
+{
+ struct ixgbe_aci_cmd_get_phy_caps *cmd;
+ u16 pcaps_size = sizeof(*pcaps);
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.get_phy;
+
+ if (!pcaps || (report_mode & ~IXGBE_ACI_REPORT_MODE_M))
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_phy_caps);
+
+ if (qual_mods)
+ cmd->param0 |= IXGBE_CPU_TO_LE16(IXGBE_ACI_GET_PHY_RQM);
+
+ cmd->param0 |= IXGBE_CPU_TO_LE16(report_mode);
+ status = ixgbe_aci_send_cmd(hw, &desc, pcaps, pcaps_size);
+
+ if (status == IXGBE_SUCCESS &&
+ report_mode == IXGBE_ACI_REPORT_TOPO_CAP_MEDIA) {
+ hw->phy.phy_type_low = IXGBE_LE64_TO_CPU(pcaps->phy_type_low);
+ hw->phy.phy_type_high = IXGBE_LE64_TO_CPU(pcaps->phy_type_high);
+ memcpy(hw->link.link_info.module_type, &pcaps->module_type,
+ sizeof(hw->link.link_info.module_type));
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_phy_caps_equals_cfg - check if capabilities match the PHY config
+ * @phy_caps: PHY capabilities
+ * @phy_cfg: PHY configuration
+ *
+ * Helper function to determine if PHY capabilities match PHY
+ * configuration
+ *
+ * Return: true if PHY capabilities match PHY configuration.
+ */
+bool
+ixgbe_phy_caps_equals_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *phy_caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *phy_cfg)
+{
+ u8 caps_mask, cfg_mask;
+
+ if (!phy_caps || !phy_cfg)
+ return false;
+
+ /* These bits are not common between capabilities and configuration.
+ * Do not use them to determine equality.
+ */
+ caps_mask = IXGBE_ACI_PHY_CAPS_MASK & ~(IXGBE_ACI_PHY_AN_MODE |
+ IXGBE_ACI_PHY_EN_MOD_QUAL);
+ cfg_mask = IXGBE_ACI_PHY_ENA_VALID_MASK &
+ ~IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
+ phy_caps->phy_type_high != phy_cfg->phy_type_high ||
+ ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
+ phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
+ phy_caps->eee_cap != phy_cfg->eee_cap ||
+ phy_caps->eeer_value != phy_cfg->eeer_value ||
+ phy_caps->link_fec_options != phy_cfg->link_fec_opt)
+ return false;
+
+ return true;
+}
+
+/**
+ * ixgbe_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
+ * @caps: PHY ability structure to copy data from
+ * @cfg: PHY configuration structure to copy data to
+ *
+ * Helper function to copy data from PHY capabilities data structure
+ * to PHY configuration data structure
+ */
+void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
+{
+ if (!caps || !cfg)
+ return;
+
+ memset(cfg, 0, sizeof(*cfg));
+ cfg->phy_type_low = caps->phy_type_low;
+ cfg->phy_type_high = caps->phy_type_high;
+ cfg->caps = caps->caps;
+ cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
+ cfg->eee_cap = caps->eee_cap;
+ cfg->eeer_value = caps->eeer_value;
+ cfg->link_fec_opt = caps->link_fec_options;
+ cfg->module_compliance_enforcement =
+ caps->module_compliance_enforcement;
+}
+
+/**
+ * ixgbe_aci_set_phy_cfg - set PHY configuration
+ * @hw: pointer to the HW struct
+ * @cfg: structure with PHY configuration data to be set
+ *
+ * Set the various PHY configuration parameters supported on the Port
+ * using ACI command (0x0601).
+ * One or more of the Set PHY config parameters may be ignored in an MFP
+ * mode as the PF may not have the privilege to set some of the PHY Config
+ * parameters.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
+{
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ if (!cfg)
+ return IXGBE_ERR_PARAM;
+
+ /* Ensure that only valid bits of cfg->caps can be turned on. */
+ if (cfg->caps & ~IXGBE_ACI_PHY_ENA_VALID_MASK)
+ cfg->caps &= IXGBE_ACI_PHY_ENA_VALID_MASK;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg);
+ desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg));
+
+ if (!status)
+ hw->phy.curr_user_phy_cfg = *cfg;
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_set_link_restart_an - set up link and restart AN
+ * @hw: pointer to the HW struct
+ * @ena_link: if true: enable link, if false: disable link
+ *
+ * Function sets up the link and restarts the Auto-Negotiation over the link.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link)
+{
+ struct ixgbe_aci_cmd_restart_an *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.restart_an;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_restart_an);
+
+ cmd->cmd_flags = IXGBE_ACI_RESTART_AN_LINK_RESTART;
+ if (ena_link)
+ cmd->cmd_flags |= IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+ else
+ cmd->cmd_flags &= ~IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_is_media_cage_present - check if media cage is present
+ * @hw: pointer to the HW struct
+ *
+ * Identify presence of media cage using the ACI command (0x06E0).
+ *
+ * Return: true if media cage is present, else false. If no cage, then
+ * media type is backplane or BASE-T.
+ */
+static bool ixgbe_is_media_cage_present(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_link_topo *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.get_link_topo;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
+
+ cmd->addr.topo_params.node_type_ctx =
+ (IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT <<
+ IXGBE_ACI_LINK_TOPO_NODE_CTX_S);
+
+ /* set node type */
+ cmd->addr.topo_params.node_type_ctx |=
+ (IXGBE_ACI_LINK_TOPO_NODE_TYPE_M &
+ IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE);
+
+ /* The cage node type can be used to determine if a cage is present. If
+ * the ACI returns an error (ENOENT), then no cage is present, and the
+ * connection type is backplane or BASE-T.
+ */
+ return ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL);
+}
+
+/**
+ * ixgbe_get_media_type_from_phy_type - Gets media type based on phy type
+ * @hw: pointer to the HW struct
+ *
+ * Try to identify the media type based on the PHY type.
+ * phy_type_low is checked first, then phy_type_high.
+ * If more than one media type is indicated, or none can be identified,
+ * ixgbe_media_type_unknown is returned.
+ *
+ * Return: type of a media based on phy type in form of enum.
+ */
+static enum ixgbe_media_type
+ixgbe_get_media_type_from_phy_type(struct ixgbe_hw *hw)
+{
+ struct ixgbe_link_status *hw_link_info;
+
+ if (!hw)
+ return ixgbe_media_type_unknown;
+
+ hw_link_info = &hw->link.link_info;
+ if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
+ /* If more than one media type is selected, report unknown */
+ return ixgbe_media_type_unknown;
+
+ if (hw_link_info->phy_type_low) {
+ /* 1G SGMII is a special case where some DA cable PHYs
+ * may show this as an option when it really shouldn't
+ * be since SGMII is meant to be between a MAC and a PHY
+ * in a backplane. Try to detect this case and handle it
+ */
+ if (hw_link_info->phy_type_low == IXGBE_PHY_TYPE_LOW_1G_SGMII &&
+ (hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
+ IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
+ hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
+ IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
+ return ixgbe_media_type_da;
+
+ switch (hw_link_info->phy_type_low) {
+ case IXGBE_PHY_TYPE_LOW_1000BASE_SX:
+ case IXGBE_PHY_TYPE_LOW_1000BASE_LX:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_SR:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_LR:
+ return ixgbe_media_type_fiber;
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+ return ixgbe_media_type_fiber;
+ case IXGBE_PHY_TYPE_LOW_100BASE_TX:
+ case IXGBE_PHY_TYPE_LOW_1000BASE_T:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_T:
+ case IXGBE_PHY_TYPE_LOW_5GBASE_T:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_T:
+ return ixgbe_media_type_copper;
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_DA:
+ return ixgbe_media_type_da;
+ case IXGBE_PHY_TYPE_LOW_1000BASE_KX:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_KX:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_X:
+ case IXGBE_PHY_TYPE_LOW_5GBASE_KR:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_C2C:
+ return ixgbe_media_type_backplane;
+ }
+ } else {
+ switch (hw_link_info->phy_type_high) {
+ case IXGBE_PHY_TYPE_HIGH_10BASE_T:
+ return ixgbe_media_type_copper;
+ }
+ }
+ return ixgbe_media_type_unknown;
+}
+
+/**
+ * ixgbe_update_link_info - update status of the HW network link
+ * @hw: pointer to the HW struct
+ *
+ * Update the status of the HW network link.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_update_link_info(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps;
+ struct ixgbe_link_status *li;
+ s32 status;
+
+ if (!hw)
+ return IXGBE_ERR_PARAM;
+
+ li = &hw->link.link_info;
+
+ status = ixgbe_aci_get_link_info(hw, true, NULL);
+ if (status)
+ return status;
+
+ if (li->link_info & IXGBE_ACI_MEDIA_AVAILABLE) {
+ pcaps = (struct ixgbe_aci_cmd_get_phy_caps_data *)
+ ixgbe_malloc(hw, sizeof(*pcaps));
+ if (!pcaps)
+ return IXGBE_ERR_OUT_OF_MEM;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ pcaps);
+
+ if (status == IXGBE_SUCCESS)
+ memcpy(li->module_type, &pcaps->module_type,
+ sizeof(li->module_type));
+
+ ixgbe_free(hw, pcaps);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_get_link_status - get status of the HW network link
+ * @hw: pointer to the HW struct
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
+ *
+ * The variable link_up is true if the link is up, false if it is down.
+ * link_up is not valid if the returned status is non-zero. As a
+ * result of this call, link status reporting becomes enabled.
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ if (!hw || !link_up)
+ return IXGBE_ERR_PARAM;
+
+ if (hw->link.get_link_info) {
+ status = ixgbe_update_link_info(hw);
+ if (status) {
+ return status;
+ }
+ }
+
+ *link_up = hw->link.link_info.link_info & IXGBE_ACI_LINK_UP;
+
+ return status;
+}
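+
+/* Example (illustrative only, not part of the driver): polling the link
+ * with the helper above. The first call also enables link status
+ * reporting, as noted in the kdoc. DEBUGOUT() is the osdep logging macro.
+ */
+static void ixgbe_example_poll_link(struct ixgbe_hw *hw)
+{
+ bool link_up = false;
+
+ if (ixgbe_get_link_status(hw, &link_up) == IXGBE_SUCCESS && link_up)
+ DEBUGOUT("link is up\n");
+}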
+
+/**
+ * ixgbe_aci_get_link_info - get the link status
+ * @hw: pointer to the HW struct
+ * @ena_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ *
+ * Get the current link status using ACI command (0x0607).
+ * If the optional @link pointer is provided, the detailed link
+ * status is copied into it.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+ struct ixgbe_link_status *link)
+{
+ struct ixgbe_aci_cmd_get_link_status_data link_data = { 0 };
+ struct ixgbe_aci_cmd_get_link_status *resp;
+ struct ixgbe_link_status *li_old, *li;
+ struct ixgbe_fc_info *hw_fc_info;
+ struct ixgbe_aci_desc desc;
+ bool tx_pause, rx_pause;
+ u8 cmd_flags;
+ s32 status;
+
+ if (!hw)
+ return IXGBE_ERR_PARAM;
+
+ li_old = &hw->link.link_info_old;
+ li = &hw->link.link_info;
+ hw_fc_info = &hw->fc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
+ cmd_flags = (ena_lse) ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS;
+ resp = &desc.params.get_link_status;
+ resp->cmd_flags = cmd_flags;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* save off old link status information */
+ *li_old = *li;
+
+ /* update current link status information */
+ li->link_speed = IXGBE_LE16_TO_CPU(link_data.link_speed);
+ li->phy_type_low = IXGBE_LE64_TO_CPU(link_data.phy_type_low);
+ li->phy_type_high = IXGBE_LE64_TO_CPU(link_data.phy_type_high);
+ li->link_info = link_data.link_info;
+ li->link_cfg_err = link_data.link_cfg_err;
+ li->an_info = link_data.an_info;
+ li->ext_info = link_data.ext_info;
+ li->max_frame_size = IXGBE_LE16_TO_CPU(link_data.max_frame_size);
+ li->fec_info = link_data.cfg & IXGBE_ACI_FEC_MASK;
+ li->topo_media_conflict = link_data.topo_media_conflict;
+ li->pacing = link_data.cfg & (IXGBE_ACI_CFG_PACING_M |
+ IXGBE_ACI_CFG_PACING_TYPE_M);
+
+ /* update fc info */
+ tx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_TX);
+ rx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_RX);
+ if (tx_pause && rx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_full;
+ else if (tx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_tx_pause;
+ else if (rx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_rx_pause;
+ else
+ hw_fc_info->current_mode = ixgbe_fc_none;
+
+ li->lse_ena = !!(resp->cmd_flags & IXGBE_ACI_LSE_IS_ENABLED);
+
+ /* save link status information */
+ if (link)
+ *link = *li;
+
+ /* flag cleared so calling functions don't send the ACI command again */
+ hw->link.get_link_info = false;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_aci_set_event_mask - set event mask
+ * @hw: pointer to the HW struct
+ * @port_num: port number of the physical function
+ * @mask: event mask to be set
+ *
+ * Set the event mask using ACI command (0x0613).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask)
+{
+ struct ixgbe_aci_cmd_set_event_mask *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.set_event_mask;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_event_mask);
+
+ cmd->event_mask = IXGBE_CPU_TO_LE16(mask);
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_configure_lse - enable/disable link status events
+ * @hw: pointer to the HW struct
+ * @activate: bool value deciding if lse should be enabled or disabled
+ * @mask: event mask to be set; a set bit means deactivation of the
+ * corresponding event
+ *
+ * Set the event mask and then enable or disable link status events.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask)
+{
+ s32 rc;
+
+ rc = ixgbe_aci_set_event_mask(hw, (u8)hw->bus.func, mask);
+ if (rc) {
+ return rc;
+ }
+
+ /* Enable link status event generation by FW */
+ rc = ixgbe_aci_get_link_info(hw, activate, NULL);
+ if (rc) {
+ return rc;
+ }
+ return IXGBE_SUCCESS;
+}
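+
+/* Example (illustrative only, not part of the driver): enable link status
+ * events. Per the kdoc above, a set mask bit deactivates the corresponding
+ * event, so a mask of 0 leaves every event active.
+ */
+static s32 ixgbe_example_enable_lse(struct ixgbe_hw *hw)
+{
+ return ixgbe_configure_lse(hw, true, 0);
+}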
+
+/**
+ * ixgbe_aci_get_netlist_node - get a node handle
+ * @hw: pointer to the hw struct
+ * @cmd: get_link_topo AQ structure
+ * @node_part_number: output node part number if node found
+ * @node_handle: output node handle parameter if node found
+ *
+ * Get the netlist node and assign it to
+ * the provided handle using ACI command (0x06E0).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle)
+{
+ struct ixgbe_aci_desc desc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
+ desc.params.get_link_topo = *cmd;
+
+ if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0))
+ return IXGBE_ERR_NOT_SUPPORTED;
+
+ if (node_handle)
+ *node_handle =
+ IXGBE_LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
+ if (node_part_number)
+ *node_part_number = desc.params.get_link_topo.node_part_num;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_find_netlist_node - find a node handle
+ * @hw: pointer to the hw struct
+ * @node_type_ctx: type of netlist node to look for
+ * @node_part_number: node part number to look for
+ * @node_handle: output parameter if node found - optional
+ *
+ * Find and return the node handle for a given node type and part number in
+ * the netlist. Returns IXGBE_SUCCESS when the node is found and
+ * IXGBE_ERR_NOT_SUPPORTED otherwise. If @node_handle is provided, it is set
+ * to the handle of the found node.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_find_netlist_node(struct ixgbe_hw *hw, u8 node_type_ctx,
+ u8 node_part_number, u16 *node_handle)
+{
+ struct ixgbe_aci_cmd_get_link_topo cmd;
+ u8 rec_node_part_number;
+ u16 rec_node_handle;
+ s32 status;
+ u8 idx;
+
+ for (idx = 0; idx < IXGBE_MAX_NETLIST_SIZE; idx++) {
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.addr.topo_params.node_type_ctx =
+ (node_type_ctx << IXGBE_ACI_LINK_TOPO_NODE_TYPE_S);
+ cmd.addr.topo_params.index = idx;
+
+ status = ixgbe_aci_get_netlist_node(hw, &cmd,
+ &rec_node_part_number,
+ &rec_node_handle);
+ if (status)
+ return status;
+
+ if (rec_node_part_number == node_part_number) {
+ if (node_handle)
+ *node_handle = rec_node_handle;
+ return IXGBE_SUCCESS;
+ }
+ }
+
+ return IXGBE_ERR_NOT_SUPPORTED;
+}
+
+/**
+ * ixgbe_aci_read_i2c - read I2C register value
+ * @hw: pointer to the hw struct
+ * @topo_addr: topology address for a device to communicate with
+ * @bus_addr: 7-bit I2C bus address
+ * @addr: I2C memory address (I2C offset) with up to 16 bits
+ * @params: I2C parameters: bit [7] - Repeated start,
+ * bits [6:5] data offset size,
+ * bit [4] - I2C address type, bits [3:0] - data size
+ * to read (0-16 bytes)
+ * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
+ *
+ * Read the value of the I2C pin register using ACI command (0x06E2).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_read_i2c(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data)
+{
+ struct ixgbe_aci_desc desc = { 0 };
+ struct ixgbe_aci_cmd_i2c *cmd;
+ u8 data_size;
+ s32 status;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_i2c);
+ cmd = &desc.params.read_write_i2c;
+
+ if (!data)
+ return IXGBE_ERR_PARAM;
+
+ data_size = (params & IXGBE_ACI_I2C_DATA_SIZE_M) >>
+ IXGBE_ACI_I2C_DATA_SIZE_S;
+
+ cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(bus_addr);
+ cmd->topo_addr = topo_addr;
+ cmd->i2c_params = params;
+ cmd->i2c_addr = addr;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!status) {
+ struct ixgbe_aci_cmd_read_i2c_resp *resp;
+ u8 i;
+
+ resp = &desc.params.read_i2c_resp;
+ for (i = 0; i < data_size; i++) {
+ *data = resp->i2c_data[i];
+ data++;
+ }
+ }
+
+ return status;
+}
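+
+/* Example (illustrative only, not part of the driver): composing the
+ * @params byte for a 2-byte I2C read. The topology address is left zeroed
+ * here; a real caller would fill it from a get_link_topo lookup.
+ */
+static s32 ixgbe_example_i2c_read2(struct ixgbe_hw *hw, u16 bus_addr,
+ __le16 addr, u8 *buf)
+{
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr = { 0 };
+ u8 params = (2 << IXGBE_ACI_I2C_DATA_SIZE_S) & IXGBE_ACI_I2C_DATA_SIZE_M;
+
+ return ixgbe_aci_read_i2c(hw, topo_addr, bus_addr, addr, params, buf);
+}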
+
+/**
+ * ixgbe_aci_write_i2c - write a value to I2C register
+ * @hw: pointer to the hw struct
+ * @topo_addr: topology address for a device to communicate with
+ * @bus_addr: 7-bit I2C bus address
+ * @addr: I2C memory address (I2C offset) with up to 16 bits
+ * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size
+ * to write (0-4 bytes)
+ * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
+ *
+ * Write a value to the I2C pin register using ACI command (0x06E3).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_write_i2c(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data)
+{
+ struct ixgbe_aci_desc desc = { 0 };
+ struct ixgbe_aci_cmd_i2c *cmd;
+ u8 i, data_size;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_write_i2c);
+ cmd = &desc.params.read_write_i2c;
+
+ data_size = (params & IXGBE_ACI_I2C_DATA_SIZE_M) >>
+ IXGBE_ACI_I2C_DATA_SIZE_S;
+
+ /* data_size limited to 4 */
+ if (data_size > 4)
+ return IXGBE_ERR_PARAM;
+
+ cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(bus_addr);
+ cmd->topo_addr = topo_addr;
+ cmd->i2c_params = params;
+ cmd->i2c_addr = addr;
+
+ for (i = 0; i < data_size; i++) {
+ cmd->i2c_data[i] = *data;
+ data++;
+ }
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_set_port_id_led - set LED value for the given port
+ * @hw: pointer to the HW struct
+ * @orig_mode: set LED original mode
+ *
+ * Set LED value for the given port (0x06E9)
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode)
+{
+ struct ixgbe_aci_cmd_set_port_id_led *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.set_port_id_led;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_port_id_led);
+
+ cmd->lport_num = (u8)hw->bus.func;
+ cmd->lport_num_valid = IXGBE_ACI_PORT_ID_PORT_NUM_VALID;
+
+ if (orig_mode)
+ cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_ORIG;
+ else
+ cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_BLINK;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_set_gpio - set GPIO pin state
+ * @hw: pointer to the hw struct
+ * @gpio_ctrl_handle: GPIO controller node handle
+ * @pin_idx: IO Number of the GPIO that needs to be set
+ * @value: SW provided IO value to set in the LSB
+ *
+ * Set the GPIO pin state that is a part of the topology
+ * using ACI command (0x06EC).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool value)
+{
+ struct ixgbe_aci_cmd_gpio *cmd;
+ struct ixgbe_aci_desc desc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_gpio);
+ cmd = &desc.params.read_write_gpio;
+ cmd->gpio_ctrl_handle = IXGBE_CPU_TO_LE16(gpio_ctrl_handle);
+ cmd->gpio_num = pin_idx;
+ cmd->gpio_val = value ? 1 : 0;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_get_gpio - get GPIO pin state
+ * @hw: pointer to the hw struct
+ * @gpio_ctrl_handle: GPIO controller node handle
+ * @pin_idx: IO Number of the GPIO that needs to be read
+ * @value: IO value read
+ *
+ * Get the value of a GPIO signal which is part of the topology
+ * using ACI command (0x06ED).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool *value)
+{
+ struct ixgbe_aci_cmd_gpio *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_gpio);
+ cmd = &desc.params.read_write_gpio;
+ cmd->gpio_ctrl_handle = IXGBE_CPU_TO_LE16(gpio_ctrl_handle);
+ cmd->gpio_num = pin_idx;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (status)
+ return status;
+
+ *value = !!cmd->gpio_val;
+ return IXGBE_SUCCESS;
+}
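+
+/* Example (illustrative only, not part of the driver): drive a topology
+ * GPIO pin high and read it back with the set/get pair above.
+ */
+static s32 ixgbe_example_gpio_roundtrip(struct ixgbe_hw *hw, u16 handle,
+ u8 pin)
+{
+ bool value = false;
+ s32 status;
+
+ status = ixgbe_aci_set_gpio(hw, handle, pin, true);
+ if (status)
+ return status;
+
+ /* On success, value holds the pin state just driven. */
+ return ixgbe_aci_get_gpio(hw, handle, pin, &value);
+}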
+
+/**
+ * ixgbe_aci_sff_eeprom - read/write SFF EEPROM
+ * @hw: pointer to the HW struct
+ * @lport: bits [7:0] = logical port, bit [8] = logical port valid
+ * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
+ * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
+ * @page: QSFP page
+ * @page_bank_ctrl: configuration of SFF/CMIS paging and banking control
+ * @data: pointer to data buffer to be read/written to the I2C device.
+ * @length: 1-16 for read, 1 for write.
+ * @write: false for read, true for write.
+ *
+ * Read/write SFF EEPROM using ACI command (0x06EE).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_sff_eeprom(struct ixgbe_hw *hw, u16 lport, u8 bus_addr,
+ u16 mem_addr, u8 page, u8 page_bank_ctrl, u8 *data,
+ u8 length, bool write)
+{
+ struct ixgbe_aci_cmd_sff_eeprom *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ if (!data || (mem_addr & 0xff00))
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_sff_eeprom);
+ cmd = &desc.params.read_write_sff_param;
+ desc.flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+ cmd->lport_num = (u8)(lport & 0xff);
+ cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
+ cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(((bus_addr >> 1) &
+ IXGBE_ACI_SFF_I2CBUS_7BIT_M) |
+ ((page_bank_ctrl <<
+ IXGBE_ACI_SFF_PAGE_BANK_CTRL_S) &
+ IXGBE_ACI_SFF_PAGE_BANK_CTRL_M));
+ cmd->i2c_offset = IXGBE_CPU_TO_LE16(mem_addr & 0xff);
+ cmd->module_page = page;
+ if (write)
+ cmd->i2c_bus_addr |= IXGBE_CPU_TO_LE16(IXGBE_ACI_SFF_IS_WRITE);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, data, length);
+ return status;
+}
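+
+/* Example (illustrative only, not part of the driver): read the first 8
+ * identification bytes of a module's SFF EEPROM. Bit [8] of lport marks
+ * the port number as valid; 0xA0 is the conventional SFF EEPROM address.
+ */
+static s32 ixgbe_example_read_sff_id(struct ixgbe_hw *hw, u8 port, u8 *buf)
+{
+ u16 lport = (u16)port | BIT(8); /* logical port + valid bit */
+
+ return ixgbe_aci_sff_eeprom(hw, lport, 0xA0, 0, 0, 0, buf, 8, false);
+}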
+
+/**
+ * ixgbe_aci_prog_topo_dev_nvm - program Topology Device NVM
+ * @hw: pointer to the hardware structure
+ * @topo_params: pointer to structure storing topology parameters for a device
+ *
+ * Program Topology Device NVM using ACI command (0x06F2).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_prog_topo_dev_nvm(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_params *topo_params)
+{
+ struct ixgbe_aci_cmd_prog_topo_dev_nvm *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.prog_topo_dev_nvm;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_prog_topo_dev_nvm);
+
+ memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params));
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_read_topo_dev_nvm - read Topology Device NVM
+ * @hw: pointer to the hardware structure
+ * @topo_params: pointer to structure storing topology parameters for a device
+ * @start_address: byte offset in the topology device NVM
+ * @data: pointer to data buffer
+ * @data_size: number of bytes to be read from the topology device NVM
+ *
+ * Read Topology Device NVM using ACI command (0x06F3).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_read_topo_dev_nvm(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_params *topo_params,
+ u32 start_address, u8 *data, u8 data_size)
+{
+ struct ixgbe_aci_cmd_read_topo_dev_nvm *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ if (!data || data_size == 0 ||
+ data_size > IXGBE_ACI_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
+ return IXGBE_ERR_PARAM;
+
+ cmd = &desc.params.read_topo_dev_nvm;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_topo_dev_nvm);
+
+ desc.datalen = IXGBE_CPU_TO_LE16(data_size);
+ memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params));
+ cmd->start_address = IXGBE_CPU_TO_LE32(start_address);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (status)
+ return status;
+
+ memcpy(data, cmd->data_read, data_size);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
+ *
+ * Request NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+ enum ixgbe_aci_res_access_type access)
+{
+ u32 fla;
+
+ /* Skip if we are in blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, GLNVM_FLA);
+ if ((fla & GLNVM_FLA_LOCKED_M) == 0)
+ return IXGBE_SUCCESS;
+
+ return ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, access,
+ IXGBE_NVM_TIMEOUT);
+}
+
+/**
+ * ixgbe_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
+ *
+ * Release NVM ownership.
+ */
+void ixgbe_release_nvm(struct ixgbe_hw *hw)
+{
+ u32 fla;
+
+ /* Skip if we are in blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, GLNVM_FLA);
+ if ((fla & GLNVM_FLA_LOCKED_M) == 0)
+ return;
+
+ ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
+}
+
+/**
+ * ixgbe_aci_read_nvm - read NVM
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @read_shadow_ram: tell if this is a shadow RAM read
+ *
+ * Read the NVM using ACI command (0x0701).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm *cmd;
+
+ cmd = &desc.params.nvm;
+
+ if (offset > IXGBE_ACI_NVM_MAX_OFFSET)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read);
+
+ if (!read_shadow_ram && module_typeid == IXGBE_ACI_NVM_START_POINT)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_FLASH_ONLY;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
+ cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
+ cmd->offset_low = IXGBE_CPU_TO_LE16(offset & 0xFFFF);
+ cmd->offset_high = (offset >> 16) & 0xFF;
+ cmd->length = IXGBE_CPU_TO_LE16(length);
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
+
+/**
+ * ixgbe_aci_erase_nvm - erase NVM sector
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ *
+ * Erase the NVM sector using the ACI command (0x0702).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm *cmd;
+ s32 status;
+ __le16 len;
+
+ /* Read the module length from the Shadow RAM: pass module_typeid 0 to
+ * address the SR directly, convert the word offset of the module size
+ * to a byte offset, and set both last_command and read_shadow_ram to
+ * true.
+ */
+ status = ixgbe_aci_read_nvm(hw, 0, 2 * module_typeid + 2, 2, &len, true,
+ true);
+ if (status)
+ return status;
+
+ cmd = &desc.params.nvm;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_erase);
+
+ cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
+ cmd->length = len;
+ cmd->offset_low = 0;
+ cmd->offset_high = 0;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_update_nvm - update NVM
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be written (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @command_flags: command parameters
+ *
+ * Update the NVM using the ACI command (0x0703).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 command_flags)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm *cmd;
+
+ cmd = &desc.params.nvm;
+
+ /* The highest byte of the offset must be zero. */
+ if (offset & 0xFF000000)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_write);
+
+ cmd->cmd_flags |= command_flags;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
+ cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
+ cmd->offset_low = IXGBE_CPU_TO_LE16(offset & 0xFFFF);
+ cmd->offset_high = (offset >> 16) & 0xFF;
+ cmd->length = IXGBE_CPU_TO_LE16(length);
+
+ desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
+
+/**
+ * ixgbe_aci_read_nvm_cfg - read an NVM config block
+ * @hw: pointer to the HW struct
+ * @cmd_flags: NVM access admin command bits
+ * @field_id: field or feature ID
+ * @data: buffer for result
+ * @buf_size: buffer size
+ * @elem_count: pointer to count of elements read by FW
+ *
+ * Reads a single or multiple feature/field ID and data using ACI command
+ * (0x0704).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_read_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
+ u16 field_id, void *data, u16 buf_size,
+ u16 *elem_count)
+{
+ struct ixgbe_aci_cmd_nvm_cfg *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.nvm_cfg;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_cfg_read);
+
+ cmd->cmd_flags = cmd_flags;
+ cmd->id = IXGBE_CPU_TO_LE16(field_id);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, data, buf_size);
+ if (!status && elem_count)
+ *elem_count = IXGBE_LE16_TO_CPU(cmd->count);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_write_nvm_cfg - write an NVM config block
+ * @hw: pointer to the HW struct
+ * @cmd_flags: NVM access admin command bits
+ * @data: buffer for result
+ * @buf_size: buffer size
+ * @elem_count: count of elements to be written
+ *
+ * Writes a single or multiple feature/field ID and data using ACI command
+ * (0x0705).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_write_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
+ void *data, u16 buf_size, u16 elem_count)
+{
+ struct ixgbe_aci_cmd_nvm_cfg *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.nvm_cfg;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_cfg_write);
+ desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+
+ cmd->count = IXGBE_CPU_TO_LE16(elem_count);
+ cmd->cmd_flags = cmd_flags;
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, buf_size);
+}
+
+/**
+ * ixgbe_nvm_validate_checksum - validate checksum
+ * @hw: pointer to the HW struct
+ *
+ * Verify NVM PFA checksum validity using ACI command (0x0706).
+ * If the checksum verification failed, IXGBE_ERR_NVM_CHECKSUM is returned.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_nvm_checksum *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ cmd = &desc.params.nvm_checksum;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
+ cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ ixgbe_release_nvm(hw);
+
+ if (!status &&
+ IXGBE_LE16_TO_CPU(cmd->checksum) != IXGBE_ACI_NVM_CHECKSUM_CORRECT) {
+ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+ "Invalid Shadow RAM checksum");
+ status = IXGBE_ERR_NVM_CHECKSUM;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_nvm_recalculate_checksum - recalculate checksum
+ * @hw: pointer to the HW struct
+ *
+ * Recalculate NVM PFA checksum using ACI command (0x0706).
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_nvm_recalculate_checksum(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_nvm_checksum *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ cmd = &desc.params.nvm_checksum;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
+ cmd->flags = IXGBE_ACI_NVM_CHECKSUM_RECALC;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_nvm_write_activate - NVM activate write
+ * @hw: pointer to the HW struct
+ * @cmd_flags: flags for write activate command
+ * @response_flags: response indicators from firmware
+ *
+ * Update the control word with the required banks' validity bits
+ * and dumps the Shadow RAM to flash using ACI command (0x0707).
+ *
+ * cmd_flags controls which banks to activate, the preservation level to use
+ * when activating the NVM bank, and whether an EMP reset is required for
+ * activation.
+ *
+ * Note that the 16-bit cmd_flags value is split between two separate 1-byte
+ * flag values in the descriptor.
+ *
+ * On successful return of the firmware command, the response_flags variable
+ * is updated with the flags reported by firmware indicating certain status,
+ * such as whether EMP reset is enabled.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
+ u8 *response_flags)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm *cmd;
+ s32 status;
+
+ cmd = &desc.params.nvm;
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_nvm_write_activate);
+
+ cmd->cmd_flags = LO_BYTE(cmd_flags);
+ cmd->offset_high = HI_BYTE(cmd_flags);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!status && response_flags)
+ *response_flags = cmd->cmd_flags;
+
+ return status;
+}
+
+/**
+ * ixgbe_get_flash_bank_offset - Get offset into requested flash bank
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive flash bank
+ * @module: the module to read from
+ *
+ * Based on the module, look up the module offset from the beginning of the
+ * flash.
+ *
+ * Return: the flash offset. Note that a value of zero is invalid and must be
+ * treated as an error.
+ */
+static u32 ixgbe_get_flash_bank_offset(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u16 module)
+{
+ struct ixgbe_bank_info *banks = &hw->flash.banks;
+ enum ixgbe_flash_bank active_bank;
+ bool second_bank_active;
+ u32 offset, size;
+
+ switch (module) {
+ case E610_SR_1ST_NVM_BANK_PTR:
+ offset = banks->nvm_ptr;
+ size = banks->nvm_size;
+ active_bank = banks->nvm_bank;
+ break;
+ case E610_SR_1ST_OROM_BANK_PTR:
+ offset = banks->orom_ptr;
+ size = banks->orom_size;
+ active_bank = banks->orom_bank;
+ break;
+ case E610_SR_NETLIST_BANK_PTR:
+ offset = banks->netlist_ptr;
+ size = banks->netlist_size;
+ active_bank = banks->netlist_bank;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (active_bank) {
+ case IXGBE_1ST_FLASH_BANK:
+ second_bank_active = false;
+ break;
+ case IXGBE_2ND_FLASH_BANK:
+ second_bank_active = true;
+ break;
+ default:
+ return 0;
+ }
+
+ /* The second flash bank is stored immediately following the first
+ * bank. Based on whether the 1st or 2nd bank is active, and whether
+ * we want the active or inactive bank, calculate the desired offset.
+ */
+ switch (bank) {
+ case IXGBE_ACTIVE_FLASH_BANK:
+ return offset + (second_bank_active ? size : 0);
+ case IXGBE_INACTIVE_FLASH_BANK:
+ return offset + (second_bank_active ? 0 : size);
+ }
+
+ return 0;
+}
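+
+/* Worked example for the offset math above (illustrative values): with
+ * nvm_ptr = 0x10000, nvm_size = 0x8000 and the 2nd bank active, the active
+ * bank starts at 0x10000 + 0x8000 = 0x18000 and the inactive bank at
+ * 0x10000; with the 1st bank active the two results swap.
+ */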
+
+/**
+ * ixgbe_read_flash_module - Read data from one of the main NVM modules
+ * @hw: pointer to the HW structure
+ * @bank: which bank of the module to read
+ * @module: the module to read
+ * @offset: the offset into the module in bytes
+ * @data: storage for the data read from the flash
+ * @length: bytes of data to read
+ *
+ * Read data from the specified flash module. The bank parameter indicates
+ * whether to read from the active or the inactive bank of that module.
+ *
+ * The data is read using flat NVM access and relies on the
+ * hw->flash.banks data being set up by ixgbe_determine_active_flash_banks()
+ * during initialization.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_flash_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u16 module, u32 offset, u8 *data, u32 length)
+{
+ s32 status;
+ u32 start;
+
+ start = ixgbe_get_flash_bank_offset(hw, bank, module);
+ if (!start) {
+ return IXGBE_ERR_PARAM;
+ }
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_read_flat_nvm(hw, start + offset, &length, data, false);
+
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_netlist_module - Read data from the netlist module area
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive module
+ * @offset: offset into the netlist to read from
+ * @data: storage for returned word value
+ *
+ * Read a word from the specified netlist bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_netlist_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ s32 status;
+
+ status = ixgbe_read_flash_module(hw, bank, E610_SR_NETLIST_BANK_PTR,
+ offset * sizeof(u16),
+ (u8 *)&data_local,
+ sizeof(u16));
+ if (!status)
+ *data = IXGBE_LE16_TO_CPU(data_local);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_nvm_module - Read from the active main NVM module
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from active or inactive NVM module
+ * @offset: offset into the NVM module to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the active NVM module. This includes the CSS
+ * header at the start of the NVM module.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_nvm_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ s32 status;
+
+ status = ixgbe_read_flash_module(hw, bank, E610_SR_1ST_NVM_BANK_PTR,
+ offset * sizeof(u16),
+ (u8 *)&data_local,
+ sizeof(u16));
+ if (!status)
+ *data = IXGBE_LE16_TO_CPU(data_local);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_nvm_css_hdr_len - Read the CSS header length from the
+ * NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @hdr_len: storage for header length in words
+ *
+ * Read the CSS header length from the NVM CSS header, convert it to
+ * words, and add the authentication header size.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_nvm_css_hdr_len(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 *hdr_len)
+{
+ u16 hdr_len_l, hdr_len_h;
+ u32 hdr_len_dword;
+ s32 status;
+
+ status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_L,
+ &hdr_len_l);
+ if (status)
+ return status;
+
+ status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_H,
+ &hdr_len_h);
+ if (status)
+ return status;
+
+ /* CSS header length is in DWORD, so convert to words and add
+ * authentication header size
+ */
+ hdr_len_dword = hdr_len_h << 16 | hdr_len_l;
+ *hdr_len = (hdr_len_dword * 2) + IXGBE_NVM_AUTH_HEADER_LEN;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_nvm_sr_copy - Read a word from the Shadow RAM copy in the NVM bank
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive NVM module
+ * @offset: offset into the Shadow RAM copy to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the copy of the Shadow RAM found in the
+ * specified NVM module.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_nvm_sr_copy(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ u32 hdr_len;
+ s32 status;
+
+ status = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
+ if (status)
+ return status;
+
+ hdr_len = ROUND_UP(hdr_len, 32);
+
+ return ixgbe_read_nvm_module(hw, bank, hdr_len + offset, data);
+}
+
+/**
+ * ixgbe_get_nvm_minsrevs - Get the minsrevs values from flash
+ * @hw: pointer to the HW struct
+ * @minsrevs: structure to store NVM and OROM minsrev values
+ *
+ * Read the Minimum Security Revision TLV and extract
+ * the revision values from the flash image
+ * into a readable structure for processing.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_nvm_minsrevs(struct ixgbe_hw *hw,
+ struct ixgbe_minsrev_info *minsrevs)
+{
+ struct ixgbe_aci_cmd_nvm_minsrev data;
+ s32 status;
+ u16 valid;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID,
+ 0, sizeof(data), &data,
+ true, false);
+
+ ixgbe_release_nvm(hw);
+
+ if (status)
+ return status;
+
+ valid = IXGBE_LE16_TO_CPU(data.validity);
+
+ /* Extract NVM minimum security revision */
+ if (valid & IXGBE_ACI_NVM_MINSREV_NVM_VALID) {
+ u16 minsrev_l = IXGBE_LE16_TO_CPU(data.nvm_minsrev_l);
+ u16 minsrev_h = IXGBE_LE16_TO_CPU(data.nvm_minsrev_h);
+
+ minsrevs->nvm = minsrev_h << 16 | minsrev_l;
+ minsrevs->nvm_valid = true;
+ }
+
+ /* Extract the OROM minimum security revision */
+ if (valid & IXGBE_ACI_NVM_MINSREV_OROM_VALID) {
+ u16 minsrev_l = IXGBE_LE16_TO_CPU(data.orom_minsrev_l);
+ u16 minsrev_h = IXGBE_LE16_TO_CPU(data.orom_minsrev_h);
+
+ minsrevs->orom = minsrev_h << 16 | minsrev_l;
+ minsrevs->orom_valid = true;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_update_nvm_minsrevs - Update minsrevs TLV data in flash
+ * @hw: pointer to the HW struct
+ * @minsrevs: minimum security revision information
+ *
+ * Update the NVM or Option ROM minimum security revision fields in the PFA
+ * area of the flash. Reads the minsrevs->nvm_valid and minsrevs->orom_valid
+ * fields to determine what update is being requested. If the valid bit is not
+ * set for that module, then the associated minsrev will be left as is.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_update_nvm_minsrevs(struct ixgbe_hw *hw,
+ struct ixgbe_minsrev_info *minsrevs)
+{
+ struct ixgbe_aci_cmd_nvm_minsrev data;
+ s32 status;
+
+ if (!minsrevs->nvm_valid && !minsrevs->orom_valid) {
+ return IXGBE_ERR_PARAM;
+ }
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ /* Get current data */
+ status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID, 0,
+ sizeof(data), &data, true, false);
+ if (status)
+ goto exit_release_res;
+
+ if (minsrevs->nvm_valid) {
+ data.nvm_minsrev_l = IXGBE_CPU_TO_LE16(minsrevs->nvm & 0xFFFF);
+ data.nvm_minsrev_h = IXGBE_CPU_TO_LE16(minsrevs->nvm >> 16);
+ data.validity |=
+ IXGBE_CPU_TO_LE16(IXGBE_ACI_NVM_MINSREV_NVM_VALID);
+ }
+
+ if (minsrevs->orom_valid) {
+ data.orom_minsrev_l = IXGBE_CPU_TO_LE16(minsrevs->orom & 0xFFFF);
+ data.orom_minsrev_h = IXGBE_CPU_TO_LE16(minsrevs->orom >> 16);
+ data.validity |=
+ IXGBE_CPU_TO_LE16(IXGBE_ACI_NVM_MINSREV_OROM_VALID);
+ }
+
+ /* Update flash data */
+ status = ixgbe_aci_update_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID, 0,
+ sizeof(data), &data, false,
+ IXGBE_ACI_NVM_SPECIAL_UPDATE);
+ if (status)
+ goto exit_release_res;
+
+ /* Dump the Shadow RAM to the flash */
+ status = ixgbe_nvm_write_activate(hw, 0, NULL);
+
+exit_release_res:
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_nvm_srev - Read the security revision from the NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @srev: storage for security revision
+ *
+ * Read the security revision out of the CSS header of the requested NVM
+ * module bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_nvm_srev(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank, u32 *srev)
+{
+ u16 srev_l, srev_h;
+ s32 status;
+
+ status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_L, &srev_l);
+ if (status)
+ return status;
+
+ status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_H, &srev_h);
+ if (status)
+ return status;
+
+ *srev = srev_h << 16 | srev_l;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_nvm_ver_info - Read NVM version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @nvm: pointer to NVM info structure
+ *
+ * Read the NVM EETRACK ID and map version of the main NVM image bank, filling
+ * in the nvm info structure.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_nvm_ver_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_nvm_info *nvm)
+{
+ u16 eetrack_lo, eetrack_hi, ver;
+ s32 status;
+
+ status = ixgbe_read_nvm_sr_copy(hw, bank,
+ E610_SR_NVM_DEV_STARTER_VER, &ver);
+ if (status) {
+ return status;
+ }
+
+ nvm->major = (ver & E610_NVM_VER_HI_MASK) >> E610_NVM_VER_HI_SHIFT;
+ nvm->minor = (ver & E610_NVM_VER_LO_MASK) >> E610_NVM_VER_LO_SHIFT;
+
+ status = ixgbe_read_nvm_sr_copy(hw, bank, E610_SR_NVM_EETRACK_LO,
+ &eetrack_lo);
+ if (status) {
+ return status;
+ }
+ status = ixgbe_read_nvm_sr_copy(hw, bank, E610_SR_NVM_EETRACK_HI,
+ &eetrack_hi);
+ if (status) {
+ return status;
+ }
+
+ nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
+
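+ /* A failure to read the security revision is not treated as fatal;
+ * the version information gathered above is still returned.
+ */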
+ status = ixgbe_get_nvm_srev(hw, bank, &nvm->srev);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_inactive_nvm_ver - Read NVM version from the inactive bank
+ * @hw: pointer to the HW structure
+ * @nvm: storage for NVM version information
+ *
+ * Reads the NVM EETRACK ID, Map version, and security revision of the
+ * inactive NVM bank. Used to access version data for a pending update that
+ * has not yet been activated.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
+{
+ return ixgbe_get_nvm_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, nvm);
+}
+
+/**
+ * ixgbe_get_active_nvm_ver - Read NVM version from the active bank
+ * @hw: pointer to the HW structure
+ * @nvm: storage for NVM version information
+ *
+ * Reads the NVM EETRACK ID, Map version, and security revision of the
+ * active NVM bank.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_active_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
+{
+ return ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK, nvm);
+}
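+
+/* Example (illustrative only, not part of the driver): fetch and log the
+ * running NVM version via the helper above, assuming the DEBUGOUT3()
+ * logging macro from the osdep layer.
+ */
+static void ixgbe_example_log_nvm_ver(struct ixgbe_hw *hw)
+{
+ struct ixgbe_nvm_info nvm = { 0 };
+
+ if (ixgbe_get_active_nvm_ver(hw, &nvm) == IXGBE_SUCCESS)
+ DEBUGOUT3("NVM %u.%02u, EETRACK 0x%08x\n",
+ nvm.major, nvm.minor, nvm.eetrack);
+}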
+
+/**
+ * ixgbe_get_netlist_info - Read the netlist version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @netlist: pointer to netlist version info structure
+ *
+ * Get the netlist version information from the requested bank. Reads the Link
+ * Topology section to find the Netlist ID block and extract the relevant
+ * information into the netlist version structure.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_netlist_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_netlist_info *netlist)
+{
+ u16 module_id, length, node_count, i;
+ u16 *id_blk;
+ s32 status;
+
+ status = ixgbe_read_netlist_module(hw, bank, IXGBE_NETLIST_TYPE_OFFSET,
+ &module_id);
+ if (status)
+ return status;
+
+ if (module_id != IXGBE_NETLIST_LINK_TOPO_MOD_ID) {
+ return IXGBE_ERR_NVM;
+ }
+
+ status = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_MODULE_LEN,
+ &length);
+ if (status)
+ return status;
+
+ /* sanity check that we have at least enough words to store the
+ * netlist ID block
+ */
+ if (length < IXGBE_NETLIST_ID_BLK_SIZE) {
+ return IXGBE_ERR_NVM;
+ }
+
+ status = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_NODE_COUNT,
+ &node_count);
+ if (status)
+ return status;
+ node_count &= IXGBE_LINK_TOPO_NODE_COUNT_M;
+
+ id_blk = (u16 *)ixgbe_calloc(hw, IXGBE_NETLIST_ID_BLK_SIZE,
+ sizeof(*id_blk));
+ if (!id_blk)
+ return IXGBE_ERR_NO_SPACE;
+
+ /* Read out the entire Netlist ID Block at once. */
+ status = ixgbe_read_flash_module(hw, bank, E610_SR_NETLIST_BANK_PTR,
+ IXGBE_NETLIST_ID_BLK_OFFSET(node_count) * sizeof(u16),
+ (u8 *)id_blk,
+ IXGBE_NETLIST_ID_BLK_SIZE * sizeof(u16));
+ if (status)
+ goto exit_error;
+
+ for (i = 0; i < IXGBE_NETLIST_ID_BLK_SIZE; i++)
+ id_blk[i] = IXGBE_LE16_TO_CPU(((__le16 *)id_blk)[i]);
+
+ netlist->major = id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW];
+ netlist->minor = id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW];
+ netlist->type = id_blk[IXGBE_NETLIST_ID_BLK_TYPE_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_TYPE_LOW];
+ netlist->rev = id_blk[IXGBE_NETLIST_ID_BLK_REV_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_REV_LOW];
+ netlist->cust_ver = id_blk[IXGBE_NETLIST_ID_BLK_CUST_VER];
+ /* Read the leftmost 4 bytes of SHA */
+ netlist->hash = id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(14)];
+
+exit_error:
+ ixgbe_free(hw, id_blk);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_inactive_netlist_ver - Read netlist version from the inactive bank
+ * @hw: pointer to the HW struct
+ * @netlist: pointer to netlist version info structure
+ *
+ * Read the netlist version data from the inactive netlist bank. Used to
+ * extract version data of a pending flash update in order to display the
+ * version data.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw,
+ struct ixgbe_netlist_info *netlist)
+{
+ return ixgbe_get_netlist_info(hw, IXGBE_INACTIVE_FLASH_BANK, netlist);
+}
+
+/**
+ * ixgbe_read_sr_pointer - Read the value of a Shadow RAM pointer word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM word to read
+ * @pointer: pointer value read from Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to a pointer value specified
+ * in bytes. This function assumes the specified offset is a valid pointer
+ * word.
+ *
+ * Each pointer word specifies whether it is stored in word size or 4KB
+ * sector size by using the highest bit. The reported pointer value will be in
+ * bytes, intended for flat NVM reads.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_sr_pointer(struct ixgbe_hw *hw, u16 offset, u32 *pointer)
+{
+ s32 status;
+ u16 value;
+
+ status = ixgbe_read_ee_aci_E610(hw, offset, &value);
+ if (status)
+ return status;
+
+ /* Determine if the pointer is in 4KB or word units */
+ if (value & IXGBE_SR_NVM_PTR_4KB_UNITS)
+ *pointer = (value & ~IXGBE_SR_NVM_PTR_4KB_UNITS) * 4 * 1024;
+ else
+ *pointer = value * 2;
+
+ return IXGBE_SUCCESS;
+}
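+
+/* Worked example for the unit handling above: a raw word of 0x8002 has the
+ * highest (4KB-units) bit set, so the pointer is 2 * 4096 = 0x2000 bytes; a
+ * raw word of 0x0100 is in word units, so the pointer is 0x100 * 2 = 0x200
+ * bytes.
+ */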
+
+/**
+ * ixgbe_read_sr_area_size - Read an area size from a Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM to read
+ * @size: size value read from the Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to an area size value
+ * specified in bytes. This function assumes the specified offset is a valid
+ * area size word.
+ *
+ * Each area size word is specified in 4KB sector units. This function reports
+ * the size in bytes, intended for flat NVM reads.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_sr_area_size(struct ixgbe_hw *hw, u16 offset, u32 *size)
+{
+ s32 status;
+ u16 value;
+
+ status = ixgbe_read_ee_aci_E610(hw, offset, &value);
+ if (status)
+ return status;
+
+ /* Area sizes are always specified in 4KB units */
+ *size = value * 4 * 1024;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_discover_flash_size - Discover the available flash size.
+ * @hw: pointer to the HW struct
+ *
+ * The device flash could be up to 16MB in size. However, it is possible that
+ * the actual size is smaller. Use bisection to determine the accessible size
+ * of flash memory.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_discover_flash_size(struct ixgbe_hw *hw)
+{
+ u32 min_size = 0, max_size = IXGBE_ACI_NVM_MAX_OFFSET + 1;
+ s32 status;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ while ((max_size - min_size) > 1) {
+ u32 offset = (max_size + min_size) / 2;
+ u32 len = 1;
+ u8 data;
+
+ status = ixgbe_read_flat_nvm(hw, offset, &len, &data, false);
+ if (status == IXGBE_ERR_ACI_ERROR &&
+ hw->aci.last_status == IXGBE_ACI_RC_EINVAL) {
+ status = IXGBE_SUCCESS;
+ max_size = offset;
+ } else if (!status) {
+ min_size = offset;
+ } else {
+ /* an unexpected error occurred */
+ goto err_read_flat_nvm;
+ }
+ }
+
+ hw->flash.flash_size = max_size;
+
+err_read_flat_nvm:
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_determine_active_flash_banks - Discover active bank for each module
+ * @hw: pointer to the HW struct
+ *
+ * Read the Shadow RAM control word and determine which banks are active for
+ * the NVM, OROM, and Netlist modules. Also read and calculate the associated
+ * pointer and size. These values are then cached into the ixgbe_flash_info
+ * structure for later use in order to calculate the correct offset to read
+ * from the active module.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_determine_active_flash_banks(struct ixgbe_hw *hw)
+{
+ struct ixgbe_bank_info *banks = &hw->flash.banks;
+ u16 ctrl_word;
+ s32 status;
+
+ status = ixgbe_read_ee_aci_E610(hw, E610_SR_NVM_CTRL_WORD, &ctrl_word);
+ if (status) {
+ return status;
+ }
+
+ /* Check that the control word indicates validity */
+ if ((ctrl_word & IXGBE_SR_CTRL_WORD_1_M) >> IXGBE_SR_CTRL_WORD_1_S !=
+ IXGBE_SR_CTRL_WORD_VALID) {
+ return IXGBE_ERR_CONFIG;
+ }
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NVM_BANK))
+ banks->nvm_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->nvm_bank = IXGBE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_OROM_BANK))
+ banks->orom_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->orom_bank = IXGBE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NETLIST_BANK))
+ banks->netlist_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->netlist_bank = IXGBE_2ND_FLASH_BANK;
+
+ status = ixgbe_read_sr_pointer(hw, E610_SR_1ST_NVM_BANK_PTR,
+ &banks->nvm_ptr);
+ if (status) {
+ return status;
+ }
+
+ status = ixgbe_read_sr_area_size(hw, E610_SR_NVM_BANK_SIZE,
+ &banks->nvm_size);
+ if (status) {
+ return status;
+ }
+
+ status = ixgbe_read_sr_pointer(hw, E610_SR_1ST_OROM_BANK_PTR,
+ &banks->orom_ptr);
+ if (status) {
+ return status;
+ }
+
+ status = ixgbe_read_sr_area_size(hw, E610_SR_OROM_BANK_SIZE,
+ &banks->orom_size);
+ if (status) {
+ return status;
+ }
+
+ status = ixgbe_read_sr_pointer(hw, E610_SR_NETLIST_BANK_PTR,
+ &banks->netlist_ptr);
+ if (status) {
+ return status;
+ }
+
+ status = ixgbe_read_sr_area_size(hw, E610_SR_NETLIST_BANK_SIZE,
+ &banks->netlist_size);
+ if (status) {
+ return status;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_nvm - initialize NVM settings
+ * @hw: pointer to the HW struct
+ *
+ * Read and populate NVM settings such as Shadow RAM size,
+ * max_timeout, and blank_nvm_mode.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_init_nvm(struct ixgbe_hw *hw)
+{
+ struct ixgbe_flash_info *flash = &hw->flash;
+ u32 fla, gens_stat;
+ s32 status;
+ u8 sr_size;
+
+ /* The SR size is stored regardless of the NVM programming mode
+ * as the blank mode may be used in the factory line.
+ */
+ gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
+ sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;
+
+ /* Switching to words (sr_size contains a power of 2) */
+ flash->sr_words = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
+
+ /* Check if we are in the normal or blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, GLNVM_FLA);
+ if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
+ flash->blank_nvm_mode = false;
+ } else {
+ /* Blank programming mode */
+ flash->blank_nvm_mode = true;
+ return IXGBE_ERR_NVM_BLANK_MODE;
+ }
+
+ status = ixgbe_discover_flash_size(hw);
+ if (status) {
+ return status;
+ }
+
+ status = ixgbe_determine_active_flash_banks(hw);
+ if (status) {
+ return status;
+ }
+
+ status = ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+ &flash->nvm);
+ if (status) {
+ return status;
+ }
+
+ /* Read the netlist version information; a failure here is not fatal. */
+ status = ixgbe_get_netlist_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+ &flash->netlist);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_sanitize_operate - Clear the user data
+ * @hw: pointer to the HW struct
+ *
+ * Clear user data from NVM using ACI command (0x070C).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_sanitize_operate(struct ixgbe_hw *hw)
+{
+ u8 cmd_flags = IXGBE_ACI_SANITIZE_REQ_OPERATE |
+ IXGBE_ACI_SANITIZE_OPERATE_SUBJECT_CLEAR;
+ s32 status;
+ u8 values;
+
+ status = ixgbe_sanitize_nvm(hw, cmd_flags, &values);
+ if (status)
+ return status;
+ if ((!(values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
+ !(values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE)) ||
+ ((values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
+ !(values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_SUCCESS)) ||
+ ((values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE) &&
+ !(values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_SUCCESS)))
+ return IXGBE_ERR_ACI_ERROR;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_sanitize_nvm - Sanitize NVM
+ * @hw: pointer to the HW struct
+ * @cmd_flags: flags for the ACI command
+ * @values: values returned from the command
+ *
+ * Sanitize NVM using ACI command (0x070C).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_sanitize_nvm(struct ixgbe_hw *hw, u8 cmd_flags, u8 *values)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm_sanitization *cmd;
+ s32 status;
+
+ cmd = &desc.params.nvm_sanitization;
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_sanitization);
+ cmd->cmd_flags = cmd_flags;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (values)
+ *values = cmd->values;
+
+ return status;
+}
+
+/**
+ * ixgbe_read_sr_word_aci - Reads a Shadow RAM word via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16-bit word from the Shadow RAM using ixgbe_read_flat_nvm.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ u32 bytes = sizeof(u16);
+ __le16 data_local;
+ s32 status;
+
+ status = ixgbe_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
+ (u8 *)&data_local, true);
+ if (status)
+ return status;
+
+ *data = IXGBE_LE16_TO_CPU(data_local);
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_sr_buf_aci - Reads Shadow RAM buf via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16-bit words (data buf) from the Shadow RAM. NVM ownership must be
+ * acquired by the caller before reading and released afterwards.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
+ u16 *data)
+{
+ u32 bytes = *words * 2, i;
+ s32 status;
+
+ status = ixgbe_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
+
+ *words = bytes / 2;
+
+ for (i = 0; i < *words; i++)
+ data[i] = IXGBE_LE16_TO_CPU(((__le16 *)data)[i]);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_flat_nvm - Read portion of NVM by flat offset
+ * @hw: pointer to the HW struct
+ * @offset: offset from beginning of NVM
+ * @length: (in) number of bytes to read; (out) number of bytes actually read
+ * @data: buffer to return data in (sized to fit the specified length)
+ * @read_shadow_ram: if true, read from shadow RAM instead of NVM
+ *
+ * Reads a portion of the NVM, as a flat memory space. This function correctly
+ * breaks read requests across Shadow RAM sectors, prevents Shadow RAM size
+ * from being exceeded in case of Shadow RAM read requests and ensures that no
+ * single read request exceeds the maximum 4KB read for a single admin command.
+ *
+ * Returns a status code on failure. Note that the data pointer may be
+ * partially updated if some reads succeed before a failure.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+ u8 *data, bool read_shadow_ram)
+{
+ u32 inlen = *length;
+ u32 bytes_read = 0;
+ bool last_cmd;
+ s32 status;
+
+ *length = 0;
+
+ /* Verify the length of the read if this is for the Shadow RAM */
+ if (read_shadow_ram && ((offset + inlen) >
+ (hw->eeprom.word_size * 2u))) {
+ return IXGBE_ERR_PARAM;
+ }
+
+ do {
+ u32 read_size, sector_offset;
+
+ /* ixgbe_aci_read_nvm cannot read more than 4KB at a time.
+ * Additionally, a read from the Shadow RAM may not cross over
+ * a sector boundary. Conveniently, the sector size is also 4KB.
+ */
+ sector_offset = offset % IXGBE_ACI_MAX_BUFFER_SIZE;
+ read_size = MIN_T(u32,
+ IXGBE_ACI_MAX_BUFFER_SIZE - sector_offset,
+ inlen - bytes_read);
+
+ last_cmd = (bytes_read + read_size >= inlen);
+
+ /* ixgbe_aci_read_nvm takes the length as a u16. Our read_size
+ * is calculated using a u32, but the IXGBE_ACI_MAX_BUFFER_SIZE
+ * maximum size guarantees that it will fit within the 2 bytes.
+ */
+ status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT,
+ offset, (u16)read_size,
+ data + bytes_read, last_cmd,
+ read_shadow_ram);
+ if (status)
+ break;
+
+ bytes_read += read_size;
+ offset += read_size;
+ } while (!last_cmd);
+
+ *length = bytes_read;
+ return status;
+}
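+
+/* Example (illustrative only, not part of the driver): read 64 bytes of
+ * flash at an arbitrary offset, taking NVM ownership around the read just
+ * as ixgbe_read_flash_module() does above.
+ */
+static s32 ixgbe_example_read_flash(struct ixgbe_hw *hw, u32 offset, u8 *buf)
+{
+ u32 len = 64;
+ s32 status;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_read_flat_nvm(hw, offset, &len, buf, false);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}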
+
+/**
+ * ixgbe_check_sr_access_params - verify params for Shadow RAM R/W operations.
+ * @hw: pointer to the HW structure
+ * @offset: offset in words from module start
+ * @words: number of words to access
+ *
+ * Check if all the parameters are valid
+ * before performing any Shadow RAM read/write operations.
+ *
+ * Return: the exit code of the operation.
+ * * IXGBE_SUCCESS - success.
+ * * IXGBE_ERR_PARAM - NVM error: offset beyond SR limit,
+ *   tried to access more words than the per-command limit,
+ *   or the access would spread over two sectors.
+ */
+static s32 ixgbe_check_sr_access_params(struct ixgbe_hw *hw, u32 offset,
+ u16 words)
+{
+ if ((offset + words) > hw->eeprom.word_size) {
+ return IXGBE_ERR_PARAM;
+ }
+
+ if (words > IXGBE_SR_SECTOR_SIZE_IN_WORDS) {
+ /* We can access only up to 4KB (one sector),
+ * in one Admin Command write
+ */
+ return IXGBE_ERR_PARAM;
+ }
+
+ if (((offset + (words - 1)) / IXGBE_SR_SECTOR_SIZE_IN_WORDS) !=
+ (offset / IXGBE_SR_SECTOR_SIZE_IN_WORDS)) {
+ /* A single access cannot spread over two sectors */
+ return IXGBE_ERR_PARAM;
+ }
+
+ return IXGBE_SUCCESS;
+}
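+
+/* Illustrative arithmetic for the sector check above, with 4KB sectors of
+ * 16 bit words (i.e. an IXGBE_SR_SECTOR_SIZE_IN_WORDS of 2048): an access
+ * at offset = 2047 with words = 2 is rejected, since
+ * (2047 + 1) / 2048 == 1 differs from 2047 / 2048 == 0 -- the access
+ * would straddle a sector boundary. The same access at offset = 2046
+ * passes because both divisions yield 0.
+ */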
+
+/**
+ * ixgbe_write_sr_word_aci - Writes Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to write
+ * @data: word to write to the Shadow RAM
+ *
+ * Writes a 16 bit word to the Shadow RAM using the admin command.
+ * NVM ownership must be acquired before calling this function and released
+ * by the caller. To commit the Shadow RAM to NVM, the update checksum
+ * function must be called afterwards.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_write_sr_word_aci(struct ixgbe_hw *hw, u32 offset, const u16 *data)
+{
+ __le16 data_local = IXGBE_CPU_TO_LE16(*data);
+ s32 status;
+
+ status = ixgbe_check_sr_access_params(hw, offset, 1);
+ if (!status)
+ status = ixgbe_aci_update_nvm(hw, 0, BYTES_PER_WORD * offset,
+ BYTES_PER_WORD, &data_local,
+ false, 0);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_sr_buf_aci - Writes Shadow RAM buf
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM buffer to write
+ * @words: number of words to write
+ * @data: words to write to the Shadow RAM
+ *
+ * Writes 16 bit words (data buf) to the Shadow RAM using the admin command.
+ * NVM ownership must be acquired before calling this function and released
+ * by the caller. To commit the Shadow RAM to NVM, the update checksum
+ * function must be called afterwards.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_write_sr_buf_aci(struct ixgbe_hw *hw, u32 offset, u16 words,
+ const u16 *data)
+{
+ __le16 *data_local;
+ s32 status;
+ void *vmem;
+ u32 i;
+
+ vmem = ixgbe_calloc(hw, words, sizeof(u16));
+ if (!vmem)
+ return IXGBE_ERR_OUT_OF_MEM;
+ data_local = (__le16 *)vmem;
+
+ for (i = 0; i < words; i++)
+ data_local[i] = IXGBE_CPU_TO_LE16(data[i]);
+
+ /* Here we will only write one buffer as the size of the modules
+ * mirrored in the Shadow RAM is always less than 4K.
+ */
+ status = ixgbe_check_sr_access_params(hw, offset, words);
+ if (!status)
+ status = ixgbe_aci_update_nvm(hw, 0, BYTES_PER_WORD * offset,
+ BYTES_PER_WORD * words,
+ data_local, false, 0);
+
+ ixgbe_free(hw, vmem);
+
+ return status;
+}
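+
+/* A minimal write-and-commit sketch using the helpers above and the E610
+ * EEPROM ops defined later in this file (error handling abbreviated):
+ *
+ *   u16 word = 0x1234;
+ *   status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ *   if (!status) {
+ *           status = ixgbe_write_sr_word_aci(hw, offset, &word);
+ *           ixgbe_release_nvm(hw);
+ *   }
+ *   // commit the Shadow RAM to flash and refresh the checksum
+ *   if (!status)
+ *           status = ixgbe_update_eeprom_checksum_E610(hw);
+ */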
+
+/**
+ * ixgbe_aci_alternate_write - write to alternate structure
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be written
+ * @reg_val0: value to be written under 'reg_addr0'
+ * @reg_addr1: address of second dword to be written
+ * @reg_val1: value to be written under 'reg_addr1'
+ *
+ * Write one or two dwords to alternate structure using ACI command (0x0900).
+ * Fields are indicated by 'reg_addr0' and 'reg_addr1' register numbers.
+ *
+ * Return: 0 on success and error code on failure.
+ */
+s32 ixgbe_aci_alternate_write(struct ixgbe_hw *hw, u32 reg_addr0,
+ u32 reg_val0, u32 reg_addr1, u32 reg_val1)
+{
+ struct ixgbe_aci_cmd_read_write_alt_direct *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.read_write_alt_direct;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_write_alt_direct);
+ cmd->dword0_addr = IXGBE_CPU_TO_LE32(reg_addr0);
+ cmd->dword1_addr = IXGBE_CPU_TO_LE32(reg_addr1);
+ cmd->dword0_value = IXGBE_CPU_TO_LE32(reg_val0);
+ cmd->dword1_value = IXGBE_CPU_TO_LE32(reg_val1);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_alternate_read - read from alternate structure
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be read
+ * @reg_val0: pointer for data read from 'reg_addr0'
+ * @reg_addr1: address of second dword to be read
+ * @reg_val1: pointer for data read from 'reg_addr1'
+ *
+ * Read one or two dwords from alternate structure using ACI command (0x0902).
+ * Fields are indicated by 'reg_addr0' and 'reg_addr1' register numbers.
+ * If 'reg_val1' pointer is not passed then only register at 'reg_addr0'
+ * is read.
+ *
+ * Return: 0 on success and error code on failure.
+ */
+s32 ixgbe_aci_alternate_read(struct ixgbe_hw *hw, u32 reg_addr0,
+ u32 *reg_val0, u32 reg_addr1, u32 *reg_val1)
+{
+ struct ixgbe_aci_cmd_read_write_alt_direct *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.read_write_alt_direct;
+
+ if (!reg_val0)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_alt_direct);
+ cmd->dword0_addr = IXGBE_CPU_TO_LE32(reg_addr0);
+ cmd->dword1_addr = IXGBE_CPU_TO_LE32(reg_addr1);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ if (status == IXGBE_SUCCESS) {
+ *reg_val0 = IXGBE_LE32_TO_CPU(cmd->dword0_value);
+
+ if (reg_val1)
+ *reg_val1 = IXGBE_LE32_TO_CPU(cmd->dword1_value);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_alternate_write_done - check if writing to alternate structure
+ * is done
+ * @hw: pointer to the HW structure.
+ * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
+ * @reset_needed: indicates the SW should trigger GLOBAL reset
+ *
+ * Indicates to the FW that alternate structures have been changed.
+ *
+ * Return: 0 on success and error code on failure.
+ */
+s32 ixgbe_aci_alternate_write_done(struct ixgbe_hw *hw, u8 bios_mode,
+ bool *reset_needed)
+{
+ struct ixgbe_aci_cmd_done_alt_write *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.done_alt_write;
+
+ if (!reset_needed)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_done_alt_write);
+ cmd->flags = bios_mode;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!status)
+ *reset_needed = (IXGBE_LE16_TO_CPU(cmd->flags) &
+ IXGBE_ACI_RESP_RESET_NEEDED) != 0;
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_alternate_clear - clear alternate structure
+ * @hw: pointer to the HW structure.
+ *
+ * Clear the alternate structures of the port from which the function
+ * is called.
+ *
+ * Return: 0 on success and error code on failure.
+ */
+s32 ixgbe_aci_alternate_clear(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_clear_port_alt_write);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_get_internal_data - get internal FW/HW data
+ * @hw: pointer to the hardware structure
+ * @cluster_id: specific cluster to dump
+ * @table_id: table ID within cluster
+ * @start: index of line in the block to read
+ * @buf: dump buffer
+ * @buf_size: dump buffer size
+ * @ret_buf_size: return buffer size (returned by FW)
+ * @ret_next_cluster: next cluster to read (returned by FW)
+ * @ret_next_table: next block to read (returned by FW)
+ * @ret_next_index: next index to read (returned by FW)
+ *
+ * Get internal FW/HW data using ACI command (0xFF08) for debug purposes.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_internal_data(struct ixgbe_hw *hw, u16 cluster_id,
+ u16 table_id, u32 start, void *buf,
+ u16 buf_size, u16 *ret_buf_size,
+ u16 *ret_next_cluster, u16 *ret_next_table,
+ u32 *ret_next_index)
+{
+ struct ixgbe_aci_cmd_debug_dump_internals *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.debug_dump;
+
+ if (buf_size == 0 || !buf)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_debug_dump_internals);
+
+ cmd->cluster_id = IXGBE_CPU_TO_LE16(cluster_id);
+ cmd->table_id = IXGBE_CPU_TO_LE16(table_id);
+ cmd->idx = IXGBE_CPU_TO_LE32(start);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
+
+ if (!status) {
+ if (ret_buf_size)
+ *ret_buf_size = IXGBE_LE16_TO_CPU(desc.datalen);
+ if (ret_next_cluster)
+ *ret_next_cluster = IXGBE_LE16_TO_CPU(cmd->cluster_id);
+ if (ret_next_table)
+ *ret_next_table = IXGBE_LE16_TO_CPU(cmd->table_id);
+ if (ret_next_index)
+ *ret_next_index = IXGBE_LE32_TO_CPU(cmd->idx);
+ }
+
+ return status;
+}
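+
+/* A dump may span several commands; the ret_next_* outputs seed the next
+ * call. A minimal iteration sketch, assuming the FW signals completion of
+ * a table by advancing ret_next_table (buffer handling and cluster/table
+ * bookkeeping elided):
+ *
+ *   u16 ret_len, next_cluster, next_table = table_id;
+ *   u32 idx = 0;
+ *   do {
+ *           status = ixgbe_aci_get_internal_data(hw, cluster_id, table_id,
+ *                                                idx, buf, buf_size,
+ *                                                &ret_len, &next_cluster,
+ *                                                &next_table, &idx);
+ *   } while (!status && next_table == table_id);
+ */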
+
+/**
+ * ixgbe_validate_nvm_rw_reg - Check that an NVM access request is valid
+ * @cmd: NVM access command structure
+ *
+ * Validates that an NVM access structure is a request to read or write a valid
+ * register offset. First validates that the module and flags are correct, and
+ * then ensures that the register offset is one of the accepted registers.
+ *
+ * Return: 0 if the register access is valid, out of range error code otherwise.
+ */
+static s32
+ixgbe_validate_nvm_rw_reg(struct ixgbe_nvm_access_cmd *cmd)
+{
+ u16 i;
+
+ switch (cmd->offset) {
+ case GL_HICR:
+ case GL_HICR_EN: /* Note, this register is read only */
+ case GL_FWSTS:
+ case GL_MNG_FWSM:
+ case GLNVM_GENS:
+ case GLNVM_FLA:
+ case GL_FWRESETCNT:
+ return 0;
+ default:
+ break;
+ }
+
+ for (i = 0; i <= GL_HIDA_MAX_INDEX; i++)
+ if (cmd->offset == (u32)GL_HIDA(i))
+ return 0;
+
+ for (i = 0; i <= GL_HIBA_MAX_INDEX; i++)
+ if (cmd->offset == (u32)GL_HIBA(i))
+ return 0;
+
+ /* All other register offsets are not valid */
+ return IXGBE_ERR_OUT_OF_RANGE;
+}
+
+/**
+ * ixgbe_nvm_access_read - Handle an NVM read request
+ * @hw: pointer to the HW struct
+ * @cmd: NVM access command to process
+ * @data: storage for the register value read
+ *
+ * Process an NVM access request to read a register.
+ *
+ * Return: 0 if the register read is valid and successful,
+ * out of range error code otherwise.
+ */
+static s32 ixgbe_nvm_access_read(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_access_cmd *cmd,
+ struct ixgbe_nvm_access_data *data)
+{
+ s32 status;
+
+ /* Always initialize the output data, even on failure */
+ memset(&data->regval, 0, cmd->data_size);
+
+ /* Make sure this is a valid read/write access request */
+ status = ixgbe_validate_nvm_rw_reg(cmd);
+ if (status)
+ return status;
+
+ DEBUGOUT1("NVM access: reading register %08x\n", cmd->offset);
+
+ /* Read the register and store the contents in the data field */
+ data->regval = IXGBE_READ_REG(hw, cmd->offset);
+
+ return 0;
+}
+
+/**
+ * ixgbe_nvm_access_write - Handle an NVM write request
+ * @hw: pointer to the HW struct
+ * @cmd: NVM access command to process
+ * @data: NVM access data to write
+ *
+ * Process an NVM access request to write a register.
+ *
+ * Return: 0 if the register write is valid and successful,
+ * out of range error code otherwise.
+ */
+static s32 ixgbe_nvm_access_write(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_access_cmd *cmd,
+ struct ixgbe_nvm_access_data *data)
+{
+ s32 status;
+
+ /* Make sure this is a valid read/write access request */
+ status = ixgbe_validate_nvm_rw_reg(cmd);
+ if (status)
+ return status;
+
+ /* Reject requests to write to read-only registers */
+ switch (cmd->offset) {
+ case GL_HICR_EN:
+ return IXGBE_ERR_OUT_OF_RANGE;
+ default:
+ break;
+ }
+
+ DEBUGOUT2("NVM access: writing register %08x with value %08x\n",
+ cmd->offset, data->regval);
+
+ /* Write the data field to the specified register */
+ IXGBE_WRITE_REG(hw, cmd->offset, data->regval);
+
+ return 0;
+}
+
+/**
+ * ixgbe_handle_nvm_access - Handle an NVM access request
+ * @hw: pointer to the HW struct
+ * @cmd: NVM access command info
+ * @data: pointer to read or return data
+ *
+ * Process an NVM access request. Read the command structure information and
+ * determine if it is valid. If not, report an error indicating the command
+ * was invalid.
+ *
+ * For valid commands, perform the necessary function, copying the data into
+ * the provided data buffer.
+ *
+ * Return: 0 if the nvm access request is valid and successful,
+ * error code otherwise.
+ */
+s32 ixgbe_handle_nvm_access(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_access_cmd *cmd,
+ struct ixgbe_nvm_access_data *data)
+{
+ switch (cmd->command) {
+ case IXGBE_NVM_CMD_READ:
+ return ixgbe_nvm_access_read(hw, cmd, data);
+ case IXGBE_NVM_CMD_WRITE:
+ return ixgbe_nvm_access_write(hw, cmd, data);
+ default:
+ return IXGBE_ERR_PARAM;
+ }
+}
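+
+/* A minimal read-request sketch, assuming the command/offset/data_size
+ * fields of struct ixgbe_nvm_access_cmd used above:
+ *
+ *   struct ixgbe_nvm_access_cmd cmd = {
+ *           .command = IXGBE_NVM_CMD_READ,
+ *           .offset = GL_FWSTS,
+ *           .data_size = sizeof(u32),
+ *   };
+ *   struct ixgbe_nvm_access_data data;
+ *   s32 status = ixgbe_handle_nvm_access(hw, &cmd, &data);
+ *   // on success, data.regval holds the GL_FWSTS register contents
+ */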
+
+/**
+ * ixgbe_aci_set_health_status_config - Configure FW health events
+ * @hw: pointer to the HW struct
+ * @event_source: type of diagnostic events to enable
+ *
+ * Configure the health status event types that the firmware will send to this
+ * PF using ACI command (0xFF20). The supported event types are: PF-specific,
+ * all PFs, and global.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_health_status_config(struct ixgbe_hw *hw, u8 event_source)
+{
+ struct ixgbe_aci_cmd_set_health_status_config *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.set_health_status_config;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_set_health_status_config);
+
+ cmd->event_source = event_source;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_init_ops_E610 - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for E610.
+ * Does not touch the hardware.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_init_ops_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ ret_val = ixgbe_init_ops_X550(hw);
+
+ /* MAC */
+ mac->ops.reset_hw = ixgbe_reset_hw_E610;
+ mac->ops.start_hw = ixgbe_start_hw_E610;
+ mac->ops.get_media_type = ixgbe_get_media_type_E610;
+ mac->ops.get_supported_physical_layer =
+ ixgbe_get_supported_physical_layer_E610;
+ mac->ops.get_san_mac_addr = NULL;
+ mac->ops.set_san_mac_addr = NULL;
+ mac->ops.get_wwn_prefix = NULL;
+ mac->ops.setup_link = ixgbe_setup_link_E610;
+ mac->ops.check_link = ixgbe_check_link_E610;
+ mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_E610;
+ mac->ops.setup_fc = ixgbe_setup_fc_E610;
+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_E610;
+ mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_E610;
+ mac->ops.disable_rx = ixgbe_disable_rx_E610;
+ mac->ops.setup_eee = ixgbe_setup_eee_E610;
+ mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_E610;
+ mac->ops.fw_rollback_mode = ixgbe_fw_rollback_mode_E610;
+ mac->ops.get_fw_tsam_mode = ixgbe_get_fw_tsam_mode_E610;
+ mac->ops.get_fw_version = ixgbe_aci_get_fw_ver;
+ mac->ops.get_nvm_version = ixgbe_get_active_nvm_ver;
+ mac->ops.get_thermal_sensor_data = NULL;
+ mac->ops.init_thermal_sensor_thresh = NULL;
+
+ /* PHY */
+ phy->ops.init = ixgbe_init_phy_ops_E610;
+ phy->ops.identify = ixgbe_identify_phy_E610;
+ phy->eee_speeds_supported = IXGBE_LINK_SPEED_10_FULL |
+ IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ phy->eee_speeds_advertised = phy->eee_speeds_supported;
+
+ /* Additional ops overrides for e610 to go here */
+ eeprom->ops.init_params = ixgbe_init_eeprom_params_E610;
+ eeprom->ops.read = ixgbe_read_ee_aci_E610;
+ eeprom->ops.read_buffer = ixgbe_read_ee_aci_buffer_E610;
+ eeprom->ops.write = ixgbe_write_ee_aci_E610;
+ eeprom->ops.write_buffer = ixgbe_write_ee_aci_buffer_E610;
+ eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_E610;
+ eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_E610;
+ eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_E610;
+ eeprom->ops.read_pba_string = ixgbe_read_pba_string_E610;
+
+ /* Initialize bus function number */
+ hw->mac.ops.set_lan_id(hw);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_reset_hw_E610 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, and performs a reset.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_reset_hw_E610(struct ixgbe_hw *hw)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u32 ctrl, i;
+ s32 status;
+
+ DEBUGFUNC("ixgbe_reset_hw_E610");
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != IXGBE_SUCCESS)
+ goto reset_hw_out;
+
+ /* flush pending Tx transactions */
+ ixgbe_clear_tx_pending(hw);
+
+ status = hw->phy.ops.init(hw);
+ if (status != IXGBE_SUCCESS)
+ DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
+ status);
+mac_reset_top:
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status != IXGBE_SUCCESS) {
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d", status);
+ return IXGBE_ERR_SWFW_SYNC;
+ }
+ ctrl = IXGBE_CTRL_RST;
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ usec_delay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ status = IXGBE_ERR_RESET_FAILED;
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "Reset polling failed to complete.\n");
+ }
+ msec_delay(100);
+
+ /*
+ * Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /* Set the Rx packet buffer size. */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = 128;
+ hw->mac.ops.init_rx_addrs(hw);
+
+reset_hw_out:
+ return status;
+}
+
+/**
+ * ixgbe_start_hw_E610 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Gets the firmware version and, if it is compatible, starts the hardware
+ * using the generic start_hw function followed by the generation-2
+ * start_hw function. Then performs revision-specific operations, if any.
+ **/
+s32 ixgbe_start_hw_E610(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+
+ ret_val = hw->mac.ops.get_fw_version(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = ixgbe_start_hw_generic(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ ixgbe_start_hw_gen2(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_media_type_E610 - Gets media type
+ * @hw: pointer to the HW struct
+ *
+ * To get the media type, the function gets the PHY capabilities and uses
+ * them to identify the PHY type by checking phy_type_high and
+ * phy_type_low.
+ *
+ * Return: the type of media in form of ixgbe_media_type enum
+ * or ixgbe_media_type_unknown in case of an error.
+ */
+enum ixgbe_media_type ixgbe_get_media_type_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ u64 phy_mask = 0;
+ s32 rc;
+ u8 i;
+
+ rc = ixgbe_update_link_info(hw);
+ if (rc) {
+ return ixgbe_media_type_unknown;
+ }
+
+ /* If there is no link but a PHY (dongle) is available, SW should use
+ * Get PHY Caps admin command instead of Get Link Status, find most
+ * significant bit that is set in PHY types reported by the command
+ * and use it to discover media type.
+ */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) &&
+ (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) {
+ /* Get PHY Capabilities */
+ rc = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ &pcaps);
+ if (rc) {
+ return ixgbe_media_type_unknown;
+ }
+
+ /* Check if there is some bit set in phy_type_high */
+ for (i = 64; i > 0; i--) {
+ phy_mask = (u64)((u64)1 << (i - 1));
+ if ((pcaps.phy_type_high & phy_mask) != 0) {
+ /* If any bit is set treat it as PHY type */
+ hw->link.link_info.phy_type_high = phy_mask;
+ hw->link.link_info.phy_type_low = 0;
+ break;
+ }
+ phy_mask = 0;
+ }
+
+ /* If nothing found in phy_type_high search in phy_type_low */
+ if (phy_mask == 0) {
+ for (i = 64; i > 0; i--) {
+ phy_mask = (u64)((u64)1 << (i - 1));
+ if ((pcaps.phy_type_low & phy_mask) != 0) {
+ /* If any bit is set treat it as PHY type */
+ hw->link.link_info.phy_type_high = 0;
+ hw->link.link_info.phy_type_low = phy_mask;
+ break;
+ }
+ }
+ }
+
+ }
+
+ /* Based on link status or search above try to discover media type */
+ hw->phy.media_type = ixgbe_get_media_type_from_phy_type(hw);
+
+ return hw->phy.media_type;
+}
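+
+/* Illustrative example of the MSB-first scan above: if
+ * pcaps.phy_type_high == 0x0A (bits 1 and 3 set), the loop stops at
+ * i == 4 and keeps phy_mask == 0x08 -- only the most significant set bit
+ * is treated as the PHY type, and phy_type_low is cleared.
+ */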
+
+/**
+ * ixgbe_get_supported_physical_layer_E610 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ *
+ * Return: the bitmap of supported physical layer types, or
+ * IXGBE_PHYSICAL_LAYER_UNKNOWN on error.
+ **/
+u64 ixgbe_get_supported_physical_layer_E610(struct ixgbe_hw *hw)
+{
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ u64 phy_type;
+ s32 rc;
+
+ rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ &pcaps);
+ if (rc)
+ return IXGBE_PHYSICAL_LAYER_UNKNOWN;
+
+ phy_type = IXGBE_LE64_TO_CPU(pcaps.phy_type_low);
+ if (phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_T)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_T)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_100BASE_TX)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_LR)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_SR)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_KX)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_SX)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_SX;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_2500BASE_KX)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_2500BASE_KX;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_2500BASE_T)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_2500BASE_T;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_5GBASE_T)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_5000BASE_T;
+
+ phy_type = IXGBE_LE64_TO_CPU(pcaps.phy_type_high);
+ if (phy_type & IXGBE_PHY_TYPE_HIGH_10BASE_T)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
+
+ return physical_layer;
+}
+
+/**
+ * ixgbe_setup_link_E610 - Set up link
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait: true when waiting for completion is needed
+ *
+ * Set up the link with the specified speed.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_setup_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait)
+{
+ /* Simply request FW to perform proper PHY setup */
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
+}
+
+/**
+ * ixgbe_check_link_E610 - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Determine if the link is up and the current link speed
+ * using ACI command (0x0607).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_check_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ s32 rc;
+ u32 i;
+
+ if (!speed || !link_up)
+ return IXGBE_ERR_PARAM;
+
+ /* Set get_link_info flag to ensure that fresh
+ * link information will be obtained from FW
+ * by sending Get Link Status admin command. */
+ hw->link.get_link_info = true;
+
+ /* Update link information in adapter context. */
+ rc = ixgbe_get_link_status(hw, link_up);
+ if (rc)
+ return rc;
+
+ /* Wait for link up if it was requested. */
+ if (link_up_wait_to_complete && *link_up == false) {
+ for (i = 0; i < hw->mac.max_link_up_time; i++) {
+ msec_delay(100);
+ hw->link.get_link_info = true;
+ rc = ixgbe_get_link_status(hw, link_up);
+ if (rc)
+ return rc;
+ if (*link_up)
+ break;
+ }
+ }
+
+ /* Use link information in adapter context updated by the call
+ * to ixgbe_get_link_status() to determine current link speed.
+ * Link speed information is valid only when link up was
+ * reported by FW. */
+ if (*link_up) {
+ switch (hw->link.link_info.link_speed) {
+ case IXGBE_ACI_LINK_SPEED_10MB:
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_100MB:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_1000MB:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_2500MB:
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_5GB:
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_10GB:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ break;
+ }
+ } else {
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ return IXGBE_SUCCESS;
+}
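+
+/* A minimal caller sketch: wait for link and report the negotiated speed.
+ *
+ *   ixgbe_link_speed speed;
+ *   bool link_up;
+ *   s32 err = ixgbe_check_link_E610(hw, &speed, &link_up, true);
+ *   // with link_up_wait_to_complete set, the call polls every 100 ms,
+ *   // up to hw->mac.max_link_up_time attempts
+ */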
+
+/**
+ * ixgbe_get_link_capabilities_E610 - Determine link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ *
+ * Determine speed and AN parameters of a link.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_link_capabilities_E610(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ if (!speed || !autoneg)
+ return IXGBE_ERR_PARAM;
+
+ *autoneg = true;
+ *speed = hw->phy.speeds_supported;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_cfg_phy_fc - Configure PHY Flow Control (FC) data based on FC mode
+ * @hw: pointer to hardware structure
+ * @cfg: PHY configuration data to set FC mode
+ * @req_mode: FC mode to configure
+ *
+ * Configures PHY Flow Control according to the provided configuration.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+ enum ixgbe_fc_mode req_mode)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps = NULL;
+ s32 status = IXGBE_SUCCESS;
+ u8 pause_mask = 0x0;
+
+ if (!cfg)
+ return IXGBE_ERR_PARAM;
+
+ switch (req_mode) {
+ case ixgbe_fc_auto:
+ {
+ pcaps = (struct ixgbe_aci_cmd_get_phy_caps_data *)
+ ixgbe_malloc(hw, sizeof(*pcaps));
+ if (!pcaps) {
+ status = IXGBE_ERR_OUT_OF_MEM;
+ goto out;
+ }
+
+ /* Query the value of FC that both the NIC and the attached
+ * media can do. */
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, pcaps);
+ if (status)
+ goto out;
+
+ pause_mask |= pcaps->caps & IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ pause_mask |= pcaps->caps & IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+
+ break;
+ }
+ case ixgbe_fc_full:
+ pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ixgbe_fc_rx_pause:
+ pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ixgbe_fc_tx_pause:
+ pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ break;
+ default:
+ break;
+ }
+
+ /* clear the old pause settings */
+ cfg->caps &= ~(IXGBE_ACI_PHY_EN_TX_LINK_PAUSE |
+ IXGBE_ACI_PHY_EN_RX_LINK_PAUSE);
+
+ /* set the new capabilities */
+ cfg->caps |= pause_mask;
+
+out:
+ if (pcaps)
+ ixgbe_free(hw, pcaps);
+ return status;
+}
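+
+/* Summary of the req_mode -> pause_mask mapping implemented above:
+ *
+ *   ixgbe_fc_full      TX and RX pause
+ *   ixgbe_fc_rx_pause  RX pause only
+ *   ixgbe_fc_tx_pause  TX pause only
+ *   ixgbe_fc_auto      whatever both the NIC and the attached media
+ *                      support, as reported by ixgbe_aci_get_phy_caps()
+ *   other modes        no pause bits set
+ */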
+
+/**
+ * ixgbe_setup_fc_E610 - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Set up flow control. This has to be done during init time.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_setup_fc_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps = { 0 };
+ struct ixgbe_aci_cmd_set_phy_cfg_data cfg = { 0 };
+ s32 status;
+
+ /* Get the current PHY config */
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &pcaps);
+ if (status)
+ return status;
+
+ ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
+
+ /* Configure the set PHY data */
+ status = ixgbe_cfg_phy_fc(hw, &cfg, hw->fc.requested_mode);
+ if (status)
+ return status;
+
+ /* If the capabilities have changed, then set the new config */
+ if (cfg.caps != pcaps.caps) {
+ cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ status = ixgbe_aci_set_phy_cfg(hw, &cfg);
+ if (status)
+ return status;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_fc_autoneg_E610 - Configure flow control
+ * @hw: pointer to hardware structure
+ *
+ * Configure Flow Control.
+ */
+void ixgbe_fc_autoneg_E610(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ /* Get current link status.
+ * Current FC mode will be stored in the hw context. */
+ status = ixgbe_aci_get_link_info(hw, false, NULL);
+ if (status) {
+ goto out;
+ }
+
+ /* Check if the link is up */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP)) {
+ status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
+ }
+
+ /* Check if auto-negotiation has completed */
+ if (!(hw->link.link_info.an_info & IXGBE_ACI_AN_COMPLETED)) {
+ status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
+ }
+
+out:
+ if (status == IXGBE_SUCCESS) {
+ hw->fc.fc_was_autonegged = true;
+ } else {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ }
+}
+
+/**
+ * ixgbe_set_fw_drv_ver_E610 - Send driver version to FW
+ * @hw: pointer to the HW structure
+ * @maj: driver version major number
+ * @minor: driver version minor number
+ * @build: driver version build number
+ * @sub: driver version sub build number
+ * @len: length of driver_ver string
+ * @driver_ver: driver string
+ *
+ * Send driver version number to Firmware using ACI command (0x0002).
+ *
+ * Return: the exit code of the operation.
+ * IXGBE_SUCCESS - OK
+ * IXGBE_ERR_PARAM - incorrect parameters were given
+ * IXGBE_ERR_ACI_ERROR - encountered an error during sending the command
+ * IXGBE_ERR_ACI_TIMEOUT - a timeout occurred
+ * IXGBE_ERR_OUT_OF_MEM - ran out of memory
+ */
+s32 ixgbe_set_fw_drv_ver_E610(struct ixgbe_hw *hw, u8 maj, u8 minor, u8 build,
+ u8 sub, u16 len, const char *driver_ver)
+{
+ size_t limited_len = min(len, (u16)IXGBE_DRV_VER_STR_LEN_E610);
+ struct ixgbe_driver_ver dv;
+
+ DEBUGFUNC("ixgbe_set_fw_drv_ver_E610");
+
+ if (!len || !driver_ver)
+ return IXGBE_ERR_PARAM;
+
+ dv.major_ver = maj;
+ dv.minor_ver = minor;
+ dv.build_ver = build;
+ dv.subbuild_ver = sub;
+
+ memset(dv.driver_string, 0, IXGBE_DRV_VER_STR_LEN_E610);
+ memcpy(dv.driver_string, driver_ver, limited_len);
+
+ return ixgbe_aci_send_driver_ver(hw, &dv);
+}
+
+/**
+ * ixgbe_disable_rx_E610 - Disable RX unit
+ * @hw: pointer to hardware structure
+ *
+ * Disable the RX DMA unit on E610 using ACI command (0x000C). If the
+ * ACI command fails, RX is disabled by a register write as a fallback.
+ */
+void ixgbe_disable_rx_E610(struct ixgbe_hw *hw)
+{
+ u32 rxctrl;
+
+ DEBUGFUNC("ixgbe_disable_rx_E610");
+
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ u32 pfdtxgswc;
+ s32 status;
+
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+ pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = true;
+ } else {
+ hw->mac.set_lben = false;
+ }
+
+ status = ixgbe_aci_disable_rxen(hw);
+
+ /* If we fail - disable RX using register write */
+ if (status) {
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ rxctrl &= ~IXGBE_RXCTRL_RXEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+ }
+ }
+ }
+}
+
+/**
+ * ixgbe_setup_eee_E610 - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @enable_eee: boolean flag to enable EEE
+ *
+ * Enables/disables EEE based on the enable_eee flag.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_setup_eee_E610(struct ixgbe_hw *hw, bool enable_eee)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
+ u16 eee_cap = 0;
+ s32 status;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ if (enable_eee) {
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_100BASE_TX;
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_1000BASE_T;
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_1000BASE_KX;
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_10GBASE_T;
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_10GBASE_KR;
+ if (phy_caps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_10BASE_T;
+ }
+
+ /* Set EEE capability for particular PHY types */
+ phy_cfg.eee_cap = IXGBE_CPU_TO_LE16(eee_cap);
+
+ status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+
+ return status;
+}
+
+/**
+ * ixgbe_fw_recovery_mode_E610 - Check FW NVM recovery mode
+ * @hw: pointer to hardware structure
+ *
+ * Checks FW NVM recovery mode by
+ * reading the value of the dedicated register.
+ *
+ * Return: true if FW is in recovery mode, otherwise false.
+ */
+bool ixgbe_fw_recovery_mode_E610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, GL_MNG_FWSM);
+
+ return !!(fwsm & GL_MNG_FWSM_FW_MODES_RECOVERY_M);
+}
+
+/**
+ * ixgbe_fw_rollback_mode_E610 - Check FW NVM Rollback
+ * @hw: pointer to hardware structure
+ *
+ * Checks FW NVM Rollback mode by reading the
+ * value of the dedicated register.
+ *
+ * Return: true if FW is in Rollback mode, otherwise false.
+ */
+bool ixgbe_fw_rollback_mode_E610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, GL_MNG_FWSM);
+
+ return !!(fwsm & GL_MNG_FWSM_FW_MODES_ROLLBACK_M);
+}
+
+/**
+ * ixgbe_get_fw_tsam_mode_E610 - Check FW NVM Thermal Sensor Autonomous Mode
+ * @hw: pointer to hardware structure
+ *
+ * Checks Thermal Sensor Autonomous Mode by reading the
+ * value of the dedicated register.
+ *
+ * Return: true if FW is in TSAM, otherwise false.
+ */
+bool ixgbe_get_fw_tsam_mode_E610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_X550EM_a);
+
+ return !!(fwsm & IXGBE_FWSM_TS_ENABLED);
+}
+
+/**
+ * ixgbe_init_phy_ops_E610 - PHY specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY type was not known.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_init_phy_ops_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ phy->ops.identify_sfp = ixgbe_identify_module_E610;
+ phy->ops.read_reg = NULL; /* PHY reg access is not required */
+ phy->ops.write_reg = NULL;
+ phy->ops.read_reg_mdi = NULL;
+ phy->ops.write_reg_mdi = NULL;
+ phy->ops.setup_link = ixgbe_setup_phy_link_E610;
+ phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_E610;
+ phy->ops.read_i2c_byte = NULL; /* disabled for E610 */
+ phy->ops.write_i2c_byte = NULL; /* disabled for E610 */
+ phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_E610;
+ phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_E610;
+ phy->ops.write_i2c_eeprom = ixgbe_write_i2c_eeprom_E610;
+ phy->ops.i2c_bus_clear = NULL; /* do not use generic implementation */
+ phy->ops.check_overtemp = ixgbe_check_overtemp_E610;
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
+ phy->ops.set_phy_power = ixgbe_set_phy_power_E610;
+ else
+ phy->ops.set_phy_power = NULL;
+ phy->ops.enter_lplu = ixgbe_enter_lplu_E610;
+ phy->ops.handle_lasi = NULL; /* no implementation for E610 */
+ phy->ops.read_i2c_byte_unlocked = NULL; /* disabled for E610 */
+ phy->ops.write_i2c_byte_unlocked = NULL; /* disabled for E610 */
+
+ /* TODO: Set functions pointers based on device ID */
+
+ /* Identify the PHY */
+ ret_val = phy->ops.identify(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ /* TODO: Set functions pointers based on PHY type */
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_identify_phy_E610 - Identify PHY
+ * @hw: pointer to hardware structure
+ *
+ * Determine PHY type, supported speeds and PHY ID.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_identify_phy_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ s32 rc;
+
+ /* Set PHY type */
+ hw->phy.type = ixgbe_phy_fw;
+
+ rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ &pcaps);
+ if (rc)
+ return rc;
+
+ if (!(pcaps.module_compliance_enforcement &
+ IXGBE_ACI_MOD_ENFORCE_STRICT_MODE)) {
+ /* Handle lenient mode */
+ rc = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA,
+ &pcaps);
+ if (rc)
+ return rc;
+ }
+
+ /* Determine supported speeds */
+ hw->phy.speeds_supported = IXGBE_LINK_SPEED_UNKNOWN;
+
+ if (pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10M_SGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10_FULL;
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_100M_SGMII ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_100M_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_SX ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_LX ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1G_SGMII ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_DA ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_SR ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_LR ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_C2C ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ /* 2.5 and 5 Gbps link speeds must be excluded from the
+ * auto-negotiation set used during driver initialization due to
+ * compatibility issues with certain switches. Those issues do not
+ * exist in the case of the E610 2.5G SKU device (0x57b1).
+ */
+ if (!hw->phy.autoneg_advertised &&
+ hw->device_id != IXGBE_DEV_ID_E610_2_5G_T)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_T ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_X ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_KX ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
+
+ if (!hw->phy.autoneg_advertised &&
+ hw->device_id == IXGBE_DEV_ID_E610_2_5G_T)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_T ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
+
+ /* Set PHY ID */
+ memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32));
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_identify_module_E610 - Identify SFP module type
+ * @hw: pointer to hardware structure
+ *
+ * Identify the SFP module type.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_identify_module_E610(struct ixgbe_hw *hw)
+{
+ bool media_available;
+ u8 module_type;
+ s32 rc;
+
+ rc = ixgbe_update_link_info(hw);
+ if (rc)
+ goto err;
+
+ media_available =
+ (hw->link.link_info.link_info &
+ IXGBE_ACI_MEDIA_AVAILABLE) ? true : false;
+
+ if (media_available) {
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+
+ /* Get module type from hw context updated by ixgbe_update_link_info() */
+ module_type = hw->link.link_info.module_type[IXGBE_ACI_MOD_TYPE_IDENT];
+
+ if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE) ||
+ (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE)) {
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
+ } else if (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR) {
+ hw->phy.sfp_type = ixgbe_sfp_type_sr;
+ } else if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR) ||
+ (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM)) {
+ hw->phy.sfp_type = ixgbe_sfp_type_lr;
+ }
+ rc = IXGBE_SUCCESS;
+ } else {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ rc = IXGBE_ERR_SFP_NOT_PRESENT;
+ }
+err:
+ return rc;
+}
+
+/**
+ * ixgbe_setup_phy_link_E610 - Sets up firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ *
+ * Set the parameters for the firmware-controlled PHYs.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_setup_phy_link_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ struct ixgbe_aci_cmd_set_phy_cfg_data pcfg;
+ u8 rmode = IXGBE_ACI_REPORT_TOPO_CAP_MEDIA;
+ u64 sup_phy_type_low, sup_phy_type_high;
+ s32 rc;
+
+ rc = ixgbe_aci_get_link_info(hw, false, NULL);
+ if (rc) {
+ goto err;
+ }
+
+ /* If media is not available get default config */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+ rmode = IXGBE_ACI_REPORT_DFLT_CFG;
+
+ rc = ixgbe_aci_get_phy_caps(hw, false, rmode, &pcaps);
+ if (rc) {
+ goto err;
+ }
+
+ sup_phy_type_low = pcaps.phy_type_low;
+ sup_phy_type_high = pcaps.phy_type_high;
+
+ /* Get Active configuration to avoid unintended changes */
+ rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &pcaps);
+ if (rc) {
+ goto err;
+ }
+ ixgbe_copy_phy_caps_to_cfg(&pcaps, &pcfg);
+
+ /* Set default PHY types for a given speed */
+ pcfg.phy_type_low = 0;
+ pcfg.phy_type_high = 0;
+
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) {
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10BASE_T;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10M_SGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_100BASE_TX;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_100M_SGMII;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_100M_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_T;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_SX;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_LX;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_KX;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1G_SGMII;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_1G_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_T;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_X;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_KX;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_SGMII;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_T;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_KR;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_5G_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_T;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_DA;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_SR;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_LR;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_C2C;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10G_USXGMII;
+ }
+
+ /* Mask the set values to avoid requesting unsupported link types */
+ pcfg.phy_type_low &= sup_phy_type_low;
+ pcfg.phy_type_high &= sup_phy_type_high;
+
+ if (pcfg.phy_type_high != pcaps.phy_type_high ||
+ pcfg.phy_type_low != pcaps.phy_type_low ||
+ pcfg.caps != pcaps.caps) {
+ pcfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ pcfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ rc = ixgbe_aci_set_phy_cfg(hw, &pcfg);
+ }
+
+err:
+ return rc;
+}
+
+/**
+ * ixgbe_get_phy_firmware_version_E610 - Gets the PHY Firmware Version
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to the PHY Firmware Version
+ *
+ * Determines PHY FW version based on response to Get PHY Capabilities
+ * admin command (0x0600).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_phy_firmware_version_E610(struct ixgbe_hw *hw,
+ u16 *firmware_version)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ s32 status;
+
+ if (!firmware_version)
+ return IXGBE_ERR_PARAM;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &pcaps);
+ if (status)
+ return status;
+
+ /* TODO: determine which bytes of the 8-byte phy_fw_ver
+ * field should be written to the 2-byte firmware_version
+ * output argument. */
+ memcpy(firmware_version, pcaps.phy_fw_ver, sizeof(u16));
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_i2c_sff8472_E610 - Reads 8 bit word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset at address 0xA2
+ * @sff8472_data: value read
+ *
+ * Performs byte read operation from SFP module's SFF-8472 data over I2C.
+ *
+ * Return: the exit code of the operation.
+ **/
+s32 ixgbe_read_i2c_sff8472_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
+{
+ return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR2,
+ byte_offset, 0,
+ IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
+ sff8472_data, 1, false);
+}
+
+/**
+ * ixgbe_read_i2c_eeprom_E610 - Reads 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs byte read operation from SFP module's EEPROM over I2C interface.
+ *
+ * Return: the exit code of the operation.
+ **/
+s32 ixgbe_read_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data)
+{
+ return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR,
+ byte_offset, 0,
+ IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
+ eeprom_data, 1, false);
+}
+
+/**
+ * ixgbe_write_i2c_eeprom_E610 - Writes 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to write
+ * @eeprom_data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface.
+ *
+ * Return: the exit code of the operation.
+ **/
+s32 ixgbe_write_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 eeprom_data)
+{
+ return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR,
+ byte_offset, 0,
+ IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
+ &eeprom_data, 1, true);
+}
+
+/**
+ * ixgbe_check_overtemp_E610 - Check firmware-controlled PHYs for overtemp
+ * @hw: pointer to hardware structure
+ *
+ * Get the link status and check whether a PHY temperature alarm was detected.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_check_overtemp_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_link_status_data link_data = { 0 };
+ struct ixgbe_aci_cmd_get_link_status *resp;
+ struct ixgbe_aci_desc desc;
+ s32 status = IXGBE_SUCCESS;
+
+ if (!hw)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
+ resp = &desc.params.get_link_status;
+ resp->cmd_flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_LSE_NOP);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (link_data.ext_info & IXGBE_ACI_LINK_PHY_TEMP_ALARM) {
+ ERROR_REPORT1(IXGBE_ERROR_CAUTION,
+ "PHY Temperature Alarm detected");
+ status = IXGBE_ERR_OVERTEMP;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_set_phy_power_E610 - Control power for copper PHY
+ * @hw: pointer to hardware structure
+ * @on: true for on, false for off
+ *
+ * Set the power on/off of the PHY
+ * by getting its capabilities and setting the appropriate
+ * configuration parameters.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_set_phy_power_E610(struct ixgbe_hw *hw, bool on)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
+ s32 status;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ if (on) {
+ phy_cfg.caps &= ~IXGBE_ACI_PHY_ENA_LOW_POWER;
+ } else {
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LOW_POWER;
+ }
+
+ /* PHY is already in requested power mode */
+ if (phy_caps.caps == phy_cfg.caps)
+ return IXGBE_SUCCESS;
+
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+
+ return status;
+}
+
+/**
+ * ixgbe_enter_lplu_E610 - Transition to low power states
+ * @hw: pointer to hardware structure
+ *
+ * Configures Low Power Link Up on transition to low power states
+ * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
+ * X557 PHY immediately prior to entering LPLU.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_enter_lplu_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
+ s32 status;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ phy_cfg.low_power_ctrl_an |= IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG;
+
+ status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+
+ return status;
+}
+
+/**
+ * ixgbe_init_eeprom_params_E610 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_init_eeprom_params_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 gens_stat;
+ u8 sr_size;
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ eeprom->type = ixgbe_flash;
+
+ gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
+ sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >>
+ GLNVM_GENS_SR_SIZE_S;
+
+ /* Switching to words (sr_size contains power of 2) */
+ eeprom->word_size = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
+
+ DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
+ eeprom->type, eeprom->word_size);
+ }
+
+ return IXGBE_SUCCESS;
+}
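+
+/* Worked example of the sizing above, assuming IXGBE_SR_WORDS_IN_1KB is
+ * 512 (1kB holds 512 16 bit words): an SR_SIZE field of 6 in GLNVM_GENS
+ * gives word_size = BIT(6) * 512 = 32768 words, i.e. the 64kB shadow RAM
+ * covered by the checksum routine below.
+ */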
+
+/**
+ * ixgbe_read_ee_aci_E610 - Read EEPROM word using the admin command.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with reading.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_read_sr_word_aci(hw, offset, data);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_ee_aci_buffer_E610 - Read EEPROM word(s) using admin commands.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Reads 16 bit word(s) from the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with reading.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_read_sr_buf_aci(hw, offset, &words, data);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_ee_aci_E610 - Write EEPROM word using the admin command.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Writes a 16 bit word to the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with writing.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_write_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ixgbe_write_sr_word_aci(hw, (u32)offset, &data);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_ee_aci_buffer_E610 - Write EEPROM word(s) using admin commands.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @words: number of words
+ * @data: word(s) to write to the EEPROM
+ *
+ * Writes 16 bit word(s) to the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with writing.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_write_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ixgbe_write_sr_buf_aci(hw, (u32)offset, words, data);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_calc_eeprom_checksum_E610 - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ *
+ * Calculates the SW checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules. The structure and size of
+ * the VPD are customer specific and unknown, so this function skips the
+ * maximum possible VPD size (1kB).
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the negative error code on error, or the 16-bit checksum
+ */
+s32 ixgbe_calc_eeprom_checksum_E610(struct ixgbe_hw *hw)
+{
+ bool nvm_acquired = false;
+ u16 pcie_alt_module = 0;
+ u16 checksum_local = 0;
+ u16 checksum = 0;
+ u16 vpd_module;
+ void *vmem;
+ s32 status;
+ u16 *data;
+ u16 i;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ vmem = ixgbe_calloc(hw, IXGBE_SR_SECTOR_SIZE_IN_WORDS, sizeof(u16));
+ if (!vmem)
+ return IXGBE_ERR_OUT_OF_MEM;
+ data = (u16 *)vmem;
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ goto ixgbe_calc_sr_checksum_exit;
+ nvm_acquired = true;
+
+ /* read pointer to VPD area */
+ status = ixgbe_read_sr_word_aci(hw, E610_SR_VPD_PTR, &vpd_module);
+ if (status)
+ goto ixgbe_calc_sr_checksum_exit;
+
+ /* read pointer to PCIe Alt Auto-load module */
+ status = ixgbe_read_sr_word_aci(hw, E610_SR_PCIE_ALT_AUTO_LOAD_PTR,
+ &pcie_alt_module);
+ if (status)
+ goto ixgbe_calc_sr_checksum_exit;
+
+ /* Calculate SW checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules
+ */
+ for (i = 0; i < hw->eeprom.word_size; i++) {
+ /* Read SR page */
+ if ((i % IXGBE_SR_SECTOR_SIZE_IN_WORDS) == 0) {
+ u16 words = IXGBE_SR_SECTOR_SIZE_IN_WORDS;
+
+ status = ixgbe_read_sr_buf_aci(hw, i, &words, data);
+ if (status != IXGBE_SUCCESS)
+ goto ixgbe_calc_sr_checksum_exit;
+ }
+
+ /* Skip Checksum word */
+ if (i == E610_SR_SW_CHECKSUM_WORD)
+ continue;
+ /* Skip VPD module (convert byte size to word count) */
+ if (i >= (u32)vpd_module &&
+ i < ((u32)vpd_module + E610_SR_VPD_SIZE_WORDS))
+ continue;
+ /* Skip PCIe ALT module (convert byte size to word count) */
+ if (i >= (u32)pcie_alt_module &&
+ i < ((u32)pcie_alt_module + E610_SR_PCIE_ALT_SIZE_WORDS))
+ continue;
+
+ checksum_local += data[i % IXGBE_SR_SECTOR_SIZE_IN_WORDS];
+ }
+
+ checksum = (u16)IXGBE_SR_SW_CHECKSUM_BASE - checksum_local;
+
+ixgbe_calc_sr_checksum_exit:
+ if (nvm_acquired)
+ ixgbe_release_nvm(hw);
+ ixgbe_free(hw, vmem);
+
+ if (!status)
+ return (s32)checksum;
+ else
+ return status;
+}
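+
+/*
+ * Worked example of the checksum arithmetic (the sum is hypothetical):
+ * if the included words add up to 0x1234, the stored checksum is
+ *
+ *	0xBABA - 0x1234 = 0xA886 (mod 2^16),
+ *
+ * so that summing every included word together with the checksum word
+ * again yields IXGBE_SR_SW_CHECKSUM_BASE (0xBABA).
+ */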
+
+/**
+ * ixgbe_update_eeprom_checksum_E610 - Updates the EEPROM checksum and flash
+ * @hw: pointer to hardware structure
+ *
+ * After writing EEPROM to Shadow RAM, software sends the admin command
+ * to recalculate and update EEPROM checksum and instructs the hardware
+ * to update the flash.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_update_eeprom_checksum_E610(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_nvm_recalculate_checksum(hw);
+ if (status)
+ return status;
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ixgbe_nvm_write_activate(hw, IXGBE_ACI_NVM_ACTIV_REQ_EMPR,
+ NULL);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum_E610 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_validate_eeprom_checksum_E610(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_nvm_validate_checksum(hw);
+
+ if (status)
+ return status;
+
+ if (checksum_val) {
+ u16 tmp_checksum;
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_read_sr_word_aci(hw, E610_SR_SW_CHECKSUM_WORD,
+ &tmp_checksum);
+ ixgbe_release_nvm(hw);
+
+ if (!status)
+ *checksum_val = tmp_checksum;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
+ * @hw: pointer to hardware structure
+ * @module_tlv: pointer to module TLV to return
+ * @module_tlv_len: pointer to module TLV length to return
+ * @module_type: module type requested
+ *
+ * Finds the requested sub module TLV type from the Preserved Field
+ * Area (PFA) and returns the TLV pointer and length. The caller can
+ * use these to read the variable length TLV value.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_pfa_module_tlv(struct ixgbe_hw *hw, u16 *module_tlv,
+ u16 *module_tlv_len, u16 module_type)
+{
+ u16 pfa_len, pfa_ptr, pfa_end_ptr;
+ u16 next_tlv;
+ s32 status;
+
+ status = ixgbe_read_ee_aci_E610(hw, E610_SR_PFA_PTR, &pfa_ptr);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+ status = ixgbe_read_ee_aci_E610(hw, pfa_ptr, &pfa_len);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+ /* Starting with first TLV after PFA length, iterate through the list
+ * of TLVs to find the requested one.
+ */
+ next_tlv = pfa_ptr + 1;
+ pfa_end_ptr = pfa_ptr + pfa_len;
+ while (next_tlv < pfa_end_ptr) {
+ u16 tlv_sub_module_type, tlv_len;
+
+ /* Read TLV type */
+ status = ixgbe_read_ee_aci_E610(hw, next_tlv,
+ &tlv_sub_module_type);
+ if (status != IXGBE_SUCCESS) {
+ break;
+ }
+ /* Read TLV length */
+ status = ixgbe_read_ee_aci_E610(hw, next_tlv + 1, &tlv_len);
+ if (status != IXGBE_SUCCESS) {
+ break;
+ }
+ if (tlv_sub_module_type == module_type) {
+ if (tlv_len) {
+ *module_tlv = next_tlv;
+ *module_tlv_len = tlv_len;
+ return IXGBE_SUCCESS;
+ }
+ return IXGBE_ERR_INVAL_SIZE;
+ }
+ /* Check next TLV, i.e. current TLV pointer + length + 2 words
+ * (for current TLV's type and length)
+ */
+ next_tlv = next_tlv + tlv_len + 2;
+ }
+ /* Module does not exist */
+ return IXGBE_ERR_DOES_NOT_EXIST;
+}
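+
+/*
+ * Shadow RAM layout assumed by the TLV walk above (word offsets;
+ * contents illustrative):
+ *
+ *	pfa_ptr + 0: PFA length in words
+ *	pfa_ptr + 1: first TLV type
+ *	pfa_ptr + 2: first TLV length in words
+ *	pfa_ptr + 3: first TLV value ...
+ *
+ * Each subsequent TLV starts at the previous one plus its length plus
+ * two words for the type and length fields themselves.
+ */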
+
+/**
+ * ixgbe_read_pba_string_E610 - Reads part number string from NVM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the NVM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the NVM.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_pba_string_E610(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ u16 pba_tlv, pba_tlv_len;
+ u16 pba_word, pba_size;
+ s32 status;
+ u16 i;
+
+ status = ixgbe_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
+ E610_SR_PBA_BLOCK_PTR);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+
+ /* pba_size is the next word */
+ status = ixgbe_read_ee_aci_E610(hw, (pba_tlv + 2), &pba_size);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+
+ if (pba_tlv_len < pba_size) {
+ return IXGBE_ERR_INVAL_SIZE;
+ }
+
+ /* Subtract one to get PBA word count (PBA Size word is included in
+ * total size)
+ */
+ pba_size--;
+ if (pba_num_size < (((u32)pba_size * 2) + 1)) {
+ return IXGBE_ERR_PARAM;
+ }
+
+ for (i = 0; i < pba_size; i++) {
+ status = ixgbe_read_ee_aci_E610(hw, (pba_tlv + 2 + 1) + i,
+ &pba_word);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+
+ pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
+ pba_num[(i * 2) + 1] = pba_word & 0xFF;
+ }
+ pba_num[(pba_size * 2)] = '\0';
+
+ return status;
+}
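+
+/*
+ * Buffer sizing sketch (the buffer size is hypothetical): each PBA word
+ * unpacks into two ASCII bytes and a NUL terminator is appended, so
+ * pba_num_size must be at least (pba_size * 2) + 1 bytes.
+ *
+ *	u8 pba[32];
+ *
+ *	if (ixgbe_read_pba_string_E610(hw, pba, sizeof(pba)) ==
+ *	    IXGBE_SUCCESS)
+ *		... pba now holds a NUL-terminated part number string ...
+ */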
diff --git a/sys/dev/ixgbe/ixgbe_e610.h b/sys/dev/ixgbe/ixgbe_e610.h
new file mode 100644
index 000000000000..94e600139499
--- /dev/null
+++ b/sys/dev/ixgbe/ixgbe_e610.h
@@ -0,0 +1,224 @@
+/******************************************************************************
+ SPDX-License-Identifier: BSD-3-Clause
+
+ Copyright (c) 2025, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+#ifndef _IXGBE_E610_H_
+#define _IXGBE_E610_H_
+
+#include "ixgbe_type.h"
+
+void ixgbe_init_aci(struct ixgbe_hw *hw);
+void ixgbe_shutdown_aci(struct ixgbe_hw *hw);
+s32 ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size);
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw);
+s32 ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
+ bool *pending);
+
+void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode);
+
+s32 ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw);
+s32 ixgbe_aci_send_driver_ver(struct ixgbe_hw *hw, struct ixgbe_driver_ver *dv);
+s32 ixgbe_aci_set_pf_context(struct ixgbe_hw *hw, u8 pf_id);
+
+s32 ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access, u32 timeout);
+void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res);
+s32 ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+ u32 *cap_count, enum ixgbe_aci_opc opc);
+s32 ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_caps);
+s32 ixgbe_discover_func_caps(struct ixgbe_hw* hw,
+ struct ixgbe_hw_func_caps* func_caps);
+s32 ixgbe_get_caps(struct ixgbe_hw *hw);
+s32 ixgbe_aci_disable_rxen(struct ixgbe_hw *hw);
+s32 ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps);
+bool ixgbe_phy_caps_equals_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+s32 ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+s32 ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link);
+s32 ixgbe_update_link_info(struct ixgbe_hw *hw);
+s32 ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up);
+s32 ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+ struct ixgbe_link_status *link);
+s32 ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask);
+s32 ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask);
+
+s32 ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle);
+s32 ixgbe_find_netlist_node(struct ixgbe_hw *hw, u8 node_type_ctx,
+ u8 node_part_number, u16 *node_handle);
+s32 ixgbe_aci_read_i2c(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data);
+s32 ixgbe_aci_write_i2c(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data);
+
+s32 ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode);
+s32 ixgbe_aci_set_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool value);
+s32 ixgbe_aci_get_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool *value);
+s32 ixgbe_aci_sff_eeprom(struct ixgbe_hw *hw, u16 lport, u8 bus_addr,
+ u16 mem_addr, u8 page, u8 page_bank_ctrl, u8 *data,
+ u8 length, bool write);
+s32 ixgbe_aci_prog_topo_dev_nvm(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_params *topo_params);
+s32 ixgbe_aci_read_topo_dev_nvm(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_params *topo_params,
+ u32 start_address, u8 *data, u8 data_size);
+
+s32 ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+ enum ixgbe_aci_res_access_type access);
+void ixgbe_release_nvm(struct ixgbe_hw *hw);
+
+s32 ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram);
+
+s32 ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid);
+s32 ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 command_flags);
+
+s32 ixgbe_aci_read_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
+ u16 field_id, void *data, u16 buf_size,
+ u16 *elem_count);
+s32 ixgbe_aci_write_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
+ void *data, u16 buf_size, u16 elem_count);
+
+s32 ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw);
+s32 ixgbe_nvm_recalculate_checksum(struct ixgbe_hw *hw);
+
+s32 ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
+ u8 *response_flags);
+
+s32 ixgbe_get_nvm_minsrevs(struct ixgbe_hw *hw, struct ixgbe_minsrev_info *minsrevs);
+s32 ixgbe_update_nvm_minsrevs(struct ixgbe_hw *hw, struct ixgbe_minsrev_info *minsrevs);
+
+s32 ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
+s32 ixgbe_get_active_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
+
+s32 ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw, struct ixgbe_netlist_info *netlist);
+s32 ixgbe_init_nvm(struct ixgbe_hw *hw);
+
+s32 ixgbe_sanitize_operate(struct ixgbe_hw *hw);
+s32 ixgbe_sanitize_nvm(struct ixgbe_hw *hw, u8 cmd_flags, u8 *values);
+
+s32 ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words, u16 *data);
+s32 ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+ u8 *data, bool read_shadow_ram);
+
+s32 ixgbe_write_sr_word_aci(struct ixgbe_hw *hw, u32 offset, const u16 *data);
+s32 ixgbe_write_sr_buf_aci(struct ixgbe_hw *hw, u32 offset, u16 words, const u16 *data);
+
+s32 ixgbe_aci_alternate_write(struct ixgbe_hw *hw, u32 reg_addr0,
+ u32 reg_val0, u32 reg_addr1, u32 reg_val1);
+s32 ixgbe_aci_alternate_read(struct ixgbe_hw *hw, u32 reg_addr0,
+ u32 *reg_val0, u32 reg_addr1, u32 *reg_val1);
+s32 ixgbe_aci_alternate_write_done(struct ixgbe_hw *hw, u8 bios_mode,
+ bool *reset_needed);
+s32 ixgbe_aci_alternate_clear(struct ixgbe_hw *hw);
+
+s32 ixgbe_aci_get_internal_data(struct ixgbe_hw *hw, u16 cluster_id,
+ u16 table_id, u32 start, void *buf,
+ u16 buf_size, u16 *ret_buf_size,
+ u16 *ret_next_cluster, u16 *ret_next_table,
+ u32 *ret_next_index);
+
+s32 ixgbe_handle_nvm_access(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_access_cmd *cmd,
+ struct ixgbe_nvm_access_data *data);
+
+s32 ixgbe_aci_set_health_status_config(struct ixgbe_hw *hw, u8 event_source);
+
+/* E610 operations */
+s32 ixgbe_init_ops_E610(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw_E610(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_E610(struct ixgbe_hw *hw);
+enum ixgbe_media_type ixgbe_get_media_type_E610(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_physical_layer_E610(struct ixgbe_hw *hw);
+s32 ixgbe_setup_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait);
+s32 ixgbe_check_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
+s32 ixgbe_get_link_capabilities_E610(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg);
+s32 ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+ enum ixgbe_fc_mode req_mode);
+s32 ixgbe_setup_fc_E610(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg_E610(struct ixgbe_hw *hw);
+s32 ixgbe_set_fw_drv_ver_E610(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
+ u8 sub, u16 len, const char *driver_ver);
+void ixgbe_disable_rx_E610(struct ixgbe_hw *hw);
+s32 ixgbe_setup_eee_E610(struct ixgbe_hw *hw, bool enable_eee);
+bool ixgbe_fw_recovery_mode_E610(struct ixgbe_hw *hw);
+bool ixgbe_fw_rollback_mode_E610(struct ixgbe_hw *hw);
+bool ixgbe_get_fw_tsam_mode_E610(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_E610(struct ixgbe_hw *hw);
+s32 ixgbe_identify_phy_E610(struct ixgbe_hw *hw);
+s32 ixgbe_identify_module_E610(struct ixgbe_hw *hw);
+s32 ixgbe_setup_phy_link_E610(struct ixgbe_hw *hw);
+s32 ixgbe_get_phy_firmware_version_E610(struct ixgbe_hw *hw,
+ u16 *firmware_version);
+s32 ixgbe_read_i2c_sff8472_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data);
+s32 ixgbe_read_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data);
+s32 ixgbe_write_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 eeprom_data);
+s32 ixgbe_check_overtemp_E610(struct ixgbe_hw *hw);
+s32 ixgbe_set_phy_power_E610(struct ixgbe_hw *hw, bool on);
+s32 ixgbe_enter_lplu_E610(struct ixgbe_hw *hw);
+s32 ixgbe_init_eeprom_params_E610(struct ixgbe_hw *hw);
+s32 ixgbe_read_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_write_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_calc_eeprom_checksum_E610(struct ixgbe_hw *hw);
+s32 ixgbe_update_eeprom_checksum_E610(struct ixgbe_hw *hw);
+s32 ixgbe_validate_eeprom_checksum_E610(struct ixgbe_hw *hw, u16 *checksum_val);
+s32 ixgbe_read_pba_string_E610(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size);
+
+#endif /* _IXGBE_E610_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_osdep.c b/sys/dev/ixgbe/ixgbe_osdep.c
index 892924712c38..9bd9ce63b786 100644
--- a/sys/dev/ixgbe/ixgbe_osdep.c
+++ b/sys/dev/ixgbe/ixgbe_osdep.c
@@ -114,3 +114,29 @@ ixgbe_link_speed_to_baudrate(ixgbe_link_speed speed)
return baudrate;
}
+
+void
+ixgbe_init_lock(struct ixgbe_lock *lock)
+{
+ mtx_init(&lock->mutex, "mutex",
+ "ixgbe ACI lock", MTX_DEF | MTX_DUPOK);
+}
+
+void
+ixgbe_acquire_lock(struct ixgbe_lock *lock)
+{
+ mtx_lock(&lock->mutex);
+}
+
+void
+ixgbe_release_lock(struct ixgbe_lock *lock)
+{
+ mtx_unlock(&lock->mutex);
+}
+
+void
+ixgbe_destroy_lock(struct ixgbe_lock *lock)
+{
+ if (mtx_initialized(&lock->mutex))
+ mtx_destroy(&lock->mutex);
+}
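+
+/*
+ * Minimal usage sketch for the wrappers above (the lock variable and
+ * call site are hypothetical). MTX_DUPOK lets WITNESS tolerate holding
+ * two locks of this type at once.
+ *
+ *	struct ixgbe_lock aci_lock;
+ *
+ *	ixgbe_init_lock(&aci_lock);
+ *	ixgbe_acquire_lock(&aci_lock);
+ *	... access ACI descriptor registers ...
+ *	ixgbe_release_lock(&aci_lock);
+ *	ixgbe_destroy_lock(&aci_lock);
+ */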
diff --git a/sys/dev/ixgbe/ixgbe_osdep.h b/sys/dev/ixgbe/ixgbe_osdep.h
index cf7c578fd684..8cf1d13736ce 100644
--- a/sys/dev/ixgbe/ixgbe_osdep.h
+++ b/sys/dev/ixgbe/ixgbe_osdep.h
@@ -133,7 +133,9 @@ enum {
/* XXX these need to be revisited */
#define IXGBE_CPU_TO_LE16 htole16
#define IXGBE_CPU_TO_LE32 htole32
+#define IXGBE_LE16_TO_CPU le16toh
#define IXGBE_LE32_TO_CPU le32toh
+#define IXGBE_LE64_TO_CPU le64toh
#define IXGBE_LE32_TO_CPUS(x) *(x) = le32dec(x)
#define IXGBE_CPU_TO_BE16 htobe16
#define IXGBE_CPU_TO_BE32 htobe32
@@ -146,6 +148,7 @@ typedef int16_t s16;
typedef uint32_t u32;
typedef int32_t s32;
typedef uint64_t u64;
+typedef int64_t s64;
#ifndef __bool_true_false_are_defined
typedef boolean_t bool;
#endif
@@ -195,6 +198,11 @@ struct ixgbe_osdep
bus_space_handle_t mem_bus_space_handle;
};
+struct ixgbe_lock
+{
+ struct mtx mutex;
+};
+
/* These routines need struct ixgbe_hw declared */
struct ixgbe_hw;
device_t ixgbe_dev_from_hw(struct ixgbe_hw *hw);
@@ -222,4 +230,27 @@ extern void ixgbe_write_reg_array(struct ixgbe_hw *, u32, u32, u32);
#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, val) \
ixgbe_write_reg_array(a, reg, offset, val)
+void ixgbe_init_lock(struct ixgbe_lock *);
+void ixgbe_destroy_lock(struct ixgbe_lock *);
+void ixgbe_acquire_lock(struct ixgbe_lock *);
+void ixgbe_release_lock(struct ixgbe_lock *);
+
+static inline void *
+ixgbe_calloc(struct ixgbe_hw __unused *hw, size_t count, size_t size)
+{
+ return (malloc(count * size, M_DEVBUF, M_ZERO | M_NOWAIT));
+}
+
+static inline void *
+ixgbe_malloc(struct ixgbe_hw __unused *hw, size_t size)
+{
+ return (malloc(size, M_DEVBUF, M_ZERO | M_NOWAIT));
+}
+
+static inline void
+ixgbe_free(struct ixgbe_hw __unused *hw, void *addr)
+{
+ free(addr, M_DEVBUF);
+}
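+
+/*
+ * Note: these wrappers allocate with M_NOWAIT, so they may return NULL
+ * under memory pressure and callers must check, e.g. (sketch with a
+ * hypothetical element count):
+ *
+ *	u16 *buf = ixgbe_calloc(hw, 16, sizeof(u16));
+ *
+ *	if (buf == NULL)
+ *		return IXGBE_ERR_OUT_OF_MEM;
+ *	...
+ *	ixgbe_free(hw, buf);
+ */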
+
#endif /* _IXGBE_OSDEP_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_type.h b/sys/dev/ixgbe/ixgbe_type.h
index 91b46da72c75..0bbe7806d41d 100644
--- a/sys/dev/ixgbe/ixgbe_type.h
+++ b/sys/dev/ixgbe/ixgbe_type.h
@@ -74,6 +74,7 @@
*/
#include "ixgbe_osdep.h"
+#include "ixgbe_type_e610.h"
/* Override this by setting IOMEM in your ixgbe_osdep.h header */
#define IOMEM
@@ -150,12 +151,19 @@
#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0
+#define IXGBE_DEV_ID_E610_BACKPLANE 0x57AE
+#define IXGBE_DEV_ID_E610_SFP 0x57AF
+#define IXGBE_DEV_ID_E610_10G_T 0x57B0
+#define IXGBE_DEV_ID_E610_2_5G_T 0x57B1
+#define IXGBE_DEV_ID_E610_SGMII 0x57B2
#define IXGBE_DEV_ID_X550_VF_HV 0x1564
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5
#define IXGBE_DEV_ID_X550EM_A_VF_HV 0x15B4
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+#define IXGBE_DEV_ID_E610_VF 0x57AD
+#define IXGBE_SUBDEV_ID_E610_VF_HV 0x0001
#define IXGBE_CAT(r, m) IXGBE_##r##m
@@ -1969,6 +1977,7 @@ enum {
#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
+#define IXGBE_EICR_FW_EVENT 0x00200000 /* Async FW event */
#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */
@@ -2004,6 +2013,7 @@ enum {
#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EICS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
@@ -2025,6 +2035,7 @@ enum {
#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */
#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
@@ -2047,6 +2058,7 @@ enum {
#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMC_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
@@ -2454,6 +2466,7 @@ enum {
#define IXGBE_82599_SERIAL_NUMBER_MAC_ADDR 0x11
#define IXGBE_X550_SERIAL_NUMBER_MAC_ADDR 0x04
+#define IXGBE_PCIE_MSIX_E610_CAPS 0xB2
#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
#define IXGBE_MAX_MSIX_VECTORS_82599 0x40
#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
@@ -2571,6 +2584,7 @@ enum {
#define IXGBE_PCI_DEVICE_STATUS 0xAA
#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
#define IXGBE_PCI_LINK_STATUS 0xB2
+#define IXGBE_PCI_LINK_STATUS_E610 0x82
#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
#define IXGBE_PCI_LINK_WIDTH 0x3F0
#define IXGBE_PCI_LINK_WIDTH_1 0x10
@@ -2581,6 +2595,7 @@ enum {
#define IXGBE_PCI_LINK_SPEED_2500 0x1
#define IXGBE_PCI_LINK_SPEED_5000 0x2
#define IXGBE_PCI_LINK_SPEED_8000 0x3
+#define IXGBE_PCI_LINK_SPEED_16000 0x4
#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
@@ -3743,6 +3758,8 @@ enum ixgbe_mac_type {
ixgbe_mac_X550_vf,
ixgbe_mac_X550EM_x_vf,
ixgbe_mac_X550EM_a_vf,
+ ixgbe_mac_E610,
+ ixgbe_mac_E610_vf,
ixgbe_num_macs
};
@@ -3822,7 +3839,9 @@ enum ixgbe_media_type {
ixgbe_media_type_copper,
ixgbe_media_type_backplane,
ixgbe_media_type_cx4,
- ixgbe_media_type_virtual
+ ixgbe_media_type_virtual,
+ ixgbe_media_type_da,
+ ixgbe_media_type_aui
};
/* Flow Control Settings */
@@ -3831,6 +3850,7 @@ enum ixgbe_fc_mode {
ixgbe_fc_rx_pause,
ixgbe_fc_tx_pause,
ixgbe_fc_full,
+ ixgbe_fc_auto,
ixgbe_fc_default
};
@@ -3863,6 +3883,7 @@ enum ixgbe_bus_speed {
ixgbe_bus_speed_2500 = 2500,
ixgbe_bus_speed_5000 = 5000,
ixgbe_bus_speed_8000 = 8000,
+ ixgbe_bus_speed_16000 = 16000,
ixgbe_bus_speed_reserved
};
@@ -4007,6 +4028,7 @@ struct ixgbe_eeprom_operations {
s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
s32 (*update_checksum)(struct ixgbe_hw *);
s32 (*calc_checksum)(struct ixgbe_hw *);
+ s32 (*read_pba_string)(struct ixgbe_hw *, u8 *, u32);
};
struct ixgbe_mac_operations {
@@ -4118,6 +4140,10 @@ struct ixgbe_mac_operations {
void (*mdd_event)(struct ixgbe_hw *hw, u32 *vf_bitmap);
void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf);
bool (*fw_recovery_mode)(struct ixgbe_hw *hw);
+ bool (*fw_rollback_mode)(struct ixgbe_hw *hw);
+ bool (*get_fw_tsam_mode)(struct ixgbe_hw *hw);
+ s32 (*get_fw_version)(struct ixgbe_hw *hw);
+ s32 (*get_nvm_version)(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
};
struct ixgbe_phy_operations {
@@ -4162,6 +4188,9 @@ struct ixgbe_link_operations {
struct ixgbe_link_info {
struct ixgbe_link_operations ops;
u8 addr;
+ struct ixgbe_link_status link_info;
+ struct ixgbe_link_status link_info_old;
+ u8 get_link_info;
};
struct ixgbe_eeprom_info {
@@ -4233,6 +4262,9 @@ struct ixgbe_phy_info {
bool reset_if_overtemp;
bool qsfp_shared_i2c_bus;
u32 nw_mng_if_sel;
+ u64 phy_type_low;
+ u64 phy_type_high;
+ struct ixgbe_aci_cmd_set_phy_cfg_data curr_user_phy_cfg;
};
#include "ixgbe_mbx.h"
@@ -4261,6 +4293,22 @@ struct ixgbe_hw {
bool wol_enabled;
bool need_crosstalk_fix;
u32 fw_rst_cnt;
+ u8 api_branch;
+ u8 api_maj_ver;
+ u8 api_min_ver;
+ u8 api_patch;
+ u8 fw_branch;
+ u8 fw_maj_ver;
+ u8 fw_min_ver;
+ u8 fw_patch;
+ u32 fw_build;
+ struct ixgbe_aci_info aci;
+ struct ixgbe_flash_info flash;
+ struct ixgbe_hw_dev_caps dev_caps;
+ struct ixgbe_hw_func_caps func_caps;
+ struct ixgbe_fwlog_cfg fwlog_cfg;
+ bool fwlog_support_ena;
+ struct ixgbe_fwlog_ring fwlog_ring;
};
#define ixgbe_call_func(hw, func, params, error) \
@@ -4312,6 +4360,24 @@ struct ixgbe_hw {
#define IXGBE_ERR_MBX_NOMSG -42
#define IXGBE_ERR_TIMEOUT -43
+#define IXGBE_ERR_NOT_SUPPORTED -45
+#define IXGBE_ERR_OUT_OF_RANGE -46
+
+#define IXGBE_ERR_NVM -50
+#define IXGBE_ERR_NVM_CHECKSUM -51
+#define IXGBE_ERR_BUF_TOO_SHORT -52
+#define IXGBE_ERR_NVM_BLANK_MODE -53
+#define IXGBE_ERR_INVAL_SIZE -54
+#define IXGBE_ERR_DOES_NOT_EXIST -55
+
+#define IXGBE_ERR_ACI_ERROR -100
+#define IXGBE_ERR_ACI_DISABLED -101
+#define IXGBE_ERR_ACI_TIMEOUT -102
+#define IXGBE_ERR_ACI_BUSY -103
+#define IXGBE_ERR_ACI_NO_WORK -104
+#define IXGBE_ERR_ACI_NO_EVENTS -105
+#define IXGBE_ERR_FW_API_VER -106
+
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
@@ -4540,5 +4606,6 @@ struct ixgbe_bypass_eeprom {
#define IXGBE_REQUEST_TASK_FDIR 0x08
#define IXGBE_REQUEST_TASK_PHY 0x10
#define IXGBE_REQUEST_TASK_LSC 0x20
+#define IXGBE_REQUEST_TASK_FWEVENT 0x40
#endif /* _IXGBE_TYPE_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_type_e610.h b/sys/dev/ixgbe/ixgbe_type_e610.h
new file mode 100644
index 000000000000..e300030c3ba4
--- /dev/null
+++ b/sys/dev/ixgbe/ixgbe_type_e610.h
@@ -0,0 +1,2278 @@
+/******************************************************************************
+ SPDX-License-Identifier: BSD-3-Clause
+
+ Copyright (c) 2025, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+#ifndef _IXGBE_TYPE_E610_H_
+#define _IXGBE_TYPE_E610_H_
+
+
+/* Generic defines */
+#ifndef BIT
+#define BIT(a) (1UL << (a))
+#endif /* !BIT */
+#ifndef BIT_ULL
+#define BIT_ULL(a) (1ULL << (a))
+#endif /* !BIT_ULL */
+#ifndef BITS_PER_BYTE
+#define BITS_PER_BYTE 8
+#endif /* !BITS_PER_BYTE */
+#ifndef DIVIDE_AND_ROUND_UP
+#define DIVIDE_AND_ROUND_UP(a, b) (((a) + (b) - 1) / (b))
+#endif /* !DIVIDE_AND_ROUND_UP */
+
+#ifndef ROUND_UP
+/**
+ * ROUND_UP - round up to next arbitrary multiple (not a power of 2)
+ * @a: value to round up
+ * @b: arbitrary multiple
+ *
+ * Round up to the next multiple of the arbitrary value b.
+ */
+#define ROUND_UP(a, b) ((b) * DIVIDE_AND_ROUND_UP((a), (b)))
+#endif /* !ROUND_UP */
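+
+/*
+ * Worked example: DIVIDE_AND_ROUND_UP(10, 4) = (10 + 4 - 1) / 4 = 3,
+ * so ROUND_UP(10, 4) = 4 * 3 = 12, i.e. 10 rounded up to the next
+ * multiple of 4.
+ */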
+
+#define MAKEMASK(mask, shift) ((mask) << (shift))
+
+#define BYTES_PER_WORD 2
+#define BYTES_PER_DWORD 4
+
+#ifndef BITS_PER_LONG
+#define BITS_PER_LONG 64
+#endif /* !BITS_PER_LONG */
+#ifndef BITS_PER_LONG_LONG
+#define BITS_PER_LONG_LONG 64
+#endif /* !BITS_PER_LONG_LONG */
+#undef GENMASK
+#define GENMASK(h, l) \
+ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#undef GENMASK_ULL
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
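+
+/*
+ * Example (assuming the 64-bit BITS_PER_LONG above): GENMASK(7, 4)
+ * evaluates to ((~0UL) << 4) & (~0UL >> 56) = 0xF0, a mask covering
+ * bits 7..4 inclusive.
+ */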
+
+/* Data type manipulation macros. */
+#define HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
+#define LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
+#define HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF))
+#define LO_WORD(x) ((u16)((x) & 0xFFFF))
+#define HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF))
+#define LO_BYTE(x) ((u8)((x) & 0xFF))
+
+#ifndef MIN_T
+#define MIN_T(_t, _a, _b) min((_t)(_a), (_t)(_b))
+#endif
+
+#define IS_ASCII(_ch) ((_ch) < 0x80)
+
+/**
+ * ixgbe_struct_size - size of struct with C99 flexible array member
+ * @ptr: pointer to structure
+ * @field: flexible array member (last member of the structure)
+ * @num: number of elements of that flexible array member
+ */
+#define ixgbe_struct_size(ptr, field, num) \
+ (sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))
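+
+/*
+ * Example with a hypothetical flexible-array struct:
+ *
+ *	struct foo {
+ *		u16 count;
+ *		u16 elems[];
+ *	};
+ *	struct foo *f;
+ *
+ * ixgbe_struct_size(f, elems, 4) expands to
+ * sizeof(*f) + sizeof(*f->elems) * 4, the allocation size for a foo
+ * carrying four elems entries.
+ */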
+
+/* General E610 defines */
+#define IXGBE_MAX_VSI 768
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define E610_SR_VPD_SIZE_WORDS 512
+#define E610_SR_PCIE_ALT_SIZE_WORDS 512
+
+/* Checksum and Shadow RAM pointers */
+#define E610_SR_NVM_DEV_STARTER_VER 0x18
+#define E610_NVM_VER_LO_SHIFT 0
+#define E610_NVM_VER_LO_MASK (0xff << E610_NVM_VER_LO_SHIFT)
+#define E610_NVM_VER_HI_SHIFT 12
+#define E610_NVM_VER_HI_MASK (0xf << E610_NVM_VER_HI_SHIFT)
+#define E610_SR_NVM_MAP_VER 0x29
+#define E610_SR_NVM_EETRACK_LO 0x2D
+#define E610_SR_NVM_EETRACK_HI 0x2E
+#define E610_SR_VPD_PTR 0x2F
+#define E610_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
+#define E610_SR_SW_CHECKSUM_WORD 0x3F
+#define E610_SR_PFA_PTR 0x40
+#define E610_SR_1ST_NVM_BANK_PTR 0x42
+#define E610_SR_NVM_BANK_SIZE 0x43
+#define E610_SR_1ST_OROM_BANK_PTR 0x44
+#define E610_SR_OROM_BANK_SIZE 0x45
+#define E610_SR_NETLIST_BANK_PTR 0x46
+#define E610_SR_NETLIST_BANK_SIZE 0x47
+#define E610_SR_POINTER_TYPE_BIT BIT(15)
+#define E610_SR_POINTER_MASK 0x7fff
+#define E610_SR_HALF_4KB_SECTOR_UNITS 2048
+#define E610_GET_PFA_POINTER_IN_WORDS(offset) \
+ ((((offset) & E610_SR_POINTER_TYPE_BIT) == E610_SR_POINTER_TYPE_BIT) ? \
+ (((offset) & E610_SR_POINTER_MASK) * E610_SR_HALF_4KB_SECTOR_UNITS) : \
+ ((offset) & E610_SR_POINTER_MASK))
+
+/* Checksum and Shadow RAM pointers */
+#define E610_SR_NVM_CTRL_WORD 0x00
+#define E610_SR_PBA_BLOCK_PTR 0x16
+
+/* The Orom version topology */
+#define IXGBE_OROM_VER_PATCH_SHIFT 0
+#define IXGBE_OROM_VER_PATCH_MASK (0xff << IXGBE_OROM_VER_PATCH_SHIFT)
+#define IXGBE_OROM_VER_BUILD_SHIFT 8
+#define IXGBE_OROM_VER_BUILD_MASK (0xffff << IXGBE_OROM_VER_BUILD_SHIFT)
+#define IXGBE_OROM_VER_SHIFT 24
+#define IXGBE_OROM_VER_MASK (0xff << IXGBE_OROM_VER_SHIFT)
+
+/* CSS Header words */
+#define IXGBE_NVM_CSS_HDR_LEN_L 0x02
+#define IXGBE_NVM_CSS_HDR_LEN_H 0x03
+#define IXGBE_NVM_CSS_SREV_L 0x14
+#define IXGBE_NVM_CSS_SREV_H 0x15
+
+/* Length of Authentication header section in words */
+#define IXGBE_NVM_AUTH_HEADER_LEN 0x08
+
+/* The Netlist ID Block is located after all of the Link Topology nodes. */
+#define IXGBE_NETLIST_ID_BLK_SIZE 0x30
+#define IXGBE_NETLIST_ID_BLK_OFFSET(n) IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0004 + 2 * (n))
+
+/* netlist ID block field offsets (word offsets) */
+#define IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW 0x02
+#define IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH 0x03
+#define IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW 0x04
+#define IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH 0x05
+#define IXGBE_NETLIST_ID_BLK_TYPE_LOW 0x06
+#define IXGBE_NETLIST_ID_BLK_TYPE_HIGH 0x07
+#define IXGBE_NETLIST_ID_BLK_REV_LOW 0x08
+#define IXGBE_NETLIST_ID_BLK_REV_HIGH 0x09
+#define IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(n) (0x0A + (n))
+#define IXGBE_NETLIST_ID_BLK_CUST_VER 0x2F
+
+/* The Link Topology Netlist section is stored as a series of words. It is
+ * stored in the NVM as a TLV, with the first two words containing the type
+ * and length.
+ */
+#define IXGBE_NETLIST_LINK_TOPO_MOD_ID 0x011B
+#define IXGBE_NETLIST_TYPE_OFFSET 0x0000
+#define IXGBE_NETLIST_LEN_OFFSET 0x0001
+
+/* The Link Topology section follows the TLV header. When reading the netlist
+ * using ixgbe_read_netlist_module, we need to account for the 2-word TLV
+ * header.
+ */
+#define IXGBE_NETLIST_LINK_TOPO_OFFSET(n) ((n) + 2)
+#define IXGBE_LINK_TOPO_MODULE_LEN IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0000)
+#define IXGBE_LINK_TOPO_NODE_COUNT IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0001)
+#define IXGBE_LINK_TOPO_NODE_COUNT_M MAKEMASK(0x3FF, 0)
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define IXGBE_SR_CTRL_WORD_1_S 0x06
+#define IXGBE_SR_CTRL_WORD_1_M (0x03 << IXGBE_SR_CTRL_WORD_1_S)
+#define IXGBE_SR_CTRL_WORD_VALID 0x1
+#define IXGBE_SR_CTRL_WORD_OROM_BANK BIT(3)
+#define IXGBE_SR_CTRL_WORD_NETLIST_BANK BIT(4)
+#define IXGBE_SR_CTRL_WORD_NVM_BANK BIT(5)
+#define IXGBE_SR_NVM_PTR_4KB_UNITS BIT(15)
+
+/* These macros extract particular parts of the NVM version (the major
+ * and the minor version) from the NVM Image Revision word.
+ */
+#define E610_NVM_MAJOR_VER(x) (((x) & 0xF000) >> 12)
+#define E610_NVM_MINOR_VER(x) ((x) & 0x00FF)
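+
+/*
+ * Example: for an NVM Image Revision word of 0x3012,
+ * E610_NVM_MAJOR_VER(0x3012) = (0x3012 & 0xF000) >> 12 = 3 and
+ * E610_NVM_MINOR_VER(0x3012) = 0x3012 & 0x00FF = 0x12.
+ */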
+
+/* Shadow RAM related */
+#define IXGBE_SR_SECTOR_SIZE_IN_WORDS 0x800
+#define IXGBE_SR_WORDS_IN_1KB 512
+/* Checksum should be calculated such that after adding all the words,
+ * including the checksum word itself, the sum should be 0xBABA.
+ */
+#define IXGBE_SR_SW_CHECKSUM_BASE 0xBABA
+
+/* Netlist */
+#define IXGBE_MAX_NETLIST_SIZE 10
+
+/* General registers */
+
+/* Firmware Status Register (GL_FWSTS) */
+#define GL_FWSTS 0x00083048 /* Reset Source: POR */
+#define GL_FWSTS_FWS0B_S 0
+#define GL_FWSTS_FWS0B_M MAKEMASK(0xFF, 0)
+#define GL_FWSTS_FWROWD_S 8
+#define GL_FWSTS_FWROWD_M BIT(8)
+#define GL_FWSTS_FWRI_S 9
+#define GL_FWSTS_FWRI_M BIT(9)
+#define GL_FWSTS_FWS1B_S 16
+#define GL_FWSTS_FWS1B_M MAKEMASK(0xFF, 16)
+#define GL_FWSTS_EP_PF0 BIT(24)
+#define GL_FWSTS_EP_PF1 BIT(25)
+
+/* Recovery mode values of Firmware Status 1 Byte (FWS1B) bitfield */
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_CORER_LEGACY 0x0B
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_GLOBR_LEGACY 0x0C
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_CORER 0x30
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_GLOBR 0x31
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_TRANSITION 0x32
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_NVM 0x33
+
+/* Firmware Status (GL_MNG_FWSM) */
+#define GL_MNG_FWSM 0x000B6134 /* Reset Source: POR */
+#define GL_MNG_FWSM_FW_MODES_S 0
+#define GL_MNG_FWSM_FW_MODES_M MAKEMASK(0x7, 0)
+#define GL_MNG_FWSM_RSV0_S 2
+#define GL_MNG_FWSM_RSV0_M MAKEMASK(0xFF, 2)
+#define GL_MNG_FWSM_EEP_RELOAD_IND_S 10
+#define GL_MNG_FWSM_EEP_RELOAD_IND_M BIT(10)
+#define GL_MNG_FWSM_RSV1_S 11
+#define GL_MNG_FWSM_RSV1_M MAKEMASK(0xF, 11)
+#define GL_MNG_FWSM_RSV2_S 15
+#define GL_MNG_FWSM_RSV2_M BIT(15)
+#define GL_MNG_FWSM_PCIR_AL_FAILURE_S 16
+#define GL_MNG_FWSM_PCIR_AL_FAILURE_M BIT(16)
+#define GL_MNG_FWSM_POR_AL_FAILURE_S 17
+#define GL_MNG_FWSM_POR_AL_FAILURE_M BIT(17)
+#define GL_MNG_FWSM_RSV3_S 18
+#define GL_MNG_FWSM_RSV3_M BIT(18)
+#define GL_MNG_FWSM_EXT_ERR_IND_S 19
+#define GL_MNG_FWSM_EXT_ERR_IND_M MAKEMASK(0x3F, 19)
+#define GL_MNG_FWSM_RSV4_S 25
+#define GL_MNG_FWSM_RSV4_M BIT(25)
+#define GL_MNG_FWSM_RESERVED_11_S 26
+#define GL_MNG_FWSM_RESERVED_11_M MAKEMASK(0xF, 26)
+#define GL_MNG_FWSM_RSV5_S 30
+#define GL_MNG_FWSM_RSV5_M MAKEMASK(0x3, 30)
+
+/* FW mode indications */
+#define GL_MNG_FWSM_FW_MODES_DEBUG_M BIT(0)
+#define GL_MNG_FWSM_FW_MODES_RECOVERY_M BIT(1)
+#define GL_MNG_FWSM_FW_MODES_ROLLBACK_M BIT(2)
+
+/* Global NVM General Status Register */
+#define GLNVM_GENS 0x000B6100 /* Reset Source: POR */
+#define GLNVM_GENS_NVM_PRES_S 0
+#define GLNVM_GENS_NVM_PRES_M BIT(0)
+#define GLNVM_GENS_SR_SIZE_S 5
+#define GLNVM_GENS_SR_SIZE_M MAKEMASK(0x7, 5)
+#define GLNVM_GENS_BANK1VAL_S 8
+#define GLNVM_GENS_BANK1VAL_M BIT(8)
+#define GLNVM_GENS_ALT_PRST_S 23
+#define GLNVM_GENS_ALT_PRST_M BIT(23)
+#define GLNVM_GENS_FL_AUTO_RD_S 25
+#define GLNVM_GENS_FL_AUTO_RD_M BIT(25)
+
+/* Flash Access Register */
+#define GLNVM_FLA 0x000B6108 /* Reset Source: POR */
+#define GLNVM_FLA_LOCKED_S 6
+#define GLNVM_FLA_LOCKED_M BIT(6)
+
+/* Bit Bang registers */
+#define RDASB_MSGCTL 0x000B6820
+#define RDASB_MSGCTL_HDR_DWS_S 0
+#define RDASB_MSGCTL_EXP_RDW_S 8
+#define RDASB_MSGCTL_CMDV_M BIT(31)
+#define RDASB_RSPCTL 0x000B6824
+#define RDASB_RSPCTL_BAD_LENGTH_M BIT(30)
+#define RDASB_RSPCTL_NOT_SUCCESS_M BIT(31)
+#define RDASB_WHDR0 0x000B68F4
+#define RDASB_WHDR1 0x000B68F8
+#define RDASB_WHDR2 0x000B68FC
+#define RDASB_WHDR3 0x000B6900
+#define RDASB_WHDR4 0x000B6904
+#define RDASB_RHDR0 0x000B6AFC
+#define RDASB_RHDR0_RESPONSE_S 27
+#define RDASB_RHDR0_RESPONSE_M MAKEMASK(0x7, 27)
+#define RDASB_RDATA0 0x000B6B00
+#define RDASB_RDATA1 0x000B6B04
+
+/* SPI Registers */
+#define SPISB_MSGCTL 0x000B7020
+#define SPISB_MSGCTL_HDR_DWS_S 0
+#define SPISB_MSGCTL_EXP_RDW_S 8
+#define SPISB_MSGCTL_MSG_MODE_S 26
+#define SPISB_MSGCTL_TOKEN_MODE_S 28
+#define SPISB_MSGCTL_BARCLR_S 30
+#define SPISB_MSGCTL_CMDV_S 31
+#define SPISB_MSGCTL_CMDV_M BIT(31)
+#define SPISB_RSPCTL 0x000B7024
+#define SPISB_RSPCTL_BAD_LENGTH_M BIT(30)
+#define SPISB_RSPCTL_NOT_SUCCESS_M BIT(31)
+#define SPISB_WHDR0 0x000B70F4
+#define SPISB_WHDR0_DEST_SEL_S 12
+#define SPISB_WHDR0_OPCODE_SEL_S 16
+#define SPISB_WHDR0_TAG_S 24
+#define SPISB_WHDR1 0x000B70F8
+#define SPISB_WHDR2 0x000B70FC
+#define SPISB_RDATA 0x000B7300
+#define SPISB_WDATA 0x000B7100
+
+/* Firmware Reset Count register */
+#define GL_FWRESETCNT 0x00083100 /* Reset Source: POR */
+#define GL_FWRESETCNT_FWRESETCNT_S 0
+#define GL_FWRESETCNT_FWRESETCNT_M MAKEMASK(0xFFFFFFFF, 0)
+
+/* Admin Command Interface (ACI) registers */
+#define PF_HIDA(_i) (0x00085000 + ((_i) * 4))
+#define PF_HIDA_2(_i) (0x00085020 + ((_i) * 4))
+#define PF_HIBA(_i) (0x00084000 + ((_i) * 4))
+#define PF_HICR 0x00082048
+
+#define PF_HIDA_MAX_INDEX 15
+#define PF_HIBA_MAX_INDEX 1023
+
+#define PF_HICR_EN BIT(0)
+#define PF_HICR_C BIT(1)
+#define PF_HICR_SV BIT(2)
+#define PF_HICR_EV BIT(3)
+
+#define GL_HIDA(_i) (0x00082000 + ((_i) * 4))
+#define GL_HIDA_2(_i) (0x00082020 + ((_i) * 4))
+#define GL_HIBA(_i) (0x00081000 + ((_i) * 4))
+#define GL_HICR 0x00082040
+
+#define GL_HIDA_MAX_INDEX 15
+#define GL_HIBA_MAX_INDEX 1023
+
+#define GL_HICR_C BIT(1)
+#define GL_HICR_SV BIT(2)
+#define GL_HICR_EV BIT(3)
+
+#define GL_HICR_EN 0x00082044
+
+#define GL_HICR_EN_CHECK BIT(0)
+
+/* Admin Command Interface (ACI) defines */
+/* Defines that help manage the driver vs FW API checks.
+ */
+#define IXGBE_FW_API_VER_BRANCH 0x00
+#define IXGBE_FW_API_VER_MAJOR 0x01
+#define IXGBE_FW_API_VER_MINOR 0x07
+#define IXGBE_FW_API_VER_DIFF_ALLOWED 0x02
+
+#define IXGBE_ACI_DESC_SIZE 32
+#define IXGBE_ACI_DESC_SIZE_IN_DWORDS (IXGBE_ACI_DESC_SIZE / BYTES_PER_DWORD)
+
+#define IXGBE_ACI_MAX_BUFFER_SIZE 4096 /* Size in bytes */
+#define IXGBE_ACI_DESC_COOKIE_L_DWORD_OFFSET 3
+#define IXGBE_ACI_SEND_DELAY_TIME_MS 10
+#define IXGBE_ACI_SEND_MAX_EXECUTE 3
+/* [ms] timeout of waiting for sync response */
+#define IXGBE_ACI_SYNC_RESPONSE_TIMEOUT 100000
+/* [ms] timeout of waiting for async response */
+#define IXGBE_ACI_ASYNC_RESPONSE_TIMEOUT 150000
+/* [ms] timeout of waiting for resource release */
+#define IXGBE_ACI_RELEASE_RES_TIMEOUT 10000
+
+/* Timestamp spacing for Tools ACI: queue is active if spacing is within the range [LO..HI] */
+#define IXGBE_TOOLS_ACI_ACTIVE_STAMP_SPACING_LO 0
+#define IXGBE_TOOLS_ACI_ACTIVE_STAMP_SPACING_HI 200
+
+/* Timestamp spacing for Tools ACI: queue is expired if spacing is outside the range [LO..HI] */
+#define IXGBE_TOOLS_ACI_EXPIRED_STAMP_SPACING_LO -5
+#define IXGBE_TOOLS_ACI_EXPIRED_STAMP_SPACING_HI 205
+
+/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */
+#define IXGBE_ACI_LG_BUF 512
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets */
+#define IXGBE_ACI_FLAG_DD_S 0
+#define IXGBE_ACI_FLAG_CMP_S 1
+#define IXGBE_ACI_FLAG_ERR_S 2
+#define IXGBE_ACI_FLAG_VFE_S 3
+#define IXGBE_ACI_FLAG_LB_S 9
+#define IXGBE_ACI_FLAG_RD_S 10
+#define IXGBE_ACI_FLAG_VFC_S 11
+#define IXGBE_ACI_FLAG_BUF_S 12
+#define IXGBE_ACI_FLAG_SI_S 13
+#define IXGBE_ACI_FLAG_EI_S 14
+#define IXGBE_ACI_FLAG_FE_S 15
+
+#define IXGBE_ACI_FLAG_DD BIT(IXGBE_ACI_FLAG_DD_S) /* 0x1 */
+#define IXGBE_ACI_FLAG_CMP BIT(IXGBE_ACI_FLAG_CMP_S) /* 0x2 */
+#define IXGBE_ACI_FLAG_ERR BIT(IXGBE_ACI_FLAG_ERR_S) /* 0x4 */
+#define IXGBE_ACI_FLAG_VFE BIT(IXGBE_ACI_FLAG_VFE_S) /* 0x8 */
+#define IXGBE_ACI_FLAG_LB BIT(IXGBE_ACI_FLAG_LB_S) /* 0x200 */
+#define IXGBE_ACI_FLAG_RD BIT(IXGBE_ACI_FLAG_RD_S) /* 0x400 */
+#define IXGBE_ACI_FLAG_VFC BIT(IXGBE_ACI_FLAG_VFC_S) /* 0x800 */
+#define IXGBE_ACI_FLAG_BUF BIT(IXGBE_ACI_FLAG_BUF_S) /* 0x1000 */
+#define IXGBE_ACI_FLAG_SI BIT(IXGBE_ACI_FLAG_SI_S) /* 0x2000 */
+#define IXGBE_ACI_FLAG_EI BIT(IXGBE_ACI_FLAG_EI_S) /* 0x4000 */
+#define IXGBE_ACI_FLAG_FE BIT(IXGBE_ACI_FLAG_FE_S) /* 0x8000 */
+
+/* Admin Command Interface (ACI) error codes */
+enum ixgbe_aci_err {
+ IXGBE_ACI_RC_OK = 0, /* Success */
+ IXGBE_ACI_RC_EPERM = 1, /* Operation not permitted */
+ IXGBE_ACI_RC_ENOENT = 2, /* No such element */
+ IXGBE_ACI_RC_ESRCH = 3, /* Bad opcode */
+ IXGBE_ACI_RC_EINTR = 4, /* Operation interrupted */
+ IXGBE_ACI_RC_EIO = 5, /* I/O error */
+ IXGBE_ACI_RC_ENXIO = 6, /* No such resource */
+ IXGBE_ACI_RC_E2BIG = 7, /* Arg too long */
+ IXGBE_ACI_RC_EAGAIN = 8, /* Try again */
+ IXGBE_ACI_RC_ENOMEM = 9, /* Out of memory */
+ IXGBE_ACI_RC_EACCES = 10, /* Permission denied */
+ IXGBE_ACI_RC_EFAULT = 11, /* Bad address */
+ IXGBE_ACI_RC_EBUSY = 12, /* Device or resource busy */
+ IXGBE_ACI_RC_EEXIST = 13, /* Object already exists */
+ IXGBE_ACI_RC_EINVAL = 14, /* Invalid argument */
+ IXGBE_ACI_RC_ENOTTY = 15, /* Not a typewriter */
+ IXGBE_ACI_RC_ENOSPC = 16, /* No space left or allocation failure */
+ IXGBE_ACI_RC_ENOSYS = 17, /* Function not implemented */
+ IXGBE_ACI_RC_ERANGE = 18, /* Parameter out of range */
+ IXGBE_ACI_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ IXGBE_ACI_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ IXGBE_ACI_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ IXGBE_ACI_RC_EFBIG = 22, /* File too big */
+ IXGBE_ACI_RC_ESBCOMP = 23, /* SB-IOSF completion unsuccessful */
+ IXGBE_ACI_RC_ENOSEC = 24, /* Missing security manifest */
+ IXGBE_ACI_RC_EBADSIG = 25, /* Bad RSA signature */
+ IXGBE_ACI_RC_ESVN = 26, /* SVN number prohibits this package */
+ IXGBE_ACI_RC_EBADMAN = 27, /* Manifest hash mismatch */
+ IXGBE_ACI_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */
+ IXGBE_ACI_RC_EACCES_BMCU = 29, /* BMC Update in progress */
+};
+
+/* Admin Command Interface (ACI) opcodes */
+enum ixgbe_aci_opc {
+ ixgbe_aci_opc_get_ver = 0x0001,
+ ixgbe_aci_opc_driver_ver = 0x0002,
+ ixgbe_aci_opc_get_exp_err = 0x0005,
+
+ /* resource ownership */
+ ixgbe_aci_opc_req_res = 0x0008,
+ ixgbe_aci_opc_release_res = 0x0009,
+
+ /* device/function capabilities */
+ ixgbe_aci_opc_list_func_caps = 0x000A,
+ ixgbe_aci_opc_list_dev_caps = 0x000B,
+
+ /* safe disable of RXEN */
+ ixgbe_aci_opc_disable_rxen = 0x000C,
+
+ /* FW events */
+ ixgbe_aci_opc_get_fw_event = 0x0014,
+
+ /* PHY commands */
+ ixgbe_aci_opc_get_phy_caps = 0x0600,
+ ixgbe_aci_opc_set_phy_cfg = 0x0601,
+ ixgbe_aci_opc_restart_an = 0x0605,
+ ixgbe_aci_opc_get_link_status = 0x0607,
+ ixgbe_aci_opc_set_event_mask = 0x0613,
+ ixgbe_aci_opc_get_link_topo = 0x06E0,
+ ixgbe_aci_opc_read_i2c = 0x06E2,
+ ixgbe_aci_opc_write_i2c = 0x06E3,
+ ixgbe_aci_opc_read_mdio = 0x06E4,
+ ixgbe_aci_opc_write_mdio = 0x06E5,
+ ixgbe_aci_opc_set_gpio_by_func = 0x06E6,
+ ixgbe_aci_opc_get_gpio_by_func = 0x06E7,
+ ixgbe_aci_opc_set_port_id_led = 0x06E9,
+ ixgbe_aci_opc_set_gpio = 0x06EC,
+ ixgbe_aci_opc_get_gpio = 0x06ED,
+ ixgbe_aci_opc_sff_eeprom = 0x06EE,
+ ixgbe_aci_opc_prog_topo_dev_nvm = 0x06F2,
+ ixgbe_aci_opc_read_topo_dev_nvm = 0x06F3,
+
+ /* NVM commands */
+ ixgbe_aci_opc_nvm_read = 0x0701,
+ ixgbe_aci_opc_nvm_erase = 0x0702,
+ ixgbe_aci_opc_nvm_write = 0x0703,
+ ixgbe_aci_opc_nvm_cfg_read = 0x0704,
+ ixgbe_aci_opc_nvm_cfg_write = 0x0705,
+ ixgbe_aci_opc_nvm_checksum = 0x0706,
+ ixgbe_aci_opc_nvm_write_activate = 0x0707,
+ ixgbe_aci_opc_nvm_sr_dump = 0x0707,
+ ixgbe_aci_opc_nvm_save_factory_settings = 0x0708,
+ ixgbe_aci_opc_nvm_update_empr = 0x0709,
+ ixgbe_aci_opc_nvm_pkg_data = 0x070A,
+ ixgbe_aci_opc_nvm_pass_component_tbl = 0x070B,
+ ixgbe_aci_opc_nvm_sanitization = 0x070C,
+
+ /* Alternate Structure Commands */
+ ixgbe_aci_opc_write_alt_direct = 0x0900,
+ ixgbe_aci_opc_write_alt_indirect = 0x0901,
+ ixgbe_aci_opc_read_alt_direct = 0x0902,
+ ixgbe_aci_opc_read_alt_indirect = 0x0903,
+ ixgbe_aci_opc_done_alt_write = 0x0904,
+ ixgbe_aci_opc_clear_port_alt_write = 0x0906,
+
+ ixgbe_aci_opc_temp_tca_event = 0x0C94,
+
+ /* debug commands */
+ ixgbe_aci_opc_debug_dump_internals = 0xFF08,
+
+ /* SystemDiagnostic commands */
+ ixgbe_aci_opc_set_health_status_config = 0xFF20,
+ ixgbe_aci_opc_get_supported_health_status_codes = 0xFF21,
+ ixgbe_aci_opc_get_health_status = 0xFF22,
+ ixgbe_aci_opc_clear_health_status = 0xFF23,
+
+ /* FW Logging Commands */
+ ixgbe_aci_opc_fw_logs_config = 0xFF30,
+ ixgbe_aci_opc_fw_logs_register = 0xFF31,
+ ixgbe_aci_opc_fw_logs_query = 0xFF32,
+ ixgbe_aci_opc_fw_logs_event = 0xFF33,
+ ixgbe_aci_opc_fw_logs_get = 0xFF34,
+ ixgbe_aci_opc_fw_logs_clear = 0xFF35
+};
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define IXGBE_CHECK_STRUCT_LEN(n, X) enum ixgbe_static_assert_enum_##X \
+ { ixgbe_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used to generate a compilation error if a variable-length
+ * structure is not exactly the correct length assuming a single element of
+ * the variable-length object as the last element of the structure. It gives
+ * a divide by zero error if the structure is not of the correct size,
+ * otherwise it creates an enum that is never used.
+ */
+#define IXGBE_CHECK_VAR_LEN_STRUCT_LEN(n, X, T) enum ixgbe_static_assert_enum_##X \
+ { ixgbe_static_assert_##X = (n) / \
+ (((sizeof(struct X) + sizeof(T)) == (n)) ? 1 : 0) }
+
+/* This macro is used to ensure that parameter structures (i.e. structures
+ * in the params union member of struct ixgbe_aci_desc) are 16 bytes in length.
+ *
+ * NOT intended to be used to check the size of an indirect command/response
+ * additional data buffer (e.g. struct foo) which should just happen to be 16
+ * bytes (instead, use IXGBE_CHECK_STRUCT_LEN(16, foo) for that).
+ */
+#define IXGBE_CHECK_PARAM_LEN(X) IXGBE_CHECK_STRUCT_LEN(16, X)
+
+struct ixgbe_aci_cmd_generic {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_generic);
+
+/* Get version (direct 0x0001) */
+struct ixgbe_aci_cmd_get_ver {
+ __le32 rom_ver;
+ __le32 fw_build;
+ u8 fw_branch;
+ u8 fw_major;
+ u8 fw_minor;
+ u8 fw_patch;
+ u8 api_branch;
+ u8 api_major;
+ u8 api_minor;
+ u8 api_patch;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_ver);
+
+#define IXGBE_DRV_VER_STR_LEN_E610 32
+
+struct ixgbe_driver_ver {
+ u8 major_ver;
+ u8 minor_ver;
+ u8 build_ver;
+ u8 subbuild_ver;
+ u8 driver_string[IXGBE_DRV_VER_STR_LEN_E610];
+};
+
+/* Send driver version (indirect 0x0002) */
+struct ixgbe_aci_cmd_driver_ver {
+ u8 major_ver;
+ u8 minor_ver;
+ u8 build_ver;
+ u8 subbuild_ver;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_driver_ver);
+
+/* Get Expanded Error Code (0x0005, direct) */
+struct ixgbe_aci_cmd_get_exp_err {
+ __le32 reason;
+#define IXGBE_ACI_EXPANDED_ERROR_NOT_PROVIDED 0xFFFFFFFF
+ __le32 identifier;
+ u8 rsvd[8];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_exp_err);
+
+/* FW update timeout definitions are in milliseconds */
+#define IXGBE_NVM_TIMEOUT 180000
+#define IXGBE_CHANGE_LOCK_TIMEOUT 1000
+#define IXGBE_GLOBAL_CFG_LOCK_TIMEOUT 3000
+
+enum ixgbe_aci_res_access_type {
+ IXGBE_RES_READ = 1,
+ IXGBE_RES_WRITE
+};
+
+enum ixgbe_aci_res_ids {
+ IXGBE_NVM_RES_ID = 1,
+ IXGBE_SPD_RES_ID,
+ IXGBE_CHANGE_LOCK_RES_ID,
+ IXGBE_GLOBAL_CFG_LOCK_RES_ID
+};
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+struct ixgbe_aci_cmd_req_res {
+ __le16 res_id;
+#define IXGBE_ACI_RES_ID_NVM 1
+#define IXGBE_ACI_RES_ID_SDP 2
+#define IXGBE_ACI_RES_ID_CHNG_LOCK 3
+#define IXGBE_ACI_RES_ID_GLBL_LOCK 4
+ __le16 access_type;
+#define IXGBE_ACI_RES_ACCESS_READ 1
+#define IXGBE_ACI_RES_ACCESS_WRITE 2
+
+ /* Upon successful completion, FW writes this value and the driver is
+ * expected to release the resource before the timeout. This value is
+ * provided in milliseconds.
+ */
+ __le32 timeout;
+#define IXGBE_ACI_RES_NVM_READ_DFLT_TIMEOUT_MS 3000
+#define IXGBE_ACI_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
+#define IXGBE_ACI_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
+#define IXGBE_ACI_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
+ /* For SDP: pin ID of the SDP */
+ __le32 res_number;
+ /* Status is only used for IXGBE_ACI_RES_ID_GLBL_LOCK */
+ __le16 status;
+#define IXGBE_ACI_RES_GLBL_SUCCESS 0
+#define IXGBE_ACI_RES_GLBL_IN_PROG 1
+#define IXGBE_ACI_RES_GLBL_DONE 2
+ u8 reserved[2];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_req_res);
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct ixgbe_aci_cmd_list_caps {
+ u8 cmd_flags;
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_list_caps);
+
+/* Device/Function buffer entry, repeated per reported capability */
+struct ixgbe_aci_cmd_list_caps_elem {
+ __le16 cap;
+#define IXGBE_ACI_CAPS_VALID_FUNCTIONS 0x0005
+#define IXGBE_ACI_MAX_VALID_FUNCTIONS 0x8
+#define IXGBE_ACI_CAPS_SRIOV 0x0012
+#define IXGBE_ACI_CAPS_VF 0x0013
+#define IXGBE_ACI_CAPS_VMDQ 0x0014
+#define IXGBE_ACI_CAPS_VSI 0x0017
+#define IXGBE_ACI_CAPS_DCB 0x0018
+#define IXGBE_ACI_CAPS_RSS 0x0040
+#define IXGBE_ACI_CAPS_RXQS 0x0041
+#define IXGBE_ACI_CAPS_TXQS 0x0042
+#define IXGBE_ACI_CAPS_MSIX 0x0043
+#define IXGBE_ACI_CAPS_FD 0x0045
+#define IXGBE_ACI_CAPS_MAX_MTU 0x0047
+#define IXGBE_ACI_CAPS_NVM_VER 0x0048
+#define IXGBE_ACI_CAPS_INLINE_IPSEC 0x0070
+#define IXGBE_ACI_CAPS_NUM_ENABLED_PORTS 0x0072
+#define IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE 0x0076
+#define IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
+#define IXGBE_ACI_CAPS_NVM_MGMT 0x0080
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0 0x0081
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1 0x0082
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2 0x0083
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3 0x0084
+#define IXGBE_ACI_CAPS_OROM_RECOVERY_UPDATE 0x0090
+#define IXGBE_ACI_CAPS_NEXT_CLUSTER_ID 0x0096
+ u8 major_ver;
+ u8 minor_ver;
+ /* Number of resources described by this capability */
+ __le32 number;
+ /* Only meaningful for some types of resources */
+ __le32 logical_id;
+ /* Only meaningful for some types of resources */
+ __le32 phys_id;
+ __le64 rsvd1;
+ __le64 rsvd2;
+};
+
+IXGBE_CHECK_STRUCT_LEN(32, ixgbe_aci_cmd_list_caps_elem);
+
+/* Disable RXEN (direct 0x000C) */
+struct ixgbe_aci_cmd_disable_rxen {
+ u8 lport_num;
+ u8 reserved[15];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_disable_rxen);
+
+/* Get FW Event (indirect 0x0014) */
+struct ixgbe_aci_cmd_get_fw_event {
+ __le16 fw_buf_status;
+#define IXGBE_ACI_GET_FW_EVENT_STATUS_OBTAINED BIT(0)
+#define IXGBE_ACI_GET_FW_EVENT_STATUS_PENDING BIT(1)
+ u8 rsvd[14];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_fw_event);
+
+/* Get PHY capabilities (indirect 0x0600) */
+struct ixgbe_aci_cmd_get_phy_caps {
+ u8 lport_num;
+ u8 reserved;
+ __le16 param0;
+ /* 18.0 - Report qualified modules */
+#define IXGBE_ACI_GET_PHY_RQM BIT(0)
+ /* 18.1 - 18.3 : Report mode
+ * 000b - Report topology capabilities, without media
+ * 001b - Report topology capabilities, with media
+ * 010b - Report Active configuration
+ * 011b - Report PHY Type and FEC mode capabilities
+ * 100b - Report Default capabilities
+ */
+#define IXGBE_ACI_REPORT_MODE_S 1
+#define IXGBE_ACI_REPORT_MODE_M (7 << IXGBE_ACI_REPORT_MODE_S)
+#define IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA 0
+#define IXGBE_ACI_REPORT_TOPO_CAP_MEDIA BIT(1)
+#define IXGBE_ACI_REPORT_ACTIVE_CFG BIT(2)
+#define IXGBE_ACI_REPORT_DFLT_CFG BIT(3)
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_phy_caps);
+
+/* This is #define of PHY type (Extended):
+ * The first set of defines is for phy_type_low.
+ */
+#define IXGBE_PHY_TYPE_LOW_100BASE_TX BIT_ULL(0)
+#define IXGBE_PHY_TYPE_LOW_100M_SGMII BIT_ULL(1)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_T BIT_ULL(2)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_SX BIT_ULL(3)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_LX BIT_ULL(4)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_KX BIT_ULL(5)
+#define IXGBE_PHY_TYPE_LOW_1G_SGMII BIT_ULL(6)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_T BIT_ULL(7)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_X BIT_ULL(8)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_KX BIT_ULL(9)
+#define IXGBE_PHY_TYPE_LOW_5GBASE_T BIT_ULL(10)
+#define IXGBE_PHY_TYPE_LOW_5GBASE_KR BIT_ULL(11)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_T BIT_ULL(12)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_DA BIT_ULL(13)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_SR BIT_ULL(14)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_LR BIT_ULL(15)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 BIT_ULL(16)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC BIT_ULL(17)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_C2C BIT_ULL(18)
+#define IXGBE_PHY_TYPE_LOW_MAX_INDEX 18
+/* The second set of defines is for phy_type_high. */
+#define IXGBE_PHY_TYPE_HIGH_10BASE_T BIT_ULL(1)
+#define IXGBE_PHY_TYPE_HIGH_10M_SGMII BIT_ULL(2)
+#define IXGBE_PHY_TYPE_HIGH_2500M_SGMII BIT_ULL(56)
+#define IXGBE_PHY_TYPE_HIGH_100M_USXGMII BIT_ULL(57)
+#define IXGBE_PHY_TYPE_HIGH_1G_USXGMII BIT_ULL(58)
+#define IXGBE_PHY_TYPE_HIGH_2500M_USXGMII BIT_ULL(59)
+#define IXGBE_PHY_TYPE_HIGH_5G_USXGMII BIT_ULL(60)
+#define IXGBE_PHY_TYPE_HIGH_10G_USXGMII BIT_ULL(61)
+#define IXGBE_PHY_TYPE_HIGH_MAX_INDEX 61
+
+struct ixgbe_aci_cmd_get_phy_caps_data {
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define IXGBE_ACI_PHY_EN_TX_LINK_PAUSE BIT(0)
+#define IXGBE_ACI_PHY_EN_RX_LINK_PAUSE BIT(1)
+#define IXGBE_ACI_PHY_LOW_POWER_MODE BIT(2)
+#define IXGBE_ACI_PHY_EN_LINK BIT(3)
+#define IXGBE_ACI_PHY_AN_MODE BIT(4)
+#define IXGBE_ACI_PHY_EN_MOD_QUAL BIT(5)
+#define IXGBE_ACI_PHY_EN_LESM BIT(6)
+#define IXGBE_ACI_PHY_EN_AUTO_FEC BIT(7)
+#define IXGBE_ACI_PHY_CAPS_MASK MAKEMASK(0xff, 0)
+ u8 low_power_ctrl_an;
+#define IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE28 BIT(1)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE73 BIT(2)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE37 BIT(3)
+ __le16 eee_cap;
+#define IXGBE_ACI_PHY_EEE_EN_100BASE_TX BIT(0)
+#define IXGBE_ACI_PHY_EEE_EN_1000BASE_T BIT(1)
+#define IXGBE_ACI_PHY_EEE_EN_10GBASE_T BIT(2)
+#define IXGBE_ACI_PHY_EEE_EN_1000BASE_KX BIT(3)
+#define IXGBE_ACI_PHY_EEE_EN_10GBASE_KR BIT(4)
+#define IXGBE_ACI_PHY_EEE_EN_25GBASE_KR BIT(5)
+#define IXGBE_ACI_PHY_EEE_EN_10BASE_T BIT(11)
+ __le16 eeer_value;
+ u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */
+ u8 phy_fw_ver[8];
+ u8 link_fec_options;
+#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_EN BIT(0)
+#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1)
+#define IXGBE_ACI_PHY_FEC_25G_RS_528_REQ BIT(2)
+#define IXGBE_ACI_PHY_FEC_25G_KR_REQ BIT(3)
+#define IXGBE_ACI_PHY_FEC_25G_RS_544_REQ BIT(4)
+#define IXGBE_ACI_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6)
+#define IXGBE_ACI_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7)
+#define IXGBE_ACI_PHY_FEC_MASK MAKEMASK(0xdf, 0)
+ u8 module_compliance_enforcement;
+#define IXGBE_ACI_MOD_ENFORCE_STRICT_MODE BIT(0)
+ u8 extended_compliance_code;
+#define IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE 3
+ u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+#define IXGBE_ACI_MOD_TYPE_BYTE0_SFP_PLUS 0xA0
+#define IXGBE_ACI_MOD_TYPE_BYTE0_QSFP_PLUS 0x80
+#define IXGBE_ACI_MOD_TYPE_IDENT 1
+#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR BIT(5)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM BIT(6)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_ER BIT(7)
+#define IXGBE_ACI_MOD_TYPE_BYTE2_SFP_PLUS 0xA0
+#define IXGBE_ACI_MOD_TYPE_BYTE2_QSFP_PLUS 0x86
+ u8 qualified_module_count;
+ u8 rsvd2[7]; /* Bytes 47:41 reserved */
+#define IXGBE_ACI_QUAL_MOD_COUNT_MAX 16
+ struct {
+ u8 v_oui[3];
+ u8 rsvd3;
+ u8 v_part[16];
+ __le32 v_rev;
+ __le64 rsvd4;
+ } qual_modules[IXGBE_ACI_QUAL_MOD_COUNT_MAX];
+};
+
+IXGBE_CHECK_STRUCT_LEN(560, ixgbe_aci_cmd_get_phy_caps_data);
+
+/* Set PHY capabilities (direct 0x0601)
+ * NOTE: This command must be followed by setup link and restart auto-neg
+ */
+struct ixgbe_aci_cmd_set_phy_cfg {
+ u8 reserved[8];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_phy_cfg);
+
+/* Set PHY config command data structure */
+struct ixgbe_aci_cmd_set_phy_cfg_data {
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define IXGBE_ACI_PHY_ENA_VALID_MASK MAKEMASK(0xef, 0)
+#define IXGBE_ACI_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
+#define IXGBE_ACI_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
+#define IXGBE_ACI_PHY_ENA_LOW_POWER BIT(2)
+#define IXGBE_ACI_PHY_ENA_LINK BIT(3)
+#define IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT BIT(5)
+#define IXGBE_ACI_PHY_ENA_LESM BIT(6)
+#define IXGBE_ACI_PHY_ENA_AUTO_FEC BIT(7)
+ u8 low_power_ctrl_an;
+ __le16 eee_cap; /* Value from ixgbe_aci_get_phy_caps */
+ __le16 eeer_value; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 link_fec_opt; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 module_compliance_enforcement;
+};
+
+IXGBE_CHECK_STRUCT_LEN(24, ixgbe_aci_cmd_set_phy_cfg_data);
+
+/* Restart AN command data structure (direct 0x0605)
+ * Also used for response, with only the lport_num field present.
+ */
+struct ixgbe_aci_cmd_restart_an {
+ u8 reserved[2];
+ u8 cmd_flags;
+#define IXGBE_ACI_RESTART_AN_LINK_RESTART BIT(1)
+#define IXGBE_ACI_RESTART_AN_LINK_ENABLE BIT(2)
+ u8 reserved2[13];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_restart_an);
+
+#pragma pack(1)
+/* Get link status (indirect 0x0607), also used for Link Status Event */
+struct ixgbe_aci_cmd_get_link_status {
+ u8 reserved[2];
+ u8 cmd_flags;
+#define IXGBE_ACI_LSE_M 0x3
+#define IXGBE_ACI_LSE_NOP 0x0
+#define IXGBE_ACI_LSE_DIS 0x2
+#define IXGBE_ACI_LSE_ENA 0x3
+ /* only response uses this flag */
+#define IXGBE_ACI_LSE_IS_ENABLED 0x1
+ u8 reserved2[5];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_link_status);
+
+/* Get link status response data structure, also used for Link Status Event */
+struct ixgbe_aci_cmd_get_link_status_data {
+ u8 topo_media_conflict;
+#define IXGBE_ACI_LINK_TOPO_CONFLICT BIT(0)
+#define IXGBE_ACI_LINK_MEDIA_CONFLICT BIT(1)
+#define IXGBE_ACI_LINK_TOPO_CORRUPT BIT(2)
+#define IXGBE_ACI_LINK_TOPO_UNREACH_PRT BIT(4)
+#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_PRT BIT(5)
+#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_MEDIA BIT(6)
+#define IXGBE_ACI_LINK_TOPO_UNSUPP_MEDIA BIT(7)
+ u8 link_cfg_err;
+#define IXGBE_ACI_LINK_CFG_ERR BIT(0)
+#define IXGBE_ACI_LINK_CFG_COMPLETED BIT(1)
+#define IXGBE_ACI_LINK_ACT_PORT_OPT_INVAL BIT(2)
+#define IXGBE_ACI_LINK_FEAT_ID_OR_CONFIG_ID_INVAL BIT(3)
+#define IXGBE_ACI_LINK_TOPO_CRITICAL_SDP_ERR BIT(4)
+#define IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED BIT(5)
+#define IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE BIT(6)
+#define IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT BIT(7)
+ u8 link_info;
+#define IXGBE_ACI_LINK_UP BIT(0) /* Link Status */
+#define IXGBE_ACI_LINK_FAULT BIT(1)
+#define IXGBE_ACI_LINK_FAULT_TX BIT(2)
+#define IXGBE_ACI_LINK_FAULT_RX BIT(3)
+#define IXGBE_ACI_LINK_FAULT_REMOTE BIT(4)
+#define IXGBE_ACI_LINK_UP_PORT BIT(5) /* External Port Link Status */
+#define IXGBE_ACI_MEDIA_AVAILABLE BIT(6)
+#define IXGBE_ACI_SIGNAL_DETECT BIT(7)
+ u8 an_info;
+#define IXGBE_ACI_AN_COMPLETED BIT(0)
+#define IXGBE_ACI_LP_AN_ABILITY BIT(1)
+#define IXGBE_ACI_PD_FAULT BIT(2) /* Parallel Detection Fault */
+#define IXGBE_ACI_FEC_EN BIT(3)
+#define IXGBE_ACI_PHY_LOW_POWER BIT(4) /* Low Power State */
+#define IXGBE_ACI_LINK_PAUSE_TX BIT(5)
+#define IXGBE_ACI_LINK_PAUSE_RX BIT(6)
+#define IXGBE_ACI_QUALIFIED_MODULE BIT(7)
+ u8 ext_info;
+#define IXGBE_ACI_LINK_PHY_TEMP_ALARM BIT(0)
+#define IXGBE_ACI_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */
+ /* Port Tx Suspended */
+#define IXGBE_ACI_LINK_TX_S 2
+#define IXGBE_ACI_LINK_TX_M (0x03 << IXGBE_ACI_LINK_TX_S)
+#define IXGBE_ACI_LINK_TX_ACTIVE 0
+#define IXGBE_ACI_LINK_TX_DRAINED 1
+#define IXGBE_ACI_LINK_TX_FLUSHED 3
+ u8 lb_status;
+#define IXGBE_ACI_LINK_LB_PHY_LCL BIT(0)
+#define IXGBE_ACI_LINK_LB_PHY_RMT BIT(1)
+#define IXGBE_ACI_LINK_LB_MAC_LCL BIT(2)
+#define IXGBE_ACI_LINK_LB_PHY_IDX_S 3
+#define IXGBE_ACI_LINK_LB_PHY_IDX_M (0x7 << IXGBE_ACI_LINK_LB_PHY_IDX_S)
+ __le16 max_frame_size;
+ u8 cfg;
+#define IXGBE_ACI_LINK_25G_KR_FEC_EN BIT(0)
+#define IXGBE_ACI_LINK_25G_RS_528_FEC_EN BIT(1)
+#define IXGBE_ACI_LINK_25G_RS_544_FEC_EN BIT(2)
+#define IXGBE_ACI_FEC_MASK MAKEMASK(0x7, 0)
+ /* Pacing Config */
+#define IXGBE_ACI_CFG_PACING_S 3
+#define IXGBE_ACI_CFG_PACING_M (0xF << IXGBE_ACI_CFG_PACING_S)
+#define IXGBE_ACI_CFG_PACING_TYPE_M BIT(7)
+#define IXGBE_ACI_CFG_PACING_TYPE_AVG 0
+#define IXGBE_ACI_CFG_PACING_TYPE_FIXED IXGBE_ACI_CFG_PACING_TYPE_M
+ /* External Device Power Ability */
+ u8 power_desc;
+#define IXGBE_ACI_PWR_CLASS_M 0x3F
+#define IXGBE_ACI_LINK_PWR_BASET_LOW_HIGH 0
+#define IXGBE_ACI_LINK_PWR_BASET_HIGH 1
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_1 0
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_2 1
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_3 2
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_4 3
+ __le16 link_speed;
+#define IXGBE_ACI_LINK_SPEED_M 0x7FF
+#define IXGBE_ACI_LINK_SPEED_10MB BIT(0)
+#define IXGBE_ACI_LINK_SPEED_100MB BIT(1)
+#define IXGBE_ACI_LINK_SPEED_1000MB BIT(2)
+#define IXGBE_ACI_LINK_SPEED_2500MB BIT(3)
+#define IXGBE_ACI_LINK_SPEED_5GB BIT(4)
+#define IXGBE_ACI_LINK_SPEED_10GB BIT(5)
+#define IXGBE_ACI_LINK_SPEED_20GB BIT(6)
+#define IXGBE_ACI_LINK_SPEED_25GB BIT(7)
+#define IXGBE_ACI_LINK_SPEED_40GB BIT(8)
+#define IXGBE_ACI_LINK_SPEED_50GB BIT(9)
+#define IXGBE_ACI_LINK_SPEED_100GB BIT(10)
+#define IXGBE_ACI_LINK_SPEED_200GB BIT(11)
+#define IXGBE_ACI_LINK_SPEED_UNKNOWN BIT(15)
+ __le16 reserved3; /* Aligns next field to 8-byte boundary */
+ u8 ext_fec_status;
+#define IXGBE_ACI_LINK_RS_272_FEC_EN BIT(0) /* RS 272 FEC enabled */
+ u8 reserved4;
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ /* Get link status version 2 link partner data */
+ __le64 lp_phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 lp_phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 lp_fec_adv;
+#define IXGBE_ACI_LINK_LP_10G_KR_FEC_CAP BIT(0)
+#define IXGBE_ACI_LINK_LP_25G_KR_FEC_CAP BIT(1)
+#define IXGBE_ACI_LINK_LP_RS_528_FEC_CAP BIT(2)
+#define IXGBE_ACI_LINK_LP_50G_KR_272_FEC_CAP BIT(3)
+#define IXGBE_ACI_LINK_LP_100G_KR_272_FEC_CAP BIT(4)
+#define IXGBE_ACI_LINK_LP_200G_KR_272_FEC_CAP BIT(5)
+ u8 lp_fec_req;
+#define IXGBE_ACI_LINK_LP_10G_KR_FEC_REQ BIT(0)
+#define IXGBE_ACI_LINK_LP_25G_KR_FEC_REQ BIT(1)
+#define IXGBE_ACI_LINK_LP_RS_528_FEC_REQ BIT(2)
+#define IXGBE_ACI_LINK_LP_KR_272_FEC_REQ BIT(3)
+ u8 lp_flowcontrol;
+#define IXGBE_ACI_LINK_LP_PAUSE_ADV BIT(0)
+#define IXGBE_ACI_LINK_LP_ASM_DIR_ADV BIT(1)
+ u8 reserved5[5];
+};
+#pragma pack()
+
+IXGBE_CHECK_STRUCT_LEN(56, ixgbe_aci_cmd_get_link_status_data);
+
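+/* Editor's sketch, not from the original sources: decoding a couple of fields
+ * of a Get Link Status response buffer. The helper name is hypothetical;
+ * le16toh() is assumed available.
+ */
+static inline bool
+ixgbe_aci_link_up_at_10g(const struct ixgbe_aci_cmd_get_link_status_data *data)
+{
+	/* Link must be reported up and the speed bitmap must include 10GB. */
+	return ((data->link_info & IXGBE_ACI_LINK_UP) != 0 &&
+	    (le16toh(data->link_speed) & IXGBE_ACI_LINK_SPEED_10GB) != 0);
+}
+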
+/* Set event mask command (direct 0x0613) */
+struct ixgbe_aci_cmd_set_event_mask {
+ u8 reserved[8];
+ __le16 event_mask;
+#define IXGBE_ACI_LINK_EVENT_UPDOWN BIT(1)
+#define IXGBE_ACI_LINK_EVENT_MEDIA_NA BIT(2)
+#define IXGBE_ACI_LINK_EVENT_LINK_FAULT BIT(3)
+#define IXGBE_ACI_LINK_EVENT_PHY_TEMP_ALARM BIT(4)
+#define IXGBE_ACI_LINK_EVENT_EXCESSIVE_ERRORS BIT(5)
+#define IXGBE_ACI_LINK_EVENT_SIGNAL_DETECT BIT(6)
+#define IXGBE_ACI_LINK_EVENT_AN_COMPLETED BIT(7)
+#define IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL BIT(8)
+#define IXGBE_ACI_LINK_EVENT_PORT_TX_SUSPENDED BIT(9)
+#define IXGBE_ACI_LINK_EVENT_TOPO_CONFLICT BIT(10)
+#define IXGBE_ACI_LINK_EVENT_MEDIA_CONFLICT BIT(11)
+#define IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL BIT(12)
+ u8 reserved1[6];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_event_mask);
+
+struct ixgbe_aci_cmd_link_topo_params {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_LINK_TOPO_PORT_NUM_VALID BIT(0)
+ u8 node_type_ctx;
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_S 0
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_M (0xF << IXGBE_ACI_LINK_TOPO_NODE_TYPE_S)
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_PHY 0
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPIO_CTRL 1
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MUX_CTRL 2
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED_CTRL 3
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED 4
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_THERMAL 5
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE 6
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MEZZ 7
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_ID_EEPROM 8
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPS 11
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_S 4
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_M \
+ (0xF << IXGBE_ACI_LINK_TOPO_NODE_CTX_S)
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_GLOBAL 0
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_BOARD 1
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT 2
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE 3
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE 4
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_DIRECT_BUS_ACCESS 5
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE_BUS_ADDRESS 6
+ u8 index;
+};
+
+IXGBE_CHECK_STRUCT_LEN(4, ixgbe_aci_cmd_link_topo_params);
+
+struct ixgbe_aci_cmd_link_topo_addr {
+ struct ixgbe_aci_cmd_link_topo_params topo_params;
+ __le16 handle;
+#define IXGBE_ACI_LINK_TOPO_HANDLE_S 0
+#define IXGBE_ACI_LINK_TOPO_HANDLE_M (0x3FF << IXGBE_ACI_LINK_TOPO_HANDLE_S)
+/* Used to decode the handle field */
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0
+#define IXGBE_ACI_LINK_TOPO_HANDLE_NODE_S 0
+/* In case of a Mezzanine type */
+#define IXGBE_ACI_LINK_TOPO_HANDLE_MEZZ_NODE_M \
+ (0x3F << IXGBE_ACI_LINK_TOPO_HANDLE_NODE_S)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_MEZZ_S 6
+#define IXGBE_ACI_LINK_TOPO_HANDLE_MEZZ_M \
+ (0x7 << IXGBE_ACI_LINK_TOPO_HANDLE_MEZZ_S)
+/* In case of a LOM type */
+#define IXGBE_ACI_LINK_TOPO_HANDLE_LOM_NODE_M \
+ (0x1FF << IXGBE_ACI_LINK_TOPO_HANDLE_NODE_S)
+};
+
+IXGBE_CHECK_STRUCT_LEN(6, ixgbe_aci_cmd_link_topo_addr);
+
+/* Get Link Topology Handle (direct, 0x06E0) */
+struct ixgbe_aci_cmd_get_link_topo {
+ struct ixgbe_aci_cmd_link_topo_addr addr;
+ u8 node_part_num;
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_GEN_GPS 0x48
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_E610_PTC 0x49
+ u8 rsvd[9];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_link_topo);
+
+/* Read/Write I2C (direct, 0x06E2/0x06E3) */
+struct ixgbe_aci_cmd_i2c {
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr;
+ __le16 i2c_addr;
+ u8 i2c_params;
+#define IXGBE_ACI_I2C_DATA_SIZE_S 0
+#define IXGBE_ACI_I2C_DATA_SIZE_M (0xF << IXGBE_ACI_I2C_DATA_SIZE_S)
+#define IXGBE_ACI_I2C_ADDR_TYPE_M BIT(4)
+#define IXGBE_ACI_I2C_ADDR_TYPE_7BIT 0
+#define IXGBE_ACI_I2C_ADDR_TYPE_10BIT IXGBE_ACI_I2C_ADDR_TYPE_M
+#define IXGBE_ACI_I2C_DATA_OFFSET_S 5
+#define IXGBE_ACI_I2C_DATA_OFFSET_M (0x3 << IXGBE_ACI_I2C_DATA_OFFSET_S)
+#define IXGBE_ACI_I2C_USE_REPEATED_START BIT(7)
+ u8 rsvd;
+ __le16 i2c_bus_addr;
+#define IXGBE_ACI_I2C_ADDR_7BIT_MASK 0x7F
+#define IXGBE_ACI_I2C_ADDR_10BIT_MASK 0x3FF
+ u8 i2c_data[4]; /* Used only by write command, reserved in read. */
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_i2c);
+
+/* Read I2C Response (direct, 0x06E2) */
+struct ixgbe_aci_cmd_read_i2c_resp {
+ u8 i2c_data[16];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_read_i2c_resp);
+
+/* Read/Write MDIO (direct, 0x06E4/0x06E5) */
+struct ixgbe_aci_cmd_mdio {
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr;
+ u8 mdio_device_addr;
+#define IXGBE_ACI_MDIO_DEV_S 0
+#define IXGBE_ACI_MDIO_DEV_M (0x1F << IXGBE_ACI_MDIO_DEV_S)
+#define IXGBE_ACI_MDIO_CLAUSE_22 BIT(5)
+#define IXGBE_ACI_MDIO_CLAUSE_45 BIT(6)
+ u8 mdio_bus_address;
+#define IXGBE_ACI_MDIO_BUS_ADDR_S 0
+#define IXGBE_ACI_MDIO_BUS_ADDR_M (0x1F << IXGBE_ACI_MDIO_BUS_ADDR_S)
+ __le16 offset;
+ __le16 data; /* Input in write cmd, output in read cmd. */
+ u8 rsvd1[4];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_mdio);
+
+/* Set/Get GPIO By Function (direct, 0x06E6/0x06E7) */
+struct ixgbe_aci_cmd_gpio_by_func {
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr;
+ u8 io_func_num;
+#define IXGBE_ACI_GPIO_FUNC_S 0
+#define IXGBE_ACI_GPIO_FUNC_M (0x1F << IXGBE_ACI_GPIO_FUNC_S)
+ u8 io_value; /* Input in write cmd, output in read cmd. */
+#define IXGBE_ACI_GPIO_ON BIT(0)
+#define IXGBE_ACI_GPIO_OFF 0
+ u8 rsvd[8];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_gpio_by_func);
+
+/* Set Port Identification LED (direct, 0x06E9) */
+struct ixgbe_aci_cmd_set_port_id_led {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_PORT_ID_PORT_NUM_VALID BIT(0)
+ u8 ident_mode;
+#define IXGBE_ACI_PORT_IDENT_LED_BLINK BIT(0)
+#define IXGBE_ACI_PORT_IDENT_LED_ORIG 0
+ u8 rsvd[13];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_port_id_led);
+
+/* Set/Get GPIO (direct, 0x06EC/0x06ED) */
+struct ixgbe_aci_cmd_gpio {
+ __le16 gpio_ctrl_handle;
+#define IXGBE_ACI_GPIO_HANDLE_S 0
+#define IXGBE_ACI_GPIO_HANDLE_M (0x3FF << IXGBE_ACI_GPIO_HANDLE_S)
+ u8 gpio_num;
+ u8 gpio_val;
+ u8 rsvd[12];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_gpio);
+
+/* Read/Write SFF EEPROM command (indirect 0x06EE) */
+struct ixgbe_aci_cmd_sff_eeprom {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_SFF_PORT_NUM_VALID BIT(0)
+ __le16 i2c_bus_addr;
+#define IXGBE_ACI_SFF_I2CBUS_7BIT_M 0x7F
+#define IXGBE_ACI_SFF_I2CBUS_10BIT_M 0x3FF
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_M BIT(10)
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_7BIT 0
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_10BIT IXGBE_ACI_SFF_I2CBUS_TYPE_M
+#define IXGBE_ACI_SFF_PAGE_BANK_CTRL_S 11
+#define IXGBE_ACI_SFF_PAGE_BANK_CTRL_M (0x3 << IXGBE_ACI_SFF_PAGE_BANK_CTRL_S)
+#define IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE 0
+#define IXGBE_ACI_SFF_UPDATE_PAGE 1
+#define IXGBE_ACI_SFF_UPDATE_BANK 2
+#define IXGBE_ACI_SFF_UPDATE_PAGE_BANK 3
+#define IXGBE_ACI_SFF_IS_WRITE BIT(15)
+ __le16 i2c_offset;
+ u8 module_bank;
+ u8 module_page;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_sff_eeprom);
+
+/* Program Topology Device NVM (direct, 0x06F2) */
+struct ixgbe_aci_cmd_prog_topo_dev_nvm {
+ struct ixgbe_aci_cmd_link_topo_params topo_params;
+ u8 rsvd[12];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_prog_topo_dev_nvm);
+
+/* Read Topology Device NVM (direct, 0x06F3) */
+struct ixgbe_aci_cmd_read_topo_dev_nvm {
+ struct ixgbe_aci_cmd_link_topo_params topo_params;
+ __le32 start_address;
+#define IXGBE_ACI_READ_TOPO_DEV_NVM_DATA_READ_SIZE 8
+ u8 data_read[IXGBE_ACI_READ_TOPO_DEV_NVM_DATA_READ_SIZE];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_read_topo_dev_nvm);
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Write commands (indirect 0x0703)
+ * NVM Write Activate commands (direct 0x0707)
+ * NVM Shadow RAM Dump commands (direct 0x0707)
+ */
+struct ixgbe_aci_cmd_nvm {
+#define IXGBE_ACI_NVM_MAX_OFFSET 0xFFFFFF
+ __le16 offset_low;
+ u8 offset_high; /* For Write Activate offset_high is used as flags2 */
+ u8 cmd_flags;
+#define IXGBE_ACI_NVM_LAST_CMD BIT(0)
+#define IXGBE_ACI_NVM_PCIR_REQ BIT(0) /* Used by NVM Write reply */
+#define IXGBE_ACI_NVM_PRESERVATION_S 1 /* Used by NVM Write Activate only */
+#define IXGBE_ACI_NVM_PRESERVATION_M (3 << IXGBE_ACI_NVM_PRESERVATION_S)
+#define IXGBE_ACI_NVM_NO_PRESERVATION (0 << IXGBE_ACI_NVM_PRESERVATION_S)
+#define IXGBE_ACI_NVM_PRESERVE_ALL BIT(1)
+#define IXGBE_ACI_NVM_FACTORY_DEFAULT (2 << IXGBE_ACI_NVM_PRESERVATION_S)
+#define IXGBE_ACI_NVM_PRESERVE_SELECTED (3 << IXGBE_ACI_NVM_PRESERVATION_S)
+#define IXGBE_ACI_NVM_ACTIV_SEL_NVM BIT(3) /* Write Activate/SR Dump only */
+#define IXGBE_ACI_NVM_ACTIV_SEL_OROM BIT(4)
+#define IXGBE_ACI_NVM_ACTIV_SEL_NETLIST BIT(5)
+#define IXGBE_ACI_NVM_SPECIAL_UPDATE BIT(6)
+#define IXGBE_ACI_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */
+#define IXGBE_ACI_NVM_ACTIV_SEL_MASK MAKEMASK(0x7, 3)
+#define IXGBE_ACI_NVM_FLASH_ONLY BIT(7)
+#define IXGBE_ACI_NVM_RESET_LVL_M MAKEMASK(0x3, 0) /* Write reply only */
+#define IXGBE_ACI_NVM_POR_FLAG 0
+#define IXGBE_ACI_NVM_PERST_FLAG 1
+#define IXGBE_ACI_NVM_EMPR_FLAG 2
+#define IXGBE_ACI_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */
+ /* For Write Activate, several flags are sent as part of a separate
+ * flags2 field using a separate byte. For simplicity of the software
+ * interface, we pass the flags as a 16 bit value so these flags are
+ * all offset by 8 bits
+ */
+#define IXGBE_ACI_NVM_ACTIV_REQ_EMPR BIT(8) /* NVM Write Activate only */
+ __le16 module_typeid;
+ __le16 length;
+#define IXGBE_ACI_NVM_ERASE_LEN 0xFFFF
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* NVM module_typeid values, and the offsets and read lengths needed for struct ixgbe_aci_cmd_nvm. */
+#define IXGBE_ACI_NVM_SECTOR_UNIT 4096 /* In Bytes */
+#define IXGBE_ACI_NVM_WORD_UNIT 2 /* In Bytes */
+
+#define IXGBE_ACI_NVM_START_POINT 0
+#define IXGBE_ACI_NVM_EMP_SR_PTR_OFFSET 0x90
+#define IXGBE_ACI_NVM_EMP_SR_PTR_RD_LEN 2 /* In Bytes */
+#define IXGBE_ACI_NVM_EMP_SR_PTR_M MAKEMASK(0x7FFF, 0)
+#define IXGBE_ACI_NVM_EMP_SR_PTR_TYPE_S 15
+#define IXGBE_ACI_NVM_EMP_SR_PTR_TYPE_M BIT(15)
+#define IXGBE_ACI_NVM_EMP_SR_PTR_TYPE_SECTOR 1
+
+#define IXGBE_ACI_NVM_LLDP_CFG_PTR_OFFSET 0x46
+#define IXGBE_ACI_NVM_LLDP_CFG_HEADER_LEN 2 /* In Bytes */
+#define IXGBE_ACI_NVM_LLDP_CFG_PTR_RD_LEN 2 /* In Bytes */
+
+#define IXGBE_ACI_NVM_LLDP_PRESERVED_MOD_ID 0x129
+#define IXGBE_ACI_NVM_CUR_LLDP_PERSIST_RD_OFFSET 2 /* In Bytes */
+#define IXGBE_ACI_NVM_LLDP_STATUS_M MAKEMASK(0xF, 0)
+#define IXGBE_ACI_NVM_LLDP_STATUS_M_LEN 4 /* In Bits */
+#define IXGBE_ACI_NVM_LLDP_STATUS_RD_LEN 4 /* In Bytes */
+
+#define IXGBE_ACI_NVM_MINSREV_MOD_ID 0x130
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_nvm);
+
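+/* Editor's sketch (hypothetical helper): the 24-bit flash offset, bounded by
+ * IXGBE_ACI_NVM_MAX_OFFSET, is split across the 16-bit offset_low and 8-bit
+ * offset_high fields of the command.
+ */
+static inline void
+ixgbe_aci_nvm_set_offset(struct ixgbe_aci_cmd_nvm *cmd, u32 offset)
+{
+	cmd->offset_low = htole16(offset & 0xFFFF);
+	cmd->offset_high = (offset >> 16) & 0xFF;
+}
+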
+/* Used for reading and writing MinSRev using 0x0701 and 0x0703. Note that the
+ * type field is excluded from the section when reading from or writing to
+ * a module using the module_typeid field with these ACI commands.
+ */
+struct ixgbe_aci_cmd_nvm_minsrev {
+ __le16 length;
+ __le16 validity;
+#define IXGBE_ACI_NVM_MINSREV_NVM_VALID BIT(0)
+#define IXGBE_ACI_NVM_MINSREV_OROM_VALID BIT(1)
+ __le16 nvm_minsrev_l;
+ __le16 nvm_minsrev_h;
+ __le16 orom_minsrev_l;
+ __le16 orom_minsrev_h;
+};
+
+IXGBE_CHECK_STRUCT_LEN(12, ixgbe_aci_cmd_nvm_minsrev);
+
+/* Used for 0x0704 as well as for 0x0705 commands */
+struct ixgbe_aci_cmd_nvm_cfg {
+ u8 cmd_flags;
+#define IXGBE_ACI_ANVM_MULTIPLE_ELEMS BIT(0)
+#define IXGBE_ACI_ANVM_IMMEDIATE_FIELD BIT(1)
+#define IXGBE_ACI_ANVM_NEW_CFG BIT(2)
+ u8 reserved;
+ __le16 count;
+ __le16 id;
+ u8 reserved1[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_nvm_cfg);
+
+struct ixgbe_aci_cmd_nvm_cfg_data {
+ __le16 field_id;
+ __le16 field_options;
+ __le16 field_value;
+};
+
+IXGBE_CHECK_STRUCT_LEN(6, ixgbe_aci_cmd_nvm_cfg_data);
+
+/* NVM Checksum Command (direct, 0x0706) */
+struct ixgbe_aci_cmd_nvm_checksum {
+ u8 flags;
+#define IXGBE_ACI_NVM_CHECKSUM_VERIFY BIT(0)
+#define IXGBE_ACI_NVM_CHECKSUM_RECALC BIT(1)
+ u8 rsvd;
+ __le16 checksum; /* Used only by response */
+#define IXGBE_ACI_NVM_CHECKSUM_CORRECT 0xBABA
+ u8 rsvd2[12];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_nvm_checksum);
+
+/* Used for NVM Sanitization command - 0x070C */
+struct ixgbe_aci_cmd_nvm_sanitization {
+ u8 cmd_flags;
+#define IXGBE_ACI_SANITIZE_REQ_READ 0
+#define IXGBE_ACI_SANITIZE_REQ_OPERATE BIT(0)
+
+#define IXGBE_ACI_SANITIZE_READ_SUBJECT_NVM_BITS 0
+#define IXGBE_ACI_SANITIZE_READ_SUBJECT_NVM_STATE BIT(1)
+#define IXGBE_ACI_SANITIZE_OPERATE_SUBJECT_CLEAR 0
+ u8 values;
+#define IXGBE_ACI_SANITIZE_NVM_BITS_HOST_CLEAN_SUPPORT BIT(0)
+#define IXGBE_ACI_SANITIZE_NVM_BITS_BMC_CLEAN_SUPPORT BIT(2)
+#define IXGBE_ACI_SANITIZE_NVM_STATE_HOST_CLEAN_DONE BIT(0)
+#define IXGBE_ACI_SANITIZE_NVM_STATE_HOST_CLEAN_SUCCESS BIT(1)
+#define IXGBE_ACI_SANITIZE_NVM_STATE_BMC_CLEAN_DONE BIT(2)
+#define IXGBE_ACI_SANITIZE_NVM_STATE_BMC_CLEAN_SUCCESS BIT(3)
+#define IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE BIT(0)
+#define IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_SUCCESS BIT(1)
+#define IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE BIT(2)
+#define IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_SUCCESS BIT(3)
+ u8 reserved[14];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_nvm_sanitization);
+
+/* Write/Read Alternate - Direct (direct 0x0900/0x0902) */
+struct ixgbe_aci_cmd_read_write_alt_direct {
+ __le32 dword0_addr;
+ __le32 dword0_value;
+ __le32 dword1_addr;
+ __le32 dword1_value;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_read_write_alt_direct);
+
+/* Write/Read Alternate - Indirect (indirect 0x0901/0x0903) */
+struct ixgbe_aci_cmd_read_write_alt_indirect {
+ __le32 base_dword_addr;
+ __le32 num_dwords;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_read_write_alt_indirect);
+
+/* Done Alternate Write (direct 0x0904) */
+struct ixgbe_aci_cmd_done_alt_write {
+ u8 flags;
+#define IXGBE_ACI_CMD_UEFI_BIOS_MODE BIT(0)
+#define IXGBE_ACI_RESP_RESET_NEEDED BIT(1)
+ u8 reserved[15];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_done_alt_write);
+
+/* Clear Port Alternate Write (direct 0x0906) */
+struct ixgbe_aci_cmd_clear_port_alt_write {
+ u8 reserved[16];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_clear_port_alt_write);
+
+/* Get CGU abilities command response data structure (indirect 0x0C61) */
+struct ixgbe_aci_cmd_get_cgu_abilities {
+ u8 num_inputs;
+ u8 num_outputs;
+ u8 pps_dpll_idx;
+ u8 synce_dpll_idx;
+ __le32 max_in_freq;
+ __le32 max_in_phase_adj;
+ __le32 max_out_freq;
+ __le32 max_out_phase_adj;
+ u8 cgu_part_num;
+ u8 rsvd[3];
+};
+
+IXGBE_CHECK_STRUCT_LEN(24, ixgbe_aci_cmd_get_cgu_abilities);
+
+#define IXGBE_ACI_NODE_HANDLE_VALID BIT(10)
+#define IXGBE_ACI_NODE_HANDLE MAKEMASK(0x3FF, 0)
+#define IXGBE_ACI_DRIVING_CLK_NUM_SHIFT 10
+#define IXGBE_ACI_DRIVING_CLK_NUM MAKEMASK(0x3F, IXGBE_ACI_DRIVING_CLK_NUM_SHIFT)
+
+/* Set CGU input config (direct 0x0C62) */
+struct ixgbe_aci_cmd_set_cgu_input_config {
+ u8 input_idx;
+ u8 flags1;
+#define IXGBE_ACI_SET_CGU_IN_CFG_FLG1_UPDATE_FREQ BIT(6)
+#define IXGBE_ACI_SET_CGU_IN_CFG_FLG1_UPDATE_DELAY BIT(7)
+ u8 flags2;
+#define IXGBE_ACI_SET_CGU_IN_CFG_FLG2_INPUT_EN BIT(5)
+#define IXGBE_ACI_SET_CGU_IN_CFG_FLG2_ESYNC_EN BIT(6)
+ u8 rsvd;
+ __le32 freq;
+ __le32 phase_delay;
+ u8 rsvd2[2];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_cgu_input_config);
+
+/* Get CGU input config response descriptor structure (direct 0x0C63) */
+struct ixgbe_aci_cmd_get_cgu_input_config {
+ u8 input_idx;
+ u8 status;
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_LOS BIT(0)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_SCM_FAIL BIT(1)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_CFM_FAIL BIT(2)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_GST_FAIL BIT(3)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_PFM_FAIL BIT(4)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_ESYNC_FAIL BIT(6)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_ESYNC_CAP BIT(7)
+ u8 type;
+#define IXGBE_ACI_GET_CGU_IN_CFG_TYPE_READ_ONLY BIT(0)
+#define IXGBE_ACI_GET_CGU_IN_CFG_TYPE_GPS BIT(4)
+#define IXGBE_ACI_GET_CGU_IN_CFG_TYPE_EXTERNAL BIT(5)
+#define IXGBE_ACI_GET_CGU_IN_CFG_TYPE_PHY BIT(6)
+ u8 flags1;
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG1_PHASE_DELAY_SUPP BIT(0)
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG1_1PPS_SUPP BIT(2)
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG1_10MHZ_SUPP BIT(3)
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG1_ANYFREQ BIT(7)
+ __le32 freq;
+ __le32 phase_delay;
+ u8 flags2;
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG2_INPUT_EN BIT(5)
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG2_ESYNC_EN BIT(6)
+ u8 rsvd[1];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_input_config);
+
+/* Set CGU output config (direct 0x0C64) */
+struct ixgbe_aci_cmd_set_cgu_output_config {
+ u8 output_idx;
+ u8 flags;
+#define IXGBE_ACI_SET_CGU_OUT_CFG_OUT_EN BIT(0)
+#define IXGBE_ACI_SET_CGU_OUT_CFG_ESYNC_EN BIT(1)
+#define IXGBE_ACI_SET_CGU_OUT_CFG_UPDATE_FREQ BIT(2)
+#define IXGBE_ACI_SET_CGU_OUT_CFG_UPDATE_PHASE BIT(3)
+#define IXGBE_ACI_SET_CGU_OUT_CFG_UPDATE_SRC_SEL BIT(4)
+ u8 src_sel;
+#define IXGBE_ACI_SET_CGU_OUT_CFG_DPLL_SRC_SEL MAKEMASK(0x1F, 0)
+ u8 rsvd;
+ __le32 freq;
+ __le32 phase_delay;
+ u8 rsvd2[2];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_cgu_output_config);
+
+/* Get CGU output config (direct 0x0C65) */
+struct ixgbe_aci_cmd_get_cgu_output_config {
+ u8 output_idx;
+ u8 flags;
+#define IXGBE_ACI_GET_CGU_OUT_CFG_OUT_EN BIT(0)
+#define IXGBE_ACI_GET_CGU_OUT_CFG_ESYNC_EN BIT(1)
+#define IXGBE_ACI_GET_CGU_OUT_CFG_ESYNC_ABILITY BIT(2)
+ u8 src_sel;
+#define IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_SRC_SEL_SHIFT 0
+#define IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_SRC_SEL \
+ MAKEMASK(0x1F, IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_SRC_SEL_SHIFT)
+#define IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_MODE_SHIFT 5
+#define IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_MODE \
+ MAKEMASK(0x7, IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_MODE_SHIFT)
+ u8 rsvd;
+ __le32 freq;
+ __le32 src_freq;
+ u8 rsvd2[2];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_output_config);
+
+/* Get CGU DPLL status (direct 0x0C66) */
+struct ixgbe_aci_cmd_get_cgu_dpll_status {
+ u8 dpll_num;
+ u8 ref_state;
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_LOS BIT(0)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_SCM BIT(1)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_CFM BIT(2)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_GST BIT(3)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_PFM BIT(4)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_FAST_LOCK_EN BIT(5)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_ESYNC BIT(6)
+ __le16 dpll_state;
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_LOCK BIT(0)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_HO BIT(1)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_HO_READY BIT(2)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_FLHIT BIT(5)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_PSLHIT BIT(7)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SHIFT 8
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SEL \
+ MAKEMASK(0x1F, IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SHIFT)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_MODE_SHIFT 13
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_MODE \
+ MAKEMASK(0x7, IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_MODE_SHIFT)
+ __le32 phase_offset_h;
+ __le32 phase_offset_l;
+ u8 eec_mode;
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_EEC_MODE_1 0xA
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_EEC_MODE_2 0xB
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_EEC_MODE_UNKNOWN 0xF
+ u8 rsvd[1];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_dpll_status);
+
+/* Set CGU DPLL config (direct 0x0C67) */
+struct ixgbe_aci_cmd_set_cgu_dpll_config {
+ u8 dpll_num;
+ u8 ref_state;
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_LOS BIT(0)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_SCM BIT(1)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_CFM BIT(2)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_GST BIT(3)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_PFM BIT(4)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_FLOCK_EN BIT(5)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_ESYNC BIT(6)
+ u8 rsvd;
+ u8 config;
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_CLK_REF_SEL MAKEMASK(0x1F, 0)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_MODE MAKEMASK(0x7, 5)
+ u8 rsvd2[8];
+ u8 eec_mode;
+ u8 rsvd3[1];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_cgu_dpll_config);
+
+/* Set CGU reference priority (direct 0x0C68) */
+struct ixgbe_aci_cmd_set_cgu_ref_prio {
+ u8 dpll_num;
+ u8 ref_idx;
+ u8 ref_priority;
+ u8 rsvd[11];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_cgu_ref_prio);
+
+/* Get CGU reference priority (direct 0x0C69) */
+struct ixgbe_aci_cmd_get_cgu_ref_prio {
+ u8 dpll_num;
+ u8 ref_idx;
+ u8 ref_priority; /* Valid only in response */
+ u8 rsvd[13];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_ref_prio);
+
+/* Get CGU info (direct 0x0C6A) */
+struct ixgbe_aci_cmd_get_cgu_info {
+ __le32 cgu_id;
+ __le32 cgu_cfg_ver;
+ __le32 cgu_fw_ver;
+ u8 node_part_num;
+ u8 dev_rev;
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_info);
+
+struct ixgbe_aci_cmd_temp_tca_event {
+ u8 event_desc;
+#define IXGBE_TEMP_TCA_EVENT_DESC_SUBJ_SHIFT 0
+#define IXGBE_TEMP_TCA_EVENT_DESC_SUBJ_NVM 0
+#define IXGBE_TEMP_TCA_EVENT_DESC_SUBJ_EVENT_STATE 1
+#define IXGBE_TEMP_TCA_EVENT_DESC_SUBJ_ALL 2
+
+#define IXGBE_TEMP_TCA_EVENT_DESC_ALARM_SHIFT 2
+#define IXGBE_TEMP_TCA_EVENT_DESC_WARNING_CLEARED 0
+#define IXGBE_TEMP_TCA_EVENT_DESC_ALARM_CLEARED 1
+#define IXGBE_TEMP_TCA_EVENT_DESC_WARNING_RAISED 2
+#define IXGBE_TEMP_TCA_EVENT_DESC_ALARM_RAISED 3
+
+ u8 reserved;
+ __le16 temperature;
+ __le16 thermal_sensor_max_value;
+ __le16 thermal_sensor_min_value;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_temp_tca_event);
+
+/* Debug Dump Internal Data (indirect 0xFF08) */
+struct ixgbe_aci_cmd_debug_dump_internals {
+ __le16 cluster_id; /* Expresses next cluster ID in response */
+#define IXGBE_ACI_DBG_DUMP_CLUSTER_ID_LINK 0
+#define IXGBE_ACI_DBG_DUMP_CLUSTER_ID_FULL_CSR_SPACE 1
+ __le16 table_id; /* Used only for non-memory clusters */
+ __le32 idx; /* In table entries for tables, in bytes for memory */
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_debug_dump_internals);
+
+/* Set Health Status (direct 0xFF20) */
+struct ixgbe_aci_cmd_set_health_status_config {
+ u8 event_source;
+#define IXGBE_ACI_HEALTH_STATUS_SET_PF_SPECIFIC_MASK BIT(0)
+#define IXGBE_ACI_HEALTH_STATUS_SET_ALL_PF_MASK BIT(1)
+#define IXGBE_ACI_HEALTH_STATUS_SET_GLOBAL_MASK BIT(2)
+ u8 reserved[15];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_health_status_config);
+
+#define IXGBE_ACI_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT 0x101
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_TYPE 0x102
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_QUAL 0x103
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_COMM 0x104
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_CONFLICT 0x105
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_NOT_PRESENT 0x106
+#define IXGBE_ACI_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED 0x107
+#define IXGBE_ACI_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT 0x108
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_DIAGNOSTIC_FEATURE 0x109
+#define IXGBE_ACI_HEALTH_STATUS_ERR_INVALID_LINK_CFG 0x10B
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PORT_ACCESS 0x10C
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PORT_UNREACHABLE 0x10D
+#define IXGBE_ACI_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED 0x10F
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PARALLEL_FAULT 0x110
+#define IXGBE_ACI_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED 0x111
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NETLIST_TOPO 0x112
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NETLIST 0x113
+#define IXGBE_ACI_HEALTH_STATUS_ERR_TOPO_CONFLICT 0x114
+#define IXGBE_ACI_HEALTH_STATUS_ERR_LINK_HW_ACCESS 0x115
+#define IXGBE_ACI_HEALTH_STATUS_ERR_LINK_RUNTIME 0x116
+#define IXGBE_ACI_HEALTH_STATUS_ERR_DNL_INIT 0x117
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PHY_NVM_PROG 0x120
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PHY_FW_LOAD 0x121
+#define IXGBE_ACI_HEALTH_STATUS_INFO_RECOVERY 0x500
+#define IXGBE_ACI_HEALTH_STATUS_ERR_FLASH_ACCESS 0x501
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NVM_AUTH 0x502
+#define IXGBE_ACI_HEALTH_STATUS_ERR_OROM_AUTH 0x503
+#define IXGBE_ACI_HEALTH_STATUS_ERR_DDP_AUTH 0x504
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NVM_COMPAT 0x505
+#define IXGBE_ACI_HEALTH_STATUS_ERR_OROM_COMPAT 0x506
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NVM_SEC_VIOLATION 0x507
+#define IXGBE_ACI_HEALTH_STATUS_ERR_OROM_SEC_VIOLATION 0x508
+#define IXGBE_ACI_HEALTH_STATUS_ERR_DCB_MIB 0x509
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MNG_TIMEOUT 0x50A
+#define IXGBE_ACI_HEALTH_STATUS_ERR_BMC_RESET 0x50B
+#define IXGBE_ACI_HEALTH_STATUS_ERR_LAST_MNG_FAIL 0x50C
+#define IXGBE_ACI_HEALTH_STATUS_ERR_RESOURCE_ALLOC_FAIL 0x50D
+#define IXGBE_ACI_HEALTH_STATUS_ERR_FW_LOOP 0x1000
+#define IXGBE_ACI_HEALTH_STATUS_ERR_FW_PFR_FAIL 0x1001
+#define IXGBE_ACI_HEALTH_STATUS_ERR_LAST_FAIL_AQ 0x1002
+
+/* Get Health Status codes (indirect 0xFF21) */
+struct ixgbe_aci_cmd_get_supported_health_status_codes {
+ __le16 health_code_count;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_supported_health_status_codes);
+
+/* Get Health Status (indirect 0xFF22) */
+struct ixgbe_aci_cmd_get_health_status {
+ __le16 health_status_count;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_health_status);
+
+/* Get Health Status event buffer entry, (0xFF22)
+ * repeated per reported health status
+ */
+struct ixgbe_aci_cmd_health_status_elem {
+ __le16 health_status_code;
+ __le16 event_source;
+#define IXGBE_ACI_HEALTH_STATUS_PF (0x1)
+#define IXGBE_ACI_HEALTH_STATUS_PORT (0x2)
+#define IXGBE_ACI_HEALTH_STATUS_GLOBAL (0x3)
+ __le32 internal_data1;
+#define IXGBE_ACI_HEALTH_STATUS_UNDEFINED_DATA (0xDEADBEEF)
+ __le32 internal_data2;
+};
+
+IXGBE_CHECK_STRUCT_LEN(12, ixgbe_aci_cmd_health_status_elem);
+
+/* Clear Health Status (direct 0xFF23) */
+struct ixgbe_aci_cmd_clear_health_status {
+ __le32 reserved[4];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_clear_health_status);
+
+enum ixgbe_aci_fw_logging_mod {
+ IXGBE_ACI_FW_LOG_ID_GENERAL = 0,
+ IXGBE_ACI_FW_LOG_ID_CTRL = 1,
+ IXGBE_ACI_FW_LOG_ID_LINK = 2,
+ IXGBE_ACI_FW_LOG_ID_LINK_TOPO = 3,
+ IXGBE_ACI_FW_LOG_ID_DNL = 4,
+ IXGBE_ACI_FW_LOG_ID_I2C = 5,
+ IXGBE_ACI_FW_LOG_ID_SDP = 6,
+ IXGBE_ACI_FW_LOG_ID_MDIO = 7,
+ IXGBE_ACI_FW_LOG_ID_ADMINQ = 8,
+ IXGBE_ACI_FW_LOG_ID_HDMA = 9,
+ IXGBE_ACI_FW_LOG_ID_LLDP = 10,
+ IXGBE_ACI_FW_LOG_ID_DCBX = 11,
+ IXGBE_ACI_FW_LOG_ID_DCB = 12,
+ IXGBE_ACI_FW_LOG_ID_XLR = 13,
+ IXGBE_ACI_FW_LOG_ID_NVM = 14,
+ IXGBE_ACI_FW_LOG_ID_AUTH = 15,
+ IXGBE_ACI_FW_LOG_ID_VPD = 16,
+ IXGBE_ACI_FW_LOG_ID_IOSF = 17,
+ IXGBE_ACI_FW_LOG_ID_PARSER = 18,
+ IXGBE_ACI_FW_LOG_ID_SW = 19,
+ IXGBE_ACI_FW_LOG_ID_SCHEDULER = 20,
+ IXGBE_ACI_FW_LOG_ID_TXQ = 21,
+ IXGBE_ACI_FW_LOG_ID_ACL = 22,
+ IXGBE_ACI_FW_LOG_ID_POST = 23,
+ IXGBE_ACI_FW_LOG_ID_WATCHDOG = 24,
+ IXGBE_ACI_FW_LOG_ID_TASK_DISPATCH = 25,
+ IXGBE_ACI_FW_LOG_ID_MNG = 26,
+ IXGBE_ACI_FW_LOG_ID_SYNCE = 27,
+ IXGBE_ACI_FW_LOG_ID_HEALTH = 28,
+ IXGBE_ACI_FW_LOG_ID_TSDRV = 29,
+ IXGBE_ACI_FW_LOG_ID_PFREG = 30,
+ IXGBE_ACI_FW_LOG_ID_MDLVER = 31,
+ IXGBE_ACI_FW_LOG_ID_MAX = 32,
+};
+
+/* Only a single log level should be set; all log levels at or below the set
+ * value are enabled, e.g. if the log level is set to IXGBE_FWLOG_LEVEL_VERBOSE,
+ * then all lower log levels are also included (except IXGBE_FWLOG_LEVEL_NONE).
+ */
+enum ixgbe_fwlog_level {
+ IXGBE_FWLOG_LEVEL_NONE = 0,
+ IXGBE_FWLOG_LEVEL_ERROR = 1,
+ IXGBE_FWLOG_LEVEL_WARNING = 2,
+ IXGBE_FWLOG_LEVEL_NORMAL = 3,
+ IXGBE_FWLOG_LEVEL_VERBOSE = 4,
+ IXGBE_FWLOG_LEVEL_INVALID, /* all values >= this entry are invalid */
+};
+
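+/* Editor's illustration of the semantics above (the helper itself is
+ * hypothetical): a message logged at level "msg" is emitted when the
+ * configured level is "cfg".
+ */
+static inline bool
+ixgbe_fwlog_level_enables(enum ixgbe_fwlog_level cfg,
+			  enum ixgbe_fwlog_level msg)
+{
+	return (msg != IXGBE_FWLOG_LEVEL_NONE && msg <= cfg &&
+	    cfg < IXGBE_FWLOG_LEVEL_INVALID);
+}
+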
+struct ixgbe_fwlog_module_entry {
+ /* module ID for the corresponding firmware logging event */
+ u16 module_id;
+ /* verbosity level for the module_id */
+ u8 log_level;
+};
+
+struct ixgbe_fwlog_cfg {
+ /* list of modules for configuring log level */
+ struct ixgbe_fwlog_module_entry module_entries[IXGBE_ACI_FW_LOG_ID_MAX];
+#define IXGBE_FWLOG_OPTION_ARQ_ENA BIT(0)
+#define IXGBE_FWLOG_OPTION_UART_ENA BIT(1)
+ /* set before calling ixgbe_fwlog_init() so the PF registers for firmware
+ * logging on initialization
+ */
+#define IXGBE_FWLOG_OPTION_REGISTER_ON_INIT BIT(2)
+ /* set in the ixgbe_fwlog_get() response if the PF is registered for FW
+ * logging events over ARQ
+ */
+#define IXGBE_FWLOG_OPTION_IS_REGISTERED BIT(3)
+ /* options used to configure firmware logging */
+ u16 options;
+ /* minimum number of log events sent per Admin Receive Queue event */
+ u8 log_resolution;
+};
+
+struct ixgbe_fwlog_data {
+ u16 data_size;
+ u8 *data;
+};
+
+struct ixgbe_fwlog_ring {
+ struct ixgbe_fwlog_data *rings;
+ u16 size;
+ u16 head;
+ u16 tail;
+};
+
+#define IXGBE_FWLOG_RING_SIZE_DFLT 256
+#define IXGBE_FWLOG_RING_SIZE_MAX 512
+
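+/* Editor's sketch (hypothetical helper): both ring sizes above are powers of
+ * two, so head/tail indices can wrap with a mask rather than a modulo.
+ */
+static inline u16
+ixgbe_fwlog_ring_next(const struct ixgbe_fwlog_ring *ring, u16 idx)
+{
+	return ((idx + 1) & (ring->size - 1));
+}
+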
+/* Set FW Logging configuration (indirect 0xFF30)
+ * Register for FW Logging (indirect 0xFF31)
+ * Query FW Logging (indirect 0xFF32)
+ * FW Log Event (indirect 0xFF33)
+ * Get FW Log (indirect 0xFF34)
+ * Clear FW Log (indirect 0xFF35)
+ */
+struct ixgbe_aci_cmd_fw_log {
+ u8 cmd_flags;
+#define IXGBE_ACI_FW_LOG_CONF_UART_EN BIT(0)
+#define IXGBE_ACI_FW_LOG_CONF_AQ_EN BIT(1)
+#define IXGBE_ACI_FW_LOG_QUERY_REGISTERED BIT(2)
+#define IXGBE_ACI_FW_LOG_CONF_SET_VALID BIT(3)
+#define IXGBE_ACI_FW_LOG_AQ_REGISTER BIT(0)
+#define IXGBE_ACI_FW_LOG_AQ_QUERY BIT(2)
+#define IXGBE_ACI_FW_LOG_PERSISTENT BIT(0)
+ u8 rsp_flag;
+#define IXGBE_ACI_FW_LOG_MORE_DATA BIT(1)
+ __le16 fw_rt_msb;
+ union {
+ struct {
+ __le32 fw_rt_lsb;
+ } sync;
+ struct {
+ __le16 log_resolution;
+#define IXGBE_ACI_FW_LOG_MIN_RESOLUTION (1)
+#define IXGBE_ACI_FW_LOG_MAX_RESOLUTION (128)
+ __le16 mdl_cnt;
+ } cfg;
+ } ops;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_fw_log);
+
+/* Response Buffer for:
+ * Set Firmware Logging Configuration (0xFF30)
+ * Query FW Logging (0xFF32)
+ */
+struct ixgbe_aci_cmd_fw_log_cfg_resp {
+ __le16 module_identifier;
+ u8 log_level;
+ u8 rsvd0;
+};
+
+IXGBE_CHECK_STRUCT_LEN(4, ixgbe_aci_cmd_fw_log_cfg_resp);
+
+/**
+ * struct ixgbe_aci_desc - Admin Command Interface (ACI) descriptor
+ * @flags: IXGBE_ACI_FLAG_* flags
+ * @opcode: Admin command opcode
+ * @datalen: length in bytes of indirect/external data buffer
+ * @retval: return value from firmware
+ * @cookie_high: opaque data high-half
+ * @cookie_low: opaque data low-half
+ * @params: command-specific parameters
+ *
+ * Descriptor format for commands the driver posts via the Admin Command Interface
+ * (ACI). The firmware writes back onto the command descriptor and returns
+ * the result of the command. Asynchronous events that are not an immediate
+ * result of a command are written to the ACI using
+ * the same descriptor format. Descriptors are in little-endian notation with
+ * 32-bit words.
+ */
+struct ixgbe_aci_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ u8 raw[16];
+ struct ixgbe_aci_cmd_generic generic;
+ struct ixgbe_aci_cmd_get_ver get_ver;
+ struct ixgbe_aci_cmd_driver_ver driver_ver;
+ struct ixgbe_aci_cmd_get_exp_err exp_err;
+ struct ixgbe_aci_cmd_req_res res_owner;
+ struct ixgbe_aci_cmd_list_caps get_cap;
+ struct ixgbe_aci_cmd_disable_rxen disable_rxen;
+ struct ixgbe_aci_cmd_get_fw_event get_fw_event;
+ struct ixgbe_aci_cmd_get_phy_caps get_phy;
+ struct ixgbe_aci_cmd_set_phy_cfg set_phy;
+ struct ixgbe_aci_cmd_restart_an restart_an;
+ struct ixgbe_aci_cmd_get_link_status get_link_status;
+ struct ixgbe_aci_cmd_set_event_mask set_event_mask;
+ struct ixgbe_aci_cmd_get_link_topo get_link_topo;
+ struct ixgbe_aci_cmd_i2c read_write_i2c;
+ struct ixgbe_aci_cmd_read_i2c_resp read_i2c_resp;
+ struct ixgbe_aci_cmd_mdio read_write_mdio;
+ struct ixgbe_aci_cmd_mdio read_mdio;
+ struct ixgbe_aci_cmd_mdio write_mdio;
+ struct ixgbe_aci_cmd_set_port_id_led set_port_id_led;
+ struct ixgbe_aci_cmd_gpio_by_func read_write_gpio_by_func;
+ struct ixgbe_aci_cmd_gpio read_write_gpio;
+ struct ixgbe_aci_cmd_sff_eeprom read_write_sff_param;
+ struct ixgbe_aci_cmd_prog_topo_dev_nvm prog_topo_dev_nvm;
+ struct ixgbe_aci_cmd_read_topo_dev_nvm read_topo_dev_nvm;
+ struct ixgbe_aci_cmd_nvm nvm;
+ struct ixgbe_aci_cmd_nvm_cfg nvm_cfg;
+ struct ixgbe_aci_cmd_nvm_checksum nvm_checksum;
+ struct ixgbe_aci_cmd_read_write_alt_direct read_write_alt_direct;
+ struct ixgbe_aci_cmd_read_write_alt_indirect read_write_alt_indirect;
+ struct ixgbe_aci_cmd_done_alt_write done_alt_write;
+ struct ixgbe_aci_cmd_clear_port_alt_write clear_port_alt_write;
+ struct ixgbe_aci_cmd_debug_dump_internals debug_dump;
+ struct ixgbe_aci_cmd_set_health_status_config
+ set_health_status_config;
+ struct ixgbe_aci_cmd_get_supported_health_status_codes
+ get_supported_health_status_codes;
+ struct ixgbe_aci_cmd_get_health_status get_health_status;
+ struct ixgbe_aci_cmd_clear_health_status clear_health_status;
+ struct ixgbe_aci_cmd_fw_log fw_log;
+ struct ixgbe_aci_cmd_nvm_sanitization nvm_sanitization;
+ } params;
+};
+
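+/* Editor's sketch, not from the original sources: filling a direct (no
+ * external buffer) descriptor. The opcode value is taken from the Clear
+ * Health Status comment above (direct 0xFF23); memset() and htole16() are
+ * assumed available in this translation unit.
+ */
+static inline void
+ixgbe_fill_clear_health_status_desc(struct ixgbe_aci_desc *desc)
+{
+	memset(desc, 0, sizeof(*desc));
+	desc->opcode = htole16(0xFF23);
+	/* flags, datalen and the cookies stay zero for a direct command. */
+}
+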
+/* E610-specific adapter context structures */
+
+struct ixgbe_link_status {
+ /* Refer to ixgbe_aci_phy_type for bits definition */
+ u64 phy_type_low;
+ u64 phy_type_high;
+ u8 topo_media_conflict;
+ u16 max_frame_size;
+ u16 link_speed;
+ u16 req_speeds;
+ u8 link_cfg_err;
+ u8 lse_ena; /* Link Status Event notification */
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ u8 fec_info;
+ u8 pacing;
+ /* Refer to #define from module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE] of
+ * ixgbe_aci_get_phy_caps structure
+ */
+ u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+};
+
+/* Common HW capabilities for SW use */
+struct ixgbe_hw_common_caps {
+ /* Write CSR protection */
+ u64 wr_csr_prot;
+ u32 switching_mode;
+ /* switching mode supported - EVB switching (including cloud) */
+#define IXGBE_NVM_IMAGE_TYPE_EVB 0x0
+
+ /* Manageability mode & supported protocols over MCTP */
+ u32 mgmt_mode;
+#define IXGBE_MGMT_MODE_PASS_THRU_MODE_M 0xF
+#define IXGBE_MGMT_MODE_CTL_INTERFACE_M 0xF0
+#define IXGBE_MGMT_MODE_REDIR_SB_INTERFACE_M 0xF00
+
+ u32 mgmt_protocols_mctp;
+#define IXGBE_MGMT_MODE_PROTO_RSVD BIT(0)
+#define IXGBE_MGMT_MODE_PROTO_PLDM BIT(1)
+#define IXGBE_MGMT_MODE_PROTO_OEM BIT(2)
+#define IXGBE_MGMT_MODE_PROTO_NC_SI BIT(3)
+
+ u32 os2bmc;
+ u32 valid_functions;
+ /* DCB capabilities */
+ u32 active_tc_bitmap;
+ u32 maxtc;
+
+ /* RSS related capabilities */
+ u32 rss_table_size; /* 512 for PFs and 64 for VFs */
+ u32 rss_table_entry_width; /* RSS Entry width in bits */
+
+ /* Tx/Rx queues */
+ u32 num_rxq; /* Number/Total Rx queues */
+ u32 rxq_first_id; /* First queue ID for Rx queues */
+ u32 num_txq; /* Number/Total Tx queues */
+ u32 txq_first_id; /* First queue ID for Tx queues */
+
+ /* MSI-X vectors */
+ u32 num_msix_vectors;
+ u32 msix_vector_first_id;
+
+ /* Max MTU for function or device */
+ u32 max_mtu;
+
+ /* WOL related */
+ u32 num_wol_proxy_fltr;
+ u32 wol_proxy_vsi_seid;
+
+ /* LED/SDP pin count */
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+
+ /* LED/SDP - Supports up to 12 LED pins and 8 SDP signals */
+#define IXGBE_MAX_SUPPORTED_GPIO_LED 12
+#define IXGBE_MAX_SUPPORTED_GPIO_SDP 8
+ u8 led[IXGBE_MAX_SUPPORTED_GPIO_LED];
+ u8 sdp[IXGBE_MAX_SUPPORTED_GPIO_SDP];
+ /* SR-IOV virtualization */
+ u8 sr_iov_1_1; /* SR-IOV enabled */
+ /* VMDQ */
+ u8 vmdq; /* VMDQ supported */
+
+ /* EVB capabilities */
+ u8 evb_802_1_qbg; /* Edge Virtual Bridging */
+ u8 evb_802_1_qbh; /* Bridge Port Extension */
+
+ u8 dcb;
+ u8 iscsi;
+ u8 mgmt_cem;
+
+ /* WoL and APM support */
+#define IXGBE_WOL_SUPPORT_M BIT(0)
+#define IXGBE_ACPI_PROG_MTHD_M BIT(1)
+#define IXGBE_PROXY_SUPPORT_M BIT(2)
+ u8 apm_wol_support;
+ u8 acpi_prog_mthd;
+ u8 proxy_support;
+ bool sec_rev_disabled;
+ bool update_disabled;
+ bool nvm_unified_update;
+ bool netlist_auth;
+#define IXGBE_NVM_MGMT_SEC_REV_DISABLED BIT(0)
+#define IXGBE_NVM_MGMT_UPDATE_DISABLED BIT(1)
+#define IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
+#define IXGBE_NVM_MGMT_NETLIST_AUTH_SUPPORT BIT(5)
+ bool no_drop_policy_support;
+ /* PCIe reset avoidance */
+ bool pcie_reset_avoidance; /* false: not supported, true: supported */
+ /* Post update reset restriction */
+ bool reset_restrict_support; /* false: not supported, true: supported */
+
+ /* External topology device images within the NVM */
+#define IXGBE_EXT_TOPO_DEV_IMG_COUNT 4
+ u32 ext_topo_dev_img_ver_high[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+ u32 ext_topo_dev_img_ver_low[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+ u8 ext_topo_dev_img_part_num[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S 8
+#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M \
+ MAKEMASK(0xFF, IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S)
+ bool ext_topo_dev_img_load_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN BIT(0)
+ bool ext_topo_dev_img_prog_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1)
+ /* Support for OROM update in Recovery Mode. */
+ bool orom_recovery_update;
+ bool next_cluster_id_support;
+};
+
+#pragma pack(1)
+struct ixgbe_orom_civd_info {
+ u8 signature[4]; /* Must match ASCII '$CIV' characters */
+ u8 checksum; /* Simple modulo 256 sum of all structure bytes must equal 0 */
+ __le32 combo_ver; /* Combo Image Version number */
+ u8 combo_name_len; /* Length of the unicode combo image version string, max of 32 */
+ __le16 combo_name[32]; /* Unicode string representing the Combo Image version */
+};
+#pragma pack()
+
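+/* Editor's sketch (hypothetical helper): per the checksum comment above, the
+ * modulo-256 sum of every byte in the structure, including the checksum byte
+ * itself, must be zero for a valid $CIV record.
+ */
+static inline bool
+ixgbe_orom_civd_checksum_ok(const struct ixgbe_orom_civd_info *civd)
+{
+	const u8 *p = (const u8 *)civd;
+	u8 sum = 0;
+	u32 i;
+
+	for (i = 0; i < sizeof(*civd); i++)
+		sum += p[i];
+	return (sum == 0);
+}
+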
+/* Function specific capabilities */
+struct ixgbe_hw_func_caps {
+ struct ixgbe_hw_common_caps common_cap;
+ u32 num_allocd_vfs; /* Number of allocated VFs */
+ u32 vf_base_id; /* Logical ID of the first VF */
+ u32 guar_num_vsi;
+ bool no_drop_policy_ena;
+};
+
+/* Device wide capabilities */
+struct ixgbe_hw_dev_caps {
+ struct ixgbe_hw_common_caps common_cap;
+ u32 num_vfs_exposed; /* Total number of VFs exposed */
+ u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
+ u32 num_flow_director_fltr; /* Number of FD filters available */
+ u32 num_funcs;
+};
+
+/* ACI event information */
+struct ixgbe_aci_event {
+ struct ixgbe_aci_desc desc;
+ u16 msg_len;
+ u16 buf_len;
+ u8 *msg_buf;
+};
+
+struct ixgbe_aci_info {
+ enum ixgbe_aci_err last_status; /* last status of sent admin command */
+ struct ixgbe_lock lock; /* admin command interface lock */
+};
+
+/* Minimum Security Revision information */
+struct ixgbe_minsrev_info {
+ u32 nvm;
+ u32 orom;
+ u8 nvm_valid : 1;
+ u8 orom_valid : 1;
+};
+
+/* Enumeration of which flash bank is desired to read from, either the active
+ * bank or the inactive bank. Used to abstract 1st and 2nd bank notion from
+ * code which just wants to read the active or inactive flash bank.
+ */
+enum ixgbe_bank_select {
+ IXGBE_ACTIVE_FLASH_BANK,
+ IXGBE_INACTIVE_FLASH_BANK,
+};
+
+/* Option ROM version information */
+struct ixgbe_orom_info {
+ u8 major; /* Major version of OROM */
+ u8 patch; /* Patch version of OROM */
+ u16 build; /* Build version of OROM */
+ u32 srev; /* Security revision */
+};
+
+/* NVM version information */
+struct ixgbe_nvm_info {
+ u32 eetrack;
+ u32 srev;
+ u8 major;
+ u8 minor;
+};
+
+/* netlist version information */
+struct ixgbe_netlist_info {
+ u32 major; /* major high/low */
+ u32 minor; /* minor high/low */
+ u32 type; /* type high/low */
+ u32 rev; /* revision high/low */
+ u32 hash; /* SHA-1 hash word */
+ u16 cust_ver; /* customer version */
+};
+
+/* Enumeration of possible flash banks for the NVM, OROM, and Netlist modules
+ * of the flash image.
+ */
+enum ixgbe_flash_bank {
+ IXGBE_INVALID_FLASH_BANK,
+ IXGBE_1ST_FLASH_BANK,
+ IXGBE_2ND_FLASH_BANK,
+};
+
+/* information for accessing NVM, OROM, and Netlist flash banks */
+struct ixgbe_bank_info {
+ u32 nvm_ptr; /* Pointer to 1st NVM bank */
+ u32 nvm_size; /* Size of NVM bank */
+ u32 orom_ptr; /* Pointer to 1st OROM bank */
+ u32 orom_size; /* Size of OROM bank */
+ u32 netlist_ptr; /* Pointer to 1st Netlist bank */
+ u32 netlist_size; /* Size of Netlist bank */
+ enum ixgbe_flash_bank nvm_bank; /* Active NVM bank */
+ enum ixgbe_flash_bank orom_bank; /* Active OROM bank */
+ enum ixgbe_flash_bank netlist_bank; /* Active Netlist bank */
+};
+
+/* Flash Chip Information */
+struct ixgbe_flash_info {
+ struct ixgbe_orom_info orom; /* Option ROM version info */
+ struct ixgbe_nvm_info nvm; /* NVM version information */
+ struct ixgbe_netlist_info netlist; /* Netlist version info */
+ struct ixgbe_bank_info banks; /* Flash Bank information */
+ u16 sr_words; /* Shadow RAM size in words */
+ u32 flash_size; /* Size of available flash in bytes */
+ u8 blank_nvm_mode; /* is NVM empty (no FW present) */
+};
+
+#define IXGBE_NVM_CMD_READ 0x0000000B
+#define IXGBE_NVM_CMD_WRITE 0x0000000C
+
+/* NVM Access command */
+struct ixgbe_nvm_access_cmd {
+ u32 command; /* NVM command: READ or WRITE */
+ u32 offset; /* Offset to read/write, in bytes */
+ u32 data_size; /* Size of data field, in bytes */
+};
+
+/* NVM Access data */
+struct ixgbe_nvm_access_data {
+ u32 regval; /* Storage for register value */
+};
+
+#endif /* _IXGBE_TYPE_E610_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_vf.c b/sys/dev/ixgbe/ixgbe_vf.c
index cac3c6b5e5e7..4e48f7f33c9d 100644
--- a/sys/dev/ixgbe/ixgbe_vf.c
+++ b/sys/dev/ixgbe/ixgbe_vf.c
@@ -656,7 +656,8 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
break;
case IXGBE_LINKS_SPEED_100_82599:
*speed = IXGBE_LINK_SPEED_100_FULL;
- if (hw->mac.type == ixgbe_mac_X550_vf) {
+ if (hw->mac.type == ixgbe_mac_X550_vf ||
+ hw->mac.type == ixgbe_mac_E610_vf) {
if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
*speed = IXGBE_LINK_SPEED_5GB_FULL;
}
diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c
index 60e66aeaf579..43c3af056b67 100644
--- a/sys/dev/ixl/if_ixl.c
+++ b/sys/dev/ixl/if_ixl.c
@@ -1151,13 +1151,20 @@ ixl_if_enable_intr(if_ctx_t ctx)
struct ixl_pf *pf = iflib_get_softc(ctx);
struct ixl_vsi *vsi = &pf->vsi;
struct i40e_hw *hw = vsi->hw;
- struct ixl_rx_queue *que = vsi->rx_queues;
+ struct ixl_rx_queue *rx_que = vsi->rx_queues;
ixl_enable_intr0(hw);
/* Enable queue interrupts */
- for (int i = 0; i < vsi->num_rx_queues; i++, que++)
- /* TODO: Queue index parameter is probably wrong */
- ixl_enable_queue(hw, que->rxr.me);
+ if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
+ for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
+ ixl_enable_queue(hw, rx_que->rxr.me);
+ } else {
+ /*
+ * Set PFINT_LNKLST0 FIRSTQ_INDX to 0x0 so that the
+ * queues can trigger interrupts.
+ */
+ wr32(hw, I40E_PFINT_LNKLST0, 0x0);
+ }
}
/*
@@ -1175,11 +1182,13 @@ ixl_if_disable_intr(if_ctx_t ctx)
if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
- ixl_disable_queue(hw, rx_que->msix - 1);
+ ixl_disable_queue(hw, rx_que->rxr.me);
} else {
- // Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF
- // stops queues from triggering interrupts
- wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
+ /*
+ * Set PFINT_LNKLST0 FIRSTQ_INDX to End of List (0x7FF)
+ * to stop queues from triggering interrupts.
+ */
+ wr32(hw, I40E_PFINT_LNKLST0, IXL_QUEUE_EOL);
}
}
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c b/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
index 6e24395b5577..c45f02cdaf42 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
@@ -1783,8 +1783,8 @@ mlx5e_add_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
el->refcount++;
if (el->installed)
return (0);
- }
- el = mlx5e_vxlan_alloc_db_el(priv, proto, port);
+ } else
+ el = mlx5e_vxlan_alloc_db_el(priv, proto, port);
if ((if_getcapenable(priv->ifp) & IFCAP_VXLAN_HWCSUM) != 0) {
err = mlx5e_add_vxlan_rule_from_db(priv, el);
diff --git a/sys/dev/nctgpio/nctgpio.c b/sys/dev/nctgpio/nctgpio.c
index 75ea1fbdba17..ddc2ceef7dfb 100644
--- a/sys/dev/nctgpio/nctgpio.c
+++ b/sys/dev/nctgpio/nctgpio.c
@@ -1258,13 +1258,14 @@ nct_attach(device_t dev)
GPIO_UNLOCK(sc);
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
device_printf(dev, "failed to attach to gpiobus\n");
GPIO_LOCK_DESTROY(sc);
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/p2sb/lewisburg_gpio.c b/sys/dev/p2sb/lewisburg_gpio.c
index b45d7767602c..3be777ab9524 100644
--- a/sys/dev/p2sb/lewisburg_gpio.c
+++ b/sys/dev/p2sb/lewisburg_gpio.c
@@ -217,10 +217,11 @@ lbggpio_attach(device_t dev)
}
/* support gpio */
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL)
return (ENXIO);
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/psci/smccc_trng.c b/sys/dev/psci/smccc_trng.c
index ab98837d3841..8a2e5508ef48 100644
--- a/sys/dev/psci/smccc_trng.c
+++ b/sys/dev/psci/smccc_trng.c
@@ -58,7 +58,7 @@ static device_attach_t trng_attach;
static unsigned trng_read(void *, unsigned);
-static struct random_source random_trng = {
+static const struct random_source random_trng = {
.rs_ident = "Arm SMCCC TRNG",
.rs_source = RANDOM_PURE_ARM_TRNG,
.rs_read = trng_read,
diff --git a/sys/dev/qcom_rnd/qcom_rnd.c b/sys/dev/qcom_rnd/qcom_rnd.c
index fdd0b553523e..a5ece7e00f28 100644
--- a/sys/dev/qcom_rnd/qcom_rnd.c
+++ b/sys/dev/qcom_rnd/qcom_rnd.c
@@ -63,7 +63,7 @@ static int qcom_rnd_detach(device_t);
static int qcom_rnd_harvest(struct qcom_rnd_softc *, void *, size_t *);
static unsigned qcom_rnd_read(void *, unsigned);
-static struct random_source random_qcom_rnd = {
+static const struct random_source random_qcom_rnd = {
.rs_ident = "Qualcomm Entropy Adapter",
.rs_source = RANDOM_PURE_QUALCOMM,
.rs_read = qcom_rnd_read,
diff --git a/sys/dev/qcom_tlmm/qcom_tlmm_ipq4018.c b/sys/dev/qcom_tlmm/qcom_tlmm_ipq4018.c
index 2d390cd449af..50f54b896748 100644
--- a/sys/dev/qcom_tlmm/qcom_tlmm_ipq4018.c
+++ b/sys/dev/qcom_tlmm/qcom_tlmm_ipq4018.c
@@ -346,13 +346,14 @@ qcom_tlmm_ipq4018_attach(device_t dev)
fdt_pinctrl_register(dev, NULL);
fdt_pinctrl_configure_by_name(dev, "default");
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
device_printf(dev, "%s: failed to attach bus\n", __func__);
qcom_tlmm_ipq4018_detach(dev);
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/random/armv8rng.c b/sys/dev/random/armv8rng.c
index 61698bfff820..524d80317681 100644
--- a/sys/dev/random/armv8rng.c
+++ b/sys/dev/random/armv8rng.c
@@ -44,7 +44,7 @@
static u_int random_rndr_read(void *, u_int);
static bool has_rndr;
-static struct random_source random_armv8_rndr = {
+static const struct random_source random_armv8_rndr = {
.rs_ident = "Armv8 rndr RNG",
.rs_source = RANDOM_PURE_ARMV8,
.rs_read = random_rndr_read,
diff --git a/sys/dev/random/darn.c b/sys/dev/random/darn.c
index a66754e095fb..9bb4991df82f 100644
--- a/sys/dev/random/darn.c
+++ b/sys/dev/random/darn.c
@@ -56,7 +56,7 @@
static u_int random_darn_read(void *, u_int);
-static struct random_source random_darn = {
+static const struct random_source random_darn = {
.rs_ident = "PowerISA DARN random number generator",
.rs_source = RANDOM_PURE_DARN,
.rs_read = random_darn_read
diff --git a/sys/dev/random/ivy.c b/sys/dev/random/ivy.c
index 05474d977276..fa1e4831f1b9 100644
--- a/sys/dev/random/ivy.c
+++ b/sys/dev/random/ivy.c
@@ -51,7 +51,7 @@
static bool has_rdrand, has_rdseed;
static u_int random_ivy_read(void *, u_int);
-static struct random_source random_ivy = {
+static const struct random_source random_ivy = {
.rs_ident = "Intel Secure Key RNG",
.rs_source = RANDOM_PURE_RDRAND,
.rs_read = random_ivy_read
diff --git a/sys/dev/random/nehemiah.c b/sys/dev/random/nehemiah.c
index f76071290b8f..56f144169dae 100644
--- a/sys/dev/random/nehemiah.c
+++ b/sys/dev/random/nehemiah.c
@@ -44,7 +44,7 @@
static u_int random_nehemiah_read(void *, u_int);
-static struct random_source random_nehemiah = {
+static const struct random_source random_nehemiah = {
.rs_ident = "VIA Nehemiah Padlock RNG",
.rs_source = RANDOM_PURE_NEHEMIAH,
.rs_read = random_nehemiah_read
diff --git a/sys/dev/random/random_harvestq.c b/sys/dev/random/random_harvestq.c
index c7762967c4fb..84ec174bd08e 100644
--- a/sys/dev/random/random_harvestq.c
+++ b/sys/dev/random/random_harvestq.c
@@ -110,7 +110,7 @@ __read_frequently u_int hc_source_mask;
struct random_sources {
CK_LIST_ENTRY(random_sources) rrs_entries;
- struct random_source *rrs_source;
+ const struct random_source *rrs_source;
};
static CK_LIST_HEAD(sources_head, random_sources) source_list =
@@ -493,9 +493,9 @@ random_healthtest_init(enum random_entropy_source source)
* The RCT limit comes from the formula in section 4.4.1.
*
* The APT cutoff is calculated using the formula in section 4.4.2
- * footnote 10 with the window size changed from 512 to 511, since the
- * test as written counts the number of samples equal to the first
- * sample in the window, and thus tests W-1 samples.
+ * footnote 10 with the number of Bernoulli trials changed from W to
+ * W-1, since the test as written counts the number of samples equal to
+ * the first sample in the window, and thus tests W-1 samples.
*/
ht->ht_rct_limit = 35;
ht->ht_apt_cutoff = 330;
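
For reference, a sketch of the cutoff formula from SP 800-90B section 4.4.2 footnote 10, with the change described in the comment above and assuming a false-positive rate alpha = 2^-40 and p = 2^-H for assessed per-sample entropy H:

C = 1 + \mathrm{CRITBINOM}(W - 1,\ 2^{-H},\ 1 - \alpha), \qquad \alpha = 2^{-40}

The driver hard-codes the evaluated results (an RCT limit of 35 and an APT cutoff of 330) rather than computing them at boot.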
@@ -849,7 +849,7 @@ random_harvest_deregister_source(enum random_entropy_source source)
}
void
-random_source_register(struct random_source *rsource)
+random_source_register(const struct random_source *rsource)
{
struct random_sources *rrs;
@@ -868,7 +868,7 @@ random_source_register(struct random_source *rsource)
}
void
-random_source_deregister(struct random_source *rsource)
+random_source_deregister(const struct random_source *rsource)
{
struct random_sources *rrs = NULL;
diff --git a/sys/dev/random/randomdev.h b/sys/dev/random/randomdev.h
index e1c9ac7b680d..6d742447ea8b 100644
--- a/sys/dev/random/randomdev.h
+++ b/sys/dev/random/randomdev.h
@@ -103,8 +103,8 @@ struct random_source {
random_source_read_t *rs_read;
};
-void random_source_register(struct random_source *);
-void random_source_deregister(struct random_source *);
+void random_source_register(const struct random_source *);
+void random_source_deregister(const struct random_source *);
#endif /* _KERNEL */
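
With the registration functions now taking a const pointer and rs_read carrying no mutable driver state, a pure entropy source can live in read-only data, which is what each driver hunk above does. A minimal sketch for a hypothetical foo_rng driver (the RANDOM_PURE_VIRTIO ID is a placeholder; a real driver uses its own source ID):

static u_int foo_rng_read(void *buf, u_int count);	/* driver-supplied */

static const struct random_source random_foo = {
	.rs_ident = "Foo hardware RNG",
	.rs_source = RANDOM_PURE_VIRTIO,	/* placeholder source ID */
	.rs_read = foo_rng_read,
};

/* In attach: */ random_source_register(&random_foo);
/* In detach: */ random_source_deregister(&random_foo);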
diff --git a/sys/dev/rccgpio/rccgpio.c b/sys/dev/rccgpio/rccgpio.c
index b2b775b879ad..dafd0b511fa9 100644
--- a/sys/dev/rccgpio/rccgpio.c
+++ b/sys/dev/rccgpio/rccgpio.c
@@ -308,7 +308,7 @@ rcc_gpio_attach(device_t dev)
RCC_WRITE(sc, RCC_GPIO_GP_LVL, sc->sc_output);
/* Attach the gpiobus. */
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
bus_release_resource(dev, SYS_RES_IOPORT, sc->sc_io_rid,
sc->sc_io_res);
@@ -316,6 +316,7 @@ rcc_gpio_attach(device_t dev)
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/uart/uart_cpu_acpi.c b/sys/dev/uart/uart_cpu_acpi.c
index 7382c47a8db6..da77603f0093 100644
--- a/sys/dev/uart/uart_cpu_acpi.c
+++ b/sys/dev/uart/uart_cpu_acpi.c
@@ -44,23 +44,15 @@
#include <contrib/dev/acpica/include/accommon.h>
#include <contrib/dev/acpica/include/actables.h>
-static struct acpi_uart_compat_data *
+static struct acpi_spcr_compat_data *
uart_cpu_acpi_scan(uint8_t interface_type)
{
- struct acpi_uart_compat_data **cd, *curcd;
+ struct acpi_spcr_compat_data **cd, *curcd;
int i;
- SET_FOREACH(cd, uart_acpi_class_and_device_set) {
+ SET_FOREACH(cd, uart_acpi_spcr_class_set) {
curcd = *cd;
- for (i = 0; curcd[i].cd_hid != NULL; i++) {
- if (curcd[i].cd_port_subtype == interface_type)
- return (&curcd[i]);
- }
- }
-
- SET_FOREACH(cd, uart_acpi_class_set) {
- curcd = *cd;
- for (i = 0; curcd[i].cd_hid != NULL; i++) {
+ for (i = 0; curcd[i].cd_class != NULL; i++) {
if (curcd[i].cd_port_subtype == interface_type)
return (&curcd[i]);
}
@@ -143,7 +135,7 @@ uart_cpu_acpi_spcr(int devtype, struct uart_devinfo *di)
{
vm_paddr_t spcr_physaddr;
ACPI_TABLE_SPCR *spcr;
- struct acpi_uart_compat_data *cd;
+ struct acpi_spcr_compat_data *cd;
struct uart_class *class;
int error = ENXIO;
@@ -237,7 +229,7 @@ uart_cpu_acpi_dbg2(struct uart_devinfo *di)
ACPI_TABLE_DBG2 *dbg2;
ACPI_DBG2_DEVICE *dbg2_dev;
ACPI_GENERIC_ADDRESS *base_address;
- struct acpi_uart_compat_data *cd;
+ struct acpi_spcr_compat_data *cd;
struct uart_class *class;
int error;
bool found;
diff --git a/sys/dev/uart/uart_cpu_acpi.h b/sys/dev/uart/uart_cpu_acpi.h
index 94329e1f1349..218f643c7621 100644
--- a/sys/dev/uart/uart_cpu_acpi.h
+++ b/sys/dev/uart/uart_cpu_acpi.h
@@ -35,11 +35,18 @@
struct uart_class;
+struct acpi_spcr_compat_data {
+ struct uart_class *cd_class;
+ uint16_t cd_port_subtype;
+};
+SET_DECLARE(uart_acpi_spcr_class_set, struct acpi_spcr_compat_data);
+#define UART_ACPI_SPCR_CLASS(data) \
+ DATA_SET(uart_acpi_spcr_class_set, data)
+
struct acpi_uart_compat_data {
const char *cd_hid;
struct uart_class *cd_class;
- uint16_t cd_port_subtype;
int cd_regshft;
int cd_regiowidth;
int cd_rclk;
@@ -56,14 +63,6 @@ SET_DECLARE(uart_acpi_class_and_device_set, struct acpi_uart_compat_data);
#define UART_ACPI_CLASS_AND_DEVICE(data) \
DATA_SET(uart_acpi_class_and_device_set, data)
-/*
- * If your UART driver implements uart_class and custom device layer,
- * then use UART_ACPI_CLASS for its declaration
- */
-SET_DECLARE(uart_acpi_class_set, struct acpi_uart_compat_data);
-#define UART_ACPI_CLASS(data) \
- DATA_SET(uart_acpi_class_set, data)
-
/* Try to initialize UART device from ACPI tables */
int uart_cpu_acpi_setup(int devtype, struct uart_devinfo *di);
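
Under the split, a UART driver that should match SPCR/DBG2 port subtypes registers a small HID-less table; the scan loop above treats a NULL cd_class as the sentinel. A sketch for a hypothetical uart_foo_class (the subtype constant is one of the real ACPI_DBG2_* values):

extern struct uart_class uart_foo_class;	/* hypothetical */

static struct acpi_spcr_compat_data foo_spcr_compat_data[] = {
	{ &uart_foo_class, ACPI_DBG2_16550_COMPATIBLE },
	{ NULL, 0 },
};
UART_ACPI_SPCR_CLASS(foo_spcr_compat_data);

The ns8250 and pl011 hunks below are the two in-tree conversions.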
diff --git a/sys/dev/uart/uart_dev_ns8250.c b/sys/dev/uart/uart_dev_ns8250.c
index 0f19ede6d9df..c38d50e54ad8 100644
--- a/sys/dev/uart/uart_dev_ns8250.c
+++ b/sys/dev/uart/uart_dev_ns8250.c
@@ -492,24 +492,32 @@ UART_CLASS(uart_ns8250_class);
* XXX -- refactor out ACPI and FDT ifdefs
*/
#ifdef DEV_ACPI
+static struct acpi_spcr_compat_data acpi_spcr_compat_data[] = {
+ { &uart_ns8250_class, ACPI_DBG2_16550_COMPATIBLE },
+ { &uart_ns8250_class, ACPI_DBG2_16550_SUBSET },
+ { &uart_ns8250_class, ACPI_DBG2_16550_WITH_GAS },
+ { NULL, 0 },
+};
+UART_ACPI_SPCR_CLASS(acpi_spcr_compat_data);
+
static struct acpi_uart_compat_data acpi_compat_data[] = {
- {"AMD0020", &uart_ns8250_class, 0, 2, 0, 48000000, UART_F_BUSY_DETECT, "AMD / Synopsys Designware UART"},
- {"AMDI0020", &uart_ns8250_class, 0, 2, 0, 48000000, UART_F_BUSY_DETECT, "AMD / Synopsys Designware UART"},
- {"APMC0D08", &uart_ns8250_class, ACPI_DBG2_16550_COMPATIBLE, 2, 4, 0, 0, "APM compatible UART"},
- {"MRVL0001", &uart_ns8250_class, ACPI_DBG2_16550_SUBSET, 2, 0, 200000000, UART_F_BUSY_DETECT, "Marvell / Synopsys Designware UART"},
- {"SCX0006", &uart_ns8250_class, 0, 2, 0, 62500000, UART_F_BUSY_DETECT, "SynQuacer / Synopsys Designware UART"},
- {"HISI0031", &uart_ns8250_class, 0, 2, 0, 200000000, UART_F_BUSY_DETECT, "HiSilicon / Synopsys Designware UART"},
- {"INTC1006", &uart_ns8250_class, 0, 2, 0, 25000000, 0, "Intel ARM64 UART"},
- {"NXP0018", &uart_ns8250_class, 0, 0, 0, 350000000, UART_F_BUSY_DETECT, "NXP / Synopsys Designware UART"},
- {"PNP0500", &uart_ns8250_class, 0, 0, 0, 0, 0, "Standard PC COM port"},
- {"PNP0501", &uart_ns8250_class, 0, 0, 0, 0, 0, "16550A-compatible COM port"},
- {"PNP0502", &uart_ns8250_class, 0, 0, 0, 0, 0, "Multiport serial device (non-intelligent 16550)"},
- {"PNP0510", &uart_ns8250_class, 0, 0, 0, 0, 0, "Generic IRDA-compatible device"},
- {"PNP0511", &uart_ns8250_class, 0, 0, 0, 0, 0, "Generic IRDA-compatible device"},
- {"WACF004", &uart_ns8250_class, 0, 0, 0, 0, 0, "Wacom Tablet PC Screen"},
- {"WACF00E", &uart_ns8250_class, 0, 0, 0, 0, 0, "Wacom Tablet PC Screen 00e"},
- {"FUJ02E5", &uart_ns8250_class, 0, 0, 0, 0, 0, "Wacom Tablet at FuS Lifebook T"},
- {NULL, NULL, 0, 0 , 0, 0, 0, NULL},
+ {"AMD0020", &uart_ns8250_class, 2, 0, 48000000, UART_F_BUSY_DETECT, "AMD / Synopsys Designware UART"},
+ {"AMDI0020", &uart_ns8250_class, 2, 0, 48000000, UART_F_BUSY_DETECT, "AMD / Synopsys Designware UART"},
+ {"APMC0D08", &uart_ns8250_class, 2, 4, 0, 0, "APM compatible UART"},
+ {"MRVL0001", &uart_ns8250_class, 2, 0, 200000000, UART_F_BUSY_DETECT, "Marvell / Synopsys Designware UART"},
+ {"SCX0006", &uart_ns8250_class, 2, 0, 62500000, UART_F_BUSY_DETECT, "SynQuacer / Synopsys Designware UART"},
+ {"HISI0031", &uart_ns8250_class, 2, 0, 200000000, UART_F_BUSY_DETECT, "HiSilicon / Synopsys Designware UART"},
+ {"INTC1006", &uart_ns8250_class, 2, 0, 25000000, 0, "Intel ARM64 UART"},
+ {"NXP0018", &uart_ns8250_class, 0, 0, 350000000, UART_F_BUSY_DETECT, "NXP / Synopsys Designware UART"},
+ {"PNP0500", &uart_ns8250_class, 0, 0, 0, 0, "Standard PC COM port"},
+ {"PNP0501", &uart_ns8250_class, 0, 0, 0, 0, "16550A-compatible COM port"},
+ {"PNP0502", &uart_ns8250_class, 0, 0, 0, 0, "Multiport serial device (non-intelligent 16550)"},
+ {"PNP0510", &uart_ns8250_class, 0, 0, 0, 0, "Generic IRDA-compatible device"},
+ {"PNP0511", &uart_ns8250_class, 0, 0, 0, 0, "Generic IRDA-compatible device"},
+ {"WACF004", &uart_ns8250_class, 0, 0, 0, 0, "Wacom Tablet PC Screen"},
+ {"WACF00E", &uart_ns8250_class, 0, 0, 0, 0, "Wacom Tablet PC Screen 00e"},
+ {"FUJ02E5", &uart_ns8250_class, 0, 0, 0, 0, "Wacom Tablet at FuS Lifebook T"},
+ {NULL, NULL, 0, 0, 0, 0, NULL},
};
UART_ACPI_CLASS_AND_DEVICE(acpi_compat_data);
#endif
diff --git a/sys/dev/uart/uart_dev_pl011.c b/sys/dev/uart/uart_dev_pl011.c
index a0d5a5b1c7e2..6afc693cd347 100644
--- a/sys/dev/uart/uart_dev_pl011.c
+++ b/sys/dev/uart/uart_dev_pl011.c
@@ -391,11 +391,19 @@ UART_FDT_CLASS_AND_DEVICE(fdt_compat_data);
#endif
#ifdef DEV_ACPI
+static struct acpi_spcr_compat_data acpi_spcr_compat_data[] = {
+ { &uart_pl011_class, ACPI_DBG2_ARM_PL011 },
+ { &uart_pl011_class, ACPI_DBG2_ARM_SBSA_GENERIC },
+ { &uart_pl011_class, ACPI_DBG2_ARM_SBSA_32BIT },
+ { NULL, 0 },
+};
+UART_ACPI_SPCR_CLASS(acpi_spcr_compat_data);
+
static struct acpi_uart_compat_data acpi_compat_data[] = {
- {"ARMH0011", &uart_pl011_class, ACPI_DBG2_ARM_PL011, 2, 0, 0, 0, "uart pl011"},
- {"ARMHB000", &uart_pl011_class, ACPI_DBG2_ARM_SBSA_GENERIC, 2, 0, 0, 0, "uart pl011"},
- {"ARMHB000", &uart_pl011_class, ACPI_DBG2_ARM_SBSA_32BIT, 2, 0, 0, 0, "uart pl011"},
- {NULL, NULL, 0, 0, 0, 0, 0, NULL},
+ {"ARMH0011", &uart_pl011_class, 2, 0, 0, 0, "uart pl011"},
+ {"ARMHB000", &uart_pl011_class, 2, 0, 0, 0, "uart pl011"},
+ {"ARMHB000", &uart_pl011_class, 2, 0, 0, 0, "uart pl011"},
+ {NULL, NULL, 0, 0, 0, 0, NULL},
};
UART_ACPI_CLASS_AND_DEVICE(acpi_compat_data);
#endif
diff --git a/sys/dev/ufshci/ufshci.h b/sys/dev/ufshci/ufshci.h
index 9f0faaadeb57..b96d82ff836e 100644
--- a/sys/dev/ufshci/ufshci.h
+++ b/sys/dev/ufshci/ufshci.h
@@ -160,19 +160,19 @@ enum ufshci_data_direction {
UFSHCI_DATA_DIRECTION_RESERVED = 0b11,
};
-enum ufshci_overall_command_status {
- UFSHCI_OCS_SUCCESS = 0x0,
- UFSHCI_OCS_INVALID_COMMAND_TABLE_ATTRIBUTES = 0x01,
- UFSHCI_OCS_INVALID_PRDT_ATTRIBUTES = 0x02,
- UFSHCI_OCS_MISMATCH_DATA_BUFFER_SIZE = 0x03,
- UFSHCI_OCS_MISMATCH_RESPONSE_UPIU_SIZE = 0x04,
- UFSHCI_OCS_COMMUNICATION_FAILURE_WITHIN_UIC_LAYERS = 0x05,
- UFSHCI_OCS_ABORTED = 0x06,
- UFSHCI_OCS_HOST_CONTROLLER_FATAL_ERROR = 0x07,
- UFSHCI_OCS_DEVICE_FATAL_ERROR = 0x08,
- UFSHCI_OCS_INVALID_CRYPTO_CONFIGURATION = 0x09,
- UFSHCI_OCS_GENERAL_CRYPTO_ERROR = 0x0A,
- UFSHCI_OCS_INVALID = 0xF,
+enum ufshci_utr_overall_command_status {
+ UFSHCI_UTR_OCS_SUCCESS = 0x0,
+ UFSHCI_UTR_OCS_INVALID_COMMAND_TABLE_ATTRIBUTES = 0x01,
+ UFSHCI_UTR_OCS_INVALID_PRDT_ATTRIBUTES = 0x02,
+ UFSHCI_UTR_OCS_MISMATCH_DATA_BUFFER_SIZE = 0x03,
+ UFSHCI_UTR_OCS_MISMATCH_RESPONSE_UPIU_SIZE = 0x04,
+ UFSHCI_UTR_OCS_COMMUNICATION_FAILURE_WITHIN_UIC_LAYERS = 0x05,
+ UFSHCI_UTR_OCS_ABORTED = 0x06,
+ UFSHCI_UTR_OCS_HOST_CONTROLLER_FATAL_ERROR = 0x07,
+ UFSHCI_UTR_OCS_DEVICE_FATAL_ERROR = 0x08,
+ UFSHCI_UTR_OCS_INVALID_CRYPTO_CONFIGURATION = 0x09,
+ UFSHCI_UTR_OCS_GENERAL_CRYPTO_ERROR = 0x0A,
+ UFSHCI_UTR_OCS_INVALID = 0xF,
};
struct ufshci_utp_xfer_req_desc {
@@ -271,6 +271,18 @@ _Static_assert(sizeof(struct ufshci_utp_cmd_desc) ==
#define UFSHCI_UTP_TASK_MGMT_REQ_SIZE 32
#define UFSHCI_UTP_TASK_MGMT_RESP_SIZE 32
+enum ufshci_utmr_overall_command_status {
+ UFSHCI_UTMR_OCS_SUCCESS = 0x0,
+ UFSHCI_UTMR_OCS_INVALID_TASK_MANAGEMENT_FUNCTION_ATTRIBUTES = 0x01,
+ UFSHCI_UTMR_OCS_MISMATCH_TASK_MANAGEMENT_REQUEST_SIZE = 0x02,
+ UFSHCI_UTMR_OCS_MISMATCH_TASK_MANAGEMENT_RESPONSE_SIZE = 0x03,
+ UFSHCI_UTMR_OCS_PEER_COMMUNICATION_FAILURE = 0x04,
+ UFSHCI_UTMR_OCS_ABORTED = 0x05,
+ UFSHCI_UTMR_OCS_FATAL_ERROR = 0x06,
+ UFSHCI_UTMR_OCS_DEVICE_FATAL_ERROR = 0x07,
+ UFSHCI_UTMR_OCS_INVALID = 0xF,
+};
+
/* UFSHCI spec 4.1, section 6.3.1 "UTP Task Management Request Descriptor" */
struct ufshci_utp_task_mgmt_req_desc {
/* dword 0 */
@@ -356,6 +368,7 @@ struct ufshci_upiu {
_Static_assert(sizeof(struct ufshci_upiu) == 512,
"ufshci_upiu must be 512 bytes");
+/* UFS Spec 4.1, section 10.7.1 "COMMAND UPIU" */
struct ufshci_cmd_command_upiu {
/* dword 0-2 */
struct ufshci_upiu_header header;
@@ -376,6 +389,7 @@ _Static_assert(sizeof(struct ufshci_cmd_command_upiu) % UFSHCI_UPIU_ALIGNMENT ==
0,
"UPIU requires 64-bit alignment");
+/* UFS Spec 4.1, section 10.7.2 "RESPONSE UPIU" */
struct ufshci_cmd_response_upiu {
/* dword 0-2 */
struct ufshci_upiu_header header;
@@ -403,6 +417,69 @@ _Static_assert(sizeof(struct ufshci_cmd_response_upiu) %
0,
"UPIU requires 64-bit alignment");
+enum task_management_function {
+ UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK = 0x01,
+ UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK_SET = 0x02,
+ UFSHCI_TASK_MGMT_FUNCTION_CLEAR_TASK_SET = 0x04,
+ UFSHCI_TASK_MGMT_FUNCTION_LOGICAL_UNIT_RESET = 0x08,
+ UFSHCI_TASK_MGMT_FUNCTION_QUERY_TASK = 0x80,
+ UFSHCI_TASK_MGMT_FUNCTION_QUERY_TASKSET = 0x81,
+};
+
+/* UFS Spec 4.1, section 10.7.6 "TASK MANAGEMENT REQUEST UPIU" */
+struct ufshci_task_mgmt_request_upiu {
+ /* dword 0-2 */
+ struct ufshci_upiu_header header;
+ /* dword 3 */
+ uint32_t input_param1; /* (Big-endian) */
+ /* dword 4 */
+ uint32_t input_param2; /* (Big-endian) */
+ /* dword 5 */
+ uint32_t input_param3; /* (Big-endian) */
+ /* dword 6-7 */
+ uint8_t reserved[8];
+} __packed __aligned(4);
+
+_Static_assert(sizeof(struct ufshci_task_mgmt_request_upiu) == 32,
+ "bad size for ufshci_task_mgmt_request_upiu");
+_Static_assert(sizeof(struct ufshci_task_mgmt_request_upiu) <=
+ UFSHCI_UTP_XFER_RESP_SIZE,
+ "bad size for ufshci_task_mgmt_request_upiu");
+_Static_assert(sizeof(struct ufshci_task_mgmt_request_upiu) %
+ UFSHCI_UPIU_ALIGNMENT ==
+ 0,
+ "UPIU requires 64-bit alignment");
+
+enum task_management_service_response {
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_COMPLETE = 0x00,
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_NOT_SUPPORTED = 0x04,
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_FAILED = 0x05,
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_SUCCEEDED = 0x08,
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_INCORRECT_LUN = 0x09,
+};
+
+/* UFS Spec 4.1, section 10.7.7 "TASK MANAGEMENT RESPONSE UPIU" */
+struct ufshci_task_mgmt_response_upiu {
+ /* dword 0-2 */
+ struct ufshci_upiu_header header;
+ /* dword 3 */
+ uint32_t output_param1; /* (Big-endian) */
+ /* dword 4 */
+ uint32_t output_param2; /* (Big-endian) */
+ /* dword 5-7 */
+ uint8_t reserved[12];
+} __packed __aligned(4);
+
+_Static_assert(sizeof(struct ufshci_task_mgmt_response_upiu) == 32,
+ "bad size for ufshci_task_mgmt_response_upiu");
+_Static_assert(sizeof(struct ufshci_task_mgmt_response_upiu) <=
+ UFSHCI_UTP_XFER_RESP_SIZE,
+ "bad size for ufshci_task_mgmt_response_upiu");
+_Static_assert(sizeof(struct ufshci_task_mgmt_response_upiu) %
+ UFSHCI_UPIU_ALIGNMENT ==
+ 0,
+ "UPIU requires 64-bit alignment");
+
/* UFS Spec 4.1, section 10.7.8 "QUERY REQUEST UPIU" */
enum ufshci_query_function {
UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01,
@@ -554,6 +631,7 @@ union ufshci_reponse_upiu {
struct ufshci_upiu_header header;
struct ufshci_cmd_response_upiu cmd_response_upiu;
struct ufshci_query_response_upiu query_response_upiu;
+ struct ufshci_task_mgmt_response_upiu task_mgmt_response_upiu;
struct ufshci_nop_in_upiu nop_in_upiu;
};
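
A sketch of decoding the new task-management response at completion time, assuming (per UFS 4.1 section 10.7.7) that the service response is carried in the low-order byte of Output Parameter 1; the helper name is illustrative:

static int
foo_tm_status(const union ufshci_reponse_upiu *upiu)
{
	const struct ufshci_task_mgmt_response_upiu *resp =
	    &upiu->task_mgmt_response_upiu;
	uint32_t param1;

	/* Output parameters are big-endian on the wire. */
	param1 = be32toh(resp->output_param1);

	switch (param1 & 0xff) {
	case UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_COMPLETE:
	case UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_SUCCEEDED:
		return (0);
	default:
		return (EIO);
	}
}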
diff --git a/sys/dev/ufshci/ufshci_ctrlr.c b/sys/dev/ufshci/ufshci_ctrlr.c
index 55d8363d3287..37bd32665b2b 100644
--- a/sys/dev/ufshci/ufshci_ctrlr.c
+++ b/sys/dev/ufshci/ufshci_ctrlr.c
@@ -154,12 +154,12 @@ ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
/* TODO: Initialize interrupt Aggregation Control Register (UTRIACR) */
/* Allocate and initialize UTP Task Management Request List. */
- error = ufshci_utm_req_queue_construct(ctrlr);
+ error = ufshci_utmr_req_queue_construct(ctrlr);
if (error)
return (error);
/* Allocate and initialize UTP Transfer Request List or SQ/CQ. */
- error = ufshci_ut_req_queue_construct(ctrlr);
+ error = ufshci_utr_req_queue_construct(ctrlr);
if (error)
return (error);
@@ -179,8 +179,8 @@ ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev)
/* TODO: Flush In-flight IOs */
/* Release resources */
- ufshci_utm_req_queue_destroy(ctrlr);
- ufshci_ut_req_queue_destroy(ctrlr);
+ ufshci_utmr_req_queue_destroy(ctrlr);
+ ufshci_utr_req_queue_destroy(ctrlr);
if (ctrlr->tag)
bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
@@ -215,8 +215,8 @@ ufshci_ctrlr_reset(struct ufshci_controller *ctrlr)
ufshci_mmio_write_4(ctrlr, ie, 0);
/* Release resources */
- ufshci_utm_req_queue_destroy(ctrlr);
- ufshci_ut_req_queue_destroy(ctrlr);
+ ufshci_utmr_req_queue_destroy(ctrlr);
+ ufshci_utr_req_queue_destroy(ctrlr);
/* Reset Host Controller */
error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
@@ -232,12 +232,12 @@ ufshci_ctrlr_reset(struct ufshci_controller *ctrlr)
ufshci_mmio_write_4(ctrlr, ie, ie);
/* Allocate and initialize UTP Task Management Request List. */
- error = ufshci_utm_req_queue_construct(ctrlr);
+ error = ufshci_utmr_req_queue_construct(ctrlr);
if (error)
return (error);
/* Allocate and initialize UTP Transfer Request List or SQ/CQ. */
- error = ufshci_ut_req_queue_construct(ctrlr);
+ error = ufshci_utr_req_queue_construct(ctrlr);
if (error)
return (error);
@@ -245,6 +245,15 @@ ufshci_ctrlr_reset(struct ufshci_controller *ctrlr)
}
int
+ufshci_ctrlr_submit_task_mgmt_request(struct ufshci_controller *ctrlr,
+ struct ufshci_request *req)
+{
+ return (
+ ufshci_req_queue_submit_request(&ctrlr->task_mgmt_req_queue, req,
+ /*is_admin*/ false));
+}
+
+int
ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr,
struct ufshci_request *req)
{
@@ -360,8 +369,8 @@ ufshci_ctrlr_start_config_hook(void *arg)
TSENTER();
- if (ufshci_utm_req_queue_enable(ctrlr) == 0 &&
- ufshci_ut_req_queue_enable(ctrlr) == 0)
+ if (ufshci_utmr_req_queue_enable(ctrlr) == 0 &&
+ ufshci_utr_req_queue_enable(ctrlr) == 0)
ufshci_ctrlr_start(ctrlr);
else
ufshci_ctrlr_fail(ctrlr, false);
@@ -445,9 +454,9 @@ ufshci_ctrlr_poll(struct ufshci_controller *ctrlr)
}
/* UTP Task Management Request Completion Status */
if (is & UFSHCIM(UFSHCI_IS_REG_UTMRCS)) {
- ufshci_printf(ctrlr, "TODO: Implement UTMR completion\n");
ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTMRCS));
- /* TODO: Implement UTMR completion */
+ ufshci_req_queue_process_completions(
+ &ctrlr->task_mgmt_req_queue);
}
/* UTP Transfer Request Completion Status */
if (is & UFSHCIM(UFSHCI_IS_REG_UTRCS)) {
diff --git a/sys/dev/ufshci/ufshci_ctrlr_cmd.c b/sys/dev/ufshci/ufshci_ctrlr_cmd.c
index ddf28c58fa88..71d163d998af 100644
--- a/sys/dev/ufshci/ufshci_ctrlr_cmd.c
+++ b/sys/dev/ufshci/ufshci_ctrlr_cmd.c
@@ -8,6 +8,32 @@
#include "ufshci_private.h"
void
+ufshci_ctrlr_cmd_send_task_mgmt_request(struct ufshci_controller *ctrlr,
+ ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t function, uint8_t lun,
+ uint8_t task_tag, uint8_t iid)
+{
+ struct ufshci_request *req;
+ struct ufshci_task_mgmt_request_upiu *upiu;
+
+ req = ufshci_allocate_request_vaddr(NULL, 0, M_WAITOK, cb_fn, cb_arg);
+
+ req->request_size = sizeof(struct ufshci_task_mgmt_request_upiu);
+ req->response_size = sizeof(struct ufshci_task_mgmt_response_upiu);
+
+ upiu = (struct ufshci_task_mgmt_request_upiu *)&req->request_upiu;
+ memset(upiu, 0, req->request_size);
+ upiu->header.trans_type =
+ UFSHCI_UPIU_TRANSACTION_CODE_TASK_MANAGEMENT_REQUEST;
+ upiu->header.lun = lun;
+ upiu->header.ext_iid_or_function = function;
+ upiu->input_param1 = lun;
+ upiu->input_param2 = task_tag;
+ upiu->input_param3 = iid;
+
+ ufshci_ctrlr_submit_task_mgmt_request(ctrlr, req);
+}
+
+void
ufshci_ctrlr_cmd_send_nop(struct ufshci_controller *ctrlr, ufshci_cb_fn_t cb_fn,
void *cb_arg)
{
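
A usage sketch for the new helper: issuing ABORT TASK for an outstanding tag. The callback shape (arg, completion, error) is assumed from how the driver invokes cb_fn elsewhere; foo_abort_done and tag are illustrative:

static void
foo_abort_done(void *arg, const struct ufshci_completion *cpl, bool error)
{
	/* cpl->response_upiu.task_mgmt_response_upiu holds the result. */
}

/* ... */
ufshci_ctrlr_cmd_send_task_mgmt_request(ctrlr, foo_abort_done, NULL,
    UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK, /*lun*/ 0, /*task_tag*/ tag,
    /*iid*/ 0);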
diff --git a/sys/dev/ufshci/ufshci_private.h b/sys/dev/ufshci/ufshci_private.h
index ac58d44102a0..1a2742ae2e80 100644
--- a/sys/dev/ufshci/ufshci_private.h
+++ b/sys/dev/ufshci/ufshci_private.h
@@ -125,6 +125,8 @@ struct ufshci_qops {
struct ufshci_tracker **tr);
void (*ring_doorbell)(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr);
+ bool (*is_doorbell_cleared)(struct ufshci_controller *ctrlr,
+ uint8_t slot);
void (*clear_cpl_ntf)(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr);
bool (*process_cpl)(struct ufshci_req_queue *req_queue);
@@ -143,7 +145,10 @@ struct ufshci_hw_queue {
int domain;
int cpu;
- struct ufshci_utp_xfer_req_desc *utrd;
+ union {
+ struct ufshci_utp_xfer_req_desc *utrd;
+ struct ufshci_utp_task_mgmt_req_desc *utmrd;
+ };
bus_dma_tag_t dma_tag_queue;
bus_dmamap_t queuemem_map;
@@ -333,6 +338,8 @@ int ufshci_ctrlr_reset(struct ufshci_controller *ctrlr);
void ufshci_ctrlr_start_config_hook(void *arg);
void ufshci_ctrlr_poll(struct ufshci_controller *ctrlr);
+int ufshci_ctrlr_submit_task_mgmt_request(struct ufshci_controller *ctrlr,
+ struct ufshci_request *req);
int ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr,
struct ufshci_request *req);
int ufshci_ctrlr_submit_io_request(struct ufshci_controller *ctrlr,
@@ -351,6 +358,9 @@ int ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr);
int ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr);
/* Controller Command */
+void ufshci_ctrlr_cmd_send_task_mgmt_request(struct ufshci_controller *ctrlr,
+ ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t function, uint8_t lun,
+ uint8_t task_tag, uint8_t iid);
void ufshci_ctrlr_cmd_send_nop(struct ufshci_controller *ctrlr,
ufshci_cb_fn_t cb_fn, void *cb_arg);
void ufshci_ctrlr_cmd_send_query_request(struct ufshci_controller *ctrlr,
@@ -361,12 +371,12 @@ void ufshci_ctrlr_cmd_send_scsi_command(struct ufshci_controller *ctrlr,
/* Request Queue */
bool ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue);
-int ufshci_utm_req_queue_construct(struct ufshci_controller *ctrlr);
-int ufshci_ut_req_queue_construct(struct ufshci_controller *ctrlr);
-void ufshci_utm_req_queue_destroy(struct ufshci_controller *ctrlr);
-void ufshci_ut_req_queue_destroy(struct ufshci_controller *ctrlr);
-int ufshci_utm_req_queue_enable(struct ufshci_controller *ctrlr);
-int ufshci_ut_req_queue_enable(struct ufshci_controller *ctrlr);
+int ufshci_utmr_req_queue_construct(struct ufshci_controller *ctrlr);
+int ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr);
+void ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr);
+void ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr);
+int ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr);
+int ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr);
void ufshci_req_queue_fail(struct ufshci_controller *ctrlr,
struct ufshci_hw_queue *hwq);
int ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
@@ -385,9 +395,17 @@ int ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue);
int ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue,
struct ufshci_tracker **tr);
-void ufshci_req_sdb_ring_doorbell(struct ufshci_controller *ctrlr,
+void ufshci_req_sdb_utmr_ring_doorbell(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr);
+void ufshci_req_sdb_utr_ring_doorbell(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr);
+bool ufshci_req_sdb_utmr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
+ uint8_t slot);
+bool ufshci_req_sdb_utr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
+ uint8_t slot);
+void ufshci_req_sdb_utmr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr);
-void ufshci_req_sdb_clear_cpl_ntf(struct ufshci_controller *ctrlr,
+void ufshci_req_sdb_utr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr);
bool ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue);
int ufshci_req_sdb_get_inflight_io(struct ufshci_controller *ctrlr);
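
The new is_doorbell_cleared op keeps completion scanning queue-type agnostic: callers go through the vtable instead of reading a specific doorbell register. A minimal dispatch sketch (the wrapper is illustrative; the op names are from this change):

static bool
foo_slot_done(struct ufshci_req_queue *q, uint8_t slot)
{
	/* Resolves to the UTMR or UTR doorbell variant as appropriate. */
	return (q->qops.is_doorbell_cleared(q->ctrlr, slot));
}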
diff --git a/sys/dev/ufshci/ufshci_req_queue.c b/sys/dev/ufshci/ufshci_req_queue.c
index cc9a2ddae768..bb6efa6d2ccc 100644
--- a/sys/dev/ufshci/ufshci_req_queue.c
+++ b/sys/dev/ufshci/ufshci_req_queue.c
@@ -19,21 +19,36 @@
static void ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
struct ufshci_tracker *tr, enum ufshci_data_direction data_direction);
-static const struct ufshci_qops sdb_qops = {
+static const struct ufshci_qops sdb_utmr_qops = {
.construct = ufshci_req_sdb_construct,
.destroy = ufshci_req_sdb_destroy,
.get_hw_queue = ufshci_req_sdb_get_hw_queue,
.enable = ufshci_req_sdb_enable,
.reserve_slot = ufshci_req_sdb_reserve_slot,
.reserve_admin_slot = ufshci_req_sdb_reserve_slot,
- .ring_doorbell = ufshci_req_sdb_ring_doorbell,
- .clear_cpl_ntf = ufshci_req_sdb_clear_cpl_ntf,
+ .ring_doorbell = ufshci_req_sdb_utmr_ring_doorbell,
+ .is_doorbell_cleared = ufshci_req_sdb_utmr_is_doorbell_cleared,
+ .clear_cpl_ntf = ufshci_req_sdb_utmr_clear_cpl_ntf,
+ .process_cpl = ufshci_req_sdb_process_cpl,
+ .get_inflight_io = ufshci_req_sdb_get_inflight_io,
+};
+
+static const struct ufshci_qops sdb_utr_qops = {
+ .construct = ufshci_req_sdb_construct,
+ .destroy = ufshci_req_sdb_destroy,
+ .get_hw_queue = ufshci_req_sdb_get_hw_queue,
+ .enable = ufshci_req_sdb_enable,
+ .reserve_slot = ufshci_req_sdb_reserve_slot,
+ .reserve_admin_slot = ufshci_req_sdb_reserve_slot,
+ .ring_doorbell = ufshci_req_sdb_utr_ring_doorbell,
+ .is_doorbell_cleared = ufshci_req_sdb_utr_is_doorbell_cleared,
+ .clear_cpl_ntf = ufshci_req_sdb_utr_clear_cpl_ntf,
.process_cpl = ufshci_req_sdb_process_cpl,
.get_inflight_io = ufshci_req_sdb_get_inflight_io,
};
int
-ufshci_utm_req_queue_construct(struct ufshci_controller *ctrlr)
+ufshci_utmr_req_queue_construct(struct ufshci_controller *ctrlr)
{
struct ufshci_req_queue *req_queue;
int error;
@@ -44,7 +59,7 @@ ufshci_utm_req_queue_construct(struct ufshci_controller *ctrlr)
*/
req_queue = &ctrlr->task_mgmt_req_queue;
req_queue->queue_mode = UFSHCI_Q_MODE_SDB;
- req_queue->qops = sdb_qops;
+ req_queue->qops = sdb_utmr_qops;
error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTRM_ENTRIES,
/*is_task_mgmt*/ true);
@@ -53,21 +68,21 @@ ufshci_utm_req_queue_construct(struct ufshci_controller *ctrlr)
}
void
-ufshci_utm_req_queue_destroy(struct ufshci_controller *ctrlr)
+ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr)
{
ctrlr->task_mgmt_req_queue.qops.destroy(ctrlr,
&ctrlr->task_mgmt_req_queue);
}
int
-ufshci_utm_req_queue_enable(struct ufshci_controller *ctrlr)
+ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr)
{
return (ctrlr->task_mgmt_req_queue.qops.enable(ctrlr,
&ctrlr->task_mgmt_req_queue));
}
int
-ufshci_ut_req_queue_construct(struct ufshci_controller *ctrlr)
+ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr)
{
struct ufshci_req_queue *req_queue;
int error;
@@ -79,7 +94,7 @@ ufshci_ut_req_queue_construct(struct ufshci_controller *ctrlr)
*/
req_queue = &ctrlr->transfer_req_queue;
req_queue->queue_mode = UFSHCI_Q_MODE_SDB;
- req_queue->qops = sdb_qops;
+ req_queue->qops = sdb_utr_qops;
error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTR_ENTRIES,
/*is_task_mgmt*/ false);
@@ -88,14 +103,14 @@ ufshci_ut_req_queue_construct(struct ufshci_controller *ctrlr)
}
void
-ufshci_ut_req_queue_destroy(struct ufshci_controller *ctrlr)
+ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr)
{
ctrlr->transfer_req_queue.qops.destroy(ctrlr,
&ctrlr->transfer_req_queue);
}
int
-ufshci_ut_req_queue_enable(struct ufshci_controller *ctrlr)
+ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr)
{
return (ctrlr->transfer_req_queue.qops.enable(ctrlr,
&ctrlr->transfer_req_queue));
@@ -213,20 +228,30 @@ ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
struct ufshci_req_queue *req_queue = tr->req_queue;
struct ufshci_request *req = tr->req;
struct ufshci_completion cpl;
- struct ufshci_utp_xfer_req_desc *desc;
uint8_t ocs;
bool retry, error, retriable;
mtx_assert(&tr->hwq->qlock, MA_NOTOWNED);
- bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ /*
+ * Copy the response from the Request Descriptor or UTP Command
+ * Descriptor.
+ */
+ if (req_queue->is_task_mgmt) {
+ cpl.size = tr->response_size;
+ memcpy(&cpl.response_upiu,
+ (void *)tr->hwq->utmrd[tr->slot_num].response_upiu,
+ cpl.size);
- cpl.size = tr->response_size;
- memcpy(&cpl.response_upiu, (void *)tr->ucd->response_upiu, cpl.size);
+ ocs = tr->hwq->utmrd[tr->slot_num].overall_command_status;
+ } else {
+ bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- desc = &tr->hwq->utrd[tr->slot_num];
- ocs = desc->overall_command_status;
+ cpl.size = tr->response_size;
+ memcpy(&cpl.response_upiu, (void *)tr->ucd->response_upiu,
+ cpl.size);
+
+ ocs = tr->hwq->utrd[tr->slot_num].overall_command_status;
+ }
error = ufshci_req_queue_response_is_error(req_queue, ocs,
&cpl.response_upiu);
@@ -358,7 +383,19 @@ ufshci_req_queue_prepare_prdt(struct ufshci_tracker *tr)
}
static void
-ufshci_req_queue_fill_descriptor(struct ufshci_utp_xfer_req_desc *desc,
+ufshci_req_queue_fill_utmr_descriptor(
+ struct ufshci_utp_task_mgmt_req_desc *desc, struct ufshci_request *req)
+{
+ memset(desc, 0, sizeof(struct ufshci_utp_task_mgmt_req_desc));
+ desc->interrupt = true;
+ /* Set the initial value to Invalid. */
+ desc->overall_command_status = UFSHCI_UTMR_OCS_INVALID;
+
+ memcpy(desc->request_upiu, &req->request_upiu, req->request_size);
+}
+
+static void
+ufshci_req_queue_fill_utr_descriptor(struct ufshci_utp_xfer_req_desc *desc,
uint8_t data_direction, const uint64_t paddr, const uint16_t response_off,
const uint16_t response_len, const uint16_t prdt_off,
const uint16_t prdt_entry_cnt)
@@ -378,7 +415,7 @@ ufshci_req_queue_fill_descriptor(struct ufshci_utp_xfer_req_desc *desc,
desc->data_direction = data_direction;
desc->interrupt = true;
/* Set the initial value to Invalid. */
- desc->overall_command_status = UFSHCI_OCS_INVALID;
+ desc->overall_command_status = UFSHCI_UTR_OCS_INVALID;
desc->utp_command_descriptor_base_address = (uint32_t)(paddr &
0xffffffff);
desc->utp_command_descriptor_base_address_upper = (uint32_t)(paddr >>
@@ -407,26 +444,32 @@ ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
/* TODO: Check timeout */
- request_len = req->request_size;
- response_off = UFSHCI_UTP_XFER_REQ_SIZE;
- response_len = req->response_size;
-
- /* Prepare UTP Command Descriptor */
- memcpy(tr->ucd, &req->request_upiu, request_len);
- memset((uint8_t *)tr->ucd + response_off, 0, response_len);
-
- /* Prepare PRDT */
- if (req->payload_valid)
- ufshci_req_queue_prepare_prdt(tr);
-
- bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- /* Prepare UTP Transfer Request Descriptor. */
- ucd_paddr = tr->ucd_bus_addr;
- ufshci_req_queue_fill_descriptor(&tr->hwq->utrd[slot_num],
- data_direction, ucd_paddr, response_off, response_len, tr->prdt_off,
- tr->prdt_entry_cnt);
+ if (req_queue->is_task_mgmt) {
+ /* Prepare UTP Task Management Request Descriptor. */
+ ufshci_req_queue_fill_utmr_descriptor(&tr->hwq->utmrd[slot_num],
+ req);
+ } else {
+ request_len = req->request_size;
+ response_off = UFSHCI_UTP_XFER_REQ_SIZE;
+ response_len = req->response_size;
+
+ /* Prepare UTP Command Descriptor */
+ memcpy(tr->ucd, &req->request_upiu, request_len);
+ memset((uint8_t *)tr->ucd + response_off, 0, response_len);
+
+ /* Prepare PRDT */
+ if (req->payload_valid)
+ ufshci_req_queue_prepare_prdt(tr);
+
+ /* Prepare UTP Transfer Request Descriptor. */
+ ucd_paddr = tr->ucd_bus_addr;
+ ufshci_req_queue_fill_utr_descriptor(&tr->hwq->utrd[slot_num],
+ data_direction, ucd_paddr, response_off, response_len,
+ tr->prdt_off, tr->prdt_entry_cnt);
+
+ bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ }
bus_dmamap_sync(tr->hwq->dma_tag_queue, tr->hwq->queuemem_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
diff --git a/sys/dev/ufshci/ufshci_req_sdb.c b/sys/dev/ufshci/ufshci_req_sdb.c
index b1f303afaef5..834a459d48e3 100644
--- a/sys/dev/ufshci/ufshci_req_sdb.c
+++ b/sys/dev/ufshci/ufshci_req_sdb.c
@@ -26,12 +26,6 @@ ufshci_req_sdb_cmd_desc_destroy(struct ufshci_req_queue *req_queue)
tr = hwq->act_tr[i];
bus_dmamap_destroy(req_queue->dma_tag_payload,
tr->payload_dma_map);
- free(tr, M_UFSHCI);
- }
-
- if (hwq->act_tr) {
- free(hwq->act_tr, M_UFSHCI);
- hwq->act_tr = NULL;
}
if (req_queue->ucd) {
@@ -76,7 +70,6 @@ ufshci_req_sdb_cmd_desc_construct(struct ufshci_req_queue *req_queue,
uint32_t num_entries, struct ufshci_controller *ctrlr)
{
struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
- struct ufshci_tracker *tr;
size_t ucd_allocsz, payload_allocsz;
uint8_t *ucdmem;
int i, error;
@@ -134,27 +127,14 @@ ufshci_req_sdb_cmd_desc_construct(struct ufshci_req_queue *req_queue,
goto out;
}
- hwq->act_tr = malloc_domainset(sizeof(struct ufshci_tracker *) *
- req_queue->num_entries,
- M_UFSHCI, DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
-
for (i = 0; i < req_queue->num_trackers; i++) {
- tr = malloc_domainset(sizeof(struct ufshci_tracker), M_UFSHCI,
- DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
-
bus_dmamap_create(req_queue->dma_tag_payload, 0,
- &tr->payload_dma_map);
+ &hwq->act_tr[i]->payload_dma_map);
- tr->req_queue = req_queue;
- tr->slot_num = i;
- tr->slot_state = UFSHCI_SLOT_STATE_FREE;
-
- tr->ucd = (struct ufshci_utp_cmd_desc *)ucdmem;
- tr->ucd_bus_addr = hwq->ucd_bus_addr[i];
+ hwq->act_tr[i]->ucd = (struct ufshci_utp_cmd_desc *)ucdmem;
+ hwq->act_tr[i]->ucd_bus_addr = hwq->ucd_bus_addr[i];
ucdmem += sizeof(struct ufshci_utp_cmd_desc);
-
- hwq->act_tr[i] = tr;
}
return (0);
@@ -163,25 +143,16 @@ out:
return (ENOMEM);
}
-static bool
-ufshci_req_sdb_is_doorbell_cleared(struct ufshci_controller *ctrlr,
- uint8_t slot)
-{
- uint32_t utrldbr;
-
- utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
- return (!(utrldbr & (1 << slot)));
-}
-
int
ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue, uint32_t num_entries, bool is_task_mgmt)
{
struct ufshci_hw_queue *hwq;
- size_t allocsz;
+ size_t desc_size, alloc_size;
uint64_t queuemem_phys;
uint8_t *queuemem;
- int error;
+ struct ufshci_tracker *tr;
+ int i, error;
req_queue->ctrlr = ctrlr;
req_queue->is_task_mgmt = is_task_mgmt;
@@ -209,10 +180,13 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
* Descriptor (UTRD) or UTP Task Management Request Descriptor (UTMRD))
* Note: UTRD/UTMRD format is restricted to 1024-byte alignment.
*/
- allocsz = num_entries * sizeof(struct ufshci_utp_xfer_req_desc);
+ desc_size = is_task_mgmt ?
+ sizeof(struct ufshci_utp_task_mgmt_req_desc) :
+ sizeof(struct ufshci_utp_xfer_req_desc);
+ alloc_size = num_entries * desc_size;
error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 1024,
ctrlr->page_size, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
- allocsz, 1, allocsz, 0, NULL, NULL, &hwq->dma_tag_queue);
+ alloc_size, 1, alloc_size, 0, NULL, NULL, &hwq->dma_tag_queue);
if (error != 0) {
ufshci_printf(ctrlr, "request queue tag create failed %d\n",
error);
@@ -227,7 +201,7 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
}
if (bus_dmamap_load(hwq->dma_tag_queue, hwq->queuemem_map, queuemem,
- allocsz, ufshci_single_map, &queuemem_phys, 0) != 0) {
+ alloc_size, ufshci_single_map, &queuemem_phys, 0) != 0) {
ufshci_printf(ctrlr, "failed to load request queue memory\n");
bus_dmamem_free(hwq->dma_tag_queue, hwq->utrd,
hwq->queuemem_map);
@@ -238,13 +212,30 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
hwq->num_intr_handler_calls = 0;
hwq->num_retries = 0;
hwq->num_failures = 0;
- hwq->utrd = (struct ufshci_utp_xfer_req_desc *)queuemem;
hwq->req_queue_addr = queuemem_phys;
+ /* Allocate trackers */
+ hwq->act_tr = malloc_domainset(sizeof(struct ufshci_tracker *) *
+ req_queue->num_entries,
+ M_UFSHCI, DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
+
+ for (i = 0; i < req_queue->num_trackers; i++) {
+ tr = malloc_domainset(sizeof(struct ufshci_tracker), M_UFSHCI,
+ DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
+
+ tr->req_queue = req_queue;
+ tr->slot_num = i;
+ tr->slot_state = UFSHCI_SLOT_STATE_FREE;
+
+ hwq->act_tr[i] = tr;
+ }
+
if (is_task_mgmt) {
/* UTP Task Management Request (UTMR) */
uint32_t utmrlba, utmrlbau;
+ hwq->utmrd = (struct ufshci_utp_task_mgmt_req_desc *)queuemem;
+
utmrlba = hwq->req_queue_addr & 0xffffffff;
utmrlbau = hwq->req_queue_addr >> 32;
ufshci_mmio_write_4(ctrlr, utmrlba, utmrlba);
@@ -253,6 +244,8 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
/* UTP Transfer Request (UTR) */
uint32_t utrlba, utrlbau;
+ hwq->utrd = (struct ufshci_utp_xfer_req_desc *)queuemem;
+
/*
* Allocate physical memory for the command descriptor.
* UTP Transfer Request (UTR) requires memory for a separate
@@ -284,10 +277,22 @@ ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue)
{
struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+ struct ufshci_tracker *tr;
+ int i;
if (!req_queue->is_task_mgmt)
ufshci_req_sdb_cmd_desc_destroy(&ctrlr->transfer_req_queue);
+ for (i = 0; i < req_queue->num_trackers; i++) {
+ tr = hwq->act_tr[i];
+ free(tr, M_UFSHCI);
+ }
+
+ if (hwq->act_tr) {
+ free(hwq->act_tr, M_UFSHCI);
+ hwq->act_tr = NULL;
+ }
+
if (hwq->utrd != NULL) {
bus_dmamap_unload(hwq->dma_tag_queue, hwq->queuemem_map);
bus_dmamem_free(hwq->dma_tag_queue, hwq->utrd,
@@ -389,7 +394,18 @@ ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue,
}
void
-ufshci_req_sdb_clear_cpl_ntf(struct ufshci_controller *ctrlr,
+ufshci_req_sdb_utmr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr)
+{
+ /*
+ * NOP
+ * UTP Task Management does not have a Completion Notification
+ * Register.
+ */
+}
+
+void
+ufshci_req_sdb_utr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr)
{
uint32_t utrlcnr;
@@ -399,7 +415,19 @@ ufshci_req_sdb_clear_cpl_ntf(struct ufshci_controller *ctrlr,
}
void
-ufshci_req_sdb_ring_doorbell(struct ufshci_controller *ctrlr,
+ufshci_req_sdb_utmr_ring_doorbell(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr)
+{
+ uint32_t utmrldbr = 0;
+
+ utmrldbr |= 1 << tr->slot_num;
+ ufshci_mmio_write_4(ctrlr, utmrldbr, utmrldbr);
+
+ tr->req_queue->hwq[UFSHCI_SDB_Q].num_cmds++;
+}
+
+void
+ufshci_req_sdb_utr_ring_doorbell(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr)
{
uint32_t utrldbr = 0;
@@ -408,9 +436,26 @@ ufshci_req_sdb_ring_doorbell(struct ufshci_controller *ctrlr,
ufshci_mmio_write_4(ctrlr, utrldbr, utrldbr);
tr->req_queue->hwq[UFSHCI_SDB_Q].num_cmds++;
+}
+
+bool
+ufshci_req_sdb_utmr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
+ uint8_t slot)
+{
+ uint32_t utmrldbr;
+
+ utmrldbr = ufshci_mmio_read_4(ctrlr, utmrldbr);
+ return (!(utmrldbr & (1 << slot)));
+}
- // utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
- // printf("DB=0x%08x\n", utrldbr);
+bool
+ufshci_req_sdb_utr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
+ uint8_t slot)
+{
+ uint32_t utrldbr;
+
+ utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
+ return (!(utrldbr & (1 << slot)));
}
bool
@@ -435,7 +480,7 @@ ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue)
* is cleared.
*/
if (tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED &&
- ufshci_req_sdb_is_doorbell_cleared(req_queue->ctrlr,
+ req_queue->qops.is_doorbell_cleared(req_queue->ctrlr,
slot)) {
ufshci_req_queue_complete_tracker(tr);
done = true;
diff --git a/sys/dev/usb/input/uhid.c b/sys/dev/usb/input/uhid.c
index a31081663f0c..e2b97f5accac 100644
--- a/sys/dev/usb/input/uhid.c
+++ b/sys/dev/usb/input/uhid.c
@@ -40,8 +40,6 @@
* HID spec: http://www.usb.org/developers/devclass_docs/HID1_11.pdf
*/
-#include "opt_hid.h"
-
#include <sys/stdint.h>
#include <sys/stddef.h>
#include <sys/param.h>
@@ -928,11 +926,7 @@ static device_method_t uhid_methods[] = {
};
static driver_t uhid_driver = {
-#ifdef HIDRAW_MAKE_UHID_ALIAS
- .name = "hidraw",
-#else
.name = "uhid",
-#endif
.methods = uhid_methods,
.size = sizeof(struct uhid_softc),
};
diff --git a/sys/dev/usb/input/usbhid.c b/sys/dev/usb/input/usbhid.c
index df810012b3f8..cba3f34053e5 100644
--- a/sys/dev/usb/input/usbhid.c
+++ b/sys/dev/usb/input/usbhid.c
@@ -114,6 +114,7 @@ struct usbhid_xfer_ctx {
void *cb_ctx;
int waiters;
bool influx;
+ bool no_readahead;
};
struct usbhid_softc {
@@ -272,7 +273,7 @@ usbhid_intr_handler_cb(struct usbhid_xfer_ctx *xfer_ctx)
sc->sc_intr_handler(sc->sc_intr_ctx, xfer_ctx->buf,
xfer_ctx->req.intr.actlen);
- return (0);
+ return (xfer_ctx->no_readahead ? ECANCELED : 0);
}
static int
@@ -430,6 +431,7 @@ usbhid_intr_start(device_t dev, device_t child __unused)
.cb = usbhid_intr_handler_cb,
.cb_ctx = sc,
.buf = sc->sc_intr_buf,
+ .no_readahead = hid_test_quirk(&sc->sc_hw, HQ_NO_READAHEAD),
};
sc->sc_xfer_ctx[POLL_XFER(USBHID_INTR_IN_DT)] = (struct usbhid_xfer_ctx) {
.req.intr.maxlen =
@@ -705,6 +707,10 @@ usbhid_ioctl(device_t dev, device_t child __unused, unsigned long cmd,
if (error == 0)
ucr->ucr_actlen = UGETW(req.ctrl.wLength);
break;
+ case USB_GET_DEVICEINFO:
+ error = usbd_fill_deviceinfo(sc->sc_udev,
+ (struct usb_device_info *)data);
+ break;
default:
error = EINVAL;
}
diff --git a/sys/dev/usb/misc/cp2112.c b/sys/dev/usb/misc/cp2112.c
index d4776ca342cb..201a3ec51ce4 100644
--- a/sys/dev/usb/misc/cp2112.c
+++ b/sys/dev/usb/misc/cp2112.c
@@ -708,11 +708,12 @@ cp2112gpio_attach(device_t dev)
}
}
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
- device_printf(dev, "gpiobus_attach_bus failed\n");
+ device_printf(dev, "gpiobus_add_bus failed\n");
goto detach;
}
+ bus_attach_children(dev);
return (0);
detach:
diff --git a/sys/dev/usb/net/if_ipheth.c b/sys/dev/usb/net/if_ipheth.c
index f70113c53eb4..cfa800707391 100644
--- a/sys/dev/usb/net/if_ipheth.c
+++ b/sys/dev/usb/net/if_ipheth.c
@@ -55,6 +55,7 @@
#include <net/if_var.h>
#include <dev/usb/usb.h>
+#include <dev/usb/usb_cdc.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>
#include "usbdevs.h"
@@ -81,6 +82,9 @@ static uether_fn_t ipheth_start;
static uether_fn_t ipheth_setmulti;
static uether_fn_t ipheth_setpromisc;
+static ipheth_consumer_t ipheth_consume_read;
+static ipheth_consumer_t ipheth_consume_read_ncm;
+
#ifdef USB_DEBUG
static int ipheth_debug = 0;
@@ -96,7 +100,31 @@ static const struct usb_config ipheth_config[IPHETH_N_TRANSFER] = {
.direction = UE_DIR_RX,
.frames = IPHETH_RX_FRAMES_MAX,
.bufsize = (IPHETH_RX_FRAMES_MAX * MCLBYTES),
- .flags = {.short_frames_ok = 1,.short_xfer_ok = 1,.ext_buffer = 1,},
+ .flags = {.short_frames_ok = 1, .short_xfer_ok = 1, .ext_buffer = 1,},
+ .callback = ipheth_bulk_read_callback,
+ .timeout = 0, /* no timeout */
+ },
+
+ [IPHETH_BULK_TX] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_TX,
+ .frames = IPHETH_TX_FRAMES_MAX,
+ .bufsize = (IPHETH_TX_FRAMES_MAX * IPHETH_BUF_SIZE),
+ .flags = {.force_short_xfer = 1,},
+ .callback = ipheth_bulk_write_callback,
+ .timeout = IPHETH_TX_TIMEOUT,
+ },
+};
+
+static const struct usb_config ipheth_config_ncm[IPHETH_N_TRANSFER] = {
+ [IPHETH_BULK_RX] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_RX,
+ .frames = 1,
+ .bufsize = IPHETH_RX_NCM_BUF_SIZE,
+ .flags = {.short_frames_ok = 1, .short_xfer_ok = 1,},
.callback = ipheth_bulk_read_callback,
.timeout = 0, /* no timeout */
},
@@ -204,6 +232,21 @@ ipheth_get_mac_addr(struct ipheth_softc *sc)
return (0);
}
+static bool
+ipheth_enable_ncm(struct ipheth_softc *sc)
+{
+ struct usb_device_request req;
+
+ req.bmRequestType = UT_WRITE_VENDOR_INTERFACE;
+ req.bRequest = IPHETH_CMD_ENABLE_NCM;
+ USETW(req.wValue, 0);
+ req.wIndex[0] = sc->sc_iface_no;
+ req.wIndex[1] = 0;
+ USETW(req.wLength, 0);
+
+ return (usbd_do_request(sc->sc_ue.ue_udev, NULL, &req, NULL) == 0);
+}
+
static int
ipheth_probe(device_t dev)
{
@@ -221,6 +264,7 @@ ipheth_attach(device_t dev)
struct ipheth_softc *sc = device_get_softc(dev);
struct usb_ether *ue = &sc->sc_ue;
struct usb_attach_arg *uaa = device_get_ivars(dev);
+ const struct usb_config *config;
int error;
sc->sc_iface_no = uaa->info.bIfaceIndex;
@@ -235,18 +279,29 @@ ipheth_attach(device_t dev)
device_printf(dev, "Cannot set alternate setting\n");
goto detach;
}
- error = usbd_transfer_setup(uaa->device, &sc->sc_iface_no,
- sc->sc_xfer, ipheth_config, IPHETH_N_TRANSFER, sc, &sc->sc_mtx);
- if (error) {
- device_printf(dev, "Cannot setup USB transfers\n");
- goto detach;
- }
+
ue->ue_sc = sc;
ue->ue_dev = dev;
ue->ue_udev = uaa->device;
ue->ue_mtx = &sc->sc_mtx;
ue->ue_methods = &ipheth_ue_methods;
+ if (ipheth_enable_ncm(sc)) {
+ config = ipheth_config_ncm;
+ sc->is_ncm = true;
+ sc->consume = &ipheth_consume_read_ncm;
+ } else {
+ config = ipheth_config;
+ sc->consume = &ipheth_consume_read;
+ }
+
+ error = usbd_transfer_setup(uaa->device, &sc->sc_iface_no, sc->sc_xfer,
+ config, IPHETH_N_TRANSFER, sc, &sc->sc_mtx);
+ if (error) {
+ device_printf(dev, "Cannot setup USB transfers\n");
+ goto detach;
+ }
+
error = ipheth_get_mac_addr(sc);
if (error) {
device_printf(dev, "Cannot get MAC address\n");
@@ -389,12 +444,9 @@ ipheth_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
int actlen;
int aframes;
- usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);
-
- DPRINTFN(1, "\n");
-
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
+ usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);
DPRINTFN(11, "transfer complete: %u bytes in %u frames\n",
actlen, aframes);
@@ -471,53 +523,40 @@ ipheth_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
uint8_t x;
int actlen;
int aframes;
- int len;
-
- usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
-
+ usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);
DPRINTF("received %u bytes in %u frames\n", actlen, aframes);
- for (x = 0; x != aframes; x++) {
- m = sc->sc_rx_buf[x];
- sc->sc_rx_buf[x] = NULL;
- len = usbd_xfer_frame_len(xfer, x);
-
- if (len < (int)(sizeof(struct ether_header) +
- IPHETH_RX_ADJ)) {
- m_freem(m);
- continue;
- }
-
- m_adj(m, IPHETH_RX_ADJ);
-
- /* queue up mbuf */
- uether_rxmbuf(&sc->sc_ue, m, len - IPHETH_RX_ADJ);
- }
+ for (x = 0; x != aframes; x++)
+ sc->consume(xfer, x);
/* FALLTHROUGH */
case USB_ST_SETUP:
-
- for (x = 0; x != IPHETH_RX_FRAMES_MAX; x++) {
- if (sc->sc_rx_buf[x] == NULL) {
- m = uether_newbuf();
- if (m == NULL)
- goto tr_stall;
-
- /* cancel alignment for ethernet */
- m_adj(m, ETHER_ALIGN);
-
- sc->sc_rx_buf[x] = m;
- } else {
- m = sc->sc_rx_buf[x];
+ if (!sc->is_ncm) {
+ for (x = 0; x != IPHETH_RX_FRAMES_MAX; x++) {
+ if (sc->sc_rx_buf[x] == NULL) {
+ m = uether_newbuf();
+ if (m == NULL)
+ goto tr_stall;
+
+ /* cancel alignment for ethernet */
+ m_adj(m, ETHER_ALIGN);
+
+ sc->sc_rx_buf[x] = m;
+ } else {
+ m = sc->sc_rx_buf[x];
+ }
+ usbd_xfer_set_frame_data(xfer, x, m->m_data, m->m_len);
}
-
- usbd_xfer_set_frame_data(xfer, x, m->m_data, m->m_len);
+ usbd_xfer_set_frames(xfer, x);
+ } else {
+ usbd_xfer_set_frame_len(xfer, 0,
+ IPHETH_RX_NCM_BUF_SIZE);
+ usbd_xfer_set_frames(xfer, 1);
}
- /* set number of frames and start hardware */
- usbd_xfer_set_frames(xfer, x);
+
usbd_transfer_submit(xfer);
/* flush any received frames */
uether_rxflush(&sc->sc_ue);
@@ -539,3 +578,86 @@ ipheth_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
break;
}
}
+
+static void
+ipheth_consume_read(struct usb_xfer *xfer, int x)
+{
+ struct ipheth_softc *sc = usbd_xfer_softc(xfer);
+ struct mbuf *m = sc->sc_rx_buf[x];
+ int len;
+
+ sc->sc_rx_buf[x] = NULL;
+ len = usbd_xfer_frame_len(xfer, x);
+
+ if (len < (int)(sizeof(struct ether_header) + IPHETH_RX_ADJ)) {
+ m_freem(m);
+ return;
+ }
+
+ m_adj(m, IPHETH_RX_ADJ);
+
+ /* queue up mbuf */
+ uether_rxmbuf(&sc->sc_ue, m, len - IPHETH_RX_ADJ);
+}
+
+static void
+ipheth_consume_read_ncm(struct usb_xfer *xfer, int x)
+{
+ struct ipheth_softc *sc = usbd_xfer_softc(xfer);
+ struct usb_page_cache *pc = usbd_xfer_get_frame(xfer, 0);
+ struct ncm_data_cache ncm;
+ if_t ifp = uether_getifp(&sc->sc_ue);
+ struct mbuf *new_buf;
+ int i, actlen;
+ uint16_t dp_offset, dp_len;
+
+ usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
+
+ if (actlen < IPHETH_NCM_HEADER_SIZE)
+ return;
+
+ usbd_copy_out(pc, 0, &ncm.hdr, sizeof(ncm.hdr));
+
+ if (UGETDW(ncm.hdr.dwSignature) != 0x484D434E)
+ return;
+
+ /* Dpt follows the hdr on iOS */
+ if (UGETW(ncm.hdr.wDptIndex) != (int)(sizeof(struct usb_ncm16_hdr)))
+ return;
+
+ usbd_copy_out(pc, UGETW(ncm.hdr.wDptIndex), &ncm.dpt, sizeof(ncm.dpt));
+
+ if (UGETDW(ncm.dpt.dwSignature) != 0x304D434E)
+ return;
+
+ usbd_copy_out(pc, UGETW(ncm.hdr.wDptIndex) + sizeof(ncm.dpt), &ncm.dp,
+ sizeof(ncm.dp));
+
+ for (i = 0; i < IPHETH_NCM_DPT_DP_NUM; ++i) {
+ dp_offset = UGETW(ncm.dp[i].wFrameIndex);
+ dp_len = UGETW(ncm.dp[i].wFrameLength);
+
+ /* (3.3.1 USB CDC NCM spec v1.0) */
+ if (dp_offset == 0 && dp_len == 0)
+ break;
+
+ if (dp_offset < IPHETH_NCM_HEADER_SIZE || dp_offset >= actlen ||
+ actlen < (dp_len + dp_offset) ||
+ dp_len < sizeof(struct ether_header)) {
+ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ continue;
+ }
+ if (dp_len > (MCLBYTES - ETHER_ALIGN)) {
+ if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
+ continue;
+ }
+
+ new_buf = uether_newbuf();
+ if (new_buf == NULL) {
+ if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
+ continue;
+ }
+ usbd_copy_out(pc, dp_offset, new_buf->m_data, dp_len);
+ uether_rxmbuf(&sc->sc_ue, new_buf, dp_len);
+ }
+}
diff --git a/sys/dev/usb/net/if_iphethvar.h b/sys/dev/usb/net/if_iphethvar.h
index 203bb96b6f22..d637e8f67d01 100644
--- a/sys/dev/usb/net/if_iphethvar.h
+++ b/sys/dev/usb/net/if_iphethvar.h
@@ -41,6 +41,7 @@
#define IPHETH_BUF_SIZE 1514
#define IPHETH_TX_TIMEOUT 5000 /* ms */
+#define IPHETH_RX_NCM_BUF_SIZE 65536
#define IPHETH_RX_FRAMES_MAX 1
#define IPHETH_TX_FRAMES_MAX 8
@@ -55,10 +56,20 @@
#define IPHETH_CTRL_TIMEOUT 5000 /* ms */
#define IPHETH_CMD_GET_MACADDR 0x00
+#define IPHETH_CMD_ENABLE_NCM 0x04
#define IPHETH_CMD_CARRIER_CHECK 0x45
#define IPHETH_CARRIER_ON 0x04
+#define IPHETH_NCM_DPT_DP_NUM 22
+#define IPHETH_NCM_DPT_HEADER_SIZE \
+ (sizeof(struct usb_ncm16_dpt) + \
+ IPHETH_NCM_DPT_DP_NUM * sizeof(struct usb_ncm16_dp))
+#define IPHETH_NCM_HEADER_SIZE \
+ (sizeof(struct usb_ncm16_hdr) + IPHETH_NCM_DPT_HEADER_SIZE)
+
+typedef void (ipheth_consumer_t)(struct usb_xfer *xfer, int idx);
+
enum {
IPHETH_BULK_TX,
IPHETH_BULK_RX,
@@ -76,6 +87,16 @@ struct ipheth_softc {
uint8_t sc_data[IPHETH_CTRL_BUF_SIZE];
uint8_t sc_iface_no;
uint8_t sc_carrier_on;
+
+ bool is_ncm;
+
+ ipheth_consumer_t *consume;
+};
+
+struct ncm_data_cache {
+ struct usb_ncm16_hdr hdr;
+ struct usb_ncm16_dpt dpt;
+ struct usb_ncm16_dp dp[IPHETH_NCM_DPT_DP_NUM];
};
#define IPHETH_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
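
For reference, assuming the NCM16 layouts from usb_cdc.h (12-byte NTH16 header, 8-byte NDP16 header, 4-byte datagram pointers; sizes not restated by this change), the fixed region parsed by ipheth_consume_read_ncm() works out as sketched:

/* Sketch: expected NCM16 descriptor sizes (assumed, see above). */
_Static_assert(sizeof(struct usb_ncm16_hdr) == 12, "NTH16 header");
_Static_assert(sizeof(struct usb_ncm16_dpt) == 8, "NDP16 header");
_Static_assert(sizeof(struct usb_ncm16_dp) == 4, "NDP16 entry");
/* IPHETH_NCM_HEADER_SIZE = 12 + 8 + 22 * 4 = 108 bytes. */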
diff --git a/sys/dev/usb/usb_device.c b/sys/dev/usb/usb_device.c
index 60c2d6745b3f..f0989972f49f 100644
--- a/sys/dev/usb/usb_device.c
+++ b/sys/dev/usb/usb_device.c
@@ -3111,3 +3111,51 @@ usbd_get_endpoint_mode(struct usb_device *udev, struct usb_endpoint *ep)
{
return (ep->ep_mode);
}
+
+/*------------------------------------------------------------------------*
+ * usbd_fill_deviceinfo
+ *
+ * This function copies information about a USB device into the
+ * structure pointed to by the "di" argument.
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+int
+usbd_fill_deviceinfo(struct usb_device *udev, struct usb_device_info *di)
+{
+ struct usb_device *hub;
+
+ bzero(di, sizeof(di[0]));
+
+ di->udi_bus = device_get_unit(udev->bus->bdev);
+ di->udi_addr = udev->address;
+ di->udi_index = udev->device_index;
+ strlcpy(di->udi_serial, usb_get_serial(udev), sizeof(di->udi_serial));
+ strlcpy(di->udi_vendor, usb_get_manufacturer(udev), sizeof(di->udi_vendor));
+ strlcpy(di->udi_product, usb_get_product(udev), sizeof(di->udi_product));
+ usb_printbcd(di->udi_release, sizeof(di->udi_release),
+ UGETW(udev->ddesc.bcdDevice));
+ di->udi_vendorNo = UGETW(udev->ddesc.idVendor);
+ di->udi_productNo = UGETW(udev->ddesc.idProduct);
+ di->udi_releaseNo = UGETW(udev->ddesc.bcdDevice);
+ di->udi_class = udev->ddesc.bDeviceClass;
+ di->udi_subclass = udev->ddesc.bDeviceSubClass;
+ di->udi_protocol = udev->ddesc.bDeviceProtocol;
+ di->udi_config_no = udev->curr_config_no;
+ di->udi_config_index = udev->curr_config_index;
+ di->udi_power = udev->flags.self_powered ? 0 : udev->power;
+ di->udi_speed = udev->speed;
+ di->udi_mode = udev->flags.usb_mode;
+ di->udi_power_mode = udev->power_mode;
+ di->udi_suspended = udev->flags.peer_suspended;
+
+ hub = udev->parent_hub;
+ if (hub) {
+ di->udi_hubaddr = hub->address;
+ di->udi_hubindex = hub->device_index;
+ di->udi_hubport = udev->port_no;
+ }
+ return (0);
+}
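
Exporting this helper lets any USB driver answer USB_GET_DEVICEINFO without detouring through ugen; the usbhid hunk earlier in this change is the first such consumer. A minimal sketch of the ioctl case in a hypothetical driver:

case USB_GET_DEVICEINFO:
	error = usbd_fill_deviceinfo(sc->sc_udev,
	    (struct usb_device_info *)data);
	break;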
diff --git a/sys/dev/usb/usb_generic.c b/sys/dev/usb/usb_generic.c
index c0af27d77e5d..ccb0b2184ec4 100644
--- a/sys/dev/usb/usb_generic.c
+++ b/sys/dev/usb/usb_generic.c
@@ -831,42 +831,7 @@ ugen_get_iface_driver(struct usb_fifo *f, struct usb_gen_descriptor *ugd)
int
ugen_fill_deviceinfo(struct usb_fifo *f, struct usb_device_info *di)
{
- struct usb_device *udev;
- struct usb_device *hub;
-
- udev = f->udev;
-
- bzero(di, sizeof(di[0]));
-
- di->udi_bus = device_get_unit(udev->bus->bdev);
- di->udi_addr = udev->address;
- di->udi_index = udev->device_index;
- strlcpy(di->udi_serial, usb_get_serial(udev), sizeof(di->udi_serial));
- strlcpy(di->udi_vendor, usb_get_manufacturer(udev), sizeof(di->udi_vendor));
- strlcpy(di->udi_product, usb_get_product(udev), sizeof(di->udi_product));
- usb_printbcd(di->udi_release, sizeof(di->udi_release),
- UGETW(udev->ddesc.bcdDevice));
- di->udi_vendorNo = UGETW(udev->ddesc.idVendor);
- di->udi_productNo = UGETW(udev->ddesc.idProduct);
- di->udi_releaseNo = UGETW(udev->ddesc.bcdDevice);
- di->udi_class = udev->ddesc.bDeviceClass;
- di->udi_subclass = udev->ddesc.bDeviceSubClass;
- di->udi_protocol = udev->ddesc.bDeviceProtocol;
- di->udi_config_no = udev->curr_config_no;
- di->udi_config_index = udev->curr_config_index;
- di->udi_power = udev->flags.self_powered ? 0 : udev->power;
- di->udi_speed = udev->speed;
- di->udi_mode = udev->flags.usb_mode;
- di->udi_power_mode = udev->power_mode;
- di->udi_suspended = udev->flags.peer_suspended;
-
- hub = udev->parent_hub;
- if (hub) {
- di->udi_hubaddr = hub->address;
- di->udi_hubindex = hub->device_index;
- di->udi_hubport = udev->port_no;
- }
- return (0);
+ return (usbd_fill_deviceinfo(f->udev, di));
}
int
diff --git a/sys/dev/usb/usbdi.h b/sys/dev/usb/usbdi.h
index 08d130aa2868..0826d9f078c4 100644
--- a/sys/dev/usb/usbdi.h
+++ b/sys/dev/usb/usbdi.h
@@ -38,6 +38,7 @@ struct usb_process;
struct usb_proc_msg;
struct usb_mbuf;
struct usb_fs_privdata;
+struct usb_device_info;
struct mbuf;
typedef enum { /* keep in sync with usb_errstr_table */
@@ -587,6 +588,8 @@ usb_error_t usbd_set_endpoint_mode(struct usb_device *udev,
struct usb_endpoint *ep, uint8_t ep_mode);
uint8_t usbd_get_endpoint_mode(struct usb_device *udev,
struct usb_endpoint *ep);
+int usbd_fill_deviceinfo(struct usb_device *udev,
+ struct usb_device_info *di);
const struct usb_device_id *usbd_lookup_id_by_info(
const struct usb_device_id *id, usb_size_t sizeof_id,
diff --git a/sys/dev/virtio/random/virtio_random.c b/sys/dev/virtio/random/virtio_random.c
index f938ba99ae53..3f30c8b68f4c 100644
--- a/sys/dev/virtio/random/virtio_random.c
+++ b/sys/dev/virtio/random/virtio_random.c
@@ -77,7 +77,7 @@ static struct virtio_feature_desc vtrnd_feature_desc[] = {
{ 0, NULL }
};
-static struct random_source random_vtrnd = {
+static const struct random_source random_vtrnd = {
.rs_ident = "VirtIO Entropy Adapter",
.rs_source = RANDOM_PURE_VIRTIO,
.rs_read = vtrnd_read,
diff --git a/sys/dev/watchdog/watchdog.c b/sys/dev/watchdog/watchdog.c
index e6b6dc1eac70..e1b2e08c3f10 100644
--- a/sys/dev/watchdog/watchdog.c
+++ b/sys/dev/watchdog/watchdog.c
@@ -50,11 +50,20 @@
#include <sys/syscallsubr.h> /* kern_clock_gettime() */
-static int wd_set_pretimeout(int newtimeout, int disableiftoolong);
+#ifdef COMPAT_FREEBSD14
+#define WDIOCPATPAT_14 _IOW('W', 42, u_int) /* pat the watchdog */
+#define WDIOC_SETTIMEOUT_14 _IOW('W', 43, int) /* set/reset the timer */
+#define WDIOC_GETTIMEOUT_14 _IOR('W', 44, int) /* get total timeout */
+#define WDIOC_GETTIMELEFT_14 _IOR('W', 45, int) /* get time left */
+#define WDIOC_GETPRETIMEOUT_14 _IOR('W', 46, int) /* get the pre-timeout */
+#define WDIOC_SETPRETIMEOUT_14 _IOW('W', 47, int) /* set the pre-timeout */
+#endif
+
+static int wd_set_pretimeout(sbintime_t newtimeout, int disableiftoolong);
static void wd_timeout_cb(void *arg);
static struct callout wd_pretimeo_handle;
-static int wd_pretimeout;
+static sbintime_t wd_pretimeout;
static int wd_pretimeout_act = WD_SOFT_LOG;
static struct callout wd_softtimeo_handle;
@@ -63,6 +72,8 @@ static int wd_softtimer; /* true = use softtimer instead of hardware
static int wd_softtimeout_act = WD_SOFT_LOG; /* action for the software timeout */
static struct cdev *wd_dev;
+static volatile sbintime_t wd_last_sbt; /* last timeout value (sbt) */
+static sbintime_t wd_last_sbt_sysctl; /* wd_last_sbt, mirrored for sysctl */
static volatile u_int wd_last_u; /* last timeout value set by kern_do_pat */
static u_int wd_last_u_sysctl; /* last timeout value set by kern_do_pat */
static u_int wd_last_u_sysctl_secs; /* wd_last_u in seconds */
@@ -73,6 +84,8 @@ SYSCTL_UINT(_hw_watchdog, OID_AUTO, wd_last_u, CTLFLAG_RD,
&wd_last_u_sysctl, 0, "Watchdog last update time");
SYSCTL_UINT(_hw_watchdog, OID_AUTO, wd_last_u_secs, CTLFLAG_RD,
&wd_last_u_sysctl_secs, 0, "Watchdog last update time");
+SYSCTL_SBINTIME_MSEC(_hw_watchdog, OID_AUTO, wd_last_msecs, CTLFLAG_RD,
+ &wd_last_sbt_sysctl, "Watchdog last update time (milliseconds)");
static int wd_lastpat_valid = 0;
static time_t wd_lastpat = 0; /* when the watchdog was last patted */
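
SYSCTL_SBINTIME_MSEC exports the sbintime_t as a 64-bit millisecond count. A minimal userland sketch reading it back (the sysctl name follows from the declaration above):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	int64_t msecs;
	size_t len = sizeof(msecs);

	if (sysctlbyname("hw.watchdog.wd_last_msecs", &msecs, &len,
	    NULL, 0) == 0)
		printf("last watchdog timeout: %jd ms\n", (intmax_t)msecs);
	return (0);
}
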
@@ -80,105 +93,94 @@ static time_t wd_lastpat = 0; /* when the watchdog was last patted */
/* Hook for external software watchdog to register for use if needed */
void (*wdog_software_attach)(void);
-static void
-pow2ns_to_ts(int pow2ns, struct timespec *ts)
+/* Legacy interface to watchdog. */
+int
+wdog_kern_pat(u_int utim)
{
- uint64_t ns;
+ sbintime_t sbt;
- ns = 1ULL << pow2ns;
- ts->tv_sec = ns / 1000000000ULL;
- ts->tv_nsec = ns % 1000000000ULL;
-}
+ if ((utim & WD_LASTVAL) != 0 && (utim & WD_INTERVAL) > 0)
+ return (EINVAL);
-static int
-pow2ns_to_ticks(int pow2ns)
-{
- struct timeval tv;
- struct timespec ts;
+ if ((utim & WD_LASTVAL) != 0) {
+ return (wdog_control(WD_CTRL_RESET));
+ }
- pow2ns_to_ts(pow2ns, &ts);
- TIMESPEC_TO_TIMEVAL(&tv, &ts);
- return (tvtohz(&tv));
+ utim &= WD_INTERVAL;
+ if (utim == WD_TO_NEVER)
+ sbt = 0;
+ else
+ sbt = nstosbt(1ULL << utim);
+
+ return (wdog_kern_pat_sbt(sbt));
}
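
The legacy WD_INTERVAL value is log2 of the timeout in nanoseconds, so the conversion above is just a shift followed by nstosbt(). A userland sketch of the arithmetic (nstosbt() is a kernel macro, approximated here as 32.32 fixed point; the shift stays 64-bit for the same reason as above):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	unsigned utim = 30;		/* legacy interval: 2^30 ns */
	uint64_t ns = 1ULL << utim;	/* 1073741824 ns, about 1.07 s */
	/* nstosbt(): seconds as 32.32 fixed point (fits for utim < 32) */
	int64_t sbt = (int64_t)((ns << 32) / 1000000000ULL);

	printf("utim=%u -> %ju ns -> sbt=%jd (~%.3f s)\n",
	    utim, (uintmax_t)ns, (intmax_t)sbt, sbt / 4294967296.0);
	return (0);
}
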
-static int
-seconds_to_pow2ns(int seconds)
+int
+wdog_control(int ctrl)
{
- uint64_t power;
- uint64_t ns;
- uint64_t shifted;
-
- ns = ((uint64_t)seconds) * 1000000000ULL;
- power = flsll(ns);
- shifted = 1ULL << power;
- if (shifted <= ns) {
- power++;
+ /* Disable takes precedence */
+ if (ctrl == WD_CTRL_DISABLE) {
+ wdog_kern_pat(0);
}
- return (power);
+
+ if ((ctrl & WD_CTRL_RESET) != 0) {
+ wdog_kern_pat_sbt(wd_last_sbt);
+ } else if ((ctrl & WD_CTRL_ENABLE) != 0) {
+ wdog_kern_pat_sbt(wd_last_sbt);
+ }
+
+ return (0);
}
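
wdog_control() lets in-kernel code pat or gate the watchdog without tracking the last programmed value itself, since both WD_CTRL_RESET and WD_CTRL_ENABLE re-arm from wd_last_sbt. A hedged sketch of a consumer (slow_firmware_update is hypothetical):

/* Hypothetical caller that parks the watchdog around slow work. */
extern void slow_firmware_update(void);

static void
example_slow_path(void)
{
	wdog_control(WD_CTRL_DISABLE);	/* disarm: wdog_kern_pat(0) */
	slow_firmware_update();		/* long-running, non-pattable work */
	wdog_control(WD_CTRL_ENABLE);	/* re-arm with the last timeout */
}
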
int
-wdog_kern_pat(u_int utim)
+wdog_kern_pat_sbt(sbintime_t sbt)
{
- int error;
- static int first = 1;
-
- if ((utim & WD_LASTVAL) != 0 && (utim & WD_INTERVAL) > 0)
- return (EINVAL);
-
- if ((utim & WD_LASTVAL) != 0) {
- /*
- * if WD_LASTVAL is set, fill in the bits for timeout
- * from the saved value in wd_last_u.
- */
- MPASS((wd_last_u & ~WD_INTERVAL) == 0);
- utim &= ~WD_LASTVAL;
- utim |= wd_last_u;
- } else {
- /*
- * Otherwise save the new interval.
- * This can be zero (to disable the watchdog)
- */
- wd_last_u = (utim & WD_INTERVAL);
+ sbintime_t error_sbt = 0;
+ int pow2ns = 0;
+ int error = 0;
+ static bool first = true;
+
+ /* The legacy interface encodes the timeout as a power of 2 in nanoseconds. */
+ if (sbt != 0) {
+ pow2ns = flsll(sbttons(sbt));
+ }
+ if (wd_last_sbt != sbt) {
+ wd_last_u = pow2ns;
wd_last_u_sysctl = wd_last_u;
- wd_last_u_sysctl_secs = pow2ns_to_ticks(wd_last_u) / hz;
+ wd_last_u_sysctl_secs = sbt / SBT_1S;
+
+ wd_last_sbt = sbt;
}
- if ((utim & WD_INTERVAL) == WD_TO_NEVER) {
- utim = 0;
- /* Assume all is well; watchdog signals failure. */
- error = 0;
- } else {
- /* Assume no watchdog available; watchdog flags success */
+ if (sbt != 0)
error = EOPNOTSUPP;
- }
+
if (wd_softtimer) {
- if (utim == 0) {
+ if (sbt == 0) {
callout_stop(&wd_softtimeo_handle);
} else {
- (void) callout_reset(&wd_softtimeo_handle,
- pow2ns_to_ticks(utim), wd_timeout_cb, "soft");
+ (void) callout_reset_sbt(&wd_softtimeo_handle,
+ sbt, 0, wd_timeout_cb, "soft", 0);
}
error = 0;
} else {
- EVENTHANDLER_INVOKE(watchdog_list, utim, &error);
+ EVENTHANDLER_INVOKE(watchdog_sbt_list, sbt, &error_sbt, &error);
+ EVENTHANDLER_INVOKE(watchdog_list, pow2ns, &error);
}
/*
- * If we no hardware watchdog responded, we have not tried to
+ * If no hardware watchdog responded, we have not tried to
* attach an external software watchdog, and one is available,
* attach it now and retry.
*/
- if (error == EOPNOTSUPP && first && *wdog_software_attach != NULL) {
+ if (error == EOPNOTSUPP && first && wdog_software_attach != NULL) {
(*wdog_software_attach)();
- EVENTHANDLER_INVOKE(watchdog_list, utim, &error);
+ EVENTHANDLER_INVOKE(watchdog_sbt_list, sbt, &error_sbt, &error);
+ EVENTHANDLER_INVOKE(watchdog_list, pow2ns, &error);
}
- first = 0;
+ first = false;
+ /* TODO: Print a (rate limited?) warning if error_sbt is too far away */
wd_set_pretimeout(wd_pretimeout, true);
- /*
- * If we were able to arm/strobe the watchdog, then
- * update the last time it was strobed for WDIOC_GETTIMELEFT
- */
if (!error) {
struct timespec ts;
@@ -189,6 +191,7 @@ wdog_kern_pat(u_int utim)
wd_lastpat_valid = 1;
}
}
+
return (error);
}
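
For drivers still on the pow2ns watchdog_list, wdog_kern_pat_sbt() rounds the requested time up to the next power of 2 in nanoseconds with fls. A userland sketch of that rounding (flsll() from <strings.h> on FreeBSD stands in for the kernel routine):

#include <stdint.h>
#include <stdio.h>
#include <strings.h>

int
main(void)
{
	uint64_t ns = 1500000000ULL;	/* 1.5 s requested */
	int pow2ns = flsll(ns);		/* 1-indexed MSB position: 31 */

	printf("%ju ns -> 2^%d ns (~%.2f s) for legacy handlers\n",
	    (uintmax_t)ns, pow2ns, (double)(1ULL << pow2ns) / 1e9);
	return (0);
}
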
@@ -265,16 +268,14 @@ wd_timeout_cb(void *arg)
* current actual watchdog timeout.
*/
static int
-wd_set_pretimeout(int newtimeout, int disableiftoolong)
+wd_set_pretimeout(sbintime_t newtimeout, int disableiftoolong)
{
- u_int utime;
- struct timespec utime_ts;
- int timeout_ticks;
+ sbintime_t utime;
+ sbintime_t timeout_left;
- utime = wdog_kern_last_timeout();
- pow2ns_to_ts(utime, &utime_ts);
+ utime = wdog_kern_last_timeout_sbt();
/* Do not permit a pre-timeout greater than or equal to the timeout. */
- if (newtimeout >= utime_ts.tv_sec) {
+ if (newtimeout >= utime) {
/*
* If 'disableiftoolong' then just fall through
* so as to disable the pre-watchdog
@@ -292,7 +293,7 @@ wd_set_pretimeout(int newtimeout, int disableiftoolong)
return 0;
}
- timeout_ticks = pow2ns_to_ticks(utime) - (hz*newtimeout);
+ timeout_left = utime - newtimeout;
#if 0
printf("wd_set_pretimeout: "
"newtimeout: %d, "
@@ -306,8 +307,8 @@ wd_set_pretimeout(int newtimeout, int disableiftoolong)
#endif
/* We determined the value is sane, so reset the callout */
- (void) callout_reset(&wd_pretimeo_handle,
- timeout_ticks, wd_timeout_cb, "pre");
+ (void) callout_reset_sbt(&wd_pretimeo_handle,
+ timeout_left, 0, wd_timeout_cb, "pre", 0);
wd_pretimeout = newtimeout;
return 0;
}
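
With both values in sbintime_t, the pre-timeout callout is simply scheduled timeout minus pre-timeout into the future: a 10 s watchdog with a 2 s pre-timeout fires the "pre" callback 8 s after the pat. A minimal sketch of the arithmetic (assuming SBT_1S is visible to userland, as the new ioctl ABI suggests):

#include <sys/time.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	sbintime_t timeout = 10 * SBT_1S;	/* programmed timeout */
	sbintime_t pre = 2 * SBT_1S;		/* warn 2 s early */
	sbintime_t timeout_left = timeout - pre;

	printf("pre-timeout callback in ~%jd s\n",
	    (intmax_t)(timeout_left / SBT_1S));
	return (0);
}
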
@@ -316,6 +317,7 @@ static int
wd_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t data,
int flags __unused, struct thread *td)
{
+ sbintime_t sb;
u_int u;
time_t timeleft;
int error;
@@ -351,29 +353,55 @@ wd_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t data,
error = EINVAL;
}
break;
- case WDIOC_GETPRETIMEOUT:
- *(int *)data = (int)wd_pretimeout;
+#ifdef COMPAT_FREEBSD14
+ case WDIOC_GETPRETIMEOUT_14:
+ *(int *)data = (int)(wd_pretimeout / SBT_1S);
break;
- case WDIOC_SETPRETIMEOUT:
- error = wd_set_pretimeout(*(int *)data, false);
+ case WDIOC_SETPRETIMEOUT_14:
+ error = wd_set_pretimeout(*(int *)data * SBT_1S, false);
break;
- case WDIOC_GETTIMELEFT:
+ case WDIOC_GETTIMELEFT_14:
error = wd_get_time_left(td, &timeleft);
if (error)
break;
*(int *)data = (int)timeleft;
break;
- case WDIOC_SETTIMEOUT:
+ case WDIOC_SETTIMEOUT_14:
u = *(u_int *)data;
- error = wdog_kern_pat(seconds_to_pow2ns(u));
+ error = wdog_kern_pat_sbt(mstosbt(u * 1000ULL));
break;
- case WDIOC_GETTIMEOUT:
+ case WDIOC_GETTIMEOUT_14:
u = wdog_kern_last_timeout();
*(u_int *)data = u;
break;
- case WDIOCPATPAT:
+ case WDIOCPATPAT_14:
error = wd_ioctl_patpat(data);
break;
+#endif
+
+ /* New API */
+ case WDIOC_CONTROL:
+ wdog_control(*(int *)data);
+ break;
+ case WDIOC_SETTIMEOUT:
+ sb = *(sbintime_t *)data;
+ error = wdog_kern_pat_sbt(sb);
+ break;
+ case WDIOC_GETTIMEOUT:
+ *(sbintime_t *)data = wdog_kern_last_timeout_sbt();
+ break;
+ case WDIOC_GETTIMELEFT:
+ error = wd_get_time_left(td, &timeleft);
+ if (error)
+ break;
+ *(sbintime_t *)data = (sbintime_t)timeleft * SBT_1S;
+ break;
+ case WDIOC_GETPRETIMEOUT:
+ *(sbintime_t *)data = wd_pretimeout;
+ break;
+ case WDIOC_SETPRETIMEOUT:
+ error = wd_set_pretimeout(*(sbintime_t *)data, false);
+ break;
default:
error = ENOIOCTL;
break;
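
Under the new ABI all four timeout ioctls carry sbintime_t payloads instead of seconds or pow2ns codes. A hedged userland sketch (assumes _PATH_WATCHDOG, conventionally /dev/fido, and abbreviated error handling):

#include <sys/time.h>
#include <sys/ioctl.h>
#include <sys/watchdog.h>
#include <err.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	sbintime_t sbt = 5 * SBT_1S;	/* 5 second timeout */
	int fd;

	if ((fd = open(_PATH_WATCHDOG, O_RDWR)) == -1)
		err(1, "open");
	if (ioctl(fd, WDIOC_SETTIMEOUT, &sbt) == -1)	/* arm */
		err(1, "WDIOC_SETTIMEOUT");
	if (ioctl(fd, WDIOC_GETTIMEOUT, &sbt) == -1)	/* read back */
		err(1, "WDIOC_GETTIMEOUT");
	close(fd);
	return (0);
}
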
@@ -392,6 +420,12 @@ wdog_kern_last_timeout(void)
return (wd_last_u);
}
+sbintime_t
+wdog_kern_last_timeout_sbt(void)
+{
+ return (wd_last_sbt);
+}
+
static struct cdevsw wd_cdevsw = {
.d_version = D_VERSION,
.d_ioctl = wd_ioctl,