Diffstat (limited to 'sys/dev')
-rw-r--r--  sys/dev/acpica/acpi_powerres.c  274
-rw-r--r--  sys/dev/acpica/acpivar.h  1
-rw-r--r--  sys/dev/amdgpio/amdgpio.c  136
-rw-r--r--  sys/dev/amdgpio/amdgpio.h  9
-rw-r--r--  sys/dev/ath/ath_rate/sample/sample.c  8
-rw-r--r--  sys/dev/ath/if_ath_tx_ht.c  6
-rw-r--r--  sys/dev/bce/if_bce.c  2
-rw-r--r--  sys/dev/bnxt/bnxt_re/qplib_res.c  4
-rw-r--r--  sys/dev/cxgbe/t4_main.c  2
-rw-r--r--  sys/dev/cyapa/cyapa.c  95
-rw-r--r--  sys/dev/e1000/e1000_phy.c  5
-rw-r--r--  sys/dev/e1000/if_em.c  44
-rw-r--r--  sys/dev/fdt/fdt_common.c  2
-rw-r--r--  sys/dev/fdt/fdt_common.h  7
-rw-r--r--  sys/dev/gpio/acpi_gpiobus.c  165
-rw-r--r--  sys/dev/gpio/acpi_gpiobusvar.h  6
-rw-r--r--  sys/dev/gpio/gpio_if.m  26
-rw-r--r--  sys/dev/gpio/gpioaei.c  204
-rw-r--r--  sys/dev/gpio/gpiobus.c  120
-rw-r--r--  sys/dev/gpio/gpiobus_if.m  30
-rw-r--r--  sys/dev/gpio/gpiobus_internal.h  3
-rw-r--r--  sys/dev/gpio/gpioc.c  157
-rw-r--r--  sys/dev/gpio/gpioled.c  108
-rw-r--r--  sys/dev/gpio/ofw_gpiobus.c  17
-rw-r--r--  sys/dev/hid/hidbus.c  41
-rw-r--r--  sys/dev/hid/hidquirk.h  1
-rw-r--r--  sys/dev/hid/hidraw.c  12
-rw-r--r--  sys/dev/hid/hkbd.c  19
-rw-r--r--  sys/dev/hid/ietp.c  31
-rw-r--r--  sys/dev/hid/u2f.c  590
-rw-r--r--  sys/dev/hpt27xx/hptintf.h  6
-rw-r--r--  sys/dev/ice/ice_bitops.h  4
-rw-r--r--  sys/dev/ice/ice_fw_logging.c  2
-rw-r--r--  sys/dev/ice/ice_lan_tx_rx.h  2
-rw-r--r--  sys/dev/ice/ice_lib.h  2
-rw-r--r--  sys/dev/ice/ice_protocol_type.h  2
-rw-r--r--  sys/dev/ichwd/i6300esbwd.c  245
-rw-r--r--  sys/dev/ichwd/i6300esbwd.h  46
-rw-r--r--  sys/dev/ichwd/ichwd.c  2
-rw-r--r--  sys/dev/ichwd/ichwd.h  3
-rw-r--r--  sys/dev/iicbus/gpio/pcf8574.c  4
-rw-r--r--  sys/dev/iicbus/gpio/tca64xx.c  4
-rw-r--r--  sys/dev/iicbus/iichid.c  3
-rw-r--r--  sys/dev/irdma/irdma_cm.c  2
-rw-r--r--  sys/dev/irdma/irdma_utils.c  4
-rw-r--r--  sys/dev/isci/scil/intel_sata.h  2
-rw-r--r--  sys/dev/iwx/if_iwx.c  7
-rw-r--r--  sys/dev/ixgbe/if_ix.c  231
-rw-r--r--  sys/dev/ixgbe/if_ixv.c  6
-rw-r--r--  sys/dev/ixgbe/ixgbe.h  11
-rw-r--r--  sys/dev/ixgbe/ixgbe_api.c  16
-rw-r--r--  sys/dev/ixgbe/ixgbe_api.h  1
-rw-r--r--  sys/dev/ixgbe/ixgbe_common.c  25
-rw-r--r--  sys/dev/ixgbe/ixgbe_e610.c  5567
-rw-r--r--  sys/dev/ixgbe/ixgbe_e610.h  224
-rw-r--r--  sys/dev/ixgbe/ixgbe_osdep.c  26
-rw-r--r--  sys/dev/ixgbe/ixgbe_osdep.h  31
-rw-r--r--  sys/dev/ixgbe/ixgbe_type.h  69
-rw-r--r--  sys/dev/ixgbe/ixgbe_type_e610.h  2278
-rw-r--r--  sys/dev/ixgbe/ixgbe_vf.c  3
-rw-r--r--  sys/dev/ixl/if_ixl.c  25
-rw-r--r--  sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c  2
-rw-r--r--  sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c  4
-rw-r--r--  sys/dev/mpr/mpr.c  10
-rw-r--r--  sys/dev/mpr/mpr_mapping.c  18
-rw-r--r--  sys/dev/mpr/mprvar.h  1
-rw-r--r--  sys/dev/mwl/if_mwl.c  4
-rw-r--r--  sys/dev/netmap/if_ptnet.c  6
-rw-r--r--  sys/dev/nvme/nvme.c  1
-rw-r--r--  sys/dev/nvme/nvme_ahci.c  1
-rw-r--r--  sys/dev/nvme/nvme_ctrlr.c  109
-rw-r--r--  sys/dev/nvme/nvme_ctrlr_cmd.c  3
-rw-r--r--  sys/dev/nvme/nvme_ns.c  3
-rw-r--r--  sys/dev/nvme/nvme_pci.c  1
-rw-r--r--  sys/dev/nvme/nvme_private.h  3
-rw-r--r--  sys/dev/nvme/nvme_qpair.c  3
-rw-r--r--  sys/dev/nvme/nvme_sim.c  1
-rw-r--r--  sys/dev/nvme/nvme_sysctl.c  1
-rw-r--r--  sys/dev/nvme/nvme_util.c  23
-rw-r--r--  sys/dev/nvmf/nvmf_tcp.c  2
-rw-r--r--  sys/dev/pci/pci_user.c  121
-rw-r--r--  sys/dev/psci/smccc_trng.c  2
-rw-r--r--  sys/dev/puc/pucdata.c  43
-rw-r--r--  sys/dev/qat/qat_common/adf_gen4_timer.c  2
-rw-r--r--  sys/dev/qcom_rnd/qcom_rnd.c  2
-rw-r--r--  sys/dev/random/armv8rng.c  2
-rw-r--r--  sys/dev/random/darn.c  2
-rw-r--r--  sys/dev/random/ivy.c  2
-rw-r--r--  sys/dev/random/nehemiah.c  2
-rw-r--r--  sys/dev/random/random_harvestq.c  12
-rw-r--r--  sys/dev/random/randomdev.h  4
-rw-r--r--  sys/dev/rtwn/if_rtwn.c  3
-rw-r--r--  sys/dev/rtwn/if_rtwn_tx.c  8
-rw-r--r--  sys/dev/rtwn/rtl8192c/r92c_tx.c  11
-rw-r--r--  sys/dev/rtwn/rtl8812a/r12a_tx.c  16
-rw-r--r--  sys/dev/sound/pci/hda/hdac.c  10
-rw-r--r--  sys/dev/tpm/tpm_tis_core.c  7
-rw-r--r--  sys/dev/uart/uart_bus_pci.c  2
-rw-r--r--  sys/dev/uart/uart_dev_ns8250.c  1
-rw-r--r--  sys/dev/ufshci/ufshci.h  173
-rw-r--r--  sys/dev/ufshci/ufshci_ctrlr.c  74
-rw-r--r--  sys/dev/ufshci/ufshci_ctrlr_cmd.c  26
-rw-r--r--  sys/dev/ufshci/ufshci_dev.c  355
-rw-r--r--  sys/dev/ufshci/ufshci_pci.c  3
-rw-r--r--  sys/dev/ufshci/ufshci_private.h  51
-rw-r--r--  sys/dev/ufshci/ufshci_reg.h  2
-rw-r--r--  sys/dev/ufshci/ufshci_req_queue.c  123
-rw-r--r--  sys/dev/ufshci/ufshci_req_sdb.c  133
-rw-r--r--  sys/dev/ufshci/ufshci_sysctl.c  20
-rw-r--r--  sys/dev/ufshci/ufshci_uic_cmd.c  19
-rw-r--r--  sys/dev/usb/controller/xhci.c  85
-rw-r--r--  sys/dev/usb/controller/xhcireg.h  5
-rw-r--r--  sys/dev/usb/input/uhid.c  6
-rw-r--r--  sys/dev/usb/input/usbhid.c  8
-rw-r--r--  sys/dev/usb/net/if_ipheth.c  218
-rw-r--r--  sys/dev/usb/net/if_iphethvar.h  21
-rw-r--r--  sys/dev/usb/net/if_umb.c  6
-rw-r--r--  sys/dev/usb/usb_device.c  48
-rw-r--r--  sys/dev/usb/usb_generic.c  37
-rw-r--r--  sys/dev/usb/usb_hub.c  3
-rw-r--r--  sys/dev/usb/usbdi.h  3
-rw-r--r--  sys/dev/virtio/mmio/virtio_mmio.c  48
-rw-r--r--  sys/dev/virtio/mmio/virtio_mmio.h  1
-rw-r--r--  sys/dev/virtio/mmio/virtio_mmio_fdt.c  47
-rw-r--r--  sys/dev/virtio/mmio/virtio_mmio_if.m  99
-rw-r--r--  sys/dev/virtio/network/if_vtnet.c  449
-rw-r--r--  sys/dev/virtio/network/if_vtnetvar.h  2
-rw-r--r--  sys/dev/virtio/random/virtio_random.c  2
-rw-r--r--  sys/dev/virtio/virtio_bus_if.m  4
-rw-r--r--  sys/dev/virtio/virtqueue.c  2
-rw-r--r--  sys/dev/watchdog/watchdog.c  212
131 files changed, 12428 insertions, 1283 deletions
diff --git a/sys/dev/acpica/acpi_powerres.c b/sys/dev/acpica/acpi_powerres.c
index 29d1690f1bdd..0a8b67a5fa84 100644
--- a/sys/dev/acpica/acpi_powerres.c
+++ b/sys/dev/acpica/acpi_powerres.c
@@ -76,6 +76,13 @@ struct acpi_powerconsumer {
/* Device which is powered */
ACPI_HANDLE ac_consumer;
int ac_state;
+
+ struct {
+ bool prx_has;
+ size_t prx_count;
+ ACPI_HANDLE *prx_deps;
+ } ac_prx[ACPI_D_STATE_COUNT];
+
TAILQ_ENTRY(acpi_powerconsumer) ac_link;
TAILQ_HEAD(,acpi_powerreference) ac_references;
};
@@ -96,9 +103,7 @@ static TAILQ_HEAD(acpi_powerconsumer_list, acpi_powerconsumer)
ACPI_SERIAL_DECL(powerres, "ACPI power resources");
static ACPI_STATUS acpi_pwr_register_consumer(ACPI_HANDLE consumer);
-#ifdef notyet
static ACPI_STATUS acpi_pwr_deregister_consumer(ACPI_HANDLE consumer);
-#endif /* notyet */
static ACPI_STATUS acpi_pwr_register_resource(ACPI_HANDLE res);
#ifdef notyet
static ACPI_STATUS acpi_pwr_deregister_resource(ACPI_HANDLE res);
@@ -112,6 +117,8 @@ static struct acpi_powerresource
*acpi_pwr_find_resource(ACPI_HANDLE res);
static struct acpi_powerconsumer
*acpi_pwr_find_consumer(ACPI_HANDLE consumer);
+static ACPI_STATUS acpi_pwr_infer_state(struct acpi_powerconsumer *pc);
+static ACPI_STATUS acpi_pwr_get_state_locked(ACPI_HANDLE consumer, int *state);
/*
* Register a power resource.
@@ -222,6 +229,84 @@ acpi_pwr_deregister_resource(ACPI_HANDLE res)
#endif /* notyet */
/*
+ * Evaluate the _PRx (power resources each D-state depends on). This also
+ * populates the acpi_powerresources queue with the power resources discovered
+ * during this step.
+ *
+ * ACPI 7.3.8 - 7.3.11 guarantee that _PRx will return the same data each
+ * time they are evaluated.
+ *
+ * If this function fails, acpi_pwr_deregister_consumer() must be called on the
+ * power consumer to free already allocated memory.
+ */
+static ACPI_STATUS
+acpi_pwr_get_power_resources(ACPI_HANDLE consumer, struct acpi_powerconsumer *pc)
+{
+	ACPI_STATUS status;
+ ACPI_STRING reslist_name;
+ ACPI_HANDLE reslist_handle;
+ ACPI_STRING reslist_names[] = {"_PR0", "_PR1", "_PR2", "_PR3"};
+ ACPI_BUFFER reslist;
+ ACPI_OBJECT *reslist_object;
+ ACPI_OBJECT *dep;
+ ACPI_HANDLE *res;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+ ACPI_SERIAL_ASSERT(powerres);
+
+ MPASS(consumer != NULL);
+
+ for (int state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++) {
+ pc->ac_prx[state].prx_has = false;
+ pc->ac_prx[state].prx_count = 0;
+ pc->ac_prx[state].prx_deps = NULL;
+
+ reslist_name = reslist_names[state - ACPI_STATE_D0];
+ if (ACPI_FAILURE(AcpiGetHandle(consumer, reslist_name, &reslist_handle)))
+ continue;
+
+ reslist.Pointer = NULL;
+ reslist.Length = ACPI_ALLOCATE_BUFFER;
+ status = AcpiEvaluateObjectTyped(reslist_handle, NULL, NULL, &reslist,
+ ACPI_TYPE_PACKAGE);
+ if (ACPI_FAILURE(status) || reslist.Pointer == NULL)
+ /*
+ * ACPI_ALLOCATE_BUFFER entails everything will be freed on error
+ * by AcpiEvaluateObjectTyped.
+ */
+ continue;
+
+ reslist_object = (ACPI_OBJECT *)reslist.Pointer;
+ pc->ac_prx[state].prx_has = true;
+ pc->ac_prx[state].prx_count = reslist_object->Package.Count;
+
+ if (reslist_object->Package.Count == 0) {
+ AcpiOsFree(reslist_object);
+ continue;
+ }
+
+ pc->ac_prx[state].prx_deps = mallocarray(pc->ac_prx[state].prx_count,
+ sizeof(*pc->ac_prx[state].prx_deps), M_ACPIPWR, M_NOWAIT);
+ if (pc->ac_prx[state].prx_deps == NULL) {
+ AcpiOsFree(reslist_object);
+ return_ACPI_STATUS (AE_NO_MEMORY);
+ }
+
+ for (size_t i = 0; i < reslist_object->Package.Count; i++) {
+ dep = &reslist_object->Package.Elements[i];
+ res = dep->Reference.Handle;
+ pc->ac_prx[state].prx_deps[i] = res;
+
+ /* It's fine to attempt to register the same resource twice. */
+ acpi_pwr_register_resource(res);
+ }
+ AcpiOsFree(reslist_object);
+ }
+
+ return_ACPI_STATUS (AE_OK);
+}
+
+/*
* Register a power consumer.
*
* It's OK to call this if we already know about the consumer.
@@ -229,6 +314,7 @@ acpi_pwr_deregister_resource(ACPI_HANDLE res)
static ACPI_STATUS
acpi_pwr_register_consumer(ACPI_HANDLE consumer)
{
+	ACPI_STATUS status;
struct acpi_powerconsumer *pc;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
@@ -239,14 +325,30 @@ acpi_pwr_register_consumer(ACPI_HANDLE consumer)
return_ACPI_STATUS (AE_OK);
/* Allocate a new power consumer */
- if ((pc = malloc(sizeof(*pc), M_ACPIPWR, M_NOWAIT)) == NULL)
+ if ((pc = malloc(sizeof(*pc), M_ACPIPWR, M_NOWAIT | M_ZERO)) == NULL)
return_ACPI_STATUS (AE_NO_MEMORY);
TAILQ_INSERT_HEAD(&acpi_powerconsumers, pc, ac_link);
TAILQ_INIT(&pc->ac_references);
pc->ac_consumer = consumer;
- /* XXX we should try to find its current state */
- pc->ac_state = ACPI_STATE_UNKNOWN;
+ /*
+ * Get all its power resource dependencies, if it has _PRx. We do this now
+ * as an opportunity to populate the acpi_powerresources queue.
+ *
+ * If this fails, immediately deregister it.
+ */
+ status = acpi_pwr_get_power_resources(consumer, pc);
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
+ "failed to get power resources for %s\n",
+ acpi_name(consumer)));
+ acpi_pwr_deregister_consumer(consumer);
+ return_ACPI_STATUS (status);
+ }
+
+ /* Find its initial state. */
+ if (ACPI_FAILURE(acpi_pwr_get_state_locked(consumer, &pc->ac_state)))
+ pc->ac_state = ACPI_STATE_UNKNOWN;
ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "registered power consumer %s\n",
acpi_name(consumer)));
@@ -254,7 +356,6 @@ acpi_pwr_register_consumer(ACPI_HANDLE consumer)
return_ACPI_STATUS (AE_OK);
}
-#ifdef notyet
/*
* Deregister a power consumer.
*
@@ -279,6 +380,9 @@ acpi_pwr_deregister_consumer(ACPI_HANDLE consumer)
/* Pull the consumer off the list and free it */
TAILQ_REMOVE(&acpi_powerconsumers, pc, ac_link);
+ for (size_t i = 0; i < sizeof(pc->ac_prx) / sizeof(*pc->ac_prx); i++)
+ if (pc->ac_prx[i].prx_deps != NULL)
+ free(pc->ac_prx[i].prx_deps, M_ACPIPWR);
free(pc, M_ACPIPWR);
ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "deregistered power consumer %s\n",
@@ -286,10 +390,139 @@ acpi_pwr_deregister_consumer(ACPI_HANDLE consumer)
return_ACPI_STATUS (AE_OK);
}
-#endif /* notyet */
/*
- * Set a power consumer to a particular power state.
+ * The _PSC control method isn't required if it's possible to infer the D-state
+ * from the _PRx control methods. (See 7.3.6.)
+ * We can infer that a given D-state has been achieved when all the dependencies
+ * are in the ON state.
+ */
+static ACPI_STATUS
+acpi_pwr_infer_state(struct acpi_powerconsumer *pc)
+{
+ ACPI_HANDLE *res;
+ uint32_t on;
+ bool all_on = false;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+ ACPI_SERIAL_ASSERT(powerres);
+
+ /* It is important we go from the hottest to the coldest state. */
+ for (
+ pc->ac_state = ACPI_STATE_D0;
+ pc->ac_state <= ACPI_STATE_D3_HOT && !all_on;
+ pc->ac_state++
+ ) {
+		MPASS(pc->ac_state < sizeof(pc->ac_prx) / sizeof(*pc->ac_prx));
+
+ if (!pc->ac_prx[pc->ac_state].prx_has)
+ continue;
+
+ all_on = true;
+
+ for (size_t i = 0; i < pc->ac_prx[pc->ac_state].prx_count; i++) {
+ res = pc->ac_prx[pc->ac_state].prx_deps[i];
+ /* If failure, better to assume D-state is hotter than colder. */
+ if (ACPI_FAILURE(acpi_GetInteger(res, "_STA", &on)))
+ continue;
+ if (on == 0) {
+ all_on = false;
+ break;
+ }
+ }
+ }
+
+ MPASS(pc->ac_state != ACPI_STATE_D0);
+
+ /*
+ * If none of the power resources required for the shallower D-states are
+ * on, then we can assume it is unpowered (i.e. D3cold). A device is not
+ * required to support D3cold however; in that case, _PR3 is not explicitly
+ * provided. Those devices should default to D3hot instead.
+ *
+ * See comments of first row of table 7.1 in ACPI spec.
+ */
+ if (!all_on)
+ pc->ac_state = pc->ac_prx[ACPI_STATE_D3_HOT].prx_has ?
+ ACPI_STATE_D3_COLD : ACPI_STATE_D3_HOT;
+ else
+ pc->ac_state--;
+
+ return_ACPI_STATUS (AE_OK);
+}
+
+static ACPI_STATUS
+acpi_pwr_get_state_locked(ACPI_HANDLE consumer, int *state)
+{
+ struct acpi_powerconsumer *pc;
+ ACPI_HANDLE method_handle;
+ ACPI_STATUS status;
+ ACPI_BUFFER result;
+ ACPI_OBJECT *object = NULL;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+ ACPI_SERIAL_ASSERT(powerres);
+
+ if (consumer == NULL)
+ return_ACPI_STATUS (AE_NOT_FOUND);
+
+ if ((pc = acpi_pwr_find_consumer(consumer)) == NULL) {
+ if (ACPI_FAILURE(status = acpi_pwr_register_consumer(consumer)))
+ goto out;
+ if ((pc = acpi_pwr_find_consumer(consumer)) == NULL)
+ panic("acpi added power consumer but can't find it");
+ }
+
+ status = AcpiGetHandle(consumer, "_PSC", &method_handle);
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "no _PSC object - %s\n",
+ AcpiFormatException(status)));
+ status = acpi_pwr_infer_state(pc);
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "couldn't infer D-state - %s\n",
+ AcpiFormatException(status)));
+ pc->ac_state = ACPI_STATE_UNKNOWN;
+ }
+ goto out;
+ }
+
+ result.Pointer = NULL;
+ result.Length = ACPI_ALLOCATE_BUFFER;
+ status = AcpiEvaluateObjectTyped(method_handle, NULL, NULL, &result, ACPI_TYPE_INTEGER);
+ if (ACPI_FAILURE(status) || result.Pointer == NULL) {
+ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "failed to get state with _PSC - %s\n",
+ AcpiFormatException(status)));
+ pc->ac_state = ACPI_STATE_UNKNOWN;
+ goto out;
+ }
+
+ object = (ACPI_OBJECT *)result.Pointer;
+ pc->ac_state = ACPI_STATE_D0 + object->Integer.Value;
+ status = AE_OK;
+
+out:
+ if (object != NULL)
+ AcpiOsFree(object);
+ *state = pc->ac_state;
+ return_ACPI_STATUS (status);
+}
+
+/*
+ * Get a power consumer's D-state.
+ */
+ACPI_STATUS
+acpi_pwr_get_state(ACPI_HANDLE consumer, int *state)
+{
+ ACPI_STATUS res;
+
+ ACPI_SERIAL_BEGIN(powerres);
+ res = acpi_pwr_get_state_locked(consumer, state);
+ ACPI_SERIAL_END(powerres);
+ return (res);
+}
+
+/*
+ * Set a power consumer to a particular D-state.
*/
ACPI_STATUS
acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state)
@@ -300,6 +533,7 @@ acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state)
ACPI_OBJECT *reslist_object;
ACPI_STATUS status;
char *method_name, *reslist_name = NULL;
+ int new_state;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
@@ -501,8 +735,28 @@ acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state)
}
}
- /* Transition was successful */
- pc->ac_state = state;
+ /*
+ * Make sure the transition succeeded. If getting new state failed,
+ * just assume the new state is what we wanted. This was the behaviour
+ * before we were checking D-states.
+ */
+ if (ACPI_FAILURE(acpi_pwr_get_state_locked(consumer, &new_state))) {
+ printf("%s: failed to get new D-state\n", __func__);
+ pc->ac_state = state;
+ } else {
+ if (new_state != state)
+ printf("%s: new power state %s is not the one requested %s\n",
+ __func__, acpi_d_state_to_str(new_state),
+ acpi_d_state_to_str(state));
+ pc->ac_state = new_state;
+ }
+
+ /*
+ * We consider the transition successful even if the state we got doesn't
+ * reflect what we set it to. This is because we weren't previously
+ * checking the new state at all, so there might exist buggy platforms on
+ * which suspend would otherwise succeed if we failed here.
+ */
status = AE_OK;
out:
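For context, a minimal sketch of how a driver could consume the newly exported acpi_pwr_get_state() after a transition; mydrv_suspend_check() and its device are hypothetical, not part of this change:

    /* Sketch: verify a consumer actually reached the requested D-state. */
    static void
    mydrv_suspend_check(device_t dev)
    {
        ACPI_HANDLE handle = acpi_get_handle(dev);
        int state;

        if (ACPI_FAILURE(acpi_pwr_switch_consumer(handle, ACPI_STATE_D3)))
            return;
        if (ACPI_FAILURE(acpi_pwr_get_state(handle, &state)))
            return;    /* state could not be determined */
        if (state != ACPI_STATE_D3)
            device_printf(dev, "device left in D%d\n",
                state - ACPI_STATE_D0);
    }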
diff --git a/sys/dev/acpica/acpivar.h b/sys/dev/acpica/acpivar.h
index 6887f080311d..7495a010432b 100644
--- a/sys/dev/acpica/acpivar.h
+++ b/sys/dev/acpica/acpivar.h
@@ -490,6 +490,7 @@ EVENTHANDLER_DECLARE(acpi_video_event, acpi_event_handler_t);
/* Device power control. */
ACPI_STATUS acpi_pwr_wake_enable(ACPI_HANDLE consumer, int enable);
+ACPI_STATUS acpi_pwr_get_state(ACPI_HANDLE consumer, int *state);
ACPI_STATUS acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state);
acpi_pwr_for_sleep_t acpi_device_pwr_for_sleep;
int acpi_set_powerstate(device_t child, int state);
diff --git a/sys/dev/amdgpio/amdgpio.c b/sys/dev/amdgpio/amdgpio.c
index 2bd455c612b8..20589ff71b0b 100644
--- a/sys/dev/amdgpio/amdgpio.c
+++ b/sys/dev/amdgpio/amdgpio.c
@@ -3,6 +3,10 @@
*
* Copyright (c) 2018 Advanced Micro Devices
* All rights reserved.
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Aymeric Wibo
+ * <obiwac@freebsd.org> under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -51,11 +55,11 @@
#include <dev/acpica/acpivar.h>
#include <dev/gpio/gpiobusvar.h>
-#include "gpio_if.h"
#include "amdgpio.h"
static struct resource_spec amdgpio_spec[] = {
- { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
{ -1, 0, 0 }
};
@@ -196,7 +200,7 @@ static int
amdgpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags)
{
struct amdgpio_softc *sc;
- uint32_t reg, val, allowed;
+ uint32_t reg, val;
sc = device_get_softc(dev);
@@ -204,18 +208,19 @@ amdgpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags)
if (!amdgpio_valid_pin(sc, pin))
return (EINVAL);
- allowed = GPIO_PIN_INPUT | GPIO_PIN_OUTPUT;
+ if ((flags & ~AMDGPIO_DEFAULT_CAPS) != 0) {
+ device_printf(dev, "disallowed flags (0x%x) trying to be set "
+ "(allowed is 0x%x)\n", flags, AMDGPIO_DEFAULT_CAPS);
+ return (EINVAL);
+ }
- /*
- * Only directtion flag allowed
- */
- if (flags & ~allowed)
+ /* Either input or output must be selected. */
+ if ((flags & (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) == 0)
return (EINVAL);
- /*
- * Not both directions simultaneously
- */
- if ((flags & allowed) == allowed)
+ /* Not both directions simultaneously. */
+ if ((flags & (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) ==
+ (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT))
return (EINVAL);
/* Set the GPIO mode and state */
@@ -224,16 +229,21 @@ amdgpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags)
reg = AMDGPIO_PIN_REGISTER(pin);
val = amdgpio_read_4(sc, reg);
- if (flags & GPIO_PIN_INPUT) {
+ if ((flags & GPIO_PIN_INPUT) != 0)
val &= ~BIT(OUTPUT_ENABLE_OFF);
- sc->sc_gpio_pins[pin].gp_flags = GPIO_PIN_INPUT;
- } else {
+ else
val |= BIT(OUTPUT_ENABLE_OFF);
- sc->sc_gpio_pins[pin].gp_flags = GPIO_PIN_OUTPUT;
- }
+
+ val &= ~(BIT(PULL_DOWN_ENABLE_OFF) | BIT(PULL_UP_ENABLE_OFF));
+
+ if ((flags & GPIO_PIN_PULLDOWN) != 0)
+ val |= BIT(PULL_DOWN_ENABLE_OFF);
+ if ((flags & GPIO_PIN_PULLUP) != 0)
+ val |= BIT(PULL_UP_ENABLE_OFF);
amdgpio_write_4(sc, reg, val);
+ sc->sc_gpio_pins[pin].gp_flags = flags;
dprintf("pin %d flags 0x%x val 0x%x gp_flags 0x%x\n",
pin, flags, val, sc->sc_gpio_pins[pin].gp_flags);
@@ -359,11 +369,73 @@ amdgpio_probe(device_t dev)
return (rv);
}
+static void
+amdgpio_eoi_locked(struct amdgpio_softc *sc)
+{
+ uint32_t master_reg = amdgpio_read_4(sc, WAKE_INT_MASTER_REG);
+
+ AMDGPIO_ASSERT_LOCKED(sc);
+ master_reg |= EOI_MASK;
+ amdgpio_write_4(sc, WAKE_INT_MASTER_REG, master_reg);
+}
+
+static void
+amdgpio_eoi(struct amdgpio_softc *sc)
+{
+ AMDGPIO_LOCK(sc);
+ amdgpio_eoi_locked(sc);
+ AMDGPIO_UNLOCK(sc);
+}
+
+static int
+amdgpio_intr_filter(void *arg)
+{
+ struct amdgpio_softc *sc = arg;
+ int off, rv = FILTER_STRAY;
+ uint32_t reg;
+
+ /* We can lock in the filter routine as it is MTX_SPIN. */
+ AMDGPIO_LOCK(sc);
+
+ /*
+ * TODO Instead of just reading the registers of all pins, we should
+ * read WAKE_INT_STATUS_REG0/1. A bit set in here denotes a group of
+ * 4 pins where at least one has an interrupt for us. Then we can just
+ * iterate over those 4 pins.
+ *
+ * See GPIO_Interrupt_Status_Index_0 in BKDG.
+ */
+ for (size_t pin = 0; pin < AMD_GPIO_PINS_EXPOSED; pin++) {
+ off = AMDGPIO_PIN_REGISTER(pin);
+ reg = amdgpio_read_4(sc, off);
+ if ((reg & UNSERVICED_INTERRUPT_MASK) == 0)
+ continue;
+ /*
+ * Must write 1's to wake/interrupt status bits to clear them.
+ * We can do this simply by writing back to the register.
+ */
+ amdgpio_write_4(sc, off, reg);
+ }
+
+ amdgpio_eoi_locked(sc);
+ AMDGPIO_UNLOCK(sc);
+
+ rv = FILTER_HANDLED;
+ return (rv);
+}
+
+static void
+amdgpio_intr_handler(void *arg)
+{
+ /* TODO */
+}
+
static int
amdgpio_attach(device_t dev)
{
struct amdgpio_softc *sc;
- int i, pin, bank;
+ int i, pin, bank, reg;
+ uint32_t flags;
sc = device_get_softc(dev);
sc->sc_dev = dev;
@@ -386,6 +458,14 @@ amdgpio_attach(device_t dev)
sc->sc_bst = rman_get_bustag(sc->sc_res[0]);
sc->sc_bsh = rman_get_bushandle(sc->sc_res[0]);
+ /* Set up interrupt handler. */
+ if (bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_MISC | INTR_MPSAFE,
+ amdgpio_intr_filter, amdgpio_intr_handler, sc, &sc->sc_intr_handle)
+ != 0) {
+ device_printf(dev, "couldn't set up interrupt\n");
+ goto err_intr;
+ }
+
/* Initialize all possible pins to be Invalid */
for (i = 0; i < AMD_GPIO_PINS_MAX ; i++) {
snprintf(sc->sc_gpio_pins[i].gp_name, GPIOMAXNAME,
@@ -395,7 +475,12 @@ amdgpio_attach(device_t dev)
sc->sc_gpio_pins[i].gp_flags = 0;
}
- /* Initialize only driver exposed pins with appropriate capabilities */
+ /*
+ * Initialize only driver exposed pins with appropriate capabilities.
+ *
+ * XXX Also mask and disable interrupts on all pins, since we don't
+ * support them at the moment.
+ */
for (i = 0; i < AMD_GPIO_PINS_EXPOSED ; i++) {
pin = kernzp_pins[i].pin_num;
bank = pin/AMD_GPIO_PINS_PER_BANK;
@@ -406,7 +491,14 @@ amdgpio_attach(device_t dev)
sc->sc_gpio_pins[pin].gp_flags =
amdgpio_is_pin_output(sc, pin) ?
GPIO_PIN_OUTPUT : GPIO_PIN_INPUT;
+
+ reg = AMDGPIO_PIN_REGISTER(pin);
+ flags = amdgpio_read_4(sc, reg);
+ flags &= ~(1 << INTERRUPT_ENABLE_OFF);
+ flags &= ~(1 << INTERRUPT_MASK_OFF);
+ amdgpio_write_4(sc, reg, flags);
}
+ amdgpio_eoi(sc);
sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
@@ -418,8 +510,9 @@ amdgpio_attach(device_t dev)
return (0);
err_bus:
+ bus_teardown_intr(dev, sc->sc_res[1], sc->sc_intr_handle);
+err_intr:
bus_release_resources(dev, amdgpio_spec, sc->sc_res);
-
err_rsrc:
AMDGPIO_LOCK_DESTROY(sc);
@@ -434,7 +527,8 @@ amdgpio_detach(device_t dev)
if (sc->sc_busdev)
gpiobus_detach_bus(dev);
-
+ if (sc->sc_intr_handle)
+ bus_teardown_intr(dev, sc->sc_res[1], sc->sc_intr_handle);
bus_release_resources(dev, amdgpio_spec, sc->sc_res);
AMDGPIO_LOCK_DESTROY(sc);
diff --git a/sys/dev/amdgpio/amdgpio.h b/sys/dev/amdgpio/amdgpio.h
index aca3039bfc98..3743eba23e17 100644
--- a/sys/dev/amdgpio/amdgpio.h
+++ b/sys/dev/amdgpio/amdgpio.h
@@ -50,7 +50,8 @@
AMD_GPIO_PINS_BANK1 + \
AMD_GPIO_PINS_BANK2 + \
AMD_GPIO_PINS_BANK3)
-#define AMDGPIO_DEFAULT_CAPS (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)
+#define AMDGPIO_DEFAULT_CAPS (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT | \
+ GPIO_PIN_PULLDOWN | GPIO_PIN_PULLUP)
/* Register related macros */
#define AMDGPIO_PIN_REGISTER(pin) (pin * 4)
@@ -84,6 +85,9 @@
#define INTERRUPT_STS_OFF 28
#define WAKE_STS_OFF 29
+#define UNSERVICED_INTERRUPT_MASK \
+ ((1 << INTERRUPT_STS_OFF) | (1 << WAKE_STS_OFF))
+
#define DB_TMR_OUT_MASK 0xFUL
#define DB_CNTRL_MASK 0x3UL
#define ACTIVE_LEVEL_MASK 0x3UL
@@ -316,12 +320,13 @@ struct amdgpio_softc {
int sc_npins;
int sc_ngroups;
struct mtx sc_mtx;
- struct resource *sc_res[AMD_GPIO_NUM_PIN_BANK + 1];
+ struct resource *sc_res[2];
bus_space_tag_t sc_bst;
bus_space_handle_t sc_bsh;
struct gpio_pin sc_gpio_pins[AMD_GPIO_PINS_MAX];
const struct pin_info *sc_pin_info;
const struct amd_pingroup *sc_groups;
+ void *sc_intr_handle;
};
struct amdgpio_sysctl {
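With the pull flags now part of AMDGPIO_DEFAULT_CAPS, consumers can request the internal resistors through the generic gpio interface. A minimal sketch, assuming amdgpio_dev and pin identify a valid exposed pin:

    /*
     * Input with the internal pull-up; amdgpio_pin_setflags() rejects
     * with EINVAL any flags outside AMDGPIO_DEFAULT_CAPS.
     */
    error = GPIO_PIN_SETFLAGS(amdgpio_dev, pin,
        GPIO_PIN_INPUT | GPIO_PIN_PULLUP);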
diff --git a/sys/dev/ath/ath_rate/sample/sample.c b/sys/dev/ath/ath_rate/sample/sample.c
index 291d1ec64ed7..79bf08678249 100644
--- a/sys/dev/ath/ath_rate/sample/sample.c
+++ b/sys/dev/ath/ath_rate/sample/sample.c
@@ -179,7 +179,7 @@ ath_rate_sample_find_min_pktlength(struct ath_softc *sc,
const struct txschedule *sched = &sn->sched[rix0];
int max_pkt_length = 65530; // ATH_AGGR_MAXSIZE
// Note: this may not be true in all cases; need to check?
- int is_ht40 = (an->an_node.ni_chw == IEEE80211_STA_RX_BW_40);
+ int is_ht40 = (an->an_node.ni_chw == NET80211_STA_RX_BW_40);
// Note: not great, but good enough..
int idx = is_ht40 ? MCS_HT40 : MCS_HT20;
@@ -979,7 +979,7 @@ update_stats(struct ath_softc *sc, struct ath_node *an,
const int size_bin = size_to_bin(frame_size);
const int size = bin_to_size(size_bin);
int tt;
- int is_ht40 = (an->an_node.ni_chw == IEEE80211_STA_RX_BW_40);
+ int is_ht40 = (an->an_node.ni_chw == NET80211_STA_RX_BW_40);
int pct;
if (!IS_RATE_DEFINED(sn, rix0))
@@ -1365,7 +1365,7 @@ ath_rate_ctl_reset(struct ath_softc *sc, struct ieee80211_node *ni)
continue;
printf(" %d %s/%d", dot11rate(rt, rix), dot11rate_label(rt, rix),
calc_usecs_unicast_packet(sc, 1600, rix, 0,0,
- (ni->ni_chw == IEEE80211_STA_RX_BW_40)));
+ (ni->ni_chw == NET80211_STA_RX_BW_40)));
}
printf("\n");
}
@@ -1396,7 +1396,7 @@ ath_rate_ctl_reset(struct ath_softc *sc, struct ieee80211_node *ni)
sn->stats[y][rix].perfect_tx_time =
calc_usecs_unicast_packet(sc, size, rix, 0, 0,
- (ni->ni_chw == IEEE80211_STA_RX_BW_40));
+ (ni->ni_chw == NET80211_STA_RX_BW_40));
sn->stats[y][rix].average_tx_time =
sn->stats[y][rix].perfect_tx_time;
}
diff --git a/sys/dev/ath/if_ath_tx_ht.c b/sys/dev/ath/if_ath_tx_ht.c
index e7ee029fecf0..f42058bacb0d 100644
--- a/sys/dev/ath/if_ath_tx_ht.c
+++ b/sys/dev/ath/if_ath_tx_ht.c
@@ -283,7 +283,7 @@ ath_tx_rate_fill_rcflags(struct ath_softc *sc, struct ath_buf *bf)
if (IS_HT_RATE(rate)) {
rc[i].flags |= ATH_RC_HT_FLAG;
- if (ni->ni_chw == IEEE80211_STA_RX_BW_40)
+ if (ni->ni_chw == NET80211_STA_RX_BW_40)
rc[i].flags |= ATH_RC_CW40_FLAG;
/*
@@ -295,13 +295,13 @@ ath_tx_rate_fill_rcflags(struct ath_softc *sc, struct ath_buf *bf)
* and doesn't return the fractional part, so
* we are always "out" by some amount.
*/
- if (ni->ni_chw == IEEE80211_STA_RX_BW_40 &&
+ if (ni->ni_chw == NET80211_STA_RX_BW_40 &&
ieee80211_ht_check_tx_shortgi_40(ni) &&
(bf->bf_flags & ATH_BUF_TOA_PROBE) == 0) {
rc[i].flags |= ATH_RC_SGI_FLAG;
}
- if (ni->ni_chw == IEEE80211_STA_RX_BW_20 &&
+ if (ni->ni_chw == NET80211_STA_RX_BW_20 &&
ieee80211_ht_check_tx_shortgi_20(ni) &&
(bf->bf_flags & ATH_BUF_TOA_PROBE) == 0) {
rc[i].flags |= ATH_RC_SGI_FLAG;
diff --git a/sys/dev/bce/if_bce.c b/sys/dev/bce/if_bce.c
index 16bfce5338a7..6cf39e035ea6 100644
--- a/sys/dev/bce/if_bce.c
+++ b/sys/dev/bce/if_bce.c
@@ -1221,7 +1221,7 @@ bce_attach(device_t dev)
sc->bce_bc_ver[j++] = '.';
}
- /* Check if any management firwmare is enabled. */
+ /* Check if any management firmware is enabled. */
val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
diff --git a/sys/dev/bnxt/bnxt_re/qplib_res.c b/sys/dev/bnxt/bnxt_re/qplib_res.c
index 69661c67708c..f527af031176 100644
--- a/sys/dev/bnxt/bnxt_re/qplib_res.c
+++ b/sys/dev/bnxt/bnxt_re/qplib_res.c
@@ -875,7 +875,7 @@ int bnxt_qplib_alloc_dpi(struct bnxt_qplib_res *res,
dpi->umdbr = umaddr;
switch (type) {
case BNXT_QPLIB_DPI_TYPE_KERNEL:
- /* priviledged dbr was already mapped just initialize it. */
+ /* privileged dbr was already mapped just initialize it. */
dpi->umdbr = dpit->ucreg.bar_base +
dpit->ucreg.offset + bit_num * PAGE_SIZE;
dpi->dbr = dpit->priv_db;
@@ -1150,7 +1150,7 @@ int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res)
}
ucreg->bar_reg = ioremap(ucreg->bar_base, ucreg->len);
if (!ucreg->bar_reg) {
- dev_err(&res->pdev->dev, "priviledged dpi map failed!\n");
+ dev_err(&res->pdev->dev, "privileged dpi map failed!\n");
return -ENOMEM;
}
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
index 9e91250cb61c..9756a6945384 100644
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -9016,7 +9016,7 @@ sysctl_loadavg(SYSCTL_HANDLER_ARGS)
rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg");
if (rc)
return (rc);
- if (hw_all_ok(sc))
+ if (!hw_all_ok(sc))
rc = ENXIO;
else {
param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
diff --git a/sys/dev/cyapa/cyapa.c b/sys/dev/cyapa/cyapa.c
index 50fa4faa560a..ed755f992949 100644
--- a/sys/dev/cyapa/cyapa.c
+++ b/sys/dev/cyapa/cyapa.c
@@ -761,42 +761,60 @@ again:
/*
* Generate report
*/
- c0 = 0;
- if (delta_x < 0)
- c0 |= 0x10;
- if (delta_y < 0)
- c0 |= 0x20;
- c0 |= 0x08;
- if (but & CYAPA_FNGR_LEFT)
- c0 |= 0x01;
- if (but & CYAPA_FNGR_MIDDLE)
- c0 |= 0x04;
- if (but & CYAPA_FNGR_RIGHT)
- c0 |= 0x02;
-
- fifo_write_char(sc, &sc->rfifo, c0);
- fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_x);
- fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_y);
- switch(sc->zenabled) {
- case 1:
- /* Z axis all 8 bits */
- fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_z);
- break;
- case 2:
- /*
- * Z axis low 4 bits + 4th button and 5th button
- * (high 2 bits must be left 0). Auto-scale
- * delta_z to fit to avoid a wrong-direction
- * overflow (don't try to retain the remainder).
- */
- while (delta_z > 7 || delta_z < -8)
- delta_z >>= 1;
- c0 = (uint8_t)delta_z & 0x0F;
+ if (sc->mode.level == 1) {
+ c0 = MOUSE_SYS_SYNC;
+ if (but & CYAPA_FNGR_LEFT)
+ c0 |= MOUSE_SYS_BUTTON1UP;
+ if (but & CYAPA_FNGR_MIDDLE)
+ c0 |= MOUSE_SYS_BUTTON2UP;
+ if (but & CYAPA_FNGR_RIGHT)
+ c0 |= MOUSE_SYS_BUTTON3UP;
fifo_write_char(sc, &sc->rfifo, c0);
- break;
- default:
- /* basic PS/2 */
- break;
+ fifo_write_char(sc, &sc->rfifo, delta_x >> 1);
+ fifo_write_char(sc, &sc->rfifo, delta_y >> 1);
+ fifo_write_char(sc, &sc->rfifo, delta_x - (delta_x >> 1));
+ fifo_write_char(sc, &sc->rfifo, delta_y - (delta_y >> 1));
+ fifo_write_char(sc, &sc->rfifo, delta_z >> 1);
+ fifo_write_char(sc, &sc->rfifo, delta_z - (delta_z >> 1));
+ fifo_write_char(sc, &sc->rfifo, MOUSE_SYS_EXTBUTTONS);
+ } else {
+ c0 = 0;
+ if (delta_x < 0)
+ c0 |= 0x10;
+ if (delta_y < 0)
+ c0 |= 0x20;
+ c0 |= 0x08;
+ if (but & CYAPA_FNGR_LEFT)
+ c0 |= 0x01;
+ if (but & CYAPA_FNGR_MIDDLE)
+ c0 |= 0x04;
+ if (but & CYAPA_FNGR_RIGHT)
+ c0 |= 0x02;
+
+ fifo_write_char(sc, &sc->rfifo, c0);
+ fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_x);
+ fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_y);
+ switch(sc->zenabled) {
+ case 1:
+ /* Z axis all 8 bits */
+ fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_z);
+ break;
+ case 2:
+ /*
+ * Z axis low 4 bits + 4th button and 5th button
+ * (high 2 bits must be left 0). Auto-scale
+ * delta_z to fit to avoid a wrong-direction
+ * overflow (don't try to retain the remainder).
+ */
+ while (delta_z > 7 || delta_z < -8)
+ delta_z >>= 1;
+ c0 = (uint8_t)delta_z & 0x0F;
+ fifo_write_char(sc, &sc->rfifo, c0);
+ break;
+ default:
+ /* basic PS/2 */
+ break;
+ }
}
cyapa_notify(sc);
}
@@ -1205,6 +1223,11 @@ cyapaioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread
((mousemode_t *)data)->packetsize =
MOUSE_PS2_PACKETSIZE;
break;
+ case 1:
+ ((mousemode_t *)data)->protocol = MOUSE_PROTO_SYSMOUSE;
+ ((mousemode_t *)data)->packetsize =
+ MOUSE_SYS_PACKETSIZE;
+ break;
case 2:
((mousemode_t *)data)->protocol = MOUSE_PROTO_PS2;
((mousemode_t *)data)->packetsize =
@@ -1223,7 +1246,7 @@ cyapaioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread
error = EINVAL;
break;
}
- sc->mode.level = *(int *)data ? 2 : 0;
+ sc->mode.level = *(int *)data;
sc->zenabled = sc->mode.level ? 1 : 0;
break;
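At level 1 the driver now emits the 8-byte extended sysmouse packet: byte 0 carries the sync and button bits, bytes 1-4 the X/Y deltas split into two halves, bytes 5-6 the Z halves, and byte 7 the extended buttons. Consumers recover each delta by summing its halves; a decode-side sketch, assuming pkt holds one complete packet:

    int dx = (int8_t)pkt[1] + (int8_t)pkt[3];
    int dy = (int8_t)pkt[2] + (int8_t)pkt[4];
    int dz = (int8_t)pkt[5] + (int8_t)pkt[6];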
diff --git a/sys/dev/e1000/e1000_phy.c b/sys/dev/e1000/e1000_phy.c
index c34897e3b31a..634f48171c3e 100644
--- a/sys/dev/e1000/e1000_phy.c
+++ b/sys/dev/e1000/e1000_phy.c
@@ -1707,10 +1707,9 @@ s32 e1000_setup_copper_link_generic(struct e1000_hw *hw)
* autonegotiation.
*/
ret_val = e1000_copper_link_autoneg(hw);
- if (ret_val && !hw->mac.forced_speed_duplex)
+ if (ret_val)
return ret_val;
- }
- if (!hw->mac.autoneg || (ret_val && hw->mac.forced_speed_duplex)) {
+ } else {
/* PHY will be set to 10H, 10F, 100H or 100F
* depending on user settings.
*/
diff --git a/sys/dev/e1000/if_em.c b/sys/dev/e1000/if_em.c
index f0ef6051fab1..9c5ae2806f75 100644
--- a/sys/dev/e1000/if_em.c
+++ b/sys/dev/e1000/if_em.c
@@ -2000,18 +2000,7 @@ em_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
(sc->hw.phy.media_type == e1000_media_type_internal_serdes)) {
if (sc->hw.mac.type == e1000_82545)
fiber_type = IFM_1000_LX;
- switch (sc->link_speed) {
- case 10:
- ifmr->ifm_active |= IFM_10_FL;
- break;
- case 100:
- ifmr->ifm_active |= IFM_100_FX;
- break;
- case 1000:
- default:
- ifmr->ifm_active |= fiber_type | IFM_FDX;
- break;
- }
+ ifmr->ifm_active |= fiber_type | IFM_FDX;
} else {
switch (sc->link_speed) {
case 10:
@@ -2024,12 +2013,11 @@ em_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
ifmr->ifm_active |= IFM_1000_T;
break;
}
+ if (sc->link_duplex == FULL_DUPLEX)
+ ifmr->ifm_active |= IFM_FDX;
+ else
+ ifmr->ifm_active |= IFM_HDX;
}
-
- if (sc->link_duplex == FULL_DUPLEX)
- ifmr->ifm_active |= IFM_FDX;
- else
- ifmr->ifm_active |= IFM_HDX;
}
/*********************************************************************
@@ -2063,26 +2051,6 @@ em_if_media_change(if_ctx_t ctx)
sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
break;
case IFM_100_TX:
- sc->hw.mac.autoneg = DO_AUTO_NEG;
- if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
- sc->hw.phy.autoneg_advertised = ADVERTISE_100_FULL;
- sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
- } else {
- sc->hw.phy.autoneg_advertised = ADVERTISE_100_HALF;
- sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
- }
- break;
- case IFM_10_T:
- sc->hw.mac.autoneg = DO_AUTO_NEG;
- if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
- sc->hw.phy.autoneg_advertised = ADVERTISE_10_FULL;
- sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
- } else {
- sc->hw.phy.autoneg_advertised = ADVERTISE_10_HALF;
- sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
- }
- break;
- case IFM_100_FX:
sc->hw.mac.autoneg = false;
sc->hw.phy.autoneg_advertised = 0;
if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
@@ -2090,7 +2058,7 @@ em_if_media_change(if_ctx_t ctx)
else
sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
break;
- case IFM_10_FL:
+ case IFM_10_T:
sc->hw.mac.autoneg = false;
sc->hw.phy.autoneg_advertised = 0;
if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
diff --git a/sys/dev/fdt/fdt_common.c b/sys/dev/fdt/fdt_common.c
index 1fea4c6f1392..f43551c6310e 100644
--- a/sys/dev/fdt/fdt_common.c
+++ b/sys/dev/fdt/fdt_common.c
@@ -62,8 +62,6 @@
SYSCTL_NODE(_hw, OID_AUTO, fdt, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"Flattened Device Tree");
-struct fdt_ic_list fdt_ic_list_head = SLIST_HEAD_INITIALIZER(fdt_ic_list_head);
-
static int
fdt_get_range_by_busaddr(phandle_t node, u_long addr, u_long *base,
u_long *size)
diff --git a/sys/dev/fdt/fdt_common.h b/sys/dev/fdt/fdt_common.h
index ece54290a6ad..f597233f9771 100644
--- a/sys/dev/fdt/fdt_common.h
+++ b/sys/dev/fdt/fdt_common.h
@@ -59,13 +59,6 @@ struct fdt_fixup_entry {
extern struct fdt_fixup_entry fdt_fixup_table[];
#endif
-extern SLIST_HEAD(fdt_ic_list, fdt_ic) fdt_ic_list_head;
-struct fdt_ic {
- SLIST_ENTRY(fdt_ic) fdt_ics;
- ihandle_t iph;
- device_t dev;
-};
-
#if defined(FDT_DTB_STATIC)
extern u_char fdt_static_dtb;
#endif
diff --git a/sys/dev/gpio/acpi_gpiobus.c b/sys/dev/gpio/acpi_gpiobus.c
index 170f23615416..0d2455cab399 100644
--- a/sys/dev/gpio/acpi_gpiobus.c
+++ b/sys/dev/gpio/acpi_gpiobus.c
@@ -37,6 +37,7 @@
#include <dev/gpio/gpiobusvar.h>
#include <dev/gpio/acpi_gpiobusvar.h>
#include <dev/gpio/gpiobus_internal.h>
+#include <sys/sbuf.h>
#include "gpiobus_if.h"
@@ -52,12 +53,11 @@ struct acpi_gpiobus_ctx {
struct acpi_gpiobus_ivar
{
- struct gpiobus_ivar gpiobus; /* Must come first */
- ACPI_HANDLE dev_handle; /* ACPI handle for bus */
- uint32_t flags;
+ struct gpiobus_ivar gpiobus;
+ ACPI_HANDLE handle;
};
-static uint32_t
+uint32_t
acpi_gpiobus_convflags(ACPI_RESOURCE_GPIO *gpio_res)
{
uint32_t flags = 0;
@@ -150,70 +150,24 @@ acpi_gpiobus_enumerate_res(ACPI_RESOURCE *res, void *context)
return (AE_OK);
}
-static struct acpi_gpiobus_ivar *
-acpi_gpiobus_setup_devinfo(device_t bus, device_t child,
- ACPI_RESOURCE_GPIO *gpio_res)
-{
- struct acpi_gpiobus_ivar *devi;
-
- devi = malloc(sizeof(*devi), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (devi == NULL)
- return (NULL);
- resource_list_init(&devi->gpiobus.rl);
-
- devi->flags = acpi_gpiobus_convflags(gpio_res);
- if (acpi_quirks & ACPI_Q_AEI_NOPULL)
- devi->flags &= ~GPIO_PIN_PULLUP;
-
- devi->gpiobus.npins = 1;
- if (gpiobus_alloc_ivars(&devi->gpiobus) != 0) {
- free(devi, M_DEVBUF);
- return (NULL);
- }
-
- for (int i = 0; i < devi->gpiobus.npins; i++)
- devi->gpiobus.pins[i] = gpio_res->PinTable[i];
-
- return (devi);
-}
-
static ACPI_STATUS
acpi_gpiobus_enumerate_aei(ACPI_RESOURCE *res, void *context)
{
ACPI_RESOURCE_GPIO *gpio_res = &res->Data.Gpio;
- struct acpi_gpiobus_ctx *ctx = context;
- device_t bus = ctx->sc->sc_busdev;
- device_t child;
- struct acpi_gpiobus_ivar *devi;
+ uint32_t *npins = context, *pins = npins + 1;
- /* Check that we have a GpioInt object. */
+ /*
+ * Check that we have a GpioInt object.
+ * Note that according to the spec this
+ * should always be the case.
+ */
if (res->Type != ACPI_RESOURCE_TYPE_GPIO)
return (AE_OK);
if (gpio_res->ConnectionType != ACPI_RESOURCE_GPIO_TYPE_INT)
return (AE_OK);
- /* Add a child. */
- child = device_add_child_ordered(bus, 0, "gpio_aei", DEVICE_UNIT_ANY);
- if (child == NULL)
- return (AE_OK);
- devi = acpi_gpiobus_setup_devinfo(bus, child, gpio_res);
- if (devi == NULL) {
- device_delete_child(bus, child);
- return (AE_OK);
- }
- device_set_ivars(child, devi);
-
- for (int i = 0; i < devi->gpiobus.npins; i++) {
- if (GPIOBUS_PIN_SETFLAGS(bus, child, 0, devi->flags &
- ~GPIO_INTR_MASK)) {
- device_delete_child(bus, child);
- return (AE_OK);
- }
- }
-
- /* Pass ACPI information to children. */
- devi->dev_handle = ctx->dev_handle;
-
+ for (int i = 0; i < gpio_res->PinTableLength; i++)
+ pins[(*npins)++] = gpio_res->PinTable[i];
return (AE_OK);
}
@@ -296,6 +250,63 @@ err:
return (AE_BAD_PARAMETER);
}
+static void
+acpi_gpiobus_attach_aei(struct acpi_gpiobus_softc *sc, ACPI_HANDLE handle)
+{
+ struct acpi_gpiobus_ivar *devi;
+ ACPI_HANDLE aei_handle;
+ device_t child;
+ uint32_t *pins;
+ ACPI_STATUS status;
+ int err;
+
+ status = AcpiGetHandle(handle, "_AEI", &aei_handle);
+ if (ACPI_FAILURE(status))
+ return;
+
+ /* pins[0] specifies the length of the array. */
+ pins = mallocarray(sc->super_sc.sc_npins + 1,
+ sizeof(uint32_t), M_DEVBUF, M_WAITOK);
+ pins[0] = 0;
+
+ status = AcpiWalkResources(handle, "_AEI",
+ acpi_gpiobus_enumerate_aei, pins);
+ if (ACPI_FAILURE(status)) {
+ device_printf(sc->super_sc.sc_busdev,
+ "Failed to enumerate AEI resources\n");
+ free(pins, M_DEVBUF);
+ return;
+ }
+
+ child = BUS_ADD_CHILD(sc->super_sc.sc_busdev, 0, "gpio_aei",
+ DEVICE_UNIT_ANY);
+ if (child == NULL) {
+ device_printf(sc->super_sc.sc_busdev,
+ "Failed to add gpio_aei child\n");
+ free(pins, M_DEVBUF);
+ return;
+ }
+
+ devi = device_get_ivars(child);
+ devi->gpiobus.npins = pins[0];
+ devi->handle = aei_handle;
+
+ err = gpiobus_alloc_ivars(&devi->gpiobus);
+ if (err != 0) {
+ device_printf(sc->super_sc.sc_busdev,
+ "Failed to allocate gpio_aei ivars\n");
+ device_delete_child(sc->super_sc.sc_busdev, child);
+ free(pins, M_DEVBUF);
+ return;
+ }
+
+ for (int i = 0; i < pins[0]; i++)
+ devi->gpiobus.pins[i] = pins[i + 1];
+ free(pins, M_DEVBUF);
+
+ bus_attach_children(sc->super_sc.sc_busdev);
+}
+
static int
acpi_gpiobus_probe(device_t dev)
{
@@ -353,13 +364,8 @@ acpi_gpiobus_attach(device_t dev)
if (ACPI_FAILURE(status))
device_printf(dev, "Failed to enumerate GPIO resources\n");
- /* Look for AEI children */
- status = AcpiWalkResources(handle, "_AEI", acpi_gpiobus_enumerate_aei,
- &ctx);
-
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
- device_printf(dev, "Failed to enumerate AEI resources\n");
-
+ /* Look for AEI child */
+ acpi_gpiobus_attach_aei(sc, handle);
return (0);
}
@@ -390,10 +396,7 @@ acpi_gpiobus_read_ivar(device_t dev, device_t child, int which,
switch (which) {
case ACPI_GPIOBUS_IVAR_HANDLE:
- *result = (uintptr_t)devi->dev_handle;
- break;
- case ACPI_GPIOBUS_IVAR_FLAGS:
- *result = (uintptr_t)devi->flags;
+ *result = (uintptr_t)devi->handle;
break;
default:
return (gpiobus_read_ivar(dev, child, which, result));
@@ -402,6 +405,28 @@ acpi_gpiobus_read_ivar(device_t dev, device_t child, int which,
return (0);
}
+static device_t
+acpi_gpiobus_add_child(device_t dev, u_int order, const char *name, int unit)
+{
+ return (gpiobus_add_child_common(dev, order, name, unit,
+ sizeof(struct acpi_gpiobus_ivar)));
+}
+
+static int
+acpi_gpiobus_child_location(device_t bus, device_t child, struct sbuf *sb)
+{
+ struct acpi_gpiobus_ivar *devi;
+ int err;
+
+ err = gpiobus_child_location(bus, child, sb);
+ if (err != 0)
+ return (err);
+
+ devi = device_get_ivars(child);
+ sbuf_printf(sb, " handle=%s", acpi_name(devi->handle));
+ return (0);
+}
+
static device_method_t acpi_gpiobus_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, acpi_gpiobus_probe),
@@ -410,6 +435,8 @@ static device_method_t acpi_gpiobus_methods[] = {
/* Bus interface */
DEVMETHOD(bus_read_ivar, acpi_gpiobus_read_ivar),
+ DEVMETHOD(bus_add_child, acpi_gpiobus_add_child),
+ DEVMETHOD(bus_child_location, acpi_gpiobus_child_location),
DEVMETHOD_END
};
diff --git a/sys/dev/gpio/acpi_gpiobusvar.h b/sys/dev/gpio/acpi_gpiobusvar.h
index f8d502eab9d1..288e8bd0f2af 100644
--- a/sys/dev/gpio/acpi_gpiobusvar.h
+++ b/sys/dev/gpio/acpi_gpiobusvar.h
@@ -33,16 +33,16 @@
#include <contrib/dev/acpica/include/acpi.h>
enum acpi_gpiobus_ivars {
- ACPI_GPIOBUS_IVAR_HANDLE = 10600,
- ACPI_GPIOBUS_IVAR_FLAGS,
+ ACPI_GPIOBUS_IVAR_HANDLE = 10600
};
#define ACPI_GPIOBUS_ACCESSOR(var, ivar, type) \
__BUS_ACCESSOR(acpi_gpiobus, var, ACPI_GPIOBUS, ivar, type)
ACPI_GPIOBUS_ACCESSOR(handle, HANDLE, ACPI_HANDLE)
-ACPI_GPIOBUS_ACCESSOR(flags, FLAGS, uint32_t)
#undef ACPI_GPIOBUS_ACCESSOR
+uint32_t acpi_gpiobus_convflags(ACPI_RESOURCE_GPIO *);
+
#endif /* __ACPI_GPIOBUS_H__ */
diff --git a/sys/dev/gpio/gpio_if.m b/sys/dev/gpio/gpio_if.m
index 5501b2b5c0e7..0b6988ceba79 100644
--- a/sys/dev/gpio/gpio_if.m
+++ b/sys/dev/gpio/gpio_if.m
@@ -62,6 +62,22 @@ CODE {
return (0);
}
+
+ static int
+ gpio_default_get_pin_list(device_t dev, uint32_t *pin_list)
+ {
+ uint32_t maxpin;
+ int err;
+
+ err = GPIO_PIN_MAX(dev, &maxpin);
+ if (err != 0)
+ return (ENXIO);
+
+ for (int i = 0; i <= maxpin; i++)
+ pin_list[i] = i;
+
+ return (0);
+ }
};
HEADER {
@@ -185,3 +201,13 @@ METHOD int pin_config_32 {
uint32_t num_pins;
uint32_t *pin_flags;
} DEFAULT gpio_default_nosupport;
+
+#
+# Get the controller's pin numbers. pin_list is expected to be an array with
+# at least GPIO_PIN_MAX() + 1 elements. Populates pin_list with pin numbers 0
+# through GPIO_PIN_MAX() by default.
+#
+METHOD int get_pin_list {
+ device_t dev;
+ uint32_t *pin_list;
+} DEFAULT gpio_default_get_pin_list;
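A controller whose exposed pins are not simply numbered 0..GPIO_PIN_MAX() can override this method rather than take the identity default. A hypothetical sketch (the mygpio_* names and pin map are assumptions):

    static int
    mygpio_get_pin_list(device_t dev, uint32_t *pin_list)
    {
        struct mygpio_softc *sc = device_get_softc(dev);

        /* Report the sparse hardware pin numbers, one per exposed pin. */
        for (int i = 0; i < sc->sc_npins; i++)
            pin_list[i] = sc->sc_pinmap[i];
        return (0);
    }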
diff --git a/sys/dev/gpio/gpioaei.c b/sys/dev/gpio/gpioaei.c
index ecae8ccaf2fa..7b97277aaf61 100644
--- a/sys/dev/gpio/gpioaei.c
+++ b/sys/dev/gpio/gpioaei.c
@@ -45,13 +45,21 @@ enum gpio_aei_type {
ACPI_AEI_TYPE_EVT
};
-struct gpio_aei_softc {
- ACPI_HANDLE handle;
- enum gpio_aei_type type;
- int pin;
+struct gpio_aei_ctx {
+ SLIST_ENTRY(gpio_aei_ctx) next;
struct resource * intr_res;
- int intr_rid;
void * intr_cookie;
+ ACPI_HANDLE handle;
+ gpio_pin_t gpio;
+ uint32_t pin;
+ int intr_rid;
+ enum gpio_aei_type type;
+};
+
+struct gpio_aei_softc {
+ SLIST_HEAD(, gpio_aei_ctx) aei_ctx;
+ ACPI_HANDLE dev_handle;
+ device_t dev;
};
static int
@@ -65,69 +73,157 @@ gpio_aei_probe(device_t dev)
static void
gpio_aei_intr(void * arg)
{
- struct gpio_aei_softc * sc = arg;
+ struct gpio_aei_ctx * ctx = arg;
/* Ask ACPI to run the appropriate _EVT, _Exx or _Lxx method. */
- if (sc->type == ACPI_AEI_TYPE_EVT)
- acpi_SetInteger(sc->handle, NULL, sc->pin);
+ if (ctx->type == ACPI_AEI_TYPE_EVT)
+ acpi_SetInteger(ctx->handle, NULL, ctx->pin);
else
- AcpiEvaluateObject(sc->handle, NULL, NULL, NULL);
+ AcpiEvaluateObject(ctx->handle, NULL, NULL, NULL);
+}
+
+static ACPI_STATUS
+gpio_aei_enumerate(ACPI_RESOURCE * res, void * context)
+{
+ ACPI_RESOURCE_GPIO * gpio_res = &res->Data.Gpio;
+ struct gpio_aei_softc * sc = context;
+ uint32_t flags, maxpin;
+ device_t busdev;
+ int err;
+
+ /*
+ * Check that we have a GpioInt object.
+ * Note that according to the spec this
+ * should always be the case.
+ */
+ if (res->Type != ACPI_RESOURCE_TYPE_GPIO)
+ return (AE_OK);
+ if (gpio_res->ConnectionType != ACPI_RESOURCE_GPIO_TYPE_INT)
+ return (AE_OK);
+
+ flags = acpi_gpiobus_convflags(gpio_res);
+ if (acpi_quirks & ACPI_Q_AEI_NOPULL)
+ flags &= ~GPIO_PIN_PULLUP;
+
+ err = GPIO_PIN_MAX(acpi_get_device(sc->dev_handle), &maxpin);
+ if (err != 0)
+ return (AE_ERROR);
+
+ busdev = GPIO_GET_BUS(acpi_get_device(sc->dev_handle));
+ for (int i = 0; i < gpio_res->PinTableLength; i++) {
+ struct gpio_aei_ctx * ctx;
+ uint32_t pin = gpio_res->PinTable[i];
+
+ if (__predict_false(pin > maxpin)) {
+ device_printf(sc->dev,
+ "Invalid pin 0x%x, max: 0x%x (bad ACPI tables?)\n",
+ pin, maxpin);
+ continue;
+ }
+
+ ctx = malloc(sizeof(struct gpio_aei_ctx), M_DEVBUF, M_WAITOK);
+ ctx->type = ACPI_AEI_TYPE_UNKNOWN;
+ if (pin <= 255) {
+ char objname[5]; /* "_EXX" or "_LXX" */
+ sprintf(objname, "_%c%02X",
+ (flags & GPIO_INTR_EDGE_MASK) ? 'E' : 'L', pin);
+ if (ACPI_SUCCESS(AcpiGetHandle(sc->dev_handle, objname,
+ &ctx->handle)))
+ ctx->type = ACPI_AEI_TYPE_ELX;
+ }
+
+ if (ctx->type == ACPI_AEI_TYPE_UNKNOWN) {
+ if (ACPI_SUCCESS(AcpiGetHandle(sc->dev_handle, "_EVT",
+ &ctx->handle)))
+ ctx->type = ACPI_AEI_TYPE_EVT;
+ else {
+ device_printf(sc->dev,
+ "AEI Device type is unknown for pin 0x%x\n",
+ pin);
+
+ free(ctx, M_DEVBUF);
+ continue;
+ }
+ }
+
+ err = gpio_pin_get_by_bus_pinnum(busdev, pin, &ctx->gpio);
+ if (err != 0) {
+ device_printf(sc->dev, "Cannot acquire pin 0x%x\n",
+ pin);
+
+ free(ctx, M_DEVBUF);
+ continue;
+ }
+
+ err = gpio_pin_setflags(ctx->gpio, flags & ~GPIO_INTR_MASK);
+ if (err != 0) {
+ device_printf(sc->dev,
+ "Cannot set pin flags for pin 0x%x\n", pin);
+
+ gpio_pin_release(ctx->gpio);
+ free(ctx, M_DEVBUF);
+ continue;
+ }
+
+ ctx->intr_rid = 0;
+ ctx->intr_res = gpio_alloc_intr_resource(sc->dev,
+ &ctx->intr_rid, RF_ACTIVE, ctx->gpio,
+ flags & GPIO_INTR_MASK);
+ if (ctx->intr_res == NULL) {
+ device_printf(sc->dev,
+ "Cannot allocate an IRQ for pin 0x%x\n", pin);
+
+ gpio_pin_release(ctx->gpio);
+ free(ctx, M_DEVBUF);
+ continue;
+ }
+
+ err = bus_setup_intr(sc->dev, ctx->intr_res, INTR_TYPE_MISC |
+ INTR_MPSAFE | INTR_EXCL | INTR_SLEEPABLE, NULL,
+ gpio_aei_intr, ctx, &ctx->intr_cookie);
+ if (err != 0) {
+ device_printf(sc->dev,
+ "Cannot set up an IRQ for pin 0x%x\n", pin);
+
+ bus_release_resource(sc->dev, ctx->intr_res);
+ gpio_pin_release(ctx->gpio);
+ free(ctx, M_DEVBUF);
+ continue;
+ }
+
+ ctx->pin = pin;
+ SLIST_INSERT_HEAD(&sc->aei_ctx, ctx, next);
+ }
+
+ return (AE_OK);
}
static int
gpio_aei_attach(device_t dev)
{
struct gpio_aei_softc * sc = device_get_softc(dev);
- gpio_pin_t pin;
- uint32_t flags;
ACPI_HANDLE handle;
- int err;
+ ACPI_STATUS status;
/* This is us. */
device_set_desc(dev, "ACPI Event Information Device");
- /* Store parameters needed by gpio_aei_intr. */
handle = acpi_gpiobus_get_handle(dev);
- if (gpio_pin_get_by_child_index(dev, 0, &pin) != 0) {
- device_printf(dev, "Unable to get the input pin\n");
+ status = AcpiGetParent(handle, &sc->dev_handle);
+ if (ACPI_FAILURE(status)) {
+ device_printf(dev, "Cannot get parent of %s\n",
+ acpi_name(handle));
return (ENXIO);
}
- sc->type = ACPI_AEI_TYPE_UNKNOWN;
- sc->pin = pin->pin;
-
- flags = acpi_gpiobus_get_flags(dev);
- if (pin->pin <= 255) {
- char objname[5]; /* "_EXX" or "_LXX" */
- sprintf(objname, "_%c%02X",
- (flags & GPIO_INTR_EDGE_MASK) ? 'E' : 'L', pin->pin);
- if (ACPI_SUCCESS(AcpiGetHandle(handle, objname, &sc->handle)))
- sc->type = ACPI_AEI_TYPE_ELX;
- }
- if (sc->type == ACPI_AEI_TYPE_UNKNOWN) {
- if (ACPI_SUCCESS(AcpiGetHandle(handle, "_EVT", &sc->handle)))
- sc->type = ACPI_AEI_TYPE_EVT;
- }
-
- if (sc->type == ACPI_AEI_TYPE_UNKNOWN) {
- device_printf(dev, "ACPI Event Information Device type is unknown");
- return (ENOTSUP);
- }
+ SLIST_INIT(&sc->aei_ctx);
+ sc->dev = dev;
- /* Set up the interrupt. */
- if ((sc->intr_res = gpio_alloc_intr_resource(dev, &sc->intr_rid,
- RF_ACTIVE, pin, flags & GPIO_INTR_MASK)) == NULL) {
- device_printf(dev, "Cannot allocate an IRQ\n");
- return (ENOTSUP);
- }
- err = bus_setup_intr(dev, sc->intr_res, INTR_TYPE_MISC | INTR_MPSAFE |
- INTR_EXCL | INTR_SLEEPABLE, NULL, gpio_aei_intr, sc,
- &sc->intr_cookie);
- if (err != 0) {
- device_printf(dev, "Cannot set up IRQ\n");
- bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid,
- sc->intr_res);
- return (err);
+ status = AcpiWalkResources(sc->dev_handle, "_AEI",
+ gpio_aei_enumerate, sc);
+ if (ACPI_FAILURE(status)) {
+ device_printf(dev, "Failed to enumerate AEI resources\n");
+ return (ENXIO);
}
return (0);
@@ -137,9 +233,15 @@ static int
gpio_aei_detach(device_t dev)
{
struct gpio_aei_softc * sc = device_get_softc(dev);
+ struct gpio_aei_ctx * ctx, * tctx;
+
+ SLIST_FOREACH_SAFE(ctx, &sc->aei_ctx, next, tctx) {
+ bus_teardown_intr(dev, ctx->intr_res, ctx->intr_cookie);
+ bus_release_resource(dev, ctx->intr_res);
+ gpio_pin_release(ctx->gpio);
+ free(ctx, M_DEVBUF);
+ }
- bus_teardown_intr(dev, sc->intr_res, sc->intr_cookie);
- bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid, sc->intr_res);
return (0);
}
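To make the _Exx/_Lxx lookup above concrete: an edge-triggered GpioInt on pin 0x12 resolves to a method named "_E12", a level-triggered one to "_L12". Since only two hex digits fit in the name, pins above 255 must be serviced through _EVT, which is why the code falls back to it. A sketch of the name construction, with edge as an assumed boolean:

    char objname[5];    /* "_EXX" or "_LXX" plus the terminating NUL */
    snprintf(objname, sizeof(objname), "_%c%02X", edge ? 'E' : 'L', 0x12);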
diff --git a/sys/dev/gpio/gpiobus.c b/sys/dev/gpio/gpiobus.c
index c25c41f43042..698b5e5fdd01 100644
--- a/sys/dev/gpio/gpiobus.c
+++ b/sys/dev/gpio/gpiobus.c
@@ -57,7 +57,6 @@ static int gpiobus_suspend(device_t);
static int gpiobus_resume(device_t);
static void gpiobus_probe_nomatch(device_t, device_t);
static int gpiobus_print_child(device_t, device_t);
-static int gpiobus_child_location(device_t, device_t, struct sbuf *);
static device_t gpiobus_add_child(device_t, u_int, const char *, int);
static void gpiobus_hinted_child(device_t, const char *, int);
@@ -320,10 +319,6 @@ gpiobus_add_bus(device_t dev)
busdev = device_add_child(dev, "gpiobus", DEVICE_UNIT_ANY);
if (busdev == NULL)
return (NULL);
- if (device_add_child(dev, "gpioc", DEVICE_UNIT_ANY) == NULL) {
- device_delete_child(dev, busdev);
- return (NULL);
- }
#ifdef FDT
ofw_gpiobus_register_provider(dev);
#endif
@@ -373,6 +368,37 @@ gpiobus_init_softc(device_t dev)
}
int
+gpiobus_add_gpioc(device_t dev)
+{
+ struct gpiobus_ivar *devi;
+ struct gpiobus_softc *sc;
+ device_t gpioc;
+ int err;
+
+ gpioc = BUS_ADD_CHILD(dev, 0, "gpioc", DEVICE_UNIT_ANY);
+ if (gpioc == NULL)
+ return (ENXIO);
+
+ sc = device_get_softc(dev);
+ devi = device_get_ivars(gpioc);
+
+ devi->npins = sc->sc_npins;
+ err = gpiobus_alloc_ivars(devi);
+ if (err != 0) {
+ device_delete_child(dev, gpioc);
+ return (err);
+ }
+
+ err = GPIO_GET_PIN_LIST(sc->sc_dev, devi->pins);
+ if (err != 0) {
+ device_delete_child(dev, gpioc);
+ gpiobus_free_ivars(devi);
+ }
+
+ return (err);
+}
+
+int
gpiobus_alloc_ivars(struct gpiobus_ivar *devi)
{
@@ -563,6 +589,10 @@ gpiobus_attach(device_t dev)
if (err != 0)
return (err);
+ err = gpiobus_add_gpioc(dev);
+ if (err != 0)
+ return (err);
+
/*
* Get parent's pins and mark them as unmapped
*/
@@ -662,7 +692,7 @@ gpiobus_print_child(device_t dev, device_t child)
return (retval);
}
-static int
+int
gpiobus_child_location(device_t bus, device_t child, struct sbuf *sb)
{
struct gpiobus_ivar *devi;
@@ -674,16 +704,19 @@ gpiobus_child_location(device_t bus, device_t child, struct sbuf *sb)
return (0);
}
-static device_t
-gpiobus_add_child(device_t dev, u_int order, const char *name, int unit)
+device_t
+gpiobus_add_child_common(device_t dev, u_int order, const char *name, int unit,
+ size_t ivars_size)
{
device_t child;
struct gpiobus_ivar *devi;
+ KASSERT(ivars_size >= sizeof(struct gpiobus_ivar),
+ ("child ivars must include gpiobus_ivar as their first member"));
child = device_add_child_ordered(dev, order, name, unit);
if (child == NULL)
return (child);
- devi = malloc(sizeof(struct gpiobus_ivar), M_DEVBUF, M_NOWAIT | M_ZERO);
+ devi = malloc(ivars_size, M_DEVBUF, M_NOWAIT | M_ZERO);
if (devi == NULL) {
device_delete_child(dev, child);
return (NULL);
@@ -694,6 +727,13 @@ gpiobus_add_child(device_t dev, u_int order, const char *name, int unit)
return (child);
}
+static device_t
+gpiobus_add_child(device_t dev, u_int order, const char *name, int unit)
+{
+ return (gpiobus_add_child_common(dev, order, name, unit,
+ sizeof(struct gpiobus_ivar)));
+}
+
static void
gpiobus_child_deleted(device_t dev, device_t child)
{
@@ -952,7 +992,7 @@ gpiobus_pin_getflags(device_t dev, device_t child, uint32_t pin,
if (pin >= devi->npins)
return (EINVAL);
- return GPIO_PIN_GETFLAGS(sc->sc_dev, devi->pins[pin], flags);
+ return (GPIO_PIN_GETFLAGS(sc->sc_dev, devi->pins[pin], flags));
}
static int
@@ -965,7 +1005,7 @@ gpiobus_pin_getcaps(device_t dev, device_t child, uint32_t pin,
if (pin >= devi->npins)
return (EINVAL);
- return GPIO_PIN_GETCAPS(sc->sc_dev, devi->pins[pin], caps);
+ return (GPIO_PIN_GETCAPS(sc->sc_dev, devi->pins[pin], caps));
}
static int
@@ -978,7 +1018,7 @@ gpiobus_pin_set(device_t dev, device_t child, uint32_t pin,
if (pin >= devi->npins)
return (EINVAL);
- return GPIO_PIN_SET(sc->sc_dev, devi->pins[pin], value);
+ return (GPIO_PIN_SET(sc->sc_dev, devi->pins[pin], value));
}
static int
@@ -991,7 +1031,7 @@ gpiobus_pin_get(device_t dev, device_t child, uint32_t pin,
if (pin >= devi->npins)
return (EINVAL);
- return GPIO_PIN_GET(sc->sc_dev, devi->pins[pin], value);
+ return (GPIO_PIN_GET(sc->sc_dev, devi->pins[pin], value));
}
static int
@@ -1003,7 +1043,57 @@ gpiobus_pin_toggle(device_t dev, device_t child, uint32_t pin)
if (pin >= devi->npins)
return (EINVAL);
- return GPIO_PIN_TOGGLE(sc->sc_dev, devi->pins[pin]);
+ return (GPIO_PIN_TOGGLE(sc->sc_dev, devi->pins[pin]));
+}
+
+/*
+ * Verify that a child has all the pins they are requesting
+ * to access in their ivars.
+ */
+static bool
+gpiobus_pin_verify_32(struct gpiobus_ivar *devi, uint32_t first_pin,
+ uint32_t num_pins)
+{
+ if (first_pin + num_pins > devi->npins)
+ return (false);
+
+ /* Make sure the pins are consecutive. */
+ for (uint32_t pin = first_pin; pin < first_pin + num_pins - 1; pin++) {
+ if (devi->pins[pin] + 1 != devi->pins[pin + 1])
+ return (false);
+ }
+
+ return (true);
+}
+
+static int
+gpiobus_pin_access_32(device_t dev, device_t child, uint32_t first_pin,
+ uint32_t clear_pins, uint32_t change_pins, uint32_t *orig_pins)
+{
+ struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev);
+ struct gpiobus_ivar *devi = GPIOBUS_IVAR(child);
+
+ if (!gpiobus_pin_verify_32(devi, first_pin, 32))
+ return (EINVAL);
+
+ return (GPIO_PIN_ACCESS_32(sc->sc_dev, devi->pins[first_pin],
+ clear_pins, change_pins, orig_pins));
+}
+
+static int
+gpiobus_pin_config_32(device_t dev, device_t child, uint32_t first_pin,
+ uint32_t num_pins, uint32_t *pin_flags)
+{
+ struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev);
+ struct gpiobus_ivar *devi = GPIOBUS_IVAR(child);
+
+ if (num_pins > 32)
+ return (EINVAL);
+ if (!gpiobus_pin_verify_32(devi, first_pin, num_pins))
+ return (EINVAL);
+
+ return (GPIO_PIN_CONFIG_32(sc->sc_dev,
+ devi->pins[first_pin], num_pins, pin_flags));
}
static int
@@ -1084,6 +1174,8 @@ static device_method_t gpiobus_methods[] = {
DEVMETHOD(gpiobus_pin_get, gpiobus_pin_get),
DEVMETHOD(gpiobus_pin_set, gpiobus_pin_set),
DEVMETHOD(gpiobus_pin_toggle, gpiobus_pin_toggle),
+ DEVMETHOD(gpiobus_pin_access_32,gpiobus_pin_access_32),
+ DEVMETHOD(gpiobus_pin_config_32,gpiobus_pin_config_32),
DEVMETHOD(gpiobus_pin_getname, gpiobus_pin_getname),
DEVMETHOD(gpiobus_pin_setname, gpiobus_pin_setname),
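A gpiobus child whose ivars map 32 consecutive controller pins can now reach the 32-bit methods through the bus. A conservative sketch that only snapshots the pins (zero clear/change masks modify nothing; dev is assumed to be the child device):

    uint32_t orig;
    int error;

    /* Atomically read the child's pins 0..31 without changing them. */
    error = GPIOBUS_PIN_ACCESS_32(device_get_parent(dev), dev,
        0 /* first_pin */, 0 /* clear_pins */, 0 /* change_pins */, &orig);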
diff --git a/sys/dev/gpio/gpiobus_if.m b/sys/dev/gpio/gpiobus_if.m
index 8bf29839ef4e..890738c4e809 100644
--- a/sys/dev/gpio/gpiobus_if.m
+++ b/sys/dev/gpio/gpiobus_if.m
@@ -107,6 +107,36 @@ METHOD int pin_setflags {
};
#
+# Simultaneously read and/or change up to 32 adjacent pins.
+# If the device cannot change the pins simultaneously, returns EOPNOTSUPP.
+#
+# More details about using this interface can be found in sys/gpio.h
+#
+METHOD int pin_access_32 {
+ device_t dev;
+ device_t child;
+ uint32_t first_pin;
+ uint32_t clear_pins;
+ uint32_t change_pins;
+ uint32_t *orig_pins;
+};
+
+#
+# Simultaneously configure up to 32 adjacent pins.
+# This is intended to change the configuration of all the pins simultaneously,
+# but unlike pin_access_32, this will not fail if the hardware can't do so.
+#
+# More details about using this interface can be found in sys/gpio.h
+#
+METHOD int pin_config_32 {
+ device_t dev;
+ device_t child;
+ uint32_t first_pin;
+ uint32_t num_pins;
+ uint32_t *pin_flags;
+};
+
+#
# Get the pin name
#
METHOD int pin_getname {
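A companion sketch for the config method (again hypothetical, assuming the
same kind of child mapping): configure four consecutive mapped pins as
outputs in one call.  num_pins greater than 32 is rejected with EINVAL
before the controller is consulted.

	static int
	mydev_init_bank(device_t dev)
	{
		/* One flags word per pin, as described in sys/gpio.h. */
		uint32_t flags[4] = { GPIO_PIN_OUTPUT, GPIO_PIN_OUTPUT,
		    GPIO_PIN_OUTPUT, GPIO_PIN_OUTPUT };

		return (GPIOBUS_PIN_CONFIG_32(device_get_parent(dev), dev,
		    0, nitems(flags), flags));
	}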
diff --git a/sys/dev/gpio/gpiobus_internal.h b/sys/dev/gpio/gpiobus_internal.h
index de3f57663132..58f862343403 100644
--- a/sys/dev/gpio/gpiobus_internal.h
+++ b/sys/dev/gpio/gpiobus_internal.h
@@ -42,6 +42,9 @@ void gpiobus_free_ivars(struct gpiobus_ivar *);
int gpiobus_read_ivar(device_t, device_t, int, uintptr_t *);
int gpiobus_acquire_pin(device_t, uint32_t);
void gpiobus_release_pin(device_t, uint32_t);
+int gpiobus_child_location(device_t, device_t, struct sbuf *);
+device_t gpiobus_add_child_common(device_t, u_int, const char *, int, size_t);
+int gpiobus_add_gpioc(device_t);
extern driver_t gpiobus_driver;
#endif
diff --git a/sys/dev/gpio/gpioc.c b/sys/dev/gpio/gpioc.c
index 87fed38ebe3e..5a60f939dc78 100644
--- a/sys/dev/gpio/gpioc.c
+++ b/sys/dev/gpio/gpioc.c
@@ -45,7 +45,6 @@
#include <dev/gpio/gpiobusvar.h>
-#include "gpio_if.h"
#include "gpiobus_if.h"
#undef GPIOC_DEBUG
@@ -59,7 +58,7 @@
struct gpioc_softc {
device_t sc_dev; /* gpiocX dev */
- device_t sc_pdev; /* gpioX dev */
+ device_t sc_pdev; /* gpiobusX dev */
struct cdev *sc_ctl_dev; /* controller device */
int sc_unit;
int sc_npins;
@@ -69,6 +68,7 @@ struct gpioc_softc {
struct gpioc_pin_intr {
struct gpioc_softc *sc;
gpio_pin_t pin;
+ uint32_t intr_mode;
bool config_locked;
int intr_rid;
struct resource *intr_res;
@@ -112,8 +112,10 @@ struct gpioc_pin_event {
static MALLOC_DEFINE(M_GPIOC, "gpioc", "gpioc device data");
-static int gpioc_allocate_pin_intr(struct gpioc_pin_intr*, uint32_t);
-static int gpioc_release_pin_intr(struct gpioc_pin_intr*);
+static int gpioc_allocate_pin_intr(struct gpioc_softc*,
+ struct gpioc_pin_intr*, uint32_t, uint32_t);
+static int gpioc_release_pin_intr(struct gpioc_softc*,
+ struct gpioc_pin_intr*);
static int gpioc_attach_priv_pin(struct gpioc_cdevpriv*,
struct gpioc_pin_intr*);
static int gpioc_detach_priv_pin(struct gpioc_cdevpriv*,
@@ -191,27 +193,36 @@ number_of_events(struct gpioc_cdevpriv *priv)
}
static int
-gpioc_allocate_pin_intr(struct gpioc_pin_intr *intr_conf, uint32_t flags)
+gpioc_allocate_pin_intr(struct gpioc_softc *sc,
+ struct gpioc_pin_intr *intr_conf, uint32_t pin, uint32_t flags)
{
int err;
intr_conf->config_locked = true;
mtx_unlock(&intr_conf->mtx);
- intr_conf->intr_res = gpio_alloc_intr_resource(intr_conf->pin->dev,
+ MPASS(intr_conf->pin == NULL);
+ err = gpio_pin_get_by_bus_pinnum(sc->sc_pdev, pin, &intr_conf->pin);
+ if (err != 0)
+ goto error_exit;
+
+ intr_conf->intr_res = gpio_alloc_intr_resource(sc->sc_dev,
&intr_conf->intr_rid, RF_ACTIVE, intr_conf->pin, flags);
if (intr_conf->intr_res == NULL) {
err = ENXIO;
- goto error_exit;
+ goto error_pin;
}
- err = bus_setup_intr(intr_conf->pin->dev, intr_conf->intr_res,
+ err = bus_setup_intr(sc->sc_dev, intr_conf->intr_res,
INTR_TYPE_MISC | INTR_MPSAFE, NULL, gpioc_interrupt_handler,
intr_conf, &intr_conf->intr_cookie);
- if (err != 0)
- goto error_exit;
+ if (err != 0) {
+ bus_release_resource(sc->sc_dev, intr_conf->intr_res);
+ intr_conf->intr_res = NULL;
+ goto error_pin;
+ }
- intr_conf->pin->flags = flags;
+ intr_conf->intr_mode = flags;
error_exit:
mtx_lock(&intr_conf->mtx);
@@ -219,10 +230,15 @@ error_exit:
wakeup(&intr_conf->config_locked);
return (err);
+
+error_pin:
+ gpio_pin_release(intr_conf->pin);
+ intr_conf->pin = NULL;
+ goto error_exit;
}
static int
-gpioc_release_pin_intr(struct gpioc_pin_intr *intr_conf)
+gpioc_release_pin_intr(struct gpioc_softc *sc, struct gpioc_pin_intr *intr_conf)
{
int err;
@@ -230,8 +246,8 @@ gpioc_release_pin_intr(struct gpioc_pin_intr *intr_conf)
mtx_unlock(&intr_conf->mtx);
if (intr_conf->intr_cookie != NULL) {
- err = bus_teardown_intr(intr_conf->pin->dev,
- intr_conf->intr_res, intr_conf->intr_cookie);
+ err = bus_teardown_intr(sc->sc_dev, intr_conf->intr_res,
+ intr_conf->intr_cookie);
if (err != 0)
goto error_exit;
else
@@ -239,7 +255,7 @@ gpioc_release_pin_intr(struct gpioc_pin_intr *intr_conf)
}
if (intr_conf->intr_res != NULL) {
- err = bus_release_resource(intr_conf->pin->dev, SYS_RES_IRQ,
+ err = bus_release_resource(sc->sc_dev, SYS_RES_IRQ,
intr_conf->intr_rid, intr_conf->intr_res);
if (err != 0)
goto error_exit;
@@ -249,7 +265,10 @@ gpioc_release_pin_intr(struct gpioc_pin_intr *intr_conf)
}
}
- intr_conf->pin->flags = 0;
+ gpio_pin_release(intr_conf->pin);
+ intr_conf->pin = NULL;
+
+ intr_conf->intr_mode = 0;
err = 0;
error_exit:
@@ -386,7 +405,7 @@ gpioc_get_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv,
struct gpioc_privs *priv_link;
uint32_t flags;
- flags = intr_conf->pin->flags;
+ flags = intr_conf->intr_mode;
if (flags == 0)
return (0);
@@ -411,7 +430,7 @@ gpioc_set_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv,
int res;
res = 0;
- if (intr_conf->pin->flags == 0 && flags == 0) {
+ if (intr_conf->intr_mode == 0 && flags == 0) {
/* No interrupt configured and none requested: Do nothing. */
return (0);
}
@@ -419,17 +438,17 @@ gpioc_set_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv,
while (intr_conf->config_locked == true)
mtx_sleep(&intr_conf->config_locked, &intr_conf->mtx, 0,
"gpicfg", 0);
- if (intr_conf->pin->flags == 0 && flags != 0) {
+ if (intr_conf->intr_mode == 0 && flags != 0) {
/*
* No interrupt is configured, but one is requested: Allocate
* and setup interrupt on the according pin.
*/
- res = gpioc_allocate_pin_intr(intr_conf, flags);
+ res = gpioc_allocate_pin_intr(sc, intr_conf, pin, flags);
if (res == 0)
res = gpioc_attach_priv_pin(priv, intr_conf);
if (res == EEXIST)
res = 0;
- } else if (intr_conf->pin->flags == flags) {
+ } else if (intr_conf->intr_mode == flags) {
/*
* Same interrupt requested as already configured: Attach the
* cdevpriv to the corresponding pin.
@@ -437,14 +456,14 @@ gpioc_set_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv,
res = gpioc_attach_priv_pin(priv, intr_conf);
if (res == EEXIST)
res = 0;
- } else if (intr_conf->pin->flags != 0 && flags == 0) {
+ } else if (intr_conf->intr_mode != 0 && flags == 0) {
/*
* Interrupt configured, but none requested: Teardown and
* release the pin when no other cdevpriv is attached. Otherwise
* just detach pin and cdevpriv from each other.
*/
if (gpioc_intr_reconfig_allowed(priv, intr_conf)) {
- res = gpioc_release_pin_intr(intr_conf);
+ res = gpioc_release_pin_intr(sc, intr_conf);
}
if (res == 0)
res = gpioc_detach_priv_pin(priv, intr_conf);
@@ -456,9 +475,10 @@ gpioc_set_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv,
if (!gpioc_intr_reconfig_allowed(priv, intr_conf))
res = EBUSY;
else {
- res = gpioc_release_pin_intr(intr_conf);
+ res = gpioc_release_pin_intr(sc, intr_conf);
if (res == 0)
- res = gpioc_allocate_pin_intr(intr_conf, flags);
+ res = gpioc_allocate_pin_intr(sc, intr_conf,
+ pin, flags);
if (res == 0)
res = gpioc_attach_priv_pin(priv, intr_conf);
if (res == EEXIST)
@@ -475,18 +495,16 @@ gpioc_interrupt_handler(void *arg)
{
struct gpioc_pin_intr *intr_conf;
struct gpioc_privs *privs;
- struct gpioc_softc *sc;
sbintime_t evtime;
- uint32_t pin_state;
+ bool pin_state;
intr_conf = arg;
- sc = intr_conf->sc;
/* Capture time and pin state first. */
evtime = sbinuptime();
- if (intr_conf->pin->flags & GPIO_INTR_EDGE_BOTH)
- GPIO_PIN_GET(sc->sc_pdev, intr_conf->pin->pin, &pin_state);
- else if (intr_conf->pin->flags & GPIO_INTR_EDGE_RISING)
+ if (intr_conf->intr_mode & GPIO_INTR_EDGE_BOTH)
+ gpio_pin_is_active(intr_conf->pin, &pin_state);
+ else if (intr_conf->intr_mode & GPIO_INTR_EDGE_RISING)
pin_state = true;
else
pin_state = false;
@@ -575,18 +593,11 @@ gpioc_attach(device_t dev)
sc->sc_pdev = device_get_parent(dev);
sc->sc_unit = device_get_unit(dev);
- err = GPIO_PIN_MAX(sc->sc_pdev, &sc->sc_npins);
- sc->sc_npins++; /* Number of pins is one more than max pin number. */
- if (err != 0)
- return (err);
+ sc->sc_npins = gpiobus_get_npins(dev);
sc->sc_pin_intr = malloc(sizeof(struct gpioc_pin_intr) * sc->sc_npins,
M_GPIOC, M_WAITOK | M_ZERO);
for (int i = 0; i < sc->sc_npins; i++) {
- sc->sc_pin_intr[i].pin = malloc(sizeof(struct gpiobus_pin),
- M_GPIOC, M_WAITOK | M_ZERO);
sc->sc_pin_intr[i].sc = sc;
- sc->sc_pin_intr[i].pin->pin = i;
- sc->sc_pin_intr[i].pin->dev = sc->sc_pdev;
mtx_init(&sc->sc_pin_intr[i].mtx, "gpioc pin", NULL, MTX_DEF);
SLIST_INIT(&sc->sc_pin_intr[i].privs);
}
@@ -610,20 +621,16 @@ static int
gpioc_detach(device_t dev)
{
struct gpioc_softc *sc = device_get_softc(dev);
- int err;
if (sc->sc_ctl_dev)
destroy_dev(sc->sc_ctl_dev);
for (int i = 0; i < sc->sc_npins; i++) {
mtx_destroy(&sc->sc_pin_intr[i].mtx);
- free(sc->sc_pin_intr[i].pin, M_GPIOC);
+ MPASS(sc->sc_pin_intr[i].pin == NULL);
}
free(sc->sc_pin_intr, M_GPIOC);
- if ((err = bus_generic_detach(dev)) != 0)
- return (err);
-
return (0);
}
@@ -655,7 +662,7 @@ gpioc_cdevpriv_dtor(void *data)
KASSERT(consistency == 1,
("inconsistent links between pin config and cdevpriv"));
if (gpioc_intr_reconfig_allowed(priv, pin_link->pin)) {
- gpioc_release_pin_intr(pin_link->pin);
+ gpioc_release_pin_intr(priv->sc, pin_link->pin);
}
mtx_unlock(&pin_link->pin->mtx);
SLIST_REMOVE(&priv->pins, pin_link, gpioc_pins, next);
@@ -778,7 +785,6 @@ static int
gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
struct thread *td)
{
- device_t bus;
int max_pin, res;
struct gpioc_softc *sc = cdev->si_drv1;
struct gpioc_cdevpriv *priv;
@@ -789,30 +795,32 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
struct gpio_event_config *evcfg;
uint32_t caps, intrflags;
- bus = GPIO_GET_BUS(sc->sc_pdev);
- if (bus == NULL)
- return (EINVAL);
switch (cmd) {
case GPIOMAXPIN:
- max_pin = -1;
- res = GPIO_PIN_MAX(sc->sc_pdev, &max_pin);
+ res = 0;
+ max_pin = sc->sc_npins - 1;
bcopy(&max_pin, arg, sizeof(max_pin));
break;
case GPIOGETCONFIG:
bcopy(arg, &pin, sizeof(pin));
dprintf("get config pin %d\n", pin.gp_pin);
- res = GPIO_PIN_GETFLAGS(sc->sc_pdev, pin.gp_pin,
+ res = GPIOBUS_PIN_GETFLAGS(sc->sc_pdev, sc->sc_dev, pin.gp_pin,
&pin.gp_flags);
/* Fail early */
- if (res)
+ if (res != 0)
break;
res = devfs_get_cdevpriv((void **)&priv);
- if (res)
+ if (res != 0)
break;
pin.gp_flags |= gpioc_get_intr_config(sc, priv,
pin.gp_pin);
- GPIO_PIN_GETCAPS(sc->sc_pdev, pin.gp_pin, &pin.gp_caps);
- GPIOBUS_PIN_GETNAME(bus, pin.gp_pin, pin.gp_name);
+ res = GPIOBUS_PIN_GETCAPS(sc->sc_pdev, sc->sc_dev, pin.gp_pin,
+ &pin.gp_caps);
+ if (res != 0)
+ break;
+ res = GPIOBUS_PIN_GETNAME(sc->sc_pdev, pin.gp_pin, pin.gp_name);
+ if (res != 0)
+ break;
bcopy(&pin, arg, sizeof(pin));
break;
case GPIOSETCONFIG:
@@ -821,7 +829,8 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
res = devfs_get_cdevpriv((void **)&priv);
if (res != 0)
break;
- res = GPIO_PIN_GETCAPS(sc->sc_pdev, pin.gp_pin, &caps);
+ res = GPIOBUS_PIN_GETCAPS(sc->sc_pdev, sc->sc_dev,
+ pin.gp_pin, &caps);
if (res != 0)
break;
res = gpio_check_flags(caps, pin.gp_flags);
@@ -847,8 +856,8 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
}
if (res != 0)
break;
- res = GPIO_PIN_SETFLAGS(sc->sc_pdev, pin.gp_pin,
- (pin.gp_flags & ~GPIO_INTR_MASK));
+ res = GPIOBUS_PIN_SETFLAGS(sc->sc_pdev, sc->sc_dev, pin.gp_pin,
+ pin.gp_flags & ~GPIO_INTR_MASK);
if (res != 0)
break;
res = gpioc_set_intr_config(sc, priv, pin.gp_pin,
@@ -856,40 +865,43 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
break;
case GPIOGET:
bcopy(arg, &req, sizeof(req));
- res = GPIO_PIN_GET(sc->sc_pdev, req.gp_pin,
+ res = GPIOBUS_PIN_GET(sc->sc_pdev, sc->sc_dev, req.gp_pin,
&req.gp_value);
- dprintf("read pin %d -> %d\n",
+ if (res != 0)
+ break;
+ dprintf("read pin %d -> %d\n",
req.gp_pin, req.gp_value);
bcopy(&req, arg, sizeof(req));
break;
case GPIOSET:
bcopy(arg, &req, sizeof(req));
- res = GPIO_PIN_SET(sc->sc_pdev, req.gp_pin,
+ res = GPIOBUS_PIN_SET(sc->sc_pdev, sc->sc_dev, req.gp_pin,
req.gp_value);
- dprintf("write pin %d -> %d\n",
+ dprintf("write pin %d -> %d\n",
req.gp_pin, req.gp_value);
break;
case GPIOTOGGLE:
bcopy(arg, &req, sizeof(req));
- dprintf("toggle pin %d\n",
+ dprintf("toggle pin %d\n",
req.gp_pin);
- res = GPIO_PIN_TOGGLE(sc->sc_pdev, req.gp_pin);
+ res = GPIOBUS_PIN_TOGGLE(sc->sc_pdev, sc->sc_dev, req.gp_pin);
break;
case GPIOSETNAME:
bcopy(arg, &pin, sizeof(pin));
dprintf("set name on pin %d\n", pin.gp_pin);
- res = GPIOBUS_PIN_SETNAME(bus, pin.gp_pin,
+ res = GPIOBUS_PIN_SETNAME(sc->sc_pdev, pin.gp_pin,
pin.gp_name);
break;
case GPIOACCESS32:
a32 = (struct gpio_access_32 *)arg;
- res = GPIO_PIN_ACCESS_32(sc->sc_pdev, a32->first_pin,
- a32->clear_pins, a32->change_pins, &a32->orig_pins);
+ res = GPIOBUS_PIN_ACCESS_32(sc->sc_pdev, sc->sc_dev,
+ a32->first_pin, a32->clear_pins, a32->change_pins,
+ &a32->orig_pins);
break;
case GPIOCONFIG32:
c32 = (struct gpio_config_32 *)arg;
- res = GPIO_PIN_CONFIG_32(sc->sc_pdev, c32->first_pin,
- c32->num_pins, c32->pin_flags);
+ res = GPIOBUS_PIN_CONFIG_32(sc->sc_pdev, sc->sc_dev,
+ c32->first_pin, c32->num_pins, c32->pin_flags);
break;
case GPIOCONFIGEVENTS:
evcfg = (struct gpio_event_config *)arg;
@@ -1050,9 +1062,6 @@ static device_method_t gpioc_methods[] = {
DEVMETHOD(device_probe, gpioc_probe),
DEVMETHOD(device_attach, gpioc_attach),
DEVMETHOD(device_detach, gpioc_detach),
- DEVMETHOD(device_shutdown, bus_generic_shutdown),
- DEVMETHOD(device_suspend, bus_generic_suspend),
- DEVMETHOD(device_resume, bus_generic_resume),
DEVMETHOD_END
};
@@ -1063,5 +1072,5 @@ driver_t gpioc_driver = {
sizeof(struct gpioc_softc)
};
-DRIVER_MODULE(gpioc, gpio, gpioc_driver, 0, 0);
+DRIVER_MODULE(gpioc, gpiobus, gpioc_driver, 0, 0);
MODULE_VERSION(gpioc, 1);
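With gpioc attached to gpiobus and GPIOACCESS32/GPIOCONFIG32 rerouted
through the GPIOBUS_* methods, a hypothetical userland sketch (device path
and pin numbers illustrative; the underlying controller must implement
pin_access_32, otherwise the ioctl fails with EOPNOTSUPP):

	#include <sys/ioctl.h>
	#include <sys/gpio.h>

	#include <fcntl.h>
	#include <stdio.h>

	int
	main(void)
	{
		struct gpio_access_32 a32 = {
			.first_pin = 0,
			.clear_pins = 0x1,	/* drive pin 0 low */
			.change_pins = 0x2,	/* toggle pin 1 */
		};
		int fd = open("/dev/gpioc0", O_RDWR);

		if (fd == -1 || ioctl(fd, GPIOACCESS32, &a32) == -1) {
			perror("gpioc0");
			return (1);
		}
		printf("previous state of pins 0-31: %#x\n", a32.orig_pins);
		return (0);
	}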
diff --git a/sys/dev/gpio/gpioled.c b/sys/dev/gpio/gpioled.c
index ba53cb733971..71af5741b2fe 100644
--- a/sys/dev/gpio/gpioled.c
+++ b/sys/dev/gpio/gpioled.c
@@ -55,13 +55,13 @@
device_get_nameunit((_sc)->sc_dev), "gpioled", MTX_DEF)
#define GPIOLED_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_mtx)
-struct gpioled_softc
+struct gpioled_softc
{
device_t sc_dev;
device_t sc_busdev;
struct mtx sc_mtx;
struct cdev *sc_leddev;
- int sc_invert;
+ int sc_softinvert;
};
static void gpioled_control(void *, int);
@@ -69,20 +69,19 @@ static int gpioled_probe(device_t);
static int gpioled_attach(device_t);
static int gpioled_detach(device_t);
-static void
+static void
gpioled_control(void *priv, int onoff)
{
struct gpioled_softc *sc;
sc = (struct gpioled_softc *)priv;
+ if (onoff == -1) /* Keep the current state. */
+ return;
+ if (sc->sc_softinvert)
+ onoff = !onoff;
GPIOLED_LOCK(sc);
- if (GPIOBUS_PIN_SETFLAGS(sc->sc_busdev, sc->sc_dev, GPIOLED_PIN,
- GPIO_PIN_OUTPUT) == 0) {
- if (sc->sc_invert)
- onoff = !onoff;
- GPIOBUS_PIN_SET(sc->sc_busdev, sc->sc_dev, GPIOLED_PIN,
- onoff ? GPIO_PIN_HIGH : GPIO_PIN_LOW);
- }
+ GPIOBUS_PIN_SET(sc->sc_busdev, sc->sc_dev, GPIOLED_PIN,
+ onoff ? GPIO_PIN_HIGH : GPIO_PIN_LOW);
GPIOLED_UNLOCK(sc);
}
@@ -95,26 +94,101 @@ gpioled_probe(device_t dev)
}
static int
+gpioled_inv(device_t dev, uint32_t *pin_flags)
+{
+ struct gpioled_softc *sc;
+ int invert;
+ uint32_t pin_caps;
+
+ sc = device_get_softc(dev);
+
+ if (resource_int_value(device_get_name(dev),
+ device_get_unit(dev), "invert", &invert))
+ invert = 0;
+
+ if (GPIOBUS_PIN_GETCAPS(sc->sc_busdev, sc->sc_dev, GPIOLED_PIN,
+ &pin_caps) != 0) {
+ if (bootverbose)
+ device_printf(sc->sc_dev, "unable to get pin caps\n");
+ return (-1);
+ }
+ if (pin_caps & GPIO_PIN_INVOUT)
+ *pin_flags &= ~GPIO_PIN_INVOUT;
+ sc->sc_softinvert = 0;
+ if (invert) {
+ const char *invmode;
+
+ if (resource_string_value(device_get_name(dev),
+ device_get_unit(dev), "invmode", &invmode))
+ invmode = NULL;
+
+ if (invmode) {
+ if (!strcmp(invmode, "sw"))
+ sc->sc_softinvert = 1;
+ else if (!strcmp(invmode, "hw")) {
+ if (pin_caps & GPIO_PIN_INVOUT)
+ *pin_flags |= GPIO_PIN_INVOUT;
+ else {
+ device_printf(sc->sc_dev, "hardware pin inversion not supported\n");
+ return (-1);
+ }
+ } else {
+ if (strcmp(invmode, "auto") != 0)
+ device_printf(sc->sc_dev, "invalid pin inversion mode\n");
+ invmode = NULL;
+ }
+ }
+ /*
+ * Auto inversion mode: use hardware support if available,
+ * else fall back to software emulation.
+ */
+ if (invmode == NULL) {
+ if (pin_caps & GPIO_PIN_INVOUT)
+ *pin_flags |= GPIO_PIN_INVOUT;
+ else
+ sc->sc_softinvert = 1;
+ }
+ }
+ MPASS(!invert ||
+ (((*pin_flags & GPIO_PIN_INVOUT) != 0) && !sc->sc_softinvert) ||
+ (((*pin_flags & GPIO_PIN_INVOUT) == 0) && sc->sc_softinvert));
+ return (invert);
+}
+
+static int
gpioled_attach(device_t dev)
{
struct gpioled_softc *sc;
int state;
const char *name;
+ uint32_t pin_flags;
+ int invert;
sc = device_get_softc(dev);
sc->sc_dev = dev;
sc->sc_busdev = device_get_parent(dev);
GPIOLED_LOCK_INIT(sc);
- state = 0;
-
- if (resource_string_value(device_get_name(dev),
+ if (resource_string_value(device_get_name(dev),
device_get_unit(dev), "name", &name))
name = NULL;
- resource_int_value(device_get_name(dev),
- device_get_unit(dev), "invert", &sc->sc_invert);
- resource_int_value(device_get_name(dev),
- device_get_unit(dev), "state", &state);
+
+ if (resource_int_value(device_get_name(dev),
+ device_get_unit(dev), "state", &state))
+ state = 0;
+
+ pin_flags = GPIO_PIN_OUTPUT;
+ invert = gpioled_inv(dev, &pin_flags);
+ if (invert < 0)
+ return (ENXIO);
+ device_printf(sc->sc_dev, "state %d invert %s\n",
+ state, (invert ? (sc->sc_softinvert ? "sw" : "hw") : "no"));
+ if (GPIOBUS_PIN_SETFLAGS(sc->sc_busdev, sc->sc_dev, GPIOLED_PIN,
+ pin_flags) != 0) {
+ if (bootverbose)
+ device_printf(sc->sc_dev, "unable to set pin flags, %#x\n", pin_flags);
+ return (ENXIO);
+ }
sc->sc_leddev = led_create_state(gpioled_control, sc, name ? name :
device_get_nameunit(dev), state);
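The reworked inversion logic is driven entirely by hints.  An illustrative
device.hints fragment (unit, names, and pin mask hypothetical):

	# "pins" is a bitmask of controller pins, per gpiobus(4); 0x1 = pin 0
	hint.gpioled.0.at="gpiobus0"
	hint.gpioled.0.pins="1"
	hint.gpioled.0.name="status"
	hint.gpioled.0.state="1"
	hint.gpioled.0.invert="1"
	# invmode is "hw", "sw", or "auto" (the default when unset or invalid)
	hint.gpioled.0.invmode="auto"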
diff --git a/sys/dev/gpio/ofw_gpiobus.c b/sys/dev/gpio/ofw_gpiobus.c
index fc5fb03d6824..da1bfbc268b8 100644
--- a/sys/dev/gpio/ofw_gpiobus.c
+++ b/sys/dev/gpio/ofw_gpiobus.c
@@ -426,6 +426,9 @@ ofw_gpiobus_attach(device_t dev)
err = gpiobus_init_softc(dev);
if (err != 0)
return (err);
+ err = gpiobus_add_gpioc(dev);
+ if (err != 0)
+ return (err);
bus_identify_children(dev);
bus_enumerate_hinted_children(dev);
/*
@@ -451,28 +454,22 @@ ofw_gpiobus_add_child(device_t dev, u_int order, const char *name, int unit)
device_t child;
struct ofw_gpiobus_devinfo *devi;
- child = device_add_child_ordered(dev, order, name, unit);
+ child = gpiobus_add_child_common(dev, order, name, unit,
+ sizeof(struct ofw_gpiobus_devinfo));
if (child == NULL)
- return (child);
- devi = malloc(sizeof(struct ofw_gpiobus_devinfo), M_DEVBUF,
- M_NOWAIT | M_ZERO);
- if (devi == NULL) {
- device_delete_child(dev, child);
- return (0);
- }
+ return (NULL);
/*
* NULL all the OFW-related parts of the ivars for non-OFW
* children.
*/
+ devi = device_get_ivars(child);
devi->opd_obdinfo.obd_node = -1;
devi->opd_obdinfo.obd_name = NULL;
devi->opd_obdinfo.obd_compat = NULL;
devi->opd_obdinfo.obd_type = NULL;
devi->opd_obdinfo.obd_model = NULL;
- device_set_ivars(child, devi);
-
return (child);
}
diff --git a/sys/dev/hid/hidbus.c b/sys/dev/hid/hidbus.c
index 96d36c8d191d..683449fca49c 100644
--- a/sys/dev/hid/hidbus.c
+++ b/sys/dev/hid/hidbus.c
@@ -65,7 +65,7 @@ struct hidbus_ivars {
struct mtx *mtx; /* child intr mtx */
hid_intr_t *intr_handler; /* executed under mtx*/
void *intr_ctx;
- unsigned int refcnt; /* protected by mtx */
+ bool active; /* protected by mtx */
struct epoch_context epoch_ctx;
CK_STAILQ_ENTRY(hidbus_ivars) link;
};
@@ -398,7 +398,7 @@ hidbus_child_detached(device_t bus, device_t child)
struct hidbus_softc *sc = device_get_softc(bus);
struct hidbus_ivars *tlc = device_get_ivars(child);
- KASSERT(tlc->refcnt == 0, ("Child device is running"));
+ KASSERT(!tlc->active, ("Child device is running"));
tlc->mtx = &sc->mtx;
tlc->intr_handler = NULL;
tlc->flags &= ~HIDBUS_FLAG_CAN_POLL;
@@ -423,7 +423,7 @@ hidbus_child_deleted(device_t bus, device_t child)
struct hidbus_ivars *tlc = device_get_ivars(child);
sx_xlock(&sc->sx);
- KASSERT(tlc->refcnt == 0, ("Child device is running"));
+ KASSERT(!tlc->active, ("Child device is running"));
CK_STAILQ_REMOVE(&sc->tlcs, tlc, hidbus_ivars, link);
sx_unlock(&sc->sx);
epoch_call(INPUT_EPOCH, hidbus_ivar_dtor, &tlc->epoch_ctx);
@@ -572,7 +572,7 @@ hidbus_intr(void *context, void *buf, hid_size_t len)
if (!HID_IN_POLLING_MODE())
epoch_enter_preempt(INPUT_EPOCH, &et);
CK_STAILQ_FOREACH(tlc, &sc->tlcs, link) {
- if (tlc->refcnt == 0 || tlc->intr_handler == NULL)
+ if (!tlc->active || tlc->intr_handler == NULL)
continue;
if (HID_IN_POLLING_MODE()) {
if ((tlc->flags & HIDBUS_FLAG_CAN_POLL) != 0)
@@ -602,21 +602,14 @@ hidbus_intr_start(device_t bus, device_t child)
MPASS(bus == device_get_parent(child));
struct hidbus_softc *sc = device_get_softc(bus);
struct hidbus_ivars *ivar = device_get_ivars(child);
- struct hidbus_ivars *tlc;
- bool refcnted = false;
int error;
if (sx_xlock_sig(&sc->sx) != 0)
return (EINTR);
- CK_STAILQ_FOREACH(tlc, &sc->tlcs, link) {
- refcnted |= (tlc->refcnt != 0);
- if (tlc == ivar) {
- mtx_lock(tlc->mtx);
- ++tlc->refcnt;
- mtx_unlock(tlc->mtx);
- }
- }
- error = refcnted ? 0 : hid_intr_start(bus);
+ mtx_lock(ivar->mtx);
+ ivar->active = true;
+ mtx_unlock(ivar->mtx);
+ error = hid_intr_start(bus);
sx_unlock(&sc->sx);
return (error);
@@ -629,21 +622,17 @@ hidbus_intr_stop(device_t bus, device_t child)
struct hidbus_softc *sc = device_get_softc(bus);
struct hidbus_ivars *ivar = device_get_ivars(child);
struct hidbus_ivars *tlc;
- bool refcnted = false;
+ bool active = false;
int error;
if (sx_xlock_sig(&sc->sx) != 0)
return (EINTR);
- CK_STAILQ_FOREACH(tlc, &sc->tlcs, link) {
- if (tlc == ivar) {
- mtx_lock(tlc->mtx);
- MPASS(tlc->refcnt != 0);
- --tlc->refcnt;
- mtx_unlock(tlc->mtx);
- }
- refcnted |= (tlc->refcnt != 0);
- }
- error = refcnted ? 0 : hid_intr_stop(bus);
+ mtx_lock(ivar->mtx);
+ ivar->active = false;
+ mtx_unlock(ivar->mtx);
+ CK_STAILQ_FOREACH(tlc, &sc->tlcs, link)
+ active |= tlc->active;
+ error = active ? 0 : hid_intr_stop(bus);
sx_unlock(&sc->sx);
return (error);
diff --git a/sys/dev/hid/hidquirk.h b/sys/dev/hid/hidquirk.h
index 4f8b8acbe201..f6fa9f88c6c9 100644
--- a/sys/dev/hid/hidquirk.h
+++ b/sys/dev/hid/hidquirk.h
@@ -50,6 +50,7 @@
HQ(IS_XBOX360GP), /* device is XBox 360 GamePad */ \
HQ(NOWRITE), /* device does not support writes */ \
HQ(IICHID_SAMPLING), /* IIC backend runs in sampling mode */ \
+ HQ(NO_READAHEAD), /* Disable interrupt after one report */\
\
/* Various quirks */ \
HQ(HID_IGNORE), /* device should be ignored by hid class */ \
diff --git a/sys/dev/hid/hidraw.c b/sys/dev/hid/hidraw.c
index 06f70070f61b..4855843cd265 100644
--- a/sys/dev/hid/hidraw.c
+++ b/sys/dev/hid/hidraw.c
@@ -85,6 +85,12 @@ SYSCTL_INT(_hw_hid_hidraw, OID_AUTO, debug, CTLFLAG_RWTUN,
free((buf), M_DEVBUF); \
}
+#ifdef HIDRAW_MAKE_UHID_ALIAS
+#define HIDRAW_NAME "uhid"
+#else
+#define HIDRAW_NAME "hidraw"
+#endif
+
struct hidraw_softc {
device_t sc_dev; /* base device */
@@ -183,8 +189,8 @@ hidraw_identify(driver_t *driver, device_t parent)
{
device_t child;
- if (device_find_child(parent, "hidraw", DEVICE_UNIT_ANY) == NULL) {
- child = BUS_ADD_CHILD(parent, 0, "hidraw",
+ if (device_find_child(parent, HIDRAW_NAME, DEVICE_UNIT_ANY) == NULL) {
+ child = BUS_ADD_CHILD(parent, 0, HIDRAW_NAME,
device_get_unit(parent));
if (child != NULL)
hidbus_set_index(child, HIDRAW_INDEX);
@@ -1050,7 +1056,7 @@ static device_method_t hidraw_methods[] = {
};
static driver_t hidraw_driver = {
- "hidraw",
+ HIDRAW_NAME,
hidraw_methods,
sizeof(struct hidraw_softc)
};
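HIDRAW_MAKE_UHID_ALIAS is a compile-time knob; assuming a matching entry in
sys/conf/options (not shown in this hunk), it would be enabled from a kernel
config in the usual way:

	options 	HIDRAW_MAKE_UHID_ALIAS	# hidraw instances attach as uhidN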
diff --git a/sys/dev/hid/hkbd.c b/sys/dev/hid/hkbd.c
index 5eff7557bc42..6255c42d3b62 100644
--- a/sys/dev/hid/hkbd.c
+++ b/sys/dev/hid/hkbd.c
@@ -95,14 +95,16 @@
#ifdef HID_DEBUG
static int hkbd_debug = 0;
+#endif
static int hkbd_no_leds = 0;
static SYSCTL_NODE(_hw_hid, OID_AUTO, hkbd, CTLFLAG_RW, 0, "USB keyboard");
+#ifdef HID_DEBUG
SYSCTL_INT(_hw_hid_hkbd, OID_AUTO, debug, CTLFLAG_RWTUN,
&hkbd_debug, 0, "Debug level");
+#endif
SYSCTL_INT(_hw_hid_hkbd, OID_AUTO, no_leds, CTLFLAG_RWTUN,
&hkbd_no_leds, 0, "Disables setting of keyboard leds");
-#endif
#define INPUT_EPOCH global_epoch_preempt
@@ -1596,8 +1598,16 @@ hkbd_ioctl_locked(keyboard_t *kbd, u_long cmd, caddr_t arg)
sc->sc_state &= ~LOCK_MASK;
sc->sc_state |= *(int *)arg;
- /* set LEDs and quit */
- return (hkbd_ioctl_locked(kbd, KDSETLED, arg));
+ /*
+ * Attempt to set the keyboard LEDs; ignore the return value
+ * intentionally. Note: Some hypervisors/emulators (e.g., QEMU and
+ * Parallels, at least as of this writing) may fail when
+ * setting LEDs. This can prevent kbdmux from attaching the
+ * keyboard, which in turn may block the console from accessing
+ * it.
+ */
+ (void)hkbd_ioctl_locked(kbd, KDSETLED, arg);
+ return (0);
case KDSETREPEAT: /* set keyboard repeat rate (new
* interface) */
@@ -1766,10 +1776,8 @@ hkbd_set_leds(struct hkbd_softc *sc, uint8_t leds)
SYSCONS_LOCK_ASSERT();
DPRINTF("leds=0x%02x\n", leds);
-#ifdef HID_DEBUG
if (hkbd_no_leds)
return (0);
-#endif
memset(sc->sc_buffer, 0, HKBD_BUFFER_SIZE);
@@ -1820,6 +1828,7 @@ hkbd_set_leds(struct hkbd_softc *sc, uint8_t leds)
SYSCONS_UNLOCK();
error = hid_write(sc->sc_dev, buf, len);
SYSCONS_LOCK();
+ DPRINTF("error %d", error);
return (error);
}
diff --git a/sys/dev/hid/ietp.c b/sys/dev/hid/ietp.c
index 217585a7948b..73a5cb7414d4 100644
--- a/sys/dev/hid/ietp.c
+++ b/sys/dev/hid/ietp.c
@@ -102,6 +102,7 @@ struct ietp_softc {
device_t dev;
struct evdev_dev *evdev;
+ bool open;
uint8_t report_id;
hid_size_t report_len;
@@ -217,13 +218,25 @@ static const struct evdev_methods ietp_evdev_methods = {
static int
ietp_ev_open(struct evdev_dev *evdev)
{
- return (hid_intr_start(evdev_get_softc(evdev)));
+ struct ietp_softc *sc = evdev_get_softc(evdev);
+ int error;
+
+ error = hid_intr_start(sc->dev);
+ if (error == 0)
+ sc->open = true;
+ return (error);
}
static int
ietp_ev_close(struct evdev_dev *evdev)
{
- return (hid_intr_stop(evdev_get_softc(evdev)));
+ struct ietp_softc *sc = evdev_get_softc(evdev);
+ int error;
+
+ error = hid_intr_stop(sc->dev);
+ if (error == 0)
+ sc->open = false;
+ return (error);
}
static int
@@ -275,7 +288,7 @@ ietp_attach(struct ietp_softc *sc)
evdev_set_id(sc->evdev, hw->idBus, hw->idVendor, hw->idProduct,
hw->idVersion);
evdev_set_serial(sc->evdev, hw->serial);
- evdev_set_methods(sc->evdev, sc->dev, &ietp_evdev_methods);
+ evdev_set_methods(sc->evdev, sc, &ietp_evdev_methods);
evdev_set_flag(sc->evdev, EVDEV_FLAG_MT_STCOMPAT);
evdev_set_flag(sc->evdev, EVDEV_FLAG_EXT_EPOCH); /* hidbus child */
@@ -584,11 +597,13 @@ ietp_iic_set_absolute_mode(device_t dev, bool enable)
* Some ASUS touchpads need to be powered on to enter absolute mode.
*/
require_wakeup = false;
- for (i = 0; i < nitems(special_fw); i++) {
- if (sc->ic_type == special_fw[i].ic_type &&
- sc->product_id == special_fw[i].product_id) {
- require_wakeup = true;
- break;
+ if (!sc->open) {
+ for (i = 0; i < nitems(special_fw); i++) {
+ if (sc->ic_type == special_fw[i].ic_type &&
+ sc->product_id == special_fw[i].product_id) {
+ require_wakeup = true;
+ break;
+ }
}
}
diff --git a/sys/dev/hid/u2f.c b/sys/dev/hid/u2f.c
new file mode 100644
index 000000000000..ac2eba7a499d
--- /dev/null
+++ b/sys/dev/hid/u2f.c
@@ -0,0 +1,590 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022-2023 Vladimir Kondratyev <wulf@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_hid.h"
+
+#include <sys/param.h>
+#ifdef COMPAT_FREEBSD32
+#include <sys/abi_compat.h>
+#endif
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/fcntl.h>
+#include <sys/filio.h>
+#include <sys/ioccom.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/poll.h>
+#include <sys/priv.h>
+#include <sys/proc.h>
+#include <sys/selinfo.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+
+#include <dev/evdev/input.h>
+
+#define HID_DEBUG_VAR u2f_debug
+#include <dev/hid/hid.h>
+#include <dev/hid/hidbus.h>
+#include <dev/hid/hidquirk.h>
+
+#include <dev/usb/usb_ioctl.h>
+
+#ifdef HID_DEBUG
+static int u2f_debug = 0;
+static SYSCTL_NODE(_hw_hid, OID_AUTO, u2f, CTLFLAG_RW, 0,
+ "FIDO/U2F authenticator");
+SYSCTL_INT(_hw_hid_u2f, OID_AUTO, debug, CTLFLAG_RWTUN,
+ &u2f_debug, 0, "Debug level");
+#endif
+
+#define U2F_MAX_REPORT_SIZE 64
+
+/* A match on these entries will load u2f */
+static const struct hid_device_id u2f_devs[] = {
+ { HID_BUS(BUS_USB), HID_TLC(HUP_FIDO, HUF_U2FHID) },
+};
+
+struct u2f_softc {
+ device_t sc_dev; /* base device */
+ struct cdev *dev;
+
+ struct mtx sc_mtx; /* hidbus private mutex */
+ void *sc_rdesc;
+ hid_size_t sc_rdesc_size;
+ hid_size_t sc_isize;
+ hid_size_t sc_osize;
+ struct selinfo sc_rsel;
+ struct { /* driver state */
+ bool open:1; /* device is open */
+ bool aslp:1; /* waiting for device data in read() */
+ bool sel:1; /* waiting for device data in poll() */
+ bool data:1; /* input report is stored in sc_buf */
+ int reserved:28;
+ } sc_state;
+ int sc_fflags; /* access mode for open lifetime */
+
+ uint8_t sc_buf[U2F_MAX_REPORT_SIZE];
+};
+
+static d_open_t u2f_open;
+static d_read_t u2f_read;
+static d_write_t u2f_write;
+static d_ioctl_t u2f_ioctl;
+static d_poll_t u2f_poll;
+static d_kqfilter_t u2f_kqfilter;
+
+static d_priv_dtor_t u2f_dtor;
+
+static struct cdevsw u2f_cdevsw = {
+ .d_version = D_VERSION,
+ .d_open = u2f_open,
+ .d_read = u2f_read,
+ .d_write = u2f_write,
+ .d_ioctl = u2f_ioctl,
+ .d_poll = u2f_poll,
+ .d_kqfilter = u2f_kqfilter,
+ .d_name = "u2f",
+};
+
+static hid_intr_t u2f_intr;
+
+static device_probe_t u2f_probe;
+static device_attach_t u2f_attach;
+static device_detach_t u2f_detach;
+
+static int u2f_kqread(struct knote *, long);
+static void u2f_kqdetach(struct knote *);
+static void u2f_notify(struct u2f_softc *);
+
+static struct filterops u2f_filterops_read = {
+ .f_isfd = 1,
+ .f_detach = u2f_kqdetach,
+ .f_event = u2f_kqread,
+};
+
+static int
+u2f_probe(device_t dev)
+{
+ int error;
+
+ error = HIDBUS_LOOKUP_DRIVER_INFO(dev, u2f_devs);
+ if (error != 0)
+ return (error);
+
+ hidbus_set_desc(dev, "Authenticator");
+
+ return (BUS_PROBE_GENERIC);
+}
+
+static int
+u2f_attach(device_t dev)
+{
+ struct u2f_softc *sc = device_get_softc(dev);
+ struct hid_device_info *hw = __DECONST(struct hid_device_info *,
+ hid_get_device_info(dev));
+ struct make_dev_args mda;
+ int error;
+
+ sc->sc_dev = dev;
+
+ error = hid_get_report_descr(dev, &sc->sc_rdesc, &sc->sc_rdesc_size);
+ if (error != 0)
+ return (ENXIO);
+ sc->sc_isize = hid_report_size_max(sc->sc_rdesc, sc->sc_rdesc_size,
+ hid_input, NULL);
+ if (sc->sc_isize > U2F_MAX_REPORT_SIZE) {
+ device_printf(dev, "Input report size too large. Truncate.\n");
+ sc->sc_isize = U2F_MAX_REPORT_SIZE;
+ }
+ sc->sc_osize = hid_report_size_max(sc->sc_rdesc, sc->sc_rdesc_size,
+ hid_output, NULL);
+ if (sc->sc_osize > U2F_MAX_REPORT_SIZE) {
+ device_printf(dev, "Output report size too large. Truncate.\n");
+ sc->sc_osize = U2F_MAX_REPORT_SIZE;
+ }
+
+ mtx_init(&sc->sc_mtx, "u2f lock", NULL, MTX_DEF);
+ knlist_init_mtx(&sc->sc_rsel.si_note, &sc->sc_mtx);
+
+ make_dev_args_init(&mda);
+ mda.mda_flags = MAKEDEV_WAITOK;
+ mda.mda_devsw = &u2f_cdevsw;
+ mda.mda_uid = UID_ROOT;
+ mda.mda_gid = GID_U2F;
+ mda.mda_mode = 0660;
+ mda.mda_si_drv1 = sc;
+
+ error = make_dev_s(&mda, &sc->dev, "u2f/%d", device_get_unit(dev));
+ if (error) {
+ device_printf(dev, "Can not create character device\n");
+ u2f_detach(dev);
+ return (error);
+ }
+#ifdef U2F_MAKE_UHID_ALIAS
+ (void)make_dev_alias(sc->dev, "uhid%d", device_get_unit(dev));
+#endif
+
+ hid_add_dynamic_quirk(hw, HQ_NO_READAHEAD);
+
+ hidbus_set_lock(dev, &sc->sc_mtx);
+ hidbus_set_intr(dev, u2f_intr, sc);
+
+ return (0);
+}
+
+static int
+u2f_detach(device_t dev)
+{
+ struct u2f_softc *sc = device_get_softc(dev);
+
+ DPRINTF("sc=%p\n", sc);
+
+ if (sc->dev != NULL) {
+ mtx_lock(&sc->sc_mtx);
+ sc->dev->si_drv1 = NULL;
+ /* Wake everyone */
+ u2f_notify(sc);
+ mtx_unlock(&sc->sc_mtx);
+ destroy_dev(sc->dev);
+ }
+
+ hid_intr_stop(sc->sc_dev);
+
+ knlist_clear(&sc->sc_rsel.si_note, 0);
+ knlist_destroy(&sc->sc_rsel.si_note);
+ seldrain(&sc->sc_rsel);
+ mtx_destroy(&sc->sc_mtx);
+
+ return (0);
+}
+
+static void
+u2f_intr(void *context, void *buf, hid_size_t len)
+{
+ struct u2f_softc *sc = context;
+
+ mtx_assert(&sc->sc_mtx, MA_OWNED);
+
+ DPRINTFN(5, "len=%d\n", len);
+ DPRINTFN(5, "data = %*D\n", len, buf, " ");
+
+ if (sc->sc_state.data)
+ return;
+
+ if (len > sc->sc_isize)
+ len = sc->sc_isize;
+
+ bcopy(buf, sc->sc_buf, len);
+
+ /* Make sure we don't process old data */
+ if (len < sc->sc_isize)
+ bzero(sc->sc_buf + len, sc->sc_isize - len);
+
+ sc->sc_state.data = true;
+
+ u2f_notify(sc);
+}
+
+static int
+u2f_open(struct cdev *dev, int flag, int mode, struct thread *td)
+{
+ struct u2f_softc *sc = dev->si_drv1;
+ int error;
+
+ if (sc == NULL)
+ return (ENXIO);
+
+ DPRINTF("sc=%p\n", sc);
+
+ mtx_lock(&sc->sc_mtx);
+ if (sc->sc_state.open) {
+ mtx_unlock(&sc->sc_mtx);
+ return (EBUSY);
+ }
+ sc->sc_state.open = true;
+ mtx_unlock(&sc->sc_mtx);
+
+ error = devfs_set_cdevpriv(sc, u2f_dtor);
+ if (error != 0) {
+ mtx_lock(&sc->sc_mtx);
+ sc->sc_state.open = false;
+ mtx_unlock(&sc->sc_mtx);
+ return (error);
+ }
+
+ /* Set up interrupt pipe. */
+ sc->sc_state.data = false;
+ sc->sc_fflags = flag;
+
+ return (0);
+}
+
+static void
+u2f_dtor(void *data)
+{
+ struct u2f_softc *sc = data;
+
+#ifdef NOT_YET
+ /* Disable interrupts. */
+ hid_intr_stop(sc->sc_dev);
+#endif
+
+ mtx_lock(&sc->sc_mtx);
+ sc->sc_state.open = false;
+ mtx_unlock(&sc->sc_mtx);
+}
+
+static int
+u2f_read(struct cdev *dev, struct uio *uio, int flag)
+{
+ uint8_t buf[U2F_MAX_REPORT_SIZE];
+ struct u2f_softc *sc = dev->si_drv1;
+ size_t length = 0;
+ int error = 0;
+
+ DPRINTFN(1, "\n");
+
+ if (sc == NULL)
+ return (EIO);
+
+ if (!sc->sc_state.data)
+ hid_intr_start(sc->sc_dev);
+
+ mtx_lock(&sc->sc_mtx);
+ if (dev->si_drv1 == NULL) {
+ error = EIO;
+ goto exit;
+ }
+
+ while (!sc->sc_state.data) {
+ if (flag & O_NONBLOCK) {
+ error = EWOULDBLOCK;
+ goto exit;
+ }
+ sc->sc_state.aslp = true;
+ DPRINTFN(5, "sleep on %p\n", &sc->sc_buf);
+ error = mtx_sleep(&sc->sc_buf, &sc->sc_mtx, PZERO | PCATCH,
+ "u2frd", 0);
+ DPRINTFN(5, "woke, error=%d\n", error);
+ if (dev->si_drv1 == NULL)
+ error = EIO;
+ if (error) {
+ sc->sc_state.aslp = false;
+ goto exit;
+ }
+ }
+
+ if (sc->sc_state.data && uio->uio_resid > 0) {
+ length = min(uio->uio_resid, sc->sc_isize);
+ memcpy(buf, sc->sc_buf, length);
+ sc->sc_state.data = false;
+ }
+exit:
+ mtx_unlock(&sc->sc_mtx);
+ if (length != 0) {
+ /* Copy the data to the user process. */
+ DPRINTFN(5, "got %lu chars\n", (u_long)length);
+ error = uiomove(buf, length, uio);
+ }
+
+ return (error);
+}
+
+static int
+u2f_write(struct cdev *dev, struct uio *uio, int flag)
+{
+ uint8_t buf[U2F_MAX_REPORT_SIZE];
+ struct u2f_softc *sc = dev->si_drv1;
+ int error;
+
+ DPRINTFN(1, "\n");
+
+ if (sc == NULL)
+ return (EIO);
+
+ if (uio->uio_resid != sc->sc_osize)
+ return (EINVAL);
+ error = uiomove(buf, uio->uio_resid, uio);
+ if (error == 0)
+ error = hid_write(sc->sc_dev, buf, sc->sc_osize);
+
+ return (error);
+}
+
+#ifdef COMPAT_FREEBSD32
+static void
+update_ugd32(const struct usb_gen_descriptor *ugd,
+ struct usb_gen_descriptor32 *ugd32)
+{
+ /* Don't update hgd_data pointer */
+ CP(*ugd, *ugd32, ugd_lang_id);
+ CP(*ugd, *ugd32, ugd_maxlen);
+ CP(*ugd, *ugd32, ugd_actlen);
+ CP(*ugd, *ugd32, ugd_offset);
+ CP(*ugd, *ugd32, ugd_config_index);
+ CP(*ugd, *ugd32, ugd_string_index);
+ CP(*ugd, *ugd32, ugd_iface_index);
+ CP(*ugd, *ugd32, ugd_altif_index);
+ CP(*ugd, *ugd32, ugd_endpt_index);
+ CP(*ugd, *ugd32, ugd_report_type);
+ /* Don't update reserved */
+}
+#endif
+
+static int
+u2f_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
+ struct thread *td)
+{
+#ifdef COMPAT_FREEBSD32
+ struct usb_gen_descriptor local_ugd;
+ struct usb_gen_descriptor32 *ugd32 = NULL;
+#endif
+ struct u2f_softc *sc = dev->si_drv1;
+ struct usb_gen_descriptor *ugd = (struct usb_gen_descriptor *)addr;
+ uint32_t size;
+
+ DPRINTFN(2, "cmd=%lx\n", cmd);
+
+ if (sc == NULL)
+ return (EIO);
+
+#ifdef COMPAT_FREEBSD32
+ switch (cmd) {
+ case USB_GET_REPORT_DESC32:
+ cmd = _IOC_NEWTYPE(cmd, struct usb_gen_descriptor);
+ ugd32 = (struct usb_gen_descriptor32 *)addr;
+ ugd = &local_ugd;
+ PTRIN_CP(*ugd32, *ugd, ugd_data);
+ CP(*ugd32, *ugd, ugd_lang_id);
+ CP(*ugd32, *ugd, ugd_maxlen);
+ CP(*ugd32, *ugd, ugd_actlen);
+ CP(*ugd32, *ugd, ugd_offset);
+ CP(*ugd32, *ugd, ugd_config_index);
+ CP(*ugd32, *ugd, ugd_string_index);
+ CP(*ugd32, *ugd, ugd_iface_index);
+ CP(*ugd32, *ugd, ugd_altif_index);
+ CP(*ugd32, *ugd, ugd_endpt_index);
+ CP(*ugd32, *ugd, ugd_report_type);
+ /* Don't copy reserved */
+ break;
+ }
+#endif
+
+ /* fixed-length ioctls handling */
+ switch (cmd) {
+ case FIONBIO:
+ /* All handled in the upper FS layer. */
+ return (0);
+
+ case USB_GET_REPORT_DESC:
+ size = MIN(sc->sc_rdesc_size, ugd->ugd_maxlen);
+ ugd->ugd_actlen = size;
+#ifdef COMPAT_FREEBSD32
+ if (ugd32 != NULL)
+ update_ugd32(ugd, ugd32);
+#endif
+ if (ugd->ugd_data == NULL)
+ return (0); /* descriptor length only */
+
+ return (copyout(sc->sc_rdesc, ugd->ugd_data, size));
+
+ case USB_GET_DEVICEINFO:
+ return (hid_ioctl(
+ sc->sc_dev, USB_GET_DEVICEINFO, (uintptr_t)addr));
+ }
+
+ return (EINVAL);
+}
+
+static int
+u2f_poll(struct cdev *dev, int events, struct thread *td)
+{
+ struct u2f_softc *sc = dev->si_drv1;
+ int revents = 0;
+ bool start_intr = false;
+
+ if (sc == NULL)
+ return (POLLHUP);
+
+ if (events & (POLLOUT | POLLWRNORM) && (sc->sc_fflags & FWRITE))
+ revents |= events & (POLLOUT | POLLWRNORM);
+ if (events & (POLLIN | POLLRDNORM) && (sc->sc_fflags & FREAD)) {
+ mtx_lock(&sc->sc_mtx);
+ if (sc->sc_state.data)
+ revents |= events & (POLLIN | POLLRDNORM);
+ else {
+ sc->sc_state.sel = true;
+ start_intr = true;
+ selrecord(td, &sc->sc_rsel);
+ }
+ mtx_unlock(&sc->sc_mtx);
+ if (start_intr)
+ hid_intr_start(sc->sc_dev);
+ }
+
+ return (revents);
+}
+
+static int
+u2f_kqfilter(struct cdev *dev, struct knote *kn)
+{
+ struct u2f_softc *sc = dev->si_drv1;
+
+ if (sc == NULL)
+ return (ENXIO);
+
+ switch (kn->kn_filter) {
+ case EVFILT_READ:
+ if (sc->sc_fflags & FREAD) {
+ kn->kn_fop = &u2f_filterops_read;
+ break;
+ }
+ /* FALLTHROUGH */
+ default:
+ return (EINVAL);
+ }
+ kn->kn_hook = sc;
+
+ knlist_add(&sc->sc_rsel.si_note, kn, 0);
+ return (0);
+}
+
+static int
+u2f_kqread(struct knote *kn, long hint)
+{
+ struct u2f_softc *sc = kn->kn_hook;
+ int ret;
+
+ mtx_assert(&sc->sc_mtx, MA_OWNED);
+
+ if (sc->dev->si_drv1 == NULL) {
+ kn->kn_flags |= EV_EOF;
+ ret = 1;
+ } else {
+ ret = sc->sc_state.data ? 1 : 0;
+ if (!sc->sc_state.data)
+ hid_intr_start(sc->sc_dev);
+ }
+
+ return (ret);
+}
+
+static void
+u2f_kqdetach(struct knote *kn)
+{
+ struct u2f_softc *sc = kn->kn_hook;
+
+ knlist_remove(&sc->sc_rsel.si_note, kn, 0);
+}
+
+static void
+u2f_notify(struct u2f_softc *sc)
+{
+ mtx_assert(&sc->sc_mtx, MA_OWNED);
+
+ if (sc->sc_state.aslp) {
+ sc->sc_state.aslp = false;
+ DPRINTFN(5, "waking %p\n", &sc->sc_buf);
+ wakeup(&sc->sc_buf);
+ }
+ if (sc->sc_state.sel) {
+ sc->sc_state.sel = false;
+ selwakeuppri(&sc->sc_rsel, PZERO);
+ }
+ KNOTE_LOCKED(&sc->sc_rsel.si_note, 0);
+}
+
+static device_method_t u2f_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, u2f_probe),
+ DEVMETHOD(device_attach, u2f_attach),
+ DEVMETHOD(device_detach, u2f_detach),
+
+ DEVMETHOD_END
+};
+
+static driver_t u2f_driver = {
+#ifdef U2F_MAKE_UHID_ALIAS
+ "uhid",
+#else
+ "u2f",
+#endif
+ u2f_methods,
+ sizeof(struct u2f_softc)
+};
+
+DRIVER_MODULE(u2f, hidbus, u2f_driver, NULL, NULL);
+MODULE_DEPEND(u2f, hidbus, 1, 1, 1);
+MODULE_DEPEND(u2f, hid, 1, 1, 1);
+MODULE_VERSION(u2f, 1);
+HID_PNP_INFO(u2f_devs);
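A hypothetical userland consumer of the new node (device path and timeout
illustrative): open /dev/u2f/0, wait for a report with poll(2), which starts
the interrupt pipe when no report is buffered, then read it.  Writes must be
exactly the output-report size, or u2f_write() returns EINVAL.

	#include <fcntl.h>
	#include <poll.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int
	main(void)
	{
		uint8_t report[64];	/* U2F_MAX_REPORT_SIZE */
		struct pollfd pfd = { .events = POLLIN };
		ssize_t n;

		pfd.fd = open("/dev/u2f/0", O_RDWR);
		if (pfd.fd == -1) {
			perror("open");
			return (1);
		}
		if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLIN) != 0) {
			n = read(pfd.fd, report, sizeof(report));
			printf("read %zd bytes\n", n);
		}
		close(pfd.fd);
		return (0);
	}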
diff --git a/sys/dev/hpt27xx/hptintf.h b/sys/dev/hpt27xx/hptintf.h
index 558b479ec2ee..eb8105ec5666 100644
--- a/sys/dev/hpt27xx/hptintf.h
+++ b/sys/dev/hpt27xx/hptintf.h
@@ -155,8 +155,8 @@ typedef HPT_U32 DEVICEID;
#define ARRAY_FLAG_NEED_AUTOREBUILD 0x00000080 /* auto-rebuild should start */
#define ARRAY_FLAG_VERIFYING 0x00000100 /* is being verified */
#define ARRAY_FLAG_INITIALIZING 0x00000200 /* is being initialized */
-#define ARRAY_FLAG_TRANSFORMING 0x00000400 /* tranform in progress */
-#define ARRAY_FLAG_NEEDTRANSFORM 0x00000800 /* array need tranform */
+#define ARRAY_FLAG_TRANSFORMING 0x00000400 /* transform in progress */
+#define ARRAY_FLAG_NEEDTRANSFORM 0x00000800 /* array needs transform */
#define ARRAY_FLAG_NEEDINITIALIZING 0x00001000 /* the array's initialization hasn't finished*/
#define ARRAY_FLAG_BROKEN_REDUNDANT 0x00002000 /* broken but redundant (raid6) */
#define ARRAY_FLAG_RAID15PLUS 0x80000000 /* display this RAID 1 as RAID 1.5 */
@@ -2018,7 +2018,7 @@ DEVICEID hpt_create_transform_v2(DEVICEID idArray, PCREATE_ARRAY_PARAMS_V3 destI
#endif
/* hpt_step_transform
- * move a block in a tranform progress.
+ * move a block in a transform process.
* This function is called by mid-layer, not GUI (which uses set_array_state instead).
* Version compatibility: v2.0.0.0 or later
* Parameters:
diff --git a/sys/dev/ice/ice_bitops.h b/sys/dev/ice/ice_bitops.h
index c480900596f4..a623f810c101 100644
--- a/sys/dev/ice/ice_bitops.h
+++ b/sys/dev/ice/ice_bitops.h
@@ -198,7 +198,7 @@ static inline void ice_zero_bitmap(ice_bitmap_t *bmp, u16 size)
* ice_and_bitmap - bitwise AND 2 bitmaps and store result in dst bitmap
* @dst: Destination bitmap that receive the result of the operation
* @bmp1: The first bitmap to intersect
- * @bmp2: The second bitmap to intersect wit the first
+ * @bmp2: The second bitmap to intersect with the first
* @size: Size of the bitmaps in bits
*
* This function performs a bitwise AND on two "source" bitmaps of the same size
@@ -237,7 +237,7 @@ ice_and_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
* ice_or_bitmap - bitwise OR 2 bitmaps and store result in dst bitmap
* @dst: Destination bitmap that receive the result of the operation
* @bmp1: The first bitmap to intersect
- * @bmp2: The second bitmap to intersect wit the first
+ * @bmp2: The second bitmap to intersect with the first
* @size: Size of the bitmaps in bits
*
* This function performs a bitwise OR on two "source" bitmaps of the same size
diff --git a/sys/dev/ice/ice_fw_logging.c b/sys/dev/ice/ice_fw_logging.c
index 0025a65d73fc..16a9ab6823bf 100644
--- a/sys/dev/ice/ice_fw_logging.c
+++ b/sys/dev/ice/ice_fw_logging.c
@@ -48,7 +48,7 @@ SDT_PROVIDER_DEFINE(ice_fwlog);
/*
* SDT DTrace probe fired when a firmware log message is received over the
- * AdminQ. It passes the buffer of the firwmare log message along with its
+ * AdminQ. It passes the buffer of the firmware log message along with its
* length in bytes to the DTrace framework.
*/
SDT_PROBE_DEFINE2(ice_fwlog, , , message, "uint8_t *", "int");
diff --git a/sys/dev/ice/ice_lan_tx_rx.h b/sys/dev/ice/ice_lan_tx_rx.h
index 693e0ca5efc6..eedacdab0216 100644
--- a/sys/dev/ice/ice_lan_tx_rx.h
+++ b/sys/dev/ice/ice_lan_tx_rx.h
@@ -630,7 +630,7 @@ enum ice_rxdid {
ICE_RXDID_LAST = 63,
};
-/* Recceive Flex descriptor Dword Index */
+/* Receive Flex descriptor Dword Index */
enum ice_flex_word {
ICE_RX_FLEX_DWORD_0 = 0,
ICE_RX_FLEX_DWORD_1,
diff --git a/sys/dev/ice/ice_lib.h b/sys/dev/ice/ice_lib.h
index 308b2bda2790..640bdf8fed7b 100644
--- a/sys/dev/ice/ice_lib.h
+++ b/sys/dev/ice/ice_lib.h
@@ -313,7 +313,7 @@ enum ice_dyn_idx_t {
ICE_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
};
-/* By convenction ITR0 is used for RX, and ITR1 is used for TX */
+/* By convention ITR0 is used for RX, and ITR1 is used for TX */
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
diff --git a/sys/dev/ice/ice_protocol_type.h b/sys/dev/ice/ice_protocol_type.h
index 300d61bfb5d9..b90c25e6c427 100644
--- a/sys/dev/ice/ice_protocol_type.h
+++ b/sys/dev/ice/ice_protocol_type.h
@@ -143,7 +143,7 @@ enum ice_prot_id {
ICE_PROT_LLDP_OF = 117,
ICE_PROT_ARP_OF = 118,
ICE_PROT_EAPOL_OF = 120,
- ICE_PROT_META_ID = 255, /* when offset == metaddata */
+ ICE_PROT_META_ID = 255, /* when offset == metadata */
ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
};
diff --git a/sys/dev/ichwd/i6300esbwd.c b/sys/dev/ichwd/i6300esbwd.c
new file mode 100644
index 000000000000..d95aeb53c3f5
--- /dev/null
+++ b/sys/dev/ichwd/i6300esbwd.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+/*
+ * Reference: Intel 6300ESB Controller Hub Datasheet Section 16
+ */
+
+#include <sys/param.h>
+#include <sys/eventhandler.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/sysctl.h>
+#include <sys/errno.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+#include <sys/watchdog.h>
+
+#include <dev/pci/pcireg.h>
+
+#include <dev/ichwd/ichwd.h>
+#include <dev/ichwd/i6300esbwd.h>
+
+#include <x86/pci_cfgreg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pci_private.h>
+
+struct i6300esbwd_softc {
+ device_t dev;
+ int res_id;
+ struct resource *res;
+ eventhandler_tag ev_tag;
+ bool locked;
+};
+
+static const struct i6300esbwd_pci_id {
+ uint16_t id;
+ const char *name;
+} i6300esbwd_pci_devices[] = {
+ { DEVICEID_6300ESB_2, "6300ESB Watchdog Timer" },
+};
+
+static uint16_t
+i6300esbwd_cfg_read(struct i6300esbwd_softc *sc)
+{
+ return (pci_read_config(sc->dev, WDT_CONFIG_REG, 2));
+}
+
+static void
+i6300esbwd_cfg_write(struct i6300esbwd_softc *sc, uint16_t val)
+{
+ pci_write_config(sc->dev, WDT_CONFIG_REG, val, 2);
+}
+
+static uint8_t
+i6300esbwd_lock_read(struct i6300esbwd_softc *sc)
+{
+ return (pci_read_config(sc->dev, WDT_LOCK_REG, 1));
+}
+
+static void
+i6300esbwd_lock_write(struct i6300esbwd_softc *sc, uint8_t val)
+{
+ pci_write_config(sc->dev, WDT_LOCK_REG, val, 1);
+}
+
+/*
+ * According to Intel 6300ESB I/O Controller Hub Datasheet 16.5.2,
+ * the resource should be unlocked before modifying any registers.
+ * The way to unlock is to write 0x80, then 0x86, to the reload register.
+ */
+static void
+i6300esbwd_unlock_res(struct i6300esbwd_softc *sc)
+{
+ bus_write_2(sc->res, WDT_RELOAD_REG, WDT_UNLOCK_SEQ_1_VAL);
+ bus_write_2(sc->res, WDT_RELOAD_REG, WDT_UNLOCK_SEQ_2_VAL);
+}
+
+static int
+i6300esbwd_sysctl_locked(SYSCTL_HANDLER_ARGS)
+{
+ struct i6300esbwd_softc *sc = (struct i6300esbwd_softc *)arg1;
+ int error;
+ int result;
+
+ result = sc->locked;
+ error = sysctl_handle_int(oidp, &result, 0, req);
+
+ if (error || !req->newptr)
+ return (error);
+
+ if (result == 1 && !sc->locked) {
+ i6300esbwd_lock_write(sc, i6300esbwd_lock_read(sc) | WDT_LOCK);
+ sc->locked = true;
+ }
+
+ return (0);
+}
+
+static void
+i6300esbwd_event(void *arg, unsigned int cmd, int *error)
+{
+ struct i6300esbwd_softc *sc = arg;
+ uint32_t timeout;
+ uint16_t regval;
+
+ cmd &= WD_INTERVAL;
+ if (cmd != 0 &&
+ (cmd < WD_TO_1MS || (cmd - WD_TO_1MS) >= WDT_PRELOAD_BIT)) {
+ *error = EINVAL;
+ return;
+ }
+
+ /* Reset the timer in case a timeout is about to occur. */
+ i6300esbwd_unlock_res(sc);
+ bus_write_2(sc->res, WDT_RELOAD_REG, WDT_RELOAD);
+
+ if (!cmd) {
+ /*
+ * When the lock is enabled, we are unable to overwrite the
+ * LOCK register.
+ */
+ if (sc->locked)
+ *error = EPERM;
+ else
+ i6300esbwd_lock_write(sc,
+ i6300esbwd_lock_read(sc) & ~WDT_ENABLE);
+ return;
+ }
+
+ /* Compute the preload value only for a valid, non-zero interval. */
+ timeout = 1 << (cmd - WD_TO_1MS);
+
+ i6300esbwd_unlock_res(sc);
+ bus_write_4(sc->res, WDT_PRELOAD_1_REG, timeout);
+
+ i6300esbwd_unlock_res(sc);
+ bus_write_4(sc->res, WDT_PRELOAD_2_REG, timeout);
+
+ i6300esbwd_unlock_res(sc);
+ bus_write_2(sc->res, WDT_RELOAD_REG, WDT_RELOAD);
+
+ if (!sc->locked) {
+ i6300esbwd_lock_write(sc, WDT_ENABLE);
+ regval = i6300esbwd_lock_read(sc);
+ sc->locked = regval & WDT_LOCK;
+ }
+}
+
+static int
+i6300esbwd_probe(device_t dev)
+{
+ const struct i6300esbwd_pci_id *pci_id;
+ uint16_t pci_dev_id;
+ int err = ENXIO;
+
+ if (pci_get_vendor(dev) != VENDORID_INTEL)
+ goto end;
+
+ pci_dev_id = pci_get_device(dev);
+ for (pci_id = i6300esbwd_pci_devices;
+ pci_id < i6300esbwd_pci_devices + nitems(i6300esbwd_pci_devices);
+ ++pci_id) {
+ if (pci_id->id == pci_dev_id) {
+ device_set_desc(dev, pci_id->name);
+ err = BUS_PROBE_DEFAULT;
+ break;
+ }
+ }
+
+end:
+ return (err);
+}
+
+static int
+i6300esbwd_attach(device_t dev)
+{
+ struct i6300esbwd_softc *sc = device_get_softc(dev);
+ uint16_t regval;
+
+ sc->dev = dev;
+ sc->res_id = PCIR_BAR(0);
+ sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->res_id,
+ RF_ACTIVE);
+ if (sc->res == NULL) {
+ device_printf(dev, "unable to map memory region\n");
+ return (ENXIO);
+ }
+
+ i6300esbwd_cfg_write(sc, WDT_INT_TYPE_DISABLED_VAL);
+ regval = i6300esbwd_lock_read(sc);
+ if (regval & WDT_LOCK)
+ sc->locked = true;
+ else {
+ sc->locked = false;
+ i6300esbwd_lock_write(sc, WDT_TOUT_CNF_WT_MODE);
+ }
+
+ i6300esbwd_unlock_res(sc);
+ bus_write_2(sc->res, WDT_RELOAD_REG, WDT_RELOAD | WDT_TIMEOUT);
+
+ sc->ev_tag = EVENTHANDLER_REGISTER(watchdog_list, i6300esbwd_event, sc,
+ 0);
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "locked",
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
+ i6300esbwd_sysctl_locked, "I",
+ "Lock the timer so that we cannot disable it");
+
+ return (0);
+}
+
+static int
+i6300esbwd_detach(device_t dev)
+{
+ struct i6300esbwd_softc *sc = device_get_softc(dev);
+
+ if (sc->ev_tag)
+ EVENTHANDLER_DEREGISTER(watchdog_list, sc->ev_tag);
+
+ if (sc->res)
+ bus_release_resource(dev, SYS_RES_MEMORY, sc->res_id, sc->res);
+
+ return (0);
+}
+
+static device_method_t i6300esbwd_methods[] = {
+ DEVMETHOD(device_probe, i6300esbwd_probe),
+ DEVMETHOD(device_attach, i6300esbwd_attach),
+ DEVMETHOD(device_detach, i6300esbwd_detach),
+ DEVMETHOD(device_shutdown, i6300esbwd_detach),
+ DEVMETHOD_END
+};
+
+static driver_t i6300esbwd_driver = {
+ "i6300esbwd",
+ i6300esbwd_methods,
+ sizeof(struct i6300esbwd_softc),
+};
+
+DRIVER_MODULE(i6300esbwd, pci, i6300esbwd_driver, NULL, NULL);
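The preload arithmetic relies on the watchdog(9) encoding: the event
argument carries log2 of the requested timeout in nanoseconds, so
subtracting WD_TO_1MS yields (roughly) log2 of the timeout in milliseconds.
A worked example with the constants from sys/watchdog.h, assuming the
driver's roughly 1 ms tick:

	#include <sys/types.h>
	#include <sys/watchdog.h>

	/*
	 * WD_TO_1SEC - WD_TO_1MS == 10, so a one-second request becomes
	 * a preload of 1 << 10 == 1024 ticks, i.e. about one second.
	 */
	uint32_t preload = 1U << (WD_TO_1SEC - WD_TO_1MS);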
diff --git a/sys/dev/ichwd/i6300esbwd.h b/sys/dev/ichwd/i6300esbwd.h
new file mode 100644
index 000000000000..39ed5d5a84f6
--- /dev/null
+++ b/sys/dev/ichwd/i6300esbwd.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#ifndef _I6300ESBWD_H_
+#define _I6300ESBWD_H_
+
+#define WDT_CONFIG_REG 0x60
+#define WDT_LOCK_REG 0x68
+
+#define WDT_PRELOAD_1_REG 0x00
+#define WDT_PRELOAD_2_REG 0x04
+#define WDT_INTR_REG 0x08
+#define WDT_RELOAD_REG 0x0C
+
+/* For config register */
+#define WDT_OUTPUT_EN (0x1 << 5)
+#define WDT_PRE_SEL (0x1 << 2)
+#define WDT_INT_TYPE_BITS (0x3)
+#define WDT_INT_TYPE_IRQ_VAL (0x0)
+#define WDT_INT_TYPE_RES_VAL (0x1)
+#define WDT_INT_TYPE_SMI_VAL (0x2)
+#define WDT_INT_TYPE_DISABLED_VAL (0x3)
+
+/* For lock register */
+#define WDT_TOUT_CNF_WT_MODE (0x0 << 2)
+#define WDT_TOUT_CNF_FR_MODE (0x1 << 2)
+#define WDT_ENABLE (0x02)
+#define WDT_LOCK (0x01)
+
+/* For preload 1/2 registers */
+#define WDT_PRELOAD_BIT 20
+#define WDT_PRELOAD_BITS ((0x1 << WDT_PRELOAD_BIT) - 1)
+
+/* For interrupt register */
+#define WDT_INTR_ACT (0x01 << 0)
+
+/* For reload register */
+#define WDT_TIMEOUT (0x01 << 9)
+#define WDT_RELOAD (0x01 << 8)
+#define WDT_UNLOCK_SEQ_1_VAL 0x80
+#define WDT_UNLOCK_SEQ_2_VAL 0x86
+
+#endif /* _I6300ESBWD_H_ */
diff --git a/sys/dev/ichwd/ichwd.c b/sys/dev/ichwd/ichwd.c
index cade2cc4fb45..5481553cc175 100644
--- a/sys/dev/ichwd/ichwd.c
+++ b/sys/dev/ichwd/ichwd.c
@@ -90,7 +90,7 @@ static struct ichwd_device ichwd_devices[] = {
{ DEVICEID_82801E, "Intel 82801E watchdog timer", 5, 1 },
{ DEVICEID_82801EB, "Intel 82801EB watchdog timer", 5, 1 },
{ DEVICEID_82801EBR, "Intel 82801EB/ER watchdog timer", 5, 1 },
- { DEVICEID_6300ESB, "Intel 6300ESB watchdog timer", 5, 1 },
+ { DEVICEID_6300ESB_1, "Intel 6300ESB watchdog timer", 5, 1 },
{ DEVICEID_82801FBR, "Intel 82801FB/FR watchdog timer", 6, 2 },
{ DEVICEID_ICH6M, "Intel ICH6M watchdog timer", 6, 2 },
{ DEVICEID_ICH6W, "Intel ICH6W watchdog timer", 6, 2 },
diff --git a/sys/dev/ichwd/ichwd.h b/sys/dev/ichwd/ichwd.h
index 90fda08b74c1..72d0ca1cd6aa 100644
--- a/sys/dev/ichwd/ichwd.h
+++ b/sys/dev/ichwd/ichwd.h
@@ -151,7 +151,8 @@ struct ichwd_softc {
#define DEVICEID_82801E 0x2450
#define DEVICEID_82801EB 0x24dc
#define DEVICEID_82801EBR 0x24d0
-#define DEVICEID_6300ESB 0x25a1
+#define DEVICEID_6300ESB_1 0x25a1
+#define DEVICEID_6300ESB_2 0x25ab
#define DEVICEID_82801FBR 0x2640
#define DEVICEID_ICH6M 0x2641
#define DEVICEID_ICH6W 0x2642
diff --git a/sys/dev/iicbus/gpio/pcf8574.c b/sys/dev/iicbus/gpio/pcf8574.c
index 86c78ffb15e6..bf60dec67557 100644
--- a/sys/dev/iicbus/gpio/pcf8574.c
+++ b/sys/dev/iicbus/gpio/pcf8574.c
@@ -159,9 +159,7 @@ pcf8574_detach(device_t dev)
sc = device_get_softc(dev);
- if (sc->busdev != NULL)
- gpiobus_detach_bus(sc->busdev);
-
+ gpiobus_detach_bus(dev);
sx_destroy(&sc->lock);
return (0);
}
diff --git a/sys/dev/iicbus/gpio/tca64xx.c b/sys/dev/iicbus/gpio/tca64xx.c
index a973ef22f3fa..ab8fedd3f8fd 100644
--- a/sys/dev/iicbus/gpio/tca64xx.c
+++ b/sys/dev/iicbus/gpio/tca64xx.c
@@ -292,9 +292,7 @@ tca64xx_detach(device_t dev)
sc = device_get_softc(dev);
- if (sc->busdev != NULL)
- gpiobus_detach_bus(sc->busdev);
-
+ gpiobus_detach_bus(dev);
mtx_destroy(&sc->mtx);
return (0);
diff --git a/sys/dev/iicbus/iichid.c b/sys/dev/iicbus/iichid.c
index 3f1d7a0cefba..fdb4816b8bd9 100644
--- a/sys/dev/iicbus/iichid.c
+++ b/sys/dev/iicbus/iichid.c
@@ -861,7 +861,8 @@ iichid_intr_start(device_t dev, device_t child __unused)
sc = device_get_softc(dev);
DPRINTF(sc, "iichid device open\n");
- iichid_set_power_state(sc, IICHID_PS_ON, IICHID_PS_NULL);
+ if (!sc->open)
+ iichid_set_power_state(sc, IICHID_PS_ON, IICHID_PS_NULL);
return (0);
}
diff --git a/sys/dev/irdma/irdma_cm.c b/sys/dev/irdma/irdma_cm.c
index 450fae662dd8..d4d4f328fb43 100644
--- a/sys/dev/irdma/irdma_cm.c
+++ b/sys/dev/irdma/irdma_cm.c
@@ -1316,7 +1316,7 @@ irdma_cm_timer_tick(struct timer_list *t)
struct irdma_timer_entry *send_entry, *close_entry;
struct list_head *list_core_temp;
struct list_head *list_node;
- struct irdma_cm_core *cm_core = from_timer(cm_core, t, tcp_timer);
+ struct irdma_cm_core *cm_core = timer_container_of(cm_core, t, tcp_timer);
struct irdma_sc_vsi *vsi;
u32 settimer = 0;
unsigned long timetosend;
diff --git a/sys/dev/irdma/irdma_utils.c b/sys/dev/irdma/irdma_utils.c
index 5fc37022981f..038f1980082b 100644
--- a/sys/dev/irdma/irdma_utils.c
+++ b/sys/dev/irdma/irdma_utils.c
@@ -876,7 +876,7 @@ irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred)
static void
irdma_terminate_timeout(struct timer_list *t)
{
- struct irdma_qp *iwqp = from_timer(iwqp, t, terminate_timer);
+ struct irdma_qp *iwqp = timer_container_of(iwqp, t, terminate_timer);
struct irdma_sc_qp *qp = &iwqp->sc_qp;
irdma_terminate_done(qp, 1);
@@ -1528,7 +1528,7 @@ static void
irdma_hw_stats_timeout(struct timer_list *t)
{
struct irdma_vsi_pestat *pf_devstat =
- from_timer(pf_devstat, t, stats_timer);
+ timer_container_of(pf_devstat, t, stats_timer);
struct irdma_sc_vsi *sc_vsi = pf_devstat->vsi;
if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
diff --git a/sys/dev/isci/scil/intel_sata.h b/sys/dev/isci/scil/intel_sata.h
index 4cf4adf03e07..fdad5be9b083 100644
--- a/sys/dev/isci/scil/intel_sata.h
+++ b/sys/dev/isci/scil/intel_sata.h
@@ -61,7 +61,7 @@
*
* @brief This file defines all of the SATA releated constants, enumerations,
* and types. Please note that this file does not necessarily contain
- * an exhaustive list of all contants and commands.
+ * an exhaustive list of all constants and commands.
*/
/**
diff --git a/sys/dev/iwx/if_iwx.c b/sys/dev/iwx/if_iwx.c
index d60ef1874a6c..1fe531d69933 100644
--- a/sys/dev/iwx/if_iwx.c
+++ b/sys/dev/iwx/if_iwx.c
@@ -5673,6 +5673,10 @@ iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
if (rinfo == NULL)
return EINVAL;
+	/* Sequence number assignment is offloaded; firmware is expected
+	 * to handle it on all supported devices. */
+
+ /* Radiotap */
if (ieee80211_radiotap_active_vap(vap)) {
struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
@@ -5685,6 +5689,7 @@ iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
ieee80211_radiotap_tx(vap, m);
}
+	/* Encrypt: CCMP via the direct HW path; TKIP/WEP via the indirect, OpenBSD-style path for now. */
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_get_txkey(ni, m);
if (k == NULL) {
@@ -10467,6 +10472,8 @@ iwx_attach(device_t dev)
IEEE80211_C_BGSCAN /* capable of bg scanning */
;
ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
+ /* Enable seqno offload */
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
ic->ic_txstream = 2;
ic->ic_rxstream = 2;
diff --git a/sys/dev/ixgbe/if_ix.c b/sys/dev/ixgbe/if_ix.c
index 959afa79e7da..73c0fd1ab16f 100644
--- a/sys/dev/ixgbe/if_ix.c
+++ b/sys/dev/ixgbe/if_ix.c
@@ -45,7 +45,7 @@
/************************************************************************
* Driver version
************************************************************************/
-static const char ixgbe_driver_version[] = "4.0.1-k";
+static const char ixgbe_driver_version[] = "5.0.1-k";
/************************************************************************
* PCI Device ID Table
@@ -144,6 +144,16 @@ static const pci_vendor_info_t ixgbe_vendor_info_array[] =
"Intel(R) X540-T2 (Bypass)"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS,
"Intel(R) X520 82599 (Bypass)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_BACKPLANE,
+ "Intel(R) E610 (Backplane)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_SFP,
+ "Intel(R) E610 (SFP)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_2_5G_T,
+ "Intel(R) E610 (2.5 GbE)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_10G_T,
+ "Intel(R) E610 (10 GbE)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_SGMII,
+ "Intel(R) E610 (SGMII)"),
/* required last entry */
PVID_END
};
@@ -253,6 +263,10 @@ static int ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS);
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);
+static void ixgbe_handle_fw_event(void *);
+
+static int ixgbe_enable_lse(struct ixgbe_softc *sc);
+static int ixgbe_disable_lse(struct ixgbe_softc *sc);
/************************************************************************
* FreeBSD Device Interface Entry Points
@@ -621,6 +635,7 @@ ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
table_size = 512;
break;
default:
@@ -902,6 +917,32 @@ ixgbe_initialize_transmit_units(if_ctx_t ctx)
} /* ixgbe_initialize_transmit_units */
+static int
+ixgbe_check_fw_api_version(struct ixgbe_softc *sc)
+{
+	struct ixgbe_hw *hw = &sc->hw;
+
+ if (hw->api_maj_ver > IXGBE_FW_API_VER_MAJOR) {
+ device_printf(sc->dev,
+ "The driver for the device stopped because the NVM "
+ "image is newer than expected. You must install the "
+ "most recent version of the network driver.\n");
+ return (EOPNOTSUPP);
+ } else if (hw->api_maj_ver == IXGBE_FW_API_VER_MAJOR &&
+ hw->api_min_ver > (IXGBE_FW_API_VER_MINOR + 2)) {
+ device_printf(sc->dev,
+ "The driver for the device detected a newer version of "
+ "the NVM image than expected. Please install the most "
+ "recent version of the network driver.\n");
+ } else if (hw->api_maj_ver < IXGBE_FW_API_VER_MAJOR ||
+ hw->api_min_ver < IXGBE_FW_API_VER_MINOR - 2) {
+ device_printf(sc->dev,
+ "The driver for the device detected an older version "
+ "of the NVM image than expected. "
+ "Please update the NVM image.\n");
+ }
+ return (0);
+}
+
/************************************************************************
* ixgbe_register
************************************************************************/
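A worked reading of ixgbe_check_fw_api_version() above, using hypothetical constant values (the real IXGBE_FW_API_VER_* definitions live in the E610 headers):

	/* Assume IXGBE_FW_API_VER_MAJOR == 1, IXGBE_FW_API_VER_MINOR == 7:
	 *
	 *   api_maj_ver == 2              -> attach fails (EOPNOTSUPP)
	 *   api_maj_ver == 1, minor == 10 -> warn: NVM newer than expected
	 *   api_maj_ver == 1, minor == 4  -> warn: NVM older than expected
	 *   api_maj_ver == 1, minor 5..9  -> accepted silently
	 */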
@@ -970,6 +1011,9 @@ ixgbe_if_attach_pre(if_ctx_t ctx)
goto err_pci;
}
+ if (hw->mac.type == ixgbe_mac_E610)
+ ixgbe_init_aci(hw);
+
if (hw->mac.ops.fw_recovery_mode &&
hw->mac.ops.fw_recovery_mode(hw)) {
device_printf(dev,
@@ -1058,6 +1102,12 @@ ixgbe_if_attach_pre(if_ctx_t ctx)
break;
}
+ /* Check the FW API version */
+ if (hw->mac.type == ixgbe_mac_E610 && ixgbe_check_fw_api_version(sc)) {
+ error = EIO;
+ goto err_pci;
+ }
+
/* Most of the iflib initialization... */
iflib_set_mac(ctx, hw->mac.addr);
@@ -1111,6 +1161,9 @@ err_pci:
IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
ixgbe_free_pci_resources(ctx);
+ if (hw->mac.type == ixgbe_mac_E610)
+ ixgbe_shutdown_aci(hw);
+
return (error);
} /* ixgbe_if_attach_pre */
@@ -1358,6 +1411,10 @@ ixgbe_add_media_types(if_ctx_t ctx)
/* Media types with matching FreeBSD media defines */
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
+ if (layer & IXGBE_PHYSICAL_LAYER_5000BASE_T)
+ ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
+ if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T)
+ ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
@@ -1459,6 +1516,7 @@ ixgbe_is_sfp(struct ixgbe_hw *hw)
}
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
return (true);
return (false);
@@ -1525,6 +1583,15 @@ ixgbe_config_link(if_ctx_t ctx)
IXGBE_LINK_SPEED_5GB_FULL);
}
+ if (hw->mac.type == ixgbe_mac_E610) {
+ hw->phy.ops.init(hw);
+ err = ixgbe_enable_lse(sc);
+ if (err)
+ device_printf(sc->dev,
+ "Failed to enable Link Status Event, "
+			    "error: %d\n", err);
+ }
+
if (hw->mac.ops.setup_link)
err = hw->mac.ops.setup_link(hw, autoneg,
sc->link_up);
@@ -2158,14 +2225,15 @@ get_parent_info:
ixgbe_set_pci_config_data_generic(hw, link);
display:
- device_printf(dev, "PCI Express Bus: Speed %s %s\n",
- ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
+ device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
+ ((hw->bus.speed == ixgbe_bus_speed_16000) ? "16.0GT/s" :
+ (hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
(hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
(hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
"Unknown"),
- ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
- (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
- (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
+ ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
+ (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
+ (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
"Unknown"));
if (bus_info_valid) {
@@ -2372,14 +2440,17 @@ ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
ifmr->ifm_status |= IFM_ACTIVE;
layer = sc->phy_layer;
- if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
- layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
- layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
- layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
+ if (layer & IXGBE_PHYSICAL_LAYERS_BASE_T_ALL)
switch (sc->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
break;
+ case IXGBE_LINK_SPEED_5GB_FULL:
+ ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
+ break;
+ case IXGBE_LINK_SPEED_2_5GB_FULL:
+ ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
+ break;
case IXGBE_LINK_SPEED_1GB_FULL:
ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
break;
@@ -2390,15 +2461,6 @@ ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
ifmr->ifm_active |= IFM_10_T | IFM_FDX;
break;
}
- if (hw->mac.type == ixgbe_mac_X550)
- switch (sc->link_speed) {
- case IXGBE_LINK_SPEED_5GB_FULL:
- ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
- break;
- case IXGBE_LINK_SPEED_2_5GB_FULL:
- ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
- break;
- }
if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
switch (sc->link_speed) {
@@ -2676,6 +2738,11 @@ ixgbe_msix_link(void *arg)
sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
}
+ if (eicr & IXGBE_EICR_FW_EVENT) {
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FW_EVENT);
+ sc->task_requests |= IXGBE_REQUEST_TASK_FWEVENT;
+ }
+
if (sc->hw.mac.type != ixgbe_mac_82598EB) {
if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
(eicr & IXGBE_EICR_FLOW_DIR)) {
@@ -2734,11 +2801,16 @@ ixgbe_msix_link(void *arg)
/* Check for VF message */
if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
- (eicr & IXGBE_EICR_MAILBOX))
+ (eicr & IXGBE_EICR_MAILBOX)) {
sc->task_requests |= IXGBE_REQUEST_TASK_MBX;
+ }
}
- if (ixgbe_is_sfp(hw)) {
+ /*
+ * On E610, the firmware handles PHY configuration, so
+ * there is no need to perform any SFP-specific tasks.
+ */
+ if (hw->mac.type != ixgbe_mac_E610 && ixgbe_is_sfp(hw)) {
/* Pluggable optics-related interrupt */
if (hw->mac.type >= ixgbe_mac_X540)
eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
@@ -2985,7 +3057,13 @@ ixgbe_if_detach(if_ctx_t ctx)
callout_drain(&sc->fw_mode_timer);
+ if (sc->hw.mac.type == ixgbe_mac_E610) {
+ ixgbe_disable_lse(sc);
+ ixgbe_shutdown_aci(&sc->hw);
+ }
+
ixgbe_free_pci_resources(ctx);
+
free(sc->mta, M_IXGBE);
return (0);
@@ -3404,6 +3482,7 @@ ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
if (type == -1) { /* MISC IVAR */
index = (entry & 1) * 8;
ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
@@ -3826,6 +3905,96 @@ ixgbe_handle_phy(void *context)
} /* ixgbe_handle_phy */
/************************************************************************
+ * ixgbe_enable_lse - enable link status events
+ *
+ * Sets mask and enables link status events
+ ************************************************************************/
+static int
+ixgbe_enable_lse(struct ixgbe_softc *sc)
+{
+ s32 error;
+
+ u16 mask = ~((u16)(IXGBE_ACI_LINK_EVENT_UPDOWN |
+ IXGBE_ACI_LINK_EVENT_MEDIA_NA |
+ IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL |
+ IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL));
+
+	error = ixgbe_configure_lse(&sc->hw, true, mask);
+ if (error)
+ return (error);
+
+ sc->lse_mask = mask;
+ return (IXGBE_SUCCESS);
+} /* ixgbe_enable_lse */
+
+/************************************************************************
+ * ixgbe_disable_lse - disable link status events
+ ************************************************************************/
+static int
+ixgbe_disable_lse(struct ixgbe_softc *sc)
+{
+ s32 error;
+
+ error = ixgbe_configure_lse(&sc->hw, false, sc->lse_mask);
+ if (error)
+ return (error);
+
+ sc->lse_mask = 0;
+ return (IXGBE_SUCCESS);
+} /* ixgbe_disable_lse */
+
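The mask built in ixgbe_enable_lse() is the bitwise complement of the events of interest; assuming a set bit in the ACI link-event mask suppresses that event, the zero bits select what the firmware reports. A narrower, hypothetical configuration:

	/* Hypothetical: report only link up/down transitions. */
	u16 mask = ~((u16)IXGBE_ACI_LINK_EVENT_UPDOWN);
	s32 error = ixgbe_configure_lse(&sc->hw, true, mask);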
+/************************************************************************
+ * ixgbe_handle_fw_event - Tasklet for MSI-X Link Status Event interrupts
+ ************************************************************************/
+static void
+ixgbe_handle_fw_event(void *context)
+{
+ if_ctx_t ctx = context;
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_hw *hw = &sc->hw;
+ struct ixgbe_aci_event event;
+ bool pending = false;
+ s32 error;
+
+ event.buf_len = IXGBE_ACI_MAX_BUFFER_SIZE;
+ event.msg_buf = malloc(event.buf_len, M_IXGBE, M_ZERO | M_NOWAIT);
+ if (!event.msg_buf) {
+		device_printf(sc->dev, "Cannot allocate buffer for "
+		    "event message\n");
+ return;
+ }
+
+ do {
+ error = ixgbe_aci_get_event(hw, &event, &pending);
+ if (error) {
+ device_printf(sc->dev, "Error getting event from "
+ "FW:%d\n", error);
+ break;
+ }
+
+ switch (le16toh(event.desc.opcode)) {
+ case ixgbe_aci_opc_get_link_status:
+ sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
+ break;
+
+ case ixgbe_aci_opc_temp_tca_event:
+			if (!hw->adapter_stopped)
+ ixgbe_if_stop(ctx);
+ device_printf(sc->dev,
+ "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
+ device_printf(sc->dev, "System shutdown required!\n");
+ break;
+
+ default:
+ device_printf(sc->dev,
+ "Unknown FW event captured, opcode=0x%04X\n",
+ le16toh(event.desc.opcode));
+ break;
+ }
+ } while (pending);
+
+ free(event.msg_buf, M_IXGBE);
+} /* ixgbe_handle_fw_event */
+
+/************************************************************************
* ixgbe_if_stop - Stop the hardware
*
* Disables all traffic on the adapter by issuing a
@@ -3899,6 +4068,8 @@ ixgbe_if_update_admin_status(if_ctx_t ctx)
}
/* Handle task requests from msix_link() */
+ if (sc->task_requests & IXGBE_REQUEST_TASK_FWEVENT)
+ ixgbe_handle_fw_event(ctx);
if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
ixgbe_handle_mod(ctx);
if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
@@ -3986,6 +4157,9 @@ ixgbe_if_enable_intr(if_ctx_t ctx)
mask |= IXGBE_EICR_GPI_SDP0_X540;
mask |= IXGBE_EIMS_ECC;
break;
+ case ixgbe_mac_E610:
+ mask |= IXGBE_EIMS_FW_EVENT;
+ break;
default:
break;
}
@@ -4008,6 +4182,7 @@ ixgbe_if_enable_intr(if_ctx_t ctx)
/* Don't autoclear Link */
mask &= ~IXGBE_EIMS_OTHER;
mask &= ~IXGBE_EIMS_LSC;
+ mask &= ~IXGBE_EIMS_FW_EVENT;
if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
mask &= ~IXGBE_EIMS_MAILBOX;
IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
@@ -4026,7 +4201,7 @@ ixgbe_if_enable_intr(if_ctx_t ctx)
} /* ixgbe_if_enable_intr */
/************************************************************************
- * ixgbe_disable_intr
+ * ixgbe_if_disable_intr
************************************************************************/
static void
ixgbe_if_disable_intr(if_ctx_t ctx)
@@ -4176,8 +4351,9 @@ ixgbe_intr(void *arg)
/* External PHY interrupt */
if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
- (eicr & IXGBE_EICR_GPI_SDP0_X540))
+ (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
+ }
return (FILTER_SCHEDULE_THREAD);
} /* ixgbe_intr */
@@ -4219,7 +4395,7 @@ ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
int error, fc;
sc = (struct ixgbe_softc *)arg1;
- fc = sc->hw.fc.current_mode;
+ fc = sc->hw.fc.requested_mode;
error = sysctl_handle_int(oidp, &fc, 0, req);
if ((error) || (req->newptr == NULL))
@@ -4248,12 +4424,10 @@ ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
case ixgbe_fc_rx_pause:
case ixgbe_fc_tx_pause:
case ixgbe_fc_full:
- sc->hw.fc.requested_mode = fc;
if (sc->num_rx_queues > 1)
ixgbe_disable_rx_drop(sc);
break;
case ixgbe_fc_none:
- sc->hw.fc.requested_mode = ixgbe_fc_none;
if (sc->num_rx_queues > 1)
ixgbe_enable_rx_drop(sc);
break;
@@ -4261,6 +4435,8 @@ ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
return (EINVAL);
}
+ sc->hw.fc.requested_mode = fc;
+
/* Don't autoneg if forcing a value */
sc->hw.fc.disable_fc_autoneg = true;
ixgbe_fc_enable(&sc->hw);
@@ -4978,6 +5154,9 @@ ixgbe_init_device_features(struct ixgbe_softc *sc)
if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
break;
+ case ixgbe_mac_E610:
+ sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
+ break;
default:
break;
}
diff --git a/sys/dev/ixgbe/if_ixv.c b/sys/dev/ixgbe/if_ixv.c
index 54b2c8c1dd68..8a1c1aae041d 100644
--- a/sys/dev/ixgbe/if_ixv.c
+++ b/sys/dev/ixgbe/if_ixv.c
@@ -68,6 +68,8 @@ static const pci_vendor_info_t ixv_vendor_info_array[] =
"Intel(R) X552 Virtual Function"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF,
"Intel(R) X553 Virtual Function"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_VF,
+ "Intel(R) E610 Virtual Function"),
/* required last entry */
PVID_END
};
@@ -1020,6 +1022,9 @@ ixv_identify_hardware(if_ctx_t ctx)
case IXGBE_DEV_ID_X550EM_A_VF:
hw->mac.type = ixgbe_mac_X550EM_a_vf;
break;
+ case IXGBE_DEV_ID_E610_VF:
+ hw->mac.type = ixgbe_mac_E610_vf;
+ break;
default:
device_printf(dev, "unknown mac type\n");
hw->mac.type = ixgbe_mac_unknown;
@@ -1955,6 +1960,7 @@ ixv_init_device_features(struct ixgbe_softc *sc)
case ixgbe_mac_X550_vf:
case ixgbe_mac_X550EM_x_vf:
case ixgbe_mac_X550EM_a_vf:
+ case ixgbe_mac_E610_vf:
sc->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
sc->feat_cap |= IXGBE_FEATURE_RSS;
break;
diff --git a/sys/dev/ixgbe/ixgbe.h b/sys/dev/ixgbe/ixgbe.h
index 341d4ebfcebc..844064bf8543 100644
--- a/sys/dev/ixgbe/ixgbe.h
+++ b/sys/dev/ixgbe/ixgbe.h
@@ -86,6 +86,7 @@
#include "ixgbe_phy.h"
#include "ixgbe_vf.h"
#include "ixgbe_features.h"
+#include "ixgbe_e610.h"
/* Tunables */
@@ -195,6 +196,15 @@
CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
+/* All BASE-T Physical layers */
+#define IXGBE_PHYSICAL_LAYERS_BASE_T_ALL \
+ (IXGBE_PHYSICAL_LAYER_10GBASE_T |\
+ IXGBE_PHYSICAL_LAYER_5000BASE_T |\
+ IXGBE_PHYSICAL_LAYER_2500BASE_T |\
+ IXGBE_PHYSICAL_LAYER_1000BASE_T |\
+ IXGBE_PHYSICAL_LAYER_100BASE_TX |\
+ IXGBE_PHYSICAL_LAYER_10BASE_T)
+
#define IXGBE_CAPS (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_TSO | \
IFCAP_LRO | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | \
IFCAP_VLAN_HWCSUM | IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU | \
@@ -464,6 +474,7 @@ struct ixgbe_softc {
/* Feature capable/enabled flags. See ixgbe_features.h */
u32 feat_cap;
u32 feat_en;
+ u16 lse_mask;
};
/* Precision Time Sync (IEEE 1588) defines */
diff --git a/sys/dev/ixgbe/ixgbe_api.c b/sys/dev/ixgbe/ixgbe_api.c
index 4c50f10ed92e..f11f52a646e4 100644
--- a/sys/dev/ixgbe/ixgbe_api.c
+++ b/sys/dev/ixgbe/ixgbe_api.c
@@ -112,11 +112,15 @@ s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
case ixgbe_mac_X550EM_a:
status = ixgbe_init_ops_X550EM_a(hw);
break;
+ case ixgbe_mac_E610:
+ status = ixgbe_init_ops_E610(hw);
+ break;
case ixgbe_mac_82599_vf:
case ixgbe_mac_X540_vf:
case ixgbe_mac_X550_vf:
case ixgbe_mac_X550EM_x_vf:
case ixgbe_mac_X550EM_a_vf:
+ case ixgbe_mac_E610_vf:
status = ixgbe_init_ops_vf(hw);
break;
default:
@@ -240,6 +244,18 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
hw->mac.type = ixgbe_mac_X550EM_a_vf;
hw->mvals = ixgbe_mvals_X550EM_a;
break;
+ case IXGBE_DEV_ID_E610_BACKPLANE:
+ case IXGBE_DEV_ID_E610_SFP:
+ case IXGBE_DEV_ID_E610_10G_T:
+ case IXGBE_DEV_ID_E610_2_5G_T:
+ case IXGBE_DEV_ID_E610_SGMII:
+ hw->mac.type = ixgbe_mac_E610;
+ hw->mvals = ixgbe_mvals_X550EM_a;
+ break;
+ case IXGBE_DEV_ID_E610_VF:
+ hw->mac.type = ixgbe_mac_E610_vf;
+ hw->mvals = ixgbe_mvals_X550EM_a;
+ break;
default:
ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
diff --git a/sys/dev/ixgbe/ixgbe_api.h b/sys/dev/ixgbe/ixgbe_api.h
index b81510dacb95..2b4cec8d110e 100644
--- a/sys/dev/ixgbe/ixgbe_api.h
+++ b/sys/dev/ixgbe/ixgbe_api.h
@@ -48,6 +48,7 @@ extern s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_E610(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
diff --git a/sys/dev/ixgbe/ixgbe_common.c b/sys/dev/ixgbe/ixgbe_common.c
index df7ab90e72ab..bff022585a03 100644
--- a/sys/dev/ixgbe/ixgbe_common.c
+++ b/sys/dev/ixgbe/ixgbe_common.c
@@ -178,6 +178,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_SFP_N:
case IXGBE_DEV_ID_X550EM_A_QSFP:
case IXGBE_DEV_ID_X550EM_A_QSFP_N:
+ case IXGBE_DEV_ID_E610_SFP:
supported = false;
break;
default:
@@ -210,6 +211,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_10G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ case IXGBE_DEV_ID_E610_10G_T:
+ case IXGBE_DEV_ID_E610_2_5G_T:
supported = true;
break;
default:
@@ -616,7 +619,8 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
}
}
- if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
+ if (hw->mac.type == ixgbe_mac_X540 ||
+ hw->mac.type == ixgbe_mac_X550) {
if (hw->phy.id == 0)
ixgbe_identify_phy(hw);
hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
@@ -1037,6 +1041,9 @@ void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
case IXGBE_PCI_LINK_SPEED_8000:
hw->bus.speed = ixgbe_bus_speed_8000;
break;
+ case IXGBE_PCI_LINK_SPEED_16000:
+ hw->bus.speed = ixgbe_bus_speed_16000;
+ break;
default:
hw->bus.speed = ixgbe_bus_speed_unknown;
break;
@@ -1059,7 +1066,9 @@ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_get_bus_info_generic");
/* Get the negotiated link width and speed from PCI config space */
- link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
+ link_status = IXGBE_READ_PCIE_WORD(hw, hw->mac.type == ixgbe_mac_E610 ?
+ IXGBE_PCI_LINK_STATUS_E610 :
+ IXGBE_PCI_LINK_STATUS);
ixgbe_set_pci_config_data_generic(hw, link_status);
@@ -1878,7 +1887,6 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_get_eeprom_semaphore");
-
/* Get SMBI software semaphore between device drivers first */
for (i = 0; i < timeout; i++) {
/*
@@ -3363,7 +3371,6 @@ s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
-
secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
@@ -3692,6 +3699,10 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
break;
+ case ixgbe_mac_E610:
+ pcie_offset = IXGBE_PCIE_MSIX_E610_CAPS;
+ max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
+ break;
default:
return msix_count;
}
@@ -4139,7 +4150,6 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
return IXGBE_SUCCESS;
}
-
/**
* ixgbe_toggle_txdctl_generic - Toggle VF's queues
* @hw: pointer to hardware structure
@@ -4323,7 +4333,8 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
break;
case IXGBE_LINKS_SPEED_100_82599:
*speed = IXGBE_LINK_SPEED_100_FULL;
- if (hw->mac.type == ixgbe_mac_X550) {
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_E610) {
if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
*speed = IXGBE_LINK_SPEED_5GB_FULL;
}
@@ -5494,6 +5505,7 @@ void ixgbe_get_nvm_version(struct ixgbe_hw *hw,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
/* version of eeprom section */
if (ixgbe_read_eeprom(hw, NVM_EEP_OFFSET_X540, &word))
word = NVM_VER_INVALID;
@@ -5512,6 +5524,7 @@ void ixgbe_get_nvm_version(struct ixgbe_hw *hw,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
/* intel phy firmware version */
if (ixgbe_read_eeprom(hw, NVM_EEP_PHY_OFF_X540, &word))
word = NVM_VER_INVALID;
diff --git a/sys/dev/ixgbe/ixgbe_e610.c b/sys/dev/ixgbe/ixgbe_e610.c
new file mode 100644
index 000000000000..95c6dca416c6
--- /dev/null
+++ b/sys/dev/ixgbe/ixgbe_e610.c
@@ -0,0 +1,5567 @@
+/******************************************************************************
+ SPDX-License-Identifier: BSD-3-Clause
+
+ Copyright (c) 2025, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+#include "ixgbe_type.h"
+#include "ixgbe_e610.h"
+#include "ixgbe_x550.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+#include "ixgbe_api.h"
+
+/**
+ * ixgbe_init_aci - initialization routine for Admin Command Interface
+ * @hw: pointer to the hardware structure
+ *
+ * Initialize the ACI lock.
+ */
+void ixgbe_init_aci(struct ixgbe_hw *hw)
+{
+ ixgbe_init_lock(&hw->aci.lock);
+}
+
+/**
+ * ixgbe_shutdown_aci - shutdown routine for Admin Command Interface
+ * @hw: pointer to the hardware structure
+ *
+ * Destroy the ACI lock.
+ */
+void ixgbe_shutdown_aci(struct ixgbe_hw *hw)
+{
+ ixgbe_destroy_lock(&hw->aci.lock);
+}
+
+/**
+ * ixgbe_should_retry_aci_send_cmd_execute - decide if ACI command should
+ * be resent
+ * @opcode: ACI opcode
+ *
+ * Check if ACI command should be sent again depending on the provided opcode.
+ *
+ * Return: true if the sending command routine should be repeated,
+ * otherwise false.
+ */
+static bool ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)
+{
+ switch (opcode) {
+ case ixgbe_aci_opc_disable_rxen:
+ case ixgbe_aci_opc_get_phy_caps:
+ case ixgbe_aci_opc_get_link_status:
+ case ixgbe_aci_opc_get_link_topo:
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ixgbe_aci_send_cmd_execute - execute sending FW Admin Command to FW Admin
+ * Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * Admin Command is sent using CSR by setting descriptor and buffer in specific
+ * registers.
+ *
+ * Return: the exit code of the operation.
+ * * - IXGBE_SUCCESS - success.
+ * * - IXGBE_ERR_ACI_DISABLED - CSR mechanism is not enabled.
+ * * - IXGBE_ERR_ACI_BUSY - CSR mechanism is busy.
+ * * - IXGBE_ERR_PARAM - buf_size is too large, or the buf/buf_size
+ *  arguments are inconsistent.
+ * * - IXGBE_ERR_ACI_TIMEOUT - the Admin Command timed out.
+ * * - IXGBE_ERR_ACI_ERROR - the HICR register is in an invalid state,
+ *  a bad opcode was returned, or the Admin Command failed with an error.
+ */
+static s32
+ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size)
+{
+ u32 hicr = 0, tmp_buf_size = 0, i = 0;
+ u32 *raw_desc = (u32 *)desc;
+ s32 status = IXGBE_SUCCESS;
+ bool valid_buf = false;
+ u32 *tmp_buf = NULL;
+ u16 opcode = 0;
+
+ do {
+ hw->aci.last_status = IXGBE_ACI_RC_OK;
+
+ /* It's necessary to check if mechanism is enabled */
+ hicr = IXGBE_READ_REG(hw, PF_HICR);
+ if (!(hicr & PF_HICR_EN)) {
+ status = IXGBE_ERR_ACI_DISABLED;
+ break;
+ }
+ if (hicr & PF_HICR_C) {
+ hw->aci.last_status = IXGBE_ACI_RC_EBUSY;
+ status = IXGBE_ERR_ACI_BUSY;
+ break;
+ }
+ opcode = desc->opcode;
+
+ if (buf_size > IXGBE_ACI_MAX_BUFFER_SIZE) {
+ status = IXGBE_ERR_PARAM;
+ break;
+ }
+
+ if (buf)
+ desc->flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF);
+
+ /* Check if buf and buf_size are proper params */
+ if (desc->flags & IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF)) {
+ if ((buf && buf_size == 0) ||
+ (buf == NULL && buf_size)) {
+ status = IXGBE_ERR_PARAM;
+ break;
+ }
+ if (buf && buf_size)
+ valid_buf = true;
+ }
+
+		if (valid_buf) {
+ if (buf_size % 4 == 0)
+ tmp_buf_size = buf_size;
+ else
+ tmp_buf_size = (buf_size & (u16)(~0x03)) + 4;
+
+			tmp_buf = (u32 *)ixgbe_malloc(hw, tmp_buf_size);
+ if (!tmp_buf)
+ return IXGBE_ERR_OUT_OF_MEM;
+
+			/* tmp_buf is first filled with 0xFF, then the
+			 * contents of buf are copied into it. This keeps
+			 * buf_size valid and prevents reading past the end
+			 * of buf when buf_size is not a multiple of 4.
+			 */
+ memset(tmp_buf, 0xFF, tmp_buf_size);
+ memcpy(tmp_buf, buf, buf_size);
+
+ if (tmp_buf_size > IXGBE_ACI_LG_BUF)
+ desc->flags |=
+ IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_LB);
+
+ desc->datalen = IXGBE_CPU_TO_LE16(buf_size);
+
+ if (desc->flags & IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD)) {
+ for (i = 0; i < tmp_buf_size / 4; i++) {
+ IXGBE_WRITE_REG(hw, PF_HIBA(i),
+ IXGBE_LE32_TO_CPU(tmp_buf[i]));
+ }
+ }
+ }
+
+ /* Descriptor is written to specific registers */
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
+ IXGBE_WRITE_REG(hw, PF_HIDA(i),
+ IXGBE_LE32_TO_CPU(raw_desc[i]));
+
+		/* SW has to set the PF_HICR.C bit and clear the PF_HICR.SV
+		 * and PF_HICR.EV bits.
+		 */
+ hicr = IXGBE_READ_REG(hw, PF_HICR);
+ hicr = (hicr | PF_HICR_C) & ~(PF_HICR_SV | PF_HICR_EV);
+ IXGBE_WRITE_REG(hw, PF_HICR, hicr);
+
+ /* Wait for sync Admin Command response */
+ for (i = 0; i < IXGBE_ACI_SYNC_RESPONSE_TIMEOUT; i += 1) {
+ hicr = IXGBE_READ_REG(hw, PF_HICR);
+ if ((hicr & PF_HICR_SV) || !(hicr & PF_HICR_C))
+ break;
+
+ msec_delay(1);
+ }
+
+ /* Wait for async Admin Command response */
+ if ((hicr & PF_HICR_SV) && (hicr & PF_HICR_C)) {
+ for (i = 0; i < IXGBE_ACI_ASYNC_RESPONSE_TIMEOUT;
+ i += 1) {
+ hicr = IXGBE_READ_REG(hw, PF_HICR);
+ if ((hicr & PF_HICR_EV) || !(hicr & PF_HICR_C))
+ break;
+
+ msec_delay(1);
+ }
+ }
+
+ /* Read sync Admin Command response */
+ if ((hicr & PF_HICR_SV)) {
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
+ raw_desc[i] = IXGBE_READ_REG(hw, PF_HIDA(i));
+ raw_desc[i] = IXGBE_CPU_TO_LE32(raw_desc[i]);
+ }
+ }
+
+ /* Read async Admin Command response */
+ if ((hicr & PF_HICR_EV) && !(hicr & PF_HICR_C)) {
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
+ raw_desc[i] = IXGBE_READ_REG(hw, PF_HIDA_2(i));
+ raw_desc[i] = IXGBE_CPU_TO_LE32(raw_desc[i]);
+ }
+ }
+
+ /* Handle timeout and invalid state of HICR register */
+ if (hicr & PF_HICR_C) {
+ status = IXGBE_ERR_ACI_TIMEOUT;
+ break;
+ } else if (!(hicr & PF_HICR_SV) && !(hicr & PF_HICR_EV)) {
+ status = IXGBE_ERR_ACI_ERROR;
+ break;
+ }
+
+		/* For every command other than 0x0014, treat an opcode
+		 * mismatch as an error. The response to the 0x0014 command
+		 * read from HIDA_2 is an event descriptor, which is expected
+		 * to carry a different opcode than the command itself.
+		 */
+ if (desc->opcode != opcode &&
+ opcode != IXGBE_CPU_TO_LE16(ixgbe_aci_opc_get_fw_event)) {
+ status = IXGBE_ERR_ACI_ERROR;
+ break;
+ }
+
+ if (desc->retval != IXGBE_ACI_RC_OK) {
+ hw->aci.last_status = (enum ixgbe_aci_err)desc->retval;
+ status = IXGBE_ERR_ACI_ERROR;
+ break;
+ }
+
+		/* Copy the response data back into buf */
+ if (valid_buf && (desc->flags &
+ IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF))) {
+ for (i = 0; i < tmp_buf_size / 4; i++) {
+ tmp_buf[i] = IXGBE_READ_REG(hw, PF_HIBA(i));
+ tmp_buf[i] = IXGBE_CPU_TO_LE32(tmp_buf[i]);
+ }
+ memcpy(buf, tmp_buf, buf_size);
+ }
+ } while (0);
+
+ if (tmp_buf)
+ ixgbe_free(hw, tmp_buf);
+
+ return status;
+}
+
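The bounce-buffer round-up in ixgbe_aci_send_cmd_execute() keeps the PF_HIBA register writes whole-dword sized. Concretely:

	/* buf_size == 10: (10 & ~0x03) + 4 == 12, the next multiple of 4.
	 * buf_size == 12: the "% 4 == 0" branch keeps it at 12.
	 * The 0xFF pre-fill means the padding bytes written to PF_HIBA(i)
	 * come from the fill, never from memory past the caller's buffer.
	 */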
+/**
+ * ixgbe_aci_send_cmd - send FW Admin Command to FW Admin Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * Helper function to send FW Admin Commands to the FW Admin Command Interface.
+ *
+ * Retry sending the FW Admin Command multiple times to the FW ACI
+ * if the EBUSY Admin Command error is returned.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size)
+{
+ struct ixgbe_aci_desc desc_cpy;
+ enum ixgbe_aci_err last_status;
+ bool is_cmd_for_retry;
+ u8 *buf_cpy = NULL;
+ s32 status;
+ u16 opcode;
+ u8 idx = 0;
+
+ opcode = IXGBE_LE16_TO_CPU(desc->opcode);
+ is_cmd_for_retry = ixgbe_should_retry_aci_send_cmd_execute(opcode);
+ memset(&desc_cpy, 0, sizeof(desc_cpy));
+
+ if (is_cmd_for_retry) {
+ if (buf) {
+ buf_cpy = (u8 *)ixgbe_malloc(hw, buf_size);
+ if (!buf_cpy)
+ return IXGBE_ERR_OUT_OF_MEM;
+ }
+ memcpy(&desc_cpy, desc, sizeof(desc_cpy));
+ }
+
+ do {
+ ixgbe_acquire_lock(&hw->aci.lock);
+ status = ixgbe_aci_send_cmd_execute(hw, desc, buf, buf_size);
+ last_status = hw->aci.last_status;
+ ixgbe_release_lock(&hw->aci.lock);
+
+ if (!is_cmd_for_retry || status == IXGBE_SUCCESS ||
+ (last_status != IXGBE_ACI_RC_EBUSY && status != IXGBE_ERR_ACI_ERROR))
+ break;
+
+ if (buf)
+ memcpy(buf, buf_cpy, buf_size);
+ memcpy(desc, &desc_cpy, sizeof(desc_cpy));
+
+ msec_delay(IXGBE_ACI_SEND_DELAY_TIME_MS);
+ } while (++idx < IXGBE_ACI_SEND_MAX_EXECUTE);
+
+ if (buf_cpy)
+ ixgbe_free(hw, buf_cpy);
+
+ return status;
+}
+
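A minimal usage sketch for the retry wrapper, using a direct command with no buffer and assuming an initialized struct ixgbe_hw *hw (this mirrors ixgbe_aci_get_fw_ver() below):

	struct ixgbe_aci_desc desc;
	s32 status;

	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_ver);
	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
	if (status != IXGBE_SUCCESS) {
		/* hw->aci.last_status carries the ACI-level return code. */
	}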
+/**
+ * ixgbe_aci_check_event_pending - check if there are any pending events
+ * @hw: pointer to the HW struct
+ *
+ * Determine if there are any pending events.
+ *
+ * Return: true if there are any currently pending events
+ * otherwise false.
+ */
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw)
+{
+ u32 ep_bit_mask;
+ u32 fwsts;
+
+ ep_bit_mask = hw->bus.func ? GL_FWSTS_EP_PF1 : GL_FWSTS_EP_PF0;
+
+ /* Check state of Event Pending (EP) bit */
+ fwsts = IXGBE_READ_REG(hw, GL_FWSTS);
+ return (fwsts & ep_bit_mask) ? true : false;
+}
+
+/**
+ * ixgbe_aci_get_event - get an event from ACI
+ * @hw: pointer to the HW struct
+ * @e: event information structure
+ * @pending: optional flag signaling that there are more pending events
+ *
+ * Obtain an event from ACI and return its content
+ * through 'e' using ACI command (0x0014).
+ * Provide information if there are more events
+ * to retrieve through 'pending'.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
+ bool *pending)
+{
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ if (!e || (!e->msg_buf && e->buf_len) || (e->msg_buf && !e->buf_len))
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_acquire_lock(&hw->aci.lock);
+
+ /* Check if there are any events pending */
+ if (!ixgbe_aci_check_event_pending(hw)) {
+ status = IXGBE_ERR_ACI_NO_EVENTS;
+ goto aci_get_event_exit;
+ }
+
+ /* Obtain pending event */
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_fw_event);
+ status = ixgbe_aci_send_cmd_execute(hw, &desc, e->msg_buf, e->buf_len);
+ if (status)
+ goto aci_get_event_exit;
+
+ /* Returned 0x0014 opcode indicates that no event was obtained */
+ if (desc.opcode == IXGBE_CPU_TO_LE16(ixgbe_aci_opc_get_fw_event)) {
+ status = IXGBE_ERR_ACI_NO_EVENTS;
+ goto aci_get_event_exit;
+ }
+
+ /* Determine size of event data */
+ e->msg_len = MIN_T(u16, IXGBE_LE16_TO_CPU(desc.datalen), e->buf_len);
+ /* Write event descriptor to event info structure */
+ memcpy(&e->desc, &desc, sizeof(e->desc));
+
+ /* Check if there are any further events pending */
+	if (pending)
+		*pending = ixgbe_aci_check_event_pending(hw);
+
+aci_get_event_exit:
+ ixgbe_release_lock(&hw->aci.lock);
+
+ return status;
+}
+
+/**
+ * ixgbe_fill_dflt_direct_cmd_desc - fill ACI descriptor with default values.
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Helper function to fill the descriptor desc with default values
+ * and the provided opcode.
+ */
+void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode)
+{
+ /* zero out the desc */
+ memset(desc, 0, sizeof(*desc));
+ desc->opcode = IXGBE_CPU_TO_LE16(opcode);
+ desc->flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_SI);
+}
+
+/**
+ * ixgbe_aci_get_fw_ver - get the firmware version
+ * @hw: pointer to the HW struct
+ *
+ * Get the firmware version using ACI command (0x0001).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_ver *resp;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ resp = &desc.params.get_ver;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_ver);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ if (!status) {
+ hw->fw_branch = resp->fw_branch;
+ hw->fw_maj_ver = resp->fw_major;
+ hw->fw_min_ver = resp->fw_minor;
+ hw->fw_patch = resp->fw_patch;
+ hw->fw_build = IXGBE_LE32_TO_CPU(resp->fw_build);
+ hw->api_branch = resp->api_branch;
+ hw->api_maj_ver = resp->api_major;
+ hw->api_min_ver = resp->api_minor;
+ hw->api_patch = resp->api_patch;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_send_driver_ver - send the driver version to firmware
+ * @hw: pointer to the HW struct
+ * @dv: driver's major, minor version
+ *
+ * Send the driver version to the firmware
+ * using the ACI command (0x0002).
+ *
+ * Return: the exit code of the operation.
+ * Returns IXGBE_ERR_PARAM if dv is NULL.
+ */
+s32 ixgbe_aci_send_driver_ver(struct ixgbe_hw *hw, struct ixgbe_driver_ver *dv)
+{
+ struct ixgbe_aci_cmd_driver_ver *cmd;
+ struct ixgbe_aci_desc desc;
+ u16 len;
+
+ cmd = &desc.params.driver_ver;
+
+ if (!dv)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_driver_ver);
+
+ desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+ cmd->major_ver = dv->major_ver;
+ cmd->minor_ver = dv->minor_ver;
+ cmd->build_ver = dv->build_ver;
+ cmd->subbuild_ver = dv->subbuild_ver;
+
+ len = 0;
+ while (len < sizeof(dv->driver_string) &&
+ IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
+ len++;
+
+ return ixgbe_aci_send_cmd(hw, &desc, dv->driver_string, len);
+}
+
+/**
+ * ixgbe_aci_req_res - request a common resource
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ *
+ * Requests a common resource using the ACI command (0x0008).
+ * Specifies the maximum time the driver may hold the resource.
+ * If the requested resource is currently occupied by some other driver,
+ * a busy return value is returned and the timeout field value indicates the
+ * maximum time the current owner has to free it.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32
+ixgbe_aci_req_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access, u8 sdp_number,
+ u32 *timeout)
+{
+ struct ixgbe_aci_cmd_req_res *cmd_resp;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd_resp = &desc.params.res_owner;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_req_res);
+
+ cmd_resp->res_id = IXGBE_CPU_TO_LE16(res);
+ cmd_resp->access_type = IXGBE_CPU_TO_LE16(access);
+ cmd_resp->res_number = IXGBE_CPU_TO_LE32(sdp_number);
+ cmd_resp->timeout = IXGBE_CPU_TO_LE32(*timeout);
+ *timeout = 0;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ /* The completion specifies the maximum time in ms that the driver
+ * may hold the resource in the Timeout field.
+ * If the resource is held by some other driver, the command completes
+ * with a busy return value and the timeout field indicates the maximum
+ * time the current owner of the resource has to free it.
+ */
+ if (!status || hw->aci.last_status == IXGBE_ACI_RC_EBUSY)
+ *timeout = IXGBE_LE32_TO_CPU(cmd_resp->timeout);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_release_res - release a common resource using ACI
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @sdp_number: resource number
+ *
+ * Release a common resource using ACI command (0x0009).
+ *
+ * Return: the exit code of the operation.
+ */
+static s32
+ixgbe_aci_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ u8 sdp_number)
+{
+ struct ixgbe_aci_cmd_req_res *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.res_owner;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_release_res);
+
+ cmd->res_id = IXGBE_CPU_TO_LE16(res);
+ cmd->res_number = IXGBE_CPU_TO_LE32(sdp_number);
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_acquire_res - acquire the ownership of a resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ * @access: access type (read or write)
+ * @timeout: timeout in milliseconds
+ *
+ * Attempt to acquire ownership of a resource using ixgbe_aci_req_res
+ * over the ACI.
+ * If some other driver has previously acquired the resource and performed
+ * any necessary updates, IXGBE_ERR_ACI_NO_WORK is returned; the caller
+ * does not obtain the resource and has no further work to do.
+ * If needed, the function polls until the current lock owner times out.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access, u32 timeout)
+{
+#define IXGBE_RES_POLLING_DELAY_MS 10
+ u32 delay = IXGBE_RES_POLLING_DELAY_MS;
+ u32 res_timeout = timeout;
+ u32 retry_timeout = 0;
+ s32 status;
+
+ status = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+ /* A return code of IXGBE_ERR_ACI_NO_WORK means that another driver has
+ * previously acquired the resource and performed any necessary updates;
+ * in this case the caller does not obtain the resource and has no
+ * further work to do.
+ */
+ if (status == IXGBE_ERR_ACI_NO_WORK)
+ goto ixgbe_acquire_res_exit;
+
+	/* If necessary, poll until the current lock owner times out.
+ * Set retry_timeout to the timeout value reported by the FW in the
+ * response to the "Request Resource Ownership" (0x0008) Admin Command
+ * as it indicates the maximum time the current owner of the resource
+ * is allowed to hold it.
+ */
+ retry_timeout = res_timeout;
+ while (status && retry_timeout && res_timeout) {
+ msec_delay(delay);
+ retry_timeout = (retry_timeout > delay) ?
+ retry_timeout - delay : 0;
+ status = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+ if (status == IXGBE_ERR_ACI_NO_WORK)
+ /* lock free, but no work to do */
+ break;
+
+ if (!status)
+ /* lock acquired */
+ break;
+ }
+
+ixgbe_acquire_res_exit:
+ return status;
+}
+
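A hedged usage sketch of the acquire/release pairing; the resource ID and access type below are assumptions standing in for whichever ixgbe_aci_res_ids / ixgbe_aci_res_access_type enumerators a caller actually needs:

	/* Illustrative only: bracket work on a firmware-shared resource. */
	if (ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, IXGBE_RES_READ,
	    1000 /* ms, illustrative */) == IXGBE_SUCCESS) {
		/* ... access the shared resource ... */
		ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
	}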
+/**
+ * ixgbe_release_res - release a common resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ *
+ * Release a common resource using ixgbe_aci_release_res.
+ */
+void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res)
+{
+ u32 total_delay = 0;
+ s32 status;
+
+ status = ixgbe_aci_release_res(hw, res, 0);
+
+ /* There are some rare cases when trying to release the resource
+ * results in an admin command timeout, so handle them correctly.
+ */
+ while ((status == IXGBE_ERR_ACI_TIMEOUT) &&
+ (total_delay < IXGBE_ACI_RELEASE_RES_TIMEOUT)) {
+ msec_delay(1);
+ status = ixgbe_aci_release_res(hw, res, 0);
+ total_delay++;
+ }
+}
+
+/**
+ * ixgbe_parse_common_caps - Parse common device/function capabilities
+ * @hw: pointer to the HW struct
+ * @caps: pointer to common capabilities structure
+ * @elem: the capability element to parse
+ * @prefix: message prefix for tracing capabilities
+ *
+ * Given a capability element, extract relevant details into the common
+ * capability structure.
+ *
+ * Return: true if the capability matches one of the common capability ids,
+ * false otherwise.
+ */
+static bool
+ixgbe_parse_common_caps(struct ixgbe_hw *hw, struct ixgbe_hw_common_caps *caps,
+ struct ixgbe_aci_cmd_list_caps_elem *elem,
+ const char *prefix)
+{
+ u32 logical_id = IXGBE_LE32_TO_CPU(elem->logical_id);
+ u32 phys_id = IXGBE_LE32_TO_CPU(elem->phys_id);
+ u32 number = IXGBE_LE32_TO_CPU(elem->number);
+ u16 cap = IXGBE_LE16_TO_CPU(elem->cap);
+ bool found = true;
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
+ caps->valid_functions = number;
+ break;
+ case IXGBE_ACI_CAPS_SRIOV:
+ caps->sr_iov_1_1 = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_VMDQ:
+ caps->vmdq = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_DCB:
+ caps->dcb = (number == 1);
+ caps->active_tc_bitmap = logical_id;
+ caps->maxtc = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_RSS:
+ caps->rss_table_size = number;
+ caps->rss_table_entry_width = logical_id;
+ break;
+ case IXGBE_ACI_CAPS_RXQS:
+ caps->num_rxq = number;
+ caps->rxq_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_TXQS:
+ caps->num_txq = number;
+ caps->txq_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_MSIX:
+ caps->num_msix_vectors = number;
+ caps->msix_vector_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_NVM_VER:
+ break;
+ case IXGBE_ACI_CAPS_NVM_MGMT:
+ caps->sec_rev_disabled =
+ (number & IXGBE_NVM_MGMT_SEC_REV_DISABLED) ?
+ true : false;
+ caps->update_disabled =
+ (number & IXGBE_NVM_MGMT_UPDATE_DISABLED) ?
+ true : false;
+ caps->nvm_unified_update =
+ (number & IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
+ true : false;
+ caps->netlist_auth =
+ (number & IXGBE_NVM_MGMT_NETLIST_AUTH_SUPPORT) ?
+ true : false;
+ break;
+ case IXGBE_ACI_CAPS_MAX_MTU:
+ caps->max_mtu = number;
+ break;
+ case IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE:
+ caps->pcie_reset_avoidance = (number > 0);
+ break;
+ case IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT:
+ caps->reset_restrict_support = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3:
+ {
+ u8 index = cap - IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0;
+
+ caps->ext_topo_dev_img_ver_high[index] = number;
+ caps->ext_topo_dev_img_ver_low[index] = logical_id;
+ caps->ext_topo_dev_img_part_num[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M) >>
+ IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S;
+ caps->ext_topo_dev_img_load_en[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
+ caps->ext_topo_dev_img_prog_en[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
+ break;
+ }
+ case IXGBE_ACI_CAPS_OROM_RECOVERY_UPDATE:
+ caps->orom_recovery_update = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_NEXT_CLUSTER_ID:
+ caps->next_cluster_id_support = (number == 1);
+ DEBUGOUT2("%s: next_cluster_id_support = %d\n",
+ prefix, caps->next_cluster_id_support);
+ break;
+ default:
+ /* Not one of the recognized common capabilities */
+ found = false;
+ }
+
+ return found;
+}
+
+/**
+ * ixgbe_hweight8 - count set bits among the 8 lowest bits
+ * @w: variable storing set bits to count
+ *
+ * Return: the number of set bits among the 8 lowest bits in the provided value.
+ */
+static u8 ixgbe_hweight8(u32 w)
+{
+ u8 hweight = 0, i;
+
+ for (i = 0; i < 8; i++)
+ if (w & (1 << i))
+ hweight++;
+
+ return hweight;
+}
+
+/**
+ * ixgbe_hweight32 - count set bits among the 32 lowest bits
+ * @w: variable storing set bits to count
+ *
+ * Return: the number of set bits among the 32 lowest bits in the
+ * provided value.
+ */
+static u8 ixgbe_hweight32(u32 w)
+{
+	u32 bit_mask = 0x1, i;
+	u8 bit_cnt = 0;
+
+	for (i = 0; i < 32; i++) {
+		if (w & bit_mask)
+			bit_cnt++;
+
+		bit_mask <<= 1;
+	}
+
+	return bit_cnt;
+}
+
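Quick sanity values for the two popcount helpers:

	/* ixgbe_hweight8(0xB3)        == 5  (0xB3 == 10110011b)
	 * ixgbe_hweight32(0xF0F0F0F0) == 16 (four 0xF0 bytes, 4 bits each)
	 */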
+/**
+ * ixgbe_parse_valid_functions_cap - Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS for device capabilities.
+ */
+static void
+ixgbe_parse_valid_functions_cap(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ dev_p->num_funcs = ixgbe_hweight32(number);
+}
+
+/**
+ * ixgbe_parse_vf_dev_caps - Parse IXGBE_ACI_CAPS_VF device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VF for device capabilities.
+ */
+static void ixgbe_parse_vf_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ dev_p->num_vfs_exposed = number;
+}
+
+/**
+ * ixgbe_parse_vsi_dev_caps - Parse IXGBE_ACI_CAPS_VSI device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VSI for device capabilities.
+ */
+static void ixgbe_parse_vsi_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ dev_p->num_vsi_allocd_to_host = number;
+}
+
+/**
+ * ixgbe_parse_fdir_dev_caps - Parse IXGBE_ACI_CAPS_FD device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_FD for device capabilities.
+ */
+static void ixgbe_parse_fdir_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ dev_p->num_flow_director_fltr = number;
+}
+
+/**
+ * ixgbe_parse_dev_caps - Parse device capabilities
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @buf: buffer containing the device capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse the device (0x000B) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_common_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the device capabilities structure.
+ */
+static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ void *buf, u32 cap_count)
+{
+ struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
+
+ memset(dev_p, 0, sizeof(*dev_p));
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = IXGBE_LE16_TO_CPU(cap_resp[i].cap);
+ bool found;
+
+ found = ixgbe_parse_common_caps(hw, &dev_p->common_cap,
+ &cap_resp[i], "dev caps");
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
+ ixgbe_parse_valid_functions_cap(hw, dev_p,
+ &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VF:
+ ixgbe_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VSI:
+ ixgbe_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_FD:
+ ixgbe_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ default:
+ /* Don't list common capabilities as unknown */
+ if (!found)
+ break;
+ }
+ }
+}
+
+/**
+ * ixgbe_parse_vf_func_caps - Parse IXGBE_ACI_CAPS_VF function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for IXGBE_ACI_CAPS_VF.
+ */
+static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 logical_id = IXGBE_LE32_TO_CPU(cap->logical_id);
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ func_p->num_allocd_vfs = number;
+ func_p->vf_base_id = logical_id;
+}
+
+/**
+ * ixgbe_get_num_per_func - determine number of resources per PF
+ * @hw: pointer to the HW structure
+ * @max: value to be evenly split between each PF
+ *
+ * Determine the number of valid functions by going through the bitmap returned
+ * from parsing capabilities and use this to calculate the number of resources
+ * per PF based on the max value passed in.
+ *
+ * Return: the number of resources per PF, or 0 if no PFs are available.
+ */
+static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max)
+{
+ u8 funcs;
+
+#define IXGBE_CAPS_VALID_FUNCS_M 0xFF
+ funcs = ixgbe_hweight8(hw->dev_caps.common_cap.valid_functions &
+ IXGBE_CAPS_VALID_FUNCS_M);
+
+ if (!funcs)
+ return 0;
+
+ return max / funcs;
+}
+
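A worked example of the even split (bitmap value hypothetical):

	/* valid_functions == 0x0F -> ixgbe_hweight8() counts 4 PFs, so
	 * ixgbe_get_num_per_func(hw, 768) == 768 / 4 == 192 per PF.
	 */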
+/**
+ * ixgbe_parse_vsi_func_caps - Parse IXGBE_ACI_CAPS_VSI function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for IXGBE_ACI_CAPS_VSI.
+ */
+static void ixgbe_parse_vsi_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ func_p->guar_num_vsi = ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI);
+}
+
+/**
+ * ixgbe_parse_func_caps - Parse function capabilities
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @buf: buffer containing the function capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse the function (0x000A) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_common_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the function capabilities structure.
+ */
+static void ixgbe_parse_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ void *buf, u32 cap_count)
+{
+ struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
+
+ memset(func_p, 0, sizeof(*func_p));
+
+ for (i = 0; i < cap_count; i++) {
+		u16 cap = IXGBE_LE16_TO_CPU(cap_resp[i].cap);
+
+ ixgbe_parse_common_caps(hw, &func_p->common_cap,
+ &cap_resp[i], "func caps");
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VF:
+ ixgbe_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VSI:
+ ixgbe_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
+ break;
+ default:
+ /* Don't list common capabilities as unknown */
+ break;
+ }
+ }
+}
+
+/**
+ * ixgbe_aci_list_caps - query function/device capabilities
+ * @hw: pointer to the HW struct
+ * @buf: a buffer to hold the capabilities
+ * @buf_size: size of the buffer
+ * @cap_count: if not NULL, set to the number of capabilities reported
+ * @opc: capabilities type to discover, device or function
+ *
+ * Get the function (0x000A) or device (0x000B) capabilities description from
+ * firmware and store it in the buffer.
+ *
+ * If the cap_count pointer is not NULL, then it is set to the number of
+ * capabilities firmware will report. Note that if the buffer size is too
+ * small, it is possible the command will return IXGBE_ERR_OUT_OF_MEM. The
+ * cap_count will still be updated in this case. It is recommended that the
+ * buffer size be set to IXGBE_ACI_MAX_BUFFER_SIZE (the largest possible
+ * buffer that firmware could return) to avoid this.
+ *
+ * Return: the exit code of the operation.
+ * Exit code of IXGBE_ERR_OUT_OF_MEM means the buffer size is too small.
+ */
+s32 ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+ u32 *cap_count, enum ixgbe_aci_opc opc)
+{
+ struct ixgbe_aci_cmd_list_caps *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.get_cap;
+
+ if (opc != ixgbe_aci_opc_list_func_caps &&
+ opc != ixgbe_aci_opc_list_dev_caps)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, opc);
+ status = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
+
+ if (cap_count)
+ *cap_count = IXGBE_LE32_TO_CPU(cmd->count);
+
+ return status;
+}
+
+/**
+ * ixgbe_discover_dev_caps - Read and extract device capabilities
+ * @hw: pointer to the hardware structure
+ * @dev_caps: pointer to device capabilities structure
+ *
+ * Read the device capabilities and extract them into the dev_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_caps)
+{
+	u32 cap_count = 0;
+	u8 *cbuf = NULL;
+	s32 status;
+
+	cbuf = (u8 *)ixgbe_malloc(hw, IXGBE_ACI_MAX_BUFFER_SIZE);
+ if (!cbuf)
+ return IXGBE_ERR_OUT_OF_MEM;
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+ sizeof(struct ixgbe_aci_cmd_list_caps_elem);
+
+ status = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+ &cap_count,
+ ixgbe_aci_opc_list_dev_caps);
+ if (!status)
+ ixgbe_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
+
+ if (cbuf)
+ ixgbe_free(hw, cbuf);
+
+ return status;
+}
+
+/**
+ * ixgbe_discover_func_caps - Read and extract function capabilities
+ * @hw: pointer to the hardware structure
+ * @func_caps: pointer to function capabilities structure
+ *
+ * Read the function capabilities and extract them into the func_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_discover_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_caps)
+{
+ u32 cap_count = 0;
+ u8 *cbuf = NULL;
+ s32 status;
+
+ cbuf = (u8 *)ixgbe_malloc(hw, IXGBE_ACI_MAX_BUFFER_SIZE);
+ if (!cbuf)
+ return IXGBE_ERR_OUT_OF_MEM;
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+ sizeof(struct ixgbe_aci_cmd_list_caps_elem);
+
+ status = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+ &cap_count,
+ ixgbe_aci_opc_list_func_caps);
+ if (!status)
+ ixgbe_parse_func_caps(hw, func_caps, cbuf, cap_count);
+
+ if (cbuf)
+ ixgbe_free(hw, cbuf);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_caps - get info about the HW
+ * @hw: pointer to the hardware structure
+ *
+ * Retrieve both device and function capabilities.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_caps(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ status = ixgbe_discover_dev_caps(hw, &hw->dev_caps);
+ if (status)
+ return status;
+
+ return ixgbe_discover_func_caps(hw, &hw->func_caps);
+}
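+
+/*
+ * Illustrative call site (a sketch, not part of this change): an attach
+ * path would typically call ixgbe_get_caps() once after the ACI is
+ * operational, after which hw->dev_caps and hw->func_caps are valid:
+ *
+ *	if (ixgbe_get_caps(hw) != IXGBE_SUCCESS)
+ *		(handle the error; capabilities are unknown)
+ */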
+
+/**
+ * ixgbe_aci_disable_rxen - disable RX
+ * @hw: pointer to the HW struct
+ *
+ * Request a safe disable of Receive Enable using ACI command (0x000C).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_disable_rxen(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_disable_rxen *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.disable_rxen;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_disable_rxen);
+
+ cmd->lport_num = (u8)hw->bus.func;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_get_phy_caps - returns PHY capabilities
+ * @hw: pointer to the HW struct
+ * @qual_mods: report qualified modules
+ * @report_mode: report mode capabilities
+ * @pcaps: structure for PHY capabilities to be filled
+ *
+ * Returns the various PHY capabilities supported on the Port
+ * using ACI command (0x0600).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps)
+{
+ struct ixgbe_aci_cmd_get_phy_caps *cmd;
+ u16 pcaps_size = sizeof(*pcaps);
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.get_phy;
+
+ if (!pcaps || (report_mode & ~IXGBE_ACI_REPORT_MODE_M))
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_phy_caps);
+
+ if (qual_mods)
+ cmd->param0 |= IXGBE_CPU_TO_LE16(IXGBE_ACI_GET_PHY_RQM);
+
+ cmd->param0 |= IXGBE_CPU_TO_LE16(report_mode);
+ status = ixgbe_aci_send_cmd(hw, &desc, pcaps, pcaps_size);
+
+ if (status == IXGBE_SUCCESS &&
+ report_mode == IXGBE_ACI_REPORT_TOPO_CAP_MEDIA) {
+ hw->phy.phy_type_low = IXGBE_LE64_TO_CPU(pcaps->phy_type_low);
+ hw->phy.phy_type_high = IXGBE_LE64_TO_CPU(pcaps->phy_type_high);
+ memcpy(hw->link.link_info.module_type, &pcaps->module_type,
+ sizeof(hw->link.link_info.module_type));
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_phy_caps_equals_cfg - check if capabilities match the PHY config
+ * @phy_caps: PHY capabilities
+ * @phy_cfg: PHY configuration
+ *
+ * Helper function to determine if PHY capabilities match PHY
+ * configuration
+ *
+ * Return: true if PHY capabilities match PHY configuration.
+ */
+bool
+ixgbe_phy_caps_equals_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *phy_caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *phy_cfg)
+{
+ u8 caps_mask, cfg_mask;
+
+ if (!phy_caps || !phy_cfg)
+ return false;
+
+ /* These bits are not common between capabilities and configuration.
+ * Do not use them to determine equality.
+ */
+ caps_mask = IXGBE_ACI_PHY_CAPS_MASK & ~(IXGBE_ACI_PHY_AN_MODE |
+ IXGBE_ACI_PHY_EN_MOD_QUAL);
+ cfg_mask = IXGBE_ACI_PHY_ENA_VALID_MASK &
+ ~IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
+ phy_caps->phy_type_high != phy_cfg->phy_type_high ||
+ ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
+ phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
+ phy_caps->eee_cap != phy_cfg->eee_cap ||
+ phy_caps->eeer_value != phy_cfg->eeer_value ||
+ phy_caps->link_fec_options != phy_cfg->link_fec_opt)
+ return false;
+
+ return true;
+}
+
+/**
+ * ixgbe_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
+ * @caps: PHY ability structure to copy data from
+ * @cfg: PHY configuration structure to copy data to
+ *
+ * Helper function to copy data from PHY capabilities data structure
+ * to PHY configuration data structure
+ */
+void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
+{
+ if (!caps || !cfg)
+ return;
+
+ memset(cfg, 0, sizeof(*cfg));
+ cfg->phy_type_low = caps->phy_type_low;
+ cfg->phy_type_high = caps->phy_type_high;
+ cfg->caps = caps->caps;
+ cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
+ cfg->eee_cap = caps->eee_cap;
+ cfg->eeer_value = caps->eeer_value;
+ cfg->link_fec_opt = caps->link_fec_options;
+ cfg->module_compliance_enforcement =
+ caps->module_compliance_enforcement;
+}
+
+/**
+ * ixgbe_aci_set_phy_cfg - set PHY configuration
+ * @hw: pointer to the HW struct
+ * @cfg: structure with PHY configuration data to be set
+ *
+ * Set the various PHY configuration parameters supported on the Port
+ * using ACI command (0x0601).
+ * One or more of the Set PHY config parameters may be ignored in an MFP
+ * mode as the PF may not have the privilege to set some of the PHY Config
+ * parameters.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
+{
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ if (!cfg)
+ return IXGBE_ERR_PARAM;
+
+ /* Ensure that only valid bits of cfg->caps can be turned on. */
+ if (cfg->caps & ~IXGBE_ACI_PHY_ENA_VALID_MASK)
+ cfg->caps &= IXGBE_ACI_PHY_ENA_VALID_MASK;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg);
+ desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg));
+
+ if (!status)
+ hw->phy.curr_user_phy_cfg = *cfg;
+
+ return status;
+}
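+
+/*
+ * Illustrative read-modify-write flow (sketch only; all names used here
+ * appear elsewhere in this file): fetch the PHY capabilities, convert them
+ * to a configuration with ixgbe_copy_phy_caps_to_cfg(), adjust the desired
+ * fields, then apply the result:
+ *
+ *	struct ixgbe_aci_cmd_get_phy_caps_data pcaps = { 0 };
+ *	struct ixgbe_aci_cmd_set_phy_cfg_data cfg = { 0 };
+ *
+ *	status = ixgbe_aci_get_phy_caps(hw, false,
+ *	    IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, &pcaps);
+ *	if (!status) {
+ *		ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
+ *		cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+ *		status = ixgbe_aci_set_phy_cfg(hw, &cfg);
+ *	}
+ */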
+
+/**
+ * ixgbe_aci_set_link_restart_an - set up link and restart AN
+ * @hw: pointer to the HW struct
+ * @ena_link: if true: enable link, if false: disable link
+ *
+ * Function sets up the link and restarts the Auto-Negotiation over the link.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link)
+{
+ struct ixgbe_aci_cmd_restart_an *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.restart_an;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_restart_an);
+
+ cmd->cmd_flags = IXGBE_ACI_RESTART_AN_LINK_RESTART;
+ if (ena_link)
+ cmd->cmd_flags |= IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+ else
+ cmd->cmd_flags &= ~IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_is_media_cage_present - check if media cage is present
+ * @hw: pointer to the HW struct
+ *
+ * Identify presence of media cage using the ACI command (0x06E0).
+ *
+ * Return: true if media cage is present, else false. If no cage, then
+ * media type is backplane or BASE-T.
+ */
+static bool ixgbe_is_media_cage_present(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_link_topo *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.get_link_topo;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
+
+ cmd->addr.topo_params.node_type_ctx =
+ (IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT <<
+ IXGBE_ACI_LINK_TOPO_NODE_CTX_S);
+
+ /* set node type */
+ cmd->addr.topo_params.node_type_ctx |=
+ (IXGBE_ACI_LINK_TOPO_NODE_TYPE_M &
+ IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE);
+
+ /* Node type cage can be used to determine if cage is present. If the
+ * ACI command returns an error (ENOENT), then no cage is present. If no
+ * cage is present, the connection type is backplane or BASE-T.
+ */
+ return !ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL);
+}
+
+/**
+ * ixgbe_get_media_type_from_phy_type - Gets media type based on phy type
+ * @hw: pointer to the HW struct
+ *
+ * Try to identify the media type based on the PHY type.
+ * phy_type_low is checked first, then phy_type_high.
+ * If more than one media type is set, or none can be identified,
+ * ixgbe_media_type_unknown is returned.
+ *
+ * Return: the media type as an enum ixgbe_media_type value.
+ */
+static enum ixgbe_media_type
+ixgbe_get_media_type_from_phy_type(struct ixgbe_hw *hw)
+{
+ struct ixgbe_link_status *hw_link_info;
+
+ if (!hw)
+ return ixgbe_media_type_unknown;
+
+ hw_link_info = &hw->link.link_info;
+ if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
+ /* If more than one media type is selected, report unknown */
+ return ixgbe_media_type_unknown;
+
+ if (hw_link_info->phy_type_low) {
+ /* 1G SGMII is a special case where some DA cable PHYs
+ * may show this as an option when it really shouldn't
+ * be since SGMII is meant to be between a MAC and a PHY
+ * in a backplane. Try to detect this case and handle it
+ */
+ if (hw_link_info->phy_type_low == IXGBE_PHY_TYPE_LOW_1G_SGMII &&
+ (hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
+ IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
+ hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
+ IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
+ return ixgbe_media_type_da;
+
+ switch (hw_link_info->phy_type_low) {
+ case IXGBE_PHY_TYPE_LOW_1000BASE_SX:
+ case IXGBE_PHY_TYPE_LOW_1000BASE_LX:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_SR:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_LR:
+ return ixgbe_media_type_fiber;
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+ return ixgbe_media_type_fiber;
+ case IXGBE_PHY_TYPE_LOW_100BASE_TX:
+ case IXGBE_PHY_TYPE_LOW_1000BASE_T:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_T:
+ case IXGBE_PHY_TYPE_LOW_5GBASE_T:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_T:
+ return ixgbe_media_type_copper;
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_DA:
+ return ixgbe_media_type_da;
+ case IXGBE_PHY_TYPE_LOW_1000BASE_KX:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_KX:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_X:
+ case IXGBE_PHY_TYPE_LOW_5GBASE_KR:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_C2C:
+ return ixgbe_media_type_backplane;
+ }
+ } else {
+ switch (hw_link_info->phy_type_high) {
+ case IXGBE_PHY_TYPE_HIGH_10BASE_T:
+ return ixgbe_media_type_copper;
+ }
+ }
+ return ixgbe_media_type_unknown;
+}
+
+/**
+ * ixgbe_update_link_info - update status of the HW network link
+ * @hw: pointer to the HW struct
+ *
+ * Update the status of the HW network link.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_update_link_info(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps;
+ struct ixgbe_link_status *li;
+ s32 status;
+
+ if (!hw)
+ return IXGBE_ERR_PARAM;
+
+ li = &hw->link.link_info;
+
+ status = ixgbe_aci_get_link_info(hw, true, NULL);
+ if (status)
+ return status;
+
+ if (li->link_info & IXGBE_ACI_MEDIA_AVAILABLE) {
+ pcaps = (struct ixgbe_aci_cmd_get_phy_caps_data *)
+ ixgbe_malloc(hw, sizeof(*pcaps));
+ if (!pcaps)
+ return IXGBE_ERR_OUT_OF_MEM;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ pcaps);
+
+ if (status == IXGBE_SUCCESS)
+ memcpy(li->module_type, &pcaps->module_type,
+ sizeof(li->module_type));
+
+ ixgbe_free(hw, pcaps);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_get_link_status - get status of the HW network link
+ * @hw: pointer to the HW struct
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
+ *
+ * Variable link_up is true if link is up, false if link is down.
+ * The variable link_up is invalid if the return status is non-zero. As a
+ * result of this call, link status reporting becomes enabled.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ if (!hw || !link_up)
+ return IXGBE_ERR_PARAM;
+
+ if (hw->link.get_link_info) {
+ status = ixgbe_update_link_info(hw);
+ if (status)
+ return status;
+ }
+
+ *link_up = hw->link.link_info.link_info & IXGBE_ACI_LINK_UP;
+
+ return status;
+}
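+
+/*
+ * Illustrative usage (sketch): the bool result is only meaningful when the
+ * call itself succeeds:
+ *
+ *	bool link_up = false;
+ *
+ *	if (ixgbe_get_link_status(hw, &link_up) == IXGBE_SUCCESS && link_up)
+ *		(link is up)
+ */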
+
+/**
+ * ixgbe_aci_get_link_info - get the link status
+ * @hw: pointer to the HW struct
+ * @ena_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ *
+ * Get the current Link Status using ACI command (0x0607).
+ * The current link can be optionally provided to update
+ * the status.
+ *
+ * Return: the link status of the adapter.
+ */
+s32 ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+ struct ixgbe_link_status *link)
+{
+ struct ixgbe_aci_cmd_get_link_status_data link_data = { 0 };
+ struct ixgbe_aci_cmd_get_link_status *resp;
+ struct ixgbe_link_status *li_old, *li;
+ struct ixgbe_fc_info *hw_fc_info;
+ struct ixgbe_aci_desc desc;
+ bool tx_pause, rx_pause;
+ u8 cmd_flags;
+ s32 status;
+
+ if (!hw)
+ return IXGBE_ERR_PARAM;
+
+ li_old = &hw->link.link_info_old;
+ li = &hw->link.link_info;
+ hw_fc_info = &hw->fc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
+ cmd_flags = (ena_lse) ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS;
+ resp = &desc.params.get_link_status;
+ resp->cmd_flags = cmd_flags;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* save off old link status information */
+ *li_old = *li;
+
+ /* update current link status information */
+ li->link_speed = IXGBE_LE16_TO_CPU(link_data.link_speed);
+ li->phy_type_low = IXGBE_LE64_TO_CPU(link_data.phy_type_low);
+ li->phy_type_high = IXGBE_LE64_TO_CPU(link_data.phy_type_high);
+ li->link_info = link_data.link_info;
+ li->link_cfg_err = link_data.link_cfg_err;
+ li->an_info = link_data.an_info;
+ li->ext_info = link_data.ext_info;
+ li->max_frame_size = IXGBE_LE16_TO_CPU(link_data.max_frame_size);
+ li->fec_info = link_data.cfg & IXGBE_ACI_FEC_MASK;
+ li->topo_media_conflict = link_data.topo_media_conflict;
+ li->pacing = link_data.cfg & (IXGBE_ACI_CFG_PACING_M |
+ IXGBE_ACI_CFG_PACING_TYPE_M);
+
+ /* update fc info */
+ tx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_TX);
+ rx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_RX);
+ if (tx_pause && rx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_full;
+ else if (tx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_tx_pause;
+ else if (rx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_rx_pause;
+ else
+ hw_fc_info->current_mode = ixgbe_fc_none;
+
+ li->lse_ena = !!(resp->cmd_flags & IXGBE_ACI_LSE_IS_ENABLED);
+
+ /* save link status information */
+ if (link)
+ *link = *li;
+
+ /* flag cleared so calling functions don't call ACI again */
+ hw->link.get_link_info = false;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_aci_set_event_mask - set event mask
+ * @hw: pointer to the HW struct
+ * @port_num: port number of the physical function
+ * @mask: event mask to be set
+ *
+ * Set the event mask using ACI command (0x0613).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask)
+{
+ struct ixgbe_aci_cmd_set_event_mask *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.set_event_mask;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_event_mask);
+
+ cmd->event_mask = IXGBE_CPU_TO_LE16(mask);
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_configure_lse - enable/disable link status events
+ * @hw: pointer to the HW struct
+ * @activate: bool value deciding if LSE should be enabled or disabled
+ * @mask: event mask to be set; a set bit means deactivation of the
+ * corresponding event
+ *
+ * Set the event mask and then enable or disable link status events
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask)
+{
+ s32 rc;
+
+ rc = ixgbe_aci_set_event_mask(hw, (u8)hw->bus.func, mask);
+ if (rc)
+ return rc;
+
+ /* Enable or disable link status event generation by firmware */
+ rc = ixgbe_aci_get_link_info(hw, activate, NULL);
+ if (rc)
+ return rc;
+ return IXGBE_SUCCESS;
+}
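+
+/*
+ * Illustrative usage (sketch): since a set mask bit deactivates the
+ * corresponding event, a mask of 0 enables reporting of all link status
+ * events:
+ *
+ *	status = ixgbe_configure_lse(hw, true, 0);
+ */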
+
+/**
+ * ixgbe_aci_get_netlist_node - get a node handle
+ * @hw: pointer to the hw struct
+ * @cmd: get_link_topo AQ structure
+ * @node_part_number: output node part number if node found
+ * @node_handle: output node handle parameter if node found
+ *
+ * Get the netlist node and assign it to
+ * the provided handle using ACI command (0x06E0).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle)
+{
+ struct ixgbe_aci_desc desc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
+ desc.params.get_link_topo = *cmd;
+
+ if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0))
+ return IXGBE_ERR_NOT_SUPPORTED;
+
+ if (node_handle)
+ *node_handle =
+ IXGBE_LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
+ if (node_part_number)
+ *node_part_number = desc.params.get_link_topo.node_part_num;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_find_netlist_node - find a node handle
+ * @hw: pointer to the hw struct
+ * @node_type_ctx: type of netlist node to look for
+ * @node_part_number: node part number to look for
+ * @node_handle: output parameter if node found - optional
+ *
+ * Find and return the node handle for a given node type and part number in
+ * the netlist. If the node is found, IXGBE_SUCCESS is returned; otherwise
+ * IXGBE_ERR_NOT_SUPPORTED. If @node_handle is provided, it is set to the
+ * handle of the found node.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_find_netlist_node(struct ixgbe_hw *hw, u8 node_type_ctx,
+ u8 node_part_number, u16 *node_handle)
+{
+ struct ixgbe_aci_cmd_get_link_topo cmd;
+ u8 rec_node_part_number;
+ u16 rec_node_handle;
+ s32 status;
+ u8 idx;
+
+ for (idx = 0; idx < IXGBE_MAX_NETLIST_SIZE; idx++) {
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.addr.topo_params.node_type_ctx =
+ (node_type_ctx << IXGBE_ACI_LINK_TOPO_NODE_TYPE_S);
+ cmd.addr.topo_params.index = idx;
+
+ status = ixgbe_aci_get_netlist_node(hw, &cmd,
+ &rec_node_part_number,
+ &rec_node_handle);
+ if (status)
+ return status;
+
+ if (rec_node_part_number == node_part_number) {
+ if (node_handle)
+ *node_handle = rec_node_handle;
+ return IXGBE_SUCCESS;
+ }
+ }
+
+ return IXGBE_ERR_NOT_SUPPORTED;
+}
+
+/**
+ * ixgbe_aci_read_i2c - read I2C register value
+ * @hw: pointer to the hw struct
+ * @topo_addr: topology address for a device to communicate with
+ * @bus_addr: 7-bit I2C bus address
+ * @addr: I2C memory address (I2C offset) with up to 16 bits
+ * @params: I2C parameters: bit [7] - Repeated start,
+ * bits [6:5] data offset size,
+ * bit [4] - I2C address type, bits [3:0] - data size
+ * to read (0-16 bytes)
+ * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
+ *
+ * Read the value of the I2C pin register using ACI command (0x06E2).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_read_i2c(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data)
+{
+ struct ixgbe_aci_desc desc = { 0 };
+ struct ixgbe_aci_cmd_i2c *cmd;
+ u8 data_size;
+ s32 status;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_i2c);
+ cmd = &desc.params.read_write_i2c;
+
+ if (!data)
+ return IXGBE_ERR_PARAM;
+
+ data_size = (params & IXGBE_ACI_I2C_DATA_SIZE_M) >>
+ IXGBE_ACI_I2C_DATA_SIZE_S;
+
+ cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(bus_addr);
+ cmd->topo_addr = topo_addr;
+ cmd->i2c_params = params;
+ cmd->i2c_addr = addr;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!status) {
+ struct ixgbe_aci_cmd_read_i2c_resp *resp;
+ u8 i;
+
+ resp = &desc.params.read_i2c_resp;
+ for (i = 0; i < data_size; i++) {
+ *data = resp->i2c_data[i];
+ data++;
+ }
+ }
+
+ return status;
+}
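+
+/*
+ * Illustrative params encoding (sketch; 0x50 is a hypothetical bus
+ * address): a 2-byte read with no repeated start places the data size in
+ * bits [3:0], i.e. under IXGBE_ACI_I2C_DATA_SIZE_M:
+ *
+ *	u8 params = (2 << IXGBE_ACI_I2C_DATA_SIZE_S) &
+ *	    IXGBE_ACI_I2C_DATA_SIZE_M;
+ *	u8 buf[2];
+ *
+ *	status = ixgbe_aci_read_i2c(hw, topo_addr, 0x50, 0, params, buf);
+ */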
+
+/**
+ * ixgbe_aci_write_i2c - write a value to I2C register
+ * @hw: pointer to the hw struct
+ * @topo_addr: topology address for a device to communicate with
+ * @bus_addr: 7-bit I2C bus address
+ * @addr: I2C memory address (I2C offset) with up to 16 bits
+ * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size
+ * to write (0-7 bytes)
+ * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
+ *
+ * Write a value to the I2C pin register using ACI command (0x06E3).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_write_i2c(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data)
+{
+ struct ixgbe_aci_desc desc = { 0 };
+ struct ixgbe_aci_cmd_i2c *cmd;
+ u8 i, data_size;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_write_i2c);
+ cmd = &desc.params.read_write_i2c;
+
+ data_size = (params & IXGBE_ACI_I2C_DATA_SIZE_M) >>
+ IXGBE_ACI_I2C_DATA_SIZE_S;
+
+ /* data_size limited to 4 */
+ if (data_size > 4)
+ return IXGBE_ERR_PARAM;
+
+ cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(bus_addr);
+ cmd->topo_addr = topo_addr;
+ cmd->i2c_params = params;
+ cmd->i2c_addr = addr;
+
+ for (i = 0; i < data_size; i++) {
+ cmd->i2c_data[i] = *data;
+ data++;
+ }
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_set_port_id_led - set LED value for the given port
+ * @hw: pointer to the HW struct
+ * @orig_mode: set LED original mode
+ *
+ * Set LED value for the given port (0x06E9)
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode)
+{
+ struct ixgbe_aci_cmd_set_port_id_led *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.set_port_id_led;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_port_id_led);
+
+ cmd->lport_num = (u8)hw->bus.func;
+ cmd->lport_num_valid = IXGBE_ACI_PORT_ID_PORT_NUM_VALID;
+
+ if (orig_mode)
+ cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_ORIG;
+ else
+ cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_BLINK;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_set_gpio - set GPIO pin state
+ * @hw: pointer to the hw struct
+ * @gpio_ctrl_handle: GPIO controller node handle
+ * @pin_idx: IO Number of the GPIO that needs to be set
+ * @value: SW provide IO value to set in the LSB
+ *
+ * Set the GPIO pin state that is a part of the topology
+ * using ACI command (0x06EC).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool value)
+{
+ struct ixgbe_aci_cmd_gpio *cmd;
+ struct ixgbe_aci_desc desc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_gpio);
+ cmd = &desc.params.read_write_gpio;
+ cmd->gpio_ctrl_handle = IXGBE_CPU_TO_LE16(gpio_ctrl_handle);
+ cmd->gpio_num = pin_idx;
+ cmd->gpio_val = value ? 1 : 0;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_get_gpio - get GPIO pin state
+ * @hw: pointer to the hw struct
+ * @gpio_ctrl_handle: GPIO controller node handle
+ * @pin_idx: IO Number of the GPIO that needs to be set
+ * @value: IO value read
+ *
+ * Get the value of a GPIO signal which is part of the topology
+ * using ACI command (0x06ED).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool *value)
+{
+ struct ixgbe_aci_cmd_gpio *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_gpio);
+ cmd = &desc.params.read_write_gpio;
+ cmd->gpio_ctrl_handle = IXGBE_CPU_TO_LE16(gpio_ctrl_handle);
+ cmd->gpio_num = pin_idx;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (status)
+ return status;
+
+ *value = !!cmd->gpio_val;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_aci_sff_eeprom - read/write SFF EEPROM
+ * @hw: pointer to the HW struct
+ * @lport: bits [7:0] = logical port, bit [8] = logical port valid
+ * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
+ * @mem_addr: I2C offset: lower 8 bits for address, upper 8 bits zero padding.
+ * @page: QSFP page
+ * @page_bank_ctrl: configuration of SFF/CMIS paging and banking control
+ * @data: pointer to data buffer to be read/written to the I2C device.
+ * @length: 1-16 for read, 1 for write.
+ * @write: false for read, true for write.
+ *
+ * Read/write SFF EEPROM using ACI command (0x06EE).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_sff_eeprom(struct ixgbe_hw *hw, u16 lport, u8 bus_addr,
+ u16 mem_addr, u8 page, u8 page_bank_ctrl, u8 *data,
+ u8 length, bool write)
+{
+ struct ixgbe_aci_cmd_sff_eeprom *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ if (!data || (mem_addr & 0xff00))
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_sff_eeprom);
+ cmd = &desc.params.read_write_sff_param;
+ desc.flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+ cmd->lport_num = (u8)(lport & 0xff);
+ cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
+ cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(((bus_addr >> 1) &
+ IXGBE_ACI_SFF_I2CBUS_7BIT_M) |
+ ((page_bank_ctrl <<
+ IXGBE_ACI_SFF_PAGE_BANK_CTRL_S) &
+ IXGBE_ACI_SFF_PAGE_BANK_CTRL_M));
+ cmd->i2c_offset = IXGBE_CPU_TO_LE16(mem_addr & 0xff);
+ cmd->module_page = page;
+ if (write)
+ cmd->i2c_bus_addr |= IXGBE_CPU_TO_LE16(IXGBE_ACI_SFF_IS_WRITE);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, data, length);
+ return status;
+}
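+
+/*
+ * Illustrative usage (sketch): read the first identifier byte of a module
+ * at the conventional 0xA0 EEPROM address on the default port (lport 0
+ * with bit [8] clear, so the topology default is used):
+ *
+ *	u8 id;
+ *
+ *	status = ixgbe_aci_sff_eeprom(hw, 0, 0xA0, 0, 0, 0, &id, 1, false);
+ */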
+
+/**
+ * ixgbe_aci_prog_topo_dev_nvm - program Topology Device NVM
+ * @hw: pointer to the hardware structure
+ * @topo_params: pointer to structure storing topology parameters for a device
+ *
+ * Program Topology Device NVM using ACI command (0x06F2).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_prog_topo_dev_nvm(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_params *topo_params)
+{
+ struct ixgbe_aci_cmd_prog_topo_dev_nvm *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.prog_topo_dev_nvm;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_prog_topo_dev_nvm);
+
+ memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params));
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_read_topo_dev_nvm - read Topology Device NVM
+ * @hw: pointer to the hardware structure
+ * @topo_params: pointer to structure storing topology parameters for a device
+ * @start_address: byte offset in the topology device NVM
+ * @data: pointer to data buffer
+ * @data_size: number of bytes to be read from the topology device NVM
+ *
+ * Read Topology Device NVM using ACI command (0x06F3).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_read_topo_dev_nvm(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_params *topo_params,
+ u32 start_address, u8 *data, u8 data_size)
+{
+ struct ixgbe_aci_cmd_read_topo_dev_nvm *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ if (!data || data_size == 0 ||
+ data_size > IXGBE_ACI_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
+ return IXGBE_ERR_PARAM;
+
+ cmd = &desc.params.read_topo_dev_nvm;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_topo_dev_nvm);
+
+ desc.datalen = IXGBE_CPU_TO_LE16(data_size);
+ memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params));
+ cmd->start_address = IXGBE_CPU_TO_LE32(start_address);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (status)
+ return status;
+
+ memcpy(data, cmd->data_read, data_size);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
+ *
+ * Request NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+ enum ixgbe_aci_res_access_type access)
+{
+ u32 fla;
+
+ /* Skip if we are in blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, GLNVM_FLA);
+ if ((fla & GLNVM_FLA_LOCKED_M) == 0)
+ return IXGBE_SUCCESS;
+
+ return ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, access,
+ IXGBE_NVM_TIMEOUT);
+}
+
+/**
+ * ixgbe_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
+ *
+ * Release NVM ownership.
+ */
+void ixgbe_release_nvm(struct ixgbe_hw *hw)
+{
+ u32 fla;
+
+ /* Skip if we are in blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, GLNVM_FLA);
+ if ((fla & GLNVM_FLA_LOCKED_M) == 0)
+ return;
+
+ ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
+}
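+
+/*
+ * Typical ownership discipline (sketch; module/offset/len/buf are
+ * placeholders): NVM reads are bracketed by acquire/release, as the
+ * callers later in this file do:
+ *
+ *	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ *	if (!status) {
+ *		status = ixgbe_aci_read_nvm(hw, module, offset, len, buf,
+ *		    true, false);
+ *		ixgbe_release_nvm(hw);
+ *	}
+ */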
+
+/**
+ * ixgbe_aci_read_nvm - read NVM
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @read_shadow_ram: tell if this is a shadow RAM read
+ *
+ * Read the NVM using ACI command (0x0701).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm *cmd;
+
+ cmd = &desc.params.nvm;
+
+ if (offset > IXGBE_ACI_NVM_MAX_OFFSET)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read);
+
+ if (!read_shadow_ram && module_typeid == IXGBE_ACI_NVM_START_POINT)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_FLASH_ONLY;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
+ cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
+ cmd->offset_low = IXGBE_CPU_TO_LE16(offset & 0xFFFF);
+ cmd->offset_high = (offset >> 16) & 0xFF;
+ cmd->length = IXGBE_CPU_TO_LE16(length);
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
+
+/**
+ * ixgbe_aci_erase_nvm - erase NVM sector
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ *
+ * Erase the NVM sector using the ACI command (0x0702).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm *cmd;
+ s32 status;
+ __le16 len;
+
+ /* Read the module length word from the Shadow RAM: module_typeid 0
+ * selects the SR itself, the byte offset of the module's size word is
+ * 2 * module_typeid + 2, and last_command/read_shadow_ram are true.
+ */
+ status = ixgbe_aci_read_nvm(hw, 0, 2 * module_typeid + 2, 2, &len, true,
+ true);
+ if (status)
+ return status;
+
+ cmd = &desc.params.nvm;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_erase);
+
+ cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
+ cmd->length = len;
+ cmd->offset_low = 0;
+ cmd->offset_high = 0;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_update_nvm - update NVM
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be written (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @command_flags: command parameters
+ *
+ * Update the NVM using the ACI command (0x0703).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 command_flags)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm *cmd;
+
+ cmd = &desc.params.nvm;
+
+ /* The highest byte of the offset must be zero. */
+ if (offset & 0xFF000000)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_write);
+
+ cmd->cmd_flags |= command_flags;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
+ cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
+ cmd->offset_low = IXGBE_CPU_TO_LE16(offset & 0xFFFF);
+ cmd->offset_high = (offset >> 16) & 0xFF;
+ cmd->length = IXGBE_CPU_TO_LE16(length);
+
+ desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
+
+/**
+ * ixgbe_aci_read_nvm_cfg - read an NVM config block
+ * @hw: pointer to the HW struct
+ * @cmd_flags: NVM access admin command bits
+ * @field_id: field or feature ID
+ * @data: buffer for result
+ * @buf_size: buffer size
+ * @elem_count: pointer to count of elements read by FW
+ *
+ * Reads a single or multiple feature/field ID and data using ACI command
+ * (0x0704).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_read_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
+ u16 field_id, void *data, u16 buf_size,
+ u16 *elem_count)
+{
+ struct ixgbe_aci_cmd_nvm_cfg *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.nvm_cfg;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_cfg_read);
+
+ cmd->cmd_flags = cmd_flags;
+ cmd->id = IXGBE_CPU_TO_LE16(field_id);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, data, buf_size);
+ if (!status && elem_count)
+ *elem_count = IXGBE_LE16_TO_CPU(cmd->count);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_write_nvm_cfg - write an NVM config block
+ * @hw: pointer to the HW struct
+ * @cmd_flags: NVM access admin command bits
+ * @data: buffer with the data to be written
+ * @buf_size: buffer size
+ * @elem_count: count of elements to be written
+ *
+ * Writes a single or multiple feature/field ID and data using ACI command
+ * (0x0705).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_write_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
+ void *data, u16 buf_size, u16 elem_count)
+{
+ struct ixgbe_aci_cmd_nvm_cfg *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.nvm_cfg;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_cfg_write);
+ desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+
+ cmd->count = IXGBE_CPU_TO_LE16(elem_count);
+ cmd->cmd_flags = cmd_flags;
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, buf_size);
+}
+
+/**
+ * ixgbe_nvm_validate_checksum - validate checksum
+ * @hw: pointer to the HW struct
+ *
+ * Verify NVM PFA checksum validity using ACI command (0x0706).
+ * If the checksum verification failed, IXGBE_ERR_NVM_CHECKSUM is returned.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_nvm_checksum *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ cmd = &desc.params.nvm_checksum;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
+ cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ ixgbe_release_nvm(hw);
+
+ if (!status &&
+ IXGBE_LE16_TO_CPU(cmd->checksum) != IXGBE_ACI_NVM_CHECKSUM_CORRECT) {
+ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+ "Invalid Shadow RAM checksum");
+ status = IXGBE_ERR_NVM_CHECKSUM;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_nvm_recalculate_checksum - recalculate checksum
+ * @hw: pointer to the HW struct
+ *
+ * Recalculate NVM PFA checksum using ACI command (0x0706).
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_nvm_recalculate_checksum(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_nvm_checksum *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ cmd = &desc.params.nvm_checksum;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
+ cmd->flags = IXGBE_ACI_NVM_CHECKSUM_RECALC;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_nvm_write_activate - NVM activate write
+ * @hw: pointer to the HW struct
+ * @cmd_flags: flags for write activate command
+ * @response_flags: response indicators from firmware
+ *
+ * Update the control word with the required banks' validity bits
+ * and dumps the Shadow RAM to flash using ACI command (0x0707).
+ *
+ * cmd_flags controls which banks to activate, the preservation level to use
+ * when activating the NVM bank, and whether an EMP reset is required for
+ * activation.
+ *
+ * Note that the 16-bit cmd_flags value is split between two separate
+ * one-byte flag fields in the descriptor.
+ *
+ * On successful return of the firmware command, the response_flags variable
+ * is updated with the flags reported by firmware indicating certain status,
+ * such as whether EMP reset is enabled.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
+ u8 *response_flags)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm *cmd;
+ s32 status;
+
+ cmd = &desc.params.nvm;
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_nvm_write_activate);
+
+ cmd->cmd_flags = LO_BYTE(cmd_flags);
+ cmd->offset_high = HI_BYTE(cmd_flags);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!status && response_flags)
+ *response_flags = cmd->cmd_flags;
+
+ return status;
+}
+
+/**
+ * ixgbe_get_flash_bank_offset - Get offset into requested flash bank
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive flash bank
+ * @module: the module to read from
+ *
+ * Based on the module, look up the module offset from the beginning of the
+ * flash.
+ *
+ * Return: the flash offset. Note that a value of zero is invalid and must be
+ * treated as an error.
+ */
+static u32 ixgbe_get_flash_bank_offset(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u16 module)
+{
+ struct ixgbe_bank_info *banks = &hw->flash.banks;
+ enum ixgbe_flash_bank active_bank;
+ bool second_bank_active;
+ u32 offset, size;
+
+ switch (module) {
+ case E610_SR_1ST_NVM_BANK_PTR:
+ offset = banks->nvm_ptr;
+ size = banks->nvm_size;
+ active_bank = banks->nvm_bank;
+ break;
+ case E610_SR_1ST_OROM_BANK_PTR:
+ offset = banks->orom_ptr;
+ size = banks->orom_size;
+ active_bank = banks->orom_bank;
+ break;
+ case E610_SR_NETLIST_BANK_PTR:
+ offset = banks->netlist_ptr;
+ size = banks->netlist_size;
+ active_bank = banks->netlist_bank;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (active_bank) {
+ case IXGBE_1ST_FLASH_BANK:
+ second_bank_active = false;
+ break;
+ case IXGBE_2ND_FLASH_BANK:
+ second_bank_active = true;
+ break;
+ default:
+ return 0;
+ }
+
+ /* The second flash bank is stored immediately following the first
+ * bank. Based on whether the 1st or 2nd bank is active, and whether
+ * we want the active or inactive bank, calculate the desired offset.
+ */
+ switch (bank) {
+ case IXGBE_ACTIVE_FLASH_BANK:
+ return offset + (second_bank_active ? size : 0);
+ case IXGBE_INACTIVE_FLASH_BANK:
+ return offset + (second_bank_active ? 0 : size);
+ }
+
+ return 0;
+}
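+
+/*
+ * Worked example with hypothetical values: if banks->nvm_ptr is 0x10000
+ * and banks->nvm_size is 0x20000, then with the second bank active the
+ * active NVM bank starts at 0x30000 and the inactive copy at 0x10000;
+ * with the first bank active the two offsets are swapped.
+ */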
+
+/**
+ * ixgbe_read_flash_module - Read a word from one of the main NVM modules
+ * @hw: pointer to the HW structure
+ * @bank: which bank of the module to read
+ * @module: the module to read
+ * @offset: the offset into the module in bytes
+ * @data: storage for the data read from the flash
+ * @length: bytes of data to read
+ *
+ * Read data from the specified flash module. The bank parameter indicates
+ * whether to read from the active or the inactive bank of that module.
+ *
+ * The data is read using flat NVM access, and relies on the
+ * hw->flash.banks data being set up by ixgbe_determine_active_flash_banks()
+ * during initialization.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_flash_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u16 module, u32 offset, u8 *data, u32 length)
+{
+ s32 status;
+ u32 start;
+
+ start = ixgbe_get_flash_bank_offset(hw, bank, module);
+ if (!start)
+ return IXGBE_ERR_PARAM;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_read_flat_nvm(hw, start + offset, &length, data, false);
+
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_netlist_module - Read data from the netlist module area
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive module
+ * @offset: offset into the netlist to read from
+ * @data: storage for returned word value
+ *
+ * Read a word from the specified netlist bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_netlist_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ s32 status;
+
+ status = ixgbe_read_flash_module(hw, bank, E610_SR_NETLIST_BANK_PTR,
+ offset * sizeof(u16),
+ (u8 *)&data_local,
+ sizeof(u16));
+ if (!status)
+ *data = IXGBE_LE16_TO_CPU(data_local);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_nvm_module - Read from the active main NVM module
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from active or inactive NVM module
+ * @offset: offset into the NVM module to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the active NVM module. This includes the CSS
+ * header at the start of the NVM module.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_nvm_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ s32 status;
+
+ status = ixgbe_read_flash_module(hw, bank, E610_SR_1ST_NVM_BANK_PTR,
+ offset * sizeof(u16),
+ (u8 *)&data_local,
+ sizeof(u16));
+ if (!status)
+ *data = IXGBE_LE16_TO_CPU(data_local);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_nvm_css_hdr_len - Read the CSS header length from the
+ * NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @hdr_len: storage for header length in words
+ *
+ * Read the CSS header length from the NVM CSS header and add the
+ * Authentication header size, and then convert to words.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_nvm_css_hdr_len(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 *hdr_len)
+{
+ u16 hdr_len_l, hdr_len_h;
+ u32 hdr_len_dword;
+ s32 status;
+
+ status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_L,
+ &hdr_len_l);
+ if (status)
+ return status;
+
+ status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_H,
+ &hdr_len_h);
+ if (status)
+ return status;
+
+ /* CSS header length is in DWORD, so convert to words and add
+ * authentication header size
+ */
+ hdr_len_dword = hdr_len_h << 16 | hdr_len_l;
+ *hdr_len = (hdr_len_dword * 2) + IXGBE_NVM_AUTH_HEADER_LEN;
+
+ return IXGBE_SUCCESS;
+}
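+
+/*
+ * Worked example with a hypothetical header: if the CSS header length
+ * field reads 330 DWORDs, the reported length is 330 * 2 words plus
+ * IXGBE_NVM_AUTH_HEADER_LEN, since module offsets are expressed in
+ * 16-bit words.
+ */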
+
+/**
+ * ixgbe_read_nvm_sr_copy - Read a word from the Shadow RAM copy in the NVM bank
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive NVM module
+ * @offset: offset into the Shadow RAM copy to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the copy of the Shadow RAM found in the
+ * specified NVM module.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_nvm_sr_copy(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ u32 hdr_len;
+ s32 status;
+
+ status = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
+ if (status)
+ return status;
+
+ hdr_len = ROUND_UP(hdr_len, 32);
+
+ return ixgbe_read_nvm_module(hw, bank, hdr_len + offset, data);
+}
+
+/**
+ * ixgbe_get_nvm_minsrevs - Get the minsrevs values from flash
+ * @hw: pointer to the HW struct
+ * @minsrevs: structure to store NVM and OROM minsrev values
+ *
+ * Read the Minimum Security Revision TLV and extract
+ * the revision values from the flash image
+ * into a readable structure for processing.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_nvm_minsrevs(struct ixgbe_hw *hw,
+ struct ixgbe_minsrev_info *minsrevs)
+{
+ struct ixgbe_aci_cmd_nvm_minsrev data;
+ s32 status;
+ u16 valid;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID,
+ 0, sizeof(data), &data,
+ true, false);
+
+ ixgbe_release_nvm(hw);
+
+ if (status)
+ return status;
+
+ valid = IXGBE_LE16_TO_CPU(data.validity);
+
+ /* Extract NVM minimum security revision */
+ if (valid & IXGBE_ACI_NVM_MINSREV_NVM_VALID) {
+ u16 minsrev_l = IXGBE_LE16_TO_CPU(data.nvm_minsrev_l);
+ u16 minsrev_h = IXGBE_LE16_TO_CPU(data.nvm_minsrev_h);
+
+ minsrevs->nvm = minsrev_h << 16 | minsrev_l;
+ minsrevs->nvm_valid = true;
+ }
+
+ /* Extract the OROM minimum security revision */
+ if (valid & IXGBE_ACI_NVM_MINSREV_OROM_VALID) {
+ u16 minsrev_l = IXGBE_LE16_TO_CPU(data.orom_minsrev_l);
+ u16 minsrev_h = IXGBE_LE16_TO_CPU(data.orom_minsrev_h);
+
+ minsrevs->orom = minsrev_h << 16 | minsrev_l;
+ minsrevs->orom_valid = true;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_update_nvm_minsrevs - Update minsrevs TLV data in flash
+ * @hw: pointer to the HW struct
+ * @minsrevs: minimum security revision information
+ *
+ * Update the NVM or Option ROM minimum security revision fields in the PFA
+ * area of the flash. Reads the minsrevs->nvm_valid and minsrevs->orom_valid
+ * fields to determine what update is being requested. If the valid bit is not
+ * set for that module, then the associated minsrev will be left as is.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_update_nvm_minsrevs(struct ixgbe_hw *hw,
+ struct ixgbe_minsrev_info *minsrevs)
+{
+ struct ixgbe_aci_cmd_nvm_minsrev data;
+ s32 status;
+
+ if (!minsrevs->nvm_valid && !minsrevs->orom_valid)
+ return IXGBE_ERR_PARAM;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ /* Get current data */
+ status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID, 0,
+ sizeof(data), &data, true, false);
+ if (status)
+ goto exit_release_res;
+
+ if (minsrevs->nvm_valid) {
+ data.nvm_minsrev_l = IXGBE_CPU_TO_LE16(minsrevs->nvm & 0xFFFF);
+ data.nvm_minsrev_h = IXGBE_CPU_TO_LE16(minsrevs->nvm >> 16);
+ data.validity |=
+ IXGBE_CPU_TO_LE16(IXGBE_ACI_NVM_MINSREV_NVM_VALID);
+ }
+
+ if (minsrevs->orom_valid) {
+ data.orom_minsrev_l = IXGBE_CPU_TO_LE16(minsrevs->orom & 0xFFFF);
+ data.orom_minsrev_h = IXGBE_CPU_TO_LE16(minsrevs->orom >> 16);
+ data.validity |=
+ IXGBE_CPU_TO_LE16(IXGBE_ACI_NVM_MINSREV_OROM_VALID);
+ }
+
+ /* Update flash data */
+ status = ixgbe_aci_update_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID, 0,
+ sizeof(data), &data, false,
+ IXGBE_ACI_NVM_SPECIAL_UPDATE);
+ if (status)
+ goto exit_release_res;
+
+ /* Dump the Shadow RAM to the flash */
+ status = ixgbe_nvm_write_activate(hw, 0, NULL);
+
+exit_release_res:
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_nvm_srev - Read the security revision from the NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @srev: storage for security revision
+ *
+ * Read the security revision out of the CSS header of the active NVM module
+ * bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_nvm_srev(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank, u32 *srev)
+{
+ u16 srev_l, srev_h;
+ s32 status;
+
+ status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_L, &srev_l);
+ if (status)
+ return status;
+
+ status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_H, &srev_h);
+ if (status)
+ return status;
+
+ *srev = srev_h << 16 | srev_l;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_nvm_ver_info - Read NVM version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @nvm: pointer to NVM info structure
+ *
+ * Read the NVM EETRACK ID and map version of the main NVM image bank, filling
+ * in the nvm info structure.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_nvm_ver_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_nvm_info *nvm)
+{
+ u16 eetrack_lo, eetrack_hi, ver;
+ s32 status;
+
+ status = ixgbe_read_nvm_sr_copy(hw, bank,
+ E610_SR_NVM_DEV_STARTER_VER, &ver);
+ if (status)
+ return status;
+
+ nvm->major = (ver & E610_NVM_VER_HI_MASK) >> E610_NVM_VER_HI_SHIFT;
+ nvm->minor = (ver & E610_NVM_VER_LO_MASK) >> E610_NVM_VER_LO_SHIFT;
+
+ status = ixgbe_read_nvm_sr_copy(hw, bank, E610_SR_NVM_EETRACK_LO,
+ &eetrack_lo);
+ if (status)
+ return status;
+ status = ixgbe_read_nvm_sr_copy(hw, bank, E610_SR_NVM_EETRACK_HI,
+ &eetrack_hi);
+ if (status)
+ return status;
+
+ nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
+
+ /* A failure to read the security revision is treated as non-fatal */
+ (void)ixgbe_get_nvm_srev(hw, bank, &nvm->srev);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_inactive_nvm_ver - Read NVM version from the inactive bank
+ * @hw: pointer to the HW structure
+ * @nvm: storage for NVM version information
+ *
+ * Reads the NVM EETRACK ID, Map version, and security revision of the
+ * inactive NVM bank. Used to access version data for a pending update that
+ * has not yet been activated.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
+{
+ return ixgbe_get_nvm_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, nvm);
+}
+
+/**
+ * ixgbe_get_active_nvm_ver - Read NVM version from the active bank
+ * @hw: pointer to the HW structure
+ * @nvm: storage for NVM version information
+ *
+ * Reads the NVM EETRACK ID, Map version, and security revision of the
+ * active NVM bank.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_active_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
+{
+ return ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK, nvm);
+}
+
+/**
+ * ixgbe_get_netlist_info - Read netlist version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @netlist: pointer to netlist version info structure
+ *
+ * Get the netlist version information from the requested bank. Reads the Link
+ * Topology section to find the Netlist ID block and extract the relevant
+ * information into the netlist version structure.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_netlist_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_netlist_info *netlist)
+{
+ u16 module_id, length, node_count, i;
+ u16 *id_blk;
+ s32 status;
+
+ status = ixgbe_read_netlist_module(hw, bank, IXGBE_NETLIST_TYPE_OFFSET,
+ &module_id);
+ if (status)
+ return status;
+
+ if (module_id != IXGBE_NETLIST_LINK_TOPO_MOD_ID)
+ return IXGBE_ERR_NVM;
+
+ status = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_MODULE_LEN,
+ &length);
+ if (status)
+ return status;
+
+ /* sanity check that we have at least enough words to store the
+ * netlist ID block
+ */
+ if (length < IXGBE_NETLIST_ID_BLK_SIZE)
+ return IXGBE_ERR_NVM;
+
+ status = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_NODE_COUNT,
+ &node_count);
+ if (status)
+ return status;
+ node_count &= IXGBE_LINK_TOPO_NODE_COUNT_M;
+
+ id_blk = (u16 *)ixgbe_calloc(hw, IXGBE_NETLIST_ID_BLK_SIZE,
+ sizeof(*id_blk));
+ if (!id_blk)
+ return IXGBE_ERR_NO_SPACE;
+
+ /* Read out the entire Netlist ID Block at once. */
+ status = ixgbe_read_flash_module(hw, bank, E610_SR_NETLIST_BANK_PTR,
+ IXGBE_NETLIST_ID_BLK_OFFSET(node_count) * sizeof(u16),
+ (u8 *)id_blk,
+ IXGBE_NETLIST_ID_BLK_SIZE * sizeof(u16));
+ if (status)
+ goto exit_error;
+
+ for (i = 0; i < IXGBE_NETLIST_ID_BLK_SIZE; i++)
+ id_blk[i] = IXGBE_LE16_TO_CPU(((__le16 *)id_blk)[i]);
+
+ netlist->major = id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW];
+ netlist->minor = id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW];
+ netlist->type = id_blk[IXGBE_NETLIST_ID_BLK_TYPE_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_TYPE_LOW];
+ netlist->rev = id_blk[IXGBE_NETLIST_ID_BLK_REV_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_REV_LOW];
+ netlist->cust_ver = id_blk[IXGBE_NETLIST_ID_BLK_CUST_VER];
+ /* Read the left-most 4 bytes of SHA */
+ netlist->hash = id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(14)];
+
+exit_error:
+ ixgbe_free(hw, id_blk);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_inactive_netlist_ver - Read netlist version from the inactive bank
+ * @hw: pointer to the HW struct
+ * @netlist: pointer to netlist version info structure
+ *
+ * Read the netlist version data from the inactive netlist bank. Used to
+ * extract version data of a pending flash update in order to display the
+ * version data.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw,
+ struct ixgbe_netlist_info *netlist)
+{
+ return ixgbe_get_netlist_info(hw, IXGBE_INACTIVE_FLASH_BANK, netlist);
+}
+
+/**
+ * ixgbe_read_sr_pointer - Read the value of a Shadow RAM pointer word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM word to read
+ * @pointer: pointer value read from Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to a pointer value specified
+ * in bytes. This function assumes the specified offset is a valid pointer
+ * word.
+ *
+ * Each pointer word uses its highest bit to indicate whether the value is
+ * stored in word units or 4KB sector units. The reported pointer value is in
+ * bytes, intended for flat NVM reads.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_sr_pointer(struct ixgbe_hw *hw, u16 offset, u32 *pointer)
+{
+ s32 status;
+ u16 value;
+
+ status = ixgbe_read_ee_aci_E610(hw, offset, &value);
+ if (status)
+ return status;
+
+ /* Determine if the pointer is in 4KB or word units */
+ if (value & IXGBE_SR_NVM_PTR_4KB_UNITS)
+ *pointer = (value & ~IXGBE_SR_NVM_PTR_4KB_UNITS) * 4 * 1024;
+ else
+ *pointer = value * 2;
+
+ return IXGBE_SUCCESS;
+}
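+
+/*
+ * Worked example: a raw word of (IXGBE_SR_NVM_PTR_4KB_UNITS | 0x0005)
+ * decodes to 5 * 4096 = 20480 bytes, while 0x0100 with the unit bit clear
+ * decodes to 0x100 * 2 = 512 bytes.
+ */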
+
+/**
+ * ixgbe_read_sr_area_size - Read an area size from a Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM to read
+ * @size: size value read from the Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to an area size value
+ * specified in bytes. This function assumes the specified offset is a valid
+ * area size word.
+ *
+ * Each area size word is specified in 4KB sector units. This function reports
+ * the size in bytes, intended for flat NVM reads.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_sr_area_size(struct ixgbe_hw *hw, u16 offset, u32 *size)
+{
+ s32 status;
+ u16 value;
+
+ status = ixgbe_read_ee_aci_E610(hw, offset, &value);
+ if (status)
+ return status;
+
+ /* Area sizes are always specified in 4KB units */
+ *size = value * 4 * 1024;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_discover_flash_size - Discover the available flash size.
+ * @hw: pointer to the HW struct
+ *
+ * The device flash could be up to 16MB in size. However, it is possible that
+ * the actual size is smaller. Use bisection to determine the accessible size
+ * of flash memory.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_discover_flash_size(struct ixgbe_hw *hw)
+{
+ u32 min_size = 0, max_size = IXGBE_ACI_NVM_MAX_OFFSET + 1;
+ s32 status;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ while ((max_size - min_size) > 1) {
+ u32 offset = (max_size + min_size) / 2;
+ u32 len = 1;
+ u8 data;
+
+ status = ixgbe_read_flat_nvm(hw, offset, &len, &data, false);
+ if (status == IXGBE_ERR_ACI_ERROR &&
+ hw->aci.last_status == IXGBE_ACI_RC_EINVAL) {
+ status = IXGBE_SUCCESS;
+ max_size = offset;
+ } else if (!status) {
+ min_size = offset;
+ } else {
+ /* an unexpected error occurred */
+ goto err_read_flat_nvm;
+ }
+ }
+
+ hw->flash.flash_size = max_size;
+
+err_read_flat_nvm:
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_determine_active_flash_banks - Discover active bank for each module
+ * @hw: pointer to the HW struct
+ *
+ * Read the Shadow RAM control word and determine which banks are active for
+ * the NVM, OROM, and Netlist modules. Also read and calculate the associated
+ * pointer and size. These values are then cached into the ixgbe_flash_info
+ * structure for later use in order to calculate the correct offset to read
+ * from the active module.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_determine_active_flash_banks(struct ixgbe_hw *hw)
+{
+ struct ixgbe_bank_info *banks = &hw->flash.banks;
+ u16 ctrl_word;
+ s32 status;
+
+ status = ixgbe_read_ee_aci_E610(hw, E610_SR_NVM_CTRL_WORD, &ctrl_word);
+ if (status)
+ return status;
+
+ /* Check that the control word indicates validity */
+ if ((ctrl_word & IXGBE_SR_CTRL_WORD_1_M) >> IXGBE_SR_CTRL_WORD_1_S !=
+ IXGBE_SR_CTRL_WORD_VALID)
+ return IXGBE_ERR_CONFIG;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NVM_BANK))
+ banks->nvm_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->nvm_bank = IXGBE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_OROM_BANK))
+ banks->orom_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->orom_bank = IXGBE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NETLIST_BANK))
+ banks->netlist_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->netlist_bank = IXGBE_2ND_FLASH_BANK;
+
+ status = ixgbe_read_sr_pointer(hw, E610_SR_1ST_NVM_BANK_PTR,
+ &banks->nvm_ptr);
+ if (status)
+ return status;
+
+ status = ixgbe_read_sr_area_size(hw, E610_SR_NVM_BANK_SIZE,
+ &banks->nvm_size);
+ if (status)
+ return status;
+
+ status = ixgbe_read_sr_pointer(hw, E610_SR_1ST_OROM_BANK_PTR,
+ &banks->orom_ptr);
+ if (status)
+ return status;
+
+ status = ixgbe_read_sr_area_size(hw, E610_SR_OROM_BANK_SIZE,
+ &banks->orom_size);
+ if (status)
+ return status;
+
+ status = ixgbe_read_sr_pointer(hw, E610_SR_NETLIST_BANK_PTR,
+ &banks->netlist_ptr);
+ if (status)
+ return status;
+
+ status = ixgbe_read_sr_area_size(hw, E610_SR_NETLIST_BANK_SIZE,
+ &banks->netlist_size);
+ if (status)
+ return status;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_nvm - initializes NVM settings
+ * @hw: pointer to the HW struct
+ *
+ * Read and populate NVM settings such as Shadow RAM size,
+ * max_timeout, and blank_nvm_mode
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_init_nvm(struct ixgbe_hw *hw)
+{
+ struct ixgbe_flash_info *flash = &hw->flash;
+ u32 fla, gens_stat;
+ s32 status;
+ u8 sr_size;
+
+ /* The SR size is stored regardless of the NVM programming mode
+ * as the blank mode may be used in the factory line.
+ */
+ gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
+ sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;
+
+ /* Switching to words (sr_size contains power of 2) */
+ flash->sr_words = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
+
+ /* Check if we are in the normal or blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, GLNVM_FLA);
+ if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
+ flash->blank_nvm_mode = false;
+ } else {
+ /* Blank programming mode */
+ flash->blank_nvm_mode = true;
+ return IXGBE_ERR_NVM_BLANK_MODE;
+ }
+
+ status = ixgbe_discover_flash_size(hw);
+ if (status)
+ return status;
+
+ status = ixgbe_determine_active_flash_banks(hw);
+ if (status)
+ return status;
+
+ status = ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+ &flash->nvm);
+ if (status)
+ return status;
+
+ /* Read the netlist version information. A failure here is not
+ * treated as fatal for NVM initialization.
+ */
+ status = ixgbe_get_netlist_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+ &flash->netlist);
+ if (status)
+ DEBUGOUT("Failed to read netlist info.\n");
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_sanitize_operate - Clear the user data
+ * @hw: pointer to the HW struct
+ *
+ * Clear user data from NVM using ACI command (0x070C).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_sanitize_operate(struct ixgbe_hw *hw)
+{
+ u8 cmd_flags = IXGBE_ACI_SANITIZE_REQ_OPERATE |
+ IXGBE_ACI_SANITIZE_OPERATE_SUBJECT_CLEAR;
+ s32 status;
+ u8 values;
+
+ status = ixgbe_sanitize_nvm(hw, cmd_flags, &values);
+ if (status)
+ return status;
+ if ((!(values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
+ !(values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE)) ||
+ ((values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
+ !(values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_SUCCESS)) ||
+ ((values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE) &&
+ !(values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_SUCCESS)))
+ return IXGBE_ERR_ACI_ERROR;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_sanitize_nvm - Sanitize NVM
+ * @hw: pointer to the HW struct
+ * @cmd_flags: flag to the ACI command
+ * @values: values returned from the command
+ *
+ * Sanitize NVM using ACI command (0x070C).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_sanitize_nvm(struct ixgbe_hw *hw, u8 cmd_flags, u8 *values)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm_sanitization *cmd;
+ s32 status;
+
+ cmd = &desc.params.nvm_sanitization;
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_sanitization);
+ cmd->cmd_flags = cmd_flags;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (values)
+ *values = cmd->values;
+
+ return status;
+}
+
+/**
+ * ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using ixgbe_read_flat_nvm.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ u32 bytes = sizeof(u16);
+ __le16 data_local;
+ s32 status;
+
+ status = ixgbe_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
+ (u8 *)&data_local, true);
+ if (status)
+ return status;
+
+ *data = IXGBE_LE16_TO_CPU(data_local);
+ return IXGBE_SUCCESS;
+}
+
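+/*
+ * Illustrative usage sketch for ixgbe_read_sr_word_aci(); the acquire and
+ * release calls are shown by analogy with the IXGBE_RES_READ pattern used
+ * in ixgbe_discover_flash_size() above, and whether a given caller needs
+ * ownership depends on the surrounding code:
+ *
+ *   u16 ctrl_word;
+ *   s32 err;
+ *
+ *   err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ *   if (!err) {
+ *           err = ixgbe_read_sr_word_aci(hw, E610_SR_NVM_CTRL_WORD,
+ *                                        &ctrl_word);
+ *           ixgbe_release_nvm(hw);
+ *   }
+ */
+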
+/**
+ * ixgbe_read_sr_buf_aci - Reads Shadow RAM buf via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buf) from the Shadow RAM. Ownership of the NVM is
+ * taken before reading the buffer and later released.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
+ u16 *data)
+{
+ u32 bytes = *words * 2, i;
+ s32 status;
+
+ status = ixgbe_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
+
+ *words = bytes / 2;
+
+ for (i = 0; i < *words; i++)
+ data[i] = IXGBE_LE16_TO_CPU(((__le16 *)data)[i]);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_flat_nvm - Read portion of NVM by flat offset
+ * @hw: pointer to the HW struct
+ * @offset: offset from beginning of NVM
+ * @length: (in) number of bytes to read; (out) number of bytes actually read
+ * @data: buffer to return data in (sized to fit the specified length)
+ * @read_shadow_ram: if true, read from shadow RAM instead of NVM
+ *
+ * Reads a portion of the NVM, as a flat memory space. This function correctly
+ * breaks read requests across Shadow RAM sectors, prevents Shadow RAM size
+ * from being exceeded in case of Shadow RAM read requests and ensures that no
+ * single read request exceeds the maximum 4KB read for a single admin command.
+ *
+ * Returns a status code on failure. Note that the data pointer may be
+ * partially updated if some reads succeed before a failure.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+ u8 *data, bool read_shadow_ram)
+{
+ u32 inlen = *length;
+ u32 bytes_read = 0;
+ bool last_cmd;
+ s32 status;
+
+ *length = 0;
+
+ /* Verify the length of the read if this is for the Shadow RAM */
+ if (read_shadow_ram && ((offset + inlen) >
+ (hw->eeprom.word_size * 2u))) {
+ return IXGBE_ERR_PARAM;
+ }
+
+ do {
+ u32 read_size, sector_offset;
+
+ /* ixgbe_aci_read_nvm cannot read more than 4KB at a time.
+ * Additionally, a read from the Shadow RAM may not cross over
+ * a sector boundary. Conveniently, the sector size is also 4KB.
+ */
+ sector_offset = offset % IXGBE_ACI_MAX_BUFFER_SIZE;
+ read_size = MIN_T(u32,
+ IXGBE_ACI_MAX_BUFFER_SIZE - sector_offset,
+ inlen - bytes_read);
+
+ last_cmd = !(bytes_read + read_size < inlen);
+
+ /* ixgbe_aci_read_nvm takes the length as a u16. Our read_size
+ * is calculated using a u32, but the IXGBE_ACI_MAX_BUFFER_SIZE
+ * maximum size guarantees that it will fit within the 2 bytes.
+ */
+ status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT,
+ offset, (u16)read_size,
+ data + bytes_read, last_cmd,
+ read_shadow_ram);
+ if (status)
+ break;
+
+ bytes_read += read_size;
+ offset += read_size;
+ } while (!last_cmd);
+
+ *length = bytes_read;
+ return status;
+}
+
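+/*
+ * Illustrative sketch of how the loop above splits a read that crosses a
+ * 4KB sector boundary (example offsets): for offset = 0x0ff0 and
+ * *length = 0x20, the first ixgbe_aci_read_nvm() call reads 0x10 bytes up
+ * to the sector boundary at 0x1000, and the second call reads the
+ * remaining 0x10 bytes with last_cmd set.
+ */
+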
+/**
+ * ixgbe_check_sr_access_params - verify params for Shadow RAM R/W operations.
+ * @hw: pointer to the HW structure
+ * @offset: offset in words from module start
+ * @words: number of words to access
+ *
+ * Check if all the parameters are valid
+ * before performing any Shadow RAM read/write operations.
+ *
+ * Return: the exit code of the operation.
+ * * IXGBE_SUCCESS - success.
+ * * IXGBE_ERR_PARAM - NVM error: the offset is beyond the SR limit,
+ *   more words were requested than the per-command limit allows, or
+ *   the access would spread over two sectors.
+ */
+static s32 ixgbe_check_sr_access_params(struct ixgbe_hw *hw, u32 offset,
+ u16 words)
+{
+ if ((offset + words) > hw->eeprom.word_size)
+ return IXGBE_ERR_PARAM;
+
+ if (words > IXGBE_SR_SECTOR_SIZE_IN_WORDS) {
+ /* We can access only up to 4KB (one sector),
+ * in one Admin Command write
+ */
+ return IXGBE_ERR_PARAM;
+ }
+
+ if (((offset + (words - 1)) / IXGBE_SR_SECTOR_SIZE_IN_WORDS) !=
+ (offset / IXGBE_SR_SECTOR_SIZE_IN_WORDS)) {
+ /* A single access cannot spread over two sectors */
+ return IXGBE_ERR_PARAM;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
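+/*
+ * Illustrative sketch of the sector check above, assuming
+ * IXGBE_SR_SECTOR_SIZE_IN_WORDS == 2048 (one 4KB sector, as the in-code
+ * comment indicates): offset = 2040 with words = 16 touches words
+ * 2040..2055, which straddles the boundary at word 2048 and is rejected,
+ * while offset = 2032 with words = 16 stays within words 2032..2047 and
+ * passes.
+ */
+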
+/**
+ * ixgbe_write_sr_word_aci - Writes Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to write
+ * @data: word to write to the Shadow RAM
+ *
+ * Writes a 16 bit word to the Shadow RAM using the admin command.
+ * NVM ownership must be acquired before calling this function and released
+ * by the caller. To commit the Shadow RAM to NVM, the update checksum
+ * function should be called.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_write_sr_word_aci(struct ixgbe_hw *hw, u32 offset, const u16 *data)
+{
+ __le16 data_local = IXGBE_CPU_TO_LE16(*data);
+ s32 status;
+
+ status = ixgbe_check_sr_access_params(hw, offset, 1);
+ if (!status)
+ status = ixgbe_aci_update_nvm(hw, 0, BYTES_PER_WORD * offset,
+ BYTES_PER_WORD, &data_local,
+ false, 0);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_sr_buf_aci - Writes Shadow RAM buf
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM buffer to write
+ * @words: number of words to write
+ * @data: words to write to the Shadow RAM
+ *
+ * Writes a buffer of 16 bit words to the Shadow RAM using the admin
+ * command. NVM ownership must be acquired before calling this function and
+ * released by the caller. To commit the Shadow RAM to NVM, the update
+ * checksum function should be called.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_write_sr_buf_aci(struct ixgbe_hw *hw, u32 offset, u16 words,
+ const u16 *data)
+{
+ __le16 *data_local;
+ s32 status;
+ void *vmem;
+ u32 i;
+
+ vmem = ixgbe_calloc(hw, words, sizeof(u16));
+ if (!vmem)
+ return IXGBE_ERR_OUT_OF_MEM;
+ data_local = (__le16 *)vmem;
+
+ for (i = 0; i < words; i++)
+ data_local[i] = IXGBE_CPU_TO_LE16(data[i]);
+
+ /* Here we will only write one buffer as the size of the modules
+ * mirrored in the Shadow RAM is always less than 4K.
+ */
+ status = ixgbe_check_sr_access_params(hw, offset, words);
+ if (!status)
+ status = ixgbe_aci_update_nvm(hw, 0, BYTES_PER_WORD * offset,
+ BYTES_PER_WORD * words,
+ data_local, false, 0);
+
+ ixgbe_free(hw, vmem);
+
+ return status;
+}
+
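+/*
+ * Illustrative usage sketch for the Shadow RAM write helpers above
+ * (offset, words and data are placeholders). IXGBE_RES_WRITE is assumed by
+ * analogy with IXGBE_RES_READ used earlier in this file, and
+ * update_checksum is the eeprom op installed by ixgbe_init_ops_E610():
+ *
+ *   err = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ *   if (!err) {
+ *           err = ixgbe_write_sr_buf_aci(hw, offset, words, data);
+ *           if (!err)
+ *                   err = hw->eeprom.ops.update_checksum(hw);
+ *           ixgbe_release_nvm(hw);
+ *   }
+ */
+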
+/**
+ * ixgbe_aci_alternate_write - write to alternate structure
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be written
+ * @reg_val0: value to be written under 'reg_addr0'
+ * @reg_addr1: address of second dword to be written
+ * @reg_val1: value to be written under 'reg_addr1'
+ *
+ * Write one or two dwords to alternate structure using ACI command (0x0900).
+ * Fields are indicated by 'reg_addr0' and 'reg_addr1' register numbers.
+ *
+ * Return: 0 on success and error code on failure.
+ */
+s32 ixgbe_aci_alternate_write(struct ixgbe_hw *hw, u32 reg_addr0,
+ u32 reg_val0, u32 reg_addr1, u32 reg_val1)
+{
+ struct ixgbe_aci_cmd_read_write_alt_direct *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.read_write_alt_direct;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_write_alt_direct);
+ cmd->dword0_addr = IXGBE_CPU_TO_LE32(reg_addr0);
+ cmd->dword1_addr = IXGBE_CPU_TO_LE32(reg_addr1);
+ cmd->dword0_value = IXGBE_CPU_TO_LE32(reg_val0);
+ cmd->dword1_value = IXGBE_CPU_TO_LE32(reg_val1);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_alternate_read - read from alternate structure
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be read
+ * @reg_val0: pointer for data read from 'reg_addr0'
+ * @reg_addr1: address of second dword to be read
+ * @reg_val1: pointer for data read from 'reg_addr1'
+ *
+ * Read one or two dwords from alternate structure using ACI command (0x0902).
+ * Fields are indicated by 'reg_addr0' and 'reg_addr1' register numbers.
+ * If 'reg_val1' pointer is not passed then only register at 'reg_addr0'
+ * is read.
+ *
+ * Return: 0 on success and error code on failure.
+ */
+s32 ixgbe_aci_alternate_read(struct ixgbe_hw *hw, u32 reg_addr0,
+ u32 *reg_val0, u32 reg_addr1, u32 *reg_val1)
+{
+ struct ixgbe_aci_cmd_read_write_alt_direct *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.read_write_alt_direct;
+
+ if (!reg_val0)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_alt_direct);
+ cmd->dword0_addr = IXGBE_CPU_TO_LE32(reg_addr0);
+ cmd->dword1_addr = IXGBE_CPU_TO_LE32(reg_addr1);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ if (status == IXGBE_SUCCESS) {
+ *reg_val0 = IXGBE_LE32_TO_CPU(cmd->dword0_value);
+
+ if (reg_val1)
+ *reg_val1 = IXGBE_LE32_TO_CPU(cmd->dword1_value);
+ }
+
+ return status;
+}
+
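+/*
+ * Illustrative usage sketch for the alternate-structure helpers above
+ * (reg_addr0 is a placeholder): passing NULL for reg_val1 limits
+ * ixgbe_aci_alternate_read() to the first dword.
+ *
+ *   u32 val0;
+ *   s32 err;
+ *
+ *   err = ixgbe_aci_alternate_read(hw, reg_addr0, &val0, 0, NULL);
+ */
+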
+/**
+ * ixgbe_aci_alternate_write_done - check if writing to alternate structure
+ * is done
+ * @hw: pointer to the HW structure.
+ * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
+ * @reset_needed: indicates the SW should trigger GLOBAL reset
+ *
+ * Indicates to the FW that alternate structures have been changed.
+ *
+ * Return: 0 on success and error code on failure.
+ */
+s32 ixgbe_aci_alternate_write_done(struct ixgbe_hw *hw, u8 bios_mode,
+ bool *reset_needed)
+{
+ struct ixgbe_aci_cmd_done_alt_write *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.done_alt_write;
+
+ if (!reset_needed)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_done_alt_write);
+ cmd->flags = bios_mode;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!status)
+ *reset_needed = (IXGBE_LE16_TO_CPU(cmd->flags) &
+ IXGBE_ACI_RESP_RESET_NEEDED) != 0;
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_alternate_clear - clear alternate structure
+ * @hw: pointer to the HW structure.
+ *
+ * Clear the alternate structures of the port from which the function
+ * is called.
+ *
+ * Return: 0 on success and error code on failure.
+ */
+s32 ixgbe_aci_alternate_clear(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_clear_port_alt_write);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_get_internal_data - get internal FW/HW data
+ * @hw: pointer to the hardware structure
+ * @cluster_id: specific cluster to dump
+ * @table_id: table ID within cluster
+ * @start: index of line in the block to read
+ * @buf: dump buffer
+ * @buf_size: dump buffer size
+ * @ret_buf_size: return buffer size (returned by FW)
+ * @ret_next_cluster: next cluster to read (returned by FW)
+ * @ret_next_table: next block to read (returned by FW)
+ * @ret_next_index: next index to read (returned by FW)
+ *
+ * Get internal FW/HW data using ACI command (0xFF08) for debug purposes.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_internal_data(struct ixgbe_hw *hw, u16 cluster_id,
+ u16 table_id, u32 start, void *buf,
+ u16 buf_size, u16 *ret_buf_size,
+ u16 *ret_next_cluster, u16 *ret_next_table,
+ u32 *ret_next_index)
+{
+ struct ixgbe_aci_cmd_debug_dump_internals *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.debug_dump;
+
+ if (buf_size == 0 || !buf)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_debug_dump_internals);
+
+ cmd->cluster_id = IXGBE_CPU_TO_LE16(cluster_id);
+ cmd->table_id = IXGBE_CPU_TO_LE16(table_id);
+ cmd->idx = IXGBE_CPU_TO_LE32(start);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
+
+ if (!status) {
+ if (ret_buf_size)
+ *ret_buf_size = IXGBE_LE16_TO_CPU(desc.datalen);
+ if (ret_next_cluster)
+ *ret_next_cluster = IXGBE_LE16_TO_CPU(cmd->cluster_id);
+ if (ret_next_table)
+ *ret_next_table = IXGBE_LE16_TO_CPU(cmd->table_id);
+ if (ret_next_index)
+ *ret_next_index = IXGBE_LE32_TO_CPU(cmd->idx);
+ }
+
+ return status;
+}
+
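+/*
+ * Illustrative sketch of paging through a dump using the next-read cookies
+ * returned above (cluster/table values and the stop condition are
+ * placeholders; buf is sized to the ACI maximum used elsewhere in this
+ * file):
+ *
+ *   u8 buf[IXGBE_ACI_MAX_BUFFER_SIZE];
+ *   u16 len, next_cluster, next_table;
+ *   u32 idx = 0, next_idx;
+ *   s32 err;
+ *
+ *   do {
+ *           err = ixgbe_aci_get_internal_data(hw, cluster, table, idx,
+ *                                             buf, sizeof(buf), &len,
+ *                                             &next_cluster, &next_table,
+ *                                             &next_idx);
+ *           if (err)
+ *                   break;
+ *           (consume len bytes of buf here)
+ *           idx = next_idx;
+ *   } while (next_cluster == cluster && next_table == table);
+ */
+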
+/**
+ * ixgbe_validate_nvm_rw_reg - Check that an NVM access request is valid
+ * @cmd: NVM access command structure
+ *
+ * Validates that an NVM access structure requests to read or write a valid
+ * register offset. First validates that the module and flags are correct, and
+ * then ensures that the register offset is one of the accepted registers.
+ *
+ * Return: 0 if the register access is valid, out of range error code otherwise.
+ */
+static s32
+ixgbe_validate_nvm_rw_reg(struct ixgbe_nvm_access_cmd *cmd)
+{
+ u16 i;
+
+ switch (cmd->offset) {
+ case GL_HICR:
+ case GL_HICR_EN: /* Note, this register is read only */
+ case GL_FWSTS:
+ case GL_MNG_FWSM:
+ case GLNVM_GENS:
+ case GLNVM_FLA:
+ case GL_FWRESETCNT:
+ return 0;
+ default:
+ break;
+ }
+
+ for (i = 0; i <= GL_HIDA_MAX_INDEX; i++)
+ if (cmd->offset == (u32)GL_HIDA(i))
+ return 0;
+
+ for (i = 0; i <= GL_HIBA_MAX_INDEX; i++)
+ if (cmd->offset == (u32)GL_HIBA(i))
+ return 0;
+
+ /* All other register offsets are not valid */
+ return IXGBE_ERR_OUT_OF_RANGE;
+}
+
+/**
+ * ixgbe_nvm_access_read - Handle an NVM read request
+ * @hw: pointer to the HW struct
+ * @cmd: NVM access command to process
+ * @data: storage for the register value read
+ *
+ * Process an NVM access request to read a register.
+ *
+ * Return: 0 if the register read is valid and successful,
+ * out of range error code otherwise.
+ */
+static s32 ixgbe_nvm_access_read(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_access_cmd *cmd,
+ struct ixgbe_nvm_access_data *data)
+{
+ s32 status;
+
+ /* Always initialize the output data, even on failure */
+ memset(&data->regval, 0, cmd->data_size);
+
+ /* Make sure this is a valid read/write access request */
+ status = ixgbe_validate_nvm_rw_reg(cmd);
+ if (status)
+ return status;
+
+ DEBUGOUT1("NVM access: reading register %08x\n", cmd->offset);
+
+ /* Read the register and store the contents in the data field */
+ data->regval = IXGBE_READ_REG(hw, cmd->offset);
+
+ return 0;
+}
+
+/**
+ * ixgbe_nvm_access_write - Handle an NVM write request
+ * @hw: pointer to the HW struct
+ * @cmd: NVM access command to process
+ * @data: NVM access data to write
+ *
+ * Process an NVM access request to write a register.
+ *
+ * Return: 0 if the register write is valid and successful,
+ * out of range error code otherwise.
+ */
+static s32 ixgbe_nvm_access_write(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_access_cmd *cmd,
+ struct ixgbe_nvm_access_data *data)
+{
+ s32 status;
+
+ /* Make sure this is a valid read/write access request */
+ status = ixgbe_validate_nvm_rw_reg(cmd);
+ if (status)
+ return status;
+
+ /* Reject requests to write to read-only registers */
+ switch (cmd->offset) {
+ case GL_HICR_EN:
+ return IXGBE_ERR_OUT_OF_RANGE;
+ default:
+ break;
+ }
+
+ DEBUGOUT2("NVM access: writing register %08x with value %08x\n",
+ cmd->offset, data->regval);
+
+ /* Write the data field to the specified register */
+ IXGBE_WRITE_REG(hw, cmd->offset, data->regval);
+
+ return 0;
+}
+
+/**
+ * ixgbe_handle_nvm_access - Handle an NVM access request
+ * @hw: pointer to the HW struct
+ * @cmd: NVM access command info
+ * @data: pointer to read or return data
+ *
+ * Process an NVM access request. Read the command structure information and
+ * determine if it is valid. If not, report an error indicating the command
+ * was invalid.
+ *
+ * For valid commands, perform the necessary function, copying the data into
+ * the provided data buffer.
+ *
+ * Return: 0 if the nvm access request is valid and successful,
+ * error code otherwise.
+ */
+s32 ixgbe_handle_nvm_access(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_access_cmd *cmd,
+ struct ixgbe_nvm_access_data *data)
+{
+ switch (cmd->command) {
+ case IXGBE_NVM_CMD_READ:
+ return ixgbe_nvm_access_read(hw, cmd, data);
+ case IXGBE_NVM_CMD_WRITE:
+ return ixgbe_nvm_access_write(hw, cmd, data);
+ default:
+ return IXGBE_ERR_PARAM;
+ }
+}
+
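+/*
+ * Illustrative usage sketch for the NVM access dispatcher above: a read of
+ * the GL_FWSTS register (one of the offsets accepted by
+ * ixgbe_validate_nvm_rw_reg()) could be issued as
+ *
+ *   struct ixgbe_nvm_access_cmd cmd = {
+ *           .command = IXGBE_NVM_CMD_READ,
+ *           .offset = GL_FWSTS,
+ *           .data_size = sizeof(u32),
+ *   };
+ *   struct ixgbe_nvm_access_data data;
+ *   s32 err = ixgbe_handle_nvm_access(hw, &cmd, &data);
+ */
+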
+/**
+ * ixgbe_aci_set_health_status_config - Configure FW health events
+ * @hw: pointer to the HW struct
+ * @event_source: type of diagnostic events to enable
+ *
+ * Configure the health status event types that the firmware will send to this
+ * PF using ACI command (0xFF20). The supported event types are: PF-specific,
+ * all PFs, and global.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_health_status_config(struct ixgbe_hw *hw, u8 event_source)
+{
+ struct ixgbe_aci_cmd_set_health_status_config *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.set_health_status_config;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_set_health_status_config);
+
+ cmd->event_source = event_source;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_init_ops_E610 - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for E610.
+ * Does not touch the hardware.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_init_ops_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ ret_val = ixgbe_init_ops_X550(hw);
+
+ /* MAC */
+ mac->ops.reset_hw = ixgbe_reset_hw_E610;
+ mac->ops.start_hw = ixgbe_start_hw_E610;
+ mac->ops.get_media_type = ixgbe_get_media_type_E610;
+ mac->ops.get_supported_physical_layer =
+ ixgbe_get_supported_physical_layer_E610;
+ mac->ops.get_san_mac_addr = NULL;
+ mac->ops.set_san_mac_addr = NULL;
+ mac->ops.get_wwn_prefix = NULL;
+ mac->ops.setup_link = ixgbe_setup_link_E610;
+ mac->ops.check_link = ixgbe_check_link_E610;
+ mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_E610;
+ mac->ops.setup_fc = ixgbe_setup_fc_E610;
+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_E610;
+ mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_E610;
+ mac->ops.disable_rx = ixgbe_disable_rx_E610;
+ mac->ops.setup_eee = ixgbe_setup_eee_E610;
+ mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_E610;
+ mac->ops.fw_rollback_mode = ixgbe_fw_rollback_mode_E610;
+ mac->ops.get_fw_tsam_mode = ixgbe_get_fw_tsam_mode_E610;
+ mac->ops.get_fw_version = ixgbe_aci_get_fw_ver;
+ mac->ops.get_nvm_version = ixgbe_get_active_nvm_ver;
+ mac->ops.get_thermal_sensor_data = NULL;
+ mac->ops.init_thermal_sensor_thresh = NULL;
+
+ /* PHY */
+ phy->ops.init = ixgbe_init_phy_ops_E610;
+ phy->ops.identify = ixgbe_identify_phy_E610;
+ phy->eee_speeds_supported = IXGBE_LINK_SPEED_10_FULL |
+ IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ phy->eee_speeds_advertised = phy->eee_speeds_supported;
+
+ /* Additional ops overrides for e610 to go here */
+ eeprom->ops.init_params = ixgbe_init_eeprom_params_E610;
+ eeprom->ops.read = ixgbe_read_ee_aci_E610;
+ eeprom->ops.read_buffer = ixgbe_read_ee_aci_buffer_E610;
+ eeprom->ops.write = ixgbe_write_ee_aci_E610;
+ eeprom->ops.write_buffer = ixgbe_write_ee_aci_buffer_E610;
+ eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_E610;
+ eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_E610;
+ eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_E610;
+ eeprom->ops.read_pba_string = ixgbe_read_pba_string_E610;
+
+ /* Initialize bus function number */
+ hw->mac.ops.set_lan_id(hw);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_reset_hw_E610 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, and performs a reset.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_reset_hw_E610(struct ixgbe_hw *hw)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u32 ctrl, i;
+ s32 status;
+
+ DEBUGFUNC("ixgbe_reset_hw_E610");
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != IXGBE_SUCCESS)
+ goto reset_hw_out;
+
+ /* flush pending Tx transactions */
+ ixgbe_clear_tx_pending(hw);
+
+ status = hw->phy.ops.init(hw);
+ if (status != IXGBE_SUCCESS)
+ DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
+ status);
+mac_reset_top:
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status != IXGBE_SUCCESS) {
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d", status);
+ return IXGBE_ERR_SWFW_SYNC;
+ }
+ ctrl = IXGBE_CTRL_RST;
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ usec_delay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ status = IXGBE_ERR_RESET_FAILED;
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "Reset polling failed to complete.\n");
+ }
+ msec_delay(100);
+
+ /*
+ * Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /* Set the Rx packet buffer size. */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = 128;
+ hw->mac.ops.init_rx_addrs(hw);
+
+reset_hw_out:
+ return status;
+}
+
+/**
+ * ixgbe_start_hw_E610 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Gets the firmware version and, if the API version matches, starts the
+ * hardware using the generic start_hw function followed by the gen2
+ * start_hw function, then performs revision-specific operations, if any.
+ *
+ * Return: the exit code of the operation.
+ **/
+s32 ixgbe_start_hw_E610(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+
+ ret_val = hw->mac.ops.get_fw_version(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = ixgbe_start_hw_generic(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ ixgbe_start_hw_gen2(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_media_type_E610 - Gets media type
+ * @hw: pointer to the HW struct
+ *
+ * In order to get the media type, the function gets the PHY
+ * capabilities and then uses them to identify the PHY type by
+ * checking phy_type_high and phy_type_low.
+ *
+ * Return: the type of media in the form of an ixgbe_media_type enum,
+ * or ixgbe_media_type_unknown in case of an error.
+ */
+enum ixgbe_media_type ixgbe_get_media_type_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ u64 phy_mask = 0;
+ s32 rc;
+ u8 i;
+
+ rc = ixgbe_update_link_info(hw);
+ if (rc)
+ return ixgbe_media_type_unknown;
+
+ /* If there is no link, but a PHY (dongle) is available, SW should use
+ * the Get PHY Caps admin command instead of Get Link Status, find the
+ * most significant bit that is set in the PHY types reported by the
+ * command and use it to discover the media type.
+ */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) &&
+ (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) {
+ /* Get PHY Capabilities */
+ rc = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ &pcaps);
+ if (rc)
+ return ixgbe_media_type_unknown;
+
+ /* Check if there is some bit set in phy_type_high */
+ for (i = 64; i > 0; i--) {
+ phy_mask = (u64)((u64)1 << (i - 1));
+ if ((pcaps.phy_type_high & phy_mask) != 0) {
+ /* If any bit is set treat it as PHY type */
+ hw->link.link_info.phy_type_high = phy_mask;
+ hw->link.link_info.phy_type_low = 0;
+ break;
+ }
+ phy_mask = 0;
+ }
+
+ /* If nothing found in phy_type_high search in phy_type_low */
+ if (phy_mask == 0) {
+ for (i = 64; i > 0; i--) {
+ phy_mask = (u64)((u64)1 << (i - 1));
+ if ((pcaps.phy_type_low & phy_mask) != 0) {
+ /* If any bit is set treat it as PHY type */
+ hw->link.link_info.phy_type_high = 0;
+ hw->link.link_info.phy_type_low = phy_mask;
+ break;
+ }
+ }
+ }
+ }
+
+ /* Based on link status or search above try to discover media type */
+ hw->phy.media_type = ixgbe_get_media_type_from_phy_type(hw);
+
+ return hw->phy.media_type;
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_E610 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ *
+ * Return: the bitmap of supported physical layer types, or
+ * IXGBE_PHYSICAL_LAYER_UNKNOWN in case of an error.
+ **/
+u64 ixgbe_get_supported_physical_layer_E610(struct ixgbe_hw *hw)
+{
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ u64 phy_type;
+ s32 rc;
+
+ rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ &pcaps);
+ if (rc)
+ return IXGBE_PHYSICAL_LAYER_UNKNOWN;
+
+ phy_type = IXGBE_LE64_TO_CPU(pcaps.phy_type_low);
+ if (phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_T)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_T)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_100BASE_TX)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_LR)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_SR)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_KX)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_SX)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_SX;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_2500BASE_KX)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_2500BASE_KX;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_2500BASE_T)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_2500BASE_T;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_5GBASE_T)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_5000BASE_T;
+
+ phy_type = IXGBE_LE64_TO_CPU(pcaps.phy_type_high);
+ if (phy_type & IXGBE_PHY_TYPE_HIGH_10BASE_T)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
+
+ return physical_layer;
+}
+
+/**
+ * ixgbe_setup_link_E610 - Set up link
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait: true when waiting for completion is needed
+ *
+ * Set up the link with the specified speed.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_setup_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait)
+{
+ /* Simply request FW to perform proper PHY setup */
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
+}
+
+/**
+ * ixgbe_check_link_E610 - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Determine if the link is up and the current link speed
+ * using ACI command (0x0607).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_check_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ s32 rc;
+ u32 i;
+
+ if (!speed || !link_up)
+ return IXGBE_ERR_PARAM;
+
+ /* Set get_link_info flag to ensure that fresh
+ * link information will be obtained from FW
+ * by sending Get Link Status admin command. */
+ hw->link.get_link_info = true;
+
+ /* Update link information in adapter context. */
+ rc = ixgbe_get_link_status(hw, link_up);
+ if (rc)
+ return rc;
+
+ /* Wait for link up if it was requested. */
+ if (link_up_wait_to_complete && *link_up == false) {
+ for (i = 0; i < hw->mac.max_link_up_time; i++) {
+ msec_delay(100);
+ hw->link.get_link_info = true;
+ rc = ixgbe_get_link_status(hw, link_up);
+ if (rc)
+ return rc;
+ if (*link_up)
+ break;
+ }
+ }
+
+ /* Use link information in adapter context updated by the call
+ * to ixgbe_get_link_status() to determine current link speed.
+ * Link speed information is valid only when link up was
+ * reported by FW. */
+ if (*link_up) {
+ switch (hw->link.link_info.link_speed) {
+ case IXGBE_ACI_LINK_SPEED_10MB:
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_100MB:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_1000MB:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_2500MB:
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_5GB:
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_10GB:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ break;
+ }
+ } else {
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
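+/*
+ * Illustrative usage sketch for ixgbe_check_link_E610(), waiting for link
+ * and then acting on the reported speed:
+ *
+ *   ixgbe_link_speed speed;
+ *   bool link_up;
+ *   s32 err;
+ *
+ *   err = ixgbe_check_link_E610(hw, &speed, &link_up, true);
+ *   if (!err && link_up && speed == IXGBE_LINK_SPEED_10GB_FULL)
+ *           (handle 10G link here)
+ */
+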
+/**
+ * ixgbe_get_link_capabilities_E610 - Determine link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ *
+ * Determine speed and AN parameters of a link.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_link_capabilities_E610(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ if (!speed || !autoneg)
+ return IXGBE_ERR_PARAM;
+
+ *autoneg = true;
+ *speed = hw->phy.speeds_supported;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_cfg_phy_fc - Configure PHY Flow Control (FC) data based on FC mode
+ * @hw: pointer to hardware structure
+ * @cfg: PHY configuration data to set FC mode
+ * @req_mode: FC mode to configure
+ *
+ * Configures PHY Flow Control according to the provided configuration.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+ enum ixgbe_fc_mode req_mode)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps = NULL;
+ s32 status = IXGBE_SUCCESS;
+ u8 pause_mask = 0x0;
+
+ if (!cfg)
+ return IXGBE_ERR_PARAM;
+
+ switch (req_mode) {
+ case ixgbe_fc_auto:
+ {
+ pcaps = (struct ixgbe_aci_cmd_get_phy_caps_data *)
+ ixgbe_malloc(hw, sizeof(*pcaps));
+ if (!pcaps) {
+ status = IXGBE_ERR_OUT_OF_MEM;
+ goto out;
+ }
+
+ /* Query the value of FC that both the NIC and the attached
+ * media can do. */
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, pcaps);
+ if (status)
+ goto out;
+
+ pause_mask |= pcaps->caps & IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ pause_mask |= pcaps->caps & IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+
+ break;
+ }
+ case ixgbe_fc_full:
+ pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ixgbe_fc_rx_pause:
+ pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ixgbe_fc_tx_pause:
+ pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ break;
+ default:
+ break;
+ }
+
+ /* clear the old pause settings */
+ cfg->caps &= ~(IXGBE_ACI_PHY_EN_TX_LINK_PAUSE |
+ IXGBE_ACI_PHY_EN_RX_LINK_PAUSE);
+
+ /* set the new capabilities */
+ cfg->caps |= pause_mask;
+
+out:
+ if (pcaps)
+ ixgbe_free(hw, pcaps);
+ return status;
+}
+
+/**
+ * ixgbe_setup_fc_E610 - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Set up flow control. This has to be done during init time.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_setup_fc_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps = { 0 };
+ struct ixgbe_aci_cmd_set_phy_cfg_data cfg = { 0 };
+ s32 status;
+
+ /* Get the current PHY config */
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &pcaps);
+ if (status)
+ return status;
+
+ ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
+
+ /* Configure the set PHY data */
+ status = ixgbe_cfg_phy_fc(hw, &cfg, hw->fc.requested_mode);
+ if (status)
+ return status;
+
+ /* If the capabilities have changed, then set the new config */
+ if (cfg.caps != pcaps.caps) {
+ cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ status = ixgbe_aci_set_phy_cfg(hw, &cfg);
+ if (status)
+ return status;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_fc_autoneg_E610 - Configure flow control
+ * @hw: pointer to hardware structure
+ *
+ * Configure Flow Control.
+ */
+void ixgbe_fc_autoneg_E610(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ /* Get current link status.
+ * Current FC mode will be stored in the hw context. */
+ status = ixgbe_aci_get_link_info(hw, false, NULL);
+ if (status)
+ goto out;
+
+ /* Check if the link is up */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP)) {
+ status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
+ }
+
+ /* Check if auto-negotiation has completed */
+ if (!(hw->link.link_info.an_info & IXGBE_ACI_AN_COMPLETED)) {
+ status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
+ }
+
+out:
+ if (status == IXGBE_SUCCESS) {
+ hw->fc.fc_was_autonegged = true;
+ } else {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ }
+}
+
+/**
+ * ixgbe_set_fw_drv_ver_E610 - Send driver version to FW
+ * @hw: pointer to the HW structure
+ * @maj: driver version major number
+ * @minor: driver version minor number
+ * @build: driver version build number
+ * @sub: driver version sub build number
+ * @len: length of driver_ver string
+ * @driver_ver: driver string
+ *
+ * Send driver version number to Firmware using ACI command (0x0002).
+ *
+ * Return: the exit code of the operation.
+ * IXGBE_SUCCESS - OK
+ * IXGBE_ERR_PARAM - incorrect parameters were given
+ * IXGBE_ERR_ACI_ERROR - encountered an error during sending the command
+ * IXGBE_ERR_ACI_TIMEOUT - a timeout occurred
+ * IXGBE_ERR_OUT_OF_MEM - ran out of memory
+ */
+s32 ixgbe_set_fw_drv_ver_E610(struct ixgbe_hw *hw, u8 maj, u8 minor, u8 build,
+ u8 sub, u16 len, const char *driver_ver)
+{
+ size_t limited_len = min(len, (u16)IXGBE_DRV_VER_STR_LEN_E610);
+ struct ixgbe_driver_ver dv;
+
+ DEBUGFUNC("ixgbe_set_fw_drv_ver_E610");
+
+ if (!len || !driver_ver)
+ return IXGBE_ERR_PARAM;
+
+ dv.major_ver = maj;
+ dv.minor_ver = minor;
+ dv.build_ver = build;
+ dv.subbuild_ver = sub;
+
+ memset(dv.driver_string, 0, IXGBE_DRV_VER_STR_LEN_E610);
+ memcpy(dv.driver_string, driver_ver, limited_len);
+
+ return ixgbe_aci_send_driver_ver(hw, &dv);
+}
+
+/**
+ * ixgbe_disable_rx_E610 - Disable RX unit
+ * @hw: pointer to hardware structure
+ *
+ * Disable the RX DMA unit on E610 using ACI command (0x000C). If the
+ * command fails, the RX unit is disabled via a register write instead.
+ */
+void ixgbe_disable_rx_E610(struct ixgbe_hw *hw)
+{
+ u32 rxctrl;
+
+ DEBUGFUNC("ixgbe_disable_rx_E610");
+
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ u32 pfdtxgswc;
+ s32 status;
+
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+ pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = true;
+ } else {
+ hw->mac.set_lben = false;
+ }
+
+ status = ixgbe_aci_disable_rxen(hw);
+
+ /* If we fail - disable RX using register write */
+ if (status) {
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ rxctrl &= ~IXGBE_RXCTRL_RXEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+ }
+ }
+ }
+}
+
+/**
+ * ixgbe_setup_eee_E610 - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @enable_eee: boolean flag to enable EEE
+ *
+ * Enables/disables EEE based on the enable_eee flag.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_setup_eee_E610(struct ixgbe_hw *hw, bool enable_eee)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
+ u16 eee_cap = 0;
+ s32 status;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ if (enable_eee) {
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_100BASE_TX;
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_1000BASE_T;
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_1000BASE_KX;
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_10GBASE_T;
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_10GBASE_KR;
+ if (phy_caps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_10BASE_T;
+ }
+
+ /* Set EEE capability for particular PHY types */
+ phy_cfg.eee_cap = IXGBE_CPU_TO_LE16(eee_cap);
+
+ status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+
+ return status;
+}
+
+/**
+ * ixgbe_fw_recovery_mode_E610 - Check FW NVM recovery mode
+ * @hw: pointer to hardware structure
+ *
+ * Checks FW NVM recovery mode by
+ * reading the value of the dedicated register.
+ *
+ * Return: true if FW is in recovery mode, otherwise false.
+ */
+bool ixgbe_fw_recovery_mode_E610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, GL_MNG_FWSM);
+
+ return !!(fwsm & GL_MNG_FWSM_FW_MODES_RECOVERY_M);
+}
+
+/**
+ * ixgbe_fw_rollback_mode_E610 - Check FW NVM Rollback
+ * @hw: pointer to hardware structure
+ *
+ * Checks FW NVM Rollback mode by reading the
+ * value of the dedicated register.
+ *
+ * Return: true if FW is in Rollback mode, otherwise false.
+ */
+bool ixgbe_fw_rollback_mode_E610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, GL_MNG_FWSM);
+
+ return !!(fwsm & GL_MNG_FWSM_FW_MODES_ROLLBACK_M);
+}
+
+/**
+ * ixgbe_get_fw_tsam_mode_E610 - Check FW NVM Thermal Sensor Autonomous Mode
+ * @hw: pointer to hardware structure
+ *
+ * Checks Thermal Sensor Autonomous Mode by reading the
+ * value of the dedicated register.
+ *
+ * Return: true if FW is in TSAM, otherwise false.
+ */
+bool ixgbe_get_fw_tsam_mode_E610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_X550EM_a);
+
+ return !!(fwsm & IXGBE_FWSM_TS_ENABLED);
+}
+
+/**
+ * ixgbe_init_phy_ops_E610 - PHY specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY type was not known.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_init_phy_ops_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ phy->ops.identify_sfp = ixgbe_identify_module_E610;
+ phy->ops.read_reg = NULL; /* PHY reg access is not required */
+ phy->ops.write_reg = NULL;
+ phy->ops.read_reg_mdi = NULL;
+ phy->ops.write_reg_mdi = NULL;
+ phy->ops.setup_link = ixgbe_setup_phy_link_E610;
+ phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_E610;
+ phy->ops.read_i2c_byte = NULL; /* disabled for E610 */
+ phy->ops.write_i2c_byte = NULL; /* disabled for E610 */
+ phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_E610;
+ phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_E610;
+ phy->ops.write_i2c_eeprom = ixgbe_write_i2c_eeprom_E610;
+ phy->ops.i2c_bus_clear = NULL; /* do not use generic implementation */
+ phy->ops.check_overtemp = ixgbe_check_overtemp_E610;
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
+ phy->ops.set_phy_power = ixgbe_set_phy_power_E610;
+ else
+ phy->ops.set_phy_power = NULL;
+ phy->ops.enter_lplu = ixgbe_enter_lplu_E610;
+ phy->ops.handle_lasi = NULL; /* no implementation for E610 */
+ phy->ops.read_i2c_byte_unlocked = NULL; /* disabled for E610 */
+ phy->ops.write_i2c_byte_unlocked = NULL; /* disabled for E610 */
+
+ /* TODO: Set functions pointers based on device ID */
+
+ /* Identify the PHY */
+ ret_val = phy->ops.identify(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ /* TODO: Set functions pointers based on PHY type */
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_identify_phy_E610 - Identify PHY
+ * @hw: pointer to hardware structure
+ *
+ * Determine PHY type, supported speeds and PHY ID.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_identify_phy_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ s32 rc;
+
+ /* Set PHY type */
+ hw->phy.type = ixgbe_phy_fw;
+
+ rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ &pcaps);
+ if (rc)
+ return rc;
+
+ if (!(pcaps.module_compliance_enforcement &
+ IXGBE_ACI_MOD_ENFORCE_STRICT_MODE)) {
+ /* Handle lenient mode */
+ rc = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA,
+ &pcaps);
+ if (rc)
+ return rc;
+ }
+
+ /* Determine supported speeds */
+ hw->phy.speeds_supported = IXGBE_LINK_SPEED_UNKNOWN;
+
+ if (pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10M_SGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10_FULL;
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_100M_SGMII ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_100M_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_SX ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_LX ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1G_SGMII ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_DA ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_SR ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_LR ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_C2C ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ /* 2.5 and 5 Gbps link speeds must be excluded from the
+ * auto-negotiation set used during driver initialization due to
+ * compatibility issues with certain switches. Those issues do not
+ * exist in case of E610 2.5G SKU device (0x57b1).
+ */
+ if (!hw->phy.autoneg_advertised &&
+ hw->device_id != IXGBE_DEV_ID_E610_2_5G_T)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_T ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_X ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_KX ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
+
+ if (!hw->phy.autoneg_advertised &&
+ hw->device_id == IXGBE_DEV_ID_E610_2_5G_T)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_T ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
+
+ /* Set PHY ID */
+ memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32));
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_identify_module_E610 - Identify SFP module type
+ * @hw: pointer to hardware structure
+ *
+ * Identify the SFP module type.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_identify_module_E610(struct ixgbe_hw *hw)
+{
+ bool media_available;
+ u8 module_type;
+ s32 rc;
+
+ rc = ixgbe_update_link_info(hw);
+ if (rc)
+ goto err;
+
+ media_available =
+ (hw->link.link_info.link_info &
+ IXGBE_ACI_MEDIA_AVAILABLE) ? true : false;
+
+ if (media_available) {
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+
+ /* Get module type from the hw context updated by
+ * ixgbe_update_link_info()
+ */
+ module_type =
+ hw->link.link_info.module_type[IXGBE_ACI_MOD_TYPE_IDENT];
+
+ if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE) ||
+ (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE)) {
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
+ } else if (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR) {
+ hw->phy.sfp_type = ixgbe_sfp_type_sr;
+ } else if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR) ||
+ (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM)) {
+ hw->phy.sfp_type = ixgbe_sfp_type_lr;
+ }
+ rc = IXGBE_SUCCESS;
+ } else {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ rc = IXGBE_ERR_SFP_NOT_PRESENT;
+ }
+err:
+ return rc;
+}
+
+/**
+ * ixgbe_setup_phy_link_E610 - Sets up firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ *
+ * Set the parameters for the firmware-controlled PHYs.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_setup_phy_link_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ struct ixgbe_aci_cmd_set_phy_cfg_data pcfg;
+ u8 rmode = IXGBE_ACI_REPORT_TOPO_CAP_MEDIA;
+ u64 sup_phy_type_low, sup_phy_type_high;
+ s32 rc;
+
+ rc = ixgbe_aci_get_link_info(hw, false, NULL);
+ if (rc)
+ goto err;
+
+ /* If media is not available get default config */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+ rmode = IXGBE_ACI_REPORT_DFLT_CFG;
+
+ rc = ixgbe_aci_get_phy_caps(hw, false, rmode, &pcaps);
+ if (rc)
+ goto err;
+
+ sup_phy_type_low = pcaps.phy_type_low;
+ sup_phy_type_high = pcaps.phy_type_high;
+
+ /* Get Active configuration to avoid unintended changes */
+ rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &pcaps);
+ if (rc)
+ goto err;
+ ixgbe_copy_phy_caps_to_cfg(&pcaps, &pcfg);
+
+ /* Set default PHY types for a given speed */
+ pcfg.phy_type_low = 0;
+ pcfg.phy_type_high = 0;
+
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) {
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10BASE_T;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10M_SGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_100BASE_TX;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_100M_SGMII;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_100M_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_T;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_SX;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_LX;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_KX;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1G_SGMII;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_1G_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_T;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_X;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_KX;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_SGMII;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_T;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_KR;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_5G_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_T;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_DA;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_SR;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_LR;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_C2C;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10G_USXGMII;
+ }
+
+ /* Mask the set values to avoid requesting unsupported link types */
+ pcfg.phy_type_low &= sup_phy_type_low;
+ pcfg.phy_type_high &= sup_phy_type_high;
+
+ if (pcfg.phy_type_high != pcaps.phy_type_high ||
+ pcfg.phy_type_low != pcaps.phy_type_low ||
+ pcfg.caps != pcaps.caps) {
+ pcfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ pcfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ rc = ixgbe_aci_set_phy_cfg(hw, &pcfg);
+ }
+
+err:
+ return rc;
+}
+
+/**
+ * ixgbe_get_phy_firmware_version_E610 - Gets the PHY Firmware Version
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to the PHY Firmware Version
+ *
+ * Determines PHY FW version based on response to Get PHY Capabilities
+ * admin command (0x0600).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_phy_firmware_version_E610(struct ixgbe_hw *hw,
+ u16 *firmware_version)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ s32 status;
+
+ if (!firmware_version)
+ return IXGBE_ERR_PARAM;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &pcaps);
+ if (status)
+ return status;
+
+ /* TODO: determine which bytes of the 8-byte phy_fw_ver
+ * field should be written to the 2-byte firmware_version
+ * output argument. */
+ memcpy(firmware_version, pcaps.phy_fw_ver, sizeof(u16));
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_i2c_sff8472_E610 - Reads 8 bit word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset at address 0xA2
+ * @sff8472_data: value read
+ *
+ * Performs byte read operation from SFP module's SFF-8472 data over I2C.
+ *
+ * Return: the exit code of the operation.
+ **/
+s32 ixgbe_read_i2c_sff8472_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
+{
+ return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR2,
+ byte_offset, 0,
+ IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
+ sff8472_data, 1, false);
+}
+
+/**
+ * ixgbe_read_i2c_eeprom_E610 - Reads 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs byte read operation from SFP module's EEPROM over I2C interface.
+ *
+ * Return: the exit code of the operation.
+ **/
+s32 ixgbe_read_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data)
+{
+ return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR,
+ byte_offset, 0,
+ IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
+ eeprom_data, 1, false);
+}
+
+/**
+ * ixgbe_write_i2c_eeprom_E610 - Writes 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to write
+ * @eeprom_data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface.
+ *
+ * Return: the exit code of the operation.
+ **/
+s32 ixgbe_write_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 eeprom_data)
+{
+ return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR,
+ byte_offset, 0,
+ IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
+ &eeprom_data, 1, true);
+}
+
+/**
+ * ixgbe_check_overtemp_E610 - Check firmware-controlled PHYs for overtemp
+ * @hw: pointer to hardware structure
+ *
+ * Get the link status and check if the PHY temperature alarm detected.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_check_overtemp_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_link_status_data link_data = { 0 };
+ struct ixgbe_aci_cmd_get_link_status *resp;
+ struct ixgbe_aci_desc desc;
+ s32 status = IXGBE_SUCCESS;
+
+ if (!hw)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
+ resp = &desc.params.get_link_status;
+ resp->cmd_flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_LSE_NOP);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (link_data.ext_info & IXGBE_ACI_LINK_PHY_TEMP_ALARM) {
+ ERROR_REPORT1(IXGBE_ERROR_CAUTION,
+ "PHY Temperature Alarm detected");
+ status = IXGBE_ERR_OVERTEMP;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_set_phy_power_E610 - Control power for copper PHY
+ * @hw: pointer to hardware structure
+ * @on: true for on, false for off
+ *
+ * Sets the PHY power on or off by getting its capabilities and setting
+ * the appropriate configuration parameters.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_set_phy_power_E610(struct ixgbe_hw *hw, bool on)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
+ s32 status;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ if (on) {
+ phy_cfg.caps &= ~IXGBE_ACI_PHY_ENA_LOW_POWER;
+ } else {
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LOW_POWER;
+ }
+
+ /* PHY is already in requested power mode */
+ if (phy_caps.caps == phy_cfg.caps)
+ return IXGBE_SUCCESS;
+
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+
+ return status;
+}
+
+/**
+ * ixgbe_enter_lplu_E610 - Transition to low power states
+ * @hw: pointer to hardware structure
+ *
+ * Configures Low Power Link Up on transition to low power states
+ * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
+ * X557 PHY immediately prior to entering LPLU.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_enter_lplu_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
+ s32 status;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ phy_cfg.low_power_ctrl_an |= IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG;
+
+ status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+
+ return status;
+}
+
+/**
+ * ixgbe_init_eeprom_params_E610 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_init_eeprom_params_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 gens_stat;
+ u8 sr_size;
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ eeprom->type = ixgbe_flash;
+
+ gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
+ sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >>
+ GLNVM_GENS_SR_SIZE_S;
+
+ /* Switching to words (sr_size contains power of 2) */
+ eeprom->word_size = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
+
+ DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
+ eeprom->type, eeprom->word_size);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_ee_aci_E610 - Read EEPROM word using the admin command.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with reading.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_read_sr_word_aci(hw, offset, data);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_ee_aci_buffer_E610 - Read EEPROM word(s) using admin commands.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Reads 16 bit word(s) from the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with reading.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_read_sr_buf_aci(hw, offset, &words, data);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_ee_aci_E610 - Write EEPROM word using the admin command.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Writes a 16 bit word to the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with writing.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_write_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ixgbe_write_sr_word_aci(hw, (u32)offset, &data);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_ee_aci_buffer_E610 - Write EEPROM word(s) using admin commands.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @words: number of words
+ * @data: word(s) to write to the EEPROM
+ *
+ * Writes 16 bit word(s) to the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with writing.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_write_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ixgbe_write_sr_buf_aci(hw, (u32)offset, words, data);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_calc_eeprom_checksum_E610 - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ *
+ * Calculates the SW checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules. The structure and size of
+ * the VPD are customer specific and unknown, so this function skips the
+ * maximum possible VPD size (1kB).
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the negative error code on error, or the 16-bit checksum
+ */
+s32 ixgbe_calc_eeprom_checksum_E610(struct ixgbe_hw *hw)
+{
+ bool nvm_acquired = false;
+ u16 pcie_alt_module = 0;
+ u16 checksum_local = 0;
+ u16 checksum = 0;
+ u16 vpd_module;
+ void *vmem;
+ s32 status;
+ u16 *data;
+ u16 i;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ vmem = ixgbe_calloc(hw, IXGBE_SR_SECTOR_SIZE_IN_WORDS, sizeof(u16));
+ if (!vmem)
+ return IXGBE_ERR_OUT_OF_MEM;
+ data = (u16 *)vmem;
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ goto ixgbe_calc_sr_checksum_exit;
+ nvm_acquired = true;
+
+ /* read pointer to VPD area */
+ status = ixgbe_read_sr_word_aci(hw, E610_SR_VPD_PTR, &vpd_module);
+ if (status)
+ goto ixgbe_calc_sr_checksum_exit;
+
+ /* read pointer to PCIe Alt Auto-load module */
+ status = ixgbe_read_sr_word_aci(hw, E610_SR_PCIE_ALT_AUTO_LOAD_PTR,
+ &pcie_alt_module);
+ if (status)
+ goto ixgbe_calc_sr_checksum_exit;
+
+ /* Calculate SW checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules
+ */
+ for (i = 0; i < hw->eeprom.word_size; i++) {
+ /* Read SR page */
+ if ((i % IXGBE_SR_SECTOR_SIZE_IN_WORDS) == 0) {
+ u16 words = IXGBE_SR_SECTOR_SIZE_IN_WORDS;
+
+ status = ixgbe_read_sr_buf_aci(hw, i, &words, data);
+ if (status != IXGBE_SUCCESS)
+ goto ixgbe_calc_sr_checksum_exit;
+ }
+
+ /* Skip Checksum word */
+ if (i == E610_SR_SW_CHECKSUM_WORD)
+ continue;
+ /* Skip VPD module (convert byte size to word count) */
+ if (i >= (u32)vpd_module &&
+ i < ((u32)vpd_module + E610_SR_VPD_SIZE_WORDS))
+ continue;
+ /* Skip PCIe ALT module (convert byte size to word count) */
+ if (i >= (u32)pcie_alt_module &&
+ i < ((u32)pcie_alt_module + E610_SR_PCIE_ALT_SIZE_WORDS))
+ continue;
+
+ checksum_local += data[i % IXGBE_SR_SECTOR_SIZE_IN_WORDS];
+ }
+
+ checksum = (u16)IXGBE_SR_SW_CHECKSUM_BASE - checksum_local;
+
+ixgbe_calc_sr_checksum_exit:
+	if (nvm_acquired)
+ ixgbe_release_nvm(hw);
+ ixgbe_free(hw, vmem);
+
+	if (!status)
+ return (s32)checksum;
+ else
+ return status;
+}
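
The arithmetic above relies on the Shadow RAM convention that every word, including the checksum word itself, sums to IXGBE_SR_SW_CHECKSUM_BASE (0xBABA) modulo 2^16, so the stored checksum is the base minus the sum of all other words. A minimal userspace sketch of that invariant (toy data, not driver code):

	#include <assert.h>
	#include <stdint.h>

	#define SR_SW_CHECKSUM_BASE 0xBABA	/* mirrors IXGBE_SR_SW_CHECKSUM_BASE */

	int main(void)
	{
		uint16_t words[4] = { 0x1234, 0x00FF, 0xBEEF, 0x0000 };
		uint16_t sum = 0;
		int i;

		for (i = 0; i < 3; i++)	/* sum everything but the checksum slot */
			sum += words[i];
		words[3] = (uint16_t)(SR_SW_CHECKSUM_BASE - sum);

		sum = 0;		/* validation: the full sum hits the base */
		for (i = 0; i < 4; i++)
			sum += words[i];
		assert(sum == SR_SW_CHECKSUM_BASE);
		return 0;
	}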
+
+/**
+ * ixgbe_update_eeprom_checksum_E610 - Updates the EEPROM checksum and flash
+ * @hw: pointer to hardware structure
+ *
+ * After writing EEPROM to Shadow RAM, software sends the admin command
+ * to recalculate and update EEPROM checksum and instructs the hardware
+ * to update the flash.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_update_eeprom_checksum_E610(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_nvm_recalculate_checksum(hw);
+ if (status)
+ return status;
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ixgbe_nvm_write_activate(hw, IXGBE_ACI_NVM_ACTIV_REQ_EMPR,
+ NULL);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum_E610 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_validate_eeprom_checksum_E610(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+	s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_nvm_validate_checksum(hw);
+
+ if (status)
+ return status;
+
+ if (checksum_val) {
+ u16 tmp_checksum;
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_read_sr_word_aci(hw, E610_SR_SW_CHECKSUM_WORD,
+ &tmp_checksum);
+ ixgbe_release_nvm(hw);
+
+ if (!status)
+ *checksum_val = tmp_checksum;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
+ * @hw: pointer to hardware structure
+ * @module_tlv: pointer to module TLV to return
+ * @module_tlv_len: pointer to module TLV length to return
+ * @module_type: module type requested
+ *
+ * Finds the requested sub module TLV type from the Preserved Field
+ * Area (PFA) and returns the TLV pointer and length. The caller can
+ * use these to read the variable length TLV value.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_pfa_module_tlv(struct ixgbe_hw *hw, u16 *module_tlv,
+ u16 *module_tlv_len, u16 module_type)
+{
+ u16 pfa_len, pfa_ptr, pfa_end_ptr;
+ u16 next_tlv;
+ s32 status;
+
+ status = ixgbe_read_ee_aci_E610(hw, E610_SR_PFA_PTR, &pfa_ptr);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+ status = ixgbe_read_ee_aci_E610(hw, pfa_ptr, &pfa_len);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+ /* Starting with first TLV after PFA length, iterate through the list
+ * of TLVs to find the requested one.
+ */
+ next_tlv = pfa_ptr + 1;
+ pfa_end_ptr = pfa_ptr + pfa_len;
+ while (next_tlv < pfa_end_ptr) {
+ u16 tlv_sub_module_type, tlv_len;
+
+ /* Read TLV type */
+ status = ixgbe_read_ee_aci_E610(hw, next_tlv,
+ &tlv_sub_module_type);
+ if (status != IXGBE_SUCCESS) {
+ break;
+ }
+ /* Read TLV length */
+ status = ixgbe_read_ee_aci_E610(hw, next_tlv + 1, &tlv_len);
+ if (status != IXGBE_SUCCESS) {
+ break;
+ }
+ if (tlv_sub_module_type == module_type) {
+ if (tlv_len) {
+ *module_tlv = next_tlv;
+ *module_tlv_len = tlv_len;
+ return IXGBE_SUCCESS;
+ }
+ return IXGBE_ERR_INVAL_SIZE;
+ }
+ /* Check next TLV, i.e. current TLV pointer + length + 2 words
+ * (for current TLV's type and length)
+ */
+ next_tlv = next_tlv + tlv_len + 2;
+ }
+ /* Module does not exist */
+ return IXGBE_ERR_DOES_NOT_EXIST;
+}
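
Each TLV occupies its value length plus the two header words (type and length), which is why the walk above advances by tlv_len + 2. A worked example with a hypothetical layout:

	#include <assert.h>

	int main(void)
	{
		unsigned int next_tlv = 0x41;	/* hypothetical first TLV offset */
		unsigned int tlv_len = 3;	/* hypothetical value length, in words */

		/* skip the type word, the length word, and tlv_len value words */
		next_tlv = next_tlv + tlv_len + 2;
		assert(next_tlv == 0x46);
		return 0;
	}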
+
+/**
+ * ixgbe_read_pba_string_E610 - Reads part number string from NVM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the NVM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the NVM.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_pba_string_E610(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ u16 pba_tlv, pba_tlv_len;
+ u16 pba_word, pba_size;
+ s32 status;
+ u16 i;
+
+ status = ixgbe_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
+ E610_SR_PBA_BLOCK_PTR);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+
+ /* pba_size is the next word */
+ status = ixgbe_read_ee_aci_E610(hw, (pba_tlv + 2), &pba_size);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+
+ if (pba_tlv_len < pba_size) {
+ return IXGBE_ERR_INVAL_SIZE;
+ }
+
+ /* Subtract one to get PBA word count (PBA Size word is included in
+ * total size)
+ */
+ pba_size--;
+ if (pba_num_size < (((u32)pba_size * 2) + 1)) {
+ return IXGBE_ERR_PARAM;
+ }
+
+ for (i = 0; i < pba_size; i++) {
+ status = ixgbe_read_ee_aci_E610(hw, (pba_tlv + 2 + 1) + i,
+ &pba_word);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+
+ pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
+ pba_num[(i * 2) + 1] = pba_word & 0xFF;
+ }
+ pba_num[(pba_size * 2)] = '\0';
+
+ return status;
+}
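
The pba_num_size check above means callers must provide room for two characters per PBA data word plus a terminating NUL. A small sketch of the sizing rule (the 11-word count is a hypothetical example):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t pba_size = 11;	/* hypothetical word count, size word excluded */
		uint32_t needed = (uint32_t)pba_size * 2 + 1;	/* 2 chars/word + NUL */

		assert(needed == 23);
		return 0;
	}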
diff --git a/sys/dev/ixgbe/ixgbe_e610.h b/sys/dev/ixgbe/ixgbe_e610.h
new file mode 100644
index 000000000000..94e600139499
--- /dev/null
+++ b/sys/dev/ixgbe/ixgbe_e610.h
@@ -0,0 +1,224 @@
+/******************************************************************************
+ SPDX-License-Identifier: BSD-3-Clause
+
+ Copyright (c) 2025, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+#ifndef _IXGBE_E610_H_
+#define _IXGBE_E610_H_
+
+#include "ixgbe_type.h"
+
+void ixgbe_init_aci(struct ixgbe_hw *hw);
+void ixgbe_shutdown_aci(struct ixgbe_hw *hw);
+s32 ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size);
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw);
+s32 ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
+ bool *pending);
+
+void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode);
+
+s32 ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw);
+s32 ixgbe_aci_send_driver_ver(struct ixgbe_hw *hw, struct ixgbe_driver_ver *dv);
+s32 ixgbe_aci_set_pf_context(struct ixgbe_hw *hw, u8 pf_id);
+
+s32 ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access, u32 timeout);
+void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res);
+s32 ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+ u32 *cap_count, enum ixgbe_aci_opc opc);
+s32 ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_caps);
+s32 ixgbe_discover_func_caps(struct ixgbe_hw* hw,
+ struct ixgbe_hw_func_caps* func_caps);
+s32 ixgbe_get_caps(struct ixgbe_hw *hw);
+s32 ixgbe_aci_disable_rxen(struct ixgbe_hw *hw);
+s32 ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps);
+bool ixgbe_phy_caps_equals_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+s32 ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+s32 ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link);
+s32 ixgbe_update_link_info(struct ixgbe_hw *hw);
+s32 ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up);
+s32 ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+ struct ixgbe_link_status *link);
+s32 ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask);
+s32 ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask);
+
+s32 ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle);
+s32 ixgbe_find_netlist_node(struct ixgbe_hw *hw, u8 node_type_ctx,
+ u8 node_part_number, u16 *node_handle);
+s32 ixgbe_aci_read_i2c(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data);
+s32 ixgbe_aci_write_i2c(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data);
+
+s32 ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode);
+s32 ixgbe_aci_set_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool value);
+s32 ixgbe_aci_get_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool *value);
+s32 ixgbe_aci_sff_eeprom(struct ixgbe_hw *hw, u16 lport, u8 bus_addr,
+ u16 mem_addr, u8 page, u8 page_bank_ctrl, u8 *data,
+ u8 length, bool write);
+s32 ixgbe_aci_prog_topo_dev_nvm(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_params *topo_params);
+s32 ixgbe_aci_read_topo_dev_nvm(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_params *topo_params,
+ u32 start_address, u8 *data, u8 data_size);
+
+s32 ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+ enum ixgbe_aci_res_access_type access);
+void ixgbe_release_nvm(struct ixgbe_hw *hw);
+
+s32 ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram);
+
+s32 ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid);
+s32 ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 command_flags);
+
+s32 ixgbe_aci_read_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
+ u16 field_id, void *data, u16 buf_size,
+ u16 *elem_count);
+s32 ixgbe_aci_write_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
+ void *data, u16 buf_size, u16 elem_count);
+
+s32 ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw);
+s32 ixgbe_nvm_recalculate_checksum(struct ixgbe_hw *hw);
+
+s32 ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
+ u8 *response_flags);
+
+s32 ixgbe_get_nvm_minsrevs(struct ixgbe_hw *hw, struct ixgbe_minsrev_info *minsrevs);
+s32 ixgbe_update_nvm_minsrevs(struct ixgbe_hw *hw, struct ixgbe_minsrev_info *minsrevs);
+
+s32 ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
+s32 ixgbe_get_active_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
+
+s32 ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw, struct ixgbe_netlist_info *netlist);
+s32 ixgbe_init_nvm(struct ixgbe_hw *hw);
+
+s32 ixgbe_sanitize_operate(struct ixgbe_hw *hw);
+s32 ixgbe_sanitize_nvm(struct ixgbe_hw *hw, u8 cmd_flags, u8 *values);
+
+s32 ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words, u16 *data);
+s32 ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+ u8 *data, bool read_shadow_ram);
+
+s32 ixgbe_write_sr_word_aci(struct ixgbe_hw *hw, u32 offset, const u16 *data);
+s32 ixgbe_write_sr_buf_aci(struct ixgbe_hw *hw, u32 offset, u16 words, const u16 *data);
+
+s32 ixgbe_aci_alternate_write(struct ixgbe_hw *hw, u32 reg_addr0,
+ u32 reg_val0, u32 reg_addr1, u32 reg_val1);
+s32 ixgbe_aci_alternate_read(struct ixgbe_hw *hw, u32 reg_addr0,
+ u32 *reg_val0, u32 reg_addr1, u32 *reg_val1);
+s32 ixgbe_aci_alternate_write_done(struct ixgbe_hw *hw, u8 bios_mode,
+ bool *reset_needed);
+s32 ixgbe_aci_alternate_clear(struct ixgbe_hw *hw);
+
+s32 ixgbe_aci_get_internal_data(struct ixgbe_hw *hw, u16 cluster_id,
+ u16 table_id, u32 start, void *buf,
+ u16 buf_size, u16 *ret_buf_size,
+ u16 *ret_next_cluster, u16 *ret_next_table,
+ u32 *ret_next_index);
+
+s32 ixgbe_handle_nvm_access(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_access_cmd *cmd,
+ struct ixgbe_nvm_access_data *data);
+
+s32 ixgbe_aci_set_health_status_config(struct ixgbe_hw *hw, u8 event_source);
+
+/* E610 operations */
+s32 ixgbe_init_ops_E610(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw_E610(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_E610(struct ixgbe_hw *hw);
+enum ixgbe_media_type ixgbe_get_media_type_E610(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_physical_layer_E610(struct ixgbe_hw *hw);
+s32 ixgbe_setup_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait);
+s32 ixgbe_check_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
+s32 ixgbe_get_link_capabilities_E610(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg);
+s32 ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+ enum ixgbe_fc_mode req_mode);
+s32 ixgbe_setup_fc_E610(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg_E610(struct ixgbe_hw *hw);
+s32 ixgbe_set_fw_drv_ver_E610(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
+ u8 sub, u16 len, const char *driver_ver);
+void ixgbe_disable_rx_E610(struct ixgbe_hw *hw);
+s32 ixgbe_setup_eee_E610(struct ixgbe_hw *hw, bool enable_eee);
+bool ixgbe_fw_recovery_mode_E610(struct ixgbe_hw *hw);
+bool ixgbe_fw_rollback_mode_E610(struct ixgbe_hw *hw);
+bool ixgbe_get_fw_tsam_mode_E610(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_E610(struct ixgbe_hw *hw);
+s32 ixgbe_identify_phy_E610(struct ixgbe_hw *hw);
+s32 ixgbe_identify_module_E610(struct ixgbe_hw *hw);
+s32 ixgbe_setup_phy_link_E610(struct ixgbe_hw *hw);
+s32 ixgbe_get_phy_firmware_version_E610(struct ixgbe_hw *hw,
+ u16 *firmware_version);
+s32 ixgbe_read_i2c_sff8472_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data);
+s32 ixgbe_read_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data);
+s32 ixgbe_write_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 eeprom_data);
+s32 ixgbe_check_overtemp_E610(struct ixgbe_hw *hw);
+s32 ixgbe_set_phy_power_E610(struct ixgbe_hw *hw, bool on);
+s32 ixgbe_enter_lplu_E610(struct ixgbe_hw *hw);
+s32 ixgbe_init_eeprom_params_E610(struct ixgbe_hw *hw);
+s32 ixgbe_read_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_write_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_calc_eeprom_checksum_E610(struct ixgbe_hw *hw);
+s32 ixgbe_update_eeprom_checksum_E610(struct ixgbe_hw *hw);
+s32 ixgbe_validate_eeprom_checksum_E610(struct ixgbe_hw *hw, u16 *checksum_val);
+s32 ixgbe_read_pba_string_E610(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size);
+
+#endif /* _IXGBE_E610_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_osdep.c b/sys/dev/ixgbe/ixgbe_osdep.c
index 892924712c38..9bd9ce63b786 100644
--- a/sys/dev/ixgbe/ixgbe_osdep.c
+++ b/sys/dev/ixgbe/ixgbe_osdep.c
@@ -114,3 +114,29 @@ ixgbe_link_speed_to_baudrate(ixgbe_link_speed speed)
return baudrate;
}
+
+void
+ixgbe_init_lock(struct ixgbe_lock *lock)
+{
+ mtx_init(&lock->mutex, "mutex",
+ "ixgbe ACI lock", MTX_DEF | MTX_DUPOK);
+}
+
+void
+ixgbe_acquire_lock(struct ixgbe_lock *lock)
+{
+ mtx_lock(&lock->mutex);
+}
+
+void
+ixgbe_release_lock(struct ixgbe_lock *lock)
+{
+ mtx_unlock(&lock->mutex);
+}
+
+void
+ixgbe_destroy_lock(struct ixgbe_lock *lock)
+{
+ if (mtx_initialized(&lock->mutex))
+ mtx_destroy(&lock->mutex);
+}
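
The wrappers map one-to-one onto mtx(9); a sketch of the expected call pattern around an ACI critical section (the function and the register access it brackets are illustrative, not part of the patch):

	#include "ixgbe_osdep.h"

	static void
	example_aci_critical_section(struct ixgbe_lock *lock)
	{
		ixgbe_acquire_lock(lock);	/* mtx_lock(&lock->mutex) */
		/* ... access ACI descriptor registers here ... */
		ixgbe_release_lock(lock);	/* mtx_unlock(&lock->mutex) */
	}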
diff --git a/sys/dev/ixgbe/ixgbe_osdep.h b/sys/dev/ixgbe/ixgbe_osdep.h
index cf7c578fd684..8cf1d13736ce 100644
--- a/sys/dev/ixgbe/ixgbe_osdep.h
+++ b/sys/dev/ixgbe/ixgbe_osdep.h
@@ -133,7 +133,9 @@ enum {
/* XXX these need to be revisited */
#define IXGBE_CPU_TO_LE16 htole16
#define IXGBE_CPU_TO_LE32 htole32
+#define IXGBE_LE16_TO_CPU le16toh
#define IXGBE_LE32_TO_CPU le32toh
+#define IXGBE_LE64_TO_CPU le64toh
#define IXGBE_LE32_TO_CPUS(x) *(x) = le32dec(x)
#define IXGBE_CPU_TO_BE16 htobe16
#define IXGBE_CPU_TO_BE32 htobe32
@@ -146,6 +148,7 @@ typedef int16_t s16;
typedef uint32_t u32;
typedef int32_t s32;
typedef uint64_t u64;
+typedef int64_t s64;
#ifndef __bool_true_false_are_defined
typedef boolean_t bool;
#endif
@@ -195,6 +198,11 @@ struct ixgbe_osdep
bus_space_handle_t mem_bus_space_handle;
};
+struct ixgbe_lock
+{
+ struct mtx mutex;
+};
+
/* These routines need struct ixgbe_hw declared */
struct ixgbe_hw;
device_t ixgbe_dev_from_hw(struct ixgbe_hw *hw);
@@ -222,4 +230,27 @@ extern void ixgbe_write_reg_array(struct ixgbe_hw *, u32, u32, u32);
#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, val) \
ixgbe_write_reg_array(a, reg, offset, val)
+void ixgbe_init_lock(struct ixgbe_lock *);
+void ixgbe_destroy_lock(struct ixgbe_lock *);
+void ixgbe_acquire_lock(struct ixgbe_lock *);
+void ixgbe_release_lock(struct ixgbe_lock *);
+
+static inline void *
+ixgbe_calloc(struct ixgbe_hw __unused *hw, size_t count, size_t size)
+{
+ return (malloc(count * size, M_DEVBUF, M_ZERO | M_NOWAIT));
+}
+
+static inline void *
+ixgbe_malloc(struct ixgbe_hw __unused *hw, size_t size)
+{
+ return (malloc(size, M_DEVBUF, M_ZERO | M_NOWAIT));
+}
+
+static inline void
+ixgbe_free(struct ixgbe_hw __unused *hw, void *addr)
+{
+ free(addr, M_DEVBUF);
+}
+
#endif /* _IXGBE_OSDEP_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_type.h b/sys/dev/ixgbe/ixgbe_type.h
index 91b46da72c75..0bbe7806d41d 100644
--- a/sys/dev/ixgbe/ixgbe_type.h
+++ b/sys/dev/ixgbe/ixgbe_type.h
@@ -74,6 +74,7 @@
*/
#include "ixgbe_osdep.h"
+#include "ixgbe_type_e610.h"
/* Override this by setting IOMEM in your ixgbe_osdep.h header */
#define IOMEM
@@ -150,12 +151,19 @@
#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0
+#define IXGBE_DEV_ID_E610_BACKPLANE 0x57AE
+#define IXGBE_DEV_ID_E610_SFP 0x57AF
+#define IXGBE_DEV_ID_E610_10G_T 0x57B0
+#define IXGBE_DEV_ID_E610_2_5G_T 0x57B1
+#define IXGBE_DEV_ID_E610_SGMII 0x57B2
#define IXGBE_DEV_ID_X550_VF_HV 0x1564
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5
#define IXGBE_DEV_ID_X550EM_A_VF_HV 0x15B4
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+#define IXGBE_DEV_ID_E610_VF 0x57AD
+#define IXGBE_SUBDEV_ID_E610_VF_HV 0x0001
#define IXGBE_CAT(r, m) IXGBE_##r##m
@@ -1969,6 +1977,7 @@ enum {
#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
+#define IXGBE_EICR_FW_EVENT 0x00200000 /* Async FW event */
#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */
@@ -2004,6 +2013,7 @@ enum {
#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EICS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
@@ -2025,6 +2035,7 @@ enum {
#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */
#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
@@ -2047,6 +2058,7 @@ enum {
#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMC_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
@@ -2454,6 +2466,7 @@ enum {
#define IXGBE_82599_SERIAL_NUMBER_MAC_ADDR 0x11
#define IXGBE_X550_SERIAL_NUMBER_MAC_ADDR 0x04
+#define IXGBE_PCIE_MSIX_E610_CAPS 0xB2
#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
#define IXGBE_MAX_MSIX_VECTORS_82599 0x40
#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
@@ -2571,6 +2584,7 @@ enum {
#define IXGBE_PCI_DEVICE_STATUS 0xAA
#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
#define IXGBE_PCI_LINK_STATUS 0xB2
+#define IXGBE_PCI_LINK_STATUS_E610 0x82
#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
#define IXGBE_PCI_LINK_WIDTH 0x3F0
#define IXGBE_PCI_LINK_WIDTH_1 0x10
@@ -2581,6 +2595,7 @@ enum {
#define IXGBE_PCI_LINK_SPEED_2500 0x1
#define IXGBE_PCI_LINK_SPEED_5000 0x2
#define IXGBE_PCI_LINK_SPEED_8000 0x3
+#define IXGBE_PCI_LINK_SPEED_16000 0x4
#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
@@ -3743,6 +3758,8 @@ enum ixgbe_mac_type {
ixgbe_mac_X550_vf,
ixgbe_mac_X550EM_x_vf,
ixgbe_mac_X550EM_a_vf,
+ ixgbe_mac_E610,
+ ixgbe_mac_E610_vf,
ixgbe_num_macs
};
@@ -3822,7 +3839,9 @@ enum ixgbe_media_type {
ixgbe_media_type_copper,
ixgbe_media_type_backplane,
ixgbe_media_type_cx4,
- ixgbe_media_type_virtual
+ ixgbe_media_type_virtual,
+ ixgbe_media_type_da,
+ ixgbe_media_type_aui
};
/* Flow Control Settings */
@@ -3831,6 +3850,7 @@ enum ixgbe_fc_mode {
ixgbe_fc_rx_pause,
ixgbe_fc_tx_pause,
ixgbe_fc_full,
+ ixgbe_fc_auto,
ixgbe_fc_default
};
@@ -3863,6 +3883,7 @@ enum ixgbe_bus_speed {
ixgbe_bus_speed_2500 = 2500,
ixgbe_bus_speed_5000 = 5000,
ixgbe_bus_speed_8000 = 8000,
+ ixgbe_bus_speed_16000 = 16000,
ixgbe_bus_speed_reserved
};
@@ -4007,6 +4028,7 @@ struct ixgbe_eeprom_operations {
s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
s32 (*update_checksum)(struct ixgbe_hw *);
s32 (*calc_checksum)(struct ixgbe_hw *);
+ s32 (*read_pba_string)(struct ixgbe_hw *, u8 *, u32);
};
struct ixgbe_mac_operations {
@@ -4118,6 +4140,10 @@ struct ixgbe_mac_operations {
void (*mdd_event)(struct ixgbe_hw *hw, u32 *vf_bitmap);
void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf);
bool (*fw_recovery_mode)(struct ixgbe_hw *hw);
+ bool (*fw_rollback_mode)(struct ixgbe_hw *hw);
+ bool (*get_fw_tsam_mode)(struct ixgbe_hw *hw);
+ s32 (*get_fw_version)(struct ixgbe_hw *hw);
+ s32 (*get_nvm_version)(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
};
struct ixgbe_phy_operations {
@@ -4162,6 +4188,9 @@ struct ixgbe_link_operations {
struct ixgbe_link_info {
struct ixgbe_link_operations ops;
u8 addr;
+ struct ixgbe_link_status link_info;
+ struct ixgbe_link_status link_info_old;
+ u8 get_link_info;
};
struct ixgbe_eeprom_info {
@@ -4233,6 +4262,9 @@ struct ixgbe_phy_info {
bool reset_if_overtemp;
bool qsfp_shared_i2c_bus;
u32 nw_mng_if_sel;
+ u64 phy_type_low;
+ u64 phy_type_high;
+ struct ixgbe_aci_cmd_set_phy_cfg_data curr_user_phy_cfg;
};
#include "ixgbe_mbx.h"
@@ -4261,6 +4293,22 @@ struct ixgbe_hw {
bool wol_enabled;
bool need_crosstalk_fix;
u32 fw_rst_cnt;
+ u8 api_branch;
+ u8 api_maj_ver;
+ u8 api_min_ver;
+ u8 api_patch;
+ u8 fw_branch;
+ u8 fw_maj_ver;
+ u8 fw_min_ver;
+ u8 fw_patch;
+ u32 fw_build;
+ struct ixgbe_aci_info aci;
+ struct ixgbe_flash_info flash;
+ struct ixgbe_hw_dev_caps dev_caps;
+ struct ixgbe_hw_func_caps func_caps;
+ struct ixgbe_fwlog_cfg fwlog_cfg;
+ bool fwlog_support_ena;
+ struct ixgbe_fwlog_ring fwlog_ring;
};
#define ixgbe_call_func(hw, func, params, error) \
@@ -4312,6 +4360,24 @@ struct ixgbe_hw {
#define IXGBE_ERR_MBX_NOMSG -42
#define IXGBE_ERR_TIMEOUT -43
+#define IXGBE_ERR_NOT_SUPPORTED -45
+#define IXGBE_ERR_OUT_OF_RANGE -46
+
+#define IXGBE_ERR_NVM -50
+#define IXGBE_ERR_NVM_CHECKSUM -51
+#define IXGBE_ERR_BUF_TOO_SHORT -52
+#define IXGBE_ERR_NVM_BLANK_MODE -53
+#define IXGBE_ERR_INVAL_SIZE -54
+#define IXGBE_ERR_DOES_NOT_EXIST -55
+
+#define IXGBE_ERR_ACI_ERROR -100
+#define IXGBE_ERR_ACI_DISABLED -101
+#define IXGBE_ERR_ACI_TIMEOUT -102
+#define IXGBE_ERR_ACI_BUSY -103
+#define IXGBE_ERR_ACI_NO_WORK -104
+#define IXGBE_ERR_ACI_NO_EVENTS -105
+#define IXGBE_ERR_FW_API_VER -106
+
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
@@ -4540,5 +4606,6 @@ struct ixgbe_bypass_eeprom {
#define IXGBE_REQUEST_TASK_FDIR 0x08
#define IXGBE_REQUEST_TASK_PHY 0x10
#define IXGBE_REQUEST_TASK_LSC 0x20
+#define IXGBE_REQUEST_TASK_FWEVENT 0x40
#endif /* _IXGBE_TYPE_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_type_e610.h b/sys/dev/ixgbe/ixgbe_type_e610.h
new file mode 100644
index 000000000000..e300030c3ba4
--- /dev/null
+++ b/sys/dev/ixgbe/ixgbe_type_e610.h
@@ -0,0 +1,2278 @@
+/******************************************************************************
+ SPDX-License-Identifier: BSD-3-Clause
+
+ Copyright (c) 2025, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+#ifndef _IXGBE_TYPE_E610_H_
+#define _IXGBE_TYPE_E610_H_
+
+
+/* Generic defines */
+#ifndef BIT
+#define BIT(a) (1UL << (a))
+#endif /* !BIT */
+#ifndef BIT_ULL
+#define BIT_ULL(a) (1ULL << (a))
+#endif /* !BIT_ULL */
+#ifndef BITS_PER_BYTE
+#define BITS_PER_BYTE 8
+#endif /* !BITS_PER_BYTE */
+#ifndef DIVIDE_AND_ROUND_UP
+#define DIVIDE_AND_ROUND_UP(a, b) (((a) + (b) - 1) / (b))
+#endif /* !DIVIDE_AND_ROUND_UP */
+
+#ifndef ROUND_UP
+/**
+ * ROUND_UP - round up to the next multiple of an arbitrary value
+ * @a: value to round up
+ * @b: arbitrary multiple (need not be a power of 2)
+ *
+ * Rounds a up to the next multiple of b.
+ */
+#define ROUND_UP(a, b) ((b) * DIVIDE_AND_ROUND_UP((a), (b)))
+#endif /* !ROUND_UP */
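
Both helpers are plain integer arithmetic; a couple of worked values, mirrored in a standalone userspace check:

	#include <assert.h>

	#define DIVIDE_AND_ROUND_UP(a, b) (((a) + (b) - 1) / (b))
	#define ROUND_UP(a, b) ((b) * DIVIDE_AND_ROUND_UP((a), (b)))

	int main(void)
	{
		assert(DIVIDE_AND_ROUND_UP(10, 8) == 2);	/* ceil(10/8) */
		assert(ROUND_UP(10, 8) == 16);			/* next multiple of 8 */
		assert(ROUND_UP(24, 6) == 24);			/* already a multiple */
		return 0;
	}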
+
+#define MAKEMASK(mask, shift) ((mask) << (shift))
+
+#define BYTES_PER_WORD 2
+#define BYTES_PER_DWORD 4
+
+#ifndef BITS_PER_LONG
+#define BITS_PER_LONG 64
+#endif /* !BITS_PER_LONG */
+#ifndef BITS_PER_LONG_LONG
+#define BITS_PER_LONG_LONG 64
+#endif /* !BITS_PER_LONG_LONG */
+#undef GENMASK
+#define GENMASK(h, l) \
+ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#undef GENMASK_ULL
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
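
GENMASK(h, l) sets the contiguous bits l through h inclusive. A userspace mirror of the definition (assumes a 64-bit unsigned long, as the BITS_PER_LONG fallback above does):

	#include <assert.h>

	#define BITS_PER_LONG 64
	#define GENMASK(h, l) \
		(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

	int main(void)
	{
		assert(GENMASK(3, 0) == 0xFUL);		/* bits 0..3 */
		assert(GENMASK(7, 4) == 0xF0UL);	/* bits 4..7 */
		assert(GENMASK(15, 8) == 0xFF00UL);	/* bits 8..15 */
		return 0;
	}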
+
+/* Data type manipulation macros. */
+#define HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
+#define LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
+#define HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF))
+#define LO_WORD(x) ((u16)((x) & 0xFFFF))
+#define HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF))
+#define LO_BYTE(x) ((u8)((x) & 0xFF))
+
+#ifndef MIN_T
+#define MIN_T(_t, _a, _b) min((_t)(_a), (_t)(_b))
+#endif
+
+#define IS_ASCII(_ch) ((_ch) < 0x80)
+
+/**
+ * ixgbe_struct_size - size of struct with C99 flexible array member
+ * @ptr: pointer to structure
+ * @field: flexible array member (last member of the structure)
+ * @num: number of elements of that flexible array member
+ */
+#define ixgbe_struct_size(ptr, field, num) \
+ (sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))
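
The macro computes the allocation size of a structure that ends in a C99 flexible array member: the fixed header plus num trailing elements. A minimal sketch with a hypothetical TLV-style struct:

	#include <assert.h>
	#include <stdint.h>

	#define ixgbe_struct_size(ptr, field, num) \
		(sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))

	struct demo_tlv {
		uint16_t type;
		uint16_t len;
		uint16_t value[];	/* flexible array member */
	};

	int main(void)
	{
		struct demo_tlv *t = NULL;	/* sizeof does not dereference */

		/* 4-byte fixed header + 8 * 2-byte elements = 20 bytes */
		assert(ixgbe_struct_size(t, value, 8) == 20);
		return 0;
	}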
+
+/* General E610 defines */
+#define IXGBE_MAX_VSI 768
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define E610_SR_VPD_SIZE_WORDS 512
+#define E610_SR_PCIE_ALT_SIZE_WORDS 512
+
+/* Checksum and Shadow RAM pointers */
+#define E610_SR_NVM_DEV_STARTER_VER 0x18
+#define E610_NVM_VER_LO_SHIFT 0
+#define E610_NVM_VER_LO_MASK (0xff << E610_NVM_VER_LO_SHIFT)
+#define E610_NVM_VER_HI_SHIFT 12
+#define E610_NVM_VER_HI_MASK (0xf << E610_NVM_VER_HI_SHIFT)
+#define E610_SR_NVM_MAP_VER 0x29
+#define E610_SR_NVM_EETRACK_LO 0x2D
+#define E610_SR_NVM_EETRACK_HI 0x2E
+#define E610_SR_VPD_PTR 0x2F
+#define E610_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
+#define E610_SR_SW_CHECKSUM_WORD 0x3F
+#define E610_SR_PFA_PTR 0x40
+#define E610_SR_1ST_NVM_BANK_PTR 0x42
+#define E610_SR_NVM_BANK_SIZE 0x43
+#define E610_SR_1ST_OROM_BANK_PTR 0x44
+#define E610_SR_OROM_BANK_SIZE 0x45
+#define E610_SR_NETLIST_BANK_PTR 0x46
+#define E610_SR_NETLIST_BANK_SIZE 0x47
+#define E610_SR_POINTER_TYPE_BIT BIT(15)
+#define E610_SR_POINTER_MASK 0x7fff
+#define E610_SR_HALF_4KB_SECTOR_UNITS 2048
+#define E610_GET_PFA_POINTER_IN_WORDS(offset) \
+ ((offset & E610_SR_POINTER_TYPE_BIT) == E610_SR_POINTER_TYPE_BIT) ? \
+ ((offset & E610_SR_POINTER_MASK) * E610_SR_HALF_4KB_SECTOR_UNITS) : \
+ (offset & E610_SR_POINTER_MASK)
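
Bit 15 of the stored pointer selects its unit: when set, the low 15 bits count sectors of 2048 words each (E610_SR_HALF_4KB_SECTOR_UNITS); when clear, they are a word offset directly. A standalone mirror of the conversion:

	#include <assert.h>

	#define POINTER_TYPE_BIT	(1U << 15)
	#define POINTER_MASK		0x7fffU
	#define SECTOR_UNITS		2048U	/* words per sector unit */
	#define PFA_POINTER_IN_WORDS(off) \
		(((off) & POINTER_TYPE_BIT) ? \
		 (((off) & POINTER_MASK) * SECTOR_UNITS) : \
		 ((off) & POINTER_MASK))

	int main(void)
	{
		assert(PFA_POINTER_IN_WORDS(0x0040U) == 0x40U);		/* word units */
		assert(PFA_POINTER_IN_WORDS(0x8002U) == 2U * 2048U);	/* sector units */
		return 0;
	}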
+
+/* Checksum and Shadow RAM pointers */
+#define E610_SR_NVM_CTRL_WORD 0x00
+#define E610_SR_PBA_BLOCK_PTR 0x16
+
+/* The Orom version topology */
+#define IXGBE_OROM_VER_PATCH_SHIFT 0
+#define IXGBE_OROM_VER_PATCH_MASK (0xff << IXGBE_OROM_VER_PATCH_SHIFT)
+#define IXGBE_OROM_VER_BUILD_SHIFT 8
+#define IXGBE_OROM_VER_BUILD_MASK (0xffff << IXGBE_OROM_VER_BUILD_SHIFT)
+#define IXGBE_OROM_VER_SHIFT 24
+#define IXGBE_OROM_VER_MASK (0xff << IXGBE_OROM_VER_SHIFT)
+
+/* CSS Header words */
+#define IXGBE_NVM_CSS_HDR_LEN_L 0x02
+#define IXGBE_NVM_CSS_HDR_LEN_H 0x03
+#define IXGBE_NVM_CSS_SREV_L 0x14
+#define IXGBE_NVM_CSS_SREV_H 0x15
+
+/* Length of Authentication header section in words */
+#define IXGBE_NVM_AUTH_HEADER_LEN 0x08
+
+/* The Netlist ID Block is located after all of the Link Topology nodes. */
+#define IXGBE_NETLIST_ID_BLK_SIZE 0x30
+#define IXGBE_NETLIST_ID_BLK_OFFSET(n) IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0004 + 2 * (n))
+
+/* netlist ID block field offsets (word offsets) */
+#define IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW 0x02
+#define IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH 0x03
+#define IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW 0x04
+#define IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH 0x05
+#define IXGBE_NETLIST_ID_BLK_TYPE_LOW 0x06
+#define IXGBE_NETLIST_ID_BLK_TYPE_HIGH 0x07
+#define IXGBE_NETLIST_ID_BLK_REV_LOW 0x08
+#define IXGBE_NETLIST_ID_BLK_REV_HIGH 0x09
+#define IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(n) (0x0A + (n))
+#define IXGBE_NETLIST_ID_BLK_CUST_VER 0x2F
+
+/* The Link Topology Netlist section is stored as a series of words. It is
+ * stored in the NVM as a TLV, with the first two words containing the type
+ * and length.
+ */
+#define IXGBE_NETLIST_LINK_TOPO_MOD_ID 0x011B
+#define IXGBE_NETLIST_TYPE_OFFSET 0x0000
+#define IXGBE_NETLIST_LEN_OFFSET 0x0001
+
+/* The Link Topology section follows the TLV header. When reading the netlist
+ * using ixgbe_read_netlist_module, we need to account for the 2-word TLV
+ * header.
+ */
+#define IXGBE_NETLIST_LINK_TOPO_OFFSET(n) ((n) + 2)
+#define IXGBE_LINK_TOPO_MODULE_LEN IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0000)
+#define IXGBE_LINK_TOPO_NODE_COUNT IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0001)
+#define IXGBE_LINK_TOPO_NODE_COUNT_M MAKEMASK(0x3FF, 0)
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define IXGBE_SR_CTRL_WORD_1_S 0x06
+#define IXGBE_SR_CTRL_WORD_1_M (0x03 << IXGBE_SR_CTRL_WORD_1_S)
+#define IXGBE_SR_CTRL_WORD_VALID 0x1
+#define IXGBE_SR_CTRL_WORD_OROM_BANK BIT(3)
+#define IXGBE_SR_CTRL_WORD_NETLIST_BANK BIT(4)
+#define IXGBE_SR_CTRL_WORD_NVM_BANK BIT(5)
+#define IXGBE_SR_NVM_PTR_4KB_UNITS BIT(15)
+
+/* These macros extract the major version and minor version fields
+ * from the NVM Image Revision word. */
+#define E610_NVM_MAJOR_VER(x) ((x & 0xF000) >> 12)
+#define E610_NVM_MINOR_VER(x) (x & 0x00FF)
+
+/* Shadow RAM related */
+#define IXGBE_SR_SECTOR_SIZE_IN_WORDS 0x800
+#define IXGBE_SR_WORDS_IN_1KB 512
+/* Checksum should be calculated such that after adding all the words,
+ * including the checksum word itself, the sum should be 0xBABA.
+ */
+#define IXGBE_SR_SW_CHECKSUM_BASE 0xBABA
+
+/* Netlist */
+#define IXGBE_MAX_NETLIST_SIZE 10
+
+/* General registers */
+
+/* Firmware Status Register (GL_FWSTS) */
+#define GL_FWSTS 0x00083048 /* Reset Source: POR */
+#define GL_FWSTS_FWS0B_S 0
+#define GL_FWSTS_FWS0B_M MAKEMASK(0xFF, 0)
+#define GL_FWSTS_FWROWD_S 8
+#define GL_FWSTS_FWROWD_M BIT(8)
+#define GL_FWSTS_FWRI_S 9
+#define GL_FWSTS_FWRI_M BIT(9)
+#define GL_FWSTS_FWS1B_S 16
+#define GL_FWSTS_FWS1B_M MAKEMASK(0xFF, 16)
+#define GL_FWSTS_EP_PF0 BIT(24)
+#define GL_FWSTS_EP_PF1 BIT(25)
+
+/* Recovery mode values of Firmware Status 1 Byte (FWS1B) bitfield */
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_CORER_LEGACY 0x0B
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_GLOBR_LEGACY 0x0C
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_CORER 0x30
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_GLOBR 0x31
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_TRANSITION 0x32
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_NVM 0x33
+
+/* Firmware Status (GL_MNG_FWSM) */
+#define GL_MNG_FWSM 0x000B6134 /* Reset Source: POR */
+#define GL_MNG_FWSM_FW_MODES_S 0
+#define GL_MNG_FWSM_FW_MODES_M MAKEMASK(0x7, 0)
+#define GL_MNG_FWSM_RSV0_S 2
+#define GL_MNG_FWSM_RSV0_M MAKEMASK(0xFF, 2)
+#define GL_MNG_FWSM_EEP_RELOAD_IND_S 10
+#define GL_MNG_FWSM_EEP_RELOAD_IND_M BIT(10)
+#define GL_MNG_FWSM_RSV1_S 11
+#define GL_MNG_FWSM_RSV1_M MAKEMASK(0xF, 11)
+#define GL_MNG_FWSM_RSV2_S 15
+#define GL_MNG_FWSM_RSV2_M BIT(15)
+#define GL_MNG_FWSM_PCIR_AL_FAILURE_S 16
+#define GL_MNG_FWSM_PCIR_AL_FAILURE_M BIT(16)
+#define GL_MNG_FWSM_POR_AL_FAILURE_S 17
+#define GL_MNG_FWSM_POR_AL_FAILURE_M BIT(17)
+#define GL_MNG_FWSM_RSV3_S 18
+#define GL_MNG_FWSM_RSV3_M BIT(18)
+#define GL_MNG_FWSM_EXT_ERR_IND_S 19
+#define GL_MNG_FWSM_EXT_ERR_IND_M MAKEMASK(0x3F, 19)
+#define GL_MNG_FWSM_RSV4_S 25
+#define GL_MNG_FWSM_RSV4_M BIT(25)
+#define GL_MNG_FWSM_RESERVED_11_S 26
+#define GL_MNG_FWSM_RESERVED_11_M MAKEMASK(0xF, 26)
+#define GL_MNG_FWSM_RSV5_S 30
+#define GL_MNG_FWSM_RSV5_M MAKEMASK(0x3, 30)
+
+/* FW mode indications */
+#define GL_MNG_FWSM_FW_MODES_DEBUG_M BIT(0)
+#define GL_MNG_FWSM_FW_MODES_RECOVERY_M BIT(1)
+#define GL_MNG_FWSM_FW_MODES_ROLLBACK_M BIT(2)
+
+/* Global NVM General Status Register */
+#define GLNVM_GENS 0x000B6100 /* Reset Source: POR */
+#define GLNVM_GENS_NVM_PRES_S 0
+#define GLNVM_GENS_NVM_PRES_M BIT(0)
+#define GLNVM_GENS_SR_SIZE_S 5
+#define GLNVM_GENS_SR_SIZE_M MAKEMASK(0x7, 5)
+#define GLNVM_GENS_BANK1VAL_S 8
+#define GLNVM_GENS_BANK1VAL_M BIT(8)
+#define GLNVM_GENS_ALT_PRST_S 23
+#define GLNVM_GENS_ALT_PRST_M BIT(23)
+#define GLNVM_GENS_FL_AUTO_RD_S 25
+#define GLNVM_GENS_FL_AUTO_RD_M BIT(25)
+
+/* Flash Access Register */
+#define GLNVM_FLA 0x000B6108 /* Reset Source: POR */
+#define GLNVM_FLA_LOCKED_S 6
+#define GLNVM_FLA_LOCKED_M BIT(6)
+
+/* Bit Bang registers */
+#define RDASB_MSGCTL 0x000B6820
+#define RDASB_MSGCTL_HDR_DWS_S 0
+#define RDASB_MSGCTL_EXP_RDW_S 8
+#define RDASB_MSGCTL_CMDV_M BIT(31)
+#define RDASB_RSPCTL 0x000B6824
+#define RDASB_RSPCTL_BAD_LENGTH_M BIT(30)
+#define RDASB_RSPCTL_NOT_SUCCESS_M BIT(31)
+#define RDASB_WHDR0 0x000B68F4
+#define RDASB_WHDR1 0x000B68F8
+#define RDASB_WHDR2 0x000B68FC
+#define RDASB_WHDR3 0x000B6900
+#define RDASB_WHDR4 0x000B6904
+#define RDASB_RHDR0 0x000B6AFC
+#define RDASB_RHDR0_RESPONSE_S 27
+#define RDASB_RHDR0_RESPONSE_M MAKEMASK(0x7, 27)
+#define RDASB_RDATA0 0x000B6B00
+#define RDASB_RDATA1 0x000B6B04
+
+/* SPI Registers */
+#define SPISB_MSGCTL 0x000B7020
+#define SPISB_MSGCTL_HDR_DWS_S 0
+#define SPISB_MSGCTL_EXP_RDW_S 8
+#define SPISB_MSGCTL_MSG_MODE_S 26
+#define SPISB_MSGCTL_TOKEN_MODE_S 28
+#define SPISB_MSGCTL_BARCLR_S 30
+#define SPISB_MSGCTL_CMDV_S 31
+#define SPISB_MSGCTL_CMDV_M BIT(31)
+#define SPISB_RSPCTL 0x000B7024
+#define SPISB_RSPCTL_BAD_LENGTH_M BIT(30)
+#define SPISB_RSPCTL_NOT_SUCCESS_M BIT(31)
+#define SPISB_WHDR0 0x000B70F4
+#define SPISB_WHDR0_DEST_SEL_S 12
+#define SPISB_WHDR0_OPCODE_SEL_S 16
+#define SPISB_WHDR0_TAG_S 24
+#define SPISB_WHDR1 0x000B70F8
+#define SPISB_WHDR2 0x000B70FC
+#define SPISB_RDATA 0x000B7300
+#define SPISB_WDATA 0x000B7100
+
+/* Firmware Reset Count register */
+#define GL_FWRESETCNT 0x00083100 /* Reset Source: POR */
+#define GL_FWRESETCNT_FWRESETCNT_S 0
+#define GL_FWRESETCNT_FWRESETCNT_M MAKEMASK(0xFFFFFFFF, 0)
+
+/* Admin Command Interface (ACI) registers */
+#define PF_HIDA(_i) (0x00085000 + ((_i) * 4))
+#define PF_HIDA_2(_i) (0x00085020 + ((_i) * 4))
+#define PF_HIBA(_i) (0x00084000 + ((_i) * 4))
+#define PF_HICR 0x00082048
+
+#define PF_HIDA_MAX_INDEX 15
+#define PF_HIBA_MAX_INDEX 1023
+
+#define PF_HICR_EN BIT(0)
+#define PF_HICR_C BIT(1)
+#define PF_HICR_SV BIT(2)
+#define PF_HICR_EV BIT(3)
+
+#define GL_HIDA(_i) (0x00082000 + ((_i) * 4))
+#define GL_HIDA_2(_i) (0x00082020 + ((_i) * 4))
+#define GL_HIBA(_i) (0x00081000 + ((_i) * 4))
+#define GL_HICR 0x00082040
+
+#define GL_HIDA_MAX_INDEX 15
+#define GL_HIBA_MAX_INDEX 1023
+
+#define GL_HICR_C BIT(1)
+#define GL_HICR_SV BIT(2)
+#define GL_HICR_EV BIT(3)
+
+#define GL_HICR_EN 0x00082044
+
+#define GL_HICR_EN_CHECK BIT(0)
+
+/* Admin Command Interface (ACI) defines */
+/* Defines that help manage the driver vs FW API checks.
+ */
+#define IXGBE_FW_API_VER_BRANCH 0x00
+#define IXGBE_FW_API_VER_MAJOR 0x01
+#define IXGBE_FW_API_VER_MINOR 0x07
+#define IXGBE_FW_API_VER_DIFF_ALLOWED 0x02
+
+#define IXGBE_ACI_DESC_SIZE 32
+#define IXGBE_ACI_DESC_SIZE_IN_DWORDS	(IXGBE_ACI_DESC_SIZE / BYTES_PER_DWORD)
+
+#define IXGBE_ACI_MAX_BUFFER_SIZE 4096 /* Size in bytes */
+#define IXGBE_ACI_DESC_COOKIE_L_DWORD_OFFSET 3
+#define IXGBE_ACI_SEND_DELAY_TIME_MS 10
+#define IXGBE_ACI_SEND_MAX_EXECUTE 3
+/* [ms] timeout of waiting for sync response */
+#define IXGBE_ACI_SYNC_RESPONSE_TIMEOUT 100000
+/* [ms] timeout of waiting for async response */
+#define IXGBE_ACI_ASYNC_RESPONSE_TIMEOUT 150000
+/* [ms] timeout of waiting for resource release */
+#define IXGBE_ACI_RELEASE_RES_TIMEOUT 10000
+
+/* Timestamp spacing for Tools ACI: queue is active if spacing is within the range [LO..HI] */
+#define IXGBE_TOOLS_ACI_ACTIVE_STAMP_SPACING_LO 0
+#define IXGBE_TOOLS_ACI_ACTIVE_STAMP_SPACING_HI 200
+
+/* Timestamp spacing for Tools ACI: queue is expired if spacing is outside the range [LO..HI] */
+#define IXGBE_TOOLS_ACI_EXPIRED_STAMP_SPACING_LO -5
+#define IXGBE_TOOLS_ACI_EXPIRED_STAMP_SPACING_HI 205
+
+/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */
+#define IXGBE_ACI_LG_BUF 512
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets */
+#define IXGBE_ACI_FLAG_DD_S 0
+#define IXGBE_ACI_FLAG_CMP_S 1
+#define IXGBE_ACI_FLAG_ERR_S 2
+#define IXGBE_ACI_FLAG_VFE_S 3
+#define IXGBE_ACI_FLAG_LB_S 9
+#define IXGBE_ACI_FLAG_RD_S 10
+#define IXGBE_ACI_FLAG_VFC_S 11
+#define IXGBE_ACI_FLAG_BUF_S 12
+#define IXGBE_ACI_FLAG_SI_S 13
+#define IXGBE_ACI_FLAG_EI_S 14
+#define IXGBE_ACI_FLAG_FE_S 15
+
+#define IXGBE_ACI_FLAG_DD BIT(IXGBE_ACI_FLAG_DD_S) /* 0x1 */
+#define IXGBE_ACI_FLAG_CMP BIT(IXGBE_ACI_FLAG_CMP_S) /* 0x2 */
+#define IXGBE_ACI_FLAG_ERR BIT(IXGBE_ACI_FLAG_ERR_S) /* 0x4 */
+#define IXGBE_ACI_FLAG_VFE BIT(IXGBE_ACI_FLAG_VFE_S) /* 0x8 */
+#define IXGBE_ACI_FLAG_LB BIT(IXGBE_ACI_FLAG_LB_S) /* 0x200 */
+#define IXGBE_ACI_FLAG_RD BIT(IXGBE_ACI_FLAG_RD_S) /* 0x400 */
+#define IXGBE_ACI_FLAG_VFC BIT(IXGBE_ACI_FLAG_VFC_S) /* 0x800 */
+#define IXGBE_ACI_FLAG_BUF BIT(IXGBE_ACI_FLAG_BUF_S) /* 0x1000 */
+#define IXGBE_ACI_FLAG_SI BIT(IXGBE_ACI_FLAG_SI_S) /* 0x2000 */
+#define IXGBE_ACI_FLAG_EI BIT(IXGBE_ACI_FLAG_EI_S) /* 0x4000 */
+#define IXGBE_ACI_FLAG_FE BIT(IXGBE_ACI_FLAG_FE_S) /* 0x8000 */
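
A descriptor for an indirect command typically sets BUF (an external buffer is attached) and, when firmware should read that buffer rather than fill it, RD as well. A standalone sketch of composing the flags word (the values mirror the defines above):

	#include <assert.h>
	#include <stdint.h>

	#define ACI_FLAG_RD	(1U << 10)	/* FW reads the attached buffer */
	#define ACI_FLAG_BUF	(1U << 12)	/* command carries a buffer */

	int main(void)
	{
		uint16_t flags = 0;

		flags |= ACI_FLAG_BUF | ACI_FLAG_RD;	/* indirect, write-type command */
		assert(flags == 0x1400);
		return 0;
	}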
+
+/* Admin Command Interface (ACI) error codes */
+enum ixgbe_aci_err {
+ IXGBE_ACI_RC_OK = 0, /* Success */
+ IXGBE_ACI_RC_EPERM = 1, /* Operation not permitted */
+ IXGBE_ACI_RC_ENOENT = 2, /* No such element */
+ IXGBE_ACI_RC_ESRCH = 3, /* Bad opcode */
+ IXGBE_ACI_RC_EINTR = 4, /* Operation interrupted */
+ IXGBE_ACI_RC_EIO = 5, /* I/O error */
+ IXGBE_ACI_RC_ENXIO = 6, /* No such resource */
+ IXGBE_ACI_RC_E2BIG = 7, /* Arg too long */
+ IXGBE_ACI_RC_EAGAIN = 8, /* Try again */
+ IXGBE_ACI_RC_ENOMEM = 9, /* Out of memory */
+ IXGBE_ACI_RC_EACCES = 10, /* Permission denied */
+ IXGBE_ACI_RC_EFAULT = 11, /* Bad address */
+ IXGBE_ACI_RC_EBUSY = 12, /* Device or resource busy */
+ IXGBE_ACI_RC_EEXIST = 13, /* Object already exists */
+ IXGBE_ACI_RC_EINVAL = 14, /* Invalid argument */
+ IXGBE_ACI_RC_ENOTTY = 15, /* Not a typewriter */
+ IXGBE_ACI_RC_ENOSPC = 16, /* No space left or allocation failure */
+ IXGBE_ACI_RC_ENOSYS = 17, /* Function not implemented */
+ IXGBE_ACI_RC_ERANGE = 18, /* Parameter out of range */
+ IXGBE_ACI_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ IXGBE_ACI_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ IXGBE_ACI_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ IXGBE_ACI_RC_EFBIG = 22, /* File too big */
+ IXGBE_ACI_RC_ESBCOMP = 23, /* SB-IOSF completion unsuccessful */
+ IXGBE_ACI_RC_ENOSEC = 24, /* Missing security manifest */
+ IXGBE_ACI_RC_EBADSIG = 25, /* Bad RSA signature */
+ IXGBE_ACI_RC_ESVN = 26, /* SVN number prohibits this package */
+ IXGBE_ACI_RC_EBADMAN = 27, /* Manifest hash mismatch */
+ IXGBE_ACI_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */
+ IXGBE_ACI_RC_EACCES_BMCU = 29, /* BMC Update in progress */
+};
+
+/* Admin Command Interface (ACI) opcodes */
+enum ixgbe_aci_opc {
+ ixgbe_aci_opc_get_ver = 0x0001,
+ ixgbe_aci_opc_driver_ver = 0x0002,
+ ixgbe_aci_opc_get_exp_err = 0x0005,
+
+ /* resource ownership */
+ ixgbe_aci_opc_req_res = 0x0008,
+ ixgbe_aci_opc_release_res = 0x0009,
+
+ /* device/function capabilities */
+ ixgbe_aci_opc_list_func_caps = 0x000A,
+ ixgbe_aci_opc_list_dev_caps = 0x000B,
+
+ /* safe disable of RXEN */
+ ixgbe_aci_opc_disable_rxen = 0x000C,
+
+ /* FW events */
+ ixgbe_aci_opc_get_fw_event = 0x0014,
+
+ /* PHY commands */
+ ixgbe_aci_opc_get_phy_caps = 0x0600,
+ ixgbe_aci_opc_set_phy_cfg = 0x0601,
+ ixgbe_aci_opc_restart_an = 0x0605,
+ ixgbe_aci_opc_get_link_status = 0x0607,
+ ixgbe_aci_opc_set_event_mask = 0x0613,
+ ixgbe_aci_opc_get_link_topo = 0x06E0,
+ ixgbe_aci_opc_read_i2c = 0x06E2,
+ ixgbe_aci_opc_write_i2c = 0x06E3,
+ ixgbe_aci_opc_read_mdio = 0x06E4,
+ ixgbe_aci_opc_write_mdio = 0x06E5,
+ ixgbe_aci_opc_set_gpio_by_func = 0x06E6,
+ ixgbe_aci_opc_get_gpio_by_func = 0x06E7,
+ ixgbe_aci_opc_set_port_id_led = 0x06E9,
+ ixgbe_aci_opc_set_gpio = 0x06EC,
+ ixgbe_aci_opc_get_gpio = 0x06ED,
+ ixgbe_aci_opc_sff_eeprom = 0x06EE,
+ ixgbe_aci_opc_prog_topo_dev_nvm = 0x06F2,
+ ixgbe_aci_opc_read_topo_dev_nvm = 0x06F3,
+
+ /* NVM commands */
+ ixgbe_aci_opc_nvm_read = 0x0701,
+ ixgbe_aci_opc_nvm_erase = 0x0702,
+ ixgbe_aci_opc_nvm_write = 0x0703,
+ ixgbe_aci_opc_nvm_cfg_read = 0x0704,
+ ixgbe_aci_opc_nvm_cfg_write = 0x0705,
+ ixgbe_aci_opc_nvm_checksum = 0x0706,
+ ixgbe_aci_opc_nvm_write_activate = 0x0707,
+ ixgbe_aci_opc_nvm_sr_dump = 0x0707,
+ ixgbe_aci_opc_nvm_save_factory_settings = 0x0708,
+ ixgbe_aci_opc_nvm_update_empr = 0x0709,
+ ixgbe_aci_opc_nvm_pkg_data = 0x070A,
+ ixgbe_aci_opc_nvm_pass_component_tbl = 0x070B,
+ ixgbe_aci_opc_nvm_sanitization = 0x070C,
+
+ /* Alternate Structure Commands */
+ ixgbe_aci_opc_write_alt_direct = 0x0900,
+ ixgbe_aci_opc_write_alt_indirect = 0x0901,
+ ixgbe_aci_opc_read_alt_direct = 0x0902,
+ ixgbe_aci_opc_read_alt_indirect = 0x0903,
+ ixgbe_aci_opc_done_alt_write = 0x0904,
+ ixgbe_aci_opc_clear_port_alt_write = 0x0906,
+
+ ixgbe_aci_opc_temp_tca_event = 0x0C94,
+
+ /* debug commands */
+ ixgbe_aci_opc_debug_dump_internals = 0xFF08,
+
+ /* SystemDiagnostic commands */
+ ixgbe_aci_opc_set_health_status_config = 0xFF20,
+ ixgbe_aci_opc_get_supported_health_status_codes = 0xFF21,
+ ixgbe_aci_opc_get_health_status = 0xFF22,
+ ixgbe_aci_opc_clear_health_status = 0xFF23,
+
+ /* FW Logging Commands */
+ ixgbe_aci_opc_fw_logs_config = 0xFF30,
+ ixgbe_aci_opc_fw_logs_register = 0xFF31,
+ ixgbe_aci_opc_fw_logs_query = 0xFF32,
+ ixgbe_aci_opc_fw_logs_event = 0xFF33,
+ ixgbe_aci_opc_fw_logs_get = 0xFF34,
+ ixgbe_aci_opc_fw_logs_clear = 0xFF35
+};
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define IXGBE_CHECK_STRUCT_LEN(n, X) enum ixgbe_static_assert_enum_##X \
+ { ixgbe_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }
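
The divide-by-zero trick turns a size mismatch into a compile-time failure: if sizeof(struct X) differs from n, the divisor in the enum initializer is 0, and a constant expression with division by zero cannot compile. A minimal standalone reproduction:

	#include <stdint.h>

	#define CHECK_STRUCT_LEN(n, X) enum static_assert_enum_##X \
		{ static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }

	struct ok16 {
		uint64_t a;
		uint64_t b;
	};
	CHECK_STRUCT_LEN(16, ok16);	/* compiles: sizeof(struct ok16) == 16 */
	/* CHECK_STRUCT_LEN(8, ok16); would fail to compile: division by zero */

	int main(void)
	{
		return 0;
	}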
+
+/* This macro is used to generate a compilation error if a variable-length
+ * structure is not exactly the correct length assuming a single element of
+ * the variable-length object as the last element of the structure. It gives
+ * a divide by zero error if the structure is not of the correct size,
+ * otherwise it creates an enum that is never used.
+ */
+#define IXGBE_CHECK_VAR_LEN_STRUCT_LEN(n, X, T) enum ixgbe_static_assert_enum_##X \
+ { ixgbe_static_assert_##X = (n) / \
+ (((sizeof(struct X) + sizeof(T)) == (n)) ? 1 : 0) }
+
+/* This macro is used to ensure that parameter structures (i.e. structures
+ * in the params union member of struct ixgbe_aci_desc) are 16 bytes in length.
+ *
+ * NOT intended to be used to check the size of an indirect command/response
+ * additional data buffer (e.g. struct foo) which should just happen to be 16
+ * bytes (instead, use IXGBE_CHECK_STRUCT_LEN(16, foo) for that).
+ */
+#define IXGBE_CHECK_PARAM_LEN(X) IXGBE_CHECK_STRUCT_LEN(16, X)
+
+struct ixgbe_aci_cmd_generic {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_generic);
+
+/* Get version (direct 0x0001) */
+struct ixgbe_aci_cmd_get_ver {
+ __le32 rom_ver;
+ __le32 fw_build;
+ u8 fw_branch;
+ u8 fw_major;
+ u8 fw_minor;
+ u8 fw_patch;
+ u8 api_branch;
+ u8 api_major;
+ u8 api_minor;
+ u8 api_patch;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_ver);
+
+#define IXGBE_DRV_VER_STR_LEN_E610 32
+
+struct ixgbe_driver_ver {
+ u8 major_ver;
+ u8 minor_ver;
+ u8 build_ver;
+ u8 subbuild_ver;
+ u8 driver_string[IXGBE_DRV_VER_STR_LEN_E610];
+};
+
+/* Send driver version (indirect 0x0002) */
+struct ixgbe_aci_cmd_driver_ver {
+ u8 major_ver;
+ u8 minor_ver;
+ u8 build_ver;
+ u8 subbuild_ver;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_driver_ver);
+
+/* Get Expanded Error Code (0x0005, direct) */
+struct ixgbe_aci_cmd_get_exp_err {
+ __le32 reason;
+#define IXGBE_ACI_EXPANDED_ERROR_NOT_PROVIDED 0xFFFFFFFF
+ __le32 identifier;
+ u8 rsvd[8];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_exp_err);
+
+/* FW update timeout definitions are in milliseconds */
+#define IXGBE_NVM_TIMEOUT 180000
+#define IXGBE_CHANGE_LOCK_TIMEOUT 1000
+#define IXGBE_GLOBAL_CFG_LOCK_TIMEOUT 3000
+
+enum ixgbe_aci_res_access_type {
+ IXGBE_RES_READ = 1,
+ IXGBE_RES_WRITE
+};
+
+enum ixgbe_aci_res_ids {
+ IXGBE_NVM_RES_ID = 1,
+ IXGBE_SPD_RES_ID,
+ IXGBE_CHANGE_LOCK_RES_ID,
+ IXGBE_GLOBAL_CFG_LOCK_RES_ID
+};
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+struct ixgbe_aci_cmd_req_res {
+ __le16 res_id;
+#define IXGBE_ACI_RES_ID_NVM 1
+#define IXGBE_ACI_RES_ID_SDP 2
+#define IXGBE_ACI_RES_ID_CHNG_LOCK 3
+#define IXGBE_ACI_RES_ID_GLBL_LOCK 4
+ __le16 access_type;
+#define IXGBE_ACI_RES_ACCESS_READ 1
+#define IXGBE_ACI_RES_ACCESS_WRITE 2
+
+ /* Upon successful completion, FW writes this value and driver is
+ * expected to release resource before timeout. This value is provided
+ * in milliseconds.
+ */
+ __le32 timeout;
+#define IXGBE_ACI_RES_NVM_READ_DFLT_TIMEOUT_MS 3000
+#define IXGBE_ACI_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
+#define IXGBE_ACI_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
+#define IXGBE_ACI_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
+ /* For SDP: pin ID of the SDP */
+ __le32 res_number;
+ /* Status is only used for IXGBE_ACI_RES_ID_GLBL_LOCK */
+ __le16 status;
+#define IXGBE_ACI_RES_GLBL_SUCCESS 0
+#define IXGBE_ACI_RES_GLBL_IN_PROG 1
+#define IXGBE_ACI_RES_GLBL_DONE 2
+ u8 reserved[2];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_req_res);
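+
+/* Usage sketch (illustrative only; the descriptor send path and the
+ * htole16() byte-order helper are assumptions, not part of this header):
+ * given a local struct ixgbe_aci_desc desc, request shared read ownership
+ * of the NVM resource, then release it with ixgbe_aci_opc_release_res.
+ *
+ *    struct ixgbe_aci_cmd_req_res *cmd = &desc.params.res_owner;
+ *
+ *    cmd->res_id = htole16(IXGBE_ACI_RES_ID_NVM);
+ *    cmd->access_type = htole16(IXGBE_ACI_RES_ACCESS_READ);
+ *    // on success, FW writes cmd->timeout; release before it expires
+ */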
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct ixgbe_aci_cmd_list_caps {
+ u8 cmd_flags;
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_list_caps);
+
+/* Device/Function buffer entry, repeated per reported capability */
+struct ixgbe_aci_cmd_list_caps_elem {
+ __le16 cap;
+#define IXGBE_ACI_CAPS_VALID_FUNCTIONS 0x0005
+#define IXGBE_ACI_MAX_VALID_FUNCTIONS 0x8
+#define IXGBE_ACI_CAPS_SRIOV 0x0012
+#define IXGBE_ACI_CAPS_VF 0x0013
+#define IXGBE_ACI_CAPS_VMDQ 0x0014
+#define IXGBE_ACI_CAPS_VSI 0x0017
+#define IXGBE_ACI_CAPS_DCB 0x0018
+#define IXGBE_ACI_CAPS_RSS 0x0040
+#define IXGBE_ACI_CAPS_RXQS 0x0041
+#define IXGBE_ACI_CAPS_TXQS 0x0042
+#define IXGBE_ACI_CAPS_MSIX 0x0043
+#define IXGBE_ACI_CAPS_FD 0x0045
+#define IXGBE_ACI_CAPS_MAX_MTU 0x0047
+#define IXGBE_ACI_CAPS_NVM_VER 0x0048
+#define IXGBE_ACI_CAPS_INLINE_IPSEC 0x0070
+#define IXGBE_ACI_CAPS_NUM_ENABLED_PORTS 0x0072
+#define IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE 0x0076
+#define IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
+#define IXGBE_ACI_CAPS_NVM_MGMT 0x0080
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0 0x0081
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1 0x0082
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2 0x0083
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3 0x0084
+#define IXGBE_ACI_CAPS_OROM_RECOVERY_UPDATE 0x0090
+#define IXGBE_ACI_CAPS_NEXT_CLUSTER_ID 0x0096
+ u8 major_ver;
+ u8 minor_ver;
+ /* Number of resources described by this capability */
+ __le32 number;
+ /* Only meaningful for some types of resources */
+ __le32 logical_id;
+ /* Only meaningful for some types of resources */
+ __le32 phys_id;
+ __le64 rsvd1;
+ __le64 rsvd2;
+};
+
+IXGBE_CHECK_STRUCT_LEN(32, ixgbe_aci_cmd_list_caps_elem);
+
+/* Disable RXEN (direct 0x000C) */
+struct ixgbe_aci_cmd_disable_rxen {
+ u8 lport_num;
+ u8 reserved[15];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_disable_rxen);
+
+/* Get FW Event (indirect 0x0014) */
+struct ixgbe_aci_cmd_get_fw_event {
+ __le16 fw_buf_status;
+#define IXGBE_ACI_GET_FW_EVENT_STATUS_OBTAINED BIT(0)
+#define IXGBE_ACI_GET_FW_EVENT_STATUS_PENDING BIT(1)
+ u8 rsvd[14];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_fw_event);
+
+/* Get PHY capabilities (indirect 0x0600) */
+struct ixgbe_aci_cmd_get_phy_caps {
+ u8 lport_num;
+ u8 reserved;
+ __le16 param0;
+ /* 18.0 - Report qualified modules */
+#define IXGBE_ACI_GET_PHY_RQM BIT(0)
+ /* 18.1 - 18.3 : Report mode
+ * 000b - Report topology capabilities, without media
+ * 001b - Report topology capabilities, with media
+ * 010b - Report Active configuration
+ * 011b - Report PHY Type and FEC mode capabilities
+ * 100b - Report Default capabilities
+ */
+#define IXGBE_ACI_REPORT_MODE_S 1
+#define IXGBE_ACI_REPORT_MODE_M (7 << IXGBE_ACI_REPORT_MODE_S)
+#define IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA 0
+#define IXGBE_ACI_REPORT_TOPO_CAP_MEDIA BIT(1)
+#define IXGBE_ACI_REPORT_ACTIVE_CFG BIT(2)
+#define IXGBE_ACI_REPORT_DFLT_CFG BIT(3)
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_phy_caps);
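+
+/* Note (sketch, byte-order helper assumed): the IXGBE_ACI_REPORT_* values
+ * above are already positioned in the 3-bit field at bit 1, so they can be
+ * assigned to param0 without further shifting:
+ *
+ *    cmd->param0 = htole16(IXGBE_ACI_REPORT_ACTIVE_CFG);
+ *    // equivalent to (2 << IXGBE_ACI_REPORT_MODE_S), "active config"
+ */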
+
+/* PHY type defines (extended). The first set of defines is for
+ * phy_type_low.
+ */
+#define IXGBE_PHY_TYPE_LOW_100BASE_TX BIT_ULL(0)
+#define IXGBE_PHY_TYPE_LOW_100M_SGMII BIT_ULL(1)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_T BIT_ULL(2)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_SX BIT_ULL(3)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_LX BIT_ULL(4)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_KX BIT_ULL(5)
+#define IXGBE_PHY_TYPE_LOW_1G_SGMII BIT_ULL(6)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_T BIT_ULL(7)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_X BIT_ULL(8)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_KX BIT_ULL(9)
+#define IXGBE_PHY_TYPE_LOW_5GBASE_T BIT_ULL(10)
+#define IXGBE_PHY_TYPE_LOW_5GBASE_KR BIT_ULL(11)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_T BIT_ULL(12)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_DA BIT_ULL(13)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_SR BIT_ULL(14)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_LR BIT_ULL(15)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 BIT_ULL(16)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC BIT_ULL(17)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_C2C BIT_ULL(18)
+#define IXGBE_PHY_TYPE_LOW_MAX_INDEX 18
+/* The second set of defines is for phy_type_high. */
+#define IXGBE_PHY_TYPE_HIGH_10BASE_T BIT_ULL(1)
+#define IXGBE_PHY_TYPE_HIGH_10M_SGMII BIT_ULL(2)
+#define IXGBE_PHY_TYPE_HIGH_2500M_SGMII BIT_ULL(56)
+#define IXGBE_PHY_TYPE_HIGH_100M_USXGMII BIT_ULL(57)
+#define IXGBE_PHY_TYPE_HIGH_1G_USXGMII BIT_ULL(58)
+#define IXGBE_PHY_TYPE_HIGH_2500M_USXGMII BIT_ULL(59)
+#define IXGBE_PHY_TYPE_HIGH_5G_USXGMII BIT_ULL(60)
+#define IXGBE_PHY_TYPE_HIGH_10G_USXGMII BIT_ULL(61)
+#define IXGBE_PHY_TYPE_HIGH_MAX_INDEX 61
+
+struct ixgbe_aci_cmd_get_phy_caps_data {
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define IXGBE_ACI_PHY_EN_TX_LINK_PAUSE BIT(0)
+#define IXGBE_ACI_PHY_EN_RX_LINK_PAUSE BIT(1)
+#define IXGBE_ACI_PHY_LOW_POWER_MODE BIT(2)
+#define IXGBE_ACI_PHY_EN_LINK BIT(3)
+#define IXGBE_ACI_PHY_AN_MODE BIT(4)
+#define IXGBE_ACI_PHY_EN_MOD_QUAL BIT(5)
+#define IXGBE_ACI_PHY_EN_LESM BIT(6)
+#define IXGBE_ACI_PHY_EN_AUTO_FEC BIT(7)
+#define IXGBE_ACI_PHY_CAPS_MASK MAKEMASK(0xff, 0)
+ u8 low_power_ctrl_an;
+#define IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE28 BIT(1)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE73 BIT(2)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE37 BIT(3)
+ __le16 eee_cap;
+#define IXGBE_ACI_PHY_EEE_EN_100BASE_TX BIT(0)
+#define IXGBE_ACI_PHY_EEE_EN_1000BASE_T BIT(1)
+#define IXGBE_ACI_PHY_EEE_EN_10GBASE_T BIT(2)
+#define IXGBE_ACI_PHY_EEE_EN_1000BASE_KX BIT(3)
+#define IXGBE_ACI_PHY_EEE_EN_10GBASE_KR BIT(4)
+#define IXGBE_ACI_PHY_EEE_EN_25GBASE_KR BIT(5)
+#define IXGBE_ACI_PHY_EEE_EN_10BASE_T BIT(11)
+ __le16 eeer_value;
+ u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */
+ u8 phy_fw_ver[8];
+ u8 link_fec_options;
+#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_EN BIT(0)
+#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1)
+#define IXGBE_ACI_PHY_FEC_25G_RS_528_REQ BIT(2)
+#define IXGBE_ACI_PHY_FEC_25G_KR_REQ BIT(3)
+#define IXGBE_ACI_PHY_FEC_25G_RS_544_REQ BIT(4)
+#define IXGBE_ACI_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6)
+#define IXGBE_ACI_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7)
+#define IXGBE_ACI_PHY_FEC_MASK MAKEMASK(0xdf, 0)
+ u8 module_compliance_enforcement;
+#define IXGBE_ACI_MOD_ENFORCE_STRICT_MODE BIT(0)
+ u8 extended_compliance_code;
+#define IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE 3
+ u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+#define IXGBE_ACI_MOD_TYPE_BYTE0_SFP_PLUS 0xA0
+#define IXGBE_ACI_MOD_TYPE_BYTE0_QSFP_PLUS 0x80
+#define IXGBE_ACI_MOD_TYPE_IDENT 1
+#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR BIT(5)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM BIT(6)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_ER BIT(7)
+#define IXGBE_ACI_MOD_TYPE_BYTE2_SFP_PLUS 0xA0
+#define IXGBE_ACI_MOD_TYPE_BYTE2_QSFP_PLUS 0x86
+ u8 qualified_module_count;
+ u8 rsvd2[7]; /* Bytes 47:41 reserved */
+#define IXGBE_ACI_QUAL_MOD_COUNT_MAX 16
+ struct {
+ u8 v_oui[3];
+ u8 rsvd3;
+ u8 v_part[16];
+ __le32 v_rev;
+ __le64 rsvd4;
+ } qual_modules[IXGBE_ACI_QUAL_MOD_COUNT_MAX];
+};
+
+IXGBE_CHECK_STRUCT_LEN(560, ixgbe_aci_cmd_get_phy_caps_data);
+
+/* Set PHY capabilities (direct 0x0601)
+ * NOTE: This command must be followed by setup link and restart auto-neg
+ */
+struct ixgbe_aci_cmd_set_phy_cfg {
+ u8 reserved[8];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_phy_cfg);
+
+/* Set PHY config command data structure */
+struct ixgbe_aci_cmd_set_phy_cfg_data {
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define IXGBE_ACI_PHY_ENA_VALID_MASK MAKEMASK(0xef, 0)
+#define IXGBE_ACI_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
+#define IXGBE_ACI_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
+#define IXGBE_ACI_PHY_ENA_LOW_POWER BIT(2)
+#define IXGBE_ACI_PHY_ENA_LINK BIT(3)
+#define IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT BIT(5)
+#define IXGBE_ACI_PHY_ENA_LESM BIT(6)
+#define IXGBE_ACI_PHY_ENA_AUTO_FEC BIT(7)
+ u8 low_power_ctrl_an;
+ __le16 eee_cap; /* Value from ixgbe_aci_get_phy_caps */
+ __le16 eeer_value; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 link_fec_opt; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 module_compliance_enforcement;
+};
+
+IXGBE_CHECK_STRUCT_LEN(24, ixgbe_aci_cmd_set_phy_cfg_data);
+
+/* Restart AN command data structure (direct 0x0605)
+ * Also used for the response, where only the first byte (the logical port
+ * number) is meaningful.
+ */
+struct ixgbe_aci_cmd_restart_an {
+ u8 reserved[2];
+ u8 cmd_flags;
+#define IXGBE_ACI_RESTART_AN_LINK_RESTART BIT(1)
+#define IXGBE_ACI_RESTART_AN_LINK_ENABLE BIT(2)
+ u8 reserved2[13];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_restart_an);
+
+#pragma pack(1)
+/* Get link status (indirect 0x0607), also used for Link Status Event */
+struct ixgbe_aci_cmd_get_link_status {
+ u8 reserved[2];
+ u8 cmd_flags;
+#define IXGBE_ACI_LSE_M 0x3
+#define IXGBE_ACI_LSE_NOP 0x0
+#define IXGBE_ACI_LSE_DIS 0x2
+#define IXGBE_ACI_LSE_ENA 0x3
+ /* only response uses this flag */
+#define IXGBE_ACI_LSE_IS_ENABLED 0x1
+ u8 reserved2[5];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_link_status);
+
+/* Get link status response data structure, also used for Link Status Event */
+struct ixgbe_aci_cmd_get_link_status_data {
+ u8 topo_media_conflict;
+#define IXGBE_ACI_LINK_TOPO_CONFLICT BIT(0)
+#define IXGBE_ACI_LINK_MEDIA_CONFLICT BIT(1)
+#define IXGBE_ACI_LINK_TOPO_CORRUPT BIT(2)
+#define IXGBE_ACI_LINK_TOPO_UNREACH_PRT BIT(4)
+#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_PRT BIT(5)
+#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_MEDIA BIT(6)
+#define IXGBE_ACI_LINK_TOPO_UNSUPP_MEDIA BIT(7)
+ u8 link_cfg_err;
+#define IXGBE_ACI_LINK_CFG_ERR BIT(0)
+#define IXGBE_ACI_LINK_CFG_COMPLETED BIT(1)
+#define IXGBE_ACI_LINK_ACT_PORT_OPT_INVAL BIT(2)
+#define IXGBE_ACI_LINK_FEAT_ID_OR_CONFIG_ID_INVAL BIT(3)
+#define IXGBE_ACI_LINK_TOPO_CRITICAL_SDP_ERR BIT(4)
+#define IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED BIT(5)
+#define IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE BIT(6)
+#define IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT BIT(7)
+ u8 link_info;
+#define IXGBE_ACI_LINK_UP BIT(0) /* Link Status */
+#define IXGBE_ACI_LINK_FAULT BIT(1)
+#define IXGBE_ACI_LINK_FAULT_TX BIT(2)
+#define IXGBE_ACI_LINK_FAULT_RX BIT(3)
+#define IXGBE_ACI_LINK_FAULT_REMOTE BIT(4)
+#define IXGBE_ACI_LINK_UP_PORT BIT(5) /* External Port Link Status */
+#define IXGBE_ACI_MEDIA_AVAILABLE BIT(6)
+#define IXGBE_ACI_SIGNAL_DETECT BIT(7)
+ u8 an_info;
+#define IXGBE_ACI_AN_COMPLETED BIT(0)
+#define IXGBE_ACI_LP_AN_ABILITY BIT(1)
+#define IXGBE_ACI_PD_FAULT BIT(2) /* Parallel Detection Fault */
+#define IXGBE_ACI_FEC_EN BIT(3)
+#define IXGBE_ACI_PHY_LOW_POWER BIT(4) /* Low Power State */
+#define IXGBE_ACI_LINK_PAUSE_TX BIT(5)
+#define IXGBE_ACI_LINK_PAUSE_RX BIT(6)
+#define IXGBE_ACI_QUALIFIED_MODULE BIT(7)
+ u8 ext_info;
+#define IXGBE_ACI_LINK_PHY_TEMP_ALARM BIT(0)
+#define IXGBE_ACI_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */
+ /* Port Tx Suspended */
+#define IXGBE_ACI_LINK_TX_S 2
+#define IXGBE_ACI_LINK_TX_M (0x03 << IXGBE_ACI_LINK_TX_S)
+#define IXGBE_ACI_LINK_TX_ACTIVE 0
+#define IXGBE_ACI_LINK_TX_DRAINED 1
+#define IXGBE_ACI_LINK_TX_FLUSHED 3
+ u8 lb_status;
+#define IXGBE_ACI_LINK_LB_PHY_LCL BIT(0)
+#define IXGBE_ACI_LINK_LB_PHY_RMT BIT(1)
+#define IXGBE_ACI_LINK_LB_MAC_LCL BIT(2)
+#define IXGBE_ACI_LINK_LB_PHY_IDX_S 3
+#define IXGBE_ACI_LINK_LB_PHY_IDX_M (0x7 << IXGBE_ACI_LINK_LB_PHY_IDX_S)
+ __le16 max_frame_size;
+ u8 cfg;
+#define IXGBE_ACI_LINK_25G_KR_FEC_EN BIT(0)
+#define IXGBE_ACI_LINK_25G_RS_528_FEC_EN BIT(1)
+#define IXGBE_ACI_LINK_25G_RS_544_FEC_EN BIT(2)
+#define IXGBE_ACI_FEC_MASK MAKEMASK(0x7, 0)
+ /* Pacing Config */
+#define IXGBE_ACI_CFG_PACING_S 3
+#define IXGBE_ACI_CFG_PACING_M (0xF << IXGBE_ACI_CFG_PACING_S)
+#define IXGBE_ACI_CFG_PACING_TYPE_M BIT(7)
+#define IXGBE_ACI_CFG_PACING_TYPE_AVG 0
+#define IXGBE_ACI_CFG_PACING_TYPE_FIXED IXGBE_ACI_CFG_PACING_TYPE_M
+ /* External Device Power Ability */
+ u8 power_desc;
+#define IXGBE_ACI_PWR_CLASS_M 0x3F
+#define IXGBE_ACI_LINK_PWR_BASET_LOW_HIGH 0
+#define IXGBE_ACI_LINK_PWR_BASET_HIGH 1
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_1 0
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_2 1
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_3 2
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_4 3
+ __le16 link_speed;
+#define IXGBE_ACI_LINK_SPEED_M 0x7FF
+#define IXGBE_ACI_LINK_SPEED_10MB BIT(0)
+#define IXGBE_ACI_LINK_SPEED_100MB BIT(1)
+#define IXGBE_ACI_LINK_SPEED_1000MB BIT(2)
+#define IXGBE_ACI_LINK_SPEED_2500MB BIT(3)
+#define IXGBE_ACI_LINK_SPEED_5GB BIT(4)
+#define IXGBE_ACI_LINK_SPEED_10GB BIT(5)
+#define IXGBE_ACI_LINK_SPEED_20GB BIT(6)
+#define IXGBE_ACI_LINK_SPEED_25GB BIT(7)
+#define IXGBE_ACI_LINK_SPEED_40GB BIT(8)
+#define IXGBE_ACI_LINK_SPEED_50GB BIT(9)
+#define IXGBE_ACI_LINK_SPEED_100GB BIT(10)
+#define IXGBE_ACI_LINK_SPEED_200GB BIT(11)
+#define IXGBE_ACI_LINK_SPEED_UNKNOWN BIT(15)
+ __le16 reserved3; /* Aligns next field to 8-byte boundary */
+ u8 ext_fec_status;
+#define IXGBE_ACI_LINK_RS_272_FEC_EN BIT(0) /* RS 272 FEC enabled */
+ u8 reserved4;
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ /* Get link status version 2 link partner data */
+ __le64 lp_phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 lp_phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 lp_fec_adv;
+#define IXGBE_ACI_LINK_LP_10G_KR_FEC_CAP BIT(0)
+#define IXGBE_ACI_LINK_LP_25G_KR_FEC_CAP BIT(1)
+#define IXGBE_ACI_LINK_LP_RS_528_FEC_CAP BIT(2)
+#define IXGBE_ACI_LINK_LP_50G_KR_272_FEC_CAP BIT(3)
+#define IXGBE_ACI_LINK_LP_100G_KR_272_FEC_CAP BIT(4)
+#define IXGBE_ACI_LINK_LP_200G_KR_272_FEC_CAP BIT(5)
+ u8 lp_fec_req;
+#define IXGBE_ACI_LINK_LP_10G_KR_FEC_REQ BIT(0)
+#define IXGBE_ACI_LINK_LP_25G_KR_FEC_REQ BIT(1)
+#define IXGBE_ACI_LINK_LP_RS_528_FEC_REQ BIT(2)
+#define IXGBE_ACI_LINK_LP_KR_272_FEC_REQ BIT(3)
+ u8 lp_flowcontrol;
+#define IXGBE_ACI_LINK_LP_PAUSE_ADV BIT(0)
+#define IXGBE_ACI_LINK_LP_ASM_DIR_ADV BIT(1)
+ u8 reserved5[5];
+};
+#pragma pack()
+
+IXGBE_CHECK_STRUCT_LEN(56, ixgbe_aci_cmd_get_link_status_data);
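+
+/* Decoding sketch (illustrative only; le16toh() stands in for the driver's
+ * little-endian helper): once the response buffer is read back,
+ *
+ *    if ((data->link_info & IXGBE_ACI_LINK_UP) &&
+ *        (le16toh(data->link_speed) & IXGBE_ACI_LINK_SPEED_10GB))
+ *            // link is up at 10 Gb/s
+ */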
+
+/* Set event mask command (direct 0x0613) */
+struct ixgbe_aci_cmd_set_event_mask {
+ u8 reserved[8];
+ __le16 event_mask;
+#define IXGBE_ACI_LINK_EVENT_UPDOWN BIT(1)
+#define IXGBE_ACI_LINK_EVENT_MEDIA_NA BIT(2)
+#define IXGBE_ACI_LINK_EVENT_LINK_FAULT BIT(3)
+#define IXGBE_ACI_LINK_EVENT_PHY_TEMP_ALARM BIT(4)
+#define IXGBE_ACI_LINK_EVENT_EXCESSIVE_ERRORS BIT(5)
+#define IXGBE_ACI_LINK_EVENT_SIGNAL_DETECT BIT(6)
+#define IXGBE_ACI_LINK_EVENT_AN_COMPLETED BIT(7)
+#define IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL BIT(8)
+#define IXGBE_ACI_LINK_EVENT_PORT_TX_SUSPENDED BIT(9)
+#define IXGBE_ACI_LINK_EVENT_TOPO_CONFLICT BIT(10)
+#define IXGBE_ACI_LINK_EVENT_MEDIA_CONFLICT BIT(11)
+#define IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL BIT(12)
+ u8 reserved1[6];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_event_mask);
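+
+/* Sketch, under the assumption that set bits select which link events
+ * generate notifications: subscribe to link up/down transitions and
+ * module qualification failures only.
+ *
+ *    cmd->event_mask = htole16(IXGBE_ACI_LINK_EVENT_UPDOWN |
+ *                              IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL);
+ */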
+
+struct ixgbe_aci_cmd_link_topo_params {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_LINK_TOPO_PORT_NUM_VALID BIT(0)
+ u8 node_type_ctx;
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_S 0
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_M (0xF << IXGBE_ACI_LINK_TOPO_NODE_TYPE_S)
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_PHY 0
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPIO_CTRL 1
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MUX_CTRL 2
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED_CTRL 3
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED 4
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_THERMAL 5
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE 6
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MEZZ 7
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_ID_EEPROM 8
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPS 11
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_S 4
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_M \
+ (0xF << IXGBE_ACI_LINK_TOPO_NODE_CTX_S)
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_GLOBAL 0
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_BOARD 1
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT 2
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE 3
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE 4
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_DIRECT_BUS_ACCESS 5
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE_BUS_ADDRESS 6
+ u8 index;
+};
+
+IXGBE_CHECK_STRUCT_LEN(4, ixgbe_aci_cmd_link_topo_params);
+
+struct ixgbe_aci_cmd_link_topo_addr {
+ struct ixgbe_aci_cmd_link_topo_params topo_params;
+ __le16 handle;
+#define IXGBE_ACI_LINK_TOPO_HANDLE_S 0
+#define IXGBE_ACI_LINK_TOPO_HANDLE_M (0x3FF << IXGBE_ACI_LINK_TOPO_HANDLE_S)
+/* Used to decode the handle field */
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0
+#define IXGBE_ACI_LINK_TOPO_HANDLE_NODE_S 0
+/* In case of a Mezzanine type */
+#define IXGBE_ACI_LINK_TOPO_HANDLE_MEZZ_NODE_M \
+ (0x3F << IXGBE_ACI_LINK_TOPO_HANDLE_NODE_S)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_MEZZ_S 6
+#define IXGBE_ACI_LINK_TOPO_HANDLE_MEZZ_M \
+ (0x7 << IXGBE_ACI_LINK_TOPO_HANDLE_MEZZ_S)
+/* In case of a LOM type */
+#define IXGBE_ACI_LINK_TOPO_HANDLE_LOM_NODE_M \
+ (0x1FF << IXGBE_ACI_LINK_TOPO_HANDLE_NODE_S)
+};
+
+IXGBE_CHECK_STRUCT_LEN(6, ixgbe_aci_cmd_link_topo_addr);
+
+/* Get Link Topology Handle (direct, 0x06E0) */
+struct ixgbe_aci_cmd_get_link_topo {
+ struct ixgbe_aci_cmd_link_topo_addr addr;
+ u8 node_part_num;
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_GEN_GPS 0x48
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_E610_PTC 0x49
+ u8 rsvd[9];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_link_topo);
+
+/* Read/Write I2C (direct, 0x06E2/0x06E3) */
+struct ixgbe_aci_cmd_i2c {
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr;
+ __le16 i2c_addr;
+ u8 i2c_params;
+#define IXGBE_ACI_I2C_DATA_SIZE_S 0
+#define IXGBE_ACI_I2C_DATA_SIZE_M (0xF << IXGBE_ACI_I2C_DATA_SIZE_S)
+#define IXGBE_ACI_I2C_ADDR_TYPE_M BIT(4)
+#define IXGBE_ACI_I2C_ADDR_TYPE_7BIT 0
+#define IXGBE_ACI_I2C_ADDR_TYPE_10BIT IXGBE_ACI_I2C_ADDR_TYPE_M
+#define IXGBE_ACI_I2C_DATA_OFFSET_S 5
+#define IXGBE_ACI_I2C_DATA_OFFSET_M (0x3 << IXGBE_ACI_I2C_DATA_OFFSET_S)
+#define IXGBE_ACI_I2C_USE_REPEATED_START BIT(7)
+ u8 rsvd;
+ __le16 i2c_bus_addr;
+#define IXGBE_ACI_I2C_ADDR_7BIT_MASK 0x7F
+#define IXGBE_ACI_I2C_ADDR_10BIT_MASK 0x3FF
+ u8 i2c_data[4]; /* Used only by write command, reserved in read. */
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_i2c);
+
+/* Read I2C Response (direct, 0x06E2) */
+struct ixgbe_aci_cmd_read_i2c_resp {
+ u8 i2c_data[16];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_read_i2c_resp);
+
+/* Read/Write MDIO (direct, 0x06E4/0x06E5) */
+struct ixgbe_aci_cmd_mdio {
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr;
+ u8 mdio_device_addr;
+#define IXGBE_ACI_MDIO_DEV_S 0
+#define IXGBE_ACI_MDIO_DEV_M (0x1F << IXGBE_ACI_MDIO_DEV_S)
+#define IXGBE_ACI_MDIO_CLAUSE_22 BIT(5)
+#define IXGBE_ACI_MDIO_CLAUSE_45 BIT(6)
+ u8 mdio_bus_address;
+#define IXGBE_ACI_MDIO_BUS_ADDR_S 0
+#define IXGBE_ACI_MDIO_BUS_ADDR_M (0x1F << IXGBE_ACI_MDIO_BUS_ADDR_S)
+ __le16 offset;
+ __le16 data; /* Input in write cmd, output in read cmd. */
+ u8 rsvd1[4];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_mdio);
+
+/* Set/Get GPIO By Function (direct, 0x06E6/0x06E7) */
+struct ixgbe_aci_cmd_gpio_by_func {
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr;
+ u8 io_func_num;
+#define IXGBE_ACI_GPIO_FUNC_S 0
+#define IXGBE_ACI_GPIO_FUNC_M (0x1F << IXGBE_ACI_GPIO_FUNC_S)
+ u8 io_value; /* Input in write cmd, output in read cmd. */
+#define IXGBE_ACI_GPIO_ON BIT(0)
+#define IXGBE_ACI_GPIO_OFF 0
+ u8 rsvd[8];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_gpio_by_func);
+
+/* Set Port Identification LED (direct, 0x06E9) */
+struct ixgbe_aci_cmd_set_port_id_led {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_PORT_ID_PORT_NUM_VALID BIT(0)
+ u8 ident_mode;
+#define IXGBE_ACI_PORT_IDENT_LED_BLINK BIT(0)
+#define IXGBE_ACI_PORT_IDENT_LED_ORIG 0
+ u8 rsvd[13];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_port_id_led);
+
+/* Set/Get GPIO (direct, 0x06EC/0x06ED) */
+struct ixgbe_aci_cmd_gpio {
+ __le16 gpio_ctrl_handle;
+#define IXGBE_ACI_GPIO_HANDLE_S 0
+#define IXGBE_ACI_GPIO_HANDLE_M (0x3FF << IXGBE_ACI_GPIO_HANDLE_S)
+ u8 gpio_num;
+ u8 gpio_val;
+ u8 rsvd[12];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_gpio);
+
+/* Read/Write SFF EEPROM command (indirect 0x06EE) */
+struct ixgbe_aci_cmd_sff_eeprom {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_SFF_PORT_NUM_VALID BIT(0)
+ __le16 i2c_bus_addr;
+#define IXGBE_ACI_SFF_I2CBUS_7BIT_M 0x7F
+#define IXGBE_ACI_SFF_I2CBUS_10BIT_M 0x3FF
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_M BIT(10)
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_7BIT 0
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_10BIT IXGBE_ACI_SFF_I2CBUS_TYPE_M
+#define IXGBE_ACI_SFF_PAGE_BANK_CTRL_S 11
+#define IXGBE_ACI_SFF_PAGE_BANK_CTRL_M (0x3 << IXGBE_ACI_SFF_PAGE_BANK_CTRL_S)
+#define IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE 0
+#define IXGBE_ACI_SFF_UPDATE_PAGE 1
+#define IXGBE_ACI_SFF_UPDATE_BANK 2
+#define IXGBE_ACI_SFF_UPDATE_PAGE_BANK 3
+#define IXGBE_ACI_SFF_IS_WRITE BIT(15)
+ __le16 i2c_offset;
+ u8 module_bank;
+ u8 module_page;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_sff_eeprom);
+
+/* Program Topology Device NVM (direct, 0x06F2) */
+struct ixgbe_aci_cmd_prog_topo_dev_nvm {
+ struct ixgbe_aci_cmd_link_topo_params topo_params;
+ u8 rsvd[12];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_prog_topo_dev_nvm);
+
+/* Read Topology Device NVM (direct, 0x06F3) */
+struct ixgbe_aci_cmd_read_topo_dev_nvm {
+ struct ixgbe_aci_cmd_link_topo_params topo_params;
+ __le32 start_address;
+#define IXGBE_ACI_READ_TOPO_DEV_NVM_DATA_READ_SIZE 8
+ u8 data_read[IXGBE_ACI_READ_TOPO_DEV_NVM_DATA_READ_SIZE];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_read_topo_dev_nvm);
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Write commands (indirect 0x0703)
+ * NVM Write Activate commands (direct 0x0707)
+ * NVM Shadow RAM Dump commands (direct 0x0707)
+ */
+struct ixgbe_aci_cmd_nvm {
+#define IXGBE_ACI_NVM_MAX_OFFSET 0xFFFFFF
+ __le16 offset_low;
+ u8 offset_high; /* For Write Activate offset_high is used as flags2 */
+ u8 cmd_flags;
+#define IXGBE_ACI_NVM_LAST_CMD BIT(0)
+#define IXGBE_ACI_NVM_PCIR_REQ BIT(0) /* Used by NVM Write reply */
+#define IXGBE_ACI_NVM_PRESERVATION_S 1 /* Used by NVM Write Activate only */
+#define IXGBE_ACI_NVM_PRESERVATION_M (3 << IXGBE_ACI_NVM_PRESERVATION_S)
+#define IXGBE_ACI_NVM_NO_PRESERVATION (0 << IXGBE_ACI_NVM_PRESERVATION_S)
+#define IXGBE_ACI_NVM_PRESERVE_ALL BIT(1)
+#define IXGBE_ACI_NVM_FACTORY_DEFAULT (2 << IXGBE_ACI_NVM_PRESERVATION_S)
+#define IXGBE_ACI_NVM_PRESERVE_SELECTED (3 << IXGBE_ACI_NVM_PRESERVATION_S)
+#define IXGBE_ACI_NVM_ACTIV_SEL_NVM BIT(3) /* Write Activate/SR Dump only */
+#define IXGBE_ACI_NVM_ACTIV_SEL_OROM BIT(4)
+#define IXGBE_ACI_NVM_ACTIV_SEL_NETLIST BIT(5)
+#define IXGBE_ACI_NVM_SPECIAL_UPDATE BIT(6)
+#define IXGBE_ACI_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */
+#define IXGBE_ACI_NVM_ACTIV_SEL_MASK MAKEMASK(0x7, 3)
+#define IXGBE_ACI_NVM_FLASH_ONLY BIT(7)
+#define IXGBE_ACI_NVM_RESET_LVL_M MAKEMASK(0x3, 0) /* Write reply only */
+#define IXGBE_ACI_NVM_POR_FLAG 0
+#define IXGBE_ACI_NVM_PERST_FLAG 1
+#define IXGBE_ACI_NVM_EMPR_FLAG 2
+#define IXGBE_ACI_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */
+ /* For Write Activate, several flags are sent as part of a separate
+ * flags2 field using a separate byte. For simplicity of the software
+ * interface, we pass the flags as a 16 bit value so these flags are
+ * all offset by 8 bits
+ */
+#define IXGBE_ACI_NVM_ACTIV_REQ_EMPR BIT(8) /* NVM Write Activate only */
+ __le16 module_typeid;
+ __le16 length;
+#define IXGBE_ACI_NVM_ERASE_LEN 0xFFFF
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* NVM module type IDs and the offsets/read lengths needed for struct ixgbe_aci_cmd_nvm. */
+#define IXGBE_ACI_NVM_SECTOR_UNIT 4096 /* In Bytes */
+#define IXGBE_ACI_NVM_WORD_UNIT 2 /* In Bytes */
+
+#define IXGBE_ACI_NVM_START_POINT 0
+#define IXGBE_ACI_NVM_EMP_SR_PTR_OFFSET 0x90
+#define IXGBE_ACI_NVM_EMP_SR_PTR_RD_LEN 2 /* In Bytes */
+#define IXGBE_ACI_NVM_EMP_SR_PTR_M MAKEMASK(0x7FFF, 0)
+#define IXGBE_ACI_NVM_EMP_SR_PTR_TYPE_S 15
+#define IXGBE_ACI_NVM_EMP_SR_PTR_TYPE_M BIT(15)
+#define IXGBE_ACI_NVM_EMP_SR_PTR_TYPE_SECTOR 1
+
+#define IXGBE_ACI_NVM_LLDP_CFG_PTR_OFFSET 0x46
+#define IXGBE_ACI_NVM_LLDP_CFG_HEADER_LEN 2 /* In Bytes */
+#define IXGBE_ACI_NVM_LLDP_CFG_PTR_RD_LEN 2 /* In Bytes */
+
+#define IXGBE_ACI_NVM_LLDP_PRESERVED_MOD_ID 0x129
+#define IXGBE_ACI_NVM_CUR_LLDP_PERSIST_RD_OFFSET 2 /* In Bytes */
+#define IXGBE_ACI_NVM_LLDP_STATUS_M MAKEMASK(0xF, 0)
+#define IXGBE_ACI_NVM_LLDP_STATUS_M_LEN 4 /* In Bits */
+#define IXGBE_ACI_NVM_LLDP_STATUS_RD_LEN 4 /* In Bytes */
+
+#define IXGBE_ACI_NVM_MINSREV_MOD_ID 0x130
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_nvm);
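+
+/* Sketch: the 24-bit flash offset is split across offset_low/offset_high.
+ * For a read of read_len bytes (hypothetical variable) at offset 0x123456
+ * (byte-order helper assumed):
+ *
+ *    cmd->offset_low = htole16(0x3456);
+ *    cmd->offset_high = 0x12;
+ *    cmd->length = htole16(read_len);
+ */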
+
+/* Used for reading and writing MinSRev using 0x0701 and 0x0703. Note that the
+ * type field is excluded from the section when reading from and writing to
+ * a module via the module_typeid field with these ACI commands.
+ */
+struct ixgbe_aci_cmd_nvm_minsrev {
+ __le16 length;
+ __le16 validity;
+#define IXGBE_ACI_NVM_MINSREV_NVM_VALID BIT(0)
+#define IXGBE_ACI_NVM_MINSREV_OROM_VALID BIT(1)
+ __le16 nvm_minsrev_l;
+ __le16 nvm_minsrev_h;
+ __le16 orom_minsrev_l;
+ __le16 orom_minsrev_h;
+};
+
+IXGBE_CHECK_STRUCT_LEN(12, ixgbe_aci_cmd_nvm_minsrev);
+
+/* Used for 0x0704 as well as for 0x0705 commands */
+struct ixgbe_aci_cmd_nvm_cfg {
+ u8 cmd_flags;
+#define IXGBE_ACI_ANVM_MULTIPLE_ELEMS BIT(0)
+#define IXGBE_ACI_ANVM_IMMEDIATE_FIELD BIT(1)
+#define IXGBE_ACI_ANVM_NEW_CFG BIT(2)
+ u8 reserved;
+ __le16 count;
+ __le16 id;
+ u8 reserved1[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_nvm_cfg);
+
+struct ixgbe_aci_cmd_nvm_cfg_data {
+ __le16 field_id;
+ __le16 field_options;
+ __le16 field_value;
+};
+
+IXGBE_CHECK_STRUCT_LEN(6, ixgbe_aci_cmd_nvm_cfg_data);
+
+/* NVM Checksum Command (direct, 0x0706) */
+struct ixgbe_aci_cmd_nvm_checksum {
+ u8 flags;
+#define IXGBE_ACI_NVM_CHECKSUM_VERIFY BIT(0)
+#define IXGBE_ACI_NVM_CHECKSUM_RECALC BIT(1)
+ u8 rsvd;
+ __le16 checksum; /* Used only by response */
+#define IXGBE_ACI_NVM_CHECKSUM_CORRECT 0xBABA
+ u8 rsvd2[12];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_nvm_checksum);
+
+/* Used for NVM Sanitization command - 0x070C */
+struct ixgbe_aci_cmd_nvm_sanitization {
+ u8 cmd_flags;
+#define IXGBE_ACI_SANITIZE_REQ_READ 0
+#define IXGBE_ACI_SANITIZE_REQ_OPERATE BIT(0)
+
+#define IXGBE_ACI_SANITIZE_READ_SUBJECT_NVM_BITS 0
+#define IXGBE_ACI_SANITIZE_READ_SUBJECT_NVM_STATE BIT(1)
+#define IXGBE_ACI_SANITIZE_OPERATE_SUBJECT_CLEAR 0
+ u8 values;
+#define IXGBE_ACI_SANITIZE_NVM_BITS_HOST_CLEAN_SUPPORT BIT(0)
+#define IXGBE_ACI_SANITIZE_NVM_BITS_BMC_CLEAN_SUPPORT BIT(2)
+#define IXGBE_ACI_SANITIZE_NVM_STATE_HOST_CLEAN_DONE BIT(0)
+#define IXGBE_ACI_SANITIZE_NVM_STATE_HOST_CLEAN_SUCCESS BIT(1)
+#define IXGBE_ACI_SANITIZE_NVM_STATE_BMC_CLEAN_DONE BIT(2)
+#define IXGBE_ACI_SANITIZE_NVM_STATE_BMC_CLEAN_SUCCESS BIT(3)
+#define IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE BIT(0)
+#define IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_SUCCESS BIT(1)
+#define IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE BIT(2)
+#define IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_SUCCESS BIT(3)
+ u8 reserved[14];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_nvm_sanitization);
+
+/* Write/Read Alternate - Direct (direct 0x0900/0x0902) */
+struct ixgbe_aci_cmd_read_write_alt_direct {
+ __le32 dword0_addr;
+ __le32 dword0_value;
+ __le32 dword1_addr;
+ __le32 dword1_value;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_read_write_alt_direct);
+
+/* Write/Read Alternate - Indirect (indirect 0x0901/0x0903) */
+struct ixgbe_aci_cmd_read_write_alt_indirect {
+ __le32 base_dword_addr;
+ __le32 num_dwords;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_read_write_alt_indirect);
+
+/* Done Alternate Write (direct 0x0904) */
+struct ixgbe_aci_cmd_done_alt_write {
+ u8 flags;
+#define IXGBE_ACI_CMD_UEFI_BIOS_MODE BIT(0)
+#define IXGBE_ACI_RESP_RESET_NEEDED BIT(1)
+ u8 reserved[15];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_done_alt_write);
+
+/* Clear Port Alternate Write (direct 0x0906) */
+struct ixgbe_aci_cmd_clear_port_alt_write {
+ u8 reserved[16];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_clear_port_alt_write);
+
+/* Get CGU abilities command response data structure (indirect 0x0C61) */
+struct ixgbe_aci_cmd_get_cgu_abilities {
+ u8 num_inputs;
+ u8 num_outputs;
+ u8 pps_dpll_idx;
+ u8 synce_dpll_idx;
+ __le32 max_in_freq;
+ __le32 max_in_phase_adj;
+ __le32 max_out_freq;
+ __le32 max_out_phase_adj;
+ u8 cgu_part_num;
+ u8 rsvd[3];
+};
+
+IXGBE_CHECK_STRUCT_LEN(24, ixgbe_aci_cmd_get_cgu_abilities);
+
+#define IXGBE_ACI_NODE_HANDLE_VALID BIT(10)
+#define IXGBE_ACI_NODE_HANDLE MAKEMASK(0x3FF, 0)
+#define IXGBE_ACI_DRIVING_CLK_NUM_SHIFT 10
+#define IXGBE_ACI_DRIVING_CLK_NUM MAKEMASK(0x3F, IXGBE_ACI_DRIVING_CLK_NUM_SHIFT)
+
+/* Set CGU input config (direct 0x0C62) */
+struct ixgbe_aci_cmd_set_cgu_input_config {
+ u8 input_idx;
+ u8 flags1;
+#define IXGBE_ACI_SET_CGU_IN_CFG_FLG1_UPDATE_FREQ BIT(6)
+#define IXGBE_ACI_SET_CGU_IN_CFG_FLG1_UPDATE_DELAY BIT(7)
+ u8 flags2;
+#define IXGBE_ACI_SET_CGU_IN_CFG_FLG2_INPUT_EN BIT(5)
+#define IXGBE_ACI_SET_CGU_IN_CFG_FLG2_ESYNC_EN BIT(6)
+ u8 rsvd;
+ __le32 freq;
+ __le32 phase_delay;
+ u8 rsvd2[2];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_cgu_input_config);
+
+/* Get CGU input config response descriptor structure (direct 0x0C63) */
+struct ixgbe_aci_cmd_get_cgu_input_config {
+ u8 input_idx;
+ u8 status;
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_LOS BIT(0)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_SCM_FAIL BIT(1)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_CFM_FAIL BIT(2)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_GST_FAIL BIT(3)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_PFM_FAIL BIT(4)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_ESYNC_FAIL BIT(6)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_ESYNC_CAP BIT(7)
+ u8 type;
+#define IXGBE_ACI_GET_CGU_IN_CFG_TYPE_READ_ONLY BIT(0)
+#define IXGBE_ACI_GET_CGU_IN_CFG_TYPE_GPS BIT(4)
+#define IXGBE_ACI_GET_CGU_IN_CFG_TYPE_EXTERNAL BIT(5)
+#define IXGBE_ACI_GET_CGU_IN_CFG_TYPE_PHY BIT(6)
+ u8 flags1;
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG1_PHASE_DELAY_SUPP BIT(0)
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG1_1PPS_SUPP BIT(2)
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG1_10MHZ_SUPP BIT(3)
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG1_ANYFREQ BIT(7)
+ __le32 freq;
+ __le32 phase_delay;
+ u8 flags2;
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG2_INPUT_EN BIT(5)
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG2_ESYNC_EN BIT(6)
+ u8 rsvd[1];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_input_config);
+
+/* Set CGU output config (direct 0x0C64) */
+struct ixgbe_aci_cmd_set_cgu_output_config {
+ u8 output_idx;
+ u8 flags;
+#define IXGBE_ACI_SET_CGU_OUT_CFG_OUT_EN BIT(0)
+#define IXGBE_ACI_SET_CGU_OUT_CFG_ESYNC_EN BIT(1)
+#define IXGBE_ACI_SET_CGU_OUT_CFG_UPDATE_FREQ BIT(2)
+#define IXGBE_ACI_SET_CGU_OUT_CFG_UPDATE_PHASE BIT(3)
+#define IXGBE_ACI_SET_CGU_OUT_CFG_UPDATE_SRC_SEL BIT(4)
+ u8 src_sel;
+#define IXGBE_ACI_SET_CGU_OUT_CFG_DPLL_SRC_SEL MAKEMASK(0x1F, 0)
+ u8 rsvd;
+ __le32 freq;
+ __le32 phase_delay;
+ u8 rsvd2[2];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_cgu_output_config);
+
+/* Get CGU output config (direct 0x0C65) */
+struct ixgbe_aci_cmd_get_cgu_output_config {
+ u8 output_idx;
+ u8 flags;
+#define IXGBE_ACI_GET_CGU_OUT_CFG_OUT_EN BIT(0)
+#define IXGBE_ACI_GET_CGU_OUT_CFG_ESYNC_EN BIT(1)
+#define IXGBE_ACI_GET_CGU_OUT_CFG_ESYNC_ABILITY BIT(2)
+ u8 src_sel;
+#define IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_SRC_SEL_SHIFT 0
+#define IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_SRC_SEL \
+ MAKEMASK(0x1F, IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_SRC_SEL_SHIFT)
+#define IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_MODE_SHIFT 5
+#define IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_MODE \
+ MAKEMASK(0x7, IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_MODE_SHIFT)
+ u8 rsvd;
+ __le32 freq;
+ __le32 src_freq;
+ u8 rsvd2[2];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_output_config);
+
+/* Get CGU DPLL status (direct 0x0C66) */
+struct ixgbe_aci_cmd_get_cgu_dpll_status {
+ u8 dpll_num;
+ u8 ref_state;
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_LOS BIT(0)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_SCM BIT(1)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_CFM BIT(2)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_GST BIT(3)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_PFM BIT(4)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_FAST_LOCK_EN BIT(5)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_ESYNC BIT(6)
+ __le16 dpll_state;
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_LOCK BIT(0)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_HO BIT(1)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_HO_READY BIT(2)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_FLHIT BIT(5)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_PSLHIT BIT(7)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SHIFT 8
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SEL \
+ MAKEMASK(0x1F, IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SHIFT)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_MODE_SHIFT 13
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_MODE \
+ MAKEMASK(0x7, IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_MODE_SHIFT)
+ __le32 phase_offset_h;
+ __le32 phase_offset_l;
+ u8 eec_mode;
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_EEC_MODE_1 0xA
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_EEC_MODE_2 0xB
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_EEC_MODE_UNKNOWN 0xF
+ u8 rsvd[1];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_dpll_status);
+
+/* Set CGU DPLL config (direct 0x0C67) */
+struct ixgbe_aci_cmd_set_cgu_dpll_config {
+ u8 dpll_num;
+ u8 ref_state;
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_LOS BIT(0)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_SCM BIT(1)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_CFM BIT(2)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_GST BIT(3)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_PFM BIT(4)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_FLOCK_EN BIT(5)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_ESYNC BIT(6)
+ u8 rsvd;
+ u8 config;
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_CLK_REF_SEL MAKEMASK(0x1F, 0)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_MODE MAKEMASK(0x7, 5)
+ u8 rsvd2[8];
+ u8 eec_mode;
+ u8 rsvd3[1];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_cgu_dpll_config);
+
+/* Set CGU reference priority (direct 0x0C68) */
+struct ixgbe_aci_cmd_set_cgu_ref_prio {
+ u8 dpll_num;
+ u8 ref_idx;
+ u8 ref_priority;
+ u8 rsvd[11];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_cgu_ref_prio);
+
+/* Get CGU reference priority (direct 0x0C69) */
+struct ixgbe_aci_cmd_get_cgu_ref_prio {
+ u8 dpll_num;
+ u8 ref_idx;
+ u8 ref_priority; /* Valid only in response */
+ u8 rsvd[13];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_ref_prio);
+
+/* Get CGU info (direct 0x0C6A) */
+struct ixgbe_aci_cmd_get_cgu_info {
+ __le32 cgu_id;
+ __le32 cgu_cfg_ver;
+ __le32 cgu_fw_ver;
+ u8 node_part_num;
+ u8 dev_rev;
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_info);
+
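+/* Temperature TCA event (indirect 0x0C94) */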
+struct ixgbe_aci_cmd_temp_tca_event {
+ u8 event_desc;
+#define IXGBE_TEMP_TCA_EVENT_DESC_SUBJ_SHIFT 0
+#define IXGBE_TEMP_TCA_EVENT_DESC_SUBJ_NVM 0
+#define IXGBE_TEMP_TCA_EVENT_DESC_SUBJ_EVENT_STATE 1
+#define IXGBE_TEMP_TCA_EVENT_DESC_SUBJ_ALL 2
+
+#define IXGBE_TEMP_TCA_EVENT_DESC_ALARM_SHIFT 2
+#define IXGBE_TEMP_TCA_EVENT_DESC_WARNING_CLEARED 0
+#define IXGBE_TEMP_TCA_EVENT_DESC_ALARM_CLEARED 1
+#define IXGBE_TEMP_TCA_EVENT_DESC_WARNING_RAISED 2
+#define IXGBE_TEMP_TCA_EVENT_DESC_ALARM_RAISED 3
+
+ u8 reserved;
+ __le16 temperature;
+ __le16 thermal_sensor_max_value;
+ __le16 thermal_sensor_min_value;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_temp_tca_event);
+
+/* Debug Dump Internal Data (indirect 0xFF08) */
+struct ixgbe_aci_cmd_debug_dump_internals {
+ __le16 cluster_id; /* Expresses next cluster ID in response */
+#define IXGBE_ACI_DBG_DUMP_CLUSTER_ID_LINK 0
+#define IXGBE_ACI_DBG_DUMP_CLUSTER_ID_FULL_CSR_SPACE 1
+ __le16 table_id; /* Used only for non-memory clusters */
+ __le32 idx; /* In table entries for tables, in bytes for memory */
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_debug_dump_internals);
+
+/* Set Health Status (direct 0xFF20) */
+struct ixgbe_aci_cmd_set_health_status_config {
+ u8 event_source;
+#define IXGBE_ACI_HEALTH_STATUS_SET_PF_SPECIFIC_MASK BIT(0)
+#define IXGBE_ACI_HEALTH_STATUS_SET_ALL_PF_MASK BIT(1)
+#define IXGBE_ACI_HEALTH_STATUS_SET_GLOBAL_MASK BIT(2)
+ u8 reserved[15];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_health_status_config);
+
+#define IXGBE_ACI_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT 0x101
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_TYPE 0x102
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_QUAL 0x103
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_COMM 0x104
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_CONFLICT 0x105
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_NOT_PRESENT 0x106
+#define IXGBE_ACI_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED 0x107
+#define IXGBE_ACI_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT 0x108
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_DIAGNOSTIC_FEATURE 0x109
+#define IXGBE_ACI_HEALTH_STATUS_ERR_INVALID_LINK_CFG 0x10B
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PORT_ACCESS 0x10C
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PORT_UNREACHABLE 0x10D
+#define IXGBE_ACI_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED 0x10F
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PARALLEL_FAULT 0x110
+#define IXGBE_ACI_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED 0x111
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NETLIST_TOPO 0x112
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NETLIST 0x113
+#define IXGBE_ACI_HEALTH_STATUS_ERR_TOPO_CONFLICT 0x114
+#define IXGBE_ACI_HEALTH_STATUS_ERR_LINK_HW_ACCESS 0x115
+#define IXGBE_ACI_HEALTH_STATUS_ERR_LINK_RUNTIME 0x116
+#define IXGBE_ACI_HEALTH_STATUS_ERR_DNL_INIT 0x117
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PHY_NVM_PROG 0x120
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PHY_FW_LOAD 0x121
+#define IXGBE_ACI_HEALTH_STATUS_INFO_RECOVERY 0x500
+#define IXGBE_ACI_HEALTH_STATUS_ERR_FLASH_ACCESS 0x501
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NVM_AUTH 0x502
+#define IXGBE_ACI_HEALTH_STATUS_ERR_OROM_AUTH 0x503
+#define IXGBE_ACI_HEALTH_STATUS_ERR_DDP_AUTH 0x504
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NVM_COMPAT 0x505
+#define IXGBE_ACI_HEALTH_STATUS_ERR_OROM_COMPAT 0x506
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NVM_SEC_VIOLATION 0x507
+#define IXGBE_ACI_HEALTH_STATUS_ERR_OROM_SEC_VIOLATION 0x508
+#define IXGBE_ACI_HEALTH_STATUS_ERR_DCB_MIB 0x509
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MNG_TIMEOUT 0x50A
+#define IXGBE_ACI_HEALTH_STATUS_ERR_BMC_RESET 0x50B
+#define IXGBE_ACI_HEALTH_STATUS_ERR_LAST_MNG_FAIL 0x50C
+#define IXGBE_ACI_HEALTH_STATUS_ERR_RESOURCE_ALLOC_FAIL 0x50D
+#define IXGBE_ACI_HEALTH_STATUS_ERR_FW_LOOP 0x1000
+#define IXGBE_ACI_HEALTH_STATUS_ERR_FW_PFR_FAIL 0x1001
+#define IXGBE_ACI_HEALTH_STATUS_ERR_LAST_FAIL_AQ 0x1002
+
+/* Get Health Status codes (indirect 0xFF21) */
+struct ixgbe_aci_cmd_get_supported_health_status_codes {
+ __le16 health_code_count;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_supported_health_status_codes);
+
+/* Get Health Status (indirect 0xFF22) */
+struct ixgbe_aci_cmd_get_health_status {
+ __le16 health_status_count;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_health_status);
+
+/* Get Health Status event buffer entry, (0xFF22)
+ * repeated per reported health status
+ */
+struct ixgbe_aci_cmd_health_status_elem {
+ __le16 health_status_code;
+ __le16 event_source;
+#define IXGBE_ACI_HEALTH_STATUS_PF (0x1)
+#define IXGBE_ACI_HEALTH_STATUS_PORT (0x2)
+#define IXGBE_ACI_HEALTH_STATUS_GLOBAL (0x3)
+ __le32 internal_data1;
+#define IXGBE_ACI_HEALTH_STATUS_UNDEFINED_DATA (0xDEADBEEF)
+ __le32 internal_data2;
+};
+
+IXGBE_CHECK_STRUCT_LEN(12, ixgbe_aci_cmd_health_status_elem);
+
+/* Clear Health Status (direct 0xFF23) */
+struct ixgbe_aci_cmd_clear_health_status {
+ __le32 reserved[4];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_clear_health_status);
+
+enum ixgbe_aci_fw_logging_mod {
+ IXGBE_ACI_FW_LOG_ID_GENERAL = 0,
+ IXGBE_ACI_FW_LOG_ID_CTRL = 1,
+ IXGBE_ACI_FW_LOG_ID_LINK = 2,
+ IXGBE_ACI_FW_LOG_ID_LINK_TOPO = 3,
+ IXGBE_ACI_FW_LOG_ID_DNL = 4,
+ IXGBE_ACI_FW_LOG_ID_I2C = 5,
+ IXGBE_ACI_FW_LOG_ID_SDP = 6,
+ IXGBE_ACI_FW_LOG_ID_MDIO = 7,
+ IXGBE_ACI_FW_LOG_ID_ADMINQ = 8,
+ IXGBE_ACI_FW_LOG_ID_HDMA = 9,
+ IXGBE_ACI_FW_LOG_ID_LLDP = 10,
+ IXGBE_ACI_FW_LOG_ID_DCBX = 11,
+ IXGBE_ACI_FW_LOG_ID_DCB = 12,
+ IXGBE_ACI_FW_LOG_ID_XLR = 13,
+ IXGBE_ACI_FW_LOG_ID_NVM = 14,
+ IXGBE_ACI_FW_LOG_ID_AUTH = 15,
+ IXGBE_ACI_FW_LOG_ID_VPD = 16,
+ IXGBE_ACI_FW_LOG_ID_IOSF = 17,
+ IXGBE_ACI_FW_LOG_ID_PARSER = 18,
+ IXGBE_ACI_FW_LOG_ID_SW = 19,
+ IXGBE_ACI_FW_LOG_ID_SCHEDULER = 20,
+ IXGBE_ACI_FW_LOG_ID_TXQ = 21,
+ IXGBE_ACI_FW_LOG_ID_ACL = 22,
+ IXGBE_ACI_FW_LOG_ID_POST = 23,
+ IXGBE_ACI_FW_LOG_ID_WATCHDOG = 24,
+ IXGBE_ACI_FW_LOG_ID_TASK_DISPATCH = 25,
+ IXGBE_ACI_FW_LOG_ID_MNG = 26,
+ IXGBE_ACI_FW_LOG_ID_SYNCE = 27,
+ IXGBE_ACI_FW_LOG_ID_HEALTH = 28,
+ IXGBE_ACI_FW_LOG_ID_TSDRV = 29,
+ IXGBE_ACI_FW_LOG_ID_PFREG = 30,
+ IXGBE_ACI_FW_LOG_ID_MDLVER = 31,
+ IXGBE_ACI_FW_LOG_ID_MAX = 32,
+};
+
+/* Only a single log level should be set; all log levels below the set value
+ * are implied, e.g. if the level is set to IXGBE_FWLOG_LEVEL_VERBOSE, then
+ * every other level except IXGBE_FWLOG_LEVEL_NONE is included.
+ */
+enum ixgbe_fwlog_level {
+ IXGBE_FWLOG_LEVEL_NONE = 0,
+ IXGBE_FWLOG_LEVEL_ERROR = 1,
+ IXGBE_FWLOG_LEVEL_WARNING = 2,
+ IXGBE_FWLOG_LEVEL_NORMAL = 3,
+ IXGBE_FWLOG_LEVEL_VERBOSE = 4,
+ IXGBE_FWLOG_LEVEL_INVALID, /* all values >= this entry are invalid */
+};
+
+struct ixgbe_fwlog_module_entry {
+ /* module ID for the corresponding firmware logging event */
+ u16 module_id;
+ /* verbosity level for the module_id */
+ u8 log_level;
+};
+
+struct ixgbe_fwlog_cfg {
+ /* list of modules for configuring log level */
+ struct ixgbe_fwlog_module_entry module_entries[IXGBE_ACI_FW_LOG_ID_MAX];
+#define IXGBE_FWLOG_OPTION_ARQ_ENA BIT(0)
+#define IXGBE_FWLOG_OPTION_UART_ENA BIT(1)
+ /* set before calling ixgbe_fwlog_init() so the PF registers for firmware
+ * logging on initialization
+ */
+#define IXGBE_FWLOG_OPTION_REGISTER_ON_INIT BIT(2)
+ /* set in the ixgbe_fwlog_get() response if the PF is registered for FW
+ * logging events over ARQ
+ */
+#define IXGBE_FWLOG_OPTION_IS_REGISTERED BIT(3)
+ /* options used to configure firmware logging */
+ u16 options;
+ /* minimum number of log events sent per Admin Receive Queue event */
+ u8 log_resolution;
+};
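+
+/* Configuration sketch (illustrative only): enable verbose logging for the
+ * link module over the admin receive queue, with 10 events per ARQ message
+ * (any value within the min/max resolution bounds defined further below).
+ *
+ *    struct ixgbe_fwlog_cfg cfg = { 0 };
+ *
+ *    cfg.module_entries[IXGBE_ACI_FW_LOG_ID_LINK].module_id =
+ *        IXGBE_ACI_FW_LOG_ID_LINK;
+ *    cfg.module_entries[IXGBE_ACI_FW_LOG_ID_LINK].log_level =
+ *        IXGBE_FWLOG_LEVEL_VERBOSE;
+ *    cfg.options = IXGBE_FWLOG_OPTION_ARQ_ENA;
+ *    cfg.log_resolution = 10;
+ */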
+
+struct ixgbe_fwlog_data {
+ u16 data_size;
+ u8 *data;
+};
+
+struct ixgbe_fwlog_ring {
+ struct ixgbe_fwlog_data *rings;
+ u16 size;
+ u16 head;
+ u16 tail;
+};
+
+#define IXGBE_FWLOG_RING_SIZE_DFLT 256
+#define IXGBE_FWLOG_RING_SIZE_MAX 512
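+
+/* Index sketch: both ring sizes are powers of two, so head/tail can wrap
+ * with a mask instead of a modulo (helper name hypothetical):
+ *
+ *    static inline u16
+ *    ixgbe_fwlog_ring_next(u16 idx, u16 size)
+ *    {
+ *            return (idx + 1) & (size - 1);
+ *    }
+ */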
+
+/* Set FW Logging configuration (indirect 0xFF30)
+ * Register for FW Logging (indirect 0xFF31)
+ * Query FW Logging (indirect 0xFF32)
+ * FW Log Event (indirect 0xFF33)
+ * Get FW Log (indirect 0xFF34)
+ * Clear FW Log (indirect 0xFF35)
+ */
+struct ixgbe_aci_cmd_fw_log {
+ u8 cmd_flags;
+#define IXGBE_ACI_FW_LOG_CONF_UART_EN BIT(0)
+#define IXGBE_ACI_FW_LOG_CONF_AQ_EN BIT(1)
+#define IXGBE_ACI_FW_LOG_QUERY_REGISTERED BIT(2)
+#define IXGBE_ACI_FW_LOG_CONF_SET_VALID BIT(3)
+#define IXGBE_ACI_FW_LOG_AQ_REGISTER BIT(0)
+#define IXGBE_ACI_FW_LOG_AQ_QUERY BIT(2)
+#define IXGBE_ACI_FW_LOG_PERSISTENT BIT(0)
+ u8 rsp_flag;
+#define IXGBE_ACI_FW_LOG_MORE_DATA BIT(1)
+ __le16 fw_rt_msb;
+ union {
+ struct {
+ __le32 fw_rt_lsb;
+ } sync;
+ struct {
+ __le16 log_resolution;
+#define IXGBE_ACI_FW_LOG_MIN_RESOLUTION (1)
+#define IXGBE_ACI_FW_LOG_MAX_RESOLUTION (128)
+ __le16 mdl_cnt;
+ } cfg;
+ } ops;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_fw_log);
+
+/* Response Buffer for:
+ * Set Firmware Logging Configuration (0xFF30)
+ * Query FW Logging (0xFF32)
+ */
+struct ixgbe_aci_cmd_fw_log_cfg_resp {
+ __le16 module_identifier;
+ u8 log_level;
+ u8 rsvd0;
+};
+
+IXGBE_CHECK_STRUCT_LEN(4, ixgbe_aci_cmd_fw_log_cfg_resp);
+
+/**
+ * struct ixgbe_aci_desc - Admin Command Interface (ACI) descriptor
+ * @flags: IXGBE_ACI_FLAG_* flags
+ * @opcode: Admin command opcode
+ * @datalen: length in bytes of indirect/external data buffer
+ * @retval: return value from firmware
+ * @cookie_high: opaque data high-half
+ * @cookie_low: opaque data low-half
+ * @params: command-specific parameters
+ *
+ * Descriptor format for commands the driver posts via the Admin Command
+ * Interface (ACI). The firmware writes back onto the command descriptor to
+ * return the result of the command. Asynchronous events that are not an
+ * immediate result of a command are delivered over the ACI using the same
+ * descriptor format. Descriptors are in little-endian notation with 32-bit
+ * words.
+ */
+struct ixgbe_aci_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ u8 raw[16];
+ struct ixgbe_aci_cmd_generic generic;
+ struct ixgbe_aci_cmd_get_ver get_ver;
+ struct ixgbe_aci_cmd_driver_ver driver_ver;
+ struct ixgbe_aci_cmd_get_exp_err exp_err;
+ struct ixgbe_aci_cmd_req_res res_owner;
+ struct ixgbe_aci_cmd_list_caps get_cap;
+ struct ixgbe_aci_cmd_disable_rxen disable_rxen;
+ struct ixgbe_aci_cmd_get_fw_event get_fw_event;
+ struct ixgbe_aci_cmd_get_phy_caps get_phy;
+ struct ixgbe_aci_cmd_set_phy_cfg set_phy;
+ struct ixgbe_aci_cmd_restart_an restart_an;
+ struct ixgbe_aci_cmd_get_link_status get_link_status;
+ struct ixgbe_aci_cmd_set_event_mask set_event_mask;
+ struct ixgbe_aci_cmd_get_link_topo get_link_topo;
+ struct ixgbe_aci_cmd_i2c read_write_i2c;
+ struct ixgbe_aci_cmd_read_i2c_resp read_i2c_resp;
+ struct ixgbe_aci_cmd_mdio read_write_mdio;
+ struct ixgbe_aci_cmd_mdio read_mdio;
+ struct ixgbe_aci_cmd_mdio write_mdio;
+ struct ixgbe_aci_cmd_set_port_id_led set_port_id_led;
+ struct ixgbe_aci_cmd_gpio_by_func read_write_gpio_by_func;
+ struct ixgbe_aci_cmd_gpio read_write_gpio;
+ struct ixgbe_aci_cmd_sff_eeprom read_write_sff_param;
+ struct ixgbe_aci_cmd_prog_topo_dev_nvm prog_topo_dev_nvm;
+ struct ixgbe_aci_cmd_read_topo_dev_nvm read_topo_dev_nvm;
+ struct ixgbe_aci_cmd_nvm nvm;
+ struct ixgbe_aci_cmd_nvm_cfg nvm_cfg;
+ struct ixgbe_aci_cmd_nvm_checksum nvm_checksum;
+ struct ixgbe_aci_cmd_read_write_alt_direct read_write_alt_direct;
+ struct ixgbe_aci_cmd_read_write_alt_indirect read_write_alt_indirect;
+ struct ixgbe_aci_cmd_done_alt_write done_alt_write;
+ struct ixgbe_aci_cmd_clear_port_alt_write clear_port_alt_write;
+ struct ixgbe_aci_cmd_debug_dump_internals debug_dump;
+ struct ixgbe_aci_cmd_set_health_status_config
+ set_health_status_config;
+ struct ixgbe_aci_cmd_get_supported_health_status_codes
+ get_supported_health_status_codes;
+ struct ixgbe_aci_cmd_get_health_status get_health_status;
+ struct ixgbe_aci_cmd_clear_health_status clear_health_status;
+ struct ixgbe_aci_cmd_fw_log fw_log;
+ struct ixgbe_aci_cmd_nvm_sanitization nvm_sanitization;
+ } params;
+};
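+
+/* Usage sketch (illustrative only; the ACI send/poll path is assumed to
+ * set the required flags and is not defined in this header): issue a
+ * direct Get Version command and read the firmware write-back.
+ *
+ *    struct ixgbe_aci_desc desc = { 0 };
+ *
+ *    desc.opcode = htole16(ixgbe_aci_opc_get_ver);
+ *    // post the descriptor to the ACI and poll for completion, then
+ *    // desc.params.get_ver.fw_major / fw_minor hold the FW version.
+ */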
+
+/* E610-specific adapter context structures */
+
+struct ixgbe_link_status {
+ /* Refer to the IXGBE_PHY_TYPE_LOW_/HIGH_ defines for bit definitions */
+ u64 phy_type_low;
+ u64 phy_type_high;
+ u8 topo_media_conflict;
+ u16 max_frame_size;
+ u16 link_speed;
+ u16 req_speeds;
+ u8 link_cfg_err;
+ u8 lse_ena; /* Link Status Event notification */
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ u8 fec_info;
+ u8 pacing;
+ /* Refer to the module_type[] defines in struct
+ * ixgbe_aci_cmd_get_phy_caps_data
+ */
+ u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+};
+
+/* Common HW capabilities for SW use */
+struct ixgbe_hw_common_caps {
+ /* Write CSR protection */
+ u64 wr_csr_prot;
+ u32 switching_mode;
+ /* switching mode supported - EVB switching (including cloud) */
+#define IXGBE_NVM_IMAGE_TYPE_EVB 0x0
+
+ /* Manageability mode & supported protocols over MCTP */
+ u32 mgmt_mode;
+#define IXGBE_MGMT_MODE_PASS_THRU_MODE_M 0xF
+#define IXGBE_MGMT_MODE_CTL_INTERFACE_M 0xF0
+#define IXGBE_MGMT_MODE_REDIR_SB_INTERFACE_M 0xF00
+
+ u32 mgmt_protocols_mctp;
+#define IXGBE_MGMT_MODE_PROTO_RSVD BIT(0)
+#define IXGBE_MGMT_MODE_PROTO_PLDM BIT(1)
+#define IXGBE_MGMT_MODE_PROTO_OEM BIT(2)
+#define IXGBE_MGMT_MODE_PROTO_NC_SI BIT(3)
+
+ u32 os2bmc;
+ u32 valid_functions;
+ /* DCB capabilities */
+ u32 active_tc_bitmap;
+ u32 maxtc;
+
+ /* RSS related capabilities */
+ u32 rss_table_size; /* 512 for PFs and 64 for VFs */
+ u32 rss_table_entry_width; /* RSS Entry width in bits */
+
+ /* Tx/Rx queues */
+ u32 num_rxq; /* Number/Total Rx queues */
+ u32 rxq_first_id; /* First queue ID for Rx queues */
+ u32 num_txq; /* Number/Total Tx queues */
+ u32 txq_first_id; /* First queue ID for Tx queues */
+
+ /* MSI-X vectors */
+ u32 num_msix_vectors;
+ u32 msix_vector_first_id;
+
+ /* Max MTU for function or device */
+ u32 max_mtu;
+
+ /* WOL related */
+ u32 num_wol_proxy_fltr;
+ u32 wol_proxy_vsi_seid;
+
+ /* LED/SDP pin count */
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+
+ /* LED/SDP - Supports up to 12 LED pins and 8 SDP signals */
+#define IXGBE_MAX_SUPPORTED_GPIO_LED 12
+#define IXGBE_MAX_SUPPORTED_GPIO_SDP 8
+ u8 led[IXGBE_MAX_SUPPORTED_GPIO_LED];
+ u8 sdp[IXGBE_MAX_SUPPORTED_GPIO_SDP];
+ /* SR-IOV virtualization */
+ u8 sr_iov_1_1; /* SR-IOV enabled */
+ /* VMDQ */
+ u8 vmdq; /* VMDQ supported */
+
+ /* EVB capabilities */
+ u8 evb_802_1_qbg; /* Edge Virtual Bridging */
+ u8 evb_802_1_qbh; /* Bridge Port Extension */
+
+ u8 dcb;
+ u8 iscsi;
+ u8 mgmt_cem;
+
+ /* WoL and APM support */
+#define IXGBE_WOL_SUPPORT_M BIT(0)
+#define IXGBE_ACPI_PROG_MTHD_M BIT(1)
+#define IXGBE_PROXY_SUPPORT_M BIT(2)
+ u8 apm_wol_support;
+ u8 acpi_prog_mthd;
+ u8 proxy_support;
+ bool sec_rev_disabled;
+ bool update_disabled;
+ bool nvm_unified_update;
+ bool netlist_auth;
+#define IXGBE_NVM_MGMT_SEC_REV_DISABLED BIT(0)
+#define IXGBE_NVM_MGMT_UPDATE_DISABLED BIT(1)
+#define IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
+#define IXGBE_NVM_MGMT_NETLIST_AUTH_SUPPORT BIT(5)
+ bool no_drop_policy_support;
+ /* PCIe reset avoidance */
+ bool pcie_reset_avoidance; /* false: not supported, true: supported */
+ /* Post update reset restriction */
+ bool reset_restrict_support; /* false: not supported, true: supported */
+
+ /* External topology device images within the NVM */
+#define IXGBE_EXT_TOPO_DEV_IMG_COUNT 4
+ u32 ext_topo_dev_img_ver_high[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+ u32 ext_topo_dev_img_ver_low[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+ u8 ext_topo_dev_img_part_num[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S 8
+#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M \
+ MAKEMASK(0xFF, IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S)
+ bool ext_topo_dev_img_load_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN BIT(0)
+ bool ext_topo_dev_img_prog_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1)
+ /* Support for OROM update in Recovery Mode. */
+ bool orom_recovery_update;
+ bool next_cluster_id_support;
+};
+
+#pragma pack(1)
+struct ixgbe_orom_civd_info {
+ u8 signature[4]; /* Must match ASCII '$CIV' characters */
+ u8 checksum; /* Simple modulo 256 sum of all structure bytes must equal 0 */
+ __le32 combo_ver; /* Combo Image Version number */
+ u8 combo_name_len; /* Length of the unicode combo image version string, max of 32 */
+ __le16 combo_name[32]; /* Unicode string representing the Combo Image version */
+};
+#pragma pack()
+
+/* Function specific capabilities */
+struct ixgbe_hw_func_caps {
+ struct ixgbe_hw_common_caps common_cap;
+ u32 num_allocd_vfs; /* Number of allocated VFs */
+ u32 vf_base_id; /* Logical ID of the first VF */
+ u32 guar_num_vsi;
+ bool no_drop_policy_ena;
+};
+
+/* Device wide capabilities */
+struct ixgbe_hw_dev_caps {
+ struct ixgbe_hw_common_caps common_cap;
+ u32 num_vfs_exposed; /* Total number of VFs exposed */
+ u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
+ u32 num_flow_director_fltr; /* Number of FD filters available */
+ u32 num_funcs;
+};
+
+/* ACI event information */
+struct ixgbe_aci_event {
+ struct ixgbe_aci_desc desc;
+ u16 msg_len;
+ u16 buf_len;
+ u8 *msg_buf;
+};
+
+struct ixgbe_aci_info {
+ enum ixgbe_aci_err last_status; /* last status of sent admin command */
+ struct ixgbe_lock lock; /* admin command interface lock */
+};
+
+/* Minimum Security Revision information */
+struct ixgbe_minsrev_info {
+ u32 nvm;
+ u32 orom;
+ u8 nvm_valid : 1;
+ u8 orom_valid : 1;
+};
+
+/* Enumeration of which flash bank is desired to read from, either the active
+ * bank or the inactive bank. Used to abstract 1st and 2nd bank notion from
+ * code which just wants to read the active or inactive flash bank.
+ */
+enum ixgbe_bank_select {
+ IXGBE_ACTIVE_FLASH_BANK,
+ IXGBE_INACTIVE_FLASH_BANK,
+};
+
+/* Option ROM version information */
+struct ixgbe_orom_info {
+ u8 major; /* Major version of OROM */
+ u8 patch; /* Patch version of OROM */
+ u16 build; /* Build version of OROM */
+ u32 srev; /* Security revision */
+};
+
+/* NVM version information */
+struct ixgbe_nvm_info {
+ u32 eetrack;
+ u32 srev;
+ u8 major;
+ u8 minor;
+};
+
+/* netlist version information */
+struct ixgbe_netlist_info {
+ u32 major; /* major high/low */
+ u32 minor; /* minor high/low */
+ u32 type; /* type high/low */
+ u32 rev; /* revision high/low */
+ u32 hash; /* SHA-1 hash word */
+ u16 cust_ver; /* customer version */
+};
+
+/* Enumeration of possible flash banks for the NVM, OROM, and Netlist modules
+ * of the flash image.
+ */
+enum ixgbe_flash_bank {
+ IXGBE_INVALID_FLASH_BANK,
+ IXGBE_1ST_FLASH_BANK,
+ IXGBE_2ND_FLASH_BANK,
+};
+
+/* information for accessing NVM, OROM, and Netlist flash banks */
+struct ixgbe_bank_info {
+ u32 nvm_ptr; /* Pointer to 1st NVM bank */
+ u32 nvm_size; /* Size of NVM bank */
+ u32 orom_ptr; /* Pointer to 1st OROM bank */
+ u32 orom_size; /* Size of OROM bank */
+ u32 netlist_ptr; /* Pointer to 1st Netlist bank */
+ u32 netlist_size; /* Size of Netlist bank */
+ enum ixgbe_flash_bank nvm_bank; /* Active NVM bank */
+ enum ixgbe_flash_bank orom_bank; /* Active OROM bank */
+ enum ixgbe_flash_bank netlist_bank; /* Active Netlist bank */
+};
+
+/* Flash Chip Information */
+struct ixgbe_flash_info {
+ struct ixgbe_orom_info orom; /* Option ROM version info */
+ struct ixgbe_nvm_info nvm; /* NVM version information */
+ struct ixgbe_netlist_info netlist; /* Netlist version info */
+ struct ixgbe_bank_info banks; /* Flash Bank information */
+ u16 sr_words; /* Shadow RAM size in words */
+ u32 flash_size; /* Size of available flash in bytes */
+ u8 blank_nvm_mode; /* is NVM empty (no FW present) */
+};
+
+#define IXGBE_NVM_CMD_READ 0x0000000B
+#define IXGBE_NVM_CMD_WRITE 0x0000000C
+
+/* NVM Access command */
+struct ixgbe_nvm_access_cmd {
+ u32 command; /* NVM command: READ or WRITE */
+ u32 offset; /* Offset to read/write, in bytes */
+ u32 data_size; /* Size of data field, in bytes */
+};
+
+/* NVM Access data */
+struct ixgbe_nvm_access_data {
+ u32 regval; /* Storage for register value */
+};
+
+#endif /* _IXGBE_TYPE_E610_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_vf.c b/sys/dev/ixgbe/ixgbe_vf.c
index cac3c6b5e5e7..4e48f7f33c9d 100644
--- a/sys/dev/ixgbe/ixgbe_vf.c
+++ b/sys/dev/ixgbe/ixgbe_vf.c
@@ -656,7 +656,8 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
break;
case IXGBE_LINKS_SPEED_100_82599:
*speed = IXGBE_LINK_SPEED_100_FULL;
- if (hw->mac.type == ixgbe_mac_X550_vf) {
+ if (hw->mac.type == ixgbe_mac_X550_vf ||
+ hw->mac.type == ixgbe_mac_E610_vf) {
if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
*speed = IXGBE_LINK_SPEED_5GB_FULL;
}
diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c
index 60e66aeaf579..43c3af056b67 100644
--- a/sys/dev/ixl/if_ixl.c
+++ b/sys/dev/ixl/if_ixl.c
@@ -1151,13 +1151,20 @@ ixl_if_enable_intr(if_ctx_t ctx)
struct ixl_pf *pf = iflib_get_softc(ctx);
struct ixl_vsi *vsi = &pf->vsi;
struct i40e_hw *hw = vsi->hw;
- struct ixl_rx_queue *que = vsi->rx_queues;
+ struct ixl_rx_queue *rx_que = vsi->rx_queues;
ixl_enable_intr0(hw);
/* Enable queue interrupts */
- for (int i = 0; i < vsi->num_rx_queues; i++, que++)
- /* TODO: Queue index parameter is probably wrong */
- ixl_enable_queue(hw, que->rxr.me);
+ if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
+ for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
+ ixl_enable_queue(hw, rx_que->rxr.me);
+ } else {
+ /*
+ * Set PFINT_LNKLST0 FIRSTQ_INDX to 0x0 to enable
+ * triggering interrupts by queues.
+ */
+ wr32(hw, I40E_PFINT_LNKLST0, 0x0);
+ }
}
/*
@@ -1175,11 +1182,13 @@ ixl_if_disable_intr(if_ctx_t ctx)
if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
- ixl_disable_queue(hw, rx_que->msix - 1);
+ ixl_disable_queue(hw, rx_que->rxr.me);
} else {
- // Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF
- // stops queues from triggering interrupts
- wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
+ /*
+ * Set PFINT_LNKLST0 FIRSTQ_INDX to End of List (0x7FF)
+ * to stop queues from triggering interrupts.
+ */
+ wr32(hw, I40E_PFINT_LNKLST0, IXL_QUEUE_EOL);
}
}
diff --git a/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c b/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c
index 978e5f25ceaf..cc0bc1f3fcd2 100644
--- a/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c
+++ b/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c
@@ -120,7 +120,7 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
switch (attrs->dir) {
case IPSEC_DIR_OUTBOUND:
- if (attrs->replay_esn.replay_window != 0)
+ if (attrs->replay_esn.trigger)
MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
else
MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_MODE);
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c b/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
index 6e24395b5577..c45f02cdaf42 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
@@ -1783,8 +1783,8 @@ mlx5e_add_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
el->refcount++;
if (el->installed)
return (0);
- }
- el = mlx5e_vxlan_alloc_db_el(priv, proto, port);
+ } else
+ el = mlx5e_vxlan_alloc_db_el(priv, proto, port);
if ((if_getcapenable(priv->ifp) & IFCAP_VXLAN_HWCSUM) != 0) {
err = mlx5e_add_vxlan_rule_from_db(priv, el);
diff --git a/sys/dev/mpr/mpr.c b/sys/dev/mpr/mpr.c
index d1c572e40669..262d6b58b705 100644
--- a/sys/dev/mpr/mpr.c
+++ b/sys/dev/mpr/mpr.c
@@ -1729,6 +1729,7 @@ mpr_get_tunables(struct mpr_softc *sc)
sc->enable_ssu = MPR_SSU_ENABLE_SSD_DISABLE_HDD;
sc->spinup_wait_time = DEFAULT_SPINUP_WAIT;
sc->use_phynum = 1;
+ sc->encl_min_slots = 0;
sc->max_reqframes = MPR_REQ_FRAMES;
sc->max_prireqframes = MPR_PRI_REQ_FRAMES;
sc->max_replyframes = MPR_REPLY_FRAMES;
@@ -1748,6 +1749,7 @@ mpr_get_tunables(struct mpr_softc *sc)
TUNABLE_INT_FETCH("hw.mpr.enable_ssu", &sc->enable_ssu);
TUNABLE_INT_FETCH("hw.mpr.spinup_wait_time", &sc->spinup_wait_time);
TUNABLE_INT_FETCH("hw.mpr.use_phy_num", &sc->use_phynum);
+ TUNABLE_INT_FETCH("hw.mpr.encl_min_slots", &sc->encl_min_slots);
TUNABLE_INT_FETCH("hw.mpr.max_reqframes", &sc->max_reqframes);
TUNABLE_INT_FETCH("hw.mpr.max_prireqframes", &sc->max_prireqframes);
TUNABLE_INT_FETCH("hw.mpr.max_replyframes", &sc->max_replyframes);
@@ -1797,6 +1799,10 @@ mpr_get_tunables(struct mpr_softc *sc)
device_get_unit(sc->mpr_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->use_phynum);
+ snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.encl_min_slots",
+ device_get_unit(sc->mpr_dev));
+ TUNABLE_INT_FETCH(tmpstr, &sc->encl_min_slots);
+
snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_reqframes",
device_get_unit(sc->mpr_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->max_reqframes);
@@ -1951,6 +1957,10 @@ mpr_setup_sysctl(struct mpr_softc *sc)
SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "prp_page_alloc_fail", CTLFLAG_RD,
&sc->prp_page_alloc_fail, "PRP page allocation failures");
+
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "encl_min_slots", CTLFLAG_RW, &sc->encl_min_slots, 0,
+ "force enclosure minimum slots");
}
static struct mpr_debug_string {
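The new knob follows the existing mpr(4) tunable conventions: it can be seeded at boot globally (hw.mpr.encl_min_slots) or per adapter (dev.mpr.<unit>.encl_min_slots), both fetched above, and since the sysctl is created CTLFLAG_RW it can also be changed at runtime under the same dev.mpr.<unit> node.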
diff --git a/sys/dev/mpr/mpr_mapping.c b/sys/dev/mpr/mpr_mapping.c
index f9a9ac1c53d0..38aa4dfc7ef2 100644
--- a/sys/dev/mpr/mpr_mapping.c
+++ b/sys/dev/mpr/mpr_mapping.c
@@ -2785,6 +2785,8 @@ mpr_mapping_enclosure_dev_status_change_event(struct mpr_softc *sc,
* DPM, if it's being used.
*/
if (enc_idx != MPR_ENCTABLE_BAD_IDX) {
+ u16 new_num_slots;
+
et_entry = &sc->enclosure_table[enc_idx];
if (et_entry->init_complete &&
!et_entry->missing_count) {
@@ -2796,6 +2798,17 @@ mpr_mapping_enclosure_dev_status_change_event(struct mpr_softc *sc,
et_entry->enc_handle = le16toh(event_data->
EnclosureHandle);
et_entry->start_slot = le16toh(event_data->StartSlot);
+ new_num_slots = le16toh(event_data->NumSlots);
+ if (new_num_slots < sc->encl_min_slots) {
+ mpr_dprint(sc, MPR_MAPPING, "%s: Enclosure %d num_slots %d, overriding with %d.\n",
+ __func__, enc_idx, new_num_slots, sc->encl_min_slots);
+ new_num_slots = sc->encl_min_slots;
+ }
+ if (et_entry->num_slots != new_num_slots) {
+ mpr_dprint(sc, MPR_MAPPING, "%s: Enclosure %d old num_slots %d, new %d.\n",
+			    __func__, enc_idx, et_entry->num_slots, new_num_slots);
+ et_entry->num_slots = new_num_slots;
+ }
saved_phy_bits = et_entry->phy_bits;
et_entry->phy_bits |= le32toh(event_data->PhyBits);
if (saved_phy_bits != et_entry->phy_bits)
@@ -2858,6 +2871,11 @@ mpr_mapping_enclosure_dev_status_change_event(struct mpr_softc *sc,
et_entry->start_index = MPR_MAPTABLE_BAD_IDX;
et_entry->dpm_entry_num = MPR_DPM_BAD_IDX;
et_entry->num_slots = le16toh(event_data->NumSlots);
+ if (et_entry->num_slots < sc->encl_min_slots) {
+ mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: Enclosure %d num_slots is %d, overriding with %d.\n",
+ __func__, enc_idx, et_entry->num_slots, sc->encl_min_slots);
+ et_entry->num_slots = sc->encl_min_slots;
+ }
et_entry->start_slot = le16toh(event_data->StartSlot);
et_entry->phy_bits = le32toh(event_data->PhyBits);
}
diff --git a/sys/dev/mpr/mprvar.h b/sys/dev/mpr/mprvar.h
index 0f1743f4266e..93f3fbffe079 100644
--- a/sys/dev/mpr/mprvar.h
+++ b/sys/dev/mpr/mprvar.h
@@ -366,6 +366,7 @@ struct mpr_softc {
int spinup_wait_time;
int use_phynum;
int dump_reqs_alltypes;
+ int encl_min_slots;
uint64_t chain_alloc_fail;
uint64_t prp_page_alloc_fail;
struct sysctl_ctx_list sysctl_ctx;
diff --git a/sys/dev/mwl/if_mwl.c b/sys/dev/mwl/if_mwl.c
index 2570cbce525b..c885968dfe15 100644
--- a/sys/dev/mwl/if_mwl.c
+++ b/sys/dev/mwl/if_mwl.c
@@ -1797,7 +1797,7 @@ mwl_updateslot(struct ieee80211com *ic)
return;
/*
- * Calculate the ERP flags. The firwmare will use
+ * Calculate the ERP flags. The firmware will use
* this to carry out the appropriate measures.
*/
prot = 0;
@@ -4017,7 +4017,7 @@ mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
- if (ni->ni_chw != IEEE80211_STA_RX_BW_40)
+ if (ni->ni_chw != NET80211_STA_RX_BW_40)
pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
}
return pi;
diff --git a/sys/dev/netmap/if_ptnet.c b/sys/dev/netmap/if_ptnet.c
index bf14bfdb73ea..9c06f7fec530 100644
--- a/sys/dev/netmap/if_ptnet.c
+++ b/sys/dev/netmap/if_ptnet.c
@@ -27,8 +27,9 @@
/* Driver for ptnet paravirtualized network device. */
#include <sys/cdefs.h>
+#include "opt_inet.h"
+#include "opt_inet6.h"
-#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
@@ -75,9 +76,6 @@
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
-#include "opt_inet.h"
-#include "opt_inet6.h"
-
#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
diff --git a/sys/dev/nvme/nvme.c b/sys/dev/nvme/nvme.c
index 84f365024f13..ead91f0d01fe 100644
--- a/sys/dev/nvme/nvme.c
+++ b/sys/dev/nvme/nvme.c
@@ -295,7 +295,6 @@ nvme_register_consumer(nvme_cons_ns_fn_t ns_fn, nvme_cons_ctrlr_fn_t ctrlr_fn,
void
nvme_unregister_consumer(struct nvme_consumer *consumer)
{
-
consumer->id = INVALID_CONSUMER_ID;
}
diff --git a/sys/dev/nvme/nvme_ahci.c b/sys/dev/nvme/nvme_ahci.c
index 888207a454f7..b06661226d34 100644
--- a/sys/dev/nvme/nvme_ahci.c
+++ b/sys/dev/nvme/nvme_ahci.c
@@ -124,6 +124,5 @@ bad:
static int
nvme_ahci_detach(device_t dev)
{
-
return (nvme_detach(dev));
}
diff --git a/sys/dev/nvme/nvme_ctrlr.c b/sys/dev/nvme/nvme_ctrlr.c
index fd7f00ced14b..fc912c1342f4 100644
--- a/sys/dev/nvme/nvme_ctrlr.c
+++ b/sys/dev/nvme/nvme_ctrlr.c
@@ -41,6 +41,9 @@
#include <sys/endian.h>
#include <sys/stdarg.h>
#include <vm/vm.h>
+#include <vm/vm_page.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_map.h>
#include "nvme_private.h"
#include "nvme_linux.h"
@@ -597,7 +600,6 @@ nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
static bool
is_log_page_id_valid(uint8_t page_id)
{
-
switch (page_id) {
case NVME_LOG_ERROR:
case NVME_LOG_HEALTH_INFORMATION:
@@ -653,7 +655,6 @@ static void
nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
uint8_t state)
{
-
if (state & NVME_CRIT_WARN_ST_AVAILABLE_SPARE)
nvme_printf(ctrlr, "SMART WARNING: available spare space below threshold\n");
@@ -781,7 +782,6 @@ nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
static void
nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
{
-
ctrlr->int_coal_time = 0;
TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
&ctrlr->int_coal_time);
@@ -1268,6 +1268,34 @@ nvme_ctrlr_shared_handler(void *arg)
nvme_mmio_write_4(ctrlr, intmc, 1);
}
+#define NVME_MAX_PAGES (int)(1024 / sizeof(vm_page_t))
+
+static int
+nvme_user_ioctl_req(vm_offset_t addr, size_t len, bool is_read,
+ vm_page_t *upages, int max_pages, int *npagesp, struct nvme_request **req,
+ nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ vm_prot_t prot = VM_PROT_READ;
+ int err;
+
+ if (is_read)
+ prot |= VM_PROT_WRITE; /* Device will write to host memory */
+ err = vm_fault_hold_pages(&curproc->p_vmspace->vm_map,
+ addr, len, prot, upages, max_pages, npagesp);
+ if (err != 0)
+ return (err);
+ *req = nvme_allocate_request_null(M_WAITOK, cb_fn, cb_arg);
+ (*req)->payload = memdesc_vmpages(upages, len, addr & PAGE_MASK);
+ (*req)->payload_valid = true;
+ return (0);
+}
+
+static void
+nvme_user_ioctl_free(vm_page_t *pages, int npage)
+{
+ vm_page_unhold_pages(pages, npage);
+}
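For scale: vm_page_t is a pointer type, so on LP64 platforms NVME_MAX_PAGES evaluates to 1024 / 8 = 128 held pages, bounding a user passthrough buffer at about 512 KiB with 4 KiB pages (slightly less usable payload when the buffer is not page-aligned).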
+
static void
nvme_pt_done(void *arg, const struct nvme_completion *cpl)
{
@@ -1290,30 +1318,28 @@ nvme_pt_done(void *arg, const struct nvme_completion *cpl)
int
nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
- struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
+ struct nvme_pt_command *pt, uint32_t nsid, int is_user,
int is_admin_cmd)
{
- struct nvme_request *req;
- struct mtx *mtx;
- struct buf *buf = NULL;
- int ret = 0;
+ struct nvme_request *req;
+ struct mtx *mtx;
+ int ret = 0;
+ int npages = 0;
+ vm_page_t upages[NVME_MAX_PAGES];
if (pt->len > 0) {
if (pt->len > ctrlr->max_xfer_size) {
- nvme_printf(ctrlr, "pt->len (%d) "
- "exceeds max_xfer_size (%d)\n", pt->len,
- ctrlr->max_xfer_size);
- return EIO;
+ nvme_printf(ctrlr,
+ "len (%d) exceeds max_xfer_size (%d)\n",
+ pt->len, ctrlr->max_xfer_size);
+ return (EIO);
}
- if (is_user_buffer) {
- buf = uma_zalloc(pbuf_zone, M_WAITOK);
- buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
- if (vmapbuf(buf, pt->buf, pt->len, 1) < 0) {
- ret = EFAULT;
- goto err;
- }
- req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
- M_WAITOK, nvme_pt_done, pt);
+ if (is_user) {
+ ret = nvme_user_ioctl_req((vm_offset_t)pt->buf, pt->len,
+ pt->is_read, upages, nitems(upages), &npages, &req,
+ nvme_pt_done, pt);
+ if (ret != 0)
+ return (ret);
} else
req = nvme_allocate_request_vaddr(pt->buf, pt->len,
M_WAITOK, nvme_pt_done, pt);
@@ -1347,11 +1373,8 @@ nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
mtx_unlock(mtx);
- if (buf != NULL) {
- vunmapbuf(buf);
-err:
- uma_zfree(pbuf_zone, buf);
- }
+ if (npages > 0)
+ nvme_user_ioctl_free(upages, npages);
return (ret);
}
@@ -1377,8 +1400,9 @@ nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr,
{
struct nvme_request *req;
struct mtx *mtx;
- struct buf *buf = NULL;
int ret = 0;
+ int npages = 0;
+ vm_page_t upages[NVME_MAX_PAGES];
/*
* We don't support metadata.
@@ -1389,7 +1413,7 @@ nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr,
if (npc->data_len > 0 && npc->addr != 0) {
if (npc->data_len > ctrlr->max_xfer_size) {
nvme_printf(ctrlr,
- "npc->data_len (%d) exceeds max_xfer_size (%d)\n",
+ "data_len (%d) exceeds max_xfer_size (%d)\n",
npc->data_len, ctrlr->max_xfer_size);
return (EIO);
}
@@ -1402,15 +1426,11 @@ nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr,
if ((npc->opcode & 0x3) == 3)
return (EINVAL);
if (is_user) {
- buf = uma_zalloc(pbuf_zone, M_WAITOK);
- buf->b_iocmd = npc->opcode & 1 ? BIO_WRITE : BIO_READ;
- if (vmapbuf(buf, (void *)(uintptr_t)npc->addr,
- npc->data_len, 1) < 0) {
- ret = EFAULT;
- goto err;
- }
- req = nvme_allocate_request_vaddr(buf->b_data,
- npc->data_len, M_WAITOK, nvme_npc_done, npc);
+ ret = nvme_user_ioctl_req(npc->addr, npc->data_len,
+ npc->opcode & 0x1, upages, nitems(upages), &npages,
+ &req, nvme_npc_done, npc);
+ if (ret != 0)
+ return (ret);
} else
req = nvme_allocate_request_vaddr(
(void *)(uintptr_t)npc->addr, npc->data_len,
@@ -1420,8 +1440,8 @@ nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr,
req->cmd.opc = npc->opcode;
req->cmd.fuse = npc->flags;
- req->cmd.rsvd2 = htole16(npc->cdw2);
- req->cmd.rsvd3 = htole16(npc->cdw3);
+ req->cmd.rsvd2 = htole32(npc->cdw2);
+ req->cmd.rsvd3 = htole32(npc->cdw3);
req->cmd.cdw10 = htole32(npc->cdw10);
req->cmd.cdw11 = htole32(npc->cdw11);
req->cmd.cdw12 = htole32(npc->cdw12);
@@ -1445,11 +1465,8 @@ nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr,
mtx_sleep(npc, mtx, PRIBIO, "nvme_npc", 0);
mtx_unlock(mtx);
- if (buf != NULL) {
- vunmapbuf(buf);
-err:
- uma_zfree(pbuf_zone, buf);
- }
+ if (npages > 0)
+ nvme_user_ioctl_free(upages, npages);
return (ret);
}
@@ -1776,7 +1793,6 @@ void
nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
struct nvme_request *req)
{
-
nvme_qpair_submit_request(&ctrlr->adminq, req);
}
@@ -1793,14 +1809,12 @@ nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
device_t
nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
{
-
return (ctrlr->dev);
}
const struct nvme_controller_data *
nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
{
-
return (&ctrlr->cdata);
}
@@ -1853,7 +1867,6 @@ nvme_ctrlr_suspend(struct nvme_controller *ctrlr)
int
nvme_ctrlr_resume(struct nvme_controller *ctrlr)
{
-
/*
* Can't touch failed controllers, so nothing to do to resume.
*/
diff --git a/sys/dev/nvme/nvme_ctrlr_cmd.c b/sys/dev/nvme/nvme_ctrlr_cmd.c
index 993a7718356d..5a44ed425acb 100644
--- a/sys/dev/nvme/nvme_ctrlr_cmd.c
+++ b/sys/dev/nvme/nvme_ctrlr_cmd.c
@@ -281,7 +281,6 @@ nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
struct nvme_error_information_entry *payload, uint32_t num_entries,
nvme_cb_fn_t cb_fn, void *cb_arg)
{
-
KASSERT(num_entries > 0, ("%s called with num_entries==0\n", __func__));
/* Controller's error log page entries is 0-based. */
@@ -302,7 +301,6 @@ nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
uint32_t nsid, struct nvme_health_information_page *payload,
nvme_cb_fn_t cb_fn, void *cb_arg)
{
-
nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_HEALTH_INFORMATION,
nsid, payload, sizeof(*payload), cb_fn, cb_arg);
}
@@ -311,7 +309,6 @@ void
nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
struct nvme_firmware_page *payload, nvme_cb_fn_t cb_fn, void *cb_arg)
{
-
nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_FIRMWARE_SLOT,
NVME_GLOBAL_NAMESPACE_TAG, payload, sizeof(*payload), cb_fn,
cb_arg);
diff --git a/sys/dev/nvme/nvme_ns.c b/sys/dev/nvme/nvme_ns.c
index 3f29382fe42f..e84d2066930e 100644
--- a/sys/dev/nvme/nvme_ns.c
+++ b/sys/dev/nvme/nvme_ns.c
@@ -129,7 +129,6 @@ static int
nvme_ns_close(struct cdev *dev __unused, int flags, int fmt __unused,
struct thread *td)
{
-
return (0);
}
@@ -231,7 +230,6 @@ nvme_ns_get_model_number(struct nvme_namespace *ns)
const struct nvme_namespace_data *
nvme_ns_get_data(struct nvme_namespace *ns)
{
-
return (&ns->data);
}
@@ -631,7 +629,6 @@ nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
void
nvme_ns_destruct(struct nvme_namespace *ns)
{
-
if (ns->cdev != NULL) {
if (ns->cdev->si_drv2 != NULL)
destroy_dev(ns->cdev->si_drv2);
diff --git a/sys/dev/nvme/nvme_pci.c b/sys/dev/nvme/nvme_pci.c
index 29b49b7df403..c07a68d2f0dc 100644
--- a/sys/dev/nvme/nvme_pci.c
+++ b/sys/dev/nvme/nvme_pci.c
@@ -151,7 +151,6 @@ nvme_pci_probe (device_t device)
static int
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
{
-
ctrlr->resource_id = PCIR_BAR(0);
ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
diff --git a/sys/dev/nvme/nvme_private.h b/sys/dev/nvme/nvme_private.h
index 36f00fedc48e..52f9e12f8f9a 100644
--- a/sys/dev/nvme/nvme_private.h
+++ b/sys/dev/nvme/nvme_private.h
@@ -459,8 +459,7 @@ int nvme_detach(device_t dev);
* vast majority of these without waiting for a tick plus scheduling delays. Since
* these are on startup, this drastically reduces startup time.
*/
-static __inline
-void
+static __inline void
nvme_completion_poll(struct nvme_completion_poll_status *status)
{
int timeout = ticks + 10 * hz;
diff --git a/sys/dev/nvme/nvme_qpair.c b/sys/dev/nvme/nvme_qpair.c
index bd8626e32209..4f2c44da3b4f 100644
--- a/sys/dev/nvme/nvme_qpair.c
+++ b/sys/dev/nvme/nvme_qpair.c
@@ -793,7 +793,6 @@ nvme_admin_qpair_destroy(struct nvme_qpair *qpair)
void
nvme_io_qpair_destroy(struct nvme_qpair *qpair)
{
-
nvme_qpair_destroy(qpair);
}
@@ -1202,7 +1201,6 @@ _nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
void
nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{
-
mtx_lock(&qpair->lock);
_nvme_qpair_submit_request(qpair, req);
mtx_unlock(&qpair->lock);
@@ -1226,7 +1224,6 @@ nvme_qpair_enable(struct nvme_qpair *qpair)
void
nvme_qpair_reset(struct nvme_qpair *qpair)
{
-
qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0;
/*
diff --git a/sys/dev/nvme/nvme_sim.c b/sys/dev/nvme/nvme_sim.c
index 4974bb718222..a06774a64761 100644
--- a/sys/dev/nvme/nvme_sim.c
+++ b/sys/dev/nvme/nvme_sim.c
@@ -301,7 +301,6 @@ nvme_sim_action(struct cam_sim *sim, union ccb *ccb)
static void
nvme_sim_poll(struct cam_sim *sim)
{
-
nvme_ctrlr_poll(sim2ctrlr(sim));
}
diff --git a/sys/dev/nvme/nvme_sysctl.c b/sys/dev/nvme/nvme_sysctl.c
index a5a44721f9f9..50d19e730a16 100644
--- a/sys/dev/nvme/nvme_sysctl.c
+++ b/sys/dev/nvme/nvme_sysctl.c
@@ -153,7 +153,6 @@ nvme_sysctl_timeout_period(SYSCTL_HANDLER_ARGS)
static void
nvme_qpair_reset_stats(struct nvme_qpair *qpair)
{
-
/*
* Reset the values. Due to sanity checks in
* nvme_qpair_process_completions, we reset the number of interrupt
diff --git a/sys/dev/nvme/nvme_util.c b/sys/dev/nvme/nvme_util.c
index 0a07653a7378..cb0ba729ac96 100644
--- a/sys/dev/nvme/nvme_util.c
+++ b/sys/dev/nvme/nvme_util.c
@@ -208,31 +208,33 @@ nvme_opcode_sbuf(bool admin, uint8_t opc, struct sbuf *sb)
if (s == NULL)
sbuf_printf(sb, "%s (%02x)", type, opc);
else
- sbuf_printf(sb, "%s", s);
+ sbuf_printf(sb, "%s (%02x)", s, opc);
}
void
nvme_sc_sbuf(const struct nvme_completion *cpl, struct sbuf *sb)
{
const char *s, *type;
- uint16_t status;
+ uint16_t status, sc, sct;
status = le16toh(cpl->status);
- switch (NVME_STATUS_GET_SCT(status)) {
+ sc = NVME_STATUS_GET_SC(status);
+ sct = NVME_STATUS_GET_SCT(status);
+ switch (sct) {
case NVME_SCT_GENERIC:
- s = generic_status[NVME_STATUS_GET_SC(status)];
+ s = generic_status[sc];
type = "GENERIC";
break;
case NVME_SCT_COMMAND_SPECIFIC:
- s = command_specific_status[NVME_STATUS_GET_SC(status)];
+ s = command_specific_status[sc];
type = "COMMAND SPECIFIC";
break;
case NVME_SCT_MEDIA_ERROR:
- s = media_error_status[NVME_STATUS_GET_SC(status)];
+ s = media_error_status[sc];
type = "MEDIA ERROR";
break;
case NVME_SCT_PATH_RELATED:
- s = path_related_status[NVME_STATUS_GET_SC(status)];
+ s = path_related_status[sc];
type = "PATH RELATED";
break;
case NVME_SCT_VENDOR_SPECIFIC:
@@ -246,12 +248,11 @@ nvme_sc_sbuf(const struct nvme_completion *cpl, struct sbuf *sb)
}
if (type == NULL)
- sbuf_printf(sb, "RESERVED (%02x/%02x)",
- NVME_STATUS_GET_SCT(status), NVME_STATUS_GET_SC(status));
+ sbuf_printf(sb, "RESERVED (%02x/%02x)", sct, sc);
else if (s == NULL)
- sbuf_printf(sb, "%s (%02x)", type, NVME_STATUS_GET_SC(status));
+ sbuf_printf(sb, "%s (%02x/%02x)", type, sct, sc);
else
- sbuf_printf(sb, "%s", s);
+ sbuf_printf(sb, "%s (%02x/%02x)", s, sct, sc);
}
void
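With both values always printed, a decoded completion is unambiguous; for example, a generic success would now render as "SUCCESS (00/00)" rather than the bare status string, and a reserved type still renders as "RESERVED (sct/sc)".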
diff --git a/sys/dev/nvmf/nvmf_tcp.c b/sys/dev/nvmf/nvmf_tcp.c
index 6ad5229f6043..e50d7ff48d2b 100644
--- a/sys/dev/nvmf/nvmf_tcp.c
+++ b/sys/dev/nvmf/nvmf_tcp.c
@@ -970,7 +970,7 @@ nvmf_tcp_handle_r2t(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_rxpdu *pdu)
}
/*
- * XXX: The spec does not specify how to handle R2T tranfers
+ * XXX: The spec does not specify how to handle R2T transfers
* out of range of the original command.
*/
data_len = le32toh(r2t->r2tl);
diff --git a/sys/dev/pci/pci_user.c b/sys/dev/pci/pci_user.c
index f68b5b7e71ff..9768030995e7 100644
--- a/sys/dev/pci/pci_user.c
+++ b/sys/dev/pci/pci_user.c
@@ -79,6 +79,9 @@ struct pci_conf32 {
u_int8_t pc_revid; /* chip revision ID */
char pd_name[PCI_MAXNAMELEN + 1]; /* device name */
u_int32_t pd_unit; /* device unit number */
+ int pd_numa_domain; /* device NUMA domain */
+ u_int32_t pc_reported_len;/* length of PCI data reported */
+ char pc_spare[64]; /* space for future fields */
};
struct pci_match_conf32 {
@@ -502,11 +505,58 @@ pci_conf_match_freebsd6_32(struct pci_match_conf_freebsd6_32 *matches, int num_m
#endif /* COMPAT_FREEBSD32 */
#endif /* !PRE7_COMPAT */
+#ifdef COMPAT_FREEBSD14
+struct pci_conf_freebsd14 {
+ struct pcisel pc_sel; /* domain+bus+slot+function */
+ u_int8_t pc_hdr; /* PCI header type */
+ u_int16_t pc_subvendor; /* card vendor ID */
+ u_int16_t pc_subdevice; /* card device ID, assigned by
+ card vendor */
+ u_int16_t pc_vendor; /* chip vendor ID */
+ u_int16_t pc_device; /* chip device ID, assigned by
+ chip vendor */
+ u_int8_t pc_class; /* chip PCI class */
+ u_int8_t pc_subclass; /* chip PCI subclass */
+ u_int8_t pc_progif; /* chip PCI programming interface */
+ u_int8_t pc_revid; /* chip revision ID */
+ char pd_name[PCI_MAXNAMELEN + 1]; /* device name */
+ u_long pd_unit; /* device unit number */
+};
+#define PCIOCGETCONF_FREEBSD14 _IOWR('p', 5, struct pci_conf_io)
+
+#ifdef COMPAT_FREEBSD32
+struct pci_conf_freebsd14_32 {
+ struct pcisel pc_sel; /* domain+bus+slot+function */
+ u_int8_t pc_hdr; /* PCI header type */
+ u_int16_t pc_subvendor; /* card vendor ID */
+ u_int16_t pc_subdevice; /* card device ID, assigned by
+ card vendor */
+ u_int16_t pc_vendor; /* chip vendor ID */
+ u_int16_t pc_device; /* chip device ID, assigned by
+ chip vendor */
+ u_int8_t pc_class; /* chip PCI class */
+ u_int8_t pc_subclass; /* chip PCI subclass */
+ u_int8_t pc_progif; /* chip PCI programming interface */
+ u_int8_t pc_revid; /* chip revision ID */
+ char pd_name[PCI_MAXNAMELEN + 1]; /* device name */
+ u_int32_t pd_unit; /* device unit number */
+};
+#define PCIOCGETCONF_FREEBSD14_32 \
+ _IOC_NEWTYPE(PCIOCGETCONF_FREEBSD14, struct pci_conf_io32)
+#endif /* COMPAT_FREEBSD32 */
+#endif /* COMPAT_FREEBSD14 */
+
union pci_conf_union {
struct pci_conf pc;
#ifdef COMPAT_FREEBSD32
struct pci_conf32 pc32;
#endif
+#ifdef COMPAT_FREEBSD14
+ struct pci_conf_freebsd14 pc14;
+#ifdef COMPAT_FREEBSD32
+ struct pci_conf_freebsd14_32 pc14_32;
+#endif
+#endif
#ifdef PRE7_COMPAT
struct pci_conf_freebsd6 pco;
#ifdef COMPAT_FREEBSD32
@@ -522,10 +572,16 @@ pci_conf_match(u_long cmd, struct pci_match_conf *matches, int num_matches,
switch (cmd) {
case PCIOCGETCONF:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+#endif
return (pci_conf_match_native(
(struct pci_match_conf *)matches, num_matches, match_buf));
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
return (pci_conf_match32((struct pci_match_conf32 *)matches,
num_matches, match_buf));
#endif
@@ -645,9 +701,15 @@ pci_match_conf_size(u_long cmd)
switch (cmd) {
case PCIOCGETCONF:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+#endif
return (sizeof(struct pci_match_conf));
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
return (sizeof(struct pci_match_conf32));
#endif
#ifdef PRE7_COMPAT
@@ -675,6 +737,14 @@ pci_conf_size(u_long cmd)
case PCIOCGETCONF32:
return (sizeof(struct pci_conf32));
#endif
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+ return (sizeof(struct pci_conf_freebsd14));
+#ifdef COMPAT_FREEBSD32
+ case PCIOCGETCONF_FREEBSD14_32:
+ return (sizeof(struct pci_conf_freebsd14_32));
+#endif
+#endif
#ifdef PRE7_COMPAT
case PCIOCGETCONF_FREEBSD6:
return (sizeof(struct pci_conf_freebsd6));
@@ -698,6 +768,9 @@ pci_conf_io_init(struct pci_conf_io *cio, caddr_t data, u_long cmd)
switch (cmd) {
case PCIOCGETCONF:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+#endif
#ifdef PRE7_COMPAT
case PCIOCGETCONF_FREEBSD6:
#endif
@@ -706,6 +779,9 @@ pci_conf_io_init(struct pci_conf_io *cio, caddr_t data, u_long cmd)
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
#ifdef PRE7_COMPAT
case PCIOCGETCONF_FREEBSD6_32:
#endif
@@ -739,6 +815,9 @@ pci_conf_io_update_data(const struct pci_conf_io *cio, caddr_t data,
switch (cmd) {
case PCIOCGETCONF:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+#endif
#ifdef PRE7_COMPAT
case PCIOCGETCONF_FREEBSD6:
#endif
@@ -751,6 +830,9 @@ pci_conf_io_update_data(const struct pci_conf_io *cio, caddr_t data,
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
#ifdef PRE7_COMPAT
case PCIOCGETCONF_FREEBSD6_32:
#endif
@@ -781,8 +863,17 @@ pci_conf_for_copyout(const struct pci_conf *pcp, union pci_conf_union *pcup,
pcup->pc = *pcp;
return;
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+ memcpy(&pcup->pc14, pcp, sizeof(pcup->pc14));
+ return;
+#endif
+
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
pcup->pc32.pc_sel = pcp->pc_sel;
pcup->pc32.pc_hdr = pcp->pc_hdr;
pcup->pc32.pc_subvendor = pcp->pc_subvendor;
@@ -796,8 +887,13 @@ pci_conf_for_copyout(const struct pci_conf *pcp, union pci_conf_union *pcup,
strlcpy(pcup->pc32.pd_name, pcp->pd_name,
sizeof(pcup->pc32.pd_name));
pcup->pc32.pd_unit = (uint32_t)pcp->pd_unit;
+ if (cmd == PCIOCGETCONF32) {
+ pcup->pc32.pd_numa_domain = pcp->pd_numa_domain;
+ pcup->pc32.pc_reported_len =
+ (uint32_t)offsetof(struct pci_conf32, pc_spare);
+ }
return;
-#endif
+#endif /* COMPAT_FREEBSD32 */
#ifdef PRE7_COMPAT
#ifdef COMPAT_FREEBSD32
@@ -1024,7 +1120,7 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t
struct pci_map *pm;
struct pci_bar_mmap *pbm;
size_t confsz, iolen;
- int error, ionum, i, num_patterns;
+ int domain, error, ionum, i, num_patterns;
union pci_conf_union pcu;
#ifdef PRE7_COMPAT
struct pci_io iodata;
@@ -1044,6 +1140,12 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
#endif
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+#ifdef COMPAT_FREEBSD32
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
+#endif
#ifdef PRE7_COMPAT
case PCIOCGETCONF_FREEBSD6:
#ifdef COMPAT_FREEBSD32
@@ -1069,6 +1171,12 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
#endif
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+#ifdef COMPAT_FREEBSD32
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
+#endif
#ifdef PRE7_COMPAT
case PCIOCGETCONF_FREEBSD6:
#ifdef COMPAT_FREEBSD32
@@ -1201,6 +1309,12 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t
dinfo->conf.pd_unit = 0;
}
+ if (dinfo->cfg.dev != NULL &&
+ bus_get_domain(dinfo->cfg.dev, &domain) == 0)
+ dinfo->conf.pd_numa_domain = domain;
+ else
+ dinfo->conf.pd_numa_domain = 0;
+
if (pattern_buf == NULL ||
pci_conf_match(cmd, pattern_buf, num_patterns,
&dinfo->conf) == 0) {
@@ -1217,6 +1331,9 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t
break;
}
+ dinfo->conf.pc_reported_len =
+ offsetof(struct pci_conf, pc_spare);
+
pci_conf_for_copyout(&dinfo->conf, &pcu, cmd);
error = copyout(&pcu,
(caddr_t)cio->matches +
diff --git a/sys/dev/psci/smccc_trng.c b/sys/dev/psci/smccc_trng.c
index ab98837d3841..8a2e5508ef48 100644
--- a/sys/dev/psci/smccc_trng.c
+++ b/sys/dev/psci/smccc_trng.c
@@ -58,7 +58,7 @@ static device_attach_t trng_attach;
static unsigned trng_read(void *, unsigned);
-static struct random_source random_trng = {
+static const struct random_source random_trng = {
.rs_ident = "Arm SMCCC TRNG",
.rs_source = RANDOM_PURE_ARM_TRNG,
.rs_read = trng_read,
diff --git a/sys/dev/puc/pucdata.c b/sys/dev/puc/pucdata.c
index e911a407cca9..436af76001da 100644
--- a/sys/dev/puc/pucdata.c
+++ b/sys/dev/puc/pucdata.c
@@ -64,6 +64,7 @@ static puc_config_f puc_config_quatech;
static puc_config_f puc_config_syba;
static puc_config_f puc_config_siig;
static puc_config_f puc_config_sunix;
+static puc_config_f puc_config_systembase;
static puc_config_f puc_config_timedia;
static puc_config_f puc_config_titan;
@@ -1705,6 +1706,23 @@ const struct puc_cfg puc_pci_devices[] = {
PUC_PORT_4S, 0x10, 0, 8,
.config_function = puc_config_icbook
},
+
+ /*
+ * Systembase cards using SB16C1050 UARTs:
+ */
+ { 0x14a1, 0x0008, 0x14a1, 0x0008,
+ "Systembase SB16C1058",
+ DEFAULT_RCLK * 8,
+ PUC_PORT_8S, 0x10, 0, 8,
+ .config_function = puc_config_systembase,
+ },
+ { 0x14a1, 0x0004, 0x14a1, 0x0004,
+ "Systembase SB16C1054",
+ DEFAULT_RCLK * 8,
+ PUC_PORT_4S, 0x10, 0, 8,
+ .config_function = puc_config_systembase,
+ },
+
{ 0xffff, 0, 0xffff, 0, NULL, 0 }
};
@@ -2294,3 +2312,28 @@ puc_config_titan(struct puc_softc *sc __unused, enum puc_cfg_cmd cmd,
}
return (ENXIO);
}
+
+static int
+puc_config_systembase(struct puc_softc *sc __unused,
+ enum puc_cfg_cmd cmd, int port, intptr_t *res)
+{
+ struct puc_bar *bar;
+
+ switch (cmd) {
+ case PUC_CFG_SETUP:
+ bar = puc_get_bar(sc, 0x14);
+ if (bar == NULL)
+ return (ENXIO);
+
+ /*
+ * The Systembase SB16C1058 (and probably other devices
+	 * based on the SB16C1050 UART core) requires poking a
+ * register in the *other* RID to turn on interrupts.
+ */
+ bus_write_1(bar->b_res, /* OPT_IMRREG0 */ 0xc, 0xff);
+ return (0);
+ default:
+ break;
+ }
+ return (ENXIO);
+}
diff --git a/sys/dev/qat/qat_common/adf_gen4_timer.c b/sys/dev/qat/qat_common/adf_gen4_timer.c
index 96b65cdff181..2c74d09418e5 100644
--- a/sys/dev/qat/qat_common/adf_gen4_timer.c
+++ b/sys/dev/qat/qat_common/adf_gen4_timer.c
@@ -57,7 +57,7 @@ end:
static void
timer_handler(struct timer_list *tl)
{
- struct adf_int_timer *int_timer = from_timer(int_timer, tl, timer);
+ struct adf_int_timer *int_timer = timer_container_of(int_timer, tl, timer);
struct adf_accel_dev *accel_dev = int_timer->accel_dev;
struct adf_hb_timer_data *hb_timer_data = NULL;
u64 timeout_val = adf_get_next_timeout(int_timer->timeout_val);
diff --git a/sys/dev/qcom_rnd/qcom_rnd.c b/sys/dev/qcom_rnd/qcom_rnd.c
index fdd0b553523e..a5ece7e00f28 100644
--- a/sys/dev/qcom_rnd/qcom_rnd.c
+++ b/sys/dev/qcom_rnd/qcom_rnd.c
@@ -63,7 +63,7 @@ static int qcom_rnd_detach(device_t);
static int qcom_rnd_harvest(struct qcom_rnd_softc *, void *, size_t *);
static unsigned qcom_rnd_read(void *, unsigned);
-static struct random_source random_qcom_rnd = {
+static const struct random_source random_qcom_rnd = {
.rs_ident = "Qualcomm Entropy Adapter",
.rs_source = RANDOM_PURE_QUALCOMM,
.rs_read = qcom_rnd_read,
diff --git a/sys/dev/random/armv8rng.c b/sys/dev/random/armv8rng.c
index 61698bfff820..524d80317681 100644
--- a/sys/dev/random/armv8rng.c
+++ b/sys/dev/random/armv8rng.c
@@ -44,7 +44,7 @@
static u_int random_rndr_read(void *, u_int);
static bool has_rndr;
-static struct random_source random_armv8_rndr = {
+static const struct random_source random_armv8_rndr = {
.rs_ident = "Armv8 rndr RNG",
.rs_source = RANDOM_PURE_ARMV8,
.rs_read = random_rndr_read,
diff --git a/sys/dev/random/darn.c b/sys/dev/random/darn.c
index a66754e095fb..9bb4991df82f 100644
--- a/sys/dev/random/darn.c
+++ b/sys/dev/random/darn.c
@@ -56,7 +56,7 @@
static u_int random_darn_read(void *, u_int);
-static struct random_source random_darn = {
+static const struct random_source random_darn = {
.rs_ident = "PowerISA DARN random number generator",
.rs_source = RANDOM_PURE_DARN,
.rs_read = random_darn_read
diff --git a/sys/dev/random/ivy.c b/sys/dev/random/ivy.c
index 05474d977276..fa1e4831f1b9 100644
--- a/sys/dev/random/ivy.c
+++ b/sys/dev/random/ivy.c
@@ -51,7 +51,7 @@
static bool has_rdrand, has_rdseed;
static u_int random_ivy_read(void *, u_int);
-static struct random_source random_ivy = {
+static const struct random_source random_ivy = {
.rs_ident = "Intel Secure Key RNG",
.rs_source = RANDOM_PURE_RDRAND,
.rs_read = random_ivy_read
diff --git a/sys/dev/random/nehemiah.c b/sys/dev/random/nehemiah.c
index f76071290b8f..56f144169dae 100644
--- a/sys/dev/random/nehemiah.c
+++ b/sys/dev/random/nehemiah.c
@@ -44,7 +44,7 @@
static u_int random_nehemiah_read(void *, u_int);
-static struct random_source random_nehemiah = {
+static const struct random_source random_nehemiah = {
.rs_ident = "VIA Nehemiah Padlock RNG",
.rs_source = RANDOM_PURE_NEHEMIAH,
.rs_read = random_nehemiah_read
diff --git a/sys/dev/random/random_harvestq.c b/sys/dev/random/random_harvestq.c
index c7762967c4fb..84ec174bd08e 100644
--- a/sys/dev/random/random_harvestq.c
+++ b/sys/dev/random/random_harvestq.c
@@ -110,7 +110,7 @@ __read_frequently u_int hc_source_mask;
struct random_sources {
CK_LIST_ENTRY(random_sources) rrs_entries;
- struct random_source *rrs_source;
+ const struct random_source *rrs_source;
};
static CK_LIST_HEAD(sources_head, random_sources) source_list =
@@ -493,9 +493,9 @@ random_healthtest_init(enum random_entropy_source source)
* The RCT limit comes from the formula in section 4.4.1.
*
* The APT cutoff is calculated using the formula in section 4.4.2
- * footnote 10 with the window size changed from 512 to 511, since the
- * test as written counts the number of samples equal to the first
- * sample in the window, and thus tests W-1 samples.
+ * footnote 10 with the number of Bernoulli trials changed from W to
+ * W-1, since the test as written counts the number of samples equal to
+ * the first sample in the window, and thus tests W-1 samples.
*/
ht->ht_rct_limit = 35;
ht->ht_apt_cutoff = 330;
@@ -849,7 +849,7 @@ random_harvest_deregister_source(enum random_entropy_source source)
}
void
-random_source_register(struct random_source *rsource)
+random_source_register(const struct random_source *rsource)
{
struct random_sources *rrs;
@@ -868,7 +868,7 @@ random_source_register(struct random_source *rsource)
}
void
-random_source_deregister(struct random_source *rsource)
+random_source_deregister(const struct random_source *rsource)
{
struct random_sources *rrs = NULL;
diff --git a/sys/dev/random/randomdev.h b/sys/dev/random/randomdev.h
index e1c9ac7b680d..6d742447ea8b 100644
--- a/sys/dev/random/randomdev.h
+++ b/sys/dev/random/randomdev.h
@@ -103,8 +103,8 @@ struct random_source {
random_source_read_t *rs_read;
};
-void random_source_register(struct random_source *);
-void random_source_deregister(struct random_source *);
+void random_source_register(const struct random_source *);
+void random_source_deregister(const struct random_source *);
#endif /* _KERNEL */
diff --git a/sys/dev/rtwn/if_rtwn.c b/sys/dev/rtwn/if_rtwn.c
index 7a547e13cafa..25287f222270 100644
--- a/sys/dev/rtwn/if_rtwn.c
+++ b/sys/dev/rtwn/if_rtwn.c
@@ -268,6 +268,9 @@ rtwn_attach(struct rtwn_softc *sc)
ic->ic_flags_ext |= IEEE80211_FEXT_WATCHDOG;
#endif
+ /* Enable seqno offload */
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
/* Adjust capabilities. */
rtwn_adj_devcaps(sc);
diff --git a/sys/dev/rtwn/if_rtwn_tx.c b/sys/dev/rtwn/if_rtwn_tx.c
index 2c9c246dfbb4..fa7f35f2de83 100644
--- a/sys/dev/rtwn/if_rtwn_tx.c
+++ b/sys/dev/rtwn/if_rtwn_tx.c
@@ -183,6 +183,10 @@ rtwn_tx_data(struct rtwn_softc *sc, struct ieee80211_node *ni,
}
}
+ /* seqno allocate, only if AMPDU isn't running */
+ if ((m->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m);
+
cipher = IEEE80211_CIPHER_NONE;
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m);
@@ -229,6 +233,10 @@ rtwn_tx_raw(struct rtwn_softc *sc, struct ieee80211_node *ni,
uint8_t type;
u_int cipher;
+ /* seqno allocate, only if AMPDU isn't running */
+ if ((m->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m);
+
/* Encrypt the frame if need be. */
cipher = IEEE80211_CIPHER_NONE;
if (params->ibp_flags & IEEE80211_BPF_CRYPTO) {
diff --git a/sys/dev/rtwn/rtl8192c/r92c_tx.c b/sys/dev/rtwn/rtl8192c/r92c_tx.c
index 6b013de0c536..ba2f60bd9295 100644
--- a/sys/dev/rtwn/rtl8192c/r92c_tx.c
+++ b/sys/dev/rtwn/rtl8192c/r92c_tx.c
@@ -452,11 +452,10 @@ r92c_fill_tx_desc(struct rtwn_softc *sc, struct ieee80211_node *ni,
} else {
uint16_t seqno;
- if (m->m_flags & M_AMPDU_MPDU) {
- seqno = ni->ni_txseqs[tid] % IEEE80211_SEQ_RANGE;
- ni->ni_txseqs[tid]++;
- } else
- seqno = M_SEQNO_GET(m) % IEEE80211_SEQ_RANGE;
+ if (m->m_flags & M_AMPDU_MPDU)
+ ieee80211_output_seqno_assign(ni, -1, m);
+
+ seqno = M_SEQNO_GET(m);
/* Set sequence number. */
txd->txdseq = htole16(seqno);
@@ -511,7 +510,7 @@ r92c_fill_tx_desc_raw(struct rtwn_softc *sc, struct ieee80211_node *ni,
rtwn_r92c_tx_setup_hwseq(sc, txd);
} else {
/* Set sequence number. */
- txd->txdseq |= htole16(M_SEQNO_GET(m) % IEEE80211_SEQ_RANGE);
+ txd->txdseq |= htole16(M_SEQNO_GET(m));
}
}
diff --git a/sys/dev/rtwn/rtl8812a/r12a_tx.c b/sys/dev/rtwn/rtl8812a/r12a_tx.c
index acb238316559..6a7af0a9b674 100644
--- a/sys/dev/rtwn/rtl8812a/r12a_tx.c
+++ b/sys/dev/rtwn/rtl8812a/r12a_tx.c
@@ -101,12 +101,12 @@ r12a_tx_set_vht_bw(struct rtwn_softc *sc, void *buf, struct ieee80211_node *ni)
prim_chan = r12a_get_primary_channel(sc, ni->ni_chan);
- if (ieee80211_vht_check_tx_bw(ni, IEEE80211_STA_RX_BW_80)) {
+ if (ieee80211_vht_check_tx_bw(ni, NET80211_STA_RX_BW_80)) {
txd->txdw5 |= htole32(SM(R12A_TXDW5_DATA_BW,
R12A_TXDW5_DATA_BW80));
txd->txdw5 |= htole32(SM(R12A_TXDW5_DATA_PRIM_CHAN,
prim_chan));
- } else if (ieee80211_vht_check_tx_bw(ni, IEEE80211_STA_RX_BW_40)) {
+ } else if (ieee80211_vht_check_tx_bw(ni, NET80211_STA_RX_BW_40)) {
txd->txdw5 |= htole32(SM(R12A_TXDW5_DATA_BW,
R12A_TXDW5_DATA_BW40));
txd->txdw5 |= htole32(SM(R12A_TXDW5_DATA_PRIM_CHAN,
@@ -433,12 +433,9 @@ r12a_fill_tx_desc(struct rtwn_softc *sc, struct ieee80211_node *ni,
} else {
uint16_t seqno;
- if (m->m_flags & M_AMPDU_MPDU) {
- seqno = ni->ni_txseqs[tid];
- ni->ni_txseqs[tid]++;
- } else
- seqno = M_SEQNO_GET(m) % IEEE80211_SEQ_RANGE;
-
+ if (m->m_flags & M_AMPDU_MPDU)
+ ieee80211_output_seqno_assign(ni, -1, m);
+ seqno = M_SEQNO_GET(m);
/* Set sequence number. */
txd->txdw9 |= htole32(SM(R12A_TXDW9_SEQ, seqno));
}
@@ -493,8 +490,7 @@ r12a_fill_tx_desc_raw(struct rtwn_softc *sc, struct ieee80211_node *ni,
txd->txdw3 |= htole32(SM(R12A_TXDW3_SEQ_SEL, uvp->id));
} else {
/* Set sequence number. */
- txd->txdw9 |= htole32(SM(R12A_TXDW9_SEQ,
- M_SEQNO_GET(m) % IEEE80211_SEQ_RANGE));
+ txd->txdw9 |= htole32(SM(R12A_TXDW9_SEQ, M_SEQNO_GET(m)));
}
}
diff --git a/sys/dev/sound/pci/hda/hdac.c b/sys/dev/sound/pci/hda/hdac.c
index 900578b73de4..90cd74d28b3d 100644
--- a/sys/dev/sound/pci/hda/hdac.c
+++ b/sys/dev/sound/pci/hda/hdac.c
@@ -1773,17 +1773,17 @@ hdac_detach(device_t dev)
struct hdac_softc *sc = device_get_softc(dev);
int i, error;
+ callout_drain(&sc->poll_callout);
+ hdac_irq_free(sc);
+ taskqueue_drain(taskqueue_thread, &sc->unsolq_task);
+
error = bus_generic_detach(dev);
if (error != 0)
return (error);
hdac_lock(sc);
- callout_stop(&sc->poll_callout);
hdac_reset(sc, false);
hdac_unlock(sc);
- callout_drain(&sc->poll_callout);
- taskqueue_drain(taskqueue_thread, &sc->unsolq_task);
- hdac_irq_free(sc);
for (i = 0; i < sc->num_ss; i++)
hdac_dma_free(sc, &sc->streams[i].bdl);
@@ -2206,4 +2206,4 @@ static driver_t hdac_driver = {
sizeof(struct hdac_softc),
};
-DRIVER_MODULE(snd_hda, pci, hdac_driver, NULL, NULL);
+DRIVER_MODULE_ORDERED(snd_hda, pci, hdac_driver, NULL, NULL, SI_ORDER_ANY);
diff --git a/sys/dev/tpm/tpm_tis_core.c b/sys/dev/tpm/tpm_tis_core.c
index d8421f8156c9..4159de4daf3b 100644
--- a/sys/dev/tpm/tpm_tis_core.c
+++ b/sys/dev/tpm/tpm_tis_core.c
@@ -97,6 +97,7 @@ tpmtis_attach(device_t dev)
{
struct tpm_sc *sc;
int result;
+ int poll = 0;
sc = device_get_softc(dev);
sc->dev = dev;
@@ -105,6 +106,12 @@ tpmtis_attach(device_t dev)
sx_init(&sc->dev_lock, "TPM driver lock");
sc->buf = malloc(TPM_BUFSIZE, M_TPM20, M_WAITOK);
+ resource_int_value("tpm", device_get_unit(dev), "use_polling", &poll);
+ if (poll != 0) {
+ device_printf(dev, "Using poll method to get TPM operation status \n");
+ goto skip_irq;
+ }
+
sc->irq_rid = 0;
sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
RF_ACTIVE | RF_SHAREABLE);
diff --git a/sys/dev/uart/uart_bus_pci.c b/sys/dev/uart/uart_bus_pci.c
index 14ac213066b8..22af8ee8663c 100644
--- a/sys/dev/uart/uart_bus_pci.c
+++ b/sys/dev/uart/uart_bus_pci.c
@@ -141,6 +141,8 @@ static const struct pci_id pci_ns8250_ids[] = {
0x10, 16384000 },
{ 0x1415, 0xc120, 0xffff, 0, "Oxford Semiconductor OXPCIe952 PCIe 16950 UART",
0x10 },
+{ 0x14a1, 0x0008, 0x14a1, 0x0008, "Systembase SB16C1058",
+ 0x10, 8 * DEFAULT_RCLK, },
{ 0x14e4, 0x160a, 0xffff, 0, "Broadcom TruManage UART", 0x10,
128 * DEFAULT_RCLK, 2},
{ 0x14e4, 0x4344, 0xffff, 0, "Sony Ericsson GC89 PC Card", 0x10},
diff --git a/sys/dev/uart/uart_dev_ns8250.c b/sys/dev/uart/uart_dev_ns8250.c
index 56db0b26487b..c38d50e54ad8 100644
--- a/sys/dev/uart/uart_dev_ns8250.c
+++ b/sys/dev/uart/uart_dev_ns8250.c
@@ -495,6 +495,7 @@ UART_CLASS(uart_ns8250_class);
static struct acpi_spcr_compat_data acpi_spcr_compat_data[] = {
{ &uart_ns8250_class, ACPI_DBG2_16550_COMPATIBLE },
{ &uart_ns8250_class, ACPI_DBG2_16550_SUBSET },
+ { &uart_ns8250_class, ACPI_DBG2_16550_WITH_GAS },
{ NULL, 0 },
};
UART_ACPI_SPCR_CLASS(acpi_spcr_compat_data);
diff --git a/sys/dev/ufshci/ufshci.h b/sys/dev/ufshci/ufshci.h
index 9f0faaadeb57..b055d2d2d769 100644
--- a/sys/dev/ufshci/ufshci.h
+++ b/sys/dev/ufshci/ufshci.h
@@ -160,19 +160,19 @@ enum ufshci_data_direction {
UFSHCI_DATA_DIRECTION_RESERVED = 0b11,
};
-enum ufshci_overall_command_status {
- UFSHCI_OCS_SUCCESS = 0x0,
- UFSHCI_OCS_INVALID_COMMAND_TABLE_ATTRIBUTES = 0x01,
- UFSHCI_OCS_INVALID_PRDT_ATTRIBUTES = 0x02,
- UFSHCI_OCS_MISMATCH_DATA_BUFFER_SIZE = 0x03,
- UFSHCI_OCS_MISMATCH_RESPONSE_UPIU_SIZE = 0x04,
- UFSHCI_OCS_COMMUNICATION_FAILURE_WITHIN_UIC_LAYERS = 0x05,
- UFSHCI_OCS_ABORTED = 0x06,
- UFSHCI_OCS_HOST_CONTROLLER_FATAL_ERROR = 0x07,
- UFSHCI_OCS_DEVICE_FATAL_ERROR = 0x08,
- UFSHCI_OCS_INVALID_CRYPTO_CONFIGURATION = 0x09,
- UFSHCI_OCS_GENERAL_CRYPTO_ERROR = 0x0A,
- UFSHCI_OCS_INVALID = 0xF,
+enum ufshci_utr_overall_command_status {
+ UFSHCI_UTR_OCS_SUCCESS = 0x0,
+ UFSHCI_UTR_OCS_INVALID_COMMAND_TABLE_ATTRIBUTES = 0x01,
+ UFSHCI_UTR_OCS_INVALID_PRDT_ATTRIBUTES = 0x02,
+ UFSHCI_UTR_OCS_MISMATCH_DATA_BUFFER_SIZE = 0x03,
+ UFSHCI_UTR_OCS_MISMATCH_RESPONSE_UPIU_SIZE = 0x04,
+ UFSHCI_UTR_OCS_COMMUNICATION_FAILURE_WITHIN_UIC_LAYERS = 0x05,
+ UFSHCI_UTR_OCS_ABORTED = 0x06,
+ UFSHCI_UTR_OCS_HOST_CONTROLLER_FATAL_ERROR = 0x07,
+ UFSHCI_UTR_OCS_DEVICE_FATAL_ERROR = 0x08,
+ UFSHCI_UTR_OCS_INVALID_CRYPTO_CONFIGURATION = 0x09,
+ UFSHCI_UTR_OCS_GENERAL_CRYPTO_ERROR = 0x0A,
+ UFSHCI_UTR_OCS_INVALID = 0xF,
};
struct ufshci_utp_xfer_req_desc {
@@ -271,6 +271,18 @@ _Static_assert(sizeof(struct ufshci_utp_cmd_desc) ==
#define UFSHCI_UTP_TASK_MGMT_REQ_SIZE 32
#define UFSHCI_UTP_TASK_MGMT_RESP_SIZE 32
+enum ufshci_utmr_overall_command_status {
+ UFSHCI_UTMR_OCS_SUCCESS = 0x0,
+ UFSHCI_UTMR_OCS_INVALID_TASK_MANAGEMENT_FUNCTION_ATTRIBUTES = 0x01,
+ UFSHCI_UTMR_OCS_MISMATCH_TASK_MANAGEMENT_REQUEST_SIZE = 0x02,
+ UFSHCI_UTMR_OCS_MISMATCH_TASK_MANAGEMENT_RESPONSE_SIZE = 0x03,
+ UFSHCI_UTMR_OCS_PEER_COMMUNICATION_FAILURE = 0x04,
+ UFSHCI_UTMR_OCS_ABORTED = 0x05,
+ UFSHCI_UTMR_OCS_FATAL_ERROR = 0x06,
+ UFSHCI_UTMR_OCS_DEVICE_FATAL_ERROR = 0x07,
+ UFSHCI_UTMR_OCS_INVALID = 0xF,
+};
+
/* UFSHCI spec 4.1, section 6.3.1 "UTP Task Management Request Descriptor" */
struct ufshci_utp_task_mgmt_req_desc {
/* dword 0 */
@@ -356,6 +368,7 @@ struct ufshci_upiu {
_Static_assert(sizeof(struct ufshci_upiu) == 512,
"ufshci_upiu must be 512 bytes");
+/* UFS Spec 4.1, section 10.7.1 "COMMAND UPIU" */
struct ufshci_cmd_command_upiu {
/* dword 0-2 */
struct ufshci_upiu_header header;
@@ -376,6 +389,7 @@ _Static_assert(sizeof(struct ufshci_cmd_command_upiu) % UFSHCI_UPIU_ALIGNMENT ==
0,
"UPIU requires 64-bit alignment");
+/* UFS Spec 4.1, section 10.7.2 "RESPONSE UPIU" */
struct ufshci_cmd_response_upiu {
/* dword 0-2 */
struct ufshci_upiu_header header;
@@ -403,6 +417,69 @@ _Static_assert(sizeof(struct ufshci_cmd_response_upiu) %
0,
"UPIU requires 64-bit alignment");
+enum task_management_function {
+ UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK = 0x01,
+ UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK_SET = 0x02,
+ UFSHCI_TASK_MGMT_FUNCTION_CLEAR_TASK_SET = 0x04,
+ UFSHCI_TASK_MGMT_FUNCTION_LOGICAL_UNIT_RESET = 0x08,
+ UFSHCI_TASK_MGMT_FUNCTION_QUERY_TASK = 0x80,
+ UFSHCI_TASK_MGMT_FUNCTION_QUERY_TASKSET = 0x81,
+};
+
+/* UFS Spec 4.1, section 10.7.6 "TASK MANAGEMENT REQUEST UPIU" */
+struct ufshci_task_mgmt_request_upiu {
+ /* dword 0-2 */
+ struct ufshci_upiu_header header;
+ /* dword 3 */
+ uint32_t input_param1; /* (Big-endian) */
+ /* dword 4 */
+ uint32_t input_param2; /* (Big-endian) */
+ /* dword 5 */
+ uint32_t input_param3; /* (Big-endian) */
+ /* dword 6-7 */
+ uint8_t reserved[8];
+} __packed __aligned(4);
+
+_Static_assert(sizeof(struct ufshci_task_mgmt_request_upiu) == 32,
+ "bad size for ufshci_task_mgmt_request_upiu");
+_Static_assert(sizeof(struct ufshci_task_mgmt_request_upiu) <=
+ UFSHCI_UTP_XFER_RESP_SIZE,
+ "bad size for ufshci_task_mgmt_request_upiu");
+_Static_assert(sizeof(struct ufshci_task_mgmt_request_upiu) %
+ UFSHCI_UPIU_ALIGNMENT ==
+ 0,
+ "UPIU requires 64-bit alignment");
+
+enum task_management_service_response {
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_COMPLETE = 0x00,
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_NOT_SUPPORTED = 0x04,
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_FAILED = 0x05,
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_SUCCEEDED = 0x08,
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_INCORRECT_LUN = 0x09,
+};
+
+/* UFS Spec 4.1, section 10.7.7 "TASK MANAGEMENT RESPONSE UPIU" */
+struct ufshci_task_mgmt_response_upiu {
+ /* dword 0-2 */
+ struct ufshci_upiu_header header;
+ /* dword 3 */
+ uint32_t output_param1; /* (Big-endian) */
+ /* dword 4 */
+ uint32_t output_param2; /* (Big-endian) */
+ /* dword 5-7 */
+ uint8_t reserved[12];
+} __packed __aligned(4);
+
+_Static_assert(sizeof(struct ufshci_task_mgmt_response_upiu) == 32,
+ "bad size for ufshci_task_mgmt_response_upiu");
+_Static_assert(sizeof(struct ufshci_task_mgmt_response_upiu) <=
+ UFSHCI_UTP_XFER_RESP_SIZE,
+ "bad size for ufshci_task_mgmt_response_upiu");
+_Static_assert(sizeof(struct ufshci_task_mgmt_response_upiu) %
+ UFSHCI_UPIU_ALIGNMENT ==
+ 0,
+ "UPIU requires 64-bit alignment");
+
/* UFS Spec 4.1, section 10.7.8 "QUERY REQUEST UPIU" */
enum ufshci_query_function {
UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01,
@@ -554,6 +631,7 @@ union ufshci_reponse_upiu {
struct ufshci_upiu_header header;
struct ufshci_cmd_response_upiu cmd_response_upiu;
struct ufshci_query_response_upiu query_response_upiu;
+ struct ufshci_task_mgmt_response_upiu task_mgmt_response_upiu;
struct ufshci_nop_in_upiu nop_in_upiu;
};
@@ -638,6 +716,42 @@ struct ufshci_device_descriptor {
_Static_assert(sizeof(struct ufshci_device_descriptor) == 89,
"bad size for ufshci_device_descriptor");
+/* Defines the bit field of dExtendedUfsFeaturesSupport. */
+enum ufshci_desc_wb_ext_ufs_feature {
+ UFSHCI_DESC_EXT_UFS_FEATURE_FFU = (1 << 0),
+ UFSHCI_DESC_EXT_UFS_FEATURE_PSA = (1 << 1),
+ UFSHCI_DESC_EXT_UFS_FEATURE_DEV_LIFE_SPAN = (1 << 2),
+ UFSHCI_DESC_EXT_UFS_FEATURE_REFRESH_OP = (1 << 3),
+ UFSHCI_DESC_EXT_UFS_FEATURE_TOO_HIGH_TEMP = (1 << 4),
+ UFSHCI_DESC_EXT_UFS_FEATURE_TOO_LOW_TEMP = (1 << 5),
+ UFSHCI_DESC_EXT_UFS_FEATURE_EXT_TEMP = (1 << 6),
+ UFSHCI_DESC_EXT_UFS_FEATURE_HPB_SUPPORT = (1 << 7),
+ UFSHCI_DESC_EXT_UFS_FEATURE_WRITE_BOOSTER = (1 << 8),
+ UFSHCI_DESC_EXT_UFS_FEATURE_PERF_THROTTLING = (1 << 9),
+ UFSHCI_DESC_EXT_UFS_FEATURE_ADVANCED_RPMB = (1 << 10),
+ UFSHCI_DESC_EXT_UFS_FEATURE_ZONED_UFS_EXTENSION = (1 << 11),
+ UFSHCI_DESC_EXT_UFS_FEATURE_DEV_LEVEL_EXCEPTION = (1 << 12),
+ UFSHCI_DESC_EXT_UFS_FEATURE_HID = (1 << 13),
+ UFSHCI_DESC_EXT_UFS_FEATURE_BARRIER = (1 << 14),
+ UFSHCI_DESC_EXT_UFS_FEATURE_CLEAR_ERROR_HISTORY = (1 << 15),
+ UFSHCI_DESC_EXT_UFS_FEATURE_EXT_IID = (1 << 16),
+ UFSHCI_DESC_EXT_UFS_FEATURE_FBO = (1 << 17),
+ UFSHCI_DESC_EXT_UFS_FEATURE_FAST_RECOVERY_MODE = (1 << 18),
+ UFSHCI_DESC_EXT_UFS_FEATURE_RPMB_VENDOR_CMD = (1 << 19),
+};
+
+/* Defines the bit field of bWriteBoosterBufferType. */
+enum ufshci_desc_wb_buffer_type {
+ UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED = 0x00,
+ UFSHCI_DESC_WB_BUF_TYPE_SINGLE_SHARED = 0x01,
+};
+
+/* Defines the bit field of bWriteBoosterBufferPreserveUserSpaceEn. */
+enum ufshci_desc_user_space_config {
+ UFSHCI_DESC_WB_BUF_USER_SPACE_REDUCTION = 0x00,
+ UFSHCI_DESC_WB_BUF_PRESERVE_USER_SPACE = 0x01,
+};
+
/*
* UFS Spec 4.1, section 14.1.5.3 "Configuration Descriptor"
* ConfigurationDescriptor use big-endian byte ordering.
@@ -936,4 +1050,37 @@ enum ufshci_attributes {
UFSHCI_ATTR_B_REFRESH_METHOD = 0x2f,
};
+/* bAvailableWriteBoosterBufferSize codes (UFS WriteBooster available buffer
+ * left %) */
+enum ufshci_wb_available_buffer_size {
+ UFSHCI_ATTR_WB_AVAILABLE_0 = 0x00, /* 0% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_10 = 0x01, /* 10% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_20 = 0x02, /* 20% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_30 = 0x03, /* 30% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_40 = 0x04, /* 40% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_50 = 0x05, /* 50% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_60 = 0x06, /* 60% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_70 = 0x07, /* 70% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_80 = 0x08, /* 80% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_90 = 0x09, /* 90% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_100 = 0x0A, /* 100% buffer remains */
+};
+
+/* bWriteBoosterBufferLifeTimeEst codes (UFS WriteBooster buffer life %) */
+enum ufshci_wb_lifetime {
+ UFSHCI_ATTR_WB_LIFE_DISABLED = 0x00, /* Info not available */
+ UFSHCI_ATTR_WB_LIFE_0_10 = 0x01, /* 0%–10% used */
+ UFSHCI_ATTR_WB_LIFE_10_20 = 0x02, /* 10%–20% used */
+ UFSHCI_ATTR_WB_LIFE_20_30 = 0x03, /* 20%–30% used */
+ UFSHCI_ATTR_WB_LIFE_30_40 = 0x04, /* 30%–40% used */
+ UFSHCI_ATTR_WB_LIFE_40_50 = 0x05, /* 40%–50% used */
+ UFSHCI_ATTR_WB_LIFE_50_60 = 0x06, /* 50%–60% used */
+ UFSHCI_ATTR_WB_LIFE_60_70 = 0x07, /* 60%–70% used */
+ UFSHCI_ATTR_WB_LIFE_70_80 = 0x08, /* 70%–80% used */
+ UFSHCI_ATTR_WB_LIFE_80_90 = 0x09, /* 80%–90% used */
+ UFSHCI_ATTR_WB_LIFE_90_100 = 0x0A, /* 90%–100% used */
+ UFSHCI_ATTR_WB_LIFE_EXCEEDED =
+ 0x0B, /* Exceeded estimated life (treat as WB disabled) */
+};
+
#endif /* __UFSHCI_H__ */
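A small sketch of how the lifetime codes above could be turned into a used-percentage upper bound (10% per step, per the comments in the enum); the helper name is an assumption, not part of this commit:

	/* Upper bound of used life encoded by bWriteBoosterBufferLifeTimeEst. */
	static int
	ufshci_wb_life_used_pct_max(enum ufshci_wb_lifetime code)
	{
		if (code == UFSHCI_ATTR_WB_LIFE_DISABLED)
			return (-1);		/* information not available */
		if (code >= UFSHCI_ATTR_WB_LIFE_EXCEEDED)
			return (101);		/* exceeded estimated life */
		return ((int)code * 10);	/* e.g. 0x03 -> up to 30% used */
	}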
diff --git a/sys/dev/ufshci/ufshci_ctrlr.c b/sys/dev/ufshci/ufshci_ctrlr.c
index 55d8363d3287..36be94b8b8b7 100644
--- a/sys/dev/ufshci/ufshci_ctrlr.c
+++ b/sys/dev/ufshci/ufshci_ctrlr.c
@@ -61,7 +61,7 @@ ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
int
ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
{
- uint32_t ver, cap, hcs, ie;
+ uint32_t ver, cap, hcs, ie, ahit;
uint32_t timeout_period, retry_count;
int error;
@@ -127,6 +127,13 @@ ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
if (error)
return (error);
+	/* Read the UECPA register to clear it */
+ ufshci_mmio_read_4(ctrlr, uecpa);
+
+	/* Disable Auto-hibernate */
+ ahit = 0;
+ ufshci_mmio_write_4(ctrlr, ahit, ahit);
+
/*
* The device_present(UFSHCI_HCS_REG_DP) bit becomes true if the host
* controller has successfully received a Link Startup UIC command
@@ -139,6 +146,16 @@ ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
return (ENXIO);
}
+ /* Allocate and initialize UTP Task Management Request List. */
+ error = ufshci_utmr_req_queue_construct(ctrlr);
+ if (error)
+ return (error);
+
+ /* Allocate and initialize UTP Transfer Request List or SQ/CQ. */
+ error = ufshci_utr_req_queue_construct(ctrlr);
+ if (error)
+ return (error);
+
/* Enable additional interrupts by programming the IE register. */
ie = ufshci_mmio_read_4(ctrlr, ie);
ie |= UFSHCIM(UFSHCI_IE_REG_UTRCE); /* UTR Completion */
@@ -153,19 +170,12 @@ ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
/* TODO: Initialize interrupt Aggregation Control Register (UTRIACR) */
- /* Allocate and initialize UTP Task Management Request List. */
- error = ufshci_utm_req_queue_construct(ctrlr);
- if (error)
- return (error);
-
- /* Allocate and initialize UTP Transfer Request List or SQ/CQ. */
- error = ufshci_ut_req_queue_construct(ctrlr);
- if (error)
- return (error);
-
/* TODO: Separate IO and Admin slot */
- /* max_hw_pend_io is the number of slots in the transfer_req_queue */
- ctrlr->max_hw_pend_io = ctrlr->transfer_req_queue.num_entries;
+ /*
+ * max_hw_pend_io is the number of slots in the transfer_req_queue.
+ * Reduce num_entries by one to reserve an admin slot.
+ */
+ ctrlr->max_hw_pend_io = ctrlr->transfer_req_queue.num_entries - 1;
return (0);
}
@@ -179,8 +189,8 @@ ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev)
/* TODO: Flush In-flight IOs */
/* Release resources */
- ufshci_utm_req_queue_destroy(ctrlr);
- ufshci_ut_req_queue_destroy(ctrlr);
+ ufshci_utmr_req_queue_destroy(ctrlr);
+ ufshci_utr_req_queue_destroy(ctrlr);
if (ctrlr->tag)
bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
@@ -215,8 +225,8 @@ ufshci_ctrlr_reset(struct ufshci_controller *ctrlr)
ufshci_mmio_write_4(ctrlr, ie, 0);
/* Release resources */
- ufshci_utm_req_queue_destroy(ctrlr);
- ufshci_ut_req_queue_destroy(ctrlr);
+ ufshci_utmr_req_queue_destroy(ctrlr);
+ ufshci_utr_req_queue_destroy(ctrlr);
/* Reset Host Controller */
error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
@@ -232,12 +242,12 @@ ufshci_ctrlr_reset(struct ufshci_controller *ctrlr)
ufshci_mmio_write_4(ctrlr, ie, ie);
/* Allocate and initialize UTP Task Management Request List. */
- error = ufshci_utm_req_queue_construct(ctrlr);
+ error = ufshci_utmr_req_queue_construct(ctrlr);
if (error)
return (error);
/* Allocate and initialize UTP Transfer Request List or SQ/CQ. */
- error = ufshci_ut_req_queue_construct(ctrlr);
+ error = ufshci_utr_req_queue_construct(ctrlr);
if (error)
return (error);
@@ -245,6 +255,15 @@ ufshci_ctrlr_reset(struct ufshci_controller *ctrlr)
}
int
+ufshci_ctrlr_submit_task_mgmt_request(struct ufshci_controller *ctrlr,
+ struct ufshci_request *req)
+{
+ return (
+ ufshci_req_queue_submit_request(&ctrlr->task_mgmt_req_queue, req,
+ /*is_admin*/ false));
+}
+
+int
ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr,
struct ufshci_request *req)
{
@@ -333,18 +352,19 @@ ufshci_ctrlr_start(struct ufshci_controller *ctrlr)
return;
}
- /* Read Controller Descriptor (Device, Geometry)*/
+ /* Read Controller Descriptor (Device, Geometry) */
if (ufshci_dev_get_descriptor(ctrlr) != 0) {
ufshci_ctrlr_fail(ctrlr, false);
return;
}
- /* TODO: Configure Write Protect */
+ if (ufshci_dev_config_write_booster(ctrlr)) {
+ ufshci_ctrlr_fail(ctrlr, false);
+ return;
+ }
/* TODO: Configure Background Operations */
- /* TODO: Configure Write Booster */
-
if (ufshci_sim_attach(ctrlr) != 0) {
ufshci_ctrlr_fail(ctrlr, false);
return;
@@ -360,8 +380,8 @@ ufshci_ctrlr_start_config_hook(void *arg)
TSENTER();
- if (ufshci_utm_req_queue_enable(ctrlr) == 0 &&
- ufshci_ut_req_queue_enable(ctrlr) == 0)
+ if (ufshci_utmr_req_queue_enable(ctrlr) == 0 &&
+ ufshci_utr_req_queue_enable(ctrlr) == 0)
ufshci_ctrlr_start(ctrlr);
else
ufshci_ctrlr_fail(ctrlr, false);
@@ -445,9 +465,9 @@ ufshci_ctrlr_poll(struct ufshci_controller *ctrlr)
}
/* UTP Task Management Request Completion Status */
if (is & UFSHCIM(UFSHCI_IS_REG_UTMRCS)) {
- ufshci_printf(ctrlr, "TODO: Implement UTMR completion\n");
ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTMRCS));
- /* TODO: Implement UTMR completion */
+ ufshci_req_queue_process_completions(
+ &ctrlr->task_mgmt_req_queue);
}
/* UTP Transfer Request Completion Status */
if (is & UFSHCIM(UFSHCI_IS_REG_UTRCS)) {
diff --git a/sys/dev/ufshci/ufshci_ctrlr_cmd.c b/sys/dev/ufshci/ufshci_ctrlr_cmd.c
index ddf28c58fa88..71d163d998af 100644
--- a/sys/dev/ufshci/ufshci_ctrlr_cmd.c
+++ b/sys/dev/ufshci/ufshci_ctrlr_cmd.c
@@ -8,6 +8,32 @@
#include "ufshci_private.h"
void
+ufshci_ctrlr_cmd_send_task_mgmt_request(struct ufshci_controller *ctrlr,
+ ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t function, uint8_t lun,
+ uint8_t task_tag, uint8_t iid)
+{
+ struct ufshci_request *req;
+ struct ufshci_task_mgmt_request_upiu *upiu;
+
+ req = ufshci_allocate_request_vaddr(NULL, 0, M_WAITOK, cb_fn, cb_arg);
+
+ req->request_size = sizeof(struct ufshci_task_mgmt_request_upiu);
+ req->response_size = sizeof(struct ufshci_task_mgmt_response_upiu);
+
+ upiu = (struct ufshci_task_mgmt_request_upiu *)&req->request_upiu;
+ memset(upiu, 0, req->request_size);
+ upiu->header.trans_type =
+ UFSHCI_UPIU_TRANSACTION_CODE_TASK_MANAGEMENT_REQUEST;
+ upiu->header.lun = lun;
+ upiu->header.ext_iid_or_function = function;
+ upiu->input_param1 = lun;
+ upiu->input_param2 = task_tag;
+ upiu->input_param3 = iid;
+
+ ufshci_ctrlr_submit_task_mgmt_request(ctrlr, req);
+}
+
+void
ufshci_ctrlr_cmd_send_nop(struct ufshci_controller *ctrlr, ufshci_cb_fn_t cb_fn,
void *cb_arg)
{
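The new task management entry point above pairs naturally with the polled-completion helpers this driver already uses for query requests. A sketch of issuing a LOGICAL UNIT RESET through it; the 0x08 function code follows the UFS task management function values and is an assumption to verify against the spec, as is the example function itself:

    static int
    example_lun_reset(struct ufshci_controller *ctrlr, uint8_t lun,
        uint8_t task_tag)
    {
        struct ufshci_completion_poll_status status;

        status.done = 0;
        ufshci_ctrlr_cmd_send_task_mgmt_request(ctrlr,
            ufshci_completion_poll_cb, &status, /*function*/ 0x08, lun,
            task_tag, /*iid*/ 0);
        ufshci_completion_poll(&status);
        return (status.error ? ENXIO : 0);
    }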
diff --git a/sys/dev/ufshci/ufshci_dev.c b/sys/dev/ufshci/ufshci_dev.c
index a0e32914e2aa..dd196b1d638b 100644
--- a/sys/dev/ufshci/ufshci_dev.c
+++ b/sys/dev/ufshci/ufshci_dev.c
@@ -60,6 +60,14 @@ ufshci_dev_read_geometry_descriptor(struct ufshci_controller *ctrlr,
}
static int
+ufshci_dev_read_unit_descriptor(struct ufshci_controller *ctrlr, uint8_t lun,
+ struct ufshci_unit_descriptor *desc)
+{
+ return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_UNIT, lun, 0,
+ desc, sizeof(struct ufshci_unit_descriptor)));
+}
+
+static int
ufshci_dev_read_flag(struct ufshci_controller *ctrlr,
enum ufshci_flags flag_type, uint8_t *flag)
{
@@ -114,6 +122,61 @@ ufshci_dev_set_flag(struct ufshci_controller *ctrlr,
}
static int
+ufshci_dev_clear_flag(struct ufshci_controller *ctrlr,
+ enum ufshci_flags flag_type)
+{
+ struct ufshci_completion_poll_status status;
+ struct ufshci_query_param param;
+
+ param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ param.opcode = UFSHCI_QUERY_OPCODE_CLEAR_FLAG;
+ param.type = flag_type;
+ param.index = 0;
+ param.selector = 0;
+ param.value = 0;
+
+ status.done = 0;
+ ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
+ &status, param);
+ ufshci_completion_poll(&status);
+ if (status.error) {
+ ufshci_printf(ctrlr, "ufshci_dev_clear_flag failed!\n");
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static int
+ufshci_dev_read_attribute(struct ufshci_controller *ctrlr,
+ enum ufshci_attributes attr_type, uint8_t index, uint8_t selector,
+ uint64_t *value)
+{
+ struct ufshci_completion_poll_status status;
+ struct ufshci_query_param param;
+
+ param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
+ param.opcode = UFSHCI_QUERY_OPCODE_READ_ATTRIBUTE;
+ param.type = attr_type;
+ param.index = index;
+ param.selector = selector;
+ param.value = 0;
+
+ status.done = 0;
+ ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
+ &status, param);
+ ufshci_completion_poll(&status);
+ if (status.error) {
+ ufshci_printf(ctrlr, "ufshci_dev_read_attribute failed!\n");
+ return (ENXIO);
+ }
+
+ *value = status.cpl.response_upiu.query_response_upiu.value_64;
+
+ return (0);
+}
+
+static int
ufshci_dev_write_attribute(struct ufshci_controller *ctrlr,
enum ufshci_attributes attr_type, uint8_t index, uint8_t selector,
uint64_t value)
@@ -270,7 +333,7 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
*/
const uint32_t fast_mode = 1;
const uint32_t rx_bit_shift = 4;
- const uint32_t power_mode = (fast_mode << rx_bit_shift) | fast_mode;
+ uint32_t power_mode, peer_granularity;
/* Update lanes with available TX/RX lanes */
if (ufshci_uic_send_dme_get(ctrlr, PA_AvailTxDataLanes,
@@ -295,6 +358,20 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
ctrlr->rx_lanes))
return (ENXIO);
+ if (ctrlr->quirks & UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY) {
+ /* Before changing gears, first change the number of lanes. */
+ if (ufshci_uic_send_dme_get(ctrlr, PA_PWRMode, &power_mode))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode))
+ return (ENXIO);
+
+ /* Wait for power mode changed. */
+ if (ufshci_uic_power_mode_ready(ctrlr)) {
+ ufshci_reg_dump(ctrlr);
+ return (ENXIO);
+ }
+ }
+
/* Set HS-GEAR to max gear */
ctrlr->hs_gear = ctrlr->max_rx_hs_gear;
if (ufshci_uic_send_dme_set(ctrlr, PA_TxGear, ctrlr->hs_gear))
@@ -346,6 +423,7 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
return (ENXIO);
/* Set TX/RX PWRMode */
+ power_mode = (fast_mode << rx_bit_shift) | fast_mode;
if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode))
return (ENXIO);
@@ -366,7 +444,8 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
pause_sbt("ufshci", ustosbt(1250), 0, C_PREL(1));
/* Test with dme_peer_get to make sure there are no errors. */
- if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity, NULL))
+ if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity,
+ &peer_granularity))
return (ENXIO);
}
@@ -398,7 +477,7 @@ ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr)
return (error);
ver = be16toh(device->dev_desc.wSpecVersion);
- ufshci_printf(ctrlr, "UFS device spec version %u.%u%u\n",
+ ufshci_printf(ctrlr, "UFS device spec version %u.%u.%u\n",
UFSHCIV(UFSHCI_VER_REG_MJR, ver), UFSHCIV(UFSHCI_VER_REG_MNR, ver),
UFSHCIV(UFSHCI_VER_REG_VS, ver));
ufshci_printf(ctrlr, "%u enabled LUNs found\n",
@@ -426,3 +505,273 @@ ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr)
return (0);
}
+
+static int
+ufshci_dev_enable_write_booster(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ int error;
+
+ /* Enable WriteBooster */
+ error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN);
+ if (error) {
+ ufshci_printf(ctrlr, "Failed to enable WriteBooster\n");
+ return (error);
+ }
+ dev->is_wb_enabled = true;
+
+ /* Enable WriteBooster buffer flush during hibernate */
+ error = ufshci_dev_set_flag(ctrlr,
+ UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE);
+ if (error) {
+ ufshci_printf(ctrlr,
+ "Failed to enable WriteBooster buffer flush during hibernate\n");
+ return (error);
+ }
+
+ /* Enable WriteBooster buffer flush */
+ error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN);
+ if (error) {
+ ufshci_printf(ctrlr,
+ "Failed to enable WriteBooster buffer flush\n");
+ return (error);
+ }
+ dev->is_wb_flush_enabled = true;
+
+ return (0);
+}
+
+static int
+ufshci_dev_disable_write_booster(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ int error;
+
+ /* Disable WriteBooster buffer flush */
+ error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN);
+ if (error) {
+ ufshci_printf(ctrlr,
+ "Failed to disable WriteBooster buffer flush\n");
+ return (error);
+ }
+ dev->is_wb_flush_enabled = false;
+
+ /* Disable WriteBooster buffer flush during hibernate */
+ error = ufshci_dev_clear_flag(ctrlr,
+ UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE);
+ if (error) {
+ ufshci_printf(ctrlr,
+ "Failed to disable WriteBooster buffer flush during hibernate\n");
+ return (error);
+ }
+
+ /* Disable WriteBooster */
+ error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN);
+ if (error) {
+ ufshci_printf(ctrlr, "Failed to disable WriteBooster\n");
+ return (error);
+ }
+ dev->is_wb_enabled = false;
+
+ return (0);
+}
+
+static int
+ufshci_dev_is_write_booster_buffer_life_time_left(
+ struct ufshci_controller *ctrlr, bool *is_life_time_left)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ uint8_t buffer_lun;
+ uint64_t life_time;
+ uint32_t error;
+
+ if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED)
+ buffer_lun = dev->wb_dedicated_lu;
+ else
+ buffer_lun = 0;
+
+ error = ufshci_dev_read_attribute(ctrlr,
+ UFSHCI_ATTR_B_WB_BUFFER_LIFE_TIME_EST, buffer_lun, 0, &life_time);
+ if (error)
+ return (error);
+
+ *is_life_time_left = (life_time != UFSHCI_ATTR_WB_LIFE_EXCEEDED);
+
+ return (0);
+}
+
+/*
+ * This function is not yet in use. It will be used when suspend/resume is
+ * implemented.
+ */
+static __unused int
+ufshci_dev_need_write_booster_buffer_flush(struct ufshci_controller *ctrlr,
+ bool *need_flush)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ bool is_life_time_left = false;
+ uint64_t available_buffer_size, current_buffer_size;
+ uint8_t buffer_lun;
+ uint32_t error;
+
+ *need_flush = false;
+
+ if (!dev->is_wb_enabled)
+ return (0);
+
+ error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr,
+ &is_life_time_left);
+ if (error)
+ return (error);
+
+ if (!is_life_time_left)
+ return (ufshci_dev_disable_write_booster(ctrlr));
+
+ if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED)
+ buffer_lun = dev->wb_dedicated_lu;
+ else
+ buffer_lun = 0;
+
+ error = ufshci_dev_read_attribute(ctrlr,
+ UFSHCI_ATTR_B_AVAILABLE_WB_BUFFER_SIZE, buffer_lun, 0,
+ &available_buffer_size);
+ if (error)
+ return (error);
+
+ switch (dev->wb_user_space_config_option) {
+ case UFSHCI_DESC_WB_BUF_USER_SPACE_REDUCTION:
+ *need_flush = (available_buffer_size <=
+ UFSHCI_ATTR_WB_AVAILABLE_10);
+ break;
+ case UFSHCI_DESC_WB_BUF_PRESERVE_USER_SPACE:
+ /*
+ * In PRESERVE USER SPACE mode, a flush should be performed when
+ * the current buffer size is greater than 0 and the available
+ * buffer size has fallen below write_booster_flush_threshold.
+ */
+ error = ufshci_dev_read_attribute(ctrlr,
+ UFSHCI_ATTR_D_CURRENT_WB_BUFFER_SIZE, buffer_lun, 0,
+ &current_buffer_size);
+ if (error)
+ return (error);
+
+ if (current_buffer_size == 0)
+ return (0);
+
+ *need_flush = (available_buffer_size <
+ dev->write_booster_flush_threshold);
+ break;
+ default:
+ ufshci_printf(ctrlr,
+ "Invalid bWriteBoosterBufferPreserveUserSpaceEn value");
+ return (EINVAL);
+ }
+
+ /*
+ * TODO: Need to handle WRITEBOOSTER_FLUSH_NEEDED exception case from
+ * wExceptionEventStatus attribute.
+ */
+
+ return (0);
+}
+
+int
+ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ uint32_t extended_ufs_feature_support;
+ uint32_t alloc_units = 0;
+ struct ufshci_unit_descriptor unit_desc;
+ uint8_t lun;
+ bool is_life_time_left;
+ uint32_t mega_byte = 1024 * 1024;
+ uint32_t error = 0;
+
+ extended_ufs_feature_support = be32toh(
+ dev->dev_desc.dExtendedUfsFeaturesSupport);
+ if (!(extended_ufs_feature_support &
+ UFSHCI_DESC_EXT_UFS_FEATURE_WRITE_BOOSTER)) {
+ /* This device does not support Write Booster */
+ return (0);
+ }
+
+ if (ufshci_dev_enable_write_booster(ctrlr))
+ return (0);
+
+ /* Get WriteBooster buffer parameters */
+ dev->wb_buffer_type = dev->dev_desc.bWriteBoosterBufferType;
+ dev->wb_user_space_config_option =
+ dev->dev_desc.bWriteBoosterBufferPreserveUserSpaceEn;
+
+ /*
+ * Find the size of the write buffer.
+ * With LU-dedicated (00h), the WriteBooster buffer is assigned
+ * exclusively to one chosen LU (not one-per-LU), whereas Shared (01h)
+ * uses a single device-wide buffer shared by multiple LUs.
+ */
+ if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_SINGLE_SHARED) {
+ alloc_units = be32toh(
+ dev->dev_desc.dNumSharedWriteBoosterBufferAllocUnits);
+ ufshci_printf(ctrlr,
+ "WriteBooster buffer type = Shared, alloc_units=%d\n",
+ alloc_units);
+ } else if (dev->wb_buffer_type ==
+ UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED) {
+ ufshci_printf(ctrlr, "WriteBooster buffer type = Dedicated\n");
+ for (lun = 0; lun < ctrlr->max_lun_count; lun++) {
+ /* Find a dedicated buffer using a unit descriptor */
+ if (ufshci_dev_read_unit_descriptor(ctrlr, lun,
+ &unit_desc))
+ continue;
+
+ alloc_units = be32toh(
+ unit_desc.dLUNumWriteBoosterBufferAllocUnits);
+ if (alloc_units) {
+ dev->wb_dedicated_lu = lun;
+ break;
+ }
+ }
+ } else {
+ ufshci_printf(ctrlr,
+ "Not supported WriteBooster buffer type: 0x%x\n",
+ dev->wb_buffer_type);
+ goto out;
+ }
+
+ if (alloc_units == 0) {
+ ufshci_printf(ctrlr, "The WriteBooster buffer size is zero\n");
+ goto out;
+ }
+
+ dev->wb_buffer_size_mb = alloc_units *
+ dev->geo_desc.bAllocationUnitSize *
+ (be32toh(dev->geo_desc.dSegmentSize)) /
+ (mega_byte / UFSHCI_SECTOR_SIZE);
+
+ /* Set to flush when 40% of the available buffer size remains */
+ dev->write_booster_flush_threshold = UFSHCI_ATTR_WB_AVAILABLE_40;
+
+ /*
+ * Check whether any WriteBooster buffer lifetime remains.
+ * The lifetime estimate is the percentage of device life used, based
+ * on P/E cycles. If "preserve user space" is enabled, writes to
+ * normal user space also consume WB life since the area is shared.
+ */
+ error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr,
+ &is_life_time_left);
+ if (error)
+ goto out;
+
+ if (!is_life_time_left) {
+ ufshci_printf(ctrlr,
+ "There is no WriteBooster buffer life time left.\n");
+ goto out;
+ }
+
+ ufshci_printf(ctrlr, "WriteBooster Enabled\n");
+ return (0);
+out:
+ ufshci_dev_disable_write_booster(ctrlr);
+ return (error);
+}
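A worked example of the wb_buffer_size_mb computation above, with illustrative descriptor values: alloc_units = 2048, bAllocationUnitSize = 1 segment per allocation unit, and dSegmentSize = 1024 sectors of UFSHCI_SECTOR_SIZE (512) bytes. The raw capacity is 2048 * 1 * 1024 * 512 bytes = 1 GiB, and the driver's expression gives 2048 * 1 * 1024 / (1048576 / 512) = 1024 MB, which matches.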
diff --git a/sys/dev/ufshci/ufshci_pci.c b/sys/dev/ufshci/ufshci_pci.c
index 65a69ee0b518..d64b7526f713 100644
--- a/sys/dev/ufshci/ufshci_pci.c
+++ b/sys/dev/ufshci/ufshci_pci.c
@@ -53,7 +53,8 @@ static struct _pcsid {
{ 0x98fa8086, "Intel Lakefield UFS Host Controller",
UFSHCI_REF_CLK_19_2MHz,
UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE |
- UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE },
+ UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE |
+ UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY },
{ 0x54ff8086, "Intel UFS Host Controller", UFSHCI_REF_CLK_19_2MHz },
{ 0x00000000, NULL } };
diff --git a/sys/dev/ufshci/ufshci_private.h b/sys/dev/ufshci/ufshci_private.h
index ac58d44102a0..2e033f84c373 100644
--- a/sys/dev/ufshci/ufshci_private.h
+++ b/sys/dev/ufshci/ufshci_private.h
@@ -46,6 +46,8 @@ MALLOC_DECLARE(M_UFSHCI);
#define UFSHCI_UTR_ENTRIES (32)
#define UFSHCI_UTRM_ENTRIES (8)
+#define UFSHCI_SECTOR_SIZE (512)
+
struct ufshci_controller;
struct ufshci_completion_poll_status {
@@ -125,6 +127,8 @@ struct ufshci_qops {
struct ufshci_tracker **tr);
void (*ring_doorbell)(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr);
+ bool (*is_doorbell_cleared)(struct ufshci_controller *ctrlr,
+ uint8_t slot);
void (*clear_cpl_ntf)(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr);
bool (*process_cpl)(struct ufshci_req_queue *req_queue);
@@ -143,7 +147,10 @@ struct ufshci_hw_queue {
int domain;
int cpu;
- struct ufshci_utp_xfer_req_desc *utrd;
+ union {
+ struct ufshci_utp_xfer_req_desc *utrd;
+ struct ufshci_utp_task_mgmt_req_desc *utmrd;
+ };
bus_dma_tag_t dma_tag_queue;
bus_dmamap_t queuemem_map;
@@ -209,6 +216,15 @@ struct ufshci_device {
struct ufshci_geometry_descriptor geo_desc;
uint32_t unipro_version;
+
+ /* WriteBooster */
+ bool is_wb_enabled;
+ bool is_wb_flush_enabled;
+ uint32_t wb_buffer_type;
+ uint32_t wb_buffer_size_mb;
+ uint32_t wb_user_space_config_option;
+ uint8_t wb_dedicated_lu;
+ uint32_t write_booster_flush_threshold;
};
/*
@@ -224,7 +240,8 @@ struct ufshci_controller {
2 /* Need an additional 200 ms of PA_TActivate */
#define UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE \
4 /* Need to wait 1250us after power mode change */
-
+#define UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY \
+ 8 /* Need to change the number of lanes before changing HS-GEAR. */
uint32_t ref_clk;
struct cam_sim *ufshci_sim;
@@ -333,6 +350,8 @@ int ufshci_ctrlr_reset(struct ufshci_controller *ctrlr);
void ufshci_ctrlr_start_config_hook(void *arg);
void ufshci_ctrlr_poll(struct ufshci_controller *ctrlr);
+int ufshci_ctrlr_submit_task_mgmt_request(struct ufshci_controller *ctrlr,
+ struct ufshci_request *req);
int ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr,
struct ufshci_request *req);
int ufshci_ctrlr_submit_io_request(struct ufshci_controller *ctrlr,
@@ -349,8 +368,12 @@ int ufshci_dev_init_unipro(struct ufshci_controller *ctrlr);
int ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr);
int ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr);
int ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr);
+int ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr);
/* Controller Command */
+void ufshci_ctrlr_cmd_send_task_mgmt_request(struct ufshci_controller *ctrlr,
+ ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t function, uint8_t lun,
+ uint8_t task_tag, uint8_t iid);
void ufshci_ctrlr_cmd_send_nop(struct ufshci_controller *ctrlr,
ufshci_cb_fn_t cb_fn, void *cb_arg);
void ufshci_ctrlr_cmd_send_query_request(struct ufshci_controller *ctrlr,
@@ -361,12 +384,12 @@ void ufshci_ctrlr_cmd_send_scsi_command(struct ufshci_controller *ctrlr,
/* Request Queue */
bool ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue);
-int ufshci_utm_req_queue_construct(struct ufshci_controller *ctrlr);
-int ufshci_ut_req_queue_construct(struct ufshci_controller *ctrlr);
-void ufshci_utm_req_queue_destroy(struct ufshci_controller *ctrlr);
-void ufshci_ut_req_queue_destroy(struct ufshci_controller *ctrlr);
-int ufshci_utm_req_queue_enable(struct ufshci_controller *ctrlr);
-int ufshci_ut_req_queue_enable(struct ufshci_controller *ctrlr);
+int ufshci_utmr_req_queue_construct(struct ufshci_controller *ctrlr);
+int ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr);
+void ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr);
+void ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr);
+int ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr);
+int ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr);
void ufshci_req_queue_fail(struct ufshci_controller *ctrlr,
struct ufshci_hw_queue *hwq);
int ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
@@ -385,9 +408,17 @@ int ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue);
int ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue,
struct ufshci_tracker **tr);
-void ufshci_req_sdb_ring_doorbell(struct ufshci_controller *ctrlr,
+void ufshci_req_sdb_utmr_ring_doorbell(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr);
+void ufshci_req_sdb_utr_ring_doorbell(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr);
+bool ufshci_req_sdb_utmr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
+ uint8_t slot);
+bool ufshci_req_sdb_utr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
+ uint8_t slot);
+void ufshci_req_sdb_utmr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr);
-void ufshci_req_sdb_clear_cpl_ntf(struct ufshci_controller *ctrlr,
+void ufshci_req_sdb_utr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr);
bool ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue);
int ufshci_req_sdb_get_inflight_io(struct ufshci_controller *ctrlr);
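Because utrd and utmrd now alias the same queue memory through a union, every accessor must pick the member matching the queue's is_task_mgmt flag. A minimal sketch of the pattern (helper name hypothetical), mirroring the desc_size selection done in ufshci_req_sdb_construct():

    static inline size_t
    ufshci_hwq_desc_size(const struct ufshci_req_queue *q)
    {
        return (q->is_task_mgmt ?
            sizeof(struct ufshci_utp_task_mgmt_req_desc) :
            sizeof(struct ufshci_utp_xfer_req_desc));
    }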
diff --git a/sys/dev/ufshci/ufshci_reg.h b/sys/dev/ufshci/ufshci_reg.h
index 6c9b3e2c8c04..6d5768505102 100644
--- a/sys/dev/ufshci/ufshci_reg.h
+++ b/sys/dev/ufshci/ufshci_reg.h
@@ -274,7 +274,7 @@ struct ufshci_registers {
#define UFSHCI_HCS_REG_UTMRLRDY_MASK (0x1)
#define UFSHCI_HCS_REG_UCRDY_SHIFT (3)
#define UFSHCI_HCS_REG_UCRDY_MASK (0x1)
-#define UFSHCI_HCS_REG_UPMCRS_SHIFT (7)
+#define UFSHCI_HCS_REG_UPMCRS_SHIFT (8)
#define UFSHCI_HCS_REG_UPMCRS_MASK (0x7)
#define UFSHCI_HCS_REG_UTPEC_SHIFT (12)
#define UFSHCI_HCS_REG_UTPEC_MASK (0xF)
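With the corrected shift, the UPMCRS field is taken from HCS bits 10:8 rather than 9:7. A sketch of the extraction, assuming UFSHCIV() expands to the usual (reg >> SHIFT) & MASK pattern used throughout this driver:

    uint32_t hcs, upmcrs;

    hcs = ufshci_mmio_read_4(ctrlr, hcs);
    upmcrs = UFSHCIV(UFSHCI_HCS_REG_UPMCRS, hcs);    /* now bits 10:8 */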
diff --git a/sys/dev/ufshci/ufshci_req_queue.c b/sys/dev/ufshci/ufshci_req_queue.c
index cc9a2ddae768..bb6efa6d2ccc 100644
--- a/sys/dev/ufshci/ufshci_req_queue.c
+++ b/sys/dev/ufshci/ufshci_req_queue.c
@@ -19,21 +19,36 @@
static void ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
struct ufshci_tracker *tr, enum ufshci_data_direction data_direction);
-static const struct ufshci_qops sdb_qops = {
+static const struct ufshci_qops sdb_utmr_qops = {
.construct = ufshci_req_sdb_construct,
.destroy = ufshci_req_sdb_destroy,
.get_hw_queue = ufshci_req_sdb_get_hw_queue,
.enable = ufshci_req_sdb_enable,
.reserve_slot = ufshci_req_sdb_reserve_slot,
.reserve_admin_slot = ufshci_req_sdb_reserve_slot,
- .ring_doorbell = ufshci_req_sdb_ring_doorbell,
- .clear_cpl_ntf = ufshci_req_sdb_clear_cpl_ntf,
+ .ring_doorbell = ufshci_req_sdb_utmr_ring_doorbell,
+ .is_doorbell_cleared = ufshci_req_sdb_utmr_is_doorbell_cleared,
+ .clear_cpl_ntf = ufshci_req_sdb_utmr_clear_cpl_ntf,
+ .process_cpl = ufshci_req_sdb_process_cpl,
+ .get_inflight_io = ufshci_req_sdb_get_inflight_io,
+};
+
+static const struct ufshci_qops sdb_utr_qops = {
+ .construct = ufshci_req_sdb_construct,
+ .destroy = ufshci_req_sdb_destroy,
+ .get_hw_queue = ufshci_req_sdb_get_hw_queue,
+ .enable = ufshci_req_sdb_enable,
+ .reserve_slot = ufshci_req_sdb_reserve_slot,
+ .reserve_admin_slot = ufshci_req_sdb_reserve_slot,
+ .ring_doorbell = ufshci_req_sdb_utr_ring_doorbell,
+ .is_doorbell_cleared = ufshci_req_sdb_utr_is_doorbell_cleared,
+ .clear_cpl_ntf = ufshci_req_sdb_utr_clear_cpl_ntf,
.process_cpl = ufshci_req_sdb_process_cpl,
.get_inflight_io = ufshci_req_sdb_get_inflight_io,
};
int
-ufshci_utm_req_queue_construct(struct ufshci_controller *ctrlr)
+ufshci_utmr_req_queue_construct(struct ufshci_controller *ctrlr)
{
struct ufshci_req_queue *req_queue;
int error;
@@ -44,7 +59,7 @@ ufshci_utm_req_queue_construct(struct ufshci_controller *ctrlr)
*/
req_queue = &ctrlr->task_mgmt_req_queue;
req_queue->queue_mode = UFSHCI_Q_MODE_SDB;
- req_queue->qops = sdb_qops;
+ req_queue->qops = sdb_utmr_qops;
error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTRM_ENTRIES,
/*is_task_mgmt*/ true);
@@ -53,21 +68,21 @@ ufshci_utm_req_queue_construct(struct ufshci_controller *ctrlr)
}
void
-ufshci_utm_req_queue_destroy(struct ufshci_controller *ctrlr)
+ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr)
{
ctrlr->task_mgmt_req_queue.qops.destroy(ctrlr,
&ctrlr->task_mgmt_req_queue);
}
int
-ufshci_utm_req_queue_enable(struct ufshci_controller *ctrlr)
+ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr)
{
return (ctrlr->task_mgmt_req_queue.qops.enable(ctrlr,
&ctrlr->task_mgmt_req_queue));
}
int
-ufshci_ut_req_queue_construct(struct ufshci_controller *ctrlr)
+ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr)
{
struct ufshci_req_queue *req_queue;
int error;
@@ -79,7 +94,7 @@ ufshci_ut_req_queue_construct(struct ufshci_controller *ctrlr)
*/
req_queue = &ctrlr->transfer_req_queue;
req_queue->queue_mode = UFSHCI_Q_MODE_SDB;
- req_queue->qops = sdb_qops;
+ req_queue->qops = sdb_utr_qops;
error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTR_ENTRIES,
/*is_task_mgmt*/ false);
@@ -88,14 +103,14 @@ ufshci_ut_req_queue_construct(struct ufshci_controller *ctrlr)
}
void
-ufshci_ut_req_queue_destroy(struct ufshci_controller *ctrlr)
+ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr)
{
ctrlr->transfer_req_queue.qops.destroy(ctrlr,
&ctrlr->transfer_req_queue);
}
int
-ufshci_ut_req_queue_enable(struct ufshci_controller *ctrlr)
+ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr)
{
return (ctrlr->transfer_req_queue.qops.enable(ctrlr,
&ctrlr->transfer_req_queue));
@@ -213,20 +228,30 @@ ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
struct ufshci_req_queue *req_queue = tr->req_queue;
struct ufshci_request *req = tr->req;
struct ufshci_completion cpl;
- struct ufshci_utp_xfer_req_desc *desc;
uint8_t ocs;
bool retry, error, retriable;
mtx_assert(&tr->hwq->qlock, MA_NOTOWNED);
- bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ /*
+ * Copy the response from the Request Descriptor or the UTP Command
+ * Descriptor.
+ */
+ if (req_queue->is_task_mgmt) {
+ cpl.size = tr->response_size;
+ memcpy(&cpl.response_upiu,
+ (void *)tr->hwq->utmrd[tr->slot_num].response_upiu,
+ cpl.size);
- cpl.size = tr->response_size;
- memcpy(&cpl.response_upiu, (void *)tr->ucd->response_upiu, cpl.size);
+ ocs = tr->hwq->utmrd[tr->slot_num].overall_command_status;
+ } else {
+ bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- desc = &tr->hwq->utrd[tr->slot_num];
- ocs = desc->overall_command_status;
+ cpl.size = tr->response_size;
+ memcpy(&cpl.response_upiu, (void *)tr->ucd->response_upiu,
+ cpl.size);
+
+ ocs = tr->hwq->utrd[tr->slot_num].overall_command_status;
+ }
error = ufshci_req_queue_response_is_error(req_queue, ocs,
&cpl.response_upiu);
@@ -358,7 +383,19 @@ ufshci_req_queue_prepare_prdt(struct ufshci_tracker *tr)
}
static void
-ufshci_req_queue_fill_descriptor(struct ufshci_utp_xfer_req_desc *desc,
+ufshci_req_queue_fill_utmr_descriptor(
+ struct ufshci_utp_task_mgmt_req_desc *desc, struct ufshci_request *req)
+{
+ memset(desc, 0, sizeof(struct ufshci_utp_task_mgmt_req_desc));
+ desc->interrupt = true;
+ /* Set the initial value to Invalid. */
+ desc->overall_command_status = UFSHCI_UTMR_OCS_INVALID;
+
+ memcpy(desc->request_upiu, &req->request_upiu, req->request_size);
+}
+
+static void
+ufshci_req_queue_fill_utr_descriptor(struct ufshci_utp_xfer_req_desc *desc,
uint8_t data_direction, const uint64_t paddr, const uint16_t response_off,
const uint16_t response_len, const uint16_t prdt_off,
const uint16_t prdt_entry_cnt)
@@ -378,7 +415,7 @@ ufshci_req_queue_fill_descriptor(struct ufshci_utp_xfer_req_desc *desc,
desc->data_direction = data_direction;
desc->interrupt = true;
/* Set the initial value to Invalid. */
- desc->overall_command_status = UFSHCI_OCS_INVALID;
+ desc->overall_command_status = UFSHCI_UTR_OCS_INVALID;
desc->utp_command_descriptor_base_address = (uint32_t)(paddr &
0xffffffff);
desc->utp_command_descriptor_base_address_upper = (uint32_t)(paddr >>
@@ -407,26 +444,32 @@ ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
/* TODO: Check timeout */
- request_len = req->request_size;
- response_off = UFSHCI_UTP_XFER_REQ_SIZE;
- response_len = req->response_size;
-
- /* Prepare UTP Command Descriptor */
- memcpy(tr->ucd, &req->request_upiu, request_len);
- memset((uint8_t *)tr->ucd + response_off, 0, response_len);
-
- /* Prepare PRDT */
- if (req->payload_valid)
- ufshci_req_queue_prepare_prdt(tr);
-
- bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- /* Prepare UTP Transfer Request Descriptor. */
- ucd_paddr = tr->ucd_bus_addr;
- ufshci_req_queue_fill_descriptor(&tr->hwq->utrd[slot_num],
- data_direction, ucd_paddr, response_off, response_len, tr->prdt_off,
- tr->prdt_entry_cnt);
+ if (req_queue->is_task_mgmt) {
+ /* Prepare UTP Task Management Request Descriptor. */
+ ufshci_req_queue_fill_utmr_descriptor(&tr->hwq->utmrd[slot_num],
+ req);
+ } else {
+ request_len = req->request_size;
+ response_off = UFSHCI_UTP_XFER_REQ_SIZE;
+ response_len = req->response_size;
+
+ /* Prepare UTP Command Descriptor */
+ memcpy(tr->ucd, &req->request_upiu, request_len);
+ memset((uint8_t *)tr->ucd + response_off, 0, response_len);
+
+ /* Prepare PRDT */
+ if (req->payload_valid)
+ ufshci_req_queue_prepare_prdt(tr);
+
+ /* Prepare UTP Transfer Request Descriptor. */
+ ucd_paddr = tr->ucd_bus_addr;
+ ufshci_req_queue_fill_utr_descriptor(&tr->hwq->utrd[slot_num],
+ data_direction, ucd_paddr, response_off, response_len,
+ tr->prdt_off, tr->prdt_entry_cnt);
+
+ bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ }
bus_dmamap_sync(tr->hwq->dma_tag_queue, tr->hwq->queuemem_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
diff --git a/sys/dev/ufshci/ufshci_req_sdb.c b/sys/dev/ufshci/ufshci_req_sdb.c
index b1f303afaef5..834a459d48e3 100644
--- a/sys/dev/ufshci/ufshci_req_sdb.c
+++ b/sys/dev/ufshci/ufshci_req_sdb.c
@@ -26,12 +26,6 @@ ufshci_req_sdb_cmd_desc_destroy(struct ufshci_req_queue *req_queue)
tr = hwq->act_tr[i];
bus_dmamap_destroy(req_queue->dma_tag_payload,
tr->payload_dma_map);
- free(tr, M_UFSHCI);
- }
-
- if (hwq->act_tr) {
- free(hwq->act_tr, M_UFSHCI);
- hwq->act_tr = NULL;
}
if (req_queue->ucd) {
@@ -76,7 +70,6 @@ ufshci_req_sdb_cmd_desc_construct(struct ufshci_req_queue *req_queue,
uint32_t num_entries, struct ufshci_controller *ctrlr)
{
struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
- struct ufshci_tracker *tr;
size_t ucd_allocsz, payload_allocsz;
uint8_t *ucdmem;
int i, error;
@@ -134,27 +127,14 @@ ufshci_req_sdb_cmd_desc_construct(struct ufshci_req_queue *req_queue,
goto out;
}
- hwq->act_tr = malloc_domainset(sizeof(struct ufshci_tracker *) *
- req_queue->num_entries,
- M_UFSHCI, DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
-
for (i = 0; i < req_queue->num_trackers; i++) {
- tr = malloc_domainset(sizeof(struct ufshci_tracker), M_UFSHCI,
- DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
-
bus_dmamap_create(req_queue->dma_tag_payload, 0,
- &tr->payload_dma_map);
+ &hwq->act_tr[i]->payload_dma_map);
- tr->req_queue = req_queue;
- tr->slot_num = i;
- tr->slot_state = UFSHCI_SLOT_STATE_FREE;
-
- tr->ucd = (struct ufshci_utp_cmd_desc *)ucdmem;
- tr->ucd_bus_addr = hwq->ucd_bus_addr[i];
+ hwq->act_tr[i]->ucd = (struct ufshci_utp_cmd_desc *)ucdmem;
+ hwq->act_tr[i]->ucd_bus_addr = hwq->ucd_bus_addr[i];
ucdmem += sizeof(struct ufshci_utp_cmd_desc);
-
- hwq->act_tr[i] = tr;
}
return (0);
@@ -163,25 +143,16 @@ out:
return (ENOMEM);
}
-static bool
-ufshci_req_sdb_is_doorbell_cleared(struct ufshci_controller *ctrlr,
- uint8_t slot)
-{
- uint32_t utrldbr;
-
- utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
- return (!(utrldbr & (1 << slot)));
-}
-
int
ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue, uint32_t num_entries, bool is_task_mgmt)
{
struct ufshci_hw_queue *hwq;
- size_t allocsz;
+ size_t desc_size, alloc_size;
uint64_t queuemem_phys;
uint8_t *queuemem;
- int error;
+ struct ufshci_tracker *tr;
+ int i, error;
req_queue->ctrlr = ctrlr;
req_queue->is_task_mgmt = is_task_mgmt;
@@ -209,10 +180,13 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
* Descriptor (UTRD) or UTP Task Management Request Descriptor (UTMRD))
* Note: UTRD/UTMRD format is restricted to 1024-byte alignment.
*/
- allocsz = num_entries * sizeof(struct ufshci_utp_xfer_req_desc);
+ desc_size = is_task_mgmt ?
+ sizeof(struct ufshci_utp_task_mgmt_req_desc) :
+ sizeof(struct ufshci_utp_xfer_req_desc);
+ alloc_size = num_entries * desc_size;
error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 1024,
ctrlr->page_size, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
- allocsz, 1, allocsz, 0, NULL, NULL, &hwq->dma_tag_queue);
+ alloc_size, 1, alloc_size, 0, NULL, NULL, &hwq->dma_tag_queue);
if (error != 0) {
ufshci_printf(ctrlr, "request queue tag create failed %d\n",
error);
@@ -227,7 +201,7 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
}
if (bus_dmamap_load(hwq->dma_tag_queue, hwq->queuemem_map, queuemem,
- allocsz, ufshci_single_map, &queuemem_phys, 0) != 0) {
+ alloc_size, ufshci_single_map, &queuemem_phys, 0) != 0) {
ufshci_printf(ctrlr, "failed to load request queue memory\n");
bus_dmamem_free(hwq->dma_tag_queue, hwq->utrd,
hwq->queuemem_map);
@@ -238,13 +212,30 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
hwq->num_intr_handler_calls = 0;
hwq->num_retries = 0;
hwq->num_failures = 0;
- hwq->utrd = (struct ufshci_utp_xfer_req_desc *)queuemem;
hwq->req_queue_addr = queuemem_phys;
+ /* Allocate trackers */
+ hwq->act_tr = malloc_domainset(sizeof(struct ufshci_tracker *) *
+ req_queue->num_entries,
+ M_UFSHCI, DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
+
+ for (i = 0; i < req_queue->num_trackers; i++) {
+ tr = malloc_domainset(sizeof(struct ufshci_tracker), M_UFSHCI,
+ DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
+
+ tr->req_queue = req_queue;
+ tr->slot_num = i;
+ tr->slot_state = UFSHCI_SLOT_STATE_FREE;
+
+ hwq->act_tr[i] = tr;
+ }
+
if (is_task_mgmt) {
/* UTP Task Management Request (UTMR) */
uint32_t utmrlba, utmrlbau;
+ hwq->utmrd = (struct ufshci_utp_task_mgmt_req_desc *)queuemem;
+
utmrlba = hwq->req_queue_addr & 0xffffffff;
utmrlbau = hwq->req_queue_addr >> 32;
ufshci_mmio_write_4(ctrlr, utmrlba, utmrlba);
@@ -253,6 +244,8 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
/* UTP Transfer Request (UTR) */
uint32_t utrlba, utrlbau;
+ hwq->utrd = (struct ufshci_utp_xfer_req_desc *)queuemem;
+
/*
* Allocate physical memory for the command descriptor.
* UTP Transfer Request (UTR) requires memory for a separate
@@ -284,10 +277,22 @@ ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue)
{
struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+ struct ufshci_tracker *tr;
+ int i;
if (!req_queue->is_task_mgmt)
ufshci_req_sdb_cmd_desc_destroy(&ctrlr->transfer_req_queue);
+ for (i = 0; i < req_queue->num_trackers; i++) {
+ tr = hwq->act_tr[i];
+ free(tr, M_UFSHCI);
+ }
+
+ if (hwq->act_tr) {
+ free(hwq->act_tr, M_UFSHCI);
+ hwq->act_tr = NULL;
+ }
+
if (hwq->utrd != NULL) {
bus_dmamap_unload(hwq->dma_tag_queue, hwq->queuemem_map);
bus_dmamem_free(hwq->dma_tag_queue, hwq->utrd,
@@ -389,7 +394,18 @@ ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue,
}
void
-ufshci_req_sdb_clear_cpl_ntf(struct ufshci_controller *ctrlr,
+ufshci_req_sdb_utmr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr)
+{
+ /*
+ * NOP
+ * UTP Task Management does not have a Completion Notification
+ * Register.
+ */
+}
+
+void
+ufshci_req_sdb_utr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr)
{
uint32_t utrlcnr;
@@ -399,7 +415,19 @@ ufshci_req_sdb_clear_cpl_ntf(struct ufshci_controller *ctrlr,
}
void
-ufshci_req_sdb_ring_doorbell(struct ufshci_controller *ctrlr,
+ufshci_req_sdb_utmr_ring_doorbell(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr)
+{
+ uint32_t utmrldbr = 0;
+
+ utmrldbr |= 1 << tr->slot_num;
+ ufshci_mmio_write_4(ctrlr, utmrldbr, utmrldbr);
+
+ tr->req_queue->hwq[UFSHCI_SDB_Q].num_cmds++;
+}
+
+void
+ufshci_req_sdb_utr_ring_doorbell(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr)
{
uint32_t utrldbr = 0;
@@ -408,9 +436,26 @@ ufshci_req_sdb_ring_doorbell(struct ufshci_controller *ctrlr,
ufshci_mmio_write_4(ctrlr, utrldbr, utrldbr);
tr->req_queue->hwq[UFSHCI_SDB_Q].num_cmds++;
+}
+
+bool
+ufshci_req_sdb_utmr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
+ uint8_t slot)
+{
+ uint32_t utmrldbr;
+
+ utmrldbr = ufshci_mmio_read_4(ctrlr, utmrldbr);
+ return (!(utmrldbr & (1 << slot)));
+}
- // utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
- // printf("DB=0x%08x\n", utrldbr);
+bool
+ufshci_req_sdb_utr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
+ uint8_t slot)
+{
+ uint32_t utrldbr;
+
+ utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
+ return (!(utrldbr & (1 << slot)));
}
bool
@@ -435,7 +480,7 @@ ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue)
* is cleared.
*/
if (tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED &&
- ufshci_req_sdb_is_doorbell_cleared(req_queue->ctrlr,
+ req_queue->qops.is_doorbell_cleared(req_queue->ctrlr,
slot)) {
ufshci_req_queue_complete_tracker(tr);
done = true;
diff --git a/sys/dev/ufshci/ufshci_sysctl.c b/sys/dev/ufshci/ufshci_sysctl.c
index 5e5069f12e5f..56bc06b13f3c 100644
--- a/sys/dev/ufshci/ufshci_sysctl.c
+++ b/sys/dev/ufshci/ufshci_sysctl.c
@@ -152,6 +152,7 @@ ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr)
struct sysctl_ctx_list *ctrlr_ctx;
struct sysctl_oid *ctrlr_tree, *que_tree, *ioq_tree;
struct sysctl_oid_list *ctrlr_list, *ioq_list;
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
#define QUEUE_NAME_LENGTH 16
char queue_name[QUEUE_NAME_LENGTH];
int i;
@@ -177,6 +178,25 @@ ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr)
SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "cap", CTLFLAG_RD,
&ctrlr->cap, 0, "Number of I/O queue pairs");
+ SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_enabled",
+ CTLFLAG_RD, &dev->is_wb_enabled, 0, "WriteBooster enable/disable");
+
+ SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_flush_enabled",
+ CTLFLAG_RD, &dev->is_wb_flush_enabled, 0,
+ "WriteBooster flush enable/disable");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_buffer_type",
+ CTLFLAG_RD, &dev->wb_buffer_type, 0, "WriteBooster type");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_buffer_size_mb",
+ CTLFLAG_RD, &dev->wb_buffer_size_mb, 0,
+ "WriteBooster buffer size in MB");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ "wb_user_space_config_option", CTLFLAG_RD,
+ &dev->wb_user_space_config_option, 0,
+ "WriteBooster preserve user space mode");
+
SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "timeout_period",
CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, &ctrlr->timeout_period,
0, ufshci_sysctl_timeout_period, "IU",
diff --git a/sys/dev/ufshci/ufshci_uic_cmd.c b/sys/dev/ufshci/ufshci_uic_cmd.c
index 2c5f635dc11e..b9c867ff7065 100644
--- a/sys/dev/ufshci/ufshci_uic_cmd.c
+++ b/sys/dev/ufshci/ufshci_uic_cmd.c
@@ -14,7 +14,7 @@
int
ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr)
{
- uint32_t is;
+ uint32_t is, hcs;
int timeout;
/* Wait for the IS flag to change */
@@ -40,6 +40,15 @@ ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr)
DELAY(10);
}
+ /* Check HCS power mode change request status */
+ hcs = ufshci_mmio_read_4(ctrlr, hcs);
+ if (UFSHCIV(UFSHCI_HCS_REG_UPMCRS, hcs) != 0x01) {
+ ufshci_printf(ctrlr,
+ "Power mode change request status error: 0x%x\n",
+ UFSHCIV(UFSHCI_HCS_REG_UPMCRS, hcs));
+ return (ENXIO);
+ }
+
return (0);
}
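For reference, a sketch of the HCS.UPMCRS codes this check interprets; the names and values follow the UFSHCI specification and should be verified against the targeted revision:

    enum upmcrs_code {                   /* hypothetical names */
        UPMCRS_PWR_OK          = 0x00,
        UPMCRS_PWR_LOCAL       = 0x01,   /* the only success value above */
        UPMCRS_PWR_REMOTE      = 0x02,
        UPMCRS_PWR_BUSY        = 0x03,
        UPMCRS_PWR_ERROR_CAP   = 0x04,
        UPMCRS_PWR_FATAL_ERROR = 0x05,
    };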
@@ -112,6 +121,7 @@ ufshci_uic_send_cmd(struct ufshci_controller *ctrlr,
struct ufshci_uic_cmd *uic_cmd, uint32_t *return_value)
{
int error;
+ uint32_t config_result_code;
mtx_lock(&ctrlr->uic_cmd_lock);
@@ -134,6 +144,13 @@ ufshci_uic_send_cmd(struct ufshci_controller *ctrlr,
if (error)
return (ENXIO);
+ config_result_code = ufshci_mmio_read_4(ctrlr, ucmdarg2);
+ if (config_result_code) {
+ ufshci_printf(ctrlr,
+ "Failed to send UIC command. (config result code = 0x%x)\n",
+ config_result_code);
+ }
+
if (return_value != NULL)
*return_value = ufshci_mmio_read_4(ctrlr, ucmdarg3);
diff --git a/sys/dev/usb/controller/xhci.c b/sys/dev/usb/controller/xhci.c
index 5be592512196..788b2b718062 100644
--- a/sys/dev/usb/controller/xhci.c
+++ b/sys/dev/usb/controller/xhci.c
@@ -156,6 +156,7 @@ struct xhci_std_temp {
static void xhci_do_poll(struct usb_bus *);
static void xhci_device_done(struct usb_xfer *, usb_error_t);
+static void xhci_get_xecp(struct xhci_softc *);
static void xhci_root_intr(struct xhci_softc *);
static void xhci_free_device_ext(struct usb_device *);
static struct xhci_endpoint_ext *xhci_get_endpoint_ext(struct usb_device *,
@@ -566,6 +567,8 @@ xhci_init(struct xhci_softc *sc, device_t self, uint8_t dma32)
device_printf(self, "%d bytes context size, %d-bit DMA\n",
sc->sc_ctx_is_64_byte ? 64 : 32, (int)sc->sc_bus.dma_bits);
+ xhci_get_xecp(sc);
+
/* enable 64Kbyte control endpoint quirk */
sc->sc_bus.control_ep_quirk = (xhcictlquirk ? 1 : 0);
@@ -654,6 +657,88 @@ xhci_uninit(struct xhci_softc *sc)
}
static void
+xhci_get_xecp(struct xhci_softc *sc)
+{
+ uint32_t hccp1;
+ uint32_t eec;
+ uint32_t eecp;
+ bool first = true;
+
+ hccp1 = XREAD4(sc, capa, XHCI_HCSPARAMS0);
+
+ if (XHCI_HCS0_XECP(hccp1) == 0) {
+ device_printf(sc->sc_bus.parent,
+ "xECP: no capabilities found\n");
+ return;
+ }
+
+ /*
+ * Parse the xECP Capabilities table and print known caps.
+ * Implemented, vendor-defined, and reserved xECP capability values
+ * are documented in Table 7.2 of the eXtensible Host Controller
+ * Interface for Universal Serial Bus (xHCI), Rev 1.2b, 2023.
+ */
+ device_printf(sc->sc_bus.parent, "xECP capabilities <");
+
+ eec = -1;
+ for (eecp = XHCI_HCS0_XECP(hccp1) << 2;
+ eecp != 0 && XHCI_XECP_NEXT(eec) != 0;
+ eecp += XHCI_XECP_NEXT(eec) << 2) {
+ eec = XREAD4(sc, capa, eecp);
+
+ uint8_t xecpid = XHCI_XECP_ID(eec);
+
+ if ((xecpid >= 11 && xecpid <= 16) ||
+ (xecpid >= 19 && xecpid <= 191)) {
+ if (!first)
+ printf(",");
+ printf("RES(%x)", xecpid);
+ } else if (xecpid > 191) {
+ if (!first)
+ printf(",");
+ printf("VEND(%x)", xecpid);
+ } else {
+ if (!first)
+ printf(",");
+ switch (xecpid) {
+ case XHCI_ID_USB_LEGACY:
+ printf("LEGACY");
+ break;
+ case XHCI_ID_PROTOCOLS:
+ printf("PROTO");
+ break;
+ case XHCI_ID_POWER_MGMT:
+ printf("POWER");
+ break;
+ case XHCI_ID_VIRTUALIZATION:
+ printf("VIRT");
+ break;
+ case XHCI_ID_MSG_IRQ:
+ printf("MSG IRQ");
+ break;
+ case XHCI_ID_USB_LOCAL_MEM:
+ printf("LOCAL MEM");
+ break;
+ case XHCI_ID_USB_DEBUG:
+ printf("DEBUG");
+ break;
+ case XHCI_ID_EXT_MSI:
+ printf("EXT MSI");
+ break;
+ case XHCI_ID_USB3_TUN:
+ printf("TUN");
+ break;
+ }
+ }
+ first = false;
+ }
+ printf(">\n");
+}
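The traversal above depends on xECP offsets being expressed in 32-bit dwords, hence the << 2 conversions. A stripped-down sketch of the same walk, using only macros already present in this file:

    uint32_t off, cap;

    off = XHCI_HCS0_XECP(hccp1) << 2;        /* dwords to bytes */
    while (off != 0) {
        cap = XREAD4(sc, capa, off);
        /* XHCI_XECP_ID(cap) identifies this capability */
        if (XHCI_XECP_NEXT(cap) == 0)
            break;
        off += XHCI_XECP_NEXT(cap) << 2;     /* next is relative */
    }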
+
+static void
xhci_set_hw_power_sleep(struct usb_bus *bus, uint32_t state)
{
struct xhci_softc *sc = XHCI_BUS2SC(bus);
diff --git a/sys/dev/usb/controller/xhcireg.h b/sys/dev/usb/controller/xhcireg.h
index 9d0b6e2f4b4b..821897155544 100644
--- a/sys/dev/usb/controller/xhcireg.h
+++ b/sys/dev/usb/controller/xhcireg.h
@@ -205,6 +205,11 @@
#define XHCI_ID_VIRTUALIZATION 0x0004
#define XHCI_ID_MSG_IRQ 0x0005
#define XHCI_ID_USB_LOCAL_MEM 0x0006
+/* values 7-9 are reserved */
+#define XHCI_ID_USB_DEBUG 0x000a
+/* values 11-16 are reserved */
+#define XHCI_ID_EXT_MSI 0x0011
+#define XHCI_ID_USB3_TUN 0x0012
/* XHCI register R/W wrappers */
#define XREAD1(sc, what, a) \
diff --git a/sys/dev/usb/input/uhid.c b/sys/dev/usb/input/uhid.c
index a31081663f0c..e2b97f5accac 100644
--- a/sys/dev/usb/input/uhid.c
+++ b/sys/dev/usb/input/uhid.c
@@ -40,8 +40,6 @@
* HID spec: http://www.usb.org/developers/devclass_docs/HID1_11.pdf
*/
-#include "opt_hid.h"
-
#include <sys/stdint.h>
#include <sys/stddef.h>
#include <sys/param.h>
@@ -928,11 +926,7 @@ static device_method_t uhid_methods[] = {
};
static driver_t uhid_driver = {
-#ifdef HIDRAW_MAKE_UHID_ALIAS
- .name = "hidraw",
-#else
.name = "uhid",
-#endif
.methods = uhid_methods,
.size = sizeof(struct uhid_softc),
};
diff --git a/sys/dev/usb/input/usbhid.c b/sys/dev/usb/input/usbhid.c
index df810012b3f8..cba3f34053e5 100644
--- a/sys/dev/usb/input/usbhid.c
+++ b/sys/dev/usb/input/usbhid.c
@@ -114,6 +114,7 @@ struct usbhid_xfer_ctx {
void *cb_ctx;
int waiters;
bool influx;
+ bool no_readahead;
};
struct usbhid_softc {
@@ -272,7 +273,7 @@ usbhid_intr_handler_cb(struct usbhid_xfer_ctx *xfer_ctx)
sc->sc_intr_handler(sc->sc_intr_ctx, xfer_ctx->buf,
xfer_ctx->req.intr.actlen);
- return (0);
+ return (xfer_ctx->no_readahead ? ECANCELED : 0);
}
static int
@@ -430,6 +431,7 @@ usbhid_intr_start(device_t dev, device_t child __unused)
.cb = usbhid_intr_handler_cb,
.cb_ctx = sc,
.buf = sc->sc_intr_buf,
+ .no_readahead = hid_test_quirk(&sc->sc_hw, HQ_NO_READAHEAD),
};
sc->sc_xfer_ctx[POLL_XFER(USBHID_INTR_IN_DT)] = (struct usbhid_xfer_ctx) {
.req.intr.maxlen =
@@ -705,6 +707,10 @@ usbhid_ioctl(device_t dev, device_t child __unused, unsigned long cmd,
if (error == 0)
ucr->ucr_actlen = UGETW(req.ctrl.wLength);
break;
+ case USB_GET_DEVICEINFO:
+ error = usbd_fill_deviceinfo(sc->sc_udev,
+ (struct usb_device_info *)data);
+ break;
default:
error = EINVAL;
}
diff --git a/sys/dev/usb/net/if_ipheth.c b/sys/dev/usb/net/if_ipheth.c
index f70113c53eb4..cfa800707391 100644
--- a/sys/dev/usb/net/if_ipheth.c
+++ b/sys/dev/usb/net/if_ipheth.c
@@ -55,6 +55,7 @@
#include <net/if_var.h>
#include <dev/usb/usb.h>
+#include <dev/usb/usb_cdc.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>
#include "usbdevs.h"
@@ -81,6 +82,9 @@ static uether_fn_t ipheth_start;
static uether_fn_t ipheth_setmulti;
static uether_fn_t ipheth_setpromisc;
+static ipheth_consumer_t ipheth_consume_read;
+static ipheth_consumer_t ipheth_consume_read_ncm;
+
#ifdef USB_DEBUG
static int ipheth_debug = 0;
@@ -96,7 +100,31 @@ static const struct usb_config ipheth_config[IPHETH_N_TRANSFER] = {
.direction = UE_DIR_RX,
.frames = IPHETH_RX_FRAMES_MAX,
.bufsize = (IPHETH_RX_FRAMES_MAX * MCLBYTES),
- .flags = {.short_frames_ok = 1,.short_xfer_ok = 1,.ext_buffer = 1,},
+ .flags = {.short_frames_ok = 1, .short_xfer_ok = 1, .ext_buffer = 1,},
+ .callback = ipheth_bulk_read_callback,
+ .timeout = 0, /* no timeout */
+ },
+
+ [IPHETH_BULK_TX] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_TX,
+ .frames = IPHETH_TX_FRAMES_MAX,
+ .bufsize = (IPHETH_TX_FRAMES_MAX * IPHETH_BUF_SIZE),
+ .flags = {.force_short_xfer = 1,},
+ .callback = ipheth_bulk_write_callback,
+ .timeout = IPHETH_TX_TIMEOUT,
+ },
+};
+
+static const struct usb_config ipheth_config_ncm[IPHETH_N_TRANSFER] = {
+ [IPHETH_BULK_RX] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_RX,
+ .frames = 1,
+ .bufsize = IPHETH_RX_NCM_BUF_SIZE,
+ .flags = {.short_frames_ok = 1, .short_xfer_ok = 1,},
.callback = ipheth_bulk_read_callback,
.timeout = 0, /* no timeout */
},
@@ -204,6 +232,21 @@ ipheth_get_mac_addr(struct ipheth_softc *sc)
return (0);
}
+static bool
+ipheth_enable_ncm(struct ipheth_softc *sc)
+{
+ struct usb_device_request req;
+
+ req.bmRequestType = UT_WRITE_VENDOR_INTERFACE;
+ req.bRequest = IPHETH_CMD_ENABLE_NCM;
+ USETW(req.wValue, 0);
+ req.wIndex[0] = sc->sc_iface_no;
+ req.wIndex[1] = 0;
+ USETW(req.wLength, 0);
+
+ return (usbd_do_request(sc->sc_ue.ue_udev, NULL, &req, NULL) == 0);
+}
+
static int
ipheth_probe(device_t dev)
{
@@ -221,6 +264,7 @@ ipheth_attach(device_t dev)
struct ipheth_softc *sc = device_get_softc(dev);
struct usb_ether *ue = &sc->sc_ue;
struct usb_attach_arg *uaa = device_get_ivars(dev);
+ const struct usb_config *config;
int error;
sc->sc_iface_no = uaa->info.bIfaceIndex;
@@ -235,18 +279,29 @@ ipheth_attach(device_t dev)
device_printf(dev, "Cannot set alternate setting\n");
goto detach;
}
- error = usbd_transfer_setup(uaa->device, &sc->sc_iface_no,
- sc->sc_xfer, ipheth_config, IPHETH_N_TRANSFER, sc, &sc->sc_mtx);
- if (error) {
- device_printf(dev, "Cannot setup USB transfers\n");
- goto detach;
- }
+
ue->ue_sc = sc;
ue->ue_dev = dev;
ue->ue_udev = uaa->device;
ue->ue_mtx = &sc->sc_mtx;
ue->ue_methods = &ipheth_ue_methods;
+ if (ipheth_enable_ncm(sc)) {
+ config = ipheth_config_ncm;
+ sc->is_ncm = true;
+ sc->consume = &ipheth_consume_read_ncm;
+ } else {
+ config = ipheth_config;
+ sc->consume = &ipheth_consume_read;
+ }
+
+ error = usbd_transfer_setup(uaa->device, &sc->sc_iface_no, sc->sc_xfer,
+ config, IPHETH_N_TRANSFER, sc, &sc->sc_mtx);
+ if (error) {
+ device_printf(dev, "Cannot setup USB transfers\n");
+ goto detach;
+ }
+
error = ipheth_get_mac_addr(sc);
if (error) {
device_printf(dev, "Cannot get MAC address\n");
@@ -389,12 +444,9 @@ ipheth_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
int actlen;
int aframes;
- usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);
-
- DPRINTFN(1, "\n");
-
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
+ usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);
DPRINTFN(11, "transfer complete: %u bytes in %u frames\n",
actlen, aframes);
@@ -471,53 +523,40 @@ ipheth_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
uint8_t x;
int actlen;
int aframes;
- int len;
-
- usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
-
+ usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);
DPRINTF("received %u bytes in %u frames\n", actlen, aframes);
- for (x = 0; x != aframes; x++) {
- m = sc->sc_rx_buf[x];
- sc->sc_rx_buf[x] = NULL;
- len = usbd_xfer_frame_len(xfer, x);
-
- if (len < (int)(sizeof(struct ether_header) +
- IPHETH_RX_ADJ)) {
- m_freem(m);
- continue;
- }
-
- m_adj(m, IPHETH_RX_ADJ);
-
- /* queue up mbuf */
- uether_rxmbuf(&sc->sc_ue, m, len - IPHETH_RX_ADJ);
- }
+ for (x = 0; x != aframes; x++)
+ sc->consume(xfer, x);
/* FALLTHROUGH */
case USB_ST_SETUP:
-
- for (x = 0; x != IPHETH_RX_FRAMES_MAX; x++) {
- if (sc->sc_rx_buf[x] == NULL) {
- m = uether_newbuf();
- if (m == NULL)
- goto tr_stall;
-
- /* cancel alignment for ethernet */
- m_adj(m, ETHER_ALIGN);
-
- sc->sc_rx_buf[x] = m;
- } else {
- m = sc->sc_rx_buf[x];
+ if (!sc->is_ncm) {
+ for (x = 0; x != IPHETH_RX_FRAMES_MAX; x++) {
+ if (sc->sc_rx_buf[x] == NULL) {
+ m = uether_newbuf();
+ if (m == NULL)
+ goto tr_stall;
+
+ /* cancel alignment for ethernet */
+ m_adj(m, ETHER_ALIGN);
+
+ sc->sc_rx_buf[x] = m;
+ } else {
+ m = sc->sc_rx_buf[x];
+ }
+ usbd_xfer_set_frame_data(xfer, x, m->m_data, m->m_len);
}
-
- usbd_xfer_set_frame_data(xfer, x, m->m_data, m->m_len);
+ usbd_xfer_set_frames(xfer, x);
+ } else {
+ usbd_xfer_set_frame_len(xfer, 0,
+ IPHETH_RX_NCM_BUF_SIZE);
+ usbd_xfer_set_frames(xfer, 1);
}
- /* set number of frames and start hardware */
- usbd_xfer_set_frames(xfer, x);
+
usbd_transfer_submit(xfer);
/* flush any received frames */
uether_rxflush(&sc->sc_ue);
@@ -539,3 +578,86 @@ ipheth_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
break;
}
}
+
+static void
+ipheth_consume_read(struct usb_xfer *xfer, int x)
+{
+ struct ipheth_softc *sc = usbd_xfer_softc(xfer);
+ struct mbuf *m = sc->sc_rx_buf[x];
+ int len;
+
+ sc->sc_rx_buf[x] = NULL;
+ len = usbd_xfer_frame_len(xfer, x);
+
+ if (len < (int)(sizeof(struct ether_header) + IPHETH_RX_ADJ)) {
+ m_freem(m);
+ return;
+ }
+
+ m_adj(m, IPHETH_RX_ADJ);
+
+ /* queue up mbuf */
+ uether_rxmbuf(&sc->sc_ue, m, len - IPHETH_RX_ADJ);
+}
+
+static void
+ipheth_consume_read_ncm(struct usb_xfer *xfer, int x)
+{
+ struct ipheth_softc *sc = usbd_xfer_softc(xfer);
+ struct usb_page_cache *pc = usbd_xfer_get_frame(xfer, 0);
+ struct ncm_data_cache ncm;
+ if_t ifp = uether_getifp(&sc->sc_ue);
+ struct mbuf *new_buf;
+ int i, actlen;
+ uint16_t dp_offset, dp_len;
+
+ usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
+
+ if (actlen < IPHETH_NCM_HEADER_SIZE)
+ return;
+
+ usbd_copy_out(pc, 0, &ncm.hdr, sizeof(ncm.hdr));
+
+ if (UGETDW(ncm.hdr.dwSignature) != 0x484D434E)
+ return;
+
+ /* On iOS the datagram pointer table immediately follows the header */
+ if (UGETW(ncm.hdr.wDptIndex) != (int)(sizeof(struct usb_ncm16_hdr)))
+ return;
+
+ usbd_copy_out(pc, UGETW(ncm.hdr.wDptIndex), &ncm.dpt, sizeof(ncm.dpt));
+
+ if (UGETDW(ncm.dpt.dwSignature) != 0x304D434E)
+ return;
+
+ usbd_copy_out(pc, UGETW(ncm.hdr.wDptIndex) + sizeof(ncm.dpt), &ncm.dp,
+ sizeof(ncm.dp));
+
+ for (i = 0; i < IPHETH_NCM_DPT_DP_NUM; ++i) {
+ dp_offset = UGETW(ncm.dp[i].wFrameIndex);
+ dp_len = UGETW(ncm.dp[i].wFrameLength);
+
+ /* (3.3.1 USB CDC NCM spec v1.0) */
+ if (dp_offset == 0 && dp_len == 0)
+ break;
+
+ if (dp_offset < IPHETH_NCM_HEADER_SIZE || dp_offset >= actlen ||
+ actlen < (dp_len + dp_offset) ||
+ dp_len < sizeof(struct ether_header)) {
+ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ continue;
+ }
+ if (dp_len > (MCLBYTES - ETHER_ALIGN)) {
+ if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
+ continue;
+ }
+
+ new_buf = uether_newbuf();
+ if (new_buf == NULL) {
+ if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
+ continue;
+ }
+ usbd_copy_out(pc, dp_offset, new_buf->m_data, dp_len);
+ uether_rxmbuf(&sc->sc_ue, new_buf, dp_len);
+ }
+}
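For reference, the two signature constants checked in ipheth_consume_read_ncm() are the CDC NCM magic strings read as little-endian 32-bit words:

    /* 'N'=0x4E 'C'=0x43 'M'=0x4D 'H'=0x48  ->  0x484D434E (NTH16 header) */
    /* 'N'=0x4E 'C'=0x43 'M'=0x4D '0'=0x30  ->  0x304D434E (NDP16 table)  */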
diff --git a/sys/dev/usb/net/if_iphethvar.h b/sys/dev/usb/net/if_iphethvar.h
index 203bb96b6f22..d637e8f67d01 100644
--- a/sys/dev/usb/net/if_iphethvar.h
+++ b/sys/dev/usb/net/if_iphethvar.h
@@ -41,6 +41,7 @@
#define IPHETH_BUF_SIZE 1514
#define IPHETH_TX_TIMEOUT 5000 /* ms */
+#define IPHETH_RX_NCM_BUF_SIZE 65536
#define IPHETH_RX_FRAMES_MAX 1
#define IPHETH_TX_FRAMES_MAX 8
@@ -55,10 +56,20 @@
#define IPHETH_CTRL_TIMEOUT 5000 /* ms */
#define IPHETH_CMD_GET_MACADDR 0x00
+#define IPHETH_CMD_ENABLE_NCM 0x04
#define IPHETH_CMD_CARRIER_CHECK 0x45
#define IPHETH_CARRIER_ON 0x04
+#define IPHETH_NCM_DPT_DP_NUM 22
+#define IPHETH_NCM_DPT_HEADER_SIZE \
+ (sizeof(struct usb_ncm16_dpt) + \
+ IPHETH_NCM_DPT_DP_NUM * sizeof(struct usb_ncm16_dp))
+#define IPHETH_NCM_HEADER_SIZE \
+ (sizeof(struct usb_ncm16_hdr) + IPHETH_NCM_DPT_HEADER_SIZE)
+
+typedef void (ipheth_consumer_t)(struct usb_xfer *xfer, int idx);
+
enum {
IPHETH_BULK_TX,
IPHETH_BULK_RX,
@@ -76,6 +87,16 @@ struct ipheth_softc {
uint8_t sc_data[IPHETH_CTRL_BUF_SIZE];
uint8_t sc_iface_no;
uint8_t sc_carrier_on;
+
+ bool is_ncm;
+
+ ipheth_consumer_t *consume;
+};
+
+struct ncm_data_cache {
+ struct usb_ncm16_hdr hdr;
+ struct usb_ncm16_dpt dpt;
+ struct usb_ncm16_dp dp[IPHETH_NCM_DPT_DP_NUM];
};
#define IPHETH_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
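A worked check of the header-size arithmetic, assuming the standard CDC NCM16 layouts (12-byte NTH16 header, 8-byte NDP16 header, 4 bytes per datagram pointer): IPHETH_NCM_DPT_HEADER_SIZE = 8 + 22 * 4 = 96 bytes, so IPHETH_NCM_HEADER_SIZE = 12 + 96 = 108 bytes, comfortably below the 64 KiB IPHETH_RX_NCM_BUF_SIZE receive buffer.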
diff --git a/sys/dev/usb/net/if_umb.c b/sys/dev/usb/net/if_umb.c
index 5703bc03dd39..b1082b117259 100644
--- a/sys/dev/usb/net/if_umb.c
+++ b/sys/dev/usb/net/if_umb.c
@@ -177,9 +177,7 @@ static void umb_ncm_setup(struct umb_softc *, struct usb_config *);
static void umb_close_bulkpipes(struct umb_softc *);
static int umb_ioctl(if_t , u_long, caddr_t);
static void umb_init(void *);
-#ifdef DEV_NETMAP
static void umb_input(if_t , struct mbuf *);
-#endif
static int umb_output(if_t , struct mbuf *,
const struct sockaddr *, struct route *);
static void umb_start(if_t );
@@ -585,9 +583,7 @@ umb_attach_task(struct usb_proc_msg *msg)
if_setsoftc(ifp, sc);
if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_POINTOPOINT);
if_setioctlfn(ifp, umb_ioctl);
-#ifdef DEV_NETMAP
if_setinputfn(ifp, umb_input);
-#endif
if_setoutputfn(ifp, umb_output);
if_setstartfn(ifp, umb_start);
if_setinitfn(ifp, umb_init);
@@ -666,7 +662,7 @@ umb_ncm_setup(struct umb_softc *sc, struct usb_config * config)
struct ncm_ntb_parameters np;
usb_error_t error;
- /* Query NTB tranfers sizes */
+ /* Query NTB transfer sizes */
req.bmRequestType = UT_READ_CLASS_INTERFACE;
req.bRequest = NCM_GET_NTB_PARAMETERS;
USETW(req.wValue, 0);
diff --git a/sys/dev/usb/usb_device.c b/sys/dev/usb/usb_device.c
index 60c2d6745b3f..f0989972f49f 100644
--- a/sys/dev/usb/usb_device.c
+++ b/sys/dev/usb/usb_device.c
@@ -3111,3 +3111,51 @@ usbd_get_endpoint_mode(struct usb_device *udev, struct usb_endpoint *ep)
{
return (ep->ep_mode);
}
+
+/*------------------------------------------------------------------------*
+ * usbd_fill_deviceinfo
+ *
+ * This function dumps information about an USB device to the
+ * structure pointed to by the "di" argument.
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+int
+usbd_fill_deviceinfo(struct usb_device *udev, struct usb_device_info *di)
+{
+ struct usb_device *hub;
+
+ bzero(di, sizeof(di[0]));
+
+ di->udi_bus = device_get_unit(udev->bus->bdev);
+ di->udi_addr = udev->address;
+ di->udi_index = udev->device_index;
+ strlcpy(di->udi_serial, usb_get_serial(udev), sizeof(di->udi_serial));
+ strlcpy(di->udi_vendor, usb_get_manufacturer(udev), sizeof(di->udi_vendor));
+ strlcpy(di->udi_product, usb_get_product(udev), sizeof(di->udi_product));
+ usb_printbcd(di->udi_release, sizeof(di->udi_release),
+ UGETW(udev->ddesc.bcdDevice));
+ di->udi_vendorNo = UGETW(udev->ddesc.idVendor);
+ di->udi_productNo = UGETW(udev->ddesc.idProduct);
+ di->udi_releaseNo = UGETW(udev->ddesc.bcdDevice);
+ di->udi_class = udev->ddesc.bDeviceClass;
+ di->udi_subclass = udev->ddesc.bDeviceSubClass;
+ di->udi_protocol = udev->ddesc.bDeviceProtocol;
+ di->udi_config_no = udev->curr_config_no;
+ di->udi_config_index = udev->curr_config_index;
+ di->udi_power = udev->flags.self_powered ? 0 : udev->power;
+ di->udi_speed = udev->speed;
+ di->udi_mode = udev->flags.usb_mode;
+ di->udi_power_mode = udev->power_mode;
+ di->udi_suspended = udev->flags.peer_suspended;
+
+ hub = udev->parent_hub;
+ if (hub) {
+ di->udi_hubaddr = hub->address;
+ di->udi_hubindex = hub->device_index;
+ di->udi_hubport = udev->port_no;
+ }
+ return (0);
+}
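
A hypothetical in-kernel caller of the new helper might look like the sketch
below (illustrative only; the locking context a real caller needs is not
shown, and example_print_device is not part of the change):

    /* Report basic facts about a device via usbd_fill_deviceinfo(). */
    static void
    example_print_device(struct usb_device *udev)
    {
            struct usb_device_info di;

            if (usbd_fill_deviceinfo(udev, &di) != 0)
                    return;
            /* udi_power is zero for self-powered devices. */
            printf("ugen%d.%d: <%s %s> rev %s, %s-powered\n",
                di.udi_bus, di.udi_addr, di.udi_vendor, di.udi_product,
                di.udi_release, di.udi_power ? "bus" : "self");
    }
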
diff --git a/sys/dev/usb/usb_generic.c b/sys/dev/usb/usb_generic.c
index c0af27d77e5d..ccb0b2184ec4 100644
--- a/sys/dev/usb/usb_generic.c
+++ b/sys/dev/usb/usb_generic.c
@@ -831,42 +831,7 @@ ugen_get_iface_driver(struct usb_fifo *f, struct usb_gen_descriptor *ugd)
int
ugen_fill_deviceinfo(struct usb_fifo *f, struct usb_device_info *di)
{
- struct usb_device *udev;
- struct usb_device *hub;
-
- udev = f->udev;
-
- bzero(di, sizeof(di[0]));
-
- di->udi_bus = device_get_unit(udev->bus->bdev);
- di->udi_addr = udev->address;
- di->udi_index = udev->device_index;
- strlcpy(di->udi_serial, usb_get_serial(udev), sizeof(di->udi_serial));
- strlcpy(di->udi_vendor, usb_get_manufacturer(udev), sizeof(di->udi_vendor));
- strlcpy(di->udi_product, usb_get_product(udev), sizeof(di->udi_product));
- usb_printbcd(di->udi_release, sizeof(di->udi_release),
- UGETW(udev->ddesc.bcdDevice));
- di->udi_vendorNo = UGETW(udev->ddesc.idVendor);
- di->udi_productNo = UGETW(udev->ddesc.idProduct);
- di->udi_releaseNo = UGETW(udev->ddesc.bcdDevice);
- di->udi_class = udev->ddesc.bDeviceClass;
- di->udi_subclass = udev->ddesc.bDeviceSubClass;
- di->udi_protocol = udev->ddesc.bDeviceProtocol;
- di->udi_config_no = udev->curr_config_no;
- di->udi_config_index = udev->curr_config_index;
- di->udi_power = udev->flags.self_powered ? 0 : udev->power;
- di->udi_speed = udev->speed;
- di->udi_mode = udev->flags.usb_mode;
- di->udi_power_mode = udev->power_mode;
- di->udi_suspended = udev->flags.peer_suspended;
-
- hub = udev->parent_hub;
- if (hub) {
- di->udi_hubaddr = hub->address;
- di->udi_hubindex = hub->device_index;
- di->udi_hubport = udev->port_no;
- }
- return (0);
+ return (usbd_fill_deviceinfo(f->udev, di));
}
int
diff --git a/sys/dev/usb/usb_hub.c b/sys/dev/usb/usb_hub.c
index e3509862ef54..ee9d8ab0c9bb 100644
--- a/sys/dev/usb/usb_hub.c
+++ b/sys/dev/usb/usb_hub.c
@@ -954,7 +954,8 @@ done:
* packet. This function is called having the "bus_mtx" locked.
*------------------------------------------------------------------------*/
void
-uhub_root_intr(struct usb_bus *bus, const uint8_t *ptr, uint8_t len)
+uhub_root_intr(struct usb_bus *bus,
+ const uint8_t *ptr __unused, uint8_t len __unused)
{
USB_BUS_LOCK_ASSERT(bus, MA_OWNED);
diff --git a/sys/dev/usb/usbdi.h b/sys/dev/usb/usbdi.h
index 08d130aa2868..0826d9f078c4 100644
--- a/sys/dev/usb/usbdi.h
+++ b/sys/dev/usb/usbdi.h
@@ -38,6 +38,7 @@ struct usb_process;
struct usb_proc_msg;
struct usb_mbuf;
struct usb_fs_privdata;
+struct usb_device_info;
struct mbuf;
typedef enum { /* keep in sync with usb_errstr_table */
@@ -587,6 +588,8 @@ usb_error_t usbd_set_endpoint_mode(struct usb_device *udev,
struct usb_endpoint *ep, uint8_t ep_mode);
uint8_t usbd_get_endpoint_mode(struct usb_device *udev,
struct usb_endpoint *ep);
+int usbd_fill_deviceinfo(struct usb_device *udev,
+ struct usb_device_info *di);
const struct usb_device_id *usbd_lookup_id_by_info(
const struct usb_device_id *id, usb_size_t sizeof_id,
diff --git a/sys/dev/virtio/mmio/virtio_mmio.c b/sys/dev/virtio/mmio/virtio_mmio.c
index 5a81c8a24779..fe531fced998 100644
--- a/sys/dev/virtio/mmio/virtio_mmio.c
+++ b/sys/dev/virtio/mmio/virtio_mmio.c
@@ -53,7 +53,6 @@
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/mmio/virtio_mmio.h>
-#include "virtio_mmio_if.h"
#include "virtio_bus_if.h"
#include "virtio_if.h"
@@ -79,7 +78,6 @@ static int vtmmio_alloc_virtqueues(device_t, int,
struct vq_alloc_info *);
static int vtmmio_setup_intr(device_t, enum intr_type);
static void vtmmio_stop(device_t);
-static void vtmmio_poll(device_t);
static int vtmmio_reinit(device_t, uint64_t);
static void vtmmio_reinit_complete(device_t);
static void vtmmio_notify_virtqueue(device_t, uint16_t, bus_size_t);
@@ -104,29 +102,11 @@ static void vtmmio_vq_intr(void *);
* I/O port read/write wrappers.
*/
#define vtmmio_write_config_1(sc, o, v) \
-do { \
- if (sc->platform != NULL) \
- VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v)); \
- bus_write_1((sc)->res[0], (o), (v)); \
- if (sc->platform != NULL) \
- VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \
-} while (0)
+ bus_write_1((sc)->res[0], (o), (v))
#define vtmmio_write_config_2(sc, o, v) \
-do { \
- if (sc->platform != NULL) \
- VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v)); \
- bus_write_2((sc)->res[0], (o), (v)); \
- if (sc->platform != NULL) \
- VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \
-} while (0)
+ bus_write_2((sc)->res[0], (o), (v))
#define vtmmio_write_config_4(sc, o, v) \
-do { \
- if (sc->platform != NULL) \
- VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v)); \
- bus_write_4((sc)->res[0], (o), (v)); \
- if (sc->platform != NULL) \
- VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \
-} while (0)
+ bus_write_4((sc)->res[0], (o), (v))
#define vtmmio_read_config_1(sc, o) \
bus_read_1((sc)->res[0], (o))
@@ -157,7 +137,6 @@ static device_method_t vtmmio_methods[] = {
DEVMETHOD(virtio_bus_alloc_virtqueues, vtmmio_alloc_virtqueues),
DEVMETHOD(virtio_bus_setup_intr, vtmmio_setup_intr),
DEVMETHOD(virtio_bus_stop, vtmmio_stop),
- DEVMETHOD(virtio_bus_poll, vtmmio_poll),
DEVMETHOD(virtio_bus_reinit, vtmmio_reinit),
DEVMETHOD(virtio_bus_reinit_complete, vtmmio_reinit_complete),
DEVMETHOD(virtio_bus_notify_vq, vtmmio_notify_virtqueue),
@@ -220,19 +199,9 @@ vtmmio_setup_intr(device_t dev, enum intr_type type)
{
struct vtmmio_softc *sc;
int rid;
- int err;
sc = device_get_softc(dev);
- if (sc->platform != NULL) {
- err = VIRTIO_MMIO_SETUP_INTR(sc->platform, sc->dev,
- vtmmio_vq_intr, sc);
- if (err == 0) {
- /* Okay we have backend-specific interrupts */
- return (0);
- }
- }
-
rid = 0;
sc->res[1] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_ACTIVE);
@@ -597,17 +566,6 @@ vtmmio_stop(device_t dev)
vtmmio_reset(device_get_softc(dev));
}
-static void
-vtmmio_poll(device_t dev)
-{
- struct vtmmio_softc *sc;
-
- sc = device_get_softc(dev);
-
- if (sc->platform != NULL)
- VIRTIO_MMIO_POLL(sc->platform);
-}
-
static int
vtmmio_reinit(device_t dev, uint64_t features)
{
diff --git a/sys/dev/virtio/mmio/virtio_mmio.h b/sys/dev/virtio/mmio/virtio_mmio.h
index ac6a96c1c7fe..edcbf0519acc 100644
--- a/sys/dev/virtio/mmio/virtio_mmio.h
+++ b/sys/dev/virtio/mmio/virtio_mmio.h
@@ -37,7 +37,6 @@ struct vtmmio_virtqueue;
struct vtmmio_softc {
device_t dev;
- device_t platform;
struct resource *res[2];
uint64_t vtmmio_features;
diff --git a/sys/dev/virtio/mmio/virtio_mmio_fdt.c b/sys/dev/virtio/mmio/virtio_mmio_fdt.c
index 7fba8aad8db8..bb9ea8efbaeb 100644
--- a/sys/dev/virtio/mmio/virtio_mmio_fdt.c
+++ b/sys/dev/virtio/mmio/virtio_mmio_fdt.c
@@ -63,12 +63,10 @@
#include <dev/virtio/mmio/virtio_mmio.h>
static int vtmmio_fdt_probe(device_t);
-static int vtmmio_fdt_attach(device_t);
static device_method_t vtmmio_fdt_methods[] = {
/* Device interface. */
DEVMETHOD(device_probe, vtmmio_fdt_probe),
- DEVMETHOD(device_attach, vtmmio_fdt_attach),
DEVMETHOD_END
};
@@ -93,48 +91,3 @@ vtmmio_fdt_probe(device_t dev)
return (vtmmio_probe(dev));
}
-
-static int
-vtmmio_setup_platform(device_t dev, struct vtmmio_softc *sc)
-{
- phandle_t platform_node;
- struct fdt_ic *ic;
- phandle_t xref;
- phandle_t node;
-
- sc->platform = NULL;
-
- if ((node = ofw_bus_get_node(dev)) == -1)
- return (ENXIO);
-
- if (OF_searchencprop(node, "platform", &xref,
- sizeof(xref)) == -1) {
- return (ENXIO);
- }
-
- platform_node = OF_node_from_xref(xref);
-
- SLIST_FOREACH(ic, &fdt_ic_list_head, fdt_ics) {
- if (ic->iph == platform_node) {
- sc->platform = ic->dev;
- break;
- }
- }
-
- if (sc->platform == NULL) {
- /* No platform-specific device. Ignore it. */
- }
-
- return (0);
-}
-
-static int
-vtmmio_fdt_attach(device_t dev)
-{
- struct vtmmio_softc *sc;
-
- sc = device_get_softc(dev);
- vtmmio_setup_platform(dev, sc);
-
- return (vtmmio_attach(dev));
-}
diff --git a/sys/dev/virtio/mmio/virtio_mmio_if.m b/sys/dev/virtio/mmio/virtio_mmio_if.m
deleted file mode 100644
index baebbd9a0b1c..000000000000
--- a/sys/dev/virtio/mmio/virtio_mmio_if.m
+++ /dev/null
@@ -1,99 +0,0 @@
-#-
-# Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
-# All rights reserved.
-#
-# This software was developed by SRI International and the University of
-# Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
-# ("CTSRD"), as part of the DARPA CRASH research programme.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-# SUCH DAMAGE.
-#
-#
-
-#include <sys/types.h>
-
-#
-# This is optional interface to virtio mmio backend.
-# Useful when backend is implemented not by the hardware but software, e.g.
-# by using another cpu core.
-#
-
-INTERFACE virtio_mmio;
-
-CODE {
- static int
- virtio_mmio_prewrite(device_t dev, size_t offset, int val)
- {
-
- return (1);
- }
-
- static int
- virtio_mmio_note(device_t dev, size_t offset, int val)
- {
-
- return (1);
- }
-
- static int
- virtio_mmio_setup_intr(device_t dev, device_t mmio_dev,
- void *handler, void *ih_user)
- {
-
- return (1);
- }
-};
-
-#
-# Inform backend we are going to write data at offset.
-#
-METHOD int prewrite {
- device_t dev;
- size_t offset;
- int val;
-} DEFAULT virtio_mmio_prewrite;
-
-#
-# Inform backend we have data written to offset.
-#
-METHOD int note {
- device_t dev;
- size_t offset;
- int val;
-} DEFAULT virtio_mmio_note;
-
-#
-# Inform backend we are going to poll virtqueue.
-#
-METHOD int poll {
- device_t dev;
-};
-
-#
-# Setup backend-specific interrupts.
-#
-METHOD int setup_intr {
- device_t dev;
- device_t mmio_dev;
- void *handler;
- void *ih_user;
-} DEFAULT virtio_mmio_setup_intr;
diff --git a/sys/dev/virtio/network/if_vtnet.c b/sys/dev/virtio/network/if_vtnet.c
index 2ff9be9680b8..528ff3372097 100644
--- a/sys/dev/virtio/network/if_vtnet.c
+++ b/sys/dev/virtio/network/if_vtnet.c
@@ -28,6 +28,9 @@
/* Driver for VirtIO network devices. */
+#include "opt_inet.h"
+#include "opt_inet6.h"
+
#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
@@ -82,9 +85,6 @@
#include <dev/virtio/network/if_vtnetvar.h>
#include "virtio_if.h"
-#include "opt_inet.h"
-#include "opt_inet6.h"
-
#if defined(INET) || defined(INET6)
#include <machine/in_cksum.h>
#endif
@@ -133,12 +133,14 @@ static int vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *,
static int vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
static int vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
static int vtnet_rxq_new_buf(struct vtnet_rxq *);
+#if defined(INET) || defined(INET6)
static int vtnet_rxq_csum_needs_csum(struct vtnet_rxq *, struct mbuf *,
- uint16_t, int, struct virtio_net_hdr *);
-static int vtnet_rxq_csum_data_valid(struct vtnet_rxq *, struct mbuf *,
- uint16_t, int, struct virtio_net_hdr *);
+ bool, int, struct virtio_net_hdr *);
+static void vtnet_rxq_csum_data_valid(struct vtnet_rxq *, struct mbuf *,
+ int);
static int vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
struct virtio_net_hdr *);
+#endif
static void vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);
static void vtnet_rxq_discard_buf(struct vtnet_rxq *, struct mbuf *);
static int vtnet_rxq_merged_eof(struct vtnet_rxq *, struct mbuf *, int);
@@ -1178,6 +1180,7 @@ vtnet_setup_interface(struct vtnet_softc *sc)
if (sc->vtnet_max_mtu >= ETHERMTU_JUMBO)
if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU, 0);
if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
+ if_setcapabilitiesbit(ifp, IFCAP_HWSTATS, 0);
/*
* Capabilities after here are not enabled by default.
@@ -1760,164 +1763,165 @@ vtnet_rxq_new_buf(struct vtnet_rxq *rxq)
return (error);
}
+#if defined(INET) || defined(INET6)
static int
-vtnet_rxq_csum_needs_csum(struct vtnet_rxq *rxq, struct mbuf *m, uint16_t etype,
- int hoff, struct virtio_net_hdr *hdr)
+vtnet_rxq_csum_needs_csum(struct vtnet_rxq *rxq, struct mbuf *m, bool isipv6,
+ int protocol, struct virtio_net_hdr *hdr)
{
struct vtnet_softc *sc;
- int error;
- sc = rxq->vtnrx_sc;
+ /*
+ * The packet is likely from another VM on the same host, or from
+ * the host itself, which already performed the checksum offloading,
+ * so Tx/Rx is basically a memcpy and the checksum has little value
+ * at this point.
+ */
+
+ KASSERT(protocol == IPPROTO_TCP || protocol == IPPROTO_UDP,
+ ("%s: unsupported IP protocol %d", __func__, protocol));
/*
- * NEEDS_CSUM corresponds to Linux's CHECKSUM_PARTIAL, but FreeBSD does
- * not have an analogous CSUM flag. The checksum has been validated,
- * but is incomplete (TCP/UDP pseudo header).
- *
- * The packet is likely from another VM on the same host that itself
- * performed checksum offloading so Tx/Rx is basically a memcpy and
- * the checksum has little value.
- *
- * Default to receiving the packet as-is for performance reasons, but
- * this can cause issues if the packet is to be forwarded because it
- * does not contain a valid checksum. This patch may be helpful:
- * https://reviews.freebsd.org/D6611. In the meantime, have the driver
- * compute the checksum if requested.
- *
- * BMV: Need to add an CSUM_PARTIAL flag?
+ * If the user doesn't want us to fix up the checksum here by
+ * computing it, just pass the request along by setting the
+ * corresponding mbuf flag (e.g., CSUM_TCP).
*/
+ sc = rxq->vtnrx_sc;
if ((sc->vtnet_flags & VTNET_FLAG_FIXUP_NEEDS_CSUM) == 0) {
- error = vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr);
- return (error);
+ switch (protocol) {
+ case IPPROTO_TCP:
+ m->m_pkthdr.csum_flags |=
+ (isipv6 ? CSUM_TCP_IPV6 : CSUM_TCP);
+ break;
+ case IPPROTO_UDP:
+ m->m_pkthdr.csum_flags |=
+ (isipv6 ? CSUM_UDP_IPV6 : CSUM_UDP);
+ break;
+ }
+ m->m_pkthdr.csum_data = hdr->csum_offset;
+ return (0);
}
/*
* Compute the checksum in the driver so the packet will contain a
* valid checksum. The checksum is at csum_offset from csum_start.
*/
- switch (etype) {
-#if defined(INET) || defined(INET6)
- case ETHERTYPE_IP:
- case ETHERTYPE_IPV6: {
- int csum_off, csum_end;
- uint16_t csum;
-
- csum_off = hdr->csum_start + hdr->csum_offset;
- csum_end = csum_off + sizeof(uint16_t);
+ int csum_off, csum_end;
+ uint16_t csum;
- /* Assume checksum will be in the first mbuf. */
- if (m->m_len < csum_end || m->m_pkthdr.len < csum_end)
- return (1);
+ csum_off = hdr->csum_start + hdr->csum_offset;
+ csum_end = csum_off + sizeof(uint16_t);
- /*
- * Like in_delayed_cksum()/in6_delayed_cksum(), compute the
- * checksum and write it at the specified offset. We could
- * try to verify the packet: csum_start should probably
- * correspond to the start of the TCP/UDP header.
- *
- * BMV: Need to properly handle UDP with zero checksum. Is
- * the IPv4 header checksum implicitly validated?
- */
- csum = in_cksum_skip(m, m->m_pkthdr.len, hdr->csum_start);
- *(uint16_t *)(mtodo(m, csum_off)) = csum;
- m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
- m->m_pkthdr.csum_data = 0xFFFF;
- break;
- }
-#endif
- default:
- sc->vtnet_stats.rx_csum_bad_ethtype++;
+ /* Assume checksum will be in the first mbuf. */
+ if (m->m_len < csum_end || m->m_pkthdr.len < csum_end) {
+ sc->vtnet_stats.rx_csum_bad_offset++;
return (1);
}
+ /*
+ * Like in_delayed_cksum()/in6_delayed_cksum(), compute the
+ * checksum and write it at the specified offset. We could
+ * try to verify the packet: csum_start should probably
+ * correspond to the start of the TCP/UDP header.
+ *
+ * BMV: Need to properly handle UDP with zero checksum. Is
+ * the IPv4 header checksum implicitly validated?
+ */
+ csum = in_cksum_skip(m, m->m_pkthdr.len, hdr->csum_start);
+ *(uint16_t *)(mtodo(m, csum_off)) = csum;
+ m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ m->m_pkthdr.csum_data = 0xFFFF;
+
return (0);
}
+static void
+vtnet_rxq_csum_data_valid(struct vtnet_rxq *rxq, struct mbuf *m, int protocol)
+{
+ KASSERT(protocol == IPPROTO_TCP || protocol == IPPROTO_UDP,
+ ("%s: unsupported IP protocol %d", __func__, protocol));
+
+ m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ m->m_pkthdr.csum_data = 0xFFFF;
+}
+
static int
-vtnet_rxq_csum_data_valid(struct vtnet_rxq *rxq, struct mbuf *m,
- uint16_t etype, int hoff, struct virtio_net_hdr *hdr __unused)
+vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
+ struct virtio_net_hdr *hdr)
{
-#if 0
+ const struct ether_header *eh;
struct vtnet_softc *sc;
-#endif
- int protocol;
+ int hoff, protocol;
+ uint16_t etype;
+ bool isipv6;
+
+ KASSERT(hdr->flags &
+ (VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID),
+ ("%s: missing checksum offloading flag %x", __func__, hdr->flags));
+
+ eh = mtod(m, const struct ether_header *);
+ etype = ntohs(eh->ether_type);
+ if (etype == ETHERTYPE_VLAN) {
+ /* TODO BMV: Handle QinQ. */
+ const struct ether_vlan_header *evh =
+ mtod(m, const struct ether_vlan_header *);
+ etype = ntohs(evh->evl_proto);
+ hoff = sizeof(struct ether_vlan_header);
+ } else
+ hoff = sizeof(struct ether_header);
-#if 0
sc = rxq->vtnrx_sc;
-#endif
+ /* Check whether the Ethernet type is IP or IPv6, and get the protocol. */
switch (etype) {
#if defined(INET)
case ETHERTYPE_IP:
- if (__predict_false(m->m_len < hoff + sizeof(struct ip)))
- protocol = IPPROTO_DONE;
- else {
+ if (__predict_false(m->m_len < hoff + sizeof(struct ip))) {
+ sc->vtnet_stats.rx_csum_inaccessible_ipproto++;
+ return (1);
+ } else {
struct ip *ip = (struct ip *)(m->m_data + hoff);
protocol = ip->ip_p;
}
+ isipv6 = false;
break;
#endif
#if defined(INET6)
case ETHERTYPE_IPV6:
if (__predict_false(m->m_len < hoff + sizeof(struct ip6_hdr))
- || ip6_lasthdr(m, hoff, IPPROTO_IPV6, &protocol) < 0)
- protocol = IPPROTO_DONE;
+ || ip6_lasthdr(m, hoff, IPPROTO_IPV6, &protocol) < 0) {
+ sc->vtnet_stats.rx_csum_inaccessible_ipproto++;
+ return (1);
+ }
+ isipv6 = true;
break;
#endif
default:
- protocol = IPPROTO_DONE;
- break;
+ sc->vtnet_stats.rx_csum_bad_ethtype++;
+ return (1);
}
+ /* Check whether the protocol is TCP or UDP. */
switch (protocol) {
case IPPROTO_TCP:
case IPPROTO_UDP:
- m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
- m->m_pkthdr.csum_data = 0xFFFF;
break;
default:
/*
* FreeBSD does not support checksum offloading of this
- * protocol. Let the stack re-verify the checksum later
- * if the protocol is supported.
+ * protocol here.
*/
-#if 0
- if_printf(sc->vtnet_ifp,
- "%s: checksum offload of unsupported protocol "
- "etype=%#x protocol=%d csum_start=%d csum_offset=%d\n",
- __func__, etype, protocol, hdr->csum_start,
- hdr->csum_offset);
-#endif
- break;
+ sc->vtnet_stats.rx_csum_bad_ipproto++;
+ return (1);
}
- return (0);
-}
-
-static int
-vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
- struct virtio_net_hdr *hdr)
-{
- const struct ether_header *eh;
- int hoff;
- uint16_t etype;
-
- eh = mtod(m, const struct ether_header *);
- etype = ntohs(eh->ether_type);
- if (etype == ETHERTYPE_VLAN) {
- /* TODO BMV: Handle QinQ. */
- const struct ether_vlan_header *evh =
- mtod(m, const struct ether_vlan_header *);
- etype = ntohs(evh->evl_proto);
- hoff = sizeof(struct ether_vlan_header);
- } else
- hoff = sizeof(struct ether_header);
-
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
- return (vtnet_rxq_csum_needs_csum(rxq, m, etype, hoff, hdr));
+ return (vtnet_rxq_csum_needs_csum(rxq, m, isipv6, protocol,
+ hdr));
else /* VIRTIO_NET_HDR_F_DATA_VALID */
- return (vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr));
+ vtnet_rxq_csum_data_valid(rxq, m, protocol);
+
+ return (0);
}
+#endif
static void
vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *rxq, int nbufs)
@@ -2040,10 +2044,15 @@ vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m,
if (hdr->flags &
(VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID)) {
+#if defined(INET) || defined(INET6)
if (vtnet_rxq_csum(rxq, m, hdr) == 0)
rxq->vtnrx_stats.vrxs_csum++;
else
rxq->vtnrx_stats.vrxs_csum_failed++;
+#else
+ sc->vtnet_stats.rx_csum_bad_ethtype++;
+ rxq->vtnrx_stats.vrxs_csum_failed++;
+#endif
}
if (hdr->gso_size != 0) {
@@ -2497,6 +2506,10 @@ vtnet_txq_offload(struct vtnet_txq *txq, struct mbuf *m,
hdr->csum_start = vtnet_gtoh16(sc, csum_start);
hdr->csum_offset = vtnet_gtoh16(sc, m->m_pkthdr.csum_data);
txq->vtntx_stats.vtxs_csum++;
+ } else if ((flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) &&
+ (proto == IPPROTO_TCP || proto == IPPROTO_UDP) &&
+ (m->m_pkthdr.csum_data == 0xFFFF)) {
+ hdr->flags |= VIRTIO_NET_HDR_F_DATA_VALID;
}
if (flags & (CSUM_IP_TSO | CSUM_IP6_TSO)) {
@@ -2551,8 +2564,10 @@ vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
error = sglist_append_mbuf(sg, m);
if (error) {
m = m_defrag(m, M_NOWAIT);
- if (m == NULL)
+ if (m == NULL) {
+ sc->vtnet_stats.tx_defrag_failed++;
goto fail;
+ }
*m_head = m;
sc->vtnet_stats.tx_defragged++;
@@ -2568,7 +2583,6 @@ vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
return (error);
fail:
- sc->vtnet_stats.tx_defrag_failed++;
m_freem(*m_head);
*m_head = NULL;
@@ -2609,7 +2623,8 @@ vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head, int flags)
m->m_flags &= ~M_VLANTAG;
}
- if (m->m_pkthdr.csum_flags & VTNET_CSUM_ALL_OFFLOAD) {
+ if (m->m_pkthdr.csum_flags &
+ (VTNET_CSUM_ALL_OFFLOAD | CSUM_DATA_VALID)) {
m = vtnet_txq_offload(txq, m, hdr);
if ((*m_head = m) == NULL) {
error = ENOBUFS;
@@ -3031,16 +3046,14 @@ vtnet_get_counter(if_t ifp, ift_counter cnt)
return (rxaccum.vrxs_iqdrops);
case IFCOUNTER_IERRORS:
return (rxaccum.vrxs_ierrors);
+ case IFCOUNTER_IBYTES:
+ return (rxaccum.vrxs_ibytes);
case IFCOUNTER_OPACKETS:
return (txaccum.vtxs_opackets);
case IFCOUNTER_OBYTES:
- if (!VTNET_ALTQ_ENABLED)
- return (txaccum.vtxs_obytes);
- /* FALLTHROUGH */
+ return (txaccum.vtxs_obytes);
case IFCOUNTER_OMCASTS:
- if (!VTNET_ALTQ_ENABLED)
- return (txaccum.vtxs_omcasts);
- /* FALLTHROUGH */
+ return (txaccum.vtxs_omcasts);
default:
return (if_get_counter_default(ifp, cnt));
}
@@ -3813,9 +3826,9 @@ vtnet_rx_filter_mac(struct vtnet_softc *sc)
if_printf(ifp, "error setting host MAC filter table\n");
out:
- if (promisc != 0 && vtnet_set_promisc(sc, true) != 0)
+ if (promisc && vtnet_set_promisc(sc, true) != 0)
if_printf(ifp, "cannot enable promiscuous mode\n");
- if (allmulti != 0 && vtnet_set_allmulti(sc, true) != 0)
+ if (allmulti && vtnet_set_allmulti(sc, true) != 0)
if_printf(ifp, "cannot enable all-multicast mode\n");
}
@@ -4100,21 +4113,29 @@ vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
stats = &rxq->vtnrx_stats;
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_ipackets, "Receive packets");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_ibytes, "Receive bytes");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_iqdrops, "Receive drops");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_ierrors, "Receive errors");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_csum, "Receive checksum offloaded");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_csum_failed, "Receive checksum offload failed");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "host_lro", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "host_lro",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_host_lro, "Receive host segmentation offloaded");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_rescheduled,
"Receive interrupt handler rescheduled");
}
@@ -4135,17 +4156,23 @@ vtnet_setup_txq_sysctl(struct sysctl_ctx_list *ctx,
stats = &txq->vtntx_stats;
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vtxs_opackets, "Transmit packets");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vtxs_obytes, "Transmit bytes");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vtxs_omcasts, "Transmit multicasts");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vtxs_csum, "Transmit checksum offloaded");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vtxs_tso, "Transmit TCP segmentation offloaded");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vtxs_rescheduled,
"Transmit interrupt handler rescheduled");
}
@@ -4170,6 +4197,102 @@ vtnet_setup_queue_sysctl(struct vtnet_softc *sc)
}
}
+static int
+vtnet_sysctl_rx_csum_failed(SYSCTL_HANDLER_ARGS)
+{
+ struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+ struct vtnet_statistics *stats = &sc->vtnet_stats;
+ struct vtnet_rxq_stats *rxst;
+ int i;
+
+ stats->rx_csum_failed = 0;
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
+ stats->rx_csum_failed += rxst->vrxs_csum_failed;
+ }
+ return (sysctl_handle_64(oidp, NULL, stats->rx_csum_failed, req));
+}
+
+static int
+vtnet_sysctl_rx_csum_offloaded(SYSCTL_HANDLER_ARGS)
+{
+ struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+ struct vtnet_statistics *stats = &sc->vtnet_stats;
+ struct vtnet_rxq_stats *rxst;
+ int i;
+
+ stats->rx_csum_offloaded = 0;
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
+ stats->rx_csum_offloaded += rxst->vrxs_csum;
+ }
+ return (sysctl_handle_64(oidp, NULL, stats->rx_csum_offloaded, req));
+}
+
+static int
+vtnet_sysctl_rx_task_rescheduled(SYSCTL_HANDLER_ARGS)
+{
+ struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+ struct vtnet_statistics *stats = &sc->vtnet_stats;
+ struct vtnet_rxq_stats *rxst;
+ int i;
+
+ stats->rx_task_rescheduled = 0;
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
+ stats->rx_task_rescheduled += rxst->vrxs_rescheduled;
+ }
+ return (sysctl_handle_64(oidp, NULL, stats->rx_task_rescheduled, req));
+}
+
+static int
+vtnet_sysctl_tx_csum_offloaded(SYSCTL_HANDLER_ARGS)
+{
+ struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+ struct vtnet_statistics *stats = &sc->vtnet_stats;
+ struct vtnet_txq_stats *txst;
+ int i;
+
+ stats->tx_csum_offloaded = 0;
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ txst = &sc->vtnet_txqs[i].vtntx_stats;
+ stats->tx_csum_offloaded += txst->vtxs_csum;
+ }
+ return (sysctl_handle_64(oidp, NULL, stats->tx_csum_offloaded, req));
+}
+
+static int
+vtnet_sysctl_tx_tso_offloaded(SYSCTL_HANDLER_ARGS)
+{
+ struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+ struct vtnet_statistics *stats = &sc->vtnet_stats;
+ struct vtnet_txq_stats *txst;
+ int i;
+
+ stats->tx_tso_offloaded = 0;
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ txst = &sc->vtnet_txqs[i].vtntx_stats;
+ stats->tx_tso_offloaded += txst->vtxs_tso;
+ }
+ return (sysctl_handle_64(oidp, NULL, stats->tx_tso_offloaded, req));
+}
+
+static int
+vtnet_sysctl_tx_task_rescheduled(SYSCTL_HANDLER_ARGS)
+{
+ struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+ struct vtnet_statistics *stats = &sc->vtnet_stats;
+ struct vtnet_txq_stats *txst;
+ int i;
+
+ stats->tx_task_rescheduled = 0;
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ txst = &sc->vtnet_txqs[i].vtntx_stats;
+ stats->tx_task_rescheduled += txst->vtxs_rescheduled;
+ }
+ return (sysctl_handle_64(oidp, NULL, stats->tx_task_rescheduled, req));
+}
+
static void
vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx,
struct sysctl_oid_list *child, struct vtnet_softc *sc)
@@ -4189,69 +4312,75 @@ vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx,
stats->tx_task_rescheduled = txaccum.vtxs_rescheduled;
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
- CTLFLAG_RD, &stats->mbuf_alloc_failed,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->mbuf_alloc_failed,
"Mbuf cluster allocation failures");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
- CTLFLAG_RD, &stats->rx_frame_too_large,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_frame_too_large,
"Received frame larger than the mbuf chain");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
- CTLFLAG_RD, &stats->rx_enq_replacement_failed,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_enq_replacement_failed,
"Enqueuing the replacement receive mbuf failed");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
- CTLFLAG_RD, &stats->rx_mergeable_failed,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_mergeable_failed,
"Mergeable buffers receive failures");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
- CTLFLAG_RD, &stats->rx_csum_bad_ethtype,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_csum_bad_ethtype,
"Received checksum offloaded buffer with unsupported "
"Ethernet type");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
- CTLFLAG_RD, &stats->rx_csum_bad_ipproto,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_csum_bad_ipproto,
"Received checksum offloaded buffer with incorrect IP protocol");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
- CTLFLAG_RD, &stats->rx_csum_bad_offset,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_csum_bad_offset,
"Received checksum offloaded buffer with incorrect offset");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_proto",
- CTLFLAG_RD, &stats->rx_csum_bad_proto,
- "Received checksum offloaded buffer with incorrect protocol");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
- CTLFLAG_RD, &stats->rx_csum_failed,
+ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_inaccessible_ipproto",
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_csum_inaccessible_ipproto,
+ "Received checksum offloaded buffer with inaccessible IP protocol");
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_csum_failed",
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ sc, 0, vtnet_sysctl_rx_csum_failed, "QU",
"Received buffer checksum offload failed");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
- CTLFLAG_RD, &stats->rx_csum_offloaded,
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_csum_offloaded",
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ sc, 0, vtnet_sysctl_rx_csum_offloaded, "QU",
"Received buffer checksum offload succeeded");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
- CTLFLAG_RD, &stats->rx_task_rescheduled,
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_task_rescheduled",
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ sc, 0, vtnet_sysctl_rx_task_rescheduled, "QU",
"Times the receive interrupt task rescheduled itself");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_unknown_ethtype",
- CTLFLAG_RD, &stats->tx_csum_unknown_ethtype,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_csum_unknown_ethtype,
"Aborted transmit of checksum offloaded buffer with unknown "
"Ethernet type");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_proto_mismatch",
- CTLFLAG_RD, &stats->tx_csum_proto_mismatch,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_csum_proto_mismatch,
"Aborted transmit of checksum offloaded buffer because mismatched "
"protocols");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
- CTLFLAG_RD, &stats->tx_tso_not_tcp,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_tso_not_tcp,
"Aborted transmit of TSO buffer with non TCP protocol");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_without_csum",
- CTLFLAG_RD, &stats->tx_tso_without_csum,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_tso_without_csum,
"Aborted transmit of TSO buffer without TCP checksum offload");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
- CTLFLAG_RD, &stats->tx_defragged,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_defragged,
"Transmit mbufs defragged");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
- CTLFLAG_RD, &stats->tx_defrag_failed,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_defrag_failed,
"Aborted transmit of buffer because defrag failed");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
- CTLFLAG_RD, &stats->tx_csum_offloaded,
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_csum_offloaded",
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ sc, 0, vtnet_sysctl_tx_csum_offloaded, "QU",
"Offloaded checksum of transmitted buffer");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
- CTLFLAG_RD, &stats->tx_tso_offloaded,
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_tso_offloaded",
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ sc, 0, vtnet_sysctl_tx_tso_offloaded, "QU",
"Segmentation offload of transmitted buffer");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
- CTLFLAG_RD, &stats->tx_task_rescheduled,
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_task_rescheduled",
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ sc, 0, vtnet_sysctl_tx_task_rescheduled, "QU",
"Times the transmit interrupt task rescheduled itself");
}
diff --git a/sys/dev/virtio/network/if_vtnetvar.h b/sys/dev/virtio/network/if_vtnetvar.h
index 0144b0f3232d..cab7ced639a7 100644
--- a/sys/dev/virtio/network/if_vtnetvar.h
+++ b/sys/dev/virtio/network/if_vtnetvar.h
@@ -46,7 +46,7 @@ struct vtnet_statistics {
uint64_t rx_csum_bad_ethtype;
uint64_t rx_csum_bad_ipproto;
uint64_t rx_csum_bad_offset;
- uint64_t rx_csum_bad_proto;
+ uint64_t rx_csum_inaccessible_ipproto;
uint64_t tx_csum_unknown_ethtype;
uint64_t tx_csum_proto_mismatch;
uint64_t tx_tso_not_tcp;
diff --git a/sys/dev/virtio/random/virtio_random.c b/sys/dev/virtio/random/virtio_random.c
index f938ba99ae53..3f30c8b68f4c 100644
--- a/sys/dev/virtio/random/virtio_random.c
+++ b/sys/dev/virtio/random/virtio_random.c
@@ -77,7 +77,7 @@ static struct virtio_feature_desc vtrnd_feature_desc[] = {
{ 0, NULL }
};
-static struct random_source random_vtrnd = {
+static const struct random_source random_vtrnd = {
.rs_ident = "VirtIO Entropy Adapter",
.rs_source = RANDOM_PURE_VIRTIO,
.rs_read = vtrnd_read,
diff --git a/sys/dev/virtio/virtio_bus_if.m b/sys/dev/virtio/virtio_bus_if.m
index 57ae90bdc917..4181b641faad 100644
--- a/sys/dev/virtio/virtio_bus_if.m
+++ b/sys/dev/virtio/virtio_bus_if.m
@@ -109,7 +109,3 @@ METHOD void write_device_config {
int len;
};
-METHOD void poll {
- device_t dev;
-};
-
diff --git a/sys/dev/virtio/virtqueue.c b/sys/dev/virtio/virtqueue.c
index 8cc3326dc08e..cc7a233d60ee 100644
--- a/sys/dev/virtio/virtqueue.c
+++ b/sys/dev/virtio/virtqueue.c
@@ -605,10 +605,8 @@ virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
void *cookie;
- VIRTIO_BUS_POLL(vq->vq_dev);
while ((cookie = virtqueue_dequeue(vq, len)) == NULL) {
cpu_spinwait();
- VIRTIO_BUS_POLL(vq->vq_dev);
}
return (cookie);
diff --git a/sys/dev/watchdog/watchdog.c b/sys/dev/watchdog/watchdog.c
index e6b6dc1eac70..e1b2e08c3f10 100644
--- a/sys/dev/watchdog/watchdog.c
+++ b/sys/dev/watchdog/watchdog.c
@@ -50,11 +50,20 @@
#include <sys/syscallsubr.h> /* kern_clock_gettime() */
-static int wd_set_pretimeout(int newtimeout, int disableiftoolong);
+#ifdef COMPAT_FREEBSD14
+#define WDIOCPATPAT_14 _IOW('W', 42, u_int) /* pat the watchdog */
+#define WDIOC_SETTIMEOUT_14 _IOW('W', 43, int) /* set/reset the timer */
+#define WDIOC_GETTIMEOUT_14 _IOR('W', 44, int) /* get total timeout */
+#define WDIOC_GETTIMELEFT_14 _IOR('W', 45, int) /* get time left */
+#define WDIOC_GETPRETIMEOUT_14 _IOR('W', 46, int) /* get the pre-timeout */
+#define WDIOC_SETPRETIMEOUT_14 _IOW('W', 47, int) /* set the pre-timeout */
+#endif
+
+static int wd_set_pretimeout(sbintime_t newtimeout, int disableiftoolong);
static void wd_timeout_cb(void *arg);
static struct callout wd_pretimeo_handle;
-static int wd_pretimeout;
+static sbintime_t wd_pretimeout;
static int wd_pretimeout_act = WD_SOFT_LOG;
static struct callout wd_softtimeo_handle;
@@ -63,6 +72,8 @@ static int wd_softtimer; /* true = use softtimer instead of hardware
static int wd_softtimeout_act = WD_SOFT_LOG; /* action for the software timeout */
static struct cdev *wd_dev;
+static volatile sbintime_t wd_last_sbt; /* last timeout value (sbt) */
+static sbintime_t wd_last_sbt_sysctl; /* last timeout value (sbt) */
static volatile u_int wd_last_u; /* last timeout value set by kern_do_pat */
static u_int wd_last_u_sysctl; /* last timeout value set by kern_do_pat */
static u_int wd_last_u_sysctl_secs; /* wd_last_u in seconds */
@@ -73,6 +84,8 @@ SYSCTL_UINT(_hw_watchdog, OID_AUTO, wd_last_u, CTLFLAG_RD,
&wd_last_u_sysctl, 0, "Watchdog last update time");
SYSCTL_UINT(_hw_watchdog, OID_AUTO, wd_last_u_secs, CTLFLAG_RD,
&wd_last_u_sysctl_secs, 0, "Watchdog last update time");
+SYSCTL_SBINTIME_MSEC(_hw_watchdog, OID_AUTO, wd_last_msecs, CTLFLAG_RD,
+ &wd_last_sbt_sysctl, "Watchdog last update time (milliseconds)");
static int wd_lastpat_valid = 0;
static time_t wd_lastpat = 0; /* when the watchdog was last patted */
@@ -80,105 +93,94 @@ static time_t wd_lastpat = 0; /* when the watchdog was last patted */
/* Hook for external software watchdog to register for use if needed */
void (*wdog_software_attach)(void);
-static void
-pow2ns_to_ts(int pow2ns, struct timespec *ts)
+/* Legacy interface to watchdog. */
+int
+wdog_kern_pat(u_int utim)
{
- uint64_t ns;
+ sbintime_t sbt;
- ns = 1ULL << pow2ns;
- ts->tv_sec = ns / 1000000000ULL;
- ts->tv_nsec = ns % 1000000000ULL;
-}
+ if ((utim & WD_LASTVAL) != 0 && (utim & WD_INTERVAL) > 0)
+ return (EINVAL);
-static int
-pow2ns_to_ticks(int pow2ns)
-{
- struct timeval tv;
- struct timespec ts;
+ if ((utim & WD_LASTVAL) != 0) {
+ return (wdog_control(WD_CTRL_RESET));
+ }
- pow2ns_to_ts(pow2ns, &ts);
- TIMESPEC_TO_TIMEVAL(&tv, &ts);
- return (tvtohz(&tv));
+ utim &= WD_INTERVAL;
+ if (utim == WD_TO_NEVER)
+ sbt = 0;
+ else
+ sbt = nstosbt(1 << utim);
+
+ return (wdog_kern_pat_sbt(sbt));
}
-static int
-seconds_to_pow2ns(int seconds)
+int
+wdog_control(int ctrl)
{
- uint64_t power;
- uint64_t ns;
- uint64_t shifted;
-
- ns = ((uint64_t)seconds) * 1000000000ULL;
- power = flsll(ns);
- shifted = 1ULL << power;
- if (shifted <= ns) {
- power++;
+ /* Disable takes precedence */
+ if (ctrl == WD_CTRL_DISABLE) {
+ wdog_kern_pat(0);
}
- return (power);
+
+ if ((ctrl & WD_CTRL_RESET) != 0) {
+ wdog_kern_pat_sbt(wd_last_sbt);
+ } else if ((ctrl & WD_CTRL_ENABLE) != 0) {
+ wdog_kern_pat_sbt(wd_last_sbt);
+ }
+
+ return (0);
}
int
-wdog_kern_pat(u_int utim)
+wdog_kern_pat_sbt(sbintime_t sbt)
{
- int error;
- static int first = 1;
-
- if ((utim & WD_LASTVAL) != 0 && (utim & WD_INTERVAL) > 0)
- return (EINVAL);
-
- if ((utim & WD_LASTVAL) != 0) {
- /*
- * if WD_LASTVAL is set, fill in the bits for timeout
- * from the saved value in wd_last_u.
- */
- MPASS((wd_last_u & ~WD_INTERVAL) == 0);
- utim &= ~WD_LASTVAL;
- utim |= wd_last_u;
- } else {
- /*
- * Otherwise save the new interval.
- * This can be zero (to disable the watchdog)
- */
- wd_last_u = (utim & WD_INTERVAL);
+ sbintime_t error_sbt = 0;
+ int pow2ns = 0;
+ int error = 0;
+ static bool first = true;
+
+ /* The legacy interface uses power-of-2-nanoseconds time. */
+ if (sbt != 0) {
+ pow2ns = flsl(sbttons(sbt));
+ }
+ if (wd_last_sbt != sbt) {
+ wd_last_u = pow2ns;
wd_last_u_sysctl = wd_last_u;
- wd_last_u_sysctl_secs = pow2ns_to_ticks(wd_last_u) / hz;
+ wd_last_u_sysctl_secs = sbt / SBT_1S;
+
+ wd_last_sbt = sbt;
}
- if ((utim & WD_INTERVAL) == WD_TO_NEVER) {
- utim = 0;
- /* Assume all is well; watchdog signals failure. */
- error = 0;
- } else {
- /* Assume no watchdog available; watchdog flags success */
+ if (sbt != 0)
error = EOPNOTSUPP;
- }
+
if (wd_softtimer) {
- if (utim == 0) {
+ if (sbt == 0) {
callout_stop(&wd_softtimeo_handle);
} else {
- (void) callout_reset(&wd_softtimeo_handle,
- pow2ns_to_ticks(utim), wd_timeout_cb, "soft");
+ (void) callout_reset_sbt(&wd_softtimeo_handle,
+ sbt, 0, wd_timeout_cb, "soft", 0);
}
error = 0;
} else {
- EVENTHANDLER_INVOKE(watchdog_list, utim, &error);
+ EVENTHANDLER_INVOKE(watchdog_sbt_list, sbt, &error_sbt, &error);
+ EVENTHANDLER_INVOKE(watchdog_list, pow2ns, &error);
}
/*
- * If we no hardware watchdog responded, we have not tried to
+ * If no hardware watchdog responded, we have not tried to
* attach an external software watchdog, and one is available,
* attach it now and retry.
*/
- if (error == EOPNOTSUPP && first && *wdog_software_attach != NULL) {
+ if (error == EOPNOTSUPP && first && wdog_software_attach != NULL) {
(*wdog_software_attach)();
- EVENTHANDLER_INVOKE(watchdog_list, utim, &error);
+ EVENTHANDLER_INVOKE(watchdog_sbt_list, sbt, &error_sbt, &error);
+ EVENTHANDLER_INVOKE(watchdog_list, pow2ns, &error);
}
- first = 0;
+ first = false;
+ /* TODO: Print a (rate-limited?) warning if error_sbt is too far from the requested timeout. */
wd_set_pretimeout(wd_pretimeout, true);
- /*
- * If we were able to arm/strobe the watchdog, then
- * update the last time it was strobed for WDIOC_GETTIMELEFT
- */
if (!error) {
struct timespec ts;
@@ -189,6 +191,7 @@ wdog_kern_pat(u_int utim)
wd_lastpat_valid = 1;
}
}
+
return (error);
}
@@ -265,16 +268,14 @@ wd_timeout_cb(void *arg)
* current actual watchdog timeout.
*/
static int
-wd_set_pretimeout(int newtimeout, int disableiftoolong)
+wd_set_pretimeout(sbintime_t newtimeout, int disableiftoolong)
{
- u_int utime;
- struct timespec utime_ts;
- int timeout_ticks;
+ sbintime_t utime;
+ sbintime_t timeout_left;
- utime = wdog_kern_last_timeout();
- pow2ns_to_ts(utime, &utime_ts);
+ utime = wdog_kern_last_timeout_sbt();
/* do not permit a pre-timeout >= than the timeout. */
- if (newtimeout >= utime_ts.tv_sec) {
+ if (newtimeout >= utime) {
/*
* If 'disableiftoolong' then just fall through
* so as to disable the pre-watchdog
@@ -292,7 +293,7 @@ wd_set_pretimeout(int newtimeout, int disableiftoolong)
return 0;
}
- timeout_ticks = pow2ns_to_ticks(utime) - (hz*newtimeout);
+ timeout_left = utime - newtimeout;
#if 0
printf("wd_set_pretimeout: "
"newtimeout: %d, "
@@ -306,8 +307,8 @@ wd_set_pretimeout(int newtimeout, int disableiftoolong)
#endif
/* We determined the value is sane, so reset the callout */
- (void) callout_reset(&wd_pretimeo_handle,
- timeout_ticks, wd_timeout_cb, "pre");
+ (void) callout_reset_sbt(&wd_pretimeo_handle,
+ timeout_left, 0, wd_timeout_cb, "pre", 0);
wd_pretimeout = newtimeout;
return 0;
}
@@ -316,6 +317,7 @@ static int
wd_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t data,
int flags __unused, struct thread *td)
{
+ sbintime_t sb;
u_int u;
time_t timeleft;
int error;
@@ -351,29 +353,55 @@ wd_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t data,
error = EINVAL;
}
break;
- case WDIOC_GETPRETIMEOUT:
- *(int *)data = (int)wd_pretimeout;
+#ifdef COMPAT_FREEBSD14
+ case WDIOC_GETPRETIMEOUT_14:
+ *(int *)data = (int)(wd_pretimeout / SBT_1S);
break;
- case WDIOC_SETPRETIMEOUT:
- error = wd_set_pretimeout(*(int *)data, false);
+ case WDIOC_SETPRETIMEOUT_14:
+ error = wd_set_pretimeout(*(int *)data * SBT_1S, false);
break;
- case WDIOC_GETTIMELEFT:
+ case WDIOC_GETTIMELEFT_14:
error = wd_get_time_left(td, &timeleft);
if (error)
break;
*(int *)data = (int)timeleft;
break;
- case WDIOC_SETTIMEOUT:
+ case WDIOC_SETTIMEOUT_14:
u = *(u_int *)data;
- error = wdog_kern_pat(seconds_to_pow2ns(u));
+ error = wdog_kern_pat_sbt(mstosbt(u * 1000ULL));
break;
- case WDIOC_GETTIMEOUT:
+ case WDIOC_GETTIMEOUT_14:
u = wdog_kern_last_timeout();
*(u_int *)data = u;
break;
- case WDIOCPATPAT:
+ case WDIOCPATPAT_14:
error = wd_ioctl_patpat(data);
break;
+#endif
+
+ /* New API */
+ case WDIOC_CONTROL:
+ wdog_control(*(int *)data);
+ break;
+ case WDIOC_SETTIMEOUT:
+ sb = *(sbintime_t *)data;
+ error = wdog_kern_pat_sbt(sb);
+ break;
+ case WDIOC_GETTIMEOUT:
+ *(sbintime_t *)data = wdog_kern_last_timeout_sbt();
+ break;
+ case WDIOC_GETTIMELEFT:
+ error = wd_get_time_left(td, &timeleft);
+ if (error)
+ break;
+ *(sbintime_t *)data = (sbintime_t)timeleft * SBT_1S;
+ break;
+ case WDIOC_GETPRETIMEOUT:
+ *(sbintime_t *)data = wd_pretimeout;
+ break;
+ case WDIOC_SETPRETIMEOUT:
+ error = wd_set_pretimeout(*(sbintime_t *)data, false);
+ break;
default:
error = ENOIOCTL;
break;
@@ -392,6 +420,12 @@ wdog_kern_last_timeout(void)
return (wd_last_u);
}
+sbintime_t
+wdog_kern_last_timeout_sbt(void)
+{
+ return (wd_last_sbt);
+}
+
static struct cdevsw wd_cdevsw = {
.d_version = D_VERSION,
.d_ioctl = wd_ioctl,
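
The watchdog compatibility glue above rests on two conversions: wdog_kern_pat()
expands the legacy power-of-two-nanoseconds code with nstosbt(1 << utim), and
wdog_kern_pat_sbt() recovers an exponent with flsl(sbttons(sbt)). A standalone
sketch of that round trip, assuming the kernel's 32.32 fixed-point sbintime_t
(the helpers below are simplified stand-ins, and flsll() is the BSD extension
from <strings.h>):

    #include <stdint.h>
    #include <stdio.h>
    #include <strings.h>                    /* flsll() */

    typedef int64_t sbintime_t;             /* 32.32 fixed-point seconds */

    static sbintime_t
    nstosbt(int64_t ns)                     /* sketch of the kernel helper */
    {
            return ((sbintime_t)(((__int128)ns << 32) / 1000000000));
    }

    static int64_t
    sbttons(sbintime_t sbt)                 /* sketch of the inverse */
    {
            return ((int64_t)(((__int128)sbt * 1000000000) >> 32));
    }

    int
    main(void)
    {
            unsigned int utim = 31;         /* legacy: interval = 2^31 ns */
            sbintime_t sbt = nstosbt(1LL << utim);  /* wdog_kern_pat() */
            int pow2ns = flsll(sbttons(sbt));       /* wdog_kern_pat_sbt() */

            /*
             * flsll() returns the highest set bit index plus one, so the
             * recovered exponent is the smallest power of two covering
             * the interval, matching the old seconds_to_pow2ns() rounding.
             */
            printf("utim %u -> sbt %jd -> pow2ns %d\n",
                utim, (intmax_t)sbt, pow2ns);
            return (0);
    }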