Diffstat (limited to 'sys/dev/ufshci')
-rw-r--r--  sys/dev/ufshci/ufshci.h            |  69
-rw-r--r--  sys/dev/ufshci/ufshci_ctrlr.c      | 408
-rw-r--r--  sys/dev/ufshci/ufshci_ctrlr_cmd.c  |   2
-rw-r--r--  sys/dev/ufshci/ufshci_dev.c        | 354
-rw-r--r--  sys/dev/ufshci/ufshci_pci.c        |   6
-rw-r--r--  sys/dev/ufshci/ufshci_private.h    |  54
-rw-r--r--  sys/dev/ufshci/ufshci_reg.h        |   2
-rw-r--r--  sys/dev/ufshci/ufshci_req_queue.c  | 292
-rw-r--r--  sys/dev/ufshci/ufshci_req_sdb.c    |  77
-rw-r--r--  sys/dev/ufshci/ufshci_sim.c        |   1
-rw-r--r--  sys/dev/ufshci/ufshci_sysctl.c     |  20
-rw-r--r--  sys/dev/ufshci/ufshci_uic_cmd.c    |  19
12 files changed, 1117 insertions, 187 deletions
diff --git a/sys/dev/ufshci/ufshci.h b/sys/dev/ufshci/ufshci.h
index b96d82ff836e..b055d2d2d769 100644
--- a/sys/dev/ufshci/ufshci.h
+++ b/sys/dev/ufshci/ufshci.h
@@ -716,6 +716,42 @@ struct ufshci_device_descriptor {
_Static_assert(sizeof(struct ufshci_device_descriptor) == 89,
"bad size for ufshci_device_descriptor");
+/* Defines the bit field of dExtendedUfsFeaturesSupport. */
+enum ufshci_desc_wb_ext_ufs_feature {
+ UFSHCI_DESC_EXT_UFS_FEATURE_FFU = (1 << 0),
+ UFSHCI_DESC_EXT_UFS_FEATURE_PSA = (1 << 1),
+ UFSHCI_DESC_EXT_UFS_FEATURE_DEV_LIFE_SPAN = (1 << 2),
+ UFSHCI_DESC_EXT_UFS_FEATURE_REFRESH_OP = (1 << 3),
+ UFSHCI_DESC_EXT_UFS_FEATURE_TOO_HIGH_TEMP = (1 << 4),
+ UFSHCI_DESC_EXT_UFS_FEATURE_TOO_LOW_TEMP = (1 << 5),
+ UFSHCI_DESC_EXT_UFS_FEATURE_EXT_TEMP = (1 << 6),
+ UFSHCI_DESC_EXT_UFS_FEATURE_HPB_SUPPORT = (1 << 7),
+ UFSHCI_DESC_EXT_UFS_FEATURE_WRITE_BOOSTER = (1 << 8),
+ UFSHCI_DESC_EXT_UFS_FEATURE_PERF_THROTTLING = (1 << 9),
+ UFSHCI_DESC_EXT_UFS_FEATURE_ADVANCED_RPMB = (1 << 10),
+ UFSHCI_DESC_EXT_UFS_FEATURE_ZONED_UFS_EXTENSION = (1 << 11),
+ UFSHCI_DESC_EXT_UFS_FEATURE_DEV_LEVEL_EXCEPTION = (1 << 12),
+ UFSHCI_DESC_EXT_UFS_FEATURE_HID = (1 << 13),
+ UFSHCI_DESC_EXT_UFS_FEATURE_BARRIER = (1 << 14),
+ UFSHCI_DESC_EXT_UFS_FEATURE_CLEAR_ERROR_HISTORY = (1 << 15),
+ UFSHCI_DESC_EXT_UFS_FEATURE_EXT_IID = (1 << 16),
+ UFSHCI_DESC_EXT_UFS_FEATURE_FBO = (1 << 17),
+ UFSHCI_DESC_EXT_UFS_FEATURE_FAST_RECOVERY_MODE = (1 << 18),
+ UFSHCI_DESC_EXT_UFS_FEATURE_RPMB_VENDOR_CMD = (1 << 19),
+};
+
+/* Defines the bit field of bWriteBoosterBufferType. */
+enum ufshci_desc_wb_buffer_type {
+ UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED = 0x00,
+ UFSHCI_DESC_WB_BUF_TYPE_SINGLE_SHARED = 0x01,
+};
+
+/* Defines the bit field of bWriteBoosterBufferPreserveUserSpaceEn. */
+enum ufshci_desc_user_space_config {
+ UFSHCI_DESC_WB_BUF_USER_SPACE_REDUCTION = 0x00,
+ UFSHCI_DESC_WB_BUF_PRESERVE_USER_SPACE = 0x01,
+};
+
/*
* UFS Spec 4.1, section 14.1.5.3 "Configuration Descriptor"
* ConfigurationDescriptor use big-endian byte ordering.
@@ -1014,4 +1050,37 @@ enum ufshci_attributes {
UFSHCI_ATTR_B_REFRESH_METHOD = 0x2f,
};
+/*
+ * bAvailableWriteBoosterBufferSize codes (UFS WriteBooster available
+ * buffer left, in %).
+ */
+enum ufshci_wb_available_buffer_size {
+ UFSHCI_ATTR_WB_AVAILABLE_0 = 0x00, /* 0% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_10 = 0x01, /* 10% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_20 = 0x02, /* 20% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_30 = 0x03, /* 30% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_40 = 0x04, /* 40% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_50 = 0x05, /* 50% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_60 = 0x06, /* 60% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_70 = 0x07, /* 70% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_80 = 0x08, /* 80% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_90 = 0x09, /* 90% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_100 = 0x0A, /* 100% buffer remains */
+};
+
+/* bWriteBoosterBufferLifeTimeEst codes (UFS WriteBooster buffer life %) */
+enum ufshci_wb_lifetime {
+ UFSHCI_ATTR_WB_LIFE_DISABLED = 0x00, /* Info not available */
+ UFSHCI_ATTR_WB_LIFE_0_10 = 0x01, /* 0%–10% used */
+ UFSHCI_ATTR_WB_LIFE_10_20 = 0x02, /* 10%–20% used */
+ UFSHCI_ATTR_WB_LIFE_20_30 = 0x03, /* 20%–30% used */
+ UFSHCI_ATTR_WB_LIFE_30_40 = 0x04, /* 30%–40% used */
+ UFSHCI_ATTR_WB_LIFE_40_50 = 0x05, /* 40%–50% used */
+ UFSHCI_ATTR_WB_LIFE_50_60 = 0x06, /* 50%–60% used */
+ UFSHCI_ATTR_WB_LIFE_60_70 = 0x07, /* 60%–70% used */
+ UFSHCI_ATTR_WB_LIFE_70_80 = 0x08, /* 70%–80% used */
+ UFSHCI_ATTR_WB_LIFE_80_90 = 0x09, /* 80%–90% used */
+ UFSHCI_ATTR_WB_LIFE_90_100 = 0x0A, /* 90%–100% used */
+ UFSHCI_ATTR_WB_LIFE_EXCEEDED =
+ 0x0B, /* Exceeded estimated life (treat as WB disabled) */
+};
+
#endif /* __UFSHCI_H__ */
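
Annotation: the new descriptor enums above are consumed by the WriteBooster
setup code in ufshci_dev.c further down. A minimal sketch of the intended
decode (not part of the patch; it assumes a device descriptor already fetched
into dev_desc):

	static bool
	wb_supported(const struct ufshci_device_descriptor *dev_desc)
	{
		/* dExtendedUfsFeaturesSupport is big-endian in the descriptor. */
		uint32_t features = be32toh(dev_desc->dExtendedUfsFeaturesSupport);

		return ((features & UFSHCI_DESC_EXT_UFS_FEATURE_WRITE_BOOSTER) != 0);
	}
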
diff --git a/sys/dev/ufshci/ufshci_ctrlr.c b/sys/dev/ufshci/ufshci_ctrlr.c
index 37bd32665b2b..35663b480cfa 100644
--- a/sys/dev/ufshci/ufshci_ctrlr.c
+++ b/sys/dev/ufshci/ufshci_ctrlr.c
@@ -12,8 +12,108 @@
#include "ufshci_private.h"
#include "ufshci_reg.h"
+static void
+ufshci_ctrlr_fail(struct ufshci_controller *ctrlr)
+{
+ ctrlr->is_failed = true;
+
+ ufshci_req_queue_fail(ctrlr,
+ ctrlr->task_mgmt_req_queue.qops.get_hw_queue(
+ &ctrlr->task_mgmt_req_queue));
+ ufshci_req_queue_fail(ctrlr,
+ ctrlr->transfer_req_queue.qops.get_hw_queue(
+ &ctrlr->transfer_req_queue));
+}
+
+static void
+ufshci_ctrlr_start(struct ufshci_controller *ctrlr, bool resetting)
+{
+ TSENTER();
+
+ /*
+ * If `resetting` is true, we are on the reset path.
+ * Re-enable request queues here because ufshci_ctrlr_reset_task()
+ * disables them during reset.
+ */
+ if (resetting) {
+ if (ufshci_utmr_req_queue_enable(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+ if (ufshci_utr_req_queue_enable(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+ }
+
+ if (ufshci_ctrlr_send_nop(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Initialize UFS target device */
+ if (ufshci_dev_init(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Initialize Reference Clock */
+ if (ufshci_dev_init_reference_clock(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Initialize unipro */
+ if (ufshci_dev_init_unipro(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /*
+ * Initialize UIC Power Mode
+ * QEMU UFS devices do not support unipro and power mode.
+ */
+ if (!(ctrlr->quirks & UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE) &&
+ ufshci_dev_init_uic_power_mode(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Initialize UFS Power Mode */
+ if (ufshci_dev_init_ufs_power_mode(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Read Controller Descriptor (Device, Geometry) */
+ if (ufshci_dev_get_descriptor(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ if (ufshci_dev_config_write_booster(ctrlr)) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* TODO: Configure Write Protect */
+
+ /* TODO: Configure Background Operations */
+
+ /*
+ * If the reset is due to a timeout, the controller is already attached
+ * to the SIM and does not need to be attached again.
+ */
+ if (!resetting && ufshci_sim_attach(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ TSEXIT();
+}
+
static int
-ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
+ufshci_ctrlr_disable_host_ctrlr(struct ufshci_controller *ctrlr)
{
int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
sbintime_t delta_t = SBT_1US;
@@ -27,6 +127,35 @@ ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
ufshci_mmio_write_4(ctrlr, hce, hce);
}
+ /* Wait for the HCE flag to change */
+ while (1) {
+ hce = ufshci_mmio_read_4(ctrlr, hce);
+ if (!UFSHCIV(UFSHCI_HCE_REG_HCE, hce))
+ break;
+ if (timeout - ticks < 0) {
+ ufshci_printf(ctrlr,
+ "host controller failed to disable "
+ "within %d ms\n",
+ ctrlr->device_init_timeout_in_ms);
+ return (ENXIO);
+ }
+
+ pause_sbt("ufshci_disable_hce", delta_t, 0, C_PREL(1));
+ delta_t = min(SBT_1MS, delta_t * 3 / 2);
+ }
+
+ return (0);
+}
+
+static int
+ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
+{
+ int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
+ sbintime_t delta_t = SBT_1US;
+ uint32_t hce;
+
+ hce = ufshci_mmio_read_4(ctrlr, hce);
+
/* Enable UFS host controller */
hce |= UFSHCIM(UFSHCI_HCE_REG_HCE);
ufshci_mmio_write_4(ctrlr, hce, hce);
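
Annotation: the disable and enable paths above share one polling idiom: read
HCE, and if the flag has not yet changed, sleep with pause_sbt() starting at
1 us and growing the interval by 1.5x, capped at 1 ms. A condensed sketch of
the idiom (illustrative only; desired_value is a placeholder, 0 while
disabling and 1 while enabling — the driver open-codes the loop per call
site):

	int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
	sbintime_t delta_t = SBT_1US;
	uint32_t hce;

	while (1) {
		hce = ufshci_mmio_read_4(ctrlr, hce);
		if (UFSHCIV(UFSHCI_HCE_REG_HCE, hce) == desired_value)
			break;				/* flag changed */
		if (timeout - ticks < 0)
			return (ENXIO);			/* gave up */
		pause_sbt("ufshci_hce", delta_t, 0, C_PREL(1));
		delta_t = min(SBT_1MS, delta_t * 3 / 2);	/* backoff */
	}
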
@@ -36,7 +165,7 @@ ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
* unstable, so we need to read the HCE value after some time after
* initialization is complete.
*/
- pause_sbt("ufshci_hce", ustosbt(100), 0, C_PREL(1));
+ pause_sbt("ufshci_enable_hce", ustosbt(100), 0, C_PREL(1));
/* Wait for the HCE flag to change */
while (1) {
@@ -51,17 +180,103 @@ ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
return (ENXIO);
}
- pause_sbt("ufshci_hce", delta_t, 0, C_PREL(1));
+ pause_sbt("ufshci_enable_hce", delta_t, 0, C_PREL(1));
delta_t = min(SBT_1MS, delta_t * 3 / 2);
}
return (0);
}
+static int
+ufshci_ctrlr_disable(struct ufshci_controller *ctrlr)
+{
+ int error;
+
+ /* Disable all interrupts */
+ ufshci_mmio_write_4(ctrlr, ie, 0);
+
+ error = ufshci_ctrlr_disable_host_ctrlr(ctrlr);
+ return (error);
+}
+
+static int
+ufshci_ctrlr_enable(struct ufshci_controller *ctrlr)
+{
+ uint32_t ie, hcs;
+ int error;
+
+ error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
+ if (error)
+ return (error);
+
+ /* Send DME_LINKSTARTUP command to start the link startup procedure */
+ error = ufshci_uic_send_dme_link_startup(ctrlr);
+ if (error)
+ return (error);
+
+ /*
+ * The device_present(UFSHCI_HCS_REG_DP) bit becomes true if the host
+ * controller has successfully received a Link Startup UIC command
+ * response and the UFS device has found a physical link to the
+ * controller.
+ */
+ hcs = ufshci_mmio_read_4(ctrlr, hcs);
+ if (!UFSHCIV(UFSHCI_HCS_REG_DP, hcs)) {
+ ufshci_printf(ctrlr, "UFS device not found\n");
+ return (ENXIO);
+ }
+
+ /* Enable additional interrupts by programming the IE register. */
+ ie = ufshci_mmio_read_4(ctrlr, ie);
+ ie |= UFSHCIM(UFSHCI_IE_REG_UTRCE); /* UTR Completion */
+ ie |= UFSHCIM(UFSHCI_IE_REG_UEE); /* UIC Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_UTMRCE); /* UTMR Completion */
+ ie |= UFSHCIM(UFSHCI_IE_REG_DFEE); /* Device Fatal Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_UTPEE); /* UTP Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_HCFEE); /* Host Ctrlr Fatal Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_SBFEE); /* System Bus Fatal Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_CEFEE); /* Crypto Engine Fatal Error */
+ ufshci_mmio_write_4(ctrlr, ie, ie);
+
+ /* TODO: Initialize interrupt Aggregation Control Register (UTRIACR) */
+
+ return (0);
+}
+
+static int
+ufshci_ctrlr_hw_reset(struct ufshci_controller *ctrlr)
+{
+ int error;
+
+ error = ufshci_ctrlr_disable(ctrlr);
+ if (error)
+ return (error);
+
+ error = ufshci_ctrlr_enable(ctrlr);
+ return (error);
+}
+
+static void
+ufshci_ctrlr_reset_task(void *arg, int pending)
+{
+ struct ufshci_controller *ctrlr = arg;
+ int error;
+
+ /* Quiesce the request queues before resetting the controller */
+ ufshci_utmr_req_queue_disable(ctrlr);
+ ufshci_utr_req_queue_disable(ctrlr);
+
+ error = ufshci_ctrlr_hw_reset(ctrlr);
+ if (error)
+ return (ufshci_ctrlr_fail(ctrlr));
+
+ ufshci_ctrlr_start(ctrlr, true);
+}
+
int
ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
{
- uint32_t ver, cap, hcs, ie;
+ uint32_t ver, cap, ahit;
uint32_t timeout_period, retry_count;
int error;
@@ -114,44 +329,24 @@ ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
TUNABLE_INT_FETCH("hw.ufshci.retry_count", &retry_count);
ctrlr->retry_count = retry_count;
- /* Disable all interrupts */
- ufshci_mmio_write_4(ctrlr, ie, 0);
+ ctrlr->enable_aborts = 1;
+ if (ctrlr->quirks & UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK)
+ ctrlr->enable_aborts = 0;
+ else
+ TUNABLE_INT_FETCH("hw.ufshci.enable_aborts",
+ &ctrlr->enable_aborts);
- /* Enable Host Controller */
- error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
+ /* Reset the UFSHCI controller */
+ error = ufshci_ctrlr_hw_reset(ctrlr);
if (error)
return (error);
- /* Send DME_LINKSTARTUP command to start the link startup procedure */
- error = ufshci_uic_send_dme_link_startup(ctrlr);
- if (error)
- return (error);
+ /* Read the UECPA register to clear */
+ ufshci_mmio_read_4(ctrlr, uecpa);
- /*
- * The device_present(UFSHCI_HCS_REG_DP) bit becomes true if the host
- * controller has successfully received a Link Startup UIC command
- * response and the UFS device has found a physical link to the
- * controller.
- */
- hcs = ufshci_mmio_read_4(ctrlr, hcs);
- if (!UFSHCIV(UFSHCI_HCS_REG_DP, hcs)) {
- ufshci_printf(ctrlr, "UFS device not found\n");
- return (ENXIO);
- }
-
- /* Enable additional interrupts by programming the IE register. */
- ie = ufshci_mmio_read_4(ctrlr, ie);
- ie |= UFSHCIM(UFSHCI_IE_REG_UTRCE); /* UTR Completion */
- ie |= UFSHCIM(UFSHCI_IE_REG_UEE); /* UIC Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_UTMRCE); /* UTMR Completion */
- ie |= UFSHCIM(UFSHCI_IE_REG_DFEE); /* Device Fatal Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_UTPEE); /* UTP Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_HCFEE); /* Host Ctrlr Fatal Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_SBFEE); /* System Bus Fatal Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_CEFEE); /* Crypto Engine Fatal Error */
- ufshci_mmio_write_4(ctrlr, ie, ie);
-
- /* TODO: Initialize interrupt Aggregation Control Register (UTRIACR) */
+ /* Disable Auto-hibernate */
+ ahit = 0;
+ ufshci_mmio_write_4(ctrlr, ahit, ahit);
/* Allocate and initialize UTP Task Management Request List. */
error = ufshci_utmr_req_queue_construct(ctrlr);
@@ -164,8 +359,19 @@ ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
return (error);
/* TODO: Separate IO and Admin slot */
- /* max_hw_pend_io is the number of slots in the transfer_req_queue */
- ctrlr->max_hw_pend_io = ctrlr->transfer_req_queue.num_entries;
+
+ /*
+ * max_hw_pend_io is the number of slots in the transfer_req_queue.
+ * Reduce num_entries by one to reserve an admin slot.
+ */
+ ctrlr->max_hw_pend_io = ctrlr->transfer_req_queue.num_entries - 1;
+
+ /* Create a thread for the taskqueue. */
+ ctrlr->taskqueue = taskqueue_create("ufshci_taskq", M_WAITOK,
+ taskqueue_thread_enqueue, &ctrlr->taskqueue);
+ taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "ufshci taskq");
+
+ TASK_INIT(&ctrlr->reset_task, 0, ufshci_ctrlr_reset_task, ctrlr);
return (0);
}
@@ -198,50 +404,21 @@ ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev)
bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id,
ctrlr->resource);
nores:
+ KASSERT(!mtx_owned(&ctrlr->uic_cmd_lock),
+ ("destroying uic_cmd_lock while still owned"));
mtx_destroy(&ctrlr->uic_cmd_lock);
+
+ KASSERT(!mtx_owned(&ctrlr->sc_mtx),
+ ("destroying sc_mtx while still owned"));
mtx_destroy(&ctrlr->sc_mtx);
return;
}
-int
+void
ufshci_ctrlr_reset(struct ufshci_controller *ctrlr)
{
- uint32_t ie;
- int error;
-
- /* Backup and disable all interrupts */
- ie = ufshci_mmio_read_4(ctrlr, ie);
- ufshci_mmio_write_4(ctrlr, ie, 0);
-
- /* Release resources */
- ufshci_utmr_req_queue_destroy(ctrlr);
- ufshci_utr_req_queue_destroy(ctrlr);
-
- /* Reset Host Controller */
- error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
- if (error)
- return (error);
-
- /* Send DME_LINKSTARTUP command to start the link startup procedure */
- error = ufshci_uic_send_dme_link_startup(ctrlr);
- if (error)
- return (error);
-
- /* Enable interrupts */
- ufshci_mmio_write_4(ctrlr, ie, ie);
-
- /* Allocate and initialize UTP Task Management Request List. */
- error = ufshci_utmr_req_queue_construct(ctrlr);
- if (error)
- return (error);
-
- /* Allocate and initialize UTP Transfer Request List or SQ/CQ. */
- error = ufshci_utr_req_queue_construct(ctrlr);
- if (error)
- return (error);
-
- return (0);
+ taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
}
int
@@ -285,83 +462,6 @@ ufshci_ctrlr_send_nop(struct ufshci_controller *ctrlr)
return (0);
}
-static void
-ufshci_ctrlr_fail(struct ufshci_controller *ctrlr, bool admin_also)
-{
- printf("ufshci(4): ufshci_ctrlr_fail\n");
-
- ctrlr->is_failed = true;
-
- /* TODO: task_mgmt_req_queue should be handled as fail */
-
- ufshci_req_queue_fail(ctrlr,
- &ctrlr->transfer_req_queue.hwq[UFSHCI_SDB_Q]);
-}
-
-static void
-ufshci_ctrlr_start(struct ufshci_controller *ctrlr)
-{
- TSENTER();
-
- if (ufshci_ctrlr_send_nop(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Initialize UFS target drvice */
- if (ufshci_dev_init(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Initialize Reference Clock */
- if (ufshci_dev_init_reference_clock(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Initialize unipro */
- if (ufshci_dev_init_unipro(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /*
- * Initialize UIC Power Mode
- * QEMU UFS devices do not support unipro and power mode.
- */
- if (!(ctrlr->quirks & UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE) &&
- ufshci_dev_init_uic_power_mode(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Initialize UFS Power Mode */
- if (ufshci_dev_init_ufs_power_mode(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Read Controller Descriptor (Device, Geometry)*/
- if (ufshci_dev_get_descriptor(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* TODO: Configure Write Protect */
-
- /* TODO: Configure Background Operations */
-
- /* TODO: Configure Write Booster */
-
- if (ufshci_sim_attach(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- TSEXIT();
-}
-
void
ufshci_ctrlr_start_config_hook(void *arg)
{
@@ -371,9 +471,9 @@ ufshci_ctrlr_start_config_hook(void *arg)
if (ufshci_utmr_req_queue_enable(ctrlr) == 0 &&
ufshci_utr_req_queue_enable(ctrlr) == 0)
- ufshci_ctrlr_start(ctrlr);
+ ufshci_ctrlr_start(ctrlr, false);
else
- ufshci_ctrlr_fail(ctrlr, false);
+ ufshci_ctrlr_fail(ctrlr);
ufshci_sysctl_initialize_ctrlr(ctrlr);
config_intrhook_disestablish(&ctrlr->config_hook);
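
Annotation: with this change ufshci_ctrlr_reset() no longer performs the
reset inline; it only enqueues reset_task on the controller's taskqueue, so
callers in callout or interrupt context never sleep. A sketch of the
resulting flow (condensed from the code above):

	/* Safe in callout/interrupt context: just enqueues reset_task. */
	ufshci_ctrlr_reset(ctrlr);

	/* Later, in the taskqueue thread (ufshci_ctrlr_reset_task): */
	ufshci_utmr_req_queue_disable(ctrlr);
	ufshci_utr_req_queue_disable(ctrlr);
	if (ufshci_ctrlr_hw_reset(ctrlr) == 0)
		ufshci_ctrlr_start(ctrlr, true);	/* re-enables the queues */
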
diff --git a/sys/dev/ufshci/ufshci_ctrlr_cmd.c b/sys/dev/ufshci/ufshci_ctrlr_cmd.c
index 71d163d998af..253f31a93c2e 100644
--- a/sys/dev/ufshci/ufshci_ctrlr_cmd.c
+++ b/sys/dev/ufshci/ufshci_ctrlr_cmd.c
@@ -15,7 +15,7 @@ ufshci_ctrlr_cmd_send_task_mgmt_request(struct ufshci_controller *ctrlr,
struct ufshci_request *req;
struct ufshci_task_mgmt_request_upiu *upiu;
- req = ufshci_allocate_request_vaddr(NULL, 0, M_WAITOK, cb_fn, cb_arg);
+ req = ufshci_allocate_request_vaddr(NULL, 0, M_NOWAIT, cb_fn, cb_arg);
req->request_size = sizeof(struct ufshci_task_mgmt_request_upiu);
req->response_size = sizeof(struct ufshci_task_mgmt_response_upiu);
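
Annotation: switching this allocation to M_NOWAIT makes it callable from the
timeout path, which cannot sleep, but malloc(9) may then return NULL and the
result is used unchecked. A defensive sketch, not in the patch, assuming the
caller tolerates a dropped task-management request (the watchdog fires again
and can retry):

	req = ufshci_allocate_request_vaddr(NULL, 0, M_NOWAIT, cb_fn, cb_arg);
	if (req == NULL)
		return;		/* drop the request; retried on the next timeout */
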
diff --git a/sys/dev/ufshci/ufshci_dev.c b/sys/dev/ufshci/ufshci_dev.c
index a0e32914e2aa..975468e5156f 100644
--- a/sys/dev/ufshci/ufshci_dev.c
+++ b/sys/dev/ufshci/ufshci_dev.c
@@ -60,6 +60,14 @@ ufshci_dev_read_geometry_descriptor(struct ufshci_controller *ctrlr,
}
static int
+ufshci_dev_read_unit_descriptor(struct ufshci_controller *ctrlr, uint8_t lun,
+ struct ufshci_unit_descriptor *desc)
+{
+ return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_UNIT, lun, 0,
+ desc, sizeof(struct ufshci_unit_descriptor)));
+}
+
+static int
ufshci_dev_read_flag(struct ufshci_controller *ctrlr,
enum ufshci_flags flag_type, uint8_t *flag)
{
@@ -114,6 +122,61 @@ ufshci_dev_set_flag(struct ufshci_controller *ctrlr,
}
static int
+ufshci_dev_clear_flag(struct ufshci_controller *ctrlr,
+ enum ufshci_flags flag_type)
+{
+ struct ufshci_completion_poll_status status;
+ struct ufshci_query_param param;
+
+ param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ param.opcode = UFSHCI_QUERY_OPCODE_CLEAR_FLAG;
+ param.type = flag_type;
+ param.index = 0;
+ param.selector = 0;
+ param.value = 0;
+
+ status.done = 0;
+ ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
+ &status, param);
+ ufshci_completion_poll(&status);
+ if (status.error) {
+ ufshci_printf(ctrlr, "ufshci_dev_clear_flag failed!\n");
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static int
+ufshci_dev_read_attribute(struct ufshci_controller *ctrlr,
+ enum ufshci_attributes attr_type, uint8_t index, uint8_t selector,
+ uint64_t *value)
+{
+ struct ufshci_completion_poll_status status;
+ struct ufshci_query_param param;
+
+ param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
+ param.opcode = UFSHCI_QUERY_OPCODE_READ_ATTRIBUTE;
+ param.type = attr_type;
+ param.index = index;
+ param.selector = selector;
+ param.value = 0;
+
+ status.done = 0;
+ ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
+ &status, param);
+ ufshci_completion_poll(&status);
+ if (status.error) {
+ ufshci_printf(ctrlr, "ufshci_dev_read_attribute failed!\n");
+ return (ENXIO);
+ }
+
+ *value = status.cpl.response_upiu.query_response_upiu.value_64;
+
+ return (0);
+}
+
+static int
ufshci_dev_write_attribute(struct ufshci_controller *ctrlr,
enum ufshci_attributes attr_type, uint8_t index, uint8_t selector,
uint64_t value)
@@ -270,7 +333,7 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
*/
const uint32_t fast_mode = 1;
const uint32_t rx_bit_shift = 4;
- const uint32_t power_mode = (fast_mode << rx_bit_shift) | fast_mode;
+ uint32_t power_mode, peer_granularity;
/* Update lanes with available TX/RX lanes */
if (ufshci_uic_send_dme_get(ctrlr, PA_AvailTxDataLanes,
@@ -295,6 +358,20 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
ctrlr->rx_lanes))
return (ENXIO);
+ if (ctrlr->quirks & UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY) {
+ /* Before changing gears, first change the number of lanes. */
+ if (ufshci_uic_send_dme_get(ctrlr, PA_PWRMode, &power_mode))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode))
+ return (ENXIO);
+
+ /* Wait for power mode changed. */
+ if (ufshci_uic_power_mode_ready(ctrlr)) {
+ ufshci_reg_dump(ctrlr);
+ return (ENXIO);
+ }
+ }
+
/* Set HS-GEAR to max gear */
ctrlr->hs_gear = ctrlr->max_rx_hs_gear;
if (ufshci_uic_send_dme_set(ctrlr, PA_TxGear, ctrlr->hs_gear))
@@ -346,6 +423,7 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
return (ENXIO);
/* Set TX/RX PWRMode */
+ power_mode = (fast_mode << rx_bit_shift) | fast_mode;
if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode))
return (ENXIO);
@@ -366,7 +444,8 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
pause_sbt("ufshci", ustosbt(1250), 0, C_PREL(1));
/* Test with dme_peer_get to make sure there are no errors. */
- if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity, NULL))
+ if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity,
+ &peer_granularity))
return (ENXIO);
}
@@ -398,7 +477,7 @@ ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr)
return (error);
ver = be16toh(device->dev_desc.wSpecVersion);
- ufshci_printf(ctrlr, "UFS device spec version %u.%u%u\n",
+ ufshci_printf(ctrlr, "UFS device spec version %u.%u.%u\n",
UFSHCIV(UFSHCI_VER_REG_MJR, ver), UFSHCIV(UFSHCI_VER_REG_MNR, ver),
UFSHCIV(UFSHCI_VER_REG_VS, ver));
ufshci_printf(ctrlr, "%u enabled LUNs found\n",
@@ -426,3 +505,272 @@ ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr)
return (0);
}
+
+static int
+ufshci_dev_enable_write_booster(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ int error;
+
+ /* Enable WriteBooster */
+ error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN);
+ if (error) {
+ ufshci_printf(ctrlr, "Failed to enable WriteBooster\n");
+ return (error);
+ }
+ dev->is_wb_enabled = true;
+
+ /* Enable WriteBooster buffer flush during hibernate */
+ error = ufshci_dev_set_flag(ctrlr,
+ UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE);
+ if (error) {
+ ufshci_printf(ctrlr,
+ "Failed to enable WriteBooster buffer flush during hibernate\n");
+ return (error);
+ }
+
+ /* Enable WriteBooster buffer flush */
+ error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN);
+ if (error) {
+ ufshci_printf(ctrlr,
+ "Failed to enable WriteBooster buffer flush\n");
+ return (error);
+ }
+ dev->is_wb_flush_enabled = true;
+
+ return (0);
+}
+
+static int
+ufshci_dev_disable_write_booster(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ int error;
+
+ /* Disable WriteBooster buffer flush */
+ error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN);
+ if (error) {
+ ufshci_printf(ctrlr,
+ "Failed to disable WriteBooster buffer flush\n");
+ return (error);
+ }
+ dev->is_wb_flush_enabled = false;
+
+ /* Disable WriteBooster buffer flush during hibernate */
+ error = ufshci_dev_clear_flag(ctrlr,
+ UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE);
+ if (error) {
+ ufshci_printf(ctrlr,
+ "Failed to disable WriteBooster buffer flush during hibernate\n");
+ return (error);
+ }
+
+ /* Disable WriteBooster */
+ error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN);
+ if (error) {
+ ufshci_printf(ctrlr, "Failed to disable WriteBooster\n");
+ return (error);
+ }
+ dev->is_wb_enabled = false;
+
+ return (0);
+}
+
+static int
+ufshci_dev_is_write_booster_buffer_life_time_left(
+ struct ufshci_controller *ctrlr, bool *is_life_time_left)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ uint8_t buffer_lun;
+ uint64_t life_time;
+ uint32_t error;
+
+ if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED)
+ buffer_lun = dev->wb_dedicated_lu;
+ else
+ buffer_lun = 0;
+
+ error = ufshci_dev_read_attribute(ctrlr,
+ UFSHCI_ATTR_B_WB_BUFFER_LIFE_TIME_EST, buffer_lun, 0, &life_time);
+ if (error)
+ return (error);
+
+ *is_life_time_left = (life_time != UFSHCI_ATTR_WB_LIFE_EXCEEDED);
+
+ return (0);
+}
+
+/*
+ * This function is not yet in use. It will be used when suspend/resume is
+ * implemented.
+ */
+static __unused int
+ufshci_dev_need_write_booster_buffer_flush(struct ufshci_controller *ctrlr,
+ bool *need_flush)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ bool is_life_time_left = false;
+ uint64_t available_buffer_size, current_buffer_size;
+ uint8_t buffer_lun;
+ uint32_t error;
+
+ *need_flush = false;
+
+ if (!dev->is_wb_enabled)
+ return (0);
+
+ error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr,
+ &is_life_time_left);
+ if (error)
+ return (error);
+
+ if (!is_life_time_left)
+ return (ufshci_dev_disable_write_booster(ctrlr));
+
+ if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED)
+ buffer_lun = dev->wb_dedicated_lu;
+ else
+ buffer_lun = 0;
+
+ error = ufshci_dev_read_attribute(ctrlr,
+ UFSHCI_ATTR_B_AVAILABLE_WB_BUFFER_SIZE, buffer_lun, 0,
+ &available_buffer_size);
+ if (error)
+ return (error);
+
+ switch (dev->wb_user_space_config_option) {
+ case UFSHCI_DESC_WB_BUF_USER_SPACE_REDUCTION:
+ *need_flush = (available_buffer_size <=
+ UFSHCI_ATTR_WB_AVAILABLE_10);
+ break;
+ case UFSHCI_DESC_WB_BUF_PRESERVE_USER_SPACE:
+ /*
+ * In PRESERVE USER SPACE mode, a flush should be performed when
+ * the current buffer size is greater than 0 and the available
+ * buffer has dropped below write_booster_flush_threshold.
+ */
+ error = ufshci_dev_read_attribute(ctrlr,
+ UFSHCI_ATTR_D_CURRENT_WB_BUFFER_SIZE, buffer_lun, 0,
+ &current_buffer_size);
+ if (error)
+ return (error);
+
+ if (current_buffer_size == 0)
+ return (0);
+
+ *need_flush = (available_buffer_size <
+ dev->write_booster_flush_threshold);
+ break;
+ default:
+ ufshci_printf(ctrlr,
+ "Invalid bWriteBoosterBufferPreserveUserSpaceEn value");
+ return (EINVAL);
+ }
+
+ /*
+ * TODO: Need to handle WRITEBOOSTER_FLUSH_NEEDED exception case from
+ * wExceptionEventStatus attribute.
+ */
+
+ return (0);
+}
+
+int
+ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ uint32_t extended_ufs_feature_support;
+ uint32_t alloc_units;
+ struct ufshci_unit_descriptor unit_desc;
+ uint8_t lun;
+ bool is_life_time_left;
+ uint32_t mega_byte = 1024 * 1024;
+ uint32_t error = 0;
+
+ extended_ufs_feature_support = be32toh(
+ dev->dev_desc.dExtendedUfsFeaturesSupport);
+ if (!(extended_ufs_feature_support &
+ UFSHCI_DESC_EXT_UFS_FEATURE_WRITE_BOOSTER)) {
+ /* This device does not support Write Booster */
+ return (0);
+ }
+
+ if (ufshci_dev_enable_write_booster(ctrlr))
+ return (0);
+
+ /* Get WriteBooster buffer parameters */
+ dev->wb_buffer_type = dev->dev_desc.bWriteBoosterBufferType;
+ dev->wb_user_space_config_option =
+ dev->dev_desc.bWriteBoosterBufferPreserveUserSpaceEn;
+
+ /*
+ * Find the size of the write buffer.
+ * With LU-dedicated (00h), the WriteBooster buffer is assigned
+ * exclusively to one chosen LU (not one-per-LU), whereas Shared (01h)
+ * uses a single device-wide buffer shared by multiple LUs.
+ */
+ if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_SINGLE_SHARED) {
+ alloc_units = be32toh(
+ dev->dev_desc.dNumSharedWriteBoosterBufferAllocUnits);
+ ufshci_printf(ctrlr,
+ "WriteBooster buffer type = Shared, alloc_units=%d\n",
+ alloc_units);
+ } else if (dev->wb_buffer_type ==
+ UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED) {
+ ufshci_printf(ctrlr, "WriteBooster buffer type = Dedicated\n");
+ for (lun = 0; lun < ctrlr->max_lun_count; lun++) {
+ /* Find a dedicated buffer using a unit descriptor */
+ if (ufshci_dev_read_unit_descriptor(ctrlr, lun,
+ &unit_desc))
+ continue;
+
+ alloc_units = be32toh(
+ unit_desc.dLUNumWriteBoosterBufferAllocUnits);
+ if (alloc_units) {
+ dev->wb_dedicated_lu = lun;
+ break;
+ }
+ }
+ } else {
+ ufshci_printf(ctrlr,
+ "Not supported WriteBooster buffer type: 0x%x\n",
+ dev->wb_buffer_type);
+ goto out;
+ }
+
+ if (alloc_units == 0) {
+ ufshci_printf(ctrlr, "The WriteBooster buffer size is zero\n");
+ goto out;
+ }
+
+ dev->wb_buffer_size_mb = alloc_units *
+ dev->geo_desc.bAllocationUnitSize *
+ (be32toh(dev->geo_desc.dSegmentSize)) /
+ (mega_byte / UFSHCI_SECTOR_SIZE);
+
+ /* Set to flush when 40% of the available buffer size remains */
+ dev->write_booster_flush_threshold = UFSHCI_ATTR_WB_AVAILABLE_40;
+
+ /*
+ * Check if WriteBooster Buffer lifetime is left.
+ * WriteBooster Buffer lifetime — percent of life used based on P/E
+ * cycles. If "preserve user space" is enabled, writes to normal user
+ * space also consume WB life since the area is shared.
+ */
+ error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr,
+ &is_life_time_left);
+ if (error)
+ goto out;
+
+ if (!is_life_time_left) {
+ ufshci_printf(ctrlr,
+ "There is no WriteBooster buffer life time left.\n");
+ goto out;
+ }
+
+ ufshci_printf(ctrlr, "WriteBooster Enabled\n");
+ return (0);
+out:
+ ufshci_dev_disable_write_booster(ctrlr);
+ return (error);
+}
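
Annotation: the size conversion in ufshci_dev_config_write_booster() works in
512-byte sectors: dSegmentSize is given in sectors, so dividing by
(mega_byte / UFSHCI_SECTOR_SIZE) = 2048 yields megabytes. A worked example
with hypothetical descriptor values:

	/*
	 * Hypothetical values: alloc_units = 4096,
	 * bAllocationUnitSize = 1 (segments per allocation unit),
	 * dSegmentSize = 2048 (2048 * 512 B = 1 MiB per segment).
	 *
	 * wb_buffer_size_mb = 4096 * 1 * 2048 / (1048576 / 512)
	 *                   = 4096 * 2048 / 2048
	 *                   = 4096 MB
	 */
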
diff --git a/sys/dev/ufshci/ufshci_pci.c b/sys/dev/ufshci/ufshci_pci.c
index 65a69ee0b518..992026fd4f4d 100644
--- a/sys/dev/ufshci/ufshci_pci.c
+++ b/sys/dev/ufshci/ufshci_pci.c
@@ -49,11 +49,13 @@ static struct _pcsid {
uint32_t ref_clk;
uint32_t quirks;
} pci_ids[] = { { 0x131b36, "QEMU UFS Host Controller", UFSHCI_REF_CLK_19_2MHz,
- UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE },
+ UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE |
+ UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK },
{ 0x98fa8086, "Intel Lakefield UFS Host Controller",
UFSHCI_REF_CLK_19_2MHz,
UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE |
- UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE },
+ UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE |
+ UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY },
{ 0x54ff8086, "Intel UFS Host Controller", UFSHCI_REF_CLK_19_2MHz },
{ 0x00000000, NULL } };
diff --git a/sys/dev/ufshci/ufshci_private.h b/sys/dev/ufshci/ufshci_private.h
index 1a2742ae2e80..ec388c06e248 100644
--- a/sys/dev/ufshci/ufshci_private.h
+++ b/sys/dev/ufshci/ufshci_private.h
@@ -46,6 +46,8 @@ MALLOC_DECLARE(M_UFSHCI);
#define UFSHCI_UTR_ENTRIES (32)
#define UFSHCI_UTRM_ENTRIES (8)
+#define UFSHCI_SECTOR_SIZE (512)
+
struct ufshci_controller;
struct ufshci_completion_poll_status {
@@ -66,7 +68,6 @@ struct ufshci_request {
bool is_admin;
int32_t retries;
bool payload_valid;
- bool timeout;
bool spare[2]; /* Future use */
STAILQ_ENTRY(ufshci_request) stailq;
};
@@ -80,6 +81,7 @@ enum ufshci_slot_state {
};
struct ufshci_tracker {
+ TAILQ_ENTRY(ufshci_tracker) tailq;
struct ufshci_request *req;
struct ufshci_req_queue *req_queue;
struct ufshci_hw_queue *hwq;
@@ -119,6 +121,8 @@ struct ufshci_qops {
struct ufshci_req_queue *req_queue);
int (*enable)(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue);
+ void (*disable)(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue);
int (*reserve_slot)(struct ufshci_req_queue *req_queue,
struct ufshci_tracker **tr);
int (*reserve_admin_slot)(struct ufshci_req_queue *req_queue,
@@ -135,16 +139,27 @@ struct ufshci_qops {
#define UFSHCI_SDB_Q 0 /* Queue number for a single doorbell queue */
+enum ufshci_recovery {
+ RECOVERY_NONE = 0, /* Normal operations */
+ RECOVERY_WAITING, /* waiting for the reset to complete */
+};
+
/*
* Generic queue container used by both SDB (fixed 32-slot bitmap) and MCQ
* (ring buffer) modes. Fields are shared; some such as sq_head, sq_tail and
* cq_head are not used in SDB but used in MCQ.
*/
struct ufshci_hw_queue {
+ struct ufshci_controller *ctrlr;
+ struct ufshci_req_queue *req_queue;
uint32_t id;
int domain;
int cpu;
+ struct callout timer; /* protected by recovery_lock */
+ bool timer_armed; /* protected by recovery_lock */
+ enum ufshci_recovery recovery_state; /* protected by recovery_lock */
+
union {
struct ufshci_utp_xfer_req_desc *utrd;
struct ufshci_utp_task_mgmt_req_desc *utmrd;
@@ -159,6 +174,9 @@ struct ufshci_hw_queue {
uint32_t num_entries;
uint32_t num_trackers;
+ TAILQ_HEAD(, ufshci_tracker) free_tr;
+ TAILQ_HEAD(, ufshci_tracker) outstanding_tr;
+
/*
* A Request List using the single doorbell method uses a dedicated
* ufshci_tracker, one per slot.
@@ -175,7 +193,13 @@ struct ufshci_hw_queue {
int64_t num_retries;
int64_t num_failures;
+ /*
+ * Each lock may be acquired independently.
+ * When both are required, acquire them in this order to avoid
+ * deadlocks. (recovery_lock -> qlock)
+ */
struct mtx_padalign qlock;
+ struct mtx_padalign recovery_lock;
};
struct ufshci_req_queue {
@@ -214,6 +238,15 @@ struct ufshci_device {
struct ufshci_geometry_descriptor geo_desc;
uint32_t unipro_version;
+
+ /* WriteBooster */
+ bool is_wb_enabled;
+ bool is_wb_flush_enabled;
+ uint32_t wb_buffer_type;
+ uint32_t wb_buffer_size_mb;
+ uint32_t wb_user_space_config_option;
+ uint8_t wb_dedicated_lu;
+ uint32_t write_booster_flush_threshold;
};
/*
@@ -229,6 +262,10 @@ struct ufshci_controller {
2 /* Need an additional 200 ms of PA_TActivate */
#define UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE \
4 /* Need to wait 1250us after power mode change */
+#define UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY \
+ 8 /* Need to change the number of lanes before changing HS-GEAR. */
+#define UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK \
+ 16 /* QEMU does not support Task Management Request */
uint32_t ref_clk;
@@ -252,6 +289,9 @@ struct ufshci_controller {
/* Fields for tracking progress during controller initialization. */
struct intr_config_hook config_hook;
+ struct task reset_task;
+ struct taskqueue *taskqueue;
+
/* For shared legacy interrupt. */
int rid;
struct resource *res;
@@ -260,6 +300,8 @@ struct ufshci_controller {
uint32_t major_version;
uint32_t minor_version;
+ uint32_t enable_aborts;
+
uint32_t num_io_queues;
uint32_t max_hw_pend_io;
@@ -333,7 +375,7 @@ void ufshci_sim_detach(struct ufshci_controller *ctrlr);
/* Controller */
int ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev);
void ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev);
-int ufshci_ctrlr_reset(struct ufshci_controller *ctrlr);
+void ufshci_ctrlr_reset(struct ufshci_controller *ctrlr);
/* ctrlr defined as void * to allow use with config_intrhook. */
void ufshci_ctrlr_start_config_hook(void *arg);
void ufshci_ctrlr_poll(struct ufshci_controller *ctrlr);
@@ -356,6 +398,7 @@ int ufshci_dev_init_unipro(struct ufshci_controller *ctrlr);
int ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr);
int ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr);
int ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr);
+int ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr);
/* Controller Command */
void ufshci_ctrlr_cmd_send_task_mgmt_request(struct ufshci_controller *ctrlr,
@@ -375,7 +418,9 @@ int ufshci_utmr_req_queue_construct(struct ufshci_controller *ctrlr);
int ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr);
void ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr);
void ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr);
+void ufshci_utmr_req_queue_disable(struct ufshci_controller *ctrlr);
int ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr);
+void ufshci_utr_req_queue_disable(struct ufshci_controller *ctrlr);
int ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr);
void ufshci_req_queue_fail(struct ufshci_controller *ctrlr,
struct ufshci_hw_queue *hwq);
@@ -391,6 +436,8 @@ void ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue);
struct ufshci_hw_queue *ufshci_req_sdb_get_hw_queue(
struct ufshci_req_queue *req_queue);
+void ufshci_req_sdb_disable(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue);
int ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue);
int ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue,
@@ -476,13 +523,12 @@ _ufshci_allocate_request(const int how, ufshci_cb_fn_t cb_fn, void *cb_arg)
struct ufshci_request *req;
KASSERT(how == M_WAITOK || how == M_NOWAIT,
- ("nvme_allocate_request: invalid how %d", how));
+ ("ufshci_allocate_request: invalid how %d", how));
req = malloc(sizeof(*req), M_UFSHCI, how | M_ZERO);
if (req != NULL) {
req->cb_fn = cb_fn;
req->cb_arg = cb_arg;
- req->timeout = true;
}
return (req);
}
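
Annotation: the header comment above fixes the lock order as recovery_lock
before qlock. ufshci_req_sdb_disable() in ufshci_req_sdb.c follows it;
condensed:

	mtx_lock(&hwq->recovery_lock);
	mtx_lock(&hwq->qlock);
	/* ... update recovery_state and tracker deadlines ... */
	mtx_unlock(&hwq->qlock);
	mtx_unlock(&hwq->recovery_lock);
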
diff --git a/sys/dev/ufshci/ufshci_reg.h b/sys/dev/ufshci/ufshci_reg.h
index 6c9b3e2c8c04..6d5768505102 100644
--- a/sys/dev/ufshci/ufshci_reg.h
+++ b/sys/dev/ufshci/ufshci_reg.h
@@ -274,7 +274,7 @@ struct ufshci_registers {
#define UFSHCI_HCS_REG_UTMRLRDY_MASK (0x1)
#define UFSHCI_HCS_REG_UCRDY_SHIFT (3)
#define UFSHCI_HCS_REG_UCRDY_MASK (0x1)
-#define UFSHCI_HCS_REG_UPMCRS_SHIFT (7)
+#define UFSHCI_HCS_REG_UPMCRS_SHIFT (8)
#define UFSHCI_HCS_REG_UPMCRS_MASK (0x7)
#define UFSHCI_HCS_REG_UTPEC_SHIFT (12)
#define UFSHCI_HCS_REG_UTPEC_MASK (0xF)
diff --git a/sys/dev/ufshci/ufshci_req_queue.c b/sys/dev/ufshci/ufshci_req_queue.c
index bb6efa6d2ccc..7aa164d00bec 100644
--- a/sys/dev/ufshci/ufshci_req_queue.c
+++ b/sys/dev/ufshci/ufshci_req_queue.c
@@ -24,6 +24,7 @@ static const struct ufshci_qops sdb_utmr_qops = {
.destroy = ufshci_req_sdb_destroy,
.get_hw_queue = ufshci_req_sdb_get_hw_queue,
.enable = ufshci_req_sdb_enable,
+ .disable = ufshci_req_sdb_disable,
.reserve_slot = ufshci_req_sdb_reserve_slot,
.reserve_admin_slot = ufshci_req_sdb_reserve_slot,
.ring_doorbell = ufshci_req_sdb_utmr_ring_doorbell,
@@ -38,6 +39,7 @@ static const struct ufshci_qops sdb_utr_qops = {
.destroy = ufshci_req_sdb_destroy,
.get_hw_queue = ufshci_req_sdb_get_hw_queue,
.enable = ufshci_req_sdb_enable,
+ .disable = ufshci_req_sdb_disable,
.reserve_slot = ufshci_req_sdb_reserve_slot,
.reserve_admin_slot = ufshci_req_sdb_reserve_slot,
.ring_doorbell = ufshci_req_sdb_utr_ring_doorbell,
@@ -74,6 +76,13 @@ ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr)
&ctrlr->task_mgmt_req_queue);
}
+void
+ufshci_utmr_req_queue_disable(struct ufshci_controller *ctrlr)
+{
+ ctrlr->task_mgmt_req_queue.qops.disable(ctrlr,
+ &ctrlr->task_mgmt_req_queue);
+}
+
int
ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr)
{
@@ -109,6 +118,13 @@ ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr)
&ctrlr->transfer_req_queue);
}
+void
+ufshci_utr_req_queue_disable(struct ufshci_controller *ctrlr)
+{
+ ctrlr->transfer_req_queue.qops.disable(ctrlr,
+ &ctrlr->transfer_req_queue);
+}
+
int
ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr)
{
@@ -226,31 +242,30 @@ void
ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
{
struct ufshci_req_queue *req_queue = tr->req_queue;
+ struct ufshci_hw_queue *hwq = tr->hwq;
struct ufshci_request *req = tr->req;
struct ufshci_completion cpl;
uint8_t ocs;
bool retry, error, retriable;
- mtx_assert(&tr->hwq->qlock, MA_NOTOWNED);
+ mtx_assert(&hwq->qlock, MA_NOTOWNED);
/* Copy the response from the Request Descriptor or UTP Command
* Descriptor. */
+ cpl.size = tr->response_size;
if (req_queue->is_task_mgmt) {
- cpl.size = tr->response_size;
memcpy(&cpl.response_upiu,
- (void *)tr->hwq->utmrd[tr->slot_num].response_upiu,
- cpl.size);
+ (void *)hwq->utmrd[tr->slot_num].response_upiu, cpl.size);
- ocs = tr->hwq->utmrd[tr->slot_num].overall_command_status;
+ ocs = hwq->utmrd[tr->slot_num].overall_command_status;
} else {
bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- cpl.size = tr->response_size;
memcpy(&cpl.response_upiu, (void *)tr->ucd->response_upiu,
cpl.size);
- ocs = tr->hwq->utrd[tr->slot_num].overall_command_status;
+ ocs = hwq->utrd[tr->slot_num].overall_command_status;
}
error = ufshci_req_queue_response_is_error(req_queue, ocs,
@@ -262,9 +277,9 @@ ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
retry = error && retriable &&
req->retries < req_queue->ctrlr->retry_count;
if (retry)
- tr->hwq->num_retries++;
+ hwq->num_retries++;
if (error && req->retries >= req_queue->ctrlr->retry_count && retriable)
- tr->hwq->num_failures++;
+ hwq->num_failures++;
KASSERT(tr->req, ("there is no request assigned to the tracker\n"));
KASSERT(cpl.response_upiu.header.task_tag ==
@@ -282,7 +297,7 @@ ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
req->cb_fn(req->cb_arg, &cpl, error);
}
- mtx_lock(&tr->hwq->qlock);
+ mtx_lock(&hwq->qlock);
/* Clear the UTRL Completion Notification register */
req_queue->qops.clear_cpl_ntf(req_queue->ctrlr, tr);
@@ -301,6 +316,9 @@ ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
ufshci_free_request(req);
tr->req = NULL;
tr->slot_state = UFSHCI_SLOT_STATE_FREE;
+
+ TAILQ_REMOVE(&hwq->outstanding_tr, tr, tailq);
+ TAILQ_INSERT_HEAD(&hwq->free_tr, tr, tailq);
}
mtx_unlock(&tr->hwq->qlock);
@@ -309,7 +327,16 @@ ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
bool
ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue)
{
- return (req_queue->qops.process_cpl(req_queue));
+ struct ufshci_hw_queue *hwq;
+ bool done;
+
+ hwq = req_queue->qops.get_hw_queue(req_queue);
+
+ mtx_lock(&hwq->recovery_lock);
+ done = req_queue->qops.process_cpl(req_queue);
+ mtx_unlock(&hwq->recovery_lock);
+
+ return (done);
}
static void
@@ -427,6 +454,225 @@ ufshci_req_queue_fill_utr_descriptor(struct ufshci_utp_xfer_req_desc *desc,
desc->prdt_length = prdt_entry_cnt;
}
+static void
+ufshci_req_queue_timeout_recovery(struct ufshci_controller *ctrlr,
+ struct ufshci_hw_queue *hwq)
+{
+ /* TODO: Step 2. Logical unit reset */
+ /* TODO: Step 3. Target device reset */
+ /* TODO: Step 4. Bus reset */
+
+ /*
+ * Step 5. All previous commands were timeout.
+ * Recovery failed, reset the host controller.
+ */
+ ufshci_printf(ctrlr,
+ "Recovery step 5: Resetting controller due to a timeout.\n");
+ hwq->recovery_state = RECOVERY_WAITING;
+
+ ufshci_ctrlr_reset(ctrlr);
+}
+
+static void
+ufshci_abort_complete(void *arg, const struct ufshci_completion *status,
+ bool error)
+{
+ struct ufshci_tracker *tr = arg;
+
+ /*
+ * We still need to check the active tracker array, to cover the race
+ * where the I/O timed out at the same time the controller was
+ * completing it. An abort request is always on the Task Management
+ * Request queue, but affects either a Task Management Request or an
+ * I/O (UTRL) queue, so
+ * take the appropriate queue lock for the original command's queue,
+ * since we'll need it to avoid races with the completion code and to
+ * complete the command manually.
+ */
+ mtx_lock(&tr->hwq->qlock);
+ if (tr->slot_state != UFSHCI_SLOT_STATE_FREE) {
+ mtx_unlock(&tr->hwq->qlock);
+ /*
+ * An I/O has timed out, and the controller was unable to abort
+ * it for some reason. And we've not processed a completion for
+ * it yet. Construct a fake completion status, and then complete
+ * the I/O's tracker manually.
+ */
+ ufshci_printf(tr->hwq->ctrlr,
+ "abort task request failed, aborting task manually\n");
+ ufshci_req_queue_manual_complete_tracker(tr,
+ UFSHCI_DESC_ABORTED, UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
+
+ if ((status->response_upiu.task_mgmt_response_upiu
+ .output_param1 ==
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_COMPLETE) ||
+ (status->response_upiu.task_mgmt_response_upiu
+ .output_param1 ==
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_SUCCEEDED)) {
+ ufshci_printf(tr->hwq->ctrlr,
+ "Warning: the abort task request completed \
+ successfully, but the original task is still incomplete.");
+ return;
+ }
+
+ /* Abort Task failed. Perform recovery steps 2-5 */
+ ufshci_req_queue_timeout_recovery(tr->hwq->ctrlr, tr->hwq);
+ } else {
+ mtx_unlock(&tr->hwq->qlock);
+ }
+}
+
+static void
+ufshci_req_queue_timeout(void *arg)
+{
+ struct ufshci_hw_queue *hwq = arg;
+ struct ufshci_controller *ctrlr = hwq->ctrlr;
+ struct ufshci_tracker *tr;
+ sbintime_t now;
+ bool idle = true;
+ bool fast;
+
+ mtx_assert(&hwq->recovery_lock, MA_OWNED);
+
+ /*
+ * If the controller has failed, stop polling. This ensures that any
+ * failure processing that races with the hwq timeout will fail safely.
+ */
+ if (ctrlr->is_failed) {
+ ufshci_printf(ctrlr,
+ "Failed controller, stopping watchdog timeout.\n");
+ hwq->timer_armed = false;
+ return;
+ }
+
+ /*
+ * Shutdown condition: We set hwq->timer_armed to false in
+ * ufshci_req_sdb_destroy before calling callout_drain. When we call
+ * that, this routine might get called one last time. Exit w/o setting a
+ * timeout. None of the watchdog stuff needs to be done since we're
+ * destroying the hwq.
+ */
+ if (!hwq->timer_armed) {
+ ufshci_printf(ctrlr,
+ "Timeout fired during ufshci_utr_req_queue_destroy\n");
+ return;
+ }
+
+ switch (hwq->recovery_state) {
+ case RECOVERY_NONE:
+ /*
+ * See if there's any recovery needed. First, do a fast check to
+ * see if anything could have timed out. If not, then skip
+ * everything else.
+ */
+ fast = false;
+ mtx_lock(&hwq->qlock);
+ now = getsbinuptime();
+ TAILQ_FOREACH(tr, &hwq->outstanding_tr, tailq) {
+ /*
+ * If the first real transaction is not in timeout, then
+ * we're done. Otherwise, we try recovery.
+ */
+ idle = false;
+ if (now <= tr->deadline)
+ fast = true;
+ break;
+ }
+ mtx_unlock(&hwq->qlock);
+ if (idle || fast)
+ break;
+
+ /*
+ * There's a stale transaction at the start of the queue whose
+ * deadline has passed. Poll the completions as a last-ditch
+ * effort in case an interrupt has been missed.
+ */
+ hwq->req_queue->qops.process_cpl(hwq->req_queue);
+
+ /*
+ * Now that we've run the ISR, re-check whether any commands
+ * have timed out, and abort them or reset the controller if so.
+ */
+ mtx_lock(&hwq->qlock);
+ idle = true;
+ TAILQ_FOREACH(tr, &hwq->outstanding_tr, tailq) {
+ /*
+ * If we know this tracker hasn't timed out, we also
+ * know all subsequent ones haven't timed out. The tr
+ * queue is in submission order and all normal commands
+ * in a queue have the same timeout (or the timeout was
+ * changed by the user, but we eventually timeout then).
+ */
+ idle = false;
+ if (now <= tr->deadline)
+ break;
+
+ /*
+ * Timeout recovery is performed in five steps. If
+ * recovery fails at any step, the process continues to
+ * the next one:
+ * Step 1. Abort task
+ * Step 2. Logical unit reset (TODO)
+ * Step 3. Target device reset (TODO)
+ * Step 4. Bus reset (TODO)
+ * Step 5. Host controller reset
+ *
+ * If the timeout occurred in the Task Management
+ * Request queue, ignore Step 1.
+ */
+ if (ctrlr->enable_aborts &&
+ !hwq->req_queue->is_task_mgmt &&
+ tr->req->cb_fn != ufshci_abort_complete) {
+ /*
+ * Step 1. Timeout expired, abort the task.
+ *
+ * This isn't an abort command, ask for a
+ * hardware abort. This goes to the Task
+ * Management Request queue which will reset the
+ * task if it times out.
+ */
+ ufshci_printf(ctrlr,
+ "Recovery step 1: Timeout occurred. aborting the task(%d).\n",
+ tr->req->request_upiu.header.task_tag);
+ ufshci_ctrlr_cmd_send_task_mgmt_request(ctrlr,
+ ufshci_abort_complete, tr,
+ UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK,
+ tr->req->request_upiu.header.lun,
+ tr->req->request_upiu.header.task_tag, 0);
+ } else {
+ /* Recovery Step 2-5 */
+ ufshci_req_queue_timeout_recovery(ctrlr, hwq);
+ idle = false;
+ break;
+ }
+ }
+ mtx_unlock(&hwq->qlock);
+ break;
+
+ case RECOVERY_WAITING:
+ /*
+ * These messages aren't interesting while we're suspended. We
+ * put the queues into waiting state while suspending.
+ * Suspending takes a while, so we'll see these during that time
+ * and they aren't diagnostic. At other times, they indicate a
+ * problem that's worth complaining about.
+ */
+ if (!device_is_suspended(ctrlr->dev))
+ ufshci_printf(ctrlr, "Waiting for reset to complete\n");
+ idle = false; /* We want to keep polling */
+ break;
+ }
+
+ /*
+ * Rearm the timeout.
+ */
+ if (!idle) {
+ callout_schedule_sbt(&hwq->timer, SBT_1S / 2, SBT_1S / 2, 0);
+ } else {
+ hwq->timer_armed = false;
+ }
+}
+
/*
* Submit the tracker to the hardware.
*/
@@ -436,13 +682,30 @@ ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
{
struct ufshci_controller *ctrlr = req_queue->ctrlr;
struct ufshci_request *req = tr->req;
+ struct ufshci_hw_queue *hwq;
uint64_t ucd_paddr;
uint16_t request_len, response_off, response_len;
uint8_t slot_num = tr->slot_num;
+ int timeout;
- mtx_assert(&req_queue->qops.get_hw_queue(req_queue)->qlock, MA_OWNED);
+ hwq = req_queue->qops.get_hw_queue(req_queue);
+
+ mtx_assert(&hwq->qlock, MA_OWNED);
- /* TODO: Check timeout */
+ if (req->cb_fn == ufshci_completion_poll_cb)
+ timeout = 1;
+ else
+ timeout = ctrlr->timeout_period;
+ tr->deadline = getsbinuptime() + timeout * SBT_1S;
+ if (!hwq->timer_armed) {
+ hwq->timer_armed = true;
+ /*
+ * The watchdog wakes up once every 0.5 seconds to check
+ * whether any deadline has passed.
+ */
+ callout_reset_sbt_on(&hwq->timer, SBT_1S / 2, SBT_1S / 2,
+ ufshci_req_queue_timeout, hwq, hwq->cpu, 0);
+ }
if (req_queue->is_task_mgmt) {
/* Prepare UTP Task Management Request Descriptor. */
@@ -508,6 +771,9 @@ _ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
tr->deadline = SBT_MAX;
tr->req = req;
+ TAILQ_REMOVE(&tr->hwq->free_tr, tr, tailq);
+ TAILQ_INSERT_TAIL(&tr->hwq->outstanding_tr, tr, tailq);
+
ufshci_req_queue_submit_tracker(req_queue, tr, req->data_direction);
return (0);
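
Annotation: the per-queue watchdog is armed lazily: the first submission
after idle starts the callout on the queue's assigned CPU, the handler
reschedules itself every half second while trackers remain outstanding, and
teardown clears timer_armed before draining. Condensed lifecycle (all calls
appear in this patch):

	callout_init_mtx(&hwq->timer, &hwq->recovery_lock, 0);	/* construct */

	/* First submission arms the timer on the queue's CPU: */
	callout_reset_sbt_on(&hwq->timer, SBT_1S / 2, SBT_1S / 2,
	    ufshci_req_queue_timeout, hwq, hwq->cpu, 0);

	/* The handler rearms itself while work is outstanding: */
	callout_schedule_sbt(&hwq->timer, SBT_1S / 2, SBT_1S / 2, 0);

	/* Teardown: clear timer_armed under the lock, then drain. */
	callout_drain(&hwq->timer);
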
diff --git a/sys/dev/ufshci/ufshci_req_sdb.c b/sys/dev/ufshci/ufshci_req_sdb.c
index 834a459d48e3..ca47aa159c5b 100644
--- a/sys/dev/ufshci/ufshci_req_sdb.c
+++ b/sys/dev/ufshci/ufshci_req_sdb.c
@@ -40,6 +40,8 @@ ufshci_req_sdb_cmd_desc_destroy(struct ufshci_req_queue *req_queue)
bus_dma_tag_destroy(req_queue->dma_tag_ucd);
req_queue->dma_tag_ucd = NULL;
}
+
+ free(req_queue->hwq->ucd_bus_addr, M_UFSHCI);
}
static void
@@ -74,6 +76,10 @@ ufshci_req_sdb_cmd_desc_construct(struct ufshci_req_queue *req_queue,
uint8_t *ucdmem;
int i, error;
+ req_queue->hwq->ucd_bus_addr = malloc(sizeof(bus_addr_t) *
+ req_queue->num_trackers,
+ M_UFSHCI, M_ZERO | M_NOWAIT);
+ if (req_queue->hwq->ucd_bus_addr == NULL)
+ return (ENOMEM);
+
/*
* Each component must be page aligned, and individual PRP lists
* cannot cross a page boundary.
@@ -152,6 +158,9 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
uint64_t queuemem_phys;
uint8_t *queuemem;
struct ufshci_tracker *tr;
+ const size_t lock_name_len = 32;
+ char qlock_name[lock_name_len], recovery_lock_name[lock_name_len];
+ char *base;
int i, error;
req_queue->ctrlr = ctrlr;
@@ -169,11 +178,21 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
hwq = &req_queue->hwq[UFSHCI_SDB_Q];
hwq->num_entries = req_queue->num_entries;
hwq->num_trackers = req_queue->num_trackers;
- req_queue->hwq->ucd_bus_addr = malloc(sizeof(bus_addr_t) *
- req_queue->num_trackers,
- M_UFSHCI, M_ZERO | M_NOWAIT);
+ hwq->ctrlr = ctrlr;
+ hwq->req_queue = req_queue;
+
+ base = is_task_mgmt ? "ufshci utmrq" : "ufshci utrq";
+ snprintf(qlock_name, sizeof(qlock_name), "%s #%d lock", base,
+ UFSHCI_SDB_Q);
+ snprintf(recovery_lock_name, sizeof(recovery_lock_name),
+ "%s #%d recovery lock", base, UFSHCI_SDB_Q);
- mtx_init(&hwq->qlock, "ufshci req_queue lock", NULL, MTX_DEF);
+ mtx_init(&hwq->qlock, qlock_name, NULL, MTX_DEF);
+ mtx_init(&hwq->recovery_lock, recovery_lock_name, NULL, MTX_DEF);
+
+ callout_init_mtx(&hwq->timer, &hwq->recovery_lock, 0);
+ hwq->timer_armed = false;
+ hwq->recovery_state = RECOVERY_WAITING;
/*
* Allocate physical memory for request queue (UTP Transfer Request
@@ -219,6 +238,9 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
req_queue->num_entries,
M_UFSHCI, DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
+ TAILQ_INIT(&hwq->free_tr);
+ TAILQ_INIT(&hwq->outstanding_tr);
+
for (i = 0; i < req_queue->num_trackers; i++) {
tr = malloc_domainset(sizeof(struct ufshci_tracker), M_UFSHCI,
DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
@@ -226,6 +248,7 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
tr->req_queue = req_queue;
tr->slot_num = i;
tr->slot_state = UFSHCI_SLOT_STATE_FREE;
+ TAILQ_INSERT_HEAD(&hwq->free_tr, tr, tailq);
hwq->act_tr[i] = tr;
}
@@ -255,8 +278,6 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
ctrlr) != 0) {
ufshci_printf(ctrlr,
"failed to construct cmd descriptor memory\n");
- bus_dmamem_free(hwq->dma_tag_queue, hwq->utrd,
- hwq->queuemem_map);
goto out;
}
@@ -280,6 +301,11 @@ ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr;
int i;
+ mtx_lock(&hwq->recovery_lock);
+ hwq->timer_armed = false;
+ mtx_unlock(&hwq->recovery_lock);
+ callout_drain(&hwq->timer);
+
if (!req_queue->is_task_mgmt)
ufshci_req_sdb_cmd_desc_destroy(&ctrlr->transfer_req_queue);
@@ -305,10 +331,11 @@ ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
hwq->dma_tag_queue = NULL;
}
+ if (mtx_initialized(&hwq->recovery_lock))
+ mtx_destroy(&hwq->recovery_lock);
if (mtx_initialized(&hwq->qlock))
mtx_destroy(&hwq->qlock);
- free(req_queue->hwq->ucd_bus_addr, M_UFSHCI);
free(req_queue->hwq, M_UFSHCI);
}
@@ -318,10 +345,36 @@ ufshci_req_sdb_get_hw_queue(struct ufshci_req_queue *req_queue)
return &req_queue->hwq[UFSHCI_SDB_Q];
}
+void
+ufshci_req_sdb_disable(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue)
+{
+ struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+ struct ufshci_tracker *tr, *tr_temp;
+
+ mtx_lock(&hwq->recovery_lock);
+ mtx_lock(&hwq->qlock);
+
+ if (mtx_initialized(&hwq->recovery_lock))
+ mtx_assert(&hwq->recovery_lock, MA_OWNED);
+ if (mtx_initialized(&hwq->qlock))
+ mtx_assert(&hwq->qlock, MA_OWNED);
+
+ hwq->recovery_state = RECOVERY_WAITING;
+ TAILQ_FOREACH_SAFE(tr, &hwq->outstanding_tr, tailq, tr_temp) {
+ tr->deadline = SBT_MAX;
+ }
+
+ mtx_unlock(&hwq->qlock);
+ mtx_unlock(&hwq->recovery_lock);
+}
+
int
ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue)
{
+ struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+
if (req_queue->is_task_mgmt) {
uint32_t hcs, utmrldbr, utmrlrsr;
@@ -373,6 +426,14 @@ ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
ufshci_mmio_write_4(ctrlr, utrlrsr, utrlrsr);
}
+ if (mtx_initialized(&hwq->recovery_lock))
+ mtx_assert(&hwq->recovery_lock, MA_OWNED);
+ if (mtx_initialized(&hwq->qlock))
+ mtx_assert(&hwq->qlock, MA_OWNED);
+ KASSERT(!req_queue->ctrlr->is_failed, ("Enabling a failed hwq\n"));
+
+ hwq->recovery_state = RECOVERY_NONE;
+
return (0);
}
@@ -466,6 +527,8 @@ ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue)
uint8_t slot;
bool done = false;
+ mtx_assert(&hwq->recovery_lock, MA_OWNED);
+
hwq->num_intr_handler_calls++;
bus_dmamap_sync(hwq->dma_tag_queue, hwq->queuemem_map,
diff --git a/sys/dev/ufshci/ufshci_sim.c b/sys/dev/ufshci/ufshci_sim.c
index db24561f4169..828b520614a5 100644
--- a/sys/dev/ufshci/ufshci_sim.c
+++ b/sys/dev/ufshci/ufshci_sim.c
@@ -241,7 +241,6 @@ ufshci_cam_action(struct cam_sim *sim, union ccb *ccb)
ccb->ccb_h.status = CAM_REQ_CMP;
break;
case XPT_ABORT:
- /* TODO: Implement Task Management CMD*/
ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
break;
case XPT_SET_TRAN_SETTINGS:
diff --git a/sys/dev/ufshci/ufshci_sysctl.c b/sys/dev/ufshci/ufshci_sysctl.c
index 5e5069f12e5f..56bc06b13f3c 100644
--- a/sys/dev/ufshci/ufshci_sysctl.c
+++ b/sys/dev/ufshci/ufshci_sysctl.c
@@ -152,6 +152,7 @@ ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr)
struct sysctl_ctx_list *ctrlr_ctx;
struct sysctl_oid *ctrlr_tree, *que_tree, *ioq_tree;
struct sysctl_oid_list *ctrlr_list, *ioq_list;
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
#define QUEUE_NAME_LENGTH 16
char queue_name[QUEUE_NAME_LENGTH];
int i;
@@ -177,6 +178,25 @@ ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr)
SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "cap", CTLFLAG_RD,
&ctrlr->cap, 0, "Number of I/O queue pairs");
+ SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_enabled",
+ CTLFLAG_RD, &dev->is_wb_enabled, 0, "WriteBooster enable/disable");
+
+ SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_flush_enabled",
+ CTLFLAG_RD, &dev->is_wb_flush_enabled, 0,
+ "WriteBooster flush enable/disable");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_buffer_type",
+ CTLFLAG_RD, &dev->wb_buffer_type, 0, "WriteBooster type");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_buffer_size_mb",
+ CTLFLAG_RD, &dev->wb_buffer_size_mb, 0,
+ "WriteBooster buffer size in MB");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ "wb_user_space_config_option", CTLFLAG_RD,
+ &dev->wb_user_space_config_option, 0,
+ "WriteBooster preserve user space mode");
+
SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "timeout_period",
CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, &ctrlr->timeout_period,
0, ufshci_sysctl_timeout_period, "IU",
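
Annotation: from userland the new WriteBooster nodes can be read with
sysctl(3); a small sketch (the dev.ufshci.0 prefix is an assumption about how
the controller tree is rooted — check `sysctl dev` on a real system):

	#include <sys/types.h>
	#include <sys/sysctl.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		bool wb_enabled;
		uint32_t wb_size_mb;
		size_t len;

		len = sizeof(wb_enabled);
		if (sysctlbyname("dev.ufshci.0.wb_enabled", &wb_enabled, &len,
		    NULL, 0) == 0)
			printf("WriteBooster enabled: %d\n", wb_enabled);

		len = sizeof(wb_size_mb);
		if (sysctlbyname("dev.ufshci.0.wb_buffer_size_mb", &wb_size_mb,
		    &len, NULL, 0) == 0)
			printf("WriteBooster buffer: %u MB\n", wb_size_mb);
		return (0);
	}
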
diff --git a/sys/dev/ufshci/ufshci_uic_cmd.c b/sys/dev/ufshci/ufshci_uic_cmd.c
index 2c5f635dc11e..b9c867ff7065 100644
--- a/sys/dev/ufshci/ufshci_uic_cmd.c
+++ b/sys/dev/ufshci/ufshci_uic_cmd.c
@@ -14,7 +14,7 @@
int
ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr)
{
- uint32_t is;
+ uint32_t is, hcs;
int timeout;
/* Wait for the IS flag to change */
@@ -40,6 +40,15 @@ ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr)
DELAY(10);
}
+ /* Check HCS power mode change request status */
+ hcs = ufshci_mmio_read_4(ctrlr, hcs);
+ if (UFSHCIV(UFSHCI_HCS_REG_UPMCRS, hcs) != 0x01) {
+ ufshci_printf(ctrlr,
+ "Power mode change request status error: 0x%x\n",
+ UFSHCIV(UFSHCI_HCS_REG_UPMCRS, hcs));
+ return (ENXIO);
+ }
+
return (0);
}
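
Annotation: the 0x01 compared against UPMCRS above is PWR_LOCAL. For
reference, the UPMCRS codes as given in the UFSHCI spec (these names are not
defined by the driver; illustrative only):

	enum upmcrs_code {
		UPMCRS_PWR_OK = 0x00,		/* no power mode change pending */
		UPMCRS_PWR_LOCAL = 0x01,	/* request applied at the local side */
		UPMCRS_PWR_REMOTE = 0x02,	/* request applied at the remote side */
		UPMCRS_PWR_BUSY = 0x03,		/* aborted by a concurrent request */
		UPMCRS_PWR_ERROR_CAP = 0x04,	/* rejected: capabilities exceeded */
		UPMCRS_PWR_FATAL_ERROR = 0x05,	/* failed: fatal link error */
	};
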
@@ -112,6 +121,7 @@ ufshci_uic_send_cmd(struct ufshci_controller *ctrlr,
struct ufshci_uic_cmd *uic_cmd, uint32_t *return_value)
{
int error;
+ uint32_t config_result_code;
mtx_lock(&ctrlr->uic_cmd_lock);
@@ -134,6 +144,13 @@ ufshci_uic_send_cmd(struct ufshci_controller *ctrlr,
if (error)
return (ENXIO);
+ config_result_code = ufshci_mmio_read_4(ctrlr, ucmdarg2);
+ if (config_result_code) {
+ ufshci_printf(ctrlr,
+ "Failed to send UIC command. (config result code = 0x%x)\n",
+ config_result_code);
+ }
+
if (return_value != NULL)
*return_value = ufshci_mmio_read_4(ctrlr, ucmdarg3);